aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/Lib
diff options
context:
space:
mode:
Diffstat (limited to 'Lib')
-rw-r--r--Lib/_pydatetime.py2
-rw-r--r--Lib/_pydecimal.py4
-rw-r--r--Lib/_pyio.py80
-rw-r--r--Lib/_pyrepl/_module_completer.py4
-rw-r--r--Lib/_pyrepl/base_eventqueue.py6
-rw-r--r--Lib/_pyrepl/commands.py8
-rw-r--r--Lib/_pyrepl/readline.py5
-rw-r--r--Lib/_pyrepl/windows_console.py26
-rw-r--r--Lib/_strptime.py202
-rw-r--r--Lib/annotationlib.py105
-rw-r--r--Lib/argparse.py2
-rw-r--r--Lib/ast.py28
-rw-r--r--Lib/asyncio/base_events.py70
-rw-r--r--Lib/asyncio/tools.py168
-rw-r--r--Lib/calendar.py6
-rw-r--r--Lib/code.py2
-rw-r--r--Lib/compression/zstd/_zstdfile.py3
-rw-r--r--Lib/concurrent/futures/interpreter.py181
-rw-r--r--Lib/concurrent/futures/process.py5
-rw-r--r--Lib/concurrent/interpreters/__init__.py (renamed from Lib/test/support/interpreters/__init__.py)58
-rw-r--r--Lib/concurrent/interpreters/_crossinterp.py (renamed from Lib/test/support/interpreters/_crossinterp.py)2
-rw-r--r--Lib/concurrent/interpreters/_queues.py (renamed from Lib/test/support/interpreters/queues.py)9
-rw-r--r--Lib/configparser.py11
-rw-r--r--Lib/ctypes/__init__.py6
-rw-r--r--Lib/dbm/dumb.py32
-rw-r--r--Lib/dbm/sqlite3.py4
-rw-r--r--Lib/difflib.py54
-rw-r--r--Lib/doctest.py113
-rw-r--r--Lib/email/_header_value_parser.py6
-rw-r--r--Lib/email/header.py17
-rw-r--r--Lib/email/message.py2
-rw-r--r--Lib/email/utils.py10
-rw-r--r--Lib/encodings/idna.py2
-rw-r--r--Lib/encodings/palmos.py2
-rw-r--r--Lib/fractions.py7
-rw-r--r--Lib/genericpath.py11
-rw-r--r--Lib/hashlib.py12
-rw-r--r--Lib/html/parser.py202
-rw-r--r--Lib/http/server.py17
-rw-r--r--Lib/idlelib/NEWS2x.txt2
-rw-r--r--Lib/idlelib/News3.txt7
-rw-r--r--Lib/idlelib/configdialog.py2
-rw-r--r--Lib/idlelib/debugger.py2
-rw-r--r--Lib/idlelib/editor.py2
-rw-r--r--Lib/idlelib/idle_test/htest.py2
-rw-r--r--Lib/ipaddress.py10
-rw-r--r--Lib/json/encoder.py5
-rw-r--r--Lib/locale.py5
-rw-r--r--Lib/logging/__init__.py28
-rw-r--r--Lib/logging/config.py2
-rw-r--r--Lib/multiprocessing/forkserver.py4
-rw-r--r--Lib/netrc.py33
-rw-r--r--Lib/ntpath.py38
-rw-r--r--Lib/os.py3
-rw-r--r--Lib/platform.py24
-rw-r--r--Lib/posixpath.py57
-rw-r--r--Lib/pprint.py68
-rw-r--r--Lib/pydoc.py2
-rw-r--r--Lib/reprlib.py17
-rw-r--r--Lib/shelve.py5
-rw-r--r--Lib/sqlite3/__main__.py23
-rw-r--r--Lib/sqlite3/_completer.py42
-rw-r--r--Lib/sre_compile.py7
-rw-r--r--Lib/sre_constants.py7
-rw-r--r--Lib/sre_parse.py7
-rw-r--r--Lib/tarfile.py163
-rw-r--r--Lib/test/.ruff.toml7
-rw-r--r--Lib/test/_code_definitions.py30
-rw-r--r--Lib/test/_test_multiprocessing.py29
-rw-r--r--Lib/test/audit-tests.py28
-rw-r--r--Lib/test/libregrtest/main.py23
-rw-r--r--Lib/test/libregrtest/setup.py9
-rw-r--r--Lib/test/libregrtest/single.py2
-rw-r--r--Lib/test/libregrtest/tsan.py2
-rw-r--r--Lib/test/mp_preload_flush.py15
-rw-r--r--Lib/test/pickletester.py5
-rw-r--r--Lib/test/pythoninfo.py13
-rw-r--r--Lib/test/subprocessdata/fd_status.py4
-rw-r--r--Lib/test/support/__init__.py55
-rw-r--r--Lib/test/support/channels.py (renamed from Lib/test/support/interpreters/channels.py)18
-rw-r--r--Lib/test/test__interpchannels.py2
-rw-r--r--Lib/test/test__interpreters.py40
-rw-r--r--Lib/test/test_annotationlib.py70
-rw-r--r--Lib/test/test_ast/test_ast.py95
-rw-r--r--Lib/test/test_asyncgen.py9
-rw-r--r--Lib/test/test_asyncio/test_base_events.py59
-rw-r--r--Lib/test/test_asyncio/test_tools.py1560
-rw-r--r--Lib/test/test_audit.py8
-rw-r--r--Lib/test/test_build_details.py16
-rw-r--r--Lib/test/test_builtin.py3
-rw-r--r--Lib/test/test_calendar.py2
-rw-r--r--Lib/test/test_capi/test_abstract.py25
-rw-r--r--Lib/test/test_capi/test_config.py1
-rw-r--r--Lib/test/test_capi/test_misc.py2
-rw-r--r--Lib/test/test_capi/test_opt.py275
-rw-r--r--Lib/test/test_capi/test_sys.py64
-rw-r--r--Lib/test/test_capi/test_type.py10
-rw-r--r--Lib/test/test_capi/test_unicode.py21
-rw-r--r--Lib/test/test_class.py1
-rw-r--r--Lib/test/test_code.py53
-rw-r--r--Lib/test/test_codeccallbacks.py1
-rw-r--r--Lib/test/test_codecs.py7
-rw-r--r--Lib/test/test_concurrent_futures/test_init.py4
-rw-r--r--Lib/test/test_concurrent_futures/test_interpreter_pool.py265
-rw-r--r--Lib/test/test_concurrent_futures/test_shutdown.py58
-rw-r--r--Lib/test/test_configparser.py12
-rw-r--r--Lib/test/test_cprofile.py19
-rw-r--r--Lib/test/test_crossinterp.py2
-rw-r--r--Lib/test/test_csv.py48
-rw-r--r--Lib/test/test_ctypes/_support.py1
-rw-r--r--Lib/test/test_ctypes/test_byteswap.py1
-rw-r--r--Lib/test/test_ctypes/test_generated_structs.py2
-rw-r--r--Lib/test/test_ctypes/test_incomplete.py10
-rw-r--r--Lib/test/test_ctypes/test_parameters.py4
-rw-r--r--Lib/test/test_dbm.py61
-rw-r--r--Lib/test/test_dbm_gnu.py27
-rw-r--r--Lib/test/test_decimal.py3
-rw-r--r--Lib/test/test_descr.py28
-rw-r--r--Lib/test/test_dict.py32
-rw-r--r--Lib/test/test_difflib.py6
-rw-r--r--Lib/test/test_difflib_expect.html48
-rw-r--r--Lib/test/test_dis.py209
-rw-r--r--Lib/test/test_doctest/sample_doctest_errors.py46
-rw-r--r--Lib/test/test_doctest/test_doctest.py447
-rw-r--r--Lib/test/test_doctest/test_doctest_errors.txt14
-rw-r--r--Lib/test/test_doctest/test_doctest_skip.txt2
-rw-r--r--Lib/test/test_doctest/test_doctest_skip2.txt6
-rw-r--r--Lib/test/test_email/test__header_value_parser.py78
-rw-r--r--Lib/test/test_email/test_email.py30
-rw-r--r--Lib/test/test_enum.py2
-rw-r--r--Lib/test/test_exceptions.py2
-rw-r--r--Lib/test/test_external_inspection.py569
-rw-r--r--Lib/test/test_fcntl.py13
-rw-r--r--Lib/test/test_fileio.py12
-rw-r--r--Lib/test/test_float.py2
-rw-r--r--Lib/test/test_format.py10
-rw-r--r--Lib/test/test_fractions.py15
-rw-r--r--Lib/test/test_free_threading/test_generators.py51
-rw-r--r--Lib/test/test_free_threading/test_heapq.py267
-rw-r--r--Lib/test/test_free_threading/test_io.py42
-rw-r--r--Lib/test/test_free_threading/test_itertools.py95
-rw-r--r--Lib/test/test_free_threading/test_itertools_batched.py38
-rw-r--r--Lib/test/test_free_threading/test_itertools_combinatoric.py51
-rw-r--r--Lib/test/test_fstring.py10
-rw-r--r--Lib/test/test_generated_cases.py299
-rw-r--r--Lib/test/test_genericpath.py2
-rw-r--r--Lib/test/test_getpath.py21
-rw-r--r--Lib/test/test_grammar.py14
-rw-r--r--Lib/test/test_gzip.py1
-rw-r--r--Lib/test/test_hashlib.py249
-rw-r--r--Lib/test/test_hmac.py1
-rw-r--r--Lib/test/test_htmlparser.py282
-rw-r--r--Lib/test/test_http_cookiejar.py187
-rw-r--r--Lib/test/test_idle.py2
-rw-r--r--Lib/test/test_inspect/test_inspect.py4
-rw-r--r--Lib/test/test_interpreters/test_api.py884
-rw-r--r--Lib/test/test_interpreters/test_channels.py32
-rw-r--r--Lib/test/test_interpreters/test_lifecycle.py4
-rw-r--r--Lib/test/test_interpreters/test_queues.py95
-rw-r--r--Lib/test/test_interpreters/test_stress.py2
-rw-r--r--Lib/test/test_interpreters/utils.py3
-rw-r--r--Lib/test/test_io.py32
-rw-r--r--Lib/test/test_ioctl.py13
-rw-r--r--Lib/test/test_ipaddress.py19
-rw-r--r--Lib/test/test_iter.py2
-rw-r--r--Lib/test/test_json/test_dump.py8
-rw-r--r--Lib/test/test_json/test_tool.py2
-rw-r--r--Lib/test/test_list.py15
-rw-r--r--Lib/test/test_listcomps.py2
-rw-r--r--Lib/test/test_locale.py4
-rw-r--r--Lib/test/test_logging.py85
-rw-r--r--Lib/test/test_math.py43
-rw-r--r--Lib/test/test_monitoring.py15
-rw-r--r--Lib/test/test_netrc.py13
-rw-r--r--Lib/test/test_ntpath.py201
-rw-r--r--Lib/test/test_optparse.py4
-rw-r--r--Lib/test/test_os.py14
-rw-r--r--Lib/test/test_pathlib/test_pathlib.py8
-rw-r--r--Lib/test/test_peepholer.py101
-rw-r--r--Lib/test/test_perf_profiler.py5
-rw-r--r--Lib/test/test_platform.py16
-rw-r--r--Lib/test/test_posixpath.py236
-rw-r--r--Lib/test/test_pprint.py2
-rw-r--r--Lib/test/test_pty.py1
-rw-r--r--Lib/test/test_pyclbr.py4
-rw-r--r--Lib/test/test_pydoc/test_pydoc.py2
-rw-r--r--Lib/test/test_pyexpat.py20
-rw-r--r--Lib/test/test_pyrepl/test_pyrepl.py39
-rw-r--r--Lib/test/test_pyrepl/test_unix_console.py1
-rw-r--r--Lib/test/test_pyrepl/test_windows_console.py2
-rw-r--r--Lib/test/test_queue.py20
-rw-r--r--Lib/test/test_random.py327
-rw-r--r--Lib/test/test_re.py28
-rw-r--r--Lib/test/test_readline.py8
-rw-r--r--Lib/test/test_regrtest.py65
-rw-r--r--Lib/test/test_remote_pdb.py9
-rw-r--r--Lib/test/test_reprlib.py60
-rw-r--r--Lib/test/test_shutil.py2
-rw-r--r--Lib/test/test_sqlite3/test_cli.py113
-rw-r--r--Lib/test/test_ssl.py43
-rw-r--r--Lib/test/test_stable_abi_ctypes.py4
-rw-r--r--Lib/test/test_statistics.py3
-rw-r--r--Lib/test/test_str.py8
-rw-r--r--Lib/test/test_string/_support.py1
-rw-r--r--Lib/test/test_strptime.py37
-rw-r--r--Lib/test/test_syntax.py42
-rw-r--r--Lib/test/test_sys.py64
-rw-r--r--Lib/test/test_sysconfig.py5
-rw-r--r--Lib/test/test_tarfile.py310
-rw-r--r--Lib/test/test_threading.py65
-rw-r--r--Lib/test/test_tokenize.py78
-rw-r--r--Lib/test/test_tools/i18n_data/docstrings.py2
-rw-r--r--Lib/test/test_traceback.py29
-rw-r--r--Lib/test/test_tstring.py1
-rw-r--r--Lib/test/test_type_annotations.py22
-rw-r--r--Lib/test/test_types.py58
-rw-r--r--Lib/test/test_typing.py172
-rw-r--r--Lib/test/test_unittest/test_case.py16
-rw-r--r--Lib/test/test_unittest/testmock/testhelpers.py21
-rw-r--r--Lib/test/test_urllib.py1
-rw-r--r--Lib/test/test_urlparse.py398
-rwxr-xr-xLib/test/test_uuid.py28
-rw-r--r--Lib/test/test_venv.py2
-rw-r--r--Lib/test/test_webbrowser.py1
-rw-r--r--Lib/test/test_xml_etree.py27
-rw-r--r--Lib/test/test_zipfile/__main__.py2
-rw-r--r--Lib/test/test_zipfile/_path/_test_params.py2
-rw-r--r--Lib/test/test_zipfile/_path/test_complexity.py2
-rw-r--r--Lib/test/test_zipfile/_path/test_path.py22
-rw-r--r--Lib/test/test_zipfile/_path/write-alpharep.py1
-rw-r--r--Lib/test/test_zlib.py108
-rw-r--r--Lib/test/test_zoneinfo/test_zoneinfo.py43
-rw-r--r--Lib/test/test_zoneinfo/test_zoneinfo_property.py18
-rw-r--r--Lib/test/test_zstd.py366
-rw-r--r--Lib/tokenize.py16
-rw-r--r--Lib/traceback.py13
-rw-r--r--Lib/typing.py90
-rw-r--r--Lib/unittest/_log.py5
-rw-r--r--Lib/unittest/case.py6
-rw-r--r--Lib/unittest/mock.py15
-rw-r--r--Lib/uuid.py20
-rw-r--r--Lib/xml/etree/ElementTree.py10
-rw-r--r--Lib/xmlrpc/server.py2
-rw-r--r--Lib/zipfile/_path/__init__.py38
-rw-r--r--Lib/zipfile/_path/_functools.py20
-rw-r--r--Lib/zipfile/_path/glob.py1
-rw-r--r--Lib/zoneinfo/_common.py8
-rw-r--r--Lib/zoneinfo/_zoneinfo.py6
248 files changed, 9962 insertions, 3170 deletions
diff --git a/Lib/_pydatetime.py b/Lib/_pydatetime.py
index 71f619024e5..bc35823f701 100644
--- a/Lib/_pydatetime.py
+++ b/Lib/_pydatetime.py
@@ -2164,7 +2164,7 @@ class datetime(date):
By default, the fractional part is omitted if self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
- giving a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.
+ a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
diff --git a/Lib/_pydecimal.py b/Lib/_pydecimal.py
index 46fa9ffcb1e..781b38ec26b 100644
--- a/Lib/_pydecimal.py
+++ b/Lib/_pydecimal.py
@@ -6120,9 +6120,9 @@ _parse_format_specifier_regex = re.compile(r"""\A
(?P<no_neg_0>z)?
(?P<alt>\#)?
(?P<zeropad>0)?
-(?P<minimumwidth>(?!0)\d+)?
+(?P<minimumwidth>\d+)?
(?P<thousands_sep>[,_])?
-(?:\.(?P<precision>0|(?!0)\d+))?
+(?:\.(?P<precision>\d+))?
(?P<type>[eEfFgGn%])?
\z
""", re.VERBOSE|re.DOTALL)
diff --git a/Lib/_pyio.py b/Lib/_pyio.py
index fb2a6d049ca..5db8ce9244b 100644
--- a/Lib/_pyio.py
+++ b/Lib/_pyio.py
@@ -876,16 +876,28 @@ class BytesIO(BufferedIOBase):
_buffer = None
def __init__(self, initial_bytes=None):
+ # Use to keep self._buffer and self._pos consistent.
+ self._lock = Lock()
+
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
- self._buffer = buf
- self._pos = 0
+
+ with self._lock:
+ self._buffer = buf
+ self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
- return self.__dict__.copy()
+ with self._lock:
+ state = self.__dict__.copy()
+ del state['_lock']
+ return state
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ self._lock = Lock()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
@@ -918,14 +930,16 @@ class BytesIO(BufferedIOBase):
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
- if size < 0:
- size = len(self._buffer)
- if len(self._buffer) <= self._pos:
- return b""
- newpos = min(len(self._buffer), self._pos + size)
- b = self._buffer[self._pos : newpos]
- self._pos = newpos
- return bytes(b)
+
+ with self._lock:
+ if size < 0:
+ size = len(self._buffer)
+ if len(self._buffer) <= self._pos:
+ return b""
+ newpos = min(len(self._buffer), self._pos + size)
+ b = self._buffer[self._pos : newpos]
+ self._pos = newpos
+ return bytes(b)
def read1(self, size=-1):
"""This is the same as read.
@@ -941,12 +955,14 @@ class BytesIO(BufferedIOBase):
n = view.nbytes # Size of any bytes-like object
if n == 0:
return 0
- pos = self._pos
- if pos > len(self._buffer):
- # Pad buffer to pos with null bytes.
- self._buffer.resize(pos)
- self._buffer[pos:pos + n] = b
- self._pos += n
+
+ with self._lock:
+ pos = self._pos
+ if pos > len(self._buffer):
+ # Pad buffer to pos with null bytes.
+ self._buffer.resize(pos)
+ self._buffer[pos:pos + n] = b
+ self._pos += n
return n
def seek(self, pos, whence=0):
@@ -963,9 +979,11 @@ class BytesIO(BufferedIOBase):
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
- self._pos = max(0, self._pos + pos)
+ with self._lock:
+ self._pos = max(0, self._pos + pos)
elif whence == 2:
- self._pos = max(0, len(self._buffer) + pos)
+ with self._lock:
+ self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("unsupported whence value")
return self._pos
@@ -978,18 +996,20 @@ class BytesIO(BufferedIOBase):
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
- if pos is None:
- pos = self._pos
- else:
- try:
- pos_index = pos.__index__
- except AttributeError:
- raise TypeError(f"{pos!r} is not an integer")
+
+ with self._lock:
+ if pos is None:
+ pos = self._pos
else:
- pos = pos_index()
- if pos < 0:
- raise ValueError("negative truncate position %r" % (pos,))
- del self._buffer[pos:]
+ try:
+ pos_index = pos.__index__
+ except AttributeError:
+ raise TypeError(f"{pos!r} is not an integer")
+ else:
+ pos = pos_index()
+ if pos < 0:
+ raise ValueError("negative truncate position %r" % (pos,))
+ del self._buffer[pos:]
return pos
def readable(self):
diff --git a/Lib/_pyrepl/_module_completer.py b/Lib/_pyrepl/_module_completer.py
index 494a501101a..1e9462a4215 100644
--- a/Lib/_pyrepl/_module_completer.py
+++ b/Lib/_pyrepl/_module_completer.py
@@ -42,11 +42,11 @@ class ModuleCompleter:
self._global_cache: list[pkgutil.ModuleInfo] = []
self._curr_sys_path: list[str] = sys.path[:]
- def get_completions(self, line: str) -> list[str]:
+ def get_completions(self, line: str) -> list[str] | None:
"""Return the next possible import completions for 'line'."""
result = ImportParser(line).parse()
if not result:
- return []
+ return None
try:
return self.complete(*result)
except Exception:
diff --git a/Lib/_pyrepl/base_eventqueue.py b/Lib/_pyrepl/base_eventqueue.py
index 842599bd187..0589a0f437e 100644
--- a/Lib/_pyrepl/base_eventqueue.py
+++ b/Lib/_pyrepl/base_eventqueue.py
@@ -87,7 +87,7 @@ class BaseEventQueue:
if isinstance(k, dict):
self.keymap = k
else:
- self.insert(Event('key', k, self.flush_buf()))
+ self.insert(Event('key', k, bytes(self.flush_buf())))
self.keymap = self.compiled_keymap
elif self.buf and self.buf[0] == 27: # escape
@@ -96,7 +96,7 @@ class BaseEventQueue:
# the docstring in keymap.py
trace('unrecognized escape sequence, propagating...')
self.keymap = self.compiled_keymap
- self.insert(Event('key', '\033', bytearray(b'\033')))
+ self.insert(Event('key', '\033', b'\033'))
for _c in self.flush_buf()[1:]:
self.push(_c)
@@ -106,5 +106,5 @@ class BaseEventQueue:
except UnicodeError:
return
else:
- self.insert(Event('key', decoded, self.flush_buf()))
+ self.insert(Event('key', decoded, bytes(self.flush_buf())))
self.keymap = self.compiled_keymap
diff --git a/Lib/_pyrepl/commands.py b/Lib/_pyrepl/commands.py
index 2354fbb2ec2..50c824995d8 100644
--- a/Lib/_pyrepl/commands.py
+++ b/Lib/_pyrepl/commands.py
@@ -370,6 +370,13 @@ class self_insert(EditCommand):
r = self.reader
text = self.event * r.get_arg()
r.insert(text)
+ if r.paste_mode:
+ data = ""
+ ev = r.console.getpending()
+ data += ev.data
+ if data:
+ r.insert(data)
+ r.last_refresh_cache.invalidated = True
class insert_nl(EditCommand):
@@ -484,7 +491,6 @@ class perform_bracketed_paste(Command):
data = ""
start = time.time()
while done not in data:
- self.reader.console.wait(100)
ev = self.reader.console.getpending()
data += ev.data
trace(
diff --git a/Lib/_pyrepl/readline.py b/Lib/_pyrepl/readline.py
index 572eee520e5..9560ae779ab 100644
--- a/Lib/_pyrepl/readline.py
+++ b/Lib/_pyrepl/readline.py
@@ -134,7 +134,8 @@ class ReadlineAlikeReader(historical_reader.HistoricalReader, CompletingReader):
return "".join(b[p + 1 : self.pos])
def get_completions(self, stem: str) -> list[str]:
- if module_completions := self.get_module_completions():
+ module_completions = self.get_module_completions()
+ if module_completions is not None:
return module_completions
if len(stem) == 0 and self.more_lines is not None:
b = self.buffer
@@ -165,7 +166,7 @@ class ReadlineAlikeReader(historical_reader.HistoricalReader, CompletingReader):
result.sort()
return result
- def get_module_completions(self) -> list[str]:
+ def get_module_completions(self) -> list[str] | None:
line = self.get_line()
return self.config.module_completer.get_completions(line)
diff --git a/Lib/_pyrepl/windows_console.py b/Lib/_pyrepl/windows_console.py
index 95749198b3b..c56dcd6d7dd 100644
--- a/Lib/_pyrepl/windows_console.py
+++ b/Lib/_pyrepl/windows_console.py
@@ -419,10 +419,7 @@ class WindowsConsole(Console):
return info.srWindow.Bottom # type: ignore[no-any-return]
- def _read_input(self, block: bool = True) -> INPUT_RECORD | None:
- if not block and not self.wait(timeout=0):
- return None
-
+ def _read_input(self) -> INPUT_RECORD | None:
rec = INPUT_RECORD()
read = DWORD()
if not ReadConsoleInput(InHandle, rec, 1, read):
@@ -431,14 +428,10 @@ class WindowsConsole(Console):
return rec
def _read_input_bulk(
- self, block: bool, n: int
+ self, n: int
) -> tuple[ctypes.Array[INPUT_RECORD], int]:
rec = (n * INPUT_RECORD)()
read = DWORD()
-
- if not block and not self.wait(timeout=0):
- return rec, 0
-
if not ReadConsoleInput(InHandle, rec, n, read):
raise WinError(GetLastError())
@@ -449,8 +442,11 @@ class WindowsConsole(Console):
and there is no event pending, otherwise waits for the
completion of an event."""
+ if not block and not self.wait(timeout=0):
+ return None
+
while self.event_queue.empty():
- rec = self._read_input(block)
+ rec = self._read_input()
if rec is None:
return None
@@ -551,12 +547,20 @@ class WindowsConsole(Console):
if e2:
e.data += e2.data
- recs, rec_count = self._read_input_bulk(False, 1024)
+ recs, rec_count = self._read_input_bulk(1024)
for i in range(rec_count):
rec = recs[i]
+ # In case of a legacy console, we do not only receive a keydown
+ # event, but also a keyup event - and for uppercase letters
+ # an additional SHIFT_PRESSED event.
if rec and rec.EventType == KEY_EVENT:
key_event = rec.Event.KeyEvent
+ if not key_event.bKeyDown:
+ continue
ch = key_event.uChar.UnicodeChar
+ if ch == "\x00":
+ # ignore SHIFT_PRESSED and special keys
+ continue
if ch == "\r":
ch += "\n"
e.data += ch
diff --git a/Lib/_strptime.py b/Lib/_strptime.py
index ae67949626d..cdc55e8daaf 100644
--- a/Lib/_strptime.py
+++ b/Lib/_strptime.py
@@ -14,6 +14,7 @@ import os
import time
import locale
import calendar
+import re
from re import compile as re_compile
from re import sub as re_sub
from re import IGNORECASE
@@ -41,6 +42,29 @@ def _findall(haystack, needle):
yield i
i += len(needle)
+def _fixmonths(months):
+ yield from months
+ # The lower case of 'ฤฐ' ('\u0130') is 'i\u0307'.
+ # The re module only supports 1-to-1 character matching in
+ # case-insensitive mode.
+ for s in months:
+ if 'i\u0307' in s:
+ yield s.replace('i\u0307', '\u0130')
+
+lzh_TW_alt_digits = (
+ # ใ€‡:ไธ€:ไบŒ:ไธ‰:ๅ››:ไบ”:ๅ…ญ:ไธƒ:ๅ…ซ:ไน
+ '\u3007', '\u4e00', '\u4e8c', '\u4e09', '\u56db',
+ '\u4e94', '\u516d', '\u4e03', '\u516b', '\u4e5d',
+ # ๅ:ๅไธ€:ๅไบŒ:ๅไธ‰:ๅๅ››:ๅไบ”:ๅๅ…ญ:ๅไธƒ:ๅๅ…ซ:ๅไน
+ '\u5341', '\u5341\u4e00', '\u5341\u4e8c', '\u5341\u4e09', '\u5341\u56db',
+ '\u5341\u4e94', '\u5341\u516d', '\u5341\u4e03', '\u5341\u516b', '\u5341\u4e5d',
+ # ๅปฟ:ๅปฟไธ€:ๅปฟไบŒ:ๅปฟไธ‰:ๅปฟๅ››:ๅปฟไบ”:ๅปฟๅ…ญ:ๅปฟไธƒ:ๅปฟๅ…ซ:ๅปฟไน
+ '\u5eff', '\u5eff\u4e00', '\u5eff\u4e8c', '\u5eff\u4e09', '\u5eff\u56db',
+ '\u5eff\u4e94', '\u5eff\u516d', '\u5eff\u4e03', '\u5eff\u516b', '\u5eff\u4e5d',
+ # ๅ…:ๅ…ไธ€
+ '\u5345', '\u5345\u4e00')
+
+
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
@@ -84,6 +108,7 @@ class LocaleTime(object):
self.__calc_weekday()
self.__calc_month()
self.__calc_am_pm()
+ self.__calc_alt_digits()
self.__calc_timezone()
self.__calc_date_time()
if _getlang() != self.lang:
@@ -119,9 +144,43 @@ class LocaleTime(object):
am_pm.append(time.strftime("%p", time_tuple).lower().strip())
self.am_pm = am_pm
+ def __calc_alt_digits(self):
+ # Set self.LC_alt_digits by using time.strftime().
+
+ # The magic data should contain all decimal digits.
+ time_tuple = time.struct_time((1998, 1, 27, 10, 43, 56, 1, 27, 0))
+ s = time.strftime("%x%X", time_tuple)
+ if s.isascii():
+ # Fast path -- all digits are ASCII.
+ self.LC_alt_digits = ()
+ return
+
+ digits = ''.join(sorted(set(re.findall(r'\d', s))))
+ if len(digits) == 10 and ord(digits[-1]) == ord(digits[0]) + 9:
+ # All 10 decimal digits from the same set.
+ if digits.isascii():
+ # All digits are ASCII.
+ self.LC_alt_digits = ()
+ return
+
+ self.LC_alt_digits = [a + b for a in digits for b in digits]
+ # Test whether the numbers contain leading zero.
+ time_tuple2 = time.struct_time((2000, 1, 1, 1, 1, 1, 5, 1, 0))
+ if self.LC_alt_digits[1] not in time.strftime("%x %X", time_tuple2):
+ self.LC_alt_digits[:10] = digits
+ return
+
+ # Either non-Gregorian calendar or non-decimal numbers.
+ if {'\u4e00', '\u4e03', '\u4e5d', '\u5341', '\u5eff'}.issubset(s):
+ # lzh_TW
+ self.LC_alt_digits = lzh_TW_alt_digits
+ return
+
+ self.LC_alt_digits = None
+
def __calc_date_time(self):
- # Set self.date_time, self.date, & self.time by using
- # time.strftime().
+ # Set self.LC_date_time, self.LC_date, self.LC_time and
+ # self.LC_time_ampm by using time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
# overloaded numbers is minimized. The order in which searches for
@@ -129,26 +188,32 @@ class LocaleTime(object):
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
time_tuple2 = time.struct_time((1999,1,3,1,1,1,6,3,0))
- replacement_pairs = [
+ replacement_pairs = []
+
+ # Non-ASCII digits
+ if self.LC_alt_digits or self.LC_alt_digits is None:
+ for n, d in [(19, '%OC'), (99, '%Oy'), (22, '%OH'),
+ (44, '%OM'), (55, '%OS'), (17, '%Od'),
+ (3, '%Om'), (2, '%Ow'), (10, '%OI')]:
+ if self.LC_alt_digits is None:
+ s = chr(0x660 + n // 10) + chr(0x660 + n % 10)
+ replacement_pairs.append((s, d))
+ if n < 10:
+ replacement_pairs.append((s[1], d))
+ elif len(self.LC_alt_digits) > n:
+ replacement_pairs.append((self.LC_alt_digits[n], d))
+ else:
+ replacement_pairs.append((time.strftime(d, time_tuple), d))
+ replacement_pairs += [
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I'),
- # Non-ASCII digits
- ('\u0661\u0669\u0669\u0669', '%Y'),
- ('\u0669\u0669', '%Oy'),
- ('\u0662\u0662', '%OH'),
- ('\u0664\u0664', '%OM'),
- ('\u0665\u0665', '%OS'),
- ('\u0661\u0667', '%Od'),
- ('\u0660\u0663', '%Om'),
- ('\u0663', '%Om'),
- ('\u0662', '%Ow'),
- ('\u0661\u0660', '%OI'),
]
+
date_time = []
- for directive in ('%c', '%x', '%X'):
+ for directive in ('%c', '%x', '%X', '%r'):
current_format = time.strftime(directive, time_tuple).lower()
current_format = current_format.replace('%', '%%')
# The month and the day of the week formats are treated specially
@@ -172,9 +237,10 @@ class LocaleTime(object):
if tz:
current_format = current_format.replace(tz, "%Z")
# Transform all non-ASCII digits to digits in range U+0660 to U+0669.
- current_format = re_sub(r'\d(?<![0-9])',
- lambda m: chr(0x0660 + int(m[0])),
- current_format)
+ if not current_format.isascii() and self.LC_alt_digits is None:
+ current_format = re_sub(r'\d(?<![0-9])',
+ lambda m: chr(0x0660 + int(m[0])),
+ current_format)
for old, new in replacement_pairs:
current_format = current_format.replace(old, new)
# If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
@@ -189,6 +255,7 @@ class LocaleTime(object):
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
+ self.LC_time_ampm = date_time[3]
def __find_month_format(self, directive):
"""Find the month format appropriate for the current locale.
@@ -213,7 +280,7 @@ class LocaleTime(object):
full_indices &= indices
indices = set(_findall(datetime, self.a_month[m]))
if abbr_indices is None:
- abbr_indices = indices
+ abbr_indices = set(indices)
else:
abbr_indices &= indices
if not full_indices and not abbr_indices:
@@ -241,7 +308,7 @@ class LocaleTime(object):
if self.f_weekday[wd] != self.a_weekday[wd]:
indices = set(_findall(datetime, self.a_weekday[wd]))
if abbr_indices is None:
- abbr_indices = indices
+ abbr_indices = set(indices)
else:
abbr_indices &= indices
if not full_indices and not abbr_indices:
@@ -288,8 +355,10 @@ class TimeRE(dict):
# The " [1-9]" part of the regex is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'f': r"(?P<f>[0-9]{1,6})",
- 'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
+ 'H': r"(?P<H>2[0-3]|[0-1]\d|\d| \d)",
+ 'k': r"(?P<H>2[0-3]|[0-1]\d|\d| \d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9]| [1-9])",
+ 'l': r"(?P<I>1[0-2]|0[1-9]|[1-9]| [1-9])",
'G': r"(?P<G>\d\d\d\d)",
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
@@ -305,23 +374,56 @@ class TimeRE(dict):
'z': r"(?P<z>([+-]\d\d:?[0-5]\d(:?[0-5]\d(\.\d{1,6})?)?)|(?-i:Z))?",
'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
- 'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
- 'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
+ 'B': self.__seqToRE(_fixmonths(self.locale_time.f_month[1:]), 'B'),
+ 'b': self.__seqToRE(_fixmonths(self.locale_time.a_month[1:]), 'b'),
'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
for tz in tz_names),
'Z'),
'%': '%'}
- for d in 'dmyHIMS':
- mapping['O' + d] = r'(?P<%s>\d\d|\d| \d)' % d
- mapping['Ow'] = r'(?P<w>\d)'
+ if self.locale_time.LC_alt_digits is None:
+ for d in 'dmyCHIMS':
+ mapping['O' + d] = r'(?P<%s>\d\d|\d| \d)' % d
+ mapping['Ow'] = r'(?P<w>\d)'
+ else:
+ mapping.update({
+ 'Od': self.__seqToRE(self.locale_time.LC_alt_digits[1:32], 'd',
+ '3[0-1]|[1-2][0-9]|0[1-9]|[1-9]'),
+ 'Om': self.__seqToRE(self.locale_time.LC_alt_digits[1:13], 'm',
+ '1[0-2]|0[1-9]|[1-9]'),
+ 'Ow': self.__seqToRE(self.locale_time.LC_alt_digits[:7], 'w',
+ '[0-6]'),
+ 'Oy': self.__seqToRE(self.locale_time.LC_alt_digits, 'y',
+ '[0-9][0-9]'),
+ 'OC': self.__seqToRE(self.locale_time.LC_alt_digits, 'C',
+ '[0-9][0-9]'),
+ 'OH': self.__seqToRE(self.locale_time.LC_alt_digits[:24], 'H',
+ '2[0-3]|[0-1][0-9]|[0-9]'),
+ 'OI': self.__seqToRE(self.locale_time.LC_alt_digits[1:13], 'I',
+ '1[0-2]|0[1-9]|[1-9]'),
+ 'OM': self.__seqToRE(self.locale_time.LC_alt_digits[:60], 'M',
+ '[0-5][0-9]|[0-9]'),
+ 'OS': self.__seqToRE(self.locale_time.LC_alt_digits[:62], 'S',
+ '6[0-1]|[0-5][0-9]|[0-9]'),
+ })
+ mapping.update({
+ 'e': mapping['d'],
+ 'Oe': mapping['Od'],
+ 'P': mapping['p'],
+ 'Op': mapping['p'],
+ 'W': mapping['U'].replace('U', 'W'),
+ })
mapping['W'] = mapping['U'].replace('U', 'W')
+
base.__init__(mapping)
+ base.__setitem__('T', self.pattern('%H:%M:%S'))
+ base.__setitem__('R', self.pattern('%H:%M'))
+ base.__setitem__('r', self.pattern(self.locale_time.LC_time_ampm))
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
- def __seqToRE(self, to_convert, directive):
+ def __seqToRE(self, to_convert, directive, altregex=None):
"""Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
@@ -337,8 +439,9 @@ class TimeRE(dict):
else:
return ''
regex = '|'.join(re_escape(stuff) for stuff in to_convert)
- regex = '(?P<%s>%s' % (directive, regex)
- return '%s)' % regex
+ if altregex is not None:
+ regex += '|' + altregex
+ return '(?P<%s>%s)' % (directive, regex)
def pattern(self, format):
"""Return regex pattern for the format string.
@@ -365,7 +468,7 @@ class TimeRE(dict):
nonlocal day_of_month_in_format
day_of_month_in_format = True
return self[format_char]
- format = re_sub(r'%([OE]?\\?.?)', repl, format)
+ format = re_sub(r'%[-_0^#]*[0-9]*([OE]?\\?.?)', repl, format)
if day_of_month_in_format and not year_in_format:
import warnings
warnings.warn("""\
@@ -467,6 +570,15 @@ def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
# values
weekday = julian = None
found_dict = found.groupdict()
+ if locale_time.LC_alt_digits:
+ def parse_int(s):
+ try:
+ return locale_time.LC_alt_digits.index(s)
+ except ValueError:
+ return int(s)
+ else:
+ parse_int = int
+
for group_key in found_dict.keys():
# Directives not explicitly handled below:
# c, x, X
@@ -474,30 +586,34 @@ def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
# U, W
# worthless without day of the week
if group_key == 'y':
- year = int(found_dict['y'])
- # Open Group specification for strptime() states that a %y
- #value in the range of [00, 68] is in the century 2000, while
- #[69,99] is in the century 1900
- if year <= 68:
- year += 2000
+ year = parse_int(found_dict['y'])
+ if 'C' in found_dict:
+ century = parse_int(found_dict['C'])
+ year += century * 100
else:
- year += 1900
+ # Open Group specification for strptime() states that a %y
+ #value in the range of [00, 68] is in the century 2000, while
+ #[69,99] is in the century 1900
+ if year <= 68:
+ year += 2000
+ else:
+ year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'G':
iso_year = int(found_dict['G'])
elif group_key == 'm':
- month = int(found_dict['m'])
+ month = parse_int(found_dict['m'])
elif group_key == 'B':
month = locale_time.f_month.index(found_dict['B'].lower())
elif group_key == 'b':
month = locale_time.a_month.index(found_dict['b'].lower())
elif group_key == 'd':
- day = int(found_dict['d'])
+ day = parse_int(found_dict['d'])
elif group_key == 'H':
- hour = int(found_dict['H'])
+ hour = parse_int(found_dict['H'])
elif group_key == 'I':
- hour = int(found_dict['I'])
+ hour = parse_int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
# If there was no AM/PM indicator, we'll treat this like AM
if ampm in ('', locale_time.am_pm[0]):
@@ -513,9 +629,9 @@ def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
if hour != 12:
hour += 12
elif group_key == 'M':
- minute = int(found_dict['M'])
+ minute = parse_int(found_dict['M'])
elif group_key == 'S':
- second = int(found_dict['S'])
+ second = parse_int(found_dict['S'])
elif group_key == 'f':
s = found_dict['f']
# Pad to always return microseconds.
diff --git a/Lib/annotationlib.py b/Lib/annotationlib.py
index 32b85534589..c83a1573ccd 100644
--- a/Lib/annotationlib.py
+++ b/Lib/annotationlib.py
@@ -27,6 +27,9 @@ class Format(enum.IntEnum):
_sentinel = object()
+# Following `NAME_ERROR_MSG` in `ceval_macros.h`:
+_NAME_ERROR_MSG = "name '{name:.200}' is not defined"
+
# Slots shared by ForwardRef and _Stringifier. The __forward__ names must be
# preserved for compatibility with the old typing.ForwardRef class. The remaining
@@ -184,7 +187,7 @@ class ForwardRef:
elif is_forwardref_format:
return self
else:
- raise NameError(arg)
+ raise NameError(_NAME_ERROR_MSG.format(name=arg), name=arg)
else:
code = self.__forward_code__
try:
@@ -939,48 +942,49 @@ def get_annotations(
if not eval_str:
return dict(ann)
- if isinstance(obj, type):
- # class
- obj_globals = None
- module_name = getattr(obj, "__module__", None)
- if module_name:
- module = sys.modules.get(module_name, None)
- if module:
- obj_globals = getattr(module, "__dict__", None)
- obj_locals = dict(vars(obj))
- unwrap = obj
- elif isinstance(obj, types.ModuleType):
- # module
- obj_globals = getattr(obj, "__dict__")
- obj_locals = None
- unwrap = None
- elif callable(obj):
- # this includes types.Function, types.BuiltinFunctionType,
- # types.BuiltinMethodType, functools.partial, functools.singledispatch,
- # "class funclike" from Lib/test/test_inspect... on and on it goes.
- obj_globals = getattr(obj, "__globals__", None)
- obj_locals = None
- unwrap = obj
- else:
- obj_globals = obj_locals = unwrap = None
-
- if unwrap is not None:
- while True:
- if hasattr(unwrap, "__wrapped__"):
- unwrap = unwrap.__wrapped__
- continue
- if functools := sys.modules.get("functools"):
- if isinstance(unwrap, functools.partial):
- unwrap = unwrap.func
+ if globals is None or locals is None:
+ if isinstance(obj, type):
+ # class
+ obj_globals = None
+ module_name = getattr(obj, "__module__", None)
+ if module_name:
+ module = sys.modules.get(module_name, None)
+ if module:
+ obj_globals = getattr(module, "__dict__", None)
+ obj_locals = dict(vars(obj))
+ unwrap = obj
+ elif isinstance(obj, types.ModuleType):
+ # module
+ obj_globals = getattr(obj, "__dict__")
+ obj_locals = None
+ unwrap = None
+ elif callable(obj):
+ # this includes types.Function, types.BuiltinFunctionType,
+ # types.BuiltinMethodType, functools.partial, functools.singledispatch,
+ # "class funclike" from Lib/test/test_inspect... on and on it goes.
+ obj_globals = getattr(obj, "__globals__", None)
+ obj_locals = None
+ unwrap = obj
+ else:
+ obj_globals = obj_locals = unwrap = None
+
+ if unwrap is not None:
+ while True:
+ if hasattr(unwrap, "__wrapped__"):
+ unwrap = unwrap.__wrapped__
continue
- break
- if hasattr(unwrap, "__globals__"):
- obj_globals = unwrap.__globals__
+ if functools := sys.modules.get("functools"):
+ if isinstance(unwrap, functools.partial):
+ unwrap = unwrap.func
+ continue
+ break
+ if hasattr(unwrap, "__globals__"):
+ obj_globals = unwrap.__globals__
- if globals is None:
- globals = obj_globals
- if locals is None:
- locals = obj_locals
+ if globals is None:
+ globals = obj_globals
+ if locals is None:
+ locals = obj_locals
# "Inject" type parameters into the local namespace
# (unless they are shadowed by assignments *in* the local namespace),
@@ -1042,14 +1046,27 @@ def _get_and_call_annotate(obj, format):
return None
+_BASE_GET_ANNOTATIONS = type.__dict__["__annotations__"].__get__
+
+
def _get_dunder_annotations(obj):
"""Return the annotations for an object, checking that it is a dictionary.
Does not return a fresh dictionary.
"""
- ann = getattr(obj, "__annotations__", None)
- if ann is None:
- return None
+ # This special case is needed to support types defined under
+ # from __future__ import annotations, where accessing the __annotations__
+ # attribute directly might return annotations for the wrong class.
+ if isinstance(obj, type):
+ try:
+ ann = _BASE_GET_ANNOTATIONS(obj)
+ except AttributeError:
+ # For static types, the descriptor raises AttributeError.
+ return None
+ else:
+ ann = getattr(obj, "__annotations__", None)
+ if ann is None:
+ return None
if not isinstance(ann, dict):
raise ValueError(f"{obj!r}.__annotations__ is neither a dict nor None")
diff --git a/Lib/argparse.py b/Lib/argparse.py
index d1a6350c3fd..83258cf3e0f 100644
--- a/Lib/argparse.py
+++ b/Lib/argparse.py
@@ -1534,7 +1534,7 @@ class _ActionsContainer(object):
action_name = kwargs.get('action')
action_class = self._pop_action_class(kwargs)
if not callable(action_class):
- raise ValueError('unknown action {action_class!r}')
+ raise ValueError(f'unknown action {action_class!r}')
action = action_class(**kwargs)
# raise an error if action for positional argument does not
diff --git a/Lib/ast.py b/Lib/ast.py
index b9791bf52d3..6d3daf64f5c 100644
--- a/Lib/ast.py
+++ b/Lib/ast.py
@@ -147,18 +147,22 @@ def dump(
if value is None and getattr(cls, name, ...) is None:
keywords = True
continue
- if (
- not show_empty
- and (value is None or value == [])
- # Special cases:
- # `Constant(value=None)` and `MatchSingleton(value=None)`
- and not isinstance(node, (Constant, MatchSingleton))
- ):
- args_buffer.append(repr(value))
- continue
- elif not keywords:
- args.extend(args_buffer)
- args_buffer = []
+ if not show_empty:
+ if value == []:
+ field_type = cls._field_types.get(name, object)
+ if getattr(field_type, '__origin__', ...) is list:
+ if not keywords:
+ args_buffer.append(repr(value))
+ continue
+ elif isinstance(value, Load):
+ field_type = cls._field_types.get(name, object)
+ if field_type is expr_context:
+ if not keywords:
+ args_buffer.append(repr(value))
+ continue
+ if not keywords:
+ args.extend(args_buffer)
+ args_buffer = []
value, simple = _format(value, level)
allsimple = allsimple and simple
if keywords:
diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py
index 04fb961e998..520d4b39854 100644
--- a/Lib/asyncio/base_events.py
+++ b/Lib/asyncio/base_events.py
@@ -1016,38 +1016,43 @@ class BaseEventLoop(events.AbstractEventLoop):
family, type_, proto, _, address = addr_info
sock = None
try:
- sock = socket.socket(family=family, type=type_, proto=proto)
- sock.setblocking(False)
- if local_addr_infos is not None:
- for lfamily, _, _, _, laddr in local_addr_infos:
- # skip local addresses of different family
- if lfamily != family:
- continue
- try:
- sock.bind(laddr)
- break
- except OSError as exc:
- msg = (
- f'error while attempting to bind on '
- f'address {laddr!r}: {str(exc).lower()}'
- )
- exc = OSError(exc.errno, msg)
- my_exceptions.append(exc)
- else: # all bind attempts failed
- if my_exceptions:
- raise my_exceptions.pop()
- else:
- raise OSError(f"no matching local address with {family=} found")
- await self.sock_connect(sock, address)
- return sock
- except OSError as exc:
- my_exceptions.append(exc)
- if sock is not None:
- sock.close()
- raise
+ try:
+ sock = socket.socket(family=family, type=type_, proto=proto)
+ sock.setblocking(False)
+ if local_addr_infos is not None:
+ for lfamily, _, _, _, laddr in local_addr_infos:
+ # skip local addresses of different family
+ if lfamily != family:
+ continue
+ try:
+ sock.bind(laddr)
+ break
+ except OSError as exc:
+ msg = (
+ f'error while attempting to bind on '
+ f'address {laddr!r}: {str(exc).lower()}'
+ )
+ exc = OSError(exc.errno, msg)
+ my_exceptions.append(exc)
+ else: # all bind attempts failed
+ if my_exceptions:
+ raise my_exceptions.pop()
+ else:
+ raise OSError(f"no matching local address with {family=} found")
+ await self.sock_connect(sock, address)
+ return sock
+ except OSError as exc:
+ my_exceptions.append(exc)
+ raise
except:
if sock is not None:
- sock.close()
+ try:
+ sock.close()
+ except OSError:
+                    # An error when closing a newly created socket is
+                    # not important, but it could mask a more important
+                    # non-OSError error. So ignore it.
+ pass
raise
finally:
exceptions = my_exceptions = None
@@ -1161,7 +1166,7 @@ class BaseEventLoop(events.AbstractEventLoop):
raise ExceptionGroup("create_connection failed", exceptions)
if len(exceptions) == 1:
raise exceptions[0]
- else:
+ elif exceptions:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
@@ -1170,6 +1175,9 @@ class BaseEventLoop(events.AbstractEventLoop):
# the various error messages.
raise OSError('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
+ else:
+ # No exceptions were collected, raise a timeout error
+ raise TimeoutError('create_connection failed')
finally:
exceptions = None
diff --git a/Lib/asyncio/tools.py b/Lib/asyncio/tools.py
index b2da7d2f6ba..2683f34cc71 100644
--- a/Lib/asyncio/tools.py
+++ b/Lib/asyncio/tools.py
@@ -1,12 +1,10 @@
"""Tools to analyze tasks running in asyncio programs."""
-from dataclasses import dataclass
-from collections import defaultdict
+from collections import defaultdict, namedtuple
from itertools import count
from enum import Enum
import sys
-from _remote_debugging import get_all_awaited_by
-
+from _remote_debugging import RemoteUnwinder, FrameInfo
class NodeType(Enum):
COROUTINE = 1
@@ -27,51 +25,75 @@ class CycleFoundException(Exception):
# โ”€โ”€โ”€ indexing helpers โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
-def _format_stack_entry(elem: tuple[str, str, int] | str) -> str:
- if isinstance(elem, tuple):
- fqname, path, line_no = elem
- return f"{fqname} {path}:{line_no}"
-
+def _format_stack_entry(elem: str|FrameInfo) -> str:
+ if not isinstance(elem, str):
+ if elem.lineno == 0 and elem.filename == "":
+ return f"{elem.funcname}"
+ else:
+ return f"{elem.funcname} {elem.filename}:{elem.lineno}"
return elem
def _index(result):
- id2name, awaits = {}, []
- for _thr_id, tasks in result:
- for tid, tname, awaited in tasks:
- id2name[tid] = tname
- for stack, parent_id in awaited:
- stack = [_format_stack_entry(elem) for elem in stack]
- awaits.append((parent_id, stack, tid))
- return id2name, awaits
-
-
-def _build_tree(id2name, awaits):
+ id2name, awaits, task_stacks = {}, [], {}
+ for awaited_info in result:
+ for task_info in awaited_info.awaited_by:
+ task_id = task_info.task_id
+ task_name = task_info.task_name
+ id2name[task_id] = task_name
+
+ # Store the internal coroutine stack for this task
+ if task_info.coroutine_stack:
+ for coro_info in task_info.coroutine_stack:
+ call_stack = coro_info.call_stack
+ internal_stack = [_format_stack_entry(frame) for frame in call_stack]
+ task_stacks[task_id] = internal_stack
+
+ # Add the awaited_by relationships (external dependencies)
+ if task_info.awaited_by:
+ for coro_info in task_info.awaited_by:
+ call_stack = coro_info.call_stack
+ parent_task_id = coro_info.task_name
+ stack = [_format_stack_entry(frame) for frame in call_stack]
+ awaits.append((parent_task_id, stack, task_id))
+ return id2name, awaits, task_stacks
+
+
+def _build_tree(id2name, awaits, task_stacks):
id2label = {(NodeType.TASK, tid): name for tid, name in id2name.items()}
children = defaultdict(list)
- cor_names = defaultdict(dict) # (parent) -> {frame: node}
- cor_id_seq = count(1)
-
- def _cor_node(parent_key, frame_name):
- """Return an existing or new (NodeType.COROUTINE, โ€ฆ) node under *parent_key*."""
- bucket = cor_names[parent_key]
- if frame_name in bucket:
- return bucket[frame_name]
- node_key = (NodeType.COROUTINE, f"c{next(cor_id_seq)}")
- id2label[node_key] = frame_name
- children[parent_key].append(node_key)
- bucket[frame_name] = node_key
+ cor_nodes = defaultdict(dict) # Maps parent -> {frame_name: node_key}
+ next_cor_id = count(1)
+
+ def get_or_create_cor_node(parent, frame):
+ """Get existing coroutine node or create new one under parent"""
+ if frame in cor_nodes[parent]:
+ return cor_nodes[parent][frame]
+
+ node_key = (NodeType.COROUTINE, f"c{next(next_cor_id)}")
+ id2label[node_key] = frame
+ children[parent].append(node_key)
+ cor_nodes[parent][frame] = node_key
return node_key
- # lay down parent โžœ โ€ฆframesโ€ฆ โžœ child paths
+ # Build task dependency tree with coroutine frames
for parent_id, stack, child_id in awaits:
cur = (NodeType.TASK, parent_id)
- for frame in reversed(stack): # outer-most โ†’ inner-most
- cur = _cor_node(cur, frame)
+ for frame in reversed(stack):
+ cur = get_or_create_cor_node(cur, frame)
+
child_key = (NodeType.TASK, child_id)
if child_key not in children[cur]:
children[cur].append(child_key)
+ # Add coroutine stacks for leaf tasks
+ awaiting_tasks = {parent_id for parent_id, _, _ in awaits}
+ for task_id in id2name:
+ if task_id not in awaiting_tasks and task_id in task_stacks:
+ cur = (NodeType.TASK, task_id)
+ for frame in reversed(task_stacks[task_id]):
+ cur = get_or_create_cor_node(cur, frame)
+
return id2label, children
@@ -118,6 +140,11 @@ def _find_cycles(graph):
# โ”€โ”€โ”€ PRINT TREE FUNCTION โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
+def get_all_awaited_by(pid):
+ unwinder = RemoteUnwinder(pid)
+ return unwinder.get_all_awaited_by()
+
+
def build_async_tree(result, task_emoji="(T)", cor_emoji=""):
"""
Build a list of strings for pretty-print an async call tree.
@@ -125,12 +152,12 @@ def build_async_tree(result, task_emoji="(T)", cor_emoji=""):
The call tree is produced by `get_all_async_stacks()`, prefixing tasks
with `task_emoji` and coroutine frames with `cor_emoji`.
"""
- id2name, awaits = _index(result)
+ id2name, awaits, task_stacks = _index(result)
g = _task_graph(awaits)
cycles = _find_cycles(g)
if cycles:
raise CycleFoundException(cycles, id2name)
- labels, children = _build_tree(id2name, awaits)
+ labels, children = _build_tree(id2name, awaits, task_stacks)
def pretty(node):
flag = task_emoji if node[0] == NodeType.TASK else cor_emoji
@@ -150,35 +177,40 @@ def build_async_tree(result, task_emoji="(T)", cor_emoji=""):
def build_task_table(result):
- id2name, awaits = _index(result)
+ id2name, _, _ = _index(result)
table = []
- for tid, tasks in result:
- for task_id, task_name, awaited in tasks:
- if not awaited:
- table.append(
- [
- tid,
- hex(task_id),
- task_name,
- "",
- "",
- "0x0"
- ]
- )
- for stack, awaiter_id in awaited:
- stack = [elem[0] if isinstance(elem, tuple) else elem for elem in stack]
- coroutine_chain = " -> ".join(stack)
- awaiter_name = id2name.get(awaiter_id, "Unknown")
- table.append(
- [
- tid,
- hex(task_id),
- task_name,
- coroutine_chain,
- awaiter_name,
- hex(awaiter_id),
- ]
- )
+
+ for awaited_info in result:
+ thread_id = awaited_info.thread_id
+ for task_info in awaited_info.awaited_by:
+ # Get task info
+ task_id = task_info.task_id
+ task_name = task_info.task_name
+
+ # Build coroutine stack string
+ frames = [frame for coro in task_info.coroutine_stack
+ for frame in coro.call_stack]
+ coro_stack = " -> ".join(_format_stack_entry(x).split(" ")[0]
+ for x in frames)
+
+ # Handle tasks with no awaiters
+ if not task_info.awaited_by:
+ table.append([thread_id, hex(task_id), task_name, coro_stack,
+ "", "", "0x0"])
+ continue
+
+ # Handle tasks with awaiters
+ for coro_info in task_info.awaited_by:
+ parent_id = coro_info.task_name
+ awaiter_frames = [_format_stack_entry(x).split(" ")[0]
+ for x in coro_info.call_stack]
+ awaiter_chain = " -> ".join(awaiter_frames)
+ awaiter_name = id2name.get(parent_id, "Unknown")
+ parent_id_str = (hex(parent_id) if isinstance(parent_id, int)
+ else str(parent_id))
+
+ table.append([thread_id, hex(task_id), task_name, coro_stack,
+ awaiter_chain, awaiter_name, parent_id_str])
return table
@@ -207,11 +239,11 @@ def display_awaited_by_tasks_table(pid: int) -> None:
table = build_task_table(tasks)
# Print the table in a simple tabular format
print(
- f"{'tid':<10} {'task id':<20} {'task name':<20} {'coroutine chain':<50} {'awaiter name':<20} {'awaiter id':<15}"
+ f"{'tid':<10} {'task id':<20} {'task name':<20} {'coroutine stack':<50} {'awaiter chain':<50} {'awaiter name':<15} {'awaiter id':<15}"
)
- print("-" * 135)
+ print("-" * 180)
for row in table:
- print(f"{row[0]:<10} {row[1]:<20} {row[2]:<20} {row[3]:<50} {row[4]:<20} {row[5]:<15}")
+ print(f"{row[0]:<10} {row[1]:<20} {row[2]:<20} {row[3]:<50} {row[4]:<50} {row[5]:<15} {row[6]:<15}")
def display_awaited_by_tasks_tree(pid: int) -> None:
diff --git a/Lib/calendar.py b/Lib/calendar.py
index 18f76d52ff8..3be1b50500e 100644
--- a/Lib/calendar.py
+++ b/Lib/calendar.py
@@ -565,7 +565,7 @@ class HTMLCalendar(Calendar):
Return a formatted year as a complete HTML page.
"""
if encoding is None:
- encoding = sys.getdefaultencoding()
+ encoding = 'utf-8'
v = []
a = v.append
a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
@@ -846,7 +846,7 @@ def main(args=None):
parser.add_argument(
"-e", "--encoding",
default=None,
- help="encoding to use for output"
+ help="encoding to use for output (default utf-8)"
)
parser.add_argument(
"-t", "--type",
@@ -890,7 +890,7 @@ def main(args=None):
cal.setfirstweekday(options.first_weekday)
encoding = options.encoding
if encoding is None:
- encoding = sys.getdefaultencoding()
+ encoding = 'utf-8'
optdict = dict(encoding=encoding, css=options.css)
write = sys.stdout.buffer.write
if options.year is None:
diff --git a/Lib/code.py b/Lib/code.py
index b134886dc26..f7e275d8801 100644
--- a/Lib/code.py
+++ b/Lib/code.py
@@ -224,7 +224,7 @@ class InteractiveConsole(InteractiveInterpreter):
sys.ps1 = ">>> "
delete_ps1_after = True
try:
- _ps2 = sys.ps2
+ sys.ps2
delete_ps2_after = False
except AttributeError:
sys.ps2 = "... "
diff --git a/Lib/compression/zstd/_zstdfile.py b/Lib/compression/zstd/_zstdfile.py
index 8770e576f50..d709f5efc65 100644
--- a/Lib/compression/zstd/_zstdfile.py
+++ b/Lib/compression/zstd/_zstdfile.py
@@ -1,7 +1,6 @@
import io
from os import PathLike
-from _zstd import (ZstdCompressor, ZstdDecompressor, ZstdError,
- ZSTD_DStreamOutSize)
+from _zstd import ZstdCompressor, ZstdDecompressor, ZSTD_DStreamOutSize
from compression._common import _streams
__all__ = ('ZstdFile', 'open')
diff --git a/Lib/concurrent/futures/interpreter.py b/Lib/concurrent/futures/interpreter.py
index a2c4fbfd3fb..cbb60ce80c1 100644
--- a/Lib/concurrent/futures/interpreter.py
+++ b/Lib/concurrent/futures/interpreter.py
@@ -1,56 +1,39 @@
"""Implements InterpreterPoolExecutor."""
-import contextlib
-import pickle
+from concurrent import interpreters
+import sys
import textwrap
from . import thread as _thread
-import _interpreters
-import _interpqueues
+import traceback
-class ExecutionFailed(_interpreters.InterpreterError):
- """An unhandled exception happened during execution."""
-
- def __init__(self, excinfo):
- msg = excinfo.formatted
- if not msg:
- if excinfo.type and excinfo.msg:
- msg = f'{excinfo.type.__name__}: {excinfo.msg}'
- else:
- msg = excinfo.type.__name__ or excinfo.msg
- super().__init__(msg)
- self.excinfo = excinfo
-
- def __str__(self):
+def do_call(results, func, args, kwargs):
+ try:
+ return func(*args, **kwargs)
+ except BaseException as exc:
+ # Send the captured exception out on the results queue,
+ # but still leave it unhandled for the interpreter to handle.
try:
- formatted = self.excinfo.errdisplay
- except Exception:
- return super().__str__()
- else:
- return textwrap.dedent(f"""
-{super().__str__()}
-
-Uncaught in the interpreter:
-
-{formatted}
- """.strip())
+ results.put(exc)
+ except interpreters.NotShareableError:
+ # The exception is not shareable.
+ print('exception is not shareable:', file=sys.stderr)
+ traceback.print_exception(exc)
+ results.put(None)
+ raise # re-raise
class WorkerContext(_thread.WorkerContext):
@classmethod
- def prepare(cls, initializer, initargs, shared):
+ def prepare(cls, initializer, initargs):
def resolve_task(fn, args, kwargs):
if isinstance(fn, str):
# XXX Circle back to this later.
raise TypeError('scripts not supported')
else:
- # Functions defined in the __main__ module can't be pickled,
- # so they can't be used here. In the future, we could possibly
- # borrow from multiprocessing to work around this.
task = (fn, args, kwargs)
- data = pickle.dumps(task)
- return data
+ return task
if initializer is not None:
try:
@@ -62,68 +45,24 @@ class WorkerContext(_thread.WorkerContext):
else:
initdata = None
def create_context():
- return cls(initdata, shared)
+ return cls(initdata)
return create_context, resolve_task
- @classmethod
- @contextlib.contextmanager
- def _capture_exc(cls, resultsid):
- try:
- yield
- except BaseException as exc:
- # Send the captured exception out on the results queue,
- # but still leave it unhandled for the interpreter to handle.
- _interpqueues.put(resultsid, (None, exc))
- raise # re-raise
-
- @classmethod
- def _send_script_result(cls, resultsid):
- _interpqueues.put(resultsid, (None, None))
-
- @classmethod
- def _call(cls, func, args, kwargs, resultsid):
- with cls._capture_exc(resultsid):
- res = func(*args or (), **kwargs or {})
- # Send the result back.
- with cls._capture_exc(resultsid):
- _interpqueues.put(resultsid, (res, None))
-
- @classmethod
- def _call_pickled(cls, pickled, resultsid):
- with cls._capture_exc(resultsid):
- fn, args, kwargs = pickle.loads(pickled)
- cls._call(fn, args, kwargs, resultsid)
-
- def __init__(self, initdata, shared=None):
+ def __init__(self, initdata):
self.initdata = initdata
- self.shared = dict(shared) if shared else None
- self.interpid = None
- self.resultsid = None
+ self.interp = None
+ self.results = None
def __del__(self):
- if self.interpid is not None:
+ if self.interp is not None:
self.finalize()
- def _exec(self, script):
- assert self.interpid is not None
- excinfo = _interpreters.exec(self.interpid, script, restrict=True)
- if excinfo is not None:
- raise ExecutionFailed(excinfo)
-
def initialize(self):
- assert self.interpid is None, self.interpid
- self.interpid = _interpreters.create(reqrefs=True)
+ assert self.interp is None, self.interp
+ self.interp = interpreters.create()
try:
- _interpreters.incref(self.interpid)
-
maxsize = 0
- self.resultsid = _interpqueues.create(maxsize)
-
- self._exec(f'from {__name__} import WorkerContext')
-
- if self.shared:
- _interpreters.set___main___attrs(
- self.interpid, self.shared, restrict=True)
+ self.results = interpreters.create_queue(maxsize)
if self.initdata:
self.run(self.initdata)
@@ -132,53 +71,25 @@ class WorkerContext(_thread.WorkerContext):
raise # re-raise
def finalize(self):
- interpid = self.interpid
- resultsid = self.resultsid
- self.resultsid = None
- self.interpid = None
- if resultsid is not None:
- try:
- _interpqueues.destroy(resultsid)
- except _interpqueues.QueueNotFoundError:
- pass
- if interpid is not None:
- try:
- _interpreters.decref(interpid)
- except _interpreters.InterpreterNotFoundError:
- pass
+ interp = self.interp
+ results = self.results
+ self.results = None
+ self.interp = None
+ if results is not None:
+ del results
+ if interp is not None:
+ interp.close()
def run(self, task):
- data = task
- script = f'WorkerContext._call_pickled({data!r}, {self.resultsid})'
-
try:
- self._exec(script)
- except ExecutionFailed as exc:
- exc_wrapper = exc
- else:
- exc_wrapper = None
-
- # Return the result, or raise the exception.
- while True:
- try:
- obj = _interpqueues.get(self.resultsid)
- except _interpqueues.QueueNotFoundError:
+ return self.interp.call(do_call, self.results, *task)
+ except interpreters.ExecutionFailed as wrapper:
+ # Wait for the exception data to show up.
+ exc = self.results.get()
+ if exc is None:
+ # The exception must have been not shareable.
raise # re-raise
- except _interpqueues.QueueError:
- continue
- except ModuleNotFoundError:
- # interpreters.queues doesn't exist, which means
- # QueueEmpty doesn't. Act as though it does.
- continue
- else:
- break
- (res, exc), unboundop = obj
- assert unboundop is None, unboundop
- if exc is not None:
- assert res is None, res
- assert exc_wrapper is not None
- raise exc from exc_wrapper
- return res
+ raise exc from wrapper
class BrokenInterpreterPool(_thread.BrokenThreadPool):
@@ -192,11 +103,11 @@ class InterpreterPoolExecutor(_thread.ThreadPoolExecutor):
BROKEN = BrokenInterpreterPool
@classmethod
- def prepare_context(cls, initializer, initargs, shared):
- return WorkerContext.prepare(initializer, initargs, shared)
+ def prepare_context(cls, initializer, initargs):
+ return WorkerContext.prepare(initializer, initargs)
def __init__(self, max_workers=None, thread_name_prefix='',
- initializer=None, initargs=(), shared=None):
+ initializer=None, initargs=()):
"""Initializes a new InterpreterPoolExecutor instance.
Args:
@@ -206,8 +117,6 @@ class InterpreterPoolExecutor(_thread.ThreadPoolExecutor):
initializer: A callable or script used to initialize
each worker interpreter.
initargs: A tuple of arguments to pass to the initializer.
- shared: A mapping of shareabled objects to be inserted into
- each worker interpreter.
"""
super().__init__(max_workers, thread_name_prefix,
- initializer, initargs, shared=shared)
+ initializer, initargs)
diff --git a/Lib/concurrent/futures/process.py b/Lib/concurrent/futures/process.py
index 76b7b2abe83..a14650bf5fa 100644
--- a/Lib/concurrent/futures/process.py
+++ b/Lib/concurrent/futures/process.py
@@ -755,6 +755,11 @@ class ProcessPoolExecutor(_base.Executor):
self._executor_manager_thread_wakeup
def _adjust_process_count(self):
+ # gh-132969: avoid error when state is reset and executor is still running,
+ # which will happen when shutdown(wait=False) is called.
+ if self._processes is None:
+ return
+
# if there's an idle process, we don't need to spawn a new one.
if self._idle_worker_semaphore.acquire(blocking=False):
return
diff --git a/Lib/test/support/interpreters/__init__.py b/Lib/concurrent/interpreters/__init__.py
index e067f259364..aa46a2b37a4 100644
--- a/Lib/test/support/interpreters/__init__.py
+++ b/Lib/concurrent/interpreters/__init__.py
@@ -9,6 +9,10 @@ from _interpreters import (
InterpreterError, InterpreterNotFoundError, NotShareableError,
is_shareable,
)
+from ._queues import (
+ create as create_queue,
+ Queue, QueueEmpty, QueueFull,
+)
__all__ = [
@@ -20,21 +24,6 @@ __all__ = [
]
-_queuemod = None
-
-def __getattr__(name):
- if name in ('Queue', 'QueueEmpty', 'QueueFull', 'create_queue'):
- global create_queue, Queue, QueueEmpty, QueueFull
- ns = globals()
- from .queues import (
- create as create_queue,
- Queue, QueueEmpty, QueueFull,
- )
- return ns[name]
- else:
- raise AttributeError(name)
-
-
_EXEC_FAILURE_STR = """
{superstr}
@@ -157,12 +146,8 @@ class Interpreter:
self._decref()
# for pickling:
- def __getnewargs__(self):
- return (self._id,)
-
- # for pickling:
- def __getstate__(self):
- return None
+ def __reduce__(self):
+ return (type(self), (self._id,))
def _decref(self):
if not self._ownsref:
@@ -226,33 +211,32 @@ class Interpreter:
if excinfo is not None:
raise ExecutionFailed(excinfo)
- def call(self, callable, /):
- """Call the object in the interpreter with given args/kwargs.
+ def _call(self, callable, args, kwargs):
+ res, excinfo = _interpreters.call(self._id, callable, args, kwargs, restrict=True)
+ if excinfo is not None:
+ raise ExecutionFailed(excinfo)
+ return res
- Only functions that take no arguments and have no closure
- are supported.
+ def call(self, callable, /, *args, **kwargs):
+ """Call the object in the interpreter with given args/kwargs.
- The return value is discarded.
+ Nearly all callables, args, kwargs, and return values are
+ supported. All "shareable" objects are supported, as are
+ "stateless" functions (meaning non-closures that do not use
+ any globals). This method will fall back to pickle.
If the callable raises an exception then the error display
- (including full traceback) is send back between the interpreters
+ (including full traceback) is sent back between the interpreters
and an ExecutionFailed exception is raised, much like what
happens with Interpreter.exec().
"""
- # XXX Support args and kwargs.
- # XXX Support arbitrary callables.
- # XXX Support returning the return value (e.g. via pickle).
- excinfo = _interpreters.call(self._id, callable, restrict=True)
- if excinfo is not None:
- raise ExecutionFailed(excinfo)
+ return self._call(callable, args, kwargs)
- def call_in_thread(self, callable, /):
+ def call_in_thread(self, callable, /, *args, **kwargs):
"""Return a new thread that calls the object in the interpreter.
The return value and any raised exception are discarded.
"""
- def task():
- self.call(callable)
- t = threading.Thread(target=task)
+ t = threading.Thread(target=self._call, args=(callable, args, kwargs))
t.start()
return t
diff --git a/Lib/test/support/interpreters/_crossinterp.py b/Lib/concurrent/interpreters/_crossinterp.py
index 544e197ba4c..f47eb693ac8 100644
--- a/Lib/test/support/interpreters/_crossinterp.py
+++ b/Lib/concurrent/interpreters/_crossinterp.py
@@ -61,7 +61,7 @@ class UnboundItem:
def __repr__(self):
return f'{self._MODULE}.{self._NAME}'
-# return f'interpreters.queues.UNBOUND'
+# return f'interpreters._queues.UNBOUND'
UNBOUND = object.__new__(UnboundItem)
diff --git a/Lib/test/support/interpreters/queues.py b/Lib/concurrent/interpreters/_queues.py
index d6a3197d9e0..9c12b2c8c24 100644
--- a/Lib/test/support/interpreters/queues.py
+++ b/Lib/concurrent/interpreters/_queues.py
@@ -1,6 +1,5 @@
"""Cross-interpreter Queues High Level Module."""
-import pickle
import queue
import time
import weakref
@@ -130,12 +129,8 @@ class Queue:
return hash(self._id)
# for pickling:
- def __getnewargs__(self):
- return (self._id,)
-
- # for pickling:
- def __getstate__(self):
- return None
+ def __reduce__(self):
+ return (type(self), (self._id,))
def _set_unbound(self, op, items=None):
assert not hasattr(self, '_unbound')
diff --git a/Lib/configparser.py b/Lib/configparser.py
index 239fda60a02..18af1eadaad 100644
--- a/Lib/configparser.py
+++ b/Lib/configparser.py
@@ -1218,11 +1218,14 @@ class RawConfigParser(MutableMapping):
def _validate_key_contents(self, key):
"""Raises an InvalidWriteError for any keys containing
- delimiters or that match the section header pattern"""
+        delimiters or that begin with the section header pattern"""
if re.match(self.SECTCRE, key):
- raise InvalidWriteError("Cannot write keys matching section pattern")
- if any(delim in key for delim in self._delimiters):
- raise InvalidWriteError("Cannot write key that contains delimiters")
+ raise InvalidWriteError(
+ f"Cannot write key {key}; begins with section pattern")
+ for delim in self._delimiters:
+ if delim in key:
+ raise InvalidWriteError(
+ f"Cannot write key {key}; contains delimiter {delim}")
def _validate_value_types(self, *, section="", option="", value=""):
"""Raises a TypeError for illegal non-string values.
diff --git a/Lib/ctypes/__init__.py b/Lib/ctypes/__init__.py
index 823a3692fd1..d6d07a13f75 100644
--- a/Lib/ctypes/__init__.py
+++ b/Lib/ctypes/__init__.py
@@ -379,12 +379,6 @@ def create_unicode_buffer(init, size=None):
return buf
raise TypeError(init)
-
-def SetPointerType(pointer, cls):
- import warnings
- warnings._deprecated("ctypes.SetPointerType", remove=(3, 15))
- pointer.set_type(cls)
-
def ARRAY(typ, len):
return typ * len
diff --git a/Lib/dbm/dumb.py b/Lib/dbm/dumb.py
index def120ffc37..1bc239a84ff 100644
--- a/Lib/dbm/dumb.py
+++ b/Lib/dbm/dumb.py
@@ -9,7 +9,7 @@ XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
-items is never reused)
+items is not reused unless .reorganize() is called)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
@@ -17,8 +17,6 @@ updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
-- support opening for read-only (flag = 'm')
-
"""
import ast as _ast
@@ -289,6 +287,34 @@ class _Database(collections.abc.MutableMapping):
def __exit__(self, *args):
self.close()
+ def reorganize(self):
+ if self._readonly:
+ raise error('The database is opened for reading only')
+ self._verify_open()
+ # Ensure all changes are committed before reorganizing.
+ self._commit()
+ # Open file in r+ to allow changing in-place.
+ with _io.open(self._datfile, 'rb+') as f:
+ reorganize_pos = 0
+
+ # Iterate over existing keys, sorted by starting byte.
+ for key in sorted(self._index, key = lambda k: self._index[k][0]):
+ pos, siz = self._index[key]
+ f.seek(pos)
+ val = f.read(siz)
+
+ f.seek(reorganize_pos)
+ f.write(val)
+ self._index[key] = (reorganize_pos, siz)
+
+ blocks_occupied = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
+ reorganize_pos += blocks_occupied * _BLOCKSIZE
+
+ f.truncate(reorganize_pos)
+ # Commit changes to index, which were not in-place.
+ self._commit()
+
+
def open(file, flag='c', mode=0o666):
"""Open the database file, filename, and return corresponding object.
diff --git a/Lib/dbm/sqlite3.py b/Lib/dbm/sqlite3.py
index 7e0ae2a29e3..b296a1bcd1b 100644
--- a/Lib/dbm/sqlite3.py
+++ b/Lib/dbm/sqlite3.py
@@ -15,6 +15,7 @@ LOOKUP_KEY = "SELECT value FROM Dict WHERE key = CAST(? AS BLOB)"
STORE_KV = "REPLACE INTO Dict (key, value) VALUES (CAST(? AS BLOB), CAST(? AS BLOB))"
DELETE_KEY = "DELETE FROM Dict WHERE key = CAST(? AS BLOB)"
ITER_KEYS = "SELECT key FROM Dict"
+REORGANIZE = "VACUUM"
class error(OSError):
@@ -122,6 +123,9 @@ class _Database(MutableMapping):
def __exit__(self, *args):
self.close()
+ def reorganize(self):
+ self._execute(REORGANIZE)
+
def open(filename, /, flag="r", mode=0o666):
"""Open a dbm.sqlite3 database and return the dbm object.
diff --git a/Lib/difflib.py b/Lib/difflib.py
index f1f4e62514a..487936dbf47 100644
--- a/Lib/difflib.py
+++ b/Lib/difflib.py
@@ -78,8 +78,8 @@ class SequenceMatcher:
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
- >>> print(round(s.ratio(), 3))
- 0.866
+ >>> print(round(s.ratio(), 2))
+ 0.87
>>>
If you're only interested in where the sequences match,
@@ -1615,16 +1615,13 @@ def _mdiff(fromlines, tolines, context=None, linejunk=None,
_file_template = """
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
-<html>
-
+<!DOCTYPE html>
+<html lang="en">
<head>
- <meta http-equiv="Content-Type"
- content="text/html; charset=%(charset)s" />
- <title></title>
- <style type="text/css">%(styles)s
+ <meta charset="%(charset)s">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <title>Diff comparison</title>
+ <style>%(styles)s
</style>
</head>
@@ -1636,13 +1633,36 @@ _file_template = """
_styles = """
:root {color-scheme: light dark}
- table.diff {font-family: Menlo, Consolas, Monaco, Liberation Mono, Lucida Console, monospace; border:medium}
- .diff_header {background-color:#e0e0e0}
- td.diff_header {text-align:right}
- .diff_next {background-color:#c0c0c0}
+ table.diff {
+ font-family: Menlo, Consolas, Monaco, Liberation Mono, Lucida Console, monospace;
+ border: medium;
+ }
+ .diff_header {
+ background-color: #e0e0e0;
+ font-weight: bold;
+ }
+ td.diff_header {
+ text-align: right;
+ padding: 0 8px;
+ }
+ .diff_next {
+ background-color: #c0c0c0;
+ padding: 4px 0;
+ }
.diff_add {background-color:palegreen}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}
+ table.diff[summary="Legends"] {
+ margin-top: 20px;
+ border: 1px solid #ccc;
+ }
+ table.diff[summary="Legends"] th {
+ background-color: #e0e0e0;
+ padding: 4px 8px;
+ }
+ table.diff[summary="Legends"] td {
+ padding: 4px 8px;
+ }
@media (prefers-color-scheme: dark) {
.diff_header {background-color:#666}
@@ -1650,6 +1670,8 @@ _styles = """
.diff_add {background-color:darkgreen}
.diff_chg {background-color:#847415}
.diff_sub {background-color:darkred}
+ table.diff[summary="Legends"] {border-color:#555}
+ table.diff[summary="Legends"] th{background-color:#666}
}"""
_table_template = """
@@ -1692,7 +1714,7 @@ class HtmlDiff(object):
make_table -- generates HTML for a single side by side table
make_file -- generates complete HTML file with a single side by side table
- See tools/scripts/diff.py for an example usage of this class.
+ See Doc/includes/diff.py for an example usage of this class.
"""
_file_template = _file_template
diff --git a/Lib/doctest.py b/Lib/doctest.py
index 2acb6cb79f3..c8c95ecbb27 100644
--- a/Lib/doctest.py
+++ b/Lib/doctest.py
@@ -101,6 +101,7 @@ import pdb
import re
import sys
import traceback
+import types
import unittest
from io import StringIO, IncrementalNewlineDecoder
from collections import namedtuple
@@ -385,7 +386,7 @@ class _OutputRedirectingPdb(pdb.Pdb):
self.__out = out
self.__debugger_used = False
# do not play signal games in the pdb
- pdb.Pdb.__init__(self, stdout=out, nosigint=True)
+ super().__init__(stdout=out, nosigint=True)
# still use input() to get user input
self.use_rawinput = 1
@@ -1278,6 +1279,11 @@ class DocTestRunner:
# Reporting methods
#/////////////////////////////////////////////////////////////////
+ def report_skip(self, out, test, example):
+ """
+ Report that the given example was skipped.
+ """
+
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
@@ -1375,6 +1381,8 @@ class DocTestRunner:
# If 'SKIP' is set, then skip this example.
if self.optionflags & SKIP:
+ if not quiet:
+ self.report_skip(out, test, example)
skips += 1
continue
@@ -1395,11 +1403,11 @@ class DocTestRunner:
exec(compile(example.source, filename, "single",
compileflags, True), test.globs)
self.debugger.set_continue() # ==== Example Finished ====
- exception = None
+ exc_info = None
except KeyboardInterrupt:
raise
- except:
- exception = sys.exc_info()
+ except BaseException as exc:
+ exc_info = type(exc), exc, exc.__traceback__.tb_next
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
@@ -1408,21 +1416,21 @@ class DocTestRunner:
# If the example executed without raising any exceptions,
# verify its output.
- if exception is None:
+ if exc_info is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
- formatted_ex = traceback.format_exception_only(*exception[:2])
- if issubclass(exception[0], SyntaxError):
+ formatted_ex = traceback.format_exception_only(*exc_info[:2])
+ if issubclass(exc_info[0], SyntaxError):
# SyntaxError / IndentationError is special:
# we don't care about the carets / suggestions / etc
# We only care about the error message and notes.
# They start with `SyntaxError:` (or any other class name)
exception_line_prefixes = (
- f"{exception[0].__qualname__}:",
- f"{exception[0].__module__}.{exception[0].__qualname__}:",
+ f"{exc_info[0].__qualname__}:",
+ f"{exc_info[0].__module__}.{exc_info[0].__qualname__}:",
)
exc_msg_index = next(
index
@@ -1433,7 +1441,7 @@ class DocTestRunner:
exc_msg = "".join(formatted_ex)
if not quiet:
- got += _exception_traceback(exception)
+ got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
@@ -1462,7 +1470,7 @@ class DocTestRunner:
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
- exception)
+ exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
@@ -2272,12 +2280,63 @@ def set_unittest_reportflags(flags):
return old
+class _DocTestCaseRunner(DocTestRunner):
+
+ def __init__(self, *args, test_case, test_result, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._test_case = test_case
+ self._test_result = test_result
+ self._examplenum = 0
+
+ def _subTest(self):
+ subtest = unittest.case._SubTest(self._test_case, str(self._examplenum), {})
+ self._examplenum += 1
+ return subtest
+
+ def report_skip(self, out, test, example):
+ unittest.case._addSkip(self._test_result, self._subTest(), '')
+
+ def report_success(self, out, test, example, got):
+ self._test_result.addSubTest(self._test_case, self._subTest(), None)
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ tb = self._add_traceback(exc_info[2], test, example)
+ exc_info = (*exc_info[:2], tb)
+ self._test_result.addSubTest(self._test_case, self._subTest(), exc_info)
+
+ def report_failure(self, out, test, example, got):
+ msg = ('Failed example:\n' + _indent(example.source) +
+ self._checker.output_difference(example, got, self.optionflags).rstrip('\n'))
+ exc = self._test_case.failureException(msg)
+ tb = self._add_traceback(None, test, example)
+ exc_info = (type(exc), exc, tb)
+ self._test_result.addSubTest(self._test_case, self._subTest(), exc_info)
+
+ def _add_traceback(self, traceback, test, example):
+ if test.lineno is None or example.lineno is None:
+ lineno = None
+ else:
+ lineno = test.lineno + example.lineno + 1
+ return types.SimpleNamespace(
+ tb_frame = types.SimpleNamespace(
+ f_globals=test.globs,
+ f_code=types.SimpleNamespace(
+ co_filename=test.filename,
+ co_name=test.name,
+ ),
+ ),
+ tb_next = traceback,
+ tb_lasti = -1,
+ tb_lineno = lineno,
+ )
+
+
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
- unittest.TestCase.__init__(self)
+ super().__init__()
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
@@ -2301,30 +2360,28 @@ class DocTestCase(unittest.TestCase):
test.globs.clear()
test.globs.update(self._dt_globs)
+ def run(self, result=None):
+ self._test_result = result
+ return super().run(result)
+
def runTest(self):
test = self._dt_test
- old = sys.stdout
- new = StringIO()
optionflags = self._dt_optionflags
+ result = self._test_result
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
+ if getattr(result, 'failfast', False):
+ optionflags |= FAIL_FAST
- runner = DocTestRunner(optionflags=optionflags,
- checker=self._dt_checker, verbose=False)
-
- try:
- runner.DIVIDER = "-"*70
- results = runner.run(test, out=new.write, clear_globs=False)
- if results.skipped == results.attempted:
- raise unittest.SkipTest("all examples were skipped")
- finally:
- sys.stdout = old
-
- if results.failed:
- raise self.failureException(self.format_failure(new.getvalue()))
+ runner = _DocTestCaseRunner(optionflags=optionflags,
+ checker=self._dt_checker, verbose=False,
+ test_case=self, test_result=result)
+ results = runner.run(test, clear_globs=False)
+ if results.skipped == results.attempted:
+ raise unittest.SkipTest("all examples were skipped")
def format_failure(self, err):
test = self._dt_test
@@ -2439,7 +2496,7 @@ class DocTestCase(unittest.TestCase):
class SkipDocTestCase(DocTestCase):
def __init__(self, module):
self.module = module
- DocTestCase.__init__(self, None)
+ super().__init__(None)
def setUp(self):
self.skipTest("DocTestSuite will not work with -O2 and above")
diff --git a/Lib/email/_header_value_parser.py b/Lib/email/_header_value_parser.py
index 9a51b943733..91243378dc0 100644
--- a/Lib/email/_header_value_parser.py
+++ b/Lib/email/_header_value_parser.py
@@ -1020,6 +1020,8 @@ def _get_ptext_to_endchars(value, endchars):
a flag that is True iff there were any quoted printables decoded.
"""
+ if not value:
+ return '', '', False
fragment, *remainder = _wsp_splitter(value, 1)
vchars = []
escape = False
@@ -1573,7 +1575,7 @@ def get_dtext(value):
def _check_for_early_dl_end(value, domain_literal):
if value:
return False
- domain_literal.append(errors.InvalidHeaderDefect(
+ domain_literal.defects.append(errors.InvalidHeaderDefect(
"end of input inside domain-literal"))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
return True
@@ -1592,9 +1594,9 @@ def get_domain_literal(value):
raise errors.HeaderParseError("expected '[' at start of domain-literal "
"but found '{}'".format(value))
value = value[1:]
+ domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
- domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
diff --git a/Lib/email/header.py b/Lib/email/header.py
index 113a81f4131..220a84a7454 100644
--- a/Lib/email/header.py
+++ b/Lib/email/header.py
@@ -59,16 +59,22 @@ _max_append = email.quoprimime._max_append
def decode_header(header):
"""Decode a message header value without converting charset.
- Returns a list of (string, charset) pairs containing each of the decoded
- parts of the header. Charset is None for non-encoded parts of the header,
- otherwise a lower-case string containing the name of the character set
- specified in the encoded string.
+ For historical reasons, this function may return either:
+
+ 1. A list of length 1 containing a pair (str, None).
+ 2. A list of (bytes, charset) pairs containing each of the decoded
+ parts of the header. Charset is None for non-encoded parts of the header,
+ otherwise a lower-case string containing the name of the character set
+ specified in the encoded string.
header may be a string that may or may not contain RFC2047 encoded words,
or it may be a Header object.
An email.errors.HeaderParseError may be raised when certain decoding error
occurs (e.g. a base64 decoding exception).
+
+ This function exists for backwards compatibility only. For new code, we
+ recommend using email.headerregistry.HeaderRegistry instead.
"""
# If it is a Header object, we can just return the encoded chunks.
if hasattr(header, '_chunks'):
@@ -161,6 +167,9 @@ def make_header(decoded_seq, maxlinelen=None, header_name=None,
This function takes one of those sequence of pairs and returns a Header
instance. Optional maxlinelen, header_name, and continuation_ws are as in
the Header constructor.
+
+ This function exists for backwards compatibility only, and is not
+ recommended for use in new code.
"""
h = Header(maxlinelen=maxlinelen, header_name=header_name,
continuation_ws=continuation_ws)
diff --git a/Lib/email/message.py b/Lib/email/message.py
index 87fcab68868..41fcc2b9778 100644
--- a/Lib/email/message.py
+++ b/Lib/email/message.py
@@ -564,7 +564,7 @@ class Message:
msg.add_header('content-disposition', 'attachment', filename='bud.gif')
msg.add_header('content-disposition', 'attachment',
-                 filename=('utf-8', '', Fußballer.ppt'))
+                 filename=('utf-8', '', 'Fußballer.ppt'))
msg.add_header('content-disposition', 'attachment',
                 filename='Fußballer.ppt'))
"""
diff --git a/Lib/email/utils.py b/Lib/email/utils.py
index 7eab74dc0db..3de1f0d24a1 100644
--- a/Lib/email/utils.py
+++ b/Lib/email/utils.py
@@ -417,8 +417,14 @@ def decode_params(params):
for name, continuations in rfc2231_params.items():
value = []
extended = False
- # Sort by number
- continuations.sort()
+ # Sort by number, treating None as 0 if there is no 0,
+ # and ignore it if there is already a 0.
+ has_zero = any(x[0] == 0 for x in continuations)
+ if has_zero:
+ continuations = [x for x in continuations if x[0] is not None]
+ else:
+ continuations = [(x[0] or 0, x[1], x[2]) for x in continuations]
+ continuations.sort(key=lambda x: x[0])
# And now append all values in numerical order, converting
# %-encodings for the encoded segments. If any of the
# continuation names ends in a *, then the entire string, after
diff --git a/Lib/encodings/idna.py b/Lib/encodings/idna.py
index 60a8d5eb227..0c90b4c9fe1 100644
--- a/Lib/encodings/idna.py
+++ b/Lib/encodings/idna.py
@@ -316,7 +316,7 @@ class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def _buffer_decode(self, input, errors, final):
if errors != 'strict':
- raise UnicodeError("Unsupported error handling: {errors}")
+ raise UnicodeError(f"Unsupported error handling: {errors}")
if not input:
return ("", 0)
diff --git a/Lib/encodings/palmos.py b/Lib/encodings/palmos.py
index c506d654523..df164ca5b95 100644
--- a/Lib/encodings/palmos.py
+++ b/Lib/encodings/palmos.py
@@ -201,7 +201,7 @@ decoding_table = (
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
- '\x9b' # 0x9B -> <control>
+ '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
diff --git a/Lib/fractions.py b/Lib/fractions.py
index 063f28478c7..a8c67068522 100644
--- a/Lib/fractions.py
+++ b/Lib/fractions.py
@@ -168,9 +168,9 @@ _FLOAT_FORMAT_SPECIFICATION_MATCHER = re.compile(r"""
# A '0' that's *not* followed by another digit is parsed as a minimum width
# rather than a zeropad flag.
(?P<zeropad>0(?=[0-9]))?
- (?P<minimumwidth>0|[1-9][0-9]*)?
+ (?P<minimumwidth>[0-9]+)?
(?P<thousands_sep>[,_])?
- (?:\.(?P<precision>0|[1-9][0-9]*))?
+ (?:\.(?P<precision>[0-9]+))?
(?P<presentation_type>[eEfFgG%])
""", re.DOTALL | re.VERBOSE).fullmatch
@@ -504,6 +504,9 @@ class Fraction(numbers.Rational):
trim_point = not alternate_form
exponent_indicator = "E" if presentation_type in "EFG" else "e"
+ if align == '=' and fill == '0':
+ zeropad = True
+
# Round to get the digits we need, figure out where to place the point,
# and decide whether to use scientific notation. 'point_pos' is the
# relative to the _end_ of the digit string: that is, it's the number
diff --git a/Lib/genericpath.py b/Lib/genericpath.py
index ba7b0a13c7f..9363f564aab 100644
--- a/Lib/genericpath.py
+++ b/Lib/genericpath.py
@@ -8,7 +8,7 @@ import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdevdrive', 'isdir', 'isfile', 'isjunction', 'islink',
- 'lexists', 'samefile', 'sameopenfile', 'samestat']
+ 'lexists', 'samefile', 'sameopenfile', 'samestat', 'ALLOW_MISSING']
# Does a path exist?
@@ -189,3 +189,12 @@ def _check_arg_types(funcname, *args):
f'os.PathLike object, not {s.__class__.__name__!r}') from None
if hasstr and hasbytes:
raise TypeError("Can't mix strings and bytes in path components") from None
+
+# A singleton with a true boolean value.
+@object.__new__
+class ALLOW_MISSING:
+ """Special value for use in realpath()."""
+ def __repr__(self):
+ return 'os.path.ALLOW_MISSING'
+ def __reduce__(self):
+ return self.__class__.__name__
diff --git a/Lib/hashlib.py b/Lib/hashlib.py
index abacac22ea0..0e9bd98aa1f 100644
--- a/Lib/hashlib.py
+++ b/Lib/hashlib.py
@@ -141,29 +141,29 @@ def __get_openssl_constructor(name):
return __get_builtin_constructor(name)
-def __py_new(name, data=b'', **kwargs):
+def __py_new(name, *args, **kwargs):
"""new(name, data=b'', **kwargs) - Return a new hashing object using the
named algorithm; optionally initialized with data (which must be
a bytes-like object).
"""
- return __get_builtin_constructor(name)(data, **kwargs)
+ return __get_builtin_constructor(name)(*args, **kwargs)
-def __hash_new(name, data=b'', **kwargs):
+def __hash_new(name, *args, **kwargs):
"""new(name, data=b'') - Return a new hashing object using the named algorithm;
optionally initialized with data (which must be a bytes-like object).
"""
if name in __block_openssl_constructor:
# Prefer our builtin blake2 implementation.
- return __get_builtin_constructor(name)(data, **kwargs)
+ return __get_builtin_constructor(name)(*args, **kwargs)
try:
- return _hashlib.new(name, data, **kwargs)
+ return _hashlib.new(name, *args, **kwargs)
except ValueError:
# If the _hashlib module (OpenSSL) doesn't support the named
# hash, try using our builtin implementations.
# This allows for SHA224/256 and SHA384/512 support even though
# the OpenSSL library prior to 0.9.8 doesn't provide them.
- return __get_builtin_constructor(name)(data)
+ return __get_builtin_constructor(name)(*args, **kwargs)
try:
diff --git a/Lib/html/parser.py b/Lib/html/parser.py
index 1e30956fe24..9b4f0959913 100644
--- a/Lib/html/parser.py
+++ b/Lib/html/parser.py
@@ -27,18 +27,48 @@ charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
attr_charref = re.compile(r'&(#[0-9]+|#[xX][0-9a-fA-F]+|[a-zA-Z][a-zA-Z0-9]*)[;=]?')
starttagopen = re.compile('<[a-zA-Z]')
+endtagopen = re.compile('</[a-zA-Z]')
piclose = re.compile('>')
-commentclose = re.compile(r'--\s*>')
+commentclose = re.compile(r'--!?>')
+commentabruptclose = re.compile(r'-?>')
# Note:
-# 1) if you change tagfind/attrfind remember to update locatestarttagend too;
-# 2) if you change tagfind/attrfind and/or locatestarttagend the parser will
+# 1) if you change tagfind/attrfind remember to update locatetagend too;
+# 2) if you change tagfind/attrfind and/or locatetagend the parser will
# explode, so don't do it.
-# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
-# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
-tagfind_tolerant = re.compile(r'([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
-attrfind_tolerant = re.compile(
- r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
- r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
+# see the HTML5 specs section "13.2.5.6 Tag open state",
+# "13.2.5.8 Tag name state" and "13.2.5.33 Attribute name state".
+# https://html.spec.whatwg.org/multipage/parsing.html#tag-open-state
+# https://html.spec.whatwg.org/multipage/parsing.html#tag-name-state
+# https://html.spec.whatwg.org/multipage/parsing.html#attribute-name-state
+tagfind_tolerant = re.compile(r'([a-zA-Z][^\t\n\r\f />]*)(?:[\t\n\r\f ]|/(?!>))*')
+attrfind_tolerant = re.compile(r"""
+ (
+ (?<=['"\t\n\r\f /])[^\t\n\r\f />][^\t\n\r\f /=>]* # attribute name
+ )
+ (= # value indicator
+ ('[^']*' # LITA-enclosed value
+ |"[^"]*" # LIT-enclosed value
+ |(?!['"])[^>\t\n\r\f ]* # bare value
+ )
+ )?
+ (?:[\t\n\r\f ]|/(?!>))* # possibly followed by a space
+""", re.VERBOSE)
+locatetagend = re.compile(r"""
+ [a-zA-Z][^\t\n\r\f />]* # tag name
+ [\t\n\r\f /]* # optional whitespace before attribute name
+ (?:(?<=['"\t\n\r\f /])[^\t\n\r\f />][^\t\n\r\f /=>]* # attribute name
+ (?:= # value indicator
+ (?:'[^']*' # LITA-enclosed value
+ |"[^"]*" # LIT-enclosed value
+ |(?!['"])[^>\t\n\r\f ]* # bare value
+ )
+ )?
+ [\t\n\r\f /]* # possibly followed by a space
+ )*
+ >?
+""", re.VERBOSE)
+# The following variables are not used, but are temporarily left for
+# backward compatibility.
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][^\t\n\r\f />\x00]* # tag name
(?:[\s/]* # optional whitespace before attribute name
@@ -55,8 +85,6 @@ locatestarttagend_tolerant = re.compile(r"""
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
-# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
-# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
# Character reference processing logic specific to attribute values
@@ -140,7 +168,8 @@ class HTMLParser(_markupbase.ParserBase):
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
- self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
+ self.interesting = re.compile(r'</%s(?=[\t\n\r\f />])' % self.cdata_elem,
+ re.IGNORECASE|re.ASCII)
def clear_cdata_mode(self):
self.interesting = interesting_normal
@@ -165,7 +194,7 @@ class HTMLParser(_markupbase.ParserBase):
# & near the end and see if it's followed by a space or ;.
amppos = rawdata.rfind('&', max(i, n-34))
if (amppos >= 0 and
- not re.compile(r'[\s;]').search(rawdata, amppos)):
+ not re.compile(r'[\t\n\r\f ;]').search(rawdata, amppos)):
break # wait till we get all the text
j = n
else:
@@ -195,7 +224,7 @@ class HTMLParser(_markupbase.ParserBase):
k = self.parse_pi(i)
elif startswith("<!", i):
k = self.parse_html_declaration(i)
- elif (i + 1) < n:
+ elif (i + 1) < n or end:
self.handle_data("<")
k = i + 1
else:
@@ -203,17 +232,35 @@ class HTMLParser(_markupbase.ParserBase):
if k < 0:
if not end:
break
- k = rawdata.find('>', i + 1)
- if k < 0:
- k = rawdata.find('<', i + 1)
- if k < 0:
- k = i + 1
+ if starttagopen.match(rawdata, i): # < + letter
+ pass
+ elif startswith("</", i):
+ if i + 2 == n:
+ self.handle_data("</")
+ elif endtagopen.match(rawdata, i): # </ + letter
+ pass
+ else:
+ # bogus comment
+ self.handle_comment(rawdata[i+2:])
+ elif startswith("<!--", i):
+ j = n
+ for suffix in ("--!", "--", "-"):
+ if rawdata.endswith(suffix, i+4):
+ j -= len(suffix)
+ break
+ self.handle_comment(rawdata[i+4:j])
+ elif startswith("<![CDATA[", i):
+ self.unknown_decl(rawdata[i+3:])
+ elif rawdata[i:i+9].lower() == '<!doctype':
+ self.handle_decl(rawdata[i+2:])
+ elif startswith("<!", i):
+ # bogus comment
+ self.handle_comment(rawdata[i+2:])
+ elif startswith("<?", i):
+ self.handle_pi(rawdata[i+2:])
else:
- k += 1
- if self.convert_charrefs and not self.cdata_elem:
- self.handle_data(unescape(rawdata[i:k]))
- else:
- self.handle_data(rawdata[i:k])
+ raise AssertionError("we should not get here!")
+ k = n
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
@@ -290,8 +337,23 @@ class HTMLParser(_markupbase.ParserBase):
else:
return self.parse_bogus_comment(i)
+ # Internal -- parse comment, return length or -1 if not terminated
+ # see https://html.spec.whatwg.org/multipage/parsing.html#comment-start-state
+ def parse_comment(self, i, report=True):
+ rawdata = self.rawdata
+ assert rawdata.startswith('<!--', i), 'unexpected call to parse_comment()'
+ match = commentclose.search(rawdata, i+4)
+ if not match:
+ match = commentabruptclose.match(rawdata, i+4)
+ if not match:
+ return -1
+ if report:
+ j = match.start()
+ self.handle_comment(rawdata[i+4: j])
+ return match.end()
+
# Internal -- parse bogus comment, return length or -1 if not terminated
- # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
+ # see https://html.spec.whatwg.org/multipage/parsing.html#bogus-comment-state
def parse_bogus_comment(self, i, report=1):
rawdata = self.rawdata
assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
@@ -317,6 +379,8 @@ class HTMLParser(_markupbase.ParserBase):
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
+ # See the HTML5 specs section "13.2.5.8 Tag name state"
+ # https://html.spec.whatwg.org/multipage/parsing.html#tag-name-state
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
@@ -362,76 +426,42 @@ class HTMLParser(_markupbase.ParserBase):
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
- m = locatestarttagend_tolerant.match(rawdata, i)
- if m:
- j = m.end()
- next = rawdata[j:j+1]
- if next == ">":
- return j + 1
- if next == "/":
- if rawdata.startswith("/>", j):
- return j + 2
- if rawdata.startswith("/", j):
- # buffer boundary
- return -1
- # else bogus input
- if j > i:
- return j
- else:
- return i + 1
- if next == "":
- # end of input
- return -1
- if next in ("abcdefghijklmnopqrstuvwxyz=/"
- "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
- # end of input in or before attribute value, or we have the
- # '/' from a '/>' ending
- return -1
- if j > i:
- return j
- else:
- return i + 1
- raise AssertionError("we should not get here!")
+ match = locatetagend.match(rawdata, i+1)
+ assert match
+ j = match.end()
+ if rawdata[j-1] != ">":
+ return -1
+ return j
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
+ # See the HTML5 specs section "13.2.5.7 End tag open state"
+ # https://html.spec.whatwg.org/multipage/parsing.html#end-tag-open-state
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
- match = endendtag.search(rawdata, i+1) # >
- if not match:
+ if rawdata.find('>', i+2) < 0: # fast check
return -1
- gtpos = match.end()
- match = endtagfind.match(rawdata, i) # </ + tag + >
- if not match:
- if self.cdata_elem is not None:
- self.handle_data(rawdata[i:gtpos])
- return gtpos
- # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
- namematch = tagfind_tolerant.match(rawdata, i+2)
- if not namematch:
- # w3.org/TR/html5/tokenization.html#end-tag-open-state
- if rawdata[i:i+3] == '</>':
- return i+3
- else:
- return self.parse_bogus_comment(i)
- tagname = namematch.group(1).lower()
- # consume and ignore other stuff between the name and the >
- # Note: this is not 100% correct, since we might have things like
- # </tag attr=">">, but looking for > after the name should cover
- # most of the cases and is much simpler
- gtpos = rawdata.find('>', namematch.end())
- self.handle_endtag(tagname)
- return gtpos+1
+ if not endtagopen.match(rawdata, i): # </ + letter
+ if rawdata[i+2:i+3] == '>': # </> is ignored
+ # "missing-end-tag-name" parser error
+ return i+3
+ else:
+ return self.parse_bogus_comment(i)
- elem = match.group(1).lower() # script or style
- if self.cdata_elem is not None:
- if elem != self.cdata_elem:
- self.handle_data(rawdata[i:gtpos])
- return gtpos
+ match = locatetagend.match(rawdata, i+2)
+ assert match
+ j = match.end()
+ if rawdata[j-1] != ">":
+ return -1
- self.handle_endtag(elem)
+ # find the name: "13.2.5.8 Tag name state"
+ # https://html.spec.whatwg.org/multipage/parsing.html#tag-name-state
+ match = tagfind_tolerant.match(rawdata, i+2)
+ assert match
+ tag = match.group(1).lower()
+ self.handle_endtag(tag)
self.clear_cdata_mode()
- return gtpos
+ return j
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
diff --git a/Lib/http/server.py b/Lib/http/server.py
index f6d1b998f42..a2ffbe2e44d 100644
--- a/Lib/http/server.py
+++ b/Lib/http/server.py
@@ -115,7 +115,7 @@ DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8"
class HTTPServer(socketserver.TCPServer):
allow_reuse_address = True # Seems to make sense in testing environment
- allow_reuse_port = True
+ allow_reuse_port = False
def server_bind(self):
"""Override server_bind to store the server name."""
@@ -980,8 +980,8 @@ def test(HandlerClass=BaseHTTPRequestHandler,
HandlerClass.protocol_version = protocol
if tls_cert:
- server = ThreadingHTTPSServer(addr, HandlerClass, certfile=tls_cert,
- keyfile=tls_key, password=tls_password)
+ server = ServerClass(addr, HandlerClass, certfile=tls_cert,
+ keyfile=tls_key, password=tls_password)
else:
server = ServerClass(addr, HandlerClass)
@@ -1041,7 +1041,7 @@ def _main(args=None):
parser.error(f"Failed to read TLS password file: {e}")
# ensure dual-stack is not disabled; ref #38907
- class DualStackServer(ThreadingHTTPServer):
+ class DualStackServerMixin:
def server_bind(self):
# suppress exception when protocol is IPv4
@@ -1054,9 +1054,16 @@ def _main(args=None):
self.RequestHandlerClass(request, client_address, self,
directory=args.directory)
+ class HTTPDualStackServer(DualStackServerMixin, ThreadingHTTPServer):
+ pass
+ class HTTPSDualStackServer(DualStackServerMixin, ThreadingHTTPSServer):
+ pass
+
+ ServerClass = HTTPSDualStackServer if args.tls_cert else HTTPDualStackServer
+
test(
HandlerClass=SimpleHTTPRequestHandler,
- ServerClass=DualStackServer,
+ ServerClass=ServerClass,
port=args.port,
bind=args.bind,
protocol=args.protocol,
diff --git a/Lib/idlelib/NEWS2x.txt b/Lib/idlelib/NEWS2x.txt
index 6751ca5f111..3721193007e 100644
--- a/Lib/idlelib/NEWS2x.txt
+++ b/Lib/idlelib/NEWS2x.txt
@@ -1,6 +1,6 @@
What's New in IDLE 2.7? (Merged into 3.1 before 2.7 release.)
=======================
-*Release date: XX-XXX-2010*
+*Release date: 03-Jul-2010*
- idle.py modified and simplified to better support developing experimental
versions of IDLE which are not installed in the standard location.
diff --git a/Lib/idlelib/News3.txt b/Lib/idlelib/News3.txt
index 74d84b38931..30784578cc6 100644
--- a/Lib/idlelib/News3.txt
+++ b/Lib/idlelib/News3.txt
@@ -4,6 +4,13 @@ Released on 2025-10-07
=========================
+gh-112936: IDLE - Include Shell menu in single-process mode,
+though with Restart Shell and View Last Restart disabled.
+Patch by Zhikang Yan.
+
+gh-112938: IDLE - Fix uninterruptable hang when Shell gets
+rapid continuous output.
+
gh-127060: Set TERM environment variable to 'dumb' to not add ANSI escape
sequences for text color in tracebacks. IDLE does not understand them.
Patch by Victor Stinner.
diff --git a/Lib/idlelib/configdialog.py b/Lib/idlelib/configdialog.py
index 4d2adb48570..e618ef07a90 100644
--- a/Lib/idlelib/configdialog.py
+++ b/Lib/idlelib/configdialog.py
@@ -435,7 +435,7 @@ class FontPage(Frame):
self.font_name.set(font.lower())
def set_samples(self, event=None):
- """Update update both screen samples with the font settings.
+ """Update both screen samples with the font settings.
Called on font initialization and change events.
Accesses font_name, font_size, and font_bold Variables.
diff --git a/Lib/idlelib/debugger.py b/Lib/idlelib/debugger.py
index d90dbcd11f9..1fae1d4b0ad 100644
--- a/Lib/idlelib/debugger.py
+++ b/Lib/idlelib/debugger.py
@@ -1,6 +1,6 @@
"""Debug user code with a GUI interface to a subclass of bdb.Bdb.
-The Idb idb and Debugger gui instances each need a reference to each
+The Idb instance 'idb' and Debugger instance 'gui' need references to each
other or to an rpc proxy for each other.
If IDLE is started with '-n', so that user code and idb both run in the
diff --git a/Lib/idlelib/editor.py b/Lib/idlelib/editor.py
index c76db20c587..17b498f63ba 100644
--- a/Lib/idlelib/editor.py
+++ b/Lib/idlelib/editor.py
@@ -1649,7 +1649,7 @@ class IndentSearcher:
self.finished = 1
def run(self):
- """Return 2 lines containing block opener and and indent.
+ """Return 2 lines containing block opener and indent.
Either the indent line or both may be None.
"""
diff --git a/Lib/idlelib/idle_test/htest.py b/Lib/idlelib/idle_test/htest.py
index a7293774eec..b63ff9ec287 100644
--- a/Lib/idlelib/idle_test/htest.py
+++ b/Lib/idlelib/idle_test/htest.py
@@ -337,7 +337,7 @@ _tree_widget_spec = {
'file': 'tree',
'kwds': {},
'msg': "The canvas is scrollable.\n"
- "Click on folders up to to the lowest level."
+ "Click on folders up to the lowest level."
}
_undo_delegator_spec = {
diff --git a/Lib/ipaddress.py b/Lib/ipaddress.py
index 69e933a6541..8b60b9d5c9c 100644
--- a/Lib/ipaddress.py
+++ b/Lib/ipaddress.py
@@ -1660,10 +1660,12 @@ class _BaseV6:
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
- if len(ip_str) > 39:
- msg = ("At most 39 characters expected in "
- f"{ip_str[:14]!r}({len(ip_str)-28} chars elided){ip_str[-14:]!r}")
- raise AddressValueError(msg)
+ if len(ip_str) > 45:
+ shorten = ip_str
+ if len(shorten) > 100:
+ shorten = f'{ip_str[:45]}({len(ip_str)-90} chars elided){ip_str[-45:]}'
+ raise AddressValueError(f"At most 45 characters expected in "
+ f"{shorten!r}")
# We want to allow more parts than the max to be 'split'
# to preserve the correct error message when there are
diff --git a/Lib/json/encoder.py b/Lib/json/encoder.py
index 016638549aa..bc446e0f377 100644
--- a/Lib/json/encoder.py
+++ b/Lib/json/encoder.py
@@ -348,7 +348,6 @@ def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_current_indent_level += 1
newline_indent = '\n' + _indent * _current_indent_level
item_separator = _item_separator + newline_indent
- yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
@@ -381,6 +380,8 @@ def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
f'not {key.__class__.__name__}')
if first:
first = False
+ if newline_indent is not None:
+ yield newline_indent
else:
yield item_separator
yield _encoder(key)
@@ -413,7 +414,7 @@ def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
except BaseException as exc:
exc.add_note(f'when serializing {type(dct).__name__} item {key!r}')
raise
- if newline_indent is not None:
+ if not first and newline_indent is not None:
_current_indent_level -= 1
yield '\n' + _indent * _current_indent_level
yield '}'
diff --git a/Lib/locale.py b/Lib/locale.py
index 2feb10e59c9..dfedc6386cb 100644
--- a/Lib/locale.py
+++ b/Lib/locale.py
@@ -883,6 +883,10 @@ del k, v
# updated 'sr@latn' -> 'sr_CS.UTF-8@latin' to 'sr_RS.UTF-8@latin'
# removed 'univ'
# removed 'universal'
+#
+# SS 2025-06-10:
+# Remove 'c.utf8' -> 'en_US.UTF-8' because 'en_US.UTF-8' does not exist
+# on all platforms.
locale_alias = {
'a3': 'az_AZ.KOI8-C',
@@ -962,7 +966,6 @@ locale_alias = {
'c.ascii': 'C',
'c.en': 'C',
'c.iso88591': 'en_US.ISO8859-1',
- 'c.utf8': 'en_US.UTF-8',
'c_c': 'C',
'c_c.c': 'C',
'ca': 'ca_ES.ISO8859-1',
diff --git a/Lib/logging/__init__.py b/Lib/logging/__init__.py
index 5c3c4424934..c5860d53b1b 100644
--- a/Lib/logging/__init__.py
+++ b/Lib/logging/__init__.py
@@ -1475,8 +1475,6 @@ class Logger(Filterer):
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
- _tls = threading.local()
-
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
@@ -1673,19 +1671,14 @@ class Logger(Filterer):
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
- if self._is_disabled():
+ if self.disabled:
return
-
- self._tls.in_progress = True
- try:
- maybe_record = self.filter(record)
- if not maybe_record:
- return
- if isinstance(maybe_record, LogRecord):
- record = maybe_record
- self.callHandlers(record)
- finally:
- self._tls.in_progress = False
+ maybe_record = self.filter(record)
+ if not maybe_record:
+ return
+ if isinstance(maybe_record, LogRecord):
+ record = maybe_record
+ self.callHandlers(record)
def addHandler(self, hdlr):
"""
@@ -1773,7 +1766,7 @@ class Logger(Filterer):
"""
Is this logger enabled for level 'level'?
"""
- if self._is_disabled():
+ if self.disabled:
return False
try:
@@ -1823,11 +1816,6 @@ class Logger(Filterer):
if isinstance(item, Logger) and item.parent is self and
_hierlevel(item) == 1 + _hierlevel(item.parent))
- def _is_disabled(self):
- # We need to use getattr as it will only be set the first time a log
- # message is recorded on any given thread
- return self.disabled or getattr(self._tls, 'in_progress', False)
-
def __repr__(self):
level = getLevelName(self.getEffectiveLevel())
return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level)
diff --git a/Lib/logging/config.py b/Lib/logging/config.py
index c994349fd6e..3d9aa00fa52 100644
--- a/Lib/logging/config.py
+++ b/Lib/logging/config.py
@@ -1018,7 +1018,7 @@ def listen(port=DEFAULT_LOGGING_CONFIG_PORT, verify=None):
"""
allow_reuse_address = True
- allow_reuse_port = True
+ allow_reuse_port = False
def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
handler=None, ready=None, verify=None):
diff --git a/Lib/multiprocessing/forkserver.py b/Lib/multiprocessing/forkserver.py
index 681af2610e9..c91891ff162 100644
--- a/Lib/multiprocessing/forkserver.py
+++ b/Lib/multiprocessing/forkserver.py
@@ -222,6 +222,10 @@ def main(listener_fd, alive_r, preload, main_path=None, sys_path=None,
except ImportError:
pass
+ # gh-135335: flush stdout/stderr in case any of the preloaded modules
+ # wrote to them, otherwise children might inherit buffered data
+ util._flush_std_streams()
+
util._close_stdin()
sig_r, sig_w = os.pipe()
diff --git a/Lib/netrc.py b/Lib/netrc.py
index b285fd8e357..2f502c1d533 100644
--- a/Lib/netrc.py
+++ b/Lib/netrc.py
@@ -7,6 +7,19 @@ import os, stat
__all__ = ["netrc", "NetrcParseError"]
+def _can_security_check():
+ # On WASI, getuid() is indicated as a stub but it may also be missing.
+ return os.name == 'posix' and hasattr(os, 'getuid')
+
+
+def _getpwuid(uid):
+ try:
+ import pwd
+ return pwd.getpwuid(uid)[0]
+ except (ImportError, LookupError):
+ return f'uid {uid}'
+
+
class NetrcParseError(Exception):
"""Exception raised on syntax errors in the .netrc file."""
def __init__(self, msg, filename=None, lineno=None):
@@ -142,21 +155,15 @@ class netrc:
self._security_check(fp, default_netrc, self.hosts[entryname][0])
def _security_check(self, fp, default_netrc, login):
- if os.name == 'posix' and default_netrc and login != "anonymous":
+ if _can_security_check() and default_netrc and login != "anonymous":
prop = os.fstat(fp.fileno())
- if prop.st_uid != os.getuid():
- import pwd
- try:
- fowner = pwd.getpwuid(prop.st_uid)[0]
- except KeyError:
- fowner = 'uid %s' % prop.st_uid
- try:
- user = pwd.getpwuid(os.getuid())[0]
- except KeyError:
- user = 'uid %s' % os.getuid()
+ current_user_id = os.getuid()
+ if prop.st_uid != current_user_id:
+ fowner = _getpwuid(prop.st_uid)
+ user = _getpwuid(current_user_id)
raise NetrcParseError(
- (f"~/.netrc file owner ({fowner}, {user}) does not match"
- " current user"))
+ f"~/.netrc file owner ({fowner}) does not match"
+ f" current user ({user})")
if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
raise NetrcParseError(
"~/.netrc access too permissive: access"
diff --git a/Lib/ntpath.py b/Lib/ntpath.py
index 52ff2af743a..9cdc16480f9 100644
--- a/Lib/ntpath.py
+++ b/Lib/ntpath.py
@@ -29,7 +29,7 @@ __all__ = ["normcase","isabs","join","splitdrive","splitroot","split","splitext"
"abspath","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath",
"samefile", "sameopenfile", "samestat", "commonpath", "isjunction",
- "isdevdrive"]
+ "isdevdrive", "ALLOW_MISSING"]
def _get_bothseps(path):
if isinstance(path, bytes):
@@ -601,9 +601,10 @@ try:
from nt import _findfirstfile, _getfinalpathname, readlink as _nt_readlink
except ImportError:
# realpath is a no-op on systems without _getfinalpathname support.
- realpath = abspath
+ def realpath(path, *, strict=False):
+ return abspath(path)
else:
- def _readlink_deep(path):
+ def _readlink_deep(path, ignored_error=OSError):
# These error codes indicate that we should stop reading links and
# return the path we currently have.
# 1: ERROR_INVALID_FUNCTION
@@ -636,7 +637,7 @@ else:
path = old_path
break
path = normpath(join(dirname(old_path), path))
- except OSError as ex:
+ except ignored_error as ex:
if ex.winerror in allowed_winerror:
break
raise
@@ -645,7 +646,7 @@ else:
break
return path
- def _getfinalpathname_nonstrict(path):
+ def _getfinalpathname_nonstrict(path, ignored_error=OSError):
# These error codes indicate that we should stop resolving the path
# and return the value we currently have.
# 1: ERROR_INVALID_FUNCTION
@@ -673,17 +674,18 @@ else:
try:
path = _getfinalpathname(path)
return join(path, tail) if tail else path
- except OSError as ex:
+ except ignored_error as ex:
if ex.winerror not in allowed_winerror:
raise
try:
# The OS could not resolve this path fully, so we attempt
# to follow the link ourselves. If we succeed, join the tail
# and return.
- new_path = _readlink_deep(path)
+ new_path = _readlink_deep(path,
+ ignored_error=ignored_error)
if new_path != path:
return join(new_path, tail) if tail else new_path
- except OSError:
+ except ignored_error:
# If we fail to readlink(), let's keep traversing
pass
# If we get these errors, try to get the real name of the file without accessing it.
@@ -691,7 +693,7 @@ else:
try:
name = _findfirstfile(path)
path, _ = split(path)
- except OSError:
+ except ignored_error:
path, name = split(path)
else:
path, name = split(path)
@@ -721,6 +723,15 @@ else:
if normcase(path) == devnull:
return '\\\\.\\NUL'
had_prefix = path.startswith(prefix)
+
+ if strict is ALLOW_MISSING:
+ ignored_error = FileNotFoundError
+ strict = True
+ elif strict:
+ ignored_error = ()
+ else:
+ ignored_error = OSError
+
if not had_prefix and not isabs(path):
path = join(cwd, path)
try:
@@ -728,17 +739,16 @@ else:
initial_winerror = 0
except ValueError as ex:
# gh-106242: Raised for embedded null characters
- # In strict mode, we convert into an OSError.
+ # In strict modes, we convert into an OSError.
# Non-strict mode returns the path as-is, since we've already
# made it absolute.
if strict:
raise OSError(str(ex)) from None
path = normpath(path)
- except OSError as ex:
- if strict:
- raise
+ except ignored_error as ex:
initial_winerror = ex.winerror
- path = _getfinalpathname_nonstrict(path)
+ path = _getfinalpathname_nonstrict(path,
+ ignored_error=ignored_error)
# The path returned by _getfinalpathname will always start with \\?\ -
# strip off that prefix unless it was already provided on the original
# path.
diff --git a/Lib/os.py b/Lib/os.py
index 266e40b56f6..12926c832f5 100644
--- a/Lib/os.py
+++ b/Lib/os.py
@@ -10,7 +10,7 @@ This exports:
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
+ - os.linesep is the line separator in text files ('\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
@@ -118,6 +118,7 @@ if _exists("_have_functions"):
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
+ _add("HAVE_LSTAT", "lstat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
diff --git a/Lib/platform.py b/Lib/platform.py
index 077db81264a..da15bb4717b 100644
--- a/Lib/platform.py
+++ b/Lib/platform.py
@@ -612,6 +612,9 @@ def system_alias(system, release, version):
### Various internal helpers
+# Table for cleaning up characters in filenames.
+_SIMPLE_SUBSTITUTIONS = str.maketrans(r' /\:;"()', r'_-------')
+
def _platform(*args):
""" Helper to format the platform string in a filename
@@ -621,28 +624,13 @@ def _platform(*args):
platform = '-'.join(x.strip() for x in filter(len, args))
# Cleanup some possible filename obstacles...
- platform = platform.replace(' ', '_')
- platform = platform.replace('/', '-')
- platform = platform.replace('\\', '-')
- platform = platform.replace(':', '-')
- platform = platform.replace(';', '-')
- platform = platform.replace('"', '-')
- platform = platform.replace('(', '-')
- platform = platform.replace(')', '-')
+ platform = platform.translate(_SIMPLE_SUBSTITUTIONS)
# No need to report 'unknown' information...
platform = platform.replace('unknown', '')
# Fold '--'s and remove trailing '-'
- while True:
- cleaned = platform.replace('--', '-')
- if cleaned == platform:
- break
- platform = cleaned
- while platform and platform[-1] == '-':
- platform = platform[:-1]
-
- return platform
+ return re.sub(r'-{2,}', '-', platform).rstrip('-')
def _node(default=''):
@@ -1144,7 +1132,7 @@ def _sys_version(sys_version=None):
# CPython
cpython_sys_version_parser = re.compile(
r'([\w.+]+)\s*' # "version<space>"
- r'(?:experimental free-threading build\s+)?' # "free-threading-build<space>"
+ r'(?:free-threading build\s+)?' # "free-threading-build<space>"
r'\(#?([^,]+)' # "(#buildno"
r'(?:,\s*([\w ]*)' # ", builddate"
r'(?:,\s*([\w :]*))?)?\)\s*' # ", buildtime)<space>"
diff --git a/Lib/posixpath.py b/Lib/posixpath.py
index db72ded8826..d38f3bd5872 100644
--- a/Lib/posixpath.py
+++ b/Lib/posixpath.py
@@ -36,7 +36,7 @@ __all__ = ["normcase","isabs","join","splitdrive","splitroot","split","splitext"
"samefile","sameopenfile","samestat",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames","relpath",
- "commonpath", "isjunction","isdevdrive"]
+ "commonpath", "isjunction","isdevdrive","ALLOW_MISSING"]
def _get_sep(path):
@@ -402,10 +402,18 @@ symbolic links encountered in the path."""
curdir = '.'
pardir = '..'
getcwd = os.getcwd
- return _realpath(filename, strict, sep, curdir, pardir, getcwd)
+ if strict is ALLOW_MISSING:
+ ignored_error = FileNotFoundError
+ strict = True
+ elif strict:
+ ignored_error = ()
+ else:
+ ignored_error = OSError
+
+ lstat = os.lstat
+ readlink = os.readlink
+ maxlinks = None
-def _realpath(filename, strict=False, sep=sep, curdir=curdir, pardir=pardir,
- getcwd=os.getcwd, lstat=os.lstat, readlink=os.readlink, maxlinks=None):
# The stack of unresolved path parts. When popped, a special value of None
# indicates that a symlink target has been resolved, and that the original
# symlink path can be retrieved by popping again. The [::-1] slice is a
@@ -477,27 +485,28 @@ def _realpath(filename, strict=False, sep=sep, curdir=curdir, pardir=pardir,
path = newpath
continue
target = readlink(newpath)
- except OSError:
- if strict:
- raise
- path = newpath
+ except ignored_error:
+ pass
+ else:
+ # Resolve the symbolic link
+ if target.startswith(sep):
+ # Symlink target is absolute; reset resolved path.
+ path = sep
+ if maxlinks is None:
+ # Mark this symlink as seen but not fully resolved.
+ seen[newpath] = None
+ # Push the symlink path onto the stack, and signal its specialness
+ # by also pushing None. When these entries are popped, we'll
+ # record the fully-resolved symlink target in the 'seen' mapping.
+ rest.append(newpath)
+ rest.append(None)
+ # Push the unresolved symlink target parts onto the stack.
+ target_parts = target.split(sep)[::-1]
+ rest.extend(target_parts)
+ part_count += len(target_parts)
continue
- # Resolve the symbolic link
- if target.startswith(sep):
- # Symlink target is absolute; reset resolved path.
- path = sep
- if maxlinks is None:
- # Mark this symlink as seen but not fully resolved.
- seen[newpath] = None
- # Push the symlink path onto the stack, and signal its specialness
- # by also pushing None. When these entries are popped, we'll
- # record the fully-resolved symlink target in the 'seen' mapping.
- rest.append(newpath)
- rest.append(None)
- # Push the unresolved symlink target parts onto the stack.
- target_parts = target.split(sep)[::-1]
- rest.extend(target_parts)
- part_count += len(target_parts)
+ # An error occurred and was ignored.
+ path = newpath
return path
diff --git a/Lib/pprint.py b/Lib/pprint.py
index 1e611481b51..92a2c543ac2 100644
--- a/Lib/pprint.py
+++ b/Lib/pprint.py
@@ -653,6 +653,40 @@ class PrettyPrinter:
del context[objid]
return "{%s}" % ", ".join(components), readable, recursive
+ if (issubclass(typ, list) and r is list.__repr__) or \
+ (issubclass(typ, tuple) and r is tuple.__repr__):
+ if issubclass(typ, list):
+ if not object:
+ return "[]", True, False
+ format = "[%s]"
+ elif len(object) == 1:
+ format = "(%s,)"
+ else:
+ if not object:
+ return "()", True, False
+ format = "(%s)"
+ objid = id(object)
+ if maxlevels and level >= maxlevels:
+ return format % "...", False, objid in context
+ if objid in context:
+ return _recursion(object), False, True
+ context[objid] = 1
+ readable = True
+ recursive = False
+ components = []
+ append = components.append
+ level += 1
+ for o in object:
+ orepr, oreadable, orecur = self.format(
+ o, context, maxlevels, level)
+ append(orepr)
+ if not oreadable:
+ readable = False
+ if orecur:
+ recursive = True
+ del context[objid]
+ return format % ", ".join(components), readable, recursive
+
if issubclass(typ, _collections.abc.MappingView) and r in self._view_reprs:
objid = id(object)
if maxlevels and level >= maxlevels:
@@ -689,40 +723,6 @@ class PrettyPrinter:
del context[objid]
return typ.__name__ + '([%s])' % ", ".join(components), readable, recursive
- if (issubclass(typ, list) and r is list.__repr__) or \
- (issubclass(typ, tuple) and r is tuple.__repr__):
- if issubclass(typ, list):
- if not object:
- return "[]", True, False
- format = "[%s]"
- elif len(object) == 1:
- format = "(%s,)"
- else:
- if not object:
- return "()", True, False
- format = "(%s)"
- objid = id(object)
- if maxlevels and level >= maxlevels:
- return format % "...", False, objid in context
- if objid in context:
- return _recursion(object), False, True
- context[objid] = 1
- readable = True
- recursive = False
- components = []
- append = components.append
- level += 1
- for o in object:
- orepr, oreadable, orecur = self.format(
- o, context, maxlevels, level)
- append(orepr)
- if not oreadable:
- readable = False
- if orecur:
- recursive = True
- del context[objid]
- return format % ", ".join(components), readable, recursive
-
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
diff --git a/Lib/pydoc.py b/Lib/pydoc.py
index 7528178fdca..d508fb70ea4 100644
--- a/Lib/pydoc.py
+++ b/Lib/pydoc.py
@@ -1812,7 +1812,6 @@ def writedocs(dir, pkgpath='', done=None):
def _introdoc():
- import textwrap
ver = '%d.%d' % sys.version_info[:2]
if os.environ.get('PYTHON_BASIC_REPL'):
pyrepl_keys = ''
@@ -2170,7 +2169,6 @@ module "pydoc_data.topics" could not be found.
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
- import textwrap
text = 'Related help topics: ' + ', '.join(xrefs.split()) + '\n'
wrapped_text = textwrap.wrap(text, 72)
doc += '\n%s\n' % '\n'.join(wrapped_text)
diff --git a/Lib/reprlib.py b/Lib/reprlib.py
index 441d1be4bde..ab18247682b 100644
--- a/Lib/reprlib.py
+++ b/Lib/reprlib.py
@@ -181,7 +181,22 @@ class Repr:
return s
def repr_int(self, x, level):
- s = builtins.repr(x) # XXX Hope this isn't too slow...
+ try:
+ s = builtins.repr(x)
+ except ValueError as exc:
+ assert 'sys.set_int_max_str_digits()' in str(exc)
+ # Those imports must be deferred due to Python's build system
+ # where the reprlib module is imported before the math module.
+ import math, sys
+ # Integers with more than sys.get_int_max_str_digits() digits
+ # are rendered differently as their repr() raises a ValueError.
+ # See https://github.com/python/cpython/issues/135487.
+ k = 1 + int(math.log10(abs(x)))
+ # Note: math.log10(abs(x)) may be overestimated or underestimated,
+ # but for simplicity, we do not compute the exact number of digits.
+ max_digits = sys.get_int_max_str_digits()
+ return (f'<{x.__class__.__name__} instance with roughly {k} '
+ f'digits (limit at {max_digits}) at 0x{id(x):x}>')
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
diff --git a/Lib/shelve.py b/Lib/shelve.py
index 50584716e9e..b53dc8b7a8e 100644
--- a/Lib/shelve.py
+++ b/Lib/shelve.py
@@ -171,6 +171,11 @@ class Shelf(collections.abc.MutableMapping):
if hasattr(self.dict, 'sync'):
self.dict.sync()
+ def reorganize(self):
+ self.sync()
+ if hasattr(self.dict, 'reorganize'):
+ self.dict.reorganize()
+
class BsdDbShelf(Shelf):
"""Shelf implementation using the "BSD" db interface.
diff --git a/Lib/sqlite3/__main__.py b/Lib/sqlite3/__main__.py
index c2fa23c46cf..35344ecceff 100644
--- a/Lib/sqlite3/__main__.py
+++ b/Lib/sqlite3/__main__.py
@@ -12,6 +12,8 @@ from code import InteractiveConsole
from textwrap import dedent
from _colorize import get_theme, theme_no_color
+from ._completer import completer
+
def execute(c, sql, suppress_errors=True, theme=theme_no_color):
"""Helper that wraps execution of SQL code.
@@ -61,17 +63,21 @@ class SqliteInteractiveConsole(InteractiveConsole):
if source[0] == ".":
match source[1:].strip():
case "version":
- print(f"{sqlite3.sqlite_version}")
+ print(sqlite3.sqlite_version)
case "help":
- print("Enter SQL code and press enter.")
+ t = theme.syntax
+ print(f"Enter SQL code or one of the below commands, and press enter.\n\n"
+ f"{t.builtin}.version{t.reset} Print underlying SQLite library version\n"
+ f"{t.builtin}.help{t.reset} Print this help message\n"
+ f"{t.builtin}.quit{t.reset} Exit the CLI, equivalent to CTRL-D\n")
case "quit":
sys.exit(0)
case "":
pass
case _ as unknown:
t = theme.traceback
- self.write(f'{t.type}Error{t.reset}:{t.message} unknown'
- f'command or invalid arguments: "{unknown}".\n{t.reset}')
+ self.write(f'{t.type}Error{t.reset}: {t.message}unknown '
+ f'command: "{unknown}"{t.reset}\n')
else:
if not sqlite3.complete_statement(source):
return True
@@ -136,12 +142,9 @@ def main(*args):
execute(con, args.sql, suppress_errors=False, theme=theme)
else:
# No SQL provided; start the REPL.
- console = SqliteInteractiveConsole(con, use_color=True)
- try:
- import readline # noqa: F401
- except ImportError:
- pass
- console.interact(banner, exitmsg="")
+ with completer():
+ console = SqliteInteractiveConsole(con, use_color=True)
+ console.interact(banner, exitmsg="")
finally:
con.close()
diff --git a/Lib/sqlite3/_completer.py b/Lib/sqlite3/_completer.py
new file mode 100644
index 00000000000..f21ef69cad6
--- /dev/null
+++ b/Lib/sqlite3/_completer.py
@@ -0,0 +1,42 @@
+from contextlib import contextmanager
+
+try:
+ from _sqlite3 import SQLITE_KEYWORDS
+except ImportError:
+ SQLITE_KEYWORDS = ()
+
+_completion_matches = []
+
+
+def _complete(text, state):
+ global _completion_matches
+
+ if state == 0:
+ text_upper = text.upper()
+ _completion_matches = [c for c in SQLITE_KEYWORDS if c.startswith(text_upper)]
+ try:
+ return _completion_matches[state] + " "
+ except IndexError:
+ return None
+
+
+@contextmanager
+def completer():
+ try:
+ import readline
+ except ImportError:
+ yield
+ return
+
+ old_completer = readline.get_completer()
+ try:
+ readline.set_completer(_complete)
+ if readline.backend == "editline":
+ # libedit uses "^I" instead of "tab"
+ command_string = "bind ^I rl_complete"
+ else:
+ command_string = "tab: complete"
+ readline.parse_and_bind(command_string)
+ yield
+ finally:
+ readline.set_completer(old_completer)
diff --git a/Lib/sre_compile.py b/Lib/sre_compile.py
deleted file mode 100644
index f9da61e6487..00000000000
--- a/Lib/sre_compile.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import warnings
-warnings.warn(f"module {__name__!r} is deprecated",
- DeprecationWarning,
- stacklevel=2)
-
-from re import _compiler as _
-globals().update({k: v for k, v in vars(_).items() if k[:2] != '__'})
diff --git a/Lib/sre_constants.py b/Lib/sre_constants.py
deleted file mode 100644
index fa09d044292..00000000000
--- a/Lib/sre_constants.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import warnings
-warnings.warn(f"module {__name__!r} is deprecated",
- DeprecationWarning,
- stacklevel=2)
-
-from re import _constants as _
-globals().update({k: v for k, v in vars(_).items() if k[:2] != '__'})
diff --git a/Lib/sre_parse.py b/Lib/sre_parse.py
deleted file mode 100644
index 25a3f557d44..00000000000
--- a/Lib/sre_parse.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import warnings
-warnings.warn(f"module {__name__!r} is deprecated",
- DeprecationWarning,
- stacklevel=2)
-
-from re import _parser as _
-globals().update({k: v for k, v in vars(_).items() if k[:2] != '__'})
diff --git a/Lib/tarfile.py b/Lib/tarfile.py
index 212b71f6509..068aa13ed70 100644
--- a/Lib/tarfile.py
+++ b/Lib/tarfile.py
@@ -67,7 +67,7 @@ __all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError", "ReadError",
"DEFAULT_FORMAT", "open","fully_trusted_filter", "data_filter",
"tar_filter", "FilterError", "AbsoluteLinkError",
"OutsideDestinationError", "SpecialFileError", "AbsolutePathError",
- "LinkOutsideDestinationError"]
+ "LinkOutsideDestinationError", "LinkFallbackError"]
#---------------------------------------------------------
@@ -766,10 +766,22 @@ class LinkOutsideDestinationError(FilterError):
super().__init__(f'{tarinfo.name!r} would link to {path!r}, '
+ 'which is outside the destination')
+class LinkFallbackError(FilterError):
+ def __init__(self, tarinfo, path):
+ self.tarinfo = tarinfo
+ self._path = path
+ super().__init__(f'link {tarinfo.name!r} would be extracted as a '
+ + f'copy of {path!r}, which was rejected')
+
+# Errors caused by filters -- both "fatal" and "non-fatal" -- that
+# we consider to be issues with the argument, rather than a bug in the
+# filter function
+_FILTER_ERRORS = (FilterError, OSError, ExtractError)
+
def _get_filtered_attrs(member, dest_path, for_data=True):
new_attrs = {}
name = member.name
- dest_path = os.path.realpath(dest_path)
+ dest_path = os.path.realpath(dest_path, strict=os.path.ALLOW_MISSING)
# Strip leading / (tar's directory separator) from filenames.
# Include os.sep (target OS directory separator) as well.
if name.startswith(('/', os.sep)):
@@ -779,7 +791,8 @@ def _get_filtered_attrs(member, dest_path, for_data=True):
# For example, 'C:/foo' on Windows.
raise AbsolutePathError(member)
# Ensure we stay in the destination
- target_path = os.path.realpath(os.path.join(dest_path, name))
+ target_path = os.path.realpath(os.path.join(dest_path, name),
+ strict=os.path.ALLOW_MISSING)
if os.path.commonpath([target_path, dest_path]) != dest_path:
raise OutsideDestinationError(member, target_path)
# Limit permissions (no high bits, and go-w)
@@ -817,6 +830,9 @@ def _get_filtered_attrs(member, dest_path, for_data=True):
if member.islnk() or member.issym():
if os.path.isabs(member.linkname):
raise AbsoluteLinkError(member)
+ normalized = os.path.normpath(member.linkname)
+ if normalized != member.linkname:
+ new_attrs['linkname'] = normalized
if member.issym():
target_path = os.path.join(dest_path,
os.path.dirname(name),
@@ -824,7 +840,8 @@ def _get_filtered_attrs(member, dest_path, for_data=True):
else:
target_path = os.path.join(dest_path,
member.linkname)
- target_path = os.path.realpath(target_path)
+ target_path = os.path.realpath(target_path,
+ strict=os.path.ALLOW_MISSING)
if os.path.commonpath([target_path, dest_path]) != dest_path:
raise LinkOutsideDestinationError(member, target_path)
return new_attrs
@@ -2386,30 +2403,58 @@ class TarFile(object):
members = self
for member in members:
- tarinfo = self._get_extract_tarinfo(member, filter_function, path)
+ tarinfo, unfiltered = self._get_extract_tarinfo(
+ member, filter_function, path)
if tarinfo is None:
continue
if tarinfo.isdir():
# For directories, delay setting attributes until later,
# since permissions can interfere with extraction and
# extracting contents can reset mtime.
- directories.append(tarinfo)
+ directories.append(unfiltered)
self._extract_one(tarinfo, path, set_attrs=not tarinfo.isdir(),
- numeric_owner=numeric_owner)
+ numeric_owner=numeric_owner,
+ filter_function=filter_function)
# Reverse sort directories.
directories.sort(key=lambda a: a.name, reverse=True)
+
# Set correct owner, mtime and filemode on directories.
- for tarinfo in directories:
- dirpath = os.path.join(path, tarinfo.name)
+ for unfiltered in directories:
try:
+ # Need to re-apply any filter, to take the *current* filesystem
+ # state into account.
+ try:
+ tarinfo = filter_function(unfiltered, path)
+ except _FILTER_ERRORS as exc:
+ self._log_no_directory_fixup(unfiltered, repr(exc))
+ continue
+ if tarinfo is None:
+ self._log_no_directory_fixup(unfiltered,
+ 'excluded by filter')
+ continue
+ dirpath = os.path.join(path, tarinfo.name)
+ try:
+ lstat = os.lstat(dirpath)
+ except FileNotFoundError:
+ self._log_no_directory_fixup(tarinfo, 'missing')
+ continue
+ if not stat.S_ISDIR(lstat.st_mode):
+ # This is no longer a directory; presumably a later
+ # member overwrote the entry.
+ self._log_no_directory_fixup(tarinfo, 'not a directory')
+ continue
self.chown(tarinfo, dirpath, numeric_owner=numeric_owner)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
self._handle_nonfatal_error(e)
+ def _log_no_directory_fixup(self, member, reason):
+ self._dbg(2, "tarfile: Not fixing up directory %r (%s)" %
+ (member.name, reason))
+
def extract(self, member, path="", set_attrs=True, *, numeric_owner=False,
filter=None):
"""Extract a member from the archive to the current working directory,
@@ -2425,41 +2470,56 @@ class TarFile(object):
String names of common filters are accepted.
"""
filter_function = self._get_filter_function(filter)
- tarinfo = self._get_extract_tarinfo(member, filter_function, path)
+ tarinfo, unfiltered = self._get_extract_tarinfo(
+ member, filter_function, path)
if tarinfo is not None:
self._extract_one(tarinfo, path, set_attrs, numeric_owner)
def _get_extract_tarinfo(self, member, filter_function, path):
- """Get filtered TarInfo (or None) from member, which might be a str"""
+ """Get (filtered, unfiltered) TarInfos from *member*
+
+ *member* might be a string.
+
+ Return (None, None) if not found.
+ """
+
if isinstance(member, str):
- tarinfo = self.getmember(member)
+ unfiltered = self.getmember(member)
else:
- tarinfo = member
+ unfiltered = member
- unfiltered = tarinfo
+ filtered = None
try:
- tarinfo = filter_function(tarinfo, path)
+ filtered = filter_function(unfiltered, path)
except (OSError, UnicodeEncodeError, FilterError) as e:
self._handle_fatal_error(e)
except ExtractError as e:
self._handle_nonfatal_error(e)
- if tarinfo is None:
+ if filtered is None:
self._dbg(2, "tarfile: Excluded %r" % unfiltered.name)
- return None
+ return None, None
+
# Prepare the link target for makelink().
- if tarinfo.islnk():
- tarinfo = copy.copy(tarinfo)
- tarinfo._link_target = os.path.join(path, tarinfo.linkname)
- return tarinfo
+ if filtered.islnk():
+ filtered = copy.copy(filtered)
+ filtered._link_target = os.path.join(path, filtered.linkname)
+ return filtered, unfiltered
+
+ def _extract_one(self, tarinfo, path, set_attrs, numeric_owner,
+ filter_function=None):
+ """Extract from filtered tarinfo to disk.
- def _extract_one(self, tarinfo, path, set_attrs, numeric_owner):
- """Extract from filtered tarinfo to disk"""
+ filter_function is only used when extracting a *different*
+ member (e.g. as fallback to creating a symlink)
+ """
self._check("r")
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs,
- numeric_owner=numeric_owner)
+ numeric_owner=numeric_owner,
+ filter_function=filter_function,
+ extraction_root=path)
except (OSError, UnicodeEncodeError) as e:
self._handle_fatal_error(e)
except ExtractError as e:
@@ -2517,9 +2577,13 @@ class TarFile(object):
return None
def _extract_member(self, tarinfo, targetpath, set_attrs=True,
- numeric_owner=False):
- """Extract the TarInfo object tarinfo to a physical
+ numeric_owner=False, *, filter_function=None,
+ extraction_root=None):
+ """Extract the filtered TarInfo object tarinfo to a physical
file called targetpath.
+
+ filter_function is only used when extracting a *different*
+ member (e.g. as fallback to creating a symlink)
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
@@ -2548,7 +2612,10 @@ class TarFile(object):
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
- self.makelink(tarinfo, targetpath)
+ self.makelink_with_filter(
+ tarinfo, targetpath,
+ filter_function=filter_function,
+ extraction_root=extraction_root)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
@@ -2631,10 +2698,18 @@ class TarFile(object):
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
+ return self.makelink_with_filter(tarinfo, targetpath, None, None)
+
+ def makelink_with_filter(self, tarinfo, targetpath,
+ filter_function, extraction_root):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
+
+ filter_function is only used when extracting a *different*
+ member (e.g. as fallback to creating a link).
"""
+ keyerror_to_extracterror = False
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
@@ -2642,18 +2717,38 @@ class TarFile(object):
# Avoid FileExistsError on following os.symlink.
os.unlink(targetpath)
os.symlink(tarinfo.linkname, targetpath)
+ return
else:
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
- else:
- self._extract_member(self._find_link_target(tarinfo),
- targetpath)
+ return
except symlink_exception:
+ keyerror_to_extracterror = True
+
+ try:
+ unfiltered = self._find_link_target(tarinfo)
+ except KeyError:
+ if keyerror_to_extracterror:
+ raise ExtractError(
+ "unable to resolve link inside archive") from None
+ else:
+ raise
+
+ if filter_function is None:
+ filtered = unfiltered
+ else:
+ if extraction_root is None:
+ raise ExtractError(
+ "makelink_with_filter: if filter_function is not None, "
+ + "extraction_root must also not be None")
try:
- self._extract_member(self._find_link_target(tarinfo),
- targetpath)
- except KeyError:
- raise ExtractError("unable to resolve link inside archive") from None
+ filtered = filter_function(unfiltered, extraction_root)
+ except _FILTER_ERRORS as cause:
+ raise LinkFallbackError(tarinfo, unfiltered.name) from cause
+ if filtered is not None:
+ self._extract_member(filtered, targetpath,
+ filter_function=filter_function,
+ extraction_root=extraction_root)
def chown(self, tarinfo, targetpath, numeric_owner):
"""Set owner of targetpath according to tarinfo. If numeric_owner
diff --git a/Lib/test/.ruff.toml b/Lib/test/.ruff.toml
index 7aa8a4785d6..f1a967203ce 100644
--- a/Lib/test/.ruff.toml
+++ b/Lib/test/.ruff.toml
@@ -19,5 +19,12 @@ extend-exclude = [
[lint]
select = [
+ "F401", # Unused import
"F811", # Redefinition of unused variable (useful for finding test methods with the same name)
]
+
+[lint.per-file-ignores]
+"*/**/__main__.py" = ["F401"] # Unused import
+"test_import/*.py" = ["F401"] # Unused import
+"test_importlib/*.py" = ["F401"] # Unused import
+"typinganndata/partialexecution/*.py" = ["F401"] # Unused import
diff --git a/Lib/test/_code_definitions.py b/Lib/test/_code_definitions.py
index 733a15b25f6..70c44da2ec6 100644
--- a/Lib/test/_code_definitions.py
+++ b/Lib/test/_code_definitions.py
@@ -57,6 +57,22 @@ def spam_with_globals_and_builtins():
print(res)
+def spam_with_global_and_attr_same_name():
+ try:
+ spam_minimal.spam_minimal
+ except AttributeError:
+ pass
+
+
+def spam_full_args(a, b, /, c, d, *args, e, f, **kwargs):
+ return (a, b, c, d, e, f, args, kwargs)
+
+
+def spam_full_args_with_defaults(a=-1, b=-2, /, c=-3, d=-4, *args,
+ e=-5, f=-6, **kwargs):
+ return (a, b, c, d, e, f, args, kwargs)
+
+
def spam_args_attrs_and_builtins(a, b, /, c, d, *args, e, f, **kwargs):
if args.__len__() > 2:
return None
@@ -67,6 +83,10 @@ def spam_returns_arg(x):
return x
+def spam_raises():
+ raise Exception('spam!')
+
+
def spam_with_inner_not_closure():
def eggs():
pass
@@ -177,8 +197,12 @@ TOP_FUNCTIONS = [
spam_minimal,
spam_with_builtins,
spam_with_globals_and_builtins,
+ spam_with_global_and_attr_same_name,
+ spam_full_args,
+ spam_full_args_with_defaults,
spam_args_attrs_and_builtins,
spam_returns_arg,
+ spam_raises,
spam_with_inner_not_closure,
spam_with_inner_closure,
spam_annotated,
@@ -219,8 +243,10 @@ STATELESS_FUNCTIONS = [
spam,
spam_minimal,
spam_with_builtins,
+ spam_full_args,
spam_args_attrs_and_builtins,
spam_returns_arg,
+ spam_raises,
spam_annotated,
spam_with_inner_not_closure,
spam_with_inner_closure,
@@ -238,7 +264,9 @@ STATELESS_FUNCTIONS = [
STATELESS_CODE = [
*STATELESS_FUNCTIONS,
script_with_globals,
+ spam_full_args_with_defaults,
spam_with_globals_and_builtins,
+ spam_with_global_and_attr_same_name,
spam_full,
]
@@ -248,6 +276,7 @@ PURE_SCRIPT_FUNCTIONS = [
script_with_explicit_empty_return,
spam_minimal,
spam_with_builtins,
+ spam_raises,
spam_with_inner_not_closure,
spam_with_inner_closure,
]
@@ -255,6 +284,7 @@ SCRIPT_FUNCTIONS = [
*PURE_SCRIPT_FUNCTIONS,
script_with_globals,
spam_with_globals_and_builtins,
+ spam_with_global_and_attr_same_name,
]
diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py
index 75f31d858d3..a1259ff1d63 100644
--- a/Lib/test/_test_multiprocessing.py
+++ b/Lib/test/_test_multiprocessing.py
@@ -6801,6 +6801,35 @@ class _TestSpawnedSysPath(BaseTestCase):
self.assertEqual(child_sys_path[1:], sys.path[1:])
self.assertIsNone(import_error, msg=f"child could not import {self._mod_name}")
+ def test_std_streams_flushed_after_preload(self):
+ # gh-135335: Check fork server flushes standard streams after
+ # preloading modules
+ if multiprocessing.get_start_method() != "forkserver":
+ self.skipTest("forkserver specific test")
+
+ # Create a test module in the temporary directory on the child's path
+ # TODO: This can all be simplified once gh-126631 is fixed and we can
+ # use __main__ instead of a module.
+ dirname = os.path.join(self._temp_dir, 'preloaded_module')
+ init_name = os.path.join(dirname, '__init__.py')
+ os.mkdir(dirname)
+ with open(init_name, "w") as f:
+ cmd = '''if 1:
+ import sys
+ print('stderr', end='', file=sys.stderr)
+ print('stdout', end='', file=sys.stdout)
+ '''
+ f.write(cmd)
+
+ name = os.path.join(os.path.dirname(__file__), 'mp_preload_flush.py')
+ env = {'PYTHONPATH': self._temp_dir}
+ _, out, err = test.support.script_helper.assert_python_ok(name, **env)
+
+ # Check stderr first, as it is more likely to be useful to see in the
+ # event of a failure.
+ self.assertEqual(err.decode().rstrip(), 'stderr')
+ self.assertEqual(out.decode().rstrip(), 'stdout')
+
class MiscTestCase(unittest.TestCase):
def test__all__(self):
diff --git a/Lib/test/audit-tests.py b/Lib/test/audit-tests.py
index 08b638e4b8d..6884ac0dbe6 100644
--- a/Lib/test/audit-tests.py
+++ b/Lib/test/audit-tests.py
@@ -643,6 +643,34 @@ def test_assert_unicode():
else:
raise RuntimeError("Expected sys.audit(9) to fail.")
+def test_sys_remote_exec():
+ import tempfile
+
+ pid = os.getpid()
+ event_pid = -1
+ event_script_path = ""
+ remote_event_script_path = ""
+ def hook(event, args):
+ if event not in ["sys.remote_exec", "cpython.remote_debugger_script"]:
+ return
+ print(event, args)
+ match event:
+ case "sys.remote_exec":
+ nonlocal event_pid, event_script_path
+ event_pid = args[0]
+ event_script_path = args[1]
+ case "cpython.remote_debugger_script":
+ nonlocal remote_event_script_path
+ remote_event_script_path = args[0]
+
+ sys.addaudithook(hook)
+ with tempfile.NamedTemporaryFile(mode='w+', delete=True) as tmp_file:
+ tmp_file.write("a = 1+1\n")
+ tmp_file.flush()
+ sys.remote_exec(pid, tmp_file.name)
+ assertEqual(event_pid, pid)
+ assertEqual(event_script_path, tmp_file.name)
+ assertEqual(remote_event_script_path, tmp_file.name)
if __name__ == "__main__":
from test.support import suppress_msvcrt_asserts
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 713cbedb299..a2d01b157ac 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -190,6 +190,12 @@ class Regrtest:
strip_py_suffix(tests)
+ exclude_tests = set()
+ if self.exclude:
+ for arg in self.cmdline_args:
+ exclude_tests.add(arg)
+ self.cmdline_args = []
+
if self.pgo:
# add default PGO tests if no tests are specified
setup_pgo_tests(self.cmdline_args, self.pgo_extended)
@@ -200,17 +206,15 @@ class Regrtest:
if self.tsan_parallel:
setup_tsan_parallel_tests(self.cmdline_args)
- exclude_tests = set()
- if self.exclude:
- for arg in self.cmdline_args:
- exclude_tests.add(arg)
- self.cmdline_args = []
-
alltests = findtests(testdir=self.test_dir,
exclude=exclude_tests)
if not self.fromfile:
selected = tests or self.cmdline_args
+ if exclude_tests:
+ # Support "--pgo/--tsan -x test_xxx" command
+ selected = [name for name in selected
+ if name not in exclude_tests]
if selected:
selected = split_test_packages(selected)
else:
@@ -543,8 +547,6 @@ class Regrtest:
self.first_runtests = runtests
self.logger.set_tests(runtests)
- setup_process()
-
if (runtests.hunt_refleak is not None) and (not self.num_workers):
# gh-109739: WindowsLoadTracker thread interferes with refleak check
use_load_tracker = False
@@ -721,10 +723,7 @@ class Regrtest:
self._execute_python(cmd, environ)
def _init(self):
- # Set sys.stdout encoder error handler to backslashreplace,
- # similar to sys.stderr error handler, to avoid UnicodeEncodeError
- # when printing a traceback or any other non-encodable character.
- sys.stdout.reconfigure(errors="backslashreplace")
+ setup_process()
if self.junit_filename and not os.path.isabs(self.junit_filename):
self.junit_filename = os.path.abspath(self.junit_filename)
diff --git a/Lib/test/libregrtest/setup.py b/Lib/test/libregrtest/setup.py
index c3d1f60a400..9bfc414cd61 100644
--- a/Lib/test/libregrtest/setup.py
+++ b/Lib/test/libregrtest/setup.py
@@ -1,5 +1,6 @@
import faulthandler
import gc
+import io
import os
import random
import signal
@@ -52,6 +53,14 @@ def setup_process() -> None:
support.record_original_stdout(sys.stdout)
+ # Set sys.stdout encoder error handler to backslashreplace,
+ # similar to sys.stderr error handler, to avoid UnicodeEncodeError
+ # when printing a traceback or any other non-encodable character.
+ #
+ # Use an assertion to fix mypy error.
+ assert isinstance(sys.stdout, io.TextIOWrapper)
+ sys.stdout.reconfigure(errors="backslashreplace")
+
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py
index 57d7b649d2e..958a915626a 100644
--- a/Lib/test/libregrtest/single.py
+++ b/Lib/test/libregrtest/single.py
@@ -283,7 +283,7 @@ def _runtest(result: TestResult, runtests: RunTests) -> None:
try:
setup_tests(runtests)
- if output_on_failure:
+ if output_on_failure or runtests.pgo:
support.verbose = True
stream = io.StringIO()
diff --git a/Lib/test/libregrtest/tsan.py b/Lib/test/libregrtest/tsan.py
index d984a735bdf..3545c5f999f 100644
--- a/Lib/test/libregrtest/tsan.py
+++ b/Lib/test/libregrtest/tsan.py
@@ -8,7 +8,7 @@ TSAN_TESTS = [
'test_capi.test_pyatomic',
'test_code',
'test_ctypes',
- # 'test_concurrent_futures', # gh-130605: too many data races
+ 'test_concurrent_futures',
'test_enum',
'test_functools',
'test_httpservers',
diff --git a/Lib/test/mp_preload_flush.py b/Lib/test/mp_preload_flush.py
new file mode 100644
index 00000000000..3501554d366
--- /dev/null
+++ b/Lib/test/mp_preload_flush.py
@@ -0,0 +1,15 @@
+import multiprocessing
+import sys
+
+modname = 'preloaded_module'
+if __name__ == '__main__':
+ if modname in sys.modules:
+ raise AssertionError(f'{modname!r} is in sys.modules')
+ multiprocessing.set_start_method('forkserver')
+ multiprocessing.set_forkserver_preload([modname])
+ for _ in range(2):
+ p = multiprocessing.Process()
+ p.start()
+ p.join()
+elif modname not in sys.modules:
+ raise AssertionError(f'{modname!r} is not in sys.modules')
diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py
index 9d6ae3e4d00..9a3a26a8400 100644
--- a/Lib/test/pickletester.py
+++ b/Lib/test/pickletester.py
@@ -1100,6 +1100,11 @@ class AbstractUnpickleTests:
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
+ def test_large_binstring(self):
+ errmsg = 'BINSTRING pickle has negative byte count'
+ with self.assertRaisesRegex(pickle.UnpicklingError, errmsg):
+ self.loads(b'T\0\0\0\x80')
+
def test_get(self):
pickled = b'((lp100000\ng100000\nt.'
unpickled = self.loads(pickled)
diff --git a/Lib/test/pythoninfo.py b/Lib/test/pythoninfo.py
index e1830f2e6eb..80a262c18a5 100644
--- a/Lib/test/pythoninfo.py
+++ b/Lib/test/pythoninfo.py
@@ -920,10 +920,17 @@ def collect_windows(info_add):
try:
import _winapi
- dll_path = _winapi.GetModuleFileName(sys.dllhandle)
- info_add('windows.dll_path', dll_path)
- except (ImportError, AttributeError):
+ except ImportError:
pass
+ else:
+ try:
+ dll_path = _winapi.GetModuleFileName(sys.dllhandle)
+ info_add('windows.dll_path', dll_path)
+ except AttributeError:
+ pass
+
+ call_func(info_add, 'windows.ansi_code_page', _winapi, 'GetACP')
+ call_func(info_add, 'windows.oem_code_page', _winapi, 'GetOEMCP')
# windows.version_caption: "wmic os get Caption,Version /value" command
import subprocess
diff --git a/Lib/test/subprocessdata/fd_status.py b/Lib/test/subprocessdata/fd_status.py
index d12bd95abee..90e785981ae 100644
--- a/Lib/test/subprocessdata/fd_status.py
+++ b/Lib/test/subprocessdata/fd_status.py
@@ -2,7 +2,7 @@
file descriptors on stdout.
Usage:
-fd_stats.py: check all file descriptors
+fd_status.py: check all file descriptors (up to 255)
fd_status.py fd1 fd2 ...: check only specified file descriptors
"""
@@ -18,7 +18,7 @@ if __name__ == "__main__":
_MAXFD = os.sysconf("SC_OPEN_MAX")
except:
_MAXFD = 256
- test_fds = range(0, _MAXFD)
+ test_fds = range(0, min(_MAXFD, 256))
else:
test_fds = map(int, sys.argv[1:])
for fd in test_fds:
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index b7cd7940eb1..fd39d3f7c95 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -46,6 +46,7 @@ __all__ = [
# sys
"MS_WINDOWS", "is_jython", "is_android", "is_emscripten", "is_wasi",
"is_apple_mobile", "check_impl_detail", "unix_shell", "setswitchinterval",
+ "support_remote_exec_only",
# os
"get_pagesize",
# network
@@ -945,6 +946,31 @@ def check_sizeof(test, o, size):
% (type(o), result, size)
test.assertEqual(result, size, msg)
+def subTests(arg_names, arg_values, /, *, _do_cleanups=False):
+ """Run multiple subtests with different parameters.
+ """
+ single_param = False
+ if isinstance(arg_names, str):
+ arg_names = arg_names.replace(',',' ').split()
+ if len(arg_names) == 1:
+ single_param = True
+ arg_values = tuple(arg_values)
+ def decorator(func):
+ if isinstance(func, type):
+ raise TypeError('subTests() can only decorate methods, not classes')
+ @functools.wraps(func)
+ def wrapper(self, /, *args, **kwargs):
+ for values in arg_values:
+ if single_param:
+ values = (values,)
+ subtest_kwargs = dict(zip(arg_names, values))
+ with self.subTest(**subtest_kwargs):
+ func(self, *args, **kwargs, **subtest_kwargs)
+ if _do_cleanups:
+ self.doCleanups()
+ return wrapper
+ return decorator
+
#=======================================================================
# Decorator/context manager for running a code in a different locale,
# correctly resetting it afterwards.
@@ -1084,7 +1110,7 @@ def set_memlimit(limit: str) -> None:
global real_max_memuse
memlimit = _parse_memlimit(limit)
if memlimit < _2G - 1:
- raise ValueError('Memory limit {limit!r} too low to be useful')
+ raise ValueError(f'Memory limit {limit!r} too low to be useful')
real_max_memuse = memlimit
memlimit = min(memlimit, MAX_Py_ssize_t)
@@ -1101,7 +1127,6 @@ class _MemoryWatchdog:
self.started = False
def start(self):
- import warnings
try:
f = open(self.procfile, 'r')
except OSError as e:
@@ -2308,6 +2333,7 @@ def check_disallow_instantiation(testcase, tp, *args, **kwds):
qualname = f"{name}"
msg = f"cannot create '{re.escape(qualname)}' instances"
testcase.assertRaisesRegex(TypeError, msg, tp, *args, **kwds)
+ testcase.assertRaisesRegex(TypeError, msg, tp.__new__, tp, *args, **kwds)
def get_recursion_depth():
"""Get the recursion depth of the caller function.
@@ -2359,7 +2385,7 @@ def infinite_recursion(max_depth=None):
# very deep recursion.
max_depth = 20_000
elif max_depth < 3:
- raise ValueError("max_depth must be at least 3, got {max_depth}")
+ raise ValueError(f"max_depth must be at least 3, got {max_depth}")
depth = get_recursion_depth()
depth = max(depth - 1, 1) # Ignore infinite_recursion() frame.
limit = depth + max_depth
@@ -2728,7 +2754,7 @@ def iter_builtin_types():
# Fall back to making a best-effort guess.
if hasattr(object, '__flags__'):
# Look for any type object with the Py_TPFLAGS_STATIC_BUILTIN flag set.
- import datetime
+ import datetime # noqa: F401
seen = set()
for cls, subs in walk_class_hierarchy(object):
if cls in seen:
@@ -3045,6 +3071,27 @@ def is_libssl_fips_mode():
return False # more of a maybe, unless we add this to the _ssl module.
return get_fips_mode() != 0
+def _supports_remote_attaching():
+ PROCESS_VM_READV_SUPPORTED = False
+
+ try:
+ from _remote_debugging import PROCESS_VM_READV_SUPPORTED
+ except ImportError:
+ pass
+
+ return PROCESS_VM_READV_SUPPORTED
+
+def _support_remote_exec_only_impl():
+ if not sys.is_remote_debug_enabled():
+ return unittest.skip("Remote debugging is not enabled")
+ if sys.platform not in ("darwin", "linux", "win32"):
+ return unittest.skip("Test only runs on Linux, Windows and macOS")
+ if sys.platform == "linux" and not _supports_remote_attaching():
+ return unittest.skip("Test only runs on Linux with process_vm_readv support")
+ return _id
+
+def support_remote_exec_only(test):
+ return _support_remote_exec_only_impl()(test)
class EqualToForwardRef:
"""Helper to ease use of annotationlib.ForwardRef in tests.
diff --git a/Lib/test/support/interpreters/channels.py b/Lib/test/support/channels.py
index 7a2bd7d63f8..fab1797659b 100644
--- a/Lib/test/support/interpreters/channels.py
+++ b/Lib/test/support/channels.py
@@ -2,14 +2,14 @@
import time
import _interpchannels as _channels
-from . import _crossinterp
+from concurrent.interpreters import _crossinterp
# aliases:
from _interpchannels import (
- ChannelError, ChannelNotFoundError, ChannelClosedError,
- ChannelEmptyError, ChannelNotEmptyError,
+ ChannelError, ChannelNotFoundError, ChannelClosedError, # noqa: F401
+ ChannelEmptyError, ChannelNotEmptyError, # noqa: F401
)
-from ._crossinterp import (
+from concurrent.interpreters._crossinterp import (
UNBOUND_ERROR, UNBOUND_REMOVE,
)
@@ -69,7 +69,7 @@ def list_all():
if not hasattr(send, '_unboundop'):
send._set_unbound(unboundop)
else:
- assert send._unbound[0] == op
+ assert send._unbound[0] == unboundop
channels.append(chan)
return channels
@@ -105,12 +105,8 @@ class _ChannelEnd:
return other._id == self._id
# for pickling:
- def __getnewargs__(self):
- return (int(self._id),)
-
- # for pickling:
- def __getstate__(self):
- return None
+ def __reduce__(self):
+ return (type(self), (int(self._id),))
@property
def id(self):
diff --git a/Lib/test/test__interpchannels.py b/Lib/test/test__interpchannels.py
index 88eee03a3de..858d31a73cf 100644
--- a/Lib/test/test__interpchannels.py
+++ b/Lib/test/test__interpchannels.py
@@ -9,7 +9,7 @@ import unittest
from test.support import import_helper, skip_if_sanitizer
_channels = import_helper.import_module('_interpchannels')
-from test.support.interpreters import _crossinterp
+from concurrent.interpreters import _crossinterp
from test.test__interpreters import (
_interpreters,
_run_output,
diff --git a/Lib/test/test__interpreters.py b/Lib/test/test__interpreters.py
index 63fdaad8de7..a32d5d81d2b 100644
--- a/Lib/test/test__interpreters.py
+++ b/Lib/test/test__interpreters.py
@@ -474,15 +474,32 @@ class CommonTests(TestBase):
def test_signatures(self):
# See https://github.com/python/cpython/issues/126654
- msg = "expected 'shared' to be a dict"
+ msg = r'_interpreters.exec\(\) argument 3 must be dict, not int'
with self.assertRaisesRegex(TypeError, msg):
_interpreters.exec(self.id, 'a', 1)
with self.assertRaisesRegex(TypeError, msg):
_interpreters.exec(self.id, 'a', shared=1)
+ msg = r'_interpreters.run_string\(\) argument 3 must be dict, not int'
with self.assertRaisesRegex(TypeError, msg):
_interpreters.run_string(self.id, 'a', shared=1)
+ msg = r'_interpreters.run_func\(\) argument 3 must be dict, not int'
with self.assertRaisesRegex(TypeError, msg):
_interpreters.run_func(self.id, lambda: None, shared=1)
+ # See https://github.com/python/cpython/issues/135855
+ msg = r'_interpreters.set___main___attrs\(\) argument 2 must be dict, not int'
+ with self.assertRaisesRegex(TypeError, msg):
+ _interpreters.set___main___attrs(self.id, 1)
+
+ def test_invalid_shared_none(self):
+ msg = r'must be dict, not None'
+ with self.assertRaisesRegex(TypeError, msg):
+ _interpreters.exec(self.id, 'a', shared=None)
+ with self.assertRaisesRegex(TypeError, msg):
+ _interpreters.run_string(self.id, 'a', shared=None)
+ with self.assertRaisesRegex(TypeError, msg):
+ _interpreters.run_func(self.id, lambda: None, shared=None)
+ with self.assertRaisesRegex(TypeError, msg):
+ _interpreters.set___main___attrs(self.id, None)
def test_invalid_shared_encoding(self):
# See https://github.com/python/cpython/issues/127196
@@ -952,7 +969,8 @@ class RunFailedTests(TestBase):
""")
with self.subTest('script'):
- self.assert_run_failed(SyntaxError, script)
+ with self.assertRaises(SyntaxError):
+ _interpreters.run_string(self.id, script)
with self.subTest('module'):
modname = 'spam_spam_spam'
@@ -1019,12 +1037,19 @@ class RunFuncTests(TestBase):
with open(w, 'w', encoding="utf-8") as spipe:
with contextlib.redirect_stdout(spipe):
print('it worked!', end='')
+ failed = None
def f():
- _interpreters.set___main___attrs(self.id, dict(w=w))
- _interpreters.run_func(self.id, script)
+ nonlocal failed
+ try:
+ _interpreters.set___main___attrs(self.id, dict(w=w))
+ _interpreters.run_func(self.id, script)
+ except Exception as exc:
+ failed = exc
t = threading.Thread(target=f)
t.start()
t.join()
+ if failed:
+ raise Exception from failed
with open(r, encoding="utf-8") as outfile:
out = outfile.read()
@@ -1053,19 +1078,16 @@ class RunFuncTests(TestBase):
spam = True
def script():
assert spam
-
- with self.assertRaises(TypeError):
+ with self.assertRaises(ValueError):
_interpreters.run_func(self.id, script)
- # XXX This hasn't been fixed yet.
- @unittest.expectedFailure
def test_return_value(self):
def script():
return 'spam'
with self.assertRaises(ValueError):
_interpreters.run_func(self.id, script)
- @unittest.skip("we're not quite there yet")
+# @unittest.skip("we're not quite there yet")
def test_args(self):
with self.subTest('args'):
def script(a, b=0):
diff --git a/Lib/test/test_annotationlib.py b/Lib/test/test_annotationlib.py
index 73a821d15e3..ae0e73f08c5 100644
--- a/Lib/test/test_annotationlib.py
+++ b/Lib/test/test_annotationlib.py
@@ -7,7 +7,7 @@ import collections
import functools
import itertools
import pickle
-from string.templatelib import Interpolation, Template
+from string.templatelib import Template
import typing
import unittest
from annotationlib import (
@@ -815,6 +815,70 @@ class TestGetAnnotations(unittest.TestCase):
{"x": int},
)
+ def test_stringized_annotation_permutations(self):
+ def define_class(name, has_future, has_annos, base_text, extra_names=None):
+ lines = []
+ if has_future:
+ lines.append("from __future__ import annotations")
+ lines.append(f"class {name}({base_text}):")
+ if has_annos:
+ lines.append(f" {name}_attr: int")
+ else:
+ lines.append(" pass")
+ code = "\n".join(lines)
+ ns = support.run_code(code, extra_names=extra_names)
+ return ns[name]
+
+ def check_annotations(cls, has_future, has_annos):
+ if has_annos:
+ if has_future:
+ anno = "int"
+ else:
+ anno = int
+ self.assertEqual(get_annotations(cls), {f"{cls.__name__}_attr": anno})
+ else:
+ self.assertEqual(get_annotations(cls), {})
+
+ for meta_future, base_future, child_future, meta_has_annos, base_has_annos, child_has_annos in itertools.product(
+ (False, True),
+ (False, True),
+ (False, True),
+ (False, True),
+ (False, True),
+ (False, True),
+ ):
+ with self.subTest(
+ meta_future=meta_future,
+ base_future=base_future,
+ child_future=child_future,
+ meta_has_annos=meta_has_annos,
+ base_has_annos=base_has_annos,
+ child_has_annos=child_has_annos,
+ ):
+ meta = define_class(
+ "Meta",
+ has_future=meta_future,
+ has_annos=meta_has_annos,
+ base_text="type",
+ )
+ base = define_class(
+ "Base",
+ has_future=base_future,
+ has_annos=base_has_annos,
+ base_text="metaclass=Meta",
+ extra_names={"Meta": meta},
+ )
+ child = define_class(
+ "Child",
+ has_future=child_future,
+ has_annos=child_has_annos,
+ base_text="Base",
+ extra_names={"Base": base},
+ )
+ check_annotations(meta, meta_future, meta_has_annos)
+ check_annotations(base, base_future, base_has_annos)
+ check_annotations(child, child_future, child_has_annos)
+
def test_modify_annotations(self):
def f(x: int):
pass
@@ -1586,9 +1650,11 @@ class TestForwardRefClass(unittest.TestCase):
with support.swap_attr(builtins, "int", dict):
self.assertIs(ForwardRef("int").evaluate(), dict)
- with self.assertRaises(NameError):
+ with self.assertRaises(NameError, msg="name 'doesntexist' is not defined") as exc:
ForwardRef("doesntexist").evaluate()
+ self.assertEqual(exc.exception.name, "doesntexist")
+
def test_fwdref_invalid_syntax(self):
fr = ForwardRef("if")
with self.assertRaises(SyntaxError):
diff --git a/Lib/test/test_ast/test_ast.py b/Lib/test/test_ast/test_ast.py
index 46745cfa8f8..cc46529c0ef 100644
--- a/Lib/test/test_ast/test_ast.py
+++ b/Lib/test/test_ast/test_ast.py
@@ -1372,17 +1372,17 @@ class ASTHelpers_Test(unittest.TestCase):
def test_dump(self):
node = ast.parse('spam(eggs, "and cheese")')
self.assertEqual(ast.dump(node),
- "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), "
- "args=[Name(id='eggs', ctx=Load()), Constant(value='and cheese')]))])"
+ "Module(body=[Expr(value=Call(func=Name(id='spam'), "
+ "args=[Name(id='eggs'), Constant(value='and cheese')]))])"
)
self.assertEqual(ast.dump(node, annotate_fields=False),
- "Module([Expr(Call(Name('spam', Load()), [Name('eggs', Load()), "
+ "Module([Expr(Call(Name('spam'), [Name('eggs'), "
"Constant('and cheese')]))])"
)
self.assertEqual(ast.dump(node, include_attributes=True),
- "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load(), "
+ "Module(body=[Expr(value=Call(func=Name(id='spam', "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=4), "
- "args=[Name(id='eggs', ctx=Load(), lineno=1, col_offset=5, "
+ "args=[Name(id='eggs', lineno=1, col_offset=5, "
"end_lineno=1, end_col_offset=9), Constant(value='and cheese', "
"lineno=1, col_offset=11, end_lineno=1, end_col_offset=23)], "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=24), "
@@ -1396,18 +1396,18 @@ Module(
body=[
Expr(
value=Call(
- func=Name(id='spam', ctx=Load()),
+ func=Name(id='spam'),
args=[
- Name(id='eggs', ctx=Load()),
+ Name(id='eggs'),
Constant(value='and cheese')]))])""")
self.assertEqual(ast.dump(node, annotate_fields=False, indent='\t'), """\
Module(
\t[
\t\tExpr(
\t\t\tCall(
-\t\t\t\tName('spam', Load()),
+\t\t\t\tName('spam'),
\t\t\t\t[
-\t\t\t\t\tName('eggs', Load()),
+\t\t\t\t\tName('eggs'),
\t\t\t\t\tConstant('and cheese')]))])""")
self.assertEqual(ast.dump(node, include_attributes=True, indent=3), """\
Module(
@@ -1416,7 +1416,6 @@ Module(
value=Call(
func=Name(
id='spam',
- ctx=Load(),
lineno=1,
col_offset=0,
end_lineno=1,
@@ -1424,7 +1423,6 @@ Module(
args=[
Name(
id='eggs',
- ctx=Load(),
lineno=1,
col_offset=5,
end_lineno=1,
@@ -1454,23 +1452,23 @@ Module(
)
node = ast.Raise(exc=ast.Name(id='e', ctx=ast.Load()), lineno=3, col_offset=4)
self.assertEqual(ast.dump(node),
- "Raise(exc=Name(id='e', ctx=Load()))"
+ "Raise(exc=Name(id='e'))"
)
self.assertEqual(ast.dump(node, annotate_fields=False),
- "Raise(Name('e', Load()))"
+ "Raise(Name('e'))"
)
self.assertEqual(ast.dump(node, include_attributes=True),
- "Raise(exc=Name(id='e', ctx=Load()), lineno=3, col_offset=4)"
+ "Raise(exc=Name(id='e'), lineno=3, col_offset=4)"
)
self.assertEqual(ast.dump(node, annotate_fields=False, include_attributes=True),
- "Raise(Name('e', Load()), lineno=3, col_offset=4)"
+ "Raise(Name('e'), lineno=3, col_offset=4)"
)
node = ast.Raise(cause=ast.Name(id='e', ctx=ast.Load()))
self.assertEqual(ast.dump(node),
- "Raise(cause=Name(id='e', ctx=Load()))"
+ "Raise(cause=Name(id='e'))"
)
self.assertEqual(ast.dump(node, annotate_fields=False),
- "Raise(cause=Name('e', Load()))"
+ "Raise(cause=Name('e'))"
)
# Arguments:
node = ast.arguments(args=[ast.arg("x")])
@@ -1502,10 +1500,10 @@ Module(
[ast.Name('dataclass', ctx=ast.Load())],
)
self.assertEqual(ast.dump(node),
- "ClassDef(name='T', keywords=[keyword(arg='a', value=Constant(value=None))], decorator_list=[Name(id='dataclass', ctx=Load())])",
+ "ClassDef(name='T', keywords=[keyword(arg='a', value=Constant(value=None))], decorator_list=[Name(id='dataclass')])",
)
self.assertEqual(ast.dump(node, annotate_fields=False),
- "ClassDef('T', [], [keyword('a', Constant(None))], [], [Name('dataclass', Load())])",
+ "ClassDef('T', [], [keyword('a', Constant(None))], [], [Name('dataclass')])",
)
def test_dump_show_empty(self):
@@ -1533,7 +1531,7 @@ Module(
check_node(
# Corner case: there are no real `Name` instances with `id=''`:
ast.Name(id='', ctx=ast.Load()),
- empty="Name(id='', ctx=Load())",
+ empty="Name(id='')",
full="Name(id='', ctx=Load())",
)
@@ -1544,39 +1542,63 @@ Module(
)
check_node(
+ ast.MatchSingleton(value=[]),
+ empty="MatchSingleton(value=[])",
+ full="MatchSingleton(value=[])",
+ )
+
+ check_node(
ast.Constant(value=None),
empty="Constant(value=None)",
full="Constant(value=None)",
)
check_node(
+ ast.Constant(value=[]),
+ empty="Constant(value=[])",
+ full="Constant(value=[])",
+ )
+
+ check_node(
ast.Constant(value=''),
empty="Constant(value='')",
full="Constant(value='')",
)
+ check_node(
+ ast.Interpolation(value=ast.Constant(42), str=None, conversion=-1),
+ empty="Interpolation(value=Constant(value=42), str=None, conversion=-1)",
+ full="Interpolation(value=Constant(value=42), str=None, conversion=-1)",
+ )
+
+ check_node(
+ ast.Interpolation(value=ast.Constant(42), str=[], conversion=-1),
+ empty="Interpolation(value=Constant(value=42), str=[], conversion=-1)",
+ full="Interpolation(value=Constant(value=42), str=[], conversion=-1)",
+ )
+
check_text(
"def a(b: int = 0, *, c): ...",
- empty="Module(body=[FunctionDef(name='a', args=arguments(args=[arg(arg='b', annotation=Name(id='int', ctx=Load()))], kwonlyargs=[arg(arg='c')], kw_defaults=[None], defaults=[Constant(value=0)]), body=[Expr(value=Constant(value=Ellipsis))])])",
+ empty="Module(body=[FunctionDef(name='a', args=arguments(args=[arg(arg='b', annotation=Name(id='int'))], kwonlyargs=[arg(arg='c')], kw_defaults=[None], defaults=[Constant(value=0)]), body=[Expr(value=Constant(value=Ellipsis))])])",
full="Module(body=[FunctionDef(name='a', args=arguments(posonlyargs=[], args=[arg(arg='b', annotation=Name(id='int', ctx=Load()))], kwonlyargs=[arg(arg='c')], kw_defaults=[None], defaults=[Constant(value=0)]), body=[Expr(value=Constant(value=Ellipsis))], decorator_list=[], type_params=[])], type_ignores=[])",
)
check_text(
"def a(b: int = 0, *, c): ...",
- empty="Module(body=[FunctionDef(name='a', args=arguments(args=[arg(arg='b', annotation=Name(id='int', ctx=Load(), lineno=1, col_offset=9, end_lineno=1, end_col_offset=12), lineno=1, col_offset=6, end_lineno=1, end_col_offset=12)], kwonlyargs=[arg(arg='c', lineno=1, col_offset=21, end_lineno=1, end_col_offset=22)], kw_defaults=[None], defaults=[Constant(value=0, lineno=1, col_offset=15, end_lineno=1, end_col_offset=16)]), body=[Expr(value=Constant(value=Ellipsis, lineno=1, col_offset=25, end_lineno=1, end_col_offset=28), lineno=1, col_offset=25, end_lineno=1, end_col_offset=28)], lineno=1, col_offset=0, end_lineno=1, end_col_offset=28)])",
+ empty="Module(body=[FunctionDef(name='a', args=arguments(args=[arg(arg='b', annotation=Name(id='int', lineno=1, col_offset=9, end_lineno=1, end_col_offset=12), lineno=1, col_offset=6, end_lineno=1, end_col_offset=12)], kwonlyargs=[arg(arg='c', lineno=1, col_offset=21, end_lineno=1, end_col_offset=22)], kw_defaults=[None], defaults=[Constant(value=0, lineno=1, col_offset=15, end_lineno=1, end_col_offset=16)]), body=[Expr(value=Constant(value=Ellipsis, lineno=1, col_offset=25, end_lineno=1, end_col_offset=28), lineno=1, col_offset=25, end_lineno=1, end_col_offset=28)], lineno=1, col_offset=0, end_lineno=1, end_col_offset=28)])",
full="Module(body=[FunctionDef(name='a', args=arguments(posonlyargs=[], args=[arg(arg='b', annotation=Name(id='int', ctx=Load(), lineno=1, col_offset=9, end_lineno=1, end_col_offset=12), lineno=1, col_offset=6, end_lineno=1, end_col_offset=12)], kwonlyargs=[arg(arg='c', lineno=1, col_offset=21, end_lineno=1, end_col_offset=22)], kw_defaults=[None], defaults=[Constant(value=0, lineno=1, col_offset=15, end_lineno=1, end_col_offset=16)]), body=[Expr(value=Constant(value=Ellipsis, lineno=1, col_offset=25, end_lineno=1, end_col_offset=28), lineno=1, col_offset=25, end_lineno=1, end_col_offset=28)], decorator_list=[], type_params=[], lineno=1, col_offset=0, end_lineno=1, end_col_offset=28)], type_ignores=[])",
include_attributes=True,
)
check_text(
'spam(eggs, "and cheese")',
- empty="Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), args=[Name(id='eggs', ctx=Load()), Constant(value='and cheese')]))])",
+ empty="Module(body=[Expr(value=Call(func=Name(id='spam'), args=[Name(id='eggs'), Constant(value='and cheese')]))])",
full="Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), args=[Name(id='eggs', ctx=Load()), Constant(value='and cheese')], keywords=[]))], type_ignores=[])",
)
check_text(
'spam(eggs, text="and cheese")',
- empty="Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), args=[Name(id='eggs', ctx=Load())], keywords=[keyword(arg='text', value=Constant(value='and cheese'))]))])",
+ empty="Module(body=[Expr(value=Call(func=Name(id='spam'), args=[Name(id='eggs')], keywords=[keyword(arg='text', value=Constant(value='and cheese'))]))])",
full="Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), args=[Name(id='eggs', ctx=Load())], keywords=[keyword(arg='text', value=Constant(value='and cheese'))]))], type_ignores=[])",
)
@@ -1610,12 +1632,12 @@ Module(
self.assertEqual(src, ast.fix_missing_locations(src))
self.maxDiff = None
self.assertEqual(ast.dump(src, include_attributes=True),
- "Module(body=[Expr(value=Call(func=Name(id='write', ctx=Load(), "
+ "Module(body=[Expr(value=Call(func=Name(id='write', "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=5), "
"args=[Constant(value='spam', lineno=1, col_offset=6, end_lineno=1, "
"end_col_offset=12)], lineno=1, col_offset=0, end_lineno=1, "
"end_col_offset=13), lineno=1, col_offset=0, end_lineno=1, "
- "end_col_offset=13), Expr(value=Call(func=Name(id='spam', ctx=Load(), "
+ "end_col_offset=13), Expr(value=Call(func=Name(id='spam', "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=0), "
"args=[Constant(value='eggs', lineno=1, col_offset=0, end_lineno=1, "
"end_col_offset=0)], lineno=1, col_offset=0, end_lineno=1, "
@@ -3335,7 +3357,7 @@ class CommandLineTests(unittest.TestCase):
body=[
AnnAssign(
target=Name(id='x', ctx=Store()),
- annotation=Name(id='bool', ctx=Load()),
+ annotation=Name(id='bool'),
value=Constant(value=1),
simple=1)],
type_ignores=[
@@ -3363,7 +3385,7 @@ class CommandLineTests(unittest.TestCase):
expect = '''
Expression(
body=Call(
- func=Name(id='print', ctx=Load()),
+ func=Name(id='print'),
args=[
Constant(value=1),
Constant(value=2),
@@ -3379,12 +3401,11 @@ class CommandLineTests(unittest.TestCase):
expect = '''
FunctionType(
argtypes=[
- Name(id='int', ctx=Load()),
- Name(id='str', ctx=Load())],
+ Name(id='int'),
+ Name(id='str')],
returns=Subscript(
- value=Name(id='list', ctx=Load()),
- slice=Name(id='int', ctx=Load()),
- ctx=Load()))
+ value=Name(id='list'),
+ slice=Name(id='int')))
'''
for flag in ('-m=func_type', '--mode=func_type'):
with self.subTest(flag=flag):
@@ -3398,7 +3419,7 @@ class CommandLineTests(unittest.TestCase):
body=[
AnnAssign(
target=Name(id='x', ctx=Store()),
- annotation=Name(id='bool', ctx=Load()),
+ annotation=Name(id='bool'),
value=Constant(value=1),
simple=1)])
'''
@@ -3443,7 +3464,7 @@ class CommandLineTests(unittest.TestCase):
Module(
body=[
Match(
- subject=Name(id='x', ctx=Load()),
+ subject=Name(id='x'),
cases=[
match_case(
pattern=MatchValue(
@@ -3466,7 +3487,7 @@ class CommandLineTests(unittest.TestCase):
Module(
body=[
Match(
- subject=Name(id='a', ctx=Load()),
+ subject=Name(id='a'),
cases=[
match_case(
pattern=MatchValue(
@@ -3492,7 +3513,7 @@ class CommandLineTests(unittest.TestCase):
Module(
body=[
Match(
- subject=Name(id='a', ctx=Load()),
+ subject=Name(id='a'),
cases=[
match_case(
pattern=MatchValue(
diff --git a/Lib/test/test_asyncgen.py b/Lib/test/test_asyncgen.py
index 2c44647bf3e..636cb33dd98 100644
--- a/Lib/test/test_asyncgen.py
+++ b/Lib/test/test_asyncgen.py
@@ -2021,6 +2021,15 @@ class TestUnawaitedWarnings(unittest.TestCase):
g.athrow(RuntimeError)
gc_collect()
+ def test_athrow_throws_immediately(self):
+ async def gen():
+ yield 1
+
+ g = gen()
+ msg = "athrow expected at least 1 argument, got 0"
+ with self.assertRaisesRegex(TypeError, msg):
+ g.athrow()
+
def test_aclose(self):
async def gen():
yield 1
diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py
index 2ca5c4c6719..12179eb0c9e 100644
--- a/Lib/test/test_asyncio/test_base_events.py
+++ b/Lib/test/test_asyncio/test_base_events.py
@@ -24,6 +24,10 @@ import warnings
MOCK_ANY = mock.ANY
+class CustomError(Exception):
+ pass
+
+
def tearDownModule():
asyncio._set_event_loop_policy(None)
@@ -1190,6 +1194,36 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
+ @patch_socket
+ def test_create_connection_happy_eyeballs_empty_exceptions(self, m_socket):
+ # See gh-135836: Fix IndexError when Happy Eyeballs algorithm
+ # results in empty exceptions list
+
+ async def getaddrinfo(*args, **kw):
+ return [(socket.AF_INET, socket.SOCK_STREAM, 0, '', ('127.0.0.1', 80)),
+ (socket.AF_INET6, socket.SOCK_STREAM, 0, '', ('::1', 80))]
+
+ def getaddrinfo_task(*args, **kwds):
+ return self.loop.create_task(getaddrinfo(*args, **kwds))
+
+ self.loop.getaddrinfo = getaddrinfo_task
+
+ # Mock staggered_race to return empty exceptions list
+ # This simulates the scenario where Happy Eyeballs algorithm
+ # cancels all attempts but doesn't properly collect exceptions
+ with mock.patch('asyncio.staggered.staggered_race') as mock_staggered:
+ # Return (None, []) - no winner, empty exceptions list
+ async def mock_race(coro_fns, delay, loop):
+ return None, []
+ mock_staggered.side_effect = mock_race
+
+ coro = self.loop.create_connection(
+ MyProto, 'example.com', 80, happy_eyeballs_delay=0.1)
+
+ # Should raise TimeoutError instead of IndexError
+ with self.assertRaisesRegex(TimeoutError, "create_connection failed"):
+ self.loop.run_until_complete(coro)
+
def test_create_connection_host_port_sock(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, sock=object())
@@ -1296,6 +1330,31 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.assertEqual(len(cm.exception.exceptions), 1)
self.assertIsInstance(cm.exception.exceptions[0], OSError)
+ @patch_socket
+ def test_create_connection_connect_non_os_err_close_err(self, m_socket):
+ # Test the case when sock_connect() raises non-OSError exception
+ # and sock.close() raises OSError.
+ async def getaddrinfo(*args, **kw):
+ return [(2, 1, 6, '', ('107.6.106.82', 80))]
+
+ def getaddrinfo_task(*args, **kwds):
+ return self.loop.create_task(getaddrinfo(*args, **kwds))
+
+ self.loop.getaddrinfo = getaddrinfo_task
+ self.loop.sock_connect = mock.Mock()
+ self.loop.sock_connect.side_effect = CustomError
+ sock = mock.Mock()
+ m_socket.socket.return_value = sock
+ sock.close.side_effect = OSError
+
+ coro = self.loop.create_connection(MyProto, 'example.com', 80)
+ self.assertRaises(
+ CustomError, self.loop.run_until_complete, coro)
+
+ coro = self.loop.create_connection(MyProto, 'example.com', 80, all_errors=True)
+ self.assertRaises(
+ CustomError, self.loop.run_until_complete, coro)
+
def test_create_connection_multiple(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
diff --git a/Lib/test/test_asyncio/test_tools.py b/Lib/test/test_asyncio/test_tools.py
index ba36e759ccd..34e94830204 100644
--- a/Lib/test/test_asyncio/test_tools.py
+++ b/Lib/test/test_asyncio/test_tools.py
@@ -2,6 +2,13 @@ import unittest
from asyncio import tools
+from collections import namedtuple
+
+FrameInfo = namedtuple('FrameInfo', ['funcname', 'filename', 'lineno'])
+CoroInfo = namedtuple('CoroInfo', ['call_stack', 'task_name'])
+TaskInfo = namedtuple('TaskInfo', ['task_id', 'task_name', 'coroutine_stack', 'awaited_by'])
+AwaitedInfo = namedtuple('AwaitedInfo', ['thread_id', 'awaited_by'])
+
# mock output of get_all_awaited_by function.
TEST_INPUTS_TREE = [
@@ -10,81 +17,151 @@ TEST_INPUTS_TREE = [
# different subtasks part of a TaskGroup (root1 and root2) which call
# awaiter functions.
(
- (
- 1,
- [
- (2, "Task-1", []),
- (
- 3,
- "timer",
- [
- [[("awaiter3", "/path/to/app.py", 130),
- ("awaiter2", "/path/to/app.py", 120),
- ("awaiter", "/path/to/app.py", 110)], 4],
- [[("awaiterB3", "/path/to/app.py", 190),
- ("awaiterB2", "/path/to/app.py", 180),
- ("awaiterB", "/path/to/app.py", 170)], 5],
- [[("awaiterB3", "/path/to/app.py", 190),
- ("awaiterB2", "/path/to/app.py", 180),
- ("awaiterB", "/path/to/app.py", 170)], 6],
- [[("awaiter3", "/path/to/app.py", 130),
- ("awaiter2", "/path/to/app.py", 120),
- ("awaiter", "/path/to/app.py", 110)], 7],
- ],
- ),
- (
- 8,
- "root1",
- [[["_aexit", "__aexit__", "main"], 2]],
- ),
- (
- 9,
- "root2",
- [[["_aexit", "__aexit__", "main"], 2]],
- ),
- (
- 4,
- "child1_1",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 8,
- ]
- ],
- ),
- (
- 6,
- "child2_1",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 8,
- ]
- ],
- ),
- (
- 7,
- "child1_2",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 9,
- ]
- ],
- ),
- (
- 5,
- "child2_2",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 9,
- ]
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
),
- ],
+ TaskInfo(
+ task_id=3,
+ task_name="timer",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiter3", "/path/to/app.py", 130),
+ FrameInfo("awaiter2", "/path/to/app.py", 120),
+ FrameInfo("awaiter", "/path/to/app.py", 110)
+ ],
+ task_name=4
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiterB3", "/path/to/app.py", 190),
+ FrameInfo("awaiterB2", "/path/to/app.py", 180),
+ FrameInfo("awaiterB", "/path/to/app.py", 170)
+ ],
+ task_name=5
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiterB3", "/path/to/app.py", 190),
+ FrameInfo("awaiterB2", "/path/to/app.py", 180),
+ FrameInfo("awaiterB", "/path/to/app.py", 170)
+ ],
+ task_name=6
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiter3", "/path/to/app.py", 130),
+ FrameInfo("awaiter2", "/path/to/app.py", 120),
+ FrameInfo("awaiter", "/path/to/app.py", 110)
+ ],
+ task_name=7
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=8,
+ task_name="root1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("main", "", 0)
+ ],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=9,
+ task_name="root2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("main", "", 0)
+ ],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="child1_1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=8
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="child2_1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=8
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=7,
+ task_name="child1_2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=9
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=5,
+ task_name="child2_2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=9
+ )
+ ]
+ )
+ ]
),
- (0, []),
+ AwaitedInfo(thread_id=0, awaited_by=[])
),
(
[
@@ -130,26 +207,96 @@ TEST_INPUTS_TREE = [
[
# test case containing two roots
(
- (
- 9,
- [
- (5, "Task-5", []),
- (6, "Task-6", [[["main2"], 5]]),
- (7, "Task-7", [[["main2"], 5]]),
- (8, "Task-8", [[["main2"], 5]]),
- ],
+ AwaitedInfo(
+ thread_id=9,
+ awaited_by=[
+ TaskInfo(
+ task_id=5,
+ task_name="Task-5",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="Task-6",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main2", "", 0)],
+ task_name=5
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=7,
+ task_name="Task-7",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main2", "", 0)],
+ task_name=5
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=8,
+ task_name="Task-8",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main2", "", 0)],
+ task_name=5
+ )
+ ]
+ )
+ ]
),
- (
- 10,
- [
- (1, "Task-1", []),
- (2, "Task-2", [[["main"], 1]]),
- (3, "Task-3", [[["main"], 1]]),
- (4, "Task-4", [[["main"], 1]]),
- ],
+ AwaitedInfo(
+ thread_id=10,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=1
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=1
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="Task-4",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=1
+ )
+ ]
+ )
+ ]
),
- (11, []),
- (0, []),
+ AwaitedInfo(thread_id=11, awaited_by=[]),
+ AwaitedInfo(thread_id=0, awaited_by=[])
),
(
[
@@ -174,18 +321,63 @@ TEST_INPUTS_TREE = [
# test case containing two roots, one of them without subtasks
(
[
- (1, [(2, "Task-5", [])]),
- (
- 3,
- [
- (4, "Task-1", []),
- (5, "Task-2", [[["main"], 4]]),
- (6, "Task-3", [[["main"], 4]]),
- (7, "Task-4", [[["main"], 4]]),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-5",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
),
- (8, []),
- (0, []),
+ AwaitedInfo(
+ thread_id=3,
+ awaited_by=[
+ TaskInfo(
+ task_id=4,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=5,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=4
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=4
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=7,
+ task_name="Task-4",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=4
+ )
+ ]
+ )
+ ]
+ ),
+ AwaitedInfo(thread_id=8, awaited_by=[]),
+ AwaitedInfo(thread_id=0, awaited_by=[])
]
),
(
@@ -208,19 +400,44 @@ TEST_INPUTS_CYCLES_TREE = [
# this test case contains a cycle: two tasks awaiting each other.
(
[
- (
- 1,
- [
- (2, "Task-1", []),
- (
- 3,
- "a",
- [[["awaiter2"], 4], [["main"], 2]],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
),
- (4, "b", [[["awaiter"], 3]]),
- ],
+ TaskInfo(
+ task_id=3,
+ task_name="a",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("awaiter2", "", 0)],
+ task_name=4
+ ),
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="b",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("awaiter", "", 0)],
+ task_name=3
+ )
+ ]
+ )
+ ]
),
- (0, []),
+ AwaitedInfo(thread_id=0, awaited_by=[])
]
),
([[4, 3, 4]]),
@@ -229,32 +446,85 @@ TEST_INPUTS_CYCLES_TREE = [
# this test case contains two cycles
(
[
- (
- 1,
- [
- (2, "Task-1", []),
- (
- 3,
- "A",
- [[["nested", "nested", "task_b"], 4]],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
),
- (
- 4,
- "B",
- [
- [["nested", "nested", "task_c"], 5],
- [["nested", "nested", "task_a"], 3],
- ],
+ TaskInfo(
+ task_id=3,
+ task_name="A",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_b", "", 0)
+ ],
+ task_name=4
+ )
+ ]
),
- (5, "C", [[["nested", "nested"], 6]]),
- (
- 6,
- "Task-2",
- [[["nested", "nested", "task_b"], 4]],
+ TaskInfo(
+ task_id=4,
+ task_name="B",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_c", "", 0)
+ ],
+ task_name=5
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_a", "", 0)
+ ],
+ task_name=3
+ )
+ ]
),
- ],
+ TaskInfo(
+ task_id=5,
+ task_name="C",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0)
+ ],
+ task_name=6
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_b", "", 0)
+ ],
+ task_name=4
+ )
+ ]
+ )
+ ]
),
- (0, []),
+ AwaitedInfo(thread_id=0, awaited_by=[])
]
),
([[4, 3, 4], [4, 6, 5, 4]]),
@@ -267,81 +537,160 @@ TEST_INPUTS_TABLE = [
# different subtasks part of a TaskGroup (root1 and root2) which call
# awaiter functions.
(
- (
- 1,
- [
- (2, "Task-1", []),
- (
- 3,
- "timer",
- [
- [["awaiter3", "awaiter2", "awaiter"], 4],
- [["awaiter1_3", "awaiter1_2", "awaiter1"], 5],
- [["awaiter1_3", "awaiter1_2", "awaiter1"], 6],
- [["awaiter3", "awaiter2", "awaiter"], 7],
- ],
- ),
- (
- 8,
- "root1",
- [[["_aexit", "__aexit__", "main"], 2]],
- ),
- (
- 9,
- "root2",
- [[["_aexit", "__aexit__", "main"], 2]],
- ),
- (
- 4,
- "child1_1",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 8,
- ]
- ],
- ),
- (
- 6,
- "child2_1",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 8,
- ]
- ],
- ),
- (
- 7,
- "child1_2",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 9,
- ]
- ],
- ),
- (
- 5,
- "child2_2",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 9,
- ]
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
),
- ],
+ TaskInfo(
+ task_id=3,
+ task_name="timer",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiter3", "", 0),
+ FrameInfo("awaiter2", "", 0),
+ FrameInfo("awaiter", "", 0)
+ ],
+ task_name=4
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiter1_3", "", 0),
+ FrameInfo("awaiter1_2", "", 0),
+ FrameInfo("awaiter1", "", 0)
+ ],
+ task_name=5
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiter1_3", "", 0),
+ FrameInfo("awaiter1_2", "", 0),
+ FrameInfo("awaiter1", "", 0)
+ ],
+ task_name=6
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiter3", "", 0),
+ FrameInfo("awaiter2", "", 0),
+ FrameInfo("awaiter", "", 0)
+ ],
+ task_name=7
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=8,
+ task_name="root1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("main", "", 0)
+ ],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=9,
+ task_name="root2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("main", "", 0)
+ ],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="child1_1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=8
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="child2_1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=8
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=7,
+ task_name="child1_2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=9
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=5,
+ task_name="child2_2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=9
+ )
+ ]
+ )
+ ]
),
- (0, []),
+ AwaitedInfo(thread_id=0, awaited_by=[])
),
(
[
- [1, "0x2", "Task-1", "", "", "0x0"],
+ [1, "0x2", "Task-1", "", "", "", "0x0"],
[
1,
"0x3",
"timer",
+ "",
"awaiter3 -> awaiter2 -> awaiter",
"child1_1",
"0x4",
@@ -350,6 +699,7 @@ TEST_INPUTS_TABLE = [
1,
"0x3",
"timer",
+ "",
"awaiter1_3 -> awaiter1_2 -> awaiter1",
"child2_2",
"0x5",
@@ -358,6 +708,7 @@ TEST_INPUTS_TABLE = [
1,
"0x3",
"timer",
+ "",
"awaiter1_3 -> awaiter1_2 -> awaiter1",
"child2_1",
"0x6",
@@ -366,6 +717,7 @@ TEST_INPUTS_TABLE = [
1,
"0x3",
"timer",
+ "",
"awaiter3 -> awaiter2 -> awaiter",
"child1_2",
"0x7",
@@ -374,6 +726,7 @@ TEST_INPUTS_TABLE = [
1,
"0x8",
"root1",
+ "",
"_aexit -> __aexit__ -> main",
"Task-1",
"0x2",
@@ -382,6 +735,7 @@ TEST_INPUTS_TABLE = [
1,
"0x9",
"root2",
+ "",
"_aexit -> __aexit__ -> main",
"Task-1",
"0x2",
@@ -390,6 +744,7 @@ TEST_INPUTS_TABLE = [
1,
"0x4",
"child1_1",
+ "",
"_aexit -> __aexit__ -> blocho_caller -> bloch",
"root1",
"0x8",
@@ -398,6 +753,7 @@ TEST_INPUTS_TABLE = [
1,
"0x6",
"child2_1",
+ "",
"_aexit -> __aexit__ -> blocho_caller -> bloch",
"root1",
"0x8",
@@ -406,6 +762,7 @@ TEST_INPUTS_TABLE = [
1,
"0x7",
"child1_2",
+ "",
"_aexit -> __aexit__ -> blocho_caller -> bloch",
"root2",
"0x9",
@@ -414,6 +771,7 @@ TEST_INPUTS_TABLE = [
1,
"0x5",
"child2_2",
+ "",
"_aexit -> __aexit__ -> blocho_caller -> bloch",
"root2",
"0x9",
@@ -424,37 +782,107 @@ TEST_INPUTS_TABLE = [
[
# test case containing two roots
(
- (
- 9,
- [
- (5, "Task-5", []),
- (6, "Task-6", [[["main2"], 5]]),
- (7, "Task-7", [[["main2"], 5]]),
- (8, "Task-8", [[["main2"], 5]]),
- ],
+ AwaitedInfo(
+ thread_id=9,
+ awaited_by=[
+ TaskInfo(
+ task_id=5,
+ task_name="Task-5",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="Task-6",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main2", "", 0)],
+ task_name=5
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=7,
+ task_name="Task-7",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main2", "", 0)],
+ task_name=5
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=8,
+ task_name="Task-8",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main2", "", 0)],
+ task_name=5
+ )
+ ]
+ )
+ ]
),
- (
- 10,
- [
- (1, "Task-1", []),
- (2, "Task-2", [[["main"], 1]]),
- (3, "Task-3", [[["main"], 1]]),
- (4, "Task-4", [[["main"], 1]]),
- ],
+ AwaitedInfo(
+ thread_id=10,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=1
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=1
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="Task-4",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=1
+ )
+ ]
+ )
+ ]
),
- (11, []),
- (0, []),
+ AwaitedInfo(thread_id=11, awaited_by=[]),
+ AwaitedInfo(thread_id=0, awaited_by=[])
),
(
[
- [9, "0x5", "Task-5", "", "", "0x0"],
- [9, "0x6", "Task-6", "main2", "Task-5", "0x5"],
- [9, "0x7", "Task-7", "main2", "Task-5", "0x5"],
- [9, "0x8", "Task-8", "main2", "Task-5", "0x5"],
- [10, "0x1", "Task-1", "", "", "0x0"],
- [10, "0x2", "Task-2", "main", "Task-1", "0x1"],
- [10, "0x3", "Task-3", "main", "Task-1", "0x1"],
- [10, "0x4", "Task-4", "main", "Task-1", "0x1"],
+ [9, "0x5", "Task-5", "", "", "", "0x0"],
+ [9, "0x6", "Task-6", "", "main2", "Task-5", "0x5"],
+ [9, "0x7", "Task-7", "", "main2", "Task-5", "0x5"],
+ [9, "0x8", "Task-8", "", "main2", "Task-5", "0x5"],
+ [10, "0x1", "Task-1", "", "", "", "0x0"],
+ [10, "0x2", "Task-2", "", "main", "Task-1", "0x1"],
+ [10, "0x3", "Task-3", "", "main", "Task-1", "0x1"],
+ [10, "0x4", "Task-4", "", "main", "Task-1", "0x1"],
]
),
],
@@ -462,27 +890,72 @@ TEST_INPUTS_TABLE = [
# test case containing two roots, one of them without subtasks
(
[
- (1, [(2, "Task-5", [])]),
- (
- 3,
- [
- (4, "Task-1", []),
- (5, "Task-2", [[["main"], 4]]),
- (6, "Task-3", [[["main"], 4]]),
- (7, "Task-4", [[["main"], 4]]),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-5",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
),
- (8, []),
- (0, []),
+ AwaitedInfo(
+ thread_id=3,
+ awaited_by=[
+ TaskInfo(
+ task_id=4,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=5,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=4
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=4
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=7,
+ task_name="Task-4",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=4
+ )
+ ]
+ )
+ ]
+ ),
+ AwaitedInfo(thread_id=8, awaited_by=[]),
+ AwaitedInfo(thread_id=0, awaited_by=[])
]
),
(
[
- [1, "0x2", "Task-5", "", "", "0x0"],
- [3, "0x4", "Task-1", "", "", "0x0"],
- [3, "0x5", "Task-2", "main", "Task-1", "0x4"],
- [3, "0x6", "Task-3", "main", "Task-1", "0x4"],
- [3, "0x7", "Task-4", "main", "Task-1", "0x4"],
+ [1, "0x2", "Task-5", "", "", "", "0x0"],
+ [3, "0x4", "Task-1", "", "", "", "0x0"],
+ [3, "0x5", "Task-2", "", "main", "Task-1", "0x4"],
+ [3, "0x6", "Task-3", "", "main", "Task-1", "0x4"],
+ [3, "0x7", "Task-4", "", "main", "Task-1", "0x4"],
]
),
],
@@ -491,27 +964,52 @@ TEST_INPUTS_TABLE = [
# this test case contains a cycle: two tasks awaiting each other.
(
[
- (
- 1,
- [
- (2, "Task-1", []),
- (
- 3,
- "a",
- [[["awaiter2"], 4], [["main"], 2]],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
),
- (4, "b", [[["awaiter"], 3]]),
- ],
+ TaskInfo(
+ task_id=3,
+ task_name="a",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("awaiter2", "", 0)],
+ task_name=4
+ ),
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="b",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("awaiter", "", 0)],
+ task_name=3
+ )
+ ]
+ )
+ ]
),
- (0, []),
+ AwaitedInfo(thread_id=0, awaited_by=[])
]
),
(
[
- [1, "0x2", "Task-1", "", "", "0x0"],
- [1, "0x3", "a", "awaiter2", "b", "0x4"],
- [1, "0x3", "a", "main", "Task-1", "0x2"],
- [1, "0x4", "b", "awaiter", "a", "0x3"],
+ [1, "0x2", "Task-1", "", "", "", "0x0"],
+ [1, "0x3", "a", "", "awaiter2", "b", "0x4"],
+ [1, "0x3", "a", "", "main", "Task-1", "0x2"],
+ [1, "0x4", "b", "", "awaiter", "a", "0x3"],
]
),
],
@@ -519,41 +1017,95 @@ TEST_INPUTS_TABLE = [
# this test case contains two cycles
(
[
- (
- 1,
- [
- (2, "Task-1", []),
- (
- 3,
- "A",
- [[["nested", "nested", "task_b"], 4]],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="A",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_b", "", 0)
+ ],
+ task_name=4
+ )
+ ]
),
- (
- 4,
- "B",
- [
- [["nested", "nested", "task_c"], 5],
- [["nested", "nested", "task_a"], 3],
- ],
+ TaskInfo(
+ task_id=4,
+ task_name="B",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_c", "", 0)
+ ],
+ task_name=5
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_a", "", 0)
+ ],
+ task_name=3
+ )
+ ]
),
- (5, "C", [[["nested", "nested"], 6]]),
- (
- 6,
- "Task-2",
- [[["nested", "nested", "task_b"], 4]],
+ TaskInfo(
+ task_id=5,
+ task_name="C",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0)
+ ],
+ task_name=6
+ )
+ ]
),
- ],
+ TaskInfo(
+ task_id=6,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_b", "", 0)
+ ],
+ task_name=4
+ )
+ ]
+ )
+ ]
),
- (0, []),
+ AwaitedInfo(thread_id=0, awaited_by=[])
]
),
(
[
- [1, "0x2", "Task-1", "", "", "0x0"],
+ [1, "0x2", "Task-1", "", "", "", "0x0"],
[
1,
"0x3",
"A",
+ "",
"nested -> nested -> task_b",
"B",
"0x4",
@@ -562,6 +1114,7 @@ TEST_INPUTS_TABLE = [
1,
"0x4",
"B",
+ "",
"nested -> nested -> task_c",
"C",
"0x5",
@@ -570,6 +1123,7 @@ TEST_INPUTS_TABLE = [
1,
"0x4",
"B",
+ "",
"nested -> nested -> task_a",
"A",
"0x3",
@@ -578,6 +1132,7 @@ TEST_INPUTS_TABLE = [
1,
"0x5",
"C",
+ "",
"nested -> nested",
"Task-2",
"0x6",
@@ -586,6 +1141,7 @@ TEST_INPUTS_TABLE = [
1,
"0x6",
"Task-2",
+ "",
"nested -> nested -> task_b",
"B",
"0x4",
@@ -600,7 +1156,8 @@ class TestAsyncioToolsTree(unittest.TestCase):
def test_asyncio_utils(self):
for input_, tree in TEST_INPUTS_TREE:
with self.subTest(input_):
- self.assertEqual(tools.build_async_tree(input_), tree)
+ result = tools.build_async_tree(input_)
+ self.assertEqual(result, tree)
def test_asyncio_utils_cycles(self):
for input_, cycles in TEST_INPUTS_CYCLES_TREE:
@@ -615,7 +1172,8 @@ class TestAsyncioToolsTable(unittest.TestCase):
def test_asyncio_utils(self):
for input_, table in TEST_INPUTS_TABLE:
with self.subTest(input_):
- self.assertEqual(tools.build_task_table(input_), table)
+ result = tools.build_task_table(input_)
+ self.assertEqual(result, table)
class TestAsyncioToolsBasic(unittest.TestCase):
@@ -632,26 +1190,67 @@ class TestAsyncioToolsBasic(unittest.TestCase):
self.assertEqual(tools.build_task_table(result), expected_output)
def test_only_independent_tasks_tree(self):
- input_ = [(1, [(10, "taskA", []), (11, "taskB", [])])]
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=10,
+ task_name="taskA",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=11,
+ task_name="taskB",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
+ )
+ ]
expected = [["โ””โ”€โ”€ (T) taskA"], ["โ””โ”€โ”€ (T) taskB"]]
result = tools.build_async_tree(input_)
self.assertEqual(sorted(result), sorted(expected))
def test_only_independent_tasks_table(self):
- input_ = [(1, [(10, "taskA", []), (11, "taskB", [])])]
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=10,
+ task_name="taskA",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=11,
+ task_name="taskB",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
+ )
+ ]
self.assertEqual(
tools.build_task_table(input_),
- [[1, "0xa", "taskA", "", "", "0x0"], [1, "0xb", "taskB", "", "", "0x0"]],
+ [[1, '0xa', 'taskA', '', '', '', '0x0'], [1, '0xb', 'taskB', '', '', '', '0x0']]
)
def test_single_task_tree(self):
"""Test build_async_tree with a single task and no awaits."""
result = [
- (
- 1,
- [
- (2, "Task-1", []),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
)
]
expected_output = [
@@ -664,25 +1263,50 @@ class TestAsyncioToolsBasic(unittest.TestCase):
def test_single_task_table(self):
"""Test build_task_table with a single task and no awaits."""
result = [
- (
- 1,
- [
- (2, "Task-1", []),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
)
]
- expected_output = [[1, "0x2", "Task-1", "", "", "0x0"]]
+ expected_output = [[1, '0x2', 'Task-1', '', '', '', '0x0']]
self.assertEqual(tools.build_task_table(result), expected_output)
def test_cycle_detection(self):
"""Test build_async_tree raises CycleFoundException for cyclic input."""
result = [
- (
- 1,
- [
- (2, "Task-1", [[["main"], 3]]),
- (3, "Task-2", [[["main"], 2]]),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=3
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=2
+ )
+ ]
+ )
+ ]
)
]
with self.assertRaises(tools.CycleFoundException) as context:
@@ -692,13 +1316,38 @@ class TestAsyncioToolsBasic(unittest.TestCase):
def test_complex_tree(self):
"""Test build_async_tree with a more complex tree structure."""
result = [
- (
- 1,
- [
- (2, "Task-1", []),
- (3, "Task-2", [[["main"], 2]]),
- (4, "Task-3", [[["main"], 3]]),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=3
+ )
+ ]
+ )
+ ]
)
]
expected_output = [
@@ -715,30 +1364,76 @@ class TestAsyncioToolsBasic(unittest.TestCase):
def test_complex_table(self):
"""Test build_task_table with a more complex tree structure."""
result = [
- (
- 1,
- [
- (2, "Task-1", []),
- (3, "Task-2", [[["main"], 2]]),
- (4, "Task-3", [[["main"], 3]]),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=3
+ )
+ ]
+ )
+ ]
)
]
expected_output = [
- [1, "0x2", "Task-1", "", "", "0x0"],
- [1, "0x3", "Task-2", "main", "Task-1", "0x2"],
- [1, "0x4", "Task-3", "main", "Task-2", "0x3"],
+ [1, '0x2', 'Task-1', '', '', '', '0x0'],
+ [1, '0x3', 'Task-2', '', 'main', 'Task-1', '0x2'],
+ [1, '0x4', 'Task-3', '', 'main', 'Task-2', '0x3']
]
self.assertEqual(tools.build_task_table(result), expected_output)
def test_deep_coroutine_chain(self):
input_ = [
- (
- 1,
- [
- (10, "leaf", [[["c1", "c2", "c3", "c4", "c5"], 11]]),
- (11, "root", []),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=10,
+ task_name="leaf",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("c1", "", 0),
+ FrameInfo("c2", "", 0),
+ FrameInfo("c3", "", 0),
+ FrameInfo("c4", "", 0),
+ FrameInfo("c5", "", 0)
+ ],
+ task_name=11
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=11,
+ task_name="root",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
)
]
expected = [
@@ -757,13 +1452,47 @@ class TestAsyncioToolsBasic(unittest.TestCase):
def test_multiple_cycles_same_node(self):
input_ = [
- (
- 1,
- [
- (1, "Task-A", [[["call1"], 2]]),
- (2, "Task-B", [[["call2"], 3]]),
- (3, "Task-C", [[["call3"], 1], [["call4"], 2]]),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Task-A",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("call1", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name="Task-B",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("call2", "", 0)],
+ task_name=3
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-C",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("call3", "", 0)],
+ task_name=1
+ ),
+ CoroInfo(
+ call_stack=[FrameInfo("call4", "", 0)],
+ task_name=2
+ )
+ ]
+ )
+ ]
)
]
with self.assertRaises(tools.CycleFoundException) as ctx:
@@ -772,19 +1501,43 @@ class TestAsyncioToolsBasic(unittest.TestCase):
self.assertTrue(any(set(c) == {1, 2, 3} for c in cycles))
def test_table_output_format(self):
- input_ = [(1, [(1, "Task-A", [[["foo"], 2]]), (2, "Task-B", [])])]
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Task-A",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("foo", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name="Task-B",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
+ )
+ ]
table = tools.build_task_table(input_)
for row in table:
- self.assertEqual(len(row), 6)
+ self.assertEqual(len(row), 7)
self.assertIsInstance(row[0], int) # thread ID
self.assertTrue(
isinstance(row[1], str) and row[1].startswith("0x")
) # hex task ID
self.assertIsInstance(row[2], str) # task name
- self.assertIsInstance(row[3], str) # coroutine chain
- self.assertIsInstance(row[4], str) # awaiter name
+ self.assertIsInstance(row[3], str) # coroutine stack
+ self.assertIsInstance(row[4], str) # coroutine chain
+ self.assertIsInstance(row[5], str) # awaiter name
self.assertTrue(
- isinstance(row[5], str) and row[5].startswith("0x")
+ isinstance(row[6], str) and row[6].startswith("0x")
) # hex awaiter ID
@@ -792,28 +1545,86 @@ class TestAsyncioToolsEdgeCases(unittest.TestCase):
def test_task_awaits_self(self):
"""A task directly awaits itself - should raise a cycle."""
- input_ = [(1, [(1, "Self-Awaiter", [[["loopback"], 1]])])]
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Self-Awaiter",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("loopback", "", 0)],
+ task_name=1
+ )
+ ]
+ )
+ ]
+ )
+ ]
with self.assertRaises(tools.CycleFoundException) as ctx:
tools.build_async_tree(input_)
self.assertIn([1, 1], ctx.exception.cycles)
def test_task_with_missing_awaiter_id(self):
"""Awaiter ID not in task list - should not crash, just show 'Unknown'."""
- input_ = [(1, [(1, "Task-A", [[["coro"], 999]])])] # 999 not defined
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Task-A",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("coro", "", 0)],
+ task_name=999
+ )
+ ]
+ )
+ ]
+ )
+ ]
table = tools.build_task_table(input_)
self.assertEqual(len(table), 1)
- self.assertEqual(table[0][4], "Unknown")
+ self.assertEqual(table[0][5], "Unknown")
def test_duplicate_coroutine_frames(self):
"""Same coroutine frame repeated under a parent - should deduplicate."""
input_ = [
- (
- 1,
- [
- (1, "Task-1", [[["frameA"], 2], [["frameA"], 3]]),
- (2, "Task-2", []),
- (3, "Task-3", []),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("frameA", "", 0)],
+ task_name=2
+ ),
+ CoroInfo(
+ call_stack=[FrameInfo("frameA", "", 0)],
+ task_name=3
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
)
]
tree = tools.build_async_tree(input_)
@@ -830,14 +1641,63 @@ class TestAsyncioToolsEdgeCases(unittest.TestCase):
def test_task_with_no_name(self):
"""Task with no name in id2name - should still render with fallback."""
- input_ = [(1, [(1, "root", [[["f1"], 2]]), (2, None, [])])]
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="root",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("f1", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name=None,
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
+ )
+ ]
# If name is None, fallback to string should not crash
tree = tools.build_async_tree(input_)
self.assertIn("(T) None", "\n".join(tree[0]))
def test_tree_rendering_with_custom_emojis(self):
"""Pass custom emojis to the tree renderer."""
- input_ = [(1, [(1, "MainTask", [[["f1", "f2"], 2]]), (2, "SubTask", [])])]
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="MainTask",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("f1", "", 0),
+ FrameInfo("f2", "", 0)
+ ],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name="SubTask",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
+ )
+ ]
tree = tools.build_async_tree(input_, task_emoji="๐Ÿงต", cor_emoji="๐Ÿ”")
flat = "\n".join(tree[0])
self.assertIn("๐Ÿงต MainTask", flat)
diff --git a/Lib/test/test_audit.py b/Lib/test/test_audit.py
index 5f9eb381f60..077765fcda2 100644
--- a/Lib/test/test_audit.py
+++ b/Lib/test/test_audit.py
@@ -322,6 +322,14 @@ class AuditTest(unittest.TestCase):
if returncode:
self.fail(stderr)
+ @support.support_remote_exec_only
+ @support.cpython_only
+ def test_sys_remote_exec(self):
+ returncode, events, stderr = self.run_python("test_sys_remote_exec")
+ self.assertTrue(any(["sys.remote_exec" in event for event in events]))
+ self.assertTrue(any(["cpython.remote_debugger_script" in event for event in events]))
+ if returncode:
+ self.fail(stderr)
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_build_details.py b/Lib/test/test_build_details.py
index 05ce163a337..ba4b8c5aa9b 100644
--- a/Lib/test/test_build_details.py
+++ b/Lib/test/test_build_details.py
@@ -117,12 +117,26 @@ class CPythonBuildDetailsTests(unittest.TestCase, FormatTestsBase):
# Override generic format tests with tests for our specific implemenation.
@needs_installed_python
- @unittest.skipIf(is_android or is_apple_mobile, 'Android and iOS run tests via a custom testbed method that changes sys.executable')
+ @unittest.skipIf(
+ is_android or is_apple_mobile,
+ 'Android and iOS run tests via a custom testbed method that changes sys.executable'
+ )
def test_base_interpreter(self):
value = self.key('base_interpreter')
self.assertEqual(os.path.realpath(value), os.path.realpath(sys.executable))
+ @needs_installed_python
+ @unittest.skipIf(
+ is_android or is_apple_mobile,
+ "Android and iOS run tests via a custom testbed method that doesn't ship headers"
+ )
+ def test_c_api(self):
+ value = self.key('c_api')
+ self.assertTrue(os.path.exists(os.path.join(value['headers'], 'Python.h')))
+ version = sysconfig.get_config_var('VERSION')
+ self.assertTrue(os.path.exists(os.path.join(value['pkgconfig_path'], f'python-{version}.pc')))
+
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py
index d221aa5e1d9..14fe3355239 100644
--- a/Lib/test/test_builtin.py
+++ b/Lib/test/test_builtin.py
@@ -2991,7 +2991,8 @@ class TestType(unittest.TestCase):
def load_tests(loader, tests, pattern):
from doctest import DocTestSuite
- tests.addTest(DocTestSuite(builtins))
+ if sys.float_repr_style == 'short':
+ tests.addTest(DocTestSuite(builtins))
return tests
if __name__ == "__main__":
diff --git a/Lib/test/test_calendar.py b/Lib/test/test_calendar.py
index 7ade4271b7a..bc39c86b8cf 100644
--- a/Lib/test/test_calendar.py
+++ b/Lib/test/test_calendar.py
@@ -417,7 +417,7 @@ class OutputTestCase(unittest.TestCase):
self.check_htmlcalendar_encoding('utf-8', 'utf-8')
def test_output_htmlcalendar_encoding_default(self):
- self.check_htmlcalendar_encoding(None, sys.getdefaultencoding())
+ self.check_htmlcalendar_encoding(None, 'utf-8')
def test_yeardatescalendar(self):
def shrink(cal):
diff --git a/Lib/test/test_capi/test_abstract.py b/Lib/test/test_capi/test_abstract.py
index 7d548ae87c0..3a2ed9f5db8 100644
--- a/Lib/test/test_capi/test_abstract.py
+++ b/Lib/test/test_capi/test_abstract.py
@@ -1077,6 +1077,31 @@ class CAPITest(unittest.TestCase):
with self.assertRaisesRegex(TypeError, regex):
PyIter_NextItem(10)
+ def test_object_setattr_null_exc(self):
+ class Obj:
+ pass
+ obj = Obj()
+ obj.attr = 123
+
+ exc = ValueError("error")
+ with self.assertRaises(SystemError) as cm:
+ _testcapi.object_setattr_null_exc(obj, 'attr', exc)
+ self.assertIs(cm.exception.__context__, exc)
+ self.assertIsNone(cm.exception.__cause__)
+ self.assertHasAttr(obj, 'attr')
+
+ with self.assertRaises(SystemError) as cm:
+ _testcapi.object_setattrstring_null_exc(obj, 'attr', exc)
+ self.assertIs(cm.exception.__context__, exc)
+ self.assertIsNone(cm.exception.__cause__)
+ self.assertHasAttr(obj, 'attr')
+
+ with self.assertRaises(SystemError) as cm:
+ # undecodable name
+ _testcapi.object_setattrstring_null_exc(obj, b'\xff', exc)
+ self.assertIs(cm.exception.__context__, exc)
+ self.assertIsNone(cm.exception.__cause__)
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_capi/test_config.py b/Lib/test/test_capi/test_config.py
index a2d70dd3af4..04a27de8d84 100644
--- a/Lib/test/test_capi/test_config.py
+++ b/Lib/test/test_capi/test_config.py
@@ -3,7 +3,6 @@ Tests PyConfig_Get() and PyConfig_Set() C API (PEP 741).
"""
import os
import sys
-import sysconfig
import types
import unittest
from test import support
diff --git a/Lib/test/test_capi/test_misc.py b/Lib/test/test_capi/test_misc.py
index f74694a7a74..ef950f5df04 100644
--- a/Lib/test/test_capi/test_misc.py
+++ b/Lib/test/test_capi/test_misc.py
@@ -413,11 +413,13 @@ class CAPITest(unittest.TestCase):
@support.requires_resource('cpu')
@support.skip_emscripten_stack_overflow()
+ @support.skip_wasi_stack_overflow()
def test_trashcan_python_class1(self):
self.do_test_trashcan_python_class(list)
@support.requires_resource('cpu')
@support.skip_emscripten_stack_overflow()
+ @support.skip_wasi_stack_overflow()
def test_trashcan_python_class2(self):
from _testcapi import MyList
self.do_test_trashcan_python_class(MyList)
diff --git a/Lib/test/test_capi/test_opt.py b/Lib/test/test_capi/test_opt.py
index cb6eae48414..7be1c9eebb3 100644
--- a/Lib/test/test_capi/test_opt.py
+++ b/Lib/test/test_capi/test_opt.py
@@ -407,12 +407,12 @@ class TestUops(unittest.TestCase):
x = 0
for i in range(m):
for j in MyIter(n):
- x += 1000*i + j
+ x += j
return x
- x = testfunc(TIER2_THRESHOLD, TIER2_THRESHOLD)
+ x = testfunc(TIER2_THRESHOLD, 2)
- self.assertEqual(x, sum(range(TIER2_THRESHOLD)) * TIER2_THRESHOLD * 1001)
+ self.assertEqual(x, sum(range(TIER2_THRESHOLD)) * 2)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -678,7 +678,7 @@ class TestUopsOptimization(unittest.TestCase):
self.assertLessEqual(len(guard_nos_float_count), 1)
# TODO gh-115506: this assertion may change after propagating constants.
# We'll also need to verify that propagation actually occurs.
- self.assertIn("_BINARY_OP_ADD_FLOAT", uops)
+ self.assertIn("_BINARY_OP_ADD_FLOAT__NO_DECREF_INPUTS", uops)
def test_float_subtract_constant_propagation(self):
def testfunc(n):
@@ -700,7 +700,7 @@ class TestUopsOptimization(unittest.TestCase):
self.assertLessEqual(len(guard_nos_float_count), 1)
# TODO gh-115506: this assertion may change after propagating constants.
# We'll also need to verify that propagation actually occurs.
- self.assertIn("_BINARY_OP_SUBTRACT_FLOAT", uops)
+ self.assertIn("_BINARY_OP_SUBTRACT_FLOAT__NO_DECREF_INPUTS", uops)
def test_float_multiply_constant_propagation(self):
def testfunc(n):
@@ -722,7 +722,7 @@ class TestUopsOptimization(unittest.TestCase):
self.assertLessEqual(len(guard_nos_float_count), 1)
# TODO gh-115506: this assertion may change after propagating constants.
# We'll also need to verify that propagation actually occurs.
- self.assertIn("_BINARY_OP_MULTIPLY_FLOAT", uops)
+ self.assertIn("_BINARY_OP_MULTIPLY_FLOAT__NO_DECREF_INPUTS", uops)
def test_add_unicode_propagation(self):
def testfunc(n):
@@ -1183,6 +1183,17 @@ class TestUopsOptimization(unittest.TestCase):
self.assertIsNotNone(ex)
self.assertIn("_RETURN_GENERATOR", get_opnames(ex))
+ def test_for_iter(self):
+ def testfunc(n):
+ t = 0
+ for i in set(range(n)):
+ t += i
+ return t
+ res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
+ self.assertEqual(res, TIER2_THRESHOLD * (TIER2_THRESHOLD - 1) // 2)
+ self.assertIsNotNone(ex)
+ self.assertIn("_FOR_ITER_TIER_TWO", get_opnames(ex))
+
@unittest.skip("Tracing into generators currently isn't supported.")
def test_for_iter_gen(self):
def gen(n):
@@ -1370,6 +1381,21 @@ class TestUopsOptimization(unittest.TestCase):
# Removed guard
self.assertNotIn("_CHECK_FUNCTION_EXACT_ARGS", uops)
+ def test_method_guards_removed_or_reduced(self):
+ def testfunc(n):
+ result = 0
+ for i in range(n):
+ result += test_bound_method(i)
+ return result
+ res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
+ self.assertEqual(res, sum(range(TIER2_THRESHOLD)))
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ self.assertIn("_PUSH_FRAME", uops)
+ # Strength reduced version
+ self.assertIn("_CHECK_FUNCTION_VERSION_INLINE", uops)
+ self.assertNotIn("_CHECK_METHOD_VERSION", uops)
+
def test_jit_error_pops(self):
"""
Tests that the correct number of pops are inserted into the
@@ -1655,13 +1681,11 @@ class TestUopsOptimization(unittest.TestCase):
self.assertIn("_CONTAINS_OP_DICT", uops)
self.assertNotIn("_TO_BOOL_BOOL", uops)
-
def test_remove_guard_for_known_type_str(self):
def f(n):
for i in range(n):
false = i == TIER2_THRESHOLD
empty = "X"[:false]
- empty += "" # Make JIT realize this is a string.
if empty:
return 1
return 0
@@ -1767,11 +1791,12 @@ class TestUopsOptimization(unittest.TestCase):
self.assertNotIn("_GUARD_TOS_UNICODE", uops)
self.assertIn("_BINARY_OP_ADD_UNICODE", uops)
- def test_call_type_1(self):
+ def test_call_type_1_guards_removed(self):
def testfunc(n):
x = 0
for _ in range(n):
- x += type(42) is int
+ foo = eval('42')
+ x += type(foo) is int
return x
res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
@@ -1782,6 +1807,25 @@ class TestUopsOptimization(unittest.TestCase):
self.assertNotIn("_GUARD_NOS_NULL", uops)
self.assertNotIn("_GUARD_CALLABLE_TYPE_1", uops)
+ def test_call_type_1_known_type(self):
+ def testfunc(n):
+ x = 0
+ for _ in range(n):
+ x += type(42) is int
+ return x
+
+ res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
+ self.assertEqual(res, TIER2_THRESHOLD)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ # When the result of type(...) is known, _CALL_TYPE_1 is replaced with
+ # _POP_CALL_ONE_LOAD_CONST_INLINE_BORROW which is optimized away in
+ # remove_unneeded_uops.
+ self.assertNotIn("_CALL_TYPE_1", uops)
+ self.assertNotIn("_POP_CALL_ONE_LOAD_CONST_INLINE_BORROW", uops)
+ self.assertNotIn("_POP_CALL_LOAD_CONST_INLINE_BORROW", uops)
+ self.assertNotIn("_POP_TOP_LOAD_CONST_INLINE_BORROW", uops)
+
def test_call_type_1_result_is_const(self):
def testfunc(n):
x = 0
@@ -1795,7 +1839,6 @@ class TestUopsOptimization(unittest.TestCase):
self.assertEqual(res, TIER2_THRESHOLD)
self.assertIsNotNone(ex)
uops = get_opnames(ex)
- self.assertIn("_CALL_TYPE_1", uops)
self.assertNotIn("_GUARD_IS_NOT_NONE_POP", uops)
def test_call_str_1(self):
@@ -1925,6 +1968,49 @@ class TestUopsOptimization(unittest.TestCase):
self.assertNotIn("_GUARD_NOS_INT", uops)
self.assertNotIn("_GUARD_TOS_INT", uops)
+ def test_call_len_known_length_small_int(self):
+ def testfunc(n):
+ x = 0
+ for _ in range(n):
+ t = (1, 2, 3, 4, 5)
+ if len(t) == 5:
+ x += 1
+ return x
+
+ res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
+ self.assertEqual(res, TIER2_THRESHOLD)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ # When the length is < _PY_NSMALLPOSINTS, the len() call is replaced
+ # with just an inline load.
+ self.assertNotIn("_CALL_LEN", uops)
+ self.assertNotIn("_POP_CALL_ONE_LOAD_CONST_INLINE_BORROW", uops)
+ self.assertNotIn("_POP_CALL_LOAD_CONST_INLINE_BORROW", uops)
+ self.assertNotIn("_POP_TOP_LOAD_CONST_INLINE_BORROW", uops)
+
+ def test_call_len_known_length(self):
+ def testfunc(n):
+ class C:
+ t = tuple(range(300))
+
+ x = 0
+ for _ in range(n):
+ if len(C.t) == 300: # comparison + guard removed
+ x += 1
+ return x
+
+ res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
+ self.assertEqual(res, TIER2_THRESHOLD)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ # When the length is >= _PY_NSMALLPOSINTS, we cannot replace
+ # the len() call with an inline load, but knowing the exact
+ # length allows us to optimize more code, such as conditionals
+ # in this case
+ self.assertIn("_CALL_LEN", uops)
+ self.assertNotIn("_COMPARE_OP_INT", uops)
+ self.assertNotIn("_GUARD_IS_TRUE_POP", uops)
+
def test_get_len_with_const_tuple(self):
def testfunc(n):
x = 0.0
@@ -2219,9 +2305,176 @@ class TestUopsOptimization(unittest.TestCase):
self.assertNotIn("_LOAD_ATTR_METHOD_NO_DICT", uops)
self.assertNotIn("_LOAD_ATTR_METHOD_LAZY_DICT", uops)
+ def test_float_op_refcount_elimination(self):
+ def testfunc(args):
+ a, b, n = args
+ c = 0.0
+ for _ in range(n):
+ c += a + b
+ return c
+
+ res, ex = self._run_with_optimizer(testfunc, (0.1, 0.1, TIER2_THRESHOLD))
+ self.assertAlmostEqual(res, TIER2_THRESHOLD * (0.1 + 0.1))
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ self.assertIn("_BINARY_OP_ADD_FLOAT__NO_DECREF_INPUTS", uops)
+
+ def test_remove_guard_for_slice_list(self):
+ def f(n):
+ for i in range(n):
+ false = i == TIER2_THRESHOLD
+ sliced = [1, 2, 3][:false]
+ if sliced:
+ return 1
+ return 0
+
+ res, ex = self._run_with_optimizer(f, TIER2_THRESHOLD)
+ self.assertEqual(res, 0)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ self.assertIn("_TO_BOOL_LIST", uops)
+ self.assertNotIn("_GUARD_TOS_LIST", uops)
+
+ def test_remove_guard_for_slice_tuple(self):
+ def f(n):
+ for i in range(n):
+ false = i == TIER2_THRESHOLD
+ a, b = (1, 2, 3)[: false + 2]
+
+ _, ex = self._run_with_optimizer(f, TIER2_THRESHOLD)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ self.assertIn("_UNPACK_SEQUENCE_TWO_TUPLE", uops)
+ self.assertNotIn("_GUARD_TOS_TUPLE", uops)
+
+ def test_unary_invert_long_type(self):
+ def testfunc(n):
+ for _ in range(n):
+ a = 9397
+ x = ~a + ~a
+
+ testfunc(TIER2_THRESHOLD)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+
+ self.assertNotIn("_GUARD_TOS_INT", uops)
+ self.assertNotIn("_GUARD_NOS_INT", uops)
+
+ def test_attr_promotion_failure(self):
+ # We're not testing for any specific uops here, just
+ # testing it doesn't crash.
+ script_helper.assert_python_ok('-c', textwrap.dedent("""
+ import _testinternalcapi
+ import _opcode
+ import email
+
+ def get_first_executor(func):
+ code = func.__code__
+ co_code = code.co_code
+ for i in range(0, len(co_code), 2):
+ try:
+ return _opcode.get_executor(code, i)
+ except ValueError:
+ pass
+ return None
+
+ def testfunc(n):
+ for _ in range(n):
+ email.jit_testing = None
+ prompt = email.jit_testing
+ del email.jit_testing
+
+
+ testfunc(_testinternalcapi.TIER2_THRESHOLD)
+ ex = get_first_executor(testfunc)
+ assert ex is not None
+ """))
+
+ def test_pop_top_specialize_none(self):
+ def testfunc(n):
+ for _ in range(n):
+ global_identity(None)
+
+ testfunc(TIER2_THRESHOLD)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+
+ self.assertIn("_POP_TOP_NOP", uops)
+
+ def test_pop_top_specialize_int(self):
+ def testfunc(n):
+ for _ in range(n):
+ global_identity(100000)
+
+ testfunc(TIER2_THRESHOLD)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+
+ self.assertIn("_POP_TOP_INT", uops)
+
+ def test_pop_top_specialize_float(self):
+ def testfunc(n):
+ for _ in range(n):
+ global_identity(1e6)
+
+ testfunc(TIER2_THRESHOLD)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+
+ self.assertIn("_POP_TOP_FLOAT", uops)
+
+
+ def test_unary_negative_long_float_type(self):
+ def testfunc(n):
+ for _ in range(n):
+ a = 9397
+ f = 9397.0
+ x = -a + -a
+ y = -f + -f
+
+ testfunc(TIER2_THRESHOLD)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+
+ self.assertNotIn("_GUARD_TOS_INT", uops)
+ self.assertNotIn("_GUARD_NOS_INT", uops)
+ self.assertNotIn("_GUARD_TOS_FLOAT", uops)
+ self.assertNotIn("_GUARD_NOS_FLOAT", uops)
+
+ def test_binary_op_constant_evaluate(self):
+ def testfunc(n):
+ for _ in range(n):
+ 2 ** 65
+
+ testfunc(TIER2_THRESHOLD)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+
+ # For now... until we constant propagate it away.
+ self.assertIn("_BINARY_OP", uops)
+
def global_identity(x):
return x
+class TestObject:
+ def test(self, *args, **kwargs):
+ return args[0]
+
+test_object = TestObject()
+test_bound_method = TestObject.test.__get__(test_object)
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_capi/test_sys.py b/Lib/test/test_capi/test_sys.py
index d3a9b378e77..3793ce2461e 100644
--- a/Lib/test/test_capi/test_sys.py
+++ b/Lib/test/test_capi/test_sys.py
@@ -19,6 +19,68 @@ class CAPITest(unittest.TestCase):
maxDiff = None
+ @unittest.skipIf(_testlimitedcapi is None, 'need _testlimitedcapi module')
+ def test_sys_getattr(self):
+ # Test PySys_GetAttr()
+ sys_getattr = _testlimitedcapi.sys_getattr
+
+ self.assertIs(sys_getattr('stdout'), sys.stdout)
+ with support.swap_attr(sys, '\U0001f40d', 42):
+ self.assertEqual(sys_getattr('\U0001f40d'), 42)
+
+ with self.assertRaisesRegex(RuntimeError, r'lost sys\.nonexistent'):
+ sys_getattr('nonexistent')
+ with self.assertRaisesRegex(RuntimeError, r'lost sys\.\U0001f40d'):
+ sys_getattr('\U0001f40d')
+ self.assertRaises(TypeError, sys_getattr, 1)
+ self.assertRaises(TypeError, sys_getattr, [])
+ # CRASHES sys_getattr(NULL)
+
+ @unittest.skipIf(_testlimitedcapi is None, 'need _testlimitedcapi module')
+ def test_sys_getattrstring(self):
+ # Test PySys_GetAttrString()
+ getattrstring = _testlimitedcapi.sys_getattrstring
+
+ self.assertIs(getattrstring(b'stdout'), sys.stdout)
+ with support.swap_attr(sys, '\U0001f40d', 42):
+ self.assertEqual(getattrstring('\U0001f40d'.encode()), 42)
+
+ with self.assertRaisesRegex(RuntimeError, r'lost sys\.nonexistent'):
+ getattrstring(b'nonexistent')
+ with self.assertRaisesRegex(RuntimeError, r'lost sys\.\U0001f40d'):
+ getattrstring('\U0001f40d'.encode())
+ self.assertRaises(UnicodeDecodeError, getattrstring, b'\xff')
+ # CRASHES getattrstring(NULL)
+
+ @unittest.skipIf(_testlimitedcapi is None, 'need _testlimitedcapi module')
+ def test_sys_getoptionalattr(self):
+ # Test PySys_GetOptionalAttr()
+ getoptionalattr = _testlimitedcapi.sys_getoptionalattr
+
+ self.assertIs(getoptionalattr('stdout'), sys.stdout)
+ with support.swap_attr(sys, '\U0001f40d', 42):
+ self.assertEqual(getoptionalattr('\U0001f40d'), 42)
+
+ self.assertIs(getoptionalattr('nonexistent'), AttributeError)
+ self.assertIs(getoptionalattr('\U0001f40d'), AttributeError)
+ self.assertRaises(TypeError, getoptionalattr, 1)
+ self.assertRaises(TypeError, getoptionalattr, [])
+ # CRASHES getoptionalattr(NULL)
+
+ @unittest.skipIf(_testlimitedcapi is None, 'need _testlimitedcapi module')
+ def test_sys_getoptionalattrstring(self):
+ # Test PySys_GetOptionalAttrString()
+ getoptionalattrstring = _testlimitedcapi.sys_getoptionalattrstring
+
+ self.assertIs(getoptionalattrstring(b'stdout'), sys.stdout)
+ with support.swap_attr(sys, '\U0001f40d', 42):
+ self.assertEqual(getoptionalattrstring('\U0001f40d'.encode()), 42)
+
+ self.assertIs(getoptionalattrstring(b'nonexistent'), AttributeError)
+ self.assertIs(getoptionalattrstring('\U0001f40d'.encode()), AttributeError)
+ self.assertRaises(UnicodeDecodeError, getoptionalattrstring, b'\xff')
+ # CRASHES getoptionalattrstring(NULL)
+
@support.cpython_only
@unittest.skipIf(_testlimitedcapi is None, 'need _testlimitedcapi module')
def test_sys_getobject(self):
@@ -29,7 +91,7 @@ class CAPITest(unittest.TestCase):
with support.swap_attr(sys, '\U0001f40d', 42):
self.assertEqual(getobject('\U0001f40d'.encode()), 42)
- self.assertIs(getobject(b'nonexisting'), AttributeError)
+ self.assertIs(getobject(b'nonexistent'), AttributeError)
with support.catch_unraisable_exception() as cm:
self.assertIs(getobject(b'\xff'), AttributeError)
self.assertEqual(cm.unraisable.exc_type, UnicodeDecodeError)
diff --git a/Lib/test/test_capi/test_type.py b/Lib/test/test_capi/test_type.py
index 3c9974c7387..15fb4a93e2a 100644
--- a/Lib/test/test_capi/test_type.py
+++ b/Lib/test/test_capi/test_type.py
@@ -264,3 +264,13 @@ class TypeTests(unittest.TestCase):
ManualHeapType = _testcapi.ManualHeapType
for i in range(100):
self.assertIsInstance(ManualHeapType(), ManualHeapType)
+
+ def test_extension_managed_dict_type(self):
+ ManagedDictType = _testcapi.ManagedDictType
+ obj = ManagedDictType()
+ obj.foo = 42
+ self.assertEqual(obj.foo, 42)
+ self.assertEqual(obj.__dict__, {'foo': 42})
+ obj.__dict__ = {'bar': 3}
+ self.assertEqual(obj.__dict__, {'bar': 3})
+ self.assertEqual(obj.bar, 3)
diff --git a/Lib/test/test_capi/test_unicode.py b/Lib/test/test_capi/test_unicode.py
index 3408c10f426..6a9c60f3a6d 100644
--- a/Lib/test/test_capi/test_unicode.py
+++ b/Lib/test/test_capi/test_unicode.py
@@ -1739,6 +1739,20 @@ class CAPITest(unittest.TestCase):
# Check that the second call returns the same result
self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1))
+ @support.cpython_only
+ @unittest.skipIf(_testcapi is None, 'need _testcapi module')
+ def test_GET_CACHED_HASH(self):
+ from _testcapi import unicode_GET_CACHED_HASH
+ content_bytes = b'some new string'
+ # avoid parser interning & constant folding
+ obj = str(content_bytes, 'ascii')
+ # impl detail: fresh strings do not have cached hash
+ self.assertEqual(unicode_GET_CACHED_HASH(obj), -1)
+ # impl detail: adding string to a dict caches its hash
+ {obj: obj}
+ # impl detail: ASCII string hashes are equal to bytes ones
+ self.assertEqual(unicode_GET_CACHED_HASH(obj), hash(content_bytes))
+
class PyUnicodeWriterTest(unittest.TestCase):
def create_writer(self, size):
@@ -1776,6 +1790,13 @@ class PyUnicodeWriterTest(unittest.TestCase):
self.assertEqual(writer.finish(),
"ascii-latin1=\xE9-euro=\u20AC.")
+ def test_ascii(self):
+ writer = self.create_writer(0)
+ writer.write_ascii(b"Hello ", -1)
+ writer.write_ascii(b"", 0)
+ writer.write_ascii(b"Python! <truncated>", 6)
+ self.assertEqual(writer.finish(), "Hello Python")
+
def test_invalid_utf8(self):
writer = self.create_writer(0)
with self.assertRaises(UnicodeDecodeError):
diff --git a/Lib/test/test_class.py b/Lib/test/test_class.py
index 4c12d43556f..8c7a62a74ba 100644
--- a/Lib/test/test_class.py
+++ b/Lib/test/test_class.py
@@ -652,6 +652,7 @@ class ClassTests(unittest.TestCase):
a = A(hash(A.f)^(-1))
hash(a.f)
+ @cpython_only
def testSetattrWrapperNameIntern(self):
# Issue #25794: __setattr__ should intern the attribute name
class A:
diff --git a/Lib/test/test_code.py b/Lib/test/test_code.py
index 32cf8aacaf6..655f5a9be7f 100644
--- a/Lib/test/test_code.py
+++ b/Lib/test/test_code.py
@@ -701,6 +701,27 @@ class CodeTest(unittest.TestCase):
'checks': CO_FAST_LOCAL,
'res': CO_FAST_LOCAL,
},
+ defs.spam_with_global_and_attr_same_name: {},
+ defs.spam_full_args: {
+ 'a': POSONLY,
+ 'b': POSONLY,
+ 'c': POSORKW,
+ 'd': POSORKW,
+ 'e': KWONLY,
+ 'f': KWONLY,
+ 'args': VARARGS,
+ 'kwargs': VARKWARGS,
+ },
+ defs.spam_full_args_with_defaults: {
+ 'a': POSONLY,
+ 'b': POSONLY,
+ 'c': POSORKW,
+ 'd': POSORKW,
+ 'e': KWONLY,
+ 'f': KWONLY,
+ 'args': VARARGS,
+ 'kwargs': VARKWARGS,
+ },
defs.spam_args_attrs_and_builtins: {
'a': POSONLY,
'b': POSONLY,
@@ -714,6 +735,7 @@ class CodeTest(unittest.TestCase):
defs.spam_returns_arg: {
'x': POSORKW,
},
+ defs.spam_raises: {},
defs.spam_with_inner_not_closure: {
'eggs': CO_FAST_LOCAL,
},
@@ -934,6 +956,24 @@ class CodeTest(unittest.TestCase):
purelocals=5,
globalvars=6,
),
+ defs.spam_with_global_and_attr_same_name: new_var_counts(
+ globalvars=2,
+ attrs=1,
+ ),
+ defs.spam_full_args: new_var_counts(
+ posonly=2,
+ posorkw=2,
+ kwonly=2,
+ varargs=1,
+ varkwargs=1,
+ ),
+ defs.spam_full_args_with_defaults: new_var_counts(
+ posonly=2,
+ posorkw=2,
+ kwonly=2,
+ varargs=1,
+ varkwargs=1,
+ ),
defs.spam_args_attrs_and_builtins: new_var_counts(
posonly=2,
posorkw=2,
@@ -945,6 +985,9 @@ class CodeTest(unittest.TestCase):
defs.spam_returns_arg: new_var_counts(
posorkw=1,
),
+ defs.spam_raises: new_var_counts(
+ globalvars=1,
+ ),
defs.spam_with_inner_not_closure: new_var_counts(
purelocals=1,
),
@@ -1097,10 +1140,16 @@ class CodeTest(unittest.TestCase):
def test_stateless(self):
self.maxDiff = None
+ STATELESS_FUNCTIONS = [
+ *defs.STATELESS_FUNCTIONS,
+ # stateless with defaults
+ defs.spam_full_args_with_defaults,
+ ]
+
for func in defs.STATELESS_CODE:
with self.subTest((func, '(code)')):
_testinternalcapi.verify_stateless_code(func.__code__)
- for func in defs.STATELESS_FUNCTIONS:
+ for func in STATELESS_FUNCTIONS:
with self.subTest((func, '(func)')):
_testinternalcapi.verify_stateless_code(func)
@@ -1110,7 +1159,7 @@ class CodeTest(unittest.TestCase):
with self.assertRaises(Exception):
_testinternalcapi.verify_stateless_code(func.__code__)
- if func not in defs.STATELESS_FUNCTIONS:
+ if func not in STATELESS_FUNCTIONS:
with self.subTest((func, '(func)')):
with self.assertRaises(Exception):
_testinternalcapi.verify_stateless_code(func)
diff --git a/Lib/test/test_codeccallbacks.py b/Lib/test/test_codeccallbacks.py
index a767f67a02c..65d54d1004d 100644
--- a/Lib/test/test_codeccallbacks.py
+++ b/Lib/test/test_codeccallbacks.py
@@ -2,7 +2,6 @@ from _codecs import _unregister_error as _codecs_unregister_error
import codecs
import html.entities
import itertools
-import re
import sys
import unicodedata
import unittest
diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py
index 8c9a0972492..d8666f7290e 100644
--- a/Lib/test/test_codecs.py
+++ b/Lib/test/test_codecs.py
@@ -1,6 +1,7 @@
import codecs
import contextlib
import copy
+import importlib
import io
import pickle
import os
@@ -3111,9 +3112,9 @@ class TransformCodecTest(unittest.TestCase):
def test_alias_modules_exist(self):
encodings_dir = os.path.dirname(encodings.__file__)
for value in encodings.aliases.aliases.values():
- codec_file = os.path.join(encodings_dir, value + ".py")
- self.assertTrue(os.path.isfile(codec_file),
- "Codec file not found: " + codec_file)
+ codec_mod = f"encodings.{value}"
+ self.assertIsNotNone(importlib.util.find_spec(codec_mod),
+ f"Codec module not found: {codec_mod}")
def test_quopri_stateless(self):
# Should encode with quotetabs=True
diff --git a/Lib/test/test_concurrent_futures/test_init.py b/Lib/test/test_concurrent_futures/test_init.py
index df640929309..6b8484c0d5f 100644
--- a/Lib/test/test_concurrent_futures/test_init.py
+++ b/Lib/test/test_concurrent_futures/test_init.py
@@ -20,6 +20,10 @@ INITIALIZER_STATUS = 'uninitialized'
def init(x):
global INITIALIZER_STATUS
INITIALIZER_STATUS = x
+ # InterpreterPoolInitializerTest.test_initializer fails
+ # if we don't have a LOAD_GLOBAL. (It could be any global.)
+ # We will address this separately.
+ INITIALIZER_STATUS
def get_init_status():
return INITIALIZER_STATUS
diff --git a/Lib/test/test_concurrent_futures/test_interpreter_pool.py b/Lib/test/test_concurrent_futures/test_interpreter_pool.py
index f6c62ae4b20..844dfdd6fc9 100644
--- a/Lib/test/test_concurrent_futures/test_interpreter_pool.py
+++ b/Lib/test/test_concurrent_futures/test_interpreter_pool.py
@@ -2,35 +2,78 @@ import asyncio
import contextlib
import io
import os
-import pickle
+import sys
import time
import unittest
-from concurrent.futures.interpreter import (
- ExecutionFailed, BrokenInterpreterPool,
-)
+from concurrent.futures.interpreter import BrokenInterpreterPool
+from concurrent import interpreters
+from concurrent.interpreters import _queues as queues
import _interpreters
from test import support
+from test.support import os_helper
+from test.support import script_helper
import test.test_asyncio.utils as testasyncio_utils
-from test.support.interpreters import queues
from .executor import ExecutorTest, mul
from .util import BaseTestCase, InterpreterPoolMixin, setup_module
+WINDOWS = sys.platform.startswith('win')
+
+
+@contextlib.contextmanager
+def nonblocking(fd):
+ blocking = os.get_blocking(fd)
+ if blocking:
+ os.set_blocking(fd, False)
+ try:
+ yield
+ finally:
+ if blocking:
+ os.set_blocking(fd, blocking)
+
+
+def read_file_with_timeout(fd, nbytes, timeout):
+ with nonblocking(fd):
+ end = time.time() + timeout
+ try:
+ return os.read(fd, nbytes)
+ except BlockingIOError:
+ pass
+ while time.time() < end:
+ try:
+ return os.read(fd, nbytes)
+ except BlockingIOError:
+ continue
+ else:
+ raise TimeoutError('nothing to read')
+
+
+if not WINDOWS:
+ import select
+ def read_file_with_timeout(fd, nbytes, timeout):
+ r, _, _ = select.select([fd], [], [], timeout)
+ if fd not in r:
+ raise TimeoutError('nothing to read')
+ return os.read(fd, nbytes)
+
+
def noop():
pass
def write_msg(fd, msg):
+ import os
os.write(fd, msg + b'\0')
-def read_msg(fd):
+def read_msg(fd, timeout=10.0):
msg = b''
- while ch := os.read(fd, 1):
- if ch == b'\0':
- return msg
+ ch = read_file_with_timeout(fd, 1, timeout)
+ while ch != b'\0':
msg += ch
+ ch = os.read(fd, 1)
+ return msg
def get_current_name():
@@ -113,6 +156,38 @@ class InterpreterPoolExecutorTest(
self.assertEqual(before, b'\0')
self.assertEqual(after, msg)
+ def test_init_with___main___global(self):
+ # See https://github.com/python/cpython/pull/133957#issuecomment-2927415311.
+ text = """if True:
+ from concurrent.futures import InterpreterPoolExecutor
+
+ INITIALIZER_STATUS = 'uninitialized'
+
+ def init(x):
+ global INITIALIZER_STATUS
+ INITIALIZER_STATUS = x
+ INITIALIZER_STATUS
+
+ def get_init_status():
+ return INITIALIZER_STATUS
+
+ if __name__ == "__main__":
+ exe = InterpreterPoolExecutor(initializer=init,
+ initargs=('initialized',))
+ fut = exe.submit(get_init_status)
+ print(fut.result()) # 'initialized'
+ exe.shutdown(wait=True)
+ print(INITIALIZER_STATUS) # 'uninitialized'
+ """
+ with os_helper.temp_dir() as tempdir:
+ filename = script_helper.make_script(tempdir, 'my-script', text)
+ res = script_helper.assert_python_ok(filename)
+ stdout = res.out.decode('utf-8').strip()
+ self.assertEqual(stdout.splitlines(), [
+ 'initialized',
+ 'uninitialized',
+ ])
+
def test_init_closure(self):
count = 0
def init1():
@@ -121,10 +196,19 @@ class InterpreterPoolExecutorTest(
nonlocal count
count += 1
- with self.assertRaises(pickle.PicklingError):
- self.executor_type(initializer=init1)
- with self.assertRaises(pickle.PicklingError):
- self.executor_type(initializer=init2)
+ with contextlib.redirect_stderr(io.StringIO()) as stderr:
+ with self.executor_type(initializer=init1) as executor:
+ fut = executor.submit(lambda: None)
+ self.assertIn('NotShareableError', stderr.getvalue())
+ with self.assertRaises(BrokenInterpreterPool):
+ fut.result()
+
+ with contextlib.redirect_stderr(io.StringIO()) as stderr:
+ with self.executor_type(initializer=init2) as executor:
+ fut = executor.submit(lambda: None)
+ self.assertIn('NotShareableError', stderr.getvalue())
+ with self.assertRaises(BrokenInterpreterPool):
+ fut.result()
def test_init_instance_method(self):
class Spam:
@@ -132,26 +216,12 @@ class InterpreterPoolExecutorTest(
raise NotImplementedError
spam = Spam()
- with self.assertRaises(pickle.PicklingError):
- self.executor_type(initializer=spam.initializer)
-
- def test_init_shared(self):
- msg = b'eggs'
- r, w = self.pipe()
- script = f"""if True:
- import os
- if __name__ != '__main__':
- import __main__
- spam = __main__.spam
- os.write({w}, spam + b'\\0')
- """
-
- executor = self.executor_type(shared={'spam': msg})
- fut = executor.submit(exec, script)
- fut.result()
- after = read_msg(r)
-
- self.assertEqual(after, msg)
+ with contextlib.redirect_stderr(io.StringIO()) as stderr:
+ with self.executor_type(initializer=spam.initializer) as executor:
+ fut = executor.submit(lambda: None)
+ self.assertIn('NotShareableError', stderr.getvalue())
+ with self.assertRaises(BrokenInterpreterPool):
+ fut.result()
@unittest.expectedFailure
def test_init_exception_in_script(self):
@@ -178,8 +248,6 @@ class InterpreterPoolExecutorTest(
stderr = stderr.getvalue()
self.assertIn('ExecutionFailed: Exception: spam', stderr)
self.assertIn('Uncaught in the interpreter:', stderr)
- self.assertIn('The above exception was the direct cause of the following exception:',
- stderr)
@unittest.expectedFailure
def test_submit_script(self):
@@ -208,10 +276,14 @@ class InterpreterPoolExecutorTest(
return spam
executor = self.executor_type()
- with self.assertRaises(pickle.PicklingError):
- executor.submit(task1)
- with self.assertRaises(pickle.PicklingError):
- executor.submit(task2)
+
+ fut = executor.submit(task1)
+ with self.assertRaises(_interpreters.NotShareableError):
+ fut.result()
+
+ fut = executor.submit(task2)
+ with self.assertRaises(_interpreters.NotShareableError):
+ fut.result()
def test_submit_local_instance(self):
class Spam:
@@ -219,8 +291,9 @@ class InterpreterPoolExecutorTest(
self.value = True
executor = self.executor_type()
- with self.assertRaises(pickle.PicklingError):
- executor.submit(Spam)
+ fut = executor.submit(Spam)
+ with self.assertRaises(_interpreters.NotShareableError):
+ fut.result()
def test_submit_instance_method(self):
class Spam:
@@ -229,8 +302,9 @@ class InterpreterPoolExecutorTest(
spam = Spam()
executor = self.executor_type()
- with self.assertRaises(pickle.PicklingError):
- executor.submit(spam.run)
+ fut = executor.submit(spam.run)
+ with self.assertRaises(_interpreters.NotShareableError):
+ fut.result()
def test_submit_func_globals(self):
executor = self.executor_type()
@@ -242,13 +316,14 @@ class InterpreterPoolExecutorTest(
@unittest.expectedFailure
def test_submit_exception_in_script(self):
+ # Scripts are not supported currently.
fut = self.executor.submit('raise Exception("spam")')
with self.assertRaises(Exception) as captured:
fut.result()
self.assertIs(type(captured.exception), Exception)
self.assertEqual(str(captured.exception), 'spam')
cause = captured.exception.__cause__
- self.assertIs(type(cause), ExecutionFailed)
+ self.assertIs(type(cause), interpreters.ExecutionFailed)
for attr in ('__name__', '__qualname__', '__module__'):
self.assertEqual(getattr(cause.excinfo.type, attr),
getattr(Exception, attr))
@@ -261,7 +336,7 @@ class InterpreterPoolExecutorTest(
self.assertIs(type(captured.exception), Exception)
self.assertEqual(str(captured.exception), 'spam')
cause = captured.exception.__cause__
- self.assertIs(type(cause), ExecutionFailed)
+ self.assertIs(type(cause), interpreters.ExecutionFailed)
for attr in ('__name__', '__qualname__', '__module__'):
self.assertEqual(getattr(cause.excinfo.type, attr),
getattr(Exception, attr))
@@ -269,16 +344,93 @@ class InterpreterPoolExecutorTest(
def test_saturation(self):
blocker = queues.create()
- executor = self.executor_type(4, shared=dict(blocker=blocker))
+ executor = self.executor_type(4)
for i in range(15 * executor._max_workers):
- executor.submit(exec, 'import __main__; __main__.blocker.get()')
- #executor.submit('blocker.get()')
+ executor.submit(blocker.get)
self.assertEqual(len(executor._threads), executor._max_workers)
for i in range(15 * executor._max_workers):
blocker.put_nowait(None)
executor.shutdown(wait=True)
+ def test_blocking(self):
+ # There is no guarantee that a worker will be created for every
+ # submitted task. That's because there's a race between:
+ #
+ # * a new worker thread, created when task A was just submitted,
+ # becoming non-idle when it picks up task A
+ # * after task B is added to the queue, a new worker thread
+ # is started only if there are no idle workers
+ # (the check in ThreadPoolExecutor._adjust_thread_count())
+ #
+ # That means we must not block waiting for *all* tasks to report
+ # "ready" before we unblock the known-ready workers.
+ ready = queues.create()
+ blocker = queues.create()
+
+ def run(taskid, ready, blocker):
+ # There can't be any globals here.
+ ready.put_nowait(taskid)
+ blocker.get() # blocking
+
+ numtasks = 10
+ futures = []
+ with self.executor_type() as executor:
+ # Request the jobs.
+ for i in range(numtasks):
+ fut = executor.submit(run, i, ready, blocker)
+ futures.append(fut)
+ pending = numtasks
+ while pending > 0:
+ # Wait for any to be ready.
+ done = 0
+ for _ in range(pending):
+ try:
+ ready.get(timeout=1) # blocking
+ except interpreters.QueueEmpty:
+ pass
+ else:
+ done += 1
+ pending -= done
+ # Unblock the workers.
+ for _ in range(done):
+ blocker.put_nowait(None)
+
+ def test_blocking_with_limited_workers(self):
+ # This is essentially the same as test_blocking,
+ # but we explicitly force a limited number of workers,
+ # instead of it happening implicitly sometimes due to a race.
+ ready = queues.create()
+ blocker = queues.create()
+
+ def run(taskid, ready, blocker):
+ # There can't be any globals here.
+ ready.put_nowait(taskid)
+ blocker.get() # blocking
+
+ numtasks = 10
+ futures = []
+ with self.executor_type(4) as executor:
+ # Request the jobs.
+ for i in range(numtasks):
+ fut = executor.submit(run, i, ready, blocker)
+ futures.append(fut)
+ pending = numtasks
+ while pending > 0:
+ # Wait for any to be ready.
+ done = 0
+ for _ in range(pending):
+ try:
+ ready.get(timeout=1) # blocking
+ except interpreters.QueueEmpty:
+ pass
+ else:
+ done += 1
+ pending -= done
+ # Unblock the workers.
+ for _ in range(done):
+ blocker.put_nowait(None)
+
@support.requires_gil_enabled("gh-117344: test is flaky without the GIL")
def test_idle_thread_reuse(self):
executor = self.executor_type()
@@ -289,12 +441,21 @@ class InterpreterPoolExecutorTest(
executor.shutdown(wait=True)
def test_pickle_errors_propagate(self):
- # GH-125864: Pickle errors happen before the script tries to execute, so the
- # queue used to wait infinitely.
-
+ # GH-125864: Pickle errors happen before the script tries to execute,
+ # so the queue used to wait infinitely.
fut = self.executor.submit(PickleShenanigans(0))
- with self.assertRaisesRegex(RuntimeError, "gotcha"):
+ expected = interpreters.NotShareableError
+ with self.assertRaisesRegex(expected, 'args not shareable') as cm:
fut.result()
+ self.assertRegex(str(cm.exception.__cause__), 'unpickled')
+
+ def test_no_stale_references(self):
+ # Weak references don't cross between interpreters.
+ raise unittest.SkipTest('not applicable')
+
+ def test_free_reference(self):
+ # Weak references don't cross between interpreters.
+ raise unittest.SkipTest('not applicable')
class AsyncioTest(InterpretersMixin, testasyncio_utils.TestCase):
diff --git a/Lib/test/test_concurrent_futures/test_shutdown.py b/Lib/test/test_concurrent_futures/test_shutdown.py
index 7a4065afd46..99b315b47e2 100644
--- a/Lib/test/test_concurrent_futures/test_shutdown.py
+++ b/Lib/test/test_concurrent_futures/test_shutdown.py
@@ -330,6 +330,64 @@ class ProcessPoolShutdownTest(ExecutorShutdownTest):
# shutdown.
assert all([r == abs(v) for r, v in zip(res, range(-5, 5))])
+ @classmethod
+ def _failing_task_gh_132969(cls, n):
+ raise ValueError("failing task")
+
+ @classmethod
+ def _good_task_gh_132969(cls, n):
+ time.sleep(0.1 * n)
+ return n
+
+ def _run_test_issue_gh_132969(self, max_workers):
+ # max_workers=2 will repro exception
+ # max_workers=4 will repro exception and then hang
+
+ # Repro conditions
+ # max_tasks_per_child=1
+ # a task ends abnormally
+ # shutdown(wait=False) is called
+ start_method = self.get_context().get_start_method()
+ if (start_method == "fork" or
+ (start_method == "forkserver" and sys.platform.startswith("win"))):
+ self.skipTest(f"Skipping test for {start_method = }")
+ executor = futures.ProcessPoolExecutor(
+ max_workers=max_workers,
+ max_tasks_per_child=1,
+ mp_context=self.get_context())
+ f1 = executor.submit(ProcessPoolShutdownTest._good_task_gh_132969, 1)
+ f2 = executor.submit(ProcessPoolShutdownTest._failing_task_gh_132969, 2)
+ f3 = executor.submit(ProcessPoolShutdownTest._good_task_gh_132969, 3)
+ result = 0
+ try:
+ result += f1.result()
+ result += f2.result()
+ result += f3.result()
+ except ValueError:
+ # stop processing results upon first exception
+ pass
+
+ # Ensure that the executor cleans up after called
+ # shutdown with wait=False
+ executor_manager_thread = executor._executor_manager_thread
+ executor.shutdown(wait=False)
+ time.sleep(0.2)
+ executor_manager_thread.join()
+ return result
+
+ def test_shutdown_gh_132969_case_1(self):
+ # gh-132969: test that exception "object of type 'NoneType' has no len()"
+ # is not raised when shutdown(wait=False) is called.
+ result = self._run_test_issue_gh_132969(2)
+ self.assertEqual(result, 1)
+
+ def test_shutdown_gh_132969_case_2(self):
+ # gh-132969: test that process does not hang and
+ # exception "object of type 'NoneType' has no len()" is not raised
+ # when shutdown(wait=False) is called.
+ result = self._run_test_issue_gh_132969(4)
+ self.assertEqual(result, 1)
+
create_executor_tests(globals(), ProcessPoolShutdownTest,
executor_mixins=(ProcessPoolForkMixin,
diff --git a/Lib/test/test_configparser.py b/Lib/test/test_configparser.py
index 23904d17d32..e7364e18742 100644
--- a/Lib/test/test_configparser.py
+++ b/Lib/test/test_configparser.py
@@ -986,12 +986,12 @@ class ConfigParserTestCase(BasicTestCase, unittest.TestCase):
def test_defaults_keyword(self):
"""bpo-23835 fix for ConfigParser"""
- cf = self.newconfig(defaults={1: 2.4})
- self.assertEqual(cf[self.default_section]['1'], '2.4')
- self.assertAlmostEqual(cf[self.default_section].getfloat('1'), 2.4)
- cf = self.newconfig(defaults={"A": 5.2})
- self.assertEqual(cf[self.default_section]['a'], '5.2')
- self.assertAlmostEqual(cf[self.default_section].getfloat('a'), 5.2)
+ cf = self.newconfig(defaults={1: 2.5})
+ self.assertEqual(cf[self.default_section]['1'], '2.5')
+ self.assertAlmostEqual(cf[self.default_section].getfloat('1'), 2.5)
+ cf = self.newconfig(defaults={"A": 5.25})
+ self.assertEqual(cf[self.default_section]['a'], '5.25')
+ self.assertAlmostEqual(cf[self.default_section].getfloat('a'), 5.25)
class ConfigParserTestCaseNoInterpolation(BasicTestCase, unittest.TestCase):
diff --git a/Lib/test/test_cprofile.py b/Lib/test/test_cprofile.py
index 192c8eab26e..57e818b1c68 100644
--- a/Lib/test/test_cprofile.py
+++ b/Lib/test/test_cprofile.py
@@ -125,21 +125,22 @@ class CProfileTest(ProfileTest):
"""
gh-106152
generator.throw() should trigger a call in cProfile
- In the any() call below, there should be two entries for the generator:
- * one for the call to __next__ which gets a True and terminates any
- * one when the generator is garbage collected which will effectively
- do a throw.
"""
+
+ def gen():
+ yield
+
pr = self.profilerclass()
pr.enable()
- any(a == 1 for a in (1, 2))
+ g = gen()
+ try:
+ g.throw(SyntaxError)
+ except SyntaxError:
+ pass
pr.disable()
pr.create_stats()
- for func, (cc, nc, _, _, _) in pr.stats.items():
- if func[2] == "<genexpr>":
- self.assertEqual(cc, 1)
- self.assertEqual(nc, 1)
+        self.assertTrue(any("throw" in func[2] for func in pr.stats.keys()))
def test_bad_descriptor(self):
# gh-132250
diff --git a/Lib/test/test_crossinterp.py b/Lib/test/test_crossinterp.py
index c54635eaeab..2fa0077a09b 100644
--- a/Lib/test/test_crossinterp.py
+++ b/Lib/test/test_crossinterp.py
@@ -1,6 +1,4 @@
import contextlib
-import importlib
-import importlib.util
import itertools
import sys
import types
diff --git a/Lib/test/test_csv.py b/Lib/test/test_csv.py
index 9aace57633b..60feab225a1 100644
--- a/Lib/test/test_csv.py
+++ b/Lib/test/test_csv.py
@@ -1122,19 +1122,22 @@ class TestDialectValidity(unittest.TestCase):
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"quotechar" must be a 1-character string')
+ '"quotechar" must be a unicode character or None, '
+ 'not a string of length 0')
mydialect.quotechar = "''"
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"quotechar" must be a 1-character string')
+ '"quotechar" must be a unicode character or None, '
+ 'not a string of length 2')
mydialect.quotechar = 4
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"quotechar" must be string or None, not int')
+ '"quotechar" must be a unicode character or None, '
+ 'not int')
def test_delimiter(self):
class mydialect(csv.Dialect):
@@ -1151,31 +1154,32 @@ class TestDialectValidity(unittest.TestCase):
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"delimiter" must be a 1-character string')
+ '"delimiter" must be a unicode character, '
+ 'not a string of length 3')
mydialect.delimiter = ""
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"delimiter" must be a 1-character string')
+ '"delimiter" must be a unicode character, not a string of length 0')
mydialect.delimiter = b","
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"delimiter" must be string, not bytes')
+ '"delimiter" must be a unicode character, not bytes')
mydialect.delimiter = 4
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"delimiter" must be string, not int')
+ '"delimiter" must be a unicode character, not int')
mydialect.delimiter = None
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"delimiter" must be string, not NoneType')
+ '"delimiter" must be a unicode character, not NoneType')
def test_escapechar(self):
class mydialect(csv.Dialect):
@@ -1189,20 +1193,32 @@ class TestDialectValidity(unittest.TestCase):
self.assertEqual(d.escapechar, "\\")
mydialect.escapechar = ""
- with self.assertRaisesRegex(csv.Error, '"escapechar" must be a 1-character string'):
+ with self.assertRaises(csv.Error) as cm:
mydialect()
+ self.assertEqual(str(cm.exception),
+ '"escapechar" must be a unicode character or None, '
+ 'not a string of length 0')
mydialect.escapechar = "**"
- with self.assertRaisesRegex(csv.Error, '"escapechar" must be a 1-character string'):
+ with self.assertRaises(csv.Error) as cm:
mydialect()
+ self.assertEqual(str(cm.exception),
+ '"escapechar" must be a unicode character or None, '
+ 'not a string of length 2')
mydialect.escapechar = b"*"
- with self.assertRaisesRegex(csv.Error, '"escapechar" must be string or None, not bytes'):
+ with self.assertRaises(csv.Error) as cm:
mydialect()
+ self.assertEqual(str(cm.exception),
+ '"escapechar" must be a unicode character or None, '
+ 'not bytes')
mydialect.escapechar = 4
- with self.assertRaisesRegex(csv.Error, '"escapechar" must be string or None, not int'):
+ with self.assertRaises(csv.Error) as cm:
mydialect()
+ self.assertEqual(str(cm.exception),
+ '"escapechar" must be a unicode character or None, '
+ 'not int')
def test_lineterminator(self):
class mydialect(csv.Dialect):
@@ -1223,7 +1239,13 @@ class TestDialectValidity(unittest.TestCase):
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"lineterminator" must be a string')
+ '"lineterminator" must be a string, not int')
+
+ mydialect.lineterminator = None
+ with self.assertRaises(csv.Error) as cm:
+ mydialect()
+ self.assertEqual(str(cm.exception),
+ '"lineterminator" must be a string, not NoneType')
def test_invalid_chars(self):
def create_invalid(field_name, value, **kwargs):
diff --git a/Lib/test/test_ctypes/_support.py b/Lib/test/test_ctypes/_support.py
index 946d654a19a..700657a4e41 100644
--- a/Lib/test/test_ctypes/_support.py
+++ b/Lib/test/test_ctypes/_support.py
@@ -3,7 +3,6 @@
import ctypes
from _ctypes import Structure, Union, _Pointer, Array, _SimpleCData, CFuncPtr
import sys
-from test import support
_CData = Structure.__base__
diff --git a/Lib/test/test_ctypes/test_byteswap.py b/Lib/test/test_ctypes/test_byteswap.py
index ea5951603f9..f14e1aa32e1 100644
--- a/Lib/test/test_ctypes/test_byteswap.py
+++ b/Lib/test/test_ctypes/test_byteswap.py
@@ -1,5 +1,4 @@
import binascii
-import ctypes
import math
import struct
import sys
diff --git a/Lib/test/test_ctypes/test_generated_structs.py b/Lib/test/test_ctypes/test_generated_structs.py
index aa448fad5bb..1cb46a82701 100644
--- a/Lib/test/test_ctypes/test_generated_structs.py
+++ b/Lib/test/test_ctypes/test_generated_structs.py
@@ -10,7 +10,7 @@ Run this module to regenerate the files:
"""
import unittest
-from test.support import import_helper, verbose
+from test.support import import_helper
import re
from dataclasses import dataclass
from functools import cached_property
diff --git a/Lib/test/test_ctypes/test_incomplete.py b/Lib/test/test_ctypes/test_incomplete.py
index fefdfe9102e..3189fcd1bd1 100644
--- a/Lib/test/test_ctypes/test_incomplete.py
+++ b/Lib/test/test_ctypes/test_incomplete.py
@@ -1,6 +1,5 @@
import ctypes
import unittest
-import warnings
from ctypes import Structure, POINTER, pointer, c_char_p
# String-based "incomplete pointers" were implemented in ctypes 0.6.3 (2003, when
@@ -21,9 +20,7 @@ class TestSetPointerType(unittest.TestCase):
_fields_ = [("name", c_char_p),
("next", lpcell)]
- with warnings.catch_warnings():
- warnings.simplefilter('ignore', DeprecationWarning)
- ctypes.SetPointerType(lpcell, cell)
+ lpcell.set_type(cell)
self.assertIs(POINTER(cell), lpcell)
@@ -50,10 +47,9 @@ class TestSetPointerType(unittest.TestCase):
_fields_ = [("name", c_char_p),
("next", lpcell)]
- with self.assertWarns(DeprecationWarning):
- ctypes.SetPointerType(lpcell, cell)
-
+ lpcell.set_type(cell)
self.assertIs(POINTER(cell), lpcell)
+
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/test/test_ctypes/test_parameters.py b/Lib/test/test_ctypes/test_parameters.py
index f89521cf8b3..46f8ff93efa 100644
--- a/Lib/test/test_ctypes/test_parameters.py
+++ b/Lib/test/test_ctypes/test_parameters.py
@@ -1,3 +1,4 @@
+import sys
import unittest
import test.support
from ctypes import (CDLL, PyDLL, ArgumentError,
@@ -240,7 +241,8 @@ class SimpleTypesTestCase(unittest.TestCase):
self.assertRegex(repr(c_ulonglong.from_param(20000)), r"^<cparam '[LIQ]' \(20000\)>$")
self.assertEqual(repr(c_float.from_param(1.5)), "<cparam 'f' (1.5)>")
self.assertEqual(repr(c_double.from_param(1.5)), "<cparam 'd' (1.5)>")
- self.assertEqual(repr(c_double.from_param(1e300)), "<cparam 'd' (1e+300)>")
+ if sys.float_repr_style == 'short':
+ self.assertEqual(repr(c_double.from_param(1e300)), "<cparam 'd' (1e+300)>")
self.assertRegex(repr(c_longdouble.from_param(1.5)), r"^<cparam ('d' \(1.5\)|'g' at 0x[A-Fa-f0-9]+)>$")
self.assertRegex(repr(c_char_p.from_param(b'hihi')), r"^<cparam 'z' \(0x[A-Fa-f0-9]+\)>$")
self.assertRegex(repr(c_wchar_p.from_param('hihi')), r"^<cparam 'Z' \(0x[A-Fa-f0-9]+\)>$")
diff --git a/Lib/test/test_dbm.py b/Lib/test/test_dbm.py
index a10922a403e..7e8d78b8940 100644
--- a/Lib/test/test_dbm.py
+++ b/Lib/test/test_dbm.py
@@ -135,6 +135,67 @@ class AnyDBMTestCase:
assert(f[key] == b"Python:")
f.close()
+ def test_anydbm_readonly_reorganize(self):
+ self.init_db()
+ with dbm.open(_fname, 'r') as d:
+ # Early stopping.
+ if not hasattr(d, 'reorganize'):
+                self.skipTest("method reorganize not available in this dbm submodule")
+
+ self.assertRaises(dbm.error, lambda: d.reorganize())
+
+ def test_anydbm_reorganize_not_changed_content(self):
+ self.init_db()
+ with dbm.open(_fname, 'c') as d:
+ # Early stopping.
+ if not hasattr(d, 'reorganize'):
+                self.skipTest("method reorganize not available in this dbm submodule")
+
+ keys_before = sorted(d.keys())
+ values_before = [d[k] for k in keys_before]
+ d.reorganize()
+ keys_after = sorted(d.keys())
+ values_after = [d[k] for k in keys_before]
+ self.assertEqual(keys_before, keys_after)
+ self.assertEqual(values_before, values_after)
+
+ def test_anydbm_reorganize_decreased_size(self):
+
+ def _calculate_db_size(db_path):
+ if os.path.isfile(db_path):
+ return os.path.getsize(db_path)
+ total_size = 0
+ for root, _, filenames in os.walk(db_path):
+ for filename in filenames:
+ file_path = os.path.join(root, filename)
+ total_size += os.path.getsize(file_path)
+ return total_size
+
+        # This test requires relatively large databases to reliably show a difference in size before and after reorganizing.
+ with dbm.open(_fname, 'n') as f:
+ # Early stopping.
+ if not hasattr(f, 'reorganize'):
+                self.skipTest("method reorganize not available in this dbm submodule")
+
+ for k in self._dict:
+ f[k.encode('ascii')] = self._dict[k] * 100000
+ db_keys = list(f.keys())
+
+        # Make sure to calculate size of database only after file is closed to ensure file contents are flushed to disk.
+ size_before = _calculate_db_size(os.path.dirname(_fname))
+
+ # Delete some elements from the start of the database.
+ keys_to_delete = db_keys[:len(db_keys) // 2]
+ with dbm.open(_fname, 'c') as f:
+ for k in keys_to_delete:
+ del f[k]
+ f.reorganize()
+
+        # Make sure to calculate size of database only after file is closed to ensure file contents are flushed to disk.
+ size_after = _calculate_db_size(os.path.dirname(_fname))
+
+ self.assertLess(size_after, size_before)
+
def test_open_with_bytes(self):
dbm.open(os.fsencode(_fname), "c").close()
diff --git a/Lib/test/test_dbm_gnu.py b/Lib/test/test_dbm_gnu.py
index 66268c42a30..e0b988b7b95 100644
--- a/Lib/test/test_dbm_gnu.py
+++ b/Lib/test/test_dbm_gnu.py
@@ -74,12 +74,12 @@ class TestGdbm(unittest.TestCase):
# Test the flag parameter open() by trying all supported flag modes.
all = set(gdbm.open_flags)
# Test standard flags (presumably "crwn").
- modes = all - set('fsu')
+ modes = all - set('fsum')
for mode in sorted(modes): # put "c" mode first
self.g = gdbm.open(filename, mode)
self.g.close()
- # Test additional flags (presumably "fsu").
+ # Test additional flags (presumably "fsum").
flags = all - set('crwn')
for mode in modes:
for flag in flags:
@@ -217,6 +217,29 @@ class TestGdbm(unittest.TestCase):
create_empty_file(os.path.join(d, 'test'))
self.assertRaises(gdbm.error, gdbm.open, filename, 'r')
+ @unittest.skipUnless('m' in gdbm.open_flags, "requires 'm' in open_flags")
+ def test_nommap_no_crash(self):
+ self.g = g = gdbm.open(filename, 'nm')
+ os.truncate(filename, 0)
+
+ g.get(b'a', b'c')
+ g.keys()
+ g.firstkey()
+ g.nextkey(b'a')
+ with self.assertRaises(KeyError):
+ g[b'a']
+ with self.assertRaises(gdbm.error):
+ len(g)
+
+ with self.assertRaises(gdbm.error):
+ g[b'a'] = b'c'
+ with self.assertRaises(gdbm.error):
+ del g[b'a']
+ with self.assertRaises(gdbm.error):
+ g.setdefault(b'a', b'c')
+ with self.assertRaises(gdbm.error):
+ g.reorganize()
+
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py
index 9e298401dc3..ef64b878805 100644
--- a/Lib/test/test_decimal.py
+++ b/Lib/test/test_decimal.py
@@ -28,7 +28,6 @@ import logging
import math
import os, sys
import operator
-import warnings
import pickle, copy
import unittest
import numbers
@@ -982,6 +981,7 @@ class FormatTest:
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
+ ('.01f', '3.14159265', '3.1'), # leading zero in precision
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
@@ -1067,6 +1067,7 @@ class FormatTest:
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
+ ('008,', '123456', '0,123,456'), # leading zero in width
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py
index ea076ba4fef..f6ec2cf5ce8 100644
--- a/Lib/test/test_descr.py
+++ b/Lib/test/test_descr.py
@@ -4114,6 +4114,34 @@ class ClassPropertiesAndMethods(unittest.TestCase):
else:
self.fail("shouldn't be able to create inheritance cycles")
+ def test_assign_bases_many_subclasses(self):
+ # This is intended to check that typeobject.c:queue_slot_update() can
+ # handle updating many subclasses when a slot method is re-assigned.
+ class A:
+ x = 'hello'
+ def __call__(self):
+ return 123
+ def __getitem__(self, index):
+ return None
+
+ class X:
+ x = 'bye'
+
+ class B(A):
+ pass
+
+ subclasses = []
+ for i in range(1000):
+ sc = type(f'Sub{i}', (B,), {})
+ subclasses.append(sc)
+
+ self.assertEqual(subclasses[0]()(), 123)
+ self.assertEqual(subclasses[0]().x, 'hello')
+ B.__bases__ = (X,)
+ with self.assertRaises(TypeError):
+ subclasses[0]()()
+ self.assertEqual(subclasses[0]().x, 'bye')
+
def test_builtin_bases(self):
# Make sure all the builtin types can have their base queried without
# segfaulting. See issue #5787.
diff --git a/Lib/test/test_dict.py b/Lib/test/test_dict.py
index 52c38e42eca..60c62430370 100644
--- a/Lib/test/test_dict.py
+++ b/Lib/test/test_dict.py
@@ -290,6 +290,38 @@ class DictTest(unittest.TestCase):
['Cannot convert dictionary update sequence element #0 to a sequence'],
)
+ def test_update_shared_keys(self):
+ class MyClass: pass
+
+ # Subclass str to enable us to create an object during the
+ # dict.update() call.
+ class MyStr(str):
+ def __hash__(self):
+ return super().__hash__()
+
+ def __eq__(self, other):
+ # Create an object that shares the same PyDictKeysObject as
+ # obj.__dict__.
+ obj2 = MyClass()
+ obj2.a = "a"
+ obj2.b = "b"
+ obj2.c = "c"
+ return super().__eq__(other)
+
+ obj = MyClass()
+ obj.a = "a"
+ obj.b = "b"
+
+ x = {}
+ x[MyStr("a")] = MyStr("a")
+
+ # gh-132617: this previously raised "dict mutated during update" error
+ x.update(obj.__dict__)
+
+ self.assertEqual(x, {
+ MyStr("a"): "a",
+ "b": "b",
+ })
def test_fromkeys(self):
self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
diff --git a/Lib/test/test_difflib.py b/Lib/test/test_difflib.py
index 9e217249be7..6ac584a08d1 100644
--- a/Lib/test/test_difflib.py
+++ b/Lib/test/test_difflib.py
@@ -255,21 +255,21 @@ class TestSFpatches(unittest.TestCase):
html_diff = difflib.HtmlDiff()
output = html_diff.make_file(patch914575_from1.splitlines(),
patch914575_to1.splitlines())
- self.assertIn('content="text/html; charset=utf-8"', output)
+ self.assertIn('charset="utf-8"', output)
def test_make_file_iso88591_charset(self):
html_diff = difflib.HtmlDiff()
output = html_diff.make_file(patch914575_from1.splitlines(),
patch914575_to1.splitlines(),
charset='iso-8859-1')
- self.assertIn('content="text/html; charset=iso-8859-1"', output)
+ self.assertIn('charset="iso-8859-1"', output)
def test_make_file_usascii_charset_with_nonascii_input(self):
html_diff = difflib.HtmlDiff()
output = html_diff.make_file(patch914575_nonascii_from1.splitlines(),
patch914575_nonascii_to1.splitlines(),
charset='us-ascii')
- self.assertIn('content="text/html; charset=us-ascii"', output)
+ self.assertIn('charset="us-ascii"', output)
self.assertIn('&#305;mpl&#305;c&#305;t', output)
class TestDiffer(unittest.TestCase):
diff --git a/Lib/test/test_difflib_expect.html b/Lib/test/test_difflib_expect.html
index 9f33a9e9c9c..2346a6f9f8d 100644
--- a/Lib/test/test_difflib_expect.html
+++ b/Lib/test/test_difflib_expect.html
@@ -1,22 +1,42 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
-<html>
-
+<!DOCTYPE html>
+<html lang="en">
<head>
- <meta http-equiv="Content-Type"
- content="text/html; charset=utf-8" />
- <title></title>
- <style type="text/css">
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <title>Diff comparison</title>
+ <style>
:root {color-scheme: light dark}
- table.diff {font-family: Menlo, Consolas, Monaco, Liberation Mono, Lucida Console, monospace; border:medium}
- .diff_header {background-color:#e0e0e0}
- td.diff_header {text-align:right}
- .diff_next {background-color:#c0c0c0}
+ table.diff {
+ font-family: Menlo, Consolas, Monaco, Liberation Mono, Lucida Console, monospace;
+ border: medium;
+ }
+ .diff_header {
+ background-color: #e0e0e0;
+ font-weight: bold;
+ }
+ td.diff_header {
+ text-align: right;
+ padding: 0 8px;
+ }
+ .diff_next {
+ background-color: #c0c0c0;
+ padding: 4px 0;
+ }
.diff_add {background-color:palegreen}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}
+ table.diff[summary="Legends"] {
+ margin-top: 20px;
+ border: 1px solid #ccc;
+ }
+ table.diff[summary="Legends"] th {
+ background-color: #e0e0e0;
+ padding: 4px 8px;
+ }
+ table.diff[summary="Legends"] td {
+ padding: 4px 8px;
+ }
@media (prefers-color-scheme: dark) {
.diff_header {background-color:#666}
@@ -24,6 +44,8 @@
.diff_add {background-color:darkgreen}
.diff_chg {background-color:#847415}
.diff_sub {background-color:darkred}
+ table.diff[summary="Legends"] {border-color:#555}
+ table.diff[summary="Legends"] th{background-color:#666}
}
</style>
</head>
diff --git a/Lib/test/test_dis.py b/Lib/test/test_dis.py
index 547f6de5f5a..355990ed58e 100644
--- a/Lib/test/test_dis.py
+++ b/Lib/test/test_dis.py
@@ -606,7 +606,7 @@ dis_asyncwith = """\
POP_TOP
L1: RESUME 0
-%4d LOAD_FAST_BORROW 0 (c)
+%4d LOAD_FAST 0 (c)
COPY 1
LOAD_SPECIAL 3 (__aexit__)
SWAP 2
@@ -851,7 +851,7 @@ Disassembly of <code object <genexpr> at 0x..., file "%s", line %d>:
%4d RETURN_GENERATOR
POP_TOP
L1: RESUME 0
- LOAD_FAST_BORROW 0 (.0)
+ LOAD_FAST 0 (.0)
GET_ITER
L2: FOR_ITER 14 (to L3)
STORE_FAST 1 (z)
@@ -1821,7 +1821,7 @@ expected_opinfo_jumpy = [
make_inst(opname='LOAD_SMALL_INT', arg=10, argval=10, argrepr='', offset=12, start_offset=12, starts_line=False, line_number=3),
make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=14, start_offset=14, starts_line=False, line_number=3, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
make_inst(opname='GET_ITER', arg=None, argval=None, argrepr='', offset=22, start_offset=22, starts_line=False, line_number=3),
- make_inst(opname='FOR_ITER', arg=32, argval=92, argrepr='to L4', offset=24, start_offset=24, starts_line=False, line_number=3, label=1, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='FOR_ITER', arg=33, argval=94, argrepr='to L4', offset=24, start_offset=24, starts_line=False, line_number=3, label=1, cache_info=[('counter', 1, b'\x00\x00')]),
make_inst(opname='STORE_FAST', arg=0, argval='i', argrepr='i', offset=28, start_offset=28, starts_line=False, line_number=3),
make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=30, start_offset=30, starts_line=True, line_number=4, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=40, start_offset=40, starts_line=False, line_number=4),
@@ -1840,110 +1840,111 @@ expected_opinfo_jumpy = [
make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=82, start_offset=82, starts_line=False, line_number=7),
make_inst(opname='JUMP_BACKWARD', arg=32, argval=24, argrepr='to L1', offset=84, start_offset=84, starts_line=False, line_number=7, cache_info=[('counter', 1, b'\x00\x00')]),
make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=88, start_offset=88, starts_line=True, line_number=8, label=3),
- make_inst(opname='JUMP_FORWARD', arg=13, argval=118, argrepr='to L5', offset=90, start_offset=90, starts_line=False, line_number=8),
- make_inst(opname='END_FOR', arg=None, argval=None, argrepr='', offset=92, start_offset=92, starts_line=True, line_number=3, label=4),
- make_inst(opname='POP_ITER', arg=None, argval=None, argrepr='', offset=94, start_offset=94, starts_line=False, line_number=3),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=96, start_offset=96, starts_line=True, line_number=10, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_CONST', arg=1, argval='I can haz else clause?', argrepr="'I can haz else clause?'", offset=106, start_offset=106, starts_line=False, line_number=10),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=108, start_offset=108, starts_line=False, line_number=10, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=116, start_offset=116, starts_line=False, line_number=10),
- make_inst(opname='LOAD_FAST_CHECK', arg=0, argval='i', argrepr='i', offset=118, start_offset=118, starts_line=True, line_number=11, label=5),
- make_inst(opname='TO_BOOL', arg=None, argval=None, argrepr='', offset=120, start_offset=120, starts_line=False, line_number=11, cache_info=[('counter', 1, b'\x00\x00'), ('version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_JUMP_IF_FALSE', arg=40, argval=212, argrepr='to L8', offset=128, start_offset=128, starts_line=False, line_number=11, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=132, start_offset=132, starts_line=False, line_number=11),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=134, start_offset=134, starts_line=True, line_number=12, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=144, start_offset=144, starts_line=False, line_number=12),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=146, start_offset=146, starts_line=False, line_number=12, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=154, start_offset=154, starts_line=False, line_number=12),
- make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=156, start_offset=156, starts_line=True, line_number=13),
- make_inst(opname='LOAD_SMALL_INT', arg=1, argval=1, argrepr='', offset=158, start_offset=158, starts_line=False, line_number=13),
- make_inst(opname='BINARY_OP', arg=23, argval=23, argrepr='-=', offset=160, start_offset=160, starts_line=False, line_number=13, cache_info=[('counter', 1, b'\x00\x00'), ('descr', 4, b'\x00\x00\x00\x00\x00\x00\x00\x00')]),
- make_inst(opname='STORE_FAST', arg=0, argval='i', argrepr='i', offset=172, start_offset=172, starts_line=False, line_number=13),
- make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=174, start_offset=174, starts_line=True, line_number=14),
- make_inst(opname='LOAD_SMALL_INT', arg=6, argval=6, argrepr='', offset=176, start_offset=176, starts_line=False, line_number=14),
- make_inst(opname='COMPARE_OP', arg=148, argval='>', argrepr='bool(>)', offset=178, start_offset=178, starts_line=False, line_number=14, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='POP_JUMP_IF_FALSE', arg=3, argval=192, argrepr='to L6', offset=182, start_offset=182, starts_line=False, line_number=14, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=186, start_offset=186, starts_line=False, line_number=14),
- make_inst(opname='JUMP_BACKWARD', arg=37, argval=118, argrepr='to L5', offset=188, start_offset=188, starts_line=True, line_number=15, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=192, start_offset=192, starts_line=True, line_number=16, label=6),
- make_inst(opname='LOAD_SMALL_INT', arg=4, argval=4, argrepr='', offset=194, start_offset=194, starts_line=False, line_number=16),
- make_inst(opname='COMPARE_OP', arg=18, argval='<', argrepr='bool(<)', offset=196, start_offset=196, starts_line=False, line_number=16, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='POP_JUMP_IF_TRUE', arg=3, argval=210, argrepr='to L7', offset=200, start_offset=200, starts_line=False, line_number=16, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=204, start_offset=204, starts_line=False, line_number=16),
- make_inst(opname='JUMP_BACKWARD', arg=46, argval=118, argrepr='to L5', offset=206, start_offset=206, starts_line=False, line_number=16, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='JUMP_FORWARD', arg=11, argval=234, argrepr='to L9', offset=210, start_offset=210, starts_line=True, line_number=17, label=7),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=212, start_offset=212, starts_line=True, line_number=19, label=8, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_CONST', arg=2, argval='Who let lolcatz into this test suite?', argrepr="'Who let lolcatz into this test suite?'", offset=222, start_offset=222, starts_line=False, line_number=19),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=224, start_offset=224, starts_line=False, line_number=19, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=232, start_offset=232, starts_line=False, line_number=19),
- make_inst(opname='NOP', arg=None, argval=None, argrepr='', offset=234, start_offset=234, starts_line=True, line_number=20, label=9),
- make_inst(opname='LOAD_SMALL_INT', arg=1, argval=1, argrepr='', offset=236, start_offset=236, starts_line=True, line_number=21),
- make_inst(opname='LOAD_SMALL_INT', arg=0, argval=0, argrepr='', offset=238, start_offset=238, starts_line=False, line_number=21),
- make_inst(opname='BINARY_OP', arg=11, argval=11, argrepr='/', offset=240, start_offset=240, starts_line=False, line_number=21, cache_info=[('counter', 1, b'\x00\x00'), ('descr', 4, b'\x00\x00\x00\x00\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=252, start_offset=252, starts_line=False, line_number=21),
- make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=254, start_offset=254, starts_line=True, line_number=25),
- make_inst(opname='COPY', arg=1, argval=1, argrepr='', offset=256, start_offset=256, starts_line=False, line_number=25),
- make_inst(opname='LOAD_SPECIAL', arg=1, argval=1, argrepr='__exit__', offset=258, start_offset=258, starts_line=False, line_number=25),
- make_inst(opname='SWAP', arg=2, argval=2, argrepr='', offset=260, start_offset=260, starts_line=False, line_number=25),
- make_inst(opname='SWAP', arg=3, argval=3, argrepr='', offset=262, start_offset=262, starts_line=False, line_number=25),
- make_inst(opname='LOAD_SPECIAL', arg=0, argval=0, argrepr='__enter__', offset=264, start_offset=264, starts_line=False, line_number=25),
- make_inst(opname='CALL', arg=0, argval=0, argrepr='', offset=266, start_offset=266, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='STORE_FAST', arg=1, argval='dodgy', argrepr='dodgy', offset=274, start_offset=274, starts_line=False, line_number=25),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=276, start_offset=276, starts_line=True, line_number=26, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_CONST', arg=3, argval='Never reach this', argrepr="'Never reach this'", offset=286, start_offset=286, starts_line=False, line_number=26),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=288, start_offset=288, starts_line=False, line_number=26, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=296, start_offset=296, starts_line=False, line_number=26),
- make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=298, start_offset=298, starts_line=True, line_number=25),
- make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=300, start_offset=300, starts_line=False, line_number=25),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=90, start_offset=90, starts_line=False, line_number=8),
+ make_inst(opname='JUMP_FORWARD', arg=13, argval=120, argrepr='to L5', offset=92, start_offset=92, starts_line=False, line_number=8),
+ make_inst(opname='END_FOR', arg=None, argval=None, argrepr='', offset=94, start_offset=94, starts_line=True, line_number=3, label=4),
+ make_inst(opname='POP_ITER', arg=None, argval=None, argrepr='', offset=96, start_offset=96, starts_line=False, line_number=3),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=98, start_offset=98, starts_line=True, line_number=10, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_CONST', arg=1, argval='I can haz else clause?', argrepr="'I can haz else clause?'", offset=108, start_offset=108, starts_line=False, line_number=10),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=110, start_offset=110, starts_line=False, line_number=10, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=118, start_offset=118, starts_line=False, line_number=10),
+ make_inst(opname='LOAD_FAST_CHECK', arg=0, argval='i', argrepr='i', offset=120, start_offset=120, starts_line=True, line_number=11, label=5),
+ make_inst(opname='TO_BOOL', arg=None, argval=None, argrepr='', offset=122, start_offset=122, starts_line=False, line_number=11, cache_info=[('counter', 1, b'\x00\x00'), ('version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_JUMP_IF_FALSE', arg=40, argval=214, argrepr='to L8', offset=130, start_offset=130, starts_line=False, line_number=11, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=134, start_offset=134, starts_line=False, line_number=11),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=136, start_offset=136, starts_line=True, line_number=12, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=146, start_offset=146, starts_line=False, line_number=12),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=148, start_offset=148, starts_line=False, line_number=12, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=156, start_offset=156, starts_line=False, line_number=12),
+ make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=158, start_offset=158, starts_line=True, line_number=13),
+ make_inst(opname='LOAD_SMALL_INT', arg=1, argval=1, argrepr='', offset=160, start_offset=160, starts_line=False, line_number=13),
+ make_inst(opname='BINARY_OP', arg=23, argval=23, argrepr='-=', offset=162, start_offset=162, starts_line=False, line_number=13, cache_info=[('counter', 1, b'\x00\x00'), ('descr', 4, b'\x00\x00\x00\x00\x00\x00\x00\x00')]),
+ make_inst(opname='STORE_FAST', arg=0, argval='i', argrepr='i', offset=174, start_offset=174, starts_line=False, line_number=13),
+ make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=176, start_offset=176, starts_line=True, line_number=14),
+ make_inst(opname='LOAD_SMALL_INT', arg=6, argval=6, argrepr='', offset=178, start_offset=178, starts_line=False, line_number=14),
+ make_inst(opname='COMPARE_OP', arg=148, argval='>', argrepr='bool(>)', offset=180, start_offset=180, starts_line=False, line_number=14, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='POP_JUMP_IF_FALSE', arg=3, argval=194, argrepr='to L6', offset=184, start_offset=184, starts_line=False, line_number=14, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=188, start_offset=188, starts_line=False, line_number=14),
+ make_inst(opname='JUMP_BACKWARD', arg=37, argval=120, argrepr='to L5', offset=190, start_offset=190, starts_line=True, line_number=15, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=194, start_offset=194, starts_line=True, line_number=16, label=6),
+ make_inst(opname='LOAD_SMALL_INT', arg=4, argval=4, argrepr='', offset=196, start_offset=196, starts_line=False, line_number=16),
+ make_inst(opname='COMPARE_OP', arg=18, argval='<', argrepr='bool(<)', offset=198, start_offset=198, starts_line=False, line_number=16, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='POP_JUMP_IF_TRUE', arg=3, argval=212, argrepr='to L7', offset=202, start_offset=202, starts_line=False, line_number=16, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=206, start_offset=206, starts_line=False, line_number=16),
+ make_inst(opname='JUMP_BACKWARD', arg=46, argval=120, argrepr='to L5', offset=208, start_offset=208, starts_line=False, line_number=16, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='JUMP_FORWARD', arg=11, argval=236, argrepr='to L9', offset=212, start_offset=212, starts_line=True, line_number=17, label=7),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=214, start_offset=214, starts_line=True, line_number=19, label=8, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_CONST', arg=2, argval='Who let lolcatz into this test suite?', argrepr="'Who let lolcatz into this test suite?'", offset=224, start_offset=224, starts_line=False, line_number=19),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=226, start_offset=226, starts_line=False, line_number=19, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=234, start_offset=234, starts_line=False, line_number=19),
+ make_inst(opname='NOP', arg=None, argval=None, argrepr='', offset=236, start_offset=236, starts_line=True, line_number=20, label=9),
+ make_inst(opname='LOAD_SMALL_INT', arg=1, argval=1, argrepr='', offset=238, start_offset=238, starts_line=True, line_number=21),
+ make_inst(opname='LOAD_SMALL_INT', arg=0, argval=0, argrepr='', offset=240, start_offset=240, starts_line=False, line_number=21),
+ make_inst(opname='BINARY_OP', arg=11, argval=11, argrepr='/', offset=242, start_offset=242, starts_line=False, line_number=21, cache_info=[('counter', 1, b'\x00\x00'), ('descr', 4, b'\x00\x00\x00\x00\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=254, start_offset=254, starts_line=False, line_number=21),
+ make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=256, start_offset=256, starts_line=True, line_number=25),
+ make_inst(opname='COPY', arg=1, argval=1, argrepr='', offset=258, start_offset=258, starts_line=False, line_number=25),
+ make_inst(opname='LOAD_SPECIAL', arg=1, argval=1, argrepr='__exit__', offset=260, start_offset=260, starts_line=False, line_number=25),
+ make_inst(opname='SWAP', arg=2, argval=2, argrepr='', offset=262, start_offset=262, starts_line=False, line_number=25),
+ make_inst(opname='SWAP', arg=3, argval=3, argrepr='', offset=264, start_offset=264, starts_line=False, line_number=25),
+ make_inst(opname='LOAD_SPECIAL', arg=0, argval=0, argrepr='__enter__', offset=266, start_offset=266, starts_line=False, line_number=25),
+ make_inst(opname='CALL', arg=0, argval=0, argrepr='', offset=268, start_offset=268, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='STORE_FAST', arg=1, argval='dodgy', argrepr='dodgy', offset=276, start_offset=276, starts_line=False, line_number=25),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=278, start_offset=278, starts_line=True, line_number=26, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_CONST', arg=3, argval='Never reach this', argrepr="'Never reach this'", offset=288, start_offset=288, starts_line=False, line_number=26),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=290, start_offset=290, starts_line=False, line_number=26, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=298, start_offset=298, starts_line=False, line_number=26),
+ make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=300, start_offset=300, starts_line=True, line_number=25),
make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=302, start_offset=302, starts_line=False, line_number=25),
- make_inst(opname='CALL', arg=3, argval=3, argrepr='', offset=304, start_offset=304, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=312, start_offset=312, starts_line=False, line_number=25),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=314, start_offset=314, starts_line=True, line_number=28, label=10, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_CONST', arg=6, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=324, start_offset=324, starts_line=False, line_number=28),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=326, start_offset=326, starts_line=False, line_number=28, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=334, start_offset=334, starts_line=False, line_number=28),
- make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=336, start_offset=336, starts_line=False, line_number=28),
- make_inst(opname='RETURN_VALUE', arg=None, argval=None, argrepr='', offset=338, start_offset=338, starts_line=False, line_number=28),
- make_inst(opname='PUSH_EXC_INFO', arg=None, argval=None, argrepr='', offset=340, start_offset=340, starts_line=True, line_number=25),
- make_inst(opname='WITH_EXCEPT_START', arg=None, argval=None, argrepr='', offset=342, start_offset=342, starts_line=False, line_number=25),
- make_inst(opname='TO_BOOL', arg=None, argval=None, argrepr='', offset=344, start_offset=344, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00'), ('version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_JUMP_IF_TRUE', arg=2, argval=360, argrepr='to L11', offset=352, start_offset=352, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=356, start_offset=356, starts_line=False, line_number=25),
- make_inst(opname='RERAISE', arg=2, argval=2, argrepr='', offset=358, start_offset=358, starts_line=False, line_number=25),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=360, start_offset=360, starts_line=False, line_number=25, label=11),
- make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=362, start_offset=362, starts_line=False, line_number=25),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=364, start_offset=364, starts_line=False, line_number=25),
+ make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=304, start_offset=304, starts_line=False, line_number=25),
+ make_inst(opname='CALL', arg=3, argval=3, argrepr='', offset=306, start_offset=306, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=314, start_offset=314, starts_line=False, line_number=25),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=316, start_offset=316, starts_line=True, line_number=28, label=10, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_CONST', arg=6, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=326, start_offset=326, starts_line=False, line_number=28),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=328, start_offset=328, starts_line=False, line_number=28, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=336, start_offset=336, starts_line=False, line_number=28),
+ make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=338, start_offset=338, starts_line=False, line_number=28),
+ make_inst(opname='RETURN_VALUE', arg=None, argval=None, argrepr='', offset=340, start_offset=340, starts_line=False, line_number=28),
+ make_inst(opname='PUSH_EXC_INFO', arg=None, argval=None, argrepr='', offset=342, start_offset=342, starts_line=True, line_number=25),
+ make_inst(opname='WITH_EXCEPT_START', arg=None, argval=None, argrepr='', offset=344, start_offset=344, starts_line=False, line_number=25),
+ make_inst(opname='TO_BOOL', arg=None, argval=None, argrepr='', offset=346, start_offset=346, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00'), ('version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_JUMP_IF_TRUE', arg=2, argval=362, argrepr='to L11', offset=354, start_offset=354, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=358, start_offset=358, starts_line=False, line_number=25),
+ make_inst(opname='RERAISE', arg=2, argval=2, argrepr='', offset=360, start_offset=360, starts_line=False, line_number=25),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=362, start_offset=362, starts_line=False, line_number=25, label=11),
+ make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=364, start_offset=364, starts_line=False, line_number=25),
make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=366, start_offset=366, starts_line=False, line_number=25),
make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=368, start_offset=368, starts_line=False, line_number=25),
- make_inst(opname='JUMP_BACKWARD_NO_INTERRUPT', arg=29, argval=314, argrepr='to L10', offset=370, start_offset=370, starts_line=False, line_number=25),
- make_inst(opname='COPY', arg=3, argval=3, argrepr='', offset=372, start_offset=372, starts_line=True, line_number=None),
- make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=374, start_offset=374, starts_line=False, line_number=None),
- make_inst(opname='RERAISE', arg=1, argval=1, argrepr='', offset=376, start_offset=376, starts_line=False, line_number=None),
- make_inst(opname='PUSH_EXC_INFO', arg=None, argval=None, argrepr='', offset=378, start_offset=378, starts_line=False, line_number=None),
- make_inst(opname='LOAD_GLOBAL', arg=4, argval='ZeroDivisionError', argrepr='ZeroDivisionError', offset=380, start_offset=380, starts_line=True, line_number=22, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='CHECK_EXC_MATCH', arg=None, argval=None, argrepr='', offset=390, start_offset=390, starts_line=False, line_number=22),
- make_inst(opname='POP_JUMP_IF_FALSE', arg=15, argval=426, argrepr='to L12', offset=392, start_offset=392, starts_line=False, line_number=22, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=396, start_offset=396, starts_line=False, line_number=22),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=398, start_offset=398, starts_line=False, line_number=22),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=400, start_offset=400, starts_line=True, line_number=23, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_CONST', arg=5, argval='Here we go, here we go, here we go...', argrepr="'Here we go, here we go, here we go...'", offset=410, start_offset=410, starts_line=False, line_number=23),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=412, start_offset=412, starts_line=False, line_number=23, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=420, start_offset=420, starts_line=False, line_number=23),
- make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=422, start_offset=422, starts_line=False, line_number=23),
- make_inst(opname='JUMP_BACKWARD_NO_INTERRUPT', arg=56, argval=314, argrepr='to L10', offset=424, start_offset=424, starts_line=False, line_number=23),
- make_inst(opname='RERAISE', arg=0, argval=0, argrepr='', offset=426, start_offset=426, starts_line=True, line_number=22, label=12),
- make_inst(opname='COPY', arg=3, argval=3, argrepr='', offset=428, start_offset=428, starts_line=True, line_number=None),
- make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=430, start_offset=430, starts_line=False, line_number=None),
- make_inst(opname='RERAISE', arg=1, argval=1, argrepr='', offset=432, start_offset=432, starts_line=False, line_number=None),
- make_inst(opname='PUSH_EXC_INFO', arg=None, argval=None, argrepr='', offset=434, start_offset=434, starts_line=False, line_number=None),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=436, start_offset=436, starts_line=True, line_number=28, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_CONST', arg=6, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=446, start_offset=446, starts_line=False, line_number=28),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=448, start_offset=448, starts_line=False, line_number=28, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=456, start_offset=456, starts_line=False, line_number=28),
- make_inst(opname='RERAISE', arg=0, argval=0, argrepr='', offset=458, start_offset=458, starts_line=False, line_number=28),
- make_inst(opname='COPY', arg=3, argval=3, argrepr='', offset=460, start_offset=460, starts_line=True, line_number=None),
- make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=462, start_offset=462, starts_line=False, line_number=None),
- make_inst(opname='RERAISE', arg=1, argval=1, argrepr='', offset=464, start_offset=464, starts_line=False, line_number=None),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=370, start_offset=370, starts_line=False, line_number=25),
+ make_inst(opname='JUMP_BACKWARD_NO_INTERRUPT', arg=29, argval=316, argrepr='to L10', offset=372, start_offset=372, starts_line=False, line_number=25),
+ make_inst(opname='COPY', arg=3, argval=3, argrepr='', offset=374, start_offset=374, starts_line=True, line_number=None),
+ make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=376, start_offset=376, starts_line=False, line_number=None),
+ make_inst(opname='RERAISE', arg=1, argval=1, argrepr='', offset=378, start_offset=378, starts_line=False, line_number=None),
+ make_inst(opname='PUSH_EXC_INFO', arg=None, argval=None, argrepr='', offset=380, start_offset=380, starts_line=False, line_number=None),
+ make_inst(opname='LOAD_GLOBAL', arg=4, argval='ZeroDivisionError', argrepr='ZeroDivisionError', offset=382, start_offset=382, starts_line=True, line_number=22, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='CHECK_EXC_MATCH', arg=None, argval=None, argrepr='', offset=392, start_offset=392, starts_line=False, line_number=22),
+ make_inst(opname='POP_JUMP_IF_FALSE', arg=15, argval=428, argrepr='to L12', offset=394, start_offset=394, starts_line=False, line_number=22, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=398, start_offset=398, starts_line=False, line_number=22),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=400, start_offset=400, starts_line=False, line_number=22),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=402, start_offset=402, starts_line=True, line_number=23, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_CONST', arg=5, argval='Here we go, here we go, here we go...', argrepr="'Here we go, here we go, here we go...'", offset=412, start_offset=412, starts_line=False, line_number=23),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=414, start_offset=414, starts_line=False, line_number=23, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=422, start_offset=422, starts_line=False, line_number=23),
+ make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=424, start_offset=424, starts_line=False, line_number=23),
+ make_inst(opname='JUMP_BACKWARD_NO_INTERRUPT', arg=56, argval=316, argrepr='to L10', offset=426, start_offset=426, starts_line=False, line_number=23),
+ make_inst(opname='RERAISE', arg=0, argval=0, argrepr='', offset=428, start_offset=428, starts_line=True, line_number=22, label=12),
+ make_inst(opname='COPY', arg=3, argval=3, argrepr='', offset=430, start_offset=430, starts_line=True, line_number=None),
+ make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=432, start_offset=432, starts_line=False, line_number=None),
+ make_inst(opname='RERAISE', arg=1, argval=1, argrepr='', offset=434, start_offset=434, starts_line=False, line_number=None),
+ make_inst(opname='PUSH_EXC_INFO', arg=None, argval=None, argrepr='', offset=436, start_offset=436, starts_line=False, line_number=None),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=438, start_offset=438, starts_line=True, line_number=28, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_CONST', arg=6, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=448, start_offset=448, starts_line=False, line_number=28),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=450, start_offset=450, starts_line=False, line_number=28, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=458, start_offset=458, starts_line=False, line_number=28),
+ make_inst(opname='RERAISE', arg=0, argval=0, argrepr='', offset=460, start_offset=460, starts_line=False, line_number=28),
+ make_inst(opname='COPY', arg=3, argval=3, argrepr='', offset=462, start_offset=462, starts_line=True, line_number=None),
+ make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=464, start_offset=464, starts_line=False, line_number=None),
+ make_inst(opname='RERAISE', arg=1, argval=1, argrepr='', offset=466, start_offset=466, starts_line=False, line_number=None),
]
# One last piece of inspect fodder to check the default line number handling
diff --git a/Lib/test/test_doctest/sample_doctest_errors.py b/Lib/test/test_doctest/sample_doctest_errors.py
new file mode 100644
index 00000000000..4a6f07af2d4
--- /dev/null
+++ b/Lib/test/test_doctest/sample_doctest_errors.py
@@ -0,0 +1,46 @@
+"""This is a sample module used for testing doctest.
+
+This module includes various scenarios involving errors.
+
+>>> 2 + 2
+5
+>>> 1/0
+1
+"""
+
+def g():
+ [][0] # line 12
+
+def errors():
+ """
+ >>> 2 + 2
+ 5
+ >>> 1/0
+ 1
+ >>> def f():
+ ... 2 + '2'
+ ...
+ >>> f()
+ 1
+ >>> g()
+ 1
+ """
+
+def syntax_error():
+ """
+ >>> 2+*3
+ 5
+ """
+
+__test__ = {
+ 'bad': """
+ >>> 2 + 2
+ 5
+ >>> 1/0
+ 1
+ """,
+}
+
+def test_suite():
+ import doctest
+ return doctest.DocTestSuite()
diff --git a/Lib/test/test_doctest/test_doctest.py b/Lib/test/test_doctest/test_doctest.py
index a4a49298bab..72763d4a013 100644
--- a/Lib/test/test_doctest/test_doctest.py
+++ b/Lib/test/test_doctest/test_doctest.py
@@ -2267,14 +2267,24 @@ def test_DocTestSuite():
>>> import unittest
>>> import test.test_doctest.sample_doctest
>>> suite = doctest.DocTestSuite(test.test_doctest.sample_doctest)
- >>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ >>> result = suite.run(unittest.TestResult())
+ >>> result
+ <unittest.result.TestResult run=9 errors=2 failures=2>
+ >>> for tst, _ in result.failures:
+ ... print(tst)
+ bad (test.test_doctest.sample_doctest.__test__) [0]
+ foo (test.test_doctest.sample_doctest) [0]
+ >>> for tst, _ in result.errors:
+ ... print(tst)
+ test_silly_setup (test.test_doctest.sample_doctest) [1]
+ y_is_one (test.test_doctest.sample_doctest) [0]
We can also supply the module by name:
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest')
- >>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ >>> result = suite.run(unittest.TestResult())
+ >>> result
+ <unittest.result.TestResult run=9 errors=2 failures=2>
The module need not contain any doctest examples:
@@ -2296,13 +2306,26 @@ def test_DocTestSuite():
>>> result
<unittest.result.TestResult run=6 errors=0 failures=2>
>>> len(result.skipped)
- 2
+ 7
+ >>> for tst, _ in result.skipped:
+ ... print(tst)
+ double_skip (test.test_doctest.sample_doctest_skip) [0]
+ double_skip (test.test_doctest.sample_doctest_skip) [1]
+ double_skip (test.test_doctest.sample_doctest_skip)
+ partial_skip_fail (test.test_doctest.sample_doctest_skip) [0]
+ partial_skip_pass (test.test_doctest.sample_doctest_skip) [0]
+ single_skip (test.test_doctest.sample_doctest_skip) [0]
+ single_skip (test.test_doctest.sample_doctest_skip)
+ >>> for tst, _ in result.failures:
+ ... print(tst)
+ no_skip_fail (test.test_doctest.sample_doctest_skip) [0]
+ partial_skip_fail (test.test_doctest.sample_doctest_skip) [1]
We can use the current module:
>>> suite = test.test_doctest.sample_doctest.test_suite()
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
We can also provide a DocTestFinder:
@@ -2310,7 +2333,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest',
... test_finder=finder)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
The DocTestFinder need not return any tests:
@@ -2326,7 +2349,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest', globs={})
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=5>
+ <unittest.result.TestResult run=9 errors=3 failures=2>
Alternatively, we can provide extra globals. Here we'll make an
error go away by providing an extra global variable:
@@ -2334,7 +2357,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest',
... extraglobs={'y': 1})
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=3>
+ <unittest.result.TestResult run=9 errors=1 failures=2>
You can pass option flags. Here we'll cause an extra error
by disabling the blank-line feature:
@@ -2342,7 +2365,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=5>
+ <unittest.result.TestResult run=9 errors=2 failures=3>
You can supply setUp and tearDown functions:
@@ -2359,7 +2382,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=3>
+ <unittest.result.TestResult run=9 errors=1 failures=2>
But the tearDown restores sanity:
@@ -2377,13 +2400,115 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest', setUp=setUp)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=3>
+ <unittest.result.TestResult run=9 errors=1 failures=2>
Here, we didn't need to use a tearDown function because we
modified the test globals, which are a copy of the
sample_doctest module dictionary. The test globals are
automatically cleared for us after a test.
- """
+ """
+
+def test_DocTestSuite_errors():
+ """Tests for error reporting in DocTestSuite.
+
+ >>> import unittest
+ >>> import test.test_doctest.sample_doctest_errors as mod
+ >>> suite = doctest.DocTestSuite(mod)
+ >>> result = suite.run(unittest.TestResult())
+ >>> result
+ <unittest.result.TestResult run=4 errors=6 failures=3>
+ >>> print(result.failures[0][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 5, in test.test_doctest.sample_doctest_errors
+ >...>> 2 + 2
+ AssertionError: Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ <BLANKLINE>
+ >>> print(result.failures[1][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line None, in test.test_doctest.sample_doctest_errors.__test__.bad
+ AssertionError: Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ <BLANKLINE>
+ >>> print(result.failures[2][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 16, in test.test_doctest.sample_doctest_errors.errors
+ >...>> 2 + 2
+ AssertionError: Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ <BLANKLINE>
+ >>> print(result.errors[0][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 7, in test.test_doctest.sample_doctest_errors
+ >...>> 1/0
+ File "<doctest test.test_doctest.sample_doctest_errors[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ <BLANKLINE>
+ >>> print(result.errors[1][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line None, in test.test_doctest.sample_doctest_errors.__test__.bad
+ File "<doctest test.test_doctest.sample_doctest_errors.__test__.bad[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ <BLANKLINE>
+ >>> print(result.errors[2][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 18, in test.test_doctest.sample_doctest_errors.errors
+ >...>> 1/0
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ <BLANKLINE>
+ >>> print(result.errors[3][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 23, in test.test_doctest.sample_doctest_errors.errors
+ >...>> f()
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[3]>", line 1, in <module>
+ f()
+ ~^^
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[2]>", line 2, in f
+ 2 + '2'
+ ~~^~~~~
+ TypeError: ...
+ <BLANKLINE>
+ >>> print(result.errors[4][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 25, in test.test_doctest.sample_doctest_errors.errors
+ >...>> g()
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[4]>", line 1, in <module>
+ g()
+ ~^^
+ File "...sample_doctest_errors.py", line 12, in g
+ [][0] # line 12
+ ~~^^^
+ IndexError: list index out of range
+ <BLANKLINE>
+ >>> print(result.errors[5][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 31, in test.test_doctest.sample_doctest_errors.syntax_error
+ >...>> 2+*3
+ File "<doctest test.test_doctest.sample_doctest_errors.syntax_error[0]>", line 1
+ 2+*3
+ ^
+ SyntaxError: invalid syntax
+ <BLANKLINE>
+ """
def test_DocFileSuite():
"""We can test tests found in text files using a DocFileSuite.
@@ -2396,7 +2521,7 @@ def test_DocFileSuite():
... 'test_doctest2.txt',
... 'test_doctest4.txt')
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=2>
+ <unittest.result.TestResult run=3 errors=2 failures=0>
The test files are looked for in the directory containing the
calling module. A package keyword argument can be provided to
@@ -2408,14 +2533,14 @@ def test_DocFileSuite():
... 'test_doctest4.txt',
... package='test.test_doctest')
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=2>
+ <unittest.result.TestResult run=3 errors=2 failures=0>
'/' should be used as a path separator. It will be converted
to a native separator at run time:
>>> suite = doctest.DocFileSuite('../test_doctest/test_doctest.txt')
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=1 errors=0 failures=1>
+ <unittest.result.TestResult run=1 errors=1 failures=0>
If DocFileSuite is used from an interactive session, then files
are resolved relative to the directory of sys.argv[0]:
@@ -2441,7 +2566,7 @@ def test_DocFileSuite():
>>> suite = doctest.DocFileSuite(test_file, module_relative=False)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=1 errors=0 failures=1>
+ <unittest.result.TestResult run=1 errors=1 failures=0>
It is an error to specify `package` when `module_relative=False`:
@@ -2455,12 +2580,19 @@ def test_DocFileSuite():
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest4.txt',
- ... 'test_doctest_skip.txt')
+ ... 'test_doctest_skip.txt',
+ ... 'test_doctest_skip2.txt')
>>> result = suite.run(unittest.TestResult())
>>> result
- <unittest.result.TestResult run=3 errors=0 failures=1>
- >>> len(result.skipped)
- 1
+ <unittest.result.TestResult run=4 errors=1 failures=0>
+ >>> len(result.skipped)
+ 4
+ >>> for tst, _ in result.skipped: # doctest: +ELLIPSIS
+ ... print('=', tst)
+ = ...test_doctest_skip.txt [0]
+ = ...test_doctest_skip.txt [1]
+ = ...test_doctest_skip.txt
+ = ...test_doctest_skip2.txt [0]
You can specify initial global variables:
@@ -2469,7 +2601,7 @@ def test_DocFileSuite():
... 'test_doctest4.txt',
... globs={'favorite_color': 'blue'})
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=1>
+ <unittest.result.TestResult run=3 errors=1 failures=0>
In this case, we supplied a missing favorite color. You can
provide doctest options:
@@ -2480,7 +2612,7 @@ def test_DocFileSuite():
... optionflags=doctest.DONT_ACCEPT_BLANKLINE,
... globs={'favorite_color': 'blue'})
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=2>
+ <unittest.result.TestResult run=3 errors=1 failures=1>
And, you can provide setUp and tearDown functions:
@@ -2499,7 +2631,7 @@ def test_DocFileSuite():
... 'test_doctest4.txt',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=1>
+ <unittest.result.TestResult run=3 errors=1 failures=0>
But the tearDown restores sanity:
@@ -2541,9 +2673,60 @@ def test_DocFileSuite():
... 'test_doctest4.txt',
... encoding='utf-8')
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=2>
+ <unittest.result.TestResult run=3 errors=2 failures=0>
+ """
- """
+def test_DocFileSuite_errors():
+ """Tests for error reporting in DocTestSuite.
+
+ >>> import unittest
+ >>> suite = doctest.DocFileSuite('test_doctest_errors.txt')
+ >>> result = suite.run(unittest.TestResult())
+ >>> result
+ <unittest.result.TestResult run=1 errors=3 failures=1>
+ >>> print(result.failures[0][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...test_doctest_errors.txt", line 4, in test_doctest_errors.txt
+ >...>> 2 + 2
+ AssertionError: Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ <BLANKLINE>
+ >>> print(result.errors[0][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...test_doctest_errors.txt", line 6, in test_doctest_errors.txt
+ >...>> 1/0
+ File "<doctest test_doctest_errors.txt[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ <BLANKLINE>
+ >>> print(result.errors[1][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...test_doctest_errors.txt", line 11, in test_doctest_errors.txt
+ >...>> f()
+ File "<doctest test_doctest_errors.txt[3]>", line 1, in <module>
+ f()
+ ~^^
+ File "<doctest test_doctest_errors.txt[2]>", line 2, in f
+ 2 + '2'
+ ~~^~~~~
+ TypeError: ...
+ <BLANKLINE>
+ >>> print(result.errors[2][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...test_doctest_errors.txt", line 13, in test_doctest_errors.txt
+ >...>> 2+*3
+ File "<doctest test_doctest_errors.txt[4]>", line 1
+ 2+*3
+ ^
+ SyntaxError: invalid syntax
+ <BLANKLINE>
+
+ """
def test_trailing_space_in_test():
"""
@@ -2612,14 +2795,26 @@ def test_unittest_reportflags():
... optionflags=doctest.DONT_ACCEPT_BLANKLINE)
>>> import unittest
>>> result = suite.run(unittest.TestResult())
+ >>> result
+ <unittest.result.TestResult run=1 errors=1 failures=1>
>>> print(result.failures[0][1]) # doctest: +ELLIPSIS
- Traceback ...
- Failed example:
- favorite_color
- ...
- Failed example:
+ Traceback (most recent call last):
+ File ...
+ >...>> if 1:
+ AssertionError: Failed example:
if 1:
- ...
+ print('a')
+ print()
+ print('b')
+ Expected:
+ a
+ <BLANKLINE>
+ b
+ Got:
+ a
+ <BLANKLINE>
+ b
+ <BLANKLINE>
Note that we see both failures displayed.
@@ -2628,16 +2823,8 @@ def test_unittest_reportflags():
Now, when we run the test:
- >>> result = suite.run(unittest.TestResult())
- >>> print(result.failures[0][1]) # doctest: +ELLIPSIS
- Traceback ...
- Failed example:
- favorite_color
- Exception raised:
- ...
- NameError: name 'favorite_color' is not defined
- <BLANKLINE>
- <BLANKLINE>
+ >>> suite.run(unittest.TestResult())
+ <unittest.result.TestResult run=1 errors=1 failures=0>
We get only the first failure.
@@ -2647,19 +2834,20 @@ def test_unittest_reportflags():
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE | doctest.REPORT_NDIFF)
- Then the default eporting options are ignored:
+ Then the default reporting options are ignored:
>>> result = suite.run(unittest.TestResult())
+ >>> result
+ <unittest.result.TestResult run=1 errors=1 failures=1>
*NOTE*: These doctest are intentionally not placed in raw string to depict
the trailing whitespace using `\x20` in the diff below.
>>> print(result.failures[0][1]) # doctest: +ELLIPSIS
Traceback ...
- Failed example:
- favorite_color
- ...
- Failed example:
+ File ...
+ >...>> if 1:
+ AssertionError: Failed example:
if 1:
print('a')
print()
@@ -2670,7 +2858,6 @@ def test_unittest_reportflags():
+\x20
b
<BLANKLINE>
- <BLANKLINE>
Test runners can restore the formatting flags after they run:
@@ -2860,6 +3047,57 @@ Test the verbose output:
>>> _colorize.COLORIZE = save_colorize
"""
+def test_testfile_errors(): r"""
+Tests for error reporting in the testfile() function.
+
+ >>> doctest.testfile('test_doctest_errors.txt', verbose=False) # doctest: +ELLIPSIS
+ **********************************************************************
+ File "...test_doctest_errors.txt", line 4, in test_doctest_errors.txt
+ Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ **********************************************************************
+ File "...test_doctest_errors.txt", line 6, in test_doctest_errors.txt
+ Failed example:
+ 1/0
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test_doctest_errors.txt[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ **********************************************************************
+ File "...test_doctest_errors.txt", line 11, in test_doctest_errors.txt
+ Failed example:
+ f()
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test_doctest_errors.txt[3]>", line 1, in <module>
+ f()
+ ~^^
+ File "<doctest test_doctest_errors.txt[2]>", line 2, in f
+ 2 + '2'
+ ~~^~~~~
+ TypeError: ...
+ **********************************************************************
+ File "...test_doctest_errors.txt", line 13, in test_doctest_errors.txt
+ Failed example:
+ 2+*3
+ Exception raised:
+ File "<doctest test_doctest_errors.txt[4]>", line 1
+ 2+*3
+ ^
+ SyntaxError: invalid syntax
+ **********************************************************************
+ 1 item had failures:
+ 4 of 5 in test_doctest_errors.txt
+ ***Test Failed*** 4 failures.
+ TestResults(failed=4, attempted=5)
+"""
+
class TestImporter(importlib.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
@@ -2990,6 +3228,110 @@ out of the binary module.
TestResults(failed=0, attempted=0)
"""
+def test_testmod_errors(): r"""
+Tests for error reporting in the testmod() function.
+
+ >>> import test.test_doctest.sample_doctest_errors as mod
+ >>> doctest.testmod(mod, verbose=False) # doctest: +ELLIPSIS
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 5, in test.test_doctest.sample_doctest_errors
+ Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 7, in test.test_doctest.sample_doctest_errors
+ Failed example:
+ 1/0
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test.test_doctest.sample_doctest_errors[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ **********************************************************************
+ File "...sample_doctest_errors.py", line ?, in test.test_doctest.sample_doctest_errors.__test__.bad
+ Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ **********************************************************************
+ File "...sample_doctest_errors.py", line ?, in test.test_doctest.sample_doctest_errors.__test__.bad
+ Failed example:
+ 1/0
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test.test_doctest.sample_doctest_errors.__test__.bad[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 16, in test.test_doctest.sample_doctest_errors.errors
+ Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 18, in test.test_doctest.sample_doctest_errors.errors
+ Failed example:
+ 1/0
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 23, in test.test_doctest.sample_doctest_errors.errors
+ Failed example:
+ f()
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[3]>", line 1, in <module>
+ f()
+ ~^^
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[2]>", line 2, in f
+ 2 + '2'
+ ~~^~~~~
+ TypeError: ...
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 25, in test.test_doctest.sample_doctest_errors.errors
+ Failed example:
+ g()
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[4]>", line 1, in <module>
+ g()
+ ~^^
+ File "...sample_doctest_errors.py", line 12, in g
+ [][0] # line 12
+ ~~^^^
+ IndexError: list index out of range
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 31, in test.test_doctest.sample_doctest_errors.syntax_error
+ Failed example:
+ 2+*3
+ Exception raised:
+ File "<doctest test.test_doctest.sample_doctest_errors.syntax_error[0]>", line 1
+ 2+*3
+ ^
+ SyntaxError: invalid syntax
+ **********************************************************************
+ 4 items had failures:
+ 2 of 2 in test.test_doctest.sample_doctest_errors
+ 2 of 2 in test.test_doctest.sample_doctest_errors.__test__.bad
+ 4 of 5 in test.test_doctest.sample_doctest_errors.errors
+ 1 of 1 in test.test_doctest.sample_doctest_errors.syntax_error
+ ***Test Failed*** 9 failures.
+ TestResults(failed=9, attempted=10)
+"""
+
try:
os.fsencode("foo-bรคr@baz.py")
supports_unicode = True
@@ -3021,11 +3363,6 @@ Check doctest with a non-ascii filename:
raise Exception('clรฉ')
Exception raised:
Traceback (most recent call last):
- File ...
- exec(compile(example.source, filename, "single",
- ~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- compileflags, True), test.globs)
- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "<doctest foo-bรคr@baz[0]>", line 1, in <module>
raise Exception('clรฉ')
Exception: clรฉ
@@ -3318,9 +3655,9 @@ def test_run_doctestsuite_multiple_times():
>>> import test.test_doctest.sample_doctest
>>> suite = doctest.DocTestSuite(test.test_doctest.sample_doctest)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
"""
diff --git a/Lib/test/test_doctest/test_doctest_errors.txt b/Lib/test/test_doctest/test_doctest_errors.txt
new file mode 100644
index 00000000000..93c3c106e60
--- /dev/null
+++ b/Lib/test/test_doctest/test_doctest_errors.txt
@@ -0,0 +1,14 @@
+This is a sample doctest in a text file, in which all examples fail
+or raise an exception.
+
+ >>> 2 + 2
+ 5
+ >>> 1/0
+ 1
+ >>> def f():
+ ... 2 + '2'
+ ...
+ >>> f()
+ 1
+ >>> 2+*3
+ 5
diff --git a/Lib/test/test_doctest/test_doctest_skip.txt b/Lib/test/test_doctest/test_doctest_skip.txt
index f340e2b8141..06c23d06e60 100644
--- a/Lib/test/test_doctest/test_doctest_skip.txt
+++ b/Lib/test/test_doctest/test_doctest_skip.txt
@@ -2,3 +2,5 @@ This is a sample doctest in a text file, in which all examples are skipped.
>>> 2 + 2 # doctest: +SKIP
5
+ >>> 2 + 2 # doctest: +SKIP
+ 4
diff --git a/Lib/test/test_doctest/test_doctest_skip2.txt b/Lib/test/test_doctest/test_doctest_skip2.txt
new file mode 100644
index 00000000000..85e4938c346
--- /dev/null
+++ b/Lib/test/test_doctest/test_doctest_skip2.txt
@@ -0,0 +1,6 @@
+This is a sample doctest in a text file, in which some examples are skipped.
+
+ >>> 2 + 2 # doctest: +SKIP
+ 5
+ >>> 2 + 2
+ 4
diff --git a/Lib/test/test_email/test__header_value_parser.py b/Lib/test/test_email/test__header_value_parser.py
index ac12c3b2306..179e236ecdf 100644
--- a/Lib/test/test_email/test__header_value_parser.py
+++ b/Lib/test/test_email/test__header_value_parser.py
@@ -463,6 +463,19 @@ class TestParser(TestParserMixin, TestEmailBase):
[errors.NonPrintableDefect], ')')
self.assertEqual(ptext.defects[0].non_printables[0], '\x00')
+ def test_get_qp_ctext_close_paren_only(self):
+ self._test_get_x(parser.get_qp_ctext,
+ ')', '', ' ', [], ')')
+
+ def test_get_qp_ctext_open_paren_only(self):
+ self._test_get_x(parser.get_qp_ctext,
+ '(', '', ' ', [], '(')
+
+ def test_get_qp_ctext_no_end_char(self):
+ self._test_get_x(parser.get_qp_ctext,
+ '', '', ' ', [], '')
+
+
# get_qcontent
def test_get_qcontent_only(self):
@@ -503,6 +516,14 @@ class TestParser(TestParserMixin, TestEmailBase):
[errors.NonPrintableDefect], '"')
self.assertEqual(ptext.defects[0].non_printables[0], '\x00')
+ def test_get_qcontent_empty(self):
+ self._test_get_x(parser.get_qcontent,
+ '"', '', '', [], '"')
+
+ def test_get_qcontent_no_end_char(self):
+ self._test_get_x(parser.get_qcontent,
+ '', '', '', [], '')
+
# get_atext
def test_get_atext_only(self):
@@ -1283,6 +1304,18 @@ class TestParser(TestParserMixin, TestEmailBase):
self._test_get_x(parser.get_dtext,
'foo[bar', 'foo', 'foo', [], '[bar')
+ def test_get_dtext_open_bracket_only(self):
+ self._test_get_x(parser.get_dtext,
+ '[', '', '', [], '[')
+
+ def test_get_dtext_close_bracket_only(self):
+ self._test_get_x(parser.get_dtext,
+ ']', '', '', [], ']')
+
+ def test_get_dtext_empty(self):
+ self._test_get_x(parser.get_dtext,
+ '', '', '', [], '')
+
# get_domain_literal
def test_get_domain_literal_only(self):
@@ -2458,6 +2491,38 @@ class TestParser(TestParserMixin, TestEmailBase):
self.assertEqual(address.all_mailboxes[0].domain, 'example.com')
self.assertEqual(address.all_mailboxes[0].addr_spec, '"example example"@example.com')
+ def test_get_address_with_invalid_domain(self):
+ address = self._test_get_x(parser.get_address,
+ '<T@[',
+ '<T@[]>',
+ '<T@[]>',
+ [errors.InvalidHeaderDefect, # missing trailing '>' on angle-addr
+ errors.InvalidHeaderDefect, # end of input inside domain-literal
+ ],
+ '')
+ self.assertEqual(address.token_type, 'address')
+ self.assertEqual(len(address.mailboxes), 0)
+ self.assertEqual(len(address.all_mailboxes), 1)
+ self.assertEqual(address.all_mailboxes[0].domain, '[]')
+ self.assertEqual(address.all_mailboxes[0].local_part, 'T')
+ self.assertEqual(address.all_mailboxes[0].token_type, 'invalid-mailbox')
+ self.assertEqual(address[0].token_type, 'invalid-mailbox')
+
+ address = self._test_get_x(parser.get_address,
+ '!an??:=m==fr2@[C',
+ '!an??:=m==fr2@[C];',
+ '!an??:=m==fr2@[C];',
+ [errors.InvalidHeaderDefect, # end of header in group
+ errors.InvalidHeaderDefect, # end of input inside domain-literal
+ ],
+ '')
+ self.assertEqual(address.token_type, 'address')
+ self.assertEqual(len(address.mailboxes), 0)
+ self.assertEqual(len(address.all_mailboxes), 1)
+ self.assertEqual(address.all_mailboxes[0].domain, '[C]')
+ self.assertEqual(address.all_mailboxes[0].local_part, '=m==fr2')
+ self.assertEqual(address.all_mailboxes[0].token_type, 'invalid-mailbox')
+ self.assertEqual(address[0].token_type, 'group')
# get_address_list
@@ -2732,6 +2797,19 @@ class TestParser(TestParserMixin, TestEmailBase):
)
self.assertEqual(message_id.token_type, 'message-id')
+ def test_parse_message_id_with_invalid_domain(self):
+ message_id = self._test_parse_x(
+ parser.parse_message_id,
+ "<T@[",
+ "<T@[]>",
+ "<T@[]>",
+ [errors.ObsoleteHeaderDefect] + [errors.InvalidHeaderDefect] * 2,
+ [],
+ )
+ self.assertEqual(message_id.token_type, 'message-id')
+ self.assertEqual(str(message_id.all_defects[-1]),
+ "end of input inside domain-literal")
+
def test_parse_message_id_with_remaining(self):
message_id = self._test_parse_x(
parser.parse_message_id,
diff --git a/Lib/test/test_email/test_email.py b/Lib/test/test_email/test_email.py
index 7b14305f997..b8116d073a2 100644
--- a/Lib/test/test_email/test_email.py
+++ b/Lib/test/test_email/test_email.py
@@ -389,6 +389,24 @@ class TestMessageAPI(TestEmailBase):
msg = email.message_from_string("Content-Type: blarg; baz; boo\n")
self.assertEqual(msg.get_param('baz'), '')
+ def test_continuation_sorting_part_order(self):
+ msg = email.message_from_string(
+ "Content-Disposition: attachment; "
+ "filename*=\"ignored\"; "
+ "filename*0*=\"utf-8''foo%20\"; "
+ "filename*1*=\"bar.txt\"\n"
+ )
+ filename = msg.get_filename()
+ self.assertEqual(filename, 'foo bar.txt')
+
+ def test_sorting_no_continuations(self):
+ msg = email.message_from_string(
+ "Content-Disposition: attachment; "
+ "filename*=\"bar.txt\"; "
+ )
+ filename = msg.get_filename()
+ self.assertEqual(filename, 'bar.txt')
+
def test_missing_filename(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_filename(), None)
@@ -2550,6 +2568,18 @@ Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz foo bar =?mac-iceland?q?r=8Aksm?=
self.assertEqual(str(make_header(decode_header(s))),
'"Müller T" <T.Mueller@xxx.com>')
+ def test_unencoded_ascii(self):
+ # bpo-22833/gh-67022: returns [(str, None)] rather than [(bytes, None)]
+ s = 'header without encoded words'
+ self.assertEqual(decode_header(s),
+ [('header without encoded words', None)])
+
+ def test_unencoded_utf8(self):
+ # bpo-22833/gh-67022: returns [(str, None)] rather than [(bytes, None)]
+ s = 'header with unexpected non ASCII caract\xe8res'
+ self.assertEqual(decode_header(s),
+ [('header with unexpected non ASCII caract\xe8res', None)])
+
# Test the MIMEMessage class
class TestMIMEMessage(TestEmailBase):
diff --git a/Lib/test/test_enum.py b/Lib/test/test_enum.py
index 221f9db7763..bbc7630fa83 100644
--- a/Lib/test/test_enum.py
+++ b/Lib/test/test_enum.py
@@ -36,7 +36,7 @@ def load_tests(loader, tests, ignore):
optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
))
howto_tests = os.path.join(REPO_ROOT, 'Doc/howto/enum.rst')
- if os.path.exists(howto_tests):
+ if os.path.exists(howto_tests) and sys.float_repr_style == 'short':
tests.addTests(doctest.DocFileSuite(
howto_tests,
module_relative=False,
diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py
index 175ef531386..57d0656487d 100644
--- a/Lib/test/test_exceptions.py
+++ b/Lib/test/test_exceptions.py
@@ -1445,6 +1445,7 @@ class ExceptionTests(unittest.TestCase):
foo()
support.gc_collect()
+ @support.skip_emscripten_stack_overflow()
@cpython_only
def test_recursion_normalizing_exception(self):
import_module("_testinternalcapi")
@@ -1522,6 +1523,7 @@ class ExceptionTests(unittest.TestCase):
self.assertIn(b'Done.', out)
+ @support.skip_emscripten_stack_overflow()
def test_recursion_in_except_handler(self):
def set_relative_recursion_limit(n):
diff --git a/Lib/test/test_external_inspection.py b/Lib/test/test_external_inspection.py
index ad3f669a030..0f31c225e68 100644
--- a/Lib/test/test_external_inspection.py
+++ b/Lib/test/test_external_inspection.py
@@ -4,9 +4,10 @@ import textwrap
import importlib
import sys
import socket
-from asyncio import staggered, taskgroups
+import threading
+from asyncio import staggered, taskgroups, base_events, tasks
from unittest.mock import ANY
-from test.support import os_helper, SHORT_TIMEOUT, busy_retry
+from test.support import os_helper, SHORT_TIMEOUT, busy_retry, requires_gil_enabled
from test.support.script_helper import make_script
from test.support.socket_helper import find_unused_port
@@ -16,11 +17,13 @@ PROCESS_VM_READV_SUPPORTED = False
try:
from _remote_debugging import PROCESS_VM_READV_SUPPORTED
- from _remote_debugging import get_stack_trace
- from _remote_debugging import get_async_stack_trace
- from _remote_debugging import get_all_awaited_by
+ from _remote_debugging import RemoteUnwinder
+ from _remote_debugging import FrameInfo, CoroInfo, TaskInfo
except ImportError:
- raise unittest.SkipTest("Test only runs when _remote_debugging is available")
+ raise unittest.SkipTest(
+ "Test only runs when _remote_debugging is available"
+ )
+
def _make_test_script(script_dir, script_basename, source):
to_return = make_script(script_dir, script_basename, source)
@@ -29,12 +32,32 @@ def _make_test_script(script_dir, script_basename, source):
skip_if_not_supported = unittest.skipIf(
- (sys.platform != "darwin" and sys.platform != "linux" and sys.platform != "win32"),
+ (
+ sys.platform != "darwin"
+ and sys.platform != "linux"
+ and sys.platform != "win32"
+ ),
"Test only runs on Linux, Windows and MacOS",
)
+def get_stack_trace(pid):
+ unwinder = RemoteUnwinder(pid, all_threads=True, debug=True)
+ return unwinder.get_stack_trace()
+
+
+def get_async_stack_trace(pid):
+ unwinder = RemoteUnwinder(pid, debug=True)
+ return unwinder.get_async_stack_trace()
+
+
+def get_all_awaited_by(pid):
+ unwinder = RemoteUnwinder(pid, debug=True)
+ return unwinder.get_all_awaited_by()
+
+
class TestGetStackTrace(unittest.TestCase):
+ maxDiff = None
@skip_if_not_supported
@unittest.skipIf(
@@ -46,7 +69,7 @@ class TestGetStackTrace(unittest.TestCase):
port = find_unused_port()
script = textwrap.dedent(
f"""\
- import time, sys, socket
+ import time, sys, socket, threading
# Connect to the test process
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', {port}))
@@ -55,13 +78,16 @@ class TestGetStackTrace(unittest.TestCase):
for x in range(100):
if x == 50:
baz()
+
def baz():
foo()
def foo():
- sock.sendall(b"ready"); time.sleep(10_000) # same line number
+ sock.sendall(b"ready:thread\\n"); time.sleep(10_000) # same line number
- bar()
+ t = threading.Thread(target=bar)
+ t.start()
+ sock.sendall(b"ready:main\\n"); t.join() # same line number
"""
)
stack_trace = None
@@ -82,11 +108,17 @@ class TestGetStackTrace(unittest.TestCase):
p = subprocess.Popen([sys.executable, script_name])
client_socket, _ = server_socket.accept()
server_socket.close()
- response = client_socket.recv(1024)
- self.assertEqual(response, b"ready")
+ response = b""
+ while (
+ b"ready:main" not in response
+ or b"ready:thread" not in response
+ ):
+ response += client_socket.recv(1024)
stack_trace = get_stack_trace(p.pid)
except PermissionError:
- self.skipTest("Insufficient permissions to read the stack trace")
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
finally:
if client_socket is not None:
client_socket.close()
@@ -94,13 +126,23 @@ class TestGetStackTrace(unittest.TestCase):
p.terminate()
p.wait(timeout=SHORT_TIMEOUT)
- expected_stack_trace = [
- ("foo", script_name, 14),
- ("baz", script_name, 11),
- ("bar", script_name, 9),
- ("<module>", script_name, 16),
+ thread_expected_stack_trace = [
+ FrameInfo([script_name, 15, "foo"]),
+ FrameInfo([script_name, 12, "baz"]),
+ FrameInfo([script_name, 9, "bar"]),
+ FrameInfo([threading.__file__, ANY, "Thread.run"]),
]
- self.assertEqual(stack_trace, expected_stack_trace)
+ # It is possible that there are more threads, so we check that the
+ # expected stack traces are in the result (looking at you Windows!)
+ self.assertIn((ANY, thread_expected_stack_trace), stack_trace)
+
+ # Check that the main thread stack trace is in the result
+ frame = FrameInfo([script_name, 19, "<module>"])
+ for _, stack in stack_trace:
+ if frame in stack:
+ break
+ else:
+ self.fail("Main thread stack trace not found in result")
@skip_if_not_supported
@unittest.skipIf(
@@ -160,8 +202,12 @@ class TestGetStackTrace(unittest.TestCase):
):
script_dir = os.path.join(work_dir, "script_pkg")
os.mkdir(script_dir)
- server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ server_socket = socket.socket(
+ socket.AF_INET, socket.SOCK_STREAM
+ )
+ server_socket.setsockopt(
+ socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
+ )
server_socket.bind(("localhost", port))
server_socket.settimeout(SHORT_TIMEOUT)
server_socket.listen(1)
@@ -179,7 +225,9 @@ class TestGetStackTrace(unittest.TestCase):
self.assertEqual(response, b"ready")
stack_trace = get_async_stack_trace(p.pid)
except PermissionError:
- self.skipTest("Insufficient permissions to read the stack trace")
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
finally:
if client_socket is not None:
client_socket.close()
@@ -190,79 +238,49 @@ class TestGetStackTrace(unittest.TestCase):
# sets are unordered, so we want to sort "awaited_by"s
stack_trace[2].sort(key=lambda x: x[1])
- root_task = "Task-1"
expected_stack_trace = [
[
- ("c5", script_name, 10),
- ("c4", script_name, 14),
- ("c3", script_name, 17),
- ("c2", script_name, 20),
+ FrameInfo([script_name, 10, "c5"]),
+ FrameInfo([script_name, 14, "c4"]),
+ FrameInfo([script_name, 17, "c3"]),
+ FrameInfo([script_name, 20, "c2"]),
],
"c2_root",
[
- [
- [
- (
- "TaskGroup._aexit",
- taskgroups.__file__,
- ANY,
- ),
- (
- "TaskGroup.__aexit__",
- taskgroups.__file__,
- ANY,
- ),
- ("main", script_name, 26),
- ],
- "Task-1",
- [],
- ],
- [
- [("c1", script_name, 23)],
- "sub_main_1",
+ CoroInfo(
[
[
- [
- (
- "TaskGroup._aexit",
+ FrameInfo(
+ [
taskgroups.__file__,
ANY,
- ),
- (
- "TaskGroup.__aexit__",
- taskgroups.__file__,
- ANY,
- ),
- ("main", script_name, 26),
- ],
- "Task-1",
- [],
- ]
- ],
- ],
- [
- [("c1", script_name, 23)],
- "sub_main_2",
- [
- [
- [
- (
"TaskGroup._aexit",
+ ]
+ ),
+ FrameInfo(
+ [
taskgroups.__file__,
ANY,
- ),
- (
"TaskGroup.__aexit__",
- taskgroups.__file__,
- ANY,
- ),
- ("main", script_name, 26),
- ],
- "Task-1",
- [],
- ]
- ],
- ],
+ ]
+ ),
+ FrameInfo([script_name, 26, "main"]),
+ ],
+ "Task-1",
+ ]
+ ),
+ CoroInfo(
+ [
+ [FrameInfo([script_name, 23, "c1"])],
+ "sub_main_1",
+ ]
+ ),
+ CoroInfo(
+ [
+ [FrameInfo([script_name, 23, "c1"])],
+ "sub_main_2",
+ ]
+ ),
],
]
self.assertEqual(stack_trace, expected_stack_trace)
@@ -321,7 +339,9 @@ class TestGetStackTrace(unittest.TestCase):
self.assertEqual(response, b"ready")
stack_trace = get_async_stack_trace(p.pid)
except PermissionError:
- self.skipTest("Insufficient permissions to read the stack trace")
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
finally:
if client_socket is not None:
client_socket.close()
@@ -334,9 +354,9 @@ class TestGetStackTrace(unittest.TestCase):
expected_stack_trace = [
[
- ("gen_nested_call", script_name, 10),
- ("gen", script_name, 16),
- ("main", script_name, 19),
+ FrameInfo([script_name, 10, "gen_nested_call"]),
+ FrameInfo([script_name, 16, "gen"]),
+ FrameInfo([script_name, 19, "main"]),
],
"Task-1",
[],
@@ -398,7 +418,9 @@ class TestGetStackTrace(unittest.TestCase):
self.assertEqual(response, b"ready")
stack_trace = get_async_stack_trace(p.pid)
except PermissionError:
- self.skipTest("Insufficient permissions to read the stack trace")
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
finally:
if client_socket is not None:
client_socket.close()
@@ -410,9 +432,12 @@ class TestGetStackTrace(unittest.TestCase):
stack_trace[2].sort(key=lambda x: x[1])
expected_stack_trace = [
- [("deep", script_name, 11), ("c1", script_name, 15)],
+ [
+ FrameInfo([script_name, 11, "deep"]),
+ FrameInfo([script_name, 15, "c1"]),
+ ],
"Task-2",
- [[[("main", script_name, 21)], "Task-1", []]],
+ [CoroInfo([[FrameInfo([script_name, 21, "main"])], "Task-1"])],
]
self.assertEqual(stack_trace, expected_stack_trace)
@@ -474,7 +499,9 @@ class TestGetStackTrace(unittest.TestCase):
self.assertEqual(response, b"ready")
stack_trace = get_async_stack_trace(p.pid)
except PermissionError:
- self.skipTest("Insufficient permissions to read the stack trace")
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
finally:
if client_socket is not None:
client_socket.close()
@@ -486,20 +513,29 @@ class TestGetStackTrace(unittest.TestCase):
stack_trace[2].sort(key=lambda x: x[1])
expected_stack_trace = [
[
- ("deep", script_name, 11),
- ("c1", script_name, 15),
- ("staggered_race.<locals>.run_one_coro", staggered.__file__, ANY),
+ FrameInfo([script_name, 11, "deep"]),
+ FrameInfo([script_name, 15, "c1"]),
+ FrameInfo(
+ [
+ staggered.__file__,
+ ANY,
+ "staggered_race.<locals>.run_one_coro",
+ ]
+ ),
],
"Task-2",
[
- [
+ CoroInfo(
[
- ("staggered_race", staggered.__file__, ANY),
- ("main", script_name, 21),
- ],
- "Task-1",
- [],
- ]
+ [
+ FrameInfo(
+ [staggered.__file__, ANY, "staggered_race"]
+ ),
+ FrameInfo([script_name, 21, "main"]),
+ ],
+ "Task-1",
+ ]
+ )
],
]
self.assertEqual(stack_trace, expected_stack_trace)
@@ -630,62 +666,174 @@ class TestGetStackTrace(unittest.TestCase):
# expected: at least 1000 pending tasks
self.assertGreaterEqual(len(entries), 1000)
# the first three tasks stem from the code structure
- self.assertIn((ANY, "Task-1", []), entries)
main_stack = [
- (
- "TaskGroup._aexit",
- taskgroups.__file__,
- ANY,
- ),
- (
- "TaskGroup.__aexit__",
- taskgroups.__file__,
- ANY,
+ FrameInfo([taskgroups.__file__, ANY, "TaskGroup._aexit"]),
+ FrameInfo(
+ [taskgroups.__file__, ANY, "TaskGroup.__aexit__"]
),
- ("main", script_name, 60),
+ FrameInfo([script_name, 60, "main"]),
]
self.assertIn(
- (ANY, "server task", [[main_stack, ANY]]),
+ TaskInfo(
+ [ANY, "Task-1", [CoroInfo([main_stack, ANY])], []]
+ ),
entries,
)
self.assertIn(
- (ANY, "echo client spam", [[main_stack, ANY]]),
+ TaskInfo(
+ [
+ ANY,
+ "server task",
+ [
+ CoroInfo(
+ [
+ [
+ FrameInfo(
+ [
+ base_events.__file__,
+ ANY,
+ "Server.serve_forever",
+ ]
+ )
+ ],
+ ANY,
+ ]
+ )
+ ],
+ [
+ CoroInfo(
+ [
+ [
+ FrameInfo(
+ [
+ taskgroups.__file__,
+ ANY,
+ "TaskGroup._aexit",
+ ]
+ ),
+ FrameInfo(
+ [
+ taskgroups.__file__,
+ ANY,
+ "TaskGroup.__aexit__",
+ ]
+ ),
+ FrameInfo(
+ [script_name, ANY, "main"]
+ ),
+ ],
+ ANY,
+ ]
+ )
+ ],
+ ]
+ ),
+ entries,
+ )
+ self.assertIn(
+ TaskInfo(
+ [
+ ANY,
+ "Task-4",
+ [
+ CoroInfo(
+ [
+ [
+ FrameInfo(
+ [tasks.__file__, ANY, "sleep"]
+ ),
+ FrameInfo(
+ [
+ script_name,
+ 38,
+ "echo_client",
+ ]
+ ),
+ ],
+ ANY,
+ ]
+ )
+ ],
+ [
+ CoroInfo(
+ [
+ [
+ FrameInfo(
+ [
+ taskgroups.__file__,
+ ANY,
+ "TaskGroup._aexit",
+ ]
+ ),
+ FrameInfo(
+ [
+ taskgroups.__file__,
+ ANY,
+ "TaskGroup.__aexit__",
+ ]
+ ),
+ FrameInfo(
+ [
+ script_name,
+ 41,
+ "echo_client_spam",
+ ]
+ ),
+ ],
+ ANY,
+ ]
+ )
+ ],
+ ]
+ ),
entries,
)
- expected_stack = [
- [
+ expected_awaited_by = [
+ CoroInfo(
[
- (
- "TaskGroup._aexit",
- taskgroups.__file__,
- ANY,
- ),
- (
- "TaskGroup.__aexit__",
- taskgroups.__file__,
- ANY,
- ),
- ("echo_client_spam", script_name, 41),
- ],
- ANY,
- ]
+ [
+ FrameInfo(
+ [
+ taskgroups.__file__,
+ ANY,
+ "TaskGroup._aexit",
+ ]
+ ),
+ FrameInfo(
+ [
+ taskgroups.__file__,
+ ANY,
+ "TaskGroup.__aexit__",
+ ]
+ ),
+ FrameInfo(
+ [script_name, 41, "echo_client_spam"]
+ ),
+ ],
+ ANY,
+ ]
+ )
]
- tasks_with_stack = [
- task for task in entries if task[2] == expected_stack
+ tasks_with_awaited = [
+ task
+ for task in entries
+ if task.awaited_by == expected_awaited_by
]
- self.assertGreaterEqual(len(tasks_with_stack), 1000)
+ self.assertGreaterEqual(len(tasks_with_awaited), 1000)
# the final task will have some random number, but it should for
# sure be one of the echo client spam horde (In windows this is not true
# for some reason)
if sys.platform != "win32":
self.assertEqual(
- expected_stack,
- entries[-1][2],
+ tasks_with_awaited[-1].awaited_by,
+ entries[-1].awaited_by,
)
except PermissionError:
- self.skipTest("Insufficient permissions to read the stack trace")
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
finally:
if client_socket is not None:
client_socket.close()
@@ -700,15 +848,154 @@ class TestGetStackTrace(unittest.TestCase):
)
def test_self_trace(self):
stack_trace = get_stack_trace(os.getpid())
+ # It is possible that there are more threads, so we check that the
+ # expected stack traces are in the result (looking at you Windows!)
+ this_thread_stack = None
+ for thread_id, stack in stack_trace:
+ if thread_id == threading.get_native_id():
+ this_thread_stack = stack
+ break
+ self.assertIsNotNone(this_thread_stack)
self.assertEqual(
- stack_trace[0],
- (
- "TestGetStackTrace.test_self_trace",
- __file__,
- self.test_self_trace.__code__.co_firstlineno + 6,
- ),
+ stack[:2],
+ [
+ FrameInfo(
+ [
+ __file__,
+ get_stack_trace.__code__.co_firstlineno + 2,
+ "get_stack_trace",
+ ]
+ ),
+ FrameInfo(
+ [
+ __file__,
+ self.test_self_trace.__code__.co_firstlineno + 6,
+ "TestGetStackTrace.test_self_trace",
+ ]
+ ),
+ ],
+ )
+
+ @skip_if_not_supported
+ @unittest.skipIf(
+ sys.platform == "linux" and not PROCESS_VM_READV_SUPPORTED,
+ "Test only runs on Linux with process_vm_readv support",
+ )
+ @requires_gil_enabled("Free threaded builds don't have an 'active thread'")
+ def test_only_active_thread(self):
+ # Test that only_active_thread parameter works correctly
+ port = find_unused_port()
+ script = textwrap.dedent(
+ f"""\
+ import time, sys, socket, threading
+
+ # Connect to the test process
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.connect(('localhost', {port}))
+
+ def worker_thread(name, barrier, ready_event):
+ barrier.wait() # Synchronize thread start
+ ready_event.wait() # Wait for main thread signal
+ # Sleep to keep thread alive
+ time.sleep(10_000)
+
+ def main_work():
+ # Do busy work to hold the GIL
+ sock.sendall(b"working\\n")
+ count = 0
+ while count < 100000000:
+ count += 1
+ if count % 10000000 == 0:
+ pass # Keep main thread busy
+ sock.sendall(b"done\\n")
+
+ # Create synchronization primitives
+ num_threads = 3
+ barrier = threading.Barrier(num_threads + 1) # +1 for main thread
+ ready_event = threading.Event()
+
+ # Start worker threads
+ threads = []
+ for i in range(num_threads):
+ t = threading.Thread(target=worker_thread, args=(f"Worker-{{i}}", barrier, ready_event))
+ t.start()
+ threads.append(t)
+
+ # Wait for all threads to be ready
+ barrier.wait()
+
+ # Signal ready to parent process
+ sock.sendall(b"ready\\n")
+
+ # Signal threads to start waiting
+ ready_event.set()
+
+ # Give threads time to start sleeping
+ time.sleep(0.1)
+
+ # Now do busy work to hold the GIL
+ main_work()
+ """
)
+ with os_helper.temp_dir() as work_dir:
+ script_dir = os.path.join(work_dir, "script_pkg")
+ os.mkdir(script_dir)
+
+ # Create a socket server to communicate with the target process
+ server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ server_socket.bind(("localhost", port))
+ server_socket.settimeout(SHORT_TIMEOUT)
+ server_socket.listen(1)
+
+ script_name = _make_test_script(script_dir, "script", script)
+ client_socket = None
+ try:
+ p = subprocess.Popen([sys.executable, script_name])
+ client_socket, _ = server_socket.accept()
+ server_socket.close()
+
+ # Wait for ready signal
+ response = b""
+ while b"ready" not in response:
+ response += client_socket.recv(1024)
+
+ # Wait for the main thread to start its busy work
+ while b"working" not in response:
+ response += client_socket.recv(1024)
+
+ # Get stack trace with all threads
+ unwinder_all = RemoteUnwinder(p.pid, all_threads=True)
+ all_traces = unwinder_all.get_stack_trace()
+
+ # Get stack trace with only GIL holder
+ unwinder_gil = RemoteUnwinder(p.pid, only_active_thread=True)
+ gil_traces = unwinder_gil.get_stack_trace()
+
+ except PermissionError:
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
+ finally:
+ if client_socket is not None:
+ client_socket.close()
+ p.kill()
+ p.terminate()
+ p.wait(timeout=SHORT_TIMEOUT)
+
+ # Verify we got multiple threads in all_traces
+ self.assertGreater(len(all_traces), 1, "Should have multiple threads")
+
+ # Verify we got exactly one thread in gil_traces
+ self.assertEqual(len(gil_traces), 1, "Should have exactly one GIL holder")
+
+ # The GIL holder should be in the all_traces list
+ gil_thread_id = gil_traces[0][0]
+ all_thread_ids = [trace[0] for trace in all_traces]
+ self.assertIn(gil_thread_id, all_thread_ids,
+ "GIL holder should be among all threads")
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_fcntl.py b/Lib/test/test_fcntl.py
index e0e6782258f..7140a7b4f29 100644
--- a/Lib/test/test_fcntl.py
+++ b/Lib/test/test_fcntl.py
@@ -11,7 +11,7 @@ from test.support import (
cpython_only, get_pagesize, is_apple, requires_subprocess, verbose
)
from test.support.import_helper import import_module
-from test.support.os_helper import TESTFN, unlink
+from test.support.os_helper import TESTFN, unlink, make_bad_fd
# Skip test if no fcntl module.
@@ -274,6 +274,17 @@ class TestFcntl(unittest.TestCase):
def test_fcntl_large_buffer(self):
self._check_fcntl_not_mutate_len(2024)
+ @unittest.skipUnless(hasattr(fcntl, 'F_DUPFD'), 'need fcntl.F_DUPFD')
+ def test_bad_fd(self):
+ # gh-134744: Test error handling
+ fd = make_bad_fd()
+ with self.assertRaises(OSError):
+ fcntl.fcntl(fd, fcntl.F_DUPFD, 0)
+ with self.assertRaises(OSError):
+ fcntl.fcntl(fd, fcntl.F_DUPFD, b'\0' * 10)
+ with self.assertRaises(OSError):
+ fcntl.fcntl(fd, fcntl.F_DUPFD, b'\0' * 2048)
+
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/test/test_fileio.py b/Lib/test/test_fileio.py
index 5a0f033ebb8..e3d54f6315a 100644
--- a/Lib/test/test_fileio.py
+++ b/Lib/test/test_fileio.py
@@ -591,7 +591,7 @@ class OtherFileTests:
try:
f.write(b"abc")
f.close()
- with open(TESTFN_ASCII, "rb") as f:
+ with self.open(TESTFN_ASCII, "rb") as f:
self.assertEqual(f.read(), b"abc")
finally:
os.unlink(TESTFN_ASCII)
@@ -608,7 +608,7 @@ class OtherFileTests:
try:
f.write(b"abc")
f.close()
- with open(TESTFN_UNICODE, "rb") as f:
+ with self.open(TESTFN_UNICODE, "rb") as f:
self.assertEqual(f.read(), b"abc")
finally:
os.unlink(TESTFN_UNICODE)
@@ -692,13 +692,13 @@ class OtherFileTests:
def testAppend(self):
try:
- f = open(TESTFN, 'wb')
+ f = self.FileIO(TESTFN, 'wb')
f.write(b'spam')
f.close()
- f = open(TESTFN, 'ab')
+ f = self.FileIO(TESTFN, 'ab')
f.write(b'eggs')
f.close()
- f = open(TESTFN, 'rb')
+ f = self.FileIO(TESTFN, 'rb')
d = f.read()
f.close()
self.assertEqual(d, b'spameggs')
@@ -734,6 +734,7 @@ class OtherFileTests:
class COtherFileTests(OtherFileTests, unittest.TestCase):
FileIO = _io.FileIO
modulename = '_io'
+ open = _io.open
@cpython_only
def testInvalidFd_overflow(self):
@@ -755,6 +756,7 @@ class COtherFileTests(OtherFileTests, unittest.TestCase):
class PyOtherFileTests(OtherFileTests, unittest.TestCase):
FileIO = _pyio.FileIO
modulename = '_pyio'
+ open = _pyio.open
def test_open_code(self):
# Check that the default behaviour of open_code matches
diff --git a/Lib/test/test_float.py b/Lib/test/test_float.py
index 237d7b5d35e..00518abcb11 100644
--- a/Lib/test/test_float.py
+++ b/Lib/test/test_float.py
@@ -795,6 +795,8 @@ class FormatTestCase(unittest.TestCase):
self.assertRaises(ValueError, format, x, '.6,n')
@support.requires_IEEE_754
+ @unittest.skipUnless(sys.float_repr_style == 'short',
+ "applies only when using short float repr style")
def test_format_testfile(self):
with open(format_testfile, encoding="utf-8") as testfile:
for line in testfile:
diff --git a/Lib/test/test_format.py b/Lib/test/test_format.py
index c7cc32e0949..1f626d87fa6 100644
--- a/Lib/test/test_format.py
+++ b/Lib/test/test_format.py
@@ -346,12 +346,12 @@ class FormatTest(unittest.TestCase):
testcommon(b"%s", memoryview(b"abc"), b"abc")
# %a will give the equivalent of
# repr(some_obj).encode('ascii', 'backslashreplace')
- testcommon(b"%a", 3.14, b"3.14")
+ testcommon(b"%a", 3.25, b"3.25")
testcommon(b"%a", b"ghi", b"b'ghi'")
testcommon(b"%a", "jkl", b"'jkl'")
testcommon(b"%a", "\u0544", b"'\\u0544'")
# %r is an alias for %a
- testcommon(b"%r", 3.14, b"3.14")
+ testcommon(b"%r", 3.25, b"3.25")
testcommon(b"%r", b"ghi", b"b'ghi'")
testcommon(b"%r", "jkl", b"'jkl'")
testcommon(b"%r", "\u0544", b"'\\u0544'")
@@ -407,19 +407,19 @@ class FormatTest(unittest.TestCase):
self.assertEqual(format("abc", "\u2007<5"), "abc\u2007\u2007")
self.assertEqual(format(123, "\u2007<5"), "123\u2007\u2007")
- self.assertEqual(format(12.3, "\u2007<6"), "12.3\u2007\u2007")
+ self.assertEqual(format(12.5, "\u2007<6"), "12.5\u2007\u2007")
self.assertEqual(format(0j, "\u2007<4"), "0j\u2007\u2007")
self.assertEqual(format(1+2j, "\u2007<8"), "(1+2j)\u2007\u2007")
self.assertEqual(format("abc", "\u2007>5"), "\u2007\u2007abc")
self.assertEqual(format(123, "\u2007>5"), "\u2007\u2007123")
- self.assertEqual(format(12.3, "\u2007>6"), "\u2007\u200712.3")
+ self.assertEqual(format(12.5, "\u2007>6"), "\u2007\u200712.5")
self.assertEqual(format(1+2j, "\u2007>8"), "\u2007\u2007(1+2j)")
self.assertEqual(format(0j, "\u2007>4"), "\u2007\u20070j")
self.assertEqual(format("abc", "\u2007^5"), "\u2007abc\u2007")
self.assertEqual(format(123, "\u2007^5"), "\u2007123\u2007")
- self.assertEqual(format(12.3, "\u2007^6"), "\u200712.3\u2007")
+ self.assertEqual(format(12.5, "\u2007^6"), "\u200712.5\u2007")
self.assertEqual(format(1+2j, "\u2007^8"), "\u2007(1+2j)\u2007")
self.assertEqual(format(0j, "\u2007^4"), "\u20070j\u2007")
diff --git a/Lib/test/test_fractions.py b/Lib/test/test_fractions.py
index 96b3f305194..1875a2f529c 100644
--- a/Lib/test/test_fractions.py
+++ b/Lib/test/test_fractions.py
@@ -1480,11 +1480,8 @@ class FractionTest(unittest.TestCase):
(F('-1234.5678'), '08,.0f', '-001,235'),
(F('-1234.5678'), '09,.0f', '-0,001,235'),
# Corner-case - zero-padding specified through fill and align
- # instead of the zero-pad character - in this case, treat '0' as a
- # regular fill character and don't attempt to insert commas into
- # the filled portion. This differs from the int and float
- # behaviour.
- (F('1234.5678'), '0=12,.2f', '00001,234.57'),
+ # instead of the zero-pad character.
+ (F('1234.5678'), '0=12,.2f', '0,001,234.57'),
# Corner case where it's not clear whether the '0' indicates zero
# padding or gives the minimum width, but there's still an obvious
# answer to give. We want this to work in case the minimum width
@@ -1518,6 +1515,8 @@ class FractionTest(unittest.TestCase):
(F(51, 1000), '.1f', '0.1'),
(F(149, 1000), '.1f', '0.1'),
(F(151, 1000), '.1f', '0.2'),
+ (F(22, 7), '.02f', '3.14'), # issue gh-130662
+ (F(22, 7), '005.02f', '03.14'),
]
for fraction, spec, expected in testcases:
with self.subTest(fraction=fraction, spec=spec):
@@ -1616,12 +1615,6 @@ class FractionTest(unittest.TestCase):
'=010%',
'>00.2f',
'>00f',
- # Too many zeros - minimum width should not have leading zeros
- '006f',
- # Leading zeros in precision
- '.010f',
- '.02f',
- '.000f',
# Missing precision
'.e',
'.f',
diff --git a/Lib/test/test_free_threading/test_generators.py b/Lib/test/test_free_threading/test_generators.py
new file mode 100644
index 00000000000..d01675eb38b
--- /dev/null
+++ b/Lib/test/test_free_threading/test_generators.py
@@ -0,0 +1,51 @@
+import concurrent.futures
+import unittest
+from threading import Barrier
+from unittest import TestCase
+import random
+import time
+
+from test.support import threading_helper, Py_GIL_DISABLED
+
+threading_helper.requires_working_threading(module=True)
+
+
+def random_sleep():
+ delay_us = random.randint(50, 100)
+ time.sleep(delay_us * 1e-6)
+
+def random_string():
+ return ''.join(random.choice('0123456789ABCDEF') for _ in range(10))
+
+def set_gen_name(g, b):
+ b.wait()
+ random_sleep()
+ g.__name__ = random_string()
+ return g.__name__
+
+def set_gen_qualname(g, b):
+ b.wait()
+ random_sleep()
+ g.__qualname__ = random_string()
+ return g.__qualname__
+
+
+@unittest.skipUnless(Py_GIL_DISABLED, "Enable only in FT build")
+class TestFTGenerators(TestCase):
+ NUM_THREADS = 4
+
+ def concurrent_write_with_func(self, func):
+ gen = (x for x in range(42))
+ for j in range(1000):
+ with concurrent.futures.ThreadPoolExecutor(max_workers=self.NUM_THREADS) as executor:
+ b = Barrier(self.NUM_THREADS)
+ futures = {executor.submit(func, gen, b): i for i in range(self.NUM_THREADS)}
+ for fut in concurrent.futures.as_completed(futures):
+ gen_name = fut.result()
+ self.assertEqual(len(gen_name), 10)
+
+ def test_concurrent_write(self):
+ with self.subTest(func=set_gen_name):
+ self.concurrent_write_with_func(func=set_gen_name)
+ with self.subTest(func=set_gen_qualname):
+ self.concurrent_write_with_func(func=set_gen_qualname)
diff --git a/Lib/test/test_free_threading/test_heapq.py b/Lib/test/test_free_threading/test_heapq.py
new file mode 100644
index 00000000000..ee7adfb2b78
--- /dev/null
+++ b/Lib/test/test_free_threading/test_heapq.py
@@ -0,0 +1,267 @@
+import unittest
+
+import heapq
+
+from enum import Enum
+from threading import Thread, Barrier, Lock
+from random import shuffle, randint
+
+from test.support import threading_helper
+from test import test_heapq
+
+
+NTHREADS = 10
+OBJECT_COUNT = 5_000
+
+
+class Heap(Enum):
+ MIN = 1
+ MAX = 2
+
+
+@threading_helper.requires_working_threading()
+class TestHeapq(unittest.TestCase):
+ def setUp(self):
+ self.test_heapq = test_heapq.TestHeapPython()
+
+ def test_racing_heapify(self):
+ heap = list(range(OBJECT_COUNT))
+ shuffle(heap)
+
+ self.run_concurrently(
+ worker_func=heapq.heapify, args=(heap,), nthreads=NTHREADS
+ )
+ self.test_heapq.check_invariant(heap)
+
+ def test_racing_heappush(self):
+ heap = []
+
+ def heappush_func(heap):
+ for item in reversed(range(OBJECT_COUNT)):
+ heapq.heappush(heap, item)
+
+ self.run_concurrently(
+ worker_func=heappush_func, args=(heap,), nthreads=NTHREADS
+ )
+ self.test_heapq.check_invariant(heap)
+
+ def test_racing_heappop(self):
+ heap = self.create_heap(OBJECT_COUNT, Heap.MIN)
+
+ # Each thread pops (OBJECT_COUNT / NTHREADS) items
+ self.assertEqual(OBJECT_COUNT % NTHREADS, 0)
+ per_thread_pop_count = OBJECT_COUNT // NTHREADS
+
+ def heappop_func(heap, pop_count):
+ local_list = []
+ for _ in range(pop_count):
+ item = heapq.heappop(heap)
+ local_list.append(item)
+
+ # Each local list should be sorted
+ self.assertTrue(self.is_sorted_ascending(local_list))
+
+ self.run_concurrently(
+ worker_func=heappop_func,
+ args=(heap, per_thread_pop_count),
+ nthreads=NTHREADS,
+ )
+ self.assertEqual(len(heap), 0)
+
+ def test_racing_heappushpop(self):
+ heap = self.create_heap(OBJECT_COUNT, Heap.MIN)
+ pushpop_items = self.create_random_list(-5_000, 10_000, OBJECT_COUNT)
+
+ def heappushpop_func(heap, pushpop_items):
+ for item in pushpop_items:
+ popped_item = heapq.heappushpop(heap, item)
+ self.assertTrue(popped_item <= item)
+
+ self.run_concurrently(
+ worker_func=heappushpop_func,
+ args=(heap, pushpop_items),
+ nthreads=NTHREADS,
+ )
+ self.assertEqual(len(heap), OBJECT_COUNT)
+ self.test_heapq.check_invariant(heap)
+
+ def test_racing_heapreplace(self):
+ heap = self.create_heap(OBJECT_COUNT, Heap.MIN)
+ replace_items = self.create_random_list(-5_000, 10_000, OBJECT_COUNT)
+
+ def heapreplace_func(heap, replace_items):
+ for item in replace_items:
+ heapq.heapreplace(heap, item)
+
+ self.run_concurrently(
+ worker_func=heapreplace_func,
+ args=(heap, replace_items),
+ nthreads=NTHREADS,
+ )
+ self.assertEqual(len(heap), OBJECT_COUNT)
+ self.test_heapq.check_invariant(heap)
+
+ def test_racing_heapify_max(self):
+ max_heap = list(range(OBJECT_COUNT))
+ shuffle(max_heap)
+
+ self.run_concurrently(
+ worker_func=heapq.heapify_max, args=(max_heap,), nthreads=NTHREADS
+ )
+ self.test_heapq.check_max_invariant(max_heap)
+
+ def test_racing_heappush_max(self):
+ max_heap = []
+
+ def heappush_max_func(max_heap):
+ for item in range(OBJECT_COUNT):
+ heapq.heappush_max(max_heap, item)
+
+ self.run_concurrently(
+ worker_func=heappush_max_func, args=(max_heap,), nthreads=NTHREADS
+ )
+ self.test_heapq.check_max_invariant(max_heap)
+
+ def test_racing_heappop_max(self):
+ max_heap = self.create_heap(OBJECT_COUNT, Heap.MAX)
+
+ # Each thread pops (OBJECT_COUNT / NTHREADS) items
+ self.assertEqual(OBJECT_COUNT % NTHREADS, 0)
+ per_thread_pop_count = OBJECT_COUNT // NTHREADS
+
+ def heappop_max_func(max_heap, pop_count):
+ local_list = []
+ for _ in range(pop_count):
+ item = heapq.heappop_max(max_heap)
+ local_list.append(item)
+
+ # Each local list should be sorted
+ self.assertTrue(self.is_sorted_descending(local_list))
+
+ self.run_concurrently(
+ worker_func=heappop_max_func,
+ args=(max_heap, per_thread_pop_count),
+ nthreads=NTHREADS,
+ )
+ self.assertEqual(len(max_heap), 0)
+
+ def test_racing_heappushpop_max(self):
+ max_heap = self.create_heap(OBJECT_COUNT, Heap.MAX)
+ pushpop_items = self.create_random_list(-5_000, 10_000, OBJECT_COUNT)
+
+ def heappushpop_max_func(max_heap, pushpop_items):
+ for item in pushpop_items:
+ popped_item = heapq.heappushpop_max(max_heap, item)
+ self.assertTrue(popped_item >= item)
+
+ self.run_concurrently(
+ worker_func=heappushpop_max_func,
+ args=(max_heap, pushpop_items),
+ nthreads=NTHREADS,
+ )
+ self.assertEqual(len(max_heap), OBJECT_COUNT)
+ self.test_heapq.check_max_invariant(max_heap)
+
+ def test_racing_heapreplace_max(self):
+ max_heap = self.create_heap(OBJECT_COUNT, Heap.MAX)
+ replace_items = self.create_random_list(-5_000, 10_000, OBJECT_COUNT)
+
+ def heapreplace_max_func(max_heap, replace_items):
+ for item in replace_items:
+ heapq.heapreplace_max(max_heap, item)
+
+ self.run_concurrently(
+ worker_func=heapreplace_max_func,
+ args=(max_heap, replace_items),
+ nthreads=NTHREADS,
+ )
+ self.assertEqual(len(max_heap), OBJECT_COUNT)
+ self.test_heapq.check_max_invariant(max_heap)
+
+ def test_lock_free_list_read(self):
+ n, n_threads = 1_000, 10
+ l = []
+ barrier = Barrier(n_threads * 2)
+
+ count = 0
+ lock = Lock()
+
+ def worker():
+ with lock:
+ nonlocal count
+ x = count
+ count += 1
+
+ barrier.wait()
+ for i in range(n):
+ if x % 2:
+ heapq.heappush(l, 1)
+ heapq.heappop(l)
+ else:
+ try:
+ l[0]
+ except IndexError:
+ pass
+
+ self.run_concurrently(worker, (), n_threads * 2)
+
+ @staticmethod
+ def is_sorted_ascending(lst):
+ """
+ Check if the list is sorted in ascending order (non-decreasing).
+ """
+ return all(lst[i - 1] <= lst[i] for i in range(1, len(lst)))
+
+ @staticmethod
+ def is_sorted_descending(lst):
+ """
+ Check if the list is sorted in descending order (non-increasing).
+ """
+ return all(lst[i - 1] >= lst[i] for i in range(1, len(lst)))
+
+ @staticmethod
+ def create_heap(size, heap_kind):
+ """
+ Create a min/max heap where elements are in the range (0, size - 1) and
+ shuffled before heapify.
+ """
+ heap = list(range(size))
+ shuffle(heap)
+ if heap_kind == Heap.MIN:
+ heapq.heapify(heap)
+ else:
+ heapq.heapify_max(heap)
+
+ return heap
+
+ @staticmethod
+ def create_random_list(a, b, size):
+ """
+ Create a list of random numbers between a and b (inclusive).
+ """
+ return [randint(a, b) for _ in range(size)]
+
+ def run_concurrently(self, worker_func, args, nthreads):
+ """
+ Run the worker function concurrently in multiple threads.
+ """
+ barrier = Barrier(nthreads)
+
+ def wrapper_func(*args):
+ # Wait for all threads to reach this point before proceeding.
+ barrier.wait()
+ worker_func(*args)
+
+ with threading_helper.catch_threading_exception() as cm:
+ workers = (
+ Thread(target=wrapper_func, args=args) for _ in range(nthreads)
+ )
+ with threading_helper.start_threads(workers):
+ pass
+
+ # Worker threads should not raise any exceptions
+ self.assertIsNone(cm.exc_value)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Lib/test/test_free_threading/test_io.py b/Lib/test/test_free_threading/test_io.py
index f9bec740ddf..41d89e04da8 100644
--- a/Lib/test/test_free_threading/test_io.py
+++ b/Lib/test/test_free_threading/test_io.py
@@ -1,12 +1,13 @@
+import io
+import _pyio as pyio
import threading
from unittest import TestCase
from test.support import threading_helper
from random import randint
-from io import BytesIO
from sys import getsizeof
-class TestBytesIO(TestCase):
+class ThreadSafetyMixin:
# Test pretty much everything that can break under free-threading.
# Non-deterministic, but at least one of these things will fail if
# BytesIO object is not free-thread safe.
@@ -90,20 +91,27 @@ class TestBytesIO(TestCase):
barrier.wait()
getsizeof(b)
- self.check([write] * 10, BytesIO())
- self.check([writelines] * 10, BytesIO())
- self.check([write] * 10 + [truncate] * 10, BytesIO())
- self.check([truncate] + [read] * 10, BytesIO(b'0\n'*204800))
- self.check([truncate] + [read1] * 10, BytesIO(b'0\n'*204800))
- self.check([truncate] + [readline] * 10, BytesIO(b'0\n'*20480))
- self.check([truncate] + [readlines] * 10, BytesIO(b'0\n'*20480))
- self.check([truncate] + [readinto] * 10, BytesIO(b'0\n'*204800), bytearray(b'0\n'*204800))
- self.check([close] + [write] * 10, BytesIO())
- self.check([truncate] + [getvalue] * 10, BytesIO(b'0\n'*204800))
- self.check([truncate] + [getbuffer] * 10, BytesIO(b'0\n'*204800))
- self.check([truncate] + [iter] * 10, BytesIO(b'0\n'*20480))
- self.check([truncate] + [getstate] * 10, BytesIO(b'0\n'*204800))
- self.check([truncate] + [setstate] * 10, BytesIO(b'0\n'*204800), (b'123', 0, None))
- self.check([truncate] + [sizeof] * 10, BytesIO(b'0\n'*204800))
+ self.check([write] * 10, self.ioclass())
+ self.check([writelines] * 10, self.ioclass())
+ self.check([write] * 10 + [truncate] * 10, self.ioclass())
+ self.check([truncate] + [read] * 10, self.ioclass(b'0\n'*204800))
+ self.check([truncate] + [read1] * 10, self.ioclass(b'0\n'*204800))
+ self.check([truncate] + [readline] * 10, self.ioclass(b'0\n'*20480))
+ self.check([truncate] + [readlines] * 10, self.ioclass(b'0\n'*20480))
+ self.check([truncate] + [readinto] * 10, self.ioclass(b'0\n'*204800), bytearray(b'0\n'*204800))
+ self.check([close] + [write] * 10, self.ioclass())
+ self.check([truncate] + [getvalue] * 10, self.ioclass(b'0\n'*204800))
+ self.check([truncate] + [getbuffer] * 10, self.ioclass(b'0\n'*204800))
+ self.check([truncate] + [iter] * 10, self.ioclass(b'0\n'*20480))
+ self.check([truncate] + [getstate] * 10, self.ioclass(b'0\n'*204800))
+ state = self.ioclass(b'123').__getstate__()
+ self.check([truncate] + [setstate] * 10, self.ioclass(b'0\n'*204800), state)
+ self.check([truncate] + [sizeof] * 10, self.ioclass(b'0\n'*204800))
# no tests for seek or tell because they don't break anything
+
+class CBytesIOTest(ThreadSafetyMixin, TestCase):
+ ioclass = io.BytesIO
+
+class PyBytesIOTest(ThreadSafetyMixin, TestCase):
+ ioclass = pyio.BytesIO
diff --git a/Lib/test/test_free_threading/test_itertools.py b/Lib/test/test_free_threading/test_itertools.py
new file mode 100644
index 00000000000..9d366041917
--- /dev/null
+++ b/Lib/test/test_free_threading/test_itertools.py
@@ -0,0 +1,95 @@
+import unittest
+from threading import Thread, Barrier
+from itertools import batched, chain, cycle
+from test.support import threading_helper
+
+
+threading_helper.requires_working_threading(module=True)
+
+class ItertoolsThreading(unittest.TestCase):
+
+ @threading_helper.reap_threads
+ def test_batched(self):
+ number_of_threads = 10
+ number_of_iterations = 20
+ barrier = Barrier(number_of_threads)
+ def work(it):
+ barrier.wait()
+ while True:
+ try:
+ next(it)
+ except StopIteration:
+ break
+
+ data = tuple(range(1000))
+ for it in range(number_of_iterations):
+ batch_iterator = batched(data, 2)
+ worker_threads = []
+ for ii in range(number_of_threads):
+ worker_threads.append(
+ Thread(target=work, args=[batch_iterator]))
+
+ with threading_helper.start_threads(worker_threads):
+ pass
+
+ barrier.reset()
+
+ @threading_helper.reap_threads
+ def test_cycle(self):
+ number_of_threads = 6
+ number_of_iterations = 10
+ number_of_cycles = 400
+
+ barrier = Barrier(number_of_threads)
+ def work(it):
+ barrier.wait()
+ for _ in range(number_of_cycles):
+ try:
+ next(it)
+ except StopIteration:
+ pass
+
+ data = (1, 2, 3, 4)
+ for it in range(number_of_iterations):
+ cycle_iterator = cycle(data)
+ worker_threads = []
+ for ii in range(number_of_threads):
+ worker_threads.append(
+ Thread(target=work, args=[cycle_iterator]))
+
+ with threading_helper.start_threads(worker_threads):
+ pass
+
+ barrier.reset()
+
+ @threading_helper.reap_threads
+ def test_chain(self):
+ number_of_threads = 6
+ number_of_iterations = 20
+
+ barrier = Barrier(number_of_threads)
+ def work(it):
+ barrier.wait()
+ while True:
+ try:
+ next(it)
+ except StopIteration:
+ break
+
+ data = [(1, )] * 200
+ for it in range(number_of_iterations):
+ chain_iterator = chain(*data)
+ worker_threads = []
+ for ii in range(number_of_threads):
+ worker_threads.append(
+ Thread(target=work, args=[chain_iterator]))
+
+ with threading_helper.start_threads(worker_threads):
+ pass
+
+ barrier.reset()
+
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Lib/test/test_free_threading/test_itertools_batched.py b/Lib/test/test_free_threading/test_itertools_batched.py
deleted file mode 100644
index a754b4f9ea9..00000000000
--- a/Lib/test/test_free_threading/test_itertools_batched.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import unittest
-from threading import Thread, Barrier
-from itertools import batched
-from test.support import threading_helper
-
-
-threading_helper.requires_working_threading(module=True)
-
-class EnumerateThreading(unittest.TestCase):
-
- @threading_helper.reap_threads
- def test_threading(self):
- number_of_threads = 10
- number_of_iterations = 20
- barrier = Barrier(number_of_threads)
- def work(it):
- barrier.wait()
- while True:
- try:
- _ = next(it)
- except StopIteration:
- break
-
- data = tuple(range(1000))
- for it in range(number_of_iterations):
- batch_iterator = batched(data, 2)
- worker_threads = []
- for ii in range(number_of_threads):
- worker_threads.append(
- Thread(target=work, args=[batch_iterator]))
-
- with threading_helper.start_threads(worker_threads):
- pass
-
- barrier.reset()
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/Lib/test/test_free_threading/test_itertools_combinatoric.py b/Lib/test/test_free_threading/test_itertools_combinatoric.py
new file mode 100644
index 00000000000..5b3b88deedd
--- /dev/null
+++ b/Lib/test/test_free_threading/test_itertools_combinatoric.py
@@ -0,0 +1,51 @@
+import unittest
+from threading import Thread, Barrier
+from itertools import combinations, product
+from test.support import threading_helper
+
+
+threading_helper.requires_working_threading(module=True)
+
+def test_concurrent_iteration(iterator, number_of_threads):
+ barrier = Barrier(number_of_threads)
+ def iterator_worker(it):
+ barrier.wait()
+ while True:
+ try:
+ _ = next(it)
+ except StopIteration:
+ return
+
+ worker_threads = []
+ for ii in range(number_of_threads):
+ worker_threads.append(
+ Thread(target=iterator_worker, args=[iterator]))
+
+ with threading_helper.start_threads(worker_threads):
+ pass
+
+ barrier.reset()
+
+class ItertoolsThreading(unittest.TestCase):
+
+ @threading_helper.reap_threads
+ def test_combinations(self):
+ number_of_threads = 10
+ number_of_iterations = 24
+
+ for it in range(number_of_iterations):
+ iterator = combinations((1, 2, 3, 4, 5), 2)
+ test_concurrent_iteration(iterator, number_of_threads)
+
+ @threading_helper.reap_threads
+ def test_product(self):
+ number_of_threads = 10
+ number_of_iterations = 24
+
+ for it in range(number_of_iterations):
+ iterator = product((1, 2, 3, 4, 5), (10, 20, 30))
+ test_concurrent_iteration(iterator, number_of_threads)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Lib/test/test_fstring.py b/Lib/test/test_fstring.py
index dd58e032a8b..58a30c8e6ac 100644
--- a/Lib/test/test_fstring.py
+++ b/Lib/test/test_fstring.py
@@ -1336,9 +1336,9 @@ x = (
def test_conversions(self):
self.assertEqual(f'{3.14:10.10}', ' 3.14')
- self.assertEqual(f'{3.14!s:10.10}', '3.14 ')
- self.assertEqual(f'{3.14!r:10.10}', '3.14 ')
- self.assertEqual(f'{3.14!a:10.10}', '3.14 ')
+ self.assertEqual(f'{1.25!s:10.10}', '1.25 ')
+ self.assertEqual(f'{1.25!r:10.10}', '1.25 ')
+ self.assertEqual(f'{1.25!a:10.10}', '1.25 ')
self.assertEqual(f'{"a"}', 'a')
self.assertEqual(f'{"a"!r}', "'a'")
@@ -1347,7 +1347,7 @@ x = (
# Conversions can have trailing whitespace after them since it
# does not provide any significance
self.assertEqual(f"{3!s }", "3")
- self.assertEqual(f'{3.14!s :10.10}', '3.14 ')
+ self.assertEqual(f'{1.25!s :10.10}', '1.25 ')
# Not a conversion.
self.assertEqual(f'{"a!r"}', "a!r")
@@ -1380,7 +1380,7 @@ x = (
for conv in ' s', ' s ':
self.assertAllRaise(SyntaxError,
"f-string: conversion type must come right after the"
- " exclamanation mark",
+ " exclamation mark",
["f'{3!" + conv + "}'"])
self.assertAllRaise(SyntaxError,
diff --git a/Lib/test/test_generated_cases.py b/Lib/test/test_generated_cases.py
index a71ddc01d1c..81d4e39f5be 100644
--- a/Lib/test/test_generated_cases.py
+++ b/Lib/test/test_generated_cases.py
@@ -1,11 +1,9 @@
import contextlib
import os
-import re
import sys
import tempfile
import unittest
-from io import StringIO
from test import support
from test import test_tools
@@ -31,12 +29,11 @@ skip_if_different_mount_drives()
test_tools.skip_if_missing("cases_generator")
with test_tools.imports_under_tool("cases_generator"):
- from analyzer import analyze_forest, StackItem
+ from analyzer import StackItem
from cwriter import CWriter
import parser
from stack import Local, Stack
import tier1_generator
- import opcode_metadata_generator
import optimizer_generator
@@ -59,14 +56,14 @@ class TestEffects(unittest.TestCase):
def test_effect_sizes(self):
stack = Stack()
inputs = [
- x := StackItem("x", None, "1"),
- y := StackItem("y", None, "oparg"),
- z := StackItem("z", None, "oparg*2"),
+ x := StackItem("x", "1"),
+ y := StackItem("y", "oparg"),
+ z := StackItem("z", "oparg*2"),
]
outputs = [
- StackItem("x", None, "1"),
- StackItem("b", None, "oparg*4"),
- StackItem("c", None, "1"),
+ StackItem("x", "1"),
+ StackItem("b", "oparg*4"),
+ StackItem("c", "1"),
]
null = CWriter.null()
stack.pop(z, null)
@@ -1106,32 +1103,6 @@ class TestGeneratedCases(unittest.TestCase):
"""
self.run_cases_test(input, output)
- def test_pointer_to_stackref(self):
- input = """
- inst(OP, (arg: _PyStackRef * -- out)) {
- out = *arg;
- DEAD(arg);
- }
- """
- output = """
- TARGET(OP) {
- #if Py_TAIL_CALL_INTERP
- int opcode = OP;
- (void)(opcode);
- #endif
- frame->instr_ptr = next_instr;
- next_instr += 1;
- INSTRUCTION_STATS(OP);
- _PyStackRef *arg;
- _PyStackRef out;
- arg = (_PyStackRef *)stack_pointer[-1].bits;
- out = *arg;
- stack_pointer[-1] = out;
- DISPATCH();
- }
- """
- self.run_cases_test(input, output)
-
def test_unused_cached_value(self):
input = """
op(FIRST, (arg1 -- out)) {
@@ -2005,8 +1976,8 @@ class TestGeneratedAbstractCases(unittest.TestCase):
"""
output = """
case OP: {
- JitOptSymbol *arg1;
- JitOptSymbol *out;
+ JitOptRef arg1;
+ JitOptRef out;
arg1 = stack_pointer[-1];
out = EGGS(arg1);
stack_pointer[-1] = out;
@@ -2014,7 +1985,7 @@ class TestGeneratedAbstractCases(unittest.TestCase):
}
case OP2: {
- JitOptSymbol *out;
+ JitOptRef out;
out = sym_new_not_null(ctx);
stack_pointer[-1] = out;
break;
@@ -2039,14 +2010,14 @@ class TestGeneratedAbstractCases(unittest.TestCase):
"""
output = """
case OP: {
- JitOptSymbol *out;
+ JitOptRef out;
out = sym_new_not_null(ctx);
stack_pointer[-1] = out;
break;
}
case OP2: {
- JitOptSymbol *out;
+ JitOptRef out;
out = NULL;
stack_pointer[-1] = out;
break;
@@ -2180,7 +2151,7 @@ class TestGeneratedAbstractCases(unittest.TestCase):
"""
output = """
case OP: {
- JitOptSymbol *foo;
+ JitOptRef foo;
foo = NULL;
stack_pointer[0] = foo;
stack_pointer += 1;
@@ -2253,5 +2224,249 @@ class TestGeneratedAbstractCases(unittest.TestCase):
"Inputs must have equal sizes"):
self.run_cases_test(input, input2, output)
+ def test_pure_uop_body_copied_in(self):
+ # Note: any non-escaping call works.
+ # In this case, we use PyStackRef_IsNone.
+ input = """
+ pure op(OP, (foo -- res)) {
+ res = PyStackRef_IsNone(foo);
+ }
+ """
+ input2 = """
+ op(OP, (foo -- res)) {
+ REPLACE_OPCODE_IF_EVALUATES_PURE(foo);
+ res = sym_new_known(ctx, foo);
+ }
+ """
+ output = """
+ case OP: {
+ JitOptRef foo;
+ JitOptRef res;
+ foo = stack_pointer[-1];
+ if (
+ sym_is_safe_const(ctx, foo)
+ ) {
+ JitOptRef foo_sym = foo;
+ _PyStackRef foo = sym_get_const_as_stackref(ctx, foo_sym);
+ _PyStackRef res_stackref;
+ /* Start of uop copied from bytecodes for constant evaluation */
+ res_stackref = PyStackRef_IsNone(foo);
+ /* End of uop copied from bytecodes for constant evaluation */
+ res = sym_new_const_steal(ctx, PyStackRef_AsPyObjectSteal(res_stackref));
+ stack_pointer[-1] = res;
+ break;
+ }
+ res = sym_new_known(ctx, foo);
+ stack_pointer[-1] = res;
+ break;
+ }
+ """
+ self.run_cases_test(input, input2, output)
+
+ def test_pure_uop_body_copied_in_deopt(self):
+ # Note: any non-escaping call works.
+ # In this case, we use PyStackRef_IsNone.
+ input = """
+ pure op(OP, (foo -- res)) {
+ DEOPT_IF(PyStackRef_IsNull(foo));
+ res = foo;
+ }
+ """
+ input2 = """
+ op(OP, (foo -- res)) {
+ REPLACE_OPCODE_IF_EVALUATES_PURE(foo);
+ res = foo;
+ }
+ """
+ output = """
+ case OP: {
+ JitOptRef foo;
+ JitOptRef res;
+ foo = stack_pointer[-1];
+ if (
+ sym_is_safe_const(ctx, foo)
+ ) {
+ JitOptRef foo_sym = foo;
+ _PyStackRef foo = sym_get_const_as_stackref(ctx, foo_sym);
+ _PyStackRef res_stackref;
+ /* Start of uop copied from bytecodes for constant evaluation */
+ if (PyStackRef_IsNull(foo)) {
+ ctx->done = true;
+ break;
+ }
+ res_stackref = foo;
+ /* End of uop copied from bytecodes for constant evaluation */
+ res = sym_new_const_steal(ctx, PyStackRef_AsPyObjectSteal(res_stackref));
+ stack_pointer[-1] = res;
+ break;
+ }
+ res = foo;
+ stack_pointer[-1] = res;
+ break;
+ }
+ """
+ self.run_cases_test(input, input2, output)
+
+ def test_pure_uop_body_copied_in_error_if(self):
+ # Note: any non-escaping call works.
+ # In this case, we use PyStackRef_IsNone.
+ input = """
+ pure op(OP, (foo -- res)) {
+ ERROR_IF(PyStackRef_IsNull(foo));
+ res = foo;
+ }
+ """
+ input2 = """
+ op(OP, (foo -- res)) {
+ REPLACE_OPCODE_IF_EVALUATES_PURE(foo);
+ res = foo;
+ }
+ """
+ output = """
+ case OP: {
+ JitOptRef foo;
+ JitOptRef res;
+ foo = stack_pointer[-1];
+ if (
+ sym_is_safe_const(ctx, foo)
+ ) {
+ JitOptRef foo_sym = foo;
+ _PyStackRef foo = sym_get_const_as_stackref(ctx, foo_sym);
+ _PyStackRef res_stackref;
+ /* Start of uop copied from bytecodes for constant evaluation */
+ if (PyStackRef_IsNull(foo)) {
+ goto error;
+ }
+ res_stackref = foo;
+ /* End of uop copied from bytecodes for constant evaluation */
+ res = sym_new_const_steal(ctx, PyStackRef_AsPyObjectSteal(res_stackref));
+ stack_pointer[-1] = res;
+ break;
+ }
+ res = foo;
+ stack_pointer[-1] = res;
+ break;
+ }
+ """
+ self.run_cases_test(input, input2, output)
+
+
+ def test_replace_opcode_uop_body_copied_in_complex(self):
+ input = """
+ pure op(OP, (foo -- res)) {
+ if (foo) {
+ res = PyStackRef_IsNone(foo);
+ }
+ else {
+ res = 1;
+ }
+ }
+ """
+ input2 = """
+ op(OP, (foo -- res)) {
+ REPLACE_OPCODE_IF_EVALUATES_PURE(foo);
+ res = sym_new_known(ctx, foo);
+ }
+ """
+ output = """
+ case OP: {
+ JitOptRef foo;
+ JitOptRef res;
+ foo = stack_pointer[-1];
+ if (
+ sym_is_safe_const(ctx, foo)
+ ) {
+ JitOptRef foo_sym = foo;
+ _PyStackRef foo = sym_get_const_as_stackref(ctx, foo_sym);
+ _PyStackRef res_stackref;
+ /* Start of uop copied from bytecodes for constant evaluation */
+ if (foo) {
+ res_stackref = PyStackRef_IsNone(foo);
+ }
+ else {
+ res_stackref = 1;
+ }
+ /* End of uop copied from bytecodes for constant evaluation */
+ res = sym_new_const_steal(ctx, PyStackRef_AsPyObjectSteal(res_stackref));
+ stack_pointer[-1] = res;
+ break;
+ }
+ res = sym_new_known(ctx, foo);
+ stack_pointer[-1] = res;
+ break;
+ }
+ """
+ self.run_cases_test(input, input2, output)
+
+ def test_replace_opcode_escaping_uop_body_copied_in_complex(self):
+ input = """
+ pure op(OP, (foo -- res)) {
+ if (foo) {
+ res = ESCAPING_CODE(foo);
+ }
+ else {
+ res = 1;
+ }
+ }
+ """
+ input2 = """
+ op(OP, (foo -- res)) {
+ REPLACE_OPCODE_IF_EVALUATES_PURE(foo);
+ res = sym_new_known(ctx, foo);
+ }
+ """
+ output = """
+ case OP: {
+ JitOptRef foo;
+ JitOptRef res;
+ foo = stack_pointer[-1];
+ if (
+ sym_is_safe_const(ctx, foo)
+ ) {
+ JitOptRef foo_sym = foo;
+ _PyStackRef foo = sym_get_const_as_stackref(ctx, foo_sym);
+ _PyStackRef res_stackref;
+ /* Start of uop copied from bytecodes for constant evaluation */
+ if (foo) {
+ res_stackref = ESCAPING_CODE(foo);
+ }
+ else {
+ res_stackref = 1;
+ }
+ /* End of uop copied from bytecodes for constant evaluation */
+ res = sym_new_const_steal(ctx, PyStackRef_AsPyObjectSteal(res_stackref));
+ stack_pointer[-1] = res;
+ break;
+ }
+ res = sym_new_known(ctx, foo);
+ stack_pointer[-1] = res;
+ break;
+ }
+ """
+ self.run_cases_test(input, input2, output)
+
+ def test_replace_opcode_uop_reject_array_effects(self):
+ input = """
+ pure op(OP, (foo[2] -- res)) {
+ if (foo) {
+ res = PyStackRef_IsNone(foo);
+ }
+ else {
+ res = 1;
+ }
+ }
+ """
+ input2 = """
+ op(OP, (foo[2] -- res)) {
+ REPLACE_OPCODE_IF_EVALUATES_PURE(foo);
+ res = sym_new_unknown(ctx);
+ }
+ """
+ output = """
+ """
+ with self.assertRaisesRegex(SyntaxError,
+ "Pure evaluation cannot take array-like inputs"):
+ self.run_cases_test(input, input2, output)
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_genericpath.py b/Lib/test/test_genericpath.py
index df07af01fc7..16c3268fefb 100644
--- a/Lib/test/test_genericpath.py
+++ b/Lib/test/test_genericpath.py
@@ -8,7 +8,7 @@ import sys
import unittest
import warnings
from test.support import (
- is_apple, is_emscripten, os_helper, warnings_helper
+ is_apple, os_helper, warnings_helper
)
from test.support.script_helper import assert_python_ok
from test.support.os_helper import FakePath
diff --git a/Lib/test/test_getpath.py b/Lib/test/test_getpath.py
index f86df9d0d03..83f09f34955 100644
--- a/Lib/test/test_getpath.py
+++ b/Lib/test/test_getpath.py
@@ -354,6 +354,27 @@ class MockGetPathTests(unittest.TestCase):
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
+ def test_venv_posix_without_home_key(self):
+ ns = MockPosixNamespace(
+ argv0="/venv/bin/python3",
+ PREFIX="/usr",
+ ENV_PATH="/usr/bin",
+ )
+ # Setup the bare minimum venv
+ ns.add_known_xfile("/usr/bin/python3")
+ ns.add_known_xfile("/venv/bin/python3")
+ ns.add_known_link("/venv/bin/python3", "/usr/bin/python3")
+ ns.add_known_file("/venv/pyvenv.cfg", [
+ # home = key intentionally omitted
+ ])
+ expected = dict(
+ executable="/venv/bin/python3",
+ prefix="/venv",
+ base_prefix="/usr",
+ )
+ actual = getpath(ns, expected)
+ self.assertEqual(expected, actual)
+
def test_venv_changed_name_posix(self):
"Test a venv layout on *nix."
ns = MockPosixNamespace(
diff --git a/Lib/test/test_grammar.py b/Lib/test/test_grammar.py
index c39565144bf..7f5d48b9c63 100644
--- a/Lib/test/test_grammar.py
+++ b/Lib/test/test_grammar.py
@@ -1,7 +1,7 @@
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
-from test.support import check_syntax_error
+from test.support import check_syntax_error, skip_wasi_stack_overflow
from test.support import import_helper
import annotationlib
import inspect
@@ -249,6 +249,18 @@ the \'lazy\' dog.\n\
compile(s, "<test>", "exec")
self.assertIn("was never closed", str(cm.exception))
+ @skip_wasi_stack_overflow()
+ def test_max_level(self):
+ # Macro defined in Parser/lexer/state.h
+ MAXLEVEL = 200
+
+ result = eval("(" * MAXLEVEL + ")" * MAXLEVEL)
+ self.assertEqual(result, ())
+
+ with self.assertRaises(SyntaxError) as cm:
+ eval("(" * (MAXLEVEL + 1) + ")" * (MAXLEVEL + 1))
+ self.assertStartsWith(str(cm.exception), 'too many nested parentheses')
+
var_annot_global: int # a global annotated is necessary for test_var_annot
diff --git a/Lib/test/test_gzip.py b/Lib/test/test_gzip.py
index ccbacc7c19b..a12ff5662a7 100644
--- a/Lib/test/test_gzip.py
+++ b/Lib/test/test_gzip.py
@@ -9,7 +9,6 @@ import os
import struct
import sys
import unittest
-import warnings
from subprocess import PIPE, Popen
from test.support import catch_unraisable_exception
from test.support import import_helper
diff --git a/Lib/test/test_hashlib.py b/Lib/test/test_hashlib.py
index de4c8a1670f..5bad483ae9d 100644
--- a/Lib/test/test_hashlib.py
+++ b/Lib/test/test_hashlib.py
@@ -12,12 +12,12 @@ import io
import itertools
import logging
import os
+import re
import sys
import sysconfig
import tempfile
import threading
import unittest
-import warnings
from test import support
from test.support import _4G, bigmemtest
from test.support import hashlib_helper
@@ -98,6 +98,14 @@ def read_vectors(hash_name):
yield parts
+DEPRECATED_STRING_PARAMETER = re.escape(
+ "the 'string' keyword parameter is deprecated since "
+ "Python 3.15 and slated for removal in Python 3.19; "
+ "use the 'data' keyword parameter or pass the data "
+ "to hash as a positional argument instead"
+)
+
+
class HashLibTestCase(unittest.TestCase):
supported_hash_names = ( 'md5', 'MD5', 'sha1', 'SHA1',
'sha224', 'SHA224', 'sha256', 'SHA256',
@@ -141,11 +149,10 @@ class HashLibTestCase(unittest.TestCase):
# of hashlib.new given the algorithm name.
for algorithm, constructors in self.constructors_to_test.items():
constructors.add(getattr(hashlib, algorithm))
- def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm, **kwargs):
- if data is None:
- return hashlib.new(_alg, **kwargs)
- return hashlib.new(_alg, data, **kwargs)
- constructors.add(_test_algorithm_via_hashlib_new)
+ def c(*args, __algorithm_name=algorithm, **kwargs):
+ return hashlib.new(__algorithm_name, *args, **kwargs)
+ c.__name__ = f'do_test_algorithm_via_hashlib_new_{algorithm}'
+ constructors.add(c)
_hashlib = self._conditional_import_module('_hashlib')
self._hashlib = _hashlib
@@ -201,6 +208,11 @@ class HashLibTestCase(unittest.TestCase):
return itertools.chain.from_iterable(constructors)
@property
+ def shake_constructors(self):
+ for shake_name in self.shakes:
+ yield from self.constructors_to_test.get(shake_name, ())
+
+ @property
def is_fips_mode(self):
return get_fips_mode()
@@ -250,6 +262,85 @@ class HashLibTestCase(unittest.TestCase):
self._hashlib.new("md5", usedforsecurity=False)
self._hashlib.openssl_md5(usedforsecurity=False)
+ @unittest.skipIf(get_fips_mode(), "skip in FIPS mode")
+ def test_clinic_signature(self):
+ for constructor in self.hash_constructors:
+ with self.subTest(constructor.__name__):
+ constructor(b'')
+ constructor(data=b'')
+ with self.assertWarnsRegex(DeprecationWarning,
+ DEPRECATED_STRING_PARAMETER):
+ constructor(string=b'')
+
+ digest_name = constructor(b'').name
+ with self.subTest(digest_name):
+ hashlib.new(digest_name, b'')
+ hashlib.new(digest_name, data=b'')
+ with self.assertWarnsRegex(DeprecationWarning,
+ DEPRECATED_STRING_PARAMETER):
+ hashlib.new(digest_name, string=b'')
+ # Make sure that _hashlib contains the constructor
+ # to test when using a combination of libcrypto and
+ # interned hash implementations.
+ if self._hashlib and digest_name in self._hashlib._constructors:
+ self._hashlib.new(digest_name, b'')
+ self._hashlib.new(digest_name, data=b'')
+ with self.assertWarnsRegex(DeprecationWarning,
+ DEPRECATED_STRING_PARAMETER):
+ self._hashlib.new(digest_name, string=b'')
+
+ @unittest.skipIf(get_fips_mode(), "skip in FIPS mode")
+ def test_clinic_signature_errors(self):
+ nomsg = b''
+ mymsg = b'msg'
+ conflicting_call = re.escape(
+ "'data' and 'string' are mutually exclusive "
+ "and support for 'string' keyword parameter "
+ "is slated for removal in a future version."
+ )
+ duplicated_param = re.escape("given by name ('data') and position")
+ unexpected_param = re.escape("got an unexpected keyword argument '_'")
+ for args, kwds, errmsg in [
+ # Reject duplicated arguments before unknown keyword arguments.
+ ((nomsg,), dict(data=nomsg, _=nomsg), duplicated_param),
+ ((mymsg,), dict(data=nomsg, _=nomsg), duplicated_param),
+ # Reject duplicated arguments before conflicting ones.
+ *itertools.product(
+ [[nomsg], [mymsg]],
+ [dict(data=nomsg), dict(data=nomsg, string=nomsg)],
+ [duplicated_param]
+ ),
+ # Reject unknown keyword arguments before conflicting ones.
+ *itertools.product(
+ [()],
+ [
+ dict(_=None),
+ dict(data=nomsg, _=None),
+ dict(string=nomsg, _=None),
+ dict(string=nomsg, data=nomsg, _=None),
+ ],
+ [unexpected_param]
+ ),
+ ((nomsg,), dict(_=None), unexpected_param),
+ ((mymsg,), dict(_=None), unexpected_param),
+ # Reject conflicting arguments.
+ [(nomsg,), dict(string=nomsg), conflicting_call],
+ [(mymsg,), dict(string=nomsg), conflicting_call],
+ [(), dict(data=nomsg, string=nomsg), conflicting_call],
+ ]:
+ for constructor in self.hash_constructors:
+ digest_name = constructor(b'').name
+ with self.subTest(constructor.__name__, args=args, kwds=kwds):
+ with self.assertRaisesRegex(TypeError, errmsg):
+ constructor(*args, **kwds)
+ with self.subTest(digest_name, args=args, kwds=kwds):
+ with self.assertRaisesRegex(TypeError, errmsg):
+ hashlib.new(digest_name, *args, **kwds)
+ if (self._hashlib and
+ digest_name in self._hashlib._constructors):
+ with self.assertRaisesRegex(TypeError, errmsg):
+ self._hashlib.new(digest_name, *args, **kwds)
+
def test_unknown_hash(self):
self.assertRaises(ValueError, hashlib.new, 'spam spam spam spam spam')
self.assertRaises(TypeError, hashlib.new, 1)
@@ -284,6 +375,16 @@ class HashLibTestCase(unittest.TestCase):
self.assertIs(constructor, _md5.md5)
self.assertEqual(sorted(builtin_constructor_cache), ['MD5', 'md5'])
+ def test_copy(self):
+ for cons in self.hash_constructors:
+ h1 = cons(os.urandom(16), usedforsecurity=False)
+ h2 = h1.copy()
+ self.assertIs(type(h1), type(h2))
+ self.assertEqual(h1.name, h2.name)
+ size = (16,) if h1.name in self.shakes else ()
+ self.assertEqual(h1.digest(*size), h2.digest(*size))
+ self.assertEqual(h1.hexdigest(*size), h2.hexdigest(*size))
+
def test_hexdigest(self):
for cons in self.hash_constructors:
h = cons(usedforsecurity=False)
@@ -294,21 +395,50 @@ class HashLibTestCase(unittest.TestCase):
self.assertIsInstance(h.digest(), bytes)
self.assertEqual(hexstr(h.digest()), h.hexdigest())
- def test_digest_length_overflow(self):
- # See issue #34922
- large_sizes = (2**29, 2**32-10, 2**32+10, 2**61, 2**64-10, 2**64+10)
- for cons in self.hash_constructors:
- h = cons(usedforsecurity=False)
- if h.name not in self.shakes:
- continue
- if HASH is not None and isinstance(h, HASH):
- # _hashopenssl's take a size_t
- continue
- for digest in h.digest, h.hexdigest:
- self.assertRaises(ValueError, digest, -10)
- for length in large_sizes:
- with self.assertRaises((ValueError, OverflowError)):
- digest(length)
+ def test_shakes_zero_digest_length(self):
+ for constructor in self.shake_constructors:
+ with self.subTest(constructor=constructor):
+ h = constructor(b'abcdef', usedforsecurity=False)
+ self.assertEqual(h.digest(0), b'')
+ self.assertEqual(h.hexdigest(0), '')
+
+ def test_shakes_invalid_digest_length(self):
+ # See https://github.com/python/cpython/issues/79103.
+ for constructor in self.shake_constructors:
+ with self.subTest(constructor=constructor):
+ h = constructor(usedforsecurity=False)
+ # Note: digest() and hexdigest() take a signed input and
+ # raise if it is negative; the rationale is that we
+ # internally use PyBytes_FromStringAndSize() and _Py_strhex(),
+ # which both take a Py_ssize_t.
+ for negative_size in (-1, -10, -(1 << 31), -sys.maxsize):
+ self.assertRaises(ValueError, h.digest, negative_size)
+ self.assertRaises(ValueError, h.hexdigest, negative_size)
+
+ def test_shakes_overflow_digest_length(self):
+ # See https://github.com/python/cpython/issues/135759.
+
+ exc_types = (OverflowError, ValueError)
+ # HACL* accepts an 'uint32_t' while OpenSSL accepts a 'size_t'.
+ openssl_overflown_sizes = (sys.maxsize + 1, 2 * sys.maxsize)
+ # https://github.com/python/cpython/issues/79103 restricts
+ # the accepted built-in lengths to 2 ** 29, even if OpenSSL
+ # accepts such lengths.
+ builtin_overflown_sizes = openssl_overflown_sizes + (
+ 2 ** 29, 2 ** 32 - 10, 2 ** 32, 2 ** 32 + 10,
+ 2 ** 61, 2 ** 64 - 10, 2 ** 64, 2 ** 64 + 10,
+ )
+
+ for constructor in self.shake_constructors:
+ with self.subTest(constructor=constructor):
+ h = constructor(usedforsecurity=False)
+ if HASH is not None and isinstance(h, HASH):
+ overflown_sizes = openssl_overflown_sizes
+ else:
+ overflown_sizes = builtin_overflown_sizes
+ for invalid_size in overflown_sizes:
+ self.assertRaises(exc_types, h.digest, invalid_size)
+ self.assertRaises(exc_types, h.hexdigest, invalid_size)
def test_name_attribute(self):
for cons in self.hash_constructors:
@@ -719,8 +849,6 @@ class HashLibTestCase(unittest.TestCase):
self.assertRaises(ValueError, constructor, node_offset=-1)
self.assertRaises(OverflowError, constructor, node_offset=max_offset+1)
- self.assertRaises(TypeError, constructor, data=b'')
- self.assertRaises(TypeError, constructor, string=b'')
self.assertRaises(TypeError, constructor, '')
constructor(
@@ -929,49 +1057,67 @@ class HashLibTestCase(unittest.TestCase):
def test_sha256_gil(self):
gil_minsize = hashlib_helper.find_gil_minsize(['_sha2', '_hashlib'])
+ data = b'1' + b'#' * gil_minsize + b'1'
+ expected = hashlib.sha256(data).hexdigest()
+
m = hashlib.sha256()
m.update(b'1')
m.update(b'#' * gil_minsize)
m.update(b'1')
- self.assertEqual(
- m.hexdigest(),
- '1cfceca95989f51f658e3f3ffe7f1cd43726c9e088c13ee10b46f57cef135b94'
- )
+ self.assertEqual(m.hexdigest(), expected)
- m = hashlib.sha256(b'1' + b'#' * gil_minsize + b'1')
- self.assertEqual(
- m.hexdigest(),
- '1cfceca95989f51f658e3f3ffe7f1cd43726c9e088c13ee10b46f57cef135b94'
- )
+ @threading_helper.reap_threads
+ @threading_helper.requires_working_threading()
+ def test_threaded_hashing_fast(self):
+ # Same as test_threaded_hashing_slow() but only tests some functions
+ # since otherwise test_hashlib.py becomes too slow during development.
+ for name in ['md5', 'sha1', 'sha256', 'sha3_256', 'blake2s']:
+ if constructor := getattr(hashlib, name, None):
+ with self.subTest(name):
+ self.do_test_threaded_hashing(constructor, is_shake=False)
+ if shake_128 := getattr(hashlib, 'shake_128', None):
+ self.do_test_threaded_hashing(shake_128, is_shake=True)
+ @requires_resource('cpu')
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
- def test_threaded_hashing(self):
+ def test_threaded_hashing_slow(self):
+ for algorithm, constructors in self.constructors_to_test.items():
+ is_shake = algorithm in self.shakes
+ for constructor in constructors:
+ with self.subTest(constructor.__name__, is_shake=is_shake):
+ self.do_test_threaded_hashing(constructor, is_shake)
+
+ def do_test_threaded_hashing(self, constructor, is_shake):
# Updating the same hash object from several threads at once
# using data chunk sizes containing the same byte sequences.
#
# If the internal locks are working to prevent multiple
# updates on the same object from running at once, the resulting
# hash will be the same as doing it single threaded upfront.
- hasher = hashlib.sha1()
- num_threads = 5
- smallest_data = b'swineflu'
- data = smallest_data * 200000
- expected_hash = hashlib.sha1(data*num_threads).hexdigest()
-
- def hash_in_chunks(chunk_size):
- index = 0
- while index < len(data):
- hasher.update(data[index:index + chunk_size])
- index += chunk_size
+
+ # The data to hash has length s|M|q^N and the chunk size for the i-th
+ # thread is s|M|q^(N-i), where N is the number of threads, M is a fixed
+ # message of small length, and s >= 1 and q >= 2 are small integers.
+ smallest_size, num_threads, s, q = 8, 5, 2, 10
+
+ smallest_data = os.urandom(smallest_size)
+ data = s * smallest_data * (q ** num_threads)
+
+ h1 = constructor(usedforsecurity=False)
+ h2 = constructor(data * num_threads, usedforsecurity=False)
+
+ def update(chunk_size):
+ for index in range(0, len(data), chunk_size):
+ h1.update(data[index:index + chunk_size])
threads = []
- for threadnum in range(num_threads):
- chunk_size = len(data) // (10 ** threadnum)
+ for thread_num in range(num_threads):
+ # chunk_size = len(data) // (q ** thread_num)
+ chunk_size = s * smallest_size * q ** (num_threads - thread_num)
self.assertGreater(chunk_size, 0)
- self.assertEqual(chunk_size % len(smallest_data), 0)
- thread = threading.Thread(target=hash_in_chunks,
- args=(chunk_size,))
+ self.assertEqual(chunk_size % smallest_size, 0)
+ thread = threading.Thread(target=update, args=(chunk_size,))
threads.append(thread)
for thread in threads:
@@ -979,7 +1125,10 @@ class HashLibTestCase(unittest.TestCase):
for thread in threads:
thread.join()
- self.assertEqual(expected_hash, hasher.hexdigest())
+ if is_shake:
+ self.assertEqual(h1.hexdigest(16), h2.hexdigest(16))
+ else:
+ self.assertEqual(h1.hexdigest(), h2.hexdigest())
def test_get_fips_mode(self):
fips_mode = self.is_fips_mode
diff --git a/Lib/test/test_hmac.py b/Lib/test/test_hmac.py
index e898644dd8a..ff6e1bce0ef 100644
--- a/Lib/test/test_hmac.py
+++ b/Lib/test/test_hmac.py
@@ -21,7 +21,6 @@ import functools
import hmac
import hashlib
import random
-import test.support
import test.support.hashlib_helper as hashlib_helper
import types
import unittest
diff --git a/Lib/test/test_htmlparser.py b/Lib/test/test_htmlparser.py
index 61fa24fab57..15cad061889 100644
--- a/Lib/test/test_htmlparser.py
+++ b/Lib/test/test_htmlparser.py
@@ -5,6 +5,7 @@ import pprint
import unittest
from unittest.mock import patch
+from test import support
class EventCollector(html.parser.HTMLParser):
@@ -80,6 +81,13 @@ class EventCollectorCharrefs(EventCollector):
self.fail('This should never be called with convert_charrefs=True')
+# The normal event collector normalizes the events in get_events,
+# so we override it to return the original list of events.
+class EventCollectorNoNormalize(EventCollector):
+ def get_events(self):
+ return self.events
+
+
class TestCaseBase(unittest.TestCase):
def get_collector(self):
@@ -264,8 +272,7 @@ text
("starttag", "foo:bar", [("one", "1"), ("two", "2")]),
("starttag_text", s)])
- def test_cdata_content(self):
- contents = [
+ @support.subTests('content', [
'<!-- not a comment --> &not-an-entity-ref;',
"<not a='start tag'>",
'<a href="" /> <p> <span></span>',
@@ -278,70 +285,127 @@ text
'src="http://www.example.org/r=\'+new '
'Date().getTime()+\'"><\\/s\'+\'cript>\');\n//]]>'),
'\n<!-- //\nvar foo = 3.14;\n// -->\n',
- 'foo = "</sty" + "le>";',
'<!-- \u2603 -->',
- # these two should be invalid according to the HTML 5 spec,
- # section 8.1.2.2
- #'foo = </\nscript>',
- #'foo = </ script>',
- ]
- elements = ['script', 'style', 'SCRIPT', 'STYLE', 'Script', 'Style']
- for content in contents:
- for element in elements:
- element_lower = element.lower()
- s = '<{element}>{content}</{element}>'.format(element=element,
- content=content)
- self._run_check(s, [("starttag", element_lower, []),
- ("data", content),
- ("endtag", element_lower)])
-
- def test_cdata_with_closing_tags(self):
+ 'foo = "</ script>"',
+ 'foo = "</scripture>"',
+ 'foo = "</script\v>"',
+ 'foo = "</script\xa0>"',
+ 'foo = "</ลฟcript>"',
+ 'foo = "</scrฤฑpt>"',
+ ])
+ def test_script_content(self, content):
+ s = f'<script>{content}</script>'
+ self._run_check(s, [("starttag", "script", []),
+ ("data", content),
+ ("endtag", "script")])
+
+ @support.subTests('content', [
+ 'a::before { content: "<!-- not a comment -->"; }',
+ 'a::before { content: "&not-an-entity-ref;"; }',
+ 'a::before { content: "<not a=\'start tag\'>"; }',
+ 'a::before { content: "\u2603"; }',
+ 'a::before { content: "< /style>"; }',
+ 'a::before { content: "</ style>"; }',
+ 'a::before { content: "</styled>"; }',
+ 'a::before { content: "</style\v>"; }',
+ 'a::before { content: "</style\xa0>"; }',
+ 'a::before { content: "</ลฟtyle>"; }',
+ ])
+ def test_style_content(self, content):
+ s = f'<style>{content}</style>'
+ self._run_check(s, [("starttag", "style", []),
+ ("data", content),
+ ("endtag", "style")])
+
+ @support.subTests('endtag', ['script', 'SCRIPT', 'script ', 'script\n',
+ 'script/', 'script foo=bar', 'script foo=">"'])
+ def test_script_closing_tag(self, endtag):
# see issue #13358
# make sure that HTMLParser calls handle_data only once for each CDATA.
- # The normal event collector normalizes the events in get_events,
- # so we override it to return the original list of events.
- class Collector(EventCollector):
- def get_events(self):
- return self.events
-
content = """<!-- not a comment --> &not-an-entity-ref;
<a href="" /> </p><p> <span></span></style>
'</script' + '>'"""
- for element in [' script', 'script ', ' script ',
- '\nscript', 'script\n', '\nscript\n']:
- element_lower = element.lower().strip()
- s = '<script>{content}</{element}>'.format(element=element,
- content=content)
- self._run_check(s, [("starttag", element_lower, []),
- ("data", content),
- ("endtag", element_lower)],
- collector=Collector(convert_charrefs=False))
-
- def test_EOF_in_cdata(self):
- content = """<!-- not a comment --> &not-an-entity-ref;
- <a href="" /> </p><p> <span></span></style>
- '</script' + '>'"""
- s = f'<script>{content}'
- self._run_check(s, [
- ("starttag", 'script', []),
- ("data", content)
- ])
+ s = f'<ScrIPt>{content}</{endtag}>'
+ self._run_check(s, [("starttag", "script", []),
+ ("data", content),
+ ("endtag", "script")],
+ collector=EventCollectorNoNormalize(convert_charrefs=False))
+
+ @support.subTests('endtag', ['style', 'STYLE', 'style ', 'style\n',
+ 'style/', 'style foo=bar', 'style foo=">"'])
+ def test_style_closing_tag(self, endtag):
+ content = """
+ b::before { content: "<!-- not a comment -->"; }
+ p::before { content: "&not-an-entity-ref;"; }
+ a::before { content: "<i>"; }
+ a::after { content: "</i>"; }
+ """
+ s = f'<StyLE>{content}</{endtag}>'
+ self._run_check(s, [("starttag", "style", []),
+ ("data", content),
+ ("endtag", "style")],
+ collector=EventCollectorNoNormalize(convert_charrefs=False))
+
+ @support.subTests('tail,end', [
+ ('', False),
+ ('<', False),
+ ('</', False),
+ ('</s', False),
+ ('</script', False),
+ ('</script ', True),
+ ('</script foo=bar', True),
+ ('</script foo=">', True),
+ ])
+ def test_eof_in_script(self, tail, end):
+ content = "a = 123"
+ s = f'<ScrIPt>{content}{tail}'
+ self._run_check(s, [("starttag", "script", []),
+ ("data", content if end else content + tail)],
+ collector=EventCollectorNoNormalize(convert_charrefs=False))
def test_comments(self):
html = ("<!-- I'm a valid comment -->"
'<!--me too!-->'
'<!------>'
+ '<!----->'
'<!---->'
+ # abrupt-closing-of-empty-comment
+ '<!--->'
+ '<!-->'
'<!----I have many hyphens---->'
'<!-- I have a > in the middle -->'
- '<!-- and I have -- in the middle! -->')
+ '<!-- and I have -- in the middle! -->'
+ '<!--incorrectly-closed-comment--!>'
+ '<!----!>'
+ '<!----!-->'
+ '<!---- >-->'
+ '<!---!>-->'
+ '<!--!>-->'
+ # nested-comment
+ '<!-- <!-- nested --> -->'
+ '<!--<!-->'
+ '<!--<!--!>'
+ )
expected = [('comment', " I'm a valid comment "),
('comment', 'me too!'),
('comment', '--'),
+ ('comment', '-'),
+ ('comment', ''),
+ ('comment', ''),
('comment', ''),
('comment', '--I have many hyphens--'),
('comment', ' I have a > in the middle '),
- ('comment', ' and I have -- in the middle! ')]
+ ('comment', ' and I have -- in the middle! '),
+ ('comment', 'incorrectly-closed-comment'),
+ ('comment', ''),
+ ('comment', '--!'),
+ ('comment', '-- >'),
+ ('comment', '-!>'),
+ ('comment', '!>'),
+ ('comment', ' <!-- nested '), ('data', ' -->'),
+ ('comment', '<!'),
+ ('comment', '<!'),
+ ]
self._run_check(html, expected)
def test_condcoms(self):
@@ -430,28 +494,34 @@ text
('data', '<'),
('starttag', 'bc<', [('a', None)]),
('endtag', 'html'),
- ('data', '\n<img src="URL>'),
- ('comment', '/img'),
- ('endtag', 'html<')])
+ ('data', '\n')])
def test_starttag_junk_chars(self):
+ self._run_check("<", [('data', '<')])
+ self._run_check("<>", [('data', '<>')])
+ self._run_check("< >", [('data', '< >')])
+ self._run_check("< ", [('data', '< ')])
self._run_check("</>", [])
+ self._run_check("<$>", [('data', '<$>')])
self._run_check("</$>", [('comment', '$')])
self._run_check("</", [('data', '</')])
- self._run_check("</a", [('data', '</a')])
+ self._run_check("</a", [])
+ self._run_check("</ a>", [('comment', ' a')])
+ self._run_check("</ a", [('comment', ' a')])
self._run_check("<a<a>", [('starttag', 'a<a', [])])
self._run_check("</a<a>", [('endtag', 'a<a')])
- self._run_check("<!", [('data', '<!')])
- self._run_check("<a", [('data', '<a')])
- self._run_check("<a foo='bar'", [('data', "<a foo='bar'")])
- self._run_check("<a foo='bar", [('data', "<a foo='bar")])
- self._run_check("<a foo='>'", [('data', "<a foo='>'")])
- self._run_check("<a foo='>", [('data', "<a foo='>")])
+ self._run_check("<!", [('comment', '')])
+ self._run_check("<a", [])
+ self._run_check("<a foo='bar'", [])
+ self._run_check("<a foo='bar", [])
+ self._run_check("<a foo='>'", [])
+ self._run_check("<a foo='>", [])
self._run_check("<a$>", [('starttag', 'a$', [])])
self._run_check("<a$b>", [('starttag', 'a$b', [])])
self._run_check("<a$b/>", [('startendtag', 'a$b', [])])
self._run_check("<a$b >", [('starttag', 'a$b', [])])
self._run_check("<a$b />", [('startendtag', 'a$b', [])])
+ self._run_check("</a$b>", [('endtag', 'a$b')])
def test_slashes_in_starttag(self):
self._run_check('<a foo="var"/>', [('startendtag', 'a', [('foo', 'var')])])
@@ -484,6 +554,10 @@ text
]
self._run_check(html, expected)
+ def test_slashes_in_endtag(self):
+ self._run_check('</a/>', [('endtag', 'a')])
+ self._run_check('</a foo="var"/>', [('endtag', 'a')])
+
def test_declaration_junk_chars(self):
self._run_check("<!DOCTYPE foo $ >", [('decl', 'DOCTYPE foo $ ')])
@@ -518,15 +592,11 @@ text
self._run_check(html, expected)
def test_broken_invalid_end_tag(self):
- # This is technically wrong (the "> shouldn't be included in the 'data')
- # but is probably not worth fixing it (in addition to all the cases of
- # the previous test, it would require a full attribute parsing).
- # see #13993
html = '<b>This</b attr=">"> confuses the parser'
expected = [('starttag', 'b', []),
('data', 'This'),
('endtag', 'b'),
- ('data', '"> confuses the parser')]
+ ('data', ' confuses the parser')]
self._run_check(html, expected)
def test_correct_detection_of_start_tags(self):
@@ -553,7 +623,7 @@ text
html = '<div style="", foo = "bar" ><b>The <a href="some_url">rain</a>'
expected = [
- ('starttag', 'div', [('style', ''), (',', None), ('foo', 'bar')]),
+ ('starttag', 'div', [('style', ''), (',', None), ('foo', None), ('=', None), ('"bar"', None)]),
('starttag', 'b', []),
('data', 'The '),
('starttag', 'a', [('href', 'some_url')]),
@@ -576,21 +646,50 @@ text
for html, expected in data:
self._run_check(html, expected)
- def test_EOF_in_comments_or_decls(self):
+ def test_eof_in_comments(self):
data = [
- ('<!', [('data', '<!')]),
- ('<!-', [('data', '<!-')]),
- ('<!--', [('data', '<!--')]),
- ('<![', [('data', '<![')]),
- ('<![CDATA[', [('data', '<![CDATA[')]),
- ('<![CDATA[x', [('data', '<![CDATA[x')]),
- ('<!DOCTYPE', [('data', '<!DOCTYPE')]),
- ('<!DOCTYPE HTML', [('data', '<!DOCTYPE HTML')]),
+ ('<!--', [('comment', '')]),
+ ('<!---', [('comment', '')]),
+ ('<!----', [('comment', '')]),
+ ('<!-----', [('comment', '-')]),
+ ('<!------', [('comment', '--')]),
+ ('<!----!', [('comment', '')]),
+ ('<!---!', [('comment', '-!')]),
+ ('<!---!>', [('comment', '-!>')]),
+ ('<!--foo', [('comment', 'foo')]),
+ ('<!--foo-', [('comment', 'foo')]),
+ ('<!--foo--', [('comment', 'foo')]),
+ ('<!--foo--!', [('comment', 'foo')]),
+ ('<!--<!--', [('comment', '<!')]),
+ ('<!--<!--!', [('comment', '<!')]),
]
for html, expected in data:
self._run_check(html, expected)
+
+ def test_eof_in_declarations(self):
+ data = [
+ ('<!', [('comment', '')]),
+ ('<!-', [('comment', '-')]),
+ ('<![', [('comment', '[')]),
+ ('<![CDATA[', [('unknown decl', 'CDATA[')]),
+ ('<![CDATA[x', [('unknown decl', 'CDATA[x')]),
+ ('<![CDATA[x]', [('unknown decl', 'CDATA[x]')]),
+ ('<![CDATA[x]]', [('unknown decl', 'CDATA[x]]')]),
+ ('<!DOCTYPE', [('decl', 'DOCTYPE')]),
+ ('<!DOCTYPE ', [('decl', 'DOCTYPE ')]),
+ ('<!DOCTYPE html', [('decl', 'DOCTYPE html')]),
+ ('<!DOCTYPE html ', [('decl', 'DOCTYPE html ')]),
+ ('<!DOCTYPE html PUBLIC', [('decl', 'DOCTYPE html PUBLIC')]),
+ ('<!DOCTYPE html PUBLIC "foo', [('decl', 'DOCTYPE html PUBLIC "foo')]),
+ ('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "foo',
+ [('decl', 'DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "foo')]),
+ ]
+ for html, expected in data:
+ self._run_check(html, expected)
+
def test_bogus_comments(self):
- html = ('<! not really a comment >'
+ html = ('<!ELEMENT br EMPTY>'
+ '<! not really a comment >'
'<! not a comment either -->'
'<! -- close enough -->'
'<!><!<-- this was an empty comment>'
@@ -604,6 +703,7 @@ text
'<![CDATA]]>' # required '[' after CDATA
)
expected = [
+ ('comment', 'ELEMENT br EMPTY'),
('comment', ' not really a comment '),
('comment', ' not a comment either --'),
('comment', ' -- close enough --'),
@@ -684,6 +784,26 @@ text
('endtag', 'a'), ('data', ' bar & baz')]
)
+ @support.requires_resource('cpu')
+ def test_eof_no_quadratic_complexity(self):
+ # Each of these examples used to take about an hour.
+ # Now they take a fraction of a second.
+ def check(source):
+ parser = html.parser.HTMLParser()
+ parser.feed(source)
+ parser.close()
+ n = 120_000
+ check("<a " * n)
+ check("<a a=" * n)
+ check("</a " * 14 * n)
+ check("</a a=" * 11 * n)
+ check("<!--" * 4 * n)
+ check("<!" * 60 * n)
+ check("<?" * 19 * n)
+ check("</$" * 15 * n)
+ check("<![CDATA[" * 9 * n)
+ check("<!doctype" * 35 * n)
+
class AttributesTestCase(TestCaseBase):
@@ -692,9 +812,15 @@ class AttributesTestCase(TestCaseBase):
("starttag", "a", [("b", "v"), ("c", "v"), ("d", "v"), ("e", None)])
]
self._run_check("""<a b='v' c="v" d=v e>""", output)
- self._run_check("""<a b = 'v' c = "v" d = v e>""", output)
- self._run_check("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output)
- self._run_check("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output)
+ self._run_check("<a foo==bar>", [('starttag', 'a', [('foo', '=bar')])])
+ self._run_check("<a foo =bar>", [('starttag', 'a', [('foo', None), ('=bar', None)])])
+ self._run_check("<a foo\t=bar>", [('starttag', 'a', [('foo', None), ('=bar', None)])])
+ self._run_check("<a foo\v=bar>", [('starttag', 'a', [('foo\v', 'bar')])])
+ self._run_check("<a foo\xa0=bar>", [('starttag', 'a', [('foo\xa0', 'bar')])])
+ self._run_check("<a foo= bar>", [('starttag', 'a', [('foo', ''), ('bar', None)])])
+ self._run_check("<a foo=\tbar>", [('starttag', 'a', [('foo', ''), ('bar', None)])])
+ self._run_check("<a foo=\vbar>", [('starttag', 'a', [('foo', '\vbar')])])
+ self._run_check("<a foo=\xa0bar>", [('starttag', 'a', [('foo', '\xa0bar')])])
def test_attr_values(self):
self._run_check("""<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""",
@@ -703,6 +829,10 @@ class AttributesTestCase(TestCaseBase):
("d", "\txyz\n")])])
self._run_check("""<a b='' c="">""",
[("starttag", "a", [("b", ""), ("c", "")])])
+ self._run_check("<a b=\t c=\n>",
+ [("starttag", "a", [("b", ""), ("c", "")])])
+ self._run_check("<a b=\v c=\xa0>",
+ [("starttag", "a", [("b", "\v"), ("c", "\xa0")])])
# Regression test for SF patch #669683.
self._run_check("<e a=rgb(1,2,3)>",
[("starttag", "e", [("a", "rgb(1,2,3)")])])
@@ -774,7 +904,7 @@ class AttributesTestCase(TestCaseBase):
('data', 'test - bad2'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'\xa0style='color:red;bad3'")]),
('data', 'test - bad3'), ('endtag', 'a'),
- ('starttag', 'a', [('href', "test'\xa0style='color:red;bad4'")]),
+ ('starttag', 'a', [('href', None), ('=', None), ("test'&nbsp;style", 'color:red;bad4')]),
('data', 'test - bad4'), ('endtag', 'a')
]
self._run_check(html, expected)
diff --git a/Lib/test/test_http_cookiejar.py b/Lib/test/test_http_cookiejar.py
index 6bc33b15ec3..04cb440cd4c 100644
--- a/Lib/test/test_http_cookiejar.py
+++ b/Lib/test/test_http_cookiejar.py
@@ -4,6 +4,7 @@ import os
import stat
import sys
import re
+from test import support
from test.support import os_helper
from test.support import warnings_helper
import time
@@ -105,8 +106,7 @@ class DateTimeTests(unittest.TestCase):
self.assertEqual(http2time(s.lower()), test_t, s.lower())
self.assertEqual(http2time(s.upper()), test_t, s.upper())
- def test_http2time_garbage(self):
- for test in [
+ @support.subTests('test', [
'',
'Garbage',
'Mandag 16. September 1996',
@@ -121,10 +121,9 @@ class DateTimeTests(unittest.TestCase):
'08-01-3697739',
'09 Feb 19942632 22:23:32 GMT',
'Wed, 09 Feb 1994834 22:23:32 GMT',
- ]:
- self.assertIsNone(http2time(test),
- "http2time(%s) is not None\n"
- "http2time(test) %s" % (test, http2time(test)))
+ ])
+ def test_http2time_garbage(self, test):
+ self.assertIsNone(http2time(test))
def test_http2time_redos_regression_actually_completes(self):
# LOOSE_HTTP_DATE_RE was vulnerable to malicious input which caused catastrophic backtracking (REDoS).
@@ -149,9 +148,7 @@ class DateTimeTests(unittest.TestCase):
self.assertEqual(parse_date("1994-02-03 19:45:29 +0530"),
(1994, 2, 3, 14, 15, 29))
- def test_iso2time_formats(self):
- # test iso2time for supported dates.
- tests = [
+ @support.subTests('s', [
'1994-02-03 00:00:00 -0000', # ISO 8601 format
'1994-02-03 00:00:00 +0000', # ISO 8601 format
'1994-02-03 00:00:00', # zone is optional
@@ -164,16 +161,15 @@ class DateTimeTests(unittest.TestCase):
# A few tests with extra space at various places
' 1994-02-03 ',
' 1994-02-03T00:00:00 ',
- ]
-
+ ])
+ def test_iso2time_formats(self, s):
+ # test iso2time for supported dates.
test_t = 760233600 # assume broken POSIX counting of seconds
- for s in tests:
- self.assertEqual(iso2time(s), test_t, s)
- self.assertEqual(iso2time(s.lower()), test_t, s.lower())
- self.assertEqual(iso2time(s.upper()), test_t, s.upper())
+ self.assertEqual(iso2time(s), test_t, s)
+ self.assertEqual(iso2time(s.lower()), test_t, s.lower())
+ self.assertEqual(iso2time(s.upper()), test_t, s.upper())
- def test_iso2time_garbage(self):
- for test in [
+ @support.subTests('test', [
'',
'Garbage',
'Thursday, 03-Feb-94 00:00:00 GMT',
@@ -186,9 +182,9 @@ class DateTimeTests(unittest.TestCase):
'01-01-1980 00:00:62',
'01-01-1980T00:00:62',
'19800101T250000Z',
- ]:
- self.assertIsNone(iso2time(test),
- "iso2time(%r)" % test)
+ ])
+ def test_iso2time_garbage(self, test):
+ self.assertIsNone(iso2time(test))
def test_iso2time_performance_regression(self):
# If ISO_DATE_RE regresses to quadratic complexity, this test will take a very long time to succeed.
@@ -199,24 +195,23 @@ class DateTimeTests(unittest.TestCase):
class HeaderTests(unittest.TestCase):
- def test_parse_ns_headers(self):
- # quotes should be stripped
- expected = [[('foo', 'bar'), ('expires', 2209069412), ('version', '0')]]
- for hdr in [
+ @support.subTests('hdr', [
'foo=bar; expires=01 Jan 2040 22:23:32 GMT',
'foo=bar; expires="01 Jan 2040 22:23:32 GMT"',
- ]:
- self.assertEqual(parse_ns_headers([hdr]), expected)
-
- def test_parse_ns_headers_version(self):
-
+ ])
+ def test_parse_ns_headers(self, hdr):
# quotes should be stripped
- expected = [[('foo', 'bar'), ('version', '1')]]
- for hdr in [
+ expected = [[('foo', 'bar'), ('expires', 2209069412), ('version', '0')]]
+ self.assertEqual(parse_ns_headers([hdr]), expected)
+
+ @support.subTests('hdr', [
'foo=bar; version="1"',
'foo=bar; Version="1"',
- ]:
- self.assertEqual(parse_ns_headers([hdr]), expected)
+ ])
+ def test_parse_ns_headers_version(self, hdr):
+ # quotes should be stripped
+ expected = [[('foo', 'bar'), ('version', '1')]]
+ self.assertEqual(parse_ns_headers([hdr]), expected)
def test_parse_ns_headers_special_names(self):
# names such as 'expires' are not special in first name=value pair
@@ -226,8 +221,7 @@ class HeaderTests(unittest.TestCase):
expected = [[("expires", "01 Jan 2040 22:23:32 GMT"), ("version", "0")]]
self.assertEqual(parse_ns_headers([hdr]), expected)
- def test_join_header_words(self):
- for src, expected in [
+ @support.subTests('src,expected', [
([[("foo", None), ("bar", "baz")]], "foo; bar=baz"),
(([]), ""),
(([[]]), ""),
@@ -237,12 +231,11 @@ class HeaderTests(unittest.TestCase):
'n; foo="foo;_", bar=foo_bar'),
([[("n", "m"), ("foo", None)], [("bar", "foo_bar")]],
'n=m; foo, bar=foo_bar'),
- ]:
- with self.subTest(src=src):
- self.assertEqual(join_header_words(src), expected)
+ ])
+ def test_join_header_words(self, src, expected):
+ self.assertEqual(join_header_words(src), expected)
- def test_split_header_words(self):
- tests = [
+ @support.subTests('arg,expect', [
("foo", [[("foo", None)]]),
("foo=bar", [[("foo", "bar")]]),
(" foo ", [[("foo", None)]]),
@@ -259,24 +252,22 @@ class HeaderTests(unittest.TestCase):
(r'foo; bar=baz, spam=, foo="\,\;\"", bar= ',
[[("foo", None), ("bar", "baz")],
[("spam", "")], [("foo", ',;"')], [("bar", "")]]),
- ]
-
- for arg, expect in tests:
- try:
- result = split_header_words([arg])
- except:
- import traceback, io
- f = io.StringIO()
- traceback.print_exc(None, f)
- result = "(error -- traceback follows)\n\n%s" % f.getvalue()
- self.assertEqual(result, expect, """
+ ])
+ def test_split_header_words(self, arg, expect):
+ try:
+ result = split_header_words([arg])
+ except:
+ import traceback, io
+ f = io.StringIO()
+ traceback.print_exc(None, f)
+ result = "(error -- traceback follows)\n\n%s" % f.getvalue()
+ self.assertEqual(result, expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
""" % (arg, expect, result))
- def test_roundtrip(self):
- tests = [
+ @support.subTests('arg,expect', [
("foo", "foo"),
("foo=bar", "foo=bar"),
(" foo ", "foo"),
@@ -309,12 +300,11 @@ Got: '%s'
('n; foo="foo;_", bar="foo,_"',
'n; foo="foo;_", bar="foo,_"'),
- ]
-
- for arg, expect in tests:
- input = split_header_words([arg])
- res = join_header_words(input)
- self.assertEqual(res, expect, """
+ ])
+ def test_roundtrip(self, arg, expect):
+ input = split_header_words([arg])
+ res = join_header_words(input)
+ self.assertEqual(res, expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
@@ -516,14 +506,7 @@ class CookieTests(unittest.TestCase):
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
- def test_domain_return_ok(self):
- # test optimization: .domain_return_ok() should filter out most
- # domains in the CookieJar before we try to access them (because that
- # may require disk access -- in particular, with MSIECookieJar)
- # This is only a rough check for performance reasons, so it's not too
- # critical as long as it's sufficiently liberal.
- pol = DefaultCookiePolicy()
- for url, domain, ok in [
+ @support.subTests('url,domain,ok', [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
@@ -543,11 +526,18 @@ class CookieTests(unittest.TestCase):
("http://foo/", ".local", True),
("http://barfoo.com", ".foo.com", False),
("http://barfoo.com", "foo.com", False),
- ]:
- request = urllib.request.Request(url)
- r = pol.domain_return_ok(domain, request)
- if ok: self.assertTrue(r)
- else: self.assertFalse(r)
+ ])
+ def test_domain_return_ok(self, url, domain, ok):
+ # test optimization: .domain_return_ok() should filter out most
+ # domains in the CookieJar before we try to access them (because that
+ # may require disk access -- in particular, with MSIECookieJar)
+ # This is only a rough check for performance reasons, so it's not too
+ # critical as long as it's sufficiently liberal.
+ pol = DefaultCookiePolicy()
+ request = urllib.request.Request(url)
+ r = pol.domain_return_ok(domain, request)
+ if ok: self.assertTrue(r)
+ else: self.assertFalse(r)
def test_missing_value(self):
# missing = sign in Cookie: header is regarded by Mozilla as a missing
@@ -581,10 +571,7 @@ class CookieTests(unittest.TestCase):
self.assertEqual(interact_netscape(c, "http://www.acme.com/foo/"),
'"spam"; eggs')
- def test_rfc2109_handling(self):
- # RFC 2109 cookies are handled as RFC 2965 or Netscape cookies,
- # dependent on policy settings
- for rfc2109_as_netscape, rfc2965, version in [
+ @support.subTests('rfc2109_as_netscape,rfc2965,version', [
# default according to rfc2965 if not explicitly specified
(None, False, 0),
(None, True, 1),
@@ -593,24 +580,27 @@ class CookieTests(unittest.TestCase):
(False, True, 1),
(True, False, 0),
(True, True, 0),
- ]:
- policy = DefaultCookiePolicy(
- rfc2109_as_netscape=rfc2109_as_netscape,
- rfc2965=rfc2965)
- c = CookieJar(policy)
- interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
- try:
- cookie = c._cookies["www.example.com"]["/"]["ni"]
- except KeyError:
- self.assertIsNone(version) # didn't expect a stored cookie
- else:
- self.assertEqual(cookie.version, version)
- # 2965 cookies are unaffected
- interact_2965(c, "http://www.example.com/",
- "foo=bar; Version=1")
- if rfc2965:
- cookie2965 = c._cookies["www.example.com"]["/"]["foo"]
- self.assertEqual(cookie2965.version, 1)
+ ])
+ def test_rfc2109_handling(self, rfc2109_as_netscape, rfc2965, version):
+ # RFC 2109 cookies are handled as RFC 2965 or Netscape cookies,
+ # dependent on policy settings
+ policy = DefaultCookiePolicy(
+ rfc2109_as_netscape=rfc2109_as_netscape,
+ rfc2965=rfc2965)
+ c = CookieJar(policy)
+ interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
+ try:
+ cookie = c._cookies["www.example.com"]["/"]["ni"]
+ except KeyError:
+ self.assertIsNone(version) # didn't expect a stored cookie
+ else:
+ self.assertEqual(cookie.version, version)
+ # 2965 cookies are unaffected
+ interact_2965(c, "http://www.example.com/",
+ "foo=bar; Version=1")
+ if rfc2965:
+ cookie2965 = c._cookies["www.example.com"]["/"]["foo"]
+ self.assertEqual(cookie2965.version, 1)
def test_ns_parser(self):
c = CookieJar()
@@ -778,8 +768,7 @@ class CookieTests(unittest.TestCase):
# Cookie is sent back to the same URI.
self.assertEqual(interact_netscape(cj, uri), value)
- def test_escape_path(self):
- cases = [
+ @support.subTests('arg,result', [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
@@ -799,9 +788,9 @@ class CookieTests(unittest.TestCase):
("/foo/bar\u00fc", "/foo/bar%C3%BC"), # UTF-8 encoded
# unicode
("/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
- ]
- for arg, result in cases:
- self.assertEqual(escape_path(arg), result)
+ ])
+ def test_escape_path(self, arg, result):
+ self.assertEqual(escape_path(arg), result)
def test_request_path(self):
# with parameters
diff --git a/Lib/test/test_idle.py b/Lib/test/test_idle.py
index 3d8b7ecc0ec..ebf572ac5ca 100644
--- a/Lib/test/test_idle.py
+++ b/Lib/test/test_idle.py
@@ -16,7 +16,7 @@ idlelib.testing = True
# Unittest.main and test.libregrtest.runtest.runtest_inner
# call load_tests, when present here, to discover tests to run.
-from idlelib.idle_test import load_tests
+from idlelib.idle_test import load_tests # noqa: F401
if __name__ == '__main__':
tk.NoDefaultRoot()
diff --git a/Lib/test/test_inspect/test_inspect.py b/Lib/test/test_inspect/test_inspect.py
index e584fb417b9..79eb103224b 100644
--- a/Lib/test/test_inspect/test_inspect.py
+++ b/Lib/test/test_inspect/test_inspect.py
@@ -5875,9 +5875,9 @@ class TestSignatureDefinitions(unittest.TestCase):
self._test_module_has_signatures(operator)
def test_os_module_has_signatures(self):
- unsupported_signature = {'chmod', 'link', 'utime'}
+ unsupported_signature = {'chmod', 'utime'}
unsupported_signature |= {name for name in
- ['get_terminal_size', 'posix_spawn', 'posix_spawnp',
+ ['get_terminal_size', 'link', 'posix_spawn', 'posix_spawnp',
'register_at_fork', 'startfile']
if hasattr(os, name)}
self._test_module_has_signatures(os, unsupported_signature=unsupported_signature)
diff --git a/Lib/test/test_interpreters/test_api.py b/Lib/test/test_interpreters/test_api.py
index 1e2d572b1cb..a34b20beaca 100644
--- a/Lib/test/test_interpreters/test_api.py
+++ b/Lib/test/test_interpreters/test_api.py
@@ -1,18 +1,23 @@
+import contextlib
import os
import pickle
+import sys
from textwrap import dedent
import threading
import types
import unittest
from test import support
+from test.support import os_helper
+from test.support import script_helper
from test.support import import_helper
# Raise SkipTest if subinterpreters not supported.
_interpreters = import_helper.import_module('_interpreters')
+from concurrent import interpreters
from test.support import Py_GIL_DISABLED
-from test.support import interpreters
from test.support import force_not_colorized
-from test.support.interpreters import (
+import test._crossinterp_definitions as defs
+from concurrent.interpreters import (
InterpreterError, InterpreterNotFoundError, ExecutionFailed,
)
from .utils import (
@@ -29,6 +34,59 @@ WHENCE_STR_XI = 'cross-interpreter C-API'
WHENCE_STR_STDLIB = '_interpreters module'
+def is_pickleable(obj):
+ try:
+ pickle.dumps(obj)
+ except Exception:
+ return False
+ return True
+
+
+@contextlib.contextmanager
+def defined_in___main__(name, script, *, remove=False):
+ import __main__ as mainmod
+ mainns = vars(mainmod)
+ assert name not in mainns
+ exec(script, mainns, mainns)
+ if remove:
+ yield mainns.pop(name)
+ else:
+ try:
+ yield mainns[name]
+ finally:
+ mainns.pop(name, None)
+
+
+def build_excinfo(exctype, msg=None, formatted=None, errdisplay=None):
+ if isinstance(exctype, type):
+ assert issubclass(exctype, BaseException), exctype
+ exctype = types.SimpleNamespace(
+ __name__=exctype.__name__,
+ __qualname__=exctype.__qualname__,
+ __module__=exctype.__module__,
+ )
+ elif isinstance(exctype, str):
+ module, _, name = exctype.rpartition('.')
+ if not module and name in __builtins__:
+ module = 'builtins'
+ exctype = types.SimpleNamespace(
+ __name__=name,
+ __qualname__=exctype,
+ __module__=module or None,
+ )
+ else:
+ assert isinstance(exctype, types.SimpleNamespace)
+ assert msg is None or isinstance(msg, str), msg
+ assert formatted is None or isinstance(formatted, str), formatted
+ assert errdisplay is None or isinstance(errdisplay, str), errdisplay
+ return types.SimpleNamespace(
+ type=exctype,
+ msg=msg,
+ formatted=formatted,
+ errdisplay=errdisplay,
+ )
+
+
class ModuleTests(TestBase):
def test_queue_aliases(self):
@@ -75,7 +133,7 @@ class CreateTests(TestBase):
main, = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent("""
- from test.support import interpreters
+ from concurrent import interpreters
interp = interpreters.create()
print(interp.id)
"""))
@@ -138,7 +196,7 @@ class GetCurrentTests(TestBase):
main = interpreters.get_main()
interp = interpreters.create()
out = _run_output(interp, dedent("""
- from test.support import interpreters
+ from concurrent import interpreters
cur = interpreters.get_current()
print(cur.id)
"""))
@@ -155,7 +213,7 @@ class GetCurrentTests(TestBase):
with self.subTest('subinterpreter'):
interp = interpreters.create()
out = _run_output(interp, dedent("""
- from test.support import interpreters
+ from concurrent import interpreters
cur = interpreters.get_current()
print(id(cur))
cur = interpreters.get_current()
@@ -167,7 +225,7 @@ class GetCurrentTests(TestBase):
with self.subTest('per-interpreter'):
interp = interpreters.create()
out = _run_output(interp, dedent("""
- from test.support import interpreters
+ from concurrent import interpreters
cur = interpreters.get_current()
print(id(cur))
"""))
@@ -354,9 +412,11 @@ class InterpreterObjectTests(TestBase):
def test_pickle(self):
interp = interpreters.create()
- data = pickle.dumps(interp)
- unpickled = pickle.loads(data)
- self.assertEqual(unpickled, interp)
+ for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.subTest(protocol=protocol):
+ data = pickle.dumps(interp, protocol)
+ unpickled = pickle.loads(data)
+ self.assertEqual(unpickled, interp)
class TestInterpreterIsRunning(TestBase):
@@ -524,7 +584,7 @@ class TestInterpreterClose(TestBase):
main, = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent(f"""
- from test.support import interpreters
+ from concurrent import interpreters
interp = interpreters.Interpreter({interp.id})
try:
interp.close()
@@ -541,7 +601,7 @@ class TestInterpreterClose(TestBase):
self.assertEqual(set(interpreters.list_all()),
{main, interp1, interp2})
interp1.exec(dedent(f"""
- from test.support import interpreters
+ from concurrent import interpreters
interp2 = interpreters.Interpreter({interp2.id})
interp2.close()
interp3 = interpreters.create()
@@ -748,7 +808,7 @@ class TestInterpreterExec(TestBase):
ham()
""")
scriptfile = self.make_script('script.py', tempdir, text="""
- from test.support import interpreters
+ from concurrent import interpreters
def script():
import spam
@@ -769,7 +829,7 @@ class TestInterpreterExec(TestBase):
~~~~~~~~~~~^^^^^^^^
{interpmod_line.strip()}
raise ExecutionFailed(excinfo)
- test.support.interpreters.ExecutionFailed: RuntimeError: uh-oh!
+ concurrent.interpreters.ExecutionFailed: RuntimeError: uh-oh!
Uncaught in the interpreter:
@@ -839,9 +899,16 @@ class TestInterpreterExec(TestBase):
interp.exec(10)
def test_bytes_for_script(self):
+ r, w = self.pipe()
+ RAN = b'R'
+ DONE = b'D'
interp = interpreters.create()
- with self.assertRaises(TypeError):
- interp.exec(b'print("spam")')
+ interp.exec(f"""if True:
+ import os
+ os.write({w}, {RAN!r})
+ """)
+ os.write(w, DONE)
+ self.assertEqual(os.read(r, 1), RAN)
def test_with_background_threads_still_running(self):
r_interp, w_interp = self.pipe()
@@ -879,28 +946,46 @@ class TestInterpreterExec(TestBase):
with self.assertRaisesRegex(InterpreterError, 'unrecognized'):
interp.exec('raise Exception("it worked!")')
+ def test_list_comprehension(self):
+ # gh-135450: List comprehensions caused an assertion failure
+ # in _PyCode_CheckNoExternalState()
+ import string
+ r_interp, w_interp = self.pipe()
+
+ interp = interpreters.create()
+ interp.exec(f"""if True:
+ import os
+ comp = [str(i) for i in range(10)]
+ os.write({w_interp}, ''.join(comp).encode())
+ """)
+ self.assertEqual(os.read(r_interp, 10).decode(), string.digits)
+ interp.close()
+
+
# test__interpreters covers the remaining
# Interpreter.exec() behavior.
-def call_func_noop():
- pass
+call_func_noop = defs.spam_minimal
+call_func_ident = defs.spam_returns_arg
+call_func_failure = defs.spam_raises
def call_func_return_shareable():
return (1, None)
-def call_func_return_not_shareable():
- return [1, 2, 3]
+def call_func_return_stateless_func():
+ return (lambda x: x)
-def call_func_failure():
- raise Exception('spam!')
+def call_func_return_pickleable():
+ return [1, 2, 3]
-def call_func_ident(value):
- return value
+def call_func_return_unpickleable():
+ x = 42
+ return (lambda: x)
def get_call_func_closure(value):
@@ -909,6 +994,11 @@ def get_call_func_closure(value):
return call_func_closure
+def call_func_exec_wrapper(script, ns):
+ res = exec(script, ns, ns)
+ return res, ns, id(ns)
+
+
class Spam:
@staticmethod
@@ -1005,86 +1095,663 @@ class TestInterpreterCall(TestBase):
# - preserves info (e.g. SyntaxError)
# - matching error display
- def test_call(self):
+ @contextlib.contextmanager
+ def assert_fails(self, expected):
+ with self.assertRaises(ExecutionFailed) as cm:
+ yield cm
+ uncaught = cm.exception.excinfo
+ self.assertEqual(uncaught.type.__name__, expected.__name__)
+
+ def assert_fails_not_shareable(self):
+ return self.assert_fails(interpreters.NotShareableError)
+
+ def assert_code_equal(self, code1, code2):
+ if code1 == code2:
+ return
+ self.assertEqual(code1.co_name, code2.co_name)
+ self.assertEqual(code1.co_flags, code2.co_flags)
+ self.assertEqual(code1.co_consts, code2.co_consts)
+ self.assertEqual(code1.co_varnames, code2.co_varnames)
+ self.assertEqual(code1.co_cellvars, code2.co_cellvars)
+ self.assertEqual(code1.co_freevars, code2.co_freevars)
+ self.assertEqual(code1.co_names, code2.co_names)
+ self.assertEqual(
+ _testinternalcapi.get_code_var_counts(code1),
+ _testinternalcapi.get_code_var_counts(code2),
+ )
+ self.assertEqual(code1.co_code, code2.co_code)
+
+ def assert_funcs_equal(self, func1, func2):
+ if func1 == func2:
+ return
+ self.assertIs(type(func1), type(func2))
+ self.assertEqual(func1.__name__, func2.__name__)
+ self.assertEqual(func1.__defaults__, func2.__defaults__)
+ self.assertEqual(func1.__kwdefaults__, func2.__kwdefaults__)
+ self.assertEqual(func1.__closure__, func2.__closure__)
+ self.assert_code_equal(func1.__code__, func2.__code__)
+ self.assertEqual(
+ _testinternalcapi.get_code_var_counts(func1),
+ _testinternalcapi.get_code_var_counts(func2),
+ )
+
+ def assert_exceptions_equal(self, exc1, exc2):
+ assert isinstance(exc1, Exception)
+ assert isinstance(exc2, Exception)
+ if exc1 == exc2:
+ return
+ self.assertIs(type(exc1), type(exc2))
+ self.assertEqual(exc1.args, exc2.args)
+
+ def test_stateless_funcs(self):
interp = interpreters.create()
- for i, (callable, args, kwargs) in enumerate([
- (call_func_noop, (), {}),
- (call_func_return_shareable, (), {}),
- (call_func_return_not_shareable, (), {}),
- (Spam.noop, (), {}),
+ func = call_func_noop
+ with self.subTest('no args, no return'):
+ res = interp.call(func)
+ self.assertIsNone(res)
+
+ func = call_func_return_shareable
+ with self.subTest('no args, returns shareable'):
+ res = interp.call(func)
+ self.assertEqual(res, (1, None))
+
+ func = call_func_return_stateless_func
+ expected = (lambda x: x)
+ with self.subTest('no args, returns stateless func'):
+ res = interp.call(func)
+ self.assert_funcs_equal(res, expected)
+
+ func = call_func_return_pickleable
+ with self.subTest('no args, returns pickleable'):
+ res = interp.call(func)
+ self.assertEqual(res, [1, 2, 3])
+
+ func = call_func_return_unpickleable
+ with self.subTest('no args, returns unpickleable'):
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(func)
+
+ def test_stateless_func_returns_arg(self):
+ interp = interpreters.create()
+
+ for arg in [
+ None,
+ 10,
+ 'spam!',
+ b'spam!',
+ (1, 2, 'spam!'),
+ memoryview(b'spam!'),
+ ]:
+ with self.subTest(f'shareable {arg!r}'):
+ assert _interpreters.is_shareable(arg)
+ res = interp.call(defs.spam_returns_arg, arg)
+ self.assertEqual(res, arg)
+
+ for arg in defs.STATELESS_FUNCTIONS:
+ with self.subTest(f'stateless func {arg!r}'):
+ res = interp.call(defs.spam_returns_arg, arg)
+ self.assert_funcs_equal(res, arg)
+
+ for arg in defs.TOP_FUNCTIONS:
+ if arg in defs.STATELESS_FUNCTIONS:
+ continue
+ with self.subTest(f'stateful func {arg!r}'):
+ res = interp.call(defs.spam_returns_arg, arg)
+ self.assert_funcs_equal(res, arg)
+ assert is_pickleable(arg)
+
+ for arg in [
+ Ellipsis,
+ NotImplemented,
+ object(),
+ 2**1000,
+ [1, 2, 3],
+ {'a': 1, 'b': 2},
+ types.SimpleNamespace(x=42),
+ # builtin types
+ object,
+ type,
+ Exception,
+ ModuleNotFoundError,
+ # builtin exceptions
+ Exception('uh-oh!'),
+ ModuleNotFoundError('mymodule'),
+ # builtin functions
+ len,
+ sys.exit,
+ # user classes
+ *defs.TOP_CLASSES,
+ *(c(*a) for c, a in defs.TOP_CLASSES.items()
+ if c not in defs.CLASSES_WITHOUT_EQUALITY),
+ ]:
+ with self.subTest(f'pickleable {arg!r}'):
+ res = interp.call(defs.spam_returns_arg, arg)
+ if type(arg) is object:
+ self.assertIs(type(res), object)
+ elif isinstance(arg, BaseException):
+ self.assert_exceptions_equal(res, arg)
+ else:
+ self.assertEqual(res, arg)
+ assert is_pickleable(arg)
+
+ for arg in [
+ types.MappingProxyType({}),
+ *(f for f in defs.NESTED_FUNCTIONS
+ if f not in defs.STATELESS_FUNCTIONS),
+ ]:
+ with self.subTest(f'unpickleable {arg!r}'):
+ assert not _interpreters.is_shareable(arg)
+ assert not is_pickleable(arg)
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(defs.spam_returns_arg, arg)
+
+ def test_full_args(self):
+ interp = interpreters.create()
+ expected = (1, 2, 3, 4, 5, 6, ('?',), {'g': 7, 'h': 8})
+ func = defs.spam_full_args
+ res = interp.call(func, 1, 2, 3, 4, '?', e=5, f=6, g=7, h=8)
+ self.assertEqual(res, expected)
+
+ def test_full_defaults(self):
+ # pickleable, but not stateless
+ interp = interpreters.create()
+ expected = (-1, -2, -3, -4, -5, -6, (), {'g': 8, 'h': 9})
+ res = interp.call(defs.spam_full_args_with_defaults, g=8, h=9)
+ self.assertEqual(res, expected)
+
+ def test_modified_arg(self):
+ interp = interpreters.create()
+ script = dedent("""
+ a = 7
+ b = 2
+ c = a ** b
+ """)
+ ns = {}
+ expected = {'a': 7, 'b': 2, 'c': 49}
+ res = interp.call(call_func_exec_wrapper, script, ns)
+ obj, resns, resid = res
+ del resns['__builtins__']
+ self.assertIsNone(obj)
+ self.assertEqual(ns, {})
+ self.assertEqual(resns, expected)
+ self.assertNotEqual(resid, id(ns))
+ self.assertNotEqual(resid, id(resns))
+
+ def test_func_in___main___valid(self):
+ # pickleable, already there
+
+ with os_helper.temp_dir() as tempdir:
+ def new_mod(name, text):
+ script_helper.make_script(tempdir, name, dedent(text))
+
+ def run(text):
+ name = 'myscript'
+ text = dedent(f"""
+ import sys
+ sys.path.insert(0, {tempdir!r})
+
+ """) + dedent(text)
+ filename = script_helper.make_script(tempdir, name, text)
+ res = script_helper.assert_python_ok(filename)
+ return res.out.decode('utf-8').strip()
+
+ # no module indirection
+ with self.subTest('no indirection'):
+ text = run(f"""
+ from concurrent import interpreters
+
+ def spam():
+ # This a global var...
+ return __name__
+
+ if __name__ == '__main__':
+ interp = interpreters.create()
+ res = interp.call(spam)
+ print(res)
+ """)
+ self.assertEqual(text, '<fake __main__>')
+
+ # indirect as func, direct interp
+ new_mod('mymod', f"""
+ def run(interp, func):
+ return interp.call(func)
+ """)
+ with self.subTest('indirect as func, direct interp'):
+ text = run(f"""
+ from concurrent import interpreters
+ import mymod
+
+ def spam():
+ # This is a global var...
+ return __name__
+
+ if __name__ == '__main__':
+ interp = interpreters.create()
+ res = mymod.run(interp, spam)
+ print(res)
+ """)
+ self.assertEqual(text, '<fake __main__>')
+
+ # indirect as func, indirect interp
+ new_mod('mymod', f"""
+ from concurrent import interpreters
+ def run(func):
+ interp = interpreters.create()
+ return interp.call(func)
+ """)
+ with self.subTest('indirect as func, indirect interp'):
+ text = run(f"""
+ import mymod
+
+ def spam():
+ # This is a global var...
+ return __name__
+
+ if __name__ == '__main__':
+ res = mymod.run(spam)
+ print(res)
+ """)
+ self.assertEqual(text, '<fake __main__>')
+
+ def test_func_in___main___invalid(self):
+ interp = interpreters.create()
+
+ funcname = f'{__name__.replace(".", "_")}_spam_okay'
+ script = dedent(f"""
+ def {funcname}():
+ # This is a global var...
+ return __name__
+ """)
+
+ with self.subTest('pickleable, added dynamically'):
+ with defined_in___main__(funcname, script) as arg:
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(defs.spam_returns_arg, arg)
+
+ with self.subTest('lying about __main__'):
+ with defined_in___main__(funcname, script, remove=True) as arg:
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(defs.spam_returns_arg, arg)
+
+ def test_func_in___main___hidden(self):
+ # When a top-level function that uses global variables is called
+ # through Interpreter.call(), it will be pickled, sent over,
+ # and unpickled. That requires that it be found in the other
+ # interpreter's __main__ module. However, the original script
+ # that defined the function is only run in the main interpreter,
+ # so pickle.loads() would normally fail.
+ #
+ # We work around this by running the script in the other
+ # interpreter. However, this is a one-off solution for the sake
+ # of unpickling, so we avoid modifying that interpreter's
+ # __main__ module by running the script in a hidden module.
+ #
+ # In this test we verify that the function runs with the hidden
+ # module as its __globals__ when called in the other interpreter,
+ # and that the interpreter's __main__ module is unaffected.
+ text = dedent("""
+ eggs = True
+
+ def spam(*, explicit=False):
+ if explicit:
+ import __main__
+ ns = __main__.__dict__
+ else:
+ # For now we have to have a LOAD_GLOBAL in the
+ # function in order for globals() to actually return
+ # spam.__globals__. Maybe it doesn't go through pickle?
+ # XXX We will fix this later.
+ spam
+ ns = globals()
+
+ func = ns.get('spam')
+ return [
+ id(ns),
+ ns.get('__name__'),
+ ns.get('__file__'),
+ id(func),
+ None if func is None else repr(func),
+ ns.get('eggs'),
+ ns.get('ham'),
+ ]
+
+ if __name__ == "__main__":
+ from concurrent import interpreters
+ interp = interpreters.create()
+
+ ham = True
+ print([
+ [
+ spam(explicit=True),
+ spam(),
+ ],
+ [
+ interp.call(spam, explicit=True),
+ interp.call(spam),
+ ],
+ ])
+ """)
+ with os_helper.temp_dir() as tempdir:
+ filename = script_helper.make_script(tempdir, 'my-script', text)
+ res = script_helper.assert_python_ok(filename)
+ stdout = res.out.decode('utf-8').strip()
+ local, remote = eval(stdout)
+
+ # In the main interpreter.
+ main, unpickled = local
+ nsid, _, _, funcid, func, _, _ = main
+ self.assertEqual(main, [
+ nsid,
+ '__main__',
+ filename,
+ funcid,
+ func,
+ True,
+ True,
+ ])
+ self.assertIsNot(func, None)
+ self.assertRegex(func, '^<function spam at 0x.*>$')
+ self.assertEqual(unpickled, main)
+
+ # In the subinterpreter.
+ main, unpickled = remote
+ nsid1, _, _, funcid1, _, _, _ = main
+ self.assertEqual(main, [
+ nsid1,
+ '__main__',
+ None,
+ funcid1,
+ None,
+ None,
+ None,
+ ])
+ nsid2, _, _, funcid2, func, _, _ = unpickled
+ self.assertEqual(unpickled, [
+ nsid2,
+ '<fake __main__>',
+ filename,
+ funcid2,
+ func,
+ True,
+ None,
+ ])
+ self.assertIsNot(func, None)
+ self.assertRegex(func, '^<function spam at 0x.*>$')
+ self.assertNotEqual(nsid2, nsid1)
+ self.assertNotEqual(funcid2, funcid1)
+
+ def test_func_in___main___uses_globals(self):
+ # See the note in test_func_in___main___hidden about pickle
+ # and the __main__ module.
+ #
+ # Additionally, the solution to that problem must provide
+ # for global variables on which a pickled function might rely.
+ #
+ # To check that, we run a script that has two global functions
+ # and a global variable in the __main__ module. One of the
+ # functions sets the global variable and the other returns
+ # the value.
+ #
+ # The script calls those functions multiple times in another
+ # interpreter, to verify the following:
+ #
+ # * the global variable is properly initialized
+ # * the global variable retains state between calls
+ # * the setter modifies that persistent variable
+ # * the getter uses the variable
+ # * the calls in the other interpreter do not modify
+ # the main interpreter
+ # * those calls don't modify the interpreter's __main__ module
+ # * the functions and variable do not actually show up in the
+ # other interpreter's __main__ module
+ text = dedent("""
+ count = 0
+
+ def inc(x=1):
+ global count
+ count += x
+
+ def get_count():
+ return count
+
+ if __name__ == "__main__":
+ counts = []
+ results = [count, counts]
+
+ from concurrent import interpreters
+ interp = interpreters.create()
+
+ val = interp.call(get_count)
+ counts.append(val)
+
+ interp.call(inc)
+ val = interp.call(get_count)
+ counts.append(val)
+
+ interp.call(inc, 3)
+ val = interp.call(get_count)
+ counts.append(val)
+
+ results.append(count)
+
+ modified = {name: interp.call(eval, f'{name!r} in vars()')
+ for name in ('count', 'inc', 'get_count')}
+ results.append(modified)
+
+ print(results)
+ """)
+ with os_helper.temp_dir() as tempdir:
+ filename = script_helper.make_script(tempdir, 'my-script', text)
+ res = script_helper.assert_python_ok(filename)
+ stdout = res.out.decode('utf-8').strip()
+ before, counts, after, modified = eval(stdout)
+ self.assertEqual(modified, {
+ 'count': False,
+ 'inc': False,
+ 'get_count': False,
+ })
+ self.assertEqual(before, 0)
+ self.assertEqual(after, 0)
+ self.assertEqual(counts, [0, 1, 4])
+
+ def test_raises(self):
+ interp = interpreters.create()
+ with self.assertRaises(ExecutionFailed):
+ interp.call(call_func_failure)
+
+ with self.assert_fails(ValueError):
+ interp.call(call_func_complex, '???', exc=ValueError('spam'))
+
+ def test_call_valid(self):
+ interp = interpreters.create()
+
+ for i, (callable, args, kwargs, expected) in enumerate([
+ (call_func_noop, (), {}, None),
+ (call_func_ident, ('spamspamspam',), {}, 'spamspamspam'),
+ (call_func_return_shareable, (), {}, (1, None)),
+ (call_func_return_pickleable, (), {}, [1, 2, 3]),
+ (Spam.noop, (), {}, None),
+ (Spam.from_values, (), {}, Spam(())),
+ (Spam.from_values, (1, 2, 3), {}, Spam((1, 2, 3))),
+ (Spam, ('???',), {}, Spam('???')),
+ (Spam(101), (), {}, (101, (), {})),
+ (Spam(10101).run, (), {}, (10101, (), {})),
+ (call_func_complex, ('ident', 'spam'), {}, 'spam'),
+ (call_func_complex, ('full-ident', 'spam'), {}, ('spam', (), {})),
+ (call_func_complex, ('full-ident', 'spam', 'ham'), {'eggs': '!!!'},
+ ('spam', ('ham',), {'eggs': '!!!'})),
+ (call_func_complex, ('globals',), {}, __name__),
+ (call_func_complex, ('interpid',), {}, interp.id),
+ (call_func_complex, ('custom', 'spam!'), {}, Spam('spam!')),
]):
with self.subTest(f'success case #{i+1}'):
- res = interp.call(callable)
- self.assertIs(res, None)
+ res = interp.call(callable, *args, **kwargs)
+ self.assertEqual(res, expected)
+
+ def test_call_invalid(self):
+ interp = interpreters.create()
+
+ func = get_call_func_closure
+ with self.subTest(func):
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(func, 42)
+
+ func = get_call_func_closure(42)
+ with self.subTest(func):
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(func)
+
+ func = call_func_complex
+ op = 'closure'
+ with self.subTest(f'{func} ({op})'):
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(func, op, value='~~~')
+
+ op = 'custom-inner'
+ with self.subTest(f'{func} ({op})'):
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(func, op, 'eggs!')
+
+ def test_callable_requires_frame(self):
+ # There are various functions that require a current frame.
+ interp = interpreters.create()
+ for call, expected in [
+ ((eval, '[1, 2, 3]'),
+ [1, 2, 3]),
+ ((eval, 'sum([1, 2, 3])'),
+ 6),
+ ((exec, '...'),
+ None),
+ ]:
+ with self.subTest(str(call)):
+ res = interp.call(*call)
+ self.assertEqual(res, expected)
+
+ result_not_pickleable = [
+ globals,
+ locals,
+ vars,
+ ]
+ for func, expectedtype in {
+ globals: dict,
+ locals: dict,
+ vars: dict,
+ dir: list,
+ }.items():
+ with self.subTest(str(func)):
+ if func in result_not_pickleable:
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(func)
+ else:
+ res = interp.call(func)
+ self.assertIsInstance(res, expectedtype)
+ self.assertIn('__builtins__', res)
+
+ def test_globals_from_builtins(self):
+ # The builtins exec(), eval(), globals(), locals(), vars(),
+ # and dir() each runs relative to the target interpreter's
+ # __main__ module, when called directly. However,
+ # globals(), locals(), and vars() don't work when called
+ # directly so we don't check them.
+ from _frozen_importlib import BuiltinImporter
+ interp = interpreters.create()
+
+ names = interp.call(dir)
+ self.assertEqual(names, [
+ '__builtins__',
+ '__doc__',
+ '__loader__',
+ '__name__',
+ '__package__',
+ '__spec__',
+ ])
+
+ values = {name: interp.call(eval, name)
+ for name in names if name != '__builtins__'}
+ self.assertEqual(values, {
+ '__name__': '__main__',
+ '__doc__': None,
+ '__spec__': None, # It wasn't imported, so no module spec?
+ '__package__': None,
+ '__loader__': BuiltinImporter,
+ })
+ with self.assertRaises(ExecutionFailed):
+ interp.call(eval, 'spam'),
+
+ interp.call(exec, f'assert dir() == {names}')
+
+ # Update the interpreter's __main__.
+ interp.prepare_main(spam=42)
+ expected = names + ['spam']
+
+ names = interp.call(dir)
+ self.assertEqual(names, expected)
+
+ value = interp.call(eval, 'spam')
+ self.assertEqual(value, 42)
+
+ interp.call(exec, f'assert dir() == {expected}, dir()')
+
+ def test_globals_from_stateless_func(self):
+ # A stateless func, which doesn't depend on any globals,
+ # doesn't go through pickle, so it runs in __main__.
+ def set_global(name, value):
+ globals()[name] = value
+
+ def get_global(name):
+ return globals().get(name)
+
+ interp = interpreters.create()
+
+ modname = interp.call(get_global, '__name__')
+ self.assertEqual(modname, '__main__')
+
+ res = interp.call(get_global, 'spam')
+ self.assertIsNone(res)
+
+ interp.exec('spam = True')
+ res = interp.call(get_global, 'spam')
+ self.assertTrue(res)
+
+ interp.call(set_global, 'spam', 42)
+ res = interp.call(get_global, 'spam')
+ self.assertEqual(res, 42)
+
+ interp.exec('assert spam == 42, repr(spam)')
+
+ def test_call_in_thread(self):
+ interp = interpreters.create()
for i, (callable, args, kwargs) in enumerate([
- (call_func_ident, ('spamspamspam',), {}),
- (get_call_func_closure, (42,), {}),
- (get_call_func_closure(42), (), {}),
+ (call_func_noop, (), {}),
+ (call_func_return_shareable, (), {}),
+ (call_func_return_pickleable, (), {}),
(Spam.from_values, (), {}),
(Spam.from_values, (1, 2, 3), {}),
- (Spam, ('???'), {}),
(Spam(101), (), {}),
(Spam(10101).run, (), {}),
+ (Spam.noop, (), {}),
(call_func_complex, ('ident', 'spam'), {}),
(call_func_complex, ('full-ident', 'spam'), {}),
(call_func_complex, ('full-ident', 'spam', 'ham'), {'eggs': '!!!'}),
(call_func_complex, ('globals',), {}),
(call_func_complex, ('interpid',), {}),
- (call_func_complex, ('closure',), {'value': '~~~'}),
(call_func_complex, ('custom', 'spam!'), {}),
- (call_func_complex, ('custom-inner', 'eggs!'), {}),
- (call_func_complex, ('???',), {'exc': ValueError('spam')}),
- ]):
- with self.subTest(f'invalid case #{i+1}'):
- with self.assertRaises(Exception):
- if args or kwargs:
- raise Exception((args, kwargs))
- interp.call(callable)
-
- with self.assertRaises(ExecutionFailed):
- interp.call(call_func_failure)
-
- def test_call_in_thread(self):
- interp = interpreters.create()
-
- for i, (callable, args, kwargs) in enumerate([
- (call_func_noop, (), {}),
- (call_func_return_shareable, (), {}),
- (call_func_return_not_shareable, (), {}),
- (Spam.noop, (), {}),
]):
with self.subTest(f'success case #{i+1}'):
with self.captured_thread_exception() as ctx:
- t = interp.call_in_thread(callable)
+ t = interp.call_in_thread(callable, *args, **kwargs)
t.join()
self.assertIsNone(ctx.caught)
for i, (callable, args, kwargs) in enumerate([
- (call_func_ident, ('spamspamspam',), {}),
(get_call_func_closure, (42,), {}),
(get_call_func_closure(42), (), {}),
- (Spam.from_values, (), {}),
- (Spam.from_values, (1, 2, 3), {}),
- (Spam, ('???'), {}),
- (Spam(101), (), {}),
- (Spam(10101).run, (), {}),
- (call_func_complex, ('ident', 'spam'), {}),
- (call_func_complex, ('full-ident', 'spam'), {}),
- (call_func_complex, ('full-ident', 'spam', 'ham'), {'eggs': '!!!'}),
- (call_func_complex, ('globals',), {}),
- (call_func_complex, ('interpid',), {}),
- (call_func_complex, ('closure',), {'value': '~~~'}),
- (call_func_complex, ('custom', 'spam!'), {}),
- (call_func_complex, ('custom-inner', 'eggs!'), {}),
- (call_func_complex, ('???',), {'exc': ValueError('spam')}),
]):
with self.subTest(f'invalid case #{i+1}'):
- if args or kwargs:
- continue
with self.captured_thread_exception() as ctx:
- t = interp.call_in_thread(callable)
+ t = interp.call_in_thread(callable, *args, **kwargs)
t.join()
self.assertIsNotNone(ctx.caught)
@@ -1593,18 +2260,14 @@ class LowLevelTests(TestBase):
with results:
exc = _interpreters.exec(interpid, script)
out = results.stdout()
- self.assertEqual(out, '')
- self.assert_ns_equal(exc, types.SimpleNamespace(
- type=types.SimpleNamespace(
- __name__='Exception',
- __qualname__='Exception',
- __module__='builtins',
- ),
- msg='uh-oh!',
+ expected = build_excinfo(
+ Exception, 'uh-oh!',
# We check these in other tests.
formatted=exc.formatted,
errdisplay=exc.errdisplay,
- ))
+ )
+ self.assertEqual(out, '')
+ self.assert_ns_equal(exc, expected)
with self.subTest('from C-API'):
with self.interpreter_from_capi() as interpid:
@@ -1616,25 +2279,50 @@ class LowLevelTests(TestBase):
self.assertEqual(exc.msg, 'it worked!')
def test_call(self):
- with self.subTest('no args'):
- interpid = _interpreters.create()
- exc = _interpreters.call(interpid, call_func_return_shareable)
- self.assertIs(exc, None)
+ interpid = _interpreters.create()
+
+ # Here we focus on basic args and return values.
+ # See TestInterpreterCall for full operational coverage,
+ # including supported callables.
+
+ with self.subTest('no args, return None'):
+ func = defs.spam_minimal
+ res, exc = _interpreters.call(interpid, func)
+ self.assertIsNone(exc)
+ self.assertIsNone(res)
+
+ with self.subTest('empty args, return None'):
+ func = defs.spam_minimal
+ res, exc = _interpreters.call(interpid, func, (), {})
+ self.assertIsNone(exc)
+ self.assertIsNone(res)
+
+ with self.subTest('no args, return non-None'):
+ func = defs.script_with_return
+ res, exc = _interpreters.call(interpid, func)
+ self.assertIsNone(exc)
+ self.assertIs(res, True)
+
+ with self.subTest('full args, return non-None'):
+ expected = (1, 2, 3, 4, 5, 6, (7, 8), {'g': 9, 'h': 0})
+ func = defs.spam_full_args
+ args = (1, 2, 3, 4, 7, 8)
+ kwargs = dict(e=5, f=6, g=9, h=0)
+ res, exc = _interpreters.call(interpid, func, args, kwargs)
+ self.assertIsNone(exc)
+ self.assertEqual(res, expected)
with self.subTest('uncaught exception'):
- interpid = _interpreters.create()
- exc = _interpreters.call(interpid, call_func_failure)
- self.assertEqual(exc, types.SimpleNamespace(
- type=types.SimpleNamespace(
- __name__='Exception',
- __qualname__='Exception',
- __module__='builtins',
- ),
- msg='spam!',
+ func = defs.spam_raises
+ res, exc = _interpreters.call(interpid, func)
+ expected = build_excinfo(
+ Exception, 'spam!',
# We check these in other tests.
formatted=exc.formatted,
errdisplay=exc.errdisplay,
- ))
+ )
+ self.assertIsNone(res)
+ self.assertEqual(exc, expected)
@requires_test_modules
def test_set___main___attrs(self):
diff --git a/Lib/test/test_interpreters/test_channels.py b/Lib/test/test_interpreters/test_channels.py
index 0c027b17cea..52827357078 100644
--- a/Lib/test/test_interpreters/test_channels.py
+++ b/Lib/test/test_interpreters/test_channels.py
@@ -8,8 +8,8 @@ import time
from test.support import import_helper
# Raise SkipTest if subinterpreters not supported.
_channels = import_helper.import_module('_interpchannels')
-from test.support import interpreters
-from test.support.interpreters import channels
+from concurrent import interpreters
+from test.support import channels
from .utils import _run_output, TestBase
@@ -121,9 +121,11 @@ class TestRecvChannelAttrs(TestBase):
def test_pickle(self):
ch, _ = channels.create()
- data = pickle.dumps(ch)
- unpickled = pickle.loads(data)
- self.assertEqual(unpickled, ch)
+ for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.subTest(protocol=protocol):
+ data = pickle.dumps(ch, protocol)
+ unpickled = pickle.loads(data)
+ self.assertEqual(unpickled, ch)
class TestSendChannelAttrs(TestBase):
@@ -152,9 +154,11 @@ class TestSendChannelAttrs(TestBase):
def test_pickle(self):
_, ch = channels.create()
- data = pickle.dumps(ch)
- unpickled = pickle.loads(data)
- self.assertEqual(unpickled, ch)
+ for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.subTest(protocol=protocol):
+ data = pickle.dumps(ch, protocol)
+ unpickled = pickle.loads(data)
+ self.assertEqual(unpickled, ch)
class TestSendRecv(TestBase):
@@ -171,7 +175,7 @@ class TestSendRecv(TestBase):
def test_send_recv_same_interpreter(self):
interp = interpreters.create()
interp.exec(dedent("""
- from test.support.interpreters import channels
+ from test.support import channels
r, s = channels.create()
orig = b'spam'
s.send_nowait(orig)
@@ -244,7 +248,7 @@ class TestSendRecv(TestBase):
def test_send_recv_nowait_same_interpreter(self):
interp = interpreters.create()
interp.exec(dedent("""
- from test.support.interpreters import channels
+ from test.support import channels
r, s = channels.create()
orig = b'spam'
s.send_nowait(orig)
@@ -387,7 +391,7 @@ class TestSendRecv(TestBase):
interp = interpreters.create()
_run_output(interp, dedent(f"""
- from test.support.interpreters import channels
+ from test.support import channels
sch = channels.SendChannel({sch.id})
obj1 = b'spam'
obj2 = b'eggs'
@@ -482,7 +486,7 @@ class TestSendRecv(TestBase):
self.assertEqual(_channels.get_count(rch.id), 0)
_run_output(interp, dedent(f"""
- from test.support.interpreters import channels
+ from test.support import channels
sch = channels.SendChannel({sch.id})
sch.send_nowait(1, unbounditems=channels.UNBOUND)
sch.send_nowait(2, unbounditems=channels.UNBOUND_ERROR)
@@ -518,7 +522,7 @@ class TestSendRecv(TestBase):
sch.send_nowait(1)
_run_output(interp1, dedent(f"""
- from test.support.interpreters import channels
+ from test.support import channels
rch = channels.RecvChannel({rch.id})
sch = channels.SendChannel({sch.id})
obj1 = rch.recv()
@@ -526,7 +530,7 @@ class TestSendRecv(TestBase):
sch.send_nowait(obj1, unbounditems=channels.UNBOUND_REMOVE)
"""))
_run_output(interp2, dedent(f"""
- from test.support.interpreters import channels
+ from test.support import channels
rch = channels.RecvChannel({rch.id})
sch = channels.SendChannel({sch.id})
obj2 = rch.recv()
diff --git a/Lib/test/test_interpreters/test_lifecycle.py b/Lib/test/test_interpreters/test_lifecycle.py
index ac24f6568ac..15537ac6cc8 100644
--- a/Lib/test/test_interpreters/test_lifecycle.py
+++ b/Lib/test/test_interpreters/test_lifecycle.py
@@ -119,7 +119,7 @@ class StartupTests(TestBase):
# The main interpreter's sys.path[0] should be used by subinterpreters.
script = '''
import sys
- from test.support import interpreters
+ from concurrent import interpreters
orig = sys.path[0]
@@ -170,7 +170,7 @@ class FinalizationTests(TestBase):
# is reported, even when subinterpreters get cleaned up at the end.
import subprocess
argv = [sys.executable, '-c', '''if True:
- from test.support import interpreters
+ from concurrent import interpreters
interp = interpreters.create()
raise Exception
''']
diff --git a/Lib/test/test_interpreters/test_queues.py b/Lib/test/test_interpreters/test_queues.py
index 64a2db1230d..5451c6654ac 100644
--- a/Lib/test/test_interpreters/test_queues.py
+++ b/Lib/test/test_interpreters/test_queues.py
@@ -7,9 +7,8 @@ import unittest
from test.support import import_helper, Py_DEBUG
# Raise SkipTest if subinterpreters not supported.
_queues = import_helper.import_module('_interpqueues')
-from test.support import interpreters
-from test.support.interpreters import queues, _crossinterp
-import test._crossinterp_definitions as defs
+from concurrent import interpreters
+from concurrent.interpreters import _queues as queues, _crossinterp
from .utils import _run_output, TestBase as _TestBase
@@ -127,7 +126,7 @@ class QueueTests(TestBase):
interp = interpreters.create()
interp.exec(dedent(f"""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue1 = queues.Queue({queue1.id})
"""));
@@ -189,9 +188,11 @@ class QueueTests(TestBase):
def test_pickle(self):
queue = queues.create()
- data = pickle.dumps(queue)
- unpickled = pickle.loads(data)
- self.assertEqual(unpickled, queue)
+ for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.subTest(protocol=protocol):
+ data = pickle.dumps(queue, protocol)
+ unpickled = pickle.loads(data)
+ self.assertEqual(unpickled, queue)
class TestQueueOps(TestBase):
@@ -209,18 +210,64 @@ class TestQueueOps(TestBase):
self.assertIs(after, True)
def test_full(self):
- expected = [False, False, False, True, False, False, False]
- actual = []
- queue = queues.create(3)
- for _ in range(3):
- actual.append(queue.full())
- queue.put(None)
- actual.append(queue.full())
- for _ in range(3):
- queue.get()
- actual.append(queue.full())
+ for maxsize in [1, 3, 11]:
+ with self.subTest(f'maxsize={maxsize}'):
+ num_to_add = maxsize
+ expected = [False] * (num_to_add * 2 + 3)
+ expected[maxsize] = True
+ expected[maxsize + 1] = True
+
+ queue = queues.create(maxsize)
+ actual = []
+ empty = [queue.empty()]
+
+ for _ in range(num_to_add):
+ actual.append(queue.full())
+ queue.put_nowait(None)
+ actual.append(queue.full())
+ with self.assertRaises(queues.QueueFull):
+ queue.put_nowait(None)
+ empty.append(queue.empty())
+
+ for _ in range(num_to_add):
+ actual.append(queue.full())
+ queue.get_nowait()
+ actual.append(queue.full())
+ with self.assertRaises(queues.QueueEmpty):
+ queue.get_nowait()
+ actual.append(queue.full())
+ empty.append(queue.empty())
- self.assertEqual(actual, expected)
+ self.assertEqual(actual, expected)
+ self.assertEqual(empty, [True, False, True])
+
+ # no max size
+ for args in [(), (0,), (-1,), (-10,)]:
+ with self.subTest(f'maxsize={args[0]}' if args else '<default>'):
+ num_to_add = 13
+ expected = [False] * (num_to_add * 2 + 3)
+
+ queue = queues.create(*args)
+ actual = []
+ empty = [queue.empty()]
+
+ for _ in range(num_to_add):
+ actual.append(queue.full())
+ queue.put_nowait(None)
+ actual.append(queue.full())
+ empty.append(queue.empty())
+
+ for _ in range(num_to_add):
+ actual.append(queue.full())
+ queue.get_nowait()
+ actual.append(queue.full())
+ with self.assertRaises(queues.QueueEmpty):
+ queue.get_nowait()
+ actual.append(queue.full())
+ empty.append(queue.empty())
+
+ self.assertEqual(actual, expected)
+ self.assertEqual(empty, [True, False, True])
def test_qsize(self):
expected = [0, 1, 2, 3, 2, 3, 2, 1, 0, 1, 0]
@@ -325,7 +372,7 @@ class TestQueueOps(TestBase):
def test_put_get_same_interpreter(self):
interp = interpreters.create()
interp.exec(dedent("""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue = queues.create()
"""))
for methname in ('get', 'get_nowait'):
@@ -352,7 +399,7 @@ class TestQueueOps(TestBase):
out = _run_output(
interp,
dedent(f"""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue1 = queues.Queue({queue1.id})
queue2 = queues.Queue({queue2.id})
assert queue1.qsize() == 1, 'expected: queue1.qsize() == 1'
@@ -391,7 +438,7 @@ class TestQueueOps(TestBase):
interp = interpreters.create()
_run_output(interp, dedent(f"""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue = queues.Queue({queue.id})
obj1 = b'spam'
obj2 = b'eggs'
@@ -469,7 +516,7 @@ class TestQueueOps(TestBase):
queue = queues.create()
interp = interpreters.create()
_run_output(interp, dedent(f"""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue = queues.Queue({queue.id})
queue.put(1, unbounditems=queues.UNBOUND)
queue.put(2, unbounditems=queues.UNBOUND_ERROR)
@@ -505,14 +552,14 @@ class TestQueueOps(TestBase):
queue.put(1)
_run_output(interp1, dedent(f"""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue = queues.Queue({queue.id})
obj1 = queue.get()
queue.put(2, unbounditems=queues.UNBOUND)
queue.put(obj1, unbounditems=queues.UNBOUND_REMOVE)
"""))
_run_output(interp2, dedent(f"""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue = queues.Queue({queue.id})
obj2 = queue.get()
obj1 = queue.get()
diff --git a/Lib/test/test_interpreters/test_stress.py b/Lib/test/test_interpreters/test_stress.py
index fae2f38cb55..e25e67a0d4f 100644
--- a/Lib/test/test_interpreters/test_stress.py
+++ b/Lib/test/test_interpreters/test_stress.py
@@ -6,7 +6,7 @@ from test.support import import_helper
from test.support import threading_helper
# Raise SkipTest if subinterpreters not supported.
import_helper.import_module('_interpreters')
-from test.support import interpreters
+from concurrent import interpreters
from .utils import TestBase
diff --git a/Lib/test/test_interpreters/utils.py b/Lib/test/test_interpreters/utils.py
index fc4ad662e03..ae09aa457b4 100644
--- a/Lib/test/test_interpreters/utils.py
+++ b/Lib/test/test_interpreters/utils.py
@@ -12,7 +12,6 @@ from textwrap import dedent
import threading
import types
import unittest
-import warnings
from test import support
@@ -22,7 +21,7 @@ try:
import _interpreters
except ImportError as exc:
raise unittest.SkipTest(str(exc))
-from test.support import interpreters
+from concurrent import interpreters
try:
diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py
index 168e66c5a3f..b487bcabf01 100644
--- a/Lib/test/test_io.py
+++ b/Lib/test/test_io.py
@@ -9,6 +9,7 @@
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
+# * test_free_threading/test_io - tests thread safety of io objects
################################################################################
# ATTENTION TEST WRITERS!!!
@@ -1062,6 +1063,37 @@ class IOTest(unittest.TestCase):
# Silence destructor error
R.flush = lambda self: None
+ @threading_helper.requires_working_threading()
+ def test_write_readline_races(self):
+ # gh-134908: Concurrent iteration over a file caused races
+ thread_count = 2
+ write_count = 100
+ read_count = 100
+
+ def writer(file, barrier):
+ barrier.wait()
+ for _ in range(write_count):
+ file.write("x")
+
+ def reader(file, barrier):
+ barrier.wait()
+ for _ in range(read_count):
+ for line in file:
+ self.assertEqual(line, "")
+
+ with self.open(os_helper.TESTFN, "w+") as f:
+ barrier = threading.Barrier(thread_count + 1)
+ reader = threading.Thread(target=reader, args=(f, barrier))
+ writers = [threading.Thread(target=writer, args=(f, barrier))
+ for _ in range(thread_count)]
+ with threading_helper.catch_threading_exception() as cm:
+ with threading_helper.start_threads(writers + [reader]):
+ pass
+ self.assertIsNone(cm.exc_type)
+
+ self.assertEqual(os.stat(os_helper.TESTFN).st_size,
+ write_count * thread_count)
+
class CIOTest(IOTest):
diff --git a/Lib/test/test_ioctl.py b/Lib/test/test_ioctl.py
index 3c7a58aa2bc..277d2fc99ea 100644
--- a/Lib/test/test_ioctl.py
+++ b/Lib/test/test_ioctl.py
@@ -5,7 +5,7 @@ import sys
import threading
import unittest
from test import support
-from test.support import threading_helper
+from test.support import os_helper, threading_helper
from test.support.import_helper import import_module
fcntl = import_module('fcntl')
termios = import_module('termios')
@@ -201,6 +201,17 @@ class IoctlTestsPty(unittest.TestCase):
new_winsz = struct.unpack("HHHH", result)
self.assertEqual(new_winsz[:2], (20, 40))
+ @unittest.skipUnless(hasattr(fcntl, 'FICLONE'), 'need fcntl.FICLONE')
+ def test_bad_fd(self):
+ # gh-134744: Test error handling
+ fd = os_helper.make_bad_fd()
+ with self.assertRaises(OSError):
+ fcntl.ioctl(fd, fcntl.FICLONE, fd)
+ with self.assertRaises(OSError):
+ fcntl.ioctl(fd, fcntl.FICLONE, b'\0' * 10)
+ with self.assertRaises(OSError):
+ fcntl.ioctl(fd, fcntl.FICLONE, b'\0' * 2048)
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_ipaddress.py b/Lib/test/test_ipaddress.py
index ee95454e64b..db1c38243e2 100644
--- a/Lib/test/test_ipaddress.py
+++ b/Lib/test/test_ipaddress.py
@@ -399,14 +399,16 @@ class AddressTestCase_v6(BaseTestCase, CommonTestMixin_v6):
def test_bad_address_split_v6_too_long(self):
def assertBadSplit(addr):
- msg = r"At most 39 characters expected in %s"
- with self.assertAddressError(msg, repr(re.escape(addr[:14]))):
+ msg = r"At most 45 characters expected in '%s"
+ with self.assertAddressError(msg, re.escape(addr[:45])):
ipaddress.IPv6Address(addr)
# Long IPv6 address
long_addr = ("0:" * 10000) + "0"
assertBadSplit(long_addr)
assertBadSplit(long_addr + "%zoneid")
+ assertBadSplit(long_addr + ":255.255.255.255")
+ assertBadSplit(long_addr + ":ffff:255.255.255.255")
def test_bad_address_split_v6_too_many_parts(self):
def assertBadSplit(addr):
@@ -2189,6 +2191,11 @@ class IpaddrUnitTest(unittest.TestCase):
self.assertEqual(ipaddress.ip_address('FFFF::192.0.2.1'),
ipaddress.ip_address('FFFF::c000:201'))
+ self.assertEqual(ipaddress.ip_address('0000:0000:0000:0000:0000:FFFF:192.168.255.255'),
+ ipaddress.ip_address('::ffff:c0a8:ffff'))
+ self.assertEqual(ipaddress.ip_address('FFFF:0000:0000:0000:0000:0000:192.168.255.255'),
+ ipaddress.ip_address('ffff::c0a8:ffff'))
+
self.assertEqual(ipaddress.ip_address('::FFFF:192.0.2.1%scope'),
ipaddress.ip_address('::FFFF:c000:201%scope'))
self.assertEqual(ipaddress.ip_address('FFFF::192.0.2.1%scope'),
@@ -2201,6 +2208,10 @@ class IpaddrUnitTest(unittest.TestCase):
ipaddress.ip_address('::FFFF:c000:201%scope'))
self.assertNotEqual(ipaddress.ip_address('FFFF::192.0.2.1'),
ipaddress.ip_address('FFFF::c000:201%scope'))
+ self.assertEqual(ipaddress.ip_address('0000:0000:0000:0000:0000:FFFF:192.168.255.255%scope'),
+ ipaddress.ip_address('::ffff:c0a8:ffff%scope'))
+ self.assertEqual(ipaddress.ip_address('FFFF:0000:0000:0000:0000:0000:192.168.255.255%scope'),
+ ipaddress.ip_address('ffff::c0a8:ffff%scope'))
def testIPVersion(self):
self.assertEqual(ipaddress.IPv4Address.version, 4)
@@ -2610,6 +2621,10 @@ class IpaddrUnitTest(unittest.TestCase):
'::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128',
'7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128',
'0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128',
+ '0000:0000:0000:0000:0000:0000:255.255.255.255': '::ffff:ffff/128',
+ '0000:0000:0000:0000:0000:ffff:255.255.255.255': '::ffff:255.255.255.255/128',
+ 'ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255':
+ 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128',
}
for uncompressed, compressed in list(test_addresses.items()):
self.assertEqual(compressed, str(ipaddress.IPv6Interface(
diff --git a/Lib/test/test_iter.py b/Lib/test/test_iter.py
index 1b9f3cf7624..18e4b676c53 100644
--- a/Lib/test/test_iter.py
+++ b/Lib/test/test_iter.py
@@ -1147,7 +1147,7 @@ class TestCase(unittest.TestCase):
def test_exception_locations(self):
# The location of an exception raised from __init__ or
- # __next__ should should be the iterator expression
+ # __next__ should be the iterator expression
def init_raises():
try:
diff --git a/Lib/test/test_json/test_dump.py b/Lib/test/test_json/test_dump.py
index 13b40020781..39470754003 100644
--- a/Lib/test/test_json/test_dump.py
+++ b/Lib/test/test_json/test_dump.py
@@ -22,6 +22,14 @@ class TestDump:
self.assertIn('valid_key', o)
self.assertNotIn(b'invalid_key', o)
+ def test_dump_skipkeys_indent_empty(self):
+ v = {b'invalid_key': False}
+ self.assertEqual(self.json.dumps(v, skipkeys=True, indent=4), '{}')
+
+ def test_skipkeys_indent(self):
+ v = {b'invalid_key': False, 'valid_key': True}
+ self.assertEqual(self.json.dumps(v, skipkeys=True, indent=4), '{\n "valid_key": true\n}')
+
def test_encode_truefalse(self):
self.assertEqual(self.dumps(
{True: False, False: True}, sort_keys=True),
diff --git a/Lib/test/test_json/test_tool.py b/Lib/test/test_json/test_tool.py
index 9ea2679c77e..30f9bb33316 100644
--- a/Lib/test/test_json/test_tool.py
+++ b/Lib/test/test_json/test_tool.py
@@ -270,7 +270,7 @@ class TestMain(unittest.TestCase):
(r'" \"foo\" "', f'{t.string}" \\"foo\\" "{t.reset}'),
('"α"', f'{t.string}"\\u03b1"{t.reset}'),
('123', f'{t.number}123{t.reset}'),
- ('-1.2345e+23', f'{t.number}-1.2345e+23{t.reset}'),
+ ('-1.25e+23', f'{t.number}-1.25e+23{t.reset}'),
(r'{"\\": ""}',
f'''\
{ob}
diff --git a/Lib/test/test_list.py b/Lib/test/test_list.py
index 6894fba2ad1..223f34fb696 100644
--- a/Lib/test/test_list.py
+++ b/Lib/test/test_list.py
@@ -365,5 +365,20 @@ class ListTest(list_tests.CommonTest):
rc, _, _ = assert_python_ok("-c", code)
self.assertEqual(rc, 0)
+ def test_list_overwrite_local(self):
+ """Test that overwriting the last reference to the
+ iterable doesn't prematurely free the iterable"""
+
+ def foo(x):
+ self.assertEqual(sys.getrefcount(x), 1)
+ r = 0
+ for i in x:
+ r += i
+ x = None
+ return r
+
+ self.assertEqual(foo(list(range(10))), 45)
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_listcomps.py b/Lib/test/test_listcomps.py
index cffdeeacc5d..70148dc30fc 100644
--- a/Lib/test/test_listcomps.py
+++ b/Lib/test/test_listcomps.py
@@ -716,7 +716,7 @@ class ListComprehensionTest(unittest.TestCase):
def test_exception_locations(self):
# The location of an exception raised from __init__ or
- # __next__ should should be the iterator expression
+ # __next__ should be the iterator expression
def init_raises():
try:
diff --git a/Lib/test/test_locale.py b/Lib/test/test_locale.py
index 455d2af37ef..55b502e52ca 100644
--- a/Lib/test/test_locale.py
+++ b/Lib/test/test_locale.py
@@ -387,6 +387,10 @@ class NormalizeTest(unittest.TestCase):
self.check('c', 'C')
self.check('posix', 'C')
+ def test_c_utf8(self):
+ self.check('c.utf8', 'C.UTF-8')
+ self.check('C.UTF-8', 'C.UTF-8')
+
def test_english(self):
self.check('en', 'en_US.ISO8859-1')
self.check('EN', 'en_US.ISO8859-1')
diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py
index fa5b1e43816..3819965ed2c 100644
--- a/Lib/test/test_logging.py
+++ b/Lib/test/test_logging.py
@@ -1036,7 +1036,7 @@ class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
allow_reuse_address = True
- allow_reuse_port = True
+ allow_reuse_port = False
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
@@ -4214,89 +4214,6 @@ class ConfigDictTest(BaseTest):
handler = logging.getHandlerByName('custom')
self.assertEqual(handler.custom_kwargs, custom_kwargs)
- # See gh-91555 and gh-90321
- @support.requires_subprocess()
- def test_deadlock_in_queue(self):
- queue = multiprocessing.Queue()
- handler = logging.handlers.QueueHandler(queue)
- logger = multiprocessing.get_logger()
- level = logger.level
- try:
- logger.setLevel(logging.DEBUG)
- logger.addHandler(handler)
- logger.debug("deadlock")
- finally:
- logger.setLevel(level)
- logger.removeHandler(handler)
-
- def test_recursion_in_custom_handler(self):
- class BadHandler(logging.Handler):
- def __init__(self):
- super().__init__()
- def emit(self, record):
- logger.debug("recurse")
- logger = logging.getLogger("test_recursion_in_custom_handler")
- logger.addHandler(BadHandler())
- logger.setLevel(logging.DEBUG)
- logger.debug("boom")
-
- @threading_helper.requires_working_threading()
- def test_thread_supression_noninterference(self):
- lock = threading.Lock()
- logger = logging.getLogger("test_thread_supression_noninterference")
-
- # Block on the first call, allow others through
- #
- # NOTE: We need to bypass the base class's lock, otherwise that will
- # block multiple calls to the same handler itself.
- class BlockOnceHandler(TestHandler):
- def __init__(self, barrier):
- super().__init__(support.Matcher())
- self.barrier = barrier
-
- def createLock(self):
- self.lock = None
-
- def handle(self, record):
- self.emit(record)
-
- def emit(self, record):
- if self.barrier:
- barrier = self.barrier
- self.barrier = None
- barrier.wait()
- with lock:
- pass
- super().emit(record)
- logger.info("blow up if not supressed")
-
- barrier = threading.Barrier(2)
- handler = BlockOnceHandler(barrier)
- logger.addHandler(handler)
- logger.setLevel(logging.DEBUG)
-
- t1 = threading.Thread(target=logger.debug, args=("1",))
- with lock:
-
- # Ensure first thread is blocked in the handler, hence supressing logging...
- t1.start()
- barrier.wait()
-
- # ...but the second thread should still be able to log...
- t2 = threading.Thread(target=logger.debug, args=("2",))
- t2.start()
- t2.join(timeout=3)
-
- self.assertEqual(len(handler.buffer), 1)
- self.assertTrue(handler.matches(levelno=logging.DEBUG, message='2'))
-
- # The first thread should still be blocked here
- self.assertTrue(t1.is_alive())
-
- # Now the lock has been released the first thread should complete
- t1.join()
- self.assertEqual(len(handler.buffer), 2)
- self.assertTrue(handler.matches(levelno=logging.DEBUG, message='1'))
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
diff --git a/Lib/test/test_math.py b/Lib/test/test_math.py
index 913a60bf9e0..46cb54647b1 100644
--- a/Lib/test/test_math.py
+++ b/Lib/test/test_math.py
@@ -475,6 +475,19 @@ class MathTests(unittest.TestCase):
# similarly, copysign(2., NAN) could be 2. or -2.
self.assertEqual(abs(math.copysign(2., NAN)), 2.)
+ def test_signbit(self):
+ self.assertRaises(TypeError, math.signbit)
+ self.assertRaises(TypeError, math.signbit, '1.0')
+
+ # C11, §7.12.3.6 requires signbit() to return a nonzero value
+ # if and only if the sign of its argument value is negative,
+ # but in practice, we are only interested in a boolean value.
+ self.assertIsInstance(math.signbit(1.0), bool)
+
+ for arg in [0., 1., INF, NAN]:
+ self.assertFalse(math.signbit(arg))
+ self.assertTrue(math.signbit(-arg))
+
def testCos(self):
self.assertRaises(TypeError, math.cos)
self.ftest('cos(-pi/2)', math.cos(-math.pi/2), 0, abs_tol=math.ulp(1))
@@ -1214,6 +1227,12 @@ class MathTests(unittest.TestCase):
self.assertEqual(math.ldexp(NINF, n), NINF)
self.assertTrue(math.isnan(math.ldexp(NAN, n)))
+ @requires_IEEE_754
+ def testLdexp_denormal(self):
+ # Denormal output incorrectly rounded (truncated)
+ # on some Windows.
+ self.assertEqual(math.ldexp(6993274598585239, -1126), 1e-323)
+
def testLog(self):
self.assertRaises(TypeError, math.log)
self.assertRaises(TypeError, math.log, 1, 2, 3)
@@ -1381,7 +1400,6 @@ class MathTests(unittest.TestCase):
args = ((-5, -5, 10), (1.5, 4611686018427387904, 2305843009213693952))
self.assertEqual(sumprod(*args), 0.0)
-
@requires_IEEE_754
@unittest.skipIf(HAVE_DOUBLE_ROUNDING,
"sumprod() accuracy not guaranteed on machines with double rounding")
@@ -1967,6 +1985,28 @@ class MathTests(unittest.TestCase):
self.assertFalse(math.isfinite(float("inf")))
self.assertFalse(math.isfinite(float("-inf")))
+ def testIsnormal(self):
+ self.assertTrue(math.isnormal(1.25))
+ self.assertTrue(math.isnormal(-1.0))
+ self.assertFalse(math.isnormal(0.0))
+ self.assertFalse(math.isnormal(-0.0))
+ self.assertFalse(math.isnormal(INF))
+ self.assertFalse(math.isnormal(NINF))
+ self.assertFalse(math.isnormal(NAN))
+ self.assertFalse(math.isnormal(FLOAT_MIN/2))
+ self.assertFalse(math.isnormal(-FLOAT_MIN/2))
+
+ def testIssubnormal(self):
+ self.assertFalse(math.issubnormal(1.25))
+ self.assertFalse(math.issubnormal(-1.0))
+ self.assertFalse(math.issubnormal(0.0))
+ self.assertFalse(math.issubnormal(-0.0))
+ self.assertFalse(math.issubnormal(INF))
+ self.assertFalse(math.issubnormal(NINF))
+ self.assertFalse(math.issubnormal(NAN))
+ self.assertTrue(math.issubnormal(FLOAT_MIN/2))
+ self.assertTrue(math.issubnormal(-FLOAT_MIN/2))
+
def testIsnan(self):
self.assertTrue(math.isnan(float("nan")))
self.assertTrue(math.isnan(float("-nan")))
@@ -2458,7 +2498,6 @@ class MathTests(unittest.TestCase):
with self.assertRaises(ValueError):
math.nextafter(1.0, INF, steps=-1)
-
@requires_IEEE_754
def test_ulp(self):
self.assertEqual(math.ulp(1.0), sys.float_info.epsilon)
diff --git a/Lib/test/test_monitoring.py b/Lib/test/test_monitoring.py
index 263e4e6f394..a932ac80117 100644
--- a/Lib/test/test_monitoring.py
+++ b/Lib/test/test_monitoring.py
@@ -2157,6 +2157,21 @@ class TestRegressions(MonitoringTestBase, unittest.TestCase):
sys.monitoring.restart_events()
sys.monitoring.set_events(0, 0)
+ def test_134879(self):
+ # gh-134879
+ # Specialized FOR_ITER not incrementing index
+ def foo():
+ t = 0
+ for i in [1,2,3,4]:
+ t += i
+ self.assertEqual(t, 10)
+
+ sys.monitoring.use_tool_id(0, "test")
+ self.addCleanup(sys.monitoring.free_tool_id, 0)
+ sys.monitoring.set_local_events(0, foo.__code__, E.BRANCH_LEFT | E.BRANCH_RIGHT)
+ foo()
+ sys.monitoring.set_local_events(0, foo.__code__, 0)
+
class TestOptimizer(MonitoringTestBase, unittest.TestCase):
diff --git a/Lib/test/test_netrc.py b/Lib/test/test_netrc.py
index 81e11a293cc..9d720f62710 100644
--- a/Lib/test/test_netrc.py
+++ b/Lib/test/test_netrc.py
@@ -1,11 +1,7 @@
import netrc, os, unittest, sys, textwrap
+from test import support
from test.support import os_helper
-try:
- import pwd
-except ImportError:
- pwd = None
-
temp_filename = os_helper.TESTFN
class NetrcTestCase(unittest.TestCase):
@@ -269,9 +265,14 @@ class NetrcTestCase(unittest.TestCase):
machine bar.domain.com login foo password pass
""", '#pass')
+ @unittest.skipUnless(support.is_wasi, 'WASI only test')
+ def test_security_on_WASI(self):
+ self.assertFalse(netrc._can_security_check())
+ self.assertEqual(netrc._getpwuid(0), 'uid 0')
+ self.assertEqual(netrc._getpwuid(123456), 'uid 123456')
@unittest.skipUnless(os.name == 'posix', 'POSIX only test')
- @unittest.skipIf(pwd is None, 'security check requires pwd module')
+ @unittest.skipUnless(hasattr(os, 'getuid'), "os.getuid is required")
@os_helper.skip_unless_working_chmod
def test_security(self):
# This test is incomplete since we are normally not run as root and
diff --git a/Lib/test/test_ntpath.py b/Lib/test/test_ntpath.py
index f83ef225a6e..22f6403d482 100644
--- a/Lib/test/test_ntpath.py
+++ b/Lib/test/test_ntpath.py
@@ -6,8 +6,9 @@ import subprocess
import sys
import unittest
import warnings
-from test.support import cpython_only, os_helper
-from test.support import TestFailed, is_emscripten
+from ntpath import ALLOW_MISSING
+from test import support
+from test.support import TestFailed, cpython_only, os_helper
from test.support.os_helper import FakePath
from test import test_genericpath
from tempfile import TemporaryFile
@@ -77,6 +78,10 @@ def tester(fn, wantResult):
%(str(fn), str(wantResult), repr(gotResult)))
+def _parameterize(*parameters):
+ return support.subTests('kwargs', parameters, _do_cleanups=True)
+
+
class NtpathTestCase(unittest.TestCase):
def assertPathEqual(self, path1, path2):
if path1 == path2 or _norm(path1) == _norm(path2):
@@ -475,6 +480,27 @@ class TestNtpath(NtpathTestCase):
tester("ntpath.realpath('.\\.')", expected)
tester("ntpath.realpath('\\'.join(['.'] * 100))", expected)
+ def test_realpath_curdir_strict(self):
+ expected = ntpath.normpath(os.getcwd())
+ tester("ntpath.realpath('.', strict=True)", expected)
+ tester("ntpath.realpath('./.', strict=True)", expected)
+ tester("ntpath.realpath('/'.join(['.'] * 100), strict=True)", expected)
+ tester("ntpath.realpath('.\\.', strict=True)", expected)
+ tester("ntpath.realpath('\\'.join(['.'] * 100), strict=True)", expected)
+
+ def test_realpath_curdir_missing_ok(self):
+ expected = ntpath.normpath(os.getcwd())
+ tester("ntpath.realpath('.', strict=ALLOW_MISSING)",
+ expected)
+ tester("ntpath.realpath('./.', strict=ALLOW_MISSING)",
+ expected)
+ tester("ntpath.realpath('/'.join(['.'] * 100), strict=ALLOW_MISSING)",
+ expected)
+ tester("ntpath.realpath('.\\.', strict=ALLOW_MISSING)",
+ expected)
+ tester("ntpath.realpath('\\'.join(['.'] * 100), strict=ALLOW_MISSING)",
+ expected)
+
def test_realpath_pardir(self):
expected = ntpath.normpath(os.getcwd())
tester("ntpath.realpath('..')", ntpath.dirname(expected))
@@ -487,24 +513,59 @@ class TestNtpath(NtpathTestCase):
tester("ntpath.realpath('\\'.join(['..'] * 50))",
ntpath.splitdrive(expected)[0] + '\\')
+ def test_realpath_pardir_strict(self):
+ expected = ntpath.normpath(os.getcwd())
+ tester("ntpath.realpath('..', strict=True)", ntpath.dirname(expected))
+ tester("ntpath.realpath('../..', strict=True)",
+ ntpath.dirname(ntpath.dirname(expected)))
+ tester("ntpath.realpath('/'.join(['..'] * 50), strict=True)",
+ ntpath.splitdrive(expected)[0] + '\\')
+ tester("ntpath.realpath('..\\..', strict=True)",
+ ntpath.dirname(ntpath.dirname(expected)))
+ tester("ntpath.realpath('\\'.join(['..'] * 50), strict=True)",
+ ntpath.splitdrive(expected)[0] + '\\')
+
+ def test_realpath_pardir_missing_ok(self):
+ expected = ntpath.normpath(os.getcwd())
+ tester("ntpath.realpath('..', strict=ALLOW_MISSING)",
+ ntpath.dirname(expected))
+ tester("ntpath.realpath('../..', strict=ALLOW_MISSING)",
+ ntpath.dirname(ntpath.dirname(expected)))
+ tester("ntpath.realpath('/'.join(['..'] * 50), strict=ALLOW_MISSING)",
+ ntpath.splitdrive(expected)[0] + '\\')
+ tester("ntpath.realpath('..\\..', strict=ALLOW_MISSING)",
+ ntpath.dirname(ntpath.dirname(expected)))
+ tester("ntpath.realpath('\\'.join(['..'] * 50), strict=ALLOW_MISSING)",
+ ntpath.splitdrive(expected)[0] + '\\')
+
@os_helper.skip_unless_symlink
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
- def test_realpath_basic(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_basic(self, kwargs):
ABSTFN = ntpath.abspath(os_helper.TESTFN)
open(ABSTFN, "wb").close()
self.addCleanup(os_helper.unlink, ABSTFN)
self.addCleanup(os_helper.unlink, ABSTFN + "1")
os.symlink(ABSTFN, ABSTFN + "1")
- self.assertPathEqual(ntpath.realpath(ABSTFN + "1"), ABSTFN)
- self.assertPathEqual(ntpath.realpath(os.fsencode(ABSTFN + "1")),
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "1", **kwargs), ABSTFN)
+ self.assertPathEqual(ntpath.realpath(os.fsencode(ABSTFN + "1"), **kwargs),
os.fsencode(ABSTFN))
# gh-88013: call ntpath.realpath with binary drive name may raise a
# TypeError. The drive should not exist to reproduce the bug.
drives = {f"{c}:\\" for c in string.ascii_uppercase} - set(os.listdrives())
d = drives.pop().encode()
- self.assertEqual(ntpath.realpath(d), d)
+ self.assertEqual(ntpath.realpath(d, strict=False), d)
+
+ # gh-106242: Embedded nulls and non-strict fallback to abspath
+ if kwargs:
+ with self.assertRaises(OSError):
+ ntpath.realpath(os_helper.TESTFN + "\0spam",
+ **kwargs)
+ else:
+ self.assertEqual(ABSTFN + "\0spam",
+ ntpath.realpath(os_helper.TESTFN + "\0spam", **kwargs))
@os_helper.skip_unless_symlink
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
@@ -527,51 +588,66 @@ class TestNtpath(NtpathTestCase):
self.assertEqual(realpath(path, strict=False), path)
# gh-106242: Embedded nulls should raise OSError (not ValueError)
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertRaises(OSError, realpath, path, strict=ALLOW_MISSING)
path = ABSTFNb + b'\x00'
self.assertEqual(realpath(path, strict=False), path)
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertRaises(OSError, realpath, path, strict=ALLOW_MISSING)
path = ABSTFN + '\\nonexistent\\x\x00'
self.assertEqual(realpath(path, strict=False), path)
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertRaises(OSError, realpath, path, strict=ALLOW_MISSING)
path = ABSTFNb + b'\\nonexistent\\x\x00'
self.assertEqual(realpath(path, strict=False), path)
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertRaises(OSError, realpath, path, strict=ALLOW_MISSING)
path = ABSTFN + '\x00\\..'
self.assertEqual(realpath(path, strict=False), os.getcwd())
self.assertEqual(realpath(path, strict=True), os.getcwd())
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), os.getcwd())
path = ABSTFNb + b'\x00\\..'
self.assertEqual(realpath(path, strict=False), os.getcwdb())
self.assertEqual(realpath(path, strict=True), os.getcwdb())
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), os.getcwdb())
path = ABSTFN + '\\nonexistent\\x\x00\\..'
self.assertEqual(realpath(path, strict=False), ABSTFN + '\\nonexistent')
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), ABSTFN + '\\nonexistent')
path = ABSTFNb + b'\\nonexistent\\x\x00\\..'
self.assertEqual(realpath(path, strict=False), ABSTFNb + b'\\nonexistent')
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), ABSTFNb + b'\\nonexistent')
+ @unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_invalid_unicode_paths(self, kwargs):
+ realpath = ntpath.realpath
+ ABSTFN = ntpath.abspath(os_helper.TESTFN)
+ ABSTFNb = os.fsencode(ABSTFN)
path = ABSTFNb + b'\xff'
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=False)
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
path = ABSTFNb + b'\\nonexistent\\\xff'
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=False)
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
path = ABSTFNb + b'\xff\\..'
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=False)
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
path = ABSTFNb + b'\\nonexistent\\\xff\\..'
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=False)
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
@os_helper.skip_unless_symlink
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
- def test_realpath_relative(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_relative(self, kwargs):
ABSTFN = ntpath.abspath(os_helper.TESTFN)
open(ABSTFN, "wb").close()
self.addCleanup(os_helper.unlink, ABSTFN)
self.addCleanup(os_helper.unlink, ABSTFN + "1")
os.symlink(ABSTFN, ntpath.relpath(ABSTFN + "1"))
- self.assertPathEqual(ntpath.realpath(ABSTFN + "1"), ABSTFN)
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "1", **kwargs), ABSTFN)
@os_helper.skip_unless_symlink
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
@@ -723,7 +799,62 @@ class TestNtpath(NtpathTestCase):
@os_helper.skip_unless_symlink
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
- def test_realpath_symlink_prefix(self):
+ def test_realpath_symlink_loops_raise(self):
+ # Symlink loops raise OSError in ALLOW_MISSING mode
+ ABSTFN = ntpath.abspath(os_helper.TESTFN)
+ self.addCleanup(os_helper.unlink, ABSTFN)
+ self.addCleanup(os_helper.unlink, ABSTFN + "1")
+ self.addCleanup(os_helper.unlink, ABSTFN + "2")
+ self.addCleanup(os_helper.unlink, ABSTFN + "y")
+ self.addCleanup(os_helper.unlink, ABSTFN + "c")
+ self.addCleanup(os_helper.unlink, ABSTFN + "a")
+ self.addCleanup(os_helper.unlink, ABSTFN + "x")
+
+ os.symlink(ABSTFN, ABSTFN)
+ self.assertRaises(OSError, ntpath.realpath, ABSTFN, strict=ALLOW_MISSING)
+
+ os.symlink(ABSTFN + "1", ABSTFN + "2")
+ os.symlink(ABSTFN + "2", ABSTFN + "1")
+ self.assertRaises(OSError, ntpath.realpath, ABSTFN + "1",
+ strict=ALLOW_MISSING)
+ self.assertRaises(OSError, ntpath.realpath, ABSTFN + "2",
+ strict=ALLOW_MISSING)
+ self.assertRaises(OSError, ntpath.realpath, ABSTFN + "1\\x",
+ strict=ALLOW_MISSING)
+
+ # Windows eliminates '..' components before resolving links;
+ # realpath is not expected to raise if this removes the loop.
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "1\\.."),
+ ntpath.dirname(ABSTFN))
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "1\\..\\x"),
+ ntpath.dirname(ABSTFN) + "\\x")
+
+ os.symlink(ABSTFN + "x", ABSTFN + "y")
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "1\\..\\"
+ + ntpath.basename(ABSTFN) + "y"),
+ ABSTFN + "x")
+ self.assertRaises(
+ OSError, ntpath.realpath,
+ ABSTFN + "1\\..\\" + ntpath.basename(ABSTFN) + "1",
+ strict=ALLOW_MISSING)
+
+ os.symlink(ntpath.basename(ABSTFN) + "a\\b", ABSTFN + "a")
+ self.assertRaises(OSError, ntpath.realpath, ABSTFN + "a",
+ strict=ALLOW_MISSING)
+
+ os.symlink("..\\" + ntpath.basename(ntpath.dirname(ABSTFN))
+ + "\\" + ntpath.basename(ABSTFN) + "c", ABSTFN + "c")
+ self.assertRaises(OSError, ntpath.realpath, ABSTFN + "c",
+ strict=ALLOW_MISSING)
+
+ # Test using relative path as well.
+ self.assertRaises(OSError, ntpath.realpath, ntpath.basename(ABSTFN),
+ strict=ALLOW_MISSING)
+
+ @os_helper.skip_unless_symlink
+ @unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_symlink_prefix(self, kwargs):
ABSTFN = ntpath.abspath(os_helper.TESTFN)
self.addCleanup(os_helper.unlink, ABSTFN + "3")
self.addCleanup(os_helper.unlink, "\\\\?\\" + ABSTFN + "3.")
@@ -738,9 +869,9 @@ class TestNtpath(NtpathTestCase):
f.write(b'1')
os.symlink("\\\\?\\" + ABSTFN + "3.", ABSTFN + "3.link")
- self.assertPathEqual(ntpath.realpath(ABSTFN + "3link"),
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "3link", **kwargs),
ABSTFN + "3")
- self.assertPathEqual(ntpath.realpath(ABSTFN + "3.link"),
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "3.link", **kwargs),
"\\\\?\\" + ABSTFN + "3.")
# Resolved paths should be usable to open target files
@@ -750,14 +881,17 @@ class TestNtpath(NtpathTestCase):
self.assertEqual(f.read(), b'1')
# When the prefix is included, it is not stripped
- self.assertPathEqual(ntpath.realpath("\\\\?\\" + ABSTFN + "3link"),
+ self.assertPathEqual(ntpath.realpath("\\\\?\\" + ABSTFN + "3link", **kwargs),
"\\\\?\\" + ABSTFN + "3")
- self.assertPathEqual(ntpath.realpath("\\\\?\\" + ABSTFN + "3.link"),
+ self.assertPathEqual(ntpath.realpath("\\\\?\\" + ABSTFN + "3.link", **kwargs),
"\\\\?\\" + ABSTFN + "3.")
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
def test_realpath_nul(self):
tester("ntpath.realpath('NUL')", r'\\.\NUL')
+ tester("ntpath.realpath('NUL', strict=False)", r'\\.\NUL')
+ tester("ntpath.realpath('NUL', strict=True)", r'\\.\NUL')
+ tester("ntpath.realpath('NUL', strict=ALLOW_MISSING)", r'\\.\NUL')
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
@unittest.skipUnless(HAVE_GETSHORTPATHNAME, 'need _getshortpathname')
@@ -781,12 +915,20 @@ class TestNtpath(NtpathTestCase):
self.assertPathEqual(test_file_long, ntpath.realpath(test_file_short))
- with os_helper.change_cwd(test_dir_long):
- self.assertPathEqual(test_file_long, ntpath.realpath("file.txt"))
- with os_helper.change_cwd(test_dir_long.lower()):
- self.assertPathEqual(test_file_long, ntpath.realpath("file.txt"))
- with os_helper.change_cwd(test_dir_short):
- self.assertPathEqual(test_file_long, ntpath.realpath("file.txt"))
+ for kwargs in {}, {'strict': True}, {'strict': ALLOW_MISSING}:
+ with self.subTest(**kwargs):
+ with os_helper.change_cwd(test_dir_long):
+ self.assertPathEqual(
+ test_file_long,
+ ntpath.realpath("file.txt", **kwargs))
+ with os_helper.change_cwd(test_dir_long.lower()):
+ self.assertPathEqual(
+ test_file_long,
+ ntpath.realpath("file.txt", **kwargs))
+ with os_helper.change_cwd(test_dir_short):
+ self.assertPathEqual(
+ test_file_long,
+ ntpath.realpath("file.txt", **kwargs))
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
def test_realpath_permission(self):
@@ -807,12 +949,15 @@ class TestNtpath(NtpathTestCase):
# Automatic generation of short names may be disabled on
# NTFS volumes for the sake of performance.
# They're not supported at all on ReFS and exFAT.
- subprocess.run(
+ p = subprocess.run(
# Try to set the short name manually.
['fsutil.exe', 'file', 'setShortName', test_file, 'LONGFI~1.TXT'],
creationflags=subprocess.DETACHED_PROCESS
)
+ if p.returncode:
+ raise unittest.SkipTest('failed to set short name')
+
try:
self.assertPathEqual(test_file, ntpath.realpath(test_file_short))
except AssertionError:
diff --git a/Lib/test/test_optparse.py b/Lib/test/test_optparse.py
index e6ffd2b0ffe..e476e472780 100644
--- a/Lib/test/test_optparse.py
+++ b/Lib/test/test_optparse.py
@@ -615,9 +615,9 @@ Options:
self.parser.add_option(
"-p", "--prob",
help="blow up with probability PROB [default: %default]")
- self.parser.set_defaults(prob=0.43)
+ self.parser.set_defaults(prob=0.25)
expected_help = self.help_prefix + \
- " -p PROB, --prob=PROB blow up with probability PROB [default: 0.43]\n"
+ " -p PROB, --prob=PROB blow up with probability PROB [default: 0.25]\n"
self.assertHelp(self.parser, expected_help)
def test_alt_expand(self):
diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py
index 88b5b0e6e35..5217037ae9d 100644
--- a/Lib/test/test_os.py
+++ b/Lib/test/test_os.py
@@ -13,7 +13,6 @@ import itertools
import locale
import os
import pickle
-import platform
import select
import selectors
import shutil
@@ -1919,6 +1918,10 @@ class MakedirTests(unittest.TestCase):
support.is_wasi,
"WASI's umask is a stub."
)
+ @unittest.skipIf(
+ support.is_emscripten,
+ "TODO: Fails in buildbot; see #135783"
+ )
def test_mode(self):
with os_helper.temp_umask(0o002):
base = os_helper.TESTFN
@@ -4291,13 +4294,8 @@ class EventfdTests(unittest.TestCase):
@unittest.skipIf(sys.platform == "android", "gh-124873: Test is flaky on Android")
@support.requires_linux_version(2, 6, 30)
class TimerfdTests(unittest.TestCase):
- # 1 ms accuracy is reliably achievable on every platform except Android
- # emulators, where we allow 10 ms (gh-108277).
- if sys.platform == "android" and platform.android_ver().is_emulator:
- CLOCK_RES_PLACES = 2
- else:
- CLOCK_RES_PLACES = 3
-
+ # gh-126112: Use 10 ms to tolerate slow buildbots
+ CLOCK_RES_PLACES = 2 # 10 ms
CLOCK_RES = 10 ** -CLOCK_RES_PLACES
CLOCK_RES_NS = 10 ** (9 - CLOCK_RES_PLACES)
diff --git a/Lib/test/test_pathlib/test_pathlib.py b/Lib/test/test_pathlib/test_pathlib.py
index 13356b4cfe0..b2e2cdb3338 100644
--- a/Lib/test/test_pathlib/test_pathlib.py
+++ b/Lib/test/test_pathlib/test_pathlib.py
@@ -2954,7 +2954,13 @@ class PathTest(PurePathTest):
else:
# ".." segments are normalized first on Windows, so this path is stat()able.
self.assertEqual(set(p.glob("xyzzy/..")), { P(self.base, "xyzzy", "..") })
- self.assertEqual(set(p.glob("/".join([".."] * 50))), { P(self.base, *[".."] * 50)})
+ if sys.platform == "emscripten":
+ # Emscripten will return ELOOP if there are 49 or more ..'s.
+ # Can remove when https://github.com/emscripten-core/emscripten/pull/24591 is merged.
+ NDOTDOTS = 48
+ else:
+ NDOTDOTS = 50
+ self.assertEqual(set(p.glob("/".join([".."] * NDOTDOTS))), { P(self.base, *[".."] * NDOTDOTS)})
def test_glob_inaccessible(self):
P = self.cls
diff --git a/Lib/test/test_peepholer.py b/Lib/test/test_peepholer.py
index 0a9ba578673..98629df4574 100644
--- a/Lib/test/test_peepholer.py
+++ b/Lib/test/test_peepholer.py
@@ -12,7 +12,7 @@ except ImportError:
from test import support
from test.support.bytecode_helper import (
- BytecodeTestCase, CfgOptimizationTestCase, CompilationStepTestCase)
+ BytecodeTestCase, CfgOptimizationTestCase)
def compile_pattern_with_fast_locals(pattern):
@@ -292,6 +292,7 @@ class TestTranforms(BytecodeTestCase):
('---x', 'UNARY_NEGATIVE', None, False, None, None),
('~~~x', 'UNARY_INVERT', None, False, None, None),
('+++x', 'CALL_INTRINSIC_1', intrinsic_positive, False, None, None),
+ ('~True', 'UNARY_INVERT', None, False, None, None),
]
for (
@@ -718,9 +719,9 @@ class TestTranforms(BytecodeTestCase):
self.assertEqual(format('x = %d!', 1234), 'x = 1234!')
self.assertEqual(format('x = %x!', 1234), 'x = 4d2!')
self.assertEqual(format('x = %f!', 1234), 'x = 1234.000000!')
- self.assertEqual(format('x = %s!', 1234.5678901), 'x = 1234.5678901!')
- self.assertEqual(format('x = %f!', 1234.5678901), 'x = 1234.567890!')
- self.assertEqual(format('x = %d!', 1234.5678901), 'x = 1234!')
+ self.assertEqual(format('x = %s!', 1234.0000625), 'x = 1234.0000625!')
+ self.assertEqual(format('x = %f!', 1234.0000625), 'x = 1234.000063!')
+ self.assertEqual(format('x = %d!', 1234.0000625), 'x = 1234!')
self.assertEqual(format('x = %s%% %%%%', 1234), 'x = 1234% %%')
self.assertEqual(format('x = %s!', '%% %s'), 'x = %% %s!')
self.assertEqual(format('x = %s, y = %d', 12, 34), 'x = 12, y = 34')
@@ -2614,6 +2615,90 @@ class OptimizeLoadFastTestCase(DirectCfgOptimizerTests):
]
self.cfg_optimization_test(insts, expected, consts=[None])
+ def test_format_simple(self):
+ # FORMAT_SIMPLE will leave its operand on the stack if it's a unicode
+ # object. We treat it conservatively and assume that it always leaves
+ # its operand on the stack.
+ insts = [
+ ("LOAD_FAST", 0, 1),
+ ("FORMAT_SIMPLE", None, 2),
+ ("STORE_FAST", 1, 3),
+ ]
+ self.check(insts, insts)
+
+ insts = [
+ ("LOAD_FAST", 0, 1),
+ ("FORMAT_SIMPLE", None, 2),
+ ("POP_TOP", None, 3),
+ ]
+ expected = [
+ ("LOAD_FAST_BORROW", 0, 1),
+ ("FORMAT_SIMPLE", None, 2),
+ ("POP_TOP", None, 3),
+ ]
+ self.check(insts, expected)
+
+ def test_set_function_attribute(self):
+ # SET_FUNCTION_ATTRIBUTE leaves the function on the stack
+ insts = [
+ ("LOAD_CONST", 0, 1),
+ ("LOAD_FAST", 0, 2),
+ ("SET_FUNCTION_ATTRIBUTE", 2, 3),
+ ("STORE_FAST", 1, 4),
+ ("LOAD_CONST", 0, 5),
+ ("RETURN_VALUE", None, 6)
+ ]
+ self.cfg_optimization_test(insts, insts, consts=[None])
+
+ insts = [
+ ("LOAD_CONST", 0, 1),
+ ("LOAD_FAST", 0, 2),
+ ("SET_FUNCTION_ATTRIBUTE", 2, 3),
+ ("RETURN_VALUE", None, 4)
+ ]
+ expected = [
+ ("LOAD_CONST", 0, 1),
+ ("LOAD_FAST_BORROW", 0, 2),
+ ("SET_FUNCTION_ATTRIBUTE", 2, 3),
+ ("RETURN_VALUE", None, 4)
+ ]
+ self.cfg_optimization_test(insts, expected, consts=[None])
+
+ def test_get_yield_from_iter(self):
+ # GET_YIELD_FROM_ITER may leave its operand on the stack
+ insts = [
+ ("LOAD_FAST", 0, 1),
+ ("GET_YIELD_FROM_ITER", None, 2),
+ ("LOAD_CONST", 0, 3),
+ send := self.Label(),
+ ("SEND", end := self.Label(), 5),
+ ("YIELD_VALUE", 1, 6),
+ ("RESUME", 2, 7),
+ ("JUMP", send, 8),
+ end,
+ ("END_SEND", None, 9),
+ ("LOAD_CONST", 0, 10),
+ ("RETURN_VALUE", None, 11),
+ ]
+ self.cfg_optimization_test(insts, insts, consts=[None])
+
+ def test_push_exc_info(self):
+ insts = [
+ ("LOAD_FAST", 0, 1),
+ ("PUSH_EXC_INFO", None, 2),
+ ]
+ self.check(insts, insts)
+
+ def test_load_special(self):
+ # LOAD_SPECIAL may leave self on the stack
+ insts = [
+ ("LOAD_FAST", 0, 1),
+ ("LOAD_SPECIAL", 0, 2),
+ ("STORE_FAST", 1, 3),
+ ]
+ self.check(insts, insts)
+
+
def test_del_in_finally(self):
# This loads `obj` onto the stack, executes `del obj`, then returns the
# `obj` from the stack. See gh-133371 for more details.
@@ -2630,6 +2715,14 @@ class OptimizeLoadFastTestCase(DirectCfgOptimizerTests):
gc.collect()
self.assertEqual(obj, [42])
+ def test_format_simple_unicode(self):
+ # Repro from gh-134889
+ def f():
+ var = f"{1}"
+ var = f"{var}"
+ return var
+ self.assertEqual(f(), "1")
+
if __name__ == "__main__":
diff --git a/Lib/test/test_perf_profiler.py b/Lib/test/test_perf_profiler.py
index 21d097dbb55..7529c853f9c 100644
--- a/Lib/test/test_perf_profiler.py
+++ b/Lib/test/test_perf_profiler.py
@@ -506,9 +506,12 @@ def _is_perf_version_at_least(major, minor):
# The output of perf --version looks like "perf version 6.7-3" but
# it can also be perf version "perf version 5.15.143", or even include
# a commit hash in the version string, like "6.12.9.g242e6068fd5c"
+ #
+ # PermissionError is raised if perf does not exist on the Windows Subsystem
+ # for Linux, see #134987
try:
output = subprocess.check_output(["perf", "--version"], text=True)
- except (subprocess.CalledProcessError, FileNotFoundError):
+ except (subprocess.CalledProcessError, FileNotFoundError, PermissionError):
return False
version = output.split()[2]
version = version.split("-")[0]
diff --git a/Lib/test/test_platform.py b/Lib/test/test_platform.py
index 3688cc4267b..479649053ab 100644
--- a/Lib/test/test_platform.py
+++ b/Lib/test/test_platform.py
@@ -133,6 +133,22 @@ class PlatformTest(unittest.TestCase):
for terse in (False, True):
res = platform.platform(aliased, terse)
+ def test__platform(self):
+ for src, res in [
+ ('foo bar', 'foo_bar'),
+ (
+ '1/2\\3:4;5"6(7)8(7)6"5;4:3\\2/1',
+ '1-2-3-4-5-6-7-8-7-6-5-4-3-2-1'
+ ),
+ ('--', ''),
+ ('-f', '-f'),
+ ('-foo----', '-foo'),
+ ('--foo---', '-foo'),
+ ('---foo--', '-foo'),
+ ]:
+ with self.subTest(src=src):
+ self.assertEqual(platform._platform(src), res)
+
def test_system(self):
res = platform.system()
diff --git a/Lib/test/test_posixpath.py b/Lib/test/test_posixpath.py
index f3f9895f529..21f06712548 100644
--- a/Lib/test/test_posixpath.py
+++ b/Lib/test/test_posixpath.py
@@ -4,7 +4,8 @@ import posixpath
import random
import sys
import unittest
-from posixpath import realpath, abspath, dirname, basename
+from functools import partial
+from posixpath import realpath, abspath, dirname, basename, ALLOW_MISSING
from test import support
from test import test_genericpath
from test.support import import_helper
@@ -33,6 +34,11 @@ def skip_if_ABSTFN_contains_backslash(test):
msg = "ABSTFN is not a posix path - tests fail"
return [test, unittest.skip(msg)(test)][found_backslash]
+
+def _parameterize(*parameters):
+ return support.subTests('kwargs', parameters)
+
+
class PosixPathTest(unittest.TestCase):
def setUp(self):
@@ -442,32 +448,35 @@ class PosixPathTest(unittest.TestCase):
self.assertEqual(result, expected)
@skip_if_ABSTFN_contains_backslash
- def test_realpath_curdir(self):
- self.assertEqual(realpath('.'), os.getcwd())
- self.assertEqual(realpath('./.'), os.getcwd())
- self.assertEqual(realpath('/'.join(['.'] * 100)), os.getcwd())
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_curdir(self, kwargs):
+ self.assertEqual(realpath('.', **kwargs), os.getcwd())
+ self.assertEqual(realpath('./.', **kwargs), os.getcwd())
+ self.assertEqual(realpath('/'.join(['.'] * 100), **kwargs), os.getcwd())
- self.assertEqual(realpath(b'.'), os.getcwdb())
- self.assertEqual(realpath(b'./.'), os.getcwdb())
- self.assertEqual(realpath(b'/'.join([b'.'] * 100)), os.getcwdb())
+ self.assertEqual(realpath(b'.', **kwargs), os.getcwdb())
+ self.assertEqual(realpath(b'./.', **kwargs), os.getcwdb())
+ self.assertEqual(realpath(b'/'.join([b'.'] * 100), **kwargs), os.getcwdb())
@skip_if_ABSTFN_contains_backslash
- def test_realpath_pardir(self):
- self.assertEqual(realpath('..'), dirname(os.getcwd()))
- self.assertEqual(realpath('../..'), dirname(dirname(os.getcwd())))
- self.assertEqual(realpath('/'.join(['..'] * 100)), '/')
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_pardir(self, kwargs):
+ self.assertEqual(realpath('..', **kwargs), dirname(os.getcwd()))
+ self.assertEqual(realpath('../..', **kwargs), dirname(dirname(os.getcwd())))
+ self.assertEqual(realpath('/'.join(['..'] * 100), **kwargs), '/')
- self.assertEqual(realpath(b'..'), dirname(os.getcwdb()))
- self.assertEqual(realpath(b'../..'), dirname(dirname(os.getcwdb())))
- self.assertEqual(realpath(b'/'.join([b'..'] * 100)), b'/')
+ self.assertEqual(realpath(b'..', **kwargs), dirname(os.getcwdb()))
+ self.assertEqual(realpath(b'../..', **kwargs), dirname(dirname(os.getcwdb())))
+ self.assertEqual(realpath(b'/'.join([b'..'] * 100), **kwargs), b'/')
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_basic(self):
+ @_parameterize({}, {'strict': ALLOW_MISSING})
+ def test_realpath_basic(self, kwargs):
# Basic operation.
try:
os.symlink(ABSTFN+"1", ABSTFN)
- self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
+ self.assertEqual(realpath(ABSTFN, **kwargs), ABSTFN+"1")
finally:
os_helper.unlink(ABSTFN)
@@ -487,90 +496,115 @@ class PosixPathTest(unittest.TestCase):
path = '/\x00'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(ValueError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = b'/\x00'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(ValueError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = '/nonexistent/x\x00'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = b'/nonexistent/x\x00'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = '/\x00/..'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(ValueError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = b'/\x00/..'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(ValueError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
+
path = '/nonexistent/x\x00/..'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = b'/nonexistent/x\x00/..'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = '/\udfff'
if sys.platform == 'win32':
self.assertEqual(realpath(path, strict=False), path)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), path)
else:
self.assertRaises(UnicodeEncodeError, realpath, path, strict=False)
self.assertRaises(UnicodeEncodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeEncodeError, realpath, path, strict=ALLOW_MISSING)
path = '/nonexistent/\udfff'
if sys.platform == 'win32':
self.assertEqual(realpath(path, strict=False), path)
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), path)
else:
self.assertRaises(UnicodeEncodeError, realpath, path, strict=False)
+ self.assertRaises(UnicodeEncodeError, realpath, path, strict=ALLOW_MISSING)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
path = '/\udfff/..'
if sys.platform == 'win32':
self.assertEqual(realpath(path, strict=False), '/')
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), '/')
else:
self.assertRaises(UnicodeEncodeError, realpath, path, strict=False)
self.assertRaises(UnicodeEncodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeEncodeError, realpath, path, strict=ALLOW_MISSING)
path = '/nonexistent/\udfff/..'
if sys.platform == 'win32':
self.assertEqual(realpath(path, strict=False), '/nonexistent')
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), '/nonexistent')
else:
self.assertRaises(UnicodeEncodeError, realpath, path, strict=False)
+ self.assertRaises(UnicodeEncodeError, realpath, path, strict=ALLOW_MISSING)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
path = b'/\xff'
if sys.platform == 'win32':
self.assertRaises(UnicodeDecodeError, realpath, path, strict=False)
self.assertRaises(UnicodeDecodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeDecodeError, realpath, path, strict=ALLOW_MISSING)
else:
self.assertEqual(realpath(path, strict=False), path)
if support.is_wasi:
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertRaises(OSError, realpath, path, strict=ALLOW_MISSING)
else:
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), path)
path = b'/nonexistent/\xff'
if sys.platform == 'win32':
self.assertRaises(UnicodeDecodeError, realpath, path, strict=False)
+ self.assertRaises(UnicodeDecodeError, realpath, path, strict=ALLOW_MISSING)
else:
self.assertEqual(realpath(path, strict=False), path)
if support.is_wasi:
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertRaises(OSError, realpath, path, strict=ALLOW_MISSING)
else:
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_relative(self):
+ @_parameterize({}, {'strict': ALLOW_MISSING})
+ def test_realpath_relative(self, kwargs):
try:
os.symlink(posixpath.relpath(ABSTFN+"1"), ABSTFN)
- self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
+ self.assertEqual(realpath(ABSTFN, **kwargs), ABSTFN+"1")
finally:
os_helper.unlink(ABSTFN)
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_missing_pardir(self):
+ @_parameterize({}, {'strict': ALLOW_MISSING})
+ def test_realpath_missing_pardir(self, kwargs):
try:
os.symlink(TESTFN + "1", TESTFN)
- self.assertEqual(realpath("nonexistent/../" + TESTFN), ABSTFN + "1")
+ self.assertEqual(
+ realpath("nonexistent/../" + TESTFN, **kwargs), ABSTFN + "1")
finally:
os_helper.unlink(TESTFN)
@@ -617,37 +651,38 @@ class PosixPathTest(unittest.TestCase):
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_symlink_loops_strict(self):
+ @_parameterize({'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_symlink_loops_strict(self, kwargs):
# Bug #43757, raise OSError if we get into an infinite symlink loop in
- # strict mode.
+ # the strict modes.
try:
os.symlink(ABSTFN, ABSTFN)
- self.assertRaises(OSError, realpath, ABSTFN, strict=True)
+ self.assertRaises(OSError, realpath, ABSTFN, **kwargs)
os.symlink(ABSTFN+"1", ABSTFN+"2")
os.symlink(ABSTFN+"2", ABSTFN+"1")
- self.assertRaises(OSError, realpath, ABSTFN+"1", strict=True)
- self.assertRaises(OSError, realpath, ABSTFN+"2", strict=True)
+ self.assertRaises(OSError, realpath, ABSTFN+"1", **kwargs)
+ self.assertRaises(OSError, realpath, ABSTFN+"2", **kwargs)
- self.assertRaises(OSError, realpath, ABSTFN+"1/x", strict=True)
- self.assertRaises(OSError, realpath, ABSTFN+"1/..", strict=True)
- self.assertRaises(OSError, realpath, ABSTFN+"1/../x", strict=True)
+ self.assertRaises(OSError, realpath, ABSTFN+"1/x", **kwargs)
+ self.assertRaises(OSError, realpath, ABSTFN+"1/..", **kwargs)
+ self.assertRaises(OSError, realpath, ABSTFN+"1/../x", **kwargs)
os.symlink(ABSTFN+"x", ABSTFN+"y")
self.assertRaises(OSError, realpath,
- ABSTFN+"1/../" + basename(ABSTFN) + "y", strict=True)
+ ABSTFN+"1/../" + basename(ABSTFN) + "y", **kwargs)
self.assertRaises(OSError, realpath,
- ABSTFN+"1/../" + basename(ABSTFN) + "1", strict=True)
+ ABSTFN+"1/../" + basename(ABSTFN) + "1", **kwargs)
os.symlink(basename(ABSTFN) + "a/b", ABSTFN+"a")
- self.assertRaises(OSError, realpath, ABSTFN+"a", strict=True)
+ self.assertRaises(OSError, realpath, ABSTFN+"a", **kwargs)
os.symlink("../" + basename(dirname(ABSTFN)) + "/" +
basename(ABSTFN) + "c", ABSTFN+"c")
- self.assertRaises(OSError, realpath, ABSTFN+"c", strict=True)
+ self.assertRaises(OSError, realpath, ABSTFN+"c", **kwargs)
# Test using relative path as well.
with os_helper.change_cwd(dirname(ABSTFN)):
- self.assertRaises(OSError, realpath, basename(ABSTFN), strict=True)
+ self.assertRaises(OSError, realpath, basename(ABSTFN), **kwargs)
finally:
os_helper.unlink(ABSTFN)
os_helper.unlink(ABSTFN+"1")
@@ -658,13 +693,14 @@ class PosixPathTest(unittest.TestCase):
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_repeated_indirect_symlinks(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_repeated_indirect_symlinks(self, kwargs):
# Issue #6975.
try:
os.mkdir(ABSTFN)
os.symlink('../' + basename(ABSTFN), ABSTFN + '/self')
os.symlink('self/self/self', ABSTFN + '/link')
- self.assertEqual(realpath(ABSTFN + '/link'), ABSTFN)
+ self.assertEqual(realpath(ABSTFN + '/link', **kwargs), ABSTFN)
finally:
os_helper.unlink(ABSTFN + '/self')
os_helper.unlink(ABSTFN + '/link')
@@ -672,14 +708,15 @@ class PosixPathTest(unittest.TestCase):
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_deep_recursion(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_deep_recursion(self, kwargs):
depth = 10
try:
os.mkdir(ABSTFN)
for i in range(depth):
os.symlink('/'.join(['%d' % i] * 10), ABSTFN + '/%d' % (i + 1))
os.symlink('.', ABSTFN + '/0')
- self.assertEqual(realpath(ABSTFN + '/%d' % depth), ABSTFN)
+ self.assertEqual(realpath(ABSTFN + '/%d' % depth, **kwargs), ABSTFN)
# Test using relative path as well.
with os_helper.change_cwd(ABSTFN):
@@ -691,7 +728,8 @@ class PosixPathTest(unittest.TestCase):
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_resolve_parents(self):
+ @_parameterize({}, {'strict': ALLOW_MISSING})
+ def test_realpath_resolve_parents(self, kwargs):
# We also need to resolve any symlinks in the parents of a relative
# path passed to realpath. E.g.: current working directory is
# /usr/doc with 'doc' being a symlink to /usr/share/doc. We call
@@ -702,7 +740,8 @@ class PosixPathTest(unittest.TestCase):
os.symlink(ABSTFN + "/y", ABSTFN + "/k")
with os_helper.change_cwd(ABSTFN + "/k"):
- self.assertEqual(realpath("a"), ABSTFN + "/y/a")
+ self.assertEqual(realpath("a", **kwargs),
+ ABSTFN + "/y/a")
finally:
os_helper.unlink(ABSTFN + "/k")
os_helper.rmdir(ABSTFN + "/y")
@@ -710,7 +749,8 @@ class PosixPathTest(unittest.TestCase):
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_resolve_before_normalizing(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_resolve_before_normalizing(self, kwargs):
# Bug #990669: Symbolic links should be resolved before we
# normalize the path. E.g.: if we have directories 'a', 'k' and 'y'
# in the following hierarchy:
@@ -725,10 +765,10 @@ class PosixPathTest(unittest.TestCase):
os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y")
# Absolute path.
- self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
+ self.assertEqual(realpath(ABSTFN + "/link-y/..", **kwargs), ABSTFN + "/k")
# Relative path.
with os_helper.change_cwd(dirname(ABSTFN)):
- self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."),
+ self.assertEqual(realpath(basename(ABSTFN) + "/link-y/..", **kwargs),
ABSTFN + "/k")
finally:
os_helper.unlink(ABSTFN + "/link-y")
@@ -738,7 +778,8 @@ class PosixPathTest(unittest.TestCase):
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_resolve_first(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_resolve_first(self, kwargs):
# Bug #1213894: The first component of the path, if not absolute,
# must be resolved too.
@@ -748,8 +789,8 @@ class PosixPathTest(unittest.TestCase):
os.symlink(ABSTFN, ABSTFN + "link")
with os_helper.change_cwd(dirname(ABSTFN)):
base = basename(ABSTFN)
- self.assertEqual(realpath(base + "link"), ABSTFN)
- self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
+ self.assertEqual(realpath(base + "link", **kwargs), ABSTFN)
+ self.assertEqual(realpath(base + "link/k", **kwargs), ABSTFN + "/k")
finally:
os_helper.unlink(ABSTFN + "link")
os_helper.rmdir(ABSTFN + "/k")
@@ -767,12 +808,67 @@ class PosixPathTest(unittest.TestCase):
self.assertEqual(realpath(ABSTFN + '/foo'), ABSTFN + '/foo')
self.assertEqual(realpath(ABSTFN + '/../foo'), dirname(ABSTFN) + '/foo')
self.assertEqual(realpath(ABSTFN + '/foo/..'), ABSTFN)
- with self.assertRaises(PermissionError):
- realpath(ABSTFN, strict=True)
finally:
os.chmod(ABSTFN, 0o755, follow_symlinks=False)
os_helper.unlink(ABSTFN)
+ @os_helper.skip_unless_symlink
+ @skip_if_ABSTFN_contains_backslash
+ @unittest.skipIf(os.chmod not in os.supports_follow_symlinks, "Can't set symlink permissions")
+ @unittest.skipIf(sys.platform != "darwin", "only macOS requires read permission to readlink()")
+ @_parameterize({'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_unreadable_symlink_strict(self, kwargs):
+ try:
+ os.symlink(ABSTFN+"1", ABSTFN)
+ os.chmod(ABSTFN, 0o000, follow_symlinks=False)
+ with self.assertRaises(PermissionError):
+ realpath(ABSTFN, **kwargs)
+ with self.assertRaises(PermissionError):
+ realpath(ABSTFN + '/foo', **kwargs)
+ with self.assertRaises(PermissionError):
+ realpath(ABSTFN + '/../foo', **kwargs)
+ with self.assertRaises(PermissionError):
+ realpath(ABSTFN + '/foo/..', **kwargs)
+ finally:
+ os.chmod(ABSTFN, 0o755, follow_symlinks=False)
+ os.unlink(ABSTFN)
+
+ @skip_if_ABSTFN_contains_backslash
+ @os_helper.skip_unless_symlink
+ def test_realpath_unreadable_directory(self):
+ try:
+ os.mkdir(ABSTFN)
+ os.mkdir(ABSTFN + '/k')
+ os.chmod(ABSTFN, 0o000)
+ self.assertEqual(realpath(ABSTFN, strict=False), ABSTFN)
+ self.assertEqual(realpath(ABSTFN, strict=True), ABSTFN)
+ self.assertEqual(realpath(ABSTFN, strict=ALLOW_MISSING), ABSTFN)
+
+ try:
+ os.stat(ABSTFN)
+ except PermissionError:
+ pass
+ else:
+ self.skipTest('Cannot block permissions')
+
+ self.assertEqual(realpath(ABSTFN + '/k', strict=False),
+ ABSTFN + '/k')
+ self.assertRaises(PermissionError, realpath, ABSTFN + '/k',
+ strict=True)
+ self.assertRaises(PermissionError, realpath, ABSTFN + '/k',
+ strict=ALLOW_MISSING)
+
+ self.assertEqual(realpath(ABSTFN + '/missing', strict=False),
+ ABSTFN + '/missing')
+ self.assertRaises(PermissionError, realpath, ABSTFN + '/missing',
+ strict=True)
+ self.assertRaises(PermissionError, realpath, ABSTFN + '/missing',
+ strict=ALLOW_MISSING)
+ finally:
+ os.chmod(ABSTFN, 0o755)
+ os_helper.rmdir(ABSTFN + '/k')
+ os_helper.rmdir(ABSTFN)
+
@skip_if_ABSTFN_contains_backslash
def test_realpath_nonterminal_file(self):
try:
@@ -780,14 +876,27 @@ class PosixPathTest(unittest.TestCase):
f.write('test_posixpath wuz ere')
self.assertEqual(realpath(ABSTFN, strict=False), ABSTFN)
self.assertEqual(realpath(ABSTFN, strict=True), ABSTFN)
+ self.assertEqual(realpath(ABSTFN, strict=ALLOW_MISSING), ABSTFN)
+
self.assertEqual(realpath(ABSTFN + "/", strict=False), ABSTFN)
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/.", strict=False), ABSTFN)
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/.", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/.",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/..", strict=False), dirname(ABSTFN))
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/..", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/..",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/subdir", strict=False), ABSTFN + "/subdir")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/subdir", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/subdir",
+ strict=ALLOW_MISSING)
finally:
os_helper.unlink(ABSTFN)
@@ -800,14 +909,27 @@ class PosixPathTest(unittest.TestCase):
os.symlink(ABSTFN + "1", ABSTFN)
self.assertEqual(realpath(ABSTFN, strict=False), ABSTFN + "1")
self.assertEqual(realpath(ABSTFN, strict=True), ABSTFN + "1")
+ self.assertEqual(realpath(ABSTFN, strict=ALLOW_MISSING), ABSTFN + "1")
+
self.assertEqual(realpath(ABSTFN + "/", strict=False), ABSTFN + "1")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/.", strict=False), ABSTFN + "1")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/.", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/.",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/..", strict=False), dirname(ABSTFN))
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/..", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/..",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/subdir", strict=False), ABSTFN + "1/subdir")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/subdir", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/subdir",
+ strict=ALLOW_MISSING)
finally:
os_helper.unlink(ABSTFN)
os_helper.unlink(ABSTFN + "1")
@@ -822,14 +944,27 @@ class PosixPathTest(unittest.TestCase):
os.symlink(ABSTFN + "1", ABSTFN)
self.assertEqual(realpath(ABSTFN, strict=False), ABSTFN + "2")
self.assertEqual(realpath(ABSTFN, strict=True), ABSTFN + "2")
+ self.assertEqual(realpath(ABSTFN, strict=ALLOW_MISSING), ABSTFN + "2")
+
self.assertEqual(realpath(ABSTFN + "/", strict=False), ABSTFN + "2")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/.", strict=False), ABSTFN + "2")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/.", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/.",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/..", strict=False), dirname(ABSTFN))
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/..", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/..",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/subdir", strict=False), ABSTFN + "2/subdir")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/subdir", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/subdir",
+ strict=ALLOW_MISSING)
finally:
os_helper.unlink(ABSTFN)
os_helper.unlink(ABSTFN + "1")
@@ -1017,9 +1152,12 @@ class PathLikeTests(unittest.TestCase):
def test_path_abspath(self):
self.assertPathEqual(self.path.abspath)
- def test_path_realpath(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_path_realpath(self, kwargs):
self.assertPathEqual(self.path.realpath)
+ self.assertPathEqual(partial(self.path.realpath, **kwargs))
+
def test_path_relpath(self):
self.assertPathEqual(self.path.relpath)
diff --git a/Lib/test/test_pprint.py b/Lib/test/test_pprint.py
index 0c84d3d3bfd..41c337ade7e 100644
--- a/Lib/test/test_pprint.py
+++ b/Lib/test/test_pprint.py
@@ -458,7 +458,7 @@ class QueryTestCase(unittest.TestCase):
return super().__new__(Temperature, celsius_degrees)
def __repr__(self):
kelvin_degrees = self + 273.15
- return f"{kelvin_degrees}ยฐK"
+ return f"{kelvin_degrees:.2f}ยฐK"
self.assertEqual(pprint.pformat(Temperature(1000)), '1273.15ยฐK')
def test_sorted_dict(self):
diff --git a/Lib/test/test_pty.py b/Lib/test/test_pty.py
index c1728f5019d..4836f38c388 100644
--- a/Lib/test/test_pty.py
+++ b/Lib/test/test_pty.py
@@ -20,7 +20,6 @@ import select
import signal
import socket
import io # readline
-import warnings
TEST_STRING_1 = b"I wish to buy a fish license.\n"
TEST_STRING_2 = b"For my pet fish, Eric.\n"
diff --git a/Lib/test/test_pyclbr.py b/Lib/test/test_pyclbr.py
index 3e7b2cd0dc9..bce68e6cd7a 100644
--- a/Lib/test/test_pyclbr.py
+++ b/Lib/test/test_pyclbr.py
@@ -11,7 +11,6 @@ from types import FunctionType, MethodType, BuiltinFunctionType
import pyclbr
from unittest import TestCase, main as unittest_main
from test.test_importlib import util as test_importlib_util
-import warnings
StaticMethodType = type(staticmethod(lambda: None))
@@ -246,9 +245,6 @@ class PyclbrTest(TestCase):
# These were once some of the longest modules.
cm('random', ignore=('Random',)) # from _random import Random as CoreGenerator
cm('pickle', ignore=('partial', 'PickleBuffer'))
- with warnings.catch_warnings():
- warnings.simplefilter('ignore', DeprecationWarning)
- cm('sre_parse', ignore=('dump', 'groups', 'pos')) # from sre_constants import *; property
with temporary_main_spec():
cm(
'pdb',
diff --git a/Lib/test/test_pydoc/test_pydoc.py b/Lib/test/test_pydoc/test_pydoc.py
index 281b24eaa36..d1d6f4987de 100644
--- a/Lib/test/test_pydoc/test_pydoc.py
+++ b/Lib/test/test_pydoc/test_pydoc.py
@@ -553,7 +553,7 @@ class PydocDocTest(unittest.TestCase):
# of the known subclasses of object. (doc.docclass() used to
# fail if HeapType was imported before running this test, like
# when running tests sequentially.)
- from _testcapi import HeapType
+ from _testcapi import HeapType # noqa: F401
except ImportError:
pass
text = doc.docclass(object)
diff --git a/Lib/test/test_pyexpat.py b/Lib/test/test_pyexpat.py
index 1d56ccd71cf..d4b4f60be98 100644
--- a/Lib/test/test_pyexpat.py
+++ b/Lib/test/test_pyexpat.py
@@ -9,12 +9,11 @@ import traceback
from io import BytesIO
from test import support
from test.support import os_helper
-
+from test.support import sortdict
+from unittest import mock
from xml.parsers import expat
from xml.parsers.expat import errors
-from test.support import sortdict
-
class SetAttributeTest(unittest.TestCase):
def setUp(self):
@@ -436,6 +435,19 @@ class BufferTextTest(unittest.TestCase):
"<!--abc-->", "4", "<!--def-->", "5", "</a>"],
"buffered text not properly split")
+ def test_change_character_data_handler_in_callback(self):
+ # Test that xmlparse_handler_setter() properly handles
+ # the special case "parser.CharacterDataHandler = None".
+ def handler(*args):
+ parser.CharacterDataHandler = None
+
+ handler_wrapper = mock.Mock(wraps=handler)
+ parser = expat.ParserCreate()
+ parser.CharacterDataHandler = handler_wrapper
+ parser.Parse(b"<a>1<b/>2<c></c>3<!--abc-->4<!--def-->5</a> ", True)
+ handler_wrapper.assert_called_once()
+ self.assertIsNone(parser.CharacterDataHandler)
+
# Test handling of exception from callback:
class HandlerExceptionTest(unittest.TestCase):
@@ -595,7 +607,7 @@ class ChardataBufferTest(unittest.TestCase):
def test_disabling_buffer(self):
xml1 = b"<?xml version='1.0' encoding='iso8859'?><a>" + b'a' * 512
xml2 = b'b' * 1024
- xml3 = b'c' * 1024 + b'</a>';
+ xml3 = b'c' * 1024 + b'</a>'
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_text = 1
diff --git a/Lib/test/test_pyrepl/test_pyrepl.py b/Lib/test/test_pyrepl/test_pyrepl.py
index abb4bd1bc25..98bae7dd703 100644
--- a/Lib/test/test_pyrepl/test_pyrepl.py
+++ b/Lib/test/test_pyrepl/test_pyrepl.py
@@ -918,7 +918,14 @@ class TestPyReplCompleter(TestCase):
class TestPyReplModuleCompleter(TestCase):
def setUp(self):
+ import importlib
+ # Make iter_modules() search only the standard library.
+ # This makes the test more reliable in case there are
+ # other user packages/scripts on PYTHONPATH which can
+ # interfere with the completions.
+ lib_path = os.path.dirname(importlib.__path__[0])
self._saved_sys_path = sys.path
+ sys.path = [lib_path]
def tearDown(self):
sys.path = self._saved_sys_path
@@ -932,14 +939,6 @@ class TestPyReplModuleCompleter(TestCase):
return reader
def test_import_completions(self):
- import importlib
- # Make iter_modules() search only the standard library.
- # This makes the test more reliable in case there are
- # other user packages/scripts on PYTHONPATH which can
- # intefere with the completions.
- lib_path = os.path.dirname(importlib.__path__[0])
- sys.path = [lib_path]
-
cases = (
("import path\t\n", "import pathlib"),
("import importlib.\t\tres\t\n", "import importlib.resources"),
@@ -1052,6 +1051,19 @@ class TestPyReplModuleCompleter(TestCase):
output = reader.readline()
self.assertEqual(output, expected)
+ def test_no_fallback_on_regular_completion(self):
+ cases = (
+ ("import pri\t\n", "import pri"),
+ ("from pri\t\n", "from pri"),
+ ("from typing import Na\t\n", "from typing import Na"),
+ )
+ for code, expected in cases:
+ with self.subTest(code=code):
+ events = code_to_events(code)
+ reader = self.prepare_reader(events, namespace={})
+ output = reader.readline()
+ self.assertEqual(output, expected)
+
def test_get_path_and_prefix(self):
cases = (
('', ('', '')),
@@ -1660,6 +1672,17 @@ class TestMain(ReplTestCase):
self.assertEqual(exit_code, 0)
self.assertNotIn("TypeError", output)
+ @force_not_colorized
+ def test_non_string_suggestion_candidates(self):
+ commands = ("import runpy\n"
+ "runpy._run_module_code('blech', {0: '', 'bluch': ''}, '')\n"
+ "exit()\n")
+
+ output, exit_code = self.run_repl(commands)
+ self.assertEqual(exit_code, 0)
+ self.assertNotIn("all elements in 'candidates' must be strings", output)
+ self.assertIn("bluch", output)
+
def test_readline_history_file(self):
# skip, if readline module is not available
readline = import_module('readline')
diff --git a/Lib/test/test_pyrepl/test_unix_console.py b/Lib/test/test_pyrepl/test_unix_console.py
index c447b310c49..b3f7dc028fe 100644
--- a/Lib/test/test_pyrepl/test_unix_console.py
+++ b/Lib/test/test_pyrepl/test_unix_console.py
@@ -20,6 +20,7 @@ except ImportError:
def unix_console(events, **kwargs):
console = UnixConsole()
console.get_event = MagicMock(side_effect=events)
+ console.getpending = MagicMock(return_value=Event("key", ""))
height = kwargs.get("height", 25)
width = kwargs.get("width", 80)
diff --git a/Lib/test/test_pyrepl/test_windows_console.py b/Lib/test/test_pyrepl/test_windows_console.py
index e7bab226b31..f9607e02c60 100644
--- a/Lib/test/test_pyrepl/test_windows_console.py
+++ b/Lib/test/test_pyrepl/test_windows_console.py
@@ -35,6 +35,7 @@ class WindowsConsoleTests(TestCase):
def console(self, events, **kwargs) -> Console:
console = WindowsConsole()
console.get_event = MagicMock(side_effect=events)
+ console.getpending = MagicMock(return_value=Event("key", ""))
console.wait = MagicMock()
console._scroll = MagicMock()
console._hide_cursor = MagicMock()
@@ -385,6 +386,7 @@ class WindowsConsoleGetEventTests(TestCase):
self.console._read_input = self.mock
self.console._WindowsConsole__vt_support = kwargs.get("vt_support",
False)
+ self.console.wait = MagicMock(return_value=True)
event = self.console.get_event(block=False)
return event
diff --git a/Lib/test/test_queue.py b/Lib/test/test_queue.py
index 7f4fe357034..c855fb8fe2b 100644
--- a/Lib/test/test_queue.py
+++ b/Lib/test/test_queue.py
@@ -6,7 +6,7 @@ import threading
import time
import unittest
import weakref
-from test.support import gc_collect
+from test.support import gc_collect, bigmemtest
from test.support import import_helper
from test.support import threading_helper
@@ -963,33 +963,33 @@ class BaseSimpleQueueTest:
# One producer, one consumer => results appended in well-defined order
self.assertEqual(results, inputs)
- def test_many_threads(self):
+ @bigmemtest(size=50, memuse=100*2**20, dry_run=False)
+ def test_many_threads(self, size):
# Test multiple concurrent put() and get()
- N = 50
q = self.q
inputs = list(range(10000))
- results = self.run_threads(N, q, inputs, self.feed, self.consume)
+ results = self.run_threads(size, q, inputs, self.feed, self.consume)
# Multiple consumers without synchronization append the
# results in random order
self.assertEqual(sorted(results), inputs)
- def test_many_threads_nonblock(self):
+ @bigmemtest(size=50, memuse=100*2**20, dry_run=False)
+ def test_many_threads_nonblock(self, size):
# Test multiple concurrent put() and get(block=False)
- N = 50
q = self.q
inputs = list(range(10000))
- results = self.run_threads(N, q, inputs,
+ results = self.run_threads(size, q, inputs,
self.feed, self.consume_nonblock)
self.assertEqual(sorted(results), inputs)
- def test_many_threads_timeout(self):
+ @bigmemtest(size=50, memuse=100*2**20, dry_run=False)
+ def test_many_threads_timeout(self, size):
# Test multiple concurrent put() and get(timeout=...)
- N = 50
q = self.q
inputs = list(range(1000))
- results = self.run_threads(N, q, inputs,
+ results = self.run_threads(size, q, inputs,
self.feed, self.consume_timeout)
self.assertEqual(sorted(results), inputs)
diff --git a/Lib/test/test_random.py b/Lib/test/test_random.py
index bd76d636e4f..0217ebd132b 100644
--- a/Lib/test/test_random.py
+++ b/Lib/test/test_random.py
@@ -14,6 +14,15 @@ from test import support
from fractions import Fraction
from collections import abc, Counter
+
+class MyIndex:
+ def __init__(self, value):
+ self.value = value
+
+ def __index__(self):
+ return self.value
+
+
class TestBasicOps:
# Superclass with tests common to all generators.
# Subclasses must arrange for self.gen to retrieve the Random instance
@@ -142,6 +151,7 @@ class TestBasicOps:
# Exception raised if size of sample exceeds that of population
self.assertRaises(ValueError, self.gen.sample, population, N+1)
self.assertRaises(ValueError, self.gen.sample, [], -1)
+ self.assertRaises(TypeError, self.gen.sample, population, 1.0)
def test_sample_distribution(self):
# For the entire allowable range of 0 <= k <= N, validate that
@@ -259,6 +269,7 @@ class TestBasicOps:
choices(data, range(4), k=5),
choices(k=5, population=data, weights=range(4)),
choices(k=5, population=data, cum_weights=range(4)),
+ choices(data, k=MyIndex(5)),
]:
self.assertEqual(len(sample), 5)
self.assertEqual(type(sample), list)
@@ -369,118 +380,40 @@ class TestBasicOps:
self.assertEqual(x1, x2)
self.assertEqual(y1, y2)
+ @support.requires_IEEE_754
+ def test_53_bits_per_float(self):
+ span = 2 ** 53
+ cum = 0
+ for i in range(100):
+ cum |= int(self.gen.random() * span)
+ self.assertEqual(cum, span-1)
+
def test_getrandbits(self):
+ getrandbits = self.gen.getrandbits
# Verify ranges
for k in range(1, 1000):
- self.assertTrue(0 <= self.gen.getrandbits(k) < 2**k)
- self.assertEqual(self.gen.getrandbits(0), 0)
+ self.assertTrue(0 <= getrandbits(k) < 2**k)
+ self.assertEqual(getrandbits(0), 0)
# Verify all bits active
- getbits = self.gen.getrandbits
for span in [1, 2, 3, 4, 31, 32, 32, 52, 53, 54, 119, 127, 128, 129]:
all_bits = 2**span-1
cum = 0
cpl_cum = 0
for i in range(100):
- v = getbits(span)
+ v = getrandbits(span)
cum |= v
cpl_cum |= all_bits ^ v
self.assertEqual(cum, all_bits)
self.assertEqual(cpl_cum, all_bits)
# Verify argument checking
- self.assertRaises(TypeError, self.gen.getrandbits)
- self.assertRaises(TypeError, self.gen.getrandbits, 1, 2)
- self.assertRaises(ValueError, self.gen.getrandbits, -1)
- self.assertRaises(TypeError, self.gen.getrandbits, 10.1)
-
- def test_pickling(self):
- for proto in range(pickle.HIGHEST_PROTOCOL + 1):
- state = pickle.dumps(self.gen, proto)
- origseq = [self.gen.random() for i in range(10)]
- newgen = pickle.loads(state)
- restoredseq = [newgen.random() for i in range(10)]
- self.assertEqual(origseq, restoredseq)
-
- def test_bug_1727780(self):
- # verify that version-2-pickles can be loaded
- # fine, whether they are created on 32-bit or 64-bit
- # platforms, and that version-3-pickles load fine.
- files = [("randv2_32.pck", 780),
- ("randv2_64.pck", 866),
- ("randv3.pck", 343)]
- for file, value in files:
- with open(support.findfile(file),"rb") as f:
- r = pickle.load(f)
- self.assertEqual(int(r.random()*1000), value)
-
- def test_bug_9025(self):
- # Had problem with an uneven distribution in int(n*random())
- # Verify the fix by checking that distributions fall within expectations.
- n = 100000
- randrange = self.gen.randrange
- k = sum(randrange(6755399441055744) % 3 == 2 for i in range(n))
- self.assertTrue(0.30 < k/n < .37, (k/n))
-
- def test_randbytes(self):
- # Verify ranges
- for n in range(1, 10):
- data = self.gen.randbytes(n)
- self.assertEqual(type(data), bytes)
- self.assertEqual(len(data), n)
-
- self.assertEqual(self.gen.randbytes(0), b'')
-
- # Verify argument checking
- self.assertRaises(TypeError, self.gen.randbytes)
- self.assertRaises(TypeError, self.gen.randbytes, 1, 2)
- self.assertRaises(ValueError, self.gen.randbytes, -1)
- self.assertRaises(TypeError, self.gen.randbytes, 1.0)
-
- def test_mu_sigma_default_args(self):
- self.assertIsInstance(self.gen.normalvariate(), float)
- self.assertIsInstance(self.gen.gauss(), float)
-
-
-try:
- random.SystemRandom().random()
-except NotImplementedError:
- SystemRandom_available = False
-else:
- SystemRandom_available = True
-
-@unittest.skipUnless(SystemRandom_available, "random.SystemRandom not available")
-class SystemRandom_TestBasicOps(TestBasicOps, unittest.TestCase):
- gen = random.SystemRandom()
-
- def test_autoseed(self):
- # Doesn't need to do anything except not fail
- self.gen.seed()
-
- def test_saverestore(self):
- self.assertRaises(NotImplementedError, self.gen.getstate)
- self.assertRaises(NotImplementedError, self.gen.setstate, None)
-
- def test_seedargs(self):
- # Doesn't need to do anything except not fail
- self.gen.seed(100)
-
- def test_gauss(self):
- self.gen.gauss_next = None
- self.gen.seed(100)
- self.assertEqual(self.gen.gauss_next, None)
-
- def test_pickling(self):
- for proto in range(pickle.HIGHEST_PROTOCOL + 1):
- self.assertRaises(NotImplementedError, pickle.dumps, self.gen, proto)
-
- def test_53_bits_per_float(self):
- # This should pass whenever a C double has 53 bit precision.
- span = 2 ** 53
- cum = 0
- for i in range(100):
- cum |= int(self.gen.random() * span)
- self.assertEqual(cum, span-1)
+ self.assertRaises(TypeError, getrandbits)
+ self.assertRaises(TypeError, getrandbits, 1, 2)
+ self.assertRaises(ValueError, getrandbits, -1)
+ self.assertRaises(OverflowError, getrandbits, 1<<1000)
+ self.assertRaises(ValueError, getrandbits, -1<<1000)
+ self.assertRaises(TypeError, getrandbits, 10.1)
def test_bigrand(self):
# The randrange routine should build-up the required number of bits
@@ -559,6 +492,10 @@ class SystemRandom_TestBasicOps(TestBasicOps, unittest.TestCase):
randrange(1000, step=100)
with self.assertRaises(TypeError):
randrange(1000, None, step=100)
+ with self.assertRaises(TypeError):
+ randrange(1000, step=MyIndex(1))
+ with self.assertRaises(TypeError):
+ randrange(1000, None, step=MyIndex(1))
def test_randbelow_logic(self, _log=log, int=int):
# check bitcount transition points: 2**i and 2**(i+1)-1
@@ -581,6 +518,116 @@ class SystemRandom_TestBasicOps(TestBasicOps, unittest.TestCase):
self.assertEqual(k, numbits) # note the stronger assertion
self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion
+ def test_randrange_index(self):
+ randrange = self.gen.randrange
+ self.assertIn(randrange(MyIndex(5)), range(5))
+ self.assertIn(randrange(MyIndex(2), MyIndex(7)), range(2, 7))
+ self.assertIn(randrange(MyIndex(5), MyIndex(15), MyIndex(2)), range(5, 15, 2))
+
+ def test_randint(self):
+ randint = self.gen.randint
+ self.assertIn(randint(2, 5), (2, 3, 4, 5))
+ self.assertEqual(randint(2, 2), 2)
+ self.assertIn(randint(MyIndex(2), MyIndex(5)), (2, 3, 4, 5))
+ self.assertEqual(randint(MyIndex(2), MyIndex(2)), 2)
+
+ self.assertRaises(ValueError, randint, 5, 2)
+ self.assertRaises(TypeError, randint)
+ self.assertRaises(TypeError, randint, 2)
+ self.assertRaises(TypeError, randint, 2, 5, 1)
+ self.assertRaises(TypeError, randint, 2.0, 5)
+ self.assertRaises(TypeError, randint, 2, 5.0)
+
+ def test_pickling(self):
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ state = pickle.dumps(self.gen, proto)
+ origseq = [self.gen.random() for i in range(10)]
+ newgen = pickle.loads(state)
+ restoredseq = [newgen.random() for i in range(10)]
+ self.assertEqual(origseq, restoredseq)
+
+ def test_bug_1727780(self):
+ # verify that version-2-pickles can be loaded
+ # fine, whether they are created on 32-bit or 64-bit
+ # platforms, and that version-3-pickles load fine.
+ files = [("randv2_32.pck", 780),
+ ("randv2_64.pck", 866),
+ ("randv3.pck", 343)]
+ for file, value in files:
+ with open(support.findfile(file),"rb") as f:
+ r = pickle.load(f)
+ self.assertEqual(int(r.random()*1000), value)
+
+ def test_bug_9025(self):
+ # Had problem with an uneven distribution in int(n*random())
+ # Verify the fix by checking that distributions fall within expectations.
+ n = 100000
+ randrange = self.gen.randrange
+ k = sum(randrange(6755399441055744) % 3 == 2 for i in range(n))
+ self.assertTrue(0.30 < k/n < .37, (k/n))
+
+ def test_randrange_bug_1590891(self):
+ start = 1000000000000
+ stop = -100000000000000000000
+ step = -200
+ x = self.gen.randrange(start, stop, step)
+ self.assertTrue(stop < x <= start)
+ self.assertEqual((x+stop)%step, 0)
+
+ def test_randbytes(self):
+ # Verify ranges
+ for n in range(1, 10):
+ data = self.gen.randbytes(n)
+ self.assertEqual(type(data), bytes)
+ self.assertEqual(len(data), n)
+
+ self.assertEqual(self.gen.randbytes(0), b'')
+
+ # Verify argument checking
+ self.assertRaises(TypeError, self.gen.randbytes)
+ self.assertRaises(TypeError, self.gen.randbytes, 1, 2)
+ self.assertRaises(ValueError, self.gen.randbytes, -1)
+ self.assertRaises(OverflowError, self.gen.randbytes, 1<<1000)
+ self.assertRaises((ValueError, OverflowError), self.gen.randbytes, -1<<1000)
+ self.assertRaises(TypeError, self.gen.randbytes, 1.0)
+
+ def test_mu_sigma_default_args(self):
+ self.assertIsInstance(self.gen.normalvariate(), float)
+ self.assertIsInstance(self.gen.gauss(), float)
+
+
+try:
+ random.SystemRandom().random()
+except NotImplementedError:
+ SystemRandom_available = False
+else:
+ SystemRandom_available = True
+
+@unittest.skipUnless(SystemRandom_available, "random.SystemRandom not available")
+class SystemRandom_TestBasicOps(TestBasicOps, unittest.TestCase):
+ gen = random.SystemRandom()
+
+ def test_autoseed(self):
+ # Doesn't need to do anything except not fail
+ self.gen.seed()
+
+ def test_saverestore(self):
+ self.assertRaises(NotImplementedError, self.gen.getstate)
+ self.assertRaises(NotImplementedError, self.gen.setstate, None)
+
+ def test_seedargs(self):
+ # Doesn't need to do anything except not fail
+ self.gen.seed(100)
+
+ def test_gauss(self):
+ self.gen.gauss_next = None
+ self.gen.seed(100)
+ self.assertEqual(self.gen.gauss_next, None)
+
+ def test_pickling(self):
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ self.assertRaises(NotImplementedError, pickle.dumps, self.gen, proto)
+
class TestRawMersenneTwister(unittest.TestCase):
@test.support.cpython_only
@@ -766,38 +813,6 @@ class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase):
seed = (1 << (10000 * 8)) - 1 # about 10K bytes
self.gen.seed(seed)
- def test_53_bits_per_float(self):
- # This should pass whenever a C double has 53 bit precision.
- span = 2 ** 53
- cum = 0
- for i in range(100):
- cum |= int(self.gen.random() * span)
- self.assertEqual(cum, span-1)
-
- def test_bigrand(self):
- # The randrange routine should build-up the required number of bits
- # in stages so that all bit positions are active.
- span = 2 ** 500
- cum = 0
- for i in range(100):
- r = self.gen.randrange(span)
- self.assertTrue(0 <= r < span)
- cum |= r
- self.assertEqual(cum, span-1)
-
- def test_bigrand_ranges(self):
- for i in [40,80, 160, 200, 211, 250, 375, 512, 550]:
- start = self.gen.randrange(2 ** (i-2))
- stop = self.gen.randrange(2 ** i)
- if stop <= start:
- continue
- self.assertTrue(start <= self.gen.randrange(start, stop) < stop)
-
- def test_rangelimits(self):
- for start, stop in [(-2,0), (-(2**60)-2,-(2**60)), (2**60,2**60+2)]:
- self.assertEqual(set(range(start,stop)),
- set([self.gen.randrange(start,stop) for i in range(100)]))
-
def test_getrandbits(self):
super().test_getrandbits()
@@ -805,6 +820,25 @@ class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase):
self.gen.seed(1234567)
self.assertEqual(self.gen.getrandbits(100),
97904845777343510404718956115)
+ self.gen.seed(1234567)
+ self.assertEqual(self.gen.getrandbits(MyIndex(100)),
+ 97904845777343510404718956115)
+
+ def test_getrandbits_2G_bits(self):
+ size = 2**31
+ self.gen.seed(1234567)
+ x = self.gen.getrandbits(size)
+ self.assertEqual(x.bit_length(), size)
+ self.assertEqual(x & (2**100-1), 890186470919986886340158459475)
+ self.assertEqual(x >> (size-100), 1226514312032729439655761284440)
+
+ @support.bigmemtest(size=2**32, memuse=1/8+2/15, dry_run=False)
+ def test_getrandbits_4G_bits(self, size):
+ self.gen.seed(1234568)
+ x = self.gen.getrandbits(size)
+ self.assertEqual(x.bit_length(), size)
+ self.assertEqual(x & (2**100-1), 287241425661104632871036099814)
+ self.assertEqual(x >> (size-100), 739728759900339699429794460738)
def test_randrange_uses_getrandbits(self):
# Verify use of getrandbits by randrange
@@ -816,27 +850,6 @@ class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase):
self.assertEqual(self.gen.randrange(2**99),
97904845777343510404718956115)
- def test_randbelow_logic(self, _log=log, int=int):
- # check bitcount transition points: 2**i and 2**(i+1)-1
- # show that: k = int(1.001 + _log(n, 2))
- # is equal to or one greater than the number of bits in n
- for i in range(1, 1000):
- n = 1 << i # check an exact power of two
- numbits = i+1
- k = int(1.00001 + _log(n, 2))
- self.assertEqual(k, numbits)
- self.assertEqual(n, 2**(k-1))
-
- n += n - 1 # check 1 below the next power of two
- k = int(1.00001 + _log(n, 2))
- self.assertIn(k, [numbits, numbits+1])
- self.assertTrue(2**k > n > 2**(k-2))
-
- n -= n >> 15 # check a little farther below the next power of two
- k = int(1.00001 + _log(n, 2))
- self.assertEqual(k, numbits) # note the stronger assertion
- self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion
-
def test_randbelow_without_getrandbits(self):
# Random._randbelow() can only use random() when the built-in one
# has been overridden but no new getrandbits() method was supplied.
@@ -871,14 +884,6 @@ class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase):
self.gen._randbelow_without_getrandbits(n, maxsize=maxsize)
self.assertEqual(random_mock.call_count, 2)
- def test_randrange_bug_1590891(self):
- start = 1000000000000
- stop = -100000000000000000000
- step = -200
- x = self.gen.randrange(start, stop, step)
- self.assertTrue(stop < x <= start)
- self.assertEqual((x+stop)%step, 0)
-
def test_choices_algorithms(self):
# The various ways of specifying weights should produce the same results
choices = self.gen.choices
@@ -962,6 +967,14 @@ class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase):
self.assertEqual(self.gen.randbytes(n),
gen2.getrandbits(n * 8).to_bytes(n, 'little'))
+ @support.bigmemtest(size=2**29, memuse=1+16/15, dry_run=False)
+ def test_randbytes_256M(self, size):
+ self.gen.seed(2849427419)
+ x = self.gen.randbytes(size)
+ self.assertEqual(len(x), size)
+ self.assertEqual(x[:12].hex(), 'f6fd9ae63855ab91ea238b4f')
+ self.assertEqual(x[-12:].hex(), '0e7af69a84ee99bf4a11becc')
+
def test_sample_counts_equivalence(self):
# Test the documented strong equivalence to a sample with repeated elements.
# We run this test on random.Random() which makes deterministic selections
diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py
index e9128ac1d97..993652d2e88 100644
--- a/Lib/test/test_re.py
+++ b/Lib/test/test_re.py
@@ -5,7 +5,6 @@ from test.support import (gc_collect, bigmemtest, _2G,
import locale
import re
import string
-import sys
import unittest
import warnings
from re import Scanner
@@ -2927,33 +2926,6 @@ class ImplementationTest(unittest.TestCase):
pat = re.compile("")
check_disallow_instantiation(self, type(pat.scanner("")))
- def test_deprecated_modules(self):
- deprecated = {
- 'sre_compile': ['compile', 'error',
- 'SRE_FLAG_IGNORECASE', 'SUBPATTERN',
- '_compile_info'],
- 'sre_constants': ['error', 'SRE_FLAG_IGNORECASE', 'SUBPATTERN',
- '_NamedIntConstant'],
- 'sre_parse': ['SubPattern', 'parse',
- 'SRE_FLAG_IGNORECASE', 'SUBPATTERN',
- '_parse_sub'],
- }
- for name in deprecated:
- with self.subTest(module=name):
- sys.modules.pop(name, None)
- with self.assertWarns(DeprecationWarning) as w:
- __import__(name)
- self.assertEqual(str(w.warning),
- f"module {name!r} is deprecated")
- self.assertEqual(w.filename, __file__)
- self.assertIn(name, sys.modules)
- mod = sys.modules[name]
- self.assertEqual(mod.__name__, name)
- self.assertEqual(mod.__package__, '')
- for attr in deprecated[name]:
- self.assertHasAttr(mod, attr)
- del sys.modules[name]
-
@cpython_only
def test_case_helpers(self):
import _sre
diff --git a/Lib/test/test_readline.py b/Lib/test/test_readline.py
index b9d082b3597..45192fe5082 100644
--- a/Lib/test/test_readline.py
+++ b/Lib/test/test_readline.py
@@ -1,6 +1,7 @@
"""
Very minimal unittests for parts of the readline module.
"""
+import codecs
import locale
import os
import sys
@@ -231,6 +232,13 @@ print("History length:", readline.get_current_history_length())
# writing and reading non-ASCII bytes into/from a TTY works, but
# readline or ncurses ignores non-ASCII bytes on read.
self.skipTest(f"the LC_CTYPE locale is {loc!r}")
+ if sys.flags.utf8_mode:
+ encoding = locale.getencoding()
+ encoding = codecs.lookup(encoding).name # normalize the name
+ if encoding != "utf-8":
+ # gh-133711: The Python UTF-8 Mode ignores the LC_CTYPE locale
+ # and always uses the UTF-8 encoding.
+ self.skipTest(f"the LC_CTYPE encoding is {encoding!r}")
try:
readline.add_history("\xEB\xEF")
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index 7e317d5ab94..5bc3c5924b0 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -768,13 +768,16 @@ class BaseTestCase(unittest.TestCase):
self.fail(msg)
return proc
- def run_python(self, args, **kw):
+ def run_python(self, args, isolated=True, **kw):
extraargs = []
if 'uops' in sys._xoptions:
# Pass -X uops along
extraargs.extend(['-X', 'uops'])
- args = [sys.executable, *extraargs, '-X', 'faulthandler', '-I', *args]
- proc = self.run_command(args, **kw)
+ cmd = [sys.executable, *extraargs, '-X', 'faulthandler']
+ if isolated:
+ cmd.append('-I')
+ cmd.extend(args)
+ proc = self.run_command(cmd, **kw)
return proc.stdout
@@ -831,8 +834,8 @@ class ProgramsTestCase(BaseTestCase):
self.check_executed_tests(output, self.tests,
randomize=True, stats=len(self.tests))
- def run_tests(self, args, env=None):
- output = self.run_python(args, env=env)
+ def run_tests(self, args, env=None, isolated=True):
+ output = self.run_python(args, env=env, isolated=isolated)
self.check_output(output)
def test_script_regrtest(self):
@@ -874,7 +877,10 @@ class ProgramsTestCase(BaseTestCase):
self.run_tests(args)
def run_batch(self, *args):
- proc = self.run_command(args)
+ proc = self.run_command(args,
+ # gh-133711: cmd.exe uses the OEM code page
+ # to display the non-ASCII current directory
+ errors="backslashreplace")
self.check_output(proc.stdout)
@unittest.skipUnless(sysconfig.is_python_build(),
@@ -2064,7 +2070,7 @@ class ArgsTestCase(BaseTestCase):
self.check_executed_tests(output, [testname],
failed=[testname],
parallel=True,
- stats=TestStats(1, 1, 0))
+ stats=TestStats(1, 2, 1))
def _check_random_seed(self, run_workers: bool):
# gh-109276: When -r/--randomize is used, random.seed() is called
@@ -2273,7 +2279,6 @@ class ArgsTestCase(BaseTestCase):
def test_xml(self):
code = textwrap.dedent(r"""
import unittest
- from test import support
class VerboseTests(unittest.TestCase):
def test_failed(self):
@@ -2308,6 +2313,50 @@ class ArgsTestCase(BaseTestCase):
for out in testcase.iter('system-out'):
self.assertEqual(out.text, r"abc \x1b def")
+ def test_nonascii(self):
+ code = textwrap.dedent(r"""
+ import unittest
+
+ class NonASCIITests(unittest.TestCase):
+ def test_docstring(self):
+ '''docstring:\u20ac'''
+
+ def test_subtest(self):
+ with self.subTest(param='subtest:\u20ac'):
+ pass
+
+ def test_skip(self):
+ self.skipTest('skipped:\u20ac')
+ """)
+ testname = self.create_test(code=code)
+
+ env = dict(os.environ)
+ env['PYTHONIOENCODING'] = 'ascii'
+
+ def check(output):
+ self.check_executed_tests(output, testname, stats=TestStats(3, 0, 1))
+ self.assertIn(r'docstring:\u20ac', output)
+ self.assertIn(r'skipped:\u20ac', output)
+
+ # Run sequentially
+ output = self.run_tests('-v', testname, env=env, isolated=False)
+ check(output)
+
+ # Run in parallel
+ output = self.run_tests('-j1', '-v', testname, env=env, isolated=False)
+ check(output)
+
+ def test_pgo_exclude(self):
+ # Get PGO tests
+ output = self.run_tests('--pgo', '--list-tests')
+ pgo_tests = output.strip().split()
+
+ # Exclude test_re
+ output = self.run_tests('--pgo', '--list-tests', '-x', 'test_re')
+ tests = output.strip().split()
+ self.assertNotIn('test_re', tests)
+ self.assertEqual(len(tests), len(pgo_tests) - 1)
+
class TestUtils(unittest.TestCase):
def test_format_duration(self):
diff --git a/Lib/test/test_remote_pdb.py b/Lib/test/test_remote_pdb.py
index aef8a6b0129..a1c50af15f3 100644
--- a/Lib/test/test_remote_pdb.py
+++ b/Lib/test/test_remote_pdb.py
@@ -1,5 +1,4 @@
import io
-import time
import itertools
import json
import os
@@ -8,16 +7,13 @@ import signal
import socket
import subprocess
import sys
-import tempfile
import textwrap
-import threading
import unittest
import unittest.mock
from contextlib import closing, contextmanager, redirect_stdout, redirect_stderr, ExitStack
-from pathlib import Path
from test.support import is_wasi, cpython_only, force_color, requires_subprocess, SHORT_TIMEOUT
-from test.support.os_helper import temp_dir, TESTFN, unlink
-from typing import Dict, List, Optional, Tuple, Union, Any
+from test.support.os_helper import TESTFN, unlink
+from typing import List
import pdb
from pdb import _PdbServer, _PdbClient
@@ -1434,7 +1430,6 @@ class PdbConnectTestCase(unittest.TestCase):
def _supports_remote_attaching():
- from contextlib import suppress
PROCESS_VM_READV_SUPPORTED = False
try:
diff --git a/Lib/test/test_reprlib.py b/Lib/test/test_reprlib.py
index 16623654c29..22a55b57c07 100644
--- a/Lib/test/test_reprlib.py
+++ b/Lib/test/test_reprlib.py
@@ -151,14 +151,38 @@ class ReprTests(unittest.TestCase):
eq(r(frozenset({1, 2, 3, 4, 5, 6, 7})), "frozenset({1, 2, 3, 4, 5, 6, ...})")
def test_numbers(self):
- eq = self.assertEqual
- eq(r(123), repr(123))
- eq(r(123), repr(123))
- eq(r(1.0/3), repr(1.0/3))
-
- n = 10**100
- expected = repr(n)[:18] + "..." + repr(n)[-19:]
- eq(r(n), expected)
+ for x in [123, 1.0 / 3]:
+ self.assertEqual(r(x), repr(x))
+
+ max_digits = sys.get_int_max_str_digits()
+ for k in [100, max_digits - 1]:
+ with self.subTest(f'10 ** {k}', k=k):
+ n = 10 ** k
+ expected = repr(n)[:18] + "..." + repr(n)[-19:]
+ self.assertEqual(r(n), expected)
+
+ def re_msg(n, d):
+ return (rf'<{n.__class__.__name__} instance with roughly {d} '
+ rf'digits \(limit at {max_digits}\) at 0x[a-f0-9]+>')
+
+ k = max_digits
+ with self.subTest(f'10 ** {k}', k=k):
+ n = 10 ** k
+ self.assertRaises(ValueError, repr, n)
+ self.assertRegex(r(n), re_msg(n, k + 1))
+
+ for k in [max_digits + 1, 2 * max_digits]:
+ self.assertGreater(k, 100)
+ with self.subTest(f'10 ** {k}', k=k):
+ n = 10 ** k
+ self.assertRaises(ValueError, repr, n)
+ self.assertRegex(r(n), re_msg(n, k + 1))
+ with self.subTest(f'10 ** {k} - 1', k=k):
+ n = 10 ** k - 1
+ # Here, since math.log10(n) == math.log10(n-1),
+ # the number of digits of n - 1 is overestimated.
+ self.assertRaises(ValueError, repr, n)
+ self.assertRegex(r(n), re_msg(n, k + 1))
def test_instance(self):
eq = self.assertEqual
@@ -373,20 +397,20 @@ class ReprTests(unittest.TestCase):
'object': {
1: 'two',
b'three': [
- (4.5, 6.7),
+ (4.5, 6.25),
[set((8, 9)), frozenset((10, 11))],
],
},
'tests': (
(dict(indent=None), '''\
- {1: 'two', b'three': [(4.5, 6.7), [{8, 9}, frozenset({10, 11})]]}'''),
+ {1: 'two', b'three': [(4.5, 6.25), [{8, 9}, frozenset({10, 11})]]}'''),
(dict(indent=False), '''\
{
1: 'two',
b'three': [
(
4.5,
- 6.7,
+ 6.25,
),
[
{
@@ -406,7 +430,7 @@ class ReprTests(unittest.TestCase):
b'three': [
(
4.5,
- 6.7,
+ 6.25,
),
[
{
@@ -426,7 +450,7 @@ class ReprTests(unittest.TestCase):
b'three': [
(
4.5,
- 6.7,
+ 6.25,
),
[
{
@@ -446,7 +470,7 @@ class ReprTests(unittest.TestCase):
b'three': [
(
4.5,
- 6.7,
+ 6.25,
),
[
{
@@ -466,7 +490,7 @@ class ReprTests(unittest.TestCase):
b'three': [
(
4.5,
- 6.7,
+ 6.25,
),
[
{
@@ -494,7 +518,7 @@ class ReprTests(unittest.TestCase):
b'three': [
(
4.5,
- 6.7,
+ 6.25,
),
[
{
@@ -514,7 +538,7 @@ class ReprTests(unittest.TestCase):
-->b'three': [
-->-->(
-->-->-->4.5,
- -->-->-->6.7,
+ -->-->-->6.25,
-->-->),
-->-->[
-->-->-->{
@@ -534,7 +558,7 @@ class ReprTests(unittest.TestCase):
....b'three': [
........(
............4.5,
- ............6.7,
+ ............6.25,
........),
........[
............{
diff --git a/Lib/test/test_shutil.py b/Lib/test/test_shutil.py
index 62c80aab4b3..ebb6cf88336 100644
--- a/Lib/test/test_shutil.py
+++ b/Lib/test/test_shutil.py
@@ -3492,7 +3492,7 @@ class PublicAPITests(unittest.TestCase):
target_api.append('disk_usage')
self.assertEqual(set(shutil.__all__), set(target_api))
with self.assertWarns(DeprecationWarning):
- from shutil import ExecError
+ from shutil import ExecError # noqa: F401
if __name__ == '__main__':
diff --git a/Lib/test/test_sqlite3/test_cli.py b/Lib/test/test_sqlite3/test_cli.py
index 37e0f74f688..720fa3c4c1e 100644
--- a/Lib/test/test_sqlite3/test_cli.py
+++ b/Lib/test/test_sqlite3/test_cli.py
@@ -1,14 +1,22 @@
"""sqlite3 CLI tests."""
import sqlite3
+import sys
+import textwrap
import unittest
+import unittest.mock
+import os
from sqlite3.__main__ import main as cli
+from test.support.import_helper import import_module
from test.support.os_helper import TESTFN, unlink
+from test.support.pty_helper import run_pty
from test.support import (
captured_stdout,
captured_stderr,
captured_stdin,
force_not_colorized_test_class,
+ requires_subprocess,
+ verbose,
)
@@ -130,7 +138,7 @@ class InteractiveSession(unittest.TestCase):
self.assertEndsWith(out, self.PS1)
self.assertEqual(out.count(self.PS1), 2)
self.assertEqual(out.count(self.PS2), 0)
- self.assertIn("Error", err)
+ self.assertIn('Error: unknown command: "', err)
# test "unknown_command" is pointed out in the error message
self.assertIn("unknown_command", err)
@@ -200,5 +208,108 @@ class InteractiveSession(unittest.TestCase):
self.assertIn('\x1b[1;35mOperationalError (SQLITE_ERROR)\x1b[0m: '
'\x1b[35mnear "sel": syntax error\x1b[0m', err)
+
+@requires_subprocess()
+@force_not_colorized_test_class
+class Completion(unittest.TestCase):
+ PS1 = "sqlite> "
+
+ @classmethod
+ def setUpClass(cls):
+ _sqlite3 = import_module("_sqlite3")
+ if not hasattr(_sqlite3, "SQLITE_KEYWORDS"):
+ raise unittest.SkipTest("unable to determine SQLite keywords")
+
+ readline = import_module("readline")
+ if readline.backend == "editline":
+ raise unittest.SkipTest("libedit readline is not supported")
+
+ def write_input(self, input_, env=None):
+ script = textwrap.dedent("""
+ import readline
+ from sqlite3.__main__ import main
+
+ readline.parse_and_bind("set colored-completion-prefix off")
+ main()
+ """)
+ return run_pty(script, input_, env)
+
+ def test_complete_sql_keywords(self):
+ # List candidates starting with 'S', there should be multiple matches.
+ input_ = b"S\t\tEL\t 1;\n.quit\n"
+ output = self.write_input(input_)
+ self.assertIn(b"SELECT", output)
+ self.assertIn(b"SET", output)
+ self.assertIn(b"SAVEPOINT", output)
+ self.assertIn(b"(1,)", output)
+
+ # Keywords are completed in upper case even for lower case user input.
+ input_ = b"sel\t\t 1;\n.quit\n"
+ output = self.write_input(input_)
+ self.assertIn(b"SELECT", output)
+ self.assertIn(b"(1,)", output)
+
+ @unittest.skipIf(sys.platform.startswith("freebsd"),
+ "Two actual tabs are inserted when there are no matching"
+ " completions in the pseudo-terminal opened by run_pty()"
+ " on FreeBSD")
+ def test_complete_no_match(self):
+ input_ = b"xyzzy\t\t\b\b\b\b\b\b\b.quit\n"
+ # Set NO_COLOR to disable coloring for self.PS1.
+ output = self.write_input(input_, env={**os.environ, "NO_COLOR": "1"})
+ lines = output.decode().splitlines()
+ indices = (
+ i for i, line in enumerate(lines, 1)
+ if line.startswith(f"{self.PS1}xyzzy")
+ )
+ line_num = next(indices, -1)
+ self.assertNotEqual(line_num, -1)
+ # Completions occupy lines, assert no extra lines when there is nothing
+ # to complete.
+ self.assertEqual(line_num, len(lines))
+
+ def test_complete_no_input(self):
+ from _sqlite3 import SQLITE_KEYWORDS
+
+ script = textwrap.dedent("""
+ import readline
+ from sqlite3.__main__ import main
+
+ # Configure readline to ...:
+ # - hide control sequences surrounding each candidate
+ # - hide "Display all xxx possibilities? (y or n)"
+ # - hide "--More--"
+ # - show candidates one per line
+ readline.parse_and_bind("set colored-completion-prefix off")
+ readline.parse_and_bind("set colored-stats off")
+ readline.parse_and_bind("set completion-query-items 0")
+ readline.parse_and_bind("set page-completions off")
+ readline.parse_and_bind("set completion-display-width 0")
+ readline.parse_and_bind("set show-all-if-ambiguous off")
+ readline.parse_and_bind("set show-all-if-unmodified off")
+
+ main()
+ """)
+ input_ = b"\t\t.quit\n"
+ output = run_pty(script, input_, env={**os.environ, "NO_COLOR": "1"})
+ try:
+ lines = output.decode().splitlines()
+ indices = [
+ i for i, line in enumerate(lines)
+ if line.startswith(self.PS1)
+ ]
+ self.assertEqual(len(indices), 2)
+ start, end = indices
+ candidates = [l.strip() for l in lines[start+1:end]]
+ self.assertEqual(candidates, sorted(SQLITE_KEYWORDS))
+ except:
+ if verbose:
+ print(' PTY output: '.center(30, '-'))
+ print(output.decode(errors='replace'))
+ print(' end PTY output '.center(30, '-'))
+ raise
+
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py
index 2767a53d53c..f123f6ece40 100644
--- a/Lib/test/test_ssl.py
+++ b/Lib/test/test_ssl.py
@@ -31,6 +31,7 @@ import weakref
import platform
import sysconfig
import functools
+from contextlib import nullcontext
try:
import ctypes
except ImportError:
@@ -2843,6 +2844,7 @@ class ThreadedTests(unittest.TestCase):
# See GH-124984: OpenSSL is not thread safe.
threads = []
+ warnings_filters = sys.flags.context_aware_warnings
global USE_SAME_TEST_CONTEXT
USE_SAME_TEST_CONTEXT = True
try:
@@ -2851,7 +2853,10 @@ class ThreadedTests(unittest.TestCase):
self.test_alpn_protocols,
self.test_getpeercert,
self.test_crl_check,
- self.test_check_hostname_idn,
+ functools.partial(
+ self.test_check_hostname_idn,
+ warnings_filters=warnings_filters,
+ ),
self.test_wrong_cert_tls12,
self.test_wrong_cert_tls13,
):
@@ -3097,7 +3102,7 @@ class ThreadedTests(unittest.TestCase):
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
- def test_check_hostname_idn(self):
+ def test_check_hostname_idn(self, warnings_filters=True):
if support.verbose:
sys.stdout.write("\n")
@@ -3152,16 +3157,30 @@ class ThreadedTests(unittest.TestCase):
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
- with ThreadedEchoServer(context=server_context, chatty=True) as server:
- with warnings_helper.check_no_resource_warning(self):
- with self.assertRaises(UnicodeError):
- context.wrap_socket(socket.socket(),
- server_hostname='.pythontest.net')
- with ThreadedEchoServer(context=server_context, chatty=True) as server:
- with warnings_helper.check_no_resource_warning(self):
- with self.assertRaises(UnicodeDecodeError):
- context.wrap_socket(socket.socket(),
- server_hostname=b'k\xf6nig.idn.pythontest.net')
+ with (
+ ThreadedEchoServer(context=server_context, chatty=True) as server,
+ (
+ warnings_helper.check_no_resource_warning(self)
+ if warnings_filters
+ else nullcontext()
+ ),
+ self.assertRaises(UnicodeError),
+ ):
+ context.wrap_socket(socket.socket(), server_hostname='.pythontest.net')
+
+ with (
+ ThreadedEchoServer(context=server_context, chatty=True) as server,
+ (
+ warnings_helper.check_no_resource_warning(self)
+ if warnings_filters
+ else nullcontext()
+ ),
+ self.assertRaises(UnicodeDecodeError),
+ ):
+ context.wrap_socket(
+ socket.socket(),
+ server_hostname=b'k\xf6nig.idn.pythontest.net',
+ )
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
diff --git a/Lib/test/test_stable_abi_ctypes.py b/Lib/test/test_stable_abi_ctypes.py
index 1e6f69d49e9..5a6ba9de337 100644
--- a/Lib/test/test_stable_abi_ctypes.py
+++ b/Lib/test/test_stable_abi_ctypes.py
@@ -658,7 +658,11 @@ SYMBOL_NAMES = (
"PySys_AuditTuple",
"PySys_FormatStderr",
"PySys_FormatStdout",
+ "PySys_GetAttr",
+ "PySys_GetAttrString",
"PySys_GetObject",
+ "PySys_GetOptionalAttr",
+ "PySys_GetOptionalAttrString",
"PySys_GetXOptions",
"PySys_HasWarnOptions",
"PySys_ResetWarnOptions",
diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py
index 0dd619dd7c8..8250b0aef09 100644
--- a/Lib/test/test_statistics.py
+++ b/Lib/test/test_statistics.py
@@ -3319,7 +3319,8 @@ class TestNormalDistC(unittest.TestCase, TestNormalDist):
def load_tests(loader, tests, ignore):
"""Used for doctest/unittest integration."""
tests.addTests(doctest.DocTestSuite())
- tests.addTests(doctest.DocTestSuite(statistics))
+ if sys.float_repr_style == 'short':
+ tests.addTests(doctest.DocTestSuite(statistics))
return tests
diff --git a/Lib/test/test_str.py b/Lib/test/test_str.py
index d6a7bd0da59..2584fbf72d3 100644
--- a/Lib/test/test_str.py
+++ b/Lib/test/test_str.py
@@ -1231,10 +1231,10 @@ class StrTest(string_tests.StringLikeTest,
self.assertEqual('{0:\x00^6}'.format(3), '\x00\x003\x00\x00\x00')
self.assertEqual('{0:<6}'.format(3), '3 ')
- self.assertEqual('{0:\x00<6}'.format(3.14), '3.14\x00\x00')
- self.assertEqual('{0:\x01<6}'.format(3.14), '3.14\x01\x01')
- self.assertEqual('{0:\x00^6}'.format(3.14), '\x003.14\x00')
- self.assertEqual('{0:^6}'.format(3.14), ' 3.14 ')
+ self.assertEqual('{0:\x00<6}'.format(3.25), '3.25\x00\x00')
+ self.assertEqual('{0:\x01<6}'.format(3.25), '3.25\x01\x01')
+ self.assertEqual('{0:\x00^6}'.format(3.25), '\x003.25\x00')
+ self.assertEqual('{0:^6}'.format(3.25), ' 3.25 ')
self.assertEqual('{0:\x00<12}'.format(3+2.0j), '(3+2j)\x00\x00\x00\x00\x00\x00')
self.assertEqual('{0:\x01<12}'.format(3+2.0j), '(3+2j)\x01\x01\x01\x01\x01\x01')
diff --git a/Lib/test/test_string/_support.py b/Lib/test/test_string/_support.py
index eaa3354a559..abdddaf187b 100644
--- a/Lib/test/test_string/_support.py
+++ b/Lib/test/test_string/_support.py
@@ -1,4 +1,3 @@
-import unittest
from string.templatelib import Interpolation
diff --git a/Lib/test/test_strptime.py b/Lib/test/test_strptime.py
index 268230f6da7..0241e543cd7 100644
--- a/Lib/test/test_strptime.py
+++ b/Lib/test/test_strptime.py
@@ -221,14 +221,16 @@ class StrptimeTests(unittest.TestCase):
self.assertRaises(ValueError, _strptime._strptime_time, data_string="%d",
format="%A")
for bad_format in ("%", "% ", "%\n"):
- with self.assertRaisesRegex(ValueError, "stray % in format "):
+ with (self.subTest(format=bad_format),
+ self.assertRaisesRegex(ValueError, "stray % in format ")):
_strptime._strptime_time("2005", bad_format)
- for bad_format in ("%e", "%Oe", "%O", "%O ", "%Ee", "%E", "%E ",
- "%.", "%+", "%_", "%~", "%\\",
+ for bad_format in ("%i", "%Oi", "%O", "%O ", "%Ee", "%E", "%E ",
+ "%.", "%+", "%~", "%\\",
"%O.", "%O+", "%O_", "%O~", "%O\\"):
directive = bad_format[1:].rstrip()
- with self.assertRaisesRegex(ValueError,
- f"'{re.escape(directive)}' is a bad directive in format "):
+ with (self.subTest(format=bad_format),
+ self.assertRaisesRegex(ValueError,
+ f"'{re.escape(directive)}' is a bad directive in format ")):
_strptime._strptime_time("2005", bad_format)
msg_week_no_year_or_weekday = r"ISO week directive '%V' must be used with " \
@@ -335,6 +337,15 @@ class StrptimeTests(unittest.TestCase):
self.roundtrip('%B', 1, (1900, m, 1, 0, 0, 0, 0, 1, 0))
self.roundtrip('%b', 1, (1900, m, 1, 0, 0, 0, 0, 1, 0))
+ @run_with_locales('LC_TIME', 'az_AZ', 'ber_DZ', 'ber_MA', 'crh_UA')
+ def test_month_locale2(self):
+ # Test for month directives
+ # Month name contains 'ฤฐ' ('\u0130')
+ self.roundtrip('%B', 1, (2025, 6, 1, 0, 0, 0, 6, 152, 0))
+ self.roundtrip('%b', 1, (2025, 6, 1, 0, 0, 0, 6, 152, 0))
+ self.roundtrip('%B', 1, (2025, 7, 1, 0, 0, 0, 1, 182, 0))
+ self.roundtrip('%b', 1, (2025, 7, 1, 0, 0, 0, 1, 182, 0))
+
def test_day(self):
# Test for day directives
self.roundtrip('%d %Y', 2)
@@ -480,13 +491,11 @@ class StrptimeTests(unittest.TestCase):
# * Year is not included: ha_NG.
# * Use non-Gregorian calendar: lo_LA, thai, th_TH.
# On Windows: ar_IN, ar_SA, fa_IR, ps_AF.
- #
- # BUG: Generates regexp that does not match the current date and time
- # for lzh_TW.
@run_with_locales('LC_TIME', 'C', 'en_US', 'fr_FR', 'de_DE', 'ja_JP',
'he_IL', 'eu_ES', 'ar_AE', 'mfe_MU', 'yo_NG',
'csb_PL', 'br_FR', 'gez_ET', 'brx_IN',
- 'my_MM', 'or_IN', 'shn_MM', 'az_IR')
+ 'my_MM', 'or_IN', 'shn_MM', 'az_IR',
+ 'byn_ER', 'wal_ET', 'lzh_TW')
def test_date_time_locale(self):
# Test %c directive
loc = locale.getlocale(locale.LC_TIME)[0]
@@ -525,11 +534,9 @@ class StrptimeTests(unittest.TestCase):
# NB: Does not roundtrip because use non-Gregorian calendar:
# lo_LA, thai, th_TH. On Windows: ar_IN, ar_SA, fa_IR, ps_AF.
- # BUG: Generates regexp that does not match the current date
- # for lzh_TW.
@run_with_locales('LC_TIME', 'C', 'en_US', 'fr_FR', 'de_DE', 'ja_JP',
'he_IL', 'eu_ES', 'ar_AE',
- 'az_IR', 'my_MM', 'or_IN', 'shn_MM')
+ 'az_IR', 'my_MM', 'or_IN', 'shn_MM', 'lzh_TW')
def test_date_locale(self):
# Test %x directive
now = time.time()
@@ -546,7 +553,7 @@ class StrptimeTests(unittest.TestCase):
# NB: Dates before 1969 do not roundtrip on many locales, including C.
@unittest.skipIf(support.linked_to_musl(), "musl libc issue, bpo-46390")
@run_with_locales('LC_TIME', 'en_US', 'fr_FR', 'de_DE', 'ja_JP',
- 'eu_ES', 'ar_AE', 'my_MM', 'shn_MM')
+ 'eu_ES', 'ar_AE', 'my_MM', 'shn_MM', 'lzh_TW')
def test_date_locale2(self):
# Test %x directive
loc = locale.getlocale(locale.LC_TIME)[0]
@@ -562,11 +569,11 @@ class StrptimeTests(unittest.TestCase):
# norwegian, nynorsk.
# * Hours are in 12-hour notation without AM/PM indication: hy_AM,
# ms_MY, sm_WS.
- # BUG: Generates regexp that does not match the current time for lzh_TW.
@run_with_locales('LC_TIME', 'C', 'en_US', 'fr_FR', 'de_DE', 'ja_JP',
'aa_ET', 'am_ET', 'az_IR', 'byn_ER', 'fa_IR', 'gez_ET',
'my_MM', 'om_ET', 'or_IN', 'shn_MM', 'sid_ET', 'so_SO',
- 'ti_ET', 'tig_ER', 'wal_ET')
+ 'ti_ET', 'tig_ER', 'wal_ET', 'lzh_TW',
+ 'ar_SA', 'bg_BG')
def test_time_locale(self):
# Test %X directive
loc = locale.getlocale(locale.LC_TIME)[0]
diff --git a/Lib/test/test_syntax.py b/Lib/test/test_syntax.py
index c7ac7914158..c52d2421941 100644
--- a/Lib/test/test_syntax.py
+++ b/Lib/test/test_syntax.py
@@ -382,6 +382,13 @@ SyntaxError: invalid syntax
Traceback (most recent call last):
SyntaxError: invalid syntax
+# But prefixes of soft keywords should
+# still raise specialized errors
+
+>>> (mat x)
+Traceback (most recent call last):
+SyntaxError: invalid syntax. Perhaps you forgot a comma?
+
From compiler_complex_args():
>>> def f(None=1):
@@ -1436,17 +1443,17 @@ Regression tests for gh-133999:
>>> try: pass
... except TypeError as name: raise from None
Traceback (most recent call last):
- SyntaxError: invalid syntax
+ SyntaxError: did you forget an expression between 'raise' and 'from'?
>>> try: pass
... except* TypeError as name: raise from None
Traceback (most recent call last):
- SyntaxError: invalid syntax
+ SyntaxError: did you forget an expression between 'raise' and 'from'?
>>> match 1:
... case 1 | 2 as abc: raise from None
Traceback (most recent call last):
- SyntaxError: invalid syntax
+ SyntaxError: did you forget an expression between 'raise' and 'from'?
Ensure that early = are not matched by the parser as invalid comparisons
>>> f(2, 4, x=34); 1 $ 2
@@ -1695,6 +1702,28 @@ Make sure that the old "raise X, Y[, Z]" form is gone:
...
SyntaxError: invalid syntax
+Better errors for `raise` statement:
+
+ >>> raise ValueError from
+ Traceback (most recent call last):
+ SyntaxError: did you forget an expression after 'from'?
+
+ >>> raise mod.ValueError() from
+ Traceback (most recent call last):
+ SyntaxError: did you forget an expression after 'from'?
+
+ >>> raise from exc
+ Traceback (most recent call last):
+ SyntaxError: did you forget an expression between 'raise' and 'from'?
+
+ >>> raise from None
+ Traceback (most recent call last):
+ SyntaxError: did you forget an expression between 'raise' and 'from'?
+
+ >>> raise from
+ Traceback (most recent call last):
+ SyntaxError: did you forget an expression between 'raise' and 'from'?
+
Check that an multiple exception types with missing parentheses
raise a custom exception only when using 'as'
@@ -2843,6 +2872,13 @@ class SyntaxErrorTestCase(unittest.TestCase):
"""
self._check_error(source, "parameter and nonlocal", lineno=3)
+ def test_raise_from_error_message(self):
+ source = """if 1:
+ raise AssertionError() from None
+ print(1,,2)
+ """
+ self._check_error(source, "invalid syntax", lineno=3)
+
def test_yield_outside_function(self):
self._check_error("if 0: yield", "outside function")
self._check_error("if 0: yield\nelse: x=1", "outside function")
diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py
index 795d1ecbb59..486bf10a0b5 100644
--- a/Lib/test/test_sys.py
+++ b/Lib/test/test_sys.py
@@ -24,7 +24,7 @@ from test.support import import_helper
from test.support import force_not_colorized
from test.support import SHORT_TIMEOUT
try:
- from test.support import interpreters
+ from concurrent import interpreters
except ImportError:
interpreters = None
import textwrap
@@ -729,7 +729,7 @@ class SysModuleTest(unittest.TestCase):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'pthread-stubs', 'solaris', None))
- self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
+ self.assertIn(info.lock, ('pymutex', None))
if sys.platform.startswith(("linux", "android", "freebsd")):
self.assertEqual(info.name, "pthread")
elif sys.platform == "win32":
@@ -869,12 +869,7 @@ class SysModuleTest(unittest.TestCase):
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
- arg = sys_attr
- attr_type = type(sys_attr)
- with self.assertRaises(TypeError):
- attr_type(arg)
- with self.assertRaises(TypeError):
- attr_type.__new__(attr_type, arg)
+ support.check_disallow_instantiation(self, type(sys_attr), sys_attr)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
@@ -1074,6 +1069,7 @@ class SysModuleTest(unittest.TestCase):
self.assertHasAttr(sys.implementation, 'version')
self.assertHasAttr(sys.implementation, 'hexversion')
self.assertHasAttr(sys.implementation, 'cache_tag')
+ self.assertHasAttr(sys.implementation, 'supports_isolated_interpreters')
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
@@ -1087,6 +1083,15 @@ class SysModuleTest(unittest.TestCase):
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
+ # https://peps.python.org/pep-0734
+ sii = sys.implementation.supports_isolated_interpreters
+ self.assertIsInstance(sii, bool)
+ if test.support.check_impl_detail(cpython=True):
+ if test.support.is_emscripten or test.support.is_wasi:
+ self.assertFalse(sii)
+ else:
+ self.assertTrue(sii)
+
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
@@ -1135,23 +1140,12 @@ class SysModuleTest(unittest.TestCase):
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
try:
- # While we could imagine a Python session where the number of
- # multiple buffer objects would exceed the sharing of references,
- # it is unlikely to happen in a normal test run.
- #
- # In free-threaded builds each code object owns an array of
- # pointers to copies of the bytecode. When the number of
- # code objects is a large fraction of the total number of
- # references, this can cause the total number of allocated
- # blocks to exceed the total number of references.
- #
- # For some reason, iOS seems to trigger the "unlikely to happen"
- # case reliably under CI conditions. It's not clear why; but as
- # this test is checking the behavior of getallocatedblock()
- # under garbage collection, we can skip this pre-condition check
- # for now. See GH-130384.
- if not support.Py_GIL_DISABLED and not support.is_apple_mobile:
- self.assertLess(a, sys.gettotalrefcount())
+ # The reported blocks will include immortalized strings, but the
+ # total ref count will not. This will sanity check that among all
+ # other objects (those eligible for garbage collection) there
+ # are more references being tracked than allocated blocks.
+ interned_immortal = sys.getunicodeinternedsize(_only_immortal=True)
+ self.assertLess(a - interned_immortal, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
@@ -1299,6 +1293,7 @@ class SysModuleTest(unittest.TestCase):
for name in sys.stdlib_module_names:
self.assertIsInstance(name, str)
+ @unittest.skipUnless(hasattr(sys, '_stdlib_dir'), 'need sys._stdlib_dir')
def test_stdlib_dir(self):
os = import_helper.import_fresh_module('os')
marker = getattr(os, '__file__', None)
@@ -1953,22 +1948,7 @@ class SizeofTest(unittest.TestCase):
self.assertEqual(out, b"")
self.assertEqual(err, b"")
-
-def _supports_remote_attaching():
- PROCESS_VM_READV_SUPPORTED = False
-
- try:
- from _remote_debugging import PROCESS_VM_READV_SUPPORTED
- except ImportError:
- pass
-
- return PROCESS_VM_READV_SUPPORTED
-
-@unittest.skipIf(not sys.is_remote_debug_enabled(), "Remote debugging is not enabled")
-@unittest.skipIf(sys.platform != "darwin" and sys.platform != "linux" and sys.platform != "win32",
- "Test only runs on Linux, Windows and MacOS")
-@unittest.skipIf(sys.platform == "linux" and not _supports_remote_attaching(),
- "Test only runs on Linux with process_vm_readv support")
+@test.support.support_remote_exec_only
@test.support.cpython_only
class TestRemoteExec(unittest.TestCase):
def tearDown(self):
@@ -2127,7 +2107,7 @@ print("Remote script executed successfully!")
returncode, stdout, stderr = self._run_remote_exec_test(script, prologue=prologue)
self.assertEqual(returncode, 0)
self.assertIn(b"Remote script executed successfully!", stdout)
- self.assertIn(b"Audit event: remote_debugger_script, arg: ", stdout)
+ self.assertIn(b"Audit event: cpython.remote_debugger_script, arg: ", stdout)
self.assertEqual(stderr, b"")
def test_remote_exec_with_exception(self):
diff --git a/Lib/test/test_sysconfig.py b/Lib/test/test_sysconfig.py
index d30f69ded66..2eb8de4b29f 100644
--- a/Lib/test/test_sysconfig.py
+++ b/Lib/test/test_sysconfig.py
@@ -32,7 +32,6 @@ from sysconfig import (get_paths, get_platform, get_config_vars,
from sysconfig.__main__ import _main, _parse_makefile, _get_pybuilddir, _get_json_data_name
import _imp
import _osx_support
-import _sysconfig
HAS_USER_BASE = sysconfig._HAS_USER_BASE
@@ -712,8 +711,8 @@ class TestSysConfig(unittest.TestCase, VirtualEnvironmentMixin):
ignore_keys |= {'prefix', 'exec_prefix', 'base', 'platbase', 'installed_base', 'installed_platbase'}
for key in ignore_keys:
- json_config_vars.pop(key)
- system_config_vars.pop(key)
+ json_config_vars.pop(key, None)
+ system_config_vars.pop(key, None)
self.assertEqual(system_config_vars, json_config_vars)
diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py
index cf218a2bf14..7055e1ed147 100644
--- a/Lib/test/test_tarfile.py
+++ b/Lib/test/test_tarfile.py
@@ -2715,6 +2715,31 @@ class MiscTest(unittest.TestCase):
str(excinfo.exception),
)
+ @unittest.skipUnless(os_helper.can_symlink(), 'requires symlink support')
+ @unittest.skipUnless(hasattr(os, 'chmod'), "missing os.chmod")
+ @unittest.mock.patch('os.chmod')
+ def test_deferred_directory_attributes_update(self, mock_chmod):
+ # Regression test for gh-127987: setting attributes on arbitrary files
+ tempdir = os.path.join(TEMPDIR, 'test127987')
+ def mock_chmod_side_effect(path, mode, **kwargs):
+ target_path = os.path.realpath(path)
+ if os.path.commonpath([target_path, tempdir]) != tempdir:
+ raise Exception("should not try to chmod anything outside the destination", target_path)
+ mock_chmod.side_effect = mock_chmod_side_effect
+
+ outside_tree_dir = os.path.join(TEMPDIR, 'outside_tree_dir')
+ with ArchiveMaker() as arc:
+ arc.add('x', symlink_to='.')
+ arc.add('x', type=tarfile.DIRTYPE, mode='?rwsrwsrwt')
+ arc.add('x', symlink_to=outside_tree_dir)
+
+ os.makedirs(outside_tree_dir)
+ try:
+ arc.open().extractall(path=tempdir, filter='tar')
+ finally:
+ os_helper.rmtree(outside_tree_dir)
+ os_helper.rmtree(tempdir)
+
class CommandLineTest(unittest.TestCase):
@@ -3275,6 +3300,10 @@ class NoneInfoExtractTests(ReadTest):
got_paths = set(
p.relative_to(directory)
for p in pathlib.Path(directory).glob('**/*'))
+ if self.extraction_filter in (None, 'data'):
+ # The 'data' filter is expected to reject special files
+ for path in 'ustar/fifotype', 'ustar/blktype', 'ustar/chrtype':
+ got_paths.discard(pathlib.Path(path))
self.assertEqual(self.control_paths, got_paths)
@contextmanager
@@ -3504,12 +3533,28 @@ class ArchiveMaker:
self.bio = None
def add(self, name, *, type=None, symlink_to=None, hardlink_to=None,
- mode=None, size=None, **kwargs):
- """Add a member to the test archive. Call within `with`."""
+ mode=None, size=None, content=None, **kwargs):
+ """Add a member to the test archive. Call within `with`.
+
+ Provides many shortcuts:
+ - default `type` is based on symlink_to, hardlink_to, and trailing `/`
+ in name (which is stripped)
+ - size & content defaults are based on each other
+ - content can be str or bytes
+ - mode should be textual ('-rwxrwxrwx')
+
+ (add more! this is unstable internal test-only API)
+ """
name = str(name)
tarinfo = tarfile.TarInfo(name).replace(**kwargs)
+ if content is not None:
+ if isinstance(content, str):
+ content = content.encode()
+ size = len(content)
if size is not None:
tarinfo.size = size
+ if content is None:
+ content = bytes(tarinfo.size)
if mode:
tarinfo.mode = _filemode_to_int(mode)
if symlink_to is not None:
@@ -3523,7 +3568,7 @@ class ArchiveMaker:
if type is not None:
tarinfo.type = type
if tarinfo.isreg():
- fileobj = io.BytesIO(bytes(tarinfo.size))
+ fileobj = io.BytesIO(content)
else:
fileobj = None
self.tar_w.addfile(tarinfo, fileobj)
@@ -3557,7 +3602,7 @@ class TestExtractionFilters(unittest.TestCase):
destdir = outerdir / 'dest'
@contextmanager
- def check_context(self, tar, filter):
+ def check_context(self, tar, filter, *, check_flag=True):
"""Extracts `tar` to `self.destdir` and allows checking the result
If an error occurs, it must be checked using `expect_exception`
@@ -3566,27 +3611,40 @@ class TestExtractionFilters(unittest.TestCase):
except the destination directory itself and parent directories of
other files.
When checking directories, do so before their contents.
+
+ A file called 'flag' is made in outerdir (i.e. outside destdir)
+ before extraction; it should not be altered nor should its contents
+ be read/copied.
"""
with os_helper.temp_dir(self.outerdir):
+ flag_path = self.outerdir / 'flag'
+ flag_path.write_text('capture me')
try:
tar.extractall(self.destdir, filter=filter)
except Exception as exc:
self.raised_exception = exc
+ self.reraise_exception = True
self.expected_paths = set()
else:
self.raised_exception = None
+ self.reraise_exception = False
self.expected_paths = set(self.outerdir.glob('**/*'))
self.expected_paths.discard(self.destdir)
+ self.expected_paths.discard(flag_path)
try:
- yield
+ yield self
finally:
tar.close()
- if self.raised_exception:
+ if self.reraise_exception:
raise self.raised_exception
self.assertEqual(self.expected_paths, set())
+ if check_flag:
+ self.assertEqual(flag_path.read_text(), 'capture me')
+ else:
+ assert filter == 'fully_trusted'
def expect_file(self, name, type=None, symlink_to=None, mode=None,
- size=None):
+ size=None, content=None):
"""Check a single file. See check_context."""
if self.raised_exception:
raise self.raised_exception
@@ -3605,26 +3663,45 @@ class TestExtractionFilters(unittest.TestCase):
# The symlink might be the same (textually) as what we expect,
# but some systems change the link to an equivalent path, so
# we fall back to samefile().
- if expected != got:
- self.assertTrue(got.samefile(expected))
+ try:
+ if expected != got:
+ self.assertTrue(got.samefile(expected))
+ except Exception as e:
+ # attach a note, so it's shown even if `samefile` fails
+ e.add_note(f'{expected=}, {got=}')
+ raise
elif type == tarfile.REGTYPE or type is None:
self.assertTrue(path.is_file())
elif type == tarfile.DIRTYPE:
self.assertTrue(path.is_dir())
elif type == tarfile.FIFOTYPE:
self.assertTrue(path.is_fifo())
+ elif type == tarfile.SYMTYPE:
+ self.assertTrue(path.is_symlink())
else:
raise NotImplementedError(type)
if size is not None:
self.assertEqual(path.stat().st_size, size)
+ if content is not None:
+ self.assertEqual(path.read_text(), content)
for parent in path.parents:
self.expected_paths.discard(parent)
+ def expect_any_tree(self, name):
+ """Check a directory; forget about its contents."""
+ tree_path = (self.destdir / name).resolve()
+ self.expect_file(tree_path, type=tarfile.DIRTYPE)
+ self.expected_paths = {
+ p for p in self.expected_paths
+ if tree_path not in p.parents
+ }
+
def expect_exception(self, exc_type, message_re='.'):
with self.assertRaisesRegex(exc_type, message_re):
if self.raised_exception is not None:
raise self.raised_exception
- self.raised_exception = None
+ self.reraise_exception = False
+ return self.raised_exception
def test_benign_file(self):
with ArchiveMaker() as arc:
@@ -3710,6 +3787,80 @@ class TestExtractionFilters(unittest.TestCase):
self.expect_file('parent/evil')
@symlink_test
+ @os_helper.skip_unless_symlink
+ def test_realpath_limit_attack(self):
+ # (CVE-2025-4517)
+
+ with ArchiveMaker() as arc:
+ # populate the symlinks and dirs that expand in os.path.realpath()
+ # The component length is chosen so that in common cases, the unexpanded
+ # path fits in PATH_MAX, but it overflows when the final symlink
+ # is expanded
+ steps = "abcdefghijklmnop"
+ if sys.platform == 'win32':
+ component = 'd' * 25
+ elif 'PC_PATH_MAX' in os.pathconf_names:
+ max_path_len = os.pathconf(self.outerdir.parent, "PC_PATH_MAX")
+ path_sep_len = 1
+ dest_len = len(str(self.destdir)) + path_sep_len
+ component_len = (max_path_len - dest_len) // (len(steps) + path_sep_len)
+ component = 'd' * component_len
+ else:
+ raise NotImplementedError(f"Need to guess component length for {sys.platform}")
+ path = ""
+ step_path = ""
+ for i in steps:
+ arc.add(os.path.join(path, component), type=tarfile.DIRTYPE,
+ mode='drwxrwxrwx')
+ arc.add(os.path.join(path, i), symlink_to=component)
+ path = os.path.join(path, component)
+ step_path = os.path.join(step_path, i)
+ # create the final symlink that exceeds PATH_MAX and simply points
+ # to the top dir.
+ # this link will never be expanded by
+ # os.path.realpath(strict=False), nor anything after it.
+ linkpath = os.path.join(*steps, "l"*254)
+ parent_segments = [".."] * len(steps)
+ arc.add(linkpath, symlink_to=os.path.join(*parent_segments))
+ # make a symlink outside to keep the tar command happy
+ arc.add("escape", symlink_to=os.path.join(linkpath, ".."))
+ # use the symlinks above, that are not checked, to create a hardlink
+ # to a file outside of the destination path
+ arc.add("flaglink", hardlink_to=os.path.join("escape", "flag"))
+ # now that we have the hardlink we can overwrite the file
+ arc.add("flaglink", content='overwrite')
+ # we can also create new files as well!
+ arc.add("escape/newfile", content='new')
+
+ with (self.subTest('fully_trusted'),
+ self.check_context(arc.open(), filter='fully_trusted',
+ check_flag=False)):
+ if sys.platform == 'win32':
+ self.expect_exception((FileNotFoundError, FileExistsError))
+ elif self.raised_exception:
+ # Cannot symlink/hardlink: tarfile falls back to getmember()
+ self.expect_exception(KeyError)
+ # Otherwise, this block should never enter.
+ else:
+ self.expect_any_tree(component)
+ self.expect_file('flaglink', content='overwrite')
+ self.expect_file('../newfile', content='new')
+ self.expect_file('escape', type=tarfile.SYMTYPE)
+ self.expect_file('a', symlink_to=component)
+
+ for filter in 'tar', 'data':
+ with self.subTest(filter), self.check_context(arc.open(), filter=filter):
+ exc = self.expect_exception((OSError, KeyError))
+ if isinstance(exc, OSError):
+ if sys.platform == 'win32':
+ # 3: ERROR_PATH_NOT_FOUND
+ # 5: ERROR_ACCESS_DENIED
+ # 206: ERROR_FILENAME_EXCED_RANGE
+ self.assertIn(exc.winerror, (3, 5, 206))
+ else:
+ self.assertEqual(exc.errno, errno.ENAMETOOLONG)
+
+ @symlink_test
def test_parent_symlink2(self):
# Test interplaying symlinks
# Inspired by 'dirsymlink2b' in jwilk/traversal-archives
@@ -3931,8 +4082,8 @@ class TestExtractionFilters(unittest.TestCase):
arc.add('symlink2', symlink_to=os.path.join(
'linkdir', 'hardlink2'))
arc.add('targetdir/target', size=3)
- arc.add('linkdir/hardlink', hardlink_to='targetdir/target')
- arc.add('linkdir/hardlink2', hardlink_to='linkdir/symlink')
+ arc.add('linkdir/hardlink', hardlink_to=os.path.join('targetdir', 'target'))
+ arc.add('linkdir/hardlink2', hardlink_to=os.path.join('linkdir', 'symlink'))
for filter in 'tar', 'data', 'fully_trusted':
with self.check_context(arc.open(), filter):
@@ -3948,6 +4099,129 @@ class TestExtractionFilters(unittest.TestCase):
self.expect_file('linkdir/symlink', size=3)
self.expect_file('symlink2', size=3)
+ @symlink_test
+ def test_sneaky_hardlink_fallback(self):
+ # (CVE-2025-4330)
+ # Test that when hardlink extraction falls back to extracting members
+ # from the archive, the extracted member is (re-)filtered.
+ with ArchiveMaker() as arc:
+ # Create a directory structure so the c/escape symlink stays
+ # inside the path
+ arc.add("a/t/dummy")
+ # Create b/ directory
+ arc.add("b/")
+ # Point "c" to the bottom of the tree in "a"
+ arc.add("c", symlink_to=os.path.join("a", "t"))
+ # link to non-existent location under "a"
+ arc.add("c/escape", symlink_to=os.path.join("..", "..",
+ "link_here"))
+ # Move "c" to point to "b" ("c/escape" no longer exists)
+ arc.add("c", symlink_to="b")
+ # Attempt to create a hard link to "c/escape". Since it doesn't
+ # exist it will attempt to extract "c/escape" but at "boom".
+ arc.add("boom", hardlink_to=os.path.join("c", "escape"))
+
+ with self.check_context(arc.open(), 'data'):
+ if not os_helper.can_symlink():
+ # When 'c/escape' is extracted, 'c' is a regular
+ # directory, and 'c/escape' *would* point outside
+ # the destination if symlinks were allowed.
+ self.expect_exception(
+ tarfile.LinkOutsideDestinationError)
+ elif sys.platform == "win32":
+ # On Windows, 'c/escape' points outside the destination
+ self.expect_exception(tarfile.LinkOutsideDestinationError)
+ else:
+ e = self.expect_exception(
+ tarfile.LinkFallbackError,
+ "link 'boom' would be extracted as a copy of "
+ + "'c/escape', which was rejected")
+ self.assertIsInstance(e.__cause__,
+ tarfile.LinkOutsideDestinationError)
+ for filter in 'tar', 'fully_trusted':
+ with self.subTest(filter), self.check_context(arc.open(), filter):
+ if not os_helper.can_symlink():
+ self.expect_file("a/t/dummy")
+ self.expect_file("b/")
+ self.expect_file("c/")
+ else:
+ self.expect_file("a/t/dummy")
+ self.expect_file("b/")
+ self.expect_file("a/t/escape", symlink_to='../../link_here')
+ self.expect_file("boom", symlink_to='../../link_here')
+ self.expect_file("c", symlink_to='b')
+
+ @symlink_test
+ def test_exfiltration_via_symlink(self):
+ # (CVE-2025-4138)
+ # Test changing symlinks that result in a symlink pointing outside
+ # the extraction directory, unless prevented by 'data' filter's
+ # normalization.
+ with ArchiveMaker() as arc:
+ arc.add("escape", symlink_to=os.path.join('link', 'link', '..', '..', 'link-here'))
+ arc.add("link", symlink_to='./')
+
+ for filter in 'tar', 'data', 'fully_trusted':
+ with self.check_context(arc.open(), filter):
+ if os_helper.can_symlink():
+ self.expect_file("link", symlink_to='./')
+ if filter == 'data':
+ self.expect_file("escape", symlink_to='link-here')
+ else:
+ self.expect_file("escape",
+ symlink_to='link/link/../../link-here')
+ else:
+ # Nothing is extracted.
+ pass
+
+ @symlink_test
+ def test_chmod_outside_dir(self):
+ # (CVE-2024-12718)
+ # Test that members used for delayed updates of directory metadata
+ # are (re-)filtered.
+ with ArchiveMaker() as arc:
+ # "pwn" is a veeeery innocent symlink:
+ arc.add("a/pwn", symlink_to='.')
+ # But now "pwn" is also a directory, so it's scheduled to have its
+ # metadata updated later:
+ arc.add("a/pwn/", mode='drwxrwxrwx')
+ # Oops, "pwn" is not so innocent any more:
+ arc.add("a/pwn", symlink_to='x/../')
+ # Newly created symlink points to the dest dir,
+ # so it's OK for the "data" filter.
+ arc.add('a/x', symlink_to=('../'))
+ # But now "pwn" points outside the dest dir
+
+ for filter in 'tar', 'data', 'fully_trusted':
+ with self.check_context(arc.open(), filter) as cc:
+ if not os_helper.can_symlink():
+ self.expect_file("a/pwn/")
+ elif filter == 'data':
+ self.expect_file("a/x", symlink_to='../')
+ self.expect_file("a/pwn", symlink_to='.')
+ else:
+ self.expect_file("a/x", symlink_to='../')
+ self.expect_file("a/pwn", symlink_to='x/../')
+ if sys.platform != "win32":
+ st_mode = cc.outerdir.stat().st_mode
+ self.assertNotEqual(st_mode & 0o777, 0o777)
+
+ def test_link_fallback_normalizes(self):
+ # Make sure hardlink fallbacks work for non-normalized paths for all
+ # filters
+ with ArchiveMaker() as arc:
+ arc.add("dir/")
+ arc.add("dir/../afile")
+ arc.add("link1", hardlink_to='dir/../afile')
+ arc.add("link2", hardlink_to='dir/../dir/../afile')
+
+ for filter in 'tar', 'data', 'fully_trusted':
+ with self.check_context(arc.open(), filter) as cc:
+ self.expect_file("dir/")
+ self.expect_file("afile")
+ self.expect_file("link1")
+ self.expect_file("link2")
+
def test_modes(self):
# Test how file modes are extracted
# (Note that the modes are ignored on platforms without working chmod)
@@ -4072,7 +4346,7 @@ class TestExtractionFilters(unittest.TestCase):
# The 'tar' filter returns TarInfo objects with the same name/type.
# (It can also fail for particularly "evil" input, but we don't have
# that in the test archive.)
- with tarfile.TarFile.open(tarname) as tar:
+ with tarfile.TarFile.open(tarname, encoding="iso8859-1") as tar:
for tarinfo in tar.getmembers():
try:
filtered = tarfile.tar_filter(tarinfo, '')
@@ -4084,7 +4358,7 @@ class TestExtractionFilters(unittest.TestCase):
def test_data_filter(self):
# The 'data' filter either raises, or returns TarInfo with the same
# name/type.
- with tarfile.TarFile.open(tarname) as tar:
+ with tarfile.TarFile.open(tarname, encoding="iso8859-1") as tar:
for tarinfo in tar.getmembers():
try:
filtered = tarfile.data_filter(tarinfo, '')
@@ -4242,13 +4516,13 @@ class TestExtractionFilters(unittest.TestCase):
# If errorlevel is 0, errors affected by errorlevel are ignored
with self.check_context(arc.open(errorlevel=0), extracterror_filter):
- self.expect_file('file')
+ pass
with self.check_context(arc.open(errorlevel=0), filtererror_filter):
- self.expect_file('file')
+ pass
with self.check_context(arc.open(errorlevel=0), oserror_filter):
- self.expect_file('file')
+ pass
with self.check_context(arc.open(errorlevel=0), tarerror_filter):
self.expect_exception(tarfile.TarError)
@@ -4259,7 +4533,7 @@ class TestExtractionFilters(unittest.TestCase):
# If 1, all fatal errors are raised
with self.check_context(arc.open(errorlevel=1), extracterror_filter):
- self.expect_file('file')
+ pass
with self.check_context(arc.open(errorlevel=1), filtererror_filter):
self.expect_exception(tarfile.FilterError)
diff --git a/Lib/test/test_threading.py b/Lib/test/test_threading.py
index 0e51e7fc8c5..00a3037c3e1 100644
--- a/Lib/test/test_threading.py
+++ b/Lib/test/test_threading.py
@@ -28,7 +28,7 @@ from test import lock_tests
from test import support
try:
- from test.support import interpreters
+ from concurrent import interpreters
except ImportError:
interpreters = None
@@ -1247,13 +1247,68 @@ class ThreadTests(BaseTestCase):
self.assertEqual(err, b"")
self.assertIn(b"all clear", out)
+ @support.subTests('lock_class_name', ['Lock', 'RLock'])
+ def test_acquire_daemon_thread_lock_in_finalization(self, lock_class_name):
+ # gh-123940: Py_Finalize() prevents other threads from running Python
+ # code (and so, releasing locks), so acquiring a locked lock can not
+ # succeed.
+ # We raise an exception rather than hang.
+ code = textwrap.dedent(f"""
+ import threading
+ import time
+
+ thread_started_event = threading.Event()
+
+ lock = threading.{lock_class_name}()
+ def loop():
+ if {lock_class_name!r} == 'RLock':
+ lock.acquire()
+ with lock:
+ thread_started_event.set()
+ while True:
+ time.sleep(1)
+
+ uncontested_lock = threading.{lock_class_name}()
+
+ class Cycle:
+ def __init__(self):
+ self.self_ref = self
+ self.thr = threading.Thread(
+ target=loop, daemon=True)
+ self.thr.start()
+ thread_started_event.wait()
+
+ def __del__(self):
+ assert self.thr.is_alive()
+
+ # We *can* acquire an unlocked lock
+ uncontested_lock.acquire()
+ if {lock_class_name!r} == 'RLock':
+ uncontested_lock.acquire()
+
+ # Acquiring a locked one fails
+ try:
+ lock.acquire()
+ except PythonFinalizationError:
+ assert self.thr.is_alive()
+ print('got the correct exception!')
+
+ # Cycle holds a reference to itself, which ensures it is
+ # cleaned up during the GC that runs after daemon threads
+ # have been forced to exit during finalization.
+ Cycle()
+ """)
+ rc, out, err = assert_python_ok("-c", code)
+ self.assertEqual(err, b"")
+ self.assertIn(b"got the correct exception", out)
+
def test_start_new_thread_failed(self):
# gh-109746: if Python fails to start newly created thread
# due to failure of underlying PyThread_start_new_thread() call,
# its state should be removed from interpreter' thread states list
# to avoid its double cleanup
try:
- from resource import setrlimit, RLIMIT_NPROC
+ from resource import setrlimit, RLIMIT_NPROC # noqa: F401
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
code = """if 1:
@@ -1284,12 +1339,6 @@ class ThreadTests(BaseTestCase):
@cpython_only
def test_finalize_daemon_thread_hang(self):
- if support.check_sanitizer(thread=True, memory=True):
- # the thread running `time.sleep(100)` below will still be alive
- # at process exit
- self.skipTest(
- "https://github.com/python/cpython/issues/124878 - Known"
- " race condition that TSAN identifies.")
# gh-87135: tests that daemon threads hang during finalization
script = textwrap.dedent('''
import os
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 2d41a5e5ac0..865e0c5b40d 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1,6 +1,8 @@
import contextlib
+import itertools
import os
import re
+import string
import tempfile
import token
import tokenize
@@ -1975,6 +1977,10 @@ if 1:
for case in cases:
self.check_roundtrip(case)
+ self.check_roundtrip(r"t'{ {}}'")
+ self.check_roundtrip(r"t'{f'{ {}}'}{ {}}'")
+ self.check_roundtrip(r"f'{t'{ {}}'}{ {}}'")
+
def test_continuation(self):
# Balancing continuation
@@ -3234,5 +3240,77 @@ class CommandLineTest(unittest.TestCase):
self.check_output(source, expect, flag)
+class StringPrefixTest(unittest.TestCase):
+ @staticmethod
+ def determine_valid_prefixes():
+ # Try all lengths until we find a length that has zero valid
+ # prefixes. This will miss the case where for example there
+ # are no valid 3 character prefixes, but there are valid 4
+ # character prefixes. That seems unlikely.
+
+ single_char_valid_prefixes = set()
+
+ # Find all of the single character string prefixes. Just get
+ # the lowercase version, we'll deal with combinations of upper
+ # and lower case later. I'm using this logic just in case
+ # some uppercase-only prefix is added.
+ for letter in itertools.chain(string.ascii_lowercase, string.ascii_uppercase):
+ try:
+ eval(f'{letter}""')
+ single_char_valid_prefixes.add(letter.lower())
+ except SyntaxError:
+ pass
+
+ # This logic assumes that all combinations of valid prefixes only use
+ # the characters that are valid single character prefixes. That seems
+ # like a valid assumption, but if it ever changes this will need
+ # adjusting.
+ valid_prefixes = set()
+ for length in itertools.count():
+ num_at_this_length = 0
+ for prefix in (
+ "".join(l)
+ for l in itertools.combinations(single_char_valid_prefixes, length)
+ ):
+ for t in itertools.permutations(prefix):
+ for u in itertools.product(*[(c, c.upper()) for c in t]):
+ p = "".join(u)
+ if p == "not":
+ # 'not' can never be a string prefix,
+ # because it's a valid expression: not ""
+ continue
+ try:
+ eval(f'{p}""')
+
+ # No syntax error, so p is a valid string
+ # prefix.
+
+ valid_prefixes.add(p)
+ num_at_this_length += 1
+ except SyntaxError:
+ pass
+ if num_at_this_length == 0:
+ return valid_prefixes
+
+
+ def test_prefixes(self):
+ # Get the list of defined string prefixes. I don't see an
+ # obvious documented way of doing this, but probably the best
+ # thing is to split apart tokenize.StringPrefix.
+
+ # Make sure StringPrefix begins and ends in parens. We're
+ # assuming it's of the form "(a|b|ab)", if a, b, and ab are
+ # valid string prefixes.
+ self.assertEqual(tokenize.StringPrefix[0], '(')
+ self.assertEqual(tokenize.StringPrefix[-1], ')')
+
+ # Then split apart everything else by '|'.
+ defined_prefixes = set(tokenize.StringPrefix[1:-1].split('|'))
+
+ # Now compute the actual allowed string prefixes and compare
+ # to what is defined in the tokenize module.
+ self.assertEqual(defined_prefixes, self.determine_valid_prefixes())
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_tools/i18n_data/docstrings.py b/Lib/test/test_tools/i18n_data/docstrings.py
index 151a55a4b56..14559a632da 100644
--- a/Lib/test/test_tools/i18n_data/docstrings.py
+++ b/Lib/test/test_tools/i18n_data/docstrings.py
@@ -1,7 +1,7 @@
"""Module docstring"""
# Test docstring extraction
-from gettext import gettext as _
+from gettext import gettext as _ # noqa: F401
# Empty docstring
diff --git a/Lib/test/test_traceback.py b/Lib/test/test_traceback.py
index b9be87f357f..74b979d0096 100644
--- a/Lib/test/test_traceback.py
+++ b/Lib/test/test_traceback.py
@@ -4188,6 +4188,15 @@ class SuggestionFormattingTestBase:
self.assertNotIn("blech", actual)
self.assertNotIn("oh no!", actual)
+ def test_attribute_error_with_non_string_candidates(self):
+ class T:
+ bluch = 1
+
+ instance = T()
+ instance.__dict__[0] = 1
+ actual = self.get_suggestion(instance, 'blich')
+ self.assertIn("bluch", actual)
+
def test_attribute_error_with_bad_name(self):
def raise_attribute_error_with_bad_name():
raise AttributeError(name=12, obj=23)
@@ -4223,8 +4232,8 @@ class SuggestionFormattingTestBase:
return mod_name
- def get_import_from_suggestion(self, mod_dict, name):
- modname = self.make_module(mod_dict)
+ def get_import_from_suggestion(self, code, name):
+ modname = self.make_module(code)
def callable():
try:
@@ -4301,6 +4310,13 @@ class SuggestionFormattingTestBase:
self.assertIn("'_bluch'", self.get_import_from_suggestion(code, '_luch'))
self.assertNotIn("'_bluch'", self.get_import_from_suggestion(code, 'bluch'))
+ def test_import_from_suggestions_non_string(self):
+ modWithNonStringAttr = textwrap.dedent("""\
+ globals()[0] = 1
+ bluch = 1
+ """)
+ self.assertIn("'bluch'", self.get_import_from_suggestion(modWithNonStringAttr, 'blech'))
+
def test_import_from_suggestions_do_not_trigger_for_long_attributes(self):
code = "blech = None"
@@ -4397,6 +4413,15 @@ class SuggestionFormattingTestBase:
actual = self.get_suggestion(func)
self.assertIn("'ZeroDivisionError'?", actual)
+ def test_name_error_suggestions_with_non_string_candidates(self):
+ def func():
+ abc = 1
+ custom_globals = globals().copy()
+ custom_globals[0] = 1
+ print(eval("abv", custom_globals, locals()))
+ actual = self.get_suggestion(func)
+ self.assertIn("abc", actual)
+
def test_name_error_suggestions_do_not_trigger_for_long_names(self):
def func():
somethingverywronghehehehehehe = None
diff --git a/Lib/test/test_tstring.py b/Lib/test/test_tstring.py
index e72a1ea5417..aabae385567 100644
--- a/Lib/test/test_tstring.py
+++ b/Lib/test/test_tstring.py
@@ -219,6 +219,7 @@ class TestTString(unittest.TestCase, TStringBaseCase):
("t'{lambda:1}'", "t-string: lambda expressions are not allowed "
"without parentheses"),
("t'{x:{;}}'", "t-string: expecting a valid expression after '{'"),
+ ("t'{1:d\n}'", "t-string: newlines are not allowed in format specifiers")
):
with self.subTest(case), self.assertRaisesRegex(SyntaxError, err):
eval(case)
diff --git a/Lib/test/test_type_annotations.py b/Lib/test/test_type_annotations.py
index 2c886bb6d36..c66cb058552 100644
--- a/Lib/test/test_type_annotations.py
+++ b/Lib/test/test_type_annotations.py
@@ -498,6 +498,28 @@ class DeferredEvaluationTests(unittest.TestCase):
self.assertEqual(f.__annotate__(annotationlib.Format.VALUE), annos)
self.assertEqual(f.__annotations__, annos)
+ def test_set_annotations(self):
+ function_code = textwrap.dedent("""
+ def f(x: int):
+ pass
+ """)
+ class_code = textwrap.dedent("""
+ class f:
+ x: int
+ """)
+ for future in (False, True):
+ for label, code in (("function", function_code), ("class", class_code)):
+ with self.subTest(future=future, label=label):
+ if future:
+ code = "from __future__ import annotations\n" + code
+ ns = run_code(code)
+ f = ns["f"]
+ anno = "int" if future else int
+ self.assertEqual(f.__annotations__, {"x": anno})
+
+ f.__annotations__ = {"x": str}
+ self.assertEqual(f.__annotations__, {"x": str})
+
def test_name_clash_with_format(self):
# this test would fail if __annotate__'s parameter was called "format"
# during symbol table construction
diff --git a/Lib/test/test_types.py b/Lib/test/test_types.py
index 3097c7ddf05..fccdcc975e6 100644
--- a/Lib/test/test_types.py
+++ b/Lib/test/test_types.py
@@ -2,7 +2,7 @@
from test.support import (
run_with_locale, cpython_only, no_rerun,
- MISSING_C_DOCSTRINGS, EqualToForwardRef,
+ MISSING_C_DOCSTRINGS, EqualToForwardRef, check_disallow_instantiation,
)
from test.support.script_helper import assert_python_ok
from test.support.import_helper import import_fresh_module
@@ -21,6 +21,7 @@ import types
import unittest.mock
import weakref
import typing
+import re
c_types = import_fresh_module('types', fresh=['_types'])
py_types = import_fresh_module('types', blocked=['_types'])
@@ -517,8 +518,8 @@ class TypesTests(unittest.TestCase):
# and a number after the decimal. This is tricky, because
# a totally empty format specifier means something else.
# So, just use a sign flag
- test(1e200, '+g', '+1e+200')
- test(1e200, '+', '+1e+200')
+ test(1.25e200, '+g', '+1.25e+200')
+ test(1.25e200, '+', '+1.25e+200')
test(1.1e200, '+g', '+1.1e+200')
test(1.1e200, '+', '+1.1e+200')
@@ -1148,8 +1149,7 @@ class UnionTests(unittest.TestCase):
msg='Check for union reference leak.')
def test_instantiation(self):
- with self.assertRaises(TypeError):
- types.UnionType()
+ check_disallow_instantiation(self, types.UnionType)
self.assertIs(int, types.UnionType[int])
self.assertIs(int, types.UnionType[int, int])
self.assertEqual(int | str, types.UnionType[int, str])
@@ -1376,6 +1376,27 @@ class MappingProxyTests(unittest.TestCase):
view = self.mappingproxy(mapping)
self.assertEqual(hash(view), hash(mapping))
+ def test_richcompare(self):
+ mp1 = self.mappingproxy({'a': 1})
+ mp1_2 = self.mappingproxy({'a': 1})
+ mp2 = self.mappingproxy({'a': 2})
+
+ self.assertTrue(mp1 == mp1_2)
+ self.assertFalse(mp1 != mp1_2)
+ self.assertFalse(mp1 == mp2)
+ self.assertTrue(mp1 != mp2)
+
+ msg = "not supported between instances of 'mappingproxy' and 'mappingproxy'"
+
+ with self.assertRaisesRegex(TypeError, msg):
+ mp1 > mp2
+ with self.assertRaisesRegex(TypeError, msg):
+ mp1 < mp1_2
+ with self.assertRaisesRegex(TypeError, msg):
+ mp2 >= mp2
+ with self.assertRaisesRegex(TypeError, msg):
+ mp1_2 <= mp1
+
class ClassCreationTests(unittest.TestCase):
@@ -2010,6 +2031,24 @@ class SimpleNamespaceTests(unittest.TestCase):
self.assertEqual(ns1, ns2)
self.assertNotEqual(ns2, types.SimpleNamespace())
+ def test_richcompare_unsupported(self):
+ ns1 = types.SimpleNamespace(x=1)
+ ns2 = types.SimpleNamespace(y=2)
+
+ msg = re.escape(
+ "not supported between instances of "
+ "'types.SimpleNamespace' and 'types.SimpleNamespace'"
+ )
+
+ with self.assertRaisesRegex(TypeError, msg):
+ ns1 > ns2
+ with self.assertRaisesRegex(TypeError, msg):
+ ns1 >= ns2
+ with self.assertRaisesRegex(TypeError, msg):
+ ns1 < ns2
+ with self.assertRaisesRegex(TypeError, msg):
+ ns1 <= ns2
+
def test_nested(self):
ns1 = types.SimpleNamespace(a=1, b=2)
ns2 = types.SimpleNamespace()
@@ -2513,15 +2552,16 @@ class SubinterpreterTests(unittest.TestCase):
def setUpClass(cls):
global interpreters
try:
- from test.support import interpreters
+ from concurrent import interpreters
except ModuleNotFoundError:
raise unittest.SkipTest('subinterpreters required')
- import test.support.interpreters.channels
+ from test.support import channels # noqa: F401
+ cls.create_channel = staticmethod(channels.create)
@cpython_only
@no_rerun('channels (and queues) might have a refleak; see gh-122199')
def test_static_types_inherited_slots(self):
- rch, sch = interpreters.channels.create()
+ rch, sch = self.create_channel()
script = textwrap.dedent("""
import test.support
@@ -2547,7 +2587,7 @@ class SubinterpreterTests(unittest.TestCase):
main_results = collate_results(raw)
interp = interpreters.create()
- interp.exec('from test.support import interpreters')
+ interp.exec('from concurrent import interpreters')
interp.prepare_main(sch=sch)
interp.exec(script)
raw = rch.recv_nowait()
diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py
index 246be22a0d8..932c7b9c0a5 100644
--- a/Lib/test/test_typing.py
+++ b/Lib/test/test_typing.py
@@ -46,11 +46,10 @@ import abc
import textwrap
import typing
import weakref
-import warnings
import types
from test.support import (
- captured_stderr, cpython_only, infinite_recursion, requires_docstrings, import_helper, run_code,
+ captured_stderr, cpython_only, requires_docstrings, import_helper, run_code,
EqualToForwardRef,
)
from test.typinganndata import (
@@ -1606,7 +1605,10 @@ class TypeVarTupleTests(BaseTestCase):
self.assertEqual(gth(func1), {'args': Unpack[Ts]})
def func2(*args: *tuple[int, str]): pass
- self.assertEqual(gth(func2), {'args': Unpack[tuple[int, str]]})
+ hint = gth(func2)['args']
+ self.assertIsInstance(hint, types.GenericAlias)
+ self.assertEqual(hint.__args__[0], int)
+ self.assertIs(hint.__unpacked__, True)
class CustomVariadic(Generic[*Ts]): pass
@@ -1621,7 +1623,10 @@ class TypeVarTupleTests(BaseTestCase):
{'args': Unpack[Ts]})
def func2(*args: '*tuple[int, str]'): pass
- self.assertEqual(gth(func2), {'args': Unpack[tuple[int, str]]})
+ hint = gth(func2)['args']
+ self.assertIsInstance(hint, types.GenericAlias)
+ self.assertEqual(hint.__args__[0], int)
+ self.assertIs(hint.__unpacked__, True)
class CustomVariadic(Generic[*Ts]): pass
@@ -6304,31 +6309,6 @@ class NoTypeCheckTests(BaseTestCase):
class InternalsTests(BaseTestCase):
- def test_deprecation_for_no_type_params_passed_to__evaluate(self):
- with self.assertWarnsRegex(
- DeprecationWarning,
- (
- "Failing to pass a value to the 'type_params' parameter "
- "of 'typing._eval_type' is deprecated"
- )
- ) as cm:
- self.assertEqual(typing._eval_type(list["int"], globals(), {}), list[int])
-
- self.assertEqual(cm.filename, __file__)
-
- f = ForwardRef("int")
-
- with self.assertWarnsRegex(
- DeprecationWarning,
- (
- "Failing to pass a value to the 'type_params' parameter "
- "of 'typing.ForwardRef._evaluate' is deprecated"
- )
- ) as cm:
- self.assertIs(f._evaluate(globals(), {}, recursive_guard=frozenset()), int)
-
- self.assertEqual(cm.filename, __file__)
-
def test_collect_parameters(self):
typing = import_helper.import_fresh_module("typing")
with self.assertWarnsRegex(
@@ -6859,12 +6839,10 @@ class GetTypeHintsTests(BaseTestCase):
self.assertEqual(hints, {'value': Final})
def test_top_level_class_var(self):
- # https://bugs.python.org/issue45166
- with self.assertRaisesRegex(
- TypeError,
- r'typing.ClassVar\[int\] is not valid as type argument',
- ):
- get_type_hints(ann_module6)
+ # This is not meaningful but we don't raise for it.
+ # https://github.com/python/cpython/issues/133959
+ hints = get_type_hints(ann_module6)
+ self.assertEqual(hints, {'wrong': ClassVar[int]})
def test_get_type_hints_typeddict(self):
self.assertEqual(get_type_hints(TotalMovie), {'title': str, 'year': int})
@@ -6967,6 +6945,11 @@ class GetTypeHintsTests(BaseTestCase):
self.assertEqual(get_type_hints(foo, globals(), locals()),
{'a': Callable[..., T]})
+ def test_special_forms_no_forward(self):
+ def f(x: ClassVar[int]):
+ pass
+ self.assertEqual(get_type_hints(f), {'x': ClassVar[int]})
+
def test_special_forms_forward(self):
class C:
@@ -6982,8 +6965,9 @@ class GetTypeHintsTests(BaseTestCase):
self.assertEqual(get_type_hints(C, globals())['b'], Final[int])
self.assertEqual(get_type_hints(C, globals())['x'], ClassVar)
self.assertEqual(get_type_hints(C, globals())['y'], Final)
- with self.assertRaises(TypeError):
- get_type_hints(CF, globals()),
+ lfi = get_type_hints(CF, globals())['b']
+ self.assertIs(get_origin(lfi), list)
+ self.assertEqual(get_args(lfi), (Final[int],))
def test_union_forward_recursion(self):
ValueList = List['Value']
@@ -7111,6 +7095,24 @@ class GetTypeHintsTests(BaseTestCase):
right_hints = get_type_hints(t.add_right, globals(), locals())
self.assertEqual(right_hints['node'], Node[T])
+ def test_get_type_hints_preserve_generic_alias_subclasses(self):
+ # https://github.com/python/cpython/issues/130870
+ # A real world example of this is `collections.abc.Callable`. When parameterized,
+ # the result is a subclass of `types.GenericAlias`.
+ class MyAlias(types.GenericAlias):
+ pass
+
+ class MyClass:
+ def __class_getitem__(cls, args):
+ return MyAlias(cls, args)
+
+ # Using a forward reference is important, otherwise it works as expected.
+ # `y` tests that the `GenericAlias` subclass is preserved when stripping `Annotated`.
+ def func(x: MyClass['int'], y: MyClass[Annotated[int, ...]]): ...
+
+ assert isinstance(get_type_hints(func)['x'], MyAlias)
+ assert isinstance(get_type_hints(func)['y'], MyAlias)
+
class GetUtilitiesTestCase(TestCase):
def test_get_origin(self):
@@ -7216,33 +7218,113 @@ class GetUtilitiesTestCase(TestCase):
class EvaluateForwardRefTests(BaseTestCase):
def test_evaluate_forward_ref(self):
int_ref = ForwardRef('int')
- missing = ForwardRef('missing')
+ self.assertIs(typing.evaluate_forward_ref(int_ref), int)
self.assertIs(
typing.evaluate_forward_ref(int_ref, type_params=()),
int,
)
self.assertIs(
+ typing.evaluate_forward_ref(int_ref, format=annotationlib.Format.VALUE),
+ int,
+ )
+ self.assertIs(
typing.evaluate_forward_ref(
- int_ref, type_params=(), format=annotationlib.Format.FORWARDREF,
+ int_ref, format=annotationlib.Format.FORWARDREF,
),
int,
)
+ self.assertEqual(
+ typing.evaluate_forward_ref(
+ int_ref, format=annotationlib.Format.STRING,
+ ),
+ 'int',
+ )
+
+ def test_evaluate_forward_ref_undefined(self):
+ missing = ForwardRef('missing')
+ with self.assertRaises(NameError):
+ typing.evaluate_forward_ref(missing)
self.assertIs(
typing.evaluate_forward_ref(
- missing, type_params=(), format=annotationlib.Format.FORWARDREF,
+ missing, format=annotationlib.Format.FORWARDREF,
),
missing,
)
self.assertEqual(
typing.evaluate_forward_ref(
- int_ref, type_params=(), format=annotationlib.Format.STRING,
+ missing, format=annotationlib.Format.STRING,
),
- 'int',
+ "missing",
+ )
+
+ def test_evaluate_forward_ref_nested(self):
+ ref = ForwardRef("int | list['str']")
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref),
+ int | list[str],
)
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref, format=annotationlib.Format.FORWARDREF),
+ int | list[str],
+ )
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref, format=annotationlib.Format.STRING),
+ "int | list['str']",
+ )
+
+ why = ForwardRef('"\'str\'"')
+ self.assertIs(typing.evaluate_forward_ref(why), str)
- def test_evaluate_forward_ref_no_type_params(self):
- ref = ForwardRef('int')
- self.assertIs(typing.evaluate_forward_ref(ref), int)
+ def test_evaluate_forward_ref_none(self):
+ none_ref = ForwardRef('None')
+ self.assertIs(typing.evaluate_forward_ref(none_ref), None)
+
+ def test_globals(self):
+ A = "str"
+ ref = ForwardRef('list[A]')
+ with self.assertRaises(NameError):
+ typing.evaluate_forward_ref(ref)
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref, globals={'A': A}),
+ list[str],
+ )
+
+ def test_owner(self):
+ ref = ForwardRef("A")
+
+ with self.assertRaises(NameError):
+ typing.evaluate_forward_ref(ref)
+
+ # We default to the globals of `owner`,
+ # so it no longer raises `NameError`
+ self.assertIs(
+ typing.evaluate_forward_ref(ref, owner=Loop), A
+ )
+
+ def test_inherited_owner(self):
+ # owner passed to evaluate_forward_ref
+ ref = ForwardRef("list['A']")
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref, owner=Loop),
+ list[A],
+ )
+
+ # owner set on the ForwardRef
+ ref = ForwardRef("list['A']", owner=Loop)
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref),
+ list[A],
+ )
+
+ def test_partial_evaluation(self):
+ ref = ForwardRef("list[A]")
+ with self.assertRaises(NameError):
+ typing.evaluate_forward_ref(ref)
+
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref, format=annotationlib.Format.FORWARDREF),
+ list[EqualToForwardRef('A')],
+ )
class CollectionsAbcTests(BaseTestCase):
diff --git a/Lib/test/test_unittest/test_case.py b/Lib/test/test_unittest/test_case.py
index d66cab146af..cf10e956bf2 100644
--- a/Lib/test/test_unittest/test_case.py
+++ b/Lib/test/test_unittest/test_case.py
@@ -1920,6 +1920,22 @@ test case
with self.assertLogs():
raise ZeroDivisionError("Unexpected")
+ def testAssertLogsWithFormatter(self):
+ # Check alternative formats will be respected
+ format = "[No.1: the larch] %(levelname)s:%(name)s:%(message)s"
+ formatter = logging.Formatter(format)
+ with self.assertNoStderr():
+ with self.assertLogs() as cm:
+ log_foo.info("1")
+ log_foobar.debug("2")
+ self.assertEqual(cm.output, ["INFO:foo:1"])
+ self.assertLogRecords(cm.records, [{'name': 'foo'}])
+ with self.assertLogs(formatter=formatter) as cm:
+ log_foo.info("1")
+ log_foobar.debug("2")
+ self.assertEqual(cm.output, ["[No.1: the larch] INFO:foo:1"])
+ self.assertLogRecords(cm.records, [{'name': 'foo'}])
+
def testAssertNoLogsDefault(self):
with self.assertRaises(self.failureException) as cm:
with self.assertNoLogs():
diff --git a/Lib/test/test_unittest/testmock/testhelpers.py b/Lib/test/test_unittest/testmock/testhelpers.py
index d1e48bde982..0e82c723ec3 100644
--- a/Lib/test/test_unittest/testmock/testhelpers.py
+++ b/Lib/test/test_unittest/testmock/testhelpers.py
@@ -1050,6 +1050,7 @@ class SpecSignatureTest(unittest.TestCase):
create_autospec(WithPostInit()),
]:
with self.subTest(mock=mock):
+ self.assertIsInstance(mock, WithPostInit)
self.assertIsInstance(mock.a, int)
self.assertIsInstance(mock.b, int)
@@ -1072,6 +1073,7 @@ class SpecSignatureTest(unittest.TestCase):
create_autospec(WithDefault(1)),
]:
with self.subTest(mock=mock):
+ self.assertIsInstance(mock, WithDefault)
self.assertIsInstance(mock.a, int)
self.assertIsInstance(mock.b, int)
@@ -1087,6 +1089,7 @@ class SpecSignatureTest(unittest.TestCase):
create_autospec(WithMethod(1)),
]:
with self.subTest(mock=mock):
+ self.assertIsInstance(mock, WithMethod)
self.assertIsInstance(mock.a, int)
mock.b.assert_not_called()
@@ -1102,11 +1105,29 @@ class SpecSignatureTest(unittest.TestCase):
create_autospec(WithNonFields(1)),
]:
with self.subTest(mock=mock):
+ self.assertIsInstance(mock, WithNonFields)
with self.assertRaisesRegex(AttributeError, msg):
mock.a
with self.assertRaisesRegex(AttributeError, msg):
mock.b
+ def test_dataclass_special_attrs(self):
+ @dataclass
+ class Description:
+ name: str
+
+ for mock in [
+ create_autospec(Description, instance=True),
+ create_autospec(Description(1)),
+ ]:
+ with self.subTest(mock=mock):
+ self.assertIsInstance(mock, Description)
+ self.assertIs(mock.__class__, Description)
+ self.assertIsInstance(mock.__dataclass_fields__, MagicMock)
+ self.assertIsInstance(mock.__dataclass_params__, MagicMock)
+ self.assertIsInstance(mock.__match_args__, MagicMock)
+ self.assertIsInstance(mock.__hash__, MagicMock)
+
class TestCallList(unittest.TestCase):
def test_args_list_contains_call_list(self):
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index bc1030eea60..1d889ae7cf4 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -1569,6 +1569,7 @@ class Pathname_Tests(unittest.TestCase):
urllib.request.url2pathname,
url, require_scheme=True)
+ @unittest.skipIf(support.is_emscripten, "Fixed by https://github.com/emscripten-core/emscripten/pull/24593")
def test_url2pathname_resolve_host(self):
fn = urllib.request.url2pathname
sep = os.path.sep
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index aabc360289a..b2bde5a9b1d 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -2,6 +2,7 @@ import sys
import unicodedata
import unittest
import urllib.parse
+from test import support
RFC1808_BASE = "http://a/b/c/d;p?q#f"
RFC2396_BASE = "http://a/b/c/d;p?q"
@@ -156,27 +157,25 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(result3.hostname, result.hostname)
self.assertEqual(result3.port, result.port)
- def test_qsl(self):
- for orig, expect in parse_qsl_test_cases:
- result = urllib.parse.parse_qsl(orig, keep_blank_values=True)
- self.assertEqual(result, expect, "Error parsing %r" % orig)
- expect_without_blanks = [v for v in expect if len(v[1])]
- result = urllib.parse.parse_qsl(orig, keep_blank_values=False)
- self.assertEqual(result, expect_without_blanks,
- "Error parsing %r" % orig)
-
- def test_qs(self):
- for orig, expect in parse_qs_test_cases:
- result = urllib.parse.parse_qs(orig, keep_blank_values=True)
- self.assertEqual(result, expect, "Error parsing %r" % orig)
- expect_without_blanks = {v: expect[v]
- for v in expect if len(expect[v][0])}
- result = urllib.parse.parse_qs(orig, keep_blank_values=False)
- self.assertEqual(result, expect_without_blanks,
- "Error parsing %r" % orig)
-
- def test_roundtrips(self):
- str_cases = [
+ @support.subTests('orig,expect', parse_qsl_test_cases)
+ def test_qsl(self, orig, expect):
+ result = urllib.parse.parse_qsl(orig, keep_blank_values=True)
+ self.assertEqual(result, expect)
+ expect_without_blanks = [v for v in expect if len(v[1])]
+ result = urllib.parse.parse_qsl(orig, keep_blank_values=False)
+ self.assertEqual(result, expect_without_blanks)
+
+ @support.subTests('orig,expect', parse_qs_test_cases)
+ def test_qs(self, orig, expect):
+ result = urllib.parse.parse_qs(orig, keep_blank_values=True)
+ self.assertEqual(result, expect)
+ expect_without_blanks = {v: expect[v]
+ for v in expect if len(expect[v][0])}
+ result = urllib.parse.parse_qs(orig, keep_blank_values=False)
+ self.assertEqual(result, expect_without_blanks)
+
+ @support.subTests('bytes', (False, True))
+ @support.subTests('url,parsed,split', [
('path/to/file',
('', '', 'path/to/file', '', '', ''),
('', '', 'path/to/file', '', '')),
@@ -263,23 +262,21 @@ class UrlParseTestCase(unittest.TestCase):
('sch_me:path/to/file',
('', '', 'sch_me:path/to/file', '', '', ''),
('', '', 'sch_me:path/to/file', '', '')),
- ]
- def _encode(t):
- return (t[0].encode('ascii'),
- tuple(x.encode('ascii') for x in t[1]),
- tuple(x.encode('ascii') for x in t[2]))
- bytes_cases = [_encode(x) for x in str_cases]
- str_cases += [
('schรจme:path/to/file',
('', '', 'schรจme:path/to/file', '', '', ''),
('', '', 'schรจme:path/to/file', '', '')),
- ]
- for url, parsed, split in str_cases + bytes_cases:
- with self.subTest(url):
- self.checkRoundtrips(url, parsed, split)
-
- def test_roundtrips_normalization(self):
- str_cases = [
+ ])
+ def test_roundtrips(self, bytes, url, parsed, split):
+ if bytes:
+ if not url.isascii():
+ self.skipTest('non-ASCII bytes')
+ url = str_encode(url)
+ parsed = tuple_encode(parsed)
+ split = tuple_encode(split)
+ self.checkRoundtrips(url, parsed, split)
+
+ @support.subTests('bytes', (False, True))
+ @support.subTests('url,url2,parsed,split', [
('///path/to/file',
'/path/to/file',
('', '', '/path/to/file', '', '', ''),
@@ -300,22 +297,18 @@ class UrlParseTestCase(unittest.TestCase):
'https:///tmp/junk.txt',
('https', '', '/tmp/junk.txt', '', '', ''),
('https', '', '/tmp/junk.txt', '', '')),
- ]
- def _encode(t):
- return (t[0].encode('ascii'),
- t[1].encode('ascii'),
- tuple(x.encode('ascii') for x in t[2]),
- tuple(x.encode('ascii') for x in t[3]))
- bytes_cases = [_encode(x) for x in str_cases]
- for url, url2, parsed, split in str_cases + bytes_cases:
- with self.subTest(url):
- self.checkRoundtrips(url, parsed, split, url2)
-
- def test_http_roundtrips(self):
- # urllib.parse.urlsplit treats 'http:' as an optimized special case,
- # so we test both 'http:' and 'https:' in all the following.
- # Three cheers for white box knowledge!
- str_cases = [
+ ])
+ def test_roundtrips_normalization(self, bytes, url, url2, parsed, split):
+ if bytes:
+ url = str_encode(url)
+ url2 = str_encode(url2)
+ parsed = tuple_encode(parsed)
+ split = tuple_encode(split)
+ self.checkRoundtrips(url, parsed, split, url2)
+
+ @support.subTests('bytes', (False, True))
+ @support.subTests('scheme', ('http', 'https'))
+ @support.subTests('url,parsed,split', [
('://www.python.org',
('www.python.org', '', '', '', ''),
('www.python.org', '', '', '')),
@@ -331,23 +324,20 @@ class UrlParseTestCase(unittest.TestCase):
('://a/b/c/d;p?q#f',
('a', '/b/c/d', 'p', 'q', 'f'),
('a', '/b/c/d;p', 'q', 'f')),
- ]
- def _encode(t):
- return (t[0].encode('ascii'),
- tuple(x.encode('ascii') for x in t[1]),
- tuple(x.encode('ascii') for x in t[2]))
- bytes_cases = [_encode(x) for x in str_cases]
- str_schemes = ('http', 'https')
- bytes_schemes = (b'http', b'https')
- str_tests = str_schemes, str_cases
- bytes_tests = bytes_schemes, bytes_cases
- for schemes, test_cases in (str_tests, bytes_tests):
- for scheme in schemes:
- for url, parsed, split in test_cases:
- url = scheme + url
- parsed = (scheme,) + parsed
- split = (scheme,) + split
- self.checkRoundtrips(url, parsed, split)
+ ])
+ def test_http_roundtrips(self, bytes, scheme, url, parsed, split):
+ # urllib.parse.urlsplit treats 'http:' as an optimized special case,
+ # so we test both 'http:' and 'https:' in all the following.
+ # Three cheers for white box knowledge!
+ if bytes:
+ scheme = str_encode(scheme)
+ url = str_encode(url)
+ parsed = tuple_encode(parsed)
+ split = tuple_encode(split)
+ url = scheme + url
+ parsed = (scheme,) + parsed
+ split = (scheme,) + split
+ self.checkRoundtrips(url, parsed, split)
def checkJoin(self, base, relurl, expected, *, relroundtrip=True):
with self.subTest(base=base, relurl=relurl):
@@ -363,12 +353,13 @@ class UrlParseTestCase(unittest.TestCase):
relurlb = urllib.parse.urlunsplit(urllib.parse.urlsplit(relurlb))
self.assertEqual(urllib.parse.urljoin(baseb, relurlb), expectedb)
- def test_unparse_parse(self):
- str_cases = ['Python', './Python','x-newscheme://foo.com/stuff','x://y','x:/y','x:/','/',]
- bytes_cases = [x.encode('ascii') for x in str_cases]
- for u in str_cases + bytes_cases:
- self.assertEqual(urllib.parse.urlunsplit(urllib.parse.urlsplit(u)), u)
- self.assertEqual(urllib.parse.urlunparse(urllib.parse.urlparse(u)), u)
+ @support.subTests('bytes', (False, True))
+ @support.subTests('u', ['Python', './Python','x-newscheme://foo.com/stuff','x://y','x:/y','x:/','/',])
+ def test_unparse_parse(self, bytes, u):
+ if bytes:
+ u = str_encode(u)
+ self.assertEqual(urllib.parse.urlunsplit(urllib.parse.urlsplit(u)), u)
+ self.assertEqual(urllib.parse.urlunparse(urllib.parse.urlparse(u)), u)
def test_RFC1808(self):
# "normal" cases from RFC 1808:
@@ -695,8 +686,8 @@ class UrlParseTestCase(unittest.TestCase):
self.checkJoin('///b/c', '///w', '///w')
self.checkJoin('///b/c', 'w', '///b/w')
- def test_RFC2732(self):
- str_cases = [
+ @support.subTests('bytes', (False, True))
+ @support.subTests('url,hostname,port', [
('http://Test.python.org:5432/foo/', 'test.python.org', 5432),
('http://12.34.56.78:5432/foo/', '12.34.56.78', 5432),
('http://[::1]:5432/foo/', '::1', 5432),
@@ -727,26 +718,28 @@ class UrlParseTestCase(unittest.TestCase):
('http://[::12.34.56.78]:/foo/', '::12.34.56.78', None),
('http://[::ffff:12.34.56.78]:/foo/',
'::ffff:12.34.56.78', None),
- ]
- def _encode(t):
- return t[0].encode('ascii'), t[1].encode('ascii'), t[2]
- bytes_cases = [_encode(x) for x in str_cases]
- for url, hostname, port in str_cases + bytes_cases:
- urlparsed = urllib.parse.urlparse(url)
- self.assertEqual((urlparsed.hostname, urlparsed.port) , (hostname, port))
-
- str_cases = [
+ ])
+ def test_RFC2732(self, bytes, url, hostname, port):
+ if bytes:
+ url = str_encode(url)
+ hostname = str_encode(hostname)
+ urlparsed = urllib.parse.urlparse(url)
+ self.assertEqual((urlparsed.hostname, urlparsed.port), (hostname, port))
+
+ @support.subTests('bytes', (False, True))
+ @support.subTests('invalid_url', [
'http://::12.34.56.78]/',
'http://[::1/foo/',
'ftp://[::1/foo/bad]/bad',
'http://[::1/foo/bad]/bad',
- 'http://[::ffff:12.34.56.78']
- bytes_cases = [x.encode('ascii') for x in str_cases]
- for invalid_url in str_cases + bytes_cases:
- self.assertRaises(ValueError, urllib.parse.urlparse, invalid_url)
-
- def test_urldefrag(self):
- str_cases = [
+ 'http://[::ffff:12.34.56.78'])
+ def test_RFC2732_invalid(self, bytes, invalid_url):
+ if bytes:
+ invalid_url = str_encode(invalid_url)
+ self.assertRaises(ValueError, urllib.parse.urlparse, invalid_url)
+
+ @support.subTests('bytes', (False, True))
+ @support.subTests('url,defrag,frag', [
('http://python.org#frag', 'http://python.org', 'frag'),
('http://python.org', 'http://python.org', ''),
('http://python.org/#frag', 'http://python.org/', 'frag'),
@@ -770,18 +763,18 @@ class UrlParseTestCase(unittest.TestCase):
('http:?q#f', 'http:?q', 'f'),
('//a/b/c;p?q#f', '//a/b/c;p?q', 'f'),
('://a/b/c;p?q#f', '://a/b/c;p?q', 'f'),
- ]
- def _encode(t):
- return type(t)(x.encode('ascii') for x in t)
- bytes_cases = [_encode(x) for x in str_cases]
- for url, defrag, frag in str_cases + bytes_cases:
- with self.subTest(url):
- result = urllib.parse.urldefrag(url)
- hash = '#' if isinstance(url, str) else b'#'
- self.assertEqual(result.geturl(), url.rstrip(hash))
- self.assertEqual(result, (defrag, frag))
- self.assertEqual(result.url, defrag)
- self.assertEqual(result.fragment, frag)
+ ])
+ def test_urldefrag(self, bytes, url, defrag, frag):
+ if bytes:
+ url = str_encode(url)
+ defrag = str_encode(defrag)
+ frag = str_encode(frag)
+ result = urllib.parse.urldefrag(url)
+ hash = '#' if isinstance(url, str) else b'#'
+ self.assertEqual(result.geturl(), url.rstrip(hash))
+ self.assertEqual(result, (defrag, frag))
+ self.assertEqual(result.url, defrag)
+ self.assertEqual(result.fragment, frag)
def test_urlsplit_scoped_IPv6(self):
p = urllib.parse.urlsplit('http://[FE80::822a:a8ff:fe49:470c%tESt]:1234')
@@ -981,42 +974,35 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(p.scheme, "https")
self.assertEqual(p.geturl(), "https://www.python.org/")
- def test_attributes_bad_port(self):
+ @support.subTests('bytes', (False, True))
+ @support.subTests('parse', (urllib.parse.urlsplit, urllib.parse.urlparse))
+ @support.subTests('port', ("foo", "1.5", "-1", "0x10", "-0", "1_1", " 1", "1 ", "เฅฌ"))
+ def test_attributes_bad_port(self, bytes, parse, port):
"""Check handling of invalid ports."""
- for bytes in (False, True):
- for parse in (urllib.parse.urlsplit, urllib.parse.urlparse):
- for port in ("foo", "1.5", "-1", "0x10", "-0", "1_1", " 1", "1 ", "เฅฌ"):
- with self.subTest(bytes=bytes, parse=parse, port=port):
- netloc = "www.example.net:" + port
- url = "http://" + netloc + "/"
- if bytes:
- if netloc.isascii() and port.isascii():
- netloc = netloc.encode("ascii")
- url = url.encode("ascii")
- else:
- continue
- p = parse(url)
- self.assertEqual(p.netloc, netloc)
- with self.assertRaises(ValueError):
- p.port
+ netloc = "www.example.net:" + port
+ url = "http://" + netloc + "/"
+ if bytes:
+ if not (netloc.isascii() and port.isascii()):
+ self.skipTest('non-ASCII bytes')
+ netloc = str_encode(netloc)
+ url = str_encode(url)
+ p = parse(url)
+ self.assertEqual(p.netloc, netloc)
+ with self.assertRaises(ValueError):
+ p.port
- def test_attributes_bad_scheme(self):
+ @support.subTests('bytes', (False, True))
+ @support.subTests('parse', (urllib.parse.urlsplit, urllib.parse.urlparse))
+ @support.subTests('scheme', (".", "+", "-", "0", "http&", "เฅฌhttp"))
+ def test_attributes_bad_scheme(self, bytes, parse, scheme):
"""Check handling of invalid schemes."""
- for bytes in (False, True):
- for parse in (urllib.parse.urlsplit, urllib.parse.urlparse):
- for scheme in (".", "+", "-", "0", "http&", "เฅฌhttp"):
- with self.subTest(bytes=bytes, parse=parse, scheme=scheme):
- url = scheme + "://www.example.net"
- if bytes:
- if url.isascii():
- url = url.encode("ascii")
- else:
- continue
- p = parse(url)
- if bytes:
- self.assertEqual(p.scheme, b"")
- else:
- self.assertEqual(p.scheme, "")
+ url = scheme + "://www.example.net"
+ if bytes:
+ if not url.isascii():
+ self.skipTest('non-ASCII bytes')
+ url = url.encode("ascii")
+ p = parse(url)
+ self.assertEqual(p.scheme, b"" if bytes else "")
def test_attributes_without_netloc(self):
# This example is straight from RFC 3261. It looks like it
@@ -1128,24 +1114,21 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff?query"),
(b'x-newscheme', b'foo.com', b'/stuff', b'', b'query', b''))
- def test_default_scheme(self):
+ @support.subTests('func', (urllib.parse.urlparse, urllib.parse.urlsplit))
+ def test_default_scheme(self, func):
# Exercise the scheme parameter of urlparse() and urlsplit()
- for func in (urllib.parse.urlparse, urllib.parse.urlsplit):
- with self.subTest(function=func):
- result = func("http://example.net/", "ftp")
- self.assertEqual(result.scheme, "http")
- result = func(b"http://example.net/", b"ftp")
- self.assertEqual(result.scheme, b"http")
- self.assertEqual(func("path", "ftp").scheme, "ftp")
- self.assertEqual(func("path", scheme="ftp").scheme, "ftp")
- self.assertEqual(func(b"path", scheme=b"ftp").scheme, b"ftp")
- self.assertEqual(func("path").scheme, "")
- self.assertEqual(func(b"path").scheme, b"")
- self.assertEqual(func(b"path", "").scheme, b"")
-
- def test_parse_fragments(self):
- # Exercise the allow_fragments parameter of urlparse() and urlsplit()
- tests = (
+ result = func("http://example.net/", "ftp")
+ self.assertEqual(result.scheme, "http")
+ result = func(b"http://example.net/", b"ftp")
+ self.assertEqual(result.scheme, b"http")
+ self.assertEqual(func("path", "ftp").scheme, "ftp")
+ self.assertEqual(func("path", scheme="ftp").scheme, "ftp")
+ self.assertEqual(func(b"path", scheme=b"ftp").scheme, b"ftp")
+ self.assertEqual(func("path").scheme, "")
+ self.assertEqual(func(b"path").scheme, b"")
+ self.assertEqual(func(b"path", "").scheme, b"")
+
+ @support.subTests('url,attr,expected_frag', (
("http:#frag", "path", "frag"),
("//example.net#frag", "path", "frag"),
("index.html#frag", "path", "frag"),
@@ -1156,24 +1139,24 @@ class UrlParseTestCase(unittest.TestCase):
("//abc#@frag", "path", "@frag"),
("//abc:80#@frag", "path", "@frag"),
("//abc#@frag:80", "path", "@frag:80"),
- )
- for url, attr, expected_frag in tests:
- for func in (urllib.parse.urlparse, urllib.parse.urlsplit):
- if attr == "params" and func is urllib.parse.urlsplit:
- attr = "path"
- with self.subTest(url=url, function=func):
- result = func(url, allow_fragments=False)
- self.assertEqual(result.fragment, "")
- self.assertEndsWith(getattr(result, attr),
- "#" + expected_frag)
- self.assertEqual(func(url, "", False).fragment, "")
-
- result = func(url, allow_fragments=True)
- self.assertEqual(result.fragment, expected_frag)
- self.assertNotEndsWith(getattr(result, attr), expected_frag)
- self.assertEqual(func(url, "", True).fragment,
- expected_frag)
- self.assertEqual(func(url).fragment, expected_frag)
+ ))
+ @support.subTests('func', (urllib.parse.urlparse, urllib.parse.urlsplit))
+ def test_parse_fragments(self, url, attr, expected_frag, func):
+ # Exercise the allow_fragments parameter of urlparse() and urlsplit()
+ if attr == "params" and func is urllib.parse.urlsplit:
+ attr = "path"
+ result = func(url, allow_fragments=False)
+ self.assertEqual(result.fragment, "")
+ self.assertEndsWith(getattr(result, attr),
+ "#" + expected_frag)
+ self.assertEqual(func(url, "", False).fragment, "")
+
+ result = func(url, allow_fragments=True)
+ self.assertEqual(result.fragment, expected_frag)
+ self.assertNotEndsWith(getattr(result, attr), expected_frag)
+ self.assertEqual(func(url, "", True).fragment,
+ expected_frag)
+ self.assertEqual(func(url).fragment, expected_frag)
def test_mixed_types_rejected(self):
# Several functions that process either strings or ASCII encoded bytes
@@ -1199,7 +1182,14 @@ class UrlParseTestCase(unittest.TestCase):
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urljoin(b"http://python.org", "http://python.org")
- def _check_result_type(self, str_type):
+ @support.subTests('result_type', [
+ urllib.parse.DefragResult,
+ urllib.parse.SplitResult,
+ urllib.parse.ParseResult,
+ ])
+ def test_result_pairs(self, result_type):
+ # Check encoding and decoding between result pairs
+ str_type = result_type
num_args = len(str_type._fields)
bytes_type = str_type._encoded_counterpart
self.assertIs(bytes_type._decoded_counterpart, str_type)
@@ -1224,16 +1214,6 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(str_result.encode(encoding, errors), bytes_args)
self.assertEqual(str_result.encode(encoding, errors), bytes_result)
- def test_result_pairs(self):
- # Check encoding and decoding between result pairs
- result_types = [
- urllib.parse.DefragResult,
- urllib.parse.SplitResult,
- urllib.parse.ParseResult,
- ]
- for result_type in result_types:
- self._check_result_type(result_type)
-
def test_parse_qs_encoding(self):
result = urllib.parse.parse_qs("key=\u0141%E9", encoding="latin-1")
self.assertEqual(result, {'key': ['\u0141\xE9']})
@@ -1265,8 +1245,7 @@ class UrlParseTestCase(unittest.TestCase):
urllib.parse.parse_qsl('&'.join(['a=a']*11), max_num_fields=10)
urllib.parse.parse_qsl('&'.join(['a=a']*10), max_num_fields=10)
- def test_parse_qs_separator(self):
- parse_qs_semicolon_cases = [
+ @support.subTests('orig,expect', [
(";", {}),
(";;", {}),
(";a=b", {'a': ['b']}),
@@ -1277,17 +1256,14 @@ class UrlParseTestCase(unittest.TestCase):
(b";a=b", {b'a': [b'b']}),
(b"a=a+b;b=b+c", {b'a': [b'a b'], b'b': [b'b c']}),
(b"a=1;a=2", {b'a': [b'1', b'2']}),
- ]
- for orig, expect in parse_qs_semicolon_cases:
- with self.subTest(f"Original: {orig!r}, Expected: {expect!r}"):
- result = urllib.parse.parse_qs(orig, separator=';')
- self.assertEqual(result, expect, "Error parsing %r" % orig)
- result_bytes = urllib.parse.parse_qs(orig, separator=b';')
- self.assertEqual(result_bytes, expect, "Error parsing %r" % orig)
-
-
- def test_parse_qsl_separator(self):
- parse_qsl_semicolon_cases = [
+ ])
+ def test_parse_qs_separator(self, orig, expect):
+ result = urllib.parse.parse_qs(orig, separator=';')
+ self.assertEqual(result, expect)
+ result_bytes = urllib.parse.parse_qs(orig, separator=b';')
+ self.assertEqual(result_bytes, expect)
+
+ @support.subTests('orig,expect', [
(";", []),
(";;", []),
(";a=b", [('a', 'b')]),
@@ -1298,13 +1274,12 @@ class UrlParseTestCase(unittest.TestCase):
(b";a=b", [(b'a', b'b')]),
(b"a=a+b;b=b+c", [(b'a', b'a b'), (b'b', b'b c')]),
(b"a=1;a=2", [(b'a', b'1'), (b'a', b'2')]),
- ]
- for orig, expect in parse_qsl_semicolon_cases:
- with self.subTest(f"Original: {orig!r}, Expected: {expect!r}"):
- result = urllib.parse.parse_qsl(orig, separator=';')
- self.assertEqual(result, expect, "Error parsing %r" % orig)
- result_bytes = urllib.parse.parse_qsl(orig, separator=b';')
- self.assertEqual(result_bytes, expect, "Error parsing %r" % orig)
+ ])
+ def test_parse_qsl_separator(self, orig, expect):
+ result = urllib.parse.parse_qsl(orig, separator=';')
+ self.assertEqual(result, expect)
+ result_bytes = urllib.parse.parse_qsl(orig, separator=b';')
+ self.assertEqual(result_bytes, expect)
def test_parse_qsl_bytes(self):
self.assertEqual(urllib.parse.parse_qsl(b'a=b'), [(b'a', b'b')])
@@ -1695,11 +1670,12 @@ class Utility_Tests(unittest.TestCase):
self.assertRaises(UnicodeError, urllib.parse._to_bytes,
'http://www.python.org/medi\u00e6val')
- def test_unwrap(self):
- for wrapped_url in ('<URL:scheme://host/path>', '<scheme://host/path>',
- 'URL:scheme://host/path', 'scheme://host/path'):
- url = urllib.parse.unwrap(wrapped_url)
- self.assertEqual(url, 'scheme://host/path')
+ @support.subTests('wrapped_url',
+ ('<URL:scheme://host/path>', '<scheme://host/path>',
+ 'URL:scheme://host/path', 'scheme://host/path'))
+ def test_unwrap(self, wrapped_url):
+ url = urllib.parse.unwrap(wrapped_url)
+ self.assertEqual(url, 'scheme://host/path')
class DeprecationTest(unittest.TestCase):
@@ -1780,5 +1756,11 @@ class DeprecationTest(unittest.TestCase):
'urllib.parse.to_bytes() is deprecated as of 3.8')
+def str_encode(s):
+ return s.encode('ascii')
+
+def tuple_encode(t):
+ return tuple(str_encode(x) for x in t)
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_uuid.py b/Lib/test/test_uuid.py
index 958be5408ce..7ddacf07a2c 100755
--- a/Lib/test/test_uuid.py
+++ b/Lib/test/test_uuid.py
@@ -14,6 +14,7 @@ from unittest import mock
from test import support
from test.support import import_helper
+from test.support.script_helper import assert_python_ok
py_uuid = import_helper.import_fresh_module('uuid', blocked=['_uuid'])
c_uuid = import_helper.import_fresh_module('uuid', fresh=['_uuid'])
@@ -1217,10 +1218,37 @@ class BaseTestUUID:
class TestUUIDWithoutExtModule(BaseTestUUID, unittest.TestCase):
uuid = py_uuid
+
@unittest.skipUnless(c_uuid, 'requires the C _uuid module')
class TestUUIDWithExtModule(BaseTestUUID, unittest.TestCase):
uuid = c_uuid
+ def check_has_stable_libuuid_extractable_node(self):
+ if not self.uuid._has_stable_extractable_node:
+ self.skipTest("libuuid cannot deduce MAC address")
+
+ @unittest.skipUnless(os.name == 'posix', 'POSIX only')
+ def test_unix_getnode_from_libuuid(self):
+ self.check_has_stable_libuuid_extractable_node()
+ script = 'import uuid; print(uuid._unix_getnode())'
+ _, n_a, _ = assert_python_ok('-c', script)
+ _, n_b, _ = assert_python_ok('-c', script)
+ n_a, n_b = n_a.decode().strip(), n_b.decode().strip()
+ self.assertTrue(n_a.isdigit())
+ self.assertTrue(n_b.isdigit())
+ self.assertEqual(n_a, n_b)
+
+ @unittest.skipUnless(os.name == 'nt', 'Windows only')
+ def test_windows_getnode_from_libuuid(self):
+ self.check_has_stable_libuuid_extractable_node()
+ script = 'import uuid; print(uuid._windll_getnode())'
+ _, n_a, _ = assert_python_ok('-c', script)
+ _, n_b, _ = assert_python_ok('-c', script)
+ n_a, n_b = n_a.decode().strip(), n_b.decode().strip()
+ self.assertTrue(n_a.isdigit())
+ self.assertTrue(n_b.isdigit())
+ self.assertEqual(n_a, n_b)
+
class BaseTestInternals:
_uuid = py_uuid
diff --git a/Lib/test/test_venv.py b/Lib/test/test_venv.py
index 12c30e178ae..d62f3fba2d1 100644
--- a/Lib/test/test_venv.py
+++ b/Lib/test/test_venv.py
@@ -1008,7 +1008,7 @@ class EnsurePipTest(BaseTest):
err, flags=re.MULTILINE)
# Ignore warning about missing optional module:
try:
- import ssl
+ import ssl # noqa: F401
except ImportError:
err = re.sub(
"^WARNING: Disabling truststore since ssl support is missing$",
diff --git a/Lib/test/test_webbrowser.py b/Lib/test/test_webbrowser.py
index 4c3ea1cd8df..6b577ae100e 100644
--- a/Lib/test/test_webbrowser.py
+++ b/Lib/test/test_webbrowser.py
@@ -6,7 +6,6 @@ import subprocess
import sys
import unittest
import webbrowser
-from functools import partial
from test import support
from test.support import import_helper
from test.support import is_apple_mobile
diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py
index 38be2cd437f..bf6d5074fde 100644
--- a/Lib/test/test_xml_etree.py
+++ b/Lib/test/test_xml_etree.py
@@ -218,6 +218,33 @@ class ElementTreeTest(unittest.TestCase):
def serialize_check(self, elem, expected):
self.assertEqual(serialize(elem), expected)
+ def test_constructor(self):
+ # Test constructor behavior.
+
+ with self.assertRaises(TypeError):
+ tree = ET.ElementTree("")
+ with self.assertRaises(TypeError):
+ tree = ET.ElementTree(ET.ElementTree())
+
+ def test_setroot(self):
+ # Test _setroot behavior.
+
+ tree = ET.ElementTree()
+ element = ET.Element("tag")
+ tree._setroot(element)
+ self.assertEqual(tree.getroot().tag, "tag")
+ self.assertEqual(tree.getroot(), element)
+
+ # Test behavior with an invalid root element
+
+ tree = ET.ElementTree()
+ with self.assertRaises(TypeError):
+ tree._setroot("")
+ with self.assertRaises(TypeError):
+ tree._setroot(ET.ElementTree())
+ with self.assertRaises(TypeError):
+ tree._setroot(None)
+
def test_interface(self):
# Test element tree interface.
diff --git a/Lib/test/test_zipfile/__main__.py b/Lib/test/test_zipfile/__main__.py
index e25ac946edf..90da74ade38 100644
--- a/Lib/test/test_zipfile/__main__.py
+++ b/Lib/test/test_zipfile/__main__.py
@@ -1,6 +1,6 @@
import unittest
-from . import load_tests # noqa: F401
+from . import load_tests
if __name__ == "__main__":
diff --git a/Lib/test/test_zipfile/_path/_test_params.py b/Lib/test/test_zipfile/_path/_test_params.py
index bc95b4ebf4a..00a9eaf2f99 100644
--- a/Lib/test/test_zipfile/_path/_test_params.py
+++ b/Lib/test/test_zipfile/_path/_test_params.py
@@ -1,5 +1,5 @@
-import types
import functools
+import types
from ._itertools import always_iterable
diff --git a/Lib/test/test_zipfile/_path/test_complexity.py b/Lib/test/test_zipfile/_path/test_complexity.py
index b505dd7c376..7c108fc6ab8 100644
--- a/Lib/test/test_zipfile/_path/test_complexity.py
+++ b/Lib/test/test_zipfile/_path/test_complexity.py
@@ -8,10 +8,8 @@ import zipfile
from ._functools import compose
from ._itertools import consume
-
from ._support import import_or_skip
-
big_o = import_or_skip('big_o')
pytest = import_or_skip('pytest')
diff --git a/Lib/test/test_zipfile/_path/test_path.py b/Lib/test/test_zipfile/_path/test_path.py
index 0afabc0c668..696134023a5 100644
--- a/Lib/test/test_zipfile/_path/test_path.py
+++ b/Lib/test/test_zipfile/_path/test_path.py
@@ -1,6 +1,6 @@
+import contextlib
import io
import itertools
-import contextlib
import pathlib
import pickle
import stat
@@ -9,12 +9,11 @@ import unittest
import zipfile
import zipfile._path
-from test.support.os_helper import temp_dir, FakePath
+from test.support.os_helper import FakePath, temp_dir
from ._functools import compose
from ._itertools import Counter
-
-from ._test_params import parameterize, Invoked
+from ._test_params import Invoked, parameterize
class jaraco:
@@ -193,10 +192,10 @@ class TestPath(unittest.TestCase):
"""EncodingWarning must blame the read_text and open calls."""
assert sys.flags.warn_default_encoding
root = zipfile.Path(alpharep)
- with self.assertWarns(EncodingWarning) as wc:
+ with self.assertWarns(EncodingWarning) as wc: # noqa: F821 (astral-sh/ruff#13296)
root.joinpath("a.txt").read_text()
assert __file__ == wc.filename
- with self.assertWarns(EncodingWarning) as wc:
+ with self.assertWarns(EncodingWarning) as wc: # noqa: F821 (astral-sh/ruff#13296)
root.joinpath("a.txt").open("r").close()
assert __file__ == wc.filename
@@ -365,6 +364,17 @@ class TestPath(unittest.TestCase):
assert root.name == 'alpharep.zip' == root.filename.name
@pass_alpharep
+ def test_root_on_disk(self, alpharep):
+ """
+ The name/stem of the root should match the zipfile on disk.
+
+ This condition must hold across platforms.
+ """
+ root = zipfile.Path(self.zipfile_ondisk(alpharep))
+ assert root.name == 'alpharep.zip' == root.filename.name
+ assert root.stem == 'alpharep' == root.filename.stem
+
+ @pass_alpharep
def test_suffix(self, alpharep):
"""
The suffix of the root should be the suffix of the zipfile.
diff --git a/Lib/test/test_zipfile/_path/write-alpharep.py b/Lib/test/test_zipfile/_path/write-alpharep.py
index 48c09b53717..7418391abad 100644
--- a/Lib/test/test_zipfile/_path/write-alpharep.py
+++ b/Lib/test/test_zipfile/_path/write-alpharep.py
@@ -1,4 +1,3 @@
from . import test_path
-
__name__ == '__main__' and test_path.build_alpharep_fixture().extractall('alpharep')
diff --git a/Lib/test/test_zlib.py b/Lib/test/test_zlib.py
index 4d97fe56f3a..c57ab51eca1 100644
--- a/Lib/test/test_zlib.py
+++ b/Lib/test/test_zlib.py
@@ -119,6 +119,114 @@ class ChecksumTestCase(unittest.TestCase):
self.assertEqual(binascii.crc32(b'spam'), zlib.crc32(b'spam'))
+class ChecksumCombineMixin:
+ """Mixin class for testing checksum combination."""
+
+ N = 1000
+ default_iv: int
+
+ def parse_iv(self, iv):
+ """Parse an IV value.
+
+ - The default IV is returned if *iv* is None.
+ - A random IV is returned if *iv* is -1.
+ - Otherwise, *iv* is returned as is.
+ """
+ if iv is None:
+ return self.default_iv
+ if iv == -1:
+ return random.randint(1, 0x80000000)
+ return iv
+
+ def checksum(self, data, init=None):
+ """Compute the checksum of data with a given initial value.
+
+ The *init* value is parsed by ``parse_iv``.
+ """
+ iv = self.parse_iv(init)
+ return self._checksum(data, iv)
+
+ def _checksum(self, data, init):
+ raise NotImplementedError
+
+ def combine(self, a, b, blen):
+ """Combine two checksums together."""
+ raise NotImplementedError
+
+ def get_random_data(self, data_len, *, iv=None):
+ """Get a triplet (data, iv, checksum)."""
+ data = random.randbytes(data_len)
+ init = self.parse_iv(iv)
+ checksum = self.checksum(data, init)
+ return data, init, checksum
+
+ def test_combine_empty(self):
+ for _ in range(self.N):
+ a, iv, checksum = self.get_random_data(32, iv=-1)
+ res = self.combine(iv, self.checksum(a), len(a))
+ self.assertEqual(res, checksum)
+
+ def test_combine_no_iv(self):
+ for _ in range(self.N):
+ a, _, chk_a = self.get_random_data(32)
+ b, _, chk_b = self.get_random_data(64)
+ res = self.combine(chk_a, chk_b, len(b))
+ self.assertEqual(res, self.checksum(a + b))
+
+ def test_combine_no_iv_invalid_length(self):
+ a, _, chk_a = self.get_random_data(32)
+ b, _, chk_b = self.get_random_data(64)
+ checksum = self.checksum(a + b)
+ for invalid_len in [1, len(a), 48, len(b) + 1, 191]:
+ invalid_res = self.combine(chk_a, chk_b, invalid_len)
+ self.assertNotEqual(invalid_res, checksum)
+
+ self.assertRaises(TypeError, self.combine, 0, 0, "len")
+
+ def test_combine_with_iv(self):
+ for _ in range(self.N):
+ a, iv_a, chk_a_with_iv = self.get_random_data(32, iv=-1)
+ chk_a_no_iv = self.checksum(a)
+ b, iv_b, chk_b_with_iv = self.get_random_data(64, iv=-1)
+ chk_b_no_iv = self.checksum(b)
+
+ # We can represent c = COMBINE(CHK(a, iv_a), CHK(b, iv_b)) as:
+ #
+ # c = CHK(CHK(b'', iv_a) + CHK(a) + CHK(b'', iv_b) + CHK(b))
+ # = COMBINE(
+ # COMBINE(CHK(b'', iv_a), CHK(a)),
+ # COMBINE(CHK(b'', iv_b), CHK(b)),
+ # )
+ # = COMBINE(COMBINE(iv_a, CHK(a)), COMBINE(iv_b, CHK(b)))
+ tmp0 = self.combine(iv_a, chk_a_no_iv, len(a))
+ tmp1 = self.combine(iv_b, chk_b_no_iv, len(b))
+ expected = self.combine(tmp0, tmp1, len(b))
+ checksum = self.combine(chk_a_with_iv, chk_b_with_iv, len(b))
+ self.assertEqual(checksum, expected)
+
+
+class CRC32CombineTestCase(ChecksumCombineMixin, unittest.TestCase):
+
+ default_iv = 0
+
+ def _checksum(self, data, init):
+ return zlib.crc32(data, init)
+
+ def combine(self, a, b, blen):
+ return zlib.crc32_combine(a, b, blen)
+
+
+class Adler32CombineTestCase(ChecksumCombineMixin, unittest.TestCase):
+
+ default_iv = 1
+
+ def _checksum(self, data, init):
+ return zlib.adler32(data, init)
+
+ def combine(self, a, b, blen):
+ return zlib.adler32_combine(a, b, blen)
+
+
# Issue #10276 - check that inputs >=4 GiB are handled correctly.
class ChecksumBigBufferTestCase(unittest.TestCase):
diff --git a/Lib/test/test_zoneinfo/test_zoneinfo.py b/Lib/test/test_zoneinfo/test_zoneinfo.py
index f313e394f49..44e87e71c8e 100644
--- a/Lib/test/test_zoneinfo/test_zoneinfo.py
+++ b/Lib/test/test_zoneinfo/test_zoneinfo.py
@@ -58,6 +58,10 @@ def tearDownModule():
shutil.rmtree(TEMP_DIR)
+class CustomError(Exception):
+ pass
+
+
class TzPathUserMixin:
"""
Adds a setUp() and tearDown() to make TZPATH manipulations thread-safe.
@@ -404,6 +408,25 @@ class ZoneInfoTest(TzPathUserMixin, ZoneInfoTestBase):
self.assertEqual(t.utcoffset(), offset.utcoffset)
self.assertEqual(t.dst(), offset.dst)
+ def test_cache_exception(self):
+ class Incomparable(str):
+ eq_called = False
+ def __eq__(self, other):
+ self.eq_called = True
+ raise CustomError
+ __hash__ = str.__hash__
+
+ key = "America/Los_Angeles"
+ tz1 = self.klass(key)
+ key = Incomparable(key)
+ try:
+ tz2 = self.klass(key)
+ except CustomError:
+ self.assertTrue(key.eq_called)
+ else:
+ self.assertFalse(key.eq_called)
+ self.assertIs(tz2, tz1)
+
class CZoneInfoTest(ZoneInfoTest):
module = c_zoneinfo
@@ -1507,6 +1530,26 @@ class ZoneInfoCacheTest(TzPathUserMixin, ZoneInfoTestBase):
self.assertIsNot(dub0, dub1)
self.assertIs(tok0, tok1)
+ def test_clear_cache_refleak(self):
+ class Stringy(str):
+ allow_comparisons = True
+ def __eq__(self, other):
+ if not self.allow_comparisons:
+ raise CustomError
+ return super().__eq__(other)
+ __hash__ = str.__hash__
+
+ key = Stringy("America/Los_Angeles")
+ self.klass(key)
+ key.allow_comparisons = False
+ try:
+ # Note: This is try/except rather than assertRaises because
+ # there is no guarantee that the key is even still in the cache,
+ # or that the key for the cache is the original `key` object.
+ self.klass.clear_cache(only_keys="America/Los_Angeles")
+ except CustomError:
+ pass
+
class CZoneInfoCacheTest(ZoneInfoCacheTest):
module = c_zoneinfo
diff --git a/Lib/test/test_zoneinfo/test_zoneinfo_property.py b/Lib/test/test_zoneinfo/test_zoneinfo_property.py
index feaa77f3e7f..294c7e9b27a 100644
--- a/Lib/test/test_zoneinfo/test_zoneinfo_property.py
+++ b/Lib/test/test_zoneinfo/test_zoneinfo_property.py
@@ -146,20 +146,24 @@ class ZoneInfoPickleTest(ZoneInfoTestBase):
@add_key_examples
def test_pickle_unpickle_cache(self, key):
zi = self.klass(key)
- pkl_str = pickle.dumps(zi)
- zi_rt = pickle.loads(pkl_str)
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.subTest(proto=proto):
+ pkl_str = pickle.dumps(zi, proto)
+ zi_rt = pickle.loads(pkl_str)
- self.assertIs(zi, zi_rt)
+ self.assertIs(zi, zi_rt)
@hypothesis.given(key=valid_keys())
@add_key_examples
def test_pickle_unpickle_no_cache(self, key):
zi = self.klass.no_cache(key)
- pkl_str = pickle.dumps(zi)
- zi_rt = pickle.loads(pkl_str)
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.subTest(proto=proto):
+ pkl_str = pickle.dumps(zi, proto)
+ zi_rt = pickle.loads(pkl_str)
- self.assertIsNot(zi, zi_rt)
- self.assertEqual(str(zi), str(zi_rt))
+ self.assertIsNot(zi, zi_rt)
+ self.assertEqual(str(zi), str(zi_rt))
@hypothesis.given(key=valid_keys())
@add_key_examples
diff --git a/Lib/test/test_zstd.py b/Lib/test/test_zstd.py
index 34c7c721b1a..90b2adc9665 100644
--- a/Lib/test/test_zstd.py
+++ b/Lib/test/test_zstd.py
@@ -12,7 +12,6 @@ import threading
from test.support.import_helper import import_module
from test.support import threading_helper
from test.support import _1M
-from test.support import Py_GIL_DISABLED
_zstd = import_module("_zstd")
zstd = import_module("compression.zstd")
@@ -63,11 +62,18 @@ SAMPLES = None
TRAINED_DICT = None
-SUPPORT_MULTITHREADING = False
+# Cannot be deferred to setup as it is used to check whether or not to skip
+# tests
+try:
+ SUPPORT_MULTITHREADING = CompressionParameter.nb_workers.bounds() != (0, 0)
+except Exception:
+ SUPPORT_MULTITHREADING = False
+
+C_INT_MIN = -(2**31)
+C_INT_MAX = (2**31) - 1
+
def setUpModule():
- global SUPPORT_MULTITHREADING
- SUPPORT_MULTITHREADING = CompressionParameter.nb_workers.bounds() != (0, 0)
# uncompressed size 130KB, more than a zstd block.
# with a frame epilogue, 4 bytes checksum.
global DAT_130K_D
@@ -196,14 +202,21 @@ class CompressorTestCase(unittest.TestCase):
self.assertRaises(TypeError, ZstdCompressor, zstd_dict=b"abcd1234")
self.assertRaises(TypeError, ZstdCompressor, zstd_dict={1: 2, 3: 4})
- with self.assertRaises(ValueError):
- ZstdCompressor(2**31)
- with self.assertRaises(ValueError):
- ZstdCompressor(options={2**31: 100})
+ # valid range for compression level is [-(1<<17), 22]
+ msg = r'illegal compression level {}; the valid range is \[-?\d+, -?\d+\]'
+ with self.assertRaisesRegex(ValueError, msg.format(C_INT_MAX)):
+ ZstdCompressor(C_INT_MAX)
+ with self.assertRaisesRegex(ValueError, msg.format(C_INT_MIN)):
+ ZstdCompressor(C_INT_MIN)
+ msg = r'illegal compression level; the valid range is \[-?\d+, -?\d+\]'
+ with self.assertRaisesRegex(ValueError, msg):
+ ZstdCompressor(level=-(2**1000))
+ with self.assertRaisesRegex(ValueError, msg):
+ ZstdCompressor(level=2**1000)
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdCompressor(options={CompressionParameter.window_log: 100})
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdCompressor(options={3333: 100})
# Method bad arguments
@@ -254,43 +267,57 @@ class CompressorTestCase(unittest.TestCase):
}
ZstdCompressor(options=d)
- # larger than signed int, ValueError
d1 = d.copy()
- d1[CompressionParameter.ldm_bucket_size_log] = 2**31
- self.assertRaises(ValueError, ZstdCompressor, options=d1)
+ # larger than signed int
+ d1[CompressionParameter.ldm_bucket_size_log] = C_INT_MAX
+ with self.assertRaises(ValueError):
+ ZstdCompressor(options=d1)
+ # smaller than signed int
+ d1[CompressionParameter.ldm_bucket_size_log] = C_INT_MIN
+ with self.assertRaises(ValueError):
+ ZstdCompressor(options=d1)
- # clamp compressionLevel
+ # out of bounds compression level
level_min, level_max = CompressionParameter.compression_level.bounds()
- compress(b'', level_max+1)
- compress(b'', level_min-1)
-
- compress(b'', options={CompressionParameter.compression_level:level_max+1})
- compress(b'', options={CompressionParameter.compression_level:level_min-1})
+ with self.assertRaises(ValueError):
+ compress(b'', level_max+1)
+ with self.assertRaises(ValueError):
+ compress(b'', level_min-1)
+ with self.assertRaises(ValueError):
+ compress(b'', 2**1000)
+ with self.assertRaises(ValueError):
+ compress(b'', -(2**1000))
+ with self.assertRaises(ValueError):
+ compress(b'', options={
+ CompressionParameter.compression_level: level_max+1})
+ with self.assertRaises(ValueError):
+ compress(b'', options={
+ CompressionParameter.compression_level: level_min-1})
# zstd lib doesn't support MT compression
if not SUPPORT_MULTITHREADING:
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdCompressor(options={CompressionParameter.nb_workers:4})
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdCompressor(options={CompressionParameter.job_size:4})
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdCompressor(options={CompressionParameter.overlap_log:4})
# out of bounds error msg
option = {CompressionParameter.window_log:100}
- with self.assertRaisesRegex(ZstdError,
- (r'Error when setting zstd compression parameter "window_log", '
- r'it should \d+ <= value <= \d+, provided value is 100\. '
- r'\((?:32|64)-bit build\)')):
+ with self.assertRaisesRegex(
+ ValueError,
+ "compression parameter 'window_log' received an illegal value 100; "
+ r'the valid range is \[-?\d+, -?\d+\]',
+ ):
compress(b'', options=option)
def test_unknown_compression_parameter(self):
KEY = 100001234
option = {CompressionParameter.compression_level: 10,
KEY: 200000000}
- pattern = (r'Invalid zstd compression parameter.*?'
- fr'"unknown parameter \(key {KEY}\)"')
- with self.assertRaisesRegex(ZstdError, pattern):
+ pattern = rf"invalid compression parameter 'unknown parameter \(key {KEY}\)'"
+ with self.assertRaisesRegex(ValueError, pattern):
ZstdCompressor(options=option)
@unittest.skipIf(not SUPPORT_MULTITHREADING,
@@ -371,6 +398,115 @@ class CompressorTestCase(unittest.TestCase):
c = ZstdCompressor()
self.assertNotEqual(c.compress(b'', c.FLUSH_FRAME), b'')
+ def test_set_pledged_input_size(self):
+ DAT = DECOMPRESSED_100_PLUS_32KB
+ CHUNK_SIZE = len(DAT) // 3
+
+ # wrong value
+ c = ZstdCompressor()
+ with self.assertRaisesRegex(ValueError,
+ r'should be a positive int less than \d+'):
+ c.set_pledged_input_size(-300)
+ # overflow
+ with self.assertRaisesRegex(ValueError,
+ r'should be a positive int less than \d+'):
+ c.set_pledged_input_size(2**64)
+ # ZSTD_CONTENTSIZE_ERROR is invalid
+ with self.assertRaisesRegex(ValueError,
+ r'should be a positive int less than \d+'):
+ c.set_pledged_input_size(2**64-2)
+ # ZSTD_CONTENTSIZE_UNKNOWN should use None
+ with self.assertRaisesRegex(ValueError,
+ r'should be a positive int less than \d+'):
+ c.set_pledged_input_size(2**64-1)
+
+ # check valid values are settable
+ c.set_pledged_input_size(2**63)
+ c.set_pledged_input_size(2**64-3)
+
+ # check that zero means empty frame
+ c = ZstdCompressor(level=1)
+ c.set_pledged_input_size(0)
+ c.compress(b'')
+ dat = c.flush()
+ ret = get_frame_info(dat)
+ self.assertEqual(ret.decompressed_size, 0)
+
+
+ # wrong mode
+ c = ZstdCompressor(level=1)
+ c.compress(b'123456')
+ self.assertEqual(c.last_mode, c.CONTINUE)
+ with self.assertRaisesRegex(ValueError,
+ r'last_mode == FLUSH_FRAME'):
+ c.set_pledged_input_size(300)
+
+ # None value
+ c = ZstdCompressor(level=1)
+ c.set_pledged_input_size(None)
+ dat = c.compress(DAT) + c.flush()
+
+ ret = get_frame_info(dat)
+ self.assertEqual(ret.decompressed_size, None)
+
+ # correct value
+ c = ZstdCompressor(level=1)
+ c.set_pledged_input_size(len(DAT))
+
+ chunks = []
+ posi = 0
+ while posi < len(DAT):
+ dat = c.compress(DAT[posi:posi+CHUNK_SIZE])
+ posi += CHUNK_SIZE
+ chunks.append(dat)
+
+ dat = c.flush()
+ chunks.append(dat)
+ chunks = b''.join(chunks)
+
+ ret = get_frame_info(chunks)
+ self.assertEqual(ret.decompressed_size, len(DAT))
+ self.assertEqual(decompress(chunks), DAT)
+
+ c.set_pledged_input_size(len(DAT)) # the second frame
+ dat = c.compress(DAT) + c.flush()
+
+ ret = get_frame_info(dat)
+ self.assertEqual(ret.decompressed_size, len(DAT))
+ self.assertEqual(decompress(dat), DAT)
+
+ # not enough data
+ c = ZstdCompressor(level=1)
+ c.set_pledged_input_size(len(DAT)+1)
+
+ for start in range(0, len(DAT), CHUNK_SIZE):
+ end = min(start+CHUNK_SIZE, len(DAT))
+ _dat = c.compress(DAT[start:end])
+
+ with self.assertRaises(ZstdError):
+ c.flush()
+
+ # too much data
+ c = ZstdCompressor(level=1)
+ c.set_pledged_input_size(len(DAT))
+
+ for start in range(0, len(DAT), CHUNK_SIZE):
+ end = min(start+CHUNK_SIZE, len(DAT))
+ _dat = c.compress(DAT[start:end])
+
+ with self.assertRaises(ZstdError):
+ c.compress(b'extra', ZstdCompressor.FLUSH_FRAME)
+
+ # content size not set if content_size_flag == 0
+ c = ZstdCompressor(options={CompressionParameter.content_size_flag: 0})
+ c.set_pledged_input_size(10)
+ dat1 = c.compress(b"hello")
+ dat2 = c.compress(b"world")
+ dat3 = c.flush()
+ frame_data = get_frame_info(dat1 + dat2 + dat3)
+ self.assertIsNone(frame_data.decompressed_size)
+
+
class DecompressorTestCase(unittest.TestCase):
def test_simple_decompress_bad_args(self):
@@ -385,12 +521,22 @@ class DecompressorTestCase(unittest.TestCase):
self.assertRaises(TypeError, ZstdDecompressor, options=b'abc')
with self.assertRaises(ValueError):
- ZstdDecompressor(options={2**31 : 100})
+ ZstdDecompressor(options={C_INT_MAX: 100})
+ with self.assertRaises(ValueError):
+ ZstdDecompressor(options={C_INT_MIN: 100})
+ with self.assertRaises(ValueError):
+ ZstdDecompressor(options={0: C_INT_MAX})
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor(options={2**1000: 100})
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor(options={-(2**1000): 100})
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor(options={0: -(2**1000)})
- with self.assertRaises(ZstdError):
- ZstdDecompressor(options={DecompressionParameter.window_log_max:100})
- with self.assertRaises(ZstdError):
- ZstdDecompressor(options={3333 : 100})
+ with self.assertRaises(ValueError):
+ ZstdDecompressor(options={DecompressionParameter.window_log_max: 100})
+ with self.assertRaises(ValueError):
+ ZstdDecompressor(options={3333: 100})
empty = compress(b'')
lzd = ZstdDecompressor()
@@ -403,26 +549,52 @@ class DecompressorTestCase(unittest.TestCase):
d = {DecompressionParameter.window_log_max : 15}
ZstdDecompressor(options=d)
- # larger than signed int, ValueError
d1 = d.copy()
- d1[DecompressionParameter.window_log_max] = 2**31
- self.assertRaises(ValueError, ZstdDecompressor, None, d1)
+ # larger than signed int
+ d1[DecompressionParameter.window_log_max] = 2**1000
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor(None, d1)
+ # smaller than signed int
+ d1[DecompressionParameter.window_log_max] = -(2**1000)
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor(None, d1)
+
+ d1[DecompressionParameter.window_log_max] = C_INT_MAX
+ with self.assertRaises(ValueError):
+ ZstdDecompressor(None, d1)
+ d1[DecompressionParameter.window_log_max] = C_INT_MIN
+ with self.assertRaises(ValueError):
+ ZstdDecompressor(None, d1)
# out of bounds error msg
options = {DecompressionParameter.window_log_max:100}
- with self.assertRaisesRegex(ZstdError,
- (r'Error when setting zstd decompression parameter "window_log_max", '
- r'it should \d+ <= value <= \d+, provided value is 100\. '
- r'\((?:32|64)-bit build\)')):
+ with self.assertRaisesRegex(
+ ValueError,
+ "decompression parameter 'window_log_max' received an illegal value 100; "
+ r'the valid range is \[-?\d+, -?\d+\]',
+ ):
+ decompress(b'', options=options)
+
+ # out of bounds decompression parameter
+ options[DecompressionParameter.window_log_max] = C_INT_MAX
+ with self.assertRaises(ValueError):
+ decompress(b'', options=options)
+ options[DecompressionParameter.window_log_max] = C_INT_MIN
+ with self.assertRaises(ValueError):
+ decompress(b'', options=options)
+ options[DecompressionParameter.window_log_max] = 2**1000
+ with self.assertRaises(OverflowError):
+ decompress(b'', options=options)
+ options[DecompressionParameter.window_log_max] = -(2**1000)
+ with self.assertRaises(OverflowError):
decompress(b'', options=options)
def test_unknown_decompression_parameter(self):
KEY = 100001234
options = {DecompressionParameter.window_log_max: DecompressionParameter.window_log_max.bounds()[1],
KEY: 200000000}
- pattern = (r'Invalid zstd decompression parameter.*?'
- fr'"unknown parameter \(key {KEY}\)"')
- with self.assertRaisesRegex(ZstdError, pattern):
+ pattern = rf"invalid decompression parameter 'unknown parameter \(key {KEY}\)'"
+ with self.assertRaisesRegex(ValueError, pattern):
ZstdDecompressor(options=options)
def test_decompress_epilogue_flags(self):
@@ -1078,27 +1250,41 @@ class ZstdDictTestCase(unittest.TestCase):
ZstdDecompressor(zd)
# wrong type
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
- ZstdCompressor(zstd_dict=(zd, b'123'))
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
+ ZstdCompressor(zstd_dict=[zd, 1])
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
+ ZstdCompressor(zstd_dict=(zd, 1.0))
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
+ ZstdCompressor(zstd_dict=(zd,))
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
ZstdCompressor(zstd_dict=(zd, 1, 2))
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
ZstdCompressor(zstd_dict=(zd, -1))
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
ZstdCompressor(zstd_dict=(zd, 3))
-
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
- ZstdDecompressor(zstd_dict=(zd, b'123'))
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
+ with self.assertRaises(OverflowError):
+ ZstdCompressor(zstd_dict=(zd, 2**1000))
+ with self.assertRaises(OverflowError):
+ ZstdCompressor(zstd_dict=(zd, -2**1000))
+
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
+ ZstdDecompressor(zstd_dict=[zd, 1])
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
+ ZstdDecompressor(zstd_dict=(zd, 1.0))
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
+ ZstdDecompressor((zd,))
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
ZstdDecompressor((zd, 1, 2))
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
ZstdDecompressor((zd, -1))
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
ZstdDecompressor((zd, 3))
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor((zd, 2**1000))
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor((zd, -2**1000))
def test_train_dict(self):
-
-
TRAINED_DICT = train_dict(SAMPLES, DICT_SIZE1)
ZstdDict(TRAINED_DICT.dict_content, is_raw=False)
@@ -1180,17 +1366,36 @@ class ZstdDictTestCase(unittest.TestCase):
with self.assertRaises(TypeError):
_zstd.train_dict({}, (), 100)
with self.assertRaises(TypeError):
+ _zstd.train_dict(bytearray(), (), 100)
+ with self.assertRaises(TypeError):
_zstd.train_dict(b'', 99, 100)
with self.assertRaises(TypeError):
+ _zstd.train_dict(b'', [], 100)
+ with self.assertRaises(TypeError):
_zstd.train_dict(b'', (), 100.1)
+ with self.assertRaises(TypeError):
+ _zstd.train_dict(b'', (99.1,), 100)
+ with self.assertRaises(ValueError):
+ _zstd.train_dict(b'abc', (4, -1), 100)
+ with self.assertRaises(ValueError):
+ _zstd.train_dict(b'abc', (2,), 100)
+ with self.assertRaises(ValueError):
+ _zstd.train_dict(b'', (99,), 100)
# size > size_t
with self.assertRaises(ValueError):
- _zstd.train_dict(b'', (2**64+1,), 100)
+ _zstd.train_dict(b'', (2**1000,), 100)
+ with self.assertRaises(ValueError):
+ _zstd.train_dict(b'', (-2**1000,), 100)
# dict_size <= 0
with self.assertRaises(ValueError):
_zstd.train_dict(b'', (), 0)
+ with self.assertRaises(ValueError):
+ _zstd.train_dict(b'', (), -1)
+
+ with self.assertRaises(ZstdError):
+ _zstd.train_dict(b'', (), 1)
def test_finalize_dict_c(self):
with self.assertRaises(TypeError):
@@ -1200,21 +1405,50 @@ class ZstdDictTestCase(unittest.TestCase):
with self.assertRaises(TypeError):
_zstd.finalize_dict({}, b'', (), 100, 5)
with self.assertRaises(TypeError):
+ _zstd.finalize_dict(bytearray(TRAINED_DICT.dict_content), b'', (), 100, 5)
+ with self.assertRaises(TypeError):
_zstd.finalize_dict(TRAINED_DICT.dict_content, {}, (), 100, 5)
with self.assertRaises(TypeError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, bytearray(), (), 100, 5)
+ with self.assertRaises(TypeError):
_zstd.finalize_dict(TRAINED_DICT.dict_content, b'', 99, 100, 5)
with self.assertRaises(TypeError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', [], 100, 5)
+ with self.assertRaises(TypeError):
_zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 100.1, 5)
with self.assertRaises(TypeError):
_zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 100, 5.1)
+ with self.assertRaises(ValueError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'abc', (4, -1), 100, 5)
+ with self.assertRaises(ValueError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'abc', (2,), 100, 5)
+ with self.assertRaises(ValueError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (99,), 100, 5)
+
# size > size_t
with self.assertRaises(ValueError):
- _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (2**64+1,), 100, 5)
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (2**1000,), 100, 5)
+ with self.assertRaises(ValueError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (-2**1000,), 100, 5)
# dict_size <= 0
with self.assertRaises(ValueError):
_zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 0, 5)
+ with self.assertRaises(ValueError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), -1, 5)
+ with self.assertRaises(OverflowError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 2**1000, 5)
+ with self.assertRaises(OverflowError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), -2**1000, 5)
+
+ with self.assertRaises(OverflowError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 100, 2**1000)
+ with self.assertRaises(OverflowError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 100, -2**1000)
+
+ with self.assertRaises(ZstdError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 100, 5)
def test_train_buffer_protocol_samples(self):
def _nbytes(dat):
@@ -1425,11 +1659,11 @@ class FileTestCase(unittest.TestCase):
ZstdFile(io.BytesIO(COMPRESSED_100_PLUS_32KB), "rw")
with self.assertRaisesRegex(TypeError,
- r"NOT be a CompressionParameter"):
+ r"not be a CompressionParameter"):
ZstdFile(io.BytesIO(), 'rb',
options={CompressionParameter.compression_level:5})
with self.assertRaisesRegex(TypeError,
- r"NOT be a DecompressionParameter"):
+ r"not be a DecompressionParameter"):
ZstdFile(io.BytesIO(), 'wb',
options={DecompressionParameter.window_log_max:21})
@@ -1440,19 +1674,19 @@ class FileTestCase(unittest.TestCase):
with self.assertRaises(TypeError):
ZstdFile(io.BytesIO(), "w", level='asd')
# CHECK_UNKNOWN and anything above CHECK_ID_MAX should be invalid.
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdFile(io.BytesIO(), "w", options={999:9999})
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdFile(io.BytesIO(), "w", options={CompressionParameter.window_log:99})
with self.assertRaises(TypeError):
ZstdFile(io.BytesIO(COMPRESSED_100_PLUS_32KB), "r", options=33)
- with self.assertRaises(ValueError):
+ with self.assertRaises(OverflowError):
ZstdFile(io.BytesIO(COMPRESSED_100_PLUS_32KB),
options={DecompressionParameter.window_log_max:2**31})
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdFile(io.BytesIO(COMPRESSED_100_PLUS_32KB),
options={444:333})
@@ -1468,7 +1702,7 @@ class FileTestCase(unittest.TestCase):
tmp_f.write(DAT_130K_C)
filename = tmp_f.name
- with self.assertRaises(ValueError):
+ with self.assertRaises(TypeError):
ZstdFile(filename, options={'a':'b'})
# for PyPy
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 8d01fd7bce4..7e71755068e 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -86,7 +86,7 @@ def _all_string_prefixes():
# The valid string prefixes. Only contain the lower case versions,
# and don't contain any permutations (include 'fr', but not
# 'rf'). The various permutations will be generated.
- _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
+ _valid_string_prefixes = ['b', 'r', 'u', 'f', 't', 'br', 'fr', 'tr']
# if we add binary f-strings, add: ['fb', 'fbr']
result = {''}
for prefix in _valid_string_prefixes:
@@ -274,7 +274,7 @@ class Untokenizer:
toks_append = self.tokens.append
startline = token[0] in (NEWLINE, NL)
prevstring = False
- in_fstring = 0
+ in_fstring_or_tstring = 0
for tok in _itertools.chain([token], iterable):
toknum, tokval = tok[:2]
@@ -293,10 +293,10 @@ class Untokenizer:
else:
prevstring = False
- if toknum == FSTRING_START:
- in_fstring += 1
- elif toknum == FSTRING_END:
- in_fstring -= 1
+ if toknum in {FSTRING_START, TSTRING_START}:
+ in_fstring_or_tstring += 1
+ elif toknum in {FSTRING_END, TSTRING_END}:
+ in_fstring_or_tstring -= 1
if toknum == INDENT:
indents.append(tokval)
continue
@@ -311,8 +311,8 @@ class Untokenizer:
elif toknum in {FSTRING_MIDDLE, TSTRING_MIDDLE}:
tokval = self.escape_brackets(tokval)
- # Insert a space between two consecutive brackets if we are in an f-string
- if tokval in {"{", "}"} and self.tokens and self.tokens[-1] == tokval and in_fstring:
+ # Insert a space between two consecutive brackets if we are in an f-string or t-string
+ if tokval in {"{", "}"} and self.tokens and self.tokens[-1] == tokval and in_fstring_or_tstring:
tokval = ' ' + tokval
# Insert a space between two consecutive f-strings
diff --git a/Lib/traceback.py b/Lib/traceback.py
index 17b082eced6..a1f175dbbaa 100644
--- a/Lib/traceback.py
+++ b/Lib/traceback.py
@@ -1595,7 +1595,11 @@ def _compute_suggestion_error(exc_value, tb, wrong_name):
if isinstance(exc_value, AttributeError):
obj = exc_value.obj
try:
- d = dir(obj)
+ try:
+ d = dir(obj)
+ except TypeError: # Attributes are unsortable, e.g. int and str
+ d = list(obj.__class__.__dict__.keys()) + list(obj.__dict__.keys())
+ d = sorted([x for x in d if isinstance(x, str)])
hide_underscored = (wrong_name[:1] != '_')
if hide_underscored and tb is not None:
while tb.tb_next is not None:
@@ -1610,7 +1614,11 @@ def _compute_suggestion_error(exc_value, tb, wrong_name):
elif isinstance(exc_value, ImportError):
try:
mod = __import__(exc_value.name)
- d = dir(mod)
+ try:
+ d = dir(mod)
+ except TypeError: # Attributes are unsortable, e.g. int and str
+ d = list(mod.__dict__.keys())
+ d = sorted([x for x in d if isinstance(x, str)])
if wrong_name[:1] != '_':
d = [x for x in d if x[:1] != '_']
except Exception:
@@ -1628,6 +1636,7 @@ def _compute_suggestion_error(exc_value, tb, wrong_name):
+ list(frame.f_globals)
+ list(frame.f_builtins)
)
+ d = [x for x in d if isinstance(x, str)]
# Check first if we are in a method and the instance
# has the wrong name as attribute
diff --git a/Lib/typing.py b/Lib/typing.py
index 98af61be8b0..3ef377b9542 100644
--- a/Lib/typing.py
+++ b/Lib/typing.py
@@ -407,6 +407,17 @@ def _tp_cache(func=None, /, *, typed=False):
return decorator
+def _rebuild_generic_alias(alias: GenericAlias, args: tuple[object, ...]) -> GenericAlias:
+ is_unpacked = alias.__unpacked__
+ if _should_unflatten_callable_args(alias, args):
+ t = alias.__origin__[(args[:-1], args[-1])]
+ else:
+ t = alias.__origin__[args]
+ if is_unpacked:
+ t = Unpack[t]
+ return t
+
+
def _deprecation_warning_for_no_type_params_passed(funcname: str) -> None:
import warnings
@@ -426,10 +437,7 @@ class _Sentinel:
return '<sentinel>'
-_sentinel = _Sentinel()
-
-
-def _eval_type(t, globalns, localns, type_params=_sentinel, *, recursive_guard=frozenset(),
+def _eval_type(t, globalns, localns, type_params, *, recursive_guard=frozenset(),
format=None, owner=None):
"""Evaluate all forward references in the given type t.
@@ -437,9 +445,6 @@ def _eval_type(t, globalns, localns, type_params=_sentinel, *, recursive_guard=f
recursive_guard is used to prevent infinite recursion with a recursive
ForwardRef.
"""
- if type_params is _sentinel:
- _deprecation_warning_for_no_type_params_passed("typing._eval_type")
- type_params = ()
if isinstance(t, _lazy_annotationlib.ForwardRef):
# If the forward_ref has __forward_module__ set, evaluate() infers the globals
# from the module, and it will probably pick better than the globals we have here.
@@ -454,25 +459,20 @@ def _eval_type(t, globalns, localns, type_params=_sentinel, *, recursive_guard=f
_make_forward_ref(arg) if isinstance(arg, str) else arg
for arg in t.__args__
)
- is_unpacked = t.__unpacked__
- if _should_unflatten_callable_args(t, args):
- t = t.__origin__[(args[:-1], args[-1])]
- else:
- t = t.__origin__[args]
- if is_unpacked:
- t = Unpack[t]
+ else:
+ args = t.__args__
ev_args = tuple(
_eval_type(
a, globalns, localns, type_params, recursive_guard=recursive_guard,
format=format, owner=owner,
)
- for a in t.__args__
+ for a in args
)
if ev_args == t.__args__:
return t
if isinstance(t, GenericAlias):
- return GenericAlias(t.__origin__, ev_args)
+ return _rebuild_generic_alias(t, ev_args)
if isinstance(t, Union):
return functools.reduce(operator.or_, ev_args)
else:
@@ -956,12 +956,8 @@ def evaluate_forward_ref(
"""Evaluate a forward reference as a type hint.
This is similar to calling the ForwardRef.evaluate() method,
- but unlike that method, evaluate_forward_ref() also:
-
- * Recursively evaluates forward references nested within the type hint.
- * Rejects certain objects that are not valid type hints.
- * Replaces type hints that evaluate to None with types.NoneType.
- * Supports the *FORWARDREF* and *STRING* formats.
+ but unlike that method, evaluate_forward_ref() also
+ recursively evaluates forward references nested within the type hint.
*forward_ref* must be an instance of ForwardRef. *owner*, if given,
should be the object that holds the annotations that the forward reference
@@ -981,23 +977,24 @@ def evaluate_forward_ref(
if forward_ref.__forward_arg__ in _recursive_guard:
return forward_ref
- try:
- value = forward_ref.evaluate(globals=globals, locals=locals,
- type_params=type_params, owner=owner)
- except NameError:
- if format == _lazy_annotationlib.Format.FORWARDREF:
- return forward_ref
- else:
- raise
-
- type_ = _type_check(
- value,
- "Forward references must evaluate to types.",
- is_argument=forward_ref.__forward_is_argument__,
- allow_special_forms=forward_ref.__forward_is_class__,
- )
+ if format is None:
+ format = _lazy_annotationlib.Format.VALUE
+ value = forward_ref.evaluate(globals=globals, locals=locals,
+ type_params=type_params, owner=owner, format=format)
+
+ if (isinstance(value, _lazy_annotationlib.ForwardRef)
+ and format == _lazy_annotationlib.Format.FORWARDREF):
+ return value
+
+ if isinstance(value, str):
+ value = _make_forward_ref(value, module=forward_ref.__forward_module__,
+ owner=owner or forward_ref.__owner__,
+ is_argument=forward_ref.__forward_is_argument__,
+ is_class=forward_ref.__forward_is_class__)
+ if owner is None:
+ owner = forward_ref.__owner__
return _eval_type(
- type_,
+ value,
globals,
locals,
type_params,
@@ -1863,7 +1860,9 @@ def _allow_reckless_class_checks(depth=2):
The abc and functools modules indiscriminately call isinstance() and
issubclass() on the whole MRO of a user class, which may contain protocols.
"""
- return _caller(depth) in {'abc', 'functools', None}
+ # gh-136047: When `_abc` module is not available, `_py_abc` is required to
+ # allow `_py_abc.ABCMeta` fallback.
+ return _caller(depth) in {'abc', '_py_abc', 'functools', None}
_PROTO_ALLOWLIST = {
@@ -2338,12 +2337,12 @@ def get_type_hints(obj, globalns=None, localns=None, include_extras=False,
# This only affects ForwardRefs.
base_globals, base_locals = base_locals, base_globals
for name, value in ann.items():
- if value is None:
- value = type(None)
if isinstance(value, str):
value = _make_forward_ref(value, is_argument=False, is_class=True)
value = _eval_type(value, base_globals, base_locals, base.__type_params__,
format=format, owner=obj)
+ if value is None:
+ value = type(None)
hints[name] = value
if include_extras or format == Format.STRING:
return hints
@@ -2377,8 +2376,6 @@ def get_type_hints(obj, globalns=None, localns=None, include_extras=False,
localns = globalns
type_params = getattr(obj, "__type_params__", ())
for name, value in hints.items():
- if value is None:
- value = type(None)
if isinstance(value, str):
# class-level forward refs were handled above, this must be either
# a module-level annotation or a function argument annotation
@@ -2387,7 +2384,10 @@ def get_type_hints(obj, globalns=None, localns=None, include_extras=False,
is_argument=not isinstance(obj, types.ModuleType),
is_class=False,
)
- hints[name] = _eval_type(value, globalns, localns, type_params, format=format, owner=obj)
+ value = _eval_type(value, globalns, localns, type_params, format=format, owner=obj)
+ if value is None:
+ value = type(None)
+ hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
@@ -2406,7 +2406,7 @@ def _strip_annotations(t):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
- return GenericAlias(t.__origin__, stripped_args)
+ return _rebuild_generic_alias(t, stripped_args)
if isinstance(t, Union):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
diff --git a/Lib/unittest/_log.py b/Lib/unittest/_log.py
index 94868e5bb95..3d69385ea24 100644
--- a/Lib/unittest/_log.py
+++ b/Lib/unittest/_log.py
@@ -30,7 +30,7 @@ class _AssertLogsContext(_BaseTestCaseContext):
LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s"
- def __init__(self, test_case, logger_name, level, no_logs):
+ def __init__(self, test_case, logger_name, level, no_logs, formatter=None):
_BaseTestCaseContext.__init__(self, test_case)
self.logger_name = logger_name
if level:
@@ -39,13 +39,14 @@ class _AssertLogsContext(_BaseTestCaseContext):
self.level = logging.INFO
self.msg = None
self.no_logs = no_logs
+ self.formatter = formatter
def __enter__(self):
if isinstance(self.logger_name, logging.Logger):
logger = self.logger = self.logger_name
else:
logger = self.logger = logging.getLogger(self.logger_name)
- formatter = logging.Formatter(self.LOGGING_FORMAT)
+ formatter = self.formatter or logging.Formatter(self.LOGGING_FORMAT)
handler = _CapturingHandler()
handler.setLevel(self.level)
handler.setFormatter(formatter)
diff --git a/Lib/unittest/case.py b/Lib/unittest/case.py
index db10de68e4a..eba50839cd3 100644
--- a/Lib/unittest/case.py
+++ b/Lib/unittest/case.py
@@ -849,7 +849,7 @@ class TestCase(object):
context = _AssertNotWarnsContext(expected_warning, self)
return context.handle('_assertNotWarns', args, kwargs)
- def assertLogs(self, logger=None, level=None):
+ def assertLogs(self, logger=None, level=None, formatter=None):
"""Fail unless a log message of level *level* or higher is emitted
on *logger_name* or its children. If omitted, *level* defaults to
INFO and *logger* defaults to the root logger.
@@ -861,6 +861,8 @@ class TestCase(object):
`records` attribute will be a list of the corresponding LogRecord
objects.
+ Optionally supply `formatter` to control how messages are formatted.
+
Example::
with self.assertLogs('foo', level='INFO') as cm:
@@ -871,7 +873,7 @@ class TestCase(object):
"""
# Lazy import to avoid importing logging if it is not needed.
from ._log import _AssertLogsContext
- return _AssertLogsContext(self, logger, level, no_logs=False)
+ return _AssertLogsContext(self, logger, level, no_logs=False, formatter=formatter)
def assertNoLogs(self, logger=None, level=None):
""" Fail unless no log messages of level *level* or higher are emitted
diff --git a/Lib/unittest/mock.py b/Lib/unittest/mock.py
index 55cb4b1f6af..e1dbfdacf56 100644
--- a/Lib/unittest/mock.py
+++ b/Lib/unittest/mock.py
@@ -569,6 +569,11 @@ class NonCallableMock(Base):
__dict__['_mock_methods'] = spec
__dict__['_spec_asyncs'] = _spec_asyncs
+ def _mock_extend_spec_methods(self, spec_methods):
+ methods = self.__dict__.get('_mock_methods') or []
+ methods.extend(spec_methods)
+ self.__dict__['_mock_methods'] = methods
+
def __get_return_value(self):
ret = self._mock_return_value
if self._mock_delegate is not None:
@@ -981,7 +986,7 @@ class NonCallableMock(Base):
def assert_called_once_with(self, /, *args, **kwargs):
- """assert that the mock was called exactly once and that that call was
+ """assert that the mock was called exactly once and that call was
with the specified arguments."""
if not self.call_count == 1:
msg = ("Expected '%s' to be called once. Called %s times.%s"
@@ -2766,14 +2771,16 @@ def create_autospec(spec, spec_set=False, instance=False, _parent=None,
raise InvalidSpecError(f'Cannot autospec a Mock object. '
f'[object={spec!r}]')
is_async_func = _is_async_func(spec)
+ _kwargs = {'spec': spec}
entries = [(entry, _missing) for entry in dir(spec)]
if is_type and instance and is_dataclass(spec):
+ is_dataclass_spec = True
dataclass_fields = fields(spec)
entries.extend((f.name, f.type) for f in dataclass_fields)
- _kwargs = {'spec': [f.name for f in dataclass_fields]}
+ dataclass_spec_list = [f.name for f in dataclass_fields]
else:
- _kwargs = {'spec': spec}
+ is_dataclass_spec = False
if spec_set:
_kwargs = {'spec_set': spec}
@@ -2810,6 +2817,8 @@ def create_autospec(spec, spec_set=False, instance=False, _parent=None,
mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
name=_name, **_kwargs)
+ if is_dataclass_spec:
+ mock._mock_extend_spec_methods(dataclass_spec_list)
if isinstance(spec, FunctionTypes):
# should only happen at the top level because we don't
diff --git a/Lib/uuid.py b/Lib/uuid.py
index 036ffebf67a..313f2fc46cb 100644
--- a/Lib/uuid.py
+++ b/Lib/uuid.py
@@ -633,39 +633,43 @@ def _netstat_getnode():
try:
import _uuid
_generate_time_safe = getattr(_uuid, "generate_time_safe", None)
+ _has_stable_extractable_node = _uuid.has_stable_extractable_node
_UuidCreate = getattr(_uuid, "UuidCreate", None)
except ImportError:
_uuid = None
_generate_time_safe = None
+ _has_stable_extractable_node = False
_UuidCreate = None
def _unix_getnode():
"""Get the hardware address on Unix using the _uuid extension module."""
- if _generate_time_safe:
+ if _generate_time_safe and _has_stable_extractable_node:
uuid_time, _ = _generate_time_safe()
return UUID(bytes=uuid_time).node
def _windll_getnode():
"""Get the hardware address on Windows using the _uuid extension module."""
- if _UuidCreate:
+ if _UuidCreate and _has_stable_extractable_node:
uuid_bytes = _UuidCreate()
return UUID(bytes_le=uuid_bytes).node
def _random_getnode():
"""Get a random node ID."""
- # RFC 4122, $4.1.6 says "For systems with no IEEE address, a randomly or
- # pseudo-randomly generated value may be used; see Section 4.5. The
- # multicast bit must be set in such addresses, in order that they will
- # never conflict with addresses obtained from network cards."
+ # RFC 9562, ยง6.10-3 says that
+ #
+ # Implementations MAY elect to obtain a 48-bit cryptographic-quality
+ # random number as per Section 6.9 to use as the Node ID. [...] [and]
+ # implementations MUST set the least significant bit of the first octet
+ # of the Node ID to 1. This bit is the unicast or multicast bit, which
+ # will never be set in IEEE 802 addresses obtained from network cards.
#
# The "multicast bit" of a MAC address is defined to be "the least
# significant bit of the first octet". This works out to be the 41st bit
# counting from 1 being the least significant bit, or 1<<40.
#
# See https://en.wikipedia.org/w/index.php?title=MAC_address&oldid=1128764812#Universal_vs._local_(U/L_bit)
- import random
- return random.getrandbits(48) | (1 << 40)
+ return int.from_bytes(os.urandom(6)) | (1 << 40)
# _OS_GETTERS, when known, are targeted for a specific OS or platform.
diff --git a/Lib/xml/etree/ElementTree.py b/Lib/xml/etree/ElementTree.py
index 44ab5d18624..dafe5b1b8a0 100644
--- a/Lib/xml/etree/ElementTree.py
+++ b/Lib/xml/etree/ElementTree.py
@@ -527,7 +527,9 @@ class ElementTree:
"""
def __init__(self, element=None, file=None):
- # assert element is None or iselement(element)
+ if element is not None and not iselement(element):
+ raise TypeError('expected an Element, not %s' %
+ type(element).__name__)
self._root = element # first node
if file:
self.parse(file)
@@ -543,7 +545,9 @@ class ElementTree:
with the given element. Use with care!
"""
- # assert iselement(element)
+ if not iselement(element):
+ raise TypeError('expected an Element, not %s'
+ % type(element).__name__)
self._root = element
def parse(self, source, parser=None):
@@ -709,6 +713,8 @@ class ElementTree:
of start/end tags
"""
+ if self._root is None:
+ raise TypeError('ElementTree not initialized')
if not method:
method = "xml"
elif method not in _serialize:
diff --git a/Lib/xmlrpc/server.py b/Lib/xmlrpc/server.py
index 90a356fbb8e..8130c739af2 100644
--- a/Lib/xmlrpc/server.py
+++ b/Lib/xmlrpc/server.py
@@ -578,7 +578,7 @@ class SimpleXMLRPCServer(socketserver.TCPServer,
"""
allow_reuse_address = True
- allow_reuse_port = True
+ allow_reuse_port = False
# Warning: this is for debugging purposes only! Never set this to True in
# production code, as will be sending out sensitive information (exception
diff --git a/Lib/zipfile/_path/__init__.py b/Lib/zipfile/_path/__init__.py
index 5ae16ec970d..faae4c84cae 100644
--- a/Lib/zipfile/_path/__init__.py
+++ b/Lib/zipfile/_path/__init__.py
@@ -7,19 +7,19 @@ https://github.com/python/importlib_metadata/wiki/Development-Methodology
for more detail.
"""
+import functools
import io
-import posixpath
-import zipfile
import itertools
-import contextlib
import pathlib
+import posixpath
import re
import stat
import sys
+import zipfile
+from ._functools import save_method_args
from .glob import Translator
-
__all__ = ['Path']
@@ -86,13 +86,12 @@ class InitializedState:
Mix-in to save the initialization state for pickling.
"""
+ @save_method_args
def __init__(self, *args, **kwargs):
- self.__args = args
- self.__kwargs = kwargs
super().__init__(*args, **kwargs)
def __getstate__(self):
- return self.__args, self.__kwargs
+ return self._saved___init__.args, self._saved___init__.kwargs
def __setstate__(self, state):
args, kwargs = state
@@ -181,22 +180,27 @@ class FastLookup(CompleteDirs):
"""
def namelist(self):
- with contextlib.suppress(AttributeError):
- return self.__names
- self.__names = super().namelist()
- return self.__names
+ return self._namelist
+
+ @functools.cached_property
+ def _namelist(self):
+ return super().namelist()
def _name_set(self):
- with contextlib.suppress(AttributeError):
- return self.__lookup
- self.__lookup = super()._name_set()
- return self.__lookup
+ return self._name_set_prop
+
+ @functools.cached_property
+ def _name_set_prop(self):
+ return super()._name_set()
def _extract_text_encoding(encoding=None, *args, **kwargs):
# compute stack level so that the caller of the caller sees any warning.
is_pypy = sys.implementation.name == 'pypy'
- stack_level = 3 + is_pypy
+ # PyPy no longer special cased after 7.3.19 (or maybe 7.3.18)
+ # See jaraco/zipp#143
+ is_old_pypi = is_pypy and sys.pypy_version_info < (7, 3, 19)
+ stack_level = 3 + is_old_pypi
return io.text_encoding(encoding, stack_level), args, kwargs
@@ -351,7 +355,7 @@ class Path:
return io.TextIOWrapper(stream, encoding, *args, **kwargs)
def _base(self):
- return pathlib.PurePosixPath(self.at or self.root.filename)
+ return pathlib.PurePosixPath(self.at) if self.at else self.filename
@property
def name(self):
diff --git a/Lib/zipfile/_path/_functools.py b/Lib/zipfile/_path/_functools.py
new file mode 100644
index 00000000000..7390be21873
--- /dev/null
+++ b/Lib/zipfile/_path/_functools.py
@@ -0,0 +1,20 @@
+import collections
+import functools
+
+
+# from jaraco.functools 4.0.2
+def save_method_args(method):
+ """
+ Wrap a method such that when it is called, the args and kwargs are
+ saved on the method.
+ """
+ args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') # noqa: PYI024
+
+ @functools.wraps(method)
+ def wrapper(self, /, *args, **kwargs):
+ attr_name = '_saved_' + method.__name__
+ attr = args_and_kwargs(args, kwargs)
+ setattr(self, attr_name, attr)
+ return method(self, *args, **kwargs)
+
+ return wrapper
diff --git a/Lib/zipfile/_path/glob.py b/Lib/zipfile/_path/glob.py
index d7fe45a4947..bd2839304b7 100644
--- a/Lib/zipfile/_path/glob.py
+++ b/Lib/zipfile/_path/glob.py
@@ -1,7 +1,6 @@
import os
import re
-
_default_seps = os.sep + str(os.altsep) * bool(os.altsep)
diff --git a/Lib/zoneinfo/_common.py b/Lib/zoneinfo/_common.py
index 6e05abc3239..03cc42149f9 100644
--- a/Lib/zoneinfo/_common.py
+++ b/Lib/zoneinfo/_common.py
@@ -9,9 +9,13 @@ def load_tzdata(key):
resource_name = components[-1]
try:
- return resources.files(package_name).joinpath(resource_name).open("rb")
+ path = resources.files(package_name).joinpath(resource_name)
+ # gh-85702: Prevent PermissionError on Windows
+ if path.is_dir():
+ raise IsADirectoryError
+ return path.open("rb")
except (ImportError, FileNotFoundError, UnicodeEncodeError, IsADirectoryError):
- # There are three types of exception that can be raised that all amount
+ # There are four types of exception that can be raised that all amount
# to "we cannot find this key":
#
# ImportError: If package_name doesn't exist (e.g. if tzdata is not
diff --git a/Lib/zoneinfo/_zoneinfo.py b/Lib/zoneinfo/_zoneinfo.py
index b77dc0ed391..3ffdb4c8371 100644
--- a/Lib/zoneinfo/_zoneinfo.py
+++ b/Lib/zoneinfo/_zoneinfo.py
@@ -75,12 +75,12 @@ class ZoneInfo(tzinfo):
return obj
@classmethod
- def from_file(cls, fobj, /, key=None):
+ def from_file(cls, file_obj, /, key=None):
obj = super().__new__(cls)
obj._key = key
obj._file_path = None
- obj._load_file(fobj)
- obj._file_repr = repr(fobj)
+ obj._load_file(file_obj)
+ obj._file_repr = repr(file_obj)
# Disable pickling for objects created from files
obj.__reduce__ = obj._file_reduce