Diffstat (limited to 'Lib/test')
-rw-r--r--  Lib/test/.ruff.toml | 7
-rw-r--r--  Lib/test/_code_definitions.py | 30
-rw-r--r--  Lib/test/_test_multiprocessing.py | 51
-rw-r--r--  Lib/test/audit-tests.py | 28
-rw-r--r--  Lib/test/libregrtest/main.py | 23
-rw-r--r--  Lib/test/libregrtest/setup.py | 9
-rw-r--r--  Lib/test/libregrtest/single.py | 2
-rw-r--r--  Lib/test/libregrtest/tsan.py | 2
-rw-r--r--  Lib/test/mp_preload_flush.py | 15
-rw-r--r--  Lib/test/pickletester.py | 5
-rw-r--r--  Lib/test/pythoninfo.py | 13
-rw-r--r--  Lib/test/subprocessdata/fd_status.py | 4
-rw-r--r--  Lib/test/support/__init__.py | 55
-rw-r--r--  Lib/test/support/channels.py (renamed from Lib/test/support/interpreters/channels.py) | 8
-rw-r--r--  Lib/test/support/interpreters/__init__.py | 258
-rw-r--r--  Lib/test/support/interpreters/_crossinterp.py | 102
-rw-r--r--  Lib/test/support/interpreters/queues.py | 289
-rw-r--r--  Lib/test/test__interpchannels.py | 2
-rw-r--r--  Lib/test/test__interpreters.py | 15
-rw-r--r--  Lib/test/test_annotationlib.py | 70
-rw-r--r--  Lib/test/test_ast/test_ast.py | 96
-rw-r--r--  Lib/test/test_asyncgen.py | 9
-rw-r--r--  Lib/test/test_asyncio/test_base_events.py | 59
-rw-r--r--  Lib/test/test_asyncio/test_tools.py | 1560
-rw-r--r--  Lib/test/test_audit.py | 8
-rw-r--r--  Lib/test/test_build_details.py | 16
-rw-r--r--  Lib/test/test_builtin.py | 3
-rw-r--r--  Lib/test/test_calendar.py | 2
-rw-r--r--  Lib/test/test_capi/test_abstract.py | 25
-rw-r--r--  Lib/test/test_capi/test_config.py | 1
-rw-r--r--  Lib/test/test_capi/test_misc.py | 2
-rw-r--r--  Lib/test/test_capi/test_object.py | 1
-rw-r--r--  Lib/test/test_capi/test_opt.py | 275
-rw-r--r--  Lib/test/test_capi/test_sys.py | 64
-rw-r--r--  Lib/test/test_capi/test_type.py | 10
-rw-r--r--  Lib/test/test_capi/test_unicode.py | 21
-rw-r--r--  Lib/test/test_class.py | 1
-rw-r--r--  Lib/test/test_code.py | 53
-rw-r--r--  Lib/test/test_codeccallbacks.py | 1
-rw-r--r--  Lib/test/test_codecs.py | 7
-rw-r--r--  Lib/test/test_collections.py | 2
-rw-r--r--  Lib/test/test_concurrent_futures/test_init.py | 4
-rw-r--r--  Lib/test/test_concurrent_futures/test_interpreter_pool.py | 265
-rw-r--r--  Lib/test/test_concurrent_futures/test_shutdown.py | 58
-rw-r--r--  Lib/test/test_configparser.py | 12
-rw-r--r--  Lib/test/test_cprofile.py | 19
-rw-r--r--  Lib/test/test_crossinterp.py | 2
-rw-r--r--  Lib/test/test_csv.py | 48
-rw-r--r--  Lib/test/test_ctypes/_support.py | 1
-rw-r--r--  Lib/test/test_ctypes/test_byteswap.py | 1
-rw-r--r--  Lib/test/test_ctypes/test_generated_structs.py | 2
-rw-r--r--  Lib/test/test_ctypes/test_incomplete.py | 10
-rw-r--r--  Lib/test/test_ctypes/test_parameters.py | 4
-rw-r--r--  Lib/test/test_dbm.py | 61
-rw-r--r--  Lib/test/test_dbm_gnu.py | 27
-rw-r--r--  Lib/test/test_decimal.py | 3
-rw-r--r--  Lib/test/test_descr.py | 28
-rw-r--r--  Lib/test/test_dict.py | 32
-rw-r--r--  Lib/test/test_difflib.py | 6
-rw-r--r--  Lib/test/test_difflib_expect.html | 48
-rw-r--r--  Lib/test/test_dis.py | 209
-rw-r--r--  Lib/test/test_doctest/sample_doctest_errors.py | 46
-rw-r--r--  Lib/test/test_doctest/test_doctest.py | 447
-rw-r--r--  Lib/test/test_doctest/test_doctest_errors.txt | 14
-rw-r--r--  Lib/test/test_doctest/test_doctest_skip.txt | 2
-rw-r--r--  Lib/test/test_doctest/test_doctest_skip2.txt | 6
-rw-r--r--  Lib/test/test_email/test__header_value_parser.py | 78
-rw-r--r--  Lib/test/test_email/test_email.py | 30
-rw-r--r--  Lib/test/test_enum.py | 2
-rw-r--r--  Lib/test/test_exceptions.py | 2
-rw-r--r--  Lib/test/test_external_inspection.py | 569
-rw-r--r--  Lib/test/test_fcntl.py | 13
-rw-r--r--  Lib/test/test_fileio.py | 12
-rw-r--r--  Lib/test/test_float.py | 2
-rw-r--r--  Lib/test/test_format.py | 10
-rw-r--r--  Lib/test/test_fractions.py | 15
-rw-r--r--  Lib/test/test_free_threading/test_generators.py | 51
-rw-r--r--  Lib/test/test_free_threading/test_heapq.py | 267
-rw-r--r--  Lib/test/test_free_threading/test_io.py | 42
-rw-r--r--  Lib/test/test_free_threading/test_itertools.py | 95
-rw-r--r--  Lib/test/test_free_threading/test_itertools_batched.py | 38
-rw-r--r--  Lib/test/test_free_threading/test_itertools_combinatoric.py | 51
-rw-r--r--  Lib/test/test_fstring.py | 10
-rw-r--r--  Lib/test/test_generated_cases.py | 299
-rw-r--r--  Lib/test/test_genericpath.py | 2
-rw-r--r--  Lib/test/test_getpath.py | 21
-rw-r--r--  Lib/test/test_grammar.py | 14
-rw-r--r--  Lib/test/test_gzip.py | 1
-rw-r--r--  Lib/test/test_hashlib.py | 249
-rw-r--r--  Lib/test/test_hmac.py | 1
-rw-r--r--  Lib/test/test_htmlparser.py | 282
-rw-r--r--  Lib/test/test_http_cookiejar.py | 187
-rw-r--r--  Lib/test/test_httpservers.py | 70
-rw-r--r--  Lib/test/test_idle.py | 2
-rw-r--r--  Lib/test/test_inspect/test_inspect.py | 4
-rw-r--r--  Lib/test/test_interpreters/test_api.py | 865
-rw-r--r--  Lib/test/test_interpreters/test_channels.py | 16
-rw-r--r--  Lib/test/test_interpreters/test_lifecycle.py | 4
-rw-r--r--  Lib/test/test_interpreters/test_queues.py | 87
-rw-r--r--  Lib/test/test_interpreters/test_stress.py | 2
-rw-r--r--  Lib/test/test_interpreters/utils.py | 3
-rw-r--r--  Lib/test/test_io.py | 32
-rw-r--r--  Lib/test/test_ioctl.py | 13
-rw-r--r--  Lib/test/test_ipaddress.py | 26
-rw-r--r--  Lib/test/test_iter.py | 2
-rw-r--r--  Lib/test/test_json/test_dump.py | 8
-rw-r--r--  Lib/test/test_json/test_recursion.py | 1
-rw-r--r--  Lib/test/test_json/test_tool.py | 2
-rw-r--r--  Lib/test/test_list.py | 15
-rw-r--r--  Lib/test/test_listcomps.py | 2
-rw-r--r--  Lib/test/test_locale.py | 4
-rw-r--r--  Lib/test/test_logging.py | 85
-rw-r--r--  Lib/test/test_math.py | 43
-rw-r--r--  Lib/test/test_memoryview.py | 20
-rw-r--r--  Lib/test/test_monitoring.py | 15
-rw-r--r--  Lib/test/test_netrc.py | 13
-rw-r--r--  Lib/test/test_ntpath.py | 201
-rw-r--r--  Lib/test/test_optparse.py | 4
-rw-r--r--  Lib/test/test_os.py | 14
-rw-r--r--  Lib/test/test_pathlib/test_pathlib.py | 8
-rw-r--r--  Lib/test/test_peepholer.py | 101
-rw-r--r--  Lib/test/test_perf_profiler.py | 5
-rw-r--r--  Lib/test/test_platform.py | 16
-rw-r--r--  Lib/test/test_posixpath.py | 236
-rw-r--r--  Lib/test/test_pprint.py | 2
-rw-r--r--  Lib/test/test_pty.py | 1
-rw-r--r--  Lib/test/test_pyclbr.py | 4
-rw-r--r--  Lib/test/test_pydoc/test_pydoc.py | 2
-rw-r--r--  Lib/test/test_pyexpat.py | 20
-rw-r--r--  Lib/test/test_pyrepl/test_pyrepl.py | 39
-rw-r--r--  Lib/test/test_pyrepl/test_unix_console.py | 1
-rw-r--r--  Lib/test/test_pyrepl/test_windows_console.py | 2
-rw-r--r--  Lib/test/test_queue.py | 20
-rw-r--r--  Lib/test/test_random.py | 327
-rw-r--r--  Lib/test/test_re.py | 28
-rw-r--r--  Lib/test/test_readline.py | 8
-rw-r--r--  Lib/test/test_regrtest.py | 65
-rw-r--r--  Lib/test/test_remote_pdb.py | 9
-rw-r--r--  Lib/test/test_reprlib.py | 60
-rw-r--r--  Lib/test/test_shutil.py | 2
-rw-r--r--  Lib/test/test_sqlite3/test_cli.py | 113
-rw-r--r--  Lib/test/test_ssl.py | 43
-rw-r--r--  Lib/test/test_stable_abi_ctypes.py | 4
-rw-r--r--  Lib/test/test_statistics.py | 4
-rw-r--r--  Lib/test/test_str.py | 8
-rw-r--r--  Lib/test/test_string/_support.py | 1
-rw-r--r--  Lib/test/test_strptime.py | 37
-rw-r--r--  Lib/test/test_syntax.py | 42
-rw-r--r--  Lib/test/test_sys.py | 64
-rw-r--r--  Lib/test/test_sysconfig.py | 5
-rw-r--r--  Lib/test/test_tarfile.py | 310
-rw-r--r--  Lib/test/test_threading.py | 65
-rw-r--r--  Lib/test/test_tokenize.py | 78
-rw-r--r--  Lib/test/test_tools/i18n_data/docstrings.py | 2
-rw-r--r--  Lib/test/test_traceback.py | 29
-rw-r--r--  Lib/test/test_tstring.py | 1
-rw-r--r--  Lib/test/test_type_annotations.py | 22
-rw-r--r--  Lib/test/test_types.py | 58
-rw-r--r--  Lib/test/test_typing.py | 147
-rw-r--r--  Lib/test/test_unittest/test_case.py | 16
-rw-r--r--  Lib/test/test_unittest/test_result.py | 37
-rw-r--r--  Lib/test/test_unittest/test_runner.py | 52
-rw-r--r--  Lib/test/test_unittest/testmock/testhelpers.py | 21
-rw-r--r--  Lib/test/test_urllib.py | 1
-rw-r--r--  Lib/test/test_urlparse.py | 398
-rwxr-xr-x  Lib/test/test_uuid.py | 28
-rw-r--r--  Lib/test/test_venv.py | 2
-rw-r--r--  Lib/test/test_webbrowser.py | 1
-rw-r--r--  Lib/test/test_xml_etree.py | 27
-rw-r--r--  Lib/test/test_zipfile/__main__.py | 2
-rw-r--r--  Lib/test/test_zipfile/_path/_test_params.py | 2
-rw-r--r--  Lib/test/test_zipfile/_path/test_complexity.py | 2
-rw-r--r--  Lib/test/test_zipfile/_path/test_path.py | 22
-rw-r--r--  Lib/test/test_zipfile/_path/write-alpharep.py | 1
-rw-r--r--  Lib/test/test_zlib.py | 108
-rw-r--r--  Lib/test/test_zoneinfo/test_zoneinfo.py | 43
-rw-r--r--  Lib/test/test_zoneinfo/test_zoneinfo_property.py | 18
-rw-r--r--  Lib/test/test_zstd.py | 423
178 files changed, 8773 insertions, 2875 deletions
diff --git a/Lib/test/.ruff.toml b/Lib/test/.ruff.toml
index 7aa8a4785d6..f1a967203ce 100644
--- a/Lib/test/.ruff.toml
+++ b/Lib/test/.ruff.toml
@@ -19,5 +19,12 @@ extend-exclude = [
[lint]
select = [
+ "F401", # Unused import
"F811", # Redefinition of unused variable (useful for finding test methods with the same name)
]
+
+[lint.per-file-ignores]
+"*/**/__main__.py" = ["F401"] # Unused import
+"test_import/*.py" = ["F401"] # Unused import
+"test_importlib/*.py" = ["F401"] # Unused import
+"typinganndata/partialexecution/*.py" = ["F401"] # Unused import
diff --git a/Lib/test/_code_definitions.py b/Lib/test/_code_definitions.py
index 733a15b25f6..70c44da2ec6 100644
--- a/Lib/test/_code_definitions.py
+++ b/Lib/test/_code_definitions.py
@@ -57,6 +57,22 @@ def spam_with_globals_and_builtins():
print(res)
+def spam_with_global_and_attr_same_name():
+ try:
+ spam_minimal.spam_minimal
+ except AttributeError:
+ pass
+
+
+def spam_full_args(a, b, /, c, d, *args, e, f, **kwargs):
+ return (a, b, c, d, e, f, args, kwargs)
+
+
+def spam_full_args_with_defaults(a=-1, b=-2, /, c=-3, d=-4, *args,
+ e=-5, f=-6, **kwargs):
+ return (a, b, c, d, e, f, args, kwargs)
+
+
def spam_args_attrs_and_builtins(a, b, /, c, d, *args, e, f, **kwargs):
if args.__len__() > 2:
return None
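A worked call of the new spam_full_args() helper above, showing where each kind of parameter lands in the returned tuple:

    >>> spam_full_args(1, 2, 3, 4, 5, e=6, f=7, g=8)
    (1, 2, 3, 4, 6, 7, (5,), {'g': 8})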
@@ -67,6 +83,10 @@ def spam_returns_arg(x):
return x
+def spam_raises():
+ raise Exception('spam!')
+
+
def spam_with_inner_not_closure():
def eggs():
pass
@@ -177,8 +197,12 @@ TOP_FUNCTIONS = [
spam_minimal,
spam_with_builtins,
spam_with_globals_and_builtins,
+ spam_with_global_and_attr_same_name,
+ spam_full_args,
+ spam_full_args_with_defaults,
spam_args_attrs_and_builtins,
spam_returns_arg,
+ spam_raises,
spam_with_inner_not_closure,
spam_with_inner_closure,
spam_annotated,
@@ -219,8 +243,10 @@ STATELESS_FUNCTIONS = [
spam,
spam_minimal,
spam_with_builtins,
+ spam_full_args,
spam_args_attrs_and_builtins,
spam_returns_arg,
+ spam_raises,
spam_annotated,
spam_with_inner_not_closure,
spam_with_inner_closure,
@@ -238,7 +264,9 @@ STATELESS_FUNCTIONS = [
STATELESS_CODE = [
*STATELESS_FUNCTIONS,
script_with_globals,
+ spam_full_args_with_defaults,
spam_with_globals_and_builtins,
+ spam_with_global_and_attr_same_name,
spam_full,
]
@@ -248,6 +276,7 @@ PURE_SCRIPT_FUNCTIONS = [
script_with_explicit_empty_return,
spam_minimal,
spam_with_builtins,
+ spam_raises,
spam_with_inner_not_closure,
spam_with_inner_closure,
]
@@ -255,6 +284,7 @@ SCRIPT_FUNCTIONS = [
*PURE_SCRIPT_FUNCTIONS,
script_with_globals,
spam_with_globals_and_builtins,
+ spam_with_global_and_attr_same_name,
]
diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py
index 6a20a1eb03e..a1259ff1d63 100644
--- a/Lib/test/_test_multiprocessing.py
+++ b/Lib/test/_test_multiprocessing.py
@@ -6801,6 +6801,35 @@ class _TestSpawnedSysPath(BaseTestCase):
self.assertEqual(child_sys_path[1:], sys.path[1:])
self.assertIsNone(import_error, msg=f"child could not import {self._mod_name}")
+ def test_std_streams_flushed_after_preload(self):
+ # gh-135335: Check fork server flushes standard streams after
+ # preloading modules
+ if multiprocessing.get_start_method() != "forkserver":
+ self.skipTest("forkserver specific test")
+
+ # Create a test module in the temporary directory on the child's path
+ # TODO: This can all be simplified once gh-126631 is fixed and we can
+ # use __main__ instead of a module.
+ dirname = os.path.join(self._temp_dir, 'preloaded_module')
+ init_name = os.path.join(dirname, '__init__.py')
+ os.mkdir(dirname)
+ with open(init_name, "w") as f:
+ cmd = '''if 1:
+ import sys
+ print('stderr', end='', file=sys.stderr)
+ print('stdout', end='', file=sys.stdout)
+ '''
+ f.write(cmd)
+
+ name = os.path.join(os.path.dirname(__file__), 'mp_preload_flush.py')
+ env = {'PYTHONPATH': self._temp_dir}
+ _, out, err = test.support.script_helper.assert_python_ok(name, **env)
+
+ # Check stderr first, as it is more likely to be useful to see in the
+ # event of a failure.
+ self.assertEqual(err.decode().rstrip(), 'stderr')
+ self.assertEqual(out.decode().rstrip(), 'stdout')
+
class MiscTestCase(unittest.TestCase):
def test__all__(self):
@@ -6844,6 +6873,28 @@ class MiscTestCase(unittest.TestCase):
self.assertEqual("332833500", out.decode('utf-8').strip())
self.assertFalse(err, msg=err.decode('utf-8'))
+ def test_forked_thread_not_started(self):
+ # gh-134381: Ensure that a thread that has not been started yet in
+ # the parent process can be started within a forked child process.
+
+ if multiprocessing.get_start_method() != "fork":
+ self.skipTest("fork specific test")
+
+ q = multiprocessing.Queue()
+ t = threading.Thread(target=lambda: q.put("done"), daemon=True)
+
+ def child():
+ t.start()
+ t.join()
+
+ p = multiprocessing.Process(target=child)
+ p.start()
+ p.join(support.SHORT_TIMEOUT)
+
+ self.assertEqual(p.exitcode, 0)
+ self.assertEqual(q.get_nowait(), "done")
+ close_queue(q)
+
#
# Mixins
diff --git a/Lib/test/audit-tests.py b/Lib/test/audit-tests.py
index 08b638e4b8d..6884ac0dbe6 100644
--- a/Lib/test/audit-tests.py
+++ b/Lib/test/audit-tests.py
@@ -643,6 +643,34 @@ def test_assert_unicode():
else:
raise RuntimeError("Expected sys.audit(9) to fail.")
+def test_sys_remote_exec():
+ import tempfile
+
+ pid = os.getpid()
+ event_pid = -1
+ event_script_path = ""
+ remote_event_script_path = ""
+ def hook(event, args):
+ if event not in ["sys.remote_exec", "cpython.remote_debugger_script"]:
+ return
+ print(event, args)
+ match event:
+ case "sys.remote_exec":
+ nonlocal event_pid, event_script_path
+ event_pid = args[0]
+ event_script_path = args[1]
+ case "cpython.remote_debugger_script":
+ nonlocal remote_event_script_path
+ remote_event_script_path = args[0]
+
+ sys.addaudithook(hook)
+ with tempfile.NamedTemporaryFile(mode='w+', delete=True) as tmp_file:
+ tmp_file.write("a = 1+1\n")
+ tmp_file.flush()
+ sys.remote_exec(pid, tmp_file.name)
+ assertEqual(event_pid, pid)
+ assertEqual(event_script_path, tmp_file.name)
+ assertEqual(remote_event_script_path, tmp_file.name)
if __name__ == "__main__":
from test.support import suppress_msvcrt_asserts
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 713cbedb299..a2d01b157ac 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -190,6 +190,12 @@ class Regrtest:
strip_py_suffix(tests)
+ exclude_tests = set()
+ if self.exclude:
+ for arg in self.cmdline_args:
+ exclude_tests.add(arg)
+ self.cmdline_args = []
+
if self.pgo:
# add default PGO tests if no tests are specified
setup_pgo_tests(self.cmdline_args, self.pgo_extended)
@@ -200,17 +206,15 @@ class Regrtest:
if self.tsan_parallel:
setup_tsan_parallel_tests(self.cmdline_args)
- exclude_tests = set()
- if self.exclude:
- for arg in self.cmdline_args:
- exclude_tests.add(arg)
- self.cmdline_args = []
-
alltests = findtests(testdir=self.test_dir,
exclude=exclude_tests)
if not self.fromfile:
selected = tests or self.cmdline_args
+ if exclude_tests:
+ # Support "--pgo/--tsan -x test_xxx" command
+ selected = [name for name in selected
+ if name not in exclude_tests]
if selected:
selected = split_test_packages(selected)
else:
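With the exclusion set now collected before the default PGO/TSAN test lists are added, a command of the form mentioned in the comment above works as expected, for example:

    ./python -m test --pgo -x test_xxx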
@@ -543,8 +547,6 @@ class Regrtest:
self.first_runtests = runtests
self.logger.set_tests(runtests)
- setup_process()
-
if (runtests.hunt_refleak is not None) and (not self.num_workers):
# gh-109739: WindowsLoadTracker thread interferes with refleak check
use_load_tracker = False
@@ -721,10 +723,7 @@ class Regrtest:
self._execute_python(cmd, environ)
def _init(self):
- # Set sys.stdout encoder error handler to backslashreplace,
- # similar to sys.stderr error handler, to avoid UnicodeEncodeError
- # when printing a traceback or any other non-encodable character.
- sys.stdout.reconfigure(errors="backslashreplace")
+ setup_process()
if self.junit_filename and not os.path.isabs(self.junit_filename):
self.junit_filename = os.path.abspath(self.junit_filename)
diff --git a/Lib/test/libregrtest/setup.py b/Lib/test/libregrtest/setup.py
index c3d1f60a400..9bfc414cd61 100644
--- a/Lib/test/libregrtest/setup.py
+++ b/Lib/test/libregrtest/setup.py
@@ -1,5 +1,6 @@
import faulthandler
import gc
+import io
import os
import random
import signal
@@ -52,6 +53,14 @@ def setup_process() -> None:
support.record_original_stdout(sys.stdout)
+ # Set sys.stdout encoder error handler to backslashreplace,
+ # similar to sys.stderr error handler, to avoid UnicodeEncodeError
+ # when printing a traceback or any other non-encodable character.
+ #
+ # Use an assertion to fix mypy error.
+ assert isinstance(sys.stdout, io.TextIOWrapper)
+ sys.stdout.reconfigure(errors="backslashreplace")
+
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py
index 57d7b649d2e..958a915626a 100644
--- a/Lib/test/libregrtest/single.py
+++ b/Lib/test/libregrtest/single.py
@@ -283,7 +283,7 @@ def _runtest(result: TestResult, runtests: RunTests) -> None:
try:
setup_tests(runtests)
- if output_on_failure:
+ if output_on_failure or runtests.pgo:
support.verbose = True
stream = io.StringIO()
diff --git a/Lib/test/libregrtest/tsan.py b/Lib/test/libregrtest/tsan.py
index d984a735bdf..3545c5f999f 100644
--- a/Lib/test/libregrtest/tsan.py
+++ b/Lib/test/libregrtest/tsan.py
@@ -8,7 +8,7 @@ TSAN_TESTS = [
'test_capi.test_pyatomic',
'test_code',
'test_ctypes',
- # 'test_concurrent_futures', # gh-130605: too many data races
+ 'test_concurrent_futures',
'test_enum',
'test_functools',
'test_httpservers',
diff --git a/Lib/test/mp_preload_flush.py b/Lib/test/mp_preload_flush.py
new file mode 100644
index 00000000000..3501554d366
--- /dev/null
+++ b/Lib/test/mp_preload_flush.py
@@ -0,0 +1,15 @@
+import multiprocessing
+import sys
+
+modname = 'preloaded_module'
+if __name__ == '__main__':
+ if modname in sys.modules:
+ raise AssertionError(f'{modname!r} is not in sys.modules')
+ multiprocessing.set_start_method('forkserver')
+ multiprocessing.set_forkserver_preload([modname])
+ for _ in range(2):
+ p = multiprocessing.Process()
+ p.start()
+ p.join()
+elif modname not in sys.modules:
+ raise AssertionError(f'{modname!r} is not in sys.modules')
diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py
index 9d6ae3e4d00..9a3a26a8400 100644
--- a/Lib/test/pickletester.py
+++ b/Lib/test/pickletester.py
@@ -1100,6 +1100,11 @@ class AbstractUnpickleTests:
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
+ def test_large_binstring(self):
+ errmsg = 'BINSTRING pickle has negative byte count'
+ with self.assertRaisesRegex(pickle.UnpicklingError, errmsg):
+ self.loads(b'T\0\0\0\x80')
+
def test_get(self):
pickled = b'((lp100000\ng100000\nt.'
unpickled = self.loads(pickled)
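For context on the malformed pickle used above: b'T' is the BINSTRING opcode, which is followed by a 4-byte little-endian signed length, so this payload encodes a negative byte count. A quick sketch of the decoding:

    import struct
    struct.unpack('<i', b'\0\0\0\x80')[0]   # -2147483648, the "negative byte count" being rejected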
diff --git a/Lib/test/pythoninfo.py b/Lib/test/pythoninfo.py
index e1830f2e6eb..80a262c18a5 100644
--- a/Lib/test/pythoninfo.py
+++ b/Lib/test/pythoninfo.py
@@ -920,10 +920,17 @@ def collect_windows(info_add):
try:
import _winapi
- dll_path = _winapi.GetModuleFileName(sys.dllhandle)
- info_add('windows.dll_path', dll_path)
- except (ImportError, AttributeError):
+ except ImportError:
pass
+ else:
+ try:
+ dll_path = _winapi.GetModuleFileName(sys.dllhandle)
+ info_add('windows.dll_path', dll_path)
+ except AttributeError:
+ pass
+
+ call_func(info_add, 'windows.ansi_code_page', _winapi, 'GetACP')
+ call_func(info_add, 'windows.oem_code_page', _winapi, 'GetOEMCP')
# windows.version_caption: "wmic os get Caption,Version /value" command
import subprocess
diff --git a/Lib/test/subprocessdata/fd_status.py b/Lib/test/subprocessdata/fd_status.py
index d12bd95abee..90e785981ae 100644
--- a/Lib/test/subprocessdata/fd_status.py
+++ b/Lib/test/subprocessdata/fd_status.py
@@ -2,7 +2,7 @@
file descriptors on stdout.
Usage:
-fd_stats.py: check all file descriptors
+fd_status.py: check all file descriptors (up to 255)
fd_status.py fd1 fd2 ...: check only specified file descriptors
"""
@@ -18,7 +18,7 @@ if __name__ == "__main__":
_MAXFD = os.sysconf("SC_OPEN_MAX")
except:
_MAXFD = 256
- test_fds = range(0, _MAXFD)
+ test_fds = range(0, min(_MAXFD, 256))
else:
test_fds = map(int, sys.argv[1:])
for fd in test_fds:
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index b7cd7940eb1..fd39d3f7c95 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -46,6 +46,7 @@ __all__ = [
# sys
"MS_WINDOWS", "is_jython", "is_android", "is_emscripten", "is_wasi",
"is_apple_mobile", "check_impl_detail", "unix_shell", "setswitchinterval",
+ "support_remote_exec_only",
# os
"get_pagesize",
# network
@@ -945,6 +946,31 @@ def check_sizeof(test, o, size):
% (type(o), result, size)
test.assertEqual(result, size, msg)
+def subTests(arg_names, arg_values, /, *, _do_cleanups=False):
+ """Run multiple subtests with different parameters.
+ """
+ single_param = False
+ if isinstance(arg_names, str):
+ arg_names = arg_names.replace(',',' ').split()
+ if len(arg_names) == 1:
+ single_param = True
+ arg_values = tuple(arg_values)
+ def decorator(func):
+ if isinstance(func, type):
+ raise TypeError('subTests() can only decorate methods, not classes')
+ @functools.wraps(func)
+ def wrapper(self, /, *args, **kwargs):
+ for values in arg_values:
+ if single_param:
+ values = (values,)
+ subtest_kwargs = dict(zip(arg_names, values))
+ with self.subTest(**subtest_kwargs):
+ func(self, *args, **kwargs, **subtest_kwargs)
+ if _do_cleanups:
+ self.doCleanups()
+ return wrapper
+ return decorator
+
#=======================================================================
# Decorator/context manager for running a code in a different locale,
# correctly resetting it afterwards.
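A minimal usage sketch for the subTests() helper added above (hypothetical test class; the decorator runs the method once per parameter value inside self.subTest()):

    import unittest
    from test import support

    class MathTests(unittest.TestCase):
        @support.subTests('n', [0, 1, 2])
        def test_square_is_non_negative(self, n):
            self.assertGreaterEqual(n * n, 0)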
@@ -1084,7 +1110,7 @@ def set_memlimit(limit: str) -> None:
global real_max_memuse
memlimit = _parse_memlimit(limit)
if memlimit < _2G - 1:
- raise ValueError('Memory limit {limit!r} too low to be useful')
+ raise ValueError(f'Memory limit {limit!r} too low to be useful')
real_max_memuse = memlimit
memlimit = min(memlimit, MAX_Py_ssize_t)
@@ -1101,7 +1127,6 @@ class _MemoryWatchdog:
self.started = False
def start(self):
- import warnings
try:
f = open(self.procfile, 'r')
except OSError as e:
@@ -2308,6 +2333,7 @@ def check_disallow_instantiation(testcase, tp, *args, **kwds):
qualname = f"{name}"
msg = f"cannot create '{re.escape(qualname)}' instances"
testcase.assertRaisesRegex(TypeError, msg, tp, *args, **kwds)
+ testcase.assertRaisesRegex(TypeError, msg, tp.__new__, tp, *args, **kwds)
def get_recursion_depth():
"""Get the recursion depth of the caller function.
@@ -2359,7 +2385,7 @@ def infinite_recursion(max_depth=None):
# very deep recursion.
max_depth = 20_000
elif max_depth < 3:
- raise ValueError("max_depth must be at least 3, got {max_depth}")
+ raise ValueError(f"max_depth must be at least 3, got {max_depth}")
depth = get_recursion_depth()
depth = max(depth - 1, 1) # Ignore infinite_recursion() frame.
limit = depth + max_depth
@@ -2728,7 +2754,7 @@ def iter_builtin_types():
# Fall back to making a best-effort guess.
if hasattr(object, '__flags__'):
# Look for any type object with the Py_TPFLAGS_STATIC_BUILTIN flag set.
- import datetime
+ import datetime # noqa: F401
seen = set()
for cls, subs in walk_class_hierarchy(object):
if cls in seen:
@@ -3045,6 +3071,27 @@ def is_libssl_fips_mode():
return False # more of a maybe, unless we add this to the _ssl module.
return get_fips_mode() != 0
+def _supports_remote_attaching():
+ PROCESS_VM_READV_SUPPORTED = False
+
+ try:
+ from _remote_debugging import PROCESS_VM_READV_SUPPORTED
+ except ImportError:
+ pass
+
+ return PROCESS_VM_READV_SUPPORTED
+
+def _support_remote_exec_only_impl():
+ if not sys.is_remote_debug_enabled():
+ return unittest.skip("Remote debugging is not enabled")
+ if sys.platform not in ("darwin", "linux", "win32"):
+ return unittest.skip("Test only runs on Linux, Windows and macOS")
+ if sys.platform == "linux" and not _supports_remote_attaching():
+ return unittest.skip("Test only runs on Linux with process_vm_readv support")
+ return _id
+
+def support_remote_exec_only(test):
+ return _support_remote_exec_only_impl()(test)
class EqualToForwardRef:
"""Helper to ease use of annotationlib.ForwardRef in tests.
diff --git a/Lib/test/support/interpreters/channels.py b/Lib/test/support/channels.py
index 3b6e0f0effd..b2de24d9d3e 100644
--- a/Lib/test/support/interpreters/channels.py
+++ b/Lib/test/support/channels.py
@@ -2,14 +2,14 @@
import time
import _interpchannels as _channels
-from . import _crossinterp
+from concurrent.interpreters import _crossinterp
# aliases:
from _interpchannels import (
- ChannelError, ChannelNotFoundError, ChannelClosedError,
- ChannelEmptyError, ChannelNotEmptyError,
+ ChannelError, ChannelNotFoundError, ChannelClosedError, # noqa: F401
+ ChannelEmptyError, ChannelNotEmptyError, # noqa: F401
)
-from ._crossinterp import (
+from concurrent.interpreters._crossinterp import (
UNBOUND_ERROR, UNBOUND_REMOVE,
)
diff --git a/Lib/test/support/interpreters/__init__.py b/Lib/test/support/interpreters/__init__.py
deleted file mode 100644
index e067f259364..00000000000
--- a/Lib/test/support/interpreters/__init__.py
+++ /dev/null
@@ -1,258 +0,0 @@
-"""Subinterpreters High Level Module."""
-
-import threading
-import weakref
-import _interpreters
-
-# aliases:
-from _interpreters import (
- InterpreterError, InterpreterNotFoundError, NotShareableError,
- is_shareable,
-)
-
-
-__all__ = [
- 'get_current', 'get_main', 'create', 'list_all', 'is_shareable',
- 'Interpreter',
- 'InterpreterError', 'InterpreterNotFoundError', 'ExecutionFailed',
- 'NotShareableError',
- 'create_queue', 'Queue', 'QueueEmpty', 'QueueFull',
-]
-
-
-_queuemod = None
-
-def __getattr__(name):
- if name in ('Queue', 'QueueEmpty', 'QueueFull', 'create_queue'):
- global create_queue, Queue, QueueEmpty, QueueFull
- ns = globals()
- from .queues import (
- create as create_queue,
- Queue, QueueEmpty, QueueFull,
- )
- return ns[name]
- else:
- raise AttributeError(name)
-
-
-_EXEC_FAILURE_STR = """
-{superstr}
-
-Uncaught in the interpreter:
-
-{formatted}
-""".strip()
-
-class ExecutionFailed(InterpreterError):
- """An unhandled exception happened during execution.
-
- This is raised from Interpreter.exec() and Interpreter.call().
- """
-
- def __init__(self, excinfo):
- msg = excinfo.formatted
- if not msg:
- if excinfo.type and excinfo.msg:
- msg = f'{excinfo.type.__name__}: {excinfo.msg}'
- else:
- msg = excinfo.type.__name__ or excinfo.msg
- super().__init__(msg)
- self.excinfo = excinfo
-
- def __str__(self):
- try:
- formatted = self.excinfo.errdisplay
- except Exception:
- return super().__str__()
- else:
- return _EXEC_FAILURE_STR.format(
- superstr=super().__str__(),
- formatted=formatted,
- )
-
-
-def create():
- """Return a new (idle) Python interpreter."""
- id = _interpreters.create(reqrefs=True)
- return Interpreter(id, _ownsref=True)
-
-
-def list_all():
- """Return all existing interpreters."""
- return [Interpreter(id, _whence=whence)
- for id, whence in _interpreters.list_all(require_ready=True)]
-
-
-def get_current():
- """Return the currently running interpreter."""
- id, whence = _interpreters.get_current()
- return Interpreter(id, _whence=whence)
-
-
-def get_main():
- """Return the main interpreter."""
- id, whence = _interpreters.get_main()
- assert whence == _interpreters.WHENCE_RUNTIME, repr(whence)
- return Interpreter(id, _whence=whence)
-
-
-_known = weakref.WeakValueDictionary()
-
-class Interpreter:
- """A single Python interpreter.
-
- Attributes:
-
- "id" - the unique process-global ID number for the interpreter
- "whence" - indicates where the interpreter was created
-
- If the interpreter wasn't created by this module
- then any method that modifies the interpreter will fail,
- i.e. .close(), .prepare_main(), .exec(), and .call()
- """
-
- _WHENCE_TO_STR = {
- _interpreters.WHENCE_UNKNOWN: 'unknown',
- _interpreters.WHENCE_RUNTIME: 'runtime init',
- _interpreters.WHENCE_LEGACY_CAPI: 'legacy C-API',
- _interpreters.WHENCE_CAPI: 'C-API',
- _interpreters.WHENCE_XI: 'cross-interpreter C-API',
- _interpreters.WHENCE_STDLIB: '_interpreters module',
- }
-
- def __new__(cls, id, /, _whence=None, _ownsref=None):
- # There is only one instance for any given ID.
- if not isinstance(id, int):
- raise TypeError(f'id must be an int, got {id!r}')
- id = int(id)
- if _whence is None:
- if _ownsref:
- _whence = _interpreters.WHENCE_STDLIB
- else:
- _whence = _interpreters.whence(id)
- assert _whence in cls._WHENCE_TO_STR, repr(_whence)
- if _ownsref is None:
- _ownsref = (_whence == _interpreters.WHENCE_STDLIB)
- try:
- self = _known[id]
- assert hasattr(self, '_ownsref')
- except KeyError:
- self = super().__new__(cls)
- _known[id] = self
- self._id = id
- self._whence = _whence
- self._ownsref = _ownsref
- if _ownsref:
- # This may raise InterpreterNotFoundError:
- _interpreters.incref(id)
- return self
-
- def __repr__(self):
- return f'{type(self).__name__}({self.id})'
-
- def __hash__(self):
- return hash(self._id)
-
- def __del__(self):
- self._decref()
-
- # for pickling:
- def __getnewargs__(self):
- return (self._id,)
-
- # for pickling:
- def __getstate__(self):
- return None
-
- def _decref(self):
- if not self._ownsref:
- return
- self._ownsref = False
- try:
- _interpreters.decref(self._id)
- except InterpreterNotFoundError:
- pass
-
- @property
- def id(self):
- return self._id
-
- @property
- def whence(self):
- return self._WHENCE_TO_STR[self._whence]
-
- def is_running(self):
- """Return whether or not the identified interpreter is running."""
- return _interpreters.is_running(self._id)
-
- # Everything past here is available only to interpreters created by
- # interpreters.create().
-
- def close(self):
- """Finalize and destroy the interpreter.
-
- Attempting to destroy the current interpreter results
- in an InterpreterError.
- """
- return _interpreters.destroy(self._id, restrict=True)
-
- def prepare_main(self, ns=None, /, **kwargs):
- """Bind the given values into the interpreter's __main__.
-
- The values must be shareable.
- """
- ns = dict(ns, **kwargs) if ns is not None else kwargs
- _interpreters.set___main___attrs(self._id, ns, restrict=True)
-
- def exec(self, code, /):
- """Run the given source code in the interpreter.
-
- This is essentially the same as calling the builtin "exec"
- with this interpreter, using the __dict__ of its __main__
- module as both globals and locals.
-
- There is no return value.
-
- If the code raises an unhandled exception then an ExecutionFailed
- exception is raised, which summarizes the unhandled exception.
- The actual exception is discarded because objects cannot be
- shared between interpreters.
-
- This blocks the current Python thread until done. During
- that time, the previous interpreter is allowed to run
- in other threads.
- """
- excinfo = _interpreters.exec(self._id, code, restrict=True)
- if excinfo is not None:
- raise ExecutionFailed(excinfo)
-
- def call(self, callable, /):
- """Call the object in the interpreter with given args/kwargs.
-
- Only functions that take no arguments and have no closure
- are supported.
-
- The return value is discarded.
-
- If the callable raises an exception then the error display
- (including full traceback) is send back between the interpreters
- and an ExecutionFailed exception is raised, much like what
- happens with Interpreter.exec().
- """
- # XXX Support args and kwargs.
- # XXX Support arbitrary callables.
- # XXX Support returning the return value (e.g. via pickle).
- excinfo = _interpreters.call(self._id, callable, restrict=True)
- if excinfo is not None:
- raise ExecutionFailed(excinfo)
-
- def call_in_thread(self, callable, /):
- """Return a new thread that calls the object in the interpreter.
-
- The return value and any raised exception are discarded.
- """
- def task():
- self.call(callable)
- t = threading.Thread(target=task)
- t.start()
- return t
diff --git a/Lib/test/support/interpreters/_crossinterp.py b/Lib/test/support/interpreters/_crossinterp.py
deleted file mode 100644
index 544e197ba4c..00000000000
--- a/Lib/test/support/interpreters/_crossinterp.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""Common code between queues and channels."""
-
-
-class ItemInterpreterDestroyed(Exception):
- """Raised when trying to get an item whose interpreter was destroyed."""
-
-
-class classonly:
- """A non-data descriptor that makes a value only visible on the class.
-
- This is like the "classmethod" builtin, but does not show up on
- instances of the class. It may be used as a decorator.
- """
-
- def __init__(self, value):
- self.value = value
- self.getter = classmethod(value).__get__
- self.name = None
-
- def __set_name__(self, cls, name):
- if self.name is not None:
- raise TypeError('already used')
- self.name = name
-
- def __get__(self, obj, cls):
- if obj is not None:
- raise AttributeError(self.name)
- # called on the class
- return self.getter(None, cls)
-
-
-class UnboundItem:
- """Represents a cross-interpreter item no longer bound to an interpreter.
-
- An item is unbound when the interpreter that added it to the
- cross-interpreter container is destroyed.
- """
-
- __slots__ = ()
-
- @classonly
- def singleton(cls, kind, module, name='UNBOUND'):
- doc = cls.__doc__.replace('cross-interpreter container', kind)
- doc = doc.replace('cross-interpreter', kind)
- subclass = type(
- f'Unbound{kind.capitalize()}Item',
- (cls,),
- dict(
- _MODULE=module,
- _NAME=name,
- __doc__=doc,
- ),
- )
- return object.__new__(subclass)
-
- _MODULE = __name__
- _NAME = 'UNBOUND'
-
- def __new__(cls):
- raise Exception(f'use {cls._MODULE}.{cls._NAME}')
-
- def __repr__(self):
- return f'{self._MODULE}.{self._NAME}'
-# return f'interpreters.queues.UNBOUND'
-
-
-UNBOUND = object.__new__(UnboundItem)
-UNBOUND_ERROR = object()
-UNBOUND_REMOVE = object()
-
-_UNBOUND_CONSTANT_TO_FLAG = {
- UNBOUND_REMOVE: 1,
- UNBOUND_ERROR: 2,
- UNBOUND: 3,
-}
-_UNBOUND_FLAG_TO_CONSTANT = {v: k
- for k, v in _UNBOUND_CONSTANT_TO_FLAG.items()}
-
-
-def serialize_unbound(unbound):
- op = unbound
- try:
- flag = _UNBOUND_CONSTANT_TO_FLAG[op]
- except KeyError:
- raise NotImplementedError(f'unsupported unbound replacement op {op!r}')
- return flag,
-
-
-def resolve_unbound(flag, exctype_destroyed):
- try:
- op = _UNBOUND_FLAG_TO_CONSTANT[flag]
- except KeyError:
- raise NotImplementedError(f'unsupported unbound replacement op {flag!r}')
- if op is UNBOUND_REMOVE:
- # "remove" not possible here
- raise NotImplementedError
- elif op is UNBOUND_ERROR:
- raise exctype_destroyed("item's original interpreter destroyed")
- elif op is UNBOUND:
- return UNBOUND
- else:
- raise NotImplementedError(repr(op))
diff --git a/Lib/test/support/interpreters/queues.py b/Lib/test/support/interpreters/queues.py
deleted file mode 100644
index d6a3197d9e0..00000000000
--- a/Lib/test/support/interpreters/queues.py
+++ /dev/null
@@ -1,289 +0,0 @@
-"""Cross-interpreter Queues High Level Module."""
-
-import pickle
-import queue
-import time
-import weakref
-import _interpqueues as _queues
-from . import _crossinterp
-
-# aliases:
-from _interpqueues import (
- QueueError, QueueNotFoundError,
-)
-from ._crossinterp import (
- UNBOUND_ERROR, UNBOUND_REMOVE,
-)
-
-__all__ = [
- 'UNBOUND', 'UNBOUND_ERROR', 'UNBOUND_REMOVE',
- 'create', 'list_all',
- 'Queue',
- 'QueueError', 'QueueNotFoundError', 'QueueEmpty', 'QueueFull',
- 'ItemInterpreterDestroyed',
-]
-
-
-class QueueEmpty(QueueError, queue.Empty):
- """Raised from get_nowait() when the queue is empty.
-
- It is also raised from get() if it times out.
- """
-
-
-class QueueFull(QueueError, queue.Full):
- """Raised from put_nowait() when the queue is full.
-
- It is also raised from put() if it times out.
- """
-
-
-class ItemInterpreterDestroyed(QueueError,
- _crossinterp.ItemInterpreterDestroyed):
- """Raised from get() and get_nowait()."""
-
-
-_SHARED_ONLY = 0
-_PICKLED = 1
-
-
-UNBOUND = _crossinterp.UnboundItem.singleton('queue', __name__)
-
-
-def _serialize_unbound(unbound):
- if unbound is UNBOUND:
- unbound = _crossinterp.UNBOUND
- return _crossinterp.serialize_unbound(unbound)
-
-
-def _resolve_unbound(flag):
- resolved = _crossinterp.resolve_unbound(flag, ItemInterpreterDestroyed)
- if resolved is _crossinterp.UNBOUND:
- resolved = UNBOUND
- return resolved
-
-
-def create(maxsize=0, *, unbounditems=UNBOUND):
- """Return a new cross-interpreter queue.
-
- The queue may be used to pass data safely between interpreters.
-
- "unbounditems" sets the default for Queue.put(); see that method for
- supported values. The default value is UNBOUND, which replaces
- the unbound item.
- """
- unbound = _serialize_unbound(unbounditems)
- unboundop, = unbound
- qid = _queues.create(maxsize, unboundop, -1)
- self = Queue(qid)
- self._set_unbound(unboundop, unbounditems)
- return self
-
-
-def list_all():
- """Return a list of all open queues."""
- queues = []
- for qid, unboundop, _ in _queues.list_all():
- self = Queue(qid)
- if not hasattr(self, '_unbound'):
- self._set_unbound(unboundop)
- else:
- assert self._unbound[0] == unboundop
- queues.append(self)
- return queues
-
-
-_known_queues = weakref.WeakValueDictionary()
-
-class Queue:
- """A cross-interpreter queue."""
-
- def __new__(cls, id, /):
- # There is only one instance for any given ID.
- if isinstance(id, int):
- id = int(id)
- else:
- raise TypeError(f'id must be an int, got {id!r}')
- try:
- self = _known_queues[id]
- except KeyError:
- self = super().__new__(cls)
- self._id = id
- _known_queues[id] = self
- _queues.bind(id)
- return self
-
- def __del__(self):
- try:
- _queues.release(self._id)
- except QueueNotFoundError:
- pass
- try:
- del _known_queues[self._id]
- except KeyError:
- pass
-
- def __repr__(self):
- return f'{type(self).__name__}({self.id})'
-
- def __hash__(self):
- return hash(self._id)
-
- # for pickling:
- def __getnewargs__(self):
- return (self._id,)
-
- # for pickling:
- def __getstate__(self):
- return None
-
- def _set_unbound(self, op, items=None):
- assert not hasattr(self, '_unbound')
- if items is None:
- items = _resolve_unbound(op)
- unbound = (op, items)
- self._unbound = unbound
- return unbound
-
- @property
- def id(self):
- return self._id
-
- @property
- def unbounditems(self):
- try:
- _, items = self._unbound
- except AttributeError:
- op, _ = _queues.get_queue_defaults(self._id)
- _, items = self._set_unbound(op)
- return items
-
- @property
- def maxsize(self):
- try:
- return self._maxsize
- except AttributeError:
- self._maxsize = _queues.get_maxsize(self._id)
- return self._maxsize
-
- def empty(self):
- return self.qsize() == 0
-
- def full(self):
- return _queues.is_full(self._id)
-
- def qsize(self):
- return _queues.get_count(self._id)
-
- def put(self, obj, timeout=None, *,
- unbounditems=None,
- _delay=10 / 1000, # 10 milliseconds
- ):
- """Add the object to the queue.
-
- This blocks while the queue is full.
-
- For most objects, the object received through Queue.get() will
- be a new one, equivalent to the original and not sharing any
- actual underlying data. The notable exceptions include
- cross-interpreter types (like Queue) and memoryview, where the
- underlying data is actually shared. Furthermore, some types
- can be sent through a queue more efficiently than others. This
- group includes various immutable types like int, str, bytes, and
- tuple (if the items are likewise efficiently shareable). See interpreters.is_shareable().
-
- "unbounditems" controls the behavior of Queue.get() for the given
- object if the current interpreter (calling put()) is later
- destroyed.
-
- If "unbounditems" is None (the default) then it uses the
- queue's default, set with create_queue(),
- which is usually UNBOUND.
-
- If "unbounditems" is UNBOUND_ERROR then get() will raise an
- ItemInterpreterDestroyed exception if the original interpreter
- has been destroyed. This does not otherwise affect the queue;
- the next call to put() will work like normal, returning the next
- item in the queue.
-
- If "unbounditems" is UNBOUND_REMOVE then the item will be removed
- from the queue as soon as the original interpreter is destroyed.
- Be aware that this will introduce an imbalance between put()
- and get() calls.
-
- If "unbounditems" is UNBOUND then it is returned by get() in place
- of the unbound item.
- """
- if unbounditems is None:
- unboundop = -1
- else:
- unboundop, = _serialize_unbound(unbounditems)
- if timeout is not None:
- timeout = int(timeout)
- if timeout < 0:
- raise ValueError(f'timeout value must be non-negative')
- end = time.time() + timeout
- while True:
- try:
- _queues.put(self._id, obj, unboundop)
- except QueueFull as exc:
- if timeout is not None and time.time() >= end:
- raise # re-raise
- time.sleep(_delay)
- else:
- break
-
- def put_nowait(self, obj, *, unbounditems=None):
- if unbounditems is None:
- unboundop = -1
- else:
- unboundop, = _serialize_unbound(unbounditems)
- _queues.put(self._id, obj, unboundop)
-
- def get(self, timeout=None, *,
- _delay=10 / 1000, # 10 milliseconds
- ):
- """Return the next object from the queue.
-
- This blocks while the queue is empty.
-
- If the next item's original interpreter has been destroyed
- then the "next object" is determined by the value of the
- "unbounditems" argument to put().
- """
- if timeout is not None:
- timeout = int(timeout)
- if timeout < 0:
- raise ValueError(f'timeout value must be non-negative')
- end = time.time() + timeout
- while True:
- try:
- obj, unboundop = _queues.get(self._id)
- except QueueEmpty as exc:
- if timeout is not None and time.time() >= end:
- raise # re-raise
- time.sleep(_delay)
- else:
- break
- if unboundop is not None:
- assert obj is None, repr(obj)
- return _resolve_unbound(unboundop)
- return obj
-
- def get_nowait(self):
- """Return the next object from the channel.
-
- If the queue is empty then raise QueueEmpty. Otherwise this
- is the same as get().
- """
- try:
- obj, unboundop = _queues.get(self._id)
- except QueueEmpty as exc:
- raise # re-raise
- if unboundop is not None:
- assert obj is None, repr(obj)
- return _resolve_unbound(unboundop)
- return obj
-
-
-_queues._register_heap_types(Queue, QueueEmpty, QueueFull)
diff --git a/Lib/test/test__interpchannels.py b/Lib/test/test__interpchannels.py
index 88eee03a3de..858d31a73cf 100644
--- a/Lib/test/test__interpchannels.py
+++ b/Lib/test/test__interpchannels.py
@@ -9,7 +9,7 @@ import unittest
from test.support import import_helper, skip_if_sanitizer
_channels = import_helper.import_module('_interpchannels')
-from test.support.interpreters import _crossinterp
+from concurrent.interpreters import _crossinterp
from test.test__interpreters import (
_interpreters,
_run_output,
diff --git a/Lib/test/test__interpreters.py b/Lib/test/test__interpreters.py
index ad3ebbfdff6..a32d5d81d2b 100644
--- a/Lib/test/test__interpreters.py
+++ b/Lib/test/test__interpreters.py
@@ -485,6 +485,21 @@ class CommonTests(TestBase):
msg = r'_interpreters.run_func\(\) argument 3 must be dict, not int'
with self.assertRaisesRegex(TypeError, msg):
_interpreters.run_func(self.id, lambda: None, shared=1)
+ # See https://github.com/python/cpython/issues/135855
+ msg = r'_interpreters.set___main___attrs\(\) argument 2 must be dict, not int'
+ with self.assertRaisesRegex(TypeError, msg):
+ _interpreters.set___main___attrs(self.id, 1)
+
+ def test_invalid_shared_none(self):
+ msg = r'must be dict, not None'
+ with self.assertRaisesRegex(TypeError, msg):
+ _interpreters.exec(self.id, 'a', shared=None)
+ with self.assertRaisesRegex(TypeError, msg):
+ _interpreters.run_string(self.id, 'a', shared=None)
+ with self.assertRaisesRegex(TypeError, msg):
+ _interpreters.run_func(self.id, lambda: None, shared=None)
+ with self.assertRaisesRegex(TypeError, msg):
+ _interpreters.set___main___attrs(self.id, None)
def test_invalid_shared_encoding(self):
# See https://github.com/python/cpython/issues/127196
diff --git a/Lib/test/test_annotationlib.py b/Lib/test/test_annotationlib.py
index 73a821d15e3..ae0e73f08c5 100644
--- a/Lib/test/test_annotationlib.py
+++ b/Lib/test/test_annotationlib.py
@@ -7,7 +7,7 @@ import collections
import functools
import itertools
import pickle
-from string.templatelib import Interpolation, Template
+from string.templatelib import Template
import typing
import unittest
from annotationlib import (
@@ -815,6 +815,70 @@ class TestGetAnnotations(unittest.TestCase):
{"x": int},
)
+ def test_stringized_annotation_permutations(self):
+ def define_class(name, has_future, has_annos, base_text, extra_names=None):
+ lines = []
+ if has_future:
+ lines.append("from __future__ import annotations")
+ lines.append(f"class {name}({base_text}):")
+ if has_annos:
+ lines.append(f" {name}_attr: int")
+ else:
+ lines.append(" pass")
+ code = "\n".join(lines)
+ ns = support.run_code(code, extra_names=extra_names)
+ return ns[name]
+
+ def check_annotations(cls, has_future, has_annos):
+ if has_annos:
+ if has_future:
+ anno = "int"
+ else:
+ anno = int
+ self.assertEqual(get_annotations(cls), {f"{cls.__name__}_attr": anno})
+ else:
+ self.assertEqual(get_annotations(cls), {})
+
+ for meta_future, base_future, child_future, meta_has_annos, base_has_annos, child_has_annos in itertools.product(
+ (False, True),
+ (False, True),
+ (False, True),
+ (False, True),
+ (False, True),
+ (False, True),
+ ):
+ with self.subTest(
+ meta_future=meta_future,
+ base_future=base_future,
+ child_future=child_future,
+ meta_has_annos=meta_has_annos,
+ base_has_annos=base_has_annos,
+ child_has_annos=child_has_annos,
+ ):
+ meta = define_class(
+ "Meta",
+ has_future=meta_future,
+ has_annos=meta_has_annos,
+ base_text="type",
+ )
+ base = define_class(
+ "Base",
+ has_future=base_future,
+ has_annos=base_has_annos,
+ base_text="metaclass=Meta",
+ extra_names={"Meta": meta},
+ )
+ child = define_class(
+ "Child",
+ has_future=child_future,
+ has_annos=child_has_annos,
+ base_text="Base",
+ extra_names={"Base": base},
+ )
+ check_annotations(meta, meta_future, meta_has_annos)
+ check_annotations(base, base_future, base_has_annos)
+ check_annotations(child, child_future, child_has_annos)
+
def test_modify_annotations(self):
def f(x: int):
pass
@@ -1586,9 +1650,11 @@ class TestForwardRefClass(unittest.TestCase):
with support.swap_attr(builtins, "int", dict):
self.assertIs(ForwardRef("int").evaluate(), dict)
- with self.assertRaises(NameError):
+ with self.assertRaises(NameError, msg="name 'doesntexist' is not defined") as exc:
ForwardRef("doesntexist").evaluate()
+ self.assertEqual(exc.exception.name, "doesntexist")
+
def test_fwdref_invalid_syntax(self):
fr = ForwardRef("if")
with self.assertRaises(SyntaxError):
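The evaluate() behavior checked above, as a small interactive sketch (assuming a standard global namespace):

    >>> from annotationlib import ForwardRef
    >>> ForwardRef("int").evaluate()
    <class 'int'>
    >>> ForwardRef("doesntexist").evaluate()
    Traceback (most recent call last):
      ...
    NameError: name 'doesntexist' is not defined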
diff --git a/Lib/test/test_ast/test_ast.py b/Lib/test/test_ast/test_ast.py
index 1479a8eafd6..cc46529c0ef 100644
--- a/Lib/test/test_ast/test_ast.py
+++ b/Lib/test/test_ast/test_ast.py
@@ -1372,17 +1372,17 @@ class ASTHelpers_Test(unittest.TestCase):
def test_dump(self):
node = ast.parse('spam(eggs, "and cheese")')
self.assertEqual(ast.dump(node),
- "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), "
- "args=[Name(id='eggs', ctx=Load()), Constant(value='and cheese')]))])"
+ "Module(body=[Expr(value=Call(func=Name(id='spam'), "
+ "args=[Name(id='eggs'), Constant(value='and cheese')]))])"
)
self.assertEqual(ast.dump(node, annotate_fields=False),
- "Module([Expr(Call(Name('spam', Load()), [Name('eggs', Load()), "
+ "Module([Expr(Call(Name('spam'), [Name('eggs'), "
"Constant('and cheese')]))])"
)
self.assertEqual(ast.dump(node, include_attributes=True),
- "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load(), "
+ "Module(body=[Expr(value=Call(func=Name(id='spam', "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=4), "
- "args=[Name(id='eggs', ctx=Load(), lineno=1, col_offset=5, "
+ "args=[Name(id='eggs', lineno=1, col_offset=5, "
"end_lineno=1, end_col_offset=9), Constant(value='and cheese', "
"lineno=1, col_offset=11, end_lineno=1, end_col_offset=23)], "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=24), "
@@ -1396,18 +1396,18 @@ Module(
body=[
Expr(
value=Call(
- func=Name(id='spam', ctx=Load()),
+ func=Name(id='spam'),
args=[
- Name(id='eggs', ctx=Load()),
+ Name(id='eggs'),
Constant(value='and cheese')]))])""")
self.assertEqual(ast.dump(node, annotate_fields=False, indent='\t'), """\
Module(
\t[
\t\tExpr(
\t\t\tCall(
-\t\t\t\tName('spam', Load()),
+\t\t\t\tName('spam'),
\t\t\t\t[
-\t\t\t\t\tName('eggs', Load()),
+\t\t\t\t\tName('eggs'),
\t\t\t\t\tConstant('and cheese')]))])""")
self.assertEqual(ast.dump(node, include_attributes=True, indent=3), """\
Module(
@@ -1416,7 +1416,6 @@ Module(
value=Call(
func=Name(
id='spam',
- ctx=Load(),
lineno=1,
col_offset=0,
end_lineno=1,
@@ -1424,7 +1423,6 @@ Module(
args=[
Name(
id='eggs',
- ctx=Load(),
lineno=1,
col_offset=5,
end_lineno=1,
@@ -1454,23 +1452,23 @@ Module(
)
node = ast.Raise(exc=ast.Name(id='e', ctx=ast.Load()), lineno=3, col_offset=4)
self.assertEqual(ast.dump(node),
- "Raise(exc=Name(id='e', ctx=Load()))"
+ "Raise(exc=Name(id='e'))"
)
self.assertEqual(ast.dump(node, annotate_fields=False),
- "Raise(Name('e', Load()))"
+ "Raise(Name('e'))"
)
self.assertEqual(ast.dump(node, include_attributes=True),
- "Raise(exc=Name(id='e', ctx=Load()), lineno=3, col_offset=4)"
+ "Raise(exc=Name(id='e'), lineno=3, col_offset=4)"
)
self.assertEqual(ast.dump(node, annotate_fields=False, include_attributes=True),
- "Raise(Name('e', Load()), lineno=3, col_offset=4)"
+ "Raise(Name('e'), lineno=3, col_offset=4)"
)
node = ast.Raise(cause=ast.Name(id='e', ctx=ast.Load()))
self.assertEqual(ast.dump(node),
- "Raise(cause=Name(id='e', ctx=Load()))"
+ "Raise(cause=Name(id='e'))"
)
self.assertEqual(ast.dump(node, annotate_fields=False),
- "Raise(cause=Name('e', Load()))"
+ "Raise(cause=Name('e'))"
)
# Arguments:
node = ast.arguments(args=[ast.arg("x")])
@@ -1502,10 +1500,10 @@ Module(
[ast.Name('dataclass', ctx=ast.Load())],
)
self.assertEqual(ast.dump(node),
- "ClassDef(name='T', keywords=[keyword(arg='a', value=Constant(value=None))], decorator_list=[Name(id='dataclass', ctx=Load())])",
+ "ClassDef(name='T', keywords=[keyword(arg='a', value=Constant(value=None))], decorator_list=[Name(id='dataclass')])",
)
self.assertEqual(ast.dump(node, annotate_fields=False),
- "ClassDef('T', [], [keyword('a', Constant(None))], [], [Name('dataclass', Load())])",
+ "ClassDef('T', [], [keyword('a', Constant(None))], [], [Name('dataclass')])",
)
def test_dump_show_empty(self):
@@ -1533,7 +1531,7 @@ Module(
check_node(
# Corner case: there are no real `Name` instances with `id=''`:
ast.Name(id='', ctx=ast.Load()),
- empty="Name(id='', ctx=Load())",
+ empty="Name(id='')",
full="Name(id='', ctx=Load())",
)
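The updated expected strings reflect that ast.dump() now omits fields equal to their defaults, such as ctx=Load() and empty lists, unless show_empty=True is passed. A quick sketch of the difference (assuming the show_empty parameter exercised by this test):

    import ast
    call = ast.parse("spam(eggs)").body[0].value
    ast.dump(call)                   # "Call(func=Name(id='spam'), args=[Name(id='eggs')])"
    ast.dump(call, show_empty=True)  # includes ctx=Load() on the Names and the empty keywords=[]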
@@ -1544,39 +1542,63 @@ Module(
)
check_node(
+ ast.MatchSingleton(value=[]),
+ empty="MatchSingleton(value=[])",
+ full="MatchSingleton(value=[])",
+ )
+
+ check_node(
ast.Constant(value=None),
empty="Constant(value=None)",
full="Constant(value=None)",
)
check_node(
+ ast.Constant(value=[]),
+ empty="Constant(value=[])",
+ full="Constant(value=[])",
+ )
+
+ check_node(
ast.Constant(value=''),
empty="Constant(value='')",
full="Constant(value='')",
)
+ check_node(
+ ast.Interpolation(value=ast.Constant(42), str=None, conversion=-1),
+ empty="Interpolation(value=Constant(value=42), str=None, conversion=-1)",
+ full="Interpolation(value=Constant(value=42), str=None, conversion=-1)",
+ )
+
+ check_node(
+ ast.Interpolation(value=ast.Constant(42), str=[], conversion=-1),
+ empty="Interpolation(value=Constant(value=42), str=[], conversion=-1)",
+ full="Interpolation(value=Constant(value=42), str=[], conversion=-1)",
+ )
+
check_text(
"def a(b: int = 0, *, c): ...",
- empty="Module(body=[FunctionDef(name='a', args=arguments(args=[arg(arg='b', annotation=Name(id='int', ctx=Load()))], kwonlyargs=[arg(arg='c')], kw_defaults=[None], defaults=[Constant(value=0)]), body=[Expr(value=Constant(value=Ellipsis))])])",
+ empty="Module(body=[FunctionDef(name='a', args=arguments(args=[arg(arg='b', annotation=Name(id='int'))], kwonlyargs=[arg(arg='c')], kw_defaults=[None], defaults=[Constant(value=0)]), body=[Expr(value=Constant(value=Ellipsis))])])",
full="Module(body=[FunctionDef(name='a', args=arguments(posonlyargs=[], args=[arg(arg='b', annotation=Name(id='int', ctx=Load()))], kwonlyargs=[arg(arg='c')], kw_defaults=[None], defaults=[Constant(value=0)]), body=[Expr(value=Constant(value=Ellipsis))], decorator_list=[], type_params=[])], type_ignores=[])",
)
check_text(
"def a(b: int = 0, *, c): ...",
- empty="Module(body=[FunctionDef(name='a', args=arguments(args=[arg(arg='b', annotation=Name(id='int', ctx=Load(), lineno=1, col_offset=9, end_lineno=1, end_col_offset=12), lineno=1, col_offset=6, end_lineno=1, end_col_offset=12)], kwonlyargs=[arg(arg='c', lineno=1, col_offset=21, end_lineno=1, end_col_offset=22)], kw_defaults=[None], defaults=[Constant(value=0, lineno=1, col_offset=15, end_lineno=1, end_col_offset=16)]), body=[Expr(value=Constant(value=Ellipsis, lineno=1, col_offset=25, end_lineno=1, end_col_offset=28), lineno=1, col_offset=25, end_lineno=1, end_col_offset=28)], lineno=1, col_offset=0, end_lineno=1, end_col_offset=28)])",
+ empty="Module(body=[FunctionDef(name='a', args=arguments(args=[arg(arg='b', annotation=Name(id='int', lineno=1, col_offset=9, end_lineno=1, end_col_offset=12), lineno=1, col_offset=6, end_lineno=1, end_col_offset=12)], kwonlyargs=[arg(arg='c', lineno=1, col_offset=21, end_lineno=1, end_col_offset=22)], kw_defaults=[None], defaults=[Constant(value=0, lineno=1, col_offset=15, end_lineno=1, end_col_offset=16)]), body=[Expr(value=Constant(value=Ellipsis, lineno=1, col_offset=25, end_lineno=1, end_col_offset=28), lineno=1, col_offset=25, end_lineno=1, end_col_offset=28)], lineno=1, col_offset=0, end_lineno=1, end_col_offset=28)])",
full="Module(body=[FunctionDef(name='a', args=arguments(posonlyargs=[], args=[arg(arg='b', annotation=Name(id='int', ctx=Load(), lineno=1, col_offset=9, end_lineno=1, end_col_offset=12), lineno=1, col_offset=6, end_lineno=1, end_col_offset=12)], kwonlyargs=[arg(arg='c', lineno=1, col_offset=21, end_lineno=1, end_col_offset=22)], kw_defaults=[None], defaults=[Constant(value=0, lineno=1, col_offset=15, end_lineno=1, end_col_offset=16)]), body=[Expr(value=Constant(value=Ellipsis, lineno=1, col_offset=25, end_lineno=1, end_col_offset=28), lineno=1, col_offset=25, end_lineno=1, end_col_offset=28)], decorator_list=[], type_params=[], lineno=1, col_offset=0, end_lineno=1, end_col_offset=28)], type_ignores=[])",
include_attributes=True,
)
check_text(
'spam(eggs, "and cheese")',
- empty="Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), args=[Name(id='eggs', ctx=Load()), Constant(value='and cheese')]))])",
+ empty="Module(body=[Expr(value=Call(func=Name(id='spam'), args=[Name(id='eggs'), Constant(value='and cheese')]))])",
full="Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), args=[Name(id='eggs', ctx=Load()), Constant(value='and cheese')], keywords=[]))], type_ignores=[])",
)
check_text(
'spam(eggs, text="and cheese")',
- empty="Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), args=[Name(id='eggs', ctx=Load())], keywords=[keyword(arg='text', value=Constant(value='and cheese'))]))])",
+ empty="Module(body=[Expr(value=Call(func=Name(id='spam'), args=[Name(id='eggs')], keywords=[keyword(arg='text', value=Constant(value='and cheese'))]))])",
full="Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), args=[Name(id='eggs', ctx=Load())], keywords=[keyword(arg='text', value=Constant(value='and cheese'))]))], type_ignores=[])",
)
@@ -1610,12 +1632,12 @@ Module(
self.assertEqual(src, ast.fix_missing_locations(src))
self.maxDiff = None
self.assertEqual(ast.dump(src, include_attributes=True),
- "Module(body=[Expr(value=Call(func=Name(id='write', ctx=Load(), "
+ "Module(body=[Expr(value=Call(func=Name(id='write', "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=5), "
"args=[Constant(value='spam', lineno=1, col_offset=6, end_lineno=1, "
"end_col_offset=12)], lineno=1, col_offset=0, end_lineno=1, "
"end_col_offset=13), lineno=1, col_offset=0, end_lineno=1, "
- "end_col_offset=13), Expr(value=Call(func=Name(id='spam', ctx=Load(), "
+ "end_col_offset=13), Expr(value=Call(func=Name(id='spam', "
"lineno=1, col_offset=0, end_lineno=1, end_col_offset=0), "
"args=[Constant(value='eggs', lineno=1, col_offset=0, end_lineno=1, "
"end_col_offset=0)], lineno=1, col_offset=0, end_lineno=1, "
@@ -3292,6 +3314,7 @@ class CommandLineTests(unittest.TestCase):
expect = self.text_normalize(expect)
self.assertEqual(res, expect)
+ @support.requires_resource('cpu')
def test_invocation(self):
# test various combinations of parameters
base_flags = (
@@ -3334,7 +3357,7 @@ class CommandLineTests(unittest.TestCase):
body=[
AnnAssign(
target=Name(id='x', ctx=Store()),
- annotation=Name(id='bool', ctx=Load()),
+ annotation=Name(id='bool'),
value=Constant(value=1),
simple=1)],
type_ignores=[
@@ -3362,7 +3385,7 @@ class CommandLineTests(unittest.TestCase):
expect = '''
Expression(
body=Call(
- func=Name(id='print', ctx=Load()),
+ func=Name(id='print'),
args=[
Constant(value=1),
Constant(value=2),
@@ -3378,12 +3401,11 @@ class CommandLineTests(unittest.TestCase):
expect = '''
FunctionType(
argtypes=[
- Name(id='int', ctx=Load()),
- Name(id='str', ctx=Load())],
+ Name(id='int'),
+ Name(id='str')],
returns=Subscript(
- value=Name(id='list', ctx=Load()),
- slice=Name(id='int', ctx=Load()),
- ctx=Load()))
+ value=Name(id='list'),
+ slice=Name(id='int')))
'''
for flag in ('-m=func_type', '--mode=func_type'):
with self.subTest(flag=flag):
@@ -3397,7 +3419,7 @@ class CommandLineTests(unittest.TestCase):
body=[
AnnAssign(
target=Name(id='x', ctx=Store()),
- annotation=Name(id='bool', ctx=Load()),
+ annotation=Name(id='bool'),
value=Constant(value=1),
simple=1)])
'''
@@ -3442,7 +3464,7 @@ class CommandLineTests(unittest.TestCase):
Module(
body=[
Match(
- subject=Name(id='x', ctx=Load()),
+ subject=Name(id='x'),
cases=[
match_case(
pattern=MatchValue(
@@ -3465,7 +3487,7 @@ class CommandLineTests(unittest.TestCase):
Module(
body=[
Match(
- subject=Name(id='a', ctx=Load()),
+ subject=Name(id='a'),
cases=[
match_case(
pattern=MatchValue(
@@ -3491,7 +3513,7 @@ class CommandLineTests(unittest.TestCase):
Module(
body=[
Match(
- subject=Name(id='a', ctx=Load()),
+ subject=Name(id='a'),
cases=[
match_case(
pattern=MatchValue(
diff --git a/Lib/test/test_asyncgen.py b/Lib/test/test_asyncgen.py
index 2c44647bf3e..636cb33dd98 100644
--- a/Lib/test/test_asyncgen.py
+++ b/Lib/test/test_asyncgen.py
@@ -2021,6 +2021,15 @@ class TestUnawaitedWarnings(unittest.TestCase):
g.athrow(RuntimeError)
gc_collect()
+ def test_athrow_throws_immediately(self):
+ async def gen():
+ yield 1
+
+ g = gen()
+ msg = "athrow expected at least 1 argument, got 0"
+ with self.assertRaisesRegex(TypeError, msg):
+ g.athrow()
+
def test_aclose(self):
async def gen():
yield 1
diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py
index 2ca5c4c6719..12179eb0c9e 100644
--- a/Lib/test/test_asyncio/test_base_events.py
+++ b/Lib/test/test_asyncio/test_base_events.py
@@ -24,6 +24,10 @@ import warnings
MOCK_ANY = mock.ANY
+class CustomError(Exception):
+ pass
+
+
def tearDownModule():
asyncio._set_event_loop_policy(None)
@@ -1190,6 +1194,36 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
+ @patch_socket
+ def test_create_connection_happy_eyeballs_empty_exceptions(self, m_socket):
+        # See gh-135836: Fix IndexError when the Happy Eyeballs algorithm
+        # results in an empty exceptions list
+
+ async def getaddrinfo(*args, **kw):
+ return [(socket.AF_INET, socket.SOCK_STREAM, 0, '', ('127.0.0.1', 80)),
+ (socket.AF_INET6, socket.SOCK_STREAM, 0, '', ('::1', 80))]
+
+ def getaddrinfo_task(*args, **kwds):
+ return self.loop.create_task(getaddrinfo(*args, **kwds))
+
+ self.loop.getaddrinfo = getaddrinfo_task
+
+ # Mock staggered_race to return empty exceptions list
+        # This simulates the scenario where the Happy Eyeballs algorithm
+ # cancels all attempts but doesn't properly collect exceptions
+ with mock.patch('asyncio.staggered.staggered_race') as mock_staggered:
+ # Return (None, []) - no winner, empty exceptions list
+ async def mock_race(coro_fns, delay, loop):
+ return None, []
+ mock_staggered.side_effect = mock_race
+
+ coro = self.loop.create_connection(
+ MyProto, 'example.com', 80, happy_eyeballs_delay=0.1)
+
+ # Should raise TimeoutError instead of IndexError
+ with self.assertRaisesRegex(TimeoutError, "create_connection failed"):
+ self.loop.run_until_complete(coro)
+
def test_create_connection_host_port_sock(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, sock=object())
@@ -1296,6 +1330,31 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.assertEqual(len(cm.exception.exceptions), 1)
self.assertIsInstance(cm.exception.exceptions[0], OSError)
+ @patch_socket
+ def test_create_connection_connect_non_os_err_close_err(self, m_socket):
+        # Test the case when sock_connect() raises a non-OSError exception
+ # and sock.close() raises OSError.
+ async def getaddrinfo(*args, **kw):
+ return [(2, 1, 6, '', ('107.6.106.82', 80))]
+
+ def getaddrinfo_task(*args, **kwds):
+ return self.loop.create_task(getaddrinfo(*args, **kwds))
+
+ self.loop.getaddrinfo = getaddrinfo_task
+ self.loop.sock_connect = mock.Mock()
+ self.loop.sock_connect.side_effect = CustomError
+ sock = mock.Mock()
+ m_socket.socket.return_value = sock
+ sock.close.side_effect = OSError
+
+ coro = self.loop.create_connection(MyProto, 'example.com', 80)
+ self.assertRaises(
+ CustomError, self.loop.run_until_complete, coro)
+
+ coro = self.loop.create_connection(MyProto, 'example.com', 80, all_errors=True)
+ self.assertRaises(
+ CustomError, self.loop.run_until_complete, coro)
+
def test_create_connection_multiple(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
diff --git a/Lib/test/test_asyncio/test_tools.py b/Lib/test/test_asyncio/test_tools.py
index ba36e759ccd..34e94830204 100644
--- a/Lib/test/test_asyncio/test_tools.py
+++ b/Lib/test/test_asyncio/test_tools.py
@@ -2,6 +2,13 @@ import unittest
from asyncio import tools
+from collections import namedtuple
+
+FrameInfo = namedtuple('FrameInfo', ['funcname', 'filename', 'lineno'])
+CoroInfo = namedtuple('CoroInfo', ['call_stack', 'task_name'])
+TaskInfo = namedtuple('TaskInfo', ['task_id', 'task_name', 'coroutine_stack', 'awaited_by'])
+AwaitedInfo = namedtuple('AwaitedInfo', ['thread_id', 'awaited_by'])
+
# Mock output of the get_all_awaited_by function.
TEST_INPUTS_TREE = [
@@ -10,81 +17,151 @@ TEST_INPUTS_TREE = [
# different subtasks part of a TaskGroup (root1 and root2) which call
# awaiter functions.
(
- (
- 1,
- [
- (2, "Task-1", []),
- (
- 3,
- "timer",
- [
- [[("awaiter3", "/path/to/app.py", 130),
- ("awaiter2", "/path/to/app.py", 120),
- ("awaiter", "/path/to/app.py", 110)], 4],
- [[("awaiterB3", "/path/to/app.py", 190),
- ("awaiterB2", "/path/to/app.py", 180),
- ("awaiterB", "/path/to/app.py", 170)], 5],
- [[("awaiterB3", "/path/to/app.py", 190),
- ("awaiterB2", "/path/to/app.py", 180),
- ("awaiterB", "/path/to/app.py", 170)], 6],
- [[("awaiter3", "/path/to/app.py", 130),
- ("awaiter2", "/path/to/app.py", 120),
- ("awaiter", "/path/to/app.py", 110)], 7],
- ],
- ),
- (
- 8,
- "root1",
- [[["_aexit", "__aexit__", "main"], 2]],
- ),
- (
- 9,
- "root2",
- [[["_aexit", "__aexit__", "main"], 2]],
- ),
- (
- 4,
- "child1_1",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 8,
- ]
- ],
- ),
- (
- 6,
- "child2_1",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 8,
- ]
- ],
- ),
- (
- 7,
- "child1_2",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 9,
- ]
- ],
- ),
- (
- 5,
- "child2_2",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 9,
- ]
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
),
- ],
+ TaskInfo(
+ task_id=3,
+ task_name="timer",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiter3", "/path/to/app.py", 130),
+ FrameInfo("awaiter2", "/path/to/app.py", 120),
+ FrameInfo("awaiter", "/path/to/app.py", 110)
+ ],
+ task_name=4
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiterB3", "/path/to/app.py", 190),
+ FrameInfo("awaiterB2", "/path/to/app.py", 180),
+ FrameInfo("awaiterB", "/path/to/app.py", 170)
+ ],
+ task_name=5
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiterB3", "/path/to/app.py", 190),
+ FrameInfo("awaiterB2", "/path/to/app.py", 180),
+ FrameInfo("awaiterB", "/path/to/app.py", 170)
+ ],
+ task_name=6
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiter3", "/path/to/app.py", 130),
+ FrameInfo("awaiter2", "/path/to/app.py", 120),
+ FrameInfo("awaiter", "/path/to/app.py", 110)
+ ],
+ task_name=7
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=8,
+ task_name="root1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("main", "", 0)
+ ],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=9,
+ task_name="root2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("main", "", 0)
+ ],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="child1_1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=8
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="child2_1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=8
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=7,
+ task_name="child1_2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=9
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=5,
+ task_name="child2_2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=9
+ )
+ ]
+ )
+ ]
),
- (0, []),
+ AwaitedInfo(thread_id=0, awaited_by=[])
),
(
[
@@ -130,26 +207,96 @@ TEST_INPUTS_TREE = [
[
# test case containing two roots
(
- (
- 9,
- [
- (5, "Task-5", []),
- (6, "Task-6", [[["main2"], 5]]),
- (7, "Task-7", [[["main2"], 5]]),
- (8, "Task-8", [[["main2"], 5]]),
- ],
+ AwaitedInfo(
+ thread_id=9,
+ awaited_by=[
+ TaskInfo(
+ task_id=5,
+ task_name="Task-5",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="Task-6",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main2", "", 0)],
+ task_name=5
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=7,
+ task_name="Task-7",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main2", "", 0)],
+ task_name=5
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=8,
+ task_name="Task-8",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main2", "", 0)],
+ task_name=5
+ )
+ ]
+ )
+ ]
),
- (
- 10,
- [
- (1, "Task-1", []),
- (2, "Task-2", [[["main"], 1]]),
- (3, "Task-3", [[["main"], 1]]),
- (4, "Task-4", [[["main"], 1]]),
- ],
+ AwaitedInfo(
+ thread_id=10,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=1
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=1
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="Task-4",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=1
+ )
+ ]
+ )
+ ]
),
- (11, []),
- (0, []),
+ AwaitedInfo(thread_id=11, awaited_by=[]),
+ AwaitedInfo(thread_id=0, awaited_by=[])
),
(
[
@@ -174,18 +321,63 @@ TEST_INPUTS_TREE = [
# test case containing two roots, one of them without subtasks
(
[
- (1, [(2, "Task-5", [])]),
- (
- 3,
- [
- (4, "Task-1", []),
- (5, "Task-2", [[["main"], 4]]),
- (6, "Task-3", [[["main"], 4]]),
- (7, "Task-4", [[["main"], 4]]),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-5",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
),
- (8, []),
- (0, []),
+ AwaitedInfo(
+ thread_id=3,
+ awaited_by=[
+ TaskInfo(
+ task_id=4,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=5,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=4
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=4
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=7,
+ task_name="Task-4",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=4
+ )
+ ]
+ )
+ ]
+ ),
+ AwaitedInfo(thread_id=8, awaited_by=[]),
+ AwaitedInfo(thread_id=0, awaited_by=[])
]
),
(
@@ -208,19 +400,44 @@ TEST_INPUTS_CYCLES_TREE = [
# this test case contains a cycle: two tasks awaiting each other.
(
[
- (
- 1,
- [
- (2, "Task-1", []),
- (
- 3,
- "a",
- [[["awaiter2"], 4], [["main"], 2]],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
),
- (4, "b", [[["awaiter"], 3]]),
- ],
+ TaskInfo(
+ task_id=3,
+ task_name="a",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("awaiter2", "", 0)],
+ task_name=4
+ ),
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="b",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("awaiter", "", 0)],
+ task_name=3
+ )
+ ]
+ )
+ ]
),
- (0, []),
+ AwaitedInfo(thread_id=0, awaited_by=[])
]
),
([[4, 3, 4]]),
@@ -229,32 +446,85 @@ TEST_INPUTS_CYCLES_TREE = [
# this test case contains two cycles
(
[
- (
- 1,
- [
- (2, "Task-1", []),
- (
- 3,
- "A",
- [[["nested", "nested", "task_b"], 4]],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
),
- (
- 4,
- "B",
- [
- [["nested", "nested", "task_c"], 5],
- [["nested", "nested", "task_a"], 3],
- ],
+ TaskInfo(
+ task_id=3,
+ task_name="A",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_b", "", 0)
+ ],
+ task_name=4
+ )
+ ]
),
- (5, "C", [[["nested", "nested"], 6]]),
- (
- 6,
- "Task-2",
- [[["nested", "nested", "task_b"], 4]],
+ TaskInfo(
+ task_id=4,
+ task_name="B",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_c", "", 0)
+ ],
+ task_name=5
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_a", "", 0)
+ ],
+ task_name=3
+ )
+ ]
),
- ],
+ TaskInfo(
+ task_id=5,
+ task_name="C",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0)
+ ],
+ task_name=6
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_b", "", 0)
+ ],
+ task_name=4
+ )
+ ]
+ )
+ ]
),
- (0, []),
+ AwaitedInfo(thread_id=0, awaited_by=[])
]
),
([[4, 3, 4], [4, 6, 5, 4]]),
@@ -267,81 +537,160 @@ TEST_INPUTS_TABLE = [
# different subtasks part of a TaskGroup (root1 and root2) which call
# awaiter functions.
(
- (
- 1,
- [
- (2, "Task-1", []),
- (
- 3,
- "timer",
- [
- [["awaiter3", "awaiter2", "awaiter"], 4],
- [["awaiter1_3", "awaiter1_2", "awaiter1"], 5],
- [["awaiter1_3", "awaiter1_2", "awaiter1"], 6],
- [["awaiter3", "awaiter2", "awaiter"], 7],
- ],
- ),
- (
- 8,
- "root1",
- [[["_aexit", "__aexit__", "main"], 2]],
- ),
- (
- 9,
- "root2",
- [[["_aexit", "__aexit__", "main"], 2]],
- ),
- (
- 4,
- "child1_1",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 8,
- ]
- ],
- ),
- (
- 6,
- "child2_1",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 8,
- ]
- ],
- ),
- (
- 7,
- "child1_2",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 9,
- ]
- ],
- ),
- (
- 5,
- "child2_2",
- [
- [
- ["_aexit", "__aexit__", "blocho_caller", "bloch"],
- 9,
- ]
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
),
- ],
+ TaskInfo(
+ task_id=3,
+ task_name="timer",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiter3", "", 0),
+ FrameInfo("awaiter2", "", 0),
+ FrameInfo("awaiter", "", 0)
+ ],
+ task_name=4
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiter1_3", "", 0),
+ FrameInfo("awaiter1_2", "", 0),
+ FrameInfo("awaiter1", "", 0)
+ ],
+ task_name=5
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiter1_3", "", 0),
+ FrameInfo("awaiter1_2", "", 0),
+ FrameInfo("awaiter1", "", 0)
+ ],
+ task_name=6
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("awaiter3", "", 0),
+ FrameInfo("awaiter2", "", 0),
+ FrameInfo("awaiter", "", 0)
+ ],
+ task_name=7
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=8,
+ task_name="root1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("main", "", 0)
+ ],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=9,
+ task_name="root2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("main", "", 0)
+ ],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="child1_1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=8
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="child2_1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=8
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=7,
+ task_name="child1_2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=9
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=5,
+ task_name="child2_2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("_aexit", "", 0),
+ FrameInfo("__aexit__", "", 0),
+ FrameInfo("blocho_caller", "", 0),
+ FrameInfo("bloch", "", 0)
+ ],
+ task_name=9
+ )
+ ]
+ )
+ ]
),
- (0, []),
+ AwaitedInfo(thread_id=0, awaited_by=[])
),
(
[
- [1, "0x2", "Task-1", "", "", "0x0"],
+ [1, "0x2", "Task-1", "", "", "", "0x0"],
[
1,
"0x3",
"timer",
+ "",
"awaiter3 -> awaiter2 -> awaiter",
"child1_1",
"0x4",
@@ -350,6 +699,7 @@ TEST_INPUTS_TABLE = [
1,
"0x3",
"timer",
+ "",
"awaiter1_3 -> awaiter1_2 -> awaiter1",
"child2_2",
"0x5",
@@ -358,6 +708,7 @@ TEST_INPUTS_TABLE = [
1,
"0x3",
"timer",
+ "",
"awaiter1_3 -> awaiter1_2 -> awaiter1",
"child2_1",
"0x6",
@@ -366,6 +717,7 @@ TEST_INPUTS_TABLE = [
1,
"0x3",
"timer",
+ "",
"awaiter3 -> awaiter2 -> awaiter",
"child1_2",
"0x7",
@@ -374,6 +726,7 @@ TEST_INPUTS_TABLE = [
1,
"0x8",
"root1",
+ "",
"_aexit -> __aexit__ -> main",
"Task-1",
"0x2",
@@ -382,6 +735,7 @@ TEST_INPUTS_TABLE = [
1,
"0x9",
"root2",
+ "",
"_aexit -> __aexit__ -> main",
"Task-1",
"0x2",
@@ -390,6 +744,7 @@ TEST_INPUTS_TABLE = [
1,
"0x4",
"child1_1",
+ "",
"_aexit -> __aexit__ -> blocho_caller -> bloch",
"root1",
"0x8",
@@ -398,6 +753,7 @@ TEST_INPUTS_TABLE = [
1,
"0x6",
"child2_1",
+ "",
"_aexit -> __aexit__ -> blocho_caller -> bloch",
"root1",
"0x8",
@@ -406,6 +762,7 @@ TEST_INPUTS_TABLE = [
1,
"0x7",
"child1_2",
+ "",
"_aexit -> __aexit__ -> blocho_caller -> bloch",
"root2",
"0x9",
@@ -414,6 +771,7 @@ TEST_INPUTS_TABLE = [
1,
"0x5",
"child2_2",
+ "",
"_aexit -> __aexit__ -> blocho_caller -> bloch",
"root2",
"0x9",
@@ -424,37 +782,107 @@ TEST_INPUTS_TABLE = [
[
# test case containing two roots
(
- (
- 9,
- [
- (5, "Task-5", []),
- (6, "Task-6", [[["main2"], 5]]),
- (7, "Task-7", [[["main2"], 5]]),
- (8, "Task-8", [[["main2"], 5]]),
- ],
+ AwaitedInfo(
+ thread_id=9,
+ awaited_by=[
+ TaskInfo(
+ task_id=5,
+ task_name="Task-5",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="Task-6",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main2", "", 0)],
+ task_name=5
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=7,
+ task_name="Task-7",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main2", "", 0)],
+ task_name=5
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=8,
+ task_name="Task-8",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main2", "", 0)],
+ task_name=5
+ )
+ ]
+ )
+ ]
),
- (
- 10,
- [
- (1, "Task-1", []),
- (2, "Task-2", [[["main"], 1]]),
- (3, "Task-3", [[["main"], 1]]),
- (4, "Task-4", [[["main"], 1]]),
- ],
+ AwaitedInfo(
+ thread_id=10,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=1
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=1
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="Task-4",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=1
+ )
+ ]
+ )
+ ]
),
- (11, []),
- (0, []),
+ AwaitedInfo(thread_id=11, awaited_by=[]),
+ AwaitedInfo(thread_id=0, awaited_by=[])
),
(
[
- [9, "0x5", "Task-5", "", "", "0x0"],
- [9, "0x6", "Task-6", "main2", "Task-5", "0x5"],
- [9, "0x7", "Task-7", "main2", "Task-5", "0x5"],
- [9, "0x8", "Task-8", "main2", "Task-5", "0x5"],
- [10, "0x1", "Task-1", "", "", "0x0"],
- [10, "0x2", "Task-2", "main", "Task-1", "0x1"],
- [10, "0x3", "Task-3", "main", "Task-1", "0x1"],
- [10, "0x4", "Task-4", "main", "Task-1", "0x1"],
+ [9, "0x5", "Task-5", "", "", "", "0x0"],
+ [9, "0x6", "Task-6", "", "main2", "Task-5", "0x5"],
+ [9, "0x7", "Task-7", "", "main2", "Task-5", "0x5"],
+ [9, "0x8", "Task-8", "", "main2", "Task-5", "0x5"],
+ [10, "0x1", "Task-1", "", "", "", "0x0"],
+ [10, "0x2", "Task-2", "", "main", "Task-1", "0x1"],
+ [10, "0x3", "Task-3", "", "main", "Task-1", "0x1"],
+ [10, "0x4", "Task-4", "", "main", "Task-1", "0x1"],
]
),
],
@@ -462,27 +890,72 @@ TEST_INPUTS_TABLE = [
# test case containing two roots, one of them without subtasks
(
[
- (1, [(2, "Task-5", [])]),
- (
- 3,
- [
- (4, "Task-1", []),
- (5, "Task-2", [[["main"], 4]]),
- (6, "Task-3", [[["main"], 4]]),
- (7, "Task-4", [[["main"], 4]]),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-5",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
),
- (8, []),
- (0, []),
+ AwaitedInfo(
+ thread_id=3,
+ awaited_by=[
+ TaskInfo(
+ task_id=4,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=5,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=4
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=6,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=4
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=7,
+ task_name="Task-4",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=4
+ )
+ ]
+ )
+ ]
+ ),
+ AwaitedInfo(thread_id=8, awaited_by=[]),
+ AwaitedInfo(thread_id=0, awaited_by=[])
]
),
(
[
- [1, "0x2", "Task-5", "", "", "0x0"],
- [3, "0x4", "Task-1", "", "", "0x0"],
- [3, "0x5", "Task-2", "main", "Task-1", "0x4"],
- [3, "0x6", "Task-3", "main", "Task-1", "0x4"],
- [3, "0x7", "Task-4", "main", "Task-1", "0x4"],
+ [1, "0x2", "Task-5", "", "", "", "0x0"],
+ [3, "0x4", "Task-1", "", "", "", "0x0"],
+ [3, "0x5", "Task-2", "", "main", "Task-1", "0x4"],
+ [3, "0x6", "Task-3", "", "main", "Task-1", "0x4"],
+ [3, "0x7", "Task-4", "", "main", "Task-1", "0x4"],
]
),
],
@@ -491,27 +964,52 @@ TEST_INPUTS_TABLE = [
# this test case contains a cycle: two tasks awaiting each other.
(
[
- (
- 1,
- [
- (2, "Task-1", []),
- (
- 3,
- "a",
- [[["awaiter2"], 4], [["main"], 2]],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
),
- (4, "b", [[["awaiter"], 3]]),
- ],
+ TaskInfo(
+ task_id=3,
+ task_name="a",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("awaiter2", "", 0)],
+ task_name=4
+ ),
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="b",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("awaiter", "", 0)],
+ task_name=3
+ )
+ ]
+ )
+ ]
),
- (0, []),
+ AwaitedInfo(thread_id=0, awaited_by=[])
]
),
(
[
- [1, "0x2", "Task-1", "", "", "0x0"],
- [1, "0x3", "a", "awaiter2", "b", "0x4"],
- [1, "0x3", "a", "main", "Task-1", "0x2"],
- [1, "0x4", "b", "awaiter", "a", "0x3"],
+ [1, "0x2", "Task-1", "", "", "", "0x0"],
+ [1, "0x3", "a", "", "awaiter2", "b", "0x4"],
+ [1, "0x3", "a", "", "main", "Task-1", "0x2"],
+ [1, "0x4", "b", "", "awaiter", "a", "0x3"],
]
),
],
@@ -519,41 +1017,95 @@ TEST_INPUTS_TABLE = [
# this test case contains two cycles
(
[
- (
- 1,
- [
- (2, "Task-1", []),
- (
- 3,
- "A",
- [[["nested", "nested", "task_b"], 4]],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="A",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_b", "", 0)
+ ],
+ task_name=4
+ )
+ ]
),
- (
- 4,
- "B",
- [
- [["nested", "nested", "task_c"], 5],
- [["nested", "nested", "task_a"], 3],
- ],
+ TaskInfo(
+ task_id=4,
+ task_name="B",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_c", "", 0)
+ ],
+ task_name=5
+ ),
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_a", "", 0)
+ ],
+ task_name=3
+ )
+ ]
),
- (5, "C", [[["nested", "nested"], 6]]),
- (
- 6,
- "Task-2",
- [[["nested", "nested", "task_b"], 4]],
+ TaskInfo(
+ task_id=5,
+ task_name="C",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0)
+ ],
+ task_name=6
+ )
+ ]
),
- ],
+ TaskInfo(
+ task_id=6,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("nested", "", 0),
+ FrameInfo("nested", "", 0),
+ FrameInfo("task_b", "", 0)
+ ],
+ task_name=4
+ )
+ ]
+ )
+ ]
),
- (0, []),
+ AwaitedInfo(thread_id=0, awaited_by=[])
]
),
(
[
- [1, "0x2", "Task-1", "", "", "0x0"],
+ [1, "0x2", "Task-1", "", "", "", "0x0"],
[
1,
"0x3",
"A",
+ "",
"nested -> nested -> task_b",
"B",
"0x4",
@@ -562,6 +1114,7 @@ TEST_INPUTS_TABLE = [
1,
"0x4",
"B",
+ "",
"nested -> nested -> task_c",
"C",
"0x5",
@@ -570,6 +1123,7 @@ TEST_INPUTS_TABLE = [
1,
"0x4",
"B",
+ "",
"nested -> nested -> task_a",
"A",
"0x3",
@@ -578,6 +1132,7 @@ TEST_INPUTS_TABLE = [
1,
"0x5",
"C",
+ "",
"nested -> nested",
"Task-2",
"0x6",
@@ -586,6 +1141,7 @@ TEST_INPUTS_TABLE = [
1,
"0x6",
"Task-2",
+ "",
"nested -> nested -> task_b",
"B",
"0x4",
@@ -600,7 +1156,8 @@ class TestAsyncioToolsTree(unittest.TestCase):
def test_asyncio_utils(self):
for input_, tree in TEST_INPUTS_TREE:
with self.subTest(input_):
- self.assertEqual(tools.build_async_tree(input_), tree)
+ result = tools.build_async_tree(input_)
+ self.assertEqual(result, tree)
def test_asyncio_utils_cycles(self):
for input_, cycles in TEST_INPUTS_CYCLES_TREE:
@@ -615,7 +1172,8 @@ class TestAsyncioToolsTable(unittest.TestCase):
def test_asyncio_utils(self):
for input_, table in TEST_INPUTS_TABLE:
with self.subTest(input_):
- self.assertEqual(tools.build_task_table(input_), table)
+ result = tools.build_task_table(input_)
+ self.assertEqual(result, table)
class TestAsyncioToolsBasic(unittest.TestCase):
@@ -632,26 +1190,67 @@ class TestAsyncioToolsBasic(unittest.TestCase):
self.assertEqual(tools.build_task_table(result), expected_output)
def test_only_independent_tasks_tree(self):
- input_ = [(1, [(10, "taskA", []), (11, "taskB", [])])]
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=10,
+ task_name="taskA",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=11,
+ task_name="taskB",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
+ )
+ ]
expected = [["└── (T) taskA"], ["└── (T) taskB"]]
result = tools.build_async_tree(input_)
self.assertEqual(sorted(result), sorted(expected))
def test_only_independent_tasks_table(self):
- input_ = [(1, [(10, "taskA", []), (11, "taskB", [])])]
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=10,
+ task_name="taskA",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=11,
+ task_name="taskB",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
+ )
+ ]
self.assertEqual(
tools.build_task_table(input_),
- [[1, "0xa", "taskA", "", "", "0x0"], [1, "0xb", "taskB", "", "", "0x0"]],
+ [[1, '0xa', 'taskA', '', '', '', '0x0'], [1, '0xb', 'taskB', '', '', '', '0x0']]
)
def test_single_task_tree(self):
"""Test build_async_tree with a single task and no awaits."""
result = [
- (
- 1,
- [
- (2, "Task-1", []),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
)
]
expected_output = [
@@ -664,25 +1263,50 @@ class TestAsyncioToolsBasic(unittest.TestCase):
def test_single_task_table(self):
"""Test build_task_table with a single task and no awaits."""
result = [
- (
- 1,
- [
- (2, "Task-1", []),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
)
]
- expected_output = [[1, "0x2", "Task-1", "", "", "0x0"]]
+ expected_output = [[1, '0x2', 'Task-1', '', '', '', '0x0']]
self.assertEqual(tools.build_task_table(result), expected_output)
def test_cycle_detection(self):
"""Test build_async_tree raises CycleFoundException for cyclic input."""
result = [
- (
- 1,
- [
- (2, "Task-1", [[["main"], 3]]),
- (3, "Task-2", [[["main"], 2]]),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=3
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=2
+ )
+ ]
+ )
+ ]
)
]
with self.assertRaises(tools.CycleFoundException) as context:
@@ -692,13 +1316,38 @@ class TestAsyncioToolsBasic(unittest.TestCase):
def test_complex_tree(self):
"""Test build_async_tree with a more complex tree structure."""
result = [
- (
- 1,
- [
- (2, "Task-1", []),
- (3, "Task-2", [[["main"], 2]]),
- (4, "Task-3", [[["main"], 3]]),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=3
+ )
+ ]
+ )
+ ]
)
]
expected_output = [
@@ -715,30 +1364,76 @@ class TestAsyncioToolsBasic(unittest.TestCase):
def test_complex_table(self):
"""Test build_task_table with a more complex tree structure."""
result = [
- (
- 1,
- [
- (2, "Task-1", []),
- (3, "Task-2", [[["main"], 2]]),
- (4, "Task-3", [[["main"], 3]]),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=2,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=4,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("main", "", 0)],
+ task_name=3
+ )
+ ]
+ )
+ ]
)
]
expected_output = [
- [1, "0x2", "Task-1", "", "", "0x0"],
- [1, "0x3", "Task-2", "main", "Task-1", "0x2"],
- [1, "0x4", "Task-3", "main", "Task-2", "0x3"],
+ [1, '0x2', 'Task-1', '', '', '', '0x0'],
+ [1, '0x3', 'Task-2', '', 'main', 'Task-1', '0x2'],
+ [1, '0x4', 'Task-3', '', 'main', 'Task-2', '0x3']
]
self.assertEqual(tools.build_task_table(result), expected_output)
def test_deep_coroutine_chain(self):
input_ = [
- (
- 1,
- [
- (10, "leaf", [[["c1", "c2", "c3", "c4", "c5"], 11]]),
- (11, "root", []),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=10,
+ task_name="leaf",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("c1", "", 0),
+ FrameInfo("c2", "", 0),
+ FrameInfo("c3", "", 0),
+ FrameInfo("c4", "", 0),
+ FrameInfo("c5", "", 0)
+ ],
+ task_name=11
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=11,
+ task_name="root",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
)
]
expected = [
@@ -757,13 +1452,47 @@ class TestAsyncioToolsBasic(unittest.TestCase):
def test_multiple_cycles_same_node(self):
input_ = [
- (
- 1,
- [
- (1, "Task-A", [[["call1"], 2]]),
- (2, "Task-B", [[["call2"], 3]]),
- (3, "Task-C", [[["call3"], 1], [["call4"], 2]]),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Task-A",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("call1", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name="Task-B",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("call2", "", 0)],
+ task_name=3
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-C",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("call3", "", 0)],
+ task_name=1
+ ),
+ CoroInfo(
+ call_stack=[FrameInfo("call4", "", 0)],
+ task_name=2
+ )
+ ]
+ )
+ ]
)
]
with self.assertRaises(tools.CycleFoundException) as ctx:
@@ -772,19 +1501,43 @@ class TestAsyncioToolsBasic(unittest.TestCase):
self.assertTrue(any(set(c) == {1, 2, 3} for c in cycles))
def test_table_output_format(self):
- input_ = [(1, [(1, "Task-A", [[["foo"], 2]]), (2, "Task-B", [])])]
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Task-A",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("foo", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name="Task-B",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
+ )
+ ]
table = tools.build_task_table(input_)
for row in table:
- self.assertEqual(len(row), 6)
+ self.assertEqual(len(row), 7)
self.assertIsInstance(row[0], int) # thread ID
self.assertTrue(
isinstance(row[1], str) and row[1].startswith("0x")
) # hex task ID
self.assertIsInstance(row[2], str) # task name
- self.assertIsInstance(row[3], str) # coroutine chain
- self.assertIsInstance(row[4], str) # awaiter name
+ self.assertIsInstance(row[3], str) # coroutine stack
+ self.assertIsInstance(row[4], str) # coroutine chain
+ self.assertIsInstance(row[5], str) # awaiter name
self.assertTrue(
- isinstance(row[5], str) and row[5].startswith("0x")
+ isinstance(row[6], str) and row[6].startswith("0x")
) # hex awaiter ID
@@ -792,28 +1545,86 @@ class TestAsyncioToolsEdgeCases(unittest.TestCase):
def test_task_awaits_self(self):
"""A task directly awaits itself - should raise a cycle."""
- input_ = [(1, [(1, "Self-Awaiter", [[["loopback"], 1]])])]
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Self-Awaiter",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("loopback", "", 0)],
+ task_name=1
+ )
+ ]
+ )
+ ]
+ )
+ ]
with self.assertRaises(tools.CycleFoundException) as ctx:
tools.build_async_tree(input_)
self.assertIn([1, 1], ctx.exception.cycles)
def test_task_with_missing_awaiter_id(self):
"""Awaiter ID not in task list - should not crash, just show 'Unknown'."""
- input_ = [(1, [(1, "Task-A", [[["coro"], 999]])])] # 999 not defined
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Task-A",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("coro", "", 0)],
+ task_name=999
+ )
+ ]
+ )
+ ]
+ )
+ ]
table = tools.build_task_table(input_)
self.assertEqual(len(table), 1)
- self.assertEqual(table[0][4], "Unknown")
+ self.assertEqual(table[0][5], "Unknown")
def test_duplicate_coroutine_frames(self):
"""Same coroutine frame repeated under a parent - should deduplicate."""
input_ = [
- (
- 1,
- [
- (1, "Task-1", [[["frameA"], 2], [["frameA"], 3]]),
- (2, "Task-2", []),
- (3, "Task-3", []),
- ],
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="Task-1",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("frameA", "", 0)],
+ task_name=2
+ ),
+ CoroInfo(
+ call_stack=[FrameInfo("frameA", "", 0)],
+ task_name=3
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name="Task-2",
+ coroutine_stack=[],
+ awaited_by=[]
+ ),
+ TaskInfo(
+ task_id=3,
+ task_name="Task-3",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
)
]
tree = tools.build_async_tree(input_)
@@ -830,14 +1641,63 @@ class TestAsyncioToolsEdgeCases(unittest.TestCase):
def test_task_with_no_name(self):
"""Task with no name in id2name - should still render with fallback."""
- input_ = [(1, [(1, "root", [[["f1"], 2]]), (2, None, [])])]
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="root",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[FrameInfo("f1", "", 0)],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name=None,
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
+ )
+ ]
# If name is None, fallback to string should not crash
tree = tools.build_async_tree(input_)
self.assertIn("(T) None", "\n".join(tree[0]))
def test_tree_rendering_with_custom_emojis(self):
"""Pass custom emojis to the tree renderer."""
- input_ = [(1, [(1, "MainTask", [[["f1", "f2"], 2]]), (2, "SubTask", [])])]
+ input_ = [
+ AwaitedInfo(
+ thread_id=1,
+ awaited_by=[
+ TaskInfo(
+ task_id=1,
+ task_name="MainTask",
+ coroutine_stack=[],
+ awaited_by=[
+ CoroInfo(
+ call_stack=[
+ FrameInfo("f1", "", 0),
+ FrameInfo("f2", "", 0)
+ ],
+ task_name=2
+ )
+ ]
+ ),
+ TaskInfo(
+ task_id=2,
+ task_name="SubTask",
+ coroutine_stack=[],
+ awaited_by=[]
+ )
+ ]
+ )
+ ]
tree = tools.build_async_tree(input_, task_emoji="🧵", cor_emoji="🔁")
flat = "\n".join(tree[0])
self.assertIn("🧵 MainTask", flat)
diff --git a/Lib/test/test_audit.py b/Lib/test/test_audit.py
index 5f9eb381f60..077765fcda2 100644
--- a/Lib/test/test_audit.py
+++ b/Lib/test/test_audit.py
@@ -322,6 +322,14 @@ class AuditTest(unittest.TestCase):
if returncode:
self.fail(stderr)
+ @support.support_remote_exec_only
+ @support.cpython_only
+ def test_sys_remote_exec(self):
+ returncode, events, stderr = self.run_python("test_sys_remote_exec")
+ self.assertTrue(any(["sys.remote_exec" in event for event in events]))
+ self.assertTrue(any(["cpython.remote_debugger_script" in event for event in events]))
+ if returncode:
+ self.fail(stderr)
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_build_details.py b/Lib/test/test_build_details.py
index 05ce163a337..ba4b8c5aa9b 100644
--- a/Lib/test/test_build_details.py
+++ b/Lib/test/test_build_details.py
@@ -117,12 +117,26 @@ class CPythonBuildDetailsTests(unittest.TestCase, FormatTestsBase):
    # Override generic format tests with tests for our specific implementation.
@needs_installed_python
- @unittest.skipIf(is_android or is_apple_mobile, 'Android and iOS run tests via a custom testbed method that changes sys.executable')
+ @unittest.skipIf(
+ is_android or is_apple_mobile,
+ 'Android and iOS run tests via a custom testbed method that changes sys.executable'
+ )
def test_base_interpreter(self):
value = self.key('base_interpreter')
self.assertEqual(os.path.realpath(value), os.path.realpath(sys.executable))
+ @needs_installed_python
+ @unittest.skipIf(
+ is_android or is_apple_mobile,
+ "Android and iOS run tests via a custom testbed method that doesn't ship headers"
+ )
+ def test_c_api(self):
+ value = self.key('c_api')
+ self.assertTrue(os.path.exists(os.path.join(value['headers'], 'Python.h')))
+ version = sysconfig.get_config_var('VERSION')
+ self.assertTrue(os.path.exists(os.path.join(value['pkgconfig_path'], f'python-{version}.pc')))
+
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py
index d221aa5e1d9..14fe3355239 100644
--- a/Lib/test/test_builtin.py
+++ b/Lib/test/test_builtin.py
@@ -2991,7 +2991,8 @@ class TestType(unittest.TestCase):
def load_tests(loader, tests, pattern):
from doctest import DocTestSuite
- tests.addTest(DocTestSuite(builtins))
+ if sys.float_repr_style == 'short':
+ tests.addTest(DocTestSuite(builtins))
return tests
if __name__ == "__main__":
diff --git a/Lib/test/test_calendar.py b/Lib/test/test_calendar.py
index 7ade4271b7a..bc39c86b8cf 100644
--- a/Lib/test/test_calendar.py
+++ b/Lib/test/test_calendar.py
@@ -417,7 +417,7 @@ class OutputTestCase(unittest.TestCase):
self.check_htmlcalendar_encoding('utf-8', 'utf-8')
def test_output_htmlcalendar_encoding_default(self):
- self.check_htmlcalendar_encoding(None, sys.getdefaultencoding())
+ self.check_htmlcalendar_encoding(None, 'utf-8')
def test_yeardatescalendar(self):
def shrink(cal):
diff --git a/Lib/test/test_capi/test_abstract.py b/Lib/test/test_capi/test_abstract.py
index 7d548ae87c0..3a2ed9f5db8 100644
--- a/Lib/test/test_capi/test_abstract.py
+++ b/Lib/test/test_capi/test_abstract.py
@@ -1077,6 +1077,31 @@ class CAPITest(unittest.TestCase):
with self.assertRaisesRegex(TypeError, regex):
PyIter_NextItem(10)
+ def test_object_setattr_null_exc(self):
+ class Obj:
+ pass
+ obj = Obj()
+ obj.attr = 123
+
+ exc = ValueError("error")
+ with self.assertRaises(SystemError) as cm:
+ _testcapi.object_setattr_null_exc(obj, 'attr', exc)
+ self.assertIs(cm.exception.__context__, exc)
+ self.assertIsNone(cm.exception.__cause__)
+ self.assertHasAttr(obj, 'attr')
+
+ with self.assertRaises(SystemError) as cm:
+ _testcapi.object_setattrstring_null_exc(obj, 'attr', exc)
+ self.assertIs(cm.exception.__context__, exc)
+ self.assertIsNone(cm.exception.__cause__)
+ self.assertHasAttr(obj, 'attr')
+
+ with self.assertRaises(SystemError) as cm:
+ # undecodable name
+ _testcapi.object_setattrstring_null_exc(obj, b'\xff', exc)
+ self.assertIs(cm.exception.__context__, exc)
+ self.assertIsNone(cm.exception.__cause__)
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_capi/test_config.py b/Lib/test/test_capi/test_config.py
index a2d70dd3af4..04a27de8d84 100644
--- a/Lib/test/test_capi/test_config.py
+++ b/Lib/test/test_capi/test_config.py
@@ -3,7 +3,6 @@ Tests PyConfig_Get() and PyConfig_Set() C API (PEP 741).
"""
import os
import sys
-import sysconfig
import types
import unittest
from test import support
diff --git a/Lib/test/test_capi/test_misc.py b/Lib/test/test_capi/test_misc.py
index f74694a7a74..ef950f5df04 100644
--- a/Lib/test/test_capi/test_misc.py
+++ b/Lib/test/test_capi/test_misc.py
@@ -413,11 +413,13 @@ class CAPITest(unittest.TestCase):
@support.requires_resource('cpu')
@support.skip_emscripten_stack_overflow()
+ @support.skip_wasi_stack_overflow()
def test_trashcan_python_class1(self):
self.do_test_trashcan_python_class(list)
@support.requires_resource('cpu')
@support.skip_emscripten_stack_overflow()
+ @support.skip_wasi_stack_overflow()
def test_trashcan_python_class2(self):
from _testcapi import MyList
self.do_test_trashcan_python_class(MyList)
diff --git a/Lib/test/test_capi/test_object.py b/Lib/test/test_capi/test_object.py
index cd772120bde..d4056727d07 100644
--- a/Lib/test/test_capi/test_object.py
+++ b/Lib/test/test_capi/test_object.py
@@ -221,6 +221,7 @@ class CAPITest(unittest.TestCase):
"""
self.check_negative_refcount(code)
+ @support.requires_resource('cpu')
def test_decref_delayed(self):
# gh-130519: Test that _PyObject_XDecRefDelayed() and QSBR code path
# handles destructors that are possibly re-entrant or trigger a GC.
diff --git a/Lib/test/test_capi/test_opt.py b/Lib/test/test_capi/test_opt.py
index cb6eae48414..7be1c9eebb3 100644
--- a/Lib/test/test_capi/test_opt.py
+++ b/Lib/test/test_capi/test_opt.py
@@ -407,12 +407,12 @@ class TestUops(unittest.TestCase):
x = 0
for i in range(m):
for j in MyIter(n):
- x += 1000*i + j
+ x += j
return x
- x = testfunc(TIER2_THRESHOLD, TIER2_THRESHOLD)
+ x = testfunc(TIER2_THRESHOLD, 2)
- self.assertEqual(x, sum(range(TIER2_THRESHOLD)) * TIER2_THRESHOLD * 1001)
+ self.assertEqual(x, sum(range(TIER2_THRESHOLD)) * 2)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -678,7 +678,7 @@ class TestUopsOptimization(unittest.TestCase):
self.assertLessEqual(len(guard_nos_float_count), 1)
# TODO gh-115506: this assertion may change after propagating constants.
# We'll also need to verify that propagation actually occurs.
- self.assertIn("_BINARY_OP_ADD_FLOAT", uops)
+ self.assertIn("_BINARY_OP_ADD_FLOAT__NO_DECREF_INPUTS", uops)
def test_float_subtract_constant_propagation(self):
def testfunc(n):
@@ -700,7 +700,7 @@ class TestUopsOptimization(unittest.TestCase):
self.assertLessEqual(len(guard_nos_float_count), 1)
# TODO gh-115506: this assertion may change after propagating constants.
# We'll also need to verify that propagation actually occurs.
- self.assertIn("_BINARY_OP_SUBTRACT_FLOAT", uops)
+ self.assertIn("_BINARY_OP_SUBTRACT_FLOAT__NO_DECREF_INPUTS", uops)
def test_float_multiply_constant_propagation(self):
def testfunc(n):
@@ -722,7 +722,7 @@ class TestUopsOptimization(unittest.TestCase):
self.assertLessEqual(len(guard_nos_float_count), 1)
# TODO gh-115506: this assertion may change after propagating constants.
# We'll also need to verify that propagation actually occurs.
- self.assertIn("_BINARY_OP_MULTIPLY_FLOAT", uops)
+ self.assertIn("_BINARY_OP_MULTIPLY_FLOAT__NO_DECREF_INPUTS", uops)
def test_add_unicode_propagation(self):
def testfunc(n):
@@ -1183,6 +1183,17 @@ class TestUopsOptimization(unittest.TestCase):
self.assertIsNotNone(ex)
self.assertIn("_RETURN_GENERATOR", get_opnames(ex))
+ def test_for_iter(self):
+ def testfunc(n):
+ t = 0
+ for i in set(range(n)):
+ t += i
+ return t
+ res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
+ self.assertEqual(res, TIER2_THRESHOLD * (TIER2_THRESHOLD - 1) // 2)
+ self.assertIsNotNone(ex)
+ self.assertIn("_FOR_ITER_TIER_TWO", get_opnames(ex))
+
@unittest.skip("Tracing into generators currently isn't supported.")
def test_for_iter_gen(self):
def gen(n):
@@ -1370,6 +1381,21 @@ class TestUopsOptimization(unittest.TestCase):
# Removed guard
self.assertNotIn("_CHECK_FUNCTION_EXACT_ARGS", uops)
+ def test_method_guards_removed_or_reduced(self):
+ def testfunc(n):
+ result = 0
+ for i in range(n):
+ result += test_bound_method(i)
+ return result
+ res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
+ self.assertEqual(res, sum(range(TIER2_THRESHOLD)))
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ self.assertIn("_PUSH_FRAME", uops)
+ # Strength reduced version
+ self.assertIn("_CHECK_FUNCTION_VERSION_INLINE", uops)
+ self.assertNotIn("_CHECK_METHOD_VERSION", uops)
+
def test_jit_error_pops(self):
"""
Tests that the correct number of pops are inserted into the
@@ -1655,13 +1681,11 @@ class TestUopsOptimization(unittest.TestCase):
self.assertIn("_CONTAINS_OP_DICT", uops)
self.assertNotIn("_TO_BOOL_BOOL", uops)
-
def test_remove_guard_for_known_type_str(self):
def f(n):
for i in range(n):
false = i == TIER2_THRESHOLD
empty = "X"[:false]
- empty += "" # Make JIT realize this is a string.
if empty:
return 1
return 0
@@ -1767,11 +1791,12 @@ class TestUopsOptimization(unittest.TestCase):
self.assertNotIn("_GUARD_TOS_UNICODE", uops)
self.assertIn("_BINARY_OP_ADD_UNICODE", uops)
- def test_call_type_1(self):
+ def test_call_type_1_guards_removed(self):
def testfunc(n):
x = 0
for _ in range(n):
- x += type(42) is int
+ foo = eval('42')
+ x += type(foo) is int
return x
res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
@@ -1782,6 +1807,25 @@ class TestUopsOptimization(unittest.TestCase):
self.assertNotIn("_GUARD_NOS_NULL", uops)
self.assertNotIn("_GUARD_CALLABLE_TYPE_1", uops)
+ def test_call_type_1_known_type(self):
+ def testfunc(n):
+ x = 0
+ for _ in range(n):
+ x += type(42) is int
+ return x
+
+ res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
+ self.assertEqual(res, TIER2_THRESHOLD)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ # When the result of type(...) is known, _CALL_TYPE_1 is replaced with
+        # _POP_CALL_ONE_LOAD_CONST_INLINE_BORROW, which is optimized away in
+ # remove_unneeded_uops.
+ self.assertNotIn("_CALL_TYPE_1", uops)
+ self.assertNotIn("_POP_CALL_ONE_LOAD_CONST_INLINE_BORROW", uops)
+ self.assertNotIn("_POP_CALL_LOAD_CONST_INLINE_BORROW", uops)
+ self.assertNotIn("_POP_TOP_LOAD_CONST_INLINE_BORROW", uops)
+
def test_call_type_1_result_is_const(self):
def testfunc(n):
x = 0
@@ -1795,7 +1839,6 @@ class TestUopsOptimization(unittest.TestCase):
self.assertEqual(res, TIER2_THRESHOLD)
self.assertIsNotNone(ex)
uops = get_opnames(ex)
- self.assertIn("_CALL_TYPE_1", uops)
self.assertNotIn("_GUARD_IS_NOT_NONE_POP", uops)
def test_call_str_1(self):
@@ -1925,6 +1968,49 @@ class TestUopsOptimization(unittest.TestCase):
self.assertNotIn("_GUARD_NOS_INT", uops)
self.assertNotIn("_GUARD_TOS_INT", uops)
+ def test_call_len_known_length_small_int(self):
+ def testfunc(n):
+ x = 0
+ for _ in range(n):
+ t = (1, 2, 3, 4, 5)
+ if len(t) == 5:
+ x += 1
+ return x
+
+ res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
+ self.assertEqual(res, TIER2_THRESHOLD)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ # When the length is < _PY_NSMALLPOSINTS, the len() call is replaced
+ # with just an inline load.
+ self.assertNotIn("_CALL_LEN", uops)
+ self.assertNotIn("_POP_CALL_ONE_LOAD_CONST_INLINE_BORROW", uops)
+ self.assertNotIn("_POP_CALL_LOAD_CONST_INLINE_BORROW", uops)
+ self.assertNotIn("_POP_TOP_LOAD_CONST_INLINE_BORROW", uops)
+
+ def test_call_len_known_length(self):
+ def testfunc(n):
+ class C:
+ t = tuple(range(300))
+
+ x = 0
+ for _ in range(n):
+ if len(C.t) == 300: # comparison + guard removed
+ x += 1
+ return x
+
+ res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
+ self.assertEqual(res, TIER2_THRESHOLD)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ # When the length is >= _PY_NSMALLPOSINTS, we cannot replace
+ # the len() call with an inline load, but knowing the exact
+ # length allows us to optimize more code, such as conditionals
+ # in this case
+ self.assertIn("_CALL_LEN", uops)
+ self.assertNotIn("_COMPARE_OP_INT", uops)
+ self.assertNotIn("_GUARD_IS_TRUE_POP", uops)
+
def test_get_len_with_const_tuple(self):
def testfunc(n):
x = 0.0
@@ -2219,9 +2305,176 @@ class TestUopsOptimization(unittest.TestCase):
self.assertNotIn("_LOAD_ATTR_METHOD_NO_DICT", uops)
self.assertNotIn("_LOAD_ATTR_METHOD_LAZY_DICT", uops)
+ def test_float_op_refcount_elimination(self):
+ def testfunc(args):
+ a, b, n = args
+ c = 0.0
+ for _ in range(n):
+ c += a + b
+ return c
+
+ res, ex = self._run_with_optimizer(testfunc, (0.1, 0.1, TIER2_THRESHOLD))
+ self.assertAlmostEqual(res, TIER2_THRESHOLD * (0.1 + 0.1))
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ self.assertIn("_BINARY_OP_ADD_FLOAT__NO_DECREF_INPUTS", uops)
+
+ def test_remove_guard_for_slice_list(self):
+ def f(n):
+ for i in range(n):
+ false = i == TIER2_THRESHOLD
+ sliced = [1, 2, 3][:false]
+ if sliced:
+ return 1
+ return 0
+
+ res, ex = self._run_with_optimizer(f, TIER2_THRESHOLD)
+ self.assertEqual(res, 0)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ self.assertIn("_TO_BOOL_LIST", uops)
+ self.assertNotIn("_GUARD_TOS_LIST", uops)
+
+ def test_remove_guard_for_slice_tuple(self):
+ def f(n):
+ for i in range(n):
+ false = i == TIER2_THRESHOLD
+ a, b = (1, 2, 3)[: false + 2]
+
+ _, ex = self._run_with_optimizer(f, TIER2_THRESHOLD)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ self.assertIn("_UNPACK_SEQUENCE_TWO_TUPLE", uops)
+ self.assertNotIn("_GUARD_TOS_TUPLE", uops)
+
+ def test_unary_invert_long_type(self):
+ def testfunc(n):
+ for _ in range(n):
+ a = 9397
+ x = ~a + ~a
+
+ testfunc(TIER2_THRESHOLD)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+
+ self.assertNotIn("_GUARD_TOS_INT", uops)
+ self.assertNotIn("_GUARD_NOS_INT", uops)
+
+ def test_attr_promotion_failure(self):
+ # We're not testing for any specific uops here, just
+ # testing it doesn't crash.
+        # testing that it doesn't crash.
+ import _testinternalcapi
+ import _opcode
+ import email
+
+ def get_first_executor(func):
+ code = func.__code__
+ co_code = code.co_code
+ for i in range(0, len(co_code), 2):
+ try:
+ return _opcode.get_executor(code, i)
+ except ValueError:
+ pass
+ return None
+
+ def testfunc(n):
+ for _ in range(n):
+ email.jit_testing = None
+ prompt = email.jit_testing
+ del email.jit_testing
+
+
+ testfunc(_testinternalcapi.TIER2_THRESHOLD)
+ ex = get_first_executor(testfunc)
+ assert ex is not None
+ """))
+
+ def test_pop_top_specialize_none(self):
+ def testfunc(n):
+ for _ in range(n):
+ global_identity(None)
+
+ testfunc(TIER2_THRESHOLD)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+
+ self.assertIn("_POP_TOP_NOP", uops)
+
+ def test_pop_top_specialize_int(self):
+ def testfunc(n):
+ for _ in range(n):
+ global_identity(100000)
+
+ testfunc(TIER2_THRESHOLD)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+
+ self.assertIn("_POP_TOP_INT", uops)
+
+ def test_pop_top_specialize_float(self):
+ def testfunc(n):
+ for _ in range(n):
+ global_identity(1e6)
+
+ testfunc(TIER2_THRESHOLD)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+
+ self.assertIn("_POP_TOP_FLOAT", uops)
+
+
+ def test_unary_negative_long_float_type(self):
+ def testfunc(n):
+ for _ in range(n):
+ a = 9397
+ f = 9397.0
+ x = -a + -a
+ y = -f + -f
+
+ testfunc(TIER2_THRESHOLD)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+
+ self.assertNotIn("_GUARD_TOS_INT", uops)
+ self.assertNotIn("_GUARD_NOS_INT", uops)
+ self.assertNotIn("_GUARD_TOS_FLOAT", uops)
+ self.assertNotIn("_GUARD_NOS_FLOAT", uops)
+
+ def test_binary_op_constant_evaluate(self):
+ def testfunc(n):
+ for _ in range(n):
+ 2 ** 65
+
+ testfunc(TIER2_THRESHOLD)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+
+ # For now... until we constant propagate it away.
+ self.assertIn("_BINARY_OP", uops)
+
def global_identity(x):
return x
+class TestObject:
+ def test(self, *args, **kwargs):
+ return args[0]
+
+test_object = TestObject()
+test_bound_method = TestObject.test.__get__(test_object)
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_capi/test_sys.py b/Lib/test/test_capi/test_sys.py
index d3a9b378e77..3793ce2461e 100644
--- a/Lib/test/test_capi/test_sys.py
+++ b/Lib/test/test_capi/test_sys.py
@@ -19,6 +19,68 @@ class CAPITest(unittest.TestCase):
maxDiff = None
+ @unittest.skipIf(_testlimitedcapi is None, 'need _testlimitedcapi module')
+ def test_sys_getattr(self):
+ # Test PySys_GetAttr()
+ sys_getattr = _testlimitedcapi.sys_getattr
+
+ self.assertIs(sys_getattr('stdout'), sys.stdout)
+ with support.swap_attr(sys, '\U0001f40d', 42):
+ self.assertEqual(sys_getattr('\U0001f40d'), 42)
+
+ with self.assertRaisesRegex(RuntimeError, r'lost sys\.nonexistent'):
+ sys_getattr('nonexistent')
+ with self.assertRaisesRegex(RuntimeError, r'lost sys\.\U0001f40d'):
+ sys_getattr('\U0001f40d')
+ self.assertRaises(TypeError, sys_getattr, 1)
+ self.assertRaises(TypeError, sys_getattr, [])
+ # CRASHES sys_getattr(NULL)
+
+ @unittest.skipIf(_testlimitedcapi is None, 'need _testlimitedcapi module')
+ def test_sys_getattrstring(self):
+ # Test PySys_GetAttrString()
+ getattrstring = _testlimitedcapi.sys_getattrstring
+
+ self.assertIs(getattrstring(b'stdout'), sys.stdout)
+ with support.swap_attr(sys, '\U0001f40d', 42):
+ self.assertEqual(getattrstring('\U0001f40d'.encode()), 42)
+
+ with self.assertRaisesRegex(RuntimeError, r'lost sys\.nonexistent'):
+ getattrstring(b'nonexistent')
+ with self.assertRaisesRegex(RuntimeError, r'lost sys\.\U0001f40d'):
+ getattrstring('\U0001f40d'.encode())
+ self.assertRaises(UnicodeDecodeError, getattrstring, b'\xff')
+ # CRASHES getattrstring(NULL)
+
+ @unittest.skipIf(_testlimitedcapi is None, 'need _testlimitedcapi module')
+ def test_sys_getoptionalattr(self):
+ # Test PySys_GetOptionalAttr()
+ getoptionalattr = _testlimitedcapi.sys_getoptionalattr
+
+ self.assertIs(getoptionalattr('stdout'), sys.stdout)
+ with support.swap_attr(sys, '\U0001f40d', 42):
+ self.assertEqual(getoptionalattr('\U0001f40d'), 42)
+
+ self.assertIs(getoptionalattr('nonexistent'), AttributeError)
+ self.assertIs(getoptionalattr('\U0001f40d'), AttributeError)
+ self.assertRaises(TypeError, getoptionalattr, 1)
+ self.assertRaises(TypeError, getoptionalattr, [])
+ # CRASHES getoptionalattr(NULL)
+
+ @unittest.skipIf(_testlimitedcapi is None, 'need _testlimitedcapi module')
+ def test_sys_getoptionalattrstring(self):
+ # Test PySys_GetOptionalAttrString()
+ getoptionalattrstring = _testlimitedcapi.sys_getoptionalattrstring
+
+ self.assertIs(getoptionalattrstring(b'stdout'), sys.stdout)
+ with support.swap_attr(sys, '\U0001f40d', 42):
+ self.assertEqual(getoptionalattrstring('\U0001f40d'.encode()), 42)
+
+ self.assertIs(getoptionalattrstring(b'nonexistent'), AttributeError)
+ self.assertIs(getoptionalattrstring('\U0001f40d'.encode()), AttributeError)
+ self.assertRaises(UnicodeDecodeError, getoptionalattrstring, b'\xff')
+ # CRASHES getoptionalattrstring(NULL)
+
@support.cpython_only
@unittest.skipIf(_testlimitedcapi is None, 'need _testlimitedcapi module')
def test_sys_getobject(self):
@@ -29,7 +91,7 @@ class CAPITest(unittest.TestCase):
with support.swap_attr(sys, '\U0001f40d', 42):
self.assertEqual(getobject('\U0001f40d'.encode()), 42)
- self.assertIs(getobject(b'nonexisting'), AttributeError)
+ self.assertIs(getobject(b'nonexistent'), AttributeError)
with support.catch_unraisable_exception() as cm:
self.assertIs(getobject(b'\xff'), AttributeError)
self.assertEqual(cm.unraisable.exc_type, UnicodeDecodeError)
diff --git a/Lib/test/test_capi/test_type.py b/Lib/test/test_capi/test_type.py
index 3c9974c7387..15fb4a93e2a 100644
--- a/Lib/test/test_capi/test_type.py
+++ b/Lib/test/test_capi/test_type.py
@@ -264,3 +264,13 @@ class TypeTests(unittest.TestCase):
ManualHeapType = _testcapi.ManualHeapType
for i in range(100):
self.assertIsInstance(ManualHeapType(), ManualHeapType)
+
+ def test_extension_managed_dict_type(self):
+ ManagedDictType = _testcapi.ManagedDictType
+ obj = ManagedDictType()
+ obj.foo = 42
+ self.assertEqual(obj.foo, 42)
+ self.assertEqual(obj.__dict__, {'foo': 42})
+ obj.__dict__ = {'bar': 3}
+ self.assertEqual(obj.__dict__, {'bar': 3})
+ self.assertEqual(obj.bar, 3)
diff --git a/Lib/test/test_capi/test_unicode.py b/Lib/test/test_capi/test_unicode.py
index 3408c10f426..6a9c60f3a6d 100644
--- a/Lib/test/test_capi/test_unicode.py
+++ b/Lib/test/test_capi/test_unicode.py
@@ -1739,6 +1739,20 @@ class CAPITest(unittest.TestCase):
# Check that the second call returns the same result
self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1))
+ @support.cpython_only
+ @unittest.skipIf(_testcapi is None, 'need _testcapi module')
+ def test_GET_CACHED_HASH(self):
+ from _testcapi import unicode_GET_CACHED_HASH
+ content_bytes = b'some new string'
+ # avoid parser interning & constant folding
+ obj = str(content_bytes, 'ascii')
+ # impl detail: fresh strings do not have cached hash
+ self.assertEqual(unicode_GET_CACHED_HASH(obj), -1)
+ # impl detail: adding string to a dict caches its hash
+ {obj: obj}
+ # impl detail: ASCII string hashes are equal to bytes ones
+ self.assertEqual(unicode_GET_CACHED_HASH(obj), hash(content_bytes))
+
class PyUnicodeWriterTest(unittest.TestCase):
def create_writer(self, size):
@@ -1776,6 +1790,13 @@ class PyUnicodeWriterTest(unittest.TestCase):
self.assertEqual(writer.finish(),
"ascii-latin1=\xE9-euro=\u20AC.")
+ def test_ascii(self):
+ writer = self.create_writer(0)
+ writer.write_ascii(b"Hello ", -1)
+ writer.write_ascii(b"", 0)
+ writer.write_ascii(b"Python! <truncated>", 6)
+ self.assertEqual(writer.finish(), "Hello Python")
+
def test_invalid_utf8(self):
writer = self.create_writer(0)
with self.assertRaises(UnicodeDecodeError):
diff --git a/Lib/test/test_class.py b/Lib/test/test_class.py
index 4c12d43556f..8c7a62a74ba 100644
--- a/Lib/test/test_class.py
+++ b/Lib/test/test_class.py
@@ -652,6 +652,7 @@ class ClassTests(unittest.TestCase):
a = A(hash(A.f)^(-1))
hash(a.f)
+ @cpython_only
def testSetattrWrapperNameIntern(self):
# Issue #25794: __setattr__ should intern the attribute name
class A:
diff --git a/Lib/test/test_code.py b/Lib/test/test_code.py
index 32cf8aacaf6..655f5a9be7f 100644
--- a/Lib/test/test_code.py
+++ b/Lib/test/test_code.py
@@ -701,6 +701,27 @@ class CodeTest(unittest.TestCase):
'checks': CO_FAST_LOCAL,
'res': CO_FAST_LOCAL,
},
+ defs.spam_with_global_and_attr_same_name: {},
+ defs.spam_full_args: {
+ 'a': POSONLY,
+ 'b': POSONLY,
+ 'c': POSORKW,
+ 'd': POSORKW,
+ 'e': KWONLY,
+ 'f': KWONLY,
+ 'args': VARARGS,
+ 'kwargs': VARKWARGS,
+ },
+ defs.spam_full_args_with_defaults: {
+ 'a': POSONLY,
+ 'b': POSONLY,
+ 'c': POSORKW,
+ 'd': POSORKW,
+ 'e': KWONLY,
+ 'f': KWONLY,
+ 'args': VARARGS,
+ 'kwargs': VARKWARGS,
+ },
defs.spam_args_attrs_and_builtins: {
'a': POSONLY,
'b': POSONLY,
@@ -714,6 +735,7 @@ class CodeTest(unittest.TestCase):
defs.spam_returns_arg: {
'x': POSORKW,
},
+ defs.spam_raises: {},
defs.spam_with_inner_not_closure: {
'eggs': CO_FAST_LOCAL,
},
@@ -934,6 +956,24 @@ class CodeTest(unittest.TestCase):
purelocals=5,
globalvars=6,
),
+ defs.spam_with_global_and_attr_same_name: new_var_counts(
+ globalvars=2,
+ attrs=1,
+ ),
+ defs.spam_full_args: new_var_counts(
+ posonly=2,
+ posorkw=2,
+ kwonly=2,
+ varargs=1,
+ varkwargs=1,
+ ),
+ defs.spam_full_args_with_defaults: new_var_counts(
+ posonly=2,
+ posorkw=2,
+ kwonly=2,
+ varargs=1,
+ varkwargs=1,
+ ),
defs.spam_args_attrs_and_builtins: new_var_counts(
posonly=2,
posorkw=2,
@@ -945,6 +985,9 @@ class CodeTest(unittest.TestCase):
defs.spam_returns_arg: new_var_counts(
posorkw=1,
),
+ defs.spam_raises: new_var_counts(
+ globalvars=1,
+ ),
defs.spam_with_inner_not_closure: new_var_counts(
purelocals=1,
),
@@ -1097,10 +1140,16 @@ class CodeTest(unittest.TestCase):
def test_stateless(self):
self.maxDiff = None
+ STATELESS_FUNCTIONS = [
+ *defs.STATELESS_FUNCTIONS,
+ # stateless with defaults
+ defs.spam_full_args_with_defaults,
+ ]
+
for func in defs.STATELESS_CODE:
with self.subTest((func, '(code)')):
_testinternalcapi.verify_stateless_code(func.__code__)
- for func in defs.STATELESS_FUNCTIONS:
+ for func in STATELESS_FUNCTIONS:
with self.subTest((func, '(func)')):
_testinternalcapi.verify_stateless_code(func)
@@ -1110,7 +1159,7 @@ class CodeTest(unittest.TestCase):
with self.assertRaises(Exception):
_testinternalcapi.verify_stateless_code(func.__code__)
- if func not in defs.STATELESS_FUNCTIONS:
+ if func not in STATELESS_FUNCTIONS:
with self.subTest((func, '(func)')):
with self.assertRaises(Exception):
_testinternalcapi.verify_stateless_code(func)
diff --git a/Lib/test/test_codeccallbacks.py b/Lib/test/test_codeccallbacks.py
index a767f67a02c..65d54d1004d 100644
--- a/Lib/test/test_codeccallbacks.py
+++ b/Lib/test/test_codeccallbacks.py
@@ -2,7 +2,6 @@ from _codecs import _unregister_error as _codecs_unregister_error
import codecs
import html.entities
import itertools
-import re
import sys
import unicodedata
import unittest
diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py
index 8c9a0972492..d8666f7290e 100644
--- a/Lib/test/test_codecs.py
+++ b/Lib/test/test_codecs.py
@@ -1,6 +1,7 @@
import codecs
import contextlib
import copy
+import importlib
import io
import pickle
import os
@@ -3111,9 +3112,9 @@ class TransformCodecTest(unittest.TestCase):
def test_alias_modules_exist(self):
encodings_dir = os.path.dirname(encodings.__file__)
for value in encodings.aliases.aliases.values():
- codec_file = os.path.join(encodings_dir, value + ".py")
- self.assertTrue(os.path.isfile(codec_file),
- "Codec file not found: " + codec_file)
+ codec_mod = f"encodings.{value}"
+ self.assertIsNotNone(importlib.util.find_spec(codec_mod),
+ f"Codec module not found: {codec_mod}")
def test_quopri_stateless(self):
# Should encode with quotetabs=True
diff --git a/Lib/test/test_collections.py b/Lib/test/test_collections.py
index 1e93530398b..d9d61e5c205 100644
--- a/Lib/test/test_collections.py
+++ b/Lib/test/test_collections.py
@@ -542,6 +542,8 @@ class TestNamedTuple(unittest.TestCase):
self.assertEqual(Dot(1)._replace(d=999), (999,))
self.assertEqual(Dot(1)._fields, ('d',))
+ @support.requires_resource('cpu')
+ def test_large_size(self):
n = support.exceeds_recursion_limit()
names = list(set(''.join([choice(string.ascii_letters)
for j in range(10)]) for i in range(n)))
diff --git a/Lib/test/test_concurrent_futures/test_init.py b/Lib/test/test_concurrent_futures/test_init.py
index df640929309..6b8484c0d5f 100644
--- a/Lib/test/test_concurrent_futures/test_init.py
+++ b/Lib/test/test_concurrent_futures/test_init.py
@@ -20,6 +20,10 @@ INITIALIZER_STATUS = 'uninitialized'
def init(x):
global INITIALIZER_STATUS
INITIALIZER_STATUS = x
+ # InterpreterPoolInitializerTest.test_initializer fails
+ # if we don't have a LOAD_GLOBAL. (It could be any global.)
+ # We will address this separately.
+ INITIALIZER_STATUS
def get_init_status():
return INITIALIZER_STATUS
diff --git a/Lib/test/test_concurrent_futures/test_interpreter_pool.py b/Lib/test/test_concurrent_futures/test_interpreter_pool.py
index f6c62ae4b20..844dfdd6fc9 100644
--- a/Lib/test/test_concurrent_futures/test_interpreter_pool.py
+++ b/Lib/test/test_concurrent_futures/test_interpreter_pool.py
@@ -2,35 +2,78 @@ import asyncio
import contextlib
import io
import os
-import pickle
+import sys
import time
import unittest
-from concurrent.futures.interpreter import (
- ExecutionFailed, BrokenInterpreterPool,
-)
+from concurrent.futures.interpreter import BrokenInterpreterPool
+from concurrent import interpreters
+from concurrent.interpreters import _queues as queues
import _interpreters
from test import support
+from test.support import os_helper
+from test.support import script_helper
import test.test_asyncio.utils as testasyncio_utils
-from test.support.interpreters import queues
from .executor import ExecutorTest, mul
from .util import BaseTestCase, InterpreterPoolMixin, setup_module
+WINDOWS = sys.platform.startswith('win')
+
+
+@contextlib.contextmanager
+def nonblocking(fd):
+ blocking = os.get_blocking(fd)
+ if blocking:
+ os.set_blocking(fd, False)
+ try:
+ yield
+ finally:
+ if blocking:
+ os.set_blocking(fd, blocking)
+
+
+def read_file_with_timeout(fd, nbytes, timeout):
+ with nonblocking(fd):
+ end = time.time() + timeout
+ try:
+ return os.read(fd, nbytes)
+ except BlockingIOError:
+ pass
+ while time.time() < end:
+ try:
+ return os.read(fd, nbytes)
+ except BlockingIOError:
+ continue
+ else:
+ raise TimeoutError('nothing to read')
+
+
+if not WINDOWS:
+ import select
+ def read_file_with_timeout(fd, nbytes, timeout):
+ r, _, _ = select.select([fd], [], [], timeout)
+ if fd not in r:
+ raise TimeoutError('nothing to read')
+ return os.read(fd, nbytes)
+
+
def noop():
pass
def write_msg(fd, msg):
+ import os
os.write(fd, msg + b'\0')
-def read_msg(fd):
+def read_msg(fd, timeout=10.0):
msg = b''
- while ch := os.read(fd, 1):
- if ch == b'\0':
- return msg
+ ch = read_file_with_timeout(fd, 1, timeout)
+ while ch != b'\0':
msg += ch
+ ch = os.read(fd, 1)
+ return msg
def get_current_name():
@@ -113,6 +156,38 @@ class InterpreterPoolExecutorTest(
self.assertEqual(before, b'\0')
self.assertEqual(after, msg)
+ def test_init_with___main___global(self):
+ # See https://github.com/python/cpython/pull/133957#issuecomment-2927415311.
+ text = """if True:
+ from concurrent.futures import InterpreterPoolExecutor
+
+ INITIALIZER_STATUS = 'uninitialized'
+
+ def init(x):
+ global INITIALIZER_STATUS
+ INITIALIZER_STATUS = x
+ INITIALIZER_STATUS
+
+ def get_init_status():
+ return INITIALIZER_STATUS
+
+ if __name__ == "__main__":
+ exe = InterpreterPoolExecutor(initializer=init,
+ initargs=('initialized',))
+ fut = exe.submit(get_init_status)
+ print(fut.result()) # 'initialized'
+ exe.shutdown(wait=True)
+ print(INITIALIZER_STATUS) # 'uninitialized'
+ """
+ with os_helper.temp_dir() as tempdir:
+ filename = script_helper.make_script(tempdir, 'my-script', text)
+ res = script_helper.assert_python_ok(filename)
+ stdout = res.out.decode('utf-8').strip()
+ self.assertEqual(stdout.splitlines(), [
+ 'initialized',
+ 'uninitialized',
+ ])
+
def test_init_closure(self):
count = 0
def init1():
@@ -121,10 +196,19 @@ class InterpreterPoolExecutorTest(
nonlocal count
count += 1
- with self.assertRaises(pickle.PicklingError):
- self.executor_type(initializer=init1)
- with self.assertRaises(pickle.PicklingError):
- self.executor_type(initializer=init2)
+ with contextlib.redirect_stderr(io.StringIO()) as stderr:
+ with self.executor_type(initializer=init1) as executor:
+ fut = executor.submit(lambda: None)
+ self.assertIn('NotShareableError', stderr.getvalue())
+ with self.assertRaises(BrokenInterpreterPool):
+ fut.result()
+
+ with contextlib.redirect_stderr(io.StringIO()) as stderr:
+ with self.executor_type(initializer=init2) as executor:
+ fut = executor.submit(lambda: None)
+ self.assertIn('NotShareableError', stderr.getvalue())
+ with self.assertRaises(BrokenInterpreterPool):
+ fut.result()
def test_init_instance_method(self):
class Spam:
@@ -132,26 +216,12 @@ class InterpreterPoolExecutorTest(
raise NotImplementedError
spam = Spam()
- with self.assertRaises(pickle.PicklingError):
- self.executor_type(initializer=spam.initializer)
-
- def test_init_shared(self):
- msg = b'eggs'
- r, w = self.pipe()
- script = f"""if True:
- import os
- if __name__ != '__main__':
- import __main__
- spam = __main__.spam
- os.write({w}, spam + b'\\0')
- """
-
- executor = self.executor_type(shared={'spam': msg})
- fut = executor.submit(exec, script)
- fut.result()
- after = read_msg(r)
-
- self.assertEqual(after, msg)
+ with contextlib.redirect_stderr(io.StringIO()) as stderr:
+ with self.executor_type(initializer=spam.initializer) as executor:
+ fut = executor.submit(lambda: None)
+ self.assertIn('NotShareableError', stderr.getvalue())
+ with self.assertRaises(BrokenInterpreterPool):
+ fut.result()
@unittest.expectedFailure
def test_init_exception_in_script(self):
@@ -178,8 +248,6 @@ class InterpreterPoolExecutorTest(
stderr = stderr.getvalue()
self.assertIn('ExecutionFailed: Exception: spam', stderr)
self.assertIn('Uncaught in the interpreter:', stderr)
- self.assertIn('The above exception was the direct cause of the following exception:',
- stderr)
@unittest.expectedFailure
def test_submit_script(self):
@@ -208,10 +276,14 @@ class InterpreterPoolExecutorTest(
return spam
executor = self.executor_type()
- with self.assertRaises(pickle.PicklingError):
- executor.submit(task1)
- with self.assertRaises(pickle.PicklingError):
- executor.submit(task2)
+
+ fut = executor.submit(task1)
+ with self.assertRaises(_interpreters.NotShareableError):
+ fut.result()
+
+ fut = executor.submit(task2)
+ with self.assertRaises(_interpreters.NotShareableError):
+ fut.result()
def test_submit_local_instance(self):
class Spam:
@@ -219,8 +291,9 @@ class InterpreterPoolExecutorTest(
self.value = True
executor = self.executor_type()
- with self.assertRaises(pickle.PicklingError):
- executor.submit(Spam)
+ fut = executor.submit(Spam)
+ with self.assertRaises(_interpreters.NotShareableError):
+ fut.result()
def test_submit_instance_method(self):
class Spam:
@@ -229,8 +302,9 @@ class InterpreterPoolExecutorTest(
spam = Spam()
executor = self.executor_type()
- with self.assertRaises(pickle.PicklingError):
- executor.submit(spam.run)
+ fut = executor.submit(spam.run)
+ with self.assertRaises(_interpreters.NotShareableError):
+ fut.result()
def test_submit_func_globals(self):
executor = self.executor_type()
@@ -242,13 +316,14 @@ class InterpreterPoolExecutorTest(
@unittest.expectedFailure
def test_submit_exception_in_script(self):
+ # Scripts are not supported currently.
fut = self.executor.submit('raise Exception("spam")')
with self.assertRaises(Exception) as captured:
fut.result()
self.assertIs(type(captured.exception), Exception)
self.assertEqual(str(captured.exception), 'spam')
cause = captured.exception.__cause__
- self.assertIs(type(cause), ExecutionFailed)
+ self.assertIs(type(cause), interpreters.ExecutionFailed)
for attr in ('__name__', '__qualname__', '__module__'):
self.assertEqual(getattr(cause.excinfo.type, attr),
getattr(Exception, attr))
@@ -261,7 +336,7 @@ class InterpreterPoolExecutorTest(
self.assertIs(type(captured.exception), Exception)
self.assertEqual(str(captured.exception), 'spam')
cause = captured.exception.__cause__
- self.assertIs(type(cause), ExecutionFailed)
+ self.assertIs(type(cause), interpreters.ExecutionFailed)
for attr in ('__name__', '__qualname__', '__module__'):
self.assertEqual(getattr(cause.excinfo.type, attr),
getattr(Exception, attr))
@@ -269,16 +344,93 @@ class InterpreterPoolExecutorTest(
def test_saturation(self):
blocker = queues.create()
- executor = self.executor_type(4, shared=dict(blocker=blocker))
+ executor = self.executor_type(4)
for i in range(15 * executor._max_workers):
- executor.submit(exec, 'import __main__; __main__.blocker.get()')
- #executor.submit('blocker.get()')
+ executor.submit(blocker.get)
self.assertEqual(len(executor._threads), executor._max_workers)
for i in range(15 * executor._max_workers):
blocker.put_nowait(None)
executor.shutdown(wait=True)
+ def test_blocking(self):
+ # There is no guarantee that a worker will be created for every
+ # submitted task. That's because there's a race between:
+ #
+ # * a new worker thread, created when task A was just submitted,
+ # becoming non-idle when it picks up task A
+ # * after task B is added to the queue, a new worker thread
+ # is started only if there are no idle workers
+ # (the check in ThreadPoolExecutor._adjust_thread_count())
+ #
+ # That means we must not block waiting for *all* tasks to report
+ # "ready" before we unblock the known-ready workers.
+ ready = queues.create()
+ blocker = queues.create()
+
+ def run(taskid, ready, blocker):
+ # There can't be any globals here.
+ ready.put_nowait(taskid)
+ blocker.get() # blocking
+
+ numtasks = 10
+ futures = []
+ with self.executor_type() as executor:
+ # Request the jobs.
+ for i in range(numtasks):
+ fut = executor.submit(run, i, ready, blocker)
+ futures.append(fut)
+ pending = numtasks
+ while pending > 0:
+ # Wait for any to be ready.
+ done = 0
+ for _ in range(pending):
+ try:
+ ready.get(timeout=1) # blocking
+ except interpreters.QueueEmpty:
+ pass
+ else:
+ done += 1
+ pending -= done
+ # Unblock the workers.
+ for _ in range(done):
+ blocker.put_nowait(None)
+
+ def test_blocking_with_limited_workers(self):
+ # This is essentially the same as test_blocking,
+ # but we explicitly force a limited number of workers,
+ # instead of it happening implicitly sometimes due to a race.
+ ready = queues.create()
+ blocker = queues.create()
+
+ def run(taskid, ready, blocker):
+ # There can't be any globals here.
+ ready.put_nowait(taskid)
+ blocker.get() # blocking
+
+ numtasks = 10
+ futures = []
+ with self.executor_type(4) as executor:
+ # Request the jobs.
+ for i in range(numtasks):
+ fut = executor.submit(run, i, ready, blocker)
+ futures.append(fut)
+ pending = numtasks
+ while pending > 0:
+ # Wait for any to be ready.
+ done = 0
+ for _ in range(pending):
+ try:
+ ready.get(timeout=1) # blocking
+ except interpreters.QueueEmpty:
+ pass
+ else:
+ done += 1
+ pending -= done
+ # Unblock the workers.
+ for _ in range(done):
+ blocker.put_nowait(None)
+
@support.requires_gil_enabled("gh-117344: test is flaky without the GIL")
def test_idle_thread_reuse(self):
executor = self.executor_type()
@@ -289,12 +441,21 @@ class InterpreterPoolExecutorTest(
executor.shutdown(wait=True)
def test_pickle_errors_propagate(self):
- # GH-125864: Pickle errors happen before the script tries to execute, so the
- # queue used to wait infinitely.
-
+ # GH-125864: Pickle errors happen before the script tries to execute,
+ # so the queue used to wait infinitely.
fut = self.executor.submit(PickleShenanigans(0))
- with self.assertRaisesRegex(RuntimeError, "gotcha"):
+ expected = interpreters.NotShareableError
+ with self.assertRaisesRegex(expected, 'args not shareable') as cm:
fut.result()
+ self.assertRegex(str(cm.exception.__cause__), 'unpickled')
+
+ def test_no_stale_references(self):
+ # Weak references don't cross between interpreters.
+ raise unittest.SkipTest('not applicable')
+
+ def test_free_reference(self):
+ # Weak references don't cross between interpreters.
+ raise unittest.SkipTest('not applicable')
class AsyncioTest(InterpretersMixin, testasyncio_utils.TestCase):
diff --git a/Lib/test/test_concurrent_futures/test_shutdown.py b/Lib/test/test_concurrent_futures/test_shutdown.py
index 7a4065afd46..99b315b47e2 100644
--- a/Lib/test/test_concurrent_futures/test_shutdown.py
+++ b/Lib/test/test_concurrent_futures/test_shutdown.py
@@ -330,6 +330,64 @@ class ProcessPoolShutdownTest(ExecutorShutdownTest):
# shutdown.
assert all([r == abs(v) for r, v in zip(res, range(-5, 5))])
+ @classmethod
+ def _failing_task_gh_132969(cls, n):
+ raise ValueError("failing task")
+
+ @classmethod
+ def _good_task_gh_132969(cls, n):
+ time.sleep(0.1 * n)
+ return n
+
+ def _run_test_issue_gh_132969(self, max_workers):
+ # max_workers=2 will repro exception
+ # max_workers=4 will repro exception and then hang
+
+ # Repro conditions
+ # max_tasks_per_child=1
+ # a task ends abnormally
+ # shutdown(wait=False) is called
+ start_method = self.get_context().get_start_method()
+ if (start_method == "fork" or
+ (start_method == "forkserver" and sys.platform.startswith("win"))):
+ self.skipTest(f"Skipping test for {start_method = }")
+ executor = futures.ProcessPoolExecutor(
+ max_workers=max_workers,
+ max_tasks_per_child=1,
+ mp_context=self.get_context())
+ f1 = executor.submit(ProcessPoolShutdownTest._good_task_gh_132969, 1)
+ f2 = executor.submit(ProcessPoolShutdownTest._failing_task_gh_132969, 2)
+ f3 = executor.submit(ProcessPoolShutdownTest._good_task_gh_132969, 3)
+ result = 0
+ try:
+ result += f1.result()
+ result += f2.result()
+ result += f3.result()
+ except ValueError:
+ # stop processing results upon first exception
+ pass
+
+ # Ensure that the executor cleans up after
+ # shutdown(wait=False) is called
+ executor_manager_thread = executor._executor_manager_thread
+ executor.shutdown(wait=False)
+ time.sleep(0.2)
+ executor_manager_thread.join()
+ return result
+
+ def test_shutdown_gh_132969_case_1(self):
+ # gh-132969: test that the exception "object of type 'NoneType' has no len()"
+ # is not raised when shutdown(wait=False) is called.
+ result = self._run_test_issue_gh_132969(2)
+ self.assertEqual(result, 1)
+
+ def test_shutdown_gh_132969_case_2(self):
+ # gh-132969: test that the process does not hang and the exception
+ # "object of type 'NoneType' has no len()" is not raised
+ # when shutdown(wait=False) is called.
+ result = self._run_test_issue_gh_132969(4)
+ self.assertEqual(result, 1)
+
create_executor_tests(globals(), ProcessPoolShutdownTest,
executor_mixins=(ProcessPoolForkMixin,
diff --git a/Lib/test/test_configparser.py b/Lib/test/test_configparser.py
index 23904d17d32..e7364e18742 100644
--- a/Lib/test/test_configparser.py
+++ b/Lib/test/test_configparser.py
@@ -986,12 +986,12 @@ class ConfigParserTestCase(BasicTestCase, unittest.TestCase):
def test_defaults_keyword(self):
"""bpo-23835 fix for ConfigParser"""
- cf = self.newconfig(defaults={1: 2.4})
- self.assertEqual(cf[self.default_section]['1'], '2.4')
- self.assertAlmostEqual(cf[self.default_section].getfloat('1'), 2.4)
- cf = self.newconfig(defaults={"A": 5.2})
- self.assertEqual(cf[self.default_section]['a'], '5.2')
- self.assertAlmostEqual(cf[self.default_section].getfloat('a'), 5.2)
+ cf = self.newconfig(defaults={1: 2.5})
+ self.assertEqual(cf[self.default_section]['1'], '2.5')
+ self.assertAlmostEqual(cf[self.default_section].getfloat('1'), 2.5)
+ cf = self.newconfig(defaults={"A": 5.25})
+ self.assertEqual(cf[self.default_section]['a'], '5.25')
+ self.assertAlmostEqual(cf[self.default_section].getfloat('a'), 5.25)
class ConfigParserTestCaseNoInterpolation(BasicTestCase, unittest.TestCase):
diff --git a/Lib/test/test_cprofile.py b/Lib/test/test_cprofile.py
index 192c8eab26e..57e818b1c68 100644
--- a/Lib/test/test_cprofile.py
+++ b/Lib/test/test_cprofile.py
@@ -125,21 +125,22 @@ class CProfileTest(ProfileTest):
"""
gh-106152
generator.throw() should trigger a call in cProfile
- In the any() call below, there should be two entries for the generator:
- * one for the call to __next__ which gets a True and terminates any
- * one when the generator is garbage collected which will effectively
- do a throw.
"""
+
+ def gen():
+ yield
+
pr = self.profilerclass()
pr.enable()
- any(a == 1 for a in (1, 2))
+ g = gen()
+ try:
+ g.throw(SyntaxError)
+ except SyntaxError:
+ pass
pr.disable()
pr.create_stats()
- for func, (cc, nc, _, _, _) in pr.stats.items():
- if func[2] == "<genexpr>":
- self.assertEqual(cc, 1)
- self.assertEqual(nc, 1)
+ self.assertTrue(any("throw" in func[2] for func in pr.stats.keys()))
def test_bad_descriptor(self):
# gh-132250
diff --git a/Lib/test/test_crossinterp.py b/Lib/test/test_crossinterp.py
index c54635eaeab..2fa0077a09b 100644
--- a/Lib/test/test_crossinterp.py
+++ b/Lib/test/test_crossinterp.py
@@ -1,6 +1,4 @@
import contextlib
-import importlib
-import importlib.util
import itertools
import sys
import types
diff --git a/Lib/test/test_csv.py b/Lib/test/test_csv.py
index 9aace57633b..60feab225a1 100644
--- a/Lib/test/test_csv.py
+++ b/Lib/test/test_csv.py
@@ -1122,19 +1122,22 @@ class TestDialectValidity(unittest.TestCase):
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"quotechar" must be a 1-character string')
+ '"quotechar" must be a unicode character or None, '
+ 'not a string of length 0')
mydialect.quotechar = "''"
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"quotechar" must be a 1-character string')
+ '"quotechar" must be a unicode character or None, '
+ 'not a string of length 2')
mydialect.quotechar = 4
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"quotechar" must be string or None, not int')
+ '"quotechar" must be a unicode character or None, '
+ 'not int')
def test_delimiter(self):
class mydialect(csv.Dialect):
@@ -1151,31 +1154,32 @@ class TestDialectValidity(unittest.TestCase):
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"delimiter" must be a 1-character string')
+ '"delimiter" must be a unicode character, '
+ 'not a string of length 3')
mydialect.delimiter = ""
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"delimiter" must be a 1-character string')
+ '"delimiter" must be a unicode character, not a string of length 0')
mydialect.delimiter = b","
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"delimiter" must be string, not bytes')
+ '"delimiter" must be a unicode character, not bytes')
mydialect.delimiter = 4
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"delimiter" must be string, not int')
+ '"delimiter" must be a unicode character, not int')
mydialect.delimiter = None
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"delimiter" must be string, not NoneType')
+ '"delimiter" must be a unicode character, not NoneType')
def test_escapechar(self):
class mydialect(csv.Dialect):
@@ -1189,20 +1193,32 @@ class TestDialectValidity(unittest.TestCase):
self.assertEqual(d.escapechar, "\\")
mydialect.escapechar = ""
- with self.assertRaisesRegex(csv.Error, '"escapechar" must be a 1-character string'):
+ with self.assertRaises(csv.Error) as cm:
mydialect()
+ self.assertEqual(str(cm.exception),
+ '"escapechar" must be a unicode character or None, '
+ 'not a string of length 0')
mydialect.escapechar = "**"
- with self.assertRaisesRegex(csv.Error, '"escapechar" must be a 1-character string'):
+ with self.assertRaises(csv.Error) as cm:
mydialect()
+ self.assertEqual(str(cm.exception),
+ '"escapechar" must be a unicode character or None, '
+ 'not a string of length 2')
mydialect.escapechar = b"*"
- with self.assertRaisesRegex(csv.Error, '"escapechar" must be string or None, not bytes'):
+ with self.assertRaises(csv.Error) as cm:
mydialect()
+ self.assertEqual(str(cm.exception),
+ '"escapechar" must be a unicode character or None, '
+ 'not bytes')
mydialect.escapechar = 4
- with self.assertRaisesRegex(csv.Error, '"escapechar" must be string or None, not int'):
+ with self.assertRaises(csv.Error) as cm:
mydialect()
+ self.assertEqual(str(cm.exception),
+ '"escapechar" must be a unicode character or None, '
+ 'not int')
def test_lineterminator(self):
class mydialect(csv.Dialect):
@@ -1223,7 +1239,13 @@ class TestDialectValidity(unittest.TestCase):
with self.assertRaises(csv.Error) as cm:
mydialect()
self.assertEqual(str(cm.exception),
- '"lineterminator" must be a string')
+ '"lineterminator" must be a string, not int')
+
+ mydialect.lineterminator = None
+ with self.assertRaises(csv.Error) as cm:
+ mydialect()
+ self.assertEqual(str(cm.exception),
+ '"lineterminator" must be a string, not NoneType')
def test_invalid_chars(self):
def create_invalid(field_name, value, **kwargs):
diff --git a/Lib/test/test_ctypes/_support.py b/Lib/test/test_ctypes/_support.py
index 946d654a19a..700657a4e41 100644
--- a/Lib/test/test_ctypes/_support.py
+++ b/Lib/test/test_ctypes/_support.py
@@ -3,7 +3,6 @@
import ctypes
from _ctypes import Structure, Union, _Pointer, Array, _SimpleCData, CFuncPtr
import sys
-from test import support
_CData = Structure.__base__
diff --git a/Lib/test/test_ctypes/test_byteswap.py b/Lib/test/test_ctypes/test_byteswap.py
index ea5951603f9..f14e1aa32e1 100644
--- a/Lib/test/test_ctypes/test_byteswap.py
+++ b/Lib/test/test_ctypes/test_byteswap.py
@@ -1,5 +1,4 @@
import binascii
-import ctypes
import math
import struct
import sys
diff --git a/Lib/test/test_ctypes/test_generated_structs.py b/Lib/test/test_ctypes/test_generated_structs.py
index aa448fad5bb..1cb46a82701 100644
--- a/Lib/test/test_ctypes/test_generated_structs.py
+++ b/Lib/test/test_ctypes/test_generated_structs.py
@@ -10,7 +10,7 @@ Run this module to regenerate the files:
"""
import unittest
-from test.support import import_helper, verbose
+from test.support import import_helper
import re
from dataclasses import dataclass
from functools import cached_property
diff --git a/Lib/test/test_ctypes/test_incomplete.py b/Lib/test/test_ctypes/test_incomplete.py
index fefdfe9102e..3189fcd1bd1 100644
--- a/Lib/test/test_ctypes/test_incomplete.py
+++ b/Lib/test/test_ctypes/test_incomplete.py
@@ -1,6 +1,5 @@
import ctypes
import unittest
-import warnings
from ctypes import Structure, POINTER, pointer, c_char_p
# String-based "incomplete pointers" were implemented in ctypes 0.6.3 (2003, when
@@ -21,9 +20,7 @@ class TestSetPointerType(unittest.TestCase):
_fields_ = [("name", c_char_p),
("next", lpcell)]
- with warnings.catch_warnings():
- warnings.simplefilter('ignore', DeprecationWarning)
- ctypes.SetPointerType(lpcell, cell)
+ lpcell.set_type(cell)
self.assertIs(POINTER(cell), lpcell)
@@ -50,10 +47,9 @@ class TestSetPointerType(unittest.TestCase):
_fields_ = [("name", c_char_p),
("next", lpcell)]
- with self.assertWarns(DeprecationWarning):
- ctypes.SetPointerType(lpcell, cell)
-
+ lpcell.set_type(cell)
self.assertIs(POINTER(cell), lpcell)
+
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/test/test_ctypes/test_parameters.py b/Lib/test/test_ctypes/test_parameters.py
index f89521cf8b3..46f8ff93efa 100644
--- a/Lib/test/test_ctypes/test_parameters.py
+++ b/Lib/test/test_ctypes/test_parameters.py
@@ -1,3 +1,4 @@
+import sys
import unittest
import test.support
from ctypes import (CDLL, PyDLL, ArgumentError,
@@ -240,7 +241,8 @@ class SimpleTypesTestCase(unittest.TestCase):
self.assertRegex(repr(c_ulonglong.from_param(20000)), r"^<cparam '[LIQ]' \(20000\)>$")
self.assertEqual(repr(c_float.from_param(1.5)), "<cparam 'f' (1.5)>")
self.assertEqual(repr(c_double.from_param(1.5)), "<cparam 'd' (1.5)>")
- self.assertEqual(repr(c_double.from_param(1e300)), "<cparam 'd' (1e+300)>")
+ if sys.float_repr_style == 'short':
+ self.assertEqual(repr(c_double.from_param(1e300)), "<cparam 'd' (1e+300)>")
self.assertRegex(repr(c_longdouble.from_param(1.5)), r"^<cparam ('d' \(1.5\)|'g' at 0x[A-Fa-f0-9]+)>$")
self.assertRegex(repr(c_char_p.from_param(b'hihi')), r"^<cparam 'z' \(0x[A-Fa-f0-9]+\)>$")
self.assertRegex(repr(c_wchar_p.from_param('hihi')), r"^<cparam 'Z' \(0x[A-Fa-f0-9]+\)>$")
diff --git a/Lib/test/test_dbm.py b/Lib/test/test_dbm.py
index a10922a403e..7e8d78b8940 100644
--- a/Lib/test/test_dbm.py
+++ b/Lib/test/test_dbm.py
@@ -135,6 +135,67 @@ class AnyDBMTestCase:
assert(f[key] == b"Python:")
f.close()
+ def test_anydbm_readonly_reorganize(self):
+ self.init_db()
+ with dbm.open(_fname, 'r') as d:
+ # Early stopping.
+ if not hasattr(d, 'reorganize'):
+ self.skipTest("method reorganize not available in this dbm submodule")
+
+ self.assertRaises(dbm.error, lambda: d.reorganize())
+
+ def test_anydbm_reorganize_not_changed_content(self):
+ self.init_db()
+ with dbm.open(_fname, 'c') as d:
+ # Early stopping.
+ if not hasattr(d, 'reorganize'):
+ self.skipTest("method reorganize not available in this dbm submodule")
+
+ keys_before = sorted(d.keys())
+ values_before = [d[k] for k in keys_before]
+ d.reorganize()
+ keys_after = sorted(d.keys())
+ values_after = [d[k] for k in keys_before]
+ self.assertEqual(keys_before, keys_after)
+ self.assertEqual(values_before, values_after)
+
+ def test_anydbm_reorganize_decreased_size(self):
+
+ def _calculate_db_size(db_path):
+ if os.path.isfile(db_path):
+ return os.path.getsize(db_path)
+ total_size = 0
+ for root, _, filenames in os.walk(db_path):
+ for filename in filenames:
+ file_path = os.path.join(root, filename)
+ total_size += os.path.getsize(file_path)
+ return total_size
+
+ # This test requires relatively large databases to reliably show a difference in size before and after reorganizing.
+ with dbm.open(_fname, 'n') as f:
+ # Early stopping.
+ if not hasattr(f, 'reorganize'):
+ self.skipTest("method reorganize not available in this dbm submodule")
+
+ for k in self._dict:
+ f[k.encode('ascii')] = self._dict[k] * 100000
+ db_keys = list(f.keys())
+
+ # Make sure to calculate the database size only after the file is closed to ensure its contents are flushed to disk.
+ size_before = _calculate_db_size(os.path.dirname(_fname))
+
+ # Delete some elements from the start of the database.
+ keys_to_delete = db_keys[:len(db_keys) // 2]
+ with dbm.open(_fname, 'c') as f:
+ for k in keys_to_delete:
+ del f[k]
+ f.reorganize()
+
+ # Make sure to calculate the database size only after the file is closed to ensure its contents are flushed to disk.
+ size_after = _calculate_db_size(os.path.dirname(_fname))
+
+ self.assertLess(size_after, size_before)
+
def test_open_with_bytes(self):
dbm.open(os.fsencode(_fname), "c").close()
diff --git a/Lib/test/test_dbm_gnu.py b/Lib/test/test_dbm_gnu.py
index 66268c42a30..e0b988b7b95 100644
--- a/Lib/test/test_dbm_gnu.py
+++ b/Lib/test/test_dbm_gnu.py
@@ -74,12 +74,12 @@ class TestGdbm(unittest.TestCase):
# Test the flag parameter open() by trying all supported flag modes.
all = set(gdbm.open_flags)
# Test standard flags (presumably "crwn").
- modes = all - set('fsu')
+ modes = all - set('fsum')
for mode in sorted(modes): # put "c" mode first
self.g = gdbm.open(filename, mode)
self.g.close()
- # Test additional flags (presumably "fsu").
+ # Test additional flags (presumably "fsum").
flags = all - set('crwn')
for mode in modes:
for flag in flags:
@@ -217,6 +217,29 @@ class TestGdbm(unittest.TestCase):
create_empty_file(os.path.join(d, 'test'))
self.assertRaises(gdbm.error, gdbm.open, filename, 'r')
+ @unittest.skipUnless('m' in gdbm.open_flags, "requires 'm' in open_flags")
+ def test_nommap_no_crash(self):
+ self.g = g = gdbm.open(filename, 'nm')
+ os.truncate(filename, 0)
+
+ g.get(b'a', b'c')
+ g.keys()
+ g.firstkey()
+ g.nextkey(b'a')
+ with self.assertRaises(KeyError):
+ g[b'a']
+ with self.assertRaises(gdbm.error):
+ len(g)
+
+ with self.assertRaises(gdbm.error):
+ g[b'a'] = b'c'
+ with self.assertRaises(gdbm.error):
+ del g[b'a']
+ with self.assertRaises(gdbm.error):
+ g.setdefault(b'a', b'c')
+ with self.assertRaises(gdbm.error):
+ g.reorganize()
+
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py
index 9e298401dc3..ef64b878805 100644
--- a/Lib/test/test_decimal.py
+++ b/Lib/test/test_decimal.py
@@ -28,7 +28,6 @@ import logging
import math
import os, sys
import operator
-import warnings
import pickle, copy
import unittest
import numbers
@@ -982,6 +981,7 @@ class FormatTest:
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
+ ('.01f', '3.14159265', '3.1'), # leading zero in precision
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
@@ -1067,6 +1067,7 @@ class FormatTest:
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
+ ('008,', '123456', '0,123,456'), # leading zero in width
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py
index ea076ba4fef..f6ec2cf5ce8 100644
--- a/Lib/test/test_descr.py
+++ b/Lib/test/test_descr.py
@@ -4114,6 +4114,34 @@ class ClassPropertiesAndMethods(unittest.TestCase):
else:
self.fail("shouldn't be able to create inheritance cycles")
+ def test_assign_bases_many_subclasses(self):
+ # This is intended to check that typeobject.c:queue_slot_update() can
+ # handle updating many subclasses when a slot method is re-assigned.
+ class A:
+ x = 'hello'
+ def __call__(self):
+ return 123
+ def __getitem__(self, index):
+ return None
+
+ class X:
+ x = 'bye'
+
+ class B(A):
+ pass
+
+ subclasses = []
+ for i in range(1000):
+ sc = type(f'Sub{i}', (B,), {})
+ subclasses.append(sc)
+
+ self.assertEqual(subclasses[0]()(), 123)
+ self.assertEqual(subclasses[0]().x, 'hello')
+ B.__bases__ = (X,)
+ with self.assertRaises(TypeError):
+ subclasses[0]()()
+ self.assertEqual(subclasses[0]().x, 'bye')
+
def test_builtin_bases(self):
# Make sure all the builtin types can have their base queried without
# segfaulting. See issue #5787.
diff --git a/Lib/test/test_dict.py b/Lib/test/test_dict.py
index 52c38e42eca..60c62430370 100644
--- a/Lib/test/test_dict.py
+++ b/Lib/test/test_dict.py
@@ -290,6 +290,38 @@ class DictTest(unittest.TestCase):
['Cannot convert dictionary update sequence element #0 to a sequence'],
)
+ def test_update_shared_keys(self):
+ class MyClass: pass
+
+ # Subclass str to enable us to create an object during the
+ # dict.update() call.
+ class MyStr(str):
+ def __hash__(self):
+ return super().__hash__()
+
+ def __eq__(self, other):
+ # Create an object that shares the same PyDictKeysObject as
+ # obj.__dict__.
+ obj2 = MyClass()
+ obj2.a = "a"
+ obj2.b = "b"
+ obj2.c = "c"
+ return super().__eq__(other)
+
+ obj = MyClass()
+ obj.a = "a"
+ obj.b = "b"
+
+ x = {}
+ x[MyStr("a")] = MyStr("a")
+
+ # gh-132617: this previously raised "dict mutated during update" error
+ x.update(obj.__dict__)
+
+ self.assertEqual(x, {
+ MyStr("a"): "a",
+ "b": "b",
+ })
def test_fromkeys(self):
self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
diff --git a/Lib/test/test_difflib.py b/Lib/test/test_difflib.py
index 9e217249be7..6ac584a08d1 100644
--- a/Lib/test/test_difflib.py
+++ b/Lib/test/test_difflib.py
@@ -255,21 +255,21 @@ class TestSFpatches(unittest.TestCase):
html_diff = difflib.HtmlDiff()
output = html_diff.make_file(patch914575_from1.splitlines(),
patch914575_to1.splitlines())
- self.assertIn('content="text/html; charset=utf-8"', output)
+ self.assertIn('charset="utf-8"', output)
def test_make_file_iso88591_charset(self):
html_diff = difflib.HtmlDiff()
output = html_diff.make_file(patch914575_from1.splitlines(),
patch914575_to1.splitlines(),
charset='iso-8859-1')
- self.assertIn('content="text/html; charset=iso-8859-1"', output)
+ self.assertIn('charset="iso-8859-1"', output)
def test_make_file_usascii_charset_with_nonascii_input(self):
html_diff = difflib.HtmlDiff()
output = html_diff.make_file(patch914575_nonascii_from1.splitlines(),
patch914575_nonascii_to1.splitlines(),
charset='us-ascii')
- self.assertIn('content="text/html; charset=us-ascii"', output)
+ self.assertIn('charset="us-ascii"', output)
self.assertIn('&#305;mpl&#305;c&#305;t', output)
class TestDiffer(unittest.TestCase):
diff --git a/Lib/test/test_difflib_expect.html b/Lib/test/test_difflib_expect.html
index 9f33a9e9c9c..2346a6f9f8d 100644
--- a/Lib/test/test_difflib_expect.html
+++ b/Lib/test/test_difflib_expect.html
@@ -1,22 +1,42 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
-<html>
-
+<!DOCTYPE html>
+<html lang="en">
<head>
- <meta http-equiv="Content-Type"
- content="text/html; charset=utf-8" />
- <title></title>
- <style type="text/css">
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <title>Diff comparison</title>
+ <style>
:root {color-scheme: light dark}
- table.diff {font-family: Menlo, Consolas, Monaco, Liberation Mono, Lucida Console, monospace; border:medium}
- .diff_header {background-color:#e0e0e0}
- td.diff_header {text-align:right}
- .diff_next {background-color:#c0c0c0}
+ table.diff {
+ font-family: Menlo, Consolas, Monaco, Liberation Mono, Lucida Console, monospace;
+ border: medium;
+ }
+ .diff_header {
+ background-color: #e0e0e0;
+ font-weight: bold;
+ }
+ td.diff_header {
+ text-align: right;
+ padding: 0 8px;
+ }
+ .diff_next {
+ background-color: #c0c0c0;
+ padding: 4px 0;
+ }
.diff_add {background-color:palegreen}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}
+ table.diff[summary="Legends"] {
+ margin-top: 20px;
+ border: 1px solid #ccc;
+ }
+ table.diff[summary="Legends"] th {
+ background-color: #e0e0e0;
+ padding: 4px 8px;
+ }
+ table.diff[summary="Legends"] td {
+ padding: 4px 8px;
+ }
@media (prefers-color-scheme: dark) {
.diff_header {background-color:#666}
@@ -24,6 +44,8 @@
.diff_add {background-color:darkgreen}
.diff_chg {background-color:#847415}
.diff_sub {background-color:darkred}
+ table.diff[summary="Legends"] {border-color:#555}
+ table.diff[summary="Legends"] th{background-color:#666}
}
</style>
</head>
diff --git a/Lib/test/test_dis.py b/Lib/test/test_dis.py
index 547f6de5f5a..355990ed58e 100644
--- a/Lib/test/test_dis.py
+++ b/Lib/test/test_dis.py
@@ -606,7 +606,7 @@ dis_asyncwith = """\
POP_TOP
L1: RESUME 0
-%4d LOAD_FAST_BORROW 0 (c)
+%4d LOAD_FAST 0 (c)
COPY 1
LOAD_SPECIAL 3 (__aexit__)
SWAP 2
@@ -851,7 +851,7 @@ Disassembly of <code object <genexpr> at 0x..., file "%s", line %d>:
%4d RETURN_GENERATOR
POP_TOP
L1: RESUME 0
- LOAD_FAST_BORROW 0 (.0)
+ LOAD_FAST 0 (.0)
GET_ITER
L2: FOR_ITER 14 (to L3)
STORE_FAST 1 (z)
@@ -1821,7 +1821,7 @@ expected_opinfo_jumpy = [
make_inst(opname='LOAD_SMALL_INT', arg=10, argval=10, argrepr='', offset=12, start_offset=12, starts_line=False, line_number=3),
make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=14, start_offset=14, starts_line=False, line_number=3, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
make_inst(opname='GET_ITER', arg=None, argval=None, argrepr='', offset=22, start_offset=22, starts_line=False, line_number=3),
- make_inst(opname='FOR_ITER', arg=32, argval=92, argrepr='to L4', offset=24, start_offset=24, starts_line=False, line_number=3, label=1, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='FOR_ITER', arg=33, argval=94, argrepr='to L4', offset=24, start_offset=24, starts_line=False, line_number=3, label=1, cache_info=[('counter', 1, b'\x00\x00')]),
make_inst(opname='STORE_FAST', arg=0, argval='i', argrepr='i', offset=28, start_offset=28, starts_line=False, line_number=3),
make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=30, start_offset=30, starts_line=True, line_number=4, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=40, start_offset=40, starts_line=False, line_number=4),
@@ -1840,110 +1840,111 @@ expected_opinfo_jumpy = [
make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=82, start_offset=82, starts_line=False, line_number=7),
make_inst(opname='JUMP_BACKWARD', arg=32, argval=24, argrepr='to L1', offset=84, start_offset=84, starts_line=False, line_number=7, cache_info=[('counter', 1, b'\x00\x00')]),
make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=88, start_offset=88, starts_line=True, line_number=8, label=3),
- make_inst(opname='JUMP_FORWARD', arg=13, argval=118, argrepr='to L5', offset=90, start_offset=90, starts_line=False, line_number=8),
- make_inst(opname='END_FOR', arg=None, argval=None, argrepr='', offset=92, start_offset=92, starts_line=True, line_number=3, label=4),
- make_inst(opname='POP_ITER', arg=None, argval=None, argrepr='', offset=94, start_offset=94, starts_line=False, line_number=3),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=96, start_offset=96, starts_line=True, line_number=10, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_CONST', arg=1, argval='I can haz else clause?', argrepr="'I can haz else clause?'", offset=106, start_offset=106, starts_line=False, line_number=10),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=108, start_offset=108, starts_line=False, line_number=10, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=116, start_offset=116, starts_line=False, line_number=10),
- make_inst(opname='LOAD_FAST_CHECK', arg=0, argval='i', argrepr='i', offset=118, start_offset=118, starts_line=True, line_number=11, label=5),
- make_inst(opname='TO_BOOL', arg=None, argval=None, argrepr='', offset=120, start_offset=120, starts_line=False, line_number=11, cache_info=[('counter', 1, b'\x00\x00'), ('version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_JUMP_IF_FALSE', arg=40, argval=212, argrepr='to L8', offset=128, start_offset=128, starts_line=False, line_number=11, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=132, start_offset=132, starts_line=False, line_number=11),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=134, start_offset=134, starts_line=True, line_number=12, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=144, start_offset=144, starts_line=False, line_number=12),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=146, start_offset=146, starts_line=False, line_number=12, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=154, start_offset=154, starts_line=False, line_number=12),
- make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=156, start_offset=156, starts_line=True, line_number=13),
- make_inst(opname='LOAD_SMALL_INT', arg=1, argval=1, argrepr='', offset=158, start_offset=158, starts_line=False, line_number=13),
- make_inst(opname='BINARY_OP', arg=23, argval=23, argrepr='-=', offset=160, start_offset=160, starts_line=False, line_number=13, cache_info=[('counter', 1, b'\x00\x00'), ('descr', 4, b'\x00\x00\x00\x00\x00\x00\x00\x00')]),
- make_inst(opname='STORE_FAST', arg=0, argval='i', argrepr='i', offset=172, start_offset=172, starts_line=False, line_number=13),
- make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=174, start_offset=174, starts_line=True, line_number=14),
- make_inst(opname='LOAD_SMALL_INT', arg=6, argval=6, argrepr='', offset=176, start_offset=176, starts_line=False, line_number=14),
- make_inst(opname='COMPARE_OP', arg=148, argval='>', argrepr='bool(>)', offset=178, start_offset=178, starts_line=False, line_number=14, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='POP_JUMP_IF_FALSE', arg=3, argval=192, argrepr='to L6', offset=182, start_offset=182, starts_line=False, line_number=14, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=186, start_offset=186, starts_line=False, line_number=14),
- make_inst(opname='JUMP_BACKWARD', arg=37, argval=118, argrepr='to L5', offset=188, start_offset=188, starts_line=True, line_number=15, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=192, start_offset=192, starts_line=True, line_number=16, label=6),
- make_inst(opname='LOAD_SMALL_INT', arg=4, argval=4, argrepr='', offset=194, start_offset=194, starts_line=False, line_number=16),
- make_inst(opname='COMPARE_OP', arg=18, argval='<', argrepr='bool(<)', offset=196, start_offset=196, starts_line=False, line_number=16, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='POP_JUMP_IF_TRUE', arg=3, argval=210, argrepr='to L7', offset=200, start_offset=200, starts_line=False, line_number=16, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=204, start_offset=204, starts_line=False, line_number=16),
- make_inst(opname='JUMP_BACKWARD', arg=46, argval=118, argrepr='to L5', offset=206, start_offset=206, starts_line=False, line_number=16, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='JUMP_FORWARD', arg=11, argval=234, argrepr='to L9', offset=210, start_offset=210, starts_line=True, line_number=17, label=7),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=212, start_offset=212, starts_line=True, line_number=19, label=8, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_CONST', arg=2, argval='Who let lolcatz into this test suite?', argrepr="'Who let lolcatz into this test suite?'", offset=222, start_offset=222, starts_line=False, line_number=19),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=224, start_offset=224, starts_line=False, line_number=19, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=232, start_offset=232, starts_line=False, line_number=19),
- make_inst(opname='NOP', arg=None, argval=None, argrepr='', offset=234, start_offset=234, starts_line=True, line_number=20, label=9),
- make_inst(opname='LOAD_SMALL_INT', arg=1, argval=1, argrepr='', offset=236, start_offset=236, starts_line=True, line_number=21),
- make_inst(opname='LOAD_SMALL_INT', arg=0, argval=0, argrepr='', offset=238, start_offset=238, starts_line=False, line_number=21),
- make_inst(opname='BINARY_OP', arg=11, argval=11, argrepr='/', offset=240, start_offset=240, starts_line=False, line_number=21, cache_info=[('counter', 1, b'\x00\x00'), ('descr', 4, b'\x00\x00\x00\x00\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=252, start_offset=252, starts_line=False, line_number=21),
- make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=254, start_offset=254, starts_line=True, line_number=25),
- make_inst(opname='COPY', arg=1, argval=1, argrepr='', offset=256, start_offset=256, starts_line=False, line_number=25),
- make_inst(opname='LOAD_SPECIAL', arg=1, argval=1, argrepr='__exit__', offset=258, start_offset=258, starts_line=False, line_number=25),
- make_inst(opname='SWAP', arg=2, argval=2, argrepr='', offset=260, start_offset=260, starts_line=False, line_number=25),
- make_inst(opname='SWAP', arg=3, argval=3, argrepr='', offset=262, start_offset=262, starts_line=False, line_number=25),
- make_inst(opname='LOAD_SPECIAL', arg=0, argval=0, argrepr='__enter__', offset=264, start_offset=264, starts_line=False, line_number=25),
- make_inst(opname='CALL', arg=0, argval=0, argrepr='', offset=266, start_offset=266, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='STORE_FAST', arg=1, argval='dodgy', argrepr='dodgy', offset=274, start_offset=274, starts_line=False, line_number=25),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=276, start_offset=276, starts_line=True, line_number=26, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_CONST', arg=3, argval='Never reach this', argrepr="'Never reach this'", offset=286, start_offset=286, starts_line=False, line_number=26),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=288, start_offset=288, starts_line=False, line_number=26, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=296, start_offset=296, starts_line=False, line_number=26),
- make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=298, start_offset=298, starts_line=True, line_number=25),
- make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=300, start_offset=300, starts_line=False, line_number=25),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=90, start_offset=90, starts_line=False, line_number=8),
+ make_inst(opname='JUMP_FORWARD', arg=13, argval=120, argrepr='to L5', offset=92, start_offset=92, starts_line=False, line_number=8),
+ make_inst(opname='END_FOR', arg=None, argval=None, argrepr='', offset=94, start_offset=94, starts_line=True, line_number=3, label=4),
+ make_inst(opname='POP_ITER', arg=None, argval=None, argrepr='', offset=96, start_offset=96, starts_line=False, line_number=3),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=98, start_offset=98, starts_line=True, line_number=10, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_CONST', arg=1, argval='I can haz else clause?', argrepr="'I can haz else clause?'", offset=108, start_offset=108, starts_line=False, line_number=10),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=110, start_offset=110, starts_line=False, line_number=10, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=118, start_offset=118, starts_line=False, line_number=10),
+ make_inst(opname='LOAD_FAST_CHECK', arg=0, argval='i', argrepr='i', offset=120, start_offset=120, starts_line=True, line_number=11, label=5),
+ make_inst(opname='TO_BOOL', arg=None, argval=None, argrepr='', offset=122, start_offset=122, starts_line=False, line_number=11, cache_info=[('counter', 1, b'\x00\x00'), ('version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_JUMP_IF_FALSE', arg=40, argval=214, argrepr='to L8', offset=130, start_offset=130, starts_line=False, line_number=11, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=134, start_offset=134, starts_line=False, line_number=11),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=136, start_offset=136, starts_line=True, line_number=12, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=146, start_offset=146, starts_line=False, line_number=12),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=148, start_offset=148, starts_line=False, line_number=12, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=156, start_offset=156, starts_line=False, line_number=12),
+ make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=158, start_offset=158, starts_line=True, line_number=13),
+ make_inst(opname='LOAD_SMALL_INT', arg=1, argval=1, argrepr='', offset=160, start_offset=160, starts_line=False, line_number=13),
+ make_inst(opname='BINARY_OP', arg=23, argval=23, argrepr='-=', offset=162, start_offset=162, starts_line=False, line_number=13, cache_info=[('counter', 1, b'\x00\x00'), ('descr', 4, b'\x00\x00\x00\x00\x00\x00\x00\x00')]),
+ make_inst(opname='STORE_FAST', arg=0, argval='i', argrepr='i', offset=174, start_offset=174, starts_line=False, line_number=13),
+ make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=176, start_offset=176, starts_line=True, line_number=14),
+ make_inst(opname='LOAD_SMALL_INT', arg=6, argval=6, argrepr='', offset=178, start_offset=178, starts_line=False, line_number=14),
+ make_inst(opname='COMPARE_OP', arg=148, argval='>', argrepr='bool(>)', offset=180, start_offset=180, starts_line=False, line_number=14, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='POP_JUMP_IF_FALSE', arg=3, argval=194, argrepr='to L6', offset=184, start_offset=184, starts_line=False, line_number=14, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=188, start_offset=188, starts_line=False, line_number=14),
+ make_inst(opname='JUMP_BACKWARD', arg=37, argval=120, argrepr='to L5', offset=190, start_offset=190, starts_line=True, line_number=15, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=194, start_offset=194, starts_line=True, line_number=16, label=6),
+ make_inst(opname='LOAD_SMALL_INT', arg=4, argval=4, argrepr='', offset=196, start_offset=196, starts_line=False, line_number=16),
+ make_inst(opname='COMPARE_OP', arg=18, argval='<', argrepr='bool(<)', offset=198, start_offset=198, starts_line=False, line_number=16, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='POP_JUMP_IF_TRUE', arg=3, argval=212, argrepr='to L7', offset=202, start_offset=202, starts_line=False, line_number=16, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=206, start_offset=206, starts_line=False, line_number=16),
+ make_inst(opname='JUMP_BACKWARD', arg=46, argval=120, argrepr='to L5', offset=208, start_offset=208, starts_line=False, line_number=16, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='JUMP_FORWARD', arg=11, argval=236, argrepr='to L9', offset=212, start_offset=212, starts_line=True, line_number=17, label=7),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=214, start_offset=214, starts_line=True, line_number=19, label=8, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_CONST', arg=2, argval='Who let lolcatz into this test suite?', argrepr="'Who let lolcatz into this test suite?'", offset=224, start_offset=224, starts_line=False, line_number=19),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=226, start_offset=226, starts_line=False, line_number=19, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=234, start_offset=234, starts_line=False, line_number=19),
+ make_inst(opname='NOP', arg=None, argval=None, argrepr='', offset=236, start_offset=236, starts_line=True, line_number=20, label=9),
+ make_inst(opname='LOAD_SMALL_INT', arg=1, argval=1, argrepr='', offset=238, start_offset=238, starts_line=True, line_number=21),
+ make_inst(opname='LOAD_SMALL_INT', arg=0, argval=0, argrepr='', offset=240, start_offset=240, starts_line=False, line_number=21),
+ make_inst(opname='BINARY_OP', arg=11, argval=11, argrepr='/', offset=242, start_offset=242, starts_line=False, line_number=21, cache_info=[('counter', 1, b'\x00\x00'), ('descr', 4, b'\x00\x00\x00\x00\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=254, start_offset=254, starts_line=False, line_number=21),
+ make_inst(opname='LOAD_FAST_BORROW', arg=0, argval='i', argrepr='i', offset=256, start_offset=256, starts_line=True, line_number=25),
+ make_inst(opname='COPY', arg=1, argval=1, argrepr='', offset=258, start_offset=258, starts_line=False, line_number=25),
+ make_inst(opname='LOAD_SPECIAL', arg=1, argval=1, argrepr='__exit__', offset=260, start_offset=260, starts_line=False, line_number=25),
+ make_inst(opname='SWAP', arg=2, argval=2, argrepr='', offset=262, start_offset=262, starts_line=False, line_number=25),
+ make_inst(opname='SWAP', arg=3, argval=3, argrepr='', offset=264, start_offset=264, starts_line=False, line_number=25),
+ make_inst(opname='LOAD_SPECIAL', arg=0, argval=0, argrepr='__enter__', offset=266, start_offset=266, starts_line=False, line_number=25),
+ make_inst(opname='CALL', arg=0, argval=0, argrepr='', offset=268, start_offset=268, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='STORE_FAST', arg=1, argval='dodgy', argrepr='dodgy', offset=276, start_offset=276, starts_line=False, line_number=25),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=278, start_offset=278, starts_line=True, line_number=26, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_CONST', arg=3, argval='Never reach this', argrepr="'Never reach this'", offset=288, start_offset=288, starts_line=False, line_number=26),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=290, start_offset=290, starts_line=False, line_number=26, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=298, start_offset=298, starts_line=False, line_number=26),
+ make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=300, start_offset=300, starts_line=True, line_number=25),
make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=302, start_offset=302, starts_line=False, line_number=25),
- make_inst(opname='CALL', arg=3, argval=3, argrepr='', offset=304, start_offset=304, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=312, start_offset=312, starts_line=False, line_number=25),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=314, start_offset=314, starts_line=True, line_number=28, label=10, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_CONST', arg=6, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=324, start_offset=324, starts_line=False, line_number=28),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=326, start_offset=326, starts_line=False, line_number=28, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=334, start_offset=334, starts_line=False, line_number=28),
- make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=336, start_offset=336, starts_line=False, line_number=28),
- make_inst(opname='RETURN_VALUE', arg=None, argval=None, argrepr='', offset=338, start_offset=338, starts_line=False, line_number=28),
- make_inst(opname='PUSH_EXC_INFO', arg=None, argval=None, argrepr='', offset=340, start_offset=340, starts_line=True, line_number=25),
- make_inst(opname='WITH_EXCEPT_START', arg=None, argval=None, argrepr='', offset=342, start_offset=342, starts_line=False, line_number=25),
- make_inst(opname='TO_BOOL', arg=None, argval=None, argrepr='', offset=344, start_offset=344, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00'), ('version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_JUMP_IF_TRUE', arg=2, argval=360, argrepr='to L11', offset=352, start_offset=352, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=356, start_offset=356, starts_line=False, line_number=25),
- make_inst(opname='RERAISE', arg=2, argval=2, argrepr='', offset=358, start_offset=358, starts_line=False, line_number=25),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=360, start_offset=360, starts_line=False, line_number=25, label=11),
- make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=362, start_offset=362, starts_line=False, line_number=25),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=364, start_offset=364, starts_line=False, line_number=25),
+ make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=304, start_offset=304, starts_line=False, line_number=25),
+ make_inst(opname='CALL', arg=3, argval=3, argrepr='', offset=306, start_offset=306, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=314, start_offset=314, starts_line=False, line_number=25),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=316, start_offset=316, starts_line=True, line_number=28, label=10, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_CONST', arg=6, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=326, start_offset=326, starts_line=False, line_number=28),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=328, start_offset=328, starts_line=False, line_number=28, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=336, start_offset=336, starts_line=False, line_number=28),
+ make_inst(opname='LOAD_CONST', arg=4, argval=None, argrepr='None', offset=338, start_offset=338, starts_line=False, line_number=28),
+ make_inst(opname='RETURN_VALUE', arg=None, argval=None, argrepr='', offset=340, start_offset=340, starts_line=False, line_number=28),
+ make_inst(opname='PUSH_EXC_INFO', arg=None, argval=None, argrepr='', offset=342, start_offset=342, starts_line=True, line_number=25),
+ make_inst(opname='WITH_EXCEPT_START', arg=None, argval=None, argrepr='', offset=344, start_offset=344, starts_line=False, line_number=25),
+ make_inst(opname='TO_BOOL', arg=None, argval=None, argrepr='', offset=346, start_offset=346, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00'), ('version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_JUMP_IF_TRUE', arg=2, argval=362, argrepr='to L11', offset=354, start_offset=354, starts_line=False, line_number=25, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=358, start_offset=358, starts_line=False, line_number=25),
+ make_inst(opname='RERAISE', arg=2, argval=2, argrepr='', offset=360, start_offset=360, starts_line=False, line_number=25),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=362, start_offset=362, starts_line=False, line_number=25, label=11),
+ make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=364, start_offset=364, starts_line=False, line_number=25),
make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=366, start_offset=366, starts_line=False, line_number=25),
make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=368, start_offset=368, starts_line=False, line_number=25),
- make_inst(opname='JUMP_BACKWARD_NO_INTERRUPT', arg=29, argval=314, argrepr='to L10', offset=370, start_offset=370, starts_line=False, line_number=25),
- make_inst(opname='COPY', arg=3, argval=3, argrepr='', offset=372, start_offset=372, starts_line=True, line_number=None),
- make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=374, start_offset=374, starts_line=False, line_number=None),
- make_inst(opname='RERAISE', arg=1, argval=1, argrepr='', offset=376, start_offset=376, starts_line=False, line_number=None),
- make_inst(opname='PUSH_EXC_INFO', arg=None, argval=None, argrepr='', offset=378, start_offset=378, starts_line=False, line_number=None),
- make_inst(opname='LOAD_GLOBAL', arg=4, argval='ZeroDivisionError', argrepr='ZeroDivisionError', offset=380, start_offset=380, starts_line=True, line_number=22, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='CHECK_EXC_MATCH', arg=None, argval=None, argrepr='', offset=390, start_offset=390, starts_line=False, line_number=22),
- make_inst(opname='POP_JUMP_IF_FALSE', arg=15, argval=426, argrepr='to L12', offset=392, start_offset=392, starts_line=False, line_number=22, cache_info=[('counter', 1, b'\x00\x00')]),
- make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=396, start_offset=396, starts_line=False, line_number=22),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=398, start_offset=398, starts_line=False, line_number=22),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=400, start_offset=400, starts_line=True, line_number=23, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_CONST', arg=5, argval='Here we go, here we go, here we go...', argrepr="'Here we go, here we go, here we go...'", offset=410, start_offset=410, starts_line=False, line_number=23),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=412, start_offset=412, starts_line=False, line_number=23, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=420, start_offset=420, starts_line=False, line_number=23),
- make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=422, start_offset=422, starts_line=False, line_number=23),
- make_inst(opname='JUMP_BACKWARD_NO_INTERRUPT', arg=56, argval=314, argrepr='to L10', offset=424, start_offset=424, starts_line=False, line_number=23),
- make_inst(opname='RERAISE', arg=0, argval=0, argrepr='', offset=426, start_offset=426, starts_line=True, line_number=22, label=12),
- make_inst(opname='COPY', arg=3, argval=3, argrepr='', offset=428, start_offset=428, starts_line=True, line_number=None),
- make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=430, start_offset=430, starts_line=False, line_number=None),
- make_inst(opname='RERAISE', arg=1, argval=1, argrepr='', offset=432, start_offset=432, starts_line=False, line_number=None),
- make_inst(opname='PUSH_EXC_INFO', arg=None, argval=None, argrepr='', offset=434, start_offset=434, starts_line=False, line_number=None),
- make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=436, start_offset=436, starts_line=True, line_number=28, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
- make_inst(opname='LOAD_CONST', arg=6, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=446, start_offset=446, starts_line=False, line_number=28),
- make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=448, start_offset=448, starts_line=False, line_number=28, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
- make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=456, start_offset=456, starts_line=False, line_number=28),
- make_inst(opname='RERAISE', arg=0, argval=0, argrepr='', offset=458, start_offset=458, starts_line=False, line_number=28),
- make_inst(opname='COPY', arg=3, argval=3, argrepr='', offset=460, start_offset=460, starts_line=True, line_number=None),
- make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=462, start_offset=462, starts_line=False, line_number=None),
- make_inst(opname='RERAISE', arg=1, argval=1, argrepr='', offset=464, start_offset=464, starts_line=False, line_number=None),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=370, start_offset=370, starts_line=False, line_number=25),
+ make_inst(opname='JUMP_BACKWARD_NO_INTERRUPT', arg=29, argval=316, argrepr='to L10', offset=372, start_offset=372, starts_line=False, line_number=25),
+ make_inst(opname='COPY', arg=3, argval=3, argrepr='', offset=374, start_offset=374, starts_line=True, line_number=None),
+ make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=376, start_offset=376, starts_line=False, line_number=None),
+ make_inst(opname='RERAISE', arg=1, argval=1, argrepr='', offset=378, start_offset=378, starts_line=False, line_number=None),
+ make_inst(opname='PUSH_EXC_INFO', arg=None, argval=None, argrepr='', offset=380, start_offset=380, starts_line=False, line_number=None),
+ make_inst(opname='LOAD_GLOBAL', arg=4, argval='ZeroDivisionError', argrepr='ZeroDivisionError', offset=382, start_offset=382, starts_line=True, line_number=22, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='CHECK_EXC_MATCH', arg=None, argval=None, argrepr='', offset=392, start_offset=392, starts_line=False, line_number=22),
+ make_inst(opname='POP_JUMP_IF_FALSE', arg=15, argval=428, argrepr='to L12', offset=394, start_offset=394, starts_line=False, line_number=22, cache_info=[('counter', 1, b'\x00\x00')]),
+ make_inst(opname='NOT_TAKEN', arg=None, argval=None, argrepr='', offset=398, start_offset=398, starts_line=False, line_number=22),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=400, start_offset=400, starts_line=False, line_number=22),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=402, start_offset=402, starts_line=True, line_number=23, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_CONST', arg=5, argval='Here we go, here we go, here we go...', argrepr="'Here we go, here we go, here we go...'", offset=412, start_offset=412, starts_line=False, line_number=23),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=414, start_offset=414, starts_line=False, line_number=23, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=422, start_offset=422, starts_line=False, line_number=23),
+ make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=424, start_offset=424, starts_line=False, line_number=23),
+ make_inst(opname='JUMP_BACKWARD_NO_INTERRUPT', arg=56, argval=316, argrepr='to L10', offset=426, start_offset=426, starts_line=False, line_number=23),
+ make_inst(opname='RERAISE', arg=0, argval=0, argrepr='', offset=428, start_offset=428, starts_line=True, line_number=22, label=12),
+ make_inst(opname='COPY', arg=3, argval=3, argrepr='', offset=430, start_offset=430, starts_line=True, line_number=None),
+ make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=432, start_offset=432, starts_line=False, line_number=None),
+ make_inst(opname='RERAISE', arg=1, argval=1, argrepr='', offset=434, start_offset=434, starts_line=False, line_number=None),
+ make_inst(opname='PUSH_EXC_INFO', arg=None, argval=None, argrepr='', offset=436, start_offset=436, starts_line=False, line_number=None),
+ make_inst(opname='LOAD_GLOBAL', arg=3, argval='print', argrepr='print + NULL', offset=438, start_offset=438, starts_line=True, line_number=28, cache_info=[('counter', 1, b'\x00\x00'), ('index', 1, b'\x00\x00'), ('module_keys_version', 1, b'\x00\x00'), ('builtin_keys_version', 1, b'\x00\x00')]),
+ make_inst(opname='LOAD_CONST', arg=6, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=448, start_offset=448, starts_line=False, line_number=28),
+ make_inst(opname='CALL', arg=1, argval=1, argrepr='', offset=450, start_offset=450, starts_line=False, line_number=28, cache_info=[('counter', 1, b'\x00\x00'), ('func_version', 2, b'\x00\x00\x00\x00')]),
+ make_inst(opname='POP_TOP', arg=None, argval=None, argrepr='', offset=458, start_offset=458, starts_line=False, line_number=28),
+ make_inst(opname='RERAISE', arg=0, argval=0, argrepr='', offset=460, start_offset=460, starts_line=False, line_number=28),
+ make_inst(opname='COPY', arg=3, argval=3, argrepr='', offset=462, start_offset=462, starts_line=True, line_number=None),
+ make_inst(opname='POP_EXCEPT', arg=None, argval=None, argrepr='', offset=464, start_offset=464, starts_line=False, line_number=None),
+ make_inst(opname='RERAISE', arg=1, argval=1, argrepr='', offset=466, start_offset=466, starts_line=False, line_number=None),
]
# One last piece of inspect fodder to check the default line number handling
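For orientation, the make_inst(...) entries in the hunk above mirror the fields that the standard dis.get_instructions() API reports for each bytecode instruction (opname, argval, offset, line number and, for specialized opcodes, cache_info). A minimal sketch, using a hypothetical sample() function rather than the test's actual fixture:

    import dis

    def sample(i):
        # Hypothetical stand-in for the test fixture; only here to show
        # where values such as opname, argval and offset come from.
        while i:
            i -= 1
        return i

    for instr in dis.get_instructions(sample):
        # Each Instruction exposes the same data the expected
        # make_inst(...) tuples above are built from.
        print(instr.offset, instr.opname, instr.argval)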
diff --git a/Lib/test/test_doctest/sample_doctest_errors.py b/Lib/test/test_doctest/sample_doctest_errors.py
new file mode 100644
index 00000000000..4a6f07af2d4
--- /dev/null
+++ b/Lib/test/test_doctest/sample_doctest_errors.py
@@ -0,0 +1,46 @@
+"""This is a sample module used for testing doctest.
+
+This module includes various scenarios involving errors.
+
+>>> 2 + 2
+5
+>>> 1/0
+1
+"""
+
+def g():
+ [][0] # line 12
+
+def errors():
+ """
+ >>> 2 + 2
+ 5
+ >>> 1/0
+ 1
+ >>> def f():
+ ... 2 + '2'
+ ...
+ >>> f()
+ 1
+ >>> g()
+ 1
+ """
+
+def syntax_error():
+ """
+ >>> 2+*3
+ 5
+ """
+
+__test__ = {
+ 'bad': """
+ >>> 2 + 2
+ 5
+ >>> 1/0
+ 1
+ """,
+}
+
+def test_suite():
+ import doctest
+ return doctest.DocTestSuite()
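As an illustrative aside, the sample module above is consumed through doctest.DocTestSuite: failing examples surface as unittest failures and raising examples as unittest errors, which is what the updated expectations in test_doctest.py below check. A minimal sketch, assuming the module is importable under its test package path:

    import doctest
    import unittest
    import test.test_doctest.sample_doctest_errors as mod

    # Collect every doctest in the sample module into a unittest suite and
    # run it; the counts match the expectations used further down.
    suite = doctest.DocTestSuite(mod)
    result = suite.run(unittest.TestResult())
    print(result)   # <unittest.result.TestResult run=4 errors=6 failures=3>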
diff --git a/Lib/test/test_doctest/test_doctest.py b/Lib/test/test_doctest/test_doctest.py
index a4a49298bab..72763d4a013 100644
--- a/Lib/test/test_doctest/test_doctest.py
+++ b/Lib/test/test_doctest/test_doctest.py
@@ -2267,14 +2267,24 @@ def test_DocTestSuite():
>>> import unittest
>>> import test.test_doctest.sample_doctest
>>> suite = doctest.DocTestSuite(test.test_doctest.sample_doctest)
- >>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ >>> result = suite.run(unittest.TestResult())
+ >>> result
+ <unittest.result.TestResult run=9 errors=2 failures=2>
+ >>> for tst, _ in result.failures:
+ ... print(tst)
+ bad (test.test_doctest.sample_doctest.__test__) [0]
+ foo (test.test_doctest.sample_doctest) [0]
+ >>> for tst, _ in result.errors:
+ ... print(tst)
+ test_silly_setup (test.test_doctest.sample_doctest) [1]
+ y_is_one (test.test_doctest.sample_doctest) [0]
We can also supply the module by name:
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest')
- >>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ >>> result = suite.run(unittest.TestResult())
+ >>> result
+ <unittest.result.TestResult run=9 errors=2 failures=2>
The module need not contain any doctest examples:
@@ -2296,13 +2306,26 @@ def test_DocTestSuite():
>>> result
<unittest.result.TestResult run=6 errors=0 failures=2>
>>> len(result.skipped)
- 2
+ 7
+ >>> for tst, _ in result.skipped:
+ ... print(tst)
+ double_skip (test.test_doctest.sample_doctest_skip) [0]
+ double_skip (test.test_doctest.sample_doctest_skip) [1]
+ double_skip (test.test_doctest.sample_doctest_skip)
+ partial_skip_fail (test.test_doctest.sample_doctest_skip) [0]
+ partial_skip_pass (test.test_doctest.sample_doctest_skip) [0]
+ single_skip (test.test_doctest.sample_doctest_skip) [0]
+ single_skip (test.test_doctest.sample_doctest_skip)
+ >>> for tst, _ in result.failures:
+ ... print(tst)
+ no_skip_fail (test.test_doctest.sample_doctest_skip) [0]
+ partial_skip_fail (test.test_doctest.sample_doctest_skip) [1]
We can use the current module:
>>> suite = test.test_doctest.sample_doctest.test_suite()
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
We can also provide a DocTestFinder:
@@ -2310,7 +2333,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest',
... test_finder=finder)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
The DocTestFinder need not return any tests:
@@ -2326,7 +2349,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest', globs={})
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=5>
+ <unittest.result.TestResult run=9 errors=3 failures=2>
Alternatively, we can provide extra globals. Here we'll make an
error go away by providing an extra global variable:
@@ -2334,7 +2357,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest',
... extraglobs={'y': 1})
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=3>
+ <unittest.result.TestResult run=9 errors=1 failures=2>
You can pass option flags. Here we'll cause an extra error
by disabling the blank-line feature:
@@ -2342,7 +2365,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=5>
+ <unittest.result.TestResult run=9 errors=2 failures=3>
You can supply setUp and tearDown functions:
@@ -2359,7 +2382,7 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=3>
+ <unittest.result.TestResult run=9 errors=1 failures=2>
But the tearDown restores sanity:
@@ -2377,13 +2400,115 @@ def test_DocTestSuite():
>>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest', setUp=setUp)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=3>
+ <unittest.result.TestResult run=9 errors=1 failures=2>
Here, we didn't need to use a tearDown function because we
modified the test globals, which are a copy of the
sample_doctest module dictionary. The test globals are
automatically cleared for us after a test.
- """
+ """
+
+def test_DocTestSuite_errors():
+ """Tests for error reporting in DocTestSuite.
+
+ >>> import unittest
+ >>> import test.test_doctest.sample_doctest_errors as mod
+ >>> suite = doctest.DocTestSuite(mod)
+ >>> result = suite.run(unittest.TestResult())
+ >>> result
+ <unittest.result.TestResult run=4 errors=6 failures=3>
+ >>> print(result.failures[0][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 5, in test.test_doctest.sample_doctest_errors
+ >...>> 2 + 2
+ AssertionError: Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ <BLANKLINE>
+ >>> print(result.failures[1][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line None, in test.test_doctest.sample_doctest_errors.__test__.bad
+ AssertionError: Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ <BLANKLINE>
+ >>> print(result.failures[2][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 16, in test.test_doctest.sample_doctest_errors.errors
+ >...>> 2 + 2
+ AssertionError: Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ <BLANKLINE>
+ >>> print(result.errors[0][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 7, in test.test_doctest.sample_doctest_errors
+ >...>> 1/0
+ File "<doctest test.test_doctest.sample_doctest_errors[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ <BLANKLINE>
+ >>> print(result.errors[1][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line None, in test.test_doctest.sample_doctest_errors.__test__.bad
+ File "<doctest test.test_doctest.sample_doctest_errors.__test__.bad[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ <BLANKLINE>
+ >>> print(result.errors[2][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 18, in test.test_doctest.sample_doctest_errors.errors
+ >...>> 1/0
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ <BLANKLINE>
+ >>> print(result.errors[3][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 23, in test.test_doctest.sample_doctest_errors.errors
+ >...>> f()
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[3]>", line 1, in <module>
+ f()
+ ~^^
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[2]>", line 2, in f
+ 2 + '2'
+ ~~^~~~~
+ TypeError: ...
+ <BLANKLINE>
+ >>> print(result.errors[4][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 25, in test.test_doctest.sample_doctest_errors.errors
+ >...>> g()
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[4]>", line 1, in <module>
+ g()
+ ~^^
+ File "...sample_doctest_errors.py", line 12, in g
+ [][0] # line 12
+ ~~^^^
+ IndexError: list index out of range
+ <BLANKLINE>
+ >>> print(result.errors[5][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...sample_doctest_errors.py", line 31, in test.test_doctest.sample_doctest_errors.syntax_error
+ >...>> 2+*3
+ File "<doctest test.test_doctest.sample_doctest_errors.syntax_error[0]>", line 1
+ 2+*3
+ ^
+ SyntaxError: invalid syntax
+ <BLANKLINE>
+ """
def test_DocFileSuite():
"""We can test tests found in text files using a DocFileSuite.
@@ -2396,7 +2521,7 @@ def test_DocFileSuite():
... 'test_doctest2.txt',
... 'test_doctest4.txt')
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=2>
+ <unittest.result.TestResult run=3 errors=2 failures=0>
The test files are looked for in the directory containing the
calling module. A package keyword argument can be provided to
@@ -2408,14 +2533,14 @@ def test_DocFileSuite():
... 'test_doctest4.txt',
... package='test.test_doctest')
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=2>
+ <unittest.result.TestResult run=3 errors=2 failures=0>
'/' should be used as a path separator. It will be converted
to a native separator at run time:
>>> suite = doctest.DocFileSuite('../test_doctest/test_doctest.txt')
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=1 errors=0 failures=1>
+ <unittest.result.TestResult run=1 errors=1 failures=0>
If DocFileSuite is used from an interactive session, then files
are resolved relative to the directory of sys.argv[0]:
@@ -2441,7 +2566,7 @@ def test_DocFileSuite():
>>> suite = doctest.DocFileSuite(test_file, module_relative=False)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=1 errors=0 failures=1>
+ <unittest.result.TestResult run=1 errors=1 failures=0>
It is an error to specify `package` when `module_relative=False`:
@@ -2455,12 +2580,19 @@ def test_DocFileSuite():
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest4.txt',
- ... 'test_doctest_skip.txt')
+ ... 'test_doctest_skip.txt',
+ ... 'test_doctest_skip2.txt')
>>> result = suite.run(unittest.TestResult())
>>> result
- <unittest.result.TestResult run=3 errors=0 failures=1>
- >>> len(result.skipped)
- 1
+ <unittest.result.TestResult run=4 errors=1 failures=0>
+ >>> len(result.skipped)
+ 4
+ >>> for tst, _ in result.skipped: # doctest: +ELLIPSIS
+ ... print('=', tst)
+ = ...test_doctest_skip.txt [0]
+ = ...test_doctest_skip.txt [1]
+ = ...test_doctest_skip.txt
+ = ...test_doctest_skip2.txt [0]
You can specify initial global variables:
@@ -2469,7 +2601,7 @@ def test_DocFileSuite():
... 'test_doctest4.txt',
... globs={'favorite_color': 'blue'})
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=1>
+ <unittest.result.TestResult run=3 errors=1 failures=0>
In this case, we supplied a missing favorite color. You can
provide doctest options:
@@ -2480,7 +2612,7 @@ def test_DocFileSuite():
... optionflags=doctest.DONT_ACCEPT_BLANKLINE,
... globs={'favorite_color': 'blue'})
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=2>
+ <unittest.result.TestResult run=3 errors=1 failures=1>
And, you can provide setUp and tearDown functions:
@@ -2499,7 +2631,7 @@ def test_DocFileSuite():
... 'test_doctest4.txt',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=1>
+ <unittest.result.TestResult run=3 errors=1 failures=0>
But the tearDown restores sanity:
@@ -2541,9 +2673,60 @@ def test_DocFileSuite():
... 'test_doctest4.txt',
... encoding='utf-8')
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=3 errors=0 failures=2>
+ <unittest.result.TestResult run=3 errors=2 failures=0>
+ """
- """
+def test_DocFileSuite_errors():
+ """Tests for error reporting in DocTestSuite.
+
+ >>> import unittest
+ >>> suite = doctest.DocFileSuite('test_doctest_errors.txt')
+ >>> result = suite.run(unittest.TestResult())
+ >>> result
+ <unittest.result.TestResult run=1 errors=3 failures=1>
+ >>> print(result.failures[0][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...test_doctest_errors.txt", line 4, in test_doctest_errors.txt
+ >...>> 2 + 2
+ AssertionError: Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ <BLANKLINE>
+ >>> print(result.errors[0][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...test_doctest_errors.txt", line 6, in test_doctest_errors.txt
+ >...>> 1/0
+ File "<doctest test_doctest_errors.txt[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ <BLANKLINE>
+ >>> print(result.errors[1][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...test_doctest_errors.txt", line 11, in test_doctest_errors.txt
+ >...>> f()
+ File "<doctest test_doctest_errors.txt[3]>", line 1, in <module>
+ f()
+ ~^^
+ File "<doctest test_doctest_errors.txt[2]>", line 2, in f
+ 2 + '2'
+ ~~^~~~~
+ TypeError: ...
+ <BLANKLINE>
+ >>> print(result.errors[2][1]) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ File "...test_doctest_errors.txt", line 13, in test_doctest_errors.txt
+ >...>> 2+*3
+ File "<doctest test_doctest_errors.txt[4]>", line 1
+ 2+*3
+ ^
+ SyntaxError: invalid syntax
+ <BLANKLINE>
+
+ """
def test_trailing_space_in_test():
"""
@@ -2612,14 +2795,26 @@ def test_unittest_reportflags():
... optionflags=doctest.DONT_ACCEPT_BLANKLINE)
>>> import unittest
>>> result = suite.run(unittest.TestResult())
+ >>> result
+ <unittest.result.TestResult run=1 errors=1 failures=1>
>>> print(result.failures[0][1]) # doctest: +ELLIPSIS
- Traceback ...
- Failed example:
- favorite_color
- ...
- Failed example:
+ Traceback (most recent call last):
+ File ...
+ >...>> if 1:
+ AssertionError: Failed example:
if 1:
- ...
+ print('a')
+ print()
+ print('b')
+ Expected:
+ a
+ <BLANKLINE>
+ b
+ Got:
+ a
+ <BLANKLINE>
+ b
+ <BLANKLINE>
Note that we see both failures displayed.
@@ -2628,16 +2823,8 @@ def test_unittest_reportflags():
Now, when we run the test:
- >>> result = suite.run(unittest.TestResult())
- >>> print(result.failures[0][1]) # doctest: +ELLIPSIS
- Traceback ...
- Failed example:
- favorite_color
- Exception raised:
- ...
- NameError: name 'favorite_color' is not defined
- <BLANKLINE>
- <BLANKLINE>
+ >>> suite.run(unittest.TestResult())
+ <unittest.result.TestResult run=1 errors=1 failures=0>
We get only the first failure.
@@ -2647,19 +2834,20 @@ def test_unittest_reportflags():
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE | doctest.REPORT_NDIFF)
- Then the default eporting options are ignored:
+ Then the default reporting options are ignored:
>>> result = suite.run(unittest.TestResult())
+ >>> result
+ <unittest.result.TestResult run=1 errors=1 failures=1>
*NOTE*: These doctest are intentionally not placed in raw string to depict
the trailing whitespace using `\x20` in the diff below.
>>> print(result.failures[0][1]) # doctest: +ELLIPSIS
Traceback ...
- Failed example:
- favorite_color
- ...
- Failed example:
+ File ...
+ >...>> if 1:
+ AssertionError: Failed example:
if 1:
print('a')
print()
@@ -2670,7 +2858,6 @@ def test_unittest_reportflags():
+\x20
b
<BLANKLINE>
- <BLANKLINE>
Test runners can restore the formatting flags after they run:
@@ -2860,6 +3047,57 @@ Test the verbose output:
>>> _colorize.COLORIZE = save_colorize
"""
+def test_testfile_errors(): r"""
+Tests for error reporting in the testfile() function.
+
+ >>> doctest.testfile('test_doctest_errors.txt', verbose=False) # doctest: +ELLIPSIS
+ **********************************************************************
+ File "...test_doctest_errors.txt", line 4, in test_doctest_errors.txt
+ Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ **********************************************************************
+ File "...test_doctest_errors.txt", line 6, in test_doctest_errors.txt
+ Failed example:
+ 1/0
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test_doctest_errors.txt[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ **********************************************************************
+ File "...test_doctest_errors.txt", line 11, in test_doctest_errors.txt
+ Failed example:
+ f()
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test_doctest_errors.txt[3]>", line 1, in <module>
+ f()
+ ~^^
+ File "<doctest test_doctest_errors.txt[2]>", line 2, in f
+ 2 + '2'
+ ~~^~~~~
+ TypeError: ...
+ **********************************************************************
+ File "...test_doctest_errors.txt", line 13, in test_doctest_errors.txt
+ Failed example:
+ 2+*3
+ Exception raised:
+ File "<doctest test_doctest_errors.txt[4]>", line 1
+ 2+*3
+ ^
+ SyntaxError: invalid syntax
+ **********************************************************************
+ 1 item had failures:
+ 4 of 5 in test_doctest_errors.txt
+ ***Test Failed*** 4 failures.
+ TestResults(failed=4, attempted=5)
+"""
+
class TestImporter(importlib.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
@@ -2990,6 +3228,110 @@ out of the binary module.
TestResults(failed=0, attempted=0)
"""
+def test_testmod_errors(): r"""
+Tests for error reporting in the testmod() function.
+
+ >>> import test.test_doctest.sample_doctest_errors as mod
+ >>> doctest.testmod(mod, verbose=False) # doctest: +ELLIPSIS
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 5, in test.test_doctest.sample_doctest_errors
+ Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 7, in test.test_doctest.sample_doctest_errors
+ Failed example:
+ 1/0
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test.test_doctest.sample_doctest_errors[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ **********************************************************************
+ File "...sample_doctest_errors.py", line ?, in test.test_doctest.sample_doctest_errors.__test__.bad
+ Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ **********************************************************************
+ File "...sample_doctest_errors.py", line ?, in test.test_doctest.sample_doctest_errors.__test__.bad
+ Failed example:
+ 1/0
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test.test_doctest.sample_doctest_errors.__test__.bad[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 16, in test.test_doctest.sample_doctest_errors.errors
+ Failed example:
+ 2 + 2
+ Expected:
+ 5
+ Got:
+ 4
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 18, in test.test_doctest.sample_doctest_errors.errors
+ Failed example:
+ 1/0
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[1]>", line 1, in <module>
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 23, in test.test_doctest.sample_doctest_errors.errors
+ Failed example:
+ f()
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[3]>", line 1, in <module>
+ f()
+ ~^^
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[2]>", line 2, in f
+ 2 + '2'
+ ~~^~~~~
+ TypeError: ...
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 25, in test.test_doctest.sample_doctest_errors.errors
+ Failed example:
+ g()
+ Exception raised:
+ Traceback (most recent call last):
+ File "<doctest test.test_doctest.sample_doctest_errors.errors[4]>", line 1, in <module>
+ g()
+ ~^^
+ File "...sample_doctest_errors.py", line 12, in g
+ [][0] # line 12
+ ~~^^^
+ IndexError: list index out of range
+ **********************************************************************
+ File "...sample_doctest_errors.py", line 31, in test.test_doctest.sample_doctest_errors.syntax_error
+ Failed example:
+ 2+*3
+ Exception raised:
+ File "<doctest test.test_doctest.sample_doctest_errors.syntax_error[0]>", line 1
+ 2+*3
+ ^
+ SyntaxError: invalid syntax
+ **********************************************************************
+ 4 items had failures:
+ 2 of 2 in test.test_doctest.sample_doctest_errors
+ 2 of 2 in test.test_doctest.sample_doctest_errors.__test__.bad
+ 4 of 5 in test.test_doctest.sample_doctest_errors.errors
+ 1 of 1 in test.test_doctest.sample_doctest_errors.syntax_error
+ ***Test Failed*** 9 failures.
+ TestResults(failed=9, attempted=10)
+"""
+
try:
os.fsencode("foo-bär@baz.py")
supports_unicode = True
@@ -3021,11 +3363,6 @@ Check doctest with a non-ascii filename:
raise Exception('clé')
Exception raised:
Traceback (most recent call last):
- File ...
- exec(compile(example.source, filename, "single",
- ~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- compileflags, True), test.globs)
- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "<doctest foo-bär@baz[0]>", line 1, in <module>
raise Exception('clé')
Exception: clé
@@ -3318,9 +3655,9 @@ def test_run_doctestsuite_multiple_times():
>>> import test.test_doctest.sample_doctest
>>> suite = doctest.DocTestSuite(test.test_doctest.sample_doctest)
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
>>> suite.run(unittest.TestResult())
- <unittest.result.TestResult run=9 errors=0 failures=4>
+ <unittest.result.TestResult run=9 errors=2 failures=2>
"""
diff --git a/Lib/test/test_doctest/test_doctest_errors.txt b/Lib/test/test_doctest/test_doctest_errors.txt
new file mode 100644
index 00000000000..93c3c106e60
--- /dev/null
+++ b/Lib/test/test_doctest/test_doctest_errors.txt
@@ -0,0 +1,14 @@
+This is a sample doctest in a text file, in which all examples fail
+or raise an exception.
+
+ >>> 2 + 2
+ 5
+ >>> 1/0
+ 1
+ >>> def f():
+ ... 2 + '2'
+ ...
+ >>> f()
+ 1
+ >>> 2+*3
+ 5
diff --git a/Lib/test/test_doctest/test_doctest_skip.txt b/Lib/test/test_doctest/test_doctest_skip.txt
index f340e2b8141..06c23d06e60 100644
--- a/Lib/test/test_doctest/test_doctest_skip.txt
+++ b/Lib/test/test_doctest/test_doctest_skip.txt
@@ -2,3 +2,5 @@ This is a sample doctest in a text file, in which all examples are skipped.
>>> 2 + 2 # doctest: +SKIP
5
+ >>> 2 + 2 # doctest: +SKIP
+ 4
diff --git a/Lib/test/test_doctest/test_doctest_skip2.txt b/Lib/test/test_doctest/test_doctest_skip2.txt
new file mode 100644
index 00000000000..85e4938c346
--- /dev/null
+++ b/Lib/test/test_doctest/test_doctest_skip2.txt
@@ -0,0 +1,6 @@
+This is a sample doctest in a text file, in which some examples are skipped.
+
+ >>> 2 + 2 # doctest: +SKIP
+ 5
+ >>> 2 + 2
+ 4
diff --git a/Lib/test/test_email/test__header_value_parser.py b/Lib/test/test_email/test__header_value_parser.py
index ac12c3b2306..179e236ecdf 100644
--- a/Lib/test/test_email/test__header_value_parser.py
+++ b/Lib/test/test_email/test__header_value_parser.py
@@ -463,6 +463,19 @@ class TestParser(TestParserMixin, TestEmailBase):
[errors.NonPrintableDefect], ')')
self.assertEqual(ptext.defects[0].non_printables[0], '\x00')
+ def test_get_qp_ctext_close_paren_only(self):
+ self._test_get_x(parser.get_qp_ctext,
+ ')', '', ' ', [], ')')
+
+ def test_get_qp_ctext_open_paren_only(self):
+ self._test_get_x(parser.get_qp_ctext,
+ '(', '', ' ', [], '(')
+
+ def test_get_qp_ctext_no_end_char(self):
+ self._test_get_x(parser.get_qp_ctext,
+ '', '', ' ', [], '')
+
+
# get_qcontent
def test_get_qcontent_only(self):
@@ -503,6 +516,14 @@ class TestParser(TestParserMixin, TestEmailBase):
[errors.NonPrintableDefect], '"')
self.assertEqual(ptext.defects[0].non_printables[0], '\x00')
+ def test_get_qcontent_empty(self):
+ self._test_get_x(parser.get_qcontent,
+ '"', '', '', [], '"')
+
+ def test_get_qcontent_no_end_char(self):
+ self._test_get_x(parser.get_qcontent,
+ '', '', '', [], '')
+
# get_atext
def test_get_atext_only(self):
@@ -1283,6 +1304,18 @@ class TestParser(TestParserMixin, TestEmailBase):
self._test_get_x(parser.get_dtext,
'foo[bar', 'foo', 'foo', [], '[bar')
+ def test_get_dtext_open_bracket_only(self):
+ self._test_get_x(parser.get_dtext,
+ '[', '', '', [], '[')
+
+ def test_get_dtext_close_bracket_only(self):
+ self._test_get_x(parser.get_dtext,
+ ']', '', '', [], ']')
+
+ def test_get_dtext_empty(self):
+ self._test_get_x(parser.get_dtext,
+ '', '', '', [], '')
+
# get_domain_literal
def test_get_domain_literal_only(self):
@@ -2458,6 +2491,38 @@ class TestParser(TestParserMixin, TestEmailBase):
self.assertEqual(address.all_mailboxes[0].domain, 'example.com')
self.assertEqual(address.all_mailboxes[0].addr_spec, '"example example"@example.com')
+ def test_get_address_with_invalid_domain(self):
+ address = self._test_get_x(parser.get_address,
+ '<T@[',
+ '<T@[]>',
+ '<T@[]>',
+ [errors.InvalidHeaderDefect, # missing trailing '>' on angle-addr
+ errors.InvalidHeaderDefect, # end of input inside domain-literal
+ ],
+ '')
+ self.assertEqual(address.token_type, 'address')
+ self.assertEqual(len(address.mailboxes), 0)
+ self.assertEqual(len(address.all_mailboxes), 1)
+ self.assertEqual(address.all_mailboxes[0].domain, '[]')
+ self.assertEqual(address.all_mailboxes[0].local_part, 'T')
+ self.assertEqual(address.all_mailboxes[0].token_type, 'invalid-mailbox')
+ self.assertEqual(address[0].token_type, 'invalid-mailbox')
+
+ address = self._test_get_x(parser.get_address,
+ '!an??:=m==fr2@[C',
+ '!an??:=m==fr2@[C];',
+ '!an??:=m==fr2@[C];',
+ [errors.InvalidHeaderDefect, # end of header in group
+ errors.InvalidHeaderDefect, # end of input inside domain-literal
+ ],
+ '')
+ self.assertEqual(address.token_type, 'address')
+ self.assertEqual(len(address.mailboxes), 0)
+ self.assertEqual(len(address.all_mailboxes), 1)
+ self.assertEqual(address.all_mailboxes[0].domain, '[C]')
+ self.assertEqual(address.all_mailboxes[0].local_part, '=m==fr2')
+ self.assertEqual(address.all_mailboxes[0].token_type, 'invalid-mailbox')
+ self.assertEqual(address[0].token_type, 'group')
# get_address_list
@@ -2732,6 +2797,19 @@ class TestParser(TestParserMixin, TestEmailBase):
)
self.assertEqual(message_id.token_type, 'message-id')
+ def test_parse_message_id_with_invalid_domain(self):
+ message_id = self._test_parse_x(
+ parser.parse_message_id,
+ "<T@[",
+ "<T@[]>",
+ "<T@[]>",
+ [errors.ObsoleteHeaderDefect] + [errors.InvalidHeaderDefect] * 2,
+ [],
+ )
+ self.assertEqual(message_id.token_type, 'message-id')
+ self.assertEqual(str(message_id.all_defects[-1]),
+ "end of input inside domain-literal")
+
def test_parse_message_id_with_remaining(self):
message_id = self._test_parse_x(
parser.parse_message_id,
diff --git a/Lib/test/test_email/test_email.py b/Lib/test/test_email/test_email.py
index 7b14305f997..b8116d073a2 100644
--- a/Lib/test/test_email/test_email.py
+++ b/Lib/test/test_email/test_email.py
@@ -389,6 +389,24 @@ class TestMessageAPI(TestEmailBase):
msg = email.message_from_string("Content-Type: blarg; baz; boo\n")
self.assertEqual(msg.get_param('baz'), '')
+ def test_continuation_sorting_part_order(self):
+ msg = email.message_from_string(
+ "Content-Disposition: attachment; "
+ "filename*=\"ignored\"; "
+ "filename*0*=\"utf-8''foo%20\"; "
+ "filename*1*=\"bar.txt\"\n"
+ )
+ filename = msg.get_filename()
+ self.assertEqual(filename, 'foo bar.txt')
+
+ def test_sorting_no_continuations(self):
+ msg = email.message_from_string(
+ "Content-Disposition: attachment; "
+ "filename*=\"bar.txt\"; "
+ )
+ filename = msg.get_filename()
+ self.assertEqual(filename, 'bar.txt')
+
def test_missing_filename(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_filename(), None)
@@ -2550,6 +2568,18 @@ Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz foo bar =?mac-iceland?q?r=8Aksm?=
self.assertEqual(str(make_header(decode_header(s))),
'"Müller T" <T.Mueller@xxx.com>')
+ def test_unencoded_ascii(self):
+ # bpo-22833/gh-67022: returns [(str, None)] rather than [(bytes, None)]
+ s = 'header without encoded words'
+ self.assertEqual(decode_header(s),
+ [('header without encoded words', None)])
+
+ def test_unencoded_utf8(self):
+ # bpo-22833/gh-67022: returns [(str, None)] rather than [(bytes, None)]
+ s = 'header with unexpected non ASCII caract\xe8res'
+ self.assertEqual(decode_header(s),
+ [('header with unexpected non ASCII caract\xe8res', None)])
+
# Test the MIMEMessage class
class TestMIMEMessage(TestEmailBase):
diff --git a/Lib/test/test_enum.py b/Lib/test/test_enum.py
index 221f9db7763..bbc7630fa83 100644
--- a/Lib/test/test_enum.py
+++ b/Lib/test/test_enum.py
@@ -36,7 +36,7 @@ def load_tests(loader, tests, ignore):
optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
))
howto_tests = os.path.join(REPO_ROOT, 'Doc/howto/enum.rst')
- if os.path.exists(howto_tests):
+ if os.path.exists(howto_tests) and sys.float_repr_style == 'short':
tests.addTests(doctest.DocFileSuite(
howto_tests,
module_relative=False,
diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py
index 175ef531386..57d0656487d 100644
--- a/Lib/test/test_exceptions.py
+++ b/Lib/test/test_exceptions.py
@@ -1445,6 +1445,7 @@ class ExceptionTests(unittest.TestCase):
foo()
support.gc_collect()
+ @support.skip_emscripten_stack_overflow()
@cpython_only
def test_recursion_normalizing_exception(self):
import_module("_testinternalcapi")
@@ -1522,6 +1523,7 @@ class ExceptionTests(unittest.TestCase):
self.assertIn(b'Done.', out)
+ @support.skip_emscripten_stack_overflow()
def test_recursion_in_except_handler(self):
def set_relative_recursion_limit(n):
diff --git a/Lib/test/test_external_inspection.py b/Lib/test/test_external_inspection.py
index ad3f669a030..0f31c225e68 100644
--- a/Lib/test/test_external_inspection.py
+++ b/Lib/test/test_external_inspection.py
@@ -4,9 +4,10 @@ import textwrap
import importlib
import sys
import socket
-from asyncio import staggered, taskgroups
+import threading
+from asyncio import staggered, taskgroups, base_events, tasks
from unittest.mock import ANY
-from test.support import os_helper, SHORT_TIMEOUT, busy_retry
+from test.support import os_helper, SHORT_TIMEOUT, busy_retry, requires_gil_enabled
from test.support.script_helper import make_script
from test.support.socket_helper import find_unused_port
@@ -16,11 +17,13 @@ PROCESS_VM_READV_SUPPORTED = False
try:
from _remote_debugging import PROCESS_VM_READV_SUPPORTED
- from _remote_debugging import get_stack_trace
- from _remote_debugging import get_async_stack_trace
- from _remote_debugging import get_all_awaited_by
+ from _remote_debugging import RemoteUnwinder
+ from _remote_debugging import FrameInfo, CoroInfo, TaskInfo
except ImportError:
- raise unittest.SkipTest("Test only runs when _remote_debugging is available")
+ raise unittest.SkipTest(
+ "Test only runs when _remote_debugging is available"
+ )
+
def _make_test_script(script_dir, script_basename, source):
to_return = make_script(script_dir, script_basename, source)
@@ -29,12 +32,32 @@ def _make_test_script(script_dir, script_basename, source):
skip_if_not_supported = unittest.skipIf(
- (sys.platform != "darwin" and sys.platform != "linux" and sys.platform != "win32"),
+ (
+ sys.platform != "darwin"
+ and sys.platform != "linux"
+ and sys.platform != "win32"
+ ),
"Test only runs on Linux, Windows and MacOS",
)
+def get_stack_trace(pid):
+ unwinder = RemoteUnwinder(pid, all_threads=True, debug=True)
+ return unwinder.get_stack_trace()
+
+
+def get_async_stack_trace(pid):
+ unwinder = RemoteUnwinder(pid, debug=True)
+ return unwinder.get_async_stack_trace()
+
+
+def get_all_awaited_by(pid):
+ unwinder = RemoteUnwinder(pid, debug=True)
+ return unwinder.get_all_awaited_by()
+
+
class TestGetStackTrace(unittest.TestCase):
+ maxDiff = None
@skip_if_not_supported
@unittest.skipIf(
@@ -46,7 +69,7 @@ class TestGetStackTrace(unittest.TestCase):
port = find_unused_port()
script = textwrap.dedent(
f"""\
- import time, sys, socket
+ import time, sys, socket, threading
# Connect to the test process
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', {port}))
@@ -55,13 +78,16 @@ class TestGetStackTrace(unittest.TestCase):
for x in range(100):
if x == 50:
baz()
+
def baz():
foo()
def foo():
- sock.sendall(b"ready"); time.sleep(10_000) # same line number
+ sock.sendall(b"ready:thread\\n"); time.sleep(10_000) # same line number
- bar()
+ t = threading.Thread(target=bar)
+ t.start()
+ sock.sendall(b"ready:main\\n"); t.join() # same line number
"""
)
stack_trace = None
@@ -82,11 +108,17 @@ class TestGetStackTrace(unittest.TestCase):
p = subprocess.Popen([sys.executable, script_name])
client_socket, _ = server_socket.accept()
server_socket.close()
- response = client_socket.recv(1024)
- self.assertEqual(response, b"ready")
+ response = b""
+ while (
+ b"ready:main" not in response
+ or b"ready:thread" not in response
+ ):
+ response += client_socket.recv(1024)
stack_trace = get_stack_trace(p.pid)
except PermissionError:
- self.skipTest("Insufficient permissions to read the stack trace")
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
finally:
if client_socket is not None:
client_socket.close()
@@ -94,13 +126,23 @@ class TestGetStackTrace(unittest.TestCase):
p.terminate()
p.wait(timeout=SHORT_TIMEOUT)
- expected_stack_trace = [
- ("foo", script_name, 14),
- ("baz", script_name, 11),
- ("bar", script_name, 9),
- ("<module>", script_name, 16),
+ thread_expected_stack_trace = [
+ FrameInfo([script_name, 15, "foo"]),
+ FrameInfo([script_name, 12, "baz"]),
+ FrameInfo([script_name, 9, "bar"]),
+ FrameInfo([threading.__file__, ANY, "Thread.run"]),
]
- self.assertEqual(stack_trace, expected_stack_trace)
+ # It is possible that there are more threads, so we check that the
+ # expected stack traces are in the result (looking at you, Windows!)
+ self.assertIn((ANY, thread_expected_stack_trace), stack_trace)
+
+ # Check that the main thread stack trace is in the result
+ frame = FrameInfo([script_name, 19, "<module>"])
+ for _, stack in stack_trace:
+ if frame in stack:
+ break
+ else:
+ self.fail("Main thread stack trace not found in result")
@skip_if_not_supported
@unittest.skipIf(
@@ -160,8 +202,12 @@ class TestGetStackTrace(unittest.TestCase):
):
script_dir = os.path.join(work_dir, "script_pkg")
os.mkdir(script_dir)
- server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ server_socket = socket.socket(
+ socket.AF_INET, socket.SOCK_STREAM
+ )
+ server_socket.setsockopt(
+ socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
+ )
server_socket.bind(("localhost", port))
server_socket.settimeout(SHORT_TIMEOUT)
server_socket.listen(1)
@@ -179,7 +225,9 @@ class TestGetStackTrace(unittest.TestCase):
self.assertEqual(response, b"ready")
stack_trace = get_async_stack_trace(p.pid)
except PermissionError:
- self.skipTest("Insufficient permissions to read the stack trace")
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
finally:
if client_socket is not None:
client_socket.close()
@@ -190,79 +238,49 @@ class TestGetStackTrace(unittest.TestCase):
# sets are unordered, so we want to sort "awaited_by"s
stack_trace[2].sort(key=lambda x: x[1])
- root_task = "Task-1"
expected_stack_trace = [
[
- ("c5", script_name, 10),
- ("c4", script_name, 14),
- ("c3", script_name, 17),
- ("c2", script_name, 20),
+ FrameInfo([script_name, 10, "c5"]),
+ FrameInfo([script_name, 14, "c4"]),
+ FrameInfo([script_name, 17, "c3"]),
+ FrameInfo([script_name, 20, "c2"]),
],
"c2_root",
[
- [
- [
- (
- "TaskGroup._aexit",
- taskgroups.__file__,
- ANY,
- ),
- (
- "TaskGroup.__aexit__",
- taskgroups.__file__,
- ANY,
- ),
- ("main", script_name, 26),
- ],
- "Task-1",
- [],
- ],
- [
- [("c1", script_name, 23)],
- "sub_main_1",
+ CoroInfo(
[
[
- [
- (
- "TaskGroup._aexit",
+ FrameInfo(
+ [
taskgroups.__file__,
ANY,
- ),
- (
- "TaskGroup.__aexit__",
- taskgroups.__file__,
- ANY,
- ),
- ("main", script_name, 26),
- ],
- "Task-1",
- [],
- ]
- ],
- ],
- [
- [("c1", script_name, 23)],
- "sub_main_2",
- [
- [
- [
- (
"TaskGroup._aexit",
+ ]
+ ),
+ FrameInfo(
+ [
taskgroups.__file__,
ANY,
- ),
- (
"TaskGroup.__aexit__",
- taskgroups.__file__,
- ANY,
- ),
- ("main", script_name, 26),
- ],
- "Task-1",
- [],
- ]
- ],
- ],
+ ]
+ ),
+ FrameInfo([script_name, 26, "main"]),
+ ],
+ "Task-1",
+ ]
+ ),
+ CoroInfo(
+ [
+ [FrameInfo([script_name, 23, "c1"])],
+ "sub_main_1",
+ ]
+ ),
+ CoroInfo(
+ [
+ [FrameInfo([script_name, 23, "c1"])],
+ "sub_main_2",
+ ]
+ ),
],
]
self.assertEqual(stack_trace, expected_stack_trace)
@@ -321,7 +339,9 @@ class TestGetStackTrace(unittest.TestCase):
self.assertEqual(response, b"ready")
stack_trace = get_async_stack_trace(p.pid)
except PermissionError:
- self.skipTest("Insufficient permissions to read the stack trace")
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
finally:
if client_socket is not None:
client_socket.close()
@@ -334,9 +354,9 @@ class TestGetStackTrace(unittest.TestCase):
expected_stack_trace = [
[
- ("gen_nested_call", script_name, 10),
- ("gen", script_name, 16),
- ("main", script_name, 19),
+ FrameInfo([script_name, 10, "gen_nested_call"]),
+ FrameInfo([script_name, 16, "gen"]),
+ FrameInfo([script_name, 19, "main"]),
],
"Task-1",
[],
@@ -398,7 +418,9 @@ class TestGetStackTrace(unittest.TestCase):
self.assertEqual(response, b"ready")
stack_trace = get_async_stack_trace(p.pid)
except PermissionError:
- self.skipTest("Insufficient permissions to read the stack trace")
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
finally:
if client_socket is not None:
client_socket.close()
@@ -410,9 +432,12 @@ class TestGetStackTrace(unittest.TestCase):
stack_trace[2].sort(key=lambda x: x[1])
expected_stack_trace = [
- [("deep", script_name, 11), ("c1", script_name, 15)],
+ [
+ FrameInfo([script_name, 11, "deep"]),
+ FrameInfo([script_name, 15, "c1"]),
+ ],
"Task-2",
- [[[("main", script_name, 21)], "Task-1", []]],
+ [CoroInfo([[FrameInfo([script_name, 21, "main"])], "Task-1"])],
]
self.assertEqual(stack_trace, expected_stack_trace)
@@ -474,7 +499,9 @@ class TestGetStackTrace(unittest.TestCase):
self.assertEqual(response, b"ready")
stack_trace = get_async_stack_trace(p.pid)
except PermissionError:
- self.skipTest("Insufficient permissions to read the stack trace")
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
finally:
if client_socket is not None:
client_socket.close()
@@ -486,20 +513,29 @@ class TestGetStackTrace(unittest.TestCase):
stack_trace[2].sort(key=lambda x: x[1])
expected_stack_trace = [
[
- ("deep", script_name, 11),
- ("c1", script_name, 15),
- ("staggered_race.<locals>.run_one_coro", staggered.__file__, ANY),
+ FrameInfo([script_name, 11, "deep"]),
+ FrameInfo([script_name, 15, "c1"]),
+ FrameInfo(
+ [
+ staggered.__file__,
+ ANY,
+ "staggered_race.<locals>.run_one_coro",
+ ]
+ ),
],
"Task-2",
[
- [
+ CoroInfo(
[
- ("staggered_race", staggered.__file__, ANY),
- ("main", script_name, 21),
- ],
- "Task-1",
- [],
- ]
+ [
+ FrameInfo(
+ [staggered.__file__, ANY, "staggered_race"]
+ ),
+ FrameInfo([script_name, 21, "main"]),
+ ],
+ "Task-1",
+ ]
+ )
],
]
self.assertEqual(stack_trace, expected_stack_trace)
@@ -630,62 +666,174 @@ class TestGetStackTrace(unittest.TestCase):
# expected: at least 1000 pending tasks
self.assertGreaterEqual(len(entries), 1000)
# the first three tasks stem from the code structure
- self.assertIn((ANY, "Task-1", []), entries)
main_stack = [
- (
- "TaskGroup._aexit",
- taskgroups.__file__,
- ANY,
- ),
- (
- "TaskGroup.__aexit__",
- taskgroups.__file__,
- ANY,
+ FrameInfo([taskgroups.__file__, ANY, "TaskGroup._aexit"]),
+ FrameInfo(
+ [taskgroups.__file__, ANY, "TaskGroup.__aexit__"]
),
- ("main", script_name, 60),
+ FrameInfo([script_name, 60, "main"]),
]
self.assertIn(
- (ANY, "server task", [[main_stack, ANY]]),
+ TaskInfo(
+ [ANY, "Task-1", [CoroInfo([main_stack, ANY])], []]
+ ),
entries,
)
self.assertIn(
- (ANY, "echo client spam", [[main_stack, ANY]]),
+ TaskInfo(
+ [
+ ANY,
+ "server task",
+ [
+ CoroInfo(
+ [
+ [
+ FrameInfo(
+ [
+ base_events.__file__,
+ ANY,
+ "Server.serve_forever",
+ ]
+ )
+ ],
+ ANY,
+ ]
+ )
+ ],
+ [
+ CoroInfo(
+ [
+ [
+ FrameInfo(
+ [
+ taskgroups.__file__,
+ ANY,
+ "TaskGroup._aexit",
+ ]
+ ),
+ FrameInfo(
+ [
+ taskgroups.__file__,
+ ANY,
+ "TaskGroup.__aexit__",
+ ]
+ ),
+ FrameInfo(
+ [script_name, ANY, "main"]
+ ),
+ ],
+ ANY,
+ ]
+ )
+ ],
+ ]
+ ),
+ entries,
+ )
+ self.assertIn(
+ TaskInfo(
+ [
+ ANY,
+ "Task-4",
+ [
+ CoroInfo(
+ [
+ [
+ FrameInfo(
+ [tasks.__file__, ANY, "sleep"]
+ ),
+ FrameInfo(
+ [
+ script_name,
+ 38,
+ "echo_client",
+ ]
+ ),
+ ],
+ ANY,
+ ]
+ )
+ ],
+ [
+ CoroInfo(
+ [
+ [
+ FrameInfo(
+ [
+ taskgroups.__file__,
+ ANY,
+ "TaskGroup._aexit",
+ ]
+ ),
+ FrameInfo(
+ [
+ taskgroups.__file__,
+ ANY,
+ "TaskGroup.__aexit__",
+ ]
+ ),
+ FrameInfo(
+ [
+ script_name,
+ 41,
+ "echo_client_spam",
+ ]
+ ),
+ ],
+ ANY,
+ ]
+ )
+ ],
+ ]
+ ),
entries,
)
- expected_stack = [
- [
+ expected_awaited_by = [
+ CoroInfo(
[
- (
- "TaskGroup._aexit",
- taskgroups.__file__,
- ANY,
- ),
- (
- "TaskGroup.__aexit__",
- taskgroups.__file__,
- ANY,
- ),
- ("echo_client_spam", script_name, 41),
- ],
- ANY,
- ]
+ [
+ FrameInfo(
+ [
+ taskgroups.__file__,
+ ANY,
+ "TaskGroup._aexit",
+ ]
+ ),
+ FrameInfo(
+ [
+ taskgroups.__file__,
+ ANY,
+ "TaskGroup.__aexit__",
+ ]
+ ),
+ FrameInfo(
+ [script_name, 41, "echo_client_spam"]
+ ),
+ ],
+ ANY,
+ ]
+ )
]
- tasks_with_stack = [
- task for task in entries if task[2] == expected_stack
+ tasks_with_awaited = [
+ task
+ for task in entries
+ if task.awaited_by == expected_awaited_by
]
- self.assertGreaterEqual(len(tasks_with_stack), 1000)
+ self.assertGreaterEqual(len(tasks_with_awaited), 1000)
# the final task will have some random number, but it should for
# sure be one of the echo client spam horde (on Windows this is not true
# for some reason)
if sys.platform != "win32":
self.assertEqual(
- expected_stack,
- entries[-1][2],
+ tasks_with_awaited[-1].awaited_by,
+ entries[-1].awaited_by,
)
except PermissionError:
- self.skipTest("Insufficient permissions to read the stack trace")
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
finally:
if client_socket is not None:
client_socket.close()
@@ -700,15 +848,154 @@ class TestGetStackTrace(unittest.TestCase):
)
def test_self_trace(self):
stack_trace = get_stack_trace(os.getpid())
+ # It is possible that there are more threads, so we check that the
+ # expected stack traces are in the result (looking at you, Windows!)
+ this_thread_stack = None
+ for thread_id, stack in stack_trace:
+ if thread_id == threading.get_native_id():
+ this_thread_stack = stack
+ break
+ self.assertIsNotNone(this_thread_stack)
self.assertEqual(
- stack_trace[0],
- (
- "TestGetStackTrace.test_self_trace",
- __file__,
- self.test_self_trace.__code__.co_firstlineno + 6,
- ),
+ stack[:2],
+ [
+ FrameInfo(
+ [
+ __file__,
+ get_stack_trace.__code__.co_firstlineno + 2,
+ "get_stack_trace",
+ ]
+ ),
+ FrameInfo(
+ [
+ __file__,
+ self.test_self_trace.__code__.co_firstlineno + 6,
+ "TestGetStackTrace.test_self_trace",
+ ]
+ ),
+ ],
+ )
+
+ @skip_if_not_supported
+ @unittest.skipIf(
+ sys.platform == "linux" and not PROCESS_VM_READV_SUPPORTED,
+ "Test only runs on Linux with process_vm_readv support",
+ )
+ @requires_gil_enabled("Free threaded builds don't have an 'active thread'")
+ def test_only_active_thread(self):
+ # Test that only_active_thread parameter works correctly
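+ # Worker threads block in sleep while the main thread holds the GIL in
+ # a busy loop, so only_active_thread=True should report a single thread.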
+ port = find_unused_port()
+ script = textwrap.dedent(
+ f"""\
+ import time, sys, socket, threading
+
+ # Connect to the test process
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.connect(('localhost', {port}))
+
+ def worker_thread(name, barrier, ready_event):
+ barrier.wait() # Synchronize thread start
+ ready_event.wait() # Wait for main thread signal
+ # Sleep to keep thread alive
+ time.sleep(10_000)
+
+ def main_work():
+ # Do busy work to hold the GIL
+ sock.sendall(b"working\\n")
+ count = 0
+ while count < 100000000:
+ count += 1
+ if count % 10000000 == 0:
+ pass # Keep main thread busy
+ sock.sendall(b"done\\n")
+
+ # Create synchronization primitives
+ num_threads = 3
+ barrier = threading.Barrier(num_threads + 1) # +1 for main thread
+ ready_event = threading.Event()
+
+ # Start worker threads
+ threads = []
+ for i in range(num_threads):
+ t = threading.Thread(target=worker_thread, args=(f"Worker-{{i}}", barrier, ready_event))
+ t.start()
+ threads.append(t)
+
+ # Wait for all threads to be ready
+ barrier.wait()
+
+ # Signal ready to parent process
+ sock.sendall(b"ready\\n")
+
+ # Signal threads to start waiting
+ ready_event.set()
+
+ # Give threads time to start sleeping
+ time.sleep(0.1)
+
+ # Now do busy work to hold the GIL
+ main_work()
+ """
)
+ with os_helper.temp_dir() as work_dir:
+ script_dir = os.path.join(work_dir, "script_pkg")
+ os.mkdir(script_dir)
+
+ # Create a socket server to communicate with the target process
+ server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ server_socket.bind(("localhost", port))
+ server_socket.settimeout(SHORT_TIMEOUT)
+ server_socket.listen(1)
+
+ script_name = _make_test_script(script_dir, "script", script)
+ client_socket = None
+ try:
+ p = subprocess.Popen([sys.executable, script_name])
+ client_socket, _ = server_socket.accept()
+ server_socket.close()
+
+ # Wait for ready signal
+ response = b""
+ while b"ready" not in response:
+ response += client_socket.recv(1024)
+
+ # Wait for the main thread to start its busy work
+ while b"working" not in response:
+ response += client_socket.recv(1024)
+
+ # Get stack trace with all threads
+ unwinder_all = RemoteUnwinder(p.pid, all_threads=True)
+ all_traces = unwinder_all.get_stack_trace()
+
+ # Get stack trace with only GIL holder
+ unwinder_gil = RemoteUnwinder(p.pid, only_active_thread=True)
+ gil_traces = unwinder_gil.get_stack_trace()
+
+ except PermissionError:
+ self.skipTest(
+ "Insufficient permissions to read the stack trace"
+ )
+ finally:
+ if client_socket is not None:
+ client_socket.close()
+ p.kill()
+ p.terminate()
+ p.wait(timeout=SHORT_TIMEOUT)
+
+ # Verify we got multiple threads in all_traces
+ self.assertGreater(len(all_traces), 1, "Should have multiple threads")
+
+ # Verify we got exactly one thread in gil_traces
+ self.assertEqual(len(gil_traces), 1, "Should have exactly one GIL holder")
+
+ # The GIL holder should be in the all_traces list
+ gil_thread_id = gil_traces[0][0]
+ all_thread_ids = [trace[0] for trace in all_traces]
+ self.assertIn(gil_thread_id, all_thread_ids,
+ "GIL holder should be among all threads")
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_fcntl.py b/Lib/test/test_fcntl.py
index e0e6782258f..7140a7b4f29 100644
--- a/Lib/test/test_fcntl.py
+++ b/Lib/test/test_fcntl.py
@@ -11,7 +11,7 @@ from test.support import (
cpython_only, get_pagesize, is_apple, requires_subprocess, verbose
)
from test.support.import_helper import import_module
-from test.support.os_helper import TESTFN, unlink
+from test.support.os_helper import TESTFN, unlink, make_bad_fd
# Skip test if no fcntl module.
@@ -274,6 +274,17 @@ class TestFcntl(unittest.TestCase):
def test_fcntl_large_buffer(self):
self._check_fcntl_not_mutate_len(2024)
+ @unittest.skipUnless(hasattr(fcntl, 'F_DUPFD'), 'need fcntl.F_DUPFD')
+ def test_bad_fd(self):
+ # gh-134744: Test error handling
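+ # A bad fd must raise OSError whether arg is an int or a small/large buffer.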
+ fd = make_bad_fd()
+ with self.assertRaises(OSError):
+ fcntl.fcntl(fd, fcntl.F_DUPFD, 0)
+ with self.assertRaises(OSError):
+ fcntl.fcntl(fd, fcntl.F_DUPFD, b'\0' * 10)
+ with self.assertRaises(OSError):
+ fcntl.fcntl(fd, fcntl.F_DUPFD, b'\0' * 2048)
+
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/test/test_fileio.py b/Lib/test/test_fileio.py
index 5a0f033ebb8..e3d54f6315a 100644
--- a/Lib/test/test_fileio.py
+++ b/Lib/test/test_fileio.py
@@ -591,7 +591,7 @@ class OtherFileTests:
try:
f.write(b"abc")
f.close()
- with open(TESTFN_ASCII, "rb") as f:
+ with self.open(TESTFN_ASCII, "rb") as f:
self.assertEqual(f.read(), b"abc")
finally:
os.unlink(TESTFN_ASCII)
@@ -608,7 +608,7 @@ class OtherFileTests:
try:
f.write(b"abc")
f.close()
- with open(TESTFN_UNICODE, "rb") as f:
+ with self.open(TESTFN_UNICODE, "rb") as f:
self.assertEqual(f.read(), b"abc")
finally:
os.unlink(TESTFN_UNICODE)
@@ -692,13 +692,13 @@ class OtherFileTests:
def testAppend(self):
try:
- f = open(TESTFN, 'wb')
+ f = self.FileIO(TESTFN, 'wb')
f.write(b'spam')
f.close()
- f = open(TESTFN, 'ab')
+ f = self.FileIO(TESTFN, 'ab')
f.write(b'eggs')
f.close()
- f = open(TESTFN, 'rb')
+ f = self.FileIO(TESTFN, 'rb')
d = f.read()
f.close()
self.assertEqual(d, b'spameggs')
@@ -734,6 +734,7 @@ class OtherFileTests:
class COtherFileTests(OtherFileTests, unittest.TestCase):
FileIO = _io.FileIO
modulename = '_io'
+ open = _io.open
@cpython_only
def testInvalidFd_overflow(self):
@@ -755,6 +756,7 @@ class COtherFileTests(OtherFileTests, unittest.TestCase):
class PyOtherFileTests(OtherFileTests, unittest.TestCase):
FileIO = _pyio.FileIO
modulename = '_pyio'
+ open = _pyio.open
def test_open_code(self):
# Check that the default behaviour of open_code matches
diff --git a/Lib/test/test_float.py b/Lib/test/test_float.py
index 237d7b5d35e..00518abcb11 100644
--- a/Lib/test/test_float.py
+++ b/Lib/test/test_float.py
@@ -795,6 +795,8 @@ class FormatTestCase(unittest.TestCase):
self.assertRaises(ValueError, format, x, '.6,n')
@support.requires_IEEE_754
+ @unittest.skipUnless(sys.float_repr_style == 'short',
+ "applies only when using short float repr style")
def test_format_testfile(self):
with open(format_testfile, encoding="utf-8") as testfile:
for line in testfile:
diff --git a/Lib/test/test_format.py b/Lib/test/test_format.py
index c7cc32e0949..1f626d87fa6 100644
--- a/Lib/test/test_format.py
+++ b/Lib/test/test_format.py
@@ -346,12 +346,12 @@ class FormatTest(unittest.TestCase):
testcommon(b"%s", memoryview(b"abc"), b"abc")
# %a will give the equivalent of
# repr(some_obj).encode('ascii', 'backslashreplace')
- testcommon(b"%a", 3.14, b"3.14")
+ testcommon(b"%a", 3.25, b"3.25")
testcommon(b"%a", b"ghi", b"b'ghi'")
testcommon(b"%a", "jkl", b"'jkl'")
testcommon(b"%a", "\u0544", b"'\\u0544'")
# %r is an alias for %a
- testcommon(b"%r", 3.14, b"3.14")
+ testcommon(b"%r", 3.25, b"3.25")
testcommon(b"%r", b"ghi", b"b'ghi'")
testcommon(b"%r", "jkl", b"'jkl'")
testcommon(b"%r", "\u0544", b"'\\u0544'")
@@ -407,19 +407,19 @@ class FormatTest(unittest.TestCase):
self.assertEqual(format("abc", "\u2007<5"), "abc\u2007\u2007")
self.assertEqual(format(123, "\u2007<5"), "123\u2007\u2007")
- self.assertEqual(format(12.3, "\u2007<6"), "12.3\u2007\u2007")
+ self.assertEqual(format(12.5, "\u2007<6"), "12.5\u2007\u2007")
self.assertEqual(format(0j, "\u2007<4"), "0j\u2007\u2007")
self.assertEqual(format(1+2j, "\u2007<8"), "(1+2j)\u2007\u2007")
self.assertEqual(format("abc", "\u2007>5"), "\u2007\u2007abc")
self.assertEqual(format(123, "\u2007>5"), "\u2007\u2007123")
- self.assertEqual(format(12.3, "\u2007>6"), "\u2007\u200712.3")
+ self.assertEqual(format(12.5, "\u2007>6"), "\u2007\u200712.5")
self.assertEqual(format(1+2j, "\u2007>8"), "\u2007\u2007(1+2j)")
self.assertEqual(format(0j, "\u2007>4"), "\u2007\u20070j")
self.assertEqual(format("abc", "\u2007^5"), "\u2007abc\u2007")
self.assertEqual(format(123, "\u2007^5"), "\u2007123\u2007")
- self.assertEqual(format(12.3, "\u2007^6"), "\u200712.3\u2007")
+ self.assertEqual(format(12.5, "\u2007^6"), "\u200712.5\u2007")
self.assertEqual(format(1+2j, "\u2007^8"), "\u2007(1+2j)\u2007")
self.assertEqual(format(0j, "\u2007^4"), "\u20070j\u2007")
diff --git a/Lib/test/test_fractions.py b/Lib/test/test_fractions.py
index 96b3f305194..1875a2f529c 100644
--- a/Lib/test/test_fractions.py
+++ b/Lib/test/test_fractions.py
@@ -1480,11 +1480,8 @@ class FractionTest(unittest.TestCase):
(F('-1234.5678'), '08,.0f', '-001,235'),
(F('-1234.5678'), '09,.0f', '-0,001,235'),
# Corner-case - zero-padding specified through fill and align
- # instead of the zero-pad character - in this case, treat '0' as a
- # regular fill character and don't attempt to insert commas into
- # the filled portion. This differs from the int and float
- # behaviour.
- (F('1234.5678'), '0=12,.2f', '00001,234.57'),
+ # instead of the zero-pad character.
+ (F('1234.5678'), '0=12,.2f', '0,001,234.57'),
# Corner case where it's not clear whether the '0' indicates zero
# padding or gives the minimum width, but there's still an obvious
# answer to give. We want this to work in case the minimum width
@@ -1518,6 +1515,8 @@ class FractionTest(unittest.TestCase):
(F(51, 1000), '.1f', '0.1'),
(F(149, 1000), '.1f', '0.1'),
(F(151, 1000), '.1f', '0.2'),
+ (F(22, 7), '.02f', '3.14'), # issue gh-130662
+ (F(22, 7), '005.02f', '03.14'),
]
for fraction, spec, expected in testcases:
with self.subTest(fraction=fraction, spec=spec):
@@ -1616,12 +1615,6 @@ class FractionTest(unittest.TestCase):
'=010%',
'>00.2f',
'>00f',
- # Too many zeros - minimum width should not have leading zeros
- '006f',
- # Leading zeros in precision
- '.010f',
- '.02f',
- '.000f',
# Missing precision
'.e',
'.f',
diff --git a/Lib/test/test_free_threading/test_generators.py b/Lib/test/test_free_threading/test_generators.py
new file mode 100644
index 00000000000..d01675eb38b
--- /dev/null
+++ b/Lib/test/test_free_threading/test_generators.py
@@ -0,0 +1,51 @@
+import concurrent.futures
+import unittest
+from threading import Barrier
+from unittest import TestCase
+import random
+import time
+
+from test.support import threading_helper, Py_GIL_DISABLED
+
+threading_helper.requires_working_threading(module=True)
+
+
+def random_sleep():
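+ # Sleep for 50-100 microseconds to encourage different thread interleavings.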
+ delay_us = random.randint(50, 100)
+ time.sleep(delay_us * 1e-6)
+
+def random_string():
+ return ''.join(random.choice('0123456789ABCDEF') for _ in range(10))
+
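+# Each worker waits on the barrier, sleeps briefly, then writes a random
+# string to the generator's __name__ / __qualname__ so the writes race.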
+def set_gen_name(g, b):
+ b.wait()
+ random_sleep()
+ g.__name__ = random_string()
+ return g.__name__
+
+def set_gen_qualname(g, b):
+ b.wait()
+ random_sleep()
+ g.__qualname__ = random_string()
+ return g.__qualname__
+
+
+@unittest.skipUnless(Py_GIL_DISABLED, "Enable only in FT build")
+class TestFTGenerators(TestCase):
+ NUM_THREADS = 4
+
+ def concurrent_write_with_func(self, func):
+ gen = (x for x in range(42))
+ for j in range(1000):
+ with concurrent.futures.ThreadPoolExecutor(max_workers=self.NUM_THREADS) as executor:
+ b = Barrier(self.NUM_THREADS)
+ futures = {executor.submit(func, gen, b): i for i in range(self.NUM_THREADS)}
+ for fut in concurrent.futures.as_completed(futures):
+ gen_name = fut.result()
+ self.assertEqual(len(gen_name), 10)
+
+ def test_concurrent_write(self):
+ with self.subTest(func=set_gen_name):
+ self.concurrent_write_with_func(func=set_gen_name)
+ with self.subTest(func=set_gen_qualname):
+ self.concurrent_write_with_func(func=set_gen_qualname)
diff --git a/Lib/test/test_free_threading/test_heapq.py b/Lib/test/test_free_threading/test_heapq.py
new file mode 100644
index 00000000000..ee7adfb2b78
--- /dev/null
+++ b/Lib/test/test_free_threading/test_heapq.py
@@ -0,0 +1,267 @@
+import unittest
+
+import heapq
+
+from enum import Enum
+from threading import Thread, Barrier, Lock
+from random import shuffle, randint
+
+from test.support import threading_helper
+from test import test_heapq
+
+
+NTHREADS = 10
+OBJECT_COUNT = 5_000
+
+
+class Heap(Enum):
+ MIN = 1
+ MAX = 2
+
+
+@threading_helper.requires_working_threading()
+class TestHeapq(unittest.TestCase):
+ def setUp(self):
+ self.test_heapq = test_heapq.TestHeapPython()
+
+ def test_racing_heapify(self):
+ heap = list(range(OBJECT_COUNT))
+ shuffle(heap)
+
+ self.run_concurrently(
+ worker_func=heapq.heapify, args=(heap,), nthreads=NTHREADS
+ )
+ self.test_heapq.check_invariant(heap)
+
+ def test_racing_heappush(self):
+ heap = []
+
+ def heappush_func(heap):
+ for item in reversed(range(OBJECT_COUNT)):
+ heapq.heappush(heap, item)
+
+ self.run_concurrently(
+ worker_func=heappush_func, args=(heap,), nthreads=NTHREADS
+ )
+ self.test_heapq.check_invariant(heap)
+
+ def test_racing_heappop(self):
+ heap = self.create_heap(OBJECT_COUNT, Heap.MIN)
+
+ # Each thread pops (OBJECT_COUNT / NTHREADS) items
+ self.assertEqual(OBJECT_COUNT % NTHREADS, 0)
+ per_thread_pop_count = OBJECT_COUNT // NTHREADS
+
+ def heappop_func(heap, pop_count):
+ local_list = []
+ for _ in range(pop_count):
+ item = heapq.heappop(heap)
+ local_list.append(item)
+
+ # Each local list should be sorted
+ self.assertTrue(self.is_sorted_ascending(local_list))
+
+ self.run_concurrently(
+ worker_func=heappop_func,
+ args=(heap, per_thread_pop_count),
+ nthreads=NTHREADS,
+ )
+ self.assertEqual(len(heap), 0)
+
+ def test_racing_heappushpop(self):
+ heap = self.create_heap(OBJECT_COUNT, Heap.MIN)
+ pushpop_items = self.create_random_list(-5_000, 10_000, OBJECT_COUNT)
+
+ def heappushpop_func(heap, pushpop_items):
+ for item in pushpop_items:
+ popped_item = heapq.heappushpop(heap, item)
+ self.assertTrue(popped_item <= item)
+
+ self.run_concurrently(
+ worker_func=heappushpop_func,
+ args=(heap, pushpop_items),
+ nthreads=NTHREADS,
+ )
+ self.assertEqual(len(heap), OBJECT_COUNT)
+ self.test_heapq.check_invariant(heap)
+
+ def test_racing_heapreplace(self):
+ heap = self.create_heap(OBJECT_COUNT, Heap.MIN)
+ replace_items = self.create_random_list(-5_000, 10_000, OBJECT_COUNT)
+
+ def heapreplace_func(heap, replace_items):
+ for item in replace_items:
+ heapq.heapreplace(heap, item)
+
+ self.run_concurrently(
+ worker_func=heapreplace_func,
+ args=(heap, replace_items),
+ nthreads=NTHREADS,
+ )
+ self.assertEqual(len(heap), OBJECT_COUNT)
+ self.test_heapq.check_invariant(heap)
+
+ def test_racing_heapify_max(self):
+ max_heap = list(range(OBJECT_COUNT))
+ shuffle(max_heap)
+
+ self.run_concurrently(
+ worker_func=heapq.heapify_max, args=(max_heap,), nthreads=NTHREADS
+ )
+ self.test_heapq.check_max_invariant(max_heap)
+
+ def test_racing_heappush_max(self):
+ max_heap = []
+
+ def heappush_max_func(max_heap):
+ for item in range(OBJECT_COUNT):
+ heapq.heappush_max(max_heap, item)
+
+ self.run_concurrently(
+ worker_func=heappush_max_func, args=(max_heap,), nthreads=NTHREADS
+ )
+ self.test_heapq.check_max_invariant(max_heap)
+
+ def test_racing_heappop_max(self):
+ max_heap = self.create_heap(OBJECT_COUNT, Heap.MAX)
+
+ # Each thread pops (OBJECT_COUNT / NTHREADS) items
+ self.assertEqual(OBJECT_COUNT % NTHREADS, 0)
+ per_thread_pop_count = OBJECT_COUNT // NTHREADS
+
+ def heappop_max_func(max_heap, pop_count):
+ local_list = []
+ for _ in range(pop_count):
+ item = heapq.heappop_max(max_heap)
+ local_list.append(item)
+
+ # Each local list should be sorted
+ self.assertTrue(self.is_sorted_descending(local_list))
+
+ self.run_concurrently(
+ worker_func=heappop_max_func,
+ args=(max_heap, per_thread_pop_count),
+ nthreads=NTHREADS,
+ )
+ self.assertEqual(len(max_heap), 0)
+
+ def test_racing_heappushpop_max(self):
+ max_heap = self.create_heap(OBJECT_COUNT, Heap.MAX)
+ pushpop_items = self.create_random_list(-5_000, 10_000, OBJECT_COUNT)
+
+ def heappushpop_max_func(max_heap, pushpop_items):
+ for item in pushpop_items:
+ popped_item = heapq.heappushpop_max(max_heap, item)
+ self.assertTrue(popped_item >= item)
+
+ self.run_concurrently(
+ worker_func=heappushpop_max_func,
+ args=(max_heap, pushpop_items),
+ nthreads=NTHREADS,
+ )
+ self.assertEqual(len(max_heap), OBJECT_COUNT)
+ self.test_heapq.check_max_invariant(max_heap)
+
+ def test_racing_heapreplace_max(self):
+ max_heap = self.create_heap(OBJECT_COUNT, Heap.MAX)
+ replace_items = self.create_random_list(-5_000, 10_000, OBJECT_COUNT)
+
+ def heapreplace_max_func(max_heap, replace_items):
+ for item in replace_items:
+ heapq.heapreplace_max(max_heap, item)
+
+ self.run_concurrently(
+ worker_func=heapreplace_max_func,
+ args=(max_heap, replace_items),
+ nthreads=NTHREADS,
+ )
+ self.assertEqual(len(max_heap), OBJECT_COUNT)
+ self.test_heapq.check_max_invariant(max_heap)
+
+ def test_lock_free_list_read(self):
+ n, n_threads = 1_000, 10
+ l = []
+ barrier = Barrier(n_threads * 2)
+
+ count = 0
+ lock = Lock()
+
+ def worker():
+ with lock:
+ nonlocal count
+ x = count
+ count += 1
+
+ barrier.wait()
+ for i in range(n):
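+ # Odd-numbered workers mutate the heap; even-numbered workers read
+ # l[0] without holding any lock.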
+ if x % 2:
+ heapq.heappush(l, 1)
+ heapq.heappop(l)
+ else:
+ try:
+ l[0]
+ except IndexError:
+ pass
+
+ self.run_concurrently(worker, (), n_threads * 2)
+
+ @staticmethod
+ def is_sorted_ascending(lst):
+ """
+ Check if the list is sorted in ascending order (non-decreasing).
+ """
+ return all(lst[i - 1] <= lst[i] for i in range(1, len(lst)))
+
+ @staticmethod
+ def is_sorted_descending(lst):
+ """
+ Check if the list is sorted in descending order (non-increasing).
+ """
+ return all(lst[i - 1] >= lst[i] for i in range(1, len(lst)))
+
+ @staticmethod
+ def create_heap(size, heap_kind):
+ """
+ Create a min/max heap whose elements are in the range [0, size - 1],
+ shuffled before heapify.
+ """
+ heap = list(range(size))
+ shuffle(heap)
+ if heap_kind == Heap.MIN:
+ heapq.heapify(heap)
+ else:
+ heapq.heapify_max(heap)
+
+ return heap
+
+ @staticmethod
+ def create_random_list(a, b, size):
+ """
+ Create a list of random numbers between a and b (inclusive).
+ """
+ return [randint(a, b) for _ in range(size)]
+
+ def run_concurrently(self, worker_func, args, nthreads):
+ """
+ Run the worker function concurrently in multiple threads.
+ """
+ barrier = Barrier(nthreads)
+
+ def wrapper_func(*args):
+ # Wait for all threads to reach this point before proceeding.
+ barrier.wait()
+ worker_func(*args)
+
+ with threading_helper.catch_threading_exception() as cm:
+ workers = (
+ Thread(target=wrapper_func, args=args) for _ in range(nthreads)
+ )
+ with threading_helper.start_threads(workers):
+ pass
+
+ # Worker threads should not raise any exceptions
+ self.assertIsNone(cm.exc_value)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Lib/test/test_free_threading/test_io.py b/Lib/test/test_free_threading/test_io.py
index f9bec740ddf..41d89e04da8 100644
--- a/Lib/test/test_free_threading/test_io.py
+++ b/Lib/test/test_free_threading/test_io.py
@@ -1,12 +1,13 @@
+import io
+import _pyio as pyio
import threading
from unittest import TestCase
from test.support import threading_helper
from random import randint
-from io import BytesIO
from sys import getsizeof
-class TestBytesIO(TestCase):
+class ThreadSafetyMixin:
# Test pretty much everything that can break under free-threading.
# Non-deterministic, but at least one of these things will fail if
# BytesIO object is not free-thread safe.
@@ -90,20 +91,27 @@ class TestBytesIO(TestCase):
barrier.wait()
getsizeof(b)
- self.check([write] * 10, BytesIO())
- self.check([writelines] * 10, BytesIO())
- self.check([write] * 10 + [truncate] * 10, BytesIO())
- self.check([truncate] + [read] * 10, BytesIO(b'0\n'*204800))
- self.check([truncate] + [read1] * 10, BytesIO(b'0\n'*204800))
- self.check([truncate] + [readline] * 10, BytesIO(b'0\n'*20480))
- self.check([truncate] + [readlines] * 10, BytesIO(b'0\n'*20480))
- self.check([truncate] + [readinto] * 10, BytesIO(b'0\n'*204800), bytearray(b'0\n'*204800))
- self.check([close] + [write] * 10, BytesIO())
- self.check([truncate] + [getvalue] * 10, BytesIO(b'0\n'*204800))
- self.check([truncate] + [getbuffer] * 10, BytesIO(b'0\n'*204800))
- self.check([truncate] + [iter] * 10, BytesIO(b'0\n'*20480))
- self.check([truncate] + [getstate] * 10, BytesIO(b'0\n'*204800))
- self.check([truncate] + [setstate] * 10, BytesIO(b'0\n'*204800), (b'123', 0, None))
- self.check([truncate] + [sizeof] * 10, BytesIO(b'0\n'*204800))
+ self.check([write] * 10, self.ioclass())
+ self.check([writelines] * 10, self.ioclass())
+ self.check([write] * 10 + [truncate] * 10, self.ioclass())
+ self.check([truncate] + [read] * 10, self.ioclass(b'0\n'*204800))
+ self.check([truncate] + [read1] * 10, self.ioclass(b'0\n'*204800))
+ self.check([truncate] + [readline] * 10, self.ioclass(b'0\n'*20480))
+ self.check([truncate] + [readlines] * 10, self.ioclass(b'0\n'*20480))
+ self.check([truncate] + [readinto] * 10, self.ioclass(b'0\n'*204800), bytearray(b'0\n'*204800))
+ self.check([close] + [write] * 10, self.ioclass())
+ self.check([truncate] + [getvalue] * 10, self.ioclass(b'0\n'*204800))
+ self.check([truncate] + [getbuffer] * 10, self.ioclass(b'0\n'*204800))
+ self.check([truncate] + [iter] * 10, self.ioclass(b'0\n'*20480))
+ self.check([truncate] + [getstate] * 10, self.ioclass(b'0\n'*204800))
+ state = self.ioclass(b'123').__getstate__()
+ self.check([truncate] + [setstate] * 10, self.ioclass(b'0\n'*204800), state)
+ self.check([truncate] + [sizeof] * 10, self.ioclass(b'0\n'*204800))
# no tests for seek or tell because they don't break anything
+
+class CBytesIOTest(ThreadSafetyMixin, TestCase):
+ ioclass = io.BytesIO
+
+class PyBytesIOTest(ThreadSafetyMixin, TestCase):
+ ioclass = pyio.BytesIO
diff --git a/Lib/test/test_free_threading/test_itertools.py b/Lib/test/test_free_threading/test_itertools.py
new file mode 100644
index 00000000000..9d366041917
--- /dev/null
+++ b/Lib/test/test_free_threading/test_itertools.py
@@ -0,0 +1,95 @@
+import unittest
+from threading import Thread, Barrier
+from itertools import batched, chain, cycle
+from test.support import threading_helper
+
+
+threading_helper.requires_working_threading(module=True)
+
+class ItertoolsThreading(unittest.TestCase):
+
+ @threading_helper.reap_threads
+ def test_batched(self):
+ number_of_threads = 10
+ number_of_iterations = 20
+ barrier = Barrier(number_of_threads)
+ def work(it):
+ barrier.wait()
+ while True:
+ try:
+ next(it)
+ except StopIteration:
+ break
+
+ data = tuple(range(1000))
+ for it in range(number_of_iterations):
+ batch_iterator = batched(data, 2)
+ worker_threads = []
+ for ii in range(number_of_threads):
+ worker_threads.append(
+ Thread(target=work, args=[batch_iterator]))
+
+ with threading_helper.start_threads(worker_threads):
+ pass
+
+ barrier.reset()
+
+ @threading_helper.reap_threads
+ def test_cycle(self):
+ number_of_threads = 6
+ number_of_iterations = 10
+ number_of_cycles = 400
+
+ barrier = Barrier(number_of_threads)
+ def work(it):
+ barrier.wait()
+ for _ in range(number_of_cycles):
+ try:
+ next(it)
+ except StopIteration:
+ pass
+
+ data = (1, 2, 3, 4)
+ for it in range(number_of_iterations):
+ cycle_iterator = cycle(data)
+ worker_threads = []
+ for ii in range(number_of_threads):
+ worker_threads.append(
+ Thread(target=work, args=[cycle_iterator]))
+
+ with threading_helper.start_threads(worker_threads):
+ pass
+
+ barrier.reset()
+
+ @threading_helper.reap_threads
+ def test_chain(self):
+ number_of_threads = 6
+ number_of_iterations = 20
+
+ barrier = Barrier(number_of_threads)
+ def work(it):
+ barrier.wait()
+ while True:
+ try:
+ next(it)
+ except StopIteration:
+ break
+
+ data = [(1, )] * 200
+ for it in range(number_of_iterations):
+ chain_iterator = chain(*data)
+ worker_threads = []
+ for ii in range(number_of_threads):
+ worker_threads.append(
+ Thread(target=work, args=[chain_iterator]))
+
+ with threading_helper.start_threads(worker_threads):
+ pass
+
+ barrier.reset()
+
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Lib/test/test_free_threading/test_itertools_batched.py b/Lib/test/test_free_threading/test_itertools_batched.py
deleted file mode 100644
index a754b4f9ea9..00000000000
--- a/Lib/test/test_free_threading/test_itertools_batched.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import unittest
-from threading import Thread, Barrier
-from itertools import batched
-from test.support import threading_helper
-
-
-threading_helper.requires_working_threading(module=True)
-
-class EnumerateThreading(unittest.TestCase):
-
- @threading_helper.reap_threads
- def test_threading(self):
- number_of_threads = 10
- number_of_iterations = 20
- barrier = Barrier(number_of_threads)
- def work(it):
- barrier.wait()
- while True:
- try:
- _ = next(it)
- except StopIteration:
- break
-
- data = tuple(range(1000))
- for it in range(number_of_iterations):
- batch_iterator = batched(data, 2)
- worker_threads = []
- for ii in range(number_of_threads):
- worker_threads.append(
- Thread(target=work, args=[batch_iterator]))
-
- with threading_helper.start_threads(worker_threads):
- pass
-
- barrier.reset()
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/Lib/test/test_free_threading/test_itertools_combinatoric.py b/Lib/test/test_free_threading/test_itertools_combinatoric.py
new file mode 100644
index 00000000000..5b3b88deedd
--- /dev/null
+++ b/Lib/test/test_free_threading/test_itertools_combinatoric.py
@@ -0,0 +1,51 @@
+import unittest
+from threading import Thread, Barrier
+from itertools import combinations, product
+from test.support import threading_helper
+
+
+threading_helper.requires_working_threading(module=True)
+
+def test_concurrent_iteration(iterator, number_of_threads):
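+ # Drain a single shared iterator from several threads until it is
+ # exhausted; the callers only check that this does not crash or hang.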
+ barrier = Barrier(number_of_threads)
+ def iterator_worker(it):
+ barrier.wait()
+ while True:
+ try:
+ _ = next(it)
+ except StopIteration:
+ return
+
+ worker_threads = []
+ for ii in range(number_of_threads):
+ worker_threads.append(
+ Thread(target=iterator_worker, args=[iterator]))
+
+ with threading_helper.start_threads(worker_threads):
+ pass
+
+ barrier.reset()
+
+class ItertoolsThreading(unittest.TestCase):
+
+ @threading_helper.reap_threads
+ def test_combinations(self):
+ number_of_threads = 10
+ number_of_iterations = 24
+
+ for it in range(number_of_iterations):
+ iterator = combinations((1, 2, 3, 4, 5), 2)
+ test_concurrent_iteration(iterator, number_of_threads)
+
+ @threading_helper.reap_threads
+ def test_product(self):
+ number_of_threads = 10
+ number_of_iterations = 24
+
+ for it in range(number_of_iterations):
+ iterator = product((1, 2, 3, 4, 5), (10, 20, 30))
+ test_concurrent_iteration(iterator, number_of_threads)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Lib/test/test_fstring.py b/Lib/test/test_fstring.py
index dd58e032a8b..58a30c8e6ac 100644
--- a/Lib/test/test_fstring.py
+++ b/Lib/test/test_fstring.py
@@ -1336,9 +1336,9 @@ x = (
def test_conversions(self):
self.assertEqual(f'{3.14:10.10}', ' 3.14')
- self.assertEqual(f'{3.14!s:10.10}', '3.14 ')
- self.assertEqual(f'{3.14!r:10.10}', '3.14 ')
- self.assertEqual(f'{3.14!a:10.10}', '3.14 ')
+ self.assertEqual(f'{1.25!s:10.10}', '1.25 ')
+ self.assertEqual(f'{1.25!r:10.10}', '1.25 ')
+ self.assertEqual(f'{1.25!a:10.10}', '1.25 ')
self.assertEqual(f'{"a"}', 'a')
self.assertEqual(f'{"a"!r}', "'a'")
@@ -1347,7 +1347,7 @@ x = (
# Conversions can have trailing whitespace after them since it
# does not provide any significance
self.assertEqual(f"{3!s }", "3")
- self.assertEqual(f'{3.14!s :10.10}', '3.14 ')
+ self.assertEqual(f'{1.25!s :10.10}', '1.25 ')
# Not a conversion.
self.assertEqual(f'{"a!r"}', "a!r")
@@ -1380,7 +1380,7 @@ x = (
for conv in ' s', ' s ':
self.assertAllRaise(SyntaxError,
"f-string: conversion type must come right after the"
- " exclamanation mark",
+ " exclamation mark",
["f'{3!" + conv + "}'"])
self.assertAllRaise(SyntaxError,
diff --git a/Lib/test/test_generated_cases.py b/Lib/test/test_generated_cases.py
index a71ddc01d1c..81d4e39f5be 100644
--- a/Lib/test/test_generated_cases.py
+++ b/Lib/test/test_generated_cases.py
@@ -1,11 +1,9 @@
import contextlib
import os
-import re
import sys
import tempfile
import unittest
-from io import StringIO
from test import support
from test import test_tools
@@ -31,12 +29,11 @@ skip_if_different_mount_drives()
test_tools.skip_if_missing("cases_generator")
with test_tools.imports_under_tool("cases_generator"):
- from analyzer import analyze_forest, StackItem
+ from analyzer import StackItem
from cwriter import CWriter
import parser
from stack import Local, Stack
import tier1_generator
- import opcode_metadata_generator
import optimizer_generator
@@ -59,14 +56,14 @@ class TestEffects(unittest.TestCase):
def test_effect_sizes(self):
stack = Stack()
inputs = [
- x := StackItem("x", None, "1"),
- y := StackItem("y", None, "oparg"),
- z := StackItem("z", None, "oparg*2"),
+ x := StackItem("x", "1"),
+ y := StackItem("y", "oparg"),
+ z := StackItem("z", "oparg*2"),
]
outputs = [
- StackItem("x", None, "1"),
- StackItem("b", None, "oparg*4"),
- StackItem("c", None, "1"),
+ StackItem("x", "1"),
+ StackItem("b", "oparg*4"),
+ StackItem("c", "1"),
]
null = CWriter.null()
stack.pop(z, null)
@@ -1106,32 +1103,6 @@ class TestGeneratedCases(unittest.TestCase):
"""
self.run_cases_test(input, output)
- def test_pointer_to_stackref(self):
- input = """
- inst(OP, (arg: _PyStackRef * -- out)) {
- out = *arg;
- DEAD(arg);
- }
- """
- output = """
- TARGET(OP) {
- #if Py_TAIL_CALL_INTERP
- int opcode = OP;
- (void)(opcode);
- #endif
- frame->instr_ptr = next_instr;
- next_instr += 1;
- INSTRUCTION_STATS(OP);
- _PyStackRef *arg;
- _PyStackRef out;
- arg = (_PyStackRef *)stack_pointer[-1].bits;
- out = *arg;
- stack_pointer[-1] = out;
- DISPATCH();
- }
- """
- self.run_cases_test(input, output)
-
def test_unused_cached_value(self):
input = """
op(FIRST, (arg1 -- out)) {
@@ -2005,8 +1976,8 @@ class TestGeneratedAbstractCases(unittest.TestCase):
"""
output = """
case OP: {
- JitOptSymbol *arg1;
- JitOptSymbol *out;
+ JitOptRef arg1;
+ JitOptRef out;
arg1 = stack_pointer[-1];
out = EGGS(arg1);
stack_pointer[-1] = out;
@@ -2014,7 +1985,7 @@ class TestGeneratedAbstractCases(unittest.TestCase):
}
case OP2: {
- JitOptSymbol *out;
+ JitOptRef out;
out = sym_new_not_null(ctx);
stack_pointer[-1] = out;
break;
@@ -2039,14 +2010,14 @@ class TestGeneratedAbstractCases(unittest.TestCase):
"""
output = """
case OP: {
- JitOptSymbol *out;
+ JitOptRef out;
out = sym_new_not_null(ctx);
stack_pointer[-1] = out;
break;
}
case OP2: {
- JitOptSymbol *out;
+ JitOptRef out;
out = NULL;
stack_pointer[-1] = out;
break;
@@ -2180,7 +2151,7 @@ class TestGeneratedAbstractCases(unittest.TestCase):
"""
output = """
case OP: {
- JitOptSymbol *foo;
+ JitOptRef foo;
foo = NULL;
stack_pointer[0] = foo;
stack_pointer += 1;
@@ -2253,5 +2224,249 @@ class TestGeneratedAbstractCases(unittest.TestCase):
"Inputs must have equal sizes"):
self.run_cases_test(input, input2, output)
+ def test_pure_uop_body_copied_in(self):
+ # Note: any non-escaping call works.
+ # In this case, we use PyStackRef_IsNone.
+ input = """
+ pure op(OP, (foo -- res)) {
+ res = PyStackRef_IsNone(foo);
+ }
+ """
+ input2 = """
+ op(OP, (foo -- res)) {
+ REPLACE_OPCODE_IF_EVALUATES_PURE(foo);
+ res = sym_new_known(ctx, foo);
+ }
+ """
+ output = """
+ case OP: {
+ JitOptRef foo;
+ JitOptRef res;
+ foo = stack_pointer[-1];
+ if (
+ sym_is_safe_const(ctx, foo)
+ ) {
+ JitOptRef foo_sym = foo;
+ _PyStackRef foo = sym_get_const_as_stackref(ctx, foo_sym);
+ _PyStackRef res_stackref;
+ /* Start of uop copied from bytecodes for constant evaluation */
+ res_stackref = PyStackRef_IsNone(foo);
+ /* End of uop copied from bytecodes for constant evaluation */
+ res = sym_new_const_steal(ctx, PyStackRef_AsPyObjectSteal(res_stackref));
+ stack_pointer[-1] = res;
+ break;
+ }
+ res = sym_new_known(ctx, foo);
+ stack_pointer[-1] = res;
+ break;
+ }
+ """
+ self.run_cases_test(input, input2, output)
+
+ def test_pure_uop_body_copied_in_deopt(self):
+ # Note: any non-escaping call works.
+ # In this case, we use PyStackRef_IsNone.
+ input = """
+ pure op(OP, (foo -- res)) {
+ DEOPT_IF(PyStackRef_IsNull(foo));
+ res = foo;
+ }
+ """
+ input2 = """
+ op(OP, (foo -- res)) {
+ REPLACE_OPCODE_IF_EVALUATES_PURE(foo);
+ res = foo;
+ }
+ """
+ output = """
+ case OP: {
+ JitOptRef foo;
+ JitOptRef res;
+ foo = stack_pointer[-1];
+ if (
+ sym_is_safe_const(ctx, foo)
+ ) {
+ JitOptRef foo_sym = foo;
+ _PyStackRef foo = sym_get_const_as_stackref(ctx, foo_sym);
+ _PyStackRef res_stackref;
+ /* Start of uop copied from bytecodes for constant evaluation */
+ if (PyStackRef_IsNull(foo)) {
+ ctx->done = true;
+ break;
+ }
+ res_stackref = foo;
+ /* End of uop copied from bytecodes for constant evaluation */
+ res = sym_new_const_steal(ctx, PyStackRef_AsPyObjectSteal(res_stackref));
+ stack_pointer[-1] = res;
+ break;
+ }
+ res = foo;
+ stack_pointer[-1] = res;
+ break;
+ }
+ """
+ self.run_cases_test(input, input2, output)
+
+ def test_pure_uop_body_copied_in_error_if(self):
+ # Note: any non-escaping call works.
+ # In this case, we use PyStackRef_IsNone.
+ input = """
+ pure op(OP, (foo -- res)) {
+ ERROR_IF(PyStackRef_IsNull(foo));
+ res = foo;
+ }
+ """
+ input2 = """
+ op(OP, (foo -- res)) {
+ REPLACE_OPCODE_IF_EVALUATES_PURE(foo);
+ res = foo;
+ }
+ """
+ output = """
+ case OP: {
+ JitOptRef foo;
+ JitOptRef res;
+ foo = stack_pointer[-1];
+ if (
+ sym_is_safe_const(ctx, foo)
+ ) {
+ JitOptRef foo_sym = foo;
+ _PyStackRef foo = sym_get_const_as_stackref(ctx, foo_sym);
+ _PyStackRef res_stackref;
+ /* Start of uop copied from bytecodes for constant evaluation */
+ if (PyStackRef_IsNull(foo)) {
+ goto error;
+ }
+ res_stackref = foo;
+ /* End of uop copied from bytecodes for constant evaluation */
+ res = sym_new_const_steal(ctx, PyStackRef_AsPyObjectSteal(res_stackref));
+ stack_pointer[-1] = res;
+ break;
+ }
+ res = foo;
+ stack_pointer[-1] = res;
+ break;
+ }
+ """
+ self.run_cases_test(input, input2, output)
+
+
+ def test_replace_opcode_uop_body_copied_in_complex(self):
+ input = """
+ pure op(OP, (foo -- res)) {
+ if (foo) {
+ res = PyStackRef_IsNone(foo);
+ }
+ else {
+ res = 1;
+ }
+ }
+ """
+ input2 = """
+ op(OP, (foo -- res)) {
+ REPLACE_OPCODE_IF_EVALUATES_PURE(foo);
+ res = sym_new_known(ctx, foo);
+ }
+ """
+ output = """
+ case OP: {
+ JitOptRef foo;
+ JitOptRef res;
+ foo = stack_pointer[-1];
+ if (
+ sym_is_safe_const(ctx, foo)
+ ) {
+ JitOptRef foo_sym = foo;
+ _PyStackRef foo = sym_get_const_as_stackref(ctx, foo_sym);
+ _PyStackRef res_stackref;
+ /* Start of uop copied from bytecodes for constant evaluation */
+ if (foo) {
+ res_stackref = PyStackRef_IsNone(foo);
+ }
+ else {
+ res_stackref = 1;
+ }
+ /* End of uop copied from bytecodes for constant evaluation */
+ res = sym_new_const_steal(ctx, PyStackRef_AsPyObjectSteal(res_stackref));
+ stack_pointer[-1] = res;
+ break;
+ }
+ res = sym_new_known(ctx, foo);
+ stack_pointer[-1] = res;
+ break;
+ }
+ """
+ self.run_cases_test(input, input2, output)
+
+ def test_replace_opcode_escaping_uop_body_copied_in_complex(self):
+ input = """
+ pure op(OP, (foo -- res)) {
+ if (foo) {
+ res = ESCAPING_CODE(foo);
+ }
+ else {
+ res = 1;
+ }
+ }
+ """
+ input2 = """
+ op(OP, (foo -- res)) {
+ REPLACE_OPCODE_IF_EVALUATES_PURE(foo);
+ res = sym_new_known(ctx, foo);
+ }
+ """
+ output = """
+ case OP: {
+ JitOptRef foo;
+ JitOptRef res;
+ foo = stack_pointer[-1];
+ if (
+ sym_is_safe_const(ctx, foo)
+ ) {
+ JitOptRef foo_sym = foo;
+ _PyStackRef foo = sym_get_const_as_stackref(ctx, foo_sym);
+ _PyStackRef res_stackref;
+ /* Start of uop copied from bytecodes for constant evaluation */
+ if (foo) {
+ res_stackref = ESCAPING_CODE(foo);
+ }
+ else {
+ res_stackref = 1;
+ }
+ /* End of uop copied from bytecodes for constant evaluation */
+ res = sym_new_const_steal(ctx, PyStackRef_AsPyObjectSteal(res_stackref));
+ stack_pointer[-1] = res;
+ break;
+ }
+ res = sym_new_known(ctx, foo);
+ stack_pointer[-1] = res;
+ break;
+ }
+ """
+ self.run_cases_test(input, input2, output)
+
+ def test_replace_opcode_uop_reject_array_effects(self):
+ input = """
+ pure op(OP, (foo[2] -- res)) {
+ if (foo) {
+ res = PyStackRef_IsNone(foo);
+ }
+ else {
+ res = 1;
+ }
+ }
+ """
+ input2 = """
+ op(OP, (foo[2] -- res)) {
+ REPLACE_OPCODE_IF_EVALUATES_PURE(foo);
+ res = sym_new_unknown(ctx);
+ }
+ """
+ output = """
+ """
+ with self.assertRaisesRegex(SyntaxError,
+ "Pure evaluation cannot take array-like inputs"):
+ self.run_cases_test(input, input2, output)
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_genericpath.py b/Lib/test/test_genericpath.py
index df07af01fc7..16c3268fefb 100644
--- a/Lib/test/test_genericpath.py
+++ b/Lib/test/test_genericpath.py
@@ -8,7 +8,7 @@ import sys
import unittest
import warnings
from test.support import (
- is_apple, is_emscripten, os_helper, warnings_helper
+ is_apple, os_helper, warnings_helper
)
from test.support.script_helper import assert_python_ok
from test.support.os_helper import FakePath
diff --git a/Lib/test/test_getpath.py b/Lib/test/test_getpath.py
index f86df9d0d03..83f09f34955 100644
--- a/Lib/test/test_getpath.py
+++ b/Lib/test/test_getpath.py
@@ -354,6 +354,27 @@ class MockGetPathTests(unittest.TestCase):
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
+ def test_venv_posix_without_home_key(self):
+ ns = MockPosixNamespace(
+ argv0="/venv/bin/python3",
+ PREFIX="/usr",
+ ENV_PATH="/usr/bin",
+ )
+ # Setup the bare minimum venv
+ ns.add_known_xfile("/usr/bin/python3")
+ ns.add_known_xfile("/venv/bin/python3")
+ ns.add_known_link("/venv/bin/python3", "/usr/bin/python3")
+ ns.add_known_file("/venv/pyvenv.cfg", [
+ # home = key intentionally omitted
+ ])
+ expected = dict(
+ executable="/venv/bin/python3",
+ prefix="/venv",
+ base_prefix="/usr",
+ )
+ actual = getpath(ns, expected)
+ self.assertEqual(expected, actual)
+
def test_venv_changed_name_posix(self):
"Test a venv layout on *nix."
ns = MockPosixNamespace(
diff --git a/Lib/test/test_grammar.py b/Lib/test/test_grammar.py
index c39565144bf..7f5d48b9c63 100644
--- a/Lib/test/test_grammar.py
+++ b/Lib/test/test_grammar.py
@@ -1,7 +1,7 @@
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
-from test.support import check_syntax_error
+from test.support import check_syntax_error, skip_wasi_stack_overflow
from test.support import import_helper
import annotationlib
import inspect
@@ -249,6 +249,18 @@ the \'lazy\' dog.\n\
compile(s, "<test>", "exec")
self.assertIn("was never closed", str(cm.exception))
+ @skip_wasi_stack_overflow()
+ def test_max_level(self):
+ # Macro defined in Parser/lexer/state.h
+ MAXLEVEL = 200
+
+ result = eval("(" * MAXLEVEL + ")" * MAXLEVEL)
+ self.assertEqual(result, ())
+
+ with self.assertRaises(SyntaxError) as cm:
+ eval("(" * (MAXLEVEL + 1) + ")" * (MAXLEVEL + 1))
+ self.assertStartsWith(str(cm.exception), 'too many nested parentheses')
+
var_annot_global: int # a global annotated is necessary for test_var_annot
diff --git a/Lib/test/test_gzip.py b/Lib/test/test_gzip.py
index ccbacc7c19b..a12ff5662a7 100644
--- a/Lib/test/test_gzip.py
+++ b/Lib/test/test_gzip.py
@@ -9,7 +9,6 @@ import os
import struct
import sys
import unittest
-import warnings
from subprocess import PIPE, Popen
from test.support import catch_unraisable_exception
from test.support import import_helper
diff --git a/Lib/test/test_hashlib.py b/Lib/test/test_hashlib.py
index de4c8a1670f..5bad483ae9d 100644
--- a/Lib/test/test_hashlib.py
+++ b/Lib/test/test_hashlib.py
@@ -12,12 +12,12 @@ import io
import itertools
import logging
import os
+import re
import sys
import sysconfig
import tempfile
import threading
import unittest
-import warnings
from test import support
from test.support import _4G, bigmemtest
from test.support import hashlib_helper
@@ -98,6 +98,14 @@ def read_vectors(hash_name):
yield parts
+DEPRECATED_STRING_PARAMETER = re.escape(
+ "the 'string' keyword parameter is deprecated since "
+ "Python 3.15 and slated for removal in Python 3.19; "
+ "use the 'data' keyword parameter or pass the data "
+ "to hash as a positional argument instead"
+)
+
+
class HashLibTestCase(unittest.TestCase):
supported_hash_names = ( 'md5', 'MD5', 'sha1', 'SHA1',
'sha224', 'SHA224', 'sha256', 'SHA256',
@@ -141,11 +149,10 @@ class HashLibTestCase(unittest.TestCase):
# of hashlib.new given the algorithm name.
for algorithm, constructors in self.constructors_to_test.items():
constructors.add(getattr(hashlib, algorithm))
- def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm, **kwargs):
- if data is None:
- return hashlib.new(_alg, **kwargs)
- return hashlib.new(_alg, data, **kwargs)
- constructors.add(_test_algorithm_via_hashlib_new)
+ def c(*args, __algorithm_name=algorithm, **kwargs):
+ return hashlib.new(__algorithm_name, *args, **kwargs)
+ c.__name__ = f'do_test_algorithm_via_hashlib_new_{algorithm}'
+ constructors.add(c)
_hashlib = self._conditional_import_module('_hashlib')
self._hashlib = _hashlib
@@ -201,6 +208,11 @@ class HashLibTestCase(unittest.TestCase):
return itertools.chain.from_iterable(constructors)
@property
+ def shake_constructors(self):
+ for shake_name in self.shakes:
+ yield from self.constructors_to_test.get(shake_name, ())
+
+ @property
def is_fips_mode(self):
return get_fips_mode()
@@ -250,6 +262,85 @@ class HashLibTestCase(unittest.TestCase):
self._hashlib.new("md5", usedforsecurity=False)
self._hashlib.openssl_md5(usedforsecurity=False)
+ @unittest.skipIf(get_fips_mode(), "skip in FIPS mode")
+ def test_clinic_signature(self):
+ for constructor in self.hash_constructors:
+ with self.subTest(constructor.__name__):
+ constructor(b'')
+ constructor(data=b'')
+ with self.assertWarnsRegex(DeprecationWarning,
+ DEPRECATED_STRING_PARAMETER):
+ constructor(string=b'')
+
+ digest_name = constructor(b'').name
+ with self.subTest(digest_name):
+ hashlib.new(digest_name, b'')
+ hashlib.new(digest_name, data=b'')
+ with self.assertWarnsRegex(DeprecationWarning,
+ DEPRECATED_STRING_PARAMETER):
+ hashlib.new(digest_name, string=b'')
+ # Make sure that _hashlib contains the constructor
+ # to test when using a combination of libcrypto and
+ # interned hash implementations.
+ if self._hashlib and digest_name in self._hashlib._constructors:
+ self._hashlib.new(digest_name, b'')
+ self._hashlib.new(digest_name, data=b'')
+ with self.assertWarnsRegex(DeprecationWarning,
+ DEPRECATED_STRING_PARAMETER):
+ self._hashlib.new(digest_name, string=b'')
+
+ @unittest.skipIf(get_fips_mode(), "skip in FIPS mode")
+ def test_clinic_signature_errors(self):
+ nomsg = b''
+ mymsg = b'msg'
+ conflicting_call = re.escape(
+ "'data' and 'string' are mutually exclusive "
+ "and support for 'string' keyword parameter "
+ "is slated for removal in a future version."
+ )
+ duplicated_param = re.escape("given by name ('data') and position")
+ unexpected_param = re.escape("got an unexpected keyword argument '_'")
+ for args, kwds, errmsg in [
+ # Reject duplicated arguments before unknown keyword arguments.
+ ((nomsg,), dict(data=nomsg, _=nomsg), duplicated_param),
+ ((mymsg,), dict(data=nomsg, _=nomsg), duplicated_param),
+ # Reject duplicated arguments before conflicting ones.
+ *itertools.product(
+ [[nomsg], [mymsg]],
+ [dict(data=nomsg), dict(data=nomsg, string=nomsg)],
+ [duplicated_param]
+ ),
+ # Reject unknown keyword arguments before conflicting ones.
+ *itertools.product(
+ [()],
+ [
+ dict(_=None),
+ dict(data=nomsg, _=None),
+ dict(string=nomsg, _=None),
+ dict(string=nomsg, data=nomsg, _=None),
+ ],
+ [unexpected_param]
+ ),
+ ((nomsg,), dict(_=None), unexpected_param),
+ ((mymsg,), dict(_=None), unexpected_param),
+ # Reject conflicting arguments.
+ [(nomsg,), dict(string=nomsg), conflicting_call],
+ [(mymsg,), dict(string=nomsg), conflicting_call],
+ [(), dict(data=nomsg, string=nomsg), conflicting_call],
+ ]:
+ for constructor in self.hash_constructors:
+ digest_name = constructor(b'').name
+ with self.subTest(constructor.__name__, args=args, kwds=kwds):
+ with self.assertRaisesRegex(TypeError, errmsg):
+ constructor(*args, **kwds)
+ with self.subTest(digest_name, args=args, kwds=kwds):
+ with self.assertRaisesRegex(TypeError, errmsg):
+ hashlib.new(digest_name, *args, **kwds)
+ if (self._hashlib and
+ digest_name in self._hashlib._constructors):
+ with self.assertRaisesRegex(TypeError, errmsg):
+ self._hashlib.new(digest_name, *args, **kwds)
+
def test_unknown_hash(self):
self.assertRaises(ValueError, hashlib.new, 'spam spam spam spam spam')
self.assertRaises(TypeError, hashlib.new, 1)
@@ -284,6 +375,16 @@ class HashLibTestCase(unittest.TestCase):
self.assertIs(constructor, _md5.md5)
self.assertEqual(sorted(builtin_constructor_cache), ['MD5', 'md5'])
+ def test_copy(self):
+ for cons in self.hash_constructors:
+ h1 = cons(os.urandom(16), usedforsecurity=False)
+ h2 = h1.copy()
+ self.assertIs(type(h1), type(h2))
+ self.assertEqual(h1.name, h2.name)
+ size = (16,) if h1.name in self.shakes else ()
+ self.assertEqual(h1.digest(*size), h2.digest(*size))
+ self.assertEqual(h1.hexdigest(*size), h2.hexdigest(*size))
+
def test_hexdigest(self):
for cons in self.hash_constructors:
h = cons(usedforsecurity=False)
@@ -294,21 +395,50 @@ class HashLibTestCase(unittest.TestCase):
self.assertIsInstance(h.digest(), bytes)
self.assertEqual(hexstr(h.digest()), h.hexdigest())
- def test_digest_length_overflow(self):
- # See issue #34922
- large_sizes = (2**29, 2**32-10, 2**32+10, 2**61, 2**64-10, 2**64+10)
- for cons in self.hash_constructors:
- h = cons(usedforsecurity=False)
- if h.name not in self.shakes:
- continue
- if HASH is not None and isinstance(h, HASH):
- # _hashopenssl's take a size_t
- continue
- for digest in h.digest, h.hexdigest:
- self.assertRaises(ValueError, digest, -10)
- for length in large_sizes:
- with self.assertRaises((ValueError, OverflowError)):
- digest(length)
+ def test_shakes_zero_digest_length(self):
+ for constructor in self.shake_constructors:
+ with self.subTest(constructor=constructor):
+ h = constructor(b'abcdef', usedforsecurity=False)
+ self.assertEqual(h.digest(0), b'')
+ self.assertEqual(h.hexdigest(0), '')
+
+ def test_shakes_invalid_digest_length(self):
+ # See https://github.com/python/cpython/issues/79103.
+ for constructor in self.shake_constructors:
+ with self.subTest(constructor=constructor):
+ h = constructor(usedforsecurity=False)
+ # Note: digest() and hexdigest() take a signed input and
+ # raise if it is negative; the rationale is that internally we
+ # use PyBytes_FromStringAndSize() and _Py_strhex(), which both
+ # take a Py_ssize_t.
+ for negative_size in (-1, -10, -(1 << 31), -sys.maxsize):
+ self.assertRaises(ValueError, h.digest, negative_size)
+ self.assertRaises(ValueError, h.hexdigest, negative_size)
+
+ def test_shakes_overflow_digest_length(self):
+ # See https://github.com/python/cpython/issues/135759.
+
+ exc_types = (OverflowError, ValueError)
+ # HACL* accepts an 'uint32_t' while OpenSSL accepts a 'size_t'.
+ openssl_overflown_sizes = (sys.maxsize + 1, 2 * sys.maxsize)
+ # https://github.com/python/cpython/issues/79103 restricts
+ # the accepted built-in lengths to 2 ** 29, even if OpenSSL
+ # accepts such lengths.
+ builtin_overflown_sizes = openssl_overflown_sizes + (
+ 2 ** 29, 2 ** 32 - 10, 2 ** 32, 2 ** 32 + 10,
+ 2 ** 61, 2 ** 64 - 10, 2 ** 64, 2 ** 64 + 10,
+ )
+
+ for constructor in self.shake_constructors:
+ with self.subTest(constructor=constructor):
+ h = constructor(usedforsecurity=False)
+ if HASH is not None and isinstance(h, HASH):
+ overflown_sizes = openssl_overflown_sizes
+ else:
+ overflown_sizes = builtin_overflown_sizes
+ for invalid_size in overflown_sizes:
+ self.assertRaises(exc_types, h.digest, invalid_size)
+ self.assertRaises(exc_types, h.hexdigest, invalid_size)
def test_name_attribute(self):
for cons in self.hash_constructors:
@@ -719,8 +849,6 @@ class HashLibTestCase(unittest.TestCase):
self.assertRaises(ValueError, constructor, node_offset=-1)
self.assertRaises(OverflowError, constructor, node_offset=max_offset+1)
- self.assertRaises(TypeError, constructor, data=b'')
- self.assertRaises(TypeError, constructor, string=b'')
self.assertRaises(TypeError, constructor, '')
constructor(
@@ -929,49 +1057,67 @@ class HashLibTestCase(unittest.TestCase):
def test_sha256_gil(self):
gil_minsize = hashlib_helper.find_gil_minsize(['_sha2', '_hashlib'])
+ data = b'1' + b'#' * gil_minsize + b'1'
+ expected = hashlib.sha256(data).hexdigest()
+
m = hashlib.sha256()
m.update(b'1')
m.update(b'#' * gil_minsize)
m.update(b'1')
- self.assertEqual(
- m.hexdigest(),
- '1cfceca95989f51f658e3f3ffe7f1cd43726c9e088c13ee10b46f57cef135b94'
- )
+ self.assertEqual(m.hexdigest(), expected)
- m = hashlib.sha256(b'1' + b'#' * gil_minsize + b'1')
- self.assertEqual(
- m.hexdigest(),
- '1cfceca95989f51f658e3f3ffe7f1cd43726c9e088c13ee10b46f57cef135b94'
- )
+ @threading_helper.reap_threads
+ @threading_helper.requires_working_threading()
+ def test_threaded_hashing_fast(self):
+ # Same as test_threaded_hashing_slow() but only tests some functions
+ # since otherwise test_hashlib.py becomes too slow during development.
+ for name in ['md5', 'sha1', 'sha256', 'sha3_256', 'blake2s']:
+ if constructor := getattr(hashlib, name, None):
+ with self.subTest(name):
+ self.do_test_threaded_hashing(constructor, is_shake=False)
+ if shake_128 := getattr(hashlib, 'shake_128', None):
+ self.do_test_threaded_hashing(shake_128, is_shake=True)
+ @requires_resource('cpu')
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
- def test_threaded_hashing(self):
+ def test_threaded_hashing_slow(self):
+ for algorithm, constructors in self.constructors_to_test.items():
+ is_shake = algorithm in self.shakes
+ for constructor in constructors:
+ with self.subTest(constructor.__name__, is_shake=is_shake):
+ self.do_test_threaded_hashing(constructor, is_shake)
+
+ def do_test_threaded_hashing(self, constructor, is_shake):
# Updating the same hash object from several threads at once
# using data chunk sizes containing the same byte sequences.
#
# If the internal locks are working to prevent multiple
# updates on the same object from running at once, the resulting
# hash will be the same as doing it single threaded upfront.
- hasher = hashlib.sha1()
- num_threads = 5
- smallest_data = b'swineflu'
- data = smallest_data * 200000
- expected_hash = hashlib.sha1(data*num_threads).hexdigest()
-
- def hash_in_chunks(chunk_size):
- index = 0
- while index < len(data):
- hasher.update(data[index:index + chunk_size])
- index += chunk_size
+
+ # The data to hash has length s|M|q^N and the chunk size for the i-th
+ # thread is s|M|q^(N-i), where N is the number of threads, M is a fixed
+ # message of small length, and s >= 1 and q >= 2 are small integers.
+ smallest_size, num_threads, s, q = 8, 5, 2, 10
+
+ smallest_data = os.urandom(smallest_size)
+ data = s * smallest_data * (q ** num_threads)
+
+ h1 = constructor(usedforsecurity=False)
+ h2 = constructor(data * num_threads, usedforsecurity=False)
+
+ def update(chunk_size):
+ for index in range(0, len(data), chunk_size):
+ h1.update(data[index:index + chunk_size])
threads = []
- for threadnum in range(num_threads):
- chunk_size = len(data) // (10 ** threadnum)
+ for thread_num in range(num_threads):
+ # chunk_size = len(data) // (q ** thread_num)
+ chunk_size = s * smallest_size * q ** (num_threads - thread_num)
self.assertGreater(chunk_size, 0)
- self.assertEqual(chunk_size % len(smallest_data), 0)
- thread = threading.Thread(target=hash_in_chunks,
- args=(chunk_size,))
+ self.assertEqual(chunk_size % smallest_size, 0)
+ thread = threading.Thread(target=update, args=(chunk_size,))
threads.append(thread)
for thread in threads:
@@ -979,7 +1125,10 @@ class HashLibTestCase(unittest.TestCase):
for thread in threads:
thread.join()
- self.assertEqual(expected_hash, hasher.hexdigest())
+ if is_shake:
+ self.assertEqual(h1.hexdigest(16), h2.hexdigest(16))
+ else:
+ self.assertEqual(h1.hexdigest(), h2.hexdigest())
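A quick arithmetic check of the sizing comment in do_test_threaded_hashing(): with smallest_size=8, num_threads=5, s=2 and q=10, the data is 2*8*10**5 = 1,600,000 bytes long and thread i updates with chunks of 2*8*10**(5-i) bytes, so every chunk size is a multiple of the base message and tiles the data exactly.

smallest_size, num_threads, s, q = 8, 5, 2, 10
data_len = s * smallest_size * q**num_threads             # 1_600_000 bytes
for i in range(num_threads):
    chunk = s * smallest_size * q**(num_threads - i)       # thread i's chunk size
    assert chunk % smallest_size == 0                       # multiple of the base message
    assert data_len % chunk == 0                            # chunks cover the data exactly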
def test_get_fips_mode(self):
fips_mode = self.is_fips_mode
diff --git a/Lib/test/test_hmac.py b/Lib/test/test_hmac.py
index e898644dd8a..ff6e1bce0ef 100644
--- a/Lib/test/test_hmac.py
+++ b/Lib/test/test_hmac.py
@@ -21,7 +21,6 @@ import functools
import hmac
import hashlib
import random
-import test.support
import test.support.hashlib_helper as hashlib_helper
import types
import unittest
diff --git a/Lib/test/test_htmlparser.py b/Lib/test/test_htmlparser.py
index 61fa24fab57..15cad061889 100644
--- a/Lib/test/test_htmlparser.py
+++ b/Lib/test/test_htmlparser.py
@@ -5,6 +5,7 @@ import pprint
import unittest
from unittest.mock import patch
+from test import support
class EventCollector(html.parser.HTMLParser):
@@ -80,6 +81,13 @@ class EventCollectorCharrefs(EventCollector):
self.fail('This should never be called with convert_charrefs=True')
+# The normal event collector normalizes the events in get_events,
+# so we override it to return the original list of events.
+class EventCollectorNoNormalize(EventCollector):
+ def get_events(self):
+ return self.events
+
+
class TestCaseBase(unittest.TestCase):
def get_collector(self):
@@ -264,8 +272,7 @@ text
("starttag", "foo:bar", [("one", "1"), ("two", "2")]),
("starttag_text", s)])
- def test_cdata_content(self):
- contents = [
+ @support.subTests('content', [
'<!-- not a comment --> &not-an-entity-ref;',
"<not a='start tag'>",
'<a href="" /> <p> <span></span>',
@@ -278,70 +285,127 @@ text
'src="http://www.example.org/r=\'+new '
'Date().getTime()+\'"><\\/s\'+\'cript>\');\n//]]>'),
'\n<!-- //\nvar foo = 3.14;\n// -->\n',
- 'foo = "</sty" + "le>";',
'<!-- \u2603 -->',
- # these two should be invalid according to the HTML 5 spec,
- # section 8.1.2.2
- #'foo = </\nscript>',
- #'foo = </ script>',
- ]
- elements = ['script', 'style', 'SCRIPT', 'STYLE', 'Script', 'Style']
- for content in contents:
- for element in elements:
- element_lower = element.lower()
- s = '<{element}>{content}</{element}>'.format(element=element,
- content=content)
- self._run_check(s, [("starttag", element_lower, []),
- ("data", content),
- ("endtag", element_lower)])
-
- def test_cdata_with_closing_tags(self):
+ 'foo = "</ script>"',
+ 'foo = "</scripture>"',
+ 'foo = "</script\v>"',
+ 'foo = "</script\xa0>"',
+ 'foo = "</ſcript>"',
+ 'foo = "</scrıpt>"',
+ ])
+ def test_script_content(self, content):
+ s = f'<script>{content}</script>'
+ self._run_check(s, [("starttag", "script", []),
+ ("data", content),
+ ("endtag", "script")])
+
+ @support.subTests('content', [
+ 'a::before { content: "<!-- not a comment -->"; }',
+ 'a::before { content: "&not-an-entity-ref;"; }',
+ 'a::before { content: "<not a=\'start tag\'>"; }',
+ 'a::before { content: "\u2603"; }',
+ 'a::before { content: "< /style>"; }',
+ 'a::before { content: "</ style>"; }',
+ 'a::before { content: "</styled>"; }',
+ 'a::before { content: "</style\v>"; }',
+ 'a::before { content: "</style\xa0>"; }',
+ 'a::before { content: "</ſtyle>"; }',
+ ])
+ def test_style_content(self, content):
+ s = f'<style>{content}</style>'
+ self._run_check(s, [("starttag", "style", []),
+ ("data", content),
+ ("endtag", "style")])
+
+ @support.subTests('endtag', ['script', 'SCRIPT', 'script ', 'script\n',
+ 'script/', 'script foo=bar', 'script foo=">"'])
+ def test_script_closing_tag(self, endtag):
# see issue #13358
# make sure that HTMLParser calls handle_data only once for each CDATA.
- # The normal event collector normalizes the events in get_events,
- # so we override it to return the original list of events.
- class Collector(EventCollector):
- def get_events(self):
- return self.events
-
content = """<!-- not a comment --> &not-an-entity-ref;
<a href="" /> </p><p> <span></span></style>
'</script' + '>'"""
- for element in [' script', 'script ', ' script ',
- '\nscript', 'script\n', '\nscript\n']:
- element_lower = element.lower().strip()
- s = '<script>{content}</{element}>'.format(element=element,
- content=content)
- self._run_check(s, [("starttag", element_lower, []),
- ("data", content),
- ("endtag", element_lower)],
- collector=Collector(convert_charrefs=False))
-
- def test_EOF_in_cdata(self):
- content = """<!-- not a comment --> &not-an-entity-ref;
- <a href="" /> </p><p> <span></span></style>
- '</script' + '>'"""
- s = f'<script>{content}'
- self._run_check(s, [
- ("starttag", 'script', []),
- ("data", content)
- ])
+ s = f'<ScrIPt>{content}</{endtag}>'
+ self._run_check(s, [("starttag", "script", []),
+ ("data", content),
+ ("endtag", "script")],
+ collector=EventCollectorNoNormalize(convert_charrefs=False))
+
+ @support.subTests('endtag', ['style', 'STYLE', 'style ', 'style\n',
+ 'style/', 'style foo=bar', 'style foo=">"'])
+ def test_style_closing_tag(self, endtag):
+ content = """
+ b::before { content: "<!-- not a comment -->"; }
+ p::before { content: "&not-an-entity-ref;"; }
+ a::before { content: "<i>"; }
+ a::after { content: "</i>"; }
+ """
+ s = f'<StyLE>{content}</{endtag}>'
+ self._run_check(s, [("starttag", "style", []),
+ ("data", content),
+ ("endtag", "style")],
+ collector=EventCollectorNoNormalize(convert_charrefs=False))
+
+ @support.subTests('tail,end', [
+ ('', False),
+ ('<', False),
+ ('</', False),
+ ('</s', False),
+ ('</script', False),
+ ('</script ', True),
+ ('</script foo=bar', True),
+ ('</script foo=">', True),
+ ])
+ def test_eof_in_script(self, tail, end):
+ content = "a = 123"
+ s = f'<ScrIPt>{content}{tail}'
+ self._run_check(s, [("starttag", "script", []),
+ ("data", content if end else content + tail)],
+ collector=EventCollectorNoNormalize(convert_charrefs=False))
def test_comments(self):
html = ("<!-- I'm a valid comment -->"
'<!--me too!-->'
'<!------>'
+ '<!----->'
'<!---->'
+ # abrupt-closing-of-empty-comment
+ '<!--->'
+ '<!-->'
'<!----I have many hyphens---->'
'<!-- I have a > in the middle -->'
- '<!-- and I have -- in the middle! -->')
+ '<!-- and I have -- in the middle! -->'
+ '<!--incorrectly-closed-comment--!>'
+ '<!----!>'
+ '<!----!-->'
+ '<!---- >-->'
+ '<!---!>-->'
+ '<!--!>-->'
+ # nested-comment
+ '<!-- <!-- nested --> -->'
+ '<!--<!-->'
+ '<!--<!--!>'
+ )
expected = [('comment', " I'm a valid comment "),
('comment', 'me too!'),
('comment', '--'),
+ ('comment', '-'),
+ ('comment', ''),
+ ('comment', ''),
('comment', ''),
('comment', '--I have many hyphens--'),
('comment', ' I have a > in the middle '),
- ('comment', ' and I have -- in the middle! ')]
+ ('comment', ' and I have -- in the middle! '),
+ ('comment', 'incorrectly-closed-comment'),
+ ('comment', ''),
+ ('comment', '--!'),
+ ('comment', '-- >'),
+ ('comment', '-!>'),
+ ('comment', '!>'),
+ ('comment', ' <!-- nested '), ('data', ' -->'),
+ ('comment', '<!'),
+ ('comment', '<!'),
+ ]
self._run_check(html, expected)
def test_condcoms(self):
@@ -430,28 +494,34 @@ text
('data', '<'),
('starttag', 'bc<', [('a', None)]),
('endtag', 'html'),
- ('data', '\n<img src="URL>'),
- ('comment', '/img'),
- ('endtag', 'html<')])
+ ('data', '\n')])
def test_starttag_junk_chars(self):
+ self._run_check("<", [('data', '<')])
+ self._run_check("<>", [('data', '<>')])
+ self._run_check("< >", [('data', '< >')])
+ self._run_check("< ", [('data', '< ')])
self._run_check("</>", [])
+ self._run_check("<$>", [('data', '<$>')])
self._run_check("</$>", [('comment', '$')])
self._run_check("</", [('data', '</')])
- self._run_check("</a", [('data', '</a')])
+ self._run_check("</a", [])
+ self._run_check("</ a>", [('comment', ' a')])
+ self._run_check("</ a", [('comment', ' a')])
self._run_check("<a<a>", [('starttag', 'a<a', [])])
self._run_check("</a<a>", [('endtag', 'a<a')])
- self._run_check("<!", [('data', '<!')])
- self._run_check("<a", [('data', '<a')])
- self._run_check("<a foo='bar'", [('data', "<a foo='bar'")])
- self._run_check("<a foo='bar", [('data', "<a foo='bar")])
- self._run_check("<a foo='>'", [('data', "<a foo='>'")])
- self._run_check("<a foo='>", [('data', "<a foo='>")])
+ self._run_check("<!", [('comment', '')])
+ self._run_check("<a", [])
+ self._run_check("<a foo='bar'", [])
+ self._run_check("<a foo='bar", [])
+ self._run_check("<a foo='>'", [])
+ self._run_check("<a foo='>", [])
self._run_check("<a$>", [('starttag', 'a$', [])])
self._run_check("<a$b>", [('starttag', 'a$b', [])])
self._run_check("<a$b/>", [('startendtag', 'a$b', [])])
self._run_check("<a$b >", [('starttag', 'a$b', [])])
self._run_check("<a$b />", [('startendtag', 'a$b', [])])
+ self._run_check("</a$b>", [('endtag', 'a$b')])
def test_slashes_in_starttag(self):
self._run_check('<a foo="var"/>', [('startendtag', 'a', [('foo', 'var')])])
@@ -484,6 +554,10 @@ text
]
self._run_check(html, expected)
+ def test_slashes_in_endtag(self):
+ self._run_check('</a/>', [('endtag', 'a')])
+ self._run_check('</a foo="var"/>', [('endtag', 'a')])
+
def test_declaration_junk_chars(self):
self._run_check("<!DOCTYPE foo $ >", [('decl', 'DOCTYPE foo $ ')])
@@ -518,15 +592,11 @@ text
self._run_check(html, expected)
def test_broken_invalid_end_tag(self):
- # This is technically wrong (the "> shouldn't be included in the 'data')
- # but is probably not worth fixing it (in addition to all the cases of
- # the previous test, it would require a full attribute parsing).
- # see #13993
html = '<b>This</b attr=">"> confuses the parser'
expected = [('starttag', 'b', []),
('data', 'This'),
('endtag', 'b'),
- ('data', '"> confuses the parser')]
+ ('data', ' confuses the parser')]
self._run_check(html, expected)
def test_correct_detection_of_start_tags(self):
@@ -553,7 +623,7 @@ text
html = '<div style="", foo = "bar" ><b>The <a href="some_url">rain</a>'
expected = [
- ('starttag', 'div', [('style', ''), (',', None), ('foo', 'bar')]),
+ ('starttag', 'div', [('style', ''), (',', None), ('foo', None), ('=', None), ('"bar"', None)]),
('starttag', 'b', []),
('data', 'The '),
('starttag', 'a', [('href', 'some_url')]),
@@ -576,21 +646,50 @@ text
for html, expected in data:
self._run_check(html, expected)
- def test_EOF_in_comments_or_decls(self):
+ def test_eof_in_comments(self):
data = [
- ('<!', [('data', '<!')]),
- ('<!-', [('data', '<!-')]),
- ('<!--', [('data', '<!--')]),
- ('<![', [('data', '<![')]),
- ('<![CDATA[', [('data', '<![CDATA[')]),
- ('<![CDATA[x', [('data', '<![CDATA[x')]),
- ('<!DOCTYPE', [('data', '<!DOCTYPE')]),
- ('<!DOCTYPE HTML', [('data', '<!DOCTYPE HTML')]),
+ ('<!--', [('comment', '')]),
+ ('<!---', [('comment', '')]),
+ ('<!----', [('comment', '')]),
+ ('<!-----', [('comment', '-')]),
+ ('<!------', [('comment', '--')]),
+ ('<!----!', [('comment', '')]),
+ ('<!---!', [('comment', '-!')]),
+ ('<!---!>', [('comment', '-!>')]),
+ ('<!--foo', [('comment', 'foo')]),
+ ('<!--foo-', [('comment', 'foo')]),
+ ('<!--foo--', [('comment', 'foo')]),
+ ('<!--foo--!', [('comment', 'foo')]),
+ ('<!--<!--', [('comment', '<!')]),
+ ('<!--<!--!', [('comment', '<!')]),
]
for html, expected in data:
self._run_check(html, expected)
+
+ def test_eof_in_declarations(self):
+ data = [
+ ('<!', [('comment', '')]),
+ ('<!-', [('comment', '-')]),
+ ('<![', [('comment', '[')]),
+ ('<![CDATA[', [('unknown decl', 'CDATA[')]),
+ ('<![CDATA[x', [('unknown decl', 'CDATA[x')]),
+ ('<![CDATA[x]', [('unknown decl', 'CDATA[x]')]),
+ ('<![CDATA[x]]', [('unknown decl', 'CDATA[x]]')]),
+ ('<!DOCTYPE', [('decl', 'DOCTYPE')]),
+ ('<!DOCTYPE ', [('decl', 'DOCTYPE ')]),
+ ('<!DOCTYPE html', [('decl', 'DOCTYPE html')]),
+ ('<!DOCTYPE html ', [('decl', 'DOCTYPE html ')]),
+ ('<!DOCTYPE html PUBLIC', [('decl', 'DOCTYPE html PUBLIC')]),
+ ('<!DOCTYPE html PUBLIC "foo', [('decl', 'DOCTYPE html PUBLIC "foo')]),
+ ('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "foo',
+ [('decl', 'DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "foo')]),
+ ]
+ for html, expected in data:
+ self._run_check(html, expected)
+
def test_bogus_comments(self):
- html = ('<! not really a comment >'
+ html = ('<!ELEMENT br EMPTY>'
+ '<! not really a comment >'
'<! not a comment either -->'
'<! -- close enough -->'
'<!><!<-- this was an empty comment>'
@@ -604,6 +703,7 @@ text
'<![CDATA]]>' # required '[' after CDATA
)
expected = [
+ ('comment', 'ELEMENT br EMPTY'),
('comment', ' not really a comment '),
('comment', ' not a comment either --'),
('comment', ' -- close enough --'),
@@ -684,6 +784,26 @@ text
('endtag', 'a'), ('data', ' bar & baz')]
)
+ @support.requires_resource('cpu')
+ def test_eof_no_quadratic_complexity(self):
+ # Each of these examples used to take about an hour.
+ # Now they take a fraction of a second.
+ def check(source):
+ parser = html.parser.HTMLParser()
+ parser.feed(source)
+ parser.close()
+ n = 120_000
+ check("<a " * n)
+ check("<a a=" * n)
+ check("</a " * 14 * n)
+ check("</a a=" * 11 * n)
+ check("<!--" * 4 * n)
+ check("<!" * 60 * n)
+ check("<?" * 19 * n)
+ check("</$" * 15 * n)
+ check("<![CDATA[" * 9 * n)
+ check("<!doctype" * 35 * n)
+
class AttributesTestCase(TestCaseBase):
@@ -692,9 +812,15 @@ class AttributesTestCase(TestCaseBase):
("starttag", "a", [("b", "v"), ("c", "v"), ("d", "v"), ("e", None)])
]
self._run_check("""<a b='v' c="v" d=v e>""", output)
- self._run_check("""<a b = 'v' c = "v" d = v e>""", output)
- self._run_check("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output)
- self._run_check("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output)
+ self._run_check("<a foo==bar>", [('starttag', 'a', [('foo', '=bar')])])
+ self._run_check("<a foo =bar>", [('starttag', 'a', [('foo', None), ('=bar', None)])])
+ self._run_check("<a foo\t=bar>", [('starttag', 'a', [('foo', None), ('=bar', None)])])
+ self._run_check("<a foo\v=bar>", [('starttag', 'a', [('foo\v', 'bar')])])
+ self._run_check("<a foo\xa0=bar>", [('starttag', 'a', [('foo\xa0', 'bar')])])
+ self._run_check("<a foo= bar>", [('starttag', 'a', [('foo', ''), ('bar', None)])])
+ self._run_check("<a foo=\tbar>", [('starttag', 'a', [('foo', ''), ('bar', None)])])
+ self._run_check("<a foo=\vbar>", [('starttag', 'a', [('foo', '\vbar')])])
+ self._run_check("<a foo=\xa0bar>", [('starttag', 'a', [('foo', '\xa0bar')])])
def test_attr_values(self):
self._run_check("""<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""",
@@ -703,6 +829,10 @@ class AttributesTestCase(TestCaseBase):
("d", "\txyz\n")])])
self._run_check("""<a b='' c="">""",
[("starttag", "a", [("b", ""), ("c", "")])])
+ self._run_check("<a b=\t c=\n>",
+ [("starttag", "a", [("b", ""), ("c", "")])])
+ self._run_check("<a b=\v c=\xa0>",
+ [("starttag", "a", [("b", "\v"), ("c", "\xa0")])])
# Regression test for SF patch #669683.
self._run_check("<e a=rgb(1,2,3)>",
[("starttag", "e", [("a", "rgb(1,2,3)")])])
@@ -774,7 +904,7 @@ class AttributesTestCase(TestCaseBase):
('data', 'test - bad2'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'\xa0style='color:red;bad3'")]),
('data', 'test - bad3'), ('endtag', 'a'),
- ('starttag', 'a', [('href', "test'\xa0style='color:red;bad4'")]),
+ ('starttag', 'a', [('href', None), ('=', None), ("test'&nbsp;style", 'color:red;bad4')]),
('data', 'test - bad4'), ('endtag', 'a')
]
self._run_check(html, expected)
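Many of the rewritten tests above, and the http.cookiejar tests below, rely on the test.support.subTests decorator to expand a list of cases into per-case subTest runs. The helper itself lives in Lib/test/support; the snippet below is only an illustrative approximation of the pattern, not the real implementation.

import unittest

def subTests(names, cases):
    # Rough sketch: run the decorated test once per case, each under subTest().
    params = [name.strip() for name in names.split(',')]
    def decorator(func):
        def wrapper(self):
            for case in cases:
                args = case if len(params) > 1 else (case,)
                with self.subTest(**dict(zip(params, args))):
                    func(self, *args)
        return wrapper
    return decorator

class Example(unittest.TestCase):
    @subTests('value,expected', [(1, 2), (2, 4)])
    def test_double(self, value, expected):
        self.assertEqual(value * 2, expected)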
diff --git a/Lib/test/test_http_cookiejar.py b/Lib/test/test_http_cookiejar.py
index 6bc33b15ec3..04cb440cd4c 100644
--- a/Lib/test/test_http_cookiejar.py
+++ b/Lib/test/test_http_cookiejar.py
@@ -4,6 +4,7 @@ import os
import stat
import sys
import re
+from test import support
from test.support import os_helper
from test.support import warnings_helper
import time
@@ -105,8 +106,7 @@ class DateTimeTests(unittest.TestCase):
self.assertEqual(http2time(s.lower()), test_t, s.lower())
self.assertEqual(http2time(s.upper()), test_t, s.upper())
- def test_http2time_garbage(self):
- for test in [
+ @support.subTests('test', [
'',
'Garbage',
'Mandag 16. September 1996',
@@ -121,10 +121,9 @@ class DateTimeTests(unittest.TestCase):
'08-01-3697739',
'09 Feb 19942632 22:23:32 GMT',
'Wed, 09 Feb 1994834 22:23:32 GMT',
- ]:
- self.assertIsNone(http2time(test),
- "http2time(%s) is not None\n"
- "http2time(test) %s" % (test, http2time(test)))
+ ])
+ def test_http2time_garbage(self, test):
+ self.assertIsNone(http2time(test))
def test_http2time_redos_regression_actually_completes(self):
# LOOSE_HTTP_DATE_RE was vulnerable to malicious input which caused catastrophic backtracking (REDoS).
@@ -149,9 +148,7 @@ class DateTimeTests(unittest.TestCase):
self.assertEqual(parse_date("1994-02-03 19:45:29 +0530"),
(1994, 2, 3, 14, 15, 29))
- def test_iso2time_formats(self):
- # test iso2time for supported dates.
- tests = [
+ @support.subTests('s', [
'1994-02-03 00:00:00 -0000', # ISO 8601 format
'1994-02-03 00:00:00 +0000', # ISO 8601 format
'1994-02-03 00:00:00', # zone is optional
@@ -164,16 +161,15 @@ class DateTimeTests(unittest.TestCase):
# A few tests with extra space at various places
' 1994-02-03 ',
' 1994-02-03T00:00:00 ',
- ]
-
+ ])
+ def test_iso2time_formats(self, s):
+ # test iso2time for supported dates.
test_t = 760233600 # assume broken POSIX counting of seconds
- for s in tests:
- self.assertEqual(iso2time(s), test_t, s)
- self.assertEqual(iso2time(s.lower()), test_t, s.lower())
- self.assertEqual(iso2time(s.upper()), test_t, s.upper())
+ self.assertEqual(iso2time(s), test_t, s)
+ self.assertEqual(iso2time(s.lower()), test_t, s.lower())
+ self.assertEqual(iso2time(s.upper()), test_t, s.upper())
- def test_iso2time_garbage(self):
- for test in [
+ @support.subTests('test', [
'',
'Garbage',
'Thursday, 03-Feb-94 00:00:00 GMT',
@@ -186,9 +182,9 @@ class DateTimeTests(unittest.TestCase):
'01-01-1980 00:00:62',
'01-01-1980T00:00:62',
'19800101T250000Z',
- ]:
- self.assertIsNone(iso2time(test),
- "iso2time(%r)" % test)
+ ])
+ def test_iso2time_garbage(self, test):
+ self.assertIsNone(iso2time(test))
def test_iso2time_performance_regression(self):
# If ISO_DATE_RE regresses to quadratic complexity, this test will take a very long time to succeed.
@@ -199,24 +195,23 @@ class DateTimeTests(unittest.TestCase):
class HeaderTests(unittest.TestCase):
- def test_parse_ns_headers(self):
- # quotes should be stripped
- expected = [[('foo', 'bar'), ('expires', 2209069412), ('version', '0')]]
- for hdr in [
+ @support.subTests('hdr', [
'foo=bar; expires=01 Jan 2040 22:23:32 GMT',
'foo=bar; expires="01 Jan 2040 22:23:32 GMT"',
- ]:
- self.assertEqual(parse_ns_headers([hdr]), expected)
-
- def test_parse_ns_headers_version(self):
-
+ ])
+ def test_parse_ns_headers(self, hdr):
# quotes should be stripped
- expected = [[('foo', 'bar'), ('version', '1')]]
- for hdr in [
+ expected = [[('foo', 'bar'), ('expires', 2209069412), ('version', '0')]]
+ self.assertEqual(parse_ns_headers([hdr]), expected)
+
+ @support.subTests('hdr', [
'foo=bar; version="1"',
'foo=bar; Version="1"',
- ]:
- self.assertEqual(parse_ns_headers([hdr]), expected)
+ ])
+ def test_parse_ns_headers_version(self, hdr):
+ # quotes should be stripped
+ expected = [[('foo', 'bar'), ('version', '1')]]
+ self.assertEqual(parse_ns_headers([hdr]), expected)
def test_parse_ns_headers_special_names(self):
# names such as 'expires' are not special in first name=value pair
@@ -226,8 +221,7 @@ class HeaderTests(unittest.TestCase):
expected = [[("expires", "01 Jan 2040 22:23:32 GMT"), ("version", "0")]]
self.assertEqual(parse_ns_headers([hdr]), expected)
- def test_join_header_words(self):
- for src, expected in [
+ @support.subTests('src,expected', [
([[("foo", None), ("bar", "baz")]], "foo; bar=baz"),
(([]), ""),
(([[]]), ""),
@@ -237,12 +231,11 @@ class HeaderTests(unittest.TestCase):
'n; foo="foo;_", bar=foo_bar'),
([[("n", "m"), ("foo", None)], [("bar", "foo_bar")]],
'n=m; foo, bar=foo_bar'),
- ]:
- with self.subTest(src=src):
- self.assertEqual(join_header_words(src), expected)
+ ])
+ def test_join_header_words(self, src, expected):
+ self.assertEqual(join_header_words(src), expected)
- def test_split_header_words(self):
- tests = [
+ @support.subTests('arg,expect', [
("foo", [[("foo", None)]]),
("foo=bar", [[("foo", "bar")]]),
(" foo ", [[("foo", None)]]),
@@ -259,24 +252,22 @@ class HeaderTests(unittest.TestCase):
(r'foo; bar=baz, spam=, foo="\,\;\"", bar= ',
[[("foo", None), ("bar", "baz")],
[("spam", "")], [("foo", ',;"')], [("bar", "")]]),
- ]
-
- for arg, expect in tests:
- try:
- result = split_header_words([arg])
- except:
- import traceback, io
- f = io.StringIO()
- traceback.print_exc(None, f)
- result = "(error -- traceback follows)\n\n%s" % f.getvalue()
- self.assertEqual(result, expect, """
+ ])
+ def test_split_header_words(self, arg, expect):
+ try:
+ result = split_header_words([arg])
+ except:
+ import traceback, io
+ f = io.StringIO()
+ traceback.print_exc(None, f)
+ result = "(error -- traceback follows)\n\n%s" % f.getvalue()
+ self.assertEqual(result, expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
""" % (arg, expect, result))
- def test_roundtrip(self):
- tests = [
+ @support.subTests('arg,expect', [
("foo", "foo"),
("foo=bar", "foo=bar"),
(" foo ", "foo"),
@@ -309,12 +300,11 @@ Got: '%s'
('n; foo="foo;_", bar="foo,_"',
'n; foo="foo;_", bar="foo,_"'),
- ]
-
- for arg, expect in tests:
- input = split_header_words([arg])
- res = join_header_words(input)
- self.assertEqual(res, expect, """
+ ])
+ def test_roundtrip(self, arg, expect):
+ input = split_header_words([arg])
+ res = join_header_words(input)
+ self.assertEqual(res, expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
@@ -516,14 +506,7 @@ class CookieTests(unittest.TestCase):
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
- def test_domain_return_ok(self):
- # test optimization: .domain_return_ok() should filter out most
- # domains in the CookieJar before we try to access them (because that
- # may require disk access -- in particular, with MSIECookieJar)
- # This is only a rough check for performance reasons, so it's not too
- # critical as long as it's sufficiently liberal.
- pol = DefaultCookiePolicy()
- for url, domain, ok in [
+ @support.subTests('url,domain,ok', [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
@@ -543,11 +526,18 @@ class CookieTests(unittest.TestCase):
("http://foo/", ".local", True),
("http://barfoo.com", ".foo.com", False),
("http://barfoo.com", "foo.com", False),
- ]:
- request = urllib.request.Request(url)
- r = pol.domain_return_ok(domain, request)
- if ok: self.assertTrue(r)
- else: self.assertFalse(r)
+ ])
+ def test_domain_return_ok(self, url, domain, ok):
+ # test optimization: .domain_return_ok() should filter out most
+ # domains in the CookieJar before we try to access them (because that
+ # may require disk access -- in particular, with MSIECookieJar)
+ # This is only a rough check for performance reasons, so it's not too
+ # critical as long as it's sufficiently liberal.
+ pol = DefaultCookiePolicy()
+ request = urllib.request.Request(url)
+ r = pol.domain_return_ok(domain, request)
+ if ok: self.assertTrue(r)
+ else: self.assertFalse(r)
def test_missing_value(self):
# missing = sign in Cookie: header is regarded by Mozilla as a missing
@@ -581,10 +571,7 @@ class CookieTests(unittest.TestCase):
self.assertEqual(interact_netscape(c, "http://www.acme.com/foo/"),
'"spam"; eggs')
- def test_rfc2109_handling(self):
- # RFC 2109 cookies are handled as RFC 2965 or Netscape cookies,
- # dependent on policy settings
- for rfc2109_as_netscape, rfc2965, version in [
+ @support.subTests('rfc2109_as_netscape,rfc2965,version', [
# default according to rfc2965 if not explicitly specified
(None, False, 0),
(None, True, 1),
@@ -593,24 +580,27 @@ class CookieTests(unittest.TestCase):
(False, True, 1),
(True, False, 0),
(True, True, 0),
- ]:
- policy = DefaultCookiePolicy(
- rfc2109_as_netscape=rfc2109_as_netscape,
- rfc2965=rfc2965)
- c = CookieJar(policy)
- interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
- try:
- cookie = c._cookies["www.example.com"]["/"]["ni"]
- except KeyError:
- self.assertIsNone(version) # didn't expect a stored cookie
- else:
- self.assertEqual(cookie.version, version)
- # 2965 cookies are unaffected
- interact_2965(c, "http://www.example.com/",
- "foo=bar; Version=1")
- if rfc2965:
- cookie2965 = c._cookies["www.example.com"]["/"]["foo"]
- self.assertEqual(cookie2965.version, 1)
+ ])
+ def test_rfc2109_handling(self, rfc2109_as_netscape, rfc2965, version):
+ # RFC 2109 cookies are handled as RFC 2965 or Netscape cookies,
+ # dependent on policy settings
+ policy = DefaultCookiePolicy(
+ rfc2109_as_netscape=rfc2109_as_netscape,
+ rfc2965=rfc2965)
+ c = CookieJar(policy)
+ interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
+ try:
+ cookie = c._cookies["www.example.com"]["/"]["ni"]
+ except KeyError:
+ self.assertIsNone(version) # didn't expect a stored cookie
+ else:
+ self.assertEqual(cookie.version, version)
+ # 2965 cookies are unaffected
+ interact_2965(c, "http://www.example.com/",
+ "foo=bar; Version=1")
+ if rfc2965:
+ cookie2965 = c._cookies["www.example.com"]["/"]["foo"]
+ self.assertEqual(cookie2965.version, 1)
def test_ns_parser(self):
c = CookieJar()
@@ -778,8 +768,7 @@ class CookieTests(unittest.TestCase):
# Cookie is sent back to the same URI.
self.assertEqual(interact_netscape(cj, uri), value)
- def test_escape_path(self):
- cases = [
+ @support.subTests('arg,result', [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
@@ -799,9 +788,9 @@ class CookieTests(unittest.TestCase):
("/foo/bar\u00fc", "/foo/bar%C3%BC"), # UTF-8 encoded
# unicode
("/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
- ]
- for arg, result in cases:
- self.assertEqual(escape_path(arg), result)
+ ])
+ def test_escape_path(self, arg, result):
+ self.assertEqual(escape_path(arg), result)
def test_request_path(self):
# with parameters
diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py
index 0af1c45ecb2..2548a7c5f29 100644
--- a/Lib/test/test_httpservers.py
+++ b/Lib/test/test_httpservers.py
@@ -21,6 +21,7 @@ import email.utils
import html
import http, http.client
import urllib.parse
+import urllib.request
import tempfile
import time
import datetime
@@ -33,6 +34,8 @@ from test import support
from test.support import (
is_apple, import_helper, os_helper, threading_helper
)
+from test.support.script_helper import kill_python, spawn_python
+from test.support.socket_helper import find_unused_port
try:
import ssl
@@ -1452,6 +1455,73 @@ class CommandLineTestCase(unittest.TestCase):
self.assertIn('error', stderr.getvalue())
+class CommandLineRunTimeTestCase(unittest.TestCase):
+ served_data = os.urandom(32)
+ served_filename = 'served_filename'
+ tls_cert = certdata_file('ssl_cert.pem')
+ tls_key = certdata_file('ssl_key.pem')
+ tls_password = b'somepass'
+ tls_password_file = 'ssl_key_password'
+
+ def setUp(self):
+ super().setUp()
+ server_dir_context = os_helper.temp_cwd()
+ server_dir = self.enterContext(server_dir_context)
+ with open(self.served_filename, 'wb') as f:
+ f.write(self.served_data)
+ with open(self.tls_password_file, 'wb') as f:
+ f.write(self.tls_password)
+
+ def fetch_file(self, path, context=None):
+ req = urllib.request.Request(path, method='GET')
+ with urllib.request.urlopen(req, context=context) as res:
+ return res.read()
+
+ def parse_cli_output(self, output):
+ match = re.search(r'Serving (HTTP|HTTPS) on (.+) port (\d+)', output)
+ if match is None:
+ return None, None, None
+ return match.group(1).lower(), match.group(2), int(match.group(3))
+
+ def wait_for_server(self, proc, protocol, bind, port):
+ """Check that the server has been successfully started."""
+ line = proc.stdout.readline().strip()
+ if support.verbose:
+ print()
+ print('python -m http.server: ', line)
+ return self.parse_cli_output(line) == (protocol, bind, port)
+
+ def test_http_client(self):
+ bind, port = '127.0.0.1', find_unused_port()
+ proc = spawn_python('-u', '-m', 'http.server', str(port), '-b', bind,
+ bufsize=1, text=True)
+ self.addCleanup(kill_python, proc)
+ self.addCleanup(proc.terminate)
+ self.assertTrue(self.wait_for_server(proc, 'http', bind, port))
+ res = self.fetch_file(f'http://{bind}:{port}/{self.served_filename}')
+ self.assertEqual(res, self.served_data)
+
+ @unittest.skipIf(ssl is None, "requires ssl")
+ def test_https_client(self):
+ context = ssl.create_default_context()
+ # allow self-signed certificates
+ context.check_hostname = False
+ context.verify_mode = ssl.CERT_NONE
+
+ bind, port = '127.0.0.1', find_unused_port()
+ proc = spawn_python('-u', '-m', 'http.server', str(port), '-b', bind,
+ '--tls-cert', self.tls_cert,
+ '--tls-key', self.tls_key,
+ '--tls-password-file', self.tls_password_file,
+ bufsize=1, text=True)
+ self.addCleanup(kill_python, proc)
+ self.addCleanup(proc.terminate)
+ self.assertTrue(self.wait_for_server(proc, 'https', bind, port))
+ url = f'https://{bind}:{port}/{self.served_filename}'
+ res = self.fetch_file(url, context=context)
+ self.assertEqual(res, self.served_data)
+
+
def setUpModule():
unittest.addModuleCleanup(os.chdir, os.getcwd())
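The new CommandLineRunTimeTestCase drives `python -m http.server` end to end; the sketch below shows the same flow outside the test harness, assuming a self-signed certificate and key at the hypothetical paths cert.pem and key.pem and the --tls-cert/--tls-key options added by this change.

import ssl
import subprocess
import sys
import urllib.request

proc = subprocess.Popen(
    [sys.executable, '-u', '-m', 'http.server', '8443', '-b', '127.0.0.1',
     '--tls-cert', 'cert.pem', '--tls-key', 'key.pem'],
    stdout=subprocess.PIPE, text=True)
try:
    proc.stdout.readline()            # wait for the "Serving HTTPS on ..." banner
    ctx = ssl.create_default_context()
    ctx.check_hostname = False        # accept the self-signed certificate
    ctx.verify_mode = ssl.CERT_NONE
    with urllib.request.urlopen('https://127.0.0.1:8443/', context=ctx) as res:
        print(res.status)
finally:
    proc.terminate()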
diff --git a/Lib/test/test_idle.py b/Lib/test/test_idle.py
index 3d8b7ecc0ec..ebf572ac5ca 100644
--- a/Lib/test/test_idle.py
+++ b/Lib/test/test_idle.py
@@ -16,7 +16,7 @@ idlelib.testing = True
# Unittest.main and test.libregrtest.runtest.runtest_inner
# call load_tests, when present here, to discover tests to run.
-from idlelib.idle_test import load_tests
+from idlelib.idle_test import load_tests # noqa: F401
if __name__ == '__main__':
tk.NoDefaultRoot()
diff --git a/Lib/test/test_inspect/test_inspect.py b/Lib/test/test_inspect/test_inspect.py
index e584fb417b9..79eb103224b 100644
--- a/Lib/test/test_inspect/test_inspect.py
+++ b/Lib/test/test_inspect/test_inspect.py
@@ -5875,9 +5875,9 @@ class TestSignatureDefinitions(unittest.TestCase):
self._test_module_has_signatures(operator)
def test_os_module_has_signatures(self):
- unsupported_signature = {'chmod', 'link', 'utime'}
+ unsupported_signature = {'chmod', 'utime'}
unsupported_signature |= {name for name in
- ['get_terminal_size', 'posix_spawn', 'posix_spawnp',
+ ['get_terminal_size', 'link', 'posix_spawn', 'posix_spawnp',
'register_at_fork', 'startfile']
if hasattr(os, name)}
self._test_module_has_signatures(os, unsupported_signature=unsupported_signature)
diff --git a/Lib/test/test_interpreters/test_api.py b/Lib/test/test_interpreters/test_api.py
index 165949167ce..0ee4582b5d1 100644
--- a/Lib/test/test_interpreters/test_api.py
+++ b/Lib/test/test_interpreters/test_api.py
@@ -1,18 +1,23 @@
+import contextlib
import os
import pickle
+import sys
from textwrap import dedent
import threading
import types
import unittest
from test import support
+from test.support import os_helper
+from test.support import script_helper
from test.support import import_helper
# Raise SkipTest if subinterpreters not supported.
_interpreters = import_helper.import_module('_interpreters')
+from concurrent import interpreters
from test.support import Py_GIL_DISABLED
-from test.support import interpreters
from test.support import force_not_colorized
-from test.support.interpreters import (
+import test._crossinterp_definitions as defs
+from concurrent.interpreters import (
InterpreterError, InterpreterNotFoundError, ExecutionFailed,
)
from .utils import (
@@ -29,6 +34,59 @@ WHENCE_STR_XI = 'cross-interpreter C-API'
WHENCE_STR_STDLIB = '_interpreters module'
+def is_pickleable(obj):
+ try:
+ pickle.dumps(obj)
+ except Exception:
+ return False
+ return True
+
+
+@contextlib.contextmanager
+def defined_in___main__(name, script, *, remove=False):
+ import __main__ as mainmod
+ mainns = vars(mainmod)
+ assert name not in mainns
+ exec(script, mainns, mainns)
+ if remove:
+ yield mainns.pop(name)
+ else:
+ try:
+ yield mainns[name]
+ finally:
+ mainns.pop(name, None)
+
+
+def build_excinfo(exctype, msg=None, formatted=None, errdisplay=None):
+ if isinstance(exctype, type):
+ assert issubclass(exctype, BaseException), exctype
+ exctype = types.SimpleNamespace(
+ __name__=exctype.__name__,
+ __qualname__=exctype.__qualname__,
+ __module__=exctype.__module__,
+ )
+ elif isinstance(exctype, str):
+ module, _, name = exctype.rpartition('.')
+ if not module and name in __builtins__:
+ module = 'builtins'
+ exctype = types.SimpleNamespace(
+ __name__=name,
+ __qualname__=exctype,
+ __module__=module or None,
+ )
+ else:
+ assert isinstance(exctype, types.SimpleNamespace)
+ assert msg is None or isinstance(msg, str), msg
+ assert formatted is None or isinstance(formatted, str), formatted
+ assert errdisplay is None or isinstance(errdisplay, str), errdisplay
+ return types.SimpleNamespace(
+ type=exctype,
+ msg=msg,
+ formatted=formatted,
+ errdisplay=errdisplay,
+ )
+
+
class ModuleTests(TestBase):
def test_queue_aliases(self):
@@ -75,7 +133,7 @@ class CreateTests(TestBase):
main, = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent("""
- from test.support import interpreters
+ from concurrent import interpreters
interp = interpreters.create()
print(interp.id)
"""))
@@ -138,7 +196,7 @@ class GetCurrentTests(TestBase):
main = interpreters.get_main()
interp = interpreters.create()
out = _run_output(interp, dedent("""
- from test.support import interpreters
+ from concurrent import interpreters
cur = interpreters.get_current()
print(cur.id)
"""))
@@ -155,7 +213,7 @@ class GetCurrentTests(TestBase):
with self.subTest('subinterpreter'):
interp = interpreters.create()
out = _run_output(interp, dedent("""
- from test.support import interpreters
+ from concurrent import interpreters
cur = interpreters.get_current()
print(id(cur))
cur = interpreters.get_current()
@@ -167,7 +225,7 @@ class GetCurrentTests(TestBase):
with self.subTest('per-interpreter'):
interp = interpreters.create()
out = _run_output(interp, dedent("""
- from test.support import interpreters
+ from concurrent import interpreters
cur = interpreters.get_current()
print(id(cur))
"""))
@@ -524,7 +582,7 @@ class TestInterpreterClose(TestBase):
main, = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent(f"""
- from test.support import interpreters
+ from concurrent import interpreters
interp = interpreters.Interpreter({interp.id})
try:
interp.close()
@@ -541,7 +599,7 @@ class TestInterpreterClose(TestBase):
self.assertEqual(set(interpreters.list_all()),
{main, interp1, interp2})
interp1.exec(dedent(f"""
- from test.support import interpreters
+ from concurrent import interpreters
interp2 = interpreters.Interpreter({interp2.id})
interp2.close()
interp3 = interpreters.create()
@@ -748,7 +806,7 @@ class TestInterpreterExec(TestBase):
ham()
""")
scriptfile = self.make_script('script.py', tempdir, text="""
- from test.support import interpreters
+ from concurrent import interpreters
def script():
import spam
@@ -769,7 +827,7 @@ class TestInterpreterExec(TestBase):
~~~~~~~~~~~^^^^^^^^
{interpmod_line.strip()}
raise ExecutionFailed(excinfo)
- test.support.interpreters.ExecutionFailed: RuntimeError: uh-oh!
+ concurrent.interpreters.ExecutionFailed: RuntimeError: uh-oh!
Uncaught in the interpreter:
@@ -886,28 +944,46 @@ class TestInterpreterExec(TestBase):
with self.assertRaisesRegex(InterpreterError, 'unrecognized'):
interp.exec('raise Exception("it worked!")')
+ def test_list_comprehension(self):
+ # gh-135450: List comprehensions caused an assertion failure
+ # in _PyCode_CheckNoExternalState()
+ import string
+ r_interp, w_interp = self.pipe()
+
+ interp = interpreters.create()
+ interp.exec(f"""if True:
+ import os
+ comp = [str(i) for i in range(10)]
+ os.write({w_interp}, ''.join(comp).encode())
+ """)
+ self.assertEqual(os.read(r_interp, 10).decode(), string.digits)
+ interp.close()
+
+
# test__interpreters covers the remaining
# Interpreter.exec() behavior.
-def call_func_noop():
- pass
+call_func_noop = defs.spam_minimal
+call_func_ident = defs.spam_returns_arg
+call_func_failure = defs.spam_raises
def call_func_return_shareable():
return (1, None)
-def call_func_return_not_shareable():
- return [1, 2, 3]
+def call_func_return_stateless_func():
+ return (lambda x: x)
-def call_func_failure():
- raise Exception('spam!')
+def call_func_return_pickleable():
+ return [1, 2, 3]
-def call_func_ident(value):
- return value
+def call_func_return_unpickleable():
+ x = 42
+ return (lambda: x)
def get_call_func_closure(value):
@@ -916,6 +992,11 @@ def get_call_func_closure(value):
return call_func_closure
+def call_func_exec_wrapper(script, ns):
+ res = exec(script, ns, ns)
+ return res, ns, id(ns)
+
+
class Spam:
@staticmethod
@@ -1012,86 +1093,663 @@ class TestInterpreterCall(TestBase):
# - preserves info (e.g. SyntaxError)
# - matching error display
- def test_call(self):
+ @contextlib.contextmanager
+ def assert_fails(self, expected):
+ with self.assertRaises(ExecutionFailed) as cm:
+ yield cm
+ uncaught = cm.exception.excinfo
+ self.assertEqual(uncaught.type.__name__, expected.__name__)
+
+ def assert_fails_not_shareable(self):
+ return self.assert_fails(interpreters.NotShareableError)
+
+ def assert_code_equal(self, code1, code2):
+ if code1 == code2:
+ return
+ self.assertEqual(code1.co_name, code2.co_name)
+ self.assertEqual(code1.co_flags, code2.co_flags)
+ self.assertEqual(code1.co_consts, code2.co_consts)
+ self.assertEqual(code1.co_varnames, code2.co_varnames)
+ self.assertEqual(code1.co_cellvars, code2.co_cellvars)
+ self.assertEqual(code1.co_freevars, code2.co_freevars)
+ self.assertEqual(code1.co_names, code2.co_names)
+ self.assertEqual(
+ _testinternalcapi.get_code_var_counts(code1),
+ _testinternalcapi.get_code_var_counts(code2),
+ )
+ self.assertEqual(code1.co_code, code2.co_code)
+
+ def assert_funcs_equal(self, func1, func2):
+ if func1 == func2:
+ return
+ self.assertIs(type(func1), type(func2))
+ self.assertEqual(func1.__name__, func2.__name__)
+ self.assertEqual(func1.__defaults__, func2.__defaults__)
+ self.assertEqual(func1.__kwdefaults__, func2.__kwdefaults__)
+ self.assertEqual(func1.__closure__, func2.__closure__)
+ self.assert_code_equal(func1.__code__, func2.__code__)
+ self.assertEqual(
+ _testinternalcapi.get_code_var_counts(func1),
+ _testinternalcapi.get_code_var_counts(func2),
+ )
+
+ def assert_exceptions_equal(self, exc1, exc2):
+ assert isinstance(exc1, Exception)
+ assert isinstance(exc2, Exception)
+ if exc1 == exc2:
+ return
+ self.assertIs(type(exc1), type(exc2))
+ self.assertEqual(exc1.args, exc2.args)
+
+ def test_stateless_funcs(self):
interp = interpreters.create()
- for i, (callable, args, kwargs) in enumerate([
- (call_func_noop, (), {}),
- (Spam.noop, (), {}),
+ func = call_func_noop
+ with self.subTest('no args, no return'):
+ res = interp.call(func)
+ self.assertIsNone(res)
+
+ func = call_func_return_shareable
+ with self.subTest('no args, returns shareable'):
+ res = interp.call(func)
+ self.assertEqual(res, (1, None))
+
+ func = call_func_return_stateless_func
+ expected = (lambda x: x)
+ with self.subTest('no args, returns stateless func'):
+ res = interp.call(func)
+ self.assert_funcs_equal(res, expected)
+
+ func = call_func_return_pickleable
+ with self.subTest('no args, returns pickleable'):
+ res = interp.call(func)
+ self.assertEqual(res, [1, 2, 3])
+
+ func = call_func_return_unpickleable
+ with self.subTest('no args, returns unpickleable'):
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(func)
+
+ def test_stateless_func_returns_arg(self):
+ interp = interpreters.create()
+
+ for arg in [
+ None,
+ 10,
+ 'spam!',
+ b'spam!',
+ (1, 2, 'spam!'),
+ memoryview(b'spam!'),
+ ]:
+ with self.subTest(f'shareable {arg!r}'):
+ assert _interpreters.is_shareable(arg)
+ res = interp.call(defs.spam_returns_arg, arg)
+ self.assertEqual(res, arg)
+
+ for arg in defs.STATELESS_FUNCTIONS:
+ with self.subTest(f'stateless func {arg!r}'):
+ res = interp.call(defs.spam_returns_arg, arg)
+ self.assert_funcs_equal(res, arg)
+
+ for arg in defs.TOP_FUNCTIONS:
+ if arg in defs.STATELESS_FUNCTIONS:
+ continue
+ with self.subTest(f'stateful func {arg!r}'):
+ res = interp.call(defs.spam_returns_arg, arg)
+ self.assert_funcs_equal(res, arg)
+ assert is_pickleable(arg)
+
+ for arg in [
+ Ellipsis,
+ NotImplemented,
+ object(),
+ 2**1000,
+ [1, 2, 3],
+ {'a': 1, 'b': 2},
+ types.SimpleNamespace(x=42),
+ # builtin types
+ object,
+ type,
+ Exception,
+ ModuleNotFoundError,
+ # builtin exceptions
+ Exception('uh-oh!'),
+ ModuleNotFoundError('mymodule'),
+ # builtin functions
+ len,
+ sys.exit,
+ # user classes
+ *defs.TOP_CLASSES,
+ *(c(*a) for c, a in defs.TOP_CLASSES.items()
+ if c not in defs.CLASSES_WITHOUT_EQUALITY),
+ ]:
+ with self.subTest(f'pickleable {arg!r}'):
+ res = interp.call(defs.spam_returns_arg, arg)
+ if type(arg) is object:
+ self.assertIs(type(res), object)
+ elif isinstance(arg, BaseException):
+ self.assert_exceptions_equal(res, arg)
+ else:
+ self.assertEqual(res, arg)
+ assert is_pickleable(arg)
+
+ for arg in [
+ types.MappingProxyType({}),
+ *(f for f in defs.NESTED_FUNCTIONS
+ if f not in defs.STATELESS_FUNCTIONS),
+ ]:
+ with self.subTest(f'unpickleable {arg!r}'):
+ assert not _interpreters.is_shareable(arg)
+ assert not is_pickleable(arg)
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(defs.spam_returns_arg, arg)
+
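The distinction the test above exercises, sketched in isolation and assuming a build that exposes the low-level _interpreters module: "shareable" objects cross the interpreter boundary directly, while most other arguments and return values have to survive a pickle round trip.

import pickle
import _interpreters   # low-level module backing concurrent.interpreters

for obj in (None, 10, 'spam!', b'spam!', (1, 2, 'spam!')):
    assert _interpreters.is_shareable(obj)        # passed by value, no pickling needed

for obj in ([1, 2, 3], {'a': 1}):
    assert not _interpreters.is_shareable(obj)    # not shareable...
    pickle.dumps(obj)                             # ...but pickleable, so call() still works

try:
    pickle.dumps(lambda: 42)                      # lambdas can't be pickled by reference
except Exception:
    pass                                          # and are therefore rejected by call()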
+ def test_full_args(self):
+ interp = interpreters.create()
+ expected = (1, 2, 3, 4, 5, 6, ('?',), {'g': 7, 'h': 8})
+ func = defs.spam_full_args
+ res = interp.call(func, 1, 2, 3, 4, '?', e=5, f=6, g=7, h=8)
+ self.assertEqual(res, expected)
+
+ def test_full_defaults(self):
+ # pickleable, but not stateless
+ interp = interpreters.create()
+ expected = (-1, -2, -3, -4, -5, -6, (), {'g': 8, 'h': 9})
+ res = interp.call(defs.spam_full_args_with_defaults, g=8, h=9)
+ self.assertEqual(res, expected)
+
+ def test_modified_arg(self):
+ interp = interpreters.create()
+ script = dedent("""
+ a = 7
+ b = 2
+ c = a ** b
+ """)
+ ns = {}
+ expected = {'a': 7, 'b': 2, 'c': 49}
+ res = interp.call(call_func_exec_wrapper, script, ns)
+ obj, resns, resid = res
+ del resns['__builtins__']
+ self.assertIsNone(obj)
+ self.assertEqual(ns, {})
+ self.assertEqual(resns, expected)
+ self.assertNotEqual(resid, id(ns))
+ self.assertNotEqual(resid, id(resns))
+
+ def test_func_in___main___valid(self):
+ # pickleable, already there
+
+ with os_helper.temp_dir() as tempdir:
+ def new_mod(name, text):
+ script_helper.make_script(tempdir, name, dedent(text))
+
+ def run(text):
+ name = 'myscript'
+ text = dedent(f"""
+ import sys
+ sys.path.insert(0, {tempdir!r})
+
+ """) + dedent(text)
+ filename = script_helper.make_script(tempdir, name, text)
+ res = script_helper.assert_python_ok(filename)
+ return res.out.decode('utf-8').strip()
+
+ # no module indirection
+ with self.subTest('no indirection'):
+ text = run(f"""
+ from concurrent import interpreters
+
+ def spam():
+ # This a global var...
+ return __name__
+
+ if __name__ == '__main__':
+ interp = interpreters.create()
+ res = interp.call(spam)
+ print(res)
+ """)
+ self.assertEqual(text, '<fake __main__>')
+
+ # indirect as func, direct interp
+ new_mod('mymod', f"""
+ def run(interp, func):
+ return interp.call(func)
+ """)
+ with self.subTest('indirect as func, direct interp'):
+ text = run(f"""
+ from concurrent import interpreters
+ import mymod
+
+ def spam():
+ # This is a global var...
+ return __name__
+
+ if __name__ == '__main__':
+ interp = interpreters.create()
+ res = mymod.run(interp, spam)
+ print(res)
+ """)
+ self.assertEqual(text, '<fake __main__>')
+
+ # indirect as func, indirect interp
+ new_mod('mymod', f"""
+ from concurrent import interpreters
+ def run(func):
+ interp = interpreters.create()
+ return interp.call(func)
+ """)
+ with self.subTest('indirect as func, indirect interp'):
+ text = run(f"""
+ import mymod
+
+ def spam():
+ # This is a global var...
+ return __name__
+
+ if __name__ == '__main__':
+ res = mymod.run(spam)
+ print(res)
+ """)
+ self.assertEqual(text, '<fake __main__>')
+
+ def test_func_in___main___invalid(self):
+ interp = interpreters.create()
+
+ funcname = f'{__name__.replace(".", "_")}_spam_okay'
+ script = dedent(f"""
+ def {funcname}():
+ # This is a global var...
+ return __name__
+ """)
+
+ with self.subTest('pickleable, added dynamically'):
+ with defined_in___main__(funcname, script) as arg:
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(defs.spam_returns_arg, arg)
+
+ with self.subTest('lying about __main__'):
+ with defined_in___main__(funcname, script, remove=True) as arg:
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(defs.spam_returns_arg, arg)
+
+ def test_func_in___main___hidden(self):
+ # When a top-level function that uses global variables is called
+ # through Interpreter.call(), it will be pickled, sent over,
+ # and unpickled. That requires that it be found in the other
+ # interpreter's __main__ module. However, the original script
+ # that defined the function is only run in the main interpreter,
+ # so pickle.loads() would normally fail.
+ #
+ # We work around this by running the script in the other
+ # interpreter. However, this is a one-off solution for the sake
+ # of unpickling, so we avoid modifying that interpreter's
+ # __main__ module by running the script in a hidden module.
+ #
+ # In this test we verify that the function runs with the hidden
+ # module as its __globals__ when called in the other interpreter,
+ # and that the interpreter's __main__ module is unaffected.
+ text = dedent("""
+ eggs = True
+
+ def spam(*, explicit=False):
+ if explicit:
+ import __main__
+ ns = __main__.__dict__
+ else:
+ # For now we have to have a LOAD_GLOBAL in the
+ # function in order for globals() to actually return
+ # spam.__globals__. Maybe it doesn't go through pickle?
+ # XXX We will fix this later.
+ spam
+ ns = globals()
+
+ func = ns.get('spam')
+ return [
+ id(ns),
+ ns.get('__name__'),
+ ns.get('__file__'),
+ id(func),
+ None if func is None else repr(func),
+ ns.get('eggs'),
+ ns.get('ham'),
+ ]
+
+ if __name__ == "__main__":
+ from concurrent import interpreters
+ interp = interpreters.create()
+
+ ham = True
+ print([
+ [
+ spam(explicit=True),
+ spam(),
+ ],
+ [
+ interp.call(spam, explicit=True),
+ interp.call(spam),
+ ],
+ ])
+ """)
+ with os_helper.temp_dir() as tempdir:
+ filename = script_helper.make_script(tempdir, 'my-script', text)
+ res = script_helper.assert_python_ok(filename)
+ stdout = res.out.decode('utf-8').strip()
+ local, remote = eval(stdout)
+
+ # In the main interpreter.
+ main, unpickled = local
+ nsid, _, _, funcid, func, _, _ = main
+ self.assertEqual(main, [
+ nsid,
+ '__main__',
+ filename,
+ funcid,
+ func,
+ True,
+ True,
+ ])
+ self.assertIsNot(func, None)
+ self.assertRegex(func, '^<function spam at 0x.*>$')
+ self.assertEqual(unpickled, main)
+
+ # In the subinterpreter.
+ main, unpickled = remote
+ nsid1, _, _, funcid1, _, _, _ = main
+ self.assertEqual(main, [
+ nsid1,
+ '__main__',
+ None,
+ funcid1,
+ None,
+ None,
+ None,
+ ])
+ nsid2, _, _, funcid2, func, _, _ = unpickled
+ self.assertEqual(unpickled, [
+ nsid2,
+ '<fake __main__>',
+ filename,
+ funcid2,
+ func,
+ True,
+ None,
+ ])
+ self.assertIsNot(func, None)
+ self.assertRegex(func, '^<function spam at 0x.*>$')
+ self.assertNotEqual(nsid2, nsid1)
+ self.assertNotEqual(funcid2, funcid1)
+
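The hidden-module workaround described in test_func_in___main___hidden exists because pickle serializes module-level functions by reference (module name plus qualified name) rather than by value, so the unpickling interpreter has to be able to resolve that name again. A minimal illustration of the underlying pickle behavior:

import pickle
import pickletools

def spam():
    return __name__

data = pickle.dumps(spam)
pickletools.dis(data)        # the stream is just a module + qualname reference to 'spam'

# Unpickling looks the name up again, so it only works where the name resolves;
# in the same interpreter we simply get the original function object back.
restored = pickle.loads(data)
assert restored is spam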
+ def test_func_in___main___uses_globals(self):
+ # See the note in test_func_in___main___hidden about pickle
+ # and the __main__ module.
+ #
+ # Additionally, the solution to that problem must provide
+ # for global variables on which a pickled function might rely.
+ #
+ # To check that, we run a script that has two global functions
+ # and a global variable in the __main__ module. One of the
+ # functions sets the global variable and the other returns
+ # the value.
+ #
+ # The script calls those functions multiple times in another
+ # interpreter, to verify the following:
+ #
+ # * the global variable is properly initialized
+ # * the global variable retains state between calls
+ # * the setter modifies that persistent variable
+ # * the getter uses the variable
+ # * the calls in the other interpreter do not modify
+ # the main interpreter
+ # * those calls don't modify the interpreter's __main__ module
+ # * the functions and variable do not actually show up in the
+ # other interpreter's __main__ module
+ text = dedent("""
+ count = 0
+
+ def inc(x=1):
+ global count
+ count += x
+
+ def get_count():
+ return count
+
+ if __name__ == "__main__":
+ counts = []
+ results = [count, counts]
+
+ from concurrent import interpreters
+ interp = interpreters.create()
+
+ val = interp.call(get_count)
+ counts.append(val)
+
+ interp.call(inc)
+ val = interp.call(get_count)
+ counts.append(val)
+
+ interp.call(inc, 3)
+ val = interp.call(get_count)
+ counts.append(val)
+
+ results.append(count)
+
+ modified = {name: interp.call(eval, f'{name!r} in vars()')
+ for name in ('count', 'inc', 'get_count')}
+ results.append(modified)
+
+ print(results)
+ """)
+ with os_helper.temp_dir() as tempdir:
+ filename = script_helper.make_script(tempdir, 'my-script', text)
+ res = script_helper.assert_python_ok(filename)
+ stdout = res.out.decode('utf-8').strip()
+ before, counts, after, modified = eval(stdout)
+ self.assertEqual(modified, {
+ 'count': False,
+ 'inc': False,
+ 'get_count': False,
+ })
+ self.assertEqual(before, 0)
+ self.assertEqual(after, 0)
+ self.assertEqual(counts, [0, 1, 4])
+
+ def test_raises(self):
+ interp = interpreters.create()
+ with self.assertRaises(ExecutionFailed):
+ interp.call(call_func_failure)
+
+ with self.assert_fails(ValueError):
+ interp.call(call_func_complex, '???', exc=ValueError('spam'))
+
+ def test_call_valid(self):
+ interp = interpreters.create()
+
+ for i, (callable, args, kwargs, expected) in enumerate([
+ (call_func_noop, (), {}, None),
+ (call_func_ident, ('spamspamspam',), {}, 'spamspamspam'),
+ (call_func_return_shareable, (), {}, (1, None)),
+ (call_func_return_pickleable, (), {}, [1, 2, 3]),
+ (Spam.noop, (), {}, None),
+ (Spam.from_values, (), {}, Spam(())),
+ (Spam.from_values, (1, 2, 3), {}, Spam((1, 2, 3))),
+ (Spam, ('???',), {}, Spam('???')),
+ (Spam(101), (), {}, (101, (), {})),
+ (Spam(10101).run, (), {}, (10101, (), {})),
+ (call_func_complex, ('ident', 'spam'), {}, 'spam'),
+ (call_func_complex, ('full-ident', 'spam'), {}, ('spam', (), {})),
+ (call_func_complex, ('full-ident', 'spam', 'ham'), {'eggs': '!!!'},
+ ('spam', ('ham',), {'eggs': '!!!'})),
+ (call_func_complex, ('globals',), {}, __name__),
+ (call_func_complex, ('interpid',), {}, interp.id),
+ (call_func_complex, ('custom', 'spam!'), {}, Spam('spam!')),
]):
with self.subTest(f'success case #{i+1}'):
- res = interp.call(callable)
- self.assertIs(res, None)
+ res = interp.call(callable, *args, **kwargs)
+ self.assertEqual(res, expected)
+
+ def test_call_invalid(self):
+ interp = interpreters.create()
+
+ func = get_call_func_closure
+ with self.subTest(func):
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(func, 42)
+
+ func = get_call_func_closure(42)
+ with self.subTest(func):
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(func)
+
+ func = call_func_complex
+ op = 'closure'
+ with self.subTest(f'{func} ({op})'):
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(func, op, value='~~~')
+
+ op = 'custom-inner'
+ with self.subTest(f'{func} ({op})'):
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(func, op, 'eggs!')
+
+ def test_callable_requires_frame(self):
+ # There are various functions that require a current frame.
+ interp = interpreters.create()
+ for call, expected in [
+ ((eval, '[1, 2, 3]'),
+ [1, 2, 3]),
+ ((eval, 'sum([1, 2, 3])'),
+ 6),
+ ((exec, '...'),
+ None),
+ ]:
+ with self.subTest(str(call)):
+ res = interp.call(*call)
+ self.assertEqual(res, expected)
+
+ result_not_pickleable = [
+ globals,
+ locals,
+ vars,
+ ]
+ for func, expectedtype in {
+ globals: dict,
+ locals: dict,
+ vars: dict,
+ dir: list,
+ }.items():
+ with self.subTest(str(func)):
+ if func in result_not_pickleable:
+ with self.assertRaises(interpreters.NotShareableError):
+ interp.call(func)
+ else:
+ res = interp.call(func)
+ self.assertIsInstance(res, expectedtype)
+ self.assertIn('__builtins__', res)
+
+ def test_globals_from_builtins(self):
+ # The builtins exec(), eval(), globals(), locals(), vars(),
+ # and dir() each run relative to the target interpreter's
+ # __main__ module when called directly. However, globals(),
+ # locals(), and vars() don't work when called directly, so
+ # we don't check them.
+ from _frozen_importlib import BuiltinImporter
+ interp = interpreters.create()
+
+ names = interp.call(dir)
+ self.assertEqual(names, [
+ '__builtins__',
+ '__doc__',
+ '__loader__',
+ '__name__',
+ '__package__',
+ '__spec__',
+ ])
+
+ values = {name: interp.call(eval, name)
+ for name in names if name != '__builtins__'}
+ self.assertEqual(values, {
+ '__name__': '__main__',
+ '__doc__': None,
+ '__spec__': None, # It wasn't imported, so no module spec?
+ '__package__': None,
+ '__loader__': BuiltinImporter,
+ })
+ with self.assertRaises(ExecutionFailed):
+ interp.call(eval, 'spam'),
+
+ interp.call(exec, f'assert dir() == {names}')
+
+ # Update the interpreter's __main__.
+ interp.prepare_main(spam=42)
+ expected = names + ['spam']
+
+ names = interp.call(dir)
+ self.assertEqual(names, expected)
+
+ value = interp.call(eval, 'spam')
+ self.assertEqual(value, 42)
+
+ interp.call(exec, f'assert dir() == {expected}, dir()')
+
+ def test_globals_from_stateless_func(self):
+ # A stateless func, which doesn't depend on any globals,
+ # doesn't go through pickle, so it runs in __main__.
+ def set_global(name, value):
+ globals()[name] = value
+
+ def get_global(name):
+ return globals().get(name)
+
+ interp = interpreters.create()
+
+ modname = interp.call(get_global, '__name__')
+ self.assertEqual(modname, '__main__')
+
+ res = interp.call(get_global, 'spam')
+ self.assertIsNone(res)
+
+ interp.exec('spam = True')
+ res = interp.call(get_global, 'spam')
+ self.assertTrue(res)
+
+ interp.call(set_global, 'spam', 42)
+ res = interp.call(get_global, 'spam')
+ self.assertEqual(res, 42)
+
+ interp.exec('assert spam == 42, repr(spam)')
+
+ def test_call_in_thread(self):
+ interp = interpreters.create()
for i, (callable, args, kwargs) in enumerate([
- (call_func_ident, ('spamspamspam',), {}),
- (get_call_func_closure, (42,), {}),
- (get_call_func_closure(42), (), {}),
+ (call_func_noop, (), {}),
+ (call_func_return_shareable, (), {}),
+ (call_func_return_pickleable, (), {}),
(Spam.from_values, (), {}),
(Spam.from_values, (1, 2, 3), {}),
- (Spam, ('???'), {}),
(Spam(101), (), {}),
(Spam(10101).run, (), {}),
+ (Spam.noop, (), {}),
(call_func_complex, ('ident', 'spam'), {}),
(call_func_complex, ('full-ident', 'spam'), {}),
(call_func_complex, ('full-ident', 'spam', 'ham'), {'eggs': '!!!'}),
(call_func_complex, ('globals',), {}),
(call_func_complex, ('interpid',), {}),
- (call_func_complex, ('closure',), {'value': '~~~'}),
(call_func_complex, ('custom', 'spam!'), {}),
- (call_func_complex, ('custom-inner', 'eggs!'), {}),
- (call_func_complex, ('???',), {'exc': ValueError('spam')}),
- (call_func_return_shareable, (), {}),
- (call_func_return_not_shareable, (), {}),
- ]):
- with self.subTest(f'invalid case #{i+1}'):
- with self.assertRaises(Exception):
- if args or kwargs:
- raise Exception((args, kwargs))
- interp.call(callable)
-
- with self.assertRaises(ExecutionFailed):
- interp.call(call_func_failure)
-
- def test_call_in_thread(self):
- interp = interpreters.create()
-
- for i, (callable, args, kwargs) in enumerate([
- (call_func_noop, (), {}),
- (Spam.noop, (), {}),
]):
with self.subTest(f'success case #{i+1}'):
with self.captured_thread_exception() as ctx:
- t = interp.call_in_thread(callable)
+ t = interp.call_in_thread(callable, *args, **kwargs)
t.join()
self.assertIsNone(ctx.caught)
for i, (callable, args, kwargs) in enumerate([
- (call_func_ident, ('spamspamspam',), {}),
(get_call_func_closure, (42,), {}),
(get_call_func_closure(42), (), {}),
- (Spam.from_values, (), {}),
- (Spam.from_values, (1, 2, 3), {}),
- (Spam, ('???'), {}),
- (Spam(101), (), {}),
- (Spam(10101).run, (), {}),
- (call_func_complex, ('ident', 'spam'), {}),
- (call_func_complex, ('full-ident', 'spam'), {}),
- (call_func_complex, ('full-ident', 'spam', 'ham'), {'eggs': '!!!'}),
- (call_func_complex, ('globals',), {}),
- (call_func_complex, ('interpid',), {}),
- (call_func_complex, ('closure',), {'value': '~~~'}),
- (call_func_complex, ('custom', 'spam!'), {}),
- (call_func_complex, ('custom-inner', 'eggs!'), {}),
- (call_func_complex, ('???',), {'exc': ValueError('spam')}),
- (call_func_return_shareable, (), {}),
- (call_func_return_not_shareable, (), {}),
]):
with self.subTest(f'invalid case #{i+1}'):
- if args or kwargs:
- continue
with self.captured_thread_exception() as ctx:
- t = interp.call_in_thread(callable)
+ t = interp.call_in_thread(callable, *args, **kwargs)
t.join()
self.assertIsNotNone(ctx.caught)
@@ -1600,18 +2258,14 @@ class LowLevelTests(TestBase):
with results:
exc = _interpreters.exec(interpid, script)
out = results.stdout()
- self.assertEqual(out, '')
- self.assert_ns_equal(exc, types.SimpleNamespace(
- type=types.SimpleNamespace(
- __name__='Exception',
- __qualname__='Exception',
- __module__='builtins',
- ),
- msg='uh-oh!',
+ expected = build_excinfo(
+ Exception, 'uh-oh!',
# We check these in other tests.
formatted=exc.formatted,
errdisplay=exc.errdisplay,
- ))
+ )
+ self.assertEqual(out, '')
+ self.assert_ns_equal(exc, expected)
with self.subTest('from C-API'):
with self.interpreter_from_capi() as interpid:
@@ -1623,25 +2277,50 @@ class LowLevelTests(TestBase):
self.assertEqual(exc.msg, 'it worked!')
def test_call(self):
- with self.subTest('no args'):
- interpid = _interpreters.create()
- with self.assertRaises(ValueError):
- _interpreters.call(interpid, call_func_return_shareable)
+ interpid = _interpreters.create()
+
+ # Here we focus on basic args and return values.
+ # See TestInterpreterCall for full operational coverage,
+ # including supported callables.
+
+ with self.subTest('no args, return None'):
+ func = defs.spam_minimal
+ res, exc = _interpreters.call(interpid, func)
+ self.assertIsNone(exc)
+ self.assertIsNone(res)
+
+ with self.subTest('empty args, return None'):
+ func = defs.spam_minimal
+ res, exc = _interpreters.call(interpid, func, (), {})
+ self.assertIsNone(exc)
+ self.assertIsNone(res)
+
+ with self.subTest('no args, return non-None'):
+ func = defs.script_with_return
+ res, exc = _interpreters.call(interpid, func)
+ self.assertIsNone(exc)
+ self.assertIs(res, True)
+
+ with self.subTest('full args, return non-None'):
+ expected = (1, 2, 3, 4, 5, 6, (7, 8), {'g': 9, 'h': 0})
+ func = defs.spam_full_args
+ args = (1, 2, 3, 4, 7, 8)
+ kwargs = dict(e=5, f=6, g=9, h=0)
+ res, exc = _interpreters.call(interpid, func, args, kwargs)
+ self.assertIsNone(exc)
+ self.assertEqual(res, expected)
with self.subTest('uncaught exception'):
- interpid = _interpreters.create()
- exc = _interpreters.call(interpid, call_func_failure)
- self.assertEqual(exc, types.SimpleNamespace(
- type=types.SimpleNamespace(
- __name__='Exception',
- __qualname__='Exception',
- __module__='builtins',
- ),
- msg='spam!',
+ func = defs.spam_raises
+ res, exc = _interpreters.call(interpid, func)
+ expected = build_excinfo(
+ Exception, 'spam!',
# We check these in other tests.
formatted=exc.formatted,
errdisplay=exc.errdisplay,
- ))
+ )
+ self.assertIsNone(res)
+ self.assertEqual(exc, expected)
@requires_test_modules
def test_set___main___attrs(self):
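
For orientation, the call API these hunks migrate and extend lives in `concurrent.interpreters`, the new module path used throughout this patch. A minimal sketch of the behaviour the tests above rely on; `double` is a hypothetical stateless helper, not part of the patch:

    from concurrent import interpreters

    def double(x):
        # Stateless functions don't go through pickle and run in the
        # target interpreter's __main__ module.
        return x * 2

    interp = interpreters.create()
    assert interp.call(double, 21) == 42            # positional args
    t = interp.call_in_thread(double, x=21)         # kwargs; runs in a thread
    t.join()
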
diff --git a/Lib/test/test_interpreters/test_channels.py b/Lib/test/test_interpreters/test_channels.py
index 0c027b17cea..109ddf34453 100644
--- a/Lib/test/test_interpreters/test_channels.py
+++ b/Lib/test/test_interpreters/test_channels.py
@@ -8,8 +8,8 @@ import time
from test.support import import_helper
# Raise SkipTest if subinterpreters not supported.
_channels = import_helper.import_module('_interpchannels')
-from test.support import interpreters
-from test.support.interpreters import channels
+from concurrent import interpreters
+from test.support import channels
from .utils import _run_output, TestBase
@@ -171,7 +171,7 @@ class TestSendRecv(TestBase):
def test_send_recv_same_interpreter(self):
interp = interpreters.create()
interp.exec(dedent("""
- from test.support.interpreters import channels
+ from test.support import channels
r, s = channels.create()
orig = b'spam'
s.send_nowait(orig)
@@ -244,7 +244,7 @@ class TestSendRecv(TestBase):
def test_send_recv_nowait_same_interpreter(self):
interp = interpreters.create()
interp.exec(dedent("""
- from test.support.interpreters import channels
+ from test.support import channels
r, s = channels.create()
orig = b'spam'
s.send_nowait(orig)
@@ -387,7 +387,7 @@ class TestSendRecv(TestBase):
interp = interpreters.create()
_run_output(interp, dedent(f"""
- from test.support.interpreters import channels
+ from test.support import channels
sch = channels.SendChannel({sch.id})
obj1 = b'spam'
obj2 = b'eggs'
@@ -482,7 +482,7 @@ class TestSendRecv(TestBase):
self.assertEqual(_channels.get_count(rch.id), 0)
_run_output(interp, dedent(f"""
- from test.support.interpreters import channels
+ from test.support import channels
sch = channels.SendChannel({sch.id})
sch.send_nowait(1, unbounditems=channels.UNBOUND)
sch.send_nowait(2, unbounditems=channels.UNBOUND_ERROR)
@@ -518,7 +518,7 @@ class TestSendRecv(TestBase):
sch.send_nowait(1)
_run_output(interp1, dedent(f"""
- from test.support.interpreters import channels
+ from test.support import channels
rch = channels.RecvChannel({rch.id})
sch = channels.SendChannel({sch.id})
obj1 = rch.recv()
@@ -526,7 +526,7 @@ class TestSendRecv(TestBase):
sch.send_nowait(obj1, unbounditems=channels.UNBOUND_REMOVE)
"""))
_run_output(interp2, dedent(f"""
- from test.support.interpreters import channels
+ from test.support import channels
rch = channels.RecvChannel({rch.id})
sch = channels.SendChannel({sch.id})
obj2 = rch.recv()
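
The channel hunks above only change import paths, but as a reminder of the API they exercise, a rough sketch based on the tests (channel ends can be rebuilt in another interpreter from their numeric ids):

    from test.support import channels

    rch, sch = channels.create()        # receiver, sender
    sch.send_nowait(b'spam')
    assert rch.recv() == b'spam'        # same-interpreter round trip

    # In another interpreter the ends are reconstructed from their ids:
    #     channels.RecvChannel(rch.id), channels.SendChannel(sch.id)
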
diff --git a/Lib/test/test_interpreters/test_lifecycle.py b/Lib/test/test_interpreters/test_lifecycle.py
index ac24f6568ac..15537ac6cc8 100644
--- a/Lib/test/test_interpreters/test_lifecycle.py
+++ b/Lib/test/test_interpreters/test_lifecycle.py
@@ -119,7 +119,7 @@ class StartupTests(TestBase):
# The main interpreter's sys.path[0] should be used by subinterpreters.
script = '''
import sys
- from test.support import interpreters
+ from concurrent import interpreters
orig = sys.path[0]
@@ -170,7 +170,7 @@ class FinalizationTests(TestBase):
# is reported, even when subinterpreters get cleaned up at the end.
import subprocess
argv = [sys.executable, '-c', '''if True:
- from test.support import interpreters
+ from concurrent import interpreters
interp = interpreters.create()
raise Exception
''']
diff --git a/Lib/test/test_interpreters/test_queues.py b/Lib/test/test_interpreters/test_queues.py
index 64a2db1230d..cb17340f581 100644
--- a/Lib/test/test_interpreters/test_queues.py
+++ b/Lib/test/test_interpreters/test_queues.py
@@ -7,9 +7,8 @@ import unittest
from test.support import import_helper, Py_DEBUG
# Raise SkipTest if subinterpreters not supported.
_queues = import_helper.import_module('_interpqueues')
-from test.support import interpreters
-from test.support.interpreters import queues, _crossinterp
-import test._crossinterp_definitions as defs
+from concurrent import interpreters
+from concurrent.interpreters import _queues as queues, _crossinterp
from .utils import _run_output, TestBase as _TestBase
@@ -127,7 +126,7 @@ class QueueTests(TestBase):
interp = interpreters.create()
interp.exec(dedent(f"""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue1 = queues.Queue({queue1.id})
"""));
@@ -209,18 +208,64 @@ class TestQueueOps(TestBase):
self.assertIs(after, True)
def test_full(self):
- expected = [False, False, False, True, False, False, False]
- actual = []
- queue = queues.create(3)
- for _ in range(3):
- actual.append(queue.full())
- queue.put(None)
- actual.append(queue.full())
- for _ in range(3):
- queue.get()
- actual.append(queue.full())
+ for maxsize in [1, 3, 11]:
+ with self.subTest(f'maxsize={maxsize}'):
+ num_to_add = maxsize
+ expected = [False] * (num_to_add * 2 + 3)
+ expected[maxsize] = True
+ expected[maxsize + 1] = True
+
+ queue = queues.create(maxsize)
+ actual = []
+ empty = [queue.empty()]
+
+ for _ in range(num_to_add):
+ actual.append(queue.full())
+ queue.put_nowait(None)
+ actual.append(queue.full())
+ with self.assertRaises(queues.QueueFull):
+ queue.put_nowait(None)
+ empty.append(queue.empty())
+
+ for _ in range(num_to_add):
+ actual.append(queue.full())
+ queue.get_nowait()
+ actual.append(queue.full())
+ with self.assertRaises(queues.QueueEmpty):
+ queue.get_nowait()
+ actual.append(queue.full())
+ empty.append(queue.empty())
- self.assertEqual(actual, expected)
+ self.assertEqual(actual, expected)
+ self.assertEqual(empty, [True, False, True])
+
+ # no max size
+ for args in [(), (0,), (-1,), (-10,)]:
+ with self.subTest(f'maxsize={args[0]}' if args else '<default>'):
+ num_to_add = 13
+ expected = [False] * (num_to_add * 2 + 3)
+
+ queue = queues.create(*args)
+ actual = []
+ empty = [queue.empty()]
+
+ for _ in range(num_to_add):
+ actual.append(queue.full())
+ queue.put_nowait(None)
+ actual.append(queue.full())
+ empty.append(queue.empty())
+
+ for _ in range(num_to_add):
+ actual.append(queue.full())
+ queue.get_nowait()
+ actual.append(queue.full())
+ with self.assertRaises(queues.QueueEmpty):
+ queue.get_nowait()
+ actual.append(queue.full())
+ empty.append(queue.empty())
+
+ self.assertEqual(actual, expected)
+ self.assertEqual(empty, [True, False, True])
def test_qsize(self):
expected = [0, 1, 2, 3, 2, 3, 2, 1, 0, 1, 0]
@@ -325,7 +370,7 @@ class TestQueueOps(TestBase):
def test_put_get_same_interpreter(self):
interp = interpreters.create()
interp.exec(dedent("""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue = queues.create()
"""))
for methname in ('get', 'get_nowait'):
@@ -352,7 +397,7 @@ class TestQueueOps(TestBase):
out = _run_output(
interp,
dedent(f"""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue1 = queues.Queue({queue1.id})
queue2 = queues.Queue({queue2.id})
assert queue1.qsize() == 1, 'expected: queue1.qsize() == 1'
@@ -391,7 +436,7 @@ class TestQueueOps(TestBase):
interp = interpreters.create()
_run_output(interp, dedent(f"""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue = queues.Queue({queue.id})
obj1 = b'spam'
obj2 = b'eggs'
@@ -469,7 +514,7 @@ class TestQueueOps(TestBase):
queue = queues.create()
interp = interpreters.create()
_run_output(interp, dedent(f"""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue = queues.Queue({queue.id})
queue.put(1, unbounditems=queues.UNBOUND)
queue.put(2, unbounditems=queues.UNBOUND_ERROR)
@@ -505,14 +550,14 @@ class TestQueueOps(TestBase):
queue.put(1)
_run_output(interp1, dedent(f"""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue = queues.Queue({queue.id})
obj1 = queue.get()
queue.put(2, unbounditems=queues.UNBOUND)
queue.put(obj1, unbounditems=queues.UNBOUND_REMOVE)
"""))
_run_output(interp2, dedent(f"""
- from test.support.interpreters import queues
+ from concurrent.interpreters import _queues as queues
queue = queues.Queue({queue.id})
obj2 = queue.get()
obj1 = queue.get()
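
Aside from the import-path churn, the rewritten test_full() above pins down the maxsize and unbounded behaviour of interpreter queues. A small illustrative sketch using the post-move module path (`concurrent.interpreters._queues`):

    from concurrent.interpreters import _queues as queues

    q = queues.create(2)                # maxsize=2; create() alone is unbounded
    assert q.empty() and not q.full()
    q.put_nowait(1)
    q.put_nowait(2)
    assert q.full()
    try:
        q.put_nowait(3)                 # over capacity
    except queues.QueueFull:
        pass
    assert q.get_nowait() == 1          # FIFO
    q.get_nowait()
    try:
        q.get_nowait()                  # now empty
    except queues.QueueEmpty:
        pass
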
diff --git a/Lib/test/test_interpreters/test_stress.py b/Lib/test/test_interpreters/test_stress.py
index fae2f38cb55..e25e67a0d4f 100644
--- a/Lib/test/test_interpreters/test_stress.py
+++ b/Lib/test/test_interpreters/test_stress.py
@@ -6,7 +6,7 @@ from test.support import import_helper
from test.support import threading_helper
# Raise SkipTest if subinterpreters not supported.
import_helper.import_module('_interpreters')
-from test.support import interpreters
+from concurrent import interpreters
from .utils import TestBase
diff --git a/Lib/test/test_interpreters/utils.py b/Lib/test/test_interpreters/utils.py
index fc4ad662e03..ae09aa457b4 100644
--- a/Lib/test/test_interpreters/utils.py
+++ b/Lib/test/test_interpreters/utils.py
@@ -12,7 +12,6 @@ from textwrap import dedent
import threading
import types
import unittest
-import warnings
from test import support
@@ -22,7 +21,7 @@ try:
import _interpreters
except ImportError as exc:
raise unittest.SkipTest(str(exc))
-from test.support import interpreters
+from concurrent import interpreters
try:
diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py
index 168e66c5a3f..b487bcabf01 100644
--- a/Lib/test/test_io.py
+++ b/Lib/test/test_io.py
@@ -9,6 +9,7 @@
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
+# * test_free_threading/test_io - tests thread safety of io objects
################################################################################
# ATTENTION TEST WRITERS!!!
@@ -1062,6 +1063,37 @@ class IOTest(unittest.TestCase):
# Silence destructor error
R.flush = lambda self: None
+ @threading_helper.requires_working_threading()
+ def test_write_readline_races(self):
+ # gh-134908: Concurrent iteration over a file caused races
+ thread_count = 2
+ write_count = 100
+ read_count = 100
+
+ def writer(file, barrier):
+ barrier.wait()
+ for _ in range(write_count):
+ file.write("x")
+
+ def reader(file, barrier):
+ barrier.wait()
+ for _ in range(read_count):
+ for line in file:
+ self.assertEqual(line, "")
+
+ with self.open(os_helper.TESTFN, "w+") as f:
+ barrier = threading.Barrier(thread_count + 1)
+ reader = threading.Thread(target=reader, args=(f, barrier))
+ writers = [threading.Thread(target=writer, args=(f, barrier))
+ for _ in range(thread_count)]
+ with threading_helper.catch_threading_exception() as cm:
+ with threading_helper.start_threads(writers + [reader]):
+ pass
+ self.assertIsNone(cm.exc_type)
+
+ self.assertEqual(os.stat(os_helper.TESTFN).st_size,
+ write_count * thread_count)
+
class CIOTest(IOTest):
diff --git a/Lib/test/test_ioctl.py b/Lib/test/test_ioctl.py
index 3c7a58aa2bc..277d2fc99ea 100644
--- a/Lib/test/test_ioctl.py
+++ b/Lib/test/test_ioctl.py
@@ -5,7 +5,7 @@ import sys
import threading
import unittest
from test import support
-from test.support import threading_helper
+from test.support import os_helper, threading_helper
from test.support.import_helper import import_module
fcntl = import_module('fcntl')
termios = import_module('termios')
@@ -201,6 +201,17 @@ class IoctlTestsPty(unittest.TestCase):
new_winsz = struct.unpack("HHHH", result)
self.assertEqual(new_winsz[:2], (20, 40))
+ @unittest.skipUnless(hasattr(fcntl, 'FICLONE'), 'need fcntl.FICLONE')
+ def test_bad_fd(self):
+ # gh-134744: Test error handling
+ fd = os_helper.make_bad_fd()
+ with self.assertRaises(OSError):
+ fcntl.ioctl(fd, fcntl.FICLONE, fd)
+ with self.assertRaises(OSError):
+ fcntl.ioctl(fd, fcntl.FICLONE, b'\0' * 10)
+ with self.assertRaises(OSError):
+ fcntl.ioctl(fd, fcntl.FICLONE, b'\0' * 2048)
+
if __name__ == "__main__":
unittest.main()

diff --git a/Lib/test/test_ipaddress.py b/Lib/test/test_ipaddress.py
index a06608d0016..db1c38243e2 100644
--- a/Lib/test/test_ipaddress.py
+++ b/Lib/test/test_ipaddress.py
@@ -397,6 +397,19 @@ class AddressTestCase_v6(BaseTestCase, CommonTestMixin_v6):
# A trailing IPv4 address is two parts
assertBadSplit("10:9:8:7:6:5:4:3:42.42.42.42%scope")
+ def test_bad_address_split_v6_too_long(self):
+ def assertBadSplit(addr):
+ msg = r"At most 45 characters expected in '%s"
+ with self.assertAddressError(msg, re.escape(addr[:45])):
+ ipaddress.IPv6Address(addr)
+
+ # Long IPv6 address
+ long_addr = ("0:" * 10000) + "0"
+ assertBadSplit(long_addr)
+ assertBadSplit(long_addr + "%zoneid")
+ assertBadSplit(long_addr + ":255.255.255.255")
+ assertBadSplit(long_addr + ":ffff:255.255.255.255")
+
def test_bad_address_split_v6_too_many_parts(self):
def assertBadSplit(addr):
msg = "Exactly 8 parts expected without '::' in %r"
@@ -2178,6 +2191,11 @@ class IpaddrUnitTest(unittest.TestCase):
self.assertEqual(ipaddress.ip_address('FFFF::192.0.2.1'),
ipaddress.ip_address('FFFF::c000:201'))
+ self.assertEqual(ipaddress.ip_address('0000:0000:0000:0000:0000:FFFF:192.168.255.255'),
+ ipaddress.ip_address('::ffff:c0a8:ffff'))
+ self.assertEqual(ipaddress.ip_address('FFFF:0000:0000:0000:0000:0000:192.168.255.255'),
+ ipaddress.ip_address('ffff::c0a8:ffff'))
+
self.assertEqual(ipaddress.ip_address('::FFFF:192.0.2.1%scope'),
ipaddress.ip_address('::FFFF:c000:201%scope'))
self.assertEqual(ipaddress.ip_address('FFFF::192.0.2.1%scope'),
@@ -2190,6 +2208,10 @@ class IpaddrUnitTest(unittest.TestCase):
ipaddress.ip_address('::FFFF:c000:201%scope'))
self.assertNotEqual(ipaddress.ip_address('FFFF::192.0.2.1'),
ipaddress.ip_address('FFFF::c000:201%scope'))
+ self.assertEqual(ipaddress.ip_address('0000:0000:0000:0000:0000:FFFF:192.168.255.255%scope'),
+ ipaddress.ip_address('::ffff:c0a8:ffff%scope'))
+ self.assertEqual(ipaddress.ip_address('FFFF:0000:0000:0000:0000:0000:192.168.255.255%scope'),
+ ipaddress.ip_address('ffff::c0a8:ffff%scope'))
def testIPVersion(self):
self.assertEqual(ipaddress.IPv4Address.version, 4)
@@ -2599,6 +2621,10 @@ class IpaddrUnitTest(unittest.TestCase):
'::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128',
'7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128',
'0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128',
+ '0000:0000:0000:0000:0000:0000:255.255.255.255': '::ffff:ffff/128',
+ '0000:0000:0000:0000:0000:ffff:255.255.255.255': '::ffff:255.255.255.255/128',
+ 'ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255':
+ 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128',
}
for uncompressed, compressed in list(test_addresses.items()):
self.assertEqual(compressed, str(ipaddress.IPv6Interface(
diff --git a/Lib/test/test_iter.py b/Lib/test/test_iter.py
index 1b9f3cf7624..18e4b676c53 100644
--- a/Lib/test/test_iter.py
+++ b/Lib/test/test_iter.py
@@ -1147,7 +1147,7 @@ class TestCase(unittest.TestCase):
def test_exception_locations(self):
# The location of an exception raised from __init__ or
- # __next__ should should be the iterator expression
+ # __next__ should be the iterator expression
def init_raises():
try:
diff --git a/Lib/test/test_json/test_dump.py b/Lib/test/test_json/test_dump.py
index 13b40020781..39470754003 100644
--- a/Lib/test/test_json/test_dump.py
+++ b/Lib/test/test_json/test_dump.py
@@ -22,6 +22,14 @@ class TestDump:
self.assertIn('valid_key', o)
self.assertNotIn(b'invalid_key', o)
+ def test_dump_skipkeys_indent_empty(self):
+ v = {b'invalid_key': False}
+ self.assertEqual(self.json.dumps(v, skipkeys=True, indent=4), '{}')
+
+ def test_skipkeys_indent(self):
+ v = {b'invalid_key': False, 'valid_key': True}
+ self.assertEqual(self.json.dumps(v, skipkeys=True, indent=4), '{\n "valid_key": true\n}')
+
def test_encode_truefalse(self):
self.assertEqual(self.dumps(
{True: False, False: True}, sort_keys=True),
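
The two new skipkeys cases above pin down how json.dumps behaves when skipkeys=True meets indent: if every key is skipped, the result collapses to a bare '{}' rather than an indented empty object. A quick sketch:

    import json

    print(json.dumps({b'bad': 1}, skipkeys=True, indent=4))
    # {}
    print(json.dumps({b'bad': 1, 'ok': True}, skipkeys=True, indent=4))
    # {
    #     "ok": true
    # }
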
diff --git a/Lib/test/test_json/test_recursion.py b/Lib/test/test_json/test_recursion.py
index 8f0e5e078ed..5d7b56ff9ad 100644
--- a/Lib/test/test_json/test_recursion.py
+++ b/Lib/test/test_json/test_recursion.py
@@ -86,6 +86,7 @@ class TestRecursion:
@support.skip_wasi_stack_overflow()
@support.skip_emscripten_stack_overflow()
+ @support.requires_resource('cpu')
def test_highly_nested_objects_encoding(self):
# See #12051
l, d = [], {}
diff --git a/Lib/test/test_json/test_tool.py b/Lib/test/test_json/test_tool.py
index 9ea2679c77e..30f9bb33316 100644
--- a/Lib/test/test_json/test_tool.py
+++ b/Lib/test/test_json/test_tool.py
@@ -270,7 +270,7 @@ class TestMain(unittest.TestCase):
(r'" \"foo\" "', f'{t.string}" \\"foo\\" "{t.reset}'),
('"α"', f'{t.string}"\\u03b1"{t.reset}'),
('123', f'{t.number}123{t.reset}'),
- ('-1.2345e+23', f'{t.number}-1.2345e+23{t.reset}'),
+ ('-1.25e+23', f'{t.number}-1.25e+23{t.reset}'),
(r'{"\\": ""}',
f'''\
{ob}
diff --git a/Lib/test/test_list.py b/Lib/test/test_list.py
index 6894fba2ad1..223f34fb696 100644
--- a/Lib/test/test_list.py
+++ b/Lib/test/test_list.py
@@ -365,5 +365,20 @@ class ListTest(list_tests.CommonTest):
rc, _, _ = assert_python_ok("-c", code)
self.assertEqual(rc, 0)
+ def test_list_overwrite_local(self):
+ """Test that overwriting the last reference to the
+ iterable doesn't prematurely free the iterable"""
+
+ def foo(x):
+ self.assertEqual(sys.getrefcount(x), 1)
+ r = 0
+ for i in x:
+ r += i
+ x = None
+ return r
+
+ self.assertEqual(foo(list(range(10))), 45)
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_listcomps.py b/Lib/test/test_listcomps.py
index cffdeeacc5d..70148dc30fc 100644
--- a/Lib/test/test_listcomps.py
+++ b/Lib/test/test_listcomps.py
@@ -716,7 +716,7 @@ class ListComprehensionTest(unittest.TestCase):
def test_exception_locations(self):
# The location of an exception raised from __init__ or
- # __next__ should should be the iterator expression
+ # __next__ should be the iterator expression
def init_raises():
try:
diff --git a/Lib/test/test_locale.py b/Lib/test/test_locale.py
index 455d2af37ef..55b502e52ca 100644
--- a/Lib/test/test_locale.py
+++ b/Lib/test/test_locale.py
@@ -387,6 +387,10 @@ class NormalizeTest(unittest.TestCase):
self.check('c', 'C')
self.check('posix', 'C')
+ def test_c_utf8(self):
+ self.check('c.utf8', 'C.UTF-8')
+ self.check('C.UTF-8', 'C.UTF-8')
+
def test_english(self):
self.check('en', 'en_US.ISO8859-1')
self.check('EN', 'en_US.ISO8859-1')
diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py
index fa5b1e43816..3819965ed2c 100644
--- a/Lib/test/test_logging.py
+++ b/Lib/test/test_logging.py
@@ -1036,7 +1036,7 @@ class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
allow_reuse_address = True
- allow_reuse_port = True
+ allow_reuse_port = False
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
@@ -4214,89 +4214,6 @@ class ConfigDictTest(BaseTest):
handler = logging.getHandlerByName('custom')
self.assertEqual(handler.custom_kwargs, custom_kwargs)
- # See gh-91555 and gh-90321
- @support.requires_subprocess()
- def test_deadlock_in_queue(self):
- queue = multiprocessing.Queue()
- handler = logging.handlers.QueueHandler(queue)
- logger = multiprocessing.get_logger()
- level = logger.level
- try:
- logger.setLevel(logging.DEBUG)
- logger.addHandler(handler)
- logger.debug("deadlock")
- finally:
- logger.setLevel(level)
- logger.removeHandler(handler)
-
- def test_recursion_in_custom_handler(self):
- class BadHandler(logging.Handler):
- def __init__(self):
- super().__init__()
- def emit(self, record):
- logger.debug("recurse")
- logger = logging.getLogger("test_recursion_in_custom_handler")
- logger.addHandler(BadHandler())
- logger.setLevel(logging.DEBUG)
- logger.debug("boom")
-
- @threading_helper.requires_working_threading()
- def test_thread_supression_noninterference(self):
- lock = threading.Lock()
- logger = logging.getLogger("test_thread_supression_noninterference")
-
- # Block on the first call, allow others through
- #
- # NOTE: We need to bypass the base class's lock, otherwise that will
- # block multiple calls to the same handler itself.
- class BlockOnceHandler(TestHandler):
- def __init__(self, barrier):
- super().__init__(support.Matcher())
- self.barrier = barrier
-
- def createLock(self):
- self.lock = None
-
- def handle(self, record):
- self.emit(record)
-
- def emit(self, record):
- if self.barrier:
- barrier = self.barrier
- self.barrier = None
- barrier.wait()
- with lock:
- pass
- super().emit(record)
- logger.info("blow up if not supressed")
-
- barrier = threading.Barrier(2)
- handler = BlockOnceHandler(barrier)
- logger.addHandler(handler)
- logger.setLevel(logging.DEBUG)
-
- t1 = threading.Thread(target=logger.debug, args=("1",))
- with lock:
-
- # Ensure first thread is blocked in the handler, hence supressing logging...
- t1.start()
- barrier.wait()
-
- # ...but the second thread should still be able to log...
- t2 = threading.Thread(target=logger.debug, args=("2",))
- t2.start()
- t2.join(timeout=3)
-
- self.assertEqual(len(handler.buffer), 1)
- self.assertTrue(handler.matches(levelno=logging.DEBUG, message='2'))
-
- # The first thread should still be blocked here
- self.assertTrue(t1.is_alive())
-
- # Now the lock has been released the first thread should complete
- t1.join()
- self.assertEqual(len(handler.buffer), 2)
- self.assertTrue(handler.matches(levelno=logging.DEBUG, message='1'))
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
diff --git a/Lib/test/test_math.py b/Lib/test/test_math.py
index 913a60bf9e0..46cb54647b1 100644
--- a/Lib/test/test_math.py
+++ b/Lib/test/test_math.py
@@ -475,6 +475,19 @@ class MathTests(unittest.TestCase):
# similarly, copysign(2., NAN) could be 2. or -2.
self.assertEqual(abs(math.copysign(2., NAN)), 2.)
+ def test_signbit(self):
+ self.assertRaises(TypeError, math.signbit)
+ self.assertRaises(TypeError, math.signbit, '1.0')
+
+ # C11, §7.12.3.6 requires signbit() to return a nonzero value
+ # if and only if the sign of its argument value is negative,
+ # but in practice, we are only interested in a boolean value.
+ self.assertIsInstance(math.signbit(1.0), bool)
+
+ for arg in [0., 1., INF, NAN]:
+ self.assertFalse(math.signbit(arg))
+ self.assertTrue(math.signbit(-arg))
+
def testCos(self):
self.assertRaises(TypeError, math.cos)
self.ftest('cos(-pi/2)', math.cos(-math.pi/2), 0, abs_tol=math.ulp(1))
@@ -1214,6 +1227,12 @@ class MathTests(unittest.TestCase):
self.assertEqual(math.ldexp(NINF, n), NINF)
self.assertTrue(math.isnan(math.ldexp(NAN, n)))
+ @requires_IEEE_754
+ def testLdexp_denormal(self):
+ # Denormal output incorrectly rounded (truncated)
+        # on some Windows versions.
+ self.assertEqual(math.ldexp(6993274598585239, -1126), 1e-323)
+
def testLog(self):
self.assertRaises(TypeError, math.log)
self.assertRaises(TypeError, math.log, 1, 2, 3)
@@ -1381,7 +1400,6 @@ class MathTests(unittest.TestCase):
args = ((-5, -5, 10), (1.5, 4611686018427387904, 2305843009213693952))
self.assertEqual(sumprod(*args), 0.0)
-
@requires_IEEE_754
@unittest.skipIf(HAVE_DOUBLE_ROUNDING,
"sumprod() accuracy not guaranteed on machines with double rounding")
@@ -1967,6 +1985,28 @@ class MathTests(unittest.TestCase):
self.assertFalse(math.isfinite(float("inf")))
self.assertFalse(math.isfinite(float("-inf")))
+ def testIsnormal(self):
+ self.assertTrue(math.isnormal(1.25))
+ self.assertTrue(math.isnormal(-1.0))
+ self.assertFalse(math.isnormal(0.0))
+ self.assertFalse(math.isnormal(-0.0))
+ self.assertFalse(math.isnormal(INF))
+ self.assertFalse(math.isnormal(NINF))
+ self.assertFalse(math.isnormal(NAN))
+ self.assertFalse(math.isnormal(FLOAT_MIN/2))
+ self.assertFalse(math.isnormal(-FLOAT_MIN/2))
+
+ def testIssubnormal(self):
+ self.assertFalse(math.issubnormal(1.25))
+ self.assertFalse(math.issubnormal(-1.0))
+ self.assertFalse(math.issubnormal(0.0))
+ self.assertFalse(math.issubnormal(-0.0))
+ self.assertFalse(math.issubnormal(INF))
+ self.assertFalse(math.issubnormal(NINF))
+ self.assertFalse(math.issubnormal(NAN))
+ self.assertTrue(math.issubnormal(FLOAT_MIN/2))
+ self.assertTrue(math.issubnormal(-FLOAT_MIN/2))
+
def testIsnan(self):
self.assertTrue(math.isnan(float("nan")))
self.assertTrue(math.isnan(float("-nan")))
@@ -2458,7 +2498,6 @@ class MathTests(unittest.TestCase):
with self.assertRaises(ValueError):
math.nextafter(1.0, INF, steps=-1)
-
@requires_IEEE_754
def test_ulp(self):
self.assertEqual(math.ulp(1.0), sys.float_info.epsilon)
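
The added test_signbit, testIsnormal, and testIssubnormal cases above exercise three float classification helpers; a compact sketch of the behaviour they assert (the subnormal value mirrors FLOAT_MIN/2 from the tests):

    import math, sys

    tiny = sys.float_info.min / 2            # a subnormal double

    assert math.signbit(-0.0) and not math.signbit(0.0)
    assert math.isnormal(1.25) and not math.isnormal(0.0)
    assert not math.isnormal(tiny)
    assert math.issubnormal(tiny) and not math.issubnormal(1.25)
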
diff --git a/Lib/test/test_memoryview.py b/Lib/test/test_memoryview.py
index 61b068c630c..64f440f180b 100644
--- a/Lib/test/test_memoryview.py
+++ b/Lib/test/test_memoryview.py
@@ -743,19 +743,21 @@ class RacingTest(unittest.TestCase):
from multiprocessing.managers import SharedMemoryManager
except ImportError:
self.skipTest("Test requires multiprocessing")
- from threading import Thread
+ from threading import Thread, Event
- n = 100
+ start = Event()
with SharedMemoryManager() as smm:
obj = smm.ShareableList(range(100))
- threads = []
- for _ in range(n):
- # Issue gh-127085, the `ShareableList.count` is just a convenient way to mess the `exports`
- # counter of `memoryview`, this issue has no direct relation with `ShareableList`.
- threads.append(Thread(target=obj.count, args=(1,)))
-
+ def test():
+                # Issue gh-127085: `ShareableList.count` is just a convenient
+                # way to mess with the `exports` counter of `memoryview`;
+                # the issue has no direct relation to `ShareableList`.
+ start.wait(support.SHORT_TIMEOUT)
+ for i in range(10):
+ obj.count(1)
+ threads = [Thread(target=test) for _ in range(10)]
with threading_helper.start_threads(threads):
- pass
+ start.set()
del obj
diff --git a/Lib/test/test_monitoring.py b/Lib/test/test_monitoring.py
index 263e4e6f394..a932ac80117 100644
--- a/Lib/test/test_monitoring.py
+++ b/Lib/test/test_monitoring.py
@@ -2157,6 +2157,21 @@ class TestRegressions(MonitoringTestBase, unittest.TestCase):
sys.monitoring.restart_events()
sys.monitoring.set_events(0, 0)
+ def test_134879(self):
+ # gh-134789
+ # Specialized FOR_ITER not incrementing index
+ def foo():
+ t = 0
+ for i in [1,2,3,4]:
+ t += i
+ self.assertEqual(t, 10)
+
+ sys.monitoring.use_tool_id(0, "test")
+ self.addCleanup(sys.monitoring.free_tool_id, 0)
+ sys.monitoring.set_local_events(0, foo.__code__, E.BRANCH_LEFT | E.BRANCH_RIGHT)
+ foo()
+ sys.monitoring.set_local_events(0, foo.__code__, 0)
+
class TestOptimizer(MonitoringTestBase, unittest.TestCase):
diff --git a/Lib/test/test_netrc.py b/Lib/test/test_netrc.py
index 81e11a293cc..9d720f62710 100644
--- a/Lib/test/test_netrc.py
+++ b/Lib/test/test_netrc.py
@@ -1,11 +1,7 @@
import netrc, os, unittest, sys, textwrap
+from test import support
from test.support import os_helper
-try:
- import pwd
-except ImportError:
- pwd = None
-
temp_filename = os_helper.TESTFN
class NetrcTestCase(unittest.TestCase):
@@ -269,9 +265,14 @@ class NetrcTestCase(unittest.TestCase):
machine bar.domain.com login foo password pass
""", '#pass')
+ @unittest.skipUnless(support.is_wasi, 'WASI only test')
+ def test_security_on_WASI(self):
+ self.assertFalse(netrc._can_security_check())
+ self.assertEqual(netrc._getpwuid(0), 'uid 0')
+ self.assertEqual(netrc._getpwuid(123456), 'uid 123456')
@unittest.skipUnless(os.name == 'posix', 'POSIX only test')
- @unittest.skipIf(pwd is None, 'security check requires pwd module')
+ @unittest.skipUnless(hasattr(os, 'getuid'), "os.getuid is required")
@os_helper.skip_unless_working_chmod
def test_security(self):
# This test is incomplete since we are normally not run as root and
diff --git a/Lib/test/test_ntpath.py b/Lib/test/test_ntpath.py
index f83ef225a6e..22f6403d482 100644
--- a/Lib/test/test_ntpath.py
+++ b/Lib/test/test_ntpath.py
@@ -6,8 +6,9 @@ import subprocess
import sys
import unittest
import warnings
-from test.support import cpython_only, os_helper
-from test.support import TestFailed, is_emscripten
+from ntpath import ALLOW_MISSING
+from test import support
+from test.support import TestFailed, cpython_only, os_helper
from test.support.os_helper import FakePath
from test import test_genericpath
from tempfile import TemporaryFile
@@ -77,6 +78,10 @@ def tester(fn, wantResult):
%(str(fn), str(wantResult), repr(gotResult)))
+def _parameterize(*parameters):
+ return support.subTests('kwargs', parameters, _do_cleanups=True)
+
+
class NtpathTestCase(unittest.TestCase):
def assertPathEqual(self, path1, path2):
if path1 == path2 or _norm(path1) == _norm(path2):
@@ -475,6 +480,27 @@ class TestNtpath(NtpathTestCase):
tester("ntpath.realpath('.\\.')", expected)
tester("ntpath.realpath('\\'.join(['.'] * 100))", expected)
+ def test_realpath_curdir_strict(self):
+ expected = ntpath.normpath(os.getcwd())
+ tester("ntpath.realpath('.', strict=True)", expected)
+ tester("ntpath.realpath('./.', strict=True)", expected)
+ tester("ntpath.realpath('/'.join(['.'] * 100), strict=True)", expected)
+ tester("ntpath.realpath('.\\.', strict=True)", expected)
+ tester("ntpath.realpath('\\'.join(['.'] * 100), strict=True)", expected)
+
+ def test_realpath_curdir_missing_ok(self):
+ expected = ntpath.normpath(os.getcwd())
+ tester("ntpath.realpath('.', strict=ALLOW_MISSING)",
+ expected)
+ tester("ntpath.realpath('./.', strict=ALLOW_MISSING)",
+ expected)
+ tester("ntpath.realpath('/'.join(['.'] * 100), strict=ALLOW_MISSING)",
+ expected)
+ tester("ntpath.realpath('.\\.', strict=ALLOW_MISSING)",
+ expected)
+ tester("ntpath.realpath('\\'.join(['.'] * 100), strict=ALLOW_MISSING)",
+ expected)
+
def test_realpath_pardir(self):
expected = ntpath.normpath(os.getcwd())
tester("ntpath.realpath('..')", ntpath.dirname(expected))
@@ -487,24 +513,59 @@ class TestNtpath(NtpathTestCase):
tester("ntpath.realpath('\\'.join(['..'] * 50))",
ntpath.splitdrive(expected)[0] + '\\')
+ def test_realpath_pardir_strict(self):
+ expected = ntpath.normpath(os.getcwd())
+ tester("ntpath.realpath('..', strict=True)", ntpath.dirname(expected))
+ tester("ntpath.realpath('../..', strict=True)",
+ ntpath.dirname(ntpath.dirname(expected)))
+ tester("ntpath.realpath('/'.join(['..'] * 50), strict=True)",
+ ntpath.splitdrive(expected)[0] + '\\')
+ tester("ntpath.realpath('..\\..', strict=True)",
+ ntpath.dirname(ntpath.dirname(expected)))
+ tester("ntpath.realpath('\\'.join(['..'] * 50), strict=True)",
+ ntpath.splitdrive(expected)[0] + '\\')
+
+ def test_realpath_pardir_missing_ok(self):
+ expected = ntpath.normpath(os.getcwd())
+ tester("ntpath.realpath('..', strict=ALLOW_MISSING)",
+ ntpath.dirname(expected))
+ tester("ntpath.realpath('../..', strict=ALLOW_MISSING)",
+ ntpath.dirname(ntpath.dirname(expected)))
+ tester("ntpath.realpath('/'.join(['..'] * 50), strict=ALLOW_MISSING)",
+ ntpath.splitdrive(expected)[0] + '\\')
+ tester("ntpath.realpath('..\\..', strict=ALLOW_MISSING)",
+ ntpath.dirname(ntpath.dirname(expected)))
+ tester("ntpath.realpath('\\'.join(['..'] * 50), strict=ALLOW_MISSING)",
+ ntpath.splitdrive(expected)[0] + '\\')
+
@os_helper.skip_unless_symlink
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
- def test_realpath_basic(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_basic(self, kwargs):
ABSTFN = ntpath.abspath(os_helper.TESTFN)
open(ABSTFN, "wb").close()
self.addCleanup(os_helper.unlink, ABSTFN)
self.addCleanup(os_helper.unlink, ABSTFN + "1")
os.symlink(ABSTFN, ABSTFN + "1")
- self.assertPathEqual(ntpath.realpath(ABSTFN + "1"), ABSTFN)
- self.assertPathEqual(ntpath.realpath(os.fsencode(ABSTFN + "1")),
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "1", **kwargs), ABSTFN)
+ self.assertPathEqual(ntpath.realpath(os.fsencode(ABSTFN + "1"), **kwargs),
os.fsencode(ABSTFN))
# gh-88013: call ntpath.realpath with binary drive name may raise a
# TypeError. The drive should not exist to reproduce the bug.
drives = {f"{c}:\\" for c in string.ascii_uppercase} - set(os.listdrives())
d = drives.pop().encode()
- self.assertEqual(ntpath.realpath(d), d)
+ self.assertEqual(ntpath.realpath(d, strict=False), d)
+
+ # gh-106242: Embedded nulls and non-strict fallback to abspath
+ if kwargs:
+ with self.assertRaises(OSError):
+ ntpath.realpath(os_helper.TESTFN + "\0spam",
+ **kwargs)
+ else:
+ self.assertEqual(ABSTFN + "\0spam",
+ ntpath.realpath(os_helper.TESTFN + "\0spam", **kwargs))
@os_helper.skip_unless_symlink
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
@@ -527,51 +588,66 @@ class TestNtpath(NtpathTestCase):
self.assertEqual(realpath(path, strict=False), path)
# gh-106242: Embedded nulls should raise OSError (not ValueError)
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertRaises(OSError, realpath, path, strict=ALLOW_MISSING)
path = ABSTFNb + b'\x00'
self.assertEqual(realpath(path, strict=False), path)
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertRaises(OSError, realpath, path, strict=ALLOW_MISSING)
path = ABSTFN + '\\nonexistent\\x\x00'
self.assertEqual(realpath(path, strict=False), path)
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertRaises(OSError, realpath, path, strict=ALLOW_MISSING)
path = ABSTFNb + b'\\nonexistent\\x\x00'
self.assertEqual(realpath(path, strict=False), path)
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertRaises(OSError, realpath, path, strict=ALLOW_MISSING)
path = ABSTFN + '\x00\\..'
self.assertEqual(realpath(path, strict=False), os.getcwd())
self.assertEqual(realpath(path, strict=True), os.getcwd())
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), os.getcwd())
path = ABSTFNb + b'\x00\\..'
self.assertEqual(realpath(path, strict=False), os.getcwdb())
self.assertEqual(realpath(path, strict=True), os.getcwdb())
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), os.getcwdb())
path = ABSTFN + '\\nonexistent\\x\x00\\..'
self.assertEqual(realpath(path, strict=False), ABSTFN + '\\nonexistent')
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), ABSTFN + '\\nonexistent')
path = ABSTFNb + b'\\nonexistent\\x\x00\\..'
self.assertEqual(realpath(path, strict=False), ABSTFNb + b'\\nonexistent')
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), ABSTFNb + b'\\nonexistent')
+ @unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_invalid_unicode_paths(self, kwargs):
+ realpath = ntpath.realpath
+ ABSTFN = ntpath.abspath(os_helper.TESTFN)
+ ABSTFNb = os.fsencode(ABSTFN)
path = ABSTFNb + b'\xff'
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=False)
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
path = ABSTFNb + b'\\nonexistent\\\xff'
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=False)
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
path = ABSTFNb + b'\xff\\..'
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=False)
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
path = ABSTFNb + b'\\nonexistent\\\xff\\..'
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=False)
- self.assertRaises(UnicodeDecodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
+ self.assertRaises(UnicodeDecodeError, realpath, path, **kwargs)
@os_helper.skip_unless_symlink
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
- def test_realpath_relative(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_relative(self, kwargs):
ABSTFN = ntpath.abspath(os_helper.TESTFN)
open(ABSTFN, "wb").close()
self.addCleanup(os_helper.unlink, ABSTFN)
self.addCleanup(os_helper.unlink, ABSTFN + "1")
os.symlink(ABSTFN, ntpath.relpath(ABSTFN + "1"))
- self.assertPathEqual(ntpath.realpath(ABSTFN + "1"), ABSTFN)
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "1", **kwargs), ABSTFN)
@os_helper.skip_unless_symlink
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
@@ -723,7 +799,62 @@ class TestNtpath(NtpathTestCase):
@os_helper.skip_unless_symlink
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
- def test_realpath_symlink_prefix(self):
+ def test_realpath_symlink_loops_raise(self):
+ # Symlink loops raise OSError in ALLOW_MISSING mode
+ ABSTFN = ntpath.abspath(os_helper.TESTFN)
+ self.addCleanup(os_helper.unlink, ABSTFN)
+ self.addCleanup(os_helper.unlink, ABSTFN + "1")
+ self.addCleanup(os_helper.unlink, ABSTFN + "2")
+ self.addCleanup(os_helper.unlink, ABSTFN + "y")
+ self.addCleanup(os_helper.unlink, ABSTFN + "c")
+ self.addCleanup(os_helper.unlink, ABSTFN + "a")
+ self.addCleanup(os_helper.unlink, ABSTFN + "x")
+
+ os.symlink(ABSTFN, ABSTFN)
+ self.assertRaises(OSError, ntpath.realpath, ABSTFN, strict=ALLOW_MISSING)
+
+ os.symlink(ABSTFN + "1", ABSTFN + "2")
+ os.symlink(ABSTFN + "2", ABSTFN + "1")
+ self.assertRaises(OSError, ntpath.realpath, ABSTFN + "1",
+ strict=ALLOW_MISSING)
+ self.assertRaises(OSError, ntpath.realpath, ABSTFN + "2",
+ strict=ALLOW_MISSING)
+ self.assertRaises(OSError, ntpath.realpath, ABSTFN + "1\\x",
+ strict=ALLOW_MISSING)
+
+ # Windows eliminates '..' components before resolving links;
+ # realpath is not expected to raise if this removes the loop.
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "1\\.."),
+ ntpath.dirname(ABSTFN))
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "1\\..\\x"),
+ ntpath.dirname(ABSTFN) + "\\x")
+
+ os.symlink(ABSTFN + "x", ABSTFN + "y")
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "1\\..\\"
+ + ntpath.basename(ABSTFN) + "y"),
+ ABSTFN + "x")
+ self.assertRaises(
+ OSError, ntpath.realpath,
+ ABSTFN + "1\\..\\" + ntpath.basename(ABSTFN) + "1",
+ strict=ALLOW_MISSING)
+
+ os.symlink(ntpath.basename(ABSTFN) + "a\\b", ABSTFN + "a")
+ self.assertRaises(OSError, ntpath.realpath, ABSTFN + "a",
+ strict=ALLOW_MISSING)
+
+ os.symlink("..\\" + ntpath.basename(ntpath.dirname(ABSTFN))
+ + "\\" + ntpath.basename(ABSTFN) + "c", ABSTFN + "c")
+ self.assertRaises(OSError, ntpath.realpath, ABSTFN + "c",
+ strict=ALLOW_MISSING)
+
+ # Test using relative path as well.
+ self.assertRaises(OSError, ntpath.realpath, ntpath.basename(ABSTFN),
+ strict=ALLOW_MISSING)
+
+ @os_helper.skip_unless_symlink
+ @unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_symlink_prefix(self, kwargs):
ABSTFN = ntpath.abspath(os_helper.TESTFN)
self.addCleanup(os_helper.unlink, ABSTFN + "3")
self.addCleanup(os_helper.unlink, "\\\\?\\" + ABSTFN + "3.")
@@ -738,9 +869,9 @@ class TestNtpath(NtpathTestCase):
f.write(b'1')
os.symlink("\\\\?\\" + ABSTFN + "3.", ABSTFN + "3.link")
- self.assertPathEqual(ntpath.realpath(ABSTFN + "3link"),
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "3link", **kwargs),
ABSTFN + "3")
- self.assertPathEqual(ntpath.realpath(ABSTFN + "3.link"),
+ self.assertPathEqual(ntpath.realpath(ABSTFN + "3.link", **kwargs),
"\\\\?\\" + ABSTFN + "3.")
# Resolved paths should be usable to open target files
@@ -750,14 +881,17 @@ class TestNtpath(NtpathTestCase):
self.assertEqual(f.read(), b'1')
# When the prefix is included, it is not stripped
- self.assertPathEqual(ntpath.realpath("\\\\?\\" + ABSTFN + "3link"),
+ self.assertPathEqual(ntpath.realpath("\\\\?\\" + ABSTFN + "3link", **kwargs),
"\\\\?\\" + ABSTFN + "3")
- self.assertPathEqual(ntpath.realpath("\\\\?\\" + ABSTFN + "3.link"),
+ self.assertPathEqual(ntpath.realpath("\\\\?\\" + ABSTFN + "3.link", **kwargs),
"\\\\?\\" + ABSTFN + "3.")
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
def test_realpath_nul(self):
tester("ntpath.realpath('NUL')", r'\\.\NUL')
+ tester("ntpath.realpath('NUL', strict=False)", r'\\.\NUL')
+ tester("ntpath.realpath('NUL', strict=True)", r'\\.\NUL')
+ tester("ntpath.realpath('NUL', strict=ALLOW_MISSING)", r'\\.\NUL')
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
@unittest.skipUnless(HAVE_GETSHORTPATHNAME, 'need _getshortpathname')
@@ -781,12 +915,20 @@ class TestNtpath(NtpathTestCase):
self.assertPathEqual(test_file_long, ntpath.realpath(test_file_short))
- with os_helper.change_cwd(test_dir_long):
- self.assertPathEqual(test_file_long, ntpath.realpath("file.txt"))
- with os_helper.change_cwd(test_dir_long.lower()):
- self.assertPathEqual(test_file_long, ntpath.realpath("file.txt"))
- with os_helper.change_cwd(test_dir_short):
- self.assertPathEqual(test_file_long, ntpath.realpath("file.txt"))
+ for kwargs in {}, {'strict': True}, {'strict': ALLOW_MISSING}:
+ with self.subTest(**kwargs):
+ with os_helper.change_cwd(test_dir_long):
+ self.assertPathEqual(
+ test_file_long,
+ ntpath.realpath("file.txt", **kwargs))
+ with os_helper.change_cwd(test_dir_long.lower()):
+ self.assertPathEqual(
+ test_file_long,
+ ntpath.realpath("file.txt", **kwargs))
+ with os_helper.change_cwd(test_dir_short):
+ self.assertPathEqual(
+ test_file_long,
+ ntpath.realpath("file.txt", **kwargs))
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
def test_realpath_permission(self):
@@ -807,12 +949,15 @@ class TestNtpath(NtpathTestCase):
# Automatic generation of short names may be disabled on
# NTFS volumes for the sake of performance.
# They're not supported at all on ReFS and exFAT.
- subprocess.run(
+ p = subprocess.run(
# Try to set the short name manually.
['fsutil.exe', 'file', 'setShortName', test_file, 'LONGFI~1.TXT'],
creationflags=subprocess.DETACHED_PROCESS
)
+ if p.returncode:
+ raise unittest.SkipTest('failed to set short name')
+
try:
self.assertPathEqual(test_file, ntpath.realpath(test_file_short))
except AssertionError:
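
The ntpath hunks above parameterize realpath() over its three strict modes. Roughly, per the assertions in this file (a Windows-only sketch; elsewhere ntpath.realpath degrades to abspath, and the path below is hypothetical):

    import ntpath
    from ntpath import ALLOW_MISSING

    missing = 'C:\\hopefully-nonexistent\\child'

    ntpath.realpath(missing)                         # default: never raises
    ntpath.realpath(missing, strict=ALLOW_MISSING)   # missing tails tolerated
    try:
        ntpath.realpath(missing, strict=True)        # missing paths raise
    except OSError:
        pass
    # strict=ALLOW_MISSING still raises OSError for symlink loops and
    # embedded NUL bytes, as the tests above check.
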
diff --git a/Lib/test/test_optparse.py b/Lib/test/test_optparse.py
index e6ffd2b0ffe..e476e472780 100644
--- a/Lib/test/test_optparse.py
+++ b/Lib/test/test_optparse.py
@@ -615,9 +615,9 @@ Options:
self.parser.add_option(
"-p", "--prob",
help="blow up with probability PROB [default: %default]")
- self.parser.set_defaults(prob=0.43)
+ self.parser.set_defaults(prob=0.25)
expected_help = self.help_prefix + \
- " -p PROB, --prob=PROB blow up with probability PROB [default: 0.43]\n"
+ " -p PROB, --prob=PROB blow up with probability PROB [default: 0.25]\n"
self.assertHelp(self.parser, expected_help)
def test_alt_expand(self):
diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py
index 88b5b0e6e35..5217037ae9d 100644
--- a/Lib/test/test_os.py
+++ b/Lib/test/test_os.py
@@ -13,7 +13,6 @@ import itertools
import locale
import os
import pickle
-import platform
import select
import selectors
import shutil
@@ -1919,6 +1918,10 @@ class MakedirTests(unittest.TestCase):
support.is_wasi,
"WASI's umask is a stub."
)
+ @unittest.skipIf(
+ support.is_emscripten,
+ "TODO: Fails in buildbot; see #135783"
+ )
def test_mode(self):
with os_helper.temp_umask(0o002):
base = os_helper.TESTFN
@@ -4291,13 +4294,8 @@ class EventfdTests(unittest.TestCase):
@unittest.skipIf(sys.platform == "android", "gh-124873: Test is flaky on Android")
@support.requires_linux_version(2, 6, 30)
class TimerfdTests(unittest.TestCase):
- # 1 ms accuracy is reliably achievable on every platform except Android
- # emulators, where we allow 10 ms (gh-108277).
- if sys.platform == "android" and platform.android_ver().is_emulator:
- CLOCK_RES_PLACES = 2
- else:
- CLOCK_RES_PLACES = 3
-
+ # gh-126112: Use 10 ms to tolerate slow buildbots
+ CLOCK_RES_PLACES = 2 # 10 ms
CLOCK_RES = 10 ** -CLOCK_RES_PLACES
CLOCK_RES_NS = 10 ** (9 - CLOCK_RES_PLACES)
diff --git a/Lib/test/test_pathlib/test_pathlib.py b/Lib/test/test_pathlib/test_pathlib.py
index 13356b4cfe0..b2e2cdb3338 100644
--- a/Lib/test/test_pathlib/test_pathlib.py
+++ b/Lib/test/test_pathlib/test_pathlib.py
@@ -2954,7 +2954,13 @@ class PathTest(PurePathTest):
else:
# ".." segments are normalized first on Windows, so this path is stat()able.
self.assertEqual(set(p.glob("xyzzy/..")), { P(self.base, "xyzzy", "..") })
- self.assertEqual(set(p.glob("/".join([".."] * 50))), { P(self.base, *[".."] * 50)})
+ if sys.platform == "emscripten":
+ # Emscripten will return ELOOP if there are 49 or more ..'s.
+ # Can remove when https://github.com/emscripten-core/emscripten/pull/24591 is merged.
+ NDOTDOTS = 48
+ else:
+ NDOTDOTS = 50
+ self.assertEqual(set(p.glob("/".join([".."] * NDOTDOTS))), { P(self.base, *[".."] * NDOTDOTS)})
def test_glob_inaccessible(self):
P = self.cls
diff --git a/Lib/test/test_peepholer.py b/Lib/test/test_peepholer.py
index 0a9ba578673..98629df4574 100644
--- a/Lib/test/test_peepholer.py
+++ b/Lib/test/test_peepholer.py
@@ -12,7 +12,7 @@ except ImportError:
from test import support
from test.support.bytecode_helper import (
- BytecodeTestCase, CfgOptimizationTestCase, CompilationStepTestCase)
+ BytecodeTestCase, CfgOptimizationTestCase)
def compile_pattern_with_fast_locals(pattern):
@@ -292,6 +292,7 @@ class TestTranforms(BytecodeTestCase):
('---x', 'UNARY_NEGATIVE', None, False, None, None),
('~~~x', 'UNARY_INVERT', None, False, None, None),
('+++x', 'CALL_INTRINSIC_1', intrinsic_positive, False, None, None),
+ ('~True', 'UNARY_INVERT', None, False, None, None),
]
for (
@@ -718,9 +719,9 @@ class TestTranforms(BytecodeTestCase):
self.assertEqual(format('x = %d!', 1234), 'x = 1234!')
self.assertEqual(format('x = %x!', 1234), 'x = 4d2!')
self.assertEqual(format('x = %f!', 1234), 'x = 1234.000000!')
- self.assertEqual(format('x = %s!', 1234.5678901), 'x = 1234.5678901!')
- self.assertEqual(format('x = %f!', 1234.5678901), 'x = 1234.567890!')
- self.assertEqual(format('x = %d!', 1234.5678901), 'x = 1234!')
+ self.assertEqual(format('x = %s!', 1234.0000625), 'x = 1234.0000625!')
+ self.assertEqual(format('x = %f!', 1234.0000625), 'x = 1234.000063!')
+ self.assertEqual(format('x = %d!', 1234.0000625), 'x = 1234!')
self.assertEqual(format('x = %s%% %%%%', 1234), 'x = 1234% %%')
self.assertEqual(format('x = %s!', '%% %s'), 'x = %% %s!')
self.assertEqual(format('x = %s, y = %d', 12, 34), 'x = 12, y = 34')
@@ -2614,6 +2615,90 @@ class OptimizeLoadFastTestCase(DirectCfgOptimizerTests):
]
self.cfg_optimization_test(insts, expected, consts=[None])
+ def test_format_simple(self):
+ # FORMAT_SIMPLE will leave its operand on the stack if it's a unicode
+ # object. We treat it conservatively and assume that it always leaves
+ # its operand on the stack.
+ insts = [
+ ("LOAD_FAST", 0, 1),
+ ("FORMAT_SIMPLE", None, 2),
+ ("STORE_FAST", 1, 3),
+ ]
+ self.check(insts, insts)
+
+ insts = [
+ ("LOAD_FAST", 0, 1),
+ ("FORMAT_SIMPLE", None, 2),
+ ("POP_TOP", None, 3),
+ ]
+ expected = [
+ ("LOAD_FAST_BORROW", 0, 1),
+ ("FORMAT_SIMPLE", None, 2),
+ ("POP_TOP", None, 3),
+ ]
+ self.check(insts, expected)
+
+ def test_set_function_attribute(self):
+ # SET_FUNCTION_ATTRIBUTE leaves the function on the stack
+ insts = [
+ ("LOAD_CONST", 0, 1),
+ ("LOAD_FAST", 0, 2),
+ ("SET_FUNCTION_ATTRIBUTE", 2, 3),
+ ("STORE_FAST", 1, 4),
+ ("LOAD_CONST", 0, 5),
+ ("RETURN_VALUE", None, 6)
+ ]
+ self.cfg_optimization_test(insts, insts, consts=[None])
+
+ insts = [
+ ("LOAD_CONST", 0, 1),
+ ("LOAD_FAST", 0, 2),
+ ("SET_FUNCTION_ATTRIBUTE", 2, 3),
+ ("RETURN_VALUE", None, 4)
+ ]
+ expected = [
+ ("LOAD_CONST", 0, 1),
+ ("LOAD_FAST_BORROW", 0, 2),
+ ("SET_FUNCTION_ATTRIBUTE", 2, 3),
+ ("RETURN_VALUE", None, 4)
+ ]
+ self.cfg_optimization_test(insts, expected, consts=[None])
+
+ def test_get_yield_from_iter(self):
+ # GET_YIELD_FROM_ITER may leave its operand on the stack
+ insts = [
+ ("LOAD_FAST", 0, 1),
+ ("GET_YIELD_FROM_ITER", None, 2),
+ ("LOAD_CONST", 0, 3),
+ send := self.Label(),
+ ("SEND", end := self.Label(), 5),
+ ("YIELD_VALUE", 1, 6),
+ ("RESUME", 2, 7),
+ ("JUMP", send, 8),
+ end,
+ ("END_SEND", None, 9),
+ ("LOAD_CONST", 0, 10),
+ ("RETURN_VALUE", None, 11),
+ ]
+ self.cfg_optimization_test(insts, insts, consts=[None])
+
+ def test_push_exc_info(self):
+ insts = [
+ ("LOAD_FAST", 0, 1),
+ ("PUSH_EXC_INFO", None, 2),
+ ]
+ self.check(insts, insts)
+
+ def test_load_special(self):
+ # LOAD_SPECIAL may leave self on the stack
+ insts = [
+ ("LOAD_FAST", 0, 1),
+ ("LOAD_SPECIAL", 0, 2),
+ ("STORE_FAST", 1, 3),
+ ]
+ self.check(insts, insts)
+
+
def test_del_in_finally(self):
# This loads `obj` onto the stack, executes `del obj`, then returns the
# `obj` from the stack. See gh-133371 for more details.
@@ -2630,6 +2715,14 @@ class OptimizeLoadFastTestCase(DirectCfgOptimizerTests):
gc.collect()
self.assertEqual(obj, [42])
+ def test_format_simple_unicode(self):
+ # Repro from gh-134889
+ def f():
+ var = f"{1}"
+ var = f"{var}"
+ return var
+ self.assertEqual(f(), "1")
+
if __name__ == "__main__":
diff --git a/Lib/test/test_perf_profiler.py b/Lib/test/test_perf_profiler.py
index 21d097dbb55..7529c853f9c 100644
--- a/Lib/test/test_perf_profiler.py
+++ b/Lib/test/test_perf_profiler.py
@@ -506,9 +506,12 @@ def _is_perf_version_at_least(major, minor):
# The output of perf --version looks like "perf version 6.7-3" but
# it can also be perf version "perf version 5.15.143", or even include
# a commit hash in the version string, like "6.12.9.g242e6068fd5c"
+ #
+ # PermissionError is raised if perf does not exist on the Windows Subsystem
+ # for Linux, see #134987
try:
output = subprocess.check_output(["perf", "--version"], text=True)
- except (subprocess.CalledProcessError, FileNotFoundError):
+ except (subprocess.CalledProcessError, FileNotFoundError, PermissionError):
return False
version = output.split()[2]
version = version.split("-")[0]
diff --git a/Lib/test/test_platform.py b/Lib/test/test_platform.py
index 3688cc4267b..479649053ab 100644
--- a/Lib/test/test_platform.py
+++ b/Lib/test/test_platform.py
@@ -133,6 +133,22 @@ class PlatformTest(unittest.TestCase):
for terse in (False, True):
res = platform.platform(aliased, terse)
+ def test__platform(self):
+ for src, res in [
+ ('foo bar', 'foo_bar'),
+ (
+ '1/2\\3:4;5"6(7)8(7)6"5;4:3\\2/1',
+ '1-2-3-4-5-6-7-8-7-6-5-4-3-2-1'
+ ),
+ ('--', ''),
+ ('-f', '-f'),
+ ('-foo----', '-foo'),
+ ('--foo---', '-foo'),
+ ('---foo--', '-foo'),
+ ]:
+ with self.subTest(src=src):
+ self.assertEqual(platform._platform(src), res)
+
def test_system(self):
res = platform.system()
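
platform._platform() is private, so the new test documents its behaviour only through the vectors above. A rough reimplementation that satisfies exactly those vectors looks like this; it is an illustration inferred from the test data, not the real implementation:

    def sanitize(s):
        s = s.replace(' ', '_')              # spaces become underscores
        for ch in '/\\:;"()':
            s = s.replace(ch, '-')           # path / shell metacharacters become dashes
        while '--' in s:
            s = s.replace('--', '-')         # collapse runs of dashes
        return s.rstrip('-')                 # and drop trailing ones

    assert sanitize('foo bar') == 'foo_bar'
    assert sanitize('-foo----') == '-foo'
    assert sanitize('--') == ''
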
diff --git a/Lib/test/test_posixpath.py b/Lib/test/test_posixpath.py
index f3f9895f529..21f06712548 100644
--- a/Lib/test/test_posixpath.py
+++ b/Lib/test/test_posixpath.py
@@ -4,7 +4,8 @@ import posixpath
import random
import sys
import unittest
-from posixpath import realpath, abspath, dirname, basename
+from functools import partial
+from posixpath import realpath, abspath, dirname, basename, ALLOW_MISSING
from test import support
from test import test_genericpath
from test.support import import_helper
@@ -33,6 +34,11 @@ def skip_if_ABSTFN_contains_backslash(test):
msg = "ABSTFN is not a posix path - tests fail"
return [test, unittest.skip(msg)(test)][found_backslash]
+
+def _parameterize(*parameters):
+ return support.subTests('kwargs', parameters)
+
+
class PosixPathTest(unittest.TestCase):
def setUp(self):
@@ -442,32 +448,35 @@ class PosixPathTest(unittest.TestCase):
self.assertEqual(result, expected)
@skip_if_ABSTFN_contains_backslash
- def test_realpath_curdir(self):
- self.assertEqual(realpath('.'), os.getcwd())
- self.assertEqual(realpath('./.'), os.getcwd())
- self.assertEqual(realpath('/'.join(['.'] * 100)), os.getcwd())
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_curdir(self, kwargs):
+ self.assertEqual(realpath('.', **kwargs), os.getcwd())
+ self.assertEqual(realpath('./.', **kwargs), os.getcwd())
+ self.assertEqual(realpath('/'.join(['.'] * 100), **kwargs), os.getcwd())
- self.assertEqual(realpath(b'.'), os.getcwdb())
- self.assertEqual(realpath(b'./.'), os.getcwdb())
- self.assertEqual(realpath(b'/'.join([b'.'] * 100)), os.getcwdb())
+ self.assertEqual(realpath(b'.', **kwargs), os.getcwdb())
+ self.assertEqual(realpath(b'./.', **kwargs), os.getcwdb())
+ self.assertEqual(realpath(b'/'.join([b'.'] * 100), **kwargs), os.getcwdb())
@skip_if_ABSTFN_contains_backslash
- def test_realpath_pardir(self):
- self.assertEqual(realpath('..'), dirname(os.getcwd()))
- self.assertEqual(realpath('../..'), dirname(dirname(os.getcwd())))
- self.assertEqual(realpath('/'.join(['..'] * 100)), '/')
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_pardir(self, kwargs):
+ self.assertEqual(realpath('..', **kwargs), dirname(os.getcwd()))
+ self.assertEqual(realpath('../..', **kwargs), dirname(dirname(os.getcwd())))
+ self.assertEqual(realpath('/'.join(['..'] * 100), **kwargs), '/')
- self.assertEqual(realpath(b'..'), dirname(os.getcwdb()))
- self.assertEqual(realpath(b'../..'), dirname(dirname(os.getcwdb())))
- self.assertEqual(realpath(b'/'.join([b'..'] * 100)), b'/')
+ self.assertEqual(realpath(b'..', **kwargs), dirname(os.getcwdb()))
+ self.assertEqual(realpath(b'../..', **kwargs), dirname(dirname(os.getcwdb())))
+ self.assertEqual(realpath(b'/'.join([b'..'] * 100), **kwargs), b'/')
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_basic(self):
+ @_parameterize({}, {'strict': ALLOW_MISSING})
+ def test_realpath_basic(self, kwargs):
# Basic operation.
try:
os.symlink(ABSTFN+"1", ABSTFN)
- self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
+ self.assertEqual(realpath(ABSTFN, **kwargs), ABSTFN+"1")
finally:
os_helper.unlink(ABSTFN)
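
The _parameterize helper added above delegates to support.subTests('kwargs', parameters). In spirit it behaves like the following hand-rolled decorator, which runs the test body once per keyword set inside subTest(); this is a stand-in for illustration only, not the actual test.support helper:

    import functools
    import unittest

    def parameterize(*cases):
        def decorator(func):
            @functools.wraps(func)
            def wrapper(self):
                for kwargs in cases:
                    with self.subTest(kwargs=kwargs):   # each case is reported separately
                        func(self, kwargs)
            return wrapper
        return decorator

    class Demo(unittest.TestCase):
        @parameterize({}, {'strict': True})
        def test_example(self, kwargs):
            self.assertIsInstance(kwargs, dict)
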
@@ -487,90 +496,115 @@ class PosixPathTest(unittest.TestCase):
path = '/\x00'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(ValueError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = b'/\x00'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(ValueError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = '/nonexistent/x\x00'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = b'/nonexistent/x\x00'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = '/\x00/..'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(ValueError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = b'/\x00/..'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(ValueError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
+
path = '/nonexistent/x\x00/..'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = b'/nonexistent/x\x00/..'
self.assertRaises(ValueError, realpath, path, strict=False)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertRaises(ValueError, realpath, path, strict=ALLOW_MISSING)
path = '/\udfff'
if sys.platform == 'win32':
self.assertEqual(realpath(path, strict=False), path)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), path)
else:
self.assertRaises(UnicodeEncodeError, realpath, path, strict=False)
self.assertRaises(UnicodeEncodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeEncodeError, realpath, path, strict=ALLOW_MISSING)
path = '/nonexistent/\udfff'
if sys.platform == 'win32':
self.assertEqual(realpath(path, strict=False), path)
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), path)
else:
self.assertRaises(UnicodeEncodeError, realpath, path, strict=False)
+ self.assertRaises(UnicodeEncodeError, realpath, path, strict=ALLOW_MISSING)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
path = '/\udfff/..'
if sys.platform == 'win32':
self.assertEqual(realpath(path, strict=False), '/')
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), '/')
else:
self.assertRaises(UnicodeEncodeError, realpath, path, strict=False)
self.assertRaises(UnicodeEncodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeEncodeError, realpath, path, strict=ALLOW_MISSING)
path = '/nonexistent/\udfff/..'
if sys.platform == 'win32':
self.assertEqual(realpath(path, strict=False), '/nonexistent')
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), '/nonexistent')
else:
self.assertRaises(UnicodeEncodeError, realpath, path, strict=False)
+ self.assertRaises(UnicodeEncodeError, realpath, path, strict=ALLOW_MISSING)
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
path = b'/\xff'
if sys.platform == 'win32':
self.assertRaises(UnicodeDecodeError, realpath, path, strict=False)
self.assertRaises(UnicodeDecodeError, realpath, path, strict=True)
+ self.assertRaises(UnicodeDecodeError, realpath, path, strict=ALLOW_MISSING)
else:
self.assertEqual(realpath(path, strict=False), path)
if support.is_wasi:
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertRaises(OSError, realpath, path, strict=ALLOW_MISSING)
else:
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
+ self.assertEqual(realpath(path, strict=ALLOW_MISSING), path)
path = b'/nonexistent/\xff'
if sys.platform == 'win32':
self.assertRaises(UnicodeDecodeError, realpath, path, strict=False)
+ self.assertRaises(UnicodeDecodeError, realpath, path, strict=ALLOW_MISSING)
else:
self.assertEqual(realpath(path, strict=False), path)
if support.is_wasi:
self.assertRaises(OSError, realpath, path, strict=True)
+ self.assertRaises(OSError, realpath, path, strict=ALLOW_MISSING)
else:
self.assertRaises(FileNotFoundError, realpath, path, strict=True)
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_relative(self):
+ @_parameterize({}, {'strict': ALLOW_MISSING})
+ def test_realpath_relative(self, kwargs):
try:
os.symlink(posixpath.relpath(ABSTFN+"1"), ABSTFN)
- self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
+ self.assertEqual(realpath(ABSTFN, **kwargs), ABSTFN+"1")
finally:
os_helper.unlink(ABSTFN)
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_missing_pardir(self):
+ @_parameterize({}, {'strict': ALLOW_MISSING})
+ def test_realpath_missing_pardir(self, kwargs):
try:
os.symlink(TESTFN + "1", TESTFN)
- self.assertEqual(realpath("nonexistent/../" + TESTFN), ABSTFN + "1")
+ self.assertEqual(
+ realpath("nonexistent/../" + TESTFN, **kwargs), ABSTFN + "1")
finally:
os_helper.unlink(TESTFN)
@@ -617,37 +651,38 @@ class PosixPathTest(unittest.TestCase):
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_symlink_loops_strict(self):
+ @_parameterize({'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_symlink_loops_strict(self, kwargs):
# Bug #43757, raise OSError if we get into an infinite symlink loop in
- # strict mode.
+ # the strict modes.
try:
os.symlink(ABSTFN, ABSTFN)
- self.assertRaises(OSError, realpath, ABSTFN, strict=True)
+ self.assertRaises(OSError, realpath, ABSTFN, **kwargs)
os.symlink(ABSTFN+"1", ABSTFN+"2")
os.symlink(ABSTFN+"2", ABSTFN+"1")
- self.assertRaises(OSError, realpath, ABSTFN+"1", strict=True)
- self.assertRaises(OSError, realpath, ABSTFN+"2", strict=True)
+ self.assertRaises(OSError, realpath, ABSTFN+"1", **kwargs)
+ self.assertRaises(OSError, realpath, ABSTFN+"2", **kwargs)
- self.assertRaises(OSError, realpath, ABSTFN+"1/x", strict=True)
- self.assertRaises(OSError, realpath, ABSTFN+"1/..", strict=True)
- self.assertRaises(OSError, realpath, ABSTFN+"1/../x", strict=True)
+ self.assertRaises(OSError, realpath, ABSTFN+"1/x", **kwargs)
+ self.assertRaises(OSError, realpath, ABSTFN+"1/..", **kwargs)
+ self.assertRaises(OSError, realpath, ABSTFN+"1/../x", **kwargs)
os.symlink(ABSTFN+"x", ABSTFN+"y")
self.assertRaises(OSError, realpath,
- ABSTFN+"1/../" + basename(ABSTFN) + "y", strict=True)
+ ABSTFN+"1/../" + basename(ABSTFN) + "y", **kwargs)
self.assertRaises(OSError, realpath,
- ABSTFN+"1/../" + basename(ABSTFN) + "1", strict=True)
+ ABSTFN+"1/../" + basename(ABSTFN) + "1", **kwargs)
os.symlink(basename(ABSTFN) + "a/b", ABSTFN+"a")
- self.assertRaises(OSError, realpath, ABSTFN+"a", strict=True)
+ self.assertRaises(OSError, realpath, ABSTFN+"a", **kwargs)
os.symlink("../" + basename(dirname(ABSTFN)) + "/" +
basename(ABSTFN) + "c", ABSTFN+"c")
- self.assertRaises(OSError, realpath, ABSTFN+"c", strict=True)
+ self.assertRaises(OSError, realpath, ABSTFN+"c", **kwargs)
# Test using relative path as well.
with os_helper.change_cwd(dirname(ABSTFN)):
- self.assertRaises(OSError, realpath, basename(ABSTFN), strict=True)
+ self.assertRaises(OSError, realpath, basename(ABSTFN), **kwargs)
finally:
os_helper.unlink(ABSTFN)
os_helper.unlink(ABSTFN+"1")
@@ -658,13 +693,14 @@ class PosixPathTest(unittest.TestCase):
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_repeated_indirect_symlinks(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_repeated_indirect_symlinks(self, kwargs):
# Issue #6975.
try:
os.mkdir(ABSTFN)
os.symlink('../' + basename(ABSTFN), ABSTFN + '/self')
os.symlink('self/self/self', ABSTFN + '/link')
- self.assertEqual(realpath(ABSTFN + '/link'), ABSTFN)
+ self.assertEqual(realpath(ABSTFN + '/link', **kwargs), ABSTFN)
finally:
os_helper.unlink(ABSTFN + '/self')
os_helper.unlink(ABSTFN + '/link')
@@ -672,14 +708,15 @@ class PosixPathTest(unittest.TestCase):
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_deep_recursion(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_deep_recursion(self, kwargs):
depth = 10
try:
os.mkdir(ABSTFN)
for i in range(depth):
os.symlink('/'.join(['%d' % i] * 10), ABSTFN + '/%d' % (i + 1))
os.symlink('.', ABSTFN + '/0')
- self.assertEqual(realpath(ABSTFN + '/%d' % depth), ABSTFN)
+ self.assertEqual(realpath(ABSTFN + '/%d' % depth, **kwargs), ABSTFN)
# Test using relative path as well.
with os_helper.change_cwd(ABSTFN):
@@ -691,7 +728,8 @@ class PosixPathTest(unittest.TestCase):
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_resolve_parents(self):
+ @_parameterize({}, {'strict': ALLOW_MISSING})
+ def test_realpath_resolve_parents(self, kwargs):
# We also need to resolve any symlinks in the parents of a relative
# path passed to realpath. E.g.: current working directory is
# /usr/doc with 'doc' being a symlink to /usr/share/doc. We call
@@ -702,7 +740,8 @@ class PosixPathTest(unittest.TestCase):
os.symlink(ABSTFN + "/y", ABSTFN + "/k")
with os_helper.change_cwd(ABSTFN + "/k"):
- self.assertEqual(realpath("a"), ABSTFN + "/y/a")
+ self.assertEqual(realpath("a", **kwargs),
+ ABSTFN + "/y/a")
finally:
os_helper.unlink(ABSTFN + "/k")
os_helper.rmdir(ABSTFN + "/y")
@@ -710,7 +749,8 @@ class PosixPathTest(unittest.TestCase):
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_resolve_before_normalizing(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_resolve_before_normalizing(self, kwargs):
# Bug #990669: Symbolic links should be resolved before we
# normalize the path. E.g.: if we have directories 'a', 'k' and 'y'
# in the following hierarchy:
@@ -725,10 +765,10 @@ class PosixPathTest(unittest.TestCase):
os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y")
# Absolute path.
- self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
+ self.assertEqual(realpath(ABSTFN + "/link-y/..", **kwargs), ABSTFN + "/k")
# Relative path.
with os_helper.change_cwd(dirname(ABSTFN)):
- self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."),
+ self.assertEqual(realpath(basename(ABSTFN) + "/link-y/..", **kwargs),
ABSTFN + "/k")
finally:
os_helper.unlink(ABSTFN + "/link-y")
@@ -738,7 +778,8 @@ class PosixPathTest(unittest.TestCase):
@os_helper.skip_unless_symlink
@skip_if_ABSTFN_contains_backslash
- def test_realpath_resolve_first(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_resolve_first(self, kwargs):
# Bug #1213894: The first component of the path, if not absolute,
# must be resolved too.
@@ -748,8 +789,8 @@ class PosixPathTest(unittest.TestCase):
os.symlink(ABSTFN, ABSTFN + "link")
with os_helper.change_cwd(dirname(ABSTFN)):
base = basename(ABSTFN)
- self.assertEqual(realpath(base + "link"), ABSTFN)
- self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
+ self.assertEqual(realpath(base + "link", **kwargs), ABSTFN)
+ self.assertEqual(realpath(base + "link/k", **kwargs), ABSTFN + "/k")
finally:
os_helper.unlink(ABSTFN + "link")
os_helper.rmdir(ABSTFN + "/k")
@@ -767,12 +808,67 @@ class PosixPathTest(unittest.TestCase):
self.assertEqual(realpath(ABSTFN + '/foo'), ABSTFN + '/foo')
self.assertEqual(realpath(ABSTFN + '/../foo'), dirname(ABSTFN) + '/foo')
self.assertEqual(realpath(ABSTFN + '/foo/..'), ABSTFN)
- with self.assertRaises(PermissionError):
- realpath(ABSTFN, strict=True)
finally:
os.chmod(ABSTFN, 0o755, follow_symlinks=False)
os_helper.unlink(ABSTFN)
+ @os_helper.skip_unless_symlink
+ @skip_if_ABSTFN_contains_backslash
+ @unittest.skipIf(os.chmod not in os.supports_follow_symlinks, "Can't set symlink permissions")
+ @unittest.skipIf(sys.platform != "darwin", "only macOS requires read permission to readlink()")
+ @_parameterize({'strict': True}, {'strict': ALLOW_MISSING})
+ def test_realpath_unreadable_symlink_strict(self, kwargs):
+ try:
+ os.symlink(ABSTFN+"1", ABSTFN)
+ os.chmod(ABSTFN, 0o000, follow_symlinks=False)
+ with self.assertRaises(PermissionError):
+ realpath(ABSTFN, **kwargs)
+ with self.assertRaises(PermissionError):
+ realpath(ABSTFN + '/foo', **kwargs)
+ with self.assertRaises(PermissionError):
+ realpath(ABSTFN + '/../foo', **kwargs)
+ with self.assertRaises(PermissionError):
+ realpath(ABSTFN + '/foo/..', **kwargs)
+ finally:
+ os.chmod(ABSTFN, 0o755, follow_symlinks=False)
+ os.unlink(ABSTFN)
+
+ @skip_if_ABSTFN_contains_backslash
+ @os_helper.skip_unless_symlink
+ def test_realpath_unreadable_directory(self):
+ try:
+ os.mkdir(ABSTFN)
+ os.mkdir(ABSTFN + '/k')
+ os.chmod(ABSTFN, 0o000)
+ self.assertEqual(realpath(ABSTFN, strict=False), ABSTFN)
+ self.assertEqual(realpath(ABSTFN, strict=True), ABSTFN)
+ self.assertEqual(realpath(ABSTFN, strict=ALLOW_MISSING), ABSTFN)
+
+ try:
+ os.stat(ABSTFN)
+ except PermissionError:
+ pass
+ else:
+ self.skipTest('Cannot block permissions')
+
+ self.assertEqual(realpath(ABSTFN + '/k', strict=False),
+ ABSTFN + '/k')
+ self.assertRaises(PermissionError, realpath, ABSTFN + '/k',
+ strict=True)
+ self.assertRaises(PermissionError, realpath, ABSTFN + '/k',
+ strict=ALLOW_MISSING)
+
+ self.assertEqual(realpath(ABSTFN + '/missing', strict=False),
+ ABSTFN + '/missing')
+ self.assertRaises(PermissionError, realpath, ABSTFN + '/missing',
+ strict=True)
+ self.assertRaises(PermissionError, realpath, ABSTFN + '/missing',
+ strict=ALLOW_MISSING)
+ finally:
+ os.chmod(ABSTFN, 0o755)
+ os_helper.rmdir(ABSTFN + '/k')
+ os_helper.rmdir(ABSTFN)
+
@skip_if_ABSTFN_contains_backslash
def test_realpath_nonterminal_file(self):
try:
@@ -780,14 +876,27 @@ class PosixPathTest(unittest.TestCase):
f.write('test_posixpath wuz ere')
self.assertEqual(realpath(ABSTFN, strict=False), ABSTFN)
self.assertEqual(realpath(ABSTFN, strict=True), ABSTFN)
+ self.assertEqual(realpath(ABSTFN, strict=ALLOW_MISSING), ABSTFN)
+
self.assertEqual(realpath(ABSTFN + "/", strict=False), ABSTFN)
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/.", strict=False), ABSTFN)
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/.", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/.",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/..", strict=False), dirname(ABSTFN))
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/..", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/..",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/subdir", strict=False), ABSTFN + "/subdir")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/subdir", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/subdir",
+ strict=ALLOW_MISSING)
finally:
os_helper.unlink(ABSTFN)
@@ -800,14 +909,27 @@ class PosixPathTest(unittest.TestCase):
os.symlink(ABSTFN + "1", ABSTFN)
self.assertEqual(realpath(ABSTFN, strict=False), ABSTFN + "1")
self.assertEqual(realpath(ABSTFN, strict=True), ABSTFN + "1")
+ self.assertEqual(realpath(ABSTFN, strict=ALLOW_MISSING), ABSTFN + "1")
+
self.assertEqual(realpath(ABSTFN + "/", strict=False), ABSTFN + "1")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/.", strict=False), ABSTFN + "1")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/.", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/.",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/..", strict=False), dirname(ABSTFN))
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/..", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/..",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/subdir", strict=False), ABSTFN + "1/subdir")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/subdir", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/subdir",
+ strict=ALLOW_MISSING)
finally:
os_helper.unlink(ABSTFN)
os_helper.unlink(ABSTFN + "1")
@@ -822,14 +944,27 @@ class PosixPathTest(unittest.TestCase):
os.symlink(ABSTFN + "1", ABSTFN)
self.assertEqual(realpath(ABSTFN, strict=False), ABSTFN + "2")
self.assertEqual(realpath(ABSTFN, strict=True), ABSTFN + "2")
+ self.assertEqual(realpath(ABSTFN, strict=ALLOW_MISSING), ABSTFN + "2")
+
self.assertEqual(realpath(ABSTFN + "/", strict=False), ABSTFN + "2")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/.", strict=False), ABSTFN + "2")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/.", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/.",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/..", strict=False), dirname(ABSTFN))
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/..", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/..",
+ strict=ALLOW_MISSING)
+
self.assertEqual(realpath(ABSTFN + "/subdir", strict=False), ABSTFN + "2/subdir")
self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/subdir", strict=True)
+ self.assertRaises(NotADirectoryError, realpath, ABSTFN + "/subdir",
+ strict=ALLOW_MISSING)
finally:
os_helper.unlink(ABSTFN)
os_helper.unlink(ABSTFN + "1")
@@ -1017,9 +1152,12 @@ class PathLikeTests(unittest.TestCase):
def test_path_abspath(self):
self.assertPathEqual(self.path.abspath)
- def test_path_realpath(self):
+ @_parameterize({}, {'strict': True}, {'strict': ALLOW_MISSING})
+ def test_path_realpath(self, kwargs):
self.assertPathEqual(self.path.realpath)
+ self.assertPathEqual(partial(self.path.realpath, **kwargs))
+
def test_path_relpath(self):
self.assertPathEqual(self.path.relpath)
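
Taken together, the parameterized cases above characterize the three strict modes: False never raises for missing components, True raises FileNotFoundError for them, and ALLOW_MISSING resolves everything that exists, tolerates a missing tail, and still raises for real errors such as symlink loops or permission problems. A minimal interactive check, assuming an interpreter new enough to expose os.path.ALLOW_MISSING and using a hypothetical missing path:

    import os.path

    missing = "/tmp/definitely-not-there/child"      # hypothetical missing path
    print(os.path.realpath(missing, strict=False))   # resolved as far as possible, missing parts kept
    print(os.path.realpath(missing, strict=os.path.ALLOW_MISSING))  # likewise, missing parts tolerated
    try:
        os.path.realpath(missing, strict=True)
    except FileNotFoundError as exc:
        print("strict=True raised:", exc)
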
diff --git a/Lib/test/test_pprint.py b/Lib/test/test_pprint.py
index 0c84d3d3bfd..41c337ade7e 100644
--- a/Lib/test/test_pprint.py
+++ b/Lib/test/test_pprint.py
@@ -458,7 +458,7 @@ class QueryTestCase(unittest.TestCase):
return super().__new__(Temperature, celsius_degrees)
def __repr__(self):
kelvin_degrees = self + 273.15
- return f"{kelvin_degrees}°K"
+ return f"{kelvin_degrees:.2f}°K"
self.assertEqual(pprint.pformat(Temperature(1000)), '1273.15°K')
def test_sorted_dict(self):
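
The switch from f"{kelvin_degrees}" to f"{kelvin_degrees:.2f}" keeps binary-float noise out of the expected repr; without explicit formatting, a sum that is not exactly representable leaks extra digits:

    print(0.1 + 0.2)            # 0.30000000000000004 -- what a bare f"{...}" would embed
    print(f"{0.1 + 0.2:.2f}")   # 0.30 -- pinned to two decimals, stable across inputs
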
diff --git a/Lib/test/test_pty.py b/Lib/test/test_pty.py
index c1728f5019d..4836f38c388 100644
--- a/Lib/test/test_pty.py
+++ b/Lib/test/test_pty.py
@@ -20,7 +20,6 @@ import select
import signal
import socket
import io # readline
-import warnings
TEST_STRING_1 = b"I wish to buy a fish license.\n"
TEST_STRING_2 = b"For my pet fish, Eric.\n"
diff --git a/Lib/test/test_pyclbr.py b/Lib/test/test_pyclbr.py
index 3e7b2cd0dc9..bce68e6cd7a 100644
--- a/Lib/test/test_pyclbr.py
+++ b/Lib/test/test_pyclbr.py
@@ -11,7 +11,6 @@ from types import FunctionType, MethodType, BuiltinFunctionType
import pyclbr
from unittest import TestCase, main as unittest_main
from test.test_importlib import util as test_importlib_util
-import warnings
StaticMethodType = type(staticmethod(lambda: None))
@@ -246,9 +245,6 @@ class PyclbrTest(TestCase):
# These were once some of the longest modules.
cm('random', ignore=('Random',)) # from _random import Random as CoreGenerator
cm('pickle', ignore=('partial', 'PickleBuffer'))
- with warnings.catch_warnings():
- warnings.simplefilter('ignore', DeprecationWarning)
- cm('sre_parse', ignore=('dump', 'groups', 'pos')) # from sre_constants import *; property
with temporary_main_spec():
cm(
'pdb',
diff --git a/Lib/test/test_pydoc/test_pydoc.py b/Lib/test/test_pydoc/test_pydoc.py
index 281b24eaa36..d1d6f4987de 100644
--- a/Lib/test/test_pydoc/test_pydoc.py
+++ b/Lib/test/test_pydoc/test_pydoc.py
@@ -553,7 +553,7 @@ class PydocDocTest(unittest.TestCase):
# of the known subclasses of object. (doc.docclass() used to
# fail if HeapType was imported before running this test, like
# when running tests sequentially.)
- from _testcapi import HeapType
+ from _testcapi import HeapType # noqa: F401
except ImportError:
pass
text = doc.docclass(object)
diff --git a/Lib/test/test_pyexpat.py b/Lib/test/test_pyexpat.py
index 1d56ccd71cf..d4b4f60be98 100644
--- a/Lib/test/test_pyexpat.py
+++ b/Lib/test/test_pyexpat.py
@@ -9,12 +9,11 @@ import traceback
from io import BytesIO
from test import support
from test.support import os_helper
-
+from test.support import sortdict
+from unittest import mock
from xml.parsers import expat
from xml.parsers.expat import errors
-from test.support import sortdict
-
class SetAttributeTest(unittest.TestCase):
def setUp(self):
@@ -436,6 +435,19 @@ class BufferTextTest(unittest.TestCase):
"<!--abc-->", "4", "<!--def-->", "5", "</a>"],
"buffered text not properly split")
+ def test_change_character_data_handler_in_callback(self):
+ # Test that xmlparse_handler_setter() properly handles
+ # the special case "parser.CharacterDataHandler = None".
+ def handler(*args):
+ parser.CharacterDataHandler = None
+
+ handler_wrapper = mock.Mock(wraps=handler)
+ parser = expat.ParserCreate()
+ parser.CharacterDataHandler = handler_wrapper
+ parser.Parse(b"<a>1<b/>2<c></c>3<!--abc-->4<!--def-->5</a> ", True)
+ handler_wrapper.assert_called_once()
+ self.assertIsNone(parser.CharacterDataHandler)
+
# Test handling of exception from callback:
class HandlerExceptionTest(unittest.TestCase):
@@ -595,7 +607,7 @@ class ChardataBufferTest(unittest.TestCase):
def test_disabling_buffer(self):
xml1 = b"<?xml version='1.0' encoding='iso8859'?><a>" + b'a' * 512
xml2 = b'b' * 1024
- xml3 = b'c' * 1024 + b'</a>';
+ xml3 = b'c' * 1024 + b'</a>'
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_text = 1
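
Stripped of the mock bookkeeping, the new test boils down to clearing the handler from inside its own callback, which xmlparse_handler_setter() must accept. A minimal sketch of the same scenario:

    from xml.parsers import expat

    parser = expat.ParserCreate()

    def on_chardata(data):
        # Disable further character-data callbacks from within the callback itself.
        parser.CharacterDataHandler = None

    parser.CharacterDataHandler = on_chardata
    parser.Parse(b"<a>1<b/>2</a>", True)
    assert parser.CharacterDataHandler is None
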
diff --git a/Lib/test/test_pyrepl/test_pyrepl.py b/Lib/test/test_pyrepl/test_pyrepl.py
index abb4bd1bc25..98bae7dd703 100644
--- a/Lib/test/test_pyrepl/test_pyrepl.py
+++ b/Lib/test/test_pyrepl/test_pyrepl.py
@@ -918,7 +918,14 @@ class TestPyReplCompleter(TestCase):
class TestPyReplModuleCompleter(TestCase):
def setUp(self):
+ import importlib
+ # Make iter_modules() search only the standard library.
+ # This makes the test more reliable in case there are
+ # other user packages/scripts on PYTHONPATH which can
+ # interfere with the completions.
+ lib_path = os.path.dirname(importlib.__path__[0])
self._saved_sys_path = sys.path
+ sys.path = [lib_path]
def tearDown(self):
sys.path = self._saved_sys_path
@@ -932,14 +939,6 @@ class TestPyReplModuleCompleter(TestCase):
return reader
def test_import_completions(self):
- import importlib
- # Make iter_modules() search only the standard library.
- # This makes the test more reliable in case there are
- # other user packages/scripts on PYTHONPATH which can
- # intefere with the completions.
- lib_path = os.path.dirname(importlib.__path__[0])
- sys.path = [lib_path]
-
cases = (
("import path\t\n", "import pathlib"),
("import importlib.\t\tres\t\n", "import importlib.resources"),
@@ -1052,6 +1051,19 @@ class TestPyReplModuleCompleter(TestCase):
output = reader.readline()
self.assertEqual(output, expected)
+ def test_no_fallback_on_regular_completion(self):
+ cases = (
+ ("import pri\t\n", "import pri"),
+ ("from pri\t\n", "from pri"),
+ ("from typing import Na\t\n", "from typing import Na"),
+ )
+ for code, expected in cases:
+ with self.subTest(code=code):
+ events = code_to_events(code)
+ reader = self.prepare_reader(events, namespace={})
+ output = reader.readline()
+ self.assertEqual(output, expected)
+
def test_get_path_and_prefix(self):
cases = (
('', ('', '')),
@@ -1660,6 +1672,17 @@ class TestMain(ReplTestCase):
self.assertEqual(exit_code, 0)
self.assertNotIn("TypeError", output)
+ @force_not_colorized
+ def test_non_string_suggestion_candidates(self):
+ commands = ("import runpy\n"
+ "runpy._run_module_code('blech', {0: '', 'bluch': ''}, '')\n"
+ "exit()\n")
+
+ output, exit_code = self.run_repl(commands)
+ self.assertEqual(exit_code, 0)
+ self.assertNotIn("all elements in 'candidates' must be strings", output)
+ self.assertIn("bluch", output)
+
def test_readline_history_file(self):
# skip, if readline module is not available
readline = import_module('readline')
diff --git a/Lib/test/test_pyrepl/test_unix_console.py b/Lib/test/test_pyrepl/test_unix_console.py
index c447b310c49..b3f7dc028fe 100644
--- a/Lib/test/test_pyrepl/test_unix_console.py
+++ b/Lib/test/test_pyrepl/test_unix_console.py
@@ -20,6 +20,7 @@ except ImportError:
def unix_console(events, **kwargs):
console = UnixConsole()
console.get_event = MagicMock(side_effect=events)
+ console.getpending = MagicMock(return_value=Event("key", ""))
height = kwargs.get("height", 25)
width = kwargs.get("width", 80)
diff --git a/Lib/test/test_pyrepl/test_windows_console.py b/Lib/test/test_pyrepl/test_windows_console.py
index e7bab226b31..f9607e02c60 100644
--- a/Lib/test/test_pyrepl/test_windows_console.py
+++ b/Lib/test/test_pyrepl/test_windows_console.py
@@ -35,6 +35,7 @@ class WindowsConsoleTests(TestCase):
def console(self, events, **kwargs) -> Console:
console = WindowsConsole()
console.get_event = MagicMock(side_effect=events)
+ console.getpending = MagicMock(return_value=Event("key", ""))
console.wait = MagicMock()
console._scroll = MagicMock()
console._hide_cursor = MagicMock()
@@ -385,6 +386,7 @@ class WindowsConsoleGetEventTests(TestCase):
self.console._read_input = self.mock
self.console._WindowsConsole__vt_support = kwargs.get("vt_support",
False)
+ self.console.wait = MagicMock(return_value=True)
event = self.console.get_event(block=False)
return event
diff --git a/Lib/test/test_queue.py b/Lib/test/test_queue.py
index 7f4fe357034..c855fb8fe2b 100644
--- a/Lib/test/test_queue.py
+++ b/Lib/test/test_queue.py
@@ -6,7 +6,7 @@ import threading
import time
import unittest
import weakref
-from test.support import gc_collect
+from test.support import gc_collect, bigmemtest
from test.support import import_helper
from test.support import threading_helper
@@ -963,33 +963,33 @@ class BaseSimpleQueueTest:
# One producer, one consumer => results appended in well-defined order
self.assertEqual(results, inputs)
- def test_many_threads(self):
+ @bigmemtest(size=50, memuse=100*2**20, dry_run=False)
+ def test_many_threads(self, size):
# Test multiple concurrent put() and get()
- N = 50
q = self.q
inputs = list(range(10000))
- results = self.run_threads(N, q, inputs, self.feed, self.consume)
+ results = self.run_threads(size, q, inputs, self.feed, self.consume)
# Multiple consumers without synchronization append the
# results in random order
self.assertEqual(sorted(results), inputs)
- def test_many_threads_nonblock(self):
+ @bigmemtest(size=50, memuse=100*2**20, dry_run=False)
+ def test_many_threads_nonblock(self, size):
# Test multiple concurrent put() and get(block=False)
- N = 50
q = self.q
inputs = list(range(10000))
- results = self.run_threads(N, q, inputs,
+ results = self.run_threads(size, q, inputs,
self.feed, self.consume_nonblock)
self.assertEqual(sorted(results), inputs)
- def test_many_threads_timeout(self):
+ @bigmemtest(size=50, memuse=100*2**20, dry_run=False)
+ def test_many_threads_timeout(self, size):
# Test multiple concurrent put() and get(timeout=...)
- N = 50
q = self.q
inputs = list(range(1000))
- results = self.run_threads(N, q, inputs,
+ results = self.run_threads(size, q, inputs,
self.feed, self.consume_timeout)
self.assertEqual(sorted(results), inputs)
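
The three queue tests now receive their thread count through support.bigmemtest, which passes the size into the test and skips it when the configured memory limit cannot cover roughly size * memuse bytes; with dry_run=False the test is skipped rather than shrunk to a token run (an assumption based on how the decorator is used above). Sketch of the pattern:

    import unittest
    from test.support import bigmemtest

    class Demo(unittest.TestCase):
        # Runs with `size` threads' worth of work only when regrtest was given
        # enough memory (-M); otherwise the test is skipped.
        @bigmemtest(size=50, memuse=100 * 2**20, dry_run=False)
        def test_scales_with_size(self, size):
            self.assertLessEqual(size, 50)
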
diff --git a/Lib/test/test_random.py b/Lib/test/test_random.py
index bd76d636e4f..0217ebd132b 100644
--- a/Lib/test/test_random.py
+++ b/Lib/test/test_random.py
@@ -14,6 +14,15 @@ from test import support
from fractions import Fraction
from collections import abc, Counter
+
+class MyIndex:
+ def __init__(self, value):
+ self.value = value
+
+ def __index__(self):
+ return self.value
+
+
class TestBasicOps:
# Superclass with tests common to all generators.
# Subclasses must arrange for self.gen to retrieve the Random instance
@@ -142,6 +151,7 @@ class TestBasicOps:
# Exception raised if size of sample exceeds that of population
self.assertRaises(ValueError, self.gen.sample, population, N+1)
self.assertRaises(ValueError, self.gen.sample, [], -1)
+ self.assertRaises(TypeError, self.gen.sample, population, 1.0)
def test_sample_distribution(self):
# For the entire allowable range of 0 <= k <= N, validate that
@@ -259,6 +269,7 @@ class TestBasicOps:
choices(data, range(4), k=5),
choices(k=5, population=data, weights=range(4)),
choices(k=5, population=data, cum_weights=range(4)),
+ choices(data, k=MyIndex(5)),
]:
self.assertEqual(len(sample), 5)
self.assertEqual(type(sample), list)
@@ -369,118 +380,40 @@ class TestBasicOps:
self.assertEqual(x1, x2)
self.assertEqual(y1, y2)
+ @support.requires_IEEE_754
+ def test_53_bits_per_float(self):
+ span = 2 ** 53
+ cum = 0
+ for i in range(100):
+ cum |= int(self.gen.random() * span)
+ self.assertEqual(cum, span-1)
+
def test_getrandbits(self):
+ getrandbits = self.gen.getrandbits
# Verify ranges
for k in range(1, 1000):
- self.assertTrue(0 <= self.gen.getrandbits(k) < 2**k)
- self.assertEqual(self.gen.getrandbits(0), 0)
+ self.assertTrue(0 <= getrandbits(k) < 2**k)
+ self.assertEqual(getrandbits(0), 0)
# Verify all bits active
- getbits = self.gen.getrandbits
for span in [1, 2, 3, 4, 31, 32, 32, 52, 53, 54, 119, 127, 128, 129]:
all_bits = 2**span-1
cum = 0
cpl_cum = 0
for i in range(100):
- v = getbits(span)
+ v = getrandbits(span)
cum |= v
cpl_cum |= all_bits ^ v
self.assertEqual(cum, all_bits)
self.assertEqual(cpl_cum, all_bits)
# Verify argument checking
- self.assertRaises(TypeError, self.gen.getrandbits)
- self.assertRaises(TypeError, self.gen.getrandbits, 1, 2)
- self.assertRaises(ValueError, self.gen.getrandbits, -1)
- self.assertRaises(TypeError, self.gen.getrandbits, 10.1)
-
- def test_pickling(self):
- for proto in range(pickle.HIGHEST_PROTOCOL + 1):
- state = pickle.dumps(self.gen, proto)
- origseq = [self.gen.random() for i in range(10)]
- newgen = pickle.loads(state)
- restoredseq = [newgen.random() for i in range(10)]
- self.assertEqual(origseq, restoredseq)
-
- def test_bug_1727780(self):
- # verify that version-2-pickles can be loaded
- # fine, whether they are created on 32-bit or 64-bit
- # platforms, and that version-3-pickles load fine.
- files = [("randv2_32.pck", 780),
- ("randv2_64.pck", 866),
- ("randv3.pck", 343)]
- for file, value in files:
- with open(support.findfile(file),"rb") as f:
- r = pickle.load(f)
- self.assertEqual(int(r.random()*1000), value)
-
- def test_bug_9025(self):
- # Had problem with an uneven distribution in int(n*random())
- # Verify the fix by checking that distributions fall within expectations.
- n = 100000
- randrange = self.gen.randrange
- k = sum(randrange(6755399441055744) % 3 == 2 for i in range(n))
- self.assertTrue(0.30 < k/n < .37, (k/n))
-
- def test_randbytes(self):
- # Verify ranges
- for n in range(1, 10):
- data = self.gen.randbytes(n)
- self.assertEqual(type(data), bytes)
- self.assertEqual(len(data), n)
-
- self.assertEqual(self.gen.randbytes(0), b'')
-
- # Verify argument checking
- self.assertRaises(TypeError, self.gen.randbytes)
- self.assertRaises(TypeError, self.gen.randbytes, 1, 2)
- self.assertRaises(ValueError, self.gen.randbytes, -1)
- self.assertRaises(TypeError, self.gen.randbytes, 1.0)
-
- def test_mu_sigma_default_args(self):
- self.assertIsInstance(self.gen.normalvariate(), float)
- self.assertIsInstance(self.gen.gauss(), float)
-
-
-try:
- random.SystemRandom().random()
-except NotImplementedError:
- SystemRandom_available = False
-else:
- SystemRandom_available = True
-
-@unittest.skipUnless(SystemRandom_available, "random.SystemRandom not available")
-class SystemRandom_TestBasicOps(TestBasicOps, unittest.TestCase):
- gen = random.SystemRandom()
-
- def test_autoseed(self):
- # Doesn't need to do anything except not fail
- self.gen.seed()
-
- def test_saverestore(self):
- self.assertRaises(NotImplementedError, self.gen.getstate)
- self.assertRaises(NotImplementedError, self.gen.setstate, None)
-
- def test_seedargs(self):
- # Doesn't need to do anything except not fail
- self.gen.seed(100)
-
- def test_gauss(self):
- self.gen.gauss_next = None
- self.gen.seed(100)
- self.assertEqual(self.gen.gauss_next, None)
-
- def test_pickling(self):
- for proto in range(pickle.HIGHEST_PROTOCOL + 1):
- self.assertRaises(NotImplementedError, pickle.dumps, self.gen, proto)
-
- def test_53_bits_per_float(self):
- # This should pass whenever a C double has 53 bit precision.
- span = 2 ** 53
- cum = 0
- for i in range(100):
- cum |= int(self.gen.random() * span)
- self.assertEqual(cum, span-1)
+ self.assertRaises(TypeError, getrandbits)
+ self.assertRaises(TypeError, getrandbits, 1, 2)
+ self.assertRaises(ValueError, getrandbits, -1)
+ self.assertRaises(OverflowError, getrandbits, 1<<1000)
+ self.assertRaises(ValueError, getrandbits, -1<<1000)
+ self.assertRaises(TypeError, getrandbits, 10.1)
def test_bigrand(self):
# The randrange routine should build-up the required number of bits
@@ -559,6 +492,10 @@ class SystemRandom_TestBasicOps(TestBasicOps, unittest.TestCase):
randrange(1000, step=100)
with self.assertRaises(TypeError):
randrange(1000, None, step=100)
+ with self.assertRaises(TypeError):
+ randrange(1000, step=MyIndex(1))
+ with self.assertRaises(TypeError):
+ randrange(1000, None, step=MyIndex(1))
def test_randbelow_logic(self, _log=log, int=int):
# check bitcount transition points: 2**i and 2**(i+1)-1
@@ -581,6 +518,116 @@ class SystemRandom_TestBasicOps(TestBasicOps, unittest.TestCase):
self.assertEqual(k, numbits) # note the stronger assertion
self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion
+ def test_randrange_index(self):
+ randrange = self.gen.randrange
+ self.assertIn(randrange(MyIndex(5)), range(5))
+ self.assertIn(randrange(MyIndex(2), MyIndex(7)), range(2, 7))
+ self.assertIn(randrange(MyIndex(5), MyIndex(15), MyIndex(2)), range(5, 15, 2))
+
+ def test_randint(self):
+ randint = self.gen.randint
+ self.assertIn(randint(2, 5), (2, 3, 4, 5))
+ self.assertEqual(randint(2, 2), 2)
+ self.assertIn(randint(MyIndex(2), MyIndex(5)), (2, 3, 4, 5))
+ self.assertEqual(randint(MyIndex(2), MyIndex(2)), 2)
+
+ self.assertRaises(ValueError, randint, 5, 2)
+ self.assertRaises(TypeError, randint)
+ self.assertRaises(TypeError, randint, 2)
+ self.assertRaises(TypeError, randint, 2, 5, 1)
+ self.assertRaises(TypeError, randint, 2.0, 5)
+ self.assertRaises(TypeError, randint, 2, 5.0)
+
+ def test_pickling(self):
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ state = pickle.dumps(self.gen, proto)
+ origseq = [self.gen.random() for i in range(10)]
+ newgen = pickle.loads(state)
+ restoredseq = [newgen.random() for i in range(10)]
+ self.assertEqual(origseq, restoredseq)
+
+ def test_bug_1727780(self):
+ # verify that version-2-pickles can be loaded
+ # fine, whether they are created on 32-bit or 64-bit
+ # platforms, and that version-3-pickles load fine.
+ files = [("randv2_32.pck", 780),
+ ("randv2_64.pck", 866),
+ ("randv3.pck", 343)]
+ for file, value in files:
+ with open(support.findfile(file),"rb") as f:
+ r = pickle.load(f)
+ self.assertEqual(int(r.random()*1000), value)
+
+ def test_bug_9025(self):
+ # Had problem with an uneven distribution in int(n*random())
+ # Verify the fix by checking that distributions fall within expectations.
+ n = 100000
+ randrange = self.gen.randrange
+ k = sum(randrange(6755399441055744) % 3 == 2 for i in range(n))
+ self.assertTrue(0.30 < k/n < .37, (k/n))
+
+ def test_randrange_bug_1590891(self):
+ start = 1000000000000
+ stop = -100000000000000000000
+ step = -200
+ x = self.gen.randrange(start, stop, step)
+ self.assertTrue(stop < x <= start)
+ self.assertEqual((x+stop)%step, 0)
+
+ def test_randbytes(self):
+ # Verify ranges
+ for n in range(1, 10):
+ data = self.gen.randbytes(n)
+ self.assertEqual(type(data), bytes)
+ self.assertEqual(len(data), n)
+
+ self.assertEqual(self.gen.randbytes(0), b'')
+
+ # Verify argument checking
+ self.assertRaises(TypeError, self.gen.randbytes)
+ self.assertRaises(TypeError, self.gen.randbytes, 1, 2)
+ self.assertRaises(ValueError, self.gen.randbytes, -1)
+ self.assertRaises(OverflowError, self.gen.randbytes, 1<<1000)
+ self.assertRaises((ValueError, OverflowError), self.gen.randbytes, -1<<1000)
+ self.assertRaises(TypeError, self.gen.randbytes, 1.0)
+
+ def test_mu_sigma_default_args(self):
+ self.assertIsInstance(self.gen.normalvariate(), float)
+ self.assertIsInstance(self.gen.gauss(), float)
+
+
+try:
+ random.SystemRandom().random()
+except NotImplementedError:
+ SystemRandom_available = False
+else:
+ SystemRandom_available = True
+
+@unittest.skipUnless(SystemRandom_available, "random.SystemRandom not available")
+class SystemRandom_TestBasicOps(TestBasicOps, unittest.TestCase):
+ gen = random.SystemRandom()
+
+ def test_autoseed(self):
+ # Doesn't need to do anything except not fail
+ self.gen.seed()
+
+ def test_saverestore(self):
+ self.assertRaises(NotImplementedError, self.gen.getstate)
+ self.assertRaises(NotImplementedError, self.gen.setstate, None)
+
+ def test_seedargs(self):
+ # Doesn't need to do anything except not fail
+ self.gen.seed(100)
+
+ def test_gauss(self):
+ self.gen.gauss_next = None
+ self.gen.seed(100)
+ self.assertEqual(self.gen.gauss_next, None)
+
+ def test_pickling(self):
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ self.assertRaises(NotImplementedError, pickle.dumps, self.gen, proto)
+
class TestRawMersenneTwister(unittest.TestCase):
@test.support.cpython_only
@@ -766,38 +813,6 @@ class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase):
seed = (1 << (10000 * 8)) - 1 # about 10K bytes
self.gen.seed(seed)
- def test_53_bits_per_float(self):
- # This should pass whenever a C double has 53 bit precision.
- span = 2 ** 53
- cum = 0
- for i in range(100):
- cum |= int(self.gen.random() * span)
- self.assertEqual(cum, span-1)
-
- def test_bigrand(self):
- # The randrange routine should build-up the required number of bits
- # in stages so that all bit positions are active.
- span = 2 ** 500
- cum = 0
- for i in range(100):
- r = self.gen.randrange(span)
- self.assertTrue(0 <= r < span)
- cum |= r
- self.assertEqual(cum, span-1)
-
- def test_bigrand_ranges(self):
- for i in [40,80, 160, 200, 211, 250, 375, 512, 550]:
- start = self.gen.randrange(2 ** (i-2))
- stop = self.gen.randrange(2 ** i)
- if stop <= start:
- continue
- self.assertTrue(start <= self.gen.randrange(start, stop) < stop)
-
- def test_rangelimits(self):
- for start, stop in [(-2,0), (-(2**60)-2,-(2**60)), (2**60,2**60+2)]:
- self.assertEqual(set(range(start,stop)),
- set([self.gen.randrange(start,stop) for i in range(100)]))
-
def test_getrandbits(self):
super().test_getrandbits()
@@ -805,6 +820,25 @@ class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase):
self.gen.seed(1234567)
self.assertEqual(self.gen.getrandbits(100),
97904845777343510404718956115)
+ self.gen.seed(1234567)
+ self.assertEqual(self.gen.getrandbits(MyIndex(100)),
+ 97904845777343510404718956115)
+
+ def test_getrandbits_2G_bits(self):
+ size = 2**31
+ self.gen.seed(1234567)
+ x = self.gen.getrandbits(size)
+ self.assertEqual(x.bit_length(), size)
+ self.assertEqual(x & (2**100-1), 890186470919986886340158459475)
+ self.assertEqual(x >> (size-100), 1226514312032729439655761284440)
+
+ @support.bigmemtest(size=2**32, memuse=1/8+2/15, dry_run=False)
+ def test_getrandbits_4G_bits(self, size):
+ self.gen.seed(1234568)
+ x = self.gen.getrandbits(size)
+ self.assertEqual(x.bit_length(), size)
+ self.assertEqual(x & (2**100-1), 287241425661104632871036099814)
+ self.assertEqual(x >> (size-100), 739728759900339699429794460738)
def test_randrange_uses_getrandbits(self):
# Verify use of getrandbits by randrange
@@ -816,27 +850,6 @@ class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase):
self.assertEqual(self.gen.randrange(2**99),
97904845777343510404718956115)
- def test_randbelow_logic(self, _log=log, int=int):
- # check bitcount transition points: 2**i and 2**(i+1)-1
- # show that: k = int(1.001 + _log(n, 2))
- # is equal to or one greater than the number of bits in n
- for i in range(1, 1000):
- n = 1 << i # check an exact power of two
- numbits = i+1
- k = int(1.00001 + _log(n, 2))
- self.assertEqual(k, numbits)
- self.assertEqual(n, 2**(k-1))
-
- n += n - 1 # check 1 below the next power of two
- k = int(1.00001 + _log(n, 2))
- self.assertIn(k, [numbits, numbits+1])
- self.assertTrue(2**k > n > 2**(k-2))
-
- n -= n >> 15 # check a little farther below the next power of two
- k = int(1.00001 + _log(n, 2))
- self.assertEqual(k, numbits) # note the stronger assertion
- self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion
-
def test_randbelow_without_getrandbits(self):
# Random._randbelow() can only use random() when the built-in one
# has been overridden but no new getrandbits() method was supplied.
@@ -871,14 +884,6 @@ class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase):
self.gen._randbelow_without_getrandbits(n, maxsize=maxsize)
self.assertEqual(random_mock.call_count, 2)
- def test_randrange_bug_1590891(self):
- start = 1000000000000
- stop = -100000000000000000000
- step = -200
- x = self.gen.randrange(start, stop, step)
- self.assertTrue(stop < x <= start)
- self.assertEqual((x+stop)%step, 0)
-
def test_choices_algorithms(self):
# The various ways of specifying weights should produce the same results
choices = self.gen.choices
@@ -962,6 +967,14 @@ class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase):
self.assertEqual(self.gen.randbytes(n),
gen2.getrandbits(n * 8).to_bytes(n, 'little'))
+ @support.bigmemtest(size=2**29, memuse=1+16/15, dry_run=False)
+ def test_randbytes_256M(self, size):
+ self.gen.seed(2849427419)
+ x = self.gen.randbytes(size)
+ self.assertEqual(len(x), size)
+ self.assertEqual(x[:12].hex(), 'f6fd9ae63855ab91ea238b4f')
+ self.assertEqual(x[-12:].hex(), '0e7af69a84ee99bf4a11becc')
+
def test_sample_counts_equivalence(self):
# Test the documented strong equivalence to a sample with repeated elements.
# We run this test on random.Random() which makes deterministic selections
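
A recurring theme in the new random cases is that integer arguments only need to implement __index__; they do not have to be real ints, while floats stay rejected. A minimal check outside the test harness, assuming a Python recent enough to include these changes:

    import random

    class MyIndex:
        def __init__(self, value):
            self.value = value
        def __index__(self):
            return self.value

    rng = random.Random(1234567)
    print(rng.randrange(MyIndex(10)))              # drawn from range(10)
    print(rng.getrandbits(MyIndex(8)))             # an 8-bit integer
    print(rng.randint(MyIndex(2), MyIndex(5)))     # inclusive of both ends
    try:
        rng.getrandbits(10.1)                      # floats are rejected outright
    except TypeError as exc:
        print("rejected:", exc)
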
diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py
index e9128ac1d97..993652d2e88 100644
--- a/Lib/test/test_re.py
+++ b/Lib/test/test_re.py
@@ -5,7 +5,6 @@ from test.support import (gc_collect, bigmemtest, _2G,
import locale
import re
import string
-import sys
import unittest
import warnings
from re import Scanner
@@ -2927,33 +2926,6 @@ class ImplementationTest(unittest.TestCase):
pat = re.compile("")
check_disallow_instantiation(self, type(pat.scanner("")))
- def test_deprecated_modules(self):
- deprecated = {
- 'sre_compile': ['compile', 'error',
- 'SRE_FLAG_IGNORECASE', 'SUBPATTERN',
- '_compile_info'],
- 'sre_constants': ['error', 'SRE_FLAG_IGNORECASE', 'SUBPATTERN',
- '_NamedIntConstant'],
- 'sre_parse': ['SubPattern', 'parse',
- 'SRE_FLAG_IGNORECASE', 'SUBPATTERN',
- '_parse_sub'],
- }
- for name in deprecated:
- with self.subTest(module=name):
- sys.modules.pop(name, None)
- with self.assertWarns(DeprecationWarning) as w:
- __import__(name)
- self.assertEqual(str(w.warning),
- f"module {name!r} is deprecated")
- self.assertEqual(w.filename, __file__)
- self.assertIn(name, sys.modules)
- mod = sys.modules[name]
- self.assertEqual(mod.__name__, name)
- self.assertEqual(mod.__package__, '')
- for attr in deprecated[name]:
- self.assertHasAttr(mod, attr)
- del sys.modules[name]
-
@cpython_only
def test_case_helpers(self):
import _sre
diff --git a/Lib/test/test_readline.py b/Lib/test/test_readline.py
index b9d082b3597..45192fe5082 100644
--- a/Lib/test/test_readline.py
+++ b/Lib/test/test_readline.py
@@ -1,6 +1,7 @@
"""
Very minimal unittests for parts of the readline module.
"""
+import codecs
import locale
import os
import sys
@@ -231,6 +232,13 @@ print("History length:", readline.get_current_history_length())
# writing and reading non-ASCII bytes into/from a TTY works, but
# readline or ncurses ignores non-ASCII bytes on read.
self.skipTest(f"the LC_CTYPE locale is {loc!r}")
+ if sys.flags.utf8_mode:
+ encoding = locale.getencoding()
+ encoding = codecs.lookup(encoding).name # normalize the name
+ if encoding != "utf-8":
+ # gh-133711: The Python UTF-8 Mode ignores the LC_CTYPE locale
+ # and always uses the UTF-8 encoding.
+ self.skipTest(f"the LC_CTYPE encoding is {encoding!r}")
try:
readline.add_history("\xEB\xEF")
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index 7e317d5ab94..5bc3c5924b0 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -768,13 +768,16 @@ class BaseTestCase(unittest.TestCase):
self.fail(msg)
return proc
- def run_python(self, args, **kw):
+ def run_python(self, args, isolated=True, **kw):
extraargs = []
if 'uops' in sys._xoptions:
# Pass -X uops along
extraargs.extend(['-X', 'uops'])
- args = [sys.executable, *extraargs, '-X', 'faulthandler', '-I', *args]
- proc = self.run_command(args, **kw)
+ cmd = [sys.executable, *extraargs, '-X', 'faulthandler']
+ if isolated:
+ cmd.append('-I')
+ cmd.extend(args)
+ proc = self.run_command(cmd, **kw)
return proc.stdout
@@ -831,8 +834,8 @@ class ProgramsTestCase(BaseTestCase):
self.check_executed_tests(output, self.tests,
randomize=True, stats=len(self.tests))
- def run_tests(self, args, env=None):
- output = self.run_python(args, env=env)
+ def run_tests(self, args, env=None, isolated=True):
+ output = self.run_python(args, env=env, isolated=isolated)
self.check_output(output)
def test_script_regrtest(self):
@@ -874,7 +877,10 @@ class ProgramsTestCase(BaseTestCase):
self.run_tests(args)
def run_batch(self, *args):
- proc = self.run_command(args)
+ proc = self.run_command(args,
+ # gh-133711: cmd.exe uses the OEM code page
+ # to display the non-ASCII current directory
+ errors="backslashreplace")
self.check_output(proc.stdout)
@unittest.skipUnless(sysconfig.is_python_build(),
@@ -2064,7 +2070,7 @@ class ArgsTestCase(BaseTestCase):
self.check_executed_tests(output, [testname],
failed=[testname],
parallel=True,
- stats=TestStats(1, 1, 0))
+ stats=TestStats(1, 2, 1))
def _check_random_seed(self, run_workers: bool):
# gh-109276: When -r/--randomize is used, random.seed() is called
@@ -2273,7 +2279,6 @@ class ArgsTestCase(BaseTestCase):
def test_xml(self):
code = textwrap.dedent(r"""
import unittest
- from test import support
class VerboseTests(unittest.TestCase):
def test_failed(self):
@@ -2308,6 +2313,50 @@ class ArgsTestCase(BaseTestCase):
for out in testcase.iter('system-out'):
self.assertEqual(out.text, r"abc \x1b def")
+ def test_nonascii(self):
+ code = textwrap.dedent(r"""
+ import unittest
+
+ class NonASCIITests(unittest.TestCase):
+ def test_docstring(self):
+ '''docstring:\u20ac'''
+
+ def test_subtest(self):
+ with self.subTest(param='subtest:\u20ac'):
+ pass
+
+ def test_skip(self):
+ self.skipTest('skipped:\u20ac')
+ """)
+ testname = self.create_test(code=code)
+
+ env = dict(os.environ)
+ env['PYTHONIOENCODING'] = 'ascii'
+
+ def check(output):
+ self.check_executed_tests(output, testname, stats=TestStats(3, 0, 1))
+ self.assertIn(r'docstring:\u20ac', output)
+ self.assertIn(r'skipped:\u20ac', output)
+
+ # Run sequentially
+ output = self.run_tests('-v', testname, env=env, isolated=False)
+ check(output)
+
+ # Run in parallel
+ output = self.run_tests('-j1', '-v', testname, env=env, isolated=False)
+ check(output)
+
+ def test_pgo_exclude(self):
+ # Get PGO tests
+ output = self.run_tests('--pgo', '--list-tests')
+ pgo_tests = output.strip().split()
+
+ # Exclude test_re
+ output = self.run_tests('--pgo', '--list-tests', '-x', 'test_re')
+ tests = output.strip().split()
+ self.assertNotIn('test_re', tests)
+ self.assertEqual(len(tests), len(pgo_tests) - 1)
+
class TestUtils(unittest.TestCase):
def test_format_duration(self):
diff --git a/Lib/test/test_remote_pdb.py b/Lib/test/test_remote_pdb.py
index aef8a6b0129..a1c50af15f3 100644
--- a/Lib/test/test_remote_pdb.py
+++ b/Lib/test/test_remote_pdb.py
@@ -1,5 +1,4 @@
import io
-import time
import itertools
import json
import os
@@ -8,16 +7,13 @@ import signal
import socket
import subprocess
import sys
-import tempfile
import textwrap
-import threading
import unittest
import unittest.mock
from contextlib import closing, contextmanager, redirect_stdout, redirect_stderr, ExitStack
-from pathlib import Path
from test.support import is_wasi, cpython_only, force_color, requires_subprocess, SHORT_TIMEOUT
-from test.support.os_helper import temp_dir, TESTFN, unlink
-from typing import Dict, List, Optional, Tuple, Union, Any
+from test.support.os_helper import TESTFN, unlink
+from typing import List
import pdb
from pdb import _PdbServer, _PdbClient
@@ -1434,7 +1430,6 @@ class PdbConnectTestCase(unittest.TestCase):
def _supports_remote_attaching():
- from contextlib import suppress
PROCESS_VM_READV_SUPPORTED = False
try:
diff --git a/Lib/test/test_reprlib.py b/Lib/test/test_reprlib.py
index 16623654c29..22a55b57c07 100644
--- a/Lib/test/test_reprlib.py
+++ b/Lib/test/test_reprlib.py
@@ -151,14 +151,38 @@ class ReprTests(unittest.TestCase):
eq(r(frozenset({1, 2, 3, 4, 5, 6, 7})), "frozenset({1, 2, 3, 4, 5, 6, ...})")
def test_numbers(self):
- eq = self.assertEqual
- eq(r(123), repr(123))
- eq(r(123), repr(123))
- eq(r(1.0/3), repr(1.0/3))
-
- n = 10**100
- expected = repr(n)[:18] + "..." + repr(n)[-19:]
- eq(r(n), expected)
+ for x in [123, 1.0 / 3]:
+ self.assertEqual(r(x), repr(x))
+
+ max_digits = sys.get_int_max_str_digits()
+ for k in [100, max_digits - 1]:
+ with self.subTest(f'10 ** {k}', k=k):
+ n = 10 ** k
+ expected = repr(n)[:18] + "..." + repr(n)[-19:]
+ self.assertEqual(r(n), expected)
+
+ def re_msg(n, d):
+ return (rf'<{n.__class__.__name__} instance with roughly {d} '
+ rf'digits \(limit at {max_digits}\) at 0x[a-f0-9]+>')
+
+ k = max_digits
+ with self.subTest(f'10 ** {k}', k=k):
+ n = 10 ** k
+ self.assertRaises(ValueError, repr, n)
+ self.assertRegex(r(n), re_msg(n, k + 1))
+
+ for k in [max_digits + 1, 2 * max_digits]:
+ self.assertGreater(k, 100)
+ with self.subTest(f'10 ** {k}', k=k):
+ n = 10 ** k
+ self.assertRaises(ValueError, repr, n)
+ self.assertRegex(r(n), re_msg(n, k + 1))
+ with self.subTest(f'10 ** {k} - 1', k=k):
+ n = 10 ** k - 1
+ # Here math.log10(10**k - 1) rounds to the same value as math.log10(10**k),
+ # so the digit count of n is overestimated by one.
+ self.assertRaises(ValueError, repr, n)
+ self.assertRegex(r(n), re_msg(n, k + 1))
def test_instance(self):
eq = self.assertEqual
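
The rewritten test_numbers covers the interaction with the integer-to-string conversion limit: once an int has more digits than sys.get_int_max_str_digits() allows, plain repr() raises, and reprlib is expected to fall back to a size-only placeholder. A hedged sketch of that behaviour on an interpreter that includes this change (640 is the smallest limit the API accepts):

    import reprlib
    import sys

    sys.set_int_max_str_digits(640)     # lower the limit to keep the demo cheap
    n = 10 ** 640                       # 641 digits: one more than the limit
    try:
        repr(n)
    except ValueError:
        print("repr() refuses the conversion")
    print(reprlib.repr(n))   # e.g. "<int instance with roughly 641 digits (limit at 640) at 0x...>"
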
@@ -373,20 +397,20 @@ class ReprTests(unittest.TestCase):
'object': {
1: 'two',
b'three': [
- (4.5, 6.7),
+ (4.5, 6.25),
[set((8, 9)), frozenset((10, 11))],
],
},
'tests': (
(dict(indent=None), '''\
- {1: 'two', b'three': [(4.5, 6.7), [{8, 9}, frozenset({10, 11})]]}'''),
+ {1: 'two', b'three': [(4.5, 6.25), [{8, 9}, frozenset({10, 11})]]}'''),
(dict(indent=False), '''\
{
1: 'two',
b'three': [
(
4.5,
- 6.7,
+ 6.25,
),
[
{
@@ -406,7 +430,7 @@ class ReprTests(unittest.TestCase):
b'three': [
(
4.5,
- 6.7,
+ 6.25,
),
[
{
@@ -426,7 +450,7 @@ class ReprTests(unittest.TestCase):
b'three': [
(
4.5,
- 6.7,
+ 6.25,
),
[
{
@@ -446,7 +470,7 @@ class ReprTests(unittest.TestCase):
b'three': [
(
4.5,
- 6.7,
+ 6.25,
),
[
{
@@ -466,7 +490,7 @@ class ReprTests(unittest.TestCase):
b'three': [
(
4.5,
- 6.7,
+ 6.25,
),
[
{
@@ -494,7 +518,7 @@ class ReprTests(unittest.TestCase):
b'three': [
(
4.5,
- 6.7,
+ 6.25,
),
[
{
@@ -514,7 +538,7 @@ class ReprTests(unittest.TestCase):
-->b'three': [
-->-->(
-->-->-->4.5,
- -->-->-->6.7,
+ -->-->-->6.25,
-->-->),
-->-->[
-->-->-->{
@@ -534,7 +558,7 @@ class ReprTests(unittest.TestCase):
....b'three': [
........(
............4.5,
- ............6.7,
+ ............6.25,
........),
........[
............{
diff --git a/Lib/test/test_shutil.py b/Lib/test/test_shutil.py
index 62c80aab4b3..ebb6cf88336 100644
--- a/Lib/test/test_shutil.py
+++ b/Lib/test/test_shutil.py
@@ -3492,7 +3492,7 @@ class PublicAPITests(unittest.TestCase):
target_api.append('disk_usage')
self.assertEqual(set(shutil.__all__), set(target_api))
with self.assertWarns(DeprecationWarning):
- from shutil import ExecError
+ from shutil import ExecError # noqa: F401
if __name__ == '__main__':
diff --git a/Lib/test/test_sqlite3/test_cli.py b/Lib/test/test_sqlite3/test_cli.py
index 37e0f74f688..720fa3c4c1e 100644
--- a/Lib/test/test_sqlite3/test_cli.py
+++ b/Lib/test/test_sqlite3/test_cli.py
@@ -1,14 +1,22 @@
"""sqlite3 CLI tests."""
import sqlite3
+import sys
+import textwrap
import unittest
+import unittest.mock
+import os
from sqlite3.__main__ import main as cli
+from test.support.import_helper import import_module
from test.support.os_helper import TESTFN, unlink
+from test.support.pty_helper import run_pty
from test.support import (
captured_stdout,
captured_stderr,
captured_stdin,
force_not_colorized_test_class,
+ requires_subprocess,
+ verbose,
)
@@ -130,7 +138,7 @@ class InteractiveSession(unittest.TestCase):
self.assertEndsWith(out, self.PS1)
self.assertEqual(out.count(self.PS1), 2)
self.assertEqual(out.count(self.PS2), 0)
- self.assertIn("Error", err)
+ self.assertIn('Error: unknown command: "', err)
# test "unknown_command" is pointed out in the error message
self.assertIn("unknown_command", err)
@@ -200,5 +208,108 @@ class InteractiveSession(unittest.TestCase):
self.assertIn('\x1b[1;35mOperationalError (SQLITE_ERROR)\x1b[0m: '
'\x1b[35mnear "sel": syntax error\x1b[0m', err)
+
+@requires_subprocess()
+@force_not_colorized_test_class
+class Completion(unittest.TestCase):
+ PS1 = "sqlite> "
+
+ @classmethod
+ def setUpClass(cls):
+ _sqlite3 = import_module("_sqlite3")
+ if not hasattr(_sqlite3, "SQLITE_KEYWORDS"):
+ raise unittest.SkipTest("unable to determine SQLite keywords")
+
+ readline = import_module("readline")
+ if readline.backend == "editline":
+ raise unittest.SkipTest("libedit readline is not supported")
+
+ def write_input(self, input_, env=None):
+ script = textwrap.dedent("""
+ import readline
+ from sqlite3.__main__ import main
+
+ readline.parse_and_bind("set colored-completion-prefix off")
+ main()
+ """)
+ return run_pty(script, input_, env)
+
+ def test_complete_sql_keywords(self):
+ # List candidates starting with 'S'; there should be multiple matches.
+ input_ = b"S\t\tEL\t 1;\n.quit\n"
+ output = self.write_input(input_)
+ self.assertIn(b"SELECT", output)
+ self.assertIn(b"SET", output)
+ self.assertIn(b"SAVEPOINT", output)
+ self.assertIn(b"(1,)", output)
+
+ # Keywords are completed in upper case, even for lower case user input.
+ input_ = b"sel\t\t 1;\n.quit\n"
+ output = self.write_input(input_)
+ self.assertIn(b"SELECT", output)
+ self.assertIn(b"(1,)", output)
+
+ @unittest.skipIf(sys.platform.startswith("freebsd"),
+ "Two actual tabs are inserted when there are no matching"
+ " completions in the pseudo-terminal opened by run_pty()"
+ " on FreeBSD")
+ def test_complete_no_match(self):
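+ # Trigger completion twice on a prefix that has no matches ("xyzzy"),
+ # then erase it with backspaces before quitting.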
+ input_ = b"xyzzy\t\t\b\b\b\b\b\b\b.quit\n"
+ # Set NO_COLOR to disable coloring for self.PS1.
+ output = self.write_input(input_, env={**os.environ, "NO_COLOR": "1"})
+ lines = output.decode().splitlines()
+ indices = (
+ i for i, line in enumerate(lines, 1)
+ if line.startswith(f"{self.PS1}xyzzy")
+ )
+ line_num = next(indices, -1)
+ self.assertNotEqual(line_num, -1)
+ # Completions occupy extra lines; assert that none appear when there
+ # is nothing to complete.
+ self.assertEqual(line_num, len(lines))
+
+ def test_complete_no_input(self):
+ from _sqlite3 import SQLITE_KEYWORDS
+
+ script = textwrap.dedent("""
+ import readline
+ from sqlite3.__main__ import main
+
+ # Configure readline to ...:
+ # - hide control sequences surrounding each candidate
+ # - hide "Display all xxx possibilities? (y or n)"
+ # - hide "--More--"
+ # - show candidates one per line
+ readline.parse_and_bind("set colored-completion-prefix off")
+ readline.parse_and_bind("set colored-stats off")
+ readline.parse_and_bind("set completion-query-items 0")
+ readline.parse_and_bind("set page-completions off")
+ readline.parse_and_bind("set completion-display-width 0")
+ readline.parse_and_bind("set show-all-if-ambiguous off")
+ readline.parse_and_bind("set show-all-if-unmodified off")
+
+ main()
+ """)
+ input_ = b"\t\t.quit\n"
+ output = run_pty(script, input_, env={**os.environ, "NO_COLOR": "1"})
+ try:
+ lines = output.decode().splitlines()
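+ # Keyword candidates are printed one per line between the two
+ # prompt lines.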
+ indices = [
+ i for i, line in enumerate(lines)
+ if line.startswith(self.PS1)
+ ]
+ self.assertEqual(len(indices), 2)
+ start, end = indices
+ candidates = [l.strip() for l in lines[start+1:end]]
+ self.assertEqual(candidates, sorted(SQLITE_KEYWORDS))
+ except:
+ if verbose:
+ print(' PTY output: '.center(30, '-'))
+ print(output.decode(errors='replace'))
+ print(' end PTY output '.center(30, '-'))
+ raise
+
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py
index 2767a53d53c..f123f6ece40 100644
--- a/Lib/test/test_ssl.py
+++ b/Lib/test/test_ssl.py
@@ -31,6 +31,7 @@ import weakref
import platform
import sysconfig
import functools
+from contextlib import nullcontext
try:
import ctypes
except ImportError:
@@ -2843,6 +2844,7 @@ class ThreadedTests(unittest.TestCase):
# See GH-124984: OpenSSL is not thread safe.
threads = []
+ warnings_filters = sys.flags.context_aware_warnings
global USE_SAME_TEST_CONTEXT
USE_SAME_TEST_CONTEXT = True
try:
@@ -2851,7 +2853,10 @@ class ThreadedTests(unittest.TestCase):
self.test_alpn_protocols,
self.test_getpeercert,
self.test_crl_check,
- self.test_check_hostname_idn,
+ functools.partial(
+ self.test_check_hostname_idn,
+ warnings_filters=warnings_filters,
+ ),
self.test_wrong_cert_tls12,
self.test_wrong_cert_tls13,
):
@@ -3097,7 +3102,7 @@ class ThreadedTests(unittest.TestCase):
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
- def test_check_hostname_idn(self):
+ def test_check_hostname_idn(self, warnings_filters=True):
if support.verbose:
sys.stdout.write("\n")
@@ -3152,16 +3157,30 @@ class ThreadedTests(unittest.TestCase):
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
- with ThreadedEchoServer(context=server_context, chatty=True) as server:
- with warnings_helper.check_no_resource_warning(self):
- with self.assertRaises(UnicodeError):
- context.wrap_socket(socket.socket(),
- server_hostname='.pythontest.net')
- with ThreadedEchoServer(context=server_context, chatty=True) as server:
- with warnings_helper.check_no_resource_warning(self):
- with self.assertRaises(UnicodeDecodeError):
- context.wrap_socket(socket.socket(),
- server_hostname=b'k\xf6nig.idn.pythontest.net')
+ with (
+ ThreadedEchoServer(context=server_context, chatty=True) as server,
+ (
+ warnings_helper.check_no_resource_warning(self)
+ if warnings_filters
+ else nullcontext()
+ ),
+ self.assertRaises(UnicodeError),
+ ):
+ context.wrap_socket(socket.socket(), server_hostname='.pythontest.net')
+
+ with (
+ ThreadedEchoServer(context=server_context, chatty=True) as server,
+ (
+ warnings_helper.check_no_resource_warning(self)
+ if warnings_filters
+ else nullcontext()
+ ),
+ self.assertRaises(UnicodeDecodeError),
+ ):
+ context.wrap_socket(
+ socket.socket(),
+ server_hostname=b'k\xf6nig.idn.pythontest.net',
+ )
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
diff --git a/Lib/test/test_stable_abi_ctypes.py b/Lib/test/test_stable_abi_ctypes.py
index 1e6f69d49e9..5a6ba9de337 100644
--- a/Lib/test/test_stable_abi_ctypes.py
+++ b/Lib/test/test_stable_abi_ctypes.py
@@ -658,7 +658,11 @@ SYMBOL_NAMES = (
"PySys_AuditTuple",
"PySys_FormatStderr",
"PySys_FormatStdout",
+ "PySys_GetAttr",
+ "PySys_GetAttrString",
"PySys_GetObject",
+ "PySys_GetOptionalAttr",
+ "PySys_GetOptionalAttrString",
"PySys_GetXOptions",
"PySys_HasWarnOptions",
"PySys_ResetWarnOptions",
diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py
index 5980f939185..8250b0aef09 100644
--- a/Lib/test/test_statistics.py
+++ b/Lib/test/test_statistics.py
@@ -2346,6 +2346,7 @@ class TestGeometricMean(unittest.TestCase):
class TestKDE(unittest.TestCase):
+ @support.requires_resource('cpu')
def test_kde(self):
kde = statistics.kde
StatisticsError = statistics.StatisticsError
@@ -3318,7 +3319,8 @@ class TestNormalDistC(unittest.TestCase, TestNormalDist):
def load_tests(loader, tests, ignore):
"""Used for doctest/unittest integration."""
tests.addTests(doctest.DocTestSuite())
- tests.addTests(doctest.DocTestSuite(statistics))
+ if sys.float_repr_style == 'short':
+ tests.addTests(doctest.DocTestSuite(statistics))
return tests
diff --git a/Lib/test/test_str.py b/Lib/test/test_str.py
index d6a7bd0da59..2584fbf72d3 100644
--- a/Lib/test/test_str.py
+++ b/Lib/test/test_str.py
@@ -1231,10 +1231,10 @@ class StrTest(string_tests.StringLikeTest,
self.assertEqual('{0:\x00^6}'.format(3), '\x00\x003\x00\x00\x00')
self.assertEqual('{0:<6}'.format(3), '3 ')
- self.assertEqual('{0:\x00<6}'.format(3.14), '3.14\x00\x00')
- self.assertEqual('{0:\x01<6}'.format(3.14), '3.14\x01\x01')
- self.assertEqual('{0:\x00^6}'.format(3.14), '\x003.14\x00')
- self.assertEqual('{0:^6}'.format(3.14), ' 3.14 ')
+ self.assertEqual('{0:\x00<6}'.format(3.25), '3.25\x00\x00')
+ self.assertEqual('{0:\x01<6}'.format(3.25), '3.25\x01\x01')
+ self.assertEqual('{0:\x00^6}'.format(3.25), '\x003.25\x00')
+ self.assertEqual('{0:^6}'.format(3.25), ' 3.25 ')
self.assertEqual('{0:\x00<12}'.format(3+2.0j), '(3+2j)\x00\x00\x00\x00\x00\x00')
self.assertEqual('{0:\x01<12}'.format(3+2.0j), '(3+2j)\x01\x01\x01\x01\x01\x01')
diff --git a/Lib/test/test_string/_support.py b/Lib/test/test_string/_support.py
index eaa3354a559..abdddaf187b 100644
--- a/Lib/test/test_string/_support.py
+++ b/Lib/test/test_string/_support.py
@@ -1,4 +1,3 @@
-import unittest
from string.templatelib import Interpolation
diff --git a/Lib/test/test_strptime.py b/Lib/test/test_strptime.py
index 268230f6da7..0241e543cd7 100644
--- a/Lib/test/test_strptime.py
+++ b/Lib/test/test_strptime.py
@@ -221,14 +221,16 @@ class StrptimeTests(unittest.TestCase):
self.assertRaises(ValueError, _strptime._strptime_time, data_string="%d",
format="%A")
for bad_format in ("%", "% ", "%\n"):
- with self.assertRaisesRegex(ValueError, "stray % in format "):
+ with (self.subTest(format=bad_format),
+ self.assertRaisesRegex(ValueError, "stray % in format ")):
_strptime._strptime_time("2005", bad_format)
- for bad_format in ("%e", "%Oe", "%O", "%O ", "%Ee", "%E", "%E ",
- "%.", "%+", "%_", "%~", "%\\",
+ for bad_format in ("%i", "%Oi", "%O", "%O ", "%Ee", "%E", "%E ",
+ "%.", "%+", "%~", "%\\",
"%O.", "%O+", "%O_", "%O~", "%O\\"):
directive = bad_format[1:].rstrip()
- with self.assertRaisesRegex(ValueError,
- f"'{re.escape(directive)}' is a bad directive in format "):
+ with (self.subTest(format=bad_format),
+ self.assertRaisesRegex(ValueError,
+ f"'{re.escape(directive)}' is a bad directive in format ")):
_strptime._strptime_time("2005", bad_format)
msg_week_no_year_or_weekday = r"ISO week directive '%V' must be used with " \
@@ -335,6 +337,15 @@ class StrptimeTests(unittest.TestCase):
self.roundtrip('%B', 1, (1900, m, 1, 0, 0, 0, 0, 1, 0))
self.roundtrip('%b', 1, (1900, m, 1, 0, 0, 0, 0, 1, 0))
+ @run_with_locales('LC_TIME', 'az_AZ', 'ber_DZ', 'ber_MA', 'crh_UA')
+ def test_month_locale2(self):
+ # Test for month directives
+ # Month name contains 'İ' ('\u0130')
+ self.roundtrip('%B', 1, (2025, 6, 1, 0, 0, 0, 6, 152, 0))
+ self.roundtrip('%b', 1, (2025, 6, 1, 0, 0, 0, 6, 152, 0))
+ self.roundtrip('%B', 1, (2025, 7, 1, 0, 0, 0, 1, 182, 0))
+ self.roundtrip('%b', 1, (2025, 7, 1, 0, 0, 0, 1, 182, 0))
+
def test_day(self):
# Test for day directives
self.roundtrip('%d %Y', 2)
@@ -480,13 +491,11 @@ class StrptimeTests(unittest.TestCase):
# * Year is not included: ha_NG.
# * Use non-Gregorian calendar: lo_LA, thai, th_TH.
# On Windows: ar_IN, ar_SA, fa_IR, ps_AF.
- #
- # BUG: Generates regexp that does not match the current date and time
- # for lzh_TW.
@run_with_locales('LC_TIME', 'C', 'en_US', 'fr_FR', 'de_DE', 'ja_JP',
'he_IL', 'eu_ES', 'ar_AE', 'mfe_MU', 'yo_NG',
'csb_PL', 'br_FR', 'gez_ET', 'brx_IN',
- 'my_MM', 'or_IN', 'shn_MM', 'az_IR')
+ 'my_MM', 'or_IN', 'shn_MM', 'az_IR',
+ 'byn_ER', 'wal_ET', 'lzh_TW')
def test_date_time_locale(self):
# Test %c directive
loc = locale.getlocale(locale.LC_TIME)[0]
@@ -525,11 +534,9 @@ class StrptimeTests(unittest.TestCase):
# NB: Does not roundtrip because use non-Gregorian calendar:
# lo_LA, thai, th_TH. On Windows: ar_IN, ar_SA, fa_IR, ps_AF.
- # BUG: Generates regexp that does not match the current date
- # for lzh_TW.
@run_with_locales('LC_TIME', 'C', 'en_US', 'fr_FR', 'de_DE', 'ja_JP',
'he_IL', 'eu_ES', 'ar_AE',
- 'az_IR', 'my_MM', 'or_IN', 'shn_MM')
+ 'az_IR', 'my_MM', 'or_IN', 'shn_MM', 'lzh_TW')
def test_date_locale(self):
# Test %x directive
now = time.time()
@@ -546,7 +553,7 @@ class StrptimeTests(unittest.TestCase):
# NB: Dates before 1969 do not roundtrip on many locales, including C.
@unittest.skipIf(support.linked_to_musl(), "musl libc issue, bpo-46390")
@run_with_locales('LC_TIME', 'en_US', 'fr_FR', 'de_DE', 'ja_JP',
- 'eu_ES', 'ar_AE', 'my_MM', 'shn_MM')
+ 'eu_ES', 'ar_AE', 'my_MM', 'shn_MM', 'lzh_TW')
def test_date_locale2(self):
# Test %x directive
loc = locale.getlocale(locale.LC_TIME)[0]
@@ -562,11 +569,11 @@ class StrptimeTests(unittest.TestCase):
# norwegian, nynorsk.
# * Hours are in 12-hour notation without AM/PM indication: hy_AM,
# ms_MY, sm_WS.
- # BUG: Generates regexp that does not match the current time for lzh_TW.
@run_with_locales('LC_TIME', 'C', 'en_US', 'fr_FR', 'de_DE', 'ja_JP',
'aa_ET', 'am_ET', 'az_IR', 'byn_ER', 'fa_IR', 'gez_ET',
'my_MM', 'om_ET', 'or_IN', 'shn_MM', 'sid_ET', 'so_SO',
- 'ti_ET', 'tig_ER', 'wal_ET')
+ 'ti_ET', 'tig_ER', 'wal_ET', 'lzh_TW',
+ 'ar_SA', 'bg_BG')
def test_time_locale(self):
# Test %X directive
loc = locale.getlocale(locale.LC_TIME)[0]
diff --git a/Lib/test/test_syntax.py b/Lib/test/test_syntax.py
index c7ac7914158..c52d2421941 100644
--- a/Lib/test/test_syntax.py
+++ b/Lib/test/test_syntax.py
@@ -382,6 +382,13 @@ SyntaxError: invalid syntax
Traceback (most recent call last):
SyntaxError: invalid syntax
+# But prefixes of soft keywords should
+# still raise specialized errors
+
+>>> (mat x)
+Traceback (most recent call last):
+SyntaxError: invalid syntax. Perhaps you forgot a comma?
+
From compiler_complex_args():
>>> def f(None=1):
@@ -1436,17 +1443,17 @@ Regression tests for gh-133999:
>>> try: pass
... except TypeError as name: raise from None
Traceback (most recent call last):
- SyntaxError: invalid syntax
+ SyntaxError: did you forget an expression between 'raise' and 'from'?
>>> try: pass
... except* TypeError as name: raise from None
Traceback (most recent call last):
- SyntaxError: invalid syntax
+ SyntaxError: did you forget an expression between 'raise' and 'from'?
>>> match 1:
... case 1 | 2 as abc: raise from None
Traceback (most recent call last):
- SyntaxError: invalid syntax
+ SyntaxError: did you forget an expression between 'raise' and 'from'?
Ensure that early = are not matched by the parser as invalid comparisons
>>> f(2, 4, x=34); 1 $ 2
@@ -1695,6 +1702,28 @@ Make sure that the old "raise X, Y[, Z]" form is gone:
...
SyntaxError: invalid syntax
+Better errors for `raise` statement:
+
+ >>> raise ValueError from
+ Traceback (most recent call last):
+ SyntaxError: did you forget an expression after 'from'?
+
+ >>> raise mod.ValueError() from
+ Traceback (most recent call last):
+ SyntaxError: did you forget an expression after 'from'?
+
+ >>> raise from exc
+ Traceback (most recent call last):
+ SyntaxError: did you forget an expression between 'raise' and 'from'?
+
+ >>> raise from None
+ Traceback (most recent call last):
+ SyntaxError: did you forget an expression between 'raise' and 'from'?
+
+ >>> raise from
+ Traceback (most recent call last):
+ SyntaxError: did you forget an expression between 'raise' and 'from'?
+
Check that an multiple exception types with missing parentheses
raise a custom exception only when using 'as'
@@ -2843,6 +2872,13 @@ class SyntaxErrorTestCase(unittest.TestCase):
"""
self._check_error(source, "parameter and nonlocal", lineno=3)
+ def test_raise_from_error_message(self):
+ source = """if 1:
+ raise AssertionError() from None
+ print(1,,2)
+ """
+ self._check_error(source, "invalid syntax", lineno=3)
+
def test_yield_outside_function(self):
self._check_error("if 0: yield", "outside function")
self._check_error("if 0: yield\nelse: x=1", "outside function")
diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py
index 795d1ecbb59..486bf10a0b5 100644
--- a/Lib/test/test_sys.py
+++ b/Lib/test/test_sys.py
@@ -24,7 +24,7 @@ from test.support import import_helper
from test.support import force_not_colorized
from test.support import SHORT_TIMEOUT
try:
- from test.support import interpreters
+ from concurrent import interpreters
except ImportError:
interpreters = None
import textwrap
@@ -729,7 +729,7 @@ class SysModuleTest(unittest.TestCase):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'pthread-stubs', 'solaris', None))
- self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
+ self.assertIn(info.lock, ('pymutex', None))
if sys.platform.startswith(("linux", "android", "freebsd")):
self.assertEqual(info.name, "pthread")
elif sys.platform == "win32":
@@ -869,12 +869,7 @@ class SysModuleTest(unittest.TestCase):
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
- arg = sys_attr
- attr_type = type(sys_attr)
- with self.assertRaises(TypeError):
- attr_type(arg)
- with self.assertRaises(TypeError):
- attr_type.__new__(attr_type, arg)
+ support.check_disallow_instantiation(self, type(sys_attr), sys_attr)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
@@ -1074,6 +1069,7 @@ class SysModuleTest(unittest.TestCase):
self.assertHasAttr(sys.implementation, 'version')
self.assertHasAttr(sys.implementation, 'hexversion')
self.assertHasAttr(sys.implementation, 'cache_tag')
+ self.assertHasAttr(sys.implementation, 'supports_isolated_interpreters')
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
@@ -1087,6 +1083,15 @@ class SysModuleTest(unittest.TestCase):
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
+ # https://peps.python.org/pep-0734
+ sii = sys.implementation.supports_isolated_interpreters
+ self.assertIsInstance(sii, bool)
+ if test.support.check_impl_detail(cpython=True):
+ if test.support.is_emscripten or test.support.is_wasi:
+ self.assertFalse(sii)
+ else:
+ self.assertTrue(sii)
+
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
@@ -1135,23 +1140,12 @@ class SysModuleTest(unittest.TestCase):
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
try:
- # While we could imagine a Python session where the number of
- # multiple buffer objects would exceed the sharing of references,
- # it is unlikely to happen in a normal test run.
- #
- # In free-threaded builds each code object owns an array of
- # pointers to copies of the bytecode. When the number of
- # code objects is a large fraction of the total number of
- # references, this can cause the total number of allocated
- # blocks to exceed the total number of references.
- #
- # For some reason, iOS seems to trigger the "unlikely to happen"
- # case reliably under CI conditions. It's not clear why; but as
- # this test is checking the behavior of getallocatedblock()
- # under garbage collection, we can skip this pre-condition check
- # for now. See GH-130384.
- if not support.Py_GIL_DISABLED and not support.is_apple_mobile:
- self.assertLess(a, sys.gettotalrefcount())
+ # The reported blocks will include immortalized strings, but the
+ # total ref count will not. This will sanity check that among all
+ # other objects (those eligible for garbage collection) there
+ # are more references being tracked than allocated blocks.
+ interned_immortal = sys.getunicodeinternedsize(_only_immortal=True)
+ self.assertLess(a - interned_immortal, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
@@ -1299,6 +1293,7 @@ class SysModuleTest(unittest.TestCase):
for name in sys.stdlib_module_names:
self.assertIsInstance(name, str)
+ @unittest.skipUnless(hasattr(sys, '_stdlib_dir'), 'need sys._stdlib_dir')
def test_stdlib_dir(self):
os = import_helper.import_fresh_module('os')
marker = getattr(os, '__file__', None)
@@ -1953,22 +1948,7 @@ class SizeofTest(unittest.TestCase):
self.assertEqual(out, b"")
self.assertEqual(err, b"")
-
-def _supports_remote_attaching():
- PROCESS_VM_READV_SUPPORTED = False
-
- try:
- from _remote_debugging import PROCESS_VM_READV_SUPPORTED
- except ImportError:
- pass
-
- return PROCESS_VM_READV_SUPPORTED
-
-@unittest.skipIf(not sys.is_remote_debug_enabled(), "Remote debugging is not enabled")
-@unittest.skipIf(sys.platform != "darwin" and sys.platform != "linux" and sys.platform != "win32",
- "Test only runs on Linux, Windows and MacOS")
-@unittest.skipIf(sys.platform == "linux" and not _supports_remote_attaching(),
- "Test only runs on Linux with process_vm_readv support")
+@test.support.support_remote_exec_only
@test.support.cpython_only
class TestRemoteExec(unittest.TestCase):
def tearDown(self):
@@ -2127,7 +2107,7 @@ print("Remote script executed successfully!")
returncode, stdout, stderr = self._run_remote_exec_test(script, prologue=prologue)
self.assertEqual(returncode, 0)
self.assertIn(b"Remote script executed successfully!", stdout)
- self.assertIn(b"Audit event: remote_debugger_script, arg: ", stdout)
+ self.assertIn(b"Audit event: cpython.remote_debugger_script, arg: ", stdout)
self.assertEqual(stderr, b"")
def test_remote_exec_with_exception(self):
diff --git a/Lib/test/test_sysconfig.py b/Lib/test/test_sysconfig.py
index d30f69ded66..2eb8de4b29f 100644
--- a/Lib/test/test_sysconfig.py
+++ b/Lib/test/test_sysconfig.py
@@ -32,7 +32,6 @@ from sysconfig import (get_paths, get_platform, get_config_vars,
from sysconfig.__main__ import _main, _parse_makefile, _get_pybuilddir, _get_json_data_name
import _imp
import _osx_support
-import _sysconfig
HAS_USER_BASE = sysconfig._HAS_USER_BASE
@@ -712,8 +711,8 @@ class TestSysConfig(unittest.TestCase, VirtualEnvironmentMixin):
ignore_keys |= {'prefix', 'exec_prefix', 'base', 'platbase', 'installed_base', 'installed_platbase'}
for key in ignore_keys:
- json_config_vars.pop(key)
- system_config_vars.pop(key)
+ json_config_vars.pop(key, None)
+ system_config_vars.pop(key, None)
self.assertEqual(system_config_vars, json_config_vars)
diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py
index cf218a2bf14..7055e1ed147 100644
--- a/Lib/test/test_tarfile.py
+++ b/Lib/test/test_tarfile.py
@@ -2715,6 +2715,31 @@ class MiscTest(unittest.TestCase):
str(excinfo.exception),
)
+ @unittest.skipUnless(os_helper.can_symlink(), 'requires symlink support')
+ @unittest.skipUnless(hasattr(os, 'chmod'), "missing os.chmod")
+ @unittest.mock.patch('os.chmod')
+ def test_deferred_directory_attributes_update(self, mock_chmod):
+ # Regression test for gh-127987: setting attributes on arbitrary files
+ tempdir = os.path.join(TEMPDIR, 'test127987')
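+ # Any chmod call made during extraction must target a path that
+ # resolves inside tempdir; resolving the real path catches attempts
+ # routed through symlinks planted by the archive.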
+ def mock_chmod_side_effect(path, mode, **kwargs):
+ target_path = os.path.realpath(path)
+ if os.path.commonpath([target_path, tempdir]) != tempdir:
+ raise Exception("should not try to chmod anything outside the destination", target_path)
+ mock_chmod.side_effect = mock_chmod_side_effect
+
+ outside_tree_dir = os.path.join(TEMPDIR, 'outside_tree_dir')
+ with ArchiveMaker() as arc:
+ arc.add('x', symlink_to='.')
+ arc.add('x', type=tarfile.DIRTYPE, mode='?rwsrwsrwt')
+ arc.add('x', symlink_to=outside_tree_dir)
+
+ os.makedirs(outside_tree_dir)
+ try:
+ arc.open().extractall(path=tempdir, filter='tar')
+ finally:
+ os_helper.rmtree(outside_tree_dir)
+ os_helper.rmtree(tempdir)
+
class CommandLineTest(unittest.TestCase):
@@ -3275,6 +3300,10 @@ class NoneInfoExtractTests(ReadTest):
got_paths = set(
p.relative_to(directory)
for p in pathlib.Path(directory).glob('**/*'))
+ if self.extraction_filter in (None, 'data'):
+ # The 'data' filter is expected to reject special files
+ for path in 'ustar/fifotype', 'ustar/blktype', 'ustar/chrtype':
+ got_paths.discard(pathlib.Path(path))
self.assertEqual(self.control_paths, got_paths)
@contextmanager
@@ -3504,12 +3533,28 @@ class ArchiveMaker:
self.bio = None
def add(self, name, *, type=None, symlink_to=None, hardlink_to=None,
- mode=None, size=None, **kwargs):
- """Add a member to the test archive. Call within `with`."""
+ mode=None, size=None, content=None, **kwargs):
+ """Add a member to the test archive. Call within `with`.
+
+ Provides many shortcuts:
+ - default `type` is based on symlink_to, hardlink_to, and trailing `/`
+ in name (which is stripped)
+ - size & content defaults are based on each other
+ - content can be str or bytes
+ - mode should be textual ('-rwxrwxrwx')
+
+ (add more! this is an unstable, internal, test-only API)
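+
+ For example, add('dir/', mode='drwxr-xr-x') adds a directory entry,
+ and add('afile', content='hi') adds a two-byte regular file.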
+ """
name = str(name)
tarinfo = tarfile.TarInfo(name).replace(**kwargs)
+ if content is not None:
+ if isinstance(content, str):
+ content = content.encode()
+ size = len(content)
if size is not None:
tarinfo.size = size
+ if content is None:
+ content = bytes(tarinfo.size)
if mode:
tarinfo.mode = _filemode_to_int(mode)
if symlink_to is not None:
@@ -3523,7 +3568,7 @@ class ArchiveMaker:
if type is not None:
tarinfo.type = type
if tarinfo.isreg():
- fileobj = io.BytesIO(bytes(tarinfo.size))
+ fileobj = io.BytesIO(content)
else:
fileobj = None
self.tar_w.addfile(tarinfo, fileobj)
@@ -3557,7 +3602,7 @@ class TestExtractionFilters(unittest.TestCase):
destdir = outerdir / 'dest'
@contextmanager
- def check_context(self, tar, filter):
+ def check_context(self, tar, filter, *, check_flag=True):
"""Extracts `tar` to `self.destdir` and allows checking the result
If an error occurs, it must be checked using `expect_exception`
@@ -3566,27 +3611,40 @@ class TestExtractionFilters(unittest.TestCase):
except the destination directory itself and parent directories of
other files.
When checking directories, do so before their contents.
+
+ A file called 'flag' is made in outerdir (i.e. outside destdir)
+ before extraction; it should not be altered nor should its contents
+ be read/copied.
"""
with os_helper.temp_dir(self.outerdir):
+ flag_path = self.outerdir / 'flag'
+ flag_path.write_text('capture me')
try:
tar.extractall(self.destdir, filter=filter)
except Exception as exc:
self.raised_exception = exc
+ self.reraise_exception = True
self.expected_paths = set()
else:
self.raised_exception = None
+ self.reraise_exception = False
self.expected_paths = set(self.outerdir.glob('**/*'))
self.expected_paths.discard(self.destdir)
+ self.expected_paths.discard(flag_path)
try:
- yield
+ yield self
finally:
tar.close()
- if self.raised_exception:
+ if self.reraise_exception:
raise self.raised_exception
self.assertEqual(self.expected_paths, set())
+ if check_flag:
+ self.assertEqual(flag_path.read_text(), 'capture me')
+ else:
+ assert filter == 'fully_trusted'
def expect_file(self, name, type=None, symlink_to=None, mode=None,
- size=None):
+ size=None, content=None):
"""Check a single file. See check_context."""
if self.raised_exception:
raise self.raised_exception
@@ -3605,26 +3663,45 @@ class TestExtractionFilters(unittest.TestCase):
# The symlink might be the same (textually) as what we expect,
# but some systems change the link to an equivalent path, so
# we fall back to samefile().
- if expected != got:
- self.assertTrue(got.samefile(expected))
+ try:
+ if expected != got:
+ self.assertTrue(got.samefile(expected))
+ except Exception as e:
+ # attach a note, so it's shown even if `samefile` fails
+ e.add_note(f'{expected=}, {got=}')
+ raise
elif type == tarfile.REGTYPE or type is None:
self.assertTrue(path.is_file())
elif type == tarfile.DIRTYPE:
self.assertTrue(path.is_dir())
elif type == tarfile.FIFOTYPE:
self.assertTrue(path.is_fifo())
+ elif type == tarfile.SYMTYPE:
+ self.assertTrue(path.is_symlink())
else:
raise NotImplementedError(type)
if size is not None:
self.assertEqual(path.stat().st_size, size)
+ if content is not None:
+ self.assertEqual(path.read_text(), content)
for parent in path.parents:
self.expected_paths.discard(parent)
+ def expect_any_tree(self, name):
+ """Check a directory; forget about its contents."""
+ tree_path = (self.destdir / name).resolve()
+ self.expect_file(tree_path, type=tarfile.DIRTYPE)
+ self.expected_paths = {
+ p for p in self.expected_paths
+ if tree_path not in p.parents
+ }
+
def expect_exception(self, exc_type, message_re='.'):
with self.assertRaisesRegex(exc_type, message_re):
if self.raised_exception is not None:
raise self.raised_exception
- self.raised_exception = None
+ self.reraise_exception = False
+ return self.raised_exception
def test_benign_file(self):
with ArchiveMaker() as arc:
@@ -3710,6 +3787,80 @@ class TestExtractionFilters(unittest.TestCase):
self.expect_file('parent/evil')
@symlink_test
+ @os_helper.skip_unless_symlink
+ def test_realpath_limit_attack(self):
+ # (CVE-2025-4517)
+
+ with ArchiveMaker() as arc:
+ # populate the symlinks and dirs that expand in os.path.realpath()
+ # The component length is chosen so that in common cases, the unexpanded
+ # path fits in PATH_MAX, but it overflows when the final symlink
+ # is expanded
+ steps = "abcdefghijklmnop"
+ if sys.platform == 'win32':
+ component = 'd' * 25
+ elif 'PC_PATH_MAX' in os.pathconf_names:
+ max_path_len = os.pathconf(self.outerdir.parent, "PC_PATH_MAX")
+ path_sep_len = 1
+ dest_len = len(str(self.destdir)) + path_sep_len
+ component_len = (max_path_len - dest_len) // (len(steps) + path_sep_len)
+ component = 'd' * component_len
+ else:
+ raise NotImplementedError(f"Need to guess component length for {sys.platform}")
+ path = ""
+ step_path = ""
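+ # Each iteration adds one long directory component plus a one-letter
+ # symlink to it, so the chain of one-letter steps stays short until
+ # the symlinks are expanded.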
+ for i in steps:
+ arc.add(os.path.join(path, component), type=tarfile.DIRTYPE,
+ mode='drwxrwxrwx')
+ arc.add(os.path.join(path, i), symlink_to=component)
+ path = os.path.join(path, component)
+ step_path = os.path.join(step_path, i)
+ # create the final symlink that exceeds PATH_MAX and simply points
+ # to the top dir.
+ # this link will never be expanded by
+ # os.path.realpath(strict=False), nor will anything after it.
+ linkpath = os.path.join(*steps, "l"*254)
+ parent_segments = [".."] * len(steps)
+ arc.add(linkpath, symlink_to=os.path.join(*parent_segments))
+ # make a symlink outside to keep the tar command happy
+ arc.add("escape", symlink_to=os.path.join(linkpath, ".."))
+ # use the symlinks above, which are not checked, to create a hardlink
+ # to a file outside of the destination path
+ arc.add("flaglink", hardlink_to=os.path.join("escape", "flag"))
+ # now that we have the hardlink, we can overwrite the file
+ arc.add("flaglink", content='overwrite')
+ # we can also create new files!
+ arc.add("escape/newfile", content='new')
+
+ with (self.subTest('fully_trusted'),
+ self.check_context(arc.open(), filter='fully_trusted',
+ check_flag=False)):
+ if sys.platform == 'win32':
+ self.expect_exception((FileNotFoundError, FileExistsError))
+ elif self.raised_exception:
+ # Cannot symlink/hardlink: tarfile falls back to getmember()
+ self.expect_exception(KeyError)
+ # Otherwise, this block should never be entered.
+ else:
+ self.expect_any_tree(component)
+ self.expect_file('flaglink', content='overwrite')
+ self.expect_file('../newfile', content='new')
+ self.expect_file('escape', type=tarfile.SYMTYPE)
+ self.expect_file('a', symlink_to=component)
+
+ for filter in 'tar', 'data':
+ with self.subTest(filter), self.check_context(arc.open(), filter=filter):
+ exc = self.expect_exception((OSError, KeyError))
+ if isinstance(exc, OSError):
+ if sys.platform == 'win32':
+ # 3: ERROR_PATH_NOT_FOUND
+ # 5: ERROR_ACCESS_DENIED
+ # 206: ERROR_FILENAME_EXCED_RANGE
+ self.assertIn(exc.winerror, (3, 5, 206))
+ else:
+ self.assertEqual(exc.errno, errno.ENAMETOOLONG)
+
+ @symlink_test
def test_parent_symlink2(self):
# Test interplaying symlinks
# Inspired by 'dirsymlink2b' in jwilk/traversal-archives
@@ -3931,8 +4082,8 @@ class TestExtractionFilters(unittest.TestCase):
arc.add('symlink2', symlink_to=os.path.join(
'linkdir', 'hardlink2'))
arc.add('targetdir/target', size=3)
- arc.add('linkdir/hardlink', hardlink_to='targetdir/target')
- arc.add('linkdir/hardlink2', hardlink_to='linkdir/symlink')
+ arc.add('linkdir/hardlink', hardlink_to=os.path.join('targetdir', 'target'))
+ arc.add('linkdir/hardlink2', hardlink_to=os.path.join('linkdir', 'symlink'))
for filter in 'tar', 'data', 'fully_trusted':
with self.check_context(arc.open(), filter):
@@ -3948,6 +4099,129 @@ class TestExtractionFilters(unittest.TestCase):
self.expect_file('linkdir/symlink', size=3)
self.expect_file('symlink2', size=3)
+ @symlink_test
+ def test_sneaky_hardlink_fallback(self):
+ # (CVE-2025-4330)
+ # Test that when hardlink extraction falls back to extracting members
+ # from the archive, the extracted member is (re-)filtered.
+ with ArchiveMaker() as arc:
+ # Create a directory structure so the c/escape symlink stays
+ # inside the path
+ arc.add("a/t/dummy")
+ # Create b/ directory
+ arc.add("b/")
+ # Point "c" to the bottom of the tree in "a"
+ arc.add("c", symlink_to=os.path.join("a", "t"))
+ # link to a non-existent location under "a"
+ arc.add("c/escape", symlink_to=os.path.join("..", "..",
+ "link_here"))
+ # Move "c" to point to "b" ("c/escape" no longer exists)
+ arc.add("c", symlink_to="b")
+ # Attempt to create a hard link to "c/escape". Since it doesn't
+ # exist, tarfile falls back to extracting the "c/escape" member,
+ # but under the name "boom".
+ arc.add("boom", hardlink_to=os.path.join("c", "escape"))
+
+ with self.check_context(arc.open(), 'data'):
+ if not os_helper.can_symlink():
+ # When 'c/escape' is extracted, 'c' is a regular
+ # directory, and 'c/escape' *would* point outside
+ # the destination if symlinks were allowed.
+ self.expect_exception(
+ tarfile.LinkOutsideDestinationError)
+ elif sys.platform == "win32":
+ # On Windows, 'c/escape' points outside the destination
+ self.expect_exception(tarfile.LinkOutsideDestinationError)
+ else:
+ e = self.expect_exception(
+ tarfile.LinkFallbackError,
+ "link 'boom' would be extracted as a copy of "
+ + "'c/escape', which was rejected")
+ self.assertIsInstance(e.__cause__,
+ tarfile.LinkOutsideDestinationError)
+ for filter in 'tar', 'fully_trusted':
+ with self.subTest(filter), self.check_context(arc.open(), filter):
+ if not os_helper.can_symlink():
+ self.expect_file("a/t/dummy")
+ self.expect_file("b/")
+ self.expect_file("c/")
+ else:
+ self.expect_file("a/t/dummy")
+ self.expect_file("b/")
+ self.expect_file("a/t/escape", symlink_to='../../link_here')
+ self.expect_file("boom", symlink_to='../../link_here')
+ self.expect_file("c", symlink_to='b')
+
+ @symlink_test
+ def test_exfiltration_via_symlink(self):
+ # (CVE-2025-4138)
+ # Test chaining symlinks that result in a symlink pointing outside
+ # the extraction directory, unless prevented by 'data' filter's
+ # normalization.
+ with ArchiveMaker() as arc:
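+ # Normalized lexically, "escape" points at 'link-here' inside the
+ # destination; but if "link" (-> './') is resolved first, the
+ # '../..' components climb out of the extraction directory.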
+ arc.add("escape", symlink_to=os.path.join('link', 'link', '..', '..', 'link-here'))
+ arc.add("link", symlink_to='./')
+
+ for filter in 'tar', 'data', 'fully_trusted':
+ with self.check_context(arc.open(), filter):
+ if os_helper.can_symlink():
+ self.expect_file("link", symlink_to='./')
+ if filter == 'data':
+ self.expect_file("escape", symlink_to='link-here')
+ else:
+ self.expect_file("escape",
+ symlink_to='link/link/../../link-here')
+ else:
+ # Nothing is extracted.
+ pass
+
+ @symlink_test
+ def test_chmod_outside_dir(self):
+ # (CVE-2024-12718)
+ # Test that members used for delayed updates of directory metadata
+ # are (re-)filtered.
+ with ArchiveMaker() as arc:
+ # "pwn" is a veeeery innocent symlink:
+ arc.add("a/pwn", symlink_to='.')
+ # But now "pwn" is also a directory, so it's scheduled to have its
+ # metadata updated later:
+ arc.add("a/pwn/", mode='drwxrwxrwx')
+ # Oops, "pwn" is not so innocent any more:
+ arc.add("a/pwn", symlink_to='x/../')
+ # Newly created symlink points to the dest dir,
+ # so it's OK for the "data" filter.
+ arc.add('a/x', symlink_to=('../'))
+ # But now "pwn" points outside the dest dir
+
+ for filter in 'tar', 'data', 'fully_trusted':
+ with self.check_context(arc.open(), filter) as cc:
+ if not os_helper.can_symlink():
+ self.expect_file("a/pwn/")
+ elif filter == 'data':
+ self.expect_file("a/x", symlink_to='../')
+ self.expect_file("a/pwn", symlink_to='.')
+ else:
+ self.expect_file("a/x", symlink_to='../')
+ self.expect_file("a/pwn", symlink_to='x/../')
+ if sys.platform != "win32":
+ st_mode = cc.outerdir.stat().st_mode
+ self.assertNotEqual(st_mode & 0o777, 0o777)
+
+ def test_link_fallback_normalizes(self):
+ # Make sure hardlink fallbacks work for non-normalized paths for all
+ # filters
+ with ArchiveMaker() as arc:
+ arc.add("dir/")
+ arc.add("dir/../afile")
+ arc.add("link1", hardlink_to='dir/../afile')
+ arc.add("link2", hardlink_to='dir/../dir/../afile')
+
+ for filter in 'tar', 'data', 'fully_trusted':
+ with self.check_context(arc.open(), filter) as cc:
+ self.expect_file("dir/")
+ self.expect_file("afile")
+ self.expect_file("link1")
+ self.expect_file("link2")
+
def test_modes(self):
# Test how file modes are extracted
# (Note that the modes are ignored on platforms without working chmod)
@@ -4072,7 +4346,7 @@ class TestExtractionFilters(unittest.TestCase):
# The 'tar' filter returns TarInfo objects with the same name/type.
# (It can also fail for particularly "evil" input, but we don't have
# that in the test archive.)
- with tarfile.TarFile.open(tarname) as tar:
+ with tarfile.TarFile.open(tarname, encoding="iso8859-1") as tar:
for tarinfo in tar.getmembers():
try:
filtered = tarfile.tar_filter(tarinfo, '')
@@ -4084,7 +4358,7 @@ class TestExtractionFilters(unittest.TestCase):
def test_data_filter(self):
# The 'data' filter either raises, or returns TarInfo with the same
# name/type.
- with tarfile.TarFile.open(tarname) as tar:
+ with tarfile.TarFile.open(tarname, encoding="iso8859-1") as tar:
for tarinfo in tar.getmembers():
try:
filtered = tarfile.data_filter(tarinfo, '')
@@ -4242,13 +4516,13 @@ class TestExtractionFilters(unittest.TestCase):
# If errorlevel is 0, errors affected by errorlevel are ignored
with self.check_context(arc.open(errorlevel=0), extracterror_filter):
- self.expect_file('file')
+ pass
with self.check_context(arc.open(errorlevel=0), filtererror_filter):
- self.expect_file('file')
+ pass
with self.check_context(arc.open(errorlevel=0), oserror_filter):
- self.expect_file('file')
+ pass
with self.check_context(arc.open(errorlevel=0), tarerror_filter):
self.expect_exception(tarfile.TarError)
@@ -4259,7 +4533,7 @@ class TestExtractionFilters(unittest.TestCase):
# If 1, all fatal errors are raised
with self.check_context(arc.open(errorlevel=1), extracterror_filter):
- self.expect_file('file')
+ pass
with self.check_context(arc.open(errorlevel=1), filtererror_filter):
self.expect_exception(tarfile.FilterError)
diff --git a/Lib/test/test_threading.py b/Lib/test/test_threading.py
index 0e51e7fc8c5..00a3037c3e1 100644
--- a/Lib/test/test_threading.py
+++ b/Lib/test/test_threading.py
@@ -28,7 +28,7 @@ from test import lock_tests
from test import support
try:
- from test.support import interpreters
+ from concurrent import interpreters
except ImportError:
interpreters = None
@@ -1247,13 +1247,68 @@ class ThreadTests(BaseTestCase):
self.assertEqual(err, b"")
self.assertIn(b"all clear", out)
+ @support.subTests('lock_class_name', ['Lock', 'RLock'])
+ def test_acquire_daemon_thread_lock_in_finalization(self, lock_class_name):
+ # gh-123940: Py_Finalize() prevents other threads from running Python
+ # code (and so, from releasing locks), so acquiring a locked lock
+ # cannot succeed.
+ # We raise an exception rather than hang.
+ code = textwrap.dedent(f"""
+ import threading
+ import time
+
+ thread_started_event = threading.Event()
+
+ lock = threading.{lock_class_name}()
+ def loop():
+ if {lock_class_name!r} == 'RLock':
+ lock.acquire()
+ with lock:
+ thread_started_event.set()
+ while True:
+ time.sleep(1)
+
+ uncontested_lock = threading.{lock_class_name}()
+
+ class Cycle:
+ def __init__(self):
+ self.self_ref = self
+ self.thr = threading.Thread(
+ target=loop, daemon=True)
+ self.thr.start()
+ thread_started_event.wait()
+
+ def __del__(self):
+ assert self.thr.is_alive()
+
+ # We *can* acquire an unlocked lock
+ uncontested_lock.acquire()
+ if {lock_class_name!r} == 'RLock':
+ uncontested_lock.acquire()
+
+ # Acquiring a locked one fails
+ try:
+ lock.acquire()
+ except PythonFinalizationError:
+ assert self.thr.is_alive()
+ print('got the correct exception!')
+
+ # Cycle holds a reference to itself, which ensures it is
+ # cleaned up during the GC that runs after daemon threads
+ # have been forced to exit during finalization.
+ Cycle()
+ """)
+ rc, out, err = assert_python_ok("-c", code)
+ self.assertEqual(err, b"")
+ self.assertIn(b"got the correct exception", out)
+
def test_start_new_thread_failed(self):
# gh-109746: if Python fails to start newly created thread
# due to failure of underlying PyThread_start_new_thread() call,
# its state should be removed from interpreter' thread states list
# to avoid its double cleanup
try:
- from resource import setrlimit, RLIMIT_NPROC
+ from resource import setrlimit, RLIMIT_NPROC # noqa: F401
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
code = """if 1:
@@ -1284,12 +1339,6 @@ class ThreadTests(BaseTestCase):
@cpython_only
def test_finalize_daemon_thread_hang(self):
- if support.check_sanitizer(thread=True, memory=True):
- # the thread running `time.sleep(100)` below will still be alive
- # at process exit
- self.skipTest(
- "https://github.com/python/cpython/issues/124878 - Known"
- " race condition that TSAN identifies.")
# gh-87135: tests that daemon threads hang during finalization
script = textwrap.dedent('''
import os
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 2d41a5e5ac0..865e0c5b40d 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1,6 +1,8 @@
import contextlib
+import itertools
import os
import re
+import string
import tempfile
import token
import tokenize
@@ -1975,6 +1977,10 @@ if 1:
for case in cases:
self.check_roundtrip(case)
+ self.check_roundtrip(r"t'{ {}}'")
+ self.check_roundtrip(r"t'{f'{ {}}'}{ {}}'")
+ self.check_roundtrip(r"f'{t'{ {}}'}{ {}}'")
+
def test_continuation(self):
# Balancing continuation
@@ -3234,5 +3240,77 @@ class CommandLineTest(unittest.TestCase):
self.check_output(source, expect, flag)
+class StringPrefixTest(unittest.TestCase):
+ @staticmethod
+ def determine_valid_prefixes():
+ # Try all lengths until we find a length that has zero valid
+ # prefixes. This will miss the case where for example there
+ # are no valid 3 character prefixes, but there are valid 4
+ # character prefixes. That seems unlikely.
+
+ single_char_valid_prefixes = set()
+
+ # Find all of the single character string prefixes. Just get
+ # the lowercase version; we'll deal with combinations of upper
+ # and lower case later. I'm using this logic just in case
+ # some uppercase-only prefix is added.
+ for letter in itertools.chain(string.ascii_lowercase, string.ascii_uppercase):
+ try:
+ eval(f'{letter}""')
+ single_char_valid_prefixes.add(letter.lower())
+ except SyntaxError:
+ pass
+
+ # This logic assumes that all combinations of valid prefixes only use
+ # the characters that are valid single character prefixes. That seems
+ # like a valid assumption, but if it ever changes this will need
+ # adjusting.
+ valid_prefixes = set()
+ for length in itertools.count():
+ num_at_this_length = 0
+ for prefix in (
+ "".join(l)
+ for l in itertools.combinations(single_char_valid_prefixes, length)
+ ):
+ for t in itertools.permutations(prefix):
+ for u in itertools.product(*[(c, c.upper()) for c in t]):
+ p = "".join(u)
+ if p == "not":
+ # 'not' can never be a string prefix,
+ # because it's a valid expression: not ""
+ continue
+ try:
+ eval(f'{p}""')
+
+ # No syntax error, so p is a valid string
+ # prefix.
+
+ valid_prefixes.add(p)
+ num_at_this_length += 1
+ except SyntaxError:
+ pass
+ if num_at_this_length == 0:
+ return valid_prefixes
+
+
+ def test_prefixes(self):
+ # Get the list of defined string prefixes. I don't see an
+ # obvious documented way of doing this, but probably the best
+ # thing is to split apart tokenize.StringPrefix.
+
+ # Make sure StringPrefix begins and ends in parens. We're
+ # assuming it's of the form "(a|b|ab)", if a, b, and ab are
+ # valid string prefixes.
+ self.assertEqual(tokenize.StringPrefix[0], '(')
+ self.assertEqual(tokenize.StringPrefix[-1], ')')
+
+ # Then split apart everything else by '|'.
+ defined_prefixes = set(tokenize.StringPrefix[1:-1].split('|'))
+
+ # Now compute the actual allowed string prefixes and compare
+ # to what is defined in the tokenize module.
+ self.assertEqual(defined_prefixes, self.determine_valid_prefixes())
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_tools/i18n_data/docstrings.py b/Lib/test/test_tools/i18n_data/docstrings.py
index 151a55a4b56..14559a632da 100644
--- a/Lib/test/test_tools/i18n_data/docstrings.py
+++ b/Lib/test/test_tools/i18n_data/docstrings.py
@@ -1,7 +1,7 @@
"""Module docstring"""
# Test docstring extraction
-from gettext import gettext as _
+from gettext import gettext as _ # noqa: F401
# Empty docstring
diff --git a/Lib/test/test_traceback.py b/Lib/test/test_traceback.py
index b9be87f357f..74b979d0096 100644
--- a/Lib/test/test_traceback.py
+++ b/Lib/test/test_traceback.py
@@ -4188,6 +4188,15 @@ class SuggestionFormattingTestBase:
self.assertNotIn("blech", actual)
self.assertNotIn("oh no!", actual)
+ def test_attribute_error_with_non_string_candidates(self):
+ class T:
+ bluch = 1
+
+ instance = T()
+ instance.__dict__[0] = 1
+ actual = self.get_suggestion(instance, 'blich')
+ self.assertIn("bluch", actual)
+
def test_attribute_error_with_bad_name(self):
def raise_attribute_error_with_bad_name():
raise AttributeError(name=12, obj=23)
@@ -4223,8 +4232,8 @@ class SuggestionFormattingTestBase:
return mod_name
- def get_import_from_suggestion(self, mod_dict, name):
- modname = self.make_module(mod_dict)
+ def get_import_from_suggestion(self, code, name):
+ modname = self.make_module(code)
def callable():
try:
@@ -4301,6 +4310,13 @@ class SuggestionFormattingTestBase:
self.assertIn("'_bluch'", self.get_import_from_suggestion(code, '_luch'))
self.assertNotIn("'_bluch'", self.get_import_from_suggestion(code, 'bluch'))
+ def test_import_from_suggestions_non_string(self):
+ modWithNonStringAttr = textwrap.dedent("""\
+ globals()[0] = 1
+ bluch = 1
+ """)
+ self.assertIn("'bluch'", self.get_import_from_suggestion(modWithNonStringAttr, 'blech'))
+
def test_import_from_suggestions_do_not_trigger_for_long_attributes(self):
code = "blech = None"
@@ -4397,6 +4413,15 @@ class SuggestionFormattingTestBase:
actual = self.get_suggestion(func)
self.assertIn("'ZeroDivisionError'?", actual)
+ def test_name_error_suggestions_with_non_string_candidates(self):
+ def func():
+ abc = 1
+ custom_globals = globals().copy()
+ custom_globals[0] = 1
+ print(eval("abv", custom_globals, locals()))
+ actual = self.get_suggestion(func)
+ self.assertIn("abc", actual)
+
def test_name_error_suggestions_do_not_trigger_for_long_names(self):
def func():
somethingverywronghehehehehehe = None
diff --git a/Lib/test/test_tstring.py b/Lib/test/test_tstring.py
index e72a1ea5417..aabae385567 100644
--- a/Lib/test/test_tstring.py
+++ b/Lib/test/test_tstring.py
@@ -219,6 +219,7 @@ class TestTString(unittest.TestCase, TStringBaseCase):
("t'{lambda:1}'", "t-string: lambda expressions are not allowed "
"without parentheses"),
("t'{x:{;}}'", "t-string: expecting a valid expression after '{'"),
+ ("t'{1:d\n}'", "t-string: newlines are not allowed in format specifiers")
):
with self.subTest(case), self.assertRaisesRegex(SyntaxError, err):
eval(case)
diff --git a/Lib/test/test_type_annotations.py b/Lib/test/test_type_annotations.py
index 2c886bb6d36..c66cb058552 100644
--- a/Lib/test/test_type_annotations.py
+++ b/Lib/test/test_type_annotations.py
@@ -498,6 +498,28 @@ class DeferredEvaluationTests(unittest.TestCase):
self.assertEqual(f.__annotate__(annotationlib.Format.VALUE), annos)
self.assertEqual(f.__annotations__, annos)
+ def test_set_annotations(self):
+ function_code = textwrap.dedent("""
+ def f(x: int):
+ pass
+ """)
+ class_code = textwrap.dedent("""
+ class f:
+ x: int
+ """)
+ for future in (False, True):
+ for label, code in (("function", function_code), ("class", class_code)):
+ with self.subTest(future=future, label=label):
+ if future:
+ code = "from __future__ import annotations\n" + code
+ ns = run_code(code)
+ f = ns["f"]
+ anno = "int" if future else int
+ self.assertEqual(f.__annotations__, {"x": anno})
+
+ f.__annotations__ = {"x": str}
+ self.assertEqual(f.__annotations__, {"x": str})
+
def test_name_clash_with_format(self):
# this test would fail if __annotate__'s parameter was called "format"
# during symbol table construction
diff --git a/Lib/test/test_types.py b/Lib/test/test_types.py
index 3097c7ddf05..fccdcc975e6 100644
--- a/Lib/test/test_types.py
+++ b/Lib/test/test_types.py
@@ -2,7 +2,7 @@
from test.support import (
run_with_locale, cpython_only, no_rerun,
- MISSING_C_DOCSTRINGS, EqualToForwardRef,
+ MISSING_C_DOCSTRINGS, EqualToForwardRef, check_disallow_instantiation,
)
from test.support.script_helper import assert_python_ok
from test.support.import_helper import import_fresh_module
@@ -21,6 +21,7 @@ import types
import unittest.mock
import weakref
import typing
+import re
c_types = import_fresh_module('types', fresh=['_types'])
py_types = import_fresh_module('types', blocked=['_types'])
@@ -517,8 +518,8 @@ class TypesTests(unittest.TestCase):
# and a number after the decimal. This is tricky, because
# a totally empty format specifier means something else.
# So, just use a sign flag
- test(1e200, '+g', '+1e+200')
- test(1e200, '+', '+1e+200')
+ test(1.25e200, '+g', '+1.25e+200')
+ test(1.25e200, '+', '+1.25e+200')
test(1.1e200, '+g', '+1.1e+200')
test(1.1e200, '+', '+1.1e+200')
@@ -1148,8 +1149,7 @@ class UnionTests(unittest.TestCase):
msg='Check for union reference leak.')
def test_instantiation(self):
- with self.assertRaises(TypeError):
- types.UnionType()
+ check_disallow_instantiation(self, types.UnionType)
self.assertIs(int, types.UnionType[int])
self.assertIs(int, types.UnionType[int, int])
self.assertEqual(int | str, types.UnionType[int, str])
@@ -1376,6 +1376,27 @@ class MappingProxyTests(unittest.TestCase):
view = self.mappingproxy(mapping)
self.assertEqual(hash(view), hash(mapping))
+ def test_richcompare(self):
+ mp1 = self.mappingproxy({'a': 1})
+ mp1_2 = self.mappingproxy({'a': 1})
+ mp2 = self.mappingproxy({'a': 2})
+
+ self.assertTrue(mp1 == mp1_2)
+ self.assertFalse(mp1 != mp1_2)
+ self.assertFalse(mp1 == mp2)
+ self.assertTrue(mp1 != mp2)
+
+ msg = "not supported between instances of 'mappingproxy' and 'mappingproxy'"
+
+ with self.assertRaisesRegex(TypeError, msg):
+ mp1 > mp2
+ with self.assertRaisesRegex(TypeError, msg):
+ mp1 < mp1_2
+ with self.assertRaisesRegex(TypeError, msg):
+ mp2 >= mp2
+ with self.assertRaisesRegex(TypeError, msg):
+ mp1_2 <= mp1
+
class ClassCreationTests(unittest.TestCase):
@@ -2010,6 +2031,24 @@ class SimpleNamespaceTests(unittest.TestCase):
self.assertEqual(ns1, ns2)
self.assertNotEqual(ns2, types.SimpleNamespace())
+ def test_richcompare_unsupported(self):
+ ns1 = types.SimpleNamespace(x=1)
+ ns2 = types.SimpleNamespace(y=2)
+
+ msg = re.escape(
+ "not supported between instances of "
+ "'types.SimpleNamespace' and 'types.SimpleNamespace'"
+ )
+
+ with self.assertRaisesRegex(TypeError, msg):
+ ns1 > ns2
+ with self.assertRaisesRegex(TypeError, msg):
+ ns1 >= ns2
+ with self.assertRaisesRegex(TypeError, msg):
+ ns1 < ns2
+ with self.assertRaisesRegex(TypeError, msg):
+ ns1 <= ns2
+
def test_nested(self):
ns1 = types.SimpleNamespace(a=1, b=2)
ns2 = types.SimpleNamespace()
@@ -2513,15 +2552,16 @@ class SubinterpreterTests(unittest.TestCase):
def setUpClass(cls):
global interpreters
try:
- from test.support import interpreters
+ from concurrent import interpreters
except ModuleNotFoundError:
raise unittest.SkipTest('subinterpreters required')
- import test.support.interpreters.channels
+ from test.support import channels # noqa: F401
+ cls.create_channel = staticmethod(channels.create)
@cpython_only
@no_rerun('channels (and queues) might have a refleak; see gh-122199')
def test_static_types_inherited_slots(self):
- rch, sch = interpreters.channels.create()
+ rch, sch = self.create_channel()
script = textwrap.dedent("""
import test.support
@@ -2547,7 +2587,7 @@ class SubinterpreterTests(unittest.TestCase):
main_results = collate_results(raw)
interp = interpreters.create()
- interp.exec('from test.support import interpreters')
+ interp.exec('from concurrent import interpreters')
interp.prepare_main(sch=sch)
interp.exec(script)
raw = rch.recv_nowait()
diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py
index 246be22a0d8..bef6773ad6c 100644
--- a/Lib/test/test_typing.py
+++ b/Lib/test/test_typing.py
@@ -46,11 +46,10 @@ import abc
import textwrap
import typing
import weakref
-import warnings
import types
from test.support import (
- captured_stderr, cpython_only, infinite_recursion, requires_docstrings, import_helper, run_code,
+ captured_stderr, cpython_only, requires_docstrings, import_helper, run_code,
EqualToForwardRef,
)
from test.typinganndata import (
@@ -1606,7 +1605,10 @@ class TypeVarTupleTests(BaseTestCase):
self.assertEqual(gth(func1), {'args': Unpack[Ts]})
def func2(*args: *tuple[int, str]): pass
- self.assertEqual(gth(func2), {'args': Unpack[tuple[int, str]]})
+ hint = gth(func2)['args']
+ self.assertIsInstance(hint, types.GenericAlias)
+ self.assertEqual(hint.__args__[0], int)
+ self.assertIs(hint.__unpacked__, True)
class CustomVariadic(Generic[*Ts]): pass
@@ -1621,7 +1623,10 @@ class TypeVarTupleTests(BaseTestCase):
{'args': Unpack[Ts]})
def func2(*args: '*tuple[int, str]'): pass
- self.assertEqual(gth(func2), {'args': Unpack[tuple[int, str]]})
+ hint = gth(func2)['args']
+ self.assertIsInstance(hint, types.GenericAlias)
+ self.assertEqual(hint.__args__[0], int)
+ self.assertIs(hint.__unpacked__, True)
class CustomVariadic(Generic[*Ts]): pass
@@ -6859,12 +6864,10 @@ class GetTypeHintsTests(BaseTestCase):
self.assertEqual(hints, {'value': Final})
def test_top_level_class_var(self):
- # https://bugs.python.org/issue45166
- with self.assertRaisesRegex(
- TypeError,
- r'typing.ClassVar\[int\] is not valid as type argument',
- ):
- get_type_hints(ann_module6)
+ # This is not meaningful but we don't raise for it.
+ # https://github.com/python/cpython/issues/133959
+ hints = get_type_hints(ann_module6)
+ self.assertEqual(hints, {'wrong': ClassVar[int]})
def test_get_type_hints_typeddict(self):
self.assertEqual(get_type_hints(TotalMovie), {'title': str, 'year': int})
@@ -6967,6 +6970,11 @@ class GetTypeHintsTests(BaseTestCase):
self.assertEqual(get_type_hints(foo, globals(), locals()),
{'a': Callable[..., T]})
+ def test_special_forms_no_forward(self):
+ def f(x: ClassVar[int]):
+ pass
+ self.assertEqual(get_type_hints(f), {'x': ClassVar[int]})
+
def test_special_forms_forward(self):
class C:
@@ -6982,8 +6990,9 @@ class GetTypeHintsTests(BaseTestCase):
self.assertEqual(get_type_hints(C, globals())['b'], Final[int])
self.assertEqual(get_type_hints(C, globals())['x'], ClassVar)
self.assertEqual(get_type_hints(C, globals())['y'], Final)
- with self.assertRaises(TypeError):
- get_type_hints(CF, globals()),
+ lfi = get_type_hints(CF, globals())['b']
+ self.assertIs(get_origin(lfi), list)
+ self.assertEqual(get_args(lfi), (Final[int],))
def test_union_forward_recursion(self):
ValueList = List['Value']
@@ -7111,6 +7120,24 @@ class GetTypeHintsTests(BaseTestCase):
right_hints = get_type_hints(t.add_right, globals(), locals())
self.assertEqual(right_hints['node'], Node[T])
+ def test_get_type_hints_preserve_generic_alias_subclasses(self):
+ # https://github.com/python/cpython/issues/130870
+ # A real world example of this is `collections.abc.Callable`. When parameterized,
+ # the result is a subclass of `types.GenericAlias`.
+ class MyAlias(types.GenericAlias):
+ pass
+
+ class MyClass:
+ def __class_getitem__(cls, args):
+ return MyAlias(cls, args)
+
+ # Using a forward reference is important, otherwise it works as expected.
+ # `y` tests that the `GenericAlias` subclass is preserved when stripping `Annotated`.
+ def func(x: MyClass['int'], y: MyClass[Annotated[int, ...]]): ...
+
+ assert isinstance(get_type_hints(func)['x'], MyAlias)
+ assert isinstance(get_type_hints(func)['y'], MyAlias)
+
class GetUtilitiesTestCase(TestCase):
def test_get_origin(self):
@@ -7216,33 +7243,113 @@ class GetUtilitiesTestCase(TestCase):
class EvaluateForwardRefTests(BaseTestCase):
def test_evaluate_forward_ref(self):
int_ref = ForwardRef('int')
- missing = ForwardRef('missing')
+ self.assertIs(typing.evaluate_forward_ref(int_ref), int)
self.assertIs(
typing.evaluate_forward_ref(int_ref, type_params=()),
int,
)
self.assertIs(
+ typing.evaluate_forward_ref(int_ref, format=annotationlib.Format.VALUE),
+ int,
+ )
+ self.assertIs(
typing.evaluate_forward_ref(
- int_ref, type_params=(), format=annotationlib.Format.FORWARDREF,
+ int_ref, format=annotationlib.Format.FORWARDREF,
),
int,
)
+ self.assertEqual(
+ typing.evaluate_forward_ref(
+ int_ref, format=annotationlib.Format.STRING,
+ ),
+ 'int',
+ )
+
+ def test_evaluate_forward_ref_undefined(self):
+ missing = ForwardRef('missing')
+ with self.assertRaises(NameError):
+ typing.evaluate_forward_ref(missing)
self.assertIs(
typing.evaluate_forward_ref(
- missing, type_params=(), format=annotationlib.Format.FORWARDREF,
+ missing, format=annotationlib.Format.FORWARDREF,
),
missing,
)
self.assertEqual(
typing.evaluate_forward_ref(
- int_ref, type_params=(), format=annotationlib.Format.STRING,
+ missing, format=annotationlib.Format.STRING,
),
- 'int',
+ "missing",
+ )
+
+ def test_evaluate_forward_ref_nested(self):
+ ref = ForwardRef("int | list['str']")
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref),
+ int | list[str],
+ )
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref, format=annotationlib.Format.FORWARDREF),
+ int | list[str],
+ )
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref, format=annotationlib.Format.STRING),
+ "int | list['str']",
+ )
+
+ why = ForwardRef('"\'str\'"')
+ self.assertIs(typing.evaluate_forward_ref(why), str)
+
+ def test_evaluate_forward_ref_none(self):
+ none_ref = ForwardRef('None')
+ self.assertIs(typing.evaluate_forward_ref(none_ref), None)
+
+ def test_globals(self):
+ A = "str"
+ ref = ForwardRef('list[A]')
+ with self.assertRaises(NameError):
+ typing.evaluate_forward_ref(ref)
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref, globals={'A': A}),
+ list[str],
)
- def test_evaluate_forward_ref_no_type_params(self):
- ref = ForwardRef('int')
- self.assertIs(typing.evaluate_forward_ref(ref), int)
+ def test_owner(self):
+ ref = ForwardRef("A")
+
+ with self.assertRaises(NameError):
+ typing.evaluate_forward_ref(ref)
+
+ # We default to the globals of `owner`,
+        # so it no longer raises `NameError`.
+ self.assertIs(
+ typing.evaluate_forward_ref(ref, owner=Loop), A
+ )
+
+ def test_inherited_owner(self):
+ # owner passed to evaluate_forward_ref
+ ref = ForwardRef("list['A']")
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref, owner=Loop),
+ list[A],
+ )
+
+ # owner set on the ForwardRef
+ ref = ForwardRef("list['A']", owner=Loop)
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref),
+ list[A],
+ )
+
+ def test_partial_evaluation(self):
+ ref = ForwardRef("list[A]")
+ with self.assertRaises(NameError):
+ typing.evaluate_forward_ref(ref)
+
+ self.assertEqual(
+ typing.evaluate_forward_ref(ref, format=annotationlib.Format.FORWARDREF),
+ list[EqualToForwardRef('A')],
+ )
class CollectionsAbcTests(BaseTestCase):
diff --git a/Lib/test/test_unittest/test_case.py b/Lib/test/test_unittest/test_case.py
index d66cab146af..cf10e956bf2 100644
--- a/Lib/test/test_unittest/test_case.py
+++ b/Lib/test/test_unittest/test_case.py
@@ -1920,6 +1920,22 @@ test case
with self.assertLogs():
raise ZeroDivisionError("Unexpected")
+ def testAssertLogsWithFormatter(self):
+ # Check alternative formats will be respected
+ format = "[No.1: the larch] %(levelname)s:%(name)s:%(message)s"
+ formatter = logging.Formatter(format)
+ with self.assertNoStderr():
+ with self.assertLogs() as cm:
+ log_foo.info("1")
+ log_foobar.debug("2")
+ self.assertEqual(cm.output, ["INFO:foo:1"])
+ self.assertLogRecords(cm.records, [{'name': 'foo'}])
+ with self.assertLogs(formatter=formatter) as cm:
+ log_foo.info("1")
+ log_foobar.debug("2")
+ self.assertEqual(cm.output, ["[No.1: the larch] INFO:foo:1"])
+ self.assertLogRecords(cm.records, [{'name': 'foo'}])
+
def testAssertNoLogsDefault(self):
with self.assertRaises(self.failureException) as cm:
with self.assertNoLogs():
diff --git a/Lib/test/test_unittest/test_result.py b/Lib/test/test_unittest/test_result.py
index 9ac4c52449c..3f44e617303 100644
--- a/Lib/test/test_unittest/test_result.py
+++ b/Lib/test/test_unittest/test_result.py
@@ -1282,14 +1282,22 @@ class TestOutputBuffering(unittest.TestCase):
suite(result)
expected_out = '\nStdout:\ndo cleanup2\ndo cleanup1\n'
self.assertEqual(stdout.getvalue(), expected_out)
- self.assertEqual(len(result.errors), 1)
+ self.assertEqual(len(result.errors), 2)
description = 'tearDownModule (Module)'
test_case, formatted_exc = result.errors[0]
self.assertEqual(test_case.description, description)
self.assertIn('ValueError: bad cleanup2', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
self.assertIn(expected_out, formatted_exc)
+ test_case, formatted_exc = result.errors[1]
+ self.assertEqual(test_case.description, description)
+ self.assertIn('TypeError: bad cleanup1', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
+ self.assertNotIn('ValueError', formatted_exc)
+ self.assertIn(expected_out, formatted_exc)
+
def testBufferSetUpModule_DoModuleCleanups(self):
with captured_stdout() as stdout:
result = unittest.TestResult()
@@ -1313,22 +1321,34 @@ class TestOutputBuffering(unittest.TestCase):
suite(result)
expected_out = '\nStdout:\nset up module\ndo cleanup2\ndo cleanup1\n'
self.assertEqual(stdout.getvalue(), expected_out)
- self.assertEqual(len(result.errors), 2)
+ self.assertEqual(len(result.errors), 3)
description = 'setUpModule (Module)'
test_case, formatted_exc = result.errors[0]
self.assertEqual(test_case.description, description)
self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
self.assertNotIn('ValueError', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
self.assertIn('\nStdout:\nset up module\n', formatted_exc)
+
test_case, formatted_exc = result.errors[1]
self.assertIn(expected_out, formatted_exc)
self.assertEqual(test_case.description, description)
self.assertIn('ValueError: bad cleanup2', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
self.assertNotIn('ZeroDivisionError', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
self.assertIn(expected_out, formatted_exc)
+ test_case, formatted_exc = result.errors[2]
+ self.assertIn(expected_out, formatted_exc)
+ self.assertEqual(test_case.description, description)
+ self.assertIn('TypeError: bad cleanup1', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
+ self.assertNotIn('ZeroDivisionError', formatted_exc)
+ self.assertNotIn('ValueError', formatted_exc)
+ self.assertIn(expected_out, formatted_exc)
+
def testBufferTearDownModule_DoModuleCleanups(self):
with captured_stdout() as stdout:
result = unittest.TestResult()
@@ -1355,21 +1375,32 @@ class TestOutputBuffering(unittest.TestCase):
suite(result)
expected_out = '\nStdout:\ntear down module\ndo cleanup2\ndo cleanup1\n'
self.assertEqual(stdout.getvalue(), expected_out)
- self.assertEqual(len(result.errors), 2)
+ self.assertEqual(len(result.errors), 3)
description = 'tearDownModule (Module)'
test_case, formatted_exc = result.errors[0]
self.assertEqual(test_case.description, description)
self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
self.assertNotIn('ValueError', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
self.assertIn('\nStdout:\ntear down module\n', formatted_exc)
+
test_case, formatted_exc = result.errors[1]
self.assertEqual(test_case.description, description)
self.assertIn('ValueError: bad cleanup2', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
self.assertNotIn('ZeroDivisionError', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
self.assertIn(expected_out, formatted_exc)
+ test_case, formatted_exc = result.errors[2]
+ self.assertEqual(test_case.description, description)
+ self.assertIn('TypeError: bad cleanup1', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
+ self.assertNotIn('ZeroDivisionError', formatted_exc)
+ self.assertNotIn('ValueError', formatted_exc)
+ self.assertIn(expected_out, formatted_exc)
+
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/test/test_unittest/test_runner.py b/Lib/test/test_unittest/test_runner.py
index 4d3cfd60b8d..a47e2ebb59d 100644
--- a/Lib/test/test_unittest/test_runner.py
+++ b/Lib/test/test_unittest/test_runner.py
@@ -13,6 +13,7 @@ from test.test_unittest.support import (
LoggingResult,
ResultWithNoStartTestRunStopTestRun,
)
+from test.support.testcase import ExceptionIsLikeMixin
def resultFactory(*_):
@@ -604,7 +605,7 @@ class TestClassCleanup(unittest.TestCase):
@support.force_not_colorized_test_class
-class TestModuleCleanUp(unittest.TestCase):
+class TestModuleCleanUp(ExceptionIsLikeMixin, unittest.TestCase):
def test_add_and_do_ModuleCleanup(self):
module_cleanups = []
@@ -646,11 +647,50 @@ class TestModuleCleanUp(unittest.TestCase):
[(module_cleanup_good, (1, 2, 3),
dict(four='hello', five='goodbye')),
(module_cleanup_bad, (), {})])
- with self.assertRaises(CustomError) as e:
+ with self.assertRaises(Exception) as e:
unittest.case.doModuleCleanups()
- self.assertEqual(str(e.exception), 'CleanUpExc')
+ self.assertExceptionIsLike(e.exception,
+ ExceptionGroup('module cleanup failed',
+ [CustomError('CleanUpExc')]))
self.assertEqual(unittest.case._module_cleanups, [])
+ def test_doModuleCleanup_with_multiple_errors_in_addModuleCleanup(self):
+ def module_cleanup_bad1():
+ raise TypeError('CleanUpExc1')
+
+ def module_cleanup_bad2():
+ raise ValueError('CleanUpExc2')
+
+ class Module:
+ unittest.addModuleCleanup(module_cleanup_bad1)
+ unittest.addModuleCleanup(module_cleanup_bad2)
+ with self.assertRaises(ExceptionGroup) as e:
+ unittest.case.doModuleCleanups()
+ self.assertExceptionIsLike(e.exception,
+ ExceptionGroup('module cleanup failed', [
+ ValueError('CleanUpExc2'),
+ TypeError('CleanUpExc1'),
+ ]))
+
+ def test_doModuleCleanup_with_exception_group_in_addModuleCleanup(self):
+ def module_cleanup_bad():
+ raise ExceptionGroup('CleanUpExc', [
+ ValueError('CleanUpExc2'),
+ TypeError('CleanUpExc1'),
+ ])
+
+ class Module:
+ unittest.addModuleCleanup(module_cleanup_bad)
+ with self.assertRaises(ExceptionGroup) as e:
+ unittest.case.doModuleCleanups()
+ self.assertExceptionIsLike(e.exception,
+ ExceptionGroup('module cleanup failed', [
+ ExceptionGroup('CleanUpExc', [
+ ValueError('CleanUpExc2'),
+ TypeError('CleanUpExc1'),
+ ]),
+ ]))
+
def test_addModuleCleanup_arg_errors(self):
cleanups = []
def cleanup(*args, **kwargs):
@@ -871,9 +911,11 @@ class TestModuleCleanUp(unittest.TestCase):
ordering = []
blowUp = True
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestableTest)
- with self.assertRaises(CustomError) as cm:
+ with self.assertRaises(Exception) as cm:
suite.debug()
- self.assertEqual(str(cm.exception), 'CleanUpExc')
+ self.assertExceptionIsLike(cm.exception,
+ ExceptionGroup('module cleanup failed',
+ [CustomError('CleanUpExc')]))
self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'test',
'tearDownClass', 'tearDownModule', 'cleanup_exc'])
self.assertEqual(unittest.case._module_cleanups, [])
diff --git a/Lib/test/test_unittest/testmock/testhelpers.py b/Lib/test/test_unittest/testmock/testhelpers.py
index d1e48bde982..0e82c723ec3 100644
--- a/Lib/test/test_unittest/testmock/testhelpers.py
+++ b/Lib/test/test_unittest/testmock/testhelpers.py
@@ -1050,6 +1050,7 @@ class SpecSignatureTest(unittest.TestCase):
create_autospec(WithPostInit()),
]:
with self.subTest(mock=mock):
+ self.assertIsInstance(mock, WithPostInit)
self.assertIsInstance(mock.a, int)
self.assertIsInstance(mock.b, int)
@@ -1072,6 +1073,7 @@ class SpecSignatureTest(unittest.TestCase):
create_autospec(WithDefault(1)),
]:
with self.subTest(mock=mock):
+ self.assertIsInstance(mock, WithDefault)
self.assertIsInstance(mock.a, int)
self.assertIsInstance(mock.b, int)
@@ -1087,6 +1089,7 @@ class SpecSignatureTest(unittest.TestCase):
create_autospec(WithMethod(1)),
]:
with self.subTest(mock=mock):
+ self.assertIsInstance(mock, WithMethod)
self.assertIsInstance(mock.a, int)
mock.b.assert_not_called()
@@ -1102,11 +1105,29 @@ class SpecSignatureTest(unittest.TestCase):
create_autospec(WithNonFields(1)),
]:
with self.subTest(mock=mock):
+ self.assertIsInstance(mock, WithNonFields)
with self.assertRaisesRegex(AttributeError, msg):
mock.a
with self.assertRaisesRegex(AttributeError, msg):
mock.b
+ def test_dataclass_special_attrs(self):
+ @dataclass
+ class Description:
+ name: str
+
+ for mock in [
+ create_autospec(Description, instance=True),
+ create_autospec(Description(1)),
+ ]:
+ with self.subTest(mock=mock):
+ self.assertIsInstance(mock, Description)
+ self.assertIs(mock.__class__, Description)
+ self.assertIsInstance(mock.__dataclass_fields__, MagicMock)
+ self.assertIsInstance(mock.__dataclass_params__, MagicMock)
+ self.assertIsInstance(mock.__match_args__, MagicMock)
+ self.assertIsInstance(mock.__hash__, MagicMock)
+
class TestCallList(unittest.TestCase):
def test_args_list_contains_call_list(self):
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index bc1030eea60..1d889ae7cf4 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -1569,6 +1569,7 @@ class Pathname_Tests(unittest.TestCase):
urllib.request.url2pathname,
url, require_scheme=True)
+ @unittest.skipIf(support.is_emscripten, "Fixed by https://github.com/emscripten-core/emscripten/pull/24593")
def test_url2pathname_resolve_host(self):
fn = urllib.request.url2pathname
sep = os.path.sep
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index aabc360289a..b2bde5a9b1d 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -2,6 +2,7 @@ import sys
import unicodedata
import unittest
import urllib.parse
+from test import support
RFC1808_BASE = "http://a/b/c/d;p?q#f"
RFC2396_BASE = "http://a/b/c/d;p?q"
@@ -156,27 +157,25 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(result3.hostname, result.hostname)
self.assertEqual(result3.port, result.port)
- def test_qsl(self):
- for orig, expect in parse_qsl_test_cases:
- result = urllib.parse.parse_qsl(orig, keep_blank_values=True)
- self.assertEqual(result, expect, "Error parsing %r" % orig)
- expect_without_blanks = [v for v in expect if len(v[1])]
- result = urllib.parse.parse_qsl(orig, keep_blank_values=False)
- self.assertEqual(result, expect_without_blanks,
- "Error parsing %r" % orig)
-
- def test_qs(self):
- for orig, expect in parse_qs_test_cases:
- result = urllib.parse.parse_qs(orig, keep_blank_values=True)
- self.assertEqual(result, expect, "Error parsing %r" % orig)
- expect_without_blanks = {v: expect[v]
- for v in expect if len(expect[v][0])}
- result = urllib.parse.parse_qs(orig, keep_blank_values=False)
- self.assertEqual(result, expect_without_blanks,
- "Error parsing %r" % orig)
-
- def test_roundtrips(self):
- str_cases = [
+ @support.subTests('orig,expect', parse_qsl_test_cases)
+ def test_qsl(self, orig, expect):
+ result = urllib.parse.parse_qsl(orig, keep_blank_values=True)
+ self.assertEqual(result, expect)
+ expect_without_blanks = [v for v in expect if len(v[1])]
+ result = urllib.parse.parse_qsl(orig, keep_blank_values=False)
+ self.assertEqual(result, expect_without_blanks)
+
+ @support.subTests('orig,expect', parse_qs_test_cases)
+ def test_qs(self, orig, expect):
+ result = urllib.parse.parse_qs(orig, keep_blank_values=True)
+ self.assertEqual(result, expect)
+ expect_without_blanks = {v: expect[v]
+ for v in expect if len(expect[v][0])}
+ result = urllib.parse.parse_qs(orig, keep_blank_values=False)
+ self.assertEqual(result, expect_without_blanks)
+
+ @support.subTests('bytes', (False, True))
+ @support.subTests('url,parsed,split', [
('path/to/file',
('', '', 'path/to/file', '', '', ''),
('', '', 'path/to/file', '', '')),
@@ -263,23 +262,21 @@ class UrlParseTestCase(unittest.TestCase):
('sch_me:path/to/file',
('', '', 'sch_me:path/to/file', '', '', ''),
('', '', 'sch_me:path/to/file', '', '')),
- ]
- def _encode(t):
- return (t[0].encode('ascii'),
- tuple(x.encode('ascii') for x in t[1]),
- tuple(x.encode('ascii') for x in t[2]))
- bytes_cases = [_encode(x) for x in str_cases]
- str_cases += [
('schème:path/to/file',
('', '', 'schème:path/to/file', '', '', ''),
('', '', 'schème:path/to/file', '', '')),
- ]
- for url, parsed, split in str_cases + bytes_cases:
- with self.subTest(url):
- self.checkRoundtrips(url, parsed, split)
-
- def test_roundtrips_normalization(self):
- str_cases = [
+ ])
+ def test_roundtrips(self, bytes, url, parsed, split):
+ if bytes:
+ if not url.isascii():
+ self.skipTest('non-ASCII bytes')
+ url = str_encode(url)
+ parsed = tuple_encode(parsed)
+ split = tuple_encode(split)
+ self.checkRoundtrips(url, parsed, split)
+
+ @support.subTests('bytes', (False, True))
+ @support.subTests('url,url2,parsed,split', [
('///path/to/file',
'/path/to/file',
('', '', '/path/to/file', '', '', ''),
@@ -300,22 +297,18 @@ class UrlParseTestCase(unittest.TestCase):
'https:///tmp/junk.txt',
('https', '', '/tmp/junk.txt', '', '', ''),
('https', '', '/tmp/junk.txt', '', '')),
- ]
- def _encode(t):
- return (t[0].encode('ascii'),
- t[1].encode('ascii'),
- tuple(x.encode('ascii') for x in t[2]),
- tuple(x.encode('ascii') for x in t[3]))
- bytes_cases = [_encode(x) for x in str_cases]
- for url, url2, parsed, split in str_cases + bytes_cases:
- with self.subTest(url):
- self.checkRoundtrips(url, parsed, split, url2)
-
- def test_http_roundtrips(self):
- # urllib.parse.urlsplit treats 'http:' as an optimized special case,
- # so we test both 'http:' and 'https:' in all the following.
- # Three cheers for white box knowledge!
- str_cases = [
+ ])
+ def test_roundtrips_normalization(self, bytes, url, url2, parsed, split):
+ if bytes:
+ url = str_encode(url)
+ url2 = str_encode(url2)
+ parsed = tuple_encode(parsed)
+ split = tuple_encode(split)
+ self.checkRoundtrips(url, parsed, split, url2)
+
+ @support.subTests('bytes', (False, True))
+ @support.subTests('scheme', ('http', 'https'))
+ @support.subTests('url,parsed,split', [
('://www.python.org',
('www.python.org', '', '', '', ''),
('www.python.org', '', '', '')),
@@ -331,23 +324,20 @@ class UrlParseTestCase(unittest.TestCase):
('://a/b/c/d;p?q#f',
('a', '/b/c/d', 'p', 'q', 'f'),
('a', '/b/c/d;p', 'q', 'f')),
- ]
- def _encode(t):
- return (t[0].encode('ascii'),
- tuple(x.encode('ascii') for x in t[1]),
- tuple(x.encode('ascii') for x in t[2]))
- bytes_cases = [_encode(x) for x in str_cases]
- str_schemes = ('http', 'https')
- bytes_schemes = (b'http', b'https')
- str_tests = str_schemes, str_cases
- bytes_tests = bytes_schemes, bytes_cases
- for schemes, test_cases in (str_tests, bytes_tests):
- for scheme in schemes:
- for url, parsed, split in test_cases:
- url = scheme + url
- parsed = (scheme,) + parsed
- split = (scheme,) + split
- self.checkRoundtrips(url, parsed, split)
+ ])
+ def test_http_roundtrips(self, bytes, scheme, url, parsed, split):
+ # urllib.parse.urlsplit treats 'http:' as an optimized special case,
+ # so we test both 'http:' and 'https:' in all the following.
+ # Three cheers for white box knowledge!
+ if bytes:
+ scheme = str_encode(scheme)
+ url = str_encode(url)
+ parsed = tuple_encode(parsed)
+ split = tuple_encode(split)
+ url = scheme + url
+ parsed = (scheme,) + parsed
+ split = (scheme,) + split
+ self.checkRoundtrips(url, parsed, split)
def checkJoin(self, base, relurl, expected, *, relroundtrip=True):
with self.subTest(base=base, relurl=relurl):
@@ -363,12 +353,13 @@ class UrlParseTestCase(unittest.TestCase):
relurlb = urllib.parse.urlunsplit(urllib.parse.urlsplit(relurlb))
self.assertEqual(urllib.parse.urljoin(baseb, relurlb), expectedb)
- def test_unparse_parse(self):
- str_cases = ['Python', './Python','x-newscheme://foo.com/stuff','x://y','x:/y','x:/','/',]
- bytes_cases = [x.encode('ascii') for x in str_cases]
- for u in str_cases + bytes_cases:
- self.assertEqual(urllib.parse.urlunsplit(urllib.parse.urlsplit(u)), u)
- self.assertEqual(urllib.parse.urlunparse(urllib.parse.urlparse(u)), u)
+ @support.subTests('bytes', (False, True))
+ @support.subTests('u', ['Python', './Python','x-newscheme://foo.com/stuff','x://y','x:/y','x:/','/',])
+ def test_unparse_parse(self, bytes, u):
+ if bytes:
+ u = str_encode(u)
+ self.assertEqual(urllib.parse.urlunsplit(urllib.parse.urlsplit(u)), u)
+ self.assertEqual(urllib.parse.urlunparse(urllib.parse.urlparse(u)), u)
def test_RFC1808(self):
# "normal" cases from RFC 1808:
@@ -695,8 +686,8 @@ class UrlParseTestCase(unittest.TestCase):
self.checkJoin('///b/c', '///w', '///w')
self.checkJoin('///b/c', 'w', '///b/w')
- def test_RFC2732(self):
- str_cases = [
+ @support.subTests('bytes', (False, True))
+ @support.subTests('url,hostname,port', [
('http://Test.python.org:5432/foo/', 'test.python.org', 5432),
('http://12.34.56.78:5432/foo/', '12.34.56.78', 5432),
('http://[::1]:5432/foo/', '::1', 5432),
@@ -727,26 +718,28 @@ class UrlParseTestCase(unittest.TestCase):
('http://[::12.34.56.78]:/foo/', '::12.34.56.78', None),
('http://[::ffff:12.34.56.78]:/foo/',
'::ffff:12.34.56.78', None),
- ]
- def _encode(t):
- return t[0].encode('ascii'), t[1].encode('ascii'), t[2]
- bytes_cases = [_encode(x) for x in str_cases]
- for url, hostname, port in str_cases + bytes_cases:
- urlparsed = urllib.parse.urlparse(url)
- self.assertEqual((urlparsed.hostname, urlparsed.port) , (hostname, port))
-
- str_cases = [
+ ])
+ def test_RFC2732(self, bytes, url, hostname, port):
+ if bytes:
+ url = str_encode(url)
+ hostname = str_encode(hostname)
+ urlparsed = urllib.parse.urlparse(url)
+ self.assertEqual((urlparsed.hostname, urlparsed.port), (hostname, port))
+
+ @support.subTests('bytes', (False, True))
+ @support.subTests('invalid_url', [
'http://::12.34.56.78]/',
'http://[::1/foo/',
'ftp://[::1/foo/bad]/bad',
'http://[::1/foo/bad]/bad',
- 'http://[::ffff:12.34.56.78']
- bytes_cases = [x.encode('ascii') for x in str_cases]
- for invalid_url in str_cases + bytes_cases:
- self.assertRaises(ValueError, urllib.parse.urlparse, invalid_url)
-
- def test_urldefrag(self):
- str_cases = [
+ 'http://[::ffff:12.34.56.78'])
+ def test_RFC2732_invalid(self, bytes, invalid_url):
+ if bytes:
+ invalid_url = str_encode(invalid_url)
+ self.assertRaises(ValueError, urllib.parse.urlparse, invalid_url)
+
+ @support.subTests('bytes', (False, True))
+ @support.subTests('url,defrag,frag', [
('http://python.org#frag', 'http://python.org', 'frag'),
('http://python.org', 'http://python.org', ''),
('http://python.org/#frag', 'http://python.org/', 'frag'),
@@ -770,18 +763,18 @@ class UrlParseTestCase(unittest.TestCase):
('http:?q#f', 'http:?q', 'f'),
('//a/b/c;p?q#f', '//a/b/c;p?q', 'f'),
('://a/b/c;p?q#f', '://a/b/c;p?q', 'f'),
- ]
- def _encode(t):
- return type(t)(x.encode('ascii') for x in t)
- bytes_cases = [_encode(x) for x in str_cases]
- for url, defrag, frag in str_cases + bytes_cases:
- with self.subTest(url):
- result = urllib.parse.urldefrag(url)
- hash = '#' if isinstance(url, str) else b'#'
- self.assertEqual(result.geturl(), url.rstrip(hash))
- self.assertEqual(result, (defrag, frag))
- self.assertEqual(result.url, defrag)
- self.assertEqual(result.fragment, frag)
+ ])
+ def test_urldefrag(self, bytes, url, defrag, frag):
+ if bytes:
+ url = str_encode(url)
+ defrag = str_encode(defrag)
+ frag = str_encode(frag)
+ result = urllib.parse.urldefrag(url)
+ hash = '#' if isinstance(url, str) else b'#'
+ self.assertEqual(result.geturl(), url.rstrip(hash))
+ self.assertEqual(result, (defrag, frag))
+ self.assertEqual(result.url, defrag)
+ self.assertEqual(result.fragment, frag)
def test_urlsplit_scoped_IPv6(self):
p = urllib.parse.urlsplit('http://[FE80::822a:a8ff:fe49:470c%tESt]:1234')
@@ -981,42 +974,35 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(p.scheme, "https")
self.assertEqual(p.geturl(), "https://www.python.org/")
- def test_attributes_bad_port(self):
+ @support.subTests('bytes', (False, True))
+ @support.subTests('parse', (urllib.parse.urlsplit, urllib.parse.urlparse))
+ @support.subTests('port', ("foo", "1.5", "-1", "0x10", "-0", "1_1", " 1", "1 ", "६"))
+ def test_attributes_bad_port(self, bytes, parse, port):
"""Check handling of invalid ports."""
- for bytes in (False, True):
- for parse in (urllib.parse.urlsplit, urllib.parse.urlparse):
- for port in ("foo", "1.5", "-1", "0x10", "-0", "1_1", " 1", "1 ", "६"):
- with self.subTest(bytes=bytes, parse=parse, port=port):
- netloc = "www.example.net:" + port
- url = "http://" + netloc + "/"
- if bytes:
- if netloc.isascii() and port.isascii():
- netloc = netloc.encode("ascii")
- url = url.encode("ascii")
- else:
- continue
- p = parse(url)
- self.assertEqual(p.netloc, netloc)
- with self.assertRaises(ValueError):
- p.port
+ netloc = "www.example.net:" + port
+ url = "http://" + netloc + "/"
+ if bytes:
+ if not (netloc.isascii() and port.isascii()):
+ self.skipTest('non-ASCII bytes')
+ netloc = str_encode(netloc)
+ url = str_encode(url)
+ p = parse(url)
+ self.assertEqual(p.netloc, netloc)
+ with self.assertRaises(ValueError):
+ p.port
- def test_attributes_bad_scheme(self):
+ @support.subTests('bytes', (False, True))
+ @support.subTests('parse', (urllib.parse.urlsplit, urllib.parse.urlparse))
+ @support.subTests('scheme', (".", "+", "-", "0", "http&", "६http"))
+ def test_attributes_bad_scheme(self, bytes, parse, scheme):
"""Check handling of invalid schemes."""
- for bytes in (False, True):
- for parse in (urllib.parse.urlsplit, urllib.parse.urlparse):
- for scheme in (".", "+", "-", "0", "http&", "६http"):
- with self.subTest(bytes=bytes, parse=parse, scheme=scheme):
- url = scheme + "://www.example.net"
- if bytes:
- if url.isascii():
- url = url.encode("ascii")
- else:
- continue
- p = parse(url)
- if bytes:
- self.assertEqual(p.scheme, b"")
- else:
- self.assertEqual(p.scheme, "")
+ url = scheme + "://www.example.net"
+ if bytes:
+ if not url.isascii():
+ self.skipTest('non-ASCII bytes')
+ url = url.encode("ascii")
+ p = parse(url)
+ self.assertEqual(p.scheme, b"" if bytes else "")
def test_attributes_without_netloc(self):
# This example is straight from RFC 3261. It looks like it
@@ -1128,24 +1114,21 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff?query"),
(b'x-newscheme', b'foo.com', b'/stuff', b'', b'query', b''))
- def test_default_scheme(self):
+ @support.subTests('func', (urllib.parse.urlparse, urllib.parse.urlsplit))
+ def test_default_scheme(self, func):
# Exercise the scheme parameter of urlparse() and urlsplit()
- for func in (urllib.parse.urlparse, urllib.parse.urlsplit):
- with self.subTest(function=func):
- result = func("http://example.net/", "ftp")
- self.assertEqual(result.scheme, "http")
- result = func(b"http://example.net/", b"ftp")
- self.assertEqual(result.scheme, b"http")
- self.assertEqual(func("path", "ftp").scheme, "ftp")
- self.assertEqual(func("path", scheme="ftp").scheme, "ftp")
- self.assertEqual(func(b"path", scheme=b"ftp").scheme, b"ftp")
- self.assertEqual(func("path").scheme, "")
- self.assertEqual(func(b"path").scheme, b"")
- self.assertEqual(func(b"path", "").scheme, b"")
-
- def test_parse_fragments(self):
- # Exercise the allow_fragments parameter of urlparse() and urlsplit()
- tests = (
+ result = func("http://example.net/", "ftp")
+ self.assertEqual(result.scheme, "http")
+ result = func(b"http://example.net/", b"ftp")
+ self.assertEqual(result.scheme, b"http")
+ self.assertEqual(func("path", "ftp").scheme, "ftp")
+ self.assertEqual(func("path", scheme="ftp").scheme, "ftp")
+ self.assertEqual(func(b"path", scheme=b"ftp").scheme, b"ftp")
+ self.assertEqual(func("path").scheme, "")
+ self.assertEqual(func(b"path").scheme, b"")
+ self.assertEqual(func(b"path", "").scheme, b"")
+
+ @support.subTests('url,attr,expected_frag', (
("http:#frag", "path", "frag"),
("//example.net#frag", "path", "frag"),
("index.html#frag", "path", "frag"),
@@ -1156,24 +1139,24 @@ class UrlParseTestCase(unittest.TestCase):
("//abc#@frag", "path", "@frag"),
("//abc:80#@frag", "path", "@frag"),
("//abc#@frag:80", "path", "@frag:80"),
- )
- for url, attr, expected_frag in tests:
- for func in (urllib.parse.urlparse, urllib.parse.urlsplit):
- if attr == "params" and func is urllib.parse.urlsplit:
- attr = "path"
- with self.subTest(url=url, function=func):
- result = func(url, allow_fragments=False)
- self.assertEqual(result.fragment, "")
- self.assertEndsWith(getattr(result, attr),
- "#" + expected_frag)
- self.assertEqual(func(url, "", False).fragment, "")
-
- result = func(url, allow_fragments=True)
- self.assertEqual(result.fragment, expected_frag)
- self.assertNotEndsWith(getattr(result, attr), expected_frag)
- self.assertEqual(func(url, "", True).fragment,
- expected_frag)
- self.assertEqual(func(url).fragment, expected_frag)
+ ))
+ @support.subTests('func', (urllib.parse.urlparse, urllib.parse.urlsplit))
+ def test_parse_fragments(self, url, attr, expected_frag, func):
+ # Exercise the allow_fragments parameter of urlparse() and urlsplit()
+ if attr == "params" and func is urllib.parse.urlsplit:
+ attr = "path"
+ result = func(url, allow_fragments=False)
+ self.assertEqual(result.fragment, "")
+ self.assertEndsWith(getattr(result, attr),
+ "#" + expected_frag)
+ self.assertEqual(func(url, "", False).fragment, "")
+
+ result = func(url, allow_fragments=True)
+ self.assertEqual(result.fragment, expected_frag)
+ self.assertNotEndsWith(getattr(result, attr), expected_frag)
+ self.assertEqual(func(url, "", True).fragment,
+ expected_frag)
+ self.assertEqual(func(url).fragment, expected_frag)
def test_mixed_types_rejected(self):
# Several functions that process either strings or ASCII encoded bytes
@@ -1199,7 +1182,14 @@ class UrlParseTestCase(unittest.TestCase):
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urljoin(b"http://python.org", "http://python.org")
- def _check_result_type(self, str_type):
+ @support.subTests('result_type', [
+ urllib.parse.DefragResult,
+ urllib.parse.SplitResult,
+ urllib.parse.ParseResult,
+ ])
+ def test_result_pairs(self, result_type):
+ # Check encoding and decoding between result pairs
+ str_type = result_type
num_args = len(str_type._fields)
bytes_type = str_type._encoded_counterpart
self.assertIs(bytes_type._decoded_counterpart, str_type)
@@ -1224,16 +1214,6 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(str_result.encode(encoding, errors), bytes_args)
self.assertEqual(str_result.encode(encoding, errors), bytes_result)
- def test_result_pairs(self):
- # Check encoding and decoding between result pairs
- result_types = [
- urllib.parse.DefragResult,
- urllib.parse.SplitResult,
- urllib.parse.ParseResult,
- ]
- for result_type in result_types:
- self._check_result_type(result_type)
-
def test_parse_qs_encoding(self):
result = urllib.parse.parse_qs("key=\u0141%E9", encoding="latin-1")
self.assertEqual(result, {'key': ['\u0141\xE9']})
@@ -1265,8 +1245,7 @@ class UrlParseTestCase(unittest.TestCase):
urllib.parse.parse_qsl('&'.join(['a=a']*11), max_num_fields=10)
urllib.parse.parse_qsl('&'.join(['a=a']*10), max_num_fields=10)
- def test_parse_qs_separator(self):
- parse_qs_semicolon_cases = [
+ @support.subTests('orig,expect', [
(";", {}),
(";;", {}),
(";a=b", {'a': ['b']}),
@@ -1277,17 +1256,14 @@ class UrlParseTestCase(unittest.TestCase):
(b";a=b", {b'a': [b'b']}),
(b"a=a+b;b=b+c", {b'a': [b'a b'], b'b': [b'b c']}),
(b"a=1;a=2", {b'a': [b'1', b'2']}),
- ]
- for orig, expect in parse_qs_semicolon_cases:
- with self.subTest(f"Original: {orig!r}, Expected: {expect!r}"):
- result = urllib.parse.parse_qs(orig, separator=';')
- self.assertEqual(result, expect, "Error parsing %r" % orig)
- result_bytes = urllib.parse.parse_qs(orig, separator=b';')
- self.assertEqual(result_bytes, expect, "Error parsing %r" % orig)
-
-
- def test_parse_qsl_separator(self):
- parse_qsl_semicolon_cases = [
+ ])
+ def test_parse_qs_separator(self, orig, expect):
+ result = urllib.parse.parse_qs(orig, separator=';')
+ self.assertEqual(result, expect)
+ result_bytes = urllib.parse.parse_qs(orig, separator=b';')
+ self.assertEqual(result_bytes, expect)
+
+ @support.subTests('orig,expect', [
(";", []),
(";;", []),
(";a=b", [('a', 'b')]),
@@ -1298,13 +1274,12 @@ class UrlParseTestCase(unittest.TestCase):
(b";a=b", [(b'a', b'b')]),
(b"a=a+b;b=b+c", [(b'a', b'a b'), (b'b', b'b c')]),
(b"a=1;a=2", [(b'a', b'1'), (b'a', b'2')]),
- ]
- for orig, expect in parse_qsl_semicolon_cases:
- with self.subTest(f"Original: {orig!r}, Expected: {expect!r}"):
- result = urllib.parse.parse_qsl(orig, separator=';')
- self.assertEqual(result, expect, "Error parsing %r" % orig)
- result_bytes = urllib.parse.parse_qsl(orig, separator=b';')
- self.assertEqual(result_bytes, expect, "Error parsing %r" % orig)
+ ])
+ def test_parse_qsl_separator(self, orig, expect):
+ result = urllib.parse.parse_qsl(orig, separator=';')
+ self.assertEqual(result, expect)
+ result_bytes = urllib.parse.parse_qsl(orig, separator=b';')
+ self.assertEqual(result_bytes, expect)
def test_parse_qsl_bytes(self):
self.assertEqual(urllib.parse.parse_qsl(b'a=b'), [(b'a', b'b')])
@@ -1695,11 +1670,12 @@ class Utility_Tests(unittest.TestCase):
self.assertRaises(UnicodeError, urllib.parse._to_bytes,
'http://www.python.org/medi\u00e6val')
- def test_unwrap(self):
- for wrapped_url in ('<URL:scheme://host/path>', '<scheme://host/path>',
- 'URL:scheme://host/path', 'scheme://host/path'):
- url = urllib.parse.unwrap(wrapped_url)
- self.assertEqual(url, 'scheme://host/path')
+ @support.subTests('wrapped_url',
+ ('<URL:scheme://host/path>', '<scheme://host/path>',
+ 'URL:scheme://host/path', 'scheme://host/path'))
+ def test_unwrap(self, wrapped_url):
+ url = urllib.parse.unwrap(wrapped_url)
+ self.assertEqual(url, 'scheme://host/path')
class DeprecationTest(unittest.TestCase):
@@ -1780,5 +1756,11 @@ class DeprecationTest(unittest.TestCase):
'urllib.parse.to_bytes() is deprecated as of 3.8')
+def str_encode(s):
+ return s.encode('ascii')
+
+def tuple_encode(t):
+ return tuple(str_encode(x) for x in t)
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_uuid.py b/Lib/test/test_uuid.py
index 958be5408ce..7ddacf07a2c 100755
--- a/Lib/test/test_uuid.py
+++ b/Lib/test/test_uuid.py
@@ -14,6 +14,7 @@ from unittest import mock
from test import support
from test.support import import_helper
+from test.support.script_helper import assert_python_ok
py_uuid = import_helper.import_fresh_module('uuid', blocked=['_uuid'])
c_uuid = import_helper.import_fresh_module('uuid', fresh=['_uuid'])
@@ -1217,10 +1218,37 @@ class BaseTestUUID:
class TestUUIDWithoutExtModule(BaseTestUUID, unittest.TestCase):
uuid = py_uuid
+
@unittest.skipUnless(c_uuid, 'requires the C _uuid module')
class TestUUIDWithExtModule(BaseTestUUID, unittest.TestCase):
uuid = c_uuid
+ def check_has_stable_libuuid_extractable_node(self):
+ if not self.uuid._has_stable_extractable_node:
+ self.skipTest("libuuid cannot deduce MAC address")
+
+ @unittest.skipUnless(os.name == 'posix', 'POSIX only')
+ def test_unix_getnode_from_libuuid(self):
+ self.check_has_stable_libuuid_extractable_node()
+ script = 'import uuid; print(uuid._unix_getnode())'
+ _, n_a, _ = assert_python_ok('-c', script)
+ _, n_b, _ = assert_python_ok('-c', script)
+ n_a, n_b = n_a.decode().strip(), n_b.decode().strip()
+ self.assertTrue(n_a.isdigit())
+ self.assertTrue(n_b.isdigit())
+ self.assertEqual(n_a, n_b)
+
+ @unittest.skipUnless(os.name == 'nt', 'Windows only')
+ def test_windows_getnode_from_libuuid(self):
+ self.check_has_stable_libuuid_extractable_node()
+ script = 'import uuid; print(uuid._windll_getnode())'
+ _, n_a, _ = assert_python_ok('-c', script)
+ _, n_b, _ = assert_python_ok('-c', script)
+ n_a, n_b = n_a.decode().strip(), n_b.decode().strip()
+ self.assertTrue(n_a.isdigit())
+ self.assertTrue(n_b.isdigit())
+ self.assertEqual(n_a, n_b)
+
class BaseTestInternals:
_uuid = py_uuid
diff --git a/Lib/test/test_venv.py b/Lib/test/test_venv.py
index 12c30e178ae..d62f3fba2d1 100644
--- a/Lib/test/test_venv.py
+++ b/Lib/test/test_venv.py
@@ -1008,7 +1008,7 @@ class EnsurePipTest(BaseTest):
err, flags=re.MULTILINE)
# Ignore warning about missing optional module:
try:
- import ssl
+ import ssl # noqa: F401
except ImportError:
err = re.sub(
"^WARNING: Disabling truststore since ssl support is missing$",
diff --git a/Lib/test/test_webbrowser.py b/Lib/test/test_webbrowser.py
index 4c3ea1cd8df..6b577ae100e 100644
--- a/Lib/test/test_webbrowser.py
+++ b/Lib/test/test_webbrowser.py
@@ -6,7 +6,6 @@ import subprocess
import sys
import unittest
import webbrowser
-from functools import partial
from test import support
from test.support import import_helper
from test.support import is_apple_mobile
diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py
index 38be2cd437f..bf6d5074fde 100644
--- a/Lib/test/test_xml_etree.py
+++ b/Lib/test/test_xml_etree.py
@@ -218,6 +218,33 @@ class ElementTreeTest(unittest.TestCase):
def serialize_check(self, elem, expected):
self.assertEqual(serialize(elem), expected)
+ def test_constructor(self):
+ # Test constructor behavior.
+
+ with self.assertRaises(TypeError):
+ tree = ET.ElementTree("")
+ with self.assertRaises(TypeError):
+ tree = ET.ElementTree(ET.ElementTree())
+
+ def test_setroot(self):
+ # Test _setroot behavior.
+
+ tree = ET.ElementTree()
+ element = ET.Element("tag")
+ tree._setroot(element)
+ self.assertEqual(tree.getroot().tag, "tag")
+ self.assertEqual(tree.getroot(), element)
+
+ # Test behavior with an invalid root element
+
+ tree = ET.ElementTree()
+ with self.assertRaises(TypeError):
+ tree._setroot("")
+ with self.assertRaises(TypeError):
+ tree._setroot(ET.ElementTree())
+ with self.assertRaises(TypeError):
+ tree._setroot(None)
+
def test_interface(self):
# Test element tree interface.
diff --git a/Lib/test/test_zipfile/__main__.py b/Lib/test/test_zipfile/__main__.py
index e25ac946edf..90da74ade38 100644
--- a/Lib/test/test_zipfile/__main__.py
+++ b/Lib/test/test_zipfile/__main__.py
@@ -1,6 +1,6 @@
import unittest
-from . import load_tests # noqa: F401
+from . import load_tests
if __name__ == "__main__":
diff --git a/Lib/test/test_zipfile/_path/_test_params.py b/Lib/test/test_zipfile/_path/_test_params.py
index bc95b4ebf4a..00a9eaf2f99 100644
--- a/Lib/test/test_zipfile/_path/_test_params.py
+++ b/Lib/test/test_zipfile/_path/_test_params.py
@@ -1,5 +1,5 @@
-import types
import functools
+import types
from ._itertools import always_iterable
diff --git a/Lib/test/test_zipfile/_path/test_complexity.py b/Lib/test/test_zipfile/_path/test_complexity.py
index b505dd7c376..7c108fc6ab8 100644
--- a/Lib/test/test_zipfile/_path/test_complexity.py
+++ b/Lib/test/test_zipfile/_path/test_complexity.py
@@ -8,10 +8,8 @@ import zipfile
from ._functools import compose
from ._itertools import consume
-
from ._support import import_or_skip
-
big_o = import_or_skip('big_o')
pytest = import_or_skip('pytest')
diff --git a/Lib/test/test_zipfile/_path/test_path.py b/Lib/test/test_zipfile/_path/test_path.py
index 0afabc0c668..696134023a5 100644
--- a/Lib/test/test_zipfile/_path/test_path.py
+++ b/Lib/test/test_zipfile/_path/test_path.py
@@ -1,6 +1,6 @@
+import contextlib
import io
import itertools
-import contextlib
import pathlib
import pickle
import stat
@@ -9,12 +9,11 @@ import unittest
import zipfile
import zipfile._path
-from test.support.os_helper import temp_dir, FakePath
+from test.support.os_helper import FakePath, temp_dir
from ._functools import compose
from ._itertools import Counter
-
-from ._test_params import parameterize, Invoked
+from ._test_params import Invoked, parameterize
class jaraco:
@@ -193,10 +192,10 @@ class TestPath(unittest.TestCase):
"""EncodingWarning must blame the read_text and open calls."""
assert sys.flags.warn_default_encoding
root = zipfile.Path(alpharep)
- with self.assertWarns(EncodingWarning) as wc:
+ with self.assertWarns(EncodingWarning) as wc: # noqa: F821 (astral-sh/ruff#13296)
root.joinpath("a.txt").read_text()
assert __file__ == wc.filename
- with self.assertWarns(EncodingWarning) as wc:
+ with self.assertWarns(EncodingWarning) as wc: # noqa: F821 (astral-sh/ruff#13296)
root.joinpath("a.txt").open("r").close()
assert __file__ == wc.filename
@@ -365,6 +364,17 @@ class TestPath(unittest.TestCase):
assert root.name == 'alpharep.zip' == root.filename.name
@pass_alpharep
+ def test_root_on_disk(self, alpharep):
+ """
+ The name/stem of the root should match the zipfile on disk.
+
+ This condition must hold across platforms.
+ """
+ root = zipfile.Path(self.zipfile_ondisk(alpharep))
+ assert root.name == 'alpharep.zip' == root.filename.name
+ assert root.stem == 'alpharep' == root.filename.stem
+
+ @pass_alpharep
def test_suffix(self, alpharep):
"""
The suffix of the root should be the suffix of the zipfile.
diff --git a/Lib/test/test_zipfile/_path/write-alpharep.py b/Lib/test/test_zipfile/_path/write-alpharep.py
index 48c09b53717..7418391abad 100644
--- a/Lib/test/test_zipfile/_path/write-alpharep.py
+++ b/Lib/test/test_zipfile/_path/write-alpharep.py
@@ -1,4 +1,3 @@
from . import test_path
-
__name__ == '__main__' and test_path.build_alpharep_fixture().extractall('alpharep')
diff --git a/Lib/test/test_zlib.py b/Lib/test/test_zlib.py
index 4d97fe56f3a..c57ab51eca1 100644
--- a/Lib/test/test_zlib.py
+++ b/Lib/test/test_zlib.py
@@ -119,6 +119,114 @@ class ChecksumTestCase(unittest.TestCase):
self.assertEqual(binascii.crc32(b'spam'), zlib.crc32(b'spam'))
+class ChecksumCombineMixin:
+ """Mixin class for testing checksum combination."""
+
+ N = 1000
+ default_iv: int
+
+ def parse_iv(self, iv):
+ """Parse an IV value.
+
+ - The default IV is returned if *iv* is None.
+ - A random IV is returned if *iv* is -1.
+ - Otherwise, *iv* is returned as is.
+ """
+ if iv is None:
+ return self.default_iv
+ if iv == -1:
+ return random.randint(1, 0x80000000)
+ return iv
+
+ def checksum(self, data, init=None):
+ """Compute the checksum of data with a given initial value.
+
+ The *init* value is parsed by ``parse_iv``.
+ """
+ iv = self.parse_iv(init)
+ return self._checksum(data, iv)
+
+ def _checksum(self, data, init):
+ raise NotImplementedError
+
+ def combine(self, a, b, blen):
+ """Combine two checksums together."""
+ raise NotImplementedError
+
+ def get_random_data(self, data_len, *, iv=None):
+ """Get a triplet (data, iv, checksum)."""
+ data = random.randbytes(data_len)
+ init = self.parse_iv(iv)
+ checksum = self.checksum(data, init)
+ return data, init, checksum
+
+ def test_combine_empty(self):
+ for _ in range(self.N):
+ a, iv, checksum = self.get_random_data(32, iv=-1)
+ res = self.combine(iv, self.checksum(a), len(a))
+ self.assertEqual(res, checksum)
+
+ def test_combine_no_iv(self):
+ for _ in range(self.N):
+ a, _, chk_a = self.get_random_data(32)
+ b, _, chk_b = self.get_random_data(64)
+ res = self.combine(chk_a, chk_b, len(b))
+ self.assertEqual(res, self.checksum(a + b))
+
+ def test_combine_no_iv_invalid_length(self):
+ a, _, chk_a = self.get_random_data(32)
+ b, _, chk_b = self.get_random_data(64)
+ checksum = self.checksum(a + b)
+ for invalid_len in [1, len(a), 48, len(b) + 1, 191]:
+ invalid_res = self.combine(chk_a, chk_b, invalid_len)
+ self.assertNotEqual(invalid_res, checksum)
+
+ self.assertRaises(TypeError, self.combine, 0, 0, "len")
+
+ def test_combine_with_iv(self):
+ for _ in range(self.N):
+ a, iv_a, chk_a_with_iv = self.get_random_data(32, iv=-1)
+ chk_a_no_iv = self.checksum(a)
+ b, iv_b, chk_b_with_iv = self.get_random_data(64, iv=-1)
+ chk_b_no_iv = self.checksum(b)
+
+ # We can represent c = COMBINE(CHK(a, iv_a), CHK(b, iv_b)) as:
+ #
+ # c = CHK(CHK(b'', iv_a) + CHK(a) + CHK(b'', iv_b) + CHK(b))
+ # = COMBINE(
+ # COMBINE(CHK(b'', iv_a), CHK(a)),
+ # COMBINE(CHK(b'', iv_b), CHK(b)),
+ # )
+ # = COMBINE(COMBINE(iv_a, CHK(a)), COMBINE(iv_b, CHK(b)))
+ tmp0 = self.combine(iv_a, chk_a_no_iv, len(a))
+ tmp1 = self.combine(iv_b, chk_b_no_iv, len(b))
+ expected = self.combine(tmp0, tmp1, len(b))
+ checksum = self.combine(chk_a_with_iv, chk_b_with_iv, len(b))
+ self.assertEqual(checksum, expected)
+
+
+class CRC32CombineTestCase(ChecksumCombineMixin, unittest.TestCase):
+
+ default_iv = 0
+
+ def _checksum(self, data, init):
+ return zlib.crc32(data, init)
+
+ def combine(self, a, b, blen):
+ return zlib.crc32_combine(a, b, blen)
+
+
+class Adler32CombineTestCase(ChecksumCombineMixin, unittest.TestCase):
+
+ default_iv = 1
+
+ def _checksum(self, data, init):
+ return zlib.adler32(data, init)
+
+ def combine(self, a, b, blen):
+ return zlib.adler32_combine(a, b, blen)
+
+
# Issue #10276 - check that inputs >=4 GiB are handled correctly.
class ChecksumBigBufferTestCase(unittest.TestCase):
diff --git a/Lib/test/test_zoneinfo/test_zoneinfo.py b/Lib/test/test_zoneinfo/test_zoneinfo.py
index f313e394f49..44e87e71c8e 100644
--- a/Lib/test/test_zoneinfo/test_zoneinfo.py
+++ b/Lib/test/test_zoneinfo/test_zoneinfo.py
@@ -58,6 +58,10 @@ def tearDownModule():
shutil.rmtree(TEMP_DIR)
+class CustomError(Exception):
+ pass
+
+
class TzPathUserMixin:
"""
Adds a setUp() and tearDown() to make TZPATH manipulations thread-safe.
@@ -404,6 +408,25 @@ class ZoneInfoTest(TzPathUserMixin, ZoneInfoTestBase):
self.assertEqual(t.utcoffset(), offset.utcoffset)
self.assertEqual(t.dst(), offset.dst)
+ def test_cache_exception(self):
+ class Incomparable(str):
+ eq_called = False
+ def __eq__(self, other):
+ self.eq_called = True
+ raise CustomError
+ __hash__ = str.__hash__
+
+ key = "America/Los_Angeles"
+ tz1 = self.klass(key)
+ key = Incomparable(key)
+ try:
+ tz2 = self.klass(key)
+ except CustomError:
+ self.assertTrue(key.eq_called)
+ else:
+ self.assertFalse(key.eq_called)
+ self.assertIs(tz2, tz1)
+
class CZoneInfoTest(ZoneInfoTest):
module = c_zoneinfo
@@ -1507,6 +1530,26 @@ class ZoneInfoCacheTest(TzPathUserMixin, ZoneInfoTestBase):
self.assertIsNot(dub0, dub1)
self.assertIs(tok0, tok1)
+ def test_clear_cache_refleak(self):
+ class Stringy(str):
+ allow_comparisons = True
+ def __eq__(self, other):
+ if not self.allow_comparisons:
+ raise CustomError
+ return super().__eq__(other)
+ __hash__ = str.__hash__
+
+ key = Stringy("America/Los_Angeles")
+ self.klass(key)
+ key.allow_comparisons = False
+ try:
+ # Note: This is try/except rather than assertRaises because
+ # there is no guarantee that the key is even still in the cache,
+ # or that the key for the cache is the original `key` object.
+ self.klass.clear_cache(only_keys="America/Los_Angeles")
+ except CustomError:
+ pass
+
class CZoneInfoCacheTest(ZoneInfoCacheTest):
module = c_zoneinfo
diff --git a/Lib/test/test_zoneinfo/test_zoneinfo_property.py b/Lib/test/test_zoneinfo/test_zoneinfo_property.py
index feaa77f3e7f..294c7e9b27a 100644
--- a/Lib/test/test_zoneinfo/test_zoneinfo_property.py
+++ b/Lib/test/test_zoneinfo/test_zoneinfo_property.py
@@ -146,20 +146,24 @@ class ZoneInfoPickleTest(ZoneInfoTestBase):
@add_key_examples
def test_pickle_unpickle_cache(self, key):
zi = self.klass(key)
- pkl_str = pickle.dumps(zi)
- zi_rt = pickle.loads(pkl_str)
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.subTest(proto=proto):
+ pkl_str = pickle.dumps(zi, proto)
+ zi_rt = pickle.loads(pkl_str)
- self.assertIs(zi, zi_rt)
+ self.assertIs(zi, zi_rt)
@hypothesis.given(key=valid_keys())
@add_key_examples
def test_pickle_unpickle_no_cache(self, key):
zi = self.klass.no_cache(key)
- pkl_str = pickle.dumps(zi)
- zi_rt = pickle.loads(pkl_str)
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.subTest(proto=proto):
+ pkl_str = pickle.dumps(zi, proto)
+ zi_rt = pickle.loads(pkl_str)
- self.assertIsNot(zi, zi_rt)
- self.assertEqual(str(zi), str(zi_rt))
+ self.assertIsNot(zi, zi_rt)
+ self.assertEqual(str(zi), str(zi_rt))
@hypothesis.given(key=valid_keys())
@add_key_examples
diff --git a/Lib/test/test_zstd.py b/Lib/test/test_zstd.py
index 53ca592ea38..90b2adc9665 100644
--- a/Lib/test/test_zstd.py
+++ b/Lib/test/test_zstd.py
@@ -12,7 +12,6 @@ import threading
from test.support.import_helper import import_module
from test.support import threading_helper
from test.support import _1M
-from test.support import Py_GIL_DISABLED
_zstd = import_module("_zstd")
zstd = import_module("compression.zstd")
@@ -63,11 +62,18 @@ SAMPLES = None
TRAINED_DICT = None
-SUPPORT_MULTITHREADING = False
+# Cannot be deferred to setUpModule() as it is used to check whether or
+# not to skip tests.
+try:
+ SUPPORT_MULTITHREADING = CompressionParameter.nb_workers.bounds() != (0, 0)
+except Exception:
+ SUPPORT_MULTITHREADING = False
+
+C_INT_MIN = -(2**31)
+C_INT_MAX = (2**31) - 1
+
def setUpModule():
- global SUPPORT_MULTITHREADING
- SUPPORT_MULTITHREADING = CompressionParameter.nb_workers.bounds() != (0, 0)
# uncompressed size 130KB, more than a zstd block.
# with a frame epilogue, 4 bytes checksum.
global DAT_130K_D
@@ -196,14 +202,21 @@ class CompressorTestCase(unittest.TestCase):
self.assertRaises(TypeError, ZstdCompressor, zstd_dict=b"abcd1234")
self.assertRaises(TypeError, ZstdCompressor, zstd_dict={1: 2, 3: 4})
- with self.assertRaises(ValueError):
- ZstdCompressor(2**31)
- with self.assertRaises(ValueError):
- ZstdCompressor(options={2**31: 100})
+ # valid range for compression level is [-(1<<17), 22]
+ msg = r'illegal compression level {}; the valid range is \[-?\d+, -?\d+\]'
+ with self.assertRaisesRegex(ValueError, msg.format(C_INT_MAX)):
+ ZstdCompressor(C_INT_MAX)
+ with self.assertRaisesRegex(ValueError, msg.format(C_INT_MIN)):
+ ZstdCompressor(C_INT_MIN)
+ msg = r'illegal compression level; the valid range is \[-?\d+, -?\d+\]'
+ with self.assertRaisesRegex(ValueError, msg):
+ ZstdCompressor(level=-(2**1000))
+ with self.assertRaisesRegex(ValueError, msg):
+ ZstdCompressor(level=2**1000)
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdCompressor(options={CompressionParameter.window_log: 100})
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdCompressor(options={3333: 100})
# Method bad arguments
@@ -254,43 +267,57 @@ class CompressorTestCase(unittest.TestCase):
}
ZstdCompressor(options=d)
- # larger than signed int, ValueError
d1 = d.copy()
- d1[CompressionParameter.ldm_bucket_size_log] = 2**31
- self.assertRaises(ValueError, ZstdCompressor, options=d1)
+ # larger than signed int
+ d1[CompressionParameter.ldm_bucket_size_log] = C_INT_MAX
+ with self.assertRaises(ValueError):
+ ZstdCompressor(options=d1)
+ # smaller than signed int
+ d1[CompressionParameter.ldm_bucket_size_log] = C_INT_MIN
+ with self.assertRaises(ValueError):
+ ZstdCompressor(options=d1)
- # clamp compressionLevel
+ # out of bounds compression level
level_min, level_max = CompressionParameter.compression_level.bounds()
- compress(b'', level_max+1)
- compress(b'', level_min-1)
-
- compress(b'', options={CompressionParameter.compression_level:level_max+1})
- compress(b'', options={CompressionParameter.compression_level:level_min-1})
+ with self.assertRaises(ValueError):
+ compress(b'', level_max+1)
+ with self.assertRaises(ValueError):
+ compress(b'', level_min-1)
+ with self.assertRaises(ValueError):
+ compress(b'', 2**1000)
+ with self.assertRaises(ValueError):
+ compress(b'', -(2**1000))
+ with self.assertRaises(ValueError):
+ compress(b'', options={
+ CompressionParameter.compression_level: level_max+1})
+ with self.assertRaises(ValueError):
+ compress(b'', options={
+ CompressionParameter.compression_level: level_min-1})
# zstd lib doesn't support MT compression
if not SUPPORT_MULTITHREADING:
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdCompressor(options={CompressionParameter.nb_workers:4})
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdCompressor(options={CompressionParameter.job_size:4})
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdCompressor(options={CompressionParameter.overlap_log:4})
# out of bounds error msg
option = {CompressionParameter.window_log:100}
- with self.assertRaisesRegex(ZstdError,
- (r'Error when setting zstd compression parameter "window_log", '
- r'it should \d+ <= value <= \d+, provided value is 100\. '
- r'\((?:32|64)-bit build\)')):
+ with self.assertRaisesRegex(
+ ValueError,
+ "compression parameter 'window_log' received an illegal value 100; "
+ r'the valid range is \[-?\d+, -?\d+\]',
+ ):
compress(b'', options=option)
def test_unknown_compression_parameter(self):
KEY = 100001234
option = {CompressionParameter.compression_level: 10,
KEY: 200000000}
- pattern = (r'Invalid zstd compression parameter.*?'
- fr'"unknown parameter \(key {KEY}\)"')
- with self.assertRaisesRegex(ZstdError, pattern):
+ pattern = rf"invalid compression parameter 'unknown parameter \(key {KEY}\)'"
+ with self.assertRaisesRegex(ValueError, pattern):
ZstdCompressor(options=option)
@unittest.skipIf(not SUPPORT_MULTITHREADING,
@@ -371,6 +398,115 @@ class CompressorTestCase(unittest.TestCase):
c = ZstdCompressor()
self.assertNotEqual(c.compress(b'', c.FLUSH_FRAME), b'')
+ def test_set_pledged_input_size(self):
+ DAT = DECOMPRESSED_100_PLUS_32KB
+ CHUNK_SIZE = len(DAT) // 3
+
+ # wrong value
+ c = ZstdCompressor()
+ with self.assertRaisesRegex(ValueError,
+ r'should be a positive int less than \d+'):
+ c.set_pledged_input_size(-300)
+ # overflow
+ with self.assertRaisesRegex(ValueError,
+ r'should be a positive int less than \d+'):
+ c.set_pledged_input_size(2**64)
+ # ZSTD_CONTENTSIZE_ERROR is invalid
+ with self.assertRaisesRegex(ValueError,
+ r'should be a positive int less than \d+'):
+ c.set_pledged_input_size(2**64-2)
+ # ZSTD_CONTENTSIZE_UNKNOWN should use None
+ with self.assertRaisesRegex(ValueError,
+ r'should be a positive int less than \d+'):
+ c.set_pledged_input_size(2**64-1)
+
+ # check valid values are settable
+ c.set_pledged_input_size(2**63)
+ c.set_pledged_input_size(2**64-3)
+
+ # check that zero means empty frame
+ c = ZstdCompressor(level=1)
+ c.set_pledged_input_size(0)
+ c.compress(b'')
+ dat = c.flush()
+ ret = get_frame_info(dat)
+ self.assertEqual(ret.decompressed_size, 0)
+
+
+ # wrong mode
+ c = ZstdCompressor(level=1)
+ c.compress(b'123456')
+ self.assertEqual(c.last_mode, c.CONTINUE)
+ with self.assertRaisesRegex(ValueError,
+ r'last_mode == FLUSH_FRAME'):
+ c.set_pledged_input_size(300)
+
+ # None value
+ c = ZstdCompressor(level=1)
+ c.set_pledged_input_size(None)
+ dat = c.compress(DAT) + c.flush()
+
+ ret = get_frame_info(dat)
+ self.assertEqual(ret.decompressed_size, None)
+
+ # correct value
+ c = ZstdCompressor(level=1)
+ c.set_pledged_input_size(len(DAT))
+
+ chunks = []
+ posi = 0
+ while posi < len(DAT):
+ dat = c.compress(DAT[posi:posi+CHUNK_SIZE])
+ posi += CHUNK_SIZE
+ chunks.append(dat)
+
+ dat = c.flush()
+ chunks.append(dat)
+ chunks = b''.join(chunks)
+
+ ret = get_frame_info(chunks)
+ self.assertEqual(ret.decompressed_size, len(DAT))
+ self.assertEqual(decompress(chunks), DAT)
+
+ c.set_pledged_input_size(len(DAT)) # the second frame
+ dat = c.compress(DAT) + c.flush()
+
+ ret = get_frame_info(dat)
+ self.assertEqual(ret.decompressed_size, len(DAT))
+ self.assertEqual(decompress(dat), DAT)
+
+ # not enough data
+ c = ZstdCompressor(level=1)
+ c.set_pledged_input_size(len(DAT)+1)
+
+ for start in range(0, len(DAT), CHUNK_SIZE):
+ end = min(start+CHUNK_SIZE, len(DAT))
+ _dat = c.compress(DAT[start:end])
+
+ with self.assertRaises(ZstdError):
+ c.flush()
+
+ # too much data
+ c = ZstdCompressor(level=1)
+ c.set_pledged_input_size(len(DAT))
+
+ for start in range(0, len(DAT), CHUNK_SIZE):
+ end = min(start+CHUNK_SIZE, len(DAT))
+ _dat = c.compress(DAT[start:end])
+
+ with self.assertRaises(ZstdError):
+ c.compress(b'extra', ZstdCompressor.FLUSH_FRAME)
+
+ # content size not set if content_size_flag == 0
+ c = ZstdCompressor(options={CompressionParameter.content_size_flag: 0})
+ c.set_pledged_input_size(10)
+ dat1 = c.compress(b"hello")
+ dat2 = c.compress(b"world")
+ dat3 = c.flush()
+ frame_data = get_frame_info(dat1 + dat2 + dat3)
+ self.assertIsNone(frame_data.decompressed_size)
+
+
class DecompressorTestCase(unittest.TestCase):
def test_simple_decompress_bad_args(self):
@@ -385,12 +521,22 @@ class DecompressorTestCase(unittest.TestCase):
self.assertRaises(TypeError, ZstdDecompressor, options=b'abc')
with self.assertRaises(ValueError):
- ZstdDecompressor(options={2**31 : 100})
+ ZstdDecompressor(options={C_INT_MAX: 100})
+ with self.assertRaises(ValueError):
+ ZstdDecompressor(options={C_INT_MIN: 100})
+ with self.assertRaises(ValueError):
+ ZstdDecompressor(options={0: C_INT_MAX})
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor(options={2**1000: 100})
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor(options={-(2**1000): 100})
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor(options={0: -(2**1000)})
- with self.assertRaises(ZstdError):
- ZstdDecompressor(options={DecompressionParameter.window_log_max:100})
- with self.assertRaises(ZstdError):
- ZstdDecompressor(options={3333 : 100})
+ with self.assertRaises(ValueError):
+ ZstdDecompressor(options={DecompressionParameter.window_log_max: 100})
+ with self.assertRaises(ValueError):
+ ZstdDecompressor(options={3333: 100})
empty = compress(b'')
lzd = ZstdDecompressor()
@@ -403,26 +549,52 @@ class DecompressorTestCase(unittest.TestCase):
d = {DecompressionParameter.window_log_max : 15}
ZstdDecompressor(options=d)
- # larger than signed int, ValueError
d1 = d.copy()
- d1[DecompressionParameter.window_log_max] = 2**31
- self.assertRaises(ValueError, ZstdDecompressor, None, d1)
+ # larger than signed int
+ d1[DecompressionParameter.window_log_max] = 2**1000
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor(None, d1)
+ # smaller than signed int
+ d1[DecompressionParameter.window_log_max] = -(2**1000)
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor(None, d1)
+
+ d1[DecompressionParameter.window_log_max] = C_INT_MAX
+ with self.assertRaises(ValueError):
+ ZstdDecompressor(None, d1)
+ d1[DecompressionParameter.window_log_max] = C_INT_MIN
+ with self.assertRaises(ValueError):
+ ZstdDecompressor(None, d1)
# out of bounds error msg
options = {DecompressionParameter.window_log_max:100}
- with self.assertRaisesRegex(ZstdError,
- (r'Error when setting zstd decompression parameter "window_log_max", '
- r'it should \d+ <= value <= \d+, provided value is 100\. '
- r'\((?:32|64)-bit build\)')):
+ with self.assertRaisesRegex(
+ ValueError,
+ "decompression parameter 'window_log_max' received an illegal value 100; "
+ r'the valid range is \[-?\d+, -?\d+\]',
+ ):
+ decompress(b'', options=options)
+
+ # out of bounds decompression parameter
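+ # values that fit in a C int but are out of range raise ValueError,
+ # while values that do not fit in a C int at all raise OverflowError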
+ options[DecompressionParameter.window_log_max] = C_INT_MAX
+ with self.assertRaises(ValueError):
+ decompress(b'', options=options)
+ options[DecompressionParameter.window_log_max] = C_INT_MIN
+ with self.assertRaises(ValueError):
+ decompress(b'', options=options)
+ options[DecompressionParameter.window_log_max] = 2**1000
+ with self.assertRaises(OverflowError):
+ decompress(b'', options=options)
+ options[DecompressionParameter.window_log_max] = -(2**1000)
+ with self.assertRaises(OverflowError):
decompress(b'', options=options)
def test_unknown_decompression_parameter(self):
KEY = 100001234
options = {DecompressionParameter.window_log_max: DecompressionParameter.window_log_max.bounds()[1],
KEY: 200000000}
- pattern = (r'Invalid zstd decompression parameter.*?'
- fr'"unknown parameter \(key {KEY}\)"')
- with self.assertRaisesRegex(ZstdError, pattern):
+ pattern = rf"invalid decompression parameter 'unknown parameter \(key {KEY}\)'"
+ with self.assertRaisesRegex(ValueError, pattern):
ZstdDecompressor(options=options)
def test_decompress_epilogue_flags(self):
@@ -1078,27 +1250,41 @@ class ZstdDictTestCase(unittest.TestCase):
ZstdDecompressor(zd)
# wrong type
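+ # malformed tuples and sequences are rejected; the accepted special
+ # forms are the as_digested_dict / as_undigested_dict / as_prefix
+ # attributes of ZstdDict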
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
- ZstdCompressor(zstd_dict=(zd, b'123'))
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
+ ZstdCompressor(zstd_dict=[zd, 1])
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
+ ZstdCompressor(zstd_dict=(zd, 1.0))
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
+ ZstdCompressor(zstd_dict=(zd,))
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
ZstdCompressor(zstd_dict=(zd, 1, 2))
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
ZstdCompressor(zstd_dict=(zd, -1))
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
ZstdCompressor(zstd_dict=(zd, 3))
-
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
- ZstdDecompressor(zstd_dict=(zd, b'123'))
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
+ with self.assertRaises(OverflowError):
+ ZstdCompressor(zstd_dict=(zd, 2**1000))
+ with self.assertRaises(OverflowError):
+ ZstdCompressor(zstd_dict=(zd, -2**1000))
+
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
+ ZstdDecompressor(zstd_dict=[zd, 1])
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
+ ZstdDecompressor(zstd_dict=(zd, 1.0))
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
+ ZstdDecompressor((zd,))
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
ZstdDecompressor((zd, 1, 2))
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
ZstdDecompressor((zd, -1))
- with self.assertRaisesRegex(TypeError, r'should be ZstdDict object'):
+ with self.assertRaisesRegex(TypeError, r'should be a ZstdDict object'):
ZstdDecompressor((zd, 3))
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor((zd, 2**1000))
+ with self.assertRaises(OverflowError):
+ ZstdDecompressor((zd, -2**1000))
def test_train_dict(self):
-
-
TRAINED_DICT = train_dict(SAMPLES, DICT_SIZE1)
ZstdDict(TRAINED_DICT.dict_content, is_raw=False)
@@ -1180,17 +1366,36 @@ class ZstdDictTestCase(unittest.TestCase):
with self.assertRaises(TypeError):
_zstd.train_dict({}, (), 100)
with self.assertRaises(TypeError):
+ _zstd.train_dict(bytearray(), (), 100)
+ with self.assertRaises(TypeError):
_zstd.train_dict(b'', 99, 100)
with self.assertRaises(TypeError):
+ _zstd.train_dict(b'', [], 100)
+ with self.assertRaises(TypeError):
_zstd.train_dict(b'', (), 100.1)
+ with self.assertRaises(TypeError):
+ _zstd.train_dict(b'', (99.1,), 100)
+ with self.assertRaises(ValueError):
+ _zstd.train_dict(b'abc', (4, -1), 100)
+ with self.assertRaises(ValueError):
+ _zstd.train_dict(b'abc', (2,), 100)
+ with self.assertRaises(ValueError):
+ _zstd.train_dict(b'', (99,), 100)
# size > size_t
with self.assertRaises(ValueError):
- _zstd.train_dict(b'', (2**64+1,), 100)
+ _zstd.train_dict(b'', (2**1000,), 100)
+ with self.assertRaises(ValueError):
+ _zstd.train_dict(b'', (-2**1000,), 100)
# dict_size <= 0
with self.assertRaises(ValueError):
_zstd.train_dict(b'', (), 0)
+ with self.assertRaises(ValueError):
+ _zstd.train_dict(b'', (), -1)
+
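+ # otherwise valid arguments but an empty sample set: the failure is
+ # reported as ZstdError rather than ValueError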
+ with self.assertRaises(ZstdError):
+ _zstd.train_dict(b'', (), 1)
def test_finalize_dict_c(self):
with self.assertRaises(TypeError):
@@ -1200,21 +1405,50 @@ class ZstdDictTestCase(unittest.TestCase):
with self.assertRaises(TypeError):
_zstd.finalize_dict({}, b'', (), 100, 5)
with self.assertRaises(TypeError):
+ _zstd.finalize_dict(bytearray(TRAINED_DICT.dict_content), b'', (), 100, 5)
+ with self.assertRaises(TypeError):
_zstd.finalize_dict(TRAINED_DICT.dict_content, {}, (), 100, 5)
with self.assertRaises(TypeError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, bytearray(), (), 100, 5)
+ with self.assertRaises(TypeError):
_zstd.finalize_dict(TRAINED_DICT.dict_content, b'', 99, 100, 5)
with self.assertRaises(TypeError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', [], 100, 5)
+ with self.assertRaises(TypeError):
_zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 100.1, 5)
with self.assertRaises(TypeError):
_zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 100, 5.1)
+ with self.assertRaises(ValueError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'abc', (4, -1), 100, 5)
+ with self.assertRaises(ValueError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'abc', (2,), 100, 5)
+ with self.assertRaises(ValueError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (99,), 100, 5)
+
# size > size_t
with self.assertRaises(ValueError):
- _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (2**64+1,), 100, 5)
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (2**1000,), 100, 5)
+ with self.assertRaises(ValueError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (-2**1000,), 100, 5)
# dict_size <= 0
with self.assertRaises(ValueError):
_zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 0, 5)
+ with self.assertRaises(ValueError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), -1, 5)
+ with self.assertRaises(OverflowError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 2**1000, 5)
+ with self.assertRaises(OverflowError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), -2**1000, 5)
+
+ with self.assertRaises(OverflowError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 100, 2**1000)
+ with self.assertRaises(OverflowError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 100, -2**1000)
+
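+ # otherwise valid arguments but no sample data: reported as ZstdError
+ # rather than ValueError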
+ with self.assertRaises(ZstdError):
+ _zstd.finalize_dict(TRAINED_DICT.dict_content, b'', (), 100, 5)
def test_train_buffer_protocol_samples(self):
def _nbytes(dat):
@@ -1424,11 +1658,12 @@ class FileTestCase(unittest.TestCase):
with self.assertRaises(ValueError):
ZstdFile(io.BytesIO(COMPRESSED_100_PLUS_32KB), "rw")
- with self.assertRaisesRegex(TypeError, r"NOT be CompressionParameter"):
+ with self.assertRaisesRegex(TypeError,
+ r"not be a CompressionParameter"):
ZstdFile(io.BytesIO(), 'rb',
options={CompressionParameter.compression_level:5})
with self.assertRaisesRegex(TypeError,
- r"NOT be DecompressionParameter"):
+ r"not be a DecompressionParameter"):
ZstdFile(io.BytesIO(), 'wb',
options={DecompressionParameter.window_log_max:21})
@@ -1439,19 +1674,19 @@ class FileTestCase(unittest.TestCase):
with self.assertRaises(TypeError):
ZstdFile(io.BytesIO(), "w", level='asd')
# Unknown parameter keys and out-of-range values should be invalid.
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdFile(io.BytesIO(), "w", options={999:9999})
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdFile(io.BytesIO(), "w", options={CompressionParameter.window_log:99})
with self.assertRaises(TypeError):
ZstdFile(io.BytesIO(COMPRESSED_100_PLUS_32KB), "r", options=33)
- with self.assertRaises(ValueError):
+ with self.assertRaises(OverflowError):
ZstdFile(io.BytesIO(COMPRESSED_100_PLUS_32KB),
options={DecompressionParameter.window_log_max:2**31})
- with self.assertRaises(ZstdError):
+ with self.assertRaises(ValueError):
ZstdFile(io.BytesIO(COMPRESSED_100_PLUS_32KB),
options={444:333})
@@ -1467,7 +1702,7 @@ class FileTestCase(unittest.TestCase):
tmp_f.write(DAT_130K_C)
filename = tmp_f.name
- with self.assertRaises(ValueError):
+ with self.assertRaises(TypeError):
ZstdFile(filename, options={'a':'b'})
# for PyPy
@@ -2430,10 +2665,8 @@ class OpenTestCase(unittest.TestCase):
self.assertEqual(f.write(arr), LENGTH)
self.assertEqual(f.tell(), LENGTH)
-@unittest.skip("it fails for now, see gh-133885")
class FreeThreadingMethodTests(unittest.TestCase):
- @unittest.skipUnless(Py_GIL_DISABLED, 'this test can only possibly fail with GIL disabled')
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_compress_locking(self):
@@ -2470,7 +2703,6 @@ class FreeThreadingMethodTests(unittest.TestCase):
actual = b''.join(output) + rest2
self.assertEqual(expected, actual)
- @unittest.skipUnless(Py_GIL_DISABLED, 'this test can only possibly fail with GIL disabled')
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_decompress_locking(self):
@@ -2506,6 +2738,59 @@ class FreeThreadingMethodTests(unittest.TestCase):
actual = b''.join(output)
self.assertEqual(expected, actual)
+ @threading_helper.reap_threads
+ @threading_helper.requires_working_threading()
+ def test_compress_shared_dict(self):
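+ # several threads construct compressors sharing TRAINED_DICT; this
+ # only checks that doing so does not crash or race, output is not
+ # verified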
+ num_threads = 8
+
+ def run_method(b):
+ level = threading.get_ident() % 4
+ # sync threads to increase chance of contention on
+ # capsule storing dictionary levels
+ b.wait()
+ ZstdCompressor(level=level,
+ zstd_dict=TRAINED_DICT.as_digested_dict)
+ b.wait()
+ ZstdCompressor(level=level,
+ zstd_dict=TRAINED_DICT.as_undigested_dict)
+ b.wait()
+ ZstdCompressor(level=level,
+ zstd_dict=TRAINED_DICT.as_prefix)
+ threads = []
+
+ b = threading.Barrier(num_threads)
+ for i in range(num_threads):
+ thread = threading.Thread(target=run_method, args=(b,))
+
+ threads.append(thread)
+
+ with threading_helper.start_threads(threads):
+ pass
+
+ @threading_helper.reap_threads
+ @threading_helper.requires_working_threading()
+ def test_decompress_shared_dict(self):
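+ # same as test_compress_shared_dict, but exercising ZstdDecompressor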
+ num_threads = 8
+
+ def run_method(b):
+ # sync threads to increase chance of contention on
+ # decompression dictionary
+ b.wait()
+ ZstdDecompressor(zstd_dict=TRAINED_DICT.as_digested_dict)
+ b.wait()
+ ZstdDecompressor(zstd_dict=TRAINED_DICT.as_undigested_dict)
+ b.wait()
+ ZstdDecompressor(zstd_dict=TRAINED_DICT.as_prefix)
+ threads = []
+
+ b = threading.Barrier(num_threads)
+ for i in range(num_threads):
+ thread = threading.Thread(target=run_method, args=(b,))
+
+ threads.append(thread)
+
+ with threading_helper.start_threads(threads):
+ pass
if __name__ == "__main__":