Diffstat (limited to 'Lib/unittest')
-rw-r--r--  Lib/unittest/case.py    |  4
-rw-r--r--  Lib/unittest/main.py    |  4
-rw-r--r--  Lib/unittest/mock.py    | 15
-rw-r--r--  Lib/unittest/runner.py  | 89
-rw-r--r--  Lib/unittest/suite.py   | 20
5 files changed, 72 insertions(+), 60 deletions(-)
diff --git a/Lib/unittest/case.py b/Lib/unittest/case.py
index 884fc1b21f6..db10de68e4a 100644
--- a/Lib/unittest/case.py
+++ b/Lib/unittest/case.py
@@ -149,9 +149,7 @@ def doModuleCleanups():
         except Exception as exc:
             exceptions.append(exc)
     if exceptions:
-        # Swallows all but first exception. If a multi-exception handler
-        # gets written we should use that here instead.
-        raise exceptions[0]
+        raise ExceptionGroup('module cleanup failed', exceptions)


 def skip(reason):
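Note on the case.py hunk above: doModuleCleanups() no longer discards all but the first cleanup failure. A minimal sketch of the new behaviour (assumes an interpreter with this patch applied; boom is an invented helper):

import unittest


def boom(label):
    raise ValueError(label)


# Register two cleanups that both fail; cleanups run in LIFO order.
unittest.addModuleCleanup(boom, "first")
unittest.addModuleCleanup(boom, "second")

try:
    unittest.doModuleCleanups()
except ExceptionGroup as eg:
    # Previously only exceptions[0] was re-raised; now every failure
    # is preserved inside a single ExceptionGroup.
    assert eg.message == 'module cleanup failed'
    assert sorted(str(e) for e in eg.exceptions) == ["first", "second"]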
diff --git a/Lib/unittest/main.py b/Lib/unittest/main.py
index c3869de3f6f..6fd949581f3 100644
--- a/Lib/unittest/main.py
+++ b/Lib/unittest/main.py
@@ -197,7 +197,7 @@ class TestProgram(object):
         return parser

     def _getMainArgParser(self, parent):
-        parser = argparse.ArgumentParser(parents=[parent])
+        parser = argparse.ArgumentParser(parents=[parent], color=True)
         parser.prog = self.progName
         parser.print_help = self._print_help

@@ -208,7 +208,7 @@ class TestProgram(object):
         return parser

     def _getDiscoveryArgParser(self, parent):
-        parser = argparse.ArgumentParser(parents=[parent])
+        parser = argparse.ArgumentParser(parents=[parent], color=True)
         parser.prog = '%s discover' % self.progName
         parser.epilog = ('For test discovery all test modules must be '
                          'importable from the top level directory of the '
diff --git a/Lib/unittest/mock.py b/Lib/unittest/mock.py
index 55cb4b1f6af..e1dbfdacf56 100644
--- a/Lib/unittest/mock.py
+++ b/Lib/unittest/mock.py
@@ -569,6 +569,11 @@ class NonCallableMock(Base):
         __dict__['_mock_methods'] = spec
         __dict__['_spec_asyncs'] = _spec_asyncs

+    def _mock_extend_spec_methods(self, spec_methods):
+        methods = self.__dict__.get('_mock_methods') or []
+        methods.extend(spec_methods)
+        self.__dict__['_mock_methods'] = methods
+
     def __get_return_value(self):
         ret = self._mock_return_value
         if self._mock_delegate is not None:
@@ -981,7 +986,7 @@ class NonCallableMock(Base):


     def assert_called_once_with(self, /, *args, **kwargs):
-        """assert that the mock was called exactly once and that that call was
+        """assert that the mock was called exactly once and that call was
         with the specified arguments."""
         if not self.call_count == 1:
             msg = ("Expected '%s' to be called once. Called %s times.%s"
@@ -2766,14 +2771,16 @@ def create_autospec(spec, spec_set=False, instance=False, _parent=None,
         raise InvalidSpecError(f'Cannot autospec a Mock object. '
                                f'[object={spec!r}]')
     is_async_func = _is_async_func(spec)
+    _kwargs = {'spec': spec}
     entries = [(entry, _missing) for entry in dir(spec)]
     if is_type and instance and is_dataclass(spec):
+        is_dataclass_spec = True
         dataclass_fields = fields(spec)
         entries.extend((f.name, f.type) for f in dataclass_fields)
-        _kwargs = {'spec': [f.name for f in dataclass_fields]}
+        dataclass_spec_list = [f.name for f in dataclass_fields]
     else:
-        _kwargs = {'spec': spec}
+        is_dataclass_spec = False

     if spec_set:
         _kwargs = {'spec_set': spec}
@@ -2810,6 +2817,8 @@ def create_autospec(spec, spec_set=False, instance=False, _parent=None,

     mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
                  name=_name, **_kwargs)
+    if is_dataclass_spec:
+        mock._mock_extend_spec_methods(dataclass_spec_list)

     if isinstance(spec, FunctionTypes):
         # should only happen at the top level because we don't
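Note on the create_autospec() hunks above: previously, autospeccing a dataclass with instance=True replaced the whole spec with the bare field-name list, so methods defined on the dataclass were dropped from the mock. With the extended spec, both fields and methods survive. A sketch (WeatherReport is an invented example class; assumes this patch applied):

from dataclasses import dataclass
from unittest.mock import create_autospec


@dataclass
class WeatherReport:
    temperature: float

    def summary(self):
        return f"{self.temperature} degrees"


report = create_autospec(WeatherReport, instance=True)
report.temperature      # dataclass fields remain part of the spec
report.summary()        # methods are no longer dropped from the spec
# report.nonexistent    # would still raise AttributeError (spec enforced)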
diff --git a/Lib/unittest/runner.py b/Lib/unittest/runner.py
index eb0234a2617..5f22d91aebd 100644
--- a/Lib/unittest/runner.py
+++ b/Lib/unittest/runner.py
@@ -4,7 +4,7 @@ import sys
 import time
 import warnings

-from _colorize import get_colors
+from _colorize import get_theme

 from . import result
 from .case import _SubTest
@@ -45,7 +45,7 @@ class TextTestResult(result.TestResult):
         self.showAll = verbosity > 1
         self.dots = verbosity == 1
         self.descriptions = descriptions
-        self._ansi = get_colors(file=stream)
+        self._theme = get_theme(tty_file=stream).unittest
         self._newline = True
         self.durations = durations

@@ -79,101 +79,99 @@ class TextTestResult(result.TestResult):

     def addSubTest(self, test, subtest, err):
         if err is not None:
-            red, reset = self._ansi.RED, self._ansi.RESET
+            t = self._theme
             if self.showAll:
                 if issubclass(err[0], subtest.failureException):
-                    self._write_status(subtest, f"{red}FAIL{reset}")
+                    self._write_status(subtest, f"{t.fail}FAIL{t.reset}")
                 else:
-                    self._write_status(subtest, f"{red}ERROR{reset}")
+                    self._write_status(subtest, f"{t.fail}ERROR{t.reset}")
             elif self.dots:
                 if issubclass(err[0], subtest.failureException):
-                    self.stream.write(f"{red}F{reset}")
+                    self.stream.write(f"{t.fail}F{t.reset}")
                 else:
-                    self.stream.write(f"{red}E{reset}")
+                    self.stream.write(f"{t.fail}E{t.reset}")
                 self.stream.flush()
         super(TextTestResult, self).addSubTest(test, subtest, err)

     def addSuccess(self, test):
         super(TextTestResult, self).addSuccess(test)
-        green, reset = self._ansi.GREEN, self._ansi.RESET
+        t = self._theme
         if self.showAll:
-            self._write_status(test, f"{green}ok{reset}")
+            self._write_status(test, f"{t.passed}ok{t.reset}")
         elif self.dots:
-            self.stream.write(f"{green}.{reset}")
+            self.stream.write(f"{t.passed}.{t.reset}")
             self.stream.flush()

     def addError(self, test, err):
         super(TextTestResult, self).addError(test, err)
-        red, reset = self._ansi.RED, self._ansi.RESET
+        t = self._theme
         if self.showAll:
-            self._write_status(test, f"{red}ERROR{reset}")
+            self._write_status(test, f"{t.fail}ERROR{t.reset}")
         elif self.dots:
-            self.stream.write(f"{red}E{reset}")
+            self.stream.write(f"{t.fail}E{t.reset}")
             self.stream.flush()

     def addFailure(self, test, err):
         super(TextTestResult, self).addFailure(test, err)
-        red, reset = self._ansi.RED, self._ansi.RESET
+        t = self._theme
         if self.showAll:
-            self._write_status(test, f"{red}FAIL{reset}")
+            self._write_status(test, f"{t.fail}FAIL{t.reset}")
         elif self.dots:
-            self.stream.write(f"{red}F{reset}")
+            self.stream.write(f"{t.fail}F{t.reset}")
             self.stream.flush()

     def addSkip(self, test, reason):
         super(TextTestResult, self).addSkip(test, reason)
-        yellow, reset = self._ansi.YELLOW, self._ansi.RESET
+        t = self._theme
         if self.showAll:
-            self._write_status(test, f"{yellow}skipped{reset} {reason!r}")
+            self._write_status(test, f"{t.warn}skipped{t.reset} {reason!r}")
         elif self.dots:
-            self.stream.write(f"{yellow}s{reset}")
+            self.stream.write(f"{t.warn}s{t.reset}")
             self.stream.flush()

     def addExpectedFailure(self, test, err):
         super(TextTestResult, self).addExpectedFailure(test, err)
-        yellow, reset = self._ansi.YELLOW, self._ansi.RESET
+        t = self._theme
         if self.showAll:
-            self.stream.writeln(f"{yellow}expected failure{reset}")
+            self.stream.writeln(f"{t.warn}expected failure{t.reset}")
             self.stream.flush()
         elif self.dots:
-            self.stream.write(f"{yellow}x{reset}")
+            self.stream.write(f"{t.warn}x{t.reset}")
             self.stream.flush()

     def addUnexpectedSuccess(self, test):
         super(TextTestResult, self).addUnexpectedSuccess(test)
-        red, reset = self._ansi.RED, self._ansi.RESET
+        t = self._theme
         if self.showAll:
-            self.stream.writeln(f"{red}unexpected success{reset}")
+            self.stream.writeln(f"{t.fail}unexpected success{t.reset}")
             self.stream.flush()
         elif self.dots:
-            self.stream.write(f"{red}u{reset}")
+            self.stream.write(f"{t.fail}u{t.reset}")
             self.stream.flush()

     def printErrors(self):
-        bold_red = self._ansi.BOLD_RED
-        red = self._ansi.RED
-        reset = self._ansi.RESET
+        t = self._theme
         if self.dots or self.showAll:
             self.stream.writeln()
             self.stream.flush()
-        self.printErrorList(f"{red}ERROR{reset}", self.errors)
-        self.printErrorList(f"{red}FAIL{reset}", self.failures)
+        self.printErrorList(f"{t.fail}ERROR{t.reset}", self.errors)
+        self.printErrorList(f"{t.fail}FAIL{t.reset}", self.failures)
         unexpectedSuccesses = getattr(self, "unexpectedSuccesses", ())
         if unexpectedSuccesses:
             self.stream.writeln(self.separator1)
             for test in unexpectedSuccesses:
                 self.stream.writeln(
-                    f"{red}UNEXPECTED SUCCESS{bold_red}: "
-                    f"{self.getDescription(test)}{reset}"
+                    f"{t.fail}UNEXPECTED SUCCESS{t.fail_info}: "
+                    f"{self.getDescription(test)}{t.reset}"
                 )
             self.stream.flush()

     def printErrorList(self, flavour, errors):
-        bold_red, reset = self._ansi.BOLD_RED, self._ansi.RESET
+        t = self._theme
         for test, err in errors:
             self.stream.writeln(self.separator1)
             self.stream.writeln(
-                f"{flavour}{bold_red}: {self.getDescription(test)}{reset}"
+                f"{flavour}{t.fail_info}: {self.getDescription(test)}{t.reset}"
             )
             self.stream.writeln(self.separator2)
             self.stream.writeln("%s" % err)
@@ -286,31 +284,26 @@ class TextTestRunner(object):
             expected_fails, unexpected_successes, skipped = results

         infos = []
-        ansi = get_colors(file=self.stream)
-        bold_red = ansi.BOLD_RED
-        green = ansi.GREEN
-        red = ansi.RED
-        reset = ansi.RESET
-        yellow = ansi.YELLOW
+        t = get_theme(tty_file=self.stream).unittest

         if not result.wasSuccessful():
-            self.stream.write(f"{bold_red}FAILED{reset}")
+            self.stream.write(f"{t.fail_info}FAILED{t.reset}")
             failed, errored = len(result.failures), len(result.errors)
             if failed:
-                infos.append(f"{bold_red}failures={failed}{reset}")
+                infos.append(f"{t.fail_info}failures={failed}{t.reset}")
             if errored:
-                infos.append(f"{bold_red}errors={errored}{reset}")
+                infos.append(f"{t.fail_info}errors={errored}{t.reset}")
         elif run == 0 and not skipped:
-            self.stream.write(f"{yellow}NO TESTS RAN{reset}")
+            self.stream.write(f"{t.warn}NO TESTS RAN{t.reset}")
         else:
-            self.stream.write(f"{green}OK{reset}")
+            self.stream.write(f"{t.passed}OK{t.reset}")

         if skipped:
-            infos.append(f"{yellow}skipped={skipped}{reset}")
+            infos.append(f"{t.warn}skipped={skipped}{t.reset}")
         if expected_fails:
-            infos.append(f"{yellow}expected failures={expected_fails}{reset}")
+            infos.append(f"{t.warn}expected failures={expected_fails}{t.reset}")
         if unexpected_successes:
             infos.append(
-                f"{red}unexpected successes={unexpected_successes}{reset}"
+                f"{t.fail}unexpected successes={unexpected_successes}{t.reset}"
             )

         if infos:
             self.stream.writeln(" (%s)" % (", ".join(infos),))
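Note on the runner.py hunks above: the flat ANSI constants from get_colors() are replaced by a namespaced theme object. _colorize is a CPython-internal module, not a public API; the sketch below only illustrates the attribute shape visible in this diff (passed, warn, fail, fail_info, reset), on the assumption that these degrade to empty strings when the stream is not a tty:

import sys

from _colorize import get_theme  # CPython-internal; not a public API

theme = get_theme(tty_file=sys.stdout).unittest
# Each attribute is an escape sequence when color is enabled, or an
# empty string otherwise, so the f-strings are safe either way.
print(f"{theme.passed}ok{theme.reset}")
print(f"{theme.warn}skipped{theme.reset}")
print(f"{theme.fail}FAIL{theme.reset}")
print(f"{theme.fail_info}FAILED: details{theme.reset}")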
diff --git a/Lib/unittest/suite.py b/Lib/unittest/suite.py
index 6f45b6fe5f6..ae9ca2d615d 100644
--- a/Lib/unittest/suite.py
+++ b/Lib/unittest/suite.py
@@ -223,6 +223,11 @@ class TestSuite(BaseTestSuite):
             if result._moduleSetUpFailed:
                 try:
                     case.doModuleCleanups()
+                except ExceptionGroup as eg:
+                    for e in eg.exceptions:
+                        self._createClassOrModuleLevelException(result, e,
+                                                                'setUpModule',
+                                                                currentModule)
                 except Exception as e:
                     self._createClassOrModuleLevelException(result, e,
                                                             'setUpModule',
@@ -235,15 +240,15 @@ class TestSuite(BaseTestSuite):
             errorName = f'{method_name} ({parent})'
         self._addClassOrModuleLevelException(result, exc, errorName, info)

-    def _addClassOrModuleLevelException(self, result, exception, errorName,
+    def _addClassOrModuleLevelException(self, result, exc, errorName,
                                         info=None):
         error = _ErrorHolder(errorName)
         addSkip = getattr(result, 'addSkip', None)
-        if addSkip is not None and isinstance(exception, case.SkipTest):
-            addSkip(error, str(exception))
+        if addSkip is not None and isinstance(exc, case.SkipTest):
+            addSkip(error, str(exc))
         else:
             if not info:
-                result.addError(error, sys.exc_info())
+                result.addError(error, (type(exc), exc, exc.__traceback__))
             else:
                 result.addError(error, info)
@@ -273,6 +278,13 @@ class TestSuite(BaseTestSuite):
                                                             previousModule)
         try:
             case.doModuleCleanups()
+        except ExceptionGroup as eg:
+            if isinstance(result, _DebugResult):
+                raise
+            for e in eg.exceptions:
+                self._createClassOrModuleLevelException(result, e,
+                                                        'tearDownModule',
+                                                        previousModule)
         except Exception as e:
             if isinstance(result, _DebugResult):
                 raise
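Note on the suite.py hunks above: the suite unpacks the new ExceptionGroup so that each cleanup failure becomes its own setUpModule/tearDownModule error. The ExceptionGroup clause must precede the generic Exception clause because ExceptionGroup subclasses Exception, and each member is reported via (type(exc), exc, exc.__traceback__) since sys.exc_info() inside the loop would describe the group rather than the member. A simplified restatement of the fan-out pattern (names invented):

def report_cleanup_failures(run_cleanups, report_error):
    """Run cleanups, reporting every failure individually."""
    try:
        run_cleanups()
    except ExceptionGroup as eg:
        # Fan out: one reported error per grouped cleanup failure.
        for exc in eg.exceptions:
            report_error(exc)
    except Exception as exc:
        # A single non-grouped failure is reported as before.
        report_error(exc)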