Diffstat (limited to 'tests/run-tests.py')
-rwxr-xr-x  tests/run-tests.py | 177
1 file changed, 118 insertions(+), 59 deletions(-)
diff --git a/tests/run-tests.py b/tests/run-tests.py
index ac411a0be6..e45122b10e 100755
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -95,6 +95,7 @@ class __FS:
return __File()
vfs.mount(__FS(), '/__vfstest')
os.chdir('/__vfstest')
+{import_prologue}
__import__('__injected_test')
"""
@@ -105,14 +106,11 @@ PC_PLATFORMS = ("darwin", "linux", "win32")
# These are tests for which it is difficult to detect automatically that they should not be run on the given target.
platform_tests_to_skip = {
"esp8266": (
- "micropython/viper_args.py", # too large
- "micropython/viper_binop_arith.py", # too large
- "misc/rge_sm.py", # too large
+ "misc/rge_sm.py", # incorrect values due to object representation C
),
"minimal": (
"basics/class_inplace_op.py", # all special methods not supported
"basics/subclass_native_init.py", # native subclassing corner cases not support
- "misc/rge_sm.py", # too large
"micropython/opt_level.py", # don't assume line numbers are stored
),
"nrf": (
@@ -272,22 +270,17 @@ def detect_test_platform(pyb, args):
print()
-def prepare_script_for_target(args, *, script_filename=None, script_text=None, force_plain=False):
+def prepare_script_for_target(args, *, script_text=None, force_plain=False):
if force_plain or (not args.via_mpy and args.emit == "bytecode"):
- if script_filename is not None:
- with open(script_filename, "rb") as f:
- script_text = f.read()
+ # A plain test to run as-is, no processing needed.
+ pass
elif args.via_mpy:
tempname = tempfile.mktemp(dir="")
mpy_filename = tempname + ".mpy"
- if script_filename is None:
- script_filename = tempname + ".py"
- cleanup_script_filename = True
- with open(script_filename, "wb") as f:
- f.write(script_text)
- else:
- cleanup_script_filename = False
+ script_filename = tempname + ".py"
+ with open(script_filename, "wb") as f:
+ f.write(script_text)
try:
subprocess.check_output(
@@ -303,8 +296,7 @@ def prepare_script_for_target(args, *, script_filename=None, script_text=None, f
script_text = b"__buf=" + bytes(repr(f.read()), "ascii") + b"\n"
rm_f(mpy_filename)
- if cleanup_script_filename:
- rm_f(script_filename)
+ rm_f(script_filename)
script_text += bytes(injected_import_hook_code, "ascii")
else:
@@ -315,9 +307,21 @@ def prepare_script_for_target(args, *, script_filename=None, script_text=None, f
def run_script_on_remote_target(pyb, args, test_file, is_special):
- had_crash, script = prepare_script_for_target(
- args, script_filename=test_file, force_plain=is_special
- )
+ with open(test_file, "rb") as f:
+ script = f.read()
+
+ # If the test is not a special test, prepend it with a print to indicate that it started.
+ # If the print does not execute, this means that the test did not even start, e.g. it was
+ # too large for the target.
+ prepend_start_test = not is_special
+ if prepend_start_test:
+ if script.startswith(b"#"):
+ script = b"print('START TEST')" + script
+ else:
+ script = b"print('START TEST')\n" + script
+
+ had_crash, script = prepare_script_for_target(args, script_text=script, force_plain=is_special)
+
if had_crash:
return True, script
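The branch on a leading b"#" above means a test that begins with a header comment gets the marker spliced onto the same line, presumably so the original line numbering of the test is preserved; otherwise a newline is required for the result to remain valid Python. The same transformation, sketched standalone:

    def prepend_start_marker(script):
        # Mirror of the logic above (a sketch, not the actual helper).
        if script.startswith(b"#"):
            # b"# hdr\nx=1" -> b"print('START TEST')# hdr\nx=1" (line count unchanged)
            return b"print('START TEST')" + script
        # b"x=1\n" -> b"print('START TEST')\nx=1\n"
        return b"print('START TEST')\n" + script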
@@ -328,9 +332,19 @@ def run_script_on_remote_target(pyb, args, test_file, is_special):
except pyboard.PyboardError as e:
had_crash = True
if not is_special and e.args[0] == "exception":
- output_mupy = e.args[1] + e.args[2] + b"CRASH"
+ if prepend_start_test and e.args[1] == b"" and b"MemoryError" in e.args[2]:
+ output_mupy = b"SKIP-TOO-LARGE\n"
+ else:
+ output_mupy = e.args[1] + e.args[2] + b"CRASH"
else:
output_mupy = bytes(e.args[0], "ascii") + b"\nCRASH"
+
+ if prepend_start_test:
+ if output_mupy.startswith(b"START TEST\r\n"):
+ output_mupy = output_mupy.removeprefix(b"START TEST\r\n")
+ else:
+ had_crash = True
+
return had_crash, output_mupy
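The new heuristic above classifies "no stdout at all, and a MemoryError in the traceback" as a test that never started, i.e. one too large for the target. A standalone sketch with hypothetical exception args, mirroring the ("exception", stdout, stderr) shape that pyboard.PyboardError carries in this code path:

    # Hypothetical args for a test that failed to even begin:
    e_args = ("exception", b"", b"Traceback (most recent call last):\nMemoryError: ...")

    prepend_start_test = True
    if prepend_start_test and e_args[1] == b"" and b"MemoryError" in e_args[2]:
        output_mupy = b"SKIP-TOO-LARGE\n"  # START TEST was never printed
    else:
        output_mupy = e_args[1] + e_args[2] + b"CRASH"

Note also that bytes.removeprefix(), used above to strip the marker, requires Python 3.9 or later on the host.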
@@ -392,6 +406,10 @@ def run_micropython(pyb, args, test_file, test_file_abspath, is_special=False):
return rv
def send_get(what):
+ # Detect {\x00} pattern and convert to ctrl-key codes.
+ ctrl_code = lambda m: bytes([int(m.group(1))])
+ what = re.sub(rb'{\\x(\d\d)}', ctrl_code, what)
+
os.write(master, what)
return get()
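This substitution lets special tests spell control characters as readable {\x..} escapes in their source. Note that int(m.group(1)) parses the two digits as decimal, which coincides with hex only for codes 00 through 09 (enough for the usual ctrl-key codes). A standalone check of the behaviour:

    import re

    ctrl_code = lambda m: bytes([int(m.group(1))])
    # b"{\\x04}" in a test script becomes the raw ctrl-D byte b"\x04":
    assert re.sub(rb'{\\x(\d\d)}', ctrl_code, b'reset {\\x04}') == b'reset \x04'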
@@ -474,7 +492,7 @@ def run_micropython(pyb, args, test_file, test_file_abspath, is_special=False):
output_mupy = output_mupy.replace(b"\r\n", b"\n")
# don't try to convert the output if we should skip this test
- if had_crash or output_mupy in (b"SKIP\n", b"CRASH"):
+ if had_crash or output_mupy in (b"SKIP\n", b"SKIP-TOO-LARGE\n", b"CRASH"):
return output_mupy
# skipped special tests will output "SKIP" surrounded by other interpreter debug output
@@ -603,11 +621,8 @@ class PyboardNodeRunner:
def run_tests(pyb, tests, args, result_dir, num_threads=1):
- test_count = ThreadSafeCounter()
testcase_count = ThreadSafeCounter()
- passed_count = ThreadSafeCounter()
- failed_tests = ThreadSafeCounter([])
- skipped_tests = ThreadSafeCounter([])
+ test_results = ThreadSafeCounter([])
skip_tests = set()
skip_native = False
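ThreadSafeCounter is defined elsewhere in run-tests.py and is not part of this diff; going purely by how it is used here (constructed with 0 or a list, then .increment(), .append() and .value), a minimal sketch consistent with that usage would be:

    import threading

    class ThreadSafeCounter:
        # A sketch inferred from usage in this file, not the actual implementation.
        def __init__(self, start=0):
            self._lock = threading.Lock()
            self._value = start

        def increment(self):
            with self._lock:
                self._value += 1

        def append(self, arg):
            with self._lock:
                self._value.append(arg)

        @property
        def value(self):
            return self._value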
@@ -864,11 +879,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
test_basename = test_file.replace("..", "_").replace("./", "").replace("/", "_")
test_name = os.path.splitext(os.path.basename(test_file))[0]
- is_native = (
- test_name.startswith("native_")
- or test_name.startswith("viper_")
- or args.emit == "native"
- )
+ is_native = test_name.startswith("native_") or test_name.startswith("viper_")
is_endian = test_name.endswith("_endian")
is_int_big = test_name.startswith("int_big") or test_name.endswith("_intbig")
is_bytearray = test_name.startswith("bytearray") or test_name.endswith("_bytearray")
@@ -896,7 +907,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
if skip_it:
print("skip ", test_file)
- skipped_tests.append(test_name)
+ test_results.append((test_file, "skip", ""))
return
# Run the test on the MicroPython target.
@@ -911,7 +922,11 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
# start-up code (eg boot.py) when preparing to run the next test.
pyb.read_until(1, b"raw REPL; CTRL-B to exit\r\n")
print("skip ", test_file)
- skipped_tests.append(test_name)
+ test_results.append((test_file, "skip", ""))
+ return
+ elif output_mupy == b"SKIP-TOO-LARGE\n":
+ print("lrge ", test_file)
+ test_results.append((test_file, "skip", "too large"))
return
# Look at the output of the test to see if unittest was used.
@@ -994,7 +1009,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
# Print test summary, update counters, and save .exp/.out files if needed.
if test_passed:
print("pass ", test_file, extra_info)
- passed_count.increment()
+ test_results.append((test_file, "pass", ""))
rm_f(filename_expected)
rm_f(filename_mupy)
else:
@@ -1006,9 +1021,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
rm_f(filename_expected) # in case left over from previous failed run
with open(filename_mupy, "wb") as f:
f.write(output_mupy)
- failed_tests.append((test_name, test_file))
-
- test_count.increment()
+ test_results.append((test_file, "fail", ""))
# Print a note if this looks like it might have been a misfired unittest
if not uses_unittest and not test_passed:
@@ -1035,17 +1048,49 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
print(line)
sys.exit(1)
- print(
- "{} tests performed ({} individual testcases)".format(
- test_count.value, testcase_count.value
- )
+ # Return test results.
+ return test_results.value, testcase_count.value
+
+
+# Print a summary of the results and save them to a JSON file.
+# Returns True if everything succeeded, False otherwise.
+def create_test_report(args, test_results, testcase_count=None):
+ passed_tests = list(r for r in test_results if r[1] == "pass")
+ skipped_tests = list(r for r in test_results if r[1] == "skip" and r[2] != "too large")
+ skipped_tests_too_large = list(
+ r for r in test_results if r[1] == "skip" and r[2] == "too large"
)
- print("{} tests passed".format(passed_count.value))
+ failed_tests = list(r for r in test_results if r[1] == "fail")
+
+ num_tests_performed = len(passed_tests) + len(failed_tests)
+
+ testcase_count_info = ""
+ if testcase_count is not None:
+ testcase_count_info = " ({} individual testcases)".format(testcase_count)
+ print("{} tests performed{}".format(num_tests_performed, testcase_count_info))
+
+ print("{} tests passed".format(len(passed_tests)))
- skipped_tests = sorted(skipped_tests.value)
if len(skipped_tests) > 0:
- print("{} tests skipped: {}".format(len(skipped_tests), " ".join(skipped_tests)))
- failed_tests = sorted(failed_tests.value)
+ print(
+ "{} tests skipped: {}".format(
+ len(skipped_tests), " ".join(test[0] for test in skipped_tests)
+ )
+ )
+
+ if len(skipped_tests_too_large) > 0:
+ print(
+ "{} tests skipped because they are too large: {}".format(
+ len(skipped_tests_too_large), " ".join(test[0] for test in skipped_tests_too_large)
+ )
+ )
+
+ if len(failed_tests) > 0:
+ print(
+ "{} tests failed: {}".format(
+ len(failed_tests), " ".join(test[0] for test in failed_tests)
+ )
+ )
# Serialize regex added by append_filter.
def to_json(obj):
@@ -1053,23 +1098,22 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
return obj.pattern
return obj
- with open(os.path.join(result_dir, RESULTS_FILE), "w") as f:
+ with open(os.path.join(args.result_dir, RESULTS_FILE), "w") as f:
json.dump(
- {"args": vars(args), "failed_tests": [test[1] for test in failed_tests]},
+ {
+ # The arguments passed on the command-line.
+ "args": vars(args),
+ # A list of all results of the form [(test, result, reason), ...].
+ "results": list(test for test in test_results),
+ # A list of failed tests. This is deprecated, use the "results" above instead.
+ "failed_tests": [test[0] for test in failed_tests],
+ },
f,
default=to_json,
)
- if len(failed_tests) > 0:
- print(
- "{} tests failed: {}".format(
- len(failed_tests), " ".join(test[0] for test in failed_tests)
- )
- )
- return False
-
- # all tests succeeded
- return True
+ # Return True only if all tests succeeded.
+ return len(failed_tests) == 0
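With this change each entry of test_results is a (test_file, result, reason) tuple, and the JSON written above gains a "results" key while keeping "failed_tests" for compatibility. An illustrative shape of the file (paths and args are made up):

    # Illustrative contents of the results JSON after this change:
    report = {
        "args": {"target": "unix", "jobs": 1},
        "results": [
            ["basics/int_small.py", "pass", ""],
            ["misc/rge_sm.py", "skip", "too large"],
            ["basics/fun_error.py", "fail", ""],
        ],
        # Deprecated; superseded by "results" above.
        "failed_tests": ["basics/fun_error.py"],
    }

    # The --run-failures path later in this diff filters the same way:
    assert [t[0] for t in report["results"] if t[1] == "fail"] == report["failed_tests"]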
class append_filter(argparse.Action):
@@ -1087,6 +1131,8 @@ class append_filter(argparse.Action):
def main():
+ global injected_import_hook_code
+
cmd_parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Run and manage tests for MicroPython.
@@ -1196,8 +1242,20 @@ the last matching regex is used:
action="store_true",
help="re-run only the failed tests",
)
+ cmd_parser.add_argument(
+ "--begin",
+ metavar="PROLOGUE",
+ default=None,
+ help="prologue python file to execute before module import",
+ )
args = cmd_parser.parse_args()
+ prologue = ""
+ if args.begin:
+ with open(args.begin, "rt") as source:
+ prologue = source.read()
+ injected_import_hook_code = injected_import_hook_code.replace("{import_prologue}", prologue)
+
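Putting the new option together, a sketch of how the prologue is spliced into the injected import hook; the invocation in the comment and the prologue contents are illustrative only:

    # Hypothetical invocation (option values and paths are examples only):
    #   ./run-tests.py --begin prologue.py basics/int_small.py
    prologue = "print('prologue ran')\n"  # stands in for open(args.begin).read()
    injected = "...\n{import_prologue}\n__import__('__injected_test')\n"
    injected = injected.replace("{import_prologue}", prologue)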
if args.print_failures:
for out in glob(os.path.join(args.result_dir, "*.out")):
testbase = out[:-4]
@@ -1238,7 +1296,7 @@ the last matching regex is used:
results_file = os.path.join(args.result_dir, RESULTS_FILE)
if os.path.exists(results_file):
with open(results_file, "r") as f:
- tests = json.load(f)["failed_tests"]
+ tests = list(test[0] for test in json.load(f)["results"] if test[1] == "fail")
else:
tests = []
elif len(args.files) == 0:
@@ -1316,7 +1374,8 @@ the last matching regex is used:
try:
os.makedirs(args.result_dir, exist_ok=True)
- res = run_tests(pyb, tests, args, args.result_dir, args.jobs)
+ test_results, testcase_count = run_tests(pyb, tests, args, args.result_dir, args.jobs)
+ res = create_test_report(args, test_results, testcase_count)
finally:
if pyb:
pyb.close()