Diffstat (limited to 'tests')
-rw-r--r--  tests/extmod/random_extra_float.py |  8
-rw-r--r--  tests/float/complex1.py            |  2
-rwxr-xr-x  tests/run-multitests.py            | 32
-rwxr-xr-x  tests/run-natmodtests.py           | 37
-rwxr-xr-x  tests/run-perfbench.py             | 29
-rwxr-xr-x  tests/run-tests.py                 | 50
6 files changed, 91 insertions(+), 67 deletions(-)
diff --git a/tests/extmod/random_extra_float.py b/tests/extmod/random_extra_float.py
index 3b37ed8dce..03973c5834 100644
--- a/tests/extmod/random_extra_float.py
+++ b/tests/extmod/random_extra_float.py
@@ -1,12 +1,8 @@
 try:
     import random
-except ImportError:
-    print("SKIP")
-    raise SystemExit
-try:
-    random.randint
-except AttributeError:
+    random.random
+except (ImportError, AttributeError):
     print("SKIP")
     raise SystemExit
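With this patch the guard collapses to a single block: the import and the attribute probe sit in the same try, so a missing module and a missing function both produce the SKIP. A minimal standalone sketch of the resulting pattern (comments are ours, not part of the test):

    try:
        import random  # module may be absent on minimal builds
        random.random  # probe: AttributeError if the float functions are compiled out
    except (ImportError, AttributeError):
        print("SKIP")
        raise SystemExit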
diff --git a/tests/float/complex1.py b/tests/float/complex1.py
index f4107a1390..0a1d98b9af 100644
--- a/tests/float/complex1.py
+++ b/tests/float/complex1.py
@@ -12,9 +12,11 @@ print(complex("1.2j"))
print(complex("1+j"))
print(complex("1+2j"))
print(complex("-1-2j"))
+print(complex("-1+2j"))
print(complex("+1-2j"))
print(complex(" -1-2j "))
print(complex(" +1-2j "))
+print(complex(" -1+2j "))
print(complex("nanj"))
print(complex("nan-infj"))
print(complex(1, 2))
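The two new cases exercise a negative real part combined with a positive imaginary part, bare and wrapped in whitespace. For reference, CPython, whose output run-tests.py uses as the expected result for these float tests, parses both identically:

    print(complex("-1+2j"))    # prints (-1+2j)
    print(complex(" -1+2j "))  # prints (-1+2j); surrounding whitespace is accepted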
diff --git a/tests/run-multitests.py b/tests/run-multitests.py
index 387eec7018..92bd64193d 100755
--- a/tests/run-multitests.py
+++ b/tests/run-multitests.py
@@ -15,6 +15,8 @@ import itertools
 import subprocess
 import tempfile

+run_tests_module = __import__("run-tests")
+
 test_dir = os.path.abspath(os.path.dirname(__file__))
 if os.path.abspath(sys.path[0]) == test_dir:
@@ -488,9 +490,7 @@ def print_diff(a, b):

 def run_tests(test_files, instances_truth, instances_test):
-    skipped_tests = []
-    passed_tests = []
-    failed_tests = []
+    test_results = []

     for test_file, num_instances in test_files:
         instances_str = "|".join(str(instances_test[i]) for i in range(num_instances))
@@ -526,13 +526,13 @@ def run_tests(test_files, instances_truth, instances_test):
         # Print result of test
         if skip:
             print("skip")
-            skipped_tests.append(test_file)
+            test_results.append((test_file, "skip", ""))
         elif output_test == output_truth:
             print("pass")
-            passed_tests.append(test_file)
+            test_results.append((test_file, "pass", ""))
         else:
             print("FAIL")
-            failed_tests.append(test_file)
+            test_results.append((test_file, "fail", ""))
             if not cmd_args.show_output:
                 print("### TEST ###")
                 print(output_test, end="")
@@ -549,15 +549,7 @@ def run_tests(test_files, instances_truth, instances_test):
         if cmd_args.show_output:
             print()

-    print("{} tests performed".format(len(skipped_tests) + len(passed_tests) + len(failed_tests)))
-    print("{} tests passed".format(len(passed_tests)))
-
-    if skipped_tests:
-        print("{} tests skipped: {}".format(len(skipped_tests), " ".join(skipped_tests)))
-    if failed_tests:
-        print("{} tests failed: {}".format(len(failed_tests), " ".join(failed_tests)))
-
-    return not failed_tests
+    return test_results


 def main():
@@ -583,6 +575,12 @@ def main():
         default=1,
         help="repeat the test with this many permutations of the instance order",
     )
+    cmd_parser.add_argument(
+        "-r",
+        "--result-dir",
+        default=run_tests_module.base_path("results"),
+        help="directory for test results",
+    )
     cmd_parser.epilog = (
         "Supported instance types:\r\n"
         " -i pyb:<port> physical device (eg. pyboard) on provided repl port.\n"
@@ -623,13 +621,15 @@ def main():
     for _ in range(max_instances - len(instances_test)):
         instances_test.append(PyInstanceSubProcess([MICROPYTHON]))

+    os.makedirs(cmd_args.result_dir, exist_ok=True)
     all_pass = True
     try:
         for i, instances_test_permutation in enumerate(itertools.permutations(instances_test)):
             if i >= cmd_args.permutations:
                 break

-            all_pass &= run_tests(test_files, instances_truth, instances_test_permutation)
+            test_results = run_tests(test_files, instances_truth, instances_test_permutation)
+            all_pass &= run_tests_module.create_test_report(cmd_args, test_results)
     finally:
         for i in instances_truth:
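run-multitests.py and the other runners below all load run-tests.py with __import__, because the hyphen in the file name cannot appear in a plain import statement. A standalone sketch of the idiom:

    import sys

    # "import run-tests" is a SyntaxError ("-" is not valid in an identifier),
    # but __import__ takes the module name as a string and only needs
    # run-tests.py to be reachable via sys.path.
    sys.path.insert(0, ".")  # assumption: the current directory is tests/
    run_tests_module = __import__("run-tests")

    # Attributes are then accessed through the bound name as usual:
    print(run_tests_module.base_path("results"))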
diff --git a/tests/run-natmodtests.py b/tests/run-natmodtests.py
index 073e0b053e..f9d2074f6f 100755
--- a/tests/run-natmodtests.py
+++ b/tests/run-natmodtests.py
@@ -9,6 +9,8 @@ import subprocess
 import sys
 import argparse

+run_tests_module = __import__("run-tests")
+
 sys.path.append("../tools")
 import pyboard
@@ -133,7 +135,7 @@ def detect_architecture(target):
     return platform, arch, None


-def run_tests(target_truth, target, args, stats, resolved_arch):
+def run_tests(target_truth, target, args, resolved_arch):
     global injected_import_hook_code

     prelude = ""
@@ -141,6 +143,7 @@ def run_tests(target_truth, target, args, stats, resolved_arch):
         prelude = args.begin.read()
     injected_import_hook_code = injected_import_hook_code.replace("{import_prelude}", prelude)

+    test_results = []
     for test_file in args.files:
         # Find supported test
         test_file_basename = os.path.basename(test_file)
@@ -163,7 +166,8 @@ def run_tests(target_truth, target, args, stats, resolved_arch):
             with open(NATMOD_EXAMPLE_DIR + test_mpy, "rb") as f:
                 test_script += b"__buf=" + bytes(repr(f.read()), "ascii") + b"\n"
         except OSError:
-            print("---- {} - mpy file not compiled".format(test_file))
+            test_results.append((test_file, "skip", "mpy file not compiled"))
+            print("skip {} - mpy file not compiled".format(test_file))
             continue
         test_script += bytes(injected_import_hook_code.format(test_module), "ascii")
         test_script += test_file_data
@@ -195,17 +199,18 @@ def run_tests(target_truth, target, args, stats, resolved_arch):
result = "pass"
# Accumulate statistics
- stats["total"] += 1
if result == "pass":
- stats["pass"] += 1
+ test_results.append((test_file, "pass", ""))
elif result == "SKIP":
- stats["skip"] += 1
+ test_results.append((test_file, "skip", ""))
else:
- stats["fail"] += 1
+ test_results.append((test_file, "fail", ""))
# Print result
print("{:4} {}{}".format(result, test_file, extra))
+ return test_results
+
def main():
cmd_parser = argparse.ArgumentParser(
@@ -227,6 +232,12 @@ def main():
         default=None,
         help="prologue python file to execute before module import",
     )
+    cmd_parser.add_argument(
+        "-r",
+        "--result-dir",
+        default=run_tests_module.base_path("results"),
+        help="directory for test results",
+    )
     cmd_parser.add_argument("files", nargs="*", help="input test files")
     args = cmd_parser.parse_args()
@@ -251,20 +262,14 @@ def main():
print("platform={} ".format(target_platform), end="")
print("arch={}".format(target_arch))
- stats = {"total": 0, "pass": 0, "fail": 0, "skip": 0}
- run_tests(target_truth, target, args, stats, target_arch)
+ os.makedirs(args.result_dir, exist_ok=True)
+ test_results = run_tests(target_truth, target, args, target_arch)
+ res = run_tests_module.create_test_report(args, test_results)
target.close()
target_truth.close()
- print("{} tests performed".format(stats["total"]))
- print("{} tests passed".format(stats["pass"]))
- if stats["fail"]:
- print("{} tests failed".format(stats["fail"]))
- if stats["skip"]:
- print("{} tests skipped".format(stats["skip"]))
-
- if stats["fail"]:
+ if not res:
sys.exit(1)
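run-natmodtests.py now follows the same reporting contract as the other runners: build a flat list of (test_file, result, reason) tuples, where result is one of "pass", "skip" or "fail", and hand it to create_test_report. A rough sketch of a runner wired up this way (execute_one and the argument handling are hypothetical stand-ins, not code from this patch):

    import argparse
    import os
    import sys

    run_tests_module = __import__("run-tests")

    def execute_one(test_file):
        # Hypothetical executor: returns a result string and an optional reason.
        return "pass", ""

    def run_suite(tests):
        test_results = []
        for test_file in tests:
            result, reason = execute_one(test_file)
            test_results.append((test_file, result, reason))  # the shared shape
        return test_results

    args = argparse.Namespace(result_dir="results")
    os.makedirs(args.result_dir, exist_ok=True)  # runners do this before reporting

    # create_test_report prints the summary, writes the JSON results file into
    # args.result_dir, and returns True only when no test failed.
    if not run_tests_module.create_test_report(args, run_suite(["a.py", "b.py"])):
        sys.exit(1)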
diff --git a/tests/run-perfbench.py b/tests/run-perfbench.py
index 81d873c459..cac2fee58f 100755
--- a/tests/run-perfbench.py
+++ b/tests/run-perfbench.py
@@ -10,10 +10,12 @@ import sys
 import argparse
 from glob import glob

+run_tests_module = __import__("run-tests")
+
 sys.path.append("../tools")
 import pyboard

-prepare_script_for_target = __import__("run-tests").prepare_script_for_target
+prepare_script_for_target = run_tests_module.prepare_script_for_target

 # Paths for host executables
 if os.name == "nt":
@@ -90,9 +92,9 @@ def run_benchmark_on_target(target, script):

 def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
+    test_results = []
     skip_complex = run_feature_test(target, "complex") != "complex"
     skip_native = run_feature_test(target, "native_check") != "native"
-    target_had_error = False

     for test_file in sorted(test_list):
         print(test_file + ": ", end="")
@@ -105,6 +107,7 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
and test_file.find("viper_") != -1
)
if skip:
+ test_results.append((test_file, "skip", ""))
print("SKIP")
continue
@@ -125,6 +128,7 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
         if isinstance(target, pyboard.Pyboard) or args.via_mpy:
             crash, test_script_target = prepare_script_for_target(args, script_text=test_script)
             if crash:
+                test_results.append((test_file, "fail", "preparation"))
                 print("CRASH:", test_script_target)
                 continue
         else:
@@ -162,10 +166,13 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
error = "FAIL truth"
if error is not None:
- if not error.startswith("SKIP"):
- target_had_error = True
+ if error.startswith("SKIP"):
+ test_results.append((test_file, "skip", error))
+ else:
+ test_results.append((test_file, "fail", error))
print(error)
else:
+ test_results.append((test_file, "pass", ""))
t_avg, t_sd = compute_stats(times)
s_avg, s_sd = compute_stats(scores)
print(
@@ -179,7 +186,7 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
         sys.stdout.flush()

-    return target_had_error
+    return test_results


 def parse_output(filename):
@@ -265,6 +272,12 @@ def main():
cmd_parser.add_argument("--via-mpy", action="store_true", help="compile code to .mpy first")
cmd_parser.add_argument("--mpy-cross-flags", default="", help="flags to pass to mpy-cross")
cmd_parser.add_argument(
+ "-r",
+ "--result-dir",
+ default=run_tests_module.base_path("results"),
+ help="directory for test results",
+ )
+ cmd_parser.add_argument(
"N", nargs=1, help="N parameter (approximate target CPU frequency in MHz)"
)
cmd_parser.add_argument("M", nargs=1, help="M parameter (approximate target heap in kbytes)")
@@ -307,13 +320,15 @@ def main():
print("N={} M={} n_average={}".format(N, M, n_average))
- target_had_error = run_benchmarks(args, target, N, M, n_average, tests)
+ os.makedirs(args.result_dir, exist_ok=True)
+ test_results = run_benchmarks(args, target, N, M, n_average, tests)
+ res = run_tests_module.create_test_report(args, test_results)
if isinstance(target, pyboard.Pyboard):
target.exit_raw_repl()
target.close()
- if target_had_error:
+ if not res:
sys.exit(1)
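In all three runners the new --result-dir option defaults to run_tests_module.base_path("results"), which anchors the directory at tests/ rather than at the caller's working directory. A rough sketch of what such a helper does (the real implementation lives in run-tests.py and may differ in detail):

    import os

    # Resolve a name relative to the directory containing this script, so
    # "results" lands in tests/results no matter where the runner is invoked.
    def base_path(*p):
        return os.path.abspath(os.path.join(os.path.dirname(__file__), *p))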
diff --git a/tests/run-tests.py b/tests/run-tests.py
index da05e18e4f..5eebc72460 100755
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -616,7 +616,6 @@ class PyboardNodeRunner:

 def run_tests(pyb, tests, args, result_dir, num_threads=1):
-    test_count = ThreadSafeCounter()
     testcase_count = ThreadSafeCounter()
     test_results = ThreadSafeCounter([])
@@ -903,7 +902,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
         if skip_it:
             print("skip ", test_file)
-            test_results.append((test_name, test_file, "skip", ""))
+            test_results.append((test_file, "skip", ""))
             return

         # Run the test on the MicroPython target.
@@ -918,11 +917,11 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
                 # start-up code (eg boot.py) when preparing to run the next test.
                 pyb.read_until(1, b"raw REPL; CTRL-B to exit\r\n")
             print("skip ", test_file)
-            test_results.append((test_name, test_file, "skip", ""))
+            test_results.append((test_file, "skip", ""))
             return
         elif output_mupy == b"SKIP-TOO-LARGE\n":
             print("lrge ", test_file)
-            test_results.append((test_name, test_file, "skip", "too large"))
+            test_results.append((test_file, "skip", "too large"))
             return

         # Look at the output of the test to see if unittest was used.
@@ -1005,7 +1004,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
         # Print test summary, update counters, and save .exp/.out files if needed.
         if test_passed:
             print("pass ", test_file, extra_info)
-            test_results.append((test_name, test_file, "pass", ""))
+            test_results.append((test_file, "pass", ""))
             rm_f(filename_expected)
             rm_f(filename_mupy)
         else:
@@ -1017,9 +1016,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
             rm_f(filename_expected)  # in case left over from previous failed run
             with open(filename_mupy, "wb") as f:
                 f.write(output_mupy)
-            test_results.append((test_name, test_file, "fail", ""))
-
-        test_count.increment()
+            test_results.append((test_file, "fail", ""))

         # Print a note if this looks like it might have been a misfired unittest
         if not uses_unittest and not test_passed:
@@ -1046,19 +1043,27 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
             print(line)
         sys.exit(1)

-    test_results = test_results.value
-    passed_tests = list(r for r in test_results if r[2] == "pass")
-    skipped_tests = list(r for r in test_results if r[2] == "skip" and r[3] != "too large")
+    # Return test results.
+    return test_results.value, testcase_count.value
+
+
+# Print a summary of the results and save them to a JSON file.
+# Returns True if everything succeeded, False otherwise.
+def create_test_report(args, test_results, testcase_count=None):
+    passed_tests = list(r for r in test_results if r[1] == "pass")
+    skipped_tests = list(r for r in test_results if r[1] == "skip" and r[2] != "too large")
     skipped_tests_too_large = list(
-        r for r in test_results if r[2] == "skip" and r[3] == "too large"
+        r for r in test_results if r[1] == "skip" and r[2] == "too large"
     )
-    failed_tests = list(r for r in test_results if r[2] == "fail")
+    failed_tests = list(r for r in test_results if r[1] == "fail")
+
+    num_tests_performed = len(passed_tests) + len(failed_tests)
+
+    testcase_count_info = ""
+    if testcase_count is not None:
+        testcase_count_info = " ({} individual testcases)".format(testcase_count)
+    print("{} tests performed{}".format(num_tests_performed, testcase_count_info))

-    print(
-        "{} tests performed ({} individual testcases)".format(
-            test_count.value, testcase_count.value
-        )
-    )
     print("{} tests passed".format(len(passed_tests)))

     if len(skipped_tests) > 0:
@@ -1088,15 +1093,15 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
             return obj.pattern
         return obj

-    with open(os.path.join(result_dir, RESULTS_FILE), "w") as f:
+    with open(os.path.join(args.result_dir, RESULTS_FILE), "w") as f:
         json.dump(
             {
                 # The arguments passed on the command-line.
                 "args": vars(args),
                 # A list of all results of the form [(test, result, reason), ...].
-                "results": list(test[1:] for test in test_results),
+                "results": list(test for test in test_results),
                 # A list of failed tests. This is deprecated, use the "results" above instead.
-                "failed_tests": [test[1] for test in failed_tests],
+                "failed_tests": [test[0] for test in failed_tests],
             },
             f,
             default=to_json,
@@ -1350,7 +1355,8 @@ the last matching regex is used:
     try:
         os.makedirs(args.result_dir, exist_ok=True)
-        res = run_tests(pyb, tests, args, args.result_dir, args.jobs)
+        test_results, testcase_count = run_tests(pyb, tests, args, args.result_dir, args.jobs)
+        res = create_test_report(args, test_results, testcase_count)
     finally:
         if pyb:
             pyb.close()
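Taken together, the file that create_test_report writes (under the RESULTS_FILE name, which this diff does not show) should now look roughly like this; the values are illustrative only:

    {
        "args": {"result_dir": "results", "jobs": 1},
        "results": [
            ["float/complex1.py", "pass", ""],
            ["extmod/random_extra_float.py", "skip", ""]
        ],
        "failed_tests": []
    }

Since the tuples are now stored unsliced, "results" no longer carries the old test-name column; the deprecated "failed_tests" list still holds file paths, which moved from index 1 to index 0 of each tuple.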