summary | refs | log | tree | commit | diff | stats | homepage
path: root/tests/run-multitests.py
diff options
context:
space:
mode:
authorDamien George <damien@micropython.org>2023-03-17 13:55:35 +1100
committerDamien George <damien@micropython.org>2023-03-23 13:18:52 +1100
commitaf426348661d2b6c3f6bbbdf6ae493f505ddee01 (patch)
tree69a4d82807b13e50380cba2fcd0f099f7637ebff /tests/run-multitests.py
parent31e7a0587d7eabe668101d26b26a0190120465c8 (diff)
downloadmicropython-af426348661d2b6c3f6bbbdf6ae493f505ddee01.tar.gz
micropython-af426348661d2b6c3f6bbbdf6ae493f505ddee01.zip
tests/run-multitests.py: Support outputting test metrics.
If a multitest calls `multitest.output_metric(...)` then that output will be collected separately, not considered as part of the test verification output, and instead be printed at the end. This is useful for tests that want to output performance/timing metrics that may change from one run to the next. Signed-off-by: Damien George <damien@micropython.org>
Diffstat (limited to 'tests/run-multitests.py')
-rwxr-xr-x  tests/run-multitests.py  19
1 file changed, 16 insertions, 3 deletions
diff --git a/tests/run-multitests.py b/tests/run-multitests.py
index b316ce69f6..81db31ea94 100755
--- a/tests/run-multitests.py
+++ b/tests/run-multitests.py
@@ -93,6 +93,9 @@ class multitest:
@staticmethod
def expect_reboot(resume, delay_ms=0):
print("WAIT_FOR_REBOOT", resume, delay_ms)
+ @staticmethod
+ def output_metric(data):
+ print("OUTPUT_METRIC", data)
{}
@@ -312,6 +315,7 @@ def run_test_on_instances(test_file, num_instances, instances):
skip = False
injected_globals = ""
output = [[] for _ in range(num_instances)]
+ output_metrics = []
# If the test calls get_network_ip() then inject HOST_IP so that devices can know
# the IP address of the host. Do this lazily to not require a TCP/IP connection
@@ -400,6 +404,8 @@ def run_test_on_instances(test_file, num_instances, instances):
for instance2 in instances:
if instance2 is not instance:
instance2.write(bytes(out, "ascii") + b"\r\n")
+ elif out.startswith("OUTPUT_METRIC "):
+ output_metrics.append(out.split(" ", 1)[1])
else:
output[idx].append(out)
if err is not None:
@@ -421,7 +427,7 @@ def run_test_on_instances(test_file, num_instances, instances):
output_str += "--- instance{} ---\n".format(idx)
output_str += "\n".join(lines) + "\n"
- return error, skip, output_str
+ return error, skip, output_str, output_metrics
def wait_for_reboot(instance, extra_timeout_ms=0):
@@ -481,7 +487,9 @@ def run_tests(test_files, instances_truth, instances_test):
sys.stdout.flush()
# Run test on test instances
- error, skip, output_test = run_test_on_instances(test_file, num_instances, instances_test)
+ error, skip, output_test, output_metrics = run_test_on_instances(
+ test_file, num_instances, instances_test
+ )
if not skip:
# Check if truth exists in a file, and read it in
@@ -491,7 +499,7 @@ def run_tests(test_files, instances_truth, instances_test):
output_truth = f.read()
else:
# Run test on truth instances to get expected output
- _, _, output_truth = run_test_on_instances(
+ _, _, output_truth, _ = run_test_on_instances(
test_file, num_instances, instances_truth
)
@@ -520,6 +528,11 @@ def run_tests(test_files, instances_truth, instances_test):
print("### DIFF ###")
print_diff(output_truth, output_test)
+ # Print test output metrics, if there are any.
+ if output_metrics:
+ for metric in output_metrics:
+ print(test_file, ": ", metric, sep="")
+
if cmd_args.show_output:
print()