tests/run-multitests.py: Support outputting test metrics.

If a multitest calls `multitest.output_metric(...)` then that output is
collected separately: it is not considered part of the test verification
output and is instead printed at the end of the run.  This is useful for
tests that want to report performance/timing metrics that may change from
one run to the next.
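
For example (a minimal sketch -- the file name, metric string and timing
loop below are hypothetical and not part of this commit; it only assumes
the existing `multitest.next()` helper), a two-instance test could report a
run-dependent figure while keeping its verified output deterministic:

    # perf_example.py -- hypothetical multitest
    import time

    def instance0():
        multitest.next()
        t0 = time.time()
        count = 0
        while time.time() - t0 < 1:
            count += 1
        # Collected by run-multitests.py and printed at the end of the run,
        # never diffed against the truth output.
        multitest.output_metric("iterations_per_second={}".format(count))
        print("done")  # this line is still part of the verified output

    def instance1():
        multitest.next()
        print("done")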

Signed-off-by: Damien George <damien@micropython.org>
pull/11052/head
Damien George 2023-03-17 13:55:35 +11:00
parent 31e7a0587d
commit af42634866
1 changed file with 16 additions and 3 deletions

@@ -93,6 +93,9 @@ class multitest:
     @staticmethod
     def expect_reboot(resume, delay_ms=0):
         print("WAIT_FOR_REBOOT", resume, delay_ms)
+    @staticmethod
+    def output_metric(data):
+        print("OUTPUT_METRIC", data)

 {}
@@ -312,6 +315,7 @@ def run_test_on_instances(test_file, num_instances, instances):
     skip = False
     injected_globals = ""
     output = [[] for _ in range(num_instances)]
+    output_metrics = []

     # If the test calls get_network_ip() then inject HOST_IP so that devices can know
     # the IP address of the host. Do this lazily to not require a TCP/IP connection
@@ -400,6 +404,8 @@ def run_test_on_instances(test_file, num_instances, instances):
                     for instance2 in instances:
                         if instance2 is not instance:
                             instance2.write(bytes(out, "ascii") + b"\r\n")
+                elif out.startswith("OUTPUT_METRIC "):
+                    output_metrics.append(out.split(" ", 1)[1])
                 else:
                     output[idx].append(out)
             if err is not None:
@@ -421,7 +427,7 @@ def run_test_on_instances(test_file, num_instances, instances):
         output_str += "--- instance{} ---\n".format(idx)
         output_str += "\n".join(lines) + "\n"

-    return error, skip, output_str
+    return error, skip, output_str, output_metrics


 def wait_for_reboot(instance, extra_timeout_ms=0):
@@ -481,7 +487,9 @@ def run_tests(test_files, instances_truth, instances_test):
         sys.stdout.flush()

         # Run test on test instances
-        error, skip, output_test = run_test_on_instances(test_file, num_instances, instances_test)
+        error, skip, output_test, output_metrics = run_test_on_instances(
+            test_file, num_instances, instances_test
+        )

         if not skip:
             # Check if truth exists in a file, and read it in
@@ -491,7 +499,7 @@ def run_tests(test_files, instances_truth, instances_test):
                     output_truth = f.read()
             else:
                 # Run test on truth instances to get expected output
-                _, _, output_truth = run_test_on_instances(
+                _, _, output_truth, _ = run_test_on_instances(
                     test_file, num_instances, instances_truth
                 )
@@ -520,6 +528,11 @@ def run_tests(test_files, instances_truth, instances_test):
                 print("### DIFF ###")
                 print_diff(output_truth, output_test)

+        # Print test output metrics, if there are any.
+        if output_metrics:
+            for metric in output_metrics:
+                print(test_file, ": ", metric, sep="")
+
         if cmd_args.show_output:
             print()
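
With a hypothetical test like the sketch above, the metric line produced by
print(test_file, ": ", metric, sep="") would appear after the test's
pass/fail result, e.g.:

    perf_example.py: iterations_per_second=123456

The exact value differs from run to run, which is why it is excluded from
the output comparison.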