author     Paul Sokolovsky <pfalcon@users.sourceforge.net>   2014-05-05 01:24:16 +0300
committer  Paul Sokolovsky <pfalcon@users.sourceforge.net>   2014-05-05 01:24:16 +0300
commit     aaff82afe5a72ec69e05f1e56047d0acfde91d0e (patch)
tree       7bd8dc3cf156b790b66c05b2222bd362bdbce3d8
parent     22a0d67c0fc7daf18280d3b7e938be8442102110 (diff)
download   micropython-aaff82afe5a72ec69e05f1e56047d0acfde91d0e.tar.gz
           micropython-aaff82afe5a72ec69e05f1e56047d0acfde91d0e.zip
tests: Add framework for comparative benchmarking.
Motivation is optimizing handling of various constructs as well as understanding which constructs are more efficient in MicroPython.

More info: http://forum.micropython.org/viewtopic.php?f=3&t=77

Results are wildly unexpected. For example, "optimization" of range iteration into a while loop makes it twice as slow. Generally, the more bytecodes, the slower the code.
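Every benchmark added in this commit follows the same minimal pattern: import the shared bench module, define a test(num) function, and hand it to bench.run(), which times a fixed number of iterations and prints the elapsed seconds. A hypothetical extra variant (the file name is only an illustration, not part of this commit) would look like this:

    # tests/bench/loop_count-6-while_down_ge.py (hypothetical file name, for illustration only)
    import bench

    def test(num):
        # Count down with >=, another point of comparison against the
        # while_down_gt and while_down_ne variants added in this commit.
        while num >= 1:
            num -= 1

    bench.run(test)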
-rw-r--r--  tests/bench/bench.py                        10
-rw-r--r--  tests/bench/loop_count-1-range.py            7
-rw-r--r--  tests/bench/loop_count-2-range_iter.py       7
-rw-r--r--  tests/bench/loop_count-3-while_up.py         8
-rw-r--r--  tests/bench/loop_count-4-while_down_gt.py    7
-rw-r--r--  tests/bench/loop_count-5-while_down_ne.py    7
-rw-r--r--  tests/bench/var-1-constant.py                8
-rw-r--r--  tests/bench/var-2-global.py                 10
-rw-r--r--  tests/bench/var-3-local.py                  10
-rw-r--r--  tests/bench/var-4-arg.py                     9
-rwxr-xr-x  tests/run-bench-tests                       97
11 files changed, 180 insertions, 0 deletions
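The run-bench-tests script added below groups these files by the part of the name before the first "-" (loop_count, var), runs each variant, and prints its time next to the percentage difference from the first variant in the group, which acts as the baseline. A rough sketch of that per-group reporting step, using a hypothetical helper name and assuming a list of (filename, seconds) pairs:

    # Sketch of the per-group report produced by run_tests() in run-bench-tests below;
    # the first variant in a group is the baseline, so its line always reads +00.00%.
    def report(group):
        # group: list of (filename, seconds) pairs for one benchmark family
        baseline = group[0][1]
        for name, seconds in group:
            print("    %.3fs (%+06.2f%%) %s" % (seconds, (seconds * 100 / baseline) - 100, name))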
diff --git a/tests/bench/bench.py b/tests/bench/bench.py
new file mode 100644
index 0000000000..0cd40a93fc
--- /dev/null
+++ b/tests/bench/bench.py
@@ -0,0 +1,10 @@
+import time
+
+
+ITERS = 20000000
+
+def run(f):
+    t = time.time()
+    f(ITERS)
+    t = time.time() - t
+    print(t)
diff --git a/tests/bench/loop_count-1-range.py b/tests/bench/loop_count-1-range.py
new file mode 100644
index 0000000000..e22adf6cbe
--- /dev/null
+++ b/tests/bench/loop_count-1-range.py
@@ -0,0 +1,7 @@
+import bench
+
+def test(num):
+    for i in range(num):
+        pass
+
+bench.run(test)
diff --git a/tests/bench/loop_count-2-range_iter.py b/tests/bench/loop_count-2-range_iter.py
new file mode 100644
index 0000000000..fe4a3857e1
--- /dev/null
+++ b/tests/bench/loop_count-2-range_iter.py
@@ -0,0 +1,7 @@
+import bench
+
+def test(num):
+    for i in iter(range(num)):
+        pass
+
+bench.run(test)
diff --git a/tests/bench/loop_count-3-while_up.py b/tests/bench/loop_count-3-while_up.py
new file mode 100644
index 0000000000..1ab8054a0f
--- /dev/null
+++ b/tests/bench/loop_count-3-while_up.py
@@ -0,0 +1,8 @@
+import bench
+
+def test(num):
+    i = 0
+    while i < num:
+        i += 1
+
+bench.run(test)
diff --git a/tests/bench/loop_count-4-while_down_gt.py b/tests/bench/loop_count-4-while_down_gt.py
new file mode 100644
index 0000000000..de8dee2ca9
--- /dev/null
+++ b/tests/bench/loop_count-4-while_down_gt.py
@@ -0,0 +1,7 @@
+import bench
+
+def test(num):
+    while num > 0:
+        num -= 1
+
+bench.run(test)
diff --git a/tests/bench/loop_count-5-while_down_ne.py b/tests/bench/loop_count-5-while_down_ne.py
new file mode 100644
index 0000000000..b9a1af414b
--- /dev/null
+++ b/tests/bench/loop_count-5-while_down_ne.py
@@ -0,0 +1,7 @@
+import bench
+
+def test(num):
+    while num != 0:
+        num -= 1
+
+bench.run(test)
diff --git a/tests/bench/var-1-constant.py b/tests/bench/var-1-constant.py
new file mode 100644
index 0000000000..eec977909c
--- /dev/null
+++ b/tests/bench/var-1-constant.py
@@ -0,0 +1,8 @@
+import bench
+
+def test(num):
+    i = 0
+    while i < 20000000:
+        i += 1
+
+bench.run(test)
diff --git a/tests/bench/var-2-global.py b/tests/bench/var-2-global.py
new file mode 100644
index 0000000000..5758ad61aa
--- /dev/null
+++ b/tests/bench/var-2-global.py
@@ -0,0 +1,10 @@
+import bench
+
+ITERS = 20000000
+
+def test(num):
+    i = 0
+    while i < ITERS:
+        i += 1
+
+bench.run(test)
diff --git a/tests/bench/var-3-local.py b/tests/bench/var-3-local.py
new file mode 100644
index 0000000000..124b484295
--- /dev/null
+++ b/tests/bench/var-3-local.py
@@ -0,0 +1,10 @@
+import bench
+
+
+def test(num):
+    ITERS = 20000000
+    i = 0
+    while i < ITERS:
+        i += 1
+
+bench.run(test)
diff --git a/tests/bench/var-4-arg.py b/tests/bench/var-4-arg.py
new file mode 100644
index 0000000000..cf050c58fd
--- /dev/null
+++ b/tests/bench/var-4-arg.py
@@ -0,0 +1,9 @@
+import bench
+
+
+def test(num):
+    i = 0
+    while i < num:
+        i += 1
+
+bench.run(lambda n:test(20000000))
diff --git a/tests/run-bench-tests b/tests/run-bench-tests
new file mode 100755
index 0000000000..59074bb877
--- /dev/null
+++ b/tests/run-bench-tests
@@ -0,0 +1,97 @@
+#! /usr/bin/env python3
+
+import os
+import subprocess
+import sys
+import argparse
+import re
+from glob import glob
+from collections import defaultdict
+
+# Tests require at least CPython 3.3. If your default python3 executable
+# is of lower version, you can point MICROPY_CPYTHON3 environment var
+# to the correct executable.
+if os.name == 'nt':
+    CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3.exe')
+    MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../windows/micropython.exe')
+else:
+    CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3')
+    MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../unix/micropython')
+
+def run_tests(pyb, test_dict):
+    test_count = 0
+    testcase_count = 0
+
+    for base_test, tests in test_dict.items():
+        print(base_test + ":")
+        for test_file in tests:
+
+            # run Micro Python
+            if pyb is None:
+                # run on PC
+                try:
+                    output_mupy = subprocess.check_output([MICROPYTHON, '-X', 'emit=bytecode', test_file[0]])
+                except subprocess.CalledProcessError:
+                    output_mupy = b'CRASH'
+            else:
+                # run on pyboard
+                pyb.enter_raw_repl()
+                try:
+                    output_mupy = pyb.execfile(test_file).replace(b'\r\n', b'\n')
+                except pyboard.PyboardError:
+                    output_mupy = b'CRASH'
+
+            output_mupy = float(output_mupy.strip())
+            test_file[1] = output_mupy
+            testcase_count += 1
+
+        test_count += 1
+        baseline = None
+        for t in tests:
+            if baseline is None:
+                baseline = t[1]
+            print("    %.3fs (%+06.2f%%) %s" % (t[1], (t[1] * 100 / baseline) - 100, t[0]))
+
+ print("{} tests performed ({} individual testcases)".format(test_count, testcase_count))
+
+    # all tests succeeded
+    return True
+
+def main():
+    cmd_parser = argparse.ArgumentParser(description='Run tests for Micro Python.')
+    cmd_parser.add_argument('--pyboard', action='store_true', help='run the tests on the pyboard')
+    cmd_parser.add_argument('files', nargs='*', help='input test files')
+    args = cmd_parser.parse_args()
+
+    # Note pyboard support is copied over from run-tests, not tested, and likely needs revamping
+    if args.pyboard:
+        import pyboard
+        pyb = pyboard.Pyboard('/dev/ttyACM0')
+        pyb.enter_raw_repl()
+    else:
+        pyb = None
+
+    if len(args.files) == 0:
+        if pyb is None:
+            # run PC tests
+            test_dirs = ('bench',)
+        else:
+            # run pyboard tests
+            test_dirs = ('basics', 'float', 'pyb')
+        tests = sorted(test_file for test_files in (glob('{}/*.py'.format(dir)) for dir in test_dirs) for test_file in test_files)
+    else:
+        # tests explicitly given
+        tests = sorted(args.files)
+
+    test_dict = defaultdict(lambda: [])
+    for t in tests:
+        m = re.match(r"(.+?)-(.+)\.py", t)
+        if not m:
+            continue
+        test_dict[m.group(1)].append([t, None])
+
+    if not run_tests(pyb, test_dict):
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()