path: root/tests/run-tests

#! /usr/bin/env python3

import os
import subprocess
import sys
import argparse
from glob import glob

# Tests require at least CPython 3.3. If your default python3 executable is
# an older version, point the MICROPY_CPYTHON3 environment variable at the
# correct executable.
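# e.g. MICROPY_CPYTHON3=python3.3 ./run-tests
# MICROPY_MICROPYTHON can likewise point at a different micropython binary.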
if os.name == 'nt':
    CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3.exe')
    MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../windows/micropython.exe')
else:
    CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3')
    MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../unix/micropython')

def rm_f(fname):
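    # remove the file if it exists; a missing file is not an error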
    if os.path.exists(fname):
        os.remove(fname)

def run_tests(pyb, tests):
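    # Run each test under Micro Python and compare its output against the
    # expected output, which comes from a .exp file if one exists, otherwise
    # from running the test under CPython.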
    test_count = 0
    testcase_count = 0
    passed_count = 0
    failed_tests = []
    skipped_tests = []

    running_under_travis = os.getenv('TRAVIS') == 'true'

    # Set of tests that we shouldn't run under Travis CI
    skip_travis_tests = set(['basics/memoryerror.py'])

    for test_file in tests:
        test_basename = os.path.basename(test_file)
        test_name = os.path.splitext(test_basename)[0]

        if running_under_travis and test_file in skip_travis_tests:
            print("skip ", test_file)
            skipped_tests.append(test_name)
            continue

        # get expected output
        test_file_expected = test_file + '.exp'
        if os.path.isfile(test_file_expected):
            # expected output given by a file, so read that in
            with open(test_file_expected, 'rb') as f:
                output_expected = f.read()
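                # on Windows the micropython output contains CRLF line
                # endings, so convert the expected output to match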
                if os.name == 'nt':
                    output_expected = output_expected.replace(b'\n', b'\r\n')
        else:
            # run CPython to work out expected output
            try:
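                # -B stops CPython from writing .pyc bytecode cache files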
                output_expected = subprocess.check_output([CPYTHON3, '-B', test_file])
            except subprocess.CalledProcessError:
                output_expected = b'CPYTHON3 CRASH'

        # run Micro Python
        if pyb is None:
            # run on PC
            try:
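                # -X emit=bytecode selects the bytecode emitter (as opposed
                # to the native or viper emitters)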
                output_mupy = subprocess.check_output([MICROPYTHON, '-X', 'emit=bytecode', test_file])
            except subprocess.CalledProcessError:
                output_mupy = b'CRASH'
        else:
            # run on pyboard
            pyb.enter_raw_repl()
            try:
                output_mupy = pyb.execfile(test_file).replace(b'\r\n', b'\n')
            except pyboard.PyboardError:
                output_mupy = b'CRASH'

        # a test requests to be skipped by printing exactly "SKIP"
        if output_mupy == b'SKIP\n':
            print("skip ", test_file)
            skipped_tests.append(test_name)
            continue

        testcase_count += len(output_expected.splitlines())

        filename_expected = test_basename + ".exp"
        filename_mupy = test_basename + ".out"

        if output_expected == output_mupy:
            print("pass ", test_file)
            passed_count += 1
            rm_f(filename_expected)
            rm_f(filename_mupy)
        else:
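            # on failure, save both outputs so they can be inspected and diffed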
            with open(filename_expected, "w") as f:
                f.write(str(output_expected, "ascii"))
            with open(filename_mupy, "w") as f:
                f.write(str(output_mupy, "ascii"))
            print("FAIL ", test_file)
            failed_tests.append(test_name)

        test_count += 1

    print("{} tests performed ({} individual testcases)".format(test_count, testcase_count))
    print("{} tests passed".format(passed_count))

    if len(skipped_tests) > 0:
        print("{} tests skipped: {}".format(len(skipped_tests), ' '.join(skipped_tests)))
    if len(failed_tests) > 0:
        print("{} tests failed: {}".format(len(failed_tests), ' '.join(failed_tests)))
        return False

    # all tests succeeded
    return True

def main():
    cmd_parser = argparse.ArgumentParser(description='Run tests for Micro Python.')
    cmd_parser.add_argument('--pyboard', action='store_true', help='run the tests on the pyboard')
    cmd_parser.add_argument('-d', '--test-dirs', nargs='*', help='input test directories (if no files given)')
    cmd_parser.add_argument('files', nargs='*', help='input test files')
    args = cmd_parser.parse_args()

    if args.pyboard:
        import pyboard
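        # assume the pyboard is connected as /dev/ttyACM0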
        pyb = pyboard.Pyboard('/dev/ttyACM0')
        pyb.enter_raw_repl()
    else:
        pyb = None

    if len(args.files) == 0:
        if args.test_dirs is None:
            if pyb is None:
                # run PC tests
                test_dirs = ('basics', 'micropython', 'float', 'import', 'io', 'misc')
            else:
                # run pyboard tests
                test_dirs = ('basics', 'float', 'pyb', 'pybnative', 'inlineasm')
        else:
            # run tests from these directories
            test_dirs = args.test_dirs
        tests = sorted(
            test_file
            for test_dir in test_dirs
            for test_file in glob('{}/*.py'.format(test_dir))
        )
    else:
        # tests explicitly given
        tests = args.files

    if not run_tests(pyb, tests):
        sys.exit(1)

if __name__ == "__main__":
    main()