Diffstat (limited to 'tests')
-rw-r--r--  tests/basics/ordereddict_eq.py | 44
-rw-r--r--  tests/basics/ordereddict_eq.py.exp | 15
-rw-r--r--  tests/bench/bytealloc-1-bytes_n.py | 7
-rw-r--r--  tests/bench/bytealloc-2-repeat.py | 7
-rw-r--r--  tests/extmod/btree1.py | 59
-rw-r--r--  tests/extmod/btree1.py.exp | 32
-rw-r--r--  tests/extmod/machine_pinbase.py | 25
-rw-r--r--  tests/extmod/machine_pinbase.py.exp | 9
-rw-r--r--  tests/micropython/const.py | 12
-rw-r--r--  tests/micropython/const.py.exp | 3
-rw-r--r--  tests/micropython/meminfo.py.exp | 4
-rw-r--r--  tests/misc/recursive_iternext.py | 2
-rwxr-xr-x  tests/run-tests | 7
-rw-r--r--  tests/thread/mutate_bytearray.py | 45
-rw-r--r--  tests/thread/mutate_dict.py | 42
-rw-r--r--  tests/thread/mutate_instance.py | 43
-rw-r--r--  tests/thread/mutate_list.py | 44
-rw-r--r--  tests/thread/mutate_set.py | 37
-rw-r--r--  tests/thread/stress_aes.py | 255
-rw-r--r--  tests/thread/stress_heap.py | 42
-rw-r--r--  tests/thread/stress_recurse.py | 25
-rw-r--r--  tests/thread/thread_exc1.py | 30
-rw-r--r--  tests/thread/thread_exit1.py | 19
-rw-r--r--  tests/thread/thread_exit2.py | 19
-rw-r--r--  tests/thread/thread_gc1.py | 34
-rw-r--r--  tests/thread/thread_ident1.py | 21
-rw-r--r--  tests/thread/thread_lock1.py | 40
-rw-r--r--  tests/thread/thread_lock2.py | 24
-rw-r--r--  tests/thread/thread_lock3.py | 27
-rw-r--r--  tests/thread/thread_lock4.py | 46
-rw-r--r--  tests/thread/thread_qstr1.py | 35
-rw-r--r--  tests/thread/thread_shared1.py | 31
-rw-r--r--  tests/thread/thread_shared2.py | 32
-rw-r--r--  tests/thread/thread_sleep1.py | 31
-rw-r--r--  tests/thread/thread_stacksize1.py | 44
-rw-r--r--  tests/thread/thread_start1.py | 23
-rw-r--r--  tests/thread/thread_start2.py | 19
37 files changed, 1231 insertions, 3 deletions
diff --git a/tests/basics/ordereddict_eq.py b/tests/basics/ordereddict_eq.py
new file mode 100644
index 0000000000..2746608772
--- /dev/null
+++ b/tests/basics/ordereddict_eq.py
@@ -0,0 +1,44 @@
+try:
+ from collections import OrderedDict
+except ImportError:
+ try:
+ from ucollections import OrderedDict
+ except ImportError:
+ print("SKIP")
+ import sys
+ sys.exit()
+
+x = OrderedDict()
+y = OrderedDict()
+x['a'] = 1
+x['b'] = 2
+y['a'] = 1
+y['b'] = 2
+print(x)
+print(y)
+print(x == y)
+
+z = OrderedDict()
+z['b'] = 2
+z['a'] = 1
+print(y)
+print(z)
+print(y == z)
+
+del z['b']
+z['b'] = 2
+print(y)
+print(z)
+print(y == z)
+
+del x['a']
+del y['a']
+print(x)
+print(y)
+print(x == y)
+
+del z['b']
+del y['b']
+print(y)
+print(z)
+print(y == z)
diff --git a/tests/basics/ordereddict_eq.py.exp b/tests/basics/ordereddict_eq.py.exp
new file mode 100644
index 0000000000..892ba0e3bd
--- /dev/null
+++ b/tests/basics/ordereddict_eq.py.exp
@@ -0,0 +1,15 @@
+OrderedDict({'a': 1, 'b': 2})
+OrderedDict({'a': 1, 'b': 2})
+True
+OrderedDict({'a': 1, 'b': 2})
+OrderedDict({'b': 2, 'a': 1})
+False
+OrderedDict({'a': 1, 'b': 2})
+OrderedDict({'a': 1, 'b': 2})
+True
+OrderedDict({'b': 2})
+OrderedDict({'b': 2})
+True
+OrderedDict({})
+OrderedDict({'a': 1})
+False
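For reference, the behaviour this new test pins down is that OrderedDict equality is order-sensitive, whereas plain dict equality is not. A minimal CPython sketch of the property being exercised (illustration only, not part of the patch):

    from collections import OrderedDict

    a = OrderedDict([('a', 1), ('b', 2)])
    b = OrderedDict([('b', 2), ('a', 1)])

    print(a == b)              # False: OrderedDict comparison also checks key order
    print(dict(a) == dict(b))  # True: plain dicts compare by contents only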
diff --git a/tests/bench/bytealloc-1-bytes_n.py b/tests/bench/bytealloc-1-bytes_n.py
new file mode 100644
index 0000000000..4a4bbc6fae
--- /dev/null
+++ b/tests/bench/bytealloc-1-bytes_n.py
@@ -0,0 +1,7 @@
+import bench
+
+def test(num):
+ for i in iter(range(num // 1000)):
+ bytes(10000)
+
+bench.run(test)
diff --git a/tests/bench/bytealloc-2-repeat.py b/tests/bench/bytealloc-2-repeat.py
new file mode 100644
index 0000000000..786a804622
--- /dev/null
+++ b/tests/bench/bytealloc-2-repeat.py
@@ -0,0 +1,7 @@
+import bench
+
+def test(num):
+ for i in iter(range(num // 1000)):
+ b"\0" * 10000
+
+bench.run(test)
diff --git a/tests/extmod/btree1.py b/tests/extmod/btree1.py
new file mode 100644
index 0000000000..11acd7c98f
--- /dev/null
+++ b/tests/extmod/btree1.py
@@ -0,0 +1,59 @@
+try:
+ import btree
+except ImportError:
+ print("SKIP")
+ import sys
+ sys.exit()
+
+db = btree.open(None)
+db[b"foo3"] = b"bar3"
+db[b"foo1"] = b"bar1"
+db[b"foo2"] = b"bar2"
+db[b"bar1"] = b"foo1"
+
+print(db[b"foo2"])
+try:
+ print(db[b"foo"])
+except KeyError:
+ print("KeyError")
+print(db.get(b"foo"))
+print(db.get(b"foo", b"dflt"))
+
+del db[b"foo2"]
+try:
+ del db[b"foo"]
+except KeyError:
+ print("KeyError")
+
+for k, v in db.items():
+ print((k, v))
+
+print("---")
+for k, v in db.items(None, None):
+ print((k, v))
+
+print("---")
+for k, v in db.items(b"f"):
+ print((k, v))
+
+print("---")
+for k, v in db.items(b"f", b"foo3"):
+ print((k, v))
+
+print("---")
+for k, v in db.items(None, b"foo3"):
+ print((k, v))
+
+print("---")
+for k, v in db.items(b"f", b"foo3", btree.INCL):
+ print((k, v))
+
+print("---")
+for k, v in db.items(None, None, btree.DESC):
+ print((k, v))
+
+print(list(db.keys()))
+print(list(db.values()))
+
+for k in db:
+ print(k)
diff --git a/tests/extmod/btree1.py.exp b/tests/extmod/btree1.py.exp
new file mode 100644
index 0000000000..a266d7acfc
--- /dev/null
+++ b/tests/extmod/btree1.py.exp
@@ -0,0 +1,32 @@
+b'bar2'
+KeyError
+None
+b'dflt'
+KeyError
+(b'bar1', b'foo1')
+(b'foo1', b'bar1')
+(b'foo3', b'bar3')
+---
+(b'bar1', b'foo1')
+(b'foo1', b'bar1')
+(b'foo3', b'bar3')
+---
+(b'foo1', b'bar1')
+(b'foo3', b'bar3')
+---
+(b'foo1', b'bar1')
+---
+(b'bar1', b'foo1')
+(b'foo1', b'bar1')
+---
+(b'foo1', b'bar1')
+(b'foo3', b'bar3')
+---
+(b'foo3', b'bar3')
+(b'foo1', b'bar1')
+(b'bar1', b'foo1')
+[b'bar1', b'foo1', b'foo3']
+[b'foo1', b'bar1', b'bar3']
+b'bar1'
+b'foo1'
+b'foo3'
diff --git a/tests/extmod/machine_pinbase.py b/tests/extmod/machine_pinbase.py
new file mode 100644
index 0000000000..07a489a596
--- /dev/null
+++ b/tests/extmod/machine_pinbase.py
@@ -0,0 +1,25 @@
+try:
+ from umachine import PinBase
+except ImportError:
+ from machine import PinBase
+
+
+class MyPin(PinBase):
+
+ def __init__(self):
+ print("__init__")
+ self.v = False
+
+ def value(self, v=None):
+ print("value:", v)
+ if v is None:
+ self.v = not self.v
+ return int(self.v)
+
+p = MyPin()
+
+print(p.value())
+print(p.value())
+print(p.value())
+p.value(1)
+p.value(0)
diff --git a/tests/extmod/machine_pinbase.py.exp b/tests/extmod/machine_pinbase.py.exp
new file mode 100644
index 0000000000..b31cd98308
--- /dev/null
+++ b/tests/extmod/machine_pinbase.py.exp
@@ -0,0 +1,9 @@
+__init__
+value: None
+1
+value: None
+0
+value: None
+1
+value: 1
+value: 0
diff --git a/tests/micropython/const.py b/tests/micropython/const.py
index 457365c50a..09717fd147 100644
--- a/tests/micropython/const.py
+++ b/tests/micropython/const.py
@@ -9,3 +9,15 @@ def f():
print(X, Y + 1)
f()
+
+_X = const(12)
+_Y = const(_X + 34)
+
+print(_X, _Y)
+
+class A:
+ Z = const(1)
+ _Z = const(2)
+ print(Z, _Z)
+
+print(hasattr(A, 'Z'), hasattr(A, '_Z'))
diff --git a/tests/micropython/const.py.exp b/tests/micropython/const.py.exp
index c447aaf8c1..ece6a5cb2e 100644
--- a/tests/micropython/const.py.exp
+++ b/tests/micropython/const.py.exp
@@ -1,2 +1,5 @@
123 580
123 580
+12 46
+1 2
+True False
diff --git a/tests/micropython/meminfo.py.exp b/tests/micropython/meminfo.py.exp
index c1a662ae6b..a229a7fa4c 100644
--- a/tests/micropython/meminfo.py.exp
+++ b/tests/micropython/meminfo.py.exp
@@ -1,11 +1,11 @@
mem: total=\\d\+, current=\\d\+, peak=\\d\+
stack: \\d\+ out of \\d\+
GC: total: \\d\+, used: \\d\+, free: \\d\+
- No. of 1-blocks: \\d\+, 2-blocks: \\d\+, max blk sz: \\d\+
+ No. of 1-blocks: \\d\+, 2-blocks: \\d\+, max blk sz: \\d\+, max free sz: \\d\+
mem: total=\\d\+, current=\\d\+, peak=\\d\+
stack: \\d\+ out of \\d\+
GC: total: \\d\+, used: \\d\+, free: \\d\+
- No. of 1-blocks: \\d\+, 2-blocks: \\d\+, max blk sz: \\d\+
+ No. of 1-blocks: \\d\+, 2-blocks: \\d\+, max blk sz: \\d\+, max free sz: \\d\+
GC memory layout; from \[0-9a-f\]\+:
########
qstr pool: n_pool=1, n_qstr=\\d, n_str_data_bytes=\\d\+, n_total_bytes=\\d\+
diff --git a/tests/misc/recursive_iternext.py b/tests/misc/recursive_iternext.py
index 376c45b3c7..025fa425b5 100644
--- a/tests/misc/recursive_iternext.py
+++ b/tests/misc/recursive_iternext.py
@@ -5,7 +5,7 @@
try:
# large stack/heap, eg unix
[0] * 80000
- N = 2000
+ N = 2400
except:
try:
# medium, eg pyboard
diff --git a/tests/run-tests b/tests/run-tests
index 649f1789fa..02791896b5 100755
--- a/tests/run-tests
+++ b/tests/run-tests
@@ -201,6 +201,13 @@ def run_tests(pyb, tests, args):
skip_tests.add('float/true_value.py')
skip_tests.add('float/types.py')
+ # Some tests shouldn't be run on a PC
+ if pyb is None:
+ # unix build does not have the GIL so can't run thread mutation tests
+ for t in tests:
+ if t.startswith('thread/mutate_'):
+ skip_tests.add(t)
+
# Some tests shouldn't be run on pyboard
if pyb is not None:
skip_tests.add('basics/exception_chain.py') # warning is not printed
diff --git a/tests/thread/mutate_bytearray.py b/tests/thread/mutate_bytearray.py
new file mode 100644
index 0000000000..f3276f1b2d
--- /dev/null
+++ b/tests/thread/mutate_bytearray.py
@@ -0,0 +1,45 @@
+# test concurrent mutating access to a shared bytearray object
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+# the shared bytearray
+ba = bytearray()
+
+# main thread function
+def th(n, lo, hi):
+ for repeat in range(n):
+ for i in range(lo, hi):
+ l = len(ba)
+ ba.append(i)
+ assert len(ba) >= l + 1
+
+ l = len(ba)
+ ba.extend(bytearray([i]))
+ assert len(ba) >= l + 1
+
+ with lock:
+ global n_finished
+ n_finished += 1
+
+lock = _thread.allocate_lock()
+n_thread = 4
+n_finished = 0
+n_repeat = 4 # use 40 for more stressful test (uses more heap)
+
+# spawn threads
+for i in range(n_thread):
+ _thread.start_new_thread(th, (n_repeat, i * 256 // n_thread, (i + 1) * 256 // n_thread))
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
+
+# check bytearray has correct contents
+print(len(ba))
+count = [0 for _ in range(256)]
+for b in ba:
+ count[b] += 1
+print(count)
+
diff --git a/tests/thread/mutate_dict.py b/tests/thread/mutate_dict.py
new file mode 100644
index 0000000000..c57d332d51
--- /dev/null
+++ b/tests/thread/mutate_dict.py
@@ -0,0 +1,42 @@
+# test concurrent mutating access to a shared dict object
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+# the shared dict
+di = {'a':'A', 'b':'B', 'c':'C', 'd':'D'}
+
+# main thread function
+def th(n, lo, hi):
+ for repeat in range(n):
+ for i in range(lo, hi):
+ di[i] = repeat + i
+ assert di[i] == repeat + i
+
+ del di[i]
+ assert i not in di
+
+ di[i] = repeat + i
+ assert di[i] == repeat + i
+
+ assert di.pop(i) == repeat + i
+
+ with lock:
+ global n_finished
+ n_finished += 1
+
+lock = _thread.allocate_lock()
+n_thread = 4
+n_finished = 0
+
+# spawn threads
+for i in range(n_thread):
+ _thread.start_new_thread(th, (30, i * 300, (i + 1) * 300))
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
+
+# check dict has correct contents
+print(sorted(di.items()))
diff --git a/tests/thread/mutate_instance.py b/tests/thread/mutate_instance.py
new file mode 100644
index 0000000000..a1ae428b54
--- /dev/null
+++ b/tests/thread/mutate_instance.py
@@ -0,0 +1,43 @@
+# test concurrent mutating access to a shared user instance
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+# the shared user class and instance
+class User:
+ def __init__(self):
+ self.a = 'A'
+ self.b = 'B'
+ self.c = 'C'
+user = User()
+
+# main thread function
+def th(n, lo, hi):
+ for repeat in range(n):
+ for i in range(lo, hi):
+ setattr(user, 'attr_%u' % i, repeat + i)
+ assert getattr(user, 'attr_%u' % i) == repeat + i
+
+ with lock:
+ global n_finished
+ n_finished += 1
+
+lock = _thread.allocate_lock()
+n_repeat = 30
+n_range = 50 # 300 for stressful test (uses more heap)
+n_thread = 4
+n_finished = 0
+
+# spawn threads
+for i in range(n_thread):
+ _thread.start_new_thread(th, (n_repeat, i * n_range, (i + 1) * n_range))
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
+
+# check user instance has correct contents
+print(user.a, user.b, user.c)
+for i in range(n_thread * n_range):
+ assert getattr(user, 'attr_%u' % i) == n_repeat - 1 + i
diff --git a/tests/thread/mutate_list.py b/tests/thread/mutate_list.py
new file mode 100644
index 0000000000..764a9bd99e
--- /dev/null
+++ b/tests/thread/mutate_list.py
@@ -0,0 +1,44 @@
+# test concurrent mutating access to a shared list object
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+# the shared list
+li = list()
+
+# main thread function
+def th(n, lo, hi):
+ for repeat in range(n):
+ for i in range(lo, hi):
+ li.append(i)
+ assert li.count(i) == repeat + 1
+
+ li.extend([i, i])
+ assert li.count(i) == repeat + 3
+
+ li.remove(i)
+ assert li.count(i) == repeat + 2
+
+ li.remove(i)
+ assert li.count(i) == repeat + 1
+
+ with lock:
+ global n_finished
+ n_finished += 1
+
+lock = _thread.allocate_lock()
+n_thread = 4
+n_finished = 0
+
+# spawn threads
+for i in range(n_thread):
+ _thread.start_new_thread(th, (4, i * 60, (i + 1) * 60))
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
+
+# check list has correct contents
+li.sort()
+print(li)
diff --git a/tests/thread/mutate_set.py b/tests/thread/mutate_set.py
new file mode 100644
index 0000000000..5492d86313
--- /dev/null
+++ b/tests/thread/mutate_set.py
@@ -0,0 +1,37 @@
+# test concurrent mutating access to a shared set object
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+# the shared set
+se = set([-1, -2, -3, -4])
+
+# main thread function
+def th(n, lo, hi):
+ for repeat in range(n):
+ for i in range(lo, hi):
+ se.add(i)
+ assert i in se
+
+ se.remove(i)
+ assert i not in se
+
+ with lock:
+ global n_finished
+ n_finished += 1
+
+lock = _thread.allocate_lock()
+n_thread = 4
+n_finished = 0
+
+# spawn threads
+for i in range(n_thread):
+ _thread.start_new_thread(th, (50, i * 500, (i + 1) * 500))
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
+
+# check set has correct contents
+print(sorted(se))
diff --git a/tests/thread/stress_aes.py b/tests/thread/stress_aes.py
new file mode 100644
index 0000000000..ecc963c925
--- /dev/null
+++ b/tests/thread/stress_aes.py
@@ -0,0 +1,255 @@
+# Stress test for threads using AES encryption routines.
+#
+# AES was chosen because it is integer based and in-place so doesn't use the
+# heap. It is therefore a good test of raw performance and correctness of the
+# VM/runtime. It can be used to measure threading performance (concurrency is
+# in principle possible) and correctness (it's non-trivial for the encryption/
+# decryption to give the correct answer).
+#
+# The AES code comes first (code originates from a C version authored by D.P.George)
+# and then the test harness at the bottom. It can be tuned to be more/less
+# aggressive by changing the amount of data to encrypt, the number of loops and
+# the number of threads.
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+##################################################################
+# discrete arithmetic routines, mostly from a precomputed table
+
+# non-linear, invertible, substitution box
+aes_s_box_table = bytes((
+ 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
+ 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
+ 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
+ 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
+ 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
+ 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
+ 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
+ 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
+ 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
+ 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
+ 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
+ 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
+ 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
+ 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
+ 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
+ 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16,
+))
+
+# multiplication of polynomials modulo x^8 + x^4 + x^3 + x + 1 = 0x11b
+def aes_gf8_mul_2(x):
+ if x & 0x80:
+ return (x << 1) ^ 0x11b
+ else:
+ return x << 1
+
+def aes_gf8_mul_3(x):
+ return x ^ aes_gf8_mul_2(x)
+
+# non-linear, invertible, substitution box
+def aes_s_box(a):
+ return aes_s_box_table[a & 0xff]
+
+# return 0x02^(a-1) in GF(2^8)
+def aes_r_con(a):
+ ans = 1
+ while a > 1:
+ ans <<= 1;
+ if ans & 0x100:
+ ans ^= 0x11b
+ a -= 1
+ return ans
+
+##################################################################
+# basic AES algorithm; see FIPS-197
+#
+# Think of it as a pseudo random number generator, with each
+# symbol in the sequence being a 16 byte block (the state). The
+# key is a parameter of the algorithm and tells which particular
+# sequence of random symbols you want. The initial vector, IV,
+# sets the start of the sequence. The idea of a strong cipher
+# is that it's very difficult to guess the key even if you know
+# a large part of the sequence. The basic AES algorithm simply
+# provides such a sequence. En/de-cryption is implemented here
+# using OFB, where the sequence is XORed against the plaintext.
+# Care must be taken to (almost) always choose a different IV.
+
+# all inputs must be size 16
+def aes_add_round_key(state, w):
+ for i in range(16):
+ state[i] ^= w[i]
+
+# combined sub_bytes, shift_rows, mix_columns, add_round_key
+# all inputs must be size 16
+def aes_sb_sr_mc_ark(state, w, w_idx, temp):
+ temp_idx = 0
+ for i in range(4):
+ x0 = aes_s_box_table[state[i * 4]]
+ x1 = aes_s_box_table[state[1 + ((i + 1) & 3) * 4]]
+ x2 = aes_s_box_table[state[2 + ((i + 2) & 3) * 4]]
+ x3 = aes_s_box_table[state[3 + ((i + 3) & 3) * 4]]
+ temp[temp_idx] = aes_gf8_mul_2(x0) ^ aes_gf8_mul_3(x1) ^ x2 ^ x3 ^ w[w_idx]
+ temp[temp_idx + 1] = x0 ^ aes_gf8_mul_2(x1) ^ aes_gf8_mul_3(x2) ^ x3 ^ w[w_idx + 1]
+ temp[temp_idx + 2] = x0 ^ x1 ^ aes_gf8_mul_2(x2) ^ aes_gf8_mul_3(x3) ^ w[w_idx + 2]
+ temp[temp_idx + 3] = aes_gf8_mul_3(x0) ^ x1 ^ x2 ^ aes_gf8_mul_2(x3) ^ w[w_idx + 3]
+ w_idx += 4
+ temp_idx += 4
+ for i in range(16):
+ state[i] = temp[i]
+
+# combined sub_bytes, shift_rows, add_round_key
+# all inputs must be size 16
+def aes_sb_sr_ark(state, w, w_idx, temp):
+ temp_idx = 0
+ for i in range(4):
+ x0 = aes_s_box_table[state[i * 4]]
+ x1 = aes_s_box_table[state[1 + ((i + 1) & 3) * 4]]
+ x2 = aes_s_box_table[state[2 + ((i + 2) & 3) * 4]]
+ x3 = aes_s_box_table[state[3 + ((i + 3) & 3) * 4]]
+ temp[temp_idx] = x0 ^ w[w_idx]
+ temp[temp_idx + 1] = x1 ^ w[w_idx + 1]
+ temp[temp_idx + 2] = x2 ^ w[w_idx + 2]
+ temp[temp_idx + 3] = x3 ^ w[w_idx + 3]
+ w_idx += 4
+ temp_idx += 4
+ for i in range(16):
+ state[i] = temp[i]
+
+# take state as input and change it to the next state in the sequence
+# state and temp have size 16, w has size 16 * (Nr + 1), Nr >= 1
+def aes_state(state, w, temp, nr):
+ aes_add_round_key(state, w)
+ w_idx = 16
+ for i in range(nr - 1):
+ aes_sb_sr_mc_ark(state, w, w_idx, temp)
+ w_idx += 16
+ aes_sb_sr_ark(state, w, w_idx, temp)
+
+# expand 'key' to 'w' for use with aes_state
+# key has size 4 * Nk, w has size 16 * (Nr + 1), temp has size 16
+def aes_key_expansion(key, w, temp, nk, nr):
+ for i in range(4 * nk):
+ w[i] = key[i]
+ w_idx = 4 * nk - 4
+ for i in range(nk, 4 * (nr + 1)):
+ t = temp
+ t_idx = 0
+ if i % nk == 0:
+ t[0] = aes_s_box(w[w_idx + 1]) ^ aes_r_con(i // nk)
+ for j in range(1, 4):
+ t[j] = aes_s_box(w[w_idx + (j + 1) % 4])
+ elif nk > 6 and i % nk == 4:
+ for j in range(0, 4):
+ t[j] = aes_s_box(w[w_idx + j])
+ else:
+ t = w
+ t_idx = w_idx
+ w_idx += 4
+ for j in range(4):
+ w[w_idx + j] = w[w_idx + j - 4 * nk] ^ t[t_idx + j]
+
+##################################################################
+# simple use of AES algorithm, using output feedback (OFB) mode
+
+class AES:
+ def __init__(self, keysize):
+ if keysize == 128:
+ self.nk = 4
+ self.nr = 10
+ elif keysize == 192:
+ self.nk = 6
+ self.nr = 12
+ else:
+ assert keysize == 256
+ self.nk = 8
+ self.nr = 14
+
+ self.state = bytearray(16)
+ self.w = bytearray(16 * (self.nr + 1))
+ self.temp = bytearray(16)
+ self.state_pos = 16
+
+ def set_key(self, key):
+ aes_key_expansion(key, self.w, self.temp, self.nk, self.nr)
+ self.state_pos = 16
+
+ def set_iv(self, iv):
+ for i in range(16):
+ self.state[i] = iv[i]
+ self.state_pos = 16;
+
+ def get_some_state(self, n_needed):
+ if self.state_pos >= 16:
+ aes_state(self.state, self.w, self.temp, self.nr)
+ self.state_pos = 0
+ n = 16 - self.state_pos
+ if n > n_needed:
+ n = n_needed
+ return n
+
+ def apply_to(self, data):
+ idx = 0
+ n = len(data)
+ while n > 0:
+ ln = self.get_some_state(n)
+ n -= ln
+ for i in range(ln):
+ data[idx + i] ^= self.state[self.state_pos + i]
+ idx += ln
+ self.state_pos += n
+
+##################################################################
+# test code
+
+try:
+ import utime as time
+except ImportError:
+ import time
+import _thread
+
+class LockedCounter:
+ def __init__(self):
+ self.lock = _thread.allocate_lock()
+ self.value = 0
+
+ def add(self, val):
+ self.lock.acquire()
+ self.value += val
+ self.lock.release()
+
+count = LockedCounter()
+
+def thread_entry():
+ global count
+
+ aes = AES(256)
+ key = bytearray(256 // 8)
+ iv = bytearray(16)
+ data = bytearray(128)
+ # from now on we don't use the heap
+
+ for loop in range(5):
+ # encrypt
+ aes.set_key(key)
+ aes.set_iv(iv)
+ for i in range(8):
+ aes.apply_to(data)
+
+ # decrypt
+ aes.set_key(key)
+ aes.set_iv(iv)
+ for i in range(8):
+ aes.apply_to(data)
+
+ # verify
+ for i in range(len(data)):
+ assert data[i] == 0
+
+ count.add(1)
+
+if __name__ == '__main__':
+ n_thread = 20
+ for i in range(n_thread):
+ _thread.start_new_thread(thread_entry, ())
+ while count.value < n_thread:
+ time.sleep(1)
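The verify loop in thread_entry relies on a property of OFB-style stream encryption: the keystream depends only on the key and IV, so XOR-ing it into the buffer twice restores the original (all-zero) data. A minimal self-contained sketch of that round-trip property, using a hashlib-based stand-in keystream rather than the test's AES class (assumption: any deterministic keystream illustrates the point):

    import hashlib

    def keystream(key, iv, n):
        # derive n bytes of keystream from key/IV by repeated hashing
        out, block = bytearray(), iv
        while len(out) < n:
            block = hashlib.sha256(key + block).digest()  # stand-in for AES(state)
            out.extend(block)
        return out[:n]

    def apply_stream(data, key, iv):
        # XOR the keystream into data in place, as AES.apply_to does with its state
        ks = keystream(key, iv, len(data))
        for i in range(len(data)):
            data[i] ^= ks[i]

    data = bytearray(128)             # all zeros, like the test's buffer
    apply_stream(data, b'k', b'iv')   # "encrypt"
    apply_stream(data, b'k', b'iv')   # "decrypt" with the same key/IV
    assert all(b == 0 for b in data)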
diff --git a/tests/thread/stress_heap.py b/tests/thread/stress_heap.py
new file mode 100644
index 0000000000..ac3ebe0491
--- /dev/null
+++ b/tests/thread/stress_heap.py
@@ -0,0 +1,42 @@
+# stress test for the heap by allocating lots of objects within threads
+# allocates about 5mb on the heap
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+def last(l):
+ return l[-1]
+
+def thread_entry(n):
+ # allocate a bytearray and fill it
+ data = bytearray(i for i in range(256))
+
+ # run a loop which allocates a small list and uses it each iteration
+ lst = 8 * [0]
+ sum = 0
+ for i in range(n):
+ sum += last(lst)
+ lst = [0, 0, 0, 0, 0, 0, 0, i + 1]
+
+ # check that the bytearray still has the right data
+ for i, b in enumerate(data):
+ assert i == b
+
+ # print the result of the loop and indicate we are finished
+ with lock:
+ print(sum, lst[-1])
+ global n_finished
+ n_finished += 1
+
+lock = _thread.allocate_lock()
+n_thread = 10
+n_finished = 0
+
+# spawn threads
+for i in range(n_thread):
+ _thread.start_new_thread(thread_entry, (10000,))
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
diff --git a/tests/thread/stress_recurse.py b/tests/thread/stress_recurse.py
new file mode 100644
index 0000000000..68367c4dd7
--- /dev/null
+++ b/tests/thread/stress_recurse.py
@@ -0,0 +1,25 @@
+# test hitting the function recursion limit within a thread
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+def foo():
+ foo()
+
+def thread_entry():
+ try:
+ foo()
+ except RuntimeError:
+ print('RuntimeError')
+ global finished
+ finished = True
+
+finished = False
+
+_thread.start_new_thread(thread_entry, ())
+
+# busy wait for thread to finish
+while not finished:
+ pass
+print('done')
diff --git a/tests/thread/thread_exc1.py b/tests/thread/thread_exc1.py
new file mode 100644
index 0000000000..10fb94b4fb
--- /dev/null
+++ b/tests/thread/thread_exc1.py
@@ -0,0 +1,30 @@
+# test raising and catching an exception within a thread
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+def foo():
+ raise ValueError
+
+def thread_entry():
+ try:
+ foo()
+ except ValueError:
+ pass
+ with lock:
+ global n_finished
+ n_finished += 1
+
+lock = _thread.allocate_lock()
+n_thread = 4
+n_finished = 0
+
+# spawn threads
+for i in range(n_thread):
+ _thread.start_new_thread(thread_entry, ())
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
+print('done')
diff --git a/tests/thread/thread_exit1.py b/tests/thread/thread_exit1.py
new file mode 100644
index 0000000000..88cdd165c7
--- /dev/null
+++ b/tests/thread/thread_exit1.py
@@ -0,0 +1,19 @@
+# test _thread.exit() function
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+try:
+ import utime as time
+except ImportError:
+ import time
+import _thread
+
+def thread_entry():
+ _thread.exit()
+
+_thread.start_new_thread(thread_entry, ())
+_thread.start_new_thread(thread_entry, ())
+
+# wait for threads to finish
+time.sleep(1)
+print('done')
diff --git a/tests/thread/thread_exit2.py b/tests/thread/thread_exit2.py
new file mode 100644
index 0000000000..368a11bba4
--- /dev/null
+++ b/tests/thread/thread_exit2.py
@@ -0,0 +1,19 @@
+# test raising SystemExit to finish a thread
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+try:
+ import utime as time
+except ImportError:
+ import time
+import _thread
+
+def thread_entry():
+ raise SystemExit
+
+_thread.start_new_thread(thread_entry, ())
+_thread.start_new_thread(thread_entry, ())
+
+# wait for threads to finish
+time.sleep(1)
+print('done')
diff --git a/tests/thread/thread_gc1.py b/tests/thread/thread_gc1.py
new file mode 100644
index 0000000000..8dcbf7e07a
--- /dev/null
+++ b/tests/thread/thread_gc1.py
@@ -0,0 +1,34 @@
+# test that we can run the garbage collector within threads
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import gc
+import _thread
+
+def thread_entry(n):
+ # allocate a bytearray and fill it
+ data = bytearray(i for i in range(256))
+
+ # do some work and call gc.collect() a few times
+ for i in range(n):
+ for i in range(len(data)):
+ data[i] = data[i]
+ gc.collect()
+
+ # print whether the data remains intact and indicate we are finished
+ with lock:
+ print(list(data) == list(range(256)))
+ global n_finished
+ n_finished += 1
+
+lock = _thread.allocate_lock()
+n_thread = 4
+n_finished = 0
+
+# spawn threads
+for i in range(n_thread):
+ _thread.start_new_thread(thread_entry, (10,))
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
diff --git a/tests/thread/thread_ident1.py b/tests/thread/thread_ident1.py
new file mode 100644
index 0000000000..217fce73b1
--- /dev/null
+++ b/tests/thread/thread_ident1.py
@@ -0,0 +1,21 @@
+# test _thread.get_ident() function
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+def thread_entry():
+ tid = _thread.get_ident()
+ print('thread', type(tid) == int, tid != 0, tid != tid_main)
+ global finished
+ finished = True
+
+tid_main = _thread.get_ident()
+print('main', type(tid_main) == int, tid_main != 0)
+
+finished = False
+_thread.start_new_thread(thread_entry, ())
+
+while not finished:
+ pass
+print('done')
diff --git a/tests/thread/thread_lock1.py b/tests/thread/thread_lock1.py
new file mode 100644
index 0000000000..ca585ffbb9
--- /dev/null
+++ b/tests/thread/thread_lock1.py
@@ -0,0 +1,40 @@
+# test _thread lock object using a single thread
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+# create lock
+lock = _thread.allocate_lock()
+
+print(type(lock) == _thread.LockType)
+
+# should be unlocked
+print(lock.locked())
+
+# basic acquire and release
+print(lock.acquire())
+print(lock.locked())
+lock.release()
+print(lock.locked())
+
+# try acquire twice (second should fail)
+print(lock.acquire())
+print(lock.locked())
+print(lock.acquire(0))
+print(lock.locked())
+lock.release()
+print(lock.locked())
+
+# test the lock's context-manager ('with' statement) capability
+with lock:
+ print(lock.locked())
+
+# test that lock is unlocked if an error is raised
+try:
+ with lock:
+ print(lock.locked())
+ raise KeyError
+except KeyError:
+ print('KeyError')
+ print(lock.locked())
diff --git a/tests/thread/thread_lock2.py b/tests/thread/thread_lock2.py
new file mode 100644
index 0000000000..405f10b0b6
--- /dev/null
+++ b/tests/thread/thread_lock2.py
@@ -0,0 +1,24 @@
+# test _thread lock objects with multiple threads
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+try:
+ import utime as time
+except ImportError:
+ import time
+import _thread
+
+lock = _thread.allocate_lock()
+
+def thread_entry():
+ lock.acquire()
+ print('have it')
+ lock.release()
+
+# spawn the threads
+for i in range(4):
+ _thread.start_new_thread(thread_entry, ())
+
+# wait for threads to finish
+time.sleep(1)
+print('done')
diff --git a/tests/thread/thread_lock3.py b/tests/thread/thread_lock3.py
new file mode 100644
index 0000000000..607898dad8
--- /dev/null
+++ b/tests/thread/thread_lock3.py
@@ -0,0 +1,27 @@
+# test thread coordination using a lock object
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+lock = _thread.allocate_lock()
+n_thread = 10
+n_finished = 0
+
+def thread_entry(idx):
+ global n_finished
+ while True:
+ with lock:
+ if n_finished == idx:
+ break
+ print('my turn:', idx)
+ with lock:
+ n_finished += 1
+
+# spawn threads
+for i in range(n_thread):
+ _thread.start_new_thread(thread_entry, (i,))
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
diff --git a/tests/thread/thread_lock4.py b/tests/thread/thread_lock4.py
new file mode 100644
index 0000000000..d77aa24ee8
--- /dev/null
+++ b/tests/thread/thread_lock4.py
@@ -0,0 +1,46 @@
+# test using lock to coordinate access to global mutable objects
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+def fac(n):
+ x = 1
+ for i in range(1, n + 1):
+ x *= i
+ return x
+
+def thread_entry():
+ while True:
+ with jobs_lock:
+ try:
+ f, arg = jobs.pop(0)
+ except IndexError:
+ return
+ ans = f(arg)
+ with output_lock:
+ output.append((arg, ans))
+
+# create a list of jobs
+jobs = [(fac, i) for i in range(20, 80)]
+jobs_lock = _thread.allocate_lock()
+n_jobs = len(jobs)
+
+# create a list to store the results
+output = []
+output_lock = _thread.allocate_lock()
+
+# spawn threads to do the jobs
+for i in range(4):
+ _thread.start_new_thread(thread_entry, ())
+
+# wait for the jobs to complete
+while True:
+ with jobs_lock:
+ if len(output) == n_jobs:
+ break
+
+# sort and print the results
+output.sort(key=lambda x: x[0])
+for arg, ans in output:
+ print(arg, ans)
diff --git a/tests/thread/thread_qstr1.py b/tests/thread/thread_qstr1.py
new file mode 100644
index 0000000000..c0256316e5
--- /dev/null
+++ b/tests/thread/thread_qstr1.py
@@ -0,0 +1,35 @@
+# test concurrent interning of strings
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+# function to check the interned string
+def check(s, val):
+ assert type(s) == str
+ assert int(s) == val
+
+# main thread function
+def th(base, n):
+ for i in range(n):
+ # this will intern the string and check it
+ exec("check('%u', %u)" % (base + i, base + i))
+
+ with lock:
+ global n_finished
+ n_finished += 1
+
+lock = _thread.allocate_lock()
+n_thread = 4
+n_finished = 0
+n_qstr_per_thread = 100 # make 1000 for a more stressful test (uses more heap)
+
+# spawn threads
+for i in range(n_thread):
+ _thread.start_new_thread(th, (i * n_qstr_per_thread, n_qstr_per_thread))
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
+
+print('pass')
diff --git a/tests/thread/thread_shared1.py b/tests/thread/thread_shared1.py
new file mode 100644
index 0000000000..13c6651cc4
--- /dev/null
+++ b/tests/thread/thread_shared1.py
@@ -0,0 +1,31 @@
+# test capability for threads to access a shared immutable data structure
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+def foo(i):
+ pass
+
+def thread_entry(n, tup):
+ for i in tup:
+ foo(i)
+ with lock:
+ global n_finished
+ n_finished += 1
+
+lock = _thread.allocate_lock()
+n_thread = 2
+n_finished = 0
+
+# the shared data structure
+tup = (1, 2, 3, 4)
+
+# spawn threads
+for i in range(n_thread):
+ _thread.start_new_thread(thread_entry, (100, tup))
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
+print(tup)
diff --git a/tests/thread/thread_shared2.py b/tests/thread/thread_shared2.py
new file mode 100644
index 0000000000..e4bfe78022
--- /dev/null
+++ b/tests/thread/thread_shared2.py
@@ -0,0 +1,32 @@
+# test capability for threads to access a shared mutable data structure
+# (without contention because they access different parts of the structure)
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import _thread
+
+def foo(lst, i):
+ lst[i] += 1
+
+def thread_entry(n, lst, idx):
+ for i in range(n):
+ foo(lst, idx)
+ with lock:
+ global n_finished
+ n_finished += 1
+
+lock = _thread.allocate_lock()
+n_thread = 2
+n_finished = 0
+
+# the shared data structure
+lst = [0, 0]
+
+# spawn threads
+for i in range(n_thread):
+ _thread.start_new_thread(thread_entry, ((i + 1) * 10, lst, i))
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
+print(lst)
diff --git a/tests/thread/thread_sleep1.py b/tests/thread/thread_sleep1.py
new file mode 100644
index 0000000000..032ec17543
--- /dev/null
+++ b/tests/thread/thread_sleep1.py
@@ -0,0 +1,31 @@
+# test threads sleeping
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+try:
+ import utime
+ sleep_ms = utime.sleep_ms
+except ImportError:
+ import time
+ sleep_ms = lambda t: time.sleep(t / 1000)
+
+import _thread
+
+lock = _thread.allocate_lock()
+n_thread = 4
+n_finished = 0
+
+def thread_entry(t):
+ global n_finished
+ sleep_ms(t)
+ sleep_ms(2 * t)
+ with lock:
+ n_finished += 1
+
+for i in range(n_thread):
+ _thread.start_new_thread(thread_entry, (10 * i,))
+
+# wait for threads to finish
+while n_finished < n_thread:
+ sleep_ms(100)
+print('done', n_thread)
diff --git a/tests/thread/thread_stacksize1.py b/tests/thread/thread_stacksize1.py
new file mode 100644
index 0000000000..e62899631f
--- /dev/null
+++ b/tests/thread/thread_stacksize1.py
@@ -0,0 +1,44 @@
+# test setting the thread stack size
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+import sys
+import _thread
+
+# different implementations have different minimum sizes
+if sys.implementation.name == 'micropython':
+ sz = 2 * 1024
+else:
+ sz = 32 * 1024
+
+def foo():
+ pass
+
+def thread_entry():
+ foo()
+ with lock:
+ global n_finished
+ n_finished += 1
+
+# reset stack size to default
+_thread.stack_size()
+
+# test set/get of stack size
+print(_thread.stack_size())
+print(_thread.stack_size(sz))
+print(_thread.stack_size() == sz)
+print(_thread.stack_size())
+
+lock = _thread.allocate_lock()
+n_thread = 2
+n_finished = 0
+
+# set stack size and spawn a few threads
+_thread.stack_size(sz)
+for i in range(n_thread):
+ _thread.start_new_thread(thread_entry, ())
+
+# busy wait for threads to finish
+while n_finished < n_thread:
+ pass
+print('done')
diff --git a/tests/thread/thread_start1.py b/tests/thread/thread_start1.py
new file mode 100644
index 0000000000..d23a74aa21
--- /dev/null
+++ b/tests/thread/thread_start1.py
@@ -0,0 +1,23 @@
+# test basic capability to start a new thread
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+try:
+ import utime as time
+except ImportError:
+ import time
+import _thread
+
+def foo():
+ pass
+
+def thread_entry(n):
+ for i in range(n):
+ foo()
+
+_thread.start_new_thread(thread_entry, (10,))
+_thread.start_new_thread(thread_entry, (20,))
+
+# wait for threads to finish
+time.sleep(1)
+print('done')
diff --git a/tests/thread/thread_start2.py b/tests/thread/thread_start2.py
new file mode 100644
index 0000000000..4efa808eb9
--- /dev/null
+++ b/tests/thread/thread_start2.py
@@ -0,0 +1,19 @@
+# test capability to start a thread with keyword args
+#
+# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+
+try:
+ import utime as time
+except ImportError:
+ import time
+import _thread
+
+def thread_entry(a0, a1, a2, a3):
+ print('thread', a0, a1, a2, a3)
+
+# spawn thread using kw args
+_thread.start_new_thread(thread_entry, (10, 20), {'a2': 0, 'a3': 1})
+
+# wait for thread to finish
+time.sleep(1)
+print('done')