Diffstat (limited to 'tests/bench')
-rw-r--r--  tests/bench/arrayop-1-list_inplace.py            12
-rw-r--r--  tests/bench/arrayop-2-list_map.py                12
-rw-r--r--  tests/bench/arrayop-3-bytearray_inplace.py       12
-rw-r--r--  tests/bench/arrayop-4-bytearray_map.py           12
-rw-r--r--  tests/bench/bytebuf-1-inplace.py                 11
-rw-r--r--  tests/bench/bytebuf-2-join_map_bytes.py          12
-rw-r--r--  tests/bench/bytebuf-3-bytarray_map.py            10
-rw-r--r--  tests/bench/from_iter-1-list_bound.py             8
-rw-r--r--  tests/bench/from_iter-2-list_unbound.py           8
-rw-r--r--  tests/bench/from_iter-3-tuple_bound.py            8
-rw-r--r--  tests/bench/from_iter-4-tuple_unbound.py          8
-rw-r--r--  tests/bench/from_iter-5-bytes_bound.py            8
-rw-r--r--  tests/bench/from_iter-6-bytes_unbound.py          8
-rw-r--r--  tests/bench/from_iter-7-bytearray_bound.py        8
-rw-r--r--  tests/bench/from_iter-8-bytearray_unbound.py      8
-rw-r--r--  tests/bench/funcall-1-inline.py                   9
-rw-r--r--  tests/bench/funcall-2-funcall.py                 12
-rw-r--r--  tests/bench/funcall-3-funcall-local.py           16
18 files changed, 182 insertions(+), 0 deletions(-)
diff --git a/tests/bench/arrayop-1-list_inplace.py b/tests/bench/arrayop-1-list_inplace.py
new file mode 100644
index 0000000000..0ee1ef2eca
--- /dev/null
+++ b/tests/bench/arrayop-1-list_inplace.py
@@ -0,0 +1,12 @@
+# Array operation
+# Type: list, in-place operation using a for loop. What's good about
+# this method is that it doesn't require any extra memory allocation.
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ arr = [0] * 1000
+ for i in range(len(arr)):
+ arr[i] += 1
+
+bench.run(test)
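
Each of these benchmarks imports a bench helper that is not part of this diff. A minimal sketch of what such a harness could look like, assuming a run() entry point that times the test callable over a fixed iteration budget (the ITERS value and the wall-clock timing below are assumptions, not the actual tests/bench/bench.py):

import time

ITERS = 20000000  # assumed iteration budget passed as `num` to each test

def run(test):
    # Time a single call of the test function over the whole budget.
    t = time.time()
    test(ITERS)
    print("%.3fs" % (time.time() - t))

With a harness like this, the bench.run(test) call at the bottom of each file both defines and immediately runs the measurement.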
diff --git a/tests/bench/arrayop-2-list_map.py b/tests/bench/arrayop-2-list_map.py
new file mode 100644
index 0000000000..9d5095c53a
--- /dev/null
+++ b/tests/bench/arrayop-2-list_map.py
@@ -0,0 +1,12 @@
+# Array operation
+# Type: list, map() call. This method requires allocation of
+# the same amount of memory as the original array (to hold the
+# result array). On the other hand, the input array stays intact.
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ arr = [0] * 1000
+ arr2 = list(map(lambda x: x + 1, arr))
+
+bench.run(test)
diff --git a/tests/bench/arrayop-3-bytearray_inplace.py b/tests/bench/arrayop-3-bytearray_inplace.py
new file mode 100644
index 0000000000..a6d6280705
--- /dev/null
+++ b/tests/bench/arrayop-3-bytearray_inplace.py
@@ -0,0 +1,12 @@
+# Array operation
+# Type: bytearray, in-place operation using a for loop. What's good about
+# this method is that it doesn't require any extra memory allocation.
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ arr = bytearray(b"\0" * 1000)
+ for i in range(len(arr)):
+ arr[i] += 1
+
+bench.run(test)
diff --git a/tests/bench/arrayop-4-bytearray_map.py b/tests/bench/arrayop-4-bytearray_map.py
new file mode 100644
index 0000000000..1b92a40961
--- /dev/null
+++ b/tests/bench/arrayop-4-bytearray_map.py
@@ -0,0 +1,12 @@
+# Array operation
+# Type: bytearray, map() call. This method requires allocation of
+# the same amount of memory as the original array (to hold the
+# result array). On the other hand, the input array stays intact.
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ arr = bytearray(b"\0" * 1000)
+ arr2 = bytearray(map(lambda x: x + 1, arr))
+
+bench.run(test)
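
The four arrayop variants above only differ in the container type and in whether the update happens in place or through map(). Outside the bench harness, the same comparison can be sketched with the standard timeit module under CPython (illustrative only, not part of this commit):

import timeit

def list_inplace():
    arr = [0] * 1000
    for i in range(len(arr)):
        arr[i] += 1

def list_map():
    arr = [0] * 1000
    arr2 = list(map(lambda x: x + 1, arr))

# Rough relative timing of the two list strategies.
for f in (list_inplace, list_map):
    print(f.__name__, timeit.timeit(f, number=10000))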
diff --git a/tests/bench/bytebuf-1-inplace.py b/tests/bench/bytebuf-1-inplace.py
new file mode 100644
index 0000000000..7e7d9391cc
--- /dev/null
+++ b/tests/bench/bytebuf-1-inplace.py
@@ -0,0 +1,11 @@
+# Doing some operation on a bytearray
+# In-place - the most memory-efficient way
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ ba = bytearray(b"\0" * 1000)
+ for i in range(len(ba)):
+ ba[i] += 1
+
+bench.run(test)
diff --git a/tests/bench/bytebuf-2-join_map_bytes.py b/tests/bench/bytebuf-2-join_map_bytes.py
new file mode 100644
index 0000000000..daa622991f
--- /dev/null
+++ b/tests/bench/bytebuf-2-join_map_bytes.py
@@ -0,0 +1,12 @@
+# Doing some operation on a bytearray
+# A pretty weird way - map the bytearray through a function that returns
+# bytes of size 1, then join the pieces together. Surely, this is the
+# slowest way to do it.
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ ba = bytearray(b"\0" * 1000)
+ ba2 = b''.join(map(lambda x:bytes([x + 1]), ba))
+
+bench.run(test)
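
The reason this variant is expected to be the slowest is that it materialises one temporary 1-byte bytes object per element before joining them. A small sketch of the intermediate objects involved (illustrative, not part of the commit):

ba = bytearray(b"\0" * 10)
pieces = list(map(lambda x: bytes([x + 1]), ba))
print(pieces)            # ten separate b'\x01' objects
print(b"".join(pieces))  # b'\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01'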
diff --git a/tests/bench/bytebuf-3-bytarray_map.py b/tests/bench/bytebuf-3-bytarray_map.py
new file mode 100644
index 0000000000..078d08e99b
--- /dev/null
+++ b/tests/bench/bytebuf-3-bytarray_map.py
@@ -0,0 +1,10 @@
+# Doing some operation on a bytearray
+# No joins, but still uses map().
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ ba = bytearray(b"\0" * 1000)
+ ba2 = bytearray(map(lambda x: x + 1, ba))
+
+bench.run(test)
diff --git a/tests/bench/from_iter-1-list_bound.py b/tests/bench/from_iter-1-list_bound.py
new file mode 100644
index 0000000000..d209daecc5
--- /dev/null
+++ b/tests/bench/from_iter-1-list_bound.py
@@ -0,0 +1,8 @@
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ l = [0] * 1000
+ l2 = list(l)
+
+bench.run(test)
diff --git a/tests/bench/from_iter-2-list_unbound.py b/tests/bench/from_iter-2-list_unbound.py
new file mode 100644
index 0000000000..be019c52fe
--- /dev/null
+++ b/tests/bench/from_iter-2-list_unbound.py
@@ -0,0 +1,8 @@
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ l = [0] * 1000
+ l2 = list(map(lambda x: x, l))
+
+bench.run(test)
diff --git a/tests/bench/from_iter-3-tuple_bound.py b/tests/bench/from_iter-3-tuple_bound.py
new file mode 100644
index 0000000000..7b7fa36c6e
--- /dev/null
+++ b/tests/bench/from_iter-3-tuple_bound.py
@@ -0,0 +1,8 @@
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ l = [0] * 1000
+ l2 = tuple(l)
+
+bench.run(test)
diff --git a/tests/bench/from_iter-4-tuple_unbound.py b/tests/bench/from_iter-4-tuple_unbound.py
new file mode 100644
index 0000000000..7c7f134c85
--- /dev/null
+++ b/tests/bench/from_iter-4-tuple_unbound.py
@@ -0,0 +1,8 @@
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ l = [0] * 1000
+ l2 = tuple(map(lambda x: x, l))
+
+bench.run(test)
diff --git a/tests/bench/from_iter-5-bytes_bound.py b/tests/bench/from_iter-5-bytes_bound.py
new file mode 100644
index 0000000000..b793a3207e
--- /dev/null
+++ b/tests/bench/from_iter-5-bytes_bound.py
@@ -0,0 +1,8 @@
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ l = [0] * 1000
+ l2 = bytes(l)
+
+bench.run(test)
diff --git a/tests/bench/from_iter-6-bytes_unbound.py b/tests/bench/from_iter-6-bytes_unbound.py
new file mode 100644
index 0000000000..20aa556277
--- /dev/null
+++ b/tests/bench/from_iter-6-bytes_unbound.py
@@ -0,0 +1,8 @@
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ l = [0] * 1000
+ l2 = bytes(map(lambda x: x, l))
+
+bench.run(test)
diff --git a/tests/bench/from_iter-7-bytearray_bound.py b/tests/bench/from_iter-7-bytearray_bound.py
new file mode 100644
index 0000000000..72001a05c7
--- /dev/null
+++ b/tests/bench/from_iter-7-bytearray_bound.py
@@ -0,0 +1,8 @@
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ l = [0] * 1000
+ l2 = bytearray(l)
+
+bench.run(test)
diff --git a/tests/bench/from_iter-8-bytearray_unbound.py b/tests/bench/from_iter-8-bytearray_unbound.py
new file mode 100644
index 0000000000..e2263b8ef9
--- /dev/null
+++ b/tests/bench/from_iter-8-bytearray_unbound.py
@@ -0,0 +1,8 @@
+import bench
+
+def test(num):
+ for i in iter(range(num//10000)):
+ l = [0] * 1000
+ l2 = bytearray(map(lambda x: x, l))
+
+bench.run(test)
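
The _bound/_unbound naming in these from_iter tests is not explained in the files themselves; the apparent intent (an assumption, not stated in the commit) is that the bound variants construct from a source whose length is known up front (a list), while the unbound variants go through map(), which yields an iterator of unknown length, so the target container cannot preallocate. A small sketch of the difference:

l = [0] * 1000
print(len(l))  # 1000: constructors can size the result up front

m = map(lambda x: x, l)
try:
    len(m)
except TypeError:
    # A map object has no length; the constructor must grow as it consumes items.
    print("no len() on map objects")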
diff --git a/tests/bench/funcall-1-inline.py b/tests/bench/funcall-1-inline.py
new file mode 100644
index 0000000000..fbeb79630d
--- /dev/null
+++ b/tests/bench/funcall-1-inline.py
@@ -0,0 +1,9 @@
+# Function call overhead test
+# Establish a baseline for performing a trivial operation inline
+import bench
+
+def test(num):
+ for i in iter(range(num)):
+ a = i + 1
+
+bench.run(test)
diff --git a/tests/bench/funcall-2-funcall.py b/tests/bench/funcall-2-funcall.py
new file mode 100644
index 0000000000..d5c36c60aa
--- /dev/null
+++ b/tests/bench/funcall-2-funcall.py
@@ -0,0 +1,12 @@
+# Function call overhead test
+# Perform the same trivial operation as a global function call
+import bench
+
+def f(x):
+ return x + 1
+
+def test(num):
+ for i in iter(range(num)):
+ a = f(i)
+
+bench.run(test)
diff --git a/tests/bench/funcall-3-funcall-local.py b/tests/bench/funcall-3-funcall-local.py
new file mode 100644
index 0000000000..1a6d728c63
--- /dev/null
+++ b/tests/bench/funcall-3-funcall-local.py
@@ -0,0 +1,16 @@
+# Function call overhead test
+# Perform the same trivial operation as a function call, with the function
+# cached in a local variable. This is a commonly known optimization for
+# overly dynamic languages (the idea is to cut down on symbolic lookup
+# overhead, as local variables are accessed by offset, not by name).
+import bench
+
+def f(x):
+ return x + 1
+
+def test(num):
+ f_ = f
+ for i in iter(range(num)):
+ a = f_(i)
+
+bench.run(test)
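
Under CPython the effect of caching the function in a local can be seen directly in the bytecode: the global version looks the name up on every iteration with LOAD_GLOBAL, while the cached version uses LOAD_FAST indexed access. A small sketch using the dis module (illustrative, not part of the commit):

import dis

def f(x):
    return x + 1

def call_global(num):
    for i in range(num):
        a = f(i)

def call_local(num):
    f_ = f
    for i in range(num):
        a = f_(i)

dis.dis(call_global)  # the loop body contains LOAD_GLOBAL f
dis.dis(call_local)   # the loop body contains LOAD_FAST f_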