aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/Tools/peg_generator/scripts/benchmark.py
diff options
context:
space:
mode:
authorPablo Galindo <Pablogsal@gmail.com>2020-06-12 01:55:35 +0100
committerGitHub <noreply@github.com>2020-06-12 01:55:35 +0100
commit756180b4bfa09bb77394a2b3754d331181d4f28c (patch)
treeee9713369ef6401fe6a1f7d62e7a13790166bdc3 /Tools/peg_generator/scripts/benchmark.py
parentb4282dd15079ed46edc9d382b21422320a0af94f (diff)
downloadcpython-756180b4bfa09bb77394a2b3754d331181d4f28c.tar.gz
cpython-756180b4bfa09bb77394a2b3754d331181d4f28c.zip
bpo-40939: Clean and adapt the peg_generator directory after deleting the old parser (GH-20822)
Diffstat (limited to 'Tools/peg_generator/scripts/benchmark.py')
-rw-r--r--Tools/peg_generator/scripts/benchmark.py66
1 file changed, 16 insertions, 50 deletions
diff --git a/Tools/peg_generator/scripts/benchmark.py b/Tools/peg_generator/scripts/benchmark.py
index af356bed783..5fbedaa3b0e 100644
--- a/Tools/peg_generator/scripts/benchmark.py
+++ b/Tools/peg_generator/scripts/benchmark.py
@@ -6,13 +6,13 @@ import sys
import os
from time import time
-import _peg_parser
-
try:
import memory_profiler
except ModuleNotFoundError:
- print("Please run `make venv` to create a virtual environment and install"
- " all the dependencies, before running this script.")
+ print(
+ "Please run `make venv` to create a virtual environment and install"
+ " all the dependencies, before running this script."
+ )
sys.exit(1)
sys.path.insert(0, os.getcwd())
@@ -22,13 +22,6 @@ argparser = argparse.ArgumentParser(
prog="benchmark", description="Reproduce the various pegen benchmarks"
)
argparser.add_argument(
- "--parser",
- action="store",
- choices=["new", "old"],
- default="pegen",
- help="Which parser to benchmark (default is pegen)",
-)
-argparser.add_argument(
"--target",
action="store",
choices=["xxl", "stdlib"],
@@ -40,12 +33,7 @@ subcommands = argparser.add_subparsers(title="Benchmarks", dest="subcommand")
command_compile = subcommands.add_parser(
"compile", help="Benchmark parsing and compiling to bytecode"
)
-command_parse = subcommands.add_parser(
- "parse", help="Benchmark parsing and generating an ast.AST"
-)
-command_notree = subcommands.add_parser(
- "notree", help="Benchmark parsing and dumping the tree"
-)
+command_parse = subcommands.add_parser("parse", help="Benchmark parsing and generating an ast.AST")
def benchmark(func):
@@ -66,59 +54,37 @@ def benchmark(func):
@benchmark
-def time_compile(source, parser):
- if parser == "old":
- return _peg_parser.compile_string(
- source,
- oldparser=True,
- )
- else:
- return _peg_parser.compile_string(source)
-
-
-@benchmark
-def time_parse(source, parser):
- if parser == "old":
- return _peg_parser.parse_string(source, oldparser=True)
- else:
- return _peg_parser.parse_string(source)
+def time_compile(source):
+ return compile(source, "<string>", "exec")
@benchmark
-def time_notree(source, parser):
- if parser == "old":
- return _peg_parser.parse_string(source, oldparser=True, ast=False)
- else:
- return _peg_parser.parse_string(source, ast=False)
+def time_parse(source):
+ return ast.parse(source)
-def run_benchmark_xxl(subcommand, parser, source):
+def run_benchmark_xxl(subcommand, source):
if subcommand == "compile":
- time_compile(source, parser)
+ time_compile(source)
elif subcommand == "parse":
- time_parse(source, parser)
- elif subcommand == "notree":
- time_notree(source, parser)
+ time_parse(source)
-def run_benchmark_stdlib(subcommand, parser):
- modes = {"compile": 2, "parse": 1, "notree": 0}
+def run_benchmark_stdlib(subcommand):
+ modes = {"compile": 2, "parse": 1}
for _ in range(3):
parse_directory(
"../../Lib",
verbose=False,
excluded_files=["*/bad*", "*/lib2to3/tests/data/*",],
- tree_arg=0,
short=True,
mode=modes[subcommand],
- oldparser=(parser == "old"),
)
def main():
args = argparser.parse_args()
subcommand = args.subcommand
- parser = args.parser
target = args.target
if subcommand is None:
@@ -127,9 +93,9 @@ def main():
if target == "xxl":
with open(os.path.join("data", "xxl.py"), "r") as f:
source = f.read()
- run_benchmark_xxl(subcommand, parser, source)
+ run_benchmark_xxl(subcommand, source)
elif target == "stdlib":
- run_benchmark_stdlib(subcommand, parser)
+ run_benchmark_stdlib(subcommand)
if __name__ == "__main__":