From 01481f2dc13341c84b64d6dffc08ffed022712a6 Mon Sep 17 00:00:00 2001
From: Lysandros Nikolaou
Date: Wed, 11 Oct 2023 17:14:44 +0200
Subject: gh-104169: Refactor tokenizer into lexer and wrappers (#110684)

* The lexer, which includes the actual lexeme-producing logic, goes into
  the `lexer` directory.
* The wrappers, one wrapper per input mode (file, string, utf-8, and
  readline), go into the `tokenizer` directory and include logic for
  creating a lexer instance and managing the buffer for different modes.

---------

Co-authored-by: Pablo Galindo
Co-authored-by: blurb-it[bot] <43283697+blurb-it[bot]@users.noreply.github.com>
---
 Python/Python-tokenize.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'Python/Python-tokenize.c')

diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c
index 1b021069c5e..83b4aa4b1a7 100644
--- a/Python/Python-tokenize.c
+++ b/Python/Python-tokenize.c
@@ -1,6 +1,8 @@
 #include "Python.h"
 #include "errcode.h"
-#include "../Parser/tokenizer.h"
+#include "../Parser/lexer/state.h"
+#include "../Parser/lexer/lexer.h"
+#include "../Parser/tokenizer/tokenizer.h"
 #include "../Parser/pegen.h"  // _PyPegen_byte_offset_to_character_offset()
 #include "../Parser/pegen.h"  // _PyPegen_byte_offset_to_character_offset()
--
cgit v1.2.3
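
For context, here is a minimal C sketch of the "lexer core + one wrapper per
input mode" split that the commit message describes. All names in it
(lexer_state, lexer_next, tokenizer_from_string) are hypothetical
illustrations of the design, not CPython's actual internal API, and the
"lexer" here just splits on spaces to keep the example self-contained.

/* Hedged sketch of the lexer/wrapper split: the lexer core produces
 * lexemes from a buffer it does not own; a mode-specific wrapper
 * creates the lexer state and manages the buffer for that mode.
 * All identifiers are hypothetical, not CPython internals. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* lexer/state.h analogue: state shared by the lexeme-producing core. */
typedef struct {
    const char *buf;   /* input buffer, owned by the wrapper's caller */
    const char *cur;   /* next character to scan */
    const char *end;   /* one past the last character */
} lexer_state;

/* lexer/lexer.h analogue: the core emits one lexeme at a time; here a
 * "lexeme" is simply the next whitespace-delimited word. Returns 1 and
 * sets [*start, *stop) on success, 0 at end of input. */
static int
lexer_next(lexer_state *ls, const char **start, const char **stop)
{
    while (ls->cur < ls->end && *ls->cur == ' ') {
        ls->cur++;                     /* skip leading spaces */
    }
    if (ls->cur >= ls->end) {
        return 0;                      /* end of input */
    }
    *start = ls->cur;
    while (ls->cur < ls->end && *ls->cur != ' ') {
        ls->cur++;                     /* consume the lexeme */
    }
    *stop = ls->cur;
    return 1;
}

/* tokenizer/tokenizer.h analogue: a per-mode wrapper that creates a
 * lexer instance and sets up the buffer for string input; file, utf-8,
 * and readline modes would each get a sibling constructor. */
static lexer_state *
tokenizer_from_string(const char *str)
{
    lexer_state *ls = malloc(sizeof(lexer_state));
    if (ls == NULL) {
        return NULL;
    }
    ls->buf = str;
    ls->cur = str;
    ls->end = str + strlen(str);
    return ls;
}

int
main(void)
{
    lexer_state *ls = tokenizer_from_string("x = 1 + 2");
    const char *start, *stop;
    while (ls != NULL && lexer_next(ls, &start, &stop)) {
        printf("lexeme: %.*s\n", (int)(stop - start), start);
    }
    free(ls);
    return 0;
}

The point of the shape, as in the commit: callers only talk to a wrapper for
their input mode, while the buffer-scanning logic lives in one shared core.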