path: root/Lib/tokenize.py
Diffstat (limited to 'Lib/tokenize.py')
-rw-r--r--  Lib/tokenize.py  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 7afacff7381..7e71755068e 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -86,7 +86,7 @@ def _all_string_prefixes():
# The valid string prefixes. Only contain the lower case versions,
# and don't contain any permutations (include 'fr', but not
# 'rf'). The various permutations will be generated.
- _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
+ _valid_string_prefixes = ['b', 'r', 'u', 'f', 't', 'br', 'fr', 'tr']
# if we add binary f-strings, add: ['fb', 'fbr']
result = {''}
for prefix in _valid_string_prefixes:
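For illustration, the permutation expansion mentioned in the comment means a single lowercase entry such as 'tr' covers every ordering and every casing. A minimal standalone sketch of the expansion for one entry, mirroring the loop body above:

    import itertools

    # Every ordering of the characters, in every upper/lower casing.
    prefixes = set()
    for perm in itertools.permutations('tr'):
        for cased in itertools.product(*[(c, c.upper()) for c in perm]):
            prefixes.add(''.join(cased))
    print(sorted(prefixes))
    # ['RT', 'Rt', 'TR', 'Tr', 'rT', 'rt', 'tR', 'tr']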
@@ -132,7 +132,7 @@ ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
-PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
+PseudoExtras = group(r'\\\r?\n|\z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
# For a given string prefix plus quotes, endpats maps it to a regex
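As of Python 3.14 the re module accepts \z as an end-of-string anchor (a synonym for Python's \Z); unlike \Z in some other regex flavours, \z never matches before a trailing newline, which is the unambiguous behaviour wanted here. A quick check, assuming Python 3.14:

    import re

    # \z anchors at the very end of the input, with no trailing-newline
    # allowance; PseudoExtras uses it to detect end of source.
    assert re.search(r'foo\z', 'foo') is not None
    assert re.search(r'foo\z', 'foo\n') is None
    assert re.search(r'\\\r?\n|\z', '') is not None  # empty input still matches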
@@ -251,7 +251,7 @@ class Untokenizer:
self.tokens.append(indent)
self.prev_col = len(indent)
startline = False
- elif tok_type == FSTRING_MIDDLE:
+ elif tok_type in {FSTRING_MIDDLE, TSTRING_MIDDLE}:
if '{' in token or '}' in token:
token = self.escape_brackets(token)
last_line = token.splitlines()[-1]
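FSTRING_MIDDLE and TSTRING_MIDDLE tokens carry the literal text with brace escapes already collapsed ('{{' appears as '{'), so the Untokenizer has to re-escape them when reconstructing source. A round-trip sketch, assuming Python 3.14's t-string tokens:

    import io
    import tokenize

    # The TSTRING_MIDDLE token here holds 'literal {braces} and ' with the
    # braces unescaped; untokenize must emit '{{braces}}' again.
    src = "x = t'literal {{braces}} and {value}'\n"
    toks = list(tokenize.generate_tokens(io.StringIO(src).readline))
    print(tokenize.untokenize(toks))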
@@ -274,7 +274,7 @@ class Untokenizer:
toks_append = self.tokens.append
startline = token[0] in (NEWLINE, NL)
prevstring = False
- in_fstring = 0
+ in_fstring_or_tstring = 0
for tok in _itertools.chain([token], iterable):
toknum, tokval = tok[:2]
@@ -293,10 +293,10 @@ class Untokenizer:
else:
prevstring = False
- if toknum == FSTRING_START:
- in_fstring += 1
- elif toknum == FSTRING_END:
- in_fstring -= 1
+ if toknum in {FSTRING_START, TSTRING_START}:
+ in_fstring_or_tstring += 1
+ elif toknum in {FSTRING_END, TSTRING_END}:
+ in_fstring_or_tstring -= 1
if toknum == INDENT:
indents.append(tokval)
continue
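A counter rather than a boolean is needed because these literals nest: PEP 701 allows an f-string inside a replacement field, and t-strings nest the same way. A minimal sketch of the same bookkeeping (the token constants are in the stdlib token module as of Python 3.14):

    from token import FSTRING_START, FSTRING_END, TSTRING_START, TSTRING_END

    def literal_depth(toknums):
        # *_START pushes, *_END pops; depth > 0 anywhere inside a
        # (possibly nested) f- or t-string literal.
        depth = 0
        for toknum in toknums:
            if toknum in {FSTRING_START, TSTRING_START}:
                depth += 1
            elif toknum in {FSTRING_END, TSTRING_END}:
                depth -= 1
            yield toknum, depth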
@@ -308,11 +308,11 @@ class Untokenizer:
elif startline and indents:
toks_append(indents[-1])
startline = False
- elif toknum == FSTRING_MIDDLE:
+ elif toknum in {FSTRING_MIDDLE, TSTRING_MIDDLE}:
tokval = self.escape_brackets(tokval)
- # Insert a space between two consecutive brackets if we are in an f-string
- if tokval in {"{", "}"} and self.tokens and self.tokens[-1] == tokval and in_fstring:
+ # Insert a space between two consecutive brackets if we are in an f-string or t-string
+ if tokval in {"{", "}"} and self.tokens and self.tokens[-1] == tokval and in_fstring_or_tstring:
tokval = ' ' + tokval
# Insert a space between two consecutive f-strings
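The inserted space matters in the compat path (2-tuples, no position information), where tokens are joined back-to-back: two adjacent '{' OP tokens rejoined as '{{' would re-read as an escaped literal brace instead of a replacement field opening into a set display. A hedged example, assuming Python 3.14:

    import io
    import tokenize

    # Compat-mode untokenize (2-tuples) would otherwise glue the
    # replacement-field '{' and the set-display '{' into '{{'.
    src = "y = t'{ {1, 2} }'\n"
    toks = [tok[:2] for tok in tokenize.generate_tokens(io.StringIO(src).readline)]
    out = tokenize.untokenize(toks)
    compile(out, '<roundtrip>', 'exec')  # still parses as two open brackets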
@@ -518,7 +518,7 @@ def _main(args=None):
sys.exit(1)
# Parse the arguments and options
- parser = argparse.ArgumentParser()
+ parser = argparse.ArgumentParser(color=True)
parser.add_argument(dest='filename', nargs='?',
metavar='filename.py',
help='the file to tokenize; defaults to stdin')
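argparse.ArgumentParser gained the color parameter in Python 3.14; when enabled, help and usage text is colorized on capable terminals, and the usual environment overrides (such as NO_COLOR and PYTHON_COLORS) still apply. A usage sketch mirroring the call above:

    import argparse

    # color=True opts in to colorized --help output (new in 3.14);
    # it degrades to plain text when stdout is not a terminal.
    parser = argparse.ArgumentParser(prog='tokenize', color=True)
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.print_help()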