about summary refs log tree commit diff stats homepage
path: root/Lib
diff options
context:
space:
mode:
Diffstat (limited to 'Lib')
-rw-r--r--Lib/test/test_tokenize.py10
-rw-r--r--Lib/tokenize.py2
2 files changed, 11 insertions, 1 deletion
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 681f2c72f9c..4bce1ca9c76 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1458,6 +1458,16 @@ class TestTokenize(TestCase):
# See http://bugs.python.org/issue16152
self.assertExactTypeEqual('@ ', token.AT)
+ def test_comment_at_the_end_of_the_source_without_newline(self):
+ # See http://bugs.python.org/issue44667
+ source = 'b = 1\n\n#test'
+ expected_tokens = [token.NAME, token.EQUAL, token.NUMBER, token.NEWLINE, token.NL, token.COMMENT]
+
+ tokens = list(tokenize(BytesIO(source.encode('utf-8')).readline))
+ self.assertEqual(tok_name[tokens[0].exact_type], tok_name[ENCODING])
+ for i in range(6):
+ self.assertEqual(tok_name[tokens[i + 1].exact_type], tok_name[expected_tokens[i]])
+ self.assertEqual(tok_name[tokens[-1].exact_type], tok_name[token.ENDMARKER])
class UntokenizeTest(TestCase):
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 42c1f10373d..7d7736fe985 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -604,7 +604,7 @@ def _tokenize(readline, encoding):
pos += 1
# Add an implicit NEWLINE if the input doesn't end in one
- if last_line and last_line[-1] not in '\r\n':
+ if last_line and last_line[-1] not in '\r\n' and not last_line.strip().startswith("#"):
yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
for indent in indents[1:]: # pop remaining indent levels
yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')