author    Ammar Askar <ammar_askar@hotmail.com>  2018-07-06 03:19:08 -0400
committer Tal Einat <taleinat+github@gmail.com>  2018-07-06 10:19:08 +0300
commit    c4ef4896eac86a6759901c8546e26de4695a1389 (patch)
tree      47ad8191fbe9f8fe4bb4272509410bc229ec7c6e /Lib/test/test_tokenize.py
parent    3c8aae9ffe13d0f2ad4ff81cdf56bc6393af362a (diff)
download  cpython-git-c4ef4896eac86a6759901c8546e26de4695a1389.tar.gz
bpo-33899: Make tokenize module mirror end-of-file is end-of-line behavior (GH-7891)
Most of the change involves fixing up the test suite, which previously made the assumption that there wouldn't be a new line if the input didn't end in one. Contributed by Ammar Askar.
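For context, a minimal sketch of the behavior this commit locks in (it mirrors the test_implicit_newline test added below, and assumes an interpreter that includes this change):

    from io import BytesIO
    from tokenize import tokenize, NEWLINE, ENDMARKER

    # Source with no trailing newline: the tokenizer now emits an
    # implicit NEWLINE token before ENDMARKER, mirroring input that
    # does end in '\n'.
    tokens = list(tokenize(BytesIO(b"x = 1").readline))
    assert tokens[-2].type == NEWLINE   # the implicit NEWLINE
    assert tokens[-1].type == ENDMARKER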
Diffstat (limited to 'Lib/test/test_tokenize.py')
-rw-r--r--  Lib/test/test_tokenize.py  71
1 file changed, 47 insertions(+), 24 deletions(-)
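Before reading the diff, here is a self-contained usage sketch of the new stringify_tokens_from_source helper it introduces (the helper body is reproduced from the patch, slightly condensed; the example source string is illustrative):

    from io import BytesIO
    from tokenize import tokenize, tok_name, ENDMARKER, NEWLINE

    def stringify_tokens_from_source(token_generator, source_string):
        result = []
        num_lines = len(source_string.splitlines())
        missing_trailing_nl = source_string[-1] not in '\r\n'
        for type, token, start, end, line in token_generator:
            if type == ENDMARKER:
                break
            # Skip the implicit NEWLINE the tokenizer adds at EOF
            if missing_trailing_nl and type == NEWLINE and end[0] == num_lines:
                continue
            result.append(f" {tok_name[type]:10} {token!r:13} {start} {end}")
        return result

    source = "1 + 1"
    lines = stringify_tokens_from_source(
        tokenize(BytesIO(source.encode('utf-8')).readline), source)
    # lines is roughly:
    #  ENCODING   'utf-8'       (0, 0) (0, 0)
    #  NUMBER     '1'           (1, 0) (1, 1)
    #  OP         '+'           (1, 2) (1, 3)
    #  NUMBER     '1'           (1, 4) (1, 5)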
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 93e40de96e..f68580ccfb 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1,7 +1,8 @@
from test import support
from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
- open as tokenize_open, Untokenizer, generate_tokens)
+ open as tokenize_open, Untokenizer, generate_tokens,
+ NEWLINE)
from io import BytesIO, StringIO
import unittest
from unittest import TestCase, mock
@@ -11,27 +12,51 @@ import os
import token
+# Converts a source string into a list of textual representations
+# of the tokens, such as:
+# ` NAME 'if' (1, 0) (1, 2)`
+# to make writing tests easier.
+def stringify_tokens_from_source(token_generator, source_string):
+ result = []
+ num_lines = len(source_string.splitlines())
+ missing_trailing_nl = source_string[-1] not in '\r\n'
+
+ for type, token, start, end, line in token_generator:
+ if type == ENDMARKER:
+ break
+ # Ignore the new line on the last line if the input lacks one
+ if missing_trailing_nl and type == NEWLINE and end[0] == num_lines:
+ continue
+ type = tok_name[type]
+ result.append(f" {type:10} {token!r:13} {start} {end}")
+
+ return result
+
class TokenizeTest(TestCase):
# Tests for the tokenize module.
# The tests can be really simple. Given a small fragment of source
- # code, print out a table with tokens. The ENDMARKER is omitted for
- # brevity.
+ # code, print out a table with tokens. The ENDMARKER, ENCODING and
+ # final NEWLINE are omitted for brevity.
def check_tokenize(self, s, expected):
# Format the tokens in s in a table format.
- # The ENDMARKER is omitted.
- result = []
+ # The ENDMARKER and final NEWLINE are omitted.
f = BytesIO(s.encode('utf-8'))
- for type, token, start, end, line in tokenize(f.readline):
- if type == ENDMARKER:
- break
- type = tok_name[type]
- result.append(f" {type:10} {token!r:13} {start} {end}")
+ result = stringify_tokens_from_source(tokenize(f.readline), s)
+
self.assertEqual(result,
[" ENCODING 'utf-8' (0, 0) (0, 0)"] +
expected.rstrip().splitlines())
+ def test_implicit_newline(self):
+ # Make sure that the tokenizer puts in an implicit NEWLINE
+ # when the input lacks a trailing new line.
+ f = BytesIO("x".encode('utf-8'))
+ tokens = list(tokenize(f.readline))
+ self.assertEqual(tokens[-2].type, NEWLINE)
+ self.assertEqual(tokens[-1].type, ENDMARKER)
+
def test_basic(self):
self.check_tokenize("1 + 1", """\
NUMBER '1' (1, 0) (1, 1)
@@ -922,14 +947,9 @@ async def f():
class GenerateTokensTest(TokenizeTest):
def check_tokenize(self, s, expected):
# Format the tokens in s in a table format.
- # The ENDMARKER is omitted.
- result = []
+ # The ENDMARKER and final NEWLINE are omitted.
f = StringIO(s)
- for type, token, start, end, line in generate_tokens(f.readline):
- if type == ENDMARKER:
- break
- type = tok_name[type]
- result.append(f" {type:10} {token!r:13} {start} {end}")
+ result = stringify_tokens_from_source(generate_tokens(f.readline), s)
self.assertEqual(result, expected.rstrip().splitlines())
@@ -1022,8 +1042,8 @@ class Test_Tokenize(TestCase):
else:
return b''
- # skip the initial encoding token and the end token
- tokens = list(_tokenize(readline, encoding='utf-8'))[1:-1]
+ # skip the initial encoding token and the end tokens
+ tokens = list(_tokenize(readline, encoding='utf-8'))[1:-2]
expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
self.assertEqual(tokens, expected_tokens,
"bytes not decoded with encoding")
@@ -1039,8 +1059,8 @@ class Test_Tokenize(TestCase):
else:
return b''
- # skip the end token
- tokens = list(_tokenize(readline, encoding=None))[:-1]
+ # skip the end tokens
+ tokens = list(_tokenize(readline, encoding=None))[:-2]
expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
self.assertEqual(tokens, expected_tokens,
"string not tokenized when encoding is None")
@@ -1351,18 +1371,21 @@ class TestTokenize(TestCase):
# Test that 500 consecutive, one-line defs are OK
toks = list(tokenize(BytesIO(buf.encode('utf-8')).readline))
- self.assertEqual(toks[-2].string, 'OK') # [-1] is always ENDMARKER
+ self.assertEqual(toks[-3].string, 'OK') # [-1] is always ENDMARKER
+ # [-2] is always NEWLINE
def assertExactTypeEqual(self, opstr, *optypes):
tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline))
num_optypes = len(optypes)
- self.assertEqual(len(tokens), 2 + num_optypes)
+ self.assertEqual(len(tokens), 3 + num_optypes)
self.assertEqual(tok_name[tokens[0].exact_type],
tok_name[ENCODING])
for i in range(num_optypes):
self.assertEqual(tok_name[tokens[i + 1].exact_type],
tok_name[optypes[i]])
self.assertEqual(tok_name[tokens[1 + num_optypes].exact_type],
+ tok_name[token.NEWLINE])
+ self.assertEqual(tok_name[tokens[2 + num_optypes].exact_type],
tok_name[token.ENDMARKER])
def test_exact_type(self):
@@ -1515,7 +1538,7 @@ class TestRoundtrip(TestCase):
self.check_roundtrip("if x == 1:\n"
" print(x)\n")
self.check_roundtrip("# This is a comment\n"
- "# This also")
+ "# This also\n")
# Some people use different formatting conventions, which makes
# untokenize a little trickier. Note that this test involves trailing