granicus.if.org Git - python/commitdiff
Issue #9974: When untokenizing, use row info to insert backslash+newline.
author: Terry Jan Reedy <tjreedy@udel.edu>
Mon, 24 Feb 2014 04:32:59 +0000 (23:32 -0500)
committer: Terry Jan Reedy <tjreedy@udel.edu>
Mon, 24 Feb 2014 04:32:59 +0000 (23:32 -0500)
Original patches by A. Kuchling and G. Rees (#12691).

Lib/test/test_tokenize.py
Lib/tokenize.py

index bbac59bd6c86c6acfecad1a8d7bd39da6a4b3f77..850aa9ca8e067ac953a26e575127a38434bfc5d3 100644 (file)
@@ -4,7 +4,7 @@ Tests for the tokenize module.
     >>> import glob, random, sys
 
 The tests can be really simple. Given a small fragment of source
-code, print out a table with tokens. The ENDMARK is omitted for
+code, print out a table with tokens. The ENDMARKER is omitted for
 brevity.
 
     >>> dump_tokens("1 + 1")
@@ -618,6 +618,7 @@ def decistmt(s):
 class UntokenizeTest(TestCase):
 
     def test_bad_input_order(self):
+        # raise if previous row
         u = Untokenizer()
         u.prev_row = 2
         u.prev_col = 2
@@ -625,8 +626,21 @@ class UntokenizeTest(TestCase):
             u.add_whitespace((1,3))
         self.assertEqual(cm.exception.args[0],
                 'start (1,3) precedes previous end (2,2)')
+        # raise if previous column in row
         self.assertRaises(ValueError, u.add_whitespace, (2,1))
 
+    def test_backslash_continuation(self):
+        # The problem is that <whitespace>\<newline> leaves no token
+        u = Untokenizer()
+        u.prev_row = 1
+        u.prev_col =  1
+        u.tokens = []
+        u.add_whitespace((2, 0))
+        self.assertEqual(u.tokens, ['\\\n'])
+        u.prev_row = 2
+        u.add_whitespace((4, 4))
+        self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', '    '])
+
     def test_iter_compat(self):
         u = Untokenizer()
         token = (NAME, 'Hello')
index a16d475c0ebc232bd72aed3164fba6cbcac771f9..661ddeb8e6b5cd167013a34fbc211b402d4b1b90 100644 (file)
@@ -188,6 +188,10 @@ class Untokenizer:
         if row < self.prev_row or row == self.prev_row and col < self.prev_col:
             raise ValueError("start ({},{}) precedes previous end ({},{})"
                              .format(row, col, self.prev_row, self.prev_col))
+        row_offset = row - self.prev_row
+        if row_offset:
+            self.tokens.append("\\\n" * row_offset)
+            self.prev_col = 0
         col_offset = col - self.prev_col
         if col_offset:
             self.tokens.append(" " * col_offset)
@@ -199,6 +203,8 @@ class Untokenizer:
                 self.compat(t, it)
                 break
             tok_type, token, start, end, line = t
+            if tok_type == ENDMARKER:
+                break
             self.add_whitespace(start)
             self.tokens.append(token)
             self.prev_row, self.prev_col = end