granicus.if.org Git - python/commitdiff
Issue #9974: When untokenizing, use row info to insert backslash+newline.
authorTerry Jan Reedy <tjreedy@udel.edu>
Mon, 24 Feb 2014 04:33:08 +0000 (23:33 -0500)
committerTerry Jan Reedy <tjreedy@udel.edu>
Mon, 24 Feb 2014 04:33:08 +0000 (23:33 -0500)
Original patches by A. Kuchling and G. Rees (#12691).

Lib/test/test_tokenize.py
Lib/tokenize.py

index 7a472b21f37bff5c453d0efcbf634f182046b7cd..38611a79eec09a1b32aa3d1a2b5e1a4b796ad3a8 100644 (file)
@@ -2,7 +2,7 @@ doctests = """
 Tests for the tokenize module.
 
 The tests can be really simple. Given a small fragment of source
-code, print out a table with tokens. The ENDMARK is omitted for
+code, print out a table with tokens. The ENDMARKER is omitted for
 brevity.
 
     >>> dump_tokens("1 + 1")
@@ -1180,6 +1180,7 @@ class TestTokenize(TestCase):
 class UntokenizeTest(TestCase):
 
     def test_bad_input_order(self):
+        # raise if previous row
         u = Untokenizer()
         u.prev_row = 2
         u.prev_col = 2
@@ -1187,8 +1188,22 @@ class UntokenizeTest(TestCase):
             u.add_whitespace((1,3))
         self.assertEqual(cm.exception.args[0],
                 'start (1,3) precedes previous end (2,2)')
+        # raise if previous column in row
         self.assertRaises(ValueError, u.add_whitespace, (2,1))
 
+    def test_backslash_continuation(self):
+        # The problem is that <whitespace>\<newline> leaves no token
+        u = Untokenizer()
+        u.prev_row = 1
+        u.prev_col = 1
+        u.tokens = []
+        u.add_whitespace((2, 0))
+        self.assertEqual(u.tokens, ['\\\n'])
+        u.prev_row = 2
+        u.add_whitespace((4, 4))
+        self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', '    '])
+        self.assertTrue(roundtrip('a\n  b\n    c\n  \\\n  c\n'))
+
     def test_iter_compat(self):
         u = Untokenizer()
         token = (NAME, 'Hello')
index 7356a88b2172c607c097e167430d12fd510d1d79..9bbb512d87f7eb04bd18f048c9d08cae22118250 100644 (file)
@@ -234,6 +234,10 @@ class Untokenizer:
         if row < self.prev_row or row == self.prev_row and col < self.prev_col:
             raise ValueError("start ({},{}) precedes previous end ({},{})"
                              .format(row, col, self.prev_row, self.prev_col))
+        row_offset = row - self.prev_row
+        if row_offset:
+            self.tokens.append("\\\n" * row_offset)
+            self.prev_col = 0
         col_offset = col - self.prev_col
         if col_offset:
             self.tokens.append(" " * col_offset)
@@ -248,6 +252,8 @@ class Untokenizer:
             if tok_type == ENCODING:
                 self.encoding = token
                 continue
+            if tok_type == ENDMARKER:
+                break
             self.add_whitespace(start)
             self.tokens.append(token)
             self.prev_row, self.prev_col = end