Untokenize: A logically incorrect assert tested user input validity.
author    Terry Jan Reedy <tjreedy@udel.edu>
          Mon, 17 Feb 2014 21:45:48 +0000 (16:45 -0500)
committer Terry Jan Reedy <tjreedy@udel.edu>
          Mon, 17 Feb 2014 21:45:48 +0000 (16:45 -0500)
Replace it with correct logic that raises ValueError for bad input.
Issues #8478 and #12691 reported the incorrect logic.
Add an Untokenize test case and an initial test method.
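
For illustration, a minimal sketch of the new behavior (assuming the patched
Lib/tokenize.py; Untokenizer is an internal helper class, not documented API):
a start position that precedes the previously emitted end now raises ValueError
instead of relying on an inverted assert.

    # Sketch only: exercises the check this commit adds to add_whitespace().
    # Assumes the patched tokenize module; Untokenizer is an internal class.
    from tokenize import Untokenizer

    u = Untokenizer()
    u.prev_row, u.prev_col = 2, 2    # pretend output already reached (2, 2)

    try:
        u.add_whitespace((1, 3))     # start on an earlier row: bad input order
    except ValueError as exc:
        print(exc)                   # start (1,3) precedes previous end (2,2)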

Lib/test/test_tokenize.py
Lib/tokenize.py

diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 6ed859707f8917332f0d4d9e0ffd295124997843..df4e1653c84e0051d436112d39eaa53b7a32aa96 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -638,7 +638,7 @@ Legacy unicode literals:
 from test import support
 from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
                      STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
-                     open as tokenize_open)
+                     open as tokenize_open, Untokenizer)
 from io import BytesIO
 from unittest import TestCase
 import os, sys, glob
@@ -1153,6 +1153,19 @@ class TestTokenize(TestCase):
         # See http://bugs.python.org/issue16152
         self.assertExactTypeEqual('@          ', token.AT)
 
+class UntokenizeTest(TestCase):
+    
+    def test_bad_input_order(self):
+        u = Untokenizer()
+        u.prev_row = 2
+        u.prev_col = 2
+        with self.assertRaises(ValueError) as cm:
+            u.add_whitespace((1,3))
+        self.assertEqual(cm.exception.args[0], 
+                'start (1,3) precedes previous end (2,2)')
+        self.assertRaises(ValueError, u.add_whitespace, (2,1))
+
+
 __test__ = {"doctests" : doctests, 'decistmt': decistmt}
 
 def test_main():
@@ -1162,6 +1175,7 @@ def test_main():
     support.run_unittest(Test_Tokenize)
     support.run_unittest(TestDetectEncoding)
     support.run_unittest(TestTokenize)
+    support.run_unittest(UntokenizeTest)
 
 if __name__ == "__main__":
     test_main()
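
To exercise just the new test class outside of test_main(), one option is to
load it by name through unittest (a sketch, assuming the patched checkout is
on the path so that test.test_tokenize is importable):

    # Sketch: run only the new UntokenizeTest case.
    import unittest

    suite = unittest.defaultTestLoader.loadTestsFromName(
        "test.test_tokenize.UntokenizeTest")
    unittest.TextTestRunner(verbosity=2).run(suite)
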
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 294bf9a068a3aabf15982a39f3255f2d27446a3c..c156450d047fa005647955ff8c9a5755271b7d50 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -229,7 +229,9 @@ class Untokenizer:
 
     def add_whitespace(self, start):
         row, col = start
-        assert row <= self.prev_row
+        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
+            raise ValueError("start ({},{}) precedes previous end ({},{})"
+                             .format(row, col, self.prev_row, self.prev_col))
         col_offset = col - self.prev_col
         if col_offset:
             self.tokens.append(" " * col_offset)
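
The replacement guard is simply a (row, col) ordering check and is equivalent
to comparing the two positions as tuples. A small sketch of that equivalence
(the _precedes helper is hypothetical, not part of the patch):

    # Sketch: the guard added above is equivalent to a tuple comparison
    # on (row, col) pairs.  _precedes is a hypothetical helper.
    def _precedes(start, prev_end):
        """Return True if start comes before prev_end in (row, col) order."""
        return start < prev_end      # tuples compare row first, then column

    assert _precedes((1, 3), (2, 2))       # earlier row -> ValueError case
    assert _precedes((2, 1), (2, 2))       # same row, earlier col -> ValueError
    assert not _precedes((2, 2), (2, 2))   # equal position is allowed
    assert not _precedes((3, 0), (2, 2))   # later row is allowed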