granicus.if.org Git - python/commitdiff
Issue 21977: Minor improvements to the regexes in the tokenizer example.
author Raymond Hettinger <python@rcn.com>
Mon, 14 Jul 2014 08:52:00 +0000 (01:52 -0700)
committer Raymond Hettinger <python@rcn.com>
Mon, 14 Jul 2014 08:52:00 +0000 (01:52 -0700)
Doc/library/re.rst

index 451ee2d2a34eab9422aa8e7c555bad626debc020..a835c14f0ca3016762cad84cd3e122cf7c22d978 100644 (file)
@@ -1340,9 +1340,9 @@ successive matches::
             ('ASSIGN',  r':='),          # Assignment operator
             ('END',     r';'),           # Statement terminator
             ('ID',      r'[A-Za-z]+'),   # Identifiers
-            ('OP',      r'[+*\/\-]'),    # Arithmetic operators
+            ('OP',      r'[+\-*/]'),     # Arithmetic operators
             ('NEWLINE', r'\n'),          # Line endings
-            ('SKIP',    r'[ \t]'),       # Skip over spaces and tabs
+            ('SKIP',    r'[ \t]+'),      # Skip over spaces and tabs
         ]
         tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_specification)
         get_token = re.compile(tok_regex).match
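
For reference, a minimal sketch of how the updated patterns behave once joined into the single alternation built above. This is not the full Doc/library/re.rst example (the NUMBER entry and other parts of that example are omitted); the sample input line and the printing loop are illustrative additions, not part of the commit.

    import re

    # Token specification as it reads after this commit (other entries from
    # the full docs example are omitted here for brevity).
    token_specification = [
        ('ASSIGN',  r':='),          # Assignment operator
        ('END',     r';'),           # Statement terminator
        ('ID',      r'[A-Za-z]+'),   # Identifiers
        ('OP',      r'[+\-*/]'),     # Arithmetic operators
        ('NEWLINE', r'\n'),          # Line endings
        ('SKIP',    r'[ \t]+'),      # Skip over spaces and tabs
    ]
    tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_specification)
    get_token = re.compile(tok_regex).match

    line = 'x := y + z;'             # illustrative input, not from the docs
    pos = 0
    mo = get_token(line, pos)
    while mo is not None:
        kind = mo.lastgroup
        if kind != 'SKIP':           # with r'[ \t]+', one SKIP match covers a whole run of blanks
            print(kind, repr(mo.group(kind)))
        pos = mo.end()
        mo = get_token(line, pos)

The OP change drops the unnecessary escaping of '/' (it has no special meaning inside a character class) and keeps '-' escaped so it cannot be read as a range, while the SKIP change lets a single match consume a run of spaces and tabs instead of advancing one character at a time.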