granicus.if.org Git - python/commitdiff
Issue #25977: Fix typos in Lib/tokenize.py
author    Berker Peksag <berker.peksag@gmail.com>
          Tue, 29 Dec 2015 23:42:43 +0000 (01:42 +0200)
committer Berker Peksag <berker.peksag@gmail.com>
          Tue, 29 Dec 2015 23:42:43 +0000 (01:42 +0200)
Patch by John Walker.

Lib/tokenize.py

diff --cc Lib/tokenize.py
index 2237c3a55407b50bc71c15ae57a42b918c5af9ed,9fd676c5b24b36ff8eadbfdc1b6e4c296af86940..7a003580a4a9ad1fb0e1bfe3d451b10f204d3b91
@@@ -636,31 -638,14 +636,31 @@@ def _tokenize(readline, encoding)
                          contstr = line[start:]
                          contline = line
                          break
 -                elif initial in single_quoted or \
 -                    token[:2] in single_quoted or \
 -                    token[:3] in single_quoted:
 +
 +                # Check up to the first 3 chars of the token to see if
 +                #  they're in the single_quoted set. If so, they start
 +                #  a string.
 +                # We're using the first 3, because we're looking for
 +                #  "rb'" (for example) at the start of the token. If
 +                #  we switch to longer prefixes, this needs to be
 +                #  adjusted.
 +                # Note that initial == token[:1].
-                 # Also note that single quote checking must come afer
++                # Also note that single quote checking must come after
 +                #  triple quote checking (above).
 +                elif (initial in single_quoted or
 +                      token[:2] in single_quoted or
 +                      token[:3] in single_quoted):
                      if token[-1] == '\n':                  # continued string
                          strstart = (lnum, start)
 -                        endprog = _compile(endpats[initial] or
 -                                           endpats[token[1]] or
 -                                           endpats[token[2]])
 +                        # Again, using the first 3 chars of the
 +                        #  token. This is looking for the matching end
 +                        #  regex for the correct type of quote
 +                        #  character. So it's really looking for
 +                        #  endpats["'"] or endpats['"'], by trying to
 +                        #  skip string prefix characters, if any.
 +                        endprog = _compile(endpats.get(initial) or
 +                                           endpats.get(token[1]) or
 +                                           endpats.get(token[2]))
                          contstr, needcont = line[start:], 1
                          contline = line
                          break
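
To make the merged elif branch easier to follow outside the diff, here is a minimal, self-contained sketch of the same prefix-skipping lookup. The single_quoted set and endpats values below are simplified stand-ins for the real tables in Lib/tokenize.py, and find_endpat is a hypothetical helper written for illustration, not a function in the module.

    # Toy stand-ins for tokenize.py's module-level tables (hypothetical
    # simplified values, not the real patterns).
    single_quoted = {"'", '"', "r'", 'r"', "b'", 'b"', "rb'", 'rb"'}
    endpats = {"'": r"[^'\\]*(?:\\.[^'\\]*)*'",
               '"': r'[^"\\]*(?:\\.[^"\\]*)*"'}

    def find_endpat(token):
        # Check up to the first 3 chars of the token, exactly as the
        # hunk above does, so a prefixed string such as rb'...' is
        # recognized as the start of a single-quoted string.
        initial = token[:1]
        if (initial in single_quoted or
                token[:2] in single_quoted or
                token[:3] in single_quoted):
            # dict.get returns None for prefix characters such as 'r'
            # or 'b', so the `or` chain skips past them and lands on
            # the end pattern for the actual quote character.
            return (endpats.get(initial) or
                    endpats.get(token[1]) or
                    endpats.get(token[2]))
        return None

    print(find_endpat("rb'still open\n"))  # -> the pattern stored at endpats["'"]

Because dict.get returns None rather than raising KeyError when a character is a prefix instead of a quote, the or chain falls through safely to the real quote character; that short-circuiting is what the endpats.get(...) calls in the merged version rely on.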