in tokenize.detect_encoding(), return utf-8-sig when a BOM is found
author    Benjamin Peterson <benjamin@python.org>
          Thu, 18 Mar 2010 22:29:52 +0000 (22:29 +0000)
committer Benjamin Peterson <benjamin@python.org>
          Thu, 18 Mar 2010 22:29:52 +0000 (22:29 +0000)
Doc/library/tokenize.rst
Lib/test/test_tokenize.py
Lib/tokenize.py
Misc/NEWS

index 7017045f618f45b2829212e62d7819f35eb5fa34..ac6ae36bd54b313b0ba5b5a36593f508885e567a 100644 (file)
--- a/Doc/library/tokenize.rst
+++ b/Doc/library/tokenize.rst
@@ -95,7 +95,8 @@ function it uses to do this is available:
 
     It detects the encoding from the presence of a UTF-8 BOM or an encoding
     cookie as specified in :pep:`263`. If both a BOM and a cookie are present,
-    but disagree, a SyntaxError will be raised.
+    but disagree, a SyntaxError will be raised. Note that if the BOM is found,
+    ``'utf-8-sig'`` will be returned as an encoding.
 
     If no encoding is specified, then the default of ``'utf-8'`` will be returned.
 
index 7b91ab2dbd39067f05db59aac433dcdce3109a94..1bfac4048ed3d503bf355bd2d5c2877502dad2d7 100644 (file)
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -726,7 +726,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines,
                           [b'# something\n', b'print(something)\n'])
 
@@ -747,7 +747,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [b'# coding=utf-8\n'])
 
     def test_mismatched_bom_and_cookie_first_line_raises_syntaxerror(self):
@@ -779,7 +779,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines,
                           [b'#! something\n', b'f# coding=utf-8\n'])
 
@@ -833,12 +833,12 @@ class TestDetectEncoding(TestCase):
 
         readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',))
         encoding, consumed_lines = detect_encoding(readline)
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [b'print(something)\n'])
 
         readline = self.get_readline((b'\xef\xbb\xbf',))
         encoding, consumed_lines = detect_encoding(readline)
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [])
 
         readline = self.get_readline((b'# coding: bad\n',))
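The BOM-versus-cookie mismatch exercised by test_mismatched_bom_and_cookie_first_line_raises_syntaxerror can be sketched the same way (input is illustrative; this behavior is unchanged by the commit):

    import io
    from tokenize import detect_encoding

    # A BOM followed by a cookie naming a different charset is a hard error.
    src = io.BytesIO(b'\xef\xbb\xbf# coding: latin-1\nprint(something)\n')
    try:
        detect_encoding(src.readline)
    except SyntaxError as exc:
        print('SyntaxError:', exc)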
index f82922b7f084801a8bde8a47b5011909c3fb7a85..89721371b998db1ddff4b283799ad1164b0663b9 100644 (file)
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -301,14 +301,16 @@ def detect_encoding(readline):
     in.
 
     It detects the encoding from the presence of a utf-8 bom or an encoding
-    cookie as specified in pep-0263. If both a bom and a cookie are present,
-    but disagree, a SyntaxError will be raised. If the encoding cookie is an
-    invalid charset, raise a SyntaxError.
+    cookie as specified in pep-0263. If both a bom and a cookie are present, but
+    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
+    charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
+    'utf-8-sig' is returned.
 
     If no encoding is specified, then the default of 'utf-8' will be returned.
     """
     bom_found = False
     encoding = None
+    default = 'utf-8'
     def read_or_stop():
         try:
             return readline()
@@ -340,8 +342,9 @@ def detect_encoding(readline):
     if first.startswith(BOM_UTF8):
         bom_found = True
         first = first[3:]
+        default = 'utf-8-sig'
     if not first:
-        return 'utf-8', []
+        return default, []
 
     encoding = find_cookie(first)
     if encoding:
@@ -349,13 +352,13 @@ def detect_encoding(readline):
 
     second = read_or_stop()
     if not second:
-        return 'utf-8', [first]
+        return default, [first]
 
     encoding = find_cookie(second)
     if encoding:
         return encoding, [first, second]
 
-    return 'utf-8', [first, second]
+    return default, [first, second]
 
 
 def tokenize(readline):
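A sketch of the fallback paths that the new default variable covers (inputs are illustrative):

    import io
    from tokenize import detect_encoding

    # BOM-only input: the BOM is stripped, nothing is consumed, and the
    # new default of 'utf-8-sig' is returned.
    print(detect_encoding(io.BytesIO(b'\xef\xbb\xbf').readline))
    # -> ('utf-8-sig', [])

    # No BOM and no cookie: the default stays 'utf-8'.
    print(detect_encoding(io.BytesIO(b'pass\n').readline))
    # -> ('utf-8', [b'pass\n'])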
@@ -394,6 +397,9 @@ def _tokenize(readline, encoding):
     indents = [0]
 
     if encoding is not None:
+        if encoding == "utf-8-sig":
+            # BOM will already have been stripped.
+            encoding = "utf-8"
         yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
     while True:             # loop over lines in stream
         try:
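Since detect_encoding() now reports 'utf-8-sig', _tokenize() maps it back to plain 'utf-8' for the ENCODING token it yields, the BOM having already been stripped from the stream. A quick illustrative check:

    import io
    import tokenize

    src = io.BytesIO(b'\xef\xbb\xbfx = 1\n')
    first = next(tokenize.tokenize(src.readline))
    print(first.type == tokenize.ENCODING, first.string)
    # -> True utf-8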
index 01c37ce3256242084fbd76f5093116540e40415d..f1b068b47dc2b95d7845dca61a6ee989b9130923 100644 (file)
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -283,6 +283,9 @@ C-API
 Library
 -------
 
+- ``tokenize.detect_encoding`` now returns ``'utf-8-sig'`` when a UTF-8 BOM is
+  detected.
+
 - Issue #8024: Update the Unicode database to 5.2.
 
 - Issue #6716/2: Backslash-replace error output in compilall.