         found, consumed_lines = detect_encoding(rl)
         self.assertEqual(found, "iso-8859-1")
+    def test_syntaxerror_latin1(self):
+        # Issue 14629: need to raise SyntaxError if the first
+        # line(s) have non-UTF-8 characters
+        lines = (
+            b'print("\xdf")', # Latin-1: LATIN SMALL LETTER SHARP S
+        )
+        readline = self.get_readline(lines)
+        self.assertRaises(SyntaxError, detect_encoding, readline)
+
+
     def test_utf8_normalization(self):
         # See get_normal_name() in tokenizer.c.
         encodings = ("utf-8", "utf-8-mac", "utf-8-unix")
     def find_cookie(line):
         try:
-            line_string = line.decode('ascii')
+            # Decode as UTF-8. Either the line is an encoding declaration,
+            # in which case it should be pure ASCII, or it must be UTF-8
+            # per default encoding.
+            line_string = line.decode('utf-8')
         except UnicodeDecodeError:
-            return None
+            raise SyntaxError("invalid or missing encoding declaration")
         matches = cookie_re.findall(line_string)
         if not matches:
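
For illustration only (not part of the patch): with this change, find_cookie() raises SyntaxError when a line cannot be decoded as UTF-8, where it previously returned None and detect_encoding() fell back to the default "utf-8". A minimal sketch of the patched behaviour, assuming the modified Lib/tokenize.py is the tokenize module in use:

    import io
    import tokenize

    # Latin-1 bytes with no coding cookie: UTF-8 decoding fails, so the
    # patched find_cookie() raises SyntaxError instead of returning None.
    source = b'print("\xdf")\n'
    try:
        tokenize.detect_encoding(io.BytesIO(source).readline)
    except SyntaxError as exc:
        print(exc)  # invalid or missing encoding declaration

    # The same bytes are still accepted when a cookie names the encoding;
    # the cookie line itself is pure ASCII, so it decodes cleanly as UTF-8.
    source = b'# coding: iso-8859-1\nprint("\xdf")\n'
    encoding, lines_read = tokenize.detect_encoding(io.BytesIO(source).readline)
    print(encoding)  # iso-8859-1
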
 Library
 -------
+- Issue #14629: Raise SyntaxError in tokenize.detect_encoding if the
+  first two lines have non-UTF-8 characters without an encoding declaration.
+
 - Issue #14308: Fix an exception when a "dummy" thread is in the threading
   module's active list after a fork().