From: Victor Stinner Date: Tue, 2 Mar 2010 23:20:02 +0000 (+0000) Subject: Issue #7820: The parser tokenizer restores all bytes in the right order if the BOM X-Git-Tag: v2.7a4~54 X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=d23d3930ff1ce72263537bb050824129c6ac74f6;p=python Issue #7820: The parser tokenizer restores all bytes in the right order if the BOM check fails. Fix an assertion in pydebug mode. --- diff --git a/Lib/test/test_pep263.py b/Lib/test/test_pep263.py index e4faa9ff56..9286467adf 100644 --- a/Lib/test/test_pep263.py +++ b/Lib/test/test_pep263.py @@ -30,6 +30,17 @@ class PEP263Test(unittest.TestCase): self.assertEqual(d['a'], d['b']) self.assertEqual(len(d['a']), len(d['b'])) + def test_issue7820(self): + # Ensure that check_bom() restores all bytes in the right order if + # check_bom() fails in pydebug mode: a buffer starts with the first + # byte of a valid BOM, but next bytes are different + + # one byte in common with the UTF-16-LE BOM + self.assertRaises(SyntaxError, eval, '\xff\x20') + + # two bytes in common with the UTF-8 BOM + self.assertRaises(SyntaxError, eval, '\xef\xbb\x20') + def test_main(): test_support.run_unittest(PEP263Test) diff --git a/Misc/NEWS b/Misc/NEWS index a02681a1db..086fb274a3 100644 --- a/Misc/NEWS +++ b/Misc/NEWS @@ -12,6 +12,9 @@ What's New in Python 2.7 alpha 4? Core and Builtins ----------------- +- Issue #7820: The parser tokenizer restores all bytes in the right order if + the BOM check fails. + - Issue #7309: Fix unchecked attribute access when converting UnicodeEncodeError, UnicodeDecodeError, and UnicodeTranslateError to strings. 
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c index 04749c8657..b881e7ce7e 100644 --- a/Parser/tokenizer.c +++ b/Parser/tokenizer.c @@ -312,47 +312,57 @@ check_bom(int get_char(struct tok_state *), int set_readline(struct tok_state *, const char *), struct tok_state *tok) { - int ch = get_char(tok); + int ch1, ch2, ch3; + ch1 = get_char(tok); tok->decoding_state = 1; - if (ch == EOF) { + if (ch1 == EOF) { return 1; - } else if (ch == 0xEF) { - ch = get_char(tok); - if (ch != 0xBB) - goto NON_BOM; - ch = get_char(tok); - if (ch != 0xBF) - goto NON_BOM; + } else if (ch1 == 0xEF) { + ch2 = get_char(tok); + if (ch2 != 0xBB) { + unget_char(ch2, tok); + unget_char(ch1, tok); + return 1; + } + ch3 = get_char(tok); + if (ch3 != 0xBF) { + unget_char(ch3, tok); + unget_char(ch2, tok); + unget_char(ch1, tok); + return 1; + } #if 0 /* Disable support for UTF-16 BOMs until a decision is made whether this needs to be supported. */ - } else if (ch == 0xFE) { - ch = get_char(tok); - if (ch != 0xFF) - goto NON_BOM; + } else if (ch1 == 0xFE) { + ch2 = get_char(tok); + if (ch2 != 0xFF) { + unget_char(ch2, tok); + unget_char(ch1, tok); + return 1; + } if (!set_readline(tok, "utf-16-be")) return 0; tok->decoding_state = -1; - } else if (ch == 0xFF) { - ch = get_char(tok); - if (ch != 0xFE) - goto NON_BOM; + } else if (ch1 == 0xFF) { + ch2 = get_char(tok); + if (ch2 != 0xFE) { + unget_char(ch2, tok); + unget_char(ch1, tok); + return 1; + } if (!set_readline(tok, "utf-16-le")) return 0; tok->decoding_state = -1; #endif } else { - unget_char(ch, tok); + unget_char(ch1, tok); return 1; } if (tok->encoding != NULL) PyMem_FREE(tok->encoding); tok->encoding = new_string("utf-8", 5); /* resulting is in utf-8 */ return 1; - NON_BOM: - /* any token beginning with '\xEF', '\xFE', '\xFF' is a bad token */ - unget_char(0xFF, tok); /* XXX this will cause a syntax error */ - return 1; } /* Read a line of text from TOK into S, using the stream in TOK.