granicus.if.org Git - python/commitdiff
#3773: Check for errors around the use of PyTokenizer_FindEncoding().
authorAmaury Forgeot d'Arc <amauryfa@gmail.com>
Thu, 4 Sep 2008 22:34:09 +0000 (22:34 +0000)
committerAmaury Forgeot d'Arc <amauryfa@gmail.com>
Thu, 4 Sep 2008 22:34:09 +0000 (22:34 +0000)
reviewed by Brett Cannon.

Misc/NEWS
Parser/tokenizer.c
Python/import.c

index 0e51c80c8bbc6f367cc20c0b0d7617c2b9ee494b..889cead8a693f7c00b5203ab5e6808768302c939 100644 (file)
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -12,6 +12,9 @@ What's New in Python 3.0 release candidate 1
 Core and Builtins
 -----------------
 
+- Issue 3774: Added a few more checks in PyTokenizer_FindEncoding to handle
+  error conditions.
+
 - Issue 3594: Fix Parser/tokenizer.c:fp_setreadl() to open the file being
   tokenized by either a file path or file pointer for the benefit of
   PyTokenizer_FindEncoding().
index e4cf8e4cc5524ff184193f996bd1f2af21ad839b..a0406965790d57c1549ee46f2549f75dd30c45d9 100644 (file)
@@ -1610,7 +1610,10 @@ PyTokenizer_FindEncoding(int fd)
        fclose(fp);
        if (tok->encoding) {
             encoding = (char *)PyMem_MALLOC(strlen(tok->encoding) + 1);
-            strcpy(encoding, tok->encoding);
+            if (encoding)
+                strcpy(encoding, tok->encoding);
+            else
+                PyErr_NoMemory();
         }
        PyTokenizer_Free(tok);
        return encoding;
index d87d7515a0d887e406f25476693478b39a758107..9c077fe8a66dd7865a1fde6136d7bd4dcb36fd41 100644 (file)
@@ -2830,6 +2830,8 @@ call_find_module(char *name, PyObject *path)
                           memory. */
                        found_encoding = PyTokenizer_FindEncoding(fd);
                        lseek(fd, 0, 0); /* Reset position */
+                       if (found_encoding == NULL && PyErr_Occurred())
+                               return NULL;
                        encoding = (found_encoding != NULL) ? found_encoding :
                                   (char*)PyUnicode_GetDefaultEncoding();
                }