granicus.if.org Git - python/commitdiff
author    Antoine Pitrou <solipsis@pitrou.net>    Fri, 7 May 2010 17:08:54 +0000 (17:08 +0000)
committer Antoine Pitrou <solipsis@pitrou.net>    Fri, 7 May 2010 17:08:54 +0000 (17:08 +0000)

Merged revisions 80928 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/branches/py3k

................
  r80928 | antoine.pitrou | 2010-05-07 19:04:02 +0200 (Fri, 07 May 2010) | 11 lines

  Merged revisions 80926 via svnmerge from
  svn+ssh://pythondev@svn.python.org/python/trunk

  ........
    r80926 | antoine.pitrou | 2010-05-07 18:50:34 +0200 (Fri, 07 May 2010) | 5 lines

    Issue #8571: Fix an internal error when compressing or decompressing a
    chunk larger than 1GB with the zlib module's compressor and decompressor
    objects.
  ........
................
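
For context, the failure described in the log above could be reproduced with a
snippet along the following lines (a rough sketch, not part of this commit; it
needs several gigabytes of free memory, and before the fix the calls below
failed with an internal zlib error instead of completing):

import zlib

_1G = 1024 * 1024 * 1024

# Slightly more than 1 GiB of highly compressible input.
data = b'x' * (_1G + 1024 * 1024)

# Compress through a compressor object in a single chunk...
c = zlib.compressobj(1)                      # level 1 keeps the run fast
compressed = c.compress(data) + c.flush()

# ...then feed it back through a decompressor object, again in one chunk.
d = zlib.decompressobj()
restored = d.decompress(compressed) + d.flush()
assert restored == data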

Lib/test/test_zlib.py
Misc/NEWS
Modules/zlibmodule.c

index c6bdda10e4acd38d13967529b4c7054c6dc2f01f..1b13274142411a862d3aac279032b7035b23b435 100644 (file)
--- a/Lib/test/test_zlib.py
+++ b/Lib/test/test_zlib.py
@@ -2,6 +2,7 @@ import unittest
 from test import support
 import binascii
 import random
+from test.support import precisionbigmemtest, _1G
 
 zlib = support.import_module('zlib')
 
@@ -93,8 +94,39 @@ class ExceptionTestCase(unittest.TestCase):
         self.assertRaises(ValueError, zlib.decompressobj().flush, -1)
 
 
-
-class CompressTestCase(unittest.TestCase):
+class BaseCompressTestCase(object):
+    def check_big_compress_buffer(self, size, compress_func):
+        _1M = 1024 * 1024
+        fmt = "%%0%dx" % (2 * _1M)
+        # Generate 10MB worth of random, and expand it by repeating it.
+        # The assumption is that zlib's memory is not big enough to exploit
+        # such spread out redundancy.
+        data = b''.join([random.getrandbits(8 * _1M).to_bytes(_1M, 'little')
+                        for i in range(10)])
+        data = data * (size // len(data) + 1)
+        try:
+            compress_func(data)
+        finally:
+            # Release memory
+            data = None
+
+    def check_big_decompress_buffer(self, size, decompress_func):
+        data = b'x' * size
+        try:
+            compressed = zlib.compress(data, 1)
+        finally:
+            # Release memory
+            data = None
+        data = decompress_func(compressed)
+        # Sanity check
+        try:
+            self.assertEqual(len(data), size)
+            self.assertEqual(len(data.strip(b'x')), 0)
+        finally:
+            data = None
+
+
+class CompressTestCase(BaseCompressTestCase, unittest.TestCase):
     # Test compression in one go (whole message compression)
     def test_speech(self):
         x = zlib.compress(HAMLET_SCENE)
@@ -108,9 +140,19 @@ class CompressTestCase(unittest.TestCase):
         for ob in x, bytearray(x):
             self.assertEqual(zlib.decompress(ob), data)
 
+    # Memory use of the following functions takes into account overallocation
+
+    @precisionbigmemtest(size=_1G + 1024 * 1024, memuse=3)
+    def test_big_compress_buffer(self, size):
+        compress = lambda s: zlib.compress(s, 1)
+        self.check_big_compress_buffer(size, compress)
 
+    @precisionbigmemtest(size=_1G + 1024 * 1024, memuse=2)
+    def test_big_decompress_buffer(self, size):
+        self.check_big_decompress_buffer(size, zlib.decompress)
 
-class CompressObjectTestCase(unittest.TestCase):
+
+class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
     # Test compression object
     def test_pair(self):
         # straightforward compress/decompress objects
@@ -399,6 +441,21 @@ class CompressObjectTestCase(unittest.TestCase):
             d.flush()
             self.assertRaises(ValueError, d.copy)
 
+    # Memory use of the following functions takes into account overallocation
+
+    @precisionbigmemtest(size=_1G + 1024 * 1024, memuse=3)
+    def test_big_compress_buffer(self, size):
+        c = zlib.compressobj(1)
+        compress = lambda s: c.compress(s) + c.flush()
+        self.check_big_compress_buffer(size, compress)
+
+    @precisionbigmemtest(size=_1G + 1024 * 1024, memuse=2)
+    def test_big_decompress_buffer(self, size):
+        d = zlib.decompressobj()
+        decompress = lambda s: d.decompress(s) + d.flush()
+        self.check_big_decompress_buffer(size, decompress)
+
+
 def genblock(seed, length, step=1024, generator=random):
     """length-byte stream of random data from a seed (in step-byte blocks)."""
     if seed is not None:
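
The new tests rely on the precisionbigmemtest decorator imported from
test.support at the top of the file: it lets a test run only when the memory
limit given to the test runner covers roughly size * memuse bytes, and it
passes the requested size to the test method. A simplified, hypothetical
sketch of that idea (for illustration only; the real implementation lives in
test.support):

import functools
import unittest

MEMLIMIT = 0          # bytes the test run may use; 0 means no limit was given

def precision_bigmem_sketch(size, memuse):
    """Skip the decorated test unless MEMLIMIT allows size * memuse bytes."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self):
            if MEMLIMIT < size * memuse:
                raise unittest.SkipTest("test needs %d bytes" % (size * memuse))
            return func(self, size)   # the test method receives the size to use
        return wrapper
    return decorator
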
index 3256e31c49e575a3f27b0c0b4d8d23674646face..6935296bbd80c2f6bb00b25d6fe89f6e5e6b158b 100644 (file)
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -40,6 +40,10 @@ Core and Builtins
 Library
 -------
 
+- Issue #8571: Fix an internal error when compressing or decompressing a
+  chunk larger than 1GB with the zlib module's compressor and decompressor
+  objects.
+
 - Issue #8573: asyncore _strerror() function might throw ValueError.
 
 - Issue #8483: asyncore.dispatcher's __getattr__ method produced confusing 
index 2f2e214906ffb26d352c5c48460e15a2acfc83ce..353d11a49413b20bfb918222e43801ac7ae49273 100644 (file)
--- a/Modules/zlibmodule.c
+++ b/Modules/zlibmodule.c
@@ -396,7 +396,8 @@ PyDoc_STRVAR(comp_compress__doc__,
 static PyObject *
 PyZlib_objcompress(compobject *self, PyObject *args)
 {
-    int err, inplen, length = DEFAULTALLOC;
+    int err, inplen;
+    Py_ssize_t length = DEFAULTALLOC;
     PyObject *RetVal;
     Py_buffer pinput;
     Byte *input;
@@ -477,8 +478,8 @@ PyDoc_STRVAR(decomp_decompress__doc__,
 static PyObject *
 PyZlib_objdecompress(compobject *self, PyObject *args)
 {
-    int err, inplen, old_length, length = DEFAULTALLOC;
-    int max_length = 0;
+    int err, inplen, max_length = 0;
+    Py_ssize_t old_length, length = DEFAULTALLOC;
     PyObject *RetVal;
     Py_buffer pinput;
     Byte *input;
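
The zlibmodule.c hunks above are the core of the fix: the output-buffer length
variables change from a C int to Py_ssize_t, so the buffer can keep growing
past what a 32-bit signed int can represent. Assuming the buffer grows by
doubling (an assumption about the surrounding code, not shown in the diff), a
quick arithmetic check shows why inputs just over 1 GiB were enough to trigger
the error:

# Back-of-the-envelope check; the doubling behaviour is assumed, not shown above.
INT_MAX = 2**31 - 1                  # largest value a 32-bit signed int can hold
needed = (1 << 30) + (1 << 20)       # ~1 GiB + 1 MiB, as used by the new tests
length = 1 << 30                     # a 1 GiB output buffer still fits in an int
doubled = length << 1                # the next growth step asks for 2 GiB
print(needed <= INT_MAX)             # True: the data size itself is representable
print(doubled <= INT_MAX)            # False: the grown buffer length overflows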