granicus.if.org Git - python/commitdiff
Reduced memory burden by iterating over the normalization test input
file directly (instead of sucking it all into a list of lines first).
author    Tim Peters <tim.peters@gmail.com>
          Sun, 24 Nov 2002 19:19:09 +0000 (19:19 +0000)
committer Tim Peters <tim.peters@gmail.com>
          Sun, 24 Nov 2002 19:19:09 +0000 (19:19 +0000)

Lib/test/regrtest.py
Lib/test/test_normalization.py

index d0650e6aace1f24676a6c0f4867f69abc71f2ee9..f870527088e98ef99a8fefb6000509c288ab1ee4 100755 (executable)
@@ -506,7 +506,7 @@ def printlist(x, width=70, indent=4):
 #     test_normalization
 #         Whether a skip is expected here depends on whether a large test
 #         input file has been downloaded.  test_normalization.skip_expected
-#         controls that
+#         controls that.
 
 _expectations = {
     'win32':
index b6737391cb56bd96eadb5ca1b2de2691b42f790e..7e18c973ffa616aa86cc4da2d4ba60df09903dd9 100644 (file)
@@ -33,10 +33,8 @@ def test_main():
         raise TestSkipped(TESTDATAFILE + " not found, download from " +
                     "http://www.unicode.org/Public/UNIDATA/" + TESTDATAFILE)
 
-    data = open(TESTDATAFILE).readlines()
-
     part1_data = {}
-    for line in data:
+    for line in open(TESTDATAFILE):
         if '#' in line:
             line = line.split('#')[0]
         line = line.strip()