From: Victor Stinner
Date: Mon, 17 Oct 2016 16:13:46 +0000 (+0200)
Subject: Merge 3.6: Issue #28409: regrtest: fix the parser of command line arguments.
X-Git-Tag: v3.6.0b3~113
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=a506a93b0b6602be1613cee752d62feca41bee24;p=python

Merge 3.6: Issue #28409: regrtest: fix the parser of command line arguments.
---

a506a93b0b6602be1613cee752d62feca41bee24
diff --cc Lib/test/libregrtest/cmdline.py
index c9e2f3d060,0000000000..891b00c753
mode 100644,000000..100644
--- a/Lib/test/libregrtest/cmdline.py
+++ b/Lib/test/libregrtest/cmdline.py
@@@ -1,344 -1,0 +1,347 @@@
+import argparse
+import os
+import sys
+from test import support
+
+
+USAGE = """\
+python -m test [options] [test_name1 [test_name2 ...]]
+python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
+"""
+
+DESCRIPTION = """\
+Run Python regression tests.
+
+If no arguments or options are provided, finds all files matching
+the pattern "test_*" in the Lib/test subdirectory and runs
+them in alphabetical order (but see -M and -u, below, for exceptions).
+
+For more rigorous testing, it is useful to use the following
+command line:
+
+python -E -Wd -m test [options] [test_name1 ...]
+"""
+
+EPILOG = """\
+Additional option details:
+
+-r randomizes test execution order. You can use --randseed=int to provide an
+int seed value for the randomizer; this is useful for reproducing troublesome
+test orders.
+
+-s On the first invocation of regrtest using -s, the first test file found
+or the first test file given on the command line is run, and the name of
+the next test is recorded in a file named pynexttest. If run from the
+Python build directory, pynexttest is located in the 'build' subdirectory,
+otherwise it is located in tempfile.gettempdir(). On subsequent runs,
+the test in pynexttest is run, and the next test is written to pynexttest.
+When the last test has been run, pynexttest is deleted. In this way it
+is possible to single step through the test files. This is useful when
+doing memory analysis on the Python interpreter, whose process tends to
+consume too many resources to run the full regression test non-stop.
+
+-S is used to continue running tests after an aborted run. It will
+maintain the order of a standard run (i.e., this assumes -r is not used).
+This is useful after the tests have prematurely stopped for some external
+reason and you want to start running from where you left off rather
+than starting from the beginning.
+
+-f reads the names of tests from the file given as f's argument, one
+or more test names per line. Whitespace is ignored. Blank lines and
+lines beginning with '#' are ignored. This is especially useful for
+whittling down failures involving interactions among tests.
+
+-L causes the leaks(1) command to be run just before exit if it exists.
+leaks(1) is available on Mac OS X and presumably on some other
+FreeBSD-derived systems.
+
+-R runs each test several times and examines sys.gettotalrefcount() to
+see if the test appears to be leaking references. The argument should
+be of the form stab:run:fname where 'stab' is the number of times the
+test is run to let gettotalrefcount settle down, 'run' is the number of
+further times it is run, and 'fname' is the name of the file the
+reports are written to. These parameters all have defaults (5, 4 and
+"reflog.txt" respectively), and the minimal invocation is '-R :'.
+
+-M runs tests that require an exorbitant amount of memory. These tests
+typically try to ascertain that containers keep working when containing more
+than 2 billion objects, which only works on 64-bit systems. There are also
+some tests that try to exhaust the address space of the process, which only
+makes sense on 32-bit systems with at least 2Gb of memory. The passed-in
+memlimit, which is a string in the form of '2.5Gb', determines how much
+memory the tests will limit themselves to (but they may go slightly over.)
+The number shouldn't be more memory than the machine has (including swap
+memory). You should also keep in mind that swap memory is generally much,
+much slower than RAM, and setting memlimit to all available RAM or higher
+will heavily tax the machine. On the other hand, it is no use running these
+tests with a limit of less than 2.5Gb, and many require more than 20Gb.
+Tests that expect to use more than memlimit memory will be skipped. The
+big-memory tests generally run very, very long.
+
+-u is used to specify which special resource intensive tests to run,
+such as those requiring large file support or network connectivity.
+The argument is a comma-separated list of words indicating the
+resources to test. Currently only the following are defined:
+
+    all -       Enable all special resources.
+
+    none -      Disable all special resources (this is the default).
+
+    audio -     Tests that use the audio device. (There are known
+                cases of broken audio drivers that can crash Python or
+                even the Linux kernel.)
+
+    curses -    Tests that use curses and will modify the terminal's
+                state and output modes.
+
+    largefile - It is okay to run some tests that may create huge
+                files. These tests can take a long time and may
+                consume >2GB of disk space temporarily.
+
+    network -   It is okay to run tests that use external network
+                resources, e.g. testing SSL support for sockets.
+
+    decimal -   Test the decimal module against a large suite that
+                verifies compliance with standards.
+
+    cpu -       Used for certain CPU-heavy tests.
+
+    subprocess - Run all tests for the subprocess module.
+
+    urlfetch -  It is okay to download files required for testing.
+
+    gui -       Run tests that require a running GUI.
+
+    tzdata -    Run tests that require timezone data.
+
+To enable all resources except one, use '-uall,-<resource>'. For
+example, to run all the tests except for the gui tests, give the
+option '-uall,-gui'.
+"""
+
+
+RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network',
+                  'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui', 'tzdata')
+
+class _ArgParser(argparse.ArgumentParser):
+
+    def error(self, message):
+        super().error(message + "\nPass -h or --help for complete help.")
+
+
+def _create_parser():
+    # Set prog to prevent the uninformative "__main__.py" from displaying in
+    # error messages when using "python -m test ...".
+    parser = _ArgParser(prog='regrtest.py',
+                        usage=USAGE,
+                        description=DESCRIPTION,
+                        epilog=EPILOG,
+                        add_help=False,
+                        formatter_class=argparse.RawDescriptionHelpFormatter)
+
+    # Arguments whose help has this clause appended are described further in
+    # the epilog's "Additional option details" section.
+    more_details = '  See the section at bottom for more details.'
+
+    group = parser.add_argument_group('General options')
+    # We add help explicitly to control what argument group it renders under.
+    group.add_argument('-h', '--help', action='help',
+                       help='show this help message and exit')
+    group.add_argument('--timeout', metavar='TIMEOUT', type=float,
+                       help='dump the traceback and exit if a test takes '
+                            'more than TIMEOUT seconds; disabled if TIMEOUT '
+                            'is zero or negative')
+    group.add_argument('--wait', action='store_true',
+                       help='wait for user input, e.g., allow a debugger '
+                            'to be attached')
+    group.add_argument('--slaveargs', metavar='ARGS')
+    group.add_argument('-S', '--start', metavar='START',
+                       help='the name of the test at which to start.' +
+                            more_details)
+
+    group = parser.add_argument_group('Verbosity')
+    group.add_argument('-v', '--verbose', action='count',
+                       help='run tests in verbose mode with output to stdout')
+    group.add_argument('-w', '--verbose2', action='store_true',
+                       help='re-run failed tests in verbose mode')
+    group.add_argument('-W', '--verbose3', action='store_true',
+                       help='display test output on failure')
+    group.add_argument('-q', '--quiet', action='store_true',
+                       help='no output unless one or more tests fail')
+    group.add_argument('-o', '--slowest', action='store_true',
+                       dest='print_slow',
+                       help='print the slowest 10 tests')
+    group.add_argument('--header', action='store_true',
+                       help='print header with interpreter info')
+
+    group = parser.add_argument_group('Selecting tests')
+    group.add_argument('-r', '--randomize', action='store_true',
+                       help='randomize test execution order.' + more_details)
+    group.add_argument('--randseed', metavar='SEED',
+                       dest='random_seed', type=int,
+                       help='pass a random seed to reproduce a previous '
+                            'random run')
+    group.add_argument('-f', '--fromfile', metavar='FILE',
+                       help='read names of tests to run from a file.' +
+                            more_details)
+    group.add_argument('-x', '--exclude', action='store_true',
+                       help='arguments are tests to *exclude*')
+    group.add_argument('-s', '--single', action='store_true',
+                       help='single step through a set of tests.' +
+                            more_details)
+    group.add_argument('-m', '--match', metavar='PAT',
+                       dest='match_tests',
+                       help='match test cases and methods with glob pattern PAT')
+    group.add_argument('-G', '--failfast', action='store_true',
+                       help='fail as soon as a test fails (only with -v or -W)')
+    group.add_argument('-u', '--use', metavar='RES1,RES2,...',
+                       action='append', type=resources_list,
+                       help='specify which special resource intensive tests '
+                            'to run.' + more_details)
+    group.add_argument('-M', '--memlimit', metavar='LIMIT',
+                       help='run very large memory-consuming tests.' +
+                            more_details)
+    group.add_argument('--testdir', metavar='DIR',
+                       type=relative_filename,
+                       help='execute test files in the specified directory '
+                            '(instead of the Python stdlib test suite)')
+
+    group = parser.add_argument_group('Special runs')
+    group.add_argument('-l', '--findleaks', action='store_true',
+                       help='if GC is available, detect tests that leak memory')
+    group.add_argument('-L', '--runleaks', action='store_true',
+                       help='run the leaks(1) command just before exit.' +
+                            more_details)
+    group.add_argument('-R', '--huntrleaks', metavar='RUNCOUNTS',
+                       type=huntrleaks,
+                       help='search for reference leaks (needs debug build, '
+                            'very slow).' + more_details)
+    group.add_argument('-j', '--multiprocess', metavar='PROCESSES',
+                       dest='use_mp', type=int,
+                       help='run PROCESSES processes at once')
+    group.add_argument('-T', '--coverage', action='store_true',
+                       dest='trace',
+                       help='turn on code coverage tracing using the trace '
+                            'module')
+    group.add_argument('-D', '--coverdir', metavar='DIR',
+                       type=relative_filename,
+                       help='directory where coverage files are put')
+    group.add_argument('-N', '--nocoverdir',
+                       action='store_const', const=None, dest='coverdir',
+                       help='put coverage files alongside modules')
+    group.add_argument('-t', '--threshold', metavar='THRESHOLD',
+                       type=int,
+                       help='call gc.set_threshold(THRESHOLD)')
+    group.add_argument('-n', '--nowindows', action='store_true',
+                       help='suppress error message boxes on Windows')
+    group.add_argument('-F', '--forever', action='store_true',
+                       help='run the specified tests in a loop until an '
+                            'error happens')
+    group.add_argument('--list-tests', action='store_true',
+                       help="only write the names of tests that will be run; "
+                            "don't execute them")
+    group.add_argument('-P', '--pgo', dest='pgo', action='store_true',
+                       help='enable Profile Guided Optimization training')
+
-    parser.add_argument('args', nargs='*',
-                        help=argparse.SUPPRESS)
-
+    return parser
+
+
+def relative_filename(string):
+    # CWD is replaced with a temporary dir before calling main(), so we
+    # join it with the saved CWD so it ends up where the user expects.
+    return os.path.join(support.SAVEDCWD, string)
+
+
+def huntrleaks(string):
+    args = string.split(':')
+    if len(args) not in (2, 3):
+        raise argparse.ArgumentTypeError(
+            'needs 2 or 3 colon-separated arguments')
+    nwarmup = int(args[0]) if args[0] else 5
+    ntracked = int(args[1]) if args[1] else 4
+    fname = args[2] if len(args) > 2 and args[2] else 'reflog.txt'
+    return nwarmup, ntracked, fname
+
+
+def resources_list(string):
+    u = [x.lower() for x in string.split(',')]
+    for r in u:
+        if r == 'all' or r == 'none':
+            continue
+        if r[0] == '-':
+            r = r[1:]
+        if r not in RESOURCE_NAMES:
+            raise argparse.ArgumentTypeError('invalid resource: ' + r)
+    return u
+
+
+def _parse_args(args, **kwargs):
+    # Defaults
+    ns = argparse.Namespace(testdir=None, verbose=0, quiet=False,
+         exclude=False, single=False, randomize=False, fromfile=None,
+         findleaks=False, use_resources=None, trace=False, coverdir='coverage',
+         runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
+         random_seed=None, use_mp=None, verbose3=False, forever=False,
+         header=False, failfast=False, match_tests=None, pgo=False)
+    for k, v in kwargs.items():
+        if not hasattr(ns, k):
+            raise TypeError('%r is an invalid keyword argument '
+                            'for this function' % k)
+        setattr(ns, k, v)
+    if ns.use_resources is None:
+        ns.use_resources = []
+
+    parser = _create_parser()
-    parser.parse_args(args=args, namespace=ns)
++    # Issue #14191: argparse doesn't support "intermixed" positional and
++    # optional arguments. Use parse_known_args() as a workaround.
++    ns.args = parser.parse_known_args(args=args, namespace=ns)[1]
++    for arg in ns.args:
++        if arg.startswith('-'):
++            parser.error("unrecognized arguments: %s" % arg)
++            sys.exit(1)
+
+    if ns.single and ns.fromfile:
+        parser.error("-s and -f don't go together!")
+    if ns.use_mp and ns.trace:
+        parser.error("-T and -j don't go together!")
+    if ns.use_mp and ns.findleaks:
+        parser.error("-l and -j don't go together!")
+    if ns.failfast and not (ns.verbose or ns.verbose3):
+        parser.error("-G/--failfast needs either -v or -W")
+    if ns.pgo and (ns.verbose or ns.verbose2 or ns.verbose3):
+        parser.error("--pgo/-v don't go together!")
+
+    if ns.nowindows:
+        print("Warning: the --nowindows (-n) option is deprecated. "
+              "Use -vv to display assertions in stderr.", file=sys.stderr)
+
+    if ns.quiet:
+        ns.verbose = 0
+    if ns.timeout is not None:
+        if ns.timeout <= 0:
+            ns.timeout = None
+    if ns.use_mp is not None:
+        if ns.use_mp <= 0:
+            # Use all cores + extras for tests that like to sleep
+            ns.use_mp = 2 + (os.cpu_count() or 1)
+    if ns.use:
+        for a in ns.use:
+            for r in a:
+                if r == 'all':
+                    ns.use_resources[:] = RESOURCE_NAMES
+                    continue
+                if r == 'none':
+                    del ns.use_resources[:]
+                    continue
+                remove = False
+                if r[0] == '-':
+                    remove = True
+                    r = r[1:]
+                if remove:
+                    if r in ns.use_resources:
+                        ns.use_resources.remove(r)
+                elif r not in ns.use_resources:
+                    ns.use_resources.append(r)
+    if ns.random_seed is not None:
+        ns.randomize = True
+
+    return ns
diff --cc Lib/test/test_regrtest.py
index 5de2a6f12e,ae183272a1..d43160470f
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@@ -299,492 -270,16 +299,501 @@@ class ParseArgsTestCase(unittest.TestCa
          self.assertEqual(ns.verbose, 0)
          self.assertEqual(ns.args, ['foo'])
 
+     def test_arg_option_arg(self):
-         ns = regrtest._parse_args(['test_unaryop', '-v', 'test_binop'])
++        ns = libregrtest._parse_args(['test_unaryop', '-v', 'test_binop'])
+         self.assertEqual(ns.verbose, 1)
+         self.assertEqual(ns.args, ['test_unaryop', 'test_binop'])
+
+     def test_unknown_option(self):
+         self.checkError(['--unknown-option'],
+                         'unrecognized arguments: --unknown-option')
+
+
+class BaseTestCase(unittest.TestCase):
+    TEST_UNIQUE_ID = 1
+    TESTNAME_PREFIX = 'test_regrtest_'
+    TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'
+
+    def setUp(self):
+        self.testdir = os.path.realpath(os.path.dirname(__file__))
+
+        self.tmptestdir = tempfile.mkdtemp()
+        self.addCleanup(support.rmtree, self.tmptestdir)
+
+    def create_test(self, name=None, code=''):
+        if not name:
+            name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
+            BaseTestCase.TEST_UNIQUE_ID += 1
+
+        # test_regrtest cannot be run twice in parallel because
+        # of setUp() and create_test()
+        name = self.TESTNAME_PREFIX + name
+        path = os.path.join(self.tmptestdir, name + '.py')
+
+        self.addCleanup(support.unlink, path)
+        # Use 'x' mode to ensure that we do not overwrite existing tests
+        try:
+            with open(path, 'x', encoding='utf-8') as fp:
+                fp.write(code)
+        except PermissionError as exc:
+            if not sysconfig.is_python_build():
+                self.skipTest("cannot write %s: %s" % (path, exc))
+            raise
+        return name
+
+    def regex_search(self, regex, output):
+        match = re.search(regex, output, re.MULTILINE)
+        if not match:
+            self.fail("%r not found in %r" % (regex, output))
+        return match
+
+    def check_line(self, output, regex):
+        regex = re.compile(r'^' + regex, re.MULTILINE)
+        self.assertRegex(output, regex)
+
+    def parse_executed_tests(self, output):
+        regex = (r'^[0-9]+:[0-9]+:[0-9]+ \[ *[0-9]+(?:/ *[0-9]+)?\] (%s)'
+                 % self.TESTNAME_REGEX)
+        parser = re.finditer(regex, output, re.MULTILINE)
+        return list(match.group(1) for match in parser)
+
+    def check_executed_tests(self, output, tests, skipped=(), failed=(),
+                             omitted=(), randomize=False, interrupted=False):
+        if isinstance(tests, str):
+            tests = [tests]
+        if isinstance(skipped, str):
+            skipped = [skipped]
+        if isinstance(failed, str):
+            failed = [failed]
+        if isinstance(omitted, str):
+            omitted = [omitted]
+        ntest = len(tests)
+        nskipped = len(skipped)
+        nfailed = len(failed)
+        nomitted = len(omitted)
+
+        executed = self.parse_executed_tests(output)
+        if randomize:
+            self.assertEqual(set(executed), set(tests), output)
+        else:
+            self.assertEqual(executed, tests, output)
+
+        def plural(count):
+            return 's' if count != 1 else ''
+
+        def list_regex(line_format, tests):
+            count = len(tests)
+            names = ' '.join(sorted(tests))
+            regex = line_format % (count, plural(count))
+            regex = r'%s:\n    %s$' % (regex, names)
+            return regex
+
+        if skipped:
+            regex = list_regex('%s test%s skipped', skipped)
+            self.check_line(output, regex)
+
+        if failed:
+            regex = list_regex('%s test%s failed', failed)
+            self.check_line(output, regex)
+
+        if omitted:
+            regex = list_regex('%s test%s omitted', omitted)
+            self.check_line(output, regex)
+
+        good = ntest - nskipped - nfailed - nomitted
+        if good:
+            regex = r'%s test%s OK\.$' % (good, plural(good))
+            if not skipped and not failed and good > 1:
+                regex = 'All %s' % regex
+            self.check_line(output, regex)
+
+        if interrupted:
+            self.check_line(output, 'Test suite interrupted by signal SIGINT.')
+
+        if nfailed:
+            result = 'FAILURE'
+        elif interrupted:
+            result = 'INTERRUPTED'
+        else:
+            result = 'SUCCESS'
+        self.check_line(output, 'Tests result: %s' % result)
+
+    def parse_random_seed(self, output):
+        match = self.regex_search(r'Using random seed ([0-9]+)', output)
+        randseed = int(match.group(1))
+        self.assertTrue(0 <= randseed <= 10000000, randseed)
+        return randseed
+
+    def run_command(self, args, input=None, exitcode=0, **kw):
+        if not input:
+            input = ''
+        if 'stderr' not in kw:
+            kw['stderr'] = subprocess.PIPE
+        proc = subprocess.run(args,
+                              universal_newlines=True,
+                              input=input,
+                              stdout=subprocess.PIPE,
+                              **kw)
+        if proc.returncode != exitcode:
+            msg = ("Command %s failed with exit code %s\n"
+                   "\n"
+                   "stdout:\n"
+                   "---\n"
+                   "%s\n"
+                   "---\n"
+                   % (str(args), proc.returncode, proc.stdout))
+            if proc.stderr:
+                msg += ("\n"
+                        "stderr:\n"
+                        "---\n"
+                        "%s"
+                        "---\n"
+                        % proc.stderr)
+            self.fail(msg)
+        return proc
+
+    def run_python(self, args, **kw):
+        args = [sys.executable, '-X', 'faulthandler', '-I', *args]
+        proc = self.run_command(args, **kw)
+        return proc.stdout
+
+
+class ProgramsTestCase(BaseTestCase):
+    """
+    Test various ways to run the Python test suite. Use options close
+    to the options used on the buildbot.
+ """ + + NTEST = 4 + + def setUp(self): + super().setUp() + + # Create NTEST tests doing nothing + self.tests = [self.create_test() for index in range(self.NTEST)] + + self.python_args = ['-Wd', '-E', '-bb'] + self.regrtest_args = ['-uall', '-rwW', + '--testdir=%s' % self.tmptestdir] + if hasattr(faulthandler, 'dump_traceback_later'): + self.regrtest_args.extend(('--timeout', '3600', '-j4')) + if sys.platform == 'win32': + self.regrtest_args.append('-n') + + def check_output(self, output): + self.parse_random_seed(output) + self.check_executed_tests(output, self.tests, randomize=True) + + def run_tests(self, args): + output = self.run_python(args) + self.check_output(output) + + def test_script_regrtest(self): + # Lib/test/regrtest.py + script = os.path.join(self.testdir, 'regrtest.py') + + args = [*self.python_args, script, *self.regrtest_args, *self.tests] + self.run_tests(args) + + def test_module_test(self): + # -m test + args = [*self.python_args, '-m', 'test', + *self.regrtest_args, *self.tests] + self.run_tests(args) + + def test_module_regrtest(self): + # -m test.regrtest + args = [*self.python_args, '-m', 'test.regrtest', + *self.regrtest_args, *self.tests] + self.run_tests(args) + + def test_module_autotest(self): + # -m test.autotest + args = [*self.python_args, '-m', 'test.autotest', + *self.regrtest_args, *self.tests] + self.run_tests(args) + + def test_module_from_test_autotest(self): + # from test import autotest + code = 'from test import autotest' + args = [*self.python_args, '-c', code, + *self.regrtest_args, *self.tests] + self.run_tests(args) + + def test_script_autotest(self): + # Lib/test/autotest.py + script = os.path.join(self.testdir, 'autotest.py') + args = [*self.python_args, script, *self.regrtest_args, *self.tests] + self.run_tests(args) + + @unittest.skipUnless(sysconfig.is_python_build(), + 'run_tests.py script is not installed') + def test_tools_script_run_tests(self): + # Tools/scripts/run_tests.py + script = os.path.join(ROOT_DIR, 'Tools', 'scripts', 'run_tests.py') + args = [script, *self.regrtest_args, *self.tests] + self.run_tests(args) + + def run_batch(self, *args): + proc = self.run_command(args) + self.check_output(proc.stdout) + + @unittest.skipUnless(sysconfig.is_python_build(), + 'test.bat script is not installed') + @unittest.skipUnless(sys.platform == 'win32', 'Windows only') + def test_tools_buildbot_test(self): + # Tools\buildbot\test.bat + script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat') + test_args = ['--testdir=%s' % self.tmptestdir] + if platform.architecture()[0] == '64bit': + test_args.append('-x64') # 64-bit build + if not Py_DEBUG: + test_args.append('+d') # Release build, use python.exe + self.run_batch(script, *test_args, *self.tests) + + @unittest.skipUnless(sys.platform == 'win32', 'Windows only') + def test_pcbuild_rt(self): + # PCbuild\rt.bat + script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat') + rt_args = ["-q"] # Quick, don't run tests twice + if platform.architecture()[0] == '64bit': + rt_args.append('-x64') # 64-bit build + if Py_DEBUG: + rt_args.append('-d') # Debug build, use python_d.exe + self.run_batch(script, *rt_args, *self.regrtest_args, *self.tests) + + +class ArgsTestCase(BaseTestCase): + """ + Test arguments of the Python test suite. 
+ """ + + def run_tests(self, *testargs, **kw): + cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs] + return self.run_python(cmdargs, **kw) + + def test_failing_test(self): + # test a failing test + code = textwrap.dedent(""" + import unittest + + class FailingTest(unittest.TestCase): + def test_failing(self): + self.fail("bug") + """) + test_ok = self.create_test('ok') + test_failing = self.create_test('failing', code=code) + tests = [test_ok, test_failing] + + output = self.run_tests(*tests, exitcode=1) + self.check_executed_tests(output, tests, failed=test_failing) + + def test_resources(self): + # test -u command line option + tests = {} + for resource in ('audio', 'network'): + code = 'from test import support\nsupport.requires(%r)' % resource + tests[resource] = self.create_test(resource, code) + test_names = sorted(tests.values()) + + # -u all: 2 resources enabled + output = self.run_tests('-u', 'all', *test_names) + self.check_executed_tests(output, test_names) + + # -u audio: 1 resource enabled + output = self.run_tests('-uaudio', *test_names) + self.check_executed_tests(output, test_names, + skipped=tests['network']) + + # no option: 0 resources enabled + output = self.run_tests(*test_names) + self.check_executed_tests(output, test_names, + skipped=test_names) + + def test_random(self): + # test -r and --randseed command line option + code = textwrap.dedent(""" + import random + print("TESTRANDOM: %s" % random.randint(1, 1000)) + """) + test = self.create_test('random', code) + + # first run to get the output with the random seed + output = self.run_tests('-r', test) + randseed = self.parse_random_seed(output) + match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output) + test_random = int(match.group(1)) + + # try to reproduce with the random seed + output = self.run_tests('-r', '--randseed=%s' % randseed, test) + randseed2 = self.parse_random_seed(output) + self.assertEqual(randseed2, randseed) + + match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output) + test_random2 = int(match.group(1)) + self.assertEqual(test_random2, test_random) + + def test_fromfile(self): + # test --fromfile + tests = [self.create_test() for index in range(5)] + + # Write the list of files using a format similar to regrtest output: + # [1/2] test_1 + # [2/2] test_2 + filename = support.TESTFN + self.addCleanup(support.unlink, filename) + + # test format '0:00:00 [2/7] test_opcodes -- test_grammar took 0 sec' + with open(filename, "w") as fp: + previous = None + for index, name in enumerate(tests, 1): + line = ("00:00:%02i [%s/%s] %s" + % (index, index, len(tests), name)) + if previous: + line += " -- %s took 0 sec" % previous + print(line, file=fp) + previous = name + + output = self.run_tests('--fromfile', filename) + self.check_executed_tests(output, tests) + + # test format '[2/7] test_opcodes' + with open(filename, "w") as fp: + for index, name in enumerate(tests, 1): + print("[%s/%s] %s" % (index, len(tests), name), file=fp) + + output = self.run_tests('--fromfile', filename) + self.check_executed_tests(output, tests) + + # test format 'test_opcodes' + with open(filename, "w") as fp: + for name in tests: + print(name, file=fp) + + output = self.run_tests('--fromfile', filename) + self.check_executed_tests(output, tests) + + def test_interrupted(self): + code = TEST_INTERRUPTED + test = self.create_test('sigint', code=code) + output = self.run_tests(test, exitcode=1) + self.check_executed_tests(output, test, omitted=test, + interrupted=True) + + def test_slowest(self): + # 
+        tests = [self.create_test() for index in range(3)]
+        output = self.run_tests("--slowest", *tests)
+        self.check_executed_tests(output, tests)
+        regex = ('10 slowest tests:\n'
+                 '(?:- %s: .*\n){%s}'
+                 % (self.TESTNAME_REGEX, len(tests)))
+        self.check_line(output, regex)
+
+    def test_slow_interrupted(self):
+        # Issue #25373: test --slowest with an interrupted test
+        code = TEST_INTERRUPTED
+        test = self.create_test("sigint", code=code)
+
+        for multiprocessing in (False, True):
+            if multiprocessing:
+                args = ("--slowest", "-j2", test)
+            else:
+                args = ("--slowest", test)
+            output = self.run_tests(*args, exitcode=1)
+            self.check_executed_tests(output, test,
+                                      omitted=test, interrupted=True)
+
+            regex = ('10 slowest tests:\n')
+            self.check_line(output, regex)
+
+    def test_coverage(self):
+        # test --coverage
+        test = self.create_test('coverage')
+        output = self.run_tests("--coverage", test)
+        self.check_executed_tests(output, [test])
+        regex = (r'lines +cov% +module +\(path\)\n'
+                 r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
+        self.check_line(output, regex)
+
+    def test_wait(self):
+        # test --wait
+        test = self.create_test('wait')
+        output = self.run_tests("--wait", test, input='key')
+        self.check_line(output, 'Press any key to continue')
+
+    def test_forever(self):
+        # test --forever
+        code = textwrap.dedent("""
+            import builtins
+            import unittest
+
+            class ForeverTester(unittest.TestCase):
+                def test_run(self):
+                    # Store the state in the builtins module, because the test
+                    # module is reloaded at each run
+                    if 'RUN' in builtins.__dict__:
+                        builtins.__dict__['RUN'] += 1
+                        if builtins.__dict__['RUN'] >= 3:
+                            self.fail("fail at the 3rd run")
+                    else:
+                        builtins.__dict__['RUN'] = 1
+        """)
+        test = self.create_test('forever', code=code)
+        output = self.run_tests('--forever', test, exitcode=1)
+        self.check_executed_tests(output, [test]*3, failed=test)
+
+    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
+    def test_huntrleaks_fd_leak(self):
+        # test --huntrleaks for file descriptor leak
+        code = textwrap.dedent("""
+            import os
+            import unittest
+
+            # Issue #25306: Disable popups and logs to stderr on assertion
+            # failures in MSCRT
+            try:
+                import msvcrt
+                msvcrt.CrtSetReportMode
+            except (ImportError, AttributeError):
+                # not Windows, or a release build
+                pass
+            else:
+                for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
+                    msvcrt.CrtSetReportMode(m, 0)
+
+            class FDLeakTest(unittest.TestCase):
+                def test_leak(self):
+                    fd = os.open(__file__, os.O_RDONLY)
+                    # bug: never closes the file descriptor
+        """)
+        test = self.create_test('huntrleaks', code=code)
+
+        filename = 'reflog.txt'
+        self.addCleanup(support.unlink, filename)
+        output = self.run_tests('--huntrleaks', '3:3:', test,
+                                exitcode=1,
+                                stderr=subprocess.STDOUT)
+        self.check_executed_tests(output, [test], failed=test)
+
+        line = 'beginning 6 repetitions\n123456\n......\n'
+        self.check_line(output, re.escape(line))
+
+        line2 = '%s leaked [1, 1, 1] file descriptors, sum=3\n' % test
+        self.assertIn(line2, output)
+
+        with open(filename) as fp:
+            reflog = fp.read()
+        self.assertIn(line2, reflog)
+
+    def test_list_tests(self):
+        # test --list-tests
+        tests = [self.create_test() for i in range(5)]
+        output = self.run_tests('--list-tests', *tests)
+        self.assertEqual(output.rstrip().splitlines(),
+                         tests)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --cc Misc/NEWS
index 016d3fb5f6,82cf8b04a3..2b0397bf7d
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@@ -2,34 -2,10 +2,39 @@@ Python News
 +++++++++++
 
-What's New in Python 3.5.3 release candidate 1?
-===============================================
+What's New in Python 3.6.0 beta 3
+=================================
+
+*Release date: XXXX-XX-XX*
+
+Core and Builtins
+-----------------
+
+Library
+-------
+
+- Issue #24452: Make webbrowser support Chrome on Mac OS X.
+
+- Issue #20766: Fix references leaked by pdb in the handling of SIGINT
+  handlers.
+
+Build
+-----
+
+- Issue #28208: Update Windows build to use SQLite 3.14.2.0.
+
+- Issue #28248: Update Windows build to use OpenSSL 1.0.2j.
+
++Tests
++-----
++
++- Issue #28409: regrtest: fix the parser of command line arguments.
++
+
+What's New in Python 3.6.0 beta 2
+=================================
 
-Release date: TBA
+*Release date: 2016-10-10*
 
 Core and Builtins
 -----------------
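
Note on the core change: the root cause of Issue #28409 is that argparse
cannot reliably intermix optional arguments with a greedy positional such as
nargs='*' (Issue #14191), so a command line like
"python -m test test_unaryop -v test_binop" used to fail. The commit drops
the 'args' positional entirely and instead collects the leftovers returned
by parse_known_args(). Here is a minimal standalone sketch of that technique
(illustrative only, not part of the diff; the 'demo' parser is hypothetical):

    import argparse

    parser = argparse.ArgumentParser(prog='demo')
    parser.add_argument('-v', '--verbose', action='count', default=0)

    # With a trailing nargs='*' positional, parse_args() would reject the
    # option that follows the first test name. parse_known_args() parses
    # the options it knows and returns everything else untouched.
    ns, leftovers = parser.parse_known_args(['test_unaryop', '-v', 'test_binop'])
    assert ns.verbose == 1
    assert leftovers == ['test_unaryop', 'test_binop']

    # A leftover that still looks like an option is a genuine error, which
    # mirrors the check this commit adds after the parse in _parse_args().
    for arg in leftovers:
        if arg.startswith('-'):
            parser.error('unrecognized arguments: %s' % arg)

Later Python versions (3.7+) added argparse's parse_intermixed_args() for
this exact situation, but it was not available on the 3.6 branch this merge
targets.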
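For reference, the two custom argparse 'type' callables defined in
cmdline.py above are plain string parsers, so their behavior is easy to pin
down interactively. A small usage sketch (assumes a CPython checkout where
test.libregrtest.cmdline is importable):

    from test.libregrtest.cmdline import huntrleaks, resources_list

    # '-R :' keeps every default: 5 warm-up runs, 4 tracked runs, and a
    # report written to 'reflog.txt'.
    assert huntrleaks(':') == (5, 4, 'reflog.txt')

    # '-R 6:3:leaks.log' overrides all three fields.
    assert huntrleaks('6:3:leaks.log') == (6, 3, 'leaks.log')

    # '-uall,-gui' is only validated here; _parse_args() later expands it,
    # enabling every resource and then removing 'gui'.
    assert resources_list('all,-gui') == ['all', '-gui']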