--- /dev/null
+"""Tests for events.py."""
+
+import functools
+import gc
+import io
+import os
+import signal
+import socket
+try:
+ import ssl
+except ImportError:
+ ssl = None
+import subprocess
+import sys
+import threading
+import time
+import errno
+import unittest
+import unittest.mock
+from test import support # find_unused_port, IPV6_ENABLED, TEST_HOME_DIR
+
+
+from asyncio import futures
+from asyncio import events
+from asyncio import transports
+from asyncio import protocols
+from asyncio import selector_events
+from asyncio import tasks
+from asyncio import test_utils
+from asyncio import locks
+
+
+def data_file(filename):
+ if hasattr(support, 'TEST_HOME_DIR'):
+ fullname = os.path.join(support.TEST_HOME_DIR, filename)
+ if os.path.isfile(fullname):
+ return fullname
+ fullname = os.path.join(os.path.dirname(__file__), filename)
+ if os.path.isfile(fullname):
+ return fullname
+ raise FileNotFoundError(filename)
+
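+# SSL test fixtures resolved via data_file(): a standalone certificate/key
+# pair, a CA-signed certificate, and the CA that signed it.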
+ONLYCERT = data_file('ssl_cert.pem')
+ONLYKEY = data_file('ssl_key.pem')
+SIGNED_CERTFILE = data_file('keycert3.pem')
+SIGNING_CA = data_file('pycacert.pem')
+
+
+class MyProto(protocols.Protocol):
+ done = None
+
+ def __init__(self, loop=None):
+ self.transport = None
+ self.state = 'INITIAL'
+ self.nbytes = 0
+ if loop is not None:
+ self.done = futures.Future(loop=loop)
+
+ def connection_made(self, transport):
+ self.transport = transport
+ assert self.state == 'INITIAL', self.state
+ self.state = 'CONNECTED'
+ transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
+
+ def data_received(self, data):
+ assert self.state == 'CONNECTED', self.state
+ self.nbytes += len(data)
+
+ def eof_received(self):
+ assert self.state == 'CONNECTED', self.state
+ self.state = 'EOF'
+
+ def connection_lost(self, exc):
+ assert self.state in ('CONNECTED', 'EOF'), self.state
+ self.state = 'CLOSED'
+ if self.done:
+ self.done.set_result(None)
+
+
+class MyDatagramProto(protocols.DatagramProtocol):
+ done = None
+
+ def __init__(self, loop=None):
+ self.state = 'INITIAL'
+ self.nbytes = 0
+ if loop is not None:
+ self.done = futures.Future(loop=loop)
+
+ def connection_made(self, transport):
+ self.transport = transport
+ assert self.state == 'INITIAL', self.state
+ self.state = 'INITIALIZED'
+
+ def datagram_received(self, data, addr):
+ assert self.state == 'INITIALIZED', self.state
+ self.nbytes += len(data)
+
+ def error_received(self, exc):
+ assert self.state == 'INITIALIZED', self.state
+
+ def connection_lost(self, exc):
+ assert self.state == 'INITIALIZED', self.state
+ self.state = 'CLOSED'
+ if self.done:
+ self.done.set_result(None)
+
+
+class MyReadPipeProto(protocols.Protocol):
+ done = None
+
+ def __init__(self, loop=None):
+ self.state = ['INITIAL']
+ self.nbytes = 0
+ self.transport = None
+ if loop is not None:
+ self.done = futures.Future(loop=loop)
+
+ def connection_made(self, transport):
+ self.transport = transport
+ assert self.state == ['INITIAL'], self.state
+ self.state.append('CONNECTED')
+
+ def data_received(self, data):
+ assert self.state == ['INITIAL', 'CONNECTED'], self.state
+ self.nbytes += len(data)
+
+ def eof_received(self):
+ assert self.state == ['INITIAL', 'CONNECTED'], self.state
+ self.state.append('EOF')
+
+ def connection_lost(self, exc):
+ assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
+ self.state.append('CLOSED')
+ if self.done:
+ self.done.set_result(None)
+
+
+class MyWritePipeProto(protocols.BaseProtocol):
+ done = None
+
+ def __init__(self, loop=None):
+ self.state = 'INITIAL'
+ self.transport = None
+ if loop is not None:
+ self.done = futures.Future(loop=loop)
+
+ def connection_made(self, transport):
+ self.transport = transport
+ assert self.state == 'INITIAL', self.state
+ self.state = 'CONNECTED'
+
+ def connection_lost(self, exc):
+ assert self.state == 'CONNECTED', self.state
+ self.state = 'CLOSED'
+ if self.done:
+ self.done.set_result(None)
+
+
+class MySubprocessProtocol(protocols.SubprocessProtocol):
+
+ def __init__(self, loop):
+ self.state = 'INITIAL'
+ self.transport = None
+ self.connected = futures.Future(loop=loop)
+ self.completed = futures.Future(loop=loop)
+ self.disconnects = {fd: futures.Future(loop=loop) for fd in range(3)}
+ self.data = {1: b'', 2: b''}
+ self.returncode = None
+ self.got_data = {1: locks.Event(loop=loop),
+ 2: locks.Event(loop=loop)}
+
+ def connection_made(self, transport):
+ self.transport = transport
+ assert self.state == 'INITIAL', self.state
+ self.state = 'CONNECTED'
+ self.connected.set_result(None)
+
+ def connection_lost(self, exc):
+ assert self.state == 'CONNECTED', self.state
+ self.state = 'CLOSED'
+ self.completed.set_result(None)
+
+ def pipe_data_received(self, fd, data):
+ assert self.state == 'CONNECTED', self.state
+ self.data[fd] += data
+ self.got_data[fd].set()
+
+ def pipe_connection_lost(self, fd, exc):
+ assert self.state == 'CONNECTED', self.state
+ if exc:
+ self.disconnects[fd].set_exception(exc)
+ else:
+ self.disconnects[fd].set_result(exc)
+
+ def process_exited(self):
+ assert self.state == 'CONNECTED', self.state
+ self.returncode = self.transport.get_returncode()
+
+
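+# Event loop tests shared by every loop implementation; the concrete
+# TestCase classes defined later in this file supply create_event_loop().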
+class EventLoopTestsMixin:
+
+ def setUp(self):
+ super().setUp()
+ self.loop = self.create_event_loop()
+ events.set_event_loop(None)
+
+ def tearDown(self):
+ # Run the loop briefly in case there are pending transport close callbacks.
+ test_utils.run_briefly(self.loop)
+
+ self.loop.close()
+ gc.collect()
+ super().tearDown()
+
+ def test_run_until_complete_nesting(self):
+ @tasks.coroutine
+ def coro1():
+ yield
+
+ @tasks.coroutine
+ def coro2():
+ self.assertTrue(self.loop.is_running())
+ self.loop.run_until_complete(coro1())
+
+ self.assertRaises(
+ RuntimeError, self.loop.run_until_complete, coro2())
+
+ # Note: because of the default Windows timing granularity of
+ # 15.6 msec, we use fairly long sleep times here (~100 msec).
+
+ def test_run_until_complete(self):
+ t0 = self.loop.time()
+ self.loop.run_until_complete(tasks.sleep(0.1, loop=self.loop))
+ t1 = self.loop.time()
+ self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
+
+ def test_run_until_complete_stopped(self):
+ @tasks.coroutine
+ def cb():
+ self.loop.stop()
+ yield from tasks.sleep(0.1, loop=self.loop)
+ task = cb()
+ self.assertRaises(RuntimeError,
+ self.loop.run_until_complete, task)
+
+ def test_call_later(self):
+ results = []
+
+ def callback(arg):
+ results.append(arg)
+ self.loop.stop()
+
+ self.loop.call_later(0.1, callback, 'hello world')
+ t0 = time.monotonic()
+ self.loop.run_forever()
+ t1 = time.monotonic()
+ self.assertEqual(results, ['hello world'])
+ self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
+
+ def test_call_soon(self):
+ results = []
+
+ def callback(arg1, arg2):
+ results.append((arg1, arg2))
+ self.loop.stop()
+
+ self.loop.call_soon(callback, 'hello', 'world')
+ self.loop.run_forever()
+ self.assertEqual(results, [('hello', 'world')])
+
+ def test_call_soon_threadsafe(self):
+ results = []
+ lock = threading.Lock()
+
+ def callback(arg):
+ results.append(arg)
+ if len(results) >= 2:
+ self.loop.stop()
+
+ def run_in_thread():
+ self.loop.call_soon_threadsafe(callback, 'hello')
+ lock.release()
+
+ lock.acquire()
+ t = threading.Thread(target=run_in_thread)
+ t.start()
+
+ with lock:
+ self.loop.call_soon(callback, 'world')
+ self.loop.run_forever()
+ t.join()
+ self.assertEqual(results, ['hello', 'world'])
+
+ def test_call_soon_threadsafe_same_thread(self):
+ results = []
+
+ def callback(arg):
+ results.append(arg)
+ if len(results) >= 2:
+ self.loop.stop()
+
+ self.loop.call_soon_threadsafe(callback, 'hello')
+ self.loop.call_soon(callback, 'world')
+ self.loop.run_forever()
+ self.assertEqual(results, ['hello', 'world'])
+
+ def test_run_in_executor(self):
+ def run(arg):
+ return (arg, threading.get_ident())
+ f2 = self.loop.run_in_executor(None, run, 'yo')
+ res, thread_id = self.loop.run_until_complete(f2)
+ self.assertEqual(res, 'yo')
+ self.assertNotEqual(thread_id, threading.get_ident())
+
+ def test_reader_callback(self):
+ r, w = test_utils.socketpair()
+ bytes_read = []
+
+ def reader():
+ try:
+ data = r.recv(1024)
+ except BlockingIOError:
+ # Spurious readiness notifications are possible
+ # at least on Linux -- see man select.
+ return
+ if data:
+ bytes_read.append(data)
+ else:
+ self.assertTrue(self.loop.remove_reader(r.fileno()))
+ r.close()
+
+ self.loop.add_reader(r.fileno(), reader)
+ self.loop.call_soon(w.send, b'abc')
+ test_utils.run_briefly(self.loop)
+ self.loop.call_soon(w.send, b'def')
+ test_utils.run_briefly(self.loop)
+ self.loop.call_soon(w.close)
+ self.loop.call_soon(self.loop.stop)
+ self.loop.run_forever()
+ self.assertEqual(b''.join(bytes_read), b'abcdef')
+
+ def test_writer_callback(self):
+ r, w = test_utils.socketpair()
+ w.setblocking(False)
+ self.loop.add_writer(w.fileno(), w.send, b'x'*(256*1024))
+ test_utils.run_briefly(self.loop)
+
+ def remove_writer():
+ self.assertTrue(self.loop.remove_writer(w.fileno()))
+
+ self.loop.call_soon(remove_writer)
+ self.loop.call_soon(self.loop.stop)
+ self.loop.run_forever()
+ w.close()
+ data = r.recv(256*1024)
+ r.close()
+ self.assertGreaterEqual(len(data), 200)
+
+ def test_sock_client_ops(self):
+ with test_utils.run_test_server() as httpd:
+ sock = socket.socket()
+ sock.setblocking(False)
+ self.loop.run_until_complete(
+ self.loop.sock_connect(sock, httpd.address))
+ self.loop.run_until_complete(
+ self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
+ data = self.loop.run_until_complete(
+ self.loop.sock_recv(sock, 1024))
+ # consume data
+ self.loop.run_until_complete(
+ self.loop.sock_recv(sock, 1024))
+ sock.close()
+
+ self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
+
+ def test_sock_client_fail(self):
+ # Make sure that we will get an unused port
+ address = None
+ try:
+ s = socket.socket()
+ s.bind(('127.0.0.1', 0))
+ address = s.getsockname()
+ finally:
+ s.close()
+
+ sock = socket.socket()
+ sock.setblocking(False)
+ with self.assertRaises(ConnectionRefusedError):
+ self.loop.run_until_complete(
+ self.loop.sock_connect(sock, address))
+ sock.close()
+
+ def test_sock_accept(self):
+ listener = socket.socket()
+ listener.setblocking(False)
+ listener.bind(('127.0.0.1', 0))
+ listener.listen(1)
+ client = socket.socket()
+ client.connect(listener.getsockname())
+
+ f = self.loop.sock_accept(listener)
+ conn, addr = self.loop.run_until_complete(f)
+ self.assertEqual(conn.gettimeout(), 0)
+ self.assertEqual(addr, client.getsockname())
+ self.assertEqual(client.getpeername(), listener.getsockname())
+ client.close()
+ conn.close()
+ listener.close()
+
+ @unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
+ def test_add_signal_handler(self):
+ caught = 0
+
+ def my_handler():
+ nonlocal caught
+ caught += 1
+
+ # Check error behavior first.
+ self.assertRaises(
+ TypeError, self.loop.add_signal_handler, 'boom', my_handler)
+ self.assertRaises(
+ TypeError, self.loop.remove_signal_handler, 'boom')
+ self.assertRaises(
+ ValueError, self.loop.add_signal_handler, signal.NSIG+1,
+ my_handler)
+ self.assertRaises(
+ ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
+ self.assertRaises(
+ ValueError, self.loop.add_signal_handler, 0, my_handler)
+ self.assertRaises(
+ ValueError, self.loop.remove_signal_handler, 0)
+ self.assertRaises(
+ ValueError, self.loop.add_signal_handler, -1, my_handler)
+ self.assertRaises(
+ ValueError, self.loop.remove_signal_handler, -1)
+ self.assertRaises(
+ RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
+ my_handler)
+ # Removing SIGKILL doesn't raise, since we don't call signal().
+ self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
+ # Now set a handler and handle it.
+ self.loop.add_signal_handler(signal.SIGINT, my_handler)
+ test_utils.run_briefly(self.loop)
+ os.kill(os.getpid(), signal.SIGINT)
+ test_utils.run_briefly(self.loop)
+ self.assertEqual(caught, 1)
+ # Removing it should restore the default handler.
+ self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
+ self.assertEqual(signal.getsignal(signal.SIGINT),
+ signal.default_int_handler)
+ # Removing again returns False.
+ self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
+
+ @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
+ def test_signal_handling_while_selecting(self):
+ # Test with a signal actually arriving during a select() call.
+ caught = 0
+
+ def my_handler():
+ nonlocal caught
+ caught += 1
+ self.loop.stop()
+
+ self.loop.add_signal_handler(signal.SIGALRM, my_handler)
+
+ signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
+ self.loop.run_forever()
+ self.assertEqual(caught, 1)
+
+ @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
+ def test_signal_handling_args(self):
+ some_args = (42,)
+ caught = 0
+
+ def my_handler(*args):
+ nonlocal caught
+ caught += 1
+ self.assertEqual(args, some_args)
+
+ self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
+
+ signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
+ self.loop.call_later(0.5, self.loop.stop)
+ self.loop.run_forever()
+ self.assertEqual(caught, 1)
+
+ def test_create_connection(self):
+ with test_utils.run_test_server() as httpd:
+ f = self.loop.create_connection(
+ lambda: MyProto(loop=self.loop), *httpd.address)
+ tr, pr = self.loop.run_until_complete(f)
+ self.assertIsInstance(tr, transports.Transport)
+ self.assertIsInstance(pr, protocols.Protocol)
+ self.loop.run_until_complete(pr.done)
+ self.assertGreater(pr.nbytes, 0)
+ tr.close()
+
+ def test_create_connection_sock(self):
+ with test_utils.run_test_server() as httpd:
+ sock = None
+ infos = self.loop.run_until_complete(
+ self.loop.getaddrinfo(
+ *httpd.address, type=socket.SOCK_STREAM))
+ for family, type, proto, cname, address in infos:
+ try:
+ sock = socket.socket(family=family, type=type, proto=proto)
+ sock.setblocking(False)
+ self.loop.run_until_complete(
+ self.loop.sock_connect(sock, address))
+ except:
+ pass
+ else:
+ break
+ else:
+ assert False, 'Can not create socket.'
+
+ f = self.loop.create_connection(
+ lambda: MyProto(loop=self.loop), sock=sock)
+ tr, pr = self.loop.run_until_complete(f)
+ self.assertIsInstance(tr, transports.Transport)
+ self.assertIsInstance(pr, protocols.Protocol)
+ self.loop.run_until_complete(pr.done)
+ self.assertGreater(pr.nbytes, 0)
+ tr.close()
+
+ @unittest.skipIf(ssl is None, 'No ssl module')
+ def test_create_ssl_connection(self):
+ with test_utils.run_test_server(use_ssl=True) as httpd:
+ f = self.loop.create_connection(
+ lambda: MyProto(loop=self.loop), *httpd.address,
+ ssl=test_utils.dummy_ssl_context())
+ tr, pr = self.loop.run_until_complete(f)
+ self.assertIsInstance(tr, transports.Transport)
+ self.assertIsInstance(pr, protocols.Protocol)
+ self.assertTrue('ssl' in tr.__class__.__name__.lower())
+ self.assertIsNotNone(tr.get_extra_info('sockname'))
+ self.loop.run_until_complete(pr.done)
+ self.assertGreater(pr.nbytes, 0)
+ tr.close()
+
+ def test_create_connection_local_addr(self):
+ with test_utils.run_test_server() as httpd:
+ port = support.find_unused_port()
+ f = self.loop.create_connection(
+ lambda: MyProto(loop=self.loop),
+ *httpd.address, local_addr=(httpd.address[0], port))
+ tr, pr = self.loop.run_until_complete(f)
+ expected = pr.transport.get_extra_info('sockname')[1]
+ self.assertEqual(port, expected)
+ tr.close()
+
+ def test_create_connection_local_addr_in_use(self):
+ with test_utils.run_test_server() as httpd:
+ f = self.loop.create_connection(
+ lambda: MyProto(loop=self.loop),
+ *httpd.address, local_addr=httpd.address)
+ with self.assertRaises(OSError) as cm:
+ self.loop.run_until_complete(f)
+ self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
+ self.assertIn(str(httpd.address), cm.exception.strerror)
+
+ def test_create_server(self):
+ proto = None
+
+ def factory():
+ nonlocal proto
+ proto = MyProto()
+ return proto
+
+ f = self.loop.create_server(factory, '0.0.0.0', 0)
+ server = self.loop.run_until_complete(f)
+ self.assertEqual(len(server.sockets), 1)
+ sock = server.sockets[0]
+ host, port = sock.getsockname()
+ self.assertEqual(host, '0.0.0.0')
+ client = socket.socket()
+ client.connect(('127.0.0.1', port))
+ client.sendall(b'xxx')
+ test_utils.run_briefly(self.loop)
+ test_utils.run_until(self.loop, lambda: proto is not None, 10)
+ self.assertIsInstance(proto, MyProto)
+ self.assertEqual('INITIAL', proto.state)
+ test_utils.run_briefly(self.loop)
+ self.assertEqual('CONNECTED', proto.state)
+ test_utils.run_until(self.loop, lambda: proto.nbytes > 0,
+ timeout=10)
+ self.assertEqual(3, proto.nbytes)
+
+ # extra info is available
+ self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
+ self.assertEqual('127.0.0.1',
+ proto.transport.get_extra_info('peername')[0])
+
+ # close connection
+ proto.transport.close()
+ test_utils.run_briefly(self.loop) # windows iocp
+
+ self.assertEqual('CLOSED', proto.state)
+
+ # The client socket must be closed only afterwards, to avoid ECONNRESET
+ # upon recv()/send() on the serving socket.
+ client.close()
+
+ # close server
+ server.close()
+
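+ # Helper for the SSL server tests below: listen on 127.0.0.1 with the
+ # given certificate/key and return (server, host, port).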
+ def _make_ssl_server(self, factory, certfile, keyfile=None):
+ sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext.options |= ssl.OP_NO_SSLv2
+ sslcontext.load_cert_chain(certfile, keyfile)
+
+ f = self.loop.create_server(
+ factory, '127.0.0.1', 0, ssl=sslcontext)
+
+ server = self.loop.run_until_complete(f)
+ sock = server.sockets[0]
+ host, port = sock.getsockname()
+ self.assertEqual(host, '127.0.0.1')
+ return server, host, port
+
+ @unittest.skipIf(ssl is None, 'No ssl module')
+ def test_create_server_ssl(self):
+ proto = None
+
+ class ClientMyProto(MyProto):
+ def connection_made(self, transport):
+ self.transport = transport
+ assert self.state == 'INITIAL', self.state
+ self.state = 'CONNECTED'
+
+ def factory():
+ nonlocal proto
+ proto = MyProto(loop=self.loop)
+ return proto
+
+ server, host, port = self._make_ssl_server(factory, ONLYCERT, ONLYKEY)
+
+ f_c = self.loop.create_connection(ClientMyProto, host, port,
+ ssl=test_utils.dummy_ssl_context())
+ client, pr = self.loop.run_until_complete(f_c)
+
+ client.write(b'xxx')
+ test_utils.run_briefly(self.loop)
+ self.assertIsInstance(proto, MyProto)
+ test_utils.run_briefly(self.loop)
+ self.assertEqual('CONNECTED', proto.state)
+ test_utils.run_until(self.loop, lambda: proto.nbytes > 0,
+ timeout=10)
+ self.assertEqual(3, proto.nbytes)
+
+ # extra info is available
+ self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
+ self.assertEqual('127.0.0.1',
+ proto.transport.get_extra_info('peername')[0])
+
+ # close connection
+ proto.transport.close()
+ self.loop.run_until_complete(proto.done)
+ self.assertEqual('CLOSED', proto.state)
+
+ # The client socket must be closed only afterwards, to avoid ECONNRESET
+ # upon recv()/send() on the serving socket.
+ client.close()
+
+ # stop serving
+ server.close()
+
+ @unittest.skipIf(ssl is None, 'No ssl module')
+ def test_create_server_ssl_verify_failed(self):
+ proto = None
+
+ def factory():
+ nonlocal proto
+ proto = MyProto(loop=self.loop)
+ return proto
+
+ server, host, port = self._make_ssl_server(factory, SIGNED_CERTFILE)
+
+ sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext_client.options |= ssl.OP_NO_SSLv2
+ sslcontext_client.verify_mode = ssl.CERT_REQUIRED
+ if hasattr(sslcontext_client, 'check_hostname'):
+ sslcontext_client.check_hostname = True
+
+ # no CA loaded
+ f_c = self.loop.create_connection(MyProto, host, port,
+ ssl=sslcontext_client)
+ with self.assertRaisesRegex(ssl.SSLError,
+ 'certificate verify failed '):
+ self.loop.run_until_complete(f_c)
+
+ # close connection
+ self.assertIsNone(proto.transport)
+ server.close()
+
+ @unittest.skipIf(ssl is None, 'No ssl module')
+ def test_create_server_ssl_match_failed(self):
+ proto = None
+
+ def factory():
+ nonlocal proto
+ proto = MyProto(loop=self.loop)
+ return proto
+
+ server, host, port = self._make_ssl_server(factory, SIGNED_CERTFILE)
+
+ sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext_client.options |= ssl.OP_NO_SSLv2
+ sslcontext_client.verify_mode = ssl.CERT_REQUIRED
+ sslcontext_client.load_verify_locations(
+ cafile=SIGNING_CA)
+ if hasattr(sslcontext_client, 'check_hostname'):
+ sslcontext_client.check_hostname = True
+
+ # incorrect server_hostname
+ f_c = self.loop.create_connection(MyProto, host, port,
+ ssl=sslcontext_client)
+ with self.assertRaisesRegex(ssl.CertificateError,
+ "hostname '127.0.0.1' doesn't match 'localhost'"):
+ self.loop.run_until_complete(f_c)
+
+ # close connection
+ proto.transport.close()
+ server.close()
+
+ @unittest.skipIf(ssl is None, 'No ssl module')
+ def test_create_server_ssl_verified(self):
+ proto = None
+
+ def factory():
+ nonlocal proto
+ proto = MyProto(loop=self.loop)
+ return proto
+
+ server, host, port = self._make_ssl_server(factory, SIGNED_CERTFILE)
+
+ sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext_client.options |= ssl.OP_NO_SSLv2
+ sslcontext_client.verify_mode = ssl.CERT_REQUIRED
+ sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
+ if hasattr(sslcontext_client, 'check_hostname'):
+ sslcontext_client.check_hostname = True
+
+ # Connection succeeds with correct CA and server hostname.
+ f_c = self.loop.create_connection(MyProto, host, port,
+ ssl=sslcontext_client,
+ server_hostname='localhost')
+ client, pr = self.loop.run_until_complete(f_c)
+
+ # close connection
+ proto.transport.close()
+ client.close()
+ server.close()
+
+ def test_create_server_sock(self):
+ proto = futures.Future(loop=self.loop)
+
+ class TestMyProto(MyProto):
+ def connection_made(self, transport):
+ super().connection_made(transport)
+ proto.set_result(self)
+
+ sock_ob = socket.socket(type=socket.SOCK_STREAM)
+ sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock_ob.bind(('0.0.0.0', 0))
+
+ f = self.loop.create_server(TestMyProto, sock=sock_ob)
+ server = self.loop.run_until_complete(f)
+ sock = server.sockets[0]
+ self.assertIs(sock, sock_ob)
+
+ host, port = sock.getsockname()
+ self.assertEqual(host, '0.0.0.0')
+ client = socket.socket()
+ client.connect(('127.0.0.1', port))
+ client.send(b'xxx')
+ client.close()
+ server.close()
+
+ def test_create_server_addr_in_use(self):
+ sock_ob = socket.socket(type=socket.SOCK_STREAM)
+ sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock_ob.bind(('0.0.0.0', 0))
+
+ f = self.loop.create_server(MyProto, sock=sock_ob)
+ server = self.loop.run_until_complete(f)
+ sock = server.sockets[0]
+ host, port = sock.getsockname()
+
+ f = self.loop.create_server(MyProto, host=host, port=port)
+ with self.assertRaises(OSError) as cm:
+ self.loop.run_until_complete(f)
+ self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
+
+ server.close()
+
+ @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
+ def test_create_server_dual_stack(self):
+ f_proto = futures.Future(loop=self.loop)
+
+ class TestMyProto(MyProto):
+ def connection_made(self, transport):
+ super().connection_made(transport)
+ f_proto.set_result(self)
+
+ try_count = 0
+ while True:
+ try:
+ port = support.find_unused_port()
+ f = self.loop.create_server(TestMyProto, host=None, port=port)
+ server = self.loop.run_until_complete(f)
+ except OSError as ex:
+ if ex.errno == errno.EADDRINUSE:
+ try_count += 1
+ self.assertGreaterEqual(5, try_count)
+ continue
+ else:
+ raise
+ else:
+ break
+ client = socket.socket()
+ client.connect(('127.0.0.1', port))
+ client.send(b'xxx')
+ proto = self.loop.run_until_complete(f_proto)
+ proto.transport.close()
+ client.close()
+
+ f_proto = futures.Future(loop=self.loop)
+ client = socket.socket(socket.AF_INET6)
+ client.connect(('::1', port))
+ client.send(b'xxx')
+ proto = self.loop.run_until_complete(f_proto)
+ proto.transport.close()
+ client.close()
+
+ server.close()
+
+ def test_server_close(self):
+ f = self.loop.create_server(MyProto, '0.0.0.0', 0)
+ server = self.loop.run_until_complete(f)
+ sock = server.sockets[0]
+ host, port = sock.getsockname()
+
+ client = socket.socket()
+ client.connect(('127.0.0.1', port))
+ client.send(b'xxx')
+ client.close()
+
+ server.close()
+
+ client = socket.socket()
+ self.assertRaises(
+ ConnectionRefusedError, client.connect, ('127.0.0.1', port))
+ client.close()
+
+ def test_create_datagram_endpoint(self):
+ class TestMyDatagramProto(MyDatagramProto):
+ def __init__(inner_self):
+ super().__init__(loop=self.loop)
+
+ def datagram_received(self, data, addr):
+ super().datagram_received(data, addr)
+ self.transport.sendto(b'resp:'+data, addr)
+
+ coro = self.loop.create_datagram_endpoint(
+ TestMyDatagramProto, local_addr=('127.0.0.1', 0))
+ s_transport, server = self.loop.run_until_complete(coro)
+ host, port = s_transport.get_extra_info('sockname')
+
+ coro = self.loop.create_datagram_endpoint(
+ lambda: MyDatagramProto(loop=self.loop),
+ remote_addr=(host, port))
+ transport, client = self.loop.run_until_complete(coro)
+
+ self.assertEqual('INITIALIZED', client.state)
+ transport.sendto(b'xxx')
+ for _ in range(1000):
+ if server.nbytes:
+ break
+ test_utils.run_briefly(self.loop)
+ self.assertEqual(3, server.nbytes)
+ for _ in range(1000):
+ if client.nbytes:
+ break
+ test_utils.run_briefly(self.loop)
+
+ # received
+ self.assertEqual(8, client.nbytes)
+
+ # extra info is available
+ self.assertIsNotNone(transport.get_extra_info('sockname'))
+
+ # close connection
+ transport.close()
+ self.loop.run_until_complete(client.done)
+ self.assertEqual('CLOSED', client.state)
+ server.transport.close()
+
+ def test_internal_fds(self):
+ loop = self.create_event_loop()
+ if not isinstance(loop, selector_events.BaseSelectorEventLoop):
+ self.skipTest('loop is not a BaseSelectorEventLoop')
+
+ self.assertEqual(1, loop._internal_fds)
+ loop.close()
+ self.assertEqual(0, loop._internal_fds)
+ self.assertIsNone(loop._csock)
+ self.assertIsNone(loop._ssock)
+
+ @unittest.skipUnless(sys.platform != 'win32',
+ "Don't support pipes for Windows")
+ def test_read_pipe(self):
+ proto = None
+
+ def factory():
+ nonlocal proto
+ proto = MyReadPipeProto(loop=self.loop)
+ return proto
+
+ rpipe, wpipe = os.pipe()
+ pipeobj = io.open(rpipe, 'rb', 1024)
+
+ @tasks.coroutine
+ def connect():
+ t, p = yield from self.loop.connect_read_pipe(factory, pipeobj)
+ self.assertIs(p, proto)
+ self.assertIs(t, proto.transport)
+ self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
+ self.assertEqual(0, proto.nbytes)
+
+ self.loop.run_until_complete(connect())
+
+ os.write(wpipe, b'1')
+ test_utils.run_briefly(self.loop)
+ self.assertEqual(1, proto.nbytes)
+
+ os.write(wpipe, b'2345')
+ test_utils.run_briefly(self.loop)
+ self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
+ self.assertEqual(5, proto.nbytes)
+
+ os.close(wpipe)
+ self.loop.run_until_complete(proto.done)
+ self.assertEqual(
+ ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
+ # extra info is available
+ self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
+
+ @unittest.skipUnless(sys.platform != 'win32',
+ "Don't support pipes for Windows")
+ def test_write_pipe(self):
+ proto = None
+ transport = None
+
+ def factory():
+ nonlocal proto
+ proto = MyWritePipeProto(loop=self.loop)
+ return proto
+
+ rpipe, wpipe = os.pipe()
+ pipeobj = io.open(wpipe, 'wb', 1024)
+
+ @tasks.coroutine
+ def connect():
+ nonlocal transport
+ t, p = yield from self.loop.connect_write_pipe(factory, pipeobj)
+ self.assertIs(p, proto)
+ self.assertIs(t, proto.transport)
+ self.assertEqual('CONNECTED', proto.state)
+ transport = t
+
+ self.loop.run_until_complete(connect())
+
+ transport.write(b'1')
+ test_utils.run_briefly(self.loop)
+ data = os.read(rpipe, 1024)
+ self.assertEqual(b'1', data)
+
+ transport.write(b'2345')
+ test_utils.run_briefly(self.loop)
+ data = os.read(rpipe, 1024)
+ self.assertEqual(b'2345', data)
+ self.assertEqual('CONNECTED', proto.state)
+
+ os.close(rpipe)
+
+ # extra info is available
+ self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
+
+ # close connection
+ proto.transport.close()
+ self.loop.run_until_complete(proto.done)
+ self.assertEqual('CLOSED', proto.state)
+
+ @unittest.skipUnless(sys.platform != 'win32',
+ "Don't support pipes for Windows")
+ def test_write_pipe_disconnect_on_close(self):
+ proto = None
+ transport = None
+
+ def factory():
+ nonlocal proto
+ proto = MyWritePipeProto(loop=self.loop)
+ return proto
+
+ rsock, wsock = test_utils.socketpair()
+ pipeobj = io.open(wsock.detach(), 'wb', 1024)
+
+ @tasks.coroutine
+ def connect():
+ nonlocal transport
+ t, p = yield from self.loop.connect_write_pipe(factory,
+ pipeobj)
+ self.assertIs(p, proto)
+ self.assertIs(t, proto.transport)
+ self.assertEqual('CONNECTED', proto.state)
+ transport = t
+
+ self.loop.run_until_complete(connect())
+ self.assertEqual('CONNECTED', proto.state)
+
+ transport.write(b'1')
+ data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
+ self.assertEqual(b'1', data)
+
+ rsock.close()
+
+ self.loop.run_until_complete(proto.done)
+ self.assertEqual('CLOSED', proto.state)
+
+ def test_prompt_cancellation(self):
+ r, w = test_utils.socketpair()
+ r.setblocking(False)
+ f = self.loop.sock_recv(r, 1)
+ ov = getattr(f, 'ov', None)
+ if ov is not None:
+ self.assertTrue(ov.pending)
+
+ @tasks.coroutine
+ def main():
+ try:
+ self.loop.call_soon(f.cancel)
+ yield from f
+ except futures.CancelledError:
+ res = 'cancelled'
+ else:
+ res = None
+ finally:
+ self.loop.stop()
+ return res
+
+ start = time.monotonic()
+ t = tasks.Task(main(), loop=self.loop)
+ self.loop.run_forever()
+ elapsed = time.monotonic() - start
+
+ self.assertLess(elapsed, 0.1)
+ self.assertEqual(t.result(), 'cancelled')
+ self.assertRaises(futures.CancelledError, f.result)
+ if ov is not None:
+ self.assertFalse(ov.pending)
+ self.loop._stop_serving(r)
+
+ r.close()
+ w.close()
+
+
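+# Subprocess tests shared by the loops that support subprocess_exec() and
+# subprocess_shell(); the echo*.py helper scripts live next to this file.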
+class SubprocessTestsMixin:
+
+ def check_terminated(self, returncode):
+ if sys.platform == 'win32':
+ self.assertIsInstance(returncode, int)
+ # expect 1 but sometimes get 0
+ else:
+ self.assertEqual(-signal.SIGTERM, returncode)
+
+ def check_killed(self, returncode):
+ if sys.platform == 'win32':
+ self.assertIsInstance(returncode, int)
+ # expect 1 but sometimes get 0
+ else:
+ self.assertEqual(-signal.SIGKILL, returncode)
+
+ def test_subprocess_exec(self):
+ proto = None
+ transp = None
+
+ prog = os.path.join(os.path.dirname(__file__), 'echo.py')
+
+ @tasks.coroutine
+ def connect():
+ nonlocal proto, transp
+ transp, proto = yield from self.loop.subprocess_exec(
+ functools.partial(MySubprocessProtocol, self.loop),
+ sys.executable, prog)
+ self.assertIsInstance(proto, MySubprocessProtocol)
+
+ self.loop.run_until_complete(connect())
+ self.loop.run_until_complete(proto.connected)
+ self.assertEqual('CONNECTED', proto.state)
+
+ stdin = transp.get_pipe_transport(0)
+ stdin.write(b'Python The Winner')
+ self.loop.run_until_complete(proto.got_data[1].wait())
+ transp.close()
+ self.loop.run_until_complete(proto.completed)
+ self.check_terminated(proto.returncode)
+ self.assertEqual(b'Python The Winner', proto.data[1])
+
+ def test_subprocess_interactive(self):
+ proto = None
+ transp = None
+
+ prog = os.path.join(os.path.dirname(__file__), 'echo.py')
+
+ @tasks.coroutine
+ def connect():
+ nonlocal proto, transp
+ transp, proto = yield from self.loop.subprocess_exec(
+ functools.partial(MySubprocessProtocol, self.loop),
+ sys.executable, prog)
+ self.assertIsInstance(proto, MySubprocessProtocol)
+
+ self.loop.run_until_complete(connect())
+ self.loop.run_until_complete(proto.connected)
+ self.assertEqual('CONNECTED', proto.state)
+
+ try:
+ stdin = transp.get_pipe_transport(0)
+ stdin.write(b'Python ')
+ self.loop.run_until_complete(proto.got_data[1].wait())
+ proto.got_data[1].clear()
+ self.assertEqual(b'Python ', proto.data[1])
+
+ stdin.write(b'The Winner')
+ self.loop.run_until_complete(proto.got_data[1].wait())
+ self.assertEqual(b'Python The Winner', proto.data[1])
+ finally:
+ transp.close()
+
+ self.loop.run_until_complete(proto.completed)
+ self.check_terminated(proto.returncode)
+
+ def test_subprocess_shell(self):
+ proto = None
+ transp = None
+
+ @tasks.coroutine
+ def connect():
+ nonlocal proto, transp
+ transp, proto = yield from self.loop.subprocess_shell(
+ functools.partial(MySubprocessProtocol, self.loop),
+ 'echo Python')
+ self.assertIsInstance(proto, MySubprocessProtocol)
+
+ self.loop.run_until_complete(connect())
+ self.loop.run_until_complete(proto.connected)
+
+ transp.get_pipe_transport(0).close()
+ self.loop.run_until_complete(proto.completed)
+ self.assertEqual(0, proto.returncode)
+ self.assertTrue(all(f.done() for f in proto.disconnects.values()))
+ self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
+ self.assertEqual(proto.data[2], b'')
+
+ def test_subprocess_exitcode(self):
+ proto = None
+
+ @tasks.coroutine
+ def connect():
+ nonlocal proto
+ transp, proto = yield from self.loop.subprocess_shell(
+ functools.partial(MySubprocessProtocol, self.loop),
+ 'exit 7', stdin=None, stdout=None, stderr=None)
+ self.assertIsInstance(proto, MySubprocessProtocol)
+
+ self.loop.run_until_complete(connect())
+ self.loop.run_until_complete(proto.completed)
+ self.assertEqual(7, proto.returncode)
+
+ def test_subprocess_close_after_finish(self):
+ proto = None
+ transp = None
+
+ @tasks.coroutine
+ def connect():
+ nonlocal proto, transp
+ transp, proto = yield from self.loop.subprocess_shell(
+ functools.partial(MySubprocessProtocol, self.loop),
+ 'exit 7', stdin=None, stdout=None, stderr=None)
+ self.assertIsInstance(proto, MySubprocessProtocol)
+
+ self.loop.run_until_complete(connect())
+ self.assertIsNone(transp.get_pipe_transport(0))
+ self.assertIsNone(transp.get_pipe_transport(1))
+ self.assertIsNone(transp.get_pipe_transport(2))
+ self.loop.run_until_complete(proto.completed)
+ self.assertEqual(7, proto.returncode)
+ self.assertIsNone(transp.close())
+
+ def test_subprocess_kill(self):
+ proto = None
+ transp = None
+
+ prog = os.path.join(os.path.dirname(__file__), 'echo.py')
+
+ @tasks.coroutine
+ def connect():
+ nonlocal proto, transp
+ transp, proto = yield from self.loop.subprocess_exec(
+ functools.partial(MySubprocessProtocol, self.loop),
+ sys.executable, prog)
+ self.assertIsInstance(proto, MySubprocessProtocol)
+
+ self.loop.run_until_complete(connect())
+ self.loop.run_until_complete(proto.connected)
+
+ transp.kill()
+ self.loop.run_until_complete(proto.completed)
+ self.check_killed(proto.returncode)
+
+ def test_subprocess_terminate(self):
+ proto = None
+ transp = None
+
+ prog = os.path.join(os.path.dirname(__file__), 'echo.py')
+
+ @tasks.coroutine
+ def connect():
+ nonlocal proto, transp
+ transp, proto = yield from self.loop.subprocess_exec(
+ functools.partial(MySubprocessProtocol, self.loop),
+ sys.executable, prog)
+ self.assertIsInstance(proto, MySubprocessProtocol)
+
+ self.loop.run_until_complete(connect())
+ self.loop.run_until_complete(proto.connected)
+
+ transp.terminate()
+ self.loop.run_until_complete(proto.completed)
+ self.check_terminated(proto.returncode)
+
+ @unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
+ def test_subprocess_send_signal(self):
+ proto = None
+ transp = None
+
+ prog = os.path.join(os.path.dirname(__file__), 'echo.py')
+
+ @tasks.coroutine
+ def connect():
+ nonlocal proto, transp
+ transp, proto = yield from self.loop.subprocess_exec(
+ functools.partial(MySubprocessProtocol, self.loop),
+ sys.executable, prog)
+ self.assertIsInstance(proto, MySubprocessProtocol)
+
+ self.loop.run_until_complete(connect())
+ self.loop.run_until_complete(proto.connected)
+
+ transp.send_signal(signal.SIGHUP)
+ self.loop.run_until_complete(proto.completed)
+ self.assertEqual(-signal.SIGHUP, proto.returncode)
+
+ def test_subprocess_stderr(self):
+ proto = None
+ transp = None
+
+ prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
+
+ @tasks.coroutine
+ def connect():
+ nonlocal proto, transp
+ transp, proto = yield from self.loop.subprocess_exec(
+ functools.partial(MySubprocessProtocol, self.loop),
+ sys.executable, prog)
+ self.assertIsInstance(proto, MySubprocessProtocol)
+
+ self.loop.run_until_complete(connect())
+ self.loop.run_until_complete(proto.connected)
+
+ stdin = transp.get_pipe_transport(0)
+ stdin.write(b'test')
+
+ self.loop.run_until_complete(proto.completed)
+
+ transp.close()
+ self.assertEqual(b'OUT:test', proto.data[1])
+ self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
+ self.assertEqual(0, proto.returncode)
+
+ def test_subprocess_stderr_redirect_to_stdout(self):
+ proto = None
+ transp = None
+
+ prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
+
+ @tasks.coroutine
+ def connect():
+ nonlocal proto, transp
+ transp, proto = yield from self.loop.subprocess_exec(
+ functools.partial(MySubprocessProtocol, self.loop),
+ sys.executable, prog, stderr=subprocess.STDOUT)
+ self.assertIsInstance(proto, MySubprocessProtocol)
+
+ self.loop.run_until_complete(connect())
+ self.loop.run_until_complete(proto.connected)
+
+ stdin = transp.get_pipe_transport(0)
+ self.assertIsNotNone(transp.get_pipe_transport(1))
+ self.assertIsNone(transp.get_pipe_transport(2))
+
+ stdin.write(b'test')
+ self.loop.run_until_complete(proto.completed)
+ self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
+ proto.data[1])
+ self.assertEqual(b'', proto.data[2])
+
+ transp.close()
+ self.assertEqual(0, proto.returncode)
+
+ def test_subprocess_close_client_stream(self):
+ proto = None
+ transp = None
+
+ prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
+
+ @tasks.coroutine
+ def connect():
+ nonlocal proto, transp
+ transp, proto = yield from self.loop.subprocess_exec(
+ functools.partial(MySubprocessProtocol, self.loop),
+ sys.executable, prog)
+ self.assertIsInstance(proto, MySubprocessProtocol)
+
+ self.loop.run_until_complete(connect())
+ self.loop.run_until_complete(proto.connected)
+
+ stdin = transp.get_pipe_transport(0)
+ stdout = transp.get_pipe_transport(1)
+ stdin.write(b'test')
+ self.loop.run_until_complete(proto.got_data[1].wait())
+ self.assertEqual(b'OUT:test', proto.data[1])
+
+ stdout.close()
+ self.loop.run_until_complete(proto.disconnects[1])
+ stdin.write(b'xxx')
+ self.loop.run_until_complete(proto.got_data[2].wait())
+ if sys.platform != 'win32':
+ self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
+ else:
+ # After closing the read-end of a pipe, writing to the
+ # write-end using os.write() fails with errno==EINVAL and
+ # GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
+ # WriteFile() we get ERROR_BROKEN_PIPE as expected.)
+ self.assertEqual(b'ERR:OSError', proto.data[2])
+ transp.close()
+ self.loop.run_until_complete(proto.completed)
+ self.check_terminated(proto.returncode)
+
+ def test_subprocess_wait_no_same_group(self):
+ proto = None
+ transp = None
+
+ @tasks.coroutine
+ def connect():
+ nonlocal proto
+ # start the new process in a new session
+ transp, proto = yield from self.loop.subprocess_shell(
+ functools.partial(MySubprocessProtocol, self.loop),
+ 'exit 7', stdin=None, stdout=None, stderr=None,
+ start_new_session=True)
+ self.assertIsInstance(proto, MySubprocessProtocol)
+
+ self.loop.run_until_complete(connect())
+ self.loop.run_until_complete(proto.completed)
+ self.assertEqual(7, proto.returncode)
+
+
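+# Concrete event loop test classes: selector and proactor loops on Windows,
+# one selector-based loop per available selector on Unix.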
+if sys.platform == 'win32':
+ from asyncio import windows_events
+
+ class SelectEventLoopTests(EventLoopTestsMixin, unittest.TestCase):
+
+ def create_event_loop(self):
+ return windows_events.SelectorEventLoop()
+
+ class ProactorEventLoopTests(EventLoopTestsMixin,
+ SubprocessTestsMixin,
+ unittest.TestCase):
+
+ def create_event_loop(self):
+ return windows_events.ProactorEventLoop()
+
+ def test_create_ssl_connection(self):
+ raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
+
+ def test_create_server_ssl(self):
+ raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
+
+ def test_create_server_ssl_verify_failed(self):
+ raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
+
+ def test_create_server_ssl_match_failed(self):
+ raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
+
+ def test_create_server_ssl_verified(self):
+ raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
+
+ def test_reader_callback(self):
+ raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
+
+ def test_reader_callback_cancel(self):
+ raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
+
+ def test_writer_callback(self):
+ raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
+
+ def test_writer_callback_cancel(self):
+ raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
+
+ def test_create_datagram_endpoint(self):
+ raise unittest.SkipTest(
+ "IocpEventLoop does not have create_datagram_endpoint()")
+else:
+ from asyncio import selectors
+ from asyncio import unix_events
+
+ class UnixEventLoopTestsMixin(EventLoopTestsMixin):
+ def setUp(self):
+ super().setUp()
+ watcher = unix_events.SafeChildWatcher()
+ watcher.attach_loop(self.loop)
+ events.set_child_watcher(watcher)
+
+ def tearDown(self):
+ events.set_child_watcher(None)
+ super().tearDown()
+
+ if hasattr(selectors, 'KqueueSelector'):
+ class KqueueEventLoopTests(UnixEventLoopTestsMixin,
+ SubprocessTestsMixin,
+ unittest.TestCase):
+
+ def create_event_loop(self):
+ return unix_events.SelectorEventLoop(
+ selectors.KqueueSelector())
+
+ if hasattr(selectors, 'EpollSelector'):
+ class EPollEventLoopTests(UnixEventLoopTestsMixin,
+ SubprocessTestsMixin,
+ unittest.TestCase):
+
+ def create_event_loop(self):
+ return unix_events.SelectorEventLoop(selectors.EpollSelector())
+
+ if hasattr(selectors, 'PollSelector'):
+ class PollEventLoopTests(UnixEventLoopTestsMixin,
+ SubprocessTestsMixin,
+ unittest.TestCase):
+
+ def create_event_loop(self):
+ return unix_events.SelectorEventLoop(selectors.PollSelector())
+
+ # Should always exist.
+ class SelectEventLoopTests(UnixEventLoopTestsMixin,
+ SubprocessTestsMixin,
+ unittest.TestCase):
+
+ def create_event_loop(self):
+ return unix_events.SelectorEventLoop(selectors.SelectSelector())
+
+
+class HandleTests(unittest.TestCase):
+
+ def test_handle(self):
+ def callback(*args):
+ return args
+
+ args = ()
+ h = events.Handle(callback, args)
+ self.assertIs(h._callback, callback)
+ self.assertIs(h._args, args)
+ self.assertFalse(h._cancelled)
+
+ r = repr(h)
+ self.assertTrue(r.startswith(
+ 'Handle('
+ '<function HandleTests.test_handle.<locals>.callback'))
+ self.assertTrue(r.endswith('())'))
+
+ h.cancel()
+ self.assertTrue(h._cancelled)
+
+ r = repr(h)
+ self.assertTrue(r.startswith(
+ 'Handle('
+ '<function HandleTests.test_handle.<locals>.callback'))
+ self.assertTrue(r.endswith('())<cancelled>'), r)
+
+ def test_make_handle(self):
+ def callback(*args):
+ return args
+ h1 = events.Handle(callback, ())
+ self.assertRaises(
+ AssertionError, events.make_handle, h1, ())
+
+ @unittest.mock.patch('asyncio.events.logger')
+ def test_callback_with_exception(self, log):
+ def callback():
+ raise ValueError()
+
+ h = events.Handle(callback, ())
+ h._run()
+ self.assertTrue(log.exception.called)
+
+
+class TimerTests(unittest.TestCase):
+
+ def test_hash(self):
+ when = time.monotonic()
+ h = events.TimerHandle(when, lambda: False, ())
+ self.assertEqual(hash(h), hash(when))
+
+ def test_timer(self):
+ def callback(*args):
+ return args
+
+ args = ()
+ when = time.monotonic()
+ h = events.TimerHandle(when, callback, args)
+ self.assertIs(h._callback, callback)
+ self.assertIs(h._args, args)
+ self.assertFalse(h._cancelled)
+
+ r = repr(h)
+ self.assertTrue(r.endswith('())'))
+
+ h.cancel()
+ self.assertTrue(h._cancelled)
+
+ r = repr(h)
+ self.assertTrue(r.endswith('())<cancelled>'), r)
+
+ self.assertRaises(AssertionError,
+ events.TimerHandle, None, callback, args)
+
+ def test_timer_comparison(self):
+ def callback(*args):
+ return args
+
+ when = time.monotonic()
+
+ h1 = events.TimerHandle(when, callback, ())
+ h2 = events.TimerHandle(when, callback, ())
+ # TODO: Use assertLess etc.
+ self.assertFalse(h1 < h2)
+ self.assertFalse(h2 < h1)
+ self.assertTrue(h1 <= h2)
+ self.assertTrue(h2 <= h1)
+ self.assertFalse(h1 > h2)
+ self.assertFalse(h2 > h1)
+ self.assertTrue(h1 >= h2)
+ self.assertTrue(h2 >= h1)
+ self.assertTrue(h1 == h2)
+ self.assertFalse(h1 != h2)
+
+ h2.cancel()
+ self.assertFalse(h1 == h2)
+
+ h1 = events.TimerHandle(when, callback, ())
+ h2 = events.TimerHandle(when + 10.0, callback, ())
+ self.assertTrue(h1 < h2)
+ self.assertFalse(h2 < h1)
+ self.assertTrue(h1 <= h2)
+ self.assertFalse(h2 <= h1)
+ self.assertFalse(h1 > h2)
+ self.assertTrue(h2 > h1)
+ self.assertFalse(h1 >= h2)
+ self.assertTrue(h2 >= h1)
+ self.assertFalse(h1 == h2)
+ self.assertTrue(h1 != h2)
+
+ h3 = events.Handle(callback, ())
+ self.assertIs(NotImplemented, h1.__eq__(h3))
+ self.assertIs(NotImplemented, h1.__ne__(h3))
+
+
+class AbstractEventLoopTests(unittest.TestCase):
+
+ def test_not_implemented(self):
+ f = unittest.mock.Mock()
+ loop = events.AbstractEventLoop()
+ self.assertRaises(
+ NotImplementedError, loop.run_forever)
+ self.assertRaises(
+ NotImplementedError, loop.run_until_complete, None)
+ self.assertRaises(
+ NotImplementedError, loop.stop)
+ self.assertRaises(
+ NotImplementedError, loop.is_running)
+ self.assertRaises(
+ NotImplementedError, loop.close)
+ self.assertRaises(
+ NotImplementedError, loop.call_later, None, None)
+ self.assertRaises(
+ NotImplementedError, loop.call_at, f, f)
+ self.assertRaises(
+ NotImplementedError, loop.call_soon, None)
+ self.assertRaises(
+ NotImplementedError, loop.time)
+ self.assertRaises(
+ NotImplementedError, loop.call_soon_threadsafe, None)
+ self.assertRaises(
+ NotImplementedError, loop.run_in_executor, f, f)
+ self.assertRaises(
+ NotImplementedError, loop.set_default_executor, f)
+ self.assertRaises(
+ NotImplementedError, loop.getaddrinfo, 'localhost', 8080)
+ self.assertRaises(
+ NotImplementedError, loop.getnameinfo, ('localhost', 8080))
+ self.assertRaises(
+ NotImplementedError, loop.create_connection, f)
+ self.assertRaises(
+ NotImplementedError, loop.create_server, f)
+ self.assertRaises(
+ NotImplementedError, loop.create_datagram_endpoint, f)
+ self.assertRaises(
+ NotImplementedError, loop.add_reader, 1, f)
+ self.assertRaises(
+ NotImplementedError, loop.remove_reader, 1)
+ self.assertRaises(
+ NotImplementedError, loop.add_writer, 1, f)
+ self.assertRaises(
+ NotImplementedError, loop.remove_writer, 1)
+ self.assertRaises(
+ NotImplementedError, loop.sock_recv, f, 10)
+ self.assertRaises(
+ NotImplementedError, loop.sock_sendall, f, 10)
+ self.assertRaises(
+ NotImplementedError, loop.sock_connect, f, f)
+ self.assertRaises(
+ NotImplementedError, loop.sock_accept, f)
+ self.assertRaises(
+ NotImplementedError, loop.add_signal_handler, 1, f)
+ self.assertRaises(
+ NotImplementedError, loop.remove_signal_handler, 1)
+ self.assertRaises(
+ NotImplementedError, loop.remove_signal_handler, 1)
+ self.assertRaises(
+ NotImplementedError, loop.connect_read_pipe, f,
+ unittest.mock.sentinel.pipe)
+ self.assertRaises(
+ NotImplementedError, loop.connect_write_pipe, f,
+ unittest.mock.sentinel.pipe)
+ self.assertRaises(
+ NotImplementedError, loop.subprocess_shell, f,
+ unittest.mock.sentinel)
+ self.assertRaises(
+ NotImplementedError, loop.subprocess_exec, f)
+
+
+class ProtocolsAbsTests(unittest.TestCase):
+
+ def test_empty(self):
+ f = unittest.mock.Mock()
+ p = protocols.Protocol()
+ self.assertIsNone(p.connection_made(f))
+ self.assertIsNone(p.connection_lost(f))
+ self.assertIsNone(p.data_received(f))
+ self.assertIsNone(p.eof_received())
+
+ dp = protocols.DatagramProtocol()
+ self.assertIsNone(dp.connection_made(f))
+ self.assertIsNone(dp.connection_lost(f))
+ self.assertIsNone(dp.error_received(f))
+ self.assertIsNone(dp.datagram_received(f, f))
+
+ sp = protocols.SubprocessProtocol()
+ self.assertIsNone(sp.connection_made(f))
+ self.assertIsNone(sp.connection_lost(f))
+ self.assertIsNone(sp.pipe_data_received(1, f))
+ self.assertIsNone(sp.pipe_connection_lost(1, f))
+ self.assertIsNone(sp.process_exited())
+
+
+class PolicyTests(unittest.TestCase):
+
+ def create_policy(self):
+ if sys.platform == "win32":
+ from asyncio import windows_events
+ return windows_events.DefaultEventLoopPolicy()
+ else:
+ from asyncio import unix_events
+ return unix_events.DefaultEventLoopPolicy()
+
+ def test_event_loop_policy(self):
+ policy = events.AbstractEventLoopPolicy()
+ self.assertRaises(NotImplementedError, policy.get_event_loop)
+ self.assertRaises(NotImplementedError, policy.set_event_loop, object())
+ self.assertRaises(NotImplementedError, policy.new_event_loop)
+ self.assertRaises(NotImplementedError, policy.get_child_watcher)
+ self.assertRaises(NotImplementedError, policy.set_child_watcher,
+ object())
+
+ def test_get_event_loop(self):
+ policy = self.create_policy()
+ self.assertIsNone(policy._local._loop)
+
+ loop = policy.get_event_loop()
+ self.assertIsInstance(loop, events.AbstractEventLoop)
+
+ self.assertIs(policy._local._loop, loop)
+ self.assertIs(loop, policy.get_event_loop())
+ loop.close()
+
+ def test_get_event_loop_calls_set_event_loop(self):
+ policy = self.create_policy()
+
+ with unittest.mock.patch.object(
+ policy, "set_event_loop",
+ wraps=policy.set_event_loop) as m_set_event_loop:
+
+ loop = policy.get_event_loop()
+
+ # policy._local._loop must be set through .set_event_loop()
+ # (the unix DefaultEventLoopPolicy needs this call to attach
+ # the child watcher correctly)
+ m_set_event_loop.assert_called_with(loop)
+
+ loop.close()
+
+ def test_get_event_loop_after_set_none(self):
+ policy = self.create_policy()
+ policy.set_event_loop(None)
+ self.assertRaises(AssertionError, policy.get_event_loop)
+
+ @unittest.mock.patch('asyncio.events.threading.current_thread')
+ def test_get_event_loop_thread(self, m_current_thread):
+
+ def f():
+ policy = self.create_policy()
+ self.assertRaises(AssertionError, policy.get_event_loop)
+
+ th = threading.Thread(target=f)
+ th.start()
+ th.join()
+
+ def test_new_event_loop(self):
+ policy = self.create_policy()
+
+ loop = policy.new_event_loop()
+ self.assertIsInstance(loop, events.AbstractEventLoop)
+ loop.close()
+
+ def test_set_event_loop(self):
+ policy = self.create_policy()
+ old_loop = policy.get_event_loop()
+
+ self.assertRaises(AssertionError, policy.set_event_loop, object())
+
+ loop = policy.new_event_loop()
+ policy.set_event_loop(loop)
+ self.assertIs(loop, policy.get_event_loop())
+ self.assertIsNot(old_loop, policy.get_event_loop())
+ loop.close()
+ old_loop.close()
+
+ def test_get_event_loop_policy(self):
+ policy = events.get_event_loop_policy()
+ self.assertIsInstance(policy, events.AbstractEventLoopPolicy)
+ self.assertIs(policy, events.get_event_loop_policy())
+
+ def test_set_event_loop_policy(self):
+ self.assertRaises(
+ AssertionError, events.set_event_loop_policy, object())
+
+ old_policy = events.get_event_loop_policy()
+
+ policy = self.create_policy()
+ events.set_event_loop_policy(policy)
+ self.assertIs(policy, events.get_event_loop_policy())
+ self.assertIsNot(policy, old_policy)
+
+
+if __name__ == '__main__':
+ unittest.main()
- Issue #19545: Avoid chained exceptions while passing stray % to
time.strptime(). Initial patch by Claudiu Popa.
-- Issue #19633: Fixed writing not compressed 16- and 32-bit wave files on
- big-endian platforms.
+Tests
+-----
-- Issue #19449: in csv's writerow, handle non-string keys when generating the
- error message that certain keys are not in the 'fieldnames' list.
+- Issue #19572: More skipped tests explicitly marked as skipped.
+
-- Fix test.support.bind_port() to not cause an error when Python was compiled
- on a system with SO_REUSEPORT defined in the headers but run on a system
- with an OS kernel that does not support that reasonably new socket option.
+- Issue #19595: Re-enabled a long-disabled test in test_winsound.
-- Fix compilation error under gcc of the ctypes module bundled libffi for arm.
+- Issue #19588: Fixed tests in test_random that were silently skipped most
+ of the time. Patch by Julian Gindi.
-- Issue #19523: Closed FileHandler leak which occurred when delay was set.
+Build
+-----
-- Issue #13674: Prevented time.strftime from crashing on Windows when given
- a year before 1900 and a format of %y.
+- Issue #19922: define _INCLUDE__STDC_A1_SOURCE in HP-UX to include mbstate_t
+ for mbrtowc().
-- Issue #19544 and Issue #6286: Restore use of urllib over http allowing use
- of http_proxy for Distutils upload command, a feature accidentally lost
- in the rollback of distutils2.
+- Issue #19788: kill_python(_d).exe is now run as a PreBuildEvent on the
+ pythoncore sub-project. This should prevent build errors due to a previous
+ build's python(_d).exe still running.
-- Issue #19544 and Issue #7457: Restore the read_pkg_file method to
- distutils.dist.DistributionMetadata accidentally removed in the undo of
- distutils2.
+Documentation
+-------------
-- Issue #1575020: Fixed support of 24-bit wave files on big-endian platforms.
+- Issue #18840: Introduce the json module in the tutorial, and deemphasize
+ the pickle module.
-- Issue #19480: HTMLParser now accepts all valid start-tag names as defined
- by the HTML5 standard.
+- Issue #19845: Updated the Compiling Python on Windows section.
-- Issue #6157: Fixed tkinter.Text.debug(). Original patch by Guilherme Polo.
+- Issue #19795: Improved markup of True/False constants.
-- Issue #6160: The bbox() method of tkinter.Spinbox now returns a tuple of
- integers instead of a string. Based on patch by Guilherme Polo.
-- Issue #10197: Rework subprocess.get[status]output to use subprocess
- functionality and thus to work on Windows. Patch by Nick Coghlan.
+What's New in Python 3.4.0 Beta 1?
+==================================
-- Issue #19286: Directories in ``package_data`` are no longer added to
- the filelist, preventing failure outlined in the ticket.
+Release date: 2013-11-24
-Tests
------
+Core and Builtins
+-----------------
-- Issue #19595: Re-enabled a long-disabled test in test_winsound.
+- Use the repr of a module name in more places in import, especially
+ exceptions.
-- Issue #19588: Fixed tests in test_random that were silently skipped most
- of the time. Patch by Julian Gindi.
+- Issue #19619: str.encode, bytes.decode and bytearray.decode now use an
+ internal API to throw LookupError for known non-text encodings, rather
+ than attempting the encoding or decoding operation and then throwing a
+ TypeError for an unexpected output type. (The latter mechanism remains
+ in place for third party non-text encodings)
-- Issue #19596: Set untestable tests in test_importlib to None to avoid
- reporting success on empty tests.
+- Issue #19183: Implement PEP 456 'secure and interchangeable hash algorithm'.
+ Python now uses SipHash24 on all major platforms.
-- Issue #19440: Clean up test_capi by removing an unnecessary __future__
- import, converting from test_main to unittest.main, and running the
- _testcapi module tests within a unittest TestCase.
+- Issue #12892: The utf-16* and utf-32* encoders no longer allow surrogate code
+ points (U+D800-U+DFFF) to be encoded. The utf-32* decoders no longer decode
+ byte sequences that correspond to surrogate code points. The surrogatepass
+ error handler now works with the utf-16* and utf-32* codecs. Based on
+ patches by Victor Stinner and Kang-Hao (Kenny) Lu.
-- Issue #18702, 19572: All skipped tests now reported as skipped.
+- Issue #17806: Added keyword-argument support for "tabsize" to
+ str/bytes.expandtabs().
-- Issue #19085: Added basic tests for all tkinter widget options.
+- Issue #17828: Output type errors in str.encode(), bytes.decode() and
+ bytearray.decode() now direct users to codecs.encode() or codecs.decode()
+ as appropriate.
-Documentation
--------------
+- Issue #17828: The interpreter now attempts to chain errors that occur in
+ codec processing with a replacement exception of the same type that
+ includes the codec name in the error message. It ensures it only does this
+ when the creation of the replacement exception won't lose any information.
-- Issue #18840: Introduce the json module in the tutorial, and deemphasize
- the pickle module.
+- Issue #19466: Clear the frames of daemon threads earlier during the
+ Python shutdown to call object destructors, so "unclosed file" resource
+ warnings are now correctly emitted for daemon threads.
-- Issue #19845: Updated the Compiling Python on Windows section.
+- Issue #19514: Deduplicate some _Py_IDENTIFIER declarations.
+ Patch by Andrei Dorian Duma.
-- Issue #19795: Improved markup of True/False constants.
+- Issue #17936: Fix O(n**2) behaviour when adding or removing many subclasses
+ of a given type.
-- Issue #18326: Clarify that list.sort's arguments are keyword-only. Also,
- attempt to reduce confusion in the glossary by not saying there are
- different "types" of arguments and parameters.
+- Issue #19428: zipimport now handles errors when reading truncated or invalid
+ ZIP archives.
-Build
------
+- Issue #18408: Add a new PyFrame_FastToLocalsWithError() function to handle
+ exceptions when merging fast locals into f_locals of a frame.
+ PyEval_GetLocals() now raises an exception and returns NULL on failure.
-- Issue #19788: kill_python(_d).exe is now run as a PreBuildEvent on the
- pythoncore sub-project. This should prevent build errors due a previous
- build's python(_d).exe still running.
+- Issue #19369: Optimized the usage of __length_hint__().
-- Add workaround for VS 2010 nmake clean issue. VS 2010 doesn't set up PATH
- for nmake.exe correctly.
+- Issue #18603: Ensure that PyOS_mystricmp and PyOS_mystrnicmp are in the
+ Python executable and not removed by the linker's optimizer.
+- Issue #19306: Add extra hints to the faulthandler module's stack
+ dumps that these are "upside down".
-What's New in Python 3.3.3?
-===========================
+Library
+-------
-*Release date: 17-Nov-2013*
+- Issue #3158: doctest can now find doctests in functions and methods
+ written in C.
-No changes from release candidate 2.
+- Issue #13477: Added command line interface to the tarfile module.
+ Original patch by Berker Peksag.
+- Issue #19674: inspect.signature() now produces a correct signature
+ for some builtins.
-What's New in Python 3.3.3 release candidate 2?
-===============================================
+- Issue #19722: Added opcode.stack_effect(), which
+ computes the stack effect of bytecode instructions.
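+
+ A hedged sketch of the new helper; the exact effect values depend on the
+ opcode set of the running interpreter::
+
+     >>> import opcode
+     >>> opcode.stack_effect(opcode.opmap['POP_TOP'])  # pops one item
+     -1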
-*Release date: 11-Nov-2013*
+- Issue #19735: Implement private function ssl._create_stdlib_context() to
+ create SSLContext objects in Python's stdlib module. It provides a single
+ configuration point and makes use of SSLContext.load_default_certs().
-Library
--------
+- Issue #16203: Add re.fullmatch() function and regex.fullmatch() method,
+ which anchor the pattern at both ends of the string to match.
+ Original patch by Matthew Barnett.
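+
+ An illustrative sketch of the anchored matching (not from the original
+ entry)::
+
+     >>> import re
+     >>> re.fullmatch(r"\d+", "2013") is not None    # whole string matches
+     True
+     >>> re.fullmatch(r"\d+", "2013b1") is None      # partial match rejected
+     True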
-- Issue #19227: Any re-seeding of the OpenSSL RNG on fork has been removed;
- this should be handled by OpenSSL itself or by the application.
+- Issue #13592: Improved the repr for regular expression pattern objects.
+ Based on patch by Hugo Lopes Tavares.
-- Issue #19435: Fix directory traversal attack on CGIHttpRequestHandler.
+- Issue #19641: Added the audioop.byteswap() function to convert big-endian
+ samples to little-endian and vice versa.
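+
+ A small sketch with 16-bit samples (illustrative only; audioop was later
+ removed from the stdlib, so this assumes a release that still ships it)::
+
+     >>> import audioop
+     >>> audioop.byteswap(b'\x01\x02\x03\x04', 2)    # swap each 2-byte sample
+     b'\x02\x01\x04\x03'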
-Tests
------
+- Issue #15204: Deprecated the 'U' mode in file-like objects.
-- Issue #18964: Fix test_tcl when run with Tcl/Tk versions < 8.5.
+- Issue #17810: Implement PEP 3154, pickle protocol 4.
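+
+ A minimal sketch of selecting the new protocol explicitly (illustrative
+ only)::
+
+     >>> import pickle
+     >>> blob = pickle.dumps({'spam': 1}, protocol=4)
+     >>> pickle.loads(blob)
+     {'spam': 1}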
-Build
------
+- Issue #19668: Added support for the cp1125 encoding.
-- Issue #15663: Revert OS X installer built-in Tcl/Tk support for 3.3.3.
- Some third-party projects, such as Matplotlib and PIL/Pillow,
- depended on being able to build with Tcl and Tk frameworks in
- /Library/Frameworks.
+- Issue #19689: Add ssl.create_default_context() factory function. It creates
+ a new SSLContext object with secure default settings.
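+
+ A hedged sketch of the factory's defaults; the precise settings may vary
+ between releases::
+
+     >>> import ssl
+     >>> ctx = ssl.create_default_context()
+     >>> ctx.verify_mode == ssl.CERT_REQUIRED    # server certs are verified
+     True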
+- Issue #19727: os.utime(..., None) is now potentially more precise
+ under Windows.
-What's New in Python 3.3.3 release candidate 1?
-===============================================
+- Issue #17201: ZIP64 extensions now are enabled by default. Patch by
+ William Mallard.
-*Release date: 27-Oct-2013*
+- Issue #19292: Add SSLContext.load_default_certs() to load default root CA
+ certificates from default stores or system stores. By default the method
+ loads CA certs for authentication of server certs.
-Core and Builtins
------------------
+- Issue #19673: Add pathlib to the stdlib as a provisional module (PEP 428).
-- Issue #18603: Ensure that PyOS_mystricmp and PyOS_mystrnicmp are in the
- Python executable and not removed by the linker's optimizer.
+- Issue #16596: pdb in a generator now properly skips over yield and
+ yield from rather than stepping out of the generator into its
+ caller. (This is essential for stepping through asyncio coroutines.)
-- Issue #19279: UTF-7 decoder no more produces illegal strings.
+- Issue #17916: Added dis.Bytecode.from_traceback() and
+ dis.Bytecode.current_offset to easily display "current instruction"
+ markers in the new disassembly API (Patch by Claudiu Popa).
-- Fix macro expansion of _PyErr_OCCURRED(), and make sure to use it in at
- least one place so as to avoid regressions.
+- Issue #19552: venv now supports bootstrapping pip into virtual environments.
-- Issue #19014: memoryview.cast() is now allowed on zero-length views.
+- Issue #17134: Finalize interface to Windows' certificate store. Cert and
+ CRL enumeration are now two functions. enum_certificates() also returns
+ purpose flags as set of OIDs.
-- Issue #19098: Prevent overflow in the compiler when the recursion limit is set
- absurdly high.
+- Issue #19555: Restore sysconfig.get_config_var('SO') (and the distutils
+ equivalent) with a DeprecationWarning pointing people at $EXT_SUFFIX.
-- Issue #18942: sys._debugmallocstats() output was damaged on Windows.
+- Issue #8813: Add SSLContext.verify_flags to change the verification flags
+ of the context in order to enable certificate revocation list (CRL)
+ checks or strict X509 rules.
-- Issue #18667: Add missing "HAVE_FCHOWNAT" symbol to posix._have_functions.
+- Issue #18294: Fix the zlib module to make it 64-bit safe.
-- Issue #18368: PyOS_StdioReadline() no longer leaks memory when realloc()
- fails.
+- Issue #19682: Fix compatibility issue with old version of OpenSSL that
+ was introduced by Issue #18379.
-- Issue #16741: Fix an error reporting in int().
+- Issue #14455: plistlib now supports binary plists and has an updated API.
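+
+ A round-trip sketch of the updated module-level API (illustrative only)::
+
+     >>> import plistlib
+     >>> blob = plistlib.dumps({'answer': 42}, fmt=plistlib.FMT_BINARY)
+     >>> plistlib.loads(blob)
+     {'answer': 42}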
-- Issue #17899: Fix rare file descriptor leak in os.listdir().
+- Issue #19633: Fixed writing uncompressed 16- and 32-bit wave files on
+ big-endian platforms.
-- Issue #18552: Check return value of PyArena_AddPyObject() in
- obj2ast_object().
+- Issue #18379: SSLSocket.getpeercert() returns CA issuer AIA fields, OCSP
+ and CRL distribution points.
-- Issue #18560: Fix potential NULL pointer dereference in sum().
+- Issue #18138: Implement the cadata argument of
+ SSLContext.load_verify_locations() to load CA certificates and CRL from
+ memory. It supports PEM and DER encoded strings.
-- Issue #15905: Fix theoretical buffer overflow in handling of sys.argv[0],
- prefix and exec_prefix if the operation system does not obey MAXPATHLEN.
+- Issue #18775: Add name and block_size attribute to HMAC object. They now
+ provide the same API elements as non-keyed cryptographic hash functions.
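+
+ An illustrative sketch of the new attributes; the exact name string is an
+ implementation detail::
+
+     >>> import hmac
+     >>> h = hmac.new(b'key', b'message', 'sha256')
+     >>> h.name, h.block_size
+     ('hmac-sha256', 64)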
-- Issue #18344: Fix potential ref-leaks in _bufferedreader_read_all().
+- Issue #17276: MD5 as default digestmod for HMAC is deprecated. The HMAC
+ module supports digestmod names, e.g. hmac.HMAC('sha1').
-- Issue #17872: Fix a segfault in marshal.load() when input stream returns
- more bytes than requested.
+- Issue #19449: In csv's writerow(), handle non-string keys when generating the
+ error message that certain keys are not in the 'fieldnames' list.
-- Issue #18426: Fix NULL pointer dereference in C extension import when
- PyModule_GetDef() returns an error.
+- Issue #13633: Added a new convert_charrefs keyword arg to HTMLParser that,
+ when True, automatically converts all character references.
-- Issue #18328: Reorder ops in PyThreadState_Delete*() functions. Now the
- tstate is first removed from TLS and then deallocated.
+- Issue #2927: Added the unescape() function to the html module.
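+
+ A minimal sketch (illustrative only)::
+
+     >>> import html
+     >>> html.unescape('&lt;spam &amp; eggs&gt;')
+     '<spam & eggs>'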
-- Issue #18184: PyUnicode_FromFormat() and PyUnicode_FromFormatV() now raise
- OverflowError when an argument of %c format is out of range.
+- Issue #8402: Added the escape() function to the glob module.
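+
+ An illustrative sketch; the filename is hypothetical::
+
+     >>> import glob
+     >>> glob.escape('report[2013].txt')    # '[' is neutralised for glob()
+     'report[[]2013].txt'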
-- Issue #18137: Detect integer overflow on precision in float.__format__()
- and complex.__format__().
+- Issue #17618: Add Base85 and Ascii85 encoding/decoding to the base64 module.
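+
+ A round-trip sketch of the Ascii85 variant (illustrative only)::
+
+     >>> import base64
+     >>> encoded = base64.a85encode(b'data to wrap')
+     >>> base64.a85decode(encoded) == b'data to wrap'
+     True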
-- Issue #18183: Fix various unicode operations on strings with large unicode
- codepoints.
+- Issue #19634: time.strftime("%y") now raises a ValueError on AIX when given a
+ year before 1900.
-- Issue #18180: Fix ref leak in _PyImport_GetDynLoadWindows().
+- Fix test.support.bind_port() to not cause an error when Python was compiled
+ on a system with SO_REUSEPORT defined in the headers but run on a system
+ with an OS kernel that does not support that reasonably new socket option.
-- Issue #18038: SyntaxError raised during compilation sources with illegal
- encoding now always contains an encoding name.
+- Fix a compilation error under gcc in the ctypes module's bundled libffi on ARM.
-- Issue #17644: Fix a crash in str.format when curly braces are used in square
- brackets.
+- Issue #19448: Add private API to SSL module to lookup ASN.1 objects by OID,
+ NID, short name and long name.
-- Issue #17983: Raise a SyntaxError for a ``global __class__`` statement in a
- class body.
+- Issue #19282: dbm.open now supports the context manager protocol. (Initial
+ patch by Claudiu Popa)
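+
+ A minimal sketch of the new protocol support; 'example.db' is a
+ hypothetical path::
+
+     import dbm
+
+     # the database is closed automatically when the block exits
+     with dbm.open('example.db', 'c') as db:
+         db[b'key'] = b'value'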
-- Issue #17927: Frame objects kept arguments alive if they had been copied into
- a cell, even if the cell was cleared.
+- Issue #8311: Added support for writing any bytes-like objects in the aifc,
+ sunau, and wave modules.
-Library
--------
+- Issue #5202: Added support for unseekable files in the wave module.
+
+- Issue #19544 and Issue #1180: Restore global option to ignore
+ ~/.pydistutils.cfg in Distutils, accidentally removed in backout of
+ distutils2 changes.
+
+- Issue #19523: Closed FileHandler leak which occurred when delay was set.
+
+- Issue #19544 and #6516: Restore support for --user and --group parameters to
+ sdist command accidentally rolled back as part of the distutils2 rollback.
+
+- Issue #13674: Prevented time.strftime from crashing on Windows when given
+ a year before 1900 and a format of %y.
+
+- Issue #19406: implementation of the ensurepip module (part of PEP 453).
+ Patch by Donald Stufft and Nick Coghlan.
+
+- Issue #19544 and Issue #6286: Restore use of urllib over http allowing use
+ of http_proxy for Distutils upload command, a feature accidentally lost
+ in the rollback of distutils2.
+
+- Issue #19544 and Issue #7457: Restore the read_pkg_file method to
+ distutils.dist.DistributionMetadata accidentally removed in the undo of
+ distutils2.
+
+- Issue #16685: Added support for any bytes-like objects in the audioop module.
+ Removed support for strings.
+
+- Issue #7171: Add Windows implementation of ``inet_ntop`` and ``inet_pton``
+ to socket module. Patch by Atsuo Ishimoto.
+
+- Issue #19261: Added support for writing 24-bit samples in the sunau module.
+
+- Issue #1097797: Added CP273 encoding, used on IBM mainframes in
+ Germany and Austria. Mapping provided by Michael Bierenfeld.
+
+- Issue #1575020: Fixed support of 24-bit wave files on big-endian platforms.
+
+- Issue #19378: Fixed a number of cases in the dis module where the new
+ "file" parameter was not being honoured correctly
+
+- Issue #19378: Removed the "dis.Bytecode.show_info" method.
+
+- Issue #19378: Renamed the "dis.Bytecode.display_code" method to
+ "dis.Bytecode.dis" and converted it to returning a string rather than
+ printing output.
+
+- Issue #19378: the "line_offset" parameter in the new "dis.get_instructions"
+ API has been renamed to "first_line" (and the default value and usage
+ changed accordingly). This should reduce confusion with the more common use
+ of "offset" in the dis docs to refer to bytecode offsets.
+
+- Issue #18678: Corrected spwd struct member names in spwd module:
+ sp_nam->sp_namp, and sp_pwd->sp_pwdp. The old names are kept as extra
+ structseq members, for backward compatibility.
+
+- Issue #6157: Fixed tkinter.Text.debug(). tkinter.Text.bbox() now raises
+ TypeError instead of TclError on wrong number of arguments. Original patch
+ by Guilherme Polo.
+
+- Issue #10197: Rework subprocess.get[status]output to use subprocess
+ functionality and thus to work on Windows. Patch by Nick Coghlan.
+
+- Issue #6160: The bbox() method of tkinter.Spinbox now returns a tuple of
+ integers instead of a string. Based on patch by Guilherme Polo.
+
+- Issue #19403: contextlib.redirect_stdout is now reentrant.
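+
+ A minimal sketch; because the manager is reentrant, the same instance may
+ also be nested (illustrative only)::
+
+     >>> import contextlib, io
+     >>> buf = io.StringIO()
+     >>> with contextlib.redirect_stdout(buf):
+     ...     print('hello')
+     >>> buf.getvalue()
+     'hello\n'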
+
+- Issue #19286: Directories in ``package_data`` are no longer added to
+ the filelist, preventing failure outlined in the ticket.
+
+- Issue #19480: HTMLParser now accepts all valid start-tag names as defined
+ by the HTML5 standard.
+
+- Issue #15114: The html.parser module now raises a DeprecationWarning when the
+ strict argument of HTMLParser or the HTMLParser.error method is used.
+
+- Issue #19410: Undo the special-casing removal of '' for
+ importlib.machinery.FileFinder.
+
+- Issue #19424: Fix the warnings module to accept filenames containing surrogate
+ characters.
+
+- Issue #19435: Fix directory traversal attack on CGIHttpRequestHandler.
+
+- Issue #19227: Remove pthread_atfork() handler. The handler was added to
+ solve #18747 but has caused issues.
+
+- Issue #19420: Fix a reference leak in the module initialization code of
+ _hashopenssl.c.
-- Issue #19395: Raise an exception when attempting to pickle a bz2 or lzma
- compressor/decompressor object, rather than creating a pickle that would
- cause a segfault when loaded and used.
+- Issue #19329: Optimized compiling charsets in regular expressions.
- Issue #19227: Try to fix deadlocks caused by re-seeding the OpenSSL
pseudo-random number generator on fork().