--- /dev/null
+#
+# Simple benchmarks for the multiprocessing package
+#
+
+import time, sys, multiprocessing, threading, Queue, gc
+
+if sys.platform == 'win32':
+ _timer = time.clock
+else:
+ _timer = time.time
+
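+# each benchmark doubles its workload until one run takes at least
+# `delta` seconds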
+delta = 1
+
+
+#### TEST_QUEUESPEED
+
+def queuespeed_func(q, c, iterations):
+ a = '0' * 256
+ c.acquire()
+ c.notify()
+ c.release()
+
+ for i in xrange(iterations):
+ q.put(a)
+
+ q.put('STOP')
+
+def test_queuespeed(Process, q, c):
+ elapsed = 0
+ iterations = 1
+
+ while elapsed < delta:
+ iterations *= 2
+
+ p = Process(target=queuespeed_func, args=(q, c, iterations))
+ c.acquire()
+ p.start()
+ c.wait()
+ c.release()
+
+ result = None
+ t = _timer()
+
+ while result != 'STOP':
+ result = q.get()
+
+ elapsed = _timer() - t
+
+ p.join()
+
+ print iterations, 'objects passed through the queue in', elapsed, 'seconds'
+ print 'average number/sec:', iterations/elapsed
+
+
+#### TEST_PIPESPEED
+
+def pipe_func(c, cond, iterations):
+ a = '0' * 256
+ cond.acquire()
+ cond.notify()
+ cond.release()
+
+ for i in xrange(iterations):
+ c.send(a)
+
+ c.send('STOP')
+
+def test_pipespeed():
+ c, d = multiprocessing.Pipe()
+ cond = multiprocessing.Condition()
+ elapsed = 0
+ iterations = 1
+
+ while elapsed < delta:
+ iterations *= 2
+
+ p = multiprocessing.Process(target=pipe_func,
+ args=(d, cond, iterations))
+ cond.acquire()
+ p.start()
+ cond.wait()
+ cond.release()
+
+ result = None
+ t = _timer()
+
+ while result != 'STOP':
+ result = c.recv()
+
+ elapsed = _timer() - t
+ p.join()
+
+ print iterations, 'objects passed through connection in',elapsed,'seconds'
+ print 'average number/sec:', iterations/elapsed
+
+
+#### TEST_SEQSPEED
+
+def test_seqspeed(seq):
+ elapsed = 0
+ iterations = 1
+
+ while elapsed < delta:
+ iterations *= 2
+
+ t = _timer()
+
+ for i in xrange(iterations):
+ a = seq[5]
+
+ elapsed = _timer()-t
+
+ print iterations, 'iterations in', elapsed, 'seconds'
+ print 'average number/sec:', iterations/elapsed
+
+
+#### TEST_LOCK
+
+def test_lockspeed(l):
+ elapsed = 0
+ iterations = 1
+
+ while elapsed < delta:
+ iterations *= 2
+
+ t = _timer()
+
+ for i in xrange(iterations):
+ l.acquire()
+ l.release()
+
+ elapsed = _timer()-t
+
+ print iterations, 'iterations in', elapsed, 'seconds'
+ print 'average number/sec:', iterations/elapsed
+
+
+#### TEST_CONDITION
+
+def conditionspeed_func(c, N):
+ c.acquire()
+ c.notify()
+
+ for i in xrange(N):
+ c.wait()
+ c.notify()
+
+ c.release()
+
+def test_conditionspeed(Process, c):
+ elapsed = 0
+ iterations = 1
+
+ while elapsed < delta:
+ iterations *= 2
+
+ c.acquire()
+ p = Process(target=conditionspeed_func, args=(c, iterations))
+ p.start()
+
+ c.wait()
+
+ t = _timer()
+
+ for i in xrange(iterations):
+ c.notify()
+ c.wait()
+
+ elapsed = _timer()-t
+
+ c.release()
+ p.join()
+
+ print iterations * 2, 'waits in', elapsed, 'seconds'
+ print 'average number/sec:', iterations * 2 / elapsed
+
+####
+
+def test():
+ manager = multiprocessing.Manager()
+
+ gc.disable()
+
+ print '\n\t######## testing Queue.Queue\n'
+ test_queuespeed(threading.Thread, Queue.Queue(),
+ threading.Condition())
+ print '\n\t######## testing multiprocessing.Queue\n'
+ test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
+ multiprocessing.Condition())
+ print '\n\t######## testing Queue managed by server process\n'
+ test_queuespeed(multiprocessing.Process, manager.Queue(),
+ manager.Condition())
+ print '\n\t######## testing multiprocessing.Pipe\n'
+ test_pipespeed()
+
+ print
+
+ print '\n\t######## testing list\n'
+ test_seqspeed(range(10))
+ print '\n\t######## testing list managed by server process\n'
+ test_seqspeed(manager.list(range(10)))
+ print '\n\t######## testing Array("i", ..., lock=False)\n'
+ test_seqspeed(multiprocessing.Array('i', range(10), lock=False))
+ print '\n\t######## testing Array("i", ..., lock=True)\n'
+ test_seqspeed(multiprocessing.Array('i', range(10), lock=True))
+
+ print
+
+ print '\n\t######## testing threading.Lock\n'
+ test_lockspeed(threading.Lock())
+ print '\n\t######## testing threading.RLock\n'
+ test_lockspeed(threading.RLock())
+ print '\n\t######## testing multiprocessing.Lock\n'
+ test_lockspeed(multiprocessing.Lock())
+ print '\n\t######## testing multiprocessing.RLock\n'
+ test_lockspeed(multiprocessing.RLock())
+ print '\n\t######## testing lock managed by server process\n'
+ test_lockspeed(manager.Lock())
+ print '\n\t######## testing rlock managed by server process\n'
+ test_lockspeed(manager.RLock())
+
+ print
+
+ print '\n\t######## testing threading.Condition\n'
+ test_conditionspeed(threading.Thread, threading.Condition())
+ print '\n\t######## testing multiprocessing.Condition\n'
+ test_conditionspeed(multiprocessing.Process, multiprocessing.Condition())
+ print '\n\t######## testing condition managed by a server process\n'
+ test_conditionspeed(multiprocessing.Process, manager.Condition())
+
+ gc.enable()
+
+if __name__ == '__main__':
+ multiprocessing.freeze_support()
+ test()
--- /dev/null
+#
+# Module to allow spawning of processes on foreign host
+#
+# Depends on `multiprocessing` package -- tested with `processing-0.60`
+#
+
+__all__ = ['Cluster', 'Host', 'get_logger', 'current_process']
+
+#
+# Imports
+#
+
+import sys
+import os
+import tarfile
+import shutil
+import subprocess
+import logging
+import itertools
+import Queue
+
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+
+from multiprocessing import Process, current_process, cpu_count
+from multiprocessing import util, managers, connection, forking, pool
+
+#
+# Logging
+#
+
+def get_logger():
+    return _logger
+
+_logger = logging.getLogger('distributing')
+_logger.propagate = 0
+
+util.fix_up_logger(_logger)
+_formatter = logging.Formatter(util.DEFAULT_LOGGING_FORMAT)
+_handler = logging.StreamHandler()
+_handler.setFormatter(_formatter)
+_logger.addHandler(_handler)
+
+info = _logger.info
+debug = _logger.debug
+
+#
+# Get number of cpus
+#
+
+try:
+    slot_count = cpu_count()
+except NotImplementedError:
+    slot_count = 1
+
+#
+# Manager type which spawns subprocesses
+#
+
+class HostManager(managers.SyncManager):
+    '''
+    Manager type used for spawning processes on a (presumably) foreign host
+    '''
+    def __init__(self, address, authkey):
+        managers.SyncManager.__init__(self, address, authkey)
+        self._name = 'Host-unknown'
+
+    def Process(self, group=None, target=None, name=None, args=(), kwargs={}):
+        if hasattr(sys.modules['__main__'], '__file__'):
+            main_path = os.path.basename(sys.modules['__main__'].__file__)
+        else:
+            main_path = None
+        data = pickle.dumps((target, args, kwargs))
+        p = self._RemoteProcess(data, main_path)
+        if name is None:
+            temp = self._name.split('Host-')[-1] + '/Process-%s'
+            name = temp % ':'.join(map(str, p.get_identity()))
+        p.set_name(name)
+        return p
+
+    @classmethod
+    def from_address(cls, address, authkey):
+        manager = cls(address, authkey)
+        managers.transact(address, authkey, 'dummy')
+        manager._state.value = managers.State.STARTED
+        manager._name = 'Host-%s:%s' % manager.address
+        manager.shutdown = util.Finalize(
+            manager, HostManager._finalize_host,
+            args=(manager._address, manager._authkey, manager._name),
+            exitpriority=-10
+            )
+        return manager
+
+    @staticmethod
+    def _finalize_host(address, authkey, name):
+        managers.transact(address, authkey, 'shutdown')
+
+    def __repr__(self):
+        return '<Host(%s)>' % self._name
+
+#
+# Process subclass representing a process on (possibly) a remote machine
+#
+
+class RemoteProcess(Process):
+    '''
+    Represents a process started on a remote host
+    '''
+    def __init__(self, data, main_path):
+        assert not main_path or os.path.basename(main_path) == main_path
+        Process.__init__(self)
+        self._data = data
+        self._main_path = main_path
+
+    def _bootstrap(self):
+        forking.prepare({'main_path': self._main_path})
+        self._target, self._args, self._kwargs = pickle.loads(self._data)
+        return Process._bootstrap(self)
+
+    def get_identity(self):
+        return self._identity
+
+HostManager.register('_RemoteProcess', RemoteProcess)
+
+#
+# A Pool class that uses a cluster
+#
+
+class DistributedPool(pool.Pool):
+
+    def __init__(self, cluster, processes=None, initializer=None, initargs=()):
+        self._cluster = cluster
+        self.Process = cluster.Process
+        pool.Pool.__init__(self, processes or len(cluster),
+                           initializer, initargs)
+
+    def _setup_queues(self):
+        self._inqueue = self._cluster._SettableQueue()
+        self._outqueue = self._cluster._SettableQueue()
+        self._quick_put = self._inqueue.put
+        self._quick_get = self._outqueue.get
+
+    @staticmethod
+    def _help_stuff_finish(inqueue, task_handler, size):
+        inqueue.set_contents([None] * size)
+
+#
+# Manager type which starts host managers on other machines
+#
+
+def LocalProcess(**kwds):
+    p = Process(**kwds)
+    p.set_name('localhost/' + p.get_name())
+    return p
+
+class Cluster(managers.SyncManager):
+    '''
+    Represents a collection of slots running on various hosts.
+
+    `Cluster` is a subclass of `SyncManager` so it allows creation of
+    various types of shared objects.
+    '''
+    def __init__(self, hostlist, modules):
+        managers.SyncManager.__init__(self, address=('localhost', 0))
+        self._hostlist = hostlist
+        self._modules = modules
+        if __name__ not in modules:
+            modules.append(__name__)
+        files = [sys.modules[name].__file__ for name in modules]
+        for i, file in enumerate(files):
+            if file.endswith('.pyc') or file.endswith('.pyo'):
+                files[i] = file[:-4] + '.py'
+        self._files = [os.path.abspath(file) for file in files]
+
+    def start(self):
+        managers.SyncManager.start(self)
+
+        l = connection.Listener(family='AF_INET', authkey=self._authkey)
+
+        for i, host in enumerate(self._hostlist):
+            host._start_manager(i, self._authkey, l.address, self._files)
+
+        for host in self._hostlist:
+            if host.hostname != 'localhost':
+                conn = l.accept()
+                i, address, cpus = conn.recv()
+                conn.close()
+                other_host = self._hostlist[i]
+                other_host.manager = HostManager.from_address(address,
+                                                              self._authkey)
+                other_host.slots = other_host.slots or cpus
+                other_host.Process = other_host.manager.Process
+            else:
+                host.slots = host.slots or slot_count
+                host.Process = LocalProcess
+
+        self._slotlist = [
+            Slot(host) for host in self._hostlist for i in range(host.slots)
+            ]
+        self._slot_iterator = itertools.cycle(self._slotlist)
+        self._base_shutdown = self.shutdown
+        del self.shutdown
+
+    def shutdown(self):
+        for host in self._hostlist:
+            if host.hostname != 'localhost':
+                host.manager.shutdown()
+        self._base_shutdown()
+
+    def Process(self, group=None, target=None, name=None, args=(), kwargs={}):
+        slot = self._slot_iterator.next()
+        return slot.Process(
+            group=group, target=target, name=name, args=args, kwargs=kwargs
+            )
+
+    def Pool(self, processes=None, initializer=None, initargs=()):
+        return DistributedPool(self, processes, initializer, initargs)
+
+    def __getitem__(self, i):
+        return self._slotlist[i]
+
+    def __len__(self):
+        return len(self._slotlist)
+
+    def __iter__(self):
+        return iter(self._slotlist)
+
+#
+# Queue subclass used by distributed pool
+#
+
+class SettableQueue(Queue.Queue):
+    def empty(self):
+        return not self.queue
+    def full(self):
+        return self.maxsize > 0 and len(self.queue) == self.maxsize
+    def set_contents(self, contents):
+        # length of contents must be at least as large as the number of
+        # threads which have potentially called get()
+        self.not_empty.acquire()
+        try:
+            self.queue.clear()
+            self.queue.extend(contents)
+            self.not_empty.notifyAll()
+        finally:
+            self.not_empty.release()
+
+Cluster.register('_SettableQueue', SettableQueue)
+
+#
+# Class representing a notional cpu in the cluster
+#
+
+class Slot(object):
+    def __init__(self, host):
+        self.host = host
+        self.Process = host.Process
+
+#
+# Host
+#
+
+class Host(object):
+    '''
+    Represents a host to use as a node in a cluster.
+
+    `hostname` gives the name of the host. If hostname is not
+    "localhost" then ssh is used to log in to the host. To log in as
+    a different user use a host name of the form
+    "username@somewhere.org".
+
+    `slots` is used to specify the number of slots for processes on
+    the host. This affects how often processes will be allocated to
+    this host. Normally this should be equal to the number of cpus on
+    that host.
+    '''
+    def __init__(self, hostname, slots=None):
+        self.hostname = hostname
+        self.slots = slots
+
+    def _start_manager(self, index, authkey, address, files):
+        if self.hostname != 'localhost':
+            tempdir = copy_to_remote_temporary_directory(self.hostname, files)
+            debug('startup files copied to %s:%s', self.hostname, tempdir)
+            p = subprocess.Popen(
+                ['ssh', self.hostname, 'python', '-c',
+                 '"import os; os.chdir(%r); '
+                 'from distributing import main; main()"' % tempdir],
+                stdin=subprocess.PIPE
+                )
+            data = dict(
+                name='BootstrappingHost', index=index,
+                dist_log_level=_logger.getEffectiveLevel(),
+                dir=tempdir, authkey=str(authkey), parent_address=address
+                )
+            pickle.dump(data, p.stdin, pickle.HIGHEST_PROTOCOL)
+            p.stdin.close()
+
+#
+# Copy files to remote directory, returning name of directory
+#
+
+unzip_code = '''"
+import tempfile, os, sys, tarfile
+tempdir = tempfile.mkdtemp(prefix='distrib-')
+os.chdir(tempdir)
+tf = tarfile.open(fileobj=sys.stdin, mode='r|gz')
+for ti in tf:
+    tf.extract(ti)
+print tempdir
+"'''
+
+def copy_to_remote_temporary_directory(host, files):
+    p = subprocess.Popen(
+        ['ssh', host, 'python', '-c', unzip_code],
+        stdout=subprocess.PIPE, stdin=subprocess.PIPE
+        )
+    tf = tarfile.open(fileobj=p.stdin, mode='w|gz')
+    for name in files:
+        tf.add(name, os.path.basename(name))
+    tf.close()
+    p.stdin.close()
+    return p.stdout.read().rstrip()
+
+#
+# Code which runs a host manager
+#
+
+def main():
+    # get data from parent over stdin
+    data = pickle.load(sys.stdin)
+    sys.stdin.close()
+
+    # set some stuff
+    _logger.setLevel(data['dist_log_level'])
+    forking.prepare(data)
+
+    # create server for a `HostManager` object
+    server = managers.Server(HostManager._registry, ('', 0), data['authkey'])
+    current_process()._server = server
+
+    # report server address and number of cpus back to parent
+    conn = connection.Client(data['parent_address'], authkey=data['authkey'])
+    conn.send((data['index'], server.address, slot_count))
+    conn.close()
+
+    # set name etc
+    current_process().set_name('Host-%s:%s' % server.address)
+    util._run_after_forkers()
+
+    # register a cleanup function
+    def cleanup(directory):
+        debug('removing directory %s', directory)
+        shutil.rmtree(directory)
+        debug('shutting down host manager')
+    util.Finalize(None, cleanup, args=[data['dir']], exitpriority=0)
+
+    # start host manager
+    debug('remote host manager starting in %s', data['dir'])
+    server.serve_forever()
--- /dev/null
+#
+# This module shows how to use arbitrary callables with a subclass of
+# `BaseManager`.
+#
+
+from multiprocessing import freeze_support
+from multiprocessing.managers import BaseManager, BaseProxy
+import operator
+
+##
+
+class Foo(object):
+ def f(self):
+ print 'you called Foo.f()'
+ def g(self):
+ print 'you called Foo.g()'
+ def _h(self):
+ print 'you called Foo._h()'
+
+# A simple generator function
+def baz():
+ for i in xrange(10):
+ yield i*i
+
+# Proxy type for generator objects
+class GeneratorProxy(BaseProxy):
+ _exposed_ = ('next', '__next__')
+ def __iter__(self):
+ return self
+ def next(self):
+ return self._callmethod('next')
+ def __next__(self):
+ return self._callmethod('__next__')
+
+# Function to return the operator module
+def get_operator_module():
+ return operator
+
+##
+
+class MyManager(BaseManager):
+ pass
+
+# register the Foo class; make `f()` and `g()` accessible via proxy
+MyManager.register('Foo1', Foo)
+
+# register the Foo class; make `g()` and `_h()` accessible via proxy
+MyManager.register('Foo2', Foo, exposed=('g', '_h'))
+
+# register the generator function baz; use `GeneratorProxy` to make proxies
+MyManager.register('baz', baz, proxytype=GeneratorProxy)
+
+# register get_operator_module(); make public functions accessible via proxy
+MyManager.register('operator', get_operator_module)
+
+##
+
+def test():
+ manager = MyManager()
+ manager.start()
+
+ print '-' * 20
+
+ f1 = manager.Foo1()
+ f1.f()
+ f1.g()
+ assert not hasattr(f1, '_h')
+ assert sorted(f1._exposed_) == sorted(['f', 'g'])
+
+ print '-' * 20
+
+ f2 = manager.Foo2()
+ f2.g()
+ f2._h()
+ assert not hasattr(f2, 'f')
+ assert sorted(f2._exposed_) == sorted(['g', '_h'])
+
+ print '-' * 20
+
+ it = manager.baz()
+ for i in it:
+ print '<%d>' % i,
+ print
+
+ print '-' * 20
+
+ op = manager.operator()
+ print 'op.add(23, 45) =', op.add(23, 45)
+ print 'op.pow(2, 94) =', op.pow(2, 94)
+ print 'op.getslice(range(10), 2, 6) =', op.getslice(range(10), 2, 6)
+ print 'op.repeat(range(5), 3) =', op.repeat(range(5), 3)
+ print 'op._exposed_ =', op._exposed_
+
+##
+
+if __name__ == '__main__':
+ freeze_support()
+ test()
--- /dev/null
+#
+# A test of `multiprocessing.Pool` class
+#
+
+import multiprocessing
+import time
+import random
+import sys
+
+#
+# Functions used by test code
+#
+
+def calculate(func, args):
+ result = func(*args)
+ return '%s says that %s%s = %s' % (
+ multiprocessing.current_process().get_name(),
+ func.__name__, args, result
+ )
+
+def calculatestar(args):
+ return calculate(*args)
+
+def mul(a, b):
+ time.sleep(0.5*random.random())
+ return a * b
+
+def plus(a, b):
+ time.sleep(0.5*random.random())
+ return a + b
+
+def f(x):
+ return 1.0 / (x-5.0)
+
+def pow3(x):
+ return x**3
+
+def noop(x):
+ pass
+
+#
+# Test code
+#
+
+def test():
+ print 'cpu_count() = %d\n' % multiprocessing.cpu_count()
+
+ #
+ # Create pool
+ #
+
+ PROCESSES = 4
+ print 'Creating pool with %d processes\n' % PROCESSES
+ pool = multiprocessing.Pool(PROCESSES)
+ print 'pool = %s' % pool
+ print
+
+ #
+ # Tests
+ #
+
+ TASKS = [(mul, (i, 7)) for i in range(10)] + \
+ [(plus, (i, 8)) for i in range(10)]
+
+ results = [pool.apply_async(calculate, t) for t in TASKS]
+ imap_it = pool.imap(calculatestar, TASKS)
+ imap_unordered_it = pool.imap_unordered(calculatestar, TASKS)
+
+ print 'Ordered results using pool.apply_async():'
+ for r in results:
+ print '\t', r.get()
+ print
+
+ print 'Ordered results using pool.imap():'
+ for x in imap_it:
+ print '\t', x
+ print
+
+ print 'Unordered results using pool.imap_unordered():'
+ for x in imap_unordered_it:
+ print '\t', x
+ print
+
+ print 'Ordered results using pool.map() --- will block till complete:'
+ for x in pool.map(calculatestar, TASKS):
+ print '\t', x
+ print
+
+ #
+ # Simple benchmarks
+ #
+
+ N = 100000
+ print 'def pow3(x): return x**3'
+
+ t = time.time()
+ A = map(pow3, xrange(N))
+ print '\tmap(pow3, xrange(%d)):\n\t\t%s seconds' % \
+ (N, time.time() - t)
+
+ t = time.time()
+ B = pool.map(pow3, xrange(N))
+ print '\tpool.map(pow3, xrange(%d)):\n\t\t%s seconds' % \
+ (N, time.time() - t)
+
+ t = time.time()
+ C = list(pool.imap(pow3, xrange(N), chunksize=N//8))
+ print '\tlist(pool.imap(pow3, xrange(%d), chunksize=%d)):\n\t\t%s' \
+ ' seconds' % (N, N//8, time.time() - t)
+
+ assert A == B == C, (len(A), len(B), len(C))
+ print
+
+ L = [None] * 1000000
+ print 'def noop(x): pass'
+ print 'L = [None] * 1000000'
+
+ t = time.time()
+ A = map(noop, L)
+ print '\tmap(noop, L):\n\t\t%s seconds' % \
+ (time.time() - t)
+
+ t = time.time()
+ B = pool.map(noop, L)
+ print '\tpool.map(noop, L):\n\t\t%s seconds' % \
+ (time.time() - t)
+
+ t = time.time()
+ C = list(pool.imap(noop, L, chunksize=len(L)//8))
+ print '\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \
+ (len(L)//8, time.time() - t)
+
+ assert A == B == C, (len(A), len(B), len(C))
+ print
+
+ del A, B, C, L
+
+ #
+ # Test error handling
+ #
+
+ print 'Testing error handling:'
+
+ try:
+ print pool.apply(f, (5,))
+ except ZeroDivisionError:
+ print '\tGot ZeroDivisionError as expected from pool.apply()'
+ else:
+ raise AssertionError, 'expected ZeroDivisionError'
+
+ try:
+ print pool.map(f, range(10))
+ except ZeroDivisionError:
+ print '\tGot ZeroDivisionError as expected from pool.map()'
+ else:
+ raise AssertionError, 'expected ZeroDivisionError'
+
+ try:
+ print list(pool.imap(f, range(10)))
+ except ZeroDivisionError:
+ print '\tGot ZeroDivisionError as expected from list(pool.imap())'
+ else:
+ raise AssertionError, 'expected ZeroDivisionError'
+
+ it = pool.imap(f, range(10))
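+    # f(5) raises ZeroDivisionError; every other next() call should succeed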
+ for i in range(10):
+ try:
+ x = it.next()
+ except ZeroDivisionError:
+ if i == 5:
+ pass
+ except StopIteration:
+ break
+ else:
+ if i == 5:
+ raise AssertionError, 'expected ZeroDivisionError'
+
+ assert i == 9
+ print '\tGot ZeroDivisionError as expected from IMapIterator.next()'
+ print
+
+ #
+ # Testing timeouts
+ #
+
+ print 'Testing ApplyResult.get() with timeout:',
+ res = pool.apply_async(calculate, TASKS[0])
+ while 1:
+ sys.stdout.flush()
+ try:
+ sys.stdout.write('\n\t%s' % res.get(0.02))
+ break
+ except multiprocessing.TimeoutError:
+ sys.stdout.write('.')
+ print
+ print
+
+ print 'Testing IMapIterator.next() with timeout:',
+ it = pool.imap(calculatestar, TASKS)
+ while 1:
+ sys.stdout.flush()
+ try:
+ sys.stdout.write('\n\t%s' % it.next(0.02))
+ except StopIteration:
+ break
+ except multiprocessing.TimeoutError:
+ sys.stdout.write('.')
+ print
+ print
+
+ #
+ # Testing callback
+ #
+
+ print 'Testing callback:'
+
+ A = []
+ B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729]
+
+ r = pool.apply_async(mul, (7, 8), callback=A.append)
+ r.wait()
+
+ r = pool.map_async(pow3, range(10), callback=A.extend)
+ r.wait()
+
+ if A == B:
+ print '\tcallbacks succeeded\n'
+ else:
+ print '\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)
+
+ #
+ # Check there are no outstanding tasks
+ #
+
+ assert not pool._cache, 'cache = %r' % pool._cache
+
+ #
+ # Check close() methods
+ #
+
+ print 'Testing close():'
+
+ for worker in pool._pool:
+ assert worker.is_alive()
+
+ result = pool.apply_async(time.sleep, [0.5])
+ pool.close()
+ pool.join()
+
+ assert result.get() is None
+
+ for worker in pool._pool:
+ assert not worker.is_alive()
+
+ print '\tclose() succeeded\n'
+
+ #
+ # Check terminate() method
+ #
+
+ print 'Testing terminate():'
+
+ pool = multiprocessing.Pool(2)
+ DELTA = 0.1
+ ignore = pool.apply(pow3, [2])
+ results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)]
+ pool.terminate()
+ pool.join()
+
+ for worker in pool._pool:
+ assert not worker.is_alive()
+
+ print '\tterminate() succeeded\n'
+
+ #
+ # Check garbage collection
+ #
+
+ print 'Testing garbage collection:'
+
+ pool = multiprocessing.Pool(2)
+ DELTA = 0.1
+ processes = pool._pool
+ ignore = pool.apply(pow3, [2])
+ results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)]
+
+ results = pool = None
+
+ time.sleep(DELTA * 2)
+
+ for worker in processes:
+ assert not worker.is_alive()
+
+ print '\tgarbage collection succeeded\n'
+
+
+if __name__ == '__main__':
+ multiprocessing.freeze_support()
+
+ assert len(sys.argv) in (1, 2)
+
+ if len(sys.argv) == 1 or sys.argv[1] == 'processes':
+ print ' Using processes '.center(79, '-')
+ elif sys.argv[1] == 'threads':
+ print ' Using threads '.center(79, '-')
+ import multiprocessing.dummy as multiprocessing
+ else:
+ print 'Usage:\n\t%s [processes | threads]' % sys.argv[0]
+ raise SystemExit(2)
+
+ test()
--- /dev/null
+#
+# A test file for the `multiprocessing` package
+#
+
+import time, sys, random
+from Queue import Empty
+
+import multiprocessing # may get overwritten
+
+
+#### TEST_VALUE
+
+def value_func(running, mutex):
+ random.seed()
+ time.sleep(random.random()*4)
+
+ mutex.acquire()
+ print '\n\t\t\t' + str(multiprocessing.current_process()) + ' has finished'
+ running.value -= 1
+ mutex.release()
+
+def test_value():
+ TASKS = 10
+ running = multiprocessing.Value('i', TASKS)
+ mutex = multiprocessing.Lock()
+
+ for i in range(TASKS):
+ p = multiprocessing.Process(target=value_func, args=(running, mutex))
+ p.start()
+
+ while running.value > 0:
+ time.sleep(0.08)
+ mutex.acquire()
+ print running.value,
+ sys.stdout.flush()
+ mutex.release()
+
+ print
+ print 'No more running processes'
+
+
+#### TEST_QUEUE
+
+def queue_func(queue):
+ for i in range(30):
+ time.sleep(0.5 * random.random())
+ queue.put(i*i)
+ queue.put('STOP')
+
+def test_queue():
+ q = multiprocessing.Queue()
+
+ p = multiprocessing.Process(target=queue_func, args=(q,))
+ p.start()
+
+ o = None
+ while o != 'STOP':
+ try:
+ o = q.get(timeout=0.3)
+ print o,
+ sys.stdout.flush()
+ except Empty:
+ print 'TIMEOUT'
+
+ print
+
+
+#### TEST_CONDITION
+
+def condition_func(cond):
+ cond.acquire()
+ print '\t' + str(cond)
+ time.sleep(2)
+ print '\tchild is notifying'
+ print '\t' + str(cond)
+ cond.notify()
+ cond.release()
+
+def test_condition():
+ cond = multiprocessing.Condition()
+
+ p = multiprocessing.Process(target=condition_func, args=(cond,))
+ print cond
+
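+    # a Condition uses an RLock by default, so the main process can
+    # acquire it twice here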
+ cond.acquire()
+ print cond
+ cond.acquire()
+ print cond
+
+ p.start()
+
+ print 'main is waiting'
+ cond.wait()
+ print 'main has woken up'
+
+ print cond
+ cond.release()
+ print cond
+ cond.release()
+
+ p.join()
+ print cond
+
+
+#### TEST_SEMAPHORE
+
+def semaphore_func(sema, mutex, running):
+ sema.acquire()
+
+ mutex.acquire()
+ running.value += 1
+ print running.value, 'tasks are running'
+ mutex.release()
+
+ random.seed()
+ time.sleep(random.random()*2)
+
+ mutex.acquire()
+ running.value -= 1
+ print '%s has finished' % multiprocessing.current_process()
+ mutex.release()
+
+ sema.release()
+
+def test_semaphore():
+ sema = multiprocessing.Semaphore(3)
+ mutex = multiprocessing.RLock()
+ running = multiprocessing.Value('i', 0)
+
+ processes = [
+ multiprocessing.Process(target=semaphore_func,
+ args=(sema, mutex, running))
+ for i in range(10)
+ ]
+
+ for p in processes:
+ p.start()
+
+ for p in processes:
+ p.join()
+
+
+#### TEST_JOIN_TIMEOUT
+
+def join_timeout_func():
+ print '\tchild sleeping'
+ time.sleep(5.5)
+ print '\n\tchild terminating'
+
+def test_join_timeout():
+ p = multiprocessing.Process(target=join_timeout_func)
+ p.start()
+
+ print 'waiting for process to finish'
+
+ while 1:
+ p.join(timeout=1)
+ if not p.is_alive():
+ break
+ print '.',
+ sys.stdout.flush()
+
+
+#### TEST_EVENT
+
+def event_func(event):
+ print '\t%r is waiting' % multiprocessing.current_process()
+ event.wait()
+ print '\t%r has woken up' % multiprocessing.current_process()
+
+def test_event():
+ event = multiprocessing.Event()
+
+ processes = [multiprocessing.Process(target=event_func, args=(event,))
+ for i in range(5)]
+
+ for p in processes:
+ p.start()
+
+ print 'main is sleeping'
+ time.sleep(2)
+
+ print 'main is setting event'
+ event.set()
+
+ for p in processes:
+ p.join()
+
+
+#### TEST_SHAREDVALUES
+
+def sharedvalues_func(values, arrays, shared_values, shared_arrays):
+ for i in range(len(values)):
+ v = values[i][1]
+ sv = shared_values[i].value
+ assert v == sv
+
+ for i in range(len(values)):
+ a = arrays[i][1]
+ sa = list(shared_arrays[i][:])
+ assert a == sa
+
+ print 'Tests passed'
+
+def test_sharedvalues():
+ values = [
+ ('i', 10),
+ ('h', -2),
+ ('d', 1.25)
+ ]
+ arrays = [
+ ('i', range(100)),
+ ('d', [0.25 * i for i in range(100)]),
+ ('H', range(1000))
+ ]
+
+ shared_values = [multiprocessing.Value(id, v) for id, v in values]
+ shared_arrays = [multiprocessing.Array(id, a) for id, a in arrays]
+
+ p = multiprocessing.Process(
+ target=sharedvalues_func,
+ args=(values, arrays, shared_values, shared_arrays)
+ )
+ p.start()
+ p.join()
+
+ assert p.get_exitcode() == 0
+
+
+####
+
+def test(namespace=multiprocessing):
+ global multiprocessing
+
+ multiprocessing = namespace
+
+ for func in [ test_value, test_queue, test_condition,
+ test_semaphore, test_join_timeout, test_event,
+ test_sharedvalues ]:
+
+ print '\n\t######## %s\n' % func.__name__
+ func()
+
+ ignore = multiprocessing.active_children() # cleanup any old processes
+ if hasattr(multiprocessing, '_debug_info'):
+ info = multiprocessing._debug_info()
+ if info:
+ print info
+ raise ValueError, 'there should be no positive refcounts left'
+
+
+if __name__ == '__main__':
+ multiprocessing.freeze_support()
+
+ assert len(sys.argv) in (1, 2)
+
+ if len(sys.argv) == 1 or sys.argv[1] == 'processes':
+ print ' Using processes '.center(79, '-')
+ namespace = multiprocessing
+ elif sys.argv[1] == 'manager':
+ print ' Using processes and a manager '.center(79, '-')
+ namespace = multiprocessing.Manager()
+ namespace.Process = multiprocessing.Process
+ namespace.current_process = multiprocessing.current_process
+ namespace.active_children = multiprocessing.active_children
+ elif sys.argv[1] == 'threads':
+ print ' Using threads '.center(79, '-')
+ import multiprocessing.dummy as namespace
+ else:
+ print 'Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]
+ raise SystemExit, 2
+
+ test(namespace)
--- /dev/null
+#
+# Example where a pool of http servers share a single listening socket
+#
+# On Windows this module depends on the ability to pickle a socket
+# object so that the worker processes can inherit a copy of the server
+# object. (We import `multiprocessing.reduction` to enable this pickling.)
+#
+# Not sure if we should synchronize access to `socket.accept()` method by
+# using a process-shared lock -- does not seem to be necessary.
+#
+
+import os
+import sys
+
+from multiprocessing import Process, current_process, freeze_support
+from BaseHTTPServer import HTTPServer
+from SimpleHTTPServer import SimpleHTTPRequestHandler
+
+if sys.platform == 'win32':
+    import multiprocessing.reduction    # make sockets picklable/inheritable
+
+
+def note(format, *args):
+    sys.stderr.write('[%s]\t%s\n' % (current_process().get_name(), format % args))
+
+
+class RequestHandler(SimpleHTTPRequestHandler):
+ # we override log_message() to show which process is handling the request
+ def log_message(self, format, *args):
+ note(format, *args)
+
+def serve_forever(server):
+ note('starting server')
+ try:
+ server.serve_forever()
+ except KeyboardInterrupt:
+ pass
+
+
+def runpool(address, number_of_processes):
+ # create a single server object -- children will each inherit a copy
+ server = HTTPServer(address, RequestHandler)
+
+ # create child processes to act as workers
+ for i in range(number_of_processes-1):
+ Process(target=serve_forever, args=(server,)).start()
+
+ # main process also acts as a worker
+ serve_forever(server)
+
+
+def test():
+ DIR = os.path.join(os.path.dirname(__file__), '..')
+ ADDRESS = ('localhost', 8000)
+ NUMBER_OF_PROCESSES = 4
+
+ print 'Serving at http://%s:%d using %d worker processes' % \
+ (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)
+ print 'To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']
+
+ os.chdir(DIR)
+ runpool(ADDRESS, NUMBER_OF_PROCESSES)
+
+
+if __name__ == '__main__':
+ freeze_support()
+ test()
--- /dev/null
+#
+# Simple example which uses a pool of workers to carry out some tasks.
+#
+# Notice that the results will probably not come out of the output
+# queue in the same order as the corresponding tasks were
+# put on the input queue. If it is important to get the results back
+# in the original order then consider using `Pool.map()` or
+# `Pool.imap()` (which will save on the amount of code needed anyway).
+#
+
+import time
+import random
+
+from multiprocessing import Process, Queue, current_process, freeze_support
+
+#
+# Function run by worker processes
+#
+
+def worker(input, output):
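+    # iter(callable, sentinel) keeps calling input.get() until it returns 'STOP'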
+ for func, args in iter(input.get, 'STOP'):
+ result = calculate(func, args)
+ output.put(result)
+
+#
+# Function used to calculate result
+#
+
+def calculate(func, args):
+ result = func(*args)
+ return '%s says that %s%s = %s' % \
+ (current_process().get_name(), func.__name__, args, result)
+
+#
+# Functions referenced by tasks
+#
+
+def mul(a, b):
+ time.sleep(0.5*random.random())
+ return a * b
+
+def plus(a, b):
+ time.sleep(0.5*random.random())
+ return a + b
+
+#
+#
+#
+
+def test():
+ NUMBER_OF_PROCESSES = 4
+ TASKS1 = [(mul, (i, 7)) for i in range(20)]
+ TASKS2 = [(plus, (i, 8)) for i in range(10)]
+
+ # Create queues
+ task_queue = Queue()
+ done_queue = Queue()
+
+ # Submit tasks
+ for task in TASKS1:
+ task_queue.put(task)
+
+ # Start worker processes
+ for i in range(NUMBER_OF_PROCESSES):
+ Process(target=worker, args=(task_queue, done_queue)).start()
+
+ # Get and print results
+ print 'Unordered results:'
+ for i in range(len(TASKS1)):
+ print '\t', done_queue.get()
+
+ # Add more tasks using `put()`
+ for task in TASKS2:
+ task_queue.put(task)
+
+ # Get and print some more results
+ for i in range(len(TASKS2)):
+ print '\t', done_queue.get()
+
+ # Tell child processes to stop
+ for i in range(NUMBER_OF_PROCESSES):
+ task_queue.put('STOP')
+
+
+if __name__ == '__main__':
+ freeze_support()
+ test()
--- /dev/null
+:mod:`multiprocessing` --- Process-based "threading" interface
+==============================================================
+
+.. module:: multiprocessing
+ :synopsis: Process-based "threading" interface.
+
+.. versionadded:: 2.6
+
+:mod:`multiprocessing` is a package that supports spawning processes using an
+API similar to that of the :mod:`threading` module. It runs on both Unix and
+Windows.
+
+The :mod:`multiprocessing` module offers both local and remote concurrency,
+effectively side-stepping the Global Interpreter Lock by using subprocesses
+instead of threads. Because of this, the :mod:`multiprocessing` module allows
+the programmer to fully leverage multiple processors on a given machine.
+
+
+Introduction
+------------
+
+
+Threads, processes and the GIL
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To run more than one piece of code at the same time on the same computer one has
+the choice of either using multiple processes or multiple threads.
+
+Although a program can be made up of multiple processes, these processes are in
+effect completely independent of one another: different processes are not able
+to cooperate with one another unless one sets up some means of communication
+between them (such as by using sockets). If a lot of data must be transferred
+between processes then this can be inefficient.
+
+On the other hand, multiple threads within a single process are intimately
+connected: they share their data but often can interfere badly with one another.
+It is often argued that the only way to make multithreaded programming "easy" is
+to avoid relying on any shared state and for the threads to only communicate by
+passing messages to each other.
+
+CPython has a *Global Interpreter Lock* (GIL) which in many ways makes threading
+easier than it is in most languages by making sure that only one thread can
+manipulate the interpreter's objects at a time. As a result, it is often safe
+to let multiple threads access data without using any additional locking as one
+would need to in a language such as C.
+
+One downside of the GIL is that on multi-processor (or multi-core) systems a
+multithreaded Python program can only make use of one processor at a time,
+unless the application makes heavy use of I/O, which effectively side-steps
+the lock. This is a problem that can be overcome by using multiple processes
+instead.
+
+This package allows one to write multi-process programs using much the same API
+that one uses for writing threaded programs.
+
+
+Forking and spawning
+~~~~~~~~~~~~~~~~~~~~
+
+There are two ways of creating a new process in Python:
+
+* The current process can *fork* a new child process by using the
+ :func:`os.fork` function. This effectively creates an identical copy of the
+ current process which is now able to go off and perform some task set by the
+ parent process. This means that the child process inherits *copies* of all
+ variables that the parent process had. However, :func:`os.fork` is not
+ available on every platform: in particular Windows does not support it.
+
+* Alternatively, the current process can spawn a completely new Python
+ interpreter by using the :mod:`subprocess` module or one of the
+  :func:`os.spawn*` functions. Getting this new interpreter into a fit state
+ to perform the task set for it by its parent process is, however, a bit of a
+ challenge.
+
+The :mod:`multiprocessing` module uses :func:`os.fork` if it is available since
+it makes life a lot simpler. Forking the process is also more efficient in
+terms of memory usage and the time needed to create the new process.
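+
+For illustration, a bare :func:`os.fork` call (Unix only; this sketch is not
+part of the :mod:`multiprocessing` API) looks like this::
+
+   import os
+
+   pid = os.fork()                  # clone the current process
+   if pid == 0:                     # in the child
+       print 'child pid is', os.getpid()
+       os._exit(0)                  # exit without running cleanup handlers
+   else:                            # in the parent
+       os.waitpid(pid, 0)           # wait for the child to finish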
+
+
+The :class:`Process` class
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In :mod:`multiprocessing`, processes are spawned by creating a :class:`Process`
+object and then calling its :meth:`Process.start` method. :class:`Process`
+follows the API of :class:`threading.Thread`. A trivial example of a
+multiprocess program is ::
+
+ from multiprocessing import Process
+
+ def f(name):
+ print 'hello', name
+
+ if __name__ == '__main__':
+ p = Process(target=f, args=('bob',))
+ p.start()
+ p.join()
+
+Here the function ``f`` is run in a child process.
+
+For an explanation of why (on Windows) the ``if __name__ == '__main__'`` part is
+necessary, see :ref:`multiprocessing-programming`.
+
+
+
+Exchanging objects between processes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+:mod:`multiprocessing` supports two types of communication channel between
+processes:
+
+**Queues**
+
+ The :class:`Queue` class is a near clone of :class:`Queue.Queue`. For
+ example::
+
+ from multiprocessing import Process, Queue
+
+ def f(q):
+ q.put([42, None, 'hello'])
+
+ if __name__ == '__main__':
+ q = Queue()
+ p = Process(target=f, args=(q,))
+ p.start()
+ print q.get() # prints "[42, None, 'hello']"
+ p.join()
+
+ Queues are thread and process safe.
+
+**Pipes**
+
+ The :func:`Pipe` function returns a pair of connection objects connected by a
+ pipe which by default is duplex (two-way). For example::
+
+ from multiprocessing import Process, Pipe
+
+ def f(conn):
+ conn.send([42, None, 'hello'])
+ conn.close()
+
+ if __name__ == '__main__':
+ parent_conn, child_conn = Pipe()
+ p = Process(target=f, args=(child_conn,))
+ p.start()
+ print parent_conn.recv() # prints "[42, None, 'hello']"
+ p.join()
+
+ The two connection objects returned by :func:`Pipe` represent the two ends of
+ the pipe. Each connection object has :meth:`send` and :meth:`recv` methods
+ (among others). Note that data in a pipe may become corrupted if two
+ processes (or threads) try to read from or write to the *same* end of the
+ pipe at the same time. Of course there is no risk of corruption from
+ processes using different ends of the pipe at the same time.
+
+
+Synchronization between processes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+:mod:`multiprocessing` contains equivalents of all the synchronization
+primitives from :mod:`threading`. For instance one can use a lock to ensure
+that only one process prints to standard output at a time::
+
+ from multiprocessing import Process, Lock
+
+ def f(l, i):
+ l.acquire()
+ print 'hello world', i
+ l.release()
+
+ if __name__ == '__main__':
+ lock = Lock()
+
+ for num in range(10):
+ Process(target=f, args=(lock, num)).start()
+
+Without using the lock, output from the different processes is liable to get
+all mixed up.
+
+
+Sharing state between processes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As mentioned above, when doing concurrent programming it is usually best to
+avoid using shared state as far as possible. This is particularly true when
+using multiple processes.
+
+However, if you really do need to use some shared data then
+:mod:`multiprocessing` provides a couple of ways of doing so.
+
+**Shared memory**
+
+ Data can be stored in a shared memory map using :class:`Value` or
+ :class:`Array`. For example, the following code ::
+
+ from multiprocessing import Process, Value, Array
+
+ def f(n, a):
+ n.value = 3.1415927
+ for i in range(len(a)):
+ a[i] = -a[i]
+
+ if __name__ == '__main__':
+ num = Value('d', 0.0)
+ arr = Array('i', range(10))
+
+ p = Process(target=f, args=(num, arr))
+ p.start()
+ p.join()
+
+ print num.value
+ print arr[:]
+
+ will print ::
+
+ 3.1415927
+ [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]
+
+ The ``'d'`` and ``'i'`` arguments used when creating ``num`` and ``arr`` are
+ typecodes of the kind used by the :mod:`array` module: ``'d'`` indicates a
+   double precision float and ``'i'`` indicates a signed integer. These shared
+ objects will be process and thread safe.
+
+ For more flexibility in using shared memory one can use the
+ :mod:`multiprocessing.sharedctypes` module which supports the creation of
+ arbitrary ctypes objects allocated from shared memory.
+
+**Server process**
+
+ A manager object returned by :func:`Manager` controls a server process which
+   holds Python objects and allows other processes to manipulate them using
+ proxies.
+
+ A manager returned by :func:`Manager` will support types :class:`list`,
+ :class:`dict`, :class:`Namespace`, :class:`Lock`, :class:`RLock`,
+ :class:`Semaphore`, :class:`BoundedSemaphore`, :class:`Condition`,
+ :class:`Event`, :class:`Queue`, :class:`Value` and :class:`Array`. For
+ example, ::
+
+ from multiprocessing import Process, Manager
+
+ def f(d, l):
+ d[1] = '1'
+ d['2'] = 2
+ d[0.25] = None
+ l.reverse()
+
+ if __name__ == '__main__':
+ manager = Manager()
+
+ d = manager.dict()
+ l = manager.list(range(10))
+
+ p = Process(target=f, args=(d, l))
+ p.start()
+ p.join()
+
+ print d
+ print l
+
+ will print ::
+
+ {0.25: None, 1: '1', '2': 2}
+ [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+
+ Server process managers are more flexible than using shared memory objects
+ because they can be made to support arbitrary object types. Also, a single
+ manager can be shared by processes on different computers over a network.
+ They are, however, slower than using shared memory.
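+
+   Since a manager communicates over a connection, a server process started
+   on one machine can in principle be used from another. As a minimal sketch
+   (the address, the authentication key and the ``get_queue`` name are
+   hypothetical, and the manager is assumed to be already serving)::
+
+      from multiprocessing.managers import BaseManager
+
+      class QueueManager(BaseManager):
+          pass
+
+      QueueManager.register('get_queue')
+
+      m = QueueManager(address=('foo.example.org', 50000),
+                       authkey='abracadabra')
+      m.connect()               # connect to the already running manager
+      queue = m.get_queue()     # proxy for a queue living on the server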
+
+
+Using a pool of workers
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The :class:`multiprocessing.Pool` class represents a pool of worker
+processes. It has methods which allow tasks to be offloaded to the worker
+processes in a few different ways.
+
+For example::
+
+ from multiprocessing import Pool
+
+ def f(x):
+ return x*x
+
+ if __name__ == '__main__':
+ pool = Pool(processes=4) # start 4 worker processes
+       result = pool.apply_async(f, [10])  # evaluate "f(10)" asynchronously
+ print result.get(timeout=1) # prints "100" unless your computer is *very* slow
+ print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]"
+
+
+Reference
+---------
+
+The :mod:`multiprocessing` package mostly replicates the API of the
+:mod:`threading` module.
+
+
+:class:`Process` and exceptions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: Process([group[, target[, name[, args[, kwargs]]]]])
+
+ Process objects represent activity that is run in a separate process. The
+ :class:`Process` class has equivalents of all the methods of
+ :class:`threading.Thread`.
+
+   The constructor should always be called with keyword arguments. *group*
+   should always be ``None``; it exists solely for compatibility with
+   :class:`threading.Thread`. *target* is the callable object to be invoked by
+   the :meth:`run()` method. It defaults to ``None``, meaning nothing is
+   called. *name* is the process name. By default, a unique name is constructed
+ of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\
+ :sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length
+ is determined by the *generation* of the process. *args* is the argument
+ tuple for the target invocation. *kwargs* is a dictionary of keyword
+ arguments for the target invocation. By default, no arguments are passed to
+ *target*.
+
+ If a subclass overrides the constructor, it must make sure it invokes the
+ base class constructor (:meth:`Process.__init__`) before doing anything else
+ to the process.
+
+ .. method:: run()
+
+ Method representing the process's activity.
+
+ You may override this method in a subclass. The standard :meth:`run`
+ method invokes the callable object passed to the object's constructor as
+ the target argument, if any, with sequential and keyword arguments taken
+ from the *args* and *kwargs* arguments, respectively.
+
+ .. method:: start()
+
+ Start the process's activity.
+
+ This must be called at most once per process object. It arranges for the
+ object's :meth:`run` method to be invoked in a separate process.
+
+ .. method:: join([timeout])
+
+ Block the calling thread until the process whose :meth:`join` method is
+ called terminates or until the optional timeout occurs.
+
+ If *timeout* is ``None`` then there is no timeout.
+
+ A process can be joined many times.
+
+ A process cannot join itself because this would cause a deadlock. It is
+ an error to attempt to join a process before it has been started.
+
+ .. method:: get_name()
+
+ Return the process's name.
+
+ .. method:: set_name(name)
+
+ Set the process's name.
+
+ The name is a string used for identification purposes only. It has no
+ semantics. Multiple processes may be given the same name. The initial
+ name is set by the constructor.
+
+ .. method:: is_alive()
+
+ Return whether the process is alive.
+
+ Roughly, a process object is alive from the moment the :meth:`start`
+ method returns until the child process terminates.
+
+ .. method:: is_daemon()
+
+ Return the process's daemon flag.
+
+ .. method:: set_daemon(daemonic)
+
+ Set the process's daemon flag to the Boolean value *daemonic*. This must
+ be called before :meth:`start` is called.
+
+ The initial value is inherited from the creating process.
+
+ When a process exits, it attempts to terminate all of its daemonic child
+ processes.
+
+ Note that a daemonic process is not allowed to create child processes.
+ Otherwise a daemonic process would leave its children orphaned if it gets
+ terminated when its parent process exits.
+
+ In addition process objects also support the following methods:
+
+ .. method:: get_pid()
+
+ Return the process ID. Before the process is spawned, this will be
+ ``None``.
+
+   .. method:: get_exitcode()
+
+ Return the child's exit code. This will be ``None`` if the process has
+ not yet terminated. A negative value *-N* indicates that the child was
+ terminated by signal *N*.
+
+   .. method:: get_authkey()
+
+      Return the process's authentication key (a byte string).
+
+      When :mod:`multiprocessing` is initialized the main process is assigned
+      a random string using :func:`os.urandom`.
+
+      When a :class:`Process` object is created, it will inherit the
+      authentication key of its parent process, although this may be changed
+      using :meth:`set_authkey` below.
+
+ See :ref:`multiprocessing-auth-keys`.
+
+   .. method:: set_authkey(authkey)
+
+      Set the process's authentication key which must be a byte string.
+
+   .. method:: terminate()
+
+ Terminate the process. On Unix this is done using the ``SIGTERM`` signal,
+ on Windows ``TerminateProcess()`` is used. Note that exit handlers and
+ finally clauses etc will not be executed.
+
+ Note that descendant processes of the process will *not* be terminated --
+ they will simply become orphaned.
+
+ .. warning::
+
+ If this method is used when the associated process is using a pipe or
+ queue then the pipe or queue is liable to become corrupted and may
+ become unusable by other process. Similarly, if the process has
+ acquired a lock or semaphore etc. then terminating it is liable to
+ cause other processes to deadlock.
+
+   Note that the :meth:`start`, :meth:`join`, :meth:`is_alive` and
+   :meth:`get_exitcode` methods should only be called by the process that
+   created the process object.
+
+ Example usage of some of the methods of :class:`Process`::
+
+      >>> import multiprocessing, time, signal
+      >>> p = multiprocessing.Process(target=time.sleep, args=(1000,))
+ >>> print p, p.is_alive()
+ <Process(Process-1, initial)> False
+ >>> p.start()
+ >>> print p, p.is_alive()
+ <Process(Process-1, started)> True
+ >>> p.terminate()
+ >>> print p, p.is_alive()
+ <Process(Process-1, stopped[SIGTERM])> False
+      >>> p.get_exitcode() == -signal.SIGTERM
+ True
+
+
+.. exception:: BufferTooShort
+
+ Exception raised by :meth:`Connection.recv_bytes_into()` when the supplied
+ buffer object is too small for the message read.
+
+ If ``e`` is an instance of :exc:`BufferTooShort` then ``e.args[0]`` will give
+ the message as a byte string.
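+
+   For example (an illustrative sketch)::
+
+      >>> from multiprocessing import Pipe, BufferTooShort
+      >>> import array
+      >>> a, b = Pipe()
+      >>> a.send_bytes('x' * 10)
+      >>> buf = array.array('c', ' ' * 5)      # deliberately too small
+      >>> try:
+      ...     b.recv_bytes_into(buf)
+      ... except BufferTooShort, e:
+      ...     print 'full message was %r' % e.args[0]
+      ...
+      full message was 'xxxxxxxxxx'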
+
+
+Pipes and Queues
+~~~~~~~~~~~~~~~~
+
+When using multiple processes, one generally uses message passing for
+communication between processes and avoids having to use any synchronization
+primitives like locks.
+
+For passing messages one can use :func:`Pipe` (for a connection between two
+processes) or a queue (which allows multiple producers and consumers).
+
+The :class:`Queue` and :class:`JoinableQueue` types are multi-producer,
+multi-consumer FIFO queues modelled on the :class:`Queue.Queue` class in the
+standard library. They differ in that :class:`Queue` lacks the
+:meth:`task_done` and :meth:`join` methods introduced into Python 2.5's
+:class:`Queue.Queue` class.
+
+If you use :class:`JoinableQueue` then you **must** call
+:meth:`JoinableQueue.task_done` for each task removed from the queue or else the
+semaphore used to count the number of unfinished tasks may eventually overflow
+raising an exception.
+
+.. note::
+
+ :mod:`multiprocessing` uses the usual :exc:`Queue.Empty` and
+ :exc:`Queue.Full` exceptions to signal a timeout. They are not available in
+ the :mod:`multiprocessing` namespace so you need to import them from
+ :mod:`Queue`.
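+
+   For example, to poll a queue without blocking indefinitely one might
+   write (a small sketch)::
+
+      from Queue import Empty
+      import multiprocessing
+
+      q = multiprocessing.Queue()
+      try:
+          item = q.get(timeout=0.1)   # raises Queue.Empty after 0.1 seconds
+      except Empty:
+          item = None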
+
+
+.. warning::
+
+   If a process is killed using :meth:`Process.terminate` or :func:`os.kill`
+   while it is trying to use a :class:`Queue`, then the data in the queue is
+   likely to become corrupted. This may cause other processes to get an
+   exception when they try to use the queue later on.
+
+.. warning::
+
+ As mentioned above, if a child process has put items on a queue (and it has
+ not used :meth:`JoinableQueue.cancel_join_thread`), then that process will
+ not terminate until all buffered items have been flushed to the pipe.
+
+ This means that if you try joining that process you may get a deadlock unless
+ you are sure that all items which have been put on the queue have been
+ consumed. Similarly, if the child process is non-daemonic then the parent
+   process may hang on exit when it tries to join all its non-daemonic children.
+
+ Note that a queue created using a manager does not have this issue. See
+ :ref:`multiprocessing-programming`.
+
+Note that one can also create a shared queue by using a manager object -- see
+:ref:`multiprocessing-managers`.
+
+For an example of the usage of queues for interprocess communication see
+:ref:`multiprocessing-examples`.
+
+
+.. function:: Pipe([duplex])
+
+ Returns a pair ``(conn1, conn2)`` of :class:`Connection` objects representing
+ the ends of a pipe.
+
+ If *duplex* is ``True`` (the default) then the pipe is bidirectional. If
+ *duplex* is ``False`` then the pipe is unidirectional: ``conn1`` can only be
+ used for receiving messages and ``conn2`` can only be used for sending
+ messages.
+
+
+.. class:: Queue([maxsize])
+
+ Returns a process shared queue implemented using a pipe and a few
+ locks/semaphores. When a process first puts an item on the queue a feeder
+ thread is started which transfers objects from a buffer into the pipe.
+
+ The usual :exc:`Queue.Empty` and :exc:`Queue.Full` exceptions from the
+ standard library's :mod:`Queue` module are raised to signal timeouts.
+
+ :class:`Queue` implements all the methods of :class:`Queue.Queue` except for
+ :meth:`task_done` and :meth:`join`.
+
+ .. method:: qsize()
+
+ Return the approximate size of the queue. Because of
+ multithreading/multiprocessing semantics, this number is not reliable.
+
+      Note that this may raise :exc:`NotImplementedError` on Unix platforms
+      like Mac OS X where ``sem_getvalue()`` is not implemented.
+
+ .. method:: empty()
+
+ Return ``True`` if the queue is empty, ``False`` otherwise. Because of
+ multithreading/multiprocessing semantics, this is not reliable.
+
+ .. method:: full()
+
+ Return ``True`` if the queue is full, ``False`` otherwise. Because of
+ multithreading/multiprocessing semantics, this is not reliable.
+
+   .. method:: put(item[, block[, timeout]])
+
+ Put item into the queue. If optional args *block* is ``True`` (the
+ default) and *timeout* is ``None`` (the default), block if necessary until
+ a free slot is available. If *timeout* is a positive number, it blocks at
+ most *timeout* seconds and raises the :exc:`Queue.Full` exception if no
+ free slot was available within that time. Otherwise (*block* is
+ ``False``), put an item on the queue if a free slot is immediately
+ available, else raise the :exc:`Queue.Full` exception (*timeout* is
+ ignored in that case).
+
+ .. method:: put_nowait(item)
+
+ Equivalent to ``put(item, False)``.
+
+ .. method:: get([block[, timeout]])
+
+ Remove and return an item from the queue. If optional args *block* is
+ ``True`` (the default) and *timeout* is ``None`` (the default), block if
+ necessary until an item is available. If *timeout* is a positive number,
+ it blocks at most *timeout* seconds and raises the :exc:`Queue.Empty`
+ exception if no item was available within that time. Otherwise (block is
+ ``False``), return an item if one is immediately available, else raise the
+ :exc:`Queue.Empty` exception (*timeout* is ignored in that case).
+
+   .. method:: get_nowait()
+
+      Equivalent to ``get(False)``.
+
+ :class:`multiprocessing.Queue` has a few additional methods not found in
+ :class:`Queue.Queue` which are usually unnecessary:
+
+ .. method:: close()
+
+ Indicate that no more data will be put on this queue by the current
+ process. The background thread will quit once it has flushed all buffered
+ data to the pipe. This is called automatically when the queue is garbage
+ collected.
+
+ .. method:: join_thread()
+
+ Join the background thread. This can only be used after :meth:`close` has
+ been called. It blocks until the background thread exits, ensuring that
+ all data in the buffer has been flushed to the pipe.
+
+ By default if a process is not the creator of the queue then on exit it
+ will attempt to join the queue's background thread. The process can call
+ :meth:`cancel_join_thread()` to make :meth:`join_thread()` do nothing.
+
+ .. method:: cancel_join_thread()
+
+ Prevent :meth:`join_thread` from blocking. In particular, this prevents
+ the background thread from being joined automatically when the process
+ exits -- see :meth:`join_thread()`.
+
+
+.. class:: JoinableQueue([maxsize])
+
+ :class:`JoinableQueue`, a :class:`Queue` subclass, is a queue which
+ additionally has :meth:`task_done` and :meth:`join` methods.
+
+ .. method:: task_done()
+
+ Indicate that a formerly enqueued task is complete. Used by queue consumer
+ threads. For each :meth:`get` used to fetch a task, a subsequent call to
+ :meth:`task_done` tells the queue that the processing on the task is
+ complete.
+
+ If a :meth:`join` is currently blocking, it will resume when all items
+ have been processed (meaning that a :meth:`task_done` call was received
+ for every item that had been :meth:`put` into the queue).
+
+ Raises a :exc:`ValueError` if called more times than there were items
+ placed in the queue.
+
+
+ .. method:: join()
+
+ Block until all items in the queue have been gotten and processed.
+
+ The count of unfinished tasks goes up whenever an item is added to the
+ queue. The count goes down whenever a consumer thread calls
+ :meth:`task_done` to indicate that the item was retrieved and all work on
+ it is complete. When the count of unfinished tasks drops to zero,
+ :meth:`join` unblocks.
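+
+The usual pattern is sketched below (the ``worker`` function and the use of a
+daemonic process are illustrative assumptions, not part of the API)::
+
+   from multiprocessing import Process, JoinableQueue
+
+   def worker(q):
+       while True:
+           item = q.get()
+           print 'processing', item        # stand-in for real work
+           q.task_done()
+
+   if __name__ == '__main__':
+       q = JoinableQueue()
+       p = Process(target=worker, args=(q,))
+       p.set_daemon(True)      # daemonic worker dies with the main process
+       p.start()
+
+       for i in range(10):
+           q.put(i)
+
+       q.join()    # blocks until task_done() has been called for every item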
+
+
+Miscellaneous
+~~~~~~~~~~~~~
+
+.. function:: active_children()
+
+ Return list of all live children of the current process.
+
+   Calling this has the side effect of "joining" any processes which have
+   already finished.
+
+.. function:: cpu_count()
+
+ Return the number of CPUs in the system. May raise
+ :exc:`NotImplementedError`.
+
+.. function:: current_process()
+
+ Return the :class:`Process` object corresponding to the current process.
+
+ An analogue of :func:`threading.current_thread`.
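+
+   For example, one would typically expect::
+
+      >>> import multiprocessing
+      >>> multiprocessing.current_process().get_name()
+      'MainProcess'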
+
+.. function:: freeze_support()
+
+ Add support for when a program which uses :mod:`multiprocessing` has been
+ frozen to produce a Windows executable. (Has been tested with **py2exe**,
+ **PyInstaller** and **cx_Freeze**.)
+
+ One needs to call this function straight after the ``if __name__ ==
+ '__main__'`` line of the main module. For example::
+
+ from multiprocessing import Process, freeze_support
+
+ def f():
+ print 'hello world!'
+
+ if __name__ == '__main__':
+ freeze_support()
+ Process(target=f).start()
+
+ If the :func:`freeze_support()` line is missed out then trying to run the
+ frozen executable will raise :exc:`RuntimeError`.
+
+ If the module is being run normally by the Python interpreter then
+ :func:`freeze_support()` has no effect.
+
+.. function:: set_executable(executable)
+
+ Sets the path of the Python interpreter to use when starting a child process.
+ (By default ``sys.executable`` is used). Embedders will probably need to do
+ something like ::
+
+ set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe'))
+
+ before they can create child processes. (Windows only)
+
+
+.. note::
+
+ :mod:`multiprocessing` contains no analogues of
+ :func:`threading.active_count`, :func:`threading.enumerate`,
+ :func:`threading.settrace`, :func:`threading.setprofile`,
+ :class:`threading.Timer`, or :class:`threading.local`.
+
+
+Connection Objects
+~~~~~~~~~~~~~~~~~~
+
+Connection objects allow the sending and receiving of picklable objects or
+strings. They can be thought of as message oriented connected sockets.
+
+Connection objects are usually created using :func:`Pipe()` -- see also
+:ref:`multiprocessing-listeners-clients`.
+
+.. class:: Connection
+
+ .. method:: send(obj)
+
+ Send an object to the other end of the connection which should be read
+ using :meth:`recv`.
+
+ The object must be picklable.
+
+ .. method:: recv()
+
+ Return an object sent from the other end of the connection using
+ :meth:`send`. Raises :exc:`EOFError` if there is nothing left to receive
+ and the other end was closed.
+
+ .. method:: fileno()
+
+ Returns the file descriptor or handle used by the connection.
+
+ .. method:: close()
+
+ Close the connection.
+
+ This is called automatically when the connection is garbage collected.
+
+ .. method:: poll([timeout])
+
+ Return whether there is any data available to be read.
+
+ If *timeout* is not specified then it will return immediately. If
+ *timeout* is a number then this specifies the maximum time in seconds to
+ block. If *timeout* is ``None`` then an infinite timeout is used.
+
+ .. method:: send_bytes(buffer[, offset[, size]])
+
+ Send byte data from an object supporting the buffer interface as a
+ complete message.
+
+ If *offset* is given then data is read from that position in *buffer*. If
+ *size* is given then that many bytes will be read from *buffer*.
+
+ .. method:: recv_bytes([maxlength])
+
+ Return a complete message of byte data sent from the other end of the
+ connection as a string. Raises :exc:`EOFError` if there is nothing left
+ to receive and the other end has closed.
+
+ If *maxlength* is specified and the message is longer than *maxlength*
+ then :exc:`IOError` is raised and the connection will no longer be
+ readable.
+
+ .. method:: recv_bytes_into(buffer[, offset])
+
+ Read into *buffer* a complete message of byte data sent from the other end
+ of the connection and return the number of bytes in the message. Raises
+ :exc:`EOFError` if there is nothing left to receive and the other end was
+ closed.
+
+ *buffer* must be an object satisfying the writable buffer interface. If
+ *offset* is given then the message will be written into the buffer from
+ that position. *offset* must be a non-negative integer less than the
+ length of *buffer* (in bytes).
+
+ If the buffer is too short then a :exc:`BufferTooShort` exception is
+ raised and the complete message is available as ``e.args[0]`` where ``e``
+ is the exception instance.
+
+
+For example:
+
+ >>> from multiprocessing import Pipe
+ >>> a, b = Pipe()
+ >>> a.send([1, 'hello', None])
+ >>> b.recv()
+ [1, 'hello', None]
+ >>> b.send_bytes('thank you')
+ >>> a.recv_bytes()
+ 'thank you'
+ >>> import array
+ >>> arr1 = array.array('i', range(5))
+ >>> arr2 = array.array('i', [0] * 10)
+ >>> a.send_bytes(arr1)
+ >>> count = b.recv_bytes_into(arr2)
+ >>> assert count == len(arr1) * arr1.itemsize
+ >>> arr2
+ array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
+
+
+.. warning::
+
+ The :meth:`Connection.recv` method automatically unpickles the data it
+ receives, which can be a security risk unless you can trust the process
+ which sent the message.
+
+ Therefore, unless the connection object was produced using :func:`Pipe()`
+ you should only use the :meth:`recv` and :meth:`send` methods after
+ performing some sort of authentication. See :ref:`multiprocessing-auth-keys`.
+
+.. warning::
+
+ If a process is killed while it is trying to read or write to a pipe then
+ the data in the pipe is likely to become corrupted, because it may become
+ impossible to be sure where the message boundaries lie.
+
+
+Synchronization primitives
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Generally synchronization primitives are not as necessary in a multiprocess
+program as they are in a multithreaded program. See the documentation for the
+standard library's :mod:`threading` module.
+
+Note that one can also create synchronization primitives by using a manager
+object -- see :ref:`multiprocessing-managers`.
+
+.. class:: BoundedSemaphore([value])
+
+ A bounded semaphore object: a clone of :class:`threading.BoundedSemaphore`.
+
+ (On Mac OS X this is indistinguishable from :class:`Semaphore` because
+ ``sem_getvalue()`` is not implemented on that platform.)
+
+.. class:: Condition([lock])
+
+ A condition variable: a clone of :class:`threading.Condition`.
+
+ If *lock* is specified then it should be a :class:`Lock` or :class:`RLock`
+ object from :mod:`multiprocessing`.
+
+.. class:: Event()
+
+ A clone of :class:`threading.Event`.
+
+.. class:: Lock()
+
+ A non-recursive lock object: a clone of :class:`threading.Lock`.
+
+.. class:: RLock()
+
+ A recursive lock object: a clone of :class:`threading.RLock`.
+
+.. class:: Semaphore([value])
+
+ A semaphore object: a clone of :class:`threading.Semaphore`.
+
+.. note::
+
+ The :meth:`acquire()` method of :class:`BoundedSemaphore`, :class:`Lock`,
+ :class:`RLock` and :class:`Semaphore` has a timeout parameter not supported
+ by the equivalents in :mod:`threading`. The signature is
+ ``acquire(block=True, timeout=None)`` with keyword parameters being
+ acceptable. If *block* is ``True`` and *timeout* is not ``None`` then it
+ specifies a timeout in seconds. If *block* is ``False`` then *timeout* is
+ ignored.
+
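+A small sketch of the extended signature; a second :meth:`acquire` on a held,
+non-recursive lock would otherwise block forever::
+
+   from multiprocessing import Lock
+
+   lock = Lock()
+   lock.acquire()
+   # the lock is non-recursive, so this second acquire cannot succeed;
+   # with a timeout it returns False after roughly two seconds instead
+   if not lock.acquire(block=True, timeout=2.0):
+       print 'could not acquire the lock within 2 seconds'
+   lock.release()
+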
+.. note::
+
+ If the SIGINT signal generated by Ctrl-C arrives while the main thread is
+ blocked by a call to :meth:`BoundedSemaphore.acquire`, :meth:`Lock.acquire`,
+ :meth:`RLock.acquire`, :meth:`Semaphore.acquire`, :meth:`Condition.acquire`
+ or :meth:`Condition.wait` then the call will be immediately interrupted and
+ :exc:`KeyboardInterrupt` will be raised.
+
+ This differs from the behaviour of :mod:`threading` where SIGINT will be
+ ignored while the equivalent blocking calls are in progress.
+
+
+Shared :mod:`ctypes` Objects
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is possible to create shared objects using shared memory which can be
+inherited by child processes.
+
+.. function:: Value(typecode_or_type, *args[, lock])
+
+ Return a :mod:`ctypes` object allocated from shared memory. By default the
+ return value is actually a synchronized wrapper for the object.
+
+ *typecode_or_type* determines the type of the returned object: it is either a
+ ctypes type or a one character typecode of the kind used by the :mod:`array`
+ module. *\*args* is passed on to the constructor for the type.
+
+ If *lock* is ``True`` (the default) then a new lock object is created to
+ synchronize access to the value. If *lock* is a :class:`Lock` or
+ :class:`RLock` object then that will be used to synchronize access to the
+ value. If *lock* is ``False`` then access to the returned object will not be
+ automatically protected by a lock, so it will not necessarily be
+ "process-safe".
+
+ Note that *lock* is a keyword-only argument.
+
+.. function:: Array(typecode_or_type, size_or_initializer, *, lock=True)
+
+ Return a ctypes array allocated from shared memory. By default the return
+ value is actually a synchronized wrapper for the array.
+
+ *typecode_or_type* determines the type of the elements of the returned array:
+ it is either a ctypes type or a one character typecode of the kind used by
+ the :mod:`array` module. If *size_or_initializer* is an integer, then it
+ determines the length of the array, and the array will be initially zeroed.
+ Otherwise, *size_or_initializer* is a sequence which is used to initialize
+ the array and whose length determines the length of the array.
+
+ If *lock* is ``True`` (the default) then a new lock object is created to
+ synchronize access to the value. If *lock* is a :class:`Lock` or
+ :class:`RLock` object then that will be used to synchronize access to the
+ value. If *lock* is ``False`` then access to the returned object will not be
+ automatically protected by a lock, so it will not necessarily be
+ "process-safe".
+
+ Note that *lock* is a keyword only argument.
+
+ Note that an array of :data:`ctypes.c_char` has ``value`` and ``raw``
+ attributes which allow one to use it to store and retrieve strings.
+
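+For example, a sketch of why the lock matters: ``+=`` on a shared value is a
+read-modify-write, so each increment below is performed while holding the
+wrapper's lock (``add_one`` is an illustrative name)::
+
+   from multiprocessing import Process, Value
+
+   def add_one(counter):
+       for i in range(1000):
+           counter.get_lock().acquire()
+           counter.value += 1       # safe while the lock is held
+           counter.get_lock().release()
+
+   if __name__ == '__main__':
+       counter = Value('i', 0)
+       workers = [Process(target=add_one, args=(counter,)) for i in range(4)]
+       for w in workers:
+           w.start()
+       for w in workers:
+           w.join()
+       print counter.value          # reliably prints 4000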
+
+The :mod:`multiprocessing.sharedctypes` module
+>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+
+.. module:: multiprocessing.sharedctypes
+ :synopsis: Allocate ctypes objects from shared memory.
+
+The :mod:`multiprocessing.sharedctypes` module provides functions for allocating
+:mod:`ctypes` objects from shared memory which can be inherited by child
+processes.
+
+.. note::
+
+ Although it is possible to store a pointer in shared memory, remember that
+ it will refer to a location in the address space of a specific process.
+ The pointer is quite likely to be invalid in the context of a second
+ process, and trying to dereference it from the second process may cause a
+ crash.
+
+.. function:: RawArray(typecode_or_type, size_or_initializer)
+
+ Return a ctypes array allocated from shared memory.
+
+ *typecode_or_type* determines the type of the elements of the returned array:
+ it is either a ctypes type or a one character typecode of the kind used by
+ the :mod:`array` module. If *size_or_initializer* is an integer then it
+ determines the length of the array, and the array will be initially zeroed.
+ Otherwise *size_or_initializer* is a sequence which is used to initialize the
+ array and whose length determines the length of the array.
+
+ Note that setting and getting an element is potentially non-atomic -- use
+ :func:`Array` instead to make sure that access is automatically synchronized
+ using a lock.
+
+.. function:: RawValue(typecode_or_type, *args)
+
+ Return a ctypes object allocated from shared memory.
+
+ *typecode_or_type* determines the type of the returned object: it is either a
+ ctypes type or a one character typecode of the kind used by the :mod:`array`
+ module. *\*args* is passed on to the constructor for the type.
+
+ Note that setting and getting the value is potentially non-atomic -- use
+ :func:`Value` instead to make sure that access is automatically synchronized
+ using a lock.
+
+ Note that an array of :data:`ctypes.c_char` has ``value`` and ``raw``
+ attributes which allow one to use it to store and retrieve strings -- see
+ documentation for :mod:`ctypes`.
+
+.. function:: Array(typecode_or_type, size_or_initializer[, lock])
+
+ The same as :func:`RawArray` except that depending on the value of *lock* a
+ process-safe synchronization wrapper may be returned instead of a raw ctypes
+ array.
+
+ If *lock* is ``True`` (the default) then a new lock object is created to
+ synchronize access to the value. If *lock* is a :class:`Lock` or
+ :class:`RLock` object then that will be used to synchronize access to the
+ value. If *lock* is ``False`` then access to the returned object will not be
+ automatically protected by a lock, so it will not necessarily be
+ "process-safe".
+
+ Note that *lock* is a keyword-only argument.
+
+.. function:: Value(typecode_or_type, *args[, lock])
+
+ The same as :func:`RawValue` except that depending on the value of *lock* a
+ process-safe synchronization wrapper may be returned instead of a raw ctypes
+ object.
+
+ If *lock* is ``True`` (the default) then a new lock object is created to
+ synchronize access to the value. If *lock* is a :class:`Lock` or
+ :class:`RLock` object then that will be used to synchronize access to the
+ value. If *lock* is ``False`` then access to the returned object will not be
+ automatically protected by a lock, so it will not necessarily be
+ "process-safe".
+
+ Note that *lock* is a keyword-only argument.
+
+.. function:: copy(obj)
+
+ Return a ctypes object allocated from shared memory which is a copy of the
+ ctypes object *obj*.
+
+.. function:: synchronized(obj[, lock])
+
+ Return a process-safe wrapper object for a ctypes object which uses *lock* to
+ synchronize access. If *lock* is ``None`` (the default) then a
+ :class:`multiprocessing.RLock` object is created automatically.
+
+ A synchronized wrapper will have two methods in addition to those of the
+ object it wraps: :meth:`get_obj()` returns the wrapped object and
+ :meth:`get_lock()` returns the lock object used for synchronization.
+
+ Note that accessing the ctypes object through the wrapper can be a lot slower
+ than accessing the raw ctypes object.
+
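+As a minimal sketch of wrapping a raw object after the fact::
+
+   from multiprocessing.sharedctypes import RawValue, synchronized
+
+   raw = RawValue('i', 0)
+   shared = synchronized(raw)       # wrapped with a freshly created RLock
+   shared.get_lock().acquire()
+   shared.value += 1                # compound update done while holding the lock
+   shared.get_lock().release()
+   print shared.get_obj() is raw    # prints True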
+
+The table below compares the syntax for creating shared ctypes objects from
+shared memory with the normal ctypes syntax. (In the table ``MyStruct`` is some
+subclass of :class:`ctypes.Structure`.)
+
+==================== ========================== ===========================
+ctypes               sharedctypes using type    sharedctypes using typecode
+==================== ========================== ===========================
+c_double(2.4)        RawValue(c_double, 2.4)    RawValue('d', 2.4)
+MyStruct(4, 6)       RawValue(MyStruct, 4, 6)
+(c_short * 7)()      RawArray(c_short, 7)       RawArray('h', 7)
+(c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8))
+==================== ========================== ===========================
+
+
+Below is an example where a number of ctypes objects are modified by a child
+process::
+
+ from multiprocessing import Process, Lock
+ from multiprocessing.sharedctypes import Value, Array
+ from ctypes import Structure, c_double
+
+ class Point(Structure):
+ _fields_ = [('x', c_double), ('y', c_double)]
+
+ def modify(n, x, s, A):
+ n.value **= 2
+ x.value **= 2
+ s.value = s.value.upper()
+ for a in A:
+ a.x **= 2
+ a.y **= 2
+
+ if __name__ == '__main__':
+ lock = Lock()
+
+ n = Value('i', 7)
+ x = Value(c_double, 1.0/3.0, lock=False)
+ s = Array('c', 'hello world', lock=lock)
+ A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock)
+
+ p = Process(target=modify, args=(n, x, s, A))
+ p.start()
+ p.join()
+
+ print n.value
+ print x.value
+ print s.value
+ print [(a.x, a.y) for a in A]
+
+
+.. highlightlang:: none
+
+The results printed are ::
+
+ 49
+ 0.1111111111111111
+ HELLO WORLD
+ [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]
+
+.. highlightlang:: python
+
+
+.. _multiprocessing-managers:
+
+Managers
+~~~~~~~~
+
+Managers provide a way to create data which can be shared between different
+processes. A manager object controls a server process which manages *shared
+objects*. Other processes can access the shared objects by using proxies.
+
+.. function:: multiprocessing.Manager()
+
+ Returns a started :class:`SyncManager` object which can be used for sharing
+ objects between processes. The returned manager object corresponds to a
+ spawned child process and has methods which will create shared objects and
+ return corresponding proxies.
+
+.. module:: multiprocessing.managers
+ :synopsis: Share data between processes with shared objects.
+
+Manager processes will be shut down as soon as they are garbage collected or
+their parent process exits. The manager classes are defined in the
+:mod:`multiprocessing.managers` module:
+
+.. class:: BaseManager([address[, authkey]])
+
+ Create a BaseManager object.
+
+ Once created one should call :meth:`start` or :meth:`serve_forever` to ensure
+ that the manager object refers to a started manager process.
+
+ *address* is the address on which the manager process listens for new
+ connections. If *address* is ``None`` then an arbitrary one is chosen.
+
+ *authkey* is the authentication key which will be used to check the validity
+ of incoming connections to the server process. If *authkey* is ``None`` then
+ ``current_process().get_auth_key()`` is used. Otherwise *authkey* is used and
+ it must be a string.
+
+ .. method:: start()
+
+ Start a subprocess to start the manager.
+
+ .. method:: serve_forever()
+
+ Run the server in the current process.
+
+ .. method:: from_address(address, authkey)
+
+ A class method which creates a manager object referring to a pre-existing
+ server process which is using the given address and authentication key.
+
+ .. method:: shutdown()
+
+ Stop the process used by the manager. This is only available if
+ :meth:`start` has been used to start the server process.
+
+ This can be called multiple times.
+
+ .. method:: register(typeid[, callable[, proxytype[, exposed[, method_to_typeid[, create_method]]]]])
+
+ A classmethod which can be used for registering a type or callable with
+ the manager class.
+
+ *typeid* is a "type identifier" which is used to identify a particular
+ type of shared object. This must be a string.
+
+ *callable* is a callable used for creating objects for this type
+ identifier. If a manager instance will be created using the
+ :meth:`from_address()` classmethod or if the *create_method* argument is
+ ``False`` then this can be left as ``None``.
+
+ *proxytype* is a subclass of :class:`multiprocessing.managers.BaseProxy`
+ which is used to create proxies for shared objects with this *typeid*. If
+ ``None`` then a proxy class is created automatically.
+
+ *exposed* is used to specify a sequence of method names which proxies for
+ this typeid should be allowed to access using
+ :meth:`BaseProxy._call_method`. (If *exposed* is ``None`` then
+ :attr:`proxytype._exposed_` is used instead if it exists.) In the case
+ where no exposed list is specified, all "public methods" of the shared
+ object will be accessible. (Here a "public method" means any attribute
+ which has a ``__call__()`` method and whose name does not begin with
+ ``'_'``.)
+
+ *method_to_typeid* is a mapping used to specify the return type of those
+ exposed methods which should return a proxy. It maps method names to
+ typeid strings. (If *method_to_typeid* is ``None`` then
+ :attr:`proxytype._method_to_typeid_` is used instead if it exists.) If a
+ method's name is not a key of this mapping or if the mapping is ``None``
+ then the object returned by the method will be copied by value.
+
+ *create_method* determines whether a method should be created with name
+ *typeid* which can be used to tell the server process to create a new
+ shared object and return a proxy for it. By default it is ``True``.
+
+ :class:`BaseManager` instances also have one read-only property:
+
+ .. attribute:: address
+
+ The address used by the manager.
+
+
+.. class:: SyncManager
+
+ A subclass of :class:`BaseManager` which can be used for the synchronization
+ of processes. Objects of this type are returned by
+ :func:`multiprocessing.Manager()`.
+
+ It also supports creation of shared lists and dictionaries.
+
+ .. method:: BoundedSemaphore([value])
+
+ Create a shared :class:`threading.BoundedSemaphore` object and return a
+ proxy for it.
+
+ .. method:: Condition([lock])
+
+ Create a shared :class:`threading.Condition` object and return a proxy for
+ it.
+
+ If *lock* is supplied then it should be a proxy for a
+ :class:`threading.Lock` or :class:`threading.RLock` object.
+
+ .. method:: Event()
+
+ Create a shared :class:`threading.Event` object and return a proxy for it.
+
+ .. method:: Lock()
+
+ Create a shared :class:`threading.Lock` object and return a proxy for it.
+
+ .. method:: Namespace()
+
+ Create a shared :class:`Namespace` object and return a proxy for it.
+
+ .. method:: Queue([maxsize])
+
+ Create a shared :class:`Queue.Queue` object and return a proxy for it.
+
+ .. method:: RLock()
+
+ Create a shared :class:`threading.RLock` object and return a proxy for it.
+
+ .. method:: Semaphore([value])
+
+ Create a shared :class:`threading.Semaphore` object and return a proxy for
+ it.
+
+ .. method:: Array(typecode, sequence)
+
+ Create an array and return a proxy for it.
+
+ .. method:: Value(typecode, value)
+
+ Create an object with a writable ``value`` attribute and return a proxy
+ for it.
+
+ .. method:: dict()
+ dict(mapping)
+ dict(sequence)
+
+ Create a shared ``dict`` object and return a proxy for it.
+
+ .. method:: list()
+ list(sequence)
+
+ Create a shared ``list`` object and return a proxy for it.
+
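+For instance, a sketch of sharing a dict and a list through the manager::
+
+   from multiprocessing import Process, Manager
+
+   def f(d, l):
+       d['answer'] = 42
+       l.reverse()
+
+   if __name__ == '__main__':
+       manager = Manager()
+       d = manager.dict()
+       l = manager.list(range(5))
+
+       p = Process(target=f, args=(d, l))
+       p.start()
+       p.join()
+
+       print d      # prints {'answer': 42}
+       print l      # prints [4, 3, 2, 1, 0]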
+
+Namespace objects
+>>>>>>>>>>>>>>>>>
+
+A namespace object has no public methods, but does have writable attributes.
+Its representation shows the values of its attributes.
+
+However, when using a proxy for a namespace object, an attribute beginning with
+``'_'`` will be an attribute of the proxy and not an attribute of the referent::
+
+ >>> manager = multiprocessing.Manager()
+ >>> Global = manager.Namespace()
+ >>> Global.x = 10
+ >>> Global.y = 'hello'
+ >>> Global._z = 12.3 # this is an attribute of the proxy
+ >>> print Global
+ Namespace(x=10, y='hello')
+
+
+Customized managers
+>>>>>>>>>>>>>>>>>>>
+
+To create one's own manager, one creates a subclass of :class:`BaseManager` and
+uses the :meth:`register` classmethod to register new types or callables with
+the manager class. For example::
+
+ from multiprocessing.managers import BaseManager
+
+ class MathsClass(object):
+ def add(self, x, y):
+ return x + y
+ def mul(self, x, y):
+ return x * y
+
+ class MyManager(BaseManager):
+ pass
+
+ MyManager.register('Maths', MathsClass)
+
+ if __name__ == '__main__':
+ manager = MyManager()
+ manager.start()
+ maths = manager.Maths()
+ print maths.add(4, 3) # prints 7
+ print maths.mul(7, 8) # prints 56
+
+
+Using a remote manager
+>>>>>>>>>>>>>>>>>>>>>>
+
+It is possible to run a manager server on one machine and have clients use it
+from other machines (assuming that the firewalls involved allow it).
+
+Running the following commands creates a server for a single shared queue which
+remote clients can access::
+
+ >>> from multiprocessing.managers import BaseManager
+ >>> import Queue
+ >>> queue = Queue.Queue()
+ >>> class QueueManager(BaseManager): pass
+ ...
+ >>> QueueManager.register('getQueue', callable=lambda: queue)
+ >>> m = QueueManager(address=('', 50000), authkey='abracadabra')
+ >>> m.serve_forever()
+
+One client can access the server as follows::
+
+ >>> from multiprocessing.managers import BaseManager
+ >>> class QueueManager(BaseManager): pass
+ ...
+ >>> QueueManager.register('getQueue')
+ >>> m = QueueManager.from_address(address=('foo.bar.org', 50000),
+ ... authkey='abracadabra')
+ >>> queue = m.getQueue()
+ >>> queue.put('hello')
+
+Another client can also use it::
+
+ >>> from multiprocessing.managers import BaseManager
+ >>> class QueueManager(BaseManager): pass
+ ...
+ >>> QueueManager.register('getQueue')
+ >>> m = QueueManager.from_address(address=('foo.bar.org', 50000), authkey='abracadabra')
+ >>> queue = m.getQueue()
+ >>> queue.get()
+ 'hello'
+
+
+Proxy Objects
+~~~~~~~~~~~~~
+
+A proxy is an object which *refers* to a shared object which lives (presumably)
+in a different process. The shared object is said to be the *referent* of the
+proxy. Multiple proxy objects may have the same referent.
+
+A proxy object has methods which invoke corresponding methods of its referent
+(although not every method of the referent will necessarily be available through
+the proxy). A proxy can usually be used in most of the same ways that its
+referent can::
+
+ >>> from multiprocessing import Manager
+ >>> manager = Manager()
+ >>> l = manager.list([i*i for i in range(10)])
+ >>> print l
+ [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
+ >>> print repr(l)
+ <ListProxy object, typeid 'list' at 0xb799974c>
+ >>> l[4]
+ 16
+ >>> l[2:5]
+ [4, 9, 16]
+
+Notice that applying :func:`str` to a proxy will return the representation of
+the referent, whereas applying :func:`repr` will return the representation of
+the proxy.
+
+An important feature of proxy objects is that they are picklable so they can be
+passed between processes. Note, however, that if a proxy is sent to the
+corresponding manager's process then unpickling it will produce the referent
+itself. This means, for example, that one shared object can contain a second::
+
+ >>> a = manager.list()
+ >>> b = manager.list()
+ >>> a.append(b) # referent of `a` now contains referent of `b`
+ >>> print a, b
+ [[]] []
+ >>> b.append('hello')
+ >>> print a, b
+ [['hello']] ['hello']
+
+.. note::
+
+ The proxy types in :mod:`multiprocessing` do nothing to support comparisons
+ by value. So, for instance, ::
+
+ manager.list([1,2,3]) == [1,2,3]
+
+ will return ``False``. One should just use a copy of the referent instead
+ when making comparisons.
+
+.. class:: BaseProxy
+
+ Proxy objects are instances of subclasses of :class:`BaseProxy`.
+
+ .. method:: _call_method(methodname[, args[, kwds]])
+
+ Call and return the result of a method of the proxy's referent.
+
+ If ``proxy`` is a proxy whose referent is ``obj`` then the expression ::
+
+ proxy._call_method(methodname, args, kwds)
+
+ will evaluate the expression ::
+
+ getattr(obj, methodname)(*args, **kwds)
+
+ in the manager's process.
+
+ The returned value will be a copy of the result of the call or a proxy to
+ a new shared object -- see documentation for the *method_to_typeid*
+ argument of :meth:`BaseManager.register`.
+
+ If an exception is raised by the call, then it is re-raised by
+ :meth:`_call_method`. If some other exception is raised in the manager's
+ process then this is converted into a :exc:`RemoteError` exception and is
+ raised by :meth:`_call_method`.
+
+ Note in particular that an exception will be raised if *methodname* has
+ not been *exposed*.
+
+ An example of the usage of :meth:`_call_method()`::
+
+ >>> l = manager.list(range(10))
+ >>> l._call_method('__len__')
+ 10
+ >>> l._call_method('__getslice__', (2, 7)) # equiv to `l[2:7]`
+ [2, 3, 4, 5, 6]
+ >>> l._call_method('__getitem__', (20,)) # equiv to `l[20]`
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
+
+ .. method:: _get_value()
+
+ Return a copy of the referent.
+
+ If the referent is unpicklable then this will raise an exception.
+
+ .. method:: __repr__
+
+ Return a representation of the proxy object.
+
+ .. method:: __str__
+
+ Return the representation of the referent.
+
+
+Cleanup
+>>>>>>>
+
+A proxy object uses a weakref callback so that when it gets garbage collected it
+deregisters itself from the manager which owns its referent.
+
+A shared object gets deleted from the manager process when there are no longer
+any proxies referring to it.
+
+
+Process Pools
+~~~~~~~~~~~~~
+
+.. module:: multiprocessing.pool
+ :synopsis: Create pools of processes.
+
+One can create a pool of processes which will carry out tasks submitted to it
+with the :class:`Pool` class in :mod:`multiprocessing.pool`.
+
+.. class:: multiprocessing.Pool([processes[, initializer[, initargs]]])
+
+ A process pool object which controls a pool of worker processes to which jobs
+ can be submitted. It supports asynchronous results with timeouts and
+ callbacks and has a parallel map implementation.
+
+ *processes* is the number of worker processes to use. If *processes* is
+ ``None`` then the number returned by :func:`cpu_count` is used. If
+ *initializer* is not ``None`` then each worker process will call
+ ``initializer(*initargs)`` when it starts.
+
+ .. method:: apply(func[, args[, kwds]])
+
+ Equivalent of the :func:`apply` builtin function. It blocks until the
+ result is ready.
+
+ .. method:: apply_async(func[, args[, kwds[, callback]]])
+
+ A variant of the :meth:`apply` method which returns a result object.
+
+ If *callback* is specified then it should be a callable which accepts a
+ single argument. When the result becomes ready *callback* is applied to
+ it (unless the call failed). *callback* should complete immediately since
+ otherwise the thread which handles the results will get blocked.
+
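+ A sketch of using a callback (``square`` and ``on_result`` are
+ illustrative names)::
+
+    from multiprocessing import Pool
+
+    def square(x):
+        return x * x
+
+    def on_result(value):
+        # runs in the thread which handles results, so keep it quick
+        print 'got', value
+
+    if __name__ == '__main__':
+        pool = Pool(processes=2)
+        pool.apply_async(square, (3,), callback=on_result)
+        pool.close()
+        pool.join()      # 'got 9' is printed before the pool shuts down
+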
+ .. method:: map(func, iterable[, chunksize])
+
+ A parallel equivalent of the :func:`map` builtin function. It blocks until
+ the result is ready.
+
+ This method chops the iterable into a number of chunks which it submits to
+ the process pool as separate tasks. The (approximate) size of these
+ chunks can be specified by setting *chunksize* to a positive integer.
+
+ .. method:: map_async(func, iterable[, chunksize[, callback]])
+
+ A variant of the :meth:`.map` method which returns a result object.
+
+ If *callback* is specified then it should be a callable which accepts a
+ single argument. When the result becomes ready *callback* is applied to
+ it (unless the call failed). *callback* should complete immediately since
+ otherwise the thread which handles the results will get blocked.
+
+ .. method:: imap(func, iterable[, chunksize])
+
+ An equivalent of :func:`itertools.imap`.
+
+ The *chunksize* argument is the same as the one used by the :meth:`.map`
+ method. For very long iterables using a large value for *chunksize* can
+ make the job complete **much** faster than using the default value of
+ ``1``.
+
+ Also if *chunksize* is ``1`` then the :meth:`next` method of the iterator
+ returned by the :meth:`imap` method has an optional *timeout* parameter:
+ ``next(timeout)`` will raise :exc:`multiprocessing.TimeoutError` if the
+ result cannot be returned within *timeout* seconds.
+
+ .. method:: imap_unordered(func, iterable[, chunksize])
+
+ The same as :meth:`imap` except that the ordering of the results from the
+ returned iterator should be considered arbitrary. (Only when there is
+ only one worker process is the order guaranteed to be "correct".)
+
+ .. method:: close()
+
+ Prevents any more tasks from being submitted to the pool. Once all the
+ tasks have been completed the worker processes will exit.
+
+ .. method:: terminate()
+
+ Stops the worker processes immediately without completing outstanding
+ work. When the pool object is garbage collected :meth:`terminate` will be
+ called immediately.
+
+ .. method:: join()
+
+ Wait for the worker processes to exit. One must call :meth:`close` or
+ :meth:`terminate` before using :meth:`join`.
+
+
+.. class:: AsyncResult
+
+ The class of the result returned by :meth:`Pool.apply_async` and
+ :meth:`Pool.map_async`.
+
+ .. method:: get([timeout])
+
+ Return the result when it arrives. If *timeout* is not ``None`` and the
+ result does not arrive within *timeout* seconds then
+ :exc:`multiprocessing.TimeoutError` is raised. If the remote call raised
+ an exception then that exception will be reraised by :meth:`get`.
+
+ .. method:: wait([timeout])
+
+ Wait until the result is available or until *timeout* seconds pass.
+
+ .. method:: ready()
+
+ Return whether the call has completed.
+
+ .. method:: successful()
+
+ Return whether the call completed without raising an exception. Will
+ raise :exc:`AssertionError` if the result is not ready.
+
+The following example demonstrates the use of a pool::
+
+ from multiprocessing import Pool
+
+ def f(x):
+ return x*x
+
+ if __name__ == '__main__':
+ pool = Pool(processes=4) # start 4 worker processes
+
+ result = pool.apply_async(f, (10,)) # evaluate "f(10)" asynchronously
+ print result.get(timeout=1) # prints "100" unless your computer is *very* slow
+
+ print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]"
+
+ it = pool.imap(f, range(10))
+ print it.next() # prints "0"
+ print it.next() # prints "1"
+ print it.next(timeout=1) # prints "4" unless your computer is *very* slow
+
+ import time
+ result = pool.apply_async(time.sleep, (10,))
+ print result.get(timeout=1) # raises TimeoutError
+
+
+.. _multiprocessing-listeners-clients:
+
+Listeners and Clients
+~~~~~~~~~~~~~~~~~~~~~
+
+.. module:: multiprocessing.connection
+ :synopsis: API for dealing with sockets.
+
+Usually message passing between processes is done using queues or by using
+:class:`Connection` objects returned by :func:`Pipe`.
+
+However, the :mod:`multiprocessing.connection` module allows some extra
+flexibility. It basically gives a high-level message-oriented API for dealing
+with sockets or Windows named pipes, and also has support for *digest
+authentication* using the :mod:`hmac` module from the standard library.
+
+
+.. function:: deliver_challenge(connection, authkey)
+
+ Send a randomly generated message to the other end of the connection and wait
+ for a reply.
+
+ If the reply matches the digest of the message using *authkey* as the key
+ then a welcome message is sent to the other end of the connection. Otherwise
+ :exc:`AuthenticationError` is raised.
+
+.. function:: answer_challenge(connection, authkey)
+
+ Receive a message, calculate the digest of the message using *authkey* as the
+ key, and then send the digest back.
+
+ If a welcome message is not received, then :exc:`AuthenticationError` is
+ raised.
+
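+A minimal sketch of the handshake over a duplex pipe (``'secret'`` is an
+arbitrary key and ``respond`` an illustrative name)::
+
+   from multiprocessing import Pipe, Process
+   from multiprocessing.connection import deliver_challenge, answer_challenge
+
+   def respond(conn):
+       answer_challenge(conn, 'secret')   # digest the challenge and reply
+       print conn.recv()                  # prints 'authenticated!'
+
+   if __name__ == '__main__':
+       a, b = Pipe()
+       p = Process(target=respond, args=(b,))
+       p.start()
+       deliver_challenge(a, 'secret')     # raises AuthenticationError on mismatch
+       a.send('authenticated!')
+       p.join()
+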
+.. function:: Client(address[, family[, authenticate[, authkey]]])
+
+ Attempt to set up a connection to the listener which is using address
+ *address*, returning a :class:`Connection`.
+
+ The type of the connection is determined by the *family* argument, but this
+ can generally be omitted since it can usually be inferred from the format of
+ *address*. (See :ref:`multiprocessing-address-formats`.)
+
+ If *authenticate* is ``True`` or *authkey* is a string then digest
+ authentication is used. The key used for authentication will be either
+ *authkey* or ``current_process().get_auth_key()`` if *authkey* is ``None``.
+ If authentication fails then :exc:`AuthenticationError` is raised. See
+ :ref:`multiprocessing-auth-keys`.
+
+.. class:: Listener([address[, family[, backlog[, authenticate[, authkey]]]]])
+
+ A wrapper for a bound socket or Windows named pipe which is 'listening' for
+ connections.
+
+ *address* is the address to be used by the bound socket or named pipe of the
+ listener object.
+
+ *family* is the type of socket (or named pipe) to use. This can be one of
+ the strings ``'AF_INET'`` (for a TCP socket), ``'AF_UNIX'`` (for a Unix
+ domain socket) or ``'AF_PIPE'`` (for a Windows named pipe). Of these only
+ the first is guaranteed to be available. If *family* is ``None`` then the
+ family is inferred from the format of *address*. If *address* is also
+ ``None`` then a default is chosen. This default is the family which is
+ assumed to be the fastest available. See
+ :ref:`multiprocessing-address-formats`. Note that if *family* is
+ ``'AF_UNIX'`` and *address* is ``None`` then the socket will be created in a
+ private temporary directory created using :func:`tempfile.mkdtemp`.
+
+ If the listener object uses a socket then *backlog* (1 by default) is passed
+ to the :meth:`listen` method of the socket once it has been bound.
+
+ If *authenticate* is ``True`` (``False`` by default) or *authkey* is not
+ ``None`` then digest authentication is used.
+
+ If *authkey* is a string then it will be used as the authentication key;
+ otherwise it must be ``None``.
+
+ If *authkey* is ``None`` and *authenticate* is ``True`` then
+ ``current_process().get_auth_key()`` is used as the authentication key. If
+ *authkey* is ``None`` and *authenticate* is ``False`` then no
+ authentication is done. If authentication fails then
+ :exc:`AuthenticationError` is raised. See :ref:`multiprocessing-auth-keys`.
+
+ .. method:: accept()
+
+ Accept a connection on the bound socket or named pipe of the listener
+ object and return a :class:`Connection` object. If authentication is
+ attempted and fails, then :exc:`AuthenticationError` is raised.
+
+ .. method:: close()
+
+ Close the bound socket or named pipe of the listener object. This is
+ called automatically when the listener is garbage collected. However it
+ is advisable to call it explicitly.
+
+ Listener objects have the following read-only properties:
+
+ .. attribute:: address
+
+ The address which is being used by the Listener object.
+
+ .. attribute:: last_accepted
+
+ The address from which the last accepted connection came. If this is
+ unavailable then it is ``None``.
+
+
+The module defines two exceptions:
+
+.. exception:: AuthenticationError
+
+ Exception raised when there is an authentication error.
+
+.. exception:: BufferTooShort
+
+ Exception raised by the :meth:`Connection.recv_bytes_into` method of a
+ connection object when the supplied buffer object is too small for the
+ message read.
+
+ If *e* is an instance of :exc:`BufferTooShort` then ``e.args[0]`` will give
+ the message as a byte string.
+
+
+**Examples**
+
+The following server code creates a listener which uses ``'secret password'`` as
+an authentication key. It then waits for a connection and sends some data to
+the client::
+
+ from multiprocessing.connection import Listener
+ from array import array
+
+ address = ('localhost', 6000) # family is deduced to be 'AF_INET'
+ listener = Listener(address, authkey='secret password')
+
+ conn = listener.accept()
+ print 'connection accepted from', listener.last_accepted
+
+ conn.send([2.25, None, 'junk', float])
+
+ conn.send_bytes('hello')
+
+ conn.send_bytes(array('i', [42, 1729]))
+
+ conn.close()
+ listener.close()
+
+The following code connects to the server and receives some data from the
+server::
+
+ from multiprocessing.connection import Client
+ from array import array
+
+ address = ('localhost', 6000)
+ conn = Client(address, authkey='secret password')
+
+ print conn.recv() # => [2.25, None, 'junk', float]
+
+ print conn.recv_bytes() # => 'hello'
+
+ arr = array('i', [0, 0, 0, 0, 0])
+ print conn.recv_bytes_into(arr) # => 8
+ print arr # => array('i', [42, 1729, 0, 0, 0])
+
+ conn.close()
+
+
+.. _multiprocessing-address-formats:
+
+Address Formats
+>>>>>>>>>>>>>>>
+
+* An ``'AF_INET'`` address is a tuple of the form ``(hostname, port)`` where
+ *hostname* is a string and *port* is an integer.
+
+* An ``'AF_UNIX'`` address is a string representing a filename on the
+ filesystem.
+
+* An ``'AF_PIPE'`` address is a string of the form
+ ``r'\\.\pipe\PipeName'``. To use :func:`Client` to connect to a named
+ pipe on a remote computer called *ServerName* one should use an address of
+ the form ``r'\\ServerName\pipe\PipeName'`` instead.
+
+Note that any string beginning with two backslashes is assumed by default to be
+an ``'AF_PIPE'`` address rather than an ``'AF_UNIX'`` address.
+
+
+.. _multiprocessing-auth-keys:
+
+Authentication keys
+~~~~~~~~~~~~~~~~~~~
+
+When one uses :meth:`Connection.recv`, the data received is automatically
+unpickled. Unfortunately unpickling data from an untrusted source is a security
+risk. Therefore :class:`Listener` and :func:`Client` use the :mod:`hmac` module
+to provide digest authentication.
+
+An authentication key is a string which can be thought of as a password: once a
+connection is established both ends will demand proof that the other knows the
+authentication key. (Demonstrating that both ends are using the same key does
+**not** involve sending the key over the connection.)
+
+If authentication is requested but no authentication key is specified then the
+return value of ``current_process().get_auth_key()`` is used (see
+:class:`Process`). This value will automatically be inherited by any
+:class:`Process` object that the current process creates. This means that (by
+default) all processes of a multi-process program will share a single
+authentication key which can be used when setting up connections between
+themselves.
+
+Suitable authentication keys can also be generated by using :func:`os.urandom`.
+
+
+Logging
+~~~~~~~
+
+Some support for logging is available. Note, however, that the :mod:`logging`
+package does not use process shared locks so it is possible (depending on the
+handler type) for messages from different processes to get mixed up.
+
+.. currentmodule:: multiprocessing
+.. function:: get_logger()
+
+ Returns the logger used by :mod:`multiprocessing`. If necessary, a new one
+ will be created.
+
+ When first created the logger has level :data:`logging.NOTSET` and has a
+ handler which sends output to :data:`sys.stderr` using format
+ ``'[%(levelname)s/%(processName)s] %(message)s'``. (The logger allows use of
+ the non-standard ``'%(processName)s'`` format.) Messages sent to this logger
+ will not by default propagate to the root logger.
+
+ Note that on Windows child processes will only inherit the level of the
+ parent process's logger -- any other customization of the logger will not be
+ inherited.
+
+Below is an example session with logging turned on::
+
+ >>> import multiprocessing, logging
+ >>> logger = multiprocessing.get_logger()
+ >>> logger.setLevel(logging.INFO)
+ >>> logger.warning('doomed')
+ [WARNING/MainProcess] doomed
+ >>> m = multiprocessing.Manager()
+ [INFO/SyncManager-1] child process calling self.run()
+ [INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
+ >>> del m
+ [INFO/MainProcess] sending shutdown message to manager
+ [INFO/SyncManager-1] manager exiting with exitcode 0
+
+
+The :mod:`multiprocessing.dummy` module
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. module:: multiprocessing.dummy
+ :synopsis: Dumb wrapper around threading.
+
+:mod:`multiprocessing.dummy` replicates the API of :mod:`multiprocessing` but
+is no more than a wrapper around the :mod:`threading` module.
+
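+For instance, a sketch of a thread-based pool with the familiar API::
+
+   from multiprocessing.dummy import Pool
+
+   def double(x):
+       return 2 * x
+
+   if __name__ == '__main__':
+       pool = Pool(4)                     # four worker threads, not processes
+       print pool.map(double, range(5))   # prints [0, 2, 4, 6, 8]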
+
+.. _multiprocessing-programming:
+
+Programming guidelines
+----------------------
+
+There are certain guidelines and idioms which should be adhered to when using
+:mod:`multiprocessing`.
+
+
+All platforms
+~~~~~~~~~~~~~
+
+Avoid shared state
+
+ As far as possible one should try to avoid shifting large amounts of data
+ between processes.
+
+ It is probably best to stick to using queues or pipes for communication
+ between processes rather than using the lower level synchronization
+ primitives from the :mod:`threading` module.
+
+Picklability
+
+ Ensure that the arguments to the methods of proxies are picklable.
+
+Thread safety of proxies
+
+ Do not use a proxy object from more than one thread unless you protect it
+ with a lock.
+
+ (There is never a problem with different processes using the *same* proxy.)
+
+Joining zombie processes
+
+ On Unix when a process finishes but has not been joined it becomes a zombie.
+ There should never be very many because each time a new process starts (or
+ :func:`active_children` is called) all completed processes which have not
+ yet been joined will be joined. Also calling a finished process's
+ :meth:`Process.is_alive` will join the process. Even so it is probably good
+ practice to explicitly join all the processes that you start.
+
+Better to inherit than pickle/unpickle
+
+ On Windows many types from :mod:`multiprocessing` need to be picklable so
+ that child processes can use them. However, one should generally avoid
+ sending shared objects to other processes using pipes or queues. Instead
+ you should arrange the program so that a process which needs access to a
+ shared resource created elsewhere can inherit it from an ancestor process.
+
+Avoid terminating processes
+
+ Using the :meth:`Process.terminate` method to stop a process is liable to
+ cause any shared resources (such as locks, semaphores, pipes and queues)
+ currently being used by the process to become broken or unavailable to other
+ processes.
+
+ Therefore it is probably best to only consider using
+ :meth:`Process.terminate()` on processes which never use any shared
+ resources.
+
+Joining processes that use queues
+
+ Bear in mind that a process that has put items in a queue will wait before
+ terminating until all the buffered items are fed by the "feeder" thread to
+ the underlying pipe. (The child process can call the
+ :meth:`Queue.cancel_join_thread` method of the queue to avoid this behaviour.)
+
+ This means that whenever you use a queue you need to make sure that all
+ items which have been put on the queue will eventually be removed before the
+ process is joined. Otherwise you cannot be sure that processes which have
+ put items on the queue will terminate. Remember also that non-daemonic
+ processes will automatically be joined.
+
+ An example which will deadlock is the following::
+
+ from multiprocessing import Process, Queue
+
+ def f(q):
+ q.put('X' * 1000000)
+
+ if __name__ == '__main__':
+ queue = Queue()
+ p = Process(target=f, args=(queue,))
+ p.start()
+ p.join() # this deadlocks
+ obj = queue.get()
+
+ A fix here would be to swap the last two lines round (or simply remove the
+ ``p.join()`` line).
+
+Explicitly pass resources to child processes
+
+ On Unix a child process can make use of a shared resource created in a
+ parent process using a global resource. However, it is better to pass the
+ object as an argument to the constructor for the child process.
+
+ Apart from making the code (potentially) compatible with Windows this also
+ ensures that as long as the child process is still alive the object will not
+ be garbage collected in the parent process. This might be important if some
+ resource is freed when the object is garbage collected in the parent
+ process.
+
+ So for instance ::
+
+ from multiprocessing import Process, Lock
+
+ def f():
+ ... do something using "lock" ...
+
+ if __name__ == '__main__':
+ lock = Lock()
+ for i in range(10):
+ Process(target=f).start()
+
+ should be rewritten as ::
+
+ from multiprocessing import Process, Lock
+
+ def f(l):
+ ... do something using "l" ...
+
+ if __name__ == '__main__':
+ lock = Lock()
+ for i in range(10):
+ Process(target=f, args=(lock,)).start()
+
+
+Windows
+~~~~~~~
+
+Since Windows lacks :func:`os.fork` it has a few extra restrictions:
+
+More picklability
+
+ Ensure that all arguments to :meth:`Process.__init__` are picklable. This
+ means, in particular, that bound or unbound methods cannot be used directly
+ as the ``target`` argument on Windows --- just define a function and use
+ that instead.
+
+ Also, if you subclass :class:`Process` then make sure that instances will be
+ picklable when the :meth:`Process.start` method is called.
+
+Global variables
+
+ Bear in mind that if code run in a child process tries to access a global
+ variable, then the value it sees (if any) may not be the same as the value
+ in the parent process at the time that :meth:`Process.start` was called.
+
+ However, global variables which are just module level constants cause no
+ problems.
+
+Safe importing of main module
+
+ Make sure that the main module can be safely imported by a new Python
+ interpreter without causing unintended side effects (such as starting a new
+ process).
+
+ For example, under Windows running the following module would fail with a
+ :exc:`RuntimeError`::
+
+ from multiprocessing import Process
+
+ def foo():
+ print 'hello'
+
+ p = Process(target=foo)
+ p.start()
+
+ Instead one should protect the "entry point" of the program by using ``if
+ __name__ == '__main__':`` as follows::
+
+ from multiprocessing import Process, freeze_support
+
+ def foo():
+ print 'hello'
+
+ if __name__ == '__main__':
+ freeze_support()
+ p = Process(target=foo)
+ p.start()
+
+ (The :func:`freeze_support()` line can be omitted if the program will be run
+ normally instead of frozen.)
+
+ This allows the newly spawned Python interpreter to safely import the module
+ and then run the module's ``foo()`` function.
+
+ Similar restrictions apply if a pool or manager is created in the main
+ module.
+
+
+.. _multiprocessing-examples:
+
+Examples
+--------
+
+Demonstration of how to create and use customized managers and proxies:
+
+.. literalinclude:: ../includes/mp_newtype.py
+
+
+Using :class:`Pool`:
+
+.. literalinclude:: ../includes/mp_pool.py
+
+
+Synchronization types like locks, conditions and queues:
+
+.. literalinclude:: ../includes/mp_synchronize.py
+
+
+An example showing how to use queues to feed tasks to a collection of worker
+processes and collect the results:
+
+.. literalinclude:: ../includes/mp_workers.py
+
+
+An example of how a pool of worker processes can each run a
+:class:`BaseHTTPServer.HTTPServer` instance while sharing a single listening
+socket.
+
+.. literalinclude:: ../includes/mp_webserver.py
+
+
+Some simple benchmarks comparing :mod:`multiprocessing` with :mod:`threading`:
+
+.. literalinclude:: ../includes/mp_benchmarks.py
+
+An example/demo of how to use the :class:`managers.SyncManager`, :class:`Process`
+and others to build a system which can distribute processes and work via a
+distributed queue to a "cluster" of machines on a network, accessible via SSH.
+You will need to have private key authentication for all hosts configured for
+this to work.
+
+.. literalinclude:: ../includes/mp_distributing.py
\ No newline at end of file
threading.rst
dummy_thread.rst
dummy_threading.rst
+ multiprocessing.rst
mmap.rst
readline.rst
rlcompleter.rst
--- /dev/null
+#\r
+# Package analogous to 'threading.py' but using processes\r
+#\r
+# multiprocessing/__init__.py\r
+#\r
+# This package is intended to duplicate the functionality (and much of\r
+# the API) of threading.py but uses processes instead of threads. A\r
+# subpackage 'multiprocessing.dummy' has the same API but is a simple\r
+# wrapper for 'threading'.\r
+#\r
+# Try calling `multiprocessing.doc.main()` to read the html\r
+# documentation in a web browser.\r
+#\r
+#\r
+# Copyright (c) 2006-2008, R Oudkerk\r
+# All rights reserved.\r
+#\r
+# Redistribution and use in source and binary forms, with or without\r
+# modification, are permitted provided that the following conditions\r
+# are met:\r
+#\r
+# 1. Redistributions of source code must retain the above copyright\r
+# notice, this list of conditions and the following disclaimer.\r
+# 2. Redistributions in binary form must reproduce the above copyright\r
+# notice, this list of conditions and the following disclaimer in the\r
+# documentation and/or other materials provided with the distribution.\r
+# 3. Neither the name of author nor the names of any contributors may be\r
+# used to endorse or promote products derived from this software\r
+# without specific prior written permission.\r
+#\r
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND\r
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\r
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\r
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\r
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\r
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\r
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\r
+# SUCH DAMAGE.\r
+#\r
+\r
+__version__ = '0.70a1'\r
+\r
+__all__ = [\r
+ 'Process', 'current_process', 'active_children', 'freeze_support',\r
+ 'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',\r
+ 'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',\r
+ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',\r
+ 'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',\r
+ 'RawValue', 'RawArray'\r
+ ]\r
+\r
+__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'\r
+\r
+#\r
+# Imports\r
+#\r
+\r
+import os\r
+import sys\r
+\r
+import _multiprocessing\r
+from multiprocessing.process import Process, current_process, active_children\r
+\r
+#\r
+# Exceptions\r
+#\r
+\r
+class ProcessError(Exception):\r
+ pass\r
+ \r
+class BufferTooShort(ProcessError):\r
+ pass\r
+ \r
+class TimeoutError(ProcessError):\r
+ pass\r
+\r
+class AuthenticationError(ProcessError):\r
+ pass\r
+\r
+#\r
+# Definitions not depending on native semaphores\r
+#\r
+\r
+def Manager():\r
+ '''\r
+ Returns a manager associated with a running server process\r
+\r
+ The manager's methods such as `Lock()`, `Condition()` and `Queue()`\r
+ can be used to create shared objects.\r
+ '''\r
+ from multiprocessing.managers import SyncManager\r
+ m = SyncManager()\r
+ m.start()\r
+ return m\r
+\r
+def Pipe(duplex=True):\r
+ '''\r
+ Returns two connection objects connected by a pipe\r
+ '''\r
+ from multiprocessing.connection import Pipe\r
+ return Pipe(duplex)\r
+\r
+def cpu_count():\r
+ '''\r
+ Returns the number of CPUs in the system\r
+ '''\r
+ if sys.platform == 'win32':\r
+ try:\r
+ num = int(os.environ['NUMBER_OF_PROCESSORS'])\r
+ except (ValueError, KeyError):\r
+ num = 0\r
+ elif sys.platform == 'darwin':\r
+ try:\r
+ num = int(os.popen('sysctl -n hw.ncpu').read())\r
+ except ValueError:\r
+ num = 0\r
+ else:\r
+ try:\r
+ num = os.sysconf('SC_NPROCESSORS_ONLN')\r
+ except (ValueError, OSError, AttributeError):\r
+ num = 0\r
+ \r
+ if num >= 1:\r
+ return num\r
+ else:\r
+ raise NotImplementedError('cannot determine number of cpus')\r
+\r
+def freeze_support():\r
+ '''\r
+ Check whether this is a fake forked process in a frozen executable.\r
+ If so then run code specified by commandline and exit.\r
+ '''\r
+ if sys.platform == 'win32' and getattr(sys, 'frozen', False):\r
+ from multiprocessing.forking import freeze_support\r
+ freeze_support()\r
+\r
+def get_logger():\r
+ '''\r
+ Return package logger -- if it does not already exist then it is created\r
+ '''\r
+ from multiprocessing.util import get_logger\r
+ return get_logger()\r
+\r
+def log_to_stderr(level=None):\r
+ '''\r
+ Turn on logging and add a handler which prints to stderr\r
+ '''\r
+ from multiprocessing.util import log_to_stderr\r
+ return log_to_stderr(level)\r
+ \r
+def allow_connection_pickling():\r
+ '''\r
+ Install support for sending connections and sockets between processes\r
+ '''\r
+ from multiprocessing import reduction\r
+ \r
+#\r
+# Definitions depending on native semaphores\r
+#\r
+\r
+def Lock():\r
+ '''\r
+ Returns a non-recursive lock object\r
+ '''\r
+ from multiprocessing.synchronize import Lock\r
+ return Lock()\r
+\r
+def RLock():\r
+ '''\r
+ Returns a recursive lock object\r
+ '''\r
+ from multiprocessing.synchronize import RLock\r
+ return RLock()\r
+\r
+def Condition(lock=None):\r
+ '''\r
+ Returns a condition object\r
+ '''\r
+ from multiprocessing.synchronize import Condition\r
+ return Condition(lock)\r
+\r
+def Semaphore(value=1):\r
+ '''\r
+ Returns a semaphore object\r
+ '''\r
+ from multiprocessing.synchronize import Semaphore\r
+ return Semaphore(value)\r
+\r
+def BoundedSemaphore(value=1):\r
+ '''\r
+ Returns a bounded semaphore object\r
+ '''\r
+ from multiprocessing.synchronize import BoundedSemaphore\r
+ return BoundedSemaphore(value)\r
+\r
+def Event():\r
+ '''\r
+ Returns an event object\r
+ '''\r
+ from multiprocessing.synchronize import Event\r
+ return Event()\r
+\r
+def Queue(maxsize=0):\r
+ '''\r
+ Returns a queue object\r
+ '''\r
+ from multiprocessing.queues import Queue\r
+ return Queue(maxsize)\r
+\r
+def JoinableQueue(maxsize=0):\r
+ '''\r
+ Returns a queue object\r
+ '''\r
+ from multiprocessing.queues import JoinableQueue\r
+ return JoinableQueue(maxsize)\r
+\r
+def Pool(processes=None, initializer=None, initargs=()):\r
+ '''\r
+ Returns a process pool object\r
+ '''\r
+ from multiprocessing.pool import Pool\r
+ return Pool(processes, initializer, initargs)\r
+\r
+def RawValue(typecode_or_type, *args):\r
+ '''\r
+ Returns a shared object\r
+ '''\r
+ from multiprocessing.sharedctypes import RawValue\r
+ return RawValue(typecode_or_type, *args)\r
+\r
+def RawArray(typecode_or_type, size_or_initializer):\r
+ '''\r
+ Returns a shared array\r
+ '''\r
+ from multiprocessing.sharedctypes import RawArray\r
+ return RawArray(typecode_or_type, size_or_initializer)\r
+\r
+def Value(typecode_or_type, *args, **kwds):\r
+ '''\r
+ Returns a synchronized shared object\r
+ '''\r
+ from multiprocessing.sharedctypes import Value\r
+ return Value(typecode_or_type, *args, **kwds)\r
+\r
+def Array(typecode_or_type, size_or_initializer, **kwds):\r
+ '''\r
+ Returns a synchronized shared array\r
+ '''\r
+ from multiprocessing.sharedctypes import Array\r
+ return Array(typecode_or_type, size_or_initializer, **kwds)\r
+\r
+#\r
+#\r
+#\r
+\r
+if sys.platform == 'win32':\r
+\r
+ def set_executable(executable):\r
+ '''\r
+ Sets the path to a python.exe or pythonw.exe binary used to run\r
+ child processes on Windows instead of sys.executable.\r
+ Useful for people embedding Python. \r
+ '''\r
+ from multiprocessing.forking import set_executable\r
+ set_executable(executable)\r
+\r
+ __all__ += ['set_executable']\r
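+\r
+#\r
+# A minimal usage sketch of the factory wrappers above (illustrative\r
+# only; nothing in the package calls it)\r
+#\r
+\r
+def _factory_example():\r
+ lock = Lock()\r
+ queue = Queue(10)\r
+ counter = Value('i', 0)\r
+ # serialize updates to the shared counter, then pass the new\r
+ # value to a consumer through the queue\r
+ lock.acquire()\r
+ try:\r
+ counter.value += 1\r
+ queue.put(counter.value)\r
+ finally:\r
+ lock.release()\r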
--- /dev/null
+#\r
+# A higher level module for using sockets (or Windows named pipes)\r
+#\r
+# multiprocessing/connection.py\r
+#\r
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+__all__ = [ 'Client', 'Listener', 'Pipe' ]\r
+\r
+import os\r
+import sys\r
+import socket\r
+import time\r
+import tempfile\r
+import itertools\r
+import errno\r
+\r
+import _multiprocessing\r
+from multiprocessing import current_process, AuthenticationError\r
+from multiprocessing.util import get_temp_dir, Finalize, sub_debug, debug\r
+from multiprocessing.forking import duplicate, close\r
+\r
+\r
+#\r
+#\r
+#\r
+\r
+BUFSIZE = 8192\r
+\r
+_mmap_counter = itertools.count()\r
+\r
+default_family = 'AF_INET'\r
+families = ['AF_INET']\r
+\r
+if hasattr(socket, 'AF_UNIX'):\r
+ default_family = 'AF_UNIX'\r
+ families += ['AF_UNIX']\r
+\r
+if sys.platform == 'win32':\r
+ default_family = 'AF_PIPE'\r
+ families += ['AF_PIPE']\r
+\r
+#\r
+#\r
+#\r
+\r
+def arbitrary_address(family):\r
+ '''\r
+ Return an arbitrary free address for the given family\r
+ '''\r
+ if family == 'AF_INET':\r
+ return ('localhost', 0)\r
+ elif family == 'AF_UNIX': \r
+ return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())\r
+ elif family == 'AF_PIPE':\r
+ return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %\r
+ (os.getpid(), _mmap_counter.next()))\r
+ else:\r
+ raise ValueError('unrecognized family')\r
+\r
+\r
+def address_type(address):\r
+ '''\r
+ Return the type of the address\r
+\r
+ This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'\r
+ '''\r
+ if type(address) == tuple:\r
+ return 'AF_INET'\r
+ elif type(address) is str and address.startswith('\\\\'):\r
+ return 'AF_PIPE'\r
+ elif type(address) is str:\r
+ return 'AF_UNIX'\r
+ else:\r
+ raise ValueError('address type of %r unrecognized' % address)\r
+\r
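+# For example (a sketch of the rules above):\r
+#\r
+# address_type(('localhost', 0)) -> 'AF_INET'\r
+# address_type(r'\\.\pipe\pyc-0-0-a') -> 'AF_PIPE'\r
+# address_type('/tmp/listener-x') -> 'AF_UNIX'\r
+\r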
+#\r
+# Public functions\r
+#\r
+\r
+class Listener(object):\r
+ '''\r
+ Returns a listener object.\r
+\r
+ This is a wrapper for a bound socket which is 'listening' for\r
+ connections, or for a Windows named pipe.\r
+ '''\r
+ def __init__(self, address=None, family=None, backlog=1, authkey=None):\r
+ family = family or (address and address_type(address)) \\r
+ or default_family\r
+ address = address or arbitrary_address(family)\r
+\r
+ if family == 'AF_PIPE':\r
+ self._listener = PipeListener(address, backlog)\r
+ else:\r
+ self._listener = SocketListener(address, family, backlog)\r
+\r
+ if authkey is not None and not isinstance(authkey, bytes):\r
+ raise TypeError, 'authkey should be a byte string'\r
+\r
+ self._authkey = authkey\r
+\r
+ def accept(self):\r
+ '''\r
+ Accept a connection on the bound socket or named pipe of `self`.\r
+\r
+ Returns a `Connection` object.\r
+ '''\r
+ c = self._listener.accept()\r
+ if self._authkey:\r
+ deliver_challenge(c, self._authkey)\r
+ answer_challenge(c, self._authkey)\r
+ return c\r
+\r
+ def close(self):\r
+ '''\r
+ Close the bound socket or named pipe of `self`.\r
+ '''\r
+ return self._listener.close()\r
+\r
+ address = property(lambda self: self._listener._address)\r
+ last_accepted = property(lambda self: self._listener._last_accepted)\r
+\r
+\r
+def Client(address, family=None, authkey=None):\r
+ '''\r
+ Returns a connection to the address of a `Listener`\r
+ '''\r
+ family = family or address_type(address)\r
+ if family == 'AF_PIPE':\r
+ c = PipeClient(address)\r
+ else:\r
+ c = SocketClient(address)\r
+\r
+ if authkey is not None and not isinstance(authkey, bytes):\r
+ raise TypeError, 'authkey should be a byte string'\r
+\r
+ if authkey is not None:\r
+ answer_challenge(c, authkey)\r
+ deliver_challenge(c, authkey)\r
+\r
+ return c\r
+\r
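+# A self-contained round trip through Listener/Client (a sketch; with\r
+# an authkey the two ends would need to run in different threads or\r
+# processes, because the handshake blocks):\r
+\r
+def _listener_client_example():\r
+ listener = Listener()\r
+ client = Client(listener.address)\r
+ server_end = listener.accept()\r
+ server_end.send('hello')\r
+ assert client.recv() == 'hello'\r
+ client.close()\r
+ server_end.close()\r
+ listener.close()\r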
+\r
+if sys.platform != 'win32':\r
+\r
+ def Pipe(duplex=True):\r
+ '''\r
+ Returns pair of connection objects at either end of a pipe\r
+ '''\r
+ if duplex:\r
+ s1, s2 = socket.socketpair()\r
+ c1 = _multiprocessing.Connection(os.dup(s1.fileno()))\r
+ c2 = _multiprocessing.Connection(os.dup(s2.fileno()))\r
+ s1.close()\r
+ s2.close()\r
+ else:\r
+ fd1, fd2 = os.pipe()\r
+ c1 = _multiprocessing.Connection(fd1, writable=False)\r
+ c2 = _multiprocessing.Connection(fd2, readable=False)\r
+\r
+ return c1, c2\r
+ \r
+else:\r
+\r
+ from ._multiprocessing import win32\r
+\r
+ def Pipe(duplex=True):\r
+ '''\r
+ Returns pair of connection objects at either end of a pipe\r
+ '''\r
+ address = arbitrary_address('AF_PIPE')\r
+ if duplex:\r
+ openmode = win32.PIPE_ACCESS_DUPLEX\r
+ access = win32.GENERIC_READ | win32.GENERIC_WRITE\r
+ obsize, ibsize = BUFSIZE, BUFSIZE\r
+ else:\r
+ openmode = win32.PIPE_ACCESS_INBOUND\r
+ access = win32.GENERIC_WRITE\r
+ obsize, ibsize = 0, BUFSIZE\r
+\r
+ h1 = win32.CreateNamedPipe(\r
+ address, openmode,\r
+ win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |\r
+ win32.PIPE_WAIT,\r
+ 1, obsize, ibsize, win32.NMPWAIT_WAIT_FOREVER, win32.NULL\r
+ )\r
+ h2 = win32.CreateFile(\r
+ address, access, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL\r
+ )\r
+ win32.SetNamedPipeHandleState(\r
+ h2, win32.PIPE_READMODE_MESSAGE, None, None\r
+ )\r
+\r
+ try:\r
+ win32.ConnectNamedPipe(h1, win32.NULL)\r
+ except WindowsError, e:\r
+ if e.args[0] != win32.ERROR_PIPE_CONNECTED:\r
+ raise\r
+\r
+ c1 = _multiprocessing.PipeConnection(h1, writable=duplex)\r
+ c2 = _multiprocessing.PipeConnection(h2, readable=duplex)\r
+ \r
+ return c1, c2\r
+\r
+#\r
+# Definitions for connections based on sockets\r
+#\r
+\r
+class SocketListener(object):\r
+ '''\r
+ Representation of a socket which is bound to an address and listening\r
+ '''\r
+ def __init__(self, address, family, backlog=1):\r
+ self._socket = socket.socket(getattr(socket, family))\r
+ self._socket.bind(address)\r
+ self._socket.listen(backlog)\r
+ address = self._socket.getsockname()\r
+ if type(address) is tuple:\r
+ address = (socket.getfqdn(address[0]),) + address[1:]\r
+ self._address = address\r
+ self._family = family\r
+ self._last_accepted = None\r
+\r
+ sub_debug('listener bound to address %r', self._address)\r
+\r
+ if family == 'AF_UNIX':\r
+ self._unlink = Finalize(\r
+ self, os.unlink, args=(self._address,), exitpriority=0\r
+ )\r
+ else:\r
+ self._unlink = None\r
+\r
+ def accept(self):\r
+ s, self._last_accepted = self._socket.accept()\r
+ fd = duplicate(s.fileno())\r
+ conn = _multiprocessing.Connection(fd)\r
+ s.close()\r
+ return conn\r
+\r
+ def close(self):\r
+ self._socket.close()\r
+ if self._unlink is not None:\r
+ self._unlink()\r
+\r
+\r
+def SocketClient(address):\r
+ '''\r
+ Return a connection object connected to the socket given by `address`\r
+ '''\r
+ family = address_type(address)\r
+ s = socket.socket( getattr(socket, family) )\r
+\r
+ while 1:\r
+ try:\r
+ s.connect(address)\r
+ except socket.error, e:\r
+ # retry while the connection is being refused (errno.ECONNREFUSED\r
+ # on Unix; 10061 is the Windows WSAECONNREFUSED code)\r
+ if e.args[0] not in (errno.ECONNREFUSED, 10061):\r
+ debug('failed to connect to address %s', address)\r
+ raise\r
+ time.sleep(0.01)\r
+ else:\r
+ break\r
+\r
+ fd = duplicate(s.fileno())\r
+ conn = _multiprocessing.Connection(fd)\r
+ s.close()\r
+ return conn\r
+\r
+#\r
+# Definitions for connections based on named pipes\r
+#\r
+\r
+if sys.platform == 'win32':\r
+\r
+ class PipeListener(object):\r
+ '''\r
+ Representation of a named pipe\r
+ '''\r
+ def __init__(self, address, backlog=None):\r
+ self._address = address\r
+ handle = win32.CreateNamedPipe(\r
+ address, win32.PIPE_ACCESS_DUPLEX,\r
+ win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |\r
+ win32.PIPE_WAIT,\r
+ win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,\r
+ win32.NMPWAIT_WAIT_FOREVER, win32.NULL\r
+ )\r
+ self._handle_queue = [handle]\r
+ self._last_accepted = None\r
+ \r
+ sub_debug('listener created with address=%r', self._address)\r
+\r
+ self.close = Finalize(\r
+ self, PipeListener._finalize_pipe_listener,\r
+ args=(self._handle_queue, self._address), exitpriority=0\r
+ )\r
+ \r
+ def accept(self):\r
+ newhandle = win32.CreateNamedPipe(\r
+ self._address, win32.PIPE_ACCESS_DUPLEX,\r
+ win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |\r
+ win32.PIPE_WAIT,\r
+ win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,\r
+ win32.NMPWAIT_WAIT_FOREVER, win32.NULL\r
+ )\r
+ self._handle_queue.append(newhandle)\r
+ handle = self._handle_queue.pop(0)\r
+ try:\r
+ win32.ConnectNamedPipe(handle, win32.NULL)\r
+ except WindowsError, e:\r
+ if e.args[0] != win32.ERROR_PIPE_CONNECTED:\r
+ raise\r
+ return _multiprocessing.PipeConnection(handle)\r
+\r
+ @staticmethod\r
+ def _finalize_pipe_listener(queue, address):\r
+ sub_debug('closing listener with address=%r', address)\r
+ for handle in queue:\r
+ close(handle)\r
+ \r
+ def PipeClient(address):\r
+ '''\r
+ Return a connection object connected to the pipe given by `address`\r
+ '''\r
+ while 1:\r
+ try:\r
+ win32.WaitNamedPipe(address, 1000)\r
+ h = win32.CreateFile(\r
+ address, win32.GENERIC_READ | win32.GENERIC_WRITE,\r
+ 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL\r
+ )\r
+ except WindowsError, e:\r
+ if e.args[0] not in (win32.ERROR_SEM_TIMEOUT,\r
+ win32.ERROR_PIPE_BUSY):\r
+ raise\r
+ else:\r
+ break\r
+\r
+ win32.SetNamedPipeHandleState(\r
+ h, win32.PIPE_READMODE_MESSAGE, None, None\r
+ )\r
+ return _multiprocessing.PipeConnection(h)\r
+\r
+#\r
+# Authentication stuff\r
+#\r
+\r
+MESSAGE_LENGTH = 20\r
+\r
+CHALLENGE = '#CHALLENGE#'\r
+WELCOME = '#WELCOME#'\r
+FAILURE = '#FAILURE#'\r
+\r
+if sys.version_info >= (3, 0): # XXX can use bytes literals in 2.6/3.0\r
+ CHALLENGE = CHALLENGE.encode('ascii')\r
+ WELCOME = WELCOME.encode('ascii')\r
+ FAILURE = FAILURE.encode('ascii')\r
+\r
+def deliver_challenge(connection, authkey):\r
+ import hmac\r
+ assert isinstance(authkey, bytes)\r
+ message = os.urandom(MESSAGE_LENGTH)\r
+ connection.send_bytes(CHALLENGE + message)\r
+ digest = hmac.new(authkey, message).digest()\r
+ response = connection.recv_bytes(256) # reject large message\r
+ if response == digest:\r
+ connection.send_bytes(WELCOME)\r
+ else:\r
+ connection.send_bytes(FAILURE)\r
+ raise AuthenticationError('digest received was wrong')\r
+\r
+def answer_challenge(connection, authkey):\r
+ import hmac\r
+ assert isinstance(authkey, bytes)\r
+ message = connection.recv_bytes(256) # reject large message\r
+ assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message\r
+ message = message[len(CHALLENGE):]\r
+ digest = hmac.new(authkey, message).digest()\r
+ connection.send_bytes(digest)\r
+ response = connection.recv_bytes(256) # reject large message\r
+ if response != WELCOME:\r
+ raise AuthenticationError('digest sent was rejected')\r
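+\r
+# The handshake above in wire order (a sketch; both ends must share\r
+# the same authkey and normally run in different processes):\r
+#\r
+# server -> client: '#CHALLENGE#' + 20 random bytes\r
+# client -> server: hmac.new(authkey, those_20_bytes).digest()\r
+# server -> client: '#WELCOME#' on a match, '#FAILURE#' otherwise\r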
+\r
+#\r
+# Support for using xmlrpclib for serialization\r
+#\r
+\r
+class ConnectionWrapper(object):\r
+ def __init__(self, conn, dumps, loads):\r
+ self._conn = conn\r
+ self._dumps = dumps\r
+ self._loads = loads\r
+ for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):\r
+ obj = getattr(conn, attr)\r
+ setattr(self, attr, obj) \r
+ def send(self, obj):\r
+ s = self._dumps(obj)\r
+ self._conn.send_bytes(s)\r
+ def recv(self):\r
+ s = self._conn.recv_bytes()\r
+ return self._loads(s)\r
+\r
+def _xml_dumps(obj):\r
+ return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf8')\r
+\r
+def _xml_loads(s):\r
+ (obj,), method = xmlrpclib.loads(s.decode('utf8'))\r
+ return obj\r
+\r
+class XmlListener(Listener):\r
+ def accept(self):\r
+ global xmlrpclib\r
+ import xmlrpclib\r
+ obj = Listener.accept(self)\r
+ return ConnectionWrapper(obj, _xml_dumps, _xml_loads)\r
+\r
+def XmlClient(*args, **kwds):\r
+ global xmlrpclib\r
+ import xmlrpclib\r
+ return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)\r
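+\r
+# Round-trip sketch for the XML serializer (xmlrpclib is imported on\r
+# first use by XmlListener/XmlClient; values must be marshallable by\r
+# xmlrpclib):\r
+#\r
+# _xml_loads(_xml_dumps([1, 'two', 3.0])) == [1, 'two', 3.0]\r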
--- /dev/null
+#\r
+# Support for the API of the multiprocessing package using threads\r
+#\r
+# multiprocessing/dummy/__init__.py\r
+#\r
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+__all__ = [\r
+ 'Process', 'current_process', 'active_children', 'freeze_support',\r
+ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',\r
+ 'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'\r
+ ]\r
+\r
+#\r
+# Imports\r
+#\r
+\r
+import threading\r
+import sys\r
+import weakref\r
+import array\r
+import itertools\r
+\r
+from multiprocessing import TimeoutError, cpu_count\r
+from multiprocessing.dummy.connection import Pipe\r
+from threading import Lock, RLock, Semaphore, BoundedSemaphore\r
+from threading import Event\r
+from Queue import Queue\r
+\r
+#\r
+#\r
+#\r
+\r
+class DummyProcess(threading.Thread):\r
+\r
+ def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):\r
+ threading.Thread.__init__(self, group, target, name, args, kwargs)\r
+ self._pid = None\r
+ self._children = weakref.WeakKeyDictionary()\r
+ self._start_called = False\r
+ self._parent = current_process()\r
+\r
+ def start(self):\r
+ assert self._parent is current_process()\r
+ self._start_called = True\r
+ self._parent._children[self] = None\r
+ threading.Thread.start(self)\r
+\r
+ def get_exitcode(self):\r
+ if self._start_called and not self.isAlive():\r
+ return 0\r
+ else:\r
+ return None\r
+\r
+ # XXX\r
+ if sys.version_info < (3, 0):\r
+ is_alive = threading.Thread.isAlive.im_func\r
+ get_name = threading.Thread.getName.im_func\r
+ set_name = threading.Thread.setName.im_func\r
+ is_daemon = threading.Thread.isDaemon.im_func\r
+ set_daemon = threading.Thread.setDaemon.im_func\r
+ else:\r
+ is_alive = threading.Thread.isAlive\r
+ get_name = threading.Thread.getName\r
+ set_name = threading.Thread.setName\r
+ is_daemon = threading.Thread.isDaemon\r
+ set_daemon = threading.Thread.setDaemon\r
+\r
+#\r
+#\r
+#\r
+ \r
+class Condition(threading._Condition):\r
+ # XXX\r
+ if sys.version_info < (3, 0):\r
+ notify_all = threading._Condition.notifyAll.im_func\r
+ else:\r
+ notify_all = threading._Condition.notifyAll\r
+\r
+#\r
+#\r
+#\r
+\r
+Process = DummyProcess\r
+current_process = threading.currentThread\r
+current_process()._children = weakref.WeakKeyDictionary()\r
+\r
+def active_children():\r
+ children = current_process()._children\r
+ for p in list(children):\r
+ if not p.isAlive():\r
+ children.pop(p, None)\r
+ return list(children)\r
+\r
+def freeze_support():\r
+ pass\r
+\r
+#\r
+#\r
+#\r
+\r
+class Namespace(object):\r
+ def __init__(self, **kwds):\r
+ self.__dict__.update(kwds)\r
+ def __repr__(self):\r
+ items = self.__dict__.items()\r
+ temp = []\r
+ for name, value in items:\r
+ if not name.startswith('_'):\r
+ temp.append('%s=%r' % (name, value))\r
+ temp.sort()\r
+ return 'Namespace(%s)' % str.join(', ', temp)\r
+\r
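+# no real sharing happens in this module, so the builtin dict and list\r
+# types can stand in directly for the manager's shared container types\r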
+dict = dict\r
+list = list\r
+\r
+def Array(typecode, sequence, lock=True):\r
+ return array.array(typecode, sequence)\r
+\r
+class Value(object):\r
+ def __init__(self, typecode, value, lock=True):\r
+ self._typecode = typecode\r
+ self._value = value\r
+ def _get(self):\r
+ return self._value\r
+ def _set(self, value):\r
+ self._value = value\r
+ value = property(_get, _set)\r
+ def __repr__(self):\r
+ return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)\r
+\r
+def Manager():\r
+ return sys.modules[__name__]\r
+\r
+def shutdown():\r
+ pass\r
+\r
+def Pool(processes=None, initializer=None, initargs=()):\r
+ from multiprocessing.pool import ThreadPool\r
+ return ThreadPool(processes, initializer, initargs)\r
+\r
+JoinableQueue = Queue\r
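+\r
+# The thread-backed Process mirrors the multiprocessing API; a minimal\r
+# sketch (illustrative only, never called by the module):\r
+\r
+def _dummy_example():\r
+ q = Queue()\r
+ p = Process(target=q.put, args=('done',))\r
+ p.start()\r
+ p.join()\r
+ assert q.get() == 'done'\r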
--- /dev/null
+#\r
+# Analogue of `multiprocessing.connection` which uses queues instead of sockets\r
+#\r
+# multiprocessing/dummy/connection.py\r
+#\r
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+__all__ = [ 'Client', 'Listener', 'Pipe' ]\r
+\r
+from Queue import Queue\r
+\r
+\r
+families = [None]\r
+\r
+\r
+class Listener(object):\r
+\r
+ def __init__(self, address=None, family=None, backlog=1):\r
+ self._backlog_queue = Queue(backlog)\r
+\r
+ def accept(self):\r
+ return Connection(*self._backlog_queue.get())\r
+\r
+ def close(self):\r
+ self._backlog_queue = None\r
+\r
+ address = property(lambda self: self._backlog_queue)\r
+\r
+\r
+def Client(address):\r
+ _in, _out = Queue(), Queue()\r
+ address.put((_out, _in))\r
+ return Connection(_in, _out)\r
+\r
+\r
+def Pipe(duplex=True):\r
+ a, b = Queue(), Queue()\r
+ return Connection(a, b), Connection(b, a)\r
+\r
+\r
+class Connection(object):\r
+\r
+ def __init__(self, _in, _out):\r
+ self._out = _out\r
+ self._in = _in\r
+ self.send = self.send_bytes = _out.put\r
+ self.recv = self.recv_bytes = _in.get\r
+\r
+ def poll(self, timeout=0.0):\r
+ if self._in.qsize() > 0:\r
+ return True\r
+ if timeout <= 0.0:\r
+ return False\r
+ self._in.not_empty.acquire()\r
+ self._in.not_empty.wait(timeout)\r
+ self._in.not_empty.release()\r
+ return self._in.qsize() > 0\r
+\r
+ def close(self):\r
+ pass\r
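+\r
+\r
+# In-process sketch of the queue-backed Pipe above (no sockets are\r
+# involved; illustrative only):\r
+\r
+def _pipe_example():\r
+ a, b = Pipe()\r
+ a.send('ping')\r
+ assert b.recv() == 'ping'\r
+ assert not b.poll() # nothing more is waiting\r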
--- /dev/null
+#\r
+# Module for starting a process object using os.fork() or CreateProcess()\r
+#\r
+# multiprocessing/forking.py\r
+#\r
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+import os\r
+import sys\r
+import signal\r
+\r
+from multiprocessing import util, process\r
+\r
+__all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close']\r
+\r
+#\r
+# Check that the current thread is spawning a child process\r
+#\r
+\r
+def assert_spawning(obj):\r
+ if not Popen.thread_is_spawning():\r
+ raise RuntimeError(\r
+ '%s objects should only be shared between processes'\r
+ ' through inheritance' % type(obj).__name__\r
+ )\r
+\r
+#\r
+# Unix\r
+#\r
+\r
+if sys.platform != 'win32':\r
+ import time\r
+\r
+ exit = os._exit\r
+ duplicate = os.dup\r
+ close = os.close\r
+\r
+ #\r
+ # We define a Popen class similar to the one from subprocess, but\r
+ # whose constructor takes a process object as its argument.\r
+ #\r
+\r
+ class Popen(object):\r
+\r
+ def __init__(self, process_obj):\r
+ sys.stdout.flush()\r
+ sys.stderr.flush()\r
+ self.returncode = None\r
+\r
+ self.pid = os.fork()\r
+ if self.pid == 0:\r
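+ # in the child: reseed the random module if it was loaded, so\r
+ # forked processes do not share the parent's PRNG state\r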
+ if 'random' in sys.modules:\r
+ import random\r
+ random.seed()\r
+ code = process_obj._bootstrap()\r
+ sys.stdout.flush()\r
+ sys.stderr.flush()\r
+ os._exit(code)\r
+\r
+ def poll(self, flag=os.WNOHANG):\r
+ if self.returncode is None:\r
+ pid, sts = os.waitpid(self.pid, flag)\r
+ if pid == self.pid:\r
+ if os.WIFSIGNALED(sts):\r
+ self.returncode = -os.WTERMSIG(sts)\r
+ else:\r
+ assert os.WIFEXITED(sts)\r
+ self.returncode = os.WEXITSTATUS(sts)\r
+ return self.returncode\r
+\r
+ def wait(self, timeout=None):\r
+ if timeout is None:\r
+ return self.poll(0)\r
+ deadline = time.time() + timeout\r
+ delay = 0.0005\r
+ while 1:\r
+ res = self.poll()\r
+ if res is not None:\r
+ break\r
+ remaining = deadline - time.time()\r
+ if remaining <= 0:\r
+ break\r
+ delay = min(delay * 2, remaining, 0.05)\r
+ time.sleep(delay)\r
+ return res\r
+\r
+ def terminate(self):\r
+ if self.returncode is None:\r
+ try:\r
+ os.kill(self.pid, signal.SIGTERM)\r
+ except OSError, e:\r
+ if self.wait(timeout=0.1) is None:\r
+ raise\r
+ \r
+ @staticmethod\r
+ def thread_is_spawning():\r
+ return False\r
+\r
+#\r
+# Windows\r
+#\r
+\r
+else:\r
+ import thread\r
+ import msvcrt\r
+ import _subprocess\r
+ import copy_reg\r
+ import time\r
+ \r
+ from ._multiprocessing import win32, Connection, PipeConnection\r
+ from .util import Finalize\r
+ \r
+ try:\r
+ from cPickle import dump, load, HIGHEST_PROTOCOL\r
+ except ImportError:\r
+ from pickle import dump, load, HIGHEST_PROTOCOL\r
+\r
+ #\r
+ #\r
+ #\r
+\r
+ TERMINATE = 0x10000\r
+ WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))\r
+\r
+ exit = win32.ExitProcess\r
+ close = win32.CloseHandle\r
+\r
+ #\r
+ # _python_exe is the assumed path to the python executable.\r
+ # People embedding Python want to modify it.\r
+ #\r
+\r
+ if sys.executable.lower().endswith('pythonservice.exe'):\r
+ _python_exe = os.path.join(sys.exec_prefix, 'python.exe')\r
+ else:\r
+ _python_exe = sys.executable\r
+\r
+ def set_executable(exe):\r
+ global _python_exe\r
+ _python_exe = exe\r
+\r
+ #\r
+ #\r
+ #\r
+\r
+ def duplicate(handle, target_process=None, inheritable=False):\r
+ if target_process is None:\r
+ target_process = _subprocess.GetCurrentProcess()\r
+ return _subprocess.DuplicateHandle(\r
+ _subprocess.GetCurrentProcess(), handle, target_process,\r
+ 0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS\r
+ ).Detach()\r
+\r
+ #\r
+ # We define a Popen class similar to the one from subprocess, but\r
+ # whose constructor takes a process object as its argument.\r
+ #\r
+\r
+ class Popen(object):\r
+ '''\r
+ Start a subprocess to run the code of a process object\r
+ '''\r
+ _tls = thread._local()\r
+\r
+ def __init__(self, process_obj):\r
+ # create pipe for communication with child\r
+ rfd, wfd = os.pipe()\r
+\r
+ # get handle for read end of the pipe and make it inheritable\r
+ rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)\r
+ os.close(rfd)\r
+\r
+ # start process\r
+ cmd = get_command_line() + [rhandle]\r
+ cmd = ' '.join('"%s"' % x for x in cmd)\r
+ hp, ht, pid, tid = _subprocess.CreateProcess(\r
+ _python_exe, cmd, None, None, 1, 0, None, None, None\r
+ )\r
+ ht.Close()\r
+ close(rhandle)\r
+\r
+ # set attributes of self\r
+ self.pid = pid\r
+ self.returncode = None\r
+ self._handle = hp\r
+\r
+ # send information to child\r
+ prep_data = get_preparation_data(process_obj._name)\r
+ to_child = os.fdopen(wfd, 'wb')\r
+ Popen._tls.process_handle = int(hp)\r
+ try:\r
+ dump(prep_data, to_child, HIGHEST_PROTOCOL)\r
+ dump(process_obj, to_child, HIGHEST_PROTOCOL)\r
+ finally:\r
+ del Popen._tls.process_handle\r
+ to_child.close()\r
+\r
+ @staticmethod\r
+ def thread_is_spawning():\r
+ return getattr(Popen._tls, 'process_handle', None) is not None\r
+\r
+ @staticmethod\r
+ def duplicate_for_child(handle):\r
+ return duplicate(handle, Popen._tls.process_handle)\r
+\r
+ def wait(self, timeout=None):\r
+ if self.returncode is None:\r
+ if timeout is None:\r
+ msecs = _subprocess.INFINITE\r
+ else:\r
+ msecs = max(0, int(timeout * 1000 + 0.5))\r
+\r
+ res = _subprocess.WaitForSingleObject(int(self._handle), msecs)\r
+ if res == _subprocess.WAIT_OBJECT_0:\r
+ code = _subprocess.GetExitCodeProcess(self._handle)\r
+ if code == TERMINATE:\r
+ code = -signal.SIGTERM\r
+ self.returncode = code\r
+ \r
+ return self.returncode\r
+\r
+ def poll(self):\r
+ return self.wait(timeout=0)\r
+\r
+ def terminate(self):\r
+ if self.returncode is None:\r
+ try:\r
+ _subprocess.TerminateProcess(int(self._handle), TERMINATE)\r
+ except WindowsError:\r
+ if self.wait(timeout=0.1) is None:\r
+ raise\r
+ \r
+ #\r
+ #\r
+ #\r
+\r
+ def is_forking(argv):\r
+ '''\r
+ Return whether commandline indicates we are forking\r
+ '''\r
+ if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':\r
+ assert len(argv) == 3\r
+ return True\r
+ else:\r
+ return False\r
+\r
+\r
+ def freeze_support():\r
+ '''\r
+ Run code for process object if this is not the main process\r
+ '''\r
+ if is_forking(sys.argv):\r
+ main()\r
+ sys.exit()\r
+\r
+\r
+ def get_command_line():\r
+ '''\r
+ Returns prefix of command line used for spawning a child process\r
+ '''\r
+ if process.current_process()._identity==() and is_forking(sys.argv):\r
+ raise RuntimeError('''\r
+ Attempt to start a new process before the current process\r
+ has finished its bootstrapping phase.\r
+\r
+ This probably means that you are on Windows and you have\r
+ forgotten to use the proper idiom in the main module:\r
+\r
+ if __name__ == '__main__':\r
+ freeze_support()\r
+ ...\r
+\r
+ The "freeze_support()" line can be omitted if the program\r
+ is not going to be frozen to produce a Windows executable.''')\r
+\r
+ if getattr(sys, 'frozen', False):\r
+ return [sys.executable, '--multiprocessing-fork']\r
+ else:\r
+ prog = 'from multiprocessing.forking import main; main()'\r
+ return [_python_exe, '-c', prog, '--multiprocessing-fork']\r
+\r
+\r
+ def main():\r
+ '''\r
+ Run code specified by data received over pipe\r
+ '''\r
+ assert is_forking(sys.argv)\r
+\r
+ handle = int(sys.argv[-1])\r
+ fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)\r
+ from_parent = os.fdopen(fd, 'rb')\r
+\r
+ process.current_process()._inheriting = True\r
+ preparation_data = load(from_parent)\r
+ prepare(preparation_data)\r
+ self = load(from_parent)\r
+ process.current_process()._inheriting = False\r
+\r
+ from_parent.close()\r
+\r
+ exitcode = self._bootstrap()\r
+ exit(exitcode)\r
+\r
+\r
+ def get_preparation_data(name):\r
+ '''\r
+ Return info about parent needed by child to unpickle process object\r
+ '''\r
+ from .util import _logger, _log_to_stderr\r
+ \r
+ d = dict(\r
+ name=name,\r
+ sys_path=sys.path,\r
+ sys_argv=sys.argv,\r
+ log_to_stderr=_log_to_stderr,\r
+ orig_dir=process.ORIGINAL_DIR,\r
+ authkey=process.current_process().get_authkey(),\r
+ )\r
+ \r
+ if _logger is not None:\r
+ d['log_level'] = _logger.getEffectiveLevel()\r
+\r
+ if not WINEXE:\r
+ main_path = getattr(sys.modules['__main__'], '__file__', None)\r
+ if not main_path and sys.argv[0] not in ('', '-c'):\r
+ main_path = sys.argv[0]\r
+ if main_path is not None:\r
+ if not os.path.isabs(main_path) and \\r
+ process.ORIGINAL_DIR is not None:\r
+ main_path = os.path.join(process.ORIGINAL_DIR, main_path)\r
+ d['main_path'] = os.path.normpath(main_path)\r
+\r
+ return d\r
+\r
+ #\r
+ # Make (Pipe)Connection picklable\r
+ #\r
+ \r
+ def reduce_connection(conn):\r
+ if not Popen.thread_is_spawning():\r
+ raise RuntimeError(\r
+ 'By default %s objects can only be shared between processes\n'\r
+ 'using inheritance' % type(conn).__name__\r
+ )\r
+ return type(conn), (Popen.duplicate_for_child(conn.fileno()),\r
+ conn.readable, conn.writable)\r
+ \r
+ copy_reg.pickle(Connection, reduce_connection)\r
+ copy_reg.pickle(PipeConnection, reduce_connection)\r
+\r
+\r
+#\r
+# Prepare current process\r
+#\r
+\r
+old_main_modules = []\r
+\r
+def prepare(data):\r
+ '''\r
+ Try to get current process ready to unpickle process object\r
+ '''\r
+ old_main_modules.append(sys.modules['__main__'])\r
+\r
+ if 'name' in data:\r
+ process.current_process().set_name(data['name'])\r
+\r
+ if 'authkey' in data:\r
+ process.current_process()._authkey = data['authkey']\r
+ \r
+ if 'log_to_stderr' in data and data['log_to_stderr']:\r
+ util.log_to_stderr()\r
+\r
+ if 'log_level' in data:\r
+ util.get_logger().setLevel(data['log_level'])\r
+\r
+ if 'sys_path' in data:\r
+ sys.path = data['sys_path']\r
+\r
+ if 'sys_argv' in data:\r
+ sys.argv = data['sys_argv']\r
+\r
+ if 'dir' in data:\r
+ os.chdir(data['dir'])\r
+\r
+ if 'orig_dir' in data:\r
+ process.ORIGINAL_DIR = data['orig_dir']\r
+\r
+ if 'main_path' in data:\r
+ main_path = data['main_path']\r
+ main_name = os.path.splitext(os.path.basename(main_path))[0]\r
+ if main_name == '__init__':\r
+ main_name = os.path.basename(os.path.dirname(main_path))\r
+\r
+ if main_name != 'ipython':\r
+ import imp\r
+\r
+ if main_path is None:\r
+ dirs = None\r
+ elif os.path.basename(main_path).startswith('__init__.py'):\r
+ dirs = [os.path.dirname(os.path.dirname(main_path))]\r
+ else:\r
+ dirs = [os.path.dirname(main_path)]\r
+\r
+ assert main_name not in sys.modules, main_name\r
+ file, path_name, etc = imp.find_module(main_name, dirs)\r
+ try:\r
+ # We would like to do "imp.load_module('__main__', ...)"\r
+ # here. However, that would cause 'if __name__ ==\r
+ # "__main__"' clauses to be executed.\r
+ main_module = imp.load_module(\r
+ '__parents_main__', file, path_name, etc\r
+ )\r
+ finally:\r
+ if file:\r
+ file.close()\r
+\r
+ sys.modules['__main__'] = main_module\r
+ main_module.__name__ = '__main__'\r
+\r
+ # Try to make the potentially picklable objects in\r
+ # sys.modules['__main__'] realize they are in the main\r
+ # module -- somewhat ugly.\r
+ for obj in main_module.__dict__.values():\r
+ try:\r
+ if obj.__module__ == '__parents_main__':\r
+ obj.__module__ = '__main__'\r
+ except Exception:\r
+ pass\r
--- /dev/null
+#
+# Module which supports allocation of memory from an mmap
+#
+# multiprocessing/heap.py
+#
+# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
+#
+
+import bisect
+import mmap
+import tempfile
+import os
+import sys
+import threading
+import itertools
+
+import _multiprocessing
+from multiprocessing.util import Finalize, info
+from multiprocessing.forking import assert_spawning
+
+__all__ = ['BufferWrapper']
+
+#
+# Inheritable class which wraps an mmap, and from which blocks can be allocated
+#
+
+if sys.platform == 'win32':
+
+ from ._multiprocessing import win32
+
+ class Arena(object):
+
+ _counter = itertools.count()
+
+ def __init__(self, size):
+ self.size = size
+ self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
+ self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
+ assert win32.GetLastError() == 0, 'tagname already in use'
+ self._state = (self.size, self.name)
+
+ def __getstate__(self):
+ assert_spawning(self)
+ return self._state
+
+ def __setstate__(self, state):
+ self.size, self.name = self._state = state
+ self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
+ assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
+
+else:
+
+ class Arena(object):
+
+ def __init__(self, size):
+ self.buffer = mmap.mmap(-1, size)
+ self.size = size
+ self.name = None
+
+#
+# Class allowing allocation of chunks of memory from arenas
+#
+
+class Heap(object):
+
+ _alignment = 8
+
+ def __init__(self, size=mmap.PAGESIZE):
+ self._lastpid = os.getpid()
+ self._lock = threading.Lock()
+ self._size = size
+ self._lengths = []
+ self._len_to_seq = {}
+ self._start_to_block = {}
+ self._stop_to_block = {}
+ self._allocated_blocks = set()
+ self._arenas = []
+
+ @staticmethod
+ def _roundup(n, alignment):
+ # alignment must be a power of 2
+ mask = alignment - 1
+ return (n + mask) & ~mask
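+ # worked example: _roundup(13, 8) == (13 + 7) & ~7 == 16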
+
+ def _malloc(self, size):
+ # returns a large enough block -- it might be much larger
+ i = bisect.bisect_left(self._lengths, size)
+ if i == len(self._lengths):
+ length = self._roundup(max(self._size, size), mmap.PAGESIZE)
+ self._size *= 2
+ info('allocating a new mmap of length %d', length)
+ arena = Arena(length)
+ self._arenas.append(arena)
+ return (arena, 0, length)
+ else:
+ length = self._lengths[i]
+ seq = self._len_to_seq[length]
+ block = seq.pop()
+ if not seq:
+ del self._len_to_seq[length], self._lengths[i]
+
+ (arena, start, stop) = block
+ del self._start_to_block[(arena, start)]
+ del self._stop_to_block[(arena, stop)]
+ return block
+
+ def _free(self, block):
+ # free location and try to merge with neighbours
+ (arena, start, stop) = block
+
+ try:
+ prev_block = self._stop_to_block[(arena, start)]
+ except KeyError:
+ pass
+ else:
+ start, _ = self._absorb(prev_block)
+
+ try:
+ next_block = self._start_to_block[(arena, stop)]
+ except KeyError:
+ pass
+ else:
+ _, stop = self._absorb(next_block)
+
+ block = (arena, start, stop)
+ length = stop - start
+
+ try:
+ self._len_to_seq[length].append(block)
+ except KeyError:
+ self._len_to_seq[length] = [block]
+ bisect.insort(self._lengths, length)
+
+ self._start_to_block[(arena, start)] = block
+ self._stop_to_block[(arena, stop)] = block
+
+ def _absorb(self, block):
+ # deregister this block so it can be merged with a neighbour
+ (arena, start, stop) = block
+ del self._start_to_block[(arena, start)]
+ del self._stop_to_block[(arena, stop)]
+
+ length = stop - start
+ seq = self._len_to_seq[length]
+ seq.remove(block)
+ if not seq:
+ del self._len_to_seq[length]
+ self._lengths.remove(length)
+
+ return start, stop
+
+ def free(self, block):
+ # free a block returned by malloc()
+ assert os.getpid() == self._lastpid
+ self._lock.acquire()
+ try:
+ self._allocated_blocks.remove(block)
+ self._free(block)
+ finally:
+ self._lock.release()
+
+ def malloc(self, size):
+ # return a block of right size (possibly rounded up)
+ assert 0 <= size < sys.maxint
+ if os.getpid() != self._lastpid:
+ self.__init__() # reinitialize after fork
+ self._lock.acquire()
+ try:
+ size = self._roundup(max(size,1), self._alignment)
+ (arena, start, stop) = self._malloc(size)
+ new_stop = start + size
+ if new_stop < stop:
+ self._free((arena, new_stop, stop))
+ block = (arena, start, new_stop)
+ self._allocated_blocks.add(block)
+ return block
+ finally:
+ self._lock.release()
+
+#
+# Class representing a chunk of an mmap -- can be inherited
+#
+
+class BufferWrapper(object):
+
+ _heap = Heap()
+
+ def __init__(self, size):
+ assert 0 <= size < sys.maxint
+ block = BufferWrapper._heap.malloc(size)
+ self._state = (block, size)
+ Finalize(self, BufferWrapper._heap.free, args=(block,))
+
+ def get_address(self):
+ (arena, start, stop), size = self._state
+ address, length = _multiprocessing.address_of_buffer(arena.buffer)
+ assert size <= length
+ return address + start
+
+ def get_size(self):
+ return self._state[1]
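+
+# Allocation sketch (illustrative only): malloc() returns an
+# (arena, start, stop) triple rounded up to the 8 byte alignment, and
+# free() merges the block back with any free neighbours
+
+def _heap_example():
+ h = Heap()
+ block = h.malloc(1000) # stop - start == 1000 (already aligned)
+ h.free(block)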
--- /dev/null
+#\r
+# Module providing the `SyncManager` class for dealing\r
+# with shared objects\r
+#\r
+# multiprocessing/managers.py\r
+#\r
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]\r
+\r
+#\r
+# Imports\r
+#\r
+\r
+import os\r
+import sys\r
+import weakref\r
+import threading\r
+import array\r
+import copy_reg\r
+import Queue\r
+\r
+from traceback import format_exc\r
+from multiprocessing import Process, current_process, active_children, Pool, util, connection\r
+from multiprocessing.process import AuthenticationString\r
+from multiprocessing.forking import exit, Popen, assert_spawning\r
+from multiprocessing.util import Finalize, info\r
+\r
+try:\r
+ from cPickle import PicklingError\r
+except ImportError:\r
+ from pickle import PicklingError\r
+\r
+#\r
+#\r
+#\r
+\r
+try:\r
+ bytes\r
+except NameError:\r
+ bytes = str # XXX not needed in Py2.6 and Py3.0\r
+ \r
+#\r
+# Register some things for pickling\r
+#\r
+\r
+def reduce_array(a):\r
+ return array.array, (a.typecode, a.tostring())\r
+copy_reg.pickle(array.array, reduce_array)\r
+\r
+view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]\r
+if view_types[0] is not list: # XXX only needed in Py3.0\r
+ def rebuild_as_list(obj):\r
+ return list, (list(obj),)\r
+ for view_type in view_types:\r
+ copy_reg.pickle(view_type, rebuild_as_list)\r
+ \r
+#\r
+# Type for identifying shared objects\r
+#\r
+\r
+class Token(object):\r
+ '''\r
+ Type to uniquely identify a shared object\r
+ '''\r
+ __slots__ = ('typeid', 'address', 'id')\r
+\r
+ def __init__(self, typeid, address, id):\r
+ (self.typeid, self.address, self.id) = (typeid, address, id)\r
+\r
+ def __getstate__(self):\r
+ return (self.typeid, self.address, self.id)\r
+\r
+ def __setstate__(self, state):\r
+ (self.typeid, self.address, self.id) = state\r
+\r
+ def __repr__(self):\r
+ return 'Token(typeid=%r, address=%r, id=%r)' % \\r
+ (self.typeid, self.address, self.id)\r
+\r
+#\r
+# Function for communication with a manager's server process\r
+#\r
+\r
+def dispatch(c, id, methodname, args=(), kwds={}):\r
+ '''\r
+ Send a message to the manager using connection `c` and return the response\r
+ '''\r
+ c.send((id, methodname, args, kwds))\r
+ kind, result = c.recv()\r
+ if kind == '#RETURN':\r
+ return result\r
+ raise convert_to_error(kind, result)\r
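+\r
+# e.g. dispatch(conn, None, 'dummy') sends (None, 'dummy', (), {}) and\r
+# expects ('#RETURN', None) back -- every request and reply in this\r
+# module follows that (id, methodname, args, kwds) / (kind, result)\r
+# shape\r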
+\r
+def convert_to_error(kind, result):\r
+ if kind == '#ERROR':\r
+ return result\r
+ elif kind == '#TRACEBACK':\r
+ assert type(result) is str\r
+ return RemoteError(result)\r
+ elif kind == '#UNSERIALIZABLE':\r
+ assert type(result) is str\r
+ return RemoteError('Unserializable message: %s\n' % result)\r
+ else:\r
+ return ValueError('Unrecognized message type')\r
+ \r
+class RemoteError(Exception):\r
+ def __str__(self):\r
+ return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)\r
+\r
+#\r
+# Functions for finding the method names of an object\r
+#\r
+\r
+def all_methods(obj):\r
+ '''\r
+ Return a list of names of methods of `obj`\r
+ '''\r
+ temp = []\r
+ for name in dir(obj):\r
+ func = getattr(obj, name)\r
+ if hasattr(func, '__call__'):\r
+ temp.append(name)\r
+ return temp\r
+\r
+def public_methods(obj):\r
+ '''\r
+ Return a list of names of methods of `obj` which do not start with '_'\r
+ '''\r
+ return [name for name in all_methods(obj) if name[0] != '_']\r
+\r
+#\r
+# Server which is run in a process controlled by a manager\r
+#\r
+\r
+class Server(object):\r
+ '''\r
+ Server class which runs in a process controlled by a manager object\r
+ '''\r
+ public = ['shutdown', 'create', 'accept_connection', 'get_methods',\r
+ 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']\r
+\r
+ def __init__(self, registry, address, authkey, serializer):\r
+ assert isinstance(authkey, bytes)\r
+ self.registry = registry\r
+ self.authkey = AuthenticationString(authkey)\r
+ Listener, Client = listener_client[serializer]\r
+\r
+ # do authentication later\r
+ self.listener = Listener(address=address, backlog=5)\r
+ self.address = self.listener.address\r
+\r
+ self.id_to_obj = {0: (None, ())}\r
+ self.id_to_refcount = {}\r
+ self.mutex = threading.RLock()\r
+ self.stop = 0\r
+\r
+ def serve_forever(self):\r
+ '''\r
+ Run the server forever\r
+ '''\r
+ current_process()._manager_server = self\r
+ try:\r
+ try:\r
+ while 1:\r
+ try:\r
+ c = self.listener.accept()\r
+ except (OSError, IOError):\r
+ continue\r
+ t = threading.Thread(target=self.handle_request, args=(c,))\r
+ t.setDaemon(True)\r
+ t.start()\r
+ except (KeyboardInterrupt, SystemExit):\r
+ pass\r
+ finally:\r
+ self.stop = 999\r
+ self.listener.close()\r
+\r
+ def handle_request(self, c):\r
+ '''\r
+ Handle a new connection\r
+ '''\r
+ funcname = result = request = None\r
+ try:\r
+ connection.deliver_challenge(c, self.authkey)\r
+ connection.answer_challenge(c, self.authkey)\r
+ request = c.recv()\r
+ ignore, funcname, args, kwds = request\r
+ assert funcname in self.public, '%r unrecognized' % funcname\r
+ func = getattr(self, funcname)\r
+ except Exception:\r
+ msg = ('#TRACEBACK', format_exc())\r
+ else:\r
+ try:\r
+ result = func(c, *args, **kwds)\r
+ except Exception:\r
+ msg = ('#TRACEBACK', format_exc())\r
+ else:\r
+ msg = ('#RETURN', result)\r
+ try:\r
+ c.send(msg)\r
+ except Exception, e:\r
+ try:\r
+ c.send(('#TRACEBACK', format_exc()))\r
+ except Exception:\r
+ pass\r
+ util.info('Failure to send message: %r', msg)\r
+ util.info(' ... request was %r', request)\r
+ util.info(' ... exception was %r', e)\r
+\r
+ c.close()\r
+\r
+ def serve_client(self, conn):\r
+ '''\r
+ Handle requests from the proxies in a particular process/thread\r
+ '''\r
+ util.debug('starting server thread to service %r',\r
+ threading.currentThread().getName())\r
+\r
+ recv = conn.recv\r
+ send = conn.send\r
+ id_to_obj = self.id_to_obj\r
+\r
+ while not self.stop:\r
+\r
+ try:\r
+ methodname = obj = None\r
+ request = recv()\r
+ ident, methodname, args, kwds = request\r
+ obj, exposed, gettypeid = id_to_obj[ident]\r
+\r
+ if methodname not in exposed:\r
+ raise AttributeError(\r
+ 'method %r of %r object is not in exposed=%r' %\r
+ (methodname, type(obj), exposed)\r
+ )\r
+\r
+ function = getattr(obj, methodname)\r
+\r
+ try:\r
+ res = function(*args, **kwds)\r
+ except Exception, e:\r
+ msg = ('#ERROR', e)\r
+ else:\r
+ typeid = gettypeid and gettypeid.get(methodname, None)\r
+ if typeid:\r
+ rident, rexposed = self.create(conn, typeid, res)\r
+ token = Token(typeid, self.address, rident)\r
+ msg = ('#PROXY', (rexposed, token))\r
+ else:\r
+ msg = ('#RETURN', res)\r
+\r
+ except AttributeError:\r
+ if methodname is None:\r
+ msg = ('#TRACEBACK', format_exc())\r
+ else:\r
+ try:\r
+ fallback_func = self.fallback_mapping[methodname]\r
+ result = fallback_func(\r
+ self, conn, ident, obj, *args, **kwds\r
+ )\r
+ msg = ('#RETURN', result)\r
+ except Exception:\r
+ msg = ('#TRACEBACK', format_exc())\r
+\r
+ except EOFError:\r
+ util.debug('got EOF -- exiting thread serving %r',\r
+ threading.currentThread().getName())\r
+ sys.exit(0)\r
+\r
+ except Exception:\r
+ msg = ('#TRACEBACK', format_exc())\r
+\r
+ try:\r
+ try:\r
+ send(msg)\r
+ except Exception, e:\r
+ send(('#UNSERIALIZABLE', repr(msg)))\r
+ except Exception, e:\r
+ util.info('exception in thread serving %r',\r
+ threading.currentThread().getName())\r
+ util.info(' ... message was %r', msg)\r
+ util.info(' ... exception was %r', e)\r
+ conn.close()\r
+ sys.exit(1)\r
+\r
+ def fallback_getvalue(self, conn, ident, obj):\r
+ return obj\r
+\r
+ def fallback_str(self, conn, ident, obj):\r
+ return str(obj)\r
+\r
+ def fallback_repr(self, conn, ident, obj):\r
+ return repr(obj)\r
+\r
+ fallback_mapping = {\r
+ '__str__':fallback_str,\r
+ '__repr__':fallback_repr,\r
+ '#GETVALUE':fallback_getvalue\r
+ }\r
+\r
+ def dummy(self, c):\r
+ pass\r
+\r
+ def debug_info(self, c):\r
+ '''\r
+ Return some info --- useful to spot problems with refcounting\r
+ '''\r
+ self.mutex.acquire()\r
+ try:\r
+ result = []\r
+ keys = self.id_to_obj.keys()\r
+ keys.sort()\r
+ for ident in keys:\r
+ if ident != 0:\r
+ result.append(' %s: refcount=%s\n %s' %\r
+ (ident, self.id_to_refcount[ident],\r
+ str(self.id_to_obj[ident][0])[:75]))\r
+ return '\n'.join(result)\r
+ finally:\r
+ self.mutex.release()\r
+\r
+ def number_of_objects(self, c):\r
+ '''\r
+ Number of shared objects\r
+ '''\r
+ return len(self.id_to_obj) - 1 # don't count ident=0\r
+\r
+ def shutdown(self, c):\r
+ '''\r
+ Shut down this process\r
+ '''\r
+ try:\r
+ try:\r
+ util.debug('manager received shutdown message')\r
+ c.send(('#RETURN', None))\r
+\r
+ if sys.stdout != sys.__stdout__:\r
+ util.debug('resetting stdout, stderr')\r
+ sys.stdout = sys.__stdout__\r
+ sys.stderr = sys.__stderr__\r
+ \r
+ util._run_finalizers(0)\r
+\r
+ for p in active_children():\r
+ util.debug('terminating a child process of manager')\r
+ p.terminate()\r
+\r
+ for p in active_children():\r
+ util.debug('terminating a child process of manager')\r
+ p.join()\r
+\r
+ util._run_finalizers()\r
+ util.info('manager exiting with exitcode 0')\r
+ except:\r
+ import traceback\r
+ traceback.print_exc()\r
+ finally:\r
+ exit(0)\r
+ \r
+ def create(self, c, typeid, *args, **kwds):\r
+ '''\r
+ Create a new shared object and return its id\r
+ '''\r
+ self.mutex.acquire()\r
+ try:\r
+ callable, exposed, method_to_typeid, proxytype = \\r
+ self.registry[typeid]\r
+ \r
+ if callable is None:\r
+ assert len(args) == 1 and not kwds\r
+ obj = args[0]\r
+ else:\r
+ obj = callable(*args, **kwds)\r
+\r
+ if exposed is None:\r
+ exposed = public_methods(obj)\r
+ if method_to_typeid is not None:\r
+ assert type(method_to_typeid) is dict\r
+ exposed = list(exposed) + list(method_to_typeid)\r
+\r
+ ident = '%x' % id(obj) # convert to string because xmlrpclib\r
+ # only has 32 bit signed integers\r
+ util.debug('%r callable returned object with id %r', typeid, ident)\r
+\r
+ self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)\r
+ if ident not in self.id_to_refcount:\r
+ self.id_to_refcount[ident] = None\r
+ return ident, tuple(exposed)\r
+ finally:\r
+ self.mutex.release()\r
+\r
+ def get_methods(self, c, token):\r
+ '''\r
+ Return the methods of the shared object indicated by token\r
+ '''\r
+ return tuple(self.id_to_obj[token.id][1])\r
+\r
+ def accept_connection(self, c, name):\r
+ '''\r
+ Spawn a new thread to serve this connection\r
+ '''\r
+ threading.currentThread().setName(name)\r
+ c.send(('#RETURN', None))\r
+ self.serve_client(c)\r
+\r
+ def incref(self, c, ident):\r
+ self.mutex.acquire()\r
+ try:\r
+ try:\r
+ self.id_to_refcount[ident] += 1\r
+ except TypeError:\r
+ assert self.id_to_refcount[ident] is None\r
+ self.id_to_refcount[ident] = 1\r
+ finally:\r
+ self.mutex.release()\r
+\r
+ def decref(self, c, ident):\r
+ self.mutex.acquire()\r
+ try:\r
+ assert self.id_to_refcount[ident] >= 1\r
+ self.id_to_refcount[ident] -= 1\r
+ if self.id_to_refcount[ident] == 0:\r
+ del self.id_to_obj[ident], self.id_to_refcount[ident]\r
+ util.debug('disposing of obj with id %r', ident)\r
+ finally:\r
+ self.mutex.release()\r
+\r
+#\r
+# Class to represent state of a manager\r
+#\r
+\r
+class State(object):\r
+ __slots__ = ['value']\r
+ INITIAL = 0\r
+ STARTED = 1\r
+ SHUTDOWN = 2\r
+\r
+#\r
+# Mapping from serializer name to Listener and Client types\r
+#\r
+\r
+listener_client = {\r
+ 'pickle' : (connection.Listener, connection.Client),\r
+ 'xmlrpclib' : (connection.XmlListener, connection.XmlClient)\r
+ }\r
+\r
+#\r
+# Definition of BaseManager\r
+#\r
+\r
+class BaseManager(object):\r
+ '''\r
+ Base class for managers\r
+ '''\r
+ _registry = {}\r
+ _Server = Server\r
+ \r
+ def __init__(self, address=None, authkey=None, serializer='pickle'):\r
+ if authkey is None:\r
+ authkey = current_process().get_authkey()\r
+ self._address = address # XXX not final address if eg ('', 0)\r
+ self._authkey = AuthenticationString(authkey)\r
+ self._state = State()\r
+ self._state.value = State.INITIAL\r
+ self._serializer = serializer\r
+ self._Listener, self._Client = listener_client[serializer]\r
+\r
+ def __reduce__(self):\r
+ return type(self).from_address, \\r
+ (self._address, self._authkey, self._serializer)\r
+\r
+ def get_server(self):\r
+ '''\r
+ Return server object with serve_forever() method and address attribute\r
+ '''\r
+ assert self._state.value == State.INITIAL\r
+ return Server(self._registry, self._address,\r
+ self._authkey, self._serializer)\r
+\r
+ def connect(self):\r
+ '''\r
+ Connect manager object to the server process\r
+ '''\r
+ Listener, Client = listener_client[self._serializer]\r
+ conn = Client(self._address, authkey=self._authkey)\r
+ dispatch(conn, None, 'dummy')\r
+ self._state.value = State.STARTED\r
+ \r
+ def start(self):\r
+ '''\r
+ Spawn a server process for this manager object\r
+ '''\r
+ assert self._state.value == State.INITIAL\r
+\r
+ # pipe over which we will retrieve address of server\r
+ reader, writer = connection.Pipe(duplex=False)\r
+\r
+ # spawn process which runs a server\r
+ self._process = Process(\r
+ target=type(self)._run_server,\r
+ args=(self._registry, self._address, self._authkey,\r
+ self._serializer, writer),\r
+ )\r
+ ident = ':'.join(str(i) for i in self._process._identity)\r
+ self._process.set_name(type(self).__name__ + '-' + ident)\r
+ self._process.start()\r
+\r
+ # get address of server\r
+ writer.close()\r
+ self._address = reader.recv()\r
+ reader.close()\r
+\r
+ # register a finalizer\r
+ self._state.value = State.STARTED\r
+ self.shutdown = util.Finalize(\r
+ self, type(self)._finalize_manager,\r
+ args=(self._process, self._address, self._authkey,\r
+ self._state, self._Client),\r
+ exitpriority=0\r
+ )\r
+\r
+ @classmethod\r
+ def _run_server(cls, registry, address, authkey, serializer, writer):\r
+ '''\r
+ Create a server, report its address and run it\r
+ '''\r
+ # create server\r
+ server = cls._Server(registry, address, authkey, serializer)\r
+\r
+ # inform parent process of the server's address\r
+ writer.send(server.address)\r
+ writer.close()\r
+\r
+ # run the manager\r
+ util.info('manager serving at %r', server.address)\r
+ server.serve_forever()\r
+\r
+ def _create(self, typeid, *args, **kwds):\r
+ '''\r
+ Create a new shared object; return the token and exposed tuple\r
+ '''\r
+ assert self._state.value == State.STARTED, 'server not yet started'\r
+ conn = self._Client(self._address, authkey=self._authkey)\r
+ try:\r
+ id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)\r
+ finally:\r
+ conn.close()\r
+ return Token(typeid, self._address, id), exposed\r
+\r
+ def join(self, timeout=None):\r
+ '''\r
+ Join the manager process (if it has been spawned)\r
+ '''\r
+ self._process.join(timeout)\r
+\r
+ def _debug_info(self):\r
+ '''\r
+ Return some info about the server's shared objects and connections\r
+ '''\r
+ conn = self._Client(self._address, authkey=self._authkey)\r
+ try:\r
+ return dispatch(conn, None, 'debug_info')\r
+ finally:\r
+ conn.close()\r
+\r
+ def _number_of_objects(self):\r
+ '''\r
+ Return the number of shared objects\r
+ '''\r
+ conn = self._Client(self._address, authkey=self._authkey)\r
+ try: \r
+ return dispatch(conn, None, 'number_of_objects')\r
+ finally:\r
+ conn.close() \r
+\r
+ def __enter__(self):\r
+ return self\r
+\r
+ def __exit__(self, exc_type, exc_val, exc_tb):\r
+ self.shutdown()\r
+\r
+ @staticmethod\r
+ def _finalize_manager(process, address, authkey, state, _Client):\r
+ '''\r
+ Shut down the manager process; will be registered as a finalizer\r
+ '''\r
+ if process.is_alive():\r
+ util.info('sending shutdown message to manager')\r
+ try:\r
+ conn = _Client(address, authkey=authkey)\r
+ try:\r
+ dispatch(conn, None, 'shutdown')\r
+ finally:\r
+ conn.close()\r
+ except Exception:\r
+ pass\r
+\r
+ process.join(timeout=0.2)\r
+ if process.is_alive():\r
+ util.info('manager still alive')\r
+ if hasattr(process, 'terminate'):\r
+ util.info('trying to `terminate()` manager process')\r
+ process.terminate()\r
+ process.join(timeout=0.1)\r
+ if process.is_alive():\r
+ util.info('manager still alive after terminate')\r
+\r
+ state.value = State.SHUTDOWN\r
+ try:\r
+ del BaseProxy._address_to_local[address]\r
+ except KeyError:\r
+ pass\r
+ \r
+ address = property(lambda self: self._address)\r
+\r
+ @classmethod\r
+ def register(cls, typeid, callable=None, proxytype=None, exposed=None,\r
+ method_to_typeid=None, create_method=True):\r
+ '''\r
+ Register a typeid with the manager type\r
+ '''\r
+ if '_registry' not in cls.__dict__:\r
+ cls._registry = cls._registry.copy()\r
+\r
+ if proxytype is None:\r
+ proxytype = AutoProxy\r
+\r
+ exposed = exposed or getattr(proxytype, '_exposed_', None)\r
+\r
+ method_to_typeid = method_to_typeid or \\r
+ getattr(proxytype, '_method_to_typeid_', None)\r
+\r
+ if method_to_typeid:\r
+ for key, value in method_to_typeid.items():\r
+ assert type(key) is str, '%r is not a string' % key\r
+ assert type(value) is str, '%r is not a string' % value\r
+\r
+ cls._registry[typeid] = (\r
+ callable, exposed, method_to_typeid, proxytype\r
+ )\r
+ \r
+ if create_method:\r
+ def temp(self, *args, **kwds):\r
+ util.debug('requesting creation of a shared %r object', typeid)\r
+ token, exp = self._create(typeid, *args, **kwds)\r
+ proxy = proxytype(\r
+ token, self._serializer, manager=self,\r
+ authkey=self._authkey, exposed=exp\r
+ )\r
+ return proxy\r
+ temp.__name__ = typeid\r
+ setattr(cls, typeid, temp)\r
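+\r
+ # Typical use of register() -- a sketch with a hypothetical\r
+ # Maths class:\r
+ #\r
+ # class MathsManager(BaseManager):\r
+ # pass\r
+ # MathsManager.register('Maths', Maths)\r
+ # manager = MathsManager()\r
+ # manager.start()\r
+ # maths = manager.Maths() # an auto-generated proxy\r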
+\r
+#\r
+# Subclass of set which gets cleared after a fork\r
+#\r
+\r
+class ProcessLocalSet(set):\r
+ def __init__(self):\r
+ util.register_after_fork(self, lambda obj: obj.clear())\r
+ def __reduce__(self):\r
+ return type(self), ()\r
+\r
+#\r
+# Definition of BaseProxy\r
+#\r
+\r
+class BaseProxy(object):\r
+ '''\r
+ A base for proxies of shared objects\r
+ '''\r
+ _address_to_local = {}\r
+ _mutex = util.ForkAwareThreadLock()\r
+\r
+ def __init__(self, token, serializer, manager=None,\r
+ authkey=None, exposed=None, incref=True):\r
+ BaseProxy._mutex.acquire()\r
+ try:\r
+ tls_idset = BaseProxy._address_to_local.get(token.address, None)\r
+ if tls_idset is None:\r
+ tls_idset = util.ForkAwareLocal(), ProcessLocalSet()\r
+ BaseProxy._address_to_local[token.address] = tls_idset\r
+ finally:\r
+ BaseProxy._mutex.release()\r
+\r
+ # self._tls is used to record the connection used by this\r
+ # thread to communicate with the manager at token.address\r
+ self._tls = tls_idset[0]\r
+\r
+ # self._idset is used to record the identities of all shared\r
+ # objects for which the current process owns references and\r
+ # which are in the manager at token.address\r
+ self._idset = tls_idset[1]\r
+\r
+ self._token = token\r
+ self._id = self._token.id\r
+ self._manager = manager\r
+ self._serializer = serializer\r
+ self._Client = listener_client[serializer][1]\r
+\r
+ if authkey is not None:\r
+ self._authkey = AuthenticationString(authkey)\r
+ elif self._manager is not None:\r
+ self._authkey = self._manager._authkey\r
+ else:\r
+ self._authkey = current_process().get_authkey()\r
+\r
+ if incref:\r
+ self._incref()\r
+ \r
+ util.register_after_fork(self, BaseProxy._after_fork)\r
+ \r
+ def _connect(self):\r
+ util.debug('making connection to manager')\r
+ name = current_process().get_name()\r
+ if threading.currentThread().getName() != 'MainThread':\r
+ name += '|' + threading.currentThread().getName()\r
+ conn = self._Client(self._token.address, authkey=self._authkey)\r
+ dispatch(conn, None, 'accept_connection', (name,))\r
+ self._tls.connection = conn\r
+ \r
+ def _callmethod(self, methodname, args=(), kwds={}):\r
+ '''\r
+ Try to call a method of the referent and return a copy of the result\r
+ '''\r
+ try:\r
+ conn = self._tls.connection\r
+ except AttributeError:\r
+ util.debug('thread %r does not own a connection',\r
+ threading.currentThread().getName())\r
+ self._connect()\r
+ conn = self._tls.connection\r
+\r
+ conn.send((self._id, methodname, args, kwds))\r
+ kind, result = conn.recv()\r
+ \r
+ if kind == '#RETURN':\r
+ return result\r
+ elif kind == '#PROXY':\r
+ exposed, token = result\r
+ proxytype = self._manager._registry[token.typeid][-1]\r
+ return proxytype(\r
+ token, self._serializer, manager=self._manager,\r
+ authkey=self._authkey, exposed=exposed\r
+ )\r
+ raise convert_to_error(kind, result)\r
+\r
+ def _getvalue(self):\r
+ '''\r
+ Get a copy of the value of the referent\r
+ '''\r
+ return self._callmethod('#GETVALUE')\r
+\r
+ def _incref(self):\r
+ conn = self._Client(self._token.address, authkey=self._authkey)\r
+ dispatch(conn, None, 'incref', (self._id,))\r
+ util.debug('INCREF %r', self._token.id)\r
+\r
+ self._idset.add(self._id)\r
+\r
+ state = self._manager and self._manager._state\r
+\r
+ self._close = util.Finalize(\r
+ self, BaseProxy._decref,\r
+ args=(self._token, self._authkey, state,\r
+ self._tls, self._idset, self._Client),\r
+ exitpriority=10\r
+ )\r
+\r
+ @staticmethod\r
+ def _decref(token, authkey, state, tls, idset, _Client):\r
+ idset.discard(token.id)\r
+\r
+ # check whether manager is still alive\r
+ if state is None or state.value == State.STARTED:\r
+ # tell manager this process no longer cares about referent\r
+ try:\r
+ util.debug('DECREF %r', token.id)\r
+ conn = _Client(token.address, authkey=authkey)\r
+ dispatch(conn, None, 'decref', (token.id,))\r
+ except Exception, e:\r
+ util.debug('... decref failed %s', e)\r
+\r
+ else:\r
+ util.debug('DECREF %r -- manager already shutdown', token.id)\r
+\r
+ # check whether we can close this thread's connection because\r
+ # the process owns no more references to objects for this manager\r
+ if not idset and hasattr(tls, 'connection'):\r
+ util.debug('thread %r has no more proxies so closing conn',\r
+ threading.currentThread().getName())\r
+ tls.connection.close()\r
+ del tls.connection\r
+ \r
+ def _after_fork(self):\r
+ self._manager = None\r
+ try:\r
+ self._incref()\r
+ except Exception, e:\r
+ # the proxy may just be for a manager which has shutdown\r
+ util.info('incref failed: %s' % e)\r
+\r
+ def __reduce__(self):\r
+ kwds = {}\r
+ if Popen.thread_is_spawning():\r
+ kwds['authkey'] = self._authkey\r
+ \r
+ if getattr(self, '_isauto', False):\r
+ kwds['exposed'] = self._exposed_\r
+ return (RebuildProxy,\r
+ (AutoProxy, self._token, self._serializer, kwds))\r
+ else:\r
+ return (RebuildProxy,\r
+ (type(self), self._token, self._serializer, kwds))\r
+\r
+ def __deepcopy__(self, memo):\r
+ return self._getvalue()\r
+ \r
+ def __repr__(self):\r
+ return '<%s object, typeid %r at %s>' % \\r
+ (type(self).__name__, self._token.typeid, '0x%x' % id(self))\r
+\r
+ def __str__(self):\r
+ '''\r
+ Return representation of the referent (or a fall-back if that fails)\r
+ '''\r
+ try:\r
+ return self._callmethod('__repr__')\r
+ except Exception:\r
+ return repr(self)[:-1] + "; '__str__()' failed>"\r
+\r
+#\r
+# Function used for unpickling\r
+#\r
+\r
+def RebuildProxy(func, token, serializer, kwds):\r
+ '''\r
+ Function used for unpickling proxy objects.\r
+\r
+ If possible the shared object itself is returned, otherwise a proxy for it.\r
+ '''\r
+ server = getattr(current_process(), '_manager_server', None)\r
+ \r
+ if server and server.address == token.address:\r
+ return server.id_to_obj[token.id][0]\r
+ else:\r
+ incref = (\r
+ kwds.pop('incref', True) and\r
+ not getattr(current_process(), '_inheriting', False)\r
+ )\r
+ return func(token, serializer, incref=incref, **kwds)\r
+\r
+#\r
+# Functions to create proxies and proxy types\r
+#\r
+\r
+def MakeProxyType(name, exposed, _cache={}):\r
+ '''\r
+ Return a proxy type whose methods are given by `exposed`\r
+ '''\r
+ exposed = tuple(exposed)\r
+ try:\r
+ return _cache[(name, exposed)]\r
+ except KeyError:\r
+ pass\r
+\r
+ dic = {}\r
+\r
+ for meth in exposed:\r
+ exec '''def %s(self, *args, **kwds):\r
+ return self._callmethod(%r, args, kwds)''' % (meth, meth) in dic\r
+\r
+ ProxyType = type(name, (BaseProxy,), dic)\r
+ ProxyType._exposed_ = exposed\r
+ _cache[(name, exposed)] = ProxyType\r
+ return ProxyType\r
+\r
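+# e.g. MakeProxyType('CounterProxy', ('increment', 'get')) builds a\r
+# BaseProxy subclass whose increment() and get() methods forward to\r
+# _callmethod() (the names here are hypothetical)\r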
+\r
+def AutoProxy(token, serializer, manager=None, authkey=None,\r
+ exposed=None, incref=True):\r
+ '''\r
+ Return an auto-proxy for `token`\r
+ '''\r
+ _Client = listener_client[serializer][1]\r
+ \r
+ if exposed is None:\r
+ conn = _Client(token.address, authkey=authkey)\r
+ try:\r
+ exposed = dispatch(conn, None, 'get_methods', (token,))\r
+ finally:\r
+ conn.close()\r
+\r
+ if authkey is None and manager is not None:\r
+ authkey = manager._authkey\r
+ if authkey is None:\r
+ authkey = current_process().get_authkey()\r
+\r
+ ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)\r
+ proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,\r
+ incref=incref)\r
+ proxy._isauto = True\r
+ return proxy\r
+\r
+#\r
+# Types/callables which we will register with SyncManager\r
+#\r
+\r
+class Namespace(object):\r
+ def __init__(self, **kwds):\r
+ self.__dict__.update(kwds)\r
+ def __repr__(self):\r
+ items = self.__dict__.items()\r
+ temp = []\r
+ for name, value in items:\r
+ if not name.startswith('_'):\r
+ temp.append('%s=%r' % (name, value))\r
+ temp.sort()\r
+ return 'Namespace(%s)' % ', '.join(temp)\r
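+\r
+# Illustrative sketch (not part of the module): `Namespace` instances are\r
+# normally reached through a manager proxy, e.g.\r
+#\r
+#     manager = multiprocessing.Manager()\r
+#     ns = manager.Namespace()\r
+#     ns.x = 10          # attribute access is forwarded to the server\r
+#     print ns.x         # 10\r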
+\r
+class Value(object):\r
+ def __init__(self, typecode, value, lock=True):\r
+ self._typecode = typecode\r
+ self._value = value\r
+ def get(self):\r
+ return self._value\r
+ def set(self, value):\r
+ self._value = value\r
+ def __repr__(self):\r
+ return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)\r
+ value = property(get, set)\r
+\r
+def Array(typecode, sequence, lock=True):\r
+ return array.array(typecode, sequence)\r
+\r
+#\r
+# Proxy types used by SyncManager\r
+#\r
+\r
+class IteratorProxy(BaseProxy):\r
+ # XXX remove methods for Py3.0 and Py2.6\r
+ _exposed_ = ('__next__', 'next', 'send', 'throw', 'close')\r
+ def __iter__(self):\r
+ return self\r
+ def __next__(self, *args):\r
+ return self._callmethod('__next__', args)\r
+ def next(self, *args):\r
+ return self._callmethod('next', args)\r
+ def send(self, *args):\r
+ return self._callmethod('send', args)\r
+ def throw(self, *args):\r
+ return self._callmethod('throw', args)\r
+ def close(self, *args):\r
+ return self._callmethod('close', args)\r
+\r
+\r
+class AcquirerProxy(BaseProxy):\r
+ _exposed_ = ('acquire', 'release')\r
+ def acquire(self, blocking=True):\r
+ return self._callmethod('acquire', (blocking,))\r
+ def release(self):\r
+ return self._callmethod('release')\r
+ def __enter__(self):\r
+ return self._callmethod('acquire')\r
+ def __exit__(self, exc_type, exc_val, exc_tb):\r
+ return self._callmethod('release')\r
+\r
+\r
+class ConditionProxy(AcquirerProxy):\r
+ # XXX will the Condition.notifyAll() name be available in Py3.0?\r
+ _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notifyAll')\r
+ def wait(self, timeout=None):\r
+ return self._callmethod('wait', (timeout,))\r
+ def notify(self):\r
+ return self._callmethod('notify')\r
+ def notify_all(self):\r
+ return self._callmethod('notifyAll')\r
+\r
+class EventProxy(BaseProxy):\r
+ # XXX will Event.isSet name be available in Py3.0?\r
+ _exposed_ = ('isSet', 'set', 'clear', 'wait')\r
+ def is_set(self):\r
+ return self._callmethod('isSet')\r
+ def set(self):\r
+ return self._callmethod('set')\r
+ def clear(self):\r
+ return self._callmethod('clear')\r
+ def wait(self, timeout=None):\r
+ return self._callmethod('wait', (timeout,))\r
+\r
+class NamespaceProxy(BaseProxy):\r
+ _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')\r
+ def __getattr__(self, key):\r
+ if key[0] == '_':\r
+ return object.__getattribute__(self, key)\r
+ callmethod = object.__getattribute__(self, '_callmethod')\r
+ return callmethod('__getattribute__', (key,)) \r
+ def __setattr__(self, key, value):\r
+ if key[0] == '_':\r
+ return object.__setattr__(self, key, value)\r
+ callmethod = object.__getattribute__(self, '_callmethod')\r
+ return callmethod('__setattr__', (key, value))\r
+ def __delattr__(self, key):\r
+ if key[0] == '_':\r
+ return object.__delattr__(self, key)\r
+ callmethod = object.__getattribute__(self, '_callmethod')\r
+ return callmethod('__delattr__', (key,))\r
+\r
+ \r
+class ValueProxy(BaseProxy):\r
+ _exposed_ = ('get', 'set')\r
+ def get(self):\r
+ return self._callmethod('get')\r
+ def set(self, value):\r
+ return self._callmethod('set', (value,))\r
+ value = property(get, set)\r
+\r
+\r
+BaseListProxy = MakeProxyType('BaseListProxy', (\r
+ '__add__', '__contains__', '__delitem__', '__delslice__',\r
+ '__getitem__', '__getslice__', '__len__', '__mul__',\r
+ '__reversed__', '__rmul__', '__setitem__', '__setslice__',\r
+ 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',\r
+ 'reverse', 'sort', '__imul__'\r
+ )) # XXX __getslice__ and __setslice__ unneeded in Py3.0\r
+class ListProxy(BaseListProxy):\r
+ def __iadd__(self, value):\r
+ self._callmethod('extend', (value,))\r
+ return self\r
+ def __imul__(self, value):\r
+ self._callmethod('__imul__', (value,))\r
+ return self\r
+\r
+\r
+DictProxy = MakeProxyType('DictProxy', (\r
+ '__contains__', '__delitem__', '__getitem__', '__len__',\r
+ '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',\r
+ 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'\r
+ ))\r
+\r
+\r
+ArrayProxy = MakeProxyType('ArrayProxy', (\r
+ '__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__'\r
+ )) # XXX __getslice__ and __setslice__ unneeded in Py3.0\r
+\r
+\r
+PoolProxy = MakeProxyType('PoolProxy', (\r
+ 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',\r
+ 'map', 'map_async', 'terminate'\r
+ ))\r
+PoolProxy._method_to_typeid_ = {\r
+ 'apply_async': 'AsyncResult',\r
+ 'map_async': 'AsyncResult',\r
+ 'imap': 'Iterator',\r
+ 'imap_unordered': 'Iterator'\r
+ }\r
+\r
+#\r
+# Definition of SyncManager\r
+#\r
+\r
+class SyncManager(BaseManager):\r
+ '''\r
+ Subclass of `BaseManager` which supports a number of shared object types.\r
+ \r
+ The types registered are those intended for the synchronization\r
+ of threads, plus `dict`, `list` and `Namespace`.\r
+ \r
+ The `multiprocessing.Manager()` function creates started instances of\r
+ this class.\r
+ '''\r
+\r
+SyncManager.register('Queue', Queue.Queue)\r
+SyncManager.register('JoinableQueue', Queue.Queue)\r
+SyncManager.register('Event', threading.Event, EventProxy)\r
+SyncManager.register('Lock', threading.Lock, AcquirerProxy)\r
+SyncManager.register('RLock', threading.RLock, AcquirerProxy)\r
+SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)\r
+SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,\r
+ AcquirerProxy)\r
+SyncManager.register('Condition', threading.Condition, ConditionProxy)\r
+SyncManager.register('Pool', Pool, PoolProxy)\r
+SyncManager.register('list', list, ListProxy)\r
+SyncManager.register('dict', dict, DictProxy)\r
+SyncManager.register('Value', Value, ValueProxy)\r
+SyncManager.register('Array', Array, ArrayProxy)\r
+SyncManager.register('Namespace', Namespace, NamespaceProxy)\r
+\r
+# types returned by methods of PoolProxy\r
+SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)\r
+SyncManager.register('AsyncResult', create_method=False)\r
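+\r
+# Illustrative usage of the registered types (a sketch, not part of the\r
+# module):\r
+#\r
+#     from multiprocessing import Manager\r
+#\r
+#     manager = Manager()           # a started SyncManager instance\r
+#     d = manager.dict()            # DictProxy\r
+#     l = manager.list(range(5))    # ListProxy\r
+#     l.append(5)                   # forwarded to the server process\r
+#     d['length'] = len(l)          # 6\r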
--- /dev/null
+#\r
+# Module providing the `Pool` class for managing a process pool\r
+#\r
+# multiprocessing/pool.py\r
+#\r
+# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+__all__ = ['Pool']\r
+\r
+#\r
+# Imports\r
+#\r
+\r
+import threading\r
+import Queue\r
+import itertools\r
+import collections\r
+import time\r
+\r
+from multiprocessing import Process, cpu_count, TimeoutError\r
+from multiprocessing.util import Finalize, debug\r
+\r
+#\r
+# Constants representing the state of a pool\r
+#\r
+\r
+RUN = 0\r
+CLOSE = 1\r
+TERMINATE = 2\r
+\r
+#\r
+# Miscellaneous\r
+#\r
+\r
+job_counter = itertools.count()\r
+\r
+def mapstar(args):\r
+ return map(*args)\r
+\r
+#\r
+# Code run by worker processes\r
+#\r
+\r
+def worker(inqueue, outqueue, initializer=None, initargs=()):\r
+ put = outqueue.put\r
+ get = inqueue.get\r
+ if hasattr(inqueue, '_writer'):\r
+ inqueue._writer.close()\r
+ outqueue._reader.close()\r
+\r
+ if initializer is not None:\r
+ initializer(*initargs)\r
+\r
+ while 1:\r
+ try:\r
+ task = get()\r
+ except (EOFError, IOError):\r
+ debug('worker got EOFError or IOError -- exiting')\r
+ break\r
+ \r
+ if task is None:\r
+ debug('worker got sentinel -- exiting')\r
+ break\r
+ \r
+ job, i, func, args, kwds = task\r
+ try:\r
+ result = (True, func(*args, **kwds))\r
+ except Exception, e:\r
+ result = (False, e)\r
+ put((job, i, result))\r
+ \r
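+# Each task fetched by `worker()` is a tuple `(job, i, func, args, kwds)`\r
+# and each result put back is `(job, i, (success, value))`, where `success`\r
+# is False when `func` raised; `_handle_results()` below routes the result\r
+# to the matching `ApplyResult`/`MapResult` via the `job` key.\r
+\r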
+#\r
+# Class representing a process pool\r
+#\r
+\r
+class Pool(object):\r
+ '''\r
+ Class which supports an async version of the `apply()` builtin\r
+ '''\r
+ Process = Process\r
+\r
+ def __init__(self, processes=None, initializer=None, initargs=()):\r
+ self._setup_queues()\r
+ self._taskqueue = Queue.Queue()\r
+ self._cache = {}\r
+ self._state = RUN\r
+\r
+ if processes is None:\r
+ try:\r
+ processes = cpu_count()\r
+ except NotImplementedError:\r
+ processes = 1\r
+ \r
+ self._pool = []\r
+ for i in range(processes):\r
+ w = self.Process(\r
+ target=worker,\r
+ args=(self._inqueue, self._outqueue, initializer, initargs)\r
+ )\r
+ self._pool.append(w)\r
+ w.set_name(w.get_name().replace('Process', 'PoolWorker'))\r
+ w.set_daemon(True)\r
+ w.start()\r
+ \r
+ self._task_handler = threading.Thread(\r
+ target=Pool._handle_tasks,\r
+ args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)\r
+ )\r
+ self._task_handler.setDaemon(True)\r
+ self._task_handler._state = RUN\r
+ self._task_handler.start()\r
+\r
+ self._result_handler = threading.Thread(\r
+ target=Pool._handle_results,\r
+ args=(self._outqueue, self._quick_get, self._cache)\r
+ )\r
+ self._result_handler.setDaemon(True)\r
+ self._result_handler._state = RUN\r
+ self._result_handler.start()\r
+\r
+ self._terminate = Finalize(\r
+ self, self._terminate_pool,\r
+ args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,\r
+ self._task_handler, self._result_handler, self._cache),\r
+ exitpriority=15\r
+ )\r
+\r
+ def _setup_queues(self):\r
+ from .queues import SimpleQueue\r
+ self._inqueue = SimpleQueue()\r
+ self._outqueue = SimpleQueue()\r
+ self._quick_put = self._inqueue._writer.send\r
+ self._quick_get = self._outqueue._reader.recv\r
+ \r
+ def apply(self, func, args=(), kwds={}):\r
+ '''\r
+ Equivalent of `apply()` builtin\r
+ '''\r
+ assert self._state == RUN\r
+ return self.apply_async(func, args, kwds).get()\r
+\r
+ def map(self, func, iterable, chunksize=None):\r
+ '''\r
+ Equivalent of `map()` builtin\r
+ '''\r
+ assert self._state == RUN\r
+ return self.map_async(func, iterable, chunksize).get()\r
+\r
+ def imap(self, func, iterable, chunksize=1):\r
+ '''\r
+ Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()`\r
+ '''\r
+ assert self._state == RUN\r
+ if chunksize == 1:\r
+ result = IMapIterator(self._cache)\r
+ self._taskqueue.put((((result._job, i, func, (x,), {})\r
+ for i, x in enumerate(iterable)), result._set_length))\r
+ return result\r
+ else:\r
+ assert chunksize > 1\r
+ task_batches = Pool._get_tasks(func, iterable, chunksize)\r
+ result = IMapIterator(self._cache)\r
+ self._taskqueue.put((((result._job, i, mapstar, (x,), {})\r
+ for i, x in enumerate(task_batches)), result._set_length))\r
+ return (item for chunk in result for item in chunk)\r
+\r
+ def imap_unordered(self, func, iterable, chunksize=1):\r
+ '''\r
+ Like the `imap()` method but the ordering of results is arbitrary\r
+ '''\r
+ assert self._state == RUN\r
+ if chunksize == 1:\r
+ result = IMapUnorderedIterator(self._cache)\r
+ self._taskqueue.put((((result._job, i, func, (x,), {})\r
+ for i, x in enumerate(iterable)), result._set_length))\r
+ return result\r
+ else:\r
+ assert chunksize > 1\r
+ task_batches = Pool._get_tasks(func, iterable, chunksize)\r
+ result = IMapUnorderedIterator(self._cache)\r
+ self._taskqueue.put((((result._job, i, mapstar, (x,), {})\r
+ for i, x in enumerate(task_batches)), result._set_length))\r
+ return (item for chunk in result for item in chunk)\r
+ \r
+ def apply_async(self, func, args=(), kwds={}, callback=None):\r
+ '''\r
+ Asynchronous equivalent of `apply()` builtin\r
+ '''\r
+ assert self._state == RUN\r
+ result = ApplyResult(self._cache, callback)\r
+ self._taskqueue.put(([(result._job, None, func, args, kwds)], None))\r
+ return result\r
+\r
+ def map_async(self, func, iterable, chunksize=None, callback=None):\r
+ '''\r
+ Asynchronous equivalent of `map()` builtin\r
+ '''\r
+ assert self._state == RUN\r
+ if not hasattr(iterable, '__len__'):\r
+ iterable = list(iterable)\r
+ \r
+ if chunksize is None:\r
+ chunksize, extra = divmod(len(iterable), len(self._pool) * 4)\r
+ if extra:\r
+ chunksize += 1\r
+ \r
+ task_batches = Pool._get_tasks(func, iterable, chunksize)\r
+ result = MapResult(self._cache, chunksize, len(iterable), callback)\r
+ self._taskqueue.put((((result._job, i, mapstar, (x,), {})\r
+ for i, x in enumerate(task_batches)), None))\r
+ return result\r
+\r
+ @staticmethod\r
+ def _handle_tasks(taskqueue, put, outqueue, pool):\r
+ thread = threading.currentThread()\r
+\r
+ for taskseq, set_length in iter(taskqueue.get, None):\r
+ i = -1\r
+ for i, task in enumerate(taskseq):\r
+ if thread._state:\r
+ debug('task handler found thread._state != RUN')\r
+ break\r
+ try:\r
+ put(task)\r
+ except IOError:\r
+ debug('could not put task on queue')\r
+ break\r
+ else:\r
+ if set_length:\r
+ debug('doing set_length()')\r
+ set_length(i+1)\r
+ continue\r
+ break\r
+ else:\r
+ debug('task handler got sentinel')\r
+\r
+ try:\r
+ # tell result handler to finish when cache is empty\r
+ debug('task handler sending sentinel to result handler')\r
+ outqueue.put(None)\r
+ \r
+ # tell workers there is no more work\r
+ debug('task handler sending sentinel to workers')\r
+ for p in pool:\r
+ put(None)\r
+ except IOError:\r
+ debug('task handler got IOError when sending sentinels')\r
+\r
+ debug('task handler exiting')\r
+\r
+ @staticmethod\r
+ def _handle_results(outqueue, get, cache):\r
+ thread = threading.currentThread()\r
+\r
+ while 1:\r
+ try:\r
+ task = get()\r
+ except (IOError, EOFError):\r
+ debug('result handler got EOFError/IOError -- exiting')\r
+ return\r
+ \r
+ if thread._state:\r
+ assert thread._state == TERMINATE\r
+ debug('result handler found thread._state=TERMINATE')\r
+ break\r
+ \r
+ if task is None:\r
+ debug('result handler got sentinel')\r
+ break\r
+\r
+ job, i, obj = task\r
+ try:\r
+ cache[job]._set(i, obj)\r
+ except KeyError:\r
+ pass\r
+\r
+ while cache and thread._state != TERMINATE:\r
+ try:\r
+ task = get()\r
+ except (IOError, EOFError):\r
+ debug('result handler got EOFError/IOError -- exiting')\r
+ return\r
+\r
+ if task is None:\r
+ debug('result handler ignoring extra sentinel')\r
+ continue\r
+ job, i, obj = task\r
+ try:\r
+ cache[job]._set(i, obj)\r
+ except KeyError:\r
+ pass\r
+\r
+ if hasattr(outqueue, '_reader'):\r
+ debug('ensuring that outqueue is not full')\r
+ # If we don't make room available in outqueue then\r
+ # attempts to add the sentinel (None) to outqueue may\r
+ # block. There is guaranteed to be no more than 2 sentinels.\r
+ try:\r
+ for i in range(10):\r
+ if not outqueue._reader.poll():\r
+ break\r
+ get()\r
+ except (IOError, EOFError):\r
+ pass\r
+\r
+ debug('result handler exiting: len(cache)=%s, thread._state=%s',\r
+ len(cache), thread._state)\r
+\r
+ @staticmethod\r
+ def _get_tasks(func, it, size):\r
+ it = iter(it)\r
+ while 1:\r
+ x = tuple(itertools.islice(it, size))\r
+ if not x:\r
+ return\r
+ yield (func, x)\r
+\r
+ def __reduce__(self):\r
+ raise NotImplementedError(\r
+ 'pool objects cannot be passed between processes or pickled'\r
+ )\r
+ \r
+ def close(self):\r
+ debug('closing pool')\r
+ if self._state == RUN:\r
+ self._state = CLOSE\r
+ self._taskqueue.put(None)\r
+\r
+ def terminate(self):\r
+ debug('terminating pool')\r
+ self._state = TERMINATE\r
+ self._terminate()\r
+\r
+ def join(self):\r
+ debug('joining pool')\r
+ assert self._state in (CLOSE, TERMINATE)\r
+ self._task_handler.join()\r
+ self._result_handler.join()\r
+ for p in self._pool:\r
+ p.join()\r
+\r
+ @staticmethod\r
+ def _help_stuff_finish(inqueue, task_handler, size):\r
+ # task_handler may be blocked trying to put items on inqueue\r
+ debug('removing tasks from inqueue until task handler finished')\r
+ inqueue._rlock.acquire()\r
+ while task_handler.isAlive() and inqueue._reader.poll():\r
+ inqueue._reader.recv()\r
+ time.sleep(0)\r
+\r
+ @classmethod\r
+ def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,\r
+ task_handler, result_handler, cache):\r
+ # this is guaranteed to only be called once\r
+ debug('finalizing pool')\r
+ \r
+ task_handler._state = TERMINATE\r
+ taskqueue.put(None) # sentinel\r
+\r
+ debug('helping task handler/workers to finish')\r
+ cls._help_stuff_finish(inqueue, task_handler, len(pool))\r
+\r
+ assert result_handler.isAlive() or len(cache) == 0\r
+ \r
+ result_handler._state = TERMINATE\r
+ outqueue.put(None) # sentinel\r
+\r
+ if pool and hasattr(pool[0], 'terminate'):\r
+ debug('terminating workers')\r
+ for p in pool:\r
+ p.terminate()\r
+\r
+ debug('joining task handler')\r
+ task_handler.join(1e100)\r
+\r
+ debug('joining result handler')\r
+ result_handler.join(1e100)\r
+\r
+ if pool and hasattr(pool[0], 'terminate'):\r
+ debug('joining pool workers')\r
+ for p in pool:\r
+ p.join()\r
+\r
+#\r
+# Class whose instances are returned by `Pool.apply_async()`\r
+#\r
+\r
+class ApplyResult(object):\r
+\r
+ def __init__(self, cache, callback):\r
+ self._cond = threading.Condition(threading.Lock())\r
+ self._job = job_counter.next()\r
+ self._cache = cache\r
+ self._ready = False\r
+ self._callback = callback\r
+ cache[self._job] = self\r
+ \r
+ def ready(self):\r
+ return self._ready\r
+ \r
+ def successful(self):\r
+ assert self._ready\r
+ return self._success\r
+ \r
+ def wait(self, timeout=None):\r
+ self._cond.acquire()\r
+ try:\r
+ if not self._ready:\r
+ self._cond.wait(timeout)\r
+ finally:\r
+ self._cond.release()\r
+\r
+ def get(self, timeout=None):\r
+ self.wait(timeout)\r
+ if not self._ready:\r
+ raise TimeoutError\r
+ if self._success:\r
+ return self._value\r
+ else:\r
+ raise self._value\r
+\r
+ def _set(self, i, obj):\r
+ self._success, self._value = obj\r
+ if self._callback and self._success:\r
+ self._callback(self._value)\r
+ self._cond.acquire()\r
+ try:\r
+ self._ready = True\r
+ self._cond.notify()\r
+ finally:\r
+ self._cond.release()\r
+ del self._cache[self._job]\r
+\r
+#\r
+# Class whose instances are returned by `Pool.map_async()`\r
+#\r
+\r
+class MapResult(ApplyResult):\r
+ \r
+ def __init__(self, cache, chunksize, length, callback):\r
+ ApplyResult.__init__(self, cache, callback)\r
+ self._success = True\r
+ self._value = [None] * length\r
+ self._chunksize = chunksize\r
+ if chunksize <= 0:\r
+ self._number_left = 0\r
+ self._ready = True\r
+ else:\r
+ self._number_left = length//chunksize + bool(length % chunksize)\r
+ \r
+ def _set(self, i, success_result):\r
+ success, result = success_result\r
+ if success:\r
+ self._value[i*self._chunksize:(i+1)*self._chunksize] = result\r
+ self._number_left -= 1\r
+ if self._number_left == 0:\r
+ if self._callback:\r
+ self._callback(self._value)\r
+ del self._cache[self._job]\r
+ self._cond.acquire()\r
+ try:\r
+ self._ready = True\r
+ self._cond.notify()\r
+ finally:\r
+ self._cond.release()\r
+\r
+ else:\r
+ self._success = False\r
+ self._value = result\r
+ del self._cache[self._job]\r
+ self._cond.acquire()\r
+ try:\r
+ self._ready = True\r
+ self._cond.notify()\r
+ finally:\r
+ self._cond.release()\r
+\r
+#\r
+# Class whose instances are returned by `Pool.imap()`\r
+#\r
+\r
+class IMapIterator(object):\r
+\r
+ def __init__(self, cache):\r
+ self._cond = threading.Condition(threading.Lock())\r
+ self._job = job_counter.next()\r
+ self._cache = cache\r
+ self._items = collections.deque()\r
+ self._index = 0\r
+ self._length = None\r
+ self._unsorted = {}\r
+ cache[self._job] = self\r
+ \r
+ def __iter__(self):\r
+ return self\r
+ \r
+ def next(self, timeout=None):\r
+ self._cond.acquire()\r
+ try:\r
+ try:\r
+ item = self._items.popleft()\r
+ except IndexError:\r
+ if self._index == self._length:\r
+ raise StopIteration\r
+ self._cond.wait(timeout)\r
+ try:\r
+ item = self._items.popleft()\r
+ except IndexError:\r
+ if self._index == self._length:\r
+ raise StopIteration\r
+ raise TimeoutError\r
+ finally:\r
+ self._cond.release()\r
+\r
+ success, value = item\r
+ if success:\r
+ return value\r
+ raise value\r
+\r
+ __next__ = next # XXX\r
+ \r
+ def _set(self, i, obj):\r
+ self._cond.acquire()\r
+ try:\r
+ if self._index == i:\r
+ self._items.append(obj)\r
+ self._index += 1\r
+ while self._index in self._unsorted:\r
+ obj = self._unsorted.pop(self._index)\r
+ self._items.append(obj)\r
+ self._index += 1\r
+ self._cond.notify()\r
+ else:\r
+ self._unsorted[i] = obj\r
+ \r
+ if self._index == self._length:\r
+ del self._cache[self._job]\r
+ finally:\r
+ self._cond.release()\r
+ \r
+ def _set_length(self, length):\r
+ self._cond.acquire()\r
+ try:\r
+ self._length = length\r
+ if self._index == self._length:\r
+ self._cond.notify()\r
+ del self._cache[self._job]\r
+ finally:\r
+ self._cond.release()\r
+\r
+#\r
+# Class whose instances are returned by `Pool.imap_unordered()`\r
+#\r
+\r
+class IMapUnorderedIterator(IMapIterator):\r
+\r
+ def _set(self, i, obj):\r
+ self._cond.acquire()\r
+ try:\r
+ self._items.append(obj)\r
+ self._index += 1\r
+ self._cond.notify()\r
+ if self._index == self._length:\r
+ del self._cache[self._job]\r
+ finally:\r
+ self._cond.release()\r
+\r
+#\r
+#\r
+#\r
+\r
+class ThreadPool(Pool):\r
+ \r
+ from .dummy import Process\r
+ \r
+ def __init__(self, processes=None, initializer=None, initargs=()):\r
+ Pool.__init__(self, processes, initializer, initargs)\r
+ \r
+ def _setup_queues(self):\r
+ self._inqueue = Queue.Queue()\r
+ self._outqueue = Queue.Queue()\r
+ self._quick_put = self._inqueue.put\r
+ self._quick_get = self._outqueue.get\r
+ \r
+ @staticmethod\r
+ def _help_stuff_finish(inqueue, task_handler, size):\r
+ # put sentinels at head of inqueue to make workers finish\r
+ inqueue.not_empty.acquire()\r
+ try:\r
+ inqueue.queue.clear()\r
+ inqueue.queue.extend([None] * size)\r
+ inqueue.not_empty.notifyAll()\r
+ finally:\r
+ inqueue.not_empty.release()\r
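+\r
+#\r
+# Illustrative usage (a sketch, not part of the module):\r
+#\r
+#     from multiprocessing import Pool\r
+#\r
+#     def f(x):               # defined at module level so that it can be\r
+#         return x*x          # pickled for the worker processes\r
+#\r
+#     if __name__ == '__main__':\r
+#         pool = Pool(processes=4)\r
+#         print pool.map(f, range(10))      # [0, 1, 4, 9, 16, ...]\r
+#         result = pool.apply_async(f, (20,))\r
+#         print result.get(timeout=1)       # 400\r
+#         pool.close()\r
+#         pool.join()\r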
--- /dev/null
+#\r
+# Module providing the `Process` class which emulates `threading.Thread`\r
+#\r
+# multiprocessing/process.py\r
+#\r
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+__all__ = ['Process', 'current_process', 'active_children']\r
+\r
+#\r
+# Imports\r
+#\r
+\r
+import os\r
+import sys\r
+import signal\r
+import itertools\r
+\r
+#\r
+#\r
+#\r
+\r
+try:\r
+ ORIGINAL_DIR = os.path.abspath(os.getcwd())\r
+except OSError:\r
+ ORIGINAL_DIR = None\r
+\r
+try:\r
+ bytes\r
+except NameError:\r
+ bytes = str # XXX not needed in Py2.6 and Py3.0\r
+\r
+#\r
+# Public functions\r
+#\r
+\r
+def current_process():\r
+ '''\r
+ Return process object representing the current process\r
+ '''\r
+ return _current_process\r
+\r
+def active_children():\r
+ '''\r
+ Return list of process objects corresponding to live child processes\r
+ '''\r
+ _cleanup()\r
+ return list(_current_process._children)\r
+ \r
+#\r
+#\r
+#\r
+\r
+def _cleanup():\r
+ # check for processes which have finished\r
+ for p in list(_current_process._children):\r
+ if p._popen.poll() is not None:\r
+ _current_process._children.discard(p)\r
+\r
+#\r
+# The `Process` class\r
+#\r
+\r
+class Process(object):\r
+ '''\r
+ Process objects represent activity that is run in a separate process\r
+\r
+ The class is analogous to `threading.Thread`\r
+ '''\r
+ _Popen = None\r
+ \r
+ def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):\r
+ assert group is None, 'group argument must be None for now'\r
+ count = _current_process._counter.next()\r
+ self._identity = _current_process._identity + (count,)\r
+ self._authkey = _current_process._authkey\r
+ self._daemonic = _current_process._daemonic\r
+ self._tempdir = _current_process._tempdir\r
+ self._parent_pid = os.getpid()\r
+ self._popen = None\r
+ self._target = target\r
+ self._args = tuple(args)\r
+ self._kwargs = dict(kwargs)\r
+ self._name = name or type(self).__name__ + '-' + \\r
+ ':'.join(str(i) for i in self._identity)\r
+\r
+ def run(self):\r
+ '''\r
+ Method to be run in sub-process; can be overridden in sub-class\r
+ '''\r
+ if self._target:\r
+ self._target(*self._args, **self._kwargs)\r
+ \r
+ def start(self):\r
+ '''\r
+ Start child process\r
+ '''\r
+ assert self._popen is None, 'cannot start a process twice'\r
+ assert self._parent_pid == os.getpid(), \\r
+ 'can only start a process object created by current process'\r
+ assert not _current_process._daemonic, \\r
+ 'daemonic processes are not allowed to have children'\r
+ _cleanup()\r
+ if self._Popen is not None:\r
+ Popen = self._Popen\r
+ else:\r
+ from .forking import Popen\r
+ self._popen = Popen(self)\r
+ _current_process._children.add(self)\r
+\r
+ def terminate(self):\r
+ '''\r
+ Terminate process; sends SIGTERM signal or uses TerminateProcess()\r
+ '''\r
+ self._popen.terminate()\r
+ \r
+ def join(self, timeout=None):\r
+ '''\r
+ Wait until child process terminates\r
+ '''\r
+ assert self._parent_pid == os.getpid(), 'can only join a child process'\r
+ assert self._popen is not None, 'can only join a started process'\r
+ res = self._popen.wait(timeout)\r
+ if res is not None:\r
+ _current_process._children.discard(self)\r
+\r
+ def is_alive(self):\r
+ '''\r
+ Return whether process is alive\r
+ '''\r
+ if self is _current_process:\r
+ return True\r
+ assert self._parent_pid == os.getpid(), 'can only test a child process'\r
+ if self._popen is None:\r
+ return False\r
+ self._popen.poll()\r
+ return self._popen.returncode is None\r
+\r
+ def get_name(self):\r
+ '''\r
+ Return name of process\r
+ '''\r
+ return self._name\r
+\r
+ def set_name(self, name):\r
+ '''\r
+ Set name of process\r
+ '''\r
+ assert isinstance(name, str), 'name must be a string'\r
+ self._name = name\r
+\r
+ def is_daemon(self):\r
+ '''\r
+ Return whether process is a daemon\r
+ '''\r
+ return self._daemonic\r
+\r
+ def set_daemon(self, daemonic):\r
+ '''\r
+ Set whether process is a daemon\r
+ '''\r
+ assert self._popen is None, 'process has already started'\r
+ self._daemonic = daemonic\r
+\r
+ def get_authkey(self):\r
+ '''\r
+ Return authorization key of process\r
+ '''\r
+ return self._authkey\r
+\r
+ def set_authkey(self, authkey):\r
+ '''\r
+ Set authorization key of process\r
+ '''\r
+ self._authkey = AuthenticationString(authkey)\r
+\r
+ def get_exitcode(self):\r
+ '''\r
+ Return exit code of process or `None` if it has yet to stop\r
+ '''\r
+ if self._popen is None:\r
+ return None\r
+ return self._popen.poll()\r
+\r
+ def get_ident(self):\r
+ '''\r
+ Return identifier (PID) of process or `None` if it has yet to start\r
+ '''\r
+ if self is _current_process:\r
+ return os.getpid()\r
+ else:\r
+ return self._popen and self._popen.pid\r
+\r
+ pid = property(get_ident)\r
+\r
+ def __repr__(self):\r
+ if self is _current_process:\r
+ status = 'started'\r
+ elif self._parent_pid != os.getpid():\r
+ status = 'unknown'\r
+ elif self._popen is None:\r
+ status = 'initial'\r
+ else:\r
+ if self._popen.poll() is not None:\r
+ status = self.get_exitcode()\r
+ else:\r
+ status = 'started'\r
+\r
+ if type(status) is int:\r
+ if status == 0:\r
+ status = 'stopped'\r
+ else:\r
+ status = 'stopped[%s]' % _exitcode_to_name.get(status, status)\r
+\r
+ return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,\r
+ status, self._daemonic and ' daemon' or '')\r
+\r
+ ##\r
+ \r
+ def _bootstrap(self):\r
+ from . import util\r
+ global _current_process\r
+ \r
+ try:\r
+ self._children = set()\r
+ self._counter = itertools.count(1)\r
+ try:\r
+ os.close(sys.stdin.fileno())\r
+ except (OSError, ValueError):\r
+ pass\r
+ _current_process = self\r
+ util._finalizer_registry.clear()\r
+ util._run_after_forkers()\r
+ util.info('child process calling self.run()')\r
+ try:\r
+ self.run()\r
+ exitcode = 0\r
+ finally:\r
+ util._exit_function()\r
+ except SystemExit, e:\r
+ if not e.args:\r
+ exitcode = 1\r
+ elif type(e.args[0]) is int:\r
+ exitcode = e.args[0]\r
+ else:\r
+ sys.stderr.write(e.args[0] + '\n')\r
+ sys.stderr.flush()\r
+ exitcode = 1\r
+ except:\r
+ exitcode = 1\r
+ import traceback\r
+ sys.stderr.write('Process %s:\n' % self.get_name())\r
+ sys.stderr.flush()\r
+ traceback.print_exc()\r
+\r
+ util.info('process exiting with exitcode %d' % exitcode)\r
+ return exitcode\r
+\r
+#\r
+# We subclass bytes to avoid accidental transmission of auth keys over the network\r
+#\r
+\r
+class AuthenticationString(bytes):\r
+ def __reduce__(self):\r
+ from .forking import Popen\r
+ if not Popen.thread_is_spawning():\r
+ raise TypeError(\r
+ 'Pickling an AuthenticationString object is '\r
+ 'disallowed for security reasons'\r
+ )\r
+ return AuthenticationString, (bytes(self),)\r
+\r
+#\r
+# Create object representing the main process\r
+#\r
+\r
+class _MainProcess(Process):\r
+\r
+ def __init__(self):\r
+ self._identity = ()\r
+ self._daemonic = False\r
+ self._name = 'MainProcess'\r
+ self._parent_pid = None\r
+ self._popen = None\r
+ self._counter = itertools.count(1)\r
+ self._children = set()\r
+ self._authkey = AuthenticationString(os.urandom(32))\r
+ self._tempdir = None\r
+\r
+_current_process = _MainProcess()\r
+del _MainProcess\r
+\r
+#\r
+# Give names to some return codes\r
+#\r
+\r
+_exitcode_to_name = {}\r
+\r
+for name, signum in signal.__dict__.items():\r
+ if name[:3]=='SIG' and '_' not in name:\r
+ _exitcode_to_name[-signum] = name\r
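+\r
+#\r
+# Illustrative usage (a sketch, not part of the module):\r
+#\r
+#     from multiprocessing import Process, current_process\r
+#\r
+#     def greet(name):\r
+#         print 'hello', name, 'from', current_process().get_name()\r
+#\r
+#     if __name__ == '__main__':\r
+#         p = Process(target=greet, args=('world',))\r
+#         p.start()\r
+#         p.join()\r
+#         print p.get_exitcode()            # 0 on a clean exit\r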
--- /dev/null
+#\r
+# Module implementing queues\r
+#\r
+# multiprocessing/queues.py\r
+#\r
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+__all__ = ['Queue', 'SimpleQueue']\r
+\r
+import sys\r
+import os\r
+import threading\r
+import collections\r
+import time\r
+import atexit\r
+import weakref\r
+\r
+from Queue import Empty, Full\r
+import _multiprocessing\r
+from multiprocessing import Pipe\r
+from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition\r
+from multiprocessing.util import debug, info, Finalize, register_after_fork\r
+from multiprocessing.forking import assert_spawning\r
+\r
+#\r
+# Queue type using a pipe, buffer and thread\r
+#\r
+\r
+class Queue(object):\r
+\r
+ def __init__(self, maxsize=0):\r
+ if maxsize <= 0:\r
+ maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX\r
+ self._maxsize = maxsize\r
+ self._reader, self._writer = Pipe(duplex=False)\r
+ self._rlock = Lock()\r
+ self._opid = os.getpid()\r
+ if sys.platform == 'win32':\r
+ self._wlock = None\r
+ else:\r
+ self._wlock = Lock()\r
+ self._sem = BoundedSemaphore(maxsize)\r
+ \r
+ self._after_fork()\r
+ \r
+ if sys.platform != 'win32':\r
+ register_after_fork(self, Queue._after_fork)\r
+\r
+ def __getstate__(self):\r
+ assert_spawning(self)\r
+ return (self._maxsize, self._reader, self._writer,\r
+ self._rlock, self._wlock, self._sem, self._opid)\r
+ \r
+ def __setstate__(self, state):\r
+ (self._maxsize, self._reader, self._writer,\r
+ self._rlock, self._wlock, self._sem, self._opid) = state\r
+ self._after_fork()\r
+ \r
+ def _after_fork(self):\r
+ debug('Queue._after_fork()')\r
+ self._notempty = threading.Condition(threading.Lock())\r
+ self._buffer = collections.deque()\r
+ self._thread = None\r
+ self._jointhread = None\r
+ self._joincancelled = False\r
+ self._closed = False\r
+ self._close = None\r
+ self._send = self._writer.send\r
+ self._recv = self._reader.recv\r
+ self._poll = self._reader.poll\r
+ \r
+ def put(self, obj, block=True, timeout=None):\r
+ assert not self._closed\r
+ if not self._sem.acquire(block, timeout):\r
+ raise Full\r
+\r
+ self._notempty.acquire()\r
+ try:\r
+ if self._thread is None:\r
+ self._start_thread()\r
+ self._buffer.append(obj)\r
+ self._notempty.notify()\r
+ finally:\r
+ self._notempty.release()\r
+\r
+ def get(self, block=True, timeout=None):\r
+ if block and timeout is None:\r
+ self._rlock.acquire()\r
+ try:\r
+ res = self._recv()\r
+ self._sem.release()\r
+ return res\r
+ finally:\r
+ self._rlock.release()\r
+ \r
+ else:\r
+ if block:\r
+ deadline = time.time() + timeout\r
+ if not self._rlock.acquire(block, timeout):\r
+ raise Empty\r
+ try:\r
+ if not self._poll(block and (deadline-time.time()) or 0.0):\r
+ raise Empty\r
+ res = self._recv()\r
+ self._sem.release()\r
+ return res\r
+ finally:\r
+ self._rlock.release()\r
+\r
+ def qsize(self):\r
+ # Raises NotImplementedError on Mac OS X because sem_getvalue() is broken\r
+ return self._maxsize - self._sem._semlock._get_value()\r
+\r
+ def empty(self):\r
+ return not self._poll()\r
+\r
+ def full(self):\r
+ return self._sem._semlock._is_zero()\r
+\r
+ def get_nowait(self):\r
+ return self.get(False)\r
+\r
+ def put_nowait(self, obj):\r
+ return self.put(obj, False)\r
+\r
+ def close(self):\r
+ self._closed = True\r
+ self._reader.close()\r
+ if self._close:\r
+ self._close()\r
+\r
+ def join_thread(self):\r
+ debug('Queue.join_thread()')\r
+ assert self._closed\r
+ if self._jointhread:\r
+ self._jointhread()\r
+ \r
+ def cancel_join_thread(self):\r
+ debug('Queue.cancel_join_thread()')\r
+ self._joincancelled = True\r
+ try:\r
+ self._jointhread.cancel()\r
+ except AttributeError:\r
+ pass\r
+\r
+ def _start_thread(self):\r
+ debug('Queue._start_thread()')\r
+ \r
+ # Start thread which transfers data from buffer to pipe\r
+ self._buffer.clear()\r
+ self._thread = threading.Thread(\r
+ target=Queue._feed,\r
+ args=(self._buffer, self._notempty, self._send,\r
+ self._wlock, self._writer.close),\r
+ name='QueueFeederThread'\r
+ )\r
+ self._thread.setDaemon(True)\r
+\r
+ debug('doing self._thread.start()')\r
+ self._thread.start()\r
+ debug('... done self._thread.start()')\r
+\r
+ # On process exit we will wait for data to be flushed to pipe.\r
+ #\r
+ # However, if this process created the queue then all\r
+ # processes which use the queue will be descendants of this\r
+ # process. Therefore waiting for the queue to be flushed\r
+ # is pointless once all the child processes have been joined.\r
+ created_by_this_process = (self._opid == os.getpid())\r
+ if not self._joincancelled and not created_by_this_process:\r
+ self._jointhread = Finalize(\r
+ self._thread, Queue._finalize_join,\r
+ [weakref.ref(self._thread)],\r
+ exitpriority=-5\r
+ )\r
+ \r
+ # Send sentinel to the thread queue object when garbage collected\r
+ self._close = Finalize(\r
+ self, Queue._finalize_close,\r
+ [self._buffer, self._notempty],\r
+ exitpriority=10\r
+ )\r
+ \r
+ @staticmethod\r
+ def _finalize_join(twr):\r
+ debug('joining queue thread')\r
+ thread = twr()\r
+ if thread is not None:\r
+ thread.join()\r
+ debug('... queue thread joined')\r
+ else:\r
+ debug('... queue thread already dead')\r
+ \r
+ @staticmethod\r
+ def _finalize_close(buffer, notempty):\r
+ debug('telling queue thread to quit')\r
+ notempty.acquire()\r
+ try:\r
+ buffer.append(_sentinel)\r
+ notempty.notify()\r
+ finally:\r
+ notempty.release()\r
+\r
+ @staticmethod\r
+ def _feed(buffer, notempty, send, writelock, close):\r
+ debug('starting thread to feed data to pipe')\r
+ from .util import is_exiting\r
+ \r
+ nacquire = notempty.acquire\r
+ nrelease = notempty.release\r
+ nwait = notempty.wait\r
+ bpopleft = buffer.popleft\r
+ sentinel = _sentinel\r
+ if sys.platform != 'win32':\r
+ wacquire = writelock.acquire\r
+ wrelease = writelock.release\r
+ else:\r
+ wacquire = None\r
+ \r
+ try:\r
+ while 1:\r
+ nacquire()\r
+ try:\r
+ if not buffer:\r
+ nwait()\r
+ finally:\r
+ nrelease()\r
+ try:\r
+ while 1:\r
+ obj = bpopleft()\r
+ if obj is sentinel:\r
+ debug('feeder thread got sentinel -- exiting')\r
+ close()\r
+ return\r
+\r
+ if wacquire is None:\r
+ send(obj)\r
+ else:\r
+ wacquire()\r
+ try:\r
+ send(obj)\r
+ finally:\r
+ wrelease()\r
+ except IndexError:\r
+ pass\r
+ except Exception, e:\r
+ # Since this runs in a daemon thread the resources it uses\r
+ # may become unusable while the process is cleaning up.\r
+ # We ignore errors which happen after the process has\r
+ # started to clean up.\r
+ try:\r
+ if is_exiting():\r
+ info('error in queue thread: %s', e)\r
+ else:\r
+ import traceback\r
+ traceback.print_exc()\r
+ except Exception:\r
+ pass\r
+ \r
+_sentinel = object()\r
+\r
+#\r
+# A queue type which also supports join() and task_done() methods\r
+#\r
+# Note that if you do not call task_done() for each finished task then\r
+# eventually the counter's semaphore may overflow causing Bad Things\r
+# to happen.\r
+#\r
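+# A consumer sketch (illustrative; `do_work` is a placeholder):\r
+#\r
+#     def consume(q):\r
+#         while True:\r
+#             item = q.get()\r
+#             do_work(item)\r
+#             q.task_done()     # exactly one task_done() per get()\r
+#\r
+# The producer then calls q.join() to block until every item has been\r
+# processed.\r
+#\r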
+\r
+class JoinableQueue(Queue):\r
+\r
+ def __init__(self, maxsize=0):\r
+ Queue.__init__(self, maxsize)\r
+ self._unfinished_tasks = Semaphore(0)\r
+ self._cond = Condition()\r
+ \r
+ def __getstate__(self):\r
+ return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)\r
+\r
+ def __setstate__(self, state):\r
+ Queue.__setstate__(self, state[:-2])\r
+ self._cond, self._unfinished_tasks = state[-2:]\r
+\r
+ def put(self, item, block=True, timeout=None):\r
+ Queue.put(self, item, block, timeout)\r
+ self._unfinished_tasks.release()\r
+ \r
+ def task_done(self):\r
+ self._cond.acquire()\r
+ try:\r
+ if not self._unfinished_tasks.acquire(False):\r
+ raise ValueError('task_done() called too many times')\r
+ if self._unfinished_tasks._semlock._is_zero():\r
+ self._cond.notify_all()\r
+ finally:\r
+ self._cond.release()\r
+ \r
+ def join(self):\r
+ self._cond.acquire()\r
+ try:\r
+ if not self._unfinished_tasks._semlock._is_zero():\r
+ self._cond.wait()\r
+ finally:\r
+ self._cond.release()\r
+\r
+#\r
+# Simplified Queue type -- really just a locked pipe\r
+#\r
+\r
+class SimpleQueue(object):\r
+\r
+ def __init__(self):\r
+ self._reader, self._writer = Pipe(duplex=False)\r
+ self._rlock = Lock()\r
+ if sys.platform == 'win32':\r
+ self._wlock = None\r
+ else:\r
+ self._wlock = Lock()\r
+ self._make_methods()\r
+\r
+ def empty(self):\r
+ return not self._reader.poll()\r
+\r
+ def __getstate__(self):\r
+ assert_spawning(self)\r
+ return (self._reader, self._writer, self._rlock, self._wlock)\r
+\r
+ def __setstate__(self, state):\r
+ (self._reader, self._writer, self._rlock, self._wlock) = state\r
+ self._make_methods()\r
+\r
+ def _make_methods(self):\r
+ recv = self._reader.recv\r
+ racquire, rrelease = self._rlock.acquire, self._rlock.release\r
+ def get():\r
+ racquire()\r
+ try:\r
+ return recv()\r
+ finally:\r
+ rrelease()\r
+ self.get = get\r
+\r
+ if self._wlock is None:\r
+ # writes to a message-oriented win32 pipe are atomic\r
+ self.put = self._writer.send\r
+ else:\r
+ send = self._writer.send\r
+ wacquire, wrelease = self._wlock.acquire, self._wlock.release\r
+ def put(obj):\r
+ wacquire()\r
+ try:\r
+ return send(obj)\r
+ finally:\r
+ wrelease()\r
+ self.put = put\r
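+\r
+#\r
+# Illustrative usage (a sketch, not part of the module):\r
+#\r
+#     from multiprocessing import Process, Queue\r
+#\r
+#     def echo(q):\r
+#         q.put('hello from the child')\r
+#\r
+#     if __name__ == '__main__':\r
+#         q = Queue()\r
+#         p = Process(target=echo, args=(q,))\r
+#         p.start()\r
+#         print q.get()         # blocks until the child has put something\r
+#         p.join()\r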
--- /dev/null
+#\r
+# Module to allow connection and socket objects to be transferred\r
+# between processes\r
+#\r
+# multiprocessing/reduction.py\r
+#\r
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+__all__ = []\r
+\r
+import os\r
+import sys\r
+import socket\r
+import threading\r
+import copy_reg\r
+\r
+import _multiprocessing\r
+from multiprocessing import current_process\r
+from multiprocessing.forking import Popen, duplicate, close\r
+from multiprocessing.util import register_after_fork, debug, sub_debug\r
+from multiprocessing.connection import Client, Listener\r
+\r
+\r
+#\r
+#\r
+#\r
+\r
+if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')):\r
+ raise ImportError('pickling of connections not supported')\r
+\r
+#\r
+# Platform specific definitions\r
+#\r
+\r
+if sys.platform == 'win32':\r
+ import _subprocess\r
+ from ._multiprocessing import win32\r
+ \r
+ def send_handle(conn, handle, destination_pid):\r
+ process_handle = win32.OpenProcess(\r
+ win32.PROCESS_ALL_ACCESS, False, destination_pid\r
+ )\r
+ try:\r
+ new_handle = duplicate(handle, process_handle)\r
+ conn.send(new_handle)\r
+ finally:\r
+ close(process_handle)\r
+ \r
+ def recv_handle(conn):\r
+ return conn.recv()\r
+\r
+else:\r
+ def send_handle(conn, handle, destination_pid):\r
+ _multiprocessing.sendfd(conn.fileno(), handle)\r
+ \r
+ def recv_handle(conn):\r
+ return _multiprocessing.recvfd(conn.fileno())\r
+\r
+#\r
+# Support for a per-process server thread which caches pickled handles\r
+#\r
+\r
+_cache = set()\r
+\r
+def _reset(obj):\r
+ global _lock, _listener, _cache\r
+ for h in _cache:\r
+ close(h)\r
+ _cache.clear()\r
+ _lock = threading.Lock()\r
+ _listener = None\r
+\r
+_reset(None)\r
+register_after_fork(_reset, _reset)\r
+\r
+def _get_listener():\r
+ global _listener\r
+\r
+ if _listener is None:\r
+ _lock.acquire()\r
+ try:\r
+ if _listener is None:\r
+ debug('starting listener and thread for sending handles')\r
+ _listener = Listener(authkey=current_process().get_authkey())\r
+ t = threading.Thread(target=_serve)\r
+ t.setDaemon(True)\r
+ t.start()\r
+ finally:\r
+ _lock.release()\r
+\r
+ return _listener\r
+\r
+def _serve():\r
+ from .util import is_exiting, sub_warning\r
+ \r
+ while 1:\r
+ try:\r
+ conn = _listener.accept()\r
+ handle_wanted, destination_pid = conn.recv()\r
+ _cache.remove(handle_wanted)\r
+ send_handle(conn, handle_wanted, destination_pid)\r
+ close(handle_wanted)\r
+ conn.close()\r
+ except:\r
+ if not is_exiting():\r
+ import traceback\r
+ sub_warning(\r
+ 'thread for sharing handles raised exception:\n' +\r
+ '-'*79 + '\n' + traceback.format_exc() + '-'*79\r
+ )\r
+ \r
+#\r
+# Functions to be used for pickling/unpickling objects with handles\r
+#\r
+\r
+def reduce_handle(handle):\r
+ if Popen.thread_is_spawning():\r
+ return (None, Popen.duplicate_for_child(handle), True)\r
+ dup_handle = duplicate(handle)\r
+ _cache.add(dup_handle)\r
+ sub_debug('reducing handle %d', handle)\r
+ return (_get_listener().address, dup_handle, False)\r
+\r
+def rebuild_handle(pickled_data):\r
+ address, handle, inherited = pickled_data\r
+ if inherited:\r
+ return handle\r
+ sub_debug('rebuilding handle %d', handle)\r
+ conn = Client(address, authkey=current_process().get_authkey())\r
+ conn.send((handle, os.getpid()))\r
+ new_handle = recv_handle(conn)\r
+ conn.close()\r
+ return new_handle\r
+\r
+#\r
+# Register `_multiprocessing.Connection` with `copy_reg`\r
+#\r
+\r
+def reduce_connection(conn):\r
+ rh = reduce_handle(conn.fileno())\r
+ return rebuild_connection, (rh, conn.readable, conn.writable)\r
+\r
+def rebuild_connection(reduced_handle, readable, writable):\r
+ handle = rebuild_handle(reduced_handle)\r
+ return _multiprocessing.Connection(\r
+ handle, readable=readable, writable=writable\r
+ )\r
+\r
+copy_reg.pickle(_multiprocessing.Connection, reduce_connection)\r
+\r
+#\r
+# Register `socket.socket` with `copy_reg`\r
+#\r
+\r
+def fromfd(fd, family, type_, proto=0):\r
+ s = socket.fromfd(fd, family, type_, proto)\r
+ if s.__class__ is not socket.socket:\r
+ s = socket.socket(_sock=s)\r
+ return s\r
+\r
+def reduce_socket(s):\r
+ reduced_handle = reduce_handle(s.fileno())\r
+ return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)\r
+\r
+def rebuild_socket(reduced_handle, family, type_, proto):\r
+ fd = rebuild_handle(reduced_handle)\r
+ _sock = fromfd(fd, family, type_, proto)\r
+ close(fd)\r
+ return _sock\r
+\r
+copy_reg.pickle(socket.socket, reduce_socket)\r
+\r
+#\r
+# Register `_multiprocessing.PipeConnection` with `copy_reg`\r
+#\r
+\r
+if sys.platform == 'win32':\r
+ \r
+ def reduce_pipe_connection(conn):\r
+ rh = reduce_handle(conn.fileno())\r
+ return rebuild_pipe_connection, (rh, conn.readable, conn.writable)\r
+ \r
+ def rebuild_pipe_connection(reduced_handle, readable, writable):\r
+ handle = rebuild_handle(reduced_handle)\r
+ return _multiprocessing.PipeConnection(\r
+ handle, readable=readable, writable=writable\r
+ )\r
+ \r
+ copy_reg.pickle(_multiprocessing.PipeConnection, reduce_pipe_connection)\r
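+\r
+#\r
+# Illustrative effect of this module (a sketch; the two ends of `a`/`b`\r
+# would normally live in different processes):\r
+#\r
+#     import multiprocessing.reduction      # registers the picklers above\r
+#     from multiprocessing import Pipe\r
+#\r
+#     a, b = Pipe()\r
+#     r, w = Pipe(duplex=False)\r
+#     a.send(w)          # `w` is pickled via reduce_connection()\r
+#     w2 = b.recv()      # ... and rebuilt as a working Connection\r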
--- /dev/null
+#\r
+# Module which supports allocation of ctypes objects from shared memory\r
+#\r
+# multiprocessing/sharedctypes.py\r
+#\r
+# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+import sys\r
+import ctypes\r
+import weakref\r
+import copy_reg\r
+\r
+from multiprocessing import heap, RLock\r
+from multiprocessing.forking import assert_spawning\r
+\r
+__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']\r
+\r
+#\r
+#\r
+#\r
+\r
+typecode_to_type = {\r
+ 'c': ctypes.c_char, 'u': ctypes.c_wchar,\r
+ 'b': ctypes.c_byte, 'B': ctypes.c_ubyte,\r
+ 'h': ctypes.c_short, 'H': ctypes.c_ushort,\r
+ 'i': ctypes.c_int, 'I': ctypes.c_uint,\r
+ 'l': ctypes.c_long, 'L': ctypes.c_ulong,\r
+ 'f': ctypes.c_float, 'd': ctypes.c_double\r
+ }\r
+\r
+#\r
+#\r
+#\r
+\r
+def _new_value(type_):\r
+ size = ctypes.sizeof(type_)\r
+ wrapper = heap.BufferWrapper(size)\r
+ return rebuild_ctype(type_, wrapper, None)\r
+\r
+def RawValue(typecode_or_type, *args):\r
+ '''\r
+ Returns a ctypes object allocated from shared memory\r
+ '''\r
+ type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)\r
+ obj = _new_value(type_)\r
+ ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))\r
+ obj.__init__(*args)\r
+ return obj\r
+\r
+def RawArray(typecode_or_type, size_or_initializer):\r
+ '''\r
+ Returns a ctypes array allocated from shared memory\r
+ '''\r
+ type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)\r
+ if isinstance(size_or_initializer, int):\r
+ type_ = type_ * size_or_initializer\r
+ return _new_value(type_)\r
+ else:\r
+ type_ = type_ * len(size_or_initializer)\r
+ result = _new_value(type_)\r
+ result.__init__(*size_or_initializer)\r
+ return result\r
+\r
+def Value(typecode_or_type, *args, **kwds):\r
+ '''\r
+ Return a synchronization wrapper for a Value\r
+ '''\r
+ lock = kwds.pop('lock', None)\r
+ if kwds:\r
+ raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())\r
+ obj = RawValue(typecode_or_type, *args)\r
+ if lock is None:\r
+ lock = RLock()\r
+ assert hasattr(lock, 'acquire')\r
+ return synchronized(obj, lock)\r
+\r
+def Array(typecode_or_type, size_or_initializer, **kwds):\r
+ '''\r
+ Return a synchronization wrapper for a RawArray\r
+ '''\r
+ lock = kwds.pop('lock', None)\r
+ if kwds:\r
+ raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())\r
+ obj = RawArray(typecode_or_type, size_or_initializer)\r
+ if lock is None:\r
+ lock = RLock()\r
+ assert hasattr(lock, 'acquire')\r
+ return synchronized(obj, lock)\r
+\r
+def copy(obj):\r
+ new_obj = _new_value(type(obj))\r
+ ctypes.pointer(new_obj)[0] = obj\r
+ return new_obj\r
+ \r
+def synchronized(obj, lock=None):\r
+ assert not isinstance(obj, SynchronizedBase), 'object already synchronized'\r
+ \r
+ if isinstance(obj, ctypes._SimpleCData):\r
+ return Synchronized(obj, lock)\r
+ elif isinstance(obj, ctypes.Array):\r
+ if obj._type_ is ctypes.c_char:\r
+ return SynchronizedString(obj, lock)\r
+ return SynchronizedArray(obj, lock)\r
+ else:\r
+ cls = type(obj)\r
+ try:\r
+ scls = class_cache[cls]\r
+ except KeyError:\r
+ names = [field[0] for field in cls._fields_]\r
+ d = dict((name, make_property(name)) for name in names)\r
+ classname = 'Synchronized' + cls.__name__\r
+ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)\r
+ return scls(obj, lock)\r
+\r
+#\r
+# Functions for pickling/unpickling\r
+#\r
+\r
+def reduce_ctype(obj):\r
+ assert_spawning(obj)\r
+ if isinstance(obj, ctypes.Array):\r
+ return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)\r
+ else:\r
+ return rebuild_ctype, (type(obj), obj._wrapper, None)\r
+ \r
+def rebuild_ctype(type_, wrapper, length):\r
+ if length is not None:\r
+ type_ = type_ * length\r
+ if sys.platform == 'win32' and type_ not in copy_reg.dispatch_table:\r
+ copy_reg.pickle(type_, reduce_ctype)\r
+ obj = type_.from_address(wrapper.get_address())\r
+ obj._wrapper = wrapper\r
+ return obj\r
+\r
+#\r
+# Function to create properties\r
+#\r
+\r
+def make_property(name):\r
+ try:\r
+ return prop_cache[name]\r
+ except KeyError:\r
+ d = {}\r
+ exec template % ((name,)*7) in d\r
+ prop_cache[name] = d[name]\r
+ return d[name]\r
+\r
+template = '''\r
+def get%s(self):\r
+ self.acquire()\r
+ try:\r
+ return self._obj.%s\r
+ finally:\r
+ self.release()\r
+def set%s(self, value):\r
+ self.acquire()\r
+ try:\r
+ self._obj.%s = value\r
+ finally:\r
+ self.release()\r
+%s = property(get%s, set%s)\r
+'''\r
+\r
+prop_cache = {}\r
+class_cache = weakref.WeakKeyDictionary()\r
+\r
+#\r
+# Synchronized wrappers\r
+#\r
+\r
+class SynchronizedBase(object):\r
+ \r
+ def __init__(self, obj, lock=None):\r
+ self._obj = obj\r
+ self._lock = lock or RLock()\r
+ self.acquire = self._lock.acquire\r
+ self.release = self._lock.release\r
+\r
+ def __reduce__(self):\r
+ assert_spawning(self)\r
+ return synchronized, (self._obj, self._lock)\r
+ \r
+ def get_obj(self):\r
+ return self._obj\r
+ \r
+ def get_lock(self):\r
+ return self._lock\r
+ \r
+ def __repr__(self):\r
+ return '<%s wrapper for %s>' % (type(self).__name__, self._obj)\r
+ \r
+ \r
+class Synchronized(SynchronizedBase):\r
+ value = make_property('value')\r
+ \r
+ \r
+class SynchronizedArray(SynchronizedBase):\r
+ \r
+ def __len__(self):\r
+ return len(self._obj)\r
+ \r
+ def __getitem__(self, i):\r
+ self.acquire()\r
+ try:\r
+ return self._obj[i]\r
+ finally:\r
+ self.release()\r
+ \r
+ def __setitem__(self, i, value):\r
+ self.acquire()\r
+ try:\r
+ self._obj[i] = value\r
+ finally:\r
+ self.release()\r
+ \r
+ def __getslice__(self, start, stop):\r
+ self.acquire()\r
+ try:\r
+ return self._obj[start:stop]\r
+ finally:\r
+ self.release()\r
+ \r
+ def __setslice__(self, start, stop, values):\r
+ self.acquire()\r
+ try:\r
+ self._obj[start:stop] = values\r
+ finally:\r
+ self.release()\r
+ \r
+ \r
+class SynchronizedString(SynchronizedArray):\r
+ value = make_property('value')\r
+ raw = make_property('raw')\r
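+\r
+\r
+#\r
+# Illustrative usage (a sketch, not part of the module):\r
+#\r
+#     from multiprocessing.sharedctypes import Value, Array\r
+#\r
+#     counter = Value('i', 0)           # synchronized c_int (RLock by default)\r
+#     buf = Array('d', [0.0] * 10)      # synchronized array of c_double\r
+#     counter.get_lock().acquire()\r
+#     try:\r
+#         counter.value += 1            # read-modify-write needs the lock\r
+#     finally:\r
+#         counter.get_lock().release()\r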
--- /dev/null
+#\r
+# Module implementing synchronization primitives\r
+#\r
+# multiprocessing/synchronize.py\r
+#\r
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+__all__ = [\r
+ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'\r
+ ]\r
+\r
+import threading\r
+import os\r
+import sys\r
+\r
+from time import time as _time, sleep as _sleep\r
+\r
+import _multiprocessing\r
+from multiprocessing.process import current_process\r
+from multiprocessing.util import Finalize, register_after_fork, debug\r
+from multiprocessing.forking import assert_spawning, Popen\r
+\r
+#\r
+# Constants\r
+#\r
+\r
+RECURSIVE_MUTEX, SEMAPHORE = range(2)\r
+SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX\r
+\r
+#\r
+# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`\r
+#\r
+\r
+class SemLock(object):\r
+\r
+ def __init__(self, kind, value, maxvalue):\r
+ sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)\r
+ debug('created semlock with handle %s' % sl.handle)\r
+ self._make_methods()\r
+ \r
+ if sys.platform != 'win32':\r
+ def _after_fork(obj):\r
+ obj._semlock._after_fork()\r
+ register_after_fork(self, _after_fork)\r
+\r
+ def _make_methods(self):\r
+ self.acquire = self._semlock.acquire\r
+ self.release = self._semlock.release\r
+ self.__enter__ = self._semlock.__enter__\r
+ self.__exit__ = self._semlock.__exit__\r
+\r
+ def __getstate__(self):\r
+ assert_spawning(self)\r
+ sl = self._semlock\r
+ return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue)\r
+\r
+ def __setstate__(self, state):\r
+ self._semlock = _multiprocessing.SemLock._rebuild(*state)\r
+ debug('recreated blocker with handle %r' % state[0])\r
+ self._make_methods()\r
+\r
+#\r
+# Semaphore\r
+#\r
+\r
+class Semaphore(SemLock):\r
+\r
+ def __init__(self, value=1):\r
+ SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)\r
+\r
+ def get_value(self):\r
+ return self._semlock._get_value()\r
+\r
+ def __repr__(self):\r
+ try:\r
+ value = self._semlock._get_value()\r
+ except Exception:\r
+ value = 'unknown'\r
+ return '<Semaphore(value=%s)>' % value\r
+\r
+#\r
+# Bounded semaphore\r
+#\r
+\r
+class BoundedSemaphore(Semaphore):\r
+\r
+ def __init__(self, value=1):\r
+ SemLock.__init__(self, SEMAPHORE, value, value)\r
+\r
+ def __repr__(self):\r
+ try:\r
+ value = self._semlock._get_value()\r
+ except Exception:\r
+ value = 'unknown'\r
+ return '<BoundedSemaphore(value=%s, maxvalue=%s)>' % \\r
+ (value, self._semlock.maxvalue)\r
+\r
+#\r
+# Non-recursive lock\r
+#\r
+\r
+class Lock(SemLock):\r
+\r
+ def __init__(self):\r
+ SemLock.__init__(self, SEMAPHORE, 1, 1)\r
+\r
+ def __repr__(self):\r
+ try:\r
+ if self._semlock._is_mine():\r
+ name = current_process().get_name()\r
+ if threading.currentThread().getName() != 'MainThread':\r
+ name += '|' + threading.currentThread().getName()\r
+ elif self._semlock._get_value() == 1:\r
+ name = 'None'\r
+ elif self._semlock._count() > 0:\r
+ name = 'SomeOtherThread'\r
+ else:\r
+ name = 'SomeOtherProcess'\r
+ except Exception:\r
+ name = 'unknown'\r
+ return '<Lock(owner=%s)>' % name\r
+\r
+#\r
+# Recursive lock\r
+#\r
+\r
+class RLock(SemLock):\r
+\r
+ def __init__(self):\r
+ SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)\r
+ \r
+ def __repr__(self):\r
+ try:\r
+ if self._semlock._is_mine():\r
+ name = current_process().get_name()\r
+ if threading.currentThread().getName() != 'MainThread':\r
+ name += '|' + threading.currentThread().getName()\r
+ count = self._semlock._count()\r
+ elif self._semlock._get_value() == 1:\r
+ name, count = 'None', 0\r
+ elif self._semlock._count() > 0:\r
+ name, count = 'SomeOtherThread', 'nonzero'\r
+ else:\r
+ name, count = 'SomeOtherProcess', 'nonzero'\r
+ except Exception:\r
+ name, count = 'unknown', 'unknown'\r
+ return '<RLock(%s, %s)>' % (name, count)\r
+\r
+#\r
+# Condition variable\r
+#\r
+\r
+class Condition(object):\r
+\r
+ def __init__(self, lock=None):\r
+ self._lock = lock or RLock()\r
+ self._sleeping_count = Semaphore(0)\r
+ self._woken_count = Semaphore(0)\r
+ self._wait_semaphore = Semaphore(0)\r
+ self._make_methods()\r
+\r
+ def __getstate__(self):\r
+ assert_spawning(self)\r
+ return (self._lock, self._sleeping_count,\r
+ self._woken_count, self._wait_semaphore)\r
+\r
+ def __setstate__(self, state):\r
+ (self._lock, self._sleeping_count,\r
+ self._woken_count, self._wait_semaphore) = state\r
+ self._make_methods()\r
+\r
+ def _make_methods(self):\r
+ self.acquire = self._lock.acquire\r
+ self.release = self._lock.release\r
+ self.__enter__ = self._lock.__enter__\r
+ self.__exit__ = self._lock.__exit__\r
+\r
+ def __repr__(self):\r
+ try:\r
+ num_waiters = (self._sleeping_count._semlock._get_value() -\r
+ self._woken_count._semlock._get_value())\r
+ except Exception:\r
+ num_waiters = 'unknown'\r
+ return '<Condition(%s, %s)>' % (self._lock, num_waiters)\r
+\r
+ def wait(self, timeout=None):\r
+ assert self._lock._semlock._is_mine(), \\r
+ 'must acquire() condition before using wait()'\r
+\r
+ # indicate that this thread is going to sleep\r
+ self._sleeping_count.release()\r
+\r
+ # release lock\r
+ count = self._lock._semlock._count()\r
+ for i in xrange(count):\r
+ self._lock.release()\r
+\r
+ try:\r
+ # wait for notification or timeout\r
+ self._wait_semaphore.acquire(True, timeout)\r
+ finally:\r
+ # indicate that this thread has woken\r
+ self._woken_count.release()\r
+\r
+ # reacquire lock\r
+ for i in xrange(count):\r
+ self._lock.acquire()\r
+\r
+ def notify(self):\r
+ assert self._lock._semlock._is_mine(), 'lock is not owned'\r
+ assert not self._wait_semaphore.acquire(False)\r
+ \r
+ # to account for timeouts since the last notify() we subtract\r
+ # woken_count from sleeping_count and rezero woken_count\r
+ while self._woken_count.acquire(False):\r
+ res = self._sleeping_count.acquire(False)\r
+ assert res\r
+ \r
+ if self._sleeping_count.acquire(False): # try grabbing a sleeper\r
+ self._wait_semaphore.release() # wake up one sleeper\r
+ self._woken_count.acquire() # wait for the sleeper to wake\r
+ \r
+ # rezero _wait_semaphore in case a timeout just happened\r
+ self._wait_semaphore.acquire(False)\r
+\r
+ def notify_all(self):\r
+ assert self._lock._semlock._is_mine(), 'lock is not owned'\r
+ assert not self._wait_semaphore.acquire(False)\r
+\r
+ # to account for timeouts since the last notify*() we subtract\r
+ # woken_count from sleeping_count and rezero woken_count\r
+ while self._woken_count.acquire(False):\r
+ res = self._sleeping_count.acquire(False)\r
+ assert res\r
+ \r
+ sleepers = 0\r
+ while self._sleeping_count.acquire(False):\r
+ self._wait_semaphore.release() # wake up one sleeper\r
+ sleepers += 1\r
+\r
+ if sleepers:\r
+ for i in xrange(sleepers):\r
+ self._woken_count.acquire() # wait for a sleeper to wake\r
+\r
+ # rezero wait_semaphore in case some timeouts just happened\r
+ while self._wait_semaphore.acquire(False):\r
+ pass\r
+\r
+#\r
+# Event\r
+#\r
+\r
+class Event(object):\r
+\r
+ def __init__(self):\r
+ self._cond = Condition(Lock())\r
+ self._flag = Semaphore(0)\r
+\r
+ def is_set(self):\r
+ self._cond.acquire()\r
+ try:\r
+ if self._flag.acquire(False):\r
+ self._flag.release()\r
+ return True\r
+ return False\r
+ finally:\r
+ self._cond.release()\r
+ \r
+ def set(self):\r
+ self._cond.acquire()\r
+ try:\r
+ self._flag.acquire(False)\r
+ self._flag.release()\r
+ self._cond.notify_all()\r
+ finally:\r
+ self._cond.release()\r
+\r
+ def clear(self):\r
+ self._cond.acquire()\r
+ try:\r
+ self._flag.acquire(False)\r
+ finally:\r
+ self._cond.release()\r
+\r
+ def wait(self, timeout=None):\r
+ self._cond.acquire()\r
+ try:\r
+ if self._flag.acquire(False):\r
+ self._flag.release()\r
+ else:\r
+ self._cond.wait(timeout)\r
+ finally:\r
+ self._cond.release()\r
--- /dev/null
+#\r
+# Module providing various facilities to other parts of the package\r
+#\r
+# multiprocessing/util.py\r
+#\r
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+import itertools\r
+import weakref\r
+import copy_reg\r
+import atexit\r
+import threading # we want threading to install its\r
+ # cleanup function before multiprocessing does\r
+\r
+from multiprocessing.process import current_process, active_children\r
+\r
+__all__ = [\r
+ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',\r
+ 'log_to_stderr', 'get_temp_dir', 'register_after_fork',\r
+ 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal'\r
+ ]\r
+\r
+#\r
+# Logging\r
+#\r
+\r
+NOTSET = 0\r
+SUBDEBUG = 5\r
+DEBUG = 10\r
+INFO = 20\r
+SUBWARNING = 25\r
+\r
+LOGGER_NAME = 'multiprocessing'\r
+DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'\r
+\r
+_logger = None\r
+_log_to_stderr = False\r
+\r
+def sub_debug(msg, *args):\r
+ if _logger:\r
+ _logger.log(SUBDEBUG, msg, *args)\r
+\r
+def debug(msg, *args):\r
+ if _logger:\r
+ _logger.log(DEBUG, msg, *args)\r
+\r
+def info(msg, *args):\r
+ if _logger:\r
+ _logger.log(INFO, msg, *args)\r
+\r
+def sub_warning(msg, *args):\r
+ if _logger:\r
+ _logger.log(SUBWARNING, msg, *args)\r
+\r
+def get_logger():\r
+ '''\r
+ Returns logger used by multiprocessing\r
+ '''\r
+ global _logger\r
+\r
+ if not _logger:\r
+ import logging, atexit\r
+\r
+ # XXX multiprocessing should clean up before logging\r
+ if hasattr(atexit, 'unregister'):\r
+ atexit.unregister(_exit_function)\r
+ atexit.register(_exit_function)\r
+ else:\r
+ atexit._exithandlers.remove((_exit_function, (), {}))\r
+ atexit._exithandlers.append((_exit_function, (), {}))\r
+\r
+ _check_logger_class()\r
+ _logger = logging.getLogger(LOGGER_NAME)\r
+\r
+ return _logger\r
+\r
+def _check_logger_class():\r
+ '''\r
+ Make sure process name is recorded when loggers are used\r
+ '''\r
+ # XXX This function is unnecessary once logging is patched\r
+ import logging\r
+ if hasattr(logging, 'multiprocessing'):\r
+ return\r
+ \r
+ logging._acquireLock()\r
+ try:\r
+ OldLoggerClass = logging.getLoggerClass()\r
+ if not getattr(OldLoggerClass, '_process_aware', False):\r
+ class ProcessAwareLogger(OldLoggerClass):\r
+ _process_aware = True\r
+ def makeRecord(self, *args, **kwds):\r
+ record = OldLoggerClass.makeRecord(self, *args, **kwds)\r
+ record.processName = current_process()._name\r
+ return record\r
+ logging.setLoggerClass(ProcessAwareLogger)\r
+ finally:\r
+ logging._releaseLock()\r
+\r
+def log_to_stderr(level=None):\r
+ '''\r
+ Turn on logging and add a handler which prints to stderr\r
+ '''\r
+ global _log_to_stderr\r
+ import logging\r
+ logger = get_logger()\r
+ formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)\r
+ handler = logging.StreamHandler()\r
+ handler.setFormatter(formatter)\r
+ logger.addHandler(handler)\r
+ if level is not None:\r
+ logger.setLevel(level)\r
+ _log_to_stderr = True\r
+\r
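+# Example (illustrative): scripts can enable verbose output with\r
+#\r
+#     from multiprocessing.util import log_to_stderr, SUBDEBUG\r
+#     log_to_stderr(SUBDEBUG)\r
+\r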
+#\r
+# Function returning a temp directory which will be removed on exit\r
+#\r
+\r
+def get_temp_dir():\r
+ # get name of a temp directory which will be automatically cleaned up\r
+ if current_process()._tempdir is None:\r
+ import shutil, tempfile\r
+ tempdir = tempfile.mkdtemp(prefix='pymp-')\r
+ info('created temp directory %s', tempdir)\r
+ Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)\r
+ current_process()._tempdir = tempdir\r
+ return current_process()._tempdir\r
+\r
+#\r
+# Support for reinitialization of objects when bootstrapping a child process\r
+#\r
+\r
+_afterfork_registry = weakref.WeakValueDictionary()\r
+_afterfork_counter = itertools.count()\r
+\r
+def _run_after_forkers():\r
+ items = list(_afterfork_registry.items())\r
+ items.sort()\r
+ for (index, ident, func), obj in items:\r
+ try:\r
+ func(obj)\r
+ except Exception, e:\r
+ info('after forker raised exception %s', e)\r
+\r
+def register_after_fork(obj, func):\r
+ _afterfork_registry[(_afterfork_counter.next(), id(obj), func)] = obj\r
+\r
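+# A minimal sketch of typical register_after_fork() usage (illustrative\r
+# only; ``Resource`` is a hypothetical class).  The registered function is\r
+# called with the object as its argument in the child after a fork\r
+# (ForkAwareThreadLock below uses the same pattern):\r
+#\r
+#     class Resource(object):\r
+#         def __init__(self):\r
+#             self._cache = {}\r
+#             register_after_fork(self, Resource._clear)\r
+#         def _clear(self):\r
+#             self._cache.clear()   # per-process state, rebuilt lazily\r
+\r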
+#\r
+# Finalization using weakrefs\r
+#\r
+\r
+_finalizer_registry = {}\r
+_finalizer_counter = itertools.count()\r
+\r
+\r
+class Finalize(object):\r
+ '''\r
+ Class which supports object finalization using weakrefs\r
+ '''\r
+ def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):\r
+ assert exitpriority is None or type(exitpriority) is int\r
+\r
+ if obj is not None:\r
+ self._weakref = weakref.ref(obj, self)\r
+ else:\r
+ assert exitpriority is not None\r
+\r
+ self._callback = callback\r
+ self._args = args\r
+ self._kwargs = kwargs or {}\r
+ self._key = (exitpriority, _finalizer_counter.next())\r
+\r
+ _finalizer_registry[self._key] = self\r
+\r
+ def __call__(self, wr=None):\r
+ '''\r
+ Run the callback unless it has already been called or cancelled\r
+ '''\r
+ try:\r
+ del _finalizer_registry[self._key]\r
+ except KeyError:\r
+ sub_debug('finalizer no longer registered')\r
+ else:\r
+ sub_debug('finalizer calling %s with args %s and kwargs %s',\r
+ self._callback, self._args, self._kwargs)\r
+ res = self._callback(*self._args, **self._kwargs)\r
+ self._weakref = self._callback = self._args = \\r
+ self._kwargs = self._key = None\r
+ return res\r
+\r
+ def cancel(self):\r
+ '''\r
+ Cancel finalization of the object\r
+ '''\r
+ try:\r
+ del _finalizer_registry[self._key]\r
+ except KeyError:\r
+ pass\r
+ else:\r
+ self._weakref = self._callback = self._args = \\r
+ self._kwargs = self._key = None\r
+\r
+ def still_active(self):\r
+ '''\r
+ Return whether this finalizer is still waiting to invoke callback\r
+ '''\r
+ return self._key in _finalizer_registry\r
+\r
+ def __repr__(self):\r
+ try:\r
+ obj = self._weakref()\r
+ except (AttributeError, TypeError):\r
+ obj = None\r
+\r
+ if obj is None:\r
+ return '<Finalize object, dead>'\r
+\r
+ x = '<Finalize object, callback=%s' % \\r
+ getattr(self._callback, '__name__', self._callback)\r
+ if self._args:\r
+ x += ', args=' + str(self._args)\r
+ if self._kwargs:\r
+ x += ', kwargs=' + str(self._kwargs)\r
+ if self._key[0] is not None:\r
+ x += ', exitpriority=' + str(self._key[0])\r
+ return x + '>'\r
+\r
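+# A minimal usage sketch (illustrative only; ``conn`` is a hypothetical\r
+# connection object).  A finalizer tied to an object runs when the object\r
+# is garbage collected; one registered with obj=None runs at exit:\r
+#\r
+#     f = Finalize(conn, conn.close, exitpriority=10)\r
+#     f.still_active()   # -> True\r
+#     f()                # runs conn.close(); later calls do nothing\r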
+\r
+def _run_finalizers(minpriority=None):\r
+ '''\r
+ Run all finalizers whose exit priority is not None and at least minpriority\r
+\r
+ Finalizers with highest priority are called first; finalizers with\r
+ the same priority will be called in reverse order of creation.\r
+ '''\r
+ if minpriority is None:\r
+ f = lambda p : p[0][0] is not None\r
+ else:\r
+ f = lambda p : p[0][0] is not None and p[0][0] >= minpriority\r
+\r
+ items = [x for x in _finalizer_registry.items() if f(x)]\r
+ items.sort(reverse=True)\r
+\r
+ for key, finalizer in items:\r
+ sub_debug('calling %s', finalizer)\r
+ try:\r
+ finalizer()\r
+ except Exception:\r
+ import traceback\r
+ traceback.print_exc()\r
+\r
+ if minpriority is None:\r
+ _finalizer_registry.clear()\r
+\r
+#\r
+# Clean up on exit\r
+#\r
+\r
+def is_exiting():\r
+ '''\r
+ Returns true if the process is shutting down\r
+ '''\r
+ # module globals are set to None during interpreter shutdown, so\r
+ # `_exiting is None` also indicates that we are exiting\r
+ return _exiting or _exiting is None\r
+\r
+_exiting = False\r
+\r
+def _exit_function():\r
+ global _exiting\r
+ _exiting = True\r
+\r
+ info('process shutting down')\r
+ debug('running all "atexit" finalizers with priority >= 0')\r
+ _run_finalizers(0)\r
+\r
+ for p in active_children():\r
+ if p._daemonic:\r
+ info('calling terminate() for daemon %s', p.get_name())\r
+ p._popen.terminate()\r
+\r
+ for p in active_children():\r
+ info('calling join() for process %s', p.get_name())\r
+ p.join()\r
+\r
+ debug('running the remaining "atexit" finalizers')\r
+ _run_finalizers()\r
+\r
+atexit.register(_exit_function)\r
+\r
+#\r
+# Some fork aware types\r
+#\r
+\r
+class ForkAwareThreadLock(object):\r
+ def __init__(self):\r
+ self._lock = threading.Lock()\r
+ self.acquire = self._lock.acquire\r
+ self.release = self._lock.release\r
+ register_after_fork(self, ForkAwareThreadLock.__init__)\r
+\r
+class ForkAwareLocal(threading.local):\r
+ def __init__(self):\r
+ register_after_fork(self, lambda obj : obj.__dict__.clear())\r
+ def __reduce__(self):\r
+ return type(self), ()\r
+\r
+#\r
+# Try making some callable types picklable\r
+#\r
+\r
+def _reduce_method(m):\r
+ if m.im_self is None:\r
+ return getattr, (m.im_class, m.im_func.func_name)\r
+ else:\r
+ return getattr, (m.im_self, m.im_func.func_name)\r
+copy_reg.pickle(type(Finalize.__init__), _reduce_method)\r
+\r
+def _reduce_method_descriptor(m):\r
+ return getattr, (m.__objclass__, m.__name__)\r
+copy_reg.pickle(type(list.append), _reduce_method_descriptor)\r
+copy_reg.pickle(type(int.__add__), _reduce_method_descriptor)\r
+\r
+def _reduce_builtin_function_or_method(m):\r
+ return getattr, (m.__self__, m.__name__)\r
+copy_reg.pickle(type(list().append), _reduce_builtin_function_or_method)\r
+copy_reg.pickle(type(int().__add__), _reduce_builtin_function_or_method)\r
+\r
+try:\r
+ from functools import partial\r
+except ImportError:\r
+ pass\r
+else:\r
+ def _reduce_partial(p):\r
+ return _rebuild_partial, (p.func, p.args, p.keywords or {})\r
+ def _rebuild_partial(func, args, keywords):\r
+ return partial(func, *args, **keywords)\r
+ copy_reg.pickle(partial, _reduce_partial)\r
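+\r
+# With the reducers above registered, methods pickle by reference\r
+# (illustrative sketch, Python 2 semantics):\r
+#\r
+#     import pickle\r
+#     pickle.loads(pickle.dumps([].append))        # builtin bound method\r
+#     pickle.loads(pickle.dumps(Finalize.cancel))  # unbound method\r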
--- /dev/null
+#\r
+# Unit tests for the multiprocessing package\r
+#\r
+\r
+import unittest\r
+import threading\r
+import Queue\r
+import time\r
+import sys\r
+import os\r
+import gc\r
+import signal\r
+import array\r
+import copy\r
+import socket\r
+import random\r
+import logging\r
+\r
+import _multiprocessing\r
+import multiprocessing.dummy\r
+import multiprocessing.connection\r
+import multiprocessing.managers\r
+import multiprocessing.heap\r
+import multiprocessing.pool\r
+\r
+from multiprocessing import util\r
+\r
+#\r
+#\r
+#\r
+\r
+if sys.version_info >= (3, 0):\r
+ def latin(s):\r
+ return s.encode('latin')\r
+else:\r
+ latin = str\r
+\r
+try:\r
+ bytes\r
+except NameError:\r
+ bytes = str\r
+ def bytearray(seq):\r
+ return array.array('c', seq)\r
+\r
+#\r
+# Constants\r
+#\r
+\r
+LOG_LEVEL = util.SUBWARNING\r
+#LOG_LEVEL = logging.WARNING\r
+\r
+DELTA = 0.1\r
+CHECK_TIMINGS = False # setting this True makes tests take a lot longer\r
+ # and can sometimes cause some non-serious\r
+ # failures because some calls block a bit\r
+ # longer than expected\r
+if CHECK_TIMINGS:\r
+ TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4\r
+else:\r
+ TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1\r
+\r
+HAVE_GETVALUE = not getattr(_multiprocessing,\r
+ 'HAVE_BROKEN_SEM_GETVALUE', False)\r
+\r
+#\r
+# Creates a wrapper for a function which records the time it takes to finish\r
+#\r
+\r
+class TimingWrapper(object):\r
+\r
+ def __init__(self, func):\r
+ self.func = func\r
+ self.elapsed = None\r
+\r
+ def __call__(self, *args, **kwds):\r
+ t = time.time()\r
+ try:\r
+ return self.func(*args, **kwds)\r
+ finally:\r
+ self.elapsed = time.time() - t\r
+ \r
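+# Usage sketch (illustrative; ``q`` is any Queue-like object): wrap a\r
+# blocking call to assert on how long it blocked:\r
+#\r
+#     get = TimingWrapper(q.get)\r
+#     get(True, 0.1)    # returns or raises exactly as q.get would\r
+#     get.elapsed       # roughly 0.1 if the call timed out\r
+\r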
+#\r
+# Base class for test cases\r
+#\r
+\r
+class BaseTestCase(object):\r
+ \r
+ ALLOWED_TYPES = ('processes', 'manager', 'threads')\r
+\r
+ def assertTimingAlmostEqual(self, a, b):\r
+ if CHECK_TIMINGS:\r
+ self.assertAlmostEqual(a, b, 1)\r
+\r
+ def assertReturnsIfImplemented(self, value, func, *args):\r
+ try:\r
+ res = func(*args)\r
+ except NotImplementedError:\r
+ pass\r
+ else:\r
+ return self.assertEqual(value, res)\r
+\r
+#\r
+# Return the value of a semaphore\r
+#\r
+\r
+def get_value(self):\r
+ try:\r
+ return self.get_value()\r
+ except AttributeError:\r
+ try:\r
+ return self._Semaphore__value\r
+ except AttributeError:\r
+ try:\r
+ return self._value\r
+ except AttributeError:\r
+ raise NotImplementedError\r
+\r
+#\r
+# Testcases\r
+#\r
+\r
+class _TestProcess(BaseTestCase):\r
+ \r
+ ALLOWED_TYPES = ('processes', 'threads')\r
+ \r
+ def test_current(self):\r
+ if self.TYPE == 'threads':\r
+ return\r
+\r
+ current = self.current_process()\r
+ authkey = current.get_authkey()\r
+ \r
+ self.assertTrue(current.is_alive())\r
+ self.assertTrue(not current.is_daemon()) \r
+ self.assertTrue(isinstance(authkey, bytes))\r
+ self.assertTrue(len(authkey) > 0)\r
+ self.assertEqual(current.get_ident(), os.getpid())\r
+ self.assertEqual(current.get_exitcode(), None)\r
+\r
+ def _test(self, q, *args, **kwds):\r
+ current = self.current_process()\r
+ q.put(args)\r
+ q.put(kwds)\r
+ q.put(current.get_name())\r
+ if self.TYPE != 'threads':\r
+ q.put(bytes(current.get_authkey()))\r
+ q.put(current.pid)\r
+\r
+ def test_process(self):\r
+ q = self.Queue(1)\r
+ e = self.Event()\r
+ args = (q, 1, 2)\r
+ kwargs = {'hello':23, 'bye':2.54}\r
+ name = 'SomeProcess'\r
+ p = self.Process(\r
+ target=self._test, args=args, kwargs=kwargs, name=name\r
+ )\r
+ p.set_daemon(True)\r
+ current = self.current_process()\r
+\r
+ if self.TYPE != 'threads':\r
+ self.assertEquals(p.get_authkey(), current.get_authkey())\r
+ self.assertEquals(p.is_alive(), False)\r
+ self.assertEquals(p.is_daemon(), True)\r
+ self.assertTrue(p not in self.active_children())\r
+ self.assertTrue(type(self.active_children()) is list)\r
+ self.assertEqual(p.get_exitcode(), None)\r
+ \r
+ p.start()\r
+ \r
+ self.assertEquals(p.get_exitcode(), None)\r
+ self.assertEquals(p.is_alive(), True)\r
+ self.assertTrue(p in self.active_children())\r
+ \r
+ self.assertEquals(q.get(), args[1:])\r
+ self.assertEquals(q.get(), kwargs)\r
+ self.assertEquals(q.get(), p.get_name())\r
+ if self.TYPE != 'threads':\r
+ self.assertEquals(q.get(), current.get_authkey())\r
+ self.assertEquals(q.get(), p.pid)\r
+\r
+ p.join()\r
+\r
+ self.assertEquals(p.get_exitcode(), 0)\r
+ self.assertEquals(p.is_alive(), False)\r
+ self.assertTrue(p not in self.active_children()) \r
+\r
+ def _test_terminate(self):\r
+ time.sleep(1000)\r
+\r
+ def test_terminate(self):\r
+ if self.TYPE == 'threads':\r
+ return\r
+ \r
+ p = self.Process(target=self._test_terminate)\r
+ p.set_daemon(True)\r
+ p.start()\r
+\r
+ self.assertEqual(p.is_alive(), True)\r
+ self.assertTrue(p in self.active_children())\r
+ self.assertEqual(p.get_exitcode(), None)\r
+\r
+ p.terminate()\r
+\r
+ join = TimingWrapper(p.join)\r
+ self.assertEqual(join(), None)\r
+ self.assertTimingAlmostEqual(join.elapsed, 0.0)\r
+ \r
+ self.assertEqual(p.is_alive(), False)\r
+ self.assertTrue(p not in self.active_children())\r
+\r
+ p.join()\r
+\r
+ # XXX sometimes get p.get_exitcode() == 0 on Windows ...\r
+ #self.assertEqual(p.get_exitcode(), -signal.SIGTERM)\r
+\r
+ def test_cpu_count(self):\r
+ try:\r
+ cpus = multiprocessing.cpu_count()\r
+ except NotImplementedError:\r
+ cpus = 1\r
+ self.assertTrue(type(cpus) is int)\r
+ self.assertTrue(cpus >= 1)\r
+\r
+ def test_active_children(self):\r
+ self.assertEqual(type(self.active_children()), list)\r
+\r
+ p = self.Process(target=time.sleep, args=(DELTA,))\r
+ self.assertTrue(p not in self.active_children())\r
+ \r
+ p.start()\r
+ self.assertTrue(p in self.active_children())\r
+\r
+ p.join()\r
+ self.assertTrue(p not in self.active_children())\r
+\r
+ def _test_recursion(self, wconn, id):\r
+ from multiprocessing import forking\r
+ wconn.send(id)\r
+ if len(id) < 2:\r
+ for i in range(2):\r
+ p = self.Process(\r
+ target=self._test_recursion, args=(wconn, id+[i])\r
+ )\r
+ p.start()\r
+ p.join()\r
+\r
+ def test_recursion(self):\r
+ rconn, wconn = self.Pipe(duplex=False)\r
+ self._test_recursion(wconn, [])\r
+ \r
+ time.sleep(DELTA)\r
+ result = []\r
+ while rconn.poll():\r
+ result.append(rconn.recv())\r
+ \r
+ expected = [\r
+ [],\r
+ [0],\r
+ [0, 0],\r
+ [0, 1],\r
+ [1],\r
+ [1, 0],\r
+ [1, 1]\r
+ ]\r
+ self.assertEqual(result, expected)\r
+\r
+#\r
+#\r
+#\r
+\r
+class _UpperCaser(multiprocessing.Process):\r
+\r
+ def __init__(self):\r
+ multiprocessing.Process.__init__(self)\r
+ self.child_conn, self.parent_conn = multiprocessing.Pipe()\r
+\r
+ def run(self):\r
+ self.parent_conn.close()\r
+ for s in iter(self.child_conn.recv, None):\r
+ self.child_conn.send(s.upper())\r
+ self.child_conn.close()\r
+\r
+ def submit(self, s):\r
+ assert type(s) is str\r
+ self.parent_conn.send(s)\r
+ return self.parent_conn.recv()\r
+\r
+ def stop(self):\r
+ self.parent_conn.send(None)\r
+ self.parent_conn.close()\r
+ self.child_conn.close()\r
+\r
+class _TestSubclassingProcess(BaseTestCase):\r
+\r
+ ALLOWED_TYPES = ('processes',)\r
+\r
+ def test_subclassing(self):\r
+ uppercaser = _UpperCaser()\r
+ uppercaser.start()\r
+ self.assertEqual(uppercaser.submit('hello'), 'HELLO')\r
+ self.assertEqual(uppercaser.submit('world'), 'WORLD')\r
+ uppercaser.stop()\r
+ uppercaser.join()\r
+ \r
+#\r
+#\r
+#\r
+\r
+def queue_empty(q):\r
+ if hasattr(q, 'empty'):\r
+ return q.empty()\r
+ else:\r
+ return q.qsize() == 0\r
+\r
+def queue_full(q, maxsize):\r
+ if hasattr(q, 'full'):\r
+ return q.full()\r
+ else:\r
+ return q.qsize() == maxsize\r
+\r
+\r
+class _TestQueue(BaseTestCase):\r
+\r
+\r
+ def _test_put(self, queue, child_can_start, parent_can_continue):\r
+ child_can_start.wait()\r
+ for i in range(6):\r
+ queue.get()\r
+ parent_can_continue.set()\r
+\r
+ def test_put(self):\r
+ MAXSIZE = 6\r
+ queue = self.Queue(maxsize=MAXSIZE)\r
+ child_can_start = self.Event()\r
+ parent_can_continue = self.Event()\r
+\r
+ proc = self.Process(\r
+ target=self._test_put,\r
+ args=(queue, child_can_start, parent_can_continue)\r
+ )\r
+ proc.set_daemon(True)\r
+ proc.start()\r
+ \r
+ self.assertEqual(queue_empty(queue), True)\r
+ self.assertEqual(queue_full(queue, MAXSIZE), False)\r
+\r
+ queue.put(1)\r
+ queue.put(2, True)\r
+ queue.put(3, True, None)\r
+ queue.put(4, False)\r
+ queue.put(5, False, None)\r
+ queue.put_nowait(6)\r
+\r
+ # the values may be in the buffer but not yet in the pipe, so sleep a bit\r
+ time.sleep(DELTA) \r
+\r
+ self.assertEqual(queue_empty(queue), False)\r
+ self.assertEqual(queue_full(queue, MAXSIZE), True)\r
+\r
+ put = TimingWrapper(queue.put)\r
+ put_nowait = TimingWrapper(queue.put_nowait)\r
+\r
+ self.assertRaises(Queue.Full, put, 7, False)\r
+ self.assertTimingAlmostEqual(put.elapsed, 0)\r
+\r
+ self.assertRaises(Queue.Full, put, 7, False, None)\r
+ self.assertTimingAlmostEqual(put.elapsed, 0)\r
+\r
+ self.assertRaises(Queue.Full, put_nowait, 7)\r
+ self.assertTimingAlmostEqual(put_nowait.elapsed, 0)\r
+\r
+ self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)\r
+ self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)\r
+\r
+ self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)\r
+ self.assertTimingAlmostEqual(put.elapsed, 0)\r
+\r
+ self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)\r
+ self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)\r
+\r
+ child_can_start.set()\r
+ parent_can_continue.wait()\r
+\r
+ self.assertEqual(queue_empty(queue), True)\r
+ self.assertEqual(queue_full(queue, MAXSIZE), False)\r
+\r
+ proc.join()\r
+\r
+ def _test_get(self, queue, child_can_start, parent_can_continue):\r
+ child_can_start.wait()\r
+ queue.put(1)\r
+ queue.put(2)\r
+ queue.put(3)\r
+ queue.put(4)\r
+ queue.put(5)\r
+ parent_can_continue.set()\r
+ \r
+ def test_get(self):\r
+ queue = self.Queue()\r
+ child_can_start = self.Event()\r
+ parent_can_continue = self.Event()\r
+ \r
+ proc = self.Process(\r
+ target=self._test_get,\r
+ args=(queue, child_can_start, parent_can_continue)\r
+ )\r
+ proc.set_daemon(True)\r
+ proc.start()\r
+ \r
+ self.assertEqual(queue_empty(queue), True)\r
+ \r
+ child_can_start.set()\r
+ parent_can_continue.wait()\r
+\r
+ time.sleep(DELTA)\r
+ self.assertEqual(queue_empty(queue), False)\r
+\r
+ self.assertEqual(queue.get(), 1)\r
+ self.assertEqual(queue.get(True, None), 2)\r
+ self.assertEqual(queue.get(True), 3)\r
+ self.assertEqual(queue.get(timeout=1), 4)\r
+ self.assertEqual(queue.get_nowait(), 5)\r
+ \r
+ self.assertEqual(queue_empty(queue), True)\r
+\r
+ get = TimingWrapper(queue.get)\r
+ get_nowait = TimingWrapper(queue.get_nowait)\r
+ \r
+ self.assertRaises(Queue.Empty, get, False)\r
+ self.assertTimingAlmostEqual(get.elapsed, 0)\r
+\r
+ self.assertRaises(Queue.Empty, get, False, None)\r
+ self.assertTimingAlmostEqual(get.elapsed, 0)\r
+\r
+ self.assertRaises(Queue.Empty, get_nowait)\r
+ self.assertTimingAlmostEqual(get_nowait.elapsed, 0)\r
+\r
+ self.assertRaises(Queue.Empty, get, True, TIMEOUT1)\r
+ self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)\r
+\r
+ self.assertRaises(Queue.Empty, get, False, TIMEOUT2)\r
+ self.assertTimingAlmostEqual(get.elapsed, 0)\r
+\r
+ self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)\r
+ self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)\r
+\r
+ proc.join()\r
+ \r
+ def _test_fork(self, queue):\r
+ for i in range(10, 20):\r
+ queue.put(i)\r
+ # note that at this point the items may only be buffered, so the\r
+ # process cannot shut down until the feeder thread has finished\r
+ # pushing items onto the pipe.\r
+\r
+ def test_fork(self):\r
+ # Old versions of Queue would fail to create a new feeder\r
+ # thread for a forked process if the original process had its\r
+ # own feeder thread. This test checks that this no longer\r
+ # happens.\r
+\r
+ queue = self.Queue()\r
+\r
+ # put items on queue so that main process starts a feeder thread\r
+ for i in range(10):\r
+ queue.put(i)\r
+\r
+ # wait to make sure thread starts before we fork a new process\r
+ time.sleep(DELTA)\r
+\r
+ # fork process\r
+ p = self.Process(target=self._test_fork, args=(queue,))\r
+ p.start()\r
+\r
+ # check that all expected items are in the queue\r
+ for i in range(20):\r
+ self.assertEqual(queue.get(), i)\r
+ self.assertRaises(Queue.Empty, queue.get, False)\r
+\r
+ p.join()\r
+\r
+ def test_qsize(self):\r
+ q = self.Queue()\r
+ try:\r
+ self.assertEqual(q.qsize(), 0)\r
+ except NotImplementedError:\r
+ return\r
+ q.put(1)\r
+ self.assertEqual(q.qsize(), 1)\r
+ q.put(5)\r
+ self.assertEqual(q.qsize(), 2)\r
+ q.get()\r
+ self.assertEqual(q.qsize(), 1)\r
+ q.get()\r
+ self.assertEqual(q.qsize(), 0)\r
+\r
+ def _test_task_done(self, q):\r
+ for obj in iter(q.get, None):\r
+ time.sleep(DELTA)\r
+ q.task_done()\r
+\r
+ def test_task_done(self):\r
+ queue = self.JoinableQueue()\r
+\r
+ if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):\r
+ return\r
+\r
+ workers = [self.Process(target=self._test_task_done, args=(queue,))\r
+ for i in xrange(4)]\r
+ \r
+ for p in workers:\r
+ p.start()\r
+\r
+ for i in xrange(10):\r
+ queue.put(i)\r
+\r
+ queue.join()\r
+\r
+ for p in workers:\r
+ queue.put(None)\r
+ \r
+ for p in workers:\r
+ p.join()\r
+\r
+#\r
+#\r
+#\r
+\r
+class _TestLock(BaseTestCase):\r
+\r
+ def test_lock(self):\r
+ lock = self.Lock()\r
+ self.assertEqual(lock.acquire(), True)\r
+ self.assertEqual(lock.acquire(False), False)\r
+ self.assertEqual(lock.release(), None)\r
+ self.assertRaises((ValueError, threading.ThreadError), lock.release)\r
+\r
+ def test_rlock(self):\r
+ lock = self.RLock()\r
+ self.assertEqual(lock.acquire(), True)\r
+ self.assertEqual(lock.acquire(), True)\r
+ self.assertEqual(lock.acquire(), True)\r
+ self.assertEqual(lock.release(), None)\r
+ self.assertEqual(lock.release(), None)\r
+ self.assertEqual(lock.release(), None)\r
+ self.assertRaises((AssertionError, RuntimeError), lock.release)\r
+ \r
+ \r
+class _TestSemaphore(BaseTestCase):\r
+\r
+ def _test_semaphore(self, sem):\r
+ self.assertReturnsIfImplemented(2, get_value, sem)\r
+ self.assertEqual(sem.acquire(), True)\r
+ self.assertReturnsIfImplemented(1, get_value, sem)\r
+ self.assertEqual(sem.acquire(), True)\r
+ self.assertReturnsIfImplemented(0, get_value, sem)\r
+ self.assertEqual(sem.acquire(False), False)\r
+ self.assertReturnsIfImplemented(0, get_value, sem)\r
+ self.assertEqual(sem.release(), None)\r
+ self.assertReturnsIfImplemented(1, get_value, sem)\r
+ self.assertEqual(sem.release(), None)\r
+ self.assertReturnsIfImplemented(2, get_value, sem)\r
+ \r
+ def test_semaphore(self):\r
+ sem = self.Semaphore(2)\r
+ self._test_semaphore(sem)\r
+ self.assertEqual(sem.release(), None)\r
+ self.assertReturnsIfImplemented(3, get_value, sem)\r
+ self.assertEqual(sem.release(), None)\r
+ self.assertReturnsIfImplemented(4, get_value, sem)\r
+\r
+ def test_bounded_semaphore(self):\r
+ sem = self.BoundedSemaphore(2)\r
+ self._test_semaphore(sem)\r
+ # Currently fails on OS X\r
+ #if HAVE_GETVALUE:\r
+ # self.assertRaises(ValueError, sem.release)\r
+ # self.assertReturnsIfImplemented(2, get_value, sem)\r
+\r
+ def test_timeout(self):\r
+ if self.TYPE != 'processes':\r
+ return\r
+\r
+ sem = self.Semaphore(0)\r
+ acquire = TimingWrapper(sem.acquire)\r
+\r
+ self.assertEqual(acquire(False), False)\r
+ self.assertTimingAlmostEqual(acquire.elapsed, 0.0)\r
+\r
+ self.assertEqual(acquire(False, None), False)\r
+ self.assertTimingAlmostEqual(acquire.elapsed, 0.0)\r
+\r
+ self.assertEqual(acquire(False, TIMEOUT1), False)\r
+ self.assertTimingAlmostEqual(acquire.elapsed, 0)\r
+\r
+ self.assertEqual(acquire(True, TIMEOUT2), False)\r
+ self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)\r
+\r
+ self.assertEqual(acquire(timeout=TIMEOUT3), False)\r
+ self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)\r
+\r
+\r
+class _TestCondition(BaseTestCase):\r
+ \r
+ def f(self, cond, sleeping, woken, timeout=None):\r
+ cond.acquire()\r
+ sleeping.release()\r
+ cond.wait(timeout)\r
+ woken.release()\r
+ cond.release()\r
+ \r
+ def check_invariant(self, cond):\r
+ # this is only supposed to succeed when there are no sleepers\r
+ if self.TYPE == 'processes':\r
+ try:\r
+ sleepers = (cond._sleeping_count.get_value() -\r
+ cond._woken_count.get_value())\r
+ self.assertEqual(sleepers, 0)\r
+ self.assertEqual(cond._wait_semaphore.get_value(), 0)\r
+ except NotImplementedError:\r
+ pass\r
+ \r
+ def test_notify(self):\r
+ cond = self.Condition()\r
+ sleeping = self.Semaphore(0)\r
+ woken = self.Semaphore(0)\r
+ \r
+ p = self.Process(target=self.f, args=(cond, sleeping, woken))\r
+ p.set_daemon(True)\r
+ p.start()\r
+\r
+ p = threading.Thread(target=self.f, args=(cond, sleeping, woken))\r
+ p.setDaemon(True)\r
+ p.start()\r
+ \r
+ # wait for both children to start sleeping\r
+ sleeping.acquire()\r
+ sleeping.acquire()\r
+ \r
+ # check no process/thread has woken up\r
+ time.sleep(DELTA)\r
+ self.assertReturnsIfImplemented(0, get_value, woken)\r
+\r
+ # wake up one process/thread\r
+ cond.acquire()\r
+ cond.notify()\r
+ cond.release()\r
+ \r
+ # check one process/thread has woken up\r
+ time.sleep(DELTA)\r
+ self.assertReturnsIfImplemented(1, get_value, woken)\r
+\r
+ # wake up another\r
+ cond.acquire()\r
+ cond.notify()\r
+ cond.release()\r
+ \r
+ # check other has woken up\r
+ time.sleep(DELTA)\r
+ self.assertReturnsIfImplemented(2, get_value, woken)\r
+ \r
+ # check state is not mucked up\r
+ self.check_invariant(cond)\r
+ p.join()\r
+ \r
+ def test_notify_all(self):\r
+ cond = self.Condition()\r
+ sleeping = self.Semaphore(0)\r
+ woken = self.Semaphore(0)\r
+\r
+ # start some threads/processes which will timeout\r
+ for i in range(3):\r
+ p = self.Process(target=self.f,\r
+ args=(cond, sleeping, woken, TIMEOUT1))\r
+ p.set_daemon(True)\r
+ p.start()\r
+\r
+ t = threading.Thread(target=self.f,\r
+ args=(cond, sleeping, woken, TIMEOUT1))\r
+ t.setDaemon(True)\r
+ t.start()\r
+\r
+ # wait for them all to sleep\r
+ for i in xrange(6):\r
+ sleeping.acquire()\r
+\r
+ # check they have all timed out\r
+ for i in xrange(6):\r
+ woken.acquire()\r
+ self.assertReturnsIfImplemented(0, get_value, woken)\r
+\r
+ # check state is not mucked up\r
+ self.check_invariant(cond)\r
+\r
+ # start some more threads/processes\r
+ for i in range(3):\r
+ p = self.Process(target=self.f, args=(cond, sleeping, woken))\r
+ p.set_daemon(True)\r
+ p.start()\r
+ \r
+ t = threading.Thread(target=self.f, args=(cond, sleeping, woken))\r
+ t.setDaemon(True)\r
+ t.start()\r
+ \r
+ # wait for them to all sleep\r
+ for i in xrange(6):\r
+ sleeping.acquire()\r
+ \r
+ # check no process/thread has woken up\r
+ time.sleep(DELTA)\r
+ self.assertReturnsIfImplemented(0, get_value, woken)\r
+\r
+ # wake them all up\r
+ cond.acquire()\r
+ cond.notify_all()\r
+ cond.release()\r
+\r
+ # check they have all woken\r
+ time.sleep(DELTA)\r
+ self.assertReturnsIfImplemented(6, get_value, woken)\r
+\r
+ # check state is not mucked up\r
+ self.check_invariant(cond)\r
+\r
+ def test_timeout(self):\r
+ cond = self.Condition()\r
+ wait = TimingWrapper(cond.wait)\r
+ cond.acquire()\r
+ res = wait(TIMEOUT1)\r
+ cond.release()\r
+ self.assertEqual(res, None)\r
+ self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)\r
+\r
+ \r
+class _TestEvent(BaseTestCase):\r
+\r
+ def _test_event(self, event):\r
+ time.sleep(TIMEOUT2)\r
+ event.set()\r
+\r
+ def test_event(self):\r
+ event = self.Event()\r
+ wait = TimingWrapper(event.wait)\r
+ \r
+ # Removed temporarily, due to API shear: this does not\r
+ # work with threading._Event objects (is_set == isSet)\r
+ #self.assertEqual(event.is_set(), False)\r
+ \r
+ self.assertEqual(wait(0.0), None)\r
+ self.assertTimingAlmostEqual(wait.elapsed, 0.0)\r
+ self.assertEqual(wait(TIMEOUT1), None)\r
+ self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)\r
+\r
+ event.set()\r
+\r
+ # See note above on the API differences\r
+ # self.assertEqual(event.is_set(), True)\r
+ self.assertEqual(wait(), None)\r
+ self.assertTimingAlmostEqual(wait.elapsed, 0.0)\r
+ self.assertEqual(wait(TIMEOUT1), None)\r
+ self.assertTimingAlmostEqual(wait.elapsed, 0.0)\r
+ # self.assertEqual(event.is_set(), True)\r
+\r
+ event.clear()\r
+\r
+ #self.assertEqual(event.is_set(), False)\r
+\r
+ self.Process(target=self._test_event, args=(event,)).start()\r
+ self.assertEqual(wait(), None)\r
+\r
+#\r
+#\r
+#\r
+\r
+class _TestValue(BaseTestCase):\r
+\r
+ codes_values = [\r
+ ('i', 4343, 24234),\r
+ ('d', 3.625, -4.25),\r
+ ('h', -232, 234),\r
+ ('c', latin('x'), latin('y'))\r
+ ]\r
+\r
+ def _test(self, values):\r
+ for sv, cv in zip(values, self.codes_values):\r
+ sv.value = cv[2]\r
+ \r
+ \r
+ def test_value(self, raw=False):\r
+ if self.TYPE != 'processes':\r
+ return\r
+\r
+ if raw:\r
+ values = [self.RawValue(code, value)\r
+ for code, value, _ in self.codes_values]\r
+ else:\r
+ values = [self.Value(code, value)\r
+ for code, value, _ in self.codes_values]\r
+ \r
+ for sv, cv in zip(values, self.codes_values):\r
+ self.assertEqual(sv.value, cv[1])\r
+ \r
+ proc = self.Process(target=self._test, args=(values,))\r
+ proc.start()\r
+ proc.join()\r
+\r
+ for sv, cv in zip(values, self.codes_values):\r
+ self.assertEqual(sv.value, cv[2])\r
+\r
+ def test_rawvalue(self):\r
+ self.test_value(raw=True)\r
+\r
+ def test_getobj_getlock(self):\r
+ if self.TYPE != 'processes':\r
+ return\r
+\r
+ val1 = self.Value('i', 5)\r
+ lock1 = val1.get_lock()\r
+ obj1 = val1.get_obj()\r
+\r
+ val2 = self.Value('i', 5, lock=None)\r
+ lock2 = val2.get_lock()\r
+ obj2 = val2.get_obj()\r
+\r
+ lock = self.Lock()\r
+ val3 = self.Value('i', 5, lock=lock)\r
+ lock3 = val3.get_lock()\r
+ obj3 = val3.get_obj()\r
+ self.assertEqual(lock, lock3)\r
+ \r
+ arr4 = self.RawValue('i', 5)\r
+ self.assertFalse(hasattr(arr4, 'get_lock'))\r
+ self.assertFalse(hasattr(arr4, 'get_obj'))\r
+\r
+\r
+class _TestArray(BaseTestCase):\r
+\r
+ def f(self, seq):\r
+ for i in range(1, len(seq)):\r
+ seq[i] += seq[i-1]\r
+\r
+ def test_array(self, raw=False):\r
+ if self.TYPE != 'processes':\r
+ return\r
+\r
+ seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]\r
+ if raw:\r
+ arr = self.RawArray('i', seq)\r
+ else:\r
+ arr = self.Array('i', seq)\r
+ \r
+ self.assertEqual(len(arr), len(seq))\r
+ self.assertEqual(arr[3], seq[3])\r
+ self.assertEqual(list(arr[2:7]), list(seq[2:7]))\r
+ \r
+ arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])\r
+ \r
+ self.assertEqual(list(arr[:]), seq)\r
+ \r
+ self.f(seq)\r
+ \r
+ p = self.Process(target=self.f, args=(arr,))\r
+ p.start()\r
+ p.join()\r
+ \r
+ self.assertEqual(list(arr[:]), seq)\r
+ \r
+ def test_rawarray(self):\r
+ self.test_array(raw=True)\r
+ \r
+ def test_getobj_getlock_obj(self):\r
+ if self.TYPE != 'processes':\r
+ return\r
+\r
+ arr1 = self.Array('i', range(10))\r
+ lock1 = arr1.get_lock()\r
+ obj1 = arr1.get_obj()\r
+\r
+ arr2 = self.Array('i', range(10), lock=None)\r
+ lock2 = arr2.get_lock()\r
+ obj2 = arr2.get_obj()\r
+\r
+ lock = self.Lock()\r
+ arr3 = self.Array('i', range(10), lock=lock)\r
+ lock3 = arr3.get_lock()\r
+ obj3 = arr3.get_obj()\r
+ self.assertEqual(lock, lock3)\r
+ \r
+ arr4 = self.RawArray('i', range(10))\r
+ self.assertFalse(hasattr(arr4, 'get_lock'))\r
+ self.assertFalse(hasattr(arr4, 'get_obj'))\r
+\r
+#\r
+#\r
+#\r
+\r
+class _TestContainers(BaseTestCase):\r
+\r
+ ALLOWED_TYPES = ('manager',)\r
+\r
+ def test_list(self):\r
+ a = self.list(range(10))\r
+ self.assertEqual(a[:], range(10))\r
+ \r
+ b = self.list()\r
+ self.assertEqual(b[:], [])\r
+ \r
+ b.extend(range(5))\r
+ self.assertEqual(b[:], range(5))\r
+ \r
+ self.assertEqual(b[2], 2)\r
+ self.assertEqual(b[2:10], [2,3,4])\r
+\r
+ b *= 2\r
+ self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])\r
+\r
+ self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])\r
+\r
+ self.assertEqual(a[:], range(10))\r
+\r
+ d = [a, b]\r
+ e = self.list(d)\r
+ self.assertEqual(\r
+ e[:],\r
+ [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]\r
+ )\r
+ \r
+ f = self.list([a])\r
+ a.append('hello')\r
+ self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])\r
+\r
+ def test_dict(self):\r
+ d = self.dict()\r
+ indices = range(65, 70)\r
+ for i in indices:\r
+ d[i] = chr(i)\r
+ self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))\r
+ self.assertEqual(sorted(d.keys()), indices)\r
+ self.assertEqual(sorted(d.values()), [chr(i) for i in indices])\r
+ self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])\r
+ \r
+ def test_namespace(self):\r
+ n = self.Namespace()\r
+ n.name = 'Bob'\r
+ n.job = 'Builder'\r
+ n._hidden = 'hidden'\r
+ self.assertEqual((n.name, n.job), ('Bob', 'Builder'))\r
+ del n.job\r
+ self.assertEqual(str(n), "Namespace(name='Bob')")\r
+ self.assertTrue(hasattr(n, 'name'))\r
+ self.assertTrue(not hasattr(n, 'job'))\r
+\r
+#\r
+#\r
+#\r
+\r
+def sqr(x, wait=0.0):\r
+ time.sleep(wait)\r
+ return x*x\r
+\r
+class _TestPool(BaseTestCase):\r
+\r
+ def test_apply(self):\r
+ papply = self.pool.apply\r
+ self.assertEqual(papply(sqr, (5,)), sqr(5))\r
+ self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))\r
+\r
+ def test_map(self):\r
+ pmap = self.pool.map\r
+ self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))\r
+ self.assertEqual(pmap(sqr, range(100), chunksize=20),\r
+ map(sqr, range(100)))\r
+ \r
+ def test_async(self):\r
+ res = self.pool.apply_async(sqr, (7, TIMEOUT1,))\r
+ get = TimingWrapper(res.get)\r
+ self.assertEqual(get(), 49)\r
+ self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)\r
+\r
+ def test_async_timeout(self):\r
+ res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))\r
+ get = TimingWrapper(res.get)\r
+ self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)\r
+ self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)\r
+\r
+ def test_imap(self):\r
+ it = self.pool.imap(sqr, range(10))\r
+ self.assertEqual(list(it), map(sqr, range(10)))\r
+\r
+ it = self.pool.imap(sqr, range(10))\r
+ for i in range(10):\r
+ self.assertEqual(it.next(), i*i)\r
+ self.assertRaises(StopIteration, it.next)\r
+\r
+ it = self.pool.imap(sqr, range(1000), chunksize=100)\r
+ for i in range(1000):\r
+ self.assertEqual(it.next(), i*i)\r
+ self.assertRaises(StopIteration, it.next)\r
+\r
+ def test_imap_unordered(self):\r
+ it = self.pool.imap_unordered(sqr, range(1000))\r
+ self.assertEqual(sorted(it), map(sqr, range(1000)))\r
+\r
+ it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)\r
+ self.assertEqual(sorted(it), map(sqr, range(1000)))\r
+\r
+ def test_make_pool(self):\r
+ p = multiprocessing.Pool(3)\r
+ self.assertEqual(3, len(p._pool))\r
+ p.close()\r
+ p.join()\r
+\r
+ def test_terminate(self):\r
+ if self.TYPE == 'manager':\r
+ # On Unix a forked process increfs each shared object to\r
+ # which its parent process held a reference. If the\r
+ # forked process gets terminated then there is likely to\r
+ # be a reference leak. So to prevent\r
+ # _TestZZZNumberOfObjects from failing we skip this test\r
+ # when using a manager.\r
+ return\r
+\r
+ result = self.pool.map_async(\r
+ time.sleep, [0.1 for i in range(10000)], chunksize=1\r
+ )\r
+ self.pool.terminate()\r
+ join = TimingWrapper(self.pool.join)\r
+ join()\r
+ self.assertTrue(join.elapsed < 0.2)\r
+\r
+#\r
+# Test that manager has expected number of shared objects left\r
+#\r
+\r
+class _TestZZZNumberOfObjects(BaseTestCase):\r
+ # Because test cases are sorted alphabetically, this one will get\r
+ # run after all the other tests for the manager. It tests that\r
+ # there have been no "reference leaks" for the manager's shared\r
+ # objects. Note the comment in _TestPool.test_terminate().\r
+ ALLOWED_TYPES = ('manager',)\r
+\r
+ def test_number_of_objects(self):\r
+ EXPECTED_NUMBER = 1 # the pool object is still alive\r
+ multiprocessing.active_children() # discard dead process objs\r
+ gc.collect() # do garbage collection\r
+ refs = self.manager._number_of_objects()\r
+ if refs != EXPECTED_NUMBER:\r
+ print self.manager._debugInfo()\r
+\r
+ self.assertEqual(refs, EXPECTED_NUMBER)\r
+\r
+#\r
+# Test of creating a customized manager class\r
+#\r
+\r
+from multiprocessing.managers import BaseManager, BaseProxy, RemoteError\r
+ \r
+class FooBar(object):\r
+ def f(self):\r
+ return 'f()'\r
+ def g(self):\r
+ raise ValueError\r
+ def _h(self):\r
+ return '_h()'\r
+ \r
+def baz():\r
+ for i in xrange(10):\r
+ yield i*i\r
+\r
+class IteratorProxy(BaseProxy):\r
+ _exposed_ = ('next', '__next__')\r
+ def __iter__(self):\r
+ return self\r
+ def next(self):\r
+ return self._callmethod('next')\r
+ def __next__(self):\r
+ return self._callmethod('__next__')\r
+\r
+class MyManager(BaseManager):\r
+ pass\r
+\r
+MyManager.register('Foo', callable=FooBar)\r
+MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))\r
+MyManager.register('baz', callable=baz, proxytype=IteratorProxy)\r
+\r
+\r
+class _TestMyManager(BaseTestCase):\r
+ \r
+ ALLOWED_TYPES = ('manager',)\r
+\r
+ def test_mymanager(self):\r
+ manager = MyManager()\r
+ manager.start()\r
+ \r
+ foo = manager.Foo()\r
+ bar = manager.Bar()\r
+ baz = manager.baz()\r
+ \r
+ foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]\r
+ bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]\r
+ \r
+ self.assertEqual(foo_methods, ['f', 'g'])\r
+ self.assertEqual(bar_methods, ['f', '_h'])\r
+ \r
+ self.assertEqual(foo.f(), 'f()')\r
+ self.assertRaises(ValueError, foo.g)\r
+ self.assertEqual(foo._callmethod('f'), 'f()')\r
+ self.assertRaises(RemoteError, foo._callmethod, '_h')\r
+ \r
+ self.assertEqual(bar.f(), 'f()')\r
+ self.assertEqual(bar._h(), '_h()')\r
+ self.assertEqual(bar._callmethod('f'), 'f()')\r
+ self.assertEqual(bar._callmethod('_h'), '_h()')\r
+ \r
+ self.assertEqual(list(baz), [i*i for i in range(10)])\r
+ \r
+ manager.shutdown()\r
+ \r
+#\r
+# Test of connecting to a remote server and using xmlrpclib for serialization\r
+#\r
+\r
+_queue = Queue.Queue()\r
+def get_queue():\r
+ return _queue\r
+\r
+class QueueManager(BaseManager):\r
+ '''manager class used by server process'''\r
+QueueManager.register('get_queue', callable=get_queue)\r
+\r
+class QueueManager2(BaseManager):\r
+ '''manager class which specifies the same interface as QueueManager'''\r
+QueueManager2.register('get_queue')\r
+\r
+\r
+SERIALIZER = 'xmlrpclib'\r
+\r
+class _TestRemoteManager(BaseTestCase):\r
+\r
+ ALLOWED_TYPES = ('manager',)\r
+ \r
+ def _putter(self, address, authkey):\r
+ manager = QueueManager2(\r
+ address=address, authkey=authkey, serializer=SERIALIZER\r
+ )\r
+ manager.connect()\r
+ queue = manager.get_queue()\r
+ queue.put(('hello world', None, True, 2.25))\r
+\r
+ def test_remote(self):\r
+ authkey = os.urandom(32)\r
+\r
+ manager = QueueManager(\r
+ address=('localhost', 0), authkey=authkey, serializer=SERIALIZER\r
+ )\r
+ manager.start()\r
+\r
+ p = self.Process(target=self._putter, args=(manager.address, authkey))\r
+ p.start()\r
+ \r
+ manager2 = QueueManager2(\r
+ address=manager.address, authkey=authkey, serializer=SERIALIZER\r
+ )\r
+ manager2.connect()\r
+ queue = manager2.get_queue()\r
+ \r
+ # Note that xmlrpclib will deserialize the object as a list, not a tuple\r
+ self.assertEqual(queue.get(), ['hello world', None, True, 2.25])\r
+\r
+ # Because we are using xmlrpclib for serialization instead of\r
+ # pickle this will cause a serialization error.\r
+ self.assertRaises(Exception, queue.put, time.sleep)\r
+\r
+ # Make queue finalizer run before the server is stopped\r
+ del queue\r
+ manager.shutdown()\r
+\r
+#\r
+#\r
+#\r
+\r
+SENTINEL = latin('')\r
+\r
+class _TestConnection(BaseTestCase):\r
+\r
+ ALLOWED_TYPES = ('processes', 'threads')\r
+\r
+ def _echo(self, conn):\r
+ for msg in iter(conn.recv_bytes, SENTINEL):\r
+ conn.send_bytes(msg)\r
+ conn.close()\r
+\r
+ def test_connection(self):\r
+ conn, child_conn = self.Pipe()\r
+ \r
+ p = self.Process(target=self._echo, args=(child_conn,))\r
+ p.set_daemon(True)\r
+ p.start()\r
+\r
+ seq = [1, 2.25, None]\r
+ msg = latin('hello world')\r
+ longmsg = msg * 10\r
+ arr = array.array('i', range(4))\r
+\r
+ if self.TYPE == 'processes':\r
+ self.assertEqual(type(conn.fileno()), int)\r
+\r
+ self.assertEqual(conn.send(seq), None)\r
+ self.assertEqual(conn.recv(), seq)\r
+\r
+ self.assertEqual(conn.send_bytes(msg), None)\r
+ self.assertEqual(conn.recv_bytes(), msg)\r
+\r
+ if self.TYPE == 'processes':\r
+ buffer = array.array('i', [0]*10)\r
+ expected = list(arr) + [0] * (10 - len(arr))\r
+ self.assertEqual(conn.send_bytes(arr), None)\r
+ self.assertEqual(conn.recv_bytes_into(buffer),\r
+ len(arr) * buffer.itemsize)\r
+ self.assertEqual(list(buffer), expected)\r
+\r
+ buffer = array.array('i', [0]*10)\r
+ expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))\r
+ self.assertEqual(conn.send_bytes(arr), None)\r
+ self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),\r
+ len(arr) * buffer.itemsize)\r
+ self.assertEqual(list(buffer), expected)\r
+\r
+ buffer = bytearray(latin(' ' * 40))\r
+ self.assertEqual(conn.send_bytes(longmsg), None)\r
+ try:\r
+ res = conn.recv_bytes_into(buffer)\r
+ except multiprocessing.BufferTooShort, e:\r
+ self.assertEqual(e.args, (longmsg,))\r
+ else:\r
+ self.fail('expected BufferTooShort, got %s' % res)\r
+\r
+ poll = TimingWrapper(conn.poll)\r
+\r
+ self.assertEqual(poll(), False)\r
+ self.assertTimingAlmostEqual(poll.elapsed, 0)\r
+\r
+ self.assertEqual(poll(TIMEOUT1), False)\r
+ self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)\r
+\r
+ conn.send(None)\r
+\r
+ self.assertEqual(poll(TIMEOUT1), True)\r
+ self.assertTimingAlmostEqual(poll.elapsed, 0)\r
+ \r
+ self.assertEqual(conn.recv(), None)\r
+\r
+ really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MB\r
+ conn.send_bytes(really_big_msg)\r
+ self.assertEqual(conn.recv_bytes(), really_big_msg)\r
+ \r
+ conn.send_bytes(SENTINEL) # tell child to quit\r
+ child_conn.close()\r
+\r
+ if self.TYPE == 'processes':\r
+ self.assertEqual(conn.readable, True)\r
+ self.assertEqual(conn.writable, True)\r
+ self.assertRaises(EOFError, conn.recv)\r
+ self.assertRaises(EOFError, conn.recv_bytes)\r
+\r
+ p.join()\r
+ \r
+ def test_duplex_false(self):\r
+ reader, writer = self.Pipe(duplex=False)\r
+ self.assertEqual(writer.send(1), None)\r
+ self.assertEqual(reader.recv(), 1)\r
+ if self.TYPE == 'processes':\r
+ self.assertEqual(reader.readable, True)\r
+ self.assertEqual(reader.writable, False)\r
+ self.assertEqual(writer.readable, False)\r
+ self.assertEqual(writer.writable, True)\r
+ self.assertRaises(IOError, reader.send, 2)\r
+ self.assertRaises(IOError, writer.recv)\r
+ self.assertRaises(IOError, writer.poll)\r
+\r
+ def test_spawn_close(self):\r
+ # We test that a pipe connection can be closed by the parent\r
+ # process immediately after the child is spawned. On Windows\r
+ # this sometimes failed in old versions because child_conn\r
+ # would be closed before the child got a chance to duplicate\r
+ # it.\r
+ conn, child_conn = self.Pipe()\r
+ \r
+ p = self.Process(target=self._echo, args=(child_conn,))\r
+ p.start()\r
+ child_conn.close() # this might complete before child initializes\r
+\r
+ msg = latin('hello')\r
+ conn.send_bytes(msg)\r
+ self.assertEqual(conn.recv_bytes(), msg)\r
+\r
+ conn.send_bytes(SENTINEL)\r
+ conn.close()\r
+ p.join()\r
+\r
+ def test_sendbytes(self):\r
+ if self.TYPE != 'processes':\r
+ return\r
+\r
+ msg = latin('abcdefghijklmnopqrstuvwxyz')\r
+ a, b = self.Pipe()\r
+ \r
+ a.send_bytes(msg)\r
+ self.assertEqual(b.recv_bytes(), msg)\r
+\r
+ a.send_bytes(msg, 5)\r
+ self.assertEqual(b.recv_bytes(), msg[5:])\r
+\r
+ a.send_bytes(msg, 7, 8)\r
+ self.assertEqual(b.recv_bytes(), msg[7:7+8])\r
+\r
+ a.send_bytes(msg, 26)\r
+ self.assertEqual(b.recv_bytes(), latin(''))\r
+\r
+ a.send_bytes(msg, 26, 0)\r
+ self.assertEqual(b.recv_bytes(), latin(''))\r
+\r
+ self.assertRaises(ValueError, a.send_bytes, msg, 27)\r
+ \r
+ self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)\r
+ \r
+ self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)\r
+\r
+ self.assertRaises(ValueError, a.send_bytes, msg, -1)\r
+\r
+ self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)\r
+ \r
+\r
+class _TestListenerClient(BaseTestCase):\r
+\r
+ ALLOWED_TYPES = ('processes', 'threads')\r
+\r
+ def _test(self, address):\r
+ conn = self.connection.Client(address)\r
+ conn.send('hello')\r
+ conn.close()\r
+\r
+ def test_listener_client(self): \r
+ for family in self.connection.families:\r
+ l = self.connection.Listener(family=family)\r
+ p = self.Process(target=self._test, args=(l.address,))\r
+ p.set_daemon(True)\r
+ p.start()\r
+ conn = l.accept()\r
+ self.assertEqual(conn.recv(), 'hello')\r
+ p.join()\r
+ l.close()\r
+\r
+#\r
+# Test of sending connection and socket objects between processes\r
+#\r
+\r
+class _TestPicklingConnections(BaseTestCase):\r
+\r
+ ALLOWED_TYPES = ('processes',)\r
+\r
+ def _listener(self, conn, families):\r
+ for fam in families:\r
+ l = self.connection.Listener(family=fam)\r
+ conn.send(l.address)\r
+ new_conn = l.accept()\r
+ conn.send(new_conn)\r
+\r
+ if self.TYPE == 'processes':\r
+ l = socket.socket()\r
+ l.bind(('localhost', 0))\r
+ conn.send(l.getsockname())\r
+ l.listen(1)\r
+ new_conn, addr = l.accept()\r
+ conn.send(new_conn)\r
+ \r
+ conn.recv()\r
+\r
+ def _remote(self, conn):\r
+ for (address, msg) in iter(conn.recv, None):\r
+ client = self.connection.Client(address)\r
+ client.send(msg.upper())\r
+ client.close()\r
+\r
+ if self.TYPE == 'processes':\r
+ address, msg = conn.recv()\r
+ client = socket.socket()\r
+ client.connect(address)\r
+ client.sendall(msg.upper())\r
+ client.close()\r
+\r
+ conn.close()\r
+\r
+ def test_pickling(self):\r
+ try:\r
+ multiprocessing.allow_connection_pickling()\r
+ except ImportError:\r
+ return\r
+ \r
+ families = self.connection.families\r
+\r
+ lconn, lconn0 = self.Pipe()\r
+ lp = self.Process(target=self._listener, args=(lconn0, families))\r
+ lp.start()\r
+ lconn0.close()\r
+\r
+ rconn, rconn0 = self.Pipe()\r
+ rp = self.Process(target=self._remote, args=(rconn0,))\r
+ rp.start()\r
+ rconn0.close()\r
+\r
+ for fam in families:\r
+ msg = ('This connection uses family %s' % fam).encode('ascii')\r
+ address = lconn.recv()\r
+ rconn.send((address, msg))\r
+ new_conn = lconn.recv()\r
+ self.assertEqual(new_conn.recv(), msg.upper())\r
+ \r
+ rconn.send(None)\r
+\r
+ if self.TYPE == 'processes':\r
+ msg = latin('This connection uses a normal socket')\r
+ address = lconn.recv()\r
+ rconn.send((address, msg))\r
+ if hasattr(socket, 'fromfd'):\r
+ new_conn = lconn.recv()\r
+ self.assertEqual(new_conn.recv(100), msg.upper())\r
+ else:\r
+ # XXX On Windows with Py2.6 need to backport fromfd()\r
+ discard = lconn.recv_bytes()\r
+ \r
+ lconn.send(None)\r
+ \r
+ rconn.close()\r
+ lconn.close()\r
+ \r
+ lp.join()\r
+ rp.join()\r
+\r
+#\r
+#\r
+#\r
+\r
+class _TestHeap(BaseTestCase):\r
+\r
+ ALLOWED_TYPES = ('processes',)\r
+\r
+ def test_heap(self):\r
+ iterations = 5000\r
+ maxblocks = 50\r
+ blocks = []\r
+\r
+ # create and destroy lots of blocks of different sizes\r
+ for i in xrange(iterations):\r
+ size = int(random.lognormvariate(0, 1) * 1000)\r
+ b = multiprocessing.heap.BufferWrapper(size)\r
+ blocks.append(b)\r
+ if len(blocks) > maxblocks:\r
+ i = random.randrange(maxblocks)\r
+ del blocks[i]\r
+\r
+ # get the heap object\r
+ heap = multiprocessing.heap.BufferWrapper._heap\r
+\r
+ # verify the state of the heap\r
+ all = []\r
+ occupied = 0\r
+ for L in heap._len_to_seq.values():\r
+ for arena, start, stop in L:\r
+ all.append((heap._arenas.index(arena), start, stop,\r
+ stop-start, 'free'))\r
+ for arena, start, stop in heap._allocated_blocks:\r
+ all.append((heap._arenas.index(arena), start, stop,\r
+ stop-start, 'occupied'))\r
+ occupied += (stop-start)\r
+\r
+ all.sort()\r
+\r
+ for i in range(len(all)-1):\r
+ (arena, start, stop) = all[i][:3]\r
+ (narena, nstart, nstop) = all[i+1][:3]\r
+ self.assertTrue((arena != narena and nstart == 0) or\r
+ (stop == nstart))\r
+ \r
+#\r
+#\r
+#\r
+\r
+# Value, Array and copy come from multiprocessing.sharedctypes (ctypes\r
+# itself has no Value or copy)\r
+try:\r
+ from multiprocessing.sharedctypes import Value, Array, copy\r
+ from ctypes import Structure, c_int, c_double\r
+except ImportError:\r
+ Structure = object\r
+ c_int = c_double = None\r
+\r
+class _Foo(Structure):\r
+ _fields_ = [\r
+ ('x', c_int),\r
+ ('y', c_double)\r
+ ]\r
+\r
+class _TestSharedCTypes(BaseTestCase):\r
+\r
+ ALLOWED_TYPES = ('processes',)\r
+\r
+ def _double(self, x, y, foo, arr, string):\r
+ x.value *= 2\r
+ y.value *= 2\r
+ foo.x *= 2\r
+ foo.y *= 2\r
+ string.value *= 2\r
+ for i in range(len(arr)):\r
+ arr[i] *= 2\r
+\r
+ def test_sharedctypes(self, lock=False):\r
+ if c_int is None:\r
+ return\r
+ \r
+ x = Value('i', 7, lock=lock)\r
+ y = Value(c_double, 1.0/3.0, lock=lock)\r
+ foo = Value(_Foo, 3, 2, lock=lock)\r
+ arr = Array('d', range(10), lock=lock)\r
+ string = Array('c', 20, lock=lock)\r
+ string.value = 'hello'\r
+\r
+ p = self.Process(target=self._double, args=(x, y, foo, arr, string))\r
+ p.start()\r
+ p.join()\r
+\r
+ self.assertEqual(x.value, 14)\r
+ self.assertAlmostEqual(y.value, 2.0/3.0)\r
+ self.assertEqual(foo.x, 6)\r
+ self.assertAlmostEqual(foo.y, 4.0)\r
+ for i in range(10):\r
+ self.assertAlmostEqual(arr[i], i*2)\r
+ self.assertEqual(string.value, latin('hellohello'))\r
+\r
+ def test_synchronize(self):\r
+ self.test_sharedctypes(lock=True)\r
+\r
+ def test_copy(self):\r
+ if c_int is None:\r
+ return\r
+\r
+ foo = _Foo(2, 5.0)\r
+ bar = copy(foo)\r
+ foo.x = 0\r
+ foo.y = 0\r
+ self.assertEqual(bar.x, 2)\r
+ self.assertAlmostEqual(bar.y, 5.0)\r
+\r
+#\r
+#\r
+#\r
+\r
+class _TestFinalize(BaseTestCase):\r
+\r
+ ALLOWED_TYPES = ('processes',)\r
+\r
+ def _test_finalize(self, conn):\r
+ class Foo(object):\r
+ pass\r
+\r
+ a = Foo()\r
+ util.Finalize(a, conn.send, args=('a',))\r
+ del a # triggers callback for a\r
+\r
+ b = Foo()\r
+ close_b = util.Finalize(b, conn.send, args=('b',)) \r
+ close_b() # triggers callback for b\r
+ close_b() # does nothing because callback has already been called\r
+ del b # does nothing because callback has already been called\r
+\r
+ c = Foo()\r
+ util.Finalize(c, conn.send, args=('c',))\r
+\r
+ d10 = Foo()\r
+ util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)\r
+\r
+ d01 = Foo()\r
+ util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)\r
+ d02 = Foo()\r
+ util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)\r
+ d03 = Foo()\r
+ util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)\r
+\r
+ util.Finalize(None, conn.send, args=('e',), exitpriority=-10)\r
+\r
+ util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)\r
+\r
+ # call multiprocessing's cleanup function then exit process without\r
+ # garbage collecting locals\r
+ util._exit_function()\r
+ conn.close()\r
+ os._exit(0)\r
+\r
+ def test_finalize(self):\r
+ conn, child_conn = self.Pipe()\r
+ \r
+ p = self.Process(target=self._test_finalize, args=(child_conn,))\r
+ p.start()\r
+ p.join()\r
+\r
+ result = [obj for obj in iter(conn.recv, 'STOP')]\r
+ self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])\r
+\r
+#\r
+# Test that from ... import * works for each module\r
+#\r
+\r
+class _TestImportStar(BaseTestCase):\r
+\r
+ ALLOWED_TYPES = ('processes',)\r
+\r
+ def test_import(self):\r
+ modules = (\r
+ 'multiprocessing', 'multiprocessing.connection',\r
+ 'multiprocessing.heap', 'multiprocessing.managers',\r
+ 'multiprocessing.pool', 'multiprocessing.process',\r
+ 'multiprocessing.reduction', 'multiprocessing.sharedctypes',\r
+ 'multiprocessing.synchronize', 'multiprocessing.util'\r
+ )\r
+ \r
+ for name in modules:\r
+ __import__(name)\r
+ mod = sys.modules[name]\r
+ \r
+ for attr in getattr(mod, '__all__', ()):\r
+ self.assertTrue(\r
+ hasattr(mod, attr),\r
+ '%r does not have attribute %r' % (mod, attr)\r
+ )\r
+\r
+#\r
+# Quick test that logging works -- does not test logging output\r
+#\r
+\r
+class _TestLogging(BaseTestCase):\r
+\r
+ ALLOWED_TYPES = ('processes',)\r
+\r
+ def test_enable_logging(self):\r
+ logger = multiprocessing.get_logger()\r
+ logger.setLevel(util.SUBWARNING)\r
+ self.assertTrue(logger is not None)\r
+ logger.debug('this will not be printed')\r
+ logger.info('nor will this')\r
+ logger.setLevel(LOG_LEVEL)\r
+\r
+ def _test_level(self, conn):\r
+ logger = multiprocessing.get_logger()\r
+ conn.send(logger.getEffectiveLevel())\r
+\r
+ def test_level(self):\r
+ LEVEL1 = 32\r
+ LEVEL2 = 37\r
+ \r
+ logger = multiprocessing.get_logger()\r
+ root_logger = logging.getLogger()\r
+ root_level = root_logger.level\r
+\r
+ reader, writer = multiprocessing.Pipe(duplex=False)\r
+\r
+ logger.setLevel(LEVEL1)\r
+ self.Process(target=self._test_level, args=(writer,)).start()\r
+ self.assertEqual(LEVEL1, reader.recv())\r
+\r
+ logger.setLevel(logging.NOTSET)\r
+ root_logger.setLevel(LEVEL2)\r
+ self.Process(target=self._test_level, args=(writer,)).start()\r
+ self.assertEqual(LEVEL2, reader.recv())\r
+\r
+ root_logger.setLevel(root_level)\r
+ logger.setLevel(level=LOG_LEVEL)\r
+\r
+#\r
+# Functions used to create test cases from the base ones in this module\r
+#\r
+\r
+def get_attributes(Source, names):\r
+ d = {}\r
+ for name in names:\r
+ obj = getattr(Source, name)\r
+ if type(obj) == type(get_attributes):\r
+ obj = staticmethod(obj)\r
+ d[name] = obj\r
+ return d\r
+\r
+def create_test_cases(Mixin, type):\r
+ result = {}\r
+ glob = globals()\r
+ Type = type[0].upper() + type[1:]\r
+\r
+ for name in glob.keys():\r
+ if name.startswith('_Test'):\r
+ base = glob[name]\r
+ if type in base.ALLOWED_TYPES:\r
+ newname = 'With' + Type + name[1:]\r
+ class Temp(base, unittest.TestCase, Mixin):\r
+ pass\r
+ result[newname] = Temp\r
+ Temp.__name__ = newname\r
+ Temp.__module__ = Mixin.__module__\r
+ return result\r
+\r
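+# Example (illustrative): with the mixins defined below,\r
+#\r
+#     create_test_cases(ProcessesMixin, type='processes')\r
+#\r
+# turns _TestLock into a runnable TestCase named WithProcessesTestLock\r
+# whose self.Lock, self.Process, etc. come from the mixin.\r
+\r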
+#\r
+# Create test cases\r
+#\r
+\r
+class ProcessesMixin(object):\r
+ TYPE = 'processes'\r
+ Process = multiprocessing.Process\r
+ locals().update(get_attributes(multiprocessing, (\r
+ 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',\r
+ 'Condition', 'Event', 'Value', 'Array', 'RawValue',\r
+ 'RawArray', 'current_process', 'active_children', 'Pipe',\r
+ 'connection', 'JoinableQueue'\r
+ )))\r
+\r
+testcases_processes = create_test_cases(ProcessesMixin, type='processes')\r
+globals().update(testcases_processes)\r
+\r
+\r
+class ManagerMixin(object):\r
+ TYPE = 'manager'\r
+ Process = multiprocessing.Process\r
+ manager = object.__new__(multiprocessing.managers.SyncManager)\r
+ locals().update(get_attributes(manager, (\r
+ 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', \r
+ 'Condition', 'Event', 'Value', 'Array', 'list', 'dict',\r
+ 'Namespace', 'JoinableQueue'\r
+ )))\r
+\r
+testcases_manager = create_test_cases(ManagerMixin, type='manager')\r
+globals().update(testcases_manager)\r
+\r
+\r
+class ThreadsMixin(object):\r
+ TYPE = 'threads'\r
+ Process = multiprocessing.dummy.Process\r
+ locals().update(get_attributes(multiprocessing.dummy, (\r
+ 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',\r
+ 'Condition', 'Event', 'Value', 'Array', 'current_process',\r
+ 'active_children', 'Pipe', 'connection', 'dict', 'list',\r
+ 'Namespace', 'JoinableQueue'\r
+ )))\r
+\r
+testcases_threads = create_test_cases(ThreadsMixin, type='threads')\r
+globals().update(testcases_threads)\r
+\r
+#\r
+#\r
+#\r
+\r
+def test_main(run=None):\r
+ if run is None:\r
+ from test.test_support import run_unittest as run\r
+\r
+ util.get_temp_dir() # creates temp directory for use by all processes\r
+ \r
+ multiprocessing.get_logger().setLevel(LOG_LEVEL)\r
+\r
+ ProcessesMixin.pool = multiprocessing.Pool(4)\r
+ ThreadsMixin.pool = multiprocessing.dummy.Pool(4)\r
+ ManagerMixin.manager.__init__()\r
+ ManagerMixin.manager.start()\r
+ ManagerMixin.pool = ManagerMixin.manager.Pool(4)\r
+\r
+ testcases = (\r
+ sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +\r
+ sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +\r
+ sorted(testcases_manager.values(), key=lambda tc:tc.__name__)\r
+ )\r
+\r
+ loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase\r
+ suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)\r
+ run(suite)\r
+\r
+ ThreadsMixin.pool.terminate()\r
+ ProcessesMixin.pool.terminate()\r
+ ManagerMixin.pool.terminate()\r
+ ManagerMixin.manager.shutdown()\r
+ \r
+ del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool\r
+\r
+def main():\r
+ test_main(unittest.TextTestRunner(verbosity=2).run)\r
+\r
+if __name__ == '__main__':\r
+ main()\r
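
For reference, a brief illustrative sketch (outside the patch) of the renaming
that create_test_cases() performs above: each _TestXxx base class is combined
with a mixin and republished under a type-specific name.

    # e.g. the _TestLogging base with type='processes' becomes:
    name, typ = '_TestLogging', 'processes'
    newname = 'With' + (typ[0].upper() + typ[1:]) + name[1:]
    assert newname == 'WithProcessesTestLogging'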
--- /dev/null
+/*
+ * Definition of a `Connection` type.
+ * Used by `socket_connection.c` and `pipe_connection.c`.
+ *
+ * connection.h
+ *
+ * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+ */
+
+#ifndef CONNECTION_H
+#define CONNECTION_H
+
+/*
+ * Read/write flags
+ */
+
+#define READABLE 1
+#define WRITABLE 2
+
+#define CHECK_READABLE(self) \
+ if (!(self->flags & READABLE)) { \
+ PyErr_SetString(PyExc_IOError, "connection is write-only"); \
+ return NULL; \
+ }
+
+#define CHECK_WRITABLE(self) \
+ if (!(self->flags & WRITABLE)) { \
+ PyErr_SetString(PyExc_IOError, "connection is read-only"); \
+ return NULL; \
+ }
+
+/*
+ * Allocation and deallocation
+ */
+
+static PyObject *
+connection_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ ConnectionObject *self;
+ HANDLE handle;
+ BOOL readable = TRUE, writable = TRUE;
+
+ static char *kwlist[] = {"handle", "readable", "writable", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, F_HANDLE "|ii", kwlist,
+ &handle, &readable, &writable))
+ return NULL;
+
+ if (handle == INVALID_HANDLE_VALUE || (Py_ssize_t)handle < 0) {
+ PyErr_Format(PyExc_IOError, "invalid handle %"
+ PY_FORMAT_SIZE_T "d", (Py_ssize_t)handle);
+ return NULL;
+ }
+
+ if (!readable && !writable) {
+ PyErr_SetString(PyExc_ValueError,
+ "either readable or writable must be true");
+ return NULL;
+ }
+
+ self = PyObject_New(ConnectionObject, type);
+ if (self == NULL)
+ return NULL;
+
+ self->weakreflist = NULL;
+ self->handle = handle;
+ self->flags = 0;
+
+ if (readable)
+ self->flags |= READABLE;
+ if (writable)
+ self->flags |= WRITABLE;
+ assert(self->flags >= 1 && self->flags <= 3);
+
+ return (PyObject*)self;
+}
+
+static void
+connection_dealloc(ConnectionObject* self)
+{
+ if (self->weakreflist != NULL)
+ PyObject_ClearWeakRefs((PyObject*)self);
+
+ if (self->handle != INVALID_HANDLE_VALUE) {
+ Py_BEGIN_ALLOW_THREADS
+ CLOSE(self->handle);
+ Py_END_ALLOW_THREADS
+ }
+ PyObject_Del(self);
+}
+
+/*
+ * Functions for transferring buffers
+ */
+
+static PyObject *
+connection_sendbytes(ConnectionObject *self, PyObject *args)
+{
+ char *buffer;
+ Py_ssize_t length, offset=0, size=PY_SSIZE_T_MIN;
+ int res;
+
+ if (!PyArg_ParseTuple(args, F_RBUFFER "#|" F_PY_SSIZE_T F_PY_SSIZE_T,
+ &buffer, &length, &offset, &size))
+ return NULL;
+
+ CHECK_WRITABLE(self);
+
+ if (offset < 0) {
+ PyErr_SetString(PyExc_ValueError, "offset is negative");
+ return NULL;
+ }
+ if (length < offset) {
+ PyErr_SetString(PyExc_ValueError, "buffer length < offset");
+ return NULL;
+ }
+
+ if (size == PY_SSIZE_T_MIN) {
+ size = length - offset;
+ } else {
+ if (size < 0) {
+ PyErr_SetString(PyExc_ValueError, "size is negative");
+ return NULL;
+ }
+ if (offset + size > length) {
+ PyErr_SetString(PyExc_ValueError,
+ "buffer length < offset + size");
+ return NULL;
+ }
+ }
+
+ Py_BEGIN_ALLOW_THREADS
+ res = conn_send_string(self, buffer + offset, size);
+ Py_END_ALLOW_THREADS
+
+ if (res < 0)
+ return mp_SetError(PyExc_IOError, res);
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *
+connection_recvbytes(ConnectionObject *self, PyObject *args)
+{
+ char *freeme = NULL;
+ Py_ssize_t res, maxlength = PY_SSIZE_T_MAX;
+ PyObject *result = NULL;
+
+ if (!PyArg_ParseTuple(args, "|" F_PY_SSIZE_T, &maxlength))
+ return NULL;
+
+ CHECK_READABLE(self);
+
+ if (maxlength < 0) {
+ PyErr_SetString(PyExc_ValueError, "maxlength < 0");
+ return NULL;
+ }
+
+ Py_BEGIN_ALLOW_THREADS
+ res = conn_recv_string(self, self->buffer, CONNECTION_BUFFER_SIZE,
+ &freeme, maxlength);
+ Py_END_ALLOW_THREADS
+
+ if (res < 0) {
+ if (res == MP_BAD_MESSAGE_LENGTH) {
+ if ((self->flags & WRITABLE) == 0) {
+ Py_BEGIN_ALLOW_THREADS
+ CLOSE(self->handle);
+ Py_END_ALLOW_THREADS
+ self->handle = INVALID_HANDLE_VALUE;
+ } else {
+ self->flags = WRITABLE;
+ }
+ }
+ mp_SetError(PyExc_IOError, res);
+ } else {
+ if (freeme == NULL) {
+ result = PyString_FromStringAndSize(self->buffer, res);
+ } else {
+ result = PyString_FromStringAndSize(freeme, res);
+ PyMem_Free(freeme);
+ }
+ }
+
+ return result;
+}
+
+static PyObject *
+connection_recvbytes_into(ConnectionObject *self, PyObject *args)
+{
+ char *freeme = NULL, *buffer = NULL;
+ Py_ssize_t res, length, offset = 0;
+ PyObject *result = NULL;
+
+ if (!PyArg_ParseTuple(args, "w#|" F_PY_SSIZE_T,
+ &buffer, &length, &offset))
+ return NULL;
+
+ CHECK_READABLE(self);
+
+ if (offset < 0) {
+ PyErr_SetString(PyExc_ValueError, "negative offset");
+ return NULL;
+ }
+
+ if (offset > length) {
+ PyErr_SetString(PyExc_ValueError, "offset too large");
+ return NULL;
+ }
+
+ Py_BEGIN_ALLOW_THREADS
+ res = conn_recv_string(self, buffer+offset, length-offset,
+ &freeme, PY_SSIZE_T_MAX);
+ Py_END_ALLOW_THREADS
+
+ if (res < 0) {
+ if (res == MP_BAD_MESSAGE_LENGTH) {
+ if ((self->flags & WRITABLE) == 0) {
+ Py_BEGIN_ALLOW_THREADS
+ CLOSE(self->handle);
+ Py_END_ALLOW_THREADS
+ self->handle = INVALID_HANDLE_VALUE;
+ } else {
+ self->flags = WRITABLE;
+ }
+ }
+ mp_SetError(PyExc_IOError, res);
+ } else {
+ if (freeme == NULL) {
+ result = PyInt_FromSsize_t(res);
+ } else {
+ result = PyObject_CallFunction(BufferTooShort,
+ F_RBUFFER "#",
+ freeme, res);
+ PyMem_Free(freeme);
+ if (result) {
+ PyErr_SetObject(BufferTooShort, result);
+ Py_DECREF(result);
+ }
+ return NULL;
+ }
+ }
+
+ return result;
+}
+
+/*
+ * Functions for transferring objects
+ */
+
+static PyObject *
+connection_send_obj(ConnectionObject *self, PyObject *obj)
+{
+ char *buffer;
+ int res;
+ Py_ssize_t length;
+ PyObject *pickled_string = NULL;
+
+ CHECK_WRITABLE(self);
+
+ pickled_string = PyObject_CallFunctionObjArgs(pickle_dumps, obj,
+ pickle_protocol, NULL);
+ if (!pickled_string)
+ goto failure;
+
+ if (PyString_AsStringAndSize(pickled_string, &buffer, &length) < 0)
+ goto failure;
+
+ Py_BEGIN_ALLOW_THREADS
+ res = conn_send_string(self, buffer, (int)length);
+ Py_END_ALLOW_THREADS
+
+ if (res < 0) {
+ mp_SetError(PyExc_IOError, res);
+ goto failure;
+ }
+
+ Py_XDECREF(pickled_string);
+ Py_RETURN_NONE;
+
+ failure:
+ Py_XDECREF(pickled_string);
+ return NULL;
+}
+
+static PyObject *
+connection_recv_obj(ConnectionObject *self)
+{
+ char *freeme = NULL;
+ Py_ssize_t res;
+ PyObject *temp = NULL, *result = NULL;
+
+ CHECK_READABLE(self);
+
+ Py_BEGIN_ALLOW_THREADS
+ res = conn_recv_string(self, self->buffer, CONNECTION_BUFFER_SIZE,
+ &freeme, PY_SSIZE_T_MAX);
+ Py_END_ALLOW_THREADS
+
+ if (res < 0) {
+ if (res == MP_BAD_MESSAGE_LENGTH) {
+ if ((self->flags & WRITABLE) == 0) {
+ Py_BEGIN_ALLOW_THREADS
+ CLOSE(self->handle);
+ Py_END_ALLOW_THREADS
+ self->handle = INVALID_HANDLE_VALUE;
+ } else {
+ self->flags = WRITABLE;
+ }
+ }
+ mp_SetError(PyExc_IOError, res);
+ } else {
+ if (freeme == NULL) {
+ temp = PyString_FromStringAndSize(self->buffer, res);
+ } else {
+ temp = PyString_FromStringAndSize(freeme, res);
+ PyMem_Free(freeme);
+ }
+ }
+
+ if (temp)
+ result = PyObject_CallFunctionObjArgs(pickle_loads,
+ temp, NULL);
+ Py_XDECREF(temp);
+ return result;
+}
+
+/*
+ * Other functions
+ */
+
+static PyObject *
+connection_poll(ConnectionObject *self, PyObject *args)
+{
+ PyObject *timeout_obj = NULL;
+ double timeout = 0.0;
+ int res;
+
+ CHECK_READABLE(self);
+
+ if (!PyArg_ParseTuple(args, "|O", &timeout_obj))
+ return NULL;
+
+ if (timeout_obj == NULL) {
+ timeout = 0.0;
+ } else if (timeout_obj == Py_None) {
+ timeout = -1.0; /* block forever */
+ } else {
+ timeout = PyFloat_AsDouble(timeout_obj);
+ if (PyErr_Occurred())
+ return NULL;
+ if (timeout < 0.0)
+ timeout = 0.0;
+ }
+
+ Py_BEGIN_ALLOW_THREADS
+ res = conn_poll(self, timeout);
+ Py_END_ALLOW_THREADS
+
+ switch (res) {
+ case TRUE:
+ Py_RETURN_TRUE;
+ case FALSE:
+ Py_RETURN_FALSE;
+ default:
+ return mp_SetError(PyExc_IOError, res);
+ }
+}
+
+static PyObject *
+connection_fileno(ConnectionObject* self)
+{
+ if (self->handle == INVALID_HANDLE_VALUE) {
+ PyErr_SetString(PyExc_IOError, "handle is invalid");
+ return NULL;
+ }
+ return PyInt_FromLong((long)self->handle);
+}
+
+static PyObject *
+connection_close(ConnectionObject *self)
+{
+ if (self->handle != INVALID_HANDLE_VALUE) {
+ Py_BEGIN_ALLOW_THREADS
+ CLOSE(self->handle);
+ Py_END_ALLOW_THREADS
+ self->handle = INVALID_HANDLE_VALUE;
+ }
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *
+connection_repr(ConnectionObject *self)
+{
+ static char *conn_type[] = {"read-only", "write-only", "read-write"};
+
+ assert(self->flags >= 1 && self->flags <= 3);
+ return FROM_FORMAT("<%s %s, handle %" PY_FORMAT_SIZE_T "d>",
+ conn_type[self->flags - 1],
+ CONNECTION_NAME, (Py_ssize_t)self->handle);
+}
+
+/*
+ * Getters and setters
+ */
+
+static PyObject *
+connection_closed(ConnectionObject *self, void *closure)
+{
+ return PyBool_FromLong((long)(self->handle == INVALID_HANDLE_VALUE));
+}
+
+static PyObject *
+connection_readable(ConnectionObject *self, void *closure)
+{
+ return PyBool_FromLong((long)(self->flags & READABLE));
+}
+
+static PyObject *
+connection_writable(ConnectionObject *self, void *closure)
+{
+ return PyBool_FromLong((long)(self->flags & WRITABLE));
+}
+
+/*
+ * Tables
+ */
+
+static PyMethodDef connection_methods[] = {
+ {"send_bytes", (PyCFunction)connection_sendbytes, METH_VARARGS,
+ "send the byte data from a readable buffer-like object"},
+ {"recv_bytes", (PyCFunction)connection_recvbytes, METH_VARARGS,
+ "receive byte data as a string"},
+ {"recv_bytes_into",(PyCFunction)connection_recvbytes_into,METH_VARARGS,
+ "receive byte data into a writeable buffer-like object\n"
+ "returns the number of bytes read"},
+
+ {"send", (PyCFunction)connection_send_obj, METH_O,
+ "send a (picklable) object"},
+ {"recv", (PyCFunction)connection_recv_obj, METH_NOARGS,
+ "receive a (picklable) object"},
+
+ {"poll", (PyCFunction)connection_poll, METH_VARARGS,
+ "whether there is any input available to be read"},
+ {"fileno", (PyCFunction)connection_fileno, METH_NOARGS,
+ "file descriptor or handle of the connection"},
+ {"close", (PyCFunction)connection_close, METH_NOARGS,
+ "close the connection"},
+
+ {NULL} /* Sentinel */
+};
+
+static PyGetSetDef connection_getset[] = {
+ {"closed", (getter)connection_closed, NULL,
+ "True if the connection is closed", NULL},
+ {"readable", (getter)connection_readable, NULL,
+ "True if the connection is readable", NULL},
+ {"writable", (getter)connection_writable, NULL,
+ "True if the connection is writable", NULL},
+ {NULL}
+};
+
+/*
+ * Connection type
+ */
+
+PyDoc_STRVAR(connection_doc,
+ "Connection type whose constructor signature is\n\n"
+ " Connection(handle, readable=True, writable=True).\n\n"
+ "The constructor does *not* duplicate the handle.");
+
+PyTypeObject CONNECTION_TYPE = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ /* tp_name */ "_multiprocessing." CONNECTION_NAME,
+ /* tp_basicsize */ sizeof(ConnectionObject),
+ /* tp_itemsize */ 0,
+ /* tp_dealloc */ (destructor)connection_dealloc,
+ /* tp_print */ 0,
+ /* tp_getattr */ 0,
+ /* tp_setattr */ 0,
+ /* tp_compare */ 0,
+ /* tp_repr */ (reprfunc)connection_repr,
+ /* tp_as_number */ 0,
+ /* tp_as_sequence */ 0,
+ /* tp_as_mapping */ 0,
+ /* tp_hash */ 0,
+ /* tp_call */ 0,
+ /* tp_str */ 0,
+ /* tp_getattro */ 0,
+ /* tp_setattro */ 0,
+ /* tp_as_buffer */ 0,
+ /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE |
+ Py_TPFLAGS_HAVE_WEAKREFS,
+ /* tp_doc */ connection_doc,
+ /* tp_traverse */ 0,
+ /* tp_clear */ 0,
+ /* tp_richcompare */ 0,
+ /* tp_weaklistoffset */ offsetof(ConnectionObject, weakreflist),
+ /* tp_iter */ 0,
+ /* tp_iternext */ 0,
+ /* tp_methods */ connection_methods,
+ /* tp_members */ 0,
+ /* tp_getset */ connection_getset,
+ /* tp_base */ 0,
+ /* tp_dict */ 0,
+ /* tp_descr_get */ 0,
+ /* tp_descr_set */ 0,
+ /* tp_dictoffset */ 0,
+ /* tp_init */ 0,
+ /* tp_alloc */ 0,
+ /* tp_new */ connection_new,
+};
+
+#endif /* CONNECTION_H */
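
As a usage illustration only (not part of the patch), the Connection type
defined above can be driven directly from Python. This Unix-only sketch uses a
socketpair and dup()s the descriptors, since the docstring notes that the
constructor does not duplicate the handle:

    import os, socket
    from _multiprocessing import Connection

    s1, s2 = socket.socketpair()
    c1 = Connection(os.dup(s1.fileno()))   # Connection takes over the dup'ed fd
    c2 = Connection(os.dup(s2.fileno()))
    c1.send_bytes('hello')                 # raw bytes, no pickling
    assert c2.recv_bytes() == 'hello'
    c1.send({'answer': 42})                # send()/recv() pickle the object
    assert c2.recv() == {'answer': 42}
    c1.close(); c2.close(); s1.close(); s2.close()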
--- /dev/null
+/*\r
+ * Extension module used by the multiprocessing package\r
+ *\r
+ * multiprocessing.c\r
+ *\r
+ * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+ */\r
+\r
+#include "multiprocessing.h"\r
+\r
+PyObject *create_win32_namespace(void);\r
+\r
+PyObject *pickle_dumps, *pickle_loads, *pickle_protocol;\r
+PyObject *ProcessError, *BufferTooShort;\r
+\r
+/*\r
+ * Function which raises exceptions based on error codes\r
+ */\r
+\r
+PyObject *\r
+mp_SetError(PyObject *Type, int num)\r
+{\r
+ switch (num) {\r
+#ifdef MS_WINDOWS\r
+ case MP_STANDARD_ERROR: \r
+ if (Type == NULL)\r
+ Type = PyExc_WindowsError;\r
+ PyErr_SetExcFromWindowsErr(Type, 0);\r
+ break;\r
+ case MP_SOCKET_ERROR:\r
+ if (Type == NULL)\r
+ Type = PyExc_WindowsError;\r
+ PyErr_SetExcFromWindowsErr(Type, WSAGetLastError());\r
+ break;\r
+#else /* !MS_WINDOWS */\r
+ case MP_STANDARD_ERROR:\r
+ case MP_SOCKET_ERROR:\r
+ if (Type == NULL)\r
+ Type = PyExc_OSError;\r
+ PyErr_SetFromErrno(Type);\r
+ break;\r
+#endif /* !MS_WINDOWS */\r
+ case MP_MEMORY_ERROR:\r
+ PyErr_NoMemory();\r
+ break;\r
+ case MP_END_OF_FILE:\r
+ PyErr_SetNone(PyExc_EOFError);\r
+ break;\r
+ case MP_EARLY_END_OF_FILE:\r
+ PyErr_SetString(PyExc_IOError,\r
+ "got end of file during message");\r
+ break;\r
+ case MP_BAD_MESSAGE_LENGTH:\r
+ PyErr_SetString(PyExc_IOError, "bad message length");\r
+ break;\r
+ case MP_EXCEPTION_HAS_BEEN_SET:\r
+ break;\r
+ default:\r
+ PyErr_Format(PyExc_RuntimeError,\r
+ "unkown error number %d", num);\r
+ }\r
+ return NULL;\r
+}\r
+\r
+\r
+/*\r
+ * Windows only\r
+ */\r
+\r
+#ifdef MS_WINDOWS\r
+\r
+/* On Windows we set an event to signal Ctrl-C; compare with timemodule.c */\r
+\r
+HANDLE sigint_event = NULL;\r
+\r
+static BOOL WINAPI\r
+ProcessingCtrlHandler(DWORD dwCtrlType)\r
+{\r
+ SetEvent(sigint_event);\r
+ return FALSE;\r
+}\r
+\r
+/*\r
+ * Unix only\r
+ */\r
+\r
+#else /* !MS_WINDOWS */\r
+\r
+#if HAVE_FD_TRANSFER\r
+\r
+/* Functions for transferring file descriptors between processes.\r
+ Reimplements some of the functionality of the fdcred\r
+ module at http://www.mca-ltd.com/resources/fdcred_1.tgz. */\r
+\r
+static PyObject *\r
+multiprocessing_sendfd(PyObject *self, PyObject *args)\r
+{\r
+ int conn, fd, res;\r
+ char dummy_char;\r
+ char buf[CMSG_SPACE(sizeof(int))];\r
+ struct msghdr msg = {0};\r
+ struct iovec dummy_iov;\r
+ struct cmsghdr *cmsg;\r
+\r
+ if (!PyArg_ParseTuple(args, "ii", &conn, &fd))\r
+ return NULL;\r
+\r
+ dummy_iov.iov_base = &dummy_char;\r
+ dummy_iov.iov_len = 1;\r
+ msg.msg_control = buf;\r
+ msg.msg_controllen = sizeof(buf);\r
+ msg.msg_iov = &dummy_iov;\r
+ msg.msg_iovlen = 1;\r
+ cmsg = CMSG_FIRSTHDR(&msg);\r
+ cmsg->cmsg_level = SOL_SOCKET;\r
+ cmsg->cmsg_type = SCM_RIGHTS;\r
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));\r
+ msg.msg_controllen = cmsg->cmsg_len;\r
+ *(int*)CMSG_DATA(cmsg) = fd;\r
+\r
+ Py_BEGIN_ALLOW_THREADS\r
+ res = sendmsg(conn, &msg, 0);\r
+ Py_END_ALLOW_THREADS\r
+\r
+ if (res < 0)\r
+ return PyErr_SetFromErrno(PyExc_OSError);\r
+ Py_RETURN_NONE;\r
+}\r
+\r
+static PyObject *\r
+multiprocessing_recvfd(PyObject *self, PyObject *args)\r
+{\r
+ int conn, fd, res;\r
+ char dummy_char;\r
+ char buf[CMSG_SPACE(sizeof(int))];\r
+ struct msghdr msg = {0};\r
+ struct iovec dummy_iov;\r
+ struct cmsghdr *cmsg;\r
+\r
+ if (!PyArg_ParseTuple(args, "i", &conn))\r
+ return NULL;\r
+\r
+ dummy_iov.iov_base = &dummy_char;\r
+ dummy_iov.iov_len = 1;\r
+ msg.msg_control = buf;\r
+ msg.msg_controllen = sizeof(buf);\r
+ msg.msg_iov = &dummy_iov;\r
+ msg.msg_iovlen = 1;\r
+ cmsg = CMSG_FIRSTHDR(&msg);\r
+ cmsg->cmsg_level = SOL_SOCKET;\r
+ cmsg->cmsg_type = SCM_RIGHTS;\r
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));\r
+ msg.msg_controllen = cmsg->cmsg_len;\r
+\r
+ Py_BEGIN_ALLOW_THREADS\r
+ res = recvmsg(conn, &msg, 0);\r
+ Py_END_ALLOW_THREADS\r
+\r
+ if (res < 0)\r
+ return PyErr_SetFromErrno(PyExc_OSError);\r
+\r
+ fd = *(int*)CMSG_DATA(cmsg);\r
+ return Py_BuildValue("i", fd);\r
+}\r
+\r
+#endif /* HAVE_FD_TRANSFER */\r
+\r
+#endif /* !MS_WINDOWS */\r
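
A minimal Unix-only usage sketch of the sendfd()/recvfd() functions above,
assuming a build with HAVE_FD_TRANSFER; the descriptor travels over a
socketpair as SCM_RIGHTS ancillary data (the file path is hypothetical):

    import os, socket, _multiprocessing

    parent, child = socket.socketpair()
    pid = os.fork()
    if pid == 0:                               # child: receive and use the fd
        fd = _multiprocessing.recvfd(child.fileno())
        os.write(fd, 'written via the passed descriptor\n')
        os._exit(0)
    else:                                      # parent: pass an open file's fd
        f = open('/tmp/fd_demo.txt', 'w')      # hypothetical path
        _multiprocessing.sendfd(parent.fileno(), f.fileno())
        os.waitpid(pid, 0)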
+\r
+\r
+/*\r
+ * All platforms\r
+ */\r
+\r
+static PyObject*\r
+multiprocessing_address_of_buffer(PyObject *self, PyObject *obj)\r
+{\r
+ void *buffer;\r
+ Py_ssize_t buffer_len;\r
+\r
+ if (PyObject_AsWriteBuffer(obj, &buffer, &buffer_len) < 0)\r
+ return NULL;\r
+\r
+ return Py_BuildValue("N" F_PY_SSIZE_T, \r
+ PyLong_FromVoidPtr(buffer), buffer_len);\r
+}\r
+\r
+\r
+/*\r
+ * Function table\r
+ */\r
+\r
+static PyMethodDef module_methods[] = {\r
+ {"address_of_buffer", multiprocessing_address_of_buffer, METH_O, \r
+ "address_of_buffer(obj) -> int\n" \r
+ "Return address of obj assuming obj supports buffer inteface"},\r
+#if HAVE_FD_TRANSFER\r
+ {"sendfd", multiprocessing_sendfd, METH_VARARGS, \r
+ "sendfd(sockfd, fd) -> None\n"\r
+ "Send file descriptor given by fd over the unix domain socket\n"\r
+ "whose file decriptor is sockfd"},\r
+ {"recvfd", multiprocessing_recvfd, METH_VARARGS,\r
+ "recvfd(sockfd) -> fd\n"\r
+ "Receive a file descriptor over a unix domain socket\n"\r
+ "whose file decriptor is sockfd"},\r
+#endif\r
+ {NULL}\r
+};\r
+\r
+\r
+/*\r
+ * Initialize\r
+ */\r
+\r
+PyMODINIT_FUNC\r
+init_multiprocessing(void)\r
+{\r
+ PyObject *module, *temp;\r
+\r
+ /* Initialize module */\r
+ module = Py_InitModule("_multiprocessing", module_methods);\r
+ if (!module)\r
+ return;\r
+\r
+ /* Get copy of objects from pickle */\r
+ temp = PyImport_ImportModule(PICKLE_MODULE);\r
+ if (!temp)\r
+ return;\r
+ pickle_dumps = PyObject_GetAttrString(temp, "dumps");\r
+ pickle_loads = PyObject_GetAttrString(temp, "loads");\r
+ pickle_protocol = PyObject_GetAttrString(temp, "HIGHEST_PROTOCOL");\r
+ Py_XDECREF(temp);\r
+\r
+ /* Get copy of BufferTooShort */\r
+ temp = PyImport_ImportModule("multiprocessing");\r
+ if (!temp)\r
+ return;\r
+ BufferTooShort = PyObject_GetAttrString(temp, "BufferTooShort");\r
+ Py_XDECREF(temp);\r
+\r
+ /* Add connection type to module */\r
+ if (PyType_Ready(&ConnectionType) < 0)\r
+ return;\r
+ Py_INCREF(&ConnectionType); \r
+ PyModule_AddObject(module, "Connection", (PyObject*)&ConnectionType);\r
+\r
+#if defined(MS_WINDOWS) || HAVE_SEM_OPEN\r
+ /* Add SemLock type to module */\r
+ if (PyType_Ready(&SemLockType) < 0)\r
+ return;\r
+ Py_INCREF(&SemLockType);\r
+ PyDict_SetItemString(SemLockType.tp_dict, "SEM_VALUE_MAX", \r
+ Py_BuildValue("i", SEM_VALUE_MAX));\r
+ PyModule_AddObject(module, "SemLock", (PyObject*)&SemLockType); \r
+#endif\r
+\r
+#ifdef MS_WINDOWS\r
+ /* Add PipeConnection to module */\r
+ if (PyType_Ready(&PipeConnectionType) < 0)\r
+ return;\r
+ Py_INCREF(&PipeConnectionType);\r
+ PyModule_AddObject(module, "PipeConnection",\r
+ (PyObject*)&PipeConnectionType);\r
+\r
+ /* Initialize win32 class and add to multiprocessing */\r
+ temp = create_win32_namespace();\r
+ if (!temp)\r
+ return;\r
+ PyModule_AddObject(module, "win32", temp);\r
+\r
+ /* Initialize the event handle used to signal Ctrl-C */\r
+ sigint_event = CreateEvent(NULL, TRUE, FALSE, NULL);\r
+ if (!sigint_event) {\r
+ PyErr_SetFromWindowsErr(0);\r
+ return;\r
+ }\r
+ if (!SetConsoleCtrlHandler(ProcessingCtrlHandler, TRUE)) {\r
+ PyErr_SetFromWindowsErr(0);\r
+ return;\r
+ }\r
+#endif\r
+\r
+ /* Add configuration macros */\r
+ temp = PyDict_New();\r
+ if (!temp)\r
+ return;\r
+ if (PyModule_AddObject(module, "flags", temp) < 0)\r
+ return;\r
+\r
+#define ADD_FLAG(name) \\r
+ if (PyDict_SetItemString(temp, #name, Py_BuildValue("i", name)) < 0) return\r
+ \r
+#ifdef HAVE_SEM_OPEN\r
+ ADD_FLAG(HAVE_SEM_OPEN);\r
+#endif\r
+#ifdef HAVE_SEM_TIMEDWAIT\r
+ ADD_FLAG(HAVE_SEM_TIMEDWAIT);\r
+#endif\r
+#ifdef HAVE_FD_TRANSFER\r
+ ADD_FLAG(HAVE_FD_TRANSFER);\r
+#endif\r
+#ifdef HAVE_BROKEN_SEM_GETVALUE\r
+ ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE);\r
+#endif\r
+#ifdef HAVE_BROKEN_SEM_UNLINK\r
+ ADD_FLAG(HAVE_BROKEN_SEM_UNLINK);\r
+#endif\r
+}\r
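
The ADD_FLAG() machinery above makes the build configuration introspectable
from Python. As an illustration, a typical Linux build might report:

    import _multiprocessing
    print _multiprocessing.flags
    # e.g. {'HAVE_SEM_OPEN': 1, 'HAVE_SEM_TIMEDWAIT': 1, 'HAVE_FD_TRANSFER': 1}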
--- /dev/null
+#ifndef MULTIPROCESSING_H\r
+#define MULTIPROCESSING_H\r
+\r
+#define PY_SSIZE_T_CLEAN\r
+\r
+#include "Python.h"\r
+#include "structmember.h"\r
+#include "pythread.h"\r
+\r
+/*\r
+ * Platform includes and definitions\r
+ */\r
+\r
+#ifdef MS_WINDOWS\r
+# define WIN32_LEAN_AND_MEAN\r
+# include <windows.h>\r
+# include <winsock2.h>\r
+# include <process.h> /* getpid() */\r
+# define SEM_HANDLE HANDLE\r
+# define SEM_VALUE_MAX LONG_MAX\r
+#else\r
+# include <fcntl.h> /* O_CREAT and O_EXCL */\r
+# include <sys/socket.h>\r
+# include <arpa/inet.h> /* htonl() and ntohl() */\r
+# if HAVE_SEM_OPEN\r
+# include <semaphore.h>\r
+ typedef sem_t *SEM_HANDLE;\r
+# endif\r
+# define HANDLE int\r
+# define SOCKET int\r
+# define BOOL int\r
+# define UINT32 uint32_t\r
+# define INT32 int32_t\r
+# define TRUE 1\r
+# define FALSE 0\r
+# define INVALID_HANDLE_VALUE (-1)\r
+#endif\r
+\r
+/*\r
+ * Make sure Py_ssize_t is available\r
+ */\r
+\r
+#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)\r
+ typedef int Py_ssize_t;\r
+# define PY_SSIZE_T_MAX INT_MAX\r
+# define PY_SSIZE_T_MIN INT_MIN\r
+# define F_PY_SSIZE_T "i"\r
+# define PY_FORMAT_SIZE_T ""\r
+# define PyInt_FromSsize_t(n) PyInt_FromLong((long)n)\r
+#else\r
+# define F_PY_SSIZE_T "n"\r
+#endif\r
+\r
+/*\r
+ * Format codes\r
+ */\r
+\r
+#if SIZEOF_VOID_P == SIZEOF_LONG\r
+# define F_POINTER "k"\r
+# define T_POINTER T_ULONG\r
+#elif defined(HAVE_LONG_LONG) && (SIZEOF_VOID_P == SIZEOF_LONG_LONG)\r
+# define F_POINTER "K"\r
+# define T_POINTER T_ULONGLONG\r
+#else\r
+# error "can't find format code for unsigned integer of same size as void*"\r
+#endif\r
+\r
+#ifdef MS_WINDOWS\r
+# define F_HANDLE F_POINTER\r
+# define T_HANDLE T_POINTER\r
+# define F_SEM_HANDLE F_HANDLE\r
+# define T_SEM_HANDLE T_HANDLE\r
+# define F_DWORD "k"\r
+# define T_DWORD T_ULONG\r
+#else\r
+# define F_HANDLE "i"\r
+# define T_HANDLE T_INT\r
+# define F_SEM_HANDLE F_POINTER\r
+# define T_SEM_HANDLE T_POINTER\r
+#endif\r
+\r
+#if PY_VERSION_HEX >= 0x03000000\r
+# define F_RBUFFER "y"\r
+#else\r
+# define F_RBUFFER "s"\r
+#endif\r
+\r
+/*\r
+ * Error codes which can be returned by functions called without the GIL\r
+ */\r
+\r
+#define MP_SUCCESS (0)\r
+#define MP_STANDARD_ERROR (-1)\r
+#define MP_MEMORY_ERROR (-1001)\r
+#define MP_END_OF_FILE (-1002)\r
+#define MP_EARLY_END_OF_FILE (-1003)\r
+#define MP_BAD_MESSAGE_LENGTH (-1004)\r
+#define MP_SOCKET_ERROR (-1005)\r
+#define MP_EXCEPTION_HAS_BEEN_SET (-1006)\r
+\r
+PyObject *mp_SetError(PyObject *Type, int num);\r
+\r
+/*\r
+ * Externs - not all will really exist on all platforms\r
+ */\r
+\r
+extern PyObject *pickle_dumps;\r
+extern PyObject *pickle_loads;\r
+extern PyObject *pickle_protocol;\r
+extern PyObject *BufferTooShort;\r
+extern PyTypeObject SemLockType;\r
+extern PyTypeObject ConnectionType;\r
+extern PyTypeObject PipeConnectionType;\r
+extern HANDLE sigint_event;\r
+\r
+/*\r
+ * Py3k compatibility\r
+ */\r
+\r
+#if PY_VERSION_HEX >= 0x03000000\r
+# define PICKLE_MODULE "pickle"\r
+# define FROM_FORMAT PyUnicode_FromFormat\r
+# define PyInt_FromLong PyLong_FromLong\r
+# define PyInt_FromSsize_t PyLong_FromSsize_t\r
+#else\r
+# define PICKLE_MODULE "cPickle"\r
+# define FROM_FORMAT PyString_FromFormat\r
+#endif\r
+\r
+#ifndef PyVarObject_HEAD_INIT\r
+# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,\r
+#endif\r
+\r
+#ifndef Py_TPFLAGS_HAVE_WEAKREFS\r
+# define Py_TPFLAGS_HAVE_WEAKREFS 0\r
+#endif\r
+\r
+/*\r
+ * Connection definition\r
+ */\r
+\r
+#define CONNECTION_BUFFER_SIZE 1024\r
+\r
+typedef struct {\r
+ PyObject_HEAD\r
+ HANDLE handle;\r
+ int flags;\r
+ PyObject *weakreflist;\r
+ char buffer[CONNECTION_BUFFER_SIZE];\r
+} ConnectionObject;\r
+\r
+/*\r
+ * Miscellaneous\r
+ */\r
+\r
+#define MAX_MESSAGE_LENGTH 0x7fffffff\r
+\r
+#ifndef MIN\r
+# define MIN(x, y) ((x) < (y) ? x : y)\r
+# define MAX(x, y) ((x) > (y) ? x : y)\r
+#endif\r
+\r
+#endif /* MULTIPROCESSING_H */\r
--- /dev/null
+/*\r
+ * A type which wraps a pipe handle in message-oriented mode\r
+ *\r
+ * pipe_connection.c\r
+ *\r
+ * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+ */\r
+\r
+#include "multiprocessing.h"\r
+\r
+#define CLOSE(h) CloseHandle(h)\r
+\r
+/*\r
+ * Send a string to the pipe; assumes the pipe is in message-oriented mode\r
+ */\r
+\r
+static Py_ssize_t\r
+conn_send_string(ConnectionObject *conn, char *string, size_t length)\r
+{\r
+ DWORD amount_written;\r
+\r
+ return WriteFile(conn->handle, string, length, &amount_written, NULL)\r
+ ? MP_SUCCESS : MP_STANDARD_ERROR;\r
+}\r
+\r
+/*\r
+ * Attempts to read into buffer; if the buffer is too small then the\r
+ * message is read into *newbuffer instead.\r
+ *\r
+ * Returns the number of bytes read.  Assumes the pipe is in\r
+ * message-oriented mode.\r
+ */\r
+\r
+static Py_ssize_t\r
+conn_recv_string(ConnectionObject *conn, char *buffer, \r
+ size_t buflength, char **newbuffer, size_t maxlength)\r
+{\r
+ DWORD left, length, full_length, err;\r
+\r
+ *newbuffer = NULL;\r
+\r
+ if (ReadFile(conn->handle, buffer, MIN(buflength, maxlength), \r
+ &length, NULL))\r
+ return length;\r
+\r
+ err = GetLastError();\r
+ if (err != ERROR_MORE_DATA) {\r
+ if (err == ERROR_BROKEN_PIPE)\r
+ return MP_END_OF_FILE;\r
+ return MP_STANDARD_ERROR;\r
+ }\r
+\r
+ if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, NULL, &left))\r
+ return MP_STANDARD_ERROR;\r
+\r
+ full_length = length + left;\r
+ if (full_length > maxlength)\r
+ return MP_BAD_MESSAGE_LENGTH;\r
+\r
+ *newbuffer = PyMem_Malloc(full_length);\r
+ if (*newbuffer == NULL)\r
+ return MP_MEMORY_ERROR;\r
+\r
+ memcpy(*newbuffer, buffer, length);\r
+\r
+ if (ReadFile(conn->handle, *newbuffer+length, left, &length, NULL)) {\r
+ assert(length == left);\r
+ return full_length;\r
+ } else {\r
+ PyMem_Free(*newbuffer);\r
+ return MP_STANDARD_ERROR;\r
+ }\r
+}\r
+\r
+/*\r
+ * Check whether any data is available for reading\r
+ */\r
+\r
+#define conn_poll(conn, timeout) conn_poll_save(conn, timeout, _save)\r
+\r
+static int\r
+conn_poll_save(ConnectionObject *conn, double timeout, PyThreadState *_save)\r
+{\r
+ DWORD bytes, deadline, delay;\r
+ int difference, res;\r
+ BOOL block = FALSE;\r
+\r
+ if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL))\r
+ return MP_STANDARD_ERROR;\r
+\r
+ if (timeout == 0.0)\r
+ return bytes > 0;\r
+\r
+ if (timeout < 0.0)\r
+ block = TRUE;\r
+ else\r
+ /* XXX does not check for overflow */\r
+ deadline = GetTickCount() + (DWORD)(1000 * timeout + 0.5);\r
+\r
+ Sleep(0);\r
+\r
+ for (delay = 1 ; ; delay += 1) {\r
+ if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL))\r
+ return MP_STANDARD_ERROR;\r
+ else if (bytes > 0)\r
+ return TRUE;\r
+\r
+ if (!block) {\r
+ difference = deadline - GetTickCount();\r
+ if (difference < 0)\r
+ return FALSE;\r
+ if ((int)delay > difference)\r
+ delay = difference;\r
+ }\r
+\r
+ if (delay > 20)\r
+ delay = 20;\r
+\r
+ Sleep(delay);\r
+\r
+ /* check for signals */\r
+ Py_BLOCK_THREADS \r
+ res = PyErr_CheckSignals();\r
+ Py_UNBLOCK_THREADS\r
+\r
+ if (res)\r
+ return MP_EXCEPTION_HAS_BEEN_SET;\r
+ }\r
+}\r
+\r
+/*\r
+ * "connection.h" defines the PipeConnection type using the definitions above\r
+ */\r
+\r
+#define CONNECTION_NAME "PipeConnection"\r
+#define CONNECTION_TYPE PipeConnectionType\r
+\r
+#include "connection.h"\r
--- /dev/null
+/*
+ * A type which wraps a semaphore
+ *
+ * semaphore.c
+ *
+ * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+ */
+
+#include "multiprocessing.h"
+
+enum { RECURSIVE_MUTEX, SEMAPHORE };
+
+typedef struct {
+ PyObject_HEAD
+ SEM_HANDLE handle;
+ long last_tid;
+ int count;
+ int maxvalue;
+ int kind;
+} SemLockObject;
+
+#define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid)
+
+
+#ifdef MS_WINDOWS
+
+/*
+ * Windows definitions
+ */
+
+#define SEM_FAILED NULL
+
+#define SEM_CLEAR_ERROR() SetLastError(0)
+#define SEM_GET_LAST_ERROR() GetLastError()
+#define SEM_CREATE(name, val, max) CreateSemaphore(NULL, val, max, NULL)
+#define SEM_CLOSE(sem) (CloseHandle(sem) ? 0 : -1)
+#define SEM_GETVALUE(sem, pval) _GetSemaphoreValue(sem, pval)
+#define SEM_UNLINK(name) 0
+
+static int
+_GetSemaphoreValue(HANDLE handle, long *value)
+{
+ long previous;
+
+ switch (WaitForSingleObject(handle, 0)) {
+ case WAIT_OBJECT_0:
+ if (!ReleaseSemaphore(handle, 1, &previous))
+ return MP_STANDARD_ERROR;
+ *value = previous + 1;
+ return 0;
+ case WAIT_TIMEOUT:
+ *value = 0;
+ return 0;
+ default:
+ return MP_STANDARD_ERROR;
+ }
+}
+
+static PyObject *
+semlock_acquire(SemLockObject *self, PyObject *args, PyObject *kwds)
+{
+ int blocking = 1;
+ double timeout;
+ PyObject *timeout_obj = Py_None;
+ DWORD res, full_msecs, msecs, start, ticks;
+
+ static char *kwlist[] = {"block", "timeout", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist,
+ &blocking, &timeout_obj))
+ return NULL;
+
+ /* calculate timeout */
+ if (!blocking) {
+ full_msecs = 0;
+ } else if (timeout_obj == Py_None) {
+ full_msecs = INFINITE;
+ } else {
+ timeout = PyFloat_AsDouble(timeout_obj);
+ if (PyErr_Occurred())
+ return NULL;
+ timeout *= 1000.0; /* convert to millisecs */
+ if (timeout < 0.0) {
+ timeout = 0.0;
+ } else if (timeout >= 0.5 * INFINITE) { /* 25 days */
+ PyErr_SetString(PyExc_OverflowError,
+ "timeout is too large");
+ return NULL;
+ }
+ full_msecs = (DWORD)(timeout + 0.5);
+ }
+
+ /* check whether we already own the lock */
+ if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) {
+ ++self->count;
+ Py_RETURN_TRUE;
+ }
+
+ /* check whether we can acquire without blocking */
+ if (WaitForSingleObject(self->handle, 0) == WAIT_OBJECT_0) {
+ self->last_tid = GetCurrentThreadId();
+ ++self->count;
+ Py_RETURN_TRUE;
+ }
+
+ msecs = full_msecs;
+ start = GetTickCount();
+
+ for ( ; ; ) {
+ HANDLE handles[2] = {self->handle, sigint_event};
+
+ /* do the wait */
+ Py_BEGIN_ALLOW_THREADS
+ ResetEvent(sigint_event);
+ res = WaitForMultipleObjects(2, handles, FALSE, msecs);
+ Py_END_ALLOW_THREADS
+
+ /* handle result */
+ if (res != WAIT_OBJECT_0 + 1)
+ break;
+
+ /* got SIGINT so give signal handler a chance to run */
+ Sleep(1);
+
+ /* if this is the main thread then let KeyboardInterrupt be raised */
+ if (PyErr_CheckSignals())
+ return NULL;
+
+ /* recalculate timeout */
+ if (msecs != INFINITE) {
+ ticks = GetTickCount();
+ if ((DWORD)(ticks - start) >= full_msecs)
+ Py_RETURN_FALSE;
+ msecs = full_msecs - (ticks - start);
+ }
+ }
+
+ /* handle result */
+ switch (res) {
+ case WAIT_TIMEOUT:
+ Py_RETURN_FALSE;
+ case WAIT_OBJECT_0:
+ self->last_tid = GetCurrentThreadId();
+ ++self->count;
+ Py_RETURN_TRUE;
+ case WAIT_FAILED:
+ return PyErr_SetFromWindowsErr(0);
+ default:
+ PyErr_Format(PyExc_RuntimeError, "WaitForSingleObject() or "
+ "WaitForMultipleObjects() gave unrecognized "
+ "value %d", res);
+ return NULL;
+ }
+}
+
+static PyObject *
+semlock_release(SemLockObject *self, PyObject *args)
+{
+ if (self->kind == RECURSIVE_MUTEX) {
+ if (!ISMINE(self)) {
+ PyErr_SetString(PyExc_AssertionError, "attempt to "
+ "release recursive lock not owned "
+ "by thread");
+ return NULL;
+ }
+ if (self->count > 1) {
+ --self->count;
+ Py_RETURN_NONE;
+ }
+ assert(self->count == 1);
+ }
+
+ if (!ReleaseSemaphore(self->handle, 1, NULL)) {
+ if (GetLastError() == ERROR_TOO_MANY_POSTS) {
+ PyErr_SetString(PyExc_ValueError, "semaphore or lock "
+ "released too many times");
+ return NULL;
+ } else {
+ return PyErr_SetFromWindowsErr(0);
+ }
+ }
+
+ --self->count;
+ Py_RETURN_NONE;
+}
+
+#else /* !MS_WINDOWS */
+
+/*
+ * Unix definitions
+ */
+
+#define SEM_CLEAR_ERROR()
+#define SEM_GET_LAST_ERROR() 0
+#define SEM_CREATE(name, val, max) sem_open(name, O_CREAT | O_EXCL, 0600, val)
+#define SEM_CLOSE(sem) sem_close(sem)
+#define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval)
+#define SEM_UNLINK(name) sem_unlink(name)
+
+#if HAVE_BROKEN_SEM_UNLINK
+# define sem_unlink(name) 0
+#endif
+
+#if !HAVE_SEM_TIMEDWAIT
+# define sem_timedwait(sem,deadline) sem_timedwait_save(sem,deadline,_save)
+
+int
+sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save)
+{
+ int res;
+ unsigned long delay, difference;
+ struct timeval now, tvdeadline, tvdelay;
+
+ errno = 0;
+ tvdeadline.tv_sec = deadline->tv_sec;
+ tvdeadline.tv_usec = deadline->tv_nsec / 1000;
+
+ for (delay = 0 ; ; delay += 1000) {
+ /* poll */
+ if (sem_trywait(sem) == 0)
+ return 0;
+ else if (errno != EAGAIN)
+ return MP_STANDARD_ERROR;
+
+ /* get current time */
+ if (gettimeofday(&now, NULL) < 0)
+ return MP_STANDARD_ERROR;
+
+ /* check for timeout */
+ if (tvdeadline.tv_sec < now.tv_sec ||
+ (tvdeadline.tv_sec == now.tv_sec &&
+ tvdeadline.tv_usec <= now.tv_usec)) {
+ errno = ETIMEDOUT;
+ return MP_STANDARD_ERROR;
+ }
+
+ /* calculate how much time is left */
+ difference = (tvdeadline.tv_sec - now.tv_sec) * 1000000 +
+ (tvdeadline.tv_usec - now.tv_usec);
+
+ /* check delay not too long -- maximum is 20 msecs */
+ if (delay > 20000)
+ delay = 20000;
+ if (delay > difference)
+ delay = difference;
+
+ /* sleep */
+ tvdelay.tv_sec = delay / 1000000;
+ tvdelay.tv_usec = delay % 1000000;
+ if (select(0, NULL, NULL, NULL, &tvdelay) < 0)
+ return MP_STANDARD_ERROR;
+
+ /* check for signals */
+ Py_BLOCK_THREADS
+ res = PyErr_CheckSignals();
+ Py_UNBLOCK_THREADS
+
+ if (res) {
+ errno = EINTR;
+ return MP_EXCEPTION_HAS_BEEN_SET;
+ }
+ }
+}
+
+#endif /* !HAVE_SEM_TIMEDWAIT */
+
+static PyObject *
+semlock_acquire(SemLockObject *self, PyObject *args, PyObject *kwds)
+{
+ int blocking = 1, res;
+ double timeout;
+ PyObject *timeout_obj = Py_None;
+ struct timespec deadline = {0};
+ struct timeval now;
+ long sec, nsec;
+
+ static char *kwlist[] = {"block", "timeout", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist,
+ &blocking, &timeout_obj))
+ return NULL;
+
+ if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) {
+ ++self->count;
+ Py_RETURN_TRUE;
+ }
+
+ if (timeout_obj != Py_None) {
+ timeout = PyFloat_AsDouble(timeout_obj);
+ if (PyErr_Occurred())
+ return NULL;
+ if (timeout < 0.0)
+ timeout = 0.0;
+
+ if (gettimeofday(&now, NULL) < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+ sec = (long) timeout;
+ nsec = (long) (1e9 * (timeout - sec) + 0.5);
+ deadline.tv_sec = now.tv_sec + sec;
+ deadline.tv_nsec = now.tv_usec * 1000 + nsec;
+ deadline.tv_sec += (deadline.tv_nsec / 1000000000);
+ deadline.tv_nsec %= 1000000000;
+ }
+
+ do {
+ Py_BEGIN_ALLOW_THREADS
+ if (blocking && timeout_obj == Py_None)
+ res = sem_wait(self->handle);
+ else if (!blocking)
+ res = sem_trywait(self->handle);
+ else
+ res = sem_timedwait(self->handle, &deadline);
+ Py_END_ALLOW_THREADS
+ if (res == MP_EXCEPTION_HAS_BEEN_SET)
+ break;
+ } while (res < 0 && errno == EINTR && !PyErr_CheckSignals());
+
+ if (res < 0) {
+ if (errno == EAGAIN || errno == ETIMEDOUT)
+ Py_RETURN_FALSE;
+ else if (errno == EINTR)
+ return NULL;
+ else
+ return PyErr_SetFromErrno(PyExc_OSError);
+ }
+
+ ++self->count;
+ self->last_tid = PyThread_get_thread_ident();
+
+ Py_RETURN_TRUE;
+}
+
+static PyObject *
+semlock_release(SemLockObject *self, PyObject *args)
+{
+ if (self->kind == RECURSIVE_MUTEX) {
+ if (!ISMINE(self)) {
+ PyErr_SetString(PyExc_AssertionError, "attempt to "
+ "release recursive lock not owned "
+ "by thread");
+ return NULL;
+ }
+ if (self->count > 1) {
+ --self->count;
+ Py_RETURN_NONE;
+ }
+ assert(self->count == 1);
+ } else {
+#if HAVE_BROKEN_SEM_GETVALUE
+ /* We can only do a proper check in the maxvalue == 1 case */
+ if (self->maxvalue == 1) {
+ /* make sure that the semaphore is already locked */
+ if (sem_trywait(self->handle) < 0) {
+ if (errno != EAGAIN) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+ /* it is already locked as expected */
+ } else {
+ /* it was not locked so undo wait and raise */
+ if (sem_post(self->handle) < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+ PyErr_SetString(PyExc_ValueError, "semaphore "
+ "or lock released too many "
+ "times");
+ return NULL;
+ }
+ }
+#else
+ int sval;
+
+ /* This check is not an absolute guarantee that the semaphore
+ does not rise above maxvalue. */
+ if (sem_getvalue(self->handle, &sval) < 0) {
+ return PyErr_SetFromErrno(PyExc_OSError);
+ } else if (sval >= self->maxvalue) {
+ PyErr_SetString(PyExc_ValueError, "semaphore or lock "
+ "released too many times");
+ return NULL;
+ }
+#endif
+ }
+
+ if (sem_post(self->handle) < 0)
+ return PyErr_SetFromErrno(PyExc_OSError);
+
+ --self->count;
+ Py_RETURN_NONE;
+}
+
+#endif /* !MS_WINDOWS */
+
+/*
+ * All platforms
+ */
+
+static PyObject *
+newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue)
+{
+ SemLockObject *self;
+
+ self = PyObject_New(SemLockObject, type);
+ if (!self)
+ return NULL;
+ self->handle = handle;
+ self->kind = kind;
+ self->count = 0;
+ self->last_tid = 0;
+ self->maxvalue = maxvalue;
+ return (PyObject*)self;
+}
+
+static PyObject *
+semlock_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ char buffer[256];
+ SEM_HANDLE handle = SEM_FAILED;
+ int kind, maxvalue, value;
+ PyObject *result;
+ static char *kwlist[] = {"kind", "value", "maxvalue", NULL};
+ static int counter = 0;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "iii", kwlist,
+ &kind, &value, &maxvalue))
+ return NULL;
+
+ if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) {
+ PyErr_SetString(PyExc_ValueError, "unrecognized kind");
+ return NULL;
+ }
+
+ PyOS_snprintf(buffer, sizeof(buffer), "/mp%d-%d", getpid(), counter++);
+
+ SEM_CLEAR_ERROR();
+ handle = SEM_CREATE(buffer, value, maxvalue);
+ /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */
+ if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0)
+ goto failure;
+
+ if (SEM_UNLINK(buffer) < 0)
+ goto failure;
+
+ result = newsemlockobject(type, handle, kind, maxvalue);
+ if (!result)
+ goto failure;
+
+ return result;
+
+ failure:
+ if (handle != SEM_FAILED)
+ SEM_CLOSE(handle);
+ mp_SetError(NULL, MP_STANDARD_ERROR);
+ return NULL;
+}
+
+static PyObject *
+semlock_rebuild(PyTypeObject *type, PyObject *args)
+{
+ SEM_HANDLE handle;
+ int kind, maxvalue;
+
+ if (!PyArg_ParseTuple(args, F_SEM_HANDLE "ii",
+ &handle, &kind, &maxvalue))
+ return NULL;
+
+ return newsemlockobject(type, handle, kind, maxvalue);
+}
+
+static void
+semlock_dealloc(SemLockObject* self)
+{
+ if (self->handle != SEM_FAILED)
+ SEM_CLOSE(self->handle);
+ PyObject_Del(self);
+}
+
+static PyObject *
+semlock_count(SemLockObject *self)
+{
+ return PyInt_FromLong((long)self->count);
+}
+
+static PyObject *
+semlock_ismine(SemLockObject *self)
+{
+ /* only makes sense for a lock */
+ return PyBool_FromLong(ISMINE(self));
+}
+
+static PyObject *
+semlock_getvalue(SemLockObject *self)
+{
+#if HAVE_BROKEN_SEM_GETVALUE
+ PyErr_SetNone(PyExc_NotImplementedError);
+ return NULL;
+#else
+ int sval;
+ if (SEM_GETVALUE(self->handle, &sval) < 0)
+ return mp_SetError(NULL, MP_STANDARD_ERROR);
+ /* some posix implementations use negative numbers to indicate
+ the number of waiting threads */
+ if (sval < 0)
+ sval = 0;
+ return PyInt_FromLong((long)sval);
+#endif
+}
+
+static PyObject *
+semlock_iszero(SemLockObject *self)
+{
+ int sval;
+#if HAVE_BROKEN_SEM_GETVALUE
+ if (sem_trywait(self->handle) < 0) {
+ if (errno == EAGAIN)
+ Py_RETURN_TRUE;
+ return mp_SetError(NULL, MP_STANDARD_ERROR);
+ } else {
+ if (sem_post(self->handle) < 0)
+ return mp_SetError(NULL, MP_STANDARD_ERROR);
+ Py_RETURN_FALSE;
+ }
+#else
+ if (SEM_GETVALUE(self->handle, &sval) < 0)
+ return mp_SetError(NULL, MP_STANDARD_ERROR);
+ return PyBool_FromLong((long)sval == 0);
+#endif
+}
+
+static PyObject *
+semlock_afterfork(SemLockObject *self)
+{
+ self->count = 0;
+ Py_RETURN_NONE;
+}
+
+/*
+ * Semaphore methods
+ */
+
+static PyMethodDef semlock_methods[] = {
+ {"acquire", (PyCFunction)semlock_acquire, METH_VARARGS | METH_KEYWORDS,
+ "acquire the semaphore/lock"},
+ {"release", (PyCFunction)semlock_release, METH_NOARGS,
+ "release the semaphore/lock"},
+ {"__enter__", (PyCFunction)semlock_acquire, METH_VARARGS,
+ "enter the semaphore/lock"},
+ {"__exit__", (PyCFunction)semlock_release, METH_VARARGS,
+ "exit the semaphore/lock"},
+ {"_count", (PyCFunction)semlock_count, METH_NOARGS,
+ "num of `acquire()`s minus num of `release()`s for this process"},
+ {"_is_mine", (PyCFunction)semlock_ismine, METH_NOARGS,
+ "whether the lock is owned by this thread"},
+ {"_get_value", (PyCFunction)semlock_getvalue, METH_NOARGS,
+ "get the value of the semaphore"},
+ {"_is_zero", (PyCFunction)semlock_iszero, METH_NOARGS,
+ "returns whether semaphore has value zero"},
+ {"_rebuild", (PyCFunction)semlock_rebuild, METH_VARARGS | METH_CLASS,
+ ""},
+ {"_after_fork", (PyCFunction)semlock_afterfork, METH_NOARGS,
+ "rezero the net acquisition count after fork()"},
+ {NULL}
+};
+
+/*
+ * Member table
+ */
+
+static PyMemberDef semlock_members[] = {
+ {"handle", T_SEM_HANDLE, offsetof(SemLockObject, handle), READONLY,
+ ""},
+ {"kind", T_INT, offsetof(SemLockObject, kind), READONLY,
+ ""},
+ {"maxvalue", T_INT, offsetof(SemLockObject, maxvalue), READONLY,
+ ""},
+ {NULL}
+};
+
+/*
+ * Semaphore type
+ */
+
+PyTypeObject SemLockType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ /* tp_name */ "_multiprocessing.SemLock",
+ /* tp_basicsize */ sizeof(SemLockObject),
+ /* tp_itemsize */ 0,
+ /* tp_dealloc */ (destructor)semlock_dealloc,
+ /* tp_print */ 0,
+ /* tp_getattr */ 0,
+ /* tp_setattr */ 0,
+ /* tp_compare */ 0,
+ /* tp_repr */ 0,
+ /* tp_as_number */ 0,
+ /* tp_as_sequence */ 0,
+ /* tp_as_mapping */ 0,
+ /* tp_hash */ 0,
+ /* tp_call */ 0,
+ /* tp_str */ 0,
+ /* tp_getattro */ 0,
+ /* tp_setattro */ 0,
+ /* tp_as_buffer */ 0,
+ /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
+ /* tp_doc */ "Semaphore/Mutex type",
+ /* tp_traverse */ 0,
+ /* tp_clear */ 0,
+ /* tp_richcompare */ 0,
+ /* tp_weaklistoffset */ 0,
+ /* tp_iter */ 0,
+ /* tp_iternext */ 0,
+ /* tp_methods */ semlock_methods,
+ /* tp_members */ semlock_members,
+ /* tp_getset */ 0,
+ /* tp_base */ 0,
+ /* tp_dict */ 0,
+ /* tp_descr_get */ 0,
+ /* tp_descr_set */ 0,
+ /* tp_dictoffset */ 0,
+ /* tp_init */ 0,
+ /* tp_alloc */ 0,
+ /* tp_new */ semlock_new,
+};
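
For illustration, the RECURSIVE_MUTEX bookkeeping above (count plus last_tid)
is what gives multiprocessing.RLock its reentrant behaviour; a minimal sketch:

    import multiprocessing

    lock = multiprocessing.RLock()   # a SemLock(RECURSIVE_MUTEX, 1, 1) underneath
    lock.acquire()                   # count 0 -> 1, owning thread id recorded
    lock.acquire()                   # same thread: ISMINE() holds, count 1 -> 2
    lock.release()                   # count 2 -> 1
    lock.release()                   # count 1 -> 0; one more would raise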
--- /dev/null
+/*
+ * A type which wraps a socket
+ *
+ * socket_connection.c
+ *
+ * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+ */
+
+#include "multiprocessing.h"
+
+#ifdef MS_WINDOWS
+# define WRITE(h, buffer, length) send((SOCKET)h, buffer, length, 0)
+# define READ(h, buffer, length) recv((SOCKET)h, buffer, length, 0)
+# define CLOSE(h) closesocket((SOCKET)h)
+#else
+# define WRITE(h, buffer, length) write(h, buffer, length)
+# define READ(h, buffer, length) read(h, buffer, length)
+# define CLOSE(h) close(h)
+#endif
+
+/*
+ * Send string to file descriptor
+ */
+
+static Py_ssize_t
+_conn_sendall(HANDLE h, char *string, size_t length)
+{
+ char *p = string;
+ Py_ssize_t res;
+
+ while (length > 0) {
+ res = WRITE(h, p, length);
+ if (res < 0)
+ return MP_SOCKET_ERROR;
+ length -= res;
+ p += res;
+ }
+
+ return MP_SUCCESS;
+}
+
+/*
+ * Receive string of exact length from file descriptor
+ */
+
+static Py_ssize_t
+_conn_recvall(HANDLE h, char *buffer, size_t length)
+{
+ size_t remaining = length;
+ Py_ssize_t temp;
+ char *p = buffer;
+
+ while (remaining > 0) {
+ temp = READ(h, p, remaining);
+ if (temp <= 0) {
+ if (temp == 0)
+ return remaining == length ?
+ MP_END_OF_FILE : MP_EARLY_END_OF_FILE;
+ else
+ return temp;
+ }
+ remaining -= temp;
+ p += temp;
+ }
+
+ return MP_SUCCESS;
+}
+
+/*
+ * Send a string prepended by the string length in network byte order
+ */
+
+static Py_ssize_t
+conn_send_string(ConnectionObject *conn, char *string, size_t length)
+{
+ /* The "header" of the message is a 32 bit unsigned number (in
+ network order) which specifies the length of the "body". If
+ the message is shorter than about 16kb then it is quicker to
+ combine the "header" and the "body" of the message and send
+ them at once. */
+ if (length < (16*1024)) {
+ char *message;
+ int res;
+
+ message = PyMem_Malloc(length+4);
+ if (message == NULL)
+ return MP_MEMORY_ERROR;
+
+ *(UINT32*)message = htonl((UINT32)length);
+ memcpy(message+4, string, length);
+ res = _conn_sendall(conn->handle, message, length+4);
+ PyMem_Free(message);
+ return res;
+ } else {
+ UINT32 lenbuff;
+
+ if (length > MAX_MESSAGE_LENGTH)
+ return MP_BAD_MESSAGE_LENGTH;
+
+ lenbuff = htonl((UINT32)length);
+ return _conn_sendall(conn->handle, (char*)&lenbuff, 4) ||
+ _conn_sendall(conn->handle, string, length);
+ }
+}
+
+/*
+ * Attempts to read into buffer, or failing that into *newbuffer
+ *
+ * Returns number of bytes read.
+ */
+
+static Py_ssize_t
+conn_recv_string(ConnectionObject *conn, char *buffer,
+ size_t buflength, char **newbuffer, size_t maxlength)
+{
+ int res;
+ UINT32 ulength;
+
+ *newbuffer = NULL;
+
+ res = _conn_recvall(conn->handle, (char*)&ulength, 4);
+ if (res < 0)
+ return res;
+
+ ulength = ntohl(ulength);
+ if (ulength > maxlength)
+ return MP_BAD_MESSAGE_LENGTH;
+
+ if (ulength <= buflength) {
+ res = _conn_recvall(conn->handle, buffer, (size_t)ulength);
+ return res < 0 ? res : ulength;
+ } else {
+ *newbuffer = PyMem_Malloc((size_t)ulength);
+ if (*newbuffer == NULL)
+ return MP_MEMORY_ERROR;
+ res = _conn_recvall(conn->handle, *newbuffer, (size_t)ulength);
+ return res < 0 ? (Py_ssize_t)res : (Py_ssize_t)ulength;
+ }
+}
+
+/*
+ * Check whether any data is available for reading -- a negative timeout blocks
+ */
+
+static int
+conn_poll(ConnectionObject *conn, double timeout)
+{
+ int res;
+ fd_set rfds;
+
+ FD_ZERO(&rfds);
+ FD_SET((SOCKET)conn->handle, &rfds);
+
+ if (timeout < 0.0) {
+ res = select((int)conn->handle+1, &rfds, NULL, NULL, NULL);
+ } else {
+ struct timeval tv;
+ tv.tv_sec = (long)timeout;
+ tv.tv_usec = (long)((timeout - tv.tv_sec) * 1e6 + 0.5);
+ res = select((int)conn->handle+1, &rfds, NULL, NULL, &tv);
+ }
+
+ if (res < 0) {
+ return MP_SOCKET_ERROR;
+ } else if (FD_ISSET(conn->handle, &rfds)) {
+ return TRUE;
+ } else {
+ assert(res == 0);
+ return FALSE;
+ }
+}
+
+/*
+ * "connection.h" defines the Connection type using defs above
+ */
+
+#define CONNECTION_NAME "Connection"
+#define CONNECTION_TYPE ConnectionType
+
+#include "connection.h"
--- /dev/null
+/*\r
+ * Win32 functions used by multiprocessing package\r
+ *\r
+ * win32_functions.c\r
+ *\r
+ * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
+ */\r
+\r
+#include "multiprocessing.h"\r
+\r
+\r
+#define WIN32_FUNCTION(func) \\r
+ {#func, (PyCFunction)win32_ ## func, METH_VARARGS | METH_STATIC, ""}\r
+\r
+#define WIN32_CONSTANT(fmt, con) \\r
+ PyDict_SetItemString(Win32Type.tp_dict, #con, Py_BuildValue(fmt, con))\r
+\r
+\r
+static PyObject *\r
+win32_CloseHandle(PyObject *self, PyObject *args)\r
+{\r
+ HANDLE hObject;\r
+ BOOL success;\r
+\r
+ if (!PyArg_ParseTuple(args, F_HANDLE, &hObject))\r
+ return NULL;\r
+\r
+ Py_BEGIN_ALLOW_THREADS\r
+ success = CloseHandle(hObject); \r
+ Py_END_ALLOW_THREADS\r
+\r
+ if (!success)\r
+ return PyErr_SetFromWindowsErr(0);\r
+\r
+ Py_RETURN_NONE;\r
+}\r
+\r
+static PyObject *\r
+win32_ConnectNamedPipe(PyObject *self, PyObject *args)\r
+{\r
+ HANDLE hNamedPipe;\r
+ LPOVERLAPPED lpOverlapped;\r
+ BOOL success;\r
+\r
+ if (!PyArg_ParseTuple(args, F_HANDLE F_POINTER, \r
+ &hNamedPipe, &lpOverlapped))\r
+ return NULL;\r
+\r
+ Py_BEGIN_ALLOW_THREADS\r
+ success = ConnectNamedPipe(hNamedPipe, lpOverlapped);\r
+ Py_END_ALLOW_THREADS\r
+\r
+ if (!success)\r
+ return PyErr_SetFromWindowsErr(0);\r
+\r
+ Py_RETURN_NONE;\r
+}\r
+\r
+static PyObject *\r
+win32_CreateFile(PyObject *self, PyObject *args)\r
+{\r
+ LPCTSTR lpFileName;\r
+ DWORD dwDesiredAccess;\r
+ DWORD dwShareMode;\r
+ LPSECURITY_ATTRIBUTES lpSecurityAttributes;\r
+ DWORD dwCreationDisposition;\r
+ DWORD dwFlagsAndAttributes;\r
+ HANDLE hTemplateFile;\r
+ HANDLE handle;\r
+\r
+ if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_POINTER \r
+ F_DWORD F_DWORD F_HANDLE,\r
+ &lpFileName, &dwDesiredAccess, &dwShareMode, \r
+ &lpSecurityAttributes, &dwCreationDisposition, \r
+ &dwFlagsAndAttributes, &hTemplateFile))\r
+ return NULL;\r
+\r
+ Py_BEGIN_ALLOW_THREADS\r
+ handle = CreateFile(lpFileName, dwDesiredAccess, \r
+ dwShareMode, lpSecurityAttributes, \r
+ dwCreationDisposition, \r
+ dwFlagsAndAttributes, hTemplateFile);\r
+ Py_END_ALLOW_THREADS\r
+\r
+ if (handle == INVALID_HANDLE_VALUE)\r
+ return PyErr_SetFromWindowsErr(0);\r
+\r
+ return Py_BuildValue(F_HANDLE, handle);\r
+}\r
+\r
+static PyObject *\r
+win32_CreateNamedPipe(PyObject *self, PyObject *args)\r
+{\r
+ LPCTSTR lpName;\r
+ DWORD dwOpenMode;\r
+ DWORD dwPipeMode;\r
+ DWORD nMaxInstances;\r
+ DWORD nOutBufferSize;\r
+ DWORD nInBufferSize;\r
+ DWORD nDefaultTimeOut;\r
+ LPSECURITY_ATTRIBUTES lpSecurityAttributes;\r
+ HANDLE handle;\r
+\r
+ if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_DWORD \r
+ F_DWORD F_DWORD F_DWORD F_POINTER,\r
+ &lpName, &dwOpenMode, &dwPipeMode, \r
+ &nMaxInstances, &nOutBufferSize, \r
+ &nInBufferSize, &nDefaultTimeOut,\r
+ &lpSecurityAttributes))\r
+ return NULL;\r
+\r
+ Py_BEGIN_ALLOW_THREADS\r
+ handle = CreateNamedPipe(lpName, dwOpenMode, dwPipeMode, \r
+ nMaxInstances, nOutBufferSize, \r
+ nInBufferSize, nDefaultTimeOut,\r
+ lpSecurityAttributes);\r
+ Py_END_ALLOW_THREADS\r
+\r
+ if (handle == INVALID_HANDLE_VALUE)\r
+ return PyErr_SetFromWindowsErr(0);\r
+\r
+ return Py_BuildValue(F_HANDLE, handle);\r
+}\r
+\r
+static PyObject *\r
+win32_ExitProcess(PyObject *self, PyObject *args)\r
+{\r
+ UINT uExitCode;\r
+\r
+ if (!PyArg_ParseTuple(args, "I", &uExitCode))\r
+ return NULL;\r
+\r
+ ExitProcess(uExitCode);\r
+\r
+ return NULL;\r
+}\r
+\r
+static PyObject *\r
+win32_GetLastError(PyObject *self, PyObject *args)\r
+{\r
+ return Py_BuildValue(F_DWORD, GetLastError());\r
+}\r
+\r
+static PyObject *\r
+win32_OpenProcess(PyObject *self, PyObject *args)\r
+{\r
+ DWORD dwDesiredAccess;\r
+ BOOL bInheritHandle;\r
+ DWORD dwProcessId;\r
+ HANDLE handle;\r
+\r
+ if (!PyArg_ParseTuple(args, F_DWORD "i" F_DWORD, \r
+ &dwDesiredAccess, &bInheritHandle, &dwProcessId))\r
+ return NULL;\r
+\r
+ handle = OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId); \r
+ if (handle == NULL)\r
+ return PyErr_SetFromWindowsErr(0);\r
+\r
+ return Py_BuildValue(F_HANDLE, handle);\r
+}\r
+\r
+static PyObject *\r
+win32_SetNamedPipeHandleState(PyObject *self, PyObject *args)\r
+{\r
+ HANDLE hNamedPipe;\r
+ PyObject *oArgs[3];\r
+ DWORD dwArgs[3], *pArgs[3] = {NULL, NULL, NULL};\r
+ int i;\r
+\r
+ if (!PyArg_ParseTuple(args, F_HANDLE "OOO", \r
+ &hNamedPipe, &oArgs[0], &oArgs[1], &oArgs[2]))\r
+ return NULL;\r
+\r
+ PyErr_Clear();\r
+\r
+ for (i = 0 ; i < 3 ; i++) {\r
+ if (oArgs[i] != Py_None) {\r
+ dwArgs[i] = PyInt_AsUnsignedLongMask(oArgs[i]);\r
+ if (PyErr_Occurred())\r
+ return NULL;\r
+ pArgs[i] = &dwArgs[i];\r
+ }\r
+ }\r
+\r
+ if (!SetNamedPipeHandleState(hNamedPipe, pArgs[0], pArgs[1], pArgs[2]))\r
+ return PyErr_SetFromWindowsErr(0);\r
+\r
+ Py_RETURN_NONE;\r
+}\r
+\r
+static PyObject *\r
+win32_WaitNamedPipe(PyObject *self, PyObject *args)\r
+{\r
+ LPCTSTR lpNamedPipeName;\r
+ DWORD nTimeOut;\r
+ BOOL success;\r
+\r
+ if (!PyArg_ParseTuple(args, "s" F_DWORD, &lpNamedPipeName, &nTimeOut))\r
+ return NULL;\r
+\r
+ Py_BEGIN_ALLOW_THREADS\r
+ success = WaitNamedPipe(lpNamedPipeName, nTimeOut);\r
+ Py_END_ALLOW_THREADS\r
+\r
+ if (!success)\r
+ return PyErr_SetFromWindowsErr(0);\r
+\r
+ Py_RETURN_NONE;\r
+}\r
+\r
+static PyMethodDef win32_methods[] = {\r
+ WIN32_FUNCTION(CloseHandle),\r
+ WIN32_FUNCTION(GetLastError),\r
+ WIN32_FUNCTION(OpenProcess),\r
+ WIN32_FUNCTION(ExitProcess),\r
+ WIN32_FUNCTION(ConnectNamedPipe),\r
+ WIN32_FUNCTION(CreateFile),\r
+ WIN32_FUNCTION(CreateNamedPipe),\r
+ WIN32_FUNCTION(SetNamedPipeHandleState),\r
+ WIN32_FUNCTION(WaitNamedPipe),\r
+ {NULL}\r
+};\r
+\r
+\r
+PyTypeObject Win32Type = {\r
+ PyVarObject_HEAD_INIT(NULL, 0)\r
+};\r
+\r
+\r
+PyObject *\r
+create_win32_namespace(void)\r
+{\r
+ Win32Type.tp_name = "_multiprocessing.win32";\r
+ Win32Type.tp_methods = win32_methods;\r
+ if (PyType_Ready(&Win32Type) < 0)\r
+ return NULL;\r
+ Py_INCREF(&Win32Type);\r
+\r
+ WIN32_CONSTANT(F_DWORD, ERROR_ALREADY_EXISTS);\r
+ WIN32_CONSTANT(F_DWORD, ERROR_PIPE_BUSY);\r
+ WIN32_CONSTANT(F_DWORD, ERROR_PIPE_CONNECTED);\r
+ WIN32_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT);\r
+ WIN32_CONSTANT(F_DWORD, GENERIC_READ);\r
+ WIN32_CONSTANT(F_DWORD, GENERIC_WRITE);\r
+ WIN32_CONSTANT(F_DWORD, INFINITE);\r
+ WIN32_CONSTANT(F_DWORD, NMPWAIT_WAIT_FOREVER);\r
+ WIN32_CONSTANT(F_DWORD, OPEN_EXISTING);\r
+ WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_DUPLEX);\r
+ WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_INBOUND);\r
+ WIN32_CONSTANT(F_DWORD, PIPE_READMODE_MESSAGE);\r
+ WIN32_CONSTANT(F_DWORD, PIPE_TYPE_MESSAGE);\r
+ WIN32_CONSTANT(F_DWORD, PIPE_UNLIMITED_INSTANCES);\r
+ WIN32_CONSTANT(F_DWORD, PIPE_WAIT);\r
+ WIN32_CONSTANT(F_DWORD, PROCESS_ALL_ACCESS);\r
+\r
+ WIN32_CONSTANT("i", NULL);\r
+\r
+ return (PyObject*)&Win32Type;\r
+}\r
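
A Windows-only sketch (pipe name hypothetical) of the _multiprocessing.win32
namespace assembled above, roughly mirroring how multiprocessing.connection
sets up a pipe listener:

    from _multiprocessing import win32

    handle = win32.CreateNamedPipe(
        r'\\.\pipe\demo',                        # hypothetical pipe name
        win32.PIPE_ACCESS_DUPLEX,
        win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE | win32.PIPE_WAIT,
        win32.PIPE_UNLIMITED_INSTANCES, 4096, 4096,
        win32.NMPWAIT_WAIT_FOREVER, win32.NULL)
    win32.ConnectNamedPipe(handle, win32.NULL)   # blocks until a client connects
    win32.CloseHandle(handle)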
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
+ # Richard Oudkerk's multiprocessing module
+ if platform == 'win32': # Windows
+ macros = dict()
+ libraries = ['ws2_32']
+
+ elif platform == 'darwin': # Mac OSX
+ macros = dict(
+ HAVE_SEM_OPEN=1,
+ HAVE_SEM_TIMEDWAIT=0,
+ HAVE_FD_TRANSFER=1,
+ HAVE_BROKEN_SEM_GETVALUE=1
+ )
+ libraries = []
+
+ elif platform == 'cygwin': # Cygwin
+ macros = dict(
+ HAVE_SEM_OPEN=1,
+ HAVE_SEM_TIMEDWAIT=1,
+ HAVE_FD_TRANSFER=0,
+ HAVE_BROKEN_SEM_UNLINK=1
+ )
+ libraries = []
+ else: # Linux and other unices
+ macros = dict(
+ HAVE_SEM_OPEN=1,
+ HAVE_SEM_TIMEDWAIT=1,
+ HAVE_FD_TRANSFER=1
+ )
+ libraries = ['rt']
+
+ if platform == 'win32':
+ multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
+ '_multiprocessing/semaphore.c',
+ '_multiprocessing/pipe_connection.c',
+ '_multiprocessing/socket_connection.c',
+ '_multiprocessing/win32_functions.c'
+ ]
+
+ else:
+ multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
+ '_multiprocessing/socket_connection.c'
+ ]
+
+ if macros.get('HAVE_SEM_OPEN', False):
+ multiprocessing_srcs.append('_multiprocessing/semaphore.c')
+
+ exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
+ define_macros=macros.items(),
+ include_dirs=["Modules/_multiprocessing"]))
+ # End multiprocessing
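
Concretely, the Linux branch above amounts to compiling the extension with the
following defines and extra library (an illustrative sketch, not taken from a
build log):

    macros = dict(HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=1, HAVE_FD_TRANSFER=1)
    libraries = ['rt']
    print ['-D%s=%d' % item for item in macros.items()] + \
          ['-l%s' % lib for lib in libraries]
    # e.g. ['-DHAVE_SEM_OPEN=1', '-DHAVE_SEM_TIMEDWAIT=1',
    #       '-DHAVE_FD_TRANSFER=1', '-lrt']  (dict order may vary)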
+
+
# Platform-specific libraries
if platform == 'linux2':
# Linux-specific modules