PK!<Ϳdummy/__init__.pynu[# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
# __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] # # Imports # import threading import sys import weakref import array import itertools from multiprocessing import TimeoutError, cpu_count from multiprocessing.dummy.connection import Pipe from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event from Queue import Queue # # # class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): assert self._parent is current_process() self._start_called = True if hasattr(self._parent, '_children'): self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None # # # class Condition(threading._Condition): notify_all = threading._Condition.notify_all.im_func # # # Process = DummyProcess current_process = threading.current_thread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass # # # class Namespace(object): def __init__(self, **kwds): self.__dict__.update(kwds) def __repr__(self): items = self.__dict__.items() temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return 'Namespace(%s)' % str.join(', ', temp) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def __init__(self, typecode, value, lock=True): 
self._typecode = typecode self._value = value def _get(self): return self._value def _set(self, value): self._value = value value = property(_get, _set) def __repr__(self): return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from multiprocessing.pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue PK!q#TTdummy/__init__.pycnu[ {fc@sdddddddddd d d d d dgZddlZddlZddlZddlZddlZddlmZmZddl m Z ddlm Z m Z m Z mZddlmZddlmZdejfdYZdejfdYZeZejZeje_dZdZdefdYZeZeZe dZ!defdYZ"d Z#d!Z$ddd#d"Z&eZ'dS($tProcesstcurrent_processtactive_childrentfreeze_supporttLocktRLockt SemaphoretBoundedSemaphoret ConditiontEventtQueuetManagertPipetPoolt JoinableQueueiN(t TimeoutErrort cpu_count(R (RRRR(R (R t DummyProcesscBs8eZddddidZdZedZRS(cCsPtjj||||||d|_tj|_t|_ t |_ dS(N( t threadingtThreadt__init__tNonet_pidtweakreftWeakKeyDictionaryt _childrentFalset _start_calledRt_parent(tselftgroupttargettnametargstkwargs((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyR?s   cCsZ|jtkstt|_t|jdrFd|jj|eZedZdZdZeeeZdZRS(cCs||_||_dS(N(t _typecodet_value(RRBR?RD((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyRs cCs|jS(N(RH(R((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyt_getscCs ||_dS(N(RH(RR?((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyt_setscCs dt|j|j|jfS(Ns <%s(%r, %r)>(ttypeR)RGRH(R((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyR@s( R)R*R$RRIRJR+R?R@(((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyRFs    cCs tjtS(N(tsystmodulesR)(((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyR scCsdS(N((((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pytshutdownscCs ddlm}||||S(Ni(t ThreadPool(tmultiprocessing.poolRO(t processest initializertinitargsRO((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyR s(((t__all__RRLRRAt itertoolstmultiprocessingRRt 
multiprocessing.dummy.connectionR RRRRR R RRR,RRtcurrent_threadRRRRRtobjectR3tdictR/R$RERFR RNRR R(((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyt$s8      "       PK!""dummy/__init__.pyonu[ {fc@sdddddddddd d d d d dgZddlZddlZddlZddlZddlZddlmZmZddl m Z ddlm Z m Z m Z mZddlmZddlmZdejfdYZdejfdYZeZejZeje_dZdZdefdYZeZeZe dZ!defdYZ"d Z#d!Z$ddd#d"Z&eZ'dS($tProcesstcurrent_processtactive_childrentfreeze_supporttLocktRLockt SemaphoretBoundedSemaphoret ConditiontEventtQueuetManagertPipetPoolt JoinableQueueiN(t TimeoutErrort cpu_count(R (RRRR(R (R t DummyProcesscBs8eZddddidZdZedZRS(cCsPtjj||||||d|_tj|_t|_ t |_ dS(N( t threadingtThreadt__init__tNonet_pidtweakreftWeakKeyDictionaryt _childrentFalset _start_calledRt_parent(tselftgroupttargettnametargstkwargs((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyR?s   cCsBt|_t|jdr.d|jj|eZedZdZdZeeeZdZRS(cCs||_||_dS(N(t _typecodet_value(RRAR>RC((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyRs cCs|jS(N(RG(R((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyt_getscCs ||_dS(N(RG(RR>((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyt_setscCs dt|j|j|jfS(Ns <%s(%r, %r)>(ttypeR(RFRG(R((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyR?s( R(R)R#RRHRIR*R>R?(((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyREs    cCs tjtS(N(tsystmodulesR((((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyR scCsdS(N((((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pytshutdownscCs ddlm}||||S(Ni(t ThreadPool(tmultiprocessing.poolRN(t processest initializertinitargsRN((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyR s(((t__all__RRKRR@t itertoolstmultiprocessingRRt multiprocessing.dummy.connectionR RRRRR R RRR+RRtcurrent_threadRRRRRtobjectR2tdictR.R#RDRER RMRR R(((s6/usr/lib64/python2.7/multiprocessing/dummy/__init__.pyt$s8      "       PK!x dummy/connection.pynu[# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # 
multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
# __all__ = [ 'Client', 'Listener', 'Pipe' ] from Queue import Queue families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None address = property(lambda self: self._backlog_queue) def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False self._in.not_empty.acquire() self._in.not_empty.wait(timeout) self._in.not_empty.release() return self._in.qsize() > 0 def close(self): pass PK! dummy/connection.pycnu[ {fc@smdddgZddlmZd gZdefdYZdZedZdefd YZ d S( tClienttListenertPipei(tQueuecBs;eZddddZdZdZedZRS(icCst||_dS(N(Rt_backlog_queue(tselftaddresstfamilytbacklog((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyt__init__-scCst|jjS(N(t ConnectionRtget(R((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pytaccept0scCs d|_dS(N(tNoneR(R((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pytclose3scCs|jS(N(R(R((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyt6tN(t__name__t __module__R R R RtpropertyR(((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyR+s  cCs3tt}}|j||ft||S(N(RtputR (Rt_int_out((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyR9scCs/tt}}t||t||fS(N(RR (tduplextatb((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyR?sR cBs&eZdZddZdZRS(cCs<||_||_|j|_|_|j|_|_dS(N(RRRtsendt send_bytesR trecvt recv_bytes(RRR((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyR Fs  gcCso|jjdkrtS|dkr)tS|jjj|jjj||jjj|jjdkS(Nig(RtqsizetTruetFalset 
not_emptytacquiretwaittrelease(Rttimeout((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pytpollLs cCsdS(N((R((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyRVs(RRR R&R(((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyR Ds  N( t__all__RR tfamiliestobjectRRRRR (((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyt#s    PK! dummy/connection.pyonu[ {fc@smdddgZddlmZd gZdefdYZdZedZdefd YZ d S( tClienttListenertPipei(tQueuecBs;eZddddZdZdZedZRS(icCst||_dS(N(Rt_backlog_queue(tselftaddresstfamilytbacklog((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyt__init__-scCst|jjS(N(t ConnectionRtget(R((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pytaccept0scCs d|_dS(N(tNoneR(R((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pytclose3scCs|jS(N(R(R((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyt6tN(t__name__t __module__R R R RtpropertyR(((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyR+s  cCs3tt}}|j||ft||S(N(RtputR (Rt_int_out((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyR9scCs/tt}}t||t||fS(N(RR (tduplextatb((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyR?sR cBs&eZdZddZdZRS(cCs<||_||_|j|_|_|j|_|_dS(N(RRRtsendt send_bytesR trecvt recv_bytes(RRR((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyR Fs  gcCso|jjdkrtS|dkr)tS|jjj|jjj||jjj|jjdkS(Nig(RtqsizetTruetFalset not_emptytacquiretwaittrelease(Rttimeout((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pytpollLs cCsdS(N((R((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyRVs(RRR R&R(((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyR Ds  N( t__all__RR tfamiliestobjectRRRRR (((s8/usr/lib64/python2.7/multiprocessing/dummy/connection.pyt#s    PK!nFX}} __init__.pynu[# # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of 
threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Try calling `multiprocessing.doc.main()` to read the html # documentation in a webbrowser. # # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
# __version__ = '0.70a1' __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger', 'allow_connection_pickling', 'BufferTooShort', 'TimeoutError', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array', 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING', ] __author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)' # # Imports # import os import sys from multiprocessing.process import Process, current_process, active_children from multiprocessing.util import SUBDEBUG, SUBWARNING # # Exceptions # class ProcessError(Exception): pass class BufferTooShort(ProcessError): pass class TimeoutError(ProcessError): pass class AuthenticationError(ProcessError): pass # This is down here because _multiprocessing uses BufferTooShort import _multiprocessing # # Definitions not depending on native semaphores # def Manager(): ''' Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. 
''' from multiprocessing.managers import SyncManager m = SyncManager() m.start() return m def Pipe(duplex=True): ''' Returns two connection object connected by a pipe ''' from multiprocessing.connection import Pipe return Pipe(duplex) def cpu_count(): ''' Returns the number of CPUs in the system ''' if sys.platform == 'win32': try: num = int(os.environ['NUMBER_OF_PROCESSORS']) except (ValueError, KeyError): num = 0 elif 'bsd' in sys.platform or sys.platform == 'darwin': comm = '/sbin/sysctl -n hw.ncpu' if sys.platform == 'darwin': comm = '/usr' + comm try: with os.popen(comm) as p: num = int(p.read()) except ValueError: num = 0 else: try: num = os.sysconf('SC_NPROCESSORS_ONLN') except (ValueError, OSError, AttributeError): num = 0 if num >= 1: return num else: raise NotImplementedError('cannot determine number of cpus') def freeze_support(): ''' Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from multiprocessing.forking import freeze_support freeze_support() def get_logger(): ''' Return package logger -- if it does not already exist then it is created ''' from multiprocessing.util import get_logger return get_logger() def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' from multiprocessing.util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(): ''' Install support for sending connections and sockets between processes ''' from multiprocessing import reduction # # Definitions depending on native semaphores # def Lock(): ''' Returns a non-recursive lock object ''' from multiprocessing.synchronize import Lock return Lock() def RLock(): ''' Returns a recursive lock object ''' from multiprocessing.synchronize import RLock return RLock() def Condition(lock=None): ''' Returns a condition object ''' from multiprocessing.synchronize import Condition return 
Condition(lock) def Semaphore(value=1): ''' Returns a semaphore object ''' from multiprocessing.synchronize import Semaphore return Semaphore(value) def BoundedSemaphore(value=1): ''' Returns a bounded semaphore object ''' from multiprocessing.synchronize import BoundedSemaphore return BoundedSemaphore(value) def Event(): ''' Returns an event object ''' from multiprocessing.synchronize import Event return Event() def Queue(maxsize=0): ''' Returns a queue object ''' from multiprocessing.queues import Queue return Queue(maxsize) def JoinableQueue(maxsize=0): ''' Returns a queue object ''' from multiprocessing.queues import JoinableQueue return JoinableQueue(maxsize) def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None): ''' Returns a process pool object ''' from multiprocessing.pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild) def RawValue(typecode_or_type, *args): ''' Returns a shared object ''' from multiprocessing.sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(typecode_or_type, size_or_initializer): ''' Returns a shared array ''' from multiprocessing.sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(typecode_or_type, *args, **kwds): ''' Returns a synchronized shared object ''' from multiprocessing.sharedctypes import Value return Value(typecode_or_type, *args, **kwds) def Array(typecode_or_type, size_or_initializer, **kwds): ''' Returns a synchronized shared array ''' from multiprocessing.sharedctypes import Array return Array(typecode_or_type, size_or_initializer, **kwds) # # # if sys.platform == 'win32': def set_executable(executable): ''' Sets the path to a python.exe or pythonw.exe binary used to run child processes on Windows instead of sys.executable. Useful for people embedding Python. ''' from multiprocessing.forking import set_executable set_executable(executable) __all__ += ['set_executable'] PK!fw!! 
__init__.pycnu[ {fc@sdZddddddddd d d d d ddddddddddddddgZdZddlZddlZddlmZmZmZdd l m Z m Z d!e fd"YZ d e fd#YZd e fd$YZd%e fd&YZddlZd'Zed(Zd)Zd*Zd+Zdd,Zd-Zd.Zd/Zdd0Zd1d2Zd1d3Zd4Z d5d6Z!d5d7Z"ddd@dd8Z#d9Z$d:Z%d;Z&d<Z'ej(d=krd>Z)ed?g7ZndS(As0.70a1tProcesstcurrent_processtactive_childrentfreeze_supporttManagertPipet cpu_countt log_to_stderrt get_loggertallow_connection_picklingtBufferTooShortt TimeoutErrortLocktRLockt SemaphoretBoundedSemaphoret ConditiontEventtQueuet JoinableQueuetPooltValuetArraytRawValuetRawArraytSUBDEBUGt SUBWARNINGs"R. Oudkerk (r.m.oudkerk@gmail.com)iN(RRR(RRt ProcessErrorcBseZRS((t__name__t __module__(((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRGscBseZRS((RR(((s0/usr/lib64/python2.7/multiprocessing/__init__.pyR JscBseZRS((RR(((s0/usr/lib64/python2.7/multiprocessing/__init__.pyR MstAuthenticationErrorcBseZRS((RR(((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRPscCs'ddlm}|}|j|S(s Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. i(t SyncManager(tmultiprocessing.managersRtstart(Rtm((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRZs  cCsddlm}||S(s; Returns two connection object connected by a pipe i(R(tmultiprocessing.connectionR(tduplexR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRfscCs*tjdkrIyttjd}Wq ttfk rEd}q Xndtjksgtjdkrd}tjdkrd|}ny.tj|}t|j}WdQXWq tk rd}q Xn6ytj d }Wn tt t fk r d}nX|d kr|St d dS( s2 Returns the number of CPUs in the system twin32tNUMBER_OF_PROCESSORSitbsdtdarwins/sbin/sysctl -n hw.ncpus/usrNtSC_NPROCESSORS_ONLNiscannot determine number of cpus( tsystplatformtinttostenviront ValueErrortKeyErrortpopentreadtsysconftOSErrortAttributeErrortNotImplementedError(tnumtcommtp((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRms*      cCs?tjdkr;ttdtr;ddlm}|ndS(s Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. 
R%tfrozeni(RN(R*R+tgetattrtFalsetmultiprocessing.forkingR(R((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRs!cCsddlm}|S(sR Return package logger -- if it does not already exist then it is created i(R(tmultiprocessing.utilR(R((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCsddlm}||S(sB Turn on logging and add a handler which prints to stderr i(R(R>R(tlevelR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCsddlm}dS(sO Install support for sending connections and sockets between processes i(t reductionN(tmultiprocessingR@(R@((s0/usr/lib64/python2.7/multiprocessing/__init__.pyR scCsddlm}|S(s- Returns a non-recursive lock object i(R (tmultiprocessing.synchronizeR (R ((s0/usr/lib64/python2.7/multiprocessing/__init__.pyR scCsddlm}|S(s) Returns a recursive lock object i(R (RBR (R ((s0/usr/lib64/python2.7/multiprocessing/__init__.pyR scCsddlm}||S(s$ Returns a condition object i(R(RBR(tlockR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRsicCsddlm}||S(s$ Returns a semaphore object i(R(RBR(tvalueR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCsddlm}||S(s, Returns a bounded semaphore object i(R(RBR(RDR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCsddlm}|S(s! Returns an event object i(R(RBR(R((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRsicCsddlm}||S(s Returns a queue object i(R(tmultiprocessing.queuesR(tmaxsizeR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCsddlm}||S(s Returns a queue object i(R(RER(RFR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCs#ddlm}|||||S(s' Returns a process pool object i(R(tmultiprocessing.poolR(t processest initializertinitargstmaxtasksperchildR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscGsddlm}|||S(s! Returns a shared object i(R(tmultiprocessing.sharedctypesR(ttypecode_or_typetargsR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCsddlm}|||S(s Returns a shared array i(R(RLR(RMtsize_or_initializerR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscOs ddlm}||||S(s. 
Returns a synchronized shared object i(R(RLR(RMRNtkwdsR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscKs ddlm}||||S(s- Returns a synchronized shared array i(R(RLR(RMRORPR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRsR%cCsddlm}||dS(s Sets the path to a python.exe or pythonw.exe binary used to run child processes on Windows instead of sys.executable. Useful for people embedding Python. i(tset_executableN(R=RQ(t executableRQ((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRQ sRQ((*t __version__t__all__t __author__R-R*tmultiprocessing.processRRRR>RRt ExceptionRR R Rt_multiprocessingRtTrueRRRRtNoneRR R R RRRRRRRRRRRR+RQ(((s0/usr/lib64/python2.7/multiprocessing/__init__.pyt,sN                      PK!fw!! __init__.pyonu[ {fc@sdZddddddddd d d d d ddddddddddddddgZdZddlZddlZddlmZmZmZdd l m Z m Z d!e fd"YZ d e fd#YZd e fd$YZd%e fd&YZddlZd'Zed(Zd)Zd*Zd+Zdd,Zd-Zd.Zd/Zdd0Zd1d2Zd1d3Zd4Z d5d6Z!d5d7Z"ddd@dd8Z#d9Z$d:Z%d;Z&d<Z'ej(d=krd>Z)ed?g7ZndS(As0.70a1tProcesstcurrent_processtactive_childrentfreeze_supporttManagertPipet cpu_countt log_to_stderrt get_loggertallow_connection_picklingtBufferTooShortt TimeoutErrortLocktRLockt SemaphoretBoundedSemaphoret ConditiontEventtQueuet JoinableQueuetPooltValuetArraytRawValuetRawArraytSUBDEBUGt SUBWARNINGs"R. Oudkerk (r.m.oudkerk@gmail.com)iN(RRR(RRt ProcessErrorcBseZRS((t__name__t __module__(((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRGscBseZRS((RR(((s0/usr/lib64/python2.7/multiprocessing/__init__.pyR JscBseZRS((RR(((s0/usr/lib64/python2.7/multiprocessing/__init__.pyR MstAuthenticationErrorcBseZRS((RR(((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRPscCs'ddlm}|}|j|S(s Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. 
i(t SyncManager(tmultiprocessing.managersRtstart(Rtm((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRZs  cCsddlm}||S(s; Returns two connection object connected by a pipe i(R(tmultiprocessing.connectionR(tduplexR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRfscCs*tjdkrIyttjd}Wq ttfk rEd}q Xndtjksgtjdkrd}tjdkrd|}ny.tj|}t|j}WdQXWq tk rd}q Xn6ytj d }Wn tt t fk r d}nX|d kr|St d dS( s2 Returns the number of CPUs in the system twin32tNUMBER_OF_PROCESSORSitbsdtdarwins/sbin/sysctl -n hw.ncpus/usrNtSC_NPROCESSORS_ONLNiscannot determine number of cpus( tsystplatformtinttostenviront ValueErrortKeyErrortpopentreadtsysconftOSErrortAttributeErrortNotImplementedError(tnumtcommtp((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRms*      cCs?tjdkr;ttdtr;ddlm}|ndS(s Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. R%tfrozeni(RN(R*R+tgetattrtFalsetmultiprocessing.forkingR(R((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRs!cCsddlm}|S(sR Return package logger -- if it does not already exist then it is created i(R(tmultiprocessing.utilR(R((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCsddlm}||S(sB Turn on logging and add a handler which prints to stderr i(R(R>R(tlevelR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCsddlm}dS(sO Install support for sending connections and sockets between processes i(t reductionN(tmultiprocessingR@(R@((s0/usr/lib64/python2.7/multiprocessing/__init__.pyR scCsddlm}|S(s- Returns a non-recursive lock object i(R (tmultiprocessing.synchronizeR (R ((s0/usr/lib64/python2.7/multiprocessing/__init__.pyR scCsddlm}|S(s) Returns a recursive lock object i(R (RBR (R ((s0/usr/lib64/python2.7/multiprocessing/__init__.pyR scCsddlm}||S(s$ Returns a condition object i(R(RBR(tlockR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRsicCsddlm}||S(s$ Returns a semaphore object i(R(RBR(tvalueR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCsddlm}||S(s, Returns a 
bounded semaphore object i(R(RBR(RDR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCsddlm}|S(s! Returns an event object i(R(RBR(R((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRsicCsddlm}||S(s Returns a queue object i(R(tmultiprocessing.queuesR(tmaxsizeR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCsddlm}||S(s Returns a queue object i(R(RER(RFR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCs#ddlm}|||||S(s' Returns a process pool object i(R(tmultiprocessing.poolR(t processest initializertinitargstmaxtasksperchildR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscGsddlm}|||S(s! Returns a shared object i(R(tmultiprocessing.sharedctypesR(ttypecode_or_typetargsR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscCsddlm}|||S(s Returns a shared array i(R(RLR(RMtsize_or_initializerR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscOs ddlm}||||S(s. Returns a synchronized shared object i(R(RLR(RMRNtkwdsR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRscKs ddlm}||||S(s- Returns a synchronized shared array i(R(RLR(RMRORPR((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRsR%cCsddlm}||dS(s Sets the path to a python.exe or pythonw.exe binary used to run child processes on Windows instead of sys.executable. Useful for people embedding Python. i(tset_executableN(R=RQ(t executableRQ((s0/usr/lib64/python2.7/multiprocessing/__init__.pyRQ sRQ((*t __version__t__all__t __author__R-R*tmultiprocessing.processRRRR>RRt ExceptionRR R Rt_multiprocessingRtTrueRRRRtNoneRR R R RRRRRRRRRRRR+RQ(((s0/usr/lib64/python2.7/multiprocessing/__init__.pyt,sN                      PK!.:: connection.pynu[# # A higher level module for using sockets (or Windows named pipes) # # multiprocessing/connection.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. 
Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # __all__ = [ 'Client', 'Listener', 'Pipe' ] import os import sys import socket import errno import time import tempfile import itertools import _multiprocessing from multiprocessing import current_process, AuthenticationError from multiprocessing.util import get_temp_dir, Finalize, sub_debug, debug from multiprocessing.forking import duplicate, close # # # BUFSIZE = 8192 # A very generous timeout when it comes to local connections... CONNECTION_TIMEOUT = 20. # The hmac module implicitly defaults to using MD5. 
# Support using a stronger algorithm for the challenge/response code: HMAC_DIGEST_NAME='sha256' _mmap_counter = itertools.count() default_family = 'AF_INET' families = ['AF_INET'] if hasattr(socket, 'AF_UNIX'): default_family = 'AF_UNIX' families += ['AF_UNIX'] if sys.platform == 'win32': default_family = 'AF_PIPE' families += ['AF_PIPE'] def _init_timeout(timeout=CONNECTION_TIMEOUT): return time.time() + timeout def _check_timeout(t): return time.time() > t # # # def arbitrary_address(family): ''' Return an arbitrary free address for the given family ''' if family == 'AF_INET': return ('localhost', 0) elif family == 'AF_UNIX': return tempfile.mktemp(prefix='listener-', dir=get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), _mmap_counter.next()), dir="") else: raise ValueError('unrecognized family') def address_type(address): ''' Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' ''' if type(address) == tuple: return 'AF_INET' elif type(address) is str and address.startswith('\\\\'): return 'AF_PIPE' elif type(address) is str: return 'AF_UNIX' else: raise ValueError('address type of %r unrecognized' % address) # # Public functions # class Listener(object): ''' Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. ''' def __init__(self, address=None, family=None, backlog=1, authkey=None): family = family or (address and address_type(address)) \ or default_family address = address or arbitrary_address(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: self._listener = SocketListener(address, family, backlog) if authkey is not None and not isinstance(authkey, bytes): raise TypeError, 'authkey should be a byte string' self._authkey = authkey def accept(self): ''' Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. 
''' c = self._listener.accept() if self._authkey: deliver_challenge(c, self._authkey) answer_challenge(c, self._authkey) return c def close(self): ''' Close the bound socket or named pipe of `self`. ''' return self._listener.close() address = property(lambda self: self._listener._address) last_accepted = property(lambda self: self._listener._last_accepted) def Client(address, family=None, authkey=None): ''' Returns a connection to the address of a `Listener` ''' family = family or address_type(address) if family == 'AF_PIPE': c = PipeClient(address) else: c = SocketClient(address) if authkey is not None and not isinstance(authkey, bytes): raise TypeError, 'authkey should be a byte string' if authkey is not None: answer_challenge(c, authkey) deliver_challenge(c, authkey) return c if sys.platform != 'win32': def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() s1.setblocking(True) s2.setblocking(True) c1 = _multiprocessing.Connection(os.dup(s1.fileno())) c2 = _multiprocessing.Connection(os.dup(s2.fileno())) s1.close() s2.close() else: fd1, fd2 = os.pipe() c1 = _multiprocessing.Connection(fd1, writable=False) c2 = _multiprocessing.Connection(fd2, readable=False) return c1, c2 else: from _multiprocessing import win32 def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' address = arbitrary_address('AF_PIPE') if duplex: openmode = win32.PIPE_ACCESS_DUPLEX access = win32.GENERIC_READ | win32.GENERIC_WRITE obsize, ibsize = BUFSIZE, BUFSIZE else: openmode = win32.PIPE_ACCESS_INBOUND access = win32.GENERIC_WRITE obsize, ibsize = 0, BUFSIZE h1 = win32.CreateNamedPipe( address, openmode, win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE | win32.PIPE_WAIT, 1, obsize, ibsize, win32.NMPWAIT_WAIT_FOREVER, win32.NULL ) h2 = win32.CreateFile( address, access, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL ) win32.SetNamedPipeHandleState( h2, 
win32.PIPE_READMODE_MESSAGE, None, None ) try: win32.ConnectNamedPipe(h1, win32.NULL) except WindowsError, e: if e.args[0] != win32.ERROR_PIPE_CONNECTED: raise c1 = _multiprocessing.PipeConnection(h1, writable=duplex) c2 = _multiprocessing.PipeConnection(h2, readable=duplex) return c1, c2 # # Definitions for connections based on sockets # class SocketListener(object): ''' Representation of a socket which is bound to an address and listening ''' def __init__(self, address, family, backlog=1): self._socket = socket.socket(getattr(socket, family)) try: self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setblocking(True) self._socket.bind(address) self._socket.listen(backlog) self._address = self._socket.getsockname() except socket.error: self._socket.close() raise self._family = family self._last_accepted = None if family == 'AF_UNIX': self._unlink = Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: self._unlink = None def accept(self): while True: try: s, self._last_accepted = self._socket.accept() except socket.error as e: if e.args[0] != errno.EINTR: raise else: break s.setblocking(True) fd = duplicate(s.fileno()) conn = _multiprocessing.Connection(fd) s.close() return conn def close(self): try: self._socket.close() finally: unlink = self._unlink if unlink is not None: self._unlink = None unlink() def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = getattr(socket, address_type(address)) t = _init_timeout() while 1: s = socket.socket(family) s.setblocking(True) try: s.connect(address) except socket.error, e: s.close() if e.args[0] != errno.ECONNREFUSED or _check_timeout(t): debug('failed to connect to address %s', address) raise time.sleep(0.01) else: break else: raise fd = duplicate(s.fileno()) conn = _multiprocessing.Connection(fd) s.close() return conn # # Definitions for connections based on named pipes # if sys.platform == 'win32': class 
PipeListener(object): ''' Representation of a named pipe ''' def __init__(self, address, backlog=None): self._address = address handle = win32.CreateNamedPipe( address, win32.PIPE_ACCESS_DUPLEX, win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE | win32.PIPE_WAIT, win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, win32.NMPWAIT_WAIT_FOREVER, win32.NULL ) self._handle_queue = [handle] self._last_accepted = None sub_debug('listener created with address=%r', self._address) self.close = Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) def accept(self): newhandle = win32.CreateNamedPipe( self._address, win32.PIPE_ACCESS_DUPLEX, win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE | win32.PIPE_WAIT, win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, win32.NMPWAIT_WAIT_FOREVER, win32.NULL ) self._handle_queue.append(newhandle) handle = self._handle_queue.pop(0) try: win32.ConnectNamedPipe(handle, win32.NULL) except WindowsError, e: # ERROR_NO_DATA can occur if a client has already connected, # written data and then disconnected -- see Issue 14725. 
if e.args[0] not in (win32.ERROR_PIPE_CONNECTED, win32.ERROR_NO_DATA): raise return _multiprocessing.PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): sub_debug('closing listener with address=%r', address) for handle in queue: close(handle) def PipeClient(address): ''' Return a connection object connected to the pipe given by `address` ''' t = _init_timeout() while 1: try: win32.WaitNamedPipe(address, 1000) h = win32.CreateFile( address, win32.GENERIC_READ | win32.GENERIC_WRITE, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL ) except WindowsError, e: if e.args[0] not in (win32.ERROR_SEM_TIMEOUT, win32.ERROR_PIPE_BUSY) or _check_timeout(t): raise else: break else: raise win32.SetNamedPipeHandleState( h, win32.PIPE_READMODE_MESSAGE, None, None ) return _multiprocessing.PipeConnection(h) # # Authentication stuff # MESSAGE_LENGTH = 20 CHALLENGE = b'#CHALLENGE#' WELCOME = b'#WELCOME#' FAILURE = b'#FAILURE#' def get_digestmod_for_hmac(): import hashlib return getattr(hashlib, HMAC_DIGEST_NAME) def deliver_challenge(connection, authkey): import hmac assert isinstance(authkey, bytes) message = os.urandom(MESSAGE_LENGTH) connection.send_bytes(CHALLENGE + message) digest = hmac.new(authkey, message, get_digestmod_for_hmac()).digest() response = connection.recv_bytes(256) # reject large message if response == digest: connection.send_bytes(WELCOME) else: connection.send_bytes(FAILURE) raise AuthenticationError('digest received was wrong') def answer_challenge(connection, authkey): import hmac assert isinstance(authkey, bytes) message = connection.recv_bytes(256) # reject large message assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message message = message[len(CHALLENGE):] digest = hmac.new(authkey, message, get_digestmod_for_hmac()).digest() connection.send_bytes(digest) response = connection.recv_bytes(256) # reject large message if response != WELCOME: raise AuthenticationError('digest sent was rejected') # # Support for 
using xmlrpclib for serialization # class ConnectionWrapper(object): def __init__(self, conn, dumps, loads): self._conn = conn self._dumps = dumps self._loads = loads for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): obj = getattr(conn, attr) setattr(self, attr, obj) def send(self, obj): s = self._dumps(obj) self._conn.send_bytes(s) def recv(self): s = self._conn.recv_bytes() return self._loads(s) def _xml_dumps(obj): return xmlrpclib.dumps((obj,), None, None, None, 1) def _xml_loads(s): (obj,), method = xmlrpclib.loads(s) return obj class XmlListener(Listener): def accept(self): global xmlrpclib import xmlrpclib obj = Listener.accept(self) return ConnectionWrapper(obj, _xml_dumps, _xml_loads) def XmlClient(*args, **kwds): global xmlrpclib import xmlrpclib return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) PK!+J9J9connection.pycnu[ {fc@sdddgZddlZddlZddlZddlZddlZddlZddlZddlZddl m Z m Z ddl m Z mZmZmZddlmZmZdZd Zd ZejZd Zd gZeed rd Zed g7Znejd kr4dZedg7ZnedZdZdZdZ de!fdYZ"dddZ$ejd kre%dZ&nddlm'Z'e%dZ&de!fdYZ(dZ)ejd kr de!fdYZ*dZ+ndZ,dZ-d Z.d!Z/d"Z0d#Z1d$Z2d%e!fd&YZ3d'Z4d(Z5d)e"fd*YZ6d+Z7dS(,tClienttListenertPipeiN(tcurrent_processtAuthenticationError(t get_temp_dirtFinalizet sub_debugtdebug(t duplicatetclosei g4@tsha256tAF_INETtAF_UNIXtwin32tAF_PIPEcCstj|S(N(ttime(ttimeout((s2/usr/lib64/python2.7/multiprocessing/connection.pyt _init_timeoutMscCstj|kS(N(R(tt((s2/usr/lib64/python2.7/multiprocessing/connection.pyt_check_timeoutPscCs}|dkrd S|dkr5tjdddtS|dkrmtjdd tjtjfdd Std d S(s? 
Return an arbitrary free address for the given family R t localhostiR tprefixs listener-tdirRs\\.\pipe\pyc-%d-%d-tsunrecognized familyN(Ri(ttempfiletmktempRtostgetpidt _mmap_countertnextt ValueError(tfamily((s2/usr/lib64/python2.7/multiprocessing/connection.pytarbitrary_addressWs     cCset|tkrdSt|tkr;|jdr;dSt|tkrQdStd|dS(s] Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' R s\\RR saddress type of %r unrecognizedN(ttypettupletstrt startswithR(taddress((s2/usr/lib64/python2.7/multiprocessing/connection.pyt address_typefs!cBsSeZdZdddddZdZdZedZedZ RS(s Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. icCs|p|rt|pt}|p-t|}|dkrQt|||_nt||||_|dk rt|t rt dn||_ dS(NRsauthkey should be a byte string( R'tdefault_familyR!t PipeListenert _listenertSocketListenertNonet isinstancetbytest TypeErrort_authkey(tselfR&R tbacklogtauthkey((s2/usr/lib64/python2.7/multiprocessing/connection.pyt__init__s  cCs?|jj}|jr;t||jt||jn|S(sz Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. (R*tacceptR0tdeliver_challengetanswer_challenge(R1tc((s2/usr/lib64/python2.7/multiprocessing/connection.pyR5s  cCs |jjS(sA Close the bound socket or named pipe of `self`. 
(R*R (R1((s2/usr/lib64/python2.7/multiprocessing/connection.pyR scCs |jjS(N(R*t_address(R1((s2/usr/lib64/python2.7/multiprocessing/connection.pytRcCs |jjS(N(R*t_last_accepted(R1((s2/usr/lib64/python2.7/multiprocessing/connection.pyR:RN( t__name__t __module__t__doc__R,R4R5R tpropertyR&t last_accepted(((s2/usr/lib64/python2.7/multiprocessing/connection.pyRys  cCs|pt|}|dkr-t|}n t|}|dk rat|t ratdn|dk rt||t||n|S(s= Returns a connection to the address of a `Listener` Rsauthkey should be a byte stringN( R't PipeClientt SocketClientR,R-R.R/R7R6(R&R R3R8((s2/usr/lib64/python2.7/multiprocessing/connection.pyRs     cCs|rtj\}}|jt|jttjtj|j}tjtj|j}|j |j n<tj \}}tj|dt }tj|dt }||fS(sL Returns pair of connection objects at either end of a pipe twritabletreadable( tsockett socketpairt setblockingtTruet_multiprocessingt ConnectionRtduptfilenoR tpipetFalse(tduplexts1ts2tc1tc2tfd1tfd2((s2/usr/lib64/python2.7/multiprocessing/connection.pyRs    (Rc CsUtd}|r;tj}tjtjB}tt}}ntj}tj}dt}}tj||tjtj Btj Bd||tj tj }tj ||dtj tjdtj }tj|tj ddytj|tj Wn/tk r }|jdtjkr!q!nXtj|d|} tj|d|} | | fS(sL Returns pair of connection objects at either end of a pipe RiiRCRDN(R!RtPIPE_ACCESS_DUPLEXt GENERIC_READt GENERIC_WRITEtBUFSIZEtPIPE_ACCESS_INBOUNDtCreateNamedPipetPIPE_TYPE_MESSAGEtPIPE_READMODE_MESSAGEt PIPE_WAITtNMPWAIT_WAIT_FOREVERtNULLt CreateFilet OPEN_EXISTINGtSetNamedPipeHandleStateR,tConnectNamedPipet WindowsErrortargstERROR_PIPE_CONNECTEDRItPipeConnection( ROR&topenmodetaccesstobsizetibsizeth1th2teRRRS((s2/usr/lib64/python2.7/multiprocessing/connection.pyRs2      $ R+cBs,eZdZddZdZdZRS(sO Representation of a socket which is bound to an address and listening icCstjtt||_yb|jjtjtjd|jjt|jj||jj ||jj |_ Wn$tj k r|jj nX||_d|_|dkrt|tjd|fdd|_n d|_dS(NiR Rft exitpriorityi(REtgetattrt_sockett setsockoptt SOL_SOCKETt SO_REUSEADDRRGRHtbindtlistent getsocknameR9terrorR t_familyR,R;RRtunlinkt_unlink(R1R&R R2((s2/usr/lib64/python2.7/multiprocessing/connection.pyR4s     $cCsx\tr^y|jj\}|_Wn2tjk rY}|jdtjkr[q[qXPqW|j tt |j 
}t j |}|j|S(Ni(RHRrR5R;RERyRfterrnotEINTRRGR RLRIRJR (R1tsRotfdtconn((s2/usr/lib64/python2.7/multiprocessing/connection.pyR5s    cCsAz|jjWd|j}|dk r<d|_|nXdS(N(RrR R|R,(R1R{((s2/usr/lib64/python2.7/multiprocessing/connection.pyR #s    (R<R=R>R4R5R (((s2/usr/lib64/python2.7/multiprocessing/connection.pyR+s  cCsttt|}t}xtj|}|jty|j|Wnbtjk r}|j|j dt j kst |rt d|ntjdq!XPq!Wt|j}tj|}|j|S(sO Return a connection object connected to the socket given by `address` isfailed to connect to address %sg{Gz?(RqRER'RRGRHtconnectRyR RfR}t ECONNREFUSEDRRRtsleepR RLRIRJ(R&R RRRoRR((s2/usr/lib64/python2.7/multiprocessing/connection.pyRB-s&   "  R)cBs2eZdZddZdZedZRS(s0 Representation of a named pipe c Cs||_tj|tjtjtjBtjBtjtttj tj }|g|_ d|_ td|jt|tjd|j |jfdd|_dS(Ns listener created with address=%rRfRpi(R9RR[RVR\R]R^tPIPE_UNLIMITED_INSTANCESRYR_R`t _handle_queueR,R;RRR)t_finalize_pipe_listenerR (R1R&R2thandle((s2/usr/lib64/python2.7/multiprocessing/connection.pyR4Ss       c Cstj|jtjtjtjBtjBtjtttj tj }|j j ||j j d}ytj|tj Wn8tk r}|jdtjtjfkrqnXtj|S(Ni(RR[R9RVR\R]R^RRYR_R`RtappendtpopRdReRfRgt ERROR_NO_DATARIRh(R1t newhandleRRo((s2/usr/lib64/python2.7/multiprocessing/connection.pyR5fs    cCs,td|x|D]}t|qWdS(Ns closing listener with address=%r(RR (tqueueR&R((s2/usr/lib64/python2.7/multiprocessing/connection.pyRzs  N(R<R=R>R,R4R5t staticmethodR(((s2/usr/lib64/python2.7/multiprocessing/connection.pyR)Os  cCst}xyHtj|dtj|tjtjBdtjtjdtj}WnDtk r}|j dtj tj fkst |rqq XPq Wtj |tjddtj|S(sU Return a connection object connected to the pipe given by `address` iiN(RRt WaitNamedPipeRaRWRXR`RbReRftERROR_SEM_TIMEOUTtERROR_PIPE_BUSYRRcR]R,RIRh(R&RthRo((s2/usr/lib64/python2.7/multiprocessing/connection.pyRAs  " is #CHALLENGE#s #WELCOME#s #FAILURE#cCsddl}t|tS(Ni(thashlibRqtHMAC_DIGEST_NAME(R((s2/usr/lib64/python2.7/multiprocessing/connection.pytget_digestmod_for_hmacs cCsddl}t|ts!ttjt}|jt||j ||t j }|j d}||kr|jt n|jttddS(Niisdigest received was wrong(thmacR-R.tAssertionErrorRturandomtMESSAGE_LENGTHt send_bytest 
CHALLENGEtnewRtdigestt recv_bytestWELCOMEtFAILURER(t connectionR3RtmessageRtresponse((s2/usr/lib64/python2.7/multiprocessing/connection.pyR6s   cCsddl}t|ts!t|jd}|tt tksVtd||tt}|j||tj }|j ||jd}|t krt dndS(Niis message = %rsdigest sent was rejected( RR-R.RRtlenRRRRRRR(RR3RRRR((s2/usr/lib64/python2.7/multiprocessing/connection.pyR7s &  tConnectionWrappercBs#eZdZdZdZRS(cCsO||_||_||_x-dD]%}t||}t|||q"WdS(NRLR tpollRR(RLR RRR(t_connt_dumpst_loadsRqtsetattr(R1Rtdumpstloadstattrtobj((s2/usr/lib64/python2.7/multiprocessing/connection.pyR4s     cCs#|j|}|jj|dS(N(RRR(R1RR((s2/usr/lib64/python2.7/multiprocessing/connection.pytsendscCs|jj}|j|S(N(RRR(R1R((s2/usr/lib64/python2.7/multiprocessing/connection.pytrecvs(R<R=R4RR(((s2/usr/lib64/python2.7/multiprocessing/connection.pyRs  cCstj|fddddS(Ni(t xmlrpclibRR,(R((s2/usr/lib64/python2.7/multiprocessing/connection.pyt _xml_dumpsscCstj|\\}}|S(N(RR(RRtmethod((s2/usr/lib64/python2.7/multiprocessing/connection.pyt _xml_loadsst XmlListenercBseZdZRS(cCs+ddlatj|}t|ttS(Ni(RRR5RRR(R1R((s2/usr/lib64/python2.7/multiprocessing/connection.pyR5s (R<R=R5(((s2/usr/lib64/python2.7/multiprocessing/connection.pyRscOs%ddlatt||ttS(Ni(RRRRR(Rftkwds((s2/usr/lib64/python2.7/multiprocessing/connection.pyt XmlClients (8t__all__RtsysRER}RRt itertoolsRItmultiprocessingRRtmultiprocessing.utilRRRRtmultiprocessing.forkingR R RYtCONNECTION_TIMEOUTRtcountRR(tfamiliesthasattrtplatformRRR!R'tobjectRR,RRHRRR+RBR)RARRRRRR6R7RRRRR(((s2/usr/lib64/python2.7/multiprocessing/connection.pyt#sd        "      , *2 1     PK!G88connection.pyonu[ {fc@sdddgZddlZddlZddlZddlZddlZddlZddlZddlZddl m Z m Z ddl m Z mZmZmZddlmZmZdZd Zd ZejZd Zd gZeed rd Zed g7Znejd kr4dZedg7ZnedZdZdZdZ de!fdYZ"dddZ$ejd kre%dZ&nddlm'Z'e%dZ&de!fdYZ(dZ)ejd kr de!fdYZ*dZ+ndZ,dZ-d Z.d!Z/d"Z0d#Z1d$Z2d%e!fd&YZ3d'Z4d(Z5d)e"fd*YZ6d+Z7dS(,tClienttListenertPipeiN(tcurrent_processtAuthenticationError(t get_temp_dirtFinalizet sub_debugtdebug(t duplicatetclosei 
g4@tsha256tAF_INETtAF_UNIXtwin32tAF_PIPEcCstj|S(N(ttime(ttimeout((s2/usr/lib64/python2.7/multiprocessing/connection.pyt _init_timeoutMscCstj|kS(N(R(tt((s2/usr/lib64/python2.7/multiprocessing/connection.pyt_check_timeoutPscCs}|dkrd S|dkr5tjdddtS|dkrmtjdd tjtjfdd Std d S(s? Return an arbitrary free address for the given family R t localhostiR tprefixs listener-tdirRs\\.\pipe\pyc-%d-%d-tsunrecognized familyN(Ri(ttempfiletmktempRtostgetpidt _mmap_countertnextt ValueError(tfamily((s2/usr/lib64/python2.7/multiprocessing/connection.pytarbitrary_addressWs     cCset|tkrdSt|tkr;|jdr;dSt|tkrQdStd|dS(s] Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' R s\\RR saddress type of %r unrecognizedN(ttypettupletstrt startswithR(taddress((s2/usr/lib64/python2.7/multiprocessing/connection.pyt address_typefs!cBsSeZdZdddddZdZdZedZedZ RS(s Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. icCs|p|rt|pt}|p-t|}|dkrQt|||_nt||||_|dk rt|t rt dn||_ dS(NRsauthkey should be a byte string( R'tdefault_familyR!t PipeListenert _listenertSocketListenertNonet isinstancetbytest TypeErrort_authkey(tselfR&R tbacklogtauthkey((s2/usr/lib64/python2.7/multiprocessing/connection.pyt__init__s  cCs?|jj}|jr;t||jt||jn|S(sz Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. (R*tacceptR0tdeliver_challengetanswer_challenge(R1tc((s2/usr/lib64/python2.7/multiprocessing/connection.pyR5s  cCs |jjS(sA Close the bound socket or named pipe of `self`. 
(R*R (R1((s2/usr/lib64/python2.7/multiprocessing/connection.pyR scCs |jjS(N(R*t_address(R1((s2/usr/lib64/python2.7/multiprocessing/connection.pytRcCs |jjS(N(R*t_last_accepted(R1((s2/usr/lib64/python2.7/multiprocessing/connection.pyR:RN( t__name__t __module__t__doc__R,R4R5R tpropertyR&t last_accepted(((s2/usr/lib64/python2.7/multiprocessing/connection.pyRys  cCs|pt|}|dkr-t|}n t|}|dk rat|t ratdn|dk rt||t||n|S(s= Returns a connection to the address of a `Listener` Rsauthkey should be a byte stringN( R't PipeClientt SocketClientR,R-R.R/R7R6(R&R R3R8((s2/usr/lib64/python2.7/multiprocessing/connection.pyRs     cCs|rtj\}}|jt|jttjtj|j}tjtj|j}|j |j n<tj \}}tj|dt }tj|dt }||fS(sL Returns pair of connection objects at either end of a pipe twritabletreadable( tsockett socketpairt setblockingtTruet_multiprocessingt ConnectionRtduptfilenoR tpipetFalse(tduplexts1ts2tc1tc2tfd1tfd2((s2/usr/lib64/python2.7/multiprocessing/connection.pyRs    (Rc CsUtd}|r;tj}tjtjB}tt}}ntj}tj}dt}}tj||tjtj Btj Bd||tj tj }tj ||dtj tjdtj }tj|tj ddytj|tj Wn/tk r }|jdtjkr!q!nXtj|d|} tj|d|} | | fS(sL Returns pair of connection objects at either end of a pipe RiiRCRDN(R!RtPIPE_ACCESS_DUPLEXt GENERIC_READt GENERIC_WRITEtBUFSIZEtPIPE_ACCESS_INBOUNDtCreateNamedPipetPIPE_TYPE_MESSAGEtPIPE_READMODE_MESSAGEt PIPE_WAITtNMPWAIT_WAIT_FOREVERtNULLt CreateFilet OPEN_EXISTINGtSetNamedPipeHandleStateR,tConnectNamedPipet WindowsErrortargstERROR_PIPE_CONNECTEDRItPipeConnection( ROR&topenmodetaccesstobsizetibsizeth1th2teRRRS((s2/usr/lib64/python2.7/multiprocessing/connection.pyRs2      $ R+cBs,eZdZddZdZdZRS(sO Representation of a socket which is bound to an address and listening icCstjtt||_yb|jjtjtjd|jjt|jj||jj ||jj |_ Wn$tj k r|jj nX||_d|_|dkrt|tjd|fdd|_n d|_dS(NiR Rft exitpriorityi(REtgetattrt_sockett setsockoptt SOL_SOCKETt SO_REUSEADDRRGRHtbindtlistent getsocknameR9terrorR t_familyR,R;RRtunlinkt_unlink(R1R&R R2((s2/usr/lib64/python2.7/multiprocessing/connection.pyR4s     $cCsx\tr^y|jj\}|_Wn2tjk rY}|jdtjkr[q[qXPqW|j tt |j 
}t j |}|j|S(Ni(RHRrR5R;RERyRfterrnotEINTRRGR RLRIRJR (R1tsRotfdtconn((s2/usr/lib64/python2.7/multiprocessing/connection.pyR5s    cCsAz|jjWd|j}|dk r<d|_|nXdS(N(RrR R|R,(R1R{((s2/usr/lib64/python2.7/multiprocessing/connection.pyR #s    (R<R=R>R4R5R (((s2/usr/lib64/python2.7/multiprocessing/connection.pyR+s  cCsttt|}t}xtj|}|jty|j|Wnbtjk r}|j|j dt j kst |rt d|ntjdq!XPq!Wt|j}tj|}|j|S(sO Return a connection object connected to the socket given by `address` isfailed to connect to address %sg{Gz?(RqRER'RRGRHtconnectRyR RfR}t ECONNREFUSEDRRRtsleepR RLRIRJ(R&R RRRoRR((s2/usr/lib64/python2.7/multiprocessing/connection.pyRB-s&   "  R)cBs2eZdZddZdZedZRS(s0 Representation of a named pipe c Cs||_tj|tjtjtjBtjBtjtttj tj }|g|_ d|_ td|jt|tjd|j |jfdd|_dS(Ns listener created with address=%rRfRpi(R9RR[RVR\R]R^tPIPE_UNLIMITED_INSTANCESRYR_R`t _handle_queueR,R;RRR)t_finalize_pipe_listenerR (R1R&R2thandle((s2/usr/lib64/python2.7/multiprocessing/connection.pyR4Ss       c Cstj|jtjtjtjBtjBtjtttj tj }|j j ||j j d}ytj|tj Wn8tk r}|jdtjtjfkrqnXtj|S(Ni(RR[R9RVR\R]R^RRYR_R`RtappendtpopRdReRfRgt ERROR_NO_DATARIRh(R1t newhandleRRo((s2/usr/lib64/python2.7/multiprocessing/connection.pyR5fs    cCs,td|x|D]}t|qWdS(Ns closing listener with address=%r(RR (tqueueR&R((s2/usr/lib64/python2.7/multiprocessing/connection.pyRzs  N(R<R=R>R,R4R5t staticmethodR(((s2/usr/lib64/python2.7/multiprocessing/connection.pyR)Os  cCst}xyHtj|dtj|tjtjBdtjtjdtj}WnDtk r}|j dtj tj fkst |rqq XPq Wtj |tjddtj|S(sU Return a connection object connected to the pipe given by `address` iiN(RRt WaitNamedPipeRaRWRXR`RbReRftERROR_SEM_TIMEOUTtERROR_PIPE_BUSYRRcR]R,RIRh(R&RthRo((s2/usr/lib64/python2.7/multiprocessing/connection.pyRAs  " is #CHALLENGE#s #WELCOME#s #FAILURE#cCsddl}t|tS(Ni(thashlibRqtHMAC_DIGEST_NAME(R((s2/usr/lib64/python2.7/multiprocessing/connection.pytget_digestmod_for_hmacs cCsddl}tjt}|jt||j||tj}|j d}||kru|jt n|jt t ddS(Niisdigest received was wrong( thmacRturandomtMESSAGE_LENGTHt send_bytest CHALLENGEtnewRtdigestt 
recv_bytestWELCOMEtFAILURER(t connectionR3RtmessageRtresponse((s2/usr/lib64/python2.7/multiprocessing/connection.pyR6s   cCsddl}|jd}|tt}|j||tj}|j||jd}|tkrt dndS(Niisdigest sent was rejected( RRtlenRRRRRRR(RR3RRRR((s2/usr/lib64/python2.7/multiprocessing/connection.pyR7s   tConnectionWrappercBs#eZdZdZdZRS(cCsO||_||_||_x-dD]%}t||}t|||q"WdS(NRLR tpollRR(RLR RRR(t_connt_dumpst_loadsRqtsetattr(R1Rtdumpstloadstattrtobj((s2/usr/lib64/python2.7/multiprocessing/connection.pyR4s     cCs#|j|}|jj|dS(N(RRR(R1RR((s2/usr/lib64/python2.7/multiprocessing/connection.pytsendscCs|jj}|j|S(N(RRR(R1R((s2/usr/lib64/python2.7/multiprocessing/connection.pytrecvs(R<R=R4RR(((s2/usr/lib64/python2.7/multiprocessing/connection.pyRs  cCstj|fddddS(Ni(t xmlrpclibRR,(R((s2/usr/lib64/python2.7/multiprocessing/connection.pyt _xml_dumpsscCstj|\\}}|S(N(RR(RRtmethod((s2/usr/lib64/python2.7/multiprocessing/connection.pyt _xml_loadsst XmlListenercBseZdZRS(cCs+ddlatj|}t|ttS(Ni(RRR5RRR(R1R((s2/usr/lib64/python2.7/multiprocessing/connection.pyR5s (R<R=R5(((s2/usr/lib64/python2.7/multiprocessing/connection.pyRscOs%ddlatt||ttS(Ni(RRRRR(Rftkwds((s2/usr/lib64/python2.7/multiprocessing/connection.pyt XmlClients (8t__all__RtsysRER}RRt itertoolsRItmultiprocessingRRtmultiprocessing.utilRRRRtmultiprocessing.forkingR R RYtCONNECTION_TIMEOUTRtcountRR(tfamiliesthasattrtplatformRRR!R'tobjectRR,RRHRRR+RBR)RARRRRRR6R7RRRRR(((s2/usr/lib64/python2.7/multiprocessing/connection.pyt#sd        "      , *2 1     PK!rKOCC forking.pynu[# # Module for starting a process object using os.fork() or CreateProcess() # # multiprocessing/forking.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
# import os import sys import signal import errno from multiprocessing import util, process __all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close', 'ForkingPickler'] # # Check that the current thread is spawning a child process # def assert_spawning(self): if not Popen.thread_is_spawning(): raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(self).__name__ ) # # Try making some callable types picklable # from pickle import Pickler class ForkingPickler(Pickler): dispatch = Pickler.dispatch.copy() @classmethod def register(cls, type, reduce): def dispatcher(self, obj): rv = reduce(obj) self.save_reduce(obj=obj, *rv) cls.dispatch[type] = dispatcher def _reduce_method(m): if m.im_self is None: return getattr, (m.im_class, m.im_func.func_name) else: return getattr, (m.im_self, m.im_func.func_name) ForkingPickler.register(type(ForkingPickler.save), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) ForkingPickler.register(type(list.append), _reduce_method_descriptor) ForkingPickler.register(type(int.__add__), _reduce_method_descriptor) #def _reduce_builtin_function_or_method(m): # return getattr, (m.__self__, m.__name__) #ForkingPickler.register(type(list().append), _reduce_builtin_function_or_method) #ForkingPickler.register(type(int().__add__), _reduce_builtin_function_or_method) try: from functools import partial except ImportError: pass else: def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return partial(func, *args, **keywords) ForkingPickler.register(partial, _reduce_partial) # # Unix # if sys.platform != 'win32': import time exit = os._exit duplicate = os.dup close = os.close # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. 
# class Popen(object): def __init__(self, process_obj): sys.stdout.flush() sys.stderr.flush() self.returncode = None self.pid = os.fork() if self.pid == 0: if 'random' in sys.modules: import random random.seed() code = process_obj._bootstrap() sys.stdout.flush() sys.stderr.flush() os._exit(code) def poll(self, flag=os.WNOHANG): if self.returncode is None: while True: try: pid, sts = os.waitpid(self.pid, flag) except os.error as e: if e.errno == errno.EINTR: continue # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None else: break if pid == self.pid: if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) else: assert os.WIFEXITED(sts) self.returncode = os.WEXITSTATUS(sts) return self.returncode def wait(self, timeout=None): if timeout is None: return self.poll(0) deadline = time.time() + timeout delay = 0.0005 while 1: res = self.poll() if res is not None: break remaining = deadline - time.time() if remaining <= 0: break delay = min(delay * 2, remaining, 0.05) time.sleep(delay) return res def terminate(self): if self.returncode is None: try: os.kill(self.pid, signal.SIGTERM) except OSError, e: if self.wait(timeout=0.1) is None: raise @staticmethod def thread_is_spawning(): return False # # Windows # else: import thread import msvcrt import _subprocess import time from _multiprocessing import win32, Connection, PipeConnection from .util import Finalize #try: # from cPickle import dump, load, HIGHEST_PROTOCOL #except ImportError: from pickle import load, HIGHEST_PROTOCOL def dump(obj, file, protocol=None): ForkingPickler(file, protocol).dump(obj) # # # TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") exit = win32.ExitProcess close = win32.CloseHandle # # _python_exe is the assumed path to the python executable. # People embedding Python want to modify it. 
# if WINSERVICE: _python_exe = os.path.join(sys.exec_prefix, 'python.exe') else: _python_exe = sys.executable def set_executable(exe): global _python_exe _python_exe = exe # # # def duplicate(handle, target_process=None, inheritable=False): if target_process is None: target_process = _subprocess.GetCurrentProcess() return _subprocess.DuplicateHandle( _subprocess.GetCurrentProcess(), handle, target_process, 0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS ).Detach() # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): ''' Start a subprocess to run the code of a process object ''' _tls = thread._local() def __init__(self, process_obj): # create pipe for communication with child rfd, wfd = os.pipe() # get handle for read end of the pipe and make it inheritable rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True) os.close(rfd) # start process cmd = get_command_line() + [rhandle] cmd = ' '.join('"%s"' % x for x in cmd) hp, ht, pid, tid = _subprocess.CreateProcess( _python_exe, cmd, None, None, 1, 0, None, None, None ) ht.Close() close(rhandle) # set attributes of self self.pid = pid self.returncode = None self._handle = hp # send information to child prep_data = get_preparation_data(process_obj._name) to_child = os.fdopen(wfd, 'wb') Popen._tls.process_handle = int(hp) try: dump(prep_data, to_child, HIGHEST_PROTOCOL) dump(process_obj, to_child, HIGHEST_PROTOCOL) finally: del Popen._tls.process_handle to_child.close() @staticmethod def thread_is_spawning(): return getattr(Popen._tls, 'process_handle', None) is not None @staticmethod def duplicate_for_child(handle): return duplicate(handle, Popen._tls.process_handle) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _subprocess.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _subprocess.WaitForSingleObject(int(self._handle), msecs) if res == 
_subprocess.WAIT_OBJECT_0: code = _subprocess.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _subprocess.TerminateProcess(int(self._handle), TERMINATE) except WindowsError: if self.wait(timeout=0.1) is None: raise # # # def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': assert len(argv) == 3 return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): main() sys.exit() def get_command_line(): ''' Returns prefix of command line used for spawning a child process ''' if getattr(process.current_process(), '_inheriting', False): raise RuntimeError(''' Attempt to start a new process before the current process has finished its bootstrapping phase. This probably means that you are on Windows and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... 
The "freeze_support()" line can be omitted if the program is not going to be frozen to produce a Windows executable.''') if getattr(sys, 'frozen', False): return [sys.executable, '--multiprocessing-fork'] else: prog = 'from multiprocessing.forking import main; main()' opts = util._args_from_interpreter_flags() return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] def main(): ''' Run code specified by data received over pipe ''' assert is_forking(sys.argv) handle = int(sys.argv[-1]) fd = msvcrt.open_osfhandle(handle, os.O_RDONLY) from_parent = os.fdopen(fd, 'rb') process.current_process()._inheriting = True preparation_data = load(from_parent) prepare(preparation_data) self = load(from_parent) process.current_process()._inheriting = False from_parent.close() exitcode = self._bootstrap() exit(exitcode) def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' from .util import _logger, _log_to_stderr d = dict( name=name, sys_path=sys.path, sys_argv=sys.argv, log_to_stderr=_log_to_stderr, orig_dir=process.ORIGINAL_DIR, authkey=process.current_process().authkey, ) if _logger is not None: d['log_level'] = _logger.getEffectiveLevel() if not WINEXE and not WINSERVICE and \ not d['sys_argv'][0].lower().endswith('pythonservice.exe'): main_path = getattr(sys.modules['__main__'], '__file__', None) if not main_path and sys.argv[0] not in ('', '-c'): main_path = sys.argv[0] if main_path is not None: if not os.path.isabs(main_path) and \ process.ORIGINAL_DIR is not None: main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['main_path'] = os.path.normpath(main_path) return d # # Make (Pipe)Connection picklable # def reduce_connection(conn): if not Popen.thread_is_spawning(): raise RuntimeError( 'By default %s objects can only be shared between processes\n' 'using inheritance' % type(conn).__name__ ) return type(conn), (Popen.duplicate_for_child(conn.fileno()), conn.readable, conn.writable) 
    ForkingPickler.register(Connection, reduce_connection)
    ForkingPickler.register(PipeConnection, reduce_connection)

#
# Prepare current process
#

# stack of previous __main__ modules replaced by prepare()
old_main_modules = []

def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    old_main_modules.append(sys.modules['__main__'])

    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'main_path' in data:
        # XXX (ncoghlan): The following code makes several bogus
        # assumptions regarding the relationship between __file__
        # and a module's real name. See PEP 302 and issue #10845
        # The problem is resolved properly in Python 3.4+, as
        # described in issue #19946
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            # package executed via its __init__ -- use the package name
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name == '__main__':
            # For directory and zipfile execution, we assume an implicit
            # "if __name__ == '__main__':" around the module, and don't
            # rerun the main module code in spawned processes
            main_module = sys.modules['__main__']
            main_module.__file__ = main_path
        elif main_name != 'ipython':
            # Main modules not actually called __main__.py may
            # contain additional code that should still be executed
            import imp

            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]

            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                # We would like to do
"imp.load_module('__main__', ...)" # here. However, that would cause 'if __name__ == # "__main__"' clauses to be executed. main_module = imp.load_module( '__parents_main__', file, path_name, etc ) finally: if file: file.close() sys.modules['__main__'] = main_module main_module.__name__ = '__main__' # Try to make the potentially picklable objects in # sys.modules['__main__'] realize they are in the main # module -- somewhat ugly. for obj in main_module.__dict__.values(): try: if obj.__module__ == '__parents_main__': obj.__module__ = '__main__' except Exception: pass PK!}88 forking.pycnu[ {fc@sddlZddlZddlZddlZddlmZmZddddddgZd Zdd l m Z de fd YZ d Z e j ee je d Ze j eejee j eejeyddlmZWnek rn#XdZdZe j eeejdkrddlZejZejZej Z de!fdYZ"n}ddl#Z#ddl$Z$ddl%Z%ddlZddl&m'Z'm(Z(m)Z)ddlm*Z*ddl m+Z+m,Z,ddZ.dZ/ejdko%e0ede1Z2ej3j4j5dZ6e'j7Ze'j8Z e6rsej9j:ej;da<n ej3a<dZ=de1dZde!fdYZ"dZ>d Z?d!Z@d"ZAd#ZBd$ZCe j e(eCe j e)eCgZDd%ZEdS(&iN(tutiltprocesstPopentassert_spawningtexitt duplicatetclosetForkingPicklercCs,tjs(tdt|jndS(NsF%s objects should only be shared between processes through inheritance(Rtthread_is_spawningt RuntimeErrorttypet__name__(tself((s//usr/lib64/python2.7/multiprocessing/forking.pyR0s (tPicklercBs&eZejjZedZRS(cs fd}||j|siitwb(R1tpipeRtmsvcrtt get_osfhandleR;Rtget_command_linetjoinRft CreateProcessRcRtCloseR3R0t_handletget_preparation_datat_nametfdopentintRt_tlstprocess_handleR_R^( R R8trfdtwfdtrhandletcmdthpthtR3ttidt prep_datatto_child((s//usr/lib64/python2.7/multiprocessing/forking.pyR:s( -      cCsttjdddk S(NR(RRR~R(((s//usr/lib64/python2.7/multiprocessing/forking.pyRscCst|tjjS(N(RRR~R(Rk((s//usr/lib64/python2.7/multiprocessing/forking.pytduplicate_for_childscCs|jdkr|dkr'tj}ntdt|dd}tjt|j|}|tjkrtj |j}|t krt j }n||_qn|jS(Niig?( R0RRftINFINITEtmaxR}tWaitForSingleObjectRyt WAIT_OBJECT_0tGetExitCodeProcesst TERMINATERSRT(R RLtmsecsROR9((s//usr/lib64/python2.7/multiprocessing/forking.pyRQ"s    cCs|jddS(NRLi(RQ(R 
((s//usr/lib64/python2.7/multiprocessing/forking.pyRH2scCse|jdkraytjt|jtWqatk r]|jdddkr^q^qaXndS(NRLg?( R0RRftTerminateProcessR}RyRt WindowsErrorRQ(R ((s//usr/lib64/python2.7/multiprocessing/forking.pyRV5s  N(R Rt__doc__tthreadt_localR~R:RYRRRRQRHRV(((s//usr/lib64/python2.7/multiprocessing/forking.pyRs  !  cCsFt|dkr>|ddkr>t|dks:ttStSdS(sE Return whether commandline indicates we are forking iis--multiprocessing-forkiN(tlenRCR;RW(targv((s//usr/lib64/python2.7/multiprocessing/forking.pyt is_forkingAs"cCs'ttjr#ttjndS(sM Run code for process object if this in not the main process N(RR,RtmainR(((s//usr/lib64/python2.7/multiprocessing/forking.pytfreeze_supportLscCstttjdtr'tdnttdtrFtjdgSd}tj}t g|d|dgSdS(sR Returns prefix of command line used for spawning a child process t _inheritings Attempt to start a new process before the current process has finished its bootstrapping phase. This probably means that you are on Windows and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce a Windows executable.Rbs--multiprocessing-forks0from multiprocessing.forking import main; main()s-cN( RRtcurrent_processRWR R,t executableRt_args_from_interpreter_flagsRc(tprogtopts((s//usr/lib64/python2.7/multiprocessing/forking.pyRuUs   cCsttjstttjd}tj|tj}tj |d}t t j _ t|}t|t|}tt j _ |j|j}t|dS(s? 
Run code specified by data received over pipe itrbN(RR,RRCR}Rstopen_osfhandleR1tO_RDONLYR|R;RRRR]tprepareRWRR6R(Rktfdt from_parenttpreparation_dataR texitcode((s//usr/lib64/python2.7/multiprocessing/forking.pyRps     c CsLddlm}m}td|dtjdtjd|dtjdtj j }|dk rt|j |d tmultiprocessingRRt__all__RtpickleR RR RR tsaveR"tlistRR}t__add__t functoolsR#t ImportErrorR)R$tplatformRIR7RtdupRRtobjectRRRsRft_multiprocessingR*RZR[R\R]R^RR_RRRWRRRRRt ExitProcesst CloseHandleRRvt exec_prefixRcReRRRuRRzRRR(((s//usr/lib64/python2.7/multiprocessing/forking.pyt#sn              G     !     N   # PK!v77 forking.pyonu[ {fc@sddlZddlZddlZddlZddlmZmZddddddgZd Zdd l m Z de fd YZ d Z e j ee je d Ze j eejee j eejeyddlmZWnek rn#XdZdZe j eeejdkrddlZejZejZej Z de!fdYZ"n}ddl#Z#ddl$Z$ddl%Z%ddlZddl&m'Z'm(Z(m)Z)ddlm*Z*ddl m+Z+m,Z,ddZ.dZ/ejdko%e0ede1Z2ej3j4j5dZ6e'j7Ze'j8Z e6rsej9j:ej;da<n ej3a<dZ=de1dZde!fdYZ"dZ>d Z?d!Z@d"ZAd#ZBd$ZCe j e(eCe j e)eCgZDd%ZEdS(&iN(tutiltprocesstPopentassert_spawningtexitt duplicatetclosetForkingPicklercCs,tjs(tdt|jndS(NsF%s objects should only be shared between processes through inheritance(Rtthread_is_spawningt RuntimeErrorttypet__name__(tself((s//usr/lib64/python2.7/multiprocessing/forking.pyR0s (tPicklercBs&eZejjZedZRS(cs fd}||j|siitwb(R1tpipeRtmsvcrtt get_osfhandleR;Rtget_command_linetjoinRdt CreateProcessRaRtCloseR3R0t_handletget_preparation_datat_nametfdopentintRt_tlstprocess_handleR]R\( R R8trfdtwfdtrhandletcmdthpthtR3ttidt prep_datatto_child((s//usr/lib64/python2.7/multiprocessing/forking.pyR:s( -      cCsttjdddk S(NR}(RRR|R(((s//usr/lib64/python2.7/multiprocessing/forking.pyRscCst|tjjS(N(RRR|R}(Ri((s//usr/lib64/python2.7/multiprocessing/forking.pytduplicate_for_childscCs|jdkr|dkr'tj}ntdt|dd}tjt|j|}|tjkrtj |j}|t krt j }n||_qn|jS(Niig?( R0RRdtINFINITEtmaxR{tWaitForSingleObjectRwt WAIT_OBJECT_0tGetExitCodeProcesst TERMINATERQRR(R RJtmsecsRMR9((s//usr/lib64/python2.7/multiprocessing/forking.pyRO"s    cCs|jddS(NRJi(RO(R 
((s//usr/lib64/python2.7/multiprocessing/forking.pyRF2scCse|jdkraytjt|jtWqatk r]|jdddkr^q^qaXndS(NRJg?( R0RRdtTerminateProcessR{RwRt WindowsErrorRO(R ((s//usr/lib64/python2.7/multiprocessing/forking.pyRT5s  N(R Rt__doc__tthreadt_localR|R:RWRRRRORFRT(((s//usr/lib64/python2.7/multiprocessing/forking.pyRs  !  cCs.t|dkr&|ddkr&tStSdS(sE Return whether commandline indicates we are forking iis--multiprocessing-forkN(tlenR;RU(targv((s//usr/lib64/python2.7/multiprocessing/forking.pyt is_forkingAs"cCs'ttjr#ttjndS(sM Run code for process object if this in not the main process N(RR,RtmainR(((s//usr/lib64/python2.7/multiprocessing/forking.pytfreeze_supportLscCstttjdtr'tdnttdtrFtjdgSd}tj}t g|d|dgSdS(sR Returns prefix of command line used for spawning a child process t _inheritings Attempt to start a new process before the current process has finished its bootstrapping phase. This probably means that you are on Windows and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce a Windows executable.R`s--multiprocessing-forks0from multiprocessing.forking import main; main()s-cN( RRtcurrent_processRUR R,t executableRt_args_from_interpreter_flagsRa(tprogtopts((s//usr/lib64/python2.7/multiprocessing/forking.pyRsUs   cCsttjd}tj|tj}tj|d}tt j _ t |}t |t |}tt j _ |j|j}t|dS(s? Run code specified by data received over pipe itrbN(R{R,RRqtopen_osfhandleR1tO_RDONLYRzR;RRRR[tprepareRURR6R(Ritfdt from_parenttpreparation_dataR texitcode((s//usr/lib64/python2.7/multiprocessing/forking.pyRps     c CsLddlm}m}td|dtjdtjd|dtjdtj j }|dk rt|j |d tmultiprocessingRRt__all__RtpickleR RR RR tsaveR"tlistRR{t__add__t functoolsR#t ImportErrorR)R$tplatformRGR7RtdupRRtobjectRRRqRdt_multiprocessingR*RXRYRZR[R\RR]RRRURRRRRt ExitProcesst CloseHandleRRtt exec_prefixRaRcRRRsRRxRRR(((s//usr/lib64/python2.7/multiprocessing/forking.pyt#sn              G     !     
N   # PK!S\߅!!heap.pynu[# # Module which supports allocation of memory from an mmap # # multiprocessing/heap.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
#

import bisect
import mmap
import tempfile
import os
import sys
import threading
import itertools

import _multiprocessing
from multiprocessing.util import Finalize, info
from multiprocessing.forking import assert_spawning

__all__ = ['BufferWrapper']

#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#

if sys.platform == 'win32':

    from _multiprocessing import win32

    class Arena(object):
        # On Windows the mmap is identified by a tagname so that it can be
        # re-opened by name in a child process (see __setstate__).

        _counter = itertools.count()

        def __init__(self, size):
            self.size = size
            self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            assert win32.GetLastError() == 0, 'tagname already in use'
            self._state = (self.size, self.name)

        def __getstate__(self):
            # only picklable while spawning a child process
            assert_spawning(self)
            return self._state

        def __setstate__(self, state):
            self.size, self.name = self._state = state
            # re-attach to the existing mapping created by the parent
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS

else:

    class Arena(object):
        # POSIX: anonymous mmap, shared with children via fork inheritance

        def __init__(self, size):
            self.buffer = mmap.mmap(-1, size)
            self.size = size
            self.name = None

#
# Class allowing allocation of chunks of memory from arenas
#

class Heap(object):

    # all returned blocks are aligned to this many bytes
    _alignment = 8

    def __init__(self, size=mmap.PAGESIZE):
        self._lastpid = os.getpid()
        self._lock = threading.Lock()
        self._size = size
        self._lengths = []                  # sorted list of free-block lengths
        self._len_to_seq = {}               # length -> list of free blocks
        self._start_to_block = {}           # (arena, start) -> free block
        self._stop_to_block = {}            # (arena, stop) -> free block
        self._allocated_blocks = set()
        self._arenas = []
        # list of pending blocks to free - see free() comment below
        self._pending_free_blocks = []

    @staticmethod
    def _roundup(n, alignment):
        # alignment must be a power of 2
        mask = alignment - 1
        return (n + mask) & ~mask

    def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            # no free block is big enough: create a new arena, doubling
            # the target size each time to amortize mmap calls
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        # deregister the chosen block from the free maps
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block

    def _free(self, block):
        # free location and try to merge with neighbours
        (arena, start, stop) = block

        try:
            prev_block = self._stop_to_block[(arena, start)]
        except KeyError:
            pass
        else:
            start, _ = self._absorb(prev_block)

        try:
            next_block = self._start_to_block[(arena, stop)]
        except KeyError:
            pass
        else:
            _, stop = self._absorb(next_block)

        block = (arena, start, stop)
        length = stop - start

        try:
            self._len_to_seq[length].append(block)
        except KeyError:
            self._len_to_seq[length] = [block]
            bisect.insort(self._lengths, length)

        self._start_to_block[(arena, start)] = block
        self._stop_to_block[(arena, stop)] = block

    def _absorb(self, block):
        # deregister this block so it can be merged with a neighbour
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]

        length = stop - start
        seq = self._len_to_seq[length]
        seq.remove(block)
        if not seq:
            del self._len_to_seq[length]
            self._lengths.remove(length)

        return start, stop

    def _free_pending_blocks(self):
        # Free all the blocks in the pending list - called with the lock held.
        while True:
            try:
                block = self._pending_free_blocks.pop()
            except IndexError:
                break
            self._allocated_blocks.remove(block)
            self._free(block)

    def free(self, block):
        # free a block returned by malloc()
        # Since free() can be called asynchronously by the GC, it could happen
        # that it's called while self._lock is held: in that case,
        # self._lock.acquire() would deadlock (issue #12352).
To avoid that, a # trylock is used instead, and if the lock can't be acquired # immediately, the block is added to a list of blocks to be freed # synchronously sometimes later from malloc() or free(), by calling # _free_pending_blocks() (appending and retrieving from a list is not # strictly thread-safe but under cPython it's atomic thanks to the GIL). assert os.getpid() == self._lastpid if not self._lock.acquire(False): # can't acquire the lock right now, add the block to the list of # pending blocks to free self._pending_free_blocks.append(block) else: # we hold the lock try: self._free_pending_blocks() self._allocated_blocks.remove(block) self._free(block) finally: self._lock.release() def malloc(self, size): # return a block of right size (possibly rounded up) assert 0 <= size < sys.maxint if os.getpid() != self._lastpid: self.__init__() # reinitialize after fork self._lock.acquire() self._free_pending_blocks() try: size = self._roundup(max(size,1), self._alignment) (arena, start, stop) = self._malloc(size) new_stop = start + size if new_stop < stop: self._free((arena, new_stop, stop)) block = (arena, start, new_stop) self._allocated_blocks.add(block) return block finally: self._lock.release() # # Class representing a chunk of an mmap -- can be inherited # class BufferWrapper(object): _heap = Heap() def __init__(self, size): assert 0 <= size < sys.maxint block = BufferWrapper._heap.malloc(size) self._state = (block, size) Finalize(self, BufferWrapper._heap.free, args=(block,)) def get_address(self): (arena, start, stop), size = self._state address, length = _multiprocessing.address_of_buffer(arena.buffer) assert size <= length return address + start def get_size(self): return self._state[1] PK!Xvheap.pycnu[ {fc@s ddlZddlZddlZddlZddlZddlZddlZddlZddlm Z m Z ddl m Z dgZ ejdkrddlmZdefdYZndefd YZd efd YZdefd YZdS( iN(tFinalizetinfo(tassert_spawningt BufferWrappertwin32(RtArenacBs/eZejZdZdZdZRS(cCs||_dtjtjjf|_tjd|jd|j|_t j dksjt d|j|jf|_ dS(Ns 
pym-%d-%dittagnameistagname already in use( tsizetostgetpidRt_countertnexttnametmmaptbufferRt GetLastErrortAssertionErrort_state(tselfR((s,/usr/lib64/python2.7/multiprocessing/heap.pyt__init__=s  "!cCst||jS(N(RR(R((s,/usr/lib64/python2.7/multiprocessing/heap.pyt __getstate__Ds cCsY|\|_|_|_tjd|jd|j|_tjtjksUtdS(NiR( RR RR RRRtERROR_ALREADY_EXISTSR(Rtstate((s,/usr/lib64/python2.7/multiprocessing/heap.pyt __setstate__Hs!(t__name__t __module__t itertoolstcountR RRR(((s,/usr/lib64/python2.7/multiprocessing/heap.pyR9s   cBseZdZRS(cCs+tjd||_||_d|_dS(Ni(R RRtNoneR (RR((s,/usr/lib64/python2.7/multiprocessing/heap.pyRQs (RRR(((s,/usr/lib64/python2.7/multiprocessing/heap.pyROstHeapcBsbeZdZejdZedZdZdZ dZ dZ dZ dZ RS( icCsmtj|_tj|_||_g|_i|_i|_ i|_ t |_ g|_ g|_dS(N(RR t_lastpidt threadingtLockt_lockt_sizet_lengthst _len_to_seqt_start_to_blockt_stop_to_blocktsett_allocated_blockst_arenast_pending_free_blocks(RR((s,/usr/lib64/python2.7/multiprocessing/heap.pyR^s       cCs|d}|||@S(Ni((tnt alignmenttmask((s,/usr/lib64/python2.7/multiprocessing/heap.pyt_roundupks c Cstj|j|}|t|jkr|jt|j|tj}|jd9_t d|t |}|j j ||d|fS|j|}|j |}|j}|s|j |=|j|=n|\}}}|j||f=|j||f=|S(Nis"allocating a new mmap of length %di(tbisectt bisect_leftR#tlenR.tmaxR"R tPAGESIZERRR)tappendR$tpopR%R&( RRtitlengthtarenatseqtblocktstarttstop((s,/usr/lib64/python2.7/multiprocessing/heap.pyt_mallocqs"!      
c Cs!|\}}}y|j||f}Wntk r9nX|j|\}}y|j||f}Wntk rynX|j|\}}|||f}||}y|j|j|Wn4tk r|g|j|#s          PK!/^heap.pyonu[ {fc@s ddlZddlZddlZddlZddlZddlZddlZddlZddlm Z m Z ddl m Z dgZ ejdkrddlmZdefdYZndefd YZd efd YZdefd YZdS( iN(tFinalizetinfo(tassert_spawningt BufferWrappertwin32(RtArenacBs/eZejZdZdZdZRS(cCse||_dtjtjjf|_tjd|jd|j|_|j|jf|_ dS(Ns pym-%d-%dittagname( tsizetostgetpidRt_countertnexttnametmmaptbuffert_state(tselfR((s,/usr/lib64/python2.7/multiprocessing/heap.pyt__init__=s "!cCst||jS(N(RR(R((s,/usr/lib64/python2.7/multiprocessing/heap.pyt __getstate__Ds cCs>|\|_|_|_tjd|jd|j|_dS(NiR(RR RR R(Rtstate((s,/usr/lib64/python2.7/multiprocessing/heap.pyt __setstate__Hs!(t__name__t __module__t itertoolstcountR RRR(((s,/usr/lib64/python2.7/multiprocessing/heap.pyR9s   cBseZdZRS(cCs+tjd||_||_d|_dS(Ni(R RRtNoneR (RR((s,/usr/lib64/python2.7/multiprocessing/heap.pyRQs (RRR(((s,/usr/lib64/python2.7/multiprocessing/heap.pyROstHeapcBsbeZdZejdZedZdZdZ dZ dZ dZ dZ RS( icCsmtj|_tj|_||_g|_i|_i|_ i|_ t |_ g|_ g|_dS(N(RR t_lastpidt threadingtLockt_lockt_sizet_lengthst _len_to_seqt_start_to_blockt_stop_to_blocktsett_allocated_blockst_arenast_pending_free_blocks(RR((s,/usr/lib64/python2.7/multiprocessing/heap.pyR^s       cCs|d}|||@S(Ni((tnt alignmenttmask((s,/usr/lib64/python2.7/multiprocessing/heap.pyt_roundupks c Cstj|j|}|t|jkr|jt|j|tj}|jd9_t d|t |}|j j ||d|fS|j|}|j |}|j}|s|j |=|j|=n|\}}}|j||f=|j||f=|S(Nis"allocating a new mmap of length %di(tbisectt bisect_leftR tlenR+tmaxRR tPAGESIZERRR&tappendR!tpopR"R#( RRtitlengthtarenatseqtblocktstarttstop((s,/usr/lib64/python2.7/multiprocessing/heap.pyt_mallocqs"!      c Cs!|\}}}y|j||f}Wntk r9nX|j|\}}y|j||f}Wntk rynX|j|\}}|||f}||}y|j|j|Wn4tk r|g|j|#s          PK! 5kk managers.pynu[# # Module providing the `SyncManager` class for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
#

__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]

#
# Imports
#

import os
import sys
import weakref
import threading
import array
import Queue

from traceback import format_exc
from multiprocessing import Process, current_process, active_children, Pool, util, connection
from multiprocessing.process import AuthenticationString
from multiprocessing.forking import exit, Popen, assert_spawning, ForkingPickler
from multiprocessing.util import Finalize, info

try:
    from cPickle import PicklingError
except ImportError:
    from pickle import PicklingError

#
# Register some things for pickling
#

def reduce_array(a):
    # pickle arrays by typecode + raw bytes
    return array.array, (a.typecode, a.tostring())
ForkingPickler.register(array.array, reduce_array)

# the dict view types, used when proxying dict methods
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]

#
# Type for identifying shared objects
#

class Token(object):
    '''
    Type to uniquely identify a shared object
    '''
    __slots__ = ('typeid', 'address', 'id')

    def __init__(self, typeid, address, id):
        (self.typeid, self.address, self.id) = (typeid, address, id)

    def __getstate__(self):
        return (self.typeid, self.address, self.id)

    def __setstate__(self, state):
        (self.typeid, self.address, self.id) = state

    def __repr__(self):
        return 'Token(typeid=%r, address=%r, id=%r)' % \
               (self.typeid, self.address, self.id)

#
# Function for communication with a manager's server process
#

def dispatch(c, id, methodname, args=(), kwds={}):
    '''
    Send a message to manager using connection `c` and return response
    '''
    c.send((id, methodname, args, kwds))
    kind, result = c.recv()
    if kind == '#RETURN':
        return result
    # any non-#RETURN reply is converted into an exception and raised here
    raise convert_to_error(kind, result)

def convert_to_error(kind, result):
    # map a server reply kind to the exception instance to raise
    if kind == '#ERROR':
        return result
    elif kind == '#TRACEBACK':
        assert type(result) is str
        return RemoteError(result)
    elif kind == '#UNSERIALIZABLE':
        assert type(result) is str
        return RemoteError('Unserializable message: %s\n' % result)
    else:
        return ValueError('Unrecognized message type')

class RemoteError(Exception):
    # wraps a traceback string received from the manager's server process
    def __str__(self):
        return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)

#
# Functions for finding the method names of an object
#

def all_methods(obj):
    '''
    Return a list of names of methods of `obj`
    '''
    temp = []
    for name in dir(obj):
        func = getattr(obj, name)
        if hasattr(func, '__call__'):
            temp.append(name)
    return temp

def public_methods(obj):
    '''
    Return a list of names of methods of `obj` which do not start with '_'
    '''
    return [name for name in all_methods(obj) if name[0] != '_']

#
# Server which is run in a process controlled by a manager
#

class Server(object):
    '''
    Server class which runs in a process controlled by a manager object
    '''
    # the only functions callable via handle_request()
    public = ['shutdown', 'create', 'accept_connection', 'get_methods',
              'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']

    def __init__(self, registry, address, authkey, serializer):
        assert isinstance(authkey, bytes)
        self.registry = registry
        self.authkey = AuthenticationString(authkey)
        Listener, Client = listener_client[serializer]

        # do authentication later
        self.listener = Listener(address=address, backlog=16)
        self.address = self.listener.address

        # ident '0' is reserved and never counted as a real shared object
        self.id_to_obj = {'0': (None, ())}
        self.id_to_refcount = {}
        self.mutex = threading.RLock()
        self.stop = 0

    def serve_forever(self):
        '''
        Run the server forever
        '''
        current_process()._manager_server = self
        try:
            try:
                while 1:
                    try:
                        c = self.listener.accept()
                    except (OSError, IOError):
                        continue
                    # one daemon thread per incoming connection
                    t = threading.Thread(target=self.handle_request, args=(c,))
                    t.daemon = True
                    t.start()
            except (KeyboardInterrupt, SystemExit):
                pass
        finally:
            self.stop = 999
            self.listener.close()

    def handle_request(self, c):
        '''
        Handle a new connection
        '''
        funcname = result = request = None
        try:
            # mutual authentication before reading the request
            connection.deliver_challenge(c, self.authkey)
            connection.answer_challenge(c, self.authkey)
            request = c.recv()
            ignore, funcname, args, kwds = request
            assert funcname in self.public, '%r unrecognized' % funcname
            func = getattr(self, funcname)
        except Exception:
            msg = ('#TRACEBACK', format_exc())
        else:
            try:
                result = func(c,
                              *args, **kwds)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            else:
                msg = ('#RETURN', result)
        try:
            c.send(msg)
        except Exception, e:
            # best effort: report the failure back, then log it locally
            try:
                c.send(('#TRACEBACK', format_exc()))
            except Exception:
                pass
            util.info('Failure to send message: %r', msg)
            util.info(' ... request was %r', request)
            util.info(' ... exception was %r', e)

        c.close()

    def serve_client(self, conn):
        '''
        Handle requests from the proxies in a particular process/thread
        '''
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)

        # bind hot attributes to locals for the request loop
        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj

        while not self.stop:

            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                obj, exposed, gettypeid = id_to_obj[ident]

                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed)
                        )

                function = getattr(obj, methodname)

                try:
                    res = function(*args, **kwds)
                except Exception, e:
                    msg = ('#ERROR', e)
                else:
                    # some methods return a new shared object instead of a
                    # plain value; gettypeid maps method name -> typeid
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)

            except AttributeError:
                if methodname is None:
                    msg = ('#TRACEBACK', format_exc())
                else:
                    # method not found on the object -- try the fallbacks
                    try:
                        fallback_func = self.fallback_mapping[methodname]
                        result = fallback_func(
                            self, conn, ident, obj, *args, **kwds
                            )
                        msg = ('#RETURN', result)
                    except Exception:
                        msg = ('#TRACEBACK', format_exc())

            except EOFError:
                util.debug('got EOF -- exiting thread serving %r',
                           threading.current_thread().name)
                sys.exit(0)

            except Exception:
                msg = ('#TRACEBACK', format_exc())

            try:
                try:
                    send(msg)
                except Exception, e:
                    send(('#UNSERIALIZABLE', format_exc()))
            except Exception, e:
                util.info('exception in thread serving %r',
                        threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', e)
                conn.close()
                sys.exit(1)

    # fallbacks usable on any object even if not in its exposed list

    def fallback_getvalue(self, conn, ident, obj):
        return obj

    def fallback_str(self, conn, ident, obj):
        return str(obj)

    def fallback_repr(self, conn, ident, obj):
        return repr(obj)

    fallback_mapping = {
        '__str__':fallback_str,
        '__repr__':fallback_repr,
        '#GETVALUE':fallback_getvalue
        }

    def dummy(self, c):
        # no-op used by BaseManager.connect() to verify the connection
        pass

    def debug_info(self, c):
        '''
        Return some info --- useful to spot problems with refcounting
        '''
        self.mutex.acquire()
        try:
            result = []
            keys = self.id_to_obj.keys()
            keys.sort()
            for ident in keys:
                if ident != '0':
                    result.append('  %s:       refcount=%s\n    %s' %
                                  (ident, self.id_to_refcount[ident],
                                   str(self.id_to_obj[ident][0])[:75]))
            return '\n'.join(result)
        finally:
            self.mutex.release()

    def number_of_objects(self, c):
        '''
        Number of shared objects
        '''
        return len(self.id_to_obj) - 1      # don't count ident='0'

    def shutdown(self, c):
        '''
        Shutdown this process
        '''
        try:
            try:
                util.debug('manager received shutdown message')
                c.send(('#RETURN', None))

                if sys.stdout != sys.__stdout__:
                    util.debug('resetting stdout, stderr')
                    sys.stdout = sys.__stdout__
                    sys.stderr = sys.__stderr__

                util._run_finalizers(0)

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.terminate()

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.join()

                util._run_finalizers()
                util.info('manager exiting with exitcode 0')
            except:
                import traceback
                traceback.print_exc()
        finally:
            exit(0)

    def create(self, c, typeid, *args, **kwds):
        '''
        Create a new shared object and return its id
        '''
        self.mutex.acquire()
        try:
            callable, exposed, method_to_typeid, proxytype = \
                      self.registry[typeid]

            if callable is None:
                # no factory registered: the single positional arg IS the obj
                assert len(args) == 1 and not kwds
                obj = args[0]
            else:
                obj = callable(*args, **kwds)

            if exposed is None:
                exposed = public_methods(obj)
            if method_to_typeid is not None:
                assert type(method_to_typeid) is dict
                exposed = list(exposed) + list(method_to_typeid)

            ident = '%x' % id(obj)  # convert to string because xmlrpclib
                                    # only has
32 bit signed integers
            util.debug('%r callable returned object with id %r', typeid, ident)

            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
            if ident not in self.id_to_refcount:
                self.id_to_refcount[ident] = 0
            # increment the reference count immediately, to avoid
            # this object being garbage collected before a Proxy
            # object for it can be created.  The caller of create()
            # is responsible for doing a decref once the Proxy object
            # has been created.
            self.incref(c, ident)
            return ident, tuple(exposed)
        finally:
            self.mutex.release()

    def get_methods(self, c, token):
        '''
        Return the methods of the shared object indicated by token
        '''
        return tuple(self.id_to_obj[token.id][1])

    def accept_connection(self, c, name):
        '''
        Spawn a new thread to serve this connection
        '''
        threading.current_thread().name = name
        c.send(('#RETURN', None))
        # serve_client() loops until EOF or server shutdown
        self.serve_client(c)

    def incref(self, c, ident):
        self.mutex.acquire()
        try:
            self.id_to_refcount[ident] += 1
        finally:
            self.mutex.release()

    def decref(self, c, ident):
        self.mutex.acquire()
        try:
            assert self.id_to_refcount[ident] >= 1
            self.id_to_refcount[ident] -= 1
            if self.id_to_refcount[ident] == 0:
                # last reference dropped: forget the object entirely
                del self.id_to_obj[ident], self.id_to_refcount[ident]
                util.debug('disposing of obj with id %r', ident)
        finally:
            self.mutex.release()

#
# Class to represent state of a manager
#

class State(object):
    __slots__ = ['value']
    INITIAL = 0
    STARTED = 1
    SHUTDOWN = 2

#
# Mapping from serializer name to Listener and Client types
#

listener_client = {
    'pickle' : (connection.Listener, connection.Client),
    'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
    }

#
# Definition of BaseManager
#

class BaseManager(object):
    '''
    Base class for managers
    '''
    _registry = {}
    _Server = Server

    def __init__(self, address=None, authkey=None, serializer='pickle'):
        if authkey is None:
            authkey = current_process().authkey
        self._address = address     # XXX not final address if eg ('', 0)
        self._authkey = AuthenticationString(authkey)
        self._state = State()
        self._state.value = State.INITIAL
        self._serializer = serializer
        self._Listener, self._Client = listener_client[serializer]

    def __reduce__(self):
        # pickling a manager yields a reconnect-by-address recipe
        return type(self).from_address, \
               (self._address, self._authkey, self._serializer)

    def get_server(self):
        '''
        Return server object with serve_forever() method and address attribute
        '''
        assert self._state.value == State.INITIAL
        return Server(self._registry, self._address,
                      self._authkey, self._serializer)

    def connect(self):
        '''
        Connect manager object to the server process
        '''
        Listener, Client = listener_client[self._serializer]
        conn = Client(self._address, authkey=self._authkey)
        # 'dummy' round-trip verifies the server is reachable
        dispatch(conn, None, 'dummy')
        self._state.value = State.STARTED

    def start(self, initializer=None, initargs=()):
        '''
        Spawn a server process for this manager object
        '''
        assert self._state.value == State.INITIAL

        if initializer is not None and not hasattr(initializer, '__call__'):
            raise TypeError('initializer must be a callable')

        # pipe over which we will retrieve address of server
        reader, writer = connection.Pipe(duplex=False)

        # spawn process which runs a server
        self._process = Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, self._authkey,
                  self._serializer, writer, initializer, initargs),
            )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__ + '-' + ident
        self._process.start()

        # get address of server
        writer.close()
        self._address = reader.recv()
        reader.close()

        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey,
                  self._state, self._Client),
            exitpriority=0
            )

    @classmethod
    def _run_server(cls, registry, address, authkey, serializer, writer,
                    initializer=None, initargs=()):
        '''
        Create a server, report its address and run it
        '''
        if initializer is not None:
            initializer(*initargs)

        # create server
        server = cls._Server(registry, address, authkey, serializer)

        # inform parent process of the server's address
        writer.send(server.address)
        writer.close()

        # run the manager
        util.info('manager serving at %r', server.address)
        server.serve_forever()

    def _create(self, typeid, *args, **kwds):
        '''
        Create a new shared object; return the token and exposed tuple
        '''
        assert self._state.value == State.STARTED, 'server not yet started'
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
        finally:
            conn.close()
        return Token(typeid, self._address, id), exposed

    def join(self, timeout=None):
        '''
        Join the manager process (if it has been spawned)
        '''
        self._process.join(timeout)

    def _debug_info(self):
        '''
        Return some info about the servers shared objects and connections
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'debug_info')
        finally:
            conn.close()

    def _number_of_objects(self):
        '''
        Return the number of shared objects
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'number_of_objects')
        finally:
            conn.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    @staticmethod
    def _finalize_manager(process, address, authkey, state, _Client):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        if process.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                pass

            # escalate: polite shutdown, then terminate() if still alive
            process.join(timeout=0.2)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    process.terminate()
                    process.join(timeout=0.1)
                    if process.is_alive():
                        util.info('manager still alive after terminate')

        state.value = State.SHUTDOWN
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass

    address = property(lambda self: self._address)

    @classmethod
    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
                 method_to_typeid=None, create_method=True):
        '''
        Register a typeid with the manager type
        '''
        # copy-on-write so subclasses do not mutate the parent's registry
        if '_registry' not in cls.__dict__:
            cls._registry = cls._registry.copy()

        if proxytype is None:
            proxytype = AutoProxy

        exposed = exposed or getattr(proxytype, '_exposed_', None)

        method_to_typeid = method_to_typeid or \
                           getattr(proxytype, '_method_to_typeid_', None)

        if method_to_typeid:
            for key, value in method_to_typeid.items():
                assert type(key) is str, '%r is not a string' % key
                assert type(value) is str, '%r is not a string' % value

        cls._registry[typeid] = (
            callable, exposed, method_to_typeid, proxytype
            )

        if create_method:
            # attach a factory method named after the typeid to the class
            def temp(self, *args, **kwds):
                util.debug('requesting creation of a shared %r object', typeid)
                token, exp = self._create(typeid, *args, **kwds)
                proxy = proxytype(
                    token, self._serializer, manager=self,
                    authkey=self._authkey, exposed=exp
                    )
                conn = self._Client(token.address, authkey=self._authkey)
                dispatch(conn, None, 'decref', (token.id,))
                return proxy
            temp.__name__ = typeid
            setattr(cls, typeid, temp)

#
# Subclass of set which get cleared after a fork
#

class ProcessLocalSet(set):
    def __init__(self):
        util.register_after_fork(self, lambda obj: obj.clear())
    def __reduce__(self):
        # unpickles as a fresh empty set in the other process
        return type(self), ()

#
# Definition of BaseProxy
#

class BaseProxy(object):
    '''
    A base for proxies of shared objects
    '''
    _address_to_local = {}
    _mutex = util.ForkAwareThreadLock()

    def __init__(self, token, serializer, manager=None,
                 authkey=None, exposed=None, incref=True):
        BaseProxy._mutex.acquire()
        try:
            tls_idset = BaseProxy._address_to_local.get(token.address, None)
            if tls_idset is None:
                tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
                BaseProxy._address_to_local[token.address] = tls_idset
        finally:
            BaseProxy._mutex.release()

        # self._tls is used to record the connection used by this
        # thread to communicate with the manager at token.address
        self._tls = tls_idset[0]

        # self._idset is used to record the identities of all shared
        #
objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] if authkey is not None: self._authkey = AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referrent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) 
state = self._manager and self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception, e: util.debug('... decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception, e: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if Popen.thread_is_spawning(): kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %s>' % \ (type(self).__name__, self._token.typeid, '0x%x' % id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling 
proxy objects. If possible the shared object is returned, or otherwise a proxy for it. ''' server = getattr(current_process(), '_manager_server', None) if server and server.address == token.address: return server.id_to_obj[token.id][0] else: incref = ( kwds.pop('incref', True) and not getattr(current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return a proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec '''def %s(self, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth) in dic ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, **kwds): self.__dict__.update(kwds) def __repr__(self): items = self.__dict__.items() temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return 'Namespace(%s)' % str.join(', ', temp) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value def 
get(self): return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): # XXX remove methods for Py3.0 and Py2.6 _exposed_ = ('__next__', 'next', 'send', 'throw', 'close') def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def next(self, *args): return self._callmethod('next', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True): return self._callmethod('acquire', (blocking,)) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): # XXX will Condition.notfyAll() name be available in Py3.0? 
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self): return self._callmethod('notify') def notify_all(self): return self._callmethod('notify_all') class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', '__delslice__', '__getitem__', '__getslice__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', '__setslice__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__' )) # XXX __getslice__ and __setslice__ unneeded in Py3.0 class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = 
MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' )) DictProxy._method_to_typeid_ = { '__iter__': 'Iterator', } ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__' )) # XXX __getslice__ and __setslice__ unneeded in Py3.0 PoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'terminate' )) PoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator' } # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocessing.Manager()` function creates started instances of this class. 
''' SyncManager.register('Queue', Queue.Queue) SyncManager.register('JoinableQueue', Queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Pool', Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False) PK!5%j2 managers.pycnu[ {fc@sddddgZddlZddlZddlZddlZddlZddlZddlmZddl m Z m Z m Z m Z mZmZddlmZdd lmZmZmZmZdd lmZmZydd lmZWn!ek rdd lmZnXd Zejejegd ddfD]Z e!e"ie ^qDZ#de$fdYZ%fidZ&dZ'de(fdYZ)dZ*dZ+de$fdYZ,de$fdYZ-iej.ej/fd6ej0ej1fd6Z2de$fdYZ3de4fdYZ5de$fd YZ6d!Z7id"Z8e9e9e9e:d#Z;d$e$fd%YZ<d&e$fd'YZ=e:d(Z>d)e6fd*YZ?d+e6fd,YZ@d-e@fd.YZAd/e6fd0YZBd1e6fd2YZCd3e6fd4YZDe8d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKfZEdLeEfdMYZFe8dNd7d8d:dOd<d@dPdQdRdSd ddGdTdUdVdfZGidWdO6eG_He8dXd<d:d@d;dAfZIe8dYdZd[d\d]d^d_d`dadbf ZJidcd[6dcda6dWd]6dWd^6eJ_Hde3fddYZKeKjdeejeKjdfejeKjdgejLeBeKjdhejMe@eKjdiejNe@eKjdjejOe@eKjdkejPe@eKjdlejQeAeKjdme eJeKjdneReFeKjdoeSeGeKjd&e=eDeKjdpe>eIeKjd$e<eCeKjdWdqe?dreTeKjdcdreTdS(st BaseManagert SyncManagert BaseProxytTokeniN(t format_exc(tProcesstcurrent_processtactive_childrentPooltutilt connection(tAuthenticationString(texittPopentassert_spawningtForkingPickler(tFinalizetinfo(t PicklingErrorcCstj|j|jffS(N(tarrayttypecodettostring(ta((s0/usr/lib64/python2.7/multiprocessing/managers.pyt 
reduce_array@stitemstkeystvaluescBs8eZdZdZdZdZdZdZRS( s4 Type to uniquely indentify a shared object ttypeidtaddresstidcCs!||||_|_|_dS(N(RRR(tselfRRR((s0/usr/lib64/python2.7/multiprocessing/managers.pyt__init__PscCs|j|j|jfS(N(RRR(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt __getstate__SscCs|\|_|_|_dS(N(RRR(Rtstate((s0/usr/lib64/python2.7/multiprocessing/managers.pyt __setstate__VscCsd|j|j|jfS(Ns#Token(typeid=%r, address=%r, id=%r)(RRR(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt__repr__Ys(RRR(t__name__t __module__t__doc__t __slots__RR R"R#(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRJs    cCsN|j||||f|j\}}|dkr;|St||dS(sL Send a message to manager using connection `c` and return response s#RETURNN(tsendtrecvtconvert_to_error(tcRt methodnametargstkwdstkindtresult((s0/usr/lib64/python2.7/multiprocessing/managers.pytdispatchas  cCs~|dkr|S|dkr>t|tks4tt|S|dkrpt|tksbttd|StdSdS(Ns#ERRORs #TRACEBACKs#UNSERIALIZABLEsUnserializable message: %s sUnrecognized message type(ttypetstrtAssertionErrort RemoteErrort ValueError(R/R0((s0/usr/lib64/python2.7/multiprocessing/managers.pyR*ks    R5cBseZdZRS(cCs)ddddt|jdddS(Ns t-iKi(R3R-(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt__str__xs(R$R%R8(((s0/usr/lib64/python2.7/multiprocessing/managers.pyR5wscCsOg}xBt|D]4}t||}t|dr|j|qqW|S(s4 Return a list of names of methods of `obj` t__call__(tdirtgetattrthasattrtappend(tobjttemptnametfunc((s0/usr/lib64/python2.7/multiprocessing/managers.pyt all_methodss cCs-gt|D]}|ddkr |^q S(sP Return a list of names of methods of `obj` which do not start with '_' it_(RB(R>R@((s0/usr/lib64/python2.7/multiprocessing/managers.pytpublic_methodsstServerc BseZdZddddddddd g Zd Zd Zd Zd ZdZdZ dZ ie d6e d6ed6Z dZ dZ dZdZdZdZdZdZdZRS(sM Server class which runs in a process controlled by a manager object tshutdowntcreatetaccept_connectiont get_methodst debug_infotnumber_of_objectstdummytincreftdecrefcCst|tst||_t||_t|\}}|d|dd|_|jj|_iddfd6|_ i|_ t j |_d|_dS(NRtbacklogit0i((t 
isinstancetbytesR4tregistryR tauthkeytlistener_clienttlistenerRtNonet id_to_objtid_to_refcountt threadingtRLocktmutextstop(RRSRRTt serializertListenertClient((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs  cCs|t_zylxey|jj}Wnttfk rDqnXtjd|jd|f}t |_ |j qWWnt t fk rnXWdd|_|jjXdS(s( Run the server forever ttargetR-Ni(Rt_manager_serverRVtaccepttOSErrortIOErrorRZtThreadthandle_requesttTruetdaemontstarttKeyboardInterruptt SystemExitR]tclose(RR+tt((s0/usr/lib64/python2.7/multiprocessing/managers.pyt serve_forevers   c Csd}}}yvtj||jtj||j|j}|\}}}}||jksttd|t||}Wn t k rdt f} nFXy||||}Wn t k rdt f} n Xd|f} y|j | Wnqt k rp} y|j dt fWnt k r<nXt j d| t j d|t j d| nX|jdS(s) Handle a new connection s%r unrecognizeds #TRACEBACKs#RETURNsFailure to send message: %rs ... request was %rs ... exception was %rN(RWR tdeliver_challengeRTtanswer_challengeR)tpublicR4R;t ExceptionRR(R RRm( RR+tfuncnameR0trequesttignoreR-R.RAtmsgte((s0/usr/lib64/python2.7/multiprocessing/managers.pyRgs4     cCstjdtjj|j}|j}|j}x|jsyd}}|}|\}}} } ||\}} } || krt d|t || fnt ||} y| | | }Wnt k r}d|f}npX| o| j|d}|rP|j|||\}}t||j|}d||ff}n d|f}Wnt k r|dkrdtf}q?y8|j|}|||||| | }d|f}Wq?t k rdtf}q?XnVtk rtjdtjjtjdn t k r>dtf}nXy;y||Wn&t k rx}|d tfnXWq7t k r}tjd tjjtjd |tjd ||jtjd q7Xq7WdS(sQ Handle requests from the proxies in a particular process/thread s$starting server thread to service %rs+method %r of %r object is not in exposed=%rs#ERRORs#PROXYs#RETURNs #TRACEBACKs$got EOF -- exiting thread serving %ris#UNSERIALIZABLEsexception in thread serving %rs ... message was %rs ... 
exception was %riN(R tdebugRZtcurrent_threadR@R)R(RXR]RWtAttributeErrorR2R;RstgetRGRRRtfallback_mappingtEOFErrortsysR RRm(RtconnR)R(RXR,R>RutidentR-R.texposedt gettypeidtfunctiontresRxRwRtridenttrexposedttokent fallback_funcR0((s0/usr/lib64/python2.7/multiprocessing/managers.pyt serve_clientsl                 cCs|S(N((RRRR>((s0/usr/lib64/python2.7/multiprocessing/managers.pytfallback_getvalue+scCs t|S(N(R3(RRRR>((s0/usr/lib64/python2.7/multiprocessing/managers.pyt fallback_str.scCs t|S(N(trepr(RRRR>((s0/usr/lib64/python2.7/multiprocessing/managers.pyt fallback_repr1sR8R#s #GETVALUEcCsdS(N((RR+((s0/usr/lib64/python2.7/multiprocessing/managers.pyRL:scCs|jjzg}|jj}|jxS|D]K}|dkr6|jd||j|t|j|dd fq6q6Wdj|SWd|jj XdS(sO Return some info --- useful to spot problems with refcounting RPs %s: refcount=%s %siiKs N( R\tacquireRXRtsortR=RYR3tjointrelease(RR+R0RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRJ=s      'cCst|jdS(s* Number of shared objects i(tlenRX(RR+((s0/usr/lib64/python2.7/multiprocessing/managers.pyRKOscCszytjd|jd tjtjkrZtjdtjt_tjt_ntj dx(t D]}tjd|j qqWx(t D]}tjd|j qWtj tj dWnddl}|jnXWdtdXdS( s' Shutdown this process s!manager received shutdown messages#RETURNsresetting stdout, stderris&terminating a child process of managersmanager exiting with exitcode 0iN(s#RETURNN(R RyR(RWRtstdoutt __stdout__t __stderr__tstderrt_run_finalizersRt terminateRRt tracebackt print_excR (RR+tpR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRFUs*         c Os\|jjz:|j|\}}}}|dkrat|dkrN| sTt|d} n|||} |dkrt| }n|dk rt|tkstt |t |}ndt | } t j d|| | t ||f|j| <| |jkr&d|j| Create a new shared object and return its id iis%xs&%r callable returned object with id %rN(R\RRSRWRR4RDR2tdicttlistRR RytsetRXRYRMttupleR( RR+RR-R.tcallableRtmethod_to_typeidt proxytypeR>R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRGus(     cCst|j|jdS(sL Return the methods of the shared object indicated by token i(RRXR(RR+R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRIscCs-|tj_|jd|j|dS(s= Spawn a 
new thread to serve this connection s#RETURNN(s#RETURNN(RZRzR@R(RWR(RR+R@((s0/usr/lib64/python2.7/multiprocessing/managers.pyRHs cCs9|jjz|j|cd7 Connect manager object to the server process RTRLN( RURRRR1RWRRRR(RR_R`R((s0/usr/lib64/python2.7/multiprocessing/managers.pytconnectsc Csg|jjtjkst|d k rFt|d rFtdntj dt \}}t dt |j d|j|j|j|j|||f|_djd|jjD}t |jd||j_|jj|j|j|_|jtj|j_tj|t |jd|j|j|j|j|jfd d |_d S( s@ Spawn a server process for this manager object R9sinitializer must be a callabletduplexRaR-t:css|]}t|VqdS(N(R3(t.0ti((s0/usr/lib64/python2.7/multiprocessing/managers.pys sR7t exitpriorityiN( RRRRR4RWR<t TypeErrorR tPipetFalseRR2t _run_serverRRRRt_processRt _identityR$R@RjRmR)RR Rt_finalize_managerRRF(Rt initializertinitargstreadertwriterR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRjs(   c Csl|dk r||n|j||||}|j|j|jtjd|j|jdS(s@ Create a server, report its address and run it smanager serving at %rN(RWt_ServerR(RRmR RRo( tclsRSRRTR^RRRtserver((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs   cOs|jjtjks!td|j|jd|j}z)t|dd|f||\}}Wd|j Xt ||j||fS(sP Create a new shared object; return the token and exposed tuple sserver not yet startedRTRGN( RRRRR4RRRR1RWRmR(RRR-R.RRR((s0/usr/lib64/python2.7/multiprocessing/managers.pyt_create0s !) 
cCs|jj|dS(sC Join the manager process (if it has been spawned) N(RR(Rttimeout((s0/usr/lib64/python2.7/multiprocessing/managers.pyR<scCsA|j|jd|j}zt|ddSWd|jXdS(sS Return some info about the servers shared objects and connections RTRJN(RRRR1RWRm(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyt _debug_infoBscCsA|j|jd|j}zt|ddSWd|jXdS(s5 Return the number of shared objects RTRKN(RRRR1RWRm(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyt_number_of_objectsLscCs|S(N((R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt __enter__VscCs|jdS(N(RF(Rtexc_typetexc_valtexc_tb((s0/usr/lib64/python2.7/multiprocessing/managers.pyt__exit__YscCs|jrtjdy8||d|}zt|ddWd|jXWntk rdnX|jdd|jrtjdt|drtjd |j |jdd |jrtjd qqqnt j |_ yt j|=Wntk rnXdS( sQ Shutdown the manager process; will be registered as a finalizer s#sending shutdown message to managerRTRFNRg?smanager still aliveRs'trying to `terminate()` manager processg?s#manager still alive after terminate(tis_aliveR RR1RWRmRsRR<RRRRRt_address_to_localtKeyError(tprocessRRTR!RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyR\s.          
cCs|jS(N(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt|tc s$d|jkr$|jj|_ndkr9tn|pNtdd}|pftdd}|rxa|jD]P\}}t|tkst d|t|tks|t d|q|Wn|||f|j<|r fd} | _ t || ndS(s9 Register a typeid with the manager type Rt _exposed_t_method_to_typeid_s%r is not a stringc stjd|j||\}}||jd|d|jd|}|j|jd|j}t|dd|j f|S(Ns)requesting creation of a shared %r objecttmanagerRTRRN( R RyRRRRRR1RWR(RR-R.RtexptproxyR(RR(s0/usr/lib64/python2.7/multiprocessing/managers.pyR?sN( t__dict__RtcopyRWt AutoProxyR;RR2R3R4R$tsetattr( RRRRRRt create_methodtkeyRR?((RRs0/usr/lib64/python2.7/multiprocessing/managers.pytregister~s   ") N(((R$R%R&RRERRWRRRRRjt classmethodRRRRRRRt staticmethodRtpropertyRRhR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs*   $     tProcessLocalSetcBseZdZdZRS(cCstj|ddS(NcSs |jS(N(tclear(R>((s0/usr/lib64/python2.7/multiprocessing/managers.pyRR(R tregister_after_fork(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCst|dfS(N((R2(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs(R$R%RR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs cBseZdZiZejZd d d edZ dZ d idZ dZ dZ edZdZdZd Zd Zd ZRS(s. A base for proxies of shared objects cCs;tjjzPtjj|jd}|dkr\tjt f}|tj|js0x%x(R2R$RRR(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyR#OscCs8y|jdSWn tk r3t|d dSXdS(sV Return representation of the referent (or a fall-back if that fails) R#is; '__str__()' failed>N(RRsR(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyR8Ss N((R$R%R&RR tForkAwareThreadLockRRWRhRRRRRRRRRRR#R8(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs %      cCsttdd}|r?|j|jkr?|j|jdS|jdtodttdt }|||d||SdS(s Function used for unpickling proxy objects. If possible the shared object is returned, or otherwise a proxy for it. 
RbiRMt _inheritingN( R;RRWRRXRtpopRhR(RARR^R.RRM((s0/usr/lib64/python2.7/multiprocessing/managers.pyR`s cBse|}y|||fSWnek r1nXi}x |D]}d||f|Uq?We|ef|}||_||||f<|S(sB Return a proxy type whose methods are given by `exposed` sLdef %s(self, *args, **kwds): return self._callmethod(%r, args, kwds)(RRR2RR(R@Rt_cachetdictmetht ProxyType((s0/usr/lib64/python2.7/multiprocessing/managers.pyt MakeProxyTypeus    c Cst|d}|dkr\||jd|}zt|dd|f}Wd|jXn|dkr|dk r|j}n|dkrtj}ntd|j |}|||d|d|d|} t | _ | S(s* Return an auto-proxy for `token` iRTRINs AutoProxy[%s]RRM( RURWRR1RmRRRTR RRhR( RR^RRTRRMRRR R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs     t NamespacecBseZdZdZRS(cKs|jj|dS(N(Rtupdate(RR.((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCss|jj}g}x=|D]5\}}|jds|jd||fqqW|jdtjd|S(NRCs%s=%rs Namespace(%s)s, (RRt startswithR=RR3R(RRR?R@R((s0/usr/lib64/python2.7/multiprocessing/managers.pyR#s (R$R%RR#(((s0/usr/lib64/python2.7/multiprocessing/managers.pyR s tValuecBs>eZedZdZdZdZeeeZRS(cCs||_||_dS(N(t _typecodet_value(RRRtlock((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs cCs|jS(N(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyR|scCs ||_dS(N(R(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs dt|j|j|jfS(Ns %s(%r, %r)(R2R$RR(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyR#s( R$R%RhRR|RR#RR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs     cCstj||S(N(R(RtsequenceR((s0/usr/lib64/python2.7/multiprocessing/managers.pytArrayst IteratorProxycBsDeZd ZdZdZdZdZd Zd ZRS( t__next__tnextR(tthrowRmcCs|S(N((R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt__iter__scGs|jd|S(NR(R(RR-((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscGs|jd|S(NR(R(RR-((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscGs|jd|S(NR((R(RR-((s0/usr/lib64/python2.7/multiprocessing/managers.pyR(scGs|jd|S(NR(R(RR-((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscGs|jd|S(NRm(R(RR-((s0/usr/lib64/python2.7/multiprocessing/managers.pyRms(RRR(RRm( 
R$R%RRRRR(RRm(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs     t AcquirerProxycBs5eZdZedZdZdZdZRS(RRcCs|jd|fS(NR(R(Rtblocking((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(RRRR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs(RR(R$R%RRhRRRR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs    tConditionProxycBs,eZdZd dZdZdZRS( RRtwaittnotifyt notify_allcCs|jd|fS(NR(R(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs(RRRRRN(R$R%RRWRRR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs  t EventProxycBs5eZdZdZdZdZd dZRS( tis_setRRRcCs |jdS(NR!(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyR!scCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs|jd|fS(NR(R(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs(R!RRRN(R$R%RR!RRRWR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyR s    tNamespaceProxycBs)eZdZdZdZdZRS(t__getattribute__t __setattr__t __delattr__cCsB|ddkr tj||Stj|d}|d|fS(NiRCRR#(tobjectR#(RRt callmethod((s0/usr/lib64/python2.7/multiprocessing/managers.pyt __getattr__scCsH|ddkr#tj|||Stj|d}|d||fS(NiRCRR$(R&R$R#(RRRR'((s0/usr/lib64/python2.7/multiprocessing/managers.pyR$scCsB|ddkr tj||Stj|d}|d|fS(NiRCRR%(R&R%R#(RRR'((s0/usr/lib64/python2.7/multiprocessing/managers.pyR%s(R#R$R%(R$R%RR(R$R%(((s0/usr/lib64/python2.7/multiprocessing/managers.pyR"s  t ValueProxycBs/eZdZdZdZeeeZRS(R|RcCs |jdS(NR|(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyR|scCs|jd|fS(NR(R(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs(R|R(R$R%RR|RRR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyR) s  t BaseListProxyt__add__t __contains__t 
__delitem__t __delslice__t __getitem__t __getslice__t__len__t__mul__t __reversed__t__rmul__t __setitem__t __setslice__R=tcounttextendtindextinsertRtremovetreverseRt__imul__t ListProxycBseZdZdZRS(cCs|jd|f|S(NR8(R(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyt__iadd__scCs|jd|f|S(NR=(R(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyR= s(R$R%R?R=(((s0/usr/lib64/python2.7/multiprocessing/managers.pyR>s t DictProxyRRRR|thas_keytpopitemt setdefaultR tIteratort ArrayProxyt PoolProxytapplyt apply_asyncRmtimaptimap_unorderedRtmapt map_asyncRt AsyncResultcBseZdZRS(s( Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocessing.Manager()` function creates started instances of this class. (R$R%R&(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRCs tQueuet JoinableQueuetEventtLockR[t SemaphoretBoundedSemaphoret ConditionRRRRRR(Ut__all__tosRtweakrefRZRRNRRtmultiprocessingRRRRR R tmultiprocessing.processR tmultiprocessing.forkingR R RRtmultiprocessing.utilRRtcPickleRt ImportErrorRRRR@R2R;t view_typesR&RR1R*RsR5RBRDRERR_R`t XmlListenert XmlClientRURRRRRR RWRhRR RRRRRR R"R)R*R>R@RRERFRRPRQR[RRRSRTRRR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyt$s      ."  
4  ,                 PK!8 managers.pyonu[ {fc@sddddgZddlZddlZddlZddlZddlZddlZddlmZddl m Z m Z m Z m Z mZmZddlmZdd lmZmZmZmZdd lmZmZydd lmZWn!ek rdd lmZnXd Zejejegd ddfD]Z e!e"ie ^qDZ#de$fdYZ%fidZ&dZ'de(fdYZ)dZ*dZ+de$fdYZ,de$fdYZ-iej.ej/fd6ej0ej1fd6Z2de$fdYZ3de4fdYZ5de$fd YZ6d!Z7id"Z8e9e9e9e:d#Z;d$e$fd%YZ<d&e$fd'YZ=e:d(Z>d)e6fd*YZ?d+e6fd,YZ@d-e@fd.YZAd/e6fd0YZBd1e6fd2YZCd3e6fd4YZDe8d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKfZEdLeEfdMYZFe8dNd7d8d:dOd<d@dPdQdRdSd ddGdTdUdVdfZGidWdO6eG_He8dXd<d:d@d;dAfZIe8dYdZd[d\d]d^d_d`dadbf ZJidcd[6dcda6dWd]6dWd^6eJ_Hde3fddYZKeKjdeejeKjdfejeKjdgejLeBeKjdhejMe@eKjdiejNe@eKjdjejOe@eKjdkejPe@eKjdlejQeAeKjdme eJeKjdneReFeKjdoeSeGeKjd&e=eDeKjdpe>eIeKjd$e<eCeKjdWdqe?dreTeKjdcdreTdS(st BaseManagert SyncManagert BaseProxytTokeniN(t format_exc(tProcesstcurrent_processtactive_childrentPooltutilt connection(tAuthenticationString(texittPopentassert_spawningtForkingPickler(tFinalizetinfo(t PicklingErrorcCstj|j|jffS(N(tarrayttypecodettostring(ta((s0/usr/lib64/python2.7/multiprocessing/managers.pyt reduce_array@stitemstkeystvaluescBs8eZdZdZdZdZdZdZRS( s4 Type to uniquely indentify a shared object ttypeidtaddresstidcCs!||||_|_|_dS(N(RRR(tselfRRR((s0/usr/lib64/python2.7/multiprocessing/managers.pyt__init__PscCs|j|j|jfS(N(RRR(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt __getstate__SscCs|\|_|_|_dS(N(RRR(Rtstate((s0/usr/lib64/python2.7/multiprocessing/managers.pyt __setstate__VscCsd|j|j|jfS(Ns#Token(typeid=%r, address=%r, id=%r)(RRR(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt__repr__Ys(RRR(t__name__t __module__t__doc__t __slots__RR R"R#(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRJs    cCsN|j||||f|j\}}|dkr;|St||dS(sL Send a message to manager using connection `c` and return response s#RETURNN(tsendtrecvtconvert_to_error(tcRt methodnametargstkwdstkindtresult((s0/usr/lib64/python2.7/multiprocessing/managers.pytdispatchas  cCsN|dkr|S|dkr&t|S|dkr@td|StdSdS(Ns#ERRORs #TRACEBACKs#UNSERIALIZABLEsUnserializable message: %s 
sUnrecognized message type(t RemoteErrort ValueError(R/R0((s0/usr/lib64/python2.7/multiprocessing/managers.pyR*ks    R2cBseZdZRS(cCs)ddddt|jdddS(Ns t-iKi(tstrR-(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt__str__xs(R$R%R6(((s0/usr/lib64/python2.7/multiprocessing/managers.pyR2wscCsOg}xBt|D]4}t||}t|dr|j|qqW|S(s4 Return a list of names of methods of `obj` t__call__(tdirtgetattrthasattrtappend(tobjttemptnametfunc((s0/usr/lib64/python2.7/multiprocessing/managers.pyt all_methodss cCs-gt|D]}|ddkr |^q S(sP Return a list of names of methods of `obj` which do not start with '_' it_(R@(R<R>((s0/usr/lib64/python2.7/multiprocessing/managers.pytpublic_methodsstServerc BseZdZddddddddd g Zd Zd Zd Zd ZdZdZ dZ ie d6e d6ed6Z dZ dZ dZdZdZdZdZdZdZRS(sM Server class which runs in a process controlled by a manager object tshutdowntcreatetaccept_connectiont get_methodst debug_infotnumber_of_objectstdummytincreftdecrefcCs||_t||_t|\}}|d|dd|_|jj|_iddfd6|_i|_t j |_ d|_ dS(NRtbacklogit0i(( tregistryR tauthkeytlistener_clienttlistenerRtNonet id_to_objtid_to_refcountt threadingtRLocktmutextstop(RRORRPt serializertListenertClient((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs  cCs|t_zylxey|jj}Wnttfk rDqnXtjd|jd|f}t |_ |j qWWnt t fk rnXWdd|_|jjXdS(s( Run the server forever ttargetR-Ni(Rt_manager_serverRRtaccepttOSErrortIOErrorRVtThreadthandle_requesttTruetdaemontstarttKeyboardInterruptt SystemExitRYtclose(RR+tt((s0/usr/lib64/python2.7/multiprocessing/managers.pyt serve_forevers   c Cs`d}}}yWtj||jtj||j|j}|\}}}}t||}Wn tk rdtf} nFXy||||}Wn tk rdtf} n Xd|f} y|j | Wnqtk rQ} y|j dtfWntk rnXt j d| t j d|t j d| nX|j dS(s) Handle a new connection s #TRACEBACKs#RETURNsFailure to send message: %rs ... request was %rs ... 
exception was %rN( RSR tdeliver_challengeRPtanswer_challengeR)R9t ExceptionRR(R RRi( RR+tfuncnameR0trequesttignoreR-R.R?tmsgte((s0/usr/lib64/python2.7/multiprocessing/managers.pyRcs2     cCstjdtjj|j}|j}|j}x|jsyd}}|}|\}}} } ||\}} } || krt d|t || fnt ||} y| | | }Wnt k r}d|f}npX| o| j|d}|rP|j|||\}}t||j|}d||ff}n d|f}Wnt k r|dkrdtf}q?y8|j|}|||||| | }d|f}Wq?t k rdtf}q?XnVtk rtjdtjjtjdn t k r>dtf}nXy;y||Wn&t k rx}|d tfnXWq7t k r}tjd tjjtjd |tjd ||jtjd q7Xq7WdS(sQ Handle requests from the proxies in a particular process/thread s$starting server thread to service %rs+method %r of %r object is not in exposed=%rs#ERRORs#PROXYs#RETURNs #TRACEBACKs$got EOF -- exiting thread serving %ris#UNSERIALIZABLEsexception in thread serving %rs ... message was %rs ... exception was %riN(R tdebugRVtcurrent_threadR>R)R(RTRYRStAttributeErrorttypeR9RntgetRERRRtfallback_mappingtEOFErrortsysR RRi(RtconnR)R(RTR,R<RptidentR-R.texposedt gettypeidtfunctiontresRsRrRtridenttrexposedttokent fallback_funcR0((s0/usr/lib64/python2.7/multiprocessing/managers.pyt serve_clientsl                 cCs|S(N((RR|R}R<((s0/usr/lib64/python2.7/multiprocessing/managers.pytfallback_getvalue+scCs t|S(N(R5(RR|R}R<((s0/usr/lib64/python2.7/multiprocessing/managers.pyt fallback_str.scCs t|S(N(trepr(RR|R}R<((s0/usr/lib64/python2.7/multiprocessing/managers.pyt fallback_repr1sR6R#s #GETVALUEcCsdS(N((RR+((s0/usr/lib64/python2.7/multiprocessing/managers.pyRJ:scCs|jjzg}|jj}|jxS|D]K}|dkr6|jd||j|t|j|dd fq6q6Wdj|SWd|jj XdS(sO Return some info --- useful to spot problems with refcounting RNs %s: refcount=%s %siiKs N( RXtacquireRTRtsortR;RUR5tjointrelease(RR+R0RR}((s0/usr/lib64/python2.7/multiprocessing/managers.pyRH=s      'cCst|jdS(s* Number of shared objects i(tlenRT(RR+((s0/usr/lib64/python2.7/multiprocessing/managers.pyRIOscCszytjd|jd tjtjkrZtjdtjt_tjt_ntj dx(t D]}tjd|j qqWx(t D]}tjd|j qWtj tj dWnddl}|jnXWdtdXdS( s' Shutdown this process s!manager received shutdown messages#RETURNsresetting stdout, stderris&terminating a child 
process of managersmanager exiting with exitcode 0iN(s#RETURNN(R RtR(RSR{tstdoutt __stdout__t __stderr__tstderrt_run_finalizersRt terminateRRt tracebackt print_excR (RR+tpR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRDUs*         c Os%|jjz|j|\}}}}|dkrB|d} n|||} |dkrlt| }n|dk rt|t|}ndt| } tjd|| | t ||f|j | <| |j krd|j | Create a new shared object and return its id is%xs&%r callable returned object with id %rN(RXRRORSRBtlistRR RttsetRTRURKttupleR( RR+RR-R.tcallableR~tmethod_to_typeidt proxytypeR<R}((s0/usr/lib64/python2.7/multiprocessing/managers.pyREus$     cCst|j|jdS(sL Return the methods of the shared object indicated by token i(RRTR(RR+R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRGscCs-|tj_|jd|j|dS(s= Spawn a new thread to serve this connection s#RETURNN(s#RETURNN(RVRuR>R(RSR(RR+R>((s0/usr/lib64/python2.7/multiprocessing/managers.pyRFs cCs9|jjz|j|cd7 Connect manager object to the server process RPRJN( RQRRRR1RSRRRR(RR[R\R|((s0/usr/lib64/python2.7/multiprocessing/managers.pytconnectsc CsL|d k r+t|d r+tdntjdt\}}tdt|jd|j |j |j |j |||f|_ djd|j jD}t|jd||j _|j j|j|j|_ |jtj|j_tj|t|jd|j |j |j |j|jfd d |_d S( s@ Spawn a server process for this manager object R7sinitializer must be a callabletduplexR]R-t:css|]}t|VqdS(N(R5(t.0ti((s0/usr/lib64/python2.7/multiprocessing/managers.pys sR4t exitpriorityiN(RSR:t TypeErrorR tPipetFalseRRwt _run_serverRRRRt_processRt _identityR$R>RfRiR)RRRRR Rt_finalize_managerRRD(Rt initializertinitargstreadertwriterR}((s0/usr/lib64/python2.7/multiprocessing/managers.pyRfs&   c Csl|dk r||n|j||||}|j|j|jtjd|j|jdS(s@ Create a server, report its address and run it smanager serving at %rN(RSt_ServerR(RRiR RRk( tclsRORRPRZRRRtserver((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs   cOsk|j|jd|j}z)t|dd|f||\}}Wd|jXt||j||fS(sP Create a new shared object; return the token and exposed tuple RPREN(RRRR1RSRiR(RRR-R.R|RR~((s0/usr/lib64/python2.7/multiprocessing/managers.pyt_create0s ) cCs|jj|dS(sC Join the manager process (if it has been 
spawned) N(RR(Rttimeout((s0/usr/lib64/python2.7/multiprocessing/managers.pyR<scCsA|j|jd|j}zt|ddSWd|jXdS(sS Return some info about the servers shared objects and connections RPRHN(RRRR1RSRi(RR|((s0/usr/lib64/python2.7/multiprocessing/managers.pyt _debug_infoBscCsA|j|jd|j}zt|ddSWd|jXdS(s5 Return the number of shared objects RPRIN(RRRR1RSRi(RR|((s0/usr/lib64/python2.7/multiprocessing/managers.pyt_number_of_objectsLscCs|S(N((R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt __enter__VscCs|jdS(N(RD(Rtexc_typetexc_valtexc_tb((s0/usr/lib64/python2.7/multiprocessing/managers.pyt__exit__YscCs|jrtjdy8||d|}zt|ddWd|jXWntk rdnX|jdd|jrtjdt|drtjd |j |jdd |jrtjd qqqnt j |_ yt j|=Wntk rnXdS( sQ Shutdown the manager process; will be registered as a finalizer s#sending shutdown message to managerRPRDNRg?smanager still aliveRs'trying to `terminate()` manager processg?s#manager still alive after terminate(tis_aliveR RR1RSRiRnRR:RRRRRt_address_to_localtKeyError(tprocessRRPR!RR|((s0/usr/lib64/python2.7/multiprocessing/managers.pyR\s.          
cCs|jS(N(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt|tc sd|jkr$|jj|_ndkr9tn|pNtdd}|pftdd}|rx|jD] \}}q|Wn|||f|j<|rfd} | _t|| ndS(s9 Register a typeid with the manager type Rt _exposed_t_method_to_typeid_c stjd|j||\}}||jd|d|jd|}|j|jd|j}t|dd|j f|S(Ns)requesting creation of a shared %r objecttmanagerRPR~RL( R RtRRRRRR1RSR(RR-R.RtexptproxyR|(RR(s0/usr/lib64/python2.7/multiprocessing/managers.pyR=sN( t__dict__RtcopyRSt AutoProxyR9RR$tsetattr( RRRRR~Rt create_methodtkeyRR=((RRs0/usr/lib64/python2.7/multiprocessing/managers.pytregister~s   N(((R$R%R&RRCRRSRRRRRft classmethodRRRRRRRt staticmethodRtpropertyRRdR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs*   $     tProcessLocalSetcBseZdZdZRS(cCstj|ddS(NcSs |jS(N(tclear(R<((s0/usr/lib64/python2.7/multiprocessing/managers.pyRR(R tregister_after_fork(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCst|dfS(N((Rw(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs(R$R%RR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs cBseZdZiZejZd d d edZ dZ d idZ dZ dZ edZdZdZd Zd Zd ZRS(s. 
A base for proxies of shared objects cCs;tjjzPtjj|jd}|dkr\tjt f}|tj|jRVRuRRRRR1RSRR (RR>R|((s0/usr/lib64/python2.7/multiprocessing/managers.pyt_connects  c CsDy|jj}Wn@tk rRtjdtjj|j|jj}nX|j |j |||f|j \}}|dkr|S|dkr1|\}}|j j |jd} |jj|_| ||jd|j d|jd|} |j|jd|j}t|d d|jf| St||d S( sW Try to call a method of the referrent and return a copy of the result s#thread %r does not own a connections#RETURNs#PROXYiRRPR~RLN(RR RvR RtRVRuR>RR(RR)RRRRRRRRR1RSRR*( RR,R-R.R|R/R0R~RRR((s0/usr/lib64/python2.7/multiprocessing/managers.pyt _callmethods,      cCs |jdS(s9 Get a copy of the value of the referent s #GETVALUE(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt _getvaluesc Cs|j|jjd|j}t|dd|jftjd|jj |j j |j|j or|j j }tj|tjd|j|j||j|j |jfdd|_dS(NRPRKs INCREF %rR-Ri (RRRRR1RSRR RtRRtaddRRRRt_decrefRt_close(RR|R!((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs cCs|j|j|dks.|jtjkryEtjd|j||jd|}t |dd|jfWqt k r}tjd|qXntjd|j| rt |drtjdt j j|jj|`ndS(Ns DECREF %rRPRLs... decref failed %ss%DECREF %r -- manager already shutdownR s-thread %r has no more proxies so closing conn(tdiscardRRSRRRR RtRR1RnR:RVRuR>R Ri(RRPR!ttlstidsetRR|Rs((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs  cCsBd|_y|jWn$tk r=}tjd|nXdS(Nsincref failed: %s(RSRRRnR R(RRs((s0/usr/lib64/python2.7/multiprocessing/managers.pyR7s  cCsi}tjr"|j|ds0x%x(RwR$RRR(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyR#OscCs8y|jdSWn tk r3t|d dSXdS(sV Return representation of the referent (or a fall-back if that fails) R#is; '__str__()' failed>N(RRnR(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyR6Ss N((R$R%R&RR tForkAwareThreadLockRRSRdRRRRRRRRRRR#R6(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs %      cCsttdd}|r?|j|jkr?|j|jdS|jdtodttdt }|||d||SdS(s Function used for unpickling proxy objects. If possible the shared object is returned, or otherwise a proxy for it. 
R^iRKt _inheritingN( R9RRSRRTRtpopRdR(R?RRZR.RRK((s0/usr/lib64/python2.7/multiprocessing/managers.pyR`s cBse|}y|||fSWnek r1nXi}x |D]}d||f|Uq?We|ef|}||_||||f<|S(sB Return a proxy type whose methods are given by `exposed` sLdef %s(self, *args, **kwds): return self._callmethod(%r, args, kwds)(RRRwRR(R>R~t_cachetdictmetht ProxyType((s0/usr/lib64/python2.7/multiprocessing/managers.pyt MakeProxyTypeus    c Cst|d}|dkr\||jd|}zt|dd|f}Wd|jXn|dkr|dk r|j}n|dkrtj}ntd|j |}|||d|d|d|} t | _ | S(s* Return an auto-proxy for `token` iRPRGNs AutoProxy[%s]RRK( RQRSRR1RiRRRPRRRdR( RRZRRPR~RKRR|RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs     t NamespacecBseZdZdZRS(cKs|jj|dS(N(Rtupdate(RR.((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCss|jj}g}x=|D]5\}}|jds|jd||fqqW|jdtjd|S(NRAs%s=%rs Namespace(%s)s, (RRt startswithR;RR5R(RRR=R>R((s0/usr/lib64/python2.7/multiprocessing/managers.pyR#s (R$R%RR#(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs tValuecBs>eZedZdZdZdZeeeZRS(cCs||_||_dS(N(t _typecodet_value(RRRtlock((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs cCs|jS(N(R (R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRxscCs ||_dS(N(R (RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs dt|j|j|jfS(Ns %s(%r, %r)(RwR$R R (R((s0/usr/lib64/python2.7/multiprocessing/managers.pyR#s( R$R%RdRRxRR#RR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyR s     cCstj||S(N(R(RtsequenceR((s0/usr/lib64/python2.7/multiprocessing/managers.pytArrayst IteratorProxycBsDeZd ZdZdZdZdZd Zd ZRS( t__next__tnextR(tthrowRicCs|S(N((R((s0/usr/lib64/python2.7/multiprocessing/managers.pyt__iter__scGs|jd|S(NR(R(RR-((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscGs|jd|S(NR(R(RR-((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscGs|jd|S(NR((R(RR-((s0/usr/lib64/python2.7/multiprocessing/managers.pyR(scGs|jd|S(NR(R(RR-((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscGs|jd|S(NRi(R(RR-((s0/usr/lib64/python2.7/multiprocessing/managers.pyRis(RRR(RRi( 
R$R%RRRRR(RRi(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs     t AcquirerProxycBs5eZdZedZdZdZdZRS(RRcCs|jd|fS(NR(R(Rtblocking((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(RRRR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs(RR(R$R%RRdRRRR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs    tConditionProxycBs,eZdZd dZdZdZRS( RRtwaittnotifyt notify_allcCs|jd|fS(NR(R(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs(RRRRRN(R$R%RRSRRR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs  t EventProxycBs5eZdZdZdZdZd dZRS( tis_setRRRcCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs |jdS(NR(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRscCs|jd|fS(NR(R(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs(RRRRN(R$R%RRRRRSR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs    tNamespaceProxycBs)eZdZdZdZdZRS(t__getattribute__t __setattr__t __delattr__cCsB|ddkr tj||Stj|d}|d|fS(NiRARR(tobjectR(RRt callmethod((s0/usr/lib64/python2.7/multiprocessing/managers.pyt __getattr__scCsH|ddkr#tj|||Stj|d}|d||fS(NiRARR (R"R R(RRRR#((s0/usr/lib64/python2.7/multiprocessing/managers.pyR scCsB|ddkr tj||Stj|d}|d|fS(NiRARR!(R"R!R(RRR#((s0/usr/lib64/python2.7/multiprocessing/managers.pyR!s(RR R!(R$R%RR$R R!(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs  t ValueProxycBs/eZdZdZdZeeeZRS(RxRcCs |jdS(NRx(R(R((s0/usr/lib64/python2.7/multiprocessing/managers.pyRxscCs|jd|fS(NR(R(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyRs(RxR(R$R%RRxRRR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyR% s  t BaseListProxyt__add__t __contains__t __delitem__t 
__delslice__t __getitem__t __getslice__t__len__t__mul__t __reversed__t__rmul__t __setitem__t __setslice__R;tcounttextendtindextinsertRtremovetreverseRt__imul__t ListProxycBseZdZdZRS(cCs|jd|f|S(NR4(R(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyt__iadd__scCs|jd|f|S(NR9(R(RR((s0/usr/lib64/python2.7/multiprocessing/managers.pyR9 s(R$R%R;R9(((s0/usr/lib64/python2.7/multiprocessing/managers.pyR:s t DictProxyRRRRxthas_keytpopitemt setdefaultR tIteratort ArrayProxyt PoolProxytapplyt apply_asyncRitimaptimap_unorderedRtmapt map_asyncRt AsyncResultcBseZdZRS(s( Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocessing.Manager()` function creates started instances of this class. (R$R%R&(((s0/usr/lib64/python2.7/multiprocessing/managers.pyRCs tQueuet JoinableQueuetEventtLockRWt SemaphoretBoundedSemaphoret ConditionRRtdictRRR(Ut__all__tosR{tweakrefRVRRJRRtmultiprocessingRRRRR R tmultiprocessing.processR tmultiprocessing.forkingR R RRtmultiprocessing.utilRRtcPickleRt ImportErrorRRRR>RwR9t view_typesR"RR1R*RnR2R@RBRCRR[R\t XmlListenert XmlClientRQRRRRRRRSRdRRR RRRRRRR%R&R:R<RRARBRRLRMRWRNRORPRRQR(((s0/usr/lib64/python2.7/multiprocessing/managers.pyt$s      ."  4  ,                 PK!^^pool.pynu[# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # __all__ = ['Pool'] # # Imports # import threading import Queue import itertools import collections import time from multiprocessing import Process, cpu_count, TimeoutError from multiprocessing.util import Finalize, debug # # Constants representing the state of a pool # RUN = 0 CLOSE = 1 TERMINATE = 2 # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return map(*args) # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. 
Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "" % str(self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None): assert maxtasks is None or (type(maxtasks) in (int, long) and maxtasks > 0) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, IOError): debug('worker got EOFError or IOError -- exiting') break if task is None: debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception, e: result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) task = job = result = func = args = kwds = None completed += 1 debug('worker exiting after %d tasks' % completed) # # Class representing a process pool # class Pool(object): ''' Class which supports an async version of the `apply()` builtin ''' Process = Process def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None): self._setup_queues() self._taskqueue = Queue.Queue() self._cache = {} self._state = RUN self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: try: processes = cpu_count() except NotImplementedError: processes = 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if initializer is not None and not hasattr(initializer, '__call__'): raise TypeError('initializer must be a callable') self._processes = processes self._pool = [] self._repopulate_pool() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self, ) ) self._worker_handler.daemon = True 
self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) def _join_exited_workers(self): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. """ cleaned = False for i in reversed(range(len(self._pool))): worker = self._pool[i] if worker.exitcode is not None: # worker exited debug('cleaning up worker %d' % i) worker.join() cleaned = True del self._pool[i] return cleaned def _repopulate_pool(self): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. """ for i in range(self._processes - len(self._pool)): w = self.Process(target=worker, args=(self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild) ) self._pool.append(w) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() debug('added worker') def _maintain_pool(self): """Clean up any exited workers and start replacements for them. 
""" if self._join_exited_workers(): self._repopulate_pool() def _setup_queues(self): from .queues import SimpleQueue self._inqueue = SimpleQueue() self._outqueue = SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def apply(self, func, args=(), kwds={}): ''' Equivalent of `apply()` builtin ''' assert self._state == RUN return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Equivalent of `map()` builtin ''' assert self._state == RUN return self.map_async(func, iterable, chunksize).get() def imap(self, func, iterable, chunksize=1): ''' Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()` ''' assert self._state == RUN if chunksize == 1: result = IMapIterator(self._cache) self._taskqueue.put((((result._job, i, func, (x,), {}) for i, x in enumerate(iterable)), result._set_length)) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self._cache) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), result._set_length)) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary ''' assert self._state == RUN if chunksize == 1: result = IMapUnorderedIterator(self._cache) self._taskqueue.put((((result._job, i, func, (x,), {}) for i, x in enumerate(iterable)), result._set_length)) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self._cache) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), result._set_length)) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None): ''' Asynchronous equivalent of `apply()` builtin ''' assert self._state == RUN result = 
ApplyResult(self._cache, callback) self._taskqueue.put(([(result._job, None, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None): ''' Asynchronous equivalent of `map()` builtin ''' assert self._state == RUN if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self._cache, chunksize, len(iterable), callback) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), None)) return result @staticmethod def _handle_workers(pool): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. while thread._state == RUN or (pool._cache and thread._state != TERMINATE): pool._maintain_pool() time.sleep(0.1) # send sentinel to stop workers pool._taskqueue.put(None) debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool, cache): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): task = None i = -1 try: for i, task in enumerate(taskseq): if thread._state: debug('task handler found thread._state != RUN') break try: put(task) except Exception as e: job, ind = task[:2] try: cache[job]._set(ind, (False, e)) except KeyError: pass else: if set_length: debug('doing set_length()') set_length(i+1) continue break except Exception as ex: job, ind = task[:2] if task else (0, 0) if job in cache: cache[job]._set(ind + 1, (False, ex)) if set_length: debug('doing set_length()') set_length(i+1) finally: task = taskseq = job = None else: debug('task handler got sentinel') try: # tell result handler to finish when cache is empty debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more 
work debug('task handler sending sentinel to workers') for p in pool: put(None) except IOError: debug('task handler got IOError when sending sentinels') debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (IOError, EOFError): debug('result handler got EOFError/IOError -- exiting') return if thread._state: assert thread._state == TERMINATE debug('result handler found thread._state=TERMINATE') break if task is None: debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None while cache and thread._state != TERMINATE: try: task = get() except (IOError, EOFError): debug('result handler got EOFError/IOError -- exiting') return if task is None: debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None if hasattr(outqueue, '_reader'): debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
try: for i in range(10): if not outqueue._reader.poll(): break get() except (IOError, EOFError): pass debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE def terminate(self): debug('terminating pool') self._state = TERMINATE self._worker_handler._state = TERMINATE self._terminate() def join(self): debug('joining pool') assert self._state in (CLOSE, TERMINATE) self._worker_handler.join() self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once debug('finalizing pool') worker_handler._state = TERMINATE task_handler._state = TERMINATE debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) assert result_handler.is_alive() or len(cache) == 0 result_handler._state = TERMINATE outqueue.put(None) # sentinel # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join(1e100) # Terminate workers which haven't already finished. 
if pool and hasattr(pool[0], 'terminate'): debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join(1e100) debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join(1e100) if pool and hasattr(pool[0], 'terminate'): debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited debug('cleaning up worker %d' % p.pid) p.join() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, cache, callback): self._cond = threading.Condition(threading.Lock()) self._job = job_counter.next() self._cache = cache self._ready = False self._callback = callback cache[self._job] = self def ready(self): return self._ready def successful(self): assert self._ready return self._success def wait(self, timeout=None): self._cond.acquire() try: if not self._ready: self._cond.wait(timeout) finally: self._cond.release() def get(self, timeout=None): self.wait(timeout) if not self._ready: raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) self._cond.acquire() try: self._ready = True self._cond.notify() finally: self._cond.release() del self._cache[self._job] AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, cache, chunksize, length, callback): ApplyResult.__init__(self, cache, callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._ready = True del cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): success, result = success_result if 
success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result self._number_left -= 1 if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._cond.acquire() try: self._ready = True self._cond.notify() finally: self._cond.release() else: self._success = False self._value = result del self._cache[self._job] self._cond.acquire() try: self._ready = True self._cond.notify() finally: self._cond.release() # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, cache): self._cond = threading.Condition(threading.Lock()) self._job = job_counter.next() self._cache = cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): self._cond.acquire() try: try: item = self._items.popleft() except IndexError: if self._index == self._length: raise StopIteration self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: raise StopIteration raise TimeoutError finally: self._cond.release() success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): self._cond.acquire() try: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: del self._cache[self._job] finally: self._cond.release() def _set_length(self, length): self._cond.acquire() try: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] finally: self._cond.release() # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): self._cond.acquire() try: self._items.append(obj) self._index += 1 
self._cond.notify() if self._index == self._length: del self._cache[self._job] finally: self._cond.release() # # # class ThreadPool(Pool): from .dummy import Process def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = Queue.Queue() self._outqueue = Queue.Queue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # put sentinels at head of inqueue to make workers finish inqueue.not_empty.acquire() try: inqueue.queue.clear() inqueue.queue.extend([None] * size) inqueue.not_empty.notify_all() finally: inqueue.not_empty.release() PK!'@X@Xpool.pycnu[ {fc@sTdgZddlZddlZddlZddlZddlZddlmZmZm Z ddl m Z m Z dZ dZdZejZdZd efd YZefed Zdefd YZd efdYZeZdefdYZdefdYZdefdYZdefdYZdS(tPooliN(tProcesst cpu_countt TimeoutError(tFinalizetdebugiiicCs t|S(N(tmap(targs((s,/usr/lib64/python2.7/multiprocessing/pool.pytmapstar@stMaybeEncodingErrorcBs)eZdZdZdZdZRS(sVWraps possible unpickleable errors, so they can be safely sent through the socket.cCsAt||_t||_tt|j|j|jdS(N(treprtexctvaluetsuperR t__init__(tselfR R ((s,/usr/lib64/python2.7/multiprocessing/pool.pyRKscCsd|j|jfS(Ns(Error sending result: '%s'. 
Reason: '%s'(R R (R((s,/usr/lib64/python2.7/multiprocessing/pool.pyt__str__Ps cCsdt|S(Ns(tstr(R((s,/usr/lib64/python2.7/multiprocessing/pool.pyt__repr__Ts(t__name__t __module__t__doc__RRR(((s,/usr/lib64/python2.7/multiprocessing/pool.pyR Gs  cCs|dks6t|ttfkr0|dks6t|j}|j}t|drt|jj |j j n|dk r||nd}xI|dks|r||kry |}Wn"t t fk rt dPnX|dkrt dPn|\} } } } } yt| | | f}Wntk rO}t|f}nXy|| | |fWnMtk r}t||d}t d||| | t|ffnXd}} }} } } |d7}qWt d|dS(Nit_writers)worker got EOFError or IOError -- exitingsworker got sentinel -- exitingis0Possible encoding error while sending result: %ssworker exiting after %d tasks(tNonettypetinttlongtAssertionErrortputtgetthasattrRtcloset_readertEOFErrortIOErrorRtTruet ExceptiontFalseR (tinqueuetoutqueuet initializertinitargstmaxtasksRRt completedttasktjobtitfuncRtkwdstresulttetwrapped((s,/usr/lib64/python2.7/multiprocessing/pool.pytworkerXsB6     !    cBseZdZeZdddddZdZdZdZdZ didZ ddZ dd Z dd Z didd Zddd Zed ZedZedZedZdZdZdZdZedZedZRS(sH Class which supports an async version of the `apply()` builtin c Cs/|jtj|_i|_t|_||_||_||_|dkr|y t }Wq|t k rxd}q|Xn|dkrt dn|dk rt |d rtdn||_g|_|jtjdtjd|f|_t|j_t|j_|jjtjdtjd|j|j|j|j|jf|_t|j_t|j_|jjtjdtjd|j|j|jf|_ t|j _t|j _|j jt!||j"d|j|j#|j|j|j|j|j |jfdd|_$dS( Nis&Number of processes must be at least 1t__call__sinitializer must be a callablettargetRt exitpriorityi(%t _setup_queuestQueuet _taskqueuet_cachetRUNt_statet_maxtasksperchildt _initializert _initargsRRtNotImplementedErrort ValueErrorRt TypeErrort _processest_poolt_repopulate_poolt threadingtThreadRt_handle_workerst_worker_handlerR#tdaemontstartt _handle_taskst _quick_putt _outqueuet _task_handlert_handle_resultst _quick_gett_result_handlerRt_terminate_poolt_inqueuet _terminate(Rt processesR(R)tmaxtasksperchild((s,/usr/lib64/python2.7/multiprocessing/pool.pyRsX                               cCswt}xjttt|jD]M}|j|}|jdk r"td||jt }|j|=q"q"W|S(sCleanup after any worker processes which have exited due to reaching their specified lifetime. 
Returns True if any workers were cleaned up. scleaning up worker %dN( R%treversedtrangetlenREtexitcodeRRtjoinR#(RtcleanedR.R4((s,/usr/lib64/python2.7/multiprocessing/pool.pyt_join_exited_workerss"  c Csxt|jt|jD]}|jdtd|j|j|j|j |j f}|jj ||j j dd|_ t|_|jtdqWdS(sBring the number of pool processes up to the specified number, for use after reaping workers which have exited. R6RRt PoolWorkers added workerN(RZRDR[RERR4RUROR?R@R>tappendtnametreplaceR#RKRLR(RR.tw((s,/usr/lib64/python2.7/multiprocessing/pool.pyRFs#   cCs|jr|jndS(sEClean up any exited workers and start replacements for them. N(R_RF(R((s,/usr/lib64/python2.7/multiprocessing/pool.pyt_maintain_pools cCsPddlm}||_||_|jjj|_|jjj|_ dS(Ni(t SimpleQueue( tqueuesRfRURORtsendRNR trecvRR(RRf((s,/usr/lib64/python2.7/multiprocessing/pool.pyR8s   cCs.|jtkst|j|||jS(s1 Equivalent of `apply()` builtin (R=R<Rt apply_asyncR(RR/RR0((s,/usr/lib64/python2.7/multiprocessing/pool.pytapplyscCs.|jtkst|j|||jS(s/ Equivalent of `map()` builtin (R=R<Rt map_asyncR(RR/titerablet chunksize((s,/usr/lib64/python2.7/multiprocessing/pool.pyRsics|jtkst|dkrft|j|jjfdt|DjfS|dksxtt j ||}t|j|jjfdt|DjfdDSdS(sZ Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()` ic3s0|]&\}}j||fifVqdS(N(t_job(t.0R.tx(R/R1(s,/usr/lib64/python2.7/multiprocessing/pool.pys sc3s0|]&\}}j|t|fifVqdS(N(RoR(RpR.Rq(R1(s,/usr/lib64/python2.7/multiprocessing/pool.pys scss"|]}|D] }|Vq qdS(N((Rptchunktitem((s,/usr/lib64/python2.7/multiprocessing/pool.pys sN( R=R<Rt IMapIteratorR;R:Rt enumeratet _set_lengthRt _get_tasks(RR/RmRnt task_batches((R/R1s,/usr/lib64/python2.7/multiprocessing/pool.pytimaps cs|jtkst|dkrft|j|jjfdt|DjfS|dksxtt j ||}t|j|jjfdt|DjfdDSdS(sK Like `imap()` method but ordering of results is arbitrary ic3s0|]&\}}j||fifVqdS(N(Ro(RpR.Rq(R/R1(s,/usr/lib64/python2.7/multiprocessing/pool.pys sc3s0|]&\}}j|t|fifVqdS(N(RoR(RpR.Rq(R1(s,/usr/lib64/python2.7/multiprocessing/pool.pys scss"|]}|D] }|Vq qdS(N((RpRrRs((s,/usr/lib64/python2.7/multiprocessing/pool.pys 
!sN( R=R<RtIMapUnorderedIteratorR;R:RRuRvRRw(RR/RmRnRx((R/R1s,/usr/lib64/python2.7/multiprocessing/pool.pytimap_unordereds cCsV|jtkstt|j|}|jj|jd|||fgdf|S(s> Asynchronous equivalent of `apply()` builtin N( R=R<Rt ApplyResultR;R:RRoR(RR/RR0tcallbackR1((s,/usr/lib64/python2.7/multiprocessing/pool.pyRj#s+cs|jtkstt|ds3t|}n|dkr}tt|t|jd\}}|r}|d7}q}nt|dkrd}nt j |||}t |j |t|||j jfdt|DdfS(s< Asynchronous equivalent of `map()` builtin t__len__iiic3s0|]&\}}j|t|fifVqdS(N(RoR(RpR.Rq(R1(s,/usr/lib64/python2.7/multiprocessing/pool.pys =sN(R=R<RRtlistRtdivmodR[RERRwt MapResultR;R:RRu(RR/RmRnR}textraRx((R1s,/usr/lib64/python2.7/multiprocessing/pool.pyRl,s ( cCsotj}xB|jtks6|jrP|jtkrP|jtjdqW|j j dt ddS(Ng?sworker handler exiting( RGtcurrent_threadR=R<R;t TERMINATERettimetsleepR:RRR(tpooltthread((s,/usr/lib64/python2.7/multiprocessing/pool.pyRIAs  * cCs tj}xt|jdD]q\}}d}d} zGyxt|D]\} }|jrmtdPny||WqJtk r} |d \} } y|| j | t | fWqt k rqXqJXqJW|rtd|| dnwPWn}tk r|} |r|d nd \} } | |krX|| j | dt | fn|r}td|| dq}nXWdd}}} XqWtdy@td|j dtd x|D]}|dqWWnt k rtd nXtd dS( Nis'task handler found thread._state != RUNisdoing set_length()iistask handler got sentinels/task handler sending sentinel to result handlers(task handler sending sentinel to workerss/task handler got IOError when sending sentinelsstask handler exiting(ii(RGRtiterRRRuR=RR$t_setR%tKeyErrorRR"(t taskqueueRR'RtcacheRttaskseqt set_lengthR,R.R2R-tindtextp((s,/usr/lib64/python2.7/multiprocessing/pool.pyRMNsR       !       
cCstj}xy |}Wn"ttfk r@tddSX|jrm|jtks_ttdPn|dkrtdPn|\}}}y||j ||Wnt k rnXd}}}qWx|r|jtkry |}Wn"ttfk rtddSX|dkr:tdqn|\}}}y||j ||Wnt k rtnXd}}}qWt |drtdy5x.t dD] }|j jsPn|qWWqttfk rqXntdt||jdS( Ns.result handler got EOFError/IOError -- exitings,result handler found thread._state=TERMINATEsresult handler got sentinels&result handler ignoring extra sentinelR s"ensuring that outqueue is not fulli s7result handler exiting: len(cache)=%s, thread._state=%s(RGRR"R!RR=RRRRRRRZR tpollR[(R'RRRR,R-R.tobj((s,/usr/lib64/python2.7/multiprocessing/pool.pyRQs\              ccsDt|}x1ttj||}|s1dS||fVqWdS(N(Rttuplet itertoolstislice(R/tittsizeRq((s,/usr/lib64/python2.7/multiprocessing/pool.pyRws  cCstddS(Ns:pool objects cannot be passed between processes or pickled(RA(R((s,/usr/lib64/python2.7/multiprocessing/pool.pyt __reduce__scCs5td|jtkr1t|_t|j_ndS(Ns closing pool(RR=R<tCLOSERJ(R((s,/usr/lib64/python2.7/multiprocessing/pool.pyRs  cCs-tdt|_t|j_|jdS(Nsterminating pool(RRR=RJRV(R((s,/usr/lib64/python2.7/multiprocessing/pool.pyt terminates   cCsntd|jttfks%t|jj|jj|jjx|j D]}|jqVWdS(Ns joining pool( RR=RRRRJR]RPRSRE(RR((s,/usr/lib64/python2.7/multiprocessing/pool.pyR]s    cCsWtd|jjx9|jrR|jjrR|jjtjdqWdS(Ns7removing tasks from inqueue until task handler finishedi( Rt_rlocktacquiretis_aliveR RRiRR(R&t task_handlerR((s,/usr/lib64/python2.7/multiprocessing/pool.pyt_help_stuff_finishs    c Cstdt|_t|_td|j||t||jsct|dksctt|_|jdtdt j |k r|j dn|rt |ddrtdx-|D]"} | j dkr| jqqWntdt j |k r$|j dntd t j |k rP|j dn|rt |ddrtd x;|D]0} | jrztd | j| j qzqzWndS( Nsfinalizing pools&helping task handler/workers to finishisjoining worker handlerg}Ô%ITRsterminating workerssjoining task handlersjoining result handlersjoining pool workersscleaning up worker %d(RRR=RR[RRRRRGRR]RR\Rtpid( tclsRR&R'Rtworker_handlerRtresult_handlerRR((s,/usr/lib64/python2.7/multiprocessing/pool.pyRTs8    $          N((((RRRRRRR_RFReR8RkRRyR{RjRlt staticmethodRIRMRQRwRRRR]Rt classmethodRT(((s,/usr/lib64/python2.7/multiprocessing/pool.pyRs0  :    
     4<     R|cBsDeZdZdZdZddZddZdZRS(cCsStjtj|_tj|_||_t|_ ||_ |||js  N( RRRRRRRRR(((s,/usr/lib64/python2.7/multiprocessing/pool.pyR|s      RcBseZdZdZRS(cCstj|||t|_dg||_||_|dkr`d|_t|_||j =n||t |||_dS(Ni( R|RR#RRRt _chunksizet _number_leftRRotbool(RRRntlengthR}((s,/usr/lib64/python2.7/multiprocessing/pool.pyRRs      cCs|\}}|r||j||j|d|j+|jd8_|jdkr|jrn|j|jn|j|j=|jjzt|_ |jj Wd|jj XqnWt |_ ||_|j|j=|jjzt|_ |jj Wd|jj XdS(Nii(RRRRR;RoRRR#RRRR%R(RR.tsuccess_resulttsuccessR1((s,/usr/lib64/python2.7/multiprocessing/pool.pyR^s* "         (RRRR(((s,/usr/lib64/python2.7/multiprocessing/pool.pyRPs RtcBs>eZdZdZddZeZdZdZRS(cCsktjtj|_tj|_||_tj |_ d|_ d|_ i|_|||j#s,        ,.-IPK!8(tstr(R((s,/usr/lib64/python2.7/multiprocessing/pool.pyt__repr__Ts(t__name__t __module__t__doc__RRR(((s,/usr/lib64/python2.7/multiprocessing/pool.pyR Gs  cCs|j}|j}t|dr>|jj|jjn|dk rW||nd}xI|dks~|r||kry |}Wn"ttfk rt dPnX|dkrt dPn|\} } } } } yt | | | f}Wnt k r}t |f}nXy|| | |fWnMt k r}t ||d}t d||| | t |ffnXd}} }} } } |d7}q`Wt d|dS(Nt_writeris)worker got EOFError or IOError -- exitingsworker got sentinel -- exitingis0Possible encoding error while sending result: %ssworker exiting after %d tasks(tputtgetthasattrRtcloset_readertNonetEOFErrortIOErrorRtTruet ExceptiontFalseR (tinqueuetoutqueuet initializertinitargstmaxtasksRRt completedttasktjobtitfuncRtkwdstresulttetwrapped((s,/usr/lib64/python2.7/multiprocessing/pool.pytworkerXs@     !    
cBseZdZeZdddddZdZdZdZdZ didZ ddZ dd Z dd Z didd Zddd Zed ZedZedZedZdZdZdZdZedZedZRS(sH Class which supports an async version of the `apply()` builtin c Cs/|jtj|_i|_t|_||_||_||_|dkr|y t }Wq|t k rxd}q|Xn|dkrt dn|dk rt |d rtdn||_g|_|jtjdtjd|f|_t|j_t|j_|jjtjdtjd|j|j|j|j|jf|_t|j_t|j_|jjtjdtjd|j|j|jf|_ t|j _t|j _|j jt!||j"d|j|j#|j|j|j|j|j |jfdd|_$dS( Nis&Number of processes must be at least 1t__call__sinitializer must be a callablettargetRt exitpriorityi(%t _setup_queuestQueuet _taskqueuet_cachetRUNt_statet_maxtasksperchildt _initializert _initargsRRtNotImplementedErrort ValueErrorRt TypeErrort _processest_poolt_repopulate_poolt threadingtThreadRt_handle_workerst_worker_handlerRtdaemontstartt _handle_taskst _quick_putt _outqueuet _task_handlert_handle_resultst _quick_gett_result_handlerRt_terminate_poolt_inqueuet _terminate(Rt processesR$R%tmaxtasksperchild((s,/usr/lib64/python2.7/multiprocessing/pool.pyRsX                               cCswt}xjttt|jD]M}|j|}|jdk r"td||jt }|j|=q"q"W|S(sCleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. scleaning up worker %dN( R!treversedtrangetlenRAtexitcodeRRtjoinR(RtcleanedR*R0((s,/usr/lib64/python2.7/multiprocessing/pool.pyt_join_exited_workerss"  c Csxt|jt|jD]}|jdtd|j|j|j|j |j f}|jj ||j j dd|_ t|_|jtdqWdS(sBring the number of pool processes up to the specified number, for use after reaping workers which have exited. R2RRt PoolWorkers added workerN(RVR@RWRARR0RQRKR;R<R:tappendtnametreplaceRRGRHR(RR*tw((s,/usr/lib64/python2.7/multiprocessing/pool.pyRBs#   cCs|jr|jndS(sEClean up any exited workers and start replacements for them. 
N(R[RB(R((s,/usr/lib64/python2.7/multiprocessing/pool.pyt_maintain_pools cCsPddlm}||_||_|jjj|_|jjj|_ dS(Ni(t SimpleQueue( tqueuesRbRQRKRtsendRJRtrecvRN(RRb((s,/usr/lib64/python2.7/multiprocessing/pool.pyR4s   cCs|j|||jS(s1 Equivalent of `apply()` builtin (t apply_asyncR(RR+RR,((s,/usr/lib64/python2.7/multiprocessing/pool.pytapplyscCs|j|||jS(s/ Equivalent of `map()` builtin (t map_asyncR(RR+titerablet chunksize((s,/usr/lib64/python2.7/multiprocessing/pool.pyRsics|dkrQt|j|jjfdt|DjfStj||}t|j|jjfdt|DjfdDSdS(sZ Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()` ic3s0|]&\}}j||fifVqdS(N(t_job(t.0R*tx(R+R-(s,/usr/lib64/python2.7/multiprocessing/pool.pys sc3s0|]&\}}j|t|fifVqdS(N(RkR(RlR*Rm(R-(s,/usr/lib64/python2.7/multiprocessing/pool.pys scss"|]}|D] }|Vq qdS(N((Rltchunktitem((s,/usr/lib64/python2.7/multiprocessing/pool.pys sN(t IMapIteratorR7R6Rt enumeratet _set_lengthRt _get_tasks(RR+RiRjt task_batches((R+R-s,/usr/lib64/python2.7/multiprocessing/pool.pytimaps cs|dkrQt|j|jjfdt|DjfStj||}t|j|jjfdt|DjfdDSdS(sK Like `imap()` method but ordering of results is arbitrary ic3s0|]&\}}j||fifVqdS(N(Rk(RlR*Rm(R+R-(s,/usr/lib64/python2.7/multiprocessing/pool.pys sc3s0|]&\}}j|t|fifVqdS(N(RkR(RlR*Rm(R-(s,/usr/lib64/python2.7/multiprocessing/pool.pys scss"|]}|D] }|Vq qdS(N((RlRnRo((s,/usr/lib64/python2.7/multiprocessing/pool.pys !sN(tIMapUnorderedIteratorR7R6RRqRrRRs(RR+RiRjRt((R+R-s,/usr/lib64/python2.7/multiprocessing/pool.pytimap_unordereds cCsAt|j|}|jj|jd|||fgdf|S(s> Asynchronous equivalent of `apply()` builtin N(t ApplyResultR7R6RRkR(RR+RR,tcallbackR-((s,/usr/lib64/python2.7/multiprocessing/pool.pyRf#s+cst|dst|}n|dkrhtt|t|jd\}}|rh|d7}qhnt|dkrd}ntj|||}t|j |t|||j j fdt |DdfS(s< Asynchronous equivalent of `map()` builtin t__len__iiic3s0|]&\}}j|t|fifVqdS(N(RkR(RlR*Rm(R-(s,/usr/lib64/python2.7/multiprocessing/pool.pys =sN( RtlistRtdivmodRWRARRst MapResultR7R6RRq(RR+RiRjRytextraRt((R-s,/usr/lib64/python2.7/multiprocessing/pool.pyRh,s ( 
cCsotj}xB|jtks6|jrP|jtkrP|jtjdqW|j j dt ddS(Ng?sworker handler exiting( RCtcurrent_threadR9R8R7t TERMINATERattimetsleepR6RRR(tpooltthread((s,/usr/lib64/python2.7/multiprocessing/pool.pyREAs  * cCs tj}xt|jdD]q\}}d}d} zGyxt|D]\} }|jrmtdPny||WqJtk r} |d \} } y|| j | t | fWqt k rqXqJXqJW|rtd|| dnwPWn}tk r|} |r|d nd \} } | |krX|| j | dt | fn|r}td|| dq}nXWdd}}} XqWtdy@td|j dtd x|D]}|dqWWnt k rtd nXtd dS( Nis'task handler found thread._state != RUNisdoing set_length()iistask handler got sentinels/task handler sending sentinel to result handlers(task handler sending sentinel to workerss/task handler got IOError when sending sentinelsstask handler exiting(ii(RCRtiterRRRqR9RR t_setR!tKeyErrorRR(t taskqueueRR#RtcacheRttaskseqt set_lengthR(R*R.R)tindtextp((s,/usr/lib64/python2.7/multiprocessing/pool.pyRINsR       !       cCstj}xy |}Wn"ttfk r@tddSX|jrXtdPn|dkrrtdPn|\}}}y||j||Wntk rnXd}}}qWx|rq|jt krqy |}Wn"ttfk rtddSX|dkr%tdqn|\}}}y||j||Wntk r_nXd}}}qWt |drtdy5x.t dD] }|j j sPn|qWWqttfk rqXntdt||jdS( Ns.result handler got EOFError/IOError -- exitings,result handler found thread._state=TERMINATEsresult handler got sentinels&result handler ignoring extra sentinelRs"ensuring that outqueue is not fulli s7result handler exiting: len(cache)=%s, thread._state=%s(RCRRRRR9RRRRRRVRtpollRW(R#RRRR(R)R*tobj((s,/usr/lib64/python2.7/multiprocessing/pool.pyRMsZ              ccsDt|}x1ttj||}|s1dS||fVqWdS(N(Rttuplet itertoolstislice(R+tittsizeRm((s,/usr/lib64/python2.7/multiprocessing/pool.pyRss  cCstddS(Ns:pool objects cannot be passed between processes or pickled(R=(R((s,/usr/lib64/python2.7/multiprocessing/pool.pyt __reduce__scCs5td|jtkr1t|_t|j_ndS(Ns closing pool(RR9R8tCLOSERF(R((s,/usr/lib64/python2.7/multiprocessing/pool.pyRs  cCs-tdt|_t|j_|jdS(Nsterminating pool(RRR9RFRR(R((s,/usr/lib64/python2.7/multiprocessing/pool.pyt terminates   cCsStd|jj|jj|jjx|jD]}|jq;WdS(Ns joining pool(RRFRYRLRORA(RR((s,/usr/lib64/python2.7/multiprocessing/pool.pyRYs     cCsWtd|jjx9|jrR|jjrR|jjtjdqWdS(Ns7removing 
tasks from inqueue until task handler finishedi( Rt_rlocktacquiretis_aliveRRReRR(R"t task_handlerR((s,/usr/lib64/python2.7/multiprocessing/pool.pyt_help_stuff_finishs    c Cstdt|_t|_td|j||t|t|_|jdtdtj|k r|j dn|rt |ddrtdx-|D]"} | j dkr| j qqWntdtj|k r|j dntd tj|k r,|j dn|rt |ddrtd x;|D]0} | j rVtd | j| j qVqVWndS( Nsfinalizing pools&helping task handler/workers to finishsjoining worker handlerg}Ô%ITiRsterminating workerssjoining task handlersjoining result handlersjoining pool workersscleaning up worker %d(RRR9RRWRRRCRRYRRXRRtpid( tclsRR"R#Rtworker_handlerRtresult_handlerRR((s,/usr/lib64/python2.7/multiprocessing/pool.pyRPs6              N((((RRRRRRR[RBRaR4RgRRuRwRfRht staticmethodRERIRMRsRRRRYRt classmethodRP(((s,/usr/lib64/python2.7/multiprocessing/pool.pyRs0  :         4<     RxcBsDeZdZdZdZddZddZdZRS(cCsStjtj|_tj|_||_t|_ ||_ |||js  N( RRRRRRRRR(((s,/usr/lib64/python2.7/multiprocessing/pool.pyRxs      R}cBseZdZdZRS(cCstj|||t|_dg||_||_|dkr`d|_t|_||j =n||t |||_dS(Ni( RxRRRRRt _chunksizet _number_leftRRktbool(RRRjtlengthRy((s,/usr/lib64/python2.7/multiprocessing/pool.pyRRs      cCs|\}}|r||j||j|d|j+|jd8_|jdkr|jrn|j|jn|j|j=|jjzt|_ |jj Wd|jj XqnWt |_ ||_|j|j=|jjzt|_ |jj Wd|jj XdS(Nii(RRRRR7RkRRRRRRR!R(RR*tsuccess_resulttsuccessR-((s,/usr/lib64/python2.7/multiprocessing/pool.pyR^s* "         (RRRR(((s,/usr/lib64/python2.7/multiprocessing/pool.pyR}Ps RpcBs>eZdZdZddZeZdZdZRS(cCsktjtj|_tj|_||_tj |_ d|_ d|_ i|_|||j#s,        ,.-IPK!~%% process.pynu[# # Module providing the `Process` class which emulates `threading.Thread` # # multiprocessing/process.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
# __all__ = ['Process', 'current_process', 'active_children'] # # Imports # import os import sys import signal import itertools # # # try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) except OSError: ORIGINAL_DIR = None # # Public functions # def current_process(): ''' Return process object representing the current process ''' return _current_process def active_children(): ''' Return list of process objects corresponding to live child processes ''' _cleanup() return list(_current_process._children) # # # def _cleanup(): # check for processes which have finished for p in list(_current_process._children): if p._popen.poll() is not None: _current_process._children.discard(p) # # The `Process` class # class Process(object): ''' Process objects represent activity that is run in a separate process The class is analagous to `threading.Thread` ''' _Popen = None def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): assert group is None, 'group argument must be None for now' count = _current_process._counter.next() self._identity = _current_process._identity + (count,) self._authkey = _current_process._authkey self._daemonic = _current_process._daemonic self._tempdir = _current_process._tempdir self._parent_pid = os.getpid() self._popen = None self._target = target self._args = tuple(args) self._kwargs = dict(kwargs) self._name = name or type(self).__name__ + '-' + \ ':'.join(str(i) for i in self._identity) def run(self): ''' Method to be run in sub-process; can be overridden in sub-class ''' if self._target: self._target(*self._args, **self._kwargs) def start(self): ''' Start child process ''' assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' assert not _current_process._daemonic, \ 'daemonic processes are not allowed to have children' _cleanup() if self._Popen is not None: Popen = self._Popen else: from .forking import Popen self._popen = 
Popen(self) # Avoid a refcycle if the target function holds an indirect # reference to the process object (see bpo-30775) del self._target, self._args, self._kwargs _current_process._children.add(self) def terminate(self): ''' Terminate process; sends SIGTERM signal or uses TerminateProcess() ''' self._popen.terminate() def join(self, timeout=None): ''' Wait until child process terminates ''' assert self._parent_pid == os.getpid(), 'can only join a child process' assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: _current_process._children.discard(self) def is_alive(self): ''' Return whether process is alive ''' if self is _current_process: return True assert self._parent_pid == os.getpid(), 'can only test a child process' if self._popen is None: return False returncode = self._popen.poll() if returncode is None: return True else: _current_process._children.discard(self) return False @property def name(self): return self._name @name.setter def name(self, name): assert isinstance(name, basestring), 'name must be a string' self._name = name @property def daemon(self): ''' Return whether process is a daemon ''' return self._daemonic @daemon.setter def daemon(self, daemonic): ''' Set whether process is a daemon ''' assert self._popen is None, 'process has already started' self._daemonic = daemonic @property def authkey(self): return self._authkey @authkey.setter def authkey(self, authkey): ''' Set authorization key of process ''' self._authkey = AuthenticationString(authkey) @property def exitcode(self): ''' Return exit code of process or `None` if it has yet to stop ''' if self._popen is None: return self._popen return self._popen.poll() @property def ident(self): ''' Return identifier (PID) of process or `None` if it has yet to start ''' if self is _current_process: return os.getpid() else: return self._popen and self._popen.pid pid = ident def __repr__(self): if self is _current_process: status = 
'started' elif self._parent_pid != os.getpid(): status = 'unknown' elif self._popen is None: status = 'initial' else: if self._popen.poll() is not None: status = self.exitcode else: status = 'started' if type(status) in (int, long): if status == 0: status = 'stopped' else: status = 'stopped[%s]' % _exitcode_to_name.get(status, status) return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, status, self._daemonic and ' daemon' or '') ## def _bootstrap(self): from . import util global _current_process try: self._children = set() self._counter = itertools.count(1) try: sys.stdin.close() sys.stdin = open(os.devnull) except (OSError, ValueError): pass _current_process = self util._finalizer_registry.clear() util._run_after_forkers() util.info('child process calling self.run()') try: self.run() exitcode = 0 finally: util._exit_function() except SystemExit, e: if not e.args: exitcode = 1 elif isinstance(e.args[0], (int, long)): exitcode = int(e.args[0]) else: sys.stderr.write(str(e.args[0]) + '\n') sys.stderr.flush() exitcode = 1 except: exitcode = 1 import traceback sys.stderr.write('Process %s:\n' % self.name) sys.stderr.flush() traceback.print_exc() util.info('process exiting with exitcode %d' % exitcode) return exitcode # # We subclass bytes to avoid accidental transmission of auth keys over network # class AuthenticationString(bytes): def __reduce__(self): from .forking import Popen if not Popen.thread_is_spawning(): raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons' ) return AuthenticationString, (bytes(self),) # # Create object representing the main process # class _MainProcess(Process): def __init__(self): self._identity = () self._daemonic = False self._name = 'MainProcess' self._parent_pid = None self._popen = None self._counter = itertools.count(1) self._children = set() self._authkey = AuthenticationString(os.urandom(32)) self._tempdir = None _current_process = _MainProcess() del _MainProcess # # Give names to 
some return codes # _exitcode_to_name = {} for name, signum in signal.__dict__.items(): if name[:3]=='SIG' and '_' not in name: _exitcode_to_name[-signum] = name PK!X%% process.pycnu[ {fc@s2dddgZddlZddlZddlZddlZyejjejZWne k rtdZnXdZ dZ dZ defdYZd efd YZd efd YZea[iZxGejjD]6\ZZed dkrdekreee ks(R tAssertionErrorRt_countertnextt _identityt_authkeyt _daemonict_tempdirtostgetpidt _parent_pidRt_targetttuplet_argstdictt_kwargsttypet__name__tjoint_name(tselftgroupttargettnametargstkwargstcount((s//usr/lib64/python2.7/multiprocessing/process.pyt__init__^s     cCs&|jr"|j|j|jndS(sQ Method to be run in sub-process; can be overridden in sub-class N(RRR(R$((s//usr/lib64/python2.7/multiprocessing/process.pytrunms cCs|jdkstd|jtjks<tdtj sRtdt|j dk rt|j }nddl m }|||_|` |` |`tjj|dS(s% Start child process scannot start a process twices:can only start a process object created by current processs3daemonic processes are not allowed to have childreni(tPopenN(RR RRRRRRRt_PopentforkingR-RRRRtadd(R$R-((s//usr/lib64/python2.7/multiprocessing/process.pytstartts    cCs|jjdS(sT Terminate process; sends SIGTERM signal or uses TerminateProcess() N(Rt terminate(R$((s//usr/lib64/python2.7/multiprocessing/process.pyR2scCsq|jtjks!td|jdk s<td|jj|}|dk rmtjj |ndS(s5 Wait until child process terminates scan only join a child processscan only join a started processN( RRRRRR twaitRRR (R$ttimeouttres((s//usr/lib64/python2.7/multiprocessing/process.pyR"s ! cCs{|tkrtS|jtjks1td|jdkrDtS|jj }|dkrctStj j |tSdS(s1 Return whether process is alive scan only test a child processN( RtTrueRRRRRR tFalseRRR (R$t returncode((s//usr/lib64/python2.7/multiprocessing/process.pytis_alives ! 
cCs|jS(N(R#(R$((s//usr/lib64/python2.7/multiprocessing/process.pyR'scCs(t|tstd||_dS(Nsname must be a string(t isinstancet basestringRR#(R$R'((s//usr/lib64/python2.7/multiprocessing/process.pyR'scCs|jS(s4 Return whether process is a daemon (R(R$((s//usr/lib64/python2.7/multiprocessing/process.pytdaemonscCs(|jdkstd||_dS(s1 Set whether process is a daemon sprocess has already startedN(RR RR(R$tdaemonic((s//usr/lib64/python2.7/multiprocessing/process.pyR<scCs|jS(N(R(R$((s//usr/lib64/python2.7/multiprocessing/process.pytauthkeyscCst||_dS(s2 Set authorization key of process N(tAuthenticationStringR(R$R>((s//usr/lib64/python2.7/multiprocessing/process.pyR>scCs#|jdkr|jS|jjS(sM Return exit code of process or `None` if it has yet to stop N(RR R(R$((s//usr/lib64/python2.7/multiprocessing/process.pytexitcodescCs-|tkrtjS|jo(|jjSdS(sU Return identifier (PID) of process or `None` if it has yet to start N(RRRRtpid(R$((s//usr/lib64/python2.7/multiprocessing/process.pytidents  cCs|tkrd}n]|jtjkr3d}n?|jdkrKd}n'|jjdk rl|j}nd}t|t t fkr|dkrd}qdt j ||}ndt|j |j||jrdpd fS( Ntstartedtunknowntinitialitstoppeds stopped[%s]s<%s(%s, %s%s)>s daemont(RRRRRR RR@R tinttlongt_exitcode_to_nametgetR!R#R(R$tstatus((s//usr/lib64/python2.7/multiprocessing/process.pyt__repr__s       cCsddlm}yt|_tjd|_y#tjj t t j t_Wnt tfk rmnX|a|jj|j|jdz|jd}Wd|jXWntk rH}|jsd}qt|jdttfrt|jd}qtjjt|jddtjjd}nGd}ddl}tjjd|j tjj|j!nX|jd||S( Ni(tutils child process calling self.run()is is Process %s: s process exiting with exitcode %d("RGRNtsetRt itertoolsR*RtsyststdintclosetopenRtdevnulltOSErrort ValueErrorRt_finalizer_registrytcleart_run_after_forkerstinfoR,t_exit_functiont SystemExitR(R:RHRItstderrtwriteRtflusht tracebackR't print_exc(R$RNR@teRa((s//usr/lib64/python2.7/multiprocessing/process.pyt _bootstrapsB         !    
N((R!t __module__t__doc__R R.R+R,R1R2R"R9tpropertyR'tsetterR<R>R@RBRARMRd(((s//usr/lib64/python2.7/multiprocessing/process.pyRVs&       R?cBseZdZRS(cCs>ddlm}|js+tdntt|ffS(Ni(R-sJPickling an AuthenticationString object is disallowed for security reasons(R/R-tthread_is_spawningt TypeErrorR?tbytes(R$R-((s//usr/lib64/python2.7/multiprocessing/process.pyt __reduce__'s   (R!ReRl(((s//usr/lib64/python2.7/multiprocessing/process.pyR?&st _MainProcesscBseZdZRS(cCspd|_t|_d|_d|_d|_tjd|_ t |_ t t jd|_d|_dS(Nt MainProcessii ((RR7RR#R RRRPR*RRORR?RturandomRR(R$((s//usr/lib64/python2.7/multiprocessing/process.pyR+6s      (R!ReR+(((s//usr/lib64/python2.7/multiprocessing/process.pyRm4sitSIGt_(t__all__RRQtsignalRPtpathtabspathtgetcwdt ORIGINAL_DIRRVR RRRtobjectRRkR?RmRRJt__dict__titemsR'tsignum(((s//usr/lib64/python2.7/multiprocessing/process.pyt#s(         PK!n$"" process.pyonu[ {fc@s2dddgZddlZddlZddlZddlZyejjejZWne k rtdZnXdZ dZ dZ defdYZd efd YZd efd YZea[iZxGejjD]6\ZZed dkrdekreee ks(Rt_countertnextt _identityt_authkeyt _daemonict_tempdirtostgetpidt _parent_pidR Rt_targetttuplet_argstdictt_kwargsttypet__name__tjoint_name(tselftgroupttargettnametargstkwargstcount((s//usr/lib64/python2.7/multiprocessing/process.pyt__init__^s     cCs&|jr"|j|j|jndS(sQ Method to be run in sub-process; can be overridden in sub-class N(RRR(R#((s//usr/lib64/python2.7/multiprocessing/process.pytrunms cCsgt|jdk r"|j}nddlm}|||_|`|`|`t j j |dS(s% Start child process i(tPopenN( Rt_PopenR tforkingR,RRRRRRtadd(R#R,((s//usr/lib64/python2.7/multiprocessing/process.pytstartts  cCs|jjdS(sT Terminate process; sends SIGTERM signal or uses TerminateProcess() N(Rt terminate(R#((s//usr/lib64/python2.7/multiprocessing/process.pyR1scCs5|jj|}|dk r1tjj|ndS(s5 Wait until child process terminates N(RtwaitR RRR (R#ttimeouttres((s//usr/lib64/python2.7/multiprocessing/process.pyR!s cCsZ|tkrtS|jdkr#tS|jj}|dkrBtStjj|tSdS(s1 Return whether process is alive N(RtTrueRR tFalseRRR (R#t 
returncode((s//usr/lib64/python2.7/multiprocessing/process.pytis_alives  cCs|jS(N(R"(R#((s//usr/lib64/python2.7/multiprocessing/process.pyR&scCs ||_dS(N(R"(R#R&((s//usr/lib64/python2.7/multiprocessing/process.pyR&scCs|jS(s4 Return whether process is a daemon (R(R#((s//usr/lib64/python2.7/multiprocessing/process.pytdaemonscCs ||_dS(s1 Set whether process is a daemon N(R(R#tdaemonic((s//usr/lib64/python2.7/multiprocessing/process.pyR9scCs|jS(N(R(R#((s//usr/lib64/python2.7/multiprocessing/process.pytauthkeyscCst||_dS(s2 Set authorization key of process N(tAuthenticationStringR(R#R;((s//usr/lib64/python2.7/multiprocessing/process.pyR;scCs#|jdkr|jS|jjS(sM Return exit code of process or `None` if it has yet to stop N(RR R(R#((s//usr/lib64/python2.7/multiprocessing/process.pytexitcodescCs-|tkrtjS|jo(|jjSdS(sU Return identifier (PID) of process or `None` if it has yet to start N(RRRRtpid(R#((s//usr/lib64/python2.7/multiprocessing/process.pytidents  cCs|tkrd}n]|jtjkr3d}n?|jdkrKd}n'|jjdk rl|j}nd}t|t t fkr|dkrd}qdt j ||}ndt|j |j||jrdpd fS( Ntstartedtunknowntinitialitstoppeds stopped[%s]s<%s(%s, %s%s)>s daemont(RRRRRR RR=Rtinttlongt_exitcode_to_nametgetR R"R(R#tstatus((s//usr/lib64/python2.7/multiprocessing/process.pyt__repr__s       cCsddlm}yt|_tjd|_y#tjj t t j t_Wnt tfk rmnX|a|jj|j|jdz|jd}Wd|jXWntk rH}|jsd}qt|jdttfrt|jd}qtjjt|jddtjjd}nGd}ddl}tjjd|j tjj|j!nX|jd||S( Ni(tutils child process calling self.run()is is Process %s: s process exiting with exitcode %d("RDRKtsetRt itertoolsR)RtsyststdintclosetopenRtdevnulltOSErrort ValueErrorRt_finalizer_registrytcleart_run_after_forkerstinfoR+t_exit_functiont SystemExitR't isinstanceRERFtstderrtwriteRtflusht tracebackR&t print_exc(R#RKR=teR_((s//usr/lib64/python2.7/multiprocessing/process.pyt _bootstrapsB         !    
N((R t __module__t__doc__R R-R*R+R0R1R!R8tpropertyR&tsetterR9R;R=R?R>RJRb(((s//usr/lib64/python2.7/multiprocessing/process.pyRVs&       R<cBseZdZRS(cCs>ddlm}|js+tdntt|ffS(Ni(R,sJPickling an AuthenticationString object is disallowed for security reasons(R.R,tthread_is_spawningt TypeErrorR<tbytes(R#R,((s//usr/lib64/python2.7/multiprocessing/process.pyt __reduce__'s   (R RcRj(((s//usr/lib64/python2.7/multiprocessing/process.pyR<&st _MainProcesscBseZdZRS(cCspd|_t|_d|_d|_d|_tjd|_ t |_ t t jd|_d|_dS(Nt MainProcessii ((RR6RR"R RRRMR)RRLRR<RturandomRR(R#((s//usr/lib64/python2.7/multiprocessing/process.pyR*6s      (R RcR*(((s//usr/lib64/python2.7/multiprocessing/process.pyRk4sitSIGt_(t__all__RRNtsignalRMtpathtabspathtgetcwdt ORIGINAL_DIRRSR RRRtobjectRRiR<RkRRGt__dict__titemsR&tsignum(((s//usr/lib64/python2.7/multiprocessing/process.pyt#s(         PK!Wc,00 queues.pynu[# # Module implementing queues # # multiprocessing/queues.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] import sys import os import threading import collections import time import atexit import weakref from Queue import Empty, Full import _multiprocessing from . import Pipe from .synchronize import Lock, BoundedSemaphore, Semaphore, Condition from .util import debug, info, Finalize, register_after_fork, is_exiting from .forking import assert_spawning # # Queue type using a pipe, buffer and thread # class Queue(object): def __init__(self, maxsize=0): if maxsize <= 0: maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX self._maxsize = maxsize self._reader, self._writer = Pipe(duplex=False) self._rlock = Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: self._wlock = Lock() self._sem = BoundedSemaphore(maxsize) self._after_fork() if sys.platform != 'win32': register_after_fork(self, Queue._after_fork) def __getstate__(self): assert_spawning(self) return (self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) def __setstate__(self, state): (self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) = state self._after_fork() def _after_fork(self): debug('Queue._after_fork()') self._notempty = threading.Condition(threading.Lock()) self._buffer = collections.deque() self._thread = None self._jointhread = None self._joincancelled = False self._closed = False self._close = None self._send = self._writer.send 
self._recv = self._reader.recv self._poll = self._reader.poll def put(self, obj, block=True, timeout=None): assert not self._closed if not self._sem.acquire(block, timeout): raise Full self._notempty.acquire() try: if self._thread is None: self._start_thread() self._buffer.append(obj) self._notempty.notify() finally: self._notempty.release() def get(self, block=True, timeout=None): if block and timeout is None: self._rlock.acquire() try: res = self._recv() self._sem.release() return res finally: self._rlock.release() else: if block: deadline = time.time() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: timeout = deadline - time.time() if not self._poll(timeout): raise Empty elif not self._poll(): raise Empty res = self._recv() self._sem.release() return res finally: self._rlock.release() def qsize(self): # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value() def empty(self): return not self._poll() def full(self): return self._sem._semlock._is_zero() def get_nowait(self): return self.get(False) def put_nowait(self, obj): return self.put(obj, False) def close(self): self._closed = True try: self._reader.close() finally: close = self._close if close: self._close = None close() def join_thread(self): debug('Queue.join_thread()') assert self._closed if self._jointhread: self._jointhread() def cancel_join_thread(self): debug('Queue.cancel_join_thread()') self._joincancelled = True try: self._jointhread.cancel() except AttributeError: pass def _start_thread(self): debug('Queue._start_thread()') # Start thread which transfers data from buffer to pipe self._buffer.clear() self._thread = threading.Thread( target=Queue._feed, args=(self._buffer, self._notempty, self._send, self._wlock, self._writer.close), name='QueueFeederThread' ) self._thread.daemon = True debug('doing self._thread.start()') self._thread.start() debug('... 
done self._thread.start()') # On process exit we will wait for data to be flushed to pipe. if not self._joincancelled: self._jointhread = Finalize( self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5 ) # Send sentinel to the thread queue object when garbage collected self._close = Finalize( self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10 ) @staticmethod def _finalize_join(twr): debug('joining queue thread') thread = twr() if thread is not None: thread.join() debug('... queue thread joined') else: debug('... queue thread already dead') @staticmethod def _finalize_close(buffer, notempty): debug('telling queue thread to quit') notempty.acquire() try: buffer.append(_sentinel) notempty.notify() finally: notempty.release() @staticmethod def _feed(buffer, notempty, send, writelock, close): debug('starting thread to feed data to pipe') nacquire = notempty.acquire nrelease = notempty.release nwait = notempty.wait bpopleft = buffer.popleft sentinel = _sentinel if sys.platform != 'win32': wacquire = writelock.acquire wrelease = writelock.release else: wacquire = None while 1: try: nacquire() try: if not buffer: nwait() finally: nrelease() try: while 1: obj = bpopleft() if obj is sentinel: debug('feeder thread got sentinel -- exiting') close() return if wacquire is None: send(obj) else: wacquire() try: send(obj) finally: wrelease() except IndexError: pass except Exception as e: # Since this runs in a daemon thread the resources it uses # may be become unusable while the process is cleaning up. # We ignore errors which happen after the process has # started to cleanup. if is_exiting(): info('error in queue thread: %s', e) return else: import traceback traceback.print_exc() _sentinel = object() # # A queue type which also supports join() and task_done() methods # # Note that if you do not call task_done() for each finished task then # eventually the counter's semaphore may overflow causing Bad Things # to happen. 
# class JoinableQueue(Queue): def __init__(self, maxsize=0): Queue.__init__(self, maxsize) self._unfinished_tasks = Semaphore(0) self._cond = Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) def __setstate__(self, state): Queue.__setstate__(self, state[:-2]) self._cond, self._unfinished_tasks = state[-2:] def put(self, obj, block=True, timeout=None): assert not self._closed if not self._sem.acquire(block, timeout): raise Full self._notempty.acquire() self._cond.acquire() try: if self._thread is None: self._start_thread() self._buffer.append(obj) self._unfinished_tasks.release() self._notempty.notify() finally: self._cond.release() self._notempty.release() def task_done(self): self._cond.acquire() try: if not self._unfinished_tasks.acquire(False): raise ValueError('task_done() called too many times') if self._unfinished_tasks._semlock._is_zero(): self._cond.notify_all() finally: self._cond.release() def join(self): self._cond.acquire() try: if not self._unfinished_tasks._semlock._is_zero(): self._cond.wait() finally: self._cond.release() # # Simplified Queue type -- really just a locked pipe # class SimpleQueue(object): def __init__(self): self._reader, self._writer = Pipe(duplex=False) self._rlock = Lock() if sys.platform == 'win32': self._wlock = None else: self._wlock = Lock() self._make_methods() def empty(self): return not self._reader.poll() def __getstate__(self): assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock) = state self._make_methods() def _make_methods(self): recv = self._reader.recv racquire, rrelease = self._rlock.acquire, self._rlock.release def get(): racquire() try: return recv() finally: rrelease() self.get = get if self._wlock is None: # writes to a message oriented win32 pipe are atomic self.put = self._writer.send else: send = self._writer.send wacquire, wrelease = 
self._wlock.acquire, self._wlock.release def put(obj): wacquire() try: return send(obj) finally: wrelease() self.put = put PK!zG,, queues.pycnu[ {fc@s>dddgZddlZddlZddlZddlZddlZddlZddlZddlm Z m Z ddl Z ddl m Z ddlmZmZmZmZdd lmZmZmZmZmZdd lmZdefd YZeZdefd YZdefd YZdS(tQueuet SimpleQueuet JoinableQueueiN(tEmptytFulli(tPipe(tLocktBoundedSemaphoret Semaphoret Condition(tdebugtinfotFinalizetregister_after_forkt is_exiting(tassert_spawningcBseZddZdZdZdZeddZeddZ dZ dZ d Z d Z d Zd Zd ZdZdZedZedZedZRS(icCs|dkrtjj}n||_tdt\|_|_t|_ t j |_ t jdkrud|_n t|_t||_|jt jdkrt|tjndS(Nitduplextwin32(t_multiprocessingtSemLockt SEM_VALUE_MAXt_maxsizeRtFalset_readert_writerRt_rlocktostgetpidt_opidtsystplatformtNonet_wlockRt_semt _after_forkR R(tselftmaxsize((s./usr/lib64/python2.7/multiprocessing/queues.pyt__init__:s      cCs8t||j|j|j|j|j|j|jfS(N(RRRRRR R!R(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyt __getstate__Ls cCs>|\|_|_|_|_|_|_|_|jdS(N(RRRRR R!RR"(R#tstate((s./usr/lib64/python2.7/multiprocessing/queues.pyt __setstate__Qs0cCstdtjtj|_tj|_d|_ d|_ t |_ t |_ d|_|jj|_|jj|_|jj|_dS(NsQueue._after_fork()(R t threadingR Rt _notemptyt collectionstdequet_bufferRt_threadt _jointhreadRt_joincancelledt_closedt_closeRtsendt_sendRtrecvt_recvtpollt_poll(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyR"Vs      cCs|j st|jj||s.tn|jjz=|jdkrZ|jn|j j ||jj Wd|jj XdS(N( R1tAssertionErrorR!tacquireRR*R.Rt _start_threadR-tappendtnotifytrelease(R#tobjtblockttimeout((s./usr/lib64/python2.7/multiprocessing/queues.pytputcs   cCs|rT|dkrT|jjz!|j}|jj|SWd|jjXn|rmtj|}n|jj||stnzg|r|tj}|j|stqn|jstn|j}|jj|SWd|jjXdS(N( RRR:R6R!R>ttimeRR8(R#R@RAtrestdeadline((s./usr/lib64/python2.7/multiprocessing/queues.pytgetqs,         cCs|j|jjjS(N(RR!t_semlockt _get_value(R#((s./usr/lib64/python2.7/multiprocessing/queues.pytqsizescCs |j S(N(R8(R#((s./usr/lib64/python2.7/multiprocessing/queues.pytemptyscCs|jjjS(N(R!RGt_is_zero(R#((s./usr/lib64/python2.7/multiprocessing/queues.pytfullscCs 
|jtS(N(RFR(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyt get_nowaitscCs|j|tS(N(RBR(R#R?((s./usr/lib64/python2.7/multiprocessing/queues.pyt put_nowaitscCsDt|_z|jjWd|j}|r?d|_|nXdS(N(tTrueR1RtcloseR2R(R#RP((s./usr/lib64/python2.7/multiprocessing/queues.pyRPs   cCs3td|jst|jr/|jndS(NsQueue.join_thread()(R R1R9R/(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyt join_threads  cCs<tdt|_y|jjWntk r7nXdS(NsQueue.cancel_join_thread()(R ROR0R/tcanceltAttributeError(R#((s./usr/lib64/python2.7/multiprocessing/queues.pytcancel_join_threads    c Cstd|jjtjdtjd|j|j|j|j |j j fdd|_ t |j _td|j jtd|jst|j tjtj|j gdd |_nt|tj|j|jgdd |_dS( NsQueue._start_thread()ttargettargstnametQueueFeederThreadsdoing self._thread.start()s... done self._thread.start()t exitpriorityii (R R-tclearR)tThreadRt_feedR*R4R RRPR.ROtdaemontstartR0R t_finalize_jointweakreftrefR/t_finalize_closeR2(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyR;s(            cCsDtd|}|dk r6|jtdn tddS(Nsjoining queue threads... queue thread joineds... 
queue thread already dead(R Rtjoin(ttwrtthread((s./usr/lib64/python2.7/multiprocessing/queues.pyR_s      cCsAtd|jz|jt|jWd|jXdS(Nstelling queue thread to quit(R R:R<t _sentinelR=R>(tbuffertnotempty((s./usr/lib64/python2.7/multiprocessing/queues.pyRbs    cCs[td|j}|j}|j}|j}t} tjdkrX|j} |j} nd} xy|z|s~|nWd|Xynxg|} | | krtd|dS| dkr|| q| z|| Wd| XqWWnt k r nXWqat k rR} t r9t d| dSddl }|jqaXqaWdS(Ns$starting thread to feed data to pipeRs%feeder thread got sentinel -- exitingserror in queue thread: %si(R R:R>twaittpopleftRfRRRt IndexErrort ExceptionRR t tracebackt print_exc(RgRhR3t writelockRPtnacquiretnreleasetnwaittbpoplefttsentineltwacquiretwreleaseR?teRm((s./usr/lib64/python2.7/multiprocessing/queues.pyR\sL                N(t__name__t __module__R%R&R(R"RORRBRFRIRJRLRMRNRPRQRTR;t staticmethodR_RbR\(((s./usr/lib64/python2.7/multiprocessing/queues.pyR8s$              cBsGeZddZdZdZeddZdZdZ RS(icCs/tj||td|_t|_dS(Ni(RR%Rt_unfinished_tasksR t_cond(R#R$((s./usr/lib64/python2.7/multiprocessing/queues.pyR%)scCstj||j|jfS(N(RR&R|R{(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyR&.scCs.tj||d |d\|_|_dS(Ni(RR(R|R{(R#R'((s./usr/lib64/python2.7/multiprocessing/queues.pyR(1scCs|j st|jj||s.tn|jj|jjzJ|jdkrg|j n|j j ||j j |jjWd|jj |jj XdS(N(R1R9R!R:RR*R|R.RR;R-R<R{R>R=(R#R?R@RA((s./usr/lib64/python2.7/multiprocessing/queues.pyRB5s      cCsi|jjzG|jjts1tdn|jjjrS|jjnWd|jjXdS(Ns!task_done() called too many times( R|R:R{Rt ValueErrorRGRKt notify_allR>(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyt task_doneFs cCsH|jjz&|jjjs2|jjnWd|jjXdS(N(R|R:R{RGRKRiR>(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyRcPs  N( RxRyR%R&R(RORRBRRc(((s./usr/lib64/python2.7/multiprocessing/queues.pyR's     cBs5eZdZdZdZdZdZRS(cCs\tdt\|_|_t|_tjdkrBd|_ n t|_ |j dS(NRR( RRRRRRRRRR t _make_methods(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyR%^s    cCs|jj S(N(RR7(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyRJgscCs&t||j|j|j|jfS(N(RRRRR 
(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyR&js cCs,|\|_|_|_|_|jdS(N(RRRR R(R#R'((s./usr/lib64/python2.7/multiprocessing/queues.pyR(nscs|jj|jj|jjfd}||_|jdkrd|jj |_ nC|jj |jj|jjfd}||_ dS(Ncs!z SWdXdS(N(((tracquireR5trrelease(s./usr/lib64/python2.7/multiprocessing/queues.pyRFus cs$z|SWdXdS(N((R?(R3RuRv(s./usr/lib64/python2.7/multiprocessing/queues.pyRBs( RR5RR:R>RFR RRR3RB(R#RFRB((RR5RR3RuRvs./usr/lib64/python2.7/multiprocessing/queues.pyRrs   (RxRyR%RJR&R(R(((s./usr/lib64/python2.7/multiprocessing/queues.pyR\s    (t__all__RRR)R+RCtatexitR`RRRRtRt synchronizeRRRR tutilR R R R RtforkingRtobjectRfRR(((s./usr/lib64/python2.7/multiprocessing/queues.pyt#s"        "( 5PK!,, queues.pyonu[ {fc@s>dddgZddlZddlZddlZddlZddlZddlZddlZddlm Z m Z ddl Z ddl m Z ddlmZmZmZmZdd lmZmZmZmZmZdd lmZdefd YZeZdefd YZdefd YZdS(tQueuet SimpleQueuet JoinableQueueiN(tEmptytFulli(tPipe(tLocktBoundedSemaphoret Semaphoret Condition(tdebugtinfotFinalizetregister_after_forkt is_exiting(tassert_spawningcBseZddZdZdZdZeddZeddZ dZ dZ d Z d Z d Zd Zd ZdZdZedZedZedZRS(icCs|dkrtjj}n||_tdt\|_|_t|_ t j |_ t jdkrud|_n t|_t||_|jt jdkrt|tjndS(Nitduplextwin32(t_multiprocessingtSemLockt SEM_VALUE_MAXt_maxsizeRtFalset_readert_writerRt_rlocktostgetpidt_opidtsystplatformtNonet_wlockRt_semt _after_forkR R(tselftmaxsize((s./usr/lib64/python2.7/multiprocessing/queues.pyt__init__:s      cCs8t||j|j|j|j|j|j|jfS(N(RRRRRR R!R(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyt __getstate__Ls cCs>|\|_|_|_|_|_|_|_|jdS(N(RRRRR R!RR"(R#tstate((s./usr/lib64/python2.7/multiprocessing/queues.pyt __setstate__Qs0cCstdtjtj|_tj|_d|_ d|_ t |_ t |_ d|_|jj|_|jj|_|jj|_dS(NsQueue._after_fork()(R t threadingR Rt _notemptyt collectionstdequet_bufferRt_threadt _jointhreadRt_joincancelledt_closedt_closeRtsendt_sendRtrecvt_recvtpollt_poll(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyR"Vs      cCs}|jj||stn|jjz=|jdkrJ|jn|jj||jj Wd|jj XdS(N( R!tacquireRR*R.Rt 
_start_threadR-tappendtnotifytrelease(R#tobjtblockttimeout((s./usr/lib64/python2.7/multiprocessing/queues.pytputcs   cCs|rT|dkrT|jjz!|j}|jj|SWd|jjXn|rmtj|}n|jj||stnzg|r|tj}|j|stqn|jstn|j}|jj|SWd|jjXdS(N( RRR9R6R!R=ttimeRR8(R#R?R@trestdeadline((s./usr/lib64/python2.7/multiprocessing/queues.pytgetqs,         cCs|j|jjjS(N(RR!t_semlockt _get_value(R#((s./usr/lib64/python2.7/multiprocessing/queues.pytqsizescCs |j S(N(R8(R#((s./usr/lib64/python2.7/multiprocessing/queues.pytemptyscCs|jjjS(N(R!RFt_is_zero(R#((s./usr/lib64/python2.7/multiprocessing/queues.pytfullscCs |jtS(N(RER(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyt get_nowaitscCs|j|tS(N(RAR(R#R>((s./usr/lib64/python2.7/multiprocessing/queues.pyt put_nowaitscCsDt|_z|jjWd|j}|r?d|_|nXdS(N(tTrueR1RtcloseR2R(R#RO((s./usr/lib64/python2.7/multiprocessing/queues.pyROs   cCs$td|jr |jndS(NsQueue.join_thread()(R R/(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyt join_threads  cCs<tdt|_y|jjWntk r7nXdS(NsQueue.cancel_join_thread()(R RNR0R/tcanceltAttributeError(R#((s./usr/lib64/python2.7/multiprocessing/queues.pytcancel_join_threads    c Cstd|jjtjdtjd|j|j|j|j |j j fdd|_ t |j _td|j jtd|jst|j tjtj|j gdd |_nt|tj|j|jgdd |_dS( NsQueue._start_thread()ttargettargstnametQueueFeederThreadsdoing self._thread.start()s... done self._thread.start()t exitpriorityii (R R-tclearR)tThreadRt_feedR*R4R RROR.RNtdaemontstartR0R t_finalize_jointweakreftrefR/t_finalize_closeR2(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyR:s(            cCsDtd|}|dk r6|jtdn tddS(Nsjoining queue threads... queue thread joineds... 
queue thread already dead(R Rtjoin(ttwrtthread((s./usr/lib64/python2.7/multiprocessing/queues.pyR^s      cCsAtd|jz|jt|jWd|jXdS(Nstelling queue thread to quit(R R9R;t _sentinelR<R=(tbuffertnotempty((s./usr/lib64/python2.7/multiprocessing/queues.pyRas    cCs[td|j}|j}|j}|j}t} tjdkrX|j} |j} nd} xy|z|s~|nWd|Xynxg|} | | krtd|dS| dkr|| q| z|| Wd| XqWWnt k r nXWqat k rR} t r9t d| dSddl }|jqaXqaWdS(Ns$starting thread to feed data to pipeRs%feeder thread got sentinel -- exitingserror in queue thread: %si(R R9R=twaittpopleftReRRRt IndexErrort ExceptionRR t tracebackt print_exc(RfRgR3t writelockROtnacquiretnreleasetnwaittbpoplefttsentineltwacquiretwreleaseR>teRl((s./usr/lib64/python2.7/multiprocessing/queues.pyR[sL                N(t__name__t __module__R%R&R(R"RNRRARERHRIRKRLRMRORPRSR:t staticmethodR^RaR[(((s./usr/lib64/python2.7/multiprocessing/queues.pyR8s$              cBsGeZddZdZdZeddZdZdZ RS(icCs/tj||td|_t|_dS(Ni(RR%Rt_unfinished_tasksR t_cond(R#R$((s./usr/lib64/python2.7/multiprocessing/queues.pyR%)scCstj||j|jfS(N(RR&R{Rz(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyR&.scCs.tj||d |d\|_|_dS(Ni(RR(R{Rz(R#R'((s./usr/lib64/python2.7/multiprocessing/queues.pyR(1scCs|jj||stn|jj|jjzJ|jdkrW|jn|jj ||j j |jj Wd|jj |jj XdS(N( R!R9RR*R{R.RR:R-R;RzR=R<(R#R>R?R@((s./usr/lib64/python2.7/multiprocessing/queues.pyRA5s      cCsi|jjzG|jjts1tdn|jjjrS|jjnWd|jjXdS(Ns!task_done() called too many times( R{R9RzRt ValueErrorRFRJt notify_allR=(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyt task_doneFs cCsH|jjz&|jjjs2|jjnWd|jjXdS(N(R{R9RzRFRJRhR=(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyRbPs  N( RwRxR%R&R(RNRRAR~Rb(((s./usr/lib64/python2.7/multiprocessing/queues.pyR's     cBs5eZdZdZdZdZdZRS(cCs\tdt\|_|_t|_tjdkrBd|_ n t|_ |j dS(NRR( RRRRRRRRRR t _make_methods(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyR%^s    cCs|jj S(N(RR7(R#((s./usr/lib64/python2.7/multiprocessing/queues.pyRIgscCs&t||j|j|j|jfS(N(RRRRR (R#((s./usr/lib64/python2.7/multiprocessing/queues.pyR&js 
cCs,|\|_|_|_|_|jdS(N(RRRR R(R#R'((s./usr/lib64/python2.7/multiprocessing/queues.pyR(nscs|jj|jj|jjfd}||_|jdkrd|jj |_ nC|jj |jj|jjfd}||_ dS(Ncs!z SWdXdS(N(((tracquireR5trrelease(s./usr/lib64/python2.7/multiprocessing/queues.pyREus cs$z|SWdXdS(N((R>(R3RtRu(s./usr/lib64/python2.7/multiprocessing/queues.pyRAs( RR5RR9R=RER RRR3RA(R#RERA((RR5RR3RtRus./usr/lib64/python2.7/multiprocessing/queues.pyRrs   (RwRxR%RIR&R(R(((s./usr/lib64/python2.7/multiprocessing/queues.pyR\s    (t__all__RRR)R+RBtatexitR_RRRRtRt synchronizeRRRR tutilR R R R RtforkingRtobjectReRR(((s./usr/lib64/python2.7/multiprocessing/queues.pyt#s"        "( 5PK!Ȍ reduction.pynu[# # Module to allow connection and socket objects to be transferred # between processes # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # __all__ = [] import os import sys import socket import threading import _multiprocessing from multiprocessing import current_process from multiprocessing.forking import Popen, duplicate, close, ForkingPickler from multiprocessing.util import register_after_fork, debug, sub_debug from multiprocessing.connection import Client, Listener # # # if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')): raise ImportError('pickling of connections not supported') # # Platform specific definitions # if sys.platform == 'win32': import _subprocess from _multiprocessing import win32 def send_handle(conn, handle, destination_pid): process_handle = win32.OpenProcess( win32.PROCESS_ALL_ACCESS, False, destination_pid ) try: new_handle = duplicate(handle, process_handle) conn.send(new_handle) finally: close(process_handle) def recv_handle(conn): return conn.recv() else: def send_handle(conn, handle, destination_pid): _multiprocessing.sendfd(conn.fileno(), handle) def recv_handle(conn): return _multiprocessing.recvfd(conn.fileno()) # # Support for a per-process server thread which caches pickled handles # _cache = set() def _reset(obj): global _lock, _listener, _cache for h in _cache: close(h) _cache.clear() _lock = threading.Lock() _listener = None _reset(None) register_after_fork(_reset, _reset) def _get_listener(): global _listener if _listener is None: _lock.acquire() try: if _listener is None: debug('starting listener and thread for sending handles') 
_listener = Listener(authkey=current_process().authkey) t = threading.Thread(target=_serve) t.daemon = True t.start() finally: _lock.release() return _listener def _serve(): from .util import is_exiting, sub_warning while 1: try: conn = _listener.accept() handle_wanted, destination_pid = conn.recv() _cache.remove(handle_wanted) send_handle(conn, handle_wanted, destination_pid) close(handle_wanted) conn.close() except: if not is_exiting(): import traceback sub_warning( 'thread for sharing handles raised exception :\n' + '-'*79 + '\n' + traceback.format_exc() + '-'*79 ) # # Functions to be used for pickling/unpickling objects with handles # def reduce_handle(handle): if Popen.thread_is_spawning(): return (None, Popen.duplicate_for_child(handle), True) dup_handle = duplicate(handle) _cache.add(dup_handle) sub_debug('reducing handle %d', handle) return (_get_listener().address, dup_handle, False) def rebuild_handle(pickled_data): address, handle, inherited = pickled_data if inherited: return handle sub_debug('rebuilding handle %d', handle) conn = Client(address, authkey=current_process().authkey) conn.send((handle, os.getpid())) new_handle = recv_handle(conn) conn.close() return new_handle # # Register `_multiprocessing.Connection` with `ForkingPickler` # def reduce_connection(conn): rh = reduce_handle(conn.fileno()) return rebuild_connection, (rh, conn.readable, conn.writable) def rebuild_connection(reduced_handle, readable, writable): handle = rebuild_handle(reduced_handle) return _multiprocessing.Connection( handle, readable=readable, writable=writable ) ForkingPickler.register(_multiprocessing.Connection, reduce_connection) # # Register `socket.socket` with `ForkingPickler` # def fromfd(fd, family, type_, proto=0): s = socket.fromfd(fd, family, type_, proto) if s.__class__ is not socket.socket: s = socket.socket(_sock=s) return s def reduce_socket(s): reduced_handle = reduce_handle(s.fileno()) return rebuild_socket, (reduced_handle, s.family, s.type, s.proto) def 
rebuild_socket(reduced_handle, family, type_, proto): fd = rebuild_handle(reduced_handle) _sock = fromfd(fd, family, type_, proto) close(fd) return _sock ForkingPickler.register(socket.socket, reduce_socket) # # Register `_multiprocessing.PipeConnection` with `ForkingPickler` # if sys.platform == 'win32': def reduce_pipe_connection(conn): rh = reduce_handle(conn.fileno()) return rebuild_pipe_connection, (rh, conn.readable, conn.writable) def rebuild_pipe_connection(reduced_handle, readable, writable): handle = rebuild_handle(reduced_handle) return _multiprocessing.PipeConnection( handle, readable=readable, writable=writable ) ForkingPickler.register(_multiprocessing.PipeConnection, reduce_pipe_connection) PK!-'~~ reduction.pycnu[ {fc@sgZddlZddlZddlZddlZddlZddlmZddlm Z m Z m Z m Z ddl mZmZmZddlmZmZejdkpeedsednejdkrddlZdd lmZd Zd Znd Zd ZeadZedeeedZdZ dZ!dZ"dZ#dZ$e j%ej&e#ddZ'dZ(dZ)e j%eje(ejdkrdZ*dZ+e j%ej,e*ndS(iN(tcurrent_process(tPopent duplicatetclosetForkingPickler(tregister_after_forktdebugt sub_debug(tClienttListenertwin32trecvfds%pickling of connections not supported(R cCsJtjtjt|}z t||}|j|Wdt|XdS(N(R t OpenProcesstPROCESS_ALL_ACCESStFalseRtsendR(tconnthandletdestination_pidtprocess_handlet new_handle((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt send_handleAs cCs |jS(N(trecv(R((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt recv_handleKscCstj|j|dS(N(t_multiprocessingtsendfdtfileno(RRR((s1/usr/lib64/python2.7/multiprocessing/reduction.pyROscCstj|jS(N(RR R(R((s1/usr/lib64/python2.7/multiprocessing/reduction.pyRRscCs;xtD]}t|qWtjtjadadS(N(t_cacheRtcleart threadingtLockt_locktNonet _listener(tobjth((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt_reset[s    cCstdkr~tjzWtdkrltdtdtjatj dt }t |_ |j nWdtjXntS(Ns0starting listener and thread for sending handlestauthkeyttarget(R!R RtacquireRR RR%RtThreadt_servetTruetdaemontstarttrelease(tt((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt _get_listenerfs     cCsddlm}m}xyStj}|j\}}tj|t|||t ||j 
Wq|sddl }|dddd|j ddqqXqWdS(Ni(t is_exitingt sub_warningis.thread for sharing handles raised exception : t-iOs ( tutilR0R1R!tacceptRRtremoveRRt tracebackt format_exc(R0R1Rt handle_wantedRR6((s1/usr/lib64/python2.7/multiprocessing/reduction.pyR)ws     cCs[tjr"dtj|tfSt|}tj|td|t j |t fS(Nsreducing handle %d( Rtthread_is_spawningR tduplicate_for_childR*RRtaddRR/taddressR(Rt dup_handle((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt reduce_handles     cCsq|\}}}|r|Std|t|dtj}|j|tjft|}|j|S(Nsrebuilding handle %dR%( RRRR%RtostgetpidRR(t pickled_dataR<Rt inheritedRR((s1/usr/lib64/python2.7/multiprocessing/reduction.pytrebuild_handles   cCs+t|j}t||j|jffS(N(R>Rtrebuild_connectiontreadabletwritable(Rtrh((s1/usr/lib64/python2.7/multiprocessing/reduction.pytreduce_connectionscCs%t|}tj|d|d|S(NRERF(RCRt Connection(treduced_handleRERFR((s1/usr/lib64/python2.7/multiprocessing/reduction.pyRDs icCsCtj||||}|jtjk r?tjd|}n|S(Nt_sock(tsockettfromfdt __class__(tfdtfamilyttype_tprotots((s1/usr/lib64/python2.7/multiprocessing/reduction.pyRMscCs1t|j}t||j|j|jffS(N(R>Rtrebuild_socketRPttypeRR(RSRJ((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt reduce_socketscCs/t|}t||||}t||S(N(RCRMR(RJRPRQRRRORK((s1/usr/lib64/python2.7/multiprocessing/reduction.pyRTs  cCs+t|j}t||j|jffS(N(R>Rtrebuild_pipe_connectionRERF(RRG((s1/usr/lib64/python2.7/multiprocessing/reduction.pytreduce_pipe_connectionscCs%t|}tj|d|d|S(NRERF(RCRtPipeConnection(RJRERFR((s1/usr/lib64/python2.7/multiprocessing/reduction.pyRWs (-t__all__R?tsysRLRRtmultiprocessingRtmultiprocessing.forkingRRRRtmultiprocessing.utilRRRtmultiprocessing.connectionRR tplatformthasattrt ImportErrort _subprocessR RRtsetRR$R R/R)R>RCRHRDtregisterRIRMRVRTRXRWRY(((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt$sJ     "                   PK!-'~~ reduction.pyonu[ {fc@sgZddlZddlZddlZddlZddlZddlmZddlm Z m Z m Z m Z ddl mZmZmZddlmZmZejdkpeedsednejdkrddlZdd lmZd Zd Znd Zd ZeadZedeeedZdZ dZ!dZ"dZ#dZ$e j%ej&e#ddZ'dZ(dZ)e j%eje(ejdkrdZ*dZ+e 
j%ej,e*ndS(iN(tcurrent_process(tPopent duplicatetclosetForkingPickler(tregister_after_forktdebugt sub_debug(tClienttListenertwin32trecvfds%pickling of connections not supported(R cCsJtjtjt|}z t||}|j|Wdt|XdS(N(R t OpenProcesstPROCESS_ALL_ACCESStFalseRtsendR(tconnthandletdestination_pidtprocess_handlet new_handle((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt send_handleAs cCs |jS(N(trecv(R((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt recv_handleKscCstj|j|dS(N(t_multiprocessingtsendfdtfileno(RRR((s1/usr/lib64/python2.7/multiprocessing/reduction.pyROscCstj|jS(N(RR R(R((s1/usr/lib64/python2.7/multiprocessing/reduction.pyRRscCs;xtD]}t|qWtjtjadadS(N(t_cacheRtcleart threadingtLockt_locktNonet _listener(tobjth((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt_reset[s    cCstdkr~tjzWtdkrltdtdtjatj dt }t |_ |j nWdtjXntS(Ns0starting listener and thread for sending handlestauthkeyttarget(R!R RtacquireRR RR%RtThreadt_servetTruetdaemontstarttrelease(tt((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt _get_listenerfs     cCsddlm}m}xyStj}|j\}}tj|t|||t ||j Wq|sddl }|dddd|j ddqqXqWdS(Ni(t is_exitingt sub_warningis.thread for sharing handles raised exception : t-iOs ( tutilR0R1R!tacceptRRtremoveRRt tracebackt format_exc(R0R1Rt handle_wantedRR6((s1/usr/lib64/python2.7/multiprocessing/reduction.pyR)ws     cCs[tjr"dtj|tfSt|}tj|td|t j |t fS(Nsreducing handle %d( Rtthread_is_spawningR tduplicate_for_childR*RRtaddRR/taddressR(Rt dup_handle((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt reduce_handles     cCsq|\}}}|r|Std|t|dtj}|j|tjft|}|j|S(Nsrebuilding handle %dR%( RRRR%RtostgetpidRR(t pickled_dataR<Rt inheritedRR((s1/usr/lib64/python2.7/multiprocessing/reduction.pytrebuild_handles   cCs+t|j}t||j|jffS(N(R>Rtrebuild_connectiontreadabletwritable(Rtrh((s1/usr/lib64/python2.7/multiprocessing/reduction.pytreduce_connectionscCs%t|}tj|d|d|S(NRERF(RCRt Connection(treduced_handleRERFR((s1/usr/lib64/python2.7/multiprocessing/reduction.pyRDs icCsCtj||||}|jtjk 
r?tjd|}n|S(Nt_sock(tsockettfromfdt __class__(tfdtfamilyttype_tprotots((s1/usr/lib64/python2.7/multiprocessing/reduction.pyRMscCs1t|j}t||j|j|jffS(N(R>Rtrebuild_socketRPttypeRR(RSRJ((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt reduce_socketscCs/t|}t||||}t||S(N(RCRMR(RJRPRQRRRORK((s1/usr/lib64/python2.7/multiprocessing/reduction.pyRTs  cCs+t|j}t||j|jffS(N(R>Rtrebuild_pipe_connectionRERF(RRG((s1/usr/lib64/python2.7/multiprocessing/reduction.pytreduce_pipe_connectionscCs%t|}tj|d|d|S(NRERF(RCRtPipeConnection(RJRERFR((s1/usr/lib64/python2.7/multiprocessing/reduction.pyRWs (-t__all__R?tsysRLRRtmultiprocessingRtmultiprocessing.forkingRRRRtmultiprocessing.utilRRRtmultiprocessing.connectionRR tplatformthasattrt ImportErrort _subprocessR RRtsetRR$R R/R)R>RCRHRDtregisterRIRMRVRTRXRWRY(((s1/usr/lib64/python2.7/multiprocessing/reduction.pyt$sJ     "                   PK!*c1LLsharedctypes.pynu[# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # import sys import ctypes import weakref from multiprocessing import heap, RLock from multiprocessing.forking import assert_spawning, ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] # # # typecode_to_type = { 'c': ctypes.c_char, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'f': ctypes.c_float, 'd': ctypes.c_double } try: typecode_to_type['u'] = ctypes.c_wchar except AttributeError: pass # # # def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, (int, long)): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, 
*args, **kwds): ''' Return a synchronization wrapper for a Value ''' lock = kwds.pop('lock', None) if kwds: raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys()) obj = RawValue(typecode_or_type, *args) if lock is False: return obj if lock in (True, None): lock = RLock() if not hasattr(lock, 'acquire'): raise AttributeError("'%r' has no method 'acquire'" % lock) return synchronized(obj, lock) def Array(typecode_or_type, size_or_initializer, **kwds): ''' Return a synchronization wrapper for a RawArray ''' lock = kwds.pop('lock', None) if kwds: raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys()) obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj if lock in (True, None): lock = RLock() if not hasattr(lock, 'acquire'): raise AttributeError("'%r' has no method 'acquire'" % lock) return synchronized(obj, lock) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def synchronized(obj, lock=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock) return SynchronizedArray(obj, lock) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = dict((name, make_property(name)) for name in names) classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock) # # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length ForkingPickler.register(type_, reduce_ctype) obj = 
type_.from_address(wrapper.get_address()) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} exec template % ((name,)*7) in d prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None): self._obj = obj self._lock = lock or RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): self.acquire() try: return self._obj[i] finally: self.release() def __setitem__(self, i, value): self.acquire() try: self._obj[i] = value finally: self.release() def __getslice__(self, start, stop): self.acquire() try: return self._obj[start:stop] finally: self.release() def __setslice__(self, start, stop, values): self.acquire() try: self._obj[start:stop] = values finally: self.release() class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw') PK!j!!sharedctypes.pycnu[ {fc@sddlZddlZddlZddlmZmZddlmZmZdddddd gZ i ej d 6ej d 6ej d 6ej d 6ejd6ejd6ejd6ejd6ejd6ejd6ejd6Zyejed Returns a ctypes object allocated from shared memory i(ttypecode_to_typetgetRRtmemsett addressofRt__init__(ttypecode_or_typetargsRtobj((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRGs  % 
cCstj||}t|ttfrf||}t|}tjtj|dtj ||S|t |}t|}|j ||SdS(s= Returns a ctypes array allocated from shared memory iN( RR t isinstancetinttlongRRR!R"RtlenR#(R$tsize_or_initializerRR&tresult((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRQs  %  cOs|jdd}|r1td|jnt||}|tkrP|S|tdfkrnt}nt|dst d|nt ||S(s6 Return a synchronization wrapper for a Value tlocks$unrecognized keyword argument(s): %stacquires'%r' has no method 'acquire'N( tpopRt ValueErrortkeysRtFalsetTrueRthasattrtAttributeErrorR (R$R%tkwdsR-R&((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRas  cKs|jdd}|r1td|jnt||}|tkrP|S|tdfkrnt}nt|dst d|nt ||S(s9 Return a synchronization wrapper for a RawArray R-s$unrecognized keyword argument(s): %sR.s'%r' has no method 'acquire'N( R/RR0R1RR2R3RR4R5R (R$R+R6R-R&((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRqs  cCs)tt|}|tj|d<|S(Ni(RttypeRtpointer(R&tnew_obj((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRscCst|t stdt|tjr;t||St|tjry|jtjkrlt ||St ||St |}yt |}Wnqt k rg|jD]}|d^q}td|D}d|j}t |tf|}t |st Synchronized(R'tSynchronizedBasetAssertionErrorRt _SimpleCDataR=Rt_type_tc_chartSynchronizedStringtSynchronizedArrayR7t class_cachetKeyErrort_fields_tdictt__name__(R&R-tclstsclstfieldtnamesRt classname((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyR s        !cCsXt|t|tjr8t|j|j|jffStt||jdffSdS(N( RR'RRRRAt_wrappert_length_R7R(R&((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyt reduce_ctypes cCsK|dk r||}ntj|t|j|j}||_|S(N(RRtregisterRQt from_addresst get_addressRO(RRtlengthR&((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRs    cBsPy e|SWn=ek rKi}e|fd|U||e|<||SXdS(Ni(t prop_cacheRFttemplate(R<R((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyR:s  s def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) 
R>cBs8eZddZdZdZdZdZRS(cCs=||_|pt|_|jj|_|jj|_dS(N(t_objRt_lockR.trelease(tselfR&R-((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyR#s cCs t|t|j|jffS(N(RR RXRY(R[((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyt __reduce__s cCs|jS(N(RX(R[((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pytget_objscCs|jS(N(RY(R[((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pytget_lockscCsdt|j|jfS(Ns<%s wrapper for %s>(R7RIRX(R[((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyt__repr__sN(RIt __module__RR#R\R]R^R_(((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyR>s     R=cBseZedZRS(tvalue(RIR`R:Ra(((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyR=sRDcBs5eZdZdZdZdZdZRS(cCs t|jS(N(R*RX(R[((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyt__len__scCs+|jz|j|SWd|jXdS(N(R.RXRZ(R[R((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyt __getitem__s cCs-|jz||j|R=RDRC(((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyt#sB              "PK!aŃg!g!sharedctypes.pyonu[ {fc@sddlZddlZddlZddlmZmZddlmZmZdddddd gZ i ej d 6ej d 6ej d 6ej d 6ejd6ejd6ejd6ejd6ejd6ejd6ejd6Zyejed Returns a ctypes object allocated from shared memory i(ttypecode_to_typetgetRRtmemsett addressofRt__init__(ttypecode_or_typetargsRtobj((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRGs  % cCstj||}t|ttfrf||}t|}tjtj|dtj ||S|t |}t|}|j ||SdS(s= Returns a ctypes array allocated from shared memory iN( RR t isinstancetinttlongRRR!R"RtlenR#(R$tsize_or_initializerRR&tresult((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRQs  %  cOs|jdd}|r1td|jnt||}|tkrP|S|tdfkrnt}nt|dst d|nt ||S(s6 Return a synchronization wrapper for a Value tlocks$unrecognized keyword argument(s): %stacquires'%r' has no method 'acquire'N( tpopRt ValueErrortkeysRtFalsetTrueRthasattrtAttributeErrorR (R$R%tkwdsR-R&((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRas  cKs|jdd}|r1td|jnt||}|tkrP|S|tdfkrnt}nt|dst d|nt ||S(s9 Return a synchronization wrapper for a RawArray R-s$unrecognized 
keyword argument(s): %sR.s'%r' has no method 'acquire'N( R/RR0R1RR2R3RR4R5R (R$R+R6R-R&((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRqs  cCs)tt|}|tj|d<|S(Ni(RttypeRtpointer(R&tnew_obj((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRscCst|tjrt||St|tjr]|jtjkrPt||St||St |}yt |}Wnqt k rg|j D]}|d^q}t d|D}d|j}t |tf|}t |st Synchronized(R'Rt _SimpleCDataR=Rt_type_tc_chartSynchronizedStringtSynchronizedArrayR7t class_cachetKeyErrort_fields_tdictt__name__tSynchronizedBase(R&R-tclstsclstfieldtnamesRt classname((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyR s       !cCsXt|t|tjr8t|j|j|jffStt||jdffSdS(N( RR'RRRR?t_wrappert_length_R7R(R&((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyt reduce_ctypes cCsK|dk r||}ntj|t|j|j}||_|S(N(RRtregisterRPt from_addresst get_addressRN(RRtlengthR&((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRs    cBsPy e|SWn=ek rKi}e|fd|U||e|<||SXdS(Ni(t prop_cacheRDttemplate(R<R((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyR:s  s def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) RHcBs8eZddZdZdZdZdZRS(cCs=||_|pt|_|jj|_|jj|_dS(N(t_objRt_lockR.trelease(tselfR&R-((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyR#s cCs t|t|j|jffS(N(RR RWRX(RZ((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyt __reduce__s cCs|jS(N(RW(RZ((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pytget_objscCs|jS(N(RX(RZ((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pytget_lockscCsdt|j|jfS(Ns<%s wrapper for %s>(R7RGRW(RZ((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyt__repr__sN(RGt __module__RR#R[R\R]R^(((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyRHs     R=cBseZedZRS(tvalue(RGR_R:R`(((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyR=sRBcBs5eZdZdZdZdZdZRS(cCs 
t|jS(N(R*RW(RZ((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyt__len__scCs+|jz|j|SWd|jXdS(N(R.RWRY(RZR((s4/usr/lib64/python2.7/multiprocessing/sharedctypes.pyt __getitem__s cCs-|jz||j|#sB              "PK!K˞))synchronize.pynu[# # Module implementing synchronization primitives # # multiprocessing/synchronize.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
# __all__ = [ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' ] import threading import os import sys from time import time as _time, sleep as _sleep import _multiprocessing from multiprocessing.process import current_process from multiprocessing.util import Finalize, register_after_fork, debug from multiprocessing.forking import assert_spawning, Popen # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. # See issue 3770 try: from _multiprocessing import SemLock except (ImportError): raise ImportError("This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770.") # # Constants # RECURSIVE_MUTEX, SEMAPHORE = range(2) SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX # # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` # class SemLock(object): def __init__(self, kind, value, maxvalue): sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue) debug('created semlock with handle %s' % sl.handle) self._make_methods() if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() register_after_fork(self, _after_fork) def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release def __enter__(self): return self._semlock.__enter__() def __exit__(self, *args): return self._semlock.__exit__(*args) def __getstate__(self): assert_spawning(self) sl = self._semlock return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue) def __setstate__(self, state): self._semlock = _multiprocessing.SemLock._rebuild(*state) debug('recreated blocker with handle %r' % state[0]) self._make_methods() # # Semaphore # class Semaphore(SemLock): def __init__(self, value=1): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX) def get_value(self): return self._semlock._get_value() def 
__repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '' % value # # Bounded semaphore # class BoundedSemaphore(Semaphore): def __init__(self, value=1): SemLock.__init__(self, SEMAPHORE, value, value) def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '' % \ (value, self._semlock.maxvalue) # # Non-recursive lock # class Lock(SemLock): def __init__(self): SemLock.__init__(self, SEMAPHORE, 1, 1) def __repr__(self): try: if self._semlock._is_mine(): name = current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name elif self._semlock._get_value() == 1: name = 'None' elif self._semlock._count() > 0: name = 'SomeOtherThread' else: name = 'SomeOtherProcess' except Exception: name = 'unknown' return '' % name # # Recursive lock # class RLock(SemLock): def __init__(self): SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1) def __repr__(self): try: if self._semlock._is_mine(): name = current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name count = self._semlock._count() elif self._semlock._get_value() == 1: name, count = 'None', 0 elif self._semlock._count() > 0: name, count = 'SomeOtherThread', 'nonzero' else: name, count = 'SomeOtherProcess', 'nonzero' except Exception: name, count = 'unknown', 'unknown' return '' % (name, count) # # Condition variable # class Condition(object): def __init__(self, lock=None): self._lock = lock or RLock() self._sleeping_count = Semaphore(0) self._woken_count = Semaphore(0) self._wait_semaphore = Semaphore(0) self._make_methods() def __getstate__(self): assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) def __setstate__(self, state): (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) = state self._make_methods() def __enter__(self): return 
self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def _make_methods(self): self.acquire = self._lock.acquire self.release = self._lock.release def __repr__(self): try: num_waiters = (self._sleeping_count._semlock._get_value() - self._woken_count._semlock._get_value()) except Exception: num_waiters = 'unknown' return '' % (self._lock, num_waiters) def wait(self, timeout=None): assert self._lock._semlock._is_mine(), \ 'must acquire() condition before using wait()' # indicate that this thread is going to sleep self._sleeping_count.release() # release lock count = self._lock._semlock._count() for i in xrange(count): self._lock.release() try: # wait for notification or timeout self._wait_semaphore.acquire(True, timeout) finally: # indicate that this thread has woken self._woken_count.release() # reacquire lock for i in xrange(count): self._lock.acquire() def notify(self): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire(False) # to take account of timeouts since last notify() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res if self._sleeping_count.acquire(False): # try grabbing a sleeper self._wait_semaphore.release() # wake up one sleeper self._woken_count.acquire() # wait for the sleeper to wake # rezero _wait_semaphore in case a timeout just happened self._wait_semaphore.acquire(False) def notify_all(self): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire(False) # to take account of timeouts since last notify*() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res sleepers = 0 while self._sleeping_count.acquire(False): self._wait_semaphore.release() # wake up one sleeper sleepers += 1 if sleepers: for i in 
xrange(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened while self._wait_semaphore.acquire(False): pass # # Event # class Event(object): def __init__(self): self._cond = Condition(Lock()) self._flag = Semaphore(0) def is_set(self): self._cond.acquire() try: if self._flag.acquire(False): self._flag.release() return True return False finally: self._cond.release() def set(self): self._cond.acquire() try: self._flag.acquire(False) self._flag.release() self._cond.notify_all() finally: self._cond.release() def clear(self): self._cond.acquire() try: self._flag.acquire(False) finally: self._cond.release() def wait(self, timeout=None): self._cond.acquire() try: if self._flag.acquire(False): self._flag.release() else: self._cond.wait(timeout) if self._flag.acquire(False): self._flag.release() return True return False finally: self._cond.release() PK!٩++synchronize.pycnu[ {fc@sddddddgZddlZddlZddlZddlmZmZddlZdd l m Z dd l m Z m Z mZdd lmZmZydd lmZWn)ek red dddnXed\ZZejjZdefdYZdefdYZdefdYZdefdYZdefdYZdefdYZdefdYZdS(tLocktRLockt SemaphoretBoundedSemaphoret ConditiontEventiN(ttimetsleep(tcurrent_process(tFinalizetregister_after_forktdebug(tassert_spawningtPopen(tSemLocks*This platform lacks a functioning sem_opens( implementation, therefore, the requireds+ synchronization primitives needed will nots function, see issue 3770.iRcBs>eZdZdZdZdZdZdZRS(cCsctj|||}|_td|j|jtjdkr_d}t||ndS(Nscreated semlock with handle %stwin32cSs|jjdS(N(t_semlockt _after_fork(tobj((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRPs( t_multiprocessingRRR thandlet _make_methodstsystplatformR (tselftkindtvaluetmaxvaluetslR((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt__init__Js   cCs"|jj|_|jj|_dS(N(Rtacquiretrelease(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRTscCs |jjS(N(Rt __enter__(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR 
XscGs|jj|S(N(Rt__exit__(Rtargs((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR![scCs2t||j}tj|j|j|jfS(N(R RR tduplicate_for_childRRR(RR((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt __getstate__^s  cCs5tjj||_td|d|jdS(Ns recreated blocker with handle %ri(RRt_rebuildRR R(Rtstate((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt __setstate__cs(t__name__t __module__RRR R!R$R'(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRHs     cBs&eZddZdZdZRS(icCstj|t|tdS(N(RRt SEMAPHOREt SEM_VALUE_MAX(RR((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRnscCs |jjS(N(Rt _get_value(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt get_valueqscCs5y|jj}Wntk r,d}nXd|S(Ntunknowns(RR,t Exception(RR((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt__repr__ts   (R(R)RR-R0(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRls  cBseZddZdZRS(icCstj|t||dS(N(RRR*(RR((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRscCsAy|jj}Wntk r,d}nXd||jjfS(NR.s)(RR,R/R(RR((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR0s   (R(R)RR0(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRs cBseZdZdZRS(cCstj|tdddS(Ni(RRR*(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRscCsy|jjrPtj}tjjdkr|dtjj7}qnB|jjdkrnd}n$|jjdkrd}nd}Wntk rd}nXd |S( Nt MainThreadt|itNoneitSomeOtherThreadtSomeOtherProcessR.s( Rt_is_mineRtnamet threadingtcurrent_threadR,t_countR/(RR7((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR0s      (R(R)RR0(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRs cBseZdZdZRS(cCstj|tdddS(Ni(RRtRECURSIVE_MUTEX(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRscCsy|jjr_tj}tjjdkrM|dtjj7}n|jj}nT|jjdkrd \}}n0|jjdkrd \}}n d \}}Wntk rd\}}nXd ||fS(NR1R2iR3iR4tnonzeroR5R.s(R3i(R4R<(R5R<(R.R.( RR6RR7R8R9R:R,R/(RR7tcount((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR0s  (R(R)RR0(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRs cBsheZd dZdZdZdZdZdZdZ d dZ dZ d Z RS( cCsM|p 
t|_td|_td|_td|_|jdS(Ni(Rt_lockRt_sleeping_countt _woken_countt_wait_semaphoreR(Rtlock((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRs cCs&t||j|j|j|jfS(N(R R>R?R@RA(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR$s  cCs,|\|_|_|_|_|jdS(N(R>R?R@RAR(RR&((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR'scCs |jjS(N(R>R (R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR scGs|jj|S(N(R>R!(RR"((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR!scCs"|jj|_|jj|_dS(N(R>RR(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRscCsQy&|jjj|jjj}Wntk r?d}nXd|j|fS(NR.s(R?RR,R@R/R>(Rt num_waiters((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR0s   cCs|jjjstd|jj|jjj}x!t|D]}|jjqJWz|jj t |Wd|j jx!t|D]}|jj qWXdS(Ns,must acquire() condition before using wait()( R>RR6tAssertionErrorR?RR:txrangeRARtTrueR@(RttimeoutR=ti((s3/usr/lib64/python2.7/multiprocessing/synchronize.pytwaits   cCs|jjjstd|jjt s7tx4|jjtrm|jjt}|s:tq:W|jjtr|jj |jj|jjtndS(Nslock is not owned( R>RR6RDRARtFalseR@R?R(Rtres((s3/usr/lib64/python2.7/multiprocessing/synchronize.pytnotifys  cCs|jjjstd|jjt s7tx4|jjtrm|jjt}|s:tq:Wd}x-|jjtr|jj |d7}qwW|rx!t |D]}|jjqWx|jjtrqWndS(Nslock is not ownedii( R>RR6RDRARRJR@R?RRE(RRKtsleepersRH((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt notify_alls N( R(R)R3RR$R'R R!RR0RIRLRN(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRs         cBs8eZdZdZdZdZddZRS(cCs%tt|_td|_dS(Ni(RRt_condRt_flag(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR-scCsM|jjz+|jjtr3|jjtStSWd|jjXdS(N(RORRPRJRRF(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pytis_set1s  cCsP|jjz.|jjt|jj|jjWd|jjXdS(N(RORRPRJRRN(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pytset;s   cCs6|jjz|jjtWd|jjXdS(N(RORRPRJR(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pytclearDs cCs|jjz]|jjtr2|jjn|jj||jjtre|jjtStSWd|jjXdS(N(RORRPRJRRIRF(RRG((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRIKs  
N(R(R)RRQRRRSR3RI(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR+s   (t__all__R8tosRRt_timeRt_sleepRtmultiprocessing.processRtmultiprocessing.utilR R R tmultiprocessing.forkingR R Rt ImportErrortrangeR;R*R+tobjectRRRRRR(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt$s.       $hPK!tA))synchronize.pyonu[ {fc@sddddddgZddlZddlZddlZddlmZmZddlZdd l m Z dd l m Z m Z mZdd lmZmZydd lmZWn)ek red dddnXed\ZZejjZdefdYZdefdYZdefdYZdefdYZdefdYZdefdYZdefdYZdS(tLocktRLockt SemaphoretBoundedSemaphoret ConditiontEventiN(ttimetsleep(tcurrent_process(tFinalizetregister_after_forktdebug(tassert_spawningtPopen(tSemLocks*This platform lacks a functioning sem_opens( implementation, therefore, the requireds+ synchronization primitives needed will nots function, see issue 3770.iRcBs>eZdZdZdZdZdZdZRS(cCsctj|||}|_td|j|jtjdkr_d}t||ndS(Nscreated semlock with handle %stwin32cSs|jjdS(N(t_semlockt _after_fork(tobj((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRPs( t_multiprocessingRRR thandlet _make_methodstsystplatformR (tselftkindtvaluetmaxvaluetslR((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt__init__Js   cCs"|jj|_|jj|_dS(N(Rtacquiretrelease(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRTscCs |jjS(N(Rt __enter__(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR XscGs|jj|S(N(Rt__exit__(Rtargs((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR![scCs2t||j}tj|j|j|jfS(N(R RR tduplicate_for_childRRR(RR((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt __getstate__^s  cCs5tjj||_td|d|jdS(Ns recreated blocker with handle %ri(RRt_rebuildRR R(Rtstate((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt __setstate__cs(t__name__t __module__RRR R!R$R'(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRHs     cBs&eZddZdZdZRS(icCstj|t|tdS(N(RRt SEMAPHOREt SEM_VALUE_MAX(RR((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRnscCs |jjS(N(Rt _get_value(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt get_valueqscCs5y|jj}Wntk 
r,d}nXd|S(Ntunknowns(RR,t Exception(RR((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt__repr__ts   (R(R)RR-R0(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRls  cBseZddZdZRS(icCstj|t||dS(N(RRR*(RR((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRscCsAy|jj}Wntk r,d}nXd||jjfS(NR.s)(RR,R/R(RR((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR0s   (R(R)RR0(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRs cBseZdZdZRS(cCstj|tdddS(Ni(RRR*(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRscCsy|jjrPtj}tjjdkr|dtjj7}qnB|jjdkrnd}n$|jjdkrd}nd}Wntk rd}nXd |S( Nt MainThreadt|itNoneitSomeOtherThreadtSomeOtherProcessR.s( Rt_is_mineRtnamet threadingtcurrent_threadR,t_countR/(RR7((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR0s      (R(R)RR0(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRs cBseZdZdZRS(cCstj|tdddS(Ni(RRtRECURSIVE_MUTEX(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRscCsy|jjr_tj}tjjdkrM|dtjj7}n|jj}nT|jjdkrd \}}n0|jjdkrd \}}n d \}}Wntk rd\}}nXd ||fS(NR1R2iR3iR4tnonzeroR5R.s(R3i(R4R<(R5R<(R.R.( RR6RR7R8R9R:R,R/(RR7tcount((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR0s  (R(R)RR0(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRs cBsheZd dZdZdZdZdZdZdZ d dZ dZ d Z RS( cCsM|p t|_td|_td|_td|_|jdS(Ni(Rt_lockRt_sleeping_countt _woken_countt_wait_semaphoreR(Rtlock((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRs cCs&t||j|j|j|jfS(N(R R>R?R@RA(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR$s  cCs,|\|_|_|_|_|jdS(N(R>R?R@RAR(RR&((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR'scCs |jjS(N(R>R (R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR scGs|jj|S(N(R>R!(RR"((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR!scCs"|jj|_|jj|_dS(N(R>RR(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRscCsQy&|jjj|jjj}Wntk r?d}nXd|j|fS(NR.s(R?RR,R@R/R>(Rt num_waiters((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR0s   cCs|jj|jjj}x!t|D]}|jjq,Wz|jjt|Wd|j 
jx!t|D]}|jjqwWXdS(N( R?RR>RR:txrangeRARtTrueR@(RttimeoutR=ti((s3/usr/lib64/python2.7/multiprocessing/synchronize.pytwaits  cCsnx(|jjtr*|jjt}qW|jjtrj|jj|jj|jjtndS(N(R@RtFalseR?RAR(Rtres((s3/usr/lib64/python2.7/multiprocessing/synchronize.pytnotifys  cCsx(|jjtr*|jjt}qWd}x-|jjtr`|jj|d7}q4W|rx!t|D]}|jjqtWx|jjtrqWndS(Nii(R@RRIR?RARRD(RRJtsleepersRG((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt notify_alls N( R(R)R3RR$R'R R!RR0RHRKRM(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRs         cBs8eZdZdZdZdZddZRS(cCs%tt|_td|_dS(Ni(RRt_condRt_flag(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR-scCsM|jjz+|jjtr3|jjtStSWd|jjXdS(N(RNRRORIRRE(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pytis_set1s  cCsP|jjz.|jjt|jj|jjWd|jjXdS(N(RNRRORIRRM(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pytset;s   cCs6|jjz|jjtWd|jjXdS(N(RNRRORIR(R((s3/usr/lib64/python2.7/multiprocessing/synchronize.pytclearDs cCs|jjz]|jjtr2|jjn|jj||jjtre|jjtStSWd|jjXdS(N(RNRRORIRRHRE(RRF((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyRHKs  N(R(R)RRPRQRRR3RH(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyR+s   (t__all__R8tosRRt_timeRt_sleepRtmultiprocessing.processRtmultiprocessing.utilR R R tmultiprocessing.forkingR R Rt ImportErrortrangeR;R*R+tobjectRRRRRR(((s3/usr/lib64/python2.7/multiprocessing/synchronize.pyt$s.       $hPK!@Y**util.pynu[# # Module providing various facilities to other parts of the package # # multiprocessing/util.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
# import os import itertools import weakref import atexit import threading # we want threading to install it's # cleanup function before multiprocessing does from subprocess import _args_from_interpreter_flags from multiprocessing.process import current_process, active_children __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 LOGGER_NAME = 'multiprocessing' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args): if _logger: _logger.log(SUBDEBUG, msg, *args) def debug(msg, *args): if _logger: _logger.log(DEBUG, msg, *args) def info(msg, *args): if _logger: _logger.log(INFO, msg, *args) def sub_warning(msg, *args): if _logger: _logger.log(SUBWARNING, msg, *args) def get_logger(): ''' Returns logger used by multiprocessing ''' global _logger import logging, atexit logging._acquireLock() try: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 logging.addLevelName(SUBDEBUG, 'SUBDEBUG') logging.addLevelName(SUBWARNING, 'SUBWARNING') # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) finally: logging._releaseLock() return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger # # Function 
returning a temp directory which will be removed on exit # def get_temp_dir(): # get name of a temp directory which will be automatically cleaned up if current_process()._tempdir is None: import shutil, tempfile tempdir = tempfile.mkdtemp(prefix='pymp-') info('created temp directory %s', tempdir) Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100) current_process()._tempdir = tempdir return current_process()._tempdir # # Support for reinitialization of objects when bootstrapping a child process # _afterfork_registry = weakref.WeakValueDictionary() _afterfork_counter = itertools.count() def _run_after_forkers(): items = list(_afterfork_registry.items()) items.sort() for (index, ident, func), obj in items: try: func(obj) except Exception, e: info('after forker raised exception %s', e) def register_after_fork(obj, func): _afterfork_registry[(_afterfork_counter.next(), id(obj), func)] = obj # # Finalization using weakrefs # _finalizer_registry = {} _finalizer_counter = itertools.count() class Finalize(object): ''' Class which supports object finalization using weakrefs ''' def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): assert exitpriority is None or type(exitpriority) in (int, long) if obj is not None: self._weakref = weakref.ref(obj, self) else: assert exitpriority is not None self._callback = callback self._args = args self._kwargs = kwargs or {} self._key = (exitpriority, _finalizer_counter.next()) self._pid = os.getpid() _finalizer_registry[self._key] = self def __call__(self, wr=None): ''' Run the callback unless it has already been called or cancelled ''' try: del _finalizer_registry[self._key] except KeyError: sub_debug('finalizer no longer registered') else: if self._pid != os.getpid(): sub_debug('finalizer ignored because different process') res = None else: sub_debug('finalizer calling %s with args %s and kwargs %s', self._callback, self._args, self._kwargs) res = self._callback(*self._args, **self._kwargs) 
self._weakref = self._callback = self._args = \ self._kwargs = self._key = None return res def cancel(self): ''' Cancel finalization of the object ''' try: del _finalizer_registry[self._key] except KeyError: pass else: self._weakref = self._callback = self._args = \ self._kwargs = self._key = None def still_active(self): ''' Return whether this finalizer is still waiting to invoke callback ''' return self._key in _finalizer_registry def __repr__(self): try: obj = self._weakref() except (AttributeError, TypeError): obj = None if obj is None: return '' x = '' def _run_finalizers(minpriority=None): ''' Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation. ''' if _finalizer_registry is None: # This function may be called after this module's globals are # destroyed. See the _exit_function function in this module for more # notes. return if minpriority is None: f = lambda p : p[0][0] is not None else: f = lambda p : p[0][0] is not None and p[0][0] >= minpriority # Careful: _finalizer_registry may be mutated while this function # is running (either by a GC run or by another thread). items = [x for x in _finalizer_registry.items() if f(x)] items.sort(reverse=True) for key, finalizer in items: sub_debug('calling %s', finalizer) try: finalizer() except Exception: import traceback traceback.print_exc() if minpriority is None: _finalizer_registry.clear() # # Clean up on exit # def is_exiting(): ''' Returns true if the process is shutting down ''' return _exiting or _exiting is None _exiting = False def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, active_children=active_children, current_process=current_process): # NB: we hold on to references to functions in the arglist due to the # situation described below, where this function is called after this # module's globals are destroyed. 
global _exiting info('process shutting down') debug('running all "atexit" finalizers with priority >= 0') _run_finalizers(0) if current_process() is not None: # NB: we check if the current process is None here because if # it's None, any call to ``active_children()`` will throw an # AttributeError (active_children winds up trying to get # attributes from util._current_process). This happens in a # variety of shutdown circumstances that are not well-understood # because module-scope variables are not apparently supposed to # be destroyed until after this function is called. However, # they are indeed destroyed before this function is called. See # issues 9775 and 15881. Also related: 4106, 9205, and 9207. for p in active_children(): if p._daemonic: info('calling terminate() for daemon %s', p.name) p._popen.terminate() for p in active_children(): info('calling join() for process %s', p.name) p.join() debug('running the remaining "atexit" finalizers') _run_finalizers() atexit.register(_exit_function) # # Some fork aware types # class ForkAwareThreadLock(object): def __init__(self): self._reset() register_after_fork(self, ForkAwareThreadLock._reset) def _reset(self): self._lock = threading.Lock() self.acquire = self._lock.acquire self.release = self._lock.release class ForkAwareLocal(threading.local): def __init__(self): register_after_fork(self, lambda obj : obj.__dict__.clear()) def __reduce__(self): return type(self), () PK!5ꯡ''util.pycnu[ {fc@sddlZddlZddlZddlZddlZddlmZddlmZm Z dddddd d d d d ddddgZ dZ dZ dZ dZdZdZdZdaeadZdZdZdZdZddZdZejZejZ d Z!d!Z"iZ#ejZ$d e%fd"YZ&dd#Z'd$Z(ea)eee'e ed%Z*ej+e*de%fd&YZ,dej-fd'YZ.dS((iN(t_args_from_interpreter_flags(tcurrent_processtactive_childrent sub_debugtdebugtinfot sub_warningt get_loggert log_to_stderrt get_temp_dirtregister_after_forkt is_exitingtFinalizetForkAwareThreadLocktForkAwareLocaltSUBDEBUGt SUBWARNINGiii iitmultiprocessings+[%(levelname)s/%(processName)s] %(message)scGs 
trtjt||ndS(N(t_loggertlogR(tmsgtargs((s,/usr/lib64/python2.7/multiprocessing/util.pyRDscGs trtjt||ndS(N(RRtDEBUG(RR((s,/usr/lib64/python2.7/multiprocessing/util.pyRHscGs trtjt||ndS(N(RRtINFO(RR((s,/usr/lib64/python2.7/multiprocessing/util.pyRLscGs trtjt||ndS(N(RRR(RR((s,/usr/lib64/python2.7/multiprocessing/util.pyRPscCsddl}ddl}|jzts|jtadt_|jtd|jt dt |dr|j t |j t q|jjt dif|jjt difnWd|jXtS( s0 Returns logger used by multiprocessing iNiRRt unregister(((tloggingtatexitt _acquireLockRt getLoggert LOGGER_NAMEt propagatet addLevelNameRRthasattrRt_exit_functiontregistert _exithandlerstremovetappendt _releaseLock(RR((s,/usr/lib64/python2.7/multiprocessing/util.pyRTs     cCsjddl}t}|jt}|j}|j||j||r`|j|nta t S(sB Turn on logging and add a handler which prints to stderr iN( RRt FormattertDEFAULT_LOGGING_FORMATt StreamHandlert setFormattert addHandlertsetLeveltTruet_log_to_stderrR(tlevelRtloggert formatterthandler((s,/usr/lib64/python2.7/multiprocessing/util.pyRqs     cCstjdkrwddl}ddl}|jdd}td|td|jd|gdd|t_ntjS(Nitprefixspymp-screated temp directory %sRt exitpriorityi( Rt_tempdirtNonetshutilttempfiletmkdtempRR trmtree(R7R8ttempdir((s,/usr/lib64/python2.7/multiprocessing/util.pyR s cCsqttj}|jxN|D]F\\}}}}y||Wq#tk rh}td|q#Xq#WdS(Ns after forker raised exception %s(tlistt_afterfork_registrytitemstsortt ExceptionR(R>tindextidenttfunctobjte((s,/usr/lib64/python2.7/multiprocessing/util.pyt_run_after_forkerss cCs#|ttjt||fs( RPtAttributeErrort TypeErrorR6tgetattrRQRRtstrRSRU(RZRDtx((s,/usr/lib64/python2.7/multiprocessing/util.pyt__repr__s    (N( Rdt __module__t__doc__R6R]RaRbRcRk(((s,/usr/lib64/python2.7/multiprocessing/util.pyR s   cstdkrdSdkr(d}nfd}gtjD]}||rD|^qD}|jdtxV|D]N\}}td|y |Wqytk rddl}|jqyXqyWdkrtj ndS(s Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation. 
NcSs|dddk S(Ni(R6(tp((s,/usr/lib64/python2.7/multiprocessing/util.pyttcs&|dddk o%|ddkS(Ni(R6(Rn(t minpriority(s,/usr/lib64/python2.7/multiprocessing/util.pyRo Rptreverses calling %si( RYR6R>R?R-RR@t tracebackt print_exctclear(RqtfRjR>tkeyt finalizerRs((Rqs,/usr/lib64/python2.7/multiprocessing/util.pyt_run_finalizerss    +     cCstptdkS(s6 Returns true if the process is shutting down N(t_exitingR6(((s,/usr/lib64/python2.7/multiprocessing/util.pyR !scCs|d|d|d|dk rx:|D]/}|jr7|d|j|jjq7q7Wx.|D] }|d|j|jqtWn|d|dS(Nsprocess shutting downs2running all "atexit" finalizers with priority >= 0is!calling terminate() for daemon %sscalling join() for process %ss)running the remaining "atexit" finalizers(R6t _daemonictnamet_popent terminatetjoin(RRRyRRRn((s,/usr/lib64/python2.7/multiprocessing/util.pyR!)s      cBseZdZdZRS(cCs|jt|tjdS(N(t_resetR R (RZ((s,/usr/lib64/python2.7/multiprocessing/util.pyR]Ts cCs1tj|_|jj|_|jj|_dS(N(t threadingtLockt_locktacquiretrelease(RZ((s,/usr/lib64/python2.7/multiprocessing/util.pyRXs(RdRlR]R(((s,/usr/lib64/python2.7/multiprocessing/util.pyR Ss cBseZdZdZRS(cCst|ddS(NcSs |jjS(N(t__dict__Ru(RD((s,/usr/lib64/python2.7/multiprocessing/util.pyRo_Rp(R (RZ((s,/usr/lib64/python2.7/multiprocessing/util.pyR]^scCst|dfS(N((RJ(RZ((s,/usr/lib64/python2.7/multiprocessing/util.pyt __reduce__`s(RdRlR]R(((s,/usr/lib64/python2.7/multiprocessing/util.pyR]s (/RVt itertoolsRNRRt subprocessRtmultiprocessing.processRRt__all__tNOTSETRRRRRR(R6RtFalseR.RRRRRRR tWeakValueDictionaryR=tcountRGRFR RYRTtobjectR RyR RzR!R"R tlocalR(((s,/usr/lib64/python2.7/multiprocessing/util.pyt#sT                   N '   "  PK!N8'8'util.pyonu[ {fc@sddlZddlZddlZddlZddlZddlmZddlmZm Z dddddd d d d d ddddgZ dZ dZ dZ dZdZdZdZdaeadZdZdZdZdZddZdZejZejZ d Z!d!Z"iZ#ejZ$d e%fd"YZ&dd#Z'd$Z(ea)eee'e ed%Z*ej+e*de%fd&YZ,dej-fd'YZ.dS((iN(t_args_from_interpreter_flags(tcurrent_processtactive_childrent sub_debugtdebugtinfot sub_warningt get_loggert log_to_stderrt get_temp_dirtregister_after_forkt 
is_exitingtFinalizetForkAwareThreadLocktForkAwareLocaltSUBDEBUGt SUBWARNINGiii iitmultiprocessings+[%(levelname)s/%(processName)s] %(message)scGs trtjt||ndS(N(t_loggertlogR(tmsgtargs((s,/usr/lib64/python2.7/multiprocessing/util.pyRDscGs trtjt||ndS(N(RRtDEBUG(RR((s,/usr/lib64/python2.7/multiprocessing/util.pyRHscGs trtjt||ndS(N(RRtINFO(RR((s,/usr/lib64/python2.7/multiprocessing/util.pyRLscGs trtjt||ndS(N(RRR(RR((s,/usr/lib64/python2.7/multiprocessing/util.pyRPscCsddl}ddl}|jzts|jtadt_|jtd|jt dt |dr|j t |j t q|jjt dif|jjt difnWd|jXtS( s0 Returns logger used by multiprocessing iNiRRt unregister(((tloggingtatexitt _acquireLockRt getLoggert LOGGER_NAMEt propagatet addLevelNameRRthasattrRt_exit_functiontregistert _exithandlerstremovetappendt _releaseLock(RR((s,/usr/lib64/python2.7/multiprocessing/util.pyRTs     cCsjddl}t}|jt}|j}|j||j||r`|j|nta t S(sB Turn on logging and add a handler which prints to stderr iN( RRt FormattertDEFAULT_LOGGING_FORMATt StreamHandlert setFormattert addHandlertsetLeveltTruet_log_to_stderrR(tlevelRtloggert formatterthandler((s,/usr/lib64/python2.7/multiprocessing/util.pyRqs     cCstjdkrwddl}ddl}|jdd}td|td|jd|gdd|t_ntjS(Nitprefixspymp-screated temp directory %sRt exitpriorityi( Rt_tempdirtNonetshutilttempfiletmkdtempRR trmtree(R7R8ttempdir((s,/usr/lib64/python2.7/multiprocessing/util.pyR s cCsqttj}|jxN|D]F\\}}}}y||Wq#tk rh}td|q#Xq#WdS(Ns after forker raised exception %s(tlistt_afterfork_registrytitemstsortt ExceptionR(R>tindextidenttfunctobjte((s,/usr/lib64/python2.7/multiprocessing/util.pyt_run_after_forkerss cCs#|ttjt||fs( RLtAttributeErrort TypeErrorR6tgetattrRMRNtstrRORQ(RVRDtx((s,/usr/lib64/python2.7/multiprocessing/util.pyt__repr__s    (N( R`t __module__t__doc__R6RYR]R^R_Rg(((s,/usr/lib64/python2.7/multiprocessing/util.pyR s   cstdkrdSdkr(d}nfd}gtjD]}||rD|^qD}|jdtxV|D]N\}}td|y |Wqytk rddl}|jqyXqyWdkrtj ndS(s Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; 
finalizers with the same priority will be called in reverse order of creation. NcSs|dddk S(Ni(R6(tp((s,/usr/lib64/python2.7/multiprocessing/util.pyttcs&|dddk o%|ddkS(Ni(R6(Rj(t minpriority(s,/usr/lib64/python2.7/multiprocessing/util.pyRk Rltreverses calling %si( RUR6R>R?R-RR@t tracebackt print_exctclear(RmtfRfR>tkeyt finalizerRo((Rms,/usr/lib64/python2.7/multiprocessing/util.pyt_run_finalizerss    +     cCstptdkS(s6 Returns true if the process is shutting down N(t_exitingR6(((s,/usr/lib64/python2.7/multiprocessing/util.pyR !scCs|d|d|d|dk rx:|D]/}|jr7|d|j|jjq7q7Wx.|D] }|d|j|jqtWn|d|dS(Nsprocess shutting downs2running all "atexit" finalizers with priority >= 0is!calling terminate() for daemon %sscalling join() for process %ss)running the remaining "atexit" finalizers(R6t _daemonictnamet_popent terminatetjoin(RRRuRRRj((s,/usr/lib64/python2.7/multiprocessing/util.pyR!)s      cBseZdZdZRS(cCs|jt|tjdS(N(t_resetR R (RV((s,/usr/lib64/python2.7/multiprocessing/util.pyRYTs cCs1tj|_|jj|_|jj|_dS(N(t threadingtLockt_locktacquiretrelease(RV((s,/usr/lib64/python2.7/multiprocessing/util.pyR|Xs(R`RhRYR|(((s,/usr/lib64/python2.7/multiprocessing/util.pyR Ss cBseZdZdZRS(cCst|ddS(NcSs |jjS(N(t__dict__Rq(RD((s,/usr/lib64/python2.7/multiprocessing/util.pyRk_Rl(R (RV((s,/usr/lib64/python2.7/multiprocessing/util.pyRY^scCst|dfS(N((ttype(RV((s,/usr/lib64/python2.7/multiprocessing/util.pyt __reduce__`s(R`RhRYR(((s,/usr/lib64/python2.7/multiprocessing/util.pyR]s (/RRt itertoolsRJRR}t subprocessRtmultiprocessing.processRRt__all__tNOTSETRRRRRR(R6RtFalseR.RRRRRRR tWeakValueDictionaryR=tcountRGRFR RURPtobjectR RuR RvR!R"R tlocalR(((s,/usr/lib64/python2.7/multiprocessing/util.pyt#sT                   N '   "  PK!<Ϳdummy/__init__.pynu[PK!q#TTdummy/__init__.pycnu[PK!""X'dummy/__init__.pyonu[PK!x <dummy/connection.pynu[PK! Gdummy/connection.pycnu[PK! Sdummy/connection.pyonu[PK!nFX}} ^__init__.pynu[PK!fw!! |__init__.pycnu[PK!fw!! 
__init__.pyonu[PK!.:: hconnection.pynu[PK!+J9J9Dconnection.pycnu[PK!G883connection.pyonu[PK!rKOCC lforking.pynu[PK!}88 forking.pycnu[PK!v77 forking.pyonu[PK!S\߅!! heap.pynu[PK!XvBheap.pycnu[PK!/^]heap.pyonu[PK! 5kk wmanagers.pynu[PK!5%j2 Emanagers.pycnu[PK!8 fmanagers.pyonu[PK!^^.1pool.pynu[PK!'@X@Xpool.pycnu[PK!8process.pynu[PK!X%% dprocess.pycnu[PK!n$"" process.pyonu[PK!Wc,00 uqueues.pynu[PK!zG,, queues.pycnu[PK!,,  queues.pyonu[PK!Ȍ 7reduction.pynu[PK!-'~~ Qreduction.pycnu[PK!-'~~ Lireduction.pyonu[PK!*c1LLsharedctypes.pynu[PK!j!!sharedctypes.pycnu[PK!aŃg!g!sharedctypes.pyonu[PK!K˞))1synchronize.pynu[PK!٩++ synchronize.pycnu[PK!tA))]8synchronize.pyonu[PK!@Y**`butil.pynu[PK!5ꯡ''util.pycnu[PK!N8'8'nutil.pyonu[PK**