Mirror of https://github.com/Sneed-Group/Poodletooth-iLand (synced 2024-12-23 11:42:39 -06:00)

Commit 97a3ea0b7a
Merge branch 'master' of https://gitlab.com/ToontownUnited/src

313 changed files with 60788 additions and 66 deletions
New binary files under panda/bin/ (BIN, contents not shown):

panda/bin/apply_patch.exe
panda/bin/bam-info.exe
panda/bin/bam2egg.exe
panda/bin/build_patch.exe
panda/bin/cgc.exe
panda/bin/cgfxcat.exe
panda/bin/cginfo.exe
panda/bin/check_adler.exe
panda/bin/check_crc.exe
panda/bin/check_md5.exe
panda/bin/dxf-points.exe
panda/bin/dxf2egg.exe
panda/bin/egg-crop.exe
panda/bin/egg-list-textures.exe
panda/bin/egg-make-tube.exe
panda/bin/egg-mkfont.exe
panda/bin/egg-optchar.exe
panda/bin/egg-palettize.exe
panda/bin/egg-qtess.exe
panda/bin/egg-rename.exe
panda/bin/egg-retarget-anim.exe
panda/bin/egg-texture-cards.exe
panda/bin/egg-topstrip.exe
panda/bin/egg-trans.exe
panda/bin/egg2bam.exe
panda/bin/egg2c.exe
panda/bin/egg2dxf.exe
panda/bin/egg2flt.exe
panda/bin/egg2obj.exe
panda/bin/egg2x.exe
panda/bin/eggcacher.exe
panda/bin/flt-info.exe
panda/bin/flt-trans.exe
panda/bin/flt2egg.exe
panda/bin/fltcopy.exe
panda/bin/image-info.exe
panda/bin/image-resize.exe
panda/bin/image-trans.exe
panda/bin/interrogate.exe
panda/bin/interrogate_module.exe
panda/bin/lwo-scan.exe
panda/bin/lwo2egg.exe
panda/bin/make-prc-key.exe
panda/bin/multify.exe
panda/bin/obj2egg.exe
panda/bin/p3dWrapper.exe
panda/bin/p3dcparse.exe
panda/bin/packpanda.exe
panda/bin/parse_file.exe
panda/bin/pdecrypt.exe
panda/bin/pencrypt.exe
panda/bin/pfm-bba.exe
panda/bin/pfm-trans.exe
panda/bin/pstats.exe
panda/bin/punzip.exe
panda/bin/pview.exe
panda/bin/pzip.exe
panda/bin/show_ddb.exe
panda/bin/softcvs.exe
panda/bin/test_interrogate.exe
panda/bin/text-stats.exe
panda/bin/vrml-trans.exe
panda/bin/vrml2egg.exe
panda/bin/x-trans.exe
panda/bin/x2egg.exe
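The binaries above are the stock Panda3D SDK command-line tools (model converters such as egg2bam and bam2egg, the multify archiver, the pview model viewer, and so on). As a minimal sketch only, not something from this commit, one of them could be driven from Python like this; the model file names are placeholders, not files from this repository:

    import subprocess

    # Convert an .egg model to the binary .bam format with the bundled tool.
    # "model.egg" / "model.bam" are illustrative paths.
    subprocess.check_call([
        "panda/bin/egg2bam.exe",
        "-o", "model.bam",   # output file
        "model.egg",         # input file
    ])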
@@ -233,6 +233,8 @@ class AstronInternalRepository(ConnectionRepository):
self.handleObjLocation(di)
elif msgType in (DBSERVER_CREATE_OBJECT_RESP,
DBSERVER_OBJECT_GET_ALL_RESP,
DBSERVER_OBJECT_GET_FIELDS_RESP,
DBSERVER_OBJECT_GET_FIELD_RESP,
DBSERVER_OBJECT_SET_FIELD_IF_EQUALS_RESP,
DBSERVER_OBJECT_SET_FIELDS_IF_EQUALS_RESP):
self.dbInterface.handleDatagram(msgType, di)

@@ -327,7 +329,8 @@ class AstronInternalRepository(ConnectionRepository):
"""
Send a field update for the given object.

You should probably use do.sendUpdate(...) instead.
You should use do.sendUpdate(...) instead. This is not meant to be
called directly unless you really know what you are doing.
"""

self.sendUpdateToChannel(do, do.doId, fieldName, args)

@@ -339,7 +342,8 @@ class AstronInternalRepository(ConnectionRepository):
This is useful for directing the update to a specific client or node,
rather than at the State Server managing the object.

You should probably use do.sendUpdateToChannel(...) instead.
You should use do.sendUpdateToChannel(...) instead. This is not meant
to be called directly unless you really know what you are doing.
"""

dclass = do.dclass

@@ -410,7 +414,8 @@ class AstronInternalRepository(ConnectionRepository):
"""
Generate an object onto the State Server, choosing an ID from the pool.

You should probably use do.generateWithRequired(...) instead.
You should use do.generateWithRequired(...) instead. This is not meant
to be called directly unless you really know what you are doing.
"""

doId = self.allocateChannel()

@@ -420,7 +425,8 @@ class AstronInternalRepository(ConnectionRepository):
"""
Generate an object onto the State Server, specifying its ID and location.

You should probably use do.generateWithRequiredAndId(...) instead.
You should use do.generateWithRequiredAndId(...) instead. This is not
meant to be called directly unless you really know what you are doing.
"""

do.doId = doId

@@ -431,7 +437,8 @@ class AstronInternalRepository(ConnectionRepository):
"""
Request the deletion of an object that already exists on the State Server.

You should probably use do.requestDelete() instead.
You should use do.requestDelete() instead. This is not meant to be
called directly unless you really know what you are doing.
"""

dg = PyDatagram()

@@ -540,3 +547,73 @@ class AstronInternalRepository(ConnectionRepository):
dg = PyDatagram()
msgpack_encode(dg, log)
self.eventSocket.Send(dg.getMessage())

def setAI(self, doId, aiChannel):
"""
Sets the AI of the specified DistributedObjectAI to be the specified channel.
Generally, you should not call this method, and instead call DistributedObjectAI.setAI.
"""

dg = PyDatagram()
dg.addServerHeader(doId, aiChannel, STATESERVER_OBJECT_SET_AI)
dg.add_uint64(aiChannel)
self.send(dg)

def eject(self, clientChannel, reasonCode, reason):
"""
Kicks the client residing at the specified clientChannel, using the specifed reasoning.
"""

dg = PyDatagram()
dg.addServerHeader(clientChannel, self.ourChannel, CLIENTAGENT_EJECT)
dg.add_uint16(reasonCode)
dg.addString(reason)
self.send(dg)

def setClientState(self, clientChannel, state):
"""
Sets the state of the client on the CA.
Useful for logging in and logging out, and for little else.
"""

dg = PyDatagram()
dg.addServerHeader(clientChannel, self.ourChannel, CLIENTAGENT_SET_STATE)
dg.add_uint16(state)
self.send(dg)

def clientAddSessionObject(self, clientChannel, doId):
"""
Declares the specified DistributedObject to be a "session object",
meaning that it is destroyed when the client disconnects.
Generally used for avatars owned by the client.
"""

dg = PyDatagram()
dg.addServerHeader(clientChannel, self.ourChannel, CLIENTAGENT_ADD_SESSION_OBJECT)
dg.add_uint32(doId)
self.send(dg)

def clientAddInterest(self, clientChannel, interestId, parentId, zoneId):
"""
Opens an interest on the behalf of the client. This, used in conjunction
with add_interest: visible (or preferably, disabled altogether), will mitigate
possible security risks.
"""

dg = PyDatagram()
dg.addServerHeader(clientChannel, self.ourChannel, CLIENTAGENT_ADD_INTEREST)
dg.add_uint16(interestId)
dg.add_uint32(parentId)
dg.add_uint32(zoneId)
self.send(dg)

def setOwner(self, doId, newOwner):
"""
Sets the owner of a DistributedObject. This will enable the new owner to send "ownsend" fields,
and will generate an OwnerView.
"""

dg = PyDatagram()
dg.addServerHeader(doId, self.ourChannel, STATESERVER_OBJECT_SET_OWNER)
dg.add_uint64(newOwner)
self.send(dg)
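The new methods above are thin wrappers around Astron control messages. A minimal sketch, not from the commit, of how AI-side code might call them; the variable names, the state value, and the reason code are illustrative assumptions:

    # Assumes an AstronInternalRepository instance `air`, an already-generated
    # DistributedObjectAI `avatar`, and a known `clientChannel`.
    air.setAI(avatar.doId, air.ourChannel)                   # route the object's AI messages here
    air.setClientState(clientChannel, 2)                     # e.g. "established"; the numeric value is an assumption
    air.clientAddSessionObject(clientChannel, avatar.doId)   # destroy the avatar when the client disconnects
    air.eject(clientChannel, 122, 'Logged in elsewhere.')    # reason code and text are illustrative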
@@ -579,6 +579,11 @@ class ClientRepositoryBase(ConnectionRepository):
return None
return worldNP

def isLive(self):
if base.config.GetBool('force-live', 0):
return True
return not (__dev__ or launcher.isTestServer())

def isLocalId(self, id):
# By default, no ID's are local. See also
# ClientRepository.isLocalId().
@@ -1,3 +1,4 @@

"""MsgTypes module: contains distributed object message types"""

from direct.showbase.PythonUtil import invertDictLossless

@@ -15,12 +16,10 @@ MsgName2Id = {
'CLIENT_HEARTBEAT': 5,

'CLIENT_OBJECT_SET_FIELD': 120,
'CLIENT_OBJECT_SET_FIELDS': 121,
'CLIENT_OBJECT_LEAVING': 132,
'CLIENT_OBJECT_LEAVING_OWNER': 161,
'CLIENT_ENTER_OBJECT_REQUIRED': 142,
'CLIENT_ENTER_OBJECT_REQUIRED_OTHER': 143,
'CLIENT_ENTER_OBJECT_REQUIRED_OWNER': 172,
'CLIENT_ENTER_OBJECT_REQUIRED_OTHER_OWNER': 173,

'CLIENT_DONE_INTEREST_RESP': 204,

@@ -40,7 +39,7 @@ MsgName2Id = {
'CONTROL_ADD_RANGE': 9002,
'CONTROL_REMOVE_RANGE': 9003,
'CONTROL_ADD_POST_REMOVE': 9010,
'CONTROL_CLEAR_POST_REMOVES': 9011,
'CONTROL_CLEAR_POST_REMOVE': 9011,

# State Server control messages:
'STATESERVER_CREATE_OBJECT_WITH_REQUIRED': 2000,

@@ -90,11 +89,10 @@ MsgName2Id = {
# DBSS-backed-object messages:
'DBSS_OBJECT_ACTIVATE_WITH_DEFAULTS': 2200,
'DBSS_OBJECT_ACTIVATE_WITH_DEFAULTS_OTHER': 2201,
'DBSS_OBJECT_GET_ACTIVATED': 2207,
'DBSS_OBJECT_GET_ACTIVATED_RESP': 2208,
'DBSS_OBJECT_DELETE_FIELD_DISK': 2230,
'DBSS_OBJECT_DELETE_FIELDS_DISK': 2231,
'DBSS_OBJECT_DELETE_DISK': 2232,
'DBSS_OBJECT_GET_ACTIVATED_RESP': 2208,

# Database Server control messages:
'DBSERVER_CREATE_OBJECT': 3000,

@@ -139,9 +137,11 @@ MsgName2Id = {

# create id->name table for debugging
MsgId2Names = invertDictLossless(MsgName2Id)


# put msg names in module scope, assigned to msg value
globals().update(MsgName2Id)
for name, value in MsgName2Id.items():
exec '%s = %s' % (name, value)
del name, value

# These messages are ignored when the client is headed to the quiet zone
QUIET_ZONE_IGNORED_LIST = [

@@ -149,7 +149,7 @@ QUIET_ZONE_IGNORED_LIST = [
# We mustn't ignore updates, because some updates for localToon
# are always important.
#CLIENT_OBJECT_UPDATE_FIELD,


# These are now handled. If it is a create for a class that is in the
# uber zone, we should create it.
#CLIENT_CREATE_OBJECT_REQUIRED,
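The last MsgTypes hunk above replaces a Python 2 exec loop with globals().update(). A toy check, not part of the commit, of why the two are equivalent for a table of integer constants; the dictionary here is a tiny illustrative subset:

    MsgName2Id = {'CLIENT_HELLO': 1, 'CLIENT_HEARTBEAT': 5}   # toy subset

    # Old approach (Python 2 only):
    #   for name, value in MsgName2Id.items():
    #       exec '%s = %s' % (name, value)
    # New approach, same effect, and also valid on Python 3:
    globals().update(MsgName2Id)

    assert CLIENT_HEARTBEAT == 5   # the names are now module-level constants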
panda/direct/showbase/LerpBlendHelpers.py (new file, 20 lines)

@@ -0,0 +1,20 @@
__all__ = ['getBlend']

from pandac.PandaModules import *

easeIn = EaseInBlendType()
easeOut = EaseOutBlendType()
easeInOut = EaseInOutBlendType()
noBlend = NoBlendType()

def getBlend(blendType):
    if blendType == 'easeIn':
        return easeIn
    elif blendType == 'easeOut':
        return easeOut
    elif blendType == 'easeInOut':
        return easeInOut
    elif blendType == 'noBlend':
        return noBlend
    else:
        raise Exception('Error: LerpInterval.__getBlend: Unknown blend type')
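A short illustrative use of the helper above, not part of the commit. Given the directory it is added under, the module would be importable as direct.showbase.LerpBlendHelpers:

    from direct.showbase.LerpBlendHelpers import getBlend

    blend = getBlend('easeInOut')   # returns the shared EaseInOutBlendType() instance
    # A blend object like this is what Panda3D's lerp code uses to shape the
    # interpolation curve between the start and end of a lerp.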
@@ -1,7 +1,7 @@

"""Undocumented Module"""

__all__ = ['unique', 'indent', 'nonRepeatingRandomList',
__all__ = ['enumerate', 'unique', 'indent', 'nonRepeatingRandomList',
'writeFsmTree', 'StackTrace', 'traceFunctionCall', 'traceParentCall',
'printThisCall', 'tron', 'trace', 'troff', 'getClassLineage', 'pdir',
'_pdir', '_is_variadic', '_has_keywordargs', '_varnames', '_getcode',

@@ -45,6 +45,7 @@ import os
import sys
import random
import time
import new
import gc
#if __debug__:
import traceback

@@ -60,10 +61,31 @@ import bisect
__report_indent = 3

from direct.directutil import Verify
# Don't import libpandaexpressModules, which doesn't get built until
# genPyCode.
# import direct.extensions_native.extension_native_helpers
from panda3d.core import ConfigVariableBool

ScalarTypes = (types.FloatType, types.IntType, types.LongType)

import __builtin__
if not hasattr(__builtin__, 'enumerate'):
def enumerate(L):
"""Returns (0, L[0]), (1, L[1]), etc., allowing this syntax:
for i, item in enumerate(L):
...

enumerate is a built-in feature in Python 2.3, which implements it
using an iterator. For now, we can use this quick & dirty
implementation that returns a list of tuples that is completely
constructed every time enumerate() is called.
"""
return zip(xrange(len(L)), L)

__builtin__.enumerate = enumerate
else:
enumerate = __builtin__.enumerate

"""
# with one integer positional arg, this uses about 4/5 of the memory of the Functor class below
def Functor(function, *args, **kArgs):
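A quick check, not from the commit, that the enumerate fallback added above matches the built-in for ordinary lists (the built-in is used on any Python that already has it):

    def enumerate_fallback(L):
        return zip(xrange(len(L)), L)   # Python 2 spelling, as in the hunk above

    assert enumerate_fallback(['a', 'b']) == [(0, 'a'), (1, 'b')]
    assert list(enumerate(['a', 'b'])) == [(0, 'a'), (1, 'b')]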
@@ -165,7 +187,7 @@ class Queue:
def __len__(self):
return len(self.__list)

if __debug__ and __name__ == '__main__':
if __debug__:
q = Queue()
assert q.isEmpty()
q.clear()

@@ -612,7 +634,7 @@ class Signature:
l.append('*' + specials['positional'])
if 'keyword' in specials:
l.append('**' + specials['keyword'])
return "%s(%s)" % (self.name, ', '.join(l))
return "%s(%s)" % (self.name, string.join(l, ', '))
else:
return "%s(?)" % self.name

@@ -905,7 +927,7 @@ def binaryRepr(number, max_length = 32):
digits = map (operator.mod, shifts, max_length * [2])
if not digits.count (1): return 0
digits = digits [digits.index (1):]
return ''.join([repr(digit) for digit in digits])
return string.join (map (repr, digits), '')

class StdoutCapture:
# redirects stdout to a string

@@ -1191,7 +1213,7 @@ def extractProfile(*args, **kArgs):
def getSetterName(valueName, prefix='set'):
# getSetterName('color') -> 'setColor'
# getSetterName('color', 'get') -> 'getColor'
return '%s%s%s' % (prefix, valueName[0].upper(), valueName[1:])
return '%s%s%s' % (prefix, string.upper(valueName[0]), valueName[1:])
def getSetter(targetObj, valueName, prefix='set'):
# getSetter(smiley, 'pos') -> smiley.setPos
return getattr(targetObj, getSetterName(valueName, prefix))

@@ -1199,15 +1221,16 @@ def getSetter(targetObj, valueName, prefix='set'):
def mostDerivedLast(classList):
"""pass in list of classes. sorts list in-place, with derived classes
appearing after their bases"""

class ClassSortKey(object):
__slots__ = 'classobj',
def __init__(self, classobj):
self.classobj = classobj
def __lt__(self, other):
return issubclass(other.classobj, self.classobj)

classList.sort(key=ClassSortKey)
def compare(a, b):
if issubclass(a, b):
result=1
elif issubclass(b, a):
result=-1
else:
result=0
#print a, b, result
return result
classList.sort(compare)

"""
ParamObj/ParamSet
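The mostDerivedLast hunk above swaps a cmp-style comparison for a key object whose __lt__ consults issubclass. A toy illustration of the same pattern, with made-up classes, not code from the commit:

    class Base(object): pass
    class Mid(Base): pass
    class Leaf(Mid): pass

    class ClassSortKey(object):
        def __init__(self, classobj):
            self.classobj = classobj
        def __lt__(self, other):
            # "less than" means "is a base of", so base classes sort to the front
            return issubclass(other.classobj, self.classobj)

    classes = [Leaf, Base, Mid]
    classes.sort(key=ClassSortKey)
    assert classes == [Base, Mid, Leaf]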
@@ -1424,8 +1447,6 @@ class ParamObj:
# we've already compiled the defaults for this class
return
bases = list(cls.__bases__)
if object in bases:
bases.remove(object)
# bring less-derived classes to the front
mostDerivedLast(bases)
cls._Params = {}

@@ -1506,7 +1527,7 @@ class ParamObj:
# then the applier, or b) call the setter and queue the
# applier, depending on whether our params are locked
"""
setattr(self, setterName, types.MethodType(
setattr(self, setterName, new.instancemethod(
Functor(setterStub, param, setterFunc), self, self.__class__))
"""
def setterStub(self, value, param=param, origSetterName=origSetterName):

@@ -1611,7 +1632,7 @@ class ParamObj:
argStr += '%s=%s,' % (param, repr(value))
return '%s(%s)' % (self.__class__.__name__, argStr)

if __debug__ and __name__ == '__main__':
if __debug__:
class ParamObjTest(ParamObj):
class ParamSet(ParamObj.ParamSet):
Params = {

@@ -1808,7 +1829,7 @@ class POD:
argStr += '%s=%s,' % (name, repr(getSetter(self, name, 'get')()))
return '%s(%s)' % (self.__class__.__name__, argStr)

if __debug__ and __name__ == '__main__':
if __debug__:
class PODtest(POD):
DataSet = {
'foo': dict,

@@ -2138,7 +2159,7 @@ def pivotScalar(scalar, pivot):
# reflect scalar about pivot; see tests below
return pivot + (pivot - scalar)

if __debug__ and __name__ == '__main__':
if __debug__:
assert pivotScalar(1, 0) == -1
assert pivotScalar(-1, 0) == 1
assert pivotScalar(3, 5) == 7

@@ -2458,9 +2479,7 @@ def _getDtoolSuperBase():
global dtoolSuperBase
from pandac.PandaModules import PandaNode
dtoolSuperBase = PandaNode('').__class__.__bases__[0].__bases__[0].__bases__[0]
assert repr(dtoolSuperBase) == "<type 'libdtoolconfig.DTOOL_SUPER_BASE111'>" \
or repr(dtoolSuperBase) == "<type 'libdtoolconfig.DTOOL_SUPPER_BASE111'>" \
or repr(dtoolSuperBase) == "<type 'dtoolconfig.DTOOL_SUPER_BASE111'>"
assert repr(dtoolSuperBase) == "<type 'libdtoolconfig.DTOOL_SUPPER_BASE111'>"

safeReprNotify = None

@@ -2732,7 +2751,7 @@ def tagRepr(obj, tag):
return s
oldRepr = Functor(stringer, repr(obj))
stringer = None
obj.__repr__ = types.MethodType(Functor(reprWithTag, oldRepr, tag), obj, obj.__class__)
obj.__repr__ = new.instancemethod(Functor(reprWithTag, oldRepr, tag), obj, obj.__class__)
reprWithTag = None
return obj

@@ -2759,7 +2778,7 @@ def appendStr(obj, st):
return s
oldStr = Functor(stringer, str(obj))
stringer = None
obj.__str__ = types.MethodType(Functor(appendedStr, oldStr, st), obj, obj.__class__)
obj.__str__ = new.instancemethod(Functor(appendedStr, oldStr, st), obj, obj.__class__)
appendedStr = None
return obj
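Several hunks above replace the deprecated Python 2 `new` module with `types.MethodType`. A toy sketch of the substitution, using a made-up class rather than project code; the three-argument form shown is the Python 2 spelling used in these hunks:

    import types

    class Thing(object):
        pass

    def describe(self):
        return 'a %s' % self.__class__.__name__

    t = Thing()
    # old:  t.describe = new.instancemethod(describe, t, Thing)
    t.describe = types.MethodType(describe, t, Thing)   # on Python 3, drop the class argument
    assert t.describe() == 'a Thing'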
@@ -3596,9 +3615,9 @@ def recordCreationStackStr(cls):
self._creationStackTraceStrLst = StackTrace(start=1).compact().split(',')
return self.__moved_init__(*args, **kArgs)
def getCreationStackTraceCompactStr(self):
return ','.join(self._creationStackTraceStrLst)
return string.join(self._creationStackTraceStrLst, ',')
def printCreationStackTrace(self):
print ','.join(self._creationStackTraceStrLst)
print string.join(self._creationStackTraceStrLst, ',')
cls.__init__ = __recordCreationStackStr_init__
cls.getCreationStackTraceCompactStr = getCreationStackTraceCompactStr
cls.printCreationStackTrace = printCreationStackTrace

@@ -3750,7 +3769,7 @@ def flywheel(*args, **kArgs):
pass
return flywheel

if __debug__ and __name__ == '__main__':
if __debug__:
f = flywheel(['a','b','c','d'], countList=[11,20,3,4])
obj2count = {}
for obj in f:

@@ -3944,7 +3963,7 @@ def formatTimeCompact(seconds):
result += '%ss' % seconds
return result

if __debug__ and __name__ == '__main__':
if __debug__:
ftc = formatTimeCompact
assert ftc(0) == '0s'
assert ftc(1) == '1s'

@@ -3980,7 +3999,7 @@ def formatTimeExact(seconds):
result += '%ss' % seconds
return result

if __debug__ and __name__ == '__main__':
if __debug__:
fte = formatTimeExact
assert fte(0) == '0s'
assert fte(1) == '1s'

@@ -4019,7 +4038,7 @@ class AlphabetCounter:
break
return result

if __debug__ and __name__ == '__main__':
if __debug__:
def testAlphabetCounter():
tempList = []
ac = AlphabetCounter()

@@ -4188,14 +4207,14 @@ def unescapeHtmlString(s):
char = ' '
elif char == '%':
if i < (len(s)-2):
num = int(s[i+1:i+3], 16)
num = eval('0x' + s[i+1:i+3])
char = chr(num)
i += 2
i += 1
result += char
return result

if __debug__ and __name__ == '__main__':
if __debug__:
assert unescapeHtmlString('asdf') == 'asdf'
assert unescapeHtmlString('as+df') == 'as df'
assert unescapeHtmlString('as%32df') == 'as2df'
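The unescapeHtmlString hunk above parses the two hex digits of a %xx escape with int(..., 16) instead of eval(). A one-line illustration of the equivalence, not from the commit:

    assert int('32', 16) == eval('0x32') == 50
    # int(s, 16) only accepts hex digits, whereas eval() would execute arbitrary
    # expressions, so int() is the safer parse for untrusted input.
    assert chr(int('32', 16)) == '2'   # matching the 'as%32df' -> 'as2df' test above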
@@ -4252,7 +4271,7 @@ class HTMLStringToElements(HTMLParser):
def str2elements(str):
return HTMLStringToElements(str).getElements()

if __debug__ and __name__ == '__main__':
if __debug__:
s = ScratchPad()
assert len(str2elements('')) == 0
s.br = str2elements('<br>')

@@ -4292,7 +4311,7 @@ def repeatableRepr(obj):
return repeatableRepr(l)
return repr(obj)

if __debug__ and __name__ == '__main__':
if __debug__:
assert repeatableRepr({1: 'a', 2: 'b'}) == repeatableRepr({2: 'b', 1: 'a'})
assert repeatableRepr(set([1,2,3])) == repeatableRepr(set([3,2,1]))

@@ -4349,7 +4368,7 @@ class PriorityCallbacks:
for priority, callback in self._callbacks:
callback()

if __debug__ and __name__ == '__main__':
if __debug__:
l = []
def a(l=l):
l.append('a')
@@ -6,6 +6,13 @@ __all__ = ['SfxPlayer']
import math
from pandac.PandaModules import *

class Unior:
pass

UNIOR = Unior()
STUPID = not Unior()
WANT_FUCKING_AUDIO_CRASH = UNIOR is STUPID

class SfxPlayer:
"""
Play sound effects, potentially localized.

@@ -93,9 +100,7 @@ class SfxPlayer:
finalVolume = 1
if volume is not None:
finalVolume *= volume
if node is not None:
if node is not None and WANT_FUCKING_AUDIO_CRASH:
finalVolume *= node.getNetAudioVolume()
sfx.setVolume(finalVolume)
@@ -122,7 +122,7 @@ cull-bin gui-popup 60 unsorted
# This default only comes into play if you try to load a model
# and don't specify an extension.

default-model-extension .bam
default-model-extension .egg

# If we have the SpeedTree library available, we'll want to use it for
# loading compiled SpeedTree tree objects, and SpeedTree forest

@@ -63,16 +63,7 @@ default-directnotify-level warning

model-path $MAIN_DIR
model-path $THIS_PRC_DIR/..
model-path $THIS_PRC_DIR/../models/
model-path $THIS_PRC_DIR/../resources/
model-path $THIS_PRC_DIR/../../models/
model-path $THIS_PRC_DIR/../../resources/
model-path ../../models/
model-path ../../resources/
model-path ../models/
model-path ../resources/
model-path models/
model-path resources/
model-path $THIS_PRC_DIR/../models

# This enable the automatic creation of a TK window when running
# Direct.
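The prc hunks above switch the default model extension back to .egg and collapse the model-path list to a single $THIS_PRC_DIR/../models entry. For reference only, the same variables can also be set at runtime through Panda3D's standard loadPrcFileData API; this snippet is illustrative and not part of the commit:

    from panda3d.core import loadPrcFileData

    # The first argument is just a label for this in-memory prc "page".
    loadPrcFileData('', 'default-model-extension .egg')
    loadPrcFileData('', 'model-path ../models')   # path is illustrative; $THIS_PRC_DIR only expands inside a real .prc file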
@@ -101,8 +101,8 @@ def Dtool_PreloadDLL(module):
# Nowadays, we can compile libpandaexpress with libpanda into a
# .pyd file called panda3d/core.pyd which can be imported without
# any difficulty. Let's see if this is the case.
if Dtool_FindModule("panda.panda3d.core"):
from panda.panda3d.core import *
if Dtool_FindModule("panda3d.core"):
from panda3d.core import *
else:
Dtool_PreloadDLL("libpandaexpress")
from libpandaexpress import *
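The hunk above changes the probe from the non-existent panda.panda3d.core package to the real panda3d.core module. A toy sketch of the same probe-then-fall-back import pattern using plain try/except rather than the project's Dtool_FindModule helper; purely illustrative:

    try:
        import panda3d.core as core        # preferred: the combined panda3d/core extension module
    except ImportError:
        import libpandaexpress as core     # older layout, name taken from the hunk above; availability assumed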
(new file: gevent 1.0.1 package metadata, 85 lines)

@@ -0,0 +1,85 @@
Metadata-Version: 1.1
Name: gevent
Version: 1.0.1
Summary: Coroutine-based network library
Home-page: http://www.gevent.org/
Author: Denis Bilenko
Author-email: denis.bilenko@gmail.com
License: UNKNOWN
Description: gevent_
=======

gevent_ is a coroutine-based Python networking library.

Features include:

* Fast event loop based on libev_.
* Lightweight execution units based on greenlet_.
* Familiar API that re-uses concepts from the Python standard library.
* Cooperative sockets with SSL support.
* DNS queries performed through c-ares_ or a threadpool.
* Ability to use standard library and 3rd party modules written for standard blocking sockets

gevent_ is `inspired by eventlet`_ but features more consistent API, simpler implementation and better performance. Read why others `use gevent`_ and check out the list of the `open source projects based on gevent`_.

gevent_ is written and maintained by `Denis Bilenko`_ and is licensed under MIT license.

get gevent
----------

Install Python 2.5 or newer and greenlet_ extension.

Download the latest release from `Python Package Index`_ or clone `the repository`_.

Read the documentation online at http://www.gevent.org

Post feedback and issues on the `bug tracker`_, `mailing list`_, blog_ and `twitter (@gevent)`_.

installing from github
----------------------

To install the latest development version:

pip install cython git+git://github.com/surfly/gevent.git#egg=gevent

running tests
-------------

python setup.py build

cd greentest

PYTHONPATH=.. python testrunner.py --config ../known_failures.py

.. _gevent: http://www.gevent.org
.. _greenlet: http://pypi.python.org/pypi/greenlet
.. _libev: http://libev.schmorp.de/
.. _c-ares: http://c-ares.haxx.se/
.. _inspired by eventlet: http://blog.gevent.org/2010/02/27/why-gevent/
.. _use gevent: http://groups.google.com/group/gevent/browse_thread/thread/4de9703e5dca8271
.. _open source projects based on gevent: https://github.com/surfly/gevent/wiki/Projects
.. _Denis Bilenko: http://denisbilenko.com
.. _Python Package Index: http://pypi.python.org/pypi/gevent
.. _the repository: https://github.com/surfly/gevent
.. _bug tracker: https://github.com/surfly/gevent/wiki/Projects
.. _mailing list: http://groups.google.com/group/gevent
.. _blog: http://blog.gevent.org
.. _twitter (@gevent): http://twitter.com/gevent

Platform: UNKNOWN
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 2.5
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Operating System :: POSIX
Classifier: Operating System :: Microsoft :: Windows
Classifier: Topic :: Internet
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Intended Audience :: Developers
Classifier: Development Status :: 4 - Beta
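The commit vendors gevent 1.0.1 into panda/python/Lib/site-packages (its manifest and sources follow below). As a minimal illustration of the coroutine API the metadata above describes, not project code; the worker names and delays are arbitrary:

    import gevent

    def worker(name, delay):
        gevent.sleep(delay)          # cooperative: yields to the event loop
        return name

    jobs = [gevent.spawn(worker, 'a', 0.1), gevent.spawn(worker, 'b', 0.2)]
    gevent.joinall(jobs)             # wait for both greenlets to finish
    print [job.value for job in jobs]   # Python 2 print, matching the rest of the codebase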
@ -0,0 +1,446 @@
|
|||
AUTHORS
|
||||
LICENSE
|
||||
MANIFEST.in
|
||||
Makefile.ext
|
||||
README.rst
|
||||
TODO
|
||||
changelog.rst
|
||||
known_failures.py
|
||||
c-ares/AUTHORS
|
||||
c-ares/CHANGELOG.git
|
||||
c-ares/CHANGES
|
||||
c-ares/CHANGES.0
|
||||
c-ares/README
|
||||
c-ares/README.cares
|
||||
c-ares/RELEASE-NOTES
|
||||
c-ares/TODO
|
||||
c-ares/ares.h
|
||||
c-ares/ares__close_sockets.c
|
||||
c-ares/ares__get_hostent.c
|
||||
c-ares/ares__read_line.c
|
||||
c-ares/ares__timeval.c
|
||||
c-ares/ares_build.h.dist
|
||||
c-ares/ares_build.h.in
|
||||
c-ares/ares_cancel.c
|
||||
c-ares/ares_config.h.in
|
||||
c-ares/ares_create_query.c
|
||||
c-ares/ares_data.c
|
||||
c-ares/ares_data.h
|
||||
c-ares/ares_destroy.c
|
||||
c-ares/ares_dns.h
|
||||
c-ares/ares_expand_name.c
|
||||
c-ares/ares_expand_string.c
|
||||
c-ares/ares_fds.c
|
||||
c-ares/ares_free_hostent.c
|
||||
c-ares/ares_free_string.c
|
||||
c-ares/ares_getenv.c
|
||||
c-ares/ares_getenv.h
|
||||
c-ares/ares_gethostbyaddr.c
|
||||
c-ares/ares_gethostbyname.c
|
||||
c-ares/ares_getnameinfo.c
|
||||
c-ares/ares_getopt.c
|
||||
c-ares/ares_getopt.h
|
||||
c-ares/ares_getsock.c
|
||||
c-ares/ares_init.c
|
||||
c-ares/ares_iphlpapi.h
|
||||
c-ares/ares_ipv6.h
|
||||
c-ares/ares_library_init.c
|
||||
c-ares/ares_library_init.h
|
||||
c-ares/ares_llist.c
|
||||
c-ares/ares_llist.h
|
||||
c-ares/ares_mkquery.c
|
||||
c-ares/ares_nowarn.c
|
||||
c-ares/ares_nowarn.h
|
||||
c-ares/ares_options.c
|
||||
c-ares/ares_parse_a_reply.c
|
||||
c-ares/ares_parse_aaaa_reply.c
|
||||
c-ares/ares_parse_mx_reply.c
|
||||
c-ares/ares_parse_naptr_reply.c
|
||||
c-ares/ares_parse_ns_reply.c
|
||||
c-ares/ares_parse_ptr_reply.c
|
||||
c-ares/ares_parse_soa_reply.c
|
||||
c-ares/ares_parse_srv_reply.c
|
||||
c-ares/ares_parse_txt_reply.c
|
||||
c-ares/ares_platform.c
|
||||
c-ares/ares_platform.h
|
||||
c-ares/ares_private.h
|
||||
c-ares/ares_process.c
|
||||
c-ares/ares_query.c
|
||||
c-ares/ares_rules.h
|
||||
c-ares/ares_search.c
|
||||
c-ares/ares_send.c
|
||||
c-ares/ares_setup.h
|
||||
c-ares/ares_strcasecmp.c
|
||||
c-ares/ares_strcasecmp.h
|
||||
c-ares/ares_strdup.c
|
||||
c-ares/ares_strdup.h
|
||||
c-ares/ares_strerror.c
|
||||
c-ares/ares_timeout.c
|
||||
c-ares/ares_version.c
|
||||
c-ares/ares_version.h
|
||||
c-ares/ares_writev.c
|
||||
c-ares/ares_writev.h
|
||||
c-ares/bitncmp.c
|
||||
c-ares/bitncmp.h
|
||||
c-ares/config-win32.h
|
||||
c-ares/config.guess
|
||||
c-ares/config.sub
|
||||
c-ares/configure
|
||||
c-ares/get_ver.awk
|
||||
c-ares/gitinfo
|
||||
c-ares/inet_net_pton.c
|
||||
c-ares/inet_net_pton.h
|
||||
c-ares/inet_ntop.c
|
||||
c-ares/inet_ntop.h
|
||||
c-ares/install-sh
|
||||
c-ares/missing
|
||||
c-ares/nameser.h
|
||||
c-ares/setup_once.h
|
||||
c-ares/windows_port.c
|
||||
doc/Makefile
|
||||
doc/community.rst
|
||||
doc/conf.py
|
||||
doc/contents.rst
|
||||
doc/generate_rst.py
|
||||
doc/gevent.core.rst
|
||||
doc/gevent.event.rst
|
||||
doc/gevent.hub.rst
|
||||
doc/gevent.queue.rst
|
||||
doc/gevent.rst
|
||||
doc/intro.rst
|
||||
doc/make.bat
|
||||
doc/mysphinxext.py
|
||||
doc/networking.rst
|
||||
doc/reference.rst
|
||||
doc/servers.rst
|
||||
doc/success.rst
|
||||
doc/synchronization.rst
|
||||
doc/whatsnew_1_0.rst
|
||||
doc/_templates/layout.html
|
||||
doc/mytheme/defindex.html
|
||||
doc/mytheme/domainindex.html
|
||||
doc/mytheme/genindex-single.html
|
||||
doc/mytheme/genindex-split.html
|
||||
doc/mytheme/genindex.html
|
||||
doc/mytheme/layout.html
|
||||
doc/mytheme/modindex.html
|
||||
doc/mytheme/page.html
|
||||
doc/mytheme/search.html
|
||||
doc/mytheme/theme.conf
|
||||
doc/mytheme/changes/frameset.html
|
||||
doc/mytheme/changes/rstsource.html
|
||||
doc/mytheme/changes/versionchanges.html
|
||||
doc/mytheme/static/basic.css_t
|
||||
doc/mytheme/static/file.png
|
||||
doc/mytheme/static/minus.png
|
||||
doc/mytheme/static/omegle_48.png
|
||||
doc/mytheme/static/plus.png
|
||||
doc/mytheme/static/spotify_logo.png
|
||||
doc/mytheme/static/transparent.gif
|
||||
doc/mytheme/static/img/main-two-columns.gif
|
||||
examples/concurrent_download.py
|
||||
examples/dns_mass_resolve.py
|
||||
examples/echoserver.py
|
||||
examples/geventsendfile.py
|
||||
examples/portforwarder.py
|
||||
examples/processes.py
|
||||
examples/psycopg2_pool.py
|
||||
examples/server.crt
|
||||
examples/server.key
|
||||
examples/threadpool.py
|
||||
examples/udp_client.py
|
||||
examples/udp_server.py
|
||||
examples/unixsocket_client.py
|
||||
examples/unixsocket_server.py
|
||||
examples/webproxy.py
|
||||
examples/webpy.py
|
||||
examples/wsgiserver.py
|
||||
examples/wsgiserver_ssl.py
|
||||
examples/webchat/README
|
||||
examples/webchat/__init__.py
|
||||
examples/webchat/application.py
|
||||
examples/webchat/manage.py
|
||||
examples/webchat/run_standalone.py
|
||||
examples/webchat/run_uwsgi
|
||||
examples/webchat/settings.py
|
||||
examples/webchat/urls.py
|
||||
examples/webchat/chat/__init__.py
|
||||
examples/webchat/chat/views.py
|
||||
examples/webchat/static/chat.css
|
||||
examples/webchat/static/chat.js
|
||||
examples/webchat/templates/404.html
|
||||
examples/webchat/templates/500.html
|
||||
examples/webchat/templates/index.html
|
||||
examples/webchat/templates/message.html
|
||||
gevent/__init__.py
|
||||
gevent/_semaphore.pyd
|
||||
gevent/_semaphore.pyx
|
||||
gevent/_threading.py
|
||||
gevent/_util.pyd
|
||||
gevent/_util.pyx
|
||||
gevent/ares.pyd
|
||||
gevent/ares.pyx
|
||||
gevent/backdoor.py
|
||||
gevent/baseserver.py
|
||||
gevent/callbacks.c
|
||||
gevent/callbacks.h
|
||||
gevent/cares.pxd
|
||||
gevent/cares_ntop.h
|
||||
gevent/cares_pton.h
|
||||
gevent/core.ppyx
|
||||
gevent/core.pyd
|
||||
gevent/core.pyx
|
||||
gevent/coros.py
|
||||
gevent/dnshelper.c
|
||||
gevent/event.py
|
||||
gevent/fileobject.py
|
||||
gevent/gevent._semaphore.c
|
||||
gevent/gevent._util.c
|
||||
gevent/gevent.ares.c
|
||||
gevent/gevent.ares.h
|
||||
gevent/gevent.core.c
|
||||
gevent/gevent.core.h
|
||||
gevent/greenlet.py
|
||||
gevent/hub.py
|
||||
gevent/libev.h
|
||||
gevent/libev.pxd
|
||||
gevent/libev_vfd.h
|
||||
gevent/local.py
|
||||
gevent/lock.py
|
||||
gevent/monkey.py
|
||||
gevent/os.py
|
||||
gevent/pool.py
|
||||
gevent/python.pxd
|
||||
gevent/pywsgi.py
|
||||
gevent/queue.py
|
||||
gevent/resolver_ares.py
|
||||
gevent/resolver_thread.py
|
||||
gevent/select.py
|
||||
gevent/server.py
|
||||
gevent/socket.py
|
||||
gevent/ssl.py
|
||||
gevent/stathelper.c
|
||||
gevent/subprocess.py
|
||||
gevent/thread.py
|
||||
gevent/threading.py
|
||||
gevent/threadpool.py
|
||||
gevent/timeout.py
|
||||
gevent/util.py
|
||||
gevent/win32util.py
|
||||
gevent/wsgi.py
|
||||
gevent.egg-info/PKG-INFO
|
||||
gevent.egg-info/SOURCES.txt
|
||||
gevent.egg-info/dependency_links.txt
|
||||
gevent.egg-info/requires.txt
|
||||
gevent.egg-info/top_level.txt
|
||||
greentest/badcert.pem
|
||||
greentest/badkey.pem
|
||||
greentest/bench_sendall.py
|
||||
greentest/bench_sleep0.py
|
||||
greentest/bench_spawn.py
|
||||
greentest/greentest.py
|
||||
greentest/https_svn_python_org_root.pem
|
||||
greentest/keycert.pem
|
||||
greentest/lock_tests.py
|
||||
greentest/monkey_test.py
|
||||
greentest/nullcert.pem
|
||||
greentest/patched_tests_setup.py
|
||||
greentest/sha256.pem
|
||||
greentest/six.py
|
||||
greentest/test__GreenletExit.py
|
||||
greentest/test___example_servers.py
|
||||
greentest/test___monkey_patching.py
|
||||
greentest/test__all__.py
|
||||
greentest/test__api.py
|
||||
greentest/test__api_timeout.py
|
||||
greentest/test__ares_host_result.py
|
||||
greentest/test__backdoor.py
|
||||
greentest/test__core.py
|
||||
greentest/test__core_async.py
|
||||
greentest/test__core_callback.py
|
||||
greentest/test__core_loop_run.py
|
||||
greentest/test__core_stat.py
|
||||
greentest/test__core_timer.py
|
||||
greentest/test__core_watcher.py
|
||||
greentest/test__destroy.py
|
||||
greentest/test__doctests.py
|
||||
greentest/test__environ.py
|
||||
greentest/test__event.py
|
||||
greentest/test__example_echoserver.py
|
||||
greentest/test__example_portforwarder.py
|
||||
greentest/test__example_udp_client.py
|
||||
greentest/test__example_udp_server.py
|
||||
greentest/test__examples.py
|
||||
greentest/test__exc_info.py
|
||||
greentest/test__execmodules.py
|
||||
greentest/test__fileobject.py
|
||||
greentest/test__greenio.py
|
||||
greentest/test__greenlet.py
|
||||
greentest/test__greenletset.py
|
||||
greentest/test__greenness.py
|
||||
greentest/test__hub.py
|
||||
greentest/test__issue302monkey.py
|
||||
greentest/test__issue6.py
|
||||
greentest/test__joinall.py
|
||||
greentest/test__local.py
|
||||
greentest/test__loop_callback.py
|
||||
greentest/test__memleak.py
|
||||
greentest/test__monkey.py
|
||||
greentest/test__nondefaultloop.py
|
||||
greentest/test__order.py
|
||||
greentest/test__os.py
|
||||
greentest/test__pool.py
|
||||
greentest/test__pywsgi.py
|
||||
greentest/test__queue.py
|
||||
greentest/test__refcount.py
|
||||
greentest/test__select.py
|
||||
greentest/test__semaphore.py
|
||||
greentest/test__server.py
|
||||
greentest/test__server_pywsgi.py
|
||||
greentest/test__signal.py
|
||||
greentest/test__sleep0.py
|
||||
greentest/test__socket.py
|
||||
greentest/test__socket_close.py
|
||||
greentest/test__socket_dns.py
|
||||
greentest/test__socket_dns6.py
|
||||
greentest/test__socket_errors.py
|
||||
greentest/test__socket_ex.py
|
||||
greentest/test__socket_ssl.py
|
||||
greentest/test__socket_timeout.py
|
||||
greentest/test__ssl.py
|
||||
greentest/test__subprocess.py
|
||||
greentest/test__subprocess_interrupted.py
|
||||
greentest/test__subprocess_poll.py
|
||||
greentest/test__systemerror.py
|
||||
greentest/test__threading.py
|
||||
greentest/test__threading_patched_local.py
|
||||
greentest/test__threading_vs_settrace.py
|
||||
greentest/test__threadpool.py
|
||||
greentest/test__timeout.py
|
||||
greentest/test_ares_timeout.py
|
||||
greentest/test_close_backend_fd.py
|
||||
greentest/test_hub_join.py
|
||||
greentest/test_hub_join_timeout.py
|
||||
greentest/test_issue112.py
|
||||
greentest/test_queue.py
|
||||
greentest/test_server.crt
|
||||
greentest/test_server.key
|
||||
greentest/test_threading_2.py
|
||||
greentest/testrunner.py
|
||||
greentest/tests_that_dont_use_resolver.txt
|
||||
greentest/util.py
|
||||
greentest/wrongcert.pem
|
||||
greentest/xtest__benchmarks.py
|
||||
greentest/xtest__issue91.py
|
||||
greentest/xtest__server_close.py
|
||||
greentest/xtest_signal.py
|
||||
greentest/xtest_stdlib.py
|
||||
greentest/2.5/test_httplib.py
|
||||
greentest/2.5/test_queue.py
|
||||
greentest/2.5/test_select.py
|
||||
greentest/2.5/test_signal.py
|
||||
greentest/2.5/test_socket.py
|
||||
greentest/2.5/test_socket_ssl.py
|
||||
greentest/2.5/test_socketserver.py
|
||||
greentest/2.5/test_subprocess.py
|
||||
greentest/2.5/test_thread.py
|
||||
greentest/2.5/test_threading.py
|
||||
greentest/2.5/test_threading_local.py
|
||||
greentest/2.5/test_timeout.py
|
||||
greentest/2.5/test_urllib.py
|
||||
greentest/2.5/test_urllib2.py
|
||||
greentest/2.5/test_urllib2_localnet.py
|
||||
greentest/2.5/test_urllib2net.py
|
||||
greentest/2.5/test_wsgiref.py
|
||||
greentest/2.5/version
|
||||
greentest/2.6/badcert.pem
|
||||
greentest/2.6/badkey.pem
|
||||
greentest/2.6/https_svn_python_org_root.pem
|
||||
greentest/2.6/keycert.pem
|
||||
greentest/2.6/lock_tests.py
|
||||
greentest/2.6/nullcert.pem
|
||||
greentest/2.6/sha256.pem
|
||||
greentest/2.6/test_asyncore.py
|
||||
greentest/2.6/test_ftplib.py
|
||||
greentest/2.6/test_httplib.py
|
||||
greentest/2.6/test_httpservers.py
|
||||
greentest/2.6/test_queue.py
|
||||
greentest/2.6/test_select.py
|
||||
greentest/2.6/test_signal.py
|
||||
greentest/2.6/test_smtplib.py
|
||||
greentest/2.6/test_socket.py
|
||||
greentest/2.6/test_socketserver.py
|
||||
greentest/2.6/test_ssl.py
|
||||
greentest/2.6/test_subprocess.py
|
||||
greentest/2.6/test_telnetlib.py
|
||||
greentest/2.6/test_thread.py
|
||||
greentest/2.6/test_threading.py
|
||||
greentest/2.6/test_threading_local.py
|
||||
greentest/2.6/test_timeout.py
|
||||
greentest/2.6/test_urllib.py
|
||||
greentest/2.6/test_urllib2.py
|
||||
greentest/2.6/test_urllib2_localnet.py
|
||||
greentest/2.6/test_urllib2net.py
|
||||
greentest/2.6/test_wsgiref.py
|
||||
greentest/2.6/version
|
||||
greentest/2.6/wrongcert.pem
|
||||
greentest/2.7/badcert.pem
|
||||
greentest/2.7/badkey.pem
|
||||
greentest/2.7/https_svn_python_org_root.pem
|
||||
greentest/2.7/keycert.pem
|
||||
greentest/2.7/lock_tests.py
|
||||
greentest/2.7/nokia.pem
|
||||
greentest/2.7/nullcert.pem
|
||||
greentest/2.7/sha256.pem
|
||||
greentest/2.7/test_asyncore.py
|
||||
greentest/2.7/test_ftplib.py
|
||||
greentest/2.7/test_httplib.py
|
||||
greentest/2.7/test_httpservers.py
|
||||
greentest/2.7/test_queue.py
|
||||
greentest/2.7/test_select.py
|
||||
greentest/2.7/test_signal.py
|
||||
greentest/2.7/test_smtplib.py
|
||||
greentest/2.7/test_socket.py
|
||||
greentest/2.7/test_socketserver.py
|
||||
greentest/2.7/test_ssl.py
|
||||
greentest/2.7/test_subprocess.py
|
||||
greentest/2.7/test_telnetlib.py
|
||||
greentest/2.7/test_thread.py
|
||||
greentest/2.7/test_threading.py
|
||||
greentest/2.7/test_threading_local.py
|
||||
greentest/2.7/test_timeout.py
|
||||
greentest/2.7/test_urllib.py
|
||||
greentest/2.7/test_urllib2.py
|
||||
greentest/2.7/test_urllib2_localnet.py
|
||||
greentest/2.7/test_urllib2net.py
|
||||
greentest/2.7/test_wsgiref.py
|
||||
greentest/2.7/version
|
||||
greentest/2.7/wrongcert.pem
|
||||
greentest/2.7/subprocessdata/sigchild_ignore.py
|
||||
libev/Changes
|
||||
libev/LICENSE
|
||||
libev/Makefile.in
|
||||
libev/README
|
||||
libev/config.guess
|
||||
libev/config.h.in
|
||||
libev/config.sub
|
||||
libev/configure
|
||||
libev/ev.c
|
||||
libev/ev.h
|
||||
libev/ev_epoll.c
|
||||
libev/ev_kqueue.c
|
||||
libev/ev_poll.c
|
||||
libev/ev_port.c
|
||||
libev/ev_select.c
|
||||
libev/ev_vars.h
|
||||
libev/ev_win32.c
|
||||
libev/ev_wrap.h
|
||||
libev/install-sh
|
||||
libev/ltmain.sh
|
||||
libev/missing
|
||||
util/cythonpp.py
|
||||
util/makedeb.sh
|
||||
util/makedist.py
|
||||
util/pyflakes.py
|
||||
util/set_version.py
|
||||
util/wintest.py
|
|
@ -0,0 +1 @@
|
|||
|
|
@ -0,0 +1,70 @@
|
|||
..\gevent\backdoor.py
|
||||
..\gevent\baseserver.py
|
||||
..\gevent\coros.py
|
||||
..\gevent\event.py
|
||||
..\gevent\fileobject.py
|
||||
..\gevent\greenlet.py
|
||||
..\gevent\hub.py
|
||||
..\gevent\local.py
|
||||
..\gevent\lock.py
|
||||
..\gevent\monkey.py
|
||||
..\gevent\os.py
|
||||
..\gevent\pool.py
|
||||
..\gevent\pywsgi.py
|
||||
..\gevent\queue.py
|
||||
..\gevent\resolver_ares.py
|
||||
..\gevent\resolver_thread.py
|
||||
..\gevent\select.py
|
||||
..\gevent\server.py
|
||||
..\gevent\socket.py
|
||||
..\gevent\ssl.py
|
||||
..\gevent\subprocess.py
|
||||
..\gevent\thread.py
|
||||
..\gevent\threading.py
|
||||
..\gevent\threadpool.py
|
||||
..\gevent\timeout.py
|
||||
..\gevent\util.py
|
||||
..\gevent\win32util.py
|
||||
..\gevent\wsgi.py
|
||||
..\gevent\_threading.py
|
||||
..\gevent\__init__.py
|
||||
..\gevent\backdoor.pyc
|
||||
..\gevent\baseserver.pyc
|
||||
..\gevent\coros.pyc
|
||||
..\gevent\event.pyc
|
||||
..\gevent\fileobject.pyc
|
||||
..\gevent\greenlet.pyc
|
||||
..\gevent\hub.pyc
|
||||
..\gevent\local.pyc
|
||||
..\gevent\lock.pyc
|
||||
..\gevent\monkey.pyc
|
||||
..\gevent\os.pyc
|
||||
..\gevent\pool.pyc
|
||||
..\gevent\pywsgi.pyc
|
||||
..\gevent\queue.pyc
|
||||
..\gevent\resolver_ares.pyc
|
||||
..\gevent\resolver_thread.pyc
|
||||
..\gevent\select.pyc
|
||||
..\gevent\server.pyc
|
||||
..\gevent\socket.pyc
|
||||
..\gevent\ssl.pyc
|
||||
..\gevent\subprocess.pyc
|
||||
..\gevent\thread.pyc
|
||||
..\gevent\threading.pyc
|
||||
..\gevent\threadpool.pyc
|
||||
..\gevent\timeout.pyc
|
||||
..\gevent\util.pyc
|
||||
..\gevent\win32util.pyc
|
||||
..\gevent\wsgi.pyc
|
||||
..\gevent\_threading.pyc
|
||||
..\gevent\__init__.pyc
|
||||
..\gevent\core.pyd
|
||||
..\gevent\ares.pyd
|
||||
..\gevent\_semaphore.pyd
|
||||
..\gevent\_util.pyd
|
||||
.\
|
||||
dependency_links.txt
|
||||
PKG-INFO
|
||||
requires.txt
|
||||
SOURCES.txt
|
||||
top_level.txt
|
|
@ -0,0 +1 @@
|
|||
greenlet
|
|
@ -0,0 +1 @@
|
|||
gevent
|
57
panda/python/Lib/site-packages/gevent/__init__.py
Normal file
57
panda/python/Lib/site-packages/gevent/__init__.py
Normal file
|
@ -0,0 +1,57 @@
|
|||
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||
"""
|
||||
gevent is a coroutine-based Python networking library that uses greenlet
|
||||
to provide a high-level synchronous API on top of libev event loop.
|
||||
|
||||
See http://www.gevent.org/ for the documentation.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
version_info = (1, 0, 1, 'final', 0)
|
||||
__version__ = '1.0.1'
|
||||
__changeset__ = '1.0.1-0-g747630a'
|
||||
|
||||
|
||||
__all__ = ['get_hub',
|
||||
'Greenlet',
|
||||
'GreenletExit',
|
||||
'spawn',
|
||||
'spawn_later',
|
||||
'spawn_raw',
|
||||
'iwait',
|
||||
'wait',
|
||||
'killall',
|
||||
'Timeout',
|
||||
'with_timeout',
|
||||
'getcurrent',
|
||||
'sleep',
|
||||
'idle',
|
||||
'kill',
|
||||
'signal',
|
||||
'fork',
|
||||
'reinit']
|
||||
|
||||
|
||||
from gevent.hub import get_hub, iwait, wait
|
||||
from gevent.greenlet import Greenlet, joinall, killall
|
||||
spawn = Greenlet.spawn
|
||||
spawn_later = Greenlet.spawn_later
|
||||
from gevent.timeout import Timeout, with_timeout
|
||||
from gevent.hub import getcurrent, GreenletExit, spawn_raw, sleep, idle, kill, signal, reinit
|
||||
try:
|
||||
from gevent.os import fork
|
||||
except ImportError:
|
||||
__all__.remove('fork')
|
||||
|
||||
|
||||
# the following makes hidden imports visible to freezing tools like
|
||||
# py2exe. see https://github.com/surfly/gevent/issues/181
|
||||
def __dependencies_for_freezing():
|
||||
from gevent import core, resolver_thread, resolver_ares, socket,\
|
||||
threadpool, thread, threading, select, subprocess
|
||||
import pprint
|
||||
import traceback
|
||||
import signal
|
||||
|
||||
del __dependencies_for_freezing
|
BIN
panda/python/Lib/site-packages/gevent/_semaphore.pyd
Normal file
BIN
panda/python/Lib/site-packages/gevent/_semaphore.pyd
Normal file
Binary file not shown.
508
panda/python/Lib/site-packages/gevent/_threading.py
Normal file
508
panda/python/Lib/site-packages/gevent/_threading.py
Normal file
|
@ -0,0 +1,508 @@
|
|||
"""A clone of threading module (version 2.7.2) that always
|
||||
targets real OS threads. (Unlike 'threading' which flips between
|
||||
green and OS threads based on whether the monkey patching is in effect
|
||||
or not).
|
||||
|
||||
This module is missing 'Thread' class, but includes 'Queue'.
|
||||
"""
|
||||
from Queue import Full, Empty
|
||||
from collections import deque
|
||||
import heapq
|
||||
from time import time as _time, sleep as _sleep
|
||||
|
||||
from gevent import monkey
|
||||
from gevent.hub import PY3
|
||||
|
||||
|
||||
__all__ = ['Condition',
|
||||
'Event',
|
||||
'Lock',
|
||||
'RLock',
|
||||
'Semaphore',
|
||||
'BoundedSemaphore',
|
||||
'Queue',
|
||||
'local',
|
||||
'stack_size']
|
||||
|
||||
|
||||
thread_name = '_thread' if PY3 else 'thread'
|
||||
start_new_thread, Lock, get_ident, local, stack_size = monkey.get_original(thread_name, [
|
||||
'start_new_thread', 'allocate_lock', 'get_ident', '_local', 'stack_size'])
|
||||
|
||||
|
||||
class RLock(object):
|
||||
|
||||
def __init__(self):
|
||||
self.__block = Lock()
|
||||
self.__owner = None
|
||||
self.__count = 0
|
||||
|
||||
def __repr__(self):
|
||||
owner = self.__owner
|
||||
return "<%s owner=%r count=%d>" % (
|
||||
self.__class__.__name__, owner, self.__count)
|
||||
|
||||
def acquire(self, blocking=1):
|
||||
me = get_ident()
|
||||
if self.__owner == me:
|
||||
self.__count = self.__count + 1
|
||||
return 1
|
||||
rc = self.__block.acquire(blocking)
|
||||
if rc:
|
||||
self.__owner = me
|
||||
self.__count = 1
|
||||
return rc
|
||||
|
||||
__enter__ = acquire
|
||||
|
||||
def release(self):
|
||||
if self.__owner != get_ident():
|
||||
raise RuntimeError("cannot release un-acquired lock")
|
||||
self.__count = count = self.__count - 1
|
||||
if not count:
|
||||
self.__owner = None
|
||||
self.__block.release()
|
||||
|
||||
def __exit__(self, t, v, tb):
|
||||
self.release()
|
||||
|
||||
# Internal methods used by condition variables
|
||||
|
||||
def _acquire_restore(self, count_owner):
|
||||
count, owner = count_owner
|
||||
self.__block.acquire()
|
||||
self.__count = count
|
||||
self.__owner = owner
|
||||
|
||||
def _release_save(self):
|
||||
count = self.__count
|
||||
self.__count = 0
|
||||
owner = self.__owner
|
||||
self.__owner = None
|
||||
self.__block.release()
|
||||
return (count, owner)
|
||||
|
||||
def _is_owned(self):
|
||||
return self.__owner == get_ident()
|
||||
|
||||
|
||||
class Condition(object):
|
||||
|
||||
def __init__(self, lock=None):
|
||||
if lock is None:
|
||||
lock = RLock()
|
||||
self.__lock = lock
|
||||
# Export the lock's acquire() and release() methods
|
||||
self.acquire = lock.acquire
|
||||
self.release = lock.release
|
||||
# If the lock defines _release_save() and/or _acquire_restore(),
|
||||
# these override the default implementations (which just call
|
||||
# release() and acquire() on the lock). Ditto for _is_owned().
|
||||
try:
|
||||
self._release_save = lock._release_save
|
||||
except AttributeError:
|
||||
pass
|
||||
try:
|
||||
self._acquire_restore = lock._acquire_restore
|
||||
except AttributeError:
|
||||
pass
|
||||
try:
|
||||
self._is_owned = lock._is_owned
|
||||
except AttributeError:
|
||||
pass
|
||||
self.__waiters = []
|
||||
|
||||
def __enter__(self):
|
||||
return self.__lock.__enter__()
|
||||
|
||||
def __exit__(self, *args):
|
||||
return self.__lock.__exit__(*args)
|
||||
|
||||
def __repr__(self):
|
||||
return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
|
||||
|
||||
def _release_save(self):
|
||||
self.__lock.release() # No state to save
|
||||
|
||||
def _acquire_restore(self, x):
|
||||
self.__lock.acquire() # Ignore saved state
|
||||
|
||||
def _is_owned(self):
|
||||
# Return True if lock is owned by current_thread.
|
||||
# This method is called only if __lock doesn't have _is_owned().
|
||||
if self.__lock.acquire(0):
|
||||
self.__lock.release()
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def wait(self, timeout=None):
|
||||
if not self._is_owned():
|
||||
raise RuntimeError("cannot wait on un-acquired lock")
|
||||
waiter = Lock()
|
||||
waiter.acquire()
|
||||
self.__waiters.append(waiter)
|
||||
saved_state = self._release_save()
|
||||
try: # restore state no matter what (e.g., KeyboardInterrupt)
|
||||
if timeout is None:
|
||||
waiter.acquire()
|
||||
else:
|
||||
# Balancing act: We can't afford a pure busy loop, so we
|
||||
# have to sleep; but if we sleep the whole timeout time,
|
||||
# we'll be unresponsive. The scheme here sleeps very
|
||||
# little at first, longer as time goes on, but never longer
|
||||
# than 20 times per second (or the timeout time remaining).
|
||||
endtime = _time() + timeout
|
||||
delay = 0.0005 # 500 us -> initial delay of 1 ms
|
||||
while True:
|
||||
gotit = waiter.acquire(0)
|
||||
if gotit:
|
||||
break
|
||||
remaining = endtime - _time()
|
||||
if remaining <= 0:
|
||||
break
|
||||
delay = min(delay * 2, remaining, .05)
|
||||
_sleep(delay)
|
||||
if not gotit:
|
||||
try:
|
||||
self.__waiters.remove(waiter)
|
||||
except ValueError:
|
||||
pass
|
||||
finally:
|
||||
self._acquire_restore(saved_state)
|
||||
|
||||
def notify(self, n=1):
|
||||
if not self._is_owned():
|
||||
raise RuntimeError("cannot notify on un-acquired lock")
|
||||
__waiters = self.__waiters
|
||||
waiters = __waiters[:n]
|
||||
if not waiters:
|
||||
return
|
||||
for waiter in waiters:
|
||||
waiter.release()
|
||||
try:
|
||||
__waiters.remove(waiter)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def notify_all(self):
|
||||
self.notify(len(self.__waiters))
|
||||
|
||||
|
||||
class Semaphore(object):
|
||||
|
||||
# After Tim Peters' semaphore class, but not quite the same (no maximum)
|
||||
|
||||
def __init__(self, value=1):
|
||||
if value < 0:
|
||||
raise ValueError("semaphore initial value must be >= 0")
|
||||
self.__cond = Condition(Lock())
|
||||
self.__value = value
|
||||
|
||||
def acquire(self, blocking=1):
|
||||
rc = False
|
||||
self.__cond.acquire()
|
||||
while self.__value == 0:
|
||||
if not blocking:
|
||||
break
|
||||
self.__cond.wait()
|
||||
else:
|
||||
self.__value = self.__value - 1
|
||||
rc = True
|
||||
self.__cond.release()
|
||||
return rc
|
||||
|
||||
__enter__ = acquire
|
||||
|
||||
def release(self):
|
||||
self.__cond.acquire()
|
||||
self.__value = self.__value + 1
|
||||
self.__cond.notify()
|
||||
self.__cond.release()
|
||||
|
||||
def __exit__(self, t, v, tb):
|
||||
self.release()
|
||||
|
||||
|
||||
class BoundedSemaphore(Semaphore):
|
||||
"""Semaphore that checks that # releases is <= # acquires"""
|
||||
def __init__(self, value=1):
|
||||
Semaphore.__init__(self, value)
|
||||
self._initial_value = value
|
||||
|
||||
def release(self):
|
||||
if self._Semaphore__value >= self._initial_value:
|
||||
raise ValueError("Semaphore released too many times")
|
||||
return Semaphore.release(self)
|
||||
|
||||
|
||||
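# Usage sketch for Semaphore/BoundedSemaphore. Hedged: it uses the standard-library
# threading equivalents, which these classes mirror; the counter value 2 is illustrative.
def _example_bounded_semaphore():
    import threading
    sem = threading.BoundedSemaphore(2)   # at most two concurrent holders
    sem.acquire()
    sem.acquire()
    assert not sem.acquire(False)         # a third non-blocking acquire fails
    sem.release()
    sem.release()
    try:
        sem.release()                     # one release too many
    except ValueError:
        pass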
class Event(object):
|
||||
|
||||
# After Tim Peters' event class (without is_posted())
|
||||
|
||||
def __init__(self):
|
||||
self.__cond = Condition(Lock())
|
||||
self.__flag = False
|
||||
|
||||
def _reset_internal_locks(self):
|
||||
# private! called by Thread._reset_internal_locks by _after_fork()
|
||||
self.__cond.__init__()
|
||||
|
||||
def is_set(self):
|
||||
return self.__flag
|
||||
|
||||
def set(self):
|
||||
self.__cond.acquire()
|
||||
try:
|
||||
self.__flag = True
|
||||
self.__cond.notify_all()
|
||||
finally:
|
||||
self.__cond.release()
|
||||
|
||||
def clear(self):
|
||||
self.__cond.acquire()
|
||||
try:
|
||||
self.__flag = False
|
||||
finally:
|
||||
self.__cond.release()
|
||||
|
||||
def wait(self, timeout=None):
|
||||
self.__cond.acquire()
|
||||
try:
|
||||
if not self.__flag:
|
||||
self.__cond.wait(timeout)
|
||||
return self.__flag
|
||||
finally:
|
||||
self.__cond.release()
|
||||
|
||||
|
||||
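# Usage sketch for Event. Hedged: it uses the standard-library threading.Event, which
# this class mirrors; wait() returns the flag, so a timed wait can report a timeout.
def _example_event():
    import threading
    done = threading.Event()
    threading.Thread(target=done.set).start()   # signal from another thread
    finished = done.wait(1.0)                   # True if set within 1 second
    assert finished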
class Queue:
|
||||
"""Create a queue object with a given maximum size.
|
||||
|
||||
If maxsize is <= 0, the queue size is infinite.
|
||||
"""
|
||||
def __init__(self, maxsize=0):
|
||||
self.maxsize = maxsize
|
||||
self._init(maxsize)
|
||||
# mutex must be held whenever the queue is mutating. All methods
|
||||
# that acquire mutex must release it before returning. mutex
|
||||
# is shared between the three conditions, so acquiring and
|
||||
# releasing the conditions also acquires and releases mutex.
|
||||
self.mutex = Lock()
|
||||
# Notify not_empty whenever an item is added to the queue; a
|
||||
# thread waiting to get is notified then.
|
||||
self.not_empty = Condition(self.mutex)
|
||||
# Notify not_full whenever an item is removed from the queue;
|
||||
# a thread waiting to put is notified then.
|
||||
self.not_full = Condition(self.mutex)
|
||||
# Notify all_tasks_done whenever the number of unfinished tasks
|
||||
# drops to zero; thread waiting to join() is notified to resume
|
||||
self.all_tasks_done = Condition(self.mutex)
|
||||
self.unfinished_tasks = 0
|
||||
|
||||
def task_done(self):
|
||||
"""Indicate that a formerly enqueued task is complete.
|
||||
|
||||
Used by Queue consumer threads. For each get() used to fetch a task,
|
||||
a subsequent call to task_done() tells the queue that the processing
|
||||
on the task is complete.
|
||||
|
||||
If a join() is currently blocking, it will resume when all items
|
||||
have been processed (meaning that a task_done() call was received
|
||||
for every item that had been put() into the queue).
|
||||
|
||||
Raises a ValueError if called more times than there were items
|
||||
placed in the queue.
|
||||
"""
|
||||
self.all_tasks_done.acquire()
|
||||
try:
|
||||
unfinished = self.unfinished_tasks - 1
|
||||
if unfinished <= 0:
|
||||
if unfinished < 0:
|
||||
raise ValueError('task_done() called too many times')
|
||||
self.all_tasks_done.notify_all()
|
||||
self.unfinished_tasks = unfinished
|
||||
finally:
|
||||
self.all_tasks_done.release()
|
||||
|
||||
def join(self):
|
||||
"""Blocks until all items in the Queue have been gotten and processed.
|
||||
|
||||
The count of unfinished tasks goes up whenever an item is added to the
|
||||
queue. The count goes down whenever a consumer thread calls task_done()
|
||||
to indicate the item was retrieved and all work on it is complete.
|
||||
|
||||
When the count of unfinished tasks drops to zero, join() unblocks.
|
||||
"""
|
||||
self.all_tasks_done.acquire()
|
||||
try:
|
||||
while self.unfinished_tasks:
|
||||
self.all_tasks_done.wait()
|
||||
finally:
|
||||
self.all_tasks_done.release()
|
||||
|
||||
def qsize(self):
|
||||
"""Return the approximate size of the queue (not reliable!)."""
|
||||
self.mutex.acquire()
|
||||
try:
|
||||
return self._qsize()
|
||||
finally:
|
||||
self.mutex.release()
|
||||
|
||||
def empty(self):
|
||||
"""Return True if the queue is empty, False otherwise (not reliable!)."""
|
||||
self.mutex.acquire()
|
||||
try:
|
||||
return not self._qsize()
|
||||
finally:
|
||||
self.mutex.release()
|
||||
|
||||
def full(self):
|
||||
"""Return True if the queue is full, False otherwise (not reliable!)."""
|
||||
self.mutex.acquire()
|
||||
try:
|
||||
if self.maxsize <= 0:
|
||||
return False
|
||||
return self._qsize() >= self.maxsize
|
||||
finally:
|
||||
self.mutex.release()
|
||||
|
||||
def put(self, item, block=True, timeout=None):
|
||||
"""Put an item into the queue.
|
||||
|
||||
If optional args 'block' is true and 'timeout' is None (the default),
|
||||
block if necessary until a free slot is available. If 'timeout' is
|
||||
a positive number, it blocks at most 'timeout' seconds and raises
|
||||
the Full exception if no free slot was available within that time.
|
||||
Otherwise ('block' is false), put an item on the queue if a free slot
|
||||
is immediately available, else raise the Full exception ('timeout'
|
||||
is ignored in that case).
|
||||
"""
|
||||
self.not_full.acquire()
|
||||
try:
|
||||
if self.maxsize > 0:
|
||||
if not block:
|
||||
if self._qsize() >= self.maxsize:
|
||||
raise Full
|
||||
elif timeout is None:
|
||||
while self._qsize() >= self.maxsize:
|
||||
self.not_full.wait()
|
||||
elif timeout < 0:
|
||||
raise ValueError("'timeout' must be a positive number")
|
||||
else:
|
||||
endtime = _time() + timeout
|
||||
while self._qsize() >= self.maxsize:
|
||||
remaining = endtime - _time()
|
||||
if remaining <= 0.0:
|
||||
raise Full
|
||||
self.not_full.wait(remaining)
|
||||
self._put(item)
|
||||
self.unfinished_tasks += 1
|
||||
self.not_empty.notify()
|
||||
finally:
|
||||
self.not_full.release()
|
||||
|
||||
def put_nowait(self, item):
|
||||
"""Put an item into the queue without blocking.
|
||||
|
||||
Only enqueue the item if a free slot is immediately available.
|
||||
Otherwise raise the Full exception.
|
||||
"""
|
||||
return self.put(item, False)
|
||||
|
||||
def get(self, block=True, timeout=None):
|
||||
"""Remove and return an item from the queue.
|
||||
|
||||
If optional args 'block' is true and 'timeout' is None (the default),
|
||||
block if necessary until an item is available. If 'timeout' is
|
||||
a positive number, it blocks at most 'timeout' seconds and raises
|
||||
the Empty exception if no item was available within that time.
|
||||
Otherwise ('block' is false), return an item if one is immediately
|
||||
available, else raise the Empty exception ('timeout' is ignored
|
||||
in that case).
|
||||
"""
|
||||
self.not_empty.acquire()
|
||||
try:
|
||||
if not block:
|
||||
if not self._qsize():
|
||||
raise Empty
|
||||
elif timeout is None:
|
||||
while not self._qsize():
|
||||
self.not_empty.wait()
|
||||
elif timeout < 0:
|
||||
raise ValueError("'timeout' must be a positive number")
|
||||
else:
|
||||
endtime = _time() + timeout
|
||||
while not self._qsize():
|
||||
remaining = endtime - _time()
|
||||
if remaining <= 0.0:
|
||||
raise Empty
|
||||
self.not_empty.wait(remaining)
|
||||
item = self._get()
|
||||
self.not_full.notify()
|
||||
return item
|
||||
finally:
|
||||
self.not_empty.release()
|
||||
|
||||
def get_nowait(self):
|
||||
"""Remove and return an item from the queue without blocking.
|
||||
|
||||
Only get an item if one is immediately available. Otherwise
|
||||
raise the Empty exception.
|
||||
"""
|
||||
return self.get(False)
|
||||
|
||||
# Override these methods to implement other queue organizations
|
||||
# (e.g. stack or priority queue).
|
||||
# These will only be called with appropriate locks held
|
||||
|
||||
# Initialize the queue representation
|
||||
def _init(self, maxsize):
|
||||
self.queue = deque()
|
||||
|
||||
def _qsize(self, len=len):
|
||||
return len(self.queue)
|
||||
|
||||
# Put a new item in the queue
|
||||
def _put(self, item):
|
||||
self.queue.append(item)
|
||||
|
||||
# Get an item from the queue
|
||||
def _get(self):
|
||||
return self.queue.popleft()
|
||||
|
||||
|
||||
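# Worked example of the put()/get()/task_done()/join() protocol documented above.
# Hedged: it uses the standard-library Queue module (named 'queue' on Python 3),
# which this class mirrors; the worker and item names are illustrative.
def _example_queue_join():
    import threading
    import Queue as _queue

    q = _queue.Queue(maxsize=2)

    def worker():
        while True:
            item = q.get()        # blocks until an item is available
            # ... process item ...
            q.task_done()         # exactly one task_done() per successful get()

    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()
    for i in range(5):
        q.put(i)                  # blocks while the queue already holds maxsize items
    q.join()                      # returns once every item put has been task_done()'d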
class PriorityQueue(Queue):
|
||||
'''Variant of Queue that retrieves open entries in priority order (lowest first).
|
||||
|
||||
Entries are typically tuples of the form: (priority number, data).
|
||||
'''
|
||||
|
||||
def _init(self, maxsize):
|
||||
self.queue = []
|
||||
|
||||
def _qsize(self, len=len):
|
||||
return len(self.queue)
|
||||
|
||||
def _put(self, item, heappush=heapq.heappush):
|
||||
heappush(self.queue, item)
|
||||
|
||||
def _get(self, heappop=heapq.heappop):
|
||||
return heappop(self.queue)
|
||||
|
||||
|
||||
class LifoQueue(Queue):
|
||||
'''Variant of Queue that retrieves most recently added entries first.'''
|
||||
|
||||
def _init(self, maxsize):
|
||||
self.queue = []
|
||||
|
||||
def _qsize(self, len=len):
|
||||
return len(self.queue)
|
||||
|
||||
def _put(self, item):
|
||||
self.queue.append(item)
|
||||
|
||||
def _get(self):
|
||||
return self.queue.pop()
|
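# Ordering sketch for the two variants above. Hedged: it uses the standard-library
# equivalents ('Queue' on Python 2, 'queue' on Python 3); the values are illustrative.
def _example_queue_variants():
    import Queue as _queue
    pq = _queue.PriorityQueue()
    for entry in [(3, 'low'), (1, 'high'), (2, 'mid')]:
        pq.put(entry)                  # entries are (priority number, data) tuples
    assert pq.get() == (1, 'high')     # lowest priority number is retrieved first

    lq = _queue.LifoQueue()
    lq.put('first')
    lq.put('second')
    assert lq.get() == 'second'        # most recently added is retrieved first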
BIN
panda/python/Lib/site-packages/gevent/_util.pyd
Normal file
Binary file not shown.
BIN
panda/python/Lib/site-packages/gevent/ares.pyd
Normal file
Binary file not shown.
114
panda/python/Lib/site-packages/gevent/backdoor.py
Normal file
@@ -0,0 +1,114 @@
# @author Bob Ippolito
|
||||
#
|
||||
# Copyright (c) 2005-2006, Bob Ippolito
|
||||
# Copyright (c) 2007, Linden Research, Inc.
|
||||
# Copyright (c) 2008, Donovan Preston
|
||||
# Copyright (c) 2009-2010, Denis Bilenko
|
||||
# Copyright (c) 2011, gevent contributors
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
# THE SOFTWARE.
|
||||
|
||||
import sys
|
||||
from code import InteractiveConsole
|
||||
|
||||
from gevent import socket
|
||||
from gevent.greenlet import Greenlet
|
||||
from gevent.server import StreamServer
|
||||
|
||||
__all__ = ['BackdoorServer']
|
||||
|
||||
try:
|
||||
sys.ps1
|
||||
except AttributeError:
|
||||
sys.ps1 = '>>> '
|
||||
try:
|
||||
sys.ps2
|
||||
except AttributeError:
|
||||
sys.ps2 = '... '
|
||||
|
||||
|
||||
class SocketConsole(Greenlet):
|
||||
|
||||
def __init__(self, locals, conn, banner=None):
|
||||
Greenlet.__init__(self)
|
||||
self.locals = locals
|
||||
self.desc = _fileobject(conn)
|
||||
self.banner = banner
|
||||
|
||||
def finalize(self):
|
||||
self.desc = None
|
||||
|
||||
def switch(self, *args, **kw):
|
||||
self.saved = sys.stdin, sys.stderr, sys.stdout
|
||||
sys.stdin = sys.stdout = sys.stderr = self.desc
|
||||
Greenlet.switch(self, *args, **kw)
|
||||
|
||||
def switch_out(self):
|
||||
sys.stdin, sys.stderr, sys.stdout = self.saved
|
||||
|
||||
def _run(self):
|
||||
try:
|
||||
try:
|
||||
console = InteractiveConsole(self.locals)
|
||||
# __builtins__ may either be the __builtin__ module or
|
||||
# __builtin__.__dict__; in the latter case, typing
|
||||
# locals() at the backdoor prompt spews out lots of
|
||||
# useless stuff
|
||||
import __builtin__
|
||||
console.locals["__builtins__"] = __builtin__
|
||||
console.interact(banner=self.banner)
|
||||
except SystemExit: # raised by quit()
|
||||
sys.exc_clear()
|
||||
finally:
|
||||
self.switch_out()
|
||||
self.finalize()
|
||||
|
||||
|
||||
class BackdoorServer(StreamServer):
|
||||
|
||||
def __init__(self, listener, locals=None, banner=None, **server_args):
|
||||
StreamServer.__init__(self, listener, spawn=None, **server_args)
|
||||
self.locals = locals
|
||||
self.banner = banner
|
||||
# QQQ passing pool instance as 'spawn' is not possible; should it be fixed?
|
||||
|
||||
def handle(self, conn, address):
|
||||
SocketConsole.spawn(self.locals, conn, banner=self.banner)
|
||||
|
||||
|
||||
class _fileobject(socket._fileobject):
|
||||
|
||||
def write(self, data):
|
||||
self._sock.sendall(data)
|
||||
|
||||
def isatty(self):
|
||||
return True
|
||||
|
||||
def flush(self):
|
||||
pass
|
||||
|
||||
def readline(self, *a):
|
||||
return socket._fileobject.readline(self, *a).replace("\r\n", "\n")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if not sys.argv[1:]:
|
||||
print ('USAGE: %s PORT' % sys.argv[0])
|
||||
else:
|
||||
BackdoorServer(('127.0.0.1', int(sys.argv[1]))).serve_forever()
|
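# Usage sketch mirroring the __main__ block above. Hedged: the port, banner and locals
# are illustrative. Each accepted connection gets an interactive Python prompt; connect
# with any line-based TCP client, e.g. `telnet 127.0.0.1 5001`.
def _example_backdoor():
    server = BackdoorServer(('127.0.0.1', 5001), locals={'answer': 42}, banner='backdoor')
    return server   # call server.serve_forever() to block and start accepting connections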
323
panda/python/Lib/site-packages/gevent/baseserver.py
Normal file
@@ -0,0 +1,323 @@
"""Base class for implementing servers"""
|
||||
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||
import sys
|
||||
import _socket
|
||||
import errno
|
||||
from gevent.greenlet import Greenlet, getfuncname
|
||||
from gevent.event import Event
|
||||
from gevent.hub import string_types, integer_types, get_hub
|
||||
|
||||
|
||||
__all__ = ['BaseServer']
|
||||
|
||||
|
||||
class BaseServer(object):
|
||||
"""An abstract base class that implements some common functionality for the servers in gevent.
|
||||
|
||||
*listener* can either be an address that the server should bind on or a :class:`gevent.socket.socket`
|
||||
instance that is already bound (and put into listening mode in case of TCP socket).
|
||||
|
||||
*spawn*, if provided, is called to create a new greenlet to run the handler. By default, :func:`gevent.spawn` is used.
|
||||
|
||||
Possible values for *spawn*:
|
||||
|
||||
* a :class:`gevent.pool.Pool` instance -- *handle* will be executed
|
||||
using :meth:`Pool.spawn` method only if the pool is not full.
|
||||
While it is full, all the connections are dropped;
|
||||
* :func:`gevent.spawn_raw` -- *handle* will be executed in a raw
|
||||
greenlet which has a little less overhead than :class:`gevent.Greenlet` instances spawned by default;
|
||||
* ``None`` -- *handle* will be executed right away, in the :class:`Hub` greenlet.
|
||||
*handle* cannot use any blocking functions as it means switching to the :class:`Hub`.
|
||||
* an integer -- a shortcut for ``gevent.pool.Pool(integer)``
|
||||
"""
|
||||
# the number of seconds to sleep in case there was an error in accept() call
|
||||
# for consecutive errors the delay will double until it reaches max_delay
|
||||
# when accept() finally succeeds the delay will be reset to min_delay again
|
||||
min_delay = 0.01
|
||||
max_delay = 1
|
||||
|
||||
# Sets the maximum number of consecutive accepts that a process may perform on
|
||||
# a single wake up. High values give higher priority to high connection rates,
|
||||
# while lower values give higher priority to already established connections.
|
||||
# Default is 100. Note, that in case of multiple working processes on the same
|
||||
# listening socket, it should be set to a lower value. (pywsgi.WSGIServer sets it
|
||||
# to 1 when environ["wsgi.multiprocess"] is true)
|
||||
max_accept = 100
|
||||
|
||||
_spawn = Greenlet.spawn
|
||||
|
||||
# the default timeout that we wait for the client connections to close in stop()
|
||||
stop_timeout = 1
|
||||
|
||||
fatal_errors = (errno.EBADF, errno.EINVAL, errno.ENOTSOCK)
|
||||
|
||||
def __init__(self, listener, handle=None, spawn='default'):
|
||||
self._stop_event = Event()
|
||||
self._stop_event.set()
|
||||
self._watcher = None
|
||||
self._timer = None
|
||||
self.pool = None
|
||||
try:
|
||||
self.set_listener(listener)
|
||||
self.set_spawn(spawn)
|
||||
self.set_handle(handle)
|
||||
self.delay = self.min_delay
|
||||
self.loop = get_hub().loop
|
||||
if self.max_accept < 1:
|
||||
raise ValueError('max_accept must be positive int: %r' % (self.max_accept, ))
|
||||
except:
|
||||
self.close()
|
||||
raise
|
||||
|
||||
def set_listener(self, listener):
|
||||
if hasattr(listener, 'accept'):
|
||||
if hasattr(listener, 'do_handshake'):
|
||||
raise TypeError('Expected a regular socket, not SSLSocket: %r' % (listener, ))
|
||||
self.family = listener.family
|
||||
self.address = listener.getsockname()
|
||||
self.socket = listener
|
||||
else:
|
||||
self.family, self.address = parse_address(listener)
|
||||
|
||||
def set_spawn(self, spawn):
|
||||
if spawn == 'default':
|
||||
self.pool = None
|
||||
self._spawn = self._spawn
|
||||
elif hasattr(spawn, 'spawn'):
|
||||
self.pool = spawn
|
||||
self._spawn = spawn.spawn
|
||||
elif isinstance(spawn, (int, long)):
|
||||
from gevent.pool import Pool
|
||||
self.pool = Pool(spawn)
|
||||
self._spawn = self.pool.spawn
|
||||
else:
|
||||
self.pool = None
|
||||
self._spawn = spawn
|
||||
if hasattr(self.pool, 'full'):
|
||||
self.full = self.pool.full
|
||||
if self.pool is not None:
|
||||
self.pool._semaphore.rawlink(self._start_accepting_if_started)
|
||||
|
||||
def set_handle(self, handle):
|
||||
if handle is not None:
|
||||
self.handle = handle
|
||||
if hasattr(self, 'handle'):
|
||||
self._handle = self.handle
|
||||
else:
|
||||
raise TypeError("'handle' must be provided")
|
||||
|
||||
def _start_accepting_if_started(self, _event=None):
|
||||
if self.started:
|
||||
self.start_accepting()
|
||||
|
||||
def start_accepting(self):
|
||||
if self._watcher is None:
|
||||
# just stop watcher without creating a new one?
|
||||
self._watcher = self.loop.io(self.socket.fileno(), 1)
|
||||
self._watcher.start(self._do_read)
|
||||
|
||||
def stop_accepting(self):
|
||||
if self._watcher is not None:
|
||||
self._watcher.stop()
|
||||
self._watcher = None
|
||||
if self._timer is not None:
|
||||
self._timer.stop()
|
||||
self._timer = None
|
||||
|
||||
def do_handle(self, *args):
|
||||
spawn = self._spawn
|
||||
if spawn is None:
|
||||
self._handle(*args)
|
||||
else:
|
||||
spawn(self._handle, *args)
|
||||
|
||||
def _do_read(self):
|
||||
for _ in xrange(self.max_accept):
|
||||
if self.full():
|
||||
self.stop_accepting()
|
||||
return
|
||||
try:
|
||||
args = self.do_read()
|
||||
self.delay = self.min_delay
|
||||
if not args:
|
||||
return
|
||||
except:
|
||||
self.loop.handle_error(self, *sys.exc_info())
|
||||
ex = sys.exc_info()[1]
|
||||
if self.is_fatal_error(ex):
|
||||
self.close()
|
||||
sys.stderr.write('ERROR: %s failed with %s\n' % (self, str(ex) or repr(ex)))
|
||||
return
|
||||
if self.delay >= 0:
|
||||
self.stop_accepting()
|
||||
self._timer = self.loop.timer(self.delay)
|
||||
self._timer.start(self._start_accepting_if_started)
|
||||
self.delay = min(self.max_delay, self.delay * 2)
|
||||
break
|
||||
else:
|
||||
try:
|
||||
self.do_handle(*args)
|
||||
except:
|
||||
self.loop.handle_error((args[1:], self), *sys.exc_info())
|
||||
if self.delay >= 0:
|
||||
self.stop_accepting()
|
||||
self._timer = self.loop.timer(self.delay)
|
||||
self._timer.start(self._start_accepting_if_started)
|
||||
self.delay = min(self.max_delay, self.delay * 2)
|
||||
break
|
||||
|
||||
def full(self):
|
||||
return False
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._formatinfo())
|
||||
|
||||
def __str__(self):
|
||||
return '<%s %s>' % (type(self).__name__, self._formatinfo())
|
||||
|
||||
def _formatinfo(self):
|
||||
if hasattr(self, 'socket'):
|
||||
try:
|
||||
fileno = self.socket.fileno()
|
||||
except Exception:
|
||||
ex = sys.exc_info()[1]
|
||||
fileno = str(ex)
|
||||
result = 'fileno=%s ' % fileno
|
||||
else:
|
||||
result = ''
|
||||
try:
|
||||
if isinstance(self.address, tuple) and len(self.address) == 2:
|
||||
result += 'address=%s:%s' % self.address
|
||||
else:
|
||||
result += 'address=%s' % (self.address, )
|
||||
except Exception:
|
||||
ex = sys.exc_info()[1]
|
||||
result += str(ex) or '<error>'
|
||||
try:
|
||||
handle = getfuncname(self.__dict__['handle'])
|
||||
except Exception:
|
||||
handle = None
|
||||
if handle is not None:
|
||||
result += ' handle=' + handle
|
||||
return result
|
||||
|
||||
@property
|
||||
def server_host(self):
|
||||
"""IP address that the server is bound to (string)."""
|
||||
if isinstance(self.address, tuple):
|
||||
return self.address[0]
|
||||
|
||||
@property
|
||||
def server_port(self):
|
||||
"""Port that the server is bound to (an integer)."""
|
||||
if isinstance(self.address, tuple):
|
||||
return self.address[1]
|
||||
|
||||
def init_socket(self):
|
||||
"""If the user initialized the server with an address rather than socket,
|
||||
then this function will create a socket, bind it and put it into listening mode.
|
||||
|
||||
It is not supposed to be called by the user, it is called by :meth:`start` before starting
|
||||
the accept loop."""
|
||||
pass
|
||||
|
||||
@property
|
||||
def started(self):
|
||||
return not self._stop_event.is_set()
|
||||
|
||||
def start(self):
|
||||
"""Start accepting the connections.
|
||||
|
||||
If an address was provided in the constructor, then also create a socket,
|
||||
bind it and put it into the listening mode.
|
||||
"""
|
||||
self.init_socket()
|
||||
self._stop_event.clear()
|
||||
try:
|
||||
self.start_accepting()
|
||||
except:
|
||||
self.close()
|
||||
raise
|
||||
|
||||
def close(self):
|
||||
"""Close the listener socket and stop accepting."""
|
||||
self._stop_event.set()
|
||||
try:
|
||||
self.stop_accepting()
|
||||
finally:
|
||||
try:
|
||||
self.socket.close()
|
||||
except Exception:
|
||||
pass
|
||||
finally:
|
||||
self.__dict__.pop('socket', None)
|
||||
self.__dict__.pop('handle', None)
|
||||
self.__dict__.pop('_handle', None)
|
||||
self.__dict__.pop('_spawn', None)
|
||||
self.__dict__.pop('full', None)
|
||||
if self.pool is not None:
|
||||
self.pool._semaphore.unlink(self._start_accepting_if_started)
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
return not hasattr(self, 'socket')
|
||||
|
||||
def stop(self, timeout=None):
|
||||
"""Stop accepting the connections and close the listening socket.
|
||||
|
||||
If the server uses a pool to spawn the requests, then :meth:`stop` also waits
|
||||
for all the handlers to exit. If there are still handlers executing after *timeout*
|
||||
has expired (default 1 second), then the currently running handlers in the pool are killed."""
|
||||
self.close()
|
||||
if timeout is None:
|
||||
timeout = self.stop_timeout
|
||||
if self.pool:
|
||||
self.pool.join(timeout=timeout)
|
||||
self.pool.kill(block=True, timeout=1)
|
||||
|
||||
def serve_forever(self, stop_timeout=None):
|
||||
"""Start the server if it hasn't been already started and wait until it's stopped."""
|
||||
# add test that serve_forever exists on stop()
|
||||
if not self.started:
|
||||
self.start()
|
||||
try:
|
||||
self._stop_event.wait()
|
||||
finally:
|
||||
Greenlet.spawn(self.stop, timeout=stop_timeout).join()
|
||||
|
||||
def is_fatal_error(self, ex):
|
||||
return isinstance(ex, _socket.error) and ex[0] in self.fatal_errors
|
||||
|
||||
|
||||
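# Sketch of the *spawn* options listed in the BaseServer docstring. Hedged: it uses
# gevent.server.StreamServer, a concrete subclass; the handler, port 0 (ephemeral) and
# the pool size are illustrative.
def _example_spawn_options():
    from gevent.pool import Pool
    from gevent.server import StreamServer

    def handle(conn, address):
        conn.sendall('hello\n')
        conn.close()

    # a Pool bounds concurrency: at most 100 handlers at once; while it is full,
    # new connections are dropped
    pooled = StreamServer(('127.0.0.1', 0), handle, spawn=Pool(100))
    # an integer is a shortcut for gevent.pool.Pool(integer)
    counted = StreamServer(('127.0.0.1', 0), handle, spawn=100)
    # None runs the handler directly in the hub greenlet, so it must never block
    inline = StreamServer(('127.0.0.1', 0), handle, spawn=None)
    return pooled, counted, inline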
def _extract_family(host):
|
||||
if host.startswith('[') and host.endswith(']'):
|
||||
host = host[1:-1]
|
||||
return _socket.AF_INET6, host
|
||||
return _socket.AF_INET, host
|
||||
|
||||
|
||||
def _parse_address(address):
|
||||
if isinstance(address, tuple):
|
||||
if ':' in address[0]:
|
||||
return _socket.AF_INET6, address
|
||||
return _socket.AF_INET, address
|
||||
elif isinstance(address, string_types):
|
||||
if ':' in address:
|
||||
host, port = address.rsplit(':', 1)
|
||||
family, host = _extract_family(host)
|
||||
if host == '*':
|
||||
host = ''
|
||||
return family, (host, int(port))
|
||||
else:
|
||||
return _socket.AF_INET, ('', int(address))
|
||||
elif isinstance(address, integer_types):
|
||||
return _socket.AF_INET, ('', int(address))
|
||||
else:
|
||||
raise TypeError('Expected tuple or string, got %s' % type(address))
|
||||
|
||||
|
||||
def parse_address(address):
|
||||
try:
|
||||
return _parse_address(address)
|
||||
except ValueError:
|
||||
raise ValueError('Failed to parse address %r: %s' % (address, sys.exc_info()[1]))
|
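# Examples of what parse_address() above returns. Hedged: these follow directly from the
# code; AF_INET6 is chosen when the host part contains ':' or is written as '[host]'.
def _example_parse_address():
    assert parse_address(8000) == (_socket.AF_INET, ('', 8000))
    assert parse_address('8000') == (_socket.AF_INET, ('', 8000))
    assert parse_address('*:8000') == (_socket.AF_INET, ('', 8000))
    assert parse_address('127.0.0.1:8000') == (_socket.AF_INET, ('127.0.0.1', 8000))
    assert parse_address('[::1]:8000') == (_socket.AF_INET6, ('::1', 8000))
    assert parse_address(('::1', 8000)) == (_socket.AF_INET6, ('::1', 8000))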
BIN
panda/python/Lib/site-packages/gevent/core.pyd
Normal file
Binary file not shown.
6
panda/python/Lib/site-packages/gevent/coros.py
Normal file
@@ -0,0 +1,6 @@
# This module definitely remains in 1.0.x, probably in versions after that too.
|
||||
import warnings
|
||||
warnings.warn('gevent.coros has been renamed to gevent.lock', DeprecationWarning, stacklevel=2)
|
||||
|
||||
from gevent.lock import *
|
||||
from gevent.lock import __all__
|
322
panda/python/Lib/site-packages/gevent/event.py
Normal file
@@ -0,0 +1,322 @@
# Copyright (c) 2009-2011 Denis Bilenko. See LICENSE for details.
|
||||
"""Basic synchronization primitives: Event and AsyncResult"""
|
||||
|
||||
import sys
|
||||
from gevent.hub import get_hub, getcurrent, _NONE, PY3
|
||||
from gevent.timeout import Timeout
|
||||
from collections import deque
|
||||
if PY3:
|
||||
xrange = range
|
||||
|
||||
__all__ = ['Event', 'AsyncResult']
|
||||
|
||||
|
||||
class Event(object):
|
||||
"""A synchronization primitive that allows one greenlet to wake up one or more others.
|
||||
It has the same interface as :class:`threading.Event` but works across greenlets.
|
||||
|
||||
An event object manages an internal flag that can be set to true with the
|
||||
:meth:`set` method and reset to false with the :meth:`clear` method. The :meth:`wait` method
|
||||
blocks until the flag is true.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._links = set()
|
||||
self._todo = set()
|
||||
self._flag = False
|
||||
self.hub = get_hub()
|
||||
self._notifier = None
|
||||
|
||||
def __str__(self):
|
||||
return '<%s %s _links[%s]>' % (self.__class__.__name__, (self._flag and 'set') or 'clear', len(self._links))
|
||||
|
||||
def is_set(self):
|
||||
"""Return true if and only if the internal flag is true."""
|
||||
return self._flag
|
||||
|
||||
isSet = is_set # makes it a better drop-in replacement for threading.Event
|
||||
ready = is_set # makes it compatible with AsyncResult and Greenlet (for example in wait())
|
||||
|
||||
def set(self):
|
||||
"""Set the internal flag to true. All greenlets waiting for it to become true are awakened.
|
||||
Greenlets that call :meth:`wait` once the flag is true will not block at all.
|
||||
"""
|
||||
self._flag = True
|
||||
self._todo.update(self._links)
|
||||
if self._todo and not self._notifier:
|
||||
self._notifier = self.hub.loop.run_callback(self._notify_links)
|
||||
|
||||
def clear(self):
|
||||
"""Reset the internal flag to false.
|
||||
Subsequently, threads calling :meth:`wait`
|
||||
will block until :meth:`set` is called to set the internal flag to true again.
|
||||
"""
|
||||
self._flag = False
|
||||
|
||||
def wait(self, timeout=None):
|
||||
"""Block until the internal flag is true.
|
||||
If the internal flag is true on entry, return immediately. Otherwise,
|
||||
block until another thread calls :meth:`set` to set the flag to true,
|
||||
or until the optional timeout occurs.
|
||||
|
||||
When the *timeout* argument is present and not ``None``, it should be a
|
||||
floating point number specifying a timeout for the operation in seconds
|
||||
(or fractions thereof).
|
||||
|
||||
Return the value of the internal flag (``True`` or ``False``).
|
||||
"""
|
||||
if self._flag:
|
||||
return self._flag
|
||||
else:
|
||||
switch = getcurrent().switch
|
||||
self.rawlink(switch)
|
||||
try:
|
||||
timer = Timeout.start_new(timeout)
|
||||
try:
|
||||
try:
|
||||
result = self.hub.switch()
|
||||
assert result is self, 'Invalid switch into Event.wait(): %r' % (result, )
|
||||
except Timeout:
|
||||
ex = sys.exc_info()[1]
|
||||
if ex is not timer:
|
||||
raise
|
||||
finally:
|
||||
timer.cancel()
|
||||
finally:
|
||||
self.unlink(switch)
|
||||
return self._flag
|
||||
|
||||
def rawlink(self, callback):
|
||||
"""Register a callback to call when the internal flag is set to true.
|
||||
|
||||
*callback* will be called in the :class:`Hub <gevent.hub.Hub>`, so it must not use blocking gevent API.
|
||||
*callback* will be passed one argument: this instance.
|
||||
"""
|
||||
if not callable(callback):
|
||||
raise TypeError('Expected callable: %r' % (callback, ))
|
||||
self._links.add(callback)
|
||||
if self._flag and not self._notifier:
|
||||
self._todo.add(callback)
|
||||
self._notifier = self.hub.loop.run_callback(self._notify_links)
|
||||
|
||||
def unlink(self, callback):
|
||||
"""Remove the callback set by :meth:`rawlink`"""
|
||||
try:
|
||||
self._links.remove(callback)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def _notify_links(self):
|
||||
while self._todo:
|
||||
link = self._todo.pop()
|
||||
if link in self._links: # check that link was not notified yet and was not removed by the client
|
||||
try:
|
||||
link(self)
|
||||
except:
|
||||
self.hub.handle_error((link, self), *sys.exc_info())
|
||||
|
||||
def _reset_internal_locks(self):
|
||||
# for compatibility with threading.Event (only in case of patch_all(Event=True), by default Event is not patched)
|
||||
# Exception AttributeError: AttributeError("'Event' object has no attribute '_reset_internal_locks'",)
|
||||
# in <module 'threading' from '/usr/lib/python2.7/threading.pyc'> ignored
|
||||
pass
|
||||
|
||||
|
||||
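# Usage sketch for the gevent Event above. Hedged: the delay and timeout values are
# illustrative; wait() returns the flag value, so a timed wait can report a timeout.
def _example_gevent_event():
    import gevent
    ev = Event()
    gevent.spawn_later(0.1, ev.set)   # set the flag from another greenlet
    assert ev.wait(1)                 # woken by set(); returns the flag (True)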
class AsyncResult(object):
|
||||
"""A one-time event that stores a value or an exception.
|
||||
|
||||
Like :class:`Event` it wakes up all the waiters when :meth:`set` or :meth:`set_exception` method
|
||||
is called. Waiters may receive the passed value or exception by calling :meth:`get`
|
||||
method instead of :meth:`wait`. An :class:`AsyncResult` instance cannot be reset.
|
||||
|
||||
To pass a value call :meth:`set`. Calls to :meth:`get` (those that are currently blocking as well as
|
||||
those made in the future) will return the value:
|
||||
|
||||
>>> result = AsyncResult()
|
||||
>>> result.set(100)
|
||||
>>> result.get()
|
||||
100
|
||||
|
||||
To pass an exception call :meth:`set_exception`. This will cause :meth:`get` to raise that exception:
|
||||
|
||||
>>> result = AsyncResult()
|
||||
>>> result.set_exception(RuntimeError('failure'))
|
||||
>>> result.get()
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
RuntimeError: failure
|
||||
|
||||
:class:`AsyncResult` implements :meth:`__call__` and thus can be used as :meth:`link` target:
|
||||
|
||||
>>> import gevent
|
||||
>>> result = AsyncResult()
|
||||
>>> gevent.spawn(lambda : 1/0).link(result)
|
||||
>>> result.get()
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
ZeroDivisionError: integer division or modulo by zero
|
||||
"""
|
||||
def __init__(self):
|
||||
self._links = deque()
|
||||
self.value = None
|
||||
self._exception = _NONE
|
||||
self.hub = get_hub()
|
||||
self._notifier = None
|
||||
|
||||
def __str__(self):
|
||||
result = '<%s ' % (self.__class__.__name__, )
|
||||
if self.value is not None or self._exception is not _NONE:
|
||||
result += 'value=%r ' % self.value
|
||||
if self._exception is not None and self._exception is not _NONE:
|
||||
result += 'exception=%r ' % self._exception
|
||||
if self._exception is _NONE:
|
||||
result += 'unset '
|
||||
return result + ' _links[%s]>' % len(self._links)
|
||||
|
||||
def ready(self):
|
||||
"""Return true if and only if it holds a value or an exception"""
|
||||
return self._exception is not _NONE
|
||||
|
||||
def successful(self):
|
||||
"""Return true if and only if it is ready and holds a value"""
|
||||
return self._exception is None
|
||||
|
||||
@property
|
||||
def exception(self):
|
||||
"""Holds the exception instance passed to :meth:`set_exception` if :meth:`set_exception` was called.
|
||||
Otherwise ``None``."""
|
||||
if self._exception is not _NONE:
|
||||
return self._exception
|
||||
|
||||
def set(self, value=None):
|
||||
"""Store the value. Wake up the waiters.
|
||||
|
||||
All greenlets blocking on :meth:`get` or :meth:`wait` are woken up.
|
||||
Sequential calls to :meth:`wait` and :meth:`get` will not block at all.
|
||||
"""
|
||||
self.value = value
|
||||
self._exception = None
|
||||
if self._links and not self._notifier:
|
||||
self._notifier = self.hub.loop.run_callback(self._notify_links)
|
||||
|
||||
def set_exception(self, exception):
|
||||
"""Store the exception. Wake up the waiters.
|
||||
|
||||
All greenlets blocking on :meth:`get` or :meth:`wait` are woken up.
|
||||
Sequential calls to :meth:`wait` and :meth:`get` will not block at all.
|
||||
"""
|
||||
self._exception = exception
|
||||
if self._links and not self._notifier:
|
||||
self._notifier = self.hub.loop.run_callback(self._notify_links)
|
||||
|
||||
def get(self, block=True, timeout=None):
|
||||
"""Return the stored value or raise the exception.
|
||||
|
||||
If this instance already holds a value / an exception, return / raise it immediately.
|
||||
Otherwise, block until another greenlet calls :meth:`set` or :meth:`set_exception` or
|
||||
until the optional timeout occurs.
|
||||
|
||||
When the *timeout* argument is present and not ``None``, it should be a
|
||||
floating point number specifying a timeout for the operation in seconds
|
||||
(or fractions thereof).
|
||||
"""
|
||||
if self._exception is not _NONE:
|
||||
if self._exception is None:
|
||||
return self.value
|
||||
raise self._exception
|
||||
elif block:
|
||||
switch = getcurrent().switch
|
||||
self.rawlink(switch)
|
||||
try:
|
||||
timer = Timeout.start_new(timeout)
|
||||
try:
|
||||
result = self.hub.switch()
|
||||
assert result is self, 'Invalid switch into AsyncResult.get(): %r' % (result, )
|
||||
finally:
|
||||
timer.cancel()
|
||||
except:
|
||||
self.unlink(switch)
|
||||
raise
|
||||
if self._exception is None:
|
||||
return self.value
|
||||
raise self._exception
|
||||
else:
|
||||
raise Timeout
|
||||
|
||||
def get_nowait(self):
|
||||
"""Return the value or raise the exception without blocking.
|
||||
|
||||
If nothing is available, raise :class:`gevent.Timeout` immediately.
|
||||
"""
|
||||
return self.get(block=False)
|
||||
|
||||
def wait(self, timeout=None):
|
||||
"""Block until the instance is ready.
|
||||
|
||||
If this instance already holds a value / an exception, return immediately.
|
||||
Otherwise, block until another thread calls :meth:`set` or :meth:`set_exception` or
|
||||
until the optional timeout occurs.
|
||||
|
||||
When the *timeout* argument is present and not ``None``, it should be a
|
||||
floating point number specifying a timeout for the operation in seconds
|
||||
(or fractions thereof).
|
||||
|
||||
Return :attr:`value`.
|
||||
"""
|
||||
if self._exception is not _NONE:
|
||||
return self.value
|
||||
else:
|
||||
switch = getcurrent().switch
|
||||
self.rawlink(switch)
|
||||
try:
|
||||
timer = Timeout.start_new(timeout)
|
||||
try:
|
||||
result = self.hub.switch()
|
||||
assert result is self, 'Invalid switch into AsyncResult.wait(): %r' % (result, )
|
||||
finally:
|
||||
timer.cancel()
|
||||
except Timeout:
|
||||
exc = sys.exc_info()[1]
|
||||
self.unlink(switch)
|
||||
if exc is not timer:
|
||||
raise
|
||||
except:
|
||||
self.unlink(switch)
|
||||
raise
|
||||
# not calling unlink() in non-exception case, because if switch()
|
||||
# finished normally, link was already removed in _notify_links
|
||||
return self.value
|
||||
|
||||
def _notify_links(self):
|
||||
while self._links:
|
||||
link = self._links.popleft()
|
||||
try:
|
||||
link(self)
|
||||
except:
|
||||
self.hub.handle_error((link, self), *sys.exc_info())
|
||||
|
||||
def rawlink(self, callback):
|
||||
"""Register a callback to call when a value or an exception is set.
|
||||
|
||||
*callback* will be called in the :class:`Hub <gevent.hub.Hub>`, so it must not use blocking gevent API.
|
||||
*callback* will be passed one argument: this instance.
|
||||
"""
|
||||
if not callable(callback):
|
||||
raise TypeError('Expected callable: %r' % (callback, ))
|
||||
self._links.append(callback)
|
||||
if self.ready() and not self._notifier:
|
||||
self._notifier = self.hub.loop.run_callback(self._notify_links)
|
||||
|
||||
def unlink(self, callback):
|
||||
"""Remove the callback set by :meth:`rawlink`"""
|
||||
try:
|
||||
self._links.remove(callback)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
# link protocol
|
||||
def __call__(self, source):
|
||||
if source.successful():
|
||||
self.set(source.value)
|
||||
else:
|
||||
self.set_exception(source.exception)
|
322
panda/python/Lib/site-packages/gevent/fileobject.py
Normal file
@@ -0,0 +1,322 @@
from __future__ import absolute_import, with_statement
|
||||
import sys
|
||||
import os
|
||||
from gevent.hub import get_hub
|
||||
from gevent.socket import EBADF
|
||||
from gevent.os import _read, _write, ignored_errors
|
||||
from gevent.lock import Semaphore, DummySemaphore
|
||||
|
||||
|
||||
try:
|
||||
from fcntl import fcntl
|
||||
except ImportError:
|
||||
fcntl = None
|
||||
|
||||
|
||||
__all__ = ['FileObjectPosix',
|
||||
'FileObjectThread',
|
||||
'FileObject']
|
||||
|
||||
|
||||
if fcntl is None:
|
||||
|
||||
__all__.remove('FileObjectPosix')
|
||||
|
||||
else:
|
||||
|
||||
from gevent.socket import _fileobject, _get_memory
|
||||
cancel_wait_ex = IOError(EBADF, 'File descriptor was closed in another greenlet')
|
||||
from gevent.os import make_nonblocking
|
||||
|
||||
try:
|
||||
from gevent._util import SocketAdapter__del__, noop
|
||||
except ImportError:
|
||||
SocketAdapter__del__ = None
|
||||
noop = None
|
||||
|
||||
from types import UnboundMethodType
|
||||
|
||||
class NA(object):
|
||||
|
||||
def __repr__(self):
|
||||
return 'N/A'
|
||||
|
||||
NA = NA()
|
||||
|
||||
class SocketAdapter(object):
|
||||
"""Socket-like API on top of a file descriptor.
|
||||
|
||||
The main purpose of it is to re-use _fileobject to create proper cooperative file objects
|
||||
from file descriptors on POSIX platforms.
|
||||
"""
|
||||
|
||||
def __init__(self, fileno, mode=None, close=True):
|
||||
if not isinstance(fileno, (int, long)):
|
||||
raise TypeError('fileno must be int: %r' % fileno)
|
||||
self._fileno = fileno
|
||||
self._mode = mode or 'rb'
|
||||
self._close = close
|
||||
self._translate = 'U' in self._mode
|
||||
make_nonblocking(fileno)
|
||||
self._eat_newline = False
|
||||
self.hub = get_hub()
|
||||
io = self.hub.loop.io
|
||||
self._read_event = io(fileno, 1)
|
||||
self._write_event = io(fileno, 2)
|
||||
|
||||
def __repr__(self):
|
||||
if self._fileno is None:
|
||||
return '<%s at 0x%x closed>' % (self.__class__.__name__, id(self))
|
||||
else:
|
||||
args = (self.__class__.__name__, id(self), getattr(self, '_fileno', NA), getattr(self, '_mode', NA))
|
||||
return '<%s at 0x%x (%r, %r)>' % args
|
||||
|
||||
def makefile(self, *args, **kwargs):
|
||||
return _fileobject(self, *args, **kwargs)
|
||||
|
||||
def fileno(self):
|
||||
result = self._fileno
|
||||
if result is None:
|
||||
raise IOError(EBADF, 'Bad file descriptor (%s object is closed)' % self.__class__.__name__)
|
||||
return result
|
||||
|
||||
def detach(self):
|
||||
x = self._fileno
|
||||
self._fileno = None
|
||||
return x
|
||||
|
||||
def close(self):
|
||||
self.hub.cancel_wait(self._read_event, cancel_wait_ex)
|
||||
self.hub.cancel_wait(self._write_event, cancel_wait_ex)
|
||||
fileno = self._fileno
|
||||
if fileno is not None:
|
||||
self._fileno = None
|
||||
if self._close:
|
||||
os.close(fileno)
|
||||
|
||||
def sendall(self, data):
|
||||
fileno = self.fileno()
|
||||
bytes_total = len(data)
|
||||
bytes_written = 0
|
||||
while True:
|
||||
try:
|
||||
bytes_written += _write(fileno, _get_memory(data, bytes_written))
|
||||
except (IOError, OSError):
|
||||
code = sys.exc_info()[1].args[0]
|
||||
if code not in ignored_errors:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
if bytes_written >= bytes_total:
|
||||
return
|
||||
self.hub.wait(self._write_event)
|
||||
|
||||
def recv(self, size):
|
||||
while True:
|
||||
try:
|
||||
data = _read(self.fileno(), size)
|
||||
except (IOError, OSError):
|
||||
code = sys.exc_info()[1].args[0]
|
||||
if code not in ignored_errors:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
else:
|
||||
if not self._translate or not data:
|
||||
return data
|
||||
if self._eat_newline:
|
||||
self._eat_newline = False
|
||||
if data.startswith('\n'):
|
||||
data = data[1:]
|
||||
if not data:
|
||||
return self.recv(size)
|
||||
if data.endswith('\r'):
|
||||
self._eat_newline = True
|
||||
return self._translate_newlines(data)
|
||||
self.hub.wait(self._read_event)
|
||||
|
||||
def _translate_newlines(self, data):
|
||||
data = data.replace("\r\n", "\n")
|
||||
data = data.replace("\r", "\n")
|
||||
return data
|
||||
|
||||
if not SocketAdapter__del__:
|
||||
|
||||
def __del__(self, close=os.close):
|
||||
fileno = self._fileno
|
||||
if fileno is not None:
|
||||
close(fileno)
|
||||
|
||||
if SocketAdapter__del__:
|
||||
SocketAdapter.__del__ = UnboundMethodType(SocketAdapter__del__, None, SocketAdapter)
|
||||
|
||||
class FileObjectPosix(_fileobject):
|
||||
|
||||
def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
|
||||
if isinstance(fobj, (int, long)):
|
||||
fileno = fobj
|
||||
fobj = None
|
||||
else:
|
||||
fileno = fobj.fileno()
|
||||
sock = SocketAdapter(fileno, mode, close=close)
|
||||
self._fobj = fobj
|
||||
self._closed = False
|
||||
_fileobject.__init__(self, sock, mode=mode, bufsize=bufsize, close=close)
|
||||
|
||||
def __repr__(self):
|
||||
if self._sock is None:
|
||||
return '<%s closed>' % self.__class__.__name__
|
||||
elif self._fobj is None:
|
||||
return '<%s %s>' % (self.__class__.__name__, self._sock)
|
||||
else:
|
||||
return '<%s %s _fobj=%r>' % (self.__class__.__name__, self._sock, self._fobj)
|
||||
|
||||
def close(self):
|
||||
if self._closed:
|
||||
# make sure close() is only ran once when called concurrently
|
||||
# cannot rely on self._sock for this because we need to keep that until flush() is done
|
||||
return
|
||||
self._closed = True
|
||||
sock = self._sock
|
||||
if sock is None:
|
||||
return
|
||||
try:
|
||||
self.flush()
|
||||
finally:
|
||||
if self._fobj is not None or not self._close:
|
||||
sock.detach()
|
||||
self._sock = None
|
||||
self._fobj = None
|
||||
|
||||
def __getattr__(self, item):
|
||||
assert item != '_fobj'
|
||||
if self._fobj is None:
|
||||
raise FileObjectClosed
|
||||
return getattr(self._fobj, item)
|
||||
|
||||
if not noop:
|
||||
|
||||
def __del__(self):
|
||||
# disable _fileobject's __del__
|
||||
pass
|
||||
|
||||
if noop:
|
||||
FileObjectPosix.__del__ = UnboundMethodType(noop, None, FileObjectPosix)
|
||||
|
||||
|
||||
class FileObjectThread(object):
|
||||
|
||||
def __init__(self, fobj, *args, **kwargs):
|
||||
self._close = kwargs.pop('close', True)
|
||||
self.threadpool = kwargs.pop('threadpool', None)
|
||||
self.lock = kwargs.pop('lock', True)
|
||||
if kwargs:
|
||||
raise TypeError('Unexpected arguments: %r' % kwargs.keys())
|
||||
if self.lock is True:
|
||||
self.lock = Semaphore()
|
||||
elif not self.lock:
|
||||
self.lock = DummySemaphore()
|
||||
if not hasattr(self.lock, '__enter__'):
|
||||
raise TypeError('Expected a Semaphore or boolean, got %r' % type(self.lock))
|
||||
if isinstance(fobj, (int, long)):
|
||||
if not self._close:
|
||||
# we cannot do this, since fdopen object will close the descriptor
|
||||
raise TypeError('FileObjectThread does not support close=False')
|
||||
fobj = os.fdopen(fobj, *args)
|
||||
self._fobj = fobj
|
||||
if self.threadpool is None:
|
||||
self.threadpool = get_hub().threadpool
|
||||
|
||||
def _apply(self, func, args=None, kwargs=None):
|
||||
with self.lock:
|
||||
return self.threadpool.apply_e(BaseException, func, args, kwargs)
|
||||
|
||||
def close(self):
|
||||
fobj = self._fobj
|
||||
if fobj is None:
|
||||
return
|
||||
self._fobj = None
|
||||
try:
|
||||
self.flush(_fobj=fobj)
|
||||
finally:
|
||||
if self._close:
|
||||
fobj.close()
|
||||
|
||||
def flush(self, _fobj=None):
|
||||
if _fobj is not None:
|
||||
fobj = _fobj
|
||||
else:
|
||||
fobj = self._fobj
|
||||
if fobj is None:
|
||||
raise FileObjectClosed
|
||||
return self._apply(fobj.flush)
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s _fobj=%r threadpool=%r>' % (self.__class__.__name__, self._fobj, self.threadpool)
|
||||
|
||||
def __getattr__(self, item):
|
||||
assert item != '_fobj'
|
||||
if self._fobj is None:
|
||||
raise FileObjectClosed
|
||||
return getattr(self._fobj, item)
|
||||
|
||||
for method in ['read', 'readinto', 'readline', 'readlines', 'write', 'writelines', 'xreadlines']:
|
||||
|
||||
exec '''def %s(self, *args, **kwargs):
|
||||
fobj = self._fobj
|
||||
if fobj is None:
|
||||
raise FileObjectClosed
|
||||
return self._apply(fobj.%s, args, kwargs)
|
||||
''' % (method, method)
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
line = self.readline()
|
||||
if line:
|
||||
return line
|
||||
raise StopIteration
|
||||
|
||||
|
||||
FileObjectClosed = IOError(EBADF, 'Bad file descriptor (FileObject was closed)')
|
||||
|
||||
|
||||
try:
|
||||
FileObject = FileObjectPosix
|
||||
except NameError:
|
||||
FileObject = FileObjectThread
|
||||
|
||||
|
||||
class FileObjectBlock(object):
|
||||
|
||||
def __init__(self, fobj, *args, **kwargs):
|
||||
self._close = kwargs.pop('close', True)
|
||||
if kwargs:
|
||||
raise TypeError('Unexpected arguments: %r' % kwargs.keys())
|
||||
if isinstance(fobj, (int, long)):
|
||||
if not self._close:
|
||||
# we cannot do this, since fdopen object will close the descriptor
|
||||
raise TypeError('FileObjectBlock does not support close=False')
|
||||
fobj = os.fdopen(fobj, *args)
|
||||
self._fobj = fobj
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s %r>' % (self.__class__.__name__, self._fobj)
|
||||
|
||||
def __getattr__(self, item):
|
||||
assert item != '_fobj'
|
||||
if self._fobj is None:
|
||||
raise FileObjectClosed
|
||||
return getattr(self._fobj, item)
|
||||
|
||||
|
||||
config = os.environ.get('GEVENT_FILE')
|
||||
if config:
|
||||
klass = {'thread': 'gevent.fileobject.FileObjectThread',
|
||||
'posix': 'gevent.fileobject.FileObjectPosix',
|
||||
'block': 'gevent.fileobject.FileObjectBlock'}.get(config, config)
|
||||
if klass.startswith('gevent.fileobject.'):
|
||||
FileObject = globals()[klass.split('.', 2)[-1]]
|
||||
else:
|
||||
from gevent.hub import _import
|
||||
FileObject = _import(klass)
|
||||
del klass
|
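# Usage sketch for FileObject as selected above (optionally via the GEVENT_FILE
# environment variable: 'posix', 'thread' or 'block'). Hedged: the pipe and payload
# are illustrative; reads and writes cooperate with the gevent hub instead of blocking it.
def _example_fileobject():
    import os
    r, w = os.pipe()
    writer = FileObject(w, 'wb')
    reader = FileObject(r, 'rb')
    writer.write('ping\n')
    writer.flush()
    assert reader.readline() == 'ping\n'
    writer.close()
    reader.close()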
469
panda/python/Lib/site-packages/gevent/greenlet.py
Normal file
@@ -0,0 +1,469 @@
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||
|
||||
import sys
|
||||
from gevent.hub import greenlet, getcurrent, get_hub, GreenletExit, Waiter, PY3, iwait, wait
|
||||
from gevent.timeout import Timeout
|
||||
from collections import deque
|
||||
|
||||
|
||||
__all__ = ['Greenlet',
|
||||
'joinall',
|
||||
'killall']
|
||||
|
||||
|
||||
class SpawnedLink(object):
|
||||
"""A wrapper around link that calls it in another greenlet.
|
||||
|
||||
Can be called only from main loop.
|
||||
"""
|
||||
__slots__ = ['callback']
|
||||
|
||||
def __init__(self, callback):
|
||||
if not callable(callback):
|
||||
raise TypeError("Expected callable: %r" % (callback, ))
|
||||
self.callback = callback
|
||||
|
||||
def __call__(self, source):
|
||||
g = greenlet(self.callback, get_hub())
|
||||
g.switch(source)
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.callback)
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.callback == getattr(other, 'callback', other)
|
||||
|
||||
def __str__(self):
|
||||
return str(self.callback)
|
||||
|
||||
def __repr__(self):
|
||||
return repr(self.callback)
|
||||
|
||||
def __getattr__(self, item):
|
||||
assert item != 'callback'
|
||||
return getattr(self.callback, item)
|
||||
|
||||
|
||||
class SuccessSpawnedLink(SpawnedLink):
|
||||
"""A wrapper around link that calls it in another greenlet only if source succeed.
|
||||
|
||||
Can be called only from main loop.
|
||||
"""
|
||||
__slots__ = []
|
||||
|
||||
def __call__(self, source):
|
||||
if source.successful():
|
||||
return SpawnedLink.__call__(self, source)
|
||||
|
||||
|
||||
class FailureSpawnedLink(SpawnedLink):
|
||||
"""A wrapper around link that calls it in another greenlet only if source failed.
|
||||
|
||||
Can be called only from main loop.
|
||||
"""
|
||||
__slots__ = []
|
||||
|
||||
def __call__(self, source):
|
||||
if not source.successful():
|
||||
return SpawnedLink.__call__(self, source)
|
||||
|
||||
|
||||
class Greenlet(greenlet):
|
||||
"""A light-weight cooperatively-scheduled execution unit."""
|
||||
|
||||
def __init__(self, run=None, *args, **kwargs):
|
||||
hub = get_hub()
|
||||
greenlet.__init__(self, parent=hub)
|
||||
if run is not None:
|
||||
self._run = run
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
self._links = deque()
|
||||
self.value = None
|
||||
self._exception = _NONE
|
||||
self._notifier = None
|
||||
self._start_event = None
|
||||
|
||||
@property
|
||||
def loop(self):
|
||||
# needed by killall
|
||||
return self.parent.loop
|
||||
|
||||
if PY3:
|
||||
def __bool__(self):
|
||||
return self._start_event is not None and self._exception is _NONE
|
||||
else:
|
||||
def __nonzero__(self):
|
||||
return self._start_event is not None and self._exception is _NONE
|
||||
|
||||
@property
|
||||
def started(self):
|
||||
# DEPRECATED
|
||||
return bool(self)
|
||||
|
||||
def ready(self):
|
||||
"""Return true if and only if the greenlet has finished execution."""
|
||||
return self.dead or self._exception is not _NONE
|
||||
|
||||
def successful(self):
|
||||
"""Return true if and only if the greenlet has finished execution successfully,
|
||||
that is, without raising an error."""
|
||||
return self._exception is None
|
||||
|
||||
def __repr__(self):
|
||||
classname = self.__class__.__name__
|
||||
result = '<%s at %s' % (classname, hex(id(self)))
|
||||
formatted = self._formatinfo()
|
||||
if formatted:
|
||||
result += ': ' + formatted
|
||||
return result + '>'
|
||||
|
||||
def _formatinfo(self):
|
||||
try:
|
||||
return self._formatted_info
|
||||
except AttributeError:
|
||||
pass
|
||||
try:
|
||||
result = getfuncname(self.__dict__['_run'])
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
args = []
|
||||
if self.args:
|
||||
args = [repr(x)[:50] for x in self.args]
|
||||
if self.kwargs:
|
||||
args.extend(['%s=%s' % (key, repr(value)[:50]) for (key, value) in self.kwargs.items()])
|
||||
if args:
|
||||
result += '(' + ', '.join(args) + ')'
|
||||
# it is important to save the result here, because once the greenlet exits '_run' attribute will be removed
|
||||
self._formatted_info = result
|
||||
return result
|
||||
return ''
|
||||
|
||||
@property
|
||||
def exception(self):
|
||||
"""Holds the exception instance raised by the function if the greenlet has finished with an error.
|
||||
Otherwise ``None``.
|
||||
"""
|
||||
if self._exception is not _NONE:
|
||||
return self._exception
|
||||
|
||||
def throw(self, *args):
|
||||
"""Immediatelly switch into the greenlet and raise an exception in it.
|
||||
|
||||
Should only be called from the HUB, otherwise the current greenlet is left unscheduled forever.
|
||||
To raise an exception in a safe manner from any greenlet, use :meth:`kill`.
|
||||
|
||||
If a greenlet was started but never switched to yet, then also
|
||||
a) cancel the event that will start it
|
||||
b) fire the notifications as if an exception was raised in a greenlet
|
||||
"""
|
||||
if self._start_event is None:
|
||||
self._start_event = _dummy_event
|
||||
else:
|
||||
self._start_event.stop()
|
||||
try:
|
||||
greenlet.throw(self, *args)
|
||||
finally:
|
||||
if self._exception is _NONE and self.dead:
|
||||
# the greenlet was never switched to before and it will never be, _report_error was not called
|
||||
# the result was not set and the links weren't notified. let's do it here.
|
||||
# checking that self.dead is true is essential, because throw() does not necessarily kill the greenlet
|
||||
# (if the exception raised by throw() is caught somewhere inside the greenlet).
|
||||
if len(args) == 1:
|
||||
arg = args[0]
|
||||
#if isinstance(arg, type):
|
||||
if type(arg) is type(Exception):
|
||||
args = (arg, arg(), None)
|
||||
else:
|
||||
args = (type(arg), arg, None)
|
||||
elif not args:
|
||||
args = (GreenletExit, GreenletExit(), None)
|
||||
self._report_error(args)
|
||||
|
||||
def start(self):
|
||||
"""Schedule the greenlet to run in this loop iteration"""
|
||||
if self._start_event is None:
|
||||
self._start_event = self.parent.loop.run_callback(self.switch)
|
||||
|
||||
def start_later(self, seconds):
|
||||
"""Schedule the greenlet to run in the future loop iteration *seconds* later"""
|
||||
if self._start_event is None:
|
||||
self._start_event = self.parent.loop.timer(seconds)
|
||||
self._start_event.start(self.switch)
|
||||
|
||||
@classmethod
|
||||
def spawn(cls, *args, **kwargs):
|
||||
"""Return a new :class:`Greenlet` object, scheduled to start.
|
||||
|
||||
The arguments are passed to :meth:`Greenlet.__init__`.
|
||||
"""
|
||||
g = cls(*args, **kwargs)
|
||||
g.start()
|
||||
return g
|
||||
|
||||
@classmethod
|
||||
def spawn_later(cls, seconds, *args, **kwargs):
|
||||
"""Return a Greenlet object, scheduled to start *seconds* later.
|
||||
|
||||
The arguments are passed to :meth:`Greenlet.__init__`.
|
||||
"""
|
||||
g = cls(*args, **kwargs)
|
||||
g.start_later(seconds)
|
||||
return g
|
||||
|
||||
def kill(self, exception=GreenletExit, block=True, timeout=None):
|
||||
"""Raise the exception in the greenlet.
|
||||
|
||||
If block is ``True`` (the default), wait until the greenlet dies or the optional timeout expires.
|
||||
If block is ``False``, the current greenlet is not unscheduled.
|
||||
|
||||
The function always returns ``None`` and never raises an error.
|
||||
|
||||
`Changed in version 0.13.0:` *block* is now ``True`` by default.
|
||||
"""
|
||||
# XXX this function should not switch out if greenlet is not started but it does
|
||||
# XXX fix it (will have to override 'dead' property of greenlet.greenlet)
|
||||
if self._start_event is None:
|
||||
self._start_event = _dummy_event
|
||||
else:
|
||||
self._start_event.stop()
|
||||
if not self.dead:
|
||||
waiter = Waiter()
|
||||
self.parent.loop.run_callback(_kill, self, exception, waiter)
|
||||
if block:
|
||||
waiter.get()
|
||||
self.join(timeout)
|
||||
# it should be OK to use kill() in finally or kill a greenlet from more than one place;
|
||||
# thus it should not raise when the greenlet is already killed (= not started)
|
||||
|
||||
def get(self, block=True, timeout=None):
|
||||
"""Return the result the greenlet has returned or re-raise the exception it has raised.
|
||||
|
||||
If block is ``False``, raise :class:`gevent.Timeout` if the greenlet is still alive.
|
||||
If block is ``True``, unschedule the current greenlet until the result is available
|
||||
or the timeout expires. In the latter case, :class:`gevent.Timeout` is raised.
|
||||
"""
|
||||
if self.ready():
|
||||
if self.successful():
|
||||
return self.value
|
||||
else:
|
||||
raise self._exception
|
||||
if block:
|
||||
switch = getcurrent().switch
|
||||
self.rawlink(switch)
|
||||
try:
|
||||
t = Timeout.start_new(timeout)
|
||||
try:
|
||||
result = self.parent.switch()
|
||||
assert result is self, 'Invalid switch into Greenlet.get(): %r' % (result, )
|
||||
finally:
|
||||
t.cancel()
|
||||
except:
|
||||
# unlinking in 'except' instead of finally is an optimization:
|
||||
# if switch occurred normally then link was already removed in _notify_links
|
||||
# and there's no need to touch the links set.
|
||||
# Note, however, that if "Invalid switch" assert was removed and invalid switch
|
||||
# did happen, the link would remain, causing another invalid switch later in this greenlet.
|
||||
self.unlink(switch)
|
||||
raise
|
||||
if self.ready():
|
||||
if self.successful():
|
||||
return self.value
|
||||
else:
|
||||
raise self._exception
|
||||
else:
|
||||
raise Timeout
|
||||
|
||||
def join(self, timeout=None):
|
||||
"""Wait until the greenlet finishes or *timeout* expires.
|
||||
Return ``None`` regardless.
|
||||
"""
|
||||
if self.ready():
|
||||
return
|
||||
else:
|
||||
switch = getcurrent().switch
|
||||
self.rawlink(switch)
|
||||
try:
|
||||
t = Timeout.start_new(timeout)
|
||||
try:
|
||||
result = self.parent.switch()
|
||||
assert result is self, 'Invalid switch into Greenlet.join(): %r' % (result, )
|
||||
finally:
|
||||
t.cancel()
|
||||
except Timeout:
|
||||
self.unlink(switch)
|
||||
if sys.exc_info()[1] is not t:
|
||||
raise
|
||||
except:
|
||||
self.unlink(switch)
|
||||
raise
|
||||
|
||||
def _report_result(self, result):
|
||||
self._exception = None
|
||||
self.value = result
|
||||
if self._links and not self._notifier:
|
||||
self._notifier = self.parent.loop.run_callback(self._notify_links)
|
||||
|
||||
def _report_error(self, exc_info):
|
||||
exception = exc_info[1]
|
||||
if isinstance(exception, GreenletExit):
|
||||
self._report_result(exception)
|
||||
return
|
||||
self._exception = exception
|
||||
|
||||
if self._links and not self._notifier:
|
||||
self._notifier = self.parent.loop.run_callback(self._notify_links)
|
||||
|
||||
self.parent.handle_error(self, *exc_info)
|
||||
|
||||
def run(self):
|
||||
try:
|
||||
if self._start_event is None:
|
||||
self._start_event = _dummy_event
|
||||
else:
|
||||
self._start_event.stop()
|
||||
try:
|
||||
result = self._run(*self.args, **self.kwargs)
|
||||
except:
|
||||
self._report_error(sys.exc_info())
|
||||
return
|
||||
self._report_result(result)
|
||||
finally:
|
||||
self.__dict__.pop('_run', None)
|
||||
self.__dict__.pop('args', None)
|
||||
self.__dict__.pop('kwargs', None)
|
||||
|
||||
def rawlink(self, callback):
|
||||
"""Register a callable to be executed when the greenlet finishes the execution.
|
||||
|
||||
WARNING: the callable will be called in the HUB greenlet.
|
||||
"""
|
||||
if not callable(callback):
|
||||
raise TypeError('Expected callable: %r' % (callback, ))
|
||||
self._links.append(callback)
|
||||
if self.ready() and self._links and not self._notifier:
|
||||
self._notifier = self.parent.loop.run_callback(self._notify_links)
|
||||
|
||||
def link(self, callback, SpawnedLink=SpawnedLink):
|
||||
"""Link greenlet's completion to a callable.
|
||||
|
||||
The *callback* will be called with this instance as an argument
|
||||
once this greenlet is dead. The callable is called in its own greenlet.
|
||||
"""
|
||||
self.rawlink(SpawnedLink(callback))
|
||||
|
||||
def unlink(self, callback):
|
||||
"""Remove the callback set by :meth:`link` or :meth:`rawlink`"""
|
||||
try:
|
||||
self._links.remove(callback)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def link_value(self, callback, SpawnedLink=SuccessSpawnedLink):
|
||||
"""Like :meth:`link` but *callback* is only notified when the greenlet has completed successfully"""
|
||||
self.link(callback, SpawnedLink=SpawnedLink)
|
||||
|
||||
def link_exception(self, callback, SpawnedLink=FailureSpawnedLink):
|
||||
"""Like :meth:`link` but *callback* is only notified when the greenlet dies because of unhandled exception"""
|
||||
self.link(callback, SpawnedLink=SpawnedLink)
|
||||
|
||||
def _notify_links(self):
|
||||
while self._links:
|
||||
link = self._links.popleft()
|
||||
try:
|
||||
link(self)
|
||||
except:
|
||||
self.parent.handle_error((link, self), *sys.exc_info())
|
||||
|
||||
|
||||
class _dummy_event(object):
|
||||
|
||||
def stop(self):
|
||||
pass
|
||||
|
||||
|
||||
_dummy_event = _dummy_event()
|
||||
|
||||
|
||||
def _kill(greenlet, exception, waiter):
|
||||
try:
|
||||
greenlet.throw(exception)
|
||||
except:
|
||||
# XXX do we need this here?
|
||||
greenlet.parent.handle_error(greenlet, *sys.exc_info())
|
||||
waiter.switch()
|
||||
|
||||
|
||||
def joinall(greenlets, timeout=None, raise_error=False, count=None):
|
||||
if not raise_error:
|
||||
wait(greenlets, timeout=timeout)
|
||||
else:
|
||||
for obj in iwait(greenlets, timeout=timeout):
|
||||
if getattr(obj, 'exception', None) is not None:
|
||||
raise obj.exception
|
||||
if count is not None:
|
||||
count -= 1
|
||||
if count <= 0:
|
||||
break
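# A usage note (an illustration only): joinall(greenlets, raise_error=True)
# re-raises the first exception encountered instead of swallowing it, e.g.
#
#     def boom():
#         raise ValueError('boom')
#
#     try:
#         joinall([Greenlet.spawn(boom)], raise_error=True)
#     except ValueError:
#         pass            # the worker's exception surfaces in the caller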
|
||||
|
||||
|
||||
def _killall3(greenlets, exception, waiter):
|
||||
diehards = []
|
||||
for g in greenlets:
|
||||
if not g.dead:
|
||||
try:
|
||||
g.throw(exception)
|
||||
except:
|
||||
g.parent.handle_error(g, *sys.exc_info())
|
||||
if not g.dead:
|
||||
diehards.append(g)
|
||||
waiter.switch(diehards)
|
||||
|
||||
|
||||
def _killall(greenlets, exception):
|
||||
for g in greenlets:
|
||||
if not g.dead:
|
||||
try:
|
||||
g.throw(exception)
|
||||
except:
|
||||
g.parent.handle_error(g, *sys.exc_info())
|
||||
|
||||
|
||||
def killall(greenlets, exception=GreenletExit, block=True, timeout=None):
|
||||
if not greenlets:
|
||||
return
|
||||
loop = greenlets[0].loop
|
||||
if block:
|
||||
waiter = Waiter()
|
||||
loop.run_callback(_killall3, greenlets, exception, waiter)
|
||||
t = Timeout.start_new(timeout)
|
||||
try:
|
||||
alive = waiter.get()
|
||||
if alive:
|
||||
joinall(alive, raise_error=False)
|
||||
finally:
|
||||
t.cancel()
|
||||
else:
|
||||
loop.run_callback(_killall, greenlets, exception)
|
||||
|
||||
|
||||
if PY3:
|
||||
_meth_self = "__self__"
|
||||
else:
|
||||
_meth_self = "im_self"
|
||||
|
||||
|
||||
def getfuncname(func):
|
||||
if not hasattr(func, _meth_self):
|
||||
try:
|
||||
funcname = func.__name__
|
||||
except AttributeError:
|
||||
pass
|
||||
else:
|
||||
if funcname != '<lambda>':
|
||||
return funcname
|
||||
return repr(func)
|
||||
|
||||
|
||||
_NONE = Exception("Neither exception nor value")
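# A minimal end-to-end sketch of the API above (an illustration only, assuming
# the gevent package is installed; gevent.sleep lives in gevent.hub, not here):
if __name__ == '__main__':
    import gevent

    def task(n):
        gevent.sleep(0.01 * n)      # cooperative sleep; other greenlets keep running
        return n * n

    greenlets = [gevent.spawn(task, n) for n in range(5)]
    gevent.joinall(greenlets, timeout=2)
    print([g.value for g in greenlets])      # [0, 1, 4, 9, 16]

    g = gevent.spawn(task, 100)
    g.kill(block=True)                       # raises GreenletExit inside task
    print(g.ready() and g.successful())      # True: GreenletExit counts as success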
|
676
panda/python/Lib/site-packages/gevent/hub.py
Normal file
|
@ -0,0 +1,676 @@
|
|||
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||
|
||||
from __future__ import absolute_import
|
||||
import sys
|
||||
import os
|
||||
import traceback
|
||||
|
||||
import greenlet # http://pypi.python.org/pypi/greenlet/
|
||||
greenlet_version = getattr(greenlet, '__version__', None)
|
||||
if greenlet_version:
|
||||
greenlet_version_info = [int(x) for x in greenlet_version.split('.')]
|
||||
if not greenlet_version or greenlet_version_info[:3] < [0, 3, 2]:
|
||||
raise ImportError('''Your version of greenlet (%s) is too old (required >= 0.3.2)
|
||||
You can get a newer version of greenlet from http://pypi.python.org/pypi/greenlet/''' % (greenlet_version, ))
|
||||
from greenlet import greenlet, getcurrent, GreenletExit
|
||||
|
||||
|
||||
__all__ = ['getcurrent',
|
||||
'GreenletExit',
|
||||
'spawn_raw',
|
||||
'sleep',
|
||||
'kill',
|
||||
'signal',
|
||||
'reinit',
|
||||
'get_hub',
|
||||
'Hub',
|
||||
'Waiter']
|
||||
|
||||
|
||||
PY3 = sys.version_info[0] >= 3
|
||||
|
||||
|
||||
if PY3:
|
||||
string_types = str,
|
||||
integer_types = int,
|
||||
else:
|
||||
string_types = basestring,
|
||||
integer_types = (int, long)
|
||||
|
||||
|
||||
if sys.version_info[0] <= 2:
|
||||
import thread
|
||||
else:
|
||||
import _thread as thread
|
||||
threadlocal = thread._local
|
||||
_threadlocal = threadlocal()
|
||||
_threadlocal.Hub = None
|
||||
get_ident = thread.get_ident
|
||||
MAIN_THREAD = get_ident()
|
||||
|
||||
|
||||
def spawn_raw(function, *args, **kwargs):
|
||||
hub = get_hub()
|
||||
g = greenlet(function, hub)
|
||||
hub.loop.run_callback(g.switch, *args, **kwargs)
|
||||
return g
|
||||
|
||||
|
||||
def sleep(seconds=0, ref=True):
|
||||
"""Put the current greenlet to sleep for at least *seconds*.
|
||||
|
||||
*seconds* may be specified as an integer, or a float if fractional seconds
|
||||
are desired.
|
||||
|
||||
If *ref* is false, the greenlet running sleep() will not prevent gevent.wait()
|
||||
from exiting.
|
||||
"""
|
||||
hub = get_hub()
|
||||
loop = hub.loop
|
||||
if seconds <= 0:
|
||||
waiter = Waiter()
|
||||
loop.run_callback(waiter.switch)
|
||||
waiter.get()
|
||||
else:
|
||||
hub.wait(loop.timer(seconds, ref=ref))
|
||||
|
||||
|
||||
def idle(priority=0):
|
||||
hub = get_hub()
|
||||
watcher = hub.loop.idle()
|
||||
if priority:
|
||||
watcher.priority = priority
|
||||
hub.wait(watcher)
|
||||
|
||||
|
||||
def kill(greenlet, exception=GreenletExit):
|
||||
"""Kill greenlet asynchronously. The current greenlet is not unscheduled.
|
||||
|
||||
Note that the :meth:`gevent.Greenlet.kill` method does the same and more. However,
|
||||
the MAIN greenlet - the one that exists initially - does not have a ``kill()`` method,
|
||||
so you have to use this function.
|
||||
"""
|
||||
if not greenlet.dead:
|
||||
get_hub().loop.run_callback(greenlet.throw, exception)
|
||||
|
||||
|
||||
class signal(object):
|
||||
|
||||
greenlet_class = None
|
||||
|
||||
def __init__(self, signalnum, handler, *args, **kwargs):
|
||||
self.hub = get_hub()
|
||||
self.watcher = self.hub.loop.signal(signalnum, ref=False)
|
||||
self.watcher.start(self._start)
|
||||
self.handler = handler
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
if self.greenlet_class is None:
|
||||
from gevent import Greenlet
|
||||
self.greenlet_class = Greenlet
|
||||
|
||||
def _get_ref(self):
|
||||
return self.watcher.ref
|
||||
|
||||
def _set_ref(self, value):
|
||||
self.watcher.ref = value
|
||||
|
||||
ref = property(_get_ref, _set_ref)
|
||||
del _get_ref, _set_ref
|
||||
|
||||
def cancel(self):
|
||||
self.watcher.stop()
|
||||
|
||||
def _start(self):
|
||||
try:
|
||||
greenlet = self.greenlet_class(self.handle)
|
||||
greenlet.switch()
|
||||
except:
|
||||
self.hub.handle_error(None, *sys.exc_info())
|
||||
|
||||
def handle(self):
|
||||
try:
|
||||
self.handler(*self.args, **self.kwargs)
|
||||
except:
|
||||
self.hub.handle_error(None, *sys.exc_info())
|
||||
|
||||
|
||||
def reinit():
|
||||
hub = _get_hub()
|
||||
if hub is not None:
|
||||
hub.loop.reinit()
|
||||
|
||||
|
||||
def get_hub_class():
|
||||
"""Return the type of hub to use for the current thread.
|
||||
|
||||
If there's no type of hub for the current thread yet, 'gevent.hub.Hub' is used.
|
||||
"""
|
||||
global _threadlocal
|
||||
try:
|
||||
hubtype = _threadlocal.Hub
|
||||
except AttributeError:
|
||||
hubtype = None
|
||||
if hubtype is None:
|
||||
hubtype = _threadlocal.Hub = Hub
|
||||
return hubtype
|
||||
|
||||
|
||||
def get_hub(*args, **kwargs):
|
||||
"""Return the hub for the current thread.
|
||||
|
||||
If the hub does not exist in the current thread, a new one is created by calling :meth:`get_hub_class`.
|
||||
"""
|
||||
global _threadlocal
|
||||
try:
|
||||
return _threadlocal.hub
|
||||
except AttributeError:
|
||||
hubtype = get_hub_class()
|
||||
hub = _threadlocal.hub = hubtype(*args, **kwargs)
|
||||
return hub
|
||||
|
||||
|
||||
def _get_hub():
|
||||
"""Return the hub for the current thread.
|
||||
|
||||
Return ``None`` if no hub has been created yet.
|
||||
"""
|
||||
global _threadlocal
|
||||
try:
|
||||
return _threadlocal.hub
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
|
||||
def set_hub(hub):
|
||||
_threadlocal.hub = hub
|
||||
|
||||
|
||||
def _import(path):
|
||||
if isinstance(path, list):
|
||||
if not path:
|
||||
raise ImportError('Cannot import from empty list: %r' % (path, ))
|
||||
for item in path[:-1]:
|
||||
try:
|
||||
return _import(item)
|
||||
except ImportError:
|
||||
pass
|
||||
return _import(path[-1])
|
||||
if not isinstance(path, string_types):
|
||||
return path
|
||||
if '.' not in path:
|
||||
raise ImportError("Cannot import %r (required format: [path/][package.]module.class)" % path)
|
||||
if '/' in path:
|
||||
package_path, path = path.rsplit('/', 1)
|
||||
sys.path = [package_path] + sys.path
|
||||
else:
|
||||
package_path = None
|
||||
try:
|
||||
module, item = path.rsplit('.', 1)
|
||||
x = __import__(module)
|
||||
for attr in path.split('.')[1:]:
|
||||
oldx = x
|
||||
x = getattr(x, attr, _NONE)
|
||||
if x is _NONE:
|
||||
raise ImportError('Cannot import %r from %r' % (attr, oldx))
|
||||
return x
|
||||
finally:
|
||||
try:
|
||||
sys.path.remove(package_path)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
|
||||
def config(default, envvar):
|
||||
result = os.environ.get(envvar) or default
|
||||
if isinstance(result, string_types):
|
||||
return result.split(',')
|
||||
return result
|
||||
|
||||
|
||||
def resolver_config(default, envvar):
|
||||
result = config(default, envvar)
|
||||
return [_resolvers.get(x, x) for x in result]
|
||||
|
||||
|
||||
_resolvers = {'ares': 'gevent.resolver_ares.Resolver',
|
||||
'thread': 'gevent.resolver_thread.Resolver',
|
||||
'block': 'gevent.socket.BlockingResolver'}
|
||||
|
||||
|
||||
class Hub(greenlet):
|
||||
"""A greenlet that runs the event loop.
|
||||
|
||||
It is created automatically by :func:`get_hub`.
|
||||
"""
|
||||
|
||||
SYSTEM_ERROR = (KeyboardInterrupt, SystemExit, SystemError)
|
||||
NOT_ERROR = (GreenletExit, SystemExit)
|
||||
loop_class = config('gevent.core.loop', 'GEVENT_LOOP')
|
||||
resolver_class = ['gevent.resolver_thread.Resolver',
|
||||
'gevent.resolver_ares.Resolver',
|
||||
'gevent.socket.BlockingResolver']
|
||||
resolver_class = resolver_config(resolver_class, 'GEVENT_RESOLVER')
|
||||
threadpool_class = config('gevent.threadpool.ThreadPool', 'GEVENT_THREADPOOL')
|
||||
backend = config(None, 'GEVENT_BACKEND')
|
||||
format_context = 'pprint.pformat'
|
||||
threadpool_size = 10
|
||||
|
||||
def __init__(self, loop=None, default=None):
|
||||
greenlet.__init__(self)
|
||||
if hasattr(loop, 'run'):
|
||||
if default is not None:
|
||||
raise TypeError("Unexpected argument: default")
|
||||
self.loop = loop
|
||||
else:
|
||||
if default is None and get_ident() != MAIN_THREAD:
|
||||
default = False
|
||||
loop_class = _import(self.loop_class)
|
||||
if loop is None:
|
||||
loop = self.backend
|
||||
self.loop = loop_class(flags=loop, default=default)
|
||||
self._resolver = None
|
||||
self._threadpool = None
|
||||
self.format_context = _import(self.format_context)
|
||||
|
||||
def __repr__(self):
|
||||
if self.loop is None:
|
||||
info = 'destroyed'
|
||||
else:
|
||||
try:
|
||||
info = self.loop._format()
|
||||
except Exception, ex:
|
||||
info = str(ex) or repr(ex) or 'error'
|
||||
result = '<%s at 0x%x %s' % (self.__class__.__name__, id(self), info)
|
||||
if self._resolver is not None:
|
||||
result += ' resolver=%r' % self._resolver
|
||||
if self._threadpool is not None:
|
||||
result += ' threadpool=%r' % self._threadpool
|
||||
return result + '>'
|
||||
|
||||
def handle_error(self, context, type, value, tb):
|
||||
if not issubclass(type, self.NOT_ERROR):
|
||||
self.print_exception(context, type, value, tb)
|
||||
if context is None or issubclass(type, self.SYSTEM_ERROR):
|
||||
self.handle_system_error(type, value)
|
||||
|
||||
def handle_system_error(self, type, value):
|
||||
current = getcurrent()
|
||||
if current is self or current is self.parent or self.loop is None:
|
||||
self.parent.throw(type, value)
|
||||
else:
|
||||
# in case system error was handled and life goes on
|
||||
# switch back to this greenlet as well
|
||||
cb = None
|
||||
try:
|
||||
cb = self.loop.run_callback(current.switch)
|
||||
except:
|
||||
traceback.print_exc()
|
||||
try:
|
||||
self.parent.throw(type, value)
|
||||
finally:
|
||||
if cb is not None:
|
||||
cb.stop()
|
||||
|
||||
def print_exception(self, context, type, value, tb):
|
||||
traceback.print_exception(type, value, tb)
|
||||
del tb
|
||||
if context is not None:
|
||||
if not isinstance(context, str):
|
||||
try:
|
||||
context = self.format_context(context)
|
||||
except:
|
||||
traceback.print_exc()
|
||||
context = repr(context)
|
||||
sys.stderr.write('%s failed with %s\n\n' % (context, getattr(type, '__name__', 'exception'), ))
|
||||
|
||||
def switch(self):
|
||||
switch_out = getattr(getcurrent(), 'switch_out', None)
|
||||
if switch_out is not None:
|
||||
switch_out()
|
||||
return greenlet.switch(self)
|
||||
|
||||
def switch_out(self):
|
||||
raise AssertionError('Impossible to call blocking function in the event loop callback')
|
||||
|
||||
def wait(self, watcher):
|
||||
waiter = Waiter()
|
||||
unique = object()
|
||||
watcher.start(waiter.switch, unique)
|
||||
try:
|
||||
result = waiter.get()
|
||||
assert result is unique, 'Invalid switch into %s: %r (expected %r)' % (getcurrent(), result, unique)
|
||||
finally:
|
||||
watcher.stop()
|
||||
|
||||
def cancel_wait(self, watcher, error):
|
||||
if watcher.callback is not None:
|
||||
self.loop.run_callback(self._cancel_wait, watcher, error)
|
||||
|
||||
def _cancel_wait(self, watcher, error):
|
||||
if watcher.active:
|
||||
switch = watcher.callback
|
||||
if switch is not None:
|
||||
greenlet = getattr(switch, '__self__', None)
|
||||
if greenlet is not None:
|
||||
greenlet.throw(error)
|
||||
|
||||
def run(self):
|
||||
assert self is getcurrent(), 'Do not call Hub.run() directly'
|
||||
while True:
|
||||
loop = self.loop
|
||||
loop.error_handler = self
|
||||
try:
|
||||
loop.run()
|
||||
finally:
|
||||
loop.error_handler = None # break the refcount cycle
|
||||
self.parent.throw(LoopExit('This operation would block forever'))
|
||||
# this function must never return, as it will cause switch() in the parent greenlet
|
||||
# to return an unexpected value
|
||||
# It is still possible to kill this greenlet with throw. However, in that case
|
||||
# switching to it is no longer safe, as switch will return immediately
|
||||
|
||||
def join(self, timeout=None):
|
||||
"""Wait for the event loop to finish. Exits only when there are
|
||||
no more spawned greenlets, started servers, active timeouts or watchers.
|
||||
|
||||
If *timeout* is provided, wait no longer than the specified number of seconds.
|
||||
|
||||
Returns True if exited because the loop finished execution.
|
||||
Returns False if exited because the timeout expired.
|
||||
"""
|
||||
assert getcurrent() is self.parent, "only possible from the MAIN greenlet"
|
||||
if self.dead:
|
||||
return True
|
||||
|
||||
waiter = Waiter()
|
||||
|
||||
if timeout is not None:
|
||||
timeout = self.loop.timer(timeout, ref=False)
|
||||
timeout.start(waiter.switch)
|
||||
|
||||
try:
|
||||
try:
|
||||
waiter.get()
|
||||
except LoopExit:
|
||||
return True
|
||||
finally:
|
||||
if timeout is not None:
|
||||
timeout.stop()
|
||||
return False
|
||||
|
||||
def destroy(self, destroy_loop=None):
|
||||
global _threadlocal
|
||||
if self._resolver is not None:
|
||||
self._resolver.close()
|
||||
del self._resolver
|
||||
if self._threadpool is not None:
|
||||
self._threadpool.kill()
|
||||
del self._threadpool
|
||||
if destroy_loop is None:
|
||||
destroy_loop = not self.loop.default
|
||||
if destroy_loop:
|
||||
self.loop.destroy()
|
||||
self.loop = None
|
||||
if getattr(_threadlocal, 'hub', None) is self:
|
||||
del _threadlocal.hub
|
||||
|
||||
def _get_resolver(self):
|
||||
if self._resolver is None:
|
||||
if self.resolver_class is not None:
|
||||
self.resolver_class = _import(self.resolver_class)
|
||||
self._resolver = self.resolver_class(hub=self)
|
||||
return self._resolver
|
||||
|
||||
def _set_resolver(self, value):
|
||||
self._resolver = value
|
||||
|
||||
def _del_resolver(self):
|
||||
del self._resolver
|
||||
|
||||
resolver = property(_get_resolver, _set_resolver, _del_resolver)
|
||||
|
||||
def _get_threadpool(self):
|
||||
if self._threadpool is None:
|
||||
if self.threadpool_class is not None:
|
||||
self.threadpool_class = _import(self.threadpool_class)
|
||||
self._threadpool = self.threadpool_class(self.threadpool_size, hub=self)
|
||||
return self._threadpool
|
||||
|
||||
def _set_threadpool(self, value):
|
||||
self._threadpool = value
|
||||
|
||||
def _del_threadpool(self):
|
||||
del self._threadpool
|
||||
|
||||
threadpool = property(_get_threadpool, _set_threadpool, _del_threadpool)
|
||||
|
||||
|
||||
class LoopExit(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class Waiter(object):
|
||||
"""A low level communication utility for greenlets.
|
||||
|
||||
Wrapper around greenlet's ``switch()`` and ``throw()`` calls that makes them somewhat safer:
|
||||
|
||||
* switching will occur only if the waiting greenlet is currently executing the :meth:`get` method;
|
||||
* any error raised in the greenlet is handled inside :meth:`switch` and :meth:`throw`
|
||||
* if :meth:`switch`/:meth:`throw` is called before the receiver calls :meth:`get`, then :class:`Waiter`
|
||||
will store the value/exception. The following :meth:`get` will return the value/raise the exception.
|
||||
|
||||
The :meth:`switch` and :meth:`throw` methods must only be called from the :class:`Hub` greenlet.
|
||||
The :meth:`get` method must be called from a greenlet other than :class:`Hub`.
|
||||
|
||||
>>> result = Waiter()
|
||||
>>> timer = get_hub().loop.timer(0.1)
|
||||
>>> timer.start(result.switch, 'hello from Waiter')
|
||||
>>> result.get() # blocks for 0.1 seconds
|
||||
'hello from Waiter'
|
||||
|
||||
If switch is called before the greenlet gets a chance to call :meth:`get` then
|
||||
:class:`Waiter` stores the value.
|
||||
|
||||
>>> result = Waiter()
|
||||
>>> timer = get_hub().loop.timer(0.1)
|
||||
>>> timer.start(result.switch, 'hi from Waiter')
|
||||
>>> sleep(0.2)
|
||||
>>> result.get() # returns immediately without blocking
|
||||
'hi from Waiter'
|
||||
|
||||
.. warning::
|
||||
|
||||
This is a limited and dangerous way to communicate between greenlets. It can easily
|
||||
leave a greenlet unscheduled forever if used incorrectly. Consider using safer
|
||||
:class:`Event`/:class:`AsyncResult`/:class:`Queue` classes.
|
||||
"""
|
||||
|
||||
__slots__ = ['hub', 'greenlet', 'value', '_exception']
|
||||
|
||||
def __init__(self, hub=None):
|
||||
if hub is None:
|
||||
self.hub = get_hub()
|
||||
else:
|
||||
self.hub = hub
|
||||
self.greenlet = None
|
||||
self.value = None
|
||||
self._exception = _NONE
|
||||
|
||||
def clear(self):
|
||||
self.greenlet = None
|
||||
self.value = None
|
||||
self._exception = _NONE
|
||||
|
||||
def __str__(self):
|
||||
if self._exception is _NONE:
|
||||
return '<%s greenlet=%s>' % (type(self).__name__, self.greenlet)
|
||||
elif self._exception is None:
|
||||
return '<%s greenlet=%s value=%r>' % (type(self).__name__, self.greenlet, self.value)
|
||||
else:
|
||||
return '<%s greenlet=%s exc_info=%r>' % (type(self).__name__, self.greenlet, self.exc_info)
|
||||
|
||||
def ready(self):
|
||||
"""Return true if and only if it holds a value or an exception"""
|
||||
return self._exception is not _NONE
|
||||
|
||||
def successful(self):
|
||||
"""Return true if and only if it is ready and holds a value"""
|
||||
return self._exception is None
|
||||
|
||||
@property
|
||||
def exc_info(self):
|
||||
"Holds the exception info passed to :meth:`throw` if :meth:`throw` was called. Otherwise ``None``."
|
||||
if self._exception is not _NONE:
|
||||
return self._exception
|
||||
|
||||
def switch(self, value=None):
|
||||
"""Switch to the greenlet if one's available. Otherwise store the value."""
|
||||
greenlet = self.greenlet
|
||||
if greenlet is None:
|
||||
self.value = value
|
||||
self._exception = None
|
||||
else:
|
||||
assert getcurrent() is self.hub, "Can only use Waiter.switch method from the Hub greenlet"
|
||||
switch = greenlet.switch
|
||||
try:
|
||||
switch(value)
|
||||
except:
|
||||
self.hub.handle_error(switch, *sys.exc_info())
|
||||
|
||||
def switch_args(self, *args):
|
||||
return self.switch(args)
|
||||
|
||||
def throw(self, *throw_args):
|
||||
"""Switch to the greenlet with the exception. If there's no greenlet, store the exception."""
|
||||
greenlet = self.greenlet
|
||||
if greenlet is None:
|
||||
self._exception = throw_args
|
||||
else:
|
||||
assert getcurrent() is self.hub, "Can only use Waiter.throw method from the Hub greenlet"
|
||||
throw = greenlet.throw
|
||||
try:
|
||||
throw(*throw_args)
|
||||
except:
|
||||
self.hub.handle_error(throw, *sys.exc_info())
|
||||
|
||||
def get(self):
|
||||
"""If a value/an exception is stored, return/raise it. Otherwise until switch() or throw() is called."""
|
||||
if self._exception is not _NONE:
|
||||
if self._exception is None:
|
||||
return self.value
|
||||
else:
|
||||
getcurrent().throw(*self._exception)
|
||||
else:
|
||||
assert self.greenlet is None, 'This Waiter is already used by %r' % (self.greenlet, )
|
||||
self.greenlet = getcurrent()
|
||||
try:
|
||||
return self.hub.switch()
|
||||
finally:
|
||||
self.greenlet = None
|
||||
|
||||
def __call__(self, source):
|
||||
if source.exception is None:
|
||||
self.switch(source.value)
|
||||
else:
|
||||
self.throw(source.exception)
|
||||
|
||||
# can also have a debugging version, that wraps the value in a tuple (self, value) in switch()
|
||||
# and unwraps it in wait() thus checking that switch() was indeed called
|
||||
|
||||
|
||||
def iwait(objects, timeout=None):
|
||||
"""Yield objects as they are ready, until all are ready or timeout expired.
|
||||
|
||||
*objects* must be an iterable yielding instances implementing the wait protocol (rawlink() and unlink()).
|
||||
"""
|
||||
# QQQ would be nice to support iterable here that can be generated slowly (why?)
|
||||
waiter = Waiter()
|
||||
switch = waiter.switch
|
||||
if timeout is not None:
|
||||
timer = get_hub().loop.timer(timeout, priority=-1)
|
||||
timer.start(waiter.switch, _NONE)
|
||||
try:
|
||||
count = len(objects)
|
||||
for obj in objects:
|
||||
obj.rawlink(switch)
|
||||
for _ in xrange(count):
|
||||
item = waiter.get()
|
||||
waiter.clear()
|
||||
if item is _NONE:
|
||||
return
|
||||
yield item
|
||||
finally:
|
||||
if timeout is not None:
|
||||
timer.stop()
|
||||
for obj in objects:
|
||||
unlink = getattr(obj, 'unlink', None)
|
||||
if unlink:
|
||||
try:
|
||||
unlink(switch)
|
||||
except:
|
||||
traceback.print_exc()
|
||||
|
||||
|
||||
def wait(objects=None, timeout=None, count=None):
|
||||
"""Wait for *objects* to become ready or for event loop to finish.
|
||||
|
||||
If *objects* is provided, it should be an iterable containing objects implementing the wait protocol (rawlink() and
|
||||
unlink() methods):
|
||||
|
||||
- :class:`gevent.Greenlet` instance
|
||||
- :class:`gevent.event.Event` instance
|
||||
- :class:`gevent.lock.Semaphore` instance
|
||||
- :class:`gevent.subprocess.Popen` instance
|
||||
|
||||
If *objects* is ``None`` (the default), ``wait()`` blocks until the event loop has nothing to do:
|
||||
|
||||
- all greenlets have finished
|
||||
- all servers were stopped
|
||||
- all event loop watchers were stopped.
|
||||
|
||||
If *count* is ``None`` (the default), wait for all of *objects* to become ready.
|
||||
|
||||
If *count* is a number, wait for *count* objects to become ready. (For example, if count is ``1`` then the
|
||||
function exits when any object in the list is ready).
|
||||
|
||||
If *timeout* is provided, it specifies the maximum number of seconds ``wait()`` will block.
|
||||
|
||||
Returns the list of ready objects, in the order in which they were ready.
|
||||
"""
|
||||
if objects is None:
|
||||
return get_hub().join(timeout=timeout)
|
||||
result = []
|
||||
if count is None:
|
||||
return list(iwait(objects, timeout))
|
||||
for obj in iwait(objects=objects, timeout=timeout):
|
||||
result.append(obj)
|
||||
count -= 1
|
||||
if count <= 0:
|
||||
break
|
||||
return result
|
||||
|
||||
|
||||
class linkproxy(object):
|
||||
__slots__ = ['callback', 'obj']
|
||||
|
||||
def __init__(self, callback, obj):
|
||||
self.callback = callback
|
||||
self.obj = obj
|
||||
|
||||
def __call__(self, *args):
|
||||
callback = self.callback
|
||||
obj = self.obj
|
||||
self.callback = None
|
||||
self.obj = None
|
||||
callback(obj)
|
||||
|
||||
|
||||
class _NONE(object):
|
||||
"A special thingy you must never pass to any of gevent API"
|
||||
__slots__ = []
|
||||
|
||||
def __repr__(self):
|
||||
return '<_NONE>'
|
||||
|
||||
_NONE = _NONE()
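# A small sketch of the hub-level helpers defined above (an illustration only,
# assuming the gevent package is installed; gevent.spawn comes from
# gevent.greenlet, not from this module):
if __name__ == '__main__':
    import gevent
    from gevent.hub import get_hub, sleep, wait, iwait

    def worker(delay):
        sleep(delay)                    # cooperative sleep defined above
        return delay

    greenlets = [gevent.spawn(worker, d) for d in (0.03, 0.01, 0.02)]
    for g in iwait(greenlets):          # yields each greenlet as it finishes
        print(g.value)                  # 0.01, then 0.02, then 0.03

    print(get_hub())                    # the Hub greenlet driving this thread's loop
    wait()                              # returns once the loop has nothing left to do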
|
236
panda/python/Lib/site-packages/gevent/local.py
Normal file
|
@ -0,0 +1,236 @@
|
|||
"""Greenlet-local objects.
|
||||
|
||||
This module is based on `_threading_local.py`__ from the standard library.
|
||||
|
||||
__ http://svn.python.org/view/python/trunk/Lib/_threading_local.py?view=markup&pathrev=78336
|
||||
|
||||
Greenlet-local objects support the management of greenlet-local data.
|
||||
If you have data that you want to be local to a greenlet, simply create
|
||||
a greenlet-local object and use its attributes:
|
||||
|
||||
>>> mydata = local()
|
||||
>>> mydata.number = 42
|
||||
>>> mydata.number
|
||||
42
|
||||
|
||||
You can also access the local-object's dictionary:
|
||||
|
||||
>>> mydata.__dict__
|
||||
{'number': 42}
|
||||
>>> mydata.__dict__.setdefault('widgets', [])
|
||||
[]
|
||||
>>> mydata.widgets
|
||||
[]
|
||||
|
||||
What's important about greenlet-local objects is that their data are
|
||||
local to a greenlet. If we access the data in a different greenlet:
|
||||
|
||||
>>> log = []
|
||||
>>> def f():
|
||||
... items = mydata.__dict__.items()
|
||||
... items.sort()
|
||||
... log.append(items)
|
||||
... mydata.number = 11
|
||||
... log.append(mydata.number)
|
||||
>>> greenlet = gevent.spawn(f)
|
||||
>>> greenlet.join()
|
||||
>>> log
|
||||
[[], 11]
|
||||
|
||||
we get different data. Furthermore, changes made in the other greenlet
|
||||
don't affect data seen in this greenlet:
|
||||
|
||||
>>> mydata.number
|
||||
42
|
||||
|
||||
Of course, values you get from a local object, including a __dict__
|
||||
attribute, are for whatever greenlet was current at the time the
|
||||
attribute was read. For that reason, you generally don't want to save
|
||||
these values across greenlets, as they apply only to the greenlet they
|
||||
came from.
|
||||
|
||||
You can create custom local objects by subclassing the local class:
|
||||
|
||||
>>> class MyLocal(local):
|
||||
... number = 2
|
||||
... initialized = False
|
||||
... def __init__(self, **kw):
|
||||
... if self.initialized:
|
||||
... raise SystemError('__init__ called too many times')
|
||||
... self.initialized = True
|
||||
... self.__dict__.update(kw)
|
||||
... def squared(self):
|
||||
... return self.number ** 2
|
||||
|
||||
This can be useful to support default values, methods and
|
||||
initialization. Note that if you define an __init__ method, it will be
|
||||
called each time the local object is used in a separate greenlet. This
|
||||
is necessary to initialize each greenlet's dictionary.
|
||||
|
||||
Now if we create a local object:
|
||||
|
||||
>>> mydata = MyLocal(color='red')
|
||||
|
||||
Now we have a default number:
|
||||
|
||||
>>> mydata.number
|
||||
2
|
||||
|
||||
an initial color:
|
||||
|
||||
>>> mydata.color
|
||||
'red'
|
||||
>>> del mydata.color
|
||||
|
||||
And a method that operates on the data:
|
||||
|
||||
>>> mydata.squared()
|
||||
4
|
||||
|
||||
As before, we can access the data in a separate greenlet:
|
||||
|
||||
>>> log = []
|
||||
>>> greenlet = gevent.spawn(f)
|
||||
>>> greenlet.join()
|
||||
>>> log
|
||||
[[('color', 'red'), ('initialized', True)], 11]
|
||||
|
||||
without affecting this greenlet's data:
|
||||
|
||||
>>> mydata.number
|
||||
2
|
||||
>>> mydata.color
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
AttributeError: 'MyLocal' object has no attribute 'color'
|
||||
|
||||
Note that subclasses can define slots, but they are not greenlet
|
||||
local. They are shared across greenlets::
|
||||
|
||||
>>> class MyLocal(local):
|
||||
... __slots__ = 'number'
|
||||
|
||||
>>> mydata = MyLocal()
|
||||
>>> mydata.number = 42
|
||||
>>> mydata.color = 'red'
|
||||
|
||||
So, the separate greenlet:
|
||||
|
||||
>>> greenlet = gevent.spawn(f)
|
||||
>>> greenlet.join()
|
||||
|
||||
affects what we see:
|
||||
|
||||
>>> mydata.number
|
||||
11
|
||||
|
||||
>>> del mydata
|
||||
"""
|
||||
from weakref import WeakKeyDictionary
|
||||
from copy import copy
|
||||
from gevent.hub import getcurrent
|
||||
from gevent.lock import RLock
|
||||
|
||||
__all__ = ["local"]
|
||||
|
||||
|
||||
class _localbase(object):
|
||||
__slots__ = '_local__args', '_local__lock', '_local__dicts'
|
||||
|
||||
def __new__(cls, *args, **kw):
|
||||
self = object.__new__(cls)
|
||||
object.__setattr__(self, '_local__args', (args, kw))
|
||||
object.__setattr__(self, '_local__lock', RLock())
|
||||
dicts = WeakKeyDictionary()
|
||||
object.__setattr__(self, '_local__dicts', dicts)
|
||||
|
||||
if (args or kw) and (cls.__init__ is object.__init__):
|
||||
raise TypeError("Initialization arguments are not supported")
|
||||
|
||||
# We need to create the greenlet dict in anticipation of
|
||||
# __init__ being called, to make sure we don't call it again ourselves.
|
||||
dict = object.__getattribute__(self, '__dict__')
|
||||
dicts[getcurrent()] = dict
|
||||
return self
|
||||
|
||||
|
||||
def _init_locals(self):
|
||||
d = {}
|
||||
dicts = object.__getattribute__(self, '_local__dicts')
|
||||
dicts[getcurrent()] = d
|
||||
object.__setattr__(self, '__dict__', d)
|
||||
|
||||
# we have a new instance dict, so call our __init__ if we have one
|
||||
cls = type(self)
|
||||
if cls.__init__ is not object.__init__:
|
||||
args, kw = object.__getattribute__(self, '_local__args')
|
||||
cls.__init__(self, *args, **kw)
|
||||
|
||||
|
||||
class local(_localbase):
|
||||
|
||||
def __getattribute__(self, name):
|
||||
d = object.__getattribute__(self, '_local__dicts').get(getcurrent())
|
||||
if d is None:
|
||||
# it's OK to acquire the lock here and not earlier, because the above code won't switch out
|
||||
# however, subclassed __init__ might switch, so we do need to acquire the lock here
|
||||
lock = object.__getattribute__(self, '_local__lock')
|
||||
lock.acquire()
|
||||
try:
|
||||
_init_locals(self)
|
||||
return object.__getattribute__(self, name)
|
||||
finally:
|
||||
lock.release()
|
||||
else:
|
||||
object.__setattr__(self, '__dict__', d)
|
||||
return object.__getattribute__(self, name)
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
if name == '__dict__':
|
||||
raise AttributeError("%r object attribute '__dict__' is read-only" % self.__class__.__name__)
|
||||
d = object.__getattribute__(self, '_local__dicts').get(getcurrent())
|
||||
if d is None:
|
||||
lock = object.__getattribute__(self, '_local__lock')
|
||||
lock.acquire()
|
||||
try:
|
||||
_init_locals(self)
|
||||
return object.__setattr__(self, name, value)
|
||||
finally:
|
||||
lock.release()
|
||||
else:
|
||||
object.__setattr__(self, '__dict__', d)
|
||||
return object.__setattr__(self, name, value)
|
||||
|
||||
def __delattr__(self, name):
|
||||
if name == '__dict__':
|
||||
raise AttributeError("%r object attribute '__dict__' is read-only" % self.__class__.__name__)
|
||||
d = object.__getattribute__(self, '_local__dicts').get(getcurrent())
|
||||
if d is None:
|
||||
lock = object.__getattribute__(self, '_local__lock')
|
||||
lock.acquire()
|
||||
try:
|
||||
_init_locals(self)
|
||||
return object.__delattr__(self, name)
|
||||
finally:
|
||||
lock.release()
|
||||
else:
|
||||
object.__setattr__(self, '__dict__', d)
|
||||
return object.__delattr__(self, name)
|
||||
|
||||
def __copy__(self):
|
||||
currentId = getcurrent()
|
||||
d = object.__getattribute__(self, '_local__dicts').get(currentId)
|
||||
duplicate = copy(d)
|
||||
|
||||
cls = type(self)
|
||||
if cls.__init__ is not object.__init__:
|
||||
args, kw = object.__getattribute__(self, '_local__args')
|
||||
instance = cls(*args, **kw)
|
||||
else:
|
||||
instance = cls()
|
||||
|
||||
object.__setattr__(instance, '_local__dicts', {
|
||||
currentId: duplicate
|
||||
})
|
||||
|
||||
return instance
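# A compact runnable sketch of the behaviour documented above (an illustration
# only, assuming the gevent package is installed):
if __name__ == '__main__':
    import gevent
    from gevent.local import local

    data = local()
    data.counter = 0

    def bump():
        data.counter = 99           # visible only inside this greenlet
        return data.counter

    g = gevent.spawn(bump)
    g.join()
    print(g.value)                  # 99
    print(data.counter)             # still 0 in the main greenlet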
|
118
panda/python/Lib/site-packages/gevent/lock.py
Normal file
|
@ -0,0 +1,118 @@
|
|||
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||
"""Locking primitives"""
|
||||
|
||||
from gevent.hub import getcurrent
|
||||
from gevent._semaphore import Semaphore
|
||||
|
||||
|
||||
__all__ = ['Semaphore', 'DummySemaphore', 'BoundedSemaphore', 'RLock']
|
||||
|
||||
|
||||
class DummySemaphore(object):
|
||||
# XXX what is this used for?
|
||||
"""A Semaphore initialized with "infinite" initial value. None of its methods ever block."""
|
||||
|
||||
def __str__(self):
|
||||
return '<%s>' % self.__class__.__name__
|
||||
|
||||
def locked(self):
|
||||
return False
|
||||
|
||||
def release(self):
|
||||
pass
|
||||
|
||||
def rawlink(self, callback):
|
||||
# XXX should still work and notify?
|
||||
pass
|
||||
|
||||
def unlink(self, callback):
|
||||
pass
|
||||
|
||||
def wait(self, timeout=None):
|
||||
pass
|
||||
|
||||
def acquire(self, blocking=True, timeout=None):
|
||||
pass
|
||||
|
||||
def __enter__(self):
|
||||
pass
|
||||
|
||||
def __exit__(self, typ, val, tb):
|
||||
pass
|
||||
|
||||
|
||||
class BoundedSemaphore(Semaphore):
|
||||
"""A bounded semaphore checks to make sure its current value doesn't exceed its initial value.
|
||||
If it does, ``ValueError`` is raised. In most situations semaphores are used to guard resources
|
||||
with limited capacity. If the semaphore is released too many times it's a sign of a bug.
|
||||
|
||||
If not given, *value* defaults to 1."""
|
||||
|
||||
def __init__(self, value=1):
|
||||
Semaphore.__init__(self, value)
|
||||
self._initial_value = value
|
||||
|
||||
def release(self):
|
||||
if self.counter >= self._initial_value:
|
||||
raise ValueError("Semaphore released too many times")
|
||||
return Semaphore.release(self)
|
||||
|
||||
|
||||
class RLock(object):
|
||||
|
||||
def __init__(self):
|
||||
self._block = Semaphore(1)
|
||||
self._owner = None
|
||||
self._count = 0
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s at 0x%x _block=%s _count=%r _owner=%r)>" % (
|
||||
self.__class__.__name__,
|
||||
id(self),
|
||||
self._block,
|
||||
self._count,
|
||||
self._owner)
|
||||
|
||||
def acquire(self, blocking=1):
|
||||
me = getcurrent()
|
||||
if self._owner is me:
|
||||
self._count = self._count + 1
|
||||
return 1
|
||||
rc = self._block.acquire(blocking)
|
||||
if rc:
|
||||
self._owner = me
|
||||
self._count = 1
|
||||
return rc
|
||||
|
||||
def __enter__(self):
|
||||
return self.acquire()
|
||||
|
||||
def release(self):
|
||||
if self._owner is not getcurrent():
|
||||
raise RuntimeError("cannot release un-aquired lock")
|
||||
self._count = count = self._count - 1
|
||||
if not count:
|
||||
self._owner = None
|
||||
self._block.release()
|
||||
|
||||
def __exit__(self, typ, value, tb):
|
||||
self.release()
|
||||
|
||||
# Internal methods used by condition variables
|
||||
|
||||
def _acquire_restore(self, count_owner):
|
||||
count, owner = count_owner
|
||||
self._block.acquire()
|
||||
self._count = count
|
||||
self._owner = owner
|
||||
|
||||
def _release_save(self):
|
||||
count = self._count
|
||||
self._count = 0
|
||||
owner = self._owner
|
||||
self._owner = None
|
||||
self._block.release()
|
||||
return (count, owner)
|
||||
|
||||
def _is_owned(self):
|
||||
return self._owner is getcurrent()
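# A short usage sketch of the primitives above (an illustration only, assuming
# the gevent package is installed):
if __name__ == '__main__':
    import gevent
    from gevent.lock import BoundedSemaphore, RLock

    sem = BoundedSemaphore(2)           # at most two concurrent holders

    def guarded(n):
        with sem:
            gevent.sleep(0.01)
            return n

    print([g.get() for g in [gevent.spawn(guarded, n) for n in range(4)]])   # [0, 1, 2, 3]

    rlock = RLock()
    with rlock:
        with rlock:                     # re-entrant: the same greenlet may acquire it again
            pass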
|
250
panda/python/Lib/site-packages/gevent/monkey.py
Normal file
|
@ -0,0 +1,250 @@
|
|||
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||
"""Make the standard library cooperative."""
|
||||
from __future__ import absolute_import
|
||||
import sys
|
||||
from sys import version_info
|
||||
|
||||
__all__ = ['patch_all',
|
||||
'patch_socket',
|
||||
'patch_ssl',
|
||||
'patch_os',
|
||||
'patch_time',
|
||||
'patch_select',
|
||||
'patch_thread',
|
||||
'patch_subprocess',
|
||||
'patch_sys']
|
||||
|
||||
|
||||
# maps module name -> attribute name -> original item
|
||||
# e.g. "time" -> "sleep" -> built-in function sleep
|
||||
saved = {}
|
||||
|
||||
|
||||
def _get_original(name, items):
|
||||
d = saved.get(name, {})
|
||||
values = []
|
||||
module = None
|
||||
for item in items:
|
||||
if item in d:
|
||||
values.append(d[item])
|
||||
else:
|
||||
if module is None:
|
||||
module = __import__(name)
|
||||
values.append(getattr(module, item))
|
||||
return values
|
||||
|
||||
|
||||
def get_original(name, item):
|
||||
if isinstance(item, basestring):
|
||||
return _get_original(name, [item])[0]
|
||||
else:
|
||||
return _get_original(name, item)
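# For example (an illustration only): after patch_all(), the unpatched builtin
# can still be fetched for code that must truly block, e.g.
#
#     real_sleep = get_original('time', 'sleep')
#     real_sleep(0.1)     # blocks the whole OS thread, bypassing the hub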
|
||||
|
||||
|
||||
def patch_item(module, attr, newitem):
|
||||
NONE = object()
|
||||
olditem = getattr(module, attr, NONE)
|
||||
if olditem is not NONE:
|
||||
saved.setdefault(module.__name__, {}).setdefault(attr, olditem)
|
||||
setattr(module, attr, newitem)
|
||||
|
||||
|
||||
def remove_item(module, attr):
|
||||
NONE = object()
|
||||
olditem = getattr(module, attr, NONE)
|
||||
if olditem is NONE:
|
||||
return
|
||||
saved.setdefault(module.__name__, {}).setdefault(attr, olditem)
|
||||
delattr(module, attr)
|
||||
|
||||
|
||||
def patch_module(name, items=None):
|
||||
gevent_module = getattr(__import__('gevent.' + name), name)
|
||||
module_name = getattr(gevent_module, '__target__', name)
|
||||
module = __import__(module_name)
|
||||
if items is None:
|
||||
items = getattr(gevent_module, '__implements__', None)
|
||||
if items is None:
|
||||
raise AttributeError('%r does not have __implements__' % gevent_module)
|
||||
for attr in items:
|
||||
patch_item(module, attr, getattr(gevent_module, attr))
|
||||
|
||||
|
||||
def _patch_sys_std(name):
|
||||
from gevent.fileobject import FileObjectThread
|
||||
orig = getattr(sys, name)
|
||||
if not isinstance(orig, FileObjectThread):
|
||||
patch_item(sys, name, FileObjectThread(orig))
|
||||
|
||||
|
||||
def patch_sys(stdin=True, stdout=True, stderr=True):
|
||||
if stdin:
|
||||
_patch_sys_std('stdin')
|
||||
if stdout:
|
||||
_patch_sys_std('stdout')
|
||||
if stderr:
|
||||
_patch_sys_std('stderr')
|
||||
|
||||
|
||||
def patch_os():
|
||||
"""Replace :func:`os.fork` with :func:`gevent.fork`. Does nothing if fork is not available."""
|
||||
patch_module('os')
|
||||
|
||||
|
||||
def patch_time():
|
||||
"""Replace :func:`time.sleep` with :func:`gevent.sleep`."""
|
||||
from gevent.hub import sleep
|
||||
import time
|
||||
patch_item(time, 'sleep', sleep)
|
||||
|
||||
|
||||
def patch_thread(threading=True, _threading_local=True, Event=False):
|
||||
"""Replace the standard :mod:`thread` module to make it greenlet-based.
|
||||
If *threading* is true (the default), also patch ``threading``.
|
||||
If *_threading_local* is true (the default), also patch ``_threading_local.local``.
|
||||
"""
|
||||
patch_module('thread')
|
||||
if threading:
|
||||
patch_module('threading')
|
||||
threading = __import__('threading')
|
||||
if Event:
|
||||
from gevent.event import Event
|
||||
threading.Event = Event
|
||||
if _threading_local:
|
||||
_threading_local = __import__('_threading_local')
|
||||
from gevent.local import local
|
||||
_threading_local.local = local
|
||||
|
||||
|
||||
def patch_socket(dns=True, aggressive=True):
|
||||
"""Replace the standard socket object with gevent's cooperative sockets.
|
||||
|
||||
If *dns* is true, also patch dns functions in :mod:`socket`.
|
||||
"""
|
||||
from gevent import socket
|
||||
# Note: although it seems like it's not strictly necessary to monkey patch 'create_connection',
|
||||
# it's better to do it. If 'create_connection' was not monkey patched, but the rest of socket module
|
||||
# was, create_connection would still use "green" getaddrinfo and "green" socket.
|
||||
# However, because gevent.socket.socket.connect is a Python function, the exception raised by it causes
|
||||
# _socket object to be referenced by the frame, thus causing the next invocation of bind(source_address) to fail.
|
||||
if dns:
|
||||
items = socket.__implements__
|
||||
else:
|
||||
items = set(socket.__implements__) - set(socket.__dns__)
|
||||
patch_module('socket', items=items)
|
||||
if aggressive:
|
||||
if 'ssl' not in socket.__implements__:
|
||||
remove_item(socket, 'ssl')
|
||||
|
||||
|
||||
def patch_dns():
|
||||
from gevent import socket
|
||||
patch_module('socket', items=socket.__dns__)
|
||||
|
||||
|
||||
def patch_ssl():
|
||||
patch_module('ssl')
|
||||
|
||||
|
||||
def patch_select(aggressive=True):
|
||||
"""Replace :func:`select.select` with :func:`gevent.select.select`.
|
||||
|
||||
If aggressive is true (the default), also remove other blocking functions from the :mod:`select` module.
|
||||
"""
|
||||
patch_module('select')
|
||||
if aggressive:
|
||||
select = __import__('select')
|
||||
# since these are blocking we're removing them here. This makes some other
|
||||
# modules (e.g. asyncore) non-blocking, as they use select that we provide
|
||||
# when none of these are available.
|
||||
remove_item(select, 'poll')
|
||||
remove_item(select, 'epoll')
|
||||
remove_item(select, 'kqueue')
|
||||
remove_item(select, 'kevent')
|
||||
|
||||
|
||||
def patch_subprocess():
|
||||
patch_module('subprocess')
|
||||
|
||||
|
||||
def patch_all(socket=True, dns=True, time=True, select=True, thread=True, os=True, ssl=True, httplib=False,
|
||||
subprocess=False, sys=False, aggressive=True, Event=False):
|
||||
"""Do all of the default monkey patching (calls every other function in this module."""
|
||||
# order is important
|
||||
if os:
|
||||
patch_os()
|
||||
if time:
|
||||
patch_time()
|
||||
if thread:
|
||||
patch_thread(Event=Event)
|
||||
# sys must be patched after thread; otherwise threading._shutdown will be
|
||||
# initiated for a _MainThread with the real thread ident
|
||||
if sys:
|
||||
patch_sys()
|
||||
if socket:
|
||||
patch_socket(dns=dns, aggressive=aggressive)
|
||||
if select:
|
||||
patch_select(aggressive=aggressive)
|
||||
if ssl:
|
||||
if version_info[:2] > (2, 5):
|
||||
patch_ssl()
|
||||
else:
|
||||
try:
|
||||
patch_ssl()
|
||||
except ImportError:
|
||||
pass # in Python 2.5, 'ssl' is a standalone package not included in stdlib
|
||||
if httplib:
|
||||
raise ValueError('gevent.httplib is no longer provided, httplib must be False')
|
||||
if subprocess:
|
||||
patch_subprocess()
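# Typical use of patch_all() (an illustration only, not part of this module):
# call it before anything else imports the modules that will be patched, e.g.
# as the very first lines of a program:
#
#     from gevent import monkey
#     monkey.patch_all()        # must run before 'import socket', 'import threading', ...
#
#     import socket             # now gevent's cooperative socket
#     import threading          # thread primitives now backed by greenlets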
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
from inspect import getargspec
|
||||
patch_all_args = getargspec(patch_all)[0]
|
||||
modules = [x for x in patch_all_args if 'patch_' + x in globals()]
|
||||
script_help = """gevent.monkey - monkey patch the standard modules to use gevent.
|
||||
|
||||
USAGE: python -m gevent.monkey [MONKEY OPTIONS] script [SCRIPT OPTIONS]
|
||||
|
||||
If no OPTIONS present, monkey patches all the modules it can patch.
|
||||
You can exclude a module with --no-module, e.g. --no-thread. You can
|
||||
specify a module to patch with --module, e.g. --socket. In the latter
|
||||
case only the modules specified on the command line will be patched.
|
||||
|
||||
MONKEY OPTIONS: --verbose %s""" % ', '.join('--[no-]%s' % m for m in modules)
|
||||
args = {}
|
||||
argv = sys.argv[1:]
|
||||
verbose = False
|
||||
while argv and argv[0].startswith('--'):
|
||||
option = argv[0][2:]
|
||||
if option == 'verbose':
|
||||
verbose = True
|
||||
elif option.startswith('no-') and option.replace('no-', '') in patch_all_args:
|
||||
args[option[3:]] = False
|
||||
elif option in patch_all_args:
|
||||
args[option] = True
|
||||
if option in modules:
|
||||
for module in modules:
|
||||
args.setdefault(module, False)
|
||||
else:
|
||||
sys.exit(script_help + '\n\n' + 'Cannot patch %r' % option)
|
||||
del argv[0]
|
||||
# TODO: break on --
|
||||
if verbose:
|
||||
import pprint
|
||||
import os
|
||||
print ('gevent.monkey.patch_all(%s)' % ', '.join('%s=%s' % item for item in args.items()))
|
||||
print ('sys.version=%s' % (sys.version.strip().replace('\n', ' '), ))
|
||||
print ('sys.path=%s' % pprint.pformat(sys.path))
|
||||
print ('sys.modules=%s' % pprint.pformat(sorted(sys.modules.keys())))
|
||||
print ('cwd=%s' % os.getcwd())
|
||||
|
||||
patch_all(**args)
|
||||
if argv:
|
||||
sys.argv = argv
|
||||
__package__ = None
|
||||
globals()['__file__'] = sys.argv[0] # issue #302
|
||||
execfile(sys.argv[0])
|
||||
else:
|
||||
print (script_help)
|
107
panda/python/Lib/site-packages/gevent/os.py
Normal file
|
@ -0,0 +1,107 @@
|
|||
"""
|
||||
This module provides cooperative versions of os.read() and os.write().
|
||||
On Posix platforms this uses non-blocking IO, on Windows a threadpool
|
||||
is used.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
import sys
|
||||
from gevent.hub import get_hub, reinit
|
||||
import errno
|
||||
|
||||
EAGAIN = getattr(errno, 'EAGAIN', 11)
|
||||
|
||||
try:
|
||||
import fcntl
|
||||
except ImportError:
|
||||
fcntl = None
|
||||
|
||||
__implements__ = ['fork']
|
||||
__extensions__ = ['tp_read', 'tp_write']
|
||||
|
||||
_read = os.read
|
||||
_write = os.write
|
||||
|
||||
|
||||
ignored_errors = [EAGAIN, errno.EINTR]
|
||||
|
||||
|
||||
if fcntl:
|
||||
|
||||
__extensions__ += ['make_nonblocking', 'nb_read', 'nb_write']
|
||||
|
||||
def make_nonblocking(fd):
|
||||
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
|
||||
if not bool(flags & os.O_NONBLOCK):
|
||||
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
|
||||
return True
|
||||
|
||||
def nb_read(fd, n):
|
||||
"""Read up to `n` bytes from file descriptor `fd`. Return a string
|
||||
containing the bytes read. If end-of-file is reached, an empty string
|
||||
is returned.
|
||||
|
||||
The descriptor must be in non-blocking mode.
|
||||
"""
|
||||
hub, event = None, None
|
||||
while True:
|
||||
try:
|
||||
return _read(fd, n)
|
||||
except OSError, e:
|
||||
if e.errno not in ignored_errors:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
if hub is None:
|
||||
hub = get_hub()
|
||||
event = hub.loop.io(fd, 1)
|
||||
hub.wait(event)
|
||||
|
||||
def nb_write(fd, buf):
|
||||
"""Write bytes from buffer `buf` to file descriptor `fd`. Return the
|
||||
number of bytes written.
|
||||
|
||||
The file descriptor must be in non-blocking mode.
|
||||
"""
|
||||
hub, event = None, None
|
||||
while True:
|
||||
try:
|
||||
return _write(fd, buf)
|
||||
except OSError, e:
|
||||
if e.errno not in ignored_errors:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
if hub is None:
|
||||
hub = get_hub()
|
||||
event = hub.loop.io(fd, 2)
|
||||
hub.wait(event)
|
||||
|
||||
|
||||
def tp_read(fd, n):
|
||||
"""Read up to `n` bytes from file descriptor `fd`. Return a string
|
||||
containing the bytes read. If end-of-file is reached, an empty string
|
||||
is returned."""
|
||||
return get_hub().threadpool.apply_e(BaseException, _read, (fd, n))
|
||||
|
||||
|
||||
def tp_write(fd, buf):
|
||||
"""Write bytes from buffer `buf` to file descriptor `fd`. Return the
|
||||
number of bytes written."""
|
||||
return get_hub().threadpool.apply_e(BaseException, _write, (fd, buf))
|
||||
|
||||
|
||||
if hasattr(os, 'fork'):
|
||||
_fork = os.fork
|
||||
|
||||
def fork():
|
||||
result = _fork()
|
||||
if not result:
|
||||
reinit()
|
||||
return result
|
||||
|
||||
else:
|
||||
__implements__.remove('fork')
|
||||
|
||||
|
||||
__all__ = __implements__ + __extensions__
|
408
panda/python/Lib/site-packages/gevent/pool.py
Normal file
|
@ -0,0 +1,408 @@
|
|||
# Copyright (c) 2009-2011 Denis Bilenko. See LICENSE for details.
|
||||
"""Managing greenlets in a group.
|
||||
|
||||
The :class:`Group` class in this module abstracts a group of running greenlets.
|
||||
When a greenlet dies, it's automatically removed from the group.
|
||||
|
||||
The :class:`Pool`, which is a subclass of :class:`Group`, provides a way to limit
|
||||
concurrency: its :meth:`spawn <Pool.spawn>` method blocks if the number of
|
||||
greenlets in the pool has already reached the limit, until there is a free slot.
|
||||
"""
|
||||
|
||||
import sys
|
||||
from bisect import insort_right
|
||||
|
||||
from gevent.hub import GreenletExit, getcurrent, kill as _kill, PY3
|
||||
from gevent.greenlet import joinall, Greenlet
|
||||
from gevent.timeout import Timeout
|
||||
from gevent.event import Event
|
||||
from gevent.lock import Semaphore, DummySemaphore
|
||||
|
||||
__all__ = ['Group', 'Pool']
|
||||
|
||||
|
||||
class Group(object):
|
||||
"""Maintain a group of greenlets that are still running.
|
||||
|
||||
Links to each item and removes it upon notification.
|
||||
"""
|
||||
greenlet_class = Greenlet
|
||||
|
||||
def __init__(self, *args):
|
||||
assert len(args) <= 1, args
|
||||
self.greenlets = set(*args)
|
||||
if args:
|
||||
for greenlet in args[0]:
|
||||
greenlet.rawlink(self._discard)
|
||||
# each item we kill we place in dying, to avoid killing the same greenlet twice
|
||||
self.dying = set()
|
||||
self._empty_event = Event()
|
||||
self._empty_event.set()
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), self.greenlets)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.greenlets)
|
||||
|
||||
def __contains__(self, item):
|
||||
return item in self.greenlets
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.greenlets)
|
||||
|
||||
def add(self, greenlet):
|
||||
try:
|
||||
rawlink = greenlet.rawlink
|
||||
except AttributeError:
|
||||
pass # non-Greenlet greenlet, like MAIN
|
||||
else:
|
||||
rawlink(self._discard)
|
||||
self.greenlets.add(greenlet)
|
||||
self._empty_event.clear()
|
||||
|
||||
def _discard(self, greenlet):
|
||||
self.greenlets.discard(greenlet)
|
||||
self.dying.discard(greenlet)
|
||||
if not self.greenlets:
|
||||
self._empty_event.set()
|
||||
|
||||
def discard(self, greenlet):
|
||||
self._discard(greenlet)
|
||||
try:
|
||||
unlink = greenlet.unlink
|
||||
except AttributeError:
|
||||
pass # non-Greenlet greenlet, like MAIN
|
||||
else:
|
||||
unlink(self._discard)
|
||||
|
||||
def start(self, greenlet):
|
||||
self.add(greenlet)
|
||||
greenlet.start()
|
||||
|
||||
def spawn(self, *args, **kwargs):
|
||||
greenlet = self.greenlet_class(*args, **kwargs)
|
||||
self.start(greenlet)
|
||||
return greenlet
|
||||
|
||||
# def close(self):
|
||||
# """Prevents any more tasks from being submitted to the pool"""
|
||||
# self.add = RaiseException("This %s has been closed" % self.__class__.__name__)
|
||||
|
||||
def join(self, timeout=None, raise_error=False):
|
||||
if raise_error:
|
||||
greenlets = self.greenlets.copy()
|
||||
self._empty_event.wait(timeout=timeout)
|
||||
for greenlet in greenlets:
|
||||
if greenlet.exception is not None:
|
||||
raise greenlet.exception
|
||||
else:
|
||||
self._empty_event.wait(timeout=timeout)
|
||||
|
||||
def kill(self, exception=GreenletExit, block=True, timeout=None):
|
||||
timer = Timeout.start_new(timeout)
|
||||
try:
|
||||
try:
|
||||
while self.greenlets:
|
||||
for greenlet in list(self.greenlets):
|
||||
if greenlet not in self.dying:
|
||||
try:
|
||||
kill = greenlet.kill
|
||||
except AttributeError:
|
||||
_kill(greenlet, exception)
|
||||
else:
|
||||
kill(exception, block=False)
|
||||
self.dying.add(greenlet)
|
||||
if not block:
|
||||
break
|
||||
joinall(self.greenlets)
|
||||
except Timeout:
|
||||
ex = sys.exc_info()[1]
|
||||
if ex is not timer:
|
||||
raise
|
||||
finally:
|
||||
timer.cancel()
|
||||
|
||||
def killone(self, greenlet, exception=GreenletExit, block=True, timeout=None):
|
||||
if greenlet not in self.dying and greenlet in self.greenlets:
|
||||
greenlet.kill(exception, block=False)
|
||||
self.dying.add(greenlet)
|
||||
if block:
|
||||
greenlet.join(timeout)
|
||||
|
||||
def apply(self, func, args=None, kwds=None):
|
||||
"""Equivalent of the apply() builtin function. It blocks till the result is ready."""
|
||||
if args is None:
|
||||
args = ()
|
||||
if kwds is None:
|
||||
kwds = {}
|
||||
if getcurrent() in self:
|
||||
return func(*args, **kwds)
|
||||
else:
|
||||
return self.spawn(func, *args, **kwds).get()
|
||||
|
||||
def apply_cb(self, func, args=None, kwds=None, callback=None):
|
||||
result = self.apply(func, args, kwds)
|
||||
if callback is not None:
|
||||
Greenlet.spawn(callback, result)
|
||||
return result
|
||||
|
||||
def apply_async(self, func, args=None, kwds=None, callback=None):
|
||||
"""A variant of the apply() method which returns a Greenlet object.
|
||||
|
||||
        If *callback* is specified, it should be a callable which accepts a single argument. When the result
        becomes ready, callback is applied to it (unless the call failed)."""
|
||||
if args is None:
|
||||
args = ()
|
||||
if kwds is None:
|
||||
kwds = {}
|
||||
if self.full():
|
||||
# cannot call spawn() directly because it will block
|
||||
return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)
|
||||
else:
|
||||
greenlet = self.spawn(func, *args, **kwds)
|
||||
if callback is not None:
|
||||
greenlet.link(pass_value(callback))
|
||||
return greenlet
|
||||
|
||||
def map(self, func, iterable):
|
||||
return list(self.imap(func, iterable))
|
||||
|
||||
def map_cb(self, func, iterable, callback=None):
|
||||
result = self.map(func, iterable)
|
||||
if callback is not None:
|
||||
callback(result)
|
||||
return result
|
||||
|
||||
def map_async(self, func, iterable, callback=None):
|
||||
"""
|
||||
A variant of the map() method which returns a Greenlet object.
|
||||
|
||||
If callback is specified then it should be a callable which accepts a
|
||||
single argument.
|
||||
"""
|
||||
return Greenlet.spawn(self.map_cb, func, iterable, callback)
|
||||
|
||||
def imap(self, func, iterable):
|
||||
"""An equivalent of itertools.imap()"""
|
||||
return IMap.spawn(func, iterable, spawn=self.spawn)
|
||||
|
||||
def imap_unordered(self, func, iterable):
|
||||
"""The same as imap() except that the ordering of the results from the
|
||||
returned iterator should be considered in arbitrary order."""
|
||||
return IMapUnordered.spawn(func, iterable, spawn=self.spawn)
|
||||
|
||||
def full(self):
|
||||
return False
|
||||
|
||||
def wait_available(self):
|
||||
pass
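# Illustrative sketch (not part of the original module): one way a Group can
# be used to spawn several greenlets and wait for all of them.  The helper
# name _example_group_usage and the use of gevent.sleep are assumptions made
# for this example only.
def _example_group_usage():
    import gevent

    def worker(n):
        gevent.sleep(0)           # yield to the hub once
        return n * n

    group = Group()
    jobs = [group.spawn(worker, n) for n in range(5)]
    group.join()                  # block until every greenlet has finished
    assert [job.value for job in jobs] == [0, 1, 4, 9, 16]
    # imap() preserves input order; imap_unordered() yields results as they finish
    assert list(group.imap(worker, range(3))) == [0, 1, 4]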
|
||||
|
||||
|
||||
class IMapUnordered(Greenlet):
|
||||
|
||||
def __init__(self, func, iterable, spawn=None):
|
||||
from gevent.queue import Queue
|
||||
Greenlet.__init__(self)
|
||||
if spawn is not None:
|
||||
self.spawn = spawn
|
||||
self.func = func
|
||||
self.iterable = iterable
|
||||
self.queue = Queue()
|
||||
self.count = 0
|
||||
self.finished = False
|
||||
self.rawlink(self._on_finish)
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
value = self.queue.get()
|
||||
if isinstance(value, Failure):
|
||||
raise value.exc
|
||||
return value
|
||||
|
||||
if PY3:
|
||||
__next__ = next
|
||||
del next
|
||||
|
||||
def _run(self):
|
||||
try:
|
||||
func = self.func
|
||||
for item in self.iterable:
|
||||
self.count += 1
|
||||
self.spawn(func, item).rawlink(self._on_result)
|
||||
finally:
|
||||
self.__dict__.pop('spawn', None)
|
||||
self.__dict__.pop('func', None)
|
||||
self.__dict__.pop('iterable', None)
|
||||
|
||||
def _on_result(self, greenlet):
|
||||
self.count -= 1
|
||||
if greenlet.successful():
|
||||
self.queue.put(greenlet.value)
|
||||
else:
|
||||
self.queue.put(Failure(greenlet.exception))
|
||||
if self.ready() and self.count <= 0 and not self.finished:
|
||||
self.queue.put(Failure(StopIteration))
|
||||
self.finished = True
|
||||
|
||||
def _on_finish(self, _self):
|
||||
if self.finished:
|
||||
return
|
||||
if not self.successful():
|
||||
self.queue.put(Failure(self.exception))
|
||||
self.finished = True
|
||||
return
|
||||
if self.count <= 0:
|
||||
self.queue.put(Failure(StopIteration))
|
||||
self.finished = True
|
||||
|
||||
|
||||
class IMap(Greenlet):
|
||||
|
||||
def __init__(self, func, iterable, spawn=None):
|
||||
from gevent.queue import Queue
|
||||
Greenlet.__init__(self)
|
||||
if spawn is not None:
|
||||
self.spawn = spawn
|
||||
self.func = func
|
||||
self.iterable = iterable
|
||||
self.queue = Queue()
|
||||
self.count = 0
|
||||
self.waiting = [] # QQQ maybe deque will work faster there?
|
||||
self.index = 0
|
||||
self.maxindex = -1
|
||||
self.finished = False
|
||||
self.rawlink(self._on_finish)
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
while True:
|
||||
if self.waiting and self.waiting[0][0] <= self.index:
|
||||
index, value = self.waiting.pop(0)
|
||||
else:
|
||||
index, value = self.queue.get()
|
||||
if index > self.index:
|
||||
insort_right(self.waiting, (index, value))
|
||||
continue
|
||||
self.index += 1
|
||||
if isinstance(value, Failure):
|
||||
raise value.exc
|
||||
return value
|
||||
|
||||
if PY3:
|
||||
__next__ = next
|
||||
del next
|
||||
|
||||
def _run(self):
|
||||
try:
|
||||
func = self.func
|
||||
for item in self.iterable:
|
||||
self.count += 1
|
||||
g = self.spawn(func, item)
|
||||
g.rawlink(self._on_result)
|
||||
self.maxindex += 1
|
||||
g.index = self.maxindex
|
||||
finally:
|
||||
self.__dict__.pop('spawn', None)
|
||||
self.__dict__.pop('func', None)
|
||||
self.__dict__.pop('iterable', None)
|
||||
|
||||
def _on_result(self, greenlet):
|
||||
self.count -= 1
|
||||
if greenlet.successful():
|
||||
self.queue.put((greenlet.index, greenlet.value))
|
||||
else:
|
||||
self.queue.put((greenlet.index, Failure(greenlet.exception)))
|
||||
if self.ready() and self.count <= 0 and not self.finished:
|
||||
self.maxindex += 1
|
||||
self.queue.put((self.maxindex, Failure(StopIteration)))
|
||||
self.finished = True
|
||||
|
||||
def _on_finish(self, _self):
|
||||
if self.finished:
|
||||
return
|
||||
if not self.successful():
|
||||
self.maxindex += 1
|
||||
self.queue.put((self.maxindex, Failure(self.exception)))
|
||||
self.finished = True
|
||||
return
|
||||
if self.count <= 0:
|
||||
self.maxindex += 1
|
||||
self.queue.put((self.maxindex, Failure(StopIteration)))
|
||||
self.finished = True
|
||||
|
||||
|
||||
class Failure(object):
|
||||
__slots__ = ['exc']
|
||||
|
||||
def __init__(self, exc):
|
||||
self.exc = exc
|
||||
|
||||
|
||||
class Pool(Group):
|
||||
|
||||
def __init__(self, size=None, greenlet_class=None):
|
||||
if size is not None and size < 0:
|
||||
raise ValueError('size must not be negative: %r' % (size, ))
|
||||
Group.__init__(self)
|
||||
self.size = size
|
||||
if greenlet_class is not None:
|
||||
self.greenlet_class = greenlet_class
|
||||
if size is None:
|
||||
self._semaphore = DummySemaphore()
|
||||
else:
|
||||
self._semaphore = Semaphore(size)
|
||||
|
||||
def wait_available(self):
|
||||
self._semaphore.wait()
|
||||
|
||||
def full(self):
|
||||
return self.free_count() <= 0
|
||||
|
||||
def free_count(self):
|
||||
if self.size is None:
|
||||
return 1
|
||||
return max(0, self.size - len(self))
|
||||
|
||||
def add(self, greenlet):
|
||||
self._semaphore.acquire()
|
||||
try:
|
||||
Group.add(self, greenlet)
|
||||
except:
|
||||
self._semaphore.release()
|
||||
raise
|
||||
|
||||
def _discard(self, greenlet):
|
||||
Group._discard(self, greenlet)
|
||||
self._semaphore.release()
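# Illustrative sketch (not part of the original module): a Pool bounds how
# many greenlets run at once; add()/spawn() block on the internal semaphore
# until a slot frees up.  The helper name _example_pool_usage is an
# assumption for this example only.
def _example_pool_usage():
    import gevent

    pool = Pool(2)                # at most two greenlets run concurrently
    assert pool.free_count() == 2 and not pool.full()

    def task(n):
        gevent.sleep(0)
        return n + 1

    # map() blocks until every task has run and returns results in input order
    assert pool.map(task, range(4)) == [1, 2, 3, 4]
    pool.wait_available()         # returns once at least one slot is free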
|
||||
|
||||
|
||||
class pass_value(object):
|
||||
__slots__ = ['callback']
|
||||
|
||||
def __init__(self, callback):
|
||||
self.callback = callback
|
||||
|
||||
def __call__(self, source):
|
||||
if source.successful():
|
||||
self.callback(source.value)
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.callback)
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.callback == getattr(other, 'callback', other)
|
||||
|
||||
def __str__(self):
|
||||
return str(self.callback)
|
||||
|
||||
def __repr__(self):
|
||||
return repr(self.callback)
|
||||
|
||||
def __getattr__(self, item):
|
||||
assert item != 'callback'
|
||||
return getattr(self.callback, item)
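# Illustrative sketch (not part of the original module): apply() is the
# blocking form, while apply_async() returns a Greenlet and routes the
# callback through pass_value so it only fires on success.  The helper name
# _example_apply_async is an assumption for this example only.
def _example_apply_async():
    import gevent

    pool = Pool(1)
    assert pool.apply(pow, (2, 10)) == 1024     # synchronous call

    results = []
    g = pool.apply_async(pow, args=(2, 10), callback=results.append)
    g.join()
    gevent.sleep(0)               # give the linked callback a chance to run
    # results should now contain [1024]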
|
658
panda/python/Lib/site-packages/gevent/pywsgi.py
Normal file
@ -0,0 +1,658 @@
# Copyright (c) 2005-2009, eventlet contributors
|
||||
# Copyright (c) 2009-2011, gevent contributors
|
||||
|
||||
import errno
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
import mimetools
|
||||
from datetime import datetime
|
||||
from urllib import unquote
|
||||
|
||||
from gevent import socket
|
||||
import gevent
|
||||
from gevent.server import StreamServer
|
||||
from gevent.hub import GreenletExit
|
||||
|
||||
|
||||
__all__ = ['WSGIHandler', 'WSGIServer']
|
||||
|
||||
|
||||
MAX_REQUEST_LINE = 8192
|
||||
# Weekday and month names for HTTP date/time formatting; always English!
|
||||
_WEEKDAYNAME = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
|
||||
_MONTHNAME = [None, # Dummy so we can use 1-based month numbers
|
||||
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
|
||||
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
|
||||
_INTERNAL_ERROR_STATUS = '500 Internal Server Error'
|
||||
_INTERNAL_ERROR_BODY = 'Internal Server Error'
|
||||
_INTERNAL_ERROR_HEADERS = [('Content-Type', 'text/plain'),
|
||||
('Connection', 'close'),
|
||||
('Content-Length', str(len(_INTERNAL_ERROR_BODY)))]
|
||||
_REQUEST_TOO_LONG_RESPONSE = "HTTP/1.1 414 Request URI Too Long\r\nConnection: close\r\nContent-length: 0\r\n\r\n"
|
||||
_BAD_REQUEST_RESPONSE = "HTTP/1.1 400 Bad Request\r\nConnection: close\r\nContent-length: 0\r\n\r\n"
|
||||
_CONTINUE_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n"
|
||||
|
||||
|
||||
def format_date_time(timestamp):
|
||||
year, month, day, hh, mm, ss, wd, _y, _z = time.gmtime(timestamp)
|
||||
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (_WEEKDAYNAME[wd], day, _MONTHNAME[month], year, hh, mm, ss)
|
||||
|
||||
|
||||
class Input(object):
|
||||
|
||||
def __init__(self, rfile, content_length, socket=None, chunked_input=False):
|
||||
self.rfile = rfile
|
||||
self.content_length = content_length
|
||||
self.socket = socket
|
||||
self.position = 0
|
||||
self.chunked_input = chunked_input
|
||||
self.chunk_length = -1
|
||||
|
||||
def _discard(self):
|
||||
if self.socket is None and (self.position < (self.content_length or 0) or self.chunked_input):
|
||||
# ## Read and discard body
|
||||
while 1:
|
||||
d = self.read(16384)
|
||||
if not d:
|
||||
break
|
||||
|
||||
def _send_100_continue(self):
|
||||
if self.socket is not None:
|
||||
self.socket.sendall(_CONTINUE_RESPONSE)
|
||||
self.socket = None
|
||||
|
||||
def _do_read(self, length=None, use_readline=False):
|
||||
if use_readline:
|
||||
reader = self.rfile.readline
|
||||
else:
|
||||
reader = self.rfile.read
|
||||
content_length = self.content_length
|
||||
if content_length is None:
|
||||
# Either Content-Length or "Transfer-Encoding: chunked" must be present in a request with a body
|
||||
            # if it was chunked, this function would not have been called
|
||||
return ''
|
||||
self._send_100_continue()
|
||||
left = content_length - self.position
|
||||
if length is None:
|
||||
length = left
|
||||
elif length > left:
|
||||
length = left
|
||||
if not length:
|
||||
return ''
|
||||
read = reader(length)
|
||||
self.position += len(read)
|
||||
if len(read) < length:
|
||||
if (use_readline and not read.endswith("\n")) or not use_readline:
|
||||
raise IOError("unexpected end of file while reading request at position %s" % (self.position,))
|
||||
|
||||
return read
|
||||
|
||||
def _chunked_read(self, length=None, use_readline=False):
|
||||
rfile = self.rfile
|
||||
self._send_100_continue()
|
||||
|
||||
if length == 0:
|
||||
return ""
|
||||
|
||||
if length < 0:
|
||||
length = None
|
||||
|
||||
if use_readline:
|
||||
reader = self.rfile.readline
|
||||
else:
|
||||
reader = self.rfile.read
|
||||
|
||||
response = []
|
||||
while self.chunk_length != 0:
|
||||
maxreadlen = self.chunk_length - self.position
|
||||
if length is not None and length < maxreadlen:
|
||||
maxreadlen = length
|
||||
|
||||
if maxreadlen > 0:
|
||||
data = reader(maxreadlen)
|
||||
if not data:
|
||||
self.chunk_length = 0
|
||||
raise IOError("unexpected end of file while parsing chunked data")
|
||||
|
||||
datalen = len(data)
|
||||
response.append(data)
|
||||
|
||||
self.position += datalen
|
||||
if self.chunk_length == self.position:
|
||||
rfile.readline()
|
||||
|
||||
if length is not None:
|
||||
length -= datalen
|
||||
if length == 0:
|
||||
break
|
||||
if use_readline and data[-1] == "\n":
|
||||
break
|
||||
else:
|
||||
line = rfile.readline()
|
||||
if not line.endswith("\n"):
|
||||
self.chunk_length = 0
|
||||
raise IOError("unexpected end of file while reading chunked data header")
|
||||
self.chunk_length = int(line.split(";", 1)[0], 16)
|
||||
self.position = 0
|
||||
if self.chunk_length == 0:
|
||||
rfile.readline()
|
||||
return ''.join(response)
|
||||
|
||||
def read(self, length=None):
|
||||
if self.chunked_input:
|
||||
return self._chunked_read(length)
|
||||
return self._do_read(length)
|
||||
|
||||
def readline(self, size=None):
|
||||
if self.chunked_input:
|
||||
return self._chunked_read(size, True)
|
||||
else:
|
||||
return self._do_read(size, use_readline=True)
|
||||
|
||||
def readlines(self, hint=None):
|
||||
return list(self)
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
line = self.readline()
|
||||
if not line:
|
||||
raise StopIteration
|
||||
return line
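# Illustrative sketch (not part of the original module): how Input parses a
# chunked request body.  A StringIO stands in for the socket's rfile; the
# helper name _example_chunked_input is an assumption for this example only.
def _example_chunked_input():
    from StringIO import StringIO

    # one 5-byte chunk ("hello") followed by the terminating zero-length chunk
    rfile = StringIO("5\r\nhello\r\n0\r\n\r\n")
    body = Input(rfile, content_length=None, chunked_input=True)
    assert body.read() == "hello"
    assert body.read() == ""      # the zero-length chunk marks end of body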
|
||||
|
||||
|
||||
class WSGIHandler(object):
|
||||
protocol_version = 'HTTP/1.1'
|
||||
MessageClass = mimetools.Message
|
||||
|
||||
def __init__(self, socket, address, server, rfile=None):
|
||||
self.socket = socket
|
||||
self.client_address = address
|
||||
self.server = server
|
||||
if rfile is None:
|
||||
self.rfile = socket.makefile('rb', -1)
|
||||
else:
|
||||
self.rfile = rfile
|
||||
|
||||
def handle(self):
|
||||
try:
|
||||
while self.socket is not None:
|
||||
self.time_start = time.time()
|
||||
self.time_finish = 0
|
||||
result = self.handle_one_request()
|
||||
if result is None:
|
||||
break
|
||||
if result is True:
|
||||
continue
|
||||
self.status, response_body = result
|
||||
self.socket.sendall(response_body)
|
||||
if self.time_finish == 0:
|
||||
self.time_finish = time.time()
|
||||
self.log_request()
|
||||
break
|
||||
finally:
|
||||
if self.socket is not None:
|
||||
try:
|
||||
# read out request data to prevent error: [Errno 104] Connection reset by peer
|
||||
try:
|
||||
self.socket._sock.recv(16384)
|
||||
finally:
|
||||
self.socket._sock.close() # do not rely on garbage collection
|
||||
self.socket.close()
|
||||
except socket.error:
|
||||
pass
|
||||
self.__dict__.pop('socket', None)
|
||||
self.__dict__.pop('rfile', None)
|
||||
|
||||
def _check_http_version(self):
|
||||
version = self.request_version
|
||||
if not version.startswith("HTTP/"):
|
||||
return False
|
||||
version = tuple(int(x) for x in version[5:].split(".")) # "HTTP/"
|
||||
if version[1] < 0 or version < (0, 9) or version >= (2, 0):
|
||||
return False
|
||||
return True
|
||||
|
||||
def read_request(self, raw_requestline):
|
||||
self.requestline = raw_requestline.rstrip()
|
||||
words = self.requestline.split()
|
||||
if len(words) == 3:
|
||||
self.command, self.path, self.request_version = words
|
||||
if not self._check_http_version():
|
||||
self.log_error('Invalid http version: %r', raw_requestline)
|
||||
return
|
||||
elif len(words) == 2:
|
||||
self.command, self.path = words
|
||||
if self.command != "GET":
|
||||
self.log_error('Expected GET method: %r', raw_requestline)
|
||||
return
|
||||
self.request_version = "HTTP/0.9"
|
||||
# QQQ I'm pretty sure we can drop support for HTTP/0.9
|
||||
else:
|
||||
self.log_error('Invalid HTTP method: %r', raw_requestline)
|
||||
return
|
||||
|
||||
self.headers = self.MessageClass(self.rfile, 0)
|
||||
if self.headers.status:
|
||||
self.log_error('Invalid headers status: %r', self.headers.status)
|
||||
return
|
||||
|
||||
if self.headers.get("transfer-encoding", "").lower() == "chunked":
|
||||
try:
|
||||
del self.headers["content-length"]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
content_length = self.headers.get("content-length")
|
||||
if content_length is not None:
|
||||
content_length = int(content_length)
|
||||
if content_length < 0:
|
||||
self.log_error('Invalid Content-Length: %r', content_length)
|
||||
return
|
||||
if content_length and self.command in ('HEAD', ):
|
||||
self.log_error('Unexpected Content-Length')
|
||||
return
|
||||
|
||||
self.content_length = content_length
|
||||
|
||||
if self.request_version == "HTTP/1.1":
|
||||
conntype = self.headers.get("Connection", "").lower()
|
||||
if conntype == "close":
|
||||
self.close_connection = True
|
||||
else:
|
||||
self.close_connection = False
|
||||
else:
|
||||
self.close_connection = True
|
||||
|
||||
return True
|
||||
|
||||
def log_error(self, msg, *args):
|
||||
try:
|
||||
message = msg % args
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
message = '%r %r' % (msg, args)
|
||||
try:
|
||||
message = '%s: %s' % (self.socket, message)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
sys.stderr.write(message + '\n')
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
|
||||
def read_requestline(self):
|
||||
return self.rfile.readline(MAX_REQUEST_LINE)
|
||||
|
||||
def handle_one_request(self):
|
||||
if self.rfile.closed:
|
||||
return
|
||||
|
||||
try:
|
||||
self.requestline = self.read_requestline()
|
||||
except socket.error:
|
||||
# "Connection reset by peer" or other socket errors aren't interesting here
|
||||
return
|
||||
|
||||
if not self.requestline:
|
||||
return
|
||||
|
||||
self.response_length = 0
|
||||
|
||||
if len(self.requestline) >= MAX_REQUEST_LINE:
|
||||
return ('414', _REQUEST_TOO_LONG_RESPONSE)
|
||||
|
||||
try:
|
||||
            # for compatibility with older versions of pywsgi, we pass self.requestline as an argument here
|
||||
if not self.read_request(self.requestline):
|
||||
return ('400', _BAD_REQUEST_RESPONSE)
|
||||
except Exception:
|
||||
ex = sys.exc_info()[1]
|
||||
if not isinstance(ex, ValueError):
|
||||
traceback.print_exc()
|
||||
self.log_error('Invalid request: %s', str(ex) or ex.__class__.__name__)
|
||||
return ('400', _BAD_REQUEST_RESPONSE)
|
||||
|
||||
self.environ = self.get_environ()
|
||||
self.application = self.server.application
|
||||
try:
|
||||
self.handle_one_response()
|
||||
except socket.error:
|
||||
ex = sys.exc_info()[1]
|
||||
# Broken pipe, connection reset by peer
|
||||
if ex.args[0] in (errno.EPIPE, errno.ECONNRESET):
|
||||
sys.exc_clear()
|
||||
return
|
||||
else:
|
||||
raise
|
||||
|
||||
if self.close_connection:
|
||||
return
|
||||
|
||||
if self.rfile.closed:
|
||||
return
|
||||
|
||||
return True # read more requests
|
||||
|
||||
def finalize_headers(self):
|
||||
if self.provided_date is None:
|
||||
self.response_headers.append(('Date', format_date_time(time.time())))
|
||||
|
||||
if self.code not in (304, 204):
|
||||
# the reply will include message-body; make sure we have either Content-Length or chunked
|
||||
if self.provided_content_length is None:
|
||||
if hasattr(self.result, '__len__'):
|
||||
self.response_headers.append(('Content-Length', str(sum(len(chunk) for chunk in self.result))))
|
||||
else:
|
||||
if self.request_version != 'HTTP/1.0':
|
||||
self.response_use_chunked = True
|
||||
self.response_headers.append(('Transfer-Encoding', 'chunked'))
|
||||
|
||||
def _sendall(self, data):
|
||||
try:
|
||||
self.socket.sendall(data)
|
||||
except socket.error, ex:
|
||||
self.status = 'socket error: %s' % ex
|
||||
if self.code > 0:
|
||||
self.code = -self.code
|
||||
raise
|
||||
self.response_length += len(data)
|
||||
|
||||
def _write(self, data):
|
||||
if not data:
|
||||
return
|
||||
if self.response_use_chunked:
|
||||
## Write the chunked encoding
|
||||
data = "%x\r\n%s\r\n" % (len(data), data)
|
||||
self._sendall(data)
|
||||
|
||||
def write(self, data):
|
||||
if self.code in (304, 204) and data:
|
||||
raise AssertionError('The %s response must have no body' % self.code)
|
||||
|
||||
if self.headers_sent:
|
||||
self._write(data)
|
||||
else:
|
||||
if not self.status:
|
||||
raise AssertionError("The application did not call start_response()")
|
||||
self._write_with_headers(data)
|
||||
|
||||
if sys.version_info[:2] >= (2, 6):
|
||||
|
||||
def _write_with_headers(self, data):
|
||||
towrite = bytearray()
|
||||
self.headers_sent = True
|
||||
self.finalize_headers()
|
||||
|
||||
towrite.extend('HTTP/1.1 %s\r\n' % self.status)
|
||||
for header in self.response_headers:
|
||||
towrite.extend('%s: %s\r\n' % header)
|
||||
|
||||
towrite.extend('\r\n')
|
||||
if data:
|
||||
if self.response_use_chunked:
|
||||
## Write the chunked encoding
|
||||
towrite.extend("%x\r\n%s\r\n" % (len(data), data))
|
||||
else:
|
||||
towrite.extend(data)
|
||||
self._sendall(towrite)
|
||||
|
||||
else:
|
||||
# Python 2.5 does not have bytearray
|
||||
|
||||
def _write_with_headers(self, data):
|
||||
towrite = []
|
||||
self.headers_sent = True
|
||||
self.finalize_headers()
|
||||
|
||||
towrite.append('HTTP/1.1 %s\r\n' % self.status)
|
||||
for header in self.response_headers:
|
||||
towrite.append('%s: %s\r\n' % header)
|
||||
|
||||
towrite.append('\r\n')
|
||||
if data:
|
||||
if self.response_use_chunked:
|
||||
## Write the chunked encoding
|
||||
towrite.append("%x\r\n%s\r\n" % (len(data), data))
|
||||
else:
|
||||
towrite.append(data)
|
||||
self._sendall(''.join(towrite))
|
||||
|
||||
def start_response(self, status, headers, exc_info=None):
|
||||
if exc_info:
|
||||
try:
|
||||
if self.headers_sent:
|
||||
# Re-raise original exception if headers sent
|
||||
raise exc_info[0], exc_info[1], exc_info[2]
|
||||
finally:
|
||||
# Avoid dangling circular ref
|
||||
exc_info = None
|
||||
self.code = int(status.split(' ', 1)[0])
|
||||
self.status = status
|
||||
self.response_headers = headers
|
||||
|
||||
provided_connection = None
|
||||
self.provided_date = None
|
||||
self.provided_content_length = None
|
||||
|
||||
for header, value in headers:
|
||||
header = header.lower()
|
||||
if header == 'connection':
|
||||
provided_connection = value
|
||||
elif header == 'date':
|
||||
self.provided_date = value
|
||||
elif header == 'content-length':
|
||||
self.provided_content_length = value
|
||||
|
||||
if self.request_version == 'HTTP/1.0' and provided_connection is None:
|
||||
headers.append(('Connection', 'close'))
|
||||
self.close_connection = True
|
||||
elif provided_connection == 'close':
|
||||
self.close_connection = True
|
||||
|
||||
if self.code in (304, 204):
|
||||
if self.provided_content_length is not None and self.provided_content_length != '0':
|
||||
msg = 'Invalid Content-Length for %s response: %r (must be absent or zero)' % (self.code, self.provided_content_length)
|
||||
raise AssertionError(msg)
|
||||
|
||||
return self.write
|
||||
|
||||
def log_request(self):
|
||||
log = self.server.log
|
||||
if log:
|
||||
log.write(self.format_request() + '\n')
|
||||
|
||||
def format_request(self):
|
||||
now = datetime.now().replace(microsecond=0)
|
||||
length = self.response_length or '-'
|
||||
if self.time_finish:
|
||||
delta = '%.6f' % (self.time_finish - self.time_start)
|
||||
else:
|
||||
delta = '-'
|
||||
client_address = self.client_address[0] if isinstance(self.client_address, tuple) else self.client_address
|
||||
return '%s - - [%s] "%s" %s %s %s' % (
|
||||
client_address or '-',
|
||||
now,
|
||||
getattr(self, 'requestline', ''),
|
||||
(getattr(self, 'status', None) or '000').split()[0],
|
||||
length,
|
||||
delta)
|
||||
|
||||
def process_result(self):
|
||||
for data in self.result:
|
||||
if data:
|
||||
self.write(data)
|
||||
if self.status and not self.headers_sent:
|
||||
self.write('')
|
||||
if self.response_use_chunked:
|
||||
self.socket.sendall('0\r\n\r\n')
|
||||
self.response_length += 5
|
||||
|
||||
def run_application(self):
|
||||
self.result = self.application(self.environ, self.start_response)
|
||||
self.process_result()
|
||||
|
||||
def handle_one_response(self):
|
||||
self.time_start = time.time()
|
||||
self.status = None
|
||||
self.headers_sent = False
|
||||
|
||||
self.result = None
|
||||
self.response_use_chunked = False
|
||||
self.response_length = 0
|
||||
|
||||
try:
|
||||
try:
|
||||
self.run_application()
|
||||
finally:
|
||||
close = getattr(self.result, 'close', None)
|
||||
if close is not None:
|
||||
close()
|
||||
self.wsgi_input._discard()
|
||||
except:
|
||||
self.handle_error(*sys.exc_info())
|
||||
finally:
|
||||
self.time_finish = time.time()
|
||||
self.log_request()
|
||||
|
||||
def handle_error(self, type, value, tb):
|
||||
if not issubclass(type, GreenletExit):
|
||||
self.server.loop.handle_error(self.environ, type, value, tb)
|
||||
del tb
|
||||
if self.response_length:
|
||||
self.close_connection = True
|
||||
else:
|
||||
self.start_response(_INTERNAL_ERROR_STATUS, _INTERNAL_ERROR_HEADERS[:])
|
||||
self.write(_INTERNAL_ERROR_BODY)
|
||||
|
||||
def _headers(self):
|
||||
key = None
|
||||
value = None
|
||||
for header in self.headers.headers:
|
||||
if key is not None and header[:1] in " \t":
|
||||
value += header
|
||||
continue
|
||||
|
||||
if key not in (None, 'CONTENT_TYPE', 'CONTENT_LENGTH'):
|
||||
yield 'HTTP_' + key, value.strip()
|
||||
|
||||
key, value = header.split(':', 1)
|
||||
key = key.replace('-', '_').upper()
|
||||
|
||||
if key not in (None, 'CONTENT_TYPE', 'CONTENT_LENGTH'):
|
||||
yield 'HTTP_' + key, value.strip()
|
||||
|
||||
def get_environ(self):
|
||||
env = self.server.get_environ()
|
||||
env['REQUEST_METHOD'] = self.command
|
||||
env['SCRIPT_NAME'] = ''
|
||||
|
||||
if '?' in self.path:
|
||||
path, query = self.path.split('?', 1)
|
||||
else:
|
||||
path, query = self.path, ''
|
||||
env['PATH_INFO'] = unquote(path)
|
||||
env['QUERY_STRING'] = query
|
||||
|
||||
if self.headers.typeheader is not None:
|
||||
env['CONTENT_TYPE'] = self.headers.typeheader
|
||||
|
||||
length = self.headers.getheader('content-length')
|
||||
if length:
|
||||
env['CONTENT_LENGTH'] = length
|
||||
env['SERVER_PROTOCOL'] = self.request_version
|
||||
|
||||
client_address = self.client_address
|
||||
if isinstance(client_address, tuple):
|
||||
env['REMOTE_ADDR'] = str(client_address[0])
|
||||
env['REMOTE_PORT'] = str(client_address[1])
|
||||
|
||||
for key, value in self._headers():
|
||||
if key in env:
|
||||
if 'COOKIE' in key:
|
||||
env[key] += '; ' + value
|
||||
else:
|
||||
env[key] += ',' + value
|
||||
else:
|
||||
env[key] = value
|
||||
|
||||
if env.get('HTTP_EXPECT') == '100-continue':
|
||||
socket = self.socket
|
||||
else:
|
||||
socket = None
|
||||
chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
|
||||
self.wsgi_input = Input(self.rfile, self.content_length, socket=socket, chunked_input=chunked)
|
||||
env['wsgi.input'] = self.wsgi_input
|
||||
return env
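# Illustrative sketch (not part of the original module): for a request line
# like "GET /index.html?key=value HTTP/1.1" with a "Host: localhost" header,
# get_environ() produces roughly the following WSGI keys on top of the
# server's base environ.  The literal values are assumptions for the example.
_EXAMPLE_ENVIRON_SUBSET = {
    'REQUEST_METHOD': 'GET',
    'PATH_INFO': '/index.html',
    'QUERY_STRING': 'key=value',
    'SERVER_PROTOCOL': 'HTTP/1.1',
    'HTTP_HOST': 'localhost',     # other headers become HTTP_* keys
}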
|
||||
|
||||
|
||||
class WSGIServer(StreamServer):
|
||||
"""A WSGI server based on :class:`StreamServer` that supports HTTPS."""
|
||||
|
||||
handler_class = WSGIHandler
|
||||
base_env = {'GATEWAY_INTERFACE': 'CGI/1.1',
|
||||
'SERVER_SOFTWARE': 'gevent/%d.%d Python/%d.%d' % (gevent.version_info[:2] + sys.version_info[:2]),
|
||||
'SCRIPT_NAME': '',
|
||||
'wsgi.version': (1, 0),
|
||||
'wsgi.multithread': False,
|
||||
'wsgi.multiprocess': False,
|
||||
'wsgi.run_once': False}
|
||||
|
||||
def __init__(self, listener, application=None, backlog=None, spawn='default', log='default', handler_class=None,
|
||||
environ=None, **ssl_args):
|
||||
StreamServer.__init__(self, listener, backlog=backlog, spawn=spawn, **ssl_args)
|
||||
if application is not None:
|
||||
self.application = application
|
||||
if handler_class is not None:
|
||||
self.handler_class = handler_class
|
||||
if log == 'default':
|
||||
self.log = sys.stderr
|
||||
else:
|
||||
self.log = log
|
||||
self.set_environ(environ)
|
||||
self.set_max_accept()
|
||||
|
||||
def set_environ(self, environ=None):
|
||||
if environ is not None:
|
||||
self.environ = environ
|
||||
environ_update = getattr(self, 'environ', None)
|
||||
self.environ = self.base_env.copy()
|
||||
if self.ssl_enabled:
|
||||
self.environ['wsgi.url_scheme'] = 'https'
|
||||
else:
|
||||
self.environ['wsgi.url_scheme'] = 'http'
|
||||
if environ_update is not None:
|
||||
self.environ.update(environ_update)
|
||||
if self.environ.get('wsgi.errors') is None:
|
||||
self.environ['wsgi.errors'] = sys.stderr
|
||||
|
||||
def set_max_accept(self):
|
||||
if self.environ.get('wsgi.multiprocess'):
|
||||
self.max_accept = 1
|
||||
|
||||
def get_environ(self):
|
||||
return self.environ.copy()
|
||||
|
||||
def init_socket(self):
|
||||
StreamServer.init_socket(self)
|
||||
self.update_environ()
|
||||
|
||||
def update_environ(self):
|
||||
address = self.address
|
||||
if isinstance(address, tuple):
|
||||
if 'SERVER_NAME' not in self.environ:
|
||||
try:
|
||||
name = socket.getfqdn(address[0])
|
||||
except socket.error:
|
||||
name = str(address[0])
|
||||
self.environ['SERVER_NAME'] = name
|
||||
self.environ.setdefault('SERVER_PORT', str(address[1]))
|
||||
else:
|
||||
self.environ.setdefault('SERVER_NAME', '')
|
||||
self.environ.setdefault('SERVER_PORT', '')
|
||||
|
||||
def handle(self, socket, address):
|
||||
handler = self.handler_class(socket, address, self)
|
||||
handler.handle()
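# Illustrative sketch (not part of the original module): a minimal WSGI
# application served by WSGIServer.  The address, application body and the
# helper names are assumptions for this example only.
def _example_wsgi_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['hello world\n']


def _example_serve():
    # serve_forever() is inherited from StreamServer/BaseServer
    WSGIServer(('127.0.0.1', 8080), _example_wsgi_app).serve_forever()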
|
505
panda/python/Lib/site-packages/gevent/queue.py
Normal file
@ -0,0 +1,505 @@
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||
"""Synchronized queues.
|
||||
|
||||
The :mod:`gevent.queue` module implements multi-producer, multi-consumer queues
|
||||
that work across greenlets, with the API similar to the classes found in the
|
||||
standard :mod:`Queue` and :class:`multiprocessing <multiprocessing.Queue>` modules.
|
||||
|
||||
Changed in version 1.0: Queue(0) now means queue of infinite size, not a channel.
|
||||
|
||||
The classes in this module implement the iterator protocol. Iterating over a queue
means repeatedly calling :meth:`get <Queue.get>` until :meth:`get <Queue.get>` returns ``StopIteration``.
|
||||
|
||||
>>> queue = gevent.queue.Queue()
|
||||
>>> queue.put(1)
|
||||
>>> queue.put(2)
|
||||
>>> queue.put(StopIteration)
|
||||
>>> for item in queue:
|
||||
... print item
|
||||
1
|
||||
2
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
import sys
|
||||
import heapq
|
||||
import collections
|
||||
|
||||
if sys.version_info[0] == 2:
|
||||
import Queue as __queue__
|
||||
else:
|
||||
import queue as __queue__
|
||||
Full = __queue__.Full
|
||||
Empty = __queue__.Empty
|
||||
|
||||
from gevent.timeout import Timeout
|
||||
from gevent.hub import get_hub, Waiter, getcurrent
|
||||
|
||||
|
||||
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'JoinableQueue', 'Channel']
|
||||
|
||||
|
||||
class Queue(object):
|
||||
"""Create a queue object with a given maximum size.
|
||||
|
||||
If *maxsize* is less than or equal to zero or ``None``, the queue size is infinite.
|
||||
"""
|
||||
|
||||
def __init__(self, maxsize=None, items=None):
|
||||
if maxsize is not None and maxsize <= 0:
|
||||
self.maxsize = None
|
||||
if maxsize == 0:
|
||||
import warnings
|
||||
warnings.warn('Queue(0) now equivalent to Queue(None); if you want a channel, use Channel',
|
||||
DeprecationWarning, stacklevel=2)
|
||||
else:
|
||||
self.maxsize = maxsize
|
||||
self.getters = set()
|
||||
self.putters = set()
|
||||
self.hub = get_hub()
|
||||
self._event_unlock = None
|
||||
if items:
|
||||
self._init(maxsize, items)
|
||||
else:
|
||||
self._init(maxsize)
|
||||
|
||||
# QQQ make maxsize into a property with setter that schedules unlock if necessary
|
||||
|
||||
def copy(self):
|
||||
return type(self)(self.maxsize, self.queue)
|
||||
|
||||
def _init(self, maxsize, items=None):
|
||||
if items:
|
||||
self.queue = collections.deque(items)
|
||||
else:
|
||||
self.queue = collections.deque()
|
||||
|
||||
def _get(self):
|
||||
return self.queue.popleft()
|
||||
|
||||
def _peek(self):
|
||||
return self.queue[0]
|
||||
|
||||
def _put(self, item):
|
||||
self.queue.append(item)
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s at %s%s>' % (type(self).__name__, hex(id(self)), self._format())
|
||||
|
||||
def __str__(self):
|
||||
return '<%s%s>' % (type(self).__name__, self._format())
|
||||
|
||||
def _format(self):
|
||||
result = []
|
||||
if self.maxsize is not None:
|
||||
result.append('maxsize=%r' % (self.maxsize, ))
|
||||
if getattr(self, 'queue', None):
|
||||
result.append('queue=%r' % (self.queue, ))
|
||||
if self.getters:
|
||||
result.append('getters[%s]' % len(self.getters))
|
||||
if self.putters:
|
||||
result.append('putters[%s]' % len(self.putters))
|
||||
if result:
|
||||
return ' ' + ' '.join(result)
|
||||
else:
|
||||
return ''
|
||||
|
||||
def qsize(self):
|
||||
"""Return the size of the queue."""
|
||||
return len(self.queue)
|
||||
|
||||
def empty(self):
|
||||
"""Return ``True`` if the queue is empty, ``False`` otherwise."""
|
||||
return not self.qsize()
|
||||
|
||||
def full(self):
|
||||
"""Return ``True`` if the queue is full, ``False`` otherwise.
|
||||
|
||||
``Queue(None)`` is never full.
|
||||
"""
|
||||
return self.maxsize is not None and self.qsize() >= self.maxsize
|
||||
|
||||
def put(self, item, block=True, timeout=None):
|
||||
"""Put an item into the queue.
|
||||
|
||||
If optional arg *block* is true and *timeout* is ``None`` (the default),
|
||||
block if necessary until a free slot is available. If *timeout* is
|
||||
a positive number, it blocks at most *timeout* seconds and raises
|
||||
the :class:`Full` exception if no free slot was available within that time.
|
||||
Otherwise (*block* is false), put an item on the queue if a free slot
|
||||
is immediately available, else raise the :class:`Full` exception (*timeout*
|
||||
is ignored in that case).
|
||||
"""
|
||||
if self.maxsize is None or self.qsize() < self.maxsize:
|
||||
# there's a free slot, put an item right away
|
||||
self._put(item)
|
||||
if self.getters:
|
||||
self._schedule_unlock()
|
||||
elif self.hub is getcurrent():
|
||||
# We're in the mainloop, so we cannot wait; we can switch to other greenlets though.
|
||||
# Check if possible to get a free slot in the queue.
|
||||
while self.getters and self.qsize() and self.qsize() >= self.maxsize:
|
||||
getter = self.getters.pop()
|
||||
getter.switch(getter)
|
||||
if self.qsize() < self.maxsize:
|
||||
self._put(item)
|
||||
return
|
||||
raise Full
|
||||
elif block:
|
||||
waiter = ItemWaiter(item, self)
|
||||
self.putters.add(waiter)
|
||||
timeout = Timeout.start_new(timeout, Full)
|
||||
try:
|
||||
if self.getters:
|
||||
self._schedule_unlock()
|
||||
result = waiter.get()
|
||||
assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
|
||||
finally:
|
||||
timeout.cancel()
|
||||
self.putters.discard(waiter)
|
||||
else:
|
||||
raise Full
|
||||
|
||||
def put_nowait(self, item):
|
||||
"""Put an item into the queue without blocking.
|
||||
|
||||
Only enqueue the item if a free slot is immediately available.
|
||||
Otherwise raise the :class:`Full` exception.
|
||||
"""
|
||||
self.put(item, False)
|
||||
|
||||
def get(self, block=True, timeout=None):
|
||||
"""Remove and return an item from the queue.
|
||||
|
||||
If optional args *block* is true and *timeout* is ``None`` (the default),
|
||||
block if necessary until an item is available. If *timeout* is a positive number,
|
||||
it blocks at most *timeout* seconds and raises the :class:`Empty` exception
|
||||
if no item was available within that time. Otherwise (*block* is false), return
|
||||
an item if one is immediately available, else raise the :class:`Empty` exception
|
||||
(*timeout* is ignored in that case).
|
||||
"""
|
||||
if self.qsize():
|
||||
if self.putters:
|
||||
self._schedule_unlock()
|
||||
return self._get()
|
||||
elif self.hub is getcurrent():
|
||||
# special case to make get_nowait() runnable in the mainloop greenlet
|
||||
# there are no items in the queue; try to fix the situation by unlocking putters
|
||||
while self.putters:
|
||||
self.putters.pop().put_and_switch()
|
||||
if self.qsize():
|
||||
return self._get()
|
||||
raise Empty
|
||||
elif block:
|
||||
waiter = Waiter()
|
||||
timeout = Timeout.start_new(timeout, Empty)
|
||||
try:
|
||||
self.getters.add(waiter)
|
||||
if self.putters:
|
||||
self._schedule_unlock()
|
||||
result = waiter.get()
|
||||
assert result is waiter, 'Invalid switch into Queue.get: %r' % (result, )
|
||||
return self._get()
|
||||
finally:
|
||||
self.getters.discard(waiter)
|
||||
timeout.cancel()
|
||||
else:
|
||||
raise Empty
|
||||
|
||||
def get_nowait(self):
|
||||
"""Remove and return an item from the queue without blocking.
|
||||
|
||||
Only get an item if one is immediately available. Otherwise
|
||||
raise the :class:`Empty` exception.
|
||||
"""
|
||||
return self.get(False)
|
||||
|
||||
def peek(self, block=True, timeout=None):
|
||||
"""Return an item from the queue without removing it.
|
||||
|
||||
If optional args *block* is true and *timeout* is ``None`` (the default),
|
||||
block if necessary until an item is available. If *timeout* is a positive number,
|
||||
it blocks at most *timeout* seconds and raises the :class:`Empty` exception
|
||||
if no item was available within that time. Otherwise (*block* is false), return
|
||||
an item if one is immediately available, else raise the :class:`Empty` exception
|
||||
(*timeout* is ignored in that case).
|
||||
"""
|
||||
if self.qsize():
|
||||
return self._peek()
|
||||
elif self.hub is getcurrent():
|
||||
# special case to make peek(False) runnable in the mainloop greenlet
|
||||
# there are no items in the queue; try to fix the situation by unlocking putters
|
||||
while self.putters:
|
||||
self.putters.pop().put_and_switch()
|
||||
if self.qsize():
|
||||
return self._peek()
|
||||
raise Empty
|
||||
elif block:
|
||||
waiter = Waiter()
|
||||
timeout = Timeout.start_new(timeout, Empty)
|
||||
try:
|
||||
self.getters.add(waiter)
|
||||
if self.putters:
|
||||
self._schedule_unlock()
|
||||
result = waiter.get()
|
||||
assert result is waiter, 'Invalid switch into Queue.peek: %r' % (result, )
|
||||
return self._peek()
|
||||
finally:
|
||||
self.getters.discard(waiter)
|
||||
timeout.cancel()
|
||||
else:
|
||||
raise Empty
|
||||
|
||||
def peek_nowait(self):
|
||||
return self.peek(False)
|
||||
|
||||
def _unlock(self):
|
||||
while True:
|
||||
repeat = False
|
||||
if self.putters and (self.maxsize is None or self.qsize() < self.maxsize):
|
||||
repeat = True
|
||||
try:
|
||||
putter = self.putters.pop()
|
||||
self._put(putter.item)
|
||||
except:
|
||||
putter.throw(*sys.exc_info())
|
||||
else:
|
||||
putter.switch(putter)
|
||||
if self.getters and self.qsize():
|
||||
repeat = True
|
||||
getter = self.getters.pop()
|
||||
getter.switch(getter)
|
||||
if not repeat:
|
||||
return
|
||||
|
||||
def _schedule_unlock(self):
|
||||
if not self._event_unlock:
|
||||
self._event_unlock = self.hub.loop.run_callback(self._unlock)
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
result = self.get()
|
||||
if result is StopIteration:
|
||||
raise result
|
||||
return result
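# Illustrative sketch (not part of the original module): basic bounded-queue
# behaviour.  The helper name _example_queue_usage is an assumption for this
# example only.
def _example_queue_usage():
    q = Queue(maxsize=2)
    q.put(1)
    q.put(2)
    assert q.full() and q.qsize() == 2
    try:
        q.put_nowait(3)           # no free slot and block=False
    except Full:
        pass
    assert q.get() == 1 and q.get() == 2
    try:
        q.get_nowait()            # queue is now empty
    except Empty:
        pass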
|
||||
|
||||
|
||||
class ItemWaiter(Waiter):
|
||||
__slots__ = ['item', 'queue']
|
||||
|
||||
def __init__(self, item, queue):
|
||||
Waiter.__init__(self)
|
||||
self.item = item
|
||||
self.queue = queue
|
||||
|
||||
def put_and_switch(self):
|
||||
self.queue._put(self.item)
|
||||
self.queue = None
|
||||
self.item = None
|
||||
return self.switch(self)
|
||||
|
||||
|
||||
class PriorityQueue(Queue):
|
||||
'''A subclass of :class:`Queue` that retrieves entries in priority order (lowest first).
|
||||
|
||||
Entries are typically tuples of the form: ``(priority number, data)``.
|
||||
'''
|
||||
|
||||
def _init(self, maxsize, items=None):
|
||||
if items:
|
||||
self.queue = list(items)
|
||||
else:
|
||||
self.queue = []
|
||||
|
||||
def _put(self, item, heappush=heapq.heappush):
|
||||
heappush(self.queue, item)
|
||||
|
||||
def _get(self, heappop=heapq.heappop):
|
||||
return heappop(self.queue)
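# Illustrative sketch (not part of the original module): PriorityQueue pops
# the smallest entry first.  The helper name is an assumption for this
# example only.
def _example_priority_queue():
    pq = PriorityQueue()
    pq.put((3, 'low'))
    pq.put((1, 'high'))
    pq.put((2, 'medium'))
    assert pq.get() == (1, 'high')
    assert pq.get() == (2, 'medium')
    assert pq.get() == (3, 'low')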
|
||||
|
||||
|
||||
class LifoQueue(Queue):
|
||||
'''A subclass of :class:`Queue` that retrieves most recently added entries first.'''
|
||||
|
||||
def _init(self, maxsize, items=None):
|
||||
if items:
|
||||
self.queue = list(items)
|
||||
else:
|
||||
self.queue = []
|
||||
|
||||
def _put(self, item):
|
||||
self.queue.append(item)
|
||||
|
||||
def _get(self):
|
||||
return self.queue.pop()
|
||||
|
||||
|
||||
class JoinableQueue(Queue):
|
||||
'''A subclass of :class:`Queue` that additionally has :meth:`task_done` and :meth:`join` methods.'''
|
||||
|
||||
def __init__(self, maxsize=None, items=None, unfinished_tasks=None):
|
||||
from gevent.event import Event
|
||||
Queue.__init__(self, maxsize, items)
|
||||
self.unfinished_tasks = unfinished_tasks or 0
|
||||
self._cond = Event()
|
||||
self._cond.set()
|
||||
|
||||
def copy(self):
|
||||
return type(self)(self.maxsize, self.queue, self.unfinished_tasks)
|
||||
|
||||
def _format(self):
|
||||
result = Queue._format(self)
|
||||
if self.unfinished_tasks:
|
||||
result += ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond)
|
||||
return result
|
||||
|
||||
def _put(self, item):
|
||||
Queue._put(self, item)
|
||||
self.unfinished_tasks += 1
|
||||
self._cond.clear()
|
||||
|
||||
def task_done(self):
|
||||
'''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
|
||||
For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue
|
||||
that the processing on the task is complete.
|
||||
|
||||
If a :meth:`join` is currently blocking, it will resume when all items have been processed
|
||||
(meaning that a :meth:`task_done` call was received for every item that had been
|
||||
:meth:`put <Queue.put>` into the queue).
|
||||
|
||||
Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
|
||||
'''
|
||||
if self.unfinished_tasks <= 0:
|
||||
raise ValueError('task_done() called too many times')
|
||||
self.unfinished_tasks -= 1
|
||||
if self.unfinished_tasks == 0:
|
||||
self._cond.set()
|
||||
|
||||
def join(self):
|
||||
'''Block until all items in the queue have been gotten and processed.
|
||||
|
||||
The count of unfinished tasks goes up whenever an item is added to the queue.
|
||||
The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
|
||||
that the item was retrieved and all work on it is complete. When the count of
|
||||
unfinished tasks drops to zero, :meth:`join` unblocks.
|
||||
'''
|
||||
self._cond.wait()
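# Illustrative sketch (not part of the original module): the task_done()/join()
# pattern with a consumer greenlet.  The helper name and the use of
# gevent.spawn are assumptions for this example only.
def _example_joinable_queue():
    import gevent

    q = JoinableQueue()
    seen = []

    def consumer():
        while True:
            seen.append(q.get())
            q.task_done()         # one task_done() per successful get()

    g = gevent.spawn(consumer)
    for n in range(3):
        q.put(n)
    q.join()                      # unblocks once every item is task_done()'d
    assert seen == [0, 1, 2]
    g.kill()                      # the consumer would otherwise block forever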
|
||||
|
||||
|
||||
class Channel(object):
|
||||
|
||||
def __init__(self):
|
||||
self.getters = collections.deque()
|
||||
self.putters = collections.deque()
|
||||
self.hub = get_hub()
|
||||
self._event_unlock = None
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._format())
|
||||
|
||||
def __str__(self):
|
||||
return '<%s %s>' % (type(self).__name__, self._format())
|
||||
|
||||
def _format(self):
|
||||
result = ''
|
||||
if self.getters:
|
||||
result += ' getters[%s]' % len(self.getters)
|
||||
if self.putters:
|
||||
result += ' putters[%s]' % len(self.putters)
|
||||
return result
|
||||
|
||||
@property
|
||||
def balance(self):
|
||||
return len(self.putters) - len(self.getters)
|
||||
|
||||
def qsize(self):
|
||||
return 0
|
||||
|
||||
def empty(self):
|
||||
return True
|
||||
|
||||
def full(self):
|
||||
return True
|
||||
|
||||
def put(self, item, block=True, timeout=None):
|
||||
if self.hub is getcurrent():
|
||||
if self.getters:
|
||||
getter = self.getters.popleft()
|
||||
getter.switch(item)
|
||||
return
|
||||
raise Full
|
||||
|
||||
if not block:
|
||||
timeout = 0
|
||||
|
||||
waiter = Waiter()
|
||||
item = (item, waiter)
|
||||
self.putters.append(item)
|
||||
timeout = Timeout.start_new(timeout, Full)
|
||||
try:
|
||||
if self.getters:
|
||||
self._schedule_unlock()
|
||||
result = waiter.get()
|
||||
assert result is waiter, "Invalid switch into Channel.put: %r" % (result, )
|
||||
except:
|
||||
self._discard(item)
|
||||
raise
|
||||
finally:
|
||||
timeout.cancel()
|
||||
|
||||
def _discard(self, item):
|
||||
try:
|
||||
self.putters.remove(item)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def put_nowait(self, item):
|
||||
self.put(item, False)
|
||||
|
||||
def get(self, block=True, timeout=None):
|
||||
if self.hub is getcurrent():
|
||||
if self.putters:
|
||||
item, putter = self.putters.popleft()
|
||||
self.hub.loop.run_callback(putter.switch, putter)
|
||||
return item
|
||||
|
||||
if not block:
|
||||
timeout = 0
|
||||
|
||||
waiter = Waiter()
|
||||
timeout = Timeout.start_new(timeout, Empty)
|
||||
try:
|
||||
self.getters.append(waiter)
|
||||
if self.putters:
|
||||
self._schedule_unlock()
|
||||
return waiter.get()
|
||||
except:
|
||||
self.getters.remove(waiter)
|
||||
raise
|
||||
finally:
|
||||
timeout.cancel()
|
||||
|
||||
def get_nowait(self):
|
||||
return self.get(False)
|
||||
|
||||
def _unlock(self):
|
||||
while self.putters and self.getters:
|
||||
getter = self.getters.popleft()
|
||||
item, putter = self.putters.popleft()
|
||||
getter.switch(item)
|
||||
putter.switch(putter)
|
||||
|
||||
def _schedule_unlock(self):
|
||||
if not self._event_unlock:
|
||||
self._event_unlock = self.hub.loop.run_callback(self._unlock)
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
result = self.get()
|
||||
if result is StopIteration:
|
||||
raise result
|
||||
return result
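# Illustrative sketch (not part of the original module): a Channel has no
# buffer, so put() and get() rendezvous with each other.  The helper name and
# the use of gevent.spawn are assumptions for this example only.
def _example_channel():
    import gevent

    ch = Channel()
    assert ch.qsize() == 0 and ch.full() and ch.empty()

    def producer():
        ch.put('ping')            # blocks until a getter is waiting

    g = gevent.spawn(producer)
    assert ch.get() == 'ping'     # wakes the blocked producer
    g.join()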