code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
"""
None Object implementation
ok and tested
"""
from pypy.objspace.std.objspace import *
class W_NoneObject(W_Object):
    """Implementation of the app-level 'None' singleton."""
    from pypy.objspace.std.nonetype import none_typedef as typedef

    def unwrap(w_self, space):
        # the interp-level value of app-level None is interp-level None
        return None
registerimplementation(W_NoneObject)
W_NoneObject.w_None = W_NoneObject()
def nonzero__None(space, w_none):
    """None is always false: answer with the space's False singleton."""
    return space.w_False
def repr__None(space, w_none):
    """repr(None) is the wrapped string 'None'."""
    return space.wrap('None')
register_all(vars())
| Python |
from pypy.objspace.std.objspace import *
from pypy.objspace.std.stringobject import W_StringObject
from pypy.objspace.std.unicodeobject import delegate_String2Unicode
from pypy.objspace.std.stringtype import joined, wrapstr
class W_StringJoinObject(W_Object):
    """A lazily-concatenated string: a list of chunks that is only joined
    into one string when the value is actually needed."""
    from pypy.objspace.std.stringtype import str_typedef as typedef

    def __init__(w_self, joined_strs, until=-1):
        # joined_strs: list of interp-level strings; only the first 'until'
        # entries belong to this object (the list may be shared with other
        # join objects and carry extra entries past 'until')
        w_self.joined_strs = joined_strs
        if until == -1:
            until = len(joined_strs)
        w_self.until = until

    def force(w_self, always=False):
        # Collapse the live chunks into a single string and reset the
        # object to hold exactly that one chunk.  With always=False, an
        # already-collapsed object returns its single chunk directly.
        if w_self.until == 1 and not always:
            return w_self.joined_strs[0]
        res = "".join(w_self.joined_strs[:w_self.until])
        w_self.joined_strs = [res]
        w_self.until = 1
        return res

    def __repr__(w_self):
        """ representation for debugging purposes """
        return "%s(%r, %r)" % (
            w_self.__class__.__name__, w_self.joined_strs, w_self.until)

    def unwrap(w_self, space):
        # interp-level value: the fully joined string
        return w_self.force()
registerimplementation(W_StringJoinObject)
def delegate_join2str(space, w_strjoin):
    """Delegation join -> str: force the lazy join, wrap the result."""
    forced = w_strjoin.force()
    return wrapstr(space, forced)
def delegate_join2unicode(space, w_strjoin):
    """Delegation join -> unicode: force first, then reuse the existing
    String -> Unicode delegation."""
    forced = wrapstr(space, w_strjoin.force())
    return delegate_String2Unicode(space, forced)
def len__StringJoin(space, w_self):
    """Total length without forcing: sum the lengths of the live chunks."""
    total = sum(len(chunk) for chunk in w_self.joined_strs[:w_self.until])
    return space.wrap(total)
def str_w__StringJoin(space, w_str):
    """Unwrap to an interp-level string by forcing the join."""
    return w_str.force()
def add__StringJoin_StringJoin(space, w_self, w_other):
    """Lazy concatenation of two joins: append w_other's live chunks onto
    w_self's chunk list and build the result on that (shared) list.

    NOTE(review): the result object aliases w_self.joined_strs; w_self
    still only owns the first w_self.until entries.  Correctness relies on
    every reader respecting 'until' -- confirm before changing."""
    if len(w_self.joined_strs) > w_self.until:
        # a stale tail from a previous concatenation is present:
        # collapse w_self to a single chunk before extending
        w_self.force(True)
    w_self.joined_strs.extend(w_other.joined_strs[:w_other.until])
    return W_StringJoinObject(w_self.joined_strs)
def add__StringJoin_String(space, w_self, w_other):
    """Lazy concatenation of a join and a plain string: append the string
    as one more chunk on w_self's (shared) list.

    NOTE(review): same aliasing caveat as add__StringJoin_StringJoin."""
    if len(w_self.joined_strs) > w_self.until:
        # collapse first if a stale tail is present
        w_self.force(True)
    other = space.str_w(w_other)
    w_self.joined_strs.append(other)
    return W_StringJoinObject(w_self.joined_strs)
def str__StringJoin(space, w_str):
    """str() of an exact string-join is the join object itself."""
    # you cannot get subclasses of W_StringObject here
    assert type(w_str) is W_StringJoinObject
    return w_str
from pypy.objspace.std import stringtype
register_all(vars(), stringtype)
| Python |
"""
Reviewed 03-06-22
Sequence-iteration is correctly implemented, thoroughly
tested, and complete. The only missing feature is support
for function-iteration.
"""
from pypy.objspace.std.objspace import *
class W_SeqIterObject(W_Object):
    """Forward iterator over any sequence, driven by space.getitem."""
    from pypy.objspace.std.itertype import iter_typedef as typedef

    def __init__(w_self, w_seq, index=0):
        w_self.w_seq = w_seq    # set to None once the iterator is exhausted
        w_self.index = index    # next position to fetch
class W_ReverseSeqIterObject(W_Object):
    """Backward iterator: starts at len(seq) + index (index defaults to -1,
    i.e. the last element) and counts down towards zero."""
    from pypy.objspace.std.itertype import reverse_iter_typedef as typedef

    def __init__(w_self, space, w_seq, index=-1):
        w_self.w_seq = w_seq
        w_self.w_len = space.len(w_seq)     # length at creation time
        w_self.index = space.int_w(w_self.w_len) + index
registerimplementation(W_SeqIterObject)
registerimplementation(W_ReverseSeqIterObject)
def iter__SeqIter(space, w_seqiter):
    """iter() of an iterator returns the iterator itself."""
    return w_seqiter
def next__SeqIter(space, w_seqiter):
    """Return the next item, or raise app-level StopIteration when done.

    On exhaustion (or any error from getitem) w_seq is dropped so the
    sequence can be freed and later calls fail fast."""
    if w_seqiter.w_seq is None:
        raise OperationError(space.w_StopIteration, space.w_None)
    try:
        w_item = space.getitem(w_seqiter.w_seq, space.wrap(w_seqiter.index))
    except OperationError, e:
        w_seqiter.w_seq = None
        # only IndexError is turned into StopIteration; anything else
        # propagates to the caller
        if not e.match(space, space.w_IndexError):
            raise
        raise OperationError(space.w_StopIteration, space.w_None)
    w_seqiter.index += 1
    return w_item
def len__SeqIter(space, w_seqiter):
    """Remaining-length hint: len(seq) - index, clamped at zero;
    0 once the iterator is exhausted."""
    if w_seqiter.w_seq is None:
        return space.wrap(0)
    w_remaining = space.sub(space.len(w_seqiter.w_seq),
                            space.wrap(w_seqiter.index))
    if space.is_true(space.lt(w_remaining, space.wrap(0))):
        return space.wrap(0)
    return w_remaining
def iter__ReverseSeqIter(space, w_seqiter):
    """iter() of a reversed iterator returns the iterator itself."""
    return w_seqiter
def next__ReverseSeqIter(space, w_seqiter):
    """Return the next item walking backwards; StopIteration when the
    index goes below zero or the sequence reference was dropped."""
    if w_seqiter.w_seq is None or w_seqiter.index < 0:
        raise OperationError(space.w_StopIteration, space.w_None)
    try:
        w_item = space.getitem(w_seqiter.w_seq, space.wrap(w_seqiter.index))
        w_seqiter.index -= 1
    except OperationError, e:
        # exhaust on any error; only IndexError becomes StopIteration
        w_seqiter.w_seq = None
        if not e.match(space, space.w_IndexError):
            raise
        raise OperationError(space.w_StopIteration, space.w_None)
    return w_item
def len__ReverseSeqIter(space, w_seqiter):
    """Remaining-length hint for a reversed iterator: index + 1, clamped
    at zero.  If the sequence shrank below the current index, the
    iterator is marked exhausted."""
    if w_seqiter.w_seq is None:
        return space.wrap(0)
    index = w_seqiter.index+1
    w_length = space.len(w_seqiter.w_seq)
    # if length of sequence is less than index :exhaust iterator
    # NOTE(review): this uses '>' -- index == len(seq) (possible when the
    # sequence shrank by exactly one) is NOT treated as exhausted; confirm
    # whether '>=' was intended.
    if space.is_true(space.gt(space.wrap(w_seqiter.index), w_length)):
        w_len = space.wrap(0)
        w_seqiter.w_seq = None
    else:
        w_len = space.wrap(index)
    if space.is_true(space.lt(w_len, space.wrap(0))):
        w_len = space.wrap(0)
    return w_len
register_all(vars())
| Python |
"""
For reference, the MRO algorithm of Python 2.3.
"""
def mro(cls):
    """Compute the linearization of cls with the Python 2.3 MRO algorithm:
    merge the MROs of the bases plus the local precedence list, repeatedly
    taking the first unblocked head."""
    merge_lists = [mro(base) for base in cls.__bases__]
    merge_lists.append([cls] + list(cls.__bases__))
    result = []
    while merge_lists:
        for current in merge_lists:
            head = current[0]
            if blockinglist(head, merge_lists) is None:
                break           # good candidate: blocked by no tail
        else:
            mro_error(merge_lists)      # no candidate found
        assert head not in result
        result.append(head)
        # strip the chosen head from every list, dropping emptied lists
        for pos in range(len(merge_lists) - 1, -1, -1):
            if merge_lists[pos][0] == head:
                del merge_lists[pos][0]
                if not merge_lists[pos]:
                    del merge_lists[pos]
    return result
def blockinglist(candidate, orderlists):
    """Return the first list whose *tail* contains candidate, or None.

    A candidate may only be taken as the next MRO entry when no list
    still orders another class before it."""
    for ordering in orderlists:
        tail = ordering[1:]
        if candidate in tail:
            return ordering
    return None  # good candidate
def mro_error(orderlists):
    """Diagnose an MRO conflict: follow the chain of blocking lists to
    reconstruct the cycle of classes, then raise TypeError naming it.
    Only called when no merge candidate can be found."""
    cycle = []
    candidate = orderlists[0][0]
    while candidate not in cycle:
        cycle.append(candidate)
        nextblockinglist = blockinglist(candidate, orderlists)
        candidate = nextblockinglist[0]
    # avoid the only use of list.index in the PyPy code base:
    i = 0
    for c in cycle:
        if c == candidate:
            break
        i += 1
    del cycle[:i]
    cycle.append(candidate)
    cycle.reverse()
    names = [cls.__name__ for cls in cycle]
    # instance-raise form: equivalent to the old "raise TypeError, msg"
    # statement but valid syntax on every Python version
    raise TypeError("Cycle among base classes: " + ' < '.join(names))
def mronames(cls):
    """Return the class names along mro(cls), in order."""
    return [c.__name__ for c in mro(cls)]
if __name__ == '__main__':
    # Demo: the class hierarchy of "example 9" plus two extra classes;
    # prints the computed linearizations (Python 2 print statements).
    class ex_9:
        #O = object
        class O: pass
        class A(O): pass
        class B(O): pass
        class C(O): pass
        class D(O): pass
        class E(O): pass
        class F(O): pass
        class K1(A,B,C): pass
        class K2(D,F,B,E): pass
        class K3(D,A): pass
        class Z(K1,K2,F,K3): pass
        class ZM(K1,K2,K3): pass
        #print ZM.__mro__
    print 'K1:', mronames(ex_9.K1)
    print 'K2:', mronames(ex_9.K2)
    print 'K3:', mronames(ex_9.K3)
    print mronames(ex_9.ZM)
    print mronames(ex_9.Z)
| Python |
from pypy.objspace.std.register_all import register_all
from pypy.interpreter.baseobjspace import ObjSpace, Wrappable
from pypy.interpreter.error import OperationError, debug_print
from pypy.interpreter.typedef import get_unique_interplevel_subclass
from pypy.interpreter.argument import Arguments
from pypy.interpreter import pyframe
from pypy.rlib.objectmodel import instantiate
from pypy.interpreter.gateway import PyPyCacheDir
from pypy.tool.cache import Cache
from pypy.tool.sourcetools import func_with_new_name
from pypy.objspace.std.model import W_Object, UnwrapError
from pypy.objspace.std.model import W_ANY, StdObjSpaceMultiMethod, StdTypeModel
from pypy.objspace.std.multimethod import FailedToImplement
from pypy.objspace.descroperation import DescrOperation
from pypy.objspace.std import stdtypedef
from pypy.rlib.rarithmetic import base_int
from pypy.rlib.objectmodel import we_are_translated
from pypy.rlib.jit import hint, we_are_jitted
from pypy.objspace.flow.flowcontext import PyFrame as FlowPyFrame
import sys
import os
import __builtin__
#check for sets
#check for sets
try:
    # probe for the built-in set type (Python 2.4+)
    s = set()
    del s
except NameError:
    try:
        # older Pythons: fall back to the 'sets' module
        from sets import Set as set
        from sets import ImmutableSet as frozenset
    except ImportError:
        # no set type at all: install a dummy class so that the
        # isinstance() checks in wrap() simply never match
        class DummySet(object):pass
        set = DummySet
        frozenset = DummySet
_registered_implementations = {}
def registerimplementation(implcls):
    """Record a W_Object implementation class for the type model."""
    # hint to objspace.std.model to register the implementation class
    assert issubclass(implcls, W_Object)
    _registered_implementations[implcls] = True
##################################################################
class StdObjSpace(ObjSpace, DescrOperation):
"""The standard object space, implementing a general-purpose object
library in Restricted Python."""
PACKAGE_PATH = 'objspace.std'
def setoptions(self, **kwds):
    """Apply space options; only 'oldstyle' is recognized here."""
    if "oldstyle" in kwds:
        self.config.objspace.std.oldstyle = kwds["oldstyle"]
def initialize(self):
    "NOT_RPYTHON: only for initializing the space."
    self._typecache = Cache()
    # Import all the object types and implementations
    self.model = StdTypeModel(self.config)

    # Frame class specialized for this space: depending on config options,
    # individual bytecodes get hand-optimized implementations.
    class StdObjSpaceFrame(FlowPyFrame):
        if self.config.objspace.std.optimized_int_add:
            if self.config.objspace.std.withsmallint:
                def BINARY_ADD(f, oparg, *ignored):
                    # fast path: smallint + smallint without multimethod dispatch
                    from pypy.objspace.std.smallintobject import \
                         W_SmallIntObject, add__SmallInt_SmallInt
                    w_2 = f.popvalue()
                    w_1 = f.popvalue()
                    if type(w_1) is W_SmallIntObject and type(w_2) is W_SmallIntObject:
                        try:
                            w_result = add__SmallInt_SmallInt(f.space, w_1, w_2)
                        except FailedToImplement:
                            w_result = f.space.add(w_1, w_2)
                    else:
                        w_result = f.space.add(w_1, w_2)
                    f.pushvalue(w_result)
            else:
                def BINARY_ADD(f, oparg, *ignored):
                    # fast path: int + int without multimethod dispatch
                    from pypy.objspace.std.intobject import \
                         W_IntObject, add__Int_Int
                    w_2 = f.popvalue()
                    w_1 = f.popvalue()
                    if type(w_1) is W_IntObject and type(w_2) is W_IntObject:
                        try:
                            w_result = add__Int_Int(f.space, w_1, w_2)
                        except FailedToImplement:
                            w_result = f.space.add(w_1, w_2)
                    else:
                        w_result = f.space.add(w_1, w_2)
                    f.pushvalue(w_result)
        if self.config.objspace.std.optimized_list_getitem:
            def BINARY_SUBSCR(f, *ignored):
                # fast path: list[int] indexing
                w_2 = f.popvalue()
                w_1 = f.popvalue()
                if type(w_1) is W_ListObject and type(w_2) is W_IntObject:
                    try:
                        w_result = w_1.wrappeditems[w_2.intval]
                    except IndexError:
                        raise OperationError(f.space.w_IndexError,
                                             f.space.wrap("list index out of range"))
                else:
                    w_result = f.space.getitem(w_1, w_2)
                f.pushvalue(w_result)
        if self.config.objspace.opcodes.CALL_LIKELY_BUILTIN:
            def CALL_LIKELY_BUILTIN(f, oparg, *ignored):
                # call a builtin looked up by index, checking first that
                # the module globals do not shadow it
                from pypy.module.__builtin__ import OPTIMIZED_BUILTINS, Module
                from pypy.objspace.std.dictmultiobject import W_DictMultiObject
                w_globals = f.w_globals
                num = oparg >> 8
                assert isinstance(w_globals, W_DictMultiObject)
                w_value = w_globals.implementation.get_builtin_indexed(num)
                if w_value is None:
                    builtins = f.get_builtin()
                    assert isinstance(builtins, Module)
                    w_builtin_dict = builtins.w_dict
                    assert isinstance(w_builtin_dict, W_DictMultiObject)
                    w_value = w_builtin_dict.implementation.get_builtin_indexed(num)
##                    if w_value is not None:
##                        print "CALL_LIKELY_BUILTIN fast"
                if w_value is None:
                    varname = OPTIMIZED_BUILTINS[num]
                    message = "global name '%s' is not defined" % varname
                    raise OperationError(f.space.w_NameError,
                                         f.space.wrap(message))
                nargs = oparg & 0xff
                w_function = w_value
                try:
                    w_result = f.space.call_valuestack(w_function, nargs, f)
                    # XXX XXX fix the problem of resume points!
                    #rstack.resume_point("CALL_FUNCTION", f, nargs, returns=w_result)
                finally:
                    f.dropvalues(nargs)
                f.pushvalue(w_result)
        if self.config.objspace.std.logspaceoptypes:
            # wrap every binop/unaryop opcode so that the operand types
            # are logged into _space_op_types before dispatching
            _space_op_types = []
            for name, func in pyframe.PyFrame.__dict__.iteritems():
                if hasattr(func, 'binop'):
                    operationname = func.binop
                    def make_opimpl(operationname):
                        def opimpl(f, *ignored):
                            operation = getattr(f.space, operationname)
                            w_2 = f.popvalue()
                            w_1 = f.popvalue()
                            if we_are_translated():
                                s = operationname + ' ' + str(w_1) + ' ' + str(w_2)
                            else:
                                s = operationname + ' ' + w_1.__class__.__name__ + ' ' + w_2.__class__.__name__
                            f._space_op_types.append(s)
                            w_result = operation(w_1, w_2)
                            f.pushvalue(w_result)
                        return func_with_new_name(opimpl, "opcode_impl_for_%s" % operationname)
                    locals()[name] = make_opimpl(operationname)
                elif hasattr(func, 'unaryop'):
                    operationname = func.unaryop
                    def make_opimpl(operationname):
                        def opimpl(f, *ignored):
                            operation = getattr(f.space, operationname)
                            w_1 = f.popvalue()
                            if we_are_translated():
                                s = operationname + ' ' + str(w_1)
                            else:
                                s = operationname + ' ' + w_1.__class__.__name__
                            f._space_op_types.append(s)
                            w_result = operation(w_1)
                            f.pushvalue(w_result)
                        return func_with_new_name(opimpl, "opcode_impl_for_%s" % operationname)
                    locals()[name] = make_opimpl(operationname)

    self.FrameClass = StdObjSpaceFrame

    # XXX store the dict class on the space to access it in various places
    if self.config.objspace.std.withmultidict:
        from pypy.objspace.std import dictmultiobject
        self.DictObjectCls = dictmultiobject.W_DictMultiObject
        self.emptydictimpl = dictmultiobject.EmptyDictImplementation(self)
        if self.config.objspace.std.withbucketdict:
            from pypy.objspace.std import dictbucket
            self.DefaultDictImpl = dictbucket.BucketDictImplementation
        else:
            self.DefaultDictImpl = dictmultiobject.RDictImplementation
    else:
        from pypy.objspace.std import dictobject
        self.DictObjectCls = dictobject.W_DictObject

    # install all the MultiMethods into the space instance
    for name, mm in self.MM.__dict__.items():
        if isinstance(mm, StdObjSpaceMultiMethod) and not hasattr(self, name):
            if name.endswith('_w'): # int_w, str_w...: these do not return a wrapped object
                func = mm.install_not_sliced(self.model.typeorder, baked_perform_call=True)
            else:
                exprargs, expr, miniglobals, fallback = (
                    mm.install_not_sliced(self.model.typeorder, baked_perform_call=False))
                func = stdtypedef.make_perform_trampoline('__mm_'+name,
                                                          exprargs, expr, miniglobals,
                                                          mm)
                # e.g. add(space, w_x, w_y)
            def make_boundmethod(func=func):
                def boundmethod(*args):
                    return func(self, *args)
                return func_with_new_name(boundmethod, 'boundmethod_'+name)
            boundmethod = make_boundmethod()
            setattr(self, name, boundmethod)  # store into 'space' instance

    # hack to avoid imports in the time-critical functions below
    for cls in self.model.typeorder:
        globals()[cls.__name__] = cls
    for cls in self.model.imported_but_not_registered:
        globals()[cls.__name__] = cls

    # singletons
    self.w_None = W_NoneObject.w_None
    self.w_False = W_BoolObject.w_False
    self.w_True = W_BoolObject.w_True
    from pypy.interpreter.special import NotImplemented, Ellipsis
    self.w_NotImplemented = self.wrap(NotImplemented(self))
    self.w_Ellipsis = self.wrap(Ellipsis(self))

    # types: publish a w_<name> attribute for every python type
    for typedef in self.model.pythontypes:
        w_type = self.gettypeobject(typedef)
        setattr(self, 'w_' + typedef.name, w_type)

    # exceptions & builtins
    w_mod = self.setup_exceptions()
    self.make_builtins()
    self.sys.setmodule(w_mod)

    # dummy old-style classes types (replaced by setup_old_style_classes)
    self.w_classobj = W_TypeObject(self, 'classobj', [self.w_object], {})
    self.w_instance = W_TypeObject(self, 'instance', [self.w_object], {})
    self.setup_old_style_classes()

    # fix up a problem where multimethods apparently don't
    # like to define this at interp-level
    # HACK HACK HACK: temporarily pretend 'dict' is a heap type so the
    # app-level snippet below may attach 'fromkeys' to it
    from pypy.objspace.std.typeobject import _HEAPTYPE
    old_flags = self.w_dict.__flags__
    self.w_dict.__flags__ |= _HEAPTYPE
    self.appexec([self.w_dict], """
        (dict):
            def fromkeys(cls, seq, value=None):
                r = cls()
                for s in seq:
                    r[s] = value
                return r
            dict.fromkeys = classmethod(fromkeys)
    """)
    self.w_dict.__flags__ = old_flags

    if self.config.objspace.std.oldstyle:
        self.enable_old_style_classes_as_default_metaclass()

    # final setup
    self.setup_builtin_modules()
    # Adding transparent proxy call
    if self.config.objspace.std.withtproxy:
        w___pypy__ = self.getbuiltinmodule("__pypy__")
        from pypy.objspace.std.transparent import app_proxy, app_proxy_controller
        self.setattr(w___pypy__, self.wrap('tproxy'),
                     self.wrap(app_proxy))
        self.setattr(w___pypy__, self.wrap('get_tproxy_controller'),
                     self.wrap(app_proxy_controller))
def enable_old_style_classes_as_default_metaclass(self):
    """Make 'classobj' the implicit metaclass of app-level class statements."""
    self.setitem(self.builtin.w_dict, self.wrap('__metaclass__'), self.w_classobj)

def enable_new_style_classes_as_default_metaclass(self):
    """Undo the above; a missing '__metaclass__' key simply means it was
    never installed, so KeyError is swallowed."""
    space = self
    try:
        self.delitem(self.builtin.w_dict, self.wrap('__metaclass__'))
    except OperationError, e:
        if not e.match(space, space.w_KeyError):
            raise

def setup_old_style_classes(self):
    """NOT_RPYTHON"""
    # sanity check that this approach is working and is not too late
    w_mod, w_dic = self.create_builtin_module('_classobj.py', 'classobj')
    w_purify = self.getitem(w_dic, self.wrap('purify'))
    w_classobj = self.getitem(w_dic, self.wrap('classobj'))
    w_instance = self.getitem(w_dic, self.wrap('instance'))
    self.call_function(w_purify)
    assert not self.is_true(self.contains(self.builtin.w_dict,self.wrap('_classobj'))),"app-level code has seen dummy old style classes"
    assert not self.is_true(self.contains(self.builtin.w_dict,self.wrap('_instance'))),"app-level code has seen dummy old style classes"
    # replace the dummy placeholders installed by initialize()
    self.w_classobj = w_classobj
    self.w_instance = w_instance

def create_builtin_module(self, pyname, publicname):
    """NOT_RPYTHON
    helper function which returns the wrapped module and its dict.
    """
    # generate on-the-fly: compile pypy/lib/<pyname> and interpret it
    # at app-level under the name <publicname>
    class Fake: pass
    fake = Fake()
    from pypy import lib
    fname = os.path.join(os.path.split(lib.__file__)[0], pyname)
    fake.filename = fname
    fake.code = compile(file(fname).read(), fname, "exec")
    fake.modname = publicname
    w_dic = PyPyCacheDir.build_applevelinterp_dict(fake, self)
    from pypy.interpreter.module import Module
    mod = Module(self, self.wrap(publicname), w_dic)
    w_mod = self.wrap(mod)
    return w_mod, w_dic

def setup_exceptions(self):
    """NOT_RPYTHON"""
    ## hacking things in
    def call(w_type, w_args):
        # minimal replacement for space.call() used while the exceptions
        # module is bootstrapped: builds a W_TypeObject directly
        space = self
        # too early for unpackiterable as well :-(
        name = space.unwrap(space.getitem(w_args, space.wrap(0)))
        bases = space.unpacktuple(space.getitem(w_args, space.wrap(1)))
        dic = space.unwrap(space.getitem(w_args, space.wrap(2)))
        dic = dict([(key,space.wrap(value)) for (key, value) in dic.items()])
        bases = list(bases)
        if not bases:
            bases = [space.w_object]
        res = W_TypeObject(space, name, bases, dic)
        res.ready()
        return res
    try:
        # note that we hide the real call method by an instance variable!
        self.call = call
        mod, w_dic = self.create_builtin_module('_exceptions.py', 'exceptions')
        self.w_IndexError = self.getitem(w_dic, self.wrap("IndexError"))
        self.w_StopIteration = self.getitem(w_dic, self.wrap("StopIteration"))
    finally:
        del self.call  # revert to the real call method
    # publish every non-dunder name of the module as self.w_<ExcName>
    names_w = self.unpackiterable(self.call_function(self.getattr(w_dic, self.wrap("keys"))))
    for w_name in names_w:
        name = self.str_w(w_name)
        if not name.startswith('__'):
            excname = name
            w_exc = self.getitem(w_dic, w_name)
            setattr(self, "w_"+excname, w_exc)
    return mod
def createexecutioncontext(self):
    """Create an execution context extended with std-objspace caches."""
    # add space specific fields to execution context
    ec = ObjSpace.createexecutioncontext(self)
    ec._py_repr = self.newdict()
    if self.config.objspace.std.withmethodcache:
        # fixed-size direct-mapped method cache, indexed by hash
        SIZE = 1 << self.config.objspace.std.methodcachesizeexp
        ec.method_cache_versions = [None] * SIZE
        ec.method_cache_names = [None] * SIZE
        ec.method_cache_lookup_where = [(None, None)] * SIZE
        if self.config.objspace.std.withmethodcachecounter:
            ec.method_cache_hits = {}
            ec.method_cache_misses = {}
    return ec

def createframe(self, code, w_globals, closure=None):
    """Build a frame for 'code'; faked CPython code objects get a faked
    frame (untranslated only)."""
    from pypy.objspace.std.fake import CPythonFakeCode, CPythonFakeFrame
    if not we_are_translated() and isinstance(code, CPythonFakeCode):
        return CPythonFakeFrame(self, code, w_globals)
    else:
        return self.FrameClass(self, code, w_globals, closure)
def gettypefor(self, cls):
    """Return this space's type object for an implementation class."""
    return self.gettypeobject(cls.typedef)
def gettypeobject(self, typedef):
    """Return the unique-for-this-space W_TypeObject of a StdTypeDef."""
    # stdtypedef.TypeCache maps each StdTypeDef instance to its
    # unique-for-this-space W_TypeObject instance
    return self.fromcache(stdtypedef.TypeCache).getorbuild(typedef)
def wrap(self, x):
    "Wraps the Python value 'x' into one of the wrapper classes."
    # You might notice that this function is rather conspicuously
    # not RPython.  We can get away with this because the function
    # is specialized (see after the function body).  Also worth
    # noting is that the isinstance's involving integer types
    # behave rather differently to how you might expect during
    # annotation (see pypy/annotation/builtin.py)
    # NOTE: the order of the isinstance checks matters (e.g. bool is
    # tested inside the int branch because bool subclasses int).
    if x is None:
        return self.w_None
    if isinstance(x, W_Object):
        raise TypeError, "attempt to wrap already wrapped object: %s"%(x,)
    if isinstance(x, OperationError):
        raise TypeError, ("attempt to wrap already wrapped exception: %s"%
                          (x,))
    if isinstance(x, int):
        if isinstance(x, bool):
            return self.newbool(x)
        else:
            return self.newint(x)
    if isinstance(x, str):
        from pypy.objspace.std.stringtype import wrapstr
        return wrapstr(self, x)
    if isinstance(x, unicode):
        return W_UnicodeObject([unichr(ord(u)) for u in x]) # xxx
    if isinstance(x, float):
        return W_FloatObject(x)
    if isinstance(x, Wrappable):
        w_result = x.__spacebind__(self)
        #print 'wrapping', x, '->', w_result
        return w_result
    if isinstance(x, base_int):
        return W_LongObject.fromrarith_int(x)
    # _____ below here is where the annotator should not get _____
    # wrap() of a container works on CPython, but the code is
    # not RPython.  Don't use -- it is kept around mostly for tests.
    # Use instead newdict(), newlist(), newtuple().
    if isinstance(x, dict):
        items_w = [(self.wrap(k), self.wrap(v)) for (k, v) in x.iteritems()]
        r = self.newdict()
        r.initialize_content(items_w)
        return r
    if isinstance(x, tuple):
        wrappeditems = [self.wrap(item) for item in list(x)]
        return W_TupleObject(wrappeditems)
    if isinstance(x, list):
        wrappeditems = [self.wrap(item) for item in x]
        return self.newlist(wrappeditems)
    # The following cases are even stranger.
    # Really really only for tests.
    if isinstance(x, long):
        return W_LongObject.fromlong(x)
    if isinstance(x, slice):
        return W_SliceObject(self.wrap(x.start),
                             self.wrap(x.stop),
                             self.wrap(x.step))
    if isinstance(x, complex):
        return W_ComplexObject(x.real, x.imag)
    if isinstance(x, set):
        wrappeditems = [self.wrap(item) for item in x]
        return W_SetObject(self, wrappeditems)
    if isinstance(x, frozenset):
        wrappeditems = [self.wrap(item) for item in x]
        return W_FrozensetObject(self, wrappeditems)
    if x is __builtin__.Ellipsis:
        # '__builtin__.Ellipsis' avoids confusion with special.Ellipsis
        return self.w_Ellipsis
    if self.config.objspace.nofaking:
        # annotation should actually not get here.  If it does, you get
        # an error during rtyping because '%r' is not supported.  It tells
        # you that there was a space.wrap() on a strange object.
        raise OperationError(self.w_RuntimeError,
                             self.wrap("nofaking enabled: refusing "
                                       "to wrap cpython value %r" %(x,)))
    if isinstance(x, type(Exception)) and issubclass(x, Exception):
        w_result = self.wrap_exception_cls(x)
        if w_result is not None:
            return w_result
    # last resort: fake the CPython object at interp-level
    #print "fake-wrapping", x
    from fake import fake_object
    return fake_object(self, x)
wrap._annspecialcase_ = "specialize:wrap"
def wrap_exception_cls(self, x):
    """NOT_RPYTHON"""
    # Map a CPython exception class to the space's prebuilt w_<Name>
    # attribute; None when the space has no such exception type.
    return getattr(self, 'w_' + x.__name__, None)
wrap_exception_cls._annspecialcase_ = "override:wrap_exception_cls"
def unwrap(self, w_obj):
    """Unwrap w_obj to an interp-level value.

    Wrappable objects are returned as-is; W_Object instances delegate to
    their own unwrap(); anything else raises UnwrapError."""
    if isinstance(w_obj, Wrappable):
        return w_obj
    if isinstance(w_obj, W_Object):
        return w_obj.unwrap(self)
    # instance-raise form: equivalent to the old "raise UnwrapError, msg"
    # statement but valid syntax on every Python version
    raise UnwrapError("cannot unwrap: %r" % w_obj)
# --- constructors building wrapped objects from interp-level values ---

def newint(self, intval):
    # this time-critical and circular-imports-funny method was stored
    # on 'self' by initialize()
    # not sure how bad this is:
    from pypy.objspace.std.inttype import wrapint
    return wrapint(self, intval)

def newfloat(self, floatval):
    return W_FloatObject(floatval)

def newcomplex(self, realval, imagval):
    return W_ComplexObject(realval, imagval)

def newset(self, rdict_w):
    return W_SetObject(self, rdict_w)

def newfrozenset(self, rdict_w):
    return W_FrozensetObject(self, rdict_w)

def newlong(self, val): # val is an int
    return W_LongObject.fromint(self, val)

def newtuple(self, list_w):
    assert isinstance(list_w, list)
    return W_TupleObject(list_w)

def newlist(self, list_w):
    # the multilist implementation picks a strategy per content
    if self.config.objspace.std.withmultilist:
        from pypy.objspace.std.listmultiobject import convert_list_w
        return convert_list_w(self, list_w)
    else:
        from pypy.objspace.std.listobject import W_ListObject
        return W_ListObject(list_w)

def newdict(self, track_builtin_shadowing=False):
    # 'wary' dicts watch for assignments that shadow builtins, needed by
    # the CALL_LIKELY_BUILTIN opcode optimization
    if self.config.objspace.opcodes.CALL_LIKELY_BUILTIN and track_builtin_shadowing:
        from pypy.objspace.std.dictmultiobject import W_DictMultiObject
        return W_DictMultiObject(self, wary=True)
    return self.DictObjectCls(self)

def newslice(self, w_start, w_end, w_step):
    return W_SliceObject(w_start, w_end, w_step)

def newstring(self, chars_w):
    # chars_w: list of wrapped character codes (ints in range(256))
    try:
        chars = [chr(self.int_w(w_c)) for w_c in chars_w]
    except ValueError:  # chr(out-of-range)
        raise OperationError(self.w_ValueError,
                             self.wrap("character code not in range(256)"))
    return self.wrap(''.join(chars))

def newunicode(self, chars):
    return W_UnicodeObject(chars)

def newseqiter(self, w_obj):
    return W_SeqIterObject(w_obj)
def type(self, w_obj):
    """Return the app-level type of w_obj (its class is promoted for the JIT)."""
    hint(w_obj.__class__, promote=True)
    return w_obj.getclass(self)

def lookup(self, w_obj, name):
    """Look up 'name' on the type of w_obj."""
    w_type = self.type(w_obj)
    return w_type.lookup(name)
lookup._annspecialcase_ = 'specialize:lookup'

def lookup_in_type_where(self, w_type, name):
    """Like lookup() but on an explicit type object."""
    return w_type.lookup_where(name)
lookup_in_type_where._annspecialcase_ = 'specialize:lookup_in_type_where'
def allocate_instance(self, cls, w_subtype):
    """Allocate the memory needed for an instance of an internal or
    user-defined type, without actually __init__ializing the instance."""
    w_type = self.gettypeobject(cls.typedef)
    if self.is_w(w_type, w_subtype):
        # exact type requested: plain instance of the implementation class
        instance = instantiate(cls)
    elif cls.typedef.acceptable_as_base_class:
        # the purpose of the above check is to avoid the code below
        # to be annotated at all for 'cls' if it is not necessary
        w_subtype = w_type.check_user_subclass(w_subtype)
        # pick the interp-level subclass matching the user subclass's
        # feature set (dict, slots, __del__, weakrefs)
        subcls = get_unique_interplevel_subclass(cls, w_subtype.hasdict, w_subtype.nslots != 0, w_subtype.needsdel, w_subtype.weakrefable)
        instance = instantiate(subcls)
        instance.user_setup(self, w_subtype)
    else:
        raise OperationError(self.w_TypeError,
                             self.wrap("%s.__new__(%s): only for the type %s" % (
                                 w_type.name, w_subtype.getname(self, '?'), w_type.name)))
    assert isinstance(instance, cls)
    return instance
allocate_instance._annspecialcase_ = "specialize:arg(1)"
def unpacktuple(self, w_tuple, expected_length=-1):
    """Return the list of wrapped items of w_tuple.

    If expected_length is given (!= -1), raise ValueError when the
    tuple's actual length differs."""
    assert isinstance(w_tuple, W_TupleObject)
    t = w_tuple.wrappeditems
    if expected_length != -1 and expected_length != len(t):
        # instance-raise form: equivalent to the old "raise ValueError, msg"
        # statement but valid syntax on every Python version
        raise ValueError("got a tuple of length %d instead of %d" % (
            len(t), expected_length))
    return t
def is_(self, w_one, w_two):
    """Identity test, answered with the space's bool singletons."""
    # XXX a bit of hacking to gain more speed
    if w_one is w_two:
        return self.w_True
    return self.w_False
# short-cut
def is_w(self, w_one, w_two):
    """Identity test returning an interp-level bool."""
    return w_one is w_two
def is_true(self, w_obj):
    """Truth-value test with fast paths for bool, None and dicts."""
    # first a few shortcuts for performance
    if type(w_obj) is W_BoolObject:
        return w_obj.boolval
    if w_obj is self.w_None:
        return False
    # then a shortcut for bootstrapping reasons
    if type(w_obj) is self.DictObjectCls:
        return w_obj.len() != 0
    else:
        # general case via the descriptor machinery
        return DescrOperation.is_true(self, w_obj)
def finditem(self, w_obj, w_key):
    """getitem that returns None on a missing key, with a dict fast path."""
    # performance shortcut to avoid creating the OperationError(KeyError)
    if type(w_obj) is not self.DictObjectCls:
        return ObjSpace.finditem(self, w_obj, w_key)
    return w_obj.get(w_key, None)
def set_str_keyed_item(self, w_obj, w_key, w_value, shadows_type=True):
    """setitem with a string key, with a dict fast path that also keeps
    builtin-shadowing tracking informed."""
    # performance shortcut to avoid creating the OperationError(KeyError)
    if type(w_obj) is not self.DictObjectCls:
        self.setitem(w_obj, w_key, w_value)
        return
    w_obj.set_str_keyed_item(w_key, w_value, shadows_type)
# support for the deprecated __getslice__, __setslice__, __delslice__
def getslice(self, w_obj, w_start, w_stop):
    """obj[start:stop] -- prefer __getslice__ with old-style-normalized
    bounds when the type defines it, else fall back to __getitem__."""
    w_descr = self.lookup(w_obj, '__getslice__')
    if w_descr is not None:
        w_start, w_stop = old_slice_range(self, w_obj, w_start, w_stop)
        return self.get_and_call_function(w_descr, w_obj, w_start, w_stop)
    else:
        return ObjSpace.getslice(self, w_obj, w_start, w_stop)

def setslice(self, w_obj, w_start, w_stop, w_sequence):
    """obj[start:stop] = sequence -- same dispatch as getslice()."""
    w_descr = self.lookup(w_obj, '__setslice__')
    if w_descr is not None:
        w_start, w_stop = old_slice_range(self, w_obj, w_start, w_stop)
        self.get_and_call_function(w_descr, w_obj, w_start, w_stop,
                                   w_sequence)
    else:
        ObjSpace.setslice(self, w_obj, w_start, w_stop, w_sequence)

def delslice(self, w_obj, w_start, w_stop):
    """del obj[start:stop] -- same dispatch as getslice()."""
    w_descr = self.lookup(w_obj, '__delslice__')
    if w_descr is not None:
        w_start, w_stop = old_slice_range(self, w_obj, w_start, w_stop)
        self.get_and_call_function(w_descr, w_obj, w_start, w_stop)
    else:
        ObjSpace.delslice(self, w_obj, w_start, w_stop)
class MM:
    "Container for multimethods."
    call = StdObjSpaceMultiMethod('call', 1, ['__call__'], general__args__=True)
    init = StdObjSpaceMultiMethod('__init__', 1, general__args__=True)
    getnewargs = StdObjSpaceMultiMethod('__getnewargs__', 1)
    # special visible multimethods
    int_w = StdObjSpaceMultiMethod('int_w', 1, [])      # returns an unwrapped int
    str_w = StdObjSpaceMultiMethod('str_w', 1, [])      # returns an unwrapped string
    float_w = StdObjSpaceMultiMethod('float_w', 1, [])  # returns an unwrapped float
    uint_w = StdObjSpaceMultiMethod('uint_w', 1, [])    # returns an unwrapped unsigned int (r_uint)
    unichars_w = StdObjSpaceMultiMethod('unichars_w', 1, [])  # returns an unwrapped list of unicode characters
    bigint_w = StdObjSpaceMultiMethod('bigint_w', 1, [])      # returns an unwrapped rbigint
    marshal_w = StdObjSpaceMultiMethod('marshal_w', 1, [], extra_args=['marshaller'])
    log = StdObjSpaceMultiMethod('log', 1, [], extra_args=['base'])
    # add all regular multimethods here: every entry of the operation
    # table not explicitly declared above gets a default multimethod
    # (installed via locals() since the names are only known at runtime)
    for _name, _symbol, _arity, _specialnames in ObjSpace.MethodTable:
        if _name not in locals():
            mm = StdObjSpaceMultiMethod(_symbol, _arity, _specialnames)
            locals()[_name] = mm
            del mm
    # pow's third argument is optional
    pow.extras['defaults'] = (None,)
# what is the maximum value slices can get on CPython?
# we need to stick to that value, because fake.py etc.
class Temp:
    def __getslice__(self, i, j):
        # 'j' receives whatever CPython substitutes for an omitted
        # upper slice bound; we capture and reuse that value
        return j
slice_max = Temp()[:]
del Temp
def old_slice_range(space, w_obj, w_start, w_stop):
    """Only for backward compatibility for __getslice__()&co methods.

    Normalize the bounds the old-fashioned way: a None start becomes 0,
    a None stop becomes slice_max, and negative bounds are offset by
    len(w_obj)."""
    if space.is_w(w_start, space.w_None):
        w_start = space.wrap(0)
    else:
        w_start = space.wrap(space.getindex_w(w_start, None))
        if space.is_true(space.lt(w_start, space.wrap(0))):
            w_start = space.add(w_start, space.len(w_obj))
            # NB. the language ref is inconsistent with the new-style class
            # behavior when w_obj doesn't implement __len__(), so we just
            # ignore this case.
    if space.is_w(w_stop, space.w_None):
        w_stop = space.wrap(slice_max)
    else:
        w_stop = space.wrap(space.getindex_w(w_stop, None))
        if space.is_true(space.lt(w_stop, space.wrap(0))):
            w_stop = space.add(w_stop, space.len(w_obj))
    return w_start, w_stop
| Python |
from pypy.interpreter.error import OperationError
from pypy.objspace.std.objspace import register_all
from pypy.objspace.std.stdtypedef import StdTypeDef, newmethod
from pypy.objspace.std.stdtypedef import SMM
from pypy.interpreter.gateway import NoneNotWrapped
from pypy.interpreter import gateway
# Multimethod declarations for the app-level methods of frozenset;
# the 'doc' keyword becomes the app-level docstring.
frozenset_copy = SMM('copy', 1,
                     doc='Return a shallow copy of a set.')
frozenset_difference = SMM('difference', 2,
                           doc='Return the difference of two sets'
                               ' as a new set.\n\n(i.e. all'
                               ' elements that are in this set but'
                               ' not the other.)')
frozenset_intersection = SMM('intersection', 2,
                             doc='Return the intersection of two sets'
                                 ' as a new set.\n\n(i.e. all'
                                 ' elements that are in both sets.)')
frozenset_issubset = SMM('issubset', 2,
                         doc='Report whether another set contains'
                             ' this set.')
frozenset_issuperset = SMM('issuperset', 2,
                           doc='Report whether this set contains'
                               ' another set.')
frozenset_symmetric_difference = SMM('symmetric_difference', 2,
                                     doc='Return the symmetric difference of'
                                         ' two sets as a new set.\n\n(i.e.'
                                         ' all elements that are in exactly'
                                         ' one of the sets.)')
frozenset_union = SMM('union', 2,
                      doc='Return the union of two sets as a'
                          ' new set.\n\n(i.e. all elements'
                          ' that are in either set.)')
frozenset_reduce = SMM('__reduce__',1,
                       doc='Return state information for'
                           ' pickling.')
register_all(vars(), globals())
def descr__frozenset__new__(space, w_frozensettype, w_iterable=NoneNotWrapped):
    """frozenset.__new__: allocate the instance; the multimethod __init__
    fills it from w_iterable afterwards.

    Like CPython, frozenset(f) returns f itself -- but only when the
    requested type is exactly 'frozenset' AND the argument is an exact
    frozenset; without the type check, a frozenset subclass constructor
    would wrongly return a plain frozenset instead of a subclass instance.
    """
    from pypy.objspace.std.setobject import W_FrozensetObject
    from pypy.objspace.std.setobject import _is_frozenset_exact
    if (space.is_w(w_frozensettype, space.w_frozenset) and
        _is_frozenset_exact(w_iterable)):
        return w_iterable
    w_obj = space.allocate_instance(W_FrozensetObject, w_frozensettype)
    W_FrozensetObject.__init__(w_obj, space, None)
    return w_obj
# the app-level 'frozenset' type object
frozenset_typedef = StdTypeDef("frozenset",
    __doc__ = """frozenset(iterable) --> frozenset object
Build an immutable unordered collection.""",
    __new__ = newmethod(descr__frozenset__new__),
    )
# presumably marks the type as supplying its own __hash__
# (frozensets, unlike sets, are hashable) -- see stdtypedef
frozenset_typedef.custom_hash = True
frozenset_typedef.registermethods(globals())
| Python |
from pypy.objspace.std.objspace import *
from pypy.objspace.std.stringobject import W_StringObject
from pypy.objspace.std.unicodeobject import delegate_String2Unicode
from pypy.objspace.std.sliceobject import W_SliceObject
from pypy.objspace.std.tupleobject import W_TupleObject
from pypy.objspace.std import slicetype
from pypy.objspace.std.inttype import wrapint
from pypy.objspace.std.stringtype import wrapstr, wrapchar, sliced, \
stringendswith, stringstartswith
class W_StringSliceObject(W_Object):
    """A lazily-sliced string: keeps a reference to the underlying string
    together with a [start, stop) window instead of copying characters."""
    from pypy.objspace.std.stringtype import str_typedef as typedef
    def __init__(w_self, str, start, stop):
        assert start >= 0
        assert stop >= 0
        w_self.str = str
        w_self.start = start
        w_self.stop = stop
    def force(w_self):
        """Materialize the window as a plain string, caching the result
        so that subsequent calls are free."""
        if w_self.start == 0 and w_self.stop == len(w_self.str):
            return w_self.str
        materialized = w_self.str[w_self.start:w_self.stop]
        w_self.str = materialized
        w_self.start = 0
        w_self.stop = len(materialized)
        return materialized
    def __repr__(w_self):
        """ representation for debugging purposes """
        return "%s(%r[%d:%d])" % (w_self.__class__.__name__,
                                  w_self.str, w_self.start, w_self.stop)
registerimplementation(W_StringSliceObject)
def delegate_slice2str(space, w_strslice):
    """Delegation: convert a string slice into a real string object."""
    forced = w_strslice.force()
    return wrapstr(space, forced)
def delegate_slice2unicode(space, w_strslice):
    """Delegation: force the slice to a string, then delegate on to unicode."""
    w_forced = wrapstr(space, w_strslice.force())
    return delegate_String2Unicode(space, w_forced)
# ____________________________________________________________
def contains__StringSlice_String(space, w_self, w_sub):
    """'sub in slice': search only inside the visible window."""
    needle = w_sub._value
    found = w_self.str.find(needle, w_self.start, w_self.stop) >= 0
    return space.newbool(found)
def _convert_idx_params(space, w_self, w_sub, w_start, w_end):
    """Normalize the start/end arguments of find-like methods.

    Returns (underlying string, substring, absolute start, absolute end):
    the bounds are clipped to the slice's window and shifted so they index
    into the underlying string directly.
    """
    window = w_self.stop - w_self.start
    sub = w_sub._value
    rel_start = slicetype.adapt_bound(space, window, w_start)
    rel_end = slicetype.adapt_bound(space, window, w_end)
    assert rel_start >= 0
    assert rel_end >= 0
    return (w_self.str, sub, w_self.start + rel_start, w_self.start + rel_end)
def str_find__StringSlice_String_ANY_ANY(space, w_self, w_sub, w_start, w_end):
    """str.find() for string slices; hits are reported relative to the slice."""
    (s, sub, start, end) = _convert_idx_params(space, w_self, w_sub, w_start, w_end)
    pos = s.find(sub, start, end)
    if pos < 0:
        return space.wrap(pos)    # -1: not found
    return space.wrap(pos - w_self.start)
def str_partition__StringSlice_String(space, w_self, w_sub):
    """str.partition() for string slices."""
    s = w_self.str
    separator = w_sub._value
    if not separator:
        raise OperationError(space.w_ValueError,
                             space.wrap("empty separator"))
    pos = s.find(separator, w_self.start, w_self.stop)
    if pos < 0:
        # separator absent: (whole string, '', '')
        return space.newtuple([w_self, space.wrap(''), space.wrap('')])
    return space.newtuple([sliced(space, s, w_self.start, pos),
                           w_sub,
                           sliced(space, s, pos + len(separator), w_self.stop)])
def str_rpartition__StringSlice_String(space, w_self, w_sub):
    """str.rpartition() for string slices."""
    s = w_self.str
    separator = w_sub._value
    if not separator:
        raise OperationError(space.w_ValueError,
                             space.wrap("empty separator"))
    pos = s.rfind(separator, w_self.start, w_self.stop)
    if pos < 0:
        # separator absent: ('', '', whole string)
        return space.newtuple([space.wrap(''), space.wrap(''), w_self])
    return space.newtuple([sliced(space, s, w_self.start, pos),
                           w_sub,
                           sliced(space, s, pos + len(separator), w_self.stop)])
def str_count__StringSlice_String_ANY_ANY(space, w_self, w_arg, w_start, w_end):
    """str.count() for string slices."""
    s, needle, start, end = _convert_idx_params(space, w_self, w_arg,
                                                w_start, w_end)
    return wrapint(space, s.count(needle, start, end))
def str_rfind__StringSlice_String_ANY_ANY(space, w_self, w_sub, w_start, w_end):
    """str.rfind() for string slices; hits are relative to the slice."""
    (s, sub, start, end) = _convert_idx_params(space, w_self, w_sub, w_start, w_end)
    pos = s.rfind(sub, start, end)
    if pos < 0:
        return space.wrap(pos)    # -1: not found
    return space.wrap(pos - w_self.start)
def str_index__StringSlice_String_ANY_ANY(space, w_self, w_sub, w_start, w_end):
    """str.index(): like find(), but raises ValueError when not found."""
    (s, sub, start, end) = _convert_idx_params(space, w_self, w_sub, w_start, w_end)
    pos = s.find(sub, start, end)
    if pos < 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("substring not found in string.index"))
    return space.wrap(pos - w_self.start)
def str_rindex__StringSlice_String_ANY_ANY(space, w_self, w_sub, w_start, w_end):
    """str.rindex(): like rfind(), but raises ValueError when not found."""
    (s, sub, start, end) = _convert_idx_params(space, w_self, w_sub, w_start, w_end)
    pos = s.rfind(sub, start, end)
    if pos < 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("substring not found in string.rindex"))
    return space.wrap(pos - w_self.start)
def str_endswith__StringSlice_String_ANY_ANY(space, w_self, w_suffix, w_start, w_end):
    """str.endswith() with a single string suffix."""
    s, suffix, start, end = _convert_idx_params(space, w_self, w_suffix,
                                                w_start, w_end)
    return space.newbool(stringendswith(s, suffix, start, end))
def str_endswith__StringSlice_Tuple_ANY_ANY(space, w_self, w_suffixes, w_start, w_end):
    """str.endswith() with a tuple of candidate suffixes."""
    # an empty probe substring is used just to normalize start/end
    s, _, start, end = _convert_idx_params(space, w_self,
                                           space.wrap(''), w_start, w_end)
    for w_suffix in space.unpacktuple(w_suffixes):
        if stringendswith(s, space.str_w(w_suffix), start, end):
            return space.w_True
    return space.w_False
def str_startswith__StringSlice_String_ANY_ANY(space, w_self, w_prefix, w_start, w_end):
    """str.startswith() with a single string prefix."""
    s, prefix, start, end = _convert_idx_params(space, w_self, w_prefix,
                                                w_start, w_end)
    return space.newbool(stringstartswith(s, prefix, start, end))
def str_startswith__StringSlice_Tuple_ANY_ANY(space, w_self, w_prefixes, w_start, w_end):
    """str.startswith() with a tuple of candidate prefixes."""
    # an empty probe substring is used just to normalize start/end
    s, _, start, end = _convert_idx_params(space, w_self, space.wrap(''),
                                           w_start, w_end)
    for w_prefix in space.unpacktuple(w_prefixes):
        if stringstartswith(s, space.str_w(w_prefix), start, end):
            return space.w_True
    return space.w_False
def str_w__StringSlice(space, w_str):
    """Unwrap to an interp-level string (forces the lazy slice)."""
    return w_str.force()
def getitem__StringSlice_ANY(space, w_str, w_index):
    # Indexing with anything supporting __index__; returns a 1-char string.
    ival = space.getindex_w(w_index, space.w_IndexError, "string index")
    slen = w_str.stop - w_str.start
    if ival < 0:
        ival += slen    # negative indices count from the end of the window
    if ival < 0 or ival >= slen:
        # build the app-level IndexError instance explicitly
        exc = space.call_function(space.w_IndexError,
                                  space.wrap("string index out of range"))
        raise OperationError(space.w_IndexError, exc)
    return wrapchar(space, w_str.str[w_str.start + ival])
def getitem__StringSlice_Slice(space, w_str, w_slice):
    """Slicing a string slice.

    A step-1 slice produces another lazy W_StringSliceObject over the same
    underlying string; an extended slice materializes a plain string.

    Fixes: removed the unused local 'w = space.wrap', and renamed the
    local 'str' which shadowed the builtin.
    """
    length = w_str.stop - w_str.start
    start, stop, step, sl = w_slice.indices4(space, length)
    if sl == 0:
        return W_StringObject.EMPTY
    s = w_str.str
    start = w_str.start + start        # shift into the underlying string
    if step == 1:
        stop = w_str.start + stop
        assert start >= 0 and stop >= 0
        return W_StringSliceObject(s, start, stop)
    else:
        # extended slice: collect the characters one by one
        result = "".join([s[start + i*step] for i in range(sl)])
        return wrapstr(space, result)
def len__StringSlice(space, w_str):
    """len() of the window, computed without forcing the slice."""
    return space.wrap(w_str.stop - w_str.start)
def str__StringSlice(space, w_str):
    """str() on a string slice; subclass instances are re-wrapped as the
    exact slice type, exact instances are returned unchanged."""
    if type(w_str) is not W_StringSliceObject:
        return W_StringSliceObject(w_str.str, w_str.start, w_str.stop)
    return w_str
from pypy.objspace.std import stringtype
register_all(vars(), stringtype)
| Python |
from pypy.objspace.std.objspace import *
def descr_get_dictproxy(space, w_obj):
    """Descriptor getter: wrap w_obj's dict in a W_DictProxyObject."""
    w_dict = w_obj.getdict()
    return W_DictProxyObject(w_dict)
class W_DictProxyObject(W_Object):
    """Interp-level 'dictproxy': a thin wrapper around another wrapped
    dict; all operations are forwarded via dictproxytype."""
    from pypy.objspace.std.dictproxytype import dictproxy_typedef as typedef
    def __init__(w_self, w_dict):
        w_self.w_dict = w_dict
registerimplementation(W_DictProxyObject)
register_all(vars())
| Python |
from pypy.objspace.std.stdtypedef import *
from pypy.interpreter.error import OperationError
# ____________________________________________________________
def _proxymethod(name):
    """Build a GetSetProperty that forwards attribute 'name' to the
    wrapped dict, after checking the receiver really is a dictproxy."""
    def fget(space, w_obj):
        from pypy.objspace.std.dictproxyobject import W_DictProxyObject
        if isinstance(w_obj, W_DictProxyObject):
            return space.getattr(w_obj.w_dict, space.wrap(name))
        raise OperationError(space.w_TypeError,
                             space.wrap("expected dictproxy"))
    return GetSetProperty(fget)
def _compareproxymethod(opname):
    """Build an interp2app comparison that unwraps the proxy on the left
    side and delegates to the space operation named 'opname'."""
    def compare(space, w_obj1, w_obj2):
        from pypy.objspace.std.dictproxyobject import W_DictProxyObject
        if isinstance(w_obj1, W_DictProxyObject):
            return getattr(space, opname)(w_obj1.w_dict, w_obj2)
        raise OperationError(space.w_TypeError,
                             space.wrap("expected dictproxy"))
    return gateway.interp2app(compare)
# ____________________________________________________________
# Typedef for 'dictproxy': every exposed method simply forwards to the
# wrapped dict through _proxymethod / _compareproxymethod above.
dictproxy_typedef = StdTypeDef("dictproxy",
has_key = _proxymethod('has_key'),
get = _proxymethod('get'),
keys = _proxymethod('keys'),
values = _proxymethod('values'),
items = _proxymethod('items'),
iterkeys = _proxymethod('iterkeys'),
itervalues = _proxymethod('itervalues'),
iteritems = _proxymethod('iteritems'),
copy = _proxymethod('copy'),
__len__ = _proxymethod('__len__'),
__getitem__ = _proxymethod('__getitem__'),
__contains__ = _proxymethod('__contains__'),
__str__ = _proxymethod('__str__'),
__iter__ = _proxymethod('__iter__'),
__lt__ = _compareproxymethod('lt'),
__le__ = _compareproxymethod('le'),
__eq__ = _compareproxymethod('eq'),
__ne__ = _compareproxymethod('ne'),
__gt__ = _compareproxymethod('gt'),
__ge__ = _compareproxymethod('ge'),
)
dictproxy_typedef.registermethods(globals())
| Python |
""" some simple benchmarikng stuff
"""
import random, time
def sample(population, num):
    """Return 'num' items drawn from 'population' *with* replacement
    (a simple stand-in for random.sample)."""
    size = len(population)
    picked = []
    for _ in xrange(num):
        picked.append(population[random.randrange(size)])
    return picked
# monkey-patch: use the simple with-replacement sampler defined above
# in place of the stdlib random.sample
random.sample = sample
def get_random_string(l):
    """Return a random string of exactly l characters.

    Bug fix: the old code returned str(random.sample(...)), i.e. the repr
    of a *list* of characters (e.g. "['a', 'b']"), not an l-character
    string as the benchmark intends.
    """
    strings = 'qwertyuiopasdfghjklzxcvbm,./;QWERTYUIOPASDFGHJKLZXCVBNM!@#$%^&*()_+1234567890-='
    return ''.join(random.sample(strings, l))
def count_operation(name, function):
    """Run function() once, print how long it took, and return its result."""
    print name
    t0 = time.time()
    retval = function()
    tk = time.time()
    print name, " takes: %f" % (tk - t0)
    return retval
def bench_simple_dict(SIZE = 10000):
    """Time creation and key access of a str->float dict of SIZE entries."""
    # SIZE random 20-char keys plus float values
    keys = [get_random_string(20) for i in xrange(SIZE)]
    values = [random.random() for i in xrange(SIZE)]
    lookup_keys = random.sample(keys, 1000)    # keys guaranteed present
    random_keys = [get_random_string(20) for i in xrange(1000)]   # most likely absent
    test_d = count_operation("Creation", lambda : dict(zip(keys, values)))
    def rand_keys(keys):
        # look every key up once, swallowing misses
        for key in keys:
            try:
                test_d[key]
            except KeyError:
                pass
    count_operation("Random key access", lambda : rand_keys(random_keys))
    count_operation("Existing key access", lambda : rand_keys(lookup_keys))
    return test_d
if __name__ == '__main__':
    test_d = bench_simple_dict()
    # only available when running on top of PyPy: show which internal
    # dict implementation was selected for the benchmark dict
    import __pypy__
    print __pypy__.internal_repr(test_d)
    print __pypy__.internal_repr(test_d.iterkeys())
| Python |
import py
from pypy.objspace.std.objspace import *
from pypy.interpreter import gateway
from pypy.module.__builtin__.__init__ import BUILTIN_TO_INDEX, OPTIMIZED_BUILTINS
from pypy.rlib.objectmodel import r_dict, we_are_translated
def _is_str(space, w_key):
    """True iff w_key's type is exactly the wrapped str type."""
    w_keytype = space.type(w_key)
    return space.is_w(w_keytype, space.w_str)
def _is_sane_hash(space, w_lookup_type):
    """ Handles the case of a non string key lookup.
    Types that have a sane hash/eq function should allow us to return True
    directly to signal that the key is not in the dict in any case.
    XXX The types should provide such a flag. """
    # XXX there are many more types
    if space.is_w(w_lookup_type, space.w_NoneType):
        return True
    if space.is_w(w_lookup_type, space.w_int):
        return True
    if space.is_w(w_lookup_type, space.w_bool):
        return True
    return space.is_w(w_lookup_type, space.w_float)
# DictImplementation lattice
# a dictionary starts with an EmptyDictImplementation, and moves down
# in this list:
#
# EmptyDictImplementation
# / \
# SmallStrDictImplementation SmallDictImplementation
# | |
# StrDictImplementation |
# \ /
# RDictImplementation
#
# (in addition, any dictionary can go back to EmptyDictImplementation)
class DictImplementation(object):
    """Abstract base class of all dictionary storage strategies.

    Mutating operations (setitem, setitem_str, delitem) return the
    implementation the dictionary should use afterwards, which lets a
    dict switch strategy as its contents change (see the lattice above).
    Iterators signal exhaustion by returning None from next().
    """
    def get(self, w_lookup):
        #return w_value or None
        raise NotImplementedError("abstract base class")
    def setitem_str(self, w_key, w_value, shadows_type=True):
        #return implementation
        raise NotImplementedError("abstract base class")
    def setitem(self, w_key, w_value):
        #return implementation
        raise NotImplementedError("abstract base class")
    def delitem(self, w_key):
        #return implementation
        raise NotImplementedError("abstract base class")
    def length(self):
        raise NotImplementedError("abstract base class")
    def iteritems(self):
        raise NotImplementedError("abstract base class")
    def iterkeys(self):
        raise NotImplementedError("abstract base class")
    def itervalues(self):
        raise NotImplementedError("abstract base class")
    def keys(self):
        # generic version built on top of iterkeys()
        iterator = self.iterkeys()
        result = []
        while 1:
            w_key = iterator.next()
            if w_key is not None:
                result.append(w_key)
            else:
                return result
    def values(self):
        # generic version built on top of itervalues()
        iterator = self.itervalues()
        result = []
        while 1:
            w_value = iterator.next()
            if w_value is not None:
                result.append(w_value)
            else:
                return result
    def items(self):
        # generic version built on top of iteritems()
        iterator = self.iteritems()
        result = []
        while 1:
            w_item = iterator.next()
            if w_item is not None:
                result.append(w_item)
            else:
                return result
    # the following method only makes sense when the option to use the
    # CALL_LIKELY_BUILTIN opcode is set. Otherwise it won't even be seen
    # by the annotator
    def get_builtin_indexed(self, i):
        w_key = self.space.wrap(OPTIMIZED_BUILTINS[i])
        return self.get(w_key)
    # this method will only be seen when a certain config option is used
    def shadows_anything(self):
        return True
    def set_shadows_anything(self):
        pass
# Iterator Implementation base classes
class IteratorImplementation(object):
    """Base class of dict iterators.

    Subclasses provide next_entry(); next() adds the "changed size during
    iteration" check and the exhaustion protocol (None means done).
    """
    def __init__(self, space, implementation):
        self.space = space
        self.dictimplementation = implementation
        self.len = implementation.length()    # size at iterator creation
        self.pos = 0
    def next(self):
        if self.dictimplementation is None:
            return None    # already exhausted
        if self.len != self.dictimplementation.length():
            self.len = -1   # Make this error state sticky
            raise OperationError(self.space.w_RuntimeError,
                     self.space.wrap("dictionary changed size during iteration"))
        # look for the next entry
        w_result = self.next_entry()
        if w_result is not None:
            self.pos += 1
            return w_result
        # no more entries
        self.dictimplementation = None
        return None
    def length(self):
        # number of remaining entries; 0 once exhausted
        if self.dictimplementation is not None:
            return self.len - self.pos
        return 0
# concrete subclasses of the above
class EmptyDictImplementation(DictImplementation):
    """Strategy of a dict that contains nothing; the first insertion
    upgrades it to a small/str/rdict implementation."""
    def __init__(self, space):
        self.space = space
    def get(self, w_lookup):
        space = self.space
        if not _is_str(space, w_lookup) and not _is_sane_hash(space, w_lookup):
            # give hash a chance to raise an exception
            space.hash(w_lookup)
        return None
    def setitem(self, w_key, w_value):
        # choose the next strategy based on the key type and the
        # withsmalldicts config option
        space = self.space
        if _is_str(space, w_key):
            if space.config.objspace.std.withsmalldicts:
                return SmallStrDictImplementation(space, w_key, w_value)
            else:
                return StrDictImplementation(space).setitem_str(w_key, w_value)
        else:
            if space.config.objspace.std.withsmalldicts:
                return SmallDictImplementation(space, w_key, w_value)
            else:
                return space.DefaultDictImpl(space).setitem(w_key, w_value)
    def setitem_str(self, w_key, w_value, shadows_type=True):
        return StrDictImplementation(self.space).setitem_str(w_key, w_value)
        #return SmallStrDictImplementation(self.space, w_key, w_value)
    def delitem(self, w_key):
        # always a miss, but still let weird keys raise from hashing
        space = self.space
        if not _is_str(space, w_key) and not _is_sane_hash(space, w_key):
            # count hash
            space.hash(w_key)
        raise KeyError
    def length(self):
        return 0
    def iteritems(self):
        return EmptyIteratorImplementation(self.space, self)
    def iterkeys(self):
        return EmptyIteratorImplementation(self.space, self)
    def itervalues(self):
        return EmptyIteratorImplementation(self.space, self)
    def keys(self):
        return []
    def values(self):
        return []
    def items(self):
        return []
class EmptyIteratorImplementation(IteratorImplementation):
    """Iterator over an empty dict: exhausted from the very start."""
    def next_entry(self):
        return None
class Entry(object):
    """One slot of a SmallDictImplementation: the cached hash together
    with the wrapped key/value pair.  A slot with w_value None is free."""
    def __init__(self):
        self.hash = 0
        self.w_key = None
        self.w_value = None
    def __repr__(self):
        return '<%r, %r, %r>' % (self.hash, self.w_key, self.w_value)
class SmallDictImplementation(DictImplementation):
    """Fixed-capacity strategy for small dicts with arbitrary keys:
    4 usable Entry slots plus one sentinel slot; overflows into the
    default (r_dict based) implementation."""
    # XXX document the invariants here!
    def __init__(self, space, w_key, w_value):
        self.space = space
        self.entries = [Entry(), Entry(), Entry(), Entry(), Entry()]
        self.entries[0].hash = space.hash_w(w_key)
        self.entries[0].w_key = w_key
        self.entries[0].w_value = w_value
        self.valid = 1    # number of occupied slots
    def _lookup(self, w_key):
        # Linear probe.  The slot at index self.valid is used as a
        # sentinel holding the probe key, so the loop always terminates:
        # it returns either a real match or the sentinel (w_value None).
        hash = self.space.hash_w(w_key)
        i = 0
        last = self.entries[self.valid]
        last.hash = hash
        last.w_key = w_key
        while 1:
            look_entry = self.entries[i]
            if look_entry.hash == hash and self.space.eq_w(look_entry.w_key, w_key):
                return look_entry
            i += 1
    def _convert_to_rdict(self):
        # copy all occupied slots into a fresh default implementation
        newimpl = self.space.DefaultDictImpl(self.space)
        i = 0
        while 1:
            entry = self.entries[i]
            if entry.w_value is None:
                break
            newimpl.setitem(entry.w_key, entry.w_value)
            i += 1
        return newimpl
    def setitem(self, w_key, w_value):
        entry = self._lookup(w_key)
        if entry.w_value is None:
            # new key: overflow to rdict when all 4 real slots are taken
            if self.valid == 4:
                return self._convert_to_rdict().setitem(w_key, w_value)
            self.valid += 1
        entry.w_value = w_value
        return self
    def setitem_str(self, w_key, w_value, shadows_type=True):
        return self.setitem(w_key, w_value)
    def delitem(self, w_key):
        entry = self._lookup(w_key)
        if entry.w_value is not None:
            # shift the following entries down and recycle the freed slot
            for i in range(self.entries.index(entry), self.valid):
                self.entries[i] = self.entries[i+1]
            self.entries[self.valid] = entry
            entry.w_value = None
            self.valid -= 1
            if self.valid == 0:
                return self.space.emptydictimpl
            return self
        else:
            # miss: clear the sentinel key before raising
            entry.w_key = None
            raise KeyError
    def length(self):
        return self.valid
    def get(self, w_lookup):
        entry = self._lookup(w_lookup)
        val = entry.w_value
        if val is None:
            entry.w_key = None    # clear the sentinel key on a miss
        return val
    def iteritems(self):
        # iteration goes through a converted rdict copy
        return self._convert_to_rdict().iteritems()
    def iterkeys(self):
        return self._convert_to_rdict().iterkeys()
    def itervalues(self):
        return self._convert_to_rdict().itervalues()
    def keys(self):
        return [self.entries[i].w_key for i in range(self.valid)]
    def values(self):
        return [self.entries[i].w_value for i in range(self.valid)]
    def items(self):
        return [self.space.newtuple([e.w_key, e.w_value])
                    for e in [self.entries[i] for i in range(self.valid)]]
class StrEntry(object):
    """One slot of a SmallStrDictImplementation: an unwrapped string key
    plus the wrapped value.  A slot with w_value None is free."""
    def __init__(self):
        self.key = None
        self.w_value = None
    def __repr__(self):
        # bug fix: the old repr referenced self.hash, an attribute that
        # StrEntry never defines (unlike Entry), so repr() raised
        # AttributeError instead of showing the slot.
        return '<%r, %r>' % (self.key, self.w_value)
class SmallStrDictImplementation(DictImplementation):
    """Fixed-capacity strategy for small dicts whose keys are all plain
    strings: 4 usable StrEntry slots plus one sentinel slot.  Overflows
    into StrDictImplementation; non-string keys force a conversion to
    the default (r_dict based) implementation."""
    # XXX document the invariants here!
    def __init__(self, space, w_key, w_value):
        self.space = space
        self.entries = [StrEntry(), StrEntry(), StrEntry(), StrEntry(), StrEntry()]
        key = space.str_w(w_key)
        self.entries[0].key = key
        self.entries[0].w_value = w_value
        self.valid = 1    # number of occupied slots
    def _lookup(self, key):
        # Linear probe with the slot at self.valid acting as a sentinel
        # that holds the probe key, guaranteeing termination.  Returns a
        # real match or the sentinel (w_value None).
        assert isinstance(key, str)
        _hash = hash(key)
        i = 0
        last = self.entries[self.valid]
        last.key = key
        while 1:
            look_entry = self.entries[i]
            if hash(look_entry.key) == _hash and look_entry.key == key:
                return look_entry
            i += 1
    def _convert_to_rdict(self):
        # copy all occupied slots into a fresh default implementation
        newimpl = self.space.DefaultDictImpl(self.space)
        i = 0
        while 1:
            entry = self.entries[i]
            if entry.w_value is None:
                break
            newimpl.setitem(self.space.wrap(entry.key), entry.w_value)
            i += 1
        return newimpl
    def _convert_to_sdict(self, w_value):
        # this relies on the fact that the new key is in the entries
        # list already.
        newimpl = StrDictImplementation(self.space)
        i = 0
        while 1:
            entry = self.entries[i]
            if entry.w_value is None:
                # the sentinel slot holds the key being inserted
                newimpl.content[entry.key] = w_value
                break
            newimpl.content[entry.key] = entry.w_value
            i += 1
        return newimpl
    def setitem(self, w_key, w_value):
        if not _is_str(self.space, w_key):
            return self._convert_to_rdict().setitem(w_key, w_value)
        return self.setitem_str(w_key, w_value)
    def setitem_str(self, w_key, w_value, shadows_type=True):
        entry = self._lookup(self.space.str_w(w_key))
        if entry.w_value is None:
            # new key: overflow to a full str-dict when all slots are taken
            if self.valid == 4:
                return self._convert_to_sdict(w_value)
            self.valid += 1
        entry.w_value = w_value
        return self
    def delitem(self, w_key):
        space = self.space
        w_key_type = space.type(w_key)
        if space.is_w(w_key_type, space.w_str):
            entry = self._lookup(space.str_w(w_key))
            if entry.w_value is not None:
                # shift following entries down and recycle the freed slot
                for i in range(self.entries.index(entry), self.valid):
                    self.entries[i] = self.entries[i+1]
                self.entries[self.valid] = entry
                entry.w_value = None
                self.valid -= 1
                if self.valid == 0:
                    return self.space.emptydictimpl
                return self
            else:
                # miss: clear the sentinel key before raising
                entry.key = None
                raise KeyError
        elif _is_sane_hash(self.space, w_key_type):
            raise KeyError
        else:
            return self._convert_to_rdict().delitem(w_key)
    def length(self):
        return self.valid
    def get(self, w_lookup):
        space = self.space
        w_lookup_type = space.type(w_lookup)
        if space.is_w(w_lookup_type, space.w_str):
            entry = self._lookup(space.str_w(w_lookup))
            val = entry.w_value
            if val is None:
                entry.key = None    # clear the sentinel key on a miss
            return val
        elif _is_sane_hash(self.space, w_lookup_type):
            return None
        else:
            return self._convert_to_rdict().get(w_lookup)
    def iteritems(self):
        # iteration goes through a converted rdict copy
        return self._convert_to_rdict().iteritems()
    def iterkeys(self):
        return self._convert_to_rdict().iterkeys()
    def itervalues(self):
        return self._convert_to_rdict().itervalues()
    def keys(self):
        return [self.space.wrap(self.entries[i].key) for i in range(self.valid)]
    def values(self):
        return [self.entries[i].w_value for i in range(self.valid)]
    def items(self):
        return [self.space.newtuple([self.space.wrap(e.key), e.w_value])
                    for e in [self.entries[i] for i in range(self.valid)]]
class StrDictImplementation(DictImplementation):
    """Strategy for dicts whose keys are all plain strings: stores
    unwrapped str keys in an ordinary dict.  Non-string keys force a
    conversion to the default (r_dict based) implementation."""
    def __init__(self, space):
        self.space = space
        self.content = {}    # maps unwrapped str key -> wrapped value
    def setitem(self, w_key, w_value):
        space = self.space
        if space.is_w(space.type(w_key), space.w_str):
            return self.setitem_str(w_key, w_value)
        else:
            return self._as_rdict().setitem(w_key, w_value)
    def setitem_str(self, w_key, w_value, shadows_type=True):
        self.content[self.space.str_w(w_key)] = w_value
        return self
    def delitem(self, w_key):
        space = self.space
        w_key_type = space.type(w_key)
        if space.is_w(w_key_type, space.w_str):
            del self.content[space.str_w(w_key)]
            if self.content:
                return self
            else:
                # downgrade to the empty implementation
                return space.emptydictimpl
        elif _is_sane_hash(space, w_key_type):
            raise KeyError
        else:
            return self._as_rdict().delitem(w_key)
    def length(self):
        return len(self.content)
    def get(self, w_lookup):
        space = self.space
        w_lookup_type = space.type(w_lookup)
        if space.is_w(w_lookup_type, space.w_str):
            return self.content.get(space.str_w(w_lookup), None)
        elif _is_sane_hash(space, w_lookup_type):
            return None
        else:
            return self._as_rdict().get(w_lookup)
    def iteritems(self):
        return StrItemIteratorImplementation(self.space, self)
    def iterkeys(self):
        return StrKeyIteratorImplementation(self.space, self)
    def itervalues(self):
        return StrValueIteratorImplementation(self.space, self)
    def keys(self):
        space = self.space
        return [space.wrap(key) for key in self.content.iterkeys()]
    def values(self):
        return self.content.values()
    def items(self):
        space = self.space
        return [space.newtuple([space.wrap(key), w_value])
                    for (key, w_value) in self.content.iteritems()]
    def _as_rdict(self):
        # wrap every key and move to the general r_dict implementation
        newimpl = self.space.DefaultDictImpl(self.space)
        for k, w_v in self.content.items():
            newimpl.setitem(self.space.wrap(k), w_v)
        return newimpl
# the following are very close copies of the base classes above
class StrKeyIteratorImplementation(IteratorImplementation):
    """Iterates the keys of a StrDictImplementation, wrapping each one."""
    def __init__(self, space, dictimplementation):
        IteratorImplementation.__init__(self, space, dictimplementation)
        self.iterator = dictimplementation.content.iterkeys()
    def next_entry(self):
        # advance the underlying iterator by at most one step
        for key in self.iterator:
            return self.space.wrap(key)
        return None
class StrValueIteratorImplementation(IteratorImplementation):
    """Iterates the (already wrapped) values of a StrDictImplementation."""
    def __init__(self, space, dictimplementation):
        IteratorImplementation.__init__(self, space, dictimplementation)
        self.iterator = dictimplementation.content.itervalues()
    def next_entry(self):
        # advance the underlying iterator by at most one step
        for w_value in self.iterator:
            return w_value
        return None
class StrItemIteratorImplementation(IteratorImplementation):
    """Iterates (key, value) pairs of a StrDictImplementation as tuples."""
    def __init__(self, space, dictimplementation):
        IteratorImplementation.__init__(self, space, dictimplementation)
        self.iterator = dictimplementation.content.iteritems()
    def next_entry(self):
        # advance the underlying iterator by at most one step
        for key, w_value in self.iterator:
            return self.space.newtuple([self.space.wrap(key), w_value])
        return None
class ShadowDetectingDictImplementation(StrDictImplementation):
    """StrDict variant that records whether any written key could shadow
    an attribute found by w_type.lookup(), plus version-tag tracking."""
    def __init__(self, space, w_type):
        StrDictImplementation.__init__(self, space)
        self.w_type = w_type
        self.original_version_tag = w_type.version_tag
        if self.original_version_tag is None:
            # no version tag available: be conservative
            self._shadows_anything = True
        else:
            self._shadows_anything = False
    def setitem_str(self, w_key, w_value, shadows_type=True):
        if shadows_type:
            self._shadows_anything = True
        return StrDictImplementation.setitem_str(
            self, w_key, w_value, shadows_type)
    def setitem(self, w_key, w_value):
        space = self.space
        if space.is_w(space.type(w_key), space.w_str):
            if not self._shadows_anything:
                # does this key shadow something the type can look up?
                w_obj = self.w_type.lookup(space.str_w(w_key))
                if w_obj is not None:
                    self._shadows_anything = True
            return StrDictImplementation.setitem_str(
                self, w_key, w_value, False)
        else:
            return self._as_rdict().setitem(w_key, w_value)
    def shadows_anything(self):
        # a changed version tag also counts as potential shadowing
        return (self._shadows_anything or
                self.w_type.version_tag is not self.original_version_tag)
    def set_shadows_anything(self):
        self._shadows_anything = True
class WaryDictImplementation(StrDictImplementation):
    """StrDict variant that mirrors assignments/deletions of optimized
    builtin names into the 'shadowed' list (see BUILTIN_TO_INDEX)."""
    def __init__(self, space):
        StrDictImplementation.__init__(self, space)
        self.shadowed = [None] * len(BUILTIN_TO_INDEX)
    def setitem_str(self, w_key, w_value, shadows_type=True):
        key = self.space.str_w(w_key)
        i = BUILTIN_TO_INDEX.get(key, -1)
        if i != -1:
            # remember that this builtin name is shadowed by w_value
            self.shadowed[i] = w_value
        self.content[key] = w_value
        return self
    def delitem(self, w_key):
        space = self.space
        w_key_type = space.type(w_key)
        if space.is_w(w_key_type, space.w_str):
            key = space.str_w(w_key)
            del self.content[key]
            i = BUILTIN_TO_INDEX.get(key, -1)
            if i != -1:
                self.shadowed[i] = None    # the builtin is visible again
            return self
        elif _is_sane_hash(space, w_key_type):
            raise KeyError
        else:
            return self._as_rdict().delitem(w_key)
    def get_builtin_indexed(self, i):
        return self.shadowed[i]
class RDictImplementation(DictImplementation):
    """The general fallback strategy: an r_dict keyed by wrapped objects
    using the space's eq_w/hash_w, with no key-type restrictions."""
    def __init__(self, space):
        self.space = space
        self.content = r_dict(space.eq_w, space.hash_w)
    def __repr__(self):
        return "%s<%s>" % (self.__class__.__name__, self.content)
    def setitem(self, w_key, w_value):
        self.content[w_key] = w_value
        return self
    def setitem_str(self, w_key, w_value, shadows_type=True):
        return self.setitem(w_key, w_value)
    def delitem(self, w_key):
        del self.content[w_key]
        if self.content:
            return self
        else:
            # downgrade to the empty implementation
            return self.space.emptydictimpl
    def length(self):
        return len(self.content)
    def get(self, w_lookup):
        return self.content.get(w_lookup, None)
    def iteritems(self):
        return RDictItemIteratorImplementation(self.space, self)
    def iterkeys(self):
        return RDictKeyIteratorImplementation(self.space, self)
    def itervalues(self):
        return RDictValueIteratorImplementation(self.space, self)
    def keys(self):
        return self.content.keys()
    def values(self):
        return self.content.values()
    def items(self):
        return [self.space.newtuple([w_key, w_val])
                    for w_key, w_val in self.content.iteritems()]
class RDictKeyIteratorImplementation(IteratorImplementation):
    """Iterates the (already wrapped) keys of an RDictImplementation."""
    def __init__(self, space, dictimplementation):
        IteratorImplementation.__init__(self, space, dictimplementation)
        self.iterator = dictimplementation.content.iterkeys()
    def next_entry(self):
        # advance the underlying iterator by at most one step
        for w_key in self.iterator:
            return w_key
        return None
class RDictValueIteratorImplementation(IteratorImplementation):
    """Iterates the (already wrapped) values of an RDictImplementation."""
    def __init__(self, space, dictimplementation):
        IteratorImplementation.__init__(self, space, dictimplementation)
        self.iterator = dictimplementation.content.itervalues()
    def next_entry(self):
        # advance the underlying iterator by at most one step
        for w_value in self.iterator:
            return w_value
        return None
class RDictItemIteratorImplementation(IteratorImplementation):
    """Iterates (key, value) pairs of an RDictImplementation as tuples."""
    def __init__(self, space, dictimplementation):
        IteratorImplementation.__init__(self, space, dictimplementation)
        self.iterator = dictimplementation.content.iteritems()
    def next_entry(self):
        # advance the underlying iterator by at most one step
        for w_key, w_value in self.iterator:
            return self.space.newtuple([w_key, w_value])
        return None
class SharedStructure(object):
    """A node in the tree of shared key layouts.

    'keys' maps a key name to its index in the entries list of the dicts
    that use this structure; a *negative* value ~i instead marks a
    transition to other_structs[i], the structure reached by adding that
    key.  'last_key'/'back_struct' allow undoing the most recent addition.
    """
    def __init__(self, keys=None, length=0,
                 other_structs=None,
                 last_key=None,
                 back_struct=None):
        if keys is None:
            keys = {}
        self.keys = keys
        self.length = length
        self.back_struct = back_struct
        if other_structs is None:
            other_structs = []
        self.other_structs = other_structs
        self.last_key = last_key
        if last_key is not None:
            assert back_struct is not None
        self.propagating = False
    def new_structure(self, added_key):
        # copy only the real (non-negative) key indexes
        keys = {}
        for key, item in self.keys.iteritems():
            if item >= 0:
                keys[key] = item
        new_structure = SharedStructure(keys, self.length + 1,
                                        [], added_key, self)
        new_index = len(keys)
        new_structure.keys[added_key] = new_index
        # record the transition in this structure: ~index into other_structs
        self.keys[added_key] = ~len(self.other_structs)
        self.other_structs.append(new_structure)
        return new_structure
class State(object):
    """Space-cached singleton that owns the root (empty) SharedStructure."""
    def __init__(self, space):
        root = SharedStructure()
        root.propagating = True
        self.empty_structure = root
class SharedDictImplementation(DictImplementation):
    """Strategy where many dicts with the same key layout share a single
    SharedStructure mapping key names to indexes into a per-dict
    'entries' list of values."""
    def __init__(self, space):
        self.space = space
        self.structure = space.fromcache(State).empty_structure
        self.entries = []    # values, indexed via self.structure.keys
    def get(self, w_lookup):
        space = self.space
        w_lookup_type = space.type(w_lookup)
        if space.is_w(w_lookup_type, space.w_str):
            lookup = space.str_w(w_lookup)
            i = self.structure.keys.get(lookup, -1)
            if i < 0:
                # absent, or a transition marker (not a real entry)
                return None
            return self.entries[i]
        elif _is_sane_hash(space, w_lookup_type):
            return None
        else:
            return self._as_rdict().get(w_lookup)
    def setitem(self, w_key, w_value):
        space = self.space
        if space.is_w(space.type(w_key), space.w_str):
            return self.setitem_str(w_key, w_value)
        else:
            return self._as_rdict().setitem(w_key, w_value)
    def setitem_str(self, w_key, w_value, shadows_type=True):
        # m is the transition marker we would store for a brand-new key
        m = ~len(self.structure.other_structs)
        key = self.space.str_w(w_key)
        i = self.structure.keys.get(key, m)
        if i >= 0:
            # key already present: overwrite in place
            self.entries[i] = w_value
            return self
        if not self.structure.propagating:
            # structure not worth growing: switch to a plain str dict
            return self._as_rdict(as_strdict=True).setitem_str(w_key, w_value)
        if i == m:
            # no existing transition for this key: create one
            new_structure = self.structure.new_structure(key)
        else:
            # follow the recorded transition (~i indexes other_structs)
            new_structure = self.structure.other_structs[~i]
            new_structure.propagating = True
        self.entries.append(w_value)
        assert self.structure.length + 1 == new_structure.length
        self.structure = new_structure
        assert self.structure.keys[key] >= 0
        return self
    def delitem(self, w_key):
        space = self.space
        w_key_type = space.type(w_key)
        if space.is_w(w_key_type, space.w_str):
            key = space.str_w(w_key)
            if (self.structure.last_key is not None and
                key == self.structure.last_key):
                # deleting the most recently added key: just step back
                self.entries.pop()
                self.structure = self.structure.back_struct
                return self
            # any other deletion breaks the sharing: fall back to rdict
            return self._as_rdict().delitem(w_key)
        elif _is_sane_hash(space, w_key_type):
            raise KeyError
        else:
            return self._as_rdict().delitem(w_key)
    def length(self):
        return self.structure.length
    def iteritems(self):
        return SharedItemIteratorImplementation(self.space, self)
    def iterkeys(self):
        return SharedKeyIteratorImplementation(self.space, self)
    def itervalues(self):
        return SharedValueIteratorImplementation(self.space, self)
    def keys(self):
        space = self.space
        return [space.wrap(key)
                    for (key, item) in self.structure.keys.iteritems()
                        if item >= 0]
    def values(self):
        return self.entries[:]
    def items(self):
        space = self.space
        return [space.newtuple([space.wrap(key), self.entries[item]])
                    for (key, item) in self.structure.keys.iteritems()
                        if item >= 0]
    def _as_rdict(self, as_strdict=False):
        # copy the real entries (non-negative indexes only) into either a
        # StrDictImplementation or the default implementation
        if as_strdict:
            newimpl = StrDictImplementation(self.space)
        else:
            newimpl = self.space.DefaultDictImpl(self.space)
        for k, i in self.structure.keys.items():
            if i >= 0:
                newimpl.setitem_str(self.space.wrap(k), self.entries[i])
        return newimpl
class SharedValueIteratorImplementation(IteratorImplementation):
    """Iterates directly over the 'entries' value list of a
    SharedDictImplementation.

    Bug fix: next() never advanced self.pos, so the iterator returned
    entries[0] forever and never terminated.  It now steps through the
    list and signals exhaustion with None like the other iterators.
    """
    def __init__(self, space, dictimplementation):
        IteratorImplementation.__init__(self, space, dictimplementation)
        self.values = dictimplementation.entries
    def next(self):
        if self.pos < self.len:
            w_value = self.values[self.pos]
            self.pos += 1
            return w_value
        else:
            self.values = None
            return None
class SharedItemIteratorImplementation(IteratorImplementation):
    """Iterates (key, value) pairs of a SharedDictImplementation,
    skipping negative indexes (transition markers, not real entries)."""
    def __init__(self, space, dictimplementation):
        IteratorImplementation.__init__(self, space, dictimplementation)
        self.iterator = dictimplementation.structure.keys.iteritems()
    def next_entry(self):
        impl = self.dictimplementation
        assert isinstance(impl, SharedDictImplementation)
        for key, index in self.iterator:
            if index < 0:
                continue    # transition marker, not an entry
            w_value = impl.entries[index]
            return self.space.newtuple([self.space.wrap(key), w_value])
        return None
class SharedKeyIteratorImplementation(IteratorImplementation):
    """Iterates the keys of a SharedDictImplementation, skipping
    negative indexes (transition markers, not real entries)."""
    def __init__(self, space, dictimplementation):
        IteratorImplementation.__init__(self, space, dictimplementation)
        self.iterator = dictimplementation.structure.keys.iteritems()
    def next_entry(self):
        impl = self.dictimplementation
        assert isinstance(impl, SharedDictImplementation)
        for key, index in self.iterator:
            if index < 0:
                continue    # transition marker, not an entry
            return self.space.wrap(key)
        return None
import time, py
class DictInfo(object):
    """Statistics record for one measured dictionary.

    Every instance appends itself to the class-level list _dict_infos,
    which report() later dumps to 'dictinfo.txt'.
    """
    _dict_infos = []
    def __init__(self):
        self.id = len(self._dict_infos)
        # per-operation counters
        self.setitem_strs = 0; self.setitems = 0;  self.delitems = 0
        self.lengths = 0;   self.gets = 0
        self.iteritems = 0; self.iterkeys = 0; self.itervalues = 0
        self.keys = 0;      self.values = 0;   self.items = 0
        self.maxcontents = 0
        # aggregated counters
        self.reads = 0
        self.hits = self.misses = 0
        self.writes = 0
        self.iterations = 0
        self.listings = 0
        # tracking of the first non-string key seen, and the dict size
        # at that moment (-1 means "never happened")
        self.seen_non_string_in_write = 0
        self.seen_non_string_in_read_first = 0
        self.size_on_non_string_seen_in_read = -1
        self.size_on_non_string_seen_in_write = -1
        self.createtime = time.time()
        self.lifetime = -1.0    # filled in by OnTheWayOut.__del__
        if not we_are_translated():
            # very probable stack from here:
            # 0 - us
            # 1 - MeasuringDictImplementation.__init__
            # 2 - W_DictMultiObject.__init__
            # 3 - space.newdict
            # 4 - newdict's caller.  let's look at that
            try:
                frame = sys._getframe(4)
            except ValueError:
                pass # might be at import time
            else:
                self.sig = '(%s:%s)%s'%(frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name)

        self._dict_infos.append(self)
    def __repr__(self):
        # show only the non-zero counters, sorted by name
        args = []
        for k in py.builtin.sorted(self.__dict__):
            v = self.__dict__[k]
            if v != 0:
                args.append('%s=%r'%(k, v))
        return '<DictInfo %s>'%(', '.join(args),)
class OnTheWayOut:
    """Held by a measured dict implementation; when it is collected,
    records the dict's total lifetime into its DictInfo."""
    def __init__(self, info):
        self.info = info

    def __del__(self):
        info = self.info
        info.lifetime = time.time() - info.createtime
class MeasuringDictImplementation(DictImplementation):
    """Dict implementation that wraps an r_dict and counts every
    operation into a DictInfo record (--withdictmeasurement builds)."""
    def __init__(self, space):
        self.space = space
        self.content = r_dict(space.eq_w, space.hash_w)
        self.info = DictInfo()
        # the __del__ of this helper records our lifetime at collection
        self.thing_with_del = OnTheWayOut(self.info)

    def __repr__(self):
        return "%s<%s>" % (self.__class__.__name__, self.content)

    def _is_str(self, w_key):
        space = self.space
        return space.is_true(space.isinstance(w_key, space.w_str))
    def _read(self, w_key):
        # bookkeeping only: hit/miss counters plus "first non-string
        # key read before any non-string write" tracking
        self.info.reads += 1
        if not self.info.seen_non_string_in_write \
               and not self.info.seen_non_string_in_read_first \
               and not self._is_str(w_key):
            self.info.seen_non_string_in_read_first = True
            self.info.size_on_non_string_seen_in_read = len(self.content)
        hit = w_key in self.content
        if hit:
            self.info.hits += 1
        else:
            self.info.misses += 1

    def setitem(self, w_key, w_value):
        if not self.info.seen_non_string_in_write and not self._is_str(w_key):
            self.info.seen_non_string_in_write = True
            self.info.size_on_non_string_seen_in_write = len(self.content)
        self.info.setitems += 1
        self.info.writes += 1
        self.content[w_key] = w_value
        self.info.maxcontents = max(self.info.maxcontents, len(self.content))
        return self
    def setitem_str(self, w_key, w_value, shadows_type=True):
        self.info.setitem_strs += 1
        return self.setitem(w_key, w_value)
    def delitem(self, w_key):
        # mirrors _read's non-string tracking, then deletes
        if not self.info.seen_non_string_in_write \
               and not self.info.seen_non_string_in_read_first \
               and not self._is_str(w_key):
            self.info.seen_non_string_in_read_first = True
            self.info.size_on_non_string_seen_in_read = len(self.content)
        self.info.delitems += 1
        self.info.writes += 1
        del self.content[w_key]
        return self

    def length(self):
        self.info.lengths += 1
        return len(self.content)
    def get(self, w_lookup):
        self.info.gets += 1
        self._read(w_lookup)
        return self.content.get(w_lookup, None)

    def iteritems(self):
        self.info.iteritems += 1
        self.info.iterations += 1
        return RDictItemIteratorImplementation(self.space, self)
    def iterkeys(self):
        self.info.iterkeys += 1
        self.info.iterations += 1
        return RDictKeyIteratorImplementation(self.space, self)
    def itervalues(self):
        self.info.itervalues += 1
        self.info.iterations += 1
        return RDictValueIteratorImplementation(self.space, self)

    def keys(self):
        self.info.keys += 1
        self.info.listings += 1
        return self.content.keys()
    def values(self):
        self.info.values += 1
        self.info.listings += 1
        return self.content.values()
    def items(self):
        self.info.items += 1
        self.info.listings += 1
        return [self.space.newtuple([w_key, w_val])
                    for w_key, w_val in self.content.iteritems()]
# Generate _report_one() from a throw-away DictInfo instance: one
# os.write line per statistics attribute (except the variable 'sig').
_example = DictInfo()
del DictInfo._dict_infos[-1]    # don't let the example pollute reports
tmpl = 'os.write(fd, "%(attr)s" + ": " + str(info.%(attr)s) + "\\n")'
bodySrc = []
for attr in py.builtin.sorted(_example.__dict__):
    if attr == 'sig':
        continue
    bodySrc.append(tmpl%locals())
# Python 2 'exec' statement; compiles the generated reporting function
exec py.code.Source('''
def _report_one(fd, info):
    os.write(fd, "_address" + ": " + str(id(info)) + "\\n")
    %s
'''%'\n    '.join(bodySrc)).compile()
def report():
    """Dump the statistics of every DictInfo into 'dictinfo.txt'.

    Called at interpreter shutdown; writes progress notes to stderr.
    """
    if not DictInfo._dict_infos:
        return
    os.write(2, "Starting multidict report.\n")
    # 0644 is a Python 2 octal literal (rw-r--r--)
    fd = os.open('dictinfo.txt', os.O_CREAT|os.O_WRONLY|os.O_TRUNC, 0644)
    for info in DictInfo._dict_infos:
        os.write(fd, '------------------\n')
        _report_one(fd, info)
    os.close(fd)
    os.write(2, "Reporting done.\n")
class W_DictMultiObject(W_Object):
    """Application-level dict backed by a pluggable implementation.

    All operations delegate to self.implementation, which may be
    replaced by a more general implementation as the dict evolves
    (each mutating call returns the implementation to use next).
    """
    from pypy.objspace.std.dicttype import dict_typedef as typedef
    def __init__(w_self, space, wary=False, sharing=False):
        # pick the starting implementation from the space config;
        # the order of these checks is significant
        if space.config.objspace.opcodes.CALL_LIKELY_BUILTIN and wary:
            w_self.implementation = WaryDictImplementation(space)
        elif space.config.objspace.std.withdictmeasurement:
            w_self.implementation = MeasuringDictImplementation(space)
        elif space.config.objspace.std.withsharingdict and sharing:
            w_self.implementation = SharedDictImplementation(space)
        else:
            w_self.implementation = space.emptydictimpl
    def initialize_content(w_self, list_pairs_w):
        # bulk-fill from wrapped (key, value) pairs, threading the
        # possibly-changing implementation through each setitem
        impl = w_self.implementation
        for w_k, w_v in list_pairs_w:
            impl = impl.setitem(w_k, w_v)
        w_self.implementation = impl
    def __repr__(w_self):
        """ representation for debugging purposes """
        return "%s(%s)" % (w_self.__class__.__name__, w_self.implementation)
    def unwrap(w_dict, space):
        # convert to a plain interp-level dict by unwrapping each pair
        result = {}
        items = w_dict.implementation.items()
        for w_pair in items:
            key, val = space.unwrap(w_pair)
            result[key] = val
        return result
    def missing_method(w_dict, space, w_key):
        """ Call __missing__ on subclasses of dict (never on dict itself). """
        if not space.is_w(space.type(w_dict), space.w_dict):
            w_missing = space.lookup(w_dict, "__missing__")
            if w_missing is None:
                return None
            return space.call_function(w_missing, w_dict, w_key)
        else:
            return None
    def len(w_self):
        return w_self.implementation.length()
    def get(w_dict, w_key, w_default):
        # get() returning None means "absent" (stored values are always
        # wrapped objects, so None is never a legitimate value)
        w_value = w_dict.implementation.get(w_key)
        if w_value is not None:
            return w_value
        else:
            return w_default
    def set_str_keyed_item(w_dict, w_key, w_value, shadows_type=True):
        w_dict.implementation = w_dict.implementation.setitem_str(
            w_key, w_value, shadows_type)
registerimplementation(W_DictMultiObject)
def init__DictMulti(space, w_dict, __args__):
    """dict.__init__: accept a sequence of pairs, a mapping, and/or
    keyword arguments, exactly like CPython."""
    w_src, w_kwds = __args__.parse('dict',
                          (['seq_or_map'], None, 'kwargs'), # signature
                          [W_DictMultiObject(space)])       # default argument
    # w_dict.implementation = space.emptydictimpl
    #                              ^^^ disabled only for CPython compatibility
    if space.findattr(w_src, space.wrap("keys")) is None:
        # no .keys(): treat w_src as an iterable of 2-element pairs
        list_of_w_pairs = space.unpackiterable(w_src)
        for w_pair in list_of_w_pairs:
            pair = space.unpackiterable(w_pair)
            if len(pair)!=2:
                raise OperationError(space.w_ValueError,
                             space.wrap("dict() takes a sequence of pairs"))
            w_k, w_v = pair
            w_dict.implementation = w_dict.implementation.setitem(w_k, w_v)
    else:
        # mapping protocol; skip the update entirely for empty sources
        if space.is_true(w_src):
            from pypy.objspace.std.dicttype import update1
            update1(space, w_dict, w_src)
    if space.is_true(w_kwds):
        from pypy.objspace.std.dicttype import update1
        update1(space, w_dict, w_kwds)
def getitem__DictMulti_ANY(space, w_dict, w_lookup):
    """dict[key]: look the key up; fall back to __missing__ on dict
    subclasses; raise KeyError otherwise."""
    w_value = w_dict.implementation.get(w_lookup)
    if w_value is None:
        w_value = w_dict.missing_method(space, w_lookup)
        if w_value is None:
            raise OperationError(space.w_KeyError, w_lookup)
    return w_value
def setitem__DictMulti_ANY_ANY(space, w_dict, w_newkey, w_newvalue):
    # setitem may devolve to a more general implementation; keep it
    w_dict.implementation = w_dict.implementation.setitem(w_newkey, w_newvalue)
def delitem__DictMulti_ANY(space, w_dict, w_lookup):
    # translate the interp-level KeyError to an app-level one
    try:
        w_dict.implementation = w_dict.implementation.delitem(w_lookup)
    except KeyError:
        raise OperationError(space.w_KeyError, w_lookup)
def len__DictMulti(space, w_dict):
    # len(dict): wrapped entry count
    return space.wrap(w_dict.implementation.length())
def contains__DictMulti_ANY(space, w_dict, w_lookup):
    # get() returns None only when the key is absent (stored values are
    # wrapped, so a stored None is w_None, never the interp-level None)
    return space.newbool(w_dict.implementation.get(w_lookup) is not None)

dict_has_key__DictMulti_ANY = contains__DictMulti_ANY
def iter__DictMulti(space, w_dict):
    # iter(dict) iterates over the keys
    return W_DictMultiIterObject(space, w_dict.implementation.iterkeys())
def eq__DictMulti_DictMulti(space, w_left, w_right):
    """dict == dict: identity fast path, then length check, then
    compare every (key, value) pair of the left dict against the
    right one."""
    if space.is_w(w_left, w_right):
        return space.w_True
    if w_left.implementation.length() != w_right.implementation.length():
        return space.w_False
    iteratorimplementation = w_left.implementation.iteritems()
    while True:
        w_item = iteratorimplementation.next()
        if w_item is None:
            return space.w_True         # all pairs matched
        w_key = space.getitem(w_item, space.wrap(0))
        w_val = space.getitem(w_item, space.wrap(1))
        w_rightval = w_right.implementation.get(w_key)
        # absent key, or present with a different value -> unequal
        if w_rightval is None or not space.eq_w(w_val, w_rightval):
            return space.w_False
def characterize(space, aimpl, bimpl):
    """ (similar to CPython)
    returns the smallest key in acontent for which b's value is different or absent and this value """
    w_smallest_diff_a_key = None
    w_its_value = None
    iteratorimplementation = aimpl.iteritems()
    while 1:
        w_item = iteratorimplementation.next()
        if w_item is None:
            break
        w_key = space.getitem(w_item, space.wrap(0))
        w_val = space.getitem(w_item, space.wrap(1))
        # only consider keys smaller than the best difference found so far
        if w_smallest_diff_a_key is None or space.is_true(space.lt(w_key, w_smallest_diff_a_key)):
            w_bvalue = bimpl.get(w_key)
            if w_bvalue is None:
                # key absent from b: definitely a difference
                w_its_value = w_val
                w_smallest_diff_a_key = w_key
            else:
                if not space.eq_w(w_val, w_bvalue):
                    # key present in b but with a different value
                    w_its_value = w_val
                    w_smallest_diff_a_key = w_key
    return w_smallest_diff_a_key, w_its_value
def lt__DictMulti_DictMulti(space, w_left, w_right):
    """dict < dict, following CPython 2's dict ordering rules."""
    # Different sizes, no problem
    leftimpl = w_left.implementation
    rightimpl = w_right.implementation
    if leftimpl.length() < rightimpl.length():
        return space.w_True
    if leftimpl.length() > rightimpl.length():
        return space.w_False

    # Same size
    w_leftdiff, w_leftval = characterize(space, leftimpl, rightimpl)
    if w_leftdiff is None:
        # no difference -> the dicts are equal, hence not <
        return space.w_False
    w_rightdiff, w_rightval = characterize(space, rightimpl, leftimpl)
    if w_rightdiff is None:
        # w_leftdiff is not None, w_rightdiff is None
        return space.w_True
    # compare the smallest differing keys, then their values
    w_res = space.lt(w_leftdiff, w_rightdiff)
    if (not space.is_true(w_res) and
        space.eq_w(w_leftdiff, w_rightdiff) and
        w_rightval is not None):
        w_res = space.lt(w_leftval, w_rightval)
    return w_res
def dict_copy__DictMulti(space, w_self):
    """dict.copy(): build a fresh dict and fill it from w_self."""
    from pypy.objspace.std.dicttype import update1
    w_copy = W_DictMultiObject(space)
    update1(space, w_copy, w_self)
    return w_copy
def dict_items__DictMulti(space, w_self):
    # dict.items() as a wrapped list of wrapped tuples
    return space.newlist(w_self.implementation.items())
def dict_keys__DictMulti(space, w_self):
    # dict.keys() as a wrapped list
    return space.newlist(w_self.implementation.keys())
def dict_values__DictMulti(space, w_self):
    # dict.values() as a wrapped list
    return space.newlist(w_self.implementation.values())
def dict_iteritems__DictMulti(space, w_self):
    # dict.iteritems() wrapped in a dict-iterator object
    return W_DictMultiIterObject(space, w_self.implementation.iteritems())
def dict_iterkeys__DictMulti(space, w_self):
    # dict.iterkeys() wrapped in a dict-iterator object
    return W_DictMultiIterObject(space, w_self.implementation.iterkeys())
def dict_itervalues__DictMulti(space, w_self):
    # dict.itervalues() wrapped in a dict-iterator object
    return W_DictMultiIterObject(space, w_self.implementation.itervalues())
def dict_clear__DictMulti(space, w_self):
    # dict.clear(): simply revert to the shared empty implementation
    w_self.implementation = space.emptydictimpl
def dict_get__DictMulti_ANY_ANY(space, w_dict, w_lookup, w_default):
    # dict.get(key, default)
    return w_dict.get(w_lookup, w_default)
app = gateway.applevel('''
def dictrepr(currently_in_repr, d):
# Now we only handle one implementation of dicts, this one.
# The fix is to move this to dicttype.py, and do a
# multimethod lookup mapping str to StdObjSpace.str
# This cannot happen until multimethods are fixed. See dicttype.py
dict_id = id(d)
if dict_id in currently_in_repr:
return '{...}'
currently_in_repr[dict_id] = 1
try:
items = []
# XXX for now, we cannot use iteritems() at app-level because
# we want a reasonable result instead of a RuntimeError
# even if the dict is mutated by the repr() in the loop.
for k, v in d.items():
items.append(repr(k) + ": " + repr(v))
return "{" + ', '.join(items) + "}"
finally:
try:
del currently_in_repr[dict_id]
except:
pass
''', filename=__file__)
dictrepr = app.interphook("dictrepr")
def repr__DictMulti(space, w_dict):
    # fast path for empty dicts; otherwise delegate to the app-level
    # helper, which guards against recursive dicts via _py_repr
    if w_dict.implementation.length() == 0:
        return space.wrap('{}')
    w_currently_in_repr = space.getexecutioncontext()._py_repr
    return dictrepr(space, w_currently_in_repr, w_dict)
# ____________________________________________________________
# Iteration
class W_DictMultiIterObject(W_Object):
    """App-level dict iterator; wraps one of the implementation-level
    iterator objects (keys, values or items)."""
    from pypy.objspace.std.dicttype import dictiter_typedef as typedef

    def __init__(w_self, space, iteratorimplementation):
        w_self.space = space
        w_self.iteratorimplementation = iteratorimplementation
registerimplementation(W_DictMultiIterObject)
def iter__DictMultiIterObject(space, w_dictiter):
    # iterators are their own iterator
    return w_dictiter
def next__DictMultiIterObject(space, w_dictiter):
    """Advance the iterator; None from the implementation means it is
    exhausted, which becomes an app-level StopIteration."""
    w_result = w_dictiter.iteratorimplementation.next()
    if w_result is None:
        raise OperationError(space.w_StopIteration, space.w_None)
    return w_result
def len__DictMultiIterObject(space, w_dictiter):
    # __length_hint__ support: remaining number of items
    iteratorimplementation = w_dictiter.iteratorimplementation
    return space.wrap(iteratorimplementation.length())
# ____________________________________________________________
from pypy.objspace.std import dicttype
register_all(vars(), dicttype)
| Python |
import py
import sys
from pypy.rlib.rarithmetic import intmask, _hash_string, ovfcheck
from pypy.rlib.objectmodel import we_are_translated
import math
LOG2 = math.log(2)
# number of bits in a native machine integer (plus one bit of slack)
NBITS = int(math.log(sys.maxint) / LOG2) + 2

# XXX should optimize the numbers
NEW_NODE_WHEN_LENGTH = 16          # literal leaves merge below this length
MAX_DEPTH = 32 # maybe should be smaller
MIN_SLICE_LENGTH = 64              # shorter slices are copied, not wrapped
CONCATENATE_WHEN_MULTIPLYING = 128
HIGHEST_BIT_SET = intmask(1L << (NBITS - 1))
def find_fib_index(l):
    """Return the index i such that fib(i+2) <= l < fib(i+3), i.e. the
    Fibonacci bucket of length l in the rope-balancing scheme.
    Returns -1 for l == 0."""
    if l == 0:
        return -1
    lower, upper = 1, 2
    index = 0
    while not (lower <= l < upper):
        lower, upper = upper, lower + upper
        index += 1
    return index
def masked_power(a, b):
    """Compute a ** b truncated to a machine word at every step
    (square-and-multiply, with intmask applied to each product)."""
    # trivial bases/exponents first
    if b == 0:
        return 1
    if b == 1:
        return a
    if a == 0:
        return 0
    if a == 1:
        return 1
    # count the significant bits of b (at least 2 here)
    num_bits = 2
    shifted = b >> 2
    while shifted:
        num_bits += 1
        shifted >>= 1
    # scan the exponent bits from the second-highest downwards
    result = a
    probe = 1 << (num_bits - 2)
    while probe:
        if probe & b:
            result = intmask(result * result * a)
        else:
            result = intmask(result * result)
        probe >>= 1
    return result
class StringNode(object):
    """Abstract base class of all rope nodes.

    The defaults implement the empty string; subclasses override
    length/depth/getitem/flatten/hash_part as needed.
    """
    hash_cache = 0      # 0 means "not computed yet"
    def length(self):
        return 0

    def depth(self):
        return 0

    def rebalance(self):
        return self

    def hash_part(self):
        raise NotImplementedError("base class")

    def flatten(self):
        return ''

    def __add__(self, other):
        return concatenate(self, other)

    def __getitem__(self, index):
        # convenience for tests/debugging: supports both single indices
        # and slices (slices go through the rope getslice machinery)
        if isinstance(index, slice):
            start, stop, step = index.indices(self.length())
            # XXX sucks
            slicelength = len(xrange(start, stop, step))
            return getslice(self, start, stop, step, slicelength)
        return self.getitem(index)

    def getitem(self, index):
        raise NotImplementedError("abstract base class")

    def getitem_slice(self, start, stop):
        # XXX really horrible, in most cases
        result = []
        for i in range(start, stop):
            result.append(self.getitem(i))
        return rope_from_charlist(result)

    def view(self):
        # debugging helper: display this node's tree with dotviewer
        view([self])

    def check_balanced(self):
        return True
class LiteralStringNode(StringNode):
    """Leaf node: wraps a plain immutable string."""
    def __init__(self, s):
        self.s = s

    def length(self):
        return len(self.s)

    def flatten(self):
        return self.s

    def hash_part(self):
        # CPython-style string hash, cached; HIGHEST_BIT_SET is or'ed in
        # so that a computed hash is never 0 (0 marks "not cached")
        h = self.hash_cache
        if not h:
            x = 0
            for c in self.s:
                x = (1000003*x) + ord(c)
            x = intmask(x)
            x |= HIGHEST_BIT_SET
            h = self.hash_cache = x
        return h

    def getitem(self, index):
        return self.s[index]

    def getitem_slice(self, start, stop):
        assert 0 <= start <= stop
        return LiteralStringNode(self.s[start:stop])

    def dot(self, seen, toplevel=False):
        # emit graphviz source for this leaf (debugging only)
        if self in seen:
            return
        seen[self] = True
        addinfo = str(self.s).replace('"', "'") or "_"
        if len(addinfo) > 10:
            addinfo = addinfo[:3] + "..." + addinfo[-3:]
        yield ('"%s" [shape=box,label="length: %s\\n%s"];' % (
            id(self), len(self.s),
            repr(addinfo).replace('"', '').replace("\\", "\\\\")))
class BinaryConcatNode(StringNode):
    """Inner node: the concatenation of two child ropes."""
    def __init__(self, left, right):
        self.left = left
        self.right = right
        try:
            # RPython requires ovfcheck to be used directly inside a
            # try/except OverflowError block
            self.len = ovfcheck(left.length() + right.length())
        except OverflowError:
            raise
        self._depth = max(left.depth(), right.depth()) + 1
        self.balanced = False   # lazily recomputed by check_balanced()

    def check_balanced(self):
        # a node is balanced if its depth is small enough relative to
        # the Fibonacci bucket of its length (see rebalance())
        if self.balanced:
            return True
        if not self.left.check_balanced() or not self.right.check_balanced():
            return False
        left = self.left
        right = self.right
        llen = left.length()
        rlen = right.length()
        ldepth = left.depth()
        rdepth = right.depth()
        balanced = (find_fib_index(self.len // (NEW_NODE_WHEN_LENGTH / 2)) >=
                    self._depth)
        self.balanced = balanced
        return balanced

    def length(self):
        return self.len

    def depth(self):
        return self._depth

    def getitem(self, index):
        # descend into the child that contains the index
        llen = self.left.length()
        if index >= llen:
            return self.right.getitem(index - llen)
        else:
            return self.left.getitem(index)

    def flatten(self):
        f = fringe(self)
        return "".join([node.flatten() for node in f])

    def hash_part(self):
        # combine the children's hashes so the result equals the hash
        # of the flattened string (see LiteralStringNode.hash_part)
        h = self.hash_cache
        if not h:
            h1 = self.left.hash_part()
            h2 = self.right.hash_part()
            x = intmask(h2 + h1 * (masked_power(1000003, self.right.length())))
            x |= HIGHEST_BIT_SET
            h = self.hash_cache = x
        return h

    def rebalance(self):
        return rebalance([self], self.len)

    def dot(self, seen, toplevel=False):
        # emit graphviz source for this subtree (debugging only)
        if self in seen:
            return
        seen[self] = True
        if toplevel:
            addition = ", fillcolor=red"
        elif self.check_balanced():
            addition = ", fillcolor=yellow"
        else:
            addition = ""
        yield '"%s" [shape=octagon,label="+\\ndepth=%s, length=%s"%s];' % (
                id(self), self._depth, self.len, addition)
        for child in [self.left, self.right]:
            yield '"%s" -> "%s";' % (id(self), id(child))
            for line in child.dot(seen):
                yield line
class SliceNode(StringNode):
    """Node representing the substring [start:stop] of another node,
    without copying any characters."""
    def __init__(self, start, stop, node):
        assert 0 <= start <= stop
        self.start = start
        self.stop = stop
        self.node = node

    def length(self):
        return self.stop - self.start

    def getitem_slice(self, start, stop):
        # translate into the underlying node's coordinates
        return self.node.getitem_slice(self.start + start, self.start + stop)

    def getitem(self, index):
        return self.node.getitem(self.start + index)

    def flatten(self):
        return self.node.flatten()[self.start: self.stop]

    def hash_part(self):
        # same algorithm as LiteralStringNode.hash_part, restricted to
        # the sliced range; cached after the first computation
        h = self.hash_cache
        if not h:
            x = 0
            for i in range(self.start, self.stop):
                x = (1000003*x) + ord(self.node.getitem(i))
            x = intmask(x)
            x |= HIGHEST_BIT_SET
            h = self.hash_cache = x
        return h

    def dot(self, seen, toplevel=False):
        # emit graphviz source for this node (debugging only)
        if self in seen:
            return
        seen[self] = True
        yield '"%s" [shape=octagon,label="slice\\nstart=%s, stop=%s"];' % (
                id(self), self.start, self.stop)
        yield '"%s" -> "%s";' % (id(self), id(self.node))
        for line in self.node.dot(seen):
            yield line
class EfficientGetitemWraper(StringNode):
    """Wrapper (note: historical typo in the name is part of the API)
    around a concatenation node that keeps a seekable character
    iterator, making mostly-sequential getitem calls cheap instead of
    O(depth) tree walks."""
    def __init__(self, node):
        assert isinstance(node, BinaryConcatNode)
        self.node = node
        self.iter = SeekableCharIterator(node)
        self.nextpos = 0        # index the iterator will yield next
        self.accesses = 0       # statistics: total getitem calls
        self.seeks = 0          # statistics: total characters seeked over

    def length(self):
        return self.node.length()

    def depth(self):
        return self.node.depth()

    def rebalance(self):
        return EfficientGetitemWraper(self.node.rebalance())

    def hash_part(self):
        return self.node.hash_part()

    def flatten(self):
        return self.node.flatten()

    def getitem(self, index):
        # seek the shared iterator to 'index', then read one char
        self.accesses += 1
        nextpos = self.nextpos
        self.nextpos = index + 1
        if index < nextpos:
            self.iter.seekback(nextpos - index)
            self.seeks += nextpos - index
        elif index > nextpos:
            self.iter.seekforward(index - nextpos)
            self.seeks += index - nextpos
        return self.iter.next()

    def view(self):
        return self.node.view()

    def check_balanced(self):
        return self.node.check_balanced()
def concatenate(node1, node2):
    """Concatenate two ropes.  Short literal tails are merged into a
    single leaf to avoid degenerate trees of tiny nodes; the result is
    rebalanced if it gets too deep."""
    if node1.length() == 0:
        return node2
    if node2.length() == 0:
        return node1
    if (isinstance(node2, LiteralStringNode) and
        len(node2.s) <= NEW_NODE_WHEN_LENGTH):
        # try to merge node2 into the rightmost literal of node1
        if isinstance(node1, LiteralStringNode):
            if len(node1.s) + len(node2.s) <= NEW_NODE_WHEN_LENGTH:
                return LiteralStringNode(node1.s + node2.s)
        elif isinstance(node1, BinaryConcatNode):
            r = node1.right
            if isinstance(r, LiteralStringNode):
                if len(r.s) + len(node2.s) <= NEW_NODE_WHEN_LENGTH:
                    return BinaryConcatNode(node1.left,
                                            LiteralStringNode(r.s + node2.s))
    result = BinaryConcatNode(node1, node2)
    if result.depth() > MAX_DEPTH: #XXX better check
        return result.rebalance()
    return result
def getslice(node, start, stop, step, slicelength):
    """Extended slicing of a rope.  Non-unit steps walk the characters
    with a seekable iterator; step == 1 uses the structural fast path."""
    if step != 1:
        start, stop, node = find_straddling(node, start, stop)
        iter = SeekableCharIterator(node)
        iter.seekforward(start)
        result = [iter.next()]
        for i in range(slicelength - 1):
            iter.seekforward(step - 1)
            result.append(iter.next())
        return rope_from_charlist(result)
    return getslice_one(node, start, stop)
def getslice_one(node, start, stop):
    """Contiguous (step == 1) slice of a rope, reusing subtrees where
    possible instead of copying characters."""
    start, stop, node = find_straddling(node, start, stop)
    if isinstance(node, BinaryConcatNode):
        # after find_straddling, the range straddles node's children
        if start == 0:
            if stop == node.length():
                return node
            return getslice_left(node, stop)
        if stop == node.length():
            return getslice_right(node, start)
        return concatenate(
            getslice_right(node.left, start),
            getslice_left(node.right, stop - node.left.length()))
    else:
        return getslice_primitive(node, start, stop)
def find_straddling(node, start, stop):
    """Descend to the smallest subtree that fully contains the range
    [start, stop); returns the range translated into that subtree's
    coordinates together with the subtree itself."""
    while 1:
        if isinstance(node, BinaryConcatNode):
            llen = node.left.length()
            if start >= llen:
                # range lies entirely in the right child
                node = node.right
                start = start - llen
                stop = stop - llen
                continue
            if stop <= llen:
                # range lies entirely in the left child
                node = node.left
                continue
        return start, stop, node
def getslice_right(node, start):
    """Slice [start:] of a rope, sharing subtrees where possible."""
    while 1:
        if start == 0:
            return node
        if isinstance(node, BinaryConcatNode):
            llen = node.left.length()
            if start >= llen:
                # drop the left child entirely
                node = node.right
                start = start - llen
                continue
            else:
                return concatenate(getslice_right(node.left, start),
                                   node.right)
        return getslice_primitive(node, start, node.length())
def getslice_left(node, stop):
    """Slice [:stop] of a rope, sharing subtrees where possible."""
    while 1:
        if stop == node.length():
            return node
        if isinstance(node, BinaryConcatNode):
            llen = node.left.length()
            if stop <= llen:
                # drop the right child entirely
                node = node.left
                continue
            else:
                return concatenate(node.left,
                                   getslice_left(node.right, stop - llen))
        return getslice_primitive(node, 0, stop)
def getslice_primitive(node, start, stop):
    """Slice a single (non-concat) node: short results are copied into
    a fresh node, long results become a SliceNode view (collapsing
    nested SliceNodes into one)."""
    if stop - start < MIN_SLICE_LENGTH:
        return node.getitem_slice(start, stop)
    if isinstance(node, SliceNode):
        # slice-of-slice: point directly at the underlying node
        return SliceNode(start + node.start, stop + node.start, node.node)
    return SliceNode(start, stop, node)
def multiply(node, times):
    """Repeat a rope 'times' times, by the square-and-multiply scheme
    over the bits of 'times' (like masked_power, but on ropes)."""
    if times <= 0:
        return LiteralStringNode("")
    if times == 1:
        return node
    end_length = node.length() * times
    # count the significant bits of 'times'
    num_bits = 2
    mask = times >> 2
    while mask:
        num_bits += 1
        mask >>= 1
    result = node
    mask = 1 << (num_bits - 2)
    #import pdb; pdb.set_trace()
    for i in range(num_bits - 1):
        if mask & times:
            # small intermediate results go through concatenate() so
            # literal leaves get merged; large ones build raw nodes
            if result.length() < CONCATENATE_WHEN_MULTIPLYING:
                result = concatenate(result, result)
                result = concatenate(result, node)
            else:
                result = BinaryConcatNode(result, result)
                result = BinaryConcatNode(result, node)
        else:
            if result.length() < CONCATENATE_WHEN_MULTIPLYING:
                result = concatenate(result, result)
            else:
                result = BinaryConcatNode(result, result)
        mask >>= 1
    return result
def join(node, l):
    """Join the list of ropes 'l' with 'node' as the separator,
    returning a balanced rope."""
    if node.length() == 0:
        return rebalance(l)
    # interleave: l[0], sep, l[1], sep, ..., l[-1]
    nodelist = [None] * (2 * len(l) - 1)
    length = 0
    for i in range(len(l)):
        nodelist[2 * i] = l[i]
        length += l[i].length()
    for i in range(len(l) - 1):
        nodelist[2 * i + 1] = node
    length += (len(l) - 1) * node.length()
    return rebalance(nodelist, length)
def rebalance(nodelist, sizehint=-1):
    """Rebuild the concatenation of 'nodelist' as a balanced rope,
    using the Fibonacci-bucket scheme of Boehm et al.'s rope paper:
    each slot l[i] holds a subtree whose length falls in the i-th
    Fibonacci interval; colliding subtrees are merged leftwards."""
    nodelist.reverse()
    if sizehint < 0:
        sizehint = 0
        for node in nodelist:
            sizehint += node.length()
    if sizehint == 0:
        return LiteralStringNode("")

    # this code is based on the Fibonacci identity:
    #   sum(fib(i) for i in range(n+1)) == fib(n+2)
    l = [None] * (find_fib_index(sizehint) + 2)
    stack = nodelist
    empty_up_to = len(l)
    a = b = sys.maxint
    first_node = None
    while stack:
        curr = stack.pop()
        # unpack unbalanced concat nodes back onto the work stack
        while isinstance(curr, BinaryConcatNode) and not curr.balanced:
            stack.append(curr.right)
            curr = curr.left

        currlen = curr.length()
        if currlen == 0:
            continue

        if currlen < a:
            # we can put 'curr' to its preferred location, which is in
            # the known empty part at the beginning of 'l'
            a, b = 1, 2
            empty_up_to = 0
            while not (currlen < b):
                empty_up_to += 1
                a, b = b, a+b
        else:
            # sweep all elements up to the preferred location for 'curr'
            while not (currlen < b and l[empty_up_to] is None):
                if l[empty_up_to] is not None:
                    curr = concatenate(l[empty_up_to], curr)
                    l[empty_up_to] = None
                    currlen = curr.length()
                else:
                    empty_up_to += 1
                    a, b = b, a+b

        if empty_up_to == len(l):
            return curr
        l[empty_up_to] = curr
        first_node = curr

    # sweep all elements
    curr = first_node
    for index in range(empty_up_to + 1, len(l)):
        if l[index] is not None:
            curr = BinaryConcatNode(l[index], curr)
    assert curr is not None
    curr.check_balanced()
    return curr
# __________________________________________________________________________
# construction from normal strings
def rope_from_charlist(charlist):
    """Build a balanced rope from a list of single characters, packing
    them into literal leaves of at most NEW_NODE_WHEN_LENGTH chars."""
    nodelist = []
    size = 0
    for i in range(0, len(charlist), NEW_NODE_WHEN_LENGTH):
        chars = charlist[i: min(len(charlist), i + NEW_NODE_WHEN_LENGTH)]
        nodelist.append(LiteralStringNode("".join(chars)))
        size += len(chars)
    return rebalance(nodelist, size)
# __________________________________________________________________________
# searching
def find_char(node, c, start=0, stop=-1):
    """Return the index (relative to the whole rope) of the first
    occurrence of the single character 'c' in 'node' within the range
    [start, stop), or -1 if it does not occur there."""
    offset = 0
    length = node.length()
    if stop == -1:
        stop = length
    if start != 0 or stop != length:
        # narrow to the smallest subtree containing [start, stop);
        # remember the offset to translate results back
        newstart, newstop, node = find_straddling(node, start, stop)
        offset = start - newstart
        start = newstart
        stop = newstop
    assert 0 <= start <= stop
    if isinstance(node, LiteralStringNode):
        result = node.s.find(c, start, stop)
        if result == -1:
            return -1
        return result + offset
    elif isinstance(node, SliceNode):
        return find_char(node.node, c, node.start + start,
                         node.start + stop) - node.start + offset
    # general case: scan the fringe (leaf nodes) left to right
    iter = FringeIterator(node)
    #import pdb; pdb.set_trace()
    i = 0
    while i < stop:
        try:
            fringenode = iter.next()
        except StopIteration:
            return -1
        nodelength = fringenode.length()
        if i + nodelength <= start:
            i += nodelength
            continue
        # intersection of [start, stop) with this leaf, in leaf coords
        searchstart = max(0, start - i)
        searchstop = min(stop - i, nodelength)
        if isinstance(fringenode, LiteralStringNode):
            st = fringenode.s
            localoffset = 0
        else:
            assert isinstance(fringenode, SliceNode)
            n = fringenode.node
            assert isinstance(n, LiteralStringNode)
            st = n.s
            localoffset = -fringenode.start
            searchstart += fringenode.start
            # bug fix: the stop bound must be shifted by the slice's
            # *start*, like searchstart; shifting by fringenode.stop let
            # str.find look past the end of the slice and report matches
            # that are not inside the requested range of the rope.
            searchstop += fringenode.start
        pos = st.find(c, searchstart, searchstop)
        if pos != -1:
            return pos + i + offset + localoffset
        i += nodelength
    return -1
def find(node, subnode, start=0, stop=-1):
    """Return the index of the first occurrence of the rope 'subnode'
    inside 'node' within [start, stop), or -1.  Mirrors str.find."""
    len1 = node.length()
    if stop > len1 or stop == -1:
        stop = len1
    substring = subnode.flatten() # XXX stressful to do it as a node
    len2 = len(substring)
    if len2 == 1:
        # single characters have a faster dedicated search
        return find_char(node, substring[0], start, stop)
    if len2 == 0:
        # empty substring is found at 'start' unless the range is bogus
        if (stop - start) < 0:
            return -1
        return start
    restart = construct_restart_positions(substring)
    return _find(node, substring, start, stop, restart)
def _find(node, substring, start, stop, restart):
    """Knuth-Morris-Pratt search of 'substring' in the rope 'node'
    within [start, stop), using the precomputed failure table
    'restart'.  Returns the match position or -1."""
    len2 = len(substring)
    i = 0       # chars of substring matched so far
    m = start   # start position of the current tentative match
    iter = SeekableCharIterator(node)
    iter.seekforward(start)
    c = iter.next()
    while m + i < stop:
        if c == substring[i]:
            i += 1
            if i == len2:
                return m
            if m + i < stop:
                c = iter.next()
        else:
            # mismatch, go back to the last possible starting pos
            if i==0:
                m += 1
                if m + i < stop:
                    c = iter.next()
            else:
                # shift the match start per the KMP failure table and
                # rewind the iterator to the new comparison point
                e = restart[i-1]
                new_m = m + i - e
                assert new_m <= m + i
                seek = m + i - new_m
                if seek:
                    iter.seekback(m + i - new_m)
                    c = iter.next()
                m = new_m
                i = e
    return -1
def construct_restart_positions(s):
    """Build the KMP failure table for 's': table[i] is the length of
    the longest proper prefix of s[:i+1] that is also its suffix."""
    length = len(s)
    table = [0] * length
    # redundant for non-empty s; raises IndexError on empty input,
    # exactly as before (callers never pass an empty string)
    table[0] = 0
    pos = 1
    prefix_len = 0
    while pos < length:
        if s[pos] == s[prefix_len]:
            prefix_len += 1
            table[pos] = prefix_len
            pos += 1
        elif prefix_len > 0:
            # fall back to the next shorter border and retry
            prefix_len = table[prefix_len - 1]
        else:
            table[pos] = 0
            pos += 1
    return table
def construct_restart_positions_node(node):
    """Like construct_restart_positions, but computed directly on a
    rope node using character iterators instead of a flat string."""
    # really a bit overkill
    l = node.length()
    restart = [0] * l
    restart[0] = 0
    i = 1
    j = 0
    # iter1 scans position i, iter2 scans the candidate border at j
    iter1 = CharIterator(node)
    iter1.next()
    c1 = iter1.next()
    iter2 = SeekableCharIterator(node)
    c2 = iter2.next()
    while 1:
        if c1 == c2:
            j += 1
            if j != l:
                c2 = iter2.next()
            restart[i] = j
            i += 1
            if i != l:
                c1 = iter1.next()
            else:
                break
        elif j>0:
            # fall back to the next shorter border; rewind iter2
            new_j = restart[j-1]
            assert new_j < j
            iter2.seekback(j - new_j)
            c2 = iter2.next()
            j = new_j
        else:
            restart[i] = 0
            i += 1
            if i != l:
                c1 = iter1.next()
            else:
                break
            j = 0
            # restart the border scan from the beginning of the node
            iter2 = SeekableCharIterator(node)
            c2 = iter2.next()
    return restart
def view(objs):
    """Debugging helper: render the given rope nodes as a graphviz
    graph and display it with dotviewer (writes a temp .dot file)."""
    from dotviewer import graphclient
    content = ["digraph G{"]
    seen = {}
    for i, obj in enumerate(objs):
        if obj is None:
            content.append(str(i) + ";")
        else:
            content.extend(obj.dot(seen, toplevel=True))
    content.append("}")
    p = py.test.ensuretemp("automaton").join("temp.dot")
    p.write("\n".join(content))
    graphclient.display_dot_file(str(p))
# __________________________________________________________________________
# iteration
class FringeIterator(object):
    """Iterate over the fringe (the non-concat leaf nodes) of a rope,
    from left to right.  next() raises StopIteration when exhausted."""
    def __init__(self, node):
        self.stack = [node]

    def next(self):
        stack = self.stack
        while stack:
            curr = stack.pop()
            # descend to the leftmost leaf, stacking right children
            while isinstance(curr, BinaryConcatNode):
                stack.append(curr.right)
                curr = curr.left
            return curr
        raise StopIteration
def fringe(node):
    """Return the list of leaf nodes of 'node', left to right."""
    leaves = []
    it = FringeIterator(node)
    try:
        while 1:
            leaves.append(it.next())
    except StopIteration:
        return leaves
class SeekableFringeIterator(object):
    """FringeIterator that additionally remembers the leaves already
    yielded, so the caller can step back over them with seekback()."""
    # XXX allow to seek in bigger character steps
    def __init__(self, node):
        self.stack = [node]
        self.fringestack = []   # leaves pushed back by seekback()
        self.fringe = []        # leaves yielded so far, in order

    def next(self):
        # prefer leaves that were pushed back earlier
        if self.fringestack:
            result = self.fringestack.pop()
            self.fringe.append(result)
            return result
        while self.stack:
            curr = self.stack.pop()
            while 1:
                if isinstance(curr, BinaryConcatNode):
                    self.stack.append(curr.right)
                    curr = curr.left
                else:
                    self.fringe.append(curr)
                    return curr
        raise StopIteration

    def seekback(self):
        # undo one next(): move the latest leaf onto the pushback stack
        result = self.fringe.pop()
        self.fringestack.append(result)
        return result
class CharIterator(object):
    """Iterate over the characters of a rope, one at a time.
    next() raises StopIteration (from the fringe iterator) at the end."""
    def __init__(self, node):
        self.iter = FringeIterator(node)
        self.node = None        # current leaf, None -> fetch the next one
        self.nodelength = 0
        self.index = 0          # position inside the current leaf

    def next(self):
        node = self.node
        if node is None:
            # advance to the next non-empty leaf
            while 1:
                node = self.node = self.iter.next()
                nodelength = self.nodelength = node.length()
                if nodelength != 0:
                    break
            self.index = 0
        index = self.index
        result = self.node.getitem(index)
        if self.index == self.nodelength - 1:
            self.node = None    # leaf exhausted
        else:
            self.index = index + 1
        return result
class SeekableCharIterator(object):
    """Character iterator over a rope that can also seek forward and
    backward by a number of characters."""
    def __init__(self, node):
        self.iter = SeekableFringeIterator(node)
        self.node = self.nextnode()
        self.nodelength = self.node.length()
        self.index = 0      # position of the next char in self.node

    def nextnode(self):
        # advance to the next non-empty leaf
        while 1:
            node = self.node = self.iter.next()
            nodelength = self.nodelength = node.length()
            if nodelength != 0:
                break
        self.index = 0
        return node

    def next(self):
        node = self.node
        if node is None:
            node = self.nextnode()
        index = self.index
        result = self.node.getitem(index)
        if self.index == self.nodelength - 1:
            self.node = None    # leaf exhausted; fetched lazily later
        self.index = index + 1
        return result

    def seekforward(self, numchars):
        # fast path: the target is still inside the current leaf
        if numchars < (self.nodelength - self.index):
            self.index += numchars
            return
        numchars -= self.nodelength - self.index
        while 1:
            node = self.iter.next()
            length = node.length()
            if length <= numchars:
                numchars -= length
            else:
                self.index = numchars
                self.node = node
                self.nodelength = node.length()
                return

    def seekback(self, numchars):
        # fast path: the target is still inside the current leaf
        if numchars <= self.index:
            self.index -= numchars
            if self.node is None:
                # the leaf was marked exhausted; re-fetch it
                self.iter.seekback()
                self.node = self.iter.next()
            return
        numchars -= self.index
        self.iter.seekback() # for first item
        while 1:
            node = self.iter.seekback()
            length = node.length()
            if length < numchars:
                numchars -= length
            else:
                self.index = length - numchars
                self.node = self.iter.next()
                self.nodelength = self.node.length()
                return
class FindIterator(object):
    """Iterate over the successive (non-overlapping) match positions of
    'sub' in 'node' within [start, stop); next() raises StopIteration
    when there are no more matches."""
    def __init__(self, node, sub, start=0, stop=-1):
        self.node = node
        len1 = self.length = node.length()
        substring = self.substring = sub.flatten() # XXX for now
        len2 = len(substring)
        self.search_length = len2
        if len2 == 0:
            # empty substring: match at every position, no table needed
            self.restart_positions = None
        elif len2 == 1:
            # single char: use find_char, no table needed
            self.restart_positions = None
        else:
            self.restart_positions = construct_restart_positions(substring)
        self.start = start
        if stop == -1 or stop > len1:
            stop = len1
        self.stop = stop

    def next(self):
        if self.search_length == 0:
            # empty substring matches at start, start+1, ..., stop
            if (self.stop - self.start) < 0:
                raise StopIteration
            start = self.start
            self.start += 1
            return start
        elif self.search_length == 1:
            result = find_char(self.node, self.substring[0],
                               self.start, self.stop)
            if result == -1:
                self.start = self.length
                raise StopIteration
            self.start = result + 1
            return result
        if self.start >= self.stop:
            raise StopIteration
        result = _find(self.node, self.substring, self.start,
                       self.stop, self.restart_positions)
        if result == -1:
            self.start = self.length
            raise StopIteration
        # skip past this whole match (non-overlapping semantics)
        self.start = result + self.search_length
        return result
# __________________________________________________________________________
# comparison
def eq(node1, node2):
    """Return True if the two rope nodes represent equal strings."""
    if node1 is node2:
        return True
    # cheap rejections first: lengths, then precomputed hashes
    if node1.length() != node2.length():
        return False
    if hash_rope(node1) != hash_rope(node2):
        return False
    if (isinstance(node1, LiteralStringNode) and
        isinstance(node2, LiteralStringNode)):
        return node1.s == node2.s
    chars1 = CharIterator(node1)
    chars2 = CharIterator(node2)
    # XXX could be cleverer and detect partial equalities
    while True:
        try:
            ch = chars1.next()
        except StopIteration:
            # equal lengths, so chars2 is exhausted at the same moment
            return True
        if ch != chars2.next():
            return False
def compare(node1, node2):
    """Three-way compare two rope nodes, with cmp()-like semantics."""
    len1 = node1.length()
    len2 = node2.length()
    # empty ropes sort before everything else
    if not len1:
        if not len2:
            return 0
        return -1
    if not len2:
        return 1
    cmplen = min(len1, len2)
    chars1 = CharIterator(node1)
    chars2 = CharIterator(node2)
    pos = 0
    while pos < cmplen:
        diff = ord(chars1.next()) - ord(chars2.next())
        if diff:
            return diff
        pos += 1
    # common prefix equal: the shorter rope compares smaller
    return len1 - len2
# __________________________________________________________________________
# misc
def hash_rope(rope):
    """Hash a rope node; empty ropes hash to -1."""
    size = rope.length()
    if size == 0:
        return -1
    h = rope.hash_part() << 1    # drop the bit that is always set
    h ^= ord(rope.getitem(0))
    h ^= size
    return intmask(h)
| Python |
from pypy.interpreter import gateway, baseobjspace, argument
from pypy.interpreter.error import OperationError
from pypy.interpreter.typedef import TypeDef, GetSetProperty, Member
from pypy.interpreter.typedef import descr_get_dict, descr_set_dict
from pypy.interpreter.typedef import no_hash_descr
from pypy.interpreter.baseobjspace import SpaceCache
from pypy.objspace.std.model import StdObjSpaceMultiMethod
from pypy.objspace.std.multimethod import FailedToImplement
from pypy.tool.sourcetools import compile2
__all__ = ['StdTypeDef', 'newmethod', 'gateway',
           'GetSetProperty', 'Member',
           'SMM', 'descr_get_dict', 'no_hash_descr']
# short alias used throughout the std objspace for multimethod definitions
SMM = StdObjSpaceMultiMethod
class StdTypeDef(TypeDef):
    """A TypeDef extended with multimethod support for the std object space."""
    def __init__(self, __name, __base=None, **rawdict):
        "NOT_RPYTHON: initialization-time only."
        TypeDef.__init__(self, __name, __base, **rawdict)
        # W_Any<Name>: marker class meaning "any wrapped object of this typedef"
        self.any = type("W_Any"+__name.title(), (baseobjspace.W_Root,), {'typedef': self})
        # multimethods registered directly on this type (see registermethods)
        self.local_multimethods = []
    def registermethods(self, namespace):
        "NOT_RPYTHON: initialization-time only."
        self.local_multimethods += hack_out_multimethods(namespace)
def issubtypedef(a, b):
    """Return True if typedef `a` is `b` or inherits from it."""
    from pypy.objspace.std.objecttype import object_typedef
    if b is object_typedef:
        # every typedef inherits from object
        return True
    current = a
    while current is not None:
        if current is b:
            return True
        current = current.base
    return False
def descr_del_dict(space, w_obj): # blame CPython for the existence of this one
    # 'del obj.__dict__' is implemented as replacing it with a fresh dict
    w_obj.setdict(space, space.newdict())
# the descriptor implementing instance __dict__ access (get/set/delete)
std_dict_descr = GetSetProperty(descr_get_dict, descr_set_dict, descr_del_dict)
std_dict_descr.name = '__dict__'
def newmethod(descr_new, unwrap_spec=None):
    "NOT_RPYTHON: initialization-time only."
    # this is turned into a static method by the constructor of W_TypeObject.
    # Wraps an interp-level __new__ implementation as an app-visible callable.
    return gateway.interp2app(descr_new, unwrap_spec=unwrap_spec)
# ____________________________________________________________
#
# All the code below fishes from the multimethod registration tables
# the descriptors to put into the W_TypeObjects.
#
class TypeCache(SpaceCache):
    """Space cache building one W_TypeObject per (Std)TypeDef."""
    def build(cache, typedef):
        "NOT_RPYTHON: initialization-time only."
        # build a W_TypeObject from this StdTypeDef
        from pypy.objspace.std.typeobject import W_TypeObject
        from pypy.objspace.std.objecttype import object_typedef
        space = cache.space
        w = space.wrap
        rawdict = typedef.rawdict
        # name -> zero-argument callable producing the wrapped descriptor;
        # evaluated lazily on first attribute lookup
        lazyloaders = {}
        if isinstance(typedef, StdTypeDef):
            # get all the sliced multimethods
            multimethods = slicemultimethods(space, typedef)
            for name, loader in multimethods.items():
                if name in rawdict:
                    # the name specified in the rawdict has priority
                    continue
                assert name not in lazyloaders, (
                    'name clash: %s in %s.lazyloaders' % (name, typedef.name))
                lazyloaders[name] = loader
        # compute the bases
        if typedef is object_typedef:
            bases_w = []
        else:
            base = typedef.base or object_typedef
            bases_w = [space.gettypeobject(base)]
        # wrap everything
        dict_w = {}
        for descrname, descrvalue in rawdict.items():
            dict_w[descrname] = w(descrvalue)
        w_type = W_TypeObject(space, typedef.name, bases_w, dict_w,
                              overridetypedef=typedef)
        w_type.lazyloaders = lazyloaders
        return w_type
    def ready(self, w_type):
        # called by the cache once the type object is fully built
        w_type.ready()
def hack_out_multimethods(ns):
    "NOT_RPYTHON: initialization-time only."
    # collect every StdObjSpaceMultiMethod found in the given namespace,
    # complaining about duplicated names
    result = []
    seen = {}
    for value in ns.itervalues():
        if not isinstance(value, StdObjSpaceMultiMethod):
            continue
        if value.name in seen:
            raise Exception("duplicate multimethod name %r" %
                            (value.name,))
        seen[value.name] = True
        result.append(value)
    return result
##def make_frameclass_for_arity(arity, varargs, keywords, isspecial):
## argnames = []
## for i in range(arity):
## argnames.append('arg%dof%d'%(i+1, arity))
## if varargs:
## argnames.append('var_args')
## if keywords:
## argnames.append('kw_args')
## self_args_assigning = []
## for i in range(len(argnames)):
## self_args_assigning.append(' self.%s = args[%i]'%(argnames[i], i))
## self_args_assigning = "\n".join(self_args_assigning)
## self_args = ", ".join(['self.'+ a for a in argnames])
## name = 'MmFrameOfArity%d'%arity
## if varargs:
## name += "Var"
## if keywords:
## name += "KW"
## if isspecial:
## name = "Special" + name
## d = locals()
## template = mmtemplate
## if isspecial:
## template += specialmmruntemplate
## else:
## template += mmruntemplate
### print template%d
## exec template%d in globals(), d
## return d[name]
##
##_frameclass_for_arity_cache = {}
##def frameclass_for_arity(arity, varargs, keywords, isspecial):
## try:
## return _frameclass_for_arity_cache[(arity, varargs, keywords, isspecial)]
## except KeyError:
## r = _frameclass_for_arity_cache[(arity, varargs, keywords, isspecial)] = \
## make_frameclass_for_arity(arity, varargs, keywords, isspecial)
## return r
def sliced_typeorders(typeorder, multimethod, typedef, i, local=False):
    """NOT_RPYTHON"""
    # Prepare the per-argument type orders used to install `multimethod`
    # as a method of `typedef`, specializing ("slicing") argument `i` to
    # that typedef -- unless the multimethod is typedef-local, in which
    # case the full typeorder is kept and only the prefix differs.
    list_of_typeorders = [typeorder] * multimethod.arity
    prefix = '_mm_' + multimethod.name
    if not local:
        # slice: for argument i, keep only the conversions that lead
        # into `typedef` itself
        sliced_typeorder = {}
        for type, order in typeorder.items():
            thistypedef = getattr(type, 'typedef', None)
            if issubtypedef(thistypedef, typedef):
                lst = []
                for target_type, conversion in order:
                    targettypedef = getattr(target_type, 'typedef', None)
                    if targettypedef == typedef:
                        lst.append((target_type, conversion))
                sliced_typeorder[type] = lst
        list_of_typeorders[i] = sliced_typeorder
        prefix += '_%sS%d' % (typedef.name, i)
    else:
        prefix = typedef.name +'_mth'+prefix
    return prefix, list_of_typeorders
def typeerrormsg(space, operatorsymbol, args_w):
    """Build the wrapped "unsupported operand type(s)" error message
    for the given operator and wrapped arguments."""
    names = []
    for w_arg in args_w:
        names.append(space.type(w_arg).getname(space, '?'))
    plural = ''
    if len(args_w) > 1:
        plural = 's'
    msg = ("unsupported operand type%s for %s (%s)"
           % (plural, operatorsymbol, ', '.join(names)))
    return space.wrap(msg)
def make_perform_trampoline(prefix, exprargs, expr, miniglobals, multimethod, selfindex=0,
                            allow_NotImplemented_results=False):
    """NOT_RPYTHON"""
    # Build -- by exec'ing generated source -- the function that adapts a
    # wrapped-argument call into the multimethod dispatch expression `expr`,
    # translating FailedToImplement into NotImplemented or a TypeError.
    # mess to figure out how to put a gateway around executing expr
    argnames = ['_%d'%(i+1) for i in range(multimethod.arity)]
    explicit_argnames = multimethod.extras.get('argnames', [])
    # explicit names (if any) replace the tail of the generated ones
    argnames[len(argnames)-len(explicit_argnames):] = explicit_argnames
    solid_arglist = ['w_'+name for name in argnames]
    wrapper_arglist = solid_arglist[:]
    if multimethod.extras.get('varargs_w', False):
        wrapper_arglist.append('args_w')
    if multimethod.extras.get('w_varargs', False):
        wrapper_arglist.append('w_args')
    if multimethod.extras.get('keywords', False):
        raise Exception, "no longer supported, use __args__"
    if multimethod.extras.get('general__args__', False):
        wrapper_arglist.append('__args__')
    wrapper_arglist += multimethod.extras.get('extra_args', ())
    miniglobals.update({ 'OperationError': OperationError,
                         'typeerrormsg': typeerrormsg})
    # app-level defaults become default values in the generated signature
    app_defaults = multimethod.extras.get('defaults', ())
    i = len(argnames) - len(app_defaults)
    wrapper_signature = wrapper_arglist[:]
    for app_default in app_defaults:
        name = wrapper_signature[i]
        wrapper_signature[i] = '%s=%s' % (name, name)
        miniglobals[name] = app_default
        i += 1
    # move the dispatching argument to the front (it plays the 'self' role)
    wrapper_signature.insert(0, wrapper_signature.pop(selfindex))
    wrapper_sig = ', '.join(wrapper_signature)
    # compute the renaming assignment mapping wrapper names to expr names
    src = []
    dest = []
    for wrapper_arg,expr_arg in zip(['space']+wrapper_arglist, exprargs):
        if wrapper_arg != expr_arg:
            src.append(wrapper_arg)
            dest.append(expr_arg)
    renaming = ', '.join(dest) +" = "+', '.join(src)
    if allow_NotImplemented_results and (len(multimethod.specialnames) > 1 or
                                         multimethod.name.startswith('inplace_')):
        # turn FailedToImplement into NotImplemented
        code = """def %s_perform_call(space, %s):
                      %s
                      try:
                          return %s
                      except FailedToImplement, e:
                          if e.w_type is not None:
                              raise OperationError(e.w_type, e.w_value)
                          else:
                              return space.w_NotImplemented
""" % (prefix, wrapper_sig, renaming, expr)
    else:
        # turn FailedToImplement into nice TypeErrors
        code = """def %s_perform_call(space, %s):
                      %s
                      try:
                          w_res = %s
                      except FailedToImplement, e:
                          if e.w_type is not None:
                              raise OperationError(e.w_type, e.w_value)
                          else:
                              raise OperationError(space.w_TypeError,
                                                   typeerrormsg(space, %r, [%s]))
                      if w_res is None:
                          w_res = space.w_None
                      return w_res
""" % (prefix, wrapper_sig, renaming, expr,
       multimethod.operatorsymbol, ', '.join(solid_arglist))
    exec compile2(code, '', 'exec') in miniglobals
    return miniglobals["%s_perform_call" % prefix]
def wrap_trampoline_in_gateway(func, methname, multimethod):
    """NOT_RPYTHON"""
    # `func` is a trampoline built by make_perform_trampoline(); expose it
    # at app-level under `methname` with the matching unwrap specification.
    unwrap_spec = [baseobjspace.ObjSpace] + [baseobjspace.W_Root]*multimethod.arity
    if multimethod.extras.get('varargs_w', False):
        unwrap_spec.append('args_w')
    if multimethod.extras.get('w_varargs', False):
        unwrap_spec.append('w_args')
    if multimethod.extras.get('general__args__', False):
        unwrap_spec.append(argument.Arguments)
    if 'doc' in multimethod.extras:
        func.__doc__ = multimethod.extras['doc']
    return gateway.interp2app(func, app_name=methname, unwrap_spec=unwrap_spec)
def slicemultimethod(space, multimethod, typedef, result, local=False):
    """NOT_RPYTHON"""
    # Register into `result` one lazy loader per special name under which
    # this multimethod is visible on `typedef`.
    for i in range(len(multimethod.specialnames)):
        # each specialname corresponds to a different argument position i
        methname = multimethod.specialnames[i]
        if methname in result:
            # conflict between e.g. __lt__ and
            # __lt__-as-reversed-version-of-__gt__
            loader = result[methname]
            if loader.bound_position < i:
                continue
        def multimethod_loader(i=i, methname=methname):
            """NOT_RPYTHON"""
            # lazily build the gateway the first time the name is looked up
            prefix, list_of_typeorders = sliced_typeorders(
                space.model.typeorder, multimethod, typedef, i, local=local)
            exprargs, expr, miniglobals, fallback = multimethod.install(prefix, list_of_typeorders,
                                                                        baked_perform_call=False,
                                                                        base_typeorder=space.model.typeorder)
            if fallback:
                return None   # skip empty multimethods
            trampoline = make_perform_trampoline(prefix, exprargs, expr, miniglobals,
                                                 multimethod, i,
                                                 allow_NotImplemented_results=True)
            gw = wrap_trampoline_in_gateway(trampoline, methname, multimethod)
            return space.wrap(gw)
        multimethod_loader.bound_position = i   # for the check above
        result[methname] = multimethod_loader
def slicemultimethods(space, typedef):
    """NOT_RPYTHON"""
    result = {}
    # multimethods living on the space.MM container get sliced for typedef
    for mm in hack_out_multimethods(space.MM.__dict__):
        slicemultimethod(space, mm, typedef, result)
    # typedef-local multimethods are installed without slicing
    for mm in typedef.local_multimethods:
        slicemultimethod(space, mm, typedef, result, local=True)
    return result
def multimethods_defined_on(cls):
    """NOT_RPYTHON: enumerate the (multimethod, local_flag) for all the
    multimethods that have an implementation whose first typed argument
    is 'cls'.
    """
    from pypy.objspace.std.objspace import StdObjSpace # XXX for now
    typedef = cls.typedef
    # space-wide multimethods dispatching on cls
    for mm in hack_out_multimethods(StdObjSpace.MM.__dict__):
        if cls in mm.dispatch_tree:
            yield mm, False
    # multimethods registered directly on the typedef
    for mm in typedef.local_multimethods:
        yield mm, True
| Python |
from pypy.objspace.std.stdtypedef import *
from pypy.objspace.std.strutil import string_to_w_long, ParseStringError
from pypy.interpreter.error import OperationError
from pypy.interpreter.gateway import NoneNotWrapped
def descr__new__(space, w_longtype, w_x=0, w_base=NoneNotWrapped):
from pypy.objspace.std.longobject import W_LongObject
w_value = w_x # 'x' is the keyword argument name in CPython
if w_base is None:
# check for easy cases
if isinstance(w_value, W_LongObject):
pass
elif space.is_true(space.isinstance(w_value, space.w_str)):
try:
w_value = string_to_w_long(space, space.str_w(w_value))
except ParseStringError, e:
raise OperationError(space.w_ValueError,
space.wrap(e.msg))
elif space.is_true(space.isinstance(w_value, space.w_unicode)):
try:
from unicodeobject import unicode_to_decimal_w
w_value = string_to_w_long(space, unicode_to_decimal_w(space, w_value))
except ParseStringError, e:
raise OperationError(space.w_ValueError,
space.wrap(e.msg))
else:
# otherwise, use the __long__() method
w_obj = space.long(w_value)
# 'long(x)' should return whatever x.__long__() returned
if space.is_w(w_longtype, space.w_long):
return w_obj
if space.is_true(space.isinstance(w_obj, space.w_long)):
assert isinstance(w_obj, W_LongObject) # XXX this could fail!
# XXX find a way to do that even if w_obj is not a W_LongObject
w_value = w_obj
elif space.is_true(space.isinstance(w_obj, space.w_int)):
intval = space.int_w(w_obj)
w_value = W_LongObject.fromint(space, intval)
else:
raise OperationError(space.w_ValueError,
space.wrap("value can't be converted to long"))
else:
base = space.int_w(w_base)
if space.is_true(space.isinstance(w_value, space.w_unicode)):
from pypy.objspace.std.unicodeobject import unicode_to_decimal_w
s = unicode_to_decimal_w(space, w_value)
else:
try:
s = space.str_w(w_value)
except OperationError, e:
raise OperationError(space.w_TypeError,
space.wrap("long() can't convert non-string "
"with explicit base"))
try:
w_value = string_to_w_long(space, s, base)
except ParseStringError, e:
raise OperationError(space.w_ValueError,
space.wrap(e.msg))
w_obj = space.allocate_instance(W_LongObject, w_longtype)
W_LongObject.__init__(w_obj, w_value.num)
return w_obj
# ____________________________________________________________
long_typedef = StdTypeDef("long",
    __doc__ = '''long(x[, base]) -> integer
Convert a string or number to a long integer, if possible. A floating
point argument will be truncated towards zero (this does not include a
string representation of a floating point number!) When converting a
string, use the optional base. It is an error to supply a base when
converting a non-string.''',
    __new__ = newmethod(descr__new__),
    )
# long provides its own __hash__ (kept consistent with int/float hashing)
long_typedef.custom_hash = True
| Python |
from pypy.objspace.std.objspace import *
from pypy.objspace.std.intobject import W_IntObject
class W_BoolObject(W_Object):
    """Interp-level implementation of the app-level 'bool' type."""
    from pypy.objspace.std.booltype import bool_typedef as typedef
    def __init__(w_self, boolval):
        # normalize any truthy value to exactly True/False
        w_self.boolval = not not boolval
    def __nonzero__(w_self):
        # guard against accidentally using a wrapped bool in an
        # interp-level boolean context
        raise Exception, "you cannot do that, you must use space.is_true()"
    def __repr__(w_self):
        """ representation for debugging purposes """
        return "%s(%s)" % (w_self.__class__.__name__, w_self.boolval)
    def unwrap(w_self, space):
        return w_self.boolval
registerimplementation(W_BoolObject)
# prebuilt singletons: the only two bool instances ever needed
W_BoolObject.w_False = W_BoolObject(False)
W_BoolObject.w_True = W_BoolObject(True)
# bool-to-int delegation requires translating the .boolval attribute
# to an .intval one
def delegate_Bool2IntObject(space, w_bool):
    return W_IntObject(int(w_bool.boolval))
def delegate_Bool2SmallInt(space, w_bool):
    from pypy.objspace.std.smallintobject import W_SmallIntObject
    return W_SmallIntObject(int(w_bool.boolval)) # cannot overflow
def nonzero__Bool(space, w_bool):
    # a bool is its own truth value; no new object needed
    return w_bool
def repr__Bool(space, w_bool):
    """App-level repr() of a bool: 'True' or 'False'."""
    if w_bool.boolval:
        text = 'True'
    else:
        text = 'False'
    return space.wrap(text)
def and__Bool_Bool(space, w_bool1, w_bool2):
    """Bitwise AND of two bools; '&' on Python bools stays a bool."""
    result = w_bool1.boolval & w_bool2.boolval
    return space.newbool(result)

def or__Bool_Bool(space, w_bool1, w_bool2):
    """Bitwise OR of two bools."""
    result = w_bool1.boolval | w_bool2.boolval
    return space.newbool(result)

def xor__Bool_Bool(space, w_bool1, w_bool2):
    """Bitwise XOR of two bools."""
    result = w_bool1.boolval ^ w_bool2.boolval
    return space.newbool(result)
# str() of a bool gives the same text as repr()
str__Bool = repr__Bool
register_all(vars())
| Python |
from pypy.objspace.std.stdtypedef import *
from pypy.interpreter.error import OperationError
from pypy.objspace.std.strutil import string_to_float, ParseStringError
from pypy.objspace.std.strutil import interp_string_to_float
# if True, use interp_string_to_float() (the space-aware parser); the
# False path keeps the older plain string_to_float() around as a fallback
USE_NEW_S2F = True
def descr__new__(space, w_floattype, w_x=0.0):
from pypy.objspace.std.floatobject import W_FloatObject
w_value = w_x # 'x' is the keyword argument name in CPython
if space.is_true(space.isinstance(w_value, space.w_str)):
strvalue = space.str_w(w_value)
try:
if USE_NEW_S2F:
value = interp_string_to_float(space, strvalue)
else:
value = string_to_float(strvalue)
except ParseStringError, e:
raise OperationError(space.w_ValueError,
space.wrap(e.msg))
elif space.is_true(space.isinstance(w_value, space.w_unicode)):
from unicodeobject import unicode_to_decimal_w
strvalue = unicode_to_decimal_w(space, w_value)
try:
if USE_NEW_S2F:
value = interp_string_to_float(space, strvalue)
else:
value = string_to_float(strvalue)
except ParseStringError, e:
raise OperationError(space.w_ValueError,
space.wrap(e.msg))
else:
w_obj = space.float(w_value)
if space.is_w(w_floattype, space.w_float):
return w_obj # 'float(x)' should return
# whatever x.__float__() returned
value = space.float_w(w_obj)
w_obj = space.allocate_instance(W_FloatObject, w_floattype)
W_FloatObject.__init__(w_obj, value)
return w_obj
# ____________________________________________________________
float_typedef = StdTypeDef("float",
    __doc__ = '''float(x) -> floating point number
Convert a string or number to a floating point number, if possible.''',
    __new__ = newmethod(descr__new__),
    )
# float provides its own __hash__ (kept consistent with int/long hashing)
float_typedef.custom_hash = True
| Python |
from pypy.objspace.std.objspace import *
from pypy.objspace.std.noneobject import W_NoneObject
from pypy.rlib.rarithmetic import ovfcheck, ovfcheck_lshift, LONG_BIT, r_uint
from pypy.rlib.rbigint import rbigint
from pypy.objspace.std.inttype import wrapint
"""
In order to have the same behavior running
on CPython, and after RPython translation we use ovfcheck
from rarithmetic to explicitly check for overflows,
something CPython does not do anymore.
"""
class W_IntObject(W_Object):
    """Interp-level implementation of the app-level 'int' type."""
    from pypy.objspace.std.inttype import int_typedef as typedef
    def __init__(w_self, intval):
        # intval: the interp-level machine integer value
        w_self.intval = intval
    def __repr__(w_self):
        """ representation for debugging purposes """
        return "%s(%d)" % (w_self.__class__.__name__, w_self.intval)
    def unwrap(w_self, space):
        # int() makes sure a plain (non-subclass) int is returned
        return int(w_self.intval)
registerimplementation(W_IntObject)
def int_w__Int(space, w_int1):
    # unwrap to a plain interp-level int
    return int(w_int1.intval)
def uint_w__Int(space, w_int1):
    """Unwrap to an unsigned integer; negative values raise ValueError."""
    intval = w_int1.intval
    if intval < 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("cannot convert negative integer to unsigned"))
    else:
        return r_uint(intval)
def bigint_w__Int(space, w_int1):
    # unwrap to an rbigint for arbitrary-precision consumers
    return rbigint.fromint(w_int1.intval)
def repr__Int(space, w_int1):
    """App-level repr() of an int: its decimal representation."""
    return space.wrap(str(w_int1.intval))

# str() of an int is identical to repr()
str__Int = repr__Int
# the six rich comparisons between plain ints, all sharing the same shape
def lt__Int_Int(space, w_int1, w_int2):
    return space.newbool(w_int1.intval < w_int2.intval)

def le__Int_Int(space, w_int1, w_int2):
    return space.newbool(w_int1.intval <= w_int2.intval)

def eq__Int_Int(space, w_int1, w_int2):
    return space.newbool(w_int1.intval == w_int2.intval)

def ne__Int_Int(space, w_int1, w_int2):
    return space.newbool(w_int1.intval != w_int2.intval)

def gt__Int_Int(space, w_int1, w_int2):
    return space.newbool(w_int1.intval > w_int2.intval)

def ge__Int_Int(space, w_int1, w_int2):
    return space.newbool(w_int1.intval >= w_int2.intval)
def hash__Int(space, w_int1):
    # unlike CPython, we don't special-case the value -1 in most of our
    # hash functions, so there is not much sense special-casing it here either.
    # Make sure this is consistent with the hash of floats and longs.
    return int__Int(space, w_int1)
# coerce
def coerce__Int_Int(space, w_int1, w_int2):
    # both arguments are already ints: nothing to convert
    return space.newtuple([w_int1, w_int2])
def add__Int_Int(space, w_int1, w_int2):
    """Add two plain ints; on overflow bounce to the long implementation."""
    x = w_int1.intval
    y = w_int2.intval
    try:
        z = ovfcheck(x + y)
    except OverflowError:
        # FailedToImplement lets dispatch retry the operation with longs
        raise FailedToImplement(space.w_OverflowError,
                                space.wrap("integer addition"))
    return wrapint(space, z)

def sub__Int_Int(space, w_int1, w_int2):
    """Subtract two plain ints; on overflow bounce to longs."""
    x = w_int1.intval
    y = w_int2.intval
    try:
        z = ovfcheck(x - y)
    except OverflowError:
        # bugfix: the message used to be misspelled "substraction"
        raise FailedToImplement(space.w_OverflowError,
                                space.wrap("integer subtraction"))
    return wrapint(space, z)

def mul__Int_Int(space, w_int1, w_int2):
    """Multiply two plain ints; on overflow bounce to longs."""
    x = w_int1.intval
    y = w_int2.intval
    try:
        z = ovfcheck(x * y)
    except OverflowError:
        raise FailedToImplement(space.w_OverflowError,
                                space.wrap("integer multiplication"))
    return wrapint(space, z)
def _floordiv(space, w_int1, w_int2):
    """Floor division helper shared by div and floordiv."""
    x = w_int1.intval
    y = w_int2.intval
    try:
        z = ovfcheck(x // y)
    except ZeroDivisionError:
        raise OperationError(space.w_ZeroDivisionError,
                             space.wrap("integer division by zero"))
    except OverflowError:
        # only (-maxint-1) // -1 can overflow; retry with longs
        raise FailedToImplement(space.w_OverflowError,
                                space.wrap("integer division"))
    return wrapint(space, z)
def _truediv(space, w_int1, w_int2):
    # XXX how to do delegation to float elegantly?
    # avoiding a general space.div operation which pulls
    # the whole interpreter in.
    # Instead, we delegate to long for now.
    raise FailedToImplement(space.w_TypeError,
                            space.wrap("integer division"))
def mod__Int_Int(space, w_int1, w_int2):
    """Modulo of two plain ints."""
    x = w_int1.intval
    y = w_int2.intval
    try:
        z = ovfcheck(x % y)
    except ZeroDivisionError:
        raise OperationError(space.w_ZeroDivisionError,
                             space.wrap("integer modulo by zero"))
    except OverflowError:
        raise FailedToImplement(space.w_OverflowError,
                                space.wrap("integer modulo"))
    return wrapint(space, z)
def divmod__Int_Int(space, w_int1, w_int2):
    """divmod() on two plain ints; the quotient check covers overflow."""
    x = w_int1.intval
    y = w_int2.intval
    try:
        z = ovfcheck(x // y)
    except ZeroDivisionError:
        raise OperationError(space.w_ZeroDivisionError,
                             space.wrap("integer divmod by zero"))
    except OverflowError:
        raise FailedToImplement(space.w_OverflowError,
                                space.wrap("integer modulo"))
    # no overflow possible
    m = x % y
    w = space.wrap
    return space.newtuple([w(z), w(m)])
def div__Int_Int(space, w_int1, w_int2):
    # plain '/' on ints is floor division (Python 2 semantics)
    return _floordiv(space, w_int1, w_int2)
floordiv__Int_Int = _floordiv
truediv__Int_Int = _truediv
# helper for pow()
def _impl_int_int_pow(space, iv, iw, iz=0):
    """Square-and-multiply computation of iv ** iw (modulo iz when iz != 0).

    Negative exponents are bounced via FailedToImplement (the result would
    be a float); overflow is bounced so longs can take over.
    """
    if iw < 0:
        if iz != 0:
            raise OperationError(space.w_TypeError,
                         space.wrap("pow() 2nd argument "
                 "cannot be negative when 3rd argument specified"))
        ## bounce it, since it always returns float
        raise FailedToImplement(space.w_ValueError,
                                space.wrap("integer exponentiation"))
    temp = iv
    ix = 1
    try:
        while iw > 0:
            if iw & 1:
                ix = ovfcheck(ix*temp)
            iw >>= 1   #/* Shift exponent down by 1 bit */
            if iw==0:
                break
            temp = ovfcheck(temp*temp) #/* Square the value of temp */
            if iz:
                #/* If we did a multiplication, perform a modulo */
                ix = ix % iz;
                temp = temp % iz;
        if iz:
            # reduce once more: the loop exits via 'break' before the
            # in-loop modulo has run on the final multiplication
            ix = ix % iz
    except OverflowError:
        raise FailedToImplement(space.w_OverflowError,
                                space.wrap("integer exponentiation"))
    return wrapint(space, ix)
def pow__Int_Int_Int(space, w_int1, w_int2, w_int3):
    """Three-argument pow(); the modulus must not be zero."""
    x = w_int1.intval
    y = w_int2.intval
    z = w_int3.intval
    if z == 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("pow() 3rd argument cannot be 0"))
    return _impl_int_int_pow(space, x, y, z)
def pow__Int_Int_None(space, w_int1, w_int2, w_int3):
    # two-argument pow(): the None placeholder (w_int3) is ignored
    x = w_int1.intval
    y = w_int2.intval
    return _impl_int_int_pow(space, x, y)
def neg__Int(space, w_int1):
    """Arithmetic negation; only the most negative int overflows."""
    a = w_int1.intval
    try:
        x = ovfcheck(-a)
    except OverflowError:
        raise FailedToImplement(space.w_OverflowError,
                                space.wrap("integer negation"))
    return wrapint(space, x)
# pos__Int is supposed to do nothing, unless it has
# a derived integer object, where it should return
# an exact one.
def pos__Int(space, w_int1):
    return int__Int(space, w_int1)
def abs__Int(space, w_int1):
    # delegate to pos or neg depending on sign
    if w_int1.intval >= 0:
        return pos__Int(space, w_int1)
    else:
        return neg__Int(space, w_int1)
def nonzero__Int(space, w_int1):
    return space.newbool(w_int1.intval != 0)
def invert__Int(space, w_int1):
    # ~x can never overflow
    x = w_int1.intval
    a = ~x
    return wrapint(space, a)
def lshift__Int_Int(space, w_int1, w_int2):
    """a << b with overflow checking; bounces to longs on overflow."""
    a = w_int1.intval
    b = w_int2.intval
    if b < 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("negative shift count"))
    if a == 0 or b == 0:
        # shifting zero, or by zero: the value is unchanged
        return int__Int(space, w_int1)
    if b >= LONG_BIT:
        # every non-zero value shifted this far overflows a machine word
        raise FailedToImplement(space.w_OverflowError,
                                space.wrap("integer left shift"))
    ##
    ## XXX please! have a look into pyport.h and see how to implement
    ## the overflow checking, using macro Py_ARITHMETIC_RIGHT_SHIFT
    ## we *assume* that the overflow checking is done correctly
    ## in the code generator, which is not trivial!
    ## XXX also note that Python 2.3 returns a long and never raises
    ##     OverflowError.
    try:
        c = ovfcheck_lshift(a, b)
        ## the test in C code is
        ## if (a != Py_ARITHMETIC_RIGHT_SHIFT(long, c, b)) {
        ##     if (PyErr_Warn(PyExc_FutureWarning,
        # and so on
    except OverflowError:
        raise FailedToImplement(space.w_OverflowError,
                                space.wrap("integer left shift"))
    return wrapint(space, c)
def rshift__Int_Int(space, w_int1, w_int2):
    """a >> b; large shift counts saturate to 0 or -1, never overflow."""
    a = w_int1.intval
    b = w_int2.intval
    if b < 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("negative shift count"))
    if a == 0 or b == 0:
        return int__Int(space, w_int1)
    if b >= LONG_BIT:
        # all significant bits shifted out: only the sign remains
        if a < 0:
            a = -1
        else:
            a = 0
    else:
        ## please look into pyport.h, how >> should be implemented!
        ## a = Py_ARITHMETIC_RIGHT_SHIFT(long, a, b);
        a = a >> b
    return wrapint(space, a)
def and__Int_Int(space, w_int1, w_int2):
    """Bitwise AND of two plain ints; cannot overflow."""
    return wrapint(space, w_int1.intval & w_int2.intval)

def xor__Int_Int(space, w_int1, w_int2):
    """Bitwise XOR of two plain ints; cannot overflow."""
    return wrapint(space, w_int1.intval ^ w_int2.intval)

def or__Int_Int(space, w_int1, w_int2):
    """Bitwise OR of two plain ints; cannot overflow."""
    return wrapint(space, w_int1.intval | w_int2.intval)
# int__Int is supposed to do nothing, unless it has
# a derived integer object, where it should return
# an exact one.
def int__Int(space, w_int1):
    # exact int: return it unchanged; subclass: re-wrap as a plain int
    if space.is_w(space.type(w_int1), space.w_int):
        return w_int1
    a = w_int1.intval
    return wrapint(space, a)
def index__Int(space, w_int1):
    # __index__ has the same semantics as __int__ here
    return int__Int(space, w_int1)
def float__Int(space, w_int1):
    # plain int -> float conversion (may round for very large values on
    # 64-bit platforms, as in CPython)
    a = w_int1.intval
    x = float(a)
    return space.newfloat(x)
def oct__Int(space, w_int1):
    # reuse the underlying oct() formatting
    return space.wrap(oct(w_int1.intval))
def hex__Int(space, w_int1):
    return space.wrap(hex(w_int1.intval))
def getnewargs__Int(space, w_int1):
    # used by copy/pickle: arguments that rebuild an equal int
    return space.newtuple([wrapint(space, w_int1.intval)])
register_all(vars())
| Python |
# -*- coding: latin-1 -*-
from pypy.objspace.std.objspace import *
from pypy.interpreter import gateway
from pypy.rlib.rarithmetic import ovfcheck, _hash_string
from pypy.rlib.objectmodel import we_are_translated
from pypy.objspace.std.inttype import wrapint
from pypy.objspace.std.sliceobject import W_SliceObject
from pypy.objspace.std import slicetype
from pypy.objspace.std.listobject import W_ListObject
from pypy.objspace.std.noneobject import W_NoneObject
from pypy.objspace.std.tupleobject import W_TupleObject
from pypy.objspace.std.stringtype import sliced, joined, wrapstr, wrapchar, \
stringendswith, stringstartswith
from pypy.objspace.std.formatting import mod_format
class W_StringObject(W_Object):
    """Interp-level implementation of the app-level 'str' type, holding
    the characters in a plain interp-level string."""
    from pypy.objspace.std.stringtype import str_typedef as typedef
    def __init__(w_self, str):
        # NOTE: the parameter shadows the builtin 'str'; kept for
        # compatibility with existing keyword callers
        w_self._value = str
    def __repr__(w_self):
        """ representation for debugging purposes """
        return "%s(%r)" % (w_self.__class__.__name__, w_self._value)
    def unwrap(w_self, space):
        return w_self._value
registerimplementation(W_StringObject)
# prebuilt instances: the empty string and all 256 one-character strings
W_StringObject.EMPTY = W_StringObject('')
W_StringObject.PREBUILT = [W_StringObject(chr(i)) for i in range(256)]
del i
def _is_generic(space, w_self, fun):
    """Apply the character predicate `fun` to every char of w_self:
    True only if the string is non-empty and all chars satisfy it."""
    v = w_self._value
    if len(v) == 0:
        return space.w_False
    if len(v) == 1:
        # fast path for single characters
        c = v[0]
        return space.newbool(fun(c))
    else:
        for idx in range(len(v)):
            if not fun(v[idx]):
                return space.w_False
        return space.w_True
# the annotator generates one specialized copy per predicate
_is_generic._annspecialcase_ = "specialize:arg(2)"
def _upper(ch):
if ch.islower():
o = ord(ch) - 32
return chr(o)
else:
return ch
def _lower(ch):
if ch.isupper():
o = ord(ch) + 32
return chr(o)
else:
return ch
# character predicates handed to _is_generic (kept as separate callables
# so that _is_generic can be specialized on each one)
_isspace = lambda c: c.isspace()
_isdigit = lambda c: c.isdigit()
_isalpha = lambda c: c.isalpha()
_isalnum = lambda c: c.isalnum()
def str_isspace__String(space, w_self):
    """S.isspace() -> bool"""
    return _is_generic(space, w_self, _isspace)
def str_isdigit__String(space, w_self):
    """S.isdigit() -> bool"""
    return _is_generic(space, w_self, _isdigit)
def str_isalpha__String(space, w_self):
    """S.isalpha() -> bool"""
    return _is_generic(space, w_self, _isalpha)
def str_isalnum__String(space, w_self):
    """S.isalnum() -> bool"""
    return _is_generic(space, w_self, _isalnum)
def str_isupper__String(space, w_self):
    """Return True if all cased characters in S are uppercase and there is
    at least one cased character in S, False otherwise."""
    v = w_self._value
    if len(v) == 1:
        # single character: answer directly
        return space.newbool(v[0].isupper())
    cased = False
    for ch in v:
        if ch.islower():
            return space.w_False
        if not cased and ch.isupper():
            cased = True
    return space.newbool(cased)
def str_islower__String(space, w_self):
    """Return True if all cased characters in S are lowercase and there is
    at least one cased character in S, False otherwise."""
    v = w_self._value
    if len(v) == 1:
        # single character: answer directly
        return space.newbool(v[0].islower())
    cased = False
    for ch in v:
        if ch.isupper():
            return space.w_False
        if not cased and ch.islower():
            cased = True
    return space.newbool(cased)
def str_istitle__String(space, w_self):
    """Return True if S is a titlecased string and there is at least one
    character in S, i.e. uppercase characters may only follow uncased
    characters and lowercase characters only cased ones. Return False
    otherwise."""
    cased = False
    previous_is_cased = False
    for ch in w_self._value:
        if ch.isupper():
            if previous_is_cased:
                # an uppercase letter inside a word is not titlecase
                return space.w_False
            previous_is_cased = True
            cased = True
        elif ch.islower():
            if not previous_is_cased:
                # a word must not start with a lowercase letter
                return space.w_False
            cased = True
        else:
            previous_is_cased = False
    return space.newbool(cased)
def str_upper__String(space, w_self):
    """Return a copy of the string with lowercase letters upcased
    (ASCII offset trick, matching _upper)."""
    chars = []
    for ch in w_self._value:
        if ch.islower():
            ch = chr(ord(ch) - 32)
        chars.append(ch)
    return space.wrap("".join(chars))
def str_lower__String(space, w_self):
    """Return a copy of the string with uppercase letters downcased
    (ASCII offset trick, matching _lower)."""
    chars = []
    for ch in w_self._value:
        if ch.isupper():
            ch = chr(ord(ch) + 32)
        chars.append(ch)
    return space.wrap("".join(chars))
def str_swapcase__String(space, w_self):
    """Return a copy of the string with the case of every ASCII letter
    flipped; other characters are unchanged."""
    chars = []
    for ch in w_self._value:
        if ch.isupper():
            chars.append(chr(ord(ch) + 32))
        elif ch.islower():
            chars.append(chr(ord(ch) - 32))
        else:
            chars.append(ch)
    return space.wrap("".join(chars))
def str_capitalize__String(space, w_self):
    """Return a copy with the first character upcased and all following
    characters downcased (ASCII offset trick)."""
    value = w_self._value
    if not value:
        return space.wrap("")
    parts = []
    first = value[0]
    if first.islower():
        first = chr(ord(first) - 32)
    parts.append(first)
    for ch in value[1:]:
        if ch.isupper():
            ch = chr(ord(ch) + 32)
        parts.append(ch)
    return space.wrap("".join(parts))
def str_title__String(space, w_self):
    """Titlecase: upcase each letter that follows a non-letter, downcase
    letters inside words (ASCII offset trick)."""
    out = []
    prev_letter = ' '
    for ch in w_self._value:
        if prev_letter.isalpha():
            # inside a word: force lowercase
            if ch.isupper():
                ch = chr(ord(ch) + 32)
        else:
            # start of a word: force uppercase
            if ch.islower():
                ch = chr(ord(ch) - 32)
        out.append(ch)
        prev_letter = ch
    return space.wrap("".join(out))
def str_split__String_None_ANY(space, w_self, w_none, w_maxsplit=-1):
    """str.split(None, maxsplit): split on runs of whitespace; at most
    `maxsplit` splits are performed when maxsplit >= 0."""
    maxsplit = space.int_w(w_maxsplit)
    res_w = []
    value = w_self._value
    length = len(value)
    i = 0
    while True:
        # find the beginning of the next word
        while i < length:
            if not value[i].isspace():
                break   # found
            i += 1
        else:
            break  # end of string, finished
        # find the end of the word
        if maxsplit == 0:
            j = length # take all the rest of the string
        else:
            j = i + 1
            while j < length and not value[j].isspace():
                j += 1
            maxsplit -= 1 # NB. if it's already < 0, it stays < 0
        # the word is value[i:j]
        res_w.append(sliced(space, value, i, j))
        # continue to look from the character following the space after the word
        i = j + 1
    return space.newlist(res_w)
def str_split__String_String_ANY(space, w_self, w_by, w_maxsplit=-1):
    """str.split(sep, maxsplit): split around occurrences of `sep`;
    an empty separator raises ValueError."""
    maxsplit = space.int_w(w_maxsplit)
    res_w = []
    start = 0
    value = w_self._value
    by = w_by._value
    bylen = len(by)
    if bylen == 0:
        raise OperationError(space.w_ValueError, space.wrap("empty separator"))
    while maxsplit != 0:
        next = value.find(by, start)
        if next < 0:
            break
        res_w.append(sliced(space, value, start, next))
        start = next + bylen
        maxsplit -= 1 # NB. if it's already < 0, it stays < 0
    # append the trailing piece after the last separator found
    res_w.append(sliced(space, value, start, len(value)))
    return space.newlist(res_w)
def str_rsplit__String_None_ANY(space, w_self, w_none, w_maxsplit=-1):
    """S.rsplit(None, maxsplit): like split(None, ...) but scanning
    from the right; the result list is built backwards and reversed."""
    maxsplit = space.int_w(w_maxsplit)
    res_w = []
    value = w_self._value
    i = len(value)-1
    while True:
        # starting from the end, find the end of the next word
        while i >= 0:
            if not value[i].isspace():
                break   # found
            i -= 1
        else:
            break  # end of string, finished
        # find the start of the word
        # (more precisely, 'j' will be the space character before the word)
        if maxsplit == 0:
            j = -1   # take all the rest of the string
        else:
            j = i - 1
            while j >= 0 and not value[j].isspace():
                j -= 1
            maxsplit -= 1   # NB. if it's already < 0, it stays < 0
        # the word is value[j+1:i+1]
        j1 = j + 1
        assert j1 >= 0
        res_w.append(sliced(space, value, j1, i+1))
        # continue to look from the character before the space before the word
        i = j - 1
    res_w.reverse()
    return space.newlist(res_w)
def str_rsplit__String_String_ANY(space, w_self, w_by, w_maxsplit=-1):
    """S.rsplit(sep, maxsplit): split around the separator scanning
    from the right; maxsplit < 0 means unlimited."""
    maxsplit = space.int_w(w_maxsplit)
    res_w = []
    value = w_self._value
    end = len(value)
    by = w_by._value
    bylen = len(by)
    if bylen == 0:
        raise OperationError(space.w_ValueError, space.wrap("empty separator"))
    while maxsplit != 0:
        next = value.rfind(by, 0, end)
        if next < 0:
            break
        res_w.append(sliced(space, value, next+bylen, end))
        end = next
        maxsplit -= 1   # NB. if it's already < 0, it stays < 0
    res_w.append(sliced(space, value, 0, end))
    res_w.reverse()
    return space.newlist(res_w)
def str_join__String_ANY(space, w_self, w_list):
    """S.join(sequence): concatenate the strings of 'sequence' with S
    as separator.

    If any item is a unicode the whole join is delegated to
    unicode.join; a non-string item raises TypeError.

    Cleanup: the locals 'str_w', 'listlen' and 'reslen' in the original
    were assigned but never used; they are removed here.
    """
    list_w = space.unpackiterable(w_list)
    if list_w:
        self = w_self._value
        l = []
        for i in range(len(list_w)):
            w_s = list_w[i]
            if not space.is_true(space.isinstance(w_s, space.w_str)):
                if space.is_true(space.isinstance(w_s, space.w_unicode)):
                    # one unicode item promotes the whole join to unicode
                    w_u = space.call_function(space.w_unicode, w_self)
                    return space.call_method(w_u, "join", space.newlist(list_w))
                raise OperationError(
                    space.w_TypeError,
                    space.wrap("sequence item %d: expected string, %s "
                               "found" % (i,
                                          space.type(w_s).getname(space, '?'))))
            l.append(space.str_w(w_s))
        return space.wrap(self.join(l))
    else:
        return W_StringObject.EMPTY
def str_rjust__String_ANY_ANY(space, w_self, w_arg, w_fillchar):
    """S.rjust(width[, fillchar]): pad on the left with fillchar up to
    the requested width; fillchar must be a single character."""
    u_arg = space.int_w(w_arg)
    u_self = w_self._value
    fillchar = space.str_w(w_fillchar)
    if len(fillchar) != 1:
        raise OperationError(space.w_TypeError,
            space.wrap("rjust() argument 2 must be a single character"))
    d = u_arg - len(u_self)
    if d>0:
        fillchar = fillchar[0]    # annotator hint: it's a single character
        u_self = d * fillchar + u_self
    return space.wrap(u_self)

def str_ljust__String_ANY_ANY(space, w_self, w_arg, w_fillchar):
    """S.ljust(width[, fillchar]): pad on the right with fillchar up to
    the requested width; fillchar must be a single character."""
    u_self = w_self._value
    u_arg = space.int_w(w_arg)
    fillchar = space.str_w(w_fillchar)
    if len(fillchar) != 1:
        raise OperationError(space.w_TypeError,
            space.wrap("ljust() argument 2 must be a single character"))
    d = u_arg - len(u_self)
    if d>0:
        fillchar = fillchar[0]    # annotator hint: it's a single character
        u_self += d * fillchar
    return space.wrap(u_self)
def _convert_idx_params(space, w_self, w_sub, w_start, w_end):
    """Unwrap the (self, sub, start, end) arguments of find/index-style
    methods, clamping start/end into [0, len(self)]."""
    self = w_self._value
    sub = w_sub._value
    start = slicetype.adapt_bound(space, len(self), w_start)
    end = slicetype.adapt_bound(space, len(self), w_end)
    assert start >= 0   # annotator hints
    assert end >= 0
    return (self, sub, start, end)
def contains__String_String(space, w_self, w_sub):
    """'sub in self' via plain substring search."""
    self = w_self._value
    sub = w_sub._value
    return space.newbool(self.find(sub) >= 0)

def str_find__String_String_ANY_ANY(space, w_self, w_sub, w_start, w_end):
    """S.find(sub[, start[, end]]) -> lowest index or -1."""
    (self, sub, start, end) = _convert_idx_params(space, w_self, w_sub, w_start, w_end)
    res = self.find(sub, start, end)
    return space.wrap(res)

def str_rfind__String_String_ANY_ANY(space, w_self, w_sub, w_start, w_end):
    """S.rfind(sub[, start[, end]]) -> highest index or -1."""
    (self, sub, start, end) = _convert_idx_params(space, w_self, w_sub, w_start, w_end)
    res = self.rfind(sub, start, end)
    return space.wrap(res)
def str_partition__String_String(space, w_self, w_sub):
    """S.partition(sep) -> (head, sep, tail) around the first
    occurrence of sep, or (S, '', '') when sep is not found."""
    self = w_self._value
    sub = w_sub._value
    if not sub:
        raise OperationError(space.w_ValueError,
                             space.wrap("empty separator"))
    pos = self.find(sub)
    if pos == -1:
        return space.newtuple([w_self, space.wrap(''), space.wrap('')])
    else:
        return space.newtuple([sliced(space, self, 0, pos),
                               w_sub,
                               sliced(space, self, pos+len(sub), len(self))])

def str_rpartition__String_String(space, w_self, w_sub):
    """S.rpartition(sep) -> (head, sep, tail) around the last
    occurrence of sep, or ('', '', S) when sep is not found."""
    self = w_self._value
    sub = w_sub._value
    if not sub:
        raise OperationError(space.w_ValueError,
                             space.wrap("empty separator"))
    pos = self.rfind(sub)
    if pos == -1:
        return space.newtuple([space.wrap(''), space.wrap(''), w_self])
    else:
        return space.newtuple([sliced(space, self, 0, pos),
                               w_sub,
                               sliced(space, self, pos+len(sub), len(self))])
def str_index__String_String_ANY_ANY(space, w_self, w_sub, w_start, w_end):
    """S.index(sub[, start[, end]]): like find() but raising ValueError
    when the substring is absent."""
    (self, sub, start, end) = _convert_idx_params(space, w_self, w_sub, w_start, w_end)
    res = self.find(sub, start, end)
    if res < 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("substring not found in string.index"))
    return space.wrap(res)

def str_rindex__String_String_ANY_ANY(space, w_self, w_sub, w_start, w_end):
    """S.rindex(sub[, start[, end]]): like rfind() but raising
    ValueError when the substring is absent."""
    (self, sub, start, end) = _convert_idx_params(space, w_self, w_sub, w_start, w_end)
    res = self.rfind(sub, start, end)
    if res < 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("substring not found in string.rindex"))
    return space.wrap(res)
def str_replace__String_String_String_ANY(space, w_self, w_sub, w_by, w_maxsplit=-1):
    """S.replace(sub, by[, maxsplit]): replace up to maxsplit
    occurrences (all when maxsplit < 0).

    An empty 'sub' inserts 'by' between every character and at both
    ends, matching CPython's behaviour.
    """
    input = w_self._value
    sub = w_sub._value
    by = w_by._value
    maxsplit = space.int_w(w_maxsplit)
    if maxsplit == 0:
        return space.wrap(input)
    if not sub:
        # empty pattern: interleave 'by' around the first 'upper' chars
        upper = len(input)
        if maxsplit > 0 and maxsplit < upper + 2:
            upper = maxsplit - 1
        assert upper >= 0
        substrings = [""]
        for i in range(upper):
            c = input[i]
            substrings.append(c)
        substrings.append(input[upper:])
        return space.wrap(by.join(substrings))
    startidx = 0
    substrings = []
    foundidx = input.find(sub, startidx)
    while foundidx >= 0 and maxsplit != 0:
        substrings.append(input[startidx:foundidx])
        startidx = foundidx + len(sub)
        foundidx = input.find(sub, startidx)
        maxsplit = maxsplit - 1
    substrings.append(input[startidx:])
    return space.wrap(by.join(substrings))
def _strip(space, w_self, w_chars, left, right):
    "internal function called by str_xstrip methods"
    # Strip the characters of w_chars from the left and/or right end.
    u_self = w_self._value
    u_chars = w_chars._value
    lpos = 0
    rpos = len(u_self)
    if left:
        while lpos < rpos and u_self[lpos] in u_chars:
            lpos += 1
    if right:
        while rpos > lpos and u_self[rpos - 1] in u_chars:
            rpos -= 1
    assert rpos >= lpos    # annotator hint, don't remove
    return sliced(space, u_self, lpos, rpos)

def _strip_none(space, w_self, left, right):
    "internal function called by str_xstrip methods"
    # Same as _strip but stripping whitespace (chars argument omitted).
    u_self = w_self._value
    lpos = 0
    rpos = len(u_self)
    if left:
        while lpos < rpos and u_self[lpos].isspace():
            lpos += 1
    if right:
        while rpos > lpos and u_self[rpos - 1].isspace():
            rpos -= 1
    assert rpos >= lpos    # annotator hint, don't remove
    return sliced(space, u_self, lpos, rpos)
def str_strip__String_String(space, w_self, w_chars):
    """S.strip(chars): strip the given characters from both ends."""
    return _strip(space, w_self, w_chars, left=1, right=1)

def str_strip__String_None(space, w_self, w_chars):
    """S.strip(): strip whitespace from both ends."""
    return _strip_none(space, w_self, left=1, right=1)

def str_rstrip__String_String(space, w_self, w_chars):
    """S.rstrip(chars): strip the given characters from the right."""
    return _strip(space, w_self, w_chars, left=0, right=1)

def str_rstrip__String_None(space, w_self, w_chars):
    """S.rstrip(): strip whitespace from the right."""
    return _strip_none(space, w_self, left=0, right=1)

def str_lstrip__String_String(space, w_self, w_chars):
    """S.lstrip(chars): strip the given characters from the left."""
    return _strip(space, w_self, w_chars, left=1, right=0)

def str_lstrip__String_None(space, w_self, w_chars):
    """S.lstrip(): strip whitespace from the left."""
    return _strip_none(space, w_self, left=1, right=0)
def str_center__String_ANY_ANY(space, w_self, w_arg, w_fillchar):
    """S.center(width[, fillchar]): center S, padding both sides with
    fillchar (the extra character, if any, goes to the right)."""
    u_self = w_self._value
    u_arg = space.int_w(w_arg)
    fillchar = space.str_w(w_fillchar)
    if len(fillchar) != 1:
        raise OperationError(space.w_TypeError,
            space.wrap("center() argument 2 must be a single character"))
    d = u_arg - len(u_self)
    if d>0:
        offset = d//2
        fillchar = fillchar[0]    # annotator hint: it's a single character
        u_centered = offset * fillchar + u_self + (d - offset) * fillchar
    else:
        u_centered = u_self
    return wrapstr(space, u_centered)
def str_count__String_String_ANY_ANY(space, w_self, w_arg, w_start, w_end):
    """S.count(sub[, start[, end]]): number of non-overlapping
    occurrences of sub in S[start:end]."""
    u_self, u_arg, u_start, u_end = _convert_idx_params(space, w_self, w_arg,
                                                        w_start, w_end)
    return wrapint(space, u_self.count(u_arg, u_start, u_end))

def str_endswith__String_String_ANY_ANY(space, w_self, w_suffix, w_start, w_end):
    """S.endswith(suffix[, start[, end]])."""
    (u_self, suffix, start, end) = _convert_idx_params(space, w_self,
                                                       w_suffix, w_start, w_end)
    return space.newbool(stringendswith(u_self, suffix, start, end))

def str_endswith__String_Tuple_ANY_ANY(space, w_self, w_suffixes, w_start, w_end):
    """S.endswith(tuple_of_suffixes[, start[, end]]): True if any
    suffix matches."""
    # the empty-string sub is only used to get start/end clamped
    (u_self, _, start, end) = _convert_idx_params(space, w_self,
                                                  space.wrap(''), w_start, w_end)
    for w_suffix in space.unpacktuple(w_suffixes):
        suffix = space.str_w(w_suffix)
        if stringendswith(u_self, suffix, start, end):
            return space.w_True
    return space.w_False

def str_startswith__String_String_ANY_ANY(space, w_self, w_prefix, w_start, w_end):
    """S.startswith(prefix[, start[, end]])."""
    (u_self, prefix, start, end) = _convert_idx_params(space, w_self,
                                                       w_prefix, w_start, w_end)
    return space.newbool(stringstartswith(u_self, prefix, start, end))

def str_startswith__String_Tuple_ANY_ANY(space, w_self, w_prefixes, w_start, w_end):
    """S.startswith(tuple_of_prefixes[, start[, end]]): True if any
    prefix matches."""
    (u_self, _, start, end) = _convert_idx_params(space, w_self, space.wrap(''),
                                                  w_start, w_end)
    for w_prefix in space.unpacktuple(w_prefixes):
        prefix = space.str_w(w_prefix)
        if stringstartswith(u_self, prefix, start, end):
            return space.w_True
    return space.w_False
def _tabindent(u_token, u_tabsize):
"calculates distance behind the token to the next tabstop"
distance = u_tabsize
if u_token:
distance = 0
offset = len(u_token)
while 1:
#no sophisticated linebreak support now, '\r' just for passing adapted CPython test
if u_token[offset-1] == "\n" or u_token[offset-1] == "\r":
break;
distance += 1
offset -= 1
if offset == 0:
break
#the same like distance = len(u_token) - (offset + 1)
#print '<offset:%d distance:%d tabsize:%d token:%s>' % (offset, distance, u_tabsize, u_token)
distance = (u_tabsize-distance) % u_tabsize
if distance == 0:
distance=u_tabsize
return distance
def str_expandtabs__String_ANY(space, w_self, w_tabsize):
    """S.expandtabs(tabsize): replace every tab by the number of spaces
    _tabindent computes for the text preceding it."""
    u_self = w_self._value
    u_tabsize = space.int_w(w_tabsize)
    u_expanded = ""
    if u_self:
        split = u_self.split("\t")    # XXX use pypy split
        u_expanded = oldtoken = split.pop(0)
        for token in split:
            u_expanded += " " * _tabindent(oldtoken, u_tabsize) + token
            oldtoken = token
    return wrapstr(space, u_expanded)
def str_splitlines__String_ANY(space, w_self, w_keepends):
    """S.splitlines(keepends): split at LF, CR and CRLF boundaries,
    optionally keeping the line-break characters in the pieces."""
    data = w_self._value
    u_keepends = space.int_w(w_keepends)   # truth value, but type checked
    selflen = len(data)
    strs_w = []
    i = j = 0
    while i < selflen:
        # Find a line and append it
        while i < selflen and data[i] != '\n' and data[i] != '\r':
            i += 1
        # Skip the line break reading CRLF as one line break
        eol = i
        i += 1
        if i < selflen and data[i-1] == '\r' and data[i] == '\n':
            i += 1
        if u_keepends:
            eol = i
        strs_w.append(sliced(space, data, j, eol))
        j = i
    if j < selflen:
        # trailing piece with no final line break
        strs_w.append(sliced(space, data, j, len(data)))
    return space.newlist(strs_w)
def str_zfill__String_ANY(space, w_self, w_width):
    """S.zfill(width): pad with leading zeroes up to the given width,
    keeping a leading '+'/'-' sign in front of the zeroes."""
    input = w_self._value
    width = space.int_w(w_width)
    if len(input) >= width:
        # cannot return w_self, in case it is a subclass of str
        return space.wrap(input)
    buf = [' '] * width
    if len(input) > 0 and (input[0] == '+' or input[0] == '-'):
        buf[0] = input[0]
        start = 1
        middle = width - len(input) + 1
    else:
        start = 0
        middle = width - len(input)
    # zeroes between the (optional) sign and the digits
    for i in range(start, middle):
        buf[i] = '0'
    # copy the remainder of the original string
    for i in range(middle, width):
        buf[i] = input[start]
        start = start + 1
    return space.wrap("".join(buf))
def str_w__String(space, w_str):
    """Unwrap to an interpreter-level string."""
    return w_str._value

def hash__String(space, w_str):
    """hash(s), kept identical between translated and untranslated
    runs so prebuilt dictionaries stay usable after translation."""
    s = w_str._value
    if we_are_translated():
        x = hash(s)             # to use the hash cache in rpython strings
    else:
        x = _hash_string(s)     # to make sure we get the same hash as rpython
        # (otherwise translation will freeze W_DictObjects where we can't find
        # the keys any more!)
    return wrapint(space, x)
def lt__String_String(space, w_str1, w_str2):
    """'<' between two plain strings."""
    return space.newbool(w_str1._value < w_str2._value)

def le__String_String(space, w_str1, w_str2):
    """'<=' between two plain strings."""
    return space.newbool(w_str1._value <= w_str2._value)

def eq__String_String(space, w_str1, w_str2):
    """'==' between two plain strings."""
    return space.newbool(w_str1._value == w_str2._value)

def ne__String_String(space, w_str1, w_str2):
    """'!=' between two plain strings."""
    return space.newbool(w_str1._value != w_str2._value)

def gt__String_String(space, w_str1, w_str2):
    """'>' between two plain strings."""
    return space.newbool(w_str1._value > w_str2._value)

def ge__String_String(space, w_str1, w_str2):
    """'>=' between two plain strings."""
    return space.newbool(w_str1._value >= w_str2._value)
def getitem__String_ANY(space, w_str, w_index):
    """self[index] with negative-index support; raises IndexError when
    out of range."""
    ival = space.getindex_w(w_index, space.w_IndexError, "string index")
    str = w_str._value
    slen = len(str)
    if ival < 0:
        ival += slen
    if ival < 0 or ival >= slen:
        # raise with a pre-built exception instance as the value
        exc = space.call_function(space.w_IndexError,
                                  space.wrap("string index out of range"))
        raise OperationError(space.w_IndexError, exc)
    return wrapchar(space, str[ival])
def getitem__String_Slice(space, w_str, w_slice):
    """self[slice]: the empty singleton for a zero-length result, a
    cheap slice for step 1, otherwise a character-by-character copy.

    Cleanup: the local 'w = space.wrap' in the original was assigned
    but never used; it is removed here.
    """
    s = w_str._value
    length = len(s)
    start, stop, step, sl = w_slice.indices4(space, length)
    if sl == 0:
        return W_StringObject.EMPTY
    elif step == 1:
        assert start >= 0 and stop >= 0   # annotator hint
        return sliced(space, s, start, stop)
    else:
        str = "".join([s[start + i*step] for i in range(sl)])
        return wrapstr(space, str)
def mul_string_times(space, w_str, w_times):
    """Common helper for 'str * n' and 'n * str': repeat the string,
    guarding against overflow of the result length.  A non-index
    multiplier defers to other multimethod implementations."""
    try:
        mul = space.getindex_w(w_times, space.w_OverflowError)
    except OperationError, e:
        if e.match(space, space.w_TypeError):
            raise FailedToImplement
        raise
    if mul <= 0:
        return W_StringObject.EMPTY
    input = w_str._value
    input_len = len(input)
    try:
        # buflen itself is unused; ovfcheck is called for the
        # overflow-detection side effect only
        buflen = ovfcheck(mul * input_len)
    except OverflowError:
        raise OperationError(
            space.w_OverflowError,
            space.wrap("repeated string is too long: %d %d" % (input_len, mul)))
    # XXX maybe only do this when input has a big length
    return joined(space, [input] * mul)
def mul__String_ANY(space, w_str, w_times):
    """str * n"""
    return mul_string_times(space, w_str, w_times)

def mul__ANY_String(space, w_times, w_str):
    """n * str"""
    return mul_string_times(space, w_str, w_times)

def add__String_String(space, w_left, w_right):
    """str + str, producing a lazy join object."""
    right = w_right._value
    left = w_left._value
    return joined(space, [left, right])

def len__String(space, w_str):
    """len(str)"""
    return space.wrap(len(w_str._value))
def str__String(space, w_str):
    """str(s): return s itself, unless it is a subclass instance."""
    if type(w_str) is W_StringObject:
        return w_str
    return wrapstr(space, w_str._value)

def iter__String(space, w_list):
    """iter(s): generic sequence iterator over the string."""
    from pypy.objspace.std import iterobject
    return iterobject.W_SeqIterObject(w_list)

def ord__String(space, w_str):
    """ord(s): s must be a single character."""
    u_str = w_str._value
    if len(u_str) != 1:
        raise OperationError(
            space.w_TypeError,
            space.wrap("ord() expected a character, but string "
                       "of length %d found"%(len(w_str._value),)))
    return space.wrap(ord(u_str))

def getnewargs__String(space, w_str):
    """__getnewargs__ for pickling: a 1-tuple holding a plain string."""
    return space.newtuple([wrapstr(space, w_str._value)])
def repr__String(space, w_str):
    """repr(s): surround with quotes (preferring single quotes, double
    quotes when s contains a single but no double quote), backslash-
    escape quote/backslash/tab/CR/LF and hex-escape everything outside
    printable ASCII."""
    s = w_str._value
    i = 0
    buf = [' '] * (len(s) * 4 + 2)   # safely overallocate
    quote = "'"
    if quote in s and '"' not in s:
        quote = '"'
    buf[i] = quote
    for c in s:
        i += 1
        bs_char = None   # character quoted by backspace
        if c == '\\' or c == quote:
            bs_char = c
        elif c == '\t': bs_char = 't'
        elif c == '\r': bs_char = 'r'
        elif c == '\n': bs_char = 'n'
        elif not '\x20' <= c < '\x7f':
            # hex escape \xNN for non-printable characters
            n = ord(c)
            buf[i] = '\\'
            i += 1
            buf[i] = 'x'
            i += 1
            buf[i] = "0123456789abcdef"[n>>4]
            i += 1
            buf[i] = "0123456789abcdef"[n&0xF]
        else:
            buf[i] = c
        if bs_char is not None:
            buf[i] = '\\'
            i += 1
            buf[i] = bs_char
    i += 1
    buf[i] = quote
    return space.wrap("".join(buf[:i+1]))   # buffer was overallocated, so slice
app = gateway.applevel(r'''
def str_translate__String_ANY_ANY(s, table, deletechars=''):
"""charfilter - unicode handling is not implemented
Return a copy of the string where all characters occurring
in the optional argument deletechars are removed, and the
remaining characters have been mapped through the given translation table,
which must be a string of length 256"""
if len(table) != 256:
raise ValueError("translation table must be 256 characters long")
L = [ table[ord(s[i])] for i in range(len(s)) if s[i] not in deletechars ]
return ''.join(L)
def str_decode__String_ANY_ANY(str, encoding=None, errors=None):
import codecs
if encoding is None and errors is None:
return unicode(str)
elif errors is None:
return codecs.getdecoder(encoding)(str)[0]
else:
return codecs.getdecoder(encoding)(str, errors)[0]
def str_encode__String_ANY_ANY(str, encoding=None, errors=None):
import codecs
if encoding is None and errors is None:
return unicode(str)
elif errors is None:
return codecs.getencoder(encoding)(str)[0]
else:
return codecs.getencoder(encoding)(str, errors)[0]
''', filename=__file__)
str_translate__String_ANY_ANY = app.interphook('str_translate__String_ANY_ANY')
str_decode__String_ANY_ANY = app.interphook('str_decode__String_ANY_ANY')
str_encode__String_ANY_ANY = app.interphook('str_encode__String_ANY_ANY')
# CPython's logic for deciding if ""%values is
# an error (1 value, 0 %-formatters) or not
# (values is of a mapping type)
def mod__String_ANY(space, w_format, w_values):
    """format % values for plain (non-unicode) format strings."""
    return mod_format(space, w_format, w_values, do_unicode=False)

# register all methods
from pypy.objspace.std import stringtype
register_all(vars(), stringtype)
| Python |
from pypy.objspace.std.objspace import *
from pypy.interpreter import gateway
from pypy.rlib.objectmodel import we_are_translated
from pypy.objspace.std.inttype import wrapint
from pypy.objspace.std.sliceobject import W_SliceObject
from pypy.objspace.std import slicetype
from pypy.objspace.std.listobject import W_ListObject
from pypy.objspace.std.noneobject import W_NoneObject
from pypy.objspace.std.tupleobject import W_TupleObject
from pypy.rlib.rarithmetic import ovfcheck
from pypy.objspace.std.stringtype import wrapchar
from pypy.objspace.std import rope
from pypy.objspace.std.stringobject import mod__String_ANY as mod__Rope_ANY
class W_RopeObject(W_Object):
    """String implementation backed by balanced trees of string nodes
    ('ropes'); shares the app-level str typedef."""
    from pypy.objspace.std.stringtype import str_typedef as typedef

    def __init__(w_self, node):
        w_self._node = node

    def __repr__(w_self):
        """ representation for debugging purposes """
        return "%s(%r)" % (w_self.__class__.__name__, w_self._node)

    def unwrap(w_self, space):
        # flatten the rope into a plain interpreter-level string
        return w_self._node.flatten()

    def create_if_subclassed(w_self):
        # return w_self itself unless it is an instance of a user
        # subclass, in which case a fresh plain W_RopeObject is built
        if type(w_self) is W_RopeObject:
            return w_self
        return W_RopeObject(w_self._node)
# prebuilt empty rope and the 256 single-character ropes
W_RopeObject.EMPTY = W_RopeObject(rope.LiteralStringNode(""))
W_RopeObject.PREBUILT = [W_RopeObject(rope.LiteralStringNode(chr(i)))
                         for i in range(256)]
del i

def rope_w(space, w_str):
    """Return the rope node of any wrapped string-like object,
    wrapping a plain string into a literal node if necessary."""
    if isinstance(w_str, W_RopeObject):
        return w_str._node
    return rope.LiteralStringNode(space.str_w(w_str))

registerimplementation(W_RopeObject)
class W_RopeIterObject(W_Object):
    """Iterator over the characters of a rope."""
    from pypy.objspace.std.itertype import iter_typedef as typedef

    def __init__(w_self, w_rope, index=0):
        w_self.node = node = w_rope._node
        w_self.char_iter = rope.CharIterator(node)
        w_self.index = index

registerimplementation(W_RopeIterObject)
def _is_generic(space, w_self, fun):
    """Common helper for str_is*() predicates: True when the rope is
    non-empty and 'fun' holds for every character."""
    l = w_self._node.length()
    if l == 0:
        return space.w_False
    iter = rope.CharIterator(w_self._node)
    for i in range(l):
        if not fun(iter.next()):
            return space.w_False
    return space.w_True
# generate one specialized copy per predicate callable
_is_generic._annspecialcase_ = "specialize:arg(2)"
def _upper(ch):
if ch.islower():
o = ord(ch) - 32
return chr(o)
else:
return ch
def _lower(ch):
if ch.isupper():
o = ord(ch) + 32
return chr(o)
else:
return ch
# character predicates handed to _is_generic (specialized per callable)
_isspace = lambda c: c.isspace()
_isdigit = lambda c: c.isdigit()
_isalpha = lambda c: c.isalpha()
_isalnum = lambda c: c.isalnum()

def str_isspace__Rope(space, w_self):
    """S.isspace()"""
    return _is_generic(space, w_self, _isspace)

def str_isdigit__Rope(space, w_self):
    """S.isdigit()"""
    return _is_generic(space, w_self, _isdigit)

def str_isalpha__Rope(space, w_self):
    """S.isalpha()"""
    return _is_generic(space, w_self, _isalpha)

def str_isalnum__Rope(space, w_self):
    """S.isalnum()"""
    return _is_generic(space, w_self, _isalnum)
def str_isupper__Rope(space, w_self):
    """Return True if all cased characters in S are uppercase and there is
    at least one cased character in S, False otherwise."""
    l = w_self._node.length()
    if l == 0:
        return space.w_False
    cased = False
    iter = rope.CharIterator(w_self._node)
    for idx in range(l):
        c = iter.next()
        if c.islower():
            return space.w_False
        elif not cased and c.isupper():
            cased = True
    return space.newbool(cased)

def str_islower__Rope(space, w_self):
    """Return True if all cased characters in S are lowercase and there is
    at least one cased character in S, False otherwise."""
    l = w_self._node.length()
    if l == 0:
        return space.w_False
    cased = False
    iter = rope.CharIterator(w_self._node)
    for idx in range(l):
        c = iter.next()
        if c.isupper():
            return space.w_False
        elif not cased and c.islower():
            cased = True
    return space.newbool(cased)
def str_istitle__Rope(space, w_self):
    """Return True if S is a titlecased string and there is at least one
    character in S, i.e. uppercase characters may only follow uncased
    characters and lowercase characters only cased ones. Return False
    otherwise."""
    cased = False
    previous_is_cased = False
    iter = rope.CharIterator(w_self._node)
    for pos in range(0, w_self._node.length()):
        ch = iter.next()
        if ch.isupper():
            if previous_is_cased:
                return space.w_False
            previous_is_cased = True
            cased = True
        elif ch.islower():
            if not previous_is_cased:
                return space.w_False
            cased = True
        else:
            previous_is_cased = False
    return space.newbool(cased)
def str_upper__Rope(space, w_self):
    """S.upper(): ASCII-only uppercase copy of the rope."""
    l = w_self._node.length()
    res = [' '] * l
    iter = rope.CharIterator(w_self._node)
    for i in range(l):
        ch = iter.next()
        res[i] = _upper(ch)
    return W_RopeObject(rope.rope_from_charlist(res))

def str_lower__Rope(space, w_self):
    """S.lower(): ASCII-only lowercase copy of the rope."""
    l = w_self._node.length()
    res = [' '] * l
    iter = rope.CharIterator(w_self._node)
    for i in range(l):
        ch = iter.next()
        res[i] = _lower(ch)
    return W_RopeObject(rope.rope_from_charlist(res))
def str_swapcase__Rope(space, w_self):
    """S.swapcase(): ASCII-only copy with upper- and lowercase swapped.

    Consistency: reuse the module's _upper/_lower helpers (as
    str_upper__Rope/str_lower__Rope do) instead of duplicating the
    ord() +/- 32 arithmetic inline; behaviour is unchanged.
    """
    l = w_self._node.length()
    res = [' '] * l
    iter = rope.CharIterator(w_self._node)
    for i in range(l):
        ch = iter.next()
        if ch.isupper():
            res[i] = _lower(ch)
        elif ch.islower():
            res[i] = _upper(ch)
        else:
            res[i] = ch
    return W_RopeObject(rope.rope_from_charlist(res))
def str_capitalize__Rope(space, w_self):
    """S.capitalize(): upper-case the first character, lower-case the
    rest (ASCII-only); returns the empty singleton for an empty rope."""
    node = w_self._node
    length = node.length()
    buffer = [' '] * length
    if length > 0:
        iter = rope.CharIterator(node)
        ch = iter.next()
        if ch.islower():
            o = ord(ch) - 32      # ASCII lower -> upper
            buffer[0] = chr(o)
        else:
            buffer[0] = ch
        for i in range(1, length):
            ch = iter.next()
            if ch.isupper():
                o = ord(ch) + 32  # ASCII upper -> lower
                buffer[i] = chr(o)
            else:
                buffer[i] = ch
    else:
        return W_RopeObject.EMPTY
    return W_RopeObject(rope.rope_from_charlist(buffer))

def str_title__Rope(space, w_self):
    """S.title(): upper-case every character following a non-alphabetic
    one, lower-case the others."""
    node = w_self._node
    length = node.length()
    buffer = [' '] * length
    prev_letter = ' '
    iter = rope.CharIterator(node)
    for pos in range(0, length):
        ch = iter.next()
        if not prev_letter.isalpha():
            buffer[pos] = _upper(ch)
        else:
            buffer[pos] = _lower(ch)
        prev_letter = buffer[pos]
    return W_RopeObject(rope.rope_from_charlist(buffer))
def str_split__Rope_None_ANY(space, w_self, w_none, w_maxsplit=-1):
    """S.split(None, maxsplit) on a rope: split at whitespace runs,
    walking the rope once with a CharIterator; the iterator position
    tracks the indices i/j."""
    maxsplit = space.int_w(w_maxsplit)
    res_w = []
    node = w_self._node
    length = node.length()
    i = 0
    iter = rope.CharIterator(node)
    while True:
        # find the beginning of the next word
        while i < length:
            if not iter.next().isspace():
                break   # found
            i += 1
        else:
            break  # end of string, finished
        # find the end of the word
        if maxsplit == 0:
            j = length   # take all the rest of the string
        else:
            j = i + 1
            while j < length and not iter.next().isspace():
                j += 1
            maxsplit -= 1   # NB. if it's already < 0, it stays < 0
        # the word is value[i:j]
        res_w.append(W_RopeObject(rope.getslice_one(node, i, j)))
        # continue to look from the character following the space after the word
        i = j + 1
    return space.newlist(res_w)
def str_split__Rope_Rope_ANY(space, w_self, w_by, w_maxsplit=-1):
    """S.split(sep, maxsplit) on ropes: find the separator occurrences
    with a FindIterator and slice the pieces out without flattening."""
    maxsplit = space.int_w(w_maxsplit)
    res_w = []
    start = 0
    selfnode = w_self._node
    bynode = w_by._node
    bylen = bynode.length()
    if bylen == 0:
        raise OperationError(space.w_ValueError, space.wrap("empty separator"))
    iter = rope.FindIterator(selfnode, bynode)
    while maxsplit != 0:
        try:
            next = iter.next()
        except StopIteration:
            break
        res_w.append(W_RopeObject(rope.getslice_one(selfnode, start, next)))
        start = next + bylen
        maxsplit -= 1   # NB. if it's already < 0, it stays < 0
    res_w.append(W_RopeObject(rope.getslice_one(
        selfnode, start, selfnode.length())))
    return space.newlist(res_w)
def str_rsplit__Rope_None_ANY(space, w_self, w_none, w_maxsplit=-1):
    """S.rsplit(None, maxsplit) on a rope."""
    # XXX works but flattens
    maxsplit = space.int_w(w_maxsplit)
    res_w = []
    value = w_self._node.flatten()
    i = len(value)-1
    while True:
        # starting from the end, find the end of the next word
        while i >= 0:
            if not value[i].isspace():
                break   # found
            i -= 1
        else:
            break  # end of string, finished
        # find the start of the word
        # (more precisely, 'j' will be the space character before the word)
        if maxsplit == 0:
            j = -1   # take all the rest of the string
        else:
            j = i - 1
            while j >= 0 and not value[j].isspace():
                j -= 1
            maxsplit -= 1   # NB. if it's already < 0, it stays < 0
        # the word is value[j+1:i+1]
        j1 = j + 1
        assert j1 >= 0
        res_w.append(space.wrap(value[j1:i+1]))
        # continue to look from the character before the space before the word
        i = j - 1
    res_w.reverse()
    return space.newlist(res_w)

def str_rsplit__Rope_Rope_ANY(space, w_self, w_by, w_maxsplit=-1):
    """S.rsplit(sep, maxsplit) on ropes."""
    # XXX works but flattens
    maxsplit = space.int_w(w_maxsplit)
    res_w = []
    value = w_self._node.flatten()
    end = len(value)
    by = w_by._node.flatten()
    bylen = len(by)
    if bylen == 0:
        raise OperationError(space.w_ValueError, space.wrap("empty separator"))
    while maxsplit != 0:
        next = value.rfind(by, 0, end)
        if next < 0:
            break
        res_w.append(space.wrap(value[next+bylen: end]))
        end = next
        maxsplit -= 1   # NB. if it's already < 0, it stays < 0
    res_w.append(space.wrap(value[:end]))
    res_w.reverse()
    return space.newlist(res_w)
def str_join__Rope_ANY(space, w_self, w_list):
    """S.join(sequence) on ropes: concatenate the rope nodes of
    'sequence' with S's node as separator, without flattening.

    A unicode item delegates the whole join to unicode.join; a
    non-string item raises TypeError; rope.join may overflow for huge
    results, reported as OverflowError.

    Cleanup: the locals 'selfnode', 'length' and 'listlen_minus_one'
    in the original were assigned but never used; they are removed.
    """
    list_w = space.unpackiterable(w_list)
    if list_w:
        self = w_self._node
        l = []
        for i in range(len(list_w)):
            w_s = list_w[i]
            if not space.is_true(space.isinstance(w_s, space.w_str)):
                if space.is_true(space.isinstance(w_s, space.w_unicode)):
                    # one unicode item promotes the whole join to unicode
                    w_u = space.call_function(space.w_unicode, w_self)
                    return space.call_method(w_u, "join", space.newlist(list_w))
                raise OperationError(
                    space.w_TypeError,
                    space.wrap("sequence item %d: expected string, %s "
                               "found" % (i, space.type(w_s).name)))
            assert isinstance(w_s, W_RopeObject)
            l.append(w_s._node)
        try:
            return W_RopeObject(rope.join(self, l))
        except OverflowError:
            raise OperationError(space.w_OverflowError,
                                 space.wrap("string too long"))
    else:
        return W_RopeObject.EMPTY
def str_rjust__Rope_ANY_ANY(space, w_self, w_arg, w_fillchar):
    """S.rjust(width[, fillchar]) on a rope: concatenate the padding in
    front of the node without flattening."""
    u_arg = space.int_w(w_arg)
    selfnode = w_self._node
    fillchar = space.str_w(w_fillchar)
    if len(fillchar) != 1:
        raise OperationError(space.w_TypeError,
            space.wrap("rjust() argument 2 must be a single character"))
    d = u_arg - selfnode.length()
    if d > 0:
        fillchar = fillchar[0]    # annotator hint: it's a single character
        resultnode = rope.concatenate(rope.LiteralStringNode(d * fillchar),
                                      selfnode)
        return W_RopeObject(resultnode)
    else:
        return W_RopeObject(selfnode)
def str_ljust__Rope_ANY_ANY(space, w_self, w_arg, w_fillchar):
    """S.ljust(width[, fillchar]) on a rope: concatenate the padding
    after the node without flattening.

    Bug fix: the TypeError message wrongly said "rjust()" (copy-paste
    from the method above); it now says "ljust()".
    """
    u_arg = space.int_w(w_arg)
    selfnode = w_self._node
    fillchar = space.str_w(w_fillchar)
    if len(fillchar) != 1:
        raise OperationError(space.w_TypeError,
            space.wrap("ljust() argument 2 must be a single character"))
    d = u_arg - selfnode.length()
    if d > 0:
        fillchar = fillchar[0]    # annotator hint: it's a single character
        resultnode = rope.concatenate(selfnode,
                                      rope.LiteralStringNode(d * fillchar))
        return W_RopeObject(resultnode)
    else:
        return W_RopeObject(selfnode)
def _convert_idx_params(space, w_self, w_sub, w_start, w_end):
    """Unwrap the (self, sub, start, end) rope arguments of
    find/index-style methods, clamping start/end into [0, length]."""
    self = w_self._node
    sub = w_sub._node
    start = slicetype.adapt_bound(space, self.length(), w_start)
    assert start >= 0   # annotator hints
    end = slicetype.adapt_bound(space, self.length(), w_end)
    assert end >= 0
    return (self, sub, start, end)

def contains__Rope_Rope(space, w_self, w_sub):
    """'sub in self' via rope-level substring search."""
    self = w_self._node
    sub = w_sub._node
    return space.newbool(rope.find(self, sub) >= 0)

def str_find__Rope_Rope_ANY_ANY(space, w_self, w_sub, w_start, w_end):
    """S.find(sub[, start[, end]]) -> lowest index or -1."""
    (self, sub, start, end) = _convert_idx_params(space, w_self, w_sub, w_start, w_end)
    res = rope.find(self, sub, start, end)
    return wrapint(space, res)

def str_rfind__Rope_Rope_ANY_ANY(space, w_self, w_sub, w_start, w_end):
    """S.rfind(sub[, start[, end]]) -> highest index or -1."""
    # XXX works but flattens
    (self, sub, start, end) = _convert_idx_params(space, w_self, w_sub, w_start, w_end)
    self = self.flatten()
    sub = sub.flatten()
    res = self.rfind(sub, start, end)
    return wrapint(space, res)
def str_partition__Rope_Rope(space, w_self, w_sub):
    """S.partition(sep) -> (head, sep, tail) around the first
    occurrence of sep, or (S, '', '') when sep is not found."""
    self = w_self._node
    sub = w_sub._node
    if not sub.length():
        raise OperationError(space.w_ValueError,
                             space.wrap("empty separator"))
    pos = rope.find(self, sub)
    if pos == -1:
        return space.newtuple([w_self, W_RopeObject.EMPTY,
                               W_RopeObject.EMPTY])
    else:
        return space.newtuple(
            [W_RopeObject(rope.getslice_one(self, 0, pos)),
             w_sub,
             W_RopeObject(rope.getslice_one(self, pos + sub.length(),
                                            self.length()))])

def str_rpartition__Rope_Rope(space, w_self, w_sub):
    """S.rpartition(sep) -> (head, sep, tail) around the last
    occurrence of sep, or ('', '', S) when sep is not found."""
    # XXX works but flattens
    self = w_self._node
    sub = w_sub._node
    if not sub.length():
        raise OperationError(space.w_ValueError,
                             space.wrap("empty separator"))
    flattened_self = self.flatten()
    flattened_sub = sub.flatten()
    pos = flattened_self.rfind(flattened_sub)
    if pos == -1:
        return space.newtuple([W_RopeObject.EMPTY, W_RopeObject.EMPTY, w_self])
    else:
        return space.newtuple(
            [W_RopeObject(rope.getslice_one(self, 0, pos)),
             w_sub,
             W_RopeObject(rope.getslice_one(self, pos + sub.length(),
                                            self.length()))])
def str_index__Rope_Rope_ANY_ANY(space, w_self, w_sub, w_start, w_end):
    """S.index(sub[, start[, end]]): like find() but raising ValueError
    when the substring is absent."""
    (self, sub, start, end) = _convert_idx_params(space, w_self, w_sub, w_start, w_end)
    res = rope.find(self, sub, start, end)
    if res < 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("substring not found in string.index"))
    return wrapint(space, res)

def str_rindex__Rope_Rope_ANY_ANY(space, w_self, w_sub, w_start, w_end):
    """S.rindex(sub[, start[, end]]): like rfind() but raising
    ValueError when the substring is absent."""
    (self, sub, start, end) = _convert_idx_params(space, w_self, w_sub, w_start, w_end)
    # XXX works but flattens
    self = self.flatten()
    sub = sub.flatten()
    res = self.rfind(sub, start, end)
    if res < 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("substring not found in string.rindex"))
    return wrapint(space, res)
def str_replace__Rope_Rope_Rope_ANY(space, w_self, w_sub, w_by, w_maxsplit=-1):
    """S.replace(sub, by[, maxsplit]) on ropes.

    An empty 'sub' inserts 'by' between every character and at both
    ends (like CPython); otherwise the occurrences are located with a
    FindIterator and the pieces are rejoined with 'by'.  Oversized
    results raise OverflowError.
    """
    node = w_self._node
    length = node.length()
    sub = w_sub._node
    by = w_by._node
    maxsplit = space.int_w(w_maxsplit)
    if maxsplit == 0:
        return w_self.create_if_subclassed()
    if not sub.length():
        # empty pattern: by + c0 + by + c1 + ... + rest
        upper = node.length()
        if maxsplit > 0 and maxsplit < upper + 2:
            upper = maxsplit - 1
        assert upper >= 0
        substrings = [by]
        iter = rope.CharIterator(node)
        for i in range(upper):
            substrings.append(rope.LiteralStringNode(iter.next()))
            substrings.append(by)
        substrings.append(rope.getslice_one(node, upper, length))
        try:
            return W_RopeObject(rope.rebalance(substrings))
        except OverflowError:
            raise OperationError(space.w_OverflowError,
                                 space.wrap("string too long"))
    startidx = 0
    substrings = []
    iter = rope.FindIterator(node, sub)
    try:
        foundidx = iter.next()
    except StopIteration:
        # no occurrence at all: return the receiver unchanged
        return w_self.create_if_subclassed()
    while maxsplit != 0:
        substrings.append(rope.getslice_one(node, startidx, foundidx))
        startidx = foundidx + sub.length()
        try:
            foundidx = iter.next()
        except StopIteration:
            break
        maxsplit = maxsplit - 1
    substrings.append(rope.getslice_one(node, startidx, length))
    try:
        return W_RopeObject(rope.join(by, substrings))
    except OverflowError:
        raise OperationError(space.w_OverflowError,
                             space.wrap("string too long"))
def _strip(space, w_self, w_chars, left, right):
    """Internal helper for strip/lstrip/rstrip with an explicit char set.

    'left'/'right' select which side(s) to trim; characters belonging to
    w_chars are removed from the selected end(s).
    """
    node = w_self._node
    length = node.length()
    u_chars = space.str_w(w_chars)
    lpos = 0
    rpos = length
    if left:
        #print "while %d < %d and -%s- in -%s-:"%(lpos, rpos, u_self[lpos],w_chars)
        iter = rope.CharIterator(node)
        while lpos < rpos and iter.next() in u_chars:
            lpos += 1
    if right:
        # XXX improve this: getitem per character walks the tree each time
        while rpos > lpos and node.getitem(rpos - 1) in u_chars:
            rpos -= 1
    return W_RopeObject(rope.getslice_one(node, lpos, rpos))
def _strip_none(space, w_self, left, right):
    """Internal helper for strip/lstrip/rstrip with no char set:
    trims whitespace from the selected end(s)."""
    node = w_self._node
    length = node.length()
    lpos = 0
    rpos = length
    if left:
        #print "while %d < %d and -%s- in -%s-:"%(lpos, rpos, u_self[lpos],w_chars)
        iter = rope.CharIterator(node)
        while lpos < rpos and iter.next().isspace():
            lpos += 1
    if right:
        # XXX fix this: per-character getitem walks the tree each time
        while rpos > lpos and node.getitem(rpos - 1).isspace():
            rpos -= 1
    assert rpos >= lpos    # annotator hint, don't remove
    return W_RopeObject(rope.getslice_one(node, lpos, rpos))
# Thin multimethod dispatchers: strip/rstrip/lstrip with a string
# argument go through _strip; the None-argument variants (no char set
# given) strip whitespace via _strip_none.
def str_strip__Rope_Rope(space, w_self, w_chars):
    return _strip(space, w_self, w_chars, left=1, right=1)

def str_strip__Rope_None(space, w_self, w_chars):
    return _strip_none(space, w_self, left=1, right=1)

def str_rstrip__Rope_Rope(space, w_self, w_chars):
    return _strip(space, w_self, w_chars, left=0, right=1)

def str_rstrip__Rope_None(space, w_self, w_chars):
    return _strip_none(space, w_self, left=0, right=1)

def str_lstrip__Rope_Rope(space, w_self, w_chars):
    return _strip(space, w_self, w_chars, left=1, right=0)

def str_lstrip__Rope_None(space, w_self, w_chars):
    return _strip_none(space, w_self, left=1, right=0)
def str_center__Rope_ANY_ANY(space, w_self, w_arg, w_fillchar):
    """str.center(width[, fillchar]): pad on both sides to 'width'.

    When padding is uneven the extra fill character goes on the right
    (offset = d//2 on the left).
    """
    node = w_self._node
    length = node.length()
    arg = space.int_w(w_arg)
    fillchar = space.str_w(w_fillchar)
    if len(fillchar) != 1:
        raise OperationError(space.w_TypeError,
            space.wrap("center() argument 2 must be a single character"))
    d = arg - length
    if d>0:
        offset = d//2
        fillcharnode = rope.LiteralStringNode(fillchar)
        pre = rope.multiply(fillcharnode, offset)
        post = rope.multiply(fillcharnode, (d - offset))
        centered = rope.rebalance([pre, node, post])
        return W_RopeObject(centered)
    else:
        # already at least 'width' long: no padding needed
        return w_self.create_if_subclassed()
def str_count__Rope_Rope_ANY_ANY(space, w_self, w_arg, w_start, w_end):
    """str.count(sub[, start[, end]]): number of non-overlapping
    occurrences of 'sub' in the given window."""
    selfnode = w_self._node
    length = selfnode.length()
    argnode = w_arg._node
    u_start = slicetype.adapt_bound(space, length, w_start)
    u_end = slicetype.adapt_bound(space, length, w_end)
    assert u_start >= 0
    assert u_end >= 0
    # drain the find iterator, counting hits
    iter = rope.FindIterator(selfnode, argnode, u_start, u_end)
    i = 0
    while 1:
        try:
            index = iter.next()
        except StopIteration:
            break
        i += 1
    return wrapint(space, i)
def ropeendswith(self, suffix, start, end):
    """Return True if rope 'self' ends with rope 'suffix' inside the
    [start, end) window; an empty suffix always matches."""
    suffixlen = suffix.length()
    if suffixlen == 0:
        return True
    if self.length() == 0:
        return False
    begin = end - suffixlen
    if begin < start:
        # suffix would start before the window: cannot match
        return False
    selfiter = rope.SeekableCharIterator(self)
    selfiter.seekforward(begin)
    suffixiter = rope.CharIterator(suffix)
    for _ in range(suffixlen):
        if selfiter.next() != suffixiter.next():
            return False
    return True
def str_endswith__Rope_Rope_ANY_ANY(space, w_self, w_suffix, w_start, w_end):
    """str.endswith(suffix[, start[, end]]) for a single string suffix."""
    (self, suffix, start, end) = _convert_idx_params(space, w_self,
                                                    w_suffix, w_start, w_end)
    return space.newbool(ropeendswith(self, suffix, start, end))
def str_endswith__Rope_Tuple_ANY_ANY(space, w_self, w_suffixes, w_start, w_end):
    """str.endswith(tuple_of_suffixes[, start[, end]]): True if any
    suffix in the tuple matches."""
    # convert only the indices here; the empty-string argument is a
    # placeholder so _convert_idx_params can be reused
    (self, _, start, end) = _convert_idx_params(space, w_self,
                                                space.wrap(''), w_start, w_end)
    for w_suffix in space.unpacktuple(w_suffixes):
        suffix = rope_w(space, w_suffix)
        if ropeendswith(self, suffix, start, end):
            return space.w_True
    return space.w_False
def ropestartswith(self, prefix, start, end):
    """Return True if rope 'self' starts with rope 'prefix' inside the
    [start, end) window; an empty prefix always matches."""
    prefixlen = prefix.length()
    if prefixlen == 0:
        return True
    if self.length() == 0:
        return False
    if start + prefixlen > end:
        # prefix would run past the window: cannot match
        return False
    selfiter = rope.SeekableCharIterator(self)
    selfiter.seekforward(start)
    prefixiter = rope.CharIterator(prefix)
    for _ in range(prefixlen):
        if selfiter.next() != prefixiter.next():
            return False
    return True
def str_startswith__Rope_Rope_ANY_ANY(space, w_self, w_prefix, w_start, w_end):
    """str.startswith(prefix[, start[, end]]) for a single string prefix."""
    (self, prefix, start, end) = _convert_idx_params(space, w_self,
                                                     w_prefix, w_start, w_end)
    return space.newbool(ropestartswith(self, prefix, start, end))
def str_startswith__Rope_Tuple_ANY_ANY(space, w_self, w_prefixes, w_start, w_end):
    """str.startswith(tuple_of_prefixes[, start[, end]]): True if any
    prefix in the tuple matches."""
    # empty-string placeholder: only the index conversion is needed here
    (self, _, start, end) = _convert_idx_params(space, w_self, space.wrap(''),
                                                w_start, w_end)
    for w_prefix in space.unpacktuple(w_prefixes):
        prefix = rope_w(space, w_prefix)
        if ropestartswith(self, prefix, start, end):
            return space.w_True
    return space.w_False
def _tabindent(node, tabsize):
    "calculates distance after the token to the next tabstop"
    # Walks 'node' backwards (by indexed getitem -- XXX implement
    # reverse char iterator) to find how many characters follow the last
    # line break; the distance to the next multiple of 'tabsize' is the
    # amount of padding expandtabs must insert.
    length = node.length()
    distance = tabsize
    if length:
        distance = 0
        offset = length
        while 1:
            # no sophisticated linebreak support now
            # '\r' just for passing adapted CPython test
            char = node.getitem(offset - 1)
            if char == "\n" or char == "\r":
                break
            distance += 1
            offset -= 1
            if offset == 0:
                break
        #the same like distance = len(u_token) - (offset + 1)
        distance = (tabsize - distance) % tabsize
        if distance == 0:
            return tabsize
    return distance
def str_expandtabs__Rope_ANY(space, w_self, w_tabsize):
    """str.expandtabs([tabsize]): replace each tab with enough spaces to
    reach the next tab stop, computed per line via _tabindent."""
    node = w_self._node
    length = node.length()
    if length == 0:
        return W_RopeObject.EMPTY
    tabsize = space.int_w(w_tabsize)
    expanded = []
    iter = rope.FindIterator(node, rope.LiteralStringNode("\t"))
    #split = u_self.split("\t")
    #u_expanded = oldtoken = split.pop(0)
    #for token in split:
    #    u_expanded += " " * _tabindent(oldtoken,u_tabsize) + token
    #    oldtoken = token
    start = 0
    try:
        # position of the first tab; 'last' is the segment before it
        start = iter.next()
        last = rope.getslice_one(node, 0, start)
        start += 1
    except StopIteration:
        # no tab at all: nothing to expand
        return w_self.create_if_subclassed()
    expanded.append(last)
    while 1:
        # pad to the next tab stop after the preceding segment
        expanded.append(rope.multiply(rope.LiteralStringNode(" "),
                                      _tabindent(last, tabsize)))
        try:
            next = iter.next()
        except StopIteration:
            break
        last = rope.getslice_one(node, start, next)
        expanded.append(last)
        start = next + 1
    # tail after the final tab
    expanded.append(rope.getslice_one(node, start, length))
    return W_RopeObject(rope.rebalance(expanded))
def str_splitlines__Rope_ANY(space, w_self, w_keepends):
    """str.splitlines([keepends]): split at '\\n', '\\r' and '\\r\\n'
    (CRLF counts as a single break); keepends keeps the terminators."""
    #import pdb; pdb.set_trace()
    keepends = bool(space.int_w(w_keepends))  # truth value, but type checked
    node = w_self._node
    length = node.length()
    if length == 0:
        return space.newlist([])
    strs_w = []
    iter = rope.CharIterator(node)
    i = j = 0
    last = " "
    char = iter.next()
    while i < length:
        # Find a line and append it
        while char != '\n' and char != '\r':
            try:
                i += 1
                last = char
                char = iter.next()
            except StopIteration:
                break
        # Skip the line break reading CRLF as one line break
        eol = i
        i += 1
        last = char
        try:
            char = iter.next()
        except StopIteration:
            pass
        else:
            if last == '\r' and char == '\n':
                i += 1
                try:
                    last = char
                    char = iter.next()
                except StopIteration:
                    pass
        if keepends:
            # NOTE(review): for a final line without a terminator, i can
            # be length+1 here; presumably rope.getslice_one clamps the
            # upper bound -- verify against the rope module.
            eol = i
        strs_w.append(W_RopeObject(rope.getslice_one(node, j, eol)))
        j = i
    if j == 0:
        # no line break found at all: the string is a single line
        strs_w.append(w_self.create_if_subclassed())
    elif j < length:
        # unterminated final line
        strs_w.append(W_RopeObject(rope.getslice_one(node, j, length)))
    return space.newlist(strs_w)
def str_zfill__Rope_ANY(space, w_self, w_width):
    """str.zfill(width): left-pad with '0' to 'width', keeping a leading
    '+' or '-' sign in front of the padding."""
    node = w_self._node
    length = node.length()
    width = space.int_w(w_width)
    if length >= width:
        return w_self.create_if_subclassed()
    zero = rope.LiteralStringNode("0")
    if length == 0:
        return W_RopeObject(rope.multiply(zero, width))
    middle = width - length
    firstchar = node.getitem(0)
    if length > 0 and (firstchar == '+' or firstchar == '-'):
        # sign, then zeros, then the digits after the sign
        return W_RopeObject(rope.rebalance(
            [rope.LiteralStringNode(firstchar),
             rope.multiply(zero, middle),
             rope.getslice_one(node, 1, length)]))
    else:
        middle = width - length
        return W_RopeObject(rope.concatenate(
            rope.multiply(zero, middle), node))
def str_w__Rope(space, w_str):
    # Unwrap to an interp-level string; flattens the whole rope (O(n)).
    return w_str._node.flatten()

def hash__Rope(space, w_str):
    # hash() delegates to the rope-level hash over the node tree.
    return wrapint(space, rope.hash_rope(w_str._node))
def lt__Rope_Rope(space, w_str1, w_str2):
    """Implement 'str1 < str2' via the rope three-way comparison."""
    return space.newbool(rope.compare(w_str1._node, w_str2._node) < 0)
def le__Rope_Rope(space, w_str1, w_str2):
    """Implement 'str1 <= str2' via the rope three-way comparison."""
    return space.newbool(rope.compare(w_str1._node, w_str2._node) <= 0)
def _eq(w_str1, w_str2):
    # Shared equality primitive for eq/ne below.
    result = rope.eq(w_str1._node, w_str2._node)
    return result

def eq__Rope_Rope(space, w_str1, w_str2):
    return space.newbool(_eq(w_str1, w_str2))

def ne__Rope_Rope(space, w_str1, w_str2):
    return space.newbool(not _eq(w_str1, w_str2))
def gt__Rope_Rope(space, w_str1, w_str2):
    """Implement 'str1 > str2' via the rope three-way comparison."""
    return space.newbool(rope.compare(w_str1._node, w_str2._node) > 0)
def ge__Rope_Rope(space, w_str1, w_str2):
    """Implement 'str1 >= str2' via the rope three-way comparison."""
    return space.newbool(rope.compare(w_str1._node, w_str2._node) >= 0)
def getitem__Rope_ANY(space, w_str, w_index):
    """s[i] with negative-index normalization and IndexError on
    out-of-range access."""
    ival = space.getindex_w(w_index, space.w_IndexError, "string index")
    node = w_str._node
    slen = node.length()
    if ival < 0:
        ival += slen
    if ival < 0 or ival >= slen:
        # builds a real IndexError instance as the exception value
        # (unlike the plain wrapped-string messages used elsewhere)
        exc = space.call_function(space.w_IndexError,
                                  space.wrap("string index out of range"))
        raise OperationError(space.w_IndexError, exc)
    return wrapchar(space, node.getitem(ival))
def getitem__Rope_Slice(space, w_str, w_slice):
    """s[start:stop:step] -- delegates slicing to the rope layer."""
    node = w_str._node
    length = node.length()
    start, stop, step, sl = w_slice.indices4(space, length)
    if sl == 0:
        return W_RopeObject.EMPTY
    return W_RopeObject(rope.getslice(node, start, stop, step, sl))
def mul_string_times(space, w_str, w_times):
try:
mul = space.getindex_w(w_times, space.w_OverflowError)
except OperationError, e:
if e.match(space, space.w_TypeError):
raise FailedToImplement
raise
if mul <= 0:
return W_RopeObject.EMPTY
node = w_str._node
length = node.length()
# try:
# buflen = ovfcheck(mul * length)
# except OverflowError:
# raise OperationError(
# space.w_OverflowError,
# space.wrap("repeated string is too long: %d %d" % (length, mul)))
try:
return W_RopeObject(rope.multiply(node, mul))
except OverflowError:
raise OperationError(space.w_OverflowError,
space.wrap("string too long"))
# Both operand orders of '*' funnel into mul_string_times.
def mul__Rope_ANY(space, w_str, w_times):
    return mul_string_times(space, w_str, w_times)

def mul__ANY_Rope(space, w_times, w_str):
    return mul_string_times(space, w_str, w_times)
def add__Rope_Rope(space, w_left, w_right):
    """str + str: O(1)-ish rope concatenation; total-length overflow is
    surfaced as an app-level OverflowError."""
    right = w_right._node
    left = w_left._node
    try:
        return W_RopeObject(rope.concatenate(left, right))
    except OverflowError:
        raise OperationError(space.w_OverflowError,
                             space.wrap("string too long"))
def len__Rope(space, w_str):
    # len(s): rope nodes store their length, so this is cheap.
    return space.wrap(w_str._node.length())

def str__Rope(space, w_str):
    # str(s): return s itself, except for str subclasses where a plain
    # W_RopeObject sharing the same node is built.
    if type(w_str) is W_RopeObject:
        return w_str
    return W_RopeObject(w_str._node)

def iter__Rope(space, w_list):
    # iter(s): wrap in the rope character iterator object.
    return W_RopeIterObject(w_list)
def ord__Rope(space, w_str):
    """ord(s) for a one-character rope string; TypeError otherwise."""
    node = w_str._node
    length = node.length()
    if length != 1:
        raise OperationError(
            space.w_TypeError,
            space.wrap("ord() expected a character, but string "
                       "of length %d found" % (length,)))
    # fetch the single character directly instead of flattening the
    # whole rope just to read index 0
    return space.wrap(ord(node.getitem(0)))
def getnewargs__Rope(space, w_str):
    # __getnewargs__ for pickling: a 1-tuple with a plain copy wrapper.
    return space.newtuple([W_RopeObject(w_str._node)])
def repr__Rope(space, w_str):
    """repr(s): quote and escape like CPython -- prefer single quotes,
    switch to double quotes when the string contains ' but not "."""
    node = w_str._node
    length = node.length()
    i = 0
    # worst case is 4 chars per input char (\xNN) plus the two quotes
    buf = [' '] * (length * 4 + 2)  # safely overallocate
    quote = "'"
    if rope.find_char(node, quote) != -1 and rope.find_char(node, '"') == -1:
        quote = '"'
    buf[0] = quote
    iter = rope.CharIterator(node)
    while 1:
        try:
            c = iter.next()
            i += 1
        except StopIteration:
            break
        bs_char = None  # character quoted by backspace
        if c == '\\' or c == quote:
            bs_char = c
        elif c == '\t': bs_char = 't'
        elif c == '\r': bs_char = 'r'
        elif c == '\n': bs_char = 'n'
        elif not '\x20' <= c < '\x7f':
            # non-printable: emit \xNN
            n = ord(c)
            buf[i] = '\\'
            i += 1
            buf[i] = 'x'
            i += 1
            buf[i] = "0123456789abcdef"[n>>4]
            i += 1
            buf[i] = "0123456789abcdef"[n&0xF]
        else:
            buf[i] = c
        if bs_char is not None:
            # backslash-escaped single character
            buf[i] = '\\'
            i += 1
            buf[i] = bs_char
    i += 1
    buf[i] = quote
    return W_RopeObject(rope.rope_from_charlist(buf[:i+1]))
app = gateway.applevel(r'''
def str_translate__Rope_ANY_ANY(s, table, deletechars=''):
"""charfilter - unicode handling is not implemented
Return a copy of the string where all characters occurring
in the optional argument deletechars are removed, and the
remaining characters have been mapped through the given translation table,
which must be a string of length 256"""
if len(table) != 256:
raise ValueError("translation table must be 256 characters long")
L = [ table[ord(s[i])] for i in range(len(s)) if s[i] not in deletechars ]
return ''.join(L)
def str_decode__Rope_ANY_ANY(str, encoding=None, errors=None):
import codecs
if encoding is None and errors is None:
return unicode(str)
elif errors is None:
return codecs.getdecoder(encoding)(str)[0]
else:
return codecs.getdecoder(encoding)(str, errors)[0]
def str_encode__Rope_ANY_ANY(str, encoding=None, errors=None):
import codecs
if encoding is None and errors is None:
return unicode(str)
elif errors is None:
return codecs.getencoder(encoding)(str)[0]
else:
return codecs.getencoder(encoding)(str, errors)[0]
''', filename=__file__)
str_translate__Rope_ANY_ANY = app.interphook('str_translate__Rope_ANY_ANY')
str_decode__Rope_ANY_ANY = app.interphook('str_decode__Rope_ANY_ANY')
str_encode__Rope_ANY_ANY = app.interphook('str_encode__Rope_ANY_ANY')
# methods of the iterator
def iter__RopeIter(space, w_ropeiter):
    # iterators are their own iterator
    return w_ropeiter

def next__RopeIter(space, w_ropeiter):
    """Advance the rope iterator; node is set to None once exhausted so
    later next() calls keep raising StopIteration."""
    if w_ropeiter.node is None:
        raise OperationError(space.w_StopIteration, space.w_None)
    try:
        char = w_ropeiter.char_iter.next()
        w_item = space.wrap(char)
    except StopIteration:
        # drop references so the rope can be collected
        w_ropeiter.node = None
        w_ropeiter.char_iter = None
        raise OperationError(space.w_StopIteration, space.w_None)
    w_ropeiter.index += 1
    return w_item
def len__RopeIter(space, w_ropeiter):
    # __length_hint__-style length: remaining items, never negative.
    if w_ropeiter.node is None:
        return wrapint(space, 0)
    index = w_ropeiter.index
    length = w_ropeiter.node.length()
    result = length - index
    if result < 0:
        return wrapint(space, 0)
    return wrapint(space, result)

# register all methods with the multimethod tables declared in stringtype
from pypy.objspace.std import stringtype
register_all(vars(), stringtype)
| Python |
from pypy.objspace.std.objspace import *
from pypy.objspace.std.inttype import wrapint
from pypy.rlib.rarithmetic import intmask
from pypy.objspace.std.sliceobject import W_SliceObject
from pypy.interpreter import gateway
class W_TupleObject(W_Object):
    """Interp-level tuple: an immutable list of wrapped values."""
    from pypy.objspace.std.tupletype import tuple_typedef as typedef

    def __init__(w_self, wrappeditems):
        w_self.wrappeditems = wrappeditems   # a list of wrapped values

    def __repr__(w_self):
        """ representation for debugging purposes """
        reprlist = [repr(w_item) for w_item in w_self.wrappeditems]
        return "%s(%s)" % (w_self.__class__.__name__, ', '.join(reprlist))

    def unwrap(w_tuple, space):
        items = [space.unwrap(w_item) for w_item in w_tuple.wrappeditems]  # XXX generic mixed types unwrap
        return tuple(items)

registerimplementation(W_TupleObject)
def len__Tuple(space, w_tuple):
    # len(t): length of the underlying wrapped-items list.
    result = len(w_tuple.wrappeditems)
    return wrapint(space, result)
def getitem__Tuple_ANY(space, w_tuple, w_index):
    """t[i], relying on Python list indexing for negative indices."""
    # getindex_w should get a second argument space.w_IndexError,
    # but that doesn't exist the first time this is called.
    try:
        w_IndexError = space.w_IndexError
    except AttributeError:
        w_IndexError = None
    index = space.getindex_w(w_index, w_IndexError, "tuple index")
    try:
        return w_tuple.wrappeditems[index]
    except IndexError:
        raise OperationError(space.w_IndexError,
                             space.wrap("tuple index out of range"))
def getitem__Tuple_Slice(space, w_tuple, w_slice):
    """t[start:stop:step]: build a new tuple by stepping through items."""
    items = w_tuple.wrappeditems
    length = len(items)
    start, stop, step, slicelength = w_slice.indices4(space, length)
    assert slicelength >= 0
    subitems = [None] * slicelength
    for i in range(slicelength):
        subitems[i] = items[start]
        start += step
    return W_TupleObject(subitems)
def contains__Tuple_ANY(space, w_tuple, w_obj):
    # 'x in t': linear scan with app-level equality.
    for w_item in w_tuple.wrappeditems:
        if space.eq_w(w_item, w_obj):
            return space.w_True
    return space.w_False
def iter__Tuple(space, w_tuple):
    # iter(t): generic sequence iterator over the wrapped items.
    from pypy.objspace.std import iterobject
    return iterobject.W_SeqIterObject(w_tuple)
def add__Tuple_Tuple(space, w_tuple1, w_tuple2):
    # t1 + t2: concatenation of the item lists into a fresh tuple.
    items1 = w_tuple1.wrappeditems
    items2 = w_tuple2.wrappeditems
    return W_TupleObject(items1 + items2)
def mul_tuple_times(space, w_tuple, w_times):
    """Repeat a tuple (shared by tuple*int and int*tuple); non-index
    multipliers raise FailedToImplement so other implementations get a
    chance."""
    try:
        times = space.getindex_w(w_times, space.w_OverflowError)
    except OperationError, e:
        if e.match(space, space.w_TypeError):
            raise FailedToImplement
        raise
    items = w_tuple.wrappeditems
    return W_TupleObject(items * times)
# Both operand orders of '*' funnel into mul_tuple_times.
def mul__Tuple_ANY(space, w_tuple, w_times):
    return mul_tuple_times(space, w_tuple, w_times)

def mul__ANY_Tuple(space, w_times, w_tuple):
    return mul_tuple_times(space, w_tuple, w_times)
def eq__Tuple_Tuple(space, w_tuple1, w_tuple2):
    """t1 == t2: lengths must match, then itemwise app-level equality."""
    items1 = w_tuple1.wrappeditems
    items2 = w_tuple2.wrappeditems
    if len(items1) != len(items2):
        return space.w_False
    for i in range(len(items1)):
        item1 = items1[i]
        item2 = items2[i]
        if not space.eq_w(item1, item2):
            return space.w_False
    return space.w_True
def _min(a, b):
if a < b:
return a
return b
def lt__Tuple_Tuple(space, w_tuple1, w_tuple2):
    """t1 < t2: lexicographic -- first differing item decides, else the
    shorter tuple is smaller."""
    items1 = w_tuple1.wrappeditems
    items2 = w_tuple2.wrappeditems
    ncmp = _min(len(items1), len(items2))
    # Search for the first index where items are different
    for p in range(ncmp):
        if not space.eq_w(items1[p], items2[p]):
            return space.lt(items1[p], items2[p])
    # No more items to compare -- compare sizes
    return space.newbool(len(items1) < len(items2))
def gt__Tuple_Tuple(space, w_tuple1, w_tuple2):
    """t1 > t2: lexicographic -- mirror image of lt__Tuple_Tuple."""
    items1 = w_tuple1.wrappeditems
    items2 = w_tuple2.wrappeditems
    ncmp = _min(len(items1), len(items2))
    # Search for the first index where items are different
    for p in range(ncmp):
        if not space.eq_w(items1[p], items2[p]):
            return space.gt(items1[p], items2[p])
    # No more items to compare -- compare sizes
    return space.newbool(len(items1) > len(items2))
app = gateway.applevel("""
def repr__Tuple(t):
if len(t) == 1:
return "(" + repr(t[0]) + ",)"
else:
return "(" + ", ".join([repr(x) for x in t]) + ')'
""", filename=__file__)
repr__Tuple = app.interphook('repr__Tuple')
def hash__Tuple(space, w_tuple):
    """hash(t) combining the item hashes."""
    # this is the CPython 2.4 algorithm (changed from 2.3)
    mult = 1000003
    x = 0x345678
    z = len(w_tuple.wrappeditems)
    for w_item in w_tuple.wrappeditems:
        y = space.int_w(space.hash(w_item))
        x = (x ^ y) * mult
        z -= 1
        mult += 82520 + z + z
    x += 97531
    # intmask truncates to a machine-word-sized signed int
    return space.wrap(intmask(x))
def getnewargs__Tuple(space, w_tuple):
    # __getnewargs__ for pickling: a 1-tuple wrapping the same items.
    return space.newtuple([W_TupleObject(w_tuple.wrappeditems)])

register_all(vars())
| Python |
from pypy.objspace.std.stdtypedef import *
from pypy.objspace.std.strutil import string_to_int, string_to_w_long, ParseStringError, ParseStringOverflowError
from pypy.interpreter.error import OperationError
from pypy.interpreter.gateway import NoneNotWrapped
from pypy.rlib.rarithmetic import r_uint
from pypy.rlib.objectmodel import instantiate
# ____________________________________________________________
def wrapint(space, x):
    """Wrap an interp-level int 'x', honoring the space configuration:
    tagged small ints, a prebuilt-int cache, or a plain W_IntObject."""
    if space.config.objspace.std.withsmallint:
        from pypy.objspace.std.smallintobject import W_SmallIntObject
        try:
            return W_SmallIntObject(x)
        except OverflowError:
            # x does not fit in a tagged small int
            from pypy.objspace.std.intobject import W_IntObject
            return W_IntObject(x)
    elif space.config.objspace.std.withprebuiltint:
        from pypy.objspace.std.intobject import W_IntObject
        lower = space.config.objspace.std.prebuiltintfrom
        upper = space.config.objspace.std.prebuiltintto
        # use r_uint to perform a single comparison (this whole function
        # is getting inlined into every caller so keeping the branching
        # to a minimum is a good idea)
        index = r_uint(x - lower)
        if index >= r_uint(upper - lower):
            w_res = instantiate(W_IntObject)
        else:
            w_res = W_IntObject.PREBUILT[index]
        # obscure hack to help the CPU cache: we store 'x' even into
        # a prebuilt integer's intval. This makes sure that the intval
        # field is present in the cache in the common case where it is
        # quickly reused. (we could use a prefetch hint if we had that)
        w_res.intval = x
        return w_res
    else:
        from pypy.objspace.std.intobject import W_IntObject
        return W_IntObject(x)
# ____________________________________________________________
def retry_to_w_long(space, parser, base=0):
    """Re-parse an overflowing int literal as an app-level long,
    rewinding the parser state first."""
    parser.rewind()
    try:
        return string_to_w_long(space, None, base=base, parser=parser)
    except ParseStringError, e:
        raise OperationError(space.w_ValueError,
                             space.wrap(e.msg))
def descr__new__(space, w_inttype, w_x=0, w_base=NoneNotWrapped):
    """int.__new__(cls, x=0[, base]).

    Without a base: accept ints directly, parse str/unicode, or fall
    back to x.__int__(). With a base: x must be a string (or unicode).
    Values that overflow a machine int are returned as app-level longs
    (only allowed when constructing plain 'int', not a subclass).
    """
    from pypy.objspace.std.intobject import W_IntObject
    w_longval = None
    w_value = w_x     # 'x' is the keyword argument name in CPython
    value = 0
    if w_base is None:
        # check for easy cases
        if isinstance(w_value, W_IntObject):
            value = w_value.intval
        elif space.is_true(space.isinstance(w_value, space.w_str)):
            try:
                value = string_to_int(space.str_w(w_value))
            except ParseStringError, e:
                raise OperationError(space.w_ValueError,
                                     space.wrap(e.msg))
            except ParseStringOverflowError, e:
                # literal too large for a machine int: reparse as long
                w_longval = retry_to_w_long(space, e.parser)
        elif space.is_true(space.isinstance(w_value, space.w_unicode)):
            from unicodeobject import unicode_to_decimal_w
            string = unicode_to_decimal_w(space, w_value)
            try:
                value = string_to_int(string)
            except ParseStringError, e:
                raise OperationError(space.w_ValueError,
                                     space.wrap(e.msg))
            except ParseStringOverflowError, e:
                w_longval = retry_to_w_long(space, e.parser)
        else:
            # otherwise, use the __int__() method
            w_obj = space.int(w_value)
            # 'int(x)' should return whatever x.__int__() returned
            if space.is_w(w_inttype, space.w_int):
                return w_obj
            # int_w is effectively what we want in this case,
            # we cannot construct a subclass of int instance with an
            # an overflowing long
            try:
                value = space.int_w(w_obj)
            except OperationError, e:
                if e.match(space,space.w_TypeError):
                    raise OperationError(space.w_ValueError,
                        space.wrap("value can't be converted to int"))
                raise e
    else:
        base = space.int_w(w_base)
        if space.is_true(space.isinstance(w_value, space.w_unicode)):
            from pypy.objspace.std.unicodeobject import unicode_to_decimal_w
            s = unicode_to_decimal_w(space, w_value)
        else:
            try:
                s = space.str_w(w_value)
            except OperationError, e:
                raise OperationError(space.w_TypeError,
                                     space.wrap("int() can't convert non-string "
                                                "with explicit base"))
        try:
            value = string_to_int(s, base)
        except ParseStringError, e:
            raise OperationError(space.w_ValueError,
                                 space.wrap(e.msg))
        except ParseStringOverflowError, e:
            w_longval = retry_to_w_long(space, e.parser, base)
    if w_longval is not None:
        # overflowed: a long result is only legal for plain 'int'
        if not space.is_w(w_inttype, space.w_int):
            raise OperationError(space.w_OverflowError,
                                 space.wrap(
                                     "long int too large to convert to int"))
        return w_longval
    elif space.is_w(w_inttype, space.w_int):
        # common case
        return wrapint(space, value)
    else:
        # int subclass: allocate an instance of the subclass
        w_obj = space.allocate_instance(W_IntObject, w_inttype)
        W_IntObject.__init__(w_obj, value)
        return w_obj
# ____________________________________________________________
int_typedef = StdTypeDef("int",
__doc__ = '''int(x[, base]) -> integer
Convert a string or number to an integer, if possible. A floating point
argument will be truncated towards zero (this does not include a string
representation of a floating point number!) When converting a string, use
the optional base. It is an error to supply a base when converting a
non-string. If the argument is outside the integer range a long object
will be returned instead.''',
__new__ = newmethod(descr__new__),
)
int_typedef.custom_hash = True
| Python |
from objspace import StdObjSpace
# 'Space' is the conventional alias the interpreter entry points import.
Space = StdObjSpace
| Python |
from pypy.interpreter import gateway
from pypy.objspace.std.objspace import W_Object, OperationError
from pypy.objspace.std.objspace import registerimplementation, register_all
from pypy.objspace.std.noneobject import W_NoneObject
from pypy.objspace.std.floatobject import W_FloatObject, _hash_float
import math
class W_ComplexObject(W_Object):
    """This is a reimplementation of the CPython "PyComplexObject"
    """
    from pypy.objspace.std.complextype import complex_typedef as typedef

    def __init__(w_self, realval=0.0, imgval=0.0):
        # both components are stored as interp-level floats
        w_self.realval = float(realval)
        w_self.imagval = float(imgval)

    def __repr__(w_self):
        """ representation for debugging purposes """
        return "<W_ComplexObject(%f,%f)>" % (w_self.realval, w_self.imagval)
registerimplementation(W_ComplexObject)

# complex one as a (real, imag) tuple; the helpers below work on tuples
c_1 = (1.0, 0.0)
def _sum(c1, c2):
return (c1[0]+c2[0],c1[1]+c2[1])
def _diff(c1, c2):
return (c1[0]-c2[0],c1[1]-c2[1])
def _neg(c):
return (-c[0],-c[1])
def _prod(c1, c2):
r = c1[0]*c2[0] - c1[1]*c2[1]
i = c1[0]*c2[1] + c1[1]*c2[0]
return (r,i)
def _quot(c1,c2):
r1, i1 = c1
r2, i2 = c2
if r2 < 0:
abs_r2 = - r2
else:
abs_r2 = r2
if i2 < 0:
abs_i2 = - i2
else:
abs_i2 = i2
if abs_r2 >= abs_i2:
if abs_r2 == 0.0:
raise ZeroDivisionError
else:
ratio = i2 / r2
denom = r2 + i2 * ratio
rr = (r1 + i1 * ratio) / denom
ir = (i1 - r1 * ratio) / denom
else:
ratio = r2 / i2
denom = r2 * ratio + i2
assert i2 != 0.0
rr = (r1 * ratio + i1) / denom
ir = (i1 * ratio - r1) / denom
return (rr,ir)
def _pow(c1,c2):
r1, i1 = c1
r2, i2 = c2
if r2 == 0.0 and i2 == 0.0:
rr, ir = c_1
elif r1 == 0.0 and i1 == 0.0:
if i2 != 0.0 or r2 < 0.0:
raise ZeroDivisionError
rr, ir = (0.0, 0.0)
else:
vabs = math.hypot(r1,i1)
len = math.pow(vabs,r2)
at = math.atan2(i1,r1)
phase = at * r2
if i2 != 0.0:
len /= math.exp(at * i2)
phase += i2 * math.log(vabs)
rr = len * math.cos(phase)
ir = len * math.sin(phase)
return (rr, ir)
def _powu(c, n):
    """Raise complex pair c to non-negative integer power n by binary
    (square-and-multiply) exponentiation."""
    rr, ir = c_1
    rp, ip = c
    mask = 1
    while mask > 0 and n >= mask:
        if n & mask:
            rr, ir = _prod((rr, ir), (rp, ip))
        mask <<= 1
        rp, ip = _prod((rp, ip), (rp, ip))
    return (rr, ir)
def _powi(c, n):
    """Integer power of a complex pair: exact binary exponentiation for
    |n| <= 100, otherwise the general float-based _pow."""
    if n > 100 or n < -100:
        return _pow(c, (1.0 * n, 0.0))
    if n > 0:
        return _powu(c, n)
    return _quot(c_1, _powu(c, -n))
# Delegation (implicit coercion) of the other numeric implementations
# to complex, used by the multimethod dispatch machinery.
def delegate_Bool2Complex(space, w_bool):
    return W_ComplexObject(w_bool.boolval, 0.0)

def delegate_Int2Complex(space, w_int):
    return W_ComplexObject(w_int.intval, 0.0)

def delegate_Long2Complex(space, w_long):
    try:
        dval = w_long.tofloat()
    except OverflowError, e:
        # the long does not fit in a float
        raise OperationError(space.w_OverflowError, space.wrap(str(e)))
    return W_ComplexObject(dval, 0.0)

def delegate_Float2Complex(space, w_float):
    return W_ComplexObject(w_float.floatval, 0.0)
def hash__Complex(space, w_value):
    """hash(z) combining the hashes of the real and imaginary parts."""
    #this is straight out of CPython complex implementation

    hashreal = _hash_float(space, w_value.realval)
    if hashreal == -1:
        return space.newint(-1)
    hashimg = _hash_float(space, w_value.imagval)
    if hashimg == -1:
        return space.newint(-1)
    combined = hashreal + 1000003 * hashimg
    if (combined == -1):
        # -1 is reserved as the error marker
        combined = -2
    return space.newint(combined)
def _w2t(space, w_complex):
    "convert an interplevel complex object to a tuple representation"
    assert space.is_true(space.isinstance(w_complex, space.w_complex))
    return w_complex.realval, w_complex.imagval

def _t2w(space, c):
    # inverse of _w2t: build a wrapped complex from a (real, imag) pair
    return W_ComplexObject(c[0], c[1])
# Arithmetic multimethods: unwrap to tuples, compute, rewrap.
def add__Complex_Complex(space, w_complex1, w_complex2):
    return _t2w(space, _sum(_w2t(space, w_complex1), _w2t(space, w_complex2)))

def sub__Complex_Complex(space, w_complex1, w_complex2):
    return _t2w(space, _diff(_w2t(space, w_complex1), _w2t(space, w_complex2)))

def mul__Complex_Complex(space, w_complex1, w_complex2):
    return _t2w(space, _prod(_w2t(space, w_complex1), _w2t(space, w_complex2)))

def div__Complex_Complex(space, w_complex1, w_complex2):
    try:
        return _t2w(space, _quot(_w2t(space, w_complex1), _w2t(space, w_complex2)))
    except ZeroDivisionError, e:
        raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e)))

# '/' and true division behave identically for complex
truediv__Complex_Complex = div__Complex_Complex
def mod__Complex_Complex(space, w_complex1, w_complex2):
    """z1 % z2 (deprecated complex modulo): z1 - floor(Re(z1/z2)) * z2."""
    try:
        div = _quot(_w2t(space, w_complex1), _w2t(space, w_complex2))
    except ZeroDivisionError, e:
        raise OperationError(space.w_ZeroDivisionError, space.wrap("complex remainder"))
    div = (math.floor(div[0]), 0.0)
    mod = _diff(_w2t(space, w_complex1), _prod(_w2t(space, w_complex2), div))
    return _t2w(space, mod)
def divmod__Complex_Complex(space, w_complex1, w_complex2):
    """divmod(z1, z2) (deprecated): (floor(Re(z1/z2)), z1 - that * z2)."""
    try:
        div = _quot(_w2t(space, w_complex1), _w2t(space, w_complex2))
    except ZeroDivisionError, e:
        raise OperationError(space.w_ZeroDivisionError, space.wrap("complex divmod()"))
    div = (math.floor(div[0]), 0.0)
    mod = _diff(_w2t(space, w_complex1), _prod(_w2t(space, w_complex2), div))
    w_div = _t2w(space, div)
    w_mod = _t2w(space, mod)
    return space.newtuple([w_div, w_mod])
def floordiv__Complex_Complex(space, w_complex1, w_complex2):
    """z1 // z2 (deprecated): floor of the real part of the quotient."""
    try:
        div = _quot(_w2t(space, w_complex1), _w2t(space, w_complex2))
    except ZeroDivisionError, e:
        raise OperationError(space.w_ZeroDivisionError, space.wrap("complex floordiv()"))
    div = (math.floor(div[0]), 0.0)
    return _t2w(space, div)
def pow__Complex_Complex_ANY(space, w_complex1, w_complex2, thirdArg):
    """z1 ** z2; the three-argument pow() form is rejected for complex.

    Integer exponents use the exact _powi path, everything else the
    general float-based _pow.
    """
    if not isinstance(thirdArg, W_NoneObject):
        raise OperationError(space.w_ValueError, space.wrap('complex modulo'))
    try:
        v = _w2t(space, w_complex1)
        exponent = _w2t(space, w_complex2)
        int_exponent = int(exponent[0])
        if exponent[1] == 0.0 and exponent[0] == int_exponent:
            p = _powi(v, int_exponent)
        else:
            p = _pow(v, exponent)
    except ZeroDivisionError:
        raise OperationError(space.w_ZeroDivisionError, space.wrap("0.0 to a negative or complex power"))
    except OverflowError:
        raise OperationError(space.w_OverflowError, space.wrap("complex exponentiation"))
    return _t2w(space, p)
def neg__Complex(space, w_complex):
    # -z: negate both components
    assert space.is_true(space.isinstance(w_complex, space.w_complex))
    return W_ComplexObject(-w_complex.realval, -w_complex.imagval)

def pos__Complex(space, w_complex):
    # +z: fresh object with the same components
    assert space.is_true(space.isinstance(w_complex, space.w_complex))
    return W_ComplexObject(w_complex.realval, w_complex.imagval)

def abs__Complex(space, w_complex):
    # abs(z): the magnitude sqrt(re**2 + im**2)
    assert space.is_true(space.isinstance(w_complex, space.w_complex))
    return space.newfloat(math.hypot(w_complex.realval, w_complex.imagval))
def eq__Complex_Complex(space, w_complex1, w_complex2):
    # z1 == z2: both components must match
    assert space.is_true(space.isinstance(w_complex1, space.w_complex))
    assert space.is_true(space.isinstance(w_complex2, space.w_complex))
    return space.newbool((w_complex1.realval == w_complex2.realval) and
                         (w_complex1.imagval == w_complex2.imagval))

def ne__Complex_Complex(space, w_complex1, w_complex2):
    # z1 != z2: either component differs
    assert space.is_true(space.isinstance(w_complex1, space.w_complex))
    assert space.is_true(space.isinstance(w_complex2, space.w_complex))
    return space.newbool((w_complex1.realval != w_complex2.realval) or
                         (w_complex1.imagval != w_complex2.imagval))

def lt__Complex_Complex(space, w_complex1, w_complex2):
    # ordering comparisons are undefined for complex numbers
    raise OperationError(space.w_TypeError, space.wrap('cannot compare complex numbers using <, <=, >, >='))

gt__Complex_Complex = lt__Complex_Complex
ge__Complex_Complex = lt__Complex_Complex
le__Complex_Complex = lt__Complex_Complex
def nonzero__Complex(space, w_complex):
    # bool(z): True unless both components are zero
    assert space.is_true(space.isinstance(w_complex, space.w_complex))
    return space.newbool((w_complex.realval != 0.0) or
                         (w_complex.imagval != 0.0))

def coerce__Complex_Complex(space, w_complex1, w_complex2):
    # coerce(z1, z2): both already complex, return them as a pair
    return space.newtuple([w_complex1, w_complex2])
def float__Complex(space, w_complex):
    # float(z) is invalid: point the user at abs()
    raise OperationError(space.w_TypeError, space.wrap("can't convert complex to float; use abs(z)"))

def int__Complex(space, w_complex):
    # int(z) is invalid: point the user at int(abs())
    raise OperationError(space.w_TypeError, space.wrap("can't convert complex to int; use int(abs(z))"))

def complex_conjugate__Complex(space, w_self):
    """z.conjugate(): same real part, negated imaginary part."""
    #w_real = space.call_function(space.w_float,space.wrap(w_self.realval))
    #w_imag = space.call_function(space.w_float,space.wrap(-w_self.imagval))
    return space.newcomplex(w_self.realval,-w_self.imagval)
app = gateway.applevel("""
import math
def possint(f):
ff = math.floor(f)
if f == ff:
return int(ff)
return f
def repr__Complex(f):
if not f.real:
return repr(possint(f.imag))+'j'
imag = f.imag
sign = ((imag >= 0) and '+') or ''
return '('+repr(possint(f.real)) + sign + repr(possint(f.imag))+'j)'
def str__Complex(f):
if not f.real:
return str(possint(f.imag))+'j'
imag = f.imag
sign = ((imag >= 0) and '+') or ''
return '('+str(possint(f.real)) + sign + str(possint(f.imag))+'j)'
""", filename=__file__)
repr__Complex = app.interphook('repr__Complex')
str__Complex = app.interphook('str__Complex')
from pypy.objspace.std import complextype
register_all(vars(), complextype)
| Python |
from pypy.objspace.std.objspace import *
from pypy.objspace.std.noneobject import W_NoneObject
from pypy.objspace.std.inttype import wrapint
from pypy.objspace.std.sliceobject import W_SliceObject
from pypy.objspace.std.listobject import W_ListObject
from pypy.objspace.std import listtype
from pypy.objspace.std import slicetype
from pypy.interpreter import gateway, baseobjspace
def length(start, stop, step):
    """Number of elements of range(start, stop, step); step must not be 0.

    Uses explicit floor division '//' -- identical to the old int '/'
    under Python 2, but unambiguous about the intended semantics.
    """
    if step > 0:
        if stop <= start:
            return 0
        return (stop - start + step - 1) // step
    else:  # step must be < 0
        if stop >= start:
            return 0
        return (start - stop - step - 1) // (-step)
class W_RangeListObject(W_Object):
    """Lazy list produced by range(): stores (start, step, length) and
    only materializes a real wrapped list on demand via force()."""
    typedef = listtype.list_typedef

    def __init__(w_self, start, step, length):
        assert step != 0
        w_self.start = start
        w_self.step = step
        w_self.length = length
        # w_list stays None until the range is forced into a real list
        w_self.w_list = None

    def force(w_self, space):
        """Materialize (once) and return the equivalent wrapped list."""
        if w_self.w_list is not None:
            return w_self.w_list
        start = w_self.start
        step = w_self.step
        length = w_self.length
        if not length:
            w_self.w_list = space.newlist([])
            return w_self.w_list
        arr = [None] * length  # this is to avoid using append.
        i = start
        n = 0
        while n < length:
            arr[n] = wrapint(space, i)
            i += step
            n += 1
        w_self.w_list = space.newlist(arr)
        return w_self.w_list

    def getitem(w_self, i):
        """Compute item i arithmetically (supports negative indices);
        raises IndexError when out of range."""
        if i < 0:
            i += w_self.length
        if i >= w_self.length or i < 0:
            raise IndexError
        return w_self.start + i * w_self.step

    def __repr__(w_self):
        # debugging representation; shows the forced list once it exists
        if w_self.w_list is None:
            return "W_RangeListObject(%s, %s, %s)" % (
                w_self.start, w_self.step, w_self.length)
        else:
            return "W_RangeListObject(%r)" % (w_self.w_list, )
def delegate_range2list(space, w_rangelist):
    """Delegation: force the lazy range into a genuine list object."""
    return w_rangelist.force(space)
def len__RangeList(space, w_rangelist):
    """len() of a range list; the lazy case avoids forcing."""
    if w_rangelist.w_list is not None:
        return space.len(w_rangelist.w_list)
    return wrapint(space, w_rangelist.length)
def getitem__RangeList_ANY(space, w_rangelist, w_index):
    """Index a range list with an integer-like object.

    A forced range indexes like a plain list; a lazy one computes the
    value directly and maps IndexError to the app-level IndexError."""
    if w_rangelist.w_list is not None:
        return space.getitem(w_rangelist.w_list, w_index)
    idx = space.getindex_w(w_index, space.w_IndexError, "list index")
    try:
        return wrapint(space, w_rangelist.getitem(idx))
    except IndexError:
        raise OperationError(space.w_IndexError,
                             space.wrap("list index out of range"))
def getitem__RangeList_Slice(space, w_rangelist, w_slice):
    """Slice a range list.

    Slicing a lazy range yields another lazy range (start and step are
    recombined); a forced one is sliced like a plain list.
    NOTE(review): for an empty slice, getitem(start) relies on indices4
    clamping 'start' into range -- confirm it cannot raise IndexError."""
    if w_rangelist.w_list is not None:
        return space.getitem(w_rangelist.w_list, w_slice)
    length = w_rangelist.length
    start, stop, step, slicelength = w_slice.indices4(space, length)
    assert slicelength >= 0
    substart = w_rangelist.getitem(start)
    substep = w_rangelist.step * step
    return W_RangeListObject(substart, substep, slicelength)
def iter__RangeList(space, w_rangelist):
    # Iterate via W_RangeIterObject (defined later in this module); the
    # iterator handles both the lazy and the forced case.
    # NOTE(review): the iterobject import below looks unused here --
    # W_RangeIterObject lives in this file -- but it may be kept for its
    # module-import side effects; confirm before removing.
    from pypy.objspace.std import iterobject
    return W_RangeIterObject(w_rangelist)
def repr__RangeList(space, w_rangelist):
    """repr() of a range list; the lazy case renders without forcing."""
    if w_rangelist.w_list is not None:
        return space.repr(w_rangelist.w_list)
    count = w_rangelist.length
    if count == 0:
        return space.wrap('[]')
    pieces = [''] * count
    value = w_rangelist.start
    for k in range(count):
        pieces[k] = str(value)
        value += w_rangelist.step
    return space.wrap("[" + ", ".join(pieces) + "]")
def list_pop__RangeList_ANY(space, w_rangelist, w_idx=-1):
    """pop() without forcing, but only for the cheap cases.

    Popping the first element advances 'start'; popping the last just
    shrinks 'length'.  Any other index raises FailedToImplement so the
    generic list implementation (which forces the range) handles it."""
    if w_rangelist.w_list is not None:
        # already forced: let the plain-list pop handle it
        raise FailedToImplement
    length = w_rangelist.length
    if length == 0:
        raise OperationError(space.w_IndexError,
                             space.wrap("pop from empty list"))
    idx = space.int_w(w_idx)
    if idx == 0:
        # pop the first element: shift the start by one step
        result = w_rangelist.start
        w_rangelist.start += w_rangelist.step
        w_rangelist.length -= 1
        return wrapint(space, result)
    if idx == -1 or idx == length - 1:
        # pop the last element: just shorten the range
        w_rangelist.length -= 1
        return wrapint(
            space, w_rangelist.start + (length - 1) * w_rangelist.step)
    if idx >= w_rangelist.length:
        raise OperationError(space.w_IndexError,
                             space.wrap("pop index out of range"))
    # a middle (or other negative) index: fall back to the forced version
    raise FailedToImplement
def list_reverse__RangeList(space, w_rangelist):
    # probably somewhat useless, but well...
    """Reverse in place without forcing: the new start is the old last
    item and the step changes sign.  A forced range falls back
    (FailedToImplement) to the plain-list implementation."""
    if w_rangelist.w_list is not None:
        raise FailedToImplement
    if w_rangelist.length == 0:
        # bug fix: getitem(-1) raises IndexError on an empty range;
        # reversing an empty list is simply a no-op
        return
    w_rangelist.start = w_rangelist.getitem(-1)
    w_rangelist.step = -w_rangelist.step
def list_sort__RangeList_None_None_ANY(space, w_rangelist, w_cmp,
                                       w_keyfunc, w_reverse):
    # even more useless but fun
    """sort() without forcing: a range list is already sorted in one
    direction, so sorting just means reversing it when the sign of the
    step disagrees with the requested order.  Returns w_None."""
    has_reverse = space.is_true(w_reverse)
    if w_rangelist.w_list is not None:
        raise FailedToImplement
    if has_reverse:
        factor = -1
    else:
        factor = 1
    reverse = w_rangelist.step * factor < 0
    if reverse and w_rangelist.length > 0:
        # length guard is a bug fix: getitem(-1) raises IndexError on an
        # empty range, and an empty list needs no reversal anyway
        w_rangelist.start = w_rangelist.getitem(-1)
        w_rangelist.step = -w_rangelist.step
    return space.w_None
class W_RangeIterObject(W_Object):
    """Iterator over a W_RangeListObject (works for lazy and forced)."""
    from pypy.objspace.std.itertype import iter_typedef as typedef

    def __init__(w_self, w_rangelist, index=0):
        # w_rangelist is reset to None once the iterator is exhausted
        w_self.w_rangelist = w_rangelist
        w_self.index = index
def iter__RangeIter(space, w_rangeiter):
    """iter() on a range iterator returns the iterator itself."""
    return w_rangeiter
def next__RangeIter(space, w_rangeiter):
    # Return the next wrapped int, or raise app-level StopIteration.
    # On exhaustion, w_rangeiter.w_rangelist is set to None so that any
    # later call takes the fast path below.
    if w_rangeiter.w_rangelist is None:
        raise OperationError(space.w_StopIteration, space.w_None)
    if w_rangeiter.w_rangelist.w_list is not None:
        # the range was forced into a real list: index into that list,
        # translating its IndexError into StopIteration
        try:
            w_item = space.getitem(w_rangeiter.w_rangelist.w_list,
                                   wrapint(space, w_rangeiter.index))
        except OperationError, e:
            w_rangeiter.w_rangelist = None
            if not e.match(space, space.w_IndexError):
                raise
            raise OperationError(space.w_StopIteration, space.w_None)
    else:
        # still lazy: compute the value directly from start/step
        try:
            w_item = wrapint(
                space,
                w_rangeiter.w_rangelist.getitem(w_rangeiter.index))
        except IndexError:
            w_rangeiter.w_rangelist = None
            raise OperationError(space.w_StopIteration, space.w_None)
    w_rangeiter.index += 1
    return w_item
def len__RangeIter(space, w_rangeiter):
    """__length_hint__-style len(): remaining items, never negative."""
    if w_rangeiter.w_rangelist is None:
        return wrapint(space, 0)   # already exhausted
    consumed = w_rangeiter.index
    w_total = space.len(w_rangeiter.w_rangelist)
    w_remaining = space.sub(w_total, wrapint(space, consumed))
    if space.is_true(space.lt(w_remaining, wrapint(space, 0))):
        w_remaining = wrapint(space, 0)
    return w_remaining
# Register both implementations and hook the multimethod implementations
# above into listtype's tables (range lists behave as lists app-level).
registerimplementation(W_RangeListObject)
registerimplementation(W_RangeIterObject)
register_all(vars(), listtype)
| Python |
import sys
from pypy.objspace.std.objspace import *
from pypy.objspace.std.intobject import W_IntObject
from pypy.objspace.std.noneobject import W_NoneObject
from pypy.rlib.rbigint import rbigint, SHIFT
class W_LongObject(W_Object):
    """This is a wrapper of rbigint."""
    from pypy.objspace.std.longtype import long_typedef as typedef

    def __init__(w_self, l):
        w_self.num = l # instance of rbigint

    def fromint(space, intval):
        # Alternate constructor: wrap a machine-sized int.
        return W_LongObject(rbigint.fromint(intval))
    fromint = staticmethod(fromint)

    def longval(self):
        # Unwrap to an ordinary (app-level) long via rbigint.tolong().
        return self.num.tolong()

    def unwrap(w_self, space): #YYYYYY
        return w_self.longval()

    def tofloat(self):
        return self.num.tofloat()

    def toint(self):
        # May raise OverflowError when the value does not fit a machine
        # int (see int__Long below, which catches it).
        return self.num.toint()

    def fromfloat(f):
        return W_LongObject(rbigint.fromfloat(f))
    fromfloat = staticmethod(fromfloat)

    def fromlong(l):
        return W_LongObject(rbigint.fromlong(l))
    fromlong = staticmethod(fromlong)

    def fromrarith_int(i):
        return W_LongObject(rbigint.fromrarith_int(i))
    # RPython annotator hint; must be set before the staticmethod wrapping
    fromrarith_int._annspecialcase_ = "specialize:argtype(0)"
    fromrarith_int = staticmethod(fromrarith_int)

    def fromdecimalstr(s):
        return W_LongObject(rbigint.fromdecimalstr(s))
    fromdecimalstr = staticmethod(fromdecimalstr)

    def _count_bits(self):
        return self.num._count_bits()

    def is_odd(self):
        return self.num.is_odd()

    def get_sign(self):
        return self.num.sign
# Make W_LongObject known to the std object space's multimethod tables.
registerimplementation(W_LongObject)

# bool-to-long
def delegate_Bool2Long(space, w_bool):
    """Implicit bool -> long delegation (False -> 0, True -> 1)."""
    truth = space.is_true(w_bool)
    return W_LongObject(rbigint.frombool(truth))
# int-to-long delegation
def delegate_Int2Long(space, w_intobj):
    """Implicit int -> long delegation."""
    return long__Int(space, w_intobj)
# long__Long is supposed to do nothing, unless it has
# a derived long object, where it should return
# an exact one.
def long__Long(space, w_long1):
    """long(x) for a long x.

    Returns x unchanged when it is an exact long; an instance of a
    long *subclass* is replaced by an exact long with the same value."""
    if space.is_w(space.type(w_long1), space.w_long):
        return w_long1
    return W_LongObject(w_long1.num)
def long__Int(space, w_intobj):
    """Coerce a wrapped machine int into a wrapped long."""
    return W_LongObject.fromint(space, w_intobj.intval)
def int__Long(space, w_value):
    """int(long): demote to a machine int when it fits, else stay long."""
    try:
        return space.newint(w_value.num.toint())
    except OverflowError:
        return long__Long(space, w_value)
def index__Long(space, w_value):
    """__index__ on longs: same normalization as long()."""
    return long__Long(space, w_value)
def float__Long(space, w_longobj):
    """float(long); overflow becomes an app-level OverflowError."""
    try:
        return space.newfloat(w_longobj.num.tofloat())
    except OverflowError:
        msg = "long int too large to convert to float"
        raise OperationError(space.w_OverflowError, space.wrap(msg))
def int_w__Long(space, w_value):
    """Unwrap to a machine int; raises app-level OverflowError if too big."""
    try:
        return w_value.num.toint()
    except OverflowError:
        msg = "long int too large to convert to int"
        raise OperationError(space.w_OverflowError, space.wrap(msg))
def uint_w__Long(space, w_value):
    """Unwrap to an unsigned machine int.

    A negative value raises app-level ValueError; a too-large one raises
    app-level OverflowError (the except order matters and is preserved)."""
    try:
        return w_value.num.touint()
    except ValueError:
        msg = "cannot convert negative integer to unsigned int"
        raise OperationError(space.w_ValueError, space.wrap(msg))
    except OverflowError:
        msg = "long int too large to convert to unsigned int"
        raise OperationError(space.w_OverflowError, space.wrap(msg))
def bigint_w__Long(space, w_value):
    """Unwrap directly to the underlying rbigint instance."""
    return w_value.num
def repr__Long(space, w_long):
    """repr(long), delegated to rbigint.repr()."""
    text = w_long.num.repr()
    return space.wrap(text)
def str__Long(space, w_long):
    """str(long), delegated to rbigint.str()."""
    text = w_long.num.str()
    return space.wrap(text)
def eq__Long_Long(space, w_long1, w_long2):
    """Equality of two longs via rbigint.eq."""
    same = w_long1.num.eq(w_long2.num)
    return space.newbool(same)
def lt__Long_Long(space, w_long1, w_long2):
    """Less-than on two longs via rbigint.lt."""
    smaller = w_long1.num.lt(w_long2.num)
    return space.newbool(smaller)
def hash__Long(space, w_value):
    """hash(long), delegated to rbigint.hash()."""
    return space.wrap(w_value.num.hash())
# coerce
def coerce__Long_Long(space, w_long1, w_long2):
    """coerce() on two longs: nothing to convert, return the pair."""
    w_pair = [w_long1, w_long2]
    return space.newtuple(w_pair)
def add__Long_Long(space, w_long1, w_long2):
    """Addition of two arbitrary-precision longs."""
    result = w_long1.num.add(w_long2.num)
    return W_LongObject(result)
def sub__Long_Long(space, w_long1, w_long2):
    """Subtraction of two arbitrary-precision longs."""
    result = w_long1.num.sub(w_long2.num)
    return W_LongObject(result)
def mul__Long_Long(space, w_long1, w_long2):
    """Multiplication of two arbitrary-precision longs."""
    result = w_long1.num.mul(w_long2.num)
    return W_LongObject(result)
def truediv__Long_Long(space, w_long1, w_long2):
    """True division of longs; the result is always a float.

    Division by zero and float overflow are mapped to the corresponding
    app-level exceptions."""
    try:
        return space.newfloat(w_long1.num.truediv(w_long2.num))
    except ZeroDivisionError:
        raise OperationError(space.w_ZeroDivisionError,
                             space.wrap("long division or modulo by zero"))
    except OverflowError:
        raise OperationError(space.w_OverflowError,
                             space.wrap("long/long too large for a float"))
def floordiv__Long_Long(space, w_long1, w_long2):
    """Floor division of longs; zero divisor raises app-level error."""
    try:
        quotient = w_long1.num.floordiv(w_long2.num)
    except ZeroDivisionError:
        raise OperationError(space.w_ZeroDivisionError,
                             space.wrap("long division or modulo by zero"))
    return W_LongObject(quotient)
def div__Long_Long(space, w_long1, w_long2):
    """Classic division on longs is identical to floor division."""
    return floordiv__Long_Long(space, w_long1, w_long2)
def mod__Long_Long(space, w_long1, w_long2):
    """Modulo of longs; zero divisor raises app-level error."""
    try:
        remainder = w_long1.num.mod(w_long2.num)
    except ZeroDivisionError:
        raise OperationError(space.w_ZeroDivisionError,
                             space.wrap("long division or modulo by zero"))
    return W_LongObject(remainder)
def divmod__Long_Long(space, w_long1, w_long2):
    """divmod() of longs: wrapped (quotient, remainder) tuple."""
    try:
        div, mod = w_long1.num.divmod(w_long2.num)
    except ZeroDivisionError:
        raise OperationError(space.w_ZeroDivisionError,
                             space.wrap("long division or modulo by zero"))
    return space.newtuple([W_LongObject(div), W_LongObject(mod)])
def pow__Long_Long_Long(space, w_long1, w_long2, w_long3):
    """Three-argument pow() on longs (modular exponentiation)."""
    # XXX need to replicate some of the logic, to get the errors right
    if w_long2.num.lt(rbigint.fromint(0)):
        raise OperationError(
            space.w_TypeError,
            space.wrap(
                "pow() 2nd argument "
                "cannot be negative when 3rd argument specified"))
    try:
        result = w_long1.num.pow(w_long2.num, w_long3.num)
    except ValueError:
        raise OperationError(space.w_ValueError,
                             space.wrap("pow 3rd argument cannot be 0"))
    return W_LongObject(result)
def pow__Long_Long_None(space, w_long1, w_long2, w_long3):
    """Two-argument pow() on longs; a negative exponent defers to another
    implementation via FailedToImplement."""
    # XXX need to replicate some of the logic, to get the errors right
    if w_long2.num.lt(rbigint.fromint(0)):
        raise FailedToImplement(
            space.w_ValueError,
            space.wrap("long pow() too negative"))
    return W_LongObject(w_long1.num.pow(w_long2.num, None))
def neg__Long(space, w_long1):
    """Unary minus on a long."""
    return W_LongObject(w_long1.num.neg())
def pos__Long(space, w_long):
    """Unary plus: same normalization as long()."""
    return long__Long(space, w_long)
def abs__Long(space, w_long):
    """Absolute value of a long."""
    return W_LongObject(w_long.num.abs())
def nonzero__Long(space, w_long):
    """Truth value of a long, delegated to rbigint.tobool()."""
    return space.newbool(w_long.num.tobool())
def invert__Long(space, w_long):
    """Bitwise complement (~) of a long."""
    return W_LongObject(w_long.num.invert())
def lshift__Long_Long(space, w_long1, w_long2):
    """Left shift on longs.

    A negative shift count raises app-level ValueError; a count too big
    for the implementation raises app-level OverflowError."""
    # XXX need to replicate some of the logic, to get the errors right
    if w_long2.num.lt(rbigint.fromint(0)):
        raise OperationError(space.w_ValueError,
                             space.wrap("negative shift count"))
    try:
        return W_LongObject(w_long1.num.lshift(w_long2.num))
    except OverflowError:   # b too big
        raise OperationError(space.w_OverflowError,
                             space.wrap("shift count too large"))
def rshift__Long_Long(space, w_long1, w_long2):
    """Right shift on longs; same error mapping as lshift."""
    # XXX need to replicate some of the logic, to get the errors right
    if w_long2.num.lt(rbigint.fromint(0)):
        raise OperationError(space.w_ValueError,
                             space.wrap("negative shift count"))
    try:
        return W_LongObject(w_long1.num.rshift(w_long2.num))
    except OverflowError:   # b too big # XXX maybe just return 0L instead?
        raise OperationError(space.w_OverflowError,
                             space.wrap("shift count too large"))
def and__Long_Long(space, w_long1, w_long2):
    """Bitwise AND of two longs."""
    return W_LongObject(w_long1.num.and_(w_long2.num))
def xor__Long_Long(space, w_long1, w_long2):
    """Bitwise XOR of two longs."""
    return W_LongObject(w_long1.num.xor(w_long2.num))
def or__Long_Long(space, w_long1, w_long2):
    """Bitwise OR of two longs."""
    return W_LongObject(w_long1.num.or_(w_long2.num))
def oct__Long(space, w_long1):
    """oct(long), delegated to rbigint.oct()."""
    return space.wrap(w_long1.num.oct())
def hex__Long(space, w_long1):
    """hex(long), delegated to rbigint.hex()."""
    return space.wrap(w_long1.num.hex())
def getnewargs__Long(space, w_long1):
    """__getnewargs__ for pickling: a one-tuple with an exact long copy."""
    w_copy = W_LongObject(w_long1.num)
    return space.newtuple([w_copy])
register_all(vars())

# register implementations of ops that recover int op overflows
# When an int-int operation overflows, the multimethod machinery retries
# with these generated *_ovr implementations, which redo the operation
# after delegating both operands to longs.  (Python 2 'exec' statements.)
# binary ops
for opname in ['add', 'sub', 'mul', 'div', 'floordiv', 'truediv', 'mod', 'divmod', 'lshift']:
    exec compile("""
def %(opname)s_ovr__Int_Int(space, w_int1, w_int2):
    w_long1 = delegate_Int2Long(space, w_int1)
    w_long2 = delegate_Int2Long(space, w_int2)
    return %(opname)s__Long_Long(space, w_long1, w_long2)
""" % {'opname': opname}, '', 'exec')
    # order=1 makes these lower priority than the plain int versions
    getattr(StdObjSpace.MM, opname).register(globals()['%s_ovr__Int_Int' % opname], W_IntObject, W_IntObject, order=1)

# unary ops
for opname in ['neg', 'abs']:
    exec """
def %(opname)s_ovr__Int(space, w_int1):
    w_long1 = delegate_Int2Long(space, w_int1)
    return %(opname)s__Long(space, w_long1)
""" % {'opname': opname}
    getattr(StdObjSpace.MM, opname).register(globals()['%s_ovr__Int' % opname], W_IntObject, order=1)
# pow
def pow_ovr__Int_Int_None(space, w_int1, w_int2, w_none3):
    """Retry two-argument int pow() as long pow() after an overflow."""
    w_base = delegate_Int2Long(space, w_int1)
    w_exp = delegate_Int2Long(space, w_int2)
    return pow__Long_Long_None(space, w_base, w_exp, w_none3)
def pow_ovr__Int_Int_Long(space, w_int1, w_int2, w_long3):
    """Retry three-argument int pow() as long pow() after an overflow."""
    w_base = delegate_Int2Long(space, w_int1)
    w_exp = delegate_Int2Long(space, w_int2)
    return pow__Long_Long_Long(space, w_base, w_exp, w_long3)
# Register the pow overflow-recovery variants explicitly: pow is a
# three-argument multimethod, so it cannot go through the loops above.
StdObjSpace.MM.pow.register(pow_ovr__Int_Int_None, W_IntObject, W_IntObject, W_NoneObject, order=1)
StdObjSpace.MM.pow.register(pow_ovr__Int_Int_Long, W_IntObject, W_IntObject, W_LongObject, order=1)
| Python |
from pypy.interpreter.error import OperationError
from pypy.objspace.std.objspace import register_all
from pypy.objspace.std.stdtypedef import StdTypeDef, newmethod, no_hash_descr
from pypy.objspace.std.stdtypedef import SMM
from pypy.interpreter.gateway import NoneNotWrapped
from pypy.interpreter import gateway
# Multimethod declarations for the app-level methods of 'set'.
# SMM(name, arity, ...) declares the app-level name, its arity (counting
# self) and its docstring; the implementations live in setobject.py and
# are hooked up by the register_all() call below.
set_add                         = SMM('add', 2,
                                      doc='Add an element to a set.\n\nThis'
                                          ' has no effect if the element is'
                                          ' already present.')

set_clear                       = SMM('clear', 1,
                                      doc='Remove all elements from this set.')

set_copy                        = SMM('copy', 1,
                                      doc='Return a shallow copy of a set.')

set_difference                  = SMM('difference', 2,
                                      doc='Return the difference of two sets'
                                          ' as a new set.\n\n(i.e. all'
                                          ' elements that are in this set but'
                                          ' not the other.)')

set_difference_update           = SMM('difference_update', 2,
                                      doc='Remove all elements of another set'
                                          ' from this set.')

set_discard                     = SMM('discard', 2,
                                      doc='Remove an element from a set if it'
                                          ' is a member.\n\nIf the element is'
                                          ' not a member, do nothing.')

set_intersection                = SMM('intersection', 2,
                                      doc='Return the intersection of two sets'
                                          ' as a new set.\n\n(i.e. all'
                                          ' elements that are in both sets.)')

set_intersection_update         = SMM('intersection_update', 2,
                                      doc='Update a set with the intersection'
                                          ' of itself and another.')

set_issubset                    = SMM('issubset', 2,
                                      doc='Report whether another set contains'
                                          ' this set.')

set_issuperset                  = SMM('issuperset', 2,
                                      doc='Report whether this set contains'
                                          ' another set.')

set_pop                         = SMM('pop', 1,
                                      doc='Remove and return an arbitrary set'
                                          ' element.')

set_remove                      = SMM('remove', 2,
                                      doc='Remove an element from a set; it'
                                          ' must be a member.\n\nIf the'
                                          ' element is not a member, raise a'
                                          ' KeyError.')

set_symmetric_difference        = SMM('symmetric_difference', 2,
                                      doc='Return the symmetric difference of'
                                          ' two sets as a new set.\n\n(i.e.'
                                          ' all elements that are in exactly'
                                          ' one of the sets.)')

set_symmetric_difference_update = SMM('symmetric_difference_update', 2,
                                      doc='Update a set with the symmetric'
                                          ' difference of itself and another.')

set_union                       = SMM('union', 2,
                                      doc='Return the union of two sets as a'
                                          ' new set.\n\n(i.e. all elements'
                                          ' that are in either set.)')

set_update                      = SMM('update', 2,
                                      doc='Update a set with the union of'
                                          ' itself and another.')

set_reduce                      = SMM('__reduce__',1,
                                      doc='Return state information for'
                                          ' pickling.')

register_all(vars(), globals())
def descr__new__(space, w_settype, __args__):
    """Allocate a new, empty set; the iterable argument (if any) is
    consumed later by __init__."""
    from pypy.objspace.std.setobject import W_SetObject
    w_newset = space.allocate_instance(W_SetObject, w_settype)
    W_SetObject.__init__(w_newset, space, None)
    return w_newset
set_typedef = StdTypeDef("set",
__doc__ = """set(iterable) --> set object
Build an unordered collection.""",
__new__ = newmethod(descr__new__, unwrap_spec=[gateway.ObjSpace,
gateway.W_Root,
gateway.Arguments]),
__hash__ = no_hash_descr,
)
set_typedef.registermethods(globals())
setiter_typedef = StdTypeDef("setiterator")
| Python |
"""Default implementation for some operation."""
from pypy.objspace.std.objspace import *
# The following default implementations are used before delegation is tried.
# 'id' is normally the address of the wrapper.
def id__ANY(space, w_obj):
    """Default id(): wrap the identity of the wrapper object itself."""
    return space.wrap(id(w_obj))
# __init__ should succeed if called internally as a multimethod
def init__ANY(space, w_obj, __args__):
    """Default __init__: accept anything and do nothing, so that an
    internal multimethod call to __init__ always succeeds."""
    pass
def typed_unwrap_error_msg(space, expected, w_obj):
    """Build the wrapped error message for a failed typed unwrap."""
    got = space.type(w_obj).getname(space, '?')
    return space.wrap("expected %s, got %s object" % (expected, got))
def int_w__ANY(space, w_obj):
    """Default int_w(): anything that gets here is not an integer."""
    raise OperationError(space.w_TypeError,
                         typed_unwrap_error_msg(space, "integer", w_obj))
def str_w__ANY(space, w_obj):
    """Default str_w(): anything that gets here is not a string."""
    raise OperationError(space.w_TypeError,
                         typed_unwrap_error_msg(space, "string", w_obj))
def float_w__ANY(space, w_obj):
    """Default float_w(): anything that gets here is not a float."""
    raise OperationError(space.w_TypeError,
                         typed_unwrap_error_msg(space, "float", w_obj))
def uint_w__ANY(space, w_obj):
    """Default uint_w(): anything that gets here is not an integer."""
    raise OperationError(space.w_TypeError,
                         typed_unwrap_error_msg(space, "integer", w_obj))
# Install all the default implementations defined above.
register_all(vars())
| Python |
from pypy.interpreter import gateway
from pypy.objspace.std.stdtypedef import *
from pypy.objspace.std.basestringtype import basestring_typedef
from pypy.interpreter.error import OperationError
from sys import maxint
# Multimethod declarations for the app-level methods of 'unicode'.
# SMM(name, arity, ...) declares the app-level name, its arity (counting
# self), default argument values and the docstring; implementations live
# in unicodeobject.py.
unicode_capitalize = SMM('capitalize', 1,
                         doc='S.capitalize() -> unicode\n\nReturn a'
                             ' capitalized version of S, i.e. make the first'
                             ' character\nhave upper case.')
unicode_center     = SMM('center', 3, defaults=(' ',),
                         doc='S.center(width[, fillchar]) -> unicode\n\nReturn'
                             ' S centered in a Unicode string of length width.'
                             ' Padding is\ndone using the specified fill'
                             ' character (default is a space)')
unicode_count      = SMM('count', 4, defaults=(0, maxint),
                         doc='S.count(sub[, start[, end]]) -> int\n\nReturn'
                             ' the number of occurrences of substring sub in'
                             ' Unicode string\nS[start:end]. Optional'
                             ' arguments start and end are\ninterpreted as in'
                             ' slice notation.')
unicode_encode     = SMM('encode', 3, defaults=(None, None),
                         doc='S.encode([encoding[,errors]]) -> string or'
                             ' unicode\n\nEncodes S using the codec registered'
                             ' for encoding. encoding defaults\nto the default'
                             ' encoding. errors may be given to set a'
                             ' different error\nhandling scheme. Default is'
                             " 'strict' meaning that encoding errors raise\na"
                             ' UnicodeEncodeError. Other possible values are'
                             " 'ignore', 'replace' and\n'xmlcharrefreplace' as"
                             ' well as any other name registered'
                             ' with\ncodecs.register_error that can handle'
                             ' UnicodeEncodeErrors.')
unicode_expandtabs = SMM('expandtabs', 2, defaults=(8,),
                         doc='S.expandtabs([tabsize]) -> unicode\n\nReturn a'
                             ' copy of S where all tab characters are expanded'
                             ' using spaces.\nIf tabsize is not given, a tab'
                             ' size of 8 characters is assumed.')
unicode_isalnum    = SMM('isalnum', 1,
                         doc='S.isalnum() -> bool\n\nReturn True if all'
                             ' characters in S are alphanumeric\nand there is'
                             ' at least one character in S, False otherwise.')
unicode_isalpha    = SMM('isalpha', 1,
                         doc='S.isalpha() -> bool\n\nReturn True if all'
                             ' characters in S are alphabetic\nand there is at'
                             ' least one character in S, False otherwise.')
unicode_isdecimal  = SMM('isdecimal', 1,
                         doc='S.isdecimal() -> bool\n\nReturn True if there'
                             ' are only decimal characters in S,\nFalse'
                             ' otherwise.')
unicode_isdigit    = SMM('isdigit', 1,
                         doc='S.isdigit() -> bool\n\nReturn True if all'
                             ' characters in S are digits\nand there is at'
                             ' least one character in S, False otherwise.')
unicode_islower    = SMM('islower', 1,
                         doc='S.islower() -> bool\n\nReturn True if all cased'
                             ' characters in S are lowercase and there is\nat'
                             ' least one cased character in S, False'
                             ' otherwise.')
unicode_isnumeric  = SMM('isnumeric', 1,
                         doc='S.isnumeric() -> bool\n\nReturn True if there'
                             ' are only numeric characters in S,\nFalse'
                             ' otherwise.')
unicode_isspace    = SMM('isspace', 1,
                         doc='S.isspace() -> bool\n\nReturn True if all'
                             ' characters in S are whitespace\nand there is at'
                             ' least one character in S, False otherwise.')
unicode_istitle    = SMM('istitle', 1,
                         doc='S.istitle() -> bool\n\nReturn True if S is a'
                             ' titlecased string and there is at least'
                             ' one\ncharacter in S, i.e. upper- and titlecase'
                             ' characters may only\nfollow uncased characters'
                             ' and lowercase characters only cased'
                             ' ones.\nReturn False otherwise.')
unicode_isupper    = SMM('isupper', 1,
                         doc='S.isupper() -> bool\n\nReturn True if all cased'
                             ' characters in S are uppercase and there is\nat'
                             ' least one cased character in S, False'
                             ' otherwise.')
unicode_join       = SMM('join', 2,
                         doc='S.join(sequence) -> unicode\n\nReturn a string'
                             ' which is the concatenation of the strings in'
                             ' the\nsequence. The separator between elements'
                             ' is S.')
unicode_ljust      = SMM('ljust', 3, defaults=(' ',),
                         doc='S.ljust(width[, fillchar]) -> int\n\nReturn S'
                             ' left justified in a Unicode string of length'
                             ' width. Padding is\ndone using the specified'
                             ' fill character (default is a space).')
unicode_lower      = SMM('lower', 1,
                         doc='S.lower() -> unicode\n\nReturn a copy of the'
                             ' string S converted to lowercase.')
unicode_rjust      = SMM('rjust', 3, defaults=(' ',),
                         doc='S.rjust(width[, fillchar]) -> unicode\n\nReturn'
                             ' S right justified in a Unicode string of length'
                             ' width. Padding is\ndone using the specified'
                             ' fill character (default is a space).')
unicode_swapcase   = SMM('swapcase', 1,
                         doc='S.swapcase() -> unicode\n\nReturn a copy of S'
                             ' with uppercase characters converted to'
                             ' lowercase\nand vice versa.')
unicode_title      = SMM('title', 1,
                         doc='S.title() -> unicode\n\nReturn a titlecased'
                             ' version of S, i.e. words start with title'
                             ' case\ncharacters, all remaining cased'
                             ' characters have lower case.')
unicode_translate  = SMM('translate', 2,
                         doc='S.translate(table) -> unicode\n\nReturn a copy'
                             ' of the string S, where all characters have been'
                             ' mapped\nthrough the given translation table,'
                             ' which must be a mapping of\nUnicode ordinals to'
                             ' Unicode ordinals, Unicode strings or'
                             ' None.\nUnmapped characters are left untouched.'
                             ' Characters mapped to None\nare deleted.')
unicode_upper      = SMM('upper', 1,
                         doc='S.upper() -> unicode\n\nReturn a copy of S'
                             ' converted to uppercase.')
unicode_zfill      = SMM('zfill', 2,
                         doc='S.zfill(width) -> unicode\n\nPad a numeric'
                             ' string x with zeros on the left, to fill a'
                             ' field\nof the specified width. The string x is'
                             ' never truncated.')
# stuff imported from stringtype for interoperability
from pypy.objspace.std.stringtype import str_endswith as unicode_endswith
from pypy.objspace.std.stringtype import str_startswith as unicode_startswith
from pypy.objspace.std.stringtype import str_find as unicode_find
from pypy.objspace.std.stringtype import str_index as unicode_index
from pypy.objspace.std.stringtype import str_replace as unicode_replace
from pypy.objspace.std.stringtype import str_rfind as unicode_rfind
from pypy.objspace.std.stringtype import str_rindex as unicode_rindex
from pypy.objspace.std.stringtype import str_split as unicode_split
from pypy.objspace.std.stringtype import str_rsplit as unicode_rsplit
from pypy.objspace.std.stringtype import str_partition as unicode_partition
from pypy.objspace.std.stringtype import str_rpartition as unicode_rpartition
from pypy.objspace.std.stringtype import str_splitlines as unicode_splitlines
from pypy.objspace.std.stringtype import str_strip as unicode_strip
from pypy.objspace.std.stringtype import str_rstrip as unicode_rstrip
from pypy.objspace.std.stringtype import str_lstrip as unicode_lstrip
# ____________________________________________________________
app = gateway.applevel('''
def unicode_from_encoded_object(obj, encoding, errors):
import codecs, sys
if encoding is None:
encoding = sys.getdefaultencoding()
decoder = codecs.getdecoder(encoding)
if errors is None:
retval, length = decoder(obj)
else:
retval, length = decoder(obj, errors)
if not isinstance(retval, unicode):
raise TypeError("decoder did not return an unicode object (type=%s)" %
type(retval).__name__)
return retval
def unicode_from_object(obj):
if isinstance(obj, str):
res = obj
else:
try:
unicode_method = obj.__unicode__
except AttributeError:
res = str(obj)
else:
res = unicode_method()
if isinstance(res, unicode):
return res
return unicode_from_encoded_object(res, None, "strict")
''')
unicode_from_object = app.interphook('unicode_from_object')
unicode_from_encoded_object = app.interphook('unicode_from_encoded_object')
def unicode_from_string(space, w_str):
    """Fast path for unicode(str): decode by hand when the default
    encoding is 'ascii', avoiding the app-level codecs machinery.
    Falls back to unicode_from_object() for other encodings or when a
    non-ASCII byte is found."""
    # this is a performance and bootstrapping hack
    from pypy.objspace.std.unicodeobject import W_UnicodeObject
    w_encoding = space.call_function(space.sys.get('getdefaultencoding'))
    if not space.eq_w(w_encoding, space.wrap('ascii')):
        return unicode_from_object(space, w_str)
    s = space.str_w(w_str)
    codelist = []
    for i in range(len(s)):
        code = ord(s[i])
        if code >= 128:
            # raising UnicodeDecodeError is messy, so "please crash for me"
            return unicode_from_object(space, w_str)
        codelist.append(unichr(code))
    # W_UnicodeObject is constructed from a list of unicode characters
    return W_UnicodeObject(codelist)
def descr__new__(space, w_unicodetype, w_string='', w_encoding=None, w_errors=None):
    """unicode(string[, encoding[, errors]]) -- implement unicode creation."""
    # NB. the default value of w_string is really a *wrapped* empty string:
    # there is gateway magic at work
    from pypy.objspace.std.unicodeobject import W_UnicodeObject
    w_obj = w_string
    w_obj_type = space.type(w_obj)
    if space.is_w(w_obj_type, space.w_unicode):
        # already an exact unicode: decoding arguments are not allowed
        if (not space.is_w(w_encoding, space.w_None) or
            not space.is_w(w_errors, space.w_None)):
            raise OperationError(space.w_TypeError,
                                 space.wrap('decoding Unicode is not supported'))
        if space.is_w(w_unicodetype, space.w_unicode):
            return w_obj   # exact type requested: share the object
        w_value = w_obj
    elif (space.is_w(w_encoding, space.w_None) and
          space.is_w(w_errors, space.w_None)):
        # no explicit encoding/errors: pick the cheapest conversion
        if space.is_true(space.isinstance(w_obj, space.w_str)):
            w_value = unicode_from_string(space, w_obj)
        elif space.is_true(space.isinstance(w_obj, space.w_unicode)):
            w_value = w_obj
        else:
            w_value = unicode_from_object(space, w_obj)
    else:
        w_value = unicode_from_encoded_object(space, w_obj, w_encoding, w_errors)
    # help the annotator! also the ._value depends on W_UnicodeObject layout
    assert isinstance(w_value, W_UnicodeObject)
    w_newobj = space.allocate_instance(W_UnicodeObject, w_unicodetype)
    W_UnicodeObject.__init__(w_newobj, w_value._value)
    return w_newobj
# ____________________________________________________________
unicode_typedef = StdTypeDef("unicode", basestring_typedef,
__new__ = newmethod(descr__new__),
__doc__ = '''unicode(string [, encoding[, errors]]) -> object
Create a new Unicode object from the given encoded string.
encoding defaults to the current default string encoding.
errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'.'''
)
unicode_typedef.custom_hash = True
unicode_typedef.registermethods(globals())
| Python |
from pypy.objspace.std.stdtypedef import *
from pypy.interpreter.gateway import NoneNotWrapped
def descr__new__(space, w_tupletype, w_sequence=NoneNotWrapped):
    """tuple([sequence]) -- implement tuple creation.

    An exact tuple passed to the exact tuple type is returned unchanged
    (tuples are immutable, so sharing the object is safe)."""
    from pypy.objspace.std.tupleobject import W_TupleObject
    if w_sequence is None:
        items_w = []
    elif (space.is_w(w_tupletype, space.w_tuple) and
          space.is_w(space.type(w_sequence), space.w_tuple)):
        return w_sequence
    else:
        items_w = space.unpackiterable(w_sequence)
    w_newtuple = space.allocate_instance(W_TupleObject, w_tupletype)
    W_TupleObject.__init__(w_newtuple, items_w)
    return w_newtuple
# ____________________________________________________________
tuple_typedef = StdTypeDef("tuple",
__doc__ = '''tuple() -> an empty tuple
tuple(sequence) -> tuple initialized from sequence's items
If the argument is a tuple, the return value is the same object.''',
__new__ = newmethod(descr__new__),
)
tuple_typedef.custom_hash = True
| Python |
from pypy.interpreter.error import OperationError
from pypy.interpreter import gateway
from pypy.interpreter.argument import Arguments
from pypy.interpreter.typedef import weakref_descr
from pypy.objspace.std.stdtypedef import *
def descr__new__(space, w_typetype, w_name, w_bases, w_dict):
    "This is used to create user-defined classes only."
    from pypy.objspace.std.typeobject import W_TypeObject
    # XXX check types
    w_typetype = _precheck_for_new(space, w_typetype)

    # Determine the most derived metatype among the requested metatype
    # and the metatypes of all bases (old-style classes are skipped).
    bases_w = space.unpackiterable(w_bases)
    w_winner = w_typetype
    for base in bases_w:
        w_typ = space.type(base)
        if space.is_w(w_typ, space.w_classobj):
            continue # special-case old-style classes
        if space.is_true(space.issubtype(w_winner, w_typ)):
            continue
        if space.is_true(space.issubtype(w_typ, w_winner)):
            w_winner = w_typ
            continue
        raise OperationError(space.w_TypeError,
                             space.wrap("metaclass conflict: "
                                        "the metaclass of a derived class "
                                        "must be a (non-strict) subclass "
                                        "of the metaclasses of all its bases"))

    if not space.is_w(w_winner, w_typetype):
        # a more derived metatype won: if it has a custom __new__,
        # delegate the whole creation to it
        newfunc = space.getattr(w_winner, space.wrap('__new__'))
        if not space.is_w(newfunc, space.getattr(space.w_type, space.wrap('__new__'))):
            return space.call_function(newfunc, w_winner, w_name, w_bases, w_dict)
        w_typetype = w_winner

    name = space.str_w(w_name)
    assert isinstance(name, str)
    # unwrap the class dictionary into an interp-level dict
    dict_w = {}
    dictkeys_w = space.unpackiterable(w_dict)
    for w_key in dictkeys_w:
        key = space.str_w(w_key)
        dict_w[key] = space.getitem(w_dict, w_key)

    # no bases at all means inheriting from object
    w_type = space.allocate_instance(W_TypeObject, w_typetype)
    W_TypeObject.__init__(w_type, space, name, bases_w or [space.w_object],
                          dict_w)
    w_type.ready()
    return w_type
def _precheck_for_new(space, w_type):
    """Return w_type if it is a genuine W_TypeObject, else raise TypeError."""
    from pypy.objspace.std.typeobject import W_TypeObject
    if isinstance(w_type, W_TypeObject):
        return w_type
    raise OperationError(space.w_TypeError,
                         space.wrap("X is not a type object (%s)" %
                                    (space.type(w_type).getname(space, '?'))))
def _check(space, w_type, msg=None):
    """Return w_type if it is a W_TypeObject, else raise TypeError with
    msg (or a default descriptor message)."""
    from pypy.objspace.std.typeobject import W_TypeObject
    if isinstance(w_type, W_TypeObject):
        return w_type
    raise OperationError(space.w_TypeError,
                         space.wrap(msg or "descriptor is for 'type'"))
def descr_get__name__(space, w_type):
    """Getter for type.__name__."""
    w_checked = _check(space, w_type)
    return space.wrap(w_checked.name)
def descr_set__name__(space, w_type, w_value):
    """Setter for type.__name__; only heap types may be renamed."""
    w_type = _check(space, w_type)
    if not w_type.is_heaptype():
        raise OperationError(space.w_TypeError,
                             space.wrap("can't set %s.__name__" %
                                        w_type.name))
    w_type.name = space.str_w(w_value)
def descr_get__mro__(space, w_type):
    """Getter for type.__mro__, as a tuple."""
    w_checked = _check(space, w_type)
    # XXX this should be inside typeobject.py
    return space.newtuple(w_checked.mro_w)
def descr_mro(space, w_type):
    """Return a type's method resolution order."""
    w_checked = _check(space, w_type, "expected type")
    return space.newlist(w_checked.compute_mro())
def descr_get__bases__(space, w_type):
    """Getter for type.__bases__, as a tuple."""
    w_checked = _check(space, w_type)
    return space.newtuple(w_checked.bases_w)
def mro_subclasses(space, w_type, temp):
    """Recursively recompute the mro of all (weakly-referenced, still
    alive) subclasses of w_type.  Each subclass's previous mro is
    appended to 'temp' as a (subclass, old_mro_w) pair so the caller can
    roll the changes back on failure."""
    from pypy.objspace.std.typeobject import W_TypeObject
    if not w_type.weak_subclasses_w:
        return
    for w_ref in w_type.weak_subclasses_w:
        w_sc = space.call_function(w_ref)   # dereference the weakref
        if not space.is_w(w_sc, space.w_None):   # subclass still alive?
            assert isinstance(w_sc, W_TypeObject)
            temp.append((w_sc, w_sc.mro_w))
            mro_internal(space, w_sc)
            mro_subclasses(space, w_sc, temp)
# should be a W_TypeObject method i guess
def mro_internal(space, w_type):
    """(Re)compute and store w_type.mro_w.

    If w_type's metatype is not exactly 'type', its (possibly overridden)
    mro() method is called at app level; otherwise the default
    computation is used directly."""
    if not space.is_w(space.type(w_type), space.w_type):
        #w_type.mro_w = []
        mro_func = space.lookup(w_type, 'mro')
        mro_func_args = Arguments(space, [w_type])
        w_mro = space.call_args(mro_func, mro_func_args)
        w_type.mro_w = space.unpackiterable(w_mro)
        # do some checking here
    else:
        w_type.mro_w = w_type.compute_mro()
def best_base(space, newstyle_bases_w):
    """Choose, among the new-style bases, the one providing the instance layout."""
    if not newstyle_bases_w:
        raise OperationError(space.w_TypeError,
            space.wrap("a new-style class can't have only classic bases"))
    w_bestbase = None
    w_winner = None
    for w_base in newstyle_bases_w:
        w_layout = w_base.get_layout()
        if w_winner is None:
            # first candidate
            w_winner = w_layout
            w_bestbase = w_base
        elif space.is_true(space.issubtype(w_winner, w_layout)):
            # current winner already extends this layout: keep it
            pass
        elif space.is_true(space.issubtype(w_layout, w_winner)):
            # this layout extends the current winner: it takes over
            w_winner = w_layout
            w_bestbase = w_base
        else:
            # layouts are unrelated: no single base can provide the layout
            raise OperationError(space.w_TypeError,
                space.wrap("multiple bases have instance lay-out conflict"))
    return w_bestbase
def descr_set__bases__(space, w_type, w_value):
    """Setter for type.__bases__: replace the bases of a heap type.

    Validates the new tuple, recomputes the mro of w_type and of all its
    live subclasses, updates the subclass registries, and rolls everything
    back if any step raises.
    """
    from pypy.objspace.std.typeobject import W_TypeObject
    # this assumes all app-level type objects are W_TypeObject
    w_type = _check(space, w_type)
    if not w_type.is_heaptype():
        raise OperationError(space.w_TypeError,
                             space.wrap("can't set %s.__bases__" %
                                        w_type.name))
    if not space.is_true(space.isinstance(w_value, space.w_tuple)):
        raise OperationError(space.w_TypeError,
                             space.wrap("can only assign tuple"
                                        " to %s.__bases__, not %s"%
                                        (w_type.name,
                                         space.type(w_value).getname(space, '?'))))
    if space.int_w(space.len(w_value)) == 0:
        raise OperationError(space.w_TypeError,
            space.wrap("can only assign non-empty tuple to %s.__bases__, not ()"%
                       w_type.name))
    # collect the new-style bases; old-style classes are accepted but take
    # no part in the layout computation below
    new_newstyle_bases = []
    for w_base in space.unpackiterable(w_value):
        if not isinstance(w_base, W_TypeObject):
            w_typ = space.type(w_base)
            if not space.is_w(w_typ, space.w_classobj):
                raise OperationError(space.w_TypeError,
                                     space.wrap("%s.__bases__ must be tuple "
                                                "of old- or new-style classes"
                                                ", not '%s'"%
                                                (w_type.name,
                                                 w_typ.getname(space, '?'))))
        else:
            new_newstyle_bases.append(w_base)
        if space.is_true(space.issubtype(w_base, w_type)):
            raise OperationError(space.w_TypeError,
                    space.wrap("a __bases__ item causes an inheritance cycle"))

    # the chosen best base must keep the instance layout unchanged
    new_base = best_base(space, new_newstyle_bases)

    if w_type.w_bestbase.get_full_instance_layout() != new_base.get_full_instance_layout():
        raise OperationError(space.w_TypeError,
            space.wrap("__bases__ assignment: '%s' object layout differs from '%s'" %
                       (w_type.getname(space, '?'), new_base.getname(space, '?'))))

    if space.config.objspace.std.withtypeversion:
        # it does not make sense to cache this type, it changes bases
        w_type.version_tag = None

    # snapshot current state for rollback
    saved_bases = w_type.bases_w
    saved_base = w_type.w_bestbase
    saved_mro = w_type.mro_w

    w_type.bases_w = space.unpackiterable(w_value)
    w_type.w_bestbase = new_base

    temp = []
    try:
        mro_internal(space, w_type)
        mro_subclasses(space, w_type, temp)

        for old_base in saved_bases:
            if isinstance(old_base, W_TypeObject):
                old_base.remove_subclass(w_type)
        # NOTE(review): this loop rebinds 'new_base'; harmless, since the
        # best base was already stored in w_type.w_bestbase above
        for new_base in new_newstyle_bases:
            new_base.add_subclass(w_type)
    except:
        # undo: first the mro of every touched subclass, then our own state
        for cls, old_mro in temp:
            cls.mro_w = old_mro
        w_type.bases_w = saved_bases
        w_type.w_bestbase = saved_base
        w_type.mro_w = saved_mro
        raise
def descr__base(space, w_type):
    """Getter for type.__base__: the best base, 'object' as fallback,
    and None for 'object' itself."""
    w_type = _check(space, w_type)
    if w_type.w_bestbase is not None:
        return w_type.w_bestbase
    if w_type is space.w_object:
        return space.w_None
    return space.w_object
def descr__doc(space, w_type):
    """Getter for type.__doc__: a fixed string on 'type' itself, otherwise
    the '__doc__' slot, read through its descriptor protocol."""
    if space.is_w(w_type, space.w_type):
        return space.wrap("""type(object) -> the object's type
type(name, bases, dict) -> a new type""")
    w_type = _check(space, w_type)
    w_result = w_type.getdictvalue_w(space, '__doc__')
    if w_result is None:
        return space.w_None
    else:
        # __doc__ may itself be a descriptor: bind it via space.get()
        return space.get(w_result, space.w_None, w_type)
def descr__flags(space, w_type):
    """Getter for type.__flags__."""
    return space.wrap(_check(space, w_type).__flags__)
def descr_get__module(space, w_type):
    """Getter for type.__module__."""
    return _check(space, w_type).get_module()
def descr_set__module(space, w_type, w_value):
    """Setter for type.__module__; only heap types are mutable."""
    w_type = _check(space, w_type)
    if not w_type.is_heaptype():
        raise OperationError(space.w_TypeError,
                             space.wrap("can't set %s.__module__" %
                                        w_type.name))
    w_type.dict_w['__module__'] = w_value
def descr___subclasses__(space, w_type):
    """Return the list of immediate subclasses."""
    return space.newlist(_check(space, w_type).get_subclasses())
# ____________________________________________________________

# App-level typedef of the 'type' type itself: wires the descr_* functions
# above into the corresponding app-visible attributes and methods.
# (StdTypeDef, newmethod, GetSetProperty, gateway, descr__new__,
# descr_get_dict and weakref_descr come from elsewhere in this module.)
type_typedef = StdTypeDef("type",
    __new__ = newmethod(descr__new__),
    __name__ = GetSetProperty(descr_get__name__, descr_set__name__),
    __bases__ = GetSetProperty(descr_get__bases__, descr_set__bases__),
    __base__ = GetSetProperty(descr__base),
    __mro__ = GetSetProperty(descr_get__mro__),
    __dict__ = GetSetProperty(descr_get_dict),
    __doc__ = GetSetProperty(descr__doc),
    mro = gateway.interp2app(descr_mro),
    __flags__ = GetSetProperty(descr__flags),
    __module__ = GetSetProperty(descr_get__module, descr_set__module),
    __subclasses__ = gateway.interp2app(descr___subclasses__),
    __weakref__ = weakref_descr,
    )
| Python |
def raises(excp, func, *args):
    """Assert that calling func(*args) raises an exception of type excp.

    Bug fix: the original failed via 'assert 1 == 0' *inside* the try
    block, so when excp was AssertionError the failure itself was caught
    by the except clause and the check silently passed.  The failure is
    now raised outside the try block.
    """
    try:
        func(*args)
    except excp:
        return
    raise AssertionError("%r not raised" % (excp,))
def assertEqual(a, b):
    """Fail unless a == b."""
    assert a == b
def assertNotEqual(a, b):
    """Fail unless a != b."""
    assert a != b
def assertIs(a, b):
    """Fail unless a and b are the same object."""
    assert a is b
# complex specific tests
EPS = 1e-9   # absolute tolerance used by assertAlmostEqual

def assertAlmostEqual(a, b):
    """Fail unless a and b agree within EPS, comparing real and imaginary
    parts separately when either argument is complex.

    Bug fix: the original compared *signed* differences (a - b < EPS),
    which incorrectly passed whenever a was much smaller than b; we now
    compare the absolute difference.
    """
    if isinstance(a, complex):
        if isinstance(b, complex):
            assert abs(a.real - b.real) < EPS
            assert abs(a.imag - b.imag) < EPS
        else:
            assert abs(a.real - b) < EPS
            assert abs(a.imag) < EPS
    else:
        if isinstance(b, complex):
            assert abs(a - b.real) < EPS
            assert abs(b.imag) < EPS
        else:
            assert abs(a - b) < EPS
def assertCloseAbs(x, y, eps=1e-9):
    """Fail unless the floats x and y "are close" (relative tolerance eps).

    Bug fix: the zero-operand branches used to *return* a bool instead of
    asserting, so e.g. assertCloseAbs(1.0, 0.0) silently succeeded; they
    now assert like the general branch.
    """
    # put the one with larger magnitude second
    if abs(x) > abs(y):
        x, y = y, x
    if y == 0:
        assert abs(x) < eps
        return
    if x == 0:
        assert abs(y) < eps
        return
    # check that relative difference < eps
    assert abs((x-y)/y) < eps
def assertClose(x, y, eps=1e-9):
    """Fail unless the complexes x and y "are close", comparing the real
    and imaginary parts separately via assertCloseAbs."""
    assertCloseAbs(x.real, y.real, eps)
    assertCloseAbs(x.imag, y.imag, eps)
def check_div(x, y):
    """Compute complex z=x*y, and check that z/x==y and z/y==x."""
    # exercises the / operator, __div__ and __truediv__
    # (Python 2: complex has both division special methods)
    z = x * y
    if x != 0:
        q = z / x
        assertClose(q, y)
        q = z.__div__(x)
        assertClose(q, y)
        q = z.__truediv__(x)
        assertClose(q, y)
    if y != 0:
        q = z / y
        assertClose(q, x)
        q = z.__div__(y)
        assertClose(q, x)
        q = z.__truediv__(y)
        assertClose(q, x)
| Python |
# empty
| Python |
def conference_scheduling():
    """Build the conference-scheduling constraint problem.

    Ten conferences c01..c10 each get a (room, time-slot) pair, subject to
    room restrictions, day restrictions, non-overlap within attendee
    groups, and a global all-different constraint.
    (domain, tell, make_expression, all_diff and distribute are assumed to
    come from the enclosing constraint library -- not visible here.)
    """
    # every variable ranges over all (room, slot) combinations
    dom_values = [(room,slot)
                  for room in ('room A','room B','room C')
                  for slot in ('day 1 AM','day 1 PM','day 2 AM',
                               'day 2 PM')]
    variables = {}
    for v in ('c01','c02','c03','c04','c05', 'c06','c07','c08','c09','c10'):
        variables[v] = domain(dom_values, v)
    # these conferences are restricted to room C
    for conf in ('c03','c04','c05','c06'):
        v = variables[conf]
        tell(make_expression([v], "%s[0] == 'room C'" % conf))
    # day-1-only conferences
    for conf in ('c01','c05','c10'):
        v = variables[conf]
        tell(make_expression([v], "%s[1].startswith('day 1')" % conf))
    # day-2-only conferences
    for conf in ('c02','c03','c04','c09'):
        v = variables[conf]
        tell(make_expression([v], "%s[1].startswith('day 2')" % conf))
    # conferences sharing attendees must not share a time slot
    groups = (('c01','c02','c03','c10'),
              ('c02','c06','c08','c09'),
              ('c03','c05','c06','c07'),
              ('c01','c03','c07','c08'))
    for group in groups:
        for conf1 in group:
            for conf2 in group:
                if conf2 > conf1:   # consider each unordered pair once
                    v1, v2 = variables[conf1], variables[conf2]
                    tell(make_expression([v1, v2], '%s[1] != %s[1]'% (conf1, conf2)))
    tell(all_diff(variables.values()))
    distribute('dichotomy')
    return variables.values()
def queens(size=8):
    """Build the N-queens problem (default 8x8): Q<i> is the column of the
    queen placed on row i; no two queens share a column or a diagonal."""
    # still not used/tested
    variables = []
    for i in range(size):
        variables.append(domain(range(size), 'Q%02d'%i))
    for r1 in range(size):
        for r2 in range(size):
            q1 = 'Q%02d' % r1
            q2 = 'Q%02d' % r2
            if r1 < r2:   # each unordered pair of rows once
                D = {'q1':q1,'q2':q2, 'r1' : r1, 'r2' : r2 }
                # NOTE(review): this passes the variable *names* q1/q2 to
                # make_expression, whereas conference_scheduling() passes
                # the variable objects -- verify which form the library
                # expects before using this function
                c = tell(make_expression([q1,q2],
                                         '%(q1)s != %(q2)s and '
                                         'abs(%(r1)s-%(r2)s) != '
                                         'abs(%(q1)s-%(q2)s)'% D ))
    distribute('dichotomy')
    return variables
| Python |
def dummy_problem(computation_space):
    """Degenerate problem: a single fresh variable with an empty domain."""
    w_var = computation_space.var('__dummy__')
    computation_space.set_dom(w_var, c.FiniteDomain([]))
    return (w_var,)
def send_more_money(computation_space):
    """The SEND + MORE = MONEY cryptarithmetic puzzle: each letter maps to
    a distinct decimal digit, with no leading zeros."""
    #FIXME: this problem needs propagators for integer finite domains
    # performance is terrible without it
    cs = computation_space
    variables = (s, e, n, d, m, o, r, y) = cs.make_vars('s', 'e', 'n', 'd', 'm', 'o', 'r', 'y')
    digits = range(10)
    for var in variables:
        cs.set_dom(var, c.FiniteDomain(digits))
    # all letters get different digits, expressed pairwise
    # use fd.AllDistinct
    for v1 in variables:
        for v2 in variables:
            if v1 != v2:
                cs.add_constraint([v1, v2],
                                  '%s != %s' % (v1.name, v2.name))
    # leading digits cannot be zero
    # use fd.NotEquals
    cs.add_constraint([s], 's != 0')
    cs.add_constraint([m], 'm != 0')
    # the arithmetic identity SEND + MORE == MONEY
    cs.add_constraint([s, e, n, d, m, o, r, y],
                      '1000*s+100*e+10*n+d+1000*m+100*o+10*r+e == 10000*m+1000*o+100*n+10*e+y')
    cs.set_distributor(di.DichotomyDistributor(cs))
    print cs.constraints
    return (s, e, n, d, m, o, r, y)
def conference_scheduling(computation_space):
    """Computation-space variant of the conference-scheduling problem
    (same constraints as the module-level conference_scheduling above)."""
    cs = computation_space
    dom_values = [(room,slot)
                  for room in ('room A','room B','room C')
                  for slot in ('day 1 AM','day 1 PM','day 2 AM',
                               'day 2 PM')]
    variables = [cs.var(v, FiniteDomain(dom_values))
                 for v in ('c01','c02','c03','c04','c05',
                           'c06','c07','c08','c09','c10')]
    # room C restrictions
    for conf in ('c03','c04','c05','c06'):
        v = cs.find_var(conf)
        cs.tell(make_expression([v], "%s[0] == 'room C'" % conf))
    # day-1-only conferences
    for conf in ('c01','c05','c10'):
        v = cs.find_var(conf)
        cs.tell(make_expression([v], "%s[1].startswith('day 1')" % conf))
    # day-2-only conferences
    for conf in ('c02','c03','c04','c09'):
        v = cs.find_var(conf)
        cs.tell(make_expression([v], "%s[1].startswith('day 2')" % conf))
    # conferences sharing attendees must not share a time slot
    groups = (('c01','c02','c03','c10'),
              ('c02','c06','c08','c09'),
              ('c03','c05','c06','c07'),
              ('c01','c03','c07','c08'))
    for group in groups:
        for conf1 in group:
            for conf2 in group:
                if conf2 > conf1:   # each unordered pair once
                    v1, v2 = cs.find_vars((conf1, conf2))
                    cs.tell(make_expression([v1, v2], '%s[1] != %s[1]'% (v1.name(),v2.name())))
    cs.tell(AllDistinct(variables))
    return variables
def sudoku(computation_space):
    """Build a (relaxed) sudoku problem: v<x><y> with x,y in 1..9.

    Rows, columns and 3x3 subsquares must sum to 45; additionally all cells
    of a subsquare are pairwise different.  NOTE(review): the sum-45
    constraints alone are weaker than 'all different' for rows/columns.
    """
    cs = computation_space
    import constraint as c
    variables = [cs.var('v%i%i'%(x,y)) for x in range(1,10) for y in range(1,10)]
    # Make the variables
    for v in variables:
        cs.set_dom(v, c.FiniteDomain(range(1,10)))
    # Add constraints for rows (sum should be 45)
    for i in range(1,10):
        row = [ v for v in variables if v.name[1] == str(i)]
        cs.add_constraint(row, 'sum([%s]) == 45' % ', '.join([v.name for v in row]))
    # Add constraints for columns (sum should be 45)
    for i in range(1,10):
        row = [ v for v in variables if v.name[2] == str(i)]
        cs.add_constraint(row, 'sum([%s]) == 45' % ', '.join([v.name for v in row]))
    # Add constraints for subsquares (sum should be 45)
    # each subsquare is described by its center cell plus the 8 offsets
    offsets = [(r,c) for r in [-1,0,1] for c in [-1,0,1]]
    subsquares = [(r,c) for r in [2,5,8] for c in [2,5,8]]
    for rc in subsquares:
        sub = [cs.find_var('v%d%d'% (rc[0] + off[0],rc[1] + off[1])) for off in offsets]
        cs.add_constraint(sub, 'sum([%s]) == 45' % ', '.join([v.name for v in sub]))
        # pairwise inequality inside the subsquare
        for v in sub:
            for m in sub[sub.index(v)+1:]:
                cs.add_constraint([v,m], '%s != %s' % (v.name, m.name))
    #print cs.constraints
    return tuple(variables)
| Python |
#
| Python |
import inspect
import os
from cclp import switch_debug_info
def raises(exception, call, *args):
    """Return True if call(*args) raises 'exception', False otherwise.

    Bug fix: the original used a bare 'except:' which also swallowed
    KeyboardInterrupt and SystemExit; we now only swallow Exception
    subclasses, letting real interpreter-exit signals propagate.
    """
    try:
        call(*args)
    except exception:
        return True
    except Exception:
        # a different exception counts as "did not raise the expected one"
        pass
    return False
class Skip(Exception):
    """Raised by skip() to mark the current test as skipped."""
    pass

def skip(desc):
    """Abort the current test, marking it as skipped with reason 'desc'.

    Fix: 'raise Skip, desc' is Python-2-only syntax; 'raise Skip(desc)' is
    semantically identical and valid in both Python 2 and 3.
    """
    raise Skip(desc)
def out(obj):
    """Write str(obj) straight to stdout (fd 1), bypassing buffering.
    (Python 2 code: on py3, os.write would require bytes.)"""
    os.write(1, str(obj))
def get_test_classes():
    """Return all classes defined in the module under test.

    NOTE(review): relies on the global 'tm' (the module under test) being
    set before this is called -- see the __main__ block below.
    """
    return [obj for name, obj in inspect.getmembers(tm)
            if isinstance(obj, type)]
def get_test_methods(klass):
    """Instantiate klass and return its public (name, member) pairs."""
    instance = klass()
    members = inspect.getmembers(instance)
    return [pair for pair in members if not pair[0].startswith('_')]
def run_tests(tm, selected_tests):
    """Collect and run the test methods found in module tm.

    selected_tests: list of method names to run; an empty list runs all.
    Prints '.', 'F' or 's' per test, then a summary, and finally re-runs
    the failures with debug info switched on.
    """
    # make the helpers available to the module under test
    tm.raises = raises
    tm.skip = skip
    successes = []
    failures = []
    skipped = []
    all_tests = [get_test_methods(cl) for cl in get_test_classes()]
    print "testing %s test(s) classe(s)" % len(all_tests)
    for tests in all_tests:
        for name, meth in tests:
            if name == 'setup_class': continue
            if selected_tests and name not in selected_tests:
                continue
            try:
                meth()
            except Skip, s:
                skipped.append((name, s.args[0]))
                out('s')
            except Exception, e:
                failures.append((name, meth, e))
                out('F')
            else:
                successes.append(name)
                out('.')
    out('\n')
    if successes:
        print "Successes :"
        print '', '\n '.join(successes)
        print
    if failures:
        print "Failures :"
        for name, _, exc in failures:
            print '', name, "failed because", str(exc)
        print
    if skipped:
        print "Skipped"
        for name, cause in skipped:
            print '', name, "skipped because", cause
        print
    # replay failures with more info
    switch_debug_info()
    for name, meth, _ in failures:
        meth()
if __name__ == '__main__':
    # Bug fix: the guard used to read "__name__ == __name__", which is
    # always true, so merely importing this module ran the tests.
    # Usage: python thisfile.py <test_module> [test_name ...]
    import sys
    tm = __import__(sys.argv[1])
    tests = list(sys.argv[2:])   # slicing can't fail; dropped the try/except
    run_tests(tm, tests)
| Python |
"""
This file defines restricted arithmetic:
classes and operations to express integer arithmetic,
such that before and after translation semantics are
consistent
r_uint an unsigned integer which has not overflow
checking. It is always positive and always
truncated to the internal machine word size.
intmask mask a possibly long value when running on CPython
back to a signed int value
ovfcheck check on CPython whether the result of a signed
integer operation did overflow
ovfcheck_lshift
<< with oveflow checking
catering to 2.3/2.4 differences about <<
ovfcheck_float_to_int
convert to an integer or raise OverflowError
r_longlong
like r_int but double word size
r_ulonglong
like r_uint but double word size
These are meant to be erased by translation, r_uint
in the process should mark unsigned values, ovfcheck should
mark where overflow checking is required.
"""
import math
from pypy.rpython import extregistry
from pypy.rlib import objectmodel
# set up of machine internals:
# keep doubling a native int and a long in lockstep until the int either
# overflows into long or stops matching -- this probes the host word size
_bits = 0
_itest = 1
_Ltest = 1L
while _itest == _Ltest and type(_itest) is int:
    _itest *= 2
    _Ltest *= 2
    _bits += 1
LONG_BIT = _bits+1       # number of bits in a native signed long
LONG_MASK = _Ltest*2-1   # all LONG_BIT bits set
LONG_TEST = _Ltest       # 2**(LONG_BIT-1)
def isinf(x):
    """Return True if the float x is positive or negative infinity."""
    # an infinity is the only nonzero float equal to its own half
    # (NaN fails the equality, zero fails the nonzero test)
    return x == x / 2 and x != 0.0
def intmask(n):
    """Wrap n back into the range of a native signed integer
    (two's-complement truncation to LONG_BIT bits)."""
    if isinstance(n, int):
        return int(n)       # possibly bool->int
    if isinstance(n, unsigned_int):
        n = long(n)
    elif isinstance(n, objectmodel.Symbolic):
        return n            # assume Symbolics don't overflow
    n &= LONG_MASK          # truncate to LONG_BIT bits
    if n >= LONG_TEST:
        n -= 2*LONG_TEST    # re-interpret the sign bit
    return int(n)
# the probing variables are no longer needed once LONG_* are computed
del _bits, _itest, _Ltest
def ovfcheck(r):
    """Overflow check for a signed integer operation.

    On CPython an overflowed native-int result has type long; that is the
    detection mechanism.  Translation erases this into a real check.
    """
    # to be used as ovfcheck(x <op> y)
    # raise OverflowError if the operation did overflow
    assert not isinstance(r, r_uint), "unexpected ovf check on unsigned"
    if type(r) is long:
        raise OverflowError, "signed integer expression did overflow"
    return r
def _local_ovfcheck(r):
    # a copy of the above, because we cannot call ovfcheck
    # in a context where no primitive operator is involved.
    # (note: uses isinstance(r, long) rather than an exact type check)
    assert not isinstance(r, r_uint), "unexpected ovf check on unsigned"
    if isinstance(r, long):
        raise OverflowError, "signed integer expression did overflow"
    return r
def ovfcheck_lshift(a, b):
    """Overflow-checking left shift (papers over 2.3/2.4 '<<' differences
    by shifting as a long and converting back)."""
    return _local_ovfcheck(int(long(a) << b))
# float bounds of the native signed integer range
FL_MAXINT = float(LONG_TEST-1)
FL_MININT = float(-LONG_TEST)

def ovfcheck_float_to_int(x):
    """Truncate the float x to an int; raise OverflowError when the
    integral part does not fit in a native signed integer."""
    fracpart, intpart = math.modf(x)
    if FL_MININT < intpart < FL_MAXINT:
        return int(intpart)
    raise OverflowError
def compute_restype(self_type, other_type):
    """Result type of a binary operation between two integer types:
    plain int/long loses against a fixed-width type; two fixed-width
    types combine into the widest one (signed only if both are)."""
    if other_type in (bool, int, long):
        if self_type is bool:
            return int
        return self_type
    if self_type in (bool, int, long):
        return other_type
    return build_int(None, self_type.SIGNED and other_type.SIGNED, max(self_type.BITS, other_type.BITS))
def signedtype(t):
    """True if integer type t is signed (plain Python ints always are)."""
    if t in (bool, int, long):
        return True
    else:
        return t.SIGNED
def normalizedinttype(t):
    """Normalize integer type t to one of the supported widths:
    plain int stays int, anything up to the native width becomes
    r_int-sized, wider types become r_longlong-sized."""
    if t is int:
        return int
    if t.BITS <= r_int.BITS:
        return build_int(None, t.SIGNED, r_int.BITS)
    else:
        assert t.BITS <= r_longlong.BITS
        return build_int(None, t.SIGNED, r_longlong.BITS)
class base_int(long):
    """Abstract base of the fixed-width integer types (built on long).

    (The previous docstring said "fake unsigned integer implementation",
    but this is also the base of the signed variants.)  Every arithmetic
    result goes through _widen() so mixing two fixed-width operands
    produces the widest of the two types.
    """
    def _widen(self, other, value):
        """
        if one argument is int or long, the other type wins.
        otherwise, produce the largest class to hold the result.
        """
        self_type = type(self)
        other_type = type(other)
        try:
            # fast path: result type memoized per (self_type, other_type)
            return self.typemap[self_type, other_type](value)
        except KeyError:
            pass
        restype = compute_restype(self_type, other_type)
        self.typemap[self_type, other_type] = restype
        return restype(value)

    def __new__(klass, val):
        if klass is base_int:
            raise TypeError("abstract base!")
        else:
            return super(base_int, klass).__new__(klass, val)

    def __int__(self):
        if self < LONG_TEST:
            return long.__int__(self)
        else:
            # too large for a plain int: wrap around like the machine would
            return intmask(self)

    def __add__(self, other):
        x = long(self)
        y = long(other)
        return self._widen(other, x + y)
    __radd__ = __add__

    def __sub__(self, other):
        x = long(self)
        y = long(other)
        return self._widen(other, x - y)

    def __rsub__(self, other):
        y = long(self)
        x = long(other)
        return self._widen(other, x - y)

    def __mul__(self, other):
        x = long(self)
        if not isinstance(other, (int, long)):
            # e.g. sequence repetition: delegate to the other operand
            return x * other
        y = long(other)
        return self._widen(other, x * y)
    __rmul__ = __mul__

    def __div__(self, other):
        x = long(self)
        y = long(other)
        return self._widen(other, x // y)
    __floordiv__ = __div__

    def __rdiv__(self, other):
        y = long(self)
        x = long(other)
        return self._widen(other, x // y)
    __rfloordiv__ = __rdiv__

    def __mod__(self, other):
        x = long(self)
        y = long(other)
        return self._widen(other, x % y)

    def __rmod__(self, other):
        y = long(self)
        x = long(other)
        return self._widen(other, x % y)

    def __divmod__(self, other):
        x = long(self)
        y = long(other)
        res = divmod(x, y)
        return (self.__class__(res[0]), self.__class__(res[1]))

    def __lshift__(self, n):
        x = long(self)
        y = long(n)
        # note: keeps self's type (no widening on left shift)
        return self.__class__(x << y)

    def __rlshift__(self, n):
        y = long(self)
        x = long(n)
        return self._widen(n, x << y)

    def __rshift__(self, n):
        x = long(self)
        y = long(n)
        return self._widen(n, x >> y)

    def __rrshift__(self, n):
        y = long(self)
        x = long(n)
        return self._widen(n, x >> y)

    def __or__(self, other):
        x = long(self)
        y = long(other)
        return self._widen(other, x | y)
    __ror__ = __or__

    def __and__(self, other):
        x = long(self)
        y = long(other)
        return self._widen(other, x & y)
    __rand__ = __and__

    def __xor__(self, other):
        x = long(self)
        y = long(other)
        return self._widen(other, x ^ y)
    __rxor__ = __xor__

    def __neg__(self):
        x = long(self)
        return self.__class__(-x)

    def __abs__(self):
        x = long(self)
        return self.__class__(abs(x))

    def __pos__(self):
        return self.__class__(self)

    def __invert__(self):
        x = long(self)
        return self.__class__(~x)

    def __pow__(self, other, m=None):
        x = long(self)
        y = long(other)
        res = pow(x, y, m)
        return self._widen(other, res)

    def __rpow__(self, other, m=None):
        y = long(self)
        x = long(other)
        res = pow(x, y, m)
        return self._widen(other, res)
class signed_int(base_int):
    """Base of the signed fixed-width types; range-checks on construction."""
    SIGNED = True
    def __new__(klass, val=0):
        if type(val) is float:
            val = long(val)
        # reject values that do not fit in BITS bits (sign included)
        if val > klass.MASK>>1 or val < -(klass.MASK>>1)-1:
            raise OverflowError("%s does not fit in signed %d-bit integer"%(val, klass.BITS))
        if val < 0:
            val = - ((-val) & klass.MASK)
        return super(signed_int, klass).__new__(klass, val)
    # (type, type) -> result-type cache used by base_int._widen()
    typemap = {}
class unsigned_int(base_int):
    """Base of the unsigned fixed-width types; masks on construction."""
    SIGNED = False
    def __new__(klass, val=0):
        if type(val) is float:
            val = long(val)
        # unsigned semantics: simply truncate to BITS bits
        return super(unsigned_int, klass).__new__(klass, val & klass.MASK)
    # (type, type) -> result-type cache used by base_int._widen()
    typemap = {}
# (sign, bits) -> class cache filled by build_int()
_inttypes = {}
def build_int(name, sign, bits):
    """Return (and cache) the fixed-width integer class for (sign, bits).

    'name' must be given when the size is not already cached; the new class
    is also registered with the annotator via two ExtRegistryEntry classes.
    """
    sign = bool(sign)
    try:
        return _inttypes[sign, bits]
    except KeyError:
        pass
    if sign:
        base_int_type = signed_int
    else:
        base_int_type = unsigned_int
    mask = (2 ** bits) - 1
    if name is None:
        raise TypeError('No predefined %sint%d'%(['u', ''][sign], bits))
    int_type = _inttypes[sign, bits] = type(name, (base_int_type,), {'MASK': mask,
                                                                     'BITS': bits})
    class ForValuesEntry(extregistry.ExtRegistryEntry):
        # instances of int_type annotate as SomeInteger of that knowntype
        _type_ = int_type

        def compute_annotation(self):
            from pypy.annotation import model as annmodel
            return annmodel.SomeInteger(knowntype=int_type)

    class ForTypeEntry(extregistry.ExtRegistryEntry):
        # calls like int_type(x) annotate and specialize as a plain cast
        _about_ = int_type

        def compute_result_annotation(self, *args_s, **kwds_s):
            from pypy.annotation import model as annmodel
            return annmodel.SomeInteger(knowntype=int_type)

        def specialize_call(self, hop):
            v_result, = hop.inputargs(hop.r_result.lowleveltype)
            hop.exception_cannot_occur()
            return v_result

    return int_type
class BaseIntValueEntry(extregistry.ExtRegistryEntry):
    """Annotation for values of the abstract base_int type itself."""
    _type_ = base_int

    def compute_annotation(self):
        from pypy.annotation import model as annmodel
        return annmodel.SomeInteger(knowntype=r_ulonglong)
class BaseIntTypeEntry(extregistry.ExtRegistryEntry):
    """Reject direct calls to the abstract base_int during annotation."""
    _about_ = base_int

    def compute_result_annotation(self, *args_s, **kwds_s):
        raise TypeError("abstract base!")
# the predefined machine-sized and 64-bit integer types
r_int = build_int('r_int', True, LONG_BIT)
r_uint = build_int('r_uint', False, LONG_BIT)
r_longlong = build_int('r_longlong', True, 64)
r_ulonglong = build_int('r_ulonglong', False, 64)
# float as string -> sign, beforept, afterpt, exponent
def break_up_float(s):
    """Split a float literal into (sign, int_part, frac_part, exponent).

    Accepts '[+-]digits[.digits][eE[+-]digits]'; each missing piece comes
    back as ''.  Raises ValueError on trailing garbage or a malformed
    exponent.  (RPython-friendly: simple index scanning only.)
    """
    digits = '0123456789'
    pos = 0
    sign = ''
    if s[pos] in '+-':
        sign = s[pos]
        pos += 1
    start = pos
    while pos < len(s) and s[pos] in digits:
        pos += 1
    before_point = s[start:pos]
    if pos == len(s):
        return sign, before_point, '', ''
    after_point = ''
    if s[pos] == '.':
        pos += 1
        start = pos
        while pos < len(s) and s[pos] in digits:
            pos += 1
        after_point = s[start:pos]
        if pos == len(s):
            return sign, before_point, after_point, ''
    if s[pos] not in 'eE':
        raise ValueError
    pos += 1
    if pos == len(s):
        raise ValueError
    exponent = ''
    if s[pos] in '-+':
        exponent = s[pos]
        pos += 1
        if pos == len(s):
            raise ValueError
    start = pos
    while pos < len(s) and s[pos] in digits:
        pos += 1
    exponent += s[start:pos]
    if pos != len(s):
        raise ValueError
    return sign, before_point, after_point, exponent
# string -> float helper
def parts_to_float(sign, beforept, afterpt, exponent):
    """Rebuild a float from the pieces produced by break_up_float()."""
    # an absent exponent means 10**0
    return float("%s%s.%se%s" % (sign, beforept, afterpt, exponent or '0'))
# float -> string
formatd_max_length = 120

def formatd(fmt, x):
    """Format the float x with the %-style format string fmt."""
    return fmt % (x,)

def formatd_overflow(alt, prec, kind, x):
    """Format x as '%[#].<prec><kind>', but raise OverflowError when the
    result could exceed formatd_max_length characters."""
    # worst-case lengths: ~10 extra chars for %g, ~53 for %f
    too_long = ((kind in 'gG' and formatd_max_length <= 10+prec) or
                (kind in 'fF' and formatd_max_length <= 53+prec))
    if too_long:
        raise OverflowError("formatted float is too long (precision too large?)")
    if alt:
        flag = '#'
    else:
        flag = ''
    fmt = "%%%s.%d%s" % (flag, prec, kind)
    return formatd(fmt, x)
# a common string hash function
def _hash_string(s):
    """Hash a string with the classic CPython algorithm
    (x = s[0]<<7, then x = 1000003*x ^ ch per char, xor length; never 0)."""
    n = len(s)
    if n == 0:
        return -1
    h = ord(s[0]) << 7
    for ch in s:
        h = (1000003 * h) ^ ord(ch)
    h ^= n
    if h == 0:
        h = -1
    return intmask(h)
| Python |
""" simple non-constant constant. Ie constant which does not get annotated as constant
"""
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.annotation.bookkeeper import getbookkeeper
from pypy.objspace.flow.model import Variable, Constant
from pypy.rpython.lltypesystem import lltype
class NonConstant(object):
    """Transparent proxy around a constant, hiding its constness from the
    annotator.  Attribute reads and writes are forwarded to the wrapped
    object, which is stored straight into __dict__ to dodge __setattr__."""
    def __init__(self, _constant):
        self.__dict__['constant'] = _constant

    def __getattr__(self, name):
        target = self.__dict__['constant']
        return getattr(target, name)

    def __setattr__(self, name, value):
        target = self.__dict__['constant']
        setattr(target, name, value)
class EntryNonConstant(ExtRegistryEntry):
    """Annotator/rtyper hook making NonConstant(x) behave as a
    non-constant value of x's type."""
    _about_ = NonConstant

    def compute_result_annotation(self, arg):
        if hasattr(arg, 'const'):
            # re-annotate the value without its const-ness
            return self.bookkeeper.immutablevalue(arg.const, False)
        else:
            return arg

    def specialize_call(self, hop):
        hop.exception_cannot_occur()
        # at rtyping time the value really is a constant again
        retval = Constant(hop.r_result.convert_const(hop.args_v[0].value))
        retval.concretetype = hop.r_result.lowleveltype
        return retval
| Python |
"""
An RPython implementation of getaddrinfo() based on ctypes.
This is a rewrite of the CPython source: Modules/getaddrinfo.c
"""
from ctypes import POINTER, sizeof, cast, pointer
from pypy.rlib import _rsocket_ctypes as _c
from pypy.rlib.rsocket import GAIError, CSocketError
from pypy.rlib.rsocket import gethost_common, make_address
# valid flags for addrinfo
AI_MASK = (_c.AI_PASSIVE | _c.AI_CANONNAME | _c.AI_NUMERICHOST)

# (code, name, message) triples for the EAI_* error constants
GAI_ERRORS = [
    (1, 'EAI_ADDRFAMILY', "address family for hostname not supported"),
    (2, 'EAI_AGAIN', "temporary failure in name resolution"),
    (3, 'EAI_BADFLAGS', "invalid value for ai_flags"),
    (4, 'EAI_FAIL', "failure in name resolution"),
    (5, 'EAI_FAMILY', "ai_family not supported"),
    (6, 'EAI_MEMORY', "memory allocation failure"),
    (7, 'EAI_NODATA', "no address associated with hostname"),
    (8, 'EAI_NONAME', "hostname nor servname provided, or not known"),
    (9, 'EAI_SERVICE', "servname not supported for ai_socktype"),
    (10, 'EAI_SOCKTYPE', "ai_socktype not supported"),
    (11, 'EAI_SYSTEM', "system error returned in errno"),
    (12, 'EAI_BADHINTS', "invalid value for hints"),
    (13, 'EAI_PROTOCOL', "resolved protocol is unknown."),
    (14, 'EAI_MAX', "unknown error"),
    ]
GAI_ERROR_MESSAGES = {}
for value, name, text in GAI_ERRORS:
    globals()[name] = value          # export each EAI_* as a module constant
    GAI_ERROR_MESSAGES[value] = text
# Replacement function for rsocket.GAIError.get_msg
def GAIError_getmsg(self):
    """Return the human-readable message for this GAIError's errno."""
    return GAI_ERROR_MESSAGES[self.errno]
# str.isdigit is not RPython, so provide our own
def str_isdigit(name):
    """Return True if name is non-empty and made only of ASCII digits.

    Bug fix: the original tested membership in the literal "012345789",
    which is missing the digit '6' -- any value containing a 6 (e.g. the
    port "6667") was wrongly rejected.  Use a range comparison instead so
    the digit set cannot be mistyped.
    """
    if name == "":
        return False
    for c in name:
        if c < '0' or c > '9':
            return False
    return True
GAI_ANY = 0                  # wildcard for family/socktype/protocol/port
INADDR_NONE = 0xFFFFFFFF     # inet_addr()'s error return value
def getaddrinfo(hostname, servname,
                family=_c.AF_UNSPEC, socktype=0,
                protocol=0, flags=0,
                address_to_fill=None):
    """IPv4-only getaddrinfo() replacement (rewrite of CPython's
    Modules/getaddrinfo.c).

    Returns a list of (family, socktype, protocol, canonname, address)
    tuples, or raises GAIError with one of the EAI_* codes.
    """
    if not hostname and not servname:
        raise GAIError(EAI_NONAME)

    # error checks for hints
    if flags & ~AI_MASK:
        raise GAIError(EAI_BADFLAGS)
    if family not in (_c.AF_UNSPEC, _c.AF_INET):
        raise GAIError(EAI_FAMILY)

    # reconcile socktype and protocol, deriving one from the other
    if socktype == GAI_ANY:
        if protocol == GAI_ANY:
            pass
        elif protocol == _c.IPPROTO_UDP:
            socktype = _c.SOCK_DGRAM
        elif protocol == _c.IPPROTO_TCP:
            socktype = _c.SOCK_STREAM
        else:
            socktype = _c.SOCK_RAW
    elif socktype == _c.SOCK_RAW:
        pass
    elif socktype == _c.SOCK_DGRAM:
        if protocol not in (_c.IPPROTO_UDP, GAI_ANY):
            raise GAIError(EAI_BADHINTS)
        protocol = _c.IPPROTO_UDP
    elif socktype == _c.SOCK_STREAM:
        if protocol not in (_c.IPPROTO_TCP, GAI_ANY):
            raise GAIError(EAI_BADHINTS)
        protocol = _c.IPPROTO_TCP
    else:
        raise GAIError(EAI_SOCKTYPE)

    port = GAI_ANY

    # service port: numeric, or resolved through getservbyname()
    if servname:
        if str_isdigit(servname):
            port = _c.htons(int(servname))
            # On windows, python2.3 uses getattrinfo.c,
            # python2.4 uses VC2003 implementation of getaddrinfo().
            #    if sys.version < (2, 4)
            #        socktype = _c.SOCK_DGRAM
            #        protocol = _c.IPPROTO_UDP
        else:
            if socktype == _c.SOCK_DGRAM:
                proto = "udp"
            elif socktype == _c.SOCK_STREAM:
                proto = "tcp"
            else:
                proto = None
            sp = _c.getservbyname(servname, proto)
            if not sp:
                raise GAIError(EAI_SERVICE)
            port = sp.contents.s_port
            if socktype == GAI_ANY:
                # let the service entry decide the socket type
                if sp.contents.s_proto == "udp":
                    socktype = _c.SOCK_DGRAM
                    protocol = _c.IPPROTO_UDP
                elif sp.contents.s_proto == "tcp":
                    socktype = _c.SOCK_STREAM
                    protocol = _c.IPPROTO_TCP
                else:
                    raise GAIError(EAI_PROTOCOL)

    # hostname == NULL
    # passive socket -> anyaddr (0.0.0.0 or ::)
    # non-passive socket -> localhost (127.0.0.1 or ::1)
    if not hostname:
        result = []
        if family in (_c.AF_UNSPEC, _c.AF_INET):
            sin = _c.sockaddr_in(sin_family=_c.AF_INET, sin_port=port)
            if flags & _c.AI_PASSIVE:
                sin.sin_addr.s_addr = 0x0        # addrany
            else:
                sin.sin_addr.s_addr = 0x0100007f # loopback
            addr = make_address(cast(pointer(sin), POINTER(_c.sockaddr)),
                                sizeof(_c.sockaddr_in), address_to_fill)
            result.append((_c.AF_INET, socktype, protocol, "", # xxx canonname meaningless? "anyaddr"
                           addr))
        if not result:
            raise GAIError(EAI_FAMILY)
        return result

    # hostname as numeric name (dotted-quad)
    if family in (_c.AF_UNSPEC, _c.AF_INET):
        packedaddr = _c.inet_addr(hostname)

        if packedaddr != INADDR_NONE:
            v4a = _c.ntohl(packedaddr)
            # multicast, experimental and loopback addresses never get a
            # canonical name
            if (v4a & 0xf0000000 == 0xe0000000 or # IN_MULTICAST()
                v4a & 0xe0000000 == 0xe0000000):  # IN_EXPERIMENTAL()
                flags &= ~_c.AI_CANONNAME
            v4a >>= 24 # = IN_CLASSA_NSHIFT
            if v4a in (0, 127): # = IN_LOOPBACKNET
                flags &= ~_c.AI_CANONNAME

            if not flags & _c.AI_CANONNAME:
                sin = _c.sockaddr_in(sin_family=_c.AF_INET, sin_port=port)
                sin.sin_addr.s_addr = packedaddr
                addr = make_address(cast(pointer(sin), POINTER(_c.sockaddr)),
                                    sizeof(_c.sockaddr_in), address_to_fill)
                return [(_c.AF_INET, socktype, protocol, None, addr)]
            else:
                sin = _c.sockaddr_in(sin_family=_c.AF_INET, sin_port=port)
                sin.sin_addr.s_addr = packedaddr
                # getaddrinfo() is a name->address translation function,
                # and it looks strange that we do addr->name translation here.
                # This is what python2.3 did on Windows:
                #   if sys.version < (2, 4):
                #       canonname = get_name(hostname, sin.sin_addr,
                #                            sizeof(_c.in_addr))
                canonname = hostname
                addr = make_address(cast(pointer(sin), POINTER(_c.sockaddr)),
                                    sizeof(_c.sockaddr_in), address_to_fill)
                return [(_c.AF_INET, socktype, protocol, canonname, addr)]

    if flags & _c.AI_NUMERICHOST:
        raise GAIError(EAI_NONAME)

    # hostname as alphabetical name: fall back to a DNS lookup
    result = get_addr(hostname, socktype, protocol, port, address_to_fill)
    if result:
        return result
    raise GAIError(EAI_FAIL)
def get_name(hostname, addr, addrlen):
    """Reverse-resolve addr to a canonical host name.

    # if reverse lookup fails,
    # return the address anyway to pacify the calling application.
    """
    hostent = _c.gethostbyaddr(pointer(addr), addrlen, _c.AF_INET)
    if not hostent:
        return hostname
    hname, aliases, address_list = gethost_common("", hostent)
    if hostent and hostent.contents.h_name and hostent.contents.h_addr_list[0]:
        return hostent.contents.h_name
    else:
        return hostname
def get_addr(hostname, socktype, protocol, port, address_to_fill):
    """Resolve hostname via gethostbyname() and build the getaddrinfo()
    result tuples, patching the requested port into each address."""
    hostent = _c.gethostbyname(hostname)
    if not hostent:
        raise GAIError(EAI_FAIL)
    hname, aliases, address_list = gethost_common("", hostent)
    result = []
    for address in address_list:
        if address.addr.sa_family == _c.AF_INET:
            # write the port into the sockaddr_in in place
            a = cast(pointer(address.addr), POINTER(_c.sockaddr_in)).contents
            a.sin_port = port & 0xffff
        addr = make_address(pointer(address.addr),address.addrlen,address_to_fill)
        result.append((address.addr.sa_family,
                       socktype,
                       protocol,
                       "", # XXX canonname?
                       addr))
    return result
| Python |
"""
Helper file for Python equivalents of os specific calls.
"""
import os
def putenv(name_eq_value):
    """Emulate C putenv(): store 'NAME=value' into os.environ and refresh
    the _initial_items snapshot used by environ()/getenv() below."""
    # we fake it with the real one
    global _initial_items
    name, value = name_eq_value.split('=', 1)
    os.environ[name] = value
    _initial_items = os.environ.items()
putenv._annenforceargs_ = (str,)
# snapshot of the process environment, used to simulate the C environ list
_initial_items = os.environ.items()

def environ(idx):
    """Return the idx-th 'NAME=value' entry of the simulated environ
    list, or None when idx is past the end."""
    if idx >= len(_initial_items):
        return None
    name, value = _initial_items[idx]
    return '%s=%s' % (name, value)
def getenv(name):
    """Look up 'name' in the simulated environ list; None when absent.
    (Linear scan -- slowish, fine for non-repeated use.)"""
    prefix = name + '='
    idx = 0
    while 1:
        entry = environ(idx)
        if entry is None:
            return None
        if entry.startswith(prefix):
            return entry[len(prefix):]
        idx += 1
getenv._annenforceargs_ = (str,)
class DIR(object):
    # a simulated DIR structure from C, i.e. a directory opened by
    # opendir() from which we can enumerate the entries with readdir().
    # Like readdir(), this version does not hide the '.' and '..' entries.
    def __init__(self, dirname):
        self._entries = iter(['.', '..'] + os.listdir(dirname))

    def readdir(self):
        """Return the next entry name, or None when exhausted."""
        try:
            return self._entries.next()   # Python 2 iterator protocol
        except StopIteration:
            return None

    def closedir(self):
        """No-op: this emulation holds no OS resources."""
        pass
def opendir(dirname):
    """Emulate C opendir(): return a DIR over the entries of dirname."""
    return DIR(dirname)
opendir._annenforceargs_ = (str,)
# probably we can get an annotation support for not having both implementations
# here, but let's leave it for now
def utime_null(path):
    """utime(path, NULL): set access/modification times to 'now'."""
    os.utime(path, None)
def utime_tuple(path, tp):
    """utime(path, (atime, mtime)): set explicit access/modification times."""
    os.utime(path, tp)
| Python |
from pypy.rlib.rarithmetic import LONG_BIT, intmask, r_uint, r_ulonglong, ovfcheck
import math, sys
# It took many days of debugging and testing, until
# I (chris) finally understood how things work and where
# to expect overflows in the division code.
# In the end, I decided to throw this all out and to use
# plain integer expressions. r_uint and friends should go away!
# Unsignedness can be completely deduced by back-propagation
# of masking. I will change the annotator to do this.
# Having no special types at all, but describing everything
# in terms of operations and masks is the stronger way.
# Digit size:
# SHIFT cannot be larger than below, for the moment.
# In division, the native integer type must be able to hold
# a sign bit plus two digits plus 1 overflow bit.
# As a result, our digits will be 15 bits with one unused
# bit, exactly as it is in CPython.
#
# The algorithms are anyway not bound to a given digit size.
# There are different models possible, if we support more
# native integer sizes. To support this, the annotator should
# be extended to do some basic size tracking of integers.
#
# Examples:
# C
# Most C implementations have support for signed long long.
# use an unsigned 16 bit unsigned short for the digits.
# The operations which must hold two digits become unsigned long.
# The sign+two digits+overflow register in division becomes
# a 64 bit signed long long.
#
# X86 assembler
# Given that we support some more primitive types for integers,
# this might become a nicer layout for an X86 assembly backend:
# The digit would be 32 bit long unsigned int,
# two digits would be 64 bit long long unsigned int,
# and the signed type mentioned above would be 80 bit extended.
#
# Emulation of different integer types
# Even if we don't have machine support for certain types,
# it might be worth trying to emulate them by providing some
# means of multi-precision integers in rpython.
# It is possible to write primitive code that emits the
# necessary operations for emulation of larger types.
# But we should do some careful testing how fast this code
# will be, compared to just working with native types.
# Probably the primitive types will outperform this.
# a digit must fit twice (plus sign and overflow bit) in a native integer
SHIFT = (LONG_BIT // 2) - 1
# XXX
# SHIFT cannot be anything but 15 at the moment, or we break marshal
SHIFT = 15

MASK = int((1 << SHIFT) - 1)   # all SHIFT bits set: the maximum digit value

# Debugging digit array access.
#
# False == no checking at all
# True == check 0 <= value <= MASK
CHECK_DIGITS = False # True

if CHECK_DIGITS:
    class DigitArray(list):
        # list that validates every stored digit against [0, MASK]
        def __setitem__(self, idx, value):
            assert value >=0
            assert value <= MASK
            list.__setitem__(self, idx, value)
else:
    DigitArray = list

USE_KARATSUBA = True # set to False for comparison

# For long multiplication, use the O(N**2) school algorithm unless
# both operands contain more than KARATSUBA_CUTOFF digits (this
# being an internal Python long digit, in base BASE).
KARATSUBA_CUTOFF = 70
KARATSUBA_SQUARE_CUTOFF = 2 * KARATSUBA_CUTOFF

# For exponentiation, use the binary left-to-right algorithm
# unless the exponent contains more than FIVEARY_CUTOFF digits.
# In that case, do 5 bits at a time.  The potential drawback is that
# a table of 2**5 intermediate results is computed.
FIVEARY_CUTOFF = 8
class rbigint(object):
    """This is a reimplementation of longs using a list of digits.

    Representation invariants:
    - self.digits is a little-endian list of digits, each in range(MASK + 1);
    - self.sign is -1, 0 or 1; zero is canonically sign == 0, digits == [0].
    """

    def __init__(self, digits, sign=0):
        # An empty digit list is normalized to [0] so digits[0] always exists.
        if len(digits) == 0:
            digits = [0]
        self.digits = DigitArray(digits)
        self.sign = sign

    def fromint(intval):
        # Build an rbigint from a native machine integer.
        if intval < 0:
            sign = -1
            ival = r_uint(-intval)
        elif intval > 0:
            sign = 1
            ival = r_uint(intval)
        else:
            return rbigint([0], 0)
        # Count the number of Python digits.
        # We used to pick 5 ("big enough for anything"), but that's a
        # waste of time and space given that 5*15 = 75 bits are rarely
        # needed.
        t = ival
        ndigits = 0
        while t:
            ndigits += 1
            t >>= SHIFT
        v = rbigint([0] * ndigits, sign)
        t = ival
        p = 0
        # Fill the digits, least significant first.
        while t:
            v.digits[p] = intmask(t & MASK)
            t >>= SHIFT
            p += 1
        return v
    fromint = staticmethod(fromint)

    def frombool(b):
        # True -> 1 with sign 1, False -> 0 with sign 0.
        return rbigint([b & MASK], int(b))
    frombool = staticmethod(frombool)

    def fromlong(l):
        # Build from an application-level Python long (not RPython).
        return rbigint(*args_from_long(l))
    fromlong = staticmethod(fromlong)

    def fromfloat(dval):
        """ Create a new bigint object from a float """
        neg = 0
        if isinf(dval):
            raise OverflowError
        if dval < 0.0:
            neg = 1
            dval = -dval
        frac, expo = math.frexp(dval) # dval = frac*2**expo; 0.0 <= frac < 1.0
        if expo <= 0:
            return rbigint([0], 0)
        ndig = (expo-1) // SHIFT + 1 # Number of 'digits' in result
        v = rbigint([0] * ndig, 1)
        frac = math.ldexp(frac, (expo-1) % SHIFT + 1)
        # Peel off SHIFT bits at a time, most significant digit first.
        for i in range(ndig-1, -1, -1):
            bits = int(frac) & MASK # help the future annotator?
            v.digits[i] = bits
            frac -= float(bits)
            frac = math.ldexp(frac, SHIFT)
        if neg:
            v.sign = -1
        return v
    fromfloat = staticmethod(fromfloat)

    def fromrarith_int(i):
        # Build from any rlib.rarithmetic integer type.
        return rbigint(*args_from_rarith_int(i))
    fromrarith_int._annspecialcase_ = "specialize:argtype(0)"
    fromrarith_int = staticmethod(fromrarith_int)

    def fromdecimalstr(s):
        # Parse a decimal string (helper defined elsewhere in this module).
        return _decimalstr_to_bigint(s)
    fromdecimalstr = staticmethod(fromdecimalstr)

    def toint(self):
        # Raises OverflowError if the value does not fit in a native int.
        return _AsLong(self)

    def tobool(self):
        return self.sign != 0

    def touint(self):
        # Convert to r_uint; raises ValueError on negative values,
        # OverflowError when the value does not fit.
        if self.sign == -1:
            raise ValueError("cannot convert negative integer to unsigned int")
        x = r_uint(0)
        i = len(self.digits) - 1
        while i >= 0:
            prev = x
            # If shifting back does not restore the previous value,
            # high bits were lost: overflow.
            x = (x << SHIFT) + self.digits[i]
            if (x >> SHIFT) != prev:
                raise OverflowError(
                        "long int too large to convert to unsigned int")
            i -= 1
        return x

    def toulonglong(self):
        # Same as touint() but targeting r_ulonglong.
        # NOTE(review): error message says "unsigned int"; probably meant
        # "unsigned long long int" -- kept as-is here.
        if self.sign == -1:
            raise ValueError("cannot convert negative integer to unsigned int")
        x = r_ulonglong(0)
        i = len(self.digits) - 1
        while i >= 0:
            prev = x
            x = (x << SHIFT) + self.digits[i]
            if (x >> SHIFT) != prev:
                raise OverflowError(
                        "long int too large to convert to unsigned long long int")
            i -= 1
        return x

    def tofloat(self):
        # Raises OverflowError if too large for a double.
        return _AsDouble(self)

    def _count_bits(a):
        # return the number of bits in the digits
        if a.sign == 0:
            return 0
        p = len(a.digits) - 1
        bits = SHIFT * p
        digit = a.digits[p]
        # Count the bits actually used in the most significant digit.
        while digit:
            digit >>= 1
            bits += 1
        return bits

    def is_odd(a):
        # Note: this is a tiny optimization.
        # Instead of implementing a general "get_bit" operation,
        # which would be expensive for negative numbers,
        # get_odd has the nice feature that it is always correct,
        # no matter what the sign is (two's complement)
        return a.digits[0] & 1

    def format(self, digits, prefix='', suffix=''):
        # 'digits' is a string whose length is the base to use,
        # and where each character is the corresponding digit.
        return _format(self, digits, prefix, suffix)

    def repr(self):
        return _format(self, BASE10, '', 'L')

    def str(self):
        return _format(self, BASE10)

    def eq(self, other):
        # Equal iff same sign, same length, and all digits agree.
        if (self.sign != other.sign or
                len(self.digits) != len(other.digits)):
            return False
        i = 0
        ld = len(self.digits)
        while i < ld:
            if self.digits[i] != other.digits[i]:
                return False
            i += 1
        return True

    def lt(self, other):
        # Signed comparison: self < other.
        if self.sign > other.sign:
            return False
        if self.sign < other.sign:
            return True
        # Same sign: compare number of digits first (the larger magnitude
        # wins; the direction depends on the shared sign) ...
        ld1 = len(self.digits)
        ld2 = len(other.digits)
        if ld1 > ld2:
            if other.sign > 0:
                return False
            else:
                return True
        elif ld1 < ld2:
            if other.sign > 0:
                return True
            else:
                return False
        # ... then digits, most significant first.
        i = ld1 - 1
        while i >= 0:
            d1 = self.digits[i]
            d2 = other.digits[i]
            if d1 < d2:
                if other.sign > 0:
                    return True
                else:
                    return False
            elif d1 > d2:
                if other.sign > 0:
                    return False
                else:
                    return True
            i -= 1
        return False

    def hash(self):
        return _hash(self)

    def add(self, other):
        # Dispatch on signs to the unsigned helpers _x_add/_x_sub.
        if self.sign < 0:
            if other.sign < 0:
                result = _x_add(self, other)
                if result.sign != 0:
                    result.sign = -result.sign
            else:
                result = _x_sub(other, self)
        else:
            if other.sign < 0:
                result = _x_sub(self, other)
            else:
                result = _x_add(self, other)
        result._normalize()
        return result

    def sub(self, other):
        # Mirror image of add().
        if self.sign < 0:
            if other.sign < 0:
                result = _x_sub(self, other)
            else:
                result = _x_add(self, other)
            result.sign = -result.sign
        else:
            if other.sign < 0:
                result = _x_add(self, other)
            else:
                result = _x_sub(self, other)
        result._normalize()
        return result

    def mul(self, other):
        if USE_KARATSUBA:
            result = _k_mul(self, other)
        else:
            result = _x_mul(self, other)
        # The multiplication helpers ignore signs; fix up the sign here.
        result.sign = self.sign * other.sign
        return result

    def truediv(self, other):
        div = _bigint_true_divide(self, other)
        return div

    def floordiv(self, other):
        div, mod = self.divmod(other)
        return div

    def div(self, other):
        return self.floordiv(other)

    def mod(self, other):
        div, mod = self.divmod(other)
        return mod

    def divmod(v, w):
        """
        The / and % operators are now defined in terms of divmod().
        The expression a mod b has the value a - b*floor(a/b).
        The _divrem function gives the remainder after division of
        |a| by |b|, with the sign of a. This is also expressed
        as a - b*trunc(a/b), if trunc truncates towards zero.
        Some examples:
          a   b   a rem b   a mod b
          13  10   3         3
         -13  10  -3         7
          13 -10   3        -7
         -13 -10  -3        -3
        So, to get from rem to mod, we have to add b if a and b
        have different signs. We then subtract one from the 'div'
        part of the outcome to keep the invariant intact.
        """
        div, mod = _divrem(v, w)
        if mod.sign * w.sign == -1:
            mod = mod.add(w)
            one = rbigint([1], 1)
            div = div.sub(one)
        return div, mod

    def pow(a, b, c=None):
        # Compute a**b, reduced modulo c when c is given.
        negativeOutput = False # if x<0 return negative output
        # 5-ary values. If the exponent is large enough, table is
        # precomputed so that table[i] == a**i % c for i in range(32).
        # python translation: the table is computed when needed.

        if b.sign < 0: # if exponent is negative
            if c is not None:
                raise TypeError(
                    "pow() 2nd argument "
                    "cannot be negative when 3rd argument specified")
            # XXX failed to implement
            raise ValueError("bigint pow() too negative")

        if c is not None:
            # if modulus == 0:
            #     raise error
            if c.sign == 0:
                raise ValueError("pow() 3rd argument cannot be 0")

            # if modulus < 0:
            #     negativeOutput = True
            #     modulus = -modulus
            if c.sign < 0:
                negativeOutput = True
                c = rbigint(c.digits, -c.sign)

            # if modulus == 1:
            #     return 0
            if len(c.digits) == 1 and c.digits[0] == 1:
                return rbigint([0], 0)

            # if base < 0:
            #     base = base % modulus
            # Having the base positive just makes things easier.
            if a.sign < 0:
                a, temp = a.divmod(c)
                a = temp

        # At this point a, b, and c are guaranteed non-negative UNLESS
        # c is NULL, in which case a may be negative. */

        z = rbigint([1], 1)

        # python adaptation: moved macros REDUCE(X) and MULT(X, Y, result)
        # into helper function result = _help_mult(x, y, c)
        if len(b.digits) <= FIVEARY_CUTOFF:
            # Left-to-right binary exponentiation (HAC Algorithm 14.79)
            # http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
            i = len(b.digits) - 1
            while i >= 0:
                bi = b.digits[i]
                j = 1 << (SHIFT-1)
                while j != 0:
                    z = _help_mult(z, z, c)
                    if bi & j:
                        z = _help_mult(z, a, c)
                    j >>= 1
                i -= 1
        else:
            # Left-to-right 5-ary exponentiation (HAC Algorithm 14.82)
            # z still holds 1L
            table = [z] * 32
            table[0] = z;
            for i in range(1, 32):
                table[i] = _help_mult(table[i-1], a, c)
            i = len(b.digits) - 1
            while i >= 0:
                bi = b.digits[i]
                # NOTE(review): redundant double assignment, kept as-is.
                # With SHIFT == 15, j walks 10, 5, 0: three 5-bit groups.
                j = j = SHIFT - 5
                while j >= 0:
                    index = (bi >> j) & 0x1f
                    for k in range(5):
                        z = _help_mult(z, z, c)
                    if index:
                        z = _help_mult(z, table[index], c)
                    j -= 5
                i -= 1

        if negativeOutput and z.sign != 0:
            # Map the non-negative residue back into the range of the
            # original negative modulus.
            z = z.sub(c)
        return z

    def neg(self):
        return rbigint(self.digits, -self.sign)

    def abs(self):
        return rbigint(self.digits, abs(self.sign))

    def invert(self): #Implement ~x as -(x + 1)
        return self.add(rbigint([1], 1)).neg()

    def lshift(self, other):
        # self << other; 'other' must be a non-negative rbigint.
        if other.sign < 0:
            raise ValueError("negative shift count")
        elif other.sign == 0:
            return self
        shiftby = other.toint()

        # wordshift, remshift = divmod(shiftby, SHIFT)
        wordshift = shiftby // SHIFT
        remshift = shiftby - wordshift * SHIFT

        oldsize = len(self.digits)
        newsize = oldsize + wordshift
        if remshift:
            newsize += 1
        z = rbigint([0] * newsize, self.sign)
        accum = 0
        i = wordshift
        j = 0
        while j < oldsize:
            accum |= self.digits[j] << remshift
            z.digits[i] = accum & MASK
            accum >>= SHIFT
            i += 1
            j += 1
        if remshift:
            # The final partial digit holds the bits shifted out of the top.
            z.digits[newsize-1] = accum
        else:
            assert not accum
        z._normalize()
        return z

    def rshift(self, other):
        # self >> other (arithmetic shift); 'other' must be non-negative.
        if other.sign < 0:
            raise ValueError("negative shift count")
        elif other.sign == 0:
            return self
        if self.sign == -1:
            # For negative values use the identity  x >> n == ~(~x >> n).
            a1 = self.invert()
            a2 = a1.rshift(other)
            return a2.invert()
        shiftby = other.toint()

        wordshift = shiftby // SHIFT
        newsize = len(self.digits) - wordshift
        if newsize <= 0:
            return rbigint([0], 0)

        loshift = shiftby % SHIFT
        hishift = SHIFT - loshift
        lomask = (1 << hishift) - 1
        himask = MASK ^ lomask
        z = rbigint([0] * newsize, self.sign)
        i = 0
        j = wordshift
        while i < newsize:
            # Each result digit combines the high part of one source digit
            # with the low part of the next.
            z.digits[i] = (self.digits[j] >> loshift) & lomask
            if i+1 < newsize:
                z.digits[i] |= (self.digits[j+1] << hishift) & himask
            i += 1
            j += 1
        z._normalize()
        return z

    def and_(self, other):
        return _bitwise(self, '&', other)

    def xor(self, other):
        return _bitwise(self, '^', other)

    def or_(self, other):
        return _bitwise(self, '|', other)

    def oct(self):
        if self.sign == 0:
            return '0L'
        else:
            return _format(self, BASE8, '0', 'L')

    def hex(self):
        return _format(self, BASE16, '0x', 'L')

    def log(self, base):
        # base is supposed to be positive or 0.0, which means we use e
        if base == 10.0:
            return _loghelper(math.log10, self)
        ret = _loghelper(math.log, self)
        if base != 0.0:
            ret /= math.log(base)
        return ret

    def tolong(self): #YYYYYY
        # Convert to an application-level Python long; not RPython.
        l = 0
        digits = list(self.digits)
        digits.reverse()
        for d in digits:
            l = l << SHIFT
            l += long(d)
        return l * self.sign

    def _normalize(self):
        # Strip leading zero digits and canonicalize zero
        # (digits == [0], sign == 0).
        if len(self.digits) == 0:
            self.sign = 0
            self.digits = [0]
            return
        i = len(self.digits) - 1
        while i != 0 and self.digits[i] == 0:
            self.digits.pop(-1)
            i -= 1
        if len(self.digits) == 1 and self.digits[0] == 0:
            self.sign = 0
#_________________________________________________________________
# Helper Functions
def _help_mult(x, y, c):
    """Return x*y, reduced modulo c when c is not None.

    Python adaptation of CPython's REDUCE(X)/MULT(X, Y, result) macros.
    """
    product = x.mul(y)
    if c is None:
        return product
    # Modular reduction: keep only the remainder part of divmod.
    quotient, remainder = product.divmod(c)
    return remainder
def digits_from_nonneg_long(l):
    """Decompose a non-negative integer into a little-endian digit list.

    Always returns at least one digit (so 0 becomes [0]).
    """
    result = [intmask(l) & MASK]
    l = l >> SHIFT
    while l:
        result.append(intmask(l) & MASK)
        l = l >> SHIFT
    return result
digits_from_nonneg_long._annspecialcase_ = "specialize:argtype(0)"
def digits_for_most_neg_long(l):
    # This helper only works if 'l' is the most negative integer of its
    # type, which in base 2 looks like: 1000000..0000
    digits = []
    # Peel off the all-zero low digits first.
    while (intmask(l) & MASK) == 0:
        digits.append(0)
        l = l >> SHIFT
    # now 'l' looks like: ...111100000
    # turn it into:       ...000100000
    # to drop the extra unwanted 1's introduced by the signed right shift
    l = -intmask(l)
    assert l >= 0
    digits.append(l)
    return digits
digits_for_most_neg_long._annspecialcase_ = "specialize:argtype(0)"
def args_from_rarith_int(x):
    # Return the (digits, sign) constructor arguments for any
    # rlib.rarithmetic integer value x.
    if x >= 0:
        if x == 0:
            return [0], 0
        else:
            return digits_from_nonneg_long(x), 1
    else:
        # Negating the most negative value of the type overflows;
        # ovfcheck detects that case.
        try:
            y = ovfcheck(-x)
        except OverflowError:
            y = -1
        # be conservative and check again if the result is >= 0, even
        # if no OverflowError was raised (e.g. broken CPython/GCC4.2)
        if y >= 0:
            # normal case
            return digits_from_nonneg_long(y), -1
        else:
            # the most negative integer! hacks needed...
            return digits_for_most_neg_long(x), -1
args_from_rarith_int._annspecialcase_ = "specialize:argtype(0)"
# ^^^ specialized by the precise type of 'x', which is typically a r_xxx
#     instance from rlib.rarithmetic
def args_from_long(x):
    "NOT_RPYTHON"
    # (digits, sign) constructor arguments for an app-level Python long.
    if x > 0:
        return digits_from_nonneg_long(x), 1
    elif x == 0:
        return [0], 0
    else:
        return digits_from_nonneg_long(-long(x)), -1
def _x_add(a, b):
    """ Add the absolute values of two bigint integers. """
    size_a = len(a.digits)
    size_b = len(b.digits)

    # Arrange for 'a' to be the operand with at least as many digits.
    if size_a < size_b:
        a, b = b, a
        size_a, size_b = size_b, size_a

    # One extra digit absorbs a possible final carry.
    z = rbigint([0] * (size_a + 1), 1)
    carry = 0
    for k in range(size_b):
        carry += a.digits[k] + b.digits[k]
        z.digits[k] = carry & MASK
        carry >>= SHIFT
    for k in range(size_b, size_a):
        carry += a.digits[k]
        z.digits[k] = carry & MASK
        carry >>= SHIFT
    z.digits[size_a] = carry
    z._normalize()
    return z
def _x_sub(a, b):
    """ Subtract the absolute values of two integers.

    Returns |a| - |b| as a magnitude, with z.sign == -1 when |a| < |b|.
    """
    size_a = len(a.digits)
    size_b = len(b.digits)
    sign = 1
    borrow = 0

    # Ensure a is the larger of the two:
    if size_a < size_b:
        sign = -1
        a, b = b, a
        size_a, size_b = size_b, size_a
    elif size_a == size_b:
        # Find highest digit where a and b differ:
        i = size_a - 1
        while i >= 0 and a.digits[i] == b.digits[i]:
            i -= 1
        if i < 0:
            return rbigint([0], 0)
        if a.digits[i] < b.digits[i]:
            sign = -1
            a, b = b, a
        # Only digits up to the first difference matter from here on.
        size_a = size_b = i+1
    z = rbigint([0] * size_a, 1)
    i = 0
    while i < size_b:
        # The following assumes unsigned arithmetic
        # works modulo 2**N for some N>SHIFT.
        borrow = a.digits[i] - b.digits[i] - borrow
        z.digits[i] = borrow & MASK
        borrow >>= SHIFT
        borrow &= 1 # Keep only one sign bit
        i += 1
    # Propagate any remaining borrow through a's higher digits.
    while i < size_a:
        borrow = a.digits[i] - borrow
        z.digits[i] = borrow & MASK
        borrow >>= SHIFT
        borrow &= 1 # Keep only one sign bit
        i += 1
    assert borrow == 0
    if sign < 0:
        z.sign = -1
    z._normalize()
    return z
def _x_mul(a, b):
    """
    Grade school multiplication, ignoring the signs.
    Returns the absolute value of the product, or NULL if error.
    """
    size_a = len(a.digits)
    size_b = len(b.digits)
    z = rbigint([0] * (size_a + size_b), 1)
    # NOTE: 'a == b' is an identity test on the rbigint objects, used to
    # detect squaring.
    if a == b:
        # Efficient squaring per HAC, Algorithm 14.16:
        # http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
        # Gives slightly less than a 2x speedup when a == b,
        # via exploiting that each entry in the multiplication
        # pyramid appears twice (except for the size_a squares).
        i = 0
        while i < size_a:
            f = a.digits[i]
            pz = i << 1      # column of the square term a[i]*a[i]
            pa = i + 1
            paend = size_a
            carry = z.digits[pz] + f * f
            z.digits[pz] = carry & MASK
            pz += 1
            carry >>= SHIFT
            assert carry <= MASK
            # Now f is added in twice in each column of the
            # pyramid it appears. Same as adding f<<1 once.
            f <<= 1
            while pa < paend:
                carry += z.digits[pz] + a.digits[pa] * f
                pa += 1
                z.digits[pz] = carry & MASK
                pz += 1
                carry >>= SHIFT
                assert carry <= (MASK << 1)
            # Flush the carry into the next one or two columns.
            if carry:
                carry += z.digits[pz]
                z.digits[pz] = carry & MASK
                pz += 1
                carry >>= SHIFT
            if carry:
                z.digits[pz] += carry & MASK
            assert (carry >> SHIFT) == 0
            i += 1
    else:
        # a is not the same as b -- gradeschool long mult
        i = 0
        while i < size_a:
            carry = 0
            f = a.digits[i]
            pz = i
            pb = 0
            pbend = size_b
            while pb < pbend:
                carry += z.digits[pz] + b.digits[pb] * f
                pb += 1
                z.digits[pz] = carry & MASK
                pz += 1
                carry >>= SHIFT
                assert carry <= MASK
            if carry:
                z.digits[pz] += carry & MASK
            assert (carry >> SHIFT) == 0
            i += 1
    z._normalize()
    return z
def _kmul_split(n, size):
    """
    A helper for Karatsuba multiplication (k_mul).
    Takes a bigint "n" and an integer "size" representing the place to
    split, and sets low and high such that abs(n) == (high << size) + low,
    viewing the shift as being by digits. The sign bit is ignored, and
    the return values are >= 0.
    """
    split = min(len(n.digits), size)
    low = rbigint(n.digits[:split], 1)
    high = rbigint(n.digits[split:], 1)
    low._normalize()
    high._normalize()
    return high, low
def _k_mul(a, b):
    """
    Karatsuba multiplication. Ignores the input signs, and returns the
    absolute value of the product (or raises if error).
    See Knuth Vol. 2 Chapter 4.3.3 (Pp. 294-295).
    """
    asize = len(a.digits)
    bsize = len(b.digits)
    # (ah*X+al)(bh*X+bl) = ah*bh*X*X + (ah*bl + al*bh)*X + al*bl
    # Let k = (ah+al)*(bh+bl) = ah*bl + al*bh + ah*bh + al*bl
    # Then the original product is
    #     ah*bh*X*X + (k - ah*bh - al*bl)*X + al*bl
    # By picking X to be a power of 2, "*X" is just shifting, and it's
    # been reduced to 3 multiplies on numbers half the size.

    # We want to split based on the larger number; fiddle so that b
    # is largest.
    if asize > bsize:
        a, b, asize, bsize = b, a, bsize, asize

    # Use gradeschool math when either number is too small.
    # ('a == b' is an identity test, detecting squaring.)
    if a == b:
        i = KARATSUBA_SQUARE_CUTOFF
    else:
        i = KARATSUBA_CUTOFF
    if asize <= i:
        if a.sign == 0:
            return rbigint([0], 0)
        else:
            return _x_mul(a, b)

    # If a is small compared to b, splitting on b gives a degenerate
    # case with ah==0, and Karatsuba may be (even much) less efficient
    # than "grade school" then. However, we can still win, by viewing
    # b as a string of "big digits", each of width a->ob_size. That
    # leads to a sequence of balanced calls to k_mul.
    if 2 * asize <= bsize:
        return _k_lopsided_mul(a, b)

    # Split a & b into hi & lo pieces.
    shift = bsize >> 1
    ah, al = _kmul_split(a, shift)
    assert ah.sign == 1 # the split isn't degenerate

    if a == b:
        bh = ah
        bl = al
    else:
        bh, bl = _kmul_split(b, shift)

    # The plan:
    # 1. Allocate result space (asize + bsize digits: that's always
    #    enough).
    # 2. Compute ah*bh, and copy into result at 2*shift.
    # 3. Compute al*bl, and copy into result at 0. Note that this
    #    can't overlap with #2.
    # 4. Subtract al*bl from the result, starting at shift. This may
    #    underflow (borrow out of the high digit), but we don't care:
    #    we're effectively doing unsigned arithmetic mod
    #    BASE**(sizea + sizeb), and so long as the *final* result fits,
    #    borrows and carries out of the high digit can be ignored.
    # 5. Subtract ah*bh from the result, starting at shift.
    # 6. Compute (ah+al)*(bh+bl), and add it into the result starting
    #    at shift.

    # 1. Allocate result space.
    ret = rbigint([0] * (asize + bsize), 1)

    # 2. t1 <- ah*bh, and copy into high digits of result.
    t1 = _k_mul(ah, bh)
    assert t1.sign >= 0
    assert 2*shift + len(t1.digits) <= len(ret.digits)
    ret.digits[2*shift : 2*shift + len(t1.digits)] = t1.digits

    # Zero-out the digits higher than the ah*bh copy. */
    ## ignored, assuming that we initialize to zero
    ##i = ret->ob_size - 2*shift - t1->ob_size;
    ##if (i)
    ##    memset(ret->ob_digit + 2*shift + t1->ob_size, 0,
    ##           i * sizeof(digit));

    # 3. t2 <- al*bl, and copy into the low digits.
    t2 = _k_mul(al, bl)
    assert t2.sign >= 0
    assert len(t2.digits) <= 2*shift # no overlap with high digits
    ret.digits[:len(t2.digits)] = t2.digits

    # Zero out remaining digits.
    ## ignored, assuming that we initialize to zero
    ##i = 2*shift - t2->ob_size; /* number of uninitialized digits */
    ##if (i)
    ##    memset(ret->ob_digit + t2->ob_size, 0, i * sizeof(digit));

    # 4 & 5. Subtract ah*bh (t1) and al*bl (t2). We do al*bl first
    # because it's fresher in cache.
    i = len(ret.digits) - shift # # digits after shift
    _v_isub(ret.digits, shift, i, t2.digits, len(t2.digits))
    _v_isub(ret.digits, shift, i, t1.digits, len(t1.digits))
    del t1, t2

    # 6. t3 <- (ah+al)(bh+bl), and add into result.
    t1 = _x_add(ah, al)
    del ah, al

    if a == b:
        t2 = t1
    else:
        t2 = _x_add(bh, bl)
    del bh, bl

    t3 = _k_mul(t1, t2)
    del t1, t2
    assert t3.sign == 1

    # Add t3. It's not obvious why we can't run out of room here.
    # See the (*) comment after this function.
    _v_iadd(ret.digits, shift, i, t3.digits, len(t3.digits))
    del t3

    ret._normalize()
    return ret
""" (*) Why adding t3 can't "run out of room" above.
Let f(x) mean the floor of x and c(x) mean the ceiling of x. Some facts
to start with:
1. For any integer i, i = c(i/2) + f(i/2). In particular,
bsize = c(bsize/2) + f(bsize/2).
2. shift = f(bsize/2)
3. asize <= bsize
4. Since we call k_lopsided_mul if asize*2 <= bsize, asize*2 > bsize in this
routine, so asize > bsize/2 >= f(bsize/2) in this routine.
We allocated asize + bsize result digits, and add t3 into them at an offset
of shift. This leaves asize+bsize-shift allocated digit positions for t3
to fit into, = (by #1 and #2) asize + f(bsize/2) + c(bsize/2) - f(bsize/2) =
asize + c(bsize/2) available digit positions.
bh has c(bsize/2) digits, and bl at most f(bsize/2) digits. So bh+bl has
at most c(bsize/2) digits + 1 bit.
If asize == bsize, ah has c(bsize/2) digits, else ah has at most f(bsize/2)
digits, and al has at most f(bsize/2) digits in any case. So ah+al has at
most (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 1 bit.
The product (ah+al)*(bh+bl) therefore has at most
c(bsize/2) + (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits
and we have asize + c(bsize/2) available digit positions. We need to show
this is always enough. An instance of c(bsize/2) cancels out in both, so
the question reduces to whether asize digits is enough to hold
(asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits. If asize < bsize,
then we're asking whether asize digits >= f(bsize/2) digits + 2 bits. By #4,
asize is at least f(bsize/2)+1 digits, so this in turn reduces to whether 1
digit is enough to hold 2 bits. This is so since SHIFT=15 >= 2. If
asize == bsize, then we're asking whether bsize digits is enough to hold
c(bsize/2) digits + 2 bits, or equivalently (by #1) whether f(bsize/2) digits
is enough to hold 2 bits. This is so if bsize >= 2, which holds because
bsize >= KARATSUBA_CUTOFF >= 2.
Note that since there's always enough room for (ah+al)*(bh+bl), and that's
clearly >= each of ah*bh and al*bl, there's always enough room to subtract
ah*bh and al*bl too.
"""
def _k_lopsided_mul(a, b):
    """
    b has at least twice the digits of a, and a is big enough that Karatsuba
    would pay off *if* the inputs had balanced sizes. View b as a sequence
    of slices, each with a->ob_size digits, and multiply the slices by a,
    one at a time. This gives k_mul balanced inputs to work with, and is
    also cache-friendly (we compute one double-width slice of the result
    at a time, then move on, never backtracking except for the helpful
    single-width slice overlap between successive partial sums).
    """
    asize = len(a.digits)
    bsize = len(b.digits)
    # nbdone is # of b digits already multiplied
    assert asize > KARATSUBA_CUTOFF
    assert 2 * asize <= bsize

    # Allocate result space, and zero it out.
    ret = rbigint([0] * (asize + bsize), 1)

    # Successive slices of b are copied into bslice.
    #bslice = rbigint([0] * asize, 1)
    # XXX we cannot pre-allocate, see comments below!
    bslice = rbigint([0], 1)

    nbdone = 0;
    while bsize > 0:
        nbtouse = min(bsize, asize)

        # Multiply the next slice of b by a.
        #bslice.digits[:nbtouse] = b.digits[nbdone : nbdone + nbtouse]
        # XXX: this would be more efficient if we adopted CPython's
        # way to store the size, instead of resizing the list!
        # XXX change the implementation, encoding length via the sign.
        bslice.digits = b.digits[nbdone : nbdone + nbtouse]
        product = _k_mul(a, bslice)

        # Add into result.
        _v_iadd(ret.digits, nbdone, len(ret.digits) - nbdone,
                product.digits, len(product.digits))
        del product

        bsize -= nbtouse
        nbdone += nbtouse

    ret._normalize()
    return ret
def _inplace_divrem1(pout, pin, n, size=0):
    """
    Divide bigint pin by non-zero digit n, storing quotient
    in pout, and returning the remainder. It's OK for pin == pout on entry.
    A size of 0 means "use all of pin's digits".
    """
    assert n > 0 and n <= MASK
    if not size:
        size = len(pin.digits)
    rem = 0
    # Walk from the most significant digit downwards.
    idx = size - 1
    while idx >= 0:
        rem = (rem << SHIFT) + pin.digits[idx]
        quot = rem // n
        pout.digits[idx] = quot
        rem -= quot * n
        idx -= 1
    return rem
def _divrem1(a, n):
    """
    Divide a bigint integer by a digit, returning both the quotient
    and the remainder as a tuple.
    The sign of a is ignored; n should not be zero.
    """
    assert 0 < n <= MASK
    quot = rbigint([0] * len(a.digits), 1)
    rem = _inplace_divrem1(quot, a, n)
    quot._normalize()
    return quot, rem
def _v_iadd(x, xofs, m, y, n):
    """
    x[0:m] and y[0:n] are digit vectors, LSD first, m >= n required. x[0:n]
    is modified in place, by adding y to it. Carries are propagated as far as
    x[m-1], and the remaining carry (0 or 1) is returned.
    Python adaptation: x is addressed relative to xofs!
    """
    assert m >= n
    carry = 0
    # Add y digit-wise into x.
    for j in range(n):
        carry += x[xofs + j] + y[j]
        x[xofs + j] = carry & MASK
        carry >>= SHIFT
        assert (carry & 1) == carry
    # Ripple any leftover carry through the remaining digits of x.
    j = n
    while carry and j < m:
        carry += x[xofs + j]
        x[xofs + j] = carry & MASK
        carry >>= SHIFT
        assert (carry & 1) == carry
        j += 1
    return carry
def _v_isub(x, xofs, m, y, n):
    """
    x[0:m] and y[0:n] are digit vectors, LSD first, m >= n required. x[0:n]
    is modified in place, by subtracting y from it. Borrows are propagated as
    far as x[m-1], and the remaining borrow (0 or 1) is returned.
    Python adaptation: x is addressed relative to xofs!
    """
    assert m >= n
    borrow = 0
    # Subtract y digit-wise from x.
    for j in range(n):
        borrow = x[xofs + j] - y[j] - borrow
        x[xofs + j] = borrow & MASK
        borrow >>= SHIFT
        borrow &= 1 # keep only 1 sign bit
    # Ripple any leftover borrow through the remaining digits of x.
    j = n
    while borrow and j < m:
        borrow = x[xofs + j] - borrow
        x[xofs + j] = borrow & MASK
        borrow >>= SHIFT
        borrow &= 1
        j += 1
    return borrow
def _muladd1(a, n, extra):
    """Multiply by a single digit and add a single digit, ignoring the sign.
    """
    size_a = len(a.digits)
    # One extra slot absorbs the final carry.
    z = rbigint([0] * (size_a + 1), 1)
    carry = extra
    assert carry & MASK == carry
    for k in range(size_a):
        carry += a.digits[k] * n
        z.digits[k] = carry & MASK
        carry >>= SHIFT
    z.digits[size_a] = carry
    z._normalize()
    return z
def _x_divrem(v1, w1):
    """ Unsigned bigint division with remainder -- the algorithm """
    size_w = len(w1.digits)
    # Scale both operands by d so the divisor's top digit is large; this
    # keeps the single-digit quotient estimates below accurate.
    d = (MASK+1) // (w1.digits[size_w-1] + 1)
    v = _muladd1(v1, d, 0)
    w = _muladd1(w1, d, 0)
    size_v = len(v.digits)
    size_w = len(w.digits)
    assert size_v >= size_w and size_w > 1 # Assert checks by div()

    size_a = size_v - size_w + 1
    a = rbigint([0] * size_a, 1)

    j = size_v
    k = size_a - 1
    while k >= 0:
        # Estimate quotient digit q from the two leading digits of the
        # current slice of v (vj is 0 on the first iteration, j == size_v).
        if j >= size_v:
            vj = 0
        else:
            vj = v.digits[j]
        carry = 0

        if vj == w.digits[size_w-1]:
            q = MASK
        else:
            q = ((vj << SHIFT) + v.digits[j-1]) // w.digits[size_w-1]

        # Correct the estimate downwards while it is provably too large.
        while (w.digits[size_w-2] * q >
                ((
                    (vj << SHIFT)
                    + v.digits[j-1]
                    - q * w.digits[size_w-1]
                                ) << SHIFT)
                + v.digits[j-2]):
            q -= 1

        # Subtract q*w from the current slice of v, in place.
        i = 0
        while i < size_w and i+k < size_v:
            z = w.digits[i] * q
            zz = z >> SHIFT
            carry += v.digits[i+k] - z + (zz << SHIFT)
            v.digits[i+k] = carry & MASK
            carry >>= SHIFT
            carry -= zz
            i += 1

        if i+k < size_v:
            carry += v.digits[i+k]
            v.digits[i+k] = 0

        if carry == 0:
            a.digits[k] = q & MASK
            assert not q >> SHIFT
        else:
            # q was still one too large: add w back once and decrement q.
            assert carry == -1
            q -= 1
            a.digits[k] = q & MASK
            assert not q >> SHIFT

            carry = 0
            i = 0
            while i < size_w and i+k < size_v:
                carry += v.digits[i+k] + w.digits[i]
                v.digits[i+k] = carry & MASK
                carry >>= SHIFT
                i += 1
        j -= 1
        k -= 1

    a._normalize()
    # Undo the initial scaling to recover the true remainder.
    rem, _ = _divrem1(v, d)
    return a, rem
def _divrem(a, b):
    """ Long division with remainder, top-level routine """
    size_a = len(a.digits)
    size_b = len(b.digits)

    if b.sign == 0:
        raise ZeroDivisionError("long division or modulo by zero")

    # Fast path when |a| < |b| is certain from lengths / top digits
    # (equal top digits fall through to the full algorithm).
    if (size_a < size_b or
        (size_a == size_b and
         a.digits[size_a-1] < b.digits[size_b-1])):
        # |a| < |b|
        z = rbigint([0], 0)
        rem = a
        return z, rem
    if size_b == 1:
        # Single-digit divisor: use the cheap helper.
        z, urem = _divrem1(a, b.digits[0])
        rem = rbigint([urem], int(urem != 0))
    else:
        z, rem = _x_divrem(a, b)
    # Set the signs.
    # The quotient z has the sign of a*b;
    # the remainder r has the sign of a,
    # so a = b*z + r.
    if a.sign != b.sign:
        z.sign = - z.sign
    if a.sign < 0 and rem.sign != 0:
        rem.sign = - rem.sign
    return z, rem
# ______________ conversions to double _______________

def _AsScaledDouble(v):
    """
    Return (x, e) such that v is approximately x * 2**(e*SHIFT).

    NBITS_WANTED should be > the number of bits in a double's precision,
    but small enough so that 2**NBITS_WANTED is within the normal double
    range. nbitsneeded is set to 1 less than that because the most-significant
    Python digit contains at least 1 significant bit, but we don't want to
    bother counting them (catering to the worst case cheaply).
    57 is one more than VAX-D double precision; I (Tim) don't know of a double
    format with more precision than that; it's 1 larger so that we add in at
    least one round bit to stand in for the ignored least-significant bits.
    """
    NBITS_WANTED = 57
    multiplier = float(1 << SHIFT)
    if v.sign == 0:
        return 0.0, 0
    i = len(v.digits) - 1
    sign = v.sign
    x = float(v.digits[i])
    nbitsneeded = NBITS_WANTED - 1
    # Invariant: i Python digits remain unaccounted for.
    while i > 0 and nbitsneeded > 0:
        i -= 1
        x = x * multiplier + float(v.digits[i])
        nbitsneeded -= SHIFT
    # There are i digits we didn't shift in. Pretending they're all
    # zeroes, the true value is x * 2**(i*SHIFT).
    exponent = i
    assert x > 0.0
    return x * sign, exponent
def isinf(x):
    """Return True iff the float x is an infinity.

    Apart from zero (excluded explicitly), only infinities satisfy
    x / 2 == x; NaN fails the equality and yields False.
    """
    if x == 0.0:
        return False
    return x / 2 == x
##def ldexp(x, exp):
## assert type(x) is float
## lb1 = LONG_BIT - 1
## multiplier = float(1 << lb1)
## while exp >= lb1:
## x *= multiplier
## exp -= lb1
## if exp:
## x *= float(1 << exp)
## return x
# note that math.ldexp checks for overflows,
# while the C ldexp is not guaranteed to do.
# XXX make sure that we don't ignore this!
# YYY no, we decided to do ignore this!
def _AsDouble(v):
    """ Get a C double from a bigint object. """
    x, e = _AsScaledDouble(v)
    if e > sys.maxint / SHIFT:
        # can't say "long int too large to convert to float"
        raise OverflowError
    # math.ldexp itself checks for overflow and raises OverflowError.
    return math.ldexp(x, e * SHIFT)
def _loghelper(func, arg):
    """
    A decent logarithm is easy to compute even for huge bigints, but libm can't
    do that by itself -- loghelper can. func is log or log10.
    Note that overflow isn't possible: a bigint can contain
    no more than INT_MAX * SHIFT bits, so has value certainly less than
    2**(2**64 * 2**16) == 2**2**80, and log2 of that is 2**80, which is
    small enough to fit in an IEEE single. log and log10 are even smaller.

    Raises ValueError for non-positive arguments.
    """
    x, e = _AsScaledDouble(arg)
    if x <= 0.0:
        raise ValueError
    # Value is ~= x * 2**(e*SHIFT), so the log ~=
    # log(x) + log(2) * e * SHIFT.
    # CAUTION: e*SHIFT may overflow using int arithmetic,
    # so force use of double. */
    return func(x) + (e * float(SHIFT) * func(2.0))
_loghelper._annspecialcase_ = 'specialize:arg(0)'
def _bigint_true_divide(a, b):
    """Implement true division a / b, returning a float."""
    ad, aexp = _AsScaledDouble(a)
    bd, bexp = _AsScaledDouble(b)
    if bd == 0.0:
        raise ZeroDivisionError("long division or modulo by zero")

    # True value is very close to ad/bd * 2**(SHIFT*(aexp-bexp))
    ad /= bd # overflow/underflow impossible here
    aexp -= bexp
    if aexp > sys.maxint / SHIFT:
        raise OverflowError
    elif aexp < -(sys.maxint / SHIFT):
        return 0.0 # underflow to 0
    ad = math.ldexp(ad, aexp * SHIFT)
    ##if isinf(ad): # ignore underflow to 0.0
    ##    raise OverflowError
    # math.ldexp checks and raises
    return ad
# Digit-character tables for the conversion bases used by _format():
# each string's length is the base and each character the matching digit.
BASE8 = '01234567'
BASE10 = '0123456789'
BASE16 = '0123456789ABCDEF'
def _format(a, digits, prefix='', suffix=''):
    """
    Convert a bigint object to a string, using a given conversion base.
    Return a string object.

    'digits' is the digit-character table; its length is the base.
    The string is assembled right-to-left into buffer 's', with 'p'
    pointing at the leftmost character written so far.
    """
    size_a = len(a.digits)

    base = len(digits)
    assert base >= 2 and base <= 36

    # Compute a rough upper bound for the length of the string
    i = base
    bits = 0
    while i > 1:
        bits += 1
        i >>= 1
    i = 5 + len(prefix) + len(suffix) + (size_a*SHIFT + bits-1) // bits
    s = [chr(0)] * i
    p = i
    # Copy in the suffix, rightmost characters first.
    j = len(suffix)
    while j > 0:
        p -= 1
        j -= 1
        s[p] = suffix[j]

    if a.sign == 0:
        p -= 1
        s[p] = '0'
    elif (base & (base - 1)) == 0:
        # JRH: special case for power-of-2 bases
        accum = 0
        accumbits = 0  # # of bits in accum
        basebits = 1   # # of bits in base-1
        i = base
        while 1:
            i >>= 1
            if i <= 1:
                break
            basebits += 1

        for i in range(size_a):
            accum |= a.digits[i] << accumbits
            accumbits += SHIFT
            assert accumbits >= basebits
            while 1:
                cdigit = accum & (base - 1)
                p -= 1
                assert p >= 0
                s[p] = digits[cdigit]
                accumbits -= basebits
                accum >>= basebits
                # On all but the last source digit, stop once accum runs
                # short of bits; on the last, drain it completely.
                if i < size_a - 1:
                    if accumbits < basebits:
                        break
                else:
                    if accum <= 0:
                        break
    else:
        # Not 0, and base not a power of 2. Divide repeatedly by
        # base, but for speed use the highest power of base that
        # fits in a digit.
        size = size_a
        pin = a # just for similarity to C source which uses the array
        # powbase <- largest power of base that fits in a digit.
        powbase = base # powbase == base ** power
        power = 1
        while 1:
            newpow = powbase * base
            if newpow >> SHIFT: # doesn't fit in a digit
                break
            powbase = newpow
            power += 1

        # Get a scratch area for repeated division.
        scratch = rbigint([0] * size, 1)

        # Repeatedly divide by powbase.
        while 1:
            ntostore = power
            rem = _inplace_divrem1(scratch, pin, powbase, size)
            pin = scratch # no need to use a again
            if pin.digits[size - 1] == 0:
                size -= 1

            # Break rem into digits.
            assert ntostore > 0
            while 1:
                nextrem = rem // base
                c = rem - nextrem * base
                p -= 1
                assert p >= 0
                s[p] = digits[c]
                rem = nextrem
                ntostore -= 1
                # Termination is a bit delicate: must not
                # store leading zeroes, so must get out if
                # remaining quotient and rem are both 0.
                if not (ntostore and (size or rem)):
                    break
            if size == 0:
                break

    # Copy in the prefix and the sign, rightmost characters first.
    j = len(prefix)
    while j > 0:
        p -= 1
        j -= 1
        s[p] = prefix[j]

    if a.sign < 0:
        p -= 1
        s[p] = '-'

    assert p >= 0 # otherwise, buffer overflow (this is also a
                  # hint for the annotator for the slice below)
    return ''.join(s[p:])
def _bitwise(a, op, b): # '&', '|', '^'
    """ Bitwise and/or/xor operations on two rbigints.

    Negative operands are first replaced by their one's complement
    (a non-negative bigint); maska/maskb record, per digit, the XOR
    mask needed to recover the original two's-complement bit pattern.
    """
    if a.sign < 0:
        a = a.invert()
        maska = MASK
    else:
        maska = 0
    if b.sign < 0:
        b = b.invert()
        maskb = MASK
    else:
        maskb = 0
    # negz == -1 means the result must be complemented back at the end.
    negz = 0
    if op == '^':
        if maska != maskb:
            maska ^= MASK
            negz = -1
    elif op == '&':
        if maska and maskb:
            op = '|'
            maska ^= MASK
            maskb ^= MASK
            negz = -1
    elif op == '|':
        if maska or maskb:
            op = '&'
            maska ^= MASK
            maskb ^= MASK
            negz = -1
    # JRH: The original logic here was to allocate the result value (z)
    # as the longer of the two operands. However, there are some cases
    # where the result is guaranteed to be shorter than that: AND of two
    # positives, OR of two negatives: use the shorter number. AND with
    # mixed signs: use the positive number. OR with mixed signs: use the
    # negative number. After the transformations above, op will be '&'
    # iff one of these cases applies, and mask will be non-0 for operands
    # whose length should be ignored.
    size_a = len(a.digits)
    size_b = len(b.digits)
    if op == '&':
        if maska:
            size_z = size_b
        else:
            if maskb:
                size_z = size_a
            else:
                size_z = min(size_a, size_b)
    else:
        size_z = max(size_a, size_b)
    z = rbigint([0] * size_z, 1)
    for i in range(size_z):
        # digits past an operand's length behave as its (masked) zero
        if i < size_a:
            diga = a.digits[i] ^ maska
        else:
            diga = maska
        if i < size_b:
            digb = b.digits[i] ^ maskb
        else:
            digb = maskb
        if op == '&':
            z.digits[i] = diga & digb
        elif op == '|':
            z.digits[i] = diga | digb
        elif op == '^':
            z.digits[i] = diga ^ digb
    z._normalize()
    if negz == 0:
        return z
    # complement the result back into a negative value
    return z.invert()
_bitwise._annspecialcase_ = "specialize:arg(1)"
def _AsLong(v):
    """
    Get an integer from a bigint object.
    Raises OverflowError if overflow occurs.
    """
    # This version by Tim Peters
    # Accumulate digits from most to least significant, verifying after
    # every shift that no bits fell off the top of the machine word.
    sign = v.sign
    if sign == 0:
        return 0
    x = r_uint(0)
    idx = len(v.digits) - 1
    while idx >= 0:
        prev_value = x
        x = (x << SHIFT) + v.digits[idx]
        if (x >> SHIFT) != prev_value:
            raise OverflowError
        idx -= 1
    # Haven't lost any bits, but if the sign bit is set we're in
    # trouble *unless* this is the min negative number. So,
    # trouble iff sign bit set && (positive || some bit set other
    # than the sign bit).
    if intmask(x) < 0 and (sign > 0 or (x << 1) != 0):
        raise OverflowError
    return intmask(x * sign)
def _hash(v):
    # This is designed so that Python ints and longs with the
    # same value hash to the same value, otherwise comparisons
    # of mapping keys will turn out weird.
    x = 0
    rot = LONG_BIT - SHIFT
    idx = len(v.digits) - 1
    while idx >= 0:
        # Force a native long #-bits (32 or 64) circular shift
        x = ((x << SHIFT) & ~MASK) | ((x >> rot) & MASK)
        x += v.digits[idx]
        idx -= 1
    return intmask(x * v.sign)
#_________________________________________________________________
# a few internal helpers
# DEC_PER_DIGIT: the largest number of decimal digits that always
# fits in one bigint digit, i.e. the largest d with 10**d - 1 <= MASK.
DEC_PER_DIGIT = 1
while int('9' * DEC_PER_DIGIT) < MASK:
    DEC_PER_DIGIT += 1
DEC_PER_DIGIT -= 1
# DEC_MAX == 10 ** DEC_PER_DIGIT, the multiplier used when flushing a
# full buffer of decimal digits in _decimalstr_to_bigint().
DEC_MAX = 10 ** DEC_PER_DIGIT
def _decimalstr_to_bigint(s):
    """Convert an already-validated decimal string to an rbigint.

    The string may start with an optional '+' or '-' sign; the rest is
    assumed to be decimal digits.  Digits are buffered DEC_PER_DIGIT at
    a time so each _muladd1() step works on machine-sized values.
    """
    # (cleanup: the original assigned an unused local `cnt` here)
    p = 0
    lim = len(s)
    sign = False
    if s[p] == '-':
        sign = True
        p += 1
    elif s[p] == '+':
        p += 1
    a = rbigint.fromint(0)
    tens = 1   # 10 ** (number of digits currently buffered in `dig`)
    dig = 0    # buffered decimal digits, at most DEC_PER_DIGIT of them
    ord0 = ord('0')
    while p < lim:
        dig = dig * 10 + ord(s[p]) - ord0
        p += 1
        tens *= 10
        if tens == DEC_MAX or p == lim:
            # flush the buffer: a = a * tens + dig
            a = _muladd1(a, tens, dig)
            tens = 1
            dig = 0
    if sign:
        # NOTE(review): an input of "-0" would set sign == -1 on a zero
        # bigint -- assumed not to occur upstream; confirm with callers.
        a.sign = -1
    return a
| Python |
import os, sys, new
# WARNING: this is all nicely RPython, but there is no RPython code around
# to *compile* regular expressions, so outside of PyPy this is only useful
# for RPython applications that just need precompiled regexps.
#
# XXX However it's not even clear how to get such prebuilt regexps...
# Locate the source (.py) file of rsre_core; it is re-executed with
# execfile() once per State class by insert_sre_methods() below.
import rsre_core
rsre_core_filename = rsre_core.__file__
if rsre_core_filename[-1] in 'oc':
    # strip the trailing 'c'/'o' of a compiled .pyc/.pyo path
    rsre_core_filename = rsre_core_filename[:-1]
rsre_core_filename = os.path.abspath(rsre_core_filename)
del rsre_core
def insert_sre_methods(locals, name):
    """A hack that inserts the SRE entry point methods into the 'locals'
    scope, which should be the __dict__ of a State class. The State
    class defines methods to look at the input string or unicode string.
    It should provide the following API for sre_core:
        get_char_ord(p) - return the ord of the char at position 'p'
        lower(charcode) - return the ord of the lowcase version of 'charcode'
        start           - start position for searching and matching
        end             - end position for searching and matching
    """
    # Execute a private copy of rsre_core inside a fresh module, so the
    # methods get specialized per State class.
    filename = rsre_core_filename
    mod = new.module('pypy.rlib.rsre.rsre_core_' + name)
    mod.__file__ = filename
    execfile(filename, mod.__dict__)
    # Copy the public StateMixin entries into the class namespace.
    for key, value in mod.StateMixin.__dict__.items():
        if key.startswith('__'):
            continue
        locals[key] = value
    locals['rsre_core'] = mod # for tests
def set_unicode_db(unicodedb):
    """Another hack to set the unicodedb used by rsre_char. I guess there
    is little point in allowing several different unicodedb's in the same
    RPython program... See comments in rsre_char.

    `unicodedb` should be one of the pypy.module.unicodedata.unicodedb_*
    modules; it is stored as a module-level global of rsre_char.
    """
    from pypy.rlib.rsre import rsre_char
    rsre_char.unicodedb = unicodedb
class SimpleStringState(object):
    """Prebuilt state for matching strings, for testing and for
    stand-alone RPython applications that don't worry about unicode.
    """
    # Inject the specialized rsre_core entry points (search, match,
    # reset, ...) into this class at class-creation time.
    insert_sre_methods(locals(), 'simple')
    def __init__(self, string, start=0, end=-1):
        # string: the subject string to match against
        # start/end: slice of the string visible to the matcher;
        # end < 0 means "up to the end of the string".
        self.string = string
        if end < 0:
            end = len(string)
        self.start = start
        self.end = end
        self.reset()
    def get_char_ord(self, p):
        # Return the ordinal of the character at position p.
        return ord(self.string[p])
| Python |
"""
Core routines for regular expression matching and searching.
"""
# This module should not be imported directly; it is execfile'd by rsre.py,
# possibly more than once. This is done to create specialized version of
# this code: each copy is used with a 'state' that is an instance of a
# specific subclass of BaseState, so all the inner-loop calls to methods
# like state.get_char_ord() can be compiled as direct calls, which can be
# inlined.
from pypy.rlib.rsre import rsre_char
from pypy.rlib.rsre.rsre_char import SRE_INFO_PREFIX, SRE_INFO_LITERAL
from pypy.rlib.rsre.rsre_char import OPCODE_INFO, MAXREPEAT
#### Core classes
class StateMixin(object):
    """Methods copied into each State class (see rsre.insert_sre_methods):
    matcher bookkeeping -- group marks, the explicit context stack, and
    the chain of active repeat contexts."""
    def reset(self):
        # Restart matching from self.start with no groups captured.
        self.string_position = self.start
        self.marks = []
        self.lastindex = -1
        self.marks_stack = []
        self.context_stack = []
        self.repeat = None
    def search(self, pattern_codes):
        return search(self, pattern_codes)
    def match(self, pattern_codes):
        return match(self, pattern_codes)
    def create_regs(self, group_count):
        """Creates a tuple of index pairs representing matched groups, a format
        that's convenient for SRE_Match."""
        regs = [(self.start, self.string_position)]
        for group in range(group_count):
            mark_index = 2 * group
            start = end = -1
            if mark_index + 1 < len(self.marks):
                start1 = self.marks[mark_index]
                end1 = self.marks[mark_index + 1]
                if start1 >= 0 and end1 >= 0:
                    start = start1
                    end = end1
            regs.append((start, end))
        return regs
    def set_mark(self, mark_nr, position):
        # Marks come in pairs: mark 2*g opens group g+1, 2*g+1 closes it.
        if mark_nr & 1:
            # This id marks the end of a group.
            self.lastindex = mark_nr / 2 + 1   # integer division (Python 2)
        if mark_nr >= len(self.marks):
            self.marks.extend([-1] * (mark_nr - len(self.marks) + 1))
        self.marks[mark_nr] = position
    def get_marks(self, group_index):
        # Return (start, end) marks for the group, or (-1, -1) if unset.
        marks_index = 2 * group_index
        if len(self.marks) > marks_index + 1:
            return self.marks[marks_index], self.marks[marks_index + 1]
        else:
            return -1, -1
    def marks_push(self):
        # Snapshot the captured marks before a backtrackable attempt.
        self.marks_stack.append((self.marks[:], self.lastindex))
    def marks_pop(self):
        self.marks, self.lastindex = self.marks_stack.pop()
    def marks_pop_keep(self):
        # Restore the snapshot but keep it on the stack for reuse.
        marks, self.lastindex = self.marks_stack[-1]
        self.marks = marks[:]
    def marks_pop_discard(self):
        self.marks_stack.pop()
class MatchContext(rsre_char.MatchContextBase):
    """One frame of the matcher's explicit "call stack": a private code
    pointer and string pointer, plus integer values backed up before
    control is handed to a child context."""
    def __init__(self, state, pattern_codes, offset=0):
        self.state = state
        self.pattern_codes = pattern_codes
        self.string_position = state.string_position
        self.code_position = offset
        self.has_matched = self.UNDECIDED
        self.backup = []
        # opcode to re-run when this context is resumed; -1 = fresh frame
        self.resume_at_opcode = -1
    def push_new_context(self, pattern_offset):
        """Creates a new child context of this context and pushes it on the
        stack. pattern_offset is the offset off the current code position to
        start interpreting from."""
        offset = self.code_position + pattern_offset
        assert offset >= 0
        child_context = MatchContext(self.state, self.pattern_codes, offset)
        self.state.context_stack.append(child_context)
        self.child_context = child_context
        return child_context
    def is_resumed(self):
        # True when this frame was suspended and is being re-entered.
        return self.resume_at_opcode > -1
    def backup_value(self, value):
        # Save an int to be retrieved after the child context finishes.
        self.backup.append(value)
    def restore_values(self):
        # Return all saved values and clear the backup list.
        values = self.backup
        self.backup = []
        return values
    def peek_char(self, peek=0):
        return self.state.get_char_ord(self.string_position + peek)
    def skip_char(self, skip_count):
        self.string_position = self.string_position + skip_count
    def remaining_chars(self):
        return self.state.end - self.string_position
    def at_beginning(self):
        return self.string_position == 0
    def at_end(self):
        return self.string_position == self.state.end
    def at_linebreak(self):
        return not self.at_end() and self.peek_char() == rsre_char.linebreak
    def at_boundary(self, word_checker):
        # Word boundary: exactly one side of the current position is a
        # word character according to word_checker.
        if self.at_beginning() and self.at_end():
            return False
        that = not self.at_beginning() and word_checker(self.peek_char(-1))
        this = not self.at_end() and word_checker(self.peek_char())
        return this != that
    at_boundary._annspecialcase_ = 'specialize:arg(1)'
class RepeatContext(MatchContext):
    """Context created by the REPEAT opcode and consumed by the matching
    MAX_UNTIL/MIN_UNTIL tail.  `count` is the number of repetitions
    matched so far (-1 before the first attempt); `previous` links the
    enclosing repeat; `last_position` guards against zero-width loops."""
    def __init__(self, context):
        offset = context.code_position
        assert offset >= 0
        MatchContext.__init__(self, context.state,
                              context.pattern_codes, offset)
        self.count = -1
        self.previous = context.state.repeat
        self.last_position = -1
        self.repeat_stack = []
StateMixin._MatchContext = MatchContext # for tests
#### Main opcode dispatch loop
def search(state, pattern_codes):
    """Try to match pattern_codes at every position of state's string,
    left to right; return True as soon as one position matches.

    (cleanup: the original assigned `flags` and `offset` locals that
    were never read, plus a commented-out slice of pattern_codes; the
    INFO block is skipped during matching by op_jump instead.)
    """
    if pattern_codes[0] == OPCODE_INFO:
        # optimization info block
        # <INFO> <1=skip> <2=flags> <3=min> <4=max> <5=prefix info>
        if pattern_codes[2] & SRE_INFO_PREFIX and pattern_codes[5] > 1:
            # known literal prefix: use the fast skipping search
            return fast_search(state, pattern_codes)
    string_position = state.start
    # <= so that an empty pattern can still match at the very end
    while string_position <= state.end:
        state.reset()
        state.start = state.string_position = string_position
        if match(state, pattern_codes):
            return True
        string_position += 1
    return False
def fast_search(state, pattern_codes):
    """Skips forward in a string as fast as possible using information from
    an optimization info block."""
    # pattern starts with a known prefix
    # <5=length> <6=skip> <7=prefix data> <overlap data>
    # The overlap table is consulted like a KMP failure function, so
    # characters are never re-scanned after a partial prefix match.
    flags = pattern_codes[2]
    prefix_len = pattern_codes[5]
    assert prefix_len >= 0
    prefix_skip = pattern_codes[6] # don't really know what this is good for
    assert prefix_skip >= 0
    prefix = pattern_codes[7:7 + prefix_len]
    overlap_offset = 7 + prefix_len - 1
    assert overlap_offset >= 0
    pattern_offset = pattern_codes[1] + 1
    assert pattern_offset >= 0
    i = 0
    string_position = state.string_position
    while string_position < state.end:
        while True:
            char_ord = state.get_char_ord(string_position)
            if char_ord != prefix[i]:
                if i == 0:
                    break
                else:
                    i = pattern_codes[overlap_offset + i]
            else:
                i += 1
                if i == prefix_len:
                    # found a potential match
                    state.start = string_position + 1 - prefix_len
                    state.string_position = string_position + 1 \
                                                 - prefix_len + prefix_skip
                    if flags & SRE_INFO_LITERAL:
                        return True # matched all of pure literal pattern
                    start = pattern_offset + 2 * prefix_skip
                    if match(state, pattern_codes[start:]):
                        return True
                    # full match failed: fall back via the overlap table
                    i = pattern_codes[overlap_offset + i]
                break
        string_position += 1
    return False
def match(state, pattern_codes):
    # Optimization: Check string length. pattern_codes[3] contains the
    # minimum length for a string to possibly match.
    if pattern_codes[0] == OPCODE_INFO and pattern_codes[3] > 0:
        if state.end - state.string_position < pattern_codes[3]:
            return False
    # Trampoline: contexts suspend by reporting UNDECIDED and stay on
    # state.context_stack until their child contexts have finished.
    state.context_stack.append(MatchContext(state, pattern_codes))
    has_matched = MatchContext.UNDECIDED
    while len(state.context_stack) > 0:
        context = state.context_stack[-1]
        if context.has_matched == context.UNDECIDED:
            has_matched = dispatch_loop(context)
        else:
            has_matched = context.has_matched
        if has_matched != context.UNDECIDED: # don't pop if context isn't done
            state.context_stack.pop()
    return has_matched == MatchContext.MATCHED
def dispatch_loop(context):
    """Returns MATCHED if the current context matches, NOT_MATCHED if it doesn't
    and UNDECIDED if matching is not finished, ie must be resumed after child
    contexts have been matched."""
    while context.has_remaining_codes() and context.has_matched == context.UNDECIDED:
        if context.is_resumed():
            opcode = context.resume_at_opcode
        else:
            opcode = context.peek_code()
        try:
            has_finished = opcode_dispatch_table[opcode](context)
        except IndexError:
            raise RuntimeError("Internal re error. Unknown opcode: %s" % opcode)
        if not has_finished:
            # the handler pushed a child context; remember where to resume
            context.resume_at_opcode = opcode
            return context.UNDECIDED
        context.resume_at_opcode = -1
    if context.has_matched == context.UNDECIDED:
        # ran off the end of the codes without explicit SUCCESS/FAILURE
        context.has_matched = context.NOT_MATCHED
    return context.has_matched
def op_success(ctx):
    # End of the pattern reached: report success and commit the
    # current string position back to the shared state.
    ctx.has_matched = ctx.MATCHED
    ctx.state.string_position = ctx.string_position
    return True
def op_failure(ctx):
    # <FAILURE>: this alternative can never match; fail immediately.
    ctx.has_matched = ctx.NOT_MATCHED
    return True
def op_literal(ctx):
    # <LITERAL> <code>
    # Succeed only if the current character equals the literal code word.
    # The pointers are advanced unconditionally; on failure the caller
    # stops looking at them anyway.
    ok = not ctx.at_end() and ctx.peek_char() == ctx.peek_code(1)
    if not ok:
        ctx.has_matched = ctx.NOT_MATCHED
    ctx.skip_code(2)
    ctx.skip_char(1)
    return True
def op_not_literal(ctx):
    # <NOT_LITERAL> <code>
    # Succeed on any character *different* from the literal code word.
    ok = not ctx.at_end() and ctx.peek_char() != ctx.peek_code(1)
    if not ok:
        ctx.has_matched = ctx.NOT_MATCHED
    ctx.skip_code(2)
    ctx.skip_char(1)
    return True
def op_literal_ignore(ctx):
    # <LITERAL_IGNORE> <code>
    # Case-insensitive literal: compare after lowering both sides.
    lower = ctx.state.lower
    ok = not ctx.at_end() and lower(ctx.peek_char()) == lower(ctx.peek_code(1))
    if not ok:
        ctx.has_matched = ctx.NOT_MATCHED
    ctx.skip_code(2)
    ctx.skip_char(1)
    return True
def op_not_literal_ignore(ctx):
    # <NOT_LITERAL_IGNORE> <code>
    # Case-insensitive NOT_LITERAL: succeed when the lowered character
    # differs from the lowered literal code word.
    lower = ctx.state.lower
    ok = not ctx.at_end() and lower(ctx.peek_char()) != lower(ctx.peek_code(1))
    if not ok:
        ctx.has_matched = ctx.NOT_MATCHED
    ctx.skip_code(2)
    ctx.skip_char(1)
    return True
def op_at(ctx):
    # <AT> <code>
    # Zero-width assertion at the current position; consumes no input.
    if at_dispatch(ctx.peek_code(1), ctx):
        ctx.skip_code(2)
    else:
        ctx.has_matched = ctx.NOT_MATCHED
    return True
def op_category(ctx):
    # <CATEGORY> <code>
    # Match a single character belonging to the given category.
    if not ctx.at_end() and \
            rsre_char.category_dispatch(ctx.peek_code(1), ctx.peek_char()):
        ctx.skip_code(2)
        ctx.skip_char(1)
    else:
        ctx.has_matched = ctx.NOT_MATCHED
    return True
def op_any(ctx):
    # <ANY>
    # Match any single character except a newline.
    if ctx.at_end() or ctx.at_linebreak():
        ctx.has_matched = ctx.NOT_MATCHED
    else:
        ctx.skip_code(1)
        ctx.skip_char(1)
    return True
def op_any_all(ctx):
    # <ANY_ALL>
    # Match any single character, newline included (DOTALL mode).
    if ctx.at_end():
        ctx.has_matched = ctx.NOT_MATCHED
    else:
        ctx.skip_code(1)
        ctx.skip_char(1)
    return True
def general_op_in(ctx, ignore=False):
    # Common implementation of IN / IN_IGNORE: match one character
    # against the charset that follows the <skip> code word.
    if ctx.at_end():
        ctx.has_matched = ctx.NOT_MATCHED
        return
    skip = ctx.peek_code(1)
    ctx.skip_code(2) # position the code pointer on the set body
    char_code = ctx.peek_char()
    if ignore:
        char_code = ctx.state.lower(char_code)
    if rsre_char.check_charset(char_code, ctx):
        ctx.skip_code(skip - 1)
        ctx.skip_char(1)
    else:
        ctx.has_matched = ctx.NOT_MATCHED
def op_in(ctx):
    # match set member (or non_member)
    # <IN> <skip> <set>
    # Thin wrapper: general_op_in does the work (case-sensitive).
    general_op_in(ctx)
    return True
def op_in_ignore(ctx):
    # match set member (or non_member), disregarding case of current char
    # <IN_IGNORE> <skip> <set>
    # Same as op_in but the character is lowered first.
    general_op_in(ctx, ignore=True)
    return True
def op_branch(ctx):
    # alternation
    # <BRANCH> <0=skip> code <JUMP> ... <NULL>
    # Resumption protocol: the skip of the branch just tried is kept via
    # backup_value() so the next alternative can be located on resume.
    if not ctx.is_resumed():
        ctx.state.marks_push()
        ctx.skip_code(1)
        current_branch_length = ctx.peek_code(0)
    else:
        if ctx.child_context.has_matched == ctx.MATCHED:
            ctx.has_matched = ctx.MATCHED
            return True
        ctx.state.marks_pop_keep()
        last_branch_length = ctx.restore_values()[0]
        ctx.skip_code(last_branch_length)
        current_branch_length = ctx.peek_code(0)
    if current_branch_length:
        # try this alternative in a child context
        ctx.state.string_position = ctx.string_position
        ctx.push_new_context(1)
        ctx.backup_value(current_branch_length)
        return False
    # a zero skip terminates the alternatives: all of them failed
    ctx.state.marks_pop_discard()
    ctx.has_matched = ctx.NOT_MATCHED
    return True
def op_repeat_one(ctx):
    # match repeated sequence (maximizing).
    # this operator only works if the repeated item is exactly one character
    # wide, and we're not already collecting backtracking points.
    # <REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
    # Backed-up values on suspension: mincount, count.
    # Case 1: First entry point
    if not ctx.is_resumed():
        mincount = ctx.peek_code(2)
        maxcount = ctx.peek_code(3)
        if ctx.remaining_chars() < mincount:
            ctx.has_matched = ctx.NOT_MATCHED
            return True
        ctx.state.string_position = ctx.string_position
        # greedily consume as many single-item repetitions as possible
        count = count_repetitions(ctx, maxcount)
        ctx.skip_char(count)
        if count < mincount:
            ctx.has_matched = ctx.NOT_MATCHED
            return True
        if ctx.peek_code(ctx.peek_code(1) + 1) == 1: # 1 == OPCODES["success"]
            # tail is empty. we're finished
            ctx.state.string_position = ctx.string_position
            ctx.has_matched = ctx.MATCHED
            return True
        ctx.state.marks_push()
        # XXX literal optimization missing here
    # Case 2: Repetition is resumed (aka backtracked)
    else:
        if ctx.child_context.has_matched == ctx.MATCHED:
            ctx.has_matched = ctx.MATCHED
            return True
        values = ctx.restore_values()
        mincount = values[0]
        count = values[1]
        # give back one repetition and retry the tail from there
        ctx.skip_char(-1)
        count -= 1
        ctx.state.marks_pop_keep()
    # Initialize the actual backtracking
    if count >= mincount:
        ctx.state.string_position = ctx.string_position
        ctx.push_new_context(ctx.peek_code(1) + 1)
        ctx.backup_value(mincount)
        ctx.backup_value(count)
        return False
    # Backtracking failed
    ctx.state.marks_pop_discard()
    ctx.has_matched = ctx.NOT_MATCHED
    return True
def op_min_repeat_one(ctx):
    # match repeated sequence (minimizing)
    # <MIN_REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
    # Backed-up values on suspension: maxcount, count.
    # Case 1: First entry point
    if not ctx.is_resumed():
        mincount = ctx.peek_code(2)
        maxcount = ctx.peek_code(3)
        if ctx.remaining_chars() < mincount:
            ctx.has_matched = ctx.NOT_MATCHED
            return True
        ctx.state.string_position = ctx.string_position
        # consume only the mandatory minimum before trying the tail
        if mincount == 0:
            count = 0
        else:
            count = count_repetitions(ctx, mincount)
            if count < mincount:
                ctx.has_matched = ctx.NOT_MATCHED
                return True
        ctx.skip_char(count)
        if ctx.peek_code(ctx.peek_code(1) + 1) == 1: # OPCODES["success"]
            # tail is empty. we're finished
            ctx.state.string_position = ctx.string_position
            ctx.has_matched = ctx.MATCHED
            return True
        ctx.state.marks_push()
    # Case 2: Repetition resumed, "forwardtracking"
    else:
        if ctx.child_context.has_matched == ctx.MATCHED:
            ctx.has_matched = ctx.MATCHED
            return True
        values = ctx.restore_values()
        maxcount = values[0]
        count = values[1]
        ctx.state.string_position = ctx.string_position
        # the tail failed: try to consume one more repetition
        if count_repetitions(ctx, 1) == 0:
            # Tail didn't match and no more repetitions --> fail
            ctx.state.marks_pop_discard()
            ctx.has_matched = ctx.NOT_MATCHED
            return True
        ctx.skip_char(1)
        count += 1
        ctx.state.marks_pop_keep()
    # Try to match tail
    if maxcount == MAXREPEAT or count <= maxcount:
        ctx.state.string_position = ctx.string_position
        ctx.push_new_context(ctx.peek_code(1) + 1)
        ctx.backup_value(maxcount)
        ctx.backup_value(count)
        return False
    # Failed
    ctx.state.marks_pop_discard()
    ctx.has_matched = ctx.NOT_MATCHED
    return True
def op_repeat(ctx):
    # create repeat context. all the hard work is done by the UNTIL
    # operator (MAX_UNTIL, MIN_UNTIL)
    # <REPEAT> <skip> <1=min> <2=max> item <UNTIL> tail
    if not ctx.is_resumed():
        # install a RepeatContext as the innermost active repeat
        ctx.repeat = RepeatContext(ctx)
        ctx.state.repeat = ctx.repeat
        ctx.state.string_position = ctx.string_position
        ctx.push_new_context(ctx.peek_code(1) + 1)
        return False
    else:
        ctx.state.repeat = ctx.repeat
        ctx.has_matched = ctx.child_context.has_matched
        return True
def op_max_until(ctx):
    # maximizing repeat
    # <REPEAT> <skip> <1=min> <2=max> item <MAX_UNTIL> tail
    # Resumption protocol: five ints are backed up -- mincount,
    # maxcount, count, saved last_position, and a tag telling what was
    # suspended: 0 = mandatory item, 2 = optional item, 1 = tail.
    # Case 1: First entry point
    if not ctx.is_resumed():
        repeat = ctx.state.repeat
        if repeat is None:
            raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.")
        mincount = repeat.peek_code(2)
        maxcount = repeat.peek_code(3)
        ctx.state.string_position = ctx.string_position
        count = repeat.count + 1
        if count < mincount:
            # not enough matches
            repeat.count = count
            repeat.repeat_stack.append(repeat.push_new_context(4))
            ctx.backup_value(mincount)
            ctx.backup_value(maxcount)
            ctx.backup_value(count)
            ctx.backup_value(0) # Dummy for last_position
            ctx.backup_value(0)
            ctx.repeat = repeat
            return False
        if (count < maxcount or maxcount == MAXREPEAT) \
           and ctx.state.string_position != repeat.last_position:
            # we may have enough matches, if we can match another item, do so
            repeat.count = count
            ctx.state.marks_push()
            # last_position check above prevents zero-width infinite loops
            repeat.last_position = ctx.state.string_position
            repeat.repeat_stack.append(repeat.push_new_context(4))
            ctx.backup_value(mincount)
            ctx.backup_value(maxcount)
            ctx.backup_value(count)
            ctx.backup_value(repeat.last_position) # zero-width match protection
            ctx.backup_value(2) # more matching
            ctx.repeat = repeat
            return False
        # Cannot match more repeated items here. Make sure the tail matches.
        ctx.state.repeat = repeat.previous
        ctx.push_new_context(1)
        ctx.backup_value(mincount)
        ctx.backup_value(maxcount)
        ctx.backup_value(count)
        ctx.backup_value(repeat.last_position) # zero-width match protection
        ctx.backup_value(1) # tail matching
        ctx.repeat = repeat
        return False
    # Case 2: Resumed
    else:
        repeat = ctx.repeat
        values = ctx.restore_values()
        mincount = values[0]
        maxcount = values[1]
        count = values[2]
        save_last_position = values[3]
        tail_matching = values[4]
        if tail_matching == 0:
            # back from a mandatory item attempt
            ctx.has_matched = repeat.repeat_stack.pop().has_matched
            if ctx.has_matched == ctx.NOT_MATCHED:
                repeat.count = count - 1
                ctx.state.string_position = ctx.string_position
            return True
        elif tail_matching == 2:
            # back from an optional item attempt
            repeat.last_position = save_last_position
            if repeat.repeat_stack.pop().has_matched == ctx.MATCHED:
                ctx.state.marks_pop_discard()
                ctx.has_matched = ctx.MATCHED
                return True
            ctx.state.marks_pop()
            repeat.count = count - 1
            ctx.state.string_position = ctx.string_position
            # Cannot match more repeated items here. Make sure the tail matches.
            ctx.state.repeat = repeat.previous
            ctx.push_new_context(1)
            ctx.backup_value(mincount)
            ctx.backup_value(maxcount)
            ctx.backup_value(count)
            ctx.backup_value(repeat.last_position) # zero-width match protection
            ctx.backup_value(1) # tail matching
            return False
        else: # resuming after tail matching
            ctx.has_matched = ctx.child_context.has_matched
            if ctx.has_matched == ctx.NOT_MATCHED:
                ctx.state.repeat = repeat
                ctx.state.string_position = ctx.string_position
            return True
def op_min_until(ctx):
    # minimizing repeat
    # <REPEAT> <skip> <1=min> <2=max> item <MIN_UNTIL> tail
    # Resumption protocol: four ints are backed up -- mincount,
    # maxcount, count, and a tag telling what was suspended:
    # 0 = mandatory item, 1 = tail, 2 = extra item.
    # Case 1: First entry point
    if not ctx.is_resumed():
        repeat = ctx.state.repeat
        if repeat is None:
            raise RuntimeError("Internal re error: MIN_UNTIL without REPEAT.")
        mincount = repeat.peek_code(2)
        maxcount = repeat.peek_code(3)
        ctx.state.string_position = ctx.string_position
        count = repeat.count + 1
        if count < mincount:
            # not enough matches
            repeat.count = count
            repeat.repeat_stack.append(repeat.push_new_context(4))
            ctx.backup_value(mincount)
            ctx.backup_value(maxcount)
            ctx.backup_value(count)
            ctx.backup_value(0)
            ctx.repeat = repeat
            return False
        # see if the tail matches
        ctx.state.marks_push()
        ctx.state.repeat = repeat.previous
        ctx.push_new_context(1)
        ctx.backup_value(mincount)
        ctx.backup_value(maxcount)
        ctx.backup_value(count)
        ctx.backup_value(1)
        ctx.repeat = repeat
        return False
    # Case 2: Resumed
    else:
        repeat = ctx.repeat
        if repeat.has_matched == ctx.MATCHED:
            ctx.has_matched = ctx.MATCHED
            return True
        values = ctx.restore_values()
        mincount = values[0]
        maxcount = values[1]
        count = values[2]
        matching_state = values[3]
        if count < mincount:
            # not enough matches
            ctx.has_matched = repeat.repeat_stack.pop().has_matched
            if ctx.has_matched == ctx.NOT_MATCHED:
                repeat.count = count - 1
                ctx.state.string_position = ctx.string_position
            return True
        if matching_state == 1:
            # returning from tail matching
            if ctx.child_context.has_matched == ctx.MATCHED:
                ctx.has_matched = ctx.MATCHED
                return True
            ctx.state.repeat = repeat
            ctx.state.string_position = ctx.string_position
            ctx.state.marks_pop()
        if not matching_state == 2:
            # match more until tail matches
            if count >= maxcount and maxcount != MAXREPEAT:
                ctx.has_matched = ctx.NOT_MATCHED
                return True
            repeat.count = count
            repeat.repeat_stack.append(repeat.push_new_context(4))
            ctx.backup_value(mincount)
            ctx.backup_value(maxcount)
            ctx.backup_value(count)
            ctx.backup_value(2)
            ctx.repeat = repeat
            return False
        # Final return
        ctx.has_matched = repeat.repeat_stack.pop().has_matched
        repeat.has_matched = ctx.has_matched
        if ctx.has_matched == ctx.NOT_MATCHED:
            repeat.count = count - 1
            ctx.state.string_position = ctx.string_position
        return True
def op_jump(ctx):
    # jump forward
    # <JUMP>/<INFO> <offset>
    # (INFO shares this handler: both simply skip over their block.)
    ctx.skip_code(ctx.peek_code(1) + 1)
    return True
def op_mark(ctx):
    # set mark
    # <MARK> <gid>
    # Record the current string position as group boundary <gid>.
    ctx.state.set_mark(ctx.peek_code(1), ctx.string_position)
    ctx.skip_code(2)
    return True
def general_op_groupref(ctx, ignore=False):
    # Common implementation of GROUPREF / GROUPREF_IGNORE: match the
    # same text a previously captured group matched.
    group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
    # fail fast on an unset group or when too few characters remain
    if group_start == -1 or group_end == -1 or group_end < group_start \
            or group_end - group_start > ctx.remaining_chars():
        ctx.has_matched = ctx.NOT_MATCHED
        return True
    while group_start < group_end:
        new_char = ctx.peek_char()
        old_char = ctx.state.get_char_ord(group_start)
        if ctx.at_end() or (not ignore and old_char != new_char) \
                or (ignore and ctx.state.lower(old_char) != ctx.state.lower(new_char)):
            ctx.has_matched = ctx.NOT_MATCHED
            return True
        group_start += 1
        ctx.skip_char(1)
    ctx.skip_code(2)
    return True
def op_groupref(ctx):
    # match backreference
    # <GROUPREF> <zero-based group index>
    return general_op_groupref(ctx)
def op_groupref_ignore(ctx):
    # match backreference case-insensitive
    # <GROUPREF_IGNORE> <zero-based group index>
    return general_op_groupref(ctx, ignore=True)
def op_groupref_exists(ctx):
    # <GROUPREF_EXISTS> <group> <skip> codeyes <JUMP> codeno ...
    # Conditional: run codeyes when the group was captured, otherwise
    # jump straight to codeno.
    start, end = ctx.state.get_marks(ctx.peek_code(1))
    captured = start != -1 and end != -1 and end >= start
    if captured:
        ctx.skip_code(3)
    else:
        ctx.skip_code(ctx.peek_code(2) + 1)
    return True
def op_assert(ctx):
    # assert subpattern
    # <ASSERT> <skip> <back> <pattern>
    # Lookaround: run the subpattern <back> characters earlier in a
    # child context; on success continue past it, consuming nothing.
    if not ctx.is_resumed():
        ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
        if ctx.state.string_position < 0:
            # not enough characters behind us for the lookbehind
            ctx.has_matched = ctx.NOT_MATCHED
            return True
        ctx.push_new_context(3)
        return False
    else:
        if ctx.child_context.has_matched == ctx.MATCHED:
            ctx.skip_code(ctx.peek_code(1) + 1)
        else:
            ctx.has_matched = ctx.NOT_MATCHED
        return True
def op_assert_not(ctx):
    # assert not subpattern
    # <ASSERT_NOT> <skip> <back> <pattern>
    # Negative lookaround: fails iff the subpattern matches; a start
    # position before the string counts as "no match".
    if not ctx.is_resumed():
        ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
        if ctx.state.string_position >= 0:
            ctx.push_new_context(3)
            return False
    else:
        if ctx.child_context.has_matched == ctx.MATCHED:
            ctx.has_matched = ctx.NOT_MATCHED
            return True
    ctx.skip_code(ctx.peek_code(1) + 1)
    return True
def count_repetitions(ctx, maxcount):
"""Returns the number of repetitions of a single item, starting from the
current string position. The code pointer is expected to point to a
REPEAT_ONE operation (with the repeated 4 ahead)."""
count = 0
real_maxcount = ctx.state.end - ctx.string_position
if maxcount < real_maxcount and maxcount != MAXREPEAT:
real_maxcount = maxcount
# XXX could special case every single character pattern here, as in C.
# This is a general solution, a bit hackisch, but works and should be
# efficient.
code_position = ctx.code_position
string_position = ctx.string_position
ctx.skip_code(4)
reset_position = ctx.code_position
while count < real_maxcount:
# this works because the single character pattern is followed by
# a success opcode
ctx.code_position = reset_position
opcode_dispatch_table[ctx.peek_code()](ctx)
if ctx.has_matched == ctx.NOT_MATCHED:
break
count += 1
ctx.has_matched = ctx.UNDECIDED
ctx.code_position = code_position
ctx.string_position = string_position
return count
# Maps opcode numbers to handler functions, indexed by the sre bytecode
# opcode value (see sre_constants.OPCODES).  None entries are opcodes
# with no top-level handler here (charset internals are dispatched by
# rsre_char.check_charset instead).
opcode_dispatch_table = [
    op_failure, op_success,
    op_any, op_any_all,
    op_assert, op_assert_not,
    op_at,
    op_branch,
    None, #CALL,
    op_category,
    None, None, #CHARSET, BIGCHARSET,
    op_groupref, op_groupref_exists, op_groupref_ignore,
    op_in, op_in_ignore,
    op_jump, op_jump,
    op_literal, op_literal_ignore,
    op_mark,
    op_max_until,
    op_min_until,
    op_not_literal, op_not_literal_ignore,
    None, #NEGATE,
    None, #RANGE,
    op_repeat,
    op_repeat_one,
    None, #SUBPATTERN,
    op_min_repeat_one,
]
##### At dispatch
def at_dispatch(atcode, context):
    """Evaluate the zero-width assertion `atcode` at the context's
    current position; unknown codes never match."""
    try:
        function, negate = at_dispatch_table[atcode]
    except IndexError:
        return False
    result = function(context)
    if negate:
        result = not result
    return result
def at_beginning(ctx):
    # start of the searched slice (also used for AT_BEGINNING_STRING)
    return ctx.at_beginning()
def at_beginning_line(ctx):
    # start of the string, or immediately after a newline
    return ctx.at_beginning() or ctx.peek_char(-1) == rsre_char.linebreak
def at_end(ctx):
    # end of the string, or just before a single trailing newline
    return ctx.at_end() or (ctx.remaining_chars() == 1 and ctx.at_linebreak())
def at_end_line(ctx):
    # before any newline, or at the very end
    return ctx.at_linebreak() or ctx.at_end()
def at_end_string(ctx):
    # strictly the end of the string
    return ctx.at_end()
def at_boundary(ctx):
    # ASCII word/non-word transition
    return ctx.at_boundary(rsre_char.is_word)
def at_loc_boundary(ctx):
    # locale word boundary (same as ASCII here -- see rsre_char)
    return ctx.at_boundary(rsre_char.is_loc_word)
def at_uni_boundary(ctx):
    # unicode word boundary
    return ctx.at_boundary(rsre_char.is_uni_word)
# Maps opcodes by indices to (function, negate) tuples.
# Index order follows sre's ATCODES; the third entry deliberately
# reuses at_beginning (AT_BEGINNING_STRING behaves like AT_BEGINNING).
at_dispatch_table = [
    (at_beginning, False), (at_beginning_line, False), (at_beginning, False),
    (at_boundary, False), (at_boundary, True),
    (at_end, False), (at_end_line, False), (at_end_string, False),
    (at_loc_boundary, False), (at_loc_boundary, True), (at_uni_boundary, False),
    (at_uni_boundary, True)
]
| Python |
"""
Character categories and charsets.
"""
import sys
# Note: the unicode parts of this module require you to call
# rsre.set_unicode_db() first, to select one of the modules
# pypy.module.unicodedata.unicodedb_x_y_z. This allows PyPy to use sre
# with the same version of the unicodedb as it uses for
# unicodeobject.py. If unset, the RPython program cannot use unicode
# matching.
unicodedb = None # possibly patched by rsre.set_unicode_db()
#### Constants
# Identifying as _sre from Python 2.3 or 2.4
MAGIC = 20031017
# In _sre.c this is bytesize of the code word type of the C implementation.
# There it's 2 for normal Python builds and more for wide unicode builds (large
# enough to hold a 32-bit UCS-4 encoded character). Since here in pure Python
# we only see re bytecodes as Python longs, we shouldn't have to care about the
# codesize. But sre_compile will compile some stuff differently depending on the
# codesize (e.g., charsets).
if sys.maxunicode == 65535:
    CODESIZE = 2
else:
    CODESIZE = 4
copyright = "_sre.py 2.4 Copyright 2005 by Nik Haldimann"
BIG_ENDIAN = sys.byteorder == "big"
# XXX can we import those safely from sre_constants?
SRE_INFO_PREFIX = 1 # pattern has a known literal prefix (see fast_search)
SRE_INFO_LITERAL = 2 # pattern is entirely a literal string
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_UNICODE = 32 # use unicode locale
OPCODE_INFO = 17
OPCODE_LITERAL = 19
MAXREPEAT = 65535 # sentinel meaning "unbounded repetition"
def getlower(char_ord, flags):
    """Return the lowercased ordinal of char_ord.

    With SRE_FLAG_UNICODE set, the installed unicodedb does the work
    (see rsre.set_unicode_db); otherwise only ASCII 'A'-'Z' are
    lowered -- there is no platform-dependent locale support for now.
    """
    if flags & SRE_FLAG_UNICODE:
        return unicodedb.tolower(char_ord)
    if ord('A') <= char_ord <= ord('Z'): # ASCII lower
        char_ord += ord('a') - ord('A')
    return char_ord
class MatchContextBase(object):
    """Code-pointer helpers shared by the match context classes.

    The three class constants encode the state of a match attempt.
    """
    UNDECIDED = 0
    MATCHED = 1
    NOT_MATCHED = 2
    def peek_code(self, peek=0):
        # Read the code word `peek` positions ahead without advancing.
        return self.pattern_codes[self.code_position + peek]
    def skip_code(self, skip_count):
        # Advance the code pointer by skip_count words.
        self.code_position += skip_count
    def has_remaining_codes(self):
        # True while the code pointer has not reached the end.
        return self.code_position != len(self.pattern_codes)
#### Category helpers
ascii_char_info = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2,
2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0,
0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 ]
linebreak = ord("\n")
underline = ord("_")
def is_digit(code):
    # ASCII digit (bit 0 of ascii_char_info)
    return code < 128 and (ascii_char_info[code] & 1 != 0)
def is_uni_digit(code):
    # unicode digit; requires rsre.set_unicode_db() to have been called
    return unicodedb.isdigit(code)
def is_space(code):
    # ASCII whitespace (bit 1 of ascii_char_info)
    return code < 128 and (ascii_char_info[code] & 2 != 0)
def is_uni_space(code):
    return unicodedb.isspace(code)
def is_word(code):
    # ASCII word character (bit 4 of ascii_char_info)
    return code < 128 and (ascii_char_info[code] & 16 != 0)
def is_uni_word(code):
    # unicode word character: alphanumeric or underscore
    return unicodedb.isalnum(code) or code == underline
is_loc_word = is_word # XXX no support for platform locales anyway
def is_linebreak(code):
    return code == linebreak
def is_uni_linebreak(code):
    return unicodedb.islinebreak(code)
#### Category dispatch

def category_dispatch(category_code, char_code):
    """Test *char_code* against the character category *category_code*.

    Unknown category codes simply fail the match.
    """
    try:
        predicate, negated = category_dispatch_table[category_code]
    except IndexError:
        return False
    matched = predicate(char_code)
    return not matched if negated else matched
# Maps category opcodes (by index) to (predicate, negate) tuples; the order
# mirrors sre_constants' CATEGORY_* numbering.
category_dispatch_table = [
    (is_digit, False), (is_digit, True), (is_space, False),
    (is_space, True), (is_word, False), (is_word, True),
    (is_linebreak, False), (is_linebreak, True), (is_loc_word, False),
    (is_loc_word, True), (is_uni_digit, False), (is_uni_digit, True),
    (is_uni_space, False), (is_uni_space, True), (is_uni_word, False),
    (is_uni_word, True), (is_uni_linebreak, False),
    (is_uni_linebreak, True)
]
##### Charset evaluation

# Tri-state outcome of a single set-opcode handler; the sign flips under
# <NEGATE> (see set_negate()).
SET_OK = 1
SET_NOT_OK = -1
SET_NOT_FINISHED = 0
def check_charset(char_code, context):
    """Match *char_code* against a set of arbitrary length.

    The set is assumed to start at the current code position of *context*;
    the position is restored before returning.
    """
    context.set_ok = SET_OK
    saved_position = context.code_position
    outcome = SET_NOT_FINISHED
    while outcome == SET_NOT_FINISHED:
        opcode = context.peek_code()
        try:
            handler = set_dispatch_table[opcode]
        except IndexError:
            return False
        outcome = handler(context, char_code)
    context.code_position = saved_position
    return outcome == SET_OK
def set_failure(ctx, char_code):
    # <FAILURE>: terminate the set with the negation of the current polarity.
    return -ctx.set_ok
def set_literal(ctx, char_code):
    # <LITERAL> <code>: succeed on an exact match, otherwise move past it.
    if ctx.peek_code(1) != char_code:
        ctx.skip_code(2)
        return SET_NOT_FINISHED
    return ctx.set_ok
def set_category(ctx, char_code):
    # <CATEGORY> <code>: delegate to the category predicate table.
    if not category_dispatch(ctx.peek_code(1), char_code):
        ctx.skip_code(2)
        return SET_NOT_FINISHED
    return ctx.set_ok
def set_charset(ctx, char_code):
    # <CHARSET> <bitmap>: a 256-bit bitmap stored in CODESIZE-wide words.
    ctx.skip_code(1)  # position on the first bitmap word
    if CODESIZE == 2:
        word_shift, bit_mask, bitmap_words = 4, 15, 16
    else:
        word_shift, bit_mask, bitmap_words = 5, 31, 8
    if char_code < 256:
        word = ctx.peek_code(char_code >> word_shift)
        if word & (1 << (char_code & bit_mask)):
            return ctx.set_ok
    ctx.skip_code(bitmap_words)  # skip over the bitmap
    return SET_NOT_FINISHED
def set_range(ctx, char_code):
    # <RANGE> <lower> <upper>: inclusive character range.
    lower = ctx.peek_code(1)
    upper = ctx.peek_code(2)
    if lower <= char_code <= upper:
        return ctx.set_ok
    ctx.skip_code(3)
    return SET_NOT_FINISHED
def set_negate(ctx, char_code):
    # <NEGATE>: flip the polarity of every following set test.
    ctx.set_ok = -ctx.set_ok
    ctx.skip_code(1)
    return SET_NOT_FINISHED
def set_bigcharset(ctx, char_code):
    # <BIGCHARSET> <blockcount> <256 blockindices> <blocks>
    # Two-level table for characters >= 256: the character's high byte
    # selects a block index (one byte each, packed CODESIZE per code word),
    # which selects a 256-bit block bitmap tested like a small charset.
    # NOTE(review): this file targets Python 2, where '/' on ints is floor
    # division; under Python 3 the indices below would become floats.
    # XXX this function probably needs a makeover
    count = ctx.peek_code(1)
    ctx.skip_code(2)
    if char_code < 65536:
        block_index = char_code >> 8
        # NB: there are CODESIZE block indices per bytecode
        a = to_byte_array(ctx.peek_code(block_index / CODESIZE))
        block = a[block_index % CODESIZE]
        ctx.skip_code(256 / CODESIZE) # skip block indices
        if CODESIZE == 2:
            shift = 4
        else:
            shift = 5
        block_value = ctx.peek_code(block * (32 / CODESIZE)
                                    + ((char_code & 255) >> shift))
        if block_value & (1 << (char_code & ((8 * CODESIZE) - 1))):
            return ctx.set_ok
    else:
        # characters outside the BMP can never match; just step over the data
        ctx.skip_code(256 / CODESIZE) # skip block indices
    ctx.skip_code(count * (32 / CODESIZE)) # skip blocks
    return SET_NOT_FINISHED
def to_byte_array(int_value):
    """Split *int_value* (one CODESIZE-bytes-wide code word) into a list of
    its bytes, ordered to match the native byte order."""
    result = []
    for _ in range(CODESIZE):
        result.append(int_value & 0xff)
        int_value >>= 8
    if BIG_ENDIAN:
        result.reverse()
    return result
# Maps set opcodes (by index) to their handler function; opcodes that cannot
# occur inside a set are left as None.
set_dispatch_table = [
    set_failure, None, None, None, None, None, None, None, None,
    set_category, set_charset, set_bigcharset, None, None, None,
    None, None, None, None, set_literal, None, None, None, None,
    None, None, set_negate, set_range
]
| Python |
"""
An RPython implementation of sockets based on ctypes.
Note that the interface has to be slightly different - this is not
a drop-in replacement for the 'socket' module.
"""
# Known missing features:
#
# - support for non-Linux platforms
# - address families other than AF_INET, AF_INET6, AF_UNIX
# - methods makefile(),
# - SSL
from pypy.rlib.objectmodel import instantiate
from pypy.rlib import _rsocket_ctypes as _c
from ctypes import c_char, c_ulong, c_char_p, c_void_p
from ctypes import POINTER, pointer, byref, create_string_buffer, sizeof, cast
from pypy.rpython.rctypes.astruct import offsetof
from pypy.rlib.rarithmetic import intmask
constants = _c.constants
locals().update(constants) # Define constants from _c

# On Windows, WSAStartup() must be called once before any socket use;
# elsewhere startup is a no-op.
if _c.MS_WINDOWS:
    def rsocket_startup():
        wsadata = _c.WSAData()
        res = _c.WSAStartup(1, byref(wsadata))
        assert res == 0
else:
    def rsocket_startup():
        pass

# Re-export the byte-order conversion helpers from the C layer.
ntohs = _c.ntohs
ntohl = _c.ntohl
htons = _c.htons
htonl = _c.htonl

# Maps address family numbers to their Address subclass; filled in
# automatically by Address's metaclass as subclasses are defined.
_FAMILIES = {}
class Address(object):
    """The base class for RPython-level objects representing addresses.
    Fields: addr - a _c.sockaddr structure
            addrlen - size used within 'addr'
    """
    class __metaclass__(type):
        # Auto-register every subclass that declares a 'family' attribute
        # into the _FAMILIES lookup table (used by familyclass()).
        def __new__(cls, name, bases, dict):
            family = dict.get('family')
            A = type.__new__(cls, name, bases, dict)
            if family is not None:
                _FAMILIES[family] = A
            return A

    def __init__(self, addr, addrlen):
        self.addr = addr
        self.addrlen = addrlen

    def as_object(self, space):
        """Convert the address to an app-level object."""
        # If we don't know the address family, don't raise an
        # exception -- return it as a (family, raw-bytes) tuple.
        family = self.addr.sa_family
        buf = copy_buffer(cast(pointer(self.addr.sa_data), POINTER(c_char)),
                          self.addrlen - offsetof(_c.sockaddr, 'sa_data'))
        return space.newtuple([space.wrap(family),
                               space.wrap(buf.raw)])

    def from_object(space, w_address):
        """Convert an app-level object to an Address."""
        # It's a static method but it's overridden and must be called
        # on the correct subclass.
        raise RSocketError("unknown address family")
    from_object = staticmethod(from_object)
# ____________________________________________________________
def makeipaddr(name, result=None):
    # Convert a string specifying a host name or one of a few symbolic
    # names to an IPAddress instance. This usually calls getaddrinfo()
    # to do the work; the names "" and "<broadcast>" are special.
    # If 'result' is specified it must be a prebuilt INETAddress or
    # INET6Address that is filled; otherwise a new INETXAddress is returned.
    if result is None:
        family = AF_UNSPEC
    else:
        family = result.family
    if len(name) == 0:
        # the empty string resolves to the wildcard ("any") address
        info = getaddrinfo(None, "0",
                           family=family,
                           socktype=SOCK_DGRAM,   # dummy
                           flags=AI_PASSIVE,
                           address_to_fill=result)
        if len(info) > 1:
            raise RSocketError("wildcard resolved to "
                               "multiple addresses")
        return info[0][4]
    # IPv4 also supports the special name "<broadcast>".
    if name == '<broadcast>':
        return makeipv4addr(intmask(INADDR_BROADCAST), result)
    # "dd.dd.dd.dd" format: parse it directly, without getaddrinfo().
    digits = name.split('.')
    if len(digits) == 4:
        try:
            d0 = int(digits[0])
            d1 = int(digits[1])
            d2 = int(digits[2])
            d3 = int(digits[3])
        except ValueError:
            pass   # not all-numeric: fall through to getaddrinfo()
        else:
            if (0 <= d0 <= 255 and
                0 <= d1 <= 255 and
                0 <= d2 <= 255 and
                0 <= d3 <= 255):
                # pack the four octets in network byte order
                return makeipv4addr(intmask(htonl(
                    (intmask(d0 << 24)) | (d1 << 16) | (d2 << 8) | (d3 << 0))),
                    result)
    # generic host name to IP conversion
    info = getaddrinfo(name, None, family=family, address_to_fill=result)
    return info[0][4]
class IPAddress(Address):
    """Common base class of the AF_INET and AF_INET6 address classes."""

    def get_host(self):
        """Return the numeric string form of the IP address.

        For IPv4 this is always a string of the form 'dd.dd.dd.dd'
        (with variable size numbers).
        """
        return getnameinfo(self, NI_NUMERICHOST | NI_NUMERICSERV)[0]
# ____________________________________________________________
class INETAddress(IPAddress):
    """An AF_INET (IPv4) socket address: a (host, port) pair."""
    family = AF_INET
    struct = _c.sockaddr_in
    maxlen = sizeof(struct)

    def __init__(self, host, port):
        makeipaddr(host, self)   # resolves 'host' and fills in self.addr
        a = self.as_sockaddr_in()
        a.sin_port = htons(port)

    def as_sockaddr_in(self):
        # View self.addr as a sockaddr_in structure (same memory).
        if self.addrlen != INETAddress.maxlen:
            raise RSocketError("invalid address")
        return cast(pointer(self.addr), POINTER(_c.sockaddr_in)).contents

    def __repr__(self):
        try:
            return '<INETAddress %s:%d>' % (self.get_host(), self.get_port())
        except SocketError:
            return '<INETAddress ?>'

    def get_port(self):
        a = self.as_sockaddr_in()
        return ntohs(a.sin_port)

    def eq(self, other): # __eq__() is not called by RPython :-/
        return (isinstance(other, INETAddress) and
                self.get_host() == other.get_host() and
                self.get_port() == other.get_port())

    def as_object(self, space):
        # app-level form: a (host, port) tuple
        return space.newtuple([space.wrap(self.get_host()),
                               space.wrap(self.get_port())])

    def from_object(space, w_address):
        # Parse an app-level object representing an AF_INET address
        try:
            w_host, w_port = space.unpackiterable(w_address, 2)
        except ValueError:
            raise TypeError("AF_INET address must be a tuple of length 2")
        host = space.str_w(w_host)
        port = space.int_w(w_port)
        return INETAddress(host, port)
    from_object = staticmethod(from_object)

    def fill_from_object(self, space, w_address):
        # XXX a bit of code duplication
        # only the port is overwritten; the host was filled by makeipaddr()
        _, w_port = space.unpackiterable(w_address, 2)
        port = space.int_w(w_port)
        a = self.as_sockaddr_in()
        a.sin_port = htons(port)

    def from_in_addr(in_addr):
        # Build an INETAddress (with port 0) from a raw in_addr structure.
        sin = _c.sockaddr_in(sin_family = AF_INET) # PLAT sin_len
        sin.sin_addr = in_addr
        paddr = cast(pointer(sin), _c.sockaddr_ptr)
        result = instantiate(INETAddress)
        result.addr = paddr.contents
        result.addrlen = sizeof(_c.sockaddr_in)
        return result
    from_in_addr = staticmethod(from_in_addr)

    def extract_in_addr(self):
        # Return (void pointer to the raw in_addr, its size in bytes).
        p = cast(pointer(self.as_sockaddr_in().sin_addr), c_void_p)
        return p, sizeof(_c.in_addr)
# ____________________________________________________________
class INET6Address(IPAddress):
    """An AF_INET6 socket address: (host, port, flowinfo, scope_id)."""
    family = AF_INET6
    struct = _c.sockaddr_in6
    maxlen = sizeof(struct)

    def __init__(self, host, port, flowinfo=0, scope_id=0):
        makeipaddr(host, self)   # resolves 'host' and fills in self.addr
        a = self.as_sockaddr_in6()
        a.sin6_port = htons(port)
        a.sin6_flowinfo = flowinfo
        a.sin6_scope_id = scope_id

    def as_sockaddr_in6(self):
        # View self.addr as a sockaddr_in6 structure (same memory).
        if self.addrlen != INET6Address.maxlen:
            raise RSocketError("invalid address")
        return cast(pointer(self.addr), POINTER(_c.sockaddr_in6)).contents

    def __repr__(self):
        try:
            return '<INET6Address %s:%d %d %d>' % (self.get_host(),
                                                   self.get_port(),
                                                   self.get_flowinfo(),
                                                   self.get_scope_id())
        except SocketError:
            return '<INET6Address ?>'

    def get_port(self):
        a = self.as_sockaddr_in6()
        return ntohs(a.sin6_port)

    def get_flowinfo(self):
        a = self.as_sockaddr_in6()
        return a.sin6_flowinfo

    def get_scope_id(self):
        a = self.as_sockaddr_in6()
        return a.sin6_scope_id

    def eq(self, other): # __eq__() is not called by RPython :-/
        return (isinstance(other, INET6Address) and
                self.get_host() == other.get_host() and
                self.get_port() == other.get_port() and
                self.get_flowinfo() == other.get_flowinfo() and
                self.get_scope_id() == other.get_scope_id())

    def as_object(self, space):
        # app-level form: a (host, port, flowinfo, scope_id) tuple
        return space.newtuple([space.wrap(self.get_host()),
                               space.wrap(self.get_port()),
                               space.wrap(self.get_flowinfo()),
                               space.wrap(self.get_scope_id())])

    def from_object(space, w_address):
        # Parse an app-level tuple of length 2 to 4.
        pieces_w = space.unpackiterable(w_address)
        if not (2 <= len(pieces_w) <= 4):
            raise TypeError("AF_INET6 address must be a tuple of length 2 "
                            "to 4, not %d" % len(pieces_w))
        host = space.str_w(pieces_w[0])
        port = space.int_w(pieces_w[1])
        if len(pieces_w) > 2: flowinfo = space.int_w(pieces_w[2])
        else:                 flowinfo = 0
        if len(pieces_w) > 3: scope_id = space.int_w(pieces_w[3])
        else:                 scope_id = 0
        return INET6Address(host, port, flowinfo, scope_id)
    from_object = staticmethod(from_object)

    def fill_from_object(self, space, w_address):
        # XXX a bit of code duplication with from_object()
        pieces_w = space.unpackiterable(w_address)
        if not (2 <= len(pieces_w) <= 4):
            raise RSocketError("AF_INET6 address must be a tuple of length 2 "
                               "to 4, not %d" % len(pieces_w))
        port = space.int_w(pieces_w[1])
        if len(pieces_w) > 2: flowinfo = space.int_w(pieces_w[2])
        else:                 flowinfo = 0
        if len(pieces_w) > 3: scope_id = space.int_w(pieces_w[3])
        else:                 scope_id = 0
        a = self.as_sockaddr_in6()
        a.sin6_port = htons(port)
        a.sin6_flowinfo = flowinfo
        a.sin6_scope_id = scope_id

    def from_in6_addr(in6_addr):
        # Build an INET6Address (with port 0 etc.) from a raw in6_addr.
        # BUG FIX: a sockaddr_in6's family is AF_INET6, not AF_INET;
        # the wrong family made the resulting address unusable (e.g.
        # make_address-style family checks would mismatch).
        sin = _c.sockaddr_in6(sin6_family = AF_INET6) # PLAT sin_len
        sin.sin6_addr = in6_addr
        paddr = cast(pointer(sin), _c.sockaddr_ptr)
        result = instantiate(INET6Address)
        result.addr = paddr.contents
        result.addrlen = sizeof(_c.sockaddr_in6)
        return result
    from_in6_addr = staticmethod(from_in6_addr)

    def extract_in_addr(self):
        # Return (void pointer to the raw in6_addr, its size in bytes).
        p = cast(pointer(self.as_sockaddr_in6().sin6_addr), c_void_p)
        return p, sizeof(_c.in6_addr)
# ____________________________________________________________
if 'AF_UNIX' in constants:
    class UNIXAddress(Address):
        """An AF_UNIX socket address: a filesystem path, or (on Linux)
        an abstract name starting with a NUL byte."""
        family = AF_UNIX
        struct = _c.sockaddr_un
        maxlen = sizeof(struct)

        def __init__(self, path):
            sun = _c.sockaddr_un(sun_family = AF_UNIX)
            if _c.linux and path.startswith('\x00'):
                # Linux abstract namespace extension
                if len(path) > sizeof(sun.sun_path):
                    raise RSocketError("AF_UNIX path too long")
            else:
                # regular NULL-terminated string
                if len(path) >= sizeof(sun.sun_path):
                    raise RSocketError("AF_UNIX path too long")
                sun.sun_path[len(path)] = 0
            for i in range(len(path)):
                sun.sun_path[i] = ord(path[i])
            # 'self.sun' keeps the ctypes structure alive as long as the
            # address object (self.addr views the same memory)
            self.sun = sun
            self.addr = cast(pointer(sun), _c.sockaddr_ptr).contents
            self.addrlen = offsetof(_c.sockaddr_un, 'sun_path') + len(path)

        def as_sockaddr_un(self):
            # View self.addr as a sockaddr_un (needs a non-empty path part).
            if self.addrlen <= offsetof(_c.sockaddr_un, 'sun_path'):
                raise RSocketError("invalid address")
            return cast(pointer(self.addr), POINTER(_c.sockaddr_un)).contents

        def __repr__(self):
            try:
                return '<UNIXAddress %r>' % (self.get_path(),)
            except SocketError:
                return '<UNIXAddress ?>'

        def get_path(self):
            a = self.as_sockaddr_un()
            if _c.linux and a.sun_path[0] == 0:
                # Linux abstract namespace: the name is the raw bytes
                # (not NUL-terminated), length given by addrlen
                buf = copy_buffer(cast(pointer(a.sun_path), POINTER(c_char)),
                                  self.addrlen - offsetof(_c.sockaddr_un,
                                                          'sun_path'))
                return buf.raw
            else:
                # regular NULL-terminated string
                return cast(pointer(a.sun_path), c_char_p).value

        def eq(self, other): # __eq__() is not called by RPython :-/
            return (isinstance(other, UNIXAddress) and
                    self.get_path() == other.get_path())

        def as_object(self, space):
            # app-level form: just the path string
            return space.wrap(self.get_path())

        def from_object(space, w_address):
            # Parse an app-level string as an AF_UNIX path.
            return UNIXAddress(space.str_w(w_address))
        from_object = staticmethod(from_object)
if 'AF_NETLINK' in constants:
    class NETLINKAddress(Address):
        """An AF_NETLINK socket address: a (pid, groups) pair."""
        family = AF_NETLINK
        struct = _c.sockaddr_nl
        maxlen = sizeof(struct)

        def __init__(self, pid, groups):
            addr = _c.sockaddr_nl(nl_family = AF_NETLINK)
            addr.nl_pid = pid
            addr.nl_groups = groups
            # keep the ctypes structure alive as long as the address object
            self._addr_keepalive_netlink = addr
            self.addr = cast(pointer(addr), _c.sockaddr_ptr).contents
            self.addrlen = sizeof(addr)

        def as_sockaddr_nl(self):
            # View self.addr as a sockaddr_nl structure (same memory).
            if self.addrlen != NETLINKAddress.maxlen:
                raise RSocketError("invalid address")
            return cast(pointer(self.addr), POINTER(_c.sockaddr_nl)).contents

        def get_pid(self):
            return self.as_sockaddr_nl().nl_pid

        def get_groups(self):
            return self.as_sockaddr_nl().nl_groups

        def __repr__(self):
            # BUG FIX: the original format string had a single '%r' for two
            # arguments, so every repr() raised "not all arguments converted".
            return '<NETLINKAddress %r %r>' % (self.get_pid(),
                                               self.get_groups())

        def as_object(self, space):
            # app-level form: a (pid, groups) tuple
            return space.newtuple([space.wrap(self.get_pid()),
                                   space.wrap(self.get_groups())])

        def from_object(space, w_address):
            # Parse an app-level (pid, groups) tuple.
            try:
                w_pid, w_groups = space.unpackiterable(w_address, 2)
            except ValueError:
                raise TypeError("AF_NETLINK address must be a tuple of length 2")
            return NETLINKAddress(space.int_w(w_pid), space.int_w(w_groups))
        from_object = staticmethod(from_object)
# ____________________________________________________________
def familyclass(family):
    """Return the Address subclass registered for *family*, or the generic
    Address class when the family is unknown."""
    return _FAMILIES.get(family, Address)
af_get = familyclass   # historical alias
def make_address(addrptr, addrlen, result=None):
    # Build (or fill in 'result') an Address of the proper subclass from a
    # raw sockaddr pointer holding 'addrlen' valid bytes.
    family = addrptr.contents.sa_family
    if result is None:
        result = instantiate(familyclass(family))
    elif result.family != family:
        raise RSocketError("address family mismatched")
    # copy the raw bytes; the copy doubles as the keepalive for result.addr
    paddr = result._addr_keepalive0 = copy_buffer(cast(addrptr, POINTER(c_char)), addrlen)
    result.addr = cast(paddr, _c.sockaddr_ptr).contents
    result.addrlen = addrlen
    return result
def makeipv4addr(s_addr, result=None):
    # Build (or fill in 'result') an INETAddress from a raw 32-bit s_addr
    # value, already in network byte order.
    if result is None:
        result = instantiate(INETAddress)
    elif result.family != AF_INET:
        raise RSocketError("address family mismatched")
    sin = _c.sockaddr_in(sin_family = AF_INET) # PLAT sin_len
    sin.sin_addr.s_addr = s_addr
    paddr = cast(pointer(sin), _c.sockaddr_ptr)
    result._addr_keepalive1 = sin   # keep the structure alive with the address
    result.addr = paddr.contents
    result.addrlen = sizeof(_c.sockaddr_in)
    return result
def make_null_address(family):
    # Build an empty Address of the subclass matching 'family', backed by a
    # zeroed buffer of the family's maximum size.  Returns (address, maxlen);
    # callers pass the buffer to C and then set address.addrlen themselves.
    klass = familyclass(family)
    buf = create_string_buffer(klass.maxlen)
    result = instantiate(klass)
    result._addr_keepalive2 = buf   # keep the buffer alive with the address
    result.addr = cast(buf, _c.sockaddr_ptr).contents
    result.addrlen = 0
    return result, len(buf)
def copy_buffer(ptr, size):
    """Return a fresh ctypes string buffer holding a copy of the *size*
    bytes at *ptr*."""
    result = create_string_buffer(size)
    for index in range(size):
        result[index] = ptr[index]
    return result
def ipaddr_from_object(space, w_sockaddr):
    """Resolve the host found in an app-level sockaddr tuple, then fill in
    the remaining fields (port, ...) from the same tuple."""
    w_host = space.getitem(w_sockaddr, space.wrap(0))
    addr = makeipaddr(space.str_w(w_host))
    addr.fill_from_object(space, w_sockaddr)
    return addr
# ____________________________________________________________
class RSocket(object):
    """RPython-level socket object.
    """
    _mixin_ = True        # for interp_socket.py
    # class-level default: "no OS descriptor attached yet"
    fd = _c.INVALID_SOCKET

    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0):
        """Create a new socket."""
        fd = _c.socket(family, type, proto)
        if _c.invalid_socket(fd):
            raise self.error_handler()
        # PLAT RISCOS
        self.fd = fd
        self.family = family
        self.type = type
        self.proto = proto
        self.timeout = defaults.timeout

    def __del__(self):
        self.close()

    # _setblocking(): platform-specific way to toggle the fd between
    # blocking and non-blocking mode (fcntl on POSIX, ioctlsocket on
    # Windows).
    if hasattr(_c, 'fcntl'):
        def _setblocking(self, block):
            delay_flag = _c.fcntl(self.fd, _c.F_GETFL, 0)
            if block:
                delay_flag &= ~_c.O_NONBLOCK
            else:
                delay_flag |= _c.O_NONBLOCK
            _c.fcntl(self.fd, _c.F_SETFL, delay_flag)
    elif hasattr(_c, 'ioctlsocket'):
        def _setblocking(self, block):
            flag = c_ulong(not block)
            _c.ioctlsocket(self.fd, _c.FIONBIO, byref(flag))

    # _select(): wait for readiness up to self.timeout seconds; used to
    # implement timeouts on top of a non-blocking fd.
    if hasattr(_c, 'poll'):
        def _select(self, for_writing):
            """Returns 0 when reading/writing is possible,
            1 when timing out and -1 on error."""
            if self.timeout <= 0.0 or self.fd < 0:
                # blocking I/O or no socket.
                return 0
            pollfd = _c.pollfd()
            pollfd.fd = self.fd
            if for_writing:
                pollfd.events = _c.POLLOUT
            else:
                pollfd.events = _c.POLLIN
            timeout = int(self.timeout * 1000.0 + 0.5)   # milliseconds, rounded
            n = _c.poll(byref(pollfd), 1, timeout)
            if n < 0:
                return -1
            if n == 0:
                return 1
            return 0
    else:
        # Version without poll(): use select()
        def _select(self, for_writing):
            """Returns 0 when reading/writing is possible,
            1 when timing out and -1 on error."""
            if self.timeout <= 0.0 or self.fd < 0:
                # blocking I/O or no socket.
                return 0
            tv = _c.timeval(tv_sec=int(self.timeout),
                            tv_usec=int((self.timeout-int(self.timeout))
                                        * 1000000))
            fds = _c.fd_set(fd_count=1)
            fds.fd_array[0] = self.fd
            if for_writing:
                n = _c.select(self.fd + 1, None, byref(fds), None, byref(tv))
            else:
                n = _c.select(self.fd + 1, byref(fds), None, None, byref(tv))
            if n < 0:
                return -1
            if n == 0:
                return 1
            return 0

    def error_handler(self):
        # Build the exception for the current errno; overridable by mixers.
        return last_error()

    # convert an Address into an app-level object
    def addr_as_object(self, space, address):
        return address.as_object(space)

    # convert an app-level object into an Address
    # based on the current socket's family
    def addr_from_object(self, space, w_address):
        return af_get(self.family).from_object(space, w_address)

    def _addrbuf(self):
        # Return an empty Address of this socket's family plus a socklen_t
        # holding its buffer size, ready to be passed to C out-parameters.
        addr, maxlen = make_null_address(self.family)
        return addr, _c.socklen_t(maxlen)

    def accept(self, SocketClass=None):
        """Wait for an incoming connection.
        Return (new socket object, client address)."""
        if SocketClass is None:
            SocketClass = RSocket
        if self._select(False) == 1:
            raise SocketTimeout
        address, addrlen = self._addrbuf()
        newfd = _c.socketaccept(self.fd, byref(address.addr), byref(addrlen))
        if _c.invalid_socket(newfd):
            raise self.error_handler()
        address.addrlen = addrlen.value
        sock = make_socket(newfd, self.family, self.type, self.proto,
                           SocketClass)
        return (sock, address)

    def bind(self, address):
        """Bind the socket to a local address."""
        res = _c.socketbind(self.fd, byref(address.addr), address.addrlen)
        if res < 0:
            raise self.error_handler()

    def close(self):
        """Close the socket.  It cannot be used after this call."""
        fd = self.fd
        if fd != _c.INVALID_SOCKET:
            # mark closed *before* calling C, so a failure cannot lead to
            # a double close from __del__()
            self.fd = _c.INVALID_SOCKET
            res = _c.socketclose(fd)
            if res != 0:
                raise self.error_handler()

    def connect(self, address):
        """Connect the socket to a remote address."""
        res = _c.socketconnect(self.fd, byref(address.addr), address.addrlen)
        if self.timeout > 0.0:
            # non-blocking connect: EINPROGRESS means "in progress"; wait
            # for writability, then retry to pick up the final status
            errno = _c.geterrno()
            if res < 0 and errno == _c.EINPROGRESS:
                timeout = self._select(True)
                if timeout == 0:
                    res = _c.socketconnect(self.fd, byref(address.addr),
                                           address.addrlen)
                elif timeout == -1:
                    raise self.error_handler()
                else:
                    raise SocketTimeout
        if res != 0:
            raise self.error_handler()

    def connect_ex(self, address):
        """This is like connect(address), but returns an error code (the errno
        value) instead of raising an exception when an error occurs."""
        res = _c.socketconnect(self.fd, byref(address.addr), address.addrlen)
        if self.timeout > 0.0:
            # same EINPROGRESS dance as connect(), but reporting via errno
            errno = _c.geterrno()
            if res < 0 and errno == _c.EINPROGRESS:
                timeout = self._select(True)
                if timeout == 0:
                    res = _c.socketconnect(self.fd, byref(address.addr),
                                           address.addrlen)
                elif timeout == -1:
                    return _c.geterrno()
                else:
                    return _c.EWOULDBLOCK
        if res != 0:
            return _c.geterrno()
        return res

    if hasattr(_c, 'dup'):
        def dup(self, SocketClass=None):
            # Duplicate the descriptor and wrap it in a new socket object.
            if SocketClass is None:
                SocketClass = RSocket
            fd = _c.dup(self.fd)
            if fd < 0:
                raise self.error_handler()
            return make_socket(fd, self.family, self.type, self.proto,
                               SocketClass=SocketClass)

    def fileno(self):
        # Return the OS-level descriptor, refusing closed sockets.
        fd = self.fd
        if _c.invalid_socket(fd):
            raise RSocketError("socket already closed")
        return fd

    def getpeername(self):
        """Return the address of the remote endpoint."""
        address, addrlen = self._addrbuf()
        res = _c.socketgetpeername(self.fd, byref(address.addr),
                                   byref(addrlen))
        if res < 0:
            raise self.error_handler()
        address.addrlen = addrlen.value
        return address

    def getsockname(self):
        """Return the address of the local endpoint."""
        address, addrlen = self._addrbuf()
        res = _c.socketgetsockname(self.fd, byref(address.addr),
                                   byref(addrlen))
        if res < 0:
            raise self.error_handler()
        address.addrlen = addrlen.value
        return address

    def getsockopt(self, level, option, maxlen):
        # Return the raw bytes of a socket option, up to 'maxlen' long.
        buf = _c.create_string_buffer(maxlen)
        bufsize = _c.socklen_t()
        bufsize.value = maxlen
        res = _c.socketgetsockopt(self.fd, level, option, cast(buf, POINTER(c_char)), byref(bufsize))
        if res < 0:
            raise self.error_handler()
        size = bufsize.value
        assert size > 0       # socklen_t is signed on Windows
        return buf.raw[:size]

    def getsockopt_int(self, level, option):
        # Return an integer-valued socket option.
        flag = _c.c_int()
        flagsize = _c.socklen_t()
        flagsize.value = _c.sizeof(flag)
        res = _c.socketgetsockopt(self.fd, level, option,
                                  byref(flag), byref(flagsize))
        if res < 0:
            raise self.error_handler()
        return flag.value

    def gettimeout(self):
        """Return the timeout of the socket. A timeout < 0 means that
        timeouts are disabled in the socket."""
        return self.timeout

    def listen(self, backlog):
        """Enable a server to accept connections.  The backlog argument
        must be at least 1; it specifies the number of unaccepted connections
        that the system will allow before refusing new connections."""
        if backlog < 1:
            backlog = 1
        res = _c.socketlisten(self.fd, backlog)
        if res < 0:
            raise self.error_handler()

    def recv(self, buffersize, flags=0):
        """Receive up to buffersize bytes from the socket.  For the optional
        flags argument, see the Unix manual.  When no data is available, block
        until at least one byte is available or until the remote end is closed.
        When the remote end is closed and all data is read, return the empty
        string."""
        read_bytes = -1
        timeout = self._select(False)
        if timeout == 1:
            raise SocketTimeout
        elif timeout == 0:
            buf = create_string_buffer(buffersize)
            read_bytes = _c.socketrecv(self.fd, buf, buffersize, flags)
        if read_bytes < 0:
            raise self.error_handler()
        return buf[:read_bytes]

    def recvfrom(self, buffersize, flags=0):
        """Like recv(buffersize, flags) but also return the sender's
        address."""
        read_bytes = -1
        timeout = self._select(False)
        if timeout == 1:
            raise SocketTimeout
        elif timeout == 0:
            buf = create_string_buffer(buffersize)
            address, addrlen = self._addrbuf()
            read_bytes = _c.recvfrom(self.fd, buf, buffersize, flags,
                                     byref(address.addr), byref(addrlen))
        if read_bytes < 0:
            raise self.error_handler()
        # a zero addrlen (e.g. on connected sockets) means "no address"
        result_addrlen = addrlen.value
        if result_addrlen:
            address.addrlen = result_addrlen
        else:
            address = None
        return (buf[:read_bytes], address)

    def send(self, data, flags=0):
        """Send a data string to the socket.  For the optional flags
        argument, see the Unix manual.  Return the number of bytes
        sent; this may be less than len(data) if the network is busy."""
        res = -1
        timeout = self._select(False)
        if timeout == 1:
            raise SocketTimeout
        elif timeout == 0:
            res = _c.send(self.fd, data, len(data), flags)
        if res < 0:
            raise self.error_handler()
        return res

    def sendall(self, data, flags=0):
        """Send a data string to the socket.  For the optional flags
        argument, see the Unix manual.  This calls send() repeatedly
        until all data is sent.  If an error occurs, it's impossible
        to tell how much data has been sent."""
        while data:
            res = self.send(data, flags)
            data = data[res:]

    def sendto(self, data, flags, address):
        """Like send(data, flags) but allows specifying the destination
        address.  (Note that 'flags' is mandatory here.)"""
        res = -1
        timeout = self._select(False)
        if timeout == 1:
            raise SocketTimeout
        elif timeout == 0:
            res = _c.sendto(self.fd, data, len(data), flags,
                            byref(address.addr), address.addrlen)
        if res < 0:
            raise self.error_handler()
        return res

    def setblocking(self, block):
        # blocking mode is represented as timeout < 0, non-blocking as 0.0
        if block:
            timeout = -1.0
        else:
            timeout = 0.0
        self.settimeout(timeout)

    def setsockopt(self, level, option, value):
        # Set a socket option from its raw byte-string value.
        res = _c.socketsetsockopt(self.fd, level, option, c_char_p(value), len(value))
        if res < 0:
            raise self.error_handler()

    def setsockopt_int(self, level, option, value):
        # Set an integer-valued socket option.
        flag = _c.c_int(value)
        res = _c.socketsetsockopt(self.fd, level, option,
                                  byref(flag), _c.sizeof(flag))
        if res < 0:
            raise self.error_handler()

    def settimeout(self, timeout):
        """Set the timeout of the socket. A timeout < 0 means that
        timeouts are disabled in the socket."""
        if timeout < 0.0:
            self.timeout = -1.0
        else:
            self.timeout = timeout
        # only a negative timeout keeps the fd blocking; 0.0 and positive
        # timeouts both use a non-blocking fd plus _select() waits
        self._setblocking(self.timeout < 0.0)

    def shutdown(self, how):
        """Shut down the reading side of the socket (flag == SHUT_RD), the
        writing side of the socket (flag == SHUT_WR), or both ends
        (flag == SHUT_RDWR)."""
        res = _c.socketshutdown(self.fd, how)
        if res < 0:
            raise self.error_handler()
# ____________________________________________________________
def make_socket(fd, family, type, proto, SocketClass=RSocket):
    """Wrap an already-open OS descriptor *fd* in a SocketClass instance,
    bypassing SocketClass.__init__() (which would create a new fd)."""
    sock = instantiate(SocketClass)
    sock.fd = fd
    sock.family = family
    sock.type = type
    sock.proto = proto
    sock.timeout = defaults.timeout
    return sock
make_socket._annspecialcase_ = 'specialize:arg(4)'
class SocketError(Exception):
    """Base class of all socket-related errors raised by this module."""
    applevelerrcls = 'error'   # name of the matching app-level exception

    def __init__(self):
        pass

    def get_msg(self):
        return ''

    def __str__(self):
        return self.get_msg()
class SocketErrorWithErrno(SocketError):
    """A socket error that carries the C-level errno value."""

    def __init__(self, errno):
        self.errno = errno
class RSocketError(SocketError):
    """A socket error detected at the RPython level, with a fixed message."""

    def __init__(self, message):
        self.message = message

    def get_msg(self):
        return self.message
class CSocketError(SocketErrorWithErrno):
    """An error reported by a C library call; the message comes from the
    platform's strerror()."""

    def get_msg(self):
        return _c.socket_strerror(self.errno)
def last_error():
    """Build a CSocketError from the current value of errno."""
    return CSocketError(_c.geterrno())
class GAIError(SocketErrorWithErrno):
    """A getaddrinfo()/getnameinfo() failure (app-level 'gaierror')."""
    applevelerrcls = 'gaierror'

    def get_msg(self):
        return _c.gai_strerror(self.errno)
class HSocketError(SocketError):
    """A host-lookup failure (app-level 'herror').

    XXX h_errno is not easily available, and hstrerror() is
    marked as deprecated in the Linux man pages.
    """
    applevelerrcls = 'herror'

    def __init__(self, host):
        self.host = host

    def get_msg(self):
        return "host lookup failed: '%s'" % (self.host,)
class SocketTimeout(SocketError):
    """Raised when an operation exceeds the socket's timeout
    (app-level 'timeout')."""
    applevelerrcls = 'timeout'

    def get_msg(self):
        return 'timed out'
class Defaults:
    """Process-wide defaults applied to every newly created socket."""
    timeout = -1.0   # negative means blocking mode

defaults = Defaults()
# ____________________________________________________________
# Default family for socketpair(): AF_UNIX when the platform defines it,
# AF_INET otherwise.
if 'AF_UNIX' not in constants or AF_UNIX is None:
    socketpair_default_family = AF_INET
else:
    socketpair_default_family = AF_UNIX
if hasattr(_c, 'socketpair'):
    def socketpair(family=socketpair_default_family, type=SOCK_STREAM, proto=0,
                   SocketClass=RSocket):
        """socketpair([family[, type[, proto]]]) -> (socket object, socket object)

        Create a pair of socket objects from the sockets returned by the platform
        socketpair() function.
        The arguments are the same as for socket() except the default family is
        AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
        """
        result = _c.socketpair_t()   # out-parameter: array of two descriptors
        res = _c.socketpair(family, type, proto, byref(result))
        if res < 0:
            raise last_error()
        return (make_socket(result[0], family, type, proto, SocketClass),
                make_socket(result[1], family, type, proto, SocketClass))
if hasattr(_c, 'dup'):
    def fromfd(fd, family, type, proto=0, SocketClass=RSocket):
        """Wrap an existing descriptor in a socket object."""
        # Dup the fd so it and the socket can be closed independently
        fd = _c.dup(fd)
        if fd < 0:
            raise last_error()
        return make_socket(fd, family, type, proto, SocketClass)
def getdefaulttimeout():
    """Return the default timeout (in seconds) applied to new sockets;
    a negative value means blocking mode."""
    return defaults.timeout
def gethostname():
    """Return the current host name as a string."""
    buf = create_string_buffer(1024)
    res = _c.gethostname(buf, sizeof(buf)-1)
    if res < 0:
        raise last_error()
    # make sure the result is NUL-terminated even if it was truncated
    buf[sizeof(buf)-1] = '\x00'
    return buf.value
def gethostbyname(name):
    """Resolve *name* to an INETAddress.

    This is explicitly not working with IPv6, because the docs say it
    should not.  Just use makeipaddr(name) for an IPv6-friendly version.
    """
    addr = instantiate(INETAddress)
    makeipaddr(name, addr)
    return addr
def gethost_common(hostname, hostent, addr=None):
    # Convert a C 'struct hostent*' into the app-level triple
    # (canonical_name, aliases, address_list).  'addr', when given, is only
    # used for the family sanity check.
    if not hostent:
        raise HSocketError(hostname)
    family = hostent.contents.h_addrtype
    if addr is not None and addr.family != family:
        raise CSocketError(_c.EAFNOSUPPORT)
    aliases = []
    h_aliases = hostent.contents.h_aliases
    if h_aliases: # h_aliases can be NULL, according to SF #1511317
        # walk the NULL-terminated array of strings (ctypes yields None
        # for the terminating NULL entry)
        i = 0
        alias = h_aliases[0]
        while alias is not None:
            aliases.append(alias)
            i += 1
            alias = h_aliases[i]
    address_list = []
    h_addr_list = hostent.contents.h_addr_list
    # walk the NULL-terminated array of raw address pointers, wrapping each
    # one in the Address subclass matching the hostent's family
    # (note: this rebinds the local name 'addr' inside the loop)
    i = 0
    paddr = h_addr_list[0]
    while paddr:
        if family == AF_INET:
            p = cast(paddr, POINTER(_c.in_addr))
            addr = INETAddress.from_in_addr(p.contents)
        elif AF_INET6 is not None and family == AF_INET6:
            p = cast(paddr, POINTER(_c.in6_addr))
            addr = INET6Address.from_in6_addr(p.contents)
        else:
            raise RSocketError("unknown address family")
        address_list.append(addr)
        i += 1
        paddr = h_addr_list[i]
    return (hostent.contents.h_name, aliases, address_list)
def gethostbyname_ex(name):
    """Return (canonical_name, aliases, address_list) for *name*.

    XXX use gethostbyname_r() if available, and/or use locks if not.
    """
    primary = gethostbyname(name)
    hostent = _c.gethostbyname(name)
    return gethost_common(name, hostent, primary)
def gethostbyaddr(ip):
    """Reverse-resolve *ip*; return (name, aliases, address_list).

    XXX use gethostbyaddr_r() if available, and/or use locks if not.
    """
    addr = makeipaddr(ip)
    ptr, size = addr.extract_in_addr()
    hostent = _c.gethostbyaddr(ptr, size, addr.family)
    return gethost_common(ip, hostent, addr)
def getaddrinfo(host, port_or_service,
                family=AF_UNSPEC, socktype=0, proto=0, flags=0,
                address_to_fill=None):
    # Resolve (host, service) into a list of
    # (family, socktype, proto, canonname, address) tuples.
    # port_or_service is a string, not an int (but try str(port_number)).
    assert port_or_service is None or isinstance(port_or_service, str)
    hints = _c.addrinfo(ai_family = family,
                        ai_socktype = socktype,
                        ai_protocol = proto,
                        ai_flags = flags)
    # XXX need to lock around getaddrinfo() calls?
    res = _c.addrinfo_ptr()
    error = _c.getaddrinfo(host, port_or_service, byref(hints), byref(res))
    if error:
        raise GAIError(error)
    try:
        result = []
        # walk the linked list of addrinfo structures
        p = res
        while p:
            info = p.contents
            # copy the raw sockaddr out before freeaddrinfo() releases it
            addr = make_address(info.ai_addr, info.ai_addrlen, address_to_fill)
            canonname = info.ai_canonname
            if canonname is None:
                canonname = ""
            result.append((info.ai_family,
                           info.ai_socktype,
                           info.ai_protocol,
                           canonname,
                           addr))
            p = info.ai_next
    finally:
        _c.freeaddrinfo(res)
    return result
def getservbyname(name, proto=None):
    """Return the port number (in host byte order) of the service *name*."""
    entry = _c.getservbyname(name, proto)
    if not entry:
        raise RSocketError("service/proto not found")
    return _c.ntohs(entry.contents.s_port)
def getservbyport(port, proto=None):
    """Return the name of the service registered on *port*."""
    entry = _c.getservbyport(htons(port), proto)
    if not entry:
        raise RSocketError("port/proto not found")
    return entry.contents.s_name
def getprotobyname(name):
    """Return the protocol number registered for the protocol *name*."""
    protoent = _c.getprotobyname(name)
    if protoent:
        return protoent.contents.p_proto
    raise RSocketError("protocol not found")
def getnameinfo(addr, flags):
    """Translate the address object *addr* into a (host, service) pair of
    strings, honouring the NI_* bits in *flags*.  Raises GAIError on
    failure."""
    hostbuf = create_string_buffer(NI_MAXHOST)
    servbuf = create_string_buffer(NI_MAXSERV)
    error = _c.getnameinfo(pointer(addr.addr), addr.addrlen,
                           hostbuf, len(hostbuf),
                           servbuf, len(servbuf), flags)
    if error:
        raise GAIError(error)
    return hostbuf.value, servbuf.value
if hasattr(_c, 'inet_aton'):
    def inet_aton(ip):
        "IPv4 dotted string -> packed 32-bits string"
        buf = create_string_buffer(sizeof(_c.in_addr))
        if _c.inet_aton(ip, cast(buf, POINTER(_c.in_addr))):
            return buf.raw
        else:
            raise RSocketError("illegal IP address string passed to inet_aton")
else:
    # Fallback for platforms without inet_aton(): emulate it with
    # inet_addr(), which cannot distinguish "255.255.255.255" from its
    # INADDR_NONE error value, so that address is special-cased first.
    def inet_aton(ip):
        "IPv4 dotted string -> packed 32-bits string"
        if ip == "255.255.255.255":
            return "\xff\xff\xff\xff"
        packed_addr = _c.inet_addr(ip)
        if _c.c_long(packed_addr).value == INADDR_NONE:
            raise RSocketError("illegal IP address string passed to inet_aton")
        # copy the 4 raw bytes of the packed address out of the c_ulong
        buf = copy_buffer(cast(pointer(c_ulong(packed_addr)),
                               POINTER(c_char)), 4)
        return buf.raw
def inet_ntoa(packed):
    "packet 32-bits string -> IPv4 dotted string"
    # copy the packed bytes into a ctypes buffer and hand its in_addr view
    # to the C library
    size = sizeof(_c.in_addr)
    if len(packed) != size:
        raise RSocketError("packed IP wrong length for inet_ntoa")
    buf = create_string_buffer(size)
    buf.raw = packed
    return _c.inet_ntoa(cast(buf, POINTER(_c.in_addr)).contents)
if hasattr(_c, 'inet_pton'):
    def inet_pton(family, ip):
        "human-readable string -> packed string"
        # pick the output buffer size for the family; AF_INET6 may be None
        # on platforms built without IPv6 support
        if family == AF_INET:
            size = sizeof(_c.in_addr)
        elif AF_INET6 is not None and family == AF_INET6:
            size = sizeof(_c.in6_addr)
        else:
            raise RSocketError("unknown address family")
        buf = create_string_buffer(size)
        res = _c.inet_pton(family, ip, cast(buf, c_void_p))
        if res < 0:
            # system-level failure: report the OS error
            raise last_error()
        elif res == 0:
            # input was not a valid address for this family
            raise RSocketError("illegal IP address string passed to inet_pton")
        else:
            return buf.raw
if hasattr(_c, 'inet_ntop'):
    def inet_ntop(family, packed):
        "packed string -> human-readable string"
        # source/destination sizes depend on the family; AF_INET6 may be
        # None on platforms built without IPv6 support
        if family == AF_INET:
            srcsize = sizeof(_c.in_addr)
            dstsize = _c.INET_ADDRSTRLEN
        elif AF_INET6 is not None and family == AF_INET6:
            srcsize = sizeof(_c.in6_addr)
            dstsize = _c.INET6_ADDRSTRLEN
        else:
            raise RSocketError("unknown address family")
        if len(packed) != srcsize:
            raise ValueError("packed IP wrong length for inet_ntop")
        srcbuf = create_string_buffer(srcsize)
        srcbuf.raw = packed
        dstbuf = create_string_buffer(dstsize)
        res = _c.inet_ntop(family, cast(srcbuf, c_void_p), dstbuf, dstsize)
        if res is None:
            # NULL return means failure; report the OS error
            raise last_error()
        return res
def setdefaulttimeout(timeout):
    """Set the module-wide default socket timeout.  Any negative value is
    collapsed to -1.0, which means 'no default timeout'."""
    if timeout >= 0.0:
        defaults.timeout = timeout
    else:
        defaults.timeout = -1.0
# _______________________________________________________________
#
# Patch module, for platforms without getaddrinfo / getnameinfo
#
if not getattr(_c, 'getaddrinfo', None):
    # The C library lacks getaddrinfo(): shadow the definition above with
    # the pure-Python fallback, and give GAIError a matching get_msg().
    from pypy.rlib.getaddrinfo import getaddrinfo
    from pypy.rlib.getaddrinfo import GAIError_getmsg
    GAIError.get_msg = GAIError_getmsg
if not getattr(_c, 'getnameinfo', None):
    # Same for getnameinfo(): use the pure-Python fallback.
    from pypy.rlib.getnameinfo import getnameinfo
    from pypy.rlib.getnameinfo import NI_NUMERICHOST, NI_NUMERICSERV
| Python |
from pypy.rpython.extregistry import ExtRegistryEntry
# ____________________________________________________________
# Framework GC features
class GcPool(object):
    """Opaque application-level handle for a GC allocation pool
    (see gc_swap_pool() and gc_clone() below)."""
    pass
def gc_swap_pool(newpool):
    """Set newpool as the current pool (create one if newpool is None).
    All malloc'ed objects are put into the current pool; this is a
    way to separate objects depending on when they were allocated.
    """
    # Untranslated stub: real behaviour is supplied by SwapPoolFnEntry
    # during translation with a pool-aware GC.
    raise NotImplementedError("only works in stacklessgc translated versions")
def gc_clone(gcobject, pool):
    """Recursively clone the gcobject and everything it points to,
    directly or indirectly -- but stops at objects that are not
    in the specified pool.  Pool can be None to mean the current one.
    A new pool is built to contain the copies.  Return (newobject, newpool).
    """
    # Untranslated stub: real behaviour is supplied by CloneFnEntry
    # during translation with a pool-aware GC.
    raise NotImplementedError("only works in stacklessgc translated versions")
# ____________________________________________________________
# Annotation and specialization
class GcPoolEntry(ExtRegistryEntry):
    "Link GcPool to its Repr."
    _type_ = GcPool
    def get_repr(self, rtyper, s_pool):
        config = rtyper.getconfig()
        # if the gc policy doesn't support allocation pools, lltype
        # pools as Void.
        if config.translation.gc not in ['framework', 'stacklessgc']:
            from pypy.annotation.model import s_None
            return rtyper.getrepr(s_None)
        else:
            # pool-aware GCs represent a pool as a pointer to X_POOL
            from pypy.rpython.rmodel import SimplePointerRepr
            from pypy.rpython.memory.gc import X_POOL_PTR
            return SimplePointerRepr(X_POOL_PTR)
class SwapPoolFnEntry(ExtRegistryEntry):
    "Annotation and specialization of gc_swap_pool()."
    _about_ = gc_swap_pool
    def compute_result_annotation(self, s_newpool):
        # the result (the previous pool) is annotated as a GcPool
        from pypy.annotation import model as annmodel
        return annmodel.SomeExternalObject(GcPool)
    def specialize_call(self, hop):
        from pypy.annotation import model as annmodel
        s_pool_ptr = annmodel.SomeExternalObject(GcPool)
        r_pool_ptr = hop.rtyper.getrepr(s_pool_ptr)
        # with a pool-aware GC, emit the 'gc_x_swap_pool' operation
        opname = 'gc_x_swap_pool'
        config = hop.rtyper.getconfig()
        if config.translation.gc not in ['framework', 'stacklessgc']:
            # when the gc policy doesn't support pools, just return
            # the argument (which is lltyped as Void anyway)
            opname = 'same_as'
            s_pool_ptr = annmodel.SomeExternalObject(GcPool)
            r_pool_ptr = hop.rtyper.getrepr(s_pool_ptr)
        vlist = hop.inputargs(r_pool_ptr)
        return hop.genop(opname, vlist, resulttype = r_pool_ptr)
def _raise():
    # Helper invoked (via gendirectcall) by CloneFnEntry.specialize_call()
    # so that gc_clone() raises RuntimeError when pools are unsupported.
    raise RuntimeError
class CloneFnEntry(ExtRegistryEntry):
    "Annotation and specialization of gc_clone()."
    _about_ = gc_clone
    def compute_result_annotation(self, s_gcobject, s_pool):
        # gc_clone() returns a (cloned-object, new-pool) tuple
        from pypy.annotation import model as annmodel
        return annmodel.SomeTuple([s_gcobject,
                                   annmodel.SomeExternalObject(GcPool)])
    def specialize_call(self, hop):
        from pypy.rpython.error import TyperError
        from pypy.rpython.lltypesystem import lltype, llmemory, rtuple
        from pypy.annotation import model as annmodel
        from pypy.rpython.memory.gc import X_CLONE, X_CLONE_PTR
        config = hop.rtyper.getconfig()
        if config.translation.gc not in ['framework', 'stacklessgc']:
            # if the gc policy does not support allocation pools,
            # gc_clone always raises RuntimeError
            hop.exception_is_here()
            hop.gendirectcall(_raise)
            # still build a (dead) result tuple so the graph typechecks
            s_pool_ptr = annmodel.SomeExternalObject(GcPool)
            r_pool_ptr = hop.rtyper.getrepr(s_pool_ptr)
            r_tuple = hop.r_result
            v_gcobject, v_pool = hop.inputargs(hop.args_r[0], r_pool_ptr)
            return rtuple.newtuple(hop.llops, r_tuple, [v_gcobject, v_pool])
        # only GC-managed pointers can be cloned
        r_gcobject = hop.args_r[0]
        if (not isinstance(r_gcobject.lowleveltype, lltype.Ptr) or
            r_gcobject.lowleveltype.TO._gckind != 'gc'):
            raise TyperError("gc_clone() can only clone a dynamically "
                             "allocated object;\ngot %r" % (r_gcobject,))
        s_pool_ptr = annmodel.SomeExternalObject(GcPool)
        r_pool_ptr = hop.rtyper.getrepr(s_pool_ptr)
        r_tuple = hop.r_result
        # fill an X_CLONE struct with the operands, run 'gc_x_clone' on it,
        # then read the results back out of the same struct
        c_CLONE = hop.inputconst(lltype.Void, X_CLONE)
        c_gcobjectptr = hop.inputconst(lltype.Void, "gcobjectptr")
        c_pool = hop.inputconst(lltype.Void, "pool")
        v_gcobject, v_pool = hop.inputargs(hop.args_r[0], r_pool_ptr)
        v_gcobjectptr = hop.genop('cast_opaque_ptr', [v_gcobject],
                                  resulttype = llmemory.GCREF)
        v_clonedata = hop.genop('malloc', [c_CLONE],
                                resulttype = X_CLONE_PTR)
        hop.genop('setfield', [v_clonedata, c_gcobjectptr, v_gcobjectptr])
        hop.genop('setfield', [v_clonedata, c_pool, v_pool])
        hop.exception_is_here()
        hop.genop('gc_x_clone', [v_clonedata])
        v_gcobjectptr = hop.genop('getfield', [v_clonedata, c_gcobjectptr],
                                  resulttype = llmemory.GCREF)
        v_pool = hop.genop('getfield', [v_clonedata, c_pool],
                           resulttype = r_pool_ptr)
        v_gcobject = hop.genop('cast_opaque_ptr', [v_gcobjectptr],
                               resulttype = r_tuple.items_r[0])
        return rtuple.newtuple(hop.llops, r_tuple, [v_gcobject, v_pool])
# Support for collection.
import gc
class CollectEntry(ExtRegistryEntry):
    """Annotation and rtyping of calls to gc.collect()."""
    _about_ = gc.collect
    def compute_result_annotation(self):
        from pypy.annotation import model as annmodel
        return annmodel.s_None
    def specialize_call(self, hop):
        # translate gc.collect() into the 'gc__collect' low-level operation
        return hop.genop('gc__collect', [], resulttype=hop.r_result)
| Python |
# These are here only because it's always better safe than sorry.
# The issue is that from time to time CPython's termios.tcgetattr
# returns a list of mostly length-one strings, but with a few ints
# mixed in, so we make sure it works
import termios
from termios import *
def tcgetattr(fd):
    # NOT_RPYTHON
    # Wrapper around termios.tcgetattr() that normalizes the cc field:
    # CPython sometimes returns ints mixed in with the one-char strings,
    # so convert every int back to its character.
    attrs = list(termios.tcgetattr(fd))
    normalized = []
    for item in attrs[-1]:
        if isinstance(item, int):
            normalized.append(chr(item))
        else:
            normalized.append(item)
    attrs[-1] = normalized
    return tuple(attrs)
def tcsetattr(fd, when, mode):
    # NOT_RPYTHON
    # there are some bizarre requirements for that, stealing directly
    # from cpython
    mode_l = list(mode)
    if mode_l[3] & termios.ICANON:
        # In canonical mode, convert VMIN/VTIME back from the chr() form
        # produced by tcgetattr() above to the ints CPython expects.
        # NOTE(review): 'cc' is the same list object as mode[-1] (list()
        # makes only a shallow copy), so the caller's cc list is mutated
        # in place -- confirm this is intended before changing it.
        cc = mode_l[-1]
        cc[termios.VMIN] = ord(cc[termios.VMIN])
        cc[termios.VTIME] = ord(cc[termios.VTIME])
        mode_l[-1] = cc
    return termios.tcsetattr(fd, when, mode_l)
| Python |
"""
This file defines utilities for manipulating the stack in an
RPython-compliant way, intended mostly for use by the Stackless PyPy.
"""
import inspect
def stack_unwind():
    # Untranslated stub: unwinding the C stack is only possible in
    # stackless-translated builds.
    raise RuntimeError("cannot unwind stack in non-translated versions")
def stack_capture():
    # Untranslated stub: capturing the C stack is only possible in
    # stackless-translated builds.
    raise RuntimeError("cannot unwind stack in non-translated versions")
def stack_frames_depth():
    """Return the current depth of the (CPython-level) call stack."""
    frames = inspect.stack()
    return len(frames)
def stack_too_big():
    # Untranslated, CPython manages its own stack limits, so never report
    # an overflow here.
    return False
def stack_check():
    # No-op untranslated, since stack_too_big() above is always False.
    if stack_too_big():
        # stack_unwind implementation is different depending on if stackless
        # is enabled. If it is it unwinds the stack, otherwise it simply
        # raises a RuntimeError.
        stack_unwind()
# ____________________________________________________________
def yield_current_frame_to_caller():
    # Untranslated stub; the real operation exists only after stackless
    # translation.
    raise NotImplementedError("only works in translated versions")
class frame_stack_top(object):
    """Handle to a captured stack state; usable only in translated
    stackless builds."""
    def switch(self):
        raise NotImplementedError("only works in translated versions")
from pypy.rpython.extregistry import ExtRegistryEntry
def resume_point(label, *args, **kwds):
    # No-op when running on CPython; during translation, calls are turned
    # into 'resume_point' operations by ResumePointFnEntry below.
    pass
class ResumePointFnEntry(ExtRegistryEntry):
    """Annotation and rtyping of calls to resume_point()."""
    _about_ = resume_point
    def compute_result_annotation(self, s_label, *args_s, **kwds_s):
        from pypy.annotation import model as annmodel
        return annmodel.s_None
    def specialize_call(self, hop, **kwds_i):
        from pypy.rpython.lltypesystem import lltype
        from pypy.objspace.flow import model
        # the label must be a compile-time constant
        assert hop.args_s[0].is_constant()
        c_label = hop.inputconst(lltype.Void, hop.args_s[0].const)
        args_v = hop.args_v[1:]
        if 'i_returns' in kwds_i:
            # the optional returns= keyword names the value returned at
            # the resume point; pull it out of the positional args
            assert len(kwds_i) == 1
            returns_index = kwds_i['i_returns']
            v_return = args_v.pop(returns_index-1)
            assert isinstance(v_return, model.Variable), \
                   "resume_point returns= argument must be a Variable"
        else:
            assert not kwds_i
            v_return = hop.inputconst(lltype.Void, None)
        for v in args_v:
            assert isinstance(v, model.Variable), "resume_point arguments must be Variables"
        hop.exception_is_here()
        return hop.genop('resume_point', [c_label, v_return] + args_v,
                         hop.r_result)
def resume_state_create(prevstate, label, *args):
    # Untranslated stub; see ResumeStateCreateFnEntry below for the
    # translated behaviour.
    raise RuntimeError("cannot resume states in non-translated versions")
def concretify_argument(hop, index):
    # Return the rtyped Variable for argument 'index' of the call,
    # turning a Constant into an input argument of its bound repr.
    from pypy.objspace.flow import model
    v_arg = hop.args_v[index]
    if isinstance(v_arg, model.Variable):
        return v_arg
    r_arg = hop.rtyper.bindingrepr(v_arg)
    return hop.inputarg(r_arg, arg=index)
class ResumeStateCreateFnEntry(ExtRegistryEntry):
    """Annotation and rtyping of calls to resume_state_create()."""
    _about_ = resume_state_create
    def compute_result_annotation(self, s_prevstate, s_label, *args_s):
        from pypy.annotation import model as annmodel
        return annmodel.SomeExternalObject(frame_stack_top)
    def specialize_call(self, hop):
        from pypy.rpython.lltypesystem import lltype
        from pypy.rpython.rmodel import SimplePointerRepr
        from pypy.translator.stackless.frame import STATE_HEADER
        # the label must be a compile-time constant
        assert hop.args_s[1].is_constant()
        c_label = hop.inputconst(lltype.Void, hop.args_s[1].const)
        v_state = hop.inputarg(hop.r_result, arg=0)
        # concretify the remaining positional arguments
        args_v = []
        for i in range(2, len(hop.args_v)):
            args_v.append(concretify_argument(hop, i))
        hop.exception_is_here()
        return hop.genop('resume_state_create', [v_state, c_label] + args_v,
                         hop.r_result)
def resume_state_invoke(type, state, **kwds):
    # Untranslated stub; see ResumeStateInvokeFnEntry below for the
    # translated behaviour.
    raise NotImplementedError("only works in translated versions")
class ResumeStateInvokeFnEntry(ExtRegistryEntry):
    """Annotation and rtyping of calls to resume_state_invoke()."""
    _about_ = resume_state_invoke
    def compute_result_annotation(self, s_type, s_state, **kwds):
        from pypy.annotation.bookkeeper import getbookkeeper
        # the 'type' argument must be constant; the result is annotated
        # as a value of that type
        assert s_type.is_constant()
        return getbookkeeper().valueoftype(s_type.const)
    def specialize_call(self, hop, **kwds_i):
        from pypy.rpython.lltypesystem import lltype
        v_state = hop.args_v[1]
        # at most one of returning= / raising= may be given; the missing
        # one is passed as a Void None
        if 'i_returning' in kwds_i:
            assert len(kwds_i) == 1
            returning_index = kwds_i['i_returning']
            v_returning = concretify_argument(hop, returning_index)
            v_raising = hop.inputconst(lltype.Void, None)
        elif 'i_raising' in kwds_i:
            assert len(kwds_i) == 1
            raising_index = kwds_i['i_raising']
            v_returning = hop.inputconst(lltype.Void, None)
            v_raising = concretify_argument(hop, raising_index)
        else:
            assert not kwds_i
            v_returning = hop.inputconst(lltype.Void, None)
            v_raising = hop.inputconst(lltype.Void, None)
        hop.exception_is_here()
        return hop.genop('resume_state_invoke', [v_state, v_returning, v_raising],
                         hop.r_result)
| Python |
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rlib.objectmodel import CDefinedIntSymbolic
def purefunction(func):
    """Decorator that flags *func* as a pure function for the translation
    toolchain and hands it back unchanged."""
    setattr(func, '_pure_function_', True)
    return func
def hint(x, **kwds):
    # Identity function when running on CPython; the keyword hints are
    # interpreted during annotation/rtyping by the Entry class below.
    return x
class Entry(ExtRegistryEntry):
    """Annotation and rtyping of calls to hint()."""
    _about_ = hint
    def compute_result_annotation(self, s_x, **kwds_s):
        from pypy.annotation import model as annmodel
        s_x = annmodel.not_const(s_x)
        if 's_access_directly' in kwds_s:
            # for virtualizable instances, propagate the
            # 'access_directly' flag into the annotation
            if isinstance(s_x, annmodel.SomeInstance):
                from pypy.objspace.flow.model import Constant
                classdesc = s_x.classdef.classdesc
                virtualizable = classdesc.read_attribute('_virtualizable_',
                                                         Constant(False)).value
                if virtualizable:
                    flags = s_x.flags.copy()
                    flags['access_directly'] = True
                    s_x = annmodel.SomeInstance(s_x.classdef,
                                                s_x.can_be_None,
                                                flags)
        return s_x
    def specialize_call(self, hop, **kwds_i):
        from pypy.rpython.lltypesystem import lltype
        # collect the keyword hints; each must be a compile-time constant
        hints = {}
        for key, index in kwds_i.items():
            s_value = hop.args_s[index]
            if not s_value.is_constant():
                from pypy.rpython.error import TyperError
                raise TyperError("hint %r is not constant" % (key,))
            assert key.startswith('i_')
            hints[key[2:]] = s_value.const
        v = hop.inputarg(hop.args_r[0], arg=0)
        c_hint = hop.inputconst(lltype.Void, hints)
        hop.exception_cannot_occur()
        return hop.genop('hint', [v, c_hint], resulttype=v.concretetype)
def we_are_jitted():
    # Always False on CPython; after translation the call is replaced by
    # the symbolic constant below (see the Entry class that follows).
    return False
# timeshifts to True
_we_are_jitted = CDefinedIntSymbolic('0 /* we are not jitted here */',
                                     default=0)
class Entry(ExtRegistryEntry):
    """Annotation and rtyping of calls to we_are_jitted()."""
    _about_ = we_are_jitted
    def compute_result_annotation(self):
        from pypy.annotation import model as annmodel
        return annmodel.SomeInteger(nonneg=True)
    def specialize_call(self, hop):
        from pypy.rpython.lltypesystem import lltype
        # replace the call with the _we_are_jitted symbolic constant
        return hop.inputconst(lltype.Signed, _we_are_jitted)
def _is_early_constant(x):
    # Always False on CPython; constant-folded or turned into the
    # 'is_early_constant' operation by the Entry class below.
    return False
class Entry(ExtRegistryEntry):
    """Annotation and rtyping of calls to _is_early_constant()."""
    _about_ = _is_early_constant
    def compute_result_annotation(self, s_value):
        from pypy.annotation import model as annmodel
        s = annmodel.SomeBool()
        # if the argument is already an annotation-time constant, the
        # result is the constant True
        if s_value.is_constant():
            s.const = True
        return s
    def specialize_call(self, hop):
        from pypy.rpython.lltypesystem import lltype
        if hop.s_result.is_constant():
            assert hop.s_result.const
            return hop.inputconst(lltype.Bool, True)
        v, = hop.inputargs(hop.args_r[0])
        return hop.genop('is_early_constant', [v], resulttype=lltype.Bool)
| Python |
from pypy.rlib.rctypes.implementation import CTypeController, getcontroller
from pypy.rlib.rctypes import rctypesobject
from pypy.rpython.lltypesystem import lltype
from ctypes import ARRAY, c_int, c_char
ArrayType = type(ARRAY(c_int, 10))
class ArrayCTypeController(CTypeController):
    """Controller for fixed-length ctypes array types (ARRAY(T, n))."""
    def __init__(self, ctype):
        CTypeController.__init__(self, ctype)
        # delegate per-item handling to the item type's own controller
        self.itemcontroller = getcontroller(ctype._type_)
        self.length = ctype._length_
        self.knowntype = rctypesobject.RFixedArray(
                                           self.itemcontroller.knowntype,
                                           self.length)
        # build a custom new() that copies the constructor arguments into
        # the freshly allocated array
        def arraynew(*args):
            obj = self.knowntype.allocate()
            if args:
                if len(args) > self.length:
                    raise ValueError("too many arguments for an array of "
                                     "length %d" % (self.length,))
                lst = list(args)
                for i in range(len(args)):
                    self.setitem(obj, i, lst[i])
            return obj
        self.new = arraynew
    def getitem(self, obj, i):
        # read item i via the item controller's return-value conversion
        itemobj = obj.ref(i)
        return self.itemcontroller.return_value(itemobj)
    getitem._annspecialcase_ = 'specialize:arg(0)'
    def setitem(self, obj, i, value):
        itemobj = obj.ref(i)
        self.itemcontroller.set_value(itemobj, value)
    setitem._annspecialcase_ = 'specialize:arg(0)'
# handle all types built with ctypes.ARRAY()
ArrayCTypeController.register_for_metatype(ArrayType)
| Python |
from pypy.rlib.rctypes.implementation import CTypeController
from pypy.rlib.rctypes import rctypesobject
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.rctypes import rcarithmetic as rcarith
from ctypes import c_char, c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint
from ctypes import c_long, c_ulong, c_longlong, c_ulonglong, c_float
from ctypes import c_double, c_wchar, c_char_p
# Map each supported ctypes primitive type to the lltype (or
# rcarithmetic type) used to represent its value in RPython.
ctypes_annotation_list = {
    c_char: lltype.Char,
    #c_wchar: lltype.UniChar,
    c_byte: rcarith.CByte,
    c_ubyte: rcarith.CUByte,
    c_short: rcarith.CShort,
    c_ushort: rcarith.CUShort,
    c_int: rcarith.CInt,
    c_uint: rcarith.CUInt,
    c_long: rcarith.CLong,
    c_ulong: rcarith.CULong,
    c_longlong: rcarith.CLonglong,
    c_ulonglong: rcarith.CULonglong,
    #c_float: lltype.Float,
    c_double: lltype.Float,
    }   # nb. platform-dependent duplicate ctypes are removed
def return_lltype(ll_type):
    """Map *ll_type* to the lltype in which values should be returned:
    lltype.Number types are normalized, everything else passes through."""
    if not isinstance(ll_type, lltype.Number):
        return ll_type
    return ll_type.normalized()
class PrimitiveCTypeController(CTypeController):
    """Controller for the primitive ctypes listed in
    ctypes_annotation_list (c_int, c_double, c_char, ...)."""
    def __init__(self, ctype):
        CTypeController.__init__(self, ctype)
        # VALUETYPE: the storage lltype; RETTYPE: the normalized lltype
        # returned to callers
        self.VALUETYPE = ctypes_annotation_list[ctype]
        self.RETTYPE = return_lltype(self.VALUETYPE)
        self.is_char_type = self.VALUETYPE in (lltype.Char, lltype.UniChar)
        self.knowntype = rctypesobject.Primitive(self.VALUETYPE)
        # custom new(): accepts an optional single initial value
        def primitivenew(*initialvalue):
            obj = self.knowntype.allocate()
            if len(initialvalue) > 0:
                if len(initialvalue) > 1:
                    raise TypeError("at most 1 argument expected")
                self.set_value(obj, initialvalue[0])
            return obj
        self.new = primitivenew
    def initialize_prebuilt(self, obj, x):
        value = x.value
        # convert 'value' to anything that cast_primitive will be happy with
        if type(value) is long:
            if value >= 0:
                value = rcarith.rculonglong(value)
            else:
                value = rcarith.rclonglong(value)
        self.set_value(obj, value)
    def get_value(self, obj):
        llvalue = obj.get_value()
        return lltype.cast_primitive(self.RETTYPE, llvalue)
    get_value._annspecialcase_ = 'specialize:arg(0)'
    def set_value(self, obj, value):
        # for integer and float types, any integer is accepted and silently
        # cast. For char types, do a precise check
        if self.is_char_type:
            if lltype.typeOf(value) != self.RETTYPE:
                raise TypeError("'value' must be set to a %s" % (
                    self.RETTYPE,))
        llvalue = lltype.cast_primitive(self.VALUETYPE, value)
        obj.set_value(llvalue)
    set_value._annspecialcase_ = 'specialize:arg(0)'
    # ctypes automatically unwraps the c_xxx() of primitive types when
    # they are returned by most operations
    return_value = get_value
    store_value = set_value
    def default_ctype_value(self):
        # value a freshly constructed ctype instance carries
        return self.ctype().value
    def is_true(self, obj):
        llvalue = self.get_value(obj)
        if self.is_char_type:
            # chars are truth-tested by their ordinal, like C
            llvalue = ord(llvalue)
        return bool(llvalue)
    is_true._annspecialcase_ = 'specialize:arg(0)'
# register the controller for every supported primitive ctype
for _ctype in ctypes_annotation_list:
    PrimitiveCTypeController.register_for_type(_ctype)
| Python |
from pypy.rlib.rctypes.implementation import CTypeController
from pypy.rlib.rctypes import rctypesobject
from ctypes import c_char_p
class CCharPCTypeController(CTypeController):
    """Controller for ctypes.c_char_p: a nullable C string value."""
    knowntype = rctypesobject.rc_char_p
    def new(self, initialvalue=None):
        # allocate a fresh rc_char_p box, optionally set to a string
        obj = rctypesobject.rc_char_p.allocate()
        obj.set_value(initialvalue)
        return obj
    def initialize_prebuilt(self, obj, x):
        string = x.value
        obj.set_value(string)
    def get_value(self, obj):
        return obj.get_value()
    def set_value(self, obj, string):
        obj.set_value(string)
    # ctypes automatically unwraps the c_char_p() instances when
    # they are returned by most operations
    return_value = get_value
    store_value = set_value
    def default_ctype_value(self):
        # a freshly constructed c_char_p() holds the NULL string
        return None
    def is_true(self, obj):
        # only the NULL pointer is false -- even "" is true
        return obj.get_value() is not None
CCharPCTypeController.register_for_type(c_char_p)
| Python |
from pypy.annotation import model as annmodel
from pypy.rlib.rctypes.implementation import CTypeController, getcontroller
from pypy.rlib.rctypes import rctypesobject
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rpython.lltypesystem import lltype
from pypy.rlib.unroll import unrolling_iterable
from ctypes import Structure
StructType = type(Structure)
class StructCTypeController(CTypeController):
    """Controller for ctypes.Structure subclasses: builds per-field
    controllers and specialized constructor/getter/setter closures."""
    def __init__(self, ctype):
        CTypeController.__init__(self, ctype)
        # Map the field names to their controllers
        controllers = []
        fields = []
        for name, field_ctype in ctype._fields_:
            controller = getcontroller(field_ctype)
            setattr(self, 'fieldcontroller_' + name, controller)
            controllers.append((name, controller))
            fields.append((name, controller.knowntype))
        external = getattr(ctype, '_external_', False)
        self.knowntype = rctypesobject.RStruct(ctype.__name__, fields,
                                               c_external = external)
        self.fieldcontrollers = controllers
        # Build a custom new() method where the setting of the fields
        # is unrolled
        unrolled_controllers = unrolling_iterable(controllers)
        def structnew(*args):
            obj = self.knowntype.allocate()
            if len(args) > len(fields):
                raise ValueError("too many arguments for this structure")
            for name, controller in unrolled_controllers:
                if args:
                    value = args[0]
                    args = args[1:]
                    # boxed values are stored directly, plain values are
                    # converted by the field controller
                    if controller.is_box(value):
                        structsetboxattr(obj, name, value)
                    else:
                        structsetattr(obj, name, value)
            return obj
        self.new = structnew
        # Build custom getter and setter methods
        def structgetattr(obj, attr):
            controller = getattr(self, 'fieldcontroller_' + attr)
            itemobj = getattr(obj, 'ref_' + attr)()
            return controller.return_value(itemobj)
        structgetattr._annspecialcase_ = 'specialize:arg(1)'
        def structsetattr(obj, attr, value):
            controller = getattr(self, 'fieldcontroller_' + attr)
            itemobj = getattr(obj, 'ref_' + attr)()
            controller.store_value(itemobj, value)
        structsetattr._annspecialcase_ = 'specialize:arg(1)'
        def structsetboxattr(obj, attr, valuebox):
            controller = getattr(self, 'fieldcontroller_' + attr)
            itemobj = getattr(obj, 'ref_' + attr)()
            controller.store_box(itemobj, valuebox)
        structsetboxattr._annspecialcase_ = 'specialize:arg(1)'
        self.getattr = structgetattr
        self.setattr = structsetattr
        self.setboxattr = structsetboxattr
    def initialize_prebuilt(self, obj, x):
        # copy each field of the prebuilt ctypes instance 'x' into 'obj'
        for name, controller in self.fieldcontrollers:
            fieldbox = controller.convert(getattr(x, name))
            self.setboxattr(obj, name, fieldbox)
    def insert_constructor_keywords(self, lst, prefix, kwds):
        # Merge keyword constructor arguments ('prefix' + fieldname) into
        # the positional list 'lst', by field position; unknown or
        # duplicate keywords are TyperErrors.
        lst = list(lst)
        kwds = kwds.copy()
        for index, (name, field_ctype) in enumerate(self.ctype._fields_):
            if prefix+name in kwds:
                value = kwds.pop(prefix+name)
                while len(lst) <= index:
                    lst.append(None)
                if lst[index] is not None:
                    from pypy.rpython.error import TyperError
                    raise TyperError("duplicate value for argument %r" % name)
                lst[index] = value
        if kwds:
            from pypy.rpython.error import TyperError
            raise TyperError("unknown keyword(s): %r" % (kwds.keys(),))
        return lst
    def ctrl_new_ex(self, bookkeeper, *args_s, **kwds_s):
        # annotation-time constructor: fill keyword/missing arguments with
        # each field's default ctype value
        if kwds_s:
            args_s = self.insert_constructor_keywords(args_s, 's_', kwds_s)
            for i in range(len(args_s)):
                if args_s[i] is None:
                    name, controller = self.fieldcontrollers[i]
                    x = controller.default_ctype_value()
                    args_s[i] = bookkeeper.immutablevalue(x)
        return CTypeController.ctrl_new(self, *args_s)
    def rtype_new(self, hop, **kwds_i):
        # rtyping-time constructor: rebuild the hop with keyword arguments
        # moved to their positional slots and defaults inserted
        if kwds_i:
            lst = range(hop.nb_args)
            for key, index in kwds_i.items():
                lst[index] = None
            lst = self.insert_constructor_keywords(lst, 'i_', kwds_i)
            hop2 = hop.copy()
            hop2.nb_args = len(lst)
            hop2.args_v = []
            hop2.args_s = []
            hop2.args_r = []
            for i, index in enumerate(lst):
                if index is not None:
                    v = hop.args_v[index]
                    s = hop.args_s[index]
                    r = hop.args_r[index]
                else:
                    # must insert a default value
                    from pypy.objspace.flow.model import Constant
                    name, controller = self.fieldcontrollers[i]
                    x = controller.default_ctype_value()
                    v = Constant(x)
                    s = hop.rtyper.annotator.bookkeeper.immutablevalue(x)
                    r = hop.rtyper.getrepr(s)
                hop2.args_v.append(v)
                hop2.args_s.append(s)
                hop2.args_r.append(r)
            hop = hop2
        return CTypeController.rtype_new(self, hop)
StructCTypeController.register_for_metatype(StructType)
# ____________________________________________________________
def offsetof(Struct, fieldname):
    "Utility function that returns the offset of a field in a structure."
    # ctypes exposes the byte offset on each field descriptor
    field_descr = getattr(Struct, fieldname)
    return field_descr.offset
class OffsetOfFnEntry(ExtRegistryEntry):
    "Annotation and rtyping of calls to offsetof()"
    _about_ = offsetof
    def compute_result_annotation(self, s_Struct, s_fieldname):
        # both arguments must be annotation-time constants, so the offset
        # itself can be computed here and become a constant
        assert s_Struct.is_constant()
        assert s_fieldname.is_constant()
        ofs = offsetof(s_Struct.const, s_fieldname.const)
        assert ofs >= 0
        s_result = annmodel.SomeInteger(nonneg=True)
        s_result.const = ofs
        return s_result
    def specialize_call(self, hop):
        # the offset was computed at annotation time; emit it as a constant
        ofs = hop.s_result.const
        return hop.inputconst(lltype.Signed, ofs)
| Python |
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython import annlowlevel
from pypy.interpreter.miscutils import InitializedClass
from pypy.tool.sourcetools import func_with_new_name
from pypy.rlib.objectmodel import keepalive_until_here
class RawMemBlock(object):
    """Records which other RawMemBlocks a piece of raw memory keeps
    alive.  The base class owns its 'keepalives' list; a sub-block (see
    RawMemSubBlock) shares a parent's list at an offset."""
    ofs_keepalives = 0
    def __init__(self, num_keepalives):
        # one slot per keepalive; each slot holds a RawMemBlock or None
        self.keepalives = [None] * num_keepalives
    def addoffset(self, ofs_keepalives):
        # a zero offset needs no wrapper object
        if ofs_keepalives != 0:
            return RawMemSubBlock(self, ofs_keepalives)
        return self
    def getkeepalive(self, index):
        return self.keepalives[index + self.ofs_keepalives]
    def setkeepalive(self, index, memblock):
        self.keepalives[index + self.ofs_keepalives] = memblock
EMPTY_RAW_MEM_BLOCK = RawMemBlock(0)
class AllocatedRawMemBlock(RawMemBlock):
    """A RawMemBlock that owns a raw_malloc'ed piece of memory and frees
    it when the block is garbage-collected."""
    def __init__(self, num_keepalives, rawsize, zero=True):
        RawMemBlock.__init__(self, num_keepalives)
        addr = llmemory.raw_malloc(rawsize)
        self.addr = addr
        if zero:
            # callers that fully overwrite the memory pass zero=False
            llmemory.raw_memclear(addr, rawsize)
        #print 'raw_malloc: %x' % llmemory.cast_adr_to_int(addr)
    def __del__(self):
        #print 'raw_free: %x' % llmemory.cast_adr_to_int(self.addr)
        llmemory.raw_free(self.addr)
class RawMemSubBlock(RawMemBlock):
    """A view on a parent RawMemBlock's keepalives list, shifted by
    ofs_keepalives; shares (does not copy) the parent's list."""
    def __init__(self, baseblock, ofs_keepalives):
        self.baseblock = baseblock   # reference keeps the parent alive
        self.keepalives = baseblock.keepalives
        self.ofs_keepalives = ofs_keepalives
##    def _addoffset(self, ofs_keepalives):
##        ofs_keepalives = self.ofs_keepalives + ofs_keepalives
##        return RawMemSubBlock(self.baseblock, ofs_keepalives)
class RCTypesObject(object):
    """Base class of all rctypes 'box' objects: a raw address plus the
    RawMemBlock that keeps the referenced memory alive.  Subclasses
    define LLTYPE; __initclass__ then derives CDATATYPE, rawsize and the
    allocate/get_value/set_value/copyfrom methods automatically."""
    __metaclass__ = InitializedClass
    num_keepalives = 0
    __slots__ = ('addr', 'memblock')
    def __init__(self, addr, memblock):
        self.addr = addr
        self.memblock = memblock
    def __initclass__(cls):
        # Called once per subclass by the InitializedClass metaclass.
        if hasattr(cls, 'LLTYPE'):
            cls.__name__ = 'RCTypes_%s' % (cls.LLTYPE,)
            if not hasattr(cls, 'CDATATYPE'):
                # non-container lltypes are wrapped in a 1-element array
                if isinstance(cls.LLTYPE, lltype.ContainerType):
                    cls.CDATATYPE = cls.LLTYPE
                else:
                    cls.CDATATYPE = lltype.FixedSizeArray(cls.LLTYPE, 1)
            if not getattr(cls, 'can_allocate', True):
                return
            if not hasattr(cls, 'rawsize'):
                cls.rawsize = llmemory.sizeof(cls.CDATATYPE)
            # allocate(): make a fresh box owning zeroed raw memory
            def allocate1():
                memblock = AllocatedRawMemBlock(cls.num_keepalives,
                                                cls.rawsize)
                return cls(memblock.addr, memblock)
            cls.allocate = staticmethod(allocate1)
            # derive get_value/set_value from the class's converters
            if hasattr(cls, 'llvalue2value') and not hasattr(cls, 'get_value'):
                def get_value(self):
                    ptr = self.ll_ref(cls.CDATATYPE)
                    res = cls.llvalue2value(ptr[0])
                    keepalive_until_here(self)
                    return res
                cls.get_value = get_value
            if hasattr(cls, 'value2llvalue') and not hasattr(cls, 'set_value'):
                def set_value(self, value):
                    ptr = self.ll_ref(cls.CDATATYPE)
                    ptr[0] = cls.value2llvalue(value)
                    keepalive_until_here(self)
                cls.set_value = set_value
            # copyfrom(): via get/set when available, else raw memcopy
            if hasattr(cls, 'get_value') and hasattr(cls, 'set_value'):
                def copyfrom1(self, srcbox):
                    assert isinstance(srcbox, cls)
                    self.set_value(srcbox.get_value())
                    self._copykeepalives(0, srcbox)
            else:
                def copyfrom1(self, srcbox):
                    assert isinstance(srcbox, cls)
                    llmemory.raw_memcopy(srcbox.addr, self.addr, cls.rawsize)
                    self._copykeepalives(0, srcbox)
            cls.copyfrom = copyfrom1
    def sameaddr(self, otherbox):
        return self.addr == otherbox.addr
    def sizeof(self):
        return self.rawsize
    def _keepalivememblock(self, index, memblock):
        self.memblock.setkeepalive(index, memblock)
    def _copykeepalives(self, startindex, srcbox):
        # copy our num_keepalives slots from srcbox, starting at its
        # 'startindex' slot
        for i in range(self.num_keepalives):
            memblock = srcbox.memblock.getkeepalive(startindex + i)
            self.memblock.setkeepalive(i, memblock)
    def _getmemblock(self, index, target_num_keepalives):
        # lazily create the RawMemBlock stored in keepalive slot 'index'
        targetmemblock = self.memblock.getkeepalive(index)
        if targetmemblock is None:
            targetmemblock = RawMemBlock(target_num_keepalives)
            self.memblock.setkeepalive(index, targetmemblock)
        return targetmemblock
    def ll_ref(self, CDATATYPE):
        # Return a ptr to the memory that this object references.
        # WARNING: always use 'keepalive_until_here(self)' when you
        # are done using this ptr! Otherwise the memory might be
        # deallocated.
        return llmemory.cast_adr_to_ptr(self.addr, lltype.Ptr(CDATATYPE))
    ll_ref._annspecialcase_ = 'specialize:arg(1)'
# classes that may not be instantiated/pointed-to directly
_abstract_classes = [RCTypesObject]
# ____________________________________________________________
# cache so that Primitive(TYPE) always returns the same class per TYPE
_primitive_cache = {}
def Primitive(TYPE):
    """Build and return a new RCTypesPrimitive class."""
    try:
        return _primitive_cache[TYPE]
    except KeyError:
        assert not isinstance(TYPE, lltype.ContainerType)
        class RCTypesPrimitive(RCTypesObject):
            LLTYPE = TYPE
            # primitives store their llvalue as-is: no conversion needed
            def _no_conversion_needed(x):
                return x
            llvalue2value = staticmethod(_no_conversion_needed)
            value2llvalue = staticmethod(_no_conversion_needed)
            #def get_value(self): added by __initclass__() above
            #def set_value(self, value): added by __initclass__() above
        _primitive_cache[TYPE] = RCTypesPrimitive
        return RCTypesPrimitive
# a few prebuilt primitive types
rc_int = Primitive(lltype.Signed)
rc_char = Primitive(lltype.Char)
# ____________________________________________________________
##class _RCTypesStringData(object):
## ARRAYTYPE = lltype.FixedSizeArray(lltype.Char, 1)
## ITEMOFS = llmemory.sizeof(lltype.Char)
## def __init__(self, bufsize):
## rawsize = self.ITEMOFS * bufsize
## self.addr = llmemory.raw_malloc(rawsize)
## def __del__(self):
## llmemory.raw_free(self.addr)
def strlen(p):
    """Return the index of the first NUL character in *p* (the length of
    the C string it holds)."""
    idx = 0
    while True:
        if p[idx] == '\x00':
            return idx
        idx += 1
def strnlen(p, n_max):
    """Like strlen(), but never look beyond the first *n_max* characters
    of *p*; returns n_max if no NUL is found before that."""
    for idx in range(n_max):
        if p[idx] == '\x00':
            return idx
    return n_max
def charp2string(p, length):
    """Copy exactly *length* characters out of *p* and return them as an
    RPython string."""
    # fill a preallocated list, then join -- RPython-friendly string build
    chars = ['\x00'] * length
    i = 0
    while i < length:
        chars[i] = p[i]
        i += 1
    return ''.join(chars)
def string2charp(p, length, string):
    """Copy *string* into the character buffer *p* (capacity *length*).
    If the string is shorter than the buffer, exactly one terminating NUL
    is written after it and the rest of the buffer is left untouched; if
    it is as long or longer, no NUL is written."""
    ncopy = min(length, len(string))
    for i in range(ncopy):
        p[i] = string[i]
    if ncopy < length:
        p[ncopy] = '\x00'
class RCTypesCharP(RCTypesObject):
    """Box holding a nullable C string pointer (c_char_p equivalent).
    Setting a value allocates a fresh NUL-terminated buffer which is kept
    alive through keepalive slot 0."""
    ARRAYTYPE = lltype.FixedSizeArray(lltype.Char, 1)
    ITEMOFS = llmemory.sizeof(lltype.Char)
    LLTYPE = lltype.Ptr(ARRAYTYPE)
    num_keepalives = 1
    def llvalue2value(p):
        # NULL pointer reads back as None, otherwise copy out the string
        if p:
            length = strlen(p)
            return charp2string(p, length)
        else:
            return None
    llvalue2value = staticmethod(llvalue2value)
    #def get_value(self): added by __initclass__() above
    def set_value(self, string):
        if string is not None:
            # allocate len+1 chars and copy the string plus a NUL;
            # zero=False since every byte is overwritten below
            n = len(string)
            rawsize = RCTypesCharP.ITEMOFS * (n + 1)
            targetmemblock = AllocatedRawMemBlock(0, rawsize, zero=False)
            targetaddr = targetmemblock.addr
            a = targetaddr
            for i in range(n):
                a.char[0] = string[i]
                a += RCTypesCharP.ITEMOFS
            a.char[0] = '\x00'
        else:
            targetmemblock = None
            targetaddr = llmemory.NULL
        # store the new pointer and keep the buffer alive via slot 0
        ptr = self.ll_ref(RCTypesCharP.CDATATYPE)
        ptr[0] = llmemory.cast_adr_to_ptr(targetaddr, RCTypesCharP.LLTYPE)
        keepalive_until_here(self)
        self._keepalivememblock(0, targetmemblock)
rc_char_p = RCTypesCharP
# ____________________________________________________________
def RPointer(contentscls):
    """Build and return a new RCTypesPointer class."""
    # memoized via the '_ptrcls' attribute set on contentscls by
    # _rpointer_set_pointer_type()
    try:
        return contentscls._ptrcls
    except AttributeError:
        class RCTypesPtr(RCTypesObject):
            # starts as a forward reference; become()'s the real contents
            # type in setpointertype()
            LLTYPE = lltype.Ptr(lltype.ForwardReference())
            num_keepalives = 1
            setpointertype = classmethod(_rpointer_set_pointer_type)
            def get_contents(self):
                # build a contents box sharing our keepalive slot 0
                ptr = self.ll_ref(RCTypesPtr.CDATATYPE)
                targetaddr = llmemory.cast_ptr_to_adr(ptr[0])
                keepalive_until_here(self)
                targetkeepalives = RCTypesPtr.CONTENTSCLS.num_keepalives
                targetmemblock = self._getmemblock(0, targetkeepalives)
                return RCTypesPtr.CONTENTSCLS(targetaddr, targetmemblock)
            def ref(self, index):
                # like get_contents(), but for item 'index' of an array
                ptr = self.ll_ref(RCTypesPtr.CDATATYPE)
                targetaddr = llmemory.cast_ptr_to_adr(ptr[0])
                if index:
                    targetaddr += self._OFS_ITEM * index
                keepalive_until_here(self)
                targetkeepalives = RCTypesPtr.CONTENTSCLS.num_keepalives
                targetmemblock = self._getmemblock(0, targetkeepalives)
                return RCTypesPtr.CONTENTSCLS(targetaddr, targetmemblock)
            def set_contents(self, newcontentsbox):
                targetaddr = newcontentsbox.addr
                targetmemblock = newcontentsbox.memblock
                ptr = self.ll_ref(RCTypesPtr.CDATATYPE)
                ptr[0] = llmemory.cast_adr_to_ptr(targetaddr,
                                                  RCTypesPtr.LLTYPE)
                keepalive_until_here(self)
                self._keepalivememblock(0, targetmemblock)
            def is_null(self):
                ptr = self.ll_ref(RCTypesPtr.CDATATYPE)
                res = not ptr[0]
                keepalive_until_here(self)
                return res
            def set_null(self):
                ptr = self.ll_ref(RCTypesPtr.CDATATYPE)
                ptr[0] = lltype.nullptr(RCTypesPtr.LLTYPE.TO)
                keepalive_until_here(self)
                self._keepalivememblock(0, None)
        if contentscls is None:
            pass # forward pointer
        else:
            RCTypesPtr.setpointertype(contentscls)
        return RCTypesPtr
RPointer._annspecialcase_ = 'specialize:memo'
def _rpointer_set_pointer_type(RCTypesPtr, contentscls, force=False):
    # Plug 'contentscls' in as the target type of the pointer class
    # 'RCTypesPtr', resolving its lltype ForwardReference.  'force' is
    # used when re-targeting a placeholder built with RPointer(None).
    assert issubclass(contentscls, RCTypesObject)
    if contentscls in _abstract_classes:
        raise Exception("cannot call RPointer(%s) or "
                        "pointer(x) if x degenerated to the base "
                        "%s class" % (contentscls.__name__,
                                      contentscls.__name__,))
    RCTypesPtr.CONTENTSCLS = contentscls
    RCTypesPtr.CONTENTS = contentscls.CDATATYPE
    RCTypesPtr.LLTYPE.TO.become(RCTypesPtr.CONTENTS)
    RCTypesPtr._OFS_ITEM = llmemory.sizeof(contentscls.LLTYPE)
    RCTypesPtr.__name__ = 'RCTypes_%s' % (RCTypesPtr.LLTYPE,)
    if not force:
        # at most one cached pointer class per contents class
        assert not hasattr(contentscls, '_ptrcls'), (
            "the RPointer class corresponding to %r exists already" %
            (contentscls,))
        contentscls._ptrcls = RCTypesPtr
def pointer(x):
    """Return a freshly allocated pointer box that points to the box 'x'."""
    ptrcls = RPointer(x.__class__)
    result = ptrcls.allocate()
    result.set_contents(x)
    return result
pointer._annspecialcase_ = 'specialize:argtype(0)'
def sizeof(x):
    """Return the size, in bytes, of the rctypes object 'x'."""
    return x.sizeof()
# ____________________________________________________________
def RStruct(c_name, fields, c_external=False):
    """Build and return a new RCTypesStruct class.
    'fields' is a list of (name, field_box_class) pairs; one 'ref_<name>'
    accessor method is generated per field."""
    def cmangle(name):
        # obscure: names starting with '_' are not allowed in
        # lltype.Struct, so we prefix all names with 'c_'
        return 'c_' + name
    fieldclasses = {}
    llfields = []
    num_keepalives = 0
    for name, fieldboxcls in fields:
        llname = cmangle(name)
        # each field records the offset of its keepalive slots within
        # the structure's own keepalive array
        fieldclasses[name] = llname, fieldboxcls, num_keepalives
        llfields.append((llname, fieldboxcls.LLTYPE))
        num_keepalives += fieldboxcls.num_keepalives
    extras = {'hints': {'c_name': c_name, 'external': c_external}}
    STRUCT = lltype.Struct(c_name, *llfields, **extras)
    class RCTypesStruct(RCTypesObject):
        LLTYPE = STRUCT
    RCTypesStruct.num_keepalives = num_keepalives
    def make_accessors(fieldname):
        # separate function so each generated refgetter closes over its
        # own field data
        llname, fieldboxcls, ofs_keepalives = fieldclasses[fieldname]
        FIELD = fieldboxcls.LLTYPE
        FIELDOFS = llmemory.offsetof(STRUCT, llname)
        def refgetter(self):
            # box for the field, sharing this structure's raw memory
            subaddr = self.addr + FIELDOFS
            subblock = self.memblock.addoffset(ofs_keepalives)
            return fieldboxcls(subaddr, subblock)
        setattr(RCTypesStruct, 'ref_' + fieldname, refgetter)
    for name in fieldclasses:
        make_accessors(name)
    return RCTypesStruct
# ____________________________________________________________
def RFixedArray(itemcls, fixedsize):
    """Build and return a new RCTypesFixedArray class."""
    # one array class is cached per (item class, fixed size) pair
    key = '_fixedarraycls%d' % (fixedsize,)
    try:
        return getattr(itemcls, key)
    except AttributeError:
        assert issubclass(itemcls, RCTypesObject)
        if itemcls in _abstract_classes:
            raise Exception("cannot call RFixedArray(%s)" % (
                itemcls.__name__,))
        ARRAYTYPE = lltype.FixedSizeArray(itemcls.LLTYPE, fixedsize)
        FIRSTITEMOFS = llmemory.ArrayItemsOffset(ARRAYTYPE)
        ITEMOFS = llmemory.sizeof(itemcls.LLTYPE)
        class RCTypesFixedArray(RCTypesObject):
            ITEM = ARRAYTYPE.OF
            LLTYPE = ARRAYTYPE
            length = fixedsize
            num_keepalives = itemcls.num_keepalives * fixedsize
            def ref(self, n):
                # box for item 'n' (no bounds checking), sharing this
                # array's raw memory and keepalive slots
                subaddr = self.addr + (FIRSTITEMOFS + ITEMOFS * n)
                subblock = self.memblock.addoffset(itemcls.num_keepalives * n)
                return itemcls(subaddr, subblock)
        if itemcls is rc_char:
            # attach special methods for arrays of chars
            def as_ll_charptr(self):
                ptr = self.ll_ref(ARRAYTYPE)
                return lltype.direct_arrayitems(ptr)
            _initialize_array_of_char(RCTypesFixedArray, as_ll_charptr)
        setattr(itemcls, key, RCTypesFixedArray)
        return RCTypesFixedArray
RFixedArray._annspecialcase_ = 'specialize:memo'
def RVarArray(itemcls):
    """Build and return a new RCTypesVarArray class.
    Note that this is *not* a subclass of RCTypesObject, so you cannot
    take a pointer to it, use it as a field of a structure, etc.
    You can take a pointer to one of its elements (e.g. the first),
    though, and that pointer will keep the whole array alive.
    """
    # one var-array class is cached per item class
    try:
        return itemcls._vararraycls
    except AttributeError:
        assert issubclass(itemcls, RCTypesObject)
        if itemcls in _abstract_classes:
            raise Exception("cannot call RVarArray(%s)" % (
                itemcls.__name__,))
        ARRAYTYPE = lltype.Array(itemcls.LLTYPE, hints={'nolength': True})
        FIRSTITEMOFS = llmemory.ArrayItemsOffset(ARRAYTYPE)
        ITEMOFS = llmemory.sizeof(itemcls.LLTYPE)
        class RCTypesVarArray(object):
            ITEM = ARRAYTYPE.OF
            def __init__(self, addr, memblock, length):
                self.addr = addr
                self.memblock = memblock
                self.length = length
            def sizeof(self):
                rawsize = FIRSTITEMOFS + ITEMOFS * self.length
                return rawsize
            def allocate(length):
                # fresh raw memory with one keepalive slot group per item
                rawsize = FIRSTITEMOFS + ITEMOFS * length
                num_keepalives = itemcls.num_keepalives * length
                memblock = AllocatedRawMemBlock(num_keepalives, rawsize)
                addr = memblock.addr + FIRSTITEMOFS
                return RCTypesVarArray(addr, memblock, length)
            allocate = staticmethod(allocate)
            def fromitem(itembox, length):
                """Return a VarArray from a reference to its first element.
                Note that if you use the VarArray to store pointer-ish data,
                you have to keep the VarArray alive as long as you want
                this new data to stay alive.
                """
                assert isinstance(itembox, itemcls)
                num_keepalives = itemcls.num_keepalives * length
                memblock = RawMemBlock(num_keepalives)
                res = RCTypesVarArray(itembox.addr, memblock, length)
                # keep the original item's memory alive via the array
                res._keepalive_memblock_fromitem = itembox.memblock
                return res
            fromitem = staticmethod(fromitem)
            def ref(self, n):
                # box for item 'n' (no bounds checking)
                subaddr = self.addr + ITEMOFS * n
                subblock = self.memblock.addoffset(itemcls.num_keepalives * n)
                return itemcls(subaddr, subblock)
        if itemcls is rc_char:
            # attach special methods for arrays of chars
            def as_ll_charptr(self):
                return llmemory.cast_adr_to_ptr(self.addr, RCTypesCharP.LLTYPE)
            _initialize_array_of_char(RCTypesVarArray, as_ll_charptr)
        itemcls._vararraycls = RCTypesVarArray
        return RCTypesVarArray
RVarArray._annspecialcase_ = 'specialize:memo'
# ____________________________________________________________
def _initialize_array_of_char(RCClass, as_ll_charptr):
    # Attach additional methods for fixed- or variable-sized arrays of char.
    # 'as_ll_charptr' converts an instance into a low-level char pointer.
    def get_value(self):
        # the string up to (excluding) the first '\x00', like ctypes .value
        p = as_ll_charptr(self)
        n = strnlen(p, self.length)
        res = charp2string(p, n)
        keepalive_until_here(self)
        return res
    def set_value(self, string):
        # copy 'string' in, '\x00'-terminated if there is room
        string2charp(as_ll_charptr(self), self.length, string)
        keepalive_until_here(self)
    def get_raw(self):
        # the whole buffer, embedded '\x00's included, like ctypes .raw
        res = charp2string(as_ll_charptr(self), self.length)
        keepalive_until_here(self)
        return res
    def get_substring(self, start, length):
        # substring of the raw buffer; no bounds checking
        p = lltype.direct_ptradd(as_ll_charptr(self), start)
        res = charp2string(p, length)
        keepalive_until_here(self)
        return res
    RCClass.get_value = get_value
    RCClass.set_value = set_value
    RCClass.get_raw = get_raw
    RCClass.get_substring = get_substring
# ctypes-like helper: allocate a fresh variable-sized array of chars
# (presumably zero-filled like ctypes' create_string_buffer — depends on
# AllocatedRawMemBlock's default 'zero' behaviour; confirm there)
create_string_buffer = RVarArray(rc_char).allocate
# ____________________________________________________________
# cache of generated function classes, keyed by (argument classes, result class)
_functype_cache = {}
def RFuncType(args_cls, rescls):
    """Build and return a new RCTypesFunc class.
    Note that like lltype, but unlike ctypes, a 'function' type is not
    automatically a pointer to a function. Conceptually, it represents
    the area of memory where the function's machine code is stored."""
    args_cls = tuple(args_cls)
    try:
        return _functype_cache[args_cls, rescls]
    except KeyError:
        ARGS = [cls.LLTYPE for cls in args_cls]
        RES = rescls.LLTYPE
        FUNCTYPE = lltype.FuncType(ARGS, RES)
        PTRTYPE = lltype.Ptr(FUNCTYPE)
        class RCTypesFunc(RCTypesObject):
            LLTYPE = FUNCTYPE
            can_allocate = False
            def fromllptr(p):
                # wrap an existing low-level function pointer; functions
                # have no raw memory of ours to keep alive
                addr = llmemory.cast_ptr_to_adr(p)
                memblock = EMPTY_RAW_MEM_BLOCK
                return RCTypesFunc(addr, memblock)
            fromllptr = staticmethod(fromllptr)
            def fromrpython(func):
                """Return an RCTypes function that references the given
                RPython function."""
                p = annlowlevel.llhelper(PTRTYPE, func)
                return RCTypesFunc.fromllptr(p)
            fromrpython._annspecialcase_ = 'specialize:arg(0)'
            fromrpython = staticmethod(fromrpython)
            def fromlib(rlib, c_funcname, llinterp_friendly_version=None):
                # reference the external C function 'c_funcname' from
                # the library described by 'rlib'
                flags = {'external': 'C'}
                if rlib.pythonapi:
                    pass # no 'includes': hack to trigger
                         # in GenC a PyErr_Occurred() check
                else:
                    flags['includes'] = rlib.c_includes
                    flags['libraries'] = rlib.c_libs
                if llinterp_friendly_version:
                    # pure-Python stand-in used when running on the llinterp
                    flags['_callable'] = llinterp_friendly_version
                p = lltype.functionptr(FUNCTYPE, c_funcname, **flags)
                return RCTypesFunc.fromllptr(p)
            fromlib._annspecialcase_ = 'specialize:memo'
            fromlib = staticmethod(fromlib)
            def call(self, *args):
                assert len(args) == len(ARGS)
                p = llmemory.cast_adr_to_ptr(self.addr, PTRTYPE)
                return p(*args)
        _functype_cache[args_cls, rescls] = RCTypesFunc
        return RCTypesFunc
RFuncType._annspecialcase_ = 'specialize:memo'
class RLibrary(object):
    """A C library.  Use to create references to external functions.
    """
    # XXX for now, lltype only supports functions imported from external
    # libraries, not variables
    pythonapi = False

    def __init__(self, c_libs=(), c_includes=()):
        # a single name is accepted as a shorthand for a 1-tuple
        if isinstance(c_libs, str):
            c_libs = (c_libs,)
        if isinstance(c_includes, str):
            c_includes = (c_includes,)
        self.c_libs = c_libs
        self.c_includes = c_includes

    def _freeze_(self):
        # NOTE(review): returning True presumably marks instances as
        # frozen prebuilt constants for the RPython toolchain — confirm
        return True

LIBC = RLibrary()
| Python |
from pypy.annotation import model as annmodel
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.error import TyperError
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rpython.controllerentry import SomeControlledInstance
from pypy.rlib.rctypes.implementation import getcontroller
from pypy.rlib.rctypes.implementation import register_function_impl
from pypy.rlib.rctypes import rctypesobject
import ctypes
#
# pointer()
#
# ctypes.pointer(box) delegates to rctypesobject.pointer(); argument 0 is
# revealed (unwrapped to its rctypes object) and the result is re-wrapped
# as a controlled POINTER(ctype) instance.
register_function_impl(ctypes.pointer, rctypesobject.pointer,
                       revealargs = [0],
                       revealresult = lambda s_obj: ctypes.POINTER(
                                          s_obj.controller.ctype))
#
# POINTER()
#
class Entry(ExtRegistryEntry):
    "Annotation and rtyping of calls to ctypes.POINTER(): constant-folded."
    _about_ = ctypes.POINTER

    def compute_result_annotation(self, s_arg):
        # POINTER(constant_ctype) returns the constant annotation
        # corresponding to the POINTER(ctype).
        assert s_arg.is_constant(), (
            "POINTER(%r): argument must be constant" % (s_arg,))
        RESTYPE = ctypes.POINTER(s_arg.const)
        # POINTER(varsized_array_type): given that rctypes performs
        # no index checking, this pointer-to-array type is equivalent
        # to a pointer to an array of whatever size.
        # ('0' is a bad idea, though, as FixedSizeArrays of length 0
        # tend to say they have impossible items.)
        #XXX: RESTYPE = POINTER(s_arg.ctype_array._type_ * 1)
        return self.bookkeeper.immutablevalue(RESTYPE)

    def specialize_call(self, hop):
        # the result is a compile-time constant ctype: emit a Void const
        assert hop.s_result.is_constant()
        hop.exception_cannot_occur()
        return hop.inputconst(lltype.Void, hop.s_result.const)
#
# sizeof()
#
# base Entry class for ctypes.sizeof(); registration is deferred
# (register=False) so the subclass below can also handle the
# sizeof(constant-type) form
sizeof_base_entry = register_function_impl(ctypes.sizeof, rctypesobject.sizeof,
                                           revealargs=[0], register=False)
class Entry(sizeof_base_entry):
    _about_ = ctypes.sizeof

    def compute_result_annotation(self, s_arg):
        # sizes are always non-negative machine integers
        return annmodel.SomeInteger(nonneg=True)

    def specialize_call(self, hop):
        s_arg = hop.args_s[0]
        if isinstance(s_arg, SomeControlledInstance):
            # sizeof(object): delegate to rctypesobject.sizeof at runtime
            return sizeof_base_entry.specialize_call(self, hop)
        else:
            # sizeof(type): computed here, becomes a compile-time constant
            if not s_arg.is_constant():
                raise TyperError("only supports sizeof(object) or "
                                 "sizeof(constant-type)")
            ctype = s_arg.const
            sample = ctype() # XXX can we always instantiate ctype like this?
            controller = getcontroller(ctype)
            real_obj = controller.convert(sample)
            size = rctypesobject.sizeof(real_obj)
            hop.exception_cannot_occur()
            return hop.inputconst(lltype.Signed, size)
| Python |
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rlib.rctypes.implementation import CTypeController, getcontroller
from pypy.rlib.rctypes import rctypesobject
from pypy.rpython.lltypesystem import lltype
from ctypes import pointer, POINTER, byref, c_int
# the common metatype of all ctypes POINTER(...) classes
PointerType = type(POINTER(c_int))

class PointerCTypeController(CTypeController):
    """Controller mapping a ctypes POINTER(T) type to an rctypes
    RPointer class."""
    # 0 = not initialized, 1 = setup() in progress, 2 = ready
    ready = 0

    def __init__(self, ctype):
        CTypeController.__init__(self, ctype)
        # placeholder pointer class; the contents type is plugged in by
        # setup(), which lets recursive pointer types be built
        self.knowntype = rctypesobject.RPointer(None)

    def setup(self):
        if self.ready == 0:
            self.ready = 1
            self.contentscontroller = getcontroller(self.ctype._type_)
            self.knowntype.setpointertype(self.contentscontroller.knowntype,
                                          force=True)
            self.ready = 2

    def new(self, ptrto=None):
        """Allocate a new pointer box, optionally pointing to 'ptrto'."""
        obj = self.knowntype.allocate()
        if ptrto is not None:
            obj.set_contents(self.contentscontroller.unbox(ptrto))
        return obj
    new._annspecialcase_ = 'specialize:arg(0)'

    def initialize_prebuilt(self, obj, x):
        # convert the prebuilt ctypes pointer's target and point at it
        contentsbox = self.contentscontroller.convert(x.contents)
        self.setbox_contents(obj, contentsbox)

    def getitem(self, obj, index):
        contentsobj = obj.ref(index)
        return self.contentscontroller.return_value(contentsobj)
    getitem._annspecialcase_ = 'specialize:arg(0)'

    def setitem(self, obj, index, value):
        if index != 0:
            raise ValueError("assignment to pointer[x] with x != 0")
            # not supported by ctypes either
        contentsobj = obj.get_contents()
        self.contentscontroller.set_value(contentsobj, value)
    setitem._annspecialcase_ = 'specialize:arg(0)'

    def setboxitem(self, obj, index, valuebox):
        if index != 0:
            raise ValueError("assignment to pointer[x] with x != 0")
            # not supported by ctypes either
        contentsobj = obj.get_contents()
        contentsobj.copyfrom(valuebox)
    # BUG FIX: this line previously re-annotated 'setitem' instead of
    # 'setboxitem', leaving setboxitem without its specialization tag.
    setboxitem._annspecialcase_ = 'specialize:arg(0)'

    def get_contents(self, obj):
        return self.contentscontroller.box(obj.get_contents())
    get_contents._annspecialcase_ = 'specialize:arg(0)'

    def setbox_contents(self, obj, contentsbox):
        obj.set_contents(contentsbox)
    setbox_contents._annspecialcase_ = 'specialize:arg(0)'

    def is_true(self, obj):
        # truth value of a ctypes pointer: non-NULL
        return not obj.is_null()
    is_true._annspecialcase_ = 'specialize:arg(0)'

    def store_box(self, obj, valuebox):
        obj.set_contents(valuebox.ref(0))

PointerCTypeController.register_for_metatype(PointerType)
| Python |
# empty
| Python |
from pypy.annotation import model as annmodel
from pypy.rlib.rctypes.implementation import CTypeController, getcontroller
from pypy.rlib.rctypes import rctypesobject
from pypy.rpython.lltypesystem import lltype
import ctypes
# the common metatype of all ctypes CFUNCTYPE(...) classes
CFuncPtrType = type(ctypes.CFUNCTYPE(None))
class FuncPtrCTypeController(CTypeController):
    # Controller mapping a ctypes function-pointer type to an rctypes
    # RPointer-to-RFuncType class.
    # 0 = not initialized, 1 = setup() in progress, 2 = ready
    ready = 0
    def __init__(self, ctype):
        CTypeController.__init__(self, ctype)
        # a throwaway instance is created just to read the class's
        # default argtypes/restype
        sample_instance = self.ctype()
        self.argtypes = sample_instance.argtypes
        self.restype = sample_instance.restype
        # placeholder pointer class, re-targeted by setup()
        self.knowntype = rctypesobject.RPointer(None)
    def setup(self):
        if self.ready == 0:
            self.ready = 1
            self.argscontrollers = [getcontroller(a) for a in self.argtypes]
            self.rescontroller = getcontroller(self.restype)
            argscls = [c.knowntype for c in self.argscontrollers]
            rescls = self.rescontroller.knowntype
            self.rfunctype = rctypesobject.RFuncType(argscls, rescls)
            self.knowntype.setpointertype(self.rfunctype, force=True)
            self.make_helpers()
            self.ready = 2
    def make_helpers(self):
        # XXX need stuff to unwrap pointer boxes to lltype pointers
        pass
    def real_ctype_of(fnptr):
        # in ctypes, most function pointers have argtypes and restype set
        # on the function pointer object itself, not on its class
        return ctypes.CFUNCTYPE(fnptr.restype, *fnptr.argtypes)
    real_ctype_of = staticmethod(real_ctype_of)
    def ctypecheck(self, x):
        # a function pointer matches only if its exact signature matches
        return (isinstance(type(x), CFuncPtrType) and
                tuple(x.argtypes) == tuple(self.argtypes) and
                x.restype == self.restype)
    def new(self):
        obj = self.knowntype.allocate()
        return obj
    def initialize_prebuilt(self, ptrobj, cfuncptr):
        if not cfuncptr: # passed as arg to functions expecting func pointers
            return
        # XXX this assumes it is an external function, correctly initialized
        # with includes and libraries attributes
        name = cfuncptr.__name__
        includes = getattr(cfuncptr, 'includes', ())
        libraries = getattr(cfuncptr, 'libraries', ())
        rlib = rctypesobject.RLibrary(libraries, includes)
        llinterp_friendly_version = getattr(cfuncptr,
                                            'llinterp_friendly_version',
                                            None)
        funcobj = self.rfunctype.fromlib(rlib, name, llinterp_friendly_version)
        ptrobj.set_contents(funcobj)
    def call(self, fnptrobj, *args):
        # dereference the pointer box and call the function it references
        return fnptrobj.get_contents().call(*args)
FuncPtrCTypeController.register_for_metatype(CFuncPtrType)
| Python |
import py
# the whole module is currently disabled at import time
py.test.skip("extregistry conflicts with the other rctypes :-(")
from pypy.annotation import model as annmodel
from pypy.tool.tls import tlsobject
from pypy.rlib.rctypes import rctypesobject
from pypy.rpython import extregistry, controllerentry
from pypy.rpython.error import TyperError
from pypy.rpython.controllerentry import Controller, ControllerEntry
from pypy.rpython.controllerentry import ControllerEntryForPrebuilt
from pypy.rpython.controllerentry import SomeControlledInstance
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.rctypes import rcarithmetic as rcarith
# require a recent enough ctypes, otherwise skip
try:
    import ctypes
    if ctypes.__version__ < '0.9.9.6': # string comparison... good enough?
        raise ImportError("requires ctypes >= 0.9.9.6, got %s" % (
            ctypes.__version__,))
except ImportError, e:
    py.test.skip(str(e))
class CTypeController(Controller):
    # Base controller: maps one ctypes type to its rctypes
    # implementation class ('knowntype', set by subclasses) for
    # annotation and rtyping.

    def __init__(self, ctype):
        self.ctype = ctype
        self.instance_cache = {}

    def setup(self):
        # overridden by subclasses that need deferred initialization
        pass

    def register_for_type(cls, ctype):
        # trigger this controller both for calls 'ctype(...)' and for
        # prebuilt instances of exactly 'ctype'
        class Entry(CTypesCallEntry):
            _about_ = ctype
            _controller_ = cls
        class Entry(CTypesObjEntry):
            _type_ = ctype
            _controller_ = cls
    register_for_type = classmethod(register_for_type)

    def register_for_metatype(cls, ctype):
        # same, but keyed on the metatype: covers a whole family of
        # generated ctypes classes (all POINTER types, all CFUNCTYPEs...)
        class Entry(CTypesCallEntry):
            _type_ = ctype
            _controller_ = cls
        class Entry(CTypesObjEntry):
            _metatype_ = ctype
            _controller_ = cls
    register_for_metatype = classmethod(register_for_metatype)

    def ctypecheck(self, x):
        return isinstance(x, self.ctype)

    def convert(self, x):
        # Convert the prebuilt ctypes object (or raw Python value) 'x'
        # into its rctypes box, caching so each object converts once.
        if self.ctypecheck(x):
            key = "by_id", id(x)
        else:
            # raw value: wrap it into a fresh ctypes instance first
            key = "by_value", x
            x = self.ctype(x)
        try:
            return self.instance_cache[key][0]
        except KeyError:
            obj = self.new()
            self.instance_cache[key] = obj, x # keep 'x' alive
            self.initialize_prebuilt(obj, x)
            return obj

    return_value = Controller.box

    def store_box(self, obj, valuebox):
        obj.copyfrom(valuebox)

    def store_value(self, obj, value):
        raise TypeError("cannot store a value into a non-primitive ctype")
    store_value._annspecialcase_ = 'specialize:arg(0)'

    def default_ctype_value(self):
        return self.ctype()

    # extension to the setattr/setitem support: if the new value is actually
    # a CTypeControlled instance as well, reveal it automatically (i.e. turn
    # it into an rctypesobject) and call a method with a different name.

    def setboxattr(self, obj, attr, value):
        return getattr(self, 'setbox_' + attr)(obj, value)
    setboxattr._annspecialcase_ = 'specialize:arg(2)'

    def ctrl_setattr(self, s_obj, s_attr, s_value):
        # annotation-time dispatch: boxes go through setboxattr
        if s_is_box(s_value):
            return controllerentry.delegate(self.setboxattr,
                                            s_obj, s_attr, s_value.s_real_obj)
        else:
            return controllerentry.delegate(self.setattr,
                                            s_obj, s_attr, s_value)

    def rtype_setattr(self, hop):
        from pypy.rpython.rcontrollerentry import rtypedelegate
        if s_is_box(hop.args_s[2]):
            hop2 = revealbox(hop, 2)
            return rtypedelegate(self.setboxattr, hop2)
        else:
            return rtypedelegate(self.setattr, hop)

    def ctrl_setitem(self, s_obj, s_key, s_value):
        # same dispatch as ctrl_setattr, for obj[key] = value
        if s_is_box(s_value):
            return controllerentry.delegate(self.setboxitem,
                                            s_obj, s_key, s_value.s_real_obj)
        else:
            return controllerentry.delegate(self.setitem,
                                            s_obj, s_key, s_value)

    def rtype_setitem(self, hop):
        from pypy.rpython.rcontrollerentry import rtypedelegate
        if s_is_box(hop.args_s[2]):
            hop2 = revealbox(hop, 2)
            return rtypedelegate(self.setboxitem, hop2)
        else:
            return rtypedelegate(self.setitem, hop)
class CTypesCallEntry(ControllerEntry):
    def getcontroller(self, *args_s, **kwds_s):
        # one controller per ctype, built lazily (with recursion support)
        ctype = self.instance
        return _build_controller(self._controller_, ctype)
class CTypesObjEntry(ControllerEntryForPrebuilt):
    def getcontroller(self):
        # some controllers (e.g. function pointers) must recover the
        # precise ctype from the prebuilt instance itself
        if hasattr(self._controller_, 'real_ctype_of'):
            ctype = self._controller_.real_ctype_of(self.instance)
        else:
            ctype = self.type
        return _build_controller(self._controller_, ctype)
TLS = tlsobject()

def _build_controller(cls, ctype):
    """Instantiate 'cls(ctype)' and call setup() on it, as well as on
    every controller recursively created during its construction."""
    if not hasattr(TLS, 'pending'):
        # outermost call: collect all controllers made during this
        # construction, then set them all up at the end
        TLS.pending = []
        try:
            controller = cls(ctype)
            TLS.pending.append(controller)
        finally:
            pending = TLS.pending
            del TLS.pending
        for pending_controller in pending:
            pending_controller.setup()
    else:
        # nested (recursive) call: just record the new controller for
        # the outermost caller to set up
        controller = cls(ctype)
        TLS.pending.append(controller)
    return controller
def getcontroller(ctype):
    """Return the CTypeController instance corresponding to the given ctype."""
    return extregistry.lookup_type(ctype).getcontroller()
def s_is_box(s_value):
    """True if the annotation describes a ctypes box managed by rctypes."""
    if not isinstance(s_value, SomeControlledInstance):
        return False
    return isinstance(s_value.controller, CTypeController)
def revealbox(hop, argindex):
    """Return a copy of 'hop' in which argument 'argindex' is replaced
    by its revealed (unwrapped) rctypes representation."""
    hop2 = hop.copy()
    r_controlled = hop2.args_r[argindex]
    s_revealed, r_revealed = r_controlled.reveal(r_controlled)
    hop2.args_s[argindex] = s_revealed
    hop2.args_r[argindex] = r_revealed
    return hop2
def register_function_impl(builtinfn, controllingfn,
                           revealargs=[], revealresult=None,
                           register=True):
    # Register 'controllingfn' as the rctypes implementation of the
    # ctypes-level function 'builtinfn'.
    #   revealargs:   indexes of arguments that are controlled boxes and
    #                 must be revealed (unwrapped) before delegating
    #   revealresult: optional callable mapping the argument annotations
    #                 to the ctype of the result, which is then re-wrapped
    #                 as a controlled instance
    #   register:     if False, only build and return the Entry class
    #                 (used as a base class for custom registrations)
    class Entry(extregistry.ExtRegistryEntry):
        if register:
            _about_ = builtinfn
        def compute_result_annotation(self, *args_s):
            real_args_s = list(args_s)
            if annmodel.s_ImpossibleValue in real_args_s:
                return annmodel.s_ImpossibleValue # temporarily hopefully
            for index in revealargs:
                s_controlled = args_s[index]
                if not isinstance(s_controlled, SomeControlledInstance):
                    raise TypeError("in call to %s:\nargs_s[%d] should be a "
                                    "ControlledInstance,\ngot instead %s" % (
                        builtinfn, index, s_controlled))
                real_args_s[index] = s_controlled.s_real_obj
            s_result = controllerentry.delegate(controllingfn, *real_args_s)
            if revealresult:
                # re-wrap the result as a controlled ctypes instance
                result_ctype = revealresult(*args_s)
                controller = getcontroller(result_ctype)
                if s_result != annmodel.s_ImpossibleValue:
                    s_result = SomeControlledInstance(s_result, controller)
            return s_result
        def specialize_call(self, hop):
            from pypy.rpython.rcontrollerentry import rtypedelegate
            return rtypedelegate(controllingfn, hop, revealargs, revealresult)
    return Entry
# ____________________________________________________________
#
# Imports for side-effects
import pypy.rlib.rctypes.rprimitive
import pypy.rlib.rctypes.rarray
import pypy.rlib.rctypes.rpointer
import pypy.rlib.rctypes.rstruct
import pypy.rlib.rctypes.rbuiltin
import pypy.rlib.rctypes.rchar_p
import pypy.rlib.rctypes.rfunc
| Python |
from pypy.rlib.cslib.btree import BTreeNode
from pypy.rlib.cslib.rdomain import BaseFiniteDomain, ConsistencyError
class AbstractConstraint:
    """Base class for constraints over a list of variables."""

    def __init__(self, variables):
        """variables is a list of variables which appear in the formula"""
        assert isinstance(variables, list)
        self._variables = variables

    def revise(self, domains):
        "domains : {'var':Domain}"
        # base class never narrows anything and is never entailed
        return False

    def estimate_cost(self, domains):
        """Product of the domain sizes of all constrained variables."""
        cost = 1
        for varname in self._variables:
            domain = domains[varname]
            assert isinstance(domain, BaseFiniteDomain)
            cost *= domain.size()
        return cost
class Quadruple(BTreeNode):
    """BTree node carrying a variable name, its candidate values and a
    running index into those values (used by Expression._next_value)."""

    def __init__(self, key, varname, values, index):
        BTreeNode.__init__(self, key)
        self.index = index
        self.values = values
        self.var = varname
class Expression(AbstractConstraint):
    """A constraint represented as a functional expression."""

    def __init__(self, variables):
        AbstractConstraint.__init__(self, variables)
        self.doms = {}

    def filter_func(self, kwargs):
        # overridden by subclasses: return True when the assignment in
        # 'kwargs' ({varname: value}) satisfies the constraint
        return False

    def _init_result_cache(self):
        """key = (variable,value), value = [has_success,has_failure]"""
        result_cache = {}
        for var in self._variables:
            result_cache[var] = {}
        return result_cache

    def _assign_values(self, doms):
        # Build the initial assignment (first value of every variable)
        # and store, in self._assign_values_state, the variables sorted
        # by ascending domain size for odometer-style enumeration.
        kwargs = {}
        sorted_vars = None
        for variable in self._variables:
            domain = doms[variable]
            assert isinstance(domain, BaseFiniteDomain)
            values = domain.get_values()
            node = Quadruple(domain.size(),
                             variable,
                             values,
                             0)
            if sorted_vars is None:
                sorted_vars = node
            else:
                sorted_vars.add( node )
            kwargs[variable] = values[0]
        # get sorted variables to instanciate those with fewer possible values first
        assert sorted_vars is not None
        self._assign_values_state = sorted_vars.get_values()
        return kwargs

    def _next_value(self, kwargs):
        # try to instanciate the next variable
        # (odometer: advance the first variable that still has values;
        # reset every exhausted one before it; None when all are done)
        variables = self._assign_values_state
        for curr in variables:
            if curr.index < curr.key:
                kwargs[curr.var] = curr.values[curr.index]
                curr.index += 1
                break
            else:
                curr.index = 0
                kwargs[curr.var] = curr.values[0]
        else:
            # it's over
            return None
        return kwargs

    def revise(self, doms):
        """generic propagation algorithm for n-ary expressions"""
        self.doms = doms
        maybe_entailed = True
        result_cache = self._init_result_cache()
        kwargs = self._assign_values(doms)
        while 1:
            kwargs = self._next_value(kwargs)
            if kwargs is None:
                break
            if maybe_entailed:
                # skip filter_func when every (var, value) pair of this
                # assignment already occurred in a satisfying one
                for varname, val in kwargs.iteritems():
                    val_dict = result_cache[varname]
                    if val not in val_dict:
                        break
                else:
                    continue
            if self.filter_func(kwargs):
                # remember that each (var, value) pair can be satisfied
                for var, val in kwargs.items():
                    var_dict = result_cache[var]
                    var_dict[val] = True
            else:
                maybe_entailed = False
        # prune every value that appeared in no satisfying assignment
        try: # XXX domains in rlib, too
            for varname, keep in result_cache.items():
                domain = doms[varname]
                assert isinstance(domain, BaseFiniteDomain)
                domain.remove_values([val
                                      for val in domain.get_values()
                                      if val not in keep])
        except KeyError:
            # There are no more value in result_cache
            pass
        return maybe_entailed

    def __repr__(self):
        # NOTE(review): 'formula' is never set by this class; presumably
        # assigned by code building concrete subclasses -- verify
        return '<%s>' % self.formula
#--- Alldistinct
class VarDom(BTreeNode):
    """BTree node pairing a variable with its domain, keyed by domain size."""

    def __init__(self, key, var, dom):
        BTreeNode.__init__(self, key)
        self.dom = dom
        self.var = var
class AllDistinct(AbstractConstraint):
    """Contraint: all values must be distinct"""

    def __init__(self, variables):
        AbstractConstraint.__init__(self, variables)
        # worst case complexity
        self._cost = len(self._variables) * (len(self._variables) - 1) / 2

    def estimate_cost(self, domains):
        # constant: precomputed in __init__
        return self._cost

    def revise(self, doms):
        # sort the variables by ascending domain size using a BTree
        sorted_vars = None
        for var in self._variables:
            dom = doms[var]
            assert isinstance(dom, BaseFiniteDomain)
            node = VarDom(dom.size(), var, dom)
            if sorted_vars is None:
                sorted_vars = node
            else:
                sorted_vars.add(node)
        assert sorted_vars is not None
        variables = sorted_vars.get_values()
        # if a domain has a size of 1,
        # then the value must be removed from the other domains
        for var_dom in variables:
            if var_dom.dom.size() == 1:
                #print "AllDistinct removes values"
                for var_dom2 in variables:
                    if var_dom2.var != var_dom.var:
                        try:
                            var_dom2.dom.remove_value(var_dom.dom.get_values()[0])
                        except KeyError, e:
                            # we ignore errors caused by the removal of
                            # non existing values
                            pass
        # if there are less values than variables, the constraint fails
        values = {}
        for var_dom in variables:
            for val in var_dom.dom.get_values():
                values[val] = 0
        if len(values) < len(variables):
            #print "AllDistinct failed"
            raise ConsistencyError
        # the constraint is entailed if all domains have a size of 1
        for var_dom in variables:
            if not var_dom.dom.size() == 1:
                return False
        #print "All distinct entailed"
        return True
#--- Binary expressions
class BinaryExpression(Expression):
    """A binary constraint represented as a python expression
    This implementation uses a narrowing algorithm optimized for
    binary constraints."""

    def __init__(self, variables):
        assert len(variables) == 2
        Expression.__init__(self, variables)

    def revise(self, domains):
        """specialized pruning algorithm for binary expressions
        Runs much faster than the generic version"""
        self.doms = domains
        maybe_entailed = True
        var1 = self._variables[0]
        dom1 = domains[var1]
        values1 = dom1.get_values()
        var2 = self._variables[1]
        dom2 = domains[var2]
        values2 = dom2.get_values()
        # put the smaller domain in the outer loop; NOTE(review): after
        # this swap, kwargs may be populated var2-first, so filter_func
        # implementations must look values up by variable name rather
        # than rely on kwargs.values() ordering
        if dom2.size() < dom1.size():
            var1, var2 = var2, var1
            dom1, dom2 = dom2, dom1
            values1, values2 = values2, values1
        kwargs = {}
        keep1 = {}
        keep2 = {}
        maybe_entailed = True
        # iterate for all values
        for val1 in values1:
            kwargs[var1] = val1
            for val2 in values2:
                kwargs[var2] = val2
                # once entailment is ruled out, pairs whose two values
                # are both already kept cannot teach us anything new
                if val1 in keep1 and val2 in keep2 and not maybe_entailed:
                    continue
                if self.filter_func(kwargs):
                    keep1[val1] = 1
                    keep2[val2] = 1
                else:
                    maybe_entailed = False
        # prune every value that took part in no satisfying pair
        dom1.remove_values([val for val in values1 if val not in keep1])
        dom2.remove_values([val for val in values2 if val not in keep2])
        return maybe_entailed
class BinEq(BinaryExpression):
    """Binary equality constraint: var0 == var1."""

    def filter_func(self, kwargs):
        # look values up by variable name; equality is symmetric, so
        # the order of the two variables does not matter
        first = kwargs[self._variables[0]]
        second = kwargs[self._variables[1]]
        return first == second
class BinLt(BinaryExpression):
    """Binary strict-order constraint: var0 < var1."""

    def filter_func(self, kwargs):
        # BUG FIX: this used kwargs.values()[0] < values[1], which
        # depends on the dict's value ordering.  BinaryExpression.revise
        # swaps var1/var2 when the second domain is smaller, so kwargs
        # can be populated var2-first and the comparison silently
        # inverted.  Index by variable name instead.
        first = kwargs[self._variables[0]]
        second = kwargs[self._variables[1]]
        return first < second
| Python |
class ConsistencyError(Exception):
    """Raised when constraint propagation would empty a variable's domain."""
class BaseFiniteDomain:
    """
    Variable Domain with a finite set of int values
    """

    def __init__(self, values):
        """values is a dict whose keys are the values of the domain.
        This class uses a dictionnary to make sure that there are
        no duplicate values"""
        #assert isinstance(values, dict)
        self._values = values.copy()
        self._changed = False

    def copy(self):
        """Return an independent copy of this domain."""
        return BaseFiniteDomain(self._values)

    def _value_removed(self):
        "The implementation of remove_value should call this method"
        if self.size() == 0:
            # BUG FIX: was the Python 2-only 'raise E, msg' comma form,
            # a syntax error on Python 3; the call form works on both.
            raise ConsistencyError("tried to make a domain empty")
        self._changed = True

    def remove_value(self, value):
        """Remove value of domain and check for consistency"""
        del self._values[value]
        self._value_removed()

    def remove_values(self, values):
        """Remove several values at once; consistency is checked only
        once, after all removals."""
        if len(values) > 0:
            for val in values:
                del self._values[val]
            self._value_removed()

    def size(self):
        """computes the size of a finite domain"""
        return len(self._values)

    def get_values(self):
        return self._values.keys()

    def __repr__(self):
        return "<Domain %s>" % self._values.keys()
| Python |
"""
distributors - part of constraint satisfaction solver.
"""
def make_new_domains(domains):
    """return a shallow copy of dict of domains passed in argument"""
    # each domain object is copied; the dict structure itself is fresh
    return dict([(varname, dom.copy()) for varname, dom in domains.items()])
class AbstractDistributor:
    """Implements DistributorInterface but abstract because
    _distribute is left unimplemented."""

    def find_smallest_domain(self, domains):
        """returns the variable having the smallest domain
        (or one of such variables if there is a tie)
        """
        # BUG FIX (py3 compat): dict.items() views are not indexable,
        # and we scan the pairs twice -- materialize them once.
        doms = list(domains.items())
        k = 0
        while k < len(doms):
            var, dom = doms[k]
            sz = dom.size()
            if sz > 1:
                # first still-splittable domain: starting candidate
                min_size = sz
                min_var = var
                break
            k += 1
        else:
            # no domain has more than one value left
            # BUG FIX: was the Python 2-only 'raise E, msg' comma form.
            raise RuntimeError("should not be here")
        while k < len(doms):
            var, dom = doms[k]
            if 1 < dom.size() < min_size:
                min_var = var
                min_size = dom.size()
            k += 1
        return min_var

    def distribute(self, domains):
        """
        domains -> two variants of the same modified domain
        do the minimal job and let concrete class distribute variables
        """
        doms1 = make_new_domains(domains)
        doms2 = make_new_domains(domains)
        for modified_domain in self._distribute(doms1, doms2):
            modified_domain._changed = False
        return [doms1, doms2]
class AllOrNothingDistributor(AbstractDistributor):
    """distributes domains by splitting the smallest domain in 2 new domains
    The first new domain has a size of one,
    and the second has all the other values"""

    def _distribute_on_choice(self, dom, choice):
        # choice 1 keeps only the first value; any other choice drops it
        if choice == 1:
            dom.remove_values(dom.get_values()[1:])
        else:
            dom.remove_value(dom.get_values()[0])

    def _distribute(self, doms1, doms2):
        """See AbstractDistributor"""
        variable = self.find_smallest_domain(doms1)
        # (removed an unused fetch of the variable's values here)
        self._distribute_on_choice(doms1[variable], 1)
        self._distribute_on_choice(doms2[variable], 2)
        return [doms1[variable], doms2[variable]]
class DichotomyDistributor(AbstractDistributor):
    """distributes domains by splitting the smallest domain in
    two equal parts or as equal as possible."""

    def _distribute_on_choice(self, dom, choice):
        values = dom.get_values()
        # BUG FIX (py3 compat): '/' would yield a float middle index on
        # Python 3; '//' is identical floor division on Python 2 ints.
        middle = len(values) // 2
        if choice == 1:
            dom.remove_values(values[:middle])
        else:
            dom.remove_values(values[middle:])

    def _distribute(self, doms1, doms2):
        """See AbstractDistributor"""
        variable = self.find_smallest_domain(doms1)
        # (removed unused 'values'/'middle' locals: _distribute_on_choice
        # recomputes the split point itself)
        self._distribute_on_choice(doms1[variable], 1)
        self._distribute_on_choice(doms2[variable], 2)
        return [doms1[variable], doms2[variable]]
# distributor used when none is specified: dichotomic domain splitting
DefaultDistributor = DichotomyDistributor
| Python |
"""
A minimalist binary tree implementation
whose values are (descendants of) BTreeNodes.
This alleviates some typing difficulties when
using TimSort on lists of the form [(key, Thing), ...]
"""
class BTreeNode:
    """A node of a minimal, unbalanced binary search tree.

    The stored values are the nodes themselves; each node carries an
    integer ``key`` used for ordering.  Keys smaller than or equal to
    the current key go into the left subtree, larger keys into the
    right subtree.
    """

    def __init__(self, key):
        self.key = key
        self.left = None
        self.right = None

    def add(self, val):
        """Insert the node `val` below self, ordered by its key."""
        key = val.key
        assert isinstance(key, int)
        assert isinstance(val, BTreeNode)
        if key > self.key:
            if self.right is None:
                self.right = val
            else:
                self.right.add(val)
        else:
            if self.left is None:
                self.left = val
            else:
                self.left.add(val)

    def _values(self, dest):
        # in-order traversal: left subtree, this node, right subtree
        if self.left:
            self.left._values(dest)
        dest.append(self)
        if self.right:
            self.right._values(dest)

    def get_values(self):
        """Return all nodes of this subtree as a key-sorted list."""
        collected = []
        self._values(collected)
        return collected
| Python |
"""The code of the constraint propagation algorithms"""
from pypy.rlib.cslib.rconstraint import AbstractConstraint, ConsistencyError
class Repository:
    """Stores variables, domains and constraints
    Propagates domain changes to constraints
    Manages the constraint evaluation queue"""

    def __init__(self, domains, constraints):
        # `domains` maps each variable name to its domain object;
        # `constraints` is an iterable of AbstractConstraint instances.
        self._variables = domains.keys()   # list of variable names
        self._domains = domains            # maps variable name to domain object
        self._constraints = []             # list of constraint objects
        # reverse index: variable name -> constraints listening on it
        self._varconst = {}
        for var in self._variables:
            self._varconst[var] = []
        for constr in constraints:
            self.add_constraint( constr )

    def __repr__(self):
        return '<Repository nb_constraints=%d domains=%s>' % \
               (len(self._constraints), self._domains)

    def add_constraint(self, constraint):
        # Register the constraint and subscribe it to every variable it
        # involves, so domain changes can be propagated back to it.
        assert isinstance( constraint, AbstractConstraint )
        if 0: # isinstance(constraint, BasicConstraint):
            # Basic constraints are processed just once
            # because they are straight away entailed
            # NOTE(review): this branch is deliberately disabled dead
            # code; BasicConstraint support is switched off.
            var = constraint.getVariable()
            constraint.revise({var: self._domains[var]})
        else:
            self._constraints.append(constraint)
            for var in constraint._variables:
                self._varconst[var].append(constraint)

    def _remove_constraint(self, constraint):
        # Unregister an entailed constraint both from the repository and
        # from the listener lists of all its variables.
        self._constraints.remove(constraint)
        for var in constraint._variables:
            try:
                self._varconst[var].remove(constraint)
            except ValueError:
                raise ValueError('Error removing constraint from listener',
                                 var,
                                 self._varconst[var],
                                 constraint)

    def get_domains(self):
        # Accessor for the variable-name -> domain mapping.
        return self._domains

    def distribute(self, distributor):
        """
        create new repository using the distributor and self
        using changed domains
        """
        d1, d2 = distributor.distribute(self._domains)
        return [Repository(d1, self._constraints),
                Repository(d2, self._constraints)]

    def propagate(self):
        """Prunes the domains of the variables
        This method calls constraint.narrow() and queues constraints
        that are affected by recent changes in the domains.
        Returns True if a solution was found"""
        # work queue of (estimated cost, constraint) pairs to revise
        _queue = [(constr.estimate_cost(self._domains), constr)
                  for constr in self._constraints ]
        # XXX : _queue.sort()
        _affected_constraints = {}
        while True:
            if not _queue:
                # refill the queue if some constraints have been affected
                _queue = [(constr.estimate_cost(self._domains), constr)
                          for constr in _affected_constraints]
                if not _queue:
                    break
                # XXX _queue.sort()
                _affected_constraints.clear()
            cost, constraint = _queue.pop(0)
            entailed = constraint.revise(self._domains)
            for var in constraint._variables:
                # affected constraints are listeners of
                # affected variables of this constraint
                dom = self._domains[var]
                if not dom._changed: # XXX
                    continue
                for constr in self._varconst[var]:
                    if constr is not constraint:
                        _affected_constraints[constr] = True
                dom._changed = False
            if entailed:
                # an entailed constraint can never prune again: drop it
                self._remove_constraint(constraint)
                if constraint in _affected_constraints:
                    del _affected_constraints[constraint]
        # solved iff every domain has been narrowed down to exactly one
        # value; returns the ints 1/0, which callers use as booleans
        for domain in self._domains.values():
            if domain.size() != 1:
                return 0
        return 1

    def solve_all(self, distributor):
        # convenience wrapper: run a full search with a fresh Solver
        solver = Solver(distributor)
        return solver.solve_all(self)
import os
class Solver:
    """Top-level object used to manage the search"""

    def __init__(self, distributor):
        """if no distributer given, will use the default one"""
        self._verb = 0                  # verbosity flag for solve_all()
        self._distributor = distributor
        self.todo = []                  # stack of repositories to explore

    def solve_one(self, repository):
        """Return the first solution found from `repository`, or None."""
        self.todo = [repository]
        return self.next_sol()

    def solve_all(self, repository):
        """Exhaustively search `repository`; return every solution found."""
        self.todo = [repository]
        solutions = []
        sol = self.next_sol()
        while sol is not None:
            solutions.append(sol)
            if self._verb:
                os.write(1, 'found solution : %s\n' % sol)
            sol = self.next_sol()
        return solutions

    def next_sol(self):
        """Pop repositories off the todo stack until one propagates to a
        complete assignment; return that assignment as a dict, or None
        once the search space is exhausted."""
        stack = self.todo
        while stack:
            repo = stack.pop()
            try:
                solved = repo.propagate()
            except ConsistencyError:
                continue        # dead branch: drop it and backtrack
            if not solved:
                # still undecided: split and push both halves
                for half in repo.distribute(self._distributor):
                    stack.append(half)
                continue
            # every domain is a singleton: read off the assignment
            result = {}
            for name, dom in repo.get_domains().items():
                result[name] = dom.get_values()[0]
            return result
        return None
| Python |
#
| Python |
from pypy.rlib.rarithmetic import ovfcheck, ovfcheck_lshift
## ------------------------------------------------------------------------
## Lots of code for an adaptive, stable, natural mergesort. There are many
## pieces to this algorithm; read listsort.txt for overviews and details.
## ------------------------------------------------------------------------
## Adapted from CPython, original code and algorithms by Tim Peters
## CAREFUL:
## this class has to be used carefully, because all the lists that are
## sorted will be unified
class TimSort:
    """TimSort(list).sort()
    Sorts the list in-place, using the overridable method lt() for comparison.

    This version fixes the classic merge_collapse() bug: the original
    only re-checked the run-length invariant for the top three runs of
    the pending stack, which can leave the invariant violated deeper in
    the stack (de Gouw et al., 2015; fixed the same way in CPython).
    """

    def __init__(self, list, listlength=None):
        self.list = list
        if listlength is None:
            listlength = len(list)
        self.listlength = listlength

    def lt(self, a, b):
        """The comparison primitive; override for custom orderings."""
        return a < b

    def le(self, a, b):
        return not self.lt(b, a)   # always use self.lt() as the primitive

    # binarysort is the best method for sorting small arrays: it does
    # few compares, but can do data movement quadratic in the number of
    # elements.
    # "a" is a contiguous slice of a list, and is sorted via binary insertion.
    # This sort is stable.
    # On entry, the first "sorted" elements are already sorted.
    # Even in case of error, the output slice will be some permutation of
    # the input (nothing is lost or duplicated).
    def binarysort(self, a, sorted=1):
        for start in xrange(a.base + sorted, a.base + a.len):
            # set l to where list[start] belongs
            l = a.base
            r = start
            pivot = a.list[r]
            # Invariants:
            # pivot >= all in [base, l).
            # pivot  < all in [r, start).
            # The second is vacuously true at the start.
            while l < r:
                p = l + ((r - l) >> 1)
                if self.lt(pivot, a.list[p]):
                    r = p
                else:
                    l = p+1
            assert l == r
            # The invariants still hold, so pivot >= all in [base, l) and
            # pivot < all in [l, start), so pivot belongs at l.  Note
            # that if there are elements equal to pivot, l points to the
            # first slot after them -- that's why this sort is stable.
            # Slide over to make room.
            for p in xrange(start, l, -1):
                a.list[p] = a.list[p-1]
            a.list[l] = pivot

    # Compute the length of the run in the slice "a".
    # "A run" is the longest ascending sequence, with
    #
    #     a[0] <= a[1] <= a[2] <= ...
    #
    # or the longest descending sequence, with
    #
    #     a[0] > a[1] > a[2] > ...
    #
    # Return (run, descending) where descending is False in the former case,
    # or True in the latter.
    # For its intended use in a stable mergesort, the strictness of the defn of
    # "descending" is needed so that the caller can safely reverse a descending
    # sequence without violating stability (strict > ensures there are no equal
    # elements to get out of order).
    def count_run(self, a):
        if a.len <= 1:
            n = a.len
            descending = False
        else:
            n = 2
            if self.lt(a.list[a.base + 1], a.list[a.base]):
                descending = True
                for p in xrange(a.base + 2, a.base + a.len):
                    if self.lt(a.list[p], a.list[p-1]):
                        n += 1
                    else:
                        break
            else:
                descending = False
                for p in xrange(a.base + 2, a.base + a.len):
                    if self.lt(a.list[p], a.list[p-1]):
                        break
                    else:
                        n += 1
        return ListSlice(a.list, a.base, n), descending

    # Locate the proper position of key in a sorted vector; if the vector
    # contains an element equal to key, return the position immediately to the
    # left of the leftmost equal element -- or to the right of the rightmost
    # equal element if the flag "rightmost" is set.
    #
    # "hint" is an index at which to begin the search, 0 <= hint < a.len.
    # The closer hint is to the final result, the faster this runs.
    #
    # The return value is the index 0 <= k <= a.len such that
    #
    #     a[k-1] < key <= a[k]      (if rightmost is False)
    #     a[k-1] <= key < a[k]      (if rightmost is True)
    #
    # as long as the indices are in bound.  IOW, key belongs at index k;
    # or, IOW, the first k elements of a should precede key, and the last
    # n-k should follow key.
    def gallop(self, key, a, hint, rightmost):
        assert 0 <= hint < a.len
        if rightmost:
            lower = self.le   # search for the largest k for which a[k] <= key
        else:
            lower = self.lt   # search for the largest k for which a[k] < key

        p = a.base + hint
        lastofs = 0
        ofs = 1
        if lower(a.list[p], key):
            # a[hint] < key -- gallop right, until
            #     a[hint + lastofs] < key <= a[hint + ofs]
            maxofs = a.len - hint     # a[a.len-1] is highest
            while ofs < maxofs:
                if lower(a.list[p + ofs], key):
                    lastofs = ofs
                    try:
                        ofs = ovfcheck_lshift(ofs, 1)
                    except OverflowError:
                        ofs = maxofs
                    else:
                        ofs = ofs + 1
                else:   # key <= a[hint + ofs]
                    break
            if ofs > maxofs:
                ofs = maxofs
            # Translate back to offsets relative to a.
            lastofs += hint
            ofs += hint
        else:
            # key <= a[hint] -- gallop left, until
            #     a[hint - ofs] < key <= a[hint - lastofs]
            maxofs = hint + 1   # a[0] is lowest
            while ofs < maxofs:
                if lower(a.list[p - ofs], key):
                    break
                else:
                    # key <= a[hint - ofs]
                    lastofs = ofs
                    try:
                        ofs = ovfcheck_lshift(ofs, 1)
                    except OverflowError:
                        ofs = maxofs
                    else:
                        ofs = ofs + 1
            if ofs > maxofs:
                ofs = maxofs
            # Translate back to positive offsets relative to a.
            lastofs, ofs = hint-ofs, hint-lastofs

        assert -1 <= lastofs < ofs <= a.len
        # Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the
        # right of lastofs but no farther right than ofs.  Do a binary
        # search, with invariant a[lastofs-1] < key <= a[ofs].
        lastofs += 1
        while lastofs < ofs:
            m = lastofs + ((ofs - lastofs) >> 1)
            if lower(a.list[a.base + m], key):
                lastofs = m+1   # a[m] < key
            else:
                ofs = m         # key <= a[m]
        assert lastofs == ofs         # so a[ofs-1] < key <= a[ofs]
        return ofs

    # hint for the annotator: the argument 'rightmost' is always passed in as
    # a constant (either True or False), so we can specialize the function for
    # the two cases.  (This is actually needed for technical reasons: the
    # variable 'lower' must contain a known method, which is the case in each
    # specialized version but not in the unspecialized one.)
    gallop._annspecialcase_ = "specialize:arg(4)"

    # ____________________________________________________________

    # When we get into galloping mode, we stay there until both runs win less
    # often than MIN_GALLOP consecutive times.  See listsort.txt for more info.
    MIN_GALLOP = 7

    def merge_init(self):
        # This controls when we get *into* galloping mode.  It's initialized
        # to MIN_GALLOP.  merge_lo and merge_hi tend to nudge it higher for
        # random data, and lower for highly structured data.
        self.min_gallop = self.MIN_GALLOP

        # A stack of n pending runs yet to be merged.  Run #i starts at
        # address pending[i].base and extends for pending[i].len elements.
        # It's always true (so long as the indices are in bounds) that
        #
        #     pending[i].base + pending[i].len == pending[i+1].base
        #
        # so we could cut the storage for this, but it's a minor amount,
        # and keeping all the info explicit simplifies the code.
        self.pending = []

    # Merge the slice "a" with the slice "b" in a stable way, in-place.
    # a.len and b.len must be > 0, and a.base + a.len == b.base.
    # Must also have that b.list[b.base] < a.list[a.base], that
    # a.list[a.base+a.len-1] belongs at the end of the merge, and should have
    # a.len <= b.len.  See listsort.txt for more info.
    def merge_lo(self, a, b):
        assert a.len > 0 and b.len > 0 and a.base + a.len == b.base
        min_gallop = self.min_gallop
        dest = a.base
        a = a.copyitems()

        # Invariant: elements in "a" are waiting to be reinserted into the list
        # at "dest".  They should be merged with the elements of "b".
        # b.base == dest + a.len.
        # We use a finally block to ensure that the elements remaining in
        # the copy "a" are reinserted back into self.list in all cases.
        try:
            self.list[dest] = b.popleft()
            dest += 1
            if a.len == 1 or b.len == 0:
                return

            while True:
                acount = 0   # number of times A won in a row
                bcount = 0   # number of times B won in a row

                # Do the straightforward thing until (if ever) one run
                # appears to win consistently.
                while True:
                    if self.lt(b.list[b.base], a.list[a.base]):
                        self.list[dest] = b.popleft()
                        dest += 1
                        if b.len == 0:
                            return
                        bcount += 1
                        acount = 0
                        if bcount >= min_gallop:
                            break
                    else:
                        self.list[dest] = a.popleft()
                        dest += 1
                        if a.len == 1:
                            return
                        acount += 1
                        bcount = 0
                        if acount >= min_gallop:
                            break

                # One run is winning so consistently that galloping may
                # be a huge win.  So try that, and continue galloping until
                # (if ever) neither run appears to be winning consistently
                # anymore.
                min_gallop += 1

                while True:
                    min_gallop -= min_gallop > 1
                    self.min_gallop = min_gallop

                    acount = self.gallop(b.list[b.base], a, hint=0,
                                         rightmost=True)
                    for p in xrange(a.base, a.base + acount):
                        self.list[dest] = a.list[p]
                        dest += 1
                    a.advance(acount)
                    # a.len==0 is impossible now if the comparison
                    # function is consistent, but we can't assume
                    # that it is.
                    if a.len <= 1:
                        return

                    self.list[dest] = b.popleft()
                    dest += 1
                    if b.len == 0:
                        return

                    bcount = self.gallop(a.list[a.base], b, hint=0,
                                         rightmost=False)
                    for p in xrange(b.base, b.base + bcount):
                        self.list[dest] = b.list[p]
                        dest += 1
                    b.advance(bcount)
                    if b.len == 0:
                        return

                    self.list[dest] = a.popleft()
                    dest += 1
                    if a.len == 1:
                        return

                    if acount < self.MIN_GALLOP and bcount < self.MIN_GALLOP:
                        break

                min_gallop += 1   # penalize it for leaving galloping mode
                self.min_gallop = min_gallop

        finally:
            # The last element of a belongs at the end of the merge, so we copy
            # the remaining elements of b before the remaining elements of a.
            assert a.len >= 0 and b.len >= 0
            for p in xrange(b.base, b.base + b.len):
                self.list[dest] = b.list[p]
                dest += 1
            for p in xrange(a.base, a.base + a.len):
                self.list[dest] = a.list[p]
                dest += 1

    # Same as merge_lo(), but should have a.len >= b.len.
    def merge_hi(self, a, b):
        assert a.len > 0 and b.len > 0 and a.base + a.len == b.base
        min_gallop = self.min_gallop
        dest = b.base + b.len
        b = b.copyitems()

        # Invariant: elements in "b" are waiting to be reinserted into the list
        # before "dest".  They should be merged with the elements of "a".
        # a.base + a.len == dest - b.len.
        # We use a finally block to ensure that the elements remaining in
        # the copy "b" are reinserted back into self.list in all cases.
        try:
            dest -= 1
            self.list[dest] = a.popright()
            if a.len == 0 or b.len == 1:
                return

            while True:
                acount = 0   # number of times A won in a row
                bcount = 0   # number of times B won in a row

                # Do the straightforward thing until (if ever) one run
                # appears to win consistently.
                while True:
                    nexta = a.list[a.base + a.len - 1]
                    nextb = b.list[b.base + b.len - 1]
                    if self.lt(nextb, nexta):
                        dest -= 1
                        self.list[dest] = nexta
                        a.len -= 1
                        if a.len == 0:
                            return
                        acount += 1
                        bcount = 0
                        if acount >= min_gallop:
                            break
                    else:
                        dest -= 1
                        self.list[dest] = nextb
                        b.len -= 1
                        if b.len == 1:
                            return
                        bcount += 1
                        acount = 0
                        if bcount >= min_gallop:
                            break

                # One run is winning so consistently that galloping may
                # be a huge win.  So try that, and continue galloping until
                # (if ever) neither run appears to be winning consistently
                # anymore.
                min_gallop += 1

                while True:
                    min_gallop -= min_gallop > 1
                    self.min_gallop = min_gallop

                    nextb = b.list[b.base + b.len - 1]
                    k = self.gallop(nextb, a, hint=a.len-1, rightmost=True)
                    acount = a.len - k
                    for p in xrange(a.base + a.len - 1, a.base + k - 1, -1):
                        dest -= 1
                        self.list[dest] = a.list[p]
                    a.len -= acount
                    if a.len == 0:
                        return

                    dest -= 1
                    self.list[dest] = b.popright()
                    if b.len == 1:
                        return

                    nexta = a.list[a.base + a.len - 1]
                    k = self.gallop(nexta, b, hint=b.len-1, rightmost=False)
                    bcount = b.len - k
                    for p in xrange(b.base + b.len - 1, b.base + k - 1, -1):
                        dest -= 1
                        self.list[dest] = b.list[p]
                    b.len -= bcount
                    # b.len==0 is impossible now if the comparison
                    # function is consistent, but we can't assume
                    # that it is.
                    if b.len <= 1:
                        return

                    dest -= 1
                    self.list[dest] = a.popright()
                    if a.len == 0:
                        return

                    if acount < self.MIN_GALLOP and bcount < self.MIN_GALLOP:
                        break

                min_gallop += 1   # penalize it for leaving galloping mode
                self.min_gallop = min_gallop

        finally:
            # The last element of a belongs at the end of the merge, so we copy
            # the remaining elements of a and then the remaining elements of b.
            assert a.len >= 0 and b.len >= 0
            for p in xrange(a.base + a.len - 1, a.base - 1, -1):
                dest -= 1
                self.list[dest] = a.list[p]
            for p in xrange(b.base + b.len - 1, b.base - 1, -1):
                dest -= 1
                self.list[dest] = b.list[p]

    # Merge the two runs at stack indices i and i+1.
    def merge_at(self, i):
        a = self.pending[i]
        b = self.pending[i+1]
        assert a.len > 0 and b.len > 0
        assert a.base + a.len == b.base

        # Record the length of the combined runs and remove the run b
        self.pending[i] = ListSlice(self.list, a.base, a.len + b.len)
        del self.pending[i+1]

        # Where does b start in a?  Elements in a before that can be
        # ignored (already in place).
        k = self.gallop(b.list[b.base], a, hint=0, rightmost=True)
        a.advance(k)
        if a.len == 0:
            return

        # Where does a end in b?  Elements in b after that can be
        # ignored (already in place).
        b.len = self.gallop(a.list[a.base+a.len-1], b, hint=b.len-1,
                            rightmost=False)
        if b.len == 0:
            return

        # Merge what remains of the runs.  The direction is chosen to
        # minimize the temporary storage needed.
        if a.len <= b.len:
            self.merge_lo(a, b)
        else:
            self.merge_hi(a, b)

    # Examine the stack of runs waiting to be merged, merging adjacent runs
    # until the stack invariants are re-established:
    #
    # 1. len[-3] > len[-2] + len[-1]
    # 2. len[-2] > len[-1]
    #
    # See listsort.txt for more info.
    def merge_collapse(self):
        p = self.pending
        while len(p) > 1:
            # BUGFIX: also check the invariant one run deeper in the
            # stack (p[-4]); checking only the top three runs can leave
            # the invariant violated below them, which in the worst case
            # makes the pending stack outgrow its expected bound.  This
            # matches the fix applied in CPython after the 2015
            # "TimSort is broken" analysis by de Gouw et al.
            if ((len(p) >= 3 and p[-3].len <= p[-2].len + p[-1].len) or
                (len(p) >= 4 and p[-4].len <= p[-3].len + p[-2].len)):
                if p[-3].len < p[-1].len:
                    self.merge_at(-3)
                else:
                    self.merge_at(-2)
            elif p[-2].len <= p[-1].len:
                self.merge_at(-2)
            else:
                break

    # Regardless of invariants, merge all runs on the stack until only one
    # remains.  This is used at the end of the mergesort.
    def merge_force_collapse(self):
        p = self.pending
        while len(p) > 1:
            if len(p) >= 3 and p[-3].len < p[-1].len:
                self.merge_at(-3)
            else:
                self.merge_at(-2)

    # Compute a good value for the minimum run length; natural runs shorter
    # than this are boosted artificially via binary insertion.
    #
    # If n < 64, return n (it's too small to bother with fancy stuff).
    # Else if n is an exact power of 2, return 32.
    # Else return an int k, 32 <= k <= 64, such that n/k is close to, but
    # strictly less than, an exact power of 2.
    #
    # See listsort.txt for more info.
    def merge_compute_minrun(self, n):
        r = 0    # becomes 1 if any 1 bits are shifted off
        while n >= 64:
            r |= n & 1
            n >>= 1
        return n + r

    # ____________________________________________________________
    # Entry point.
    def sort(self):
        remaining = ListSlice(self.list, 0, self.listlength)
        if remaining.len < 2:
            return

        # March over the array once, left to right, finding natural runs,
        # and extending short natural runs to minrun elements.
        self.merge_init()
        minrun = self.merge_compute_minrun(remaining.len)

        while remaining.len > 0:
            # Identify next run.
            run, descending = self.count_run(remaining)
            if descending:
                run.reverse()
            # If short, extend to min(minrun, nremaining).
            if run.len < minrun:
                sorted = run.len
                run.len = min(minrun, remaining.len)
                self.binarysort(run, sorted)
            # Advance remaining past this run.
            remaining.advance(run.len)
            # Push run onto pending-runs stack, and maybe merge.
            self.pending.append(run)
            self.merge_collapse()

        assert remaining.base == self.listlength
        self.merge_force_collapse()
        assert len(self.pending) == 1
        assert self.pending[0].base == 0
        assert self.pending[0].len == self.listlength
class ListSlice:
    "A sublist of a list."

    def __init__(self, list, base, len):
        # represents the window [base, base+len) of `list`
        self.list = list
        self.base = base
        self.len = len

    def copyitems(self):
        "Make a copy of the slice of the original list."
        first = self.base
        last = self.base + self.len
        assert 0 <= first <= last   # annotator hint
        return ListSlice(self.list[first:last], 0, self.len)

    def advance(self, n):
        "Drop the first n items from the front of the slice."
        self.base += n
        self.len -= n

    def popleft(self):
        "Remove and return the first item of the slice."
        item = self.list[self.base]
        self.base += 1
        self.len -= 1
        return item

    def popright(self):
        "Remove and return the last item of the slice."
        self.len -= 1
        return self.list[self.base + self.len]

    def reverse(self):
        "Reverse the slice in-place."
        items = self.list
        front = self.base
        back = front + self.len - 1
        while front < back:
            items[front], items[back] = items[back], items[front]
            front += 1
            back -= 1
| Python |
"""
An RPython implementation of getnameinfo() based on ctypes.
This is a rewrite of the CPython source: Modules/getaddrinfo.c
"""
from ctypes import POINTER, sizeof, cast, pointer
from pypy.rlib import _rsocket_ctypes as _c
from pypy.rlib.rsocket import RSocketError, GAIError
# Flag bits accepted by getnameinfo(), mirroring the NI_* constants
# from <netdb.h>.
NI_NOFQDN = 0x00000001
NI_NUMERICHOST = 0x00000002
NI_NAMEREQD = 0x00000004
NI_NUMERICSERV = 0x00000008
NI_DGRAM = 0x00000010
def _getservicename(sin_port, flags):
    """Map a (network-order) port number to a service name via
    getservbyport(), falling back to the decimal port number when the
    flags request numeric output or no service entry exists."""
    if flags & NI_NUMERICSERV:
        entry = None
    else:
        if flags & NI_DGRAM:
            proto = "udp"
        else:
            proto = "tcp"
        entry = _c.getservbyport(sin_port, proto)
    if entry:
        return entry.contents.s_name
    return "%d" % _c.ntohs(sin_port)
def getnameinfo(_addr, flags):
    """Resolve `_addr` (an address wrapper exposing a `.addr` sockaddr)
    into a (host, service) string pair, honouring the NI_* `flags`.

    Only AF_INET addresses are supported; anything else raises
    RSocketError.  GAIError is imported by this module for callers but
    not raised directly here.
    """
    addr = _addr.addr
    if addr.sa_family != _c.AF_INET:
        raise RSocketError("unknown address family")
    # reinterpret the generic sockaddr as a sockaddr_in to reach the
    # IPv4-specific port and address fields
    sockaddr = cast(pointer(addr), POINTER(_c.sockaddr_in)).contents
    sin_port = sockaddr.sin_port
    sin_addr = sockaddr.sin_addr
    v4a = _c.ntohl(sin_addr.s_addr)
    # multicast / experimental addresses never have reverse DNS
    # entries, so force numeric output for them
    if (v4a & 0xf0000000 == 0xe0000000 or # IN_MULTICAST()
        v4a & 0xe0000000 == 0xe0000000): # IN_EXPERIMENTAL()
        flags |= NI_NUMERICHOST
    # XXX Why does this work in CPython?
    # v4a >>= 24 # = IN_CLASSA_NSHIFT
    # if v4a in (0, 127): # = IN_LOOPBACKNET
    # flags |= NI_NUMERICHOST
    # NOTE(review): `numsize` is computed but never used below
    numsize = _c.INET_ADDRSTRLEN
    serv = _getservicename(sin_port, flags)
    if not (flags & NI_NUMERICHOST):
        # try a reverse DNS lookup of the binary IPv4 address
        hostent = _c.gethostbyaddr(pointer(sin_addr), sizeof(_c.in_addr), addr.sa_family)
    else:
        hostent = None
    if hostent:
        from pypy.rlib.rsocket import gethost_common
        host, _, _ = gethost_common("", hostent)
    else:
        # no (or suppressed) reverse entry: dotted-quad representation
        from pypy.rlib.rsocket import copy_buffer
        host = _c.inet_ntoa(sin_addr)
        #buf = copy_buffer(ptr, len(ptr))
        #host = buf.raw
    return host, serv
| Python |
# this code is a version of the mersenne twister random number generator which
# is supposed to be used from RPython without the Python interpreter wrapping
# machinery etc.
# this is stolen from CPython's _randommodule.c
from pypy.rlib.rarithmetic import r_uint
# MT19937 generator parameters and tempering/seeding constants,
# matching CPython's _randommodule.c.
N = 624
M = 397
MATRIX_A = r_uint(0x9908b0df)   # constant vector a
UPPER_MASK  = r_uint(0x80000000) # most significant w-r bits
LOWER_MASK = r_uint(0x7fffffff) # least significant r bits
MASK_32 = r_uint(0xffffffff)
TEMPERING_MASK_A = r_uint(0x9d2c5680)
TEMPERING_MASK_B = r_uint(0xefc60000)
MAGIC_CONSTANT_A = r_uint(1812433253)
MAGIC_CONSTANT_B = r_uint(19650218)
MAGIC_CONSTANT_C = r_uint(1664525)
MAGIC_CONSTANT_D = r_uint(1566083941)
class Random(object):
    """Mersenne Twister (MT19937) pseudo-random number generator.

    A direct port of CPython's _randommodule.c so that it can be used
    from RPython without the interpreter wrapping machinery.
    """

    def __init__(self, seed=r_uint(0)):
        # 624-word generator state plus the index of the next word to
        # temper and return; index == N means "state needs regenerating"
        self.state = [r_uint(0)] * N
        self.index = 0
        if seed:
            self.init_genrand(seed)

    def init_genrand(self, s):
        """Seed the state array from the single 32-bit integer `s`."""
        mt = self.state
        mt[0]= s & MASK_32
        for mti in range(1, N):
            mt[mti] = (MAGIC_CONSTANT_A *
                           (mt[mti - 1] ^ (mt[mti - 1] >> 30)) + mti)
            # See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier.
            # In the previous versions, MSBs of the seed affect
            # only MSBs of the array mt[].
            # for >32 bit machines
            mt[mti] &= MASK_32
        self.index = N

    def init_by_array(self, init_key):
        """Seed the state from `init_key`, a list of 32-bit words of
        arbitrary length (used for seeding with big integers)."""
        key_length = len(init_key)
        mt = self.state
        self.init_genrand(MAGIC_CONSTANT_B)
        i = 1
        j = 0
        # first pass: mix every key word into the state at least once
        if N > key_length:
            max_k = N
        else:
            max_k = key_length
        for k in range(max_k, 0, -1):
            mt[i] = ((mt[i] ^
                         ((mt[i - 1] ^ (mt[i - 1] >> 30)) * MAGIC_CONSTANT_C))
                     + init_key[j] + j) # non linear
            mt[i] &= MASK_32 # for WORDSIZE > 32 machines
            i += 1
            j += 1
            if i >= N:
                mt[0] = mt[N - 1]
                i = 1
            if j >= key_length:
                j = 0
        # second pass: additional mixing over the whole state
        for k in range(N - 1, 0, -1):
            mt[i] = ((mt[i] ^
                         ((mt[i - 1] ^ (mt[i - 1] >> 30)) * MAGIC_CONSTANT_D))
                     - i) # non linear
            mt[i] &= MASK_32 # for WORDSIZE > 32 machines
            i += 1
            if (i>=N):
                mt[0] = mt[N - 1]
                i = 1
        # force the MSB of word 0 to 1: guarantees a non-zero state
        mt[0] = UPPER_MASK

    def genrand32(self):
        """Return the next raw 32-bit word (tempered output)."""
        # mag01[x] = x * MATRIX_A for x in {0, 1}
        mag01 = [0, MATRIX_A]
        mt = self.state
        if self.index >= N:
            # regenerate all N words of the state in one batch
            for kk in range(N - M):
                y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK)
                mt[kk] = mt[kk+M] ^ (y >> 1) ^ mag01[y & r_uint(1)]
            for kk in range(N - M, N - 1):
                y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK)
                mt[kk] = mt[kk + (M - N)] ^ (y >> 1) ^ mag01[y & r_uint(1)]
            y = (mt[N - 1] & UPPER_MASK) | (mt[0] & LOWER_MASK)
            mt[N - 1] = mt[M - 1] ^ (y >> 1) ^ mag01[y & r_uint(1)]
            self.index = 0
        y = mt[self.index]
        self.index += 1
        # tempering transform, improves equidistribution of the output
        y ^= y >> 11
        y ^= (y << 7) & TEMPERING_MASK_A
        y ^= (y << 15) & TEMPERING_MASK_B
        y ^= (y >> 18)
        return y

    def random(self):
        """Return a uniform float in [0.0, 1.0) built from 53 random
        bits (27 high bits + 26 low bits, as in CPython)."""
        a = self.genrand32() >> 5
        b = self.genrand32() >> 6
        return (a * 67108864.0 + b) * (1.0 / 9007199254740992.0)

    def jumpahead(self, n):
        # NOTE(review): as in CPython 2.x, this does not compute a true
        # 2**k jump of the MT sequence; it shuffles/perturbs the state
        # as a function of n so parallel generators decorrelate.
        mt = self.state
        for i in range(N - 1, 0, -1):
            j = n % i
            mt[i], mt[j] = mt[j], mt[i]
        for i in range(N):
            mt[i] += i + 1
        self.index = N
| Python |
import os
from pypy.rpython.rctypes.tool import ctypes_platform
from pypy.rpython.rctypes.tool import util # ctypes.util from 0.9.9.6
# Not used here, but exported for other code.
from pypy.rpython.rctypes.aerrno import geterrno
from ctypes import c_ushort, c_int, c_uint, c_char_p, c_void_p, c_char, c_ubyte
from ctypes import c_short, c_long, c_ulong
from ctypes import POINTER, ARRAY, cdll, sizeof, SetPointerType
from pypy.rlib.rarithmetic import intmask, r_uint
# Also not used here, but exported for other code.
from ctypes import cast, pointer, create_string_buffer
# Select the platform-specific set of C headers that the
# ctypes_platform configuration below is compiled against.
_POSIX = os.name == "posix"
_MS_WINDOWS = os.name == "nt"

if _POSIX:
    includes = ('sys/types.h',
                'sys/socket.h',
                'sys/un.h',
                'sys/poll.h',
                'sys/select.h',
                'netinet/in.h',
                'netinet/tcp.h',
                'unistd.h',
                'fcntl.h',
                'stdio.h',
                'netdb.h',
                'arpa/inet.h',
                'stdint.h',
                'errno.h',
                )
    # headers only included when the guarding C symbol is defined
    cond_includes = [('AF_NETLINK', 'linux/netlink.h')]

    HEADER = ''.join(['#include <%s>\n' % filename for filename in includes])
    COND_HEADER = ''.join(['#ifdef %s\n#include <%s>\n#endif\n' % cond_include
                           for cond_include in cond_includes])
if _MS_WINDOWS:
    HEADER = '\n'.join([
        '#include <WinSock2.h>',
        '#include <WS2tcpip.h>',
        # winsock2 defines AF_UNIX, but not sockaddr_un
        '#undef AF_UNIX',
        # these types do not exist on windows
        'typedef int ssize_t;',
        'typedef unsigned __int16 uint16_t;',
        'typedef unsigned __int32 uint32_t;',
        ])
    COND_HEADER = ''
# accumulates the name -> value constant mapping exported at the bottom
# of this module
constants = {}

class CConfig:
    """Declarative description of the C-level constants and types this
    module needs; attributes are added below and the whole thing is
    resolved in one shot by ctypes_platform.configure()."""
    _header_ = HEADER + COND_HEADER
    # constants
    linux = ctypes_platform.Defined('linux')
    MS_WINDOWS = ctypes_platform.Defined('_WIN32')

    O_NONBLOCK = ctypes_platform.DefinedConstantInteger('O_NONBLOCK')
    F_GETFL = ctypes_platform.DefinedConstantInteger('F_GETFL')
    F_SETFL = ctypes_platform.DefinedConstantInteger('F_SETFL')
    FIONBIO = ctypes_platform.DefinedConstantInteger('FIONBIO')

    INVALID_SOCKET = ctypes_platform.DefinedConstantInteger('INVALID_SOCKET')
    INET_ADDRSTRLEN = ctypes_platform.DefinedConstantInteger('INET_ADDRSTRLEN')
    INET6_ADDRSTRLEN= ctypes_platform.DefinedConstantInteger('INET6_ADDRSTRLEN')
    EINPROGRESS = ctypes_platform.DefinedConstantInteger('EINPROGRESS')
    WSAEINPROGRESS = ctypes_platform.DefinedConstantInteger('WSAEINPROGRESS')
    EWOULDBLOCK = ctypes_platform.DefinedConstantInteger('EWOULDBLOCK')
    WSAEWOULDBLOCK = ctypes_platform.DefinedConstantInteger('WSAEWOULDBLOCK')
    EAFNOSUPPORT = ctypes_platform.DefinedConstantInteger('EAFNOSUPPORT')
    WSAEAFNOSUPPORT = ctypes_platform.DefinedConstantInteger('WSAEAFNOSUPPORT')
# Names of C constants probed from the headers; any name the platform
# does not define is simply omitted from `constants`.
# Fixes in this table: AF_DECnet was misspelled "AD_DECnet" and
# POLLWRBAND was misspelled "POLLWEBAND" (neither bogus name is defined
# by any header, so the corresponding constants could never be
# exported before).
constant_names = '''
AF_AAL5 AF_APPLETALK AF_ASH AF_ATMPVC AF_ATMSVC AF_AX25 AF_BLUETOOTH AF_BRIDGE
AF_DECnet AF_ECONET AF_INET AF_INET6 AF_IPX AF_IRDA AF_KEY AF_LLC AF_NETBEUI
AF_NETLINK AF_NETROM AF_PACKET AF_PPPOX AF_ROSE AF_ROUTE AF_SECURITY AF_SNA
AF_UNIX AF_WANPIPE AF_X25 AF_UNSPEC

AI_ADDRCONFIG AI_ALL AI_CANONNAME AI_DEFAULT AI_MASK AI_NUMERICHOST
AI_NUMERICSERV AI_PASSIVE AI_V4MAPPED AI_V4MAPPED_CFG

BTPROTO_L2CAP BTPROTO_SCO BTPROTO_RFCOMM

EAI_ADDRFAMILY EAI_AGAIN EAI_BADFLAGS EAI_BADHINTS EAI_FAIL EAI_FAMILY EAI_MAX
EAI_MEMORY EAI_NODATA EAI_NONAME EAI_OVERFLOW EAI_PROTOCOL EAI_SERVICE
EAI_SOCKTYPE EAI_SYSTEM

IPPROTO_AH IPPROTO_BIP IPPROTO_DSTOPTS IPPROTO_EGP IPPROTO_EON IPPROTO_ESP
IPPROTO_FRAGMENT IPPROTO_GGP IPPROTO_GRE IPPROTO_HELLO IPPROTO_HOPOPTS
IPPROTO_ICMPV6 IPPROTO_IDP IPPROTO_IGMP IPPROTO_IPCOMP IPPROTO_IPIP
IPPROTO_IPV4 IPPROTO_IPV6 IPPROTO_MAX IPPROTO_MOBILE IPPROTO_ND IPPROTO_NONE
IPPROTO_PIM IPPROTO_PUP IPPROTO_ROUTING IPPROTO_RSVP IPPROTO_TCP IPPROTO_TP
IPPROTO_VRRP IPPROTO_XTP

IPV6_CHECKSUM IPV6_DONTFRAG IPV6_DSTOPTS IPV6_HOPLIMIT IPV6_HOPOPTS
IPV6_JOIN_GROUP IPV6_LEAVE_GROUP IPV6_MULTICAST_HOPS IPV6_MULTICAST_IF
IPV6_MULTICAST_LOOP IPV6_NEXTHOP IPV6_PATHMTU IPV6_PKTINFO IPV6_RECVDSTOPTS
IPV6_RECVHOPLIMIT IPV6_RECVHOPOPTS IPV6_RECVPATHMTU IPV6_RECVPKTINFO
IPV6_RECVRTHDR IPV6_RECVTCLASS IPV6_RTHDR IPV6_RTHDRDSTOPTS IPV6_RTHDR_TYPE_0
IPV6_TCLASS IPV6_UNICAST_HOPS IPV6_USE_MIN_MTU IPV6_V6ONLY

IP_ADD_MEMBERSHIP IP_DEFAULT_MULTICAST_LOOP IP_DEFAULT_MULTICAST_TTL
IP_DROP_MEMBERSHIP IP_HDRINCL IP_MAX_MEMBERSHIPS IP_MULTICAST_IF
IP_MULTICAST_LOOP IP_MULTICAST_TTL IP_OPTIONS IP_RECVDSTADDR IP_RECVOPTS
IP_RECVRETOPTS IP_RETOPTS IP_TOS IP_TTL

MSG_BTAG MSG_ETAG MSG_CTRUNC MSG_DONTROUTE MSG_DONTWAIT MSG_EOR MSG_OOB
MSG_PEEK MSG_TRUNC MSG_WAITALL

NI_DGRAM NI_MAXHOST NI_MAXSERV NI_NAMEREQD NI_NOFQDN NI_NUMERICHOST
NI_NUMERICSERV

NETLINK_ROUTE NETLINK_SKIP NETLINK_W1 NETLINK_USERSOCK NETLINK_FIREWALL
NETLINK_TCPDIAG NETLINK_NFLOG NETLINK_XFRM NETLINK_ARPD NETLINK_ROUTE6
NETLINK_IP6_FW NETLINK_DNRTMSG NETLINK_TAPBASE

PACKET_HOST PACKET_BROADCAST PACKET_MULTICAST PACKET_OTHERHOST PACKET_OUTGOING
PACKET_LOOPBACK PACKET_FASTROUTE

SOCK_DGRAM SOCK_RAW SOCK_RDM SOCK_SEQPACKET SOCK_STREAM

SOL_SOCKET SOL_IPX SOL_AX25 SOL_ATALK SOL_NETROM SOL_ROSE

SO_ACCEPTCONN SO_BROADCAST SO_DEBUG SO_DONTROUTE SO_ERROR SO_EXCLUSIVEADDRUSE
SO_KEEPALIVE SO_LINGER SO_OOBINLINE SO_RCVBUF SO_RCVLOWAT SO_RCVTIMEO
SO_REUSEADDR SO_REUSEPORT SO_SNDBUF SO_SNDLOWAT SO_SNDTIMEO SO_TYPE
SO_USELOOPBACK

TCP_CORK TCP_DEFER_ACCEPT TCP_INFO TCP_KEEPCNT TCP_KEEPIDLE TCP_KEEPINTVL
TCP_LINGER2 TCP_MAXSEG TCP_NODELAY TCP_QUICKACK TCP_SYNCNT TCP_WINDOW_CLAMP

IPX_TYPE

POLLIN POLLPRI POLLOUT POLLERR POLLHUP POLLNVAL
POLLRDNORM POLLRDBAND POLLWRNORM POLLWRBAND POLLMSG

FD_READ FD_WRITE FD_ACCEPT FD_CONNECT FD_CLOSE
WSA_WAIT_TIMEOUT WSA_WAIT_FAILED INFINITE
FD_CONNECT_BIT FD_CLOSE_BIT
WSA_IO_PENDING WSA_IO_INCOMPLETE WSA_INVALID_HANDLE
WSA_INVALID_PARAMETER WSA_NOT_ENOUGH_MEMORY WSA_OPERATION_ABORTED
'''.split()

for name in constant_names:
    setattr(CConfig, name, ctypes_platform.DefinedConstantInteger(name))

# Bluetooth wildcard addresses have no portable numeric form; export
# them as the conventional strings.
constants["BDADDR_ANY"] =  "00:00:00:00:00:00"
constants["BDADDR_LOCAL"] = "00:00:00:FF:FF:FF"

# Constants that get a fallback default when the platform headers do
# not define them.  Fix: the IPPROTO_IP default was 6, which is
# IPPROTO_TCP's value; POSIX <netinet/in.h> defines IPPROTO_IP as 0
# (this is also the default CPython's socketmodule.c uses).
constants_w_defaults = [('SOL_IP', 0),
                        ('SOL_TCP', 6),
                        ('SOL_UDP', 17),
                        ('SOMAXCONN', 5),
                        ('IPPROTO_IP', 0),
                        ('IPPROTO_ICMP', 1),
                        ('IPPROTO_TCP', 6),
                        ('IPPROTO_UDP', 17),
                        ('IPPROTO_RAW', 255),
                        ('IPPORT_RESERVED', 1024),
                        ('IPPORT_USERRESERVED', 5000),
                        ('INADDR_ANY', 0x00000000),
                        ('INADDR_BROADCAST', 0xffffffff),
                        ('INADDR_LOOPBACK', 0x7F000001),
                        ('INADDR_UNSPEC_GROUP', 0xe0000000),
                        ('INADDR_ALLHOSTS_GROUP', 0xe0000001),
                        ('INADDR_MAX_LOCAL_GROUP', 0xe00000ff),
                        ('INADDR_NONE', 0xffffffff),
                        ('SHUT_RD', 0),
                        ('SHUT_WR', 1),
                        ('SHUT_RDWR', 2),
                        ('POLLIN', 1),
                        ('POLLPRI', 2),
                        ('POLLOUT', 4),
                        ('POLLERR', 8),
                        ('POLLHUP', 16),
                        ]
for name, default in constants_w_defaults:
    setattr(CConfig, name, ctypes_platform.DefinedConstantInteger(name))
# types -- C integer typedefs resolved to their actual ctypes type
CConfig.uint16_t = ctypes_platform.SimpleType('uint16_t', c_ushort)
CConfig.uint32_t = ctypes_platform.SimpleType('uint32_t', c_uint)
CConfig.size_t = ctypes_platform.SimpleType('size_t', c_int)
CConfig.ssize_t = ctypes_platform.SimpleType('ssize_t', c_int)
CConfig.socklen_t = ctypes_platform.SimpleType('socklen_t', c_int)

# on Windows a socket descriptor is an unsigned SOCKET, not a plain fd
if _MS_WINDOWS:
    socketfd_type = c_uint
else:
    socketfd_type = c_int

# struct types -- only the fields this module actually accesses are
# declared; zero-length arrays mark variable-length trailing data
CConfig.sockaddr = ctypes_platform.Struct('struct sockaddr',
                                          [('sa_family', c_int),
                                           ('sa_data', c_char * 0)])
sockaddr_ptr = POINTER('sockaddr')
CConfig.in_addr = ctypes_platform.Struct('struct in_addr',
                                         [('s_addr', c_uint)])
CConfig.in6_addr = ctypes_platform.Struct('struct in6_addr',
                                          [])
CConfig.sockaddr_in = ctypes_platform.Struct('struct sockaddr_in',
                                             [('sin_family', c_int),
                                              ('sin_port', c_ushort),
                                              ('sin_addr', CConfig.in_addr)])
CConfig.sockaddr_in6 = ctypes_platform.Struct('struct sockaddr_in6',
                                              [('sin6_family', c_int),
                                               ('sin6_port', c_ushort),
                                               ('sin6_addr', CConfig.in6_addr),
                                               ('sin6_flowinfo', c_int),
                                               ('sin6_scope_id', c_int)])
CConfig.sockaddr_un = ctypes_platform.Struct('struct sockaddr_un',
                                             [('sun_family', c_int),
                                              ('sun_path', c_ubyte * 0)],
                                             ifdef='AF_UNIX')
CConfig.sockaddr_nl = ctypes_platform.Struct('struct sockaddr_nl',
                                             [('nl_family', c_int),
                                              ('nl_pid', c_int),
                                              ('nl_groups', c_int)],
                                             ifdef='AF_NETLINK')
# forward-declared pointer: addrinfo is self-referential via ai_next
addrinfo_ptr = POINTER("addrinfo")
CConfig.addrinfo = ctypes_platform.Struct('struct addrinfo',
                                          [('ai_flags', c_int),
                                           ('ai_family', c_int),
                                           ('ai_socktype', c_int),
                                           ('ai_protocol', c_int),
                                           ('ai_addrlen', c_int),
                                           ('ai_addr', sockaddr_ptr),
                                           ('ai_canonname', c_char_p),
                                           ('ai_next', addrinfo_ptr)])
CConfig.hostent = ctypes_platform.Struct('struct hostent',
                                         [('h_name', c_char_p),
                                          ('h_aliases', POINTER(c_char_p)),
                                          ('h_addrtype', c_int),
                                          ('h_length', c_int),
                                          ('h_addr_list', POINTER(c_void_p)),
                                          ])
CConfig.servent = ctypes_platform.Struct('struct servent',
                                         [('s_name', c_char_p),
                                          ('s_port', c_int),
                                          ('s_proto', c_char_p),
                                          ])
CConfig.protoent = ctypes_platform.Struct('struct protoent',
                                          [('p_proto', c_int),
                                           ])

if _POSIX:
    CConfig.nfds_t = ctypes_platform.SimpleType('nfds_t')
    CConfig.pollfd = ctypes_platform.Struct('struct pollfd',
                                            [('fd', socketfd_type),
                                             ('events', c_short),
                                             ('revents', c_short)])

if _MS_WINDOWS:
    CConfig.WSAEVENT = ctypes_platform.SimpleType('WSAEVENT', c_void_p)
    CConfig.WSANETWORKEVENTS = ctypes_platform.Struct(
        'struct _WSANETWORKEVENTS',
        [('lNetworkEvents', c_long),
         ('iErrorCode', c_int * 10), #FD_MAX_EVENTS
        ])

CConfig.timeval = ctypes_platform.Struct('struct timeval',
                                         [('tv_sec', c_long),
                                          ('tv_usec', c_long)])

if _MS_WINDOWS:
    CConfig.fd_set = ctypes_platform.Struct('struct fd_set',
                                            [('fd_count', c_uint),
                                             # XXX use FD_SETSIZE
                                             ('fd_array', socketfd_type * 64)])

if _MS_WINDOWS:
    CConfig.WSAData = ctypes_platform.Struct('struct WSAData',
                                             [('wVersion', c_ushort),
                                              ('wHighVersion', c_ushort),
                                              ('szDescription', c_char * 1), # (WSADESCRIPTION_LEN+1)
                                              ('szSystemStatus', c_char * 1), # (WSASYS_STATUS_LEN+1)
                                              ('iMaxSockets', c_ushort),
                                              ('iMaxUdpDg', c_ushort),
                                              ('lpVendorInfo', c_char_p)])

class cConfig:
    # plain namespace object that receives the configure() results
    pass
# run the C-level probe once and store every result as a class attribute
cConfig.__dict__.update(ctypes_platform.configure(CConfig))
# HACK HACK HACK
# On Windows, rename the configured in6_addr struct (to avoid clashing with
# the system's declaration) and mark every other configured Structure as
# externally declared.
if _MS_WINDOWS:
    from ctypes import Structure
    for struct in cConfig.__dict__.values():
        if isinstance(struct, type) and issubclass(struct, Structure):
            if struct.__name__ == 'in6_addr':
                struct.__name__ = '_in6_addr'
            else:
                struct._external_ = True       # hack to avoid redeclaration of the struct in C
# fill in missing constants with reasonable defaults
cConfig.NI_MAXHOST = cConfig.NI_MAXHOST or 1025
cConfig.NI_MAXSERV = cConfig.NI_MAXSERV or 32
cConfig.INET_ADDRSTRLEN = cConfig.INET_ADDRSTRLEN or 16
# copy every probed constant (skipping ones the platform lacks) ...
for name in constant_names:
    value = getattr(cConfig, name)
    if value is not None:
        constants[name] = value
# ... and the ones that have a fallback default value
for name, default in constants_w_defaults:
    value = getattr(cConfig, name)
    if value is not None:
        constants[name] = value
    else:
        constants[name] = default
constants['has_ipv6'] = True # This is a configuration option in CPython
# squash Python 2 longs that still fit an unsigned machine word back into
# (possibly negative) ints, as C would see them
for name, value in constants.items():
    if isinstance(value, long):
        if r_uint(value) == value:
            constants[name] = intmask(value)
# re-export every constant as a module-level name
locals().update(constants)
# frequently-used constants, re-exported under their plain names
O_NONBLOCK = cConfig.O_NONBLOCK
F_GETFL = cConfig.F_GETFL
F_SETFL = cConfig.F_SETFL
FIONBIO = cConfig.FIONBIO
INET_ADDRSTRLEN = cConfig.INET_ADDRSTRLEN
INET6_ADDRSTRLEN = cConfig.INET6_ADDRSTRLEN
# on Windows only the WSAE* variants exist, hence the 'or' fallbacks
EINPROGRESS = cConfig.EINPROGRESS or cConfig.WSAEINPROGRESS
EWOULDBLOCK = cConfig.EWOULDBLOCK or cConfig.WSAEWOULDBLOCK
EAFNOSUPPORT = cConfig.EAFNOSUPPORT or cConfig.WSAEAFNOSUPPORT
linux = cConfig.linux
MS_WINDOWS = cConfig.MS_WINDOWS
assert MS_WINDOWS == _MS_WINDOWS
# invalid_socket(fd): platform test for a failed socket()/accept() return;
# Windows uses the special INVALID_SOCKET value, POSIX any negative fd
if MS_WINDOWS:
    def invalid_socket(fd):
        return fd == INVALID_SOCKET
    INVALID_SOCKET = cConfig.INVALID_SOCKET
else:
    def invalid_socket(fd):
        return fd < 0
    INVALID_SOCKET = -1
# re-export the configured C types under their plain names
uint16_t = cConfig.uint16_t
uint32_t = cConfig.uint32_t
size_t = cConfig.size_t
ssize_t = cConfig.ssize_t
socklen_t = cConfig.socklen_t
sockaddr = cConfig.sockaddr
sockaddr_size = sizeof(sockaddr)
sockaddr_in = cConfig.sockaddr_in
sockaddr_in6 = cConfig.sockaddr_in6
sockaddr_un = cConfig.sockaddr_un
# sockaddr_nl only exists where AF_NETLINK was configured
if cConfig.sockaddr_nl is not None:
    sockaddr_nl = cConfig.sockaddr_nl
in_addr = cConfig.in_addr
in_addr_size = sizeof(in_addr)
in6_addr = cConfig.in6_addr
addrinfo = cConfig.addrinfo
if _POSIX:
    nfds_t = cConfig.nfds_t
    pollfd = cConfig.pollfd
if MS_WINDOWS:
    WSAEVENT = cConfig.WSAEVENT
    WSANETWORKEVENTS = cConfig.WSANETWORKEVENTS
timeval = cConfig.timeval
if MS_WINDOWS:
    fd_set = cConfig.fd_set
c_int_size = sizeof(c_int)
# resolve the incomplete pointer types declared earlier
SetPointerType(addrinfo_ptr, addrinfo)
SetPointerType(sockaddr_ptr, sockaddr)
# functions
# Load the library exporting the socket API: ws2_32.dll (stdcall) on
# Windows, the plain C library everywhere else.
if MS_WINDOWS:
    from ctypes import windll
    dllname = util.find_library('ws2_32')
    assert dllname is not None
    socketdll = windll.LoadLibrary(dllname)
else:
    dllname = util.find_library('c')
    assert dllname is not None
    socketdll = cdll.LoadLibrary(dllname)
if _POSIX:
    dup = socketdll.dup
    dup.argtypes = [socketfd_type]
    dup.restype = socketfd_type
#errno = c_int.in_dll(socketdll, 'errno')
# error-message helpers (POSIX only; Windows uses WIN32_ERROR_MESSAGES below)
if _POSIX:
    strerror = socketdll.strerror
    strerror.argtypes = [c_int]
    strerror.restype = c_char_p
gai_strerror = socketdll.gai_strerror
gai_strerror.argtypes = [c_int]
gai_strerror.restype = c_char_p
#h_errno = c_int.in_dll(socketdll, 'h_errno')
#
#hstrerror = socketdll.hstrerror
#hstrerror.argtypes = [c_int]
#hstrerror.restype = c_char_p
# socket creation / teardown
socket = socketdll.socket
socket.argtypes = [c_int, c_int, c_int]
socket.restype = socketfd_type
# Windows closes sockets with closesocket(), not close()
if MS_WINDOWS:
    socketclose = socketdll.closesocket
else:
    socketclose = socketdll.close
socketclose.argtypes = [socketfd_type]
socketclose.restype = c_int
socketconnect = socketdll.connect
socketconnect.argtypes = [socketfd_type, sockaddr_ptr, socklen_t]
socketconnect.restype = c_int
# name resolution (POSIX prototypes; not bound on Windows here)
if not MS_WINDOWS:
    getaddrinfo = socketdll.getaddrinfo
    getaddrinfo.argtypes = [c_char_p, c_char_p, addrinfo_ptr,
                            POINTER(addrinfo_ptr)]
    getaddrinfo.restype = c_int
    freeaddrinfo = socketdll.freeaddrinfo
    freeaddrinfo.argtypes = [addrinfo_ptr]
    freeaddrinfo.restype = None
    getnameinfo = socketdll.getnameinfo
    getnameinfo.argtypes = [sockaddr_ptr, socklen_t,
                            c_char_p, size_t,
                            c_char_p, size_t, c_int]
    getnameinfo.restype = c_int
# byte-order conversion helpers
htonl = socketdll.htonl
htonl.argtypes = [uint32_t]
htonl.restype = uint32_t
htons = socketdll.htons
htons.argtypes = [uint16_t]
htons.restype = uint16_t
ntohl = socketdll.ntohl
ntohl.argtypes = [uint32_t]
ntohl.restype = uint32_t
ntohs = socketdll.ntohs
ntohs.argtypes = [uint16_t]
ntohs.restype = uint16_t
# textual <-> binary address conversion
if _POSIX:
    inet_aton = socketdll.inet_aton
    inet_aton.argtypes = [c_char_p, POINTER(in_addr)]
    inet_aton.restype = c_int
inet_ntoa = socketdll.inet_ntoa
inet_ntoa.argtypes = [in_addr]
inet_ntoa.restype = c_char_p
if _POSIX:
    inet_pton = socketdll.inet_pton
    inet_pton.argtypes = [c_int, c_char_p, c_void_p]
    inet_pton.restype = c_int
    inet_ntop = socketdll.inet_ntop
    inet_ntop.argtypes = [c_int, c_void_p, c_char_p, socklen_t]
    inet_ntop.restype = c_char_p
inet_addr = socketdll.inet_addr
inet_addr.argtypes = [c_char_p]
inet_addr.restype = c_uint
# server-side primitives; prefixed 'socket*' to avoid clashing with the
# Python-level wrappers defined elsewhere
socketaccept = socketdll.accept
socketaccept.argtypes = [socketfd_type, sockaddr_ptr, POINTER(socklen_t)]
socketaccept.restype = socketfd_type
socketbind = socketdll.bind
socketbind.argtypes = [socketfd_type, sockaddr_ptr, socklen_t]
socketbind.restype = c_int
socketlisten = socketdll.listen
socketlisten.argtypes = [socketfd_type, c_int]
socketlisten.restype = c_int
socketgetpeername = socketdll.getpeername
socketgetpeername.argtypes = [socketfd_type, sockaddr_ptr, POINTER(socklen_t)]
socketgetpeername.restype = c_int
socketgetsockname = socketdll.getsockname
socketgetsockname.argtypes = [socketfd_type, sockaddr_ptr, POINTER(socklen_t)]
socketgetsockname.restype = c_int
socketgetsockopt = socketdll.getsockopt
socketgetsockopt.argtypes = [socketfd_type, c_int, c_int,
                             c_void_p, POINTER(socklen_t)]
socketgetsockopt.restype = c_int
socketsetsockopt = socketdll.setsockopt
socketsetsockopt.argtypes = [socketfd_type, c_int, c_int,
                             c_void_p, #this should be constant
                             socklen_t]
socketsetsockopt.restype = c_int
# recv(): returns the number of bytes received, or -1 on error
socketrecv = socketdll.recv
socketrecv.argtypes = [socketfd_type, c_void_p, c_int, c_int]
# BUG FIX: this line used to read "socketrecv.recv = ssize_t", which merely
# attached an unused attribute and left restype at the ctypes default
# (c_int) — truncating the ssize_t return value on LP64 platforms.
socketrecv.restype = ssize_t
recvfrom = socketdll.recvfrom
recvfrom.argtypes = [socketfd_type, c_void_p, size_t,
                     c_int, sockaddr_ptr, POINTER(socklen_t)]
recvfrom.restype = ssize_t
send = socketdll.send
send.argtypes = [socketfd_type,
                 c_void_p, #this should be constant
                 size_t, c_int]
send.restype = ssize_t
sendto = socketdll.sendto
sendto.argtypes = [socketfd_type, c_void_p, #this should be constant
                   size_t, c_int, sockaddr_ptr, #this should be const
                   socklen_t]
sendto.restype = ssize_t
socketshutdown = socketdll.shutdown
socketshutdown.argtypes = [socketfd_type, c_int]
socketshutdown.restype = c_int
# host / service / protocol database lookups
gethostname = socketdll.gethostname
gethostname.argtypes = [c_char_p, c_int]
gethostname.restype = c_int
gethostbyname = socketdll.gethostbyname
gethostbyname.argtypes = [c_char_p]
gethostbyname.restype = POINTER(cConfig.hostent)
gethostbyaddr = socketdll.gethostbyaddr
gethostbyaddr.argtypes = [c_void_p, c_int, c_int]
gethostbyaddr.restype = POINTER(cConfig.hostent)
getservbyname = socketdll.getservbyname
getservbyname.argtypes = [c_char_p, c_char_p]
getservbyname.restype = POINTER(cConfig.servent)
getservbyport = socketdll.getservbyport
getservbyport.argtypes = [c_int, c_char_p]
getservbyport.restype = POINTER(cConfig.servent)
getprotobyname = socketdll.getprotobyname
getprotobyname.argtypes = [c_char_p]
getprotobyname.restype = POINTER(cConfig.protoent)
# non-blocking mode control: fcntl()/socketpair() on POSIX,
# ioctlsocket() on Windows
if _POSIX:
    fcntl = socketdll.fcntl
    fcntl.argtypes = [socketfd_type, c_int, c_int]
    fcntl.restype = c_int
    socketpair_t = ARRAY(socketfd_type, 2)
    socketpair = socketdll.socketpair
    socketpair.argtypes = [c_int, c_int, c_int, POINTER(socketpair_t)]
    socketpair.restype = c_int
if _MS_WINDOWS:
    ioctlsocket = socketdll.ioctlsocket
    ioctlsocket.argtypes = [socketfd_type, c_long, POINTER(c_ulong)]
    ioctlsocket.restype = c_int
shutdown = socketdll.shutdown
shutdown.argtypes = [c_int, c_int]
shutdown.restype = c_int
# readiness polling: poll() on POSIX, select() + WSA event API on Windows
if _POSIX:
    poll = socketdll.poll
    poll.argtypes = [POINTER(pollfd), nfds_t, c_int]
    poll.restype = c_int
elif MS_WINDOWS:
    select = socketdll.select
    select.argtypes = [c_int,
                       POINTER(fd_set), POINTER(fd_set), POINTER(fd_set),
                       POINTER(timeval)]
    select.restype = c_int
    WSACreateEvent = socketdll.WSACreateEvent
    WSACreateEvent.argtypes = []
    WSACreateEvent.restype = WSAEVENT
    WSACloseEvent = socketdll.WSACloseEvent
    WSACloseEvent.argtypes = [WSAEVENT]
    WSACloseEvent.restype = c_int
    WSAEventSelect = socketdll.WSAEventSelect
    WSAEventSelect.argtypes = [socketfd_type, WSAEVENT, c_long]
    WSAEventSelect.restype = c_int
    WSAWaitForMultipleEvents = socketdll.WSAWaitForMultipleEvents
    WSAWaitForMultipleEvents.argtypes = [c_long, POINTER(WSAEVENT),
                                         c_int, c_long, c_int]
    WSAWaitForMultipleEvents.restype = c_long
    WSAEnumNetworkEvents = socketdll.WSAEnumNetworkEvents
    WSAEnumNetworkEvents.argtypes = [socketfd_type, WSAEVENT,
                                     POINTER(WSANETWORKEVENTS)]
    WSAEnumNetworkEvents.restype = c_int
if MS_WINDOWS:
    WSAData = cConfig.WSAData
    # WSAStartup() must run before any other winsock call
    WSAStartup = socketdll.WSAStartup
    WSAStartup.argtypes = [c_int, POINTER(WSAData)]
    WSAStartup.restype = c_int
    WSAStartup.libraries = ('ws2_32',)
    # winsock does not use the C errno; errors come from WSAGetLastError()
    WSAGetLastError = socketdll.WSAGetLastError
    WSAGetLastError.argtypes = []
    WSAGetLastError.restype = c_int
    geterrno = WSAGetLastError
    import errno
    # winsock error code -> human-readable message, used by socket_strerror()
    WIN32_ERROR_MESSAGES = {
        errno.WSAEINTR:  "Interrupted system call",
        errno.WSAEBADF:  "Bad file descriptor",
        errno.WSAEACCES: "Permission denied",
        errno.WSAEFAULT: "Bad address",
        errno.WSAEINVAL: "Invalid argument",
        errno.WSAEMFILE: "Too many open files",
        errno.WSAEWOULDBLOCK:
          "The socket operation could not complete without blocking",
        errno.WSAEINPROGRESS: "Operation now in progress",
        errno.WSAEALREADY: "Operation already in progress",
        errno.WSAENOTSOCK: "Socket operation on non-socket",
        errno.WSAEDESTADDRREQ: "Destination address required",
        errno.WSAEMSGSIZE: "Message too long",
        errno.WSAEPROTOTYPE: "Protocol wrong type for socket",
        errno.WSAENOPROTOOPT: "Protocol not available",
        errno.WSAEPROTONOSUPPORT: "Protocol not supported",
        errno.WSAESOCKTNOSUPPORT: "Socket type not supported",
        errno.WSAEOPNOTSUPP: "Operation not supported",
        errno.WSAEPFNOSUPPORT: "Protocol family not supported",
        errno.WSAEAFNOSUPPORT: "Address family not supported",
        errno.WSAEADDRINUSE: "Address already in use",
        errno.WSAEADDRNOTAVAIL: "Can't assign requested address",
        errno.WSAENETDOWN: "Network is down",
        errno.WSAENETUNREACH: "Network is unreachable",
        errno.WSAENETRESET: "Network dropped connection on reset",
        errno.WSAECONNABORTED: "Software caused connection abort",
        errno.WSAECONNRESET: "Connection reset by peer",
        errno.WSAENOBUFS: "No buffer space available",
        errno.WSAEISCONN: "Socket is already connected",
        errno.WSAENOTCONN: "Socket is not connected",
        errno.WSAESHUTDOWN: "Can't send after socket shutdown",
        errno.WSAETOOMANYREFS: "Too many references: can't splice",
        errno.WSAETIMEDOUT: "Operation timed out",
        errno.WSAECONNREFUSED: "Connection refused",
        errno.WSAELOOP: "Too many levels of symbolic links",
        errno.WSAENAMETOOLONG: "File name too long",
        errno.WSAEHOSTDOWN: "Host is down",
        errno.WSAEHOSTUNREACH: "No route to host",
        errno.WSAENOTEMPTY: "Directory not empty",
        errno.WSAEPROCLIM: "Too many processes",
        errno.WSAEUSERS: "Too many users",
        errno.WSAEDQUOT: "Disc quota exceeded",
        errno.WSAESTALE: "Stale NFS file handle",
        errno.WSAEREMOTE: "Too many levels of remote in path",
        # BUG FIX: message previously read "unvailable"
        errno.WSASYSNOTREADY: "Network subsystem is unavailable",
        errno.WSAVERNOTSUPPORTED: "WinSock version is not supported",
        errno.WSANOTINITIALISED: "Successful WSAStartup() not yet performed",
        errno.WSAEDISCON: "Graceful shutdown in progress",

        # Resolver errors
        # XXX Not exported by errno. Replace by the values in winsock.h
        # errno.WSAHOST_NOT_FOUND: "No such host is known",
        # errno.WSATRY_AGAIN: "Host not found, or server failed",
        # errno.WSANO_RECOVERY: "Unexpected server error encountered",
        # errno.WSANO_DATA: "Valid name without requested data",
        # errno.WSANO_ADDRESS: "No address, look for MX record",

        # select() errors
        WSA_IO_PENDING: "WSA_IO_PENDING",
        WSA_IO_INCOMPLETE: "WSA_IO_INCOMPLETE",
        WSA_INVALID_HANDLE: "WSA_INVALID_HANDLE",
        WSA_INVALID_PARAMETER: "WSA_INVALID_PARAMETER",
        WSA_NOT_ENOUGH_MEMORY: "WSA_NOT_ENOUGH_MEMORY",
        WSA_OPERATION_ABORTED: "WSA_OPERATION_ABORTED",
        }
    assert len(WIN32_ERROR_MESSAGES) == 53 # detect duplicates

    def socket_strerror(errno):
        """Return the message for the winsock error code *errno*."""
        return WIN32_ERROR_MESSAGES.get(errno, "winsock error %d" % errno)
else:
    def socket_strerror(errno):
        """Return the C library's strerror() text for *errno*."""
        return strerror(errno)
| Python |
"""
This file defines utilities for manipulating objects in an
RPython-compliant way.
"""
import sys, new
# specialize is a decorator factory for attaching _annspecialcase_
# attributes to functions: for example
#
# f._annspecialcase_ = 'specialize:memo' can be expressed with:
# @specialize.memo()
# def f(...
#
# f._annspecialcase_ = 'specialize:arg(0)' can be expressed with:
# @specialize.arg(0)
# def f(...
#
class _AttachSpecialization(object):
    """Returned by ``specialize.<tag>``; calling it builds the actual
    decorator that tags a function with an _annspecialcase_ string."""

    def __init__(self, tag):
        self.tag = tag

    def __call__(self, *args):
        if args:
            arglist = "(" + ",".join([repr(arg) for arg in args]) + ")"
        else:
            arglist = ""
        tagged = "specialize:%s%s" % (self.tag, arglist)
        def decorator(func):
            func._annspecialcase_ = tagged
            return func
        return decorator

class _Specialize(object):
    """Attribute access yields a decorator factory, so that e.g.
    ``@specialize.arg(0)`` sets func._annspecialcase_ = 'specialize:arg(0)'."""

    def __getattr__(self, name):
        return _AttachSpecialization(name)

specialize = _Specialize()
# ____________________________________________________________
class Symbolic(object):
    """Base class for values whose concrete representation is only fixed
    at translation time; they refuse ordering, hashing and truth-testing."""

    def annotation(self):
        return None

    def lltype(self):
        return None

    def __cmp__(self, other):
        if self is not other:
            raise TypeError("Symbolics can not be compared!")
        return 0

    def __hash__(self):
        raise TypeError("Symbolics are not hashable!")

    def __nonzero__(self):
        raise TypeError("Symbolics are not comparable")
class ComputedIntSymbolic(Symbolic):
    """A Symbolic integer obtained by calling *compute_fn* lazily at
    translation time; annotates/rtypes as a plain signed integer."""

    def __init__(self, compute_fn):
        # zero-argument callable producing the concrete integer
        self.compute_fn = compute_fn

    def annotation(self):
        from pypy.annotation import model as annmodel
        return annmodel.SomeInteger()

    def lltype(self):
        from pypy.rpython.lltypesystem import lltype as llt
        return llt.Signed
class CDefinedIntSymbolic(Symbolic):
    """A Symbolic integer rendered in generated C as the expression *expr*;
    *default* is the value used when running untranslated."""

    def __init__(self, expr, default=0):
        self.expr = expr
        self.default = default

    def annotation(self):
        from pypy.annotation import model as annmodel
        return annmodel.SomeInteger()

    def lltype(self):
        from pypy.rpython.lltypesystem import lltype as llt
        return llt.Signed

# true when the translated malloc returns zero-filled memory
malloc_zero_filled = CDefinedIntSymbolic('MALLOC_ZERO_FILLED', default=0)
# ____________________________________________________________
def instantiate(cls):
    """Create an empty instance of 'cls' without running its __init__
    (old-style classes go through the 'new' module)."""
    if isinstance(cls, type):
        return cls.__new__(cls)
    return new.instance(cls)
def we_are_translated():
    """Return False when running on top of CPython; the annotator turns
    calls to this into the constant True after translation."""
    return False

def keepalive_until_here(*values):
    """No-op that keeps *values* alive up to this program point (only
    meaningful to the translated program)."""
# ____________________________________________________________
class FREED_OBJECT(object):
    """Class swapped in for explicitly freed objects: every attribute
    read or write raises, catching use-after-free early."""

    def __getattribute__(self, name):
        raise RuntimeError("trying to access freed object")

    def __setattr__(self, name, value):
        raise RuntimeError("trying to access freed object")
def free_non_gc_object(obj):
    """Explicitly free a raw-allocated (non-GC-flavored) object: its state
    is wiped and its class becomes FREED_OBJECT so later use raises."""
    flavor = getattr(obj.__class__, "_alloc_flavor_", 'gc')
    assert not flavor.startswith('gc'), "trying to free gc object"
    obj.__dict__ = {}
    obj.__class__ = FREED_OBJECT
def cast_object_to_weakgcaddress(obj):
    """Wrap *obj* in a weak GC address (a fakeweakaddress while running
    untranslated on top of CPython)."""
    from pypy.rpython.lltypesystem.llmemory import fakeweakaddress
    return fakeweakaddress(obj)
def cast_weakgcaddress_to_object(address, expected_result):
    """Dereference a weak GC address: None for a NULL address, otherwise
    the still-alive object, checked to be an *expected_result* instance."""
    if address.ref is None:  # NULL address
        return None
    result = address.get()
    assert result is not None
    assert isinstance(result, expected_result)
    return result
from pypy.rpython.extregistry import ExtRegistryEntry
class Entry(ExtRegistryEntry):
    # Annotation/rtyping hook for cast_object_to_weakgcaddress: the call
    # annotates to a SomeWeakGcAddress and rtypes to the low-level
    # 'cast_ptr_to_weakadr' operation.
    _about_ = cast_object_to_weakgcaddress
    def compute_result_annotation(self, s_obj):
        from pypy.annotation import model as annmodel
        return annmodel.SomeWeakGcAddress()
    def specialize_call(self, hop):
        vlist = hop.inputargs(hop.args_r[0])
        return hop.genop('cast_ptr_to_weakadr', vlist,
                         resulttype=hop.r_result.lowleveltype)
class Entry(ExtRegistryEntry):
    # Annotation/rtyping hook for cast_weakgcaddress_to_object: the second
    # argument must be a single constant class; the result annotates to an
    # instance of that class (possibly None) and rtypes to the low-level
    # 'cast_weakadr_to_ptr' operation.
    _about_ = cast_weakgcaddress_to_object
    def compute_result_annotation(self, s_int, s_clspbc):
        from pypy.annotation import model as annmodel
        assert len(s_clspbc.descriptions) == 1
        desc = s_clspbc.descriptions.keys()[0]
        cdef = desc.getuniqueclassdef()
        return annmodel.SomeInstance(cdef, can_be_None=True)
    def specialize_call(self, hop):
        from pypy.rpython import raddress
        assert isinstance(hop.args_r[0], raddress.WeakGcAddressRepr)
        vlist = [hop.inputarg(hop.args_r[0], arg=0)]
        return hop.genop('cast_weakadr_to_ptr', vlist,
                         resulttype = hop.r_result.lowleveltype)
def cast_weakgcaddress_to_int(address):
    """Return an integer representation of a weak GC address; a NULL
    address maps to 0."""
    if address.ref is None:  # NULL address
        return 0
    return address.cast_to_int()
class Entry(ExtRegistryEntry):
    # Annotation/rtyping hook for cast_weakgcaddress_to_int: annotates to
    # an integer and rtypes to the low-level 'cast_weakadr_to_int'
    # operation.
    _about_ = cast_weakgcaddress_to_int
    def compute_result_annotation(self, s_int):
        # BUG FIX: 'annmodel' was referenced without being imported,
        # causing a NameError as soon as the annotator reached a call to
        # cast_weakgcaddress_to_int (cf. the sibling Entry above).
        from pypy.annotation import model as annmodel
        return annmodel.SomeInteger()
    def specialize_call(self, hop):
        from pypy.rpython import raddress
        assert isinstance(hop.args_r[0], raddress.WeakGcAddressRepr)
        vlist = [hop.inputarg(hop.args_r[0], arg=0)]
        return hop.genop('cast_weakadr_to_int', vlist,
                         resulttype = hop.r_result.lowleveltype)
# ____________________________________________________________
def debug_assert(x, msg):
    """Assert that *x* is true with message *msg*.  After translation to C
    this becomes an RPyAssert; untranslated it is a plain assert (and is
    therefore stripped under python -O, like any assert)."""
    assert x, msg
class Entry(ExtRegistryEntry):
    # Annotation/rtyping hook for debug_assert: the message must be a
    # compile-time constant; the call rtypes to a 'debug_assert' operation
    # (an RPyAssert in the generated C) and returns nothing.
    _about_ = debug_assert
    def compute_result_annotation(self, s_x, s_msg):
        assert s_msg.is_constant(), ("debug_assert(x, msg): "
                                     "the msg must be constant")
        return None
    def specialize_call(self, hop):
        from pypy.rpython.lltypesystem import lltype
        vlist = hop.inputargs(lltype.Bool, lltype.Void)
        hop.genop('debug_assert', vlist)
def hlinvoke(repr, llcallable, *args):
    """Placeholder for a high-level invocation of a low-level callable;
    it only has meaning once rtyped, so calling it directly is an error.

    Fixes: message typo ("direclty"); the Python-2-only ``raise X, "m"``
    statement form is replaced by the equivalent call form, which parses
    under both Python 2 and 3."""
    raise TypeError("hlinvoke is meant to be rtyped and not called directly")
class UnboxedValue(object):
    """A mixin class to use for classes that have exactly one field which
    is an integer.  They are represented as a tagged pointer after
    translation, hence the range check in __init__."""
    _mixin_ = True

    def __new__(cls, value):
        assert '__init__' not in cls.__dict__   # won't be called anyway
        assert isinstance(cls.__slots__, str) or len(cls.__slots__) == 1
        return super(UnboxedValue, cls).__new__(cls)

    def __init__(self, value):
        # this function is annotated but not included in the translated program
        int_as_pointer = value * 2 + 1   # XXX for now
        if not (-sys.maxint-1 <= int_as_pointer <= sys.maxint):
            raise OverflowError("UnboxedValue: argument out of range")
        slots = self.__class__.__slots__
        fieldname = slots if isinstance(slots, str) else slots[0]
        setattr(self, fieldname, value)

    def __repr__(self):
        return '<unboxed %d>' % (self.getvalue(),)

    def getvalue(self):
        # helper, equivalent to reading the single custom field
        slots = self.__class__.__slots__
        if isinstance(slots, str):
            return getattr(self, slots)
        return getattr(self, slots[0])
# ____________________________________________________________
class r_dict(object):
    """An RPython dict-like object.
    Only provides the interface supported by RPython; hashing and equality
    of keys are delegated to the user-supplied key_hash()/key_eq()
    callables instead of the keys' own __hash__/__eq__."""

    def __init__(self, key_eq, key_hash):
        self._dict = {}
        self.key_eq = key_eq
        self.key_hash = key_hash

    def __getitem__(self, key):
        return self._dict[_r_dictkey(self, key)]

    def __setitem__(self, key, value):
        self._dict[_r_dictkey(self, key)] = value

    def __delitem__(self, key):
        del self._dict[_r_dictkey(self, key)]

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        for wrapped in self._dict:
            yield wrapped.key

    def __contains__(self, key):
        return _r_dictkey(self, key) in self._dict

    def get(self, key, default):
        return self._dict.get(_r_dictkey(self, key), default)

    def copy(self):
        # a fresh r_dict sharing the same eq/hash callables
        duplicate = r_dict(self.key_eq, self.key_hash)
        duplicate.update(self)
        return duplicate

    def update(self, other):
        for key, value in other.items():
            self[key] = value

    def keys(self):
        return [wrapped.key for wrapped in self._dict]

    def values(self):
        return self._dict.values()

    def items(self):
        return [(wrapped.key, value)
                for wrapped, value in self._dict.items()]

    iterkeys = __iter__

    def itervalues(self):
        return self._dict.itervalues()

    def iteritems(self):
        for wrapped, value in self._dict.items():
            yield wrapped.key, value

    def clear(self):
        self._dict.clear()

    def __repr__(self):
        "Representation for debugging purposes."
        return 'r_dict(%r)' % (self._dict,)

    def __hash__(self):
        raise TypeError("cannot hash r_dict instances")

class _r_dictkey(object):
    """Wrapper around one key of an r_dict; forwards hashing and equality
    to the callables stored on the owning dict."""
    __slots__ = ['dic', 'key', 'hash']

    def __init__(self, dic, key):
        self.dic = dic
        self.key = key
        self.hash = dic.key_hash(key)

    def __eq__(self, other):
        if not isinstance(other, _r_dictkey):
            return NotImplemented
        return self.dic.key_eq(self.key, other.key)

    def __ne__(self, other):
        if not isinstance(other, _r_dictkey):
            return NotImplemented
        return not self.dic.key_eq(self.key, other.key)

    def __hash__(self):
        return self.hash

    def __repr__(self):
        return repr(self.key)

class _r_dictkey_with_hash(_r_dictkey):
    """Variant used when the hash value is already known, so key_hash()
    is not called again."""
    def __init__(self, dic, key, hash):
        self.dic = dic
        self.key = key
        self.hash = hash
| Python |
import py
try:
    set
except NameError:
    # Python 2.3 compatibility: the built-in set/frozenset types only
    # appeared in 2.4, so fall back to the old 'sets' module.
    from sets import Set as set, ImmutableSet as frozenset
def compress_char_set(chars):
    """Compress a collection of characters into runs of consecutive
    characters, returned as a list of (first_char, run_length) tuples
    sorted by ascending run length, then first char; e.g.
    compress_char_set("abd") == [('d', 1), ('a', 2)].

    Fixes: returns [] for an empty input instead of raising IndexError,
    and replaces the Python-2-only zip(*zip(*x)[::-1]) tuple-swapping
    trick (broken on Python 3 iterators) by an explicit comprehension.
    """
    chars = sorted(chars)
    if not chars:
        return []
    # flat list alternating [start_char, run_length, start_char, ...]
    runs = [chars[0], 1]
    for a, b in zip(chars[:-1], chars[1:]):
        if ord(a) == ord(b) - 1:
            # b extends the current run
            runs.append(runs.pop() + 1)
        else:
            # b starts a new run
            runs.append(b)
            runs.append(1)
    # pair up as (length, start) so that sorting orders runs by length
    by_length = []
    for i in range(len(runs) // 2):
        by_length.append((runs[i * 2 + 1], runs[i * 2]))
    by_length.sort()
    # swap the tuples back to (start, length)
    return [(a, num) for (num, a) in by_length]
class LexerError(Exception):
    """Raised when lexing fails; carries the full input string, the DFA
    state reached, and the source position where scanning stopped."""

    def __init__(self, input, state, source_pos):
        self.input = input
        self.state = state
        self.source_pos = source_pos
        self.args = (input, state, source_pos)

    def nice_error_message(self, filename="<unknown>"):
        """Return a CPython-traceback-style message with a caret pointing
        at the offending column."""
        lines = [" File %s, line %s" % (filename, self.source_pos.lineno),
                 self.input.split("\n")[self.source_pos.lineno],
                 " " * self.source_pos.columnno + "^",
                 "LexerError"]
        return "\n".join(lines)
def make_nice_charset_repr(chars):
    """Return a compact, regex-charset-like rendering of *chars*,
    e.g. "0-9a-z_"; a literal dash is escaped as "\\-".

    NOTE(review): single characters are emitted raw while range endpoints
    go through repr() — unescaped control characters in single-char runs
    look intentional for display purposes, but worth confirming.
    """
    charranges = compress_char_set(chars)
    result = []
    for a, num in charranges:
        if num == 1:
            # BUG FIX: the original appended the raw "-" *and* "\\-",
            # rendering a lone dash as "-\\-"; it must appear escaped only.
            if a == "-":
                result.append("\\-")
            else:
                result.append(a)
        else:
            result.append("%s-%s" % (repr(a)[1:-1],
                                     repr(chr(ord(a) + num - 1))[1:-1]))
    return "".join(result)
class DFA(object):
    """A deterministic finite automaton over characters.

    States are dense integers (0 is the start state).  'transitions' maps
    (state, input-char) pairs to the next state; 'final_states' is the set
    of accepting states; 'unmergeable_states' must never be merged away by
    optimize(); 'names' holds one debugging label per state.

    Fixes relative to the previous version: __init__ honors its num_states
    argument (it used to be unconditionally reset to 0); Python-2-only
    forms (tuple parameters, bare exec, dict.iteritems) are replaced by
    equivalents that behave identically and also parse on Python 3.
    """
    def __init__(self, num_states=0, transitions=None, final_states=None,
                 unmergeable_states=None, names=None):
        # BUG FIX: this was "self.num_states = 0", silently discarding the
        # argument and desynchronizing it from transitions/names.
        self.num_states = num_states
        if transitions is None:
            transitions = {}
        if final_states is None:
            final_states = set()
        if unmergeable_states is None:
            unmergeable_states = set()
        if names is None:
            names = []
        self.transitions = transitions
        self.final_states = final_states
        self.unmergeable_states = unmergeable_states
        self.names = names

    def __repr__(self):
        from pprint import pformat
        return "DFA%s" % (pformat((
            self.num_states, self.transitions, self.final_states,
            self.unmergeable_states, self.names)), )

    def add_state(self, name=None, final=False, unmergeable=False):
        """Add a state (labelled *name*, default its index); return its index."""
        state = self.num_states
        self.num_states += 1
        if final:
            self.final_states.add(state)
        if unmergeable:
            self.unmergeable_states.add(state)
        if name is None:
            name = str(state)
        self.names.append(name)
        return self.num_states - 1

    def __setitem__(self, item, next_state):
        # 'item' is a (state, input-char) pair
        self.transitions[item] = next_state

    def __getitem__(self, item):
        return self.transitions[item]

    def __contains__(self, item):
        return item in self.transitions

    def get_all_chars(self):
        """Return the set of input characters used by any transition."""
        all_chars = set()
        for state, input in self.transitions:
            all_chars.add(input)
        return all_chars

    def optimize(self):
        """Minimize the DFA in place by merging equivalent states
        (partition refinement, Moore/Hopcroft style).  Returns True if
        any states were merged, False if the DFA was already minimal."""
        all_chars = self.get_all_chars()
        # start from the coarsest partition: non-final vs final states,
        # with each unmergeable state in its own singleton class
        non_final = frozenset(set(range(self.num_states)) - self.final_states -
                              self.unmergeable_states)
        final = frozenset(self.final_states - self.unmergeable_states)
        state_to_set = {}
        equivalence_sets = set()
        if non_final:
            equivalence_sets.add(non_final)
        if final:
            equivalence_sets.add(final)
        for state in range(self.num_states):
            if state in final:
                state_to_set[state] = final
            elif state in self.unmergeable_states:
                singleset = frozenset([state])
                state_to_set[state] = singleset
                equivalence_sets.add(singleset)
            else:
                state_to_set[state] = non_final
        assert len(equivalence_sets) <= self.num_states
        # refine: split any class whose members disagree, for some char,
        # on which class that char leads to; repeat until fixpoint
        while len(equivalence_sets) < self.num_states:
            new_equivalence_sets = set()
            changed = False
            for equivalent in equivalence_sets:
                for char in all_chars:
                    targets = {}
                    for state in equivalent:
                        if (state, char) in self:
                            nextstate = self[state, char]
                            target = frozenset(state_to_set[nextstate])
                        else:
                            nextstate = None
                            target = None
                        targets.setdefault(target, set()).add(state)
                    if len(targets) != 1:
                        # members disagree: split the class accordingly
                        for target, newequivalent in targets.items():
                            newequivalent = frozenset(newequivalent)
                            new_equivalence_sets.add(newequivalent)
                            for state in newequivalent:
                                state_to_set[state] = newequivalent
                        changed = True
                        break
                else:
                    # no char split this class: keep it as-is
                    new_equivalence_sets.add(equivalent)
            if not changed:
                break
            equivalence_sets = new_equivalence_sets
        if len(equivalence_sets) == self.num_states:
            return False   # already minimal
        # rebuild the automaton with one state per equivalence class
        newnames = []
        newtransitions = {}
        newnum_states = len(equivalence_sets)
        newstates = list(equivalence_sets)
        newstate_to_index = {}
        newfinal_states = set()
        newunmergeable_states = set()
        for i, newstate in enumerate(newstates):
            newstate_to_index[newstate] = i
        # bring startstate into first slot
        startstateindex = newstate_to_index[state_to_set[0]]
        newstates[0], newstates[startstateindex] = newstates[startstateindex], newstates[0]
        newstate_to_index[newstates[0]] = 0
        newstate_to_index[newstates[startstateindex]] = startstateindex
        for i, newstate in enumerate(newstates):
            name = ", ".join([self.names[s] for s in newstate])
            for state in newstate:
                if state in self.unmergeable_states:
                    newunmergeable_states.add(i)
                    # an unmergeable state keeps its own name
                    name = self.names[state]
                if state in self.final_states:
                    newfinal_states.add(i)
            newnames.append(name)
        for (state, char), nextstate in self.transitions.items():
            newstate = newstate_to_index[state_to_set[state]]
            newnextstate = newstate_to_index[state_to_set[nextstate]]
            newtransitions[newstate, char] = newnextstate
        self.names = newnames
        self.transitions = newtransitions
        self.num_states = newnum_states
        self.final_states = newfinal_states
        self.unmergeable_states = newunmergeable_states
        return True

    def make_code(self):
        """Generate and compile a function recognize(input) -> bool
        specialized to this DFA."""
        from pypy.rlib.parsing.codebuilder import Codebuilder
        result = Codebuilder()
        result.start_block("def recognize(input):")
        result.emit("i = 0")
        result.emit("state = 0")
        result.start_block("while 1:")
        # group transitions per state: state -> {nextstate: set of chars}
        state_to_chars = {}
        for (state, char), nextstate in self.transitions.items():
            state_to_chars.setdefault(state, {}).setdefault(nextstate, set()).add(char)
        above = set()
        for state, nextstates in state_to_chars.items():
            above.add(state)
            for _ in result.start_block("if state == %s:" % (state, )):
                for _ in result.start_block("if i < len(input):"):
                    result.emit("char = input[i]")
                    result.emit("i += 1")
                for _ in result.start_block("else:"):
                    if state in self.final_states:
                        result.emit("return True")
                    else:
                        result.emit("break")
                elif_prefix = ""
                for nextstate, chars in nextstates.items():
                    compressed = compress_char_set(chars)
                    # a jump to an already-emitted state needs an explicit
                    # 'continue' to restart the dispatch loop from the top
                    if nextstate in above:
                        continue_prefix = "continue"
                    else:
                        continue_prefix = ""
                    for i, (a, num) in enumerate(compressed):
                        if num < 5:
                            # short runs: one equality test per character
                            for charord in range(ord(a), ord(a) + num):
                                for _ in result.start_block(
                                    "%sif char == %r:" % (
                                        elif_prefix, chr(charord))):
                                    result.emit("state = %s" % (nextstate, ))
                                    result.emit(continue_prefix)
                                if not elif_prefix:
                                    elif_prefix = "el"
                        else:
                            # longer runs: a single range check
                            for _ in result.start_block(
                                "%sif %r <= char <= %r:" % (
                                    elif_prefix, a, chr(ord(a) + num - 1))):
                                result.emit("state = %s" % (nextstate, ))
                                result.emit(continue_prefix)
                            if not elif_prefix:
                                elif_prefix = "el"
                for _ in result.start_block("else:"):
                    result.emit("break")
        # states with no outgoing transitions accept or fail immediately
        for state in range(self.num_states):
            if state in state_to_chars:
                continue
            for _ in result.start_block("if state == %s:" % (state, )):
                for _ in result.start_block("if i == len(input):"):
                    result.emit("return True")
                for _ in result.start_block("else:"):
                    result.emit("break")
        result.emit("break")
        result.end_block("while")
        result.emit("raise LexerError(input, state, i)")
        result.end_block("def")
        result = result.get_code()
        while "\n\n" in result:
            result = result.replace("\n\n", "\n")
        d = {'LexerError': LexerError}
        # function-call form of exec: equivalent to the old
        # "exec ... in d" statement but valid on Python 2 and 3 alike
        exec(py.code.Source(result).compile(), d)
        return d['recognize']

    def make_lexing_code(self):
        """Generate and compile the lexer driver recognize(runner, i): it
        records the last accepting position on 'runner' and returns the
        stop index; ~i (one's complement) signals 'stopped, no match'."""
        from pypy.rlib.parsing.codebuilder import Codebuilder
        result = Codebuilder()
        result.start_block("def recognize(runner, i):")
        result.emit("assert i >= 0")
        result.emit("input = runner.text")
        result.emit("state = 0")
        result.start_block("while 1:")
        # group transitions per state: state -> {nextstate: set of chars}
        state_to_chars = {}
        for (state, char), nextstate in self.transitions.items():
            state_to_chars.setdefault(state, {}).setdefault(nextstate, set()).add(char)
        state_to_chars_sorted = sorted(state_to_chars.items())
        above = set()
        for state, nextstates in state_to_chars_sorted:
            above.add(state)
            for _ in result.start_block("if state == %s:" % (state, )):
                if state in self.final_states:
                    result.emit("runner.last_matched_index = i - 1")
                    result.emit("runner.last_matched_state = state")
                for _ in result.start_block("try:"):
                    result.emit("char = input[i]")
                    result.emit("i += 1")
                for _ in result.start_block("except IndexError:"):
                    result.emit("runner.state = %s" % (state, ))
                    if state in self.final_states:
                        result.emit("return i")
                    else:
                        result.emit("return ~i")
                elif_prefix = ""
                for nextstate, chars in nextstates.items():
                    compressed = compress_char_set(chars)
                    if nextstate in above:
                        continue_prefix = "continue"
                    else:
                        continue_prefix = ""
                    for i, (a, num) in enumerate(compressed):
                        if num < 3:
                            for charord in range(ord(a), ord(a) + num):
                                for _ in result.start_block("%sif char == %r:"
                                    % (elif_prefix, chr(charord))):
                                    result.emit("state = %s" % (nextstate, ))
                                    result.emit(continue_prefix)
                                if not elif_prefix:
                                    elif_prefix = "el"
                        else:
                            for _ in result.start_block(
                                "%sif %r <= char <= %r:" % (
                                    elif_prefix, a, chr(ord(a) + num - 1))):
                                result.emit("state = %s" % (nextstate, ))
                                result.emit(continue_prefix)
                            if not elif_prefix:
                                elif_prefix = "el"
                for _ in result.start_block("else:"):
                    result.emit("break")
        for state in range(self.num_states):
            if state in state_to_chars:
                continue
            # a state without outgoing transitions must be accepting,
            # otherwise it could never be useful to reach it
            assert state in self.final_states
            result.emit("""
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break""")
        result.end_block("while")
        result.emit("""
            runner.state = state
            return ~i""")
        result.end_block("def")
        result = result.get_code()
        while "\n\n" in result:
            result = result.replace("\n\n", "\n")
        d = {}
        # function-call form of exec (see make_code); the previous bare
        # "exec ...; return recognize" relied on Python 2's exec-into-locals
        exec(py.code.Source(result).compile(), d)
        return d['recognize']

    def get_runner(self):
        """Return a DFARunner driving this automaton."""
        return DFARunner(self)

    def make_nondeterministic(self):
        """Return an equivalent NFA with the same states and transitions."""
        result = NFA()
        result.num_states = self.num_states
        result.names = self.names
        result.start_states = set([0])
        result.final_states = self.final_states.copy()
        for (state, input), nextstate in self.transitions.items():
            result.add_transition(state, nextstate, input)
        return result

    def dot(self):
        """Return graphviz 'dot' source for the automaton (start state in
        red, accepting states drawn as octagons)."""
        result = ["graph G {"]
        for i in range(self.num_states):
            if i == 0:
                extra = ", color=red"
            else:
                extra = ""
            if i in self.final_states:
                shape = "octagon"
            else:
                shape = "box"
            result.append(
                'state%s [label="%s", shape=%s%s];' %
                    (i, repr(self.names[i]).replace("\\", "\\\\"), shape, extra))
        edges = {}
        for (state, input), next_state in self.transitions.items():
            edges.setdefault((state, next_state), set()).add(input)
        for (state, next_state), inputs in edges.items():
            inputs = make_nice_charset_repr(inputs)
            result.append('state%s -- state%s [label="%s", arrowhead=normal];' %
                          (state, next_state, repr(inputs).replace("\\", "\\\\")))
        result.append("}")
        return "\n".join(result)

    def view(self):
        """Render the automaton with graphviz and display it in dotviewer."""
        from dotviewer import graphclient
        p = py.test.ensuretemp("automaton").join("temp.dot")
        dot = self.dot()
        p.write(dot)
        plainpath = p.new(ext="plain")
        try:
            py.process.cmdexec("neato -Tplain %s > %s" % (p, plainpath))
        except py.error.Error:
            # fall back to another graphviz layout engine
            py.process.cmdexec("fdp -Tplain %s > %s" % (p, plainpath))
        graphclient.display_dot_file(str(plainpath))
class DFARunner(object):
    """Executes a DFA over an input string, one character at a time."""
    def __init__(self, automaton):
        self.automaton = automaton
        self.state = 0
    def nextstate(self, char):
        # advance one step; raises KeyError when no transition exists
        self.state = self.automaton[self.state, char]
        return self.state
    def recognize(self, s):
        """Return True iff the automaton accepts the whole string ``s``."""
        self.state = 0
        for char in s:
            try:
                self.nextstate(char)
            except KeyError:
                # stuck: no transition for this character
                return False
        return self.state in self.automaton.final_states
class NFA(object):
    """A nondeterministic finite automaton.

    States are integers 0..num_states-1.  ``transitions`` maps a state to a
    dict {input: set(successor states)}; an input of ``None`` denotes an
    epsilon (input-free) transition.
    """
    def __init__(self):
        self.num_states = 0
        self.names = []                    # human-readable name per state
        self.transitions = {}              # state -> {input: set(next states)}
        self.start_states = set()
        self.final_states = set()
        self.unmergeable_states = set()    # states whose name must win when merged
    def add_state(self, name=None, start=False, final=False,
                  unmergeable=False):
        """Create a new state (optionally start/final/unmergeable); return its index."""
        new_state = self.num_states
        self.num_states += 1
        if name is None:
            name = str(new_state)
        self.names.append(name)
        if start:
            self.start_states.add(new_state)
        if final:
            self.final_states.add(new_state)
        if unmergeable:
            self.unmergeable_states.add(new_state)
        return new_state
    def add_transition(self, state, next_state, input=None):
        """Record a transition; ``input=None`` adds an epsilon transition."""
        subtransitions = self.transitions.setdefault(state, {})
        subtransitions.setdefault(input, set()).add(next_state)
    def get_next_states(self, state, char):
        """Return every state reachable from ``state`` via epsilon* then ``char``."""
        result = set()
        for e_state in self.epsilon_closure([state]):
            result.update(self.transitions.get(e_state, {}).get(char, set()))
        return result
    def epsilon_closure(self, states):
        """Return ``states`` plus all states reachable through epsilon transitions."""
        result = set(states)
        stack = list(states)
        while stack:
            state = stack.pop()
            for next_state in self.transitions.get(state, {}).get(None, set()):
                if next_state not in result:
                    result.add(next_state)
                    stack.append(next_state)
        return result
    def make_deterministic(self, name_precedence=None):
        """Subset-construct an equivalent DFA.

        Each DFA state corresponds to an epsilon-closed set of NFA states.
        ``name_precedence``, if given, is a list of names; among unmergeable
        NFA states folded into one DFA state, the name appearing earliest in
        that list wins.
        """
        fda = DFA()
        set_to_state = {}   # frozenset of NFA states -> DFA state index
        stack = []          # DFA states whose outgoing edges still need building
        def get_state(states):
            states = self.epsilon_closure(states)
            frozenstates = frozenset(states)
            if frozenstates in set_to_state:
                return set_to_state[frozenstates]
            if states == self.start_states:
                # the start set must become the very first DFA state (index 0)
                assert not set_to_state
            final = bool(
                filter(None, [state in self.final_states for state in states]))
            name = ", ".join([self.names[state] for state in states])
            if name_precedence is not None:
                name_index = len(name_precedence)
            unmergeable = False
            for state in states:
                if state in self.unmergeable_states:
                    new_name = self.names[state]
                    if name_precedence is not None:
                        try:
                            index = name_precedence.index(new_name)
                        except ValueError:
                            index = name_index
                        if index < name_index:
                            name_index = index
                            name = new_name
                    else:
                        name = new_name
                    unmergeable = True
            result = set_to_state[frozenstates] = fda.add_state(
                name, final, unmergeable)
            stack.append((result, states))
            return result
        startstate = get_state(self.start_states)
        while stack:
            fdastate, ndastates = stack.pop()
            chars_to_states = {}
            for state in ndastates:
                sub_transitions = self.transitions.get(state, {})
                for char, next_states in sub_transitions.iteritems():
                    chars_to_states.setdefault(char, set()).update(next_states)
            for char, states in chars_to_states.iteritems():
                if char is None:
                    continue   # epsilon edges are already folded into the closure
                fda[fdastate, char] = get_state(states)
        return fda
    def update(self, other):
        """Copy ``other``'s states and transitions into self.

        Returns a dict mapping ``other``'s state indexes to the new indexes.
        Note: start/final/unmergeable markers of ``other`` are not copied.
        """
        mapping = {}
        for i, name in enumerate(other.names):
            new_state = self.add_state(name)
            mapping[i] = new_state
        for state, subtransitions in other.transitions.iteritems():
            new_state = mapping[state]
            new_subtransitions = self.transitions.setdefault(new_state, {})
            for input, next_states in subtransitions.iteritems():
                next_states = [mapping[i] for i in next_states]
                new_subtransitions.setdefault(input, set()).update(next_states)
        return mapping
    def view(self):
        """Display the automaton with dotviewer (debugging aid; needs graphviz)."""
        from dotviewer import graphclient
        p = py.test.ensuretemp("automaton").join("temp.dot")
        dot = self.dot()
        p.write(dot)
        plainpath = p.new(ext="plain")
        try:
            try:
                py.process.cmdexec("neato -Tplain %s > %s" % (p, plainpath))
            except py.error.Error:
                py.process.cmdexec("fdp -Tplain %s > %s" % (p, plainpath))
        except py.error.Error:
            # no undirected layouter available: render as a digraph with dot
            p.write(
                dot.replace("graph G {", "digraph G {").replace(" -- ", " -> "))
            py.process.cmdexec("dot -Tplain %s > %s" % (p, plainpath))
        graphclient.display_dot_file(str(plainpath))
    def dot(self):
        """Return graphviz source (start states red, final states octagons)."""
        result = ["graph G {"]
        for i in range(self.num_states):
            if i in self.start_states:
                extra = ", color=red"
            else:
                extra = ""
            if i in self.final_states:
                peripheries = 2
                extra += ", shape=octagon"
            else:
                peripheries = 1
            result.append(
                'state%s [label="%s", peripheries=%s%s];' %
                (i, self.names[i], peripheries, extra))
        for state, sub_transitions in self.transitions.iteritems():
            for input, next_states in sub_transitions.iteritems():
                for next_state in next_states:
                    result.append(
                        'state%s -- state%s [label="%s", arrowhead=normal];' %
                        (state, next_state, repr(input).replace("\\", "\\\\")))
        result.append("}")
        return "\n".join(result)
class SetNFARunner(object):
    """Runs an NFA by tracking the full set of simultaneously active states."""
    def __init__(self, automaton):
        self.automaton = automaton
    def next_state(self, char):
        # union of the successors of every currently active state
        successors = set()
        for current in self.states:
            successors.update(self.automaton.get_next_states(current, char))
        return successors
    def recognize(self, s):
        """Return True iff the automaton accepts the whole string ``s``."""
        self.states = self.automaton.start_states.copy()
        for char in s:
            following = self.next_state(char)
            if not following:
                # dead end: no state can consume this character
                return False
            self.states = following
        for candidate in self.states:
            if candidate in self.automaton.final_states:
                return True
        return False
class BacktrackingNFARunner(object):
    """Runs an NFA by depth-first backtracking over single states."""
    def __init__(self, automaton):
        self.automaton = automaton
    def recognize(self, s):
        """Return True iff some path through the NFA accepts all of ``s``."""
        def explore(pos, current):
            if pos == len(s):
                return current in self.automaton.final_states
            for succ in self.automaton.get_next_states(current, s[pos]):
                if explore(pos + 1, succ):
                    return True
            return False
        for start in self.automaton.start_states:
            if explore(0, start):
                return True
        return False
| Python |
from pypy.rlib.parsing.tree import Nonterminal, Symbol
from makepackrat import PackratParser, BacktrackException, Status
class Parser(object):
def NAME(self):
return self._NAME().result
    def _NAME(self):
        """Memoized matcher for the NAME token (regex-based, not left-recursive)."""
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_NAME.get(_key, None)
        if _status is None:
            _status = self._dict_NAME[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # cached success: jump to the recorded end position
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # cached failure: re-raise the recorded error
                raise BacktrackException(_status.error)
        # NOTE(review): _startingpos is assigned but unused in this
        # non-left-recursive rule.
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1074651696()
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def SPACE(self):
return self._SPACE().result
    def _SPACE(self):
        """Memoized matcher for a single space character."""
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_SPACE.get(_key, None)
        if _status is None:
            _status = self._dict_SPACE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__(' ')
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # cache the failure so re-parsing at this position fails fast
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def COMMENT(self):
return self._COMMENT().result
    def _COMMENT(self):
        """Memoized matcher for the COMMENT token (regex-based)."""
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_COMMENT.get(_key, None)
        if _status is None:
            _status = self._dict_COMMENT[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex528667127()
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # cache the failure so re-parsing at this position fails fast
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def IGNORE(self):
return self._IGNORE().result
    def _IGNORE(self):
        """Memoized matcher for ignorable input (regex-based)."""
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_IGNORE.get(_key, None)
        if _status is None:
            _status = self._dict_IGNORE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1979538501()
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # cache the failure so re-parsing at this position fails fast
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def newline(self):
return self._newline().result
    def _newline(self):
        """Memoized rule: newline is either a COMMENT or a newline regex."""
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_newline.get(_key, None)
        if _status is None:
            _status = self._dict_newline[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # re-entered while already parsing at this position:
                # mark the rule left-recursive; return the seed result if any
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # ordered choice: COMMENT first, then the newline regex
                _choice0 = self._pos
                try:
                    _call_status = self._COMMENT()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice1 = self._pos
                try:
                    _result = self._regex299149370()
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                raise BacktrackException(_error)
                _result = self._regex299149370()
                break
            if _status.status == _status.LEFTRECURSION:
                # grow the seed: retry from the start until the match
                # no longer gets longer
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._newline()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def REGEX(self):
return self._REGEX().result
    def _REGEX(self):
        """Memoized matcher for a REGEX token; wraps the match in a Symbol node."""
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_REGEX.get(_key, None)
        if _status is None:
            _status = self._dict_REGEX[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1006631623()
            # semantic action: wrap the raw match in an AST Symbol
            r = _result
            _result = (Symbol('REGEX', r, None))
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def QUOTE(self):
return self._QUOTE().result
    def _QUOTE(self):
        """Memoized matcher for a QUOTE token; wraps the match in a Symbol node."""
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_QUOTE.get(_key, None)
        if _status is None:
            _status = self._dict_QUOTE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1124192327()
            # semantic action: wrap the raw match in an AST Symbol
            r = _result
            _result = (Symbol('QUOTE', r, None))
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def PYTHONCODE(self):
return self._PYTHONCODE().result
    def _PYTHONCODE(self):
        """Memoized matcher for a PYTHONCODE token; wraps the match in a Symbol node."""
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_PYTHONCODE.get(_key, None)
        if _status is None:
            _status = self._dict_PYTHONCODE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex291086639()
            # semantic action: wrap the raw match in an AST Symbol
            r = _result
            _result = (Symbol('PYTHONCODE', r, None))
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def EOF(self):
return self._EOF().result
    def _EOF(self):
        """Memoized rule: succeed only at end of input (negative lookahead on any char)."""
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_EOF.get(_key, None)
        if _status is None:
            _status = self._dict_EOF[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # re-entered while already parsing at this position:
                # mark the rule left-recursive; return the seed result if any
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _choice0 = self._pos
            _stored_result1 = _result
            try:
                _result = self.__any__()
            except BacktrackException:
                # no character left: the lookahead fails, so we are at EOF
                self._pos = _choice0
                _result = _stored_result1
            else:
                # a character was available: not at EOF, fail the rule
                raise BacktrackException(None)
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._EOF()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def file(self):
return self._file().result
    def _file(self):
        """Memoized rule: file = IGNORE* list EOF; result is the list node."""
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_file.get(_key, None)
        if _status is None:
            _status = self._dict_file[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # re-entered while already parsing at this position:
                # mark the rule left-recursive; return the seed result if any
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # repetition: leading IGNORE*
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._list()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            _before_discard2 = _result
            # require EOF, but keep the list as the rule's result
            _call_status = self._EOF()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            _result = _before_discard2
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._file()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def list(self):
return self._list().result
    def _list(self):
        """Memoized rule: list = production+; builds a Nonterminal('list', ...)."""
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_list.get(_key, None)
        if _status is None:
            _status = self._dict_list[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # re-entered while already parsing at this position:
                # mark the rule left-recursive; return the seed result if any
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # one-or-more: first production is mandatory, then repeat
            _all0 = []
            _call_status = self._production()
            _result = _call_status.result
            _error = _call_status.error
            _all0.append(_result)
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._production()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            content = _result
            _result = (Nonterminal('list', content))
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._list()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def production(self):
return self._production().result
    def _production(self):
        """Memoized rule: production = NAME SPACE* productionargs ':' IGNORE* or_ IGNORE* ';' IGNORE*.

        Builds Nonterminal('production', [name, args, what]).
        """
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_production.get(_key, None)
        if _status is None:
            _status = self._dict_production[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # re-entered while already parsing at this position:
                # mark the rule left-recursive; return the seed result if any
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            name = _result
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._productionargs()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            args = _result
            _result = self.__chars__(':')
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _call_status = self._or_()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            what = _result
            _all4 = []
            while 1:
                _choice5 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all4.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                    break
            _result = _all4
            _result = self.__chars__(';')
            _all6 = []
            while 1:
                _choice7 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all6.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                    break
            _result = _all6
            _result = (Nonterminal('production', [name, args, what]))
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._production()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def productionargs(self):
return self._productionargs().result
    def _productionargs(self):
        """Memoized rule: optional '(' NAME (',' NAME)* ')' argument list.

        First choice parses a parenthesized, comma-separated NAME list;
        the fallback produces an empty Nonterminal('productionargs', []).
        """
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_productionargs.get(_key, None)
        if _status is None:
            _status = self._dict_productionargs[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # re-entered while already parsing at this position:
                # mark the rule left-recursive; return the seed result if any
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                _choice0 = self._pos
                try:
                    _result = self.__chars__('(')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    # zero or more "NAME IGNORE* ',' IGNORE*" prefixes
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._NAME()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard5 = _result
                            _all6 = []
                            while 1:
                                _choice7 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all6.append(_result)
                                except BacktrackException, _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice7
                                    break
                            _result = _all6
                            _result = self.__chars__(',')
                            _all8 = []
                            while 1:
                                _choice9 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all8.append(_result)
                                except BacktrackException, _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice9
                                    break
                            _result = _all8
                            _result = _before_discard5
                            _all3.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    args = _result
                    # the final NAME (not followed by a comma)
                    _call_status = self._NAME()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    arg = _result
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = self.__chars__(')')
                    _all12 = []
                    while 1:
                        _choice13 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all12.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice13
                            break
                    _result = _all12
                    _result = (Nonterminal('productionargs', args + [arg]))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice14 = self._pos
                try:
                    # fallback: no argument list at all
                    _result = (Nonterminal('productionargs', []))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice14
                raise BacktrackException(_error)
                _result = (Nonterminal('productionargs', []))
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._productionargs()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def or_(self):
return self._or_().result
    def _or_(self):
        """Memoized rule: or_ = (commands '|' IGNORE*)+ commands | commands.

        A multi-alternative match builds Nonterminal('or', ...); a single
        commands match is returned unchanged.
        """
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_or_.get(_key, None)
        if _status is None:
            _status = self._dict_or_[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # re-entered while already parsing at this position:
                # mark the rule left-recursive; return the seed result if any
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                _choice0 = self._pos
                try:
                    # collect one or more "commands '|' IGNORE*" alternatives
                    _all1 = []
                    _call_status = self._commands()
                    _result = _call_status.result
                    _error = _call_status.error
                    _before_discard2 = _result
                    _result = self.__chars__('|')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = _before_discard2
                    _all1.append(_result)
                    while 1:
                        _choice5 = self._pos
                        try:
                            _call_status = self._commands()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard6 = _result
                            _result = self.__chars__('|')
                            _all7 = []
                            while 1:
                                _choice8 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all7.append(_result)
                                except BacktrackException, _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice8
                                    break
                            _result = _all7
                            _result = _before_discard6
                            _all1.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice5
                            break
                    _result = _all1
                    l = _result
                    # the final alternative after the last '|'
                    _call_status = self._commands()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    last = _result
                    _result = (Nonterminal('or', l + [last]))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice9 = self._pos
                try:
                    # fallback: a single commands without '|'
                    _call_status = self._commands()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice9
                raise BacktrackException(_error)
                _call_status = self._commands()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._or_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def commands(self):
return self._commands().result
    def _commands(self):
        """Memoized rule: commands = command newline (command newline)+ | command.

        A newline-separated sequence builds Nonterminal('commands', ...); a
        single command is returned unchanged.
        """
        # Packrat memoization: one Status cache entry per input position.
        _key = self._pos
        _status = self._dict_commands.get(_key, None)
        if _status is None:
            _status = self._dict_commands[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # re-entered while already parsing at this position:
                # mark the rule left-recursive; return the seed result if any
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                _choice0 = self._pos
                try:
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = _call_status.error
                    cmd = _result
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    # one or more further "command newline" pairs
                    _all1 = []
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _before_discard2 = _result
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _result = _before_discard2
                    _all1.append(_result)
                    while 1:
                        _choice3 = self._pos
                        try:
                            _call_status = self._command()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard4 = _result
                            _call_status = self._newline()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _result = _before_discard4
                            _all1.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice3
                            break
                    _result = _all1
                    cmds = _result
                    _result = (Nonterminal('commands', [cmd] + cmds))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice5 = self._pos
                try:
                    # fallback: a single command
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                raise BacktrackException(_error)
                _call_status = self._command()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._commands()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def command(self):
return self._command().result
    def _command(self):
        """Packrat matcher for the 'command' rule.

        Results are memoized per input position in self._dict_command as
        Status objects; left recursion is resolved by re-parsing from the
        starting position until the match no longer grows (fixpoint).
        Raises BacktrackException on failure (and caches the failure).
        """
        # Memo lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_command.get(_key, None)
        if _status is None:
            _status = self._dict_command[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cached success: jump to the stored end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: replay the stored error.
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while already parsing here: mark as left-recursive
                # and return the current seed result, if any.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # command: simplecommand
            _call_status = self._simplecommand()
            _result = _call_status.result
            _error = _call_status.error
            if _status.status == _status.LEFTRECURSION:
                # Grow the seed: retry from the start until no longer matches.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._command()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # Cache the failure so future attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def simplecommand(self):
return self._simplecommand().result
    def _simplecommand(self):
        """Packrat matcher for the 'simplecommand' rule: ordered choice of
        return_ | if_ | named_command | repetition | choose | negation.

        Memoized per position in self._dict_simplecommand; left recursion is
        handled by iterating to a fixpoint. Raises BacktrackException on
        failure.
        """
        # Memo lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_simplecommand.get(_key, None)
        if _status is None:
            _status = self._dict_simplecommand[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # Ordered choice: try each alternative, backtracking on failure.
            while 1:
                _choice0 = self._pos
                try:
                    _call_status = self._return_()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice1 = self._pos
                try:
                    _call_status = self._if_()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                _choice2 = self._pos
                try:
                    _call_status = self._named_command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice2
                _choice3 = self._pos
                try:
                    _call_status = self._repetition()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                _choice4 = self._pos
                try:
                    _call_status = self._choose()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice4
                _choice5 = self._pos
                try:
                    _call_status = self._negation()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                # All alternatives failed.
                raise BacktrackException(_error)
                # NOTE: generated dead code below (unreachable after the raise).
                _call_status = self._negation()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._simplecommand()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # Cache the failure so future attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def return_(self):
return self._return_().result
    def _return_(self):
        """Packrat matcher for the 'return_' rule:
        'return' SPACE* PYTHONCODE IGNORE*  ->  Nonterminal('return', [code]).

        Memoized per position in self._dict_return_; left recursion handled
        by fixpoint iteration. Raises BacktrackException on failure.
        """
        # Memo lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_return_.get(_key, None)
        if _status is None:
            _status = self._dict_return_[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # Literal keyword 'return'.
            _result = self.__chars__('return')
            # SPACE* (zero or more).
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            # Capture the embedded Python code chunk.
            _call_status = self._PYTHONCODE()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            code = _result
            # IGNORE* (trailing whitespace/comments).
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _result = (Nonterminal('return', [code]))
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._return_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # Cache the failure so future attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def if_(self):
return self._if_().result
    def _if_(self):
        """Packrat matcher for the 'if_' rule, an ordered choice of:
          'do' newline command SPACE* 'if' SPACE* PYTHONCODE IGNORE*
              -> Nonterminal('if', [cmd, condition])
        | 'if' SPACE* PYTHONCODE IGNORE*
              -> Nonterminal('if', [condition])

        Memoized per position in self._dict_if_; left recursion handled by
        fixpoint iteration. Raises BacktrackException on failure.
        """
        # Memo lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_if_.get(_key, None)
        if _status is None:
            _status = self._dict_if_[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: 'do' <newline> <command> ... 'if' <code>.
                _choice0 = self._pos
                try:
                    _result = self.__chars__('do')
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = _call_status.error
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    cmd = _result
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all1.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _result = self.__chars__('if')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    condition = _result
                    _all5 = []
                    while 1:
                        _choice6 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all5.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice6
                            break
                    _result = _all5
                    _result = (Nonterminal('if', [cmd, condition]))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # Alternative 2: bare 'if' <code>.
                _choice7 = self._pos
                try:
                    _result = self.__chars__('if')
                    _all8 = []
                    while 1:
                        _choice9 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all8.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice9
                            break
                    _result = _all8
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    condition = _result
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = (Nonterminal('if', [condition]))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                # All alternatives failed.
                raise BacktrackException(_error)
                # NOTE: generated dead code below (unreachable after the raise).
                _result = self.__chars__('if')
                _all12 = []
                while 1:
                    _choice13 = self._pos
                    try:
                        _call_status = self._SPACE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all12.append(_result)
                    except BacktrackException, _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice13
                        break
                _result = _all12
                _call_status = self._PYTHONCODE()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                condition = _result
                _all14 = []
                while 1:
                    _choice15 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all14.append(_result)
                    except BacktrackException, _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice15
                        break
                _result = _all14
                _result = (Nonterminal('if', [condition]))
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._if_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # Cache the failure so future attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def choose(self):
return self._choose().result
    def _choose(self):
        """Packrat matcher for the 'choose' rule:
        'choose' SPACE* NAME SPACE* 'in' SPACE* PYTHONCODE IGNORE* commands
            -> Nonterminal('choose', [name, expr, cmds]).

        Memoized per position in self._dict_choose; left recursion handled
        by fixpoint iteration. Raises BacktrackException on failure.
        """
        # Memo lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_choose.get(_key, None)
        if _status is None:
            _status = self._dict_choose[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # Literal keyword 'choose'.
            _result = self.__chars__('choose')
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            # Bound variable name.
            _call_status = self._NAME()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            name = _result
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            # Literal keyword 'in'.
            _result = self.__chars__('in')
            _all4 = []
            while 1:
                _choice5 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all4.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                    break
            _result = _all4
            # Python expression supplying the candidate values.
            _call_status = self._PYTHONCODE()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            expr = _result
            _all6 = []
            while 1:
                _choice7 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all6.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                    break
            _result = _all6
            # Body commands of the choose block.
            _call_status = self._commands()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            cmds = _result
            _result = (Nonterminal('choose', [name, expr, cmds]))
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._choose()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # Cache the failure so future attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def commandchain(self):
return self._commandchain().result
    def _commandchain(self):
        """Packrat matcher for the 'commandchain' rule:
        simplecommand+  ->  Nonterminal('commands', result).

        Memoized per position in self._dict_commandchain; left recursion
        handled by fixpoint iteration. Raises BacktrackException on failure.
        """
        # Memo lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_commandchain.get(_key, None)
        if _status is None:
            _status = self._dict_commandchain[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # One-or-more: first match is mandatory, rest are optional.
            _all0 = []
            _call_status = self._simplecommand()
            _result = _call_status.result
            _error = _call_status.error
            _all0.append(_result)
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._simplecommand()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            result = _result
            _result = (Nonterminal('commands', result))
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._commandchain()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # Cache the failure so future attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def named_command(self):
return self._named_command().result
    def _named_command(self):
        """Packrat matcher for the 'named_command' rule:
        NAME SPACE* '=' SPACE* command
            -> Nonterminal('named_command', [name, cmd]).

        Memoized per position in self._dict_named_command; left recursion
        handled by fixpoint iteration. Raises BacktrackException on failure.
        """
        # Memo lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_named_command.get(_key, None)
        if _status is None:
            _status = self._dict_named_command[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # Binding name on the left of '='.
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            name = _result
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _result = self.__chars__('=')
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            # Command bound to the name.
            _call_status = self._command()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            cmd = _result
            _result = (Nonterminal('named_command', [name, cmd]))
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._named_command()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # Cache the failure so future attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def repetition(self):
return self._repetition().result
    def _repetition(self):
        """Packrat matcher for the 'repetition' rule, an ordered choice of:
          enclosed SPACE* '?' IGNORE*        -> Nonterminal('maybe', [what])
        | enclosed SPACE* ('*'|'+') IGNORE*  -> Nonterminal('repetition',
                                                    [repetition, what])

        Memoized per position in self._dict_repetition; left recursion
        handled by fixpoint iteration. Raises BacktrackException on failure.
        """
        # Memo lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_repetition.get(_key, None)
        if _status is None:
            _status = self._dict_repetition[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: enclosed '?' (optional match).
                _choice0 = self._pos
                try:
                    _call_status = self._enclosed()
                    _result = _call_status.result
                    _error = _call_status.error
                    what = _result
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all1.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _result = self.__chars__('?')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = (Nonterminal('maybe', [what]))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # Alternative 2: enclosed ('*' | '+') repetition.
                _choice5 = self._pos
                try:
                    _call_status = self._enclosed()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all6 = []
                    while 1:
                        _choice7 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all6.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice7
                            break
                    _result = _all6
                    # Inner choice: '*' or '+'.
                    while 1:
                        _choice8 = self._pos
                        try:
                            _result = self.__chars__('*')
                            break
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice8
                        _choice9 = self._pos
                        try:
                            _result = self.__chars__('+')
                            break
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice9
                        raise BacktrackException(_error)
                        # NOTE: generated dead code below (unreachable).
                        _result = self.__chars__('+')
                        break
                    repetition = _result
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = (Nonterminal('repetition', [repetition, what]))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                # All alternatives failed.
                raise BacktrackException(_error)
                # NOTE: generated dead code below (unreachable after the raise).
                _call_status = self._enclosed()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                what = _result
                _all12 = []
                while 1:
                    _choice13 = self._pos
                    try:
                        _call_status = self._SPACE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all12.append(_result)
                    except BacktrackException, _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice13
                        break
                _result = _all12
                while 1:
                    _choice14 = self._pos
                    try:
                        _result = self.__chars__('*')
                        break
                    except BacktrackException, _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice14
                    _choice15 = self._pos
                    try:
                        _result = self.__chars__('+')
                        break
                    except BacktrackException, _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice15
                    raise BacktrackException(_error)
                    _result = self.__chars__('+')
                    break
                repetition = _result
                _all16 = []
                while 1:
                    _choice17 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all16.append(_result)
                    except BacktrackException, _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice17
                        break
                _result = _all16
                _result = (Nonterminal('repetition', [repetition, what]))
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._repetition()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # Cache the failure so future attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def negation(self):
return self._negation().result
    def _negation(self):
        """Packrat matcher for the 'negation' rule, an ordered choice of:
          '!' SPACE* negation IGNORE*  -> Nonterminal('negation', [what])
        | enclosed

        Memoized per position in self._dict_negation; left recursion handled
        by fixpoint iteration. Raises BacktrackException on failure.
        """
        # Memo lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_negation.get(_key, None)
        if _status is None:
            _status = self._dict_negation[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: '!' followed by a (recursive) negation.
                _choice0 = self._pos
                try:
                    _result = self.__chars__('!')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _call_status = self._negation()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = (Nonterminal('negation', [what]))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # Alternative 2: plain enclosed term.
                _choice5 = self._pos
                try:
                    _call_status = self._enclosed()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                # All alternatives failed.
                raise BacktrackException(_error)
                # NOTE: generated dead code below (unreachable after the raise).
                _call_status = self._enclosed()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._negation()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # Cache the failure so future attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def enclosed(self):
return self._enclosed().result
    def _enclosed(self):
        """Packrat matcher for the 'enclosed' rule, an ordered choice of:
          '<' IGNORE* primary IGNORE* '>' IGNORE*
              -> Nonterminal('exclusive', [what])
        | '[' IGNORE* or_ IGNORE* ']' IGNORE*
              -> Nonterminal('ignore', [what])
        | '(' IGNORE* or_ ')' IGNORE*   (parenthesized group, result of or_)
        | primary

        Memoized per position in self._dict_enclosed; left recursion handled
        by fixpoint iteration. Raises BacktrackException on failure.
        """
        # Memo lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_enclosed.get(_key, None)
        if _status is None:
            _status = self._dict_enclosed[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: '<' primary '>' (exclusive).
                _choice0 = self._pos
                try:
                    _result = self.__chars__('<')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _call_status = self._primary()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = self.__chars__('>')
                    _all5 = []
                    while 1:
                        _choice6 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all5.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice6
                            break
                    _result = _all5
                    _result = (Nonterminal('exclusive', [what]))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # Alternative 2: '[' or_ ']' (ignored result).
                _choice7 = self._pos
                try:
                    _result = self.__chars__('[')
                    _all8 = []
                    while 1:
                        _choice9 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all8.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice9
                            break
                    _result = _all8
                    _call_status = self._or_()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = self.__chars__(']')
                    _all12 = []
                    while 1:
                        _choice13 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all12.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice13
                            break
                    _result = _all12
                    _result = (Nonterminal('ignore', [what]))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                # Alternative 3: '(' or_ ')' — result is the inner or_ node.
                _choice14 = self._pos
                try:
                    _before_discard15 = _result
                    _result = self.__chars__('(')
                    _all16 = []
                    while 1:
                        _choice17 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all16.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice17
                            break
                    _result = _all16
                    _result = _before_discard15
                    _call_status = self._or_()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _before_discard18 = _result
                    _result = self.__chars__(')')
                    _all19 = []
                    while 1:
                        _choice20 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all19.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice20
                            break
                    _result = _all19
                    _result = _before_discard18
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice14
                # Alternative 4: bare primary.
                _choice21 = self._pos
                try:
                    _call_status = self._primary()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice21
                # All alternatives failed.
                raise BacktrackException(_error)
                # NOTE: generated dead code below (unreachable after the raise).
                _call_status = self._primary()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._enclosed()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # Cache the failure so future attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def primary(self):
return self._primary().result
    def _primary(self):
        """Memoizing (packrat) match of the 'primary' nonterminal.

        Alternatives, tried in order: call | REGEX IGNORE* | QUOTE IGNORE*.
        A Status object per input position caches the outcome and drives the
        left-recursion protocol (LEFTRECURSION / SOMESOLUTIONS states).
        Raises BacktrackException when no alternative matches.
        """
        # memoization lookup, keyed by input position
        _key = self._pos
        _status = self._dict_primary.get(_key, None)
        if _status is None:
            _status = self._dict_primary[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                _statusstatus == _status.LEFTRECURSION):
                # re-entered while already parsing at this position:
                # left recursion detected
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # alternative 1: call
                _choice0 = self._pos
                try:
                    _call_status = self._call()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # alternative 2: REGEX IGNORE*
                _choice1 = self._pos
                try:
                    _call_status = self._REGEX()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _before_discard2 = _result
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = _before_discard2
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                # alternative 3: QUOTE IGNORE*
                _choice5 = self._pos
                try:
                    _call_status = self._QUOTE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _before_discard6 = _result
                    _all7 = []
                    while 1:
                        _choice8 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all7.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice8
                            break
                    _result = _all7
                    _result = _before_discard6
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                    raise BacktrackException(_error)
                # unreachable duplicate of the last alternative (the except
                # above always raises) -- generated-code artifact
                _call_status = self._QUOTE()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                _before_discard9 = _result
                _all10 = []
                while 1:
                    _choice11 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all10.append(_result)
                    except BacktrackException, _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice11
                        break
                _result = _all10
                _result = _before_discard9
                break
            # left-recursion bookkeeping: keep re-parsing from the start
            # until the matched span stops growing
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._primary()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def call(self):
return self._call().result
    def _call(self):
        """Memoizing (packrat) match of the 'call' nonterminal.

        Grammar: NAME arguments IGNORE*  ->  Nonterminal("call", [x, args]).
        Uses the same Status-based memoization / left-recursion protocol as
        the other generated rule methods.
        """
        _key = self._pos
        _status = self._dict_call.get(_key, None)
        if _status is None:
            _status = self._dict_call[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            x = _result
            _call_status = self._arguments()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            args = _result
            # consume trailing layout (IGNORE*)
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _result = (Nonterminal("call", [x, args]))
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._call()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def arguments(self):
return self._arguments().result
    def _arguments(self):
        """Memoizing (packrat) match of the 'arguments' nonterminal.

        Grammar:
          '(' IGNORE* (PYTHONCODE IGNORE* ',' IGNORE*)* PYTHONCODE ')' IGNORE*
                              -> Nonterminal("args", args + [last])
        | <empty>             -> Nonterminal("args", [])
        """
        _key = self._pos
        _status = self._dict_arguments.get(_key, None)
        if _status is None:
            _status = self._dict_arguments[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # alternative 1: parenthesized, comma-separated PYTHONCODE list
                _choice0 = self._pos
                try:
                    _result = self.__chars__('(')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._PYTHONCODE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard5 = _result
                            _all6 = []
                            while 1:
                                _choice7 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all6.append(_result)
                                except BacktrackException, _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice7
                                    break
                            _result = _all6
                            _result = self.__chars__(',')
                            _all8 = []
                            while 1:
                                _choice9 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all8.append(_result)
                                except BacktrackException, _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice9
                                    break
                            _result = _all8
                            _result = _before_discard5
                            _all3.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    args = _result
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    last = _result
                    _result = self.__chars__(')')
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException, _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = (Nonterminal("args", args + [last]))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # alternative 2: empty -> no arguments
                _choice12 = self._pos
                try:
                    _result = (Nonterminal("args", []))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice12
                    raise BacktrackException(_error)
                # unreachable duplicate of the last alternative
                # (generated-code artifact)
                _result = (Nonterminal("args", []))
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._arguments()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def __init__(self, inputstream):
        """Initialize the parser over `inputstream`.

        One memoization dictionary per grammar rule, keyed by input
        position, plus the current parse position.
        """
        self._dict_NAME = {}
        self._dict_SPACE = {}
        self._dict_COMMENT = {}
        self._dict_IGNORE = {}
        self._dict_newline = {}
        self._dict_REGEX = {}
        self._dict_QUOTE = {}
        self._dict_PYTHONCODE = {}
        self._dict_EOF = {}
        self._dict_file = {}
        self._dict_list = {}
        self._dict_production = {}
        self._dict_productionargs = {}
        self._dict_or_ = {}
        self._dict_commands = {}
        self._dict_command = {}
        self._dict_simplecommand = {}
        self._dict_return_ = {}
        self._dict_if_ = {}
        self._dict_choose = {}
        self._dict_commandchain = {}
        self._dict_named_command = {}
        self._dict_repetition = {}
        self._dict_negation = {}
        self._dict_enclosed = {}
        self._dict_primary = {}
        self._dict_call = {}
        self._dict_arguments = {}
        self._pos = 0
        self._inputstream = inputstream
def _regex299149370(self):
_choice13 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_299149370(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice13
raise BacktrackException
_upto = _runner.last_matched_index + 1
_result = self._inputstream[self._pos: _upto]
self._pos = _upto
return _result
def _regex1006631623(self):
_choice14 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1006631623(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice14
raise BacktrackException
_upto = _runner.last_matched_index + 1
_result = self._inputstream[self._pos: _upto]
self._pos = _upto
return _result
def _regex528667127(self):
_choice15 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_528667127(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice15
raise BacktrackException
_upto = _runner.last_matched_index + 1
_result = self._inputstream[self._pos: _upto]
self._pos = _upto
return _result
def _regex291086639(self):
_choice16 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_291086639(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice16
raise BacktrackException
_upto = _runner.last_matched_index + 1
_result = self._inputstream[self._pos: _upto]
self._pos = _upto
return _result
def _regex1074651696(self):
_choice17 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1074651696(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice17
raise BacktrackException
_upto = _runner.last_matched_index + 1
_result = self._inputstream[self._pos: _upto]
self._pos = _upto
return _result
def _regex1124192327(self):
_choice18 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1124192327(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice18
raise BacktrackException
_upto = _runner.last_matched_index + 1
_result = self._inputstream[self._pos: _upto]
self._pos = _upto
return _result
def _regex1979538501(self):
_choice19 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1979538501(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice19
raise BacktrackException
_upto = _runner.last_matched_index + 1
_result = self._inputstream[self._pos: _upto]
self._pos = _upto
return _result
    class _Runner(object):
        """Generated DFA runner for the lexer regexes.

        Each recognize_* method executes one hard-coded automaton over
        self.text starting at index i.  Accepting positions are recorded
        in last_matched_index / last_matched_state; a return value of ~i
        signals the automaton stopped at position i without ending in an
        accepting state.
        """
        def __init__(self, text, pos):
            self.text = text
            self.pos = pos
            self.last_matched_state = -1
            self.last_matched_index = -1
            self.state = -1
        # runs of ' ' and '\n' (whitespace token)
        def recognize_299149370(runner, i):
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 0
                        return i
                    if char == ' ':
                        state = 1
                    elif char == '\n':
                        state = 2
                    else:
                        break
                if state == 1:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 1
                        return ~i
                    if char == ' ':
                        state = 1
                        continue
                    elif char == '\n':
                        state = 2
                    else:
                        break
                if state == 2:
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 2
                        return i
                    if char == '\n':
                        state = 2
                        continue
                    elif char == ' ':
                        state = 2
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        # backtick-delimited text with backslash escapes
        def recognize_1006631623(runner, i):
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 0
                        return ~i
                    if char == '`':
                        state = 1
                    else:
                        break
                if state == 1:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 1
                        return ~i
                    if '\x00' <= char <= '[':
                        state = 1
                        continue
                    elif ']' <= char <= '_':
                        state = 1
                        continue
                    elif 'a' <= char <= '\xff':
                        state = 1
                        continue
                    elif char == '\\':
                        state = 2
                    elif char == '`':
                        state = 3
                    else:
                        break
                if state == 2:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 2
                        return ~i
                    if '\x00' <= char <= '\xff':
                        state = 1
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        # '#'-comment (possibly space-prefixed) terminated by a newline
        def recognize_528667127(runner, i):
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 0
                        return ~i
                    if char == ' ':
                        state = 0
                        continue
                    elif char == '#':
                        state = 1
                    else:
                        break
                if state == 1:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 1
                        return ~i
                    if '\x00' <= char <= '\t':
                        state = 1
                        continue
                    elif '\x0b' <= char <= '\xff':
                        state = 1
                        continue
                    elif char == '\n':
                        state = 2
                    else:
                        break
                if state == 2:
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 2
                        return i
                    if char == ' ':
                        state = 0
                        continue
                    elif char == '#':
                        state = 1
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        # brace-delimited text: '{' ... '}'
        def recognize_291086639(runner, i):
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 0
                        return ~i
                    if char == '{':
                        state = 1
                    else:
                        break
                if state == 1:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 1
                        return ~i
                    if '\x00' <= char <= '\t':
                        state = 1
                        continue
                    elif '\x0b' <= char <= '|':
                        state = 1
                        continue
                    elif '~' <= char <= '\xff':
                        state = 1
                        continue
                    elif char == '}':
                        state = 2
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        # identifier: [A-Za-z_][A-Za-z0-9_]*
        def recognize_1074651696(runner, i):
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 0
                        return ~i
                    if 'A' <= char <= 'Z':
                        state = 1
                    elif char == '_':
                        state = 1
                    elif 'a' <= char <= 'z':
                        state = 1
                    else:
                        break
                if state == 1:
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 1
                        return i
                    if '0' <= char <= '9':
                        state = 1
                        continue
                    elif 'A' <= char <= 'Z':
                        state = 1
                        continue
                    elif char == '_':
                        state = 1
                        continue
                    elif 'a' <= char <= 'z':
                        state = 1
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        # single-quote delimited text
        def recognize_1124192327(runner, i):
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 0
                        return ~i
                    if char == "'":
                        state = 1
                    else:
                        break
                if state == 1:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 1
                        return ~i
                    if '\x00' <= char <= '&':
                        state = 1
                        continue
                    elif '(' <= char <= '\xff':
                        state = 1
                        continue
                    elif char == "'":
                        state = 2
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        # single whitespace char or a '#'-comment up to a newline
        def recognize_1979538501(runner, i):
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 0
                        return ~i
                    if char == '#':
                        state = 1
                    elif char == '\t':
                        state = 2
                    elif char == '\n':
                        state = 2
                    elif char == ' ':
                        state = 2
                    else:
                        break
                if state == 1:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 1
                        return ~i
                    if '\x00' <= char <= '\t':
                        state = 1
                        continue
                    elif '\x0b' <= char <= '\xff':
                        state = 1
                        continue
                    elif char == '\n':
                        state = 2
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
class PyPackratSyntaxParser(PackratParser):
    """The packrat grammar parser; its rule methods are grafted on from
    the generated Parser class by the module-level loop just below."""
    def __init__(self, stream):
        self.init_parser(stream)
# Copy the generated Parser's methods onto PyPackratSyntaxParser, skipping
# special attributes and anything the subclass already defines itself.
forbidden = dict.fromkeys(("__weakref__ __doc__ "
                           "__dict__ __module__").split())
# NOTE(review): `initthere` appears unused in this module -- verify
initthere = "__init__" in PyPackratSyntaxParser.__dict__
for key, value in Parser.__dict__.iteritems():
    if key not in PyPackratSyntaxParser.__dict__ and key not in forbidden:
        setattr(PyPackratSyntaxParser, key, value)
# expose Parser.__init__ under the name init_parser (called by __init__ above)
PyPackratSyntaxParser.init_parser = Parser.__init__.im_func
| Python |
class Codebuilder(object):
    """Accumulates lines of generated source code with block indentation.

    Lines are stored as (depth, text) pairs; the current nesting depth is
    the number of block headers on self.blocks.
    """
    def __init__(self):
        self.blocks = []      # stack of currently open block headers
        self.code = []        # list of (depth, line) pairs
        # Fix: initialize initcode here.  emit_initcode() appends to this
        # attribute, but nothing ever created it, so its first call failed
        # with AttributeError.
        self.initcode = []
    def get_code(self):
        """Join all emitted lines, indenting each by its block depth."""
        assert not self.blocks
        return "\n".join([" " * depth + line for depth, line in self.code])
    def make_parser(self):
        """Compile the emitted source and return its 'Parser' class."""
        m = {'Status': Status,
             'Nonterminal': Nonterminal,
             'Symbol': Symbol,}
        # tuple form of exec is accepted by both Python 2 and Python 3
        exec(py.code.Source(self.get_code()).compile(), m)
        return m['Parser']
    def emit(self, line):
        """Emit one or more non-empty lines at the current block depth."""
        for line in line.split("\n"):
            if line:
                self.code.append((len(self.blocks), line))
    def emit_initcode(self, line):
        """Collect initialization code lines (no indentation tracking)."""
        for line in line.split("\n"):
            self.initcode.append(line)
    def start_block(self, blockstarter):
        """Open an indented block; iterate the returned generator to close it."""
        assert blockstarter.endswith(":")
        self.emit(blockstarter)
        self.blocks.append(blockstarter)
        def BlockEnder():
            yield None
            self.end_block(blockstarter)
        return BlockEnder()
    def end_block(self, starterpart=""):
        """Close the innermost block, checking it matches `starterpart`."""
        block = self.blocks.pop()
        assert starterpart in block, "ended wrong block %s with %s" % (
            block, starterpart)
    def store_code_away(self):
        """Detach and return the current (blocks, code) state, resetting both."""
        result = self.blocks, self.code
        self.code = []
        self.blocks = []
        return result
    def restore_code(self, state):
        """Swap in a stored (blocks, code) state; return the replaced one."""
        blocks, code = state
        result = self.blocks, self.code
        self.code = code
        self.blocks = blocks
        return result
    def add_code(self, state):
        """Append stored code, re-indented relative to the current depth."""
        blocks, code = state
        self.code += [(depth + len(self.blocks), line) for depth, line in code]
        self.blocks += blocks
| Python |
import py
from pypy.rlib.parsing.lexer import SourcePos
from pypy.rlib.parsing.tree import Node, Symbol, Nonterminal
class Rule(object):
    """A grammar rule: a nonterminal together with its possible expansions."""
    def __init__(self, nonterminal, expansions):
        self.nonterminal = nonterminal
        self.expansions = expansions
    def getkey(self):
        """Hashable identity of the rule (nonterminal, expansion tuple)."""
        return (self.nonterminal, tuple(self.expansions))
    # deliberately not hashable by value; see the key above if that is
    # ever needed:
    # def __hash__(self):
    #     return hash(self.getkey())
    def __eq__(self, other):
        return self.getkey() == other.getkey()
    def __ne__(self, other):
        return not self.__eq__(other)
    def __str__(self):
        alternatives = " | ".join([repr(e) for e in self.expansions])
        return "%s: %s" % (self.nonterminal, alternatives)
    def __repr__(self):
        return "Rule(%r, %r)" % (self.nonterminal, self.expansions)
class LazyInputStream(object):
    """Expose an iterator as an indexable, lazily-filled sequence."""
    def __init__(self, iterator):
        self.iterator = iter(iterator)
        self.data = []
    def __getitem__(self, index):
        assert index >= 0
        # pull items from the iterator until position `index` is available
        while not index < len(self.data):
            try:
                self.data.append(self.iterator.next())
            except StopIteration:
                raise IndexError("index out of range")
        return self.data[index]
class ParseError(Exception):
    """Raised when parsing fails; carries the position and failure details."""
    def __init__(self, source_pos, errorinformation):
        self.source_pos = source_pos
        self.errorinformation = errorinformation
        self.args = (source_pos, errorinformation)
    def nice_error_message(self, filename="<unknown>", source=""):
        """Format a human-readable message, quoting the offending line."""
        lines = [" File %s, line %s" % (filename, self.source_pos.lineno)]
        if source:
            # show the source line with a caret under the error column
            lines.append(source.split("\n")[self.source_pos.lineno])
            lines.append(" " * self.source_pos.columnno + "^")
        else:
            lines.append("<couldn't get source>")
        if self.errorinformation:
            reasons = self.errorinformation.failure_reasons
            if len(reasons) > 1:
                quoted = ["'%s'" % r for r in reasons[:-1]]
                expected = "%s or '%s'" % (", ".join(quoted), reasons[-1])
            else:
                expected = reasons[0]
            lines.append("ParseError: expected %s" % (expected, ))
        else:
            lines.append("ParseError")
        return "\n".join(lines)
class ErrorInformation(object):
    """Position of a parse failure plus the list of expected items there."""
    def __init__(self, pos, failure_reasons=None):
        if failure_reasons is None:
            failure_reasons = []
        self.failure_reasons = failure_reasons
        self.pos = pos
    def combine_errors(self, other):
        """Merge two errors, keeping the one that got further into the input.

        Accepts None for either side (it can also be invoked unbound with
        self=None); on a tie the failure reasons are deduplicated and merged.
        """
        if self is None:
            return other
        if (other is None or self.pos > other.pos or
            len(other.failure_reasons) == 0):
            return self
        elif other.pos > self.pos or len(self.failure_reasons) == 0:
            return other
        merged = []
        seen = {}
        for reason in self.failure_reasons + other.failure_reasons:
            if reason not in seen:
                seen[reason] = True
                merged.append(reason)
        return ErrorInformation(self.pos, merged)
class LazyParseTable(object):
    """Memoizing parse table: computes (node, next_pos, error) per
    (position, symbol) pair on demand."""
    def __init__(self, input, parser):
        self.parser = parser
        self.input = input
        self.matched = {}          # (i, symbol) -> (node_or_None, next, error)
        self.errorinformation = {}
    def match_symbol(self, i, symbol):
        """Try to match `symbol` at token index `i`.

        Returns (tree_node, next_index, error); tree_node is None on
        failure, in which case `error` describes the furthest failure.
        """
        #print i, symbol
        #print self.matched.keys()
        if (i, symbol) in self.matched:
            return self.matched[i, symbol]
        error = None # for the annotator
        if self.parser.is_nonterminal(symbol):
            rule = self.parser.get_rule(symbol)
            lastexpansion = len(rule.expansions) - 1
            subsymbol = None
            error = None
            # try each alternative expansion in order
            for expansionindex in range(len(rule.expansions)):
                expansion = rule.expansions[expansionindex]
                curr = i
                children = []
                for j in range(len(expansion)):
                    subsymbol = expansion[j]
                    node, next, error2 = self.match_symbol(curr, subsymbol)
                    if node is None:
                        # NOTE(review): bare `combine_errors` -- presumably a
                        # module-level helper defined elsewhere in this
                        # module; verify it exists (the class ErrorInformation
                        # has a method of the same name)
                        error = combine_errors(error, error2)
                        break
                    children.append(node)
                    curr = next
                else:
                    # all subsymbols matched: build the nonterminal node
                    assert len(expansion) == len(children)
                    result = (Nonterminal(symbol, children), curr, error)
                    self.matched[i, symbol] = result
                    return result
            self.matched[i, symbol] = None, 0, error
            return None, 0, error
        else:
            # terminal: compare against the next input token
            try:
                input = self.input[i]
                if self.terminal_equality(symbol, input):
                    result = (Symbol(symbol, input.source, input), i + 1, error)
                    self.matched[i, symbol] = result
                    return result
                else:
                    # XXX hack unnice: handles the sort of token names that
                    # ebnfparse produces
                    if (symbol.startswith("__") and
                        symbol.split("_")[2][0] in "0123456789"):
                        expected = symbol.split("_")[-1]
                    else:
                        expected = symbol
                    error = ErrorInformation(i, [expected])
            except IndexError:
                # ran off the end of the input
                error = ErrorInformation(i)
            return None, 0, error
    def terminal_equality(self, symbol, input):
        """True if token `input` matches terminal `symbol` (by token name)."""
        return symbol == input.name
class PackratParser(object):
    """A memoizing recursive-descent (packrat) parser over a set of Rules."""
    def __init__(self, rules, startsymbol, parsetablefactory=LazyParseTable,
                 check_for_left_recursion=True):
        self.rules = rules
        self.nonterminal_to_rule = {}
        for rule in rules:
            self.nonterminal_to_rule[rule.nonterminal] = rule
        self.startsymbol = startsymbol
        if check_for_left_recursion:
            assert not self.has_left_recursion()
        self.parsetablefactory = parsetablefactory
    def is_nonterminal(self, symbol):
        """True if `symbol` names a rule (as opposed to a terminal)."""
        return symbol in self.nonterminal_to_rule
    def get_rule(self, symbol):
        return self.nonterminal_to_rule[symbol]
    def parse(self, tokeniterator, lazy=False):
        """Parse a token stream; return the tree or raise ParseError.

        With lazy=True the token iterator is consumed on demand.
        """
        if lazy:
            input = LazyInputStream(tokeniterator)
        else:
            input = list(tokeniterator)
        table = self.parsetablefactory(input, self)
        result = table.match_symbol(0, self.startsymbol)
        if result[0] is None:
            error = result[2]
            raise ParseError(input[error.pos].source_pos, error)
        return result[0]
    def has_left_recursion(self):
        """NOT_RPYTHON"""
        # compute, for every nonterminal, the set of nonterminals reachable
        # in first position; left recursion iff one reaches itself
        follows = {}
        for rule in self.rules:
            follow = py.builtin.set()
            follows[rule.nonterminal] = follow
            for expansion in rule.expansions:
                if expansion and self.is_nonterminal(expansion[0]):
                    follow.add(expansion[0])
        changed = True
        while changed:
            changed = False
            for nonterminal, follow in follows.iteritems():
                for nt in follow:
                    subfollow = follows[nt]
                    update = subfollow - follow
                    if update:
                        changed = True
                        follow.update(update)
                        break
        for nonterminal, follow in follows.iteritems():
            if nonterminal in follow:
                print "nonterminal %s is in its own follow %s" % (nonterminal, follow)
                return True
        return False
    def __repr__(self):
        from pprint import pformat
        return "%s%s" % (self.__class__.__name__,
                         pformat((self.rules, self.startsymbol)), )
class ParserCompiler(object):
    """Generates Python source for a specialized (table-free) version of a
    PackratParser and compiles it into a class."""
    def __init__(self, parser):
        self.parser = parser
        self.allcode = []           # accumulated source lines
        self.symbol_to_number = {}  # symbol -> small integer id
        self.made = {}              # symbols already emitted
    def compile(self):
        """Emit matchers for all reachable symbols; exec and return the class."""
        from pypy.tool.sourcetools import func_with_new_name
        self.allcode.append("class CompileableParser(baseclass):")
        self.make_matcher(self.parser.startsymbol)
        self.make_fixed()
        miniglobals = globals().copy()
        miniglobals["baseclass"] = self.parser.__class__
        #print "\n".join(self.allcode)
        exec py.code.Source("\n".join(self.allcode)).compile() in miniglobals
        kls = miniglobals["CompileableParser"]
        # XXX
        parsetable = self.parser.parsetablefactory([], self.parser)
        kls.terminal_equality = func_with_new_name(
            parsetable.terminal_equality.im_func,
            "terminal_equality_compileable")
        return kls
    def get_number(self, symbol):
        """Return a stable small integer id for `symbol`."""
        if symbol in self.symbol_to_number:
            return self.symbol_to_number[symbol]
        result = len(self.symbol_to_number)
        self.symbol_to_number[symbol] = result
        return result
    def make_matcher(self, symbol):
        """Emit the matcher for `symbol` (terminal or nonterminal) once."""
        if symbol not in self.made:
            self.made[symbol] = True
            if self.parser.is_nonterminal(symbol):
                self.make_nonterminal_matcher(symbol)
            else:
                self.make_terminal_matcher(symbol)
    def make_terminal_matcher(self, symbol):
        # emits a memoized matcher comparing the next token to the terminal
        number = self.get_number(symbol)
        self.allcode.append("""
    def match_terminal%(number)s(self, i):
        # matcher for terminal %(number)s %(symbol)r
        if i in self.matched_terminals%(number)s:
            return self.matched_terminals%(number)s[i]
        try:
            input = self.input[i]
            if self.terminal_equality(%(symbol)r, input):
                symbol = Symbol(%(symbol)r, input.name, input)
                result = (symbol, i + 1)
                self.matched_terminals%(number)s[i] = result
                return result
        except IndexError:
            pass
        return None, i""" % vars())
    def make_nonterminal_matcher(self, symbol):
        # emits a memoized matcher trying each expansion in sequence
        # NOTE(review): the emitted success path never appends `node` to
        # `children`, so compiled nonterminal nodes get empty child lists --
        # looks like a latent bug compared to LazyParseTable.match_symbol;
        # verify before relying on the compiled parser's trees
        number = self.get_number(symbol)
        rule = self.parser.nonterminal_to_rule[symbol]
        code = []
        code.append("""
    def match_nonterminal%(number)s(self, i):
        # matcher for nonterminal %(number)s %(symbol)s
        if i in self.matched_nonterminals%(number)s:
            return self.matched_nonterminals%(number)s[i]
        last_failed_position = 0
        subsymbol = None
        expansionindex = 0
        while 1:""" % vars())
        for expansionindex, expansion in enumerate(rule.expansions):
            nextindex = expansionindex + 1
            code.append("""\
            if expansionindex == %s:""" % (expansionindex, ))
            if not expansion:
                code.append("""\
                result = (Nonterminal(symbol, []), i)
                self.matched_nonterminals%(number)s[i] = result
                return result""" % vars())
                continue
            code.append("""\
                curr = i
                children = []""")
            for subsymbol in expansion:
                self.make_matcher(subsymbol)
                if self.parser.is_nonterminal(subsymbol):
                    match = "match_nonterminal%s" % self.get_number(subsymbol)
                else:
                    match = "match_terminal%s" % self.get_number(subsymbol)
                code.append("""\
                node, next = self.%(match)s(curr)
                if node is None:
                    last_failed_position = next
                    expansionindex = %(nextindex)s
                    continue
                curr = next""" % vars())
            code.append("""\
                result = (Nonterminal(%(symbol)r, children), curr)
                self.matched_nonterminals%(number)s[i] = result
                return result""" % vars())
        code.append("""\
            if expansionindex == %(nextindex)s:
                result = None, last_failed_position
                self.matched_nonterminals%(number)s[i] = result
                return result""" % vars())
        self.allcode.extend(code)
    def make_fixed(self):
        """Emit the fixed parts of the class: __init__ and parse."""
        # __init__
        code = ["""
    def __init__(self):
        self.rules = [] # dummy
        self.nonterminal_to_rule = {} # dummy
        self.startsymbol = "" # dummy
        self.parsetablefactory = None # dummy"""]
        for symbol, number in self.symbol_to_number.iteritems():
            if self.parser.is_nonterminal(symbol):
                name = "matched_nonterminals%s" % number
            else:
                name = "matched_terminals%s" % number
            code.append("""\
        self.%(name)s = {}""" % vars())
        # parse
        startsymbol = self.get_number(self.parser.startsymbol)
        code.append("""
    def parse(self, tokenlist, lazy=True):
        self.input = tokenlist
        result = self.match_nonterminal%(startsymbol)s(0)
        if result[0] is None:
            raise ParseError(None, self.input[result[1]])
        return result[0]""" % (vars()))
        self.allcode.extend(code)
| Python |
import py
from pypy.rlib.objectmodel import we_are_translated
from pypy.rlib.parsing import deterministic, regex
class Token(object):
    """A single lexed token: token-class name, matched source text, and
    its SourcePos (or None)."""
    def __init__(self, name, source, source_pos):
        self.name = name
        self.source = source
        self.source_pos = source_pos
    def __eq__(self, other):
        # structural equality; intended for the tests only
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # structural inequality; intended for the tests only
        return not self.__eq__(other)
    def __repr__(self):
        return "Token(%r, %r, %r)" % (self.name, self.source, self.source_pos)
class SourcePos(object):
    """A position in the source text: absolute index, line and column."""
    def __init__(self, i, lineno, columnno):
        self.i = i
        self.lineno = lineno
        self.columnno = columnno
    def __eq__(self, other):
        # structural equality; intended for the tests only
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # structural inequality; intended for the tests only
        return not self.__eq__(other)
    def __repr__(self):
        return "SourcePos(%r, %r, %r)" % (self.i, self.lineno, self.columnno)
class Lexer(object):
    """Builds a DFA-based lexer from token regexes and tokenizes text.

    `token_regexs` and `names` are parallel lists; `ignore` lists token
    names to be skipped by the runner.
    """
    def __init__(self, token_regexs, names, ignore=None):
        self.token_regexs = token_regexs
        self.names = names
        self.rex = regex.LexingOrExpression(token_regexs, names)
        automaton = self.rex.make_automaton()
        self.automaton = automaton.make_deterministic(names)
        self.automaton.optimize() # XXX not sure whether this is a good idea
        if ignore is None:
            ignore = []
        for ign in ignore:
            assert ign in names
        # dict keyed by name for fast membership tests (the previous dead
        # "self.ignore = []" assignment right before this was dropped)
        self.ignore = dict.fromkeys(ignore)
        self.matcher = self.automaton.make_lexing_code()
    def get_runner(self, text, eof=False):
        """Return a LexingDFARunner over `text`."""
        return LexingDFARunner(self.matcher, self.automaton, text,
                               self.ignore, eof)
    def tokenize(self, text, eof=False):
        """Tokenize `text` eagerly and return the list of Tokens."""
        r = LexingDFARunner(self.matcher, self.automaton, text,
                            self.ignore, eof)
        result = []
        while 1:
            try:
                tok = r.find_next_token()
                result.append(tok)
            except StopIteration:
                break
        return result
    def get_dummy_repr(self):
        """Source snippet reconstructing this lexer as a DummyLexer."""
        return '%s\nlexer = DummyLexer(recognize, %r, %r)' % (
            py.code.Source(self.matcher),
            self.automaton,
            self.ignore)
    def __getstate__(self):
        return (self.token_regexs, self.names, self.ignore)
    def __setstate__(self, args):
        self.__init__(*args)
class DummyLexer(Lexer):
    """A Lexer reconstructed from a pre-built matcher and automaton."""
    def __init__(self, matcher, automaton, ignore):
        # deliberately does NOT call Lexer.__init__: the matcher and
        # automaton are supplied ready-made instead of being compiled
        self.token_regexs = None
        self.names = None
        self.rex = None
        self.matcher = matcher
        self.automaton = automaton
        self.ignore = ignore
class LexingDFARunner(deterministic.DFARunner):
    """Iterator that repeatedly runs the DFA matcher over the text and
    yields Token objects (longest-match semantics via the automaton's
    last_matched_* bookkeeping).

    Convention used by inner_loop/matcher: a non-negative return value
    is the position after a successful scan; a negative value ~j encodes
    the failure position j.
    """
    # class-level default; instances get their own position tracking
    i = 0
    def __init__(self, matcher, automaton, text, ignore, eof=False):
        self.automaton = automaton
        self.state = 0
        self.text = text
        self.last_matched_state = 0
        self.last_matched_index = -1
        self.ignore = ignore        # token names to skip (dict used as a set)
        self.eof = eof              # whether to emit a final EOF token
        self.matcher = matcher
        self.lineno = 0
        self.columnno = 0

    def find_next_token(self):
        """Return the next non-ignored Token, or raise StopIteration.

        Raises deterministic.LexerError when the DFA rejects the input
        at a position where no token has been matched.
        """
        while 1:
            # restart the DFA right after the last emitted token
            self.state = 0
            i = self.last_matched_index + 1
            start = i
            assert start >= 0
            if i == len(self.text):
                # end of input: emit a synthetic EOF token exactly once
                if self.eof:
                    self.last_matched_index += 1
                    return Token("EOF", "EOF",
                                 SourcePos(i, self.lineno, self.columnno))
                else:
                    raise StopIteration
            if i >= len(self.text) + 1:
                # past the EOF token position: iteration is over
                raise StopIteration
            i = self.inner_loop(i)
            if i < 0:
                # ~i is the position where the DFA stopped
                i = ~i
                if start == self.last_matched_index + 1:
                    # the DFA advanced without completing any token
                    source_pos = SourcePos(i - 1, self.lineno, self.columnno)
                    raise deterministic.LexerError(self.text, self.state,
                                                   source_pos)
                # emit the longest match found before the stop position
                stop = self.last_matched_index + 1
                assert stop >= 0
                source = self.text[start: stop]
                lineno, columnno = self.adjust_position(source)
                if self.automaton.names[self.last_matched_state] in self.ignore:
                    # skip ignored tokens and scan again
                    continue
                source_pos = SourcePos(start, lineno, columnno)
                return Token(self.automaton.names[self.last_matched_state],
                             source, source_pos)
            if self.last_matched_index == i - 1:
                # the match extends to the very end of the text
                token = self.text[start: ]
                lineno, columnno = self.adjust_position(token)
                if self.automaton.names[self.last_matched_state] in self.ignore:
                    # final token is ignored: go straight to EOF handling
                    if self.eof:
                        self.last_matched_index += 1
                        return Token("EOF", "EOF",
                                     SourcePos(i, self.lineno, self.columnno))
                    else:
                        raise StopIteration
                return Token(self.automaton.names[self.last_matched_state],
                             self.text[start:],
                             SourcePos(start, lineno, columnno))
            # reached end of text in a non-final state
            source_pos = SourcePos(i - 1, self.lineno, self.columnno)
            raise deterministic.LexerError(self.text, self.state, source_pos)

    def adjust_position(self, token):
        """Advance line/column counters past token.

        Returns the (lineno, columnno) of the *start* of the token.
        NOTE(review): after a newline the column is set to
        token.rfind("\n"), i.e. an offset within the token rather than
        the number of characters after the last newline -- presumably a
        long-standing quirk; confirm before relying on column values.
        """
        lineno = self.lineno
        columnno = self.columnno
        self.lineno += token.count("\n")
        if lineno == self.lineno:
            # no newline inside the token: just move the column along
            self.columnno += len(token)
        else:
            self.columnno = token.rfind("\n")
        return lineno, columnno

    # kept for reference: the pure-Python equivalent of the generated
    # matcher code invoked by inner_loop below
#    def inner_loop(self, i):
#        while i < len(self.text):
#            char = self.text[i]
#            #print i, self.last_matched_index, self.last_matched_state, repr(char)
#            try:
#                state = self.nextstate(char)
#            except KeyError:
#                return ~i
#            if state in self.automaton.final_states:
#                self.last_matched_state = state
#                self.last_matched_index = i
#            i += 1
#        if state not in self.automaton.final_states:
#            return ~i
#        return i

    def inner_loop(self, i):
        # delegate to the generated matcher function
        return self.matcher(self, i)

    # iterator protocol (Python 2 style): next() yields tokens
    next = find_next_token

    def __iter__(self):
        return self
| Python |
import py
from pypy.rlib.parsing.parsing import PackratParser, Rule
from pypy.rlib.parsing.tree import Nonterminal, Symbol, RPythonVisitor
from pypy.rlib.parsing.codebuilder import Codebuilder
from pypy.rlib.parsing.regexparse import parse_regex
import string
from pypy.rlib.parsing.regex import *
from pypy.rlib.parsing.deterministic import DFA
from pypy.rlib.parsing.lexer import Lexer, DummyLexer
from pypy.rlib.objectmodel import we_are_translated
set = py.builtin.set
def make_ebnf_parser():
    """Bootstrap the parser, lexer and AST transformer for the EBNF
    meta-language itself (the language grammars are written in)."""
    NONTERMINALNAME = parse_regex("([a-z]|_)[a-z0-9_]*")
    SYMBOLNAME = parse_regex("_*[A-Z]([A-Z]|_)*")
    LONGQUOTED = parse_regex(r'"[^\"]*(\\\"?[^\"]+)*(\\\")?"')
    QUOTEDQUOTE = parse_regex("""'"'""")
    COMMENT = parse_regex("#[^\\n]*\\n")
    # hand-written tokens of the meta-language; the two QUOTE entries
    # cover double- and single-quoted literals, IGNORE covers whitespace
    # and comments
    basic_names = ['SYMBOLNAME', 'NONTERMINALNAME', 'QUOTE', 'QUOTE', 'IGNORE',
                   'IGNORE', 'IGNORE', 'IGNORE']
    basic_regexs = [SYMBOLNAME, NONTERMINALNAME, LONGQUOTED, QUOTEDQUOTE, COMMENT,
                    StringExpression('\n'), StringExpression(' '),
                    StringExpression('\t')]
    # the meta-grammar, described in itself
    rs, rules, transformer = parse_ebnf(r"""
    file: list EOF;
    list: element+;
    element: <regex> | <production>;
    regex: SYMBOLNAME ":" QUOTE ";";
    production: NONTERMINALNAME ":" body ";";
    body: (expansion ["|"])* expansion;
    expansion: decorated+;
    decorated: enclosed "*" |
               enclosed "+" |
               enclosed "?" |
               <enclosed>;
    enclosed: "[" expansion "]" |
              ">" expansion "<" |
              "<" primary ">" |
              "(" <expansion> ")" |
              <primary>;
    primary: NONTERMINALNAME | SYMBOLNAME | QUOTE;
    """)
    extra_names, extra_regexs = zip(*rs)
    lexer = Lexer(basic_regexs + list(extra_regexs),
                  basic_names + list(extra_names),
                  ignore=['IGNORE'])
    parser = PackratParser(rules, "file")
    return parser, lexer, transformer
def parse_ebnf(s):
    """Parse an EBNF grammar string.

    Returns a triple: a list of (token name, regex) pairs, the list of
    grammar Rule objects, and the generated ToAST transformer class.
    """
    builder = ParserBuilder()
    tokens = lexer.tokenize(s, True)
    tree = parser.parse(tokens)
    asts = tree.visit(EBNFToAST())
    assert len(asts) == 1
    asts[0].visit(builder)
    rules, changes = builder.get_rules_and_changes()
    ToAstVisitor = TransformerMaker(rules, changes).make_transformer()
    return zip(builder.names, builder.regexs), rules, ToAstVisitor
def check_for_missing_names(names, regexs, rules):
    """Verify that every symbol used in rules is defined.

    A symbol is known if it is a token name, the nonterminal of some
    rule, or the implicit EOF token.  Raises ValueError for the first
    unknown symbol encountered; regexs is accepted for symmetry with
    callers but not inspected.
    """
    known_names = {"EOF": True}
    for name in names:
        known_names[name] = True
    for rule in rules:
        known_names[rule.nonterminal] = True
    for rule in rules:
        for expansion in rule.expansions:
            for symbol in expansion:
                if symbol not in known_names:
                    raise ValueError("symbol '%s' not known" % (symbol, ))
def make_parse_function(regexs, rules, eof=False):
    """Build a ready-to-use parse(s) function for a grammar.

    regexs is a list of (name, regex) pairs; rules is the list of Rule
    objects, the first of which supplies the start symbol.  When eof is
    true the lexer appends an EOF token to the stream.
    """
    from pypy.rlib.parsing.lexer import Lexer
    names, regexs = zip(*regexs)
    ignore = []
    if "IGNORE" in names:
        ignore.append("IGNORE")
    check_for_missing_names(names, regexs, rules)
    lexer = Lexer(list(regexs), list(names), ignore=ignore)
    parser = PackratParser(rules, rules[0].nonterminal)
    def parse(s):
        tokens = lexer.tokenize(s, eof=eof)
        tree = parser.parse(tokens)
        if not we_are_translated():
            # under py.test --view, show the parse tree graphically
            try:
                if py.test.config.option.view:
                    tree.view()
            except AttributeError:
                pass
        return tree
    return parse
class ParserBuilder(object):
    """AST visitor that collects token regexes, grammar rules, and the
    per-expansion "change" strings from a parsed EBNF grammar.

    A change string has one character per symbol of an expansion,
    telling the generated ToAST transformer what to do with the child:
      " "  keep the child as a normal subtree
      "["  drop the child entirely
      ">"  splice the child's own children into the parent
      "<"  let the child replace the parent node
    """
    def __init__(self):
        self.regexs = []             # regex objects, parallel to self.names
        self.names = []              # token names
        self.rules = []              # Rule objects collected so far
        self.changes = []            # change strings, parallel to self.rules
        self.maybe_rules = {}        # helper rules created for "*" and "?"
        self.num_plus_symbols = 0    # counter used to name "+" helper rules
        self.first_rule = None       # nonterminal of the first user production
        self.literals = {}           # quoted literal -> generated token name

    def visit_file(self, node):
        # file: list EOF; -- only the list part carries content
        return node.children[0].visit(self)

    def visit_list(self, node):
        # visit each regex definition / production in order
        for child in node.children:
            child.visit(self)

    def visit_regex(self, node):
        # regex: SYMBOLNAME ":" QUOTE ";" -- register one token
        regextext = node.children[2].additional_info[1:-1].replace('\\"', '"')
        regex = parse_regex(regextext)
        if regex is None:
            raise ValueError(
                "%s is not a valid regular expression" % regextext)
        self.regexs.append(regex)
        self.names.append(node.children[0].additional_info)

    def visit_production(self, node):
        # production: NONTERMINALNAME ":" body ";"
        name = node.children[0].additional_info
        expansions = node.children[2].visit(self)
        changes = []
        rule_expansions = []
        for expansion in expansions:
            # each expansion arrives as a list of (symbol, change) pairs
            expansion, change = zip(*expansion)
            rule_expansions.append(list(expansion))
            changes.append("".join(change))
        if self.first_rule is None:
            # the first production becomes the grammar's start symbol
            self.first_rule = name
        self.changes.append(changes)
        self.rules.append(Rule(name, rule_expansions))

    def visit_body(self, node):
        # body: alternatives separated by "|"
        expansions = []
        for child in node.children:
            expansion = child.visit(self)
            expansions.append(expansion)
        return expansions

    def visit_expansion(self, node):
        # concatenate the (symbol, change) pairs of all decorated items
        expansions = []
        for child in node.children:
            expansion = child.visit(self)
            expansions += expansion
        return expansions

    def visit_enclosed(self, node):
        # "[...]", ">...<" or "<...>": override the change character of
        # every contained symbol with the bracket's own character
        result = []
        newchange = node.children[0].additional_info
        for name, change in node.children[1].visit(self):
            assert change == " " or change == newchange
            result.append((name, newchange))
        return result

    def visit_decorated(self, node):
        # enclosed "*" | enclosed "+" | enclosed "?": introduce a helper
        # rule implementing the repetition/option; the helper's children
        # are spliced into the parent (">")
        expansions = node.children[0].visit(self)
        expansions, changes = zip(*expansions)
        expansions, changes = list(expansions), "".join(changes)
        if node.children[1].additional_info == "*":
            # zero or more: right-recursive helper that may be absent
            name = "_star_symbol%s" % (len(self.maybe_rules), )
            maybe_rule = True
            expansions = [expansions + [name]]
            changes = [changes + ">", changes]
        elif node.children[1].additional_info == "+":
            # one or more: right-recursive helper that must occur once
            name = "_plus_symbol%s" % (self.num_plus_symbols, )
            self.num_plus_symbols += 1
            maybe_rule = False
            expansions = [expansions + [name], expansions]
            changes = [changes + ">", changes]
        elif node.children[1].additional_info == "?":
            # optional: the helper rule itself may be omitted entirely
            # (handled later by add_all_possibilities)
            name = "_maybe_symbol%s" % (len(self.maybe_rules), )
            maybe_rule = True
            expansions = [expansions]
            changes = [changes]
        self.rules.append(Rule(name, expansions))
        self.changes.append(changes)
        if maybe_rule:
            self.maybe_rules[name] = self.rules[-1]
        return [(name, ">")]

    def visit_primary_parens(self, node):
        # parenthesized group: delegate to the inner expansion
        if len(node.children) == 1:
            return node.children[0].visit(self)
        else:
            return node.children[1].visit(self)

    def visit_primary(self, node):
        # primary: NONTERMINALNAME | SYMBOLNAME | QUOTE
        if node.children[0].symbol == "QUOTE":
            from pypy.rlib.parsing.regexparse import unescape
            content = node.children[0].additional_info[1:-1]
            expression = unescape(content)
            name = self.get_literal_name(expression)
            return [(name, " ")]
        else:
            return [(node.children[0].additional_info, " ")]

    def get_literal_name(self, expression):
        # return (creating on first use) the token name for a quoted
        # literal; literal tokens are prepended so they take precedence
        # over the general token regexes
        if expression in self.literals:
            return self.literals[expression]
        name = "__%s_%s" % (len(self.literals), expression)
        self.literals[expression] = name
        self.regexs.insert(0, StringExpression(expression))
        self.names.insert(0, name)
        return name

    def get_rules_and_changes(self):
        # entry point after the visit: produce the final (rules, changes)
        self.fix_rule_order()
        return self.add_all_possibilities()

    def fix_rule_order(self):
        # move the first user-defined production to the front so it
        # becomes the grammar's start rule (helper rules may have been
        # appended before it)
        if self.rules[0].nonterminal != self.first_rule:
            for i, r in enumerate(self.rules):
                if r.nonterminal == self.first_rule:
                    break
            self.rules[i], self.rules[0] = self.rules[0], self.rules[i]
            self.changes[i], self.changes[0] = self.changes[0], self.changes[i]

    def add_all_possibilities(self):
        # for every expansion containing "maybe" symbols (generated for
        # "*" and "?"), emit variants with and without each such symbol,
        # chaining through freshly named helper rules
        all_rules = []
        other_rules = []
        all_changes = []
        other_changes = []
        for rule, changes in zip(self.rules, self.changes):
            real_changes = []
            real_expansions = []
            for index, (expansion, change) in enumerate(
                zip(rule.expansions, changes)):
                maybe_pattern = [symbol in self.maybe_rules
                                     for symbol in expansion]
                n = maybe_pattern.count(True)
                if n == 0:
                    # no optional symbols: keep the expansion unchanged
                    real_expansions.append(expansion)
                    real_changes.append(change)
                    continue
                assert n != len(expansion), (
                    "currently an expansion needs at least one"
                    "symbol that always has to occur")
                # cut the expansion right after every maybe symbol
                slices = []
                start = 0
                for i, (maybe, symbol) in enumerate(
                    zip(maybe_pattern, expansion)):
                    if maybe:
                        slices.append((start, i + 1))
                        start = i + 1
                rest_slice = (start, i + 1)
                name = rule.nonterminal
                for i, (start, stop) in enumerate(slices):
                    nextname = "__%s_rest_%s_%s" % (rule.nonterminal, index, i)
                    if i < len(slices) - 1:
                        # chain to the next helper; the second variant
                        # omits the maybe symbol at position stop - 1
                        new_expansions = [
                            expansion[start: stop] + [nextname],
                            expansion[start: stop - 1] + [nextname]]
                        new_changes = [change[start: stop] + ">",
                                       change[start: stop - 1] + ">"]
                    else:
                        # last slice: append the mandatory tail instead
                        rest_expansion = expansion[slice(*rest_slice)]
                        new_expansions = [
                            expansion[start: stop] + rest_expansion,
                            expansion[start: stop - 1] + rest_expansion]
                        rest_change = change[slice(*rest_slice)]
                        new_changes = [change[start: stop] + rest_change,
                                       change[start: stop - 1] + rest_change]
                    if i == 0:
                        # first slice stays on the original rule
                        real_expansions += new_expansions
                        real_changes += new_changes
                    else:
                        other_rules.append(Rule(name, new_expansions))
                        other_changes.append(new_changes)
                    name = nextname
            all_rules.append(Rule(rule.nonterminal, real_expansions))
            all_changes.append(real_changes)
        return all_rules + other_rules, all_changes + other_changes
class TransformerMaker(Codebuilder):
    """Generates (as source code) and compiles a ToAST visitor class
    that rewrites parse trees according to the collected change strings
    (see ParserBuilder for the meaning of the change characters)."""
    def __init__(self, rules, changes):
        Codebuilder.__init__(self)
        self.rules = rules
        self.changes = changes
        # set of nonterminal names, used by dispatch() below
        self.nonterminals = dict.fromkeys([rule.nonterminal for rule in rules])

    def make_transformer(self, print_code=False):
        """Emit the ToAST source, exec it, and return the class."""
        self.start_block("class ToAST(object):")
        for i in range(len(self.rules)):
            self.create_visit_method(i)
        # the public transform() entry point of the generated class
        self.start_block("def transform(self, tree):")
        self.emit("assert isinstance(tree, Nonterminal)")
        startsymbol = self.rules[0].nonterminal
        self.emit("assert tree.symbol == %r" % (startsymbol, ))
        self.emit("r = self.visit_%s(tree)" % (startsymbol, ))
        self.emit("assert len(r) == 1")
        self.start_block("if not we_are_translated():")
        self.start_block("try:")
        self.start_block("if py.test.config.option.view:")
        self.emit("r[0].view()")
        self.end_block("option.view")
        self.end_block("try")
        self.start_block("except AttributeError:")
        self.emit("pass")
        self.end_block("except")
        self.end_block("we_are_translated")
        self.emit("return r[0]")
        self.end_block("transform")
        self.end_block("ToAST")
        code = self.get_code()
        if print_code:
            print code
        ns = {"RPythonVisitor": RPythonVisitor, "Nonterminal": Nonterminal,
              "we_are_translated": we_are_translated, "py": py}
        exec py.code.Source(code).compile() in ns
        ToAST = ns["ToAST"]
        ToAST.__module__ = "pypy.rlib.parsing.ebnfparse"
        assert isinstance(ToAST, type)
        assert ToAST.__name__ == "ToAST"
        # keep the generated source and change strings for debugging
        ToAST.source = code
        ToAST.changes = self.changes
        return ToAST

    def dispatch(self, symbol, expr):
        # nonterminals are visited recursively; terminals are wrapped in
        # a one-element list so every dispatch yields a list of nodes
        if symbol in self.nonterminals:
            return "self.visit_%s(%s)" % (symbol, expr)
        return "[%s]" % (expr, )

    def create_visit_method(self, index):
        # emit one visit_<nonterminal> method covering all expansions
        rule = self.rules[index]
        change = self.changes[index]
        self.start_block("def visit_%s(self, node):" % (rule.nonterminal, ))
        for expansion, subchange in self.generate_conditions(index):
            if "<" in subchange:
                # "<": the marked child replaces the whole node
                i = subchange.index("<")
                assert subchange.count("<") == 1, (
                    "cannot expand more than one node in rule %s" % (rule, ))
                # NOTE(review): duplicate of the index() call above
                i = subchange.index("<")
                returnval = self.dispatch(
                    expansion[i], "node.children[%s]" % (i, ))
                self.emit("return " + returnval)
            else:
                self.create_returning_code(expansion, subchange)
        self.end_block(rule.nonterminal)

    def create_returning_code(self, expansion, subchange):
        # emit code rebuilding the node from its kept/spliced children
        assert len(expansion) == len(subchange)
        children = []
        self.emit("children = []")
        for i, (symbol, c) in enumerate(zip(expansion, subchange)):
            if c == "[":
                # "[": drop this child entirely
                continue
            expr = self.dispatch(symbol, "node.children[%s]" % (i, ))
            if c == " ":
                self.emit("children.extend(%s)" % (expr, ))
            if c == ">":
                # ">": splice the child's own children into this node
                self.emit("expr = %s" % (expr, ))
                self.emit("assert len(expr) == 1")
                self.emit("children.extend(expr[0].children)")
        self.emit("return [Nonterminal(node.symbol, children)]")

    def generate_conditions(self, index):
        """Yield (expansion, change) pairs while emitting the dispatch
        if-statements around them; the caller emits each body in between
        the yields, so the yield order must match the emitted nesting."""
        rule = self.rules[index]
        change = self.changes[index]
        len_partition = {}
        if len(rule.expansions) == 1:
            # single expansion: no dispatching code needed at all
            yield rule.expansions[0], change[0]
            return
        # first, partition the expansions by their length
        for expansion, subchange in zip(rule.expansions, change):
            len_partition.setdefault(len(expansion), []).append(
                (expansion, subchange))
        len_partition = len_partition.items()
        len_partition.sort()
        last_length = len_partition[-1][0]
        self.emit("length = len(node.children)")
        for length, items in len_partition:
            if length < last_length:
                self.start_block("if length == %s:" % (length, ))
            if len(items) == 1:
                yield items[0]
                if length < last_length:
                    self.end_block("if length ==")
                continue
            # XXX quite bad complexity, might be ok in practice
            while items:
                shorter = False
                for i in range(length):
                    # look for a symbol position that uniquely
                    # distinguishes some expansion from the rest
                    symbols = {}
                    for pos, item in enumerate(items):
                        expansion = item[0]
                        symbol = expansion[i]
                        symbols.setdefault(symbol, []).append((pos, item))
                    symbols = symbols.items()
                    symbols.sort()
                    remove = []
                    for symbol, subitems in symbols:
                        if (len(subitems) == 1 and
                            (len(items) - len(remove)) > 1):
                            self.start_block(
                                "if node.children[%s].symbol == %r:" % (
                                    i, symbol))
                            pos, subitem = subitems[0]
                            yield subitem
                            remove.append(pos)
                            shorter = True
                            self.end_block("if node.children[")
                    remove.sort()
                    for pos in remove[::-1]:
                        items.pop(pos)
                if shorter:
                    if len(items) == 1:
                        # exactly one candidate left: it needs no guard
                        yield items[0]
                        items.pop(0)
                    else:
                        continue
                break
            # for the remaining items we do a brute force comparison
            # could be even cleverer, but very unlikely to be useful
            assert len(items) != 1
            for expansion, subchange in items:
                conds = []
                for i, symbol in enumerate(expansion):
                    conds.append("node.children[%s].symbol == %r" % (
                        i, symbol))
                self.start_block("if (%s):" % (" and ".join(conds), ))
                yield expansion, subchange
                self.end_block("if")
            if length < last_length:
                self.end_block("if length ==")
class EBNFToAST(object):
    """Generated ToAST transformer for the EBNF meta-grammar itself
    (produced by TransformerMaker.make_transformer; do not edit by
    hand -- regenerate via Lexer.get_dummy_repr / make_ebnf_parser)."""
    def visit_file(self, node):
        children = []
        children.extend(self.visit_list(node.children[0]))
        children.extend([node.children[1]])
        return [Nonterminal(node.symbol, children)]
    def visit__plus_symbol0(self, node):
        length = len(node.children)
        if length == 1:
            children = []
            children.extend(self.visit_element(node.children[0]))
            return [Nonterminal(node.symbol, children)]
        children = []
        children.extend(self.visit_element(node.children[0]))
        expr = self.visit__plus_symbol0(node.children[1])
        assert len(expr) == 1
        children.extend(expr[0].children)
        return [Nonterminal(node.symbol, children)]
    def visit_list(self, node):
        children = []
        expr = self.visit__plus_symbol0(node.children[0])
        assert len(expr) == 1
        children.extend(expr[0].children)
        return [Nonterminal(node.symbol, children)]
    def visit_element(self, node):
        length = len(node.children)
        if node.children[0].symbol == 'production':
            return self.visit_production(node.children[0])
        return self.visit_regex(node.children[0])
    def visit_regex(self, node):
        children = []
        children.extend([node.children[0]])
        children.extend([node.children[1]])
        children.extend([node.children[2]])
        children.extend([node.children[3]])
        return [Nonterminal(node.symbol, children)]
    def visit_production(self, node):
        children = []
        children.extend([node.children[0]])
        children.extend([node.children[1]])
        children.extend(self.visit_body(node.children[2]))
        children.extend([node.children[3]])
        return [Nonterminal(node.symbol, children)]
    def visit__star_symbol0(self, node):
        length = len(node.children)
        if length == 2:
            children = []
            children.extend(self.visit_expansion(node.children[0]))
            return [Nonterminal(node.symbol, children)]
        children = []
        children.extend(self.visit_expansion(node.children[0]))
        expr = self.visit__star_symbol0(node.children[2])
        assert len(expr) == 1
        children.extend(expr[0].children)
        return [Nonterminal(node.symbol, children)]
    def visit_body(self, node):
        length = len(node.children)
        if length == 1:
            children = []
            children.extend(self.visit_expansion(node.children[0]))
            return [Nonterminal(node.symbol, children)]
        children = []
        expr = self.visit__star_symbol0(node.children[0])
        assert len(expr) == 1
        children.extend(expr[0].children)
        children.extend(self.visit_expansion(node.children[1]))
        return [Nonterminal(node.symbol, children)]
    def visit__plus_symbol1(self, node):
        length = len(node.children)
        if length == 1:
            children = []
            children.extend(self.visit_decorated(node.children[0]))
            return [Nonterminal(node.symbol, children)]
        children = []
        children.extend(self.visit_decorated(node.children[0]))
        expr = self.visit__plus_symbol1(node.children[1])
        assert len(expr) == 1
        children.extend(expr[0].children)
        return [Nonterminal(node.symbol, children)]
    def visit_expansion(self, node):
        children = []
        expr = self.visit__plus_symbol1(node.children[0])
        assert len(expr) == 1
        children.extend(expr[0].children)
        return [Nonterminal(node.symbol, children)]
    def visit_decorated(self, node):
        length = len(node.children)
        if length == 1:
            return self.visit_enclosed(node.children[0])
        if node.children[1].symbol == '__3_*':
            children = []
            children.extend(self.visit_enclosed(node.children[0]))
            children.extend([node.children[1]])
            return [Nonterminal(node.symbol, children)]
        if node.children[1].symbol == '__4_+':
            children = []
            children.extend(self.visit_enclosed(node.children[0]))
            children.extend([node.children[1]])
            return [Nonterminal(node.symbol, children)]
        children = []
        children.extend(self.visit_enclosed(node.children[0]))
        children.extend([node.children[1]])
        return [Nonterminal(node.symbol, children)]
    def visit_enclosed(self, node):
        length = len(node.children)
        if length == 1:
            return self.visit_primary(node.children[0])
        if node.children[0].symbol == '__10_(':
            return self.visit_expansion(node.children[1])
        if node.children[0].symbol == '__6_[':
            children = []
            children.extend([node.children[0]])
            children.extend(self.visit_expansion(node.children[1]))
            children.extend([node.children[2]])
            return [Nonterminal(node.symbol, children)]
        if node.children[0].symbol == '__8_>':
            children = []
            children.extend([node.children[0]])
            children.extend(self.visit_expansion(node.children[1]))
            children.extend([node.children[2]])
            return [Nonterminal(node.symbol, children)]
        children = []
        children.extend([node.children[0]])
        children.extend(self.visit_primary(node.children[1]))
        children.extend([node.children[2]])
        return [Nonterminal(node.symbol, children)]
    def visit_primary(self, node):
        length = len(node.children)
        if node.children[0].symbol == 'NONTERMINALNAME':
            children = []
            children.extend([node.children[0]])
            return [Nonterminal(node.symbol, children)]
        if node.children[0].symbol == 'QUOTE':
            children = []
            children.extend([node.children[0]])
            return [Nonterminal(node.symbol, children)]
        children = []
        children.extend([node.children[0]])
        return [Nonterminal(node.symbol, children)]
    def transform(self, tree):
        assert isinstance(tree, Nonterminal)
        assert tree.symbol == 'file'
        r = self.visit_file(tree)
        assert len(r) == 1
        if not we_are_translated():
            try:
                if py.test.config.option.view:
                    r[0].view()
            except AttributeError:
                pass
        return r[0]
# Pre-generated packrat grammar for the EBNF meta-language, matching
# the literal grammar in make_ebnf_parser above (generated code; do
# not edit by hand).
parser = PackratParser([Rule('file', [['list', 'EOF']]),
  Rule('_plus_symbol0', [['element', '_plus_symbol0'], ['element']]),
  Rule('list', [['_plus_symbol0']]),
  Rule('element', [['regex'], ['production']]),
  Rule('regex', [['SYMBOLNAME', '__0_:', 'QUOTE', '__1_;']]),
  Rule('production', [['NONTERMINALNAME', '__0_:', 'body', '__1_;']]),
  Rule('_star_symbol0', [['expansion', '__2_|', '_star_symbol0'], ['expansion', '__2_|']]),
  Rule('body', [['_star_symbol0', 'expansion'], ['expansion']]),
  Rule('_plus_symbol1', [['decorated', '_plus_symbol1'], ['decorated']]),
  Rule('expansion', [['_plus_symbol1']]),
  Rule('decorated', [['enclosed', '__3_*'], ['enclosed', '__4_+'], ['enclosed', '__5_?'], ['enclosed']]),
  Rule('enclosed', [['__6_[', 'expansion', '__7_]'], ['__8_>', 'expansion', '__9_<'], ['__9_<', 'primary', '__8_>'], ['__10_(', 'expansion', '__11_)'], ['primary']]),
  Rule('primary', [['NONTERMINALNAME'], ['SYMBOLNAME'], ['QUOTE']])],
 'file')
def recognize(runner, i):
    """Generated DFA matcher for the EBNF meta-language lexer.

    Scans runner.text starting at index i, recording the longest match
    seen so far in runner.last_matched_index/state.  Returns the stop
    position on success, or ~position on failure (see
    LexingDFARunner.inner_loop).  Generated code; do not edit by hand.
    """
    assert i >= 0
    input = runner.text
    state = 0
    while 1:
        # state 0: initial state, dispatch on the first character
        if state == 0:
            if i < len(input):
                char = input[i]
                i += 1
            else:
                runner.state = 0
                return ~i
            if char == '\t':
                state = 1
            elif char == '\n':
                state = 2
            elif 'A' <= char <= 'Z':
                state = 3
            elif char == ' ':
                state = 4
            elif char == '#':
                state = 5
            elif char == '"':
                state = 6
            elif char == "'":
                state = 7
            elif char == ')':
                state = 8
            elif char == '(':
                state = 9
            elif char == '+':
                state = 10
            elif char == '*':
                state = 11
            elif char == ';':
                state = 12
            elif char == ':':
                state = 13
            elif char == '<':
                state = 14
            elif char == '?':
                state = 15
            elif char == '>':
                state = 16
            elif char == '[':
                state = 17
            elif char == ']':
                state = 18
            elif char == '_':
                state = 19
            elif 'a' <= char <= 'z':
                state = 20
            elif char == '|':
                state = 21
            else:
                break
        # state 3: inside a SYMBOLNAME (accepting)
        if state == 3:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            if i < len(input):
                char = input[i]
                i += 1
            else:
                runner.state = 3
                return i
            if 'A' <= char <= 'Z':
                state = 3
                continue
            elif char == '_':
                state = 3
                continue
            else:
                break
        # state 5: inside a comment (runs to end of line)
        if state == 5:
            if i < len(input):
                char = input[i]
                i += 1
            else:
                runner.state = 5
                return ~i
            if char == '\n':
                state = 27
            elif '\x00' <= char <= '\t':
                state = 5
                continue
            elif '\x0b' <= char <= '\xff':
                state = 5
                continue
            else:
                break
        # state 6: inside a double-quoted literal
        if state == 6:
            if i < len(input):
                char = input[i]
                i += 1
            else:
                runner.state = 6
                return ~i
            if char == '\\':
                state = 24
            elif char == '"':
                state = 25
            elif '\x00' <= char <= '!':
                state = 6
                continue
            elif '#' <= char <= '[':
                state = 6
                continue
            elif ']' <= char <= '\xff':
                state = 6
                continue
            else:
                break
        # state 7: saw "'", expecting the '"' of the quoted-quote token
        if state == 7:
            if i < len(input):
                char = input[i]
                i += 1
            else:
                runner.state = 7
                return ~i
            if char == '"':
                state = 22
            else:
                break
        # state 19: leading underscore(s), could still become either name kind
        if state == 19:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            if i < len(input):
                char = input[i]
                i += 1
            else:
                runner.state = 19
                return i
            if char == '_':
                state = 19
                continue
            elif 'A' <= char <= 'Z':
                state = 3
                continue
            elif '0' <= char <= '9':
                state = 20
            elif 'a' <= char <= 'z':
                state = 20
            else:
                break
        # state 20: inside a NONTERMINALNAME (accepting)
        if state == 20:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            if i < len(input):
                char = input[i]
                i += 1
            else:
                runner.state = 20
                return i
            if '0' <= char <= '9':
                state = 20
                continue
            elif char == '_':
                state = 20
                continue
            elif 'a' <= char <= 'z':
                state = 20
                continue
            else:
                break
        # state 22: saw '\'"', expecting the closing "'"
        if state == 22:
            if i < len(input):
                char = input[i]
                i += 1
            else:
                runner.state = 22
                return ~i
            if char == "'":
                state = 23
            else:
                break
        # state 24: after a backslash inside a double-quoted literal
        if state == 24:
            if i < len(input):
                char = input[i]
                i += 1
            else:
                runner.state = 24
                return ~i
            if char == '\\':
                state = 24
                continue
            elif char == '"':
                state = 26
            elif '\x00' <= char <= '!':
                state = 6
                continue
            elif '#' <= char <= '[':
                state = 6
                continue
            elif ']' <= char <= '\xff':
                state = 6
                continue
            else:
                break
        # state 26: after an escaped quote (accepting, may continue)
        if state == 26:
            runner.last_matched_index = i - 1
            runner.last_matched_state = state
            if i < len(input):
                char = input[i]
                i += 1
            else:
                runner.state = 26
                return i
            if char == '"':
                state = 25
            elif '\x00' <= char <= '!':
                state = 6
                continue
            elif '#' <= char <= '\xff':
                state = 6
                continue
            else:
                break
        # fall-through: no transition consumed; finish at current state
        runner.last_matched_state = state
        runner.last_matched_index = i - 1
        runner.state = state
        if i == len(input):
            return i
        else:
            return ~i
        break
    runner.state = state
    return ~i
lexer = DummyLexer(recognize, DFA(28,
{(0, '\t'): 1,
(0, '\n'): 2,
(0, ' '): 4,
(0, '"'): 6,
(0, '#'): 5,
(0, "'"): 7,
(0, '('): 9,
(0, ')'): 8,
(0, '*'): 11,
(0, '+'): 10,
(0, ':'): 13,
(0, ';'): 12,
(0, '<'): 14,
(0, '>'): 16,
(0, '?'): 15,
(0, 'A'): 3,
(0, 'B'): 3,
(0, 'C'): 3,
(0, 'D'): 3,
(0, 'E'): 3,
(0, 'F'): 3,
(0, 'G'): 3,
(0, 'H'): 3,
(0, 'I'): 3,
(0, 'J'): 3,
(0, 'K'): 3,
(0, 'L'): 3,
(0, 'M'): 3,
(0, 'N'): 3,
(0, 'O'): 3,
(0, 'P'): 3,
(0, 'Q'): 3,
(0, 'R'): 3,
(0, 'S'): 3,
(0, 'T'): 3,
(0, 'U'): 3,
(0, 'V'): 3,
(0, 'W'): 3,
(0, 'X'): 3,
(0, 'Y'): 3,
(0, 'Z'): 3,
(0, '['): 17,
(0, ']'): 18,
(0, '_'): 19,
(0, 'a'): 20,
(0, 'b'): 20,
(0, 'c'): 20,
(0, 'd'): 20,
(0, 'e'): 20,
(0, 'f'): 20,
(0, 'g'): 20,
(0, 'h'): 20,
(0, 'i'): 20,
(0, 'j'): 20,
(0, 'k'): 20,
(0, 'l'): 20,
(0, 'm'): 20,
(0, 'n'): 20,
(0, 'o'): 20,
(0, 'p'): 20,
(0, 'q'): 20,
(0, 'r'): 20,
(0, 's'): 20,
(0, 't'): 20,
(0, 'u'): 20,
(0, 'v'): 20,
(0, 'w'): 20,
(0, 'x'): 20,
(0, 'y'): 20,
(0, 'z'): 20,
(0, '|'): 21,
(3, 'A'): 3,
(3, 'B'): 3,
(3, 'C'): 3,
(3, 'D'): 3,
(3, 'E'): 3,
(3, 'F'): 3,
(3, 'G'): 3,
(3, 'H'): 3,
(3, 'I'): 3,
(3, 'J'): 3,
(3, 'K'): 3,
(3, 'L'): 3,
(3, 'M'): 3,
(3, 'N'): 3,
(3, 'O'): 3,
(3, 'P'): 3,
(3, 'Q'): 3,
(3, 'R'): 3,
(3, 'S'): 3,
(3, 'T'): 3,
(3, 'U'): 3,
(3, 'V'): 3,
(3, 'W'): 3,
(3, 'X'): 3,
(3, 'Y'): 3,
(3, 'Z'): 3,
(3, '_'): 3,
(5, '\x00'): 5,
(5, '\x01'): 5,
(5, '\x02'): 5,
(5, '\x03'): 5,
(5, '\x04'): 5,
(5, '\x05'): 5,
(5, '\x06'): 5,
(5, '\x07'): 5,
(5, '\x08'): 5,
(5, '\t'): 5,
(5, '\n'): 27,
(5, '\x0b'): 5,
(5, '\x0c'): 5,
(5, '\r'): 5,
(5, '\x0e'): 5,
(5, '\x0f'): 5,
(5, '\x10'): 5,
(5, '\x11'): 5,
(5, '\x12'): 5,
(5, '\x13'): 5,
(5, '\x14'): 5,
(5, '\x15'): 5,
(5, '\x16'): 5,
(5, '\x17'): 5,
(5, '\x18'): 5,
(5, '\x19'): 5,
(5, '\x1a'): 5,
(5, '\x1b'): 5,
(5, '\x1c'): 5,
(5, '\x1d'): 5,
(5, '\x1e'): 5,
(5, '\x1f'): 5,
(5, ' '): 5,
(5, '!'): 5,
(5, '"'): 5,
(5, '#'): 5,
(5, '$'): 5,
(5, '%'): 5,
(5, '&'): 5,
(5, "'"): 5,
(5, '('): 5,
(5, ')'): 5,
(5, '*'): 5,
(5, '+'): 5,
(5, ','): 5,
(5, '-'): 5,
(5, '.'): 5,
(5, '/'): 5,
(5, '0'): 5,
(5, '1'): 5,
(5, '2'): 5,
(5, '3'): 5,
(5, '4'): 5,
(5, '5'): 5,
(5, '6'): 5,
(5, '7'): 5,
(5, '8'): 5,
(5, '9'): 5,
(5, ':'): 5,
(5, ';'): 5,
(5, '<'): 5,
(5, '='): 5,
(5, '>'): 5,
(5, '?'): 5,
(5, '@'): 5,
(5, 'A'): 5,
(5, 'B'): 5,
(5, 'C'): 5,
(5, 'D'): 5,
(5, 'E'): 5,
(5, 'F'): 5,
(5, 'G'): 5,
(5, 'H'): 5,
(5, 'I'): 5,
(5, 'J'): 5,
(5, 'K'): 5,
(5, 'L'): 5,
(5, 'M'): 5,
(5, 'N'): 5,
(5, 'O'): 5,
(5, 'P'): 5,
(5, 'Q'): 5,
(5, 'R'): 5,
(5, 'S'): 5,
(5, 'T'): 5,
(5, 'U'): 5,
(5, 'V'): 5,
(5, 'W'): 5,
(5, 'X'): 5,
(5, 'Y'): 5,
(5, 'Z'): 5,
(5, '['): 5,
(5, '\\'): 5,
(5, ']'): 5,
(5, '^'): 5,
(5, '_'): 5,
(5, '`'): 5,
(5, 'a'): 5,
(5, 'b'): 5,
(5, 'c'): 5,
(5, 'd'): 5,
(5, 'e'): 5,
(5, 'f'): 5,
(5, 'g'): 5,
(5, 'h'): 5,
(5, 'i'): 5,
(5, 'j'): 5,
(5, 'k'): 5,
(5, 'l'): 5,
(5, 'm'): 5,
(5, 'n'): 5,
(5, 'o'): 5,
(5, 'p'): 5,
(5, 'q'): 5,
(5, 'r'): 5,
(5, 's'): 5,
(5, 't'): 5,
(5, 'u'): 5,
(5, 'v'): 5,
(5, 'w'): 5,
(5, 'x'): 5,
(5, 'y'): 5,
(5, 'z'): 5,
(5, '{'): 5,
(5, '|'): 5,
(5, '}'): 5,
(5, '~'): 5,
(5, '\x7f'): 5,
(5, '\x80'): 5,
(5, '\x81'): 5,
(5, '\x82'): 5,
(5, '\x83'): 5,
(5, '\x84'): 5,
(5, '\x85'): 5,
(5, '\x86'): 5,
(5, '\x87'): 5,
(5, '\x88'): 5,
(5, '\x89'): 5,
(5, '\x8a'): 5,
(5, '\x8b'): 5,
(5, '\x8c'): 5,
(5, '\x8d'): 5,
(5, '\x8e'): 5,
(5, '\x8f'): 5,
(5, '\x90'): 5,
(5, '\x91'): 5,
(5, '\x92'): 5,
(5, '\x93'): 5,
(5, '\x94'): 5,
(5, '\x95'): 5,
(5, '\x96'): 5,
(5, '\x97'): 5,
(5, '\x98'): 5,
(5, '\x99'): 5,
(5, '\x9a'): 5,
(5, '\x9b'): 5,
(5, '\x9c'): 5,
(5, '\x9d'): 5,
(5, '\x9e'): 5,
(5, '\x9f'): 5,
(5, '\xa0'): 5,
(5, '\xa1'): 5,
(5, '\xa2'): 5,
(5, '\xa3'): 5,
(5, '\xa4'): 5,
(5, '\xa5'): 5,
(5, '\xa6'): 5,
(5, '\xa7'): 5,
(5, '\xa8'): 5,
(5, '\xa9'): 5,
(5, '\xaa'): 5,
(5, '\xab'): 5,
(5, '\xac'): 5,
(5, '\xad'): 5,
(5, '\xae'): 5,
(5, '\xaf'): 5,
(5, '\xb0'): 5,
(5, '\xb1'): 5,
(5, '\xb2'): 5,
(5, '\xb3'): 5,
(5, '\xb4'): 5,
(5, '\xb5'): 5,
(5, '\xb6'): 5,
(5, '\xb7'): 5,
(5, '\xb8'): 5,
(5, '\xb9'): 5,
(5, '\xba'): 5,
(5, '\xbb'): 5,
(5, '\xbc'): 5,
(5, '\xbd'): 5,
(5, '\xbe'): 5,
(5, '\xbf'): 5,
(5, '\xc0'): 5,
(5, '\xc1'): 5,
(5, '\xc2'): 5,
(5, '\xc3'): 5,
(5, '\xc4'): 5,
(5, '\xc5'): 5,
(5, '\xc6'): 5,
(5, '\xc7'): 5,
(5, '\xc8'): 5,
(5, '\xc9'): 5,
(5, '\xca'): 5,
(5, '\xcb'): 5,
(5, '\xcc'): 5,
(5, '\xcd'): 5,
(5, '\xce'): 5,
(5, '\xcf'): 5,
(5, '\xd0'): 5,
(5, '\xd1'): 5,
(5, '\xd2'): 5,
(5, '\xd3'): 5,
(5, '\xd4'): 5,
(5, '\xd5'): 5,
(5, '\xd6'): 5,
(5, '\xd7'): 5,
(5, '\xd8'): 5,
(5, '\xd9'): 5,
(5, '\xda'): 5,
(5, '\xdb'): 5,
(5, '\xdc'): 5,
(5, '\xdd'): 5,
(5, '\xde'): 5,
(5, '\xdf'): 5,
(5, '\xe0'): 5,
(5, '\xe1'): 5,
(5, '\xe2'): 5,
(5, '\xe3'): 5,
(5, '\xe4'): 5,
(5, '\xe5'): 5,
(5, '\xe6'): 5,
(5, '\xe7'): 5,
(5, '\xe8'): 5,
(5, '\xe9'): 5,
(5, '\xea'): 5,
(5, '\xeb'): 5,
(5, '\xec'): 5,
(5, '\xed'): 5,
(5, '\xee'): 5,
(5, '\xef'): 5,
(5, '\xf0'): 5,
(5, '\xf1'): 5,
(5, '\xf2'): 5,
(5, '\xf3'): 5,
(5, '\xf4'): 5,
(5, '\xf5'): 5,
(5, '\xf6'): 5,
(5, '\xf7'): 5,
(5, '\xf8'): 5,
(5, '\xf9'): 5,
(5, '\xfa'): 5,
(5, '\xfb'): 5,
(5, '\xfc'): 5,
(5, '\xfd'): 5,
(5, '\xfe'): 5,
(5, '\xff'): 5,
(6, '\x00'): 6,
(6, '\x01'): 6,
(6, '\x02'): 6,
(6, '\x03'): 6,
(6, '\x04'): 6,
(6, '\x05'): 6,
(6, '\x06'): 6,
(6, '\x07'): 6,
(6, '\x08'): 6,
(6, '\t'): 6,
(6, '\n'): 6,
(6, '\x0b'): 6,
(6, '\x0c'): 6,
(6, '\r'): 6,
(6, '\x0e'): 6,
(6, '\x0f'): 6,
(6, '\x10'): 6,
(6, '\x11'): 6,
(6, '\x12'): 6,
(6, '\x13'): 6,
(6, '\x14'): 6,
(6, '\x15'): 6,
(6, '\x16'): 6,
(6, '\x17'): 6,
(6, '\x18'): 6,
(6, '\x19'): 6,
(6, '\x1a'): 6,
(6, '\x1b'): 6,
(6, '\x1c'): 6,
(6, '\x1d'): 6,
(6, '\x1e'): 6,
(6, '\x1f'): 6,
(6, ' '): 6,
(6, '!'): 6,
(6, '"'): 25,
(6, '#'): 6,
(6, '$'): 6,
(6, '%'): 6,
(6, '&'): 6,
(6, "'"): 6,
(6, '('): 6,
(6, ')'): 6,
(6, '*'): 6,
(6, '+'): 6,
(6, ','): 6,
(6, '-'): 6,
(6, '.'): 6,
(6, '/'): 6,
(6, '0'): 6,
(6, '1'): 6,
(6, '2'): 6,
(6, '3'): 6,
(6, '4'): 6,
(6, '5'): 6,
(6, '6'): 6,
(6, '7'): 6,
(6, '8'): 6,
(6, '9'): 6,
(6, ':'): 6,
(6, ';'): 6,
(6, '<'): 6,
(6, '='): 6,
(6, '>'): 6,
(6, '?'): 6,
(6, '@'): 6,
(6, 'A'): 6,
(6, 'B'): 6,
(6, 'C'): 6,
(6, 'D'): 6,
(6, 'E'): 6,
(6, 'F'): 6,
(6, 'G'): 6,
(6, 'H'): 6,
(6, 'I'): 6,
(6, 'J'): 6,
(6, 'K'): 6,
(6, 'L'): 6,
(6, 'M'): 6,
(6, 'N'): 6,
(6, 'O'): 6,
(6, 'P'): 6,
(6, 'Q'): 6,
(6, 'R'): 6,
(6, 'S'): 6,
(6, 'T'): 6,
(6, 'U'): 6,
(6, 'V'): 6,
(6, 'W'): 6,
(6, 'X'): 6,
(6, 'Y'): 6,
(6, 'Z'): 6,
(6, '['): 6,
(6, '\\'): 24,
(6, ']'): 6,
(6, '^'): 6,
(6, '_'): 6,
(6, '`'): 6,
(6, 'a'): 6,
(6, 'b'): 6,
(6, 'c'): 6,
(6, 'd'): 6,
(6, 'e'): 6,
(6, 'f'): 6,
(6, 'g'): 6,
(6, 'h'): 6,
(6, 'i'): 6,
(6, 'j'): 6,
(6, 'k'): 6,
(6, 'l'): 6,
(6, 'm'): 6,
(6, 'n'): 6,
(6, 'o'): 6,
(6, 'p'): 6,
(6, 'q'): 6,
(6, 'r'): 6,
(6, 's'): 6,
(6, 't'): 6,
(6, 'u'): 6,
(6, 'v'): 6,
(6, 'w'): 6,
(6, 'x'): 6,
(6, 'y'): 6,
(6, 'z'): 6,
(6, '{'): 6,
(6, '|'): 6,
(6, '}'): 6,
(6, '~'): 6,
(6, '\x7f'): 6,
(6, '\x80'): 6,
(6, '\x81'): 6,
(6, '\x82'): 6,
(6, '\x83'): 6,
(6, '\x84'): 6,
(6, '\x85'): 6,
(6, '\x86'): 6,
(6, '\x87'): 6,
(6, '\x88'): 6,
(6, '\x89'): 6,
(6, '\x8a'): 6,
(6, '\x8b'): 6,
(6, '\x8c'): 6,
(6, '\x8d'): 6,
(6, '\x8e'): 6,
(6, '\x8f'): 6,
(6, '\x90'): 6,
(6, '\x91'): 6,
(6, '\x92'): 6,
(6, '\x93'): 6,
(6, '\x94'): 6,
(6, '\x95'): 6,
(6, '\x96'): 6,
(6, '\x97'): 6,
(6, '\x98'): 6,
(6, '\x99'): 6,
(6, '\x9a'): 6,
(6, '\x9b'): 6,
(6, '\x9c'): 6,
(6, '\x9d'): 6,
(6, '\x9e'): 6,
(6, '\x9f'): 6,
(6, '\xa0'): 6,
(6, '\xa1'): 6,
(6, '\xa2'): 6,
(6, '\xa3'): 6,
(6, '\xa4'): 6,
(6, '\xa5'): 6,
(6, '\xa6'): 6,
(6, '\xa7'): 6,
(6, '\xa8'): 6,
(6, '\xa9'): 6,
(6, '\xaa'): 6,
(6, '\xab'): 6,
(6, '\xac'): 6,
(6, '\xad'): 6,
(6, '\xae'): 6,
(6, '\xaf'): 6,
(6, '\xb0'): 6,
(6, '\xb1'): 6,
(6, '\xb2'): 6,
(6, '\xb3'): 6,
(6, '\xb4'): 6,
(6, '\xb5'): 6,
(6, '\xb6'): 6,
(6, '\xb7'): 6,
(6, '\xb8'): 6,
(6, '\xb9'): 6,
(6, '\xba'): 6,
(6, '\xbb'): 6,
(6, '\xbc'): 6,
(6, '\xbd'): 6,
(6, '\xbe'): 6,
(6, '\xbf'): 6,
(6, '\xc0'): 6,
(6, '\xc1'): 6,
(6, '\xc2'): 6,
(6, '\xc3'): 6,
(6, '\xc4'): 6,
(6, '\xc5'): 6,
(6, '\xc6'): 6,
(6, '\xc7'): 6,
(6, '\xc8'): 6,
(6, '\xc9'): 6,
(6, '\xca'): 6,
(6, '\xcb'): 6,
(6, '\xcc'): 6,
(6, '\xcd'): 6,
(6, '\xce'): 6,
(6, '\xcf'): 6,
(6, '\xd0'): 6,
(6, '\xd1'): 6,
(6, '\xd2'): 6,
(6, '\xd3'): 6,
(6, '\xd4'): 6,
(6, '\xd5'): 6,
(6, '\xd6'): 6,
(6, '\xd7'): 6,
(6, '\xd8'): 6,
(6, '\xd9'): 6,
(6, '\xda'): 6,
(6, '\xdb'): 6,
(6, '\xdc'): 6,
(6, '\xdd'): 6,
(6, '\xde'): 6,
(6, '\xdf'): 6,
(6, '\xe0'): 6,
(6, '\xe1'): 6,
(6, '\xe2'): 6,
(6, '\xe3'): 6,
(6, '\xe4'): 6,
(6, '\xe5'): 6,
(6, '\xe6'): 6,
(6, '\xe7'): 6,
(6, '\xe8'): 6,
(6, '\xe9'): 6,
(6, '\xea'): 6,
(6, '\xeb'): 6,
(6, '\xec'): 6,
(6, '\xed'): 6,
(6, '\xee'): 6,
(6, '\xef'): 6,
(6, '\xf0'): 6,
(6, '\xf1'): 6,
(6, '\xf2'): 6,
(6, '\xf3'): 6,
(6, '\xf4'): 6,
(6, '\xf5'): 6,
(6, '\xf6'): 6,
(6, '\xf7'): 6,
(6, '\xf8'): 6,
(6, '\xf9'): 6,
(6, '\xfa'): 6,
(6, '\xfb'): 6,
(6, '\xfc'): 6,
(6, '\xfd'): 6,
(6, '\xfe'): 6,
(6, '\xff'): 6,
(7, '"'): 22,
(19, '0'): 20,
(19, '1'): 20,
(19, '2'): 20,
(19, '3'): 20,
(19, '4'): 20,
(19, '5'): 20,
(19, '6'): 20,
(19, '7'): 20,
(19, '8'): 20,
(19, '9'): 20,
(19, 'A'): 3,
(19, 'B'): 3,
(19, 'C'): 3,
(19, 'D'): 3,
(19, 'E'): 3,
(19, 'F'): 3,
(19, 'G'): 3,
(19, 'H'): 3,
(19, 'I'): 3,
(19, 'J'): 3,
(19, 'K'): 3,
(19, 'L'): 3,
(19, 'M'): 3,
(19, 'N'): 3,
(19, 'O'): 3,
(19, 'P'): 3,
(19, 'Q'): 3,
(19, 'R'): 3,
(19, 'S'): 3,
(19, 'T'): 3,
(19, 'U'): 3,
(19, 'V'): 3,
(19, 'W'): 3,
(19, 'X'): 3,
(19, 'Y'): 3,
(19, 'Z'): 3,
(19, '_'): 19,
(19, 'a'): 20,
(19, 'b'): 20,
(19, 'c'): 20,
(19, 'd'): 20,
(19, 'e'): 20,
(19, 'f'): 20,
(19, 'g'): 20,
(19, 'h'): 20,
(19, 'i'): 20,
(19, 'j'): 20,
(19, 'k'): 20,
(19, 'l'): 20,
(19, 'm'): 20,
(19, 'n'): 20,
(19, 'o'): 20,
(19, 'p'): 20,
(19, 'q'): 20,
(19, 'r'): 20,
(19, 's'): 20,
(19, 't'): 20,
(19, 'u'): 20,
(19, 'v'): 20,
(19, 'w'): 20,
(19, 'x'): 20,
(19, 'y'): 20,
(19, 'z'): 20,
(20, '0'): 20,
(20, '1'): 20,
(20, '2'): 20,
(20, '3'): 20,
(20, '4'): 20,
(20, '5'): 20,
(20, '6'): 20,
(20, '7'): 20,
(20, '8'): 20,
(20, '9'): 20,
(20, '_'): 20,
(20, 'a'): 20,
(20, 'b'): 20,
(20, 'c'): 20,
(20, 'd'): 20,
(20, 'e'): 20,
(20, 'f'): 20,
(20, 'g'): 20,
(20, 'h'): 20,
(20, 'i'): 20,
(20, 'j'): 20,
(20, 'k'): 20,
(20, 'l'): 20,
(20, 'm'): 20,
(20, 'n'): 20,
(20, 'o'): 20,
(20, 'p'): 20,
(20, 'q'): 20,
(20, 'r'): 20,
(20, 's'): 20,
(20, 't'): 20,
(20, 'u'): 20,
(20, 'v'): 20,
(20, 'w'): 20,
(20, 'x'): 20,
(20, 'y'): 20,
(20, 'z'): 20,
(22, "'"): 23,
(24, '\x00'): 6,
(24, '\x01'): 6,
(24, '\x02'): 6,
(24, '\x03'): 6,
(24, '\x04'): 6,
(24, '\x05'): 6,
(24, '\x06'): 6,
(24, '\x07'): 6,
(24, '\x08'): 6,
(24, '\t'): 6,
(24, '\n'): 6,
(24, '\x0b'): 6,
(24, '\x0c'): 6,
(24, '\r'): 6,
(24, '\x0e'): 6,
(24, '\x0f'): 6,
(24, '\x10'): 6,
(24, '\x11'): 6,
(24, '\x12'): 6,
(24, '\x13'): 6,
(24, '\x14'): 6,
(24, '\x15'): 6,
(24, '\x16'): 6,
(24, '\x17'): 6,
(24, '\x18'): 6,
(24, '\x19'): 6,
(24, '\x1a'): 6,
(24, '\x1b'): 6,
(24, '\x1c'): 6,
(24, '\x1d'): 6,
(24, '\x1e'): 6,
(24, '\x1f'): 6,
(24, ' '): 6,
(24, '!'): 6,
(24, '"'): 26,
(24, '#'): 6,
(24, '$'): 6,
(24, '%'): 6,
(24, '&'): 6,
(24, "'"): 6,
(24, '('): 6,
(24, ')'): 6,
(24, '*'): 6,
(24, '+'): 6,
(24, ','): 6,
(24, '-'): 6,
(24, '.'): 6,
(24, '/'): 6,
(24, '0'): 6,
(24, '1'): 6,
(24, '2'): 6,
(24, '3'): 6,
(24, '4'): 6,
(24, '5'): 6,
(24, '6'): 6,
(24, '7'): 6,
(24, '8'): 6,
(24, '9'): 6,
(24, ':'): 6,
(24, ';'): 6,
(24, '<'): 6,
(24, '='): 6,
(24, '>'): 6,
(24, '?'): 6,
(24, '@'): 6,
(24, 'A'): 6,
(24, 'B'): 6,
(24, 'C'): 6,
(24, 'D'): 6,
(24, 'E'): 6,
(24, 'F'): 6,
(24, 'G'): 6,
(24, 'H'): 6,
(24, 'I'): 6,
(24, 'J'): 6,
(24, 'K'): 6,
(24, 'L'): 6,
(24, 'M'): 6,
(24, 'N'): 6,
(24, 'O'): 6,
(24, 'P'): 6,
(24, 'Q'): 6,
(24, 'R'): 6,
(24, 'S'): 6,
(24, 'T'): 6,
(24, 'U'): 6,
(24, 'V'): 6,
(24, 'W'): 6,
(24, 'X'): 6,
(24, 'Y'): 6,
(24, 'Z'): 6,
(24, '['): 6,
(24, '\\'): 24,
(24, ']'): 6,
(24, '^'): 6,
(24, '_'): 6,
(24, '`'): 6,
(24, 'a'): 6,
(24, 'b'): 6,
(24, 'c'): 6,
(24, 'd'): 6,
(24, 'e'): 6,
(24, 'f'): 6,
(24, 'g'): 6,
(24, 'h'): 6,
(24, 'i'): 6,
(24, 'j'): 6,
(24, 'k'): 6,
(24, 'l'): 6,
(24, 'm'): 6,
(24, 'n'): 6,
(24, 'o'): 6,
(24, 'p'): 6,
(24, 'q'): 6,
(24, 'r'): 6,
(24, 's'): 6,
(24, 't'): 6,
(24, 'u'): 6,
(24, 'v'): 6,
(24, 'w'): 6,
(24, 'x'): 6,
(24, 'y'): 6,
(24, 'z'): 6,
(24, '{'): 6,
(24, '|'): 6,
(24, '}'): 6,
(24, '~'): 6,
(24, '\x7f'): 6,
(24, '\x80'): 6,
(24, '\x81'): 6,
(24, '\x82'): 6,
(24, '\x83'): 6,
(24, '\x84'): 6,
(24, '\x85'): 6,
(24, '\x86'): 6,
(24, '\x87'): 6,
(24, '\x88'): 6,
(24, '\x89'): 6,
(24, '\x8a'): 6,
(24, '\x8b'): 6,
(24, '\x8c'): 6,
(24, '\x8d'): 6,
(24, '\x8e'): 6,
(24, '\x8f'): 6,
(24, '\x90'): 6,
(24, '\x91'): 6,
(24, '\x92'): 6,
(24, '\x93'): 6,
(24, '\x94'): 6,
(24, '\x95'): 6,
(24, '\x96'): 6,
(24, '\x97'): 6,
(24, '\x98'): 6,
(24, '\x99'): 6,
(24, '\x9a'): 6,
(24, '\x9b'): 6,
(24, '\x9c'): 6,
(24, '\x9d'): 6,
(24, '\x9e'): 6,
(24, '\x9f'): 6,
(24, '\xa0'): 6,
(24, '\xa1'): 6,
(24, '\xa2'): 6,
(24, '\xa3'): 6,
(24, '\xa4'): 6,
(24, '\xa5'): 6,
(24, '\xa6'): 6,
(24, '\xa7'): 6,
(24, '\xa8'): 6,
(24, '\xa9'): 6,
(24, '\xaa'): 6,
(24, '\xab'): 6,
(24, '\xac'): 6,
(24, '\xad'): 6,
(24, '\xae'): 6,
(24, '\xaf'): 6,
(24, '\xb0'): 6,
(24, '\xb1'): 6,
(24, '\xb2'): 6,
(24, '\xb3'): 6,
(24, '\xb4'): 6,
(24, '\xb5'): 6,
(24, '\xb6'): 6,
(24, '\xb7'): 6,
(24, '\xb8'): 6,
(24, '\xb9'): 6,
(24, '\xba'): 6,
(24, '\xbb'): 6,
(24, '\xbc'): 6,
(24, '\xbd'): 6,
(24, '\xbe'): 6,
(24, '\xbf'): 6,
(24, '\xc0'): 6,
(24, '\xc1'): 6,
(24, '\xc2'): 6,
(24, '\xc3'): 6,
(24, '\xc4'): 6,
(24, '\xc5'): 6,
(24, '\xc6'): 6,
(24, '\xc7'): 6,
(24, '\xc8'): 6,
(24, '\xc9'): 6,
(24, '\xca'): 6,
(24, '\xcb'): 6,
(24, '\xcc'): 6,
(24, '\xcd'): 6,
(24, '\xce'): 6,
(24, '\xcf'): 6,
(24, '\xd0'): 6,
(24, '\xd1'): 6,
(24, '\xd2'): 6,
(24, '\xd3'): 6,
(24, '\xd4'): 6,
(24, '\xd5'): 6,
(24, '\xd6'): 6,
(24, '\xd7'): 6,
(24, '\xd8'): 6,
(24, '\xd9'): 6,
(24, '\xda'): 6,
(24, '\xdb'): 6,
(24, '\xdc'): 6,
(24, '\xdd'): 6,
(24, '\xde'): 6,
(24, '\xdf'): 6,
(24, '\xe0'): 6,
(24, '\xe1'): 6,
(24, '\xe2'): 6,
(24, '\xe3'): 6,
(24, '\xe4'): 6,
(24, '\xe5'): 6,
(24, '\xe6'): 6,
(24, '\xe7'): 6,
(24, '\xe8'): 6,
(24, '\xe9'): 6,
(24, '\xea'): 6,
(24, '\xeb'): 6,
(24, '\xec'): 6,
(24, '\xed'): 6,
(24, '\xee'): 6,
(24, '\xef'): 6,
(24, '\xf0'): 6,
(24, '\xf1'): 6,
(24, '\xf2'): 6,
(24, '\xf3'): 6,
(24, '\xf4'): 6,
(24, '\xf5'): 6,
(24, '\xf6'): 6,
(24, '\xf7'): 6,
(24, '\xf8'): 6,
(24, '\xf9'): 6,
(24, '\xfa'): 6,
(24, '\xfb'): 6,
(24, '\xfc'): 6,
(24, '\xfd'): 6,
(24, '\xfe'): 6,
(24, '\xff'): 6,
(26, '\x00'): 6,
(26, '\x01'): 6,
(26, '\x02'): 6,
(26, '\x03'): 6,
(26, '\x04'): 6,
(26, '\x05'): 6,
(26, '\x06'): 6,
(26, '\x07'): 6,
(26, '\x08'): 6,
(26, '\t'): 6,
(26, '\n'): 6,
(26, '\x0b'): 6,
(26, '\x0c'): 6,
(26, '\r'): 6,
(26, '\x0e'): 6,
(26, '\x0f'): 6,
(26, '\x10'): 6,
(26, '\x11'): 6,
(26, '\x12'): 6,
(26, '\x13'): 6,
(26, '\x14'): 6,
(26, '\x15'): 6,
(26, '\x16'): 6,
(26, '\x17'): 6,
(26, '\x18'): 6,
(26, '\x19'): 6,
(26, '\x1a'): 6,
(26, '\x1b'): 6,
(26, '\x1c'): 6,
(26, '\x1d'): 6,
(26, '\x1e'): 6,
(26, '\x1f'): 6,
(26, ' '): 6,
(26, '!'): 6,
(26, '"'): 25,
(26, '#'): 6,
(26, '$'): 6,
(26, '%'): 6,
(26, '&'): 6,
(26, "'"): 6,
(26, '('): 6,
(26, ')'): 6,
(26, '*'): 6,
(26, '+'): 6,
(26, ','): 6,
(26, '-'): 6,
(26, '.'): 6,
(26, '/'): 6,
(26, '0'): 6,
(26, '1'): 6,
(26, '2'): 6,
(26, '3'): 6,
(26, '4'): 6,
(26, '5'): 6,
(26, '6'): 6,
(26, '7'): 6,
(26, '8'): 6,
(26, '9'): 6,
(26, ':'): 6,
(26, ';'): 6,
(26, '<'): 6,
(26, '='): 6,
(26, '>'): 6,
(26, '?'): 6,
(26, '@'): 6,
(26, 'A'): 6,
(26, 'B'): 6,
(26, 'C'): 6,
(26, 'D'): 6,
(26, 'E'): 6,
(26, 'F'): 6,
(26, 'G'): 6,
(26, 'H'): 6,
(26, 'I'): 6,
(26, 'J'): 6,
(26, 'K'): 6,
(26, 'L'): 6,
(26, 'M'): 6,
(26, 'N'): 6,
(26, 'O'): 6,
(26, 'P'): 6,
(26, 'Q'): 6,
(26, 'R'): 6,
(26, 'S'): 6,
(26, 'T'): 6,
(26, 'U'): 6,
(26, 'V'): 6,
(26, 'W'): 6,
(26, 'X'): 6,
(26, 'Y'): 6,
(26, 'Z'): 6,
(26, '['): 6,
(26, '\\'): 6,
(26, ']'): 6,
(26, '^'): 6,
(26, '_'): 6,
(26, '`'): 6,
(26, 'a'): 6,
(26, 'b'): 6,
(26, 'c'): 6,
(26, 'd'): 6,
(26, 'e'): 6,
(26, 'f'): 6,
(26, 'g'): 6,
(26, 'h'): 6,
(26, 'i'): 6,
(26, 'j'): 6,
(26, 'k'): 6,
(26, 'l'): 6,
(26, 'm'): 6,
(26, 'n'): 6,
(26, 'o'): 6,
(26, 'p'): 6,
(26, 'q'): 6,
(26, 'r'): 6,
(26, 's'): 6,
(26, 't'): 6,
(26, 'u'): 6,
(26, 'v'): 6,
(26, 'w'): 6,
(26, 'x'): 6,
(26, 'y'): 6,
(26, 'z'): 6,
(26, '{'): 6,
(26, '|'): 6,
(26, '}'): 6,
(26, '~'): 6,
(26, '\x7f'): 6,
(26, '\x80'): 6,
(26, '\x81'): 6,
(26, '\x82'): 6,
(26, '\x83'): 6,
(26, '\x84'): 6,
(26, '\x85'): 6,
(26, '\x86'): 6,
(26, '\x87'): 6,
(26, '\x88'): 6,
(26, '\x89'): 6,
(26, '\x8a'): 6,
(26, '\x8b'): 6,
(26, '\x8c'): 6,
(26, '\x8d'): 6,
(26, '\x8e'): 6,
(26, '\x8f'): 6,
(26, '\x90'): 6,
(26, '\x91'): 6,
(26, '\x92'): 6,
(26, '\x93'): 6,
(26, '\x94'): 6,
(26, '\x95'): 6,
(26, '\x96'): 6,
(26, '\x97'): 6,
(26, '\x98'): 6,
(26, '\x99'): 6,
(26, '\x9a'): 6,
(26, '\x9b'): 6,
(26, '\x9c'): 6,
(26, '\x9d'): 6,
(26, '\x9e'): 6,
(26, '\x9f'): 6,
(26, '\xa0'): 6,
(26, '\xa1'): 6,
(26, '\xa2'): 6,
(26, '\xa3'): 6,
(26, '\xa4'): 6,
(26, '\xa5'): 6,
(26, '\xa6'): 6,
(26, '\xa7'): 6,
(26, '\xa8'): 6,
(26, '\xa9'): 6,
(26, '\xaa'): 6,
(26, '\xab'): 6,
(26, '\xac'): 6,
(26, '\xad'): 6,
(26, '\xae'): 6,
(26, '\xaf'): 6,
(26, '\xb0'): 6,
(26, '\xb1'): 6,
(26, '\xb2'): 6,
(26, '\xb3'): 6,
(26, '\xb4'): 6,
(26, '\xb5'): 6,
(26, '\xb6'): 6,
(26, '\xb7'): 6,
(26, '\xb8'): 6,
(26, '\xb9'): 6,
(26, '\xba'): 6,
(26, '\xbb'): 6,
(26, '\xbc'): 6,
(26, '\xbd'): 6,
(26, '\xbe'): 6,
(26, '\xbf'): 6,
(26, '\xc0'): 6,
(26, '\xc1'): 6,
(26, '\xc2'): 6,
(26, '\xc3'): 6,
(26, '\xc4'): 6,
(26, '\xc5'): 6,
(26, '\xc6'): 6,
(26, '\xc7'): 6,
(26, '\xc8'): 6,
(26, '\xc9'): 6,
(26, '\xca'): 6,
(26, '\xcb'): 6,
(26, '\xcc'): 6,
(26, '\xcd'): 6,
(26, '\xce'): 6,
(26, '\xcf'): 6,
(26, '\xd0'): 6,
(26, '\xd1'): 6,
(26, '\xd2'): 6,
(26, '\xd3'): 6,
(26, '\xd4'): 6,
(26, '\xd5'): 6,
(26, '\xd6'): 6,
(26, '\xd7'): 6,
(26, '\xd8'): 6,
(26, '\xd9'): 6,
(26, '\xda'): 6,
(26, '\xdb'): 6,
(26, '\xdc'): 6,
(26, '\xdd'): 6,
(26, '\xde'): 6,
(26, '\xdf'): 6,
(26, '\xe0'): 6,
(26, '\xe1'): 6,
(26, '\xe2'): 6,
(26, '\xe3'): 6,
(26, '\xe4'): 6,
(26, '\xe5'): 6,
(26, '\xe6'): 6,
(26, '\xe7'): 6,
(26, '\xe8'): 6,
(26, '\xe9'): 6,
(26, '\xea'): 6,
(26, '\xeb'): 6,
(26, '\xec'): 6,
(26, '\xed'): 6,
(26, '\xee'): 6,
(26, '\xef'): 6,
(26, '\xf0'): 6,
(26, '\xf1'): 6,
(26, '\xf2'): 6,
(26, '\xf3'): 6,
(26, '\xf4'): 6,
(26, '\xf5'): 6,
(26, '\xf6'): 6,
(26, '\xf7'): 6,
(26, '\xf8'): 6,
(26, '\xf9'): 6,
(26, '\xfa'): 6,
(26, '\xfb'): 6,
(26, '\xfc'): 6,
(26, '\xfd'): 6,
(26, '\xfe'): 6,
(26, '\xff'): 6},
set([1, 2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 25, 26, 27]),
set([1, 2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 25, 26, 27]),
['0, 0, 0, final*, start*, 0, 0, 1, final*, start*, 0, 0, 0, start|, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0',
'IGNORE',
'IGNORE',
'SYMBOLNAME',
'IGNORE',
'1, 0, start|, 0, final*, start*, 0, 0, 1, final|, start|, 0, final*, start*, 0, 0, final|, start|, 0, 1, final*, start*, 0',
'1, 0, final*, start*, start|, 0, final|, final*, start*, 0, 0, 0, start|, 0, 0, final*, final|, start|, 0, final|, final*, start*, 0, 0, 0, start|, 1, 0, start*, 0, final*, final|, start|, 0, 1, final*, start*, 0, 0, 0, final|, start|, 0, start*, 0, 1, final|, start|, 0, final*, start*, 0, final*, final*, 1, final|, final*, 0, start|, 0, final*, start*, final*, start*, 0, final|, start|, 0, 0, 0, final|, start|, 0, 1, final*, start*, 0, final*, final*, final|, 1, final*, 0, start|, 0, final*, start*, final*, start*, 0, final|, start|, 0, 0, 0, final*, start*, final*, start*, 0, final|, start|, 0, 0, final*, final*, 0, 1, final|, final*, 0, start|, 0, final*, start*, final*, start*, 0, final|, start|, 0, 0, final*, final*, 0, final|, 1, final*, 0, start|, 0, final*, start*, final*, start*, 0, final|, start|, 0, 0, final*, final*, 0, final*, 0, 1, final|, start|, 0, 1, final*, start*, final*, start*, 0, final|, start|, 0, 0, final*, final*, 0, final*, 0, final|, start|, 0, final*, start*, final*, start*, 0, final|, start|, 0, 0, final*, final*, 0, 1, final|, final*, 0, 1, final|, start|, 0, 1, final*, start*, final*, start*, 0, final|, start|, 0, 0, final*, final*, 0, final|, 1, final*, 0, final|, start|, 0, 1, final|, start|, 0, final*, start*, 0, final*, final*, 1, final|, final*, 0, 1, final|, start|, 0, final*, start*, final*, start*, 0, final|, start|, 0, 0, 0, final|, start|, 0, 1, final*, start*, 0, final*, final*, final|, 1, final*, 0, final|, start|, 0, 1, final*, start*, final*, start*, 0, final|, start|, 0, 0, 0, 1, final|, start|, 0, final*, start*, 0, final*, final*, final*, 0, 1, final|, start|, 0, final*, start*, final*, start*, 0, final|, start|, 0, 0, 0, final|, start|, 0, 1, final*, start*, 0, final*, final*, final*, 0, final|, start|, 0, 1, final*, start*, final*, start*, 0, final|, start|, 0, 0',
'1',
'__11_)',
'__10_(',
'__4_+',
'__3_*',
'__1_;',
'__0_:',
'__9_<',
'__5_?',
'__8_>',
'__6_[',
'__7_]',
'NONTERMINALNAME',
'NONTERMINALNAME',
'__2_|',
'2',
'QUOTE',
'0, 1, final*, 0, final|, start|, 0, final*, 0, final|, start|, 0, 1, final*, 0, final|, start|, 0, 1, final*, start*, final*, start*, 0, final|, start|, 0, 1, 0, 0, final|, start|, 0, 1, final*, start*, 0, 1, 0, final|, start|, 0, 0, start|, 0, final*, start*, 0, final|, start|, 0, 1, 0, 0, final|, start|, 0, 1, final*, start*, 0, 1, final*, 0, final|, start|, 0, final*, 0, start|, 0, final*, 0, final|, start|, 0, 1, final*, start*, final*, start*, 0, final|, start|, 0, 1, 0, 0, final|, start|, 0, 1, final*, start*, 0, 1, final*, 0, final|, start|, 0, final*, 0, final|, start|, 0, 1, final*, 0, start|, 0, final*, start*, final*, start*, 0, final|, start|, 0, 1, 0, 0, final|, start|, 0, 1, final*, start*, 0, 1, final*, 0, final|, start|, 0, final*, 0, final|, start|, 0, 1, final*, 0, final|, start|, 0, 1, final*, start*, final*, start*, 0, final|, start|, 0, 1, 0, 0, 1, final*, 0, final|, start|, 0, final*, 0, start|, 0, final*, 0, final|, start|, 0, 1, final*, start*, final*, start*, 0, final|, start|, 0, 1, 0',
'QUOTE',
'QUOTE',
'IGNORE']), {'IGNORE': None})
# generated code between this line and its other occurence
if __name__ == '__main__':
    # Regenerate the generated section of this file in place: everything
    # between the two sentinel comments is replaced by freshly generated
    # parser/lexer code.  NOTE: the sentinel text below (including the
    # deliberate "OCCURENCE" spelling) must match the two sentinel comment
    # lines in this file exactly, or the split() will fail.
    f = py.magic.autopath()
    oldcontent = f.read()
    s = "# GENERATED CODE BETWEEN THIS LINE AND ITS OTHER OCCURENCE\n".lower()
    # 'gen' is the old generated section; it is discarded and rebuilt.
    pre, gen, after = oldcontent.split(s)
    parser, lexer, ToAST = make_ebnf_parser()
    transformer = ToAST.source
    # Rename the generated visitor so it does not clash with other ToAST
    # classes, then reassemble the file around the regenerated middle part.
    newcontent = "%s%s%s\nparser = %r\n%s\n%s%s" % (
        pre, s, transformer.replace("ToAST", "EBNFToAST"),
        parser, lexer.get_dummy_repr(), s, after)
    print newcontent
    f.write(newcontent)
| Python |
import py
class Node(object):
    """Base class of parse-tree nodes; provides graphviz visualisation.

    Subclasses supply a dot() generator yielding graphviz statements.
    """

    def view(self):
        """Render this subtree in the dotviewer GUI (debugging aid)."""
        from dotviewer import graphclient
        statements = ["digraph G{"]
        statements.extend(self.dot())
        statements.append("}")
        dotfile = py.test.ensuretemp("automaton").join("temp.dot")
        dotfile.write("\n".join(statements))
        graphclient.display_dot_file(str(dotfile))
class Symbol(Node):
    """A leaf of the parse tree: one matched terminal."""

    def __init__(self, symbol, additional_info, token):
        self.symbol = symbol
        self.additional_info = additional_info
        self.token = token

    def __repr__(self):
        return "Symbol(%r, %r)" % (self.symbol, self.additional_info)

    def dot(self):
        """Yield a single graphviz node statement for this leaf."""
        # escape characters that would break the dot label syntax
        name = self.symbol.replace("\\", "\\\\")
        name = name.replace('"', '\\"')
        name = name.replace('\n', '\\l')
        info = str(self.additional_info).replace('"', "'") or "_"
        info = repr(info).replace('"', '').replace("\\", "\\\\")
        yield ('"%s" [shape=box,label="%s\\n%s"];' % (id(self), name, info))

    def visit(self, visitor):
        "NOT_RPYTHON"
        # RPython-style visitors go through the generated dispatch method
        if isinstance(visitor, RPythonVisitor):
            return visitor.dispatch(self)
        handler = getattr(visitor, "visit_" + self.symbol, None)
        if handler is not None:
            return handler(self)
        # no handler: leaves are returned unchanged
        return self
class Nonterminal(Node):
    """An inner parse-tree node: a symbol plus a list of children."""

    def __init__(self, symbol, children):
        self.children = children
        self.symbol = symbol

    def __str__(self):
        inner = ", ".join([str(child) for child in self.children])
        return "%s(%s)" % (self.symbol, inner)

    def __repr__(self):
        return "Nonterminal(%r, %r)" % (self.symbol, self.children)

    def dot(self):
        """Yield graphviz statements for this node and its whole subtree."""
        yield '"%s" [label="%s"];' % (id(self), self.symbol)
        for child in self.children:
            yield '"%s" -> "%s";' % (id(self), id(child))
            if isinstance(child, Node):
                # recurse into proper tree nodes
                for statement in child.dot():
                    yield statement
            else:
                # non-Node children only get an escaped label
                label = repr(child).replace('"', '').replace("\\", "\\\\")
                yield '"%s" [label="%s"];' % (id(child), label)

    def visit(self, visitor):
        "NOT_RPYTHON"
        if isinstance(visitor, RPythonVisitor):
            return visitor.dispatch(self)
        general = getattr(visitor, "visit", None)
        if general is None:
            # no catch-all method: a missing visit_<symbol> is an error
            return getattr(visitor, "visit_" + self.symbol)(self)
        specific = getattr(visitor, "visit_" + self.symbol, None)
        if specific is not None:
            return specific(self)
        return general(self)
class VisitError(Exception):
    """Raised when dispatching finds no way to visit the given node."""

    def __init__(self, node):
        self.node = node
        self.args = (node, )

    def __str__(self):
        return "could not visit %s" % (self.node, )
def make_dispatch_function(__general_nonterminal_visit=None,
                           __general_symbol_visit=None,
                           __general_visit=None,
                           **dispatch_table):
    """Build a dispatch(self, node) method from a table of visit functions.

    Lookup order: exact visit_<symbol> entry, then the per-kind catch-all
    (nonterminal/symbol), then the completely general one; otherwise a
    VisitError is raised.
    """
    def dispatch(self, node):
        if isinstance(node, Nonterminal):
            try:
                handler = dispatch_table[node.symbol]
            except KeyError:
                if __general_nonterminal_visit:
                    return __general_nonterminal_visit(self, node)
            else:
                return handler(self, node)
        elif isinstance(node, Symbol):
            try:
                handler = dispatch_table[node.symbol]
            except KeyError:
                if __general_symbol_visit:
                    return __general_symbol_visit(self, node)
            else:
                return handler(self, node)
        if __general_visit:
            return __general_visit(self, node)
        raise VisitError(node)
    return dispatch
class CreateDispatchDictionaryMetaclass(type):
    """Metaclass that equips visitor classes with a generated 'dispatch'.

    All visit_<symbol> methods of the class become entries of a dispatch
    table keyed by <symbol>; the optional general_* hooks are forwarded
    under their double-underscore keyword names.
    """

    def __new__(cls, name_, bases, dct):
        prefix = "visit_"
        table = {}
        for attr, value in dct.items():
            if attr.startswith(prefix):
                table[attr[len(prefix):]] = value
        for special in ["general_symbol_visit",
                        "general_nonterminal_visit",
                        "general_visit"]:
            if special in dct:
                table["__" + special] = dct[special]
        dct["dispatch"] = make_dispatch_function(**table)
        return type.__new__(cls, name_, bases, dct)
class RPythonVisitor(object):
    """Base class for tree visitors.

    Subclasses automatically get a 'dispatch' method, built by the
    metaclass from their visit_* methods (and optional general_* hooks).
    """
    __metaclass__ = CreateDispatchDictionaryMetaclass
| Python |
import py
from pypy.rlib.parsing.parsing import PackratParser, Rule
from pypy.rlib.parsing.tree import Nonterminal
from pypy.rlib.parsing.regex import StringExpression, RangeExpression
from pypy.rlib.parsing.lexer import Lexer, DummyLexer
from pypy.rlib.parsing.deterministic import compress_char_set, DFA
import string
set = py.builtin.set
# Table mapping escape sequences (as they appear in the source, e.g. "\\n")
# to the single character they denote.
ESCAPES = {
    "\\a": "\a",
    "\\b": "\b",
    "\\f": "\f",
    "\\n": "\n",
    "\\r": "\r",
    "\\t": "\t",
    "\\v": "\v",
    "\\": "\\",   # a lone backslash escapes itself
}

for i in range(256):
    # 'x' and numbers are reserved for hexadecimal/octal escapes
    if chr(i) in 'x01234567':
        continue
    escaped = "\\" + chr(i)
    if escaped not in ESCAPES:
        ESCAPES[escaped] = chr(i)

# hexadecimal escapes: \xhh
for a in "0123456789ABCDEFabcdef":
    for b in "0123456789ABCDEFabcdef":
        escaped = "\\x%s%s" % (a, b)
        if escaped not in ESCAPES:
            ESCAPES[escaped] = chr(int("%s%s" % (a, b), 16))

# Octal escapes are written \ooo -- a backslash followed directly by three
# octal digits, *without* an 'x'.  (Using "\\x%s%s%s" here would build keys
# that unescape() can never produce, making every octal escape fail.)
for a in "0123":
    for b in "01234567":
        for c in "01234567":
            escaped = "\\%s%s%s" % (a, b, c)
            if escaped not in ESCAPES:
                ESCAPES[escaped] = chr(int("%s%s%s" % (a, b, c), 8))

def unescape(s):
    """Resolve backslash escapes in s using the ESCAPES table.

    Handles single-character escapes (e.g. \\n), hexadecimal \\xhh and
    octal \\ooo forms.  Raises ValueError for an unknown escape sequence;
    a trailing lone backslash raises IndexError.
    """
    result = []
    i = 0
    while i < len(s):
        if s[i] != "\\":
            result.append(s[i])
            i += 1
            continue
        if s[i + 1] == "x":
            # \xhh -- backslash, 'x' and two hex digits
            escaped = s[i: i + 4]
            i += 4
        elif s[i + 1] in "01234567":
            # \ooo -- backslash and three octal digits
            escaped = s[i: i + 4]
            i += 4
        else:
            # simple two-character escape
            escaped = s[i: i + 2]
            i += 2
        if escaped not in ESCAPES:
            raise ValueError("escape %r unknown" % (escaped, ))
        else:
            result.append(ESCAPES[escaped])
    return "".join(result)
syntax = r"""
EOF:
!__any__;
parse:
regex
[EOF];
regex:
r1 = concatenation
'|'
r2 = regex
return {r1 | r2}
| concatenation;
concatenation:
l = repetition+
return {reduce(operator.add, l, regex.StringExpression(""))};
repetition:
r1 = primary
'*'
return {r1.kleene()}
| r1 = primary
'+'
return {r1 + r1.kleene()}
| r1 = primary
'?'
return {regex.StringExpression("") | r1}
| r = primary
'{'
n = numrange
'}'
return {r * n[0] + reduce(operator.or_, [r * i for i in range(n[1] - n[0] + 1)], regex.StringExpression(""))}
| primary;
primary:
['('] regex [')']
| range
| c = char
return {regex.StringExpression(c)}
| '.'
return {regex.RangeExpression(chr(0), chr(255))};
char:
c = QUOTEDCHAR
return {unescape(c)}
| c = CHAR
return {c};
QUOTEDCHAR:
`(\\x[0-9a-fA-F]{2})|(\\.)`;
CHAR:
`[^\*\+\(\)\[\]\{\}\|\.\-\?\,\^]`;
range:
'['
s = rangeinner
']'
return {reduce(operator.or_, [regex.RangeExpression(a, chr(ord(a) + b - 1)) for a, b in compress_char_set(s)])};
rangeinner:
'^'
s = subrange
return {set([chr(c) for c in range(256)]) - s}
| subrange;
subrange:
l = rangeelement+
return {reduce(operator.or_, l)};
rangeelement:
c1 = char
'-'
c2 = char
return {set([chr(i) for i in range(ord(c1), ord(c2) + 1)])}
| c = char
return {set([c])};
numrange:
n1 = NUM
','
n2 = NUM
return {n1, n2}
| n1 = NUM
return {n1, n1};
NUM:
c = `0|([1-9][0-9]*)`
return {int(c)};
"""
def parse_regex(s):
    """Parse the regular-expression source `s` with the generated
    RegexParser and return the resulting regex expression object."""
    parser = RegexParser(s)
    return parser.parse()
def make_runner(regex, view=False):
    """Compile the regular-expression source `regex` into a DFA runner.

    The regex is parsed, turned into an automaton, made deterministic and
    optimized.  If `view` is true the DFA is displayed (dotviewer GUI)
    both before and after the optimize() step.
    """
    ast = RegexParser(regex).parse()
    dfa = ast.make_automaton().make_deterministic()
    if view:
        dfa.view()
    dfa.optimize()
    if view:
        dfa.view()
    return dfa.get_runner()
# generated code between this line and its other occurence
from pypy.rlib.parsing.pypackrat import PackratParser, Status
from pypy.rlib.parsing.pypackrat import BacktrackException
from pypy.rlib.parsing import regex
import operator
class Parser(object):
    def EOF(self):
        """Match rule 'EOF' at the current position and return its value."""
        return self._EOF().result
    def _EOF(self):
        """Memoized packrat matcher for rule 'EOF' (generated code --
        regenerate instead of editing by hand).

        Returns the Status memo entry on success; raises
        BacktrackException on failure.
        """
        # packrat memoization: one Status per input position
        _key = self._pos
        _status = self._dict_EOF.get(_key, None)
        if _status is None:
            _status = self._dict_EOF[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # negative lookahead (!__any__): EOF matches only where
            # reading any character fails
            _choice0 = self._pos
            _stored_result1 = _result
            try:
                _result = self.__any__()
            except BacktrackException:
                self._pos = _choice0
                _result = _stored_result1
            else:
                raise BacktrackException(None)
            # left-recursion support: re-run the rule from the start
            # position until the match stops growing
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._EOF()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # cache the failure so the rule is not retried at this position
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def parse(self):
        """Match rule 'parse' (regex followed by EOF) and return its value."""
        return self._parse().result
    def _parse(self):
        """Memoized packrat matcher for rule 'parse' = regex [EOF]
        (generated code -- regenerate instead of editing by hand)."""
        # packrat memoization: one Status per input position
        _key = self._pos
        _status = self._dict_parse.get(_key, None)
        if _status is None:
            _status = self._dict_parse[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._regex()
            _result = _call_status.result
            _error = _call_status.error
            # keep the regex result; the [EOF] match is discarded
            _before_discard0 = _result
            _call_status = self._EOF()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            _result = _before_discard0
            # left-recursion support: iterate until the match stops growing
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._parse()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # cache the failure so the rule is not retried at this position
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def regex(self):
        """Match rule 'regex' at the current position and return its value."""
        return self._regex().result
    def _regex(self):
        """Memoized packrat matcher for rule 'regex' =
        concatenation '|' regex | concatenation
        (generated code -- regenerate instead of editing by hand)."""
        # packrat memoization: one Status per input position
        _key = self._pos
        _status = self._dict_regex.get(_key, None)
        if _status is None:
            _status = self._dict_regex[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # ordered choice: each alternative restores _pos on failure
            while 1:
                _choice0 = self._pos
                try:
                    _call_status = self._concatenation()
                    _result = _call_status.result
                    _error = _call_status.error
                    r1 = _result
                    _result = self.__chars__('|')
                    _call_status = self._regex()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    r2 = _result
                    _result = (r1 | r2)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice1 = self._pos
                try:
                    _call_status = self._concatenation()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                raise BacktrackException(_error)
                # unreachable duplicate of the last alternative (generated)
                _call_status = self._concatenation()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            # left-recursion support: iterate until the match stops growing
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._regex()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # cache the failure so the rule is not retried at this position
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def concatenation(self):
        """Match rule 'concatenation' and return its value."""
        return self._concatenation().result
    def _concatenation(self):
        """Memoized packrat matcher for rule 'concatenation' = repetition+
        (generated code -- regenerate instead of editing by hand)."""
        # packrat memoization: one Status per input position
        _key = self._pos
        _status = self._dict_concatenation.get(_key, None)
        if _status is None:
            _status = self._dict_concatenation[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # repetition+ : one mandatory match, then greedily repeat
            _all0 = []
            _call_status = self._repetition()
            _result = _call_status.result
            _error = _call_status.error
            _all0.append(_result)
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._repetition()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            l = _result
            _result = (reduce(operator.add, l, regex.StringExpression("")))
            # left-recursion support: iterate until the match stops growing
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._concatenation()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # cache the failure so the rule is not retried at this position
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def repetition(self):
        """Match rule 'repetition' and return its value."""
        return self._repetition().result
    def _repetition(self):
        """Memoized packrat matcher for rule 'repetition':
        primary followed by '*', '+', '?', '{n,m}' or nothing
        (generated code -- regenerate instead of editing by hand)."""
        # packrat memoization: one Status per input position
        _key = self._pos
        _status = self._dict_repetition.get(_key, None)
        if _status is None:
            _status = self._dict_repetition[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # ordered choice: each alternative restores _pos on failure
            while 1:
                _choice0 = self._pos
                try:
                    _call_status = self._primary()
                    _result = _call_status.result
                    _error = _call_status.error
                    r1 = _result
                    _result = self.__chars__('*')
                    _result = (r1.kleene())
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice1 = self._pos
                try:
                    _call_status = self._primary()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    r1 = _result
                    _result = self.__chars__('+')
                    _result = (r1 + r1.kleene())
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                _choice2 = self._pos
                try:
                    _call_status = self._primary()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    r1 = _result
                    _result = self.__chars__('?')
                    _result = (regex.StringExpression("") | r1)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice2
                _choice3 = self._pos
                try:
                    _call_status = self._primary()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    r = _result
                    _result = self.__chars__('{')
                    _call_status = self._numrange()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    n = _result
                    _result = self.__chars__('}')
                    _result = (r * n[0] + reduce(operator.or_, [r * i for i in range(n[1] - n[0] + 1)], regex.StringExpression("")))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                _choice4 = self._pos
                try:
                    _call_status = self._primary()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice4
                raise BacktrackException(_error)
                # unreachable duplicate of the last alternative (generated)
                _call_status = self._primary()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            # left-recursion support: iterate until the match stops growing
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._repetition()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            # cache the failure so the rule is not retried at this position
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def primary(self):
return self._primary().result
    def _primary(self):
        """Memoized matcher for rule 'primary': a parenthesized regex, a
        character class, a single (possibly escaped) character, or the
        '.' wildcard.  Generated code -- regenerate via test_generate().
        """
        # packrat memoization: one Status object per input position
        _key = self._pos
        _status = self._dict_primary.get(_key, None)
        if _status is None:
            _status = self._dict_primary[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # cached success: jump to the recorded end position
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # re-entered while already parsing here: left recursion
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # alternative 1: '(' regex ')'
                _choice0 = self._pos
                try:
                    _before_discard1 = _result
                    _result = self.__chars__('(')
                    _result = _before_discard1
                    _call_status = self._regex()
                    _result = _call_status.result
                    _error = _call_status.error
                    _before_discard2 = _result
                    _result = self.__chars__(')')
                    _result = _before_discard2
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # alternative 2: character class
                _choice3 = self._pos
                try:
                    _call_status = self._range()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                # alternative 3: a literal character
                _choice4 = self._pos
                try:
                    _call_status = self._char()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    c = _result
                    _result = (regex.StringExpression(c))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice4
                # alternative 4: '.' matches any byte
                _choice5 = self._pos
                try:
                    _result = self.__chars__('.')
                    _result = (regex.RangeExpression(chr(0), chr(255)))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                raise BacktrackException(_error)
                # unreachable duplicate of the last alternative (generated)
                _result = self.__chars__('.')
                _result = (regex.RangeExpression(chr(0), chr(255)))
                break
            if _status.status == _status.LEFTRECURSION:
                # grow the left-recursive seed until it stops extending
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._primary()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def char(self):
return self._char().result
    def _char(self):
        """Memoized matcher for rule 'char': an escaped character
        (QUOTEDCHAR, run through unescape) or a plain CHAR.
        Generated code -- regenerate via test_generate().
        """
        _key = self._pos
        _status = self._dict_char.get(_key, None)
        if _status is None:
            _status = self._dict_char[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # alternative 1: backslash escape
                _choice0 = self._pos
                try:
                    _call_status = self._QUOTEDCHAR()
                    _result = _call_status.result
                    _error = _call_status.error
                    c = _result
                    _result = (unescape(c))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # alternative 2: ordinary character
                _choice1 = self._pos
                try:
                    _call_status = self._CHAR()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    c = _result
                    _result = (c)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                raise BacktrackException(_error)
                # unreachable duplicate of the last alternative (generated)
                _call_status = self._CHAR()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                c = _result
                _result = (c)
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._char()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def QUOTEDCHAR(self):
return self._QUOTEDCHAR().result
    def _QUOTEDCHAR(self):
        """Memoized matcher for token QUOTEDCHAR (a backslash escape),
        recognized by the _regex1380912319 DFA.  Generated code.
        """
        _key = self._pos
        _status = self._dict_QUOTEDCHAR.get(_key, None)
        if _status is None:
            _status = self._dict_QUOTEDCHAR[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1380912319()
            # tokens cannot be left-recursive
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def CHAR(self):
return self._CHAR().result
    def _CHAR(self):
        """Memoized matcher for token CHAR (any non-metacharacter byte),
        recognized by the _regex1323868075 DFA.  Generated code.
        """
        _key = self._pos
        _status = self._dict_CHAR.get(_key, None)
        if _status is None:
            _status = self._dict_CHAR[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1323868075()
            # tokens cannot be left-recursive
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def range(self):
return self._range().result
    def _range(self):
        """Memoized matcher for rule 'range': '[' rangeinner ']', built
        into an or-combination of RangeExpressions over the compressed
        character set.  Generated code.
        """
        _key = self._pos
        _status = self._dict_range.get(_key, None)
        if _status is None:
            _status = self._dict_range[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__('[')
            _call_status = self._rangeinner()
            _result = _call_status.result
            _error = _call_status.error
            s = _result
            _result = self.__chars__(']')
            # compress_char_set yields (startchar, run-length) pairs
            _result = (reduce(operator.or_, [regex.RangeExpression(a, chr(ord(a) + b - 1)) for a, b in compress_char_set(s)]))
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._range()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def rangeinner(self):
return self._rangeinner().result
    def _rangeinner(self):
        """Memoized matcher for rule 'rangeinner': '^' subrange
        (complemented against all 256 bytes) or a plain subrange.
        Generated code.
        """
        _key = self._pos
        _status = self._dict_rangeinner.get(_key, None)
        if _status is None:
            _status = self._dict_rangeinner[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # alternative 1: negated class
                _choice0 = self._pos
                try:
                    _result = self.__chars__('^')
                    _call_status = self._subrange()
                    _result = _call_status.result
                    _error = _call_status.error
                    s = _result
                    _result = (set([chr(c) for c in range(256)]) - s)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # alternative 2: plain class
                _choice1 = self._pos
                try:
                    _call_status = self._subrange()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                raise BacktrackException(_error)
                # unreachable duplicate of the last alternative (generated)
                _call_status = self._subrange()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._rangeinner()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def subrange(self):
return self._subrange().result
    def _subrange(self):
        """Memoized matcher for rule 'subrange': one or more
        rangeelements, or-ed together into a single character set.
        Generated code.
        """
        _key = self._pos
        _status = self._dict_subrange.get(_key, None)
        if _status is None:
            _status = self._dict_subrange[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # '+' repetition: first element is mandatory, the loop
            # collects further elements until one fails to match
            _all0 = []
            _call_status = self._rangeelement()
            _result = _call_status.result
            _error = _call_status.error
            _all0.append(_result)
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._rangeelement()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            l = _result
            _result = (reduce(operator.or_, l))
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._subrange()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def rangeelement(self):
return self._rangeelement().result
    def _rangeelement(self):
        """Memoized matcher for rule 'rangeelement': either a span
        char '-' char (expanded into the set of all bytes between the
        two, inclusive) or a single char.  Generated code.
        """
        _key = self._pos
        _status = self._dict_rangeelement.get(_key, None)
        if _status is None:
            _status = self._dict_rangeelement[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # alternative 1: character span c1-c2
                _choice0 = self._pos
                try:
                    _call_status = self._char()
                    _result = _call_status.result
                    _error = _call_status.error
                    c1 = _result
                    _result = self.__chars__('-')
                    _call_status = self._char()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    c2 = _result
                    _result = (set([chr(i) for i in range(ord(c1), ord(c2) + 1)]))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # alternative 2: single character
                _choice1 = self._pos
                try:
                    _call_status = self._char()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    c = _result
                    _result = (set([c]))
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                raise BacktrackException(_error)
                # unreachable duplicate of the last alternative (generated)
                _call_status = self._char()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                c = _result
                _result = (set([c]))
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._rangeelement()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def numrange(self):
return self._numrange().result
    def _numrange(self):
        """Memoized matcher for rule 'numrange': NUM ',' NUM giving
        (n1, n2), or a single NUM giving (n1, n1).  Generated code.
        """
        _key = self._pos
        _status = self._dict_numrange.get(_key, None)
        if _status is None:
            _status = self._dict_numrange[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # alternative 1: n1,n2
                _choice0 = self._pos
                try:
                    _call_status = self._NUM()
                    _result = _call_status.result
                    _error = _call_status.error
                    n1 = _result
                    _result = self.__chars__(',')
                    _call_status = self._NUM()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    n2 = _result
                    _result = (n1, n2)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # alternative 2: single count
                _choice1 = self._pos
                try:
                    _call_status = self._NUM()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    n1 = _result
                    _result = (n1, n1)
                    break
                except BacktrackException, _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                raise BacktrackException(_error)
                # unreachable duplicate of the last alternative (generated)
                _call_status = self._NUM()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                n1 = _result
                _result = (n1, n1)
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._numrange()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def NUM(self):
return self._NUM().result
    def _NUM(self):
        """Memoized matcher for token NUM: a decimal literal recognized
        by the _regex1166214427 DFA and converted to int.  Generated code.
        """
        _key = self._pos
        _status = self._dict_NUM.get(_key, None)
        if _status is None:
            _status = self._dict_NUM[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1166214427()
            c = _result
            _result = (int(c))
            # tokens cannot be left-recursive
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException, _exc:
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def __init__(self, inputstream):
        """Create a parser over `inputstream`, with one fresh packrat
        memoization dict per grammar rule and the cursor at position 0.
        Generated code -- regenerate via test_generate().
        """
        self._dict_EOF = {}
        self._dict_parse = {}
        self._dict_regex = {}
        self._dict_concatenation = {}
        self._dict_repetition = {}
        self._dict_primary = {}
        self._dict_char = {}
        self._dict_QUOTEDCHAR = {}
        self._dict_CHAR = {}
        self._dict_range = {}
        self._dict_rangeinner = {}
        self._dict_subrange = {}
        self._dict_rangeelement = {}
        self._dict_numrange = {}
        self._dict_NUM = {}
        self._pos = 0
        self._inputstream = inputstream
    def _regex1166214427(self):
        """Run the NUM token DFA at the current position.  On a match,
        advance self._pos past the longest accepted prefix and return the
        matched substring; otherwise restore the position and backtrack.
        """
        _choice0 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_1166214427(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice0
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _result = self._inputstream[self._pos: _upto]
        self._pos = _upto
        return _result
    def _regex1323868075(self):
        """Run the CHAR token DFA at the current position.  On a match,
        advance self._pos past the longest accepted prefix and return the
        matched substring; otherwise restore the position and backtrack.
        """
        _choice1 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_1323868075(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice1
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _result = self._inputstream[self._pos: _upto]
        self._pos = _upto
        return _result
    def _regex1380912319(self):
        """Run the QUOTEDCHAR token DFA at the current position.  On a
        match, advance self._pos past the longest accepted prefix and
        return the matched substring; otherwise restore and backtrack.
        """
        _choice2 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_1380912319(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice2
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _result = self._inputstream[self._pos: _upto]
        self._pos = _upto
        return _result
    class _Runner(object):
        """Hard-coded DFA runners for the parser's terminals (generated).

        Each recognize_* method walks its DFA over `text` starting at
        index i, recording the most recent accepting position in
        last_matched_index / last_matched_state.  It returns the end
        index when the whole remaining input was consumed in an
        accepting state, and ~i (one's complement of the stop position)
        otherwise; callers only inspect the last_matched_* attributes.
        """
        def __init__(self, text, pos):
            self.text = text
            self.pos = pos
            self.last_matched_state = -1
            self.last_matched_index = -1
            self.state = -1
        def recognize_1166214427(runner, i):
            # DFA for NUM: '0' | [1-9][0-9]*
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 0
                        return ~i
                    if '1' <= char <= '9':
                        state = 1
                    elif char == '0':
                        state = 2
                    else:
                        break
                if state == 1:
                    # accepting state: record the match so far
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 1
                        return i
                    if '0' <= char <= '9':
                        state = 1
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        def recognize_1323868075(runner, i):
            # DFA for CHAR: any byte except the regex metacharacters
            # ( ) * + , - . ? [ ] ^ { | }
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 0
                        return ~i
                    if '\x00' <= char <= "'":
                        state = 1
                    elif '/' <= char <= '>':
                        state = 1
                    elif '@' <= char <= 'Z':
                        state = 1
                    elif char == '\\':
                        state = 1
                    elif '_' <= char <= 'z':
                        state = 1
                    elif '~' <= char <= '\xff':
                        state = 1
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        def recognize_1380912319(runner, i):
            # DFA for QUOTEDCHAR: '\' followed by either a non-'x' byte,
            # or 'x' plus (optionally) two hex digits
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 0
                        return ~i
                    if char == '\\':
                        state = 1
                    else:
                        break
                if state == 1:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 1
                        return ~i
                    if char == 'x':
                        state = 2
                    elif '\x00' <= char <= 'w':
                        state = 3
                    elif 'y' <= char <= '\xff':
                        state = 3
                    else:
                        break
                if state == 2:
                    # '\x' alone already accepts; hex digits may extend it
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 2
                        return i
                    if '0' <= char <= '9':
                        state = 4
                    elif 'A' <= char <= 'F':
                        state = 4
                    elif 'a' <= char <= 'f':
                        state = 4
                    else:
                        break
                if state == 4:
                    if i < len(input):
                        char = input[i]
                        i += 1
                    else:
                        runner.state = 4
                        return ~i
                    if '0' <= char <= '9':
                        state = 3
                    elif 'A' <= char <= 'F':
                        state = 3
                    elif 'a' <= char <= 'f':
                        state = 3
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
class RegexParser(PackratParser):
    """Public parser class for the regex syntax; the rule methods are
    grafted onto it from the generated Parser class by the loop below."""
    def __init__(self, stream):
        self.init_parser(stream)
# Copy every attribute of the generated Parser class onto RegexParser,
# except dunders that must remain per-class; finally expose the generated
# __init__ as init_parser (.im_func unwraps the Python 2 unbound method).
forbidden = dict.fromkeys(("__weakref__ __doc__ "
                           "__dict__ __module__").split())
initthere = "__init__" in RegexParser.__dict__
for key, value in Parser.__dict__.iteritems():
    if key not in RegexParser.__dict__ and key not in forbidden:
        setattr(RegexParser, key, value)
RegexParser.init_parser = Parser.__init__.im_func
# generated code between this line and its other occurence
def test_generate():
    """Regenerate the parser section of this very file, in place.

    The file content is split on the (lower-cased) marker line; the part
    between the two marker occurrences is replaced by parser code freshly
    generated from `syntax`, and the file is rewritten.
    """
    f = py.magic.autopath()
    oldcontent = f.read()
    s = "# GENERATED CODE BETWEEN THIS LINE AND ITS OTHER OCCURENCE\n".lower()
    pre, gen, after = oldcontent.split(s)
    from pypackrat import PyPackratSyntaxParser
    from makepackrat import TreeOptimizer, ParserBuilder
    p = PyPackratSyntaxParser(syntax)
    t = p.file()
    t = t.visit(TreeOptimizer())
    visitor = ParserBuilder()
    t.visit(visitor)
    code = visitor.get_code()
    # the template below must reproduce the non-generated parts verbatim
    content = """\
%s\
%s
from pypy.rlib.parsing.pypackrat import PackratParser, Status
from pypy.rlib.parsing.pypackrat import BacktrackException
from pypy.rlib.parsing import regex
import operator
%s
class RegexParser(PackratParser):
    def __init__(self, stream):
        self.init_parser(stream)
forbidden = dict.fromkeys(("__weakref__ __doc__ "
                           "__dict__ __module__").split())
initthere = "__init__" in RegexParser.__dict__
for key, value in Parser.__dict__.iteritems():
    if key not in RegexParser.__dict__ and key not in forbidden:
        setattr(RegexParser, key, value)
RegexParser.init_parser = Parser.__init__.im_func
%s
%s\
""" % (pre, s, code, s, after)
    print content
    f.write(content)
| Python |
import py
import string
from pypy.rlib.parsing.deterministic import NFA
set = py.builtin.set
class RegularExpression(object):
    """Abstract base class of all regular-expression nodes.

    Subclasses must implement make_automaton().  The operator overloads
    build combined expressions: + concatenates, | alternates, ~ negates,
    unary + means "one or more", * repeats a fixed number of times.
    """

    def __init__(self):
        raise NotImplementedError("abstract base class")

    def make_automaton(self):
        raise NotImplementedError("abstract base class")

    def kleene(self):
        # zero or more repetitions
        return KleeneClosure(self)

    def __add__(self, other):
        return AddExpression(self, other)

    def __or__(self, other):
        return OrExpression(self, other)

    def __pos__(self):
        # one-or-more == self followed by self*
        return AddExpression(self, self.kleene())

    def __mul__(self, i):
        # repeat exactly i times; i == 0 gives the empty-string regex
        product = StringExpression("")
        count = 0
        while count < i:
            product = product + self
            count += 1
        return product

    def __invert__(self):
        return NotExpression(self)
class StringExpression(RegularExpression):
    """Regular expression that matches exactly one literal string."""

    def __init__(self, s):
        self.string = s

    def __add__(self, other):
        # concatenating two literals folds them into a single literal;
        # anything else falls back to the generic AddExpression
        if isinstance(other, StringExpression):
            return StringExpression(self.string + other.string)
        return super(StringExpression, self).__add__(other)

    def make_automaton(self):
        # a simple chain of states, one transition per character; for
        # the empty string the start state itself is final
        nfa = NFA()
        current = nfa.add_state(start=True, final=not self.string)
        last_index = len(self.string) - 1
        for position, letter in enumerate(self.string):
            following = nfa.add_state(final=(position == last_index))
            nfa.add_transition(current, following, letter)
            current = following
        return nfa

    def __repr__(self):
        return "StringExpression(%r)" % (self.string, )
class RangeExpression(RegularExpression):
    """Regular expression matching one character in the inclusive
    range [fromchar, tochar]."""

    def __init__(self, fromchar, tochar):
        self.fromchar = fromchar
        self.tochar = tochar

    def make_automaton(self):
        # two states, with one labelled transition per character in range
        nfa = NFA()
        begin = nfa.add_state(start=True)
        end = nfa.add_state(final=True)
        code = ord(self.fromchar)
        while code <= ord(self.tochar):
            nfa.add_transition(begin, end, chr(code))
            code += 1
        return nfa

    def __repr__(self):
        return "RangeExpression(%r, %r)" % (self.fromchar, self.tochar)
class AddExpression(RegularExpression):
    # Concatenation: matches rega immediately followed by regb.
    def __init__(self, rega, regb):
        self.rega = rega
        self.regb = regb
    def make_automaton(self):
        # Build both automata, then glue them: every final of nfa1 gets
        # an epsilon edge into every (copied) start of nfa2, and nfa2's
        # finals feed a single fresh final state.
        nfa1 = self.rega.make_automaton()
        nfa2 = self.regb.make_automaton()
        finalstates1 = nfa1.final_states
        nfa1.final_states = set()
        real_final = nfa1.add_state("final*", final=True)
        # update() presumably merges nfa2's states into nfa1; it returns
        # a mapping old-state -> copied-state used below
        orig_to_copy = nfa1.update(nfa2)
        for final_state in finalstates1:
            for start_state in nfa2.start_states:
                start_state = orig_to_copy[start_state]
                nfa1.add_transition(final_state, start_state)
        for final_state in nfa2.final_states:
            final_state = orig_to_copy[final_state]
            nfa1.add_transition(final_state, real_final)
        return nfa1
    def __repr__(self):
        return "AddExpression(%r, %r)" % (self.rega, self.regb)
class ExpressionTag(RegularExpression):
    # Wraps a regex and funnels all its final states into one fresh
    # final state labelled `tag` (presumably so the eventual DFA can
    # report which tagged expression matched -- see LexingOrExpression).
    def __init__(self, reg, tag):
        self.reg = reg
        self.tag = tag
    def make_automaton(self):
        nfa = self.reg.make_automaton()
        finalstates = nfa.final_states
        nfa.final_states = set()
        # unmergeable keeps the tagged state distinct from other states
        real_final = nfa.add_state(self.tag, final=True, unmergeable=True)
        for final_state in finalstates:
            nfa.add_transition(final_state, real_final)
        return nfa
    def __repr__(self):
        return "ExpressionTag(%r, %r)" % (self.reg, self.tag)
class KleeneClosure(RegularExpression):
    # Kleene star: zero or more repetitions of self.regex.
    def __init__(self, regex):
        self.regex = regex
    def make_automaton(self):
        nfa = self.regex.make_automaton()
        # detach the old start/final states and wrap them with a fresh
        # start and final state
        oldfinal = nfa.final_states
        nfa.final_states = set()
        oldstart = nfa.start_states
        nfa.start_states = set()
        real_final = nfa.add_state("final*", final=True)
        real_start = nfa.add_state("start*", start=True)
        for start in oldstart:
            nfa.add_transition(real_start, start)
        for final in oldfinal:
            nfa.add_transition(final, real_final)
        # skip edge (zero repetitions) and loop-back edge (repeat)
        nfa.add_transition(real_start, real_final)
        nfa.add_transition(real_final, real_start)
        return nfa
    def __repr__(self):
        return "KleeneClosure(%r)" % (self.regex, )
class OrExpression(RegularExpression):
    # Alternation: matches whatever rega or regb matches.
    def __init__(self, rega, regb):
        self.rega = rega
        self.regb = regb
    def make_automaton(self):
        # Thompson-style union: fresh start/final states with epsilon
        # edges into and out of both sub-automata.  nfa2's states are
        # merged into nfa1 via update(), which returns the state mapping.
        nfa1 = self.rega.make_automaton()
        nfa2 = self.regb.make_automaton()
        oldfinal1 = nfa1.final_states
        nfa1.final_states = set()
        oldstart1 = nfa1.start_states
        nfa1.start_states = set()
        real_final = nfa1.add_state("final|", final=True)
        real_start = nfa1.add_state("start|", start=True)
        orig_to_copy = nfa1.update(nfa2)
        for start in oldstart1:
            nfa1.add_transition(real_start, start)
        for final in oldfinal1:
            nfa1.add_transition(final, real_final)
        for start in nfa2.start_states:
            start = orig_to_copy[start]
            nfa1.add_transition(real_start, start)
        for final in nfa2.final_states:
            final = orig_to_copy[final]
            nfa1.add_transition(final, real_final)
        return nfa1
    def __repr__(self):
        return "OrExpression(%r, %r)" % (self.rega, self.regb)
class NotExpression(RegularExpression):
    # Complement of self.reg over the 256-byte alphabet.
    def __init__(self, reg):
        self.reg = reg
    def make_automaton(self):
        nfa = self.reg.make_automaton()
        # add error state
        # complete the automaton: give every state a transition to the
        # error state for each byte it does not already handle
        for state in range(nfa.num_states):
            occurring = set(nfa.transitions.get(state, {}).keys())
            toerror = set([chr(i) for i in range(256)]) - occurring
            for input in toerror:
                nfa.add_transition(state, error, input)
        # NOTE(review): flipping final states complements a *deterministic*
        # automaton; for a genuinely nondeterministic `nfa` this is not the
        # language complement -- confirm callers determinize first.
        nfa.final_states = set(range(nfa.num_states)) - nfa.final_states
        return nfa
    def __invert__(self):
        # double negation cancels out
        return self.reg
class LexingOrExpression(RegularExpression):
    # Union of several regexes for lexing: each alternative keeps its own
    # unmergeable final state labelled with the matching entry of `names`,
    # so the resulting automaton can tell which alternative matched.
    def __init__(self, regs, names):
        self.regs = regs
        self.names = names
    def make_automaton(self):
        # determinize and optimize each alternative separately, then
        # convert back to NFAs so their states can be merged
        dfas = [reg.make_automaton().make_deterministic() for reg in self.regs]
        [dfa.optimize() for dfa in dfas]
        nfas = [dfa.make_nondeterministic() for dfa in dfas]
        result_nfa = NFA()
        start_state = result_nfa.add_state(start=True)
        for i, nfa in enumerate(nfas):
            final_state = result_nfa.add_state(self.names[i], final=True,
                                               unmergeable=True)
            # copy this alternative's states: its start states hang off
            # the shared start, its finals feed the tagged final
            state_map = {}
            for j, name in enumerate(nfa.names):
                start = j in nfa.start_states
                final = j in nfa.final_states
                newstate = result_nfa.add_state(name)
                state_map[j] = newstate
                if start:
                    result_nfa.add_transition(start_state, newstate)
                if final:
                    result_nfa.add_transition(newstate, final_state)
            # copy all labelled transitions through the state map
            for state, subtransitions in nfa.transitions.iteritems():
                for input, states in subtransitions.iteritems():
                    newstate = state_map[state]
                    newstates = [state_map[s] for s in states]
                    for newtargetstate in newstates:
                        result_nfa.add_transition(
                            newstate, newtargetstate, input)
        return result_nfa
    def __repr__(self):
        return "LexingOrExpression(%r, %r)" % (self.regs, self.names)
| Python |
import py
import sys
from pypy.rlib.parsing.tree import Nonterminal, Symbol, RPythonVisitor
from pypy.rlib.parsing.codebuilder import Codebuilder
from pypy.rlib.objectmodel import we_are_translated
class BacktrackException(Exception):
    """Raised when a packrat-parser rule fails, carrying the collected
    error information (may be None)."""

    def __init__(self, error=None):
        # only initialize the Exception machinery when running on top of
        # CPython; the translated (RPython) version skips it
        if not we_are_translated():
            Exception.__init__(self, error)
        self.error = error
class TreeOptimizer(RPythonVisitor):
    # Simplifies the grammar parse tree before code generation.
    def visit_or(self, t):
        # an 'or' with a single alternative is no choice at all
        if len(t.children) == 1:
            return self.dispatch(t.children[0])
        return self.general_nonterminal_visit(t)
    # a single-child 'commands' node collapses the same way
    visit_commands = visit_or
    def visit_negation(self, t):
        child = self.dispatch(t.children[0])
        # double negation of a negation becomes a positive lookahead
        if child.symbol == "negation":
            child.symbol = "lookahead"
            return child
        t.children[0] = child
        return t
    def general_nonterminal_visit(self, t):
        # recurse into all children, rewriting them in place
        for i in range(len(t.children)):
            t.children[i] = self.dispatch(t.children[i])
        return t
    def general_visit(self, t):
        return t
syntax = r"""
NAME:
`[a-zA-Z_][a-zA-Z0-9_]*`;
SPACE:
' ';
COMMENT:
`( *#[^\n]*\n)+`;
IGNORE:
`(#[^\n]*\n)|\n|\t| `;
newline:
COMMENT
| `( *\n *)*`;
REGEX:
r = `\`[^\\\`]*(\\.[^\\\`]*)*\``
return {Symbol('REGEX', r, None)};
QUOTE:
r = `'[^\']*'`
return {Symbol('QUOTE', r, None)};
PYTHONCODE:
r = `\{[^\n\}]*\}`
return {Symbol('PYTHONCODE', r, None)};
EOF:
!__any__;
file:
IGNORE*
list
[EOF];
list:
content = production+
return {Nonterminal('list', content)};
production:
name = NAME
SPACE*
args = productionargs
':'
IGNORE*
what = or_
IGNORE*
';'
IGNORE*
return {Nonterminal('production', [name, args, what])};
productionargs:
'('
IGNORE*
args = (
NAME
[
IGNORE*
','
IGNORE*
]
)*
arg = NAME
IGNORE*
')'
IGNORE*
return {Nonterminal('productionargs', args + [arg])}
| return {Nonterminal('productionargs', [])};
or_:
l = (commands ['|' IGNORE*])+
last = commands
return {Nonterminal('or', l + [last])}
| commands;
commands:
cmd = command
newline
cmds = (command [newline])+
return {Nonterminal('commands', [cmd] + cmds)}
| command;
command:
simplecommand;
simplecommand:
return_
| if_
| named_command
| repetition
| choose
| negation;
return_:
'return'
SPACE*
code = PYTHONCODE
IGNORE*
return {Nonterminal('return', [code])};
if_:
'do'
newline
cmd = command
SPACE*
'if'
SPACE*
condition = PYTHONCODE
IGNORE*
return {Nonterminal('if', [cmd, condition])}
| 'if'
SPACE*
condition = PYTHONCODE
IGNORE*
return {Nonterminal('if', [condition])};
choose:
'choose'
SPACE*
name = NAME
SPACE*
'in'
SPACE*
expr = PYTHONCODE
IGNORE*
cmds = commands
return {Nonterminal('choose', [name, expr, cmds])};
commandchain:
result = simplecommand+
return {Nonterminal('commands', result)};
named_command:
name = NAME
SPACE*
'='
SPACE*
cmd = command
return {Nonterminal('named_command', [name, cmd])};
repetition:
what = enclosed
SPACE* '?' IGNORE*
return {Nonterminal('maybe', [what])}
| what = enclosed
SPACE*
repetition = ('*' | '+')
IGNORE*
return {Nonterminal('repetition', [repetition, what])};
negation:
'!'
SPACE*
what = negation
IGNORE*
return {Nonterminal('negation', [what])}
| enclosed;
enclosed:
'<'
IGNORE*
what = primary
IGNORE*
'>'
IGNORE*
return {Nonterminal('exclusive', [what])}
| '['
IGNORE*
what = or_
IGNORE*
']'
IGNORE*
return {Nonterminal('ignore', [what])}
| ['(' IGNORE*] or_ [')' IGNORE*]
| primary;
primary:
call | REGEX [IGNORE*] | QUOTE [IGNORE*];
call:
x = NAME
args = arguments
IGNORE*
return {Nonterminal("call", [x, args])};
arguments:
'('
IGNORE*
args = (
PYTHONCODE
[IGNORE* ',' IGNORE*]
)*
last = PYTHONCODE
')'
IGNORE*
return {Nonterminal("args", args + [last])}
| return {Nonterminal("args", [])};
"""
class ErrorInformation(object):
    """Describes a parse failure: the input position it happened at and
    the list of things that would have been acceptable there."""

    def __init__(self, pos, expected=None):
        if expected is None:
            expected = []
        self.expected = expected
        self.pos = pos

    def __str__(self):
        return "ErrorInformation(%s, %s)" % (self.pos, self.expected)

    def get_line_column(self, source):
        """Return (lineno, columnno) of self.pos within `source`.

        lineno is 0-based; columnno is measured from the last newline
        before the position (it is pos+1 when there is no newline at all,
        since rfind() returns -1 then).
        """
        uptoerror = source[:self.pos]
        lineno = uptoerror.count("\n")
        columnno = self.pos - uptoerror.rfind("\n")
        return lineno, columnno

    def nice_error_message(self, filename='<filename>', source=""):
        """Build a CPython-traceback-like, human readable error report.

        When `source` is given, the offending line is shown with a caret;
        otherwise a placeholder is printed.
        """
        # FIX: `result` used to be created only inside the `if source:`
        # branch, so calling this without source raised NameError on the
        # else-branch append below.
        result = []
        if source:
            lineno, columnno = self.get_line_column(source)
            result.append(" File %s, line %s" % (filename, lineno + 1))
            result.append(source.split("\n")[lineno])
            result.append(" " * columnno + "^")
        else:
            result.append("<couldn't get source>")
        if self.expected:
            failure_reasons = self.expected
            if len(failure_reasons) > 1:
                all_but_one = failure_reasons[:-1]
                last = failure_reasons[-1]
                expected = "%s or '%s'" % (
                    ", ".join(["'%s'" % e for e in all_but_one]), last)
            else:
                expected = failure_reasons[0]
            result.append("ParseError: expected %s" % (expected, ))
        else:
            result.append("ParseError")
        return "\n".join(result)
class Status(object):
    """Memoization record for one (position, production) pair of the
    generated packrat parser: caches the result, the error, the position
    reached, and a small state machine used for left-recursion handling."""

    # status codes:
    NORMAL = 0          # finished successfully, result/pos are valid
    ERROR = 1           # finished with a parse error
    INPROGRESS = 2      # evaluation has started but not finished
    LEFTRECURSION = 3   # re-entered while in progress (left recursion)
    SOMESOLUTIONS = 4   # left recursion produced a partial solution

    _annspecialcase_ = 'specialize:ctr_location' # polymorphic

    def __init__(self):
        self.pos = 0
        self.error = None
        self.status = self.INPROGRESS
        self.result = None

    def __repr__(self):
        fields = (self.pos, self.result, self.error, self.status)
        return "Status(%s, %s, %s, %s)" % fields
class ParserBuilder(RPythonVisitor, Codebuilder):
def __init__(self):
Codebuilder.__init__(self)
self.initcode = []
self.names = {}
self.matchers = {}
def make_parser(self):
m = {'Status': Status,
'Nonterminal': Nonterminal,
'Symbol': Symbol,}
exec py.code.Source(self.get_code()).compile() in m
return m['Parser']
def memoize_header(self, name, args):
dictname = "_dict_%s" % (name, )
self.emit_initcode("self.%s = {}" % (dictname, ))
if args:
self.emit("_key = (self._pos, %s)" % (", ".join(args)))
else:
self.emit("_key = self._pos")
self.emit("_status = self.%s.get(_key, None)" % (dictname, ))
for _ in self.start_block("if _status is None:"):
self.emit("_status = self.%s[_key] = Status()" % (
dictname, ))
for _ in self.start_block("else:"):
self.emit("_statusstatus = _status.status")
for _ in self.start_block("if _statusstatus == _status.NORMAL:"):
self.emit("self._pos = _status.pos")
self.emit("return _status")
for _ in self.start_block("elif _statusstatus == _status.ERROR:"):
self.emit("raise BacktrackException(_status.error)")
if self.have_call:
for _ in self.start_block(
"elif (_statusstatus == _status.INPROGRESS or\n"
" _statusstatus == _status.LEFTRECURSION):"):
self.emit("_status.status = _status.LEFTRECURSION")
for _ in self.start_block("if _status.result is not None:"):
self.emit("self._pos = _status.pos")
self.emit("return _status")
for _ in self.start_block("else:"):
self.emit("raise BacktrackException(None)")
for _ in self.start_block(
"elif _statusstatus == _status.SOMESOLUTIONS:"):
self.emit("_status.status = _status.INPROGRESS")
self.emit("_startingpos = self._pos")
self.start_block("try:")
self.emit("_result = None")
self.emit("_error = None")
def memoize_footer(self, name, args):
dictname = "_dict_%s" % (name, )
if self.have_call:
for _ in self.start_block(
"if _status.status == _status.LEFTRECURSION:"):
for _ in self.start_block("if _status.result is not None:"):
for _ in self.start_block("if _status.pos >= self._pos:"):
self.emit("_status.status = _status.NORMAL")
self.emit("self._pos = _status.pos")
self.emit("return _status")
self.emit("_status.pos = self._pos")
self.emit("_status.status = _status.SOMESOLUTIONS")
self.emit("_status.result = %s" % (self.resultname, ))
self.emit("_status.error = _error")
self.emit("self._pos = _startingpos")
self.emit("return self._%s(%s)" % (name, ', '.join(args)))
else:
self.emit("assert _status.status != _status.LEFTRECURSION")
self.emit("_status.status = _status.NORMAL")
self.emit("_status.pos = self._pos")
self.emit("_status.result = %s" % (self.resultname, ))
self.emit("_status.error = _error")
self.emit("return _status")
self.end_block("try")
for _ in self.start_block("except BacktrackException, _exc:"):
self.emit("_status.pos = -1")
self.emit("_status.result = None")
self.combine_error('_exc.error')
self.emit("_status.error = _error")
self.emit("_status.status = _status.ERROR")
self.emit("raise BacktrackException(_error)")
def choice_point(self, name=None):
var = "_choice%s" % (self.namecount, )
self.namecount += 1
self.emit("%s = self._pos" % (var, ))
return var
def revert(self, var):
self.emit("self._pos = %s" % (var, ))
def visit_list(self, t):
self.start_block("class Parser(object):")
for elt in t.children:
self.dispatch(elt)
for _ in self.start_block("def __init__(self, inputstream):"):
for line in self.initcode:
self.emit(line)
self.emit("self._pos = 0")
self.emit("self._inputstream = inputstream")
if self.matchers:
self.emit_regex_code()
self.end_block("class")
def emit_regex_code(self):
for regex, matcher in self.matchers.iteritems():
for _ in self.start_block(
"def _regex%s(self):" % (abs(hash(regex)), )):
c = self.choice_point()
self.emit("_runner = self._Runner(self._inputstream, self._pos)")
self.emit("_i = _runner.recognize_%s(self._pos)" % (
abs(hash(regex)), ))
self.start_block("if _runner.last_matched_state == -1:")
self.revert(c)
self.emit("raise BacktrackException")
self.end_block("if")
self.emit("_upto = _runner.last_matched_index + 1")
self.emit("_pos = self._pos")
self.emit("assert _pos >= 0")
self.emit("assert _upto >= 0")
self.emit("_result = self._inputstream[_pos: _upto]")
self.emit("self._pos = _upto")
self.emit("return _result")
for _ in self.start_block("class _Runner(object):"):
for _ in self.start_block("def __init__(self, text, pos):"):
self.emit("self.text = text")
self.emit("self.pos = pos")
self.emit("self.last_matched_state = -1")
self.emit("self.last_matched_index = -1")
self.emit("self.state = -1")
for regex, matcher in self.matchers.iteritems():
matcher = str(matcher).replace(
"def recognize(runner, i)",
"def recognize_%s(runner, i)" % (abs(hash(regex)), ))
self.emit(str(matcher))
def visit_production(self, t):
name = t.children[0]
if name in self.names:
raise Exception("name %s appears twice" % (name, ))
self.names[name] = True
otherargs = t.children[1].children
argswithself = ", ".join(["self"] + otherargs)
argswithoutself = ", ".join(otherargs)
for _ in self.start_block("def %s(%s):" % (name, argswithself)):
self.emit("return self._%s(%s).result" % (name, argswithoutself))
self.start_block("def _%s(%s):" % (name, argswithself, ))
self.namecount = 0
self.resultname = "_result"
self.have_call = False
self.created_error = False
allother = self.store_code_away()
self.dispatch(t.children[-1])
subsequent = self.restore_code(allother)
self.memoize_header(name, otherargs)
self.add_code(subsequent)
self.memoize_footer(name, otherargs)
self.end_block("def")
def visit_or(self, t, first=False):
possibilities = t.children
if len(possibilities) > 1:
self.start_block("while 1:")
for i, p in enumerate(possibilities):
c = self.choice_point()
for _ in self.start_block("try:"):
self.dispatch(p)
self.emit("break")
for _ in self.start_block("except BacktrackException, _exc:"):
self.combine_error('_exc.error')
self.revert(c)
if i == len(possibilities) - 1:
self.emit("raise BacktrackException(_error)")
self.dispatch(possibilities[-1])
if len(possibilities) > 1:
self.emit("break")
self.end_block("while")
def visit_commands(self, t):
for elt in t.children:
self.dispatch(elt)
def visit_maybe(self, t):
c = self.choice_point()
for _ in self.start_block("try:"):
self.dispatch(t.children[0])
for _ in self.start_block("except BacktrackException:"):
self.revert(c)
def visit_repetition(self, t):
name = "_all%s" % (self.namecount, )
self.namecount += 1
self.emit("%s = []" % (name, ))
if t.children[0] == '+':
self.dispatch(t.children[1])
self.emit("%s.append(_result)" % (name, ))
for _ in self.start_block("while 1:"):
c = self.choice_point()
for _ in self.start_block("try:"):
self.dispatch(t.children[1])
self.emit("%s.append(_result)" % (name, ))
for _ in self.start_block("except BacktrackException, _exc:"):
self.combine_error('_exc.error')
self.revert(c)
self.emit("break")
self.emit("_result = %s" % (name, ))
def visit_exclusive(self, t):
self.resultname = "_enclosed"
self.dispatch(t.children[0])
self.emit("_enclosed = _result")
def visit_ignore(self, t):
resultname = "_before_discard%i" % (self.namecount, )
self.namecount += 1
self.emit("%s = _result" % (resultname, ))
self.dispatch(t.children[0])
self.emit("_result = %s" % (resultname, ))
def visit_negation(self, t):
c = self.choice_point()
resultname = "_stored_result%i" % (self.namecount, )
self.namecount += 1
child = t.children[0]
self.emit("%s = _result" % (resultname, ))
for _ in self.start_block("try:"):
self.dispatch(child)
for _ in self.start_block("except BacktrackException:"):
self.revert(c)
self.emit("_result = %s" % (resultname, ))
for _ in self.start_block("else:"):
# heuristic to get nice error messages sometimes
if isinstance(child, Symbol) and child.symbol == "QUOTE":
error = "self._ErrorInformation(%s, ['NOT %s'])" % (
c, child.additional_info[1:-1], )
else:
error = "None"
self.emit("raise BacktrackException(%s)" % (error, ))
def visit_lookahead(self, t):
resultname = "_stored_result%i" % (self.namecount, )
self.emit("%s = _result" % (resultname, ))
c = self.choice_point()
self.dispatch(t.children[0])
self.revert(c)
self.emit("_result = %s" % (resultname, ))
def visit_named_command(self, t):
name = t.children[0]
self.dispatch(t.children[1])
self.emit("%s = _result" % (name, ))
def visit_return(self, t):
self.emit("_result = (%s)" % (t.children[0].additional_info[1:-1], ))
def visit_if(self, t):
if len(t.children) == 2:
self.dispatch(t.children[0])
for _ in self.start_block("if not (%s):" % (
t.children[-1].additional_info[1:-1], )):
self.emit("raise BacktrackException(")
self.emit(" self._ErrorInformation(")
self.emit(" _startingpos, ['condition not met']))")
def visit_choose(self, t):
for _ in self.start_block("for %s in (%s):" % (
t.children[0], t.children[1].additional_info[1:-1], )):
for _ in self.start_block("try:"):
self.dispatch(t.children[2])
self.emit("break")
for _ in self.start_block("except BacktrackException, _exc:"):
self.combine_error('_exc.error')
for _ in self.start_block("else:"):
self.emit("raise BacktrackException(_error)")
def visit_call(self, t):
self.have_call = True
args = ", ".join(['(%s)' % (arg.additional_info[1:-1], )
for arg in t.children[1].children])
if t.children[0].startswith("_"):
callname = t.children[0]
self.emit("_result = self.%s(%s)" % (callname, args))
else:
callname = "_" + t.children[0]
self.emit("_call_status = self.%s(%s)" % (callname, args))
self.emit("_result = _call_status.result")
self.combine_error('_call_status.error')
def visit_REGEX(self, t):
r = t.additional_info[1:-1].replace('\\`', '`')
matcher = self.get_regex(r)
self.emit("_result = self._regex%s()" % (abs(hash(r)), ))
def visit_QUOTE(self, t):
self.emit("_result = self.__chars__(%r)" % (
str(t.additional_info[1:-1]), ))
def get_regex(self, r):
from pypy.rlib.parsing.regexparse import parse_regex
if r in self.matchers:
return self.matchers[r]
regex = parse_regex(r)
if regex is None:
raise ValueError(
"%s is not a valid regular expression" % regextext)
automaton = regex.make_automaton().make_deterministic()
automaton.optimize()
matcher = automaton.make_lexing_code()
self.matchers[r] = py.code.Source(matcher)
return matcher
def combine_error(self, newerror):
if self.created_error:
self.emit(
"_error = self._combine_errors(_error, %s)" % (newerror, ))
else:
self.emit("_error = %s" % (newerror, ))
self.created_error = True
class MetaPackratParser(type):
    """Metaclass that compiles a class's docstring as a packrat grammar.

    If the new class has a non-None __doc__, it is parsed with the
    bootstrap PyPackratSyntaxParser, turned into parser source via
    ParserBuilder, and the generated methods are copied onto the class.
    """
    def __new__(cls, name_, bases, dct):
        # classes without a docstring (e.g. PackratParser itself) are
        # created normally, with no grammar processing
        if '__doc__' not in dct or dct['__doc__'] is None:
            return type.__new__(cls, name_, bases, dct)
        from pypackrat import PyPackratSyntaxParser
        import sys, new, inspect
        # frame of the code that is *defining* the class; its globals are
        # needed so the generated functions see the caller's namespace
        frame = sys._getframe(1)
        source = dct['__doc__']
        p = PyPackratSyntaxParser(source)
        try:
            t = p.file()
        except BacktrackException, exc:
            # grammar error: print a nice message, then try to map the
            # failing docstring line back to the defining source file
            print exc.error.nice_error_message("<docstring>", source)
            lineno, _ = exc.error.get_line_column(source)
            errorline = source.split("\n")[lineno]
            try:
                code = frame.f_code
                source = inspect.getsource(code)
                lineno_in_orig = source.split("\n").index(errorline)
                if lineno_in_orig >= 0:
                    print "probable error position:"
                    print "file:", code.co_filename
                    print "line:", lineno_in_orig + code.co_firstlineno + 1
            except (IOError, ValueError):
                # source unavailable or line not found: best-effort only
                pass
            raise exc
        t = t.visit(TreeOptimizer())
        visitor = ParserBuilder()
        t.visit(visitor)
        pcls = visitor.make_parser()
        # attributes that must never be copied from the generated class
        forbidden = dict.fromkeys(("__weakref__ __doc__ "
                                   "__dict__ __module__").split())
        initthere = "__init__" in dct
        # NOTE(review): `initthere` is computed but never used below --
        # looks like leftover from an earlier version; verify.
        #XXX XXX XXX
        if 'BacktrackException' not in frame.f_globals:
            raise Exception("must import BacktrackException")
        if 'Status' not in frame.f_globals:
            raise Exception("must import Status")
        result = type.__new__(cls, name_, bases, dct)
        for key, value in pcls.__dict__.iteritems():
            if isinstance(value, type):
                value.__module__ = result.__module__ #XXX help the annotator
            if isinstance(value, type(lambda: None)):
                # rebind the generated function to the caller's globals so
                # names like BacktrackException resolve there
                value = new.function(value.func_code, frame.f_globals)
            if not hasattr(result, key) and key not in forbidden:
                setattr(result, key, value)
        if result.__init__ is object.__init__:
            result.__init__ = pcls.__dict__['__init__']
        # always expose the generated initializer under a stable name
        result.init_parser = pcls.__dict__['__init__']
        result._code = visitor.get_code()
        return result
class PackratParser(object):
    # NOTE: deliberately no class docstring here -- MetaPackratParser
    # treats a non-None __doc__ as packrat grammar source and would try
    # to compile it.  Subclasses put their grammar in the docstring.
    __metaclass__ = MetaPackratParser

    # helper types the generated code refers to via self._...
    _ErrorInformation = ErrorInformation
    _BacktrackException = BacktrackException

    def __chars__(self, chars):
        # Match the literal string `chars` at the current position,
        # advancing past it; backtrack (with position info) on mismatch
        # or when the input ends too early.
        #print '__chars__(%s)' % (chars, ), self._pos
        try:
            for i in range(len(chars)):
                if self._inputstream[self._pos + i] != chars[i]:
                    raise BacktrackException(
                        self._ErrorInformation(self._pos, [chars]))
            self._pos += len(chars)
            return chars
        except IndexError:
            # ran off the end of the input
            raise BacktrackException(
                self._ErrorInformation(self._pos, [chars]))

    def __any__(self):
        # Match any single character; backtrack at end of input.
        try:
            result = self._inputstream[self._pos]
            self._pos += 1
            return result
        except IndexError:
            raise BacktrackException(
                self._ErrorInformation(self._pos, ['anything']))

    def _combine_errors(self, error1, error2):
        # Merge two ErrorInformation objects, preferring the one that got
        # further into the input; at equal positions the expected-lists
        # are unioned (order-preserving, duplicates removed).
        if error1 is None:
            return error2
        if (error2 is None or error1.pos > error2.pos or
            len(error2.expected) == 0):
            return error1
        elif error2.pos > error1.pos or len(error1.expected) == 0:
            return error2
        expected = []
        already_there = {}
        for ep in [error1.expected, error2.expected]:
            for reason in ep:
                if reason not in already_there:
                    already_there[reason] = True
                    expected.append(reason)
        return ErrorInformation(error1.pos, expected)
def test_generate():
    # Regenerates pypackrat.py next to this module: parses the `syntax`
    # grammar (defined above) with the *previous* generated parser
    # (bootstrap), builds fresh parser source, and overwrites the file.
    f = py.magic.autopath().dirpath().join("pypackrat.py")
    from pypackrat import PyPackratSyntaxParser
    p = PyPackratSyntaxParser(syntax)
    t = p.file()
    t = t.visit(TreeOptimizer())
    visitor = ParserBuilder()
    t.visit(visitor)
    code = visitor.get_code()
    # template: generated Parser source plus glue copying its methods
    # onto PyPackratSyntaxParser at import time
    content = """
from pypy.rlib.parsing.tree import Nonterminal, Symbol
from makepackrat import PackratParser, BacktrackException, Status
%s
class PyPackratSyntaxParser(PackratParser):
    def __init__(self, stream):
        self.init_parser(stream)
forbidden = dict.fromkeys(("__weakref__ __doc__ "
                           "__dict__ __module__").split())
initthere = "__init__" in PyPackratSyntaxParser.__dict__
for key, value in Parser.__dict__.iteritems():
    if key not in PyPackratSyntaxParser.__dict__ and key not in forbidden:
        setattr(PyPackratSyntaxParser, key, value)
PyPackratSyntaxParser.init_parser = Parser.__init__.im_func
""" % (code, )
    print content
    f.write(content)
| Python |
from pypy.tool.uid import uid
# Support for explicit specialization: in code using global constants
# that are instances of SpecTag, code paths are not merged when
# the same variable holds a different SpecTag instance.
class SpecTag(object):
    """Marker base class for specialization tags.

    Global constants that are SpecTag instances keep code paths from
    being merged when a variable holds different instances; _freeze_
    makes instances acceptable as RPython prebuilt constants.
    """
    __slots__ = ()

    def __repr__(self):
        cls = type(self)
        return '%s(0x%x)' % (cls.__name__, uid(self))

    def _freeze_(self):
        return True
class unrolling_int(int, SpecTag):
    # An int that is also a SpecTag: results of + and - are wrapped back
    # into unrolling_int, so the specialization property is preserved
    # through arithmetic (e.g. when counting in an unrolled loop).
    def __add__(self, other):
        return unrolling_int(int.__add__(self, other))
    __radd__ = __add__  # addition is commutative: reuse __add__
    def __sub__(self, other):
        return unrolling_int(int.__sub__(self, other))
    def __rsub__(self, other):
        return unrolling_int(int.__rsub__(self, other))

# canonical starting value for unrolled counting
unrolling_zero = unrolling_int(0)
# ____________________________________________________________
# 'for' iteration over iterables wrapped in an instance
# of unrolling_iterable will be unrolled by the flow space,
# like in:
# names = unrolling_iterable(['a', 'b', 'c'])
# def f(x):
# for name in names:
# setattr(x, name, 0)
class unrolling_iterable(SpecTag):
    """Wrapper making 'for' iteration over the given iterable unrollable
    by the flow space: each iteration is specialized separately."""

    def __init__(self, iterable):
        items = list(iterable)
        self._items = items
        self._head = _unroller(items)

    def __iter__(self):
        return iter(self._items)

    def get_unroller(self):
        """Return the first _unroller node of the chain."""
        return self._head
class _unroller(SpecTag):
    """One node of a lazily-built chain over a list of items."""

    def __init__(self, items, i=0):
        self._items = items
        self._i = i
        self._next = None   # successor node, created on first step()

    def step(self):
        """Return (current item, unroller for the remaining items)."""
        current = self._items[self._i]
        if self._next is None:
            self._next = _unroller(self._items, self._i + 1)
        return current, self._next
| Python |
"""New standard I/O library.
Based on sio.py from Guido van Rossum.
- This module contains various stream classes which provide a subset of the
classic Python I/O API: read(n), write(s), tell(), seek(offset, whence=0),
readall(), readline(), truncate(size), flush(), close(), peek(),
flushable(), try_to_find_file_descriptor().
- This is not for general usage:
* read(n) may return less than n bytes, just like os.read().
* some other methods also have no default parameters.
* close() should be called exactly once and no further operations performed;
there is no __del__() closing the stream for you.
* some methods may raise NotImplementedError.
* peek() returns some (or no) characters that have already been read ahead.
* flushable() returns True/False if flushing that stream is useful/pointless.
- A 'basis stream' provides I/O using a low-level API, like the os, mmap or
socket modules.
- A 'filtering stream' builds on top of another stream. There are filtering
streams for universal newline translation, for unicode translation, and
for buffering.
You typically take a basis stream, place zero or more filtering
streams on top of it, and then top it off with an input-buffering and/or
an output-buffering stream.
"""
import os, sys
from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC
O_BINARY = getattr(os, "O_BINARY", 0)
# (basemode, plus)
# Maps the base mode character ('r'/'w'/'a') and the presence of '+' to
# the corresponding os.open() flag combination.  Note that the 'a' modes
# carry no O_APPEND: appending is approximated by seeking to the end in
# open_path_helper() below.
OS_MODE = {('r', False): O_RDONLY,
           ('r', True): O_RDWR,
           ('w', False): O_WRONLY | O_CREAT | O_TRUNC,
           ('w', True): O_RDWR | O_CREAT | O_TRUNC,
           ('a', False): O_WRONLY | O_CREAT,
           ('a', True): O_RDWR | O_CREAT,
           }
# ____________________________________________________________
def replace_crlf_with_lf(s):
    """Return `s` with every "\\r\\n" pair and every lone "\\r" replaced
    by a single "\\n"."""
    pieces = s.split("\r")
    out = [pieces[0]]
    for piece in pieces[1:]:
        # a piece starting with "\n" means the separator was "\r\n":
        # drop that "\n" so the pair collapses to one newline
        if piece.startswith("\n"):
            out.append(piece[1:])
        else:
            out.append(piece)
    return "\n".join(out)
def replace_char_with_str(string, c, s):
    """Return `string` with every occurrence of the character `c`
    replaced by the string `s`."""
    parts = string.split(c)
    return s.join(parts)
def open_file_as_stream(path, mode="r", buffering=-1):
    """Open `path` with a file-like mode string and return it wrapped in
    the appropriate tower of buffering/translation streams."""
    flags, universal, reading, writing, basemode = decode_mode(mode)
    base = open_path_helper(path, flags, basemode == "a")
    return construct_stream_tower(base, buffering, universal,
                                  reading, writing)
def fdopen_as_stream(fd, mode, buffering):
    """Wrap an already-open file descriptor in a stream tower.

    XXX the mode string is not checked against how `fd` was actually
    opened; incompatible modes give funny results.
    """
    flags, universal, reading, writing, basemode = decode_mode(mode)
    base = DiskFile(fd)
    return construct_stream_tower(base, buffering, universal,
                                  reading, writing)
def open_path_helper(path, os_flags, append):
    """Open `path` with the given os.open flags and return a DiskFile.

    Append mode is emulated by seeking to the end after opening (the
    OS_MODE table carries no O_APPEND).
    """
    # XXX for now always return DiskFile
    fd = os.open(path, os_flags, 0666)
    if append:
        try:
            os.lseek(fd, 0, 2)   # 2 == SEEK_END
        except OSError:
            # XXX does this pass make sense?
            pass
    return DiskFile(fd)
def decode_mode(mode):
    """Decode a file mode string.

    Returns a 5-tuple (os_flags, universal, reading, writing, basemode)
    where basemode is the leading 'r', 'w' or 'a' character.
    """
    if mode[0] == 'U':
        # plain 'U' means universal-newline reading
        mode = 'r' + mode
    basemode = mode[0]   # 'r', 'w' or 'a'
    plus = universal = binary = False
    for ch in mode[1:]:
        if ch == '+':
            plus = True
        elif ch == 'U':
            universal = True
        elif ch == 'b':
            binary = True
        else:
            # unknown modifier: stop scanning (original behavior)
            break
    flag = OS_MODE[basemode, plus]
    if binary or universal:
        flag |= O_BINARY
    reading = basemode == 'r' or plus
    writing = basemode != 'r' or plus
    return flag, universal, reading, writing, basemode
def construct_stream_tower(stream, buffering, universal, reading, writing):
    """Stack the filtering/buffering streams onto a basis `stream`.

    buffering: 0 = none, 1 = line-buffered output, anything else =
    default or explicit buffer size.  Newline translation (when
    `universal`) is placed on top of the buffering layers.
    """
    if buffering == 0:   # no buffering
        pass
    elif buffering == 1:   # line-buffering
        if writing:
            stream = LineBufferingOutputStream(stream)
        if reading:
            stream = BufferingInputStream(stream)

    else:   # default or explicit buffer sizes
        if buffering is not None and buffering < 0:
            buffering = -1   # normalize "use class default"
        if writing:
            stream = BufferingOutputStream(stream, buffering)
        if reading:
            stream = BufferingInputStream(stream, buffering)

    if universal:   # Wants universal newlines
        # output translation only needed when the platform separator
        # differs from '\n'; input translation is always applied
        if writing and os.linesep != '\n':
            stream = TextOutputFilter(stream)
        if reading:
            stream = TextInputFilter(stream)
    return stream
class StreamError(Exception):
    """Raised for stream-level usage errors (e.g. bad seek arguments)."""

    def __init__(self, message):
        # FIX: forward to Exception so str(exc) and exc.args carry the
        # text; previously only .message was set and str(exc) was empty.
        Exception.__init__(self, message)
        self.message = message
class Stream(object):
    """Base class for streams.  Provides a default implementation of
    some methods."""

    def read(self, n):
        raise NotImplementedError

    def write(self, data):
        raise NotImplementedError

    def tell(self):
        raise NotImplementedError

    def seek(self, offset, whence):
        raise NotImplementedError

    def readall(self):
        """Read until EOF, doubling the chunk size up to 4 MB."""
        chunks = []
        chunksize = 8192
        while True:
            piece = self.read(chunksize)
            if not piece:
                break
            chunks.append(piece)
            if chunksize < 4194304:   # 4 Megs
                chunksize <<= 1
        return ''.join(chunks)

    def readline(self):
        """Read one line; very inefficient unless there is a peek()."""
        pieces = []
        while True:
            # peek() tells us how many characters can be read without
            # running past an end-of-line
            ahead = self.peek()
            newline_at = ahead.find("\n")
            if newline_at < 0:
                newline_at = len(ahead)
            chunk = self.read(newline_at + 1)
            if not chunk:
                break
            pieces.append(chunk)
            if chunk.endswith('\n'):
                break
        return ''.join(pieces)

    def truncate(self, size):
        raise NotImplementedError

    def flush(self):
        pass

    def flushable(self):
        return False

    def close(self):
        pass

    def peek(self):
        return ''

    def try_to_find_file_descriptor(self):
        return -1

    def getnewlines(self):
        return 0
class DiskFile(Stream):
    """Standard I/O basis stream using os.open/close/read/write/lseek"""

    def __init__(self, fd):
        self.fd = fd   # raw OS-level file descriptor

    def seek(self, offset, whence):
        os.lseek(self.fd, offset, whence)

    def tell(self):
        #XXX for running on top of the cpy objspace. later we want r_longlong
        # lseek(fd, 0, 1) == "seek 0 from current" == report position
        return int(os.lseek(self.fd, 0, 1))

    def read(self, n):
        # may return fewer than n bytes, like os.read()
        return os.read(self.fd, n)

    def write(self, data):
        # os.write() may accept fewer bytes than given: loop until all
        # of `data` has been written
        while data:
            n = os.write(self.fd, data)
            data = data[n:]

    def close(self):
        os.close(self.fd)

    # ftruncate() is not available on win32; pick the implementation at
    # class-creation time
    if sys.platform == "win32":
        def truncate(self, size):
            raise NotImplementedError
    else:
        def truncate(self, size):
            os.ftruncate(self.fd, size)

    def try_to_find_file_descriptor(self):
        return self.fd
# next class is not RPython
class MMapFile(Stream):
    """Standard I/O basis stream using mmap."""

    def __init__(self, fd, mmapaccess):
        """NOT_RPYTHON"""
        self.fd = fd
        self.access = mmapaccess   # an mmap.ACCESS_* constant
        self.pos = 0               # logical file position (the map has none)
        self.remapfile()

    def remapfile(self):
        # (re)create the mapping to cover the file's current size
        import mmap
        size = os.fstat(self.fd).st_size
        self.mm = mmap.mmap(self.fd, size, access=self.access)

    def close(self):
        self.mm.close()
        os.close(self.fd)

    def tell(self):
        return self.pos

    def seek(self, offset, whence):
        # positions are clamped at 0 rather than raising
        if whence == 0:
            self.pos = max(0, offset)
        elif whence == 1:
            self.pos = max(0, self.pos + offset)
        elif whence == 2:
            self.pos = max(0, self.mm.size() + offset)
        else:
            raise StreamError("seek(): whence must be 0, 1 or 2")

    def readall(self):
        filesize = self.mm.size() # Actual file size, may be more than mapped
        n = filesize - self.pos
        data = self.mm[self.pos:]
        if len(data) < n:
            del data
            # File grew since opened; remap to get the new data
            self.remapfile()
            data = self.mm[self.pos:]
        self.pos += len(data)
        return data

    def read(self, n):
        end = self.pos + n
        data = self.mm[self.pos:end]
        if not data:
            # is there more data to read?
            filesize = self.mm.size() #Actual file size, may be more than mapped
            if filesize > self.pos:
                # File grew since opened; remap to get the new data
                self.remapfile()
                data = self.mm[self.pos:end]
        self.pos += len(data)
        return data

    def readline(self):
        # hit is one past the newline, or 0 when no newline was found
        hit = self.mm.find("\n", self.pos) + 1
        if not hit:
            # is there more data to read?
            filesize = self.mm.size() #Actual file size, may be more than mapped
            if filesize > len(self.mm):
                # File grew since opened; remap to get the new data
                self.remapfile()
                hit = self.mm.find("\n", self.pos) + 1
        if hit:
            # Got a whole line
            data = self.mm[self.pos:hit]
            self.pos = hit
        else:
            # Read whatever we've got -- may be empty
            data = self.mm[self.pos:]
            self.pos += len(data)
        return data

    def write(self, data):
        end = self.pos + len(data)
        try:
            self.mm[self.pos:end] = data
        # This can raise IndexError on Windows, ValueError on Unix
        except (IndexError, ValueError):
            # XXX On Unix, this resize() call doesn't work
            self.mm.resize(end)
            self.mm[self.pos:end] = data
        self.pos = end

    def flush(self):
        self.mm.flush()

    def flushable(self):
        import mmap
        return self.access == mmap.ACCESS_WRITE

    def try_to_find_file_descriptor(self):
        return self.fd
# ____________________________________________________________
# Known stream methods and their argument "signatures".  PassThrough()
# below only uses the *length* of each list to build the forwarding
# method's parameter list; the entries themselves are documentation.
STREAM_METHODS = dict([
    ("read", [int]),
    ("write", [str]),
    ("tell", []),
    ("seek", ["index", int]),
    ("readall", []),
    ("readline", []),
    ("truncate", [int]),
    ("flush", []),
    ("flushable", []),
    ("close", []),
    ("peek", []),
    ("try_to_find_file_descriptor", []),
    ("getnewlines", []),
    ])
def PassThrough(meth_name, flush_buffers):
    """Build a method that forwards `meth_name` to self.base.

    When flush_buffers is true, this stream's flush_buffers() is called
    before delegating.  The method is generated with an explicit
    positional signature (v0, v1, ...) taken from STREAM_METHODS so the
    result stays RPython-friendly (no *args).
    """
    if meth_name in STREAM_METHODS:
        signature = STREAM_METHODS[meth_name]
        args = ", ".join(["v%s" % (i, ) for i in range(len(signature))])
    else:
        # unknown method names are a programming error here
        assert 0, "not a good idea"
        args = "*args"   # unreachable fallback
    if flush_buffers:
        code = """def %s(self, %s):
    self.flush_buffers()
    return self.base.%s(%s)
"""
    else:
        code = """def %s(self, %s):
    return self.base.%s(%s)
"""
    d = {}
    exec code % (meth_name, args, meth_name, args) in d
    return d[meth_name]
class BufferingInputStream(Stream):
    """Standard buffering input stream.

    This, and BufferingOutputStream if needed, are typically at the top of
    the stack of streams.
    """

    bigsize = 2**19 # Half a Meg
    bufsize = 2**13 # 8 K

    def __init__(self, base, bufsize=-1):
        self.base = base
        self.do_read = base.read   # function to fill buffer some more
        self.do_tell = base.tell   # return a byte offset
        self.do_seek = base.seek   # seek to a byte offset
        if bufsize == -1:     # Get default from the class
            bufsize = self.bufsize
        self.bufsize = bufsize  # buffer size (hint only)
        self.lines = []         # ready-made lines (sans "\n")
        self.buf = ""           # raw data (may contain "\n")
        # Invariant: readahead == "\n".join(self.lines + [self.buf])
        #            self.lines contains no "\n"
        #            self.buf may contain "\n"

    def flush_buffers(self):
        # Give the read-ahead back to the base stream (by seeking to the
        # logical position) so write/truncate see a consistent state.
        if self.lines or self.buf:
            try:
                self.do_seek(self.tell(), 0)
            except NotImplementedError:
                # unseekable base: keep the read-ahead
                pass
            else:
                self.lines = []
                self.buf = ""

    def tell(self):
        # logical position = base position minus un-consumed read-ahead
        bytes = self.do_tell()  # This may fail
        offset = len(self.buf)
        for line in self.lines:
            offset += len(line) + 1   # +1 for the "\n" stripped by split
        assert bytes >= offset #, (locals(), self.__dict__)
        return bytes - offset

    def seek(self, offset, whence):
        # This may fail on the do_seek() or do_tell() call.
        # But it won't call either on a relative forward seek.
        # Nor on a seek to the very end.
        if whence == 0:
            # absolute seek: drop the read-ahead entirely
            self.do_seek(offset, 0)
            self.lines = []
            self.buf = ""
            return
        if whence == 1:
            if offset < 0:
                # backward relative seek: translate to absolute
                self.do_seek(self.tell() + offset, 0)
                self.lines = []
                self.buf = ""
                return
            # forward relative seek: first consume the read-ahead
            while self.lines:
                line = self.lines[0]
                if offset <= len(line):
                    assert offset >= 0
                    self.lines[0] = line[offset:]
                    return
                # NOTE(review): consuming lines[0] plus its "\n" removes
                # len(line) + 1 read-ahead characters, so "- 1" here looks
                # like it should be "+ 1" -- verify against upstream.
                offset -= len(self.lines[0]) - 1
                del self.lines[0]
            assert not self.lines
            if offset <= len(self.buf):
                assert offset >= 0
                self.buf = self.buf[offset:]
                return
            offset -= len(self.buf)
            self.buf = ""
            try:
                self.do_seek(offset, 1)
            except NotImplementedError:
                # unseekable: skip forward by reading and discarding
                self.read(offset)
            return
        if whence == 2:
            try:
                self.do_seek(offset, 2)
            except NotImplementedError:
                pass
            else:
                self.lines = []
                self.buf = ""
                return
            # Skip relative to EOF by reading and saving only just as
            # much as needed
            data = "\n".join(self.lines + [self.buf])
            total = len(data)
            buffers = [data]
            self.lines = []
            self.buf = ""
            while 1:
                data = self.do_read(self.bufsize)
                if not data:
                    break
                buffers.append(data)
                total += len(data)
                # keep only the last -offset bytes' worth of buffers
                while buffers and total >= len(buffers[0]) - offset:
                    total -= len(buffers[0])
                    del buffers[0]
            cutoff = total + offset
            if cutoff < 0:
                raise StreamError("cannot seek back")
            if buffers:
                buffers[0] = buffers[0][cutoff:]
            self.buf = "".join(buffers)
            self.lines = []
            return
        raise StreamError("whence should be 0, 1 or 2")

    def readall(self):
        # first drain the read-ahead, then read the base until EOF
        self.lines.append(self.buf)
        more = ["\n".join(self.lines)]
        self.lines = []
        self.buf = ""
        bufsize = self.bufsize
        while 1:
            data = self.do_read(bufsize)
            if not data:
                break
            more.append(data)
            bufsize = min(bufsize*2, self.bigsize)   # grow, capped
        return "".join(more)

    def read(self, n):
        assert n >= 0
        if self.lines:
            # See if this can be satisfied from self.lines[0]
            line = self.lines[0]
            if len(line) >= n:
                self.lines[0] = line[n:]
                return line[:n]

            # See if this can be satisfied *without exhausting* self.lines
            k = 0
            i = 0
            for line in self.lines:
                k += len(line)
                if k >= n:
                    lines = self.lines[:i]
                    data = self.lines[i]
                    cutoff = len(data) - (k-n)
                    assert cutoff >= 0
                    lines.append(data[:cutoff])
                    del self.lines[:i]
                    self.lines[0] = data[cutoff:]
                    return "\n".join(lines)
                k += 1   # account for the "\n" that joined this line
                i += 1

            # See if this can be satisfied from self.lines plus self.buf
            if k + len(self.buf) >= n:
                lines = self.lines
                self.lines = []
                cutoff = n - k
                assert cutoff >= 0
                lines.append(self.buf[:cutoff])
                self.buf = self.buf[cutoff:]
                return "\n".join(lines)

        else:
            # See if this can be satisfied from self.buf
            data = self.buf
            k = len(data)
            if k >= n:
                cutoff = len(data) - (k-n)
                assert cutoff >= 0
                assert len(data) >= cutoff
                self.buf = data[cutoff:]
                return data[:cutoff]

        # read-ahead exhausted: flatten it and read more from the base
        lines = self.lines
        self.lines = []
        lines.append(self.buf)
        self.buf = ""
        data = "\n".join(lines)
        more = [data]
        k = len(data)
        while k < n:
            data = self.do_read(max(self.bufsize, n-k))
            k += len(data)
            more.append(data)
            if not data:
                break   # EOF before n bytes were available
        cutoff = len(data) - (k-n)
        assert cutoff >= 0
        if len(data) <= cutoff:
            self.buf = ""
        else:
            # keep the surplus of the last chunk as new read-ahead
            self.buf = data[cutoff:]
            more[-1] = data[:cutoff]
        return "".join(more)

    def readline(self):
        if self.lines:
            return self.lines.pop(0) + "\n"

        # This block is needed because read() can leave self.buf
        # containing newlines
        self.lines = self.buf.split("\n")
        self.buf = self.lines.pop()
        if self.lines:
            return self.lines.pop(0) + "\n"

        if self.buf:
            buf = [self.buf]
        else:
            buf = []
        while 1:
            self.buf = self.do_read(self.bufsize)
            self.lines = self.buf.split("\n")
            self.buf = self.lines.pop()
            if self.lines:
                # found a newline: first split piece completes the line
                buf.append(self.lines.pop(0))
                buf.append("\n")
                break
            if not self.buf:
                break   # EOF without a trailing newline
            buf.append(self.buf)
        return "".join(buf)

    def peek(self):
        # some (not necessarily all) already-read-ahead characters
        if self.lines:
            return self.lines[0] + "\n"
        else:
            return self.buf

    write = PassThrough("write", flush_buffers=True)
    truncate = PassThrough("truncate", flush_buffers=True)
    flush = PassThrough("flush", flush_buffers=True)
    flushable = PassThrough("flushable", flush_buffers=False)
    close = PassThrough("close", flush_buffers=False)
    try_to_find_file_descriptor = PassThrough("try_to_find_file_descriptor",
                                              flush_buffers=False)
class BufferingOutputStream(Stream):
    """Standard buffering output stream.

    This, and BufferingInputStream if needed, are typically at the top of
    the stack of streams.
    """

    bigsize = 2**19 # Half a Meg
    bufsize = 2**13 # 8 K

    def __init__(self, base, bufsize=-1):
        self.base = base
        self.do_write = base.write  # write more data
        self.do_tell = base.tell    # return a byte offset
        if bufsize == -1:     # Get default from the class
            bufsize = self.bufsize
        self.bufsize = bufsize  # buffer size (hint only)
        self.buf = ""           # pending, not-yet-written data

    def flush_buffers(self):
        if self.buf:
            self.do_write(self.buf)
            self.buf = ""

    def tell(self):
        # logical position = base position plus buffered bytes
        return self.do_tell() + len(self.buf)

    def write(self, data):
        buflen = len(self.buf)
        datalen = len(data)
        if datalen + buflen < self.bufsize:
            # still fits: just accumulate
            self.buf += data
        elif buflen:
            # fill the buffer up to bufsize, flush it, then retry with
            # the remainder (which hits one of the other branches)
            slice = self.bufsize - buflen
            assert slice >= 0
            self.buf += data[:slice]
            self.do_write(self.buf)
            self.buf = ""
            self.write(data[slice:])
        else:
            # empty buffer and large data: write it straight through
            self.do_write(data)

    read = PassThrough("read", flush_buffers=True)
    readall = PassThrough("readall", flush_buffers=True)
    readline = PassThrough("readline", flush_buffers=True)
    seek = PassThrough("seek", flush_buffers=True)
    truncate = PassThrough("truncate", flush_buffers=True)
    flush = PassThrough("flush", flush_buffers=True)
    close = PassThrough("close", flush_buffers=True)
    try_to_find_file_descriptor = PassThrough("try_to_find_file_descriptor",
                                              flush_buffers=False)

    def flushable(self):
        return True
class LineBufferingOutputStream(BufferingOutputStream):
    """Line buffering output stream.

    This is typically the top of the stack.
    """

    def write(self, data):
        """Buffer data, then flush every complete line to the base stream."""
        BufferingOutputStream.write(self, data)
        p = self.buf.rfind('\n') + 1
        # rfind() returns -1 when no newline is buffered, so p == 0 then;
        # the previous 'if p >= 0' was always true and issued a useless
        # do_write("") plus a no-op slice on every newline-free write.
        if p > 0:
            self.do_write(self.buf[:p])
            self.buf = self.buf[p:]
# ____________________________________________________________
class CRLFFilter(Stream):
    """Filtering stream for universal newlines.

    TextInputFilter is more general, but this is faster when you don't
    need tell/seek.
    """

    def __init__(self, base):
        self.base = base
        self.do_read = base.read
        self.atcr = False          # last chunk ended with a bare '\r'

    def read(self, n):
        """Read up to n bytes, translating CRLF and CR into LF."""
        data = self.do_read(n)
        if self.atcr:
            # the previous chunk ended in '\r'; a leading '\n' now is the
            # second half of a split "\r\n" pair and must be dropped
            if data.startswith("\n"):
                data = data[1:]
            self.atcr = False
        if "\r" in data:
            self.atcr = data.endswith("\r")   # check before stripping '\r'
            data = replace_crlf_with_lf(data)
        return data

    flush    = PassThrough("flush", flush_buffers=False)
    flushable= PassThrough("flushable", flush_buffers=False)
    close    = PassThrough("close", flush_buffers=False)
    try_to_find_file_descriptor = PassThrough("try_to_find_file_descriptor",
                                              flush_buffers=False)
class TextInputFilter(Stream):
    """Filtering input stream for universal newline translation."""

    def __init__(self, base):
        self.base = base   # must implement read, may implement tell, seek
        self.do_read = base.read
        self.atcr = False  # Set when last char read was \r
        self.buf = ""      # Optional one-character read-ahead buffer
        # Which separators have been seen so far (reported by getnewlines):
        self.CR = False
        self.NL = False
        self.CRLF = False

    def getnewlines(self):
        # bitmask of separators seen: 1 = '\r', 2 = '\n', 4 = '\r\n'
        return self.CR * 1 + self.NL * 2 + self.CRLF * 4

    def read(self, n):
        """Read up to n bytes."""
        # serve the read-ahead byte first, if any
        if self.buf:
            assert not self.atcr
            data = self.buf
            self.buf = ""
        else:
            data = self.do_read(n)

        # The following whole ugly mess is because we need to keep track of
        # exactly which line separators we have seen for self.newlines,
        # grumble, grumble. This has an interesting corner-case.
        #
        # Consider a file consisting of exactly one line ending with '\r'.
        # The first time you read(), you will not know whether it is a
        # CR separator or half of a CRLF separator. Neither will be marked
        # as seen, since you are waiting for your next read to determine
        # what you have seen. But there's no more to read ...

        if self.atcr:
            if data.startswith("\n"):
                # the pending '\r' was half of a "\r\n"; drop the '\n'
                data = data[1:]
                self.CRLF = True
                if not data:
                    data = self.do_read(n)
            else:
                self.CR = True
            self.atcr = False

        # classify every separator fully contained in this chunk
        for i in range(len(data)):
            if data[i] == '\n':
                if i > 0 and data[i-1] == '\r':
                    self.CRLF = True
                else:
                    self.NL = True
            elif data[i] == '\r':
                if i < len(data)-1 and data[i+1] != '\n':
                    self.CR = True

        if "\r" in data:
            self.atcr = data.endswith("\r")   # a trailing '\r' stays pending
            data = replace_crlf_with_lf(data)

        return data

    def readline(self):
        # read in chunks bounded by the base stream's peek() so we never
        # consume bytes beyond the end of the current line
        result = []
        while True:
            # "peeks" on the underlying stream to see how many characters
            # we can safely read without reading past an end-of-line
            peeked = self.base.peek()
            pn = peeked.find("\n")
            pr = peeked.find("\r")
            if pn < 0: pn = len(peeked)
            if pr < 0: pr = len(peeked)
            c = self.read(min(pn, pr) + 1)
            if not c:
                break
            result.append(c)
            if c.endswith('\n'):
                break
        return ''.join(result)

    def seek(self, offset, whence):
        """Seeks based on knowledge that does not come from a tell()
           may go to the wrong place, since the number of
           characters seen may not match the number of characters
           that are actually in the file (where \r\n is the
           line separator). Arithmetics on the result
           of a tell() that moves beyond a newline character may in the
           same way give the wrong result.
        """
        if whence == 1:
            offset -= len(self.buf) # correct for already-read-ahead character
        self.base.seek(offset, whence)
        # any pending '\r' or read-ahead byte is now meaningless
        self.atcr = False
        self.buf = ""

    def tell(self):
        pos = self.base.tell()
        if self.atcr:
            # Must read the next byte to see if it's \n,
            # because then we must report the next position.
            assert not self.buf
            self.buf = self.do_read(1)
            pos += 1
            self.atcr = False
            if self.buf == "\n":
                self.buf = ""
        # subtract the read-ahead byte we are still holding, if any
        return pos - len(self.buf)

    def flush_buffers(self):
        # resolve a pending '\r' first, then try to push the read-ahead
        # byte back to the base stream (best effort: seek may be missing)
        if self.atcr:
            assert not self.buf
            self.buf = self.do_read(1)
            self.atcr = False
            if self.buf == "\n":
                self.buf = ""
        if self.buf:
            try:
                self.base.seek(-len(self.buf), 1)
            except NotImplementedError:
                pass
            else:
                self.buf = ""

    def peek(self):
        # only the read-ahead byte is visible without touching the base
        return self.buf

    write = PassThrough("write", flush_buffers=True)
    truncate = PassThrough("truncate", flush_buffers=True)
    flush = PassThrough("flush", flush_buffers=True)
    flushable = PassThrough("flushable", flush_buffers=False)
    close = PassThrough("close", flush_buffers=False)
    try_to_find_file_descriptor = PassThrough("try_to_find_file_descriptor",
                                              flush_buffers=False)
class TextOutputFilter(Stream):
    """Filtering output stream for universal newline translation."""

    def __init__(self, base, linesep=os.linesep):
        assert linesep in ("\n", "\r\n", "\r")
        self.base = base    # must implement write, may implement seek, tell
        self.linesep = linesep

    def write(self, data):
        """Rewrite every '\n' to the configured separator, then forward."""
        self.base.write(replace_char_with_str(data, "\n", self.linesep))

    tell = PassThrough("tell", flush_buffers=False)
    seek = PassThrough("seek", flush_buffers=False)
    read = PassThrough("read", flush_buffers=False)
    readall = PassThrough("readall", flush_buffers=False)
    readline = PassThrough("readline", flush_buffers=False)
    truncate = PassThrough("truncate", flush_buffers=False)
    flush = PassThrough("flush", flush_buffers=False)
    flushable = PassThrough("flushable", flush_buffers=False)
    close = PassThrough("close", flush_buffers=False)
    try_to_find_file_descriptor = PassThrough("try_to_find_file_descriptor",
                                              flush_buffers=False)
# _________________________________________________
# The following functions are _not_ RPython!
class DecodingInputFilter(Stream):
    """Filtering input stream that decodes an encoded file."""

    def __init__(self, base, encoding="utf8", errors="strict"):
        """NOT_RPYTHON"""
        self.base = base
        self.do_read = base.read
        self.encoding = encoding
        self.errors = errors

    def read(self, n):
        """Read *approximately* n bytes and decode them.

        Under extreme circumstances the result can be longer than n
        characters.  Always returns a unicode string.  Newlines are not
        translated; stack a TextInputFilter for that.
        """
        data = self.do_read(n)
        attempts_left = 9
        while True:
            try:
                return data.decode(self.encoding, self.errors)
            except ValueError:
                # decode() chokes on truncated multibyte sequences; pull
                # extra bytes one at a time, the same retry strategy as
                # codecs.StreamReader
                if attempts_left == 0:
                    raise
                more = self.do_read(1)
                if not more:
                    raise
                data += more
                attempts_left -= 1

    write = PassThrough("write", flush_buffers=False)
    truncate = PassThrough("truncate", flush_buffers=False)
    flush = PassThrough("flush", flush_buffers=False)
    flushable = PassThrough("flushable", flush_buffers=False)
    close = PassThrough("close", flush_buffers=False)
    try_to_find_file_descriptor = PassThrough("try_to_find_file_descriptor",
                                              flush_buffers=False)
class EncodingOutputFilter(Stream):
    """Filtering output stream that writes to an encoded file."""

    def __init__(self, base, encoding="utf8", errors="strict"):
        """NOT_RPYTHON"""
        self.base = base
        self.do_write = base.write
        self.encoding = encoding
        self.errors = errors

    def write(self, chars):
        """Encode chars and forward them to the base stream."""
        if isinstance(chars, str):
            chars = unicode(chars) # Fail if it's not ASCII
        self.do_write(chars.encode(self.encoding, self.errors))

    tell = PassThrough("tell", flush_buffers=False)
    seek = PassThrough("seek", flush_buffers=False)
    read = PassThrough("read", flush_buffers=False)
    readall = PassThrough("readall", flush_buffers=False)
    readline = PassThrough("readline", flush_buffers=False)
    truncate = PassThrough("truncate", flush_buffers=False)
    flush = PassThrough("flush", flush_buffers=False)
    flushable = PassThrough("flushable", flush_buffers=False)
    close = PassThrough("close", flush_buffers=False)
    try_to_find_file_descriptor = PassThrough("try_to_find_file_descriptor",
                                              flush_buffers=False)
| Python |
"""
Usage:
python alarm.py <timeout> <scriptname> <args...>
Run the given script. If the timeout elapses, try interrupting it by
sending KeyboardInterrupts.
"""
import traceback
def _main_with_alarm(finished):
    """Install a watchdog for the script named on the command line.

    Spawns a daemon-ish thread that waits until argv[1] seconds have
    elapsed and then, unless 'finished' has become non-empty, keeps
    sending KeyboardInterrupt to the main thread.  Returns the path of
    the script to execute; also trims sys.argv and extends sys.path so
    the target script sees its own arguments.
    """
    import sys, os
    import time
    import thread

    def timeout_thread(deadline, finished):
        # bind everything we touch to locals: interpreter shutdown may
        # clear module globals while this thread is still running
        stderr = sys.stderr
        interrupt_main = thread.interrupt_main
        sleep = time.sleep
        now = time.time
        while now() < deadline and not finished:
            sleep(1.65123)
        if not finished:
            stderr.write("="*26 + "timedout" + "="*26 + "\n")
            while not finished:
                # send KeyboardInterrupt repeatedly until the main
                # thread dies. Then quit (in case we are on a system
                # where exiting the main thread doesn't kill us too).
                interrupt_main()
                sleep(0.031416)

    deadline = time.time() + float(sys.argv[1])
    thread.start_new_thread(timeout_thread, (deadline, finished))
    del sys.argv[:2]
    sys.path.insert(0, os.path.dirname(sys.argv[0]))
    return sys.argv[0]
# The watchdog polls this list; appending anything makes it stand down.
finished = []
try:
    execfile(_main_with_alarm(finished))
finally:
    finished.append(True)
| Python |
import types
class AbstractMethods(type):
    """Metaclass base: when a class is created, every plain function in
    its namespace is rewrapped with ``cls.decorator`` (supplied by the
    concrete metaclass subclass)."""
    def __new__(cls, cls_name, bases, cls_dict):
        for attr, value in cls_dict.iteritems():
            if isinstance(value, types.FunctionType):
                cls_dict[attr] = cls.decorator(value)
        return type.__new__(cls, cls_name, bases, cls_dict)
class StaticMethods(AbstractMethods):
    """
    Metaclass that turns every plain method into a staticmethod.
    """
    decorator = staticmethod
class ClassMethods(AbstractMethods):
    """
    Metaclass that turns every plain method into a classmethod.
    """
    decorator = classmethod
| Python |
# XXX this does not produce a correct _exceptions anymore because the logic to reconstruct
# type checks is broken
# this script is used for extracting
# the information available for exceptions
# via introspection.
# The idea is to use it once to create
# a template for a re-birth of exceptions.py
import autopath
import types
from pypy.tool.sourcetools import render_docstr
def classOfAttribute(klass, attname):
    """Return the class in klass's inheritance tree whose own __dict__
    defines attname, searching depth-first; None if no class does."""
    if attname in vars(klass):
        return klass
    for parent in klass.__bases__:
        found = classOfAttribute(parent, attname)
        if found:
            return found
    return None
def getAttributes(klass, ignorelist=None):
    """Return the names in dir(klass) that are not listed in ignorelist.

    ignorelist defaults to no names.  (The original used a shared
    mutable ``[]`` default argument -- harmless here since it was only
    read, but a classic trap; use None as the sentinel instead.)
    """
    if ignorelist is None:
        ignorelist = []
    return [name for name in dir(klass) if name not in ignorelist]
def makeExceptionsTemplate(f=None):
def enumClassesInOrder(module):
seen = {}
ordered = []
def enumerateOne(exc):
seen[exc] = 1
for each in exc.__bases__:
if each not in seen:
enumerateOne(each)
ordered.append(exc)
for each in module.__dict__.values():
if isinstance(each, (types.ClassType, type)) and \
each not in seen:
enumerateOne(each)
return ordered
if not f:
f = sys.stdout
import exceptions
print >> f, render_docstr(exceptions, "")
for exc in enumClassesInOrder(exceptions):
name = exc.__name__
bases = exc.__bases__
doc = exc.__doc__
bases = [this.__name__ for this in bases]
bases = ", ".join(bases)
if bases: bases = "(%s)" % bases
ignorelist = "__doc__ __module__".split()
# find out class variables and methods
simple = []
difficult = []
for attname in getAttributes(exc, ignorelist):
if classOfAttribute(exc, attname) is exc:
obj = getattr(exc, attname)
(simple, difficult)[callable(obj)].append( (attname, obj) )
print >> f
print >> f, "class %s%s:" % (name, bases)
if doc:
print >> f, ' """%s"""' % doc
if not (simple or difficult or doc):
print >> f, " pass"
for tup in simple:
print >> f, " %s = %r" % tup
for attname, meth in difficult:
print >> f
func = globals().get("tryGenerate" + attname, None)
if not func:
print >> f, " # please implement %s.%s (%r)" % (name, attname, meth)
else:
try:
for line in func(exc):
print >> f, " " + line
except ValueError, e:
print >> f, " # %s" % e
print >> f, " # please implement %s.%s (%r)" % (name, attname, meth)
def tryGenerate__getitem__(exc):
    """Yield source lines for a __getitem__ method of exception class
    exc, deduced by probing instances; raises ValueError if exc does not
    behave like a plain args-sequence holder."""
    # probe with no args and with a 3-tuple: both must index like args
    for args in (), (1, 2, 3):
        try:
            sample = exc(*args)
        except:
            raise ValueError, "cannot create instance"
        if "args" not in sample.__dict__:
            raise ValueError, "args attribute not found in __dict__"
        if args != sample.args:
            raise ValueError, "instance has modified args"
        for i in range(5):
            try: x = sample[i]
            except IndexError: x = 42
            try: y = args[i]
            except IndexError: y = 42
            if x != y:
                raise ValueError, "args does not behave like a sequence"
    # see whether subscripting still works once 'args' is deleted
    # (if it yields None, the generated code must default to None)
    del sample.args
    try: x = sample[0]
    except: x = 42
    use_default = x is None
    # looks fine so far.
    yield "# auto-generated code, please check carefully!"
    yield "def __getitem__(self, idx):"
    if use_default:
        yield " if not hasattr(self, 'args'):"
        yield " return None"
    yield " return self.args[idx]"
class ProbeObject(object):
    """ this class creates general "any" objects, and
    for the special case of SyntaxError, it can behave
    like a subscriptable object
    """

    def __init__(self, argpos, maxprobe=None):
        self.argpos = argpos       # which constructor argument we stand in for
        self.maxprobe = maxprobe   # largest index we answer; None = unlimited
        self.probed = []           # record of every index subscripted so far

    def __getitem__(self, idx):
        # remember the probe so callers can see how the object was used
        if idx not in self.probed:
            self.probed.append(idx)
        if self.maxprobe is not None and idx > self.maxprobe:
            raise IndexError, "cheat cheat %d" % idx
        return "arg%d_%s" % (self.argpos, idx)

    def __repr__(self):
        if self.probed:
            return "<arg%d:%r>" % (self.argpos, self.probed)
        else:
            return "<arg%d>" % self.argpos

    def __str__(self):
        # make this different from repr!
        return repr(self)[1:-1]

    def __cmp__(self, other):
        # py2 rich-free ordering: compare as the (argpos, probed) pair
        return cmp( (self.argpos, self.probed), other)
def genArgsToTry(argpos):
    """Return candidate values to try as constructor argument number
    argpos: a ProbeObject, a str, a unicode string and a distinctive int."""
    args = [ProbeObject(argpos),
            "arg%d" % argpos, u"arg%d" %argpos, 1000+argpos*10]
    return args
def cartesian(*pools):
    """Yield the cartesian product of the argument sequences as tuples.

    With no arguments, yields a single empty tuple.
    """
    if not pools:
        yield ()
        return
    head, tail = pools[0], pools[1:]
    if not tail:
        for item in head:
            yield (item,)
    else:
        for item in head:
            for rest in cartesian(*tail):
                yield (item,) + rest
def probeArgCount(exc, maxprobe=20):
    """Return (min, max) plausible constructor arity for exc, found by
    calling it with 0..maxprobe-1 arguments and watching what fails."""
    worksmaybe = []
    for i in range(maxprobe):
        try:
            probe = exc(*(i,)*i) # test i-tuple
            worksmaybe.append(i)
        except TypeError, e:
            # only an arity complaint rules out i; any other TypeError
            # means the argument *count* itself was acceptable
            # (NOTE: depends on CPython's exact "function takes " wording)
            if not str(e).startswith("function takes "):
                worksmaybe.append(i)
        except:
            pass
    return min(worksmaybe), max(worksmaybe)
def refreshArgs(tup):
    """Return tup with every ProbeObject replaced by a fresh, unprobed
    clone of the same argpos; all other values pass through unchanged."""
    cleaned = []
    for item in tup:
        if type(item) is ProbeObject:
            item = ProbeObject(item.argpos)  # drop the recorded probes
        cleaned.append(item)
    return tuple(cleaned)
def findAllArgs(exc, maxprobe):
    """Return (minargs, maxargs, res) where res holds one working
    argument tuple for each arity from minargs up to maxargs-1."""
    minargs, maxargs = probeArgCount(exc, maxprobe)
    res = []
    # for minargs args, we need to try combinations
    arglist = tuple([genArgsToTry(i) for i in range(minargs)])
    for args in cartesian(*arglist):
        try:
            probe = exc(*args)
            res.append(args)
            # keep a clean copy to extend with further arguments below
            works = refreshArgs(args)
            break
        except Exception, e:
            continue
    else:
        raise TypeError, "cannot analyse arguments of %s" % exc.__name__
    # for the variable part, don't try combinations
    for i in range(minargs, maxargs):
        for arg in genArgsToTry(i):
            args = works + (arg,)
            try:
                probe = exc(*args)
                res.append(args)
                works = refreshArgs(args)
                break
            except:
                continue
        else:
            raise TypeError, "cannot analyse arguments of %s" % exc.__name__
    return minargs, maxargs, res
def captureAssignments(exc, args):
    """ we wrap a class around the exc class and record attribute access """
    assigned = []
    class WrapExc(exc):
        def __setattr__(self, name, obj):
            # record the assignment, then store it normally
            assigned.append( (name, obj) )
            self.__dict__[name] = obj
    probe = WrapExc(*args)
    # map every value we can recognise back to a source expression
    names = {}
    names[args] = "args"
    for i, arg in enumerate(args):
        names[arg] = "args[%d]" % i
        if not isinstance(arg, ProbeObject):
            continue
        for subidx in arg.probed:
            names[arg[subidx]] = "args[%d][%d]" % (i, subidx)
    def nameof(obj):
        # return (and cache) the source expression that denotes obj
        # NOTE(review): a list value would be unhashable as a dict key
        # here -- presumably only tuples/scalars ever reach this point
        if obj in names:
            return names[obj]
        elif isinstance(obj, (tuple, list)):
            stuff = [nameof(x) for x in obj]
            br = str(type(obj)())   # "()" or "[]": matching bracket pair
            txt = br[0] + ", ".join(stuff) + br[-1]
            names[obj] = txt
        else:
            names[obj] = "%r" % obj
        return names[obj]
    res = []
    for i,(name, obj) in enumerate(assigned):
        if isinstance(obj,ProbeObject) or name == 'args' or obj==None:
            res.append("self.%s = %s" % (name, nameof(obj)))
        else:
            # concrete value assigned: emit a guarded assignment plus a
            # TypeError whose message embeds the offending type at runtime
            res.append("if type(%s) == %s:"%(nameof(obj),repr(type(obj))[7:-2]))
            res.append(" self.%s = %s" % (name, nameof(obj)))
            res.append("else:")
            reason ="argument %i must be %s, not %s"%(i-1,repr(type(obj))[7:-2],'%s')
            reason2=''.join(["%type(","%s"%nameof(obj),")"])
            reason = "'"+ reason+"'" +reason2
            res.append(" raise TypeError(%s)"%(reason))
    return tuple(res)
def tryGenerate__init__(exc, maxprobe=20):
    """Yield source lines for an __init__ method of exc, reconstructed
    from the attribute assignments observed while instantiating it with
    every working argument count."""
    minargs, maxargs, working = findAllArgs(exc, maxprobe)
    # merge assignments in order, record set of arg counts
    foldcases = {}
    for args in working:
        singleprog = captureAssignments(exc, args)
        for tup in enumerate(singleprog):
            # tup is (position-in-program, statement); collect the arg
            # counts under which this exact statement occurred
            foldcases.setdefault(tup, [])
            foldcases[tup].append(len(args))
    # group assignments by set of arg counts and order
    groupassign = {}
    for (order, assignment), argcounts in foldcases.items():
        key = tuple(argcounts)
        # special case: we don't raise errors
        # and always assign to self.args
        if assignment == "self.args = args" and len(key) != maxprobe:
            assignment += " # modified: always assign args, no error check"
            key = tuple(range(maxprobe))
        groupassign.setdefault(key, [])
        groupassign[key].append( (order, assignment) )
    cases = groupassign.items()
    cases.sort()
    yield "# auto-generated code, please check carefully!"
    yield "def __init__(self, *args):"
    if len(cases) > 1 or len(cases[0][0]) != maxprobe:
        yield " argc = len(args)"
    for argcounts, ordered_statements in cases:
        ordered_statements.sort()
        trailer = None
        if len(argcounts) == maxprobe:
            # all counts, no condition
            indent = 1
        else:
            indent = 2
            # is this set of counts a contiguous range?
            dense = tuple(range(argcounts[0], argcounts[-1]+1)) == argcounts
            if len(argcounts) == 1:
                # argcounts is a 1-tuple here, so '%d' consumes its item
                yield " if argc == %d:" % argcounts
                if maxargs == minargs:
                    trailer = [" else:"]
                    err_msg = ""
                    trailer += [" raise TypeError('function takes exactly "+str(argcounts[0])+" arguments (%d given)'%argc)"]
            elif dense and argcounts[0] == 0:
                yield " if argc <= %d:" % argcounts[-1]
            elif dense and argcounts[-1] == maxprobe-1:
                yield " if argc >= %d:" % argcounts[0]
            elif dense:
                yield " if %d <= argc <= %d:" % (argcounts[0], argcounts[-1])
            else:
                yield " if argc in %r:" % (argcounts, )
        if len(ordered_statements)>0:
            for order, line in ordered_statements:
                yield indent * " " + line
        else:
            yield indent * " " + "pass"
        if trailer:
            for line in trailer : yield line
def tryGenerate__str__(exc, maxprobe=20):
    """Yield source lines for a __str__ method of exc: a known
    hand-written one, a simple args-based one, or a last-resort dump."""
    if exc in known__str__:
        # a hand-written implementation exists: copy its source verbatim
        import inspect
        src = inspect.getsource(known__str__[exc])
        for line in src.split("\n"):
            yield line
        return
    minargs, maxargs, working = findAllArgs(exc, maxprobe)
    # checking the default case (well, there are two)
    simple = False
    arg1_methods = []   # "repr"/"str": how the 1-arg case stringifies
    for args in working:
        test = str(exc(*args))
        if len(args) == 0 and test != "":
            break
        if len(args) == 1:
            if test == repr(args[0]):
                arg1_methods.append("repr")
            elif test == str(args[0]):
                arg1_methods.append("str")
            else:
                break
        if len(args) >= 2 and test != repr(args):
            break
    else:
        # every sample matched; 'simple' iff one consistent 1-arg strategy
        simple = arg1_methods and min(arg1_methods) == max(arg1_methods)
    yield "# auto-generated code, please check carefully!"
    if simple:
        yield "def __str__(self):"
        yield " args = self.args"
        yield " argc = len(args)"
        yield " if argc == 0:"
        yield " return ''"
        yield " elif argc == 1:"
        yield " return %s(args[0])" % arg1_methods.pop()
        yield " else:"
        yield " return str(args)"
        return
    # no idea how I should do this
    probe = exc(*working[0])
    dic = probe.__dict__
    # drop dunder entries so only interesting attributes get dumped
    for key in dic.keys():
        if key.startswith("__") and key.endswith("__"):
            del dic[key]
    yield "def __str__(self):"
    yield " # this is a bad hack, please supply an implementation"
    yield " res = ' '.join(["
    for key in dic.keys():
        yield " '%s=' + str(getattr(self, '%s', None))," % (key, key)
    yield " ])"
    yield " return res"
# Maps exception class -> a hand-written __str__ whose source is copied
# verbatim (via inspect.getsource in tryGenerate__str__) into the
# generated template.  Keep the function bodies below comment-free:
# anything inside them would be copied into the output too.
known__str__ = {}

# SyntaxError
def __str__(self):
    if type(self.msg) is not str:
        return self.msg
    buffer = self.msg
    have_filename = type(self.filename) is str
    have_lineno = type(self.lineno) is int
    if have_filename or have_lineno:
        import os
        fname = os.path.basename(self.filename or "???")
        if have_filename and have_lineno:
            buffer = "%s (%s, line %ld)" % (self.msg, fname, self.lineno)
        elif have_filename:
            buffer ="%s (%s)" % (self.msg, fname)
        elif have_lineno:
            buffer = "%s (line %ld)" % (self.msg, self.lineno)
    return buffer

known__str__[SyntaxError] = __str__
# EnvironmentError -- copied verbatim into the generated template via
# inspect.getsource, so the function body stays comment-free
def __str__(self):
    if self.filename is not None:
        return "[Errno %s] %s: %s" % (self.errno,
                                      self.strerror,
                                      self.filename)
    if self.errno and self.strerror:
        return "[Errno %s] %s" % (self.errno, self.strerror)
    return StandardError.__str__(self)

known__str__[EnvironmentError] = __str__
if __name__ == "__main__":
    # render the template into pypy/lib/_exceptions.pre.py
    import pypy, os
    prefix = os.path.dirname(pypy.__file__)
    libdir = os.path.join(prefix, "lib")
    fname = "_exceptions.pre.py"
    fpath = os.path.join(libdir, fname)
    makeExceptionsTemplate(file(fpath, "w"))
| Python |
"""disassembler of Python byte code into mnemonics.
XXX this only works for python-2.3 because of the linenumber
optimization
"""
import autopath
import sys
from pypy.tool import stdlib_opcode
from pypy.tool.stdlib_opcode import *
__all__ = ["dis","pydisassemble","distb","disco"] + stdlib_opcode.__all__

# opcode number of EXTENDED_ARG; pydis() uses it to fold the extension
# word into the following instruction's 32-bit oparg
EXTENDED_ARG = stdlib_opcode.opcodedesc.EXTENDED_ARG.index
class Bytecode:
    """One decoded bytecode instruction, tied to its owning DisResult."""

    def __init__(self, disresult, bytecodeindex, oparg, lineno):
        self.disresult = disresult   # owning DisResult (gives access to the code object)
        self.index = bytecodeindex   # byte offset of this instruction in co_code
        self.op = ord(disresult.code.co_code[self.index])
        self.name = opname[self.op]
        self.oparg = oparg           # decoded argument, or None for argless opcodes
        self.lineno = lineno

    def __eq__(self, other):
        # lineno is not part of the comparison
        return (self.__class__ == other.__class__ and
                self.index == other.index and
                self.op == other.op and
                self.name == other.name and
                self.oparg == other.oparg)

    def __ne__(self, other):
        return not (self == other)

    def reprargstring(self, space = None):
        """ return a string representation of any arguments. (empty for no args)"""
        oparg = self.oparg
        if oparg is None:
            return ''
        co = self.disresult.code
        op = self.op
        s = repr(oparg).rjust(5) + " "
        # annotate the numeric argument according to the opcode family
        if op in hasconst:
            consts = self.get_consts(space)
            s += '(' + consts[oparg] + ')'
        elif op in hasname:
            s += '(' + co.co_names[oparg] + ')'
        elif op in hasjrel:
            s += '(to ' + repr(self.index + oparg) + ')'
        elif op in haslocal:
            s += '(' + co.co_varnames[oparg] + ')'
        elif op in hascompare:
            s += '(' + cmp_op[oparg] + ')'
        elif op in hasfree:
            #if free is None:
            free = co.co_cellvars + co.co_freevars
            s += '(' + free[oparg] + ')'
        return s

    def get_consts(self, space=None):
        # support both real code objects and PyCode objects
        co = self.disresult.code
        if hasattr(co, "co_consts"):
            return [repr(c) for c in co.co_consts]
        # PyCode: wrapped constants; repr them via the object space if given
        if space is None:
            return [repr(c) for c in co.co_consts_w]
        r = lambda x: space.str_w(space.repr(x))
        return [r(c) for c in co.co_consts_w]

    def repr_with_space(self, space):
        return self.name + self.reprargstring(space)

    def __repr__(self):
        return self.name + self.reprargstring()
class DisResult:
    """ an instance of this class gets returned for disassembling
    objects/functions/code objects whatever.
    """

    def __init__(self, code):
        self.code = code        # the (Py)code object being disassembled
        self.bytecodes = []     # Bytecode instances, in code order

    def append(self, bytecodeindex, oparg, lineno):
        """ append bytecode anaylsis information ..."""
        bc = Bytecode(self, bytecodeindex, oparg, lineno)
        self.bytecodes.append(bc)

    def getbytecode(self, index):
        """ return bytecode instance matching the given index. """
        for bytecode in self.bytecodes:
            if bytecode.index == index:
                return bytecode
        raise ValueError, "no bytecode found on index %s in code \n%s" % (
            index, pydis(self.code))

    def format(self):
        # render a dis-style listing: lineno, jump-target marker, byte
        # offset, opcode name and decorated argument per instruction
        lastlineno = -1
        labels = findlabels(self.code.co_code)
        lines = []
        for bc in self.bytecodes:
            l = []
            # show the source line number only when it changes
            if bc.lineno != lastlineno:
                lastlineno = bc.lineno
                l.append("%3d" % bc.lineno)
            else:
                l.append(" ")
            l.append(bc.index in labels and ">>" or " ")
            l.append(repr(bc.index).rjust(4))
            l.append(bc.name.ljust(20))
            l.append(bc.reprargstring())
            lines.append(" ".join(l))
        return "\n".join(lines)
    __repr__ = format
def pydis(co):
    """return result of dissassembling a code object. """
    # unwrap functions and PyCode-like wrappers down to the raw code object
    if hasattr(co, 'func_code'):
        co = co.func_code
    if hasattr(co, 'code'):
        co = co.code
    disresult = DisResult(co)
    code = co.co_code

    # decode co_lnotab: alternating (byte delta, line delta) pairs --
    # this layout is CPython-2.3 specific (see module docstring)
    byte_increments = [ord(c) for c in co.co_lnotab[0::2]]
    line_increments = [ord(c) for c in co.co_lnotab[1::2]]
    table_length = len(byte_increments)

    lineno = co.co_firstlineno
    table_index = 0
    # consume leading entries whose byte delta is zero
    while (table_index < table_length
           and byte_increments[table_index] == 0):
        lineno += line_increments[table_index]
        table_index += 1
    addr = 0        # byte offset at which the next line change applies
    line_incr = 0

    n = len(code)
    i = 0
    extended_arg = 0
    free = None
    while i < n:
        c = code[i]
        op = ord(c)
        if i >= addr:
            # we crossed into the next lnotab region: advance the line
            # number and find the next region with a nonzero line delta
            lineno += line_incr
            while table_index < table_length:
                addr += byte_increments[table_index]
                line_incr = line_increments[table_index]
                table_index += 1
                if line_incr:
                    break
            else:
                addr = sys.maxint    # no more line changes
        current_bytecodeindex = i
        i = i+1
        oparg = None
        if op >= HAVE_ARGUMENT:
            # little-endian 16-bit argument, plus any pending EXTENDED_ARG
            oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
            extended_arg = 0
            i = i+2
            if op == EXTENDED_ARG:
                extended_arg = oparg*65536L
        disresult.append(current_bytecodeindex, oparg, lineno)
    assert disresult is not None
    return disresult
def findlabels(code):
    """Detect all offsets in a byte code which are jump targets.

    Return the list of offsets.
    """
    labels = []
    i, n = 0, len(code)
    while i < n:
        op = ord(code[i])
        i = i+1
        if op < HAVE_ARGUMENT:
            continue
        # 16-bit little-endian argument follows the opcode
        oparg = ord(code[i]) + ord(code[i+1])*256
        i = i+2
        if op in hasjrel:
            target = i+oparg          # relative to the next instruction
        elif op in hasjabs:
            target = oparg            # absolute offset
        else:
            continue
        if target >= 0 and target not in labels:
            labels.append(target)
    return labels
| Python |
import sys
from py.__.misc.terminal_helper import ansi_print, get_terminal_width
"""
Black 0;30 Dark Gray 1;30
Blue 0;34 Light Blue 1;34
Green 0;32 Light Green 1;32
Cyan 0;36 Light Cyan 1;36
Red 0;31 Light Red 1;31
Purple 0;35 Light Purple 1;35
Brown 0;33 Yellow 1;33
Light Gray 0;37 White 1;37
"""
# ANSI foreground colour codes used for pixels (see table above)
palette = [39, 34, 35, 36, 31, 33, 32, 37]
colour_range = None # used for debugging
def print_pixel(colour, value_range, invert=1):
    """Print one ANSI-coloured character for the iteration value
    'colour', scaled into the char/palette tables; relies on Python 2
    integer division.  invert flips the brightness mapping."""
    global colour_range
    chars = [".", ".", "+", "*", "%", "#"]
    # map colour in [0, value_range) onto an index of the given table
    # NOTE(review): if colour can reach/exceed value_range the index
    # overflows the table -- presumably callers keep colour in range
    idx = lambda chars: (colour+1) * (len(chars) - 1) / value_range
    if invert:
        # wrap the mapping so high values pick low indices
        idx = lambda chars, idx=idx:len(chars) - 1 - idx(chars)
    char = chars[idx(chars)]
    ansi_colour = palette[idx(palette)]
    ansi_print(char, ansi_colour, newline=False, flush=True)
    #if colour_range is None:
    #    colour_range = [colour, colour]
    #else:
    #    colour_range = [min(colour_range[0], colour), max(colour_range[1], colour)]
class Mandelbrot:
    """Pixel-by-pixel Mandelbrot renderer: generate() yields one
    (ix, iy, colour) triple per character cell.  The view is centred on
    (x_pos, y_pos); 'distance' is the viewed width in the complex plane."""

    def __init__ (self, width=100, height=28, x_pos=-0.5, y_pos=0, distance=6.75):
        self.xpos = x_pos
        self.ypos = y_pos
        # terminal cells are taller than wide, so squash the x step
        aspect_ratio = 1/3.
        factor = float(distance) / width # lowering the distance will zoom in
        self.xscale = factor * aspect_ratio
        self.yscale = factor
        self.iterations = 170
        self.x = width
        self.y = height
        self.z0 = complex(0, 0)

    def init(self):
        # precompute the complex coordinate of every column and row
        self.reset_lines = False
        xmin = self.xpos - self.xscale * self.x / 2
        ymin = self.ypos - self.yscale * self.y / 2
        self.x_range = [xmin + self.xscale * ix for ix in range(self.x)]
        self.y_range = [ymin + self.yscale * iy for iy in range(self.y)]
        #print "x", self.x_range[0], self.x_range[-1]
        #print "y", self.y_range[0], self.y_range[-1]

    def reset(self, cnt):
        # ask the running generate() to restart its line, skipping cnt rows
        self.reset_lines = cnt

    def generate(self):
        """Yield (ix, iy, colour) for every pixel, row by row."""
        self.reset_lines = False
        iy = 0
        while iy < self.y:
            ix = 0
            while ix < self.x:
                c = complex(self.x_range[ix], self.y_range[iy])
                z = self.z0
                colour = 0
                mind = 2    # smallest |z| seen before (possible) escape
                for i in range(self.iterations):
                    z = z * z + c
                    d = abs(z)
                    if d >= 2:
                        # escaped: colour derived from the closest approach
                        colour = min(int(mind / 0.007), 254) + 1
                        break
                    else:
                        mind = min(d, mind)
                yield ix, iy, colour
                # honour reset() requests made while we were suspended
                if self.reset_lines is not False: # jump to the beginning of the line
                    iy += self.reset_lines
                    do_break = bool(self.reset_lines)
                    self.reset_lines = False
                    if do_break:
                        break
                    ix = 0
                else:
                    ix += 1
            iy += 1
class Driver(object):
    """Drives a Mandelbrot generator one pixel per dot() call, cycling
    through zoom_locations and adapting to terminal-width changes."""

    zoom_locations = [
        # x, y, "distance", range
        (0.37865401, 0.669227668, 0.04, 111),
        (-1.15, -0.28, 0.9, 94),
        (-1.15, -0.28, 0.3, 58),
        (-1.15, -0.28, 0.05, 26),
    ]

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.zoom_location = -1    # index into zoom_locations, advanced on restart
        self.colour_range = 256
        self.invert = True
        self.init()

    def init(self):
        # (re)build the generator for the current terminal width
        self.width = get_terminal_width()
        self.mandelbrot = Mandelbrot(width=(self.width or 1), **self.kwargs)
        self.mandelbrot.init()
        self.gen = self.mandelbrot.generate()

    def reset(self, cnt=0):
        """ Resets to the beginning of the line and drops cnt lines internally. """
        self.mandelbrot.reset(cnt)

    def catchup(self):
        """ Fills the current line. """
        x = 0
        while x != self.width - 1:
            x, y, c = self.gen.next()
            print_pixel(c, self.colour_range, self.invert)
        print >>sys.stderr

    def restart(self):
        """ Restarts the current generator. """
        print >>sys.stderr
        self.init()

    def dot(self):
        """ Emits a colourful character. """
        x = c = 0
        try:
            x, y, c = self.gen.next()
            if x == 0:
                # at a line start, re-check whether the terminal resized
                width = get_terminal_width()
                if width != self.width:
                    self.init()
        except StopIteration:
            # image finished: advance to the next zoom location and restart
            kwargs = self.kwargs
            self.zoom_location += 1
            self.zoom_location %= len(self.zoom_locations)
            loc = self.zoom_locations[self.zoom_location]
            kwargs.update({"x_pos": loc[0], "y_pos": loc[1], "distance": loc[2]})
            self.colour_range = loc[3]
            #global colour_range
            #print colour_range, loc[2]
            #colour_range = None
            return self.restart()
        print_pixel(c, self.colour_range, self.invert)
        if x == self.width - 1:
            print >>sys.stderr
if __name__ == '__main__':
    # demo loop: emit pixels at a jittered pace
    import random
    from time import sleep
    d = Driver()
    for x in xrange(15000):
        sleep(random.random() / 300)
        d.dot()
        if 0 and random.random() < 0.01:   # disabled demo of reset()
            d.catchup()
            print "WARNING! " * 3
            d.reset(1)
            # print "R",
        if 0 and random.random() < 0.01:
            # disabled; NOTE(review): Driver defines no jump() method,
            # so this branch would crash if ever enabled
            string = "WARNING! " * 3
            d.jump(len(string))
            print string,
| Python |
"""
Two magic tricks for classes:
class X:
__metaclass__ = extendabletype
...
# in some other file...
class __extend__(X):
... # and here you can add new methods and class attributes to X
Mostly useful together with the second trick, which lets you build
methods whose 'self' is a pair of objects instead of just one:
class __extend__(pairtype(X, Y)):
attribute = 42
def method((x, y), other, arguments):
...
pair(x, y).attribute
pair(x, y).method(other, arguments)
This finds methods and class attributes based on the actual
class of both objects that go into the pair(), with the usual
rules of method/attribute overriding in (pairs of) subclasses.
For more information, see test_pairtype.
"""
class extendabletype(type):
    """A type with a syntax trick: 'class __extend__(t)' actually extends
    the definition of 't' instead of creating a new subclass."""
    def __new__(cls, name, bases, dict):
        if name != '__extend__':
            # an ordinary class: build it normally
            return super(extendabletype, cls).__new__(cls, name, bases, dict)
        # graft every attribute (except __module__) onto each base class
        for target in bases:
            for key, value in dict.items():
                if key == '__module__':
                    continue
                # XXX do we need to provide something more for pickling?
                setattr(target, key, value)
        return None
def pair(a, b):
    """Return a pair object."""
    cls = pairtype(a.__class__, b.__class__)
    return cls((a, b))    # a pairtype is a subclass of tuple
pairtypecache = {}

def pairtype(cls1, cls2):
    """type(pair(a,b)) is pairtype(a.__class__, b.__class__)."""
    key = cls1, cls2
    try:
        return pairtypecache[key]
    except KeyError:
        name = 'pairtype(%s, %s)' % (cls1.__name__, cls2.__name__)
        # the pair lattice mirrors both classes' inheritance lattices
        basetypes = ([pairtype(base1, cls2) for base1 in cls1.__bases__] +
                     [pairtype(cls1, base2) for base2 in cls2.__bases__])
        bases = tuple(basetypes) or (tuple,)  # 'tuple': ultimate base
        result = pairtypecache[key] = extendabletype(name, bases, {})
        return result
| Python |
""" error handling features, just a way of displaying errors
"""
from pypy.tool.ansi_print import ansi_log, raise_nicer_exception
from pypy.objspace.flow.model import Constant, Variable
import sys
import py
log = py.log.Producer("error")
py.log.setconsumer("error", ansi_log)

# toggles for how much context the error reports below include
SHOW_TRACEBACK = False
SHOW_ANNOTATIONS = True
SHOW_DEFAULT_LINES_OF_CODE = 0
from pypy.interpreter.pytraceback import offset2lineno
import traceback
def source_lines(graph, block, operindex=None, offset=None, long=False, \
                 show_lines_of_code=SHOW_DEFAULT_LINES_OF_CODE):
    """Return source lines relevant to 'block' (or byte 'offset') in
    'graph', with an '==> ' marker on the interesting line and
    show_lines_of_code lines of surrounding context."""
    if block is not None:
        if block is graph.returnblock:
            return ['<return block>']
    try:
        source = graph.source
    except AttributeError:
        return ['no source!']
    else:
        graph_lines = source.split("\n")
        # determine (first, last) source line of interest and, optionally,
        # 'here' -- the single line to mark
        if offset is not None:
            linestart = offset2lineno(graph.func.func_code, offset)
            linerange = (linestart, linestart)
            here = None
        else:
            if block is None or not block.operations:
                return []
            def toline(operindex):
                return offset2lineno(graph.func.func_code, block.operations[operindex].offset)
            if operindex is None:
                linerange = (toline(0), toline(-1))
                if not long:
                    return ['?']
                here = None
            else:
                operline = toline(operindex)
                if long:
                    # whole block, with the specific operation marked
                    linerange = (toline(0), toline(-1))
                    here = operline
                else:
                    linerange = (operline, operline)
                    here = None
        lines = ["Happened at file %s line %d" % (graph.filename, here or linerange[0]), ""]
        for n in range(max(0, linerange[0]-show_lines_of_code), \
                       min(linerange[1]+1+show_lines_of_code, len(graph_lines)+graph.startline)):
            if n == here:
                prefix = '==> '
            else:
                prefix = ' '
            lines.append(prefix + graph_lines[n-graph.startline])
        lines.append("")
        return lines
class FlowingError(Exception):
    """Raised when flow-graph construction cannot proceed."""
class AnnotatorError(Exception):
    """Raised when annotation of a graph fails."""
class NoSuchAttrError(Exception):
    """Raised when a looked-up attribute does not exist."""
def gather_error(annotator, block, graph):
    """Build a multi-line report explaining why *block* is blocked.

    The offending operation comes from the annotator's debug info when
    model.DEBUG is set; otherwise it is guessed as the first operation
    whose result variable has no binding.  Source context and known
    variable annotations are appended.
    """
    msg = [""]
    msg.append('-+' * 30)
    from pypy.annotation import model
    msg.append("Blocked block -- operation cannot succeed")
    if model.DEBUG:
        _, _, operindex = annotator.why_not_annotated[block][1].break_at
    else:
        # guess the blocked operation by the fact that its return value is
        # not annotated
        for operindex in range(len(block.operations)):
            if block.operations[operindex].result not in annotator.bindings:
                break
        else:
            operindex = None
    if operindex is not None:
        oper = block.operations[operindex]
        msg.append(" " + str(oper))
    else:
        oper = None
        msg.append(" (inconsistency - the block is fully annotated??)")
    msg += source_lines(graph, block, operindex, long=True)
    if oper is not None:
        if SHOW_ANNOTATIONS:
            msg.append("Known variable annotations:")
            for arg in oper.args + [oper.result]:
                if isinstance(arg, Variable):
                    # a variable may legitimately have no binding yet
                    try:
                        msg.append(" " + str(arg) + " = " + str(annotator.binding(arg)))
                    except KeyError:
                        pass
    if model.DEBUG and SHOW_TRACEBACK:
        msg.extend(traceback.format_exception(*annotator.why_not_annotated[block]))
    return "\n".join(msg)
def format_blocked_annotation_error(annotator, blocked_blocks):
    """Join the per-block reports for every blocked (block, graph) pair."""
    reports = [gather_error(annotator, block, graph)
               for block, graph in blocked_blocks.items()]
    return '\n'.join(reports)
def format_someobject_error(annotator, position_key, what, s_value, called_from_graph, binding=""):
    """Describe how the annotation of *what* degenerated to SomeObject().

    Includes the offending operation (or block input arguments), source
    context, the calling graph, the SomeObject origin and the previous
    annotation, whenever each piece of information is available.
    """
    lines = ["annotation of %r degenerated to SomeObject()" % (what,)]
    add = lines.append
    if position_key is not None:
        graph, block, operindex = position_key
        if operindex is None:
            add("at the start of the block with input arguments:")
            for v in block.inputargs:
                add("%8s: %s" % (v, annotator.binding(v, "(no annotation)")))
        else:
            add(str(block.operations[operindex]))
        add('')
        lines.extend(source_lines(graph, block, operindex, long=True))
    if called_from_graph is not None:
        add(".. called from %r" % (called_from_graph,))
    if s_value.origin is not None:
        add(".. SomeObject() origin: %s" % (annotator.whereami(s_value.origin),))
    add("Previous annotation:")
    add(" " + str(binding))
    return "\n".join(lines)
def format_global_error(graph, offset, message):
    """Render *message* followed by the source line at *offset* in *graph*."""
    parts = ['-+' * 30, message]
    parts.extend(source_lines(graph, None, offset=offset))
    return "\n".join(parts)
def debug(drv, use_pdb=True):
    """Report the pending exception of translation driver *drv* and drop
    into the PdbPlusShow debugger.

    Formats sys.exc_info() (including the annotator block attached to the
    exception, if any), logs it, then starts the interactive debugger when
    *use_pdb* is true.
    """
    # XXX unify some code with pypy.translator.goal.translate
    from pypy.translator.tool.pdbplus import PdbPlusShow
    from pypy.translator.driver import log
    t = drv.translator
    class options:
        huge = 100

    tb = None
    import traceback
    errmsg = ["Error:\n"]
    exc, val, tb = sys.exc_info()
    errmsg.extend([" %s" % line for line in traceback.format_exception(exc, val, [])])
    block = getattr(val, '__annotator_block', None)
    if block:
        # capture t.about()'s output into the error message
        class FileLike:
            def write(self, s):
                errmsg.append(" %s" % s)
        errmsg.append("Processing block:\n")
        t.about(block, FileLike())
    log.ERROR(''.join(errmsg))
    log.event("start debugger...")

    def server_setup(port=None):
        if port is not None:
            from pypy.translator.tool.graphserver import run_async_server
            # bug fix: this nested function has no 'self'; the old code's
            # 'self.async_server = run_async_server(...)' raised NameError
            # whenever a port was given.
            serv_start, serv_show, serv_stop = run_async_server(t, options, port)
            return serv_start, serv_show, serv_stop
        else:
            from pypy.translator.tool.graphserver import run_server_for_inprocess_client
            return run_server_for_inprocess_client(t, options)

    if use_pdb:
        pdb_plus_show = PdbPlusShow(t)
        pdb_plus_show.start(tb, server_setup, graphic=True)
| Python |
from __future__ import generators
import autopath
import py
from py.__.magic import exprinfo
from pypy.interpreter import gateway
from pypy.interpreter.error import OperationError
from py.__.test.outcome import ExceptionFailure
# ____________________________________________________________
class AppCode(object):
    """Interp-level view of an app-level code object, mimicking py.code.Code."""

    def __init__(self, space, pycode):
        # pull the interesting co_* attributes across the wrapping boundary
        self.code = space.unwrap(space.getattr(pycode, space.wrap('co_code')))
        self.w_file = space.getattr(pycode, space.wrap('co_filename'))
        self.name = space.getattr(pycode, space.wrap('co_name'))
        self.firstlineno = space.unwrap(space.getattr(pycode, space.wrap('co_firstlineno')))
        #try:
        #    self.path = space.unwrap(space.getattr(self.w_file, space.wrap('__path__')))
        #except OperationError:
        #    self.path = space.unwrap(space.getattr(
        self.path = py.path.local(space.str_w(self.w_file))
        self.space = space

    def fullsource(self):
        # NOTE(review): str objects have no __source__, so the first branch
        # presumably always raises AttributeError and we fall back to
        # re-reading the file from disk -- confirm before relying on it.
        try:
            return self.space.str_w(self.w_file).__source__
        except AttributeError:
            return py.code.Source(self.path.read(mode="rU"))
    fullsource = property(fullsource, None, None, "Full source of AppCode")
class AppFrame(py.code.Frame):
    """Interp-level view of an app-level frame, mimicking py.code.Frame."""

    def __init__(self, space, pyframe):
        self.code = AppCode(space, \
            space.unwrap(space.getattr(pyframe, space.wrap('f_code'))))
        #self.code = py.code.Code(pyframe.pycode)
        # py.code uses 0-based line numbers; f_lineno is 1-based
        self.lineno = space.unwrap(space.getattr(pyframe, space.wrap('f_lineno'))) - 1
        #pyframe.get_last_lineno() - 1
        self.space = space
        self.w_globals = space.getattr(pyframe, space.wrap('f_globals'))
        self.w_locals = space.getattr(pyframe, space.wrap('f_locals'))
        self.f_locals = self.w_locals   # for py.test's recursion detection

    def eval(self, code, **vars):
        """Evaluate *code* in this frame, injecting *vars* as app-level locals."""
        space = self.space
        for key, w_value in vars.items():
            space.setitem(self.w_locals, space.wrap(key), w_value)
        return space.eval(code, self.w_globals, self.w_locals)

    def exec_(self, code, **vars):
        """Execute *code* in this frame, injecting *vars* as app-level locals."""
        space = self.space
        for key, w_value in vars.items():
            space.setitem(self.w_locals, space.wrap(key), w_value)
        space.exec_(code, self.w_globals, self.w_locals)

    def repr(self, w_value):
        # interp-level repr of a wrapped value
        return self.space.unwrap(self.space.repr(w_value))

    def is_true(self, w_value):
        return self.space.is_true(w_value)
class AppExceptionInfo(py.code.ExceptionInfo):
    """An ExceptionInfo object representing an app-level exception."""

    def __init__(self, space, operr):
        self.space = space
        self.operr = operr
        self.typename = operr.w_type.getname(space, "?")
        self.traceback = AppTraceback(space, self.operr.application_traceback)
        debug_excs = getattr(operr, 'debug_excs', [])
        if debug_excs:
            # keep the first recorded interp-level exc_info around
            self._excinfo = debug_excs[0]

    def exconly(self, tryshort=True):
        return '(application-level) ' + self.operr.errorstr(self.space)

    def errisinstance(self, exc):
        """Match the app-level exception against interp-level class *exc*."""
        clsname = exc.__name__
        # best-effort: if the space has no w_<clsname> attribute, report a
        # non-match instead of propagating the lookup failure
        try:
            w_exc = getattr(self.space, 'w_' + clsname)
        except KeyboardInterrupt:
            raise
        except:
            pass
        else:
            return self.operr.match(self.space, w_exc)
        return False

    def __str__(self):
        return '(application-level) ' + self.operr.errorstr(self.space)
class AppTracebackEntry(py.code.Traceback.Entry):
    """One traceback entry wrapping an app-level 'tb' object."""
    # set by the assertion-reinterpretation machinery, if any
    exprinfo = None

    def __init__(self, space, tb):
        self.frame = AppFrame(space, space.getattr(tb, space.wrap('tb_frame')))
        # py.code uses 0-based line numbers; tb_lineno is 1-based
        self.lineno = space.unwrap(space.getattr(tb, space.wrap('tb_lineno'))) - 1

    def reinterpret(self):
        # XXX we need to solve a general problem: how to prevent
        # reinterpretation from generating a different exception?
        # This problem includes the fact that exprinfo will generate
        # its own long message that looks like
        # OperationError: << [<W_TypeObject(NameError)>: W_StringObj...
        # which is much less nice than the one produced by str(self).
        # XXX this reinterpret() is only here to prevent reinterpretation.
        return self.exprinfo
class AppTraceback(py.code.Traceback):
    """A py.code.Traceback built from an app-level traceback chain."""
    Entry = AppTracebackEntry

    def __init__(self, space, apptb):
        # walk the tb_next chain until app-level None
        l = []
        while apptb is not space.w_None:
            l.append(self.Entry(space, apptb))
            apptb = space.getattr(apptb, space.wrap('tb_next'))
        list.__init__(self, l)
# ____________________________________________________________
def build_pytest_assertion(space):
    """Create a replacement app-level AssertionError class whose __init__
    runs py.test's assertion re-interpretation magic to compute a message."""
    def my_init(space, w_self, __args__):
        "Our new AssertionError.__init__()."
        w_parent_init = space.getattr(w_BuiltinAssertionError,
                                      space.wrap('__init__'))
        space.call_args(w_parent_init, __args__.prepend(w_self))
        framestack = space.getexecutioncontext().framestack
##        # Argh! we may see app-level helpers in the frame stack!
##        # that's very probably very bad...
##        if frame.code.co_name == 'normalize_exception':
##            frame = framestack.top(1)
        # if the assertion provided a message, don't do magic
        args_w, kwargs_w = __args__.unpack()
        if args_w:
            w_msg = args_w[0]
        else:
            # re-interpret the failing assert statement of the top frame
            frame = framestack.top(0)
            runner = AppFrame(space, frame)
            try:
                source = runner.statement
                source = str(source).strip()
            except py.error.ENOENT:
                source = None
            from pypy import conftest
            if source and not conftest.option.nomagic:
                msg = exprinfo.interpret(source, runner, should_fail=True)
                space.setattr(w_self, space.wrap('args'),
                              space.newtuple([space.wrap(msg)]))
                w_msg = space.wrap(msg)
            else:
                w_msg = space.w_None
        space.setattr(w_self, space.wrap('msg'), w_msg)

    # build a new AssertionError class to replace the original one.
    w_BuiltinAssertionError = space.getitem(space.builtin.w_dict,
                                            space.wrap('AssertionError'))
    w_metaclass = space.type(w_BuiltinAssertionError)
    w_init = space.wrap(gateway.interp2app_temp(my_init,
                                                unwrap_spec=[gateway.ObjSpace,
                                                             gateway.W_Root,
                                                             gateway.Arguments]))
    w_dict = space.newdict()
    space.setitem(w_dict, space.wrap('__init__'), w_init)
    return space.call_function(w_metaclass,
                               space.wrap('AssertionError'),
                               space.newtuple([w_BuiltinAssertionError]),
                               w_dict)
def pypyraises(space, w_ExpectedException, w_expr, __args__):
    """A built-in function providing the equivalent of py.test.raises()."""
    args_w, kwds_w = __args__.unpack()
    if space.is_true(space.isinstance(w_expr, space.w_str)):
        # string form: exec the expression in the caller's scope
        if args_w:
            raise OperationError(space.w_TypeError,
                                 space.wrap("raises() takes no argument "
                                            "after a string expression"))
        expr = space.unwrap(w_expr)
        source = py.code.Source(expr)
        frame = space.getexecutioncontext().framestack.top()
        w_locals = frame.getdictscope()
        # work on a copy so the caller's locals are not polluted
        w_locals = space.call_method(w_locals, 'copy')
        for key, w_value in kwds_w.items():
            space.setitem(w_locals, space.wrap(key), w_value)
        try:
            space.exec_(str(source), frame.w_globals, w_locals)
        except OperationError, e:
            if e.match(space, w_ExpectedException):
                return space.sys.call('exc_info')
            raise
    else:
        # callable form: invoke it with the remaining arguments
        try:
            space.call_args(w_expr, __args__)
        except OperationError, e:
            if e.match(space, w_ExpectedException):
                return space.sys.call('exc_info')
            raise
    raise OperationError(space.w_AssertionError,
                         space.wrap("DID NOT RAISE"))
# app-level 'raises' helper exposed to tests through the object space
app_raises = gateway.interp2app_temp(pypyraises,
                                     unwrap_spec=[gateway.ObjSpace,
                                                  gateway.W_Root,
                                                  gateway.W_Root,
                                                  gateway.Arguments])

def pypyskip(space, w_message):
    """skip a test at app-level. """
    msg = space.unwrap(w_message)
    py.test.skip(msg)

# app-level 'skip' helper exposed to tests through the object space
app_skip = gateway.interp2app_temp(pypyskip)
def raises_w(space, w_ExpectedException, *args, **kwds):
    """Interp-level raises(): assert that the call raises an OperationError
    matching the app-level exception class *w_ExpectedException*."""
    try:
        excinfo = py.test.raises(OperationError, *args, **kwds)
        type, value, tb = excinfo._excinfo
        if not value.match(space, w_ExpectedException):
            # not the app-level exception we wanted: re-raise it as-is
            raise type, value, tb
        return excinfo
    except ExceptionFailure, e:
        # point py.test's traceback cutting one level further up
        e.tbindex = getattr(e, 'tbindex', -1) - 1
        raise
def eq_w(space, w_obj1, w_obj2):
    """ return interp-level boolean of eq(w_obj1, w_obj2). """
    w_result = space.eq(w_obj1, w_obj2)
    return space.is_true(w_result)
| Python |
#! /usr/bin/env python
"""
the html test reporter
"""
import sys, os, re
import pprint
import py
from pypy.tool.pytest import result
from pypy.tool.pytest.overview import ResultCache
#
# various interesting path objects
#
html = py.xml.html
NBSP = py.xml.raw(" ")
class HtmlReport(object):
    """Renders the collected test results as a set of HTML pages."""

    def __init__(self, resultdir):
        self.resultcache = ResultCache(resultdir)

    def parselatest(self):
        self.resultcache.parselatest()

    #
    # rendering
    #

    def render_latest_table(self, results):
        """Build the summary table: errors first, then timeouts, then ok;
        ties ordered by execution time."""
        table = html.table(
            [html.th(x, align='left')
             for x in ("failure", "filename", "revision",
                       "user", "platform", "elapsed",
                       "options", "last error line"
                       )],
        )
        r = results[:]
        def f(x, y):
            # rank: ok=1, timeout=2, error=3; negated so errors sort first
            xnum = x.isok() and 1 or (x.istimeout() and 2 or 3)
            ynum = y.isok() and 1 or (y.istimeout() and 2 or 3)
            res = -cmp(xnum, ynum)
            if res == 0:
                return cmp(x['execution-time'], y['execution-time'])
            return res
        r.sort(f)
        for result in r:
            table.append(self.render_result_row(result))
        return table

    def render_result_row(self, result):
        """Render one table row; also records the failure ratio in self.data."""
        dp = py.path.local(result['fspath'])
        options = " ".join([x for x in result.get('options', []) if x!= 'core'])
        if not options:
            options = NBSP
        failureratio = 100 * (1.0 - result.ratio_of_passed())
        self.data[result.testname] = failureratio
        return html.tr(
            html.td("%.2f%%" % failureratio,
                    style = "background-color: %s" % (getresultcolor(result),)),
            html.td(self.render_test_references(result)),
            html.td(result['pypy-revision']),
            html.td(result['userhost'][:15]),
            html.td(result['platform']),
            html.td("%.2fs" % result['execution-time']),
            html.td(options),
            html.td(result.repr_short_error() or NBSP)
        )

    def getrelpath(self, p):
        # path of *p* relative to the directory holding the index page
        return p.relto(self.indexpath.dirpath())

    def render_test_references(self, result):
        """Link to the per-test detail page (written as a side effect)."""
        dest = self.make_single_test_result(result)
        modified = result.ismodifiedtest() and " [mod]" or ""
        return html.div(html.a(result.path.purebasename + modified,
                               href=self.getrelpath(dest)),
                        style="background-color: transparent")

    def make_single_test_result(self, result):
        """Write the detail page for *result* into the .cache directory."""
        cache = self.indexpath.dirpath('.cache', result['userhost'][:15])
        cache.ensure(dir=1)
        dest = cache.join(result.path.basename).new(ext='.html')
        doc = ViewResult(result)
        doc.writetopath(dest)
        return dest

    def getcorelists(self):
        """Split the latest relevant results into (core, non-core) lists."""
        def iscore(result):
            return 'core' in result.get('options', [])
        coretests = []
        noncoretests = []
        for name in self.resultcache.getnames():
            result = self.resultcache.getlatestrelevant(name)
            if iscore(result):
                coretests.append(result)
            else:
                noncoretests.append(result)
        return coretests, noncoretests

    # generate html files
    #
    def makeindex(self, indexpath, detail="PyPy - latest"):
        """Write the index page plus a python-literal 'data' dump next to it."""
        self.indexpath = indexpath
        self.data = {}
        doc = Document(title='pypy test results')
        body = doc.body
        coretests, noncoretests = self.getcorelists()
        body.append(html.h2("%s compliance test results - "
                            "core tests" % detail))
        body.append(self.render_test_summary('core', coretests))
        body.append(self.render_latest_table(coretests))
        body.append(
            html.h2("%s compliance test results - non-core tests" % detail))
        body.append(self.render_test_summary('noncore', noncoretests))
        body.append(self.render_latest_table(noncoretests))
        doc.writetopath(indexpath)
        # dump the collected ratios as an importable python literal
        datapath = indexpath.dirpath().join('data')
        d = datapath.open('w')
        print >>d, "data = ",
        pprint.pprint(self.data, stream=d)
        d.close()
        self.data = None

    def render_test_summary(self, tag, tests):
        """Render the compliancy/passed/failed/timeout percentage table."""
        ok = len([x for x in tests if x.isok()])
        err = len([x for x in tests if x.iserror()])
        to = len([x for x in tests if x.istimeout()])
        numtests = ok + err + to
        assert numtests == len(tests)
        assert numtests
        t = html.table()
        sum100 = numtests / 100.0
        def row(*args):
            return html.tr(*[html.td(arg) for arg in args])
        sum_passed = sum([x.ratio_of_passed() for x in tests])
        compliancy = sum_passed/sum100
        self.data['%s-compliancy' % tag] = compliancy
        t.append(row(html.b("tests compliancy"),
                     html.b("%.2f%%" % (compliancy,))))
        passed = ok/sum100
        self.data['%s-passed' % tag] = passed
        t.append(row("testmodules passed completely", "%.2f%%" % passed))
        failed = err/sum100
        self.data['%s-failed' % tag] = failed
        t.append(row("testmodules (partially) failed", "%.2f%%" % failed))
        timedout = to/sum100
        self.data['%s-timedout' % tag] = timedout
        t.append(row("testmodules timeout", "%.2f%%" % timedout))
        return t
class Document(object):
    """Minimal HTML document wrapper: <html><head/><body/></html>."""

    def __init__(self, title=None):
        self.body = html.body()
        self.head = html.head()
        self.doc = html.html(self.head, self.body)
        if title is not None:
            self.head.append(
                html.meta(name="title", content=title))
        self.head.append(
            html.link(rel="Stylesheet", type="text/css", href="/pypy/default.css"))

    def writetopath(self, p):
        """Serialize the document as UTF-8 into path *p* (must end in .html)."""
        assert p.ext == '.html'
        self.head.append(
            html.meta(name="Content-Type", content="text/html;charset=UTF-8")
        )
        s = self.doc.unicode().encode('utf-8')
        p.write(s)
def getresultcolor(result):
    """Map a test result to its display color.

    green = ok, red = error/failure, blue = timeout, grey = none of those.
    """
    if result.isok():
        color = "#00ee00"
    elif result.iserror():
        color = "#ee0000"
    elif result.istimeout():
        # bug fix: this used to test the bound method object itself
        # ('result.istimeout'), which is always truthy, so the grey
        # fallback below was unreachable.
        color = "#0000ee"
    else:
        color = "#444444"
    return color
class ViewResult(Document):
    """Detail page for a single test result."""

    def __init__(self, result):
        title = "%s testresult" % (result.path.purebasename,)
        super(ViewResult, self).__init__(title=title)
        color = getresultcolor(result)
        self.body.append(html.h2(title,
                                 style="background-color: %s" % color))
        self.body.append(self.render_meta_info(result))
        # attach whichever captured text blocks are present
        for name in ('reportdiff', 'stdout', 'stderr'):
            try:
                text = result.getnamedtext(name)
            except KeyError:
                continue
            self.body.append(html.h3(name))
            self.body.append(html.pre(text))

    def render_meta_info(self, result):
        """Table of the all-lowercase header entries of *result*."""
        t = html.table()
        items = result.items()
        items.sort()
        for name, value in items:
            # show only plain lowercase headers; mixed-case names are
            # internal bookkeeping
            if name.lower() == name:
                t.append(html.tr(
                    html.td(name), html.td(value)))
        return t
class TestOfHtmlReportClass:
    """Disabled self-tests for HtmlReport (skipped unconditionally)."""

    def setup_class(cls):
        py.test.skip('needs move to own test file')
        # NOTE(review): everything below is unreachable after the skip and
        # refers to names (confpath, no-arg HtmlReport()) that do not match
        # the current module -- confirm before re-enabling.
        cls.testresultdir = confpath.testresultdir
        cls.rep = rep = HtmlReport()
        rep.parse_all(cls.testresultdir)

    def test_pickling(self):
        # test pickling of report
        tempdir = py.test.ensuretemp('reportpickle')
        picklepath = tempdir.join('report.pickle')
        picklepath.dump(self.rep)
        x = picklepath.load()
        assert len(x.results) == len(self.rep.results)

    def test_render_latest(self):
        t = self.rep.render_latest_table(self.rep.results)
        assert unicode(t)
# directory containing this module
mydir = py.magic.autopath().dirpath()

def getpicklepath():
    """Path of the (hidden) report pickle file next to this module."""
    return mydir.join('.htmlreport.pickle')
| Python |
from pypy.tool.pytest import result
import sys
class ResultCache:
    """Parses test_*.txt result files and indexes them by test name."""

    def __init__(self, resultdir):
        self.resultdir = resultdir
        # maps test name -> list of parsed results
        self.name2result = {}

    def parselatest(self):
        """Parse every test_*.txt below resultdir (skipping dot-dirs)."""
        def filefilter(p):
            return p.check(fnmatch='test_*.txt', file=1)
        def rec(p):
            return p.check(dotfile=0)
        for x in self.resultdir.visit(filefilter, rec):
            self.parse_one(x)

    def parse_one(self, resultpath):
        """Parse one result file; returns the result, or None on failure."""
        try:
            res = result.ResultFromMime(resultpath)
            ver = res['testreport-version']
            # only report format 1.1/1.1.1 is understood
            if ver != "1.1" and ver != "1.1.1":
                raise TypeError
        except TypeError: # xxx
            print >>sys.stderr, "could not parse %s" % resultpath
            return
        name = res.testname
        print name
        self.name2result.setdefault(name, []).append(res)
        return res

    def getnames(self):
        return self.name2result.keys()

    def getlatest(self, name, timeout=0, error=0, ok=0):
        """Return the result with the highest known pypy revision for *name*.

        If any of the timeout/error/ok flags are set, only results with one
        of the requested outcomes are considered.
        """
        # NOTE(review): 'l' is unused
        l = []
        resultlist = self.name2result[name]
        maxrev = 0
        maxresult = None
        for res in resultlist:
            resrev = res['pypy-revision']
            if resrev == 'unknown':
                continue
            if resrev <= maxrev:
                continue
            if timeout or error or ok:
                if not (timeout and res.istimeout() or
                        error and res.iserror() or
                        ok and res.isok()):
                    continue
            maxrev = resrev
            maxresult = res
        return maxresult

    def getlatestrelevant(self, name):
        # get the latest revision that did not time out.
        return self.getlatest(name, error=1, ok=1) or self.getlatest(name)
| Python |
import py
def skipimporterror(name):
    """py.test-skip the current test if module *name* cannot be imported."""
    if not hasimport(name):
        # hide this helper frame from the skip traceback
        __tracebackhide__ = True
        py.test.skip("cannot import %r module" % (name,))
def hasimport(name):
    """Return True if module *name* is importable, False otherwise."""
    try:
        __import__(name)
        return True
    except ImportError:
        return False
| Python |
import sys
import py
import re
class Result(object):
    """A mapping-like bag of test-run headers plus named text blocks.

    Header names are case-insensitive (stored lowercased); text blocks
    (stdout, stderr, reportdiff, ...) keep their insertion order.
    """

    def __init__(self, init=True):
        self._headers = {}
        self._blocks = {}
        self._blocknames = []
        if init:
            stdinit(self)

    def __setitem__(self, name, value):
        self._headers[name.lower()] = value

    def __getitem__(self, name):
        return self._headers[name.lower()]

    def get(self, name, default):
        # bug fix: lowercase the key the way __getitem__/__setitem__/
        # __delitem__ do, so get('Foo') finds what result['Foo'] = x stored.
        return self._headers.get(name.lower(), default)

    def __delitem__(self, name):
        del self._headers[name.lower()]

    def items(self):
        return self._headers.items()

    def addnamedtext(self, name, text):
        """Attach a text block (e.g. captured stdout) under *name*."""
        assert isinstance(text, basestring)
        assert isinstance(name, str)
        self._blocknames.append(name)
        self._blocks[name] = text

    def getnamedtext(self, name):
        return self._blocks[name]

    def repr_short_error(self):
        """One-line error summary, or None if the run was ok."""
        if not self.isok():
            if 'reportdiff' in self._blocks:
                return "output comparison failed, see reportdiff"
            else:
                text = self.getnamedtext('stderr')
                lines = text.strip().split('\n')
                if lines:
                    return lines[-1]

    def repr_mimemessage(self):
        """Serialize headers and text blocks as a MIME multipart message."""
        from email.MIMEMultipart import MIMEMultipart
        from email.MIMEText import MIMEText

        outer = MIMEMultipart()
        items = self._headers.items()
        items.sort()
        reprs = {}
        for name, value in items:
            assert ':' not in name
            chars = map(ord, name)
            # header names must be printable ASCII
            assert min(chars) >= 33 and max(chars) <= 126
            outer[name] = str(value)
            if not isinstance(value, str):
                # remember the original builtin type so it can be eval'ed back
                typename = type(value).__name__
                assert typename in vars(py.std.__builtin__)
                reprs[name] = typename

        outer['_reprs'] = repr(reprs)

        for name in self._blocknames:
            text = self._blocks[name]
            m = MIMEText(text)
            m.add_header('Content-Disposition', 'attachment', filename=name)
            outer.attach(m)
        return outer

    def grep_nr(self,text,section='stdout'):
        """Return the first number following *text* in *section*, else 0."""
        stdout = self._blocks[section]
        find = re.search('%s(?P<nr>\d+)'%text,stdout)
        if find:
            return float(find.group('nr'))
        return 0.

    def ratio_of_passed(self):
        """Fraction of individual tests that passed (best-effort parse of
        unittest and regrtest style output)."""
        if self.isok():
            return 1.
        elif self.istimeout():
            return 0.
        else:
            nr = self.grep_nr('Ran ')
            if nr > 0:
                return (nr - (self.grep_nr('errors=') + self.grep_nr('failures=')))/nr
            else:
                passed = self.grep_nr('TestFailed: ',section='stderr')
                run = self.grep_nr('TestFailed: \d+/',section='stderr')
                if run > 0:
                    return passed/run
                else:
                    run = self.grep_nr('TestFailed: \d+ of ',section='stderr')
                    if run > 0 :
                        return (run-passed)/run
                    else:
                        return 0.0

    def isok(self):
        return self['outcome'].lower() == 'ok'

    def iserror(self):
        return self['outcome'].lower()[:3] == 'err' or self['outcome'].lower() == 'fail'

    def istimeout(self):
        return self['outcome'].lower() == 't/o'
# XXX backward compatibility
def sanitize(msg, path):
    """Return *msg* with legacy space-separated header names fixed.

    Old result files used headers like 'exit status:'; current code
    expects 'exit-status:'.  If *msg* already has the new-style header it
    is returned unchanged; otherwise the raw file at *path* is patched
    textually and re-parsed.
    """
    if 'exit-status' in msg.keys():
        return msg
    f = open(str(path), 'r')
    msg = f.read()
    f.close()
    for broken in ('exit status', 'cpu model', 'cpu mhz'):
        valid = broken.replace(' ','-')
        invalid = msg.find(broken+':')
        if invalid == -1:
            # bug fix: a missing header used to fall through with find()'s
            # -1 and scramble the message text via negative slicing
            continue
        # 'valid' has the same length as 'broken', so this is an in-place
        # overwrite of the header name
        msg = (msg[:invalid] + valid +
               msg[invalid+len(valid):])
    from email import message_from_string
    msg = message_from_string(msg)
    return msg
def sanitize_reprs(reprs):
    """Rename the legacy 'exit status' key to 'exit-status' in place."""
    try:
        reprs['exit-status'] = reprs.pop('exit status')
    except KeyError:
        pass
class ResultFromMime(Result):
    """A Result reconstructed from a MIME-format result file on disk."""

    def __init__(self, path):
        super(ResultFromMime, self).__init__(init=False)
        f = open(str(path), 'r')
        from email import message_from_file
        msg = message_from_file(f)
        f.close()
        msg = sanitize(msg, path)
        # XXX security wise evil (keep in mind once we accept reports
        # from anonymous senders)
        #print msg['_reprs']
        self._reprs = eval(msg['_reprs'])
        del msg['_reprs']
        sanitize_reprs(self._reprs)
        for name, value in msg.items():
            if name in self._reprs:
                value = eval(value)  # XXX security
            self._headers[name] = value
        self.fspath = self['fspath']
        # derive the test name from the last path component, honoring the
        # path separator the producing platform used
        if self['platform'] == 'win32' and '\\' in self.fspath:
            self.testname = self.fspath.split('\\')[-1]
        else:
            self.testname = self.fspath.split('/')[-1]
        #if sys.platform != 'win32' and '\\' in self.fspath:
        #    self.fspath = py.path.local(self['fspath'].replace('\\'
        self.path = path

        payload = msg.get_payload()
        if payload:
            for submsg in payload:
                assert submsg.get_content_type() == 'text/plain'
                fn = submsg.get_filename()
                assert fn
                # XXX we need to deal better with encodings to
                # begin with
                content = submsg.get_payload()
                for candidate in 'utf8', 'latin1':
                    try:
                        text = unicode(content, candidate)
                    except UnicodeDecodeError:
                        continue
                    else:
                        # bug fix: stop at the first codec that works; the
                        # old loop had no break, so latin1 (which never
                        # fails) re-decoded the content and overwrote a
                        # good utf8 result
                        self.addnamedtext(fn, text)
                        break

    def ismodifiedtest(self):
        # XXX we need proper cross-platform paths!
        return 'modified' in self.fspath

    def __repr__(self):
        return '<%s (%s) %r rev=%s>' %(self.__class__.__name__,
                                       self['outcome'],
                                       self.fspath,
                                       self['pypy-revision'])
def stdinit(result):
    """Fill *result* with the standard environment headers."""
    import getpass
    import socket
    try:
        username = getpass.getuser()
    except:
        # best-effort: getuser() can fail in odd environments
        username = 'unknown'
    userhost = '%s@%s' % (username, socket.gethostname())
    result['testreport-version'] = "1.1.1"
    result['userhost'] = userhost
    result['platform'] = sys.platform
    result['python-version-info'] = sys.version_info
    info = try_getcpuinfo()
    if info is not None:
        result['cpu-model'] = info.get('model name', "unknown")
        result['cpu-mhz'] = info.get('cpu mhz', 'unknown')
#
#
#
def try_getcpuinfo():
    """Parse /proc/cpuinfo into a name->value dict on Linux; None elsewhere."""
    if sys.platform.startswith('linux'):
        cpuinfopath = py.path.local('/proc/cpuinfo')
        if cpuinfopath.check(file=1):
            d = {}
            for line in cpuinfopath.readlines():
                if line.strip():
                    # lines look like 'model name\t: ...'
                    name, value = line.split(':', 1)
                    name = name.strip().lower()
                    d[name] = value.strip()
            return d
| Python |
import autopath
import py
import pypy

# location of the pypy package and its parent 'dist' directory
pypydir = py.path.local(pypy.__file__).dirpath()
distdir = pypydir.dirpath()
# where nightly test results get checked out
testresultdir = distdir.join('testresult')
assert pypydir.check(dir=1)
# CPython compliance test suites: pristine and pypy-modified copies
libpythondir = distdir.join('lib-python')
regrtestdir = libpythondir.join('2.4.1', 'test')
modregrtestdir = libpythondir.join('modified-2.4.1', 'test')
| Python |
#! /usr/bin/env python
"""
the html test reporter
"""
import sys, os, re
import pprint
import py
from pypy.tool.pytest import result
from pypy.tool.pytest.overview import ResultCache
#
# various interesting path objects
#
html = py.xml.html
NBSP = py.xml.raw(" ")
class HtmlReport(object):
    """Renders the collected test results as a set of HTML pages."""

    def __init__(self, resultdir):
        self.resultcache = ResultCache(resultdir)

    def parselatest(self):
        self.resultcache.parselatest()

    #
    # rendering
    #

    def render_latest_table(self, results):
        """Build the summary table: errors first, then timeouts, then ok;
        ties ordered by execution time."""
        table = html.table(
            [html.th(x, align='left')
             for x in ("failure", "filename", "revision",
                       "user", "platform", "elapsed",
                       "options", "last error line"
                       )],
        )
        r = results[:]
        def f(x, y):
            # rank: ok=1, timeout=2, error=3; negated so errors sort first
            xnum = x.isok() and 1 or (x.istimeout() and 2 or 3)
            ynum = y.isok() and 1 or (y.istimeout() and 2 or 3)
            res = -cmp(xnum, ynum)
            if res == 0:
                return cmp(x['execution-time'], y['execution-time'])
            return res
        r.sort(f)
        for result in r:
            table.append(self.render_result_row(result))
        return table

    def render_result_row(self, result):
        """Render one table row; also records the failure ratio in self.data."""
        dp = py.path.local(result['fspath'])
        options = " ".join([x for x in result.get('options', []) if x!= 'core'])
        if not options:
            options = NBSP
        failureratio = 100 * (1.0 - result.ratio_of_passed())
        self.data[result.testname] = failureratio
        return html.tr(
            html.td("%.2f%%" % failureratio,
                    style = "background-color: %s" % (getresultcolor(result),)),
            html.td(self.render_test_references(result)),
            html.td(result['pypy-revision']),
            html.td(result['userhost'][:15]),
            html.td(result['platform']),
            html.td("%.2fs" % result['execution-time']),
            html.td(options),
            html.td(result.repr_short_error() or NBSP)
        )

    def getrelpath(self, p):
        # path of *p* relative to the directory holding the index page
        return p.relto(self.indexpath.dirpath())

    def render_test_references(self, result):
        """Link to the per-test detail page (written as a side effect)."""
        dest = self.make_single_test_result(result)
        modified = result.ismodifiedtest() and " [mod]" or ""
        return html.div(html.a(result.path.purebasename + modified,
                               href=self.getrelpath(dest)),
                        style="background-color: transparent")

    def make_single_test_result(self, result):
        """Write the detail page for *result* into the .cache directory."""
        cache = self.indexpath.dirpath('.cache', result['userhost'][:15])
        cache.ensure(dir=1)
        dest = cache.join(result.path.basename).new(ext='.html')
        doc = ViewResult(result)
        doc.writetopath(dest)
        return dest

    def getcorelists(self):
        """Split the latest relevant results into (core, non-core) lists."""
        def iscore(result):
            return 'core' in result.get('options', [])
        coretests = []
        noncoretests = []
        for name in self.resultcache.getnames():
            result = self.resultcache.getlatestrelevant(name)
            if iscore(result):
                coretests.append(result)
            else:
                noncoretests.append(result)
        return coretests, noncoretests

    # generate html files
    #
    def makeindex(self, indexpath, detail="PyPy - latest"):
        """Write the index page plus a python-literal 'data' dump next to it."""
        self.indexpath = indexpath
        self.data = {}
        doc = Document(title='pypy test results')
        body = doc.body
        coretests, noncoretests = self.getcorelists()
        body.append(html.h2("%s compliance test results - "
                            "core tests" % detail))
        body.append(self.render_test_summary('core', coretests))
        body.append(self.render_latest_table(coretests))
        body.append(
            html.h2("%s compliance test results - non-core tests" % detail))
        body.append(self.render_test_summary('noncore', noncoretests))
        body.append(self.render_latest_table(noncoretests))
        doc.writetopath(indexpath)
        # dump the collected ratios as an importable python literal
        datapath = indexpath.dirpath().join('data')
        d = datapath.open('w')
        print >>d, "data = ",
        pprint.pprint(self.data, stream=d)
        d.close()
        self.data = None

    def render_test_summary(self, tag, tests):
        """Render the compliancy/passed/failed/timeout percentage table."""
        ok = len([x for x in tests if x.isok()])
        err = len([x for x in tests if x.iserror()])
        to = len([x for x in tests if x.istimeout()])
        numtests = ok + err + to
        assert numtests == len(tests)
        assert numtests
        t = html.table()
        sum100 = numtests / 100.0
        def row(*args):
            return html.tr(*[html.td(arg) for arg in args])
        sum_passed = sum([x.ratio_of_passed() for x in tests])
        compliancy = sum_passed/sum100
        self.data['%s-compliancy' % tag] = compliancy
        t.append(row(html.b("tests compliancy"),
                     html.b("%.2f%%" % (compliancy,))))
        passed = ok/sum100
        self.data['%s-passed' % tag] = passed
        t.append(row("testmodules passed completely", "%.2f%%" % passed))
        failed = err/sum100
        self.data['%s-failed' % tag] = failed
        t.append(row("testmodules (partially) failed", "%.2f%%" % failed))
        timedout = to/sum100
        self.data['%s-timedout' % tag] = timedout
        t.append(row("testmodules timeout", "%.2f%%" % timedout))
        return t
class Document(object):
    """Minimal HTML document wrapper: <html><head/><body/></html>."""

    def __init__(self, title=None):
        self.body = html.body()
        self.head = html.head()
        self.doc = html.html(self.head, self.body)
        if title is not None:
            self.head.append(
                html.meta(name="title", content=title))
        self.head.append(
            html.link(rel="Stylesheet", type="text/css", href="/pypy/default.css"))

    def writetopath(self, p):
        """Serialize the document as UTF-8 into path *p* (must end in .html)."""
        assert p.ext == '.html'
        self.head.append(
            html.meta(name="Content-Type", content="text/html;charset=UTF-8")
        )
        s = self.doc.unicode().encode('utf-8')
        p.write(s)
def getresultcolor(result):
    """Map a test result to its display color.

    green = ok, red = error/failure, blue = timeout, grey = none of those.
    """
    if result.isok():
        color = "#00ee00"
    elif result.iserror():
        color = "#ee0000"
    elif result.istimeout():
        # bug fix: this used to test the bound method object itself
        # ('result.istimeout'), which is always truthy, so the grey
        # fallback below was unreachable.
        color = "#0000ee"
    else:
        color = "#444444"
    return color
class ViewResult(Document):
    """Detail page for a single test result."""

    def __init__(self, result):
        title = "%s testresult" % (result.path.purebasename,)
        super(ViewResult, self).__init__(title=title)
        color = getresultcolor(result)
        self.body.append(html.h2(title,
                                 style="background-color: %s" % color))
        self.body.append(self.render_meta_info(result))
        # attach whichever captured text blocks are present
        for name in ('reportdiff', 'stdout', 'stderr'):
            try:
                text = result.getnamedtext(name)
            except KeyError:
                continue
            self.body.append(html.h3(name))
            self.body.append(html.pre(text))

    def render_meta_info(self, result):
        """Table of the all-lowercase header entries of *result*."""
        t = html.table()
        items = result.items()
        items.sort()
        for name, value in items:
            # show only plain lowercase headers; mixed-case names are
            # internal bookkeeping
            if name.lower() == name:
                t.append(html.tr(
                    html.td(name), html.td(value)))
        return t
class TestOfHtmlReportClass:
    """Disabled self-tests for HtmlReport (skipped unconditionally)."""

    def setup_class(cls):
        py.test.skip('needs move to own test file')
        # NOTE(review): everything below is unreachable after the skip and
        # refers to names (confpath, no-arg HtmlReport()) that do not match
        # the current module -- confirm before re-enabling.
        cls.testresultdir = confpath.testresultdir
        cls.rep = rep = HtmlReport()
        rep.parse_all(cls.testresultdir)

    def test_pickling(self):
        # test pickling of report
        tempdir = py.test.ensuretemp('reportpickle')
        picklepath = tempdir.join('report.pickle')
        picklepath.dump(self.rep)
        x = picklepath.load()
        assert len(x.results) == len(self.rep.results)

    def test_render_latest(self):
        t = self.rep.render_latest_table(self.rep.results)
        assert unicode(t)
# directory containing this module
mydir = py.magic.autopath().dirpath()

def getpicklepath():
    """Path of the (hidden) report pickle file next to this module."""
    return mydir.join('.htmlreport.pickle')
| Python |
#! /usr/bin/env python
import autopath
import py
import sys
mydir = py.magic.autopath().dirpath().realpath()
from pypy.tool.pytest import htmlreport
from pypy.tool.pytest import confpath
if __name__ == '__main__':
if len(sys.argv) > 1:
testresultdir = py.path.local(sys.argv[1])
assert testresultdir.check(dir=1)
else:
testresultdir = confpath.testresultdir
assert testresultdir.check(dir=1)
try:
resultwc = py.path.svnwc(testresultdir)
print "updating", resultwc
resultwc.update()
except KeyboardInterrupt, RuntimeError:
raise
except Exception,e: #py.process.ExecutionFailed,e:
print >> sys.stderr, "Warning: ",e #Subversion update failed"
print "traversing", mydir
rep = htmlreport.HtmlReport(testresultdir)
rep.parselatest()
print "making html files"
rep.makeindex(testresultdir.join('index.html'))
| Python |
class AppTestTest:
    """Self-test: an app-level test method that intentionally fails."""
    def test_app_method(self):
        # deliberately wrong -- exercises failure reporting
        assert 42 == 41
def app_test_app_func():
    # deliberately wrong -- exercises app-level failure reporting
    assert 41 == 42
def test_interp_func(space):
    # deliberately wrong (w_None is false) -- exercises interp-level failure
    assert space.is_true(space.w_None)
class TestInterpTest:
    """Self-test: an interp-level test method that intentionally fails."""
    def test_interp_method(self):
        # deliberately wrong (w_False is false)
        assert self.space.is_true(self.space.w_False)
def app_test_raises_something():
    # deliberately raises ValueError without using raises()
    int("hallo")
def app_test_raises_wrong1():
    # deliberately fails: the string form raises ValueError, not SyntaxError
    raises(SyntaxError, 'int("hello")')
def app_test_raises_wrong2():
    # deliberately fails: the callable form raises ValueError, not SyntaxError
    raises(SyntaxError, int, "hello")
def app_test_raises_doesnt():
    # deliberately fails: int(3) does not raise at all
    raises(ValueError, int, 3)
| Python |
# refer to 2.4.1/test/regrtest.py's runtest() for comparison
import sys
from test import test_support

# argv[1] is the verbosity flag for the test module; the remaining
# arguments are passed through to the test itself
test_support.verbose = int(sys.argv[1])
sys.argv[:] = sys.argv[2:]

modname = sys.argv[0]
impname = 'test.' + modname
mod = __import__(impname, globals(), locals(), [modname])
# CPython regrtest convention: run test_main() if the module defines one
indirect_test = getattr(mod, 'test_main', None)
if indirect_test is not None:
    indirect_test()
# else the test already ran during import
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ is unset when run as a script via exec; fall back to argv
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))

    # walk upwards until a path component named `part` is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        # while-else: loop exhausted without break -> `part` not an ancestor
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)

    pypy_root = os.path.join(head, '')
    # ensure the parent of the pypy root is at the *front* of sys.path
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)

    # Re-register top-level modules that actually live inside the pypy tree
    # under their fully dotted names, so both spellings refer to one module.
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): newname is a splitext'd basename and so will rarely
        # contain a '.'; confirm this filter is intended -- as written it
        # skips almost every module, leaving `munged` empty.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            # derive the dotted module path from the file's location
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod

    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                # import the parent package and hang the module off it
                __import__(prename)
                if not hasattr(sys.modules[prename], postname):
                    setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
""" clone master version of autopath.py into all subdirs """
from os.path import join, walk
if not this_dir.endswith(join('pypy','tool')):
raise EnvironmentError("can only clone master version "
"'%s'" % join(pypydir, 'tool',_myname))
def sync_walker(arg, dirname, fnames):
if _myname in fnames:
fn = join(dirname, _myname)
f = open(fn, 'rwb+')
try:
if f.read() == arg:
print "checkok", fn
else:
print "syncing", fn
f = open(fn, 'w')
f.write(arg)
finally:
f.close()
s = open(join(pypydir, 'tool', _myname), 'rb').read()
walk(pypydir, sync_walker, s)
# canonical filename of this module, used by __clone()
_myname = 'autopath.py'

# set guaranteed attributes (importing autopath triggers the sys.path fixup)
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    # running the master copy directly re-syncs every autopath.py clone
    __clone()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.