# (extraction artifact: dataset table header removed)
# no longer there, sorry
# XXX fix all these imports
from pypy.tool.pairtype import *
# (extraction artifact removed)
import types
from pypy.annotation.model import SomeBool, SomeInteger, SomeString,\
SomeFloat, SomeList, SomeDict, s_None, SomeExternalObject,\
SomeObject, SomeInstance, SomeTuple, lltype_to_annotation
from pypy.annotation.classdef import ClassDef, InstanceSource
from pypy.annotation.listdef import ListDef, MOST_GENERAL_LISTDEF
from pypy.annotation.dictdef import DictDef, MOST_GENERAL_DICTDEF
_annotation_cache = {}    # maps _annotation_key() results to SomeXxx instances

def _annotation_key(t):
    """Turn the type description *t* into a hashable cache key.

    Unhashable containers (list, dict) are encoded as tagged tuples;
    anything else acts as its own key.
    """
    from pypy.rpython import extregistry
    kind = type(t)
    if kind is list:
        assert len(t) == 1
        return ('list', _annotation_key(t[0]))
    if kind is dict:
        assert len(t) == 1
        return ('dict', _annotation_key(t.items()[0]))
    if isinstance(t, tuple):
        return tuple(map(_annotation_key, t))
    if extregistry.is_registered(t):
        # XXX should it really be always different?
        return t
    return t
def annotation(t, bookkeeper=None):
    """Return the annotation for the type description *t*.

    Without a bookkeeper the result is memoized in _annotation_cache;
    with one, it is recomputed every time.
    """
    if bookkeeper is not None:
        return _compute_annotation(t, bookkeeper)
    key = _annotation_key(t)
    try:
        return _annotation_cache[key]
    except KeyError:
        s_result = _compute_annotation(t, bookkeeper)
        _annotation_cache[key] = s_result
        return s_result
def _compute_annotation(t, bookkeeper=None):
    """Compute the SomeXxx annotation for the type description *t*.

    *t* may be an annotation already, a low-level type, a one-item list
    [itemtype], a one-item dict {keytype: valuetype}, a tuple of
    descriptions, None, an extregistry-registered object, or a plain
    class/type.
    """
    from pypy.rpython.lltypesystem import lltype
    # note: getbookkeeper is imported but appears unused here
    from pypy.annotation.bookkeeper import getbookkeeper
    from pypy.rpython import extregistry
    if isinstance(t, SomeObject):
        # already an annotation: pass it through unchanged
        return t
    elif isinstance(t, lltype.LowLevelType):
        return lltype_to_annotation(t)
    elif isinstance(t, list):
        assert len(t) == 1, "We do not support type joining in list"
        listdef = ListDef(bookkeeper, annotation(t[0]), mutated=True, resized=True)
        return SomeList(listdef)
    elif isinstance(t, tuple):
        return SomeTuple(tuple([annotation(i) for i in t]))
    elif isinstance(t, dict):
        assert len(t) == 1, "We do not support type joining in dict"
        # Python 2 style: keys()/values() return indexable lists
        result = SomeDict(DictDef(bookkeeper, annotation(t.keys()[0]),
                                  annotation(t.values()[0])))
        return result
    elif type(t) is types.NoneType:
        return s_None
    elif extregistry.is_registered(t):
        entry = extregistry.lookup(t)
        entry.bookkeeper = bookkeeper
        return entry.compute_result_annotation()
    else:
        # fall back to interpreting t as a class/type
        return annotationoftype(t, bookkeeper)
def annotationoftype(t, bookkeeper=False):
    """The most precise SomeValue instance that contains all
    objects of type t."""
    # BUG FIX: this docstring used to be placed *after* the imports
    # below, which made it a dead string expression instead of the
    # function's docstring.  It must be the first statement.
    from pypy.annotation.builtin import BUILTIN_ANALYZERS
    from pypy.annotation.builtin import EXTERNAL_TYPE_ANALYZERS
    from pypy.rpython import extregistry

    assert isinstance(t, (type, types.ClassType))
    if t is bool:
        return SomeBool()
    elif t is int:
        return SomeInteger()
    elif t is float:
        return SomeFloat()
    elif issubclass(t, str): # py.lib uses annotated str subclasses
        return SomeString()
    elif t is list:
        return SomeList(MOST_GENERAL_LISTDEF)
    elif t is dict:
        return SomeDict(MOST_GENERAL_DICTDEF)
    # can't do tuple
    elif t is types.NoneType:
        return s_None
    elif t in EXTERNAL_TYPE_ANALYZERS:
        return SomeExternalObject(t)
    elif bookkeeper and extregistry.is_registered_type(t, bookkeeper.policy):
        entry = extregistry.lookup_type(t, bookkeeper.policy)
        return entry.compute_annotation_bk(bookkeeper)
    elif bookkeeper and t.__module__ != '__builtin__' and t not in bookkeeper.pbctypes:
        # a user-defined class: annotate as an instance of it
        classdef = bookkeeper.getuniqueclassdef(t)
        return SomeInstance(classdef)
    else:
        # unknown type: degenerate to SomeObject, remembering the type
        # when it is more precise than 'object'
        o = SomeObject()
        if t != object:
            o.knowntype = t
        return o
class Sig(object):
    """Signature checker attached to a function.

    Called by the annotator with the function's FuncDesc and the list
    of input annotations; checks the inputs against the declared
    argtypes and then forces them to the declared annotations.
    """

    def __init__(self, *argtypes):
        self.argtypes = argtypes

    def __call__(self, funcdesc, inputcells):
        from pypy.rpython.lltypesystem import lltype
        args_s = []
        from pypy.annotation import model as annmodel
        for i, argtype in enumerate(self.argtypes):
            if isinstance(argtype, (types.FunctionType, types.MethodType)):
                # a callable argtype computes the expected annotation
                # from the actual input cells
                argtype = argtype(*inputcells)
            if isinstance(argtype, lltype.LowLevelType) and\
                argtype is lltype.Void:
                # XXX the mapping between Void and annotation
                # is not quite well defined
                s_input = inputcells[i]
                assert isinstance(s_input, annmodel.SomePBC)
                assert s_input.is_constant()
                args_s.append(s_input)
            elif argtype is None:
                args_s.append(inputcells[i]) # no change
            else:
                args_s.append(annotation(argtype, bookkeeper=funcdesc.bookkeeper))
        if len(inputcells) != len(args_s):
            raise Exception("%r: expected %d args, got %d" % (funcdesc,
                len(args_s),
                len(inputcells)))
        # every actual input must fit in the declared annotation
        for i, (s_arg, s_input) in enumerate(zip(args_s, inputcells)):
            if not s_arg.contains(s_input):
                raise Exception("%r argument %d:\n"
                    "expected %s,\n"
                    " got %s" % (funcdesc, i+1,
                        s_arg,
                        s_input))
        # force the inputs to the declared annotations, in place
        inputcells[:] = args_s
# ---- file boundary: pypy/annotation/model.py follows ----
"""
This file defines the 'subset' SomeValue classes.
An instance of a SomeValue class stands for a Python object that has some
known properties, for example that is known to be a list of non-negative
integers. Each instance can be considered as an object that is only
'partially defined'. Another point of view is that each instance is a
generic element in some specific subset of the set of all objects.
"""
# Old terminology still in use here and there:
# SomeValue means one of the SomeXxx classes in this file.
# Cell is an instance of one of these classes.
#
# Think about cells as potato-shaped circles in a diagram:
# ______________________________________________________
# / SomeObject() \
# / ___________________________ ______________ \
# | / SomeInteger(nonneg=False) \____ / SomeString() \ \
# | / __________________________ \ | | |
# | | / SomeInteger(nonneg=True) \ | | "hello" | |
# | | | 0 42 _________/ | \______________/ |
# | \ -3 \________________/ / |
# \ \ -5 _____/ /
# \ \________________________/ 3.1416 /
# \_____________________________________________________/
#
from types import BuiltinFunctionType, MethodType, FunctionType
import pypy.tool.instancemethod
from pypy.annotation.pairtype import pair, extendabletype
from pypy.tool.tls import tlsobject
from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong, base_int
import inspect
from sys import maxint
from pypy.annotation.description import FunctionDesc
DEBUG = True    # set to False to disable recording of debugging information
TLS = tlsobject()   # thread-local storage (holds reprdict, bookkeeper, ...)
class SomeObject:
    """The set of all objects.  Each instance stands
    for an arbitrary object about which nothing is known."""
    __metaclass__ = extendabletype
    knowntype = object    # most precise Python type known for the values
    immutable = False     # True when the described values never mutate

    def __eq__(self, other):
        # structural equality: same annotation class and same attributes
        return (self.__class__ is other.__class__ and
                self.__dict__ == other.__dict__)
    def __ne__(self, other):
        return not (self == other)
    def __repr__(self):
        # recursion-safe repr: a thread-local dict guards against
        # annotations that (indirectly) contain themselves
        try:
            reprdict = TLS.reprdict
        except AttributeError:
            reprdict = TLS.reprdict = {}
        if self in reprdict:
            kwds = '...'
        else:
            reprdict[self] = True
            try:
                items = self.__dict__.items()
                items.sort()
                args = []
                for k, v in items:
                    # a fmt_<attr> method may customize or hide an attribute
                    m = getattr(self, 'fmt_' + k, repr)
                    r = m(v)
                    if r is not None:
                        args.append('%s=%s'%(k, r))
                kwds = ', '.join(args)
            finally:
                del reprdict[self]
        return '%s(%s)' % (self.__class__.__name__, kwds)

    def fmt_knowntype(self, t):
        return t.__name__

    def contains(self, other):
        # True when every value of 'other' is also a value of 'self',
        # i.e. union(self, other) == self
        if self == other:
            return True
        try:
            TLS.no_side_effects_in_union += 1
        except AttributeError:
            TLS.no_side_effects_in_union = 1
        try:
            try:
                return pair(self, other).union() == self
            except UnionError:
                return False
        finally:
            TLS.no_side_effects_in_union -= 1

    def is_constant(self):
        # a constant annotation carries its value in 'const' or 'const_box'
        d = self.__dict__
        return 'const' in d or 'const_box' in d

    def is_immutable_constant(self):
        return self.immutable and 'const' in self.__dict__

    # delegate accesses to 'const' to accesses to 'const_box.value',
    # where const_box is a Constant.  XXX the idea is to eventually
    # use systematically 'const_box' instead of 'const' for
    # non-immutable constant annotations
    class ConstAccessDelegator(object):
        def __get__(self, obj, cls=None):
            return obj.const_box.value
    const = ConstAccessDelegator()
    del ConstAccessDelegator

    # for debugging, record where each instance comes from
    # this is disabled if DEBUG is set to False
    # NOTE(review): keyed by id() and never pruned, so entries of dead
    # objects linger and recycled ids may alias -- debug-only data
    _coming_from = {}

    def __new__(cls, *args, **kw):
        self = super(SomeObject, cls).__new__(cls, *args, **kw)
        if DEBUG:
            try:
                bookkeeper = pypy.annotation.bookkeeper.getbookkeeper()
                position_key = bookkeeper.position_key
            except AttributeError:
                # no bookkeeper or no position: skip the recording
                pass
            else:
                SomeObject._coming_from[id(self)] = position_key, None
        return self

    # 'origin' and 'caused_by_merge' are properties backed by the
    # class-level _coming_from dict rather than instance attributes
    def origin(self):
        return SomeObject._coming_from.get(id(self), (None, None))[0]
    def set_origin(self, nvalue):
        SomeObject._coming_from[id(self)] = nvalue, self.caused_by_merge
    origin = property(origin, set_origin)
    del set_origin

    def caused_by_merge(self):
        return SomeObject._coming_from.get(id(self), (None, None))[1]
    def set_caused_by_merge(self, nvalue):
        SomeObject._coming_from[id(self)] = self.origin, nvalue
    caused_by_merge = property(caused_by_merge, set_caused_by_merge)
    del set_caused_by_merge

    def can_be_none(self):
        # conservative default: an unknown object may be None
        return True

    def nonnoneify(self):
        # return a version of self excluding None; the most general
        # annotation cannot exclude it
        return self
class SomeFloat(SomeObject):
    """Annotation for a float (or an integer seen as a float)."""
    knowntype = float # if we don't know if it's a float or an int,
                      # pretend it's a float.
    immutable = True

    def can_be_none(self):
        # a float value is never None
        return False
class SomeInteger(SomeFloat):
    """Annotation for an integer, possibly of non-native size or
    signedness (see pypy.rlib.rarithmetic)."""
    knowntype = int
    # size is in multiples of C's sizeof(long)!

    def __init__(self, nonneg=False, unsigned=None, knowntype=None):
        # 'knowntype' must be int or one of the rarithmetic integer types
        assert (knowntype is None or knowntype is int or
                issubclass(knowntype, base_int))
        if knowntype is None:
            if unsigned:
                knowntype = r_uint
            else:
                knowntype = int
        elif unsigned is not None:
            raise TypeError('Conflicting specification for SomeInteger')
        self.knowntype = knowntype
        # derive signedness from the type itself: unsigned types wrap -1
        unsigned = self.knowntype(-1) > 0
        self.nonneg = unsigned or nonneg
        self.unsigned = unsigned  # pypy.rlib.rarithmetic.r_uint
class SomeBool(SomeInteger):
    """Annotation for a bool (True or False)."""
    knowntype = bool
    nonneg = True
    unsigned = False

    def __init__(self):
        # bypass SomeInteger.__init__: for bools all the attributes
        # are fixed class-level values
        pass
class SomeString(SomeObject):
    """Annotation for a string, optionally allowed to be None."""
    knowntype = str
    immutable = True

    def __init__(self, can_be_None=False):
        self.can_be_None = can_be_None

    def can_be_none(self):
        return self.can_be_None

    def nonnoneify(self):
        # drop the None possibility
        return SomeString(can_be_None=False)
class SomeChar(SomeString):
    """Annotation for a string known to have length 1."""
class SomeUnicodeCodePoint(SomeObject):
    """Annotation for a single unicode character."""
    knowntype = unicode
    immutable = True

    def can_be_none(self):
        return False
class SomeList(SomeObject):
    """Annotation for a homogeneous list of any length."""
    knowntype = list

    def __init__(self, listdef):
        self.listdef = listdef

    def __eq__(self, other):
        # equal when the listdefs describe the same items and all
        # remaining attributes match
        if self.__class__ is not other.__class__:
            return False
        if not self.listdef.same_as(other.listdef):
            return False
        d1 = self.__dict__.copy()
        d2 = other.__dict__.copy()
        del d1['listdef']
        del d2['listdef']
        return d1 == d2

    def can_be_none(self):
        return True
class SomeSlice(SomeObject):
    """Annotation for a slice object."""
    knowntype = slice
    immutable = True

    def __init__(self, start, stop, step):
        self.start = start
        self.stop = stop
        self.step = step

    def constant_indices(self):
        """Return (start, stop, step) as constants, or complain."""
        for s_index in (self.start, self.stop, self.step):
            if not s_index.is_immutable_constant():
                raise Exception("need constant indices for this slice")
        return self.start.const, self.stop.const, self.step.const

    def can_be_none(self):
        return False
class SomeTuple(SomeObject):
    """Annotation for a tuple of fixed, known length."""
    knowntype = tuple
    immutable = True

    def __init__(self, items):
        self.items = tuple(items)   # tuple of s_xxx elements
        # if every item is constant, the whole tuple is a constant too
        for s_item in items:
            if not s_item.is_constant():
                break
        else:
            self.const = tuple([s_item.const for s_item in items])

    def can_be_none(self):
        return False
class SomeDict(SomeObject):
    """Annotation for a homogeneous dict."""
    knowntype = dict

    def __init__(self, dictdef):
        self.dictdef = dictdef

    def __eq__(self, other):
        # equal when the dictdefs describe the same keys/values and all
        # remaining attributes match
        if self.__class__ is not other.__class__:
            return False
        if not self.dictdef.same_as(other.dictdef):
            return False
        d1 = self.__dict__.copy()
        d2 = other.__dict__.copy()
        del d1['dictdef']
        del d2['dictdef']
        return d1 == d2

    def can_be_none(self):
        return True

    def fmt_const(self, const):
        # abbreviate big constant dicts in repr()
        if len(const) < 20:
            return repr(const)
        return '{...%s...}'%(len(const),)
class SomeIterator(SomeObject):
    """Annotation for an iterator over a given container annotation."""
    knowntype = type(iter([])) # arbitrarily chose seqiter as the type

    def __init__(self, s_container, *variant):
        # 'variant': optional tags refining the kind of iteration
        self.variant = variant
        self.s_container = s_container

    def can_be_none(self):
        return False
class SomeInstance(SomeObject):
    """Annotation for an instance of a (user-defined) class."""

    def __init__(self, classdef, can_be_None=False, flags={}):
        # note: the default 'flags' dict is shared; it is stored but
        # never mutated here
        self.classdef = classdef
        self.knowntype = classdef or object
        self.can_be_None = can_be_None
        self.flags = flags

    def fmt_knowntype(self, kt):
        # knowntype duplicates classdef; hide it from repr()
        return None

    def fmt_classdef(self, cdef):
        if cdef is None:
            return 'object'
        return cdef.name

    def fmt_flags(self, flags):
        if flags:
            return repr(flags)
        return None

    def can_be_none(self):
        return self.can_be_None

    def nonnoneify(self):
        return SomeInstance(self.classdef, can_be_None=False)
class SomePBC(SomeObject):
    """Stands for a global user instance, built prior to the analysis,
    or a set of such instances."""
    immutable = True

    def __init__(self, descriptions, can_be_None=False, subset_of=None):
        # descriptions is a set of Desc instances.
        descriptions = dict.fromkeys(descriptions)
        self.descriptions = descriptions
        self.can_be_None = can_be_None
        self.subset_of = subset_of
        self.simplify()
        if self.isNone():
            # the 'None' PBC is itself a constant
            self.knowntype = type(None)
            self.const = None
        else:
            # knowntype is the common base class of all the descriptions
            knowntype = reduce(commonbase,
                               [x.knowntype for x in descriptions])
            if knowntype == type(Exception):
                knowntype = type
            if knowntype != object:
                self.knowntype = knowntype
            if len(descriptions) == 1 and not can_be_None:
                # hack for the convenience of direct callers to SomePBC():
                # only if there is a single object in descriptions
                desc, = descriptions
                if desc.pyobj is not None:
                    self.const = desc.pyobj

    def getKind(self):
        "Return the common Desc class of all descriptions in this PBC."
        kinds = {}
        for x in self.descriptions:
            assert type(x).__name__.endswith('Desc') # avoid import nightmares
            kinds[x.__class__] = True
        assert len(kinds) <= 1, (
            "mixing several kinds of PBCs: %r" % (kinds.keys(),))
        if not kinds:
            raise ValueError("no 'kind' on the 'None' PBC")
        return kinds.keys()[0]

    def simplify(self):
        # normalize self.descriptions in place
        if self.descriptions:
            # We check that the set only contains a single kind of Desc instance
            kind = self.getKind()
            # then we remove unnecessary entries in self.descriptions:
            # some MethodDescs can be 'shadowed' by others
            if len(self.descriptions) > 1:
                kind.simplify_desc_set(self.descriptions)
        else:
            assert self.can_be_None, "use s_ImpossibleValue"

    def isNone(self):
        # True for the PBC standing only for the value None
        return len(self.descriptions) == 0

    def can_be_none(self):
        return self.can_be_None

    def nonnoneify(self):
        if self.isNone():
            # nothing remains once None is excluded
            return s_ImpossibleValue
        else:
            return SomePBC(self.descriptions, can_be_None=False)

    def fmt_descriptions(self, pbis):
        # abbreviate the descriptions set in repr()
        if hasattr(self, 'const'):
            return None
        else:
            return '{...%s...}'%(len(pbis),)

    def fmt_knowntype(self, kt):
        if self.is_constant():
            return None
        else:
            return kt.__name__
class SomeGenericCallable(SomeObject):
    """Stands for an external callable with a known signature."""

    def __init__(self, args, result):
        self.args_s = args        # list of annotations, one per argument
        self.s_result = result    # annotation of the result

    def can_be_none(self):
        # BUG FIX: this method used to be spelled 'can_be_None', which
        # is not the name the rest of the annotator calls (see
        # SomeObject.can_be_none), so the override was dead code.  The
        # inherited default also returns True, so behaviour is unchanged.
        return True
class SomeBuiltin(SomeObject):
    """Annotation for a built-in function or method with
    special-cased analysis."""
    knowntype = BuiltinFunctionType # == BuiltinMethodType
    immutable = True

    def __init__(self, analyser, s_self=None, methodname=None):
        # normalize bound methods into InstanceMethod wrappers
        if isinstance(analyser, MethodType):
            analyser = pypy.tool.instancemethod.InstanceMethod(
                analyser.im_func,
                analyser.im_self,
                analyser.im_class)
        self.analyser = analyser
        self.s_self = s_self
        self.methodname = methodname

    def can_be_none(self):
        return False
class SomeBuiltinMethod(SomeBuiltin):
    """A built-in method which has got special meaning."""
    knowntype = MethodType
class SomeExternalObject(SomeObject):
    """Stands for an object of 'external' type.  External types have a Repr
    controlled by pypy.rpython.extregistry; or they come from the (obsolete)
    table created by pypy.rpython.extfunctable.declaretype() and represent
    simple types with some methods that need direct back-end support."""

    def __init__(self, knowntype):
        self.knowntype = knowntype

    def can_be_none(self):
        # like ordinary instances, external objects may be None
        return True
class SomeExternalInstance(SomeExternalObject):
    """An object of 'external' type, but with custom access to
    attributes as well as methods."""
class SomeCTypesObject(SomeExternalObject):
    """Stands for an object of the ctypes module."""

    def __init__(self, knowntype, ownsmemory):
        self.knowntype = knowntype
        self.ownsmemory = ownsmemory
        # 'ownsmemory' specifies if the object is *statically known* to own
        # its C memory.  If it is False, it will be rtyped as an alias object.
        # Alias objects are allowed, at run-time, to have keepalives, so
        # that they can indirectly own their memory too (it's just less
        # efficient).

    def can_be_none(self):
        # only 'py_object' can also be None
        import ctypes
        return issubclass(self.knowntype, ctypes.py_object)

    def return_annotation(self):
        """Returns either 'self' or the annotation of the unwrapped version
        of this ctype, following the logic used when ctypes operations
        return a value.
        """
        from pypy.rpython import extregistry
        assert extregistry.is_registered_type(self.knowntype)
        entry = extregistry.lookup_type(self.knowntype)
        # special case for returning primitives or c_char_p
        return getattr(entry, 's_return_trick', self)
class SomeImpossibleValue(SomeObject):
    """The empty set.  Instances are placeholders for objects that
    will never show up at run-time, e.g. elements of an empty list."""
    immutable = True
    # presumably a display colour for graph viewers -- confirm
    annotationcolor = (160,160,160)

    def can_be_none(self):
        return False
# prebuilt, frequently-used annotations
s_None = SomePBC([], can_be_None=True)
s_Bool = SomeBool()
s_ImpossibleValue = SomeImpossibleValue()
# ____________________________________________________________
# memory addresses
from pypy.rpython.lltypesystem import llmemory
class SomeAddress(SomeObject):
    """Annotation for a raw memory address (llmemory.Address)."""
    immutable = True

    def __init__(self, is_null=False):
        self.is_null = is_null

    def can_be_none(self):
        return False
class SomeWeakGcAddress(SomeObject):
    """Annotation for a weak GC address (llmemory.WeakGcAddress)."""
    immutable = True
# The following class is used to annotate the intermediate value that
# appears in expressions of the form:
# addr.signed[offset] and addr.signed[offset] = value
class SomeTypedAddressAccess(SomeObject):
    """Intermediate annotation for the value appearing in
    'addr.signed[offset]'-style expressions."""

    def __init__(self, type):
        self.type = type

    def can_be_none(self):
        return False
#____________________________________________________________
# annotation of low-level types
class SomePtr(SomeObject):
    """Annotation for a low-level pointer of a known Ptr type."""
    immutable = True

    def __init__(self, ll_ptrtype):
        assert isinstance(ll_ptrtype, lltype.Ptr)
        self.ll_ptrtype = ll_ptrtype

    def can_be_none(self):
        return False
class SomeLLADTMeth(SomeObject):
    """Annotation for an ADT method bound to a low-level pointer."""
    immutable = True

    def __init__(self, ll_ptrtype, func):
        self.ll_ptrtype = ll_ptrtype
        self.func = func

    def can_be_none(self):
        return False
class SomeOOClass(SomeObject):
    """Annotation for an ootype class object."""

    def __init__(self, ootype):
        self.ootype = ootype
class SomeOOInstance(SomeObject):
    """Annotation for an instance of an ootype class."""

    def __init__(self, ootype, can_be_None=False):
        self.ootype = ootype
        self.can_be_None = can_be_None
class SomeOOBoundMeth(SomeObject):
    """Annotation for a bound method of an ootype instance."""
    immutable = True

    def __init__(self, ootype, name):
        self.ootype = ootype
        self.name = name
class SomeOOStaticMeth(SomeObject):
    """Annotation for an ootype static method."""
    immutable = True

    def __init__(self, method):
        self.method = method
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.ootypesystem import ootype
# Sentinel: the annotation maps to an lltype number type computed from
# the annotation's knowntype (see annotation_to_lltype).
NUMBER = object()
# ordered (annotation, lltype) pairs tried by annotation_to_lltype()
annotation_to_ll_map = [
    (s_None, lltype.Void),   # also matches SomeImpossibleValue()
    (s_Bool, lltype.Bool),
    (SomeInteger(knowntype=r_ulonglong), NUMBER),
    (SomeFloat(), lltype.Float),
    (SomeChar(), lltype.Char),
    (SomeUnicodeCodePoint(), lltype.UniChar),
    (SomeAddress(), llmemory.Address),
    (SomeWeakGcAddress(), llmemory.WeakGcAddress),
]
def annotation_to_lltype(s_val, info=None):
    """Return the low-level type that the annotation *s_val* maps to.

    Raises ValueError (prefixed with *info* if given) when the
    annotation has no low-level counterpart.
    """
    if isinstance(s_val, SomeOOInstance):
        return s_val.ootype
    if isinstance(s_val, SomeOOStaticMeth):
        return s_val.method
    if isinstance(s_val, SomePtr):
        return s_val.ll_ptrtype
    for witness, T in annotation_to_ll_map:
        if witness.contains(s_val):
            if T is NUMBER:
                # pick the lltype number matching the exact integer type
                return lltype.build_number(None, s_val.knowntype)
            return T
    if info is None:
        info = ''
    else:
        info = '%s: ' % info
    raise ValueError("%sshould return a low-level type,\ngot instead %r" % (
        info, s_val))
ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER])
def lltype_to_annotation(T):
    """Return the annotation standing for values of the low-level
    type *T*."""
    from pypy.rpython.ootypesystem.bltregistry import ExternalType
    try:
        s = ll_to_annotation_map.get(T)
    except TypeError:
        s = None   # unhashable T, e.g. a Ptr(GcForwardReference())
    if s is None:
        if isinstance(T, lltype.Number):
            return SomeInteger(knowntype=T._type)
        if isinstance(T, (ootype.Instance, ootype.BuiltinType)):
            return SomeOOInstance(T)
        elif isinstance(T, ootype.StaticMethod):
            return SomeOOStaticMeth(T)
        elif T == ootype.Class:
            return SomeOOClass(ootype.ROOT)
        elif isinstance(T, ExternalType):
            return SomeExternalInstance(T._class_)
        else:
            # anything else is assumed to be a pointer type
            return SomePtr(T)
    else:
        return s
def ll_to_annotation(v):
    """Return the annotation of the low-level *value* v."""
    if v is None:
        # i think we can only get here in the case of void-returning
        # functions
        return s_None
    if isinstance(v, MethodType):
        # a bound ADT method on a low-level pointer
        ll_ptrtype = lltype.typeOf(v.im_self)
        assert isinstance(ll_ptrtype, lltype.Ptr)
        return SomeLLADTMeth(ll_ptrtype, v.im_func)
    if isinstance(v, FunctionType):
        # this case should only be for staticmethod instances used in
        # adtmeths: the getattr() result is then a plain FunctionType object.
        from pypy.annotation.bookkeeper import getbookkeeper
        return getbookkeeper().immutablevalue(v)
    return lltype_to_annotation(lltype.typeOf(v))
# ____________________________________________________________
class UnionError(Exception):
    """Signals a suspicious attempt at taking the union of
    deeply incompatible SomeXxx instances."""
def unionof(*somevalues):
    """The most precise SomeValue instance that contains all the values."""
    if len(somevalues) == 2:
        # performance shortcut for the common two-argument case
        s1, s2 = somevalues
        if s1 != s2:
            s1 = pair(s1, s2).union()
    else:
        s1 = s_ImpossibleValue
        for s2 in somevalues:
            if s1 != s2:
                s1 = pair(s1, s2).union()
    if DEBUG and s1.caused_by_merge is None and len(somevalues) > 1:
        # record which values this annotation was merged from (debugging)
        s1.caused_by_merge = somevalues
    return s1
def isdegenerated(s_value):
    """True if *s_value* has degenerated to the most general annotation
    (a plain SomeObject not standing for a type object)."""
    if s_value.__class__ is not SomeObject:
        return False
    return s_value.knowntype is not type
# make knowntypedata dictionary
def add_knowntypedata(ktd, truth, vars, s_obj):
    """Record in the knowntypedata dict *ktd* that each variable of
    *vars* is annotated *s_obj* when the condition is *truth*."""
    for var in vars:
        ktd[truth, var] = s_obj
def merge_knowntypedata(ktd1, ktd2):
    """Intersect two knowntypedata dicts: keep only the keys present in
    both, with the union of their annotations."""
    merged = {}
    for key in ktd1:
        if key in ktd2:
            merged[key] = unionof(ktd1[key], ktd2[key])
    return merged
def not_const(s_obj):
    """Return a copy of *s_obj* with its constantness removed (or
    *s_obj* itself if it was not constant)."""
    if s_obj.is_constant():
        s_new = SomeObject()
        s_new.__class__ = s_obj.__class__
        dic = s_new.__dict__ = s_obj.__dict__.copy()
        if 'const' in dic:
            del s_new.const
        else:
            del s_new.const_box
        s_obj = s_new
    return s_obj
# ____________________________________________________________
# internal
def setunion(d1, d2):
    """Union of two sets represented as dictionaries."""
    result = dict(d1)
    result.update(d2)
    return result
def set(it):
    """Turn an iterable into a set (a dict mapping elements to True).

    Note: intentionally shadows the builtin of the same name.
    """
    return dict.fromkeys(it, True)
def commonbase(cls1, cls2): # XXX single inheritance only XXX hum
    """Return the most derived class from which both *cls1* and *cls2*
    inherit (the first common entry of their MROs)."""
    mro1 = inspect.getmro(cls1)
    mro2 = inspect.getmro(cls2)
    # old-style classes may not list 'object'; append it explicitly
    if mro1[-1] != object:
        mro1 = mro1 + (object,)
    if mro2[-1] != object:
        mro2 = mro2 + (object,)
    for candidate in mro1:
        if candidate in mro2:
            return candidate
    assert 0, "couldn't get to commonbase of %r and %r" % (cls1, cls2)
def missing_operation(cls, name):
    """Install a default implementation for the operation *name* on the
    annotation class *cls*: degenerate to SomeObject, or warn and give
    up with s_ImpossibleValue."""
    def default_op(*args):
        # binary operations receive (pair-tuple, extra args); flatten it
        if args and isinstance(args[0], tuple):
            flattened = tuple(args[0]) + args[1:]
        else:
            flattened = args
        for arg in flattened:
            # if any argument is already degenerated, degenerate silently
            if arg.__class__ is SomeObject and arg.knowntype is not type:
                return SomeObject()
        bookkeeper = pypy.annotation.bookkeeper.getbookkeeper()
        bookkeeper.warning("no precise annotation supplied for %s%r" % (name, args))
        return s_ImpossibleValue
    setattr(cls, name, default_op)
# raised instead of BlockedInference in the harmless cases
class HarmlesslyBlocked(Exception):
    """Raised by the unaryop/binaryop to signal a harmless kind of
    BlockedInference: the current block is blocked, but not in a way
    that gives 'Blocked block' errors at the end of annotation."""
def read_can_only_throw(opimpl, *args):
    """Return the exceptions that the operation *opimpl* can raise.

    The 'can_only_throw' attribute may be absent (returns None), a list
    of exception classes, or a callable computing that list from *args*.
    """
    declared = getattr(opimpl, "can_only_throw", None)
    if declared is None or isinstance(declared, list):
        return declared
    return declared(*args)
#
# safety check that no-one is trying to make annotation and translation
# faster by providing the -O option to Python.
try:
    assert False
except AssertionError:
    pass # fine
else:
    # 'assert False' did not raise: asserts are disabled (python -O)
    raise RuntimeError("The annotator relies on 'assert' statements from the\n"
                       "\tannotated program: you cannot run it with 'python -O'.")
# this has the side-effect of registering the unary and binary operations
from pypy.annotation.unaryop import UNARY_OPERATIONS
from pypy.annotation.binaryop import BINARY_OPERATIONS
# ---- file boundary: pypy/annotation/bookkeeper.py follows ----
"""
The Bookkeeper class.
"""
from __future__ import generators
import sys, types, inspect
from pypy.objspace.flow.model import Constant
from pypy.annotation.model import SomeString, SomeChar, SomeFloat, \
SomePtr, unionof, SomeInstance, SomeDict, SomeBuiltin, SomePBC, \
SomeInteger, SomeExternalObject, SomeOOInstance, TLS, SomeAddress, \
SomeUnicodeCodePoint, SomeOOStaticMeth, s_None, s_ImpossibleValue, \
SomeLLADTMeth, SomeBool, SomeTuple, SomeOOClass, SomeImpossibleValue, \
SomeList, SomeObject, SomeWeakGcAddress, HarmlesslyBlocked
from pypy.annotation.classdef import ClassDef, InstanceSource
from pypy.annotation.listdef import ListDef, MOST_GENERAL_LISTDEF
from pypy.annotation.dictdef import DictDef, MOST_GENERAL_DICTDEF
from pypy.annotation import description
from pypy.annotation.signature import annotationoftype
from pypy.interpreter.argument import Arguments, ArgErr
from pypy.rlib.rarithmetic import r_int, r_uint, r_ulonglong, r_longlong
from pypy.rlib.rarithmetic import base_int
from pypy.rlib.objectmodel import r_dict, Symbolic
from pypy.tool.algo.unionfind import UnionFind
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.memory import lladdress
from pypy.rpython import extregistry
class Stats:
    """Collects classification statistics about the operations seen
    during annotation, keyed by category and position_key."""

    def __init__(self, bookkeeper):
        self.bookkeeper = bookkeeper
        self.classify = {}    # {category: {position_key: outcome}}

    def count(self, category, *args):
        # dispatch to a consider_<category>() method when one exists
        for_category = self.classify.setdefault(category, {})
        classifier = getattr(self, 'consider_%s' % category, self.consider_generic)
        outcome = classifier(*args)
        for_category[self.bookkeeper.position_key] = outcome

    def indexrepr(self, idx):
        # short human-readable classification of an index annotation
        if idx.is_constant():
            if idx.const is None:
                return ''
            if isinstance(idx, SomeInteger):
                if idx.const >=0:
                    return 'pos-constant'
                else:
                    return 'Neg-constant'
            return idx.const
        else:
            if isinstance(idx, SomeInteger):
                if idx.nonneg:
                    return "non-neg"
                else:
                    return "MAYBE-NEG"
            else:
                return self.typerepr(idx)

    def steprepr(self, stp):
        # classification of a slice step annotation
        if stp.is_constant():
            if stp.const in (1, None):
                return 'step=1'
            else:
                return 'step=%s?' % stp.const
        else:
            return 'non-const-step %s' % self.typerepr(stp)

    def consider_generic(self, *args):
        # default classifier: just the types of the arguments
        return tuple([self.typerepr(x) for x in args])

    def consider_newslice(self, s_start, s_stop, s_step):
        return ':'.join([self.indexrepr(s_start), self.indexrepr(s_stop), self.steprepr(s_step)])

    def consider_list_list_eq(self, obj1, obj2):
        return obj1, obj2

    def consider_contains(self, seq):
        return seq

    def consider_non_int_eq(self, obj1, obj2):
        # additionally count list-to-list comparisons separately
        if obj1.knowntype == obj2.knowntype == list:
            self.count("list_list_eq", obj1, obj2)
        return self.typerepr(obj1), self.typerepr(obj2)

    def consider_non_int_comp(self, obj1, obj2):
        return self.typerepr(obj1), self.typerepr(obj2)

    def typerepr(self, obj):
        # short name of the type an annotation stands for
        if isinstance(obj, SomeInstance):
            return obj.classdef.name
        else:
            return obj.knowntype.__name__

    def consider_tuple_iter(self, tup):
        # include the caller's function name as context
        ctxt = "[%s]" % sys._getframe(4).f_code.co_name
        if tup.is_constant():
            return ctxt, tup.const
        else:
            return ctxt, tuple([self.typerepr(x) for x in tup.items])

    def consider_tuple_random_getitem(self, tup):
        return tuple([self.typerepr(x) for x in tup.items])

    def consider_list_index(self):
        return '!'

    def consider_list_getitem(self, idx):
        return self.indexrepr(idx)

    def consider_list_setitem(self, idx):
        return self.indexrepr(idx)

    def consider_list_delitem(self, idx):
        return self.indexrepr(idx)

    def consider_str_join(self, s):
        if s.is_constant():
            return repr(s.const)
        else:
            return "NON-CONSTANT"

    def consider_str_getitem(self, idx):
        return self.indexrepr(idx)

    def consider_strformat(self, str, args):
        if str.is_constant():
            s = repr(str.const)
        else:
            s = "?!!!!!!"
        if isinstance(args, SomeTuple):
            return (s, tuple([self.typerepr(x) for x in args.items]))
        else:
            return (s, self.typerepr(args))

    def consider_dict_getitem(self, dic):
        return dic

    def consider_dict_setitem(self, dic):
        return dic

    def consider_dict_delitem(self, dic):
        return dic
class Bookkeeper:
"""The log of choices that have been made while analysing the operations.
It ensures that the same 'choice objects' will be returned if we ask
again during reflowing. Like ExecutionContext, there is an implicit
Bookkeeper that can be obtained from a thread-local variable.
Currently used for factories and user-defined classes."""
    def __setstate__(self, dic):
        # called on unpickling: restore the attributes, then redo the
        # delayed imports (same as at the end of __init__)
        self.__dict__.update(dic) # normal action
        delayed_imports()
    def __init__(self, annotator):
        self.annotator = annotator
        self.policy = annotator.policy
        self.descs = {}          # map Python objects to their XxxDesc wrappers
        self.methoddescs = {}    # map (funcdesc, classdef) to the MethodDesc
        self.classdefs = []      # list of all ClassDefs
        self.pbctypes = {}
        self.seen_mutable = {}
        self.listdefs = {}       # map position_keys to ListDefs
        self.dictdefs = {}       # map position_keys to DictDefs
        self.immutable_cache = {}
        self.classpbc_attr_families = {} # {'attr': UnionFind(ClassAttrFamily)}
        self.frozenpbc_attr_families = UnionFind(description.FrozenAttrFamily)
        self.pbc_maximal_call_families = UnionFind(description.CallFamily)
        self.emulated_pbc_calls = {}
        self.all_specializations = {}     # {FuncDesc: specialization-info}
        self.pending_specializations = [] # list of callbacks
        self.external_class_cache = {}    # cache of ExternalType classes
        self.needs_hash_support = {}
        self.needs_generic_instantiate = {}
        self.stats = Stats(self)
        # resolve circular imports that could not be done at module level
        delayed_imports()
def count(self, category, *args):
self.stats.count(category, *args)
    def enter(self, position_key):
        """Start of an operation.
        The operation is uniquely identified by the given key."""
        assert not hasattr(self, 'position_key'), "don't call enter() nestedly"
        self.position_key = position_key
        # publish ourselves as the thread-local current bookkeeper
        TLS.bookkeeper = self
    def leave(self):
        """End of an operation."""
        # undo what enter() did, in reverse order
        del TLS.bookkeeper
        del self.position_key
    def compute_at_fixpoint(self):
        """Called when annotation reaches a fixpoint: process the call
        sites found in the (newly annotated) blocks and the emulated
        PBC calls, and prune redundant hash-support requests."""
        # getbookkeeper() needs to work during this function, so provide
        # one with a dummy position
        self.enter(None)
        try:
            def call_sites():
                # enumerate the call operations of the relevant blocks
                newblocks = self.annotator.added_blocks
                if newblocks is None:
                    newblocks = self.annotator.annotated # all of them
                binding = self.annotator.binding
                for block in newblocks:
                    for op in block.operations:
                        if op.opname in ('simple_call', 'call_args'):
                            yield op
                        # some blocks are partially annotated
                        if binding(op.result, None) is None:
                            break   # ignore the unannotated part
            for call_op in call_sites():
                self.consider_call_site(call_op)
            for pbc, args_s in self.emulated_pbc_calls.itervalues():
                # emulated calls are treated as 'simple_call' sites
                self.consider_call_site_for_pbc(pbc, 'simple_call',
                                                args_s, s_ImpossibleValue)
            self.emulated_pbc_calls = {}
            for clsdef in self.needs_hash_support.keys():
                # drop classes whose hash support is already covered by
                # a superclass in the set
                for clsdef2 in self.needs_hash_support:
                    if clsdef.issubclass(clsdef2) and clsdef is not clsdef2:
                        del self.needs_hash_support[clsdef]
                        break
        finally:
            self.leave()
    def consider_call_site(self, call_op):
        """Record one 'simple_call'/'call_args' operation in the call
        family of its callee (when the callee is a PBC)."""
        binding = self.annotator.binding
        s_callable = binding(call_op.args[0])
        args_s = [binding(arg) for arg in call_op.args[1:]]
        if isinstance(s_callable, SomeLLADTMeth):
            # an ADT method call: the real callee is the underlying
            # function, with the pointer prepended as first argument
            adtmeth = s_callable
            s_callable = self.immutablevalue(adtmeth.func)
            args_s = [SomePtr(adtmeth.ll_ptrtype)] + args_s
        if isinstance(s_callable, SomePBC):
            s_result = binding(call_op.result, s_ImpossibleValue)
            self.consider_call_site_for_pbc(s_callable,
                                            call_op.opname,
                                            args_s, s_result)
    def consider_call_site_for_pbc(self, s_callable, opname, args_s, s_result):
        """Record a call to the prebuilt constant *s_callable* in the
        call family shared by its descriptions."""
        descs = s_callable.descriptions.keys()
        if not descs:
            # the None PBC: nothing to record
            return
        family = descs[0].getcallfamily()
        args = self.build_args(opname, args_s)
        s_callable.getKind().consider_call_site(self, family, descs, args,
                                                s_result)
def getuniqueclassdef(self, cls):
    """Get the ClassDef associated with the given user cls.

    Avoid using this!  It breaks for classes that must be specialized.
    """
    if cls is object:
        return None
    return self.getdesc(cls).getuniqueclassdef()
def getlistdef(self, **flags):
    """Return the ListDef cached for the current position, creating it
    on first use; 'flags' are merged into its ListItem."""
    key = self.position_key
    if key not in self.listdefs:
        self.listdefs[key] = ListDef(self)
    listdef = self.listdefs[key]
    listdef.listitem.__dict__.update(flags)
    return listdef
def newlist(self, *s_values, **flags):
    """Make a SomeList associated with the current position that is
    general enough to contain every annotation in s_values."""
    listdef = self.getlistdef(**flags)
    for s_item in s_values:
        listdef.generalize(s_item)
    return SomeList(listdef)
def getdictdef(self, is_r_dict=False):
    """Return the DictDef cached for the current position, creating it
    on first use."""
    key = self.position_key
    if key not in self.dictdefs:
        self.dictdefs[key] = DictDef(self, is_r_dict=is_r_dict)
    return self.dictdefs[key]
def newdict(self):
    """Make a so-far empty SomeDict associated with the current
    position."""
    dictdef = self.getdictdef()
    return SomeDict(dictdef)
def immutableconstant(self, const):
    """Annotate a flow-graph Constant via its wrapped value."""
    value = const.value
    return self.immutablevalue(value)
def immutablevalue(self, x, need_const=True):
    """The most precise SomeValue instance that contains the
    immutable value x.

    With need_const (the default) the result is cached in
    self.immutable_cache and carries a const/const_box field
    remembering the constant itself.
    """
    # convert unbound methods to the underlying function
    if hasattr(x, 'im_self') and x.im_self is None:
        x = x.im_func
        assert not hasattr(x, 'im_self')
    if x is sys: # special case constant sys to someobject
        return SomeObject()
    tp = type(x)
    if issubclass(tp, Symbolic): # symbolic constants support
        result = x.annotation()
        result.const_box = Constant(x)
        return result
    if tp is bool:
        result = SomeBool()
    elif tp is int:
        result = SomeInteger(nonneg = x>=0)
    elif tp is long and 0 <= x <= (sys.maxint * 2 + 1):
        # a long that still fits an unsigned machine word
        result = SomeInteger(unsigned = True)
    elif issubclass(tp, str): # py.lib uses annotated str subclasses
        if len(x) == 1:
            result = SomeChar()
        else:
            result = SomeString()
    elif tp is unicode and len(x) == 1:
        result = SomeUnicodeCodePoint()
    elif tp is tuple:
        result = SomeTuple(items = [self.immutablevalue(e, need_const) for e in x])
    elif tp is float:
        result = SomeFloat()
    elif tp is list:
        if need_const:
            key = Constant(x)
            try:
                return self.immutable_cache[key]
            except KeyError:
                # register the result in the cache *before* recursing,
                # so that self-referencing lists terminate
                result = SomeList(ListDef(self, s_ImpossibleValue))
                self.immutable_cache[key] = result
                for e in x:
                    result.listdef.generalize(self.immutablevalue(e))
                result.const_box = key
                return result
        else:
            listdef = ListDef(self, s_ImpossibleValue)
            for e in x:
                listdef.generalize(self.immutablevalue(e, False))
            result = SomeList(listdef)
    elif tp is dict or tp is r_dict:
        if need_const:
            key = Constant(x)
            try:
                return self.immutable_cache[key]
            except KeyError:
                # cache first, for the same reason as the list case above
                result = SomeDict(DictDef(self,
                                          s_ImpossibleValue,
                                          s_ImpossibleValue,
                                          is_r_dict = tp is r_dict))
                self.immutable_cache[key] = result
                if tp is r_dict:
                    s_eqfn = self.immutablevalue(x.key_eq)
                    s_hashfn = self.immutablevalue(x.key_hash)
                    result.dictdef.dictkey.update_rdict_annotations(s_eqfn,
                                                                    s_hashfn)
                done = False
                while not done:
                    try:
                        for ek, ev in x.iteritems():
                            result.dictdef.generalize_key(self.immutablevalue(ek))
                            result.dictdef.generalize_value(self.immutablevalue(ev))
                    except RuntimeError, r:
                        # the dict changed size while being iterated over
                        # (annotating an item can recursively mutate it):
                        # simply restart the iteration
                        pass
                    else:
                        done = True
                result.const_box = key
                return result
        else:
            dictdef = DictDef(self,
                              s_ImpossibleValue,
                              s_ImpossibleValue,
                              is_r_dict = tp is r_dict)
            if tp is r_dict:
                s_eqfn = self.immutablevalue(x.key_eq)
                s_hashfn = self.immutablevalue(x.key_hash)
                dictdef.dictkey.update_rdict_annotations(s_eqfn,
                                                         s_hashfn)
            for ek, ev in x.iteritems():
                dictdef.generalize_key(self.immutablevalue(ek, False))
                dictdef.generalize_value(self.immutablevalue(ev, False))
            result = SomeDict(dictdef)
    elif ishashable(x) and x in BUILTIN_ANALYZERS:
        _module = getattr(x,"__module__","unknown")
        result = SomeBuiltin(BUILTIN_ANALYZERS[x], methodname="%s.%s" % (_module, x.__name__))
    elif extregistry.is_registered(x, self.policy):
        entry = extregistry.lookup(x, self.policy)
        result = entry.compute_annotation_bk(self)
    elif tp in EXTERNAL_TYPE_ANALYZERS:
        result = SomeExternalObject(tp)
    elif isinstance(x, lltype._ptr):
        result = SomePtr(lltype.typeOf(x))
    elif isinstance(x, llmemory.fakeaddress):
        result = SomeAddress(is_null=not x)
    elif isinstance(x, llmemory.fakeweakaddress):
        result = SomeWeakGcAddress()
    elif isinstance(x, ootype._static_meth):
        result = SomeOOStaticMeth(ootype.typeOf(x))
    elif isinstance(x, ootype._class):
        result = SomeOOClass(x._INSTANCE)   # NB. can be None
    elif isinstance(x, ootype.instance_impl): # XXX
        result = SomeOOInstance(ootype.typeOf(x))
    elif callable(x):
        if hasattr(x, '__self__') and x.__self__ is not None:
            # for cases like 'l.append' where 'l' is a global constant list
            s_self = self.immutablevalue(x.__self__, need_const)
            result = s_self.find_method(x.__name__)
            if result is None:
                result = SomeObject()
        elif hasattr(x, 'im_self') and hasattr(x, 'im_func'):
            # on top of PyPy, for cases like 'l.append' where 'l' is a
            # global constant list, the find_method() returns non-None
            s_self = self.immutablevalue(x.im_self, need_const)
            result = s_self.find_method(x.im_func.__name__)
        else:
            result = None
        if result is None:
            if (self.annotator.policy.allow_someobjects
                and getattr(x, '__module__', None) == '__builtin__'
                # XXX note that the print support functions are __builtin__
                and tp not in (types.FunctionType, types.MethodType)):
                result = SomeObject()
                result.knowntype = tp # at least for types this needs to be correct
            else:
                result = SomePBC([self.getdesc(x)])
    elif hasattr(x, '_freeze_') and x._freeze_():
        # user-defined classes can define a method _freeze_(), which
        # is called when a prebuilt instance is found.  If the method
        # returns True, the instance is considered immutable and becomes
        # a SomePBC().  Otherwise it's just SomeInstance().
        result = SomePBC([self.getdesc(x)])
    elif hasattr(x, '__class__') \
             and x.__class__.__module__ != '__builtin__':
        # mutable prebuilt instance of a user-defined class
        self.see_mutable(x)
        result = SomeInstance(self.getuniqueclassdef(x.__class__))
    elif x is None:
        return s_None
    else:
        result = SomeObject()
    if need_const:
        result.const = x
    return result
def getdesc(self, pyobj):
    # get the XxxDesc wrapper for the given Python object, which must be
    # one of:
    #  * a user-defined Python function
    #  * a Python type or class (but not a built-in one like 'int')
    #  * a user-defined bound or unbound method object
    #  * a frozen pre-built constant (with _freeze_() == True)
    #  * a bound method of a frozen pre-built constant
    # The result is cached in self.descs.
    try:
        return self.descs[pyobj]
    except KeyError:
        if isinstance(pyobj, types.FunctionType):
            result = description.FunctionDesc(self, pyobj)
        elif isinstance(pyobj, (type, types.ClassType)):
            if pyobj is object:
                raise Exception, "ClassDesc for object not supported"
            if pyobj.__module__ == '__builtin__': # avoid making classdefs for builtin types
                result = self.getfrozen(pyobj)
            else:
                result = description.ClassDesc(self, pyobj)
        elif isinstance(pyobj, types.MethodType):
            if pyobj.im_self is None:   # unbound
                return self.getdesc(pyobj.im_func)
            elif (hasattr(pyobj.im_self, '_freeze_') and
                  pyobj.im_self._freeze_()):    # method of frozen
                result = description.MethodOfFrozenDesc(self,
                    self.getdesc(pyobj.im_func),            # funcdesc
                    self.getdesc(pyobj.im_self))            # frozendesc
            else: # regular method
                origincls, name = origin_of_meth(pyobj)
                self.see_mutable(pyobj.im_self)
                assert pyobj == getattr(pyobj.im_self, name), (
                    "%r is not %s.%s ??" % (pyobj, pyobj.im_self, name))
                # emulate a getattr to make sure it's on the classdef
                classdef = self.getuniqueclassdef(pyobj.im_class)
                classdef.find_attribute(name)
                result = self.getmethoddesc(
                    self.getdesc(pyobj.im_func),            # funcdesc
                    self.getuniqueclassdef(origincls),      # originclassdef
                    classdef,                               # selfclassdef
                    name)
        else:
            # must be a frozen pre-built constant, but let's check
            try:
                assert pyobj._freeze_()
            except AttributeError:
                raise Exception("unexpected prebuilt constant: %r" % (
                    pyobj,))
            result = self.getfrozen(pyobj)
        self.descs[pyobj] = result
        return result
def have_seen(self, x):
    """Return whether x already has a Desc or was seen as mutable."""
    # this might need to expand some more.
    return x in self.descs or x in self.seen_mutable
def getfrozen(self, pyobj):
    """Build a FrozenDesc for pyobj and register its knowntype."""
    result = description.FrozenDesc(self, pyobj)
    self.pbctypes.setdefault(result.knowntype, True)
    return result
def getmethoddesc(self, funcdesc, originclassdef, selfclassdef, name,
                  flags={}):
    """Return the cached MethodDesc for this combination of function,
    origin class, self class, name and flags, creating it if needed."""
    key = (funcdesc, originclassdef, selfclassdef, name,
           tuple(sorted(flags.items())))
    try:
        return self.methoddescs[key]
    except KeyError:
        result = description.MethodDesc(self, funcdesc, originclassdef,
                                        selfclassdef, name, flags)
        self.methoddescs[key] = result
        return result
def see_mutable(self, x):
    """Record the prebuilt instance x as mutable; each of its attributes
    becomes a source for the corresponding classdef attribute (which can
    trigger reflowing)."""
    if x in self.seen_mutable:
        return
    clsdef = self.getuniqueclassdef(x.__class__)
    self.seen_mutable[x] = True
    self.event('mutable', x)
    source = InstanceSource(self, x)
    for attr in source.all_instance_attributes():
        clsdef.add_source_for_attribute(attr, source) # can trigger reflowing
def valueoftype(self, t):
    """Return the annotation corresponding to the (Python) type t."""
    s_type = annotationoftype(t, self)
    return s_type
def get_classpbc_attr_families(self, attrname):
    """Return the UnionFind for the ClassAttrFamilies corresponding to
    attributes of the given name, creating it on first request.
    """
    families_by_attr = self.classpbc_attr_families
    if attrname not in families_by_attr:
        families_by_attr[attrname] = UnionFind(description.ClassAttrFamily)
    return families_by_attr[attrname]
def getexternaldesc(self, class_):
    """Return (and cache) the ExternalInstanceDesc for class_.

    Fix: the local variable was called 'next', shadowing the builtin
    next(); renamed to 'desc'.  Behavior is unchanged: the desc is
    cached *before* setup() is called, matching the original order.
    """
    try:
        return self.external_class_cache[class_]
    except KeyError:
        from pypy.rpython.ootypesystem import bltregistry
        desc = bltregistry.ExternalInstanceDesc(class_)
        self.external_class_cache[class_] = desc
        desc.setup()
        return desc
def pbc_getattr(self, pbc, s_attr):
    """Annotate reading attribute 's_attr' (a constant name) out of the
    prebuilt constants described by 'pbc': merge the attr families of
    all descriptions and return the union of the attribute values."""
    assert s_attr.is_constant()
    attr = s_attr.const

    descs = pbc.descriptions.keys()
    if not descs:
        return s_ImpossibleValue
    first = descs[0]
    if len(descs) == 1:
        # a single description: no family merging needed
        return first.s_read_attribute(attr)

    change = first.mergeattrfamilies(descs[1:], attr)
    attrfamily = first.getattrfamily(attr)

    position = self.position_key
    attrfamily.read_locations[position] = True

    actuals = []
    for desc in descs:
        actuals.append(desc.s_read_attribute(attr))
    s_result = unionof(*actuals)

    s_oldvalue = attrfamily.get_s_value(attr)
    attrfamily.set_s_value(attr, unionof(s_result, s_oldvalue))

    if change:
        # families were merged: reflow every place that read this attr
        for position in attrfamily.read_locations:
            self.annotator.reflowfromposition(position)

    if isinstance(s_result, SomeImpossibleValue):
        # getattr on an attribute listed in _attrs_ but never written:
        # block harmlessly instead of failing
        for desc in descs:
            try:
                attrs = desc.read_attribute('_attrs_')
            except AttributeError:
                continue
            if isinstance(attrs, Constant):
                attrs = attrs.value
            if attr in attrs:
                raise HarmlesslyBlocked("getattr on enforced attr")

    return s_result
def pbc_call(self, pbc, args, emulated=None):
    """Analyse a call to a SomePBC() with the given args (list of
    annotations).

    'emulated' is None for a real call site, True for an emulated one,
    or a callback for the callback case.
    """
    descs = pbc.descriptions.keys()
    if not descs:
        return s_ImpossibleValue
    first = descs[0]
    first.mergecallfamilies(*descs[1:])

    if emulated is None:
        whence = self.position_key
        # fish the existing annotation for the result variable,
        # needed by some kinds of specialization.
        fn, block, i = self.position_key
        op = block.operations[i]
        s_previous_result = self.annotator.binding(op.result,
                                                   s_ImpossibleValue)
    else:
        if emulated is True:
            whence = None
        else:
            whence = emulated # callback case
        s_previous_result = s_ImpossibleValue

    def schedule(graph, inputcells):
        # let each desc schedule the annotation of its graph
        return self.annotator.recursivecall(graph, whence, inputcells)

    results = []
    for desc in descs:
        results.append(desc.pycall(schedule, args, s_previous_result))
    s_result = unionof(*results)
    return s_result
def emulate_pbc_call(self, unique_key, pbc, args_s, replace=[], callback=None):
    """Record and annotate a call to 'pbc' that appears in no flow graph
    (e.g. calls to the eq/hash functions of an r_dict).

    'unique_key' identifies this emulated call site; 'replace' lists
    older keys that become obsolete.  NOTE: the mutable default
    'replace=[]' is safe here because it is only iterated, never
    mutated.
    """
    emulate_enter = not hasattr(self, 'position_key')
    if emulate_enter:
        self.enter(None)
    try:
        emulated_pbc_calls = self.emulated_pbc_calls
        prev = [unique_key]
        prev.extend(replace)
        for other_key in prev:
            if other_key in emulated_pbc_calls:
                del emulated_pbc_calls[other_key]
        emulated_pbc_calls[unique_key] = pbc, args_s

        args = self.build_args("simple_call", args_s)
        if callback is None:
            emulated = True
        else:
            emulated = callback
        return self.pbc_call(pbc, args, emulated=emulated)
    finally:
        if emulate_enter:
            self.leave()
def build_args(self, op, args_s):
    """Build an Arguments object for the given call operation.

    For 'call_args', args_s[0] is the constant call shape.
    """
    space = RPythonCallsSpace()
    if op == "simple_call":
        return Arguments(space, list(args_s))
    if op == "call_args":
        shape = args_s[0].const
        return Arguments.fromshape(space, shape, list(args_s[1:]))
def ondegenerated(self, what, s_value, where=None, called_from_graph=None):
    """Forward a degenerated-annotation report to the annotator."""
    self.annotator.ondegenerated(
        what, s_value, where=where, called_from_graph=called_from_graph)
def whereami(self):
    """Return a human-readable description of the current position."""
    position = self.position_key
    return self.annotator.whereami(position)
def event(self, what, x):
    """Dispatch an annotation event to the current policy."""
    policy = self.annotator.policy
    return policy.event(self, what, x)
def warning(self, msg):
    """Forward a warning message to the annotator."""
    annotator = self.annotator
    return annotator.warning(msg)
def origin_of_meth(boundmeth):
    """Find the (class, attrname) where the bound method's underlying
    function was originally defined, walking the MRO."""
    func = boundmeth.im_func
    candname = func.func_name
    for klass in inspect.getmro(boundmeth.im_class):
        namespace = klass.__dict__
        # fast path: the function is stored under its own name
        if namespace.get(candname) is func:
            return klass, candname
        # slow path: search the whole class namespace
        for attrname, attrvalue in namespace.iteritems():
            if attrvalue is func:
                return klass, attrname
    raise Exception("could not match bound-method to attribute name: %r" % (boundmeth,))
def ishashable(x):
    """Return True if x can be hashed, False otherwise."""
    try:
        hash(x)
        return True
    except TypeError:
        return False
# for parsing call arguments
class RPythonCallsSpace:
    """Pseudo Object Space providing almost no real operation.
    For the Arguments class: if it really needs other operations, it means
    that the call pattern is too complex for R-Python.
    """
    w_tuple = SomeTuple

    def newtuple(self, items_s):
        if items_s != [Ellipsis]:
            return SomeTuple(items_s)
        # hack to get a SomeObject as the *arg
        s_obj = SomeObject()
        s_obj.from_ellipsis = True
        return s_obj

    def newdict(self):
        raise CallPatternTooComplex("'**' argument")

    def unpackiterable(self, s_obj, expected_length=None):
        if isinstance(s_obj, SomeTuple):
            if (expected_length is not None and
                expected_length != len(s_obj.items)):
                raise ValueError
            return s_obj.items
        if (s_obj.__class__ is SomeObject and
            getattr(s_obj, 'from_ellipsis', False)):    # see newtuple()
            return [Ellipsis]
        raise CallPatternTooComplex("'*' argument must be SomeTuple")

    def is_w(self, one, other):
        return one is other

    def type(self, item):
        return type(item)

    def is_true(self, s_tup):
        assert isinstance(s_tup, SomeTuple)
        return bool(s_tup.items)
class CallPatternTooComplex(Exception):
    """Raised when a call pattern cannot be expressed in RPython."""
    pass
# get current bookkeeper
def getbookkeeper():
    """Get the current Bookkeeper.
    Only works during the analysis of an operation; returns None
    otherwise."""
    return getattr(TLS, 'bookkeeper', None)
def delayed_imports():
    # import ordering hack: pypy.annotation.builtin itself imports this
    # module, so pull in its tables lazily instead of at import time
    global BUILTIN_ANALYZERS, EXTERNAL_TYPE_ANALYZERS
    from pypy.annotation.builtin import BUILTIN_ANALYZERS
    from pypy.annotation.builtin import EXTERNAL_TYPE_ANALYZERS
# ---------------------------------------------------------------------------
# (extraction artifact removed: the following code comes from a separate
#  source file -- the annotation policy module)
# base annotation policy for overrides and specialization
from pypy.annotation.specialize import default_specialize as default
from pypy.annotation.specialize import specialize_argvalue, specialize_argtype, specialize_arglistitemtype
from pypy.annotation.specialize import memo
# for some reason, model must be imported first,
# or we create a cycle.
from pypy.annotation import model as annmodel
from pypy.annotation.bookkeeper import getbookkeeper
from pypy.annotation.signature import Sig
import types
class BasicAnnotatorPolicy(object):
    """Base annotation policy: no specialization, SomeObject allowed.
    NOTE: by convention in this file the 'self' argument of policy
    methods is spelled 'pol'."""
    allow_someobjects = True

    def event(pol, bookkeeper, what, *args):
        # ignore all annotation events by default
        pass

    def get_specializer(pol, tag):
        return pol.no_specialization

    def no_specialization(pol, funcdesc, args_s):
        return funcdesc.cachedgraph(None)

    def no_more_blocks_to_annotate(pol, annotator):
        # hint to all pending specializers that we are done
        for callback in annotator.bookkeeper.pending_specializations:
            callback()
        del annotator.bookkeeper.pending_specializations[:]

    def _adjust_space_config(self, space):
        # allow to override space options.
        if getattr(self, 'do_imports_immediately', None) is not None:
            space.do_imports_immediately = self.do_imports_immediately
class AnnotatorPolicy(BasicAnnotatorPolicy):
    """
    Possibly subclass and pass an instance to the annotator to control special casing during annotation
    """

    def get_specializer(pol, directive):
        # parse a directive of the form 'specialize|override:name[(args)]'
        # and return the matching specializer function
        if directive is None:
            return pol.default_specialize

        # specialize|override:name[(args)]
        directive_parts = directive.split('(', 1)
        if len(directive_parts) == 1:
            [name] = directive_parts
            parms = ()
        else:
            name, parms = directive_parts
            try:
                # NOTE: 'parms' still ends with the ')' of the original
                # directive, which closes the call expression below.
                # HACK: eval of a string -- acceptable only because
                # directives come from trusted source code annotations.
                parms = eval("(lambda *parms: parms)(%s" % parms)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                raise Exception, "broken specialize directive parms: %s" % directive
        name = name.replace(':', '__')
        try:
            specializer = getattr(pol, name)
        except AttributeError:
            raise AttributeError("%r specialize tag not defined in annotation"
                                 "policy %s" % (name, pol))
        if directive.startswith('override:'):
            # different signature: override__xyz(*args_s)
            if parms:
                raise Exception, "override:* specialisations don't support parameters"
            def specialize_override(funcdesc, args_s):
                funcdesc.overridden = True
                return specializer(*args_s)
            return specialize_override
        else:
            if not parms:
                return specializer
            else:
                def specialize_with_parms(funcdesc, args_s):
                    return specializer(funcdesc, args_s, *parms)
                return specialize_with_parms

    # common specializations

    default_specialize = staticmethod(default)
    specialize__memo = staticmethod(memo)
    specialize__arg = staticmethod(specialize_argvalue) # specialize:arg(N)
    specialize__argtype = staticmethod(specialize_argtype) # specialize:argtype(N)
    specialize__arglistitemtype = staticmethod(specialize_arglistitemtype)

    def specialize__ll(pol, *args):
        # delegate to the low-level annotator policy (imported lazily to
        # avoid an import cycle)
        from pypy.rpython.annlowlevel import LowLevelAnnotatorPolicy
        return LowLevelAnnotatorPolicy.default_specialize(*args)

    def override__ignore(pol, *args):
        # replace the function with a stub that just returns None
        bk = getbookkeeper()
        return bk.immutablevalue(None)
# ---------------------------------------------------------------------------
# (extraction artifact removed: the following code comes from a separate
#  source file -- the dictdef module)
from pypy.annotation.model import SomeObject, s_ImpossibleValue
from pypy.annotation.model import SomeInteger, s_Bool, unionof
from pypy.annotation.model import SomeInstance
from pypy.annotation.listdef import ListItem
class DictKey(ListItem):
    """ListItem subclass tracking the annotation of dict keys; also
    carries the custom eq/hash functions for r_dict()s."""
    custom_eq_hash = False

    def __init__(self, bookkeeper, s_value, is_r_dict=False):
        ListItem.__init__(self, bookkeeper, s_value)
        self.is_r_dict = is_r_dict
        self.enable_hashing()

    def patch(self):
        # re-point all DictDefs sharing this item to 'self'
        for dictdef in self.itemof:
            dictdef.dictkey = self

    def merge(self, other):
        if self is not other:
            assert self.custom_eq_hash == other.custom_eq_hash, (
                "mixing plain dictionaries with r_dict()")
            ListItem.merge(self, other)
            if self.custom_eq_hash:
                self.update_rdict_annotations(other.s_rdict_eqfn,
                                              other.s_rdict_hashfn,
                                              other=other)

    def enable_hashing(self):
        # r_dicts don't need the RPython hash of their keys
        if isinstance(self.s_value, SomeInstance) and not self.is_r_dict:
            self.bookkeeper.needs_hash_support[self.s_value.classdef] = True

    def generalize(self, s_other_value):
        updated = ListItem.generalize(self, s_other_value)
        if updated:
            self.enable_hashing()
        if updated and self.custom_eq_hash:
            # the key annotation grew: re-check the eq/hash functions
            self.emulate_rdict_calls()
        return updated

    def update_rdict_annotations(self, s_eqfn, s_hashfn, other=None):
        if not self.custom_eq_hash:
            self.custom_eq_hash = True
        else:
            s_eqfn = unionof(s_eqfn, self.s_rdict_eqfn)
            s_hashfn = unionof(s_hashfn, self.s_rdict_hashfn)
        self.s_rdict_eqfn = s_eqfn
        self.s_rdict_hashfn = s_hashfn
        self.emulate_rdict_calls(other=other)

    def emulate_rdict_calls(self, other=None):
        # annotate calls to the custom eq/hash functions as if they
        # appeared in real code, so their graphs get built and checked
        myeq = (self, 'eq')
        myhash = (self, 'hash')
        if other:
            replace_othereq = [(other, 'eq')]
            replace_otherhash = [(other, 'hash')]
        else:
            replace_othereq = replace_otherhash = ()

        s_key = self.s_value

        def check_eqfn(annotator, graph):
            s = annotator.binding(graph.getreturnvar())
            assert s_Bool.contains(s), (
                "the custom eq function of an r_dict must return a boolean"
                " (got %r)" % (s,))
        self.bookkeeper.emulate_pbc_call(myeq, self.s_rdict_eqfn, [s_key, s_key],
                                         replace=replace_othereq,
                                         callback = check_eqfn)

        def check_hashfn(annotator, graph):
            s = annotator.binding(graph.getreturnvar())
            assert SomeInteger().contains(s), (
                "the custom hash function of an r_dict must return an integer"
                " (got %r)" % (s,))
        self.bookkeeper.emulate_pbc_call(myhash, self.s_rdict_hashfn, [s_key],
                                         replace=replace_otherhash,
                                         callback = check_hashfn)
class DictValue(ListItem):
    """ListItem subclass tracking the annotation of dict values."""

    def patch(self):
        # re-point all DictDefs sharing this item to 'self'
        for owner in self.itemof:
            owner.dictvalue = self
class DictDef:
    """A dict definition remembers how general the keys and values in that
    particular dict have to be.  Every dict creation makes a new DictDef,
    and the union of two dicts merges the DictKeys and DictValues that each
    DictDef stores."""

    def __init__(self, bookkeeper, s_key = s_ImpossibleValue,
                                   s_value = s_ImpossibleValue,
                                   is_r_dict = False):
        self.dictkey = DictKey(bookkeeper, s_key, is_r_dict)
        self.dictkey.itemof[self] = True
        self.dictvalue = DictValue(bookkeeper, s_value)
        self.dictvalue.itemof[self] = True
        self.bookkeeper = bookkeeper

    def read_key(self, position_key=None):
        # record the reading position so that a later generalization of
        # the key annotation can reflow from it
        if position_key is None:
            if self.bookkeeper is None:   # for tests
                from pypy.annotation.bookkeeper import getbookkeeper
                position_key = getbookkeeper().position_key
            else:
                position_key = self.bookkeeper.position_key
        self.dictkey.read_locations[position_key] = True
        return self.dictkey.s_value

    def read_value(self, position_key=None):
        # same as read_key(), for the value annotation
        if position_key is None:
            if self.bookkeeper is None:   # for tests
                from pypy.annotation.bookkeeper import getbookkeeper
                position_key = getbookkeeper().position_key
            else:
                position_key = self.bookkeeper.position_key
        self.dictvalue.read_locations[position_key] = True
        return self.dictvalue.s_value

    def same_as(self, other):
        return (self.dictkey is other.dictkey and
                self.dictvalue is other.dictvalue)

    def union(self, other):
        if (self.same_as(MOST_GENERAL_DICTDEF) or
            other.same_as(MOST_GENERAL_DICTDEF)):
            return MOST_GENERAL_DICTDEF   # without merging
        else:
            self.dictkey.merge(other.dictkey)
            self.dictvalue.merge(other.dictvalue)
            return self

    def generalize_key(self, s_key):
        self.dictkey.generalize(s_key)

    def generalize_value(self, s_value):
        self.dictvalue.generalize(s_value)

    def __repr__(self):
        return '<{%r: %r}>' % (self.dictkey.s_value, self.dictvalue.s_value)
# the most general dict, {SomeObject(): SomeObject()}: union() with it
# swallows any other DictDef (bookkeeper=None freezes its items)
MOST_GENERAL_DICTDEF = DictDef(None, SomeObject(), SomeObject())
# ---------------------------------------------------------------------------
# (extraction artifact removed: the following code comes from a separate
#  source file -- the listdef module)
from pypy.annotation.model import SomeObject, s_ImpossibleValue
from pypy.annotation.model import SomeList, SomeString
from pypy.annotation.model import unionof, TLS, UnionError, isdegenerated
class TooLateForChange(Exception):
    """Raised when a ListItem marked dont_change_any_more is asked to
    become more general."""
    pass
class ListItem:
    """Shared state describing the items of one or more lists: the union
    annotation of everything ever stored in them, plus mutation flags."""
    mutated = False    # True for lists mutated after creation
    resized = False    # True for lists resized after creation
    range_step = None  # the step -- only for lists only created by a range()
    dont_change_any_more = False   # set to True when too late for changes

    # what to do if range_step is different in merge.
    # - if one is a list (range_step is None), unify to a list.
    # - if both have a step, unify to use a variable step (indicated by 0)
    _step_map = {
        (type(None), int): None,
        (int, type(None)): None,
        (int, int)       : 0,
        }

    def __init__(self, bookkeeper, s_value):
        self.s_value = s_value
        self.bookkeeper = bookkeeper
        self.itemof = {}  # set of all ListDefs using this ListItem
        self.read_locations = {}
        if bookkeeper is None:
            # item without a bookkeeper is frozen (used for the
            # MOST_GENERAL_* prebuilt definitions)
            self.dont_change_any_more = True

    def mutate(self):
        # flag in-place modification (setitem etc.)
        if not self.mutated:
            if self.dont_change_any_more:
                raise TooLateForChange
            self.mutated = True

    def resize(self):
        # flag length changes (append, pop, etc.)
        if not self.resized:
            if self.dont_change_any_more:
                raise TooLateForChange
            self.resized = True

    def setrangestep(self, step):
        if step != self.range_step:
            if self.dont_change_any_more:
                raise TooLateForChange
            self.range_step = step

    def merge(self, other):
        # unify 'self' and 'other' into a single shared ListItem,
        # reflowing from every position that read either of them
        if self is not other:
            if getattr(TLS, 'no_side_effects_in_union', 0):
                raise UnionError("merging list/dict items")

            if other.dont_change_any_more:
                if self.dont_change_any_more:
                    raise TooLateForChange
                else:
                    # lists using 'other' don't expect it to change any more,
                    # so we try merging into 'other', which will give
                    # TooLateForChange if it actually tries to make
                    # things more general
                    self, other = other, self

            if other.mutated: self.mutate()
            if other.resized: self.resize()
            if other.range_step != self.range_step:
                self.setrangestep(self._step_map[type(self.range_step),
                                                 type(other.range_step)])
            self.itemof.update(other.itemof)
            # snapshot the read locations *before* patching, so we know
            # which positions to reflow from afterwards
            read_locations = self.read_locations.copy()
            other_read_locations = other.read_locations.copy()
            self.read_locations.update(other.read_locations)
            self.patch()    # which should patch all refs to 'other'
            s_value = self.s_value
            s_other_value = other.s_value
            s_new_value = unionof(s_value, s_other_value)
            if isdegenerated(s_new_value):
                if self.bookkeeper:
                    self.bookkeeper.ondegenerated(self, s_new_value)
                elif other.bookkeeper:
                    other.bookkeeper.ondegenerated(other, s_new_value)
            if s_new_value != s_value:
                if self.dont_change_any_more:
                    raise TooLateForChange
                self.s_value = s_new_value
                # reflow from reading points
                for position_key in read_locations:
                    self.bookkeeper.annotator.reflowfromposition(position_key)
            if s_new_value != s_other_value:
                # reflow from reading points
                for position_key in other_read_locations:
                    other.bookkeeper.annotator.reflowfromposition(position_key)

    def patch(self):
        # re-point all ListDefs sharing this item to 'self'
        for listdef in self.itemof:
            listdef.listitem = self

    def generalize(self, s_other_value):
        # widen the item annotation to include s_other_value; returns
        # True if the annotation actually changed
        s_new_value = unionof(self.s_value, s_other_value)
        if isdegenerated(s_new_value) and self.bookkeeper:
            self.bookkeeper.ondegenerated(self, s_new_value)
        updated = s_new_value != self.s_value
        if updated:
            if self.dont_change_any_more:
                raise TooLateForChange
            self.s_value = s_new_value
            # reflow from all reading points
            for position_key in self.read_locations:
                self.bookkeeper.annotator.reflowfromposition(position_key)
        return updated
class ListDef:
    """A list definition remembers how general the items in that particular
    list have to be.  Every list creation makes a new ListDef, and the union
    of two lists merges the ListItems that each ListDef stores."""

    def __init__(self, bookkeeper, s_item=s_ImpossibleValue,
                 mutated=False, resized=False):
        self.listitem = ListItem(bookkeeper, s_item)
        # resizing implies mutation, hence the '|'
        self.listitem.mutated = mutated | resized
        self.listitem.resized = resized
        self.listitem.itemof[self] = True
        self.bookkeeper = bookkeeper

    def getbookkeeper(self):
        if self.bookkeeper is None:
            # prebuilt definitions have no bookkeeper; fall back to the
            # current one (for tests)
            from pypy.annotation.bookkeeper import getbookkeeper
            return getbookkeeper()
        else:
            return self.bookkeeper

    def read_item(self, position_key=None):
        # record the reading position so that a later generalization of
        # the item annotation can reflow from it
        if position_key is None:
            position_key = self.getbookkeeper().position_key
        self.listitem.read_locations[position_key] = True
        return self.listitem.s_value

    def same_as(self, other):
        return self.listitem is other.listitem

    def union(self, other):
        if (self.same_as(MOST_GENERAL_LISTDEF) or
            other.same_as(MOST_GENERAL_LISTDEF)):
            return MOST_GENERAL_LISTDEF   # without merging
        else:
            self.listitem.merge(other.listitem)
            return self

    def agree(self, other):
        # force both lists to carry the same item annotation
        s_self_value = self.read_item()
        s_other_value = other.read_item()
        self.generalize(s_other_value)
        other.generalize(s_self_value)

    def offspring(self, *others):
        # annotation of a new list derived from self (and others),
        # e.g. by slicing or concatenation
        s_self_value = self.read_item()
        s_other_values = []
        for other in others:
            s_other_values.append(other.read_item())
        s_newlst = self.getbookkeeper().newlist(s_self_value, *s_other_values)
        s_newvalue = s_newlst.listdef.read_item()
        self.generalize(s_newvalue)
        for other in others:
            other.generalize(s_newvalue)
        return s_newlst

    def generalize(self, s_value):
        self.listitem.generalize(s_value)

    def __repr__(self):
        return '<[%r]%s%s>' % (self.listitem.s_value,
                               self.listitem.mutated and 'm' or '',
                               self.listitem.resized and 'r' or '')

    def mutate(self):
        self.listitem.mutate()

    def resize(self):
        # resizing implies mutation
        self.listitem.mutate()
        self.listitem.resize()
# the most general list, [SomeObject()]: union() with it swallows any
# other ListDef (bookkeeper=None freezes its item)
MOST_GENERAL_LISTDEF = ListDef(None, SomeObject())

# annotation of 'sys.argv'-style values: a resizable list of strings
s_list_of_strings = SomeList(ListDef(None, SomeString(), resized = True))
# ---------------------------------------------------------------------------
# (extraction artifact removed: the following code comes from a separate
#  source file -- the binary-operations module)
"""
Binary operations between SomeValues.
"""
import py
import operator
from pypy.annotation.pairtype import pair, pairtype
from pypy.annotation.model import SomeObject, SomeInteger, SomeBool, s_Bool
from pypy.annotation.model import SomeString, SomeChar, SomeList, SomeDict
from pypy.annotation.model import SomeUnicodeCodePoint
from pypy.annotation.model import SomeTuple, SomeImpossibleValue, s_ImpossibleValue
from pypy.annotation.model import SomeInstance, SomeBuiltin, SomeIterator
from pypy.annotation.model import SomePBC, SomeSlice, SomeFloat, s_None
from pypy.annotation.model import SomeExternalObject
from pypy.annotation.model import SomeAddress, SomeTypedAddressAccess
from pypy.annotation.model import SomeWeakGcAddress
from pypy.annotation.model import SomeCTypesObject
from pypy.annotation.model import unionof, UnionError, set, missing_operation, TLS
from pypy.annotation.model import read_can_only_throw
from pypy.annotation.model import add_knowntypedata, merge_knowntypedata
from pypy.annotation.model import lltype_to_annotation
from pypy.annotation.model import SomeGenericCallable
from pypy.annotation.model import SomeExternalInstance
from pypy.annotation.bookkeeper import getbookkeeper
from pypy.objspace.flow.model import Variable
from pypy.annotation.listdef import ListDef
from pypy.rlib import rarithmetic
from pypy.rpython import extregistry
# convenience only!
def immutablevalue(x):
    """Convenience shortcut for getbookkeeper().immutablevalue(x)."""
    bk = getbookkeeper()
    return bk.immutablevalue(x)
# XXX unify this with ObjSpace.MethodTable
BINARY_OPERATIONS = set(['add', 'sub', 'mul', 'div', 'mod',
'truediv', 'floordiv', 'divmod', 'pow',
'and_', 'or_', 'xor',
'lshift', 'rshift',
'getitem', 'setitem', 'delitem',
'getitem_idx', 'getitem_key', 'getitem_idx_key',
'inplace_add', 'inplace_sub', 'inplace_mul',
'inplace_truediv', 'inplace_floordiv', 'inplace_div',
'inplace_mod', 'inplace_pow',
'inplace_lshift', 'inplace_rshift',
'inplace_and', 'inplace_or', 'inplace_xor',
'lt', 'le', 'eq', 'ne', 'gt', 'ge', 'is_', 'cmp',
'coerce',
]
+[opname+'_ovf' for opname in
"""add sub mul floordiv div mod pow lshift
""".split()
])
for opname in BINARY_OPERATIONS:
missing_operation(pairtype(SomeObject, SomeObject), opname)
class __extend__(pairtype(SomeObject, SomeObject)):

    def union((obj1, obj2)):
        """Compute the most precise annotation containing both obj1 and
        obj2, preserving knowntype/const/is_type_of where possible."""
        if obj1 == obj2:
            return obj1
        else:
            result = SomeObject()
            if obj1.knowntype == obj2.knowntype and obj1.knowntype != object:
                result.knowntype = obj1.knowntype
            is_type_of1 = getattr(obj1, 'is_type_of', None)
            is_type_of2 = getattr(obj2, 'is_type_of', None)
            if obj1.is_immutable_constant() and obj2.is_immutable_constant() and obj1.const == obj2.const:
                result.const = obj1.const
                # merge the is_type_of sets of both sides
                is_type_of = {}
                if is_type_of1:
                    for v in is_type_of1:
                        is_type_of[v] = True
                if is_type_of2:
                    for v in is_type_of2:
                        is_type_of[v] = True
                if is_type_of:
                    result.is_type_of = is_type_of.keys()
            else:
                if is_type_of1 and is_type_of1 == is_type_of2:
                    result.is_type_of = is_type_of1
            # try to preserve the origin of SomeObjects
            if obj1 == result:
                return obj1
            elif obj2 == result:
                return obj2
            else:
                return result
    # inplace_xxx ---> xxx by default
    def inplace_add((obj1, obj2)):      return pair(obj1, obj2).add()
    def inplace_sub((obj1, obj2)):      return pair(obj1, obj2).sub()
    def inplace_mul((obj1, obj2)):      return pair(obj1, obj2).mul()
    def inplace_truediv((obj1, obj2)):  return pair(obj1, obj2).truediv()
    def inplace_floordiv((obj1, obj2)): return pair(obj1, obj2).floordiv()
    def inplace_div((obj1, obj2)):      return pair(obj1, obj2).div()
    def inplace_mod((obj1, obj2)):      return pair(obj1, obj2).mod()
    def inplace_pow((obj1, obj2)):      return pair(obj1, obj2).pow(s_None)
    def inplace_lshift((obj1, obj2)):   return pair(obj1, obj2).lshift()
    def inplace_rshift((obj1, obj2)):   return pair(obj1, obj2).rshift()
    def inplace_and((obj1, obj2)):      return pair(obj1, obj2).and_()
    def inplace_or((obj1, obj2)):       return pair(obj1, obj2).or_()
    def inplace_xor((obj1, obj2)):      return pair(obj1, obj2).xor()

    # by default the in-place operations cannot raise anything...
    for name, func in locals().items():
        if name.startswith('inplace_'):
            func.can_only_throw = []

    # ...except the division-like ones, which can see a zero divisor
    inplace_div.can_only_throw = [ZeroDivisionError]
    inplace_truediv.can_only_throw = [ZeroDivisionError]
    inplace_floordiv.can_only_throw = [ZeroDivisionError]
    inplace_mod.can_only_throw = [ZeroDivisionError]
def lt((obj1, obj2)):
if obj1.is_immutable_constant() and obj2.is_immutable_constant():
return immutablevalue(obj1.const < obj2.const)
else:
getbookkeeper().count("non_int_comp", obj1, obj2)
return s_Bool
def le((obj1, obj2)):
if obj1.is_immutable_constant() and obj2.is_immutable_constant():
return immutablevalue(obj1.const <= obj2.const)
else:
getbookkeeper().count("non_int_comp", obj1, obj2)
return s_Bool
def eq((obj1, obj2)):
if obj1.is_immutable_constant() and obj2.is_immutable_constant():
return immutablevalue(obj1.const == obj2.const)
else:
getbookkeeper().count("non_int_eq", obj1, obj2)
return s_Bool
def ne((obj1, obj2)):
if obj1.is_immutable_constant() and obj2.is_immutable_constant():
return immutablevalue(obj1.const != obj2.const)
else:
getbookkeeper().count("non_int_eq", obj1, obj2)
return s_Bool
def gt((obj1, obj2)):
if obj1.is_immutable_constant() and obj2.is_immutable_constant():
return immutablevalue(obj1.const > obj2.const)
else:
getbookkeeper().count("non_int_comp", obj1, obj2)
return s_Bool
def ge((obj1, obj2)):
if obj1.is_immutable_constant() and obj2.is_immutable_constant():
return immutablevalue(obj1.const >= obj2.const)
else:
getbookkeeper().count("non_int_comp", obj1, obj2)
return s_Bool
    def cmp((obj1, obj2)):
        """Annotate 3-way comparison: a constant if both operands are
        immutable constants, otherwise any integer."""
        getbookkeeper().count("cmp", obj1, obj2)
        if obj1.is_immutable_constant() and obj2.is_immutable_constant():
            # 'cmp' below resolves to the builtin, not this method: the
            # class namespace does not act as an enclosing scope.
            return immutablevalue(cmp(obj1.const, obj2.const))
        else:
            return SomeInteger()
    def is_((obj1, obj2)):
        """Annotate 'a is b'.  Besides constant-folding obvious cases,
        record 'knowntypedata' so that inside the branches of
        'if x is y' the annotator can narrow x and y (e.g. drop
        can_be_None after 'if x is not None')."""
        r = SomeBool()
        if obj2.is_constant():
            if obj1.is_constant():
                r.const = obj1.const is obj2.const
            if obj2.const is None and not obj1.can_be_none():
                # 'x is None' with a non-None x is always False
                r.const = False
        elif obj1.is_constant():
            if obj1.const is None and not obj2.can_be_none():
                r.const = False
        # XXX HACK HACK HACK
        # XXX HACK HACK HACK
        # XXX HACK HACK HACK
        bk = getbookkeeper()
        if bk is not None: # for testing
            knowntypedata = r.knowntypedata = {}
            # locate the flow-graph operation currently being annotated
            fn, block, i = bk.position_key
            annotator = bk.annotator
            op = block.operations[i]
            assert op.opname == "is_"
            assert len(op.args) == 2

            def bind(src_obj, tgt_obj, tgt_arg):
                # In the True branch, the target argument is known to be
                # (the annotation of) the source side of the comparison.
                if hasattr(tgt_obj, 'is_type_of') and src_obj.is_constant():
                    add_knowntypedata(knowntypedata, True, tgt_obj.is_type_of,
                                      bk.valueoftype(src_obj.const))
                assert annotator.binding(op.args[tgt_arg]) == tgt_obj
                add_knowntypedata(knowntypedata, True, [op.args[tgt_arg]], src_obj)
                # In the False branch of 'x is None', x cannot be None.
                nonnone_obj = tgt_obj
                if src_obj.is_constant() and src_obj.const is None and tgt_obj.can_be_none():
                    nonnone_obj = tgt_obj.nonnoneify()
                add_knowntypedata(knowntypedata, False, [op.args[tgt_arg]], nonnone_obj)

            bind(obj2, obj1, 0)
            bind(obj1, obj2, 1)
        return r
def divmod((obj1, obj2)):
getbookkeeper().count("divmod", obj1, obj2)
return SomeTuple([pair(obj1, obj2).div(), pair(obj1, obj2).mod()])
def coerce((obj1, obj2)):
getbookkeeper().count("coerce", obj1, obj2)
return pair(obj1, obj2).union() # reasonable enough
# approximation of an annotation intersection, the result should be the annotation obj or
# the intersection of obj and improvement
def improve((obj, improvement)):
if not improvement.contains(obj) and obj.contains(improvement):
return improvement
else:
return obj
    # checked getitems

    def _getitem_can_only_throw(s_c1, s_o2):
        # Delegate to the exception profile of the concrete getitem
        # implementation selected for this pair of annotations.
        impl = pair(s_c1, s_o2).getitem
        return read_can_only_throw(impl, s_c1, s_o2)
    def getitem_idx_key((s_c1, s_o2)):
        # 'Checked' getitem variants used by the flow graphs: same result
        # annotation as plain getitem, but with a computed can_only_throw.
        impl = pair(s_c1, s_o2).getitem
        return impl()
    getitem_idx_key.can_only_throw = _getitem_can_only_throw

    # the three checked variants share one default implementation
    getitem_idx = getitem_idx_key
    getitem_key = getitem_idx_key
# cloning a function with identical code, for the can_only_throw attribute
def _clone(f, can_only_throw = None):
    # Rebuild a fresh function object sharing f's code so that each clone
    # can carry its own 'can_only_throw' attribute without affecting f.
    newfunc = type(f)(f.func_code, f.func_globals, f.func_name,
                      f.func_defaults, f.func_closure)
    if can_only_throw is not None:
        newfunc.can_only_throw = can_only_throw
    return newfunc
class __extend__(pairtype(SomeInteger, SomeInteger)):
    # unsignedness is considered a rare and contagious disease

    def union((int1, int2)):
        # The merged knowntype follows rarithmetic's promotion rules.
        knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype)
        return SomeInteger(nonneg=int1.nonneg and int2.nonneg,
                           knowntype=knowntype)

    # These operations compute the same result annotation as union; the
    # clones differ only in their can_only_throw exception profile.
    or_ = xor = add = mul = _clone(union, [])
    add_ovf = mul_ovf = _clone(union, [OverflowError])
    div = floordiv = mod = _clone(union, [ZeroDivisionError])
    div_ovf= floordiv_ovf = mod_ovf = _clone(union, [ZeroDivisionError, OverflowError])

    def truediv((int1, int2)):
        # int / int yields a float under true division
        return SomeFloat()
    truediv.can_only_throw = [ZeroDivisionError]
    truediv_ovf = _clone(truediv, [ZeroDivisionError, OverflowError])

    def sub((int1, int2)):
        # subtraction loses the nonneg property (n - m may be negative)
        knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype)
        return SomeInteger(knowntype=knowntype)
    sub.can_only_throw = []
    sub_ovf = _clone(sub, [OverflowError])

    def and_((int1, int2)):
        # 'x & y' is nonnegative as soon as one operand is nonnegative
        knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype)
        return SomeInteger(nonneg=int1.nonneg or int2.nonneg,
                           knowntype=knowntype)
    and_.can_only_throw = []

    def lshift((int1, int2)):
        return SomeInteger(knowntype=int1.knowntype)

    lshift_ovf = _clone(lshift, [ValueError, OverflowError])

    def rshift((int1, int2)):
        # right shift preserves sign, hence nonneg
        return SomeInteger(nonneg=int1.nonneg, knowntype=int1.knowntype)
    rshift.can_only_throw = [ValueError]

    def pow((int1, int2), obj3):
        knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype)
        return SomeInteger(nonneg = int1.nonneg,
                           knowntype=knowntype)
    pow.can_only_throw = [ZeroDivisionError]
    pow_ovf = _clone(pow, [ZeroDivisionError, OverflowError])

    def _compare_helper((int1, int2), opname, operation):
        # Shared implementation of the six comparisons: constant-fold when
        # possible, otherwise return a bool annotation enriched with
        # 'knowntypedata' so that e.g. 'if x >= 0' tells the annotator
        # that x is nonneg inside the True branch.
        r = SomeBool()
        if int1.is_immutable_constant() and int2.is_immutable_constant():
            r.const = operation(int1.const, int2.const)
        else:
            # XXX VERY temporary hack
            if (opname == 'ge' and int2.is_immutable_constant() and
                int2.const == 0 and
                not rarithmetic.signedtype(int1.knowntype)):
                # unsigned >= 0 is trivially true
                r.const = True
        knowntypedata = {}
        # XXX HACK HACK HACK
        # propagate nonneg information between the two arguments
        fn, block, i = getbookkeeper().position_key
        op = block.operations[i]
        assert op.opname == opname
        assert len(op.args) == 2
        def tointtype(int0):
            # bool annotations count as int for propagation purposes
            if int0.knowntype is bool:
                return int
            return int0.knowntype
        if int1.nonneg and isinstance(op.args[1], Variable):
            case = opname in ('lt', 'le', 'eq')
            add_knowntypedata(knowntypedata, case, [op.args[1]],
                              SomeInteger(nonneg=True, knowntype=tointtype(int2)))
        if int2.nonneg and isinstance(op.args[0], Variable):
            case = opname in ('gt', 'ge', 'eq')
            add_knowntypedata(knowntypedata, case, [op.args[0]],
                              SomeInteger(nonneg=True, knowntype=tointtype(int1)))
        if knowntypedata:
            r.knowntypedata = knowntypedata
        return r

    def lt(intint): return intint._compare_helper('lt', operator.lt)
    def le(intint): return intint._compare_helper('le', operator.le)
    def eq(intint): return intint._compare_helper('eq', operator.eq)
    def ne(intint): return intint._compare_helper('ne', operator.ne)
    def gt(intint): return intint._compare_helper('gt', operator.gt)
    def ge(intint): return intint._compare_helper('ge', operator.ge)
class __extend__(pairtype(SomeBool, SomeInteger)):
    # Shifting a bool produces a plain integer.

    def lshift(args):
        b, shift = args
        return SomeInteger()
    lshift.can_only_throw = [ValueError]
    lshift_ovf = _clone(lshift, [ValueError, OverflowError])

    def rshift(args):
        # a bool is 0 or 1, so the shifted result is never negative
        b, shift = args
        return SomeInteger(nonneg=True)
    rshift.can_only_throw = [ValueError]
class __extend__(pairtype(SomeBool, SomeBool)):

    def union(boos):
        """Merge two bool annotations: keep 'const' only when both sides
        agree on it, and merge knowntypedata when both sides carry it."""
        b1, b2 = boos
        result = SomeBool()
        # the distinct getattr defaults (-1 vs -2) can never compare equal,
        # so the condition holds only when both consts exist and match
        if getattr(b1, 'const', -1) == getattr(b2, 'const', -2):
            result.const = b1.const
        if hasattr(b1, 'knowntypedata') and hasattr(b2, 'knowntypedata'):
            merged = merge_knowntypedata(b1.knowntypedata, b2.knowntypedata)
            if merged:
                result.knowntypedata = merged
        return result

    def and_(boos):
        b1, b2 = boos
        result = SomeBool()
        if b1.is_constant():
            if b1.const:
                return b2              # True and x   ->  x
            result.const = False       # False and x  ->  False
        if b2.is_constant() and not b2.const:
            result.const = False
        return result

    def or_(boos):
        b1, b2 = boos
        result = SomeBool()
        if b1.is_constant():
            if not b1.const:
                return b2              # False or x   ->  x
            result.const = True        # True or x    ->  True
        if b2.is_constant() and b2.const:
            result.const = True
        return result

    def xor(boos):
        b1, b2 = boos
        result = SomeBool()
        if b1.is_constant() and b2.is_constant():
            result.const = b1.const ^ b2.const
        return result
class __extend__(pairtype(SomeString, SomeString)):

    def union(strs):
        s1, s2 = strs
        may_be_none = s1.can_be_None or s2.can_be_None
        return SomeString(can_be_None=may_be_none)

    def add(strs):
        # propagate const-ness to help getattr(obj, 'prefix' + const_name)
        s1, s2 = strs
        result = SomeString()
        if s1.is_immutable_constant() and s2.is_immutable_constant():
            result.const = s1.const + s2.const
        return result
class __extend__(pairtype(SomeChar, SomeChar)):

    def union(chars):
        # merging two single characters stays a single character
        return SomeChar()
class __extend__(pairtype(SomeUnicodeCodePoint, SomeUnicodeCodePoint),
                 pairtype(SomeChar, SomeUnicodeCodePoint),
                 pairtype(SomeUnicodeCodePoint, SomeChar)):

    def union(chars):
        # any mix involving a unicode code point unifies to a code point
        return SomeUnicodeCodePoint()
class __extend__(pairtype(SomeString, SomeObject)):

    def mod(args):
        # '%'-style string formatting always annotates as a string
        # (avoid shadowing the builtin 'str' for the local name)
        s_fmt, s_args = args
        getbookkeeper().count('strformat', s_fmt, s_args)
        return SomeString()
class __extend__(pairtype(SomeFloat, SomeFloat)):

    def union(floats):
        return SomeFloat()

    # all arithmetic on two floats yields a float again
    add = sub = mul = div = truediv = union

    def pow(floats, obj3):
        return SomeFloat()
    pow.can_only_throw = [ZeroDivisionError, ValueError, OverflowError]
class __extend__(pairtype(SomeList, SomeList)):

    def union(lists):
        l1, l2 = lists
        return SomeList(l1.listdef.union(l2.listdef))

    def add(lists):
        # concatenation produces a fresh list generalized over both inputs
        l1, l2 = lists
        return l1.listdef.offspring(l2.listdef)

    def eq(lists):
        # comparing lists forces their item annotations to agree
        l1, l2 = lists
        l1.listdef.agree(l2.listdef)
        return s_Bool
    ne = eq
class __extend__(pairtype(SomeList, SomeObject)):

    def inplace_add(args):
        # lst += iterable behaves like lst.extend(iterable)
        lst, s_other = args
        lst.method_extend(s_other)
        return lst
    inplace_add.can_only_throw = []

    def inplace_mul(args):
        # lst *= n changes the list's length
        lst, s_other = args
        lst.listdef.resize()
        return lst
    inplace_mul.can_only_throw = []
class __extend__(pairtype(SomeTuple, SomeTuple)):

    def union(tuples):
        t1, t2 = tuples
        if len(t1.items) != len(t2.items):
            # tuples of different lengths cannot be merged item-wise
            return SomeObject()
        merged = [unionof(x, y) for x, y in zip(t1.items, t2.items)]
        return SomeTuple(items=merged)

    def add(tuples):
        t1, t2 = tuples
        return SomeTuple(items=t1.items + t2.items)
class __extend__(pairtype(SomeDict, SomeDict)):

    def union(dicts):
        d1, d2 = dicts
        return SomeDict(d1.dictdef.union(d2.dictdef))
class __extend__(pairtype(SomeDict, SomeObject)):

    def _can_only_throw(dic1, *ignore):
        # with a custom __eq__/__hash__, user code may raise anything
        if dic1.dictdef.dictkey.custom_eq_hash:
            return None
        return [KeyError]

    def getitem(args):
        dic, s_key = args
        getbookkeeper().count("dict_getitem", dic)
        dic.dictdef.generalize_key(s_key)
        return dic.dictdef.read_value()
    getitem.can_only_throw = _can_only_throw

    def setitem(args, s_value):
        dic, s_key = args
        getbookkeeper().count("dict_setitem", dic)
        dic.dictdef.generalize_key(s_key)
        dic.dictdef.generalize_value(s_value)
    setitem.can_only_throw = _can_only_throw

    def delitem(args):
        dic, s_key = args
        getbookkeeper().count("dict_delitem", dic)
        dic.dictdef.generalize_key(s_key)
    delitem.can_only_throw = _can_only_throw
class __extend__(pairtype(SomeSlice, SomeSlice)):

    def union(slices):
        # merge the three components independently
        sl1, sl2 = slices
        return SomeSlice(unionof(sl1.start, sl2.start),
                         unionof(sl1.stop, sl2.stop),
                         unionof(sl1.step, sl2.step))
class __extend__(pairtype(SomeTuple, SomeInteger)):

    def getitem(args):
        tup, s_index = args
        if s_index.is_immutable_constant():
            # constant index: pick the exact item annotation
            try:
                return tup.items[s_index.const]
            except IndexError:
                return s_ImpossibleValue
        # unknown index: the result may be any of the items
        getbookkeeper().count("tuple_random_getitem", tup)
        return unionof(*tup.items)
    getitem.can_only_throw = [IndexError]
class __extend__(pairtype(SomeTuple, SomeSlice)):

    def getitem(args):
        # tuple slicing is only supported with constant indices
        tup, slic = args
        start, stop, step = slic.constant_indices()
        return SomeTuple(tup.items[start:stop:step])
    getitem.can_only_throw = []
class __extend__(pairtype(SomeList, SomeInteger)):

    def mul(args):
        lst, s_factor = args
        return lst.listdef.offspring()

    def getitem(args):
        lst, s_index = args
        getbookkeeper().count("list_getitem", s_index)
        return lst.listdef.read_item()
    getitem.can_only_throw = []

    getitem_key = getitem

    def getitem_idx(args):
        # same as getitem, but the checked variant may raise IndexError
        lst, s_index = args
        getbookkeeper().count("list_getitem", s_index)
        return lst.listdef.read_item()
    getitem_idx.can_only_throw = [IndexError]

    getitem_idx_key = getitem_idx

    def setitem(args, s_value):
        lst, s_index = args
        getbookkeeper().count("list_setitem", s_index)
        lst.listdef.mutate()
        lst.listdef.generalize(s_value)
    setitem.can_only_throw = [IndexError]

    def delitem(args):
        lst, s_index = args
        getbookkeeper().count("list_delitem", s_index)
        lst.listdef.resize()
    delitem.can_only_throw = [IndexError]
class __extend__(pairtype(SomeList, SomeSlice)):

    def getitem(args):
        lst, slic = args
        return lst.listdef.offspring()
    getitem.can_only_throw = []

    def setitem(args, s_iterable):
        # we need the same unifying effect as the extend() method for
        # the case lst1[x:y] = lst2.
        lst, slic = args
        lst.method_extend(s_iterable)
    setitem.can_only_throw = []

    def delitem(args):
        lst, slic = args
        lst.listdef.resize()
    delitem.can_only_throw = []
class __extend__(pairtype(SomeString, SomeSlice)):

    def getitem(args):
        # a slice of a string is again some string
        return SomeString()
    getitem.can_only_throw = []
class __extend__(pairtype(SomeString, SomeInteger)):

    def getitem(args):
        s_str, s_index = args
        getbookkeeper().count("str_getitem", s_index)
        return SomeChar()
    getitem.can_only_throw = []

    getitem_key = getitem

    def getitem_idx(args):
        # checked variant: may raise IndexError
        s_str, s_index = args
        getbookkeeper().count("str_getitem", s_index)
        return SomeChar()
    getitem_idx.can_only_throw = [IndexError]

    getitem_idx_key = getitem_idx

    def mul(args):  # xxx do we want to support this
        s_str, s_factor = args
        getbookkeeper().count("str_mul", s_str, s_factor)
        return SomeString()
class __extend__(pairtype(SomeInteger, SomeString)):

    def mul(args):  # xxx do we want to support this
        # symmetric case of n * string
        s_factor, s_str = args
        getbookkeeper().count("str_mul", s_str, s_factor)
        return SomeString()
class __extend__(pairtype(SomeInteger, SomeList)):

    def mul(args):
        # n * lst builds a fresh list with the same item annotation
        s_factor, lst = args
        return lst.listdef.offspring()
class __extend__(pairtype(SomeInstance, SomeInstance)):

    def union((ins1, ins2)):
        """Merge two instance annotations to their common base classdef,
        keeping only the flags both sides agree on."""
        if ins1.classdef is None or ins2.classdef is None:
            # special case only
            basedef = None
        else:
            basedef = ins1.classdef.commonbase(ins2.classdef)
            if basedef is None:
                # no common base class at all
                # print warning?
                return SomeObject()
        flags = ins1.flags
        if flags:
            flags = flags.copy()
            for key, value in flags.items():
                if key not in ins2.flags or ins2.flags[key] != value:
                    del flags[key]
        return SomeInstance(basedef,
                            can_be_None=ins1.can_be_None or ins2.can_be_None,
                            flags=flags)

    def improve((ins1, ins2)):
        # Intersection of two instance annotations: prefer the more
        # precise classdef when one is a base of the other.
        if ins1.classdef is None:
            resdef = ins2.classdef
        elif ins2.classdef is None:
            resdef = ins1.classdef
        else:
            basedef = ins1.classdef.commonbase(ins2.classdef)
            if basedef is ins1.classdef:
                resdef = ins2.classdef
            elif basedef is ins2.classdef:
                resdef = ins1.classdef
            else:
                # disjoint classes: the intersection is at best None
                if ins1.can_be_None and ins2.can_be_None:
                    return s_None
                else:
                    return s_ImpossibleValue
        res = SomeInstance(resdef, can_be_None=ins1.can_be_None and ins2.can_be_None)
        if ins1.contains(res) and ins2.contains(res):
            return res # fine
        else:
            # this case can occur in the presence of 'const' attributes,
            # which we should try to preserve. Fall-back...
            thistype = pairtype(SomeInstance, SomeInstance)
            return super(thistype, pair(ins1, ins2)).improve()
class __extend__(pairtype(SomeIterator, SomeIterator)):

    def union(iters):
        it1, it2 = iters
        merged_container = unionof(it1.s_container, it2.s_container)
        if it1.variant != it2.variant:
            raise UnionError("merging incompatible iterators variants")
        return SomeIterator(merged_container, *it1.variant)
class __extend__(pairtype(SomeBuiltin, SomeBuiltin)):

    def union(bltns):
        # only two bound versions of the *same* builtin can be merged
        b1, b2 = bltns
        if (b1.analyser != b2.analyser or
            b1.methodname != b2.methodname or
            b1.s_self is None or b2.s_self is None):
            raise UnionError("cannot merge two different builtin functions "
                             "or methods:\n %r\n %r" % (b1, b2))
        merged_self = unionof(b1.s_self, b2.s_self)
        return SomeBuiltin(b1.analyser, merged_self, methodname=b1.methodname)
class __extend__(pairtype(SomePBC, SomePBC)):

    def union((pbc1, pbc2)):
        # The union of two prebuilt-constant annotations is the set union
        # of their descriptions.
        d = pbc1.descriptions.copy()
        d.update(pbc2.descriptions)
        return SomePBC(d, can_be_None = pbc1.can_be_None or pbc2.can_be_None)

    def is_((pbc1, pbc2)):
        # Refine the generic 'is': if the two description sets are
        # disjoint (and at least one side cannot be None), the identity
        # test is known to be False.
        thistype = pairtype(SomePBC, SomePBC)
        s = super(thistype, pair(pbc1, pbc2)).is_()
        if not s.is_constant():
            if not pbc1.can_be_None or not pbc2.can_be_None:
                for desc in pbc1.descriptions:
                    if desc in pbc2.descriptions:
                        break
                else:
                    s.const = False    # no common desc in the two sets
        return s
class __extend__(pairtype(SomeGenericCallable, SomePBC)):

    def union((gencall, pbc)):
        """Absorb a set of prebuilt callables into a generic-callable
        annotation by emulating a call to each description."""
        for desc in pbc.descriptions:
            unique_key = desc
            bk = desc.bookkeeper
            s_result = bk.emulate_pbc_call(unique_key, pbc, gencall.args_s)
            s_result = unionof(s_result, gencall.s_result)
            # the declared result annotation must already be general enough
            assert gencall.s_result.contains(s_result)
        gencall.descriptions.update(pbc.descriptions)
        return gencall
class __extend__(pairtype(SomePBC, SomeGenericCallable)):

    def union(args):
        # symmetric case: delegate to the (gencall, pbc) implementation
        pbc, gencall = args
        return pair(gencall, pbc).union()
class __extend__(pairtype(SomeImpossibleValue, SomeObject)):

    def union(args):
        # the impossible value is the neutral element of union
        imp, other = args
        return other
class __extend__(pairtype(SomeObject, SomeImpossibleValue)):

    def union(args):
        # symmetric neutral-element case
        other, imp = args
        return other
# mixing Nones with other objects

def _make_none_union(classname, constructor_args='', glob=None):
    """Generate and install the two __extend__ classes defining the union
    of 'classname' with a None-carrying SomePBC, in both argument orders.

    'constructor_args' is source text for the constructor arguments used
    to rebuild the annotation (typically adding can_be_None information).
    """
    if glob is None:
        glob = globals()
    loc = locals()
    source = py.code.Source("""
        class __extend__(pairtype(%(classname)s, SomePBC)):
            def union((obj, pbc)):
                if pbc.isNone():
                    return %(classname)s(%(constructor_args)s)
                else:
                    return SomeObject()

        class __extend__(pairtype(SomePBC, %(classname)s)):
            def union((pbc, obj)):
                if pbc.isNone():
                    return %(classname)s(%(constructor_args)s)
                else:
                    return SomeObject()
    """ % loc)
    # execute the generated classes in the caller's globals so that the
    # __extend__ mechanism patches the intended pairtype classes
    exec source.compile() in glob
# Install the None-union rules for the annotation classes that may be None.
_make_none_union('SomeInstance', 'classdef=obj.classdef, can_be_None=True')
_make_none_union('SomeString', 'can_be_None=True')
_make_none_union('SomeList', 'obj.listdef')
_make_none_union('SomeDict', 'obj.dictdef')
_make_none_union('SomeExternalObject', 'obj.knowntype')
# getitem on SomePBCs, in particular None fails

class __extend__(pairtype(SomePBC, SomeObject)):

    def getitem(args):
        # indexing a prebuilt constant (e.g. None) can never succeed
        pbc, s_index = args
        return s_ImpossibleValue
class __extend__(pairtype(SomeExternalObject, SomeExternalObject)):

    def union(exts):
        e1, e2 = exts
        if e1.knowntype == e2.knowntype:
            return SomeExternalObject(e1.knowntype)
        # different external types degrade to a plain object
        return SomeObject()
class __extend__(pairtype(SomeExternalInstance, SomeExternalInstance)):

    def union((ext1, ext2)):
        """Unify two external instances to their most derived common
        superclass, or SomeObject if only BasicExternal is shared."""
        def commonsuperclass(cls1, cls2):
            # walk up cls2's first-base chain until it is a base of cls1
            cls = cls2
            while not issubclass(cls1, cls):
                cls = cls.__bases__[0]
            return cls

        from pypy.rpython.ootypesystem.bltregistry import BasicExternal
        cls = commonsuperclass(ext1.knowntype, ext2.knowntype)
        if cls is BasicExternal:
            return SomeObject()
        return SomeExternalInstance(cls)
# ____________________________________________________________
# annotation of low-level types

from pypy.annotation.model import SomePtr, SomeOOInstance, SomeOOClass
from pypy.annotation.model import ll_to_annotation, annotation_to_lltype
from pypy.rpython.ootypesystem import ootype

# SomeOOInstance can also unify with None
_make_none_union('SomeOOInstance', 'ootype=obj.ootype, can_be_None=True')
class __extend__(pairtype(SomePtr, SomePtr)):

    def union(ptrs):
        # low-level pointers never unify across different pointed-to types
        p1, p2 = ptrs
        assert p1.ll_ptrtype == p2.ll_ptrtype,("mixing of incompatible pointer types: %r, %r" %
                                               (p1.ll_ptrtype, p2.ll_ptrtype))
        return SomePtr(p1.ll_ptrtype)
class __extend__(pairtype(SomePtr, SomeInteger)):

    def getitem((p, int1)):
        # Use an example value of the pointed-to type to discover the
        # annotation of an item read.
        example = p.ll_ptrtype._example()
        try:
            v = example[0]
        except IndexError:
            return None # impossible value, e.g. FixedSizeArray(0)
        return ll_to_annotation(v)
    getitem.can_only_throw = []

    def setitem((p, int1), s_value): # just doing checking
        # Type-check only: storing a default of the declared item type
        # verifies that s_value is compatible with the array's item type.
        example = p.ll_ptrtype._example()
        if example[0] is not None: # ignore Void s_value
            v_lltype = annotation_to_lltype(s_value)
            example[0] = v_lltype._defl()
    setitem.can_only_throw = []
class __extend__(pairtype(SomePtr, SomeObject)):
def union((p, obj)):
assert False, ("mixing pointer type %r with something else %r" % (p.ll_ptrtype, obj))
def getitem((p, obj)):
assert False,"ptr %r getitem index not an int: %r" % (p.ll_ptrtype, obj)
def setitem((p, obj)):
assert False,"ptr %r setitem index not an int: %r" % (p.ll_ptrtype, obj)
class __extend__(pairtype(SomeObject, SomePtr)):

    def union(args):
        # delegate to the (SomePtr, SomeObject) implementation
        obj, p = args
        return pair(p, obj).union()
class __extend__(pairtype(SomeOOInstance, SomeOOInstance)):

    def union(insts):
        # unify to the common ootype base class; must exist
        r1, r2 = insts
        common = ootype.commonBaseclass(r1.ootype, r2.ootype)
        assert common is not None, 'Mixing of incompatible instances %r, %r' %(r1.ootype, r2.ootype)
        may_be_none = r1.can_be_None or r2.can_be_None
        return SomeOOInstance(common, can_be_None=may_be_none)
class __extend__(pairtype(SomeOOClass, SomeOOClass)):

    def union(classes):
        r1, r2 = classes
        # a None ootype acts as the neutral element of this union
        if r1.ootype is None:
            common = r2.ootype
        elif r2.ootype is None:
            common = r1.ootype
        else:
            common = ootype.commonBaseclass(r1.ootype, r2.ootype)
            assert common is not None, ('Mixing of incompatible classes %r, %r'
                                        % (r1.ootype, r2.ootype))
        return SomeOOClass(common)
class __extend__(pairtype(SomeOOInstance, SomeObject)):

    def union(args):
        # mixing an ootype reference with anything else is an error
        r, obj = args
        assert False, ("mixing reference type %r with something else %r" % (r.ootype, obj))
class __extend__(pairtype(SomeObject, SomeOOInstance)):

    def union(args):
        # delegate to the (SomeOOInstance, SomeObject) implementation
        obj, r2 = args
        return pair(r2, obj).union()
#_________________________________________
# memory addresses

class __extend__(pairtype(SomeAddress, SomeAddress)):

    def union(addrs):
        a1, a2 = addrs
        # the merged address is known-NULL only if both sides are
        return SomeAddress(is_null=a1.is_null and a2.is_null)

    def sub(addrs):
        a1, a2 = addrs
        if a1.is_null and a2.is_null:
            # NULL - NULL is the constant 0
            return getbookkeeper().immutablevalue(0)
        return SomeInteger()

    def is_(addrs):
        assert False, "comparisons with is not supported by addresses"
class __extend__(pairtype(SomeTypedAddressAccess, SomeTypedAddressAccess)):

    def union(accesses):
        # typed accesses only merge when they agree on the accessed type
        t1, t2 = accesses
        assert t1.type == t2.type
        return t1
class __extend__(pairtype(SomeTypedAddressAccess, SomeInteger)):

    def getitem((s_taa, s_int)):
        # Reading addr.signed[i] etc. yields the annotation corresponding
        # to the accessed low-level type.
        from pypy.annotation.model import lltype_to_annotation
        return lltype_to_annotation(s_taa.type)
    getitem.can_only_throw = []

    def setitem((s_taa, s_int), s_value):
        # Write is check-only: the stored annotation must correspond
        # exactly to the accessed low-level type.
        from pypy.annotation.model import annotation_to_lltype
        assert annotation_to_lltype(s_value) is s_taa.type
    setitem.can_only_throw = []
class __extend__(pairtype(SomeAddress, SomeInteger)):

    def add(args):
        # address arithmetic: the result is annotated as non-NULL
        addr, offset = args
        return SomeAddress(is_null=False)

    def sub(args):
        addr, offset = args
        return SomeAddress(is_null=False)
class __extend__(pairtype(SomeAddress, SomeImpossibleValue)):
    # need to override this specifically to hide the 'raise UnionError'
    # of pairtype(SomeAddress, SomeObject).

    def union(args):
        s_addr, s_imp = args
        return s_addr
class __extend__(pairtype(SomeImpossibleValue, SomeAddress)):
    # need to override this specifically to hide the 'raise UnionError'
    # of pairtype(SomeObject, SomeAddress).

    def union(args):
        s_imp, s_addr = args
        return s_addr
class __extend__(pairtype(SomeAddress, SomeObject)):

    def union(args):
        raise UnionError("union of address and anything else makes no sense")
class __extend__(pairtype(SomeObject, SomeAddress)):

    def union(args):
        raise UnionError("union of address and anything else makes no sense")
class __extend__(pairtype(SomeWeakGcAddress, SomeWeakGcAddress)):

    def union(addrs):
        # weak GC addresses carry no extra information to merge
        return SomeWeakGcAddress()
class __extend__(pairtype(SomeCTypesObject, SomeInteger)):

    def setitem((s_cto, s_index), s_value):
        # item writes do not refine the ctypes annotation
        pass

    def getitem((s_cto, s_index)):
        # Note: The following works for index either pointers and arrays,
        # because both have a _type_ attribute that contains the type of the
        # object pointed to or in the case of an array the element type.
        result_ctype = s_cto.knowntype._type_
        s_result = SomeCTypesObject(result_ctype, ownsmemory=False)
        return s_result.return_annotation()
class __extend__(pairtype(SomeCTypesObject, SomeSlice)):

    def setitem((s_cto, s_slice), s_iterable):
        raise NotImplementedError("ctypes array slice assignment")

    def getitem((s_cto, s_slice)):
        # Slicing is only supported for char arrays, where the result is
        # annotated as a string.
        result_ctype = s_cto.knowntype._type_
        s_result = SomeCTypesObject(result_ctype, ownsmemory=False)
        list_item = s_result.return_annotation()
        if isinstance(list_item, SomeChar):
            return SomeString()
        raise NotImplementedError("ctypes array slicing: "
                                  "only for arrays of char")
class __extend__(pairtype(SomeCTypesObject, SomeCTypesObject)):

    def union(ctos):
        c1, c2 = ctos
        if c1.knowntype != c2.knowntype:
            # different ctypes types degrade to a plain object
            return SomeObject()
        # memory is owned only if both merged annotations own it
        return SomeCTypesObject(c1.knowntype,
                                ownsmemory = (c1.ownsmemory and
                                              c2.ownsmemory))
class __extend__(pairtype(SomeCTypesObject, SomePBC)):

    def union(args):
        # merging with None keeps the ctypes annotation when it can be None
        obj, pbc = args
        if pbc.isNone() and obj.can_be_none():
            return obj
        return SomeObject()
class __extend__(pairtype(SomePBC, SomeCTypesObject)):

    def union(args):
        # symmetric None-merging case
        pbc, obj = args
        if pbc.isNone() and obj.can_be_none():
            return obj
        return SomeObject()
| Python |
# workaround for a circular imports problem
# e.g. if you import pypy.annotation.listdef first
import pypy.annotation.model
| Python |
from __future__ import generators
from types import ClassType, FunctionType
from pypy.tool.ansi_print import ansi_log, raise_nicer_exception
from pypy.annotation import model as annmodel
from pypy.annotation.pairtype import pair
from pypy.annotation.bookkeeper import Bookkeeper, getbookkeeper
from pypy.annotation import signature
from pypy.objspace.flow.model import Variable, Constant
from pypy.objspace.flow.model import FunctionGraph
from pypy.objspace.flow.model import c_last_exception, checkgraph
import py
# module-level logger for the annotator, rendered via the ansi console
log = py.log.Producer("annrpython")
py.log.setconsumer("annrpython", ansi_log)

from pypy.tool.error import format_blocked_annotation_error, format_someobject_error, AnnotatorError

# unique sentinel for binding(): distinguishes "no default given" from
# any real default value (including None)
FAIL = object()
class RPythonAnnotator(object):
"""Block annotator for RPython.
See description in doc/translation.txt."""
    def __init__(self, translator=None, policy=None, bookkeeper=None):
        # The imports below are kept for their registration side effects.
        import pypy.rpython.ootypesystem.ooregistry # has side effects
        import pypy.rpython.ootypesystem.bltregistry # has side effects
        import pypy.rpython.extfuncregistry # has side effects
        import pypy.rlib.nonconst # has side effects

        if translator is None:
            # interface for tests
            from pypy.translator.translator import TranslationContext
            translator = TranslationContext()
            translator.annotator = self
        self.translator = translator
        self.pendingblocks = {}  # map {block: graph-containing-it}
        self.bindings = {}       # map Variables to SomeValues
        self.annotated = {}      # set of blocks already seen
        self.added_blocks = None # see processblock() below
        self.links_followed = {} # set of links that have ever been followed
        self.notify = {}         # {block: {positions-to-reflow-from-when-done}}
        self.fixed_graphs = {}   # set of graphs not to annotate again
        self.blocked_blocks = {} # set of {blocked_block: graph}
        # --- the following information is recorded for debugging only ---
        # --- and only if annotation.model.DEBUG is kept to True
        self.why_not_annotated = {} # {block: (exc_type, exc_value, traceback)}
                                    # records the location of BlockedInference
                                    # exceptions that blocked some blocks.
        self.blocked_graphs = {} # set of graphs that have blocked blocks
        self.bindingshistory = {}# map Variables to lists of SomeValues
        self.binding_caused_by = {}     # map Variables to position_keys
               # records the caller position that caused bindings of inputargs
               # to be updated
        self.binding_cause_history = {} # map Variables to lists of positions
                # history of binding_caused_by, kept in sync with
                # bindingshistory
        self.reflowcounter = {}
        self.return_bindings = {} # map return Variables to their graphs
        # --- end of debugging information ---
        self.frozen = False
        if policy is None:
            from pypy.annotation.policy import AnnotatorPolicy
            self.policy = AnnotatorPolicy()
        else:
            self.policy = policy
        if bookkeeper is None:
            bookkeeper = Bookkeeper(self)
        self.bookkeeper = bookkeeper
    def __getstate__(self):
        """Pickle support: keep only the core attributes listed in 'attrs';
        every other attribute (debug-only dicts) is replaced by an empty
        dict so pickles stay small."""
        attrs = """translator pendingblocks bindings annotated links_followed
        notify bookkeeper frozen policy added_blocks""".split()
        ret = self.__dict__.copy()
        for key, value in ret.items():
            if key not in attrs:
                # guard against new non-dict attributes being silently dropped
                assert type(value) is dict, (
                    "%r is not dict. please update %s.__getstate__" %
                    (key, self.__class__.__name__))
                ret[key] = {}
        return ret
    def _register_returnvar(self, flowgraph):
        # Debug bookkeeping only: remember which graph each return
        # variable belongs to.
        if annmodel.DEBUG:
            self.return_bindings[flowgraph.getreturnvar()] = flowgraph
    #___ convenience high-level interface __________________

    def build_types(self, function, input_arg_types, complete_now=True):
        """Recursively build annotations about the specific entry point."""
        assert isinstance(function, FunctionType), "fix that!"
        # make input arguments and set their type
        inputcells = [self.typeannotation(t) for t in input_arg_types]
        desc = self.bookkeeper.getdesc(function)
        desc.getcallfamily()   # record this implicit call (hint for back-ends)
        flowgraph = desc.specialize(inputcells)
        if not isinstance(flowgraph, FunctionGraph):
            # specialization may directly return a precomputed annotation
            assert isinstance(flowgraph, annmodel.SomeObject)
            return flowgraph
        return self.build_graph_types(flowgraph, inputcells, complete_now=complete_now)
    def get_call_parameters(self, function, args_s, policy):
        """Return (graph, inputcells) for calling 'function' with the
        annotations 'args_s' under 'policy'.  The 'schedule' callback only
        records the call instead of scheduling actual annotation."""
        desc = self.bookkeeper.getdesc(function)
        args = self.bookkeeper.build_args("simple_call", args_s[:])
        result = []
        def schedule(graph, inputcells):
            result.append((graph, inputcells))
            return annmodel.s_ImpossibleValue
        prevpolicy = self.policy
        self.policy = policy
        self.bookkeeper.enter(None)
        try:
            desc.pycall(schedule, args, annmodel.s_ImpossibleValue)
        finally:
            self.bookkeeper.leave()
            self.policy = prevpolicy
        # exactly one call must have been recorded
        [(graph, inputcells)] = result
        return graph, inputcells
    def annotate_helper(self, function, args_s, policy=None):
        """Annotate 'function' for the given argument annotations and
        return its flow graph; used for extra helper functions."""
        if policy is None:
            from pypy.annotation.policy import AnnotatorPolicy
            policy = AnnotatorPolicy()
        graph, inputcells = self.get_call_parameters(function, args_s, policy)
        self.build_graph_types(graph, inputcells, complete_now=False)
        self.complete_helpers(policy)
        return graph
def annotate_helper_method(self, _class, attr, args_s, policy=None):
""" Warning! this method is meant to be used between
annotation and rtyping
"""
if policy is None:
from pypy.annotation.policy import AnnotatorPolicy
policy = AnnotatorPolicy()
assert attr != '__class__'
classdef = self.bookkeeper.getuniqueclassdef(_class)
attrdef = classdef.find_attribute(attr)
s_result = attrdef.getvalue()
classdef.add_source_for_attribute(attr, classdef.classdesc)
self.bookkeeper
assert isinstance(s_result, annmodel.SomePBC)
olddesc = s_result.descriptions.iterkeys().next()
desc = olddesc.bind_self(classdef)
args = self.bookkeeper.build_args("simple_call", args_s[:])
desc.consider_call_site(self.bookkeeper, desc.getcallfamily(), [desc],
args, annmodel.s_ImpossibleValue)
result = []
def schedule(graph, inputcells):
result.append((graph, inputcells))
return annmodel.s_ImpossibleValue
prevpolicy = self.policy
self.policy = policy
self.bookkeeper.enter(None)
try:
desc.pycall(schedule, args, annmodel.s_ImpossibleValue)
finally:
self.bookkeeper.leave()
self.policy = prevpolicy
[(graph, inputcells)] = result
self.build_graph_types(graph, inputcells, complete_now=False)
self.complete_helpers(policy)
return graph
    def complete_helpers(self, policy):
        # Run annotation to a fixpoint under 'policy', tracking newly
        # added blocks so that simplifications run only on them.
        saved = self.policy, self.added_blocks
        self.policy = policy
        try:
            self.added_blocks = {}
            self.complete()
            # invoke annotation simplifications for the new blocks
            self.simplify(block_subset=self.added_blocks)
        finally:
            self.policy, self.added_blocks = saved
    def build_graph_types(self, flowgraph, inputcells, complete_now=True):
        """Annotate 'flowgraph' for the input annotations 'inputcells' and
        return the annotation of its return variable."""
        checkgraph(flowgraph)

        nbarg = len(flowgraph.getargs())
        if len(inputcells) != nbarg:
            raise TypeError("%s expects %d args, got %d" %(
                            flowgraph, nbarg, len(inputcells)))

        # register the entry point
        self.addpendinggraph(flowgraph, inputcells)
        # recursively proceed until no more pending block is left
        if complete_now:
            self.complete()
        return self.binding(flowgraph.getreturnvar(), None)
def gettype(self, variable):
"""Return the known type of a control flow graph variable,
defaulting to 'object'."""
if isinstance(variable, Constant):
return type(variable.value)
elif isinstance(variable, Variable):
cell = self.bindings.get(variable)
if cell:
return cell.knowntype
else:
return object
else:
raise TypeError, ("Variable or Constant instance expected, "
"got %r" % (variable,))
    def getuserclassdefinitions(self):
        """Return a list of ClassDefs."""
        # simple accessor over the bookkeeper's collected class definitions
        return self.bookkeeper.classdefs
    #___ medium-level interface ____________________________

    def addpendinggraph(self, flowgraph, inputcells):
        # Schedule a whole graph: record its return variable (debug only)
        # and queue its start block with the given input annotations.
        self._register_returnvar(flowgraph)
        self.addpendingblock(flowgraph, flowgraph.startblock, inputcells)
    def addpendingblock(self, graph, block, cells, called_from_graph=None):
        """Register an entry point into block with the given input cells."""
        if graph in self.fixed_graphs:
            # special case for annotating/rtyping in several phases: calling
            # a graph that has already been rtyped.  Safety-check the new
            # annotations that are passed in, and don't annotate the old
            # graph -- it's already low-level operations!
            for a, s_newarg in zip(graph.getargs(), cells):
                s_oldarg = self.binding(a)
                assert s_oldarg.contains(s_newarg)
        else:
            assert not self.frozen
            for a in cells:
                assert isinstance(a, annmodel.SomeObject)
            if block not in self.annotated:
                self.bindinputargs(graph, block, cells, called_from_graph)
            else:
                self.mergeinputargs(graph, block, cells, called_from_graph)
            # queue the block unless it is already fully annotated
            if not self.annotated[block]:
                self.pendingblocks[block] = graph
    def complete(self):
        """Process pending blocks until none is left.

        Runs the fixpoint loop; when it terminates, checks that no block
        remained blocked (raising AnnotatorError with a report otherwise),
        makes sure every graph's return variable has a binding, and lets
        the bookkeeper run its policy-dependent fixpoint computations.
        """
        while True:
            while self.pendingblocks:
                block, graph = self.pendingblocks.popitem()
                if annmodel.DEBUG:
                    self.flowin_block = block # we need to keep track of block
                self.processblock(graph, block)
            # the policy may schedule more blocks (e.g. delayed
            # specializations); loop again if it did
            self.policy.no_more_blocks_to_annotate(self)
            if not self.pendingblocks:
                break   # finished
        # make sure that the return variables of all graphs is annotated
        if self.added_blocks is not None:
            # incremental annotation: only look at the graphs owning the
            # newly added blocks (self.annotated maps block -> graph, or
            # False for a still-blocked block)
            newgraphs = [self.annotated[block] for block in self.added_blocks]
            newgraphs = dict.fromkeys(newgraphs)
            got_blocked_blocks = False in newgraphs
        else:
            newgraphs = self.translator.graphs  #all of them
            got_blocked_blocks = False in self.annotated.values()
        if got_blocked_blocks:
            # mark the graphs that contain blocked blocks
            # (.values() returns a fresh list in Python 2, so mutating the
            # dict inside the loop is safe here)
            for graph in self.blocked_graphs.values():
                self.blocked_graphs[graph] = True
            blocked_blocks = [block for block, done in self.annotated.items()
                                    if done is False]
            assert len(blocked_blocks) == len(self.blocked_blocks)
            text = format_blocked_annotation_error(self, self.blocked_blocks)
            #raise SystemExit()
            raise AnnotatorError(text)
        for graph in newgraphs:
            v = graph.getreturnvar()
            if v not in self.bindings:
                # the graph never reached its return block
                self.setbinding(v, annmodel.s_ImpossibleValue)
        # policy-dependent computation
        self.bookkeeper.compute_at_fixpoint()
def binding(self, arg, default=FAIL):
"Gives the SomeValue corresponding to the given Variable or Constant."
if isinstance(arg, Variable):
try:
return self.bindings[arg]
except KeyError:
if default is not FAIL:
return default
else:
raise
elif isinstance(arg, Constant):
#if arg.value is undefined_value: # undefined local variables
# return annmodel.s_ImpossibleValue
return self.bookkeeper.immutableconstant(arg)
else:
raise TypeError, 'Variable or Constant expected, got %r' % (arg,)
    def typeannotation(self, t):
        """Turn a type specification 't' into a SomeObject annotation,
        using this annotator's bookkeeper (see annotation.signature)."""
        return signature.annotation(t, self.bookkeeper)
    def ondegenerated(self, what, s_value, where=None, called_from_graph=None):
        """Called when an annotation degenerates to SomeObject; raises
        AnnotatorError unless the policy or the current function allows
        SomeObjects."""
        if self.policy.allow_someobjects:
            return
        # is the function itself tagged with allow_someobjects?
        position_key = where or getattr(self.bookkeeper, 'position_key', None)
        if position_key is not None:
            graph, block, i = position_key
            try:
                if graph.func.allow_someobjects:
                    return
            except AttributeError:
                pass
        # NOTE(review): if position_key is None, the next line raises
        # TypeError instead of AnnotatorError -- presumably never reached
        # with a None position; confirm before relying on it.
        graph = position_key[0]
        msgstr = format_someobject_error(self, position_key, what, s_value,
                                         called_from_graph,
                                         self.bindings.get(what, "(none)"))
        raise AnnotatorError(msgstr)
    def setbinding(self, arg, s_value, called_from_graph=None, where=None):
        """Bind annotation 's_value' to variable 'arg'.

        Rebinding is only allowed to generalize: the new annotation must
        contain the previous one (asserted below).  Degenerated values go
        through ondegenerated() first, which may raise.
        """
        if arg in self.bindings:
            assert s_value.contains(self.bindings[arg])
            # for debugging purposes, record the history of bindings that
            # have been given to this variable
            if annmodel.DEBUG:
                history = self.bindingshistory.setdefault(arg, [])
                history.append(self.bindings[arg])
                cause_history = self.binding_cause_history.setdefault(arg, [])
                cause_history.append(self.binding_caused_by[arg])
        degenerated = annmodel.isdegenerated(s_value)
        if degenerated:
            self.ondegenerated(arg, s_value, where=where,
                               called_from_graph=called_from_graph)
        self.bindings[arg] = s_value
        if annmodel.DEBUG:
            if arg in self.return_bindings:
                # log changes to return-variable annotations
                log.event("%s -> %s" %
                          (self.whereami((self.return_bindings[arg], None, None)),
                           s_value))
            if arg in self.return_bindings and degenerated:
                self.warning("result degenerated to SomeObject",
                             (self.return_bindings[arg],None, None))
        self.binding_caused_by[arg] = called_from_graph
def transfer_binding(self, v_target, v_source):
assert v_source in self.bindings
self.bindings[v_target] = self.bindings[v_source]
if annmodel.DEBUG:
self.binding_caused_by[v_target] = None
def warning(self, msg, pos=None):
if pos is None:
try:
pos = self.bookkeeper.position_key
except AttributeError:
pos = '?'
if pos != '?':
pos = self.whereami(pos)
log.WARNING("%s/ %s" % (pos, msg))
#___ interface for annotator.bookkeeper _______
    def recursivecall(self, graph, whence, inputcells): # whence = position_key|callback taking the annotator, graph
        """Annotate a call to 'graph' with 'inputcells' and return the
        current annotation of its return value.

        'whence' is either a (graph, block, index) position key of the
        call site, or a callable invoked whenever the callee's return
        block is (re-)analysed, or None.
        """
        if isinstance(whence, tuple):
            # record the caller -> callee edge in the call graph
            parent_graph, parent_block, parent_index = position_key = whence
            tag = parent_block, parent_index
            self.translator.update_call_graph(parent_graph, graph, tag)
        else:
            position_key = None
        self._register_returnvar(graph)
        # self.notify[graph.returnblock] is a dictionary of call
        # points to this func which triggers a reflow whenever the
        # return block of this graph has been analysed.
        callpositions = self.notify.setdefault(graph.returnblock, {})
        if whence is not None:
            if callable(whence):
                # wrap the user callback so it receives (annotator, graph)
                def callback():
                    whence(self, graph)
            else:
                callback = whence
            callpositions[callback] = True
        # generalize the function's input arguments
        self.addpendingblock(graph, graph.startblock, inputcells,
                             position_key)
        # get the (current) return value
        v = graph.getreturnvar()
        try:
            return self.bindings[v]
        except KeyError:
            # the function didn't reach any return statement so far.
            # (some functions actually never do, they always raise exceptions)
            return annmodel.s_ImpossibleValue
    def reflowfromposition(self, position_key):
        """Schedule a re-flow of the block containing the given position."""
        graph, block, index = position_key
        self.reflowpendingblock(graph, block)
#___ simplification (should be moved elsewhere?) _______
# it should be!
# now simplify_calls is moved to transform.py.
# i kept reverse_binding here for future(?) purposes though. --sanxiyn
    def reverse_binding(self, known_variables, cell):
        """This is a hack.  Return a Variable or Constant whose binding is
        exactly (by identity) the given 'cell'; raise CannotSimplify if
        none of 'known_variables' qualifies."""
        # In simplify_calls, when we are trying to create the new
        # SpaceOperation, all we have are SomeValues. But SpaceOperations take
        # Variables, not SomeValues. Trouble is, we don't always have a
        # Variable that just happens to be bound to the given SomeValue.
        # A typical example would be if the tuple of arguments was created
        # from another basic block or even another function. Well I guess
        # there is no clean solution, short of making the transformations
        # more syntactic (e.g. replacing a specific sequence of SpaceOperations
        # with another one). This is a real hack because we have to use
        # the identity of 'cell'.
        if cell.is_constant():
            return Constant(cell.const)
        else:
            for v in known_variables:
                if self.bindings[v] is cell:
                    return v
            else:
                # for/else: no variable is bound to this exact cell object
                raise CannotSimplify
def simplify(self, block_subset=None, extra_passes=None):
# Generic simplifications
from pypy.translator import transform
transform.transform_graph(self, block_subset=block_subset,
extra_passes=extra_passes)
from pypy.translator import simplify
if block_subset is None:
graphs = self.translator.graphs
else:
graphs = {}
for block in block_subset:
graph = self.annotated.get(block)
if graph:
graphs[graph] = True
for graph in graphs:
simplify.eliminate_empty_blocks(graph)
#___ flowing annotations in blocks _____________________
    def processblock(self, graph, block):
        """Flow annotations through one block, recording success or
        (temporary) blockage in self.annotated / self.blocked_blocks."""
        # Important: this is not called recursively.
        # self.flowin() can only issue calls to self.addpendingblock().
        # The analysis of a block can be in three states:
        #  * block not in self.annotated:
        #      never seen the block.
        #  * self.annotated[block] == False:
        #      the input variables of the block are in self.bindings but we
        #      still have to consider all the operations in the block.
        #  * self.annotated[block] == graph-containing-block:
        #      analysis done (at least until we find we must generalize the
        #      input variables).
        #print '* processblock', block, cells
        if annmodel.DEBUG:
            self.reflowcounter.setdefault(block, 0)
            self.reflowcounter[block] += 1
        # optimistically mark the block as done before flowing it
        self.annotated[block] = graph
        if block in self.blocked_blocks:
            del self.blocked_blocks[block]
        try:
            self.flowin(graph, block)
        except BlockedInference, e:
            self.annotated[block] = False   # failed, hopefully temporarily
            self.blocked_blocks[block] = graph
        except Exception, e:
            # hack for debug tools only
            if not hasattr(e, '__annotator_block'):
                setattr(e, '__annotator_block', block)
            raise
        # The dict 'added_blocks' is used by rpython.annlowlevel to
        # detect which are the new blocks that annotating an additional
        # small helper creates.
        if self.added_blocks is not None:
            self.added_blocks[block] = True
    def reflowpendingblock(self, graph, block):
        """Schedule an already-annotated block to be flowed again."""
        assert not self.frozen
        assert graph not in self.fixed_graphs
        self.pendingblocks[block] = graph
        assert block in self.annotated
        self.annotated[block] = False # must re-flow
        self.blocked_blocks[block] = graph
def bindinputargs(self, graph, block, inputcells, called_from_graph=None):
# Create the initial bindings for the input args of a block.
assert len(block.inputargs) == len(inputcells)
where = (graph, block, None)
for a, cell in zip(block.inputargs, inputcells):
self.setbinding(a, cell, called_from_graph, where=where)
self.annotated[block] = False # must flowin.
self.blocked_blocks[block] = graph
    def mergeinputargs(self, graph, block, inputcells, called_from_graph=None):
        # Merge the new 'cells' with each of the block's existing input
        # variables.
        oldcells = [self.binding(a) for a in block.inputargs]
        unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)]
        # if the merged cells changed, we must redo the analysis
        # (unionof() always generalizes, so this converges)
        if unions != oldcells:
            self.bindinputargs(graph, block, unions, called_from_graph)
def whereami(self, position_key):
graph, block, i = position_key
blk = ""
if block:
at = block.at()
if at:
blk = " block"+at
opid=""
if i is not None:
opid = " op=%d" % i
return repr(graph) + blk + opid
    def flowin(self, graph, block):
        """Annotate every operation of 'block', then propagate the
        resulting annotations along its exit links via addpendingblock().

        May raise BlockedInference (block must be retried later) or return
        early when the blockage is known to be harmless.
        """
        #print 'Flowing', block, [self.binding(a) for a in block.inputargs]
        try:
            for i in range(len(block.operations)):
                try:
                    # the bookkeeper tracks the current (graph, block, op)
                    self.bookkeeper.enter((graph, block, i))
                    self.consider_op(block.operations[i])
                finally:
                    self.bookkeeper.leave()
        except BlockedInference, e:
            if annmodel.DEBUG:
                import sys
                self.why_not_annotated[block] = sys.exc_info()
            if (e.op is block.operations[-1] and
                block.exitswitch == c_last_exception):
                # this is the case where the last operation of the block will
                # always raise an exception which is immediately caught by
                # an exception handler. We then only follow the exceptional
                # branches.
                exits = [link for link in block.exits
                              if link.exitcase is not None]
            elif e.op.opname in ('simple_call', 'call_args', 'next'):
                # XXX warning, keep the name of the call operations in sync
                # with the flow object space. These are the operations for
                # which it is fine to always raise an exception. We then
                # swallow the BlockedInference and that's it.
                # About 'next': see test_annotate_iter_empty_container().
                return
            else:
                # other cases are problematic (but will hopefully be solved
                # later by reflowing). Throw the BlockedInference up to
                # processblock().
                raise
        except annmodel.HarmlesslyBlocked:
            return
        else:
            # dead code removal: don't follow all exits if the exitswitch
            # is known
            exits = block.exits
            if isinstance(block.exitswitch, Variable):
                s_exitswitch = self.bindings[block.exitswitch]
                if s_exitswitch.is_constant():
                    exits = [link for link in exits
                             if link.exitcase == s_exitswitch.const]
        # mapping (exitcase, variable) -> s_annotation
        # that can be attached to booleans, exitswitches
        knowntypedata = getattr(self.bindings.get(block.exitswitch),
                                "knowntypedata", {})
        # filter out those exceptions which cannot
        # occur for this specific, typed operation.
        if block.exitswitch == c_last_exception:
            op = block.operations[-1]
            if op.opname in annmodel.BINARY_OPERATIONS:
                arg1 = self.binding(op.args[0])
                arg2 = self.binding(op.args[1])
                binop = getattr(pair(arg1, arg2), op.opname, None)
                can_only_throw = annmodel.read_can_only_throw(binop, arg1, arg2)
            elif op.opname in annmodel.UNARY_OPERATIONS:
                arg1 = self.binding(op.args[0])
                unop = getattr(arg1, op.opname, None)
                can_only_throw = annmodel.read_can_only_throw(unop, arg1)
            else:
                can_only_throw = None
            if can_only_throw is not None:
                # keep only the exception links whose exitcase can actually
                # be raised; each candidate exception class is consumed by
                # the first handler that covers it
                candidates = can_only_throw
                candidate_exits = exits
                exits = []
                for link in candidate_exits:
                    case = link.exitcase
                    if case is None:
                        exits.append(link)
                        continue
                    covered = [c for c in candidates if issubclass(c, case)]
                    if covered:
                        exits.append(link)
                        candidates = [c for c in candidates if c not in covered]
        for link in exits:
            import types
            in_except_block = False
            last_exception_var = link.last_exception # may be None for non-exception link
            last_exc_value_var = link.last_exc_value # may be None for non-exception link
            if isinstance(link.exitcase, (types.ClassType, type)) \
                   and issubclass(link.exitcase, py.builtin.BaseException):
                # exception link: build annotations for the exception class
                # and value variables
                assert last_exception_var and last_exc_value_var
                last_exc_value_object = self.bookkeeper.valueoftype(link.exitcase)
                last_exception_object = annmodel.SomeObject()
                last_exception_object.knowntype = type
                if isinstance(last_exception_var, Constant):
                    last_exception_object.const = last_exception_var.value
                last_exception_object.is_type_of = [last_exc_value_var]
                if isinstance(last_exception_var, Variable):
                    self.setbinding(last_exception_var, last_exception_object)
                if isinstance(last_exc_value_var, Variable):
                    self.setbinding(last_exc_value_var, last_exc_value_object)
                # a second, fresh annotation for the cells passed along the
                # link; its is_type_of is filled in after the renaming below
                last_exception_object = annmodel.SomeObject()
                last_exception_object.knowntype = type
                if isinstance(last_exception_var, Constant):
                    last_exception_object.const = last_exception_var.value
                #if link.exitcase is Exception:
                #    last_exc_value_object = annmodel.SomeObject()
                #else:
                last_exc_value_vars = []
                in_except_block = True
            ignore_link = False
            cells = []
            renaming = {}
            # renaming: source variable -> list of target input variables
            for a,v in zip(link.args,link.target.inputargs):
                renaming.setdefault(a, []).append(v)
            for a,v in zip(link.args,link.target.inputargs):
                if a == last_exception_var:
                    assert in_except_block
                    cells.append(last_exception_object)
                elif a == last_exc_value_var:
                    assert in_except_block
                    cells.append(last_exc_value_object)
                    last_exc_value_vars.append(v)
                else:
                    cell = self.binding(a)
                    if (link.exitcase, a) in knowntypedata:
                        # refine the annotation with what taking this exit
                        # tells us about the variable
                        knownvarvalue = knowntypedata[(link.exitcase, a)]
                        cell = pair(cell, knownvarvalue).improve()
                        # ignore links that try to pass impossible values
                        if cell == annmodel.s_ImpossibleValue:
                            ignore_link = True
                    if hasattr(cell,'is_type_of'):
                        # rebuild the cell so that is_type_of refers to the
                        # target block's variables instead of ours
                        renamed_is_type_of = []
                        for v in cell.is_type_of:
                            new_vs = renaming.get(v,[])
                            renamed_is_type_of += new_vs
                        newcell = annmodel.SomeObject()
                        if cell.knowntype == type:
                            newcell.knowntype = type
                        if cell.is_constant():
                            newcell.const = cell.const
                        cell = newcell
                        cell.is_type_of = renamed_is_type_of
                    if hasattr(cell, 'knowntypedata'):
                        # same renaming for the knowntypedata attached to bools
                        renamed_knowntypedata = {}
                        for (value, v), s in cell.knowntypedata.items():
                            new_vs = renaming.get(v, [])
                            for new_v in new_vs:
                                renamed_knowntypedata[value, new_v] = s
                        assert isinstance(cell, annmodel.SomeBool)
                        newcell = annmodel.SomeBool()
                        if cell.is_constant():
                            newcell.const = cell.const
                        cell = newcell
                        cell.knowntypedata = renamed_knowntypedata
                    cells.append(cell)
            if ignore_link:
                continue
            if in_except_block:
                last_exception_object.is_type_of = last_exc_value_vars
            self.links_followed[link] = True
            self.addpendingblock(graph, link.target, cells)
        if block in self.notify:
            # reflow from certain positions when this block is done
            for callback in self.notify[block]:
                if isinstance(callback, tuple):
                    self.reflowfromposition(callback) # callback is a position
                else:
                    callback()
#___ creating the annotations based on operations ______
    def consider_op(self, op):
        """Annotate a single SpaceOperation: dispatch to the matching
        consider_op_<opname> method and bind the result to op.result."""
        argcells = [self.binding(a) for a in op.args]
        consider_meth = getattr(self,'consider_op_'+op.opname,
                                None)
        if not consider_meth:
            raise Exception,"unknown op: %r" % op
        # let's be careful about avoiding propagated SomeImpossibleValues
        # to enter an op; the latter can result in violations of the
        # more general results invariant: e.g. if SomeImpossibleValue enters is_
        #  is_(SomeImpossibleValue, None) -> SomeBool
        #  is_(SomeInstance(not None), None) -> SomeBool(const=False) ...
        # boom -- in the assert of setbinding()
        for arg in argcells:
            if isinstance(arg, annmodel.SomeImpossibleValue):
                raise BlockedInference(self, op)
        try:
            resultcell = consider_meth(*argcells)
        except Exception:
            # re-raise with the operation and graph added to the message
            graph = self.bookkeeper.position_key[0]
            raise_nicer_exception(op, str(graph))
        if resultcell is None:
            resultcell = self.noreturnvalue(op)
        elif resultcell == annmodel.s_ImpossibleValue:
            raise BlockedInference(self, op) # the operation cannot succeed
        assert isinstance(resultcell, annmodel.SomeObject)
        assert isinstance(op.result, Variable)
        self.setbinding(op.result, resultcell) # bind resultcell to op.result
    def noreturnvalue(self, op):
        """Annotation to use for an operation whose handler returned None."""
        return annmodel.s_ImpossibleValue  # no return value (hook method)
    # XXX "contains" clash with SomeObject method
    def consider_op_contains(self, seq, elem):
        """Annotate 'elem in seq'; dispatched manually because 'contains'
        would clash with a SomeObject method name."""
        self.bookkeeper.count("contains", seq)
        return seq.op_contains(elem)
    def consider_op_newtuple(self, *args):
        """Annotate tuple construction from the given item annotations."""
        return annmodel.SomeTuple(items = args)
    def consider_op_newlist(self, *args):
        """Annotate list construction; the bookkeeper unifies the items."""
        return self.bookkeeper.newlist(*args)
    def consider_op_newdict(self):
        """Annotate construction of an (initially empty) dict."""
        return self.bookkeeper.newdict()
    def consider_op_newslice(self, start, stop, step):
        """Annotate slice construction from its three components."""
        self.bookkeeper.count('newslice', start, stop, step)
        return annmodel.SomeSlice(start, stop, step)
    def _registeroperations(cls, model):
        """Generate and install a consider_op_<name> method on 'cls' for
        every unary and binary operation of the annotation 'model'.

        The generated unary handlers dispatch on the first argument's
        annotation; the binary ones dispatch on the pairtype of the first
        two.  exec is used so each handler gets a proper function name.
        """
        # All unary operations
        d = {}
        for opname in model.UNARY_OPERATIONS:
            fnname = 'consider_op_' + opname
            exec """
def consider_op_%s(self, arg, *args):
    return arg.%s(*args)
""" % (opname, opname) in globals(), d
            setattr(cls, fnname, d[fnname])
        # All binary operations
        for opname in model.BINARY_OPERATIONS:
            fnname = 'consider_op_' + opname
            exec """
def consider_op_%s(self, arg1, arg2, *args):
    return pair(arg1,arg2).%s(*args)
""" % (opname, opname) in globals(), d
            setattr(cls, fnname, d[fnname])
    _registeroperations = classmethod(_registeroperations)
# register simple operations handling: install one consider_op_* handler
# per unary/binary operation of the annotation model
RPythonAnnotator._registeroperations(annmodel)
class CannotSimplify(Exception):
    """Raised by reverse_binding() when no known Variable is bound to the
    requested annotation cell."""
    pass
class BlockedInference(Exception):
    """This exception signals the type inference engine that the situation
    is currently blocked, and that it should try to progress elsewhere."""

    def __init__(self, annotator, op):
        self.annotator = annotator
        # remember where inference blocked, if a position is known
        try:
            self.break_at = annotator.bookkeeper.position_key
        except AttributeError:
            self.break_at = None
        self.op = op

    def __repr__(self):
        if self.break_at:
            break_at = self.annotator.whereami(self.break_at)
        else:
            break_at = "?"
        return "<BlockedInference break_at %s [%s]>" %(break_at, self.op)

    __str__ = __repr__
| Python |
"""
Unary operations on SomeValues.
"""
from pypy.annotation.model import \
SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, \
SomeDict, SomeUnicodeCodePoint, SomeTuple, SomeImpossibleValue, \
SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, SomePBC, \
SomeExternalObject, SomeTypedAddressAccess, SomeAddress, \
SomeCTypesObject, s_ImpossibleValue, s_Bool, \
unionof, set, missing_operation, add_knowntypedata, HarmlesslyBlocked, \
SomeGenericCallable
from pypy.annotation.bookkeeper import getbookkeeper
from pypy.annotation import builtin
from pypy.annotation.binaryop import _clone ## XXX where to put this?
from pypy.rpython import extregistry
from pypy.annotation.signature import annotation
# convenience only!
def immutablevalue(x):
    """Convenience shortcut: annotation of the constant value 'x', as
    computed by the currently active bookkeeper."""
    return getbookkeeper().immutablevalue(x)
# names of all operations that dispatch on a single annotation; each gets a
# default "missing operation" fallback on SomeObject (see the loop below)
# and may be overridden by the __extend__ classes in this module
UNARY_OPERATIONS = set(['len', 'is_true', 'getattr', 'setattr', 'delattr', 'hash',
                        'simple_call', 'call_args', 'str', 'repr',
                        'iter', 'next', 'invert', 'type', 'issubtype',
                        'pos', 'neg', 'nonzero', 'abs', 'hex', 'oct',
                        'ord', 'int', 'float', 'long', 'id',
                        'neg_ovf', 'abs_ovf', 'hint'])
# install a default 'missing operation' fallback on SomeObject for each name
for opname in UNARY_OPERATIONS:
    missing_operation(SomeObject, opname)
class __extend__(SomeObject):
    # Default annotations of the unary operations for any SomeObject.
    # (no class docstring on purpose: pairtype's __extend__ copies class
    # attributes into SomeObject itself)

    def type(obj, *moreargs):
        # annotate 'type(x)'; also records which variable the result is
        # the type of, for later isinstance-style refinement
        if moreargs:
            raise Exception, 'type() called with more than one argument'
        if obj.is_constant():
            if isinstance(obj, SomeInstance):
                r = SomePBC([obj.classdef.classdesc])
            else:
                r = immutablevalue(obj.knowntype)
        else:
            r = SomeObject()
            r.knowntype = type
        bk = getbookkeeper()
        fn, block, i = bk.position_key
        annotator = bk.annotator
        op = block.operations[i]
        assert op.opname == "type"
        assert len(op.args) == 1
        assert annotator.binding(op.args[0]) == obj
        r.is_type_of = [op.args[0]]
        return r

    def issubtype(obj, s_cls):
        if hasattr(obj, 'is_type_of'):
            # 'obj' came from a type() operation: answer via isinstance
            # on the original variable(s)
            vars = obj.is_type_of
            annotator = getbookkeeper().annotator
            return builtin.builtin_isinstance(annotator.binding(vars[0]),
                                              s_cls, vars)
        if obj.is_constant() and s_cls.is_constant():
            return immutablevalue(issubclass(obj.const, s_cls.const))
        return s_Bool

    def len(obj):
        return SomeInteger(nonneg=True)

    def is_true_behavior(obj, s):
        # refine the SomeBool 's' in place when truth value is known
        if obj.is_immutable_constant():
            s.const = bool(obj.const)
        else:
            s_len = obj.len()
            if s_len.is_immutable_constant():
                s.const = s_len.const > 0

    def is_true(s_obj):
        r = SomeBool()
        s_obj.is_true_behavior(r)
        bk = getbookkeeper()
        knowntypedata = r.knowntypedata = {}
        fn, block, i = bk.position_key
        op = block.operations[i]
        assert op.opname == "is_true" or op.opname == "nonzero"
        assert len(op.args) == 1
        arg = op.args[0]
        s_nonnone_obj = s_obj
        if s_obj.can_be_none():
            s_nonnone_obj = s_obj.nonnoneify()
        # in the 'True' branch, the argument cannot be None
        add_knowntypedata(knowntypedata, True, [arg], s_nonnone_obj)
        return r

    def nonzero(obj):
        return obj.is_true()

    def hash(obj):
        raise TypeError, "hash() is not generally supported"

    def str(obj):
        getbookkeeper().count('str', obj)
        return SomeString()

    def repr(obj):
        getbookkeeper().count('repr', obj)
        return SomeString()

    def hex(obj):
        getbookkeeper().count('hex', obj)
        return SomeString()

    def oct(obj):
        getbookkeeper().count('oct', obj)
        return SomeString()

    def id(obj): # xxx
        return SomeInteger()

    def int(obj):
        return SomeInteger()

    def float(obj):
        return SomeFloat()

    def long(obj):
        return SomeObject()   # XXX

    def delattr(obj, s_attr):
        if obj.__class__ != SomeObject or obj.knowntype != object:
            getbookkeeper().warning(
                ("delattr on potentally non-SomeObjects is not RPythonic: delattr(%r,%r)" %
                 (obj, s_attr)))

    def find_method(obj, name):
        "Look for a special-case implementation for the named method."
        try:
            analyser = getattr(obj.__class__, 'method_' + name)
        except AttributeError:
            return None
        else:
            return SomeBuiltin(analyser, obj, name)

    def getattr(obj, s_attr):
        # get a SomeBuiltin if the SomeObject has
        # a corresponding method to handle it
        if s_attr.is_constant() and isinstance(s_attr.const, str):
            attr = s_attr.const
            s_method = obj.find_method(attr)
            if s_method is not None:
                return s_method
            # if the SomeObject is itself a constant, allow reading its attrs
            if obj.is_immutable_constant() and hasattr(obj.const, attr):
                return immutablevalue(getattr(obj.const, attr))
        else:
            getbookkeeper().warning('getattr(%r, %r) is not RPythonic enough' %
                                    (obj, s_attr))
        return SomeObject()
    getattr.can_only_throw = []

    def bind_callables_under(obj, classdef, name):
        return obj   # default unbound __get__ implementation

    def simple_call(obj, *args_s):
        return obj.call(getbookkeeper().build_args("simple_call", args_s))

    def call_args(obj, *args_s):
        return obj.call(getbookkeeper().build_args("call_args", args_s))

    def call(obj, args, implicit_init=False):
        #raise Exception, "cannot follow call_args%r" % ((obj, args),)
        getbookkeeper().warning("cannot follow call(%r, %r)" % (obj, args))
        return SomeObject()

    def op_contains(obj, s_element):
        return s_Bool

    def hint(self, *args_s):
        # hints are transparent by default
        return self
class __extend__(SomeFloat):
    # unary operations on floats

    def pos(self):
        return self

    def neg(self):
        return SomeFloat()

    abs = neg

    def is_true(self):
        if not self.is_immutable_constant():
            return s_Bool
        return getbookkeeper().immutablevalue(bool(self.const))

    def hash(self):
        return SomeInteger()
class __extend__(SomeInteger):
    # unary operations on integers; the result keeps the same knowntype
    # (e.g. r_uint stays r_uint)

    def invert(self):
        return SomeInteger(knowntype=self.knowntype)
    invert.can_only_throw = []

    def pos(self):
        return SomeInteger(knowntype=self.knowntype)
    pos.can_only_throw = []
    int = pos

    # these are the only ones which can overflow:

    def neg(self):
        return SomeInteger(knowntype=self.knowntype)
    neg.can_only_throw = []
    # _ovf variant: same annotation but may raise OverflowError
    neg_ovf = _clone(neg, [OverflowError])

    def abs(self):
        return SomeInteger(nonneg=True, knowntype=self.knowntype)
    abs.can_only_throw = []
    abs_ovf = _clone(abs, [OverflowError])
class __extend__(SomeBool):
    # unary operations on bools; arithmetic ones promote to integers

    def is_true(self):
        return self

    def invert(self):
        return SomeInteger()
    invert.can_only_throw = []

    def neg(self):
        return SomeInteger()
    neg.can_only_throw = []
    neg_ovf = _clone(neg, [OverflowError])

    def abs(self):
        return SomeInteger(nonneg=True)
    abs.can_only_throw = []
    abs_ovf = _clone(abs, [OverflowError])

    def pos(self):
        return SomeInteger(nonneg=True)
    pos.can_only_throw = []
    int = pos
class __extend__(SomeTuple):
    # unary operations on tuples

    def len(self):
        # the length of an annotated tuple is always known
        return immutablevalue(len(self.items))

    def iter(self):
        getbookkeeper().count("tuple_iter", self)
        return SomeIterator(self)
    iter.can_only_throw = []

    def getanyitem(self):
        # iteration can yield any of the items: union them
        return unionof(*self.items)

    def hash(self):
        # record that we need the hash of each item
        for s_item in self.items:
            s_item.hash()
        return SomeInteger()
class __extend__(SomeList):
    # unary operations and methods on lists.  Mutating methods inform
    # the shared listdef so that all aliases of the list agree.

    def method_append(lst, s_value):
        lst.listdef.resize()
        lst.listdef.generalize(s_value)

    def method_extend(lst, s_iterable):
        lst.listdef.resize()
        if isinstance(s_iterable, SomeList):   # unify the two lists
            lst.listdef.agree(s_iterable.listdef)
        else:
            # generalize with whatever iterating the argument can yield
            s_iter = s_iterable.iter()
            lst.method_append(s_iter.next())

    def method_reverse(lst):
        lst.listdef.mutate()

    def method_insert(lst, s_index, s_value):
        lst.method_append(s_value)

    def method_remove(lst, s_value):
        lst.listdef.resize()
        lst.listdef.generalize(s_value)

    def method_pop(lst, s_index=None):
        lst.listdef.resize()
        return lst.listdef.read_item()

    def method_index(lst, s_value):
        getbookkeeper().count("list_index")
        lst.listdef.generalize(s_value)
        return SomeInteger(nonneg=True)

    def len(lst):
        s_item = lst.listdef.read_item()
        if isinstance(s_item, SomeImpossibleValue):
            # no item was ever put in the list: its length is 0
            return immutablevalue(0)
        return SomeObject.len(lst)

    def iter(lst):
        return SomeIterator(lst)
    iter.can_only_throw = []

    def getanyitem(lst):
        return lst.listdef.read_item()

    def op_contains(lst, s_element):
        lst.listdef.generalize(s_element)
        return s_Bool

    def hint(lst, *args_s):
        hints = args_s[-1].const
        if 'maxlength' in hints:
            # only for iteration over lists or dicts at the moment,
            # not over an iterator object (because it has no known length)
            s_iterable = args_s[0]
            if isinstance(s_iterable, (SomeList, SomeDict)):
                lst.listdef.resize()
                lst.listdef.listitem.hint_maxlength = True
        elif 'fence' in hints:
            lst = lst.listdef.offspring()
        return lst
class __extend__(SomeDict):
    # unary operations and methods on dicts; key/value annotations are
    # shared through the dictdef

    def len(dct):
        s_key = dct.dictdef.read_key()
        s_value = dct.dictdef.read_value()
        if isinstance(s_key, SomeImpossibleValue) or isinstance(s_value, SomeImpossibleValue):
            # nothing was ever put in the dict: its length is 0
            return immutablevalue(0)
        return SomeObject.len(dct)

    def iter(dct):
        return SomeIterator(dct)
    iter.can_only_throw = []

    def getanyitem(dct, variant='keys'):
        # what iterating yields, depending on the iteration variant
        if variant == 'keys':
            return dct.dictdef.read_key()
        elif variant == 'values':
            return dct.dictdef.read_value()
        elif variant == 'items':
            s_key = dct.dictdef.read_key()
            s_value = dct.dictdef.read_value()
            if (isinstance(s_key, SomeImpossibleValue) or
                isinstance(s_value, SomeImpossibleValue)):
                return s_ImpossibleValue
            else:
                return SomeTuple((s_key, s_value))
        else:
            raise ValueError

    def method_get(dct, key, dfl):
        dct.dictdef.generalize_key(key)
        # the default may be returned, so it generalizes the value too
        dct.dictdef.generalize_value(dfl)
        return dct.dictdef.read_value()

    method_setdefault = method_get

    def method_copy(dct):
        # the copy shares the same dictdef
        return SomeDict(dct.dictdef)

    def method_update(dct1, dct2):
        dct1.dictdef.union(dct2.dictdef)

    def method_keys(dct):
        return getbookkeeper().newlist(dct.dictdef.read_key())

    def method_values(dct):
        return getbookkeeper().newlist(dct.dictdef.read_value())

    def method_items(dct):
        return getbookkeeper().newlist(dct.getanyitem('items'))

    def method_iterkeys(dct):
        return SomeIterator(dct, 'keys')

    def method_itervalues(dct):
        return SomeIterator(dct, 'values')

    def method_iteritems(dct):
        return SomeIterator(dct, 'items')

    def method_clear(dct):
        pass

    def op_contains(dct, s_element):
        dct.dictdef.generalize_key(s_element)
        return s_Bool
class __extend__(SomeString):
    # unary operations and methods on strings ('s' instead of the builtin
    # name 'str' for the receiver)

    def method_startswith(s, frag):
        return s_Bool

    def method_endswith(s, frag):
        return s_Bool

    def method_find(s, frag, start=None, end=None):
        return SomeInteger()

    def method_rfind(s, frag, start=None, end=None):
        return SomeInteger()

    def method_count(s, frag, start=None, end=None):
        return SomeInteger(nonneg=True)

    def method_strip(s, chr):
        return SomeString()

    def method_lstrip(s, chr):
        return SomeString()

    def method_rstrip(s, chr):
        return SomeString()

    def method_join(s, s_list):
        getbookkeeper().count("str_join", s)
        s_item = s_list.listdef.read_item()
        if isinstance(s_item, SomeImpossibleValue):
            # joining a provably-empty list gives the empty string
            return immutablevalue("")
        return SomeString()

    def iter(s):
        return SomeIterator(s)
    iter.can_only_throw = []

    def getanyitem(s):
        return SomeChar()

    def ord(s):
        return SomeInteger(nonneg=True)

    def hash(s):
        return SomeInteger()

    def method_split(s, patt): # XXX
        getbookkeeper().count("str_split", s, patt)
        return getbookkeeper().newlist(SomeString())

    def method_replace(s, s1, s2):
        return SomeString()

    def method_lower(s):
        return SomeString()

    def method_upper(s):
        return SomeString()
class __extend__(SomeChar):
    # unary operations on single characters ('ch' instead of the builtin
    # name 'chr' for the receiver)

    def len(ch):
        # a char always has length 1
        return immutablevalue(1)

    def method_isspace(ch):
        return s_Bool

    def method_isdigit(ch):
        return s_Bool

    def method_isalpha(ch):
        return s_Bool

    def method_isalnum(ch):
        return s_Bool

    def method_islower(ch):
        return s_Bool

    def method_isupper(ch):
        return s_Bool
class __extend__(SomeUnicodeCodePoint):

    def ord(self):
        # a unicode code point is a non-negative integer
        return SomeInteger(nonneg=True)
class __extend__(SomeIterator):
    # unary operations on iterator annotations

    def iter(it):
        # iterating an iterator gives itself
        return it
    iter.can_only_throw = []

    def _can_only_throw(it):
        # dict iterators may also raise RuntimeError (mutation during
        # iteration); everything else only raises StopIteration
        if isinstance(it.s_container, SomeDict):
            return [StopIteration, RuntimeError]
        return [StopIteration]

    def next(it):
        return it.s_container.getanyitem(*it.variant)
    next.can_only_throw = _can_only_throw
    method_next = next
class __extend__(SomeInstance):
    # unary operations on user-class instances; attribute accesses go
    # through the classdef's attribute records

    def getattr(ins, s_attr):
        if s_attr.is_constant() and isinstance(s_attr.const, str):
            attr = s_attr.const
            if attr == '__class__':
                return ins.classdef.read_attr__class__()
            attrdef = ins.classdef.find_attribute(attr)
            # remember this read position: generalizing the attribute
            # later triggers a reflow from here
            position = getbookkeeper().position_key
            attrdef.read_locations[position] = True
            s_result = attrdef.getvalue()
            # hack: if s_result is a set of methods, discard the ones
            # that can't possibly apply to an instance of ins.classdef.
            # XXX do it more nicely
            if isinstance(s_result, SomePBC):
                s_result = ins.classdef.lookup_filter(s_result, attr,
                                                      ins.flags)
            elif isinstance(s_result, SomeImpossibleValue):
                ins.classdef.check_missing_attribute_update(attr)
                # blocking is harmless if the attribute is explicitly listed
                # in the class or a parent class.
                for basedef in ins.classdef.getmro():
                    if basedef.classdesc.all_enforced_attrs is not None:
                        if attr in basedef.classdesc.all_enforced_attrs:
                            raise HarmlesslyBlocked("get enforced attr")
            return s_result
        return SomeObject()
    getattr.can_only_throw = []

    def setattr(ins, s_attr, s_value):
        if s_attr.is_constant() and isinstance(s_attr.const, str):
            attr = s_attr.const
            # find the (possibly parent) class where this attr is defined
            clsdef = ins.classdef.locate_attribute(attr)
            attrdef = clsdef.attrs[attr]
            attrdef.modified(clsdef)
            # if the attrdef is new, this must fail
            if attrdef.getvalue().contains(s_value):
                return
            # create or update the attribute in clsdef
            clsdef.generalize_attr(attr, s_value)

    def hash(ins):
        # record that the class needs RPython-level hash support
        getbookkeeper().needs_hash_support[ins.classdef] = True
        return SomeInteger()

    def is_true_behavior(ins, s):
        if not ins.can_be_None:
            s.const = True
class __extend__(SomeBuiltin):
    # calls to the special-case analyser functions wrapped in SomeBuiltin

    def simple_call(bltn, *args):
        if bltn.s_self is None:
            if bltn.methodname:
                getbookkeeper().count(bltn.methodname.replace('.', '_'), *args)
            return bltn.analyser(*args)
        # bound form: pass the receiver annotation first
        return bltn.analyser(bltn.s_self, *args)

    def call(bltn, args, implicit_init=False):
        args_s, kwds = args.unpack()
        # prefix keyword arguments with 's_' -- that is how the analyser
        # functions spell their keyword parameters
        kwds_s = dict([('s_' + key, s_value)
                       for key, s_value in kwds.items()])
        if bltn.s_self is None:
            return bltn.analyser(*args_s, **kwds_s)
        return bltn.analyser(bltn.s_self, *args_s, **kwds_s)
class __extend__(SomePBC):
    # unary operations on prebuilt constants (sets of descriptions)

    def getattr(pbc, s_attr):
        # attribute access is resolved by the bookkeeper across all
        # descriptions in the PBC
        return getbookkeeper().pbc_getattr(pbc, s_attr)
    getattr.can_only_throw = []

    def setattr(pbc, s_attr, s_value):
        getbookkeeper().warning("setattr not wanted on %r" % (pbc,))

    def call(pbc, args):
        return getbookkeeper().pbc_call(pbc, args)

    def bind_callables_under(pbc, classdef, name):
        # __get__ emulation: bind every description under the class
        descs = []
        for desc in pbc.descriptions:
            descs.append(desc.bind_under(classdef, name))
        return SomePBC(descs, can_be_None=pbc.can_be_None)

    def is_true_behavior(pbc, s):
        if pbc.isNone():
            s.const = False
        elif not pbc.can_be_None:
            s.const = True
class __extend__(SomeGenericCallable):

    def call(self, args):
        # Check that each actual argument fits the declared signature; the
        # result annotation is fixed in advance by the callable's type.
        # (dropped an unused 'bookkeeper = getbookkeeper()' local)
        for s_arg, s_expected in zip(args.unpack()[0], self.args_s):
            assert s_expected.contains(s_arg)
        return self.s_result
class __extend__(SomeExternalObject):
    # XXX kill with extfunctable.py

    def find_method(obj, name):
        "Look for a special-case implementation for the named method."
        # XXX kill with extfunctable.py
        type_analyser = builtin.EXTERNAL_TYPE_ANALYZERS[obj.knowntype]
        if name in type_analyser:
            return SomeBuiltin(type_analyser[name], obj, name)
        return SomeObject.find_method(obj, name)

    def getattr(p, s_attr):
        if not (s_attr.is_constant() and isinstance(s_attr.const, str)):
            return SomeObject()
        # XXX kill with extfunctable.py
        if p.knowntype in builtin.EXTERNAL_TYPE_ANALYZERS:
            return SomeObject.getattr(p, s_attr)
        # field annotations come from the external registry
        entry = extregistry.lookup_type(p.knowntype)
        return entry.get_field_annotation(p.knowntype, s_attr.const)
    getattr.can_only_throw = []

    def setattr(p, s_attr, s_value):
        assert s_attr.is_constant()
        entry = extregistry.lookup_type(p.knowntype)
        entry.set_field_annotation(p.knowntype, s_attr.const, s_value)

    def is_true(p):
        return s_Bool
# annotation of low-level types
from pypy.annotation.model import SomePtr, SomeLLADTMeth
from pypy.annotation.model import SomeOOInstance, SomeOOBoundMeth, SomeOOStaticMeth
from pypy.annotation.model import ll_to_annotation, lltype_to_annotation, annotation_to_lltype
class __extend__(SomePtr):
    """Annotation behaviour of low-level pointers.  Most operations are
    annotated by performing them on an '_example()' value of the pointed-to
    type and annotating whatever comes out."""

    def getattr(p, s_attr):
        assert s_attr.is_constant(), "getattr on ptr %r with non-constant field-name" % p.ll_ptrtype
        v = getattr(p.ll_ptrtype._example(), s_attr.const)
        return ll_to_annotation(v)
    getattr.can_only_throw = []

    def len(p):
        length = p.ll_ptrtype._example()._fixedlength()
        if length is None:
            # not a fixed-length array: fall back to the generic behaviour
            return SomeObject.len(p)
        else:
            return immutablevalue(length)

    def setattr(p, s_attr, s_value): # just doing checking
        assert s_attr.is_constant(), "setattr on ptr %r with non-constant field-name" % p.ll_ptrtype
        example = p.ll_ptrtype._example()
        if getattr(example, s_attr.const) is not None:  # ignore Void s_value
            # type-check the write by performing it on the example value
            v_lltype = annotation_to_lltype(s_value)
            setattr(example, s_attr.const, v_lltype._defl())

    def call(p, args):
        args_s, kwds_s = args.unpack()
        if kwds_s:
            raise Exception("keyword arguments to call to a low-level fn ptr")
        info = 'argument to ll function pointer call'
        llargs = [annotation_to_lltype(s_arg,info)._defl() for s_arg in args_s]
        v = p.ll_ptrtype._example()(*llargs)
        return ll_to_annotation(v)

    def is_true(p):
        return s_Bool
class __extend__(SomeLLADTMeth):

    def call(adtmeth, args):
        """Call an ADT method: annotate the underlying function, with the
        receiver pointer prepended to the argument list."""
        s_func = getbookkeeper().immutablevalue(adtmeth.func)
        s_receiver = SomePtr(adtmeth.ll_ptrtype)
        return s_func.call(args.prepend(s_receiver))
from pypy.rpython.ootypesystem import ootype
class __extend__(SomeOOInstance):
    """Annotation behaviour of ootype instances."""

    def getattr(r, s_attr):
        assert s_attr.is_constant(), "getattr on ref %r with non-constant field-name" % r.ootype
        v = getattr(r.ootype._example(), s_attr.const)
        if isinstance(v, ootype._bound_meth):
            # reading a method yields a bound-method annotation, not a value
            return SomeOOBoundMeth(r.ootype, s_attr.const)
        return ll_to_annotation(v)

    def setattr(r, s_attr, s_value):
        assert s_attr.is_constant(), "setattr on ref %r with non-constant field-name" % r.ootype
        # type-check the write by performing it on an example instance
        v = annotation_to_lltype(s_value)
        example = r.ootype._example()
        if example is not None:
            setattr(r.ootype._example(), s_attr.const, v._example())

    def is_true(p):
        return s_Bool
class __extend__(SomeOOBoundMeth):

    def simple_call(m, *args_s):
        """Annotate a bound ootype method call from the declared method
        type (overloaded methods have their own resolver)."""
        inst = m.ootype._example()
        _, meth = m.ootype._lookup(m.name)
        if not isinstance(meth, ootype._overloaded_meth):
            METH = ootype.typeOf(meth)
            return lltype_to_annotation(METH.RESULT)
        return meth._resolver.annotate(args_s)
class __extend__(SomeOOStaticMeth):

    def simple_call(m, *args_s):
        """Annotate a static method call by executing it on example values."""
        example_args = [annotation_to_lltype(s)._example() for s in args_s]
        example_meth = m.method._example()
        result = example_meth(*example_args)
        return ll_to_annotation(result)
class __extend__(SomeCTypesObject):
    """Annotation behaviour of ctypes objects; fields and calls are
    delegated to the extregistry entry for the ctypes type."""

    def setattr(cto, s_attr, s_value):
        # nothing to record at annotation time
        pass

    def getattr(cto, s_attr):
        if s_attr.is_constant() and isinstance(s_attr.const, str):
            attr = s_attr.const
            entry = extregistry.lookup_type(cto.knowntype)
            s_value = entry.get_field_annotation(cto, attr)
            return s_value
        else:
            # non-constant attribute name: we know nothing about the result
            return SomeObject()

    def is_true(cto):
        return s_Bool

    def simple_call(cto, *args_s):
        # for variables containing ctypes function pointers
        entry = extregistry.lookup_type(cto.knowntype)
        return entry.compute_result_annotation(*args_s)
#_________________________________________
# memory addresses
from pypy.rpython.memory import lladdress
class __extend__(SomeAddress):
    """Annotation behaviour of raw memory addresses."""

    def getattr(s_addr, s_attr):
        # only the typed-access pseudo-attributes listed in
        # lladdress.supported_access_types can be read on an address
        assert s_attr.is_constant()
        assert isinstance(s_attr, SomeString)
        assert s_attr.const in lladdress.supported_access_types
        return SomeTypedAddressAccess(
            lladdress.supported_access_types[s_attr.const])
    getattr.can_only_throw = []

    def is_true(s_addr):
        return s_Bool
| Python |
"""
Built-in functions.
"""
import sys
from pypy.annotation.model import SomeInteger, SomeObject, SomeChar, SomeBool
from pypy.annotation.model import SomeString, SomeTuple, SomeSlice, s_Bool
from pypy.annotation.model import SomeUnicodeCodePoint, SomeAddress
from pypy.annotation.model import SomeFloat, SomeWeakGcAddress, unionof
from pypy.annotation.model import SomePBC, SomeInstance, SomeDict
from pypy.annotation.model import SomeExternalObject
from pypy.annotation.model import annotation_to_lltype, lltype_to_annotation, ll_to_annotation
from pypy.annotation.model import add_knowntypedata
from pypy.annotation.model import s_ImpossibleValue
from pypy.annotation.bookkeeper import getbookkeeper
from pypy.annotation import description
from pypy.objspace.flow.model import Constant
import pypy.rlib.rarithmetic
import pypy.rlib.objectmodel
import pypy.rlib.rstack
# convenience only!
def immutablevalue(x):
    """Shortcut for the current bookkeeper's immutablevalue(x)
    (convenience only)."""
    bk = getbookkeeper()
    return bk.immutablevalue(x)
def constpropagate(func, args_s, s_result):
    """Returns s_result unless all args are constants, in which case the
    func() is called and a constant result is returned (it must be contained
    in s_result).
    """
    values = []
    for s_arg in args_s:
        if not s_arg.is_immutable_constant():
            return s_result
        values.append(s_arg.const)
    try:
        concrete = func(*values)
    except (ValueError, OverflowError):
        # no possible answer for this precise input
        return s_ImpossibleValue
    s_concrete = immutablevalue(concrete)
    if not s_result.contains(s_concrete):
        raise Exception("%s%r returned %r, which is not contained in %s" % (
            func, values, concrete, s_result))
    return s_concrete
# ____________________________________________________________
def builtin_range(*args):
    """Annotate range(stop) / range(start, stop[, step]): a list of
    integers, propagating non-negativity and constant emptiness when the
    bounds are known."""
    s_step = immutablevalue(1)
    if len(args) == 1:
        s_start = immutablevalue(0)
        s_stop = args[0]
    elif len(args) == 2:
        s_start, s_stop = args
    elif len(args) == 3:
        s_start, s_stop = args[:2]
        s_step = args[2]
    else:
        raise Exception, "range() takes 1 to 3 arguments"
    empty = False  # so far
    if not s_step.is_constant():
        step = 0 # this case signals a variable step
    else:
        step = s_step.const
        if step == 0:
            raise Exception, "range() with step zero"
        if s_start.is_constant() and s_stop.is_constant():
            # constant bounds: we can decide emptiness exactly
            if len(xrange(s_start.const, s_stop.const, step)) == 0:
                empty = True
    if empty:
        s_item = s_ImpossibleValue
    else:
        nonneg = False # so far
        if step > 0:
            nonneg = s_start.nonneg
        elif step < 0:
            # counting down: items are all >= stop+1
            nonneg = s_stop.nonneg or (s_stop.is_constant() and
                                       s_stop.const >= -1)
        s_item = SomeInteger(nonneg=nonneg)
    return getbookkeeper().newlist(s_item, range_step=step)

builtin_xrange = builtin_range # xxx for now allow it
def builtin_bool(s_obj):
    """bool(x): delegate to the annotation's own truth-value logic."""
    return s_obj.is_true()

def builtin_int(s_obj, s_base=None):
    """int(x) or int(string, base): constant-folded when possible;
    non-negativity of an integer argument is preserved."""
    assert (s_base is None or isinstance(s_base, SomeInteger)
            and s_obj.knowntype == str), "only int(v|string) or int(string,int) expected"
    if s_base is not None:
        args_s = [s_obj, s_base]
    else:
        args_s = [s_obj]
    nonneg = isinstance(s_obj, SomeInteger) and s_obj.nonneg
    return constpropagate(int, args_s, SomeInteger(nonneg=nonneg))

def builtin_float(s_obj):
    """float(x): constant-folded when possible."""
    return constpropagate(float, [s_obj], SomeFloat())

def builtin_chr(s_int):
    """chr(i): a single character, constant-folded when possible."""
    return constpropagate(chr, [s_int], SomeChar())

def builtin_unichr(s_int):
    """unichr(i): a single unicode code point, constant-folded when possible."""
    return constpropagate(unichr, [s_int], SomeUnicodeCodePoint())
##def builtin_unicode(s_obj):
## raise TypeError, "unicode() calls should not happen at interp-level"
def our_issubclass(cls1, cls2):
    """ we're going to try to be less silly in the face of old-style classes"""
    # Each argument may be a real (built-in or user) class or an annotator
    # ClassDef; mixed combinations are compared via the bookkeeper's
    # unique classdefs.
    from pypy.annotation.classdef import ClassDef
    if cls2 is object:
        return True
    def classify(cls):
        # 'def' = annotator ClassDef, 'builtin' = built-in class,
        # 'cls' = ordinary user class
        if isinstance(cls, ClassDef):
            return 'def'
        if cls.__module__ == '__builtin__':
            return 'builtin'
        else:
            return 'cls'
    kind1 = classify(cls1)
    kind2 = classify(cls2)
    if kind1 != 'def' and kind2 != 'def':
        # two real classes: plain issubclass works
        return issubclass(cls1, cls2)
    if kind1 == 'builtin' and kind2 == 'def':
        return False
    elif kind1 == 'def' and kind2 == 'builtin':
        return issubclass(object, cls2)
    else:
        # at least one ClassDef: compare at the classdef level
        bk = getbookkeeper()
        def toclassdef(kind, cls):
            if kind != 'def':
                return bk.getuniqueclassdef(cls)
            else:
                return cls
        return toclassdef(kind1, cls1).issubclass(toclassdef(kind2, cls2))
def builtin_isinstance(s_obj, s_type, variables=None):
    """Annotate isinstance(obj, type): a bool, made constant whenever the
    answer can be decided statically; also records knowntypedata so that
    the object's annotation can be narrowed in the 'True' branch."""
    r = SomeBool()
    if s_type.is_constant():
        typ = s_type.const
        if issubclass(typ, pypy.rlib.rarithmetic.base_int):
            r.const = issubclass(s_obj.knowntype, typ)
        else:
            if typ == long:
                getbookkeeper().warning("isinstance(., long) is not RPython")
                if s_obj.is_constant():
                    r.const = isinstance(s_obj.const, long)
                else:
                    if type(s_obj) is not SomeObject: # only SomeObjects could be longs
                        # type(s_obj) < SomeObject -> SomeBool(False)
                        # type(s_obj) == SomeObject -> SomeBool()
                        r.const = False
                return r

            assert not issubclass(typ, (int,long)) or typ in (bool, int), (
                "for integers only isinstance(.,int|r_uint) are supported")

            if s_obj.is_constant():
                r.const = isinstance(s_obj.const, typ)
            elif our_issubclass(s_obj.knowntype, typ):
                if not s_obj.can_be_none():
                    r.const = True
            elif not our_issubclass(typ, s_obj.knowntype):
                r.const = False
            elif s_obj.knowntype == int and typ == bool: # xxx this will explode in case of generalisation
                                                   # from bool to int, notice that isinstance( , bool|int)
                                                   # is quite border case for RPython
                r.const = False
        # XXX HACK HACK HACK
        # XXX HACK HACK HACK
        # XXX HACK HACK HACK
        # dig out the checked variable from the current operation so that
        # knowntypedata can be attached to it
        bk = getbookkeeper()
        if variables is None:
            fn, block, i = bk.position_key
            op = block.operations[i]
            assert op.opname == "simple_call"
            assert len(op.args) == 3
            assert op.args[0] == Constant(isinstance)
            variables = [op.args[1]]
        for variable in variables:
            assert bk.annotator.binding(variable) == s_obj
        r.knowntypedata = {}
        add_knowntypedata(r.knowntypedata, True, variables, bk.valueoftype(typ))
    return r
# note that this one either needs to be constant, or we will create SomeObject
def builtin_hasattr(s_obj, s_attr):
    """Annotate hasattr(obj, name): a bool, constant when the object is an
    immutable constant or a frozen PBC whose descriptions all agree."""
    # note that this one either needs the attribute name to be constant,
    # or we will create SomeObject
    if not s_attr.is_constant() or not isinstance(s_attr.const, str):
        getbookkeeper().warning('hasattr(%r, %r) is not RPythonic enough' %
                                (s_obj, s_attr))
    r = SomeBool()
    if s_obj.is_immutable_constant():
        r.const = hasattr(s_obj.const, s_attr.const)
    elif (isinstance(s_obj, SomePBC)
          and s_obj.getKind() is description.FrozenDesc):
        # constant only if every description gives the same answer
        answers = {}
        for d in s_obj.descriptions:
            answer = (d.s_read_attribute(s_attr.const) != s_ImpossibleValue)
            answers[answer] = True
        if len(answers) == 1:
            r.const, = answers
    return r
##def builtin_callable(s_obj):
## return SomeBool()
def builtin_tuple(s_iterable):
    """tuple(x): precise only when the argument is already a known tuple."""
    if not isinstance(s_iterable, SomeTuple):
        return SomeObject()
    return s_iterable
def builtin_list(s_iterable):
    """list(x): a new list whose items are whatever x's iterator yields."""
    s_iter = s_iterable.iter()
    return getbookkeeper().newlist(s_iter.next())

def builtin_zip(s_iterable1, s_iterable2): # xxx not actually implemented
    """zip(a, b): a list of 2-tuples of the iterators' item annotations."""
    s_iter1 = s_iterable1.iter()
    s_iter2 = s_iterable2.iter()
    s_tup = SomeTuple((s_iter1.next(),s_iter2.next()))
    return getbookkeeper().newlist(s_tup)

def builtin_min(*s_values):
    """min(...): the union of the arguments (or the item of a single
    iterable argument)."""
    if len(s_values) == 1: # xxx do we support this?
        s_iter = s_values[0].iter()
        return s_iter.next()
    else:
        return unionof(*s_values)

def builtin_max(*s_values):
    """max(...): like min(), but the result is non-negative as soon as any
    single argument is known to be non-negative."""
    if len(s_values) == 1: # xxx do we support this?
        s_iter = s_values[0].iter()
        return s_iter.next()
    else:
        s = unionof(*s_values)
        if type(s) is SomeInteger and not s.nonneg:
            nonneg = False
            for s1 in s_values:
                nonneg |= s1.nonneg
            if nonneg:
                s = SomeInteger(nonneg=True, knowntype=s.knowntype)
        return s

def builtin_apply(*stuff):
    # apply() is not really supported; degrade to SomeObject with a warning
    getbookkeeper().warning("ignoring apply%r" % (stuff,))
    return SomeObject()
def builtin_slice(*args):
    """Annotate slice([start,] stop[, step]); missing pieces are the
    constant None."""
    bk = getbookkeeper()
    if len(args) == 1:
        return SomeSlice(
            bk.immutablevalue(None), args[0], bk.immutablevalue(None))
    elif len(args) == 2:
        return SomeSlice(
            args[0], args[1], bk.immutablevalue(None))
    elif len(args) == 3:
        return SomeSlice(
            args[0], args[1], args[2])
    else:
        raise Exception, "bogus call to slice()"
def OSError_init(s_self, *args):
    # OSError.__init__ has no effect worth annotating
    pass

def termios_error_init(s_self, *args):
    # NOTE(review): defined but not registered anywhere in this chunk —
    # presumably hooked up elsewhere; confirm before removing.
    pass

def object_init(s_self, *args):
    # ignore - mostly used for abstract classes initialization
    pass

def conf():
    # sys.getdefaultencoding() returns a string
    return SomeString()

def rarith_intmask(s_obj):
    # intmask() always yields a plain signed integer
    return SomeInteger()
def robjmodel_instantiate(s_clspbc):
    """Annotate objectmodel.instantiate(cls): an instance of the common
    base of all classes the PBC may stand for.  When more than one class
    is possible, each classdef is flagged as needing a generic
    instantiate."""
    assert isinstance(s_clspbc, SomePBC)
    clsdef = None
    # bug fix: this used to be the raw count, which is true even for a
    # single description and needlessly flagged needs_generic_instantiate
    more_than_one = len(s_clspbc.descriptions) > 1
    for desc in s_clspbc.descriptions:
        cdef = desc.getuniqueclassdef()
        if more_than_one:
            getbookkeeper().needs_generic_instantiate[cdef] = True
        if not clsdef:
            clsdef = cdef
        else:
            clsdef = clsdef.commonbase(cdef)
    return SomeInstance(clsdef)
def robjmodel_we_are_translated():
    # constant True: annotated code is, by definition, being translated
    return immutablevalue(True)

def robjmodel_r_dict(s_eqfn, s_hashfn):
    """r_dict(eq, hash): a dict using custom equality/hash functions."""
    dictdef = getbookkeeper().getdictdef(is_r_dict=True)
    dictdef.dictkey.update_rdict_annotations(s_eqfn, s_hashfn)
    return SomeDict(dictdef)
def robjmodel_hlinvoke(s_repr, s_llcallable, *args_s):
    """Annotate objectmodel.hlinvoke(repr, llcallable, *args): the
    annotation corresponding to the (unique) low-level result type of the
    callable described by the constant Repr."""
    from pypy.rpython import rmodel
    # bug fix: TyperError was not imported anywhere in this module, so the
    # error branch below used to die with a NameError instead
    from pypy.rpython.error import TyperError
    assert s_repr.is_constant() and isinstance(s_repr.const, rmodel.Repr), \
        "hlinvoke expects a constant repr as first argument"
    r_func, nimplicitarg = s_repr.const.get_r_implfunc()
    nbargs = len(args_s) + nimplicitarg
    s_sigs = r_func.get_s_signatures((nbargs, (), False, False))
    if len(s_sigs) != 1:
        # bug fix: the two string literals used to concatenate without a
        # space ("not uniformannotations")
        raise TyperError("cannot hlinvoke callable %r with not uniform "
                         "annotations: %r" % (s_repr.const, s_sigs))
    _, s_ret = s_sigs[0]
    rresult = r_func.rtyper.getrepr(s_ret)
    return lltype_to_annotation(rresult.lowleveltype)
def robjmodel_keepalive_until_here(*args_s):
    # pure hint for the backend: the result is the constant None
    return immutablevalue(None)

def llmemory_cast_ptr_to_adr(s):
    return SomeAddress()

def llmemory_cast_adr_to_ptr(s, s_type):
    # the target pointer type must be known at annotation time
    assert s_type.is_constant()
    return SomePtr(s_type.const)

def llmemory_cast_ptr_to_weakadr(s):
    return SomeWeakGcAddress()

def llmemory_cast_weakadr_to_ptr(s, s_type):
    # the target pointer type must be known at annotation time
    assert s_type.is_constant()
    return SomePtr(s_type.const)

def llmemory_cast_adr_to_int(s):
    return SomeInteger() # xxx

def llmemory_cast_int_to_adr(s):
    return SomeAddress()
def rstack_yield_current_frame_to_caller():
    # the stackless frame-top object is modelled as an external object
    return SomeExternalObject(pypy.rlib.rstack.frame_stack_top)

##def rarith_ovfcheck(s_obj):
##    if isinstance(s_obj, SomeInteger) and s_obj.unsigned:
##        getbookkeeper().warning("ovfcheck on unsigned")
##    return s_obj

##def rarith_ovfcheck_lshift(s_obj1, s_obj2):
##    if isinstance(s_obj1, SomeInteger) and s_obj1.unsigned:
##        getbookkeeper().warning("ovfcheck_lshift with unsigned")
##    return SomeInteger()

def unicodedata_decimal(s_uchr):
    # not supported in RPython; fail loudly at annotation time
    raise TypeError, "unicodedate.decimal() calls should not happen at interp-level"

def test(*args):
    return s_Bool

def import_func(*args):
    # __import__ results are opaque to the annotator
    return SomeObject()
# collect all functions
import __builtin__, exceptions
# maps real builtin function objects to their annotation analysers
BUILTIN_ANALYZERS = {}
# maps external types to their per-method analysers (see extfunctable)
EXTERNAL_TYPE_ANALYZERS = {}
# every 'builtin_xyz' function defined above analyses the real builtin 'xyz'
for name, value in globals().items():
    if name.startswith('builtin_'):
        original = getattr(__builtin__, name[8:])
        BUILTIN_ANALYZERS[original] = value

##BUILTIN_ANALYZERS[pypy.rlib.rarithmetic.ovfcheck] = rarith_ovfcheck
##BUILTIN_ANALYZERS[pypy.rlib.rarithmetic.ovfcheck_lshift] = rarith_ovfcheck_lshift
BUILTIN_ANALYZERS[pypy.rlib.rarithmetic.intmask] = rarith_intmask
BUILTIN_ANALYZERS[pypy.rlib.objectmodel.instantiate] = robjmodel_instantiate
BUILTIN_ANALYZERS[pypy.rlib.objectmodel.we_are_translated] = (
    robjmodel_we_are_translated)
BUILTIN_ANALYZERS[pypy.rlib.objectmodel.r_dict] = robjmodel_r_dict
BUILTIN_ANALYZERS[pypy.rlib.objectmodel.hlinvoke] = robjmodel_hlinvoke
BUILTIN_ANALYZERS[pypy.rlib.objectmodel.keepalive_until_here] = robjmodel_keepalive_until_here
BUILTIN_ANALYZERS[pypy.rpython.lltypesystem.llmemory.cast_ptr_to_adr] = llmemory_cast_ptr_to_adr
BUILTIN_ANALYZERS[pypy.rpython.lltypesystem.llmemory.cast_adr_to_ptr] = llmemory_cast_adr_to_ptr
BUILTIN_ANALYZERS[pypy.rpython.lltypesystem.llmemory.cast_adr_to_int] = llmemory_cast_adr_to_int
BUILTIN_ANALYZERS[pypy.rpython.lltypesystem.llmemory.cast_int_to_adr] = llmemory_cast_int_to_adr
BUILTIN_ANALYZERS[pypy.rpython.lltypesystem.llmemory.cast_ptr_to_weakadr] = llmemory_cast_ptr_to_weakadr
BUILTIN_ANALYZERS[pypy.rpython.lltypesystem.llmemory.cast_weakadr_to_ptr] = llmemory_cast_weakadr_to_ptr
BUILTIN_ANALYZERS[pypy.rlib.rstack.yield_current_frame_to_caller] = (
    rstack_yield_current_frame_to_caller)
# 'im_func' covers the unbound-method case of OSError.__init__
BUILTIN_ANALYZERS[getattr(OSError.__init__, 'im_func', OSError.__init__)] = (
    OSError_init)
BUILTIN_ANALYZERS[sys.getdefaultencoding] = conf
try:
    import unicodedata
except ImportError:
    pass
else:
    BUILTIN_ANALYZERS[unicodedata.decimal] = unicodedata_decimal # xxx

# object - just ignore object.__init__
BUILTIN_ANALYZERS[object.__init__] = object_init

# import
BUILTIN_ANALYZERS[__import__] = import_func
# annotation of low-level types
from pypy.annotation.model import SomePtr
from pypy.rpython.lltypesystem import lltype
def malloc(s_T, s_n=None, s_flavor=None, s_extra_args=None, s_zero=None):
    """Annotate lltype.malloc(T, [n], ...): a pointer to T.  For the
    default flavor an example allocation pins down the exact pointer
    type; other flavors build the Ptr type directly."""
    assert (s_n is None or s_n.knowntype == int
            or issubclass(s_n.knowntype, pypy.rlib.rarithmetic.base_int))
    assert s_T.is_constant()
    if s_n is not None:
        # only the presence of a length matters for the example, not its value
        n = 1
    else:
        n = None
    if s_zero:
        assert s_zero.is_constant()
    if s_flavor is None:
        p = lltype.malloc(s_T.const, n)
        r = SomePtr(lltype.typeOf(p))
    else:
        assert s_flavor.is_constant()
        # not sure how to call malloc() for the example 'p' in the
        # presence of s_extraargs
        r = SomePtr(lltype.Ptr(s_T.const))
    return r
def free(s_p, s_flavor):
    """Annotate lltype.free(p, flavor): no result; only checks that the
    flavor is a compile-time constant."""
    assert s_flavor.is_constant()
    # same problem as in malloc(): some flavors are not easy to
    # malloc-by-example
    #T = s_p.ll_ptrtype.TO
    #p = lltype.malloc(T, flavor=s_flavor.const)
    #lltype.free(p, flavor=s_flavor.const)
def typeOf(s_val):
    """Annotate lltype.typeOf(v): the constant low-level type of v."""
    # fix: the local used to be named 'lltype', shadowing the module-level
    # 'lltype' import inside this function
    T = annotation_to_lltype(s_val, info="in typeOf(): ")
    return immutablevalue(T)
def cast_primitive(T, s_v):
    """lltype.cast_primitive(T, v): annotate from an example cast to T."""
    assert T.is_constant()
    return ll_to_annotation(lltype.cast_primitive(T.const, annotation_to_lltype(s_v)._defl()))

def nullptr(T):
    """lltype.nullptr(T): the constant NULL pointer of type Ptr(T)."""
    assert T.is_constant()
    p = lltype.nullptr(T.const)
    return immutablevalue(p)

def cast_pointer(PtrT, s_p):
    """lltype.cast_pointer(PtrT, p): a pointer of type PtrT."""
    assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p
    assert PtrT.is_constant()
    cast_p = lltype.cast_pointer(PtrT.const, s_p.ll_ptrtype._defl())
    return SomePtr(ll_ptrtype=lltype.typeOf(cast_p))

def cast_opaque_ptr(PtrT, s_p):
    """lltype.cast_opaque_ptr(PtrT, p): a pointer of type PtrT."""
    assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p
    assert PtrT.is_constant()
    cast_p = lltype.cast_opaque_ptr(PtrT.const, s_p.ll_ptrtype._defl())
    return SomePtr(ll_ptrtype=lltype.typeOf(cast_p))

def direct_fieldptr(s_p, s_fieldname):
    # interior pointer to a named field, computed on an example value
    assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p
    assert s_fieldname.is_constant()
    cast_p = lltype.direct_fieldptr(s_p.ll_ptrtype._example(),
                                    s_fieldname.const)
    return SomePtr(ll_ptrtype=lltype.typeOf(cast_p))

def direct_arrayitems(s_p):
    # interior pointer to the items of an array, from an example value
    assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p
    cast_p = lltype.direct_arrayitems(s_p.ll_ptrtype._example())
    return SomePtr(ll_ptrtype=lltype.typeOf(cast_p))

def direct_ptradd(s_p, s_n):
    assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p
    # don't bother with an example here: the resulting pointer is the same
    return s_p

def cast_ptr_to_int(s_ptr): # xxx
    return SomeInteger()

def cast_int_to_ptr(PtrT, s_int):
    assert PtrT.is_constant()
    return SomePtr(ll_ptrtype=PtrT.const)

def getRuntimeTypeInfo(T):
    assert T.is_constant()
    return immutablevalue(lltype.getRuntimeTypeInfo(T.const))

def runtime_type_info(s_p):
    assert isinstance(s_p, SomePtr), "runtime_type_info of non-pointer: %r" % s_p
    return SomePtr(lltype.typeOf(lltype.runtime_type_info(s_p.ll_ptrtype._example())))

def constPtr(T):
    assert T.is_constant()
    return immutablevalue(lltype.Ptr(T.const))
# hook the lltype operations up to their analysers
BUILTIN_ANALYZERS[lltype.malloc] = malloc
BUILTIN_ANALYZERS[lltype.free] = free
BUILTIN_ANALYZERS[lltype.typeOf] = typeOf
BUILTIN_ANALYZERS[lltype.cast_primitive] = cast_primitive
BUILTIN_ANALYZERS[lltype.nullptr] = nullptr
BUILTIN_ANALYZERS[lltype.cast_pointer] = cast_pointer
BUILTIN_ANALYZERS[lltype.cast_opaque_ptr] = cast_opaque_ptr
BUILTIN_ANALYZERS[lltype.direct_fieldptr] = direct_fieldptr
BUILTIN_ANALYZERS[lltype.direct_arrayitems] = direct_arrayitems
BUILTIN_ANALYZERS[lltype.direct_ptradd] = direct_ptradd
BUILTIN_ANALYZERS[lltype.cast_ptr_to_int] = cast_ptr_to_int
BUILTIN_ANALYZERS[lltype.cast_int_to_ptr] = cast_int_to_ptr
BUILTIN_ANALYZERS[lltype.getRuntimeTypeInfo] = getRuntimeTypeInfo
BUILTIN_ANALYZERS[lltype.runtime_type_info] = runtime_type_info
BUILTIN_ANALYZERS[lltype.Ptr] = constPtr
# ootype
from pypy.annotation.model import SomeOOInstance, SomeOOClass
from pypy.rpython.ootypesystem import ootype
def new(I):
    """ootype.new(INSTANCE): an instance annotation from an example."""
    assert I.is_constant()
    i = ootype.new(I.const)
    r = SomeOOInstance(ootype.typeOf(i))
    return r

def null(I_OR_SM):
    """ootype.null(INSTANCE_or_STATICMETH): the matching null value."""
    assert I_OR_SM.is_constant()
    null = ootype.null(I_OR_SM.const)
    r = lltype_to_annotation(ootype.typeOf(null))
    return r

def instanceof(i, I):
    assert I.is_constant()
    assert isinstance(I.const, ootype.Instance)
    return s_Bool

def classof(i):
    assert isinstance(i, SomeOOInstance)
    return SomeOOClass(i.ootype)

def subclassof(class1, class2):
    assert isinstance(class1, SomeOOClass)
    assert isinstance(class2, SomeOOClass)
    return s_Bool

def runtimenew(c):
    assert isinstance(c, SomeOOClass)
    if c.ootype is None:
        # can't call runtimenew(NULL)
        return s_ImpossibleValue
    else:
        return SomeOOInstance(c.ootype)

def ooidentityhash(i):
    assert isinstance(i, SomeOOInstance)
    return SomeInteger()
# hook the ootype operations up to their analysers
BUILTIN_ANALYZERS[ootype.instanceof] = instanceof
BUILTIN_ANALYZERS[ootype.new] = new
BUILTIN_ANALYZERS[ootype.null] = null
BUILTIN_ANALYZERS[ootype.runtimenew] = runtimenew
BUILTIN_ANALYZERS[ootype.classof] = classof
BUILTIN_ANALYZERS[ootype.subclassof] = subclassof
BUILTIN_ANALYZERS[ootype.ooidentityhash] = ooidentityhash

#________________________________
# non-gc objects

def robjmodel_free_non_gc_object(obj):
    # the free itself has no annotation-level effect
    pass

BUILTIN_ANALYZERS[pypy.rlib.objectmodel.free_non_gc_object] = (
    robjmodel_free_non_gc_object)
#_________________________________
# memory address
from pypy.rpython.memory import lladdress
from pypy.rpython.lltypesystem import llmemory
def raw_malloc(s_size):
    """raw_malloc(size): an address."""
    assert isinstance(s_size, SomeInteger) #XXX add noneg...?
    return SomeAddress()

def raw_malloc_usage(s_size):
    """raw_malloc_usage(size): a non-negative integer."""
    assert isinstance(s_size, SomeInteger) #XXX add noneg...?
    return SomeInteger(nonneg=True)

def raw_free(s_addr):
    # freeing an address known to be NULL is rejected
    assert isinstance(s_addr, SomeAddress)
    assert not s_addr.is_null

def raw_memclear(s_addr, s_int):
    assert isinstance(s_addr, SomeAddress)
    assert not s_addr.is_null
    assert isinstance(s_int, SomeInteger)

def raw_memcopy(s_addr1, s_addr2, s_int):
    # neither source nor destination may be known-NULL
    assert isinstance(s_addr1, SomeAddress)
    assert not s_addr1.is_null
    assert isinstance(s_addr2, SomeAddress)
    assert not s_addr2.is_null
    assert isinstance(s_int, SomeInteger) #XXX add noneg...?
# the same analysers serve both the lladdress and the llmemory variants
BUILTIN_ANALYZERS[lladdress.raw_malloc] = raw_malloc
BUILTIN_ANALYZERS[lladdress.raw_malloc_usage] = raw_malloc_usage
BUILTIN_ANALYZERS[lladdress.raw_free] = raw_free
BUILTIN_ANALYZERS[lladdress.raw_memclear] = raw_memclear
BUILTIN_ANALYZERS[lladdress.raw_memcopy] = raw_memcopy
BUILTIN_ANALYZERS[llmemory.raw_malloc] = raw_malloc
BUILTIN_ANALYZERS[llmemory.raw_malloc_usage] = raw_malloc_usage
BUILTIN_ANALYZERS[llmemory.raw_free] = raw_free
BUILTIN_ANALYZERS[llmemory.raw_memclear] = raw_memclear
BUILTIN_ANALYZERS[llmemory.raw_memcopy] = raw_memcopy

#_________________________________
# offsetof/sizeof

from pypy.rpython.lltypesystem import llmemory

def offsetof(TYPE, fldname):
    # symbolic field offset: just an integer at annotation level
    return SomeInteger()

BUILTIN_ANALYZERS[llmemory.offsetof] = offsetof
#_________________________________
# external functions
from pypy.rpython import extfunctable
def update_exttables():
    """Import annotation information for external functions and external
    types from extfunctable into our own annotation-specific tables."""
    for func, extfuncinfo in extfunctable.table.items():
        BUILTIN_ANALYZERS[func] = extfuncinfo.annotation
    for typ, exttypeinfo in extfunctable.typetable.items():
        EXTERNAL_TYPE_ANALYZERS[typ] = exttypeinfo.get_annotations()
# Note: calls to declare() may occur after builtin.py is first imported.
# We must track future changes to the extfunctables.
extfunctable.table_callbacks.append(update_exttables)
# populate the tables with whatever has been declared so far
update_exttables()
| Python |
"""
Type inference for user-defined classes.
"""
from __future__ import generators
from pypy.annotation.model import SomePBC, s_ImpossibleValue, unionof
from pypy.annotation.model import SomeInteger, isdegenerated, SomeTuple,\
SomeString
from pypy.annotation import description
# The main purpose of a ClassDef is to collect information about class/instance
# attributes as they are really used. An Attribute object is stored in the
# most general ClassDef where an attribute of that name is read/written:
# classdef.attrs = {'attrname': Attribute()}
#
# The following invariants hold:
#
# (A) if an attribute is read/written on an instance of class A, then the
# classdef of A or a parent class of A has an Attribute object corresponding
# to that name.
#
# (I) if B is a subclass of A, then they don't both have an Attribute for the
# same name. (All information from B's Attribute must be merged into A's.)
#
# Additionally, each ClassDef records an 'attr_sources': it maps attribute names
# to a list of 'source' objects that want to provide a constant value for this
# attribute at the level of this class. The attr_sources provide information
# higher in the class hierarchy than concrete Attribute()s. It is for the case
# where (so far or definitely) the user program only reads/writes the attribute
# at the level of a subclass, but a value for this attribute could possibly
# exist in the parent class or in an instance of a parent class.
#
# The point of not automatically forcing the Attribute instance up to the
# parent class which has a class attribute of the same name is apparent with
# multiple subclasses:
#
# A
# attr=s1
# / \
# / \
# B C
# attr=s2 attr=s3
#
# In this case, as long as 'attr' is only read/written from B or C, the
# Attribute on B says that it can be 's1 or s2', and the Attribute on C says
# it can be 's1 or s3'. Merging them into a single Attribute on A would give
# the more imprecise 's1 or s2 or s3'.
#
# The following invariant holds:
#
# (II) if a class A has an Attribute, the 'attr_sources' for the same name is
# empty. It is also empty on all subclasses of A. (The information goes
# into the Attribute directly in this case.)
#
# The following invariant holds:
#
# (III) for a class A, each attrsource that comes from the class (as opposed to
# from a prebuilt instance) must be merged into all Attributes of the
# same name in all subclasses of A, if any. (Parent class attributes can
# be visible in reads from instances of subclasses.)
class Attribute:
    """Records everything known about one attribute name at the level of
    one ClassDef: the union annotation of all values seen, whether it is
    read-only, and every position that reads it (for reflowing)."""
    # readonly-ness
    # SomeThing-ness
    # NB.  an attribute is readonly if it is a constant class attribute.
    #      Both writing to the instance attribute and discovering prebuilt
    #      instances that have the attribute set will turn off readonly-ness.

    def __init__(self, name, bookkeeper):
        assert name != '__class__'
        self.name = name
        self.bookkeeper = bookkeeper
        # union annotation of all values this attribute has been seen with
        self.s_value = s_ImpossibleValue
        self.readonly = True
        # False once forbidden by __slots__ / _attrs_ enforcement
        self.attr_allowed = True
        # set of positions that read this attribute (used as a set)
        self.read_locations = {}

    def add_constant_source(self, classdef, source):
        """Merge the value provided by a constant source (class attribute
        or prebuilt instance) into this attribute's annotation."""
        s_value = source.s_get_value(classdef, self.name)
        if source.instance_level:
            # a prebuilt instance source forces readonly=False, see above
            self.modified(classdef)
        s_new_value = unionof(self.s_value, s_value)
        if isdegenerated(s_new_value):
            self.bookkeeper.ondegenerated("source %r attr %s" % (source, self.name),
                                          s_new_value)
        self.s_value = s_new_value

    def getvalue(self):
        # Same as 'self.s_value' for historical reasons.
        return self.s_value

    def merge(self, other, classdef='?'):
        """Absorb another Attribute of the same name (from a subclass)."""
        assert self.name == other.name
        s_new_value = unionof(self.s_value, other.s_value)
        if isdegenerated(s_new_value):
            what = "%s attr %s" % (classdef, self.name)
            self.bookkeeper.ondegenerated(what, s_new_value)
        self.s_value = s_new_value
        if not other.readonly:
            self.modified(classdef)
        self.read_locations.update(other.read_locations)

    def mutated(self, homedef): # reflow from attr read positions
        """Called when the annotation was generalized: reflow every read
        position, then check for method demotion and forbidden attrs."""
        s_newvalue = self.getvalue()

        for position in self.read_locations:
            self.bookkeeper.annotator.reflowfromposition(position)

        # check for method demotion and after-the-fact method additions
        if isinstance(s_newvalue, SomePBC):
            attr = self.name
            if (not s_newvalue.isNone() and
                s_newvalue.getKind() == description.MethodDesc):
                # is method
                if homedef.classdesc.read_attribute(attr, None) is None:
                    if not homedef.check_missing_attribute_update(attr):
                        for desc in s_newvalue.descriptions:
                            if desc.selfclassdef is None:
                                if homedef.classdesc.settled:
                                    raise Exception("demoting method %s "
                                                    "to settled class %s not "
                                                    "allowed" %
                                                    (self.name, homedef)
                                                    )
                                self.bookkeeper.warning("demoting method %s "
                                                        "to base class %s" %
                                                        (self.name, homedef))
                                break

        # check for attributes forbidden by slots or _attrs_
        if homedef.classdesc.all_enforced_attrs is not None:
            if self.name not in homedef.classdesc.all_enforced_attrs:
                self.attr_allowed = False
                if not self.readonly:
                    raise NoSuchAttrError(homedef, self.name)

    def modified(self, classdef='?'):
        # a write (or prebuilt-instance value) ends the read-only phase
        self.readonly = False
        if not self.attr_allowed:
            raise NoSuchAttrError(classdef, self.name)
class ClassDef:
"Wraps a user class."
    def __init__(self, bookkeeper, classdesc):
        """Create the classdef for 'classdesc', linking it into the
        subclass lists of its (unique) base classdef, if any."""
        self.bookkeeper = bookkeeper
        self.attrs = {}          # {name: Attribute}
        self.classdesc = classdesc
        self.name = self.classdesc.name
        self.shortname = self.name.split('.')[-1]
        self.subdefs = []
        self.attr_sources = {}   # {name: list-of-sources}
        self.read_locations_of__class__ = {}

        if classdesc.basedesc:
            self.basedef = classdesc.basedesc.getuniqueclassdef()
            self.basedef.subdefs.append(self)
            self.basedef.see_new_subclass(self)
        else:
            self.basedef = None

        # precomputed set of self + all ancestors, for fast issubclass()
        self.parentdefs = dict.fromkeys(self.getmro())
def setup(self, sources):
# collect the (supposed constant) class attributes
for name, source in sources.items():
self.add_source_for_attribute(name, source)
if self.bookkeeper:
self.bookkeeper.event('classdef_setup', self)
    def add_source_for_attribute(self, attr, source):
        """Adds information about a constant source for an attribute.

        If some class in the MRO already owns the Attribute, the source is
        merged into it directly; otherwise it is remembered in our
        attr_sources and propagated to any Attribute in subclasses, as
        required by invariant (III)."""
        for cdef in self.getmro():
            if attr in cdef.attrs:
                # the Attribute() exists already for this class (or a parent)
                attrdef = cdef.attrs[attr]
                s_prev_value = attrdef.s_value
                attrdef.add_constant_source(self, source)
                # we should reflow from all the reader's position,
                # but as an optimization we try to see if the attribute
                # has really been generalized
                if attrdef.s_value != s_prev_value:
                    attrdef.mutated(cdef) # reflow from all read positions
                return
        else:
            # remember the source in self.attr_sources
            sources = self.attr_sources.setdefault(attr, [])
            sources.append(source)
            # register the source in any Attribute found in subclasses,
            # to restore invariant (III)
            # NB. add_constant_source() may discover new subdefs but the
            #     right thing will happen to them because self.attr_sources
            #     was already updated
            if not source.instance_level:
                for subdef in self.getallsubdefs():
                    if attr in subdef.attrs:
                        attrdef = subdef.attrs[attr]
                        s_prev_value = attrdef.s_value
                        attrdef.add_constant_source(self, source)
                        if attrdef.s_value != s_prev_value:
                            attrdef.mutated(subdef) # reflow from all read positions
    def locate_attribute(self, attr):
        """Return the classdef in the MRO that owns the Attribute 'attr',
        creating it via generalize_attr() if no class owns it yet."""
        while True:
            for cdef in self.getmro():
                if attr in cdef.attrs:
                    return cdef
            self.generalize_attr(attr)
            # the return value will likely be 'self' now, but not always -- see
            # test_annrpython.test_attr_moving_from_subclass_to_class_to_parent
def find_attribute(self, attr):
return self.locate_attribute(attr).attrs[attr]
def __repr__(self):
return "<ClassDef '%s'>" % (self.name,)
def has_no_attrs(self):
for clsdef in self.getmro():
if clsdef.attrs:
return False
return True
def commonbase(self, other):
other1 = other
while other is not None and not self.issubclass(other):
other = other.basedef
return other
def getmro(self):
while self is not None:
yield self
self = self.basedef
def issubclass(self, otherclsdef):
    """True if 'otherclsdef' is among this classdef's parents."""
    parents = self.parentdefs
    return otherclsdef in parents
def getallsubdefs(self):
    """Breadth-first enumeration of self and every known subclassdef."""
    queue = [self]
    visited = {}
    # note: 'queue' grows while we iterate over it, which makes this
    # a breadth-first walk without an explicit index
    for current in queue:
        yield current
        for child in current.subdefs:
            if child not in visited:
                visited[child] = True
                queue.append(child)
def _generalize_attr(self, attr, s_value):
    """Install a fresh Attribute() for 'attr' on this classdef,
    merging everything known from sub- and superclasses, generalized
    with 's_value' (if not None).  Restores invariants (I)-(III)."""
    # first remove the attribute from subclasses -- including us!
    # invariant (I)
    subclass_attrs = []
    constant_sources = []    # [(classdef-of-origin, source)]
    for subdef in self.getallsubdefs():
        if attr in subdef.attrs:
            subclass_attrs.append(subdef.attrs[attr])
            del subdef.attrs[attr]
        if attr in subdef.attr_sources:
            # accumulate attr_sources for this attribute from all subclasses
            lst = subdef.attr_sources[attr]
            for source in lst:
                constant_sources.append((subdef, source))
            del lst[:]    # invariant (II)

    # accumulate attr_sources for this attribute from all parents, too
    # invariant (III)
    for superdef in self.getmro():
        if attr in superdef.attr_sources:
            for source in superdef.attr_sources[attr]:
                if not source.instance_level:
                    constant_sources.append((superdef, source))

    # create the Attribute and do the generalization asked for
    newattr = Attribute(attr, self.bookkeeper)
    if s_value:
        newattr.s_value = s_value

    # keep all subattributes' values
    for subattr in subclass_attrs:
        newattr.merge(subattr, classdef=self)

    # store this new Attribute, generalizing the previous ones from
    # subclasses -- invariant (A)
    self.attrs[attr] = newattr

    # add the values of the pending constant attributes
    # completes invariants (II) and (III)
    for origin_classdef, source in constant_sources:
        newattr.add_constant_source(origin_classdef, source)

    # reflow from all read positions
    newattr.mutated(self)
def generalize_attr(self, attr, s_value=None):
    """Generalize the Attribute() for 'attr' with 's_value'.

    If the attribute already exists in a superclass, the
    generalization is performed there, as imposed by invariant (I);
    otherwise it is created locally.

    Fix: removed a block of commented-out pdb debugging code that was
    left behind in the original.
    """
    for clsdef in self.getmro():
        if attr in clsdef.attrs:
            clsdef._generalize_attr(attr, s_value)
            break
    else:
        self._generalize_attr(attr, s_value)
def about_attribute(self, name):
    """This is the interface for the code generators to ask about
    the annotation given to a attribute.  Returns None when the
    attribute is unknown or impossible."""
    for cdef in self.getmro():
        if name in cdef.attrs:
            s_result = cdef.attrs[name].s_value
            if s_result != s_ImpossibleValue:
                return s_result
            return None
    return None
def lookup_filter(self, pbc, name=None, flags={}):
    """Selects the methods in the pbc that could possibly be seen by
    a lookup performed on an instance of 'self', removing the ones
    that cannot appear.

    Fix: removed two write-only locals of the original version
    ('meth' and 'check_for_missing_attrs'); they were never read.
    """
    d = []
    uplookup = None
    updesc = None
    for desc in pbc.descriptions:
        # pick methods but ignore already-bound methods, which can come
        # from an instance attribute
        if (isinstance(desc, description.MethodDesc)
            and desc.selfclassdef is None):
            methclassdef = desc.originclassdef
            if methclassdef is not self and methclassdef.issubclass(self):
                pass # subclasses methods are always candidates
            elif self.issubclass(methclassdef):
                # upward consider only the best match
                if uplookup is None or methclassdef.issubclass(uplookup):
                    uplookup = methclassdef
                    updesc = desc
                continue
                # for clsdef1 >= clsdef2, we guarantee that
                # clsdef1.lookup_filter(pbc) includes
                # clsdef2.lookup_filter(pbc) (see formal proof...)
            else:
                continue # not matching
            # bind the method by giving it a selfclassdef.  Use the
            # more precise subclass that it's coming from.
            desc = desc.bind_self(methclassdef, flags)
        d.append(desc)
    if uplookup is not None:
        d.append(updesc.bind_self(self, flags))
    if d or pbc.can_be_None:
        return SomePBC(d, can_be_None=pbc.can_be_None)
    else:
        return s_ImpossibleValue
def check_missing_attribute_update(self, name):
    # haaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaack
    # sometimes, new methods can show up on classes, added
    # e.g. by W_TypeObject._freeze_() -- the multimethod
    # implementations.  Check that here...
    #
    # NB: every base must be asked (check_attr_here has side effects),
    # base-most first, so no short-circuiting here.
    bases = list(self.getmro())
    bases.reverse()
    hits = [base.check_attr_here(name) for base in bases]
    return any(hits)
def check_attr_here(self, name):
    """If the classdesc now provides a source for 'name', record it
    here and in every subclass, and return True; else return False."""
    source = self.classdesc.find_source_for(name)
    if source is None:
        return False
    # oups! new attribute showed up
    self.add_source_for_attribute(name, source)
    # maybe it also showed up in some subclass?
    for subdef in self.getallsubdefs():
        if subdef is not self:
            subdef.check_attr_here(name)
    return True
def see_new_subclass(self, classdef):
    """Reflow every position that read our __class__, then propagate
    the notification up the base chain."""
    annotator = self.bookkeeper.annotator
    for position in self.read_locations_of__class__:
        annotator.reflowfromposition(position)
    base = self.basedef
    if base is not None:
        base.see_new_subclass(classdef)
def read_attr__class__(self):
    """Record the current position as a reader of __class__ and
    return the PBC of all possible class descriptions."""
    here = self.bookkeeper.position_key
    self.read_locations_of__class__[here] = True
    descs = [subdef.classdesc for subdef in self.getallsubdefs()]
    return SomePBC(descs)
def _freeze_(self):
    """ClassDefs must never be turned into frozen immutable values.

    Fix: use the parenthesized raise form instead of the Python-2-only
    'raise Exception, msg' comma syntax; behavior is identical.
    """
    raise Exception("ClassDefs are used as knowntype for instances "
                    "but cannot be used as immutablevalue arguments directly")
# ____________________________________________________________
class InstanceSource:
    """A source of constant attribute values read off a prebuilt
    instance 'obj'."""
    instance_level = True

    def __init__(self, bookkeeper, obj):
        self.bookkeeper = bookkeeper
        self.obj = obj

    def s_get_value(self, classdef, name):
        """Annotation of the constant value of attribute 'name'.

        Returns s_ImpossibleValue for an enforced-but-absent attribute;
        re-raises AttributeError otherwise.
        """
        try:
            v = getattr(self.obj, name)
        except AttributeError:
            all_enforced_attrs = classdef.classdesc.all_enforced_attrs
            if all_enforced_attrs and name in all_enforced_attrs:
                return s_ImpossibleValue
            raise
        s_value = self.bookkeeper.immutablevalue(v)
        return s_value

    def all_instance_attributes(self):
        """List every attribute name visible on self.obj: the instance
        __dict__ plus all __slots__ declared along the type's MRO."""
        # Fix: make an explicit list; the original relied on
        # dict.keys() returning a (mutable) list, which is a
        # CPython-2-only assumption -- the result is mutated below.
        result = list(getattr(self.obj, '__dict__', {}).keys())
        tp = self.obj.__class__
        if isinstance(tp, type):
            for basetype in tp.__mro__:
                slots = basetype.__dict__.get('__slots__')
                if slots:
                    if isinstance(slots, str):
                        # a single bare name is allowed as __slots__
                        result.append(slots)
                    else:
                        result.extend(slots)
        return result
class NoSuchAttrError(Exception):
    """Raised when an attribute is found on a class where __slots__
    or _attrs_ forbids it."""
# ____________________________________________________________

# Attributes that the annotator must pretend exist on some built-in
# exception classes, with their annotations.
FORCE_ATTRIBUTES_INTO_CLASSES = {
    OSError: {'errno': SomeInteger()},
    }

# termios is platform-dependent: register termios.error only when the
# module is actually importable
try:
    import termios
except ImportError:
    pass
else:
    FORCE_ATTRIBUTES_INTO_CLASSES[termios.error] = \
        {'args': SomeTuple([SomeInteger(), SomeString()])}
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path.  If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # no __file__: running via exec or similar; fall back to argv[0]
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))

    # walk upwards until a directory named 'part' is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)

    pypy_root = os.path.join(head, '')
    # move 'head' to the front of sys.path
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)

    # collect already-imported top-level modules that actually live
    # inside the pypy tree, and compute their fully dotted names
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod

    # register the dotted aliases so both names refer to the same module
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
                if not hasattr(sys.modules[prename], postname):
                    setattr(sys.modules[prename], postname, mod)

    return partdir, this_dir
def __clone():
    """ clone master version of autopath.py into all subdirs """
    from os.path import join, walk
    if not this_dir.endswith(join('pypy','tool')):
        raise EnvironmentError("can only clone master version "
                               "'%s'" % join(pypydir, 'tool',_myname))

    def sync_walker(arg, dirname, fnames):
        # os.path.walk() visitor: overwrite any autopath.py found under
        # 'dirname' whose content differs from the master text 'arg'
        if _myname in fnames:
            fn = join(dirname, _myname)
            f = open(fn, 'rwb+')
            try:
                if f.read() == arg:
                    print "checkok", fn
                else:
                    print "syncing", fn
                    f = open(fn, 'w')
                    f.write(arg)
            finally:
                f.close()
    s = open(join(pypydir, 'tool', _myname), 'rb').read()
    walk(pypydir, sync_walker, s)
# name under which copies of this file are cloned around the tree
_myname = 'autopath.py'

# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    # running the master copy directly re-clones it into all subdirs
    __clone()
| Python |
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rpython.lltypesystem.lltype import typeOf
from pypy.objspace.flow.model import Constant
from pypy.annotation.model import unionof
from pypy.annotation.signature import annotation
import py
class genericcallable(object):
    """Deferred description of a callable's annotation: the real
    annotation can only be computed later, once a bookkeeper exists."""

    def __init__(self, args, result=None):
        self.args, self.result = args, result
class _ext_callable(ExtRegistryEntry):
    """Extregistry entry for genericcallable: turns the deferred
    description into a SomeGenericCallable at annotation time."""
    _type_ = genericcallable

    # we defer a bit annotation here
    def compute_result_annotation(self):
        from pypy.annotation import model as annmodel
        spec = self.instance
        args_s = [annotation(a, self.bookkeeper) for a in spec.args]
        s_result = annotation(spec.result, self.bookkeeper)
        return annmodel.SomeGenericCallable(args_s, s_result)
class ExtFuncEntry(ExtRegistryEntry):
    """Base extregistry entry for an external function: validates the
    annotations at every call site against the declared signature, and
    renders calls as direct_calls to the real or fake implementation."""

    def compute_result_annotation(self, *args_s):
        # optional hook installed by register_external(annotation_hook=...)
        if hasattr(self, 'ann_hook'):
            self.ann_hook()
        if self.signature_args is not None:
            assert len(args_s) == len(self.signature_args),\
                   "Argument number mismatch"
            # every actual argument must fit within the declared one
            for arg, expected in zip(args_s, self.signature_args):
                arg = unionof(arg, expected)
                assert expected.contains(arg)
        return self.signature_result

    def specialize_call(self, hop):
        """Turn the high-level call operation into a direct_call."""
        rtyper = hop.rtyper
        if self.signature_args is None:
            iter_args = hop.args_s
        else:
            iter_args = self.signature_args
        args_r = [rtyper.getrepr(s_arg) for s_arg in iter_args]
        args_ll = [r_arg.lowleveltype for r_arg in args_r]
        r_result = rtyper.getrepr(hop.s_result)
        ll_result = r_result.lowleveltype
        name = getattr(self, 'name', None) or self.instance.__name__
        # attribute names are 'lltypeimpl'/'ootypeimpl' (and the fake
        # variants) depending on the active type system
        method_name = rtyper.type_system.name[:2] + 'typeimpl'
        fake_method_name = rtyper.type_system.name[:2] + 'typefakeimpl'
        impl = getattr(self, method_name, None)
        fakeimpl = getattr(self, fake_method_name, self.instance)
        if impl:
            # a real RPython implementation exists: annotate and call it
            obj = rtyper.getannmixlevel().delayedfunction(
                impl, self.signature_args, hop.s_result)
        else:
            # otherwise emit an external callable; 'fakeimpl' is what
            # the llinterpreter will actually run
            obj = rtyper.type_system.getexternalcallable(args_ll, ll_result,
                                name, _external_name=self.name, _callable=fakeimpl)
        vlist = [hop.inputconst(typeOf(obj), obj)] + hop.inputargs(*args_r)
        hop.exception_is_here()
        return hop.genop('direct_call', vlist, r_result)
def register_external(function, args, result=None, export_name=None,
                      llimpl=None, ooimpl=None,
                      llfakeimpl=None, oofakeimpl=None,
                      annotation_hook=None):
    """
    function: the RPython function that will be rendered as an external function (e.g.: math.floor)
    args: a list containing the annotation of the arguments
    result: surprisingly enough, the annotation of the result
    export_name: the name of the function as it will be seen by the backends
    llimpl, ooimpl: optional; if provided, these RPython functions are called instead of the target function
    llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter
    annotationhook: optional; a callable that is called during annotation, useful for genc hacks
    """
    class FunEntry(ExtFuncEntry):
        _about_ = function
        if args is None:
            signature_args = None
        else:
            signature_args = [annotation(arg, None) for arg in args]
        signature_result = annotation(result, None)
        name = export_name
        if llimpl:
            lltypeimpl = staticmethod(llimpl)
        if ooimpl:
            # bug fix: this used to call the misspelled 'staticmethd',
            # which raised NameError whenever an ooimpl was supplied
            ootypeimpl = staticmethod(ooimpl)
        if llfakeimpl:
            lltypefakeimpl = staticmethod(llfakeimpl)
        if oofakeimpl:
            ootypefakeimpl = staticmethod(oofakeimpl)
        if annotation_hook:
            ann_hook = staticmethod(annotation_hook)

    if export_name:
        FunEntry.__name__ = export_name
    else:
        FunEntry.__name__ = function.func_name
def is_external(func):
    """True if 'func' (or func.value, for graph constants) denotes an
    external function: either its _callable is flagged as a suggested
    primitive, or it carries an _external_name."""
    target = getattr(func, 'value', func)
    if getattr(target._callable, 'suggested_primitive', False):
        return True
    return hasattr(target, '_external_name')
| Python |
from pypy.annotation.pairtype import pairtype, pair
from pypy.objspace.flow.model import Constant
from pypy.annotation import model as annmodel
from pypy.rpython.error import TyperError
from pypy.rpython.rmodel import Repr, IteratorRepr, IntegerRepr, inputconst
from pypy.rpython.rslice import AbstractSliceRepr
from pypy.rpython.rstr import AbstractStringRepr, AbstractCharRepr
from pypy.rpython.lltypesystem.lltype import typeOf, Ptr, Void, Signed, Bool
from pypy.rpython.lltypesystem.lltype import nullptr, Char, UniChar
from pypy.rpython import robject
from pypy.rlib.objectmodel import malloc_zero_filled, debug_assert
from pypy.rpython.annlowlevel import ADTInterface
# ADT interfaces that any chosen low-level list implementation must
# provide.  A fixed-size list supports creation, length query and fast
# (unchecked) indexed get/set; a resizable list additionally supports
# the three resize operations.
ADTIFixedList = ADTInterface(None, {
    'll_newlist':      (['SELF', Signed        ], 'self'),
    'll_length':       (['self'                ], Signed),
    'll_getitem_fast': (['self', Signed        ], 'item'),
    'll_setitem_fast': (['self', Signed, 'item'], Void),
})
ADTIList = ADTInterface(ADTIFixedList, {
    '_ll_resize_ge':   (['self', Signed        ], Void),
    '_ll_resize_le':   (['self', Signed        ], Void),
    '_ll_resize':      (['self', Signed        ], Void),
})
# Dummy marker functions, passed as Void constants so that 'func is
# dum_checkidx' tests select, at specialization time, between the
# bound-checking and the no-checking variants of the ll helpers.
def dum_checkidx(): pass
def dum_nocheck(): pass
class __extend__(annmodel.SomeList):
    def rtyper_makerepr(self, rtyper):
        # pick the best representation for this annotated list
        listitem = self.listdef.listitem
        s_value = listitem.s_value
        if (listitem.range_step is not None and not listitem.mutated and
            not isinstance(s_value, annmodel.SomeImpossibleValue)):
            # a never-mutated range() result: use the compact range repr
            return rtyper.type_system.rrange.RangeRepr(listitem.range_step)
        elif (s_value.__class__ is annmodel.SomeObject and s_value.knowntype == object):
            # lists of wild SomeObjects fall back to the PyObject repr
            return robject.pyobj_repr
        else:
            # cannot do the rtyper.getrepr() call immediately, for the case
            # of recursive structures -- i.e. if the listdef contains itself
            rlist = rtyper.type_system.rlist
            if self.listdef.listitem.resized:
                return rlist.ListRepr(rtyper,
                        lambda: rtyper.getrepr(listitem.s_value), listitem)
            else:
                return rlist.FixedSizeListRepr(rtyper,
                        lambda: rtyper.getrepr(listitem.s_value), listitem)

    def rtyper_makekey(self):
        # the listitem identity is the caching key for the repr
        self.listdef.listitem.dont_change_any_more = True
        return self.__class__, self.listdef.listitem
class AbstractBaseListRepr(Repr):
    """Common base for all list representations, fixed and resizable."""
    eq_func_cache = None

    def recast(self, llops, v):
        # convert an item from the internal repr to the external one
        return llops.convertvar(v, self.item_repr, self.external_item_repr)

    def convert_const(self, listobj):
        # get object from bound list method
        if listobj is None:
            return self.null_const()
        if not isinstance(listobj, list):
            raise TyperError("expected a list: %r" % (listobj,))
        try:
            key = Constant(listobj)
            return self.list_cache[key]
        except KeyError:
            self.setup()
            n = len(listobj)
            result = self.prepare_const(n)
            # cache the result before converting the items, so that
            # recursive list structures terminate
            self.list_cache[key] = result
            r_item = self.item_repr
            if r_item.lowleveltype is not Void:
                for i in range(n):
                    x = listobj[i]
                    result.ll_setitem_fast(i, r_item.convert_const(x))
            return result

    def null_const(self):
        # overridden by concrete reprs
        raise NotImplementedError

    def prepare_const(self, nitems):
        # overridden by concrete reprs
        raise NotImplementedError

    def ll_str(self, l):
        # build "[item, item, ...]" with the string-builder primitives
        constant = self.rstr_ll.ll_constant
        start = self.rstr_ll.ll_build_start
        push = self.rstr_ll.ll_build_push
        finish = self.rstr_ll.ll_build_finish
        length = l.ll_length()
        if length == 0:
            return constant("[]")
        # 2*length+1 pieces: brackets, items and ", " separators
        buf = start(2 * length + 1)
        push(buf, constant("["), 0)
        item_repr = self.item_repr
        i = 0
        while i < length:
            if i > 0:
                push(buf, constant(", "), 2 * i)
            item = l.ll_getitem_fast(i)
            push(buf, item_repr.ll_str(item), 2 * i + 1)
            i += 1
        push(buf, constant("]"), 2 * length)
        return finish(buf)

    def rtype_bltn_list(self, hop):
        # list(lst): copy into the result's list type
        v_lst = hop.inputarg(self, 0)
        cRESLIST = hop.inputconst(Void, hop.r_result.LIST)
        return hop.gendirectcall(ll_copy, cRESLIST, v_lst)

    def rtype_len(self, hop):
        v_lst, = hop.inputargs(self)
        return hop.gendirectcall(ll_len, v_lst)

    def rtype_is_true(self, hop):
        v_lst, = hop.inputargs(self)
        return hop.gendirectcall(ll_list_is_true, v_lst)

    def rtype_method_reverse(self, hop):
        v_lst, = hop.inputargs(self)
        hop.exception_cannot_occur()
        hop.gendirectcall(ll_reverse,v_lst)

    def rtype_method_remove(self, hop):
        v_lst, v_value = hop.inputargs(self, self.item_repr)
        hop.has_implicit_exception(ValueError) # record that we know about it
        hop.exception_is_here()
        return hop.gendirectcall(ll_listremove, v_lst, v_value,
                                 self.get_eqfunc())

    def rtype_method_index(self, hop):
        v_lst, v_value = hop.inputargs(self, self.item_repr)
        hop.has_implicit_exception(ValueError) # record that we know about it
        hop.exception_is_here()
        return hop.gendirectcall(ll_listindex, v_lst, v_value, self.get_eqfunc())

    def get_ll_eq_function(self):
        # memoized list-equality function for this list repr
        result = self.eq_func_cache
        if result is not None:
            return result
        def list_eq(l1, l2):
            return ll_listeq(l1, l2, item_eq_func)
        self.eq_func_cache = list_eq
        # ^^^ do this first, before item_repr.get_ll_eq_function()
        item_eq_func = self.item_repr.get_ll_eq_function()
        return list_eq
class AbstractListRepr(AbstractBaseListRepr):
    """Base repr for resizable lists: adds the mutating methods."""

    def rtype_method_append(self, hop):
        v_lst, v_value = hop.inputargs(self, self.item_repr)
        hop.exception_cannot_occur()
        hop.gendirectcall(ll_append, v_lst, v_value)

    def rtype_method_insert(self, hop):
        v_lst, v_index, v_value = hop.inputargs(self, Signed, self.item_repr)
        arg1 = hop.args_s[1]
        args = v_lst, v_index, v_value
        if arg1.is_constant() and arg1.const == 0:
            # insert(0, x) has a dedicated helper
            llfn = ll_prepend
            args = v_lst, v_value
        elif arg1.nonneg:
            llfn = ll_insert_nonneg
        else:
            raise TyperError("insert() index must be proven non-negative")
        hop.exception_cannot_occur()
        hop.gendirectcall(llfn, *args)

    def rtype_method_extend(self, hop):
        v_lst1, v_lst2 = hop.inputargs(*hop.args_r)
        hop.exception_cannot_occur()
        hop.gendirectcall(ll_extend, v_lst1, v_lst2)

    def rtype_method_pop(self, hop):
        # select checking/non-checking variant via the dum_* marker
        if hop.has_implicit_exception(IndexError):
            spec = dum_checkidx
        else:
            spec = dum_nocheck
        v_func = hop.inputconst(Void, spec)
        if hop.nb_args == 2:
            args = hop.inputargs(self, Signed)
            assert hasattr(args[1], 'concretetype')
            arg1 = hop.args_s[1]
            if arg1.is_constant() and arg1.const == 0:
                llfn = ll_pop_zero
                args = args[:1]
            elif hop.args_s[1].nonneg:
                llfn = ll_pop_nonneg
            else:
                llfn = ll_pop
        else:
            args = hop.inputargs(self)
            llfn = ll_pop_default
        hop.exception_is_here()
        v_res = hop.gendirectcall(llfn, v_func, *args)
        return self.recast(hop.llops, v_res)
class AbstractFixedSizeListRepr(AbstractBaseListRepr):
    """Base repr for lists whose length never changes; all behavior is
    inherited from AbstractBaseListRepr."""
class __extend__(pairtype(AbstractBaseListRepr, Repr)):

    def rtype_contains((r_lst, _), hop):
        # 'item in lst': linear scan with the list's item-eq function
        v_lst, v_any = hop.inputargs(r_lst, r_lst.item_repr)
        return hop.gendirectcall(ll_listcontains, v_lst, v_any, r_lst.get_eqfunc())
class __extend__(pairtype(AbstractBaseListRepr, IntegerRepr)):

    def rtype_getitem((r_lst, r_int), hop, checkidx=False):
        # lst[i]: select the checking/non-checking and nonneg/any variant
        if checkidx:
            spec = dum_checkidx
        else:
            spec = dum_nocheck
        v_func = hop.inputconst(Void, spec)
        v_lst, v_index = hop.inputargs(r_lst, Signed)
        if hop.args_s[1].nonneg:
            llfn = ll_getitem_nonneg
        else:
            llfn = ll_getitem
        if checkidx:
            hop.exception_is_here()
        else:
            hop.exception_cannot_occur()
        v_res = hop.gendirectcall(llfn, v_func, v_lst, v_index)
        return r_lst.recast(hop.llops, v_res)

    rtype_getitem_key = rtype_getitem

    def rtype_getitem_idx((r_lst, r_int), hop):
        # variant that must raise IndexError on a bad index
        return pair(r_lst, r_int).rtype_getitem(hop, checkidx=True)

    rtype_getitem_idx_key = rtype_getitem_idx

    def rtype_setitem((r_lst, r_int), hop):
        # lst[i] = value
        if hop.has_implicit_exception(IndexError):
            spec = dum_checkidx
        else:
            spec = dum_nocheck
        v_func = hop.inputconst(Void, spec)
        v_lst, v_index, v_item = hop.inputargs(r_lst, Signed, r_lst.item_repr)
        if hop.args_s[1].nonneg:
            llfn = ll_setitem_nonneg
        else:
            llfn = ll_setitem
        hop.exception_is_here()
        return hop.gendirectcall(llfn, v_func, v_lst, v_index, v_item)

    def rtype_mul((r_lst, r_int), hop):
        # lst * n
        cRESLIST = hop.inputconst(Void, hop.r_result.LIST)
        v_lst, v_factor = hop.inputargs(r_lst, Signed)
        return hop.gendirectcall(ll_mul, cRESLIST, v_lst, v_factor)
class __extend__(pairtype(AbstractListRepr, IntegerRepr)):

    def rtype_delitem((r_lst, r_int), hop):
        # del lst[i]
        if hop.has_implicit_exception(IndexError):
            spec = dum_checkidx
        else:
            spec = dum_nocheck
        v_func = hop.inputconst(Void, spec)
        v_lst, v_index = hop.inputargs(r_lst, Signed)
        if hop.args_s[1].nonneg:
            llfn = ll_delitem_nonneg
        else:
            llfn = ll_delitem
        hop.exception_is_here()
        return hop.gendirectcall(llfn, v_func, v_lst, v_index)

    def rtype_inplace_mul((r_lst, r_int), hop):
        # lst *= n
        v_lst, v_factor = hop.inputargs(r_lst, Signed)
        return hop.gendirectcall(ll_inplace_mul, v_lst, v_factor)
class __extend__(pairtype(AbstractBaseListRepr, AbstractBaseListRepr)):

    def convert_from_to((r_lst1, r_lst2), v, llops):
        # two list reprs are interconvertible only when they share the
        # very same listitem (i.e. the same underlying list definition)
        if r_lst1.listitem is None or r_lst2.listitem is None:
            return NotImplemented
        if r_lst1.listitem is not r_lst2.listitem:
            return NotImplemented
        return v

##    # TODO: move it to lltypesystem
##    def rtype_is_((r_lst1, r_lst2), hop):
##        if r_lst1.lowleveltype != r_lst2.lowleveltype:
##            # obscure logic, the is can be true only if both are None
##            v_lst1, v_lst2 = hop.inputargs(r_lst1, r_lst2)
##            return hop.gendirectcall(ll_both_none, v_lst1, v_lst2)
##        return pairtype(Repr, Repr).rtype_is_(pair(r_lst1, r_lst2), hop)

    def rtype_eq((r_lst1, r_lst2), hop):
        assert r_lst1.item_repr == r_lst2.item_repr
        v_lst1, v_lst2 = hop.inputargs(r_lst1, r_lst2)
        return hop.gendirectcall(ll_listeq, v_lst1, v_lst2, r_lst1.get_eqfunc())

    def rtype_ne((r_lst1, r_lst2), hop):
        # lst1 != lst2 is computed as not (lst1 == lst2)
        assert r_lst1.item_repr == r_lst2.item_repr
        v_lst1, v_lst2 = hop.inputargs(r_lst1, r_lst2)
        flag = hop.gendirectcall(ll_listeq, v_lst1, v_lst2, r_lst1.get_eqfunc())
        return hop.genop('bool_not', [flag], resulttype=Bool)
def rtype_newlist(hop):
    """rtyping of a list display [x, y, ...] with nb_args elements."""
    nb_args = hop.nb_args
    r_list = hop.r_result
    if r_list == robject.pyobj_repr: # special case: SomeObject lists!
        # build a real Python list and append each item at runtime
        clist = hop.inputconst(robject.pyobj_repr, list)
        v_result = hop.genop('simple_call', [clist], resulttype = robject.pyobj_repr)
        cname = hop.inputconst(robject.pyobj_repr, 'append')
        v_meth = hop.genop('getattr', [v_result, cname], resulttype = robject.pyobj_repr)
        for i in range(nb_args):
            v_item = hop.inputarg(robject.pyobj_repr, arg=i)
            hop.genop('simple_call', [v_meth, v_item], resulttype = robject.pyobj_repr)
        return v_result
    r_listitem = r_list.item_repr
    items_v = [hop.inputarg(r_listitem, arg=i) for i in range(nb_args)]
    return hop.rtyper.type_system.rlist.newlist(hop.llops, r_list, items_v)
def rtype_alloc_and_set(hop):
    """rtyping of '[item] * count' list construction.

    Fix: use the parenthesized raise form instead of the Python-2-only
    'raise Exception, msg' comma syntax; behavior is identical.
    """
    r_list = hop.r_result
    # XXX the special case for pyobj_repr needs to be implemented here as well
    # will probably happen during NFS
    if r_list == robject.pyobj_repr:
        raise Exception('please implement this!')
    v_count, v_item = hop.inputargs(Signed, r_list.item_repr)
    cLIST = hop.inputconst(Void, r_list.LIST)
    return hop.gendirectcall(ll_alloc_and_set, cLIST, v_count, v_item)
class __extend__(pairtype(AbstractBaseListRepr, AbstractBaseListRepr)):

    def rtype_add((r_lst1, r_lst2), hop):
        # lst1 + lst2: build a fresh list of the result's type
        v_lst1, v_lst2 = hop.inputargs(r_lst1, r_lst2)
        cRESLIST = hop.inputconst(Void, hop.r_result.LIST)
        return hop.gendirectcall(ll_concat, cRESLIST, v_lst1, v_lst2)
class __extend__(pairtype(AbstractListRepr, AbstractBaseListRepr)):

    def rtype_inplace_add((r_lst1, r_lst2), hop):
        # lst1 += lst2 mutates lst1 in place and re-returns it
        v_lst1, v_lst2 = hop.inputargs(r_lst1, r_lst2)
        hop.gendirectcall(ll_extend, v_lst1, v_lst2)
        return v_lst1
class __extend__(pairtype(AbstractListRepr, AbstractStringRepr)):

    def rtype_inplace_add((r_lst1, r_str2), hop):
        # lst += string: only valid for lists of (uni)chars
        if r_lst1.item_repr.lowleveltype not in (Char, UniChar):
            raise TyperError('"lst += string" only supported with a list '
                             'of chars or unichars')
        string_repr = r_lst1.rtyper.type_system.rstr.string_repr
        v_lst1, v_str2 = hop.inputargs(r_lst1, string_repr)
        c_strlen  = hop.inputconst(Void, string_repr.ll.ll_strlen)
        c_stritem = hop.inputconst(Void, string_repr.ll.ll_stritem_nonneg)
        hop.gendirectcall(ll_extend_with_str, v_lst1, v_str2,
                          c_strlen, c_stritem)
        return v_lst1

    def rtype_extend_with_str_slice((r_lst1, r_str2), hop):
        # lst += string[slice]: dispatch on the slice representation
        if r_lst1.item_repr.lowleveltype not in (Char, UniChar):
            raise TyperError('"lst += string" only supported with a list '
                             'of chars or unichars')
        rs = r_lst1.rtyper.type_system.rslice
        string_repr = r_lst1.rtyper.type_system.rstr.string_repr
        c_strlen  = hop.inputconst(Void, string_repr.ll.ll_strlen)
        c_stritem = hop.inputconst(Void, string_repr.ll.ll_stritem_nonneg)
        r_slic = hop.args_r[2]
        v_lst1, v_str2, v_slice = hop.inputargs(r_lst1, string_repr, r_slic)
        if r_slic == rs.startonly_slice_repr:
            hop.gendirectcall(ll_extend_with_str_slice_startonly,
                              v_lst1, v_str2, c_strlen, c_stritem, v_slice)
        elif r_slic == rs.startstop_slice_repr:
            hop.gendirectcall(ll_extend_with_str_slice,
                              v_lst1, v_str2, c_strlen, c_stritem, v_slice)
        elif r_slic == rs.minusone_slice_repr:
            hop.gendirectcall(ll_extend_with_str_slice_minusone,
                              v_lst1, v_str2, c_strlen, c_stritem)
        else:
            raise TyperError('lst += str[:] does not support slices with %r' %
                             (r_slic,))
        return v_lst1
class __extend__(pairtype(AbstractListRepr, AbstractCharRepr)):

    def rtype_extend_with_char_count((r_lst1, r_chr2), hop):
        # lst += char * count: only valid for lists of (uni)chars
        if r_lst1.item_repr.lowleveltype not in (Char, UniChar):
            raise TyperError('"lst += string" only supported with a list '
                             'of chars or unichars')
        char_repr = r_lst1.rtyper.type_system.rstr.char_repr
        v_lst1, v_chr, v_count = hop.inputargs(r_lst1, char_repr, Signed)
        hop.gendirectcall(ll_extend_with_char_count, v_lst1, v_chr, v_count)
        return v_lst1
class __extend__(pairtype(AbstractBaseListRepr, AbstractSliceRepr)):

    def rtype_getitem((r_lst, r_slic), hop):
        # lst[slice]: dispatch on the three supported slice forms
        rs = r_lst.rtyper.type_system.rslice
        cRESLIST = hop.inputconst(Void, hop.r_result.LIST)
        if r_slic == rs.startonly_slice_repr:
            v_lst, v_start = hop.inputargs(r_lst, rs.startonly_slice_repr)
            return hop.gendirectcall(ll_listslice_startonly, cRESLIST, v_lst, v_start)
        if r_slic == rs.startstop_slice_repr:
            v_lst, v_slice = hop.inputargs(r_lst, rs.startstop_slice_repr)
            return hop.gendirectcall(ll_listslice, cRESLIST, v_lst, v_slice)
        if r_slic == rs.minusone_slice_repr:
            # lst[:-1]: the slice value itself carries no information
            v_lst, v_ignored = hop.inputargs(r_lst, rs.minusone_slice_repr)
            return hop.gendirectcall(ll_listslice_minusone, cRESLIST, v_lst)
        raise TyperError('getitem does not support slices with %r' % (r_slic,))

    def rtype_setitem((r_lst, r_slic), hop):
        #if r_slic == startonly_slice_repr:
        #    not implemented
        rs = r_lst.rtyper.type_system.rslice
        if r_slic == rs.startstop_slice_repr:
            v_lst, v_slice, v_lst2 = hop.inputargs(r_lst, rs.startstop_slice_repr,
                                                   hop.args_r[2])
            hop.gendirectcall(ll_listsetslice, v_lst, v_slice, v_lst2)
            return
        raise TyperError('setitem does not support slices with %r' % (r_slic,))
class __extend__(pairtype(AbstractListRepr, AbstractSliceRepr)):

    def rtype_delitem((r_lst, r_slic), hop):
        # del lst[slice]: only start-only and start/stop slices supported
        rs = r_lst.rtyper.type_system.rslice
        if r_slic == rs.startonly_slice_repr:
            v_lst, v_start = hop.inputargs(r_lst, rs.startonly_slice_repr)
            hop.gendirectcall(ll_listdelslice_startonly, v_lst, v_start)
            return
        if r_slic == rs.startstop_slice_repr:
            v_lst, v_slice = hop.inputargs(r_lst, rs.startstop_slice_repr)
            hop.gendirectcall(ll_listdelslice, v_lst, v_slice)
            return
        raise TyperError('delitem does not support slices with %r' % (r_slic,))
# ____________________________________________________________
#
# Iteration.
class AbstractListIteratorRepr(IteratorRepr):
    """Base repr for 'iter(lst)' iterators."""

    def newiter(self, hop):
        # create the iterator structure over the list
        v_lst, = hop.inputargs(self.r_list)
        citerptr = hop.inputconst(Void, self.lowleveltype)
        return hop.gendirectcall(self.ll_listiter, citerptr, v_lst)

    def rtype_next(self, hop):
        v_iter, = hop.inputargs(self)
        hop.has_implicit_exception(StopIteration) # record that we know about it
        hop.exception_is_here()
        v_res = hop.gendirectcall(self.ll_listnext, v_iter)
        return self.r_list.recast(hop.llops, v_res)
# ____________________________________________________________
#
# Low-level methods. These can be run for testing, but are meant to
# be direct_call'ed from rtyped flow graphs, which means that they will
# get flowed and annotated, mostly with SomePtr.
def ll_alloc_and_set(LIST, count, item):
    """Implementation of '[item] * count' (negative counts give [])."""
    if count < 0:
        count = 0
    l = LIST.ll_newlist(count)
    T = typeOf(item)
    if T is Char or T is UniChar:
        # chars must be compared as integers to test for 'zero'
        check = ord(item)
    else:
        check = item
    if (not malloc_zero_filled) or check: # as long as malloc it is known to zero the allocated memory avoid zeroing twice
        i = 0
        while i < count:
            l.ll_setitem_fast(i, item)
            i += 1
    return l
ll_alloc_and_set.oopspec = 'newlist(count, item)'
# return a nullptr() if lst is a list of pointers it, else None.  Note
# that if we are using ootypesystem there are not pointers, so we
# always return None.
def ll_null_item(lst):
    LIST = typeOf(lst)
    if isinstance(LIST, Ptr):
        ITEM = LIST.TO.ITEM
        if isinstance(ITEM, Ptr):
            return nullptr(ITEM.TO)
    return None
def listItemType(lst):
    """Return the low-level type of the items of list 'lst'."""
    LIST = typeOf(lst)
    if isinstance(LIST, Ptr):    # lltype
        LIST = LIST.TO
    return LIST.ITEM
def ll_copy(RESLIST, l):
    """Return a fresh RESLIST holding the same items as 'l'."""
    n = l.ll_length()
    dup = RESLIST.ll_newlist(n)
    idx = 0
    while idx < n:
        dup.ll_setitem_fast(idx, l.ll_getitem_fast(idx))
        idx += 1
    return dup
ll_copy.oopspec = 'list.copy(l)'
def ll_len(l):
    """len(lst)."""
    return l.ll_length()
ll_len.oopspec = 'list.len(l)'
ll_len.oopargcheck = lambda l: bool(l)
def ll_list_is_true(l):
    """Truth value of a possibly-None list: non-None and non-empty."""
    if not l:
        return False
    return l.ll_length() != 0
ll_list_is_true.oopspec = 'list.nonzero(l)'
ll_list_is_true.oopargcheck = lambda l: True
def ll_append(l, newitem):
    """Append 'newitem' at the end of the resizable list 'l'."""
    n = l.ll_length()
    l._ll_resize_ge(n + 1)
    l.ll_setitem_fast(n, newitem)
ll_append.oopspec = 'list.append(l, newitem)'
# this one is for the special case of insert(0, x)
def ll_prepend(l, newitem):
    """Insert 'newitem' at index 0, shifting every item right by one."""
    n = l.ll_length()
    l._ll_resize_ge(n + 1)
    pos = n
    while pos > 0:
        l.ll_setitem_fast(pos, l.ll_getitem_fast(pos - 1))
        pos -= 1
    l.ll_setitem_fast(0, newitem)
ll_prepend.oopspec = 'list.insert(l, 0, newitem)'
def ll_concat(RESLIST, l1, l2):
    """Return a fresh RESLIST holding l1's items followed by l2's."""
    n1 = l1.ll_length()
    n2 = l2.ll_length()
    res = RESLIST.ll_newlist(n1 + n2)
    pos = 0
    while pos < n1:
        res.ll_setitem_fast(pos, l1.ll_getitem_fast(pos))
        pos += 1
    k = 0
    while k < n2:
        res.ll_setitem_fast(n1 + k, l2.ll_getitem_fast(k))
        k += 1
    return res
ll_concat.oopspec = 'list.concat(l1, l2)'
def ll_insert_nonneg(l, index, newitem):
    """lst.insert(index, newitem) with an index proven non-negative."""
    length = l.ll_length()
    debug_assert(0 <= index, "negative list insertion index")
    debug_assert(index <= length, "list insertion index out of bound")
    l._ll_resize_ge(length+1)
    # shift items [index:] one slot to the right, last first
    dst = length
    while dst > index:
        src = dst - 1
        l.ll_setitem_fast(dst, l.ll_getitem_fast(src))
        dst = src
    l.ll_setitem_fast(index, newitem)
ll_insert_nonneg.oopspec = 'list.insert(l, index, newitem)'
def ll_pop_nonneg(func, l, index):
    """lst.pop(index) with a non-negative index; 'func' selects
    whether an out-of-bound index raises IndexError (dum_checkidx)
    or is merely asserted against (dum_nocheck)."""
    debug_assert(index >= 0, "unexpectedly negative list pop index")
    if func is dum_checkidx:
        if index >= l.ll_length():
            raise IndexError
    else:
        debug_assert(index < l.ll_length(), "list pop index out of bound")
    res = l.ll_getitem_fast(index)
    ll_delitem_nonneg(dum_nocheck, l, index)
    return res
ll_pop_nonneg.oopspec = 'list.pop(l, index)'
def ll_pop_default(func, l):
    """lst.pop() -- remove and return the last item."""
    length = l.ll_length()
    if func is dum_checkidx and (length == 0):
        raise IndexError
    debug_assert(length > 0, "pop from empty list")
    index = length - 1
    newlength = index
    res = l.ll_getitem_fast(index)
    # clear the slot if items are pointers, so the GC can collect it
    null = ll_null_item(l)
    if null is not None:
        l.ll_setitem_fast(index, null)
    l._ll_resize_le(newlength)
    return res
ll_pop_default.oopspec = 'list.pop(l)'
def ll_pop_zero(func, l):
    """lst.pop(0) -- remove and return the first item."""
    length = l.ll_length()
    if func is dum_checkidx and (length == 0):
        raise IndexError
    debug_assert(length > 0, "pop(0) from empty list")
    newlength = length - 1
    res = l.ll_getitem_fast(0)
    # shift everything one slot to the left
    j = 0
    j1 = j+1
    while j < newlength:
        l.ll_setitem_fast(j, l.ll_getitem_fast(j1))
        j = j1
        j1 += 1
    # clear the vacated last slot if items are pointers
    null = ll_null_item(l)
    if null is not None:
        l.ll_setitem_fast(newlength, null)
    l._ll_resize_le(newlength)
    return res
ll_pop_zero.oopspec = 'list.pop(l, 0)'
def ll_pop(func, l, index):
    """lst.pop(index) with a possibly-negative index."""
    length = l.ll_length()
    if index < 0:
        index += length
    if func is dum_checkidx:
        if index < 0 or index >= length:
            raise IndexError
    else:
        debug_assert(index >= 0, "negative list pop index out of bound")
        debug_assert(index < length, "list pop index out of bound")
    res = l.ll_getitem_fast(index)
    ll_delitem_nonneg(dum_nocheck, l, index)
    return res
ll_pop.oopspec = 'list.pop(l, index)'
def ll_reverse(l):
    """Reverse the list in place by swapping symmetric pairs of items."""
    lo = 0
    hi = l.ll_length() - 1
    while lo < hi:
        first = l.ll_getitem_fast(lo)
        l.ll_setitem_fast(lo, l.ll_getitem_fast(hi))
        l.ll_setitem_fast(hi, first)
        lo += 1
        hi -= 1
ll_reverse.oopspec = 'list.reverse(l)'
def ll_getitem_nonneg(func, l, index):
    """Read l[index] with 'index' known to be non-negative.
    Raises IndexError when 'func' is dum_checkidx and the index is
    out of range; otherwise the bound is only debug-asserted."""
    debug_assert(index >= 0, "unexpectedly negative list getitem index")
    if func is dum_checkidx:
        if l.ll_length() <= index:
            raise IndexError
    else:
        debug_assert(index < l.ll_length(), "list getitem index out of bound")
    return l.ll_getitem_fast(index)
ll_getitem_nonneg.oopspec = 'list.getitem(l, index)'
ll_getitem_nonneg.oopargcheck = lambda l, index: (bool(l) and
                                                  0 <= index < l.ll_length())
def ll_getitem(func, l, index):
    """Read l[index]; a negative index counts from the end.
    Raises IndexError when 'func' is dum_checkidx and the (adjusted)
    index is out of range."""
    length = l.ll_length()
    if index < 0:
        index += length          # Python-style negative indexing
    if func is dum_checkidx:
        if index < 0 or index >= length:
            raise IndexError
    else:
        debug_assert(index >= 0, "negative list getitem index out of bound")
        debug_assert(index < length, "list getitem index out of bound")
    return l.ll_getitem_fast(index)
ll_getitem.oopspec = 'list.getitem(l, index)'
ll_getitem.oopargcheck = lambda l, index: (bool(l) and -l.ll_length() <=
                                           index < l.ll_length())
def ll_setitem_nonneg(func, l, index, newitem):
    """Store 'newitem' at the non-negative 'index'.
    Raises IndexError when 'func' is dum_checkidx and the index is
    out of range; otherwise the bound is only debug-asserted."""
    debug_assert(index >= 0, "unexpectedly negative list setitem index")
    if func is dum_checkidx:
        if l.ll_length() <= index:
            raise IndexError
    else:
        debug_assert(index < l.ll_length(), "list setitem index out of bound")
    l.ll_setitem_fast(index, newitem)
ll_setitem_nonneg.oopspec = 'list.setitem(l, index, newitem)'
def ll_setitem(func, l, index, newitem):
    """Store 'newitem' at l[index]; a negative index counts from the end.
    Raises IndexError when 'func' is dum_checkidx and the (adjusted)
    index is out of range."""
    length = l.ll_length()
    if index < 0:
        index += length          # Python-style negative indexing
    if func is dum_checkidx:
        if index < 0 or index >= length:
            raise IndexError
    else:
        debug_assert(index >= 0, "negative list setitem index out of bound")
        debug_assert(index < length, "list setitem index out of bound")
    l.ll_setitem_fast(index, newitem)
ll_setitem.oopspec = 'list.setitem(l, index, newitem)'
def ll_delitem_nonneg(func, l, index):
    """Delete l[index] ('index' known non-negative): shift the tail one
    slot to the left, overwrite the freed last slot when the item type
    has a null value, then shrink the list by one."""
    debug_assert(index >= 0, "unexpectedly negative list delitem index")
    length = l.ll_length()
    if func is dum_checkidx:
        if length <= index:
            raise IndexError
    else:
        debug_assert(index < length, "list delitem index out of bound")
    newlength = length - 1
    i = index
    while i < newlength:
        l.ll_setitem_fast(i, l.ll_getitem_fast(i + 1))
        i += 1
    null = ll_null_item(l)
    if null is not None:
        l.ll_setitem_fast(newlength, null)
    l._ll_resize_le(newlength)
ll_delitem_nonneg.oopspec = 'list.delitem(l, index)'
def ll_delitem(func, l, i):
    """Delete l[i]; a negative index counts from the end.
    Raises IndexError when 'func' is dum_checkidx and the (adjusted)
    index is out of range."""
    length = l.ll_length()
    if i < 0:
        i += length              # Python-style negative indexing
    if func is dum_checkidx:
        if i < 0 or i >= length:
            raise IndexError
    else:
        debug_assert(i >= 0, "negative list delitem index out of bound")
        debug_assert(i < length, "list delitem index out of bound")
    ll_delitem_nonneg(dum_nocheck, l, i)
ll_delitem.oopspec = 'list.delitem(l, i)'
def ll_extend(l1, l2):
    """Append every item of 'l2' to the end of 'l1'."""
    len1 = l1.ll_length()
    len2 = l2.ll_length()
    l1._ll_resize_ge(len1 + len2)
    i = 0
    while i < len2:
        l1.ll_setitem_fast(len1 + i, l2.ll_getitem_fast(i))
        i += 1
def ll_extend_with_str(lst, s, getstrlen, getstritem):
    # Append every character of the string-like 's' to 'lst'; simply
    # delegates to the slice-from-start helper with start == 0.
    # 'getstrlen'/'getstritem' are the accessors for the string type.
    return ll_extend_with_str_slice_startonly(lst, s, getstrlen, getstritem, 0)
def ll_extend_with_str_slice_startonly(lst, s, getstrlen, getstritem, start):
    # Append the characters s[start:] to 'lst'.  'getstrlen'/'getstritem'
    # are the accessors for the string type; characters are widened to
    # unichars when the list stores UniChar items.
    len1 = lst.ll_length()
    len2 = getstrlen(s)
    debug_assert(start >= 0, "unexpectedly negative str slice start")
    debug_assert(start <= len2, "str slice start larger than str length")
    newlength = len1 + len2 - start
    lst._ll_resize_ge(newlength)
    i = start
    j = len1
    while i < len2:
        c = getstritem(s, i)
        if listItemType(lst) is UniChar:
            c = unichr(ord(c))
        lst.ll_setitem_fast(j, c)
        i += 1
        j += 1
def ll_extend_with_str_slice(lst, s, getstrlen, getstritem, slice):
    # Append the characters s[slice.start:slice.stop] to 'lst' (stop is
    # clamped to the string length).  Characters are widened to unichars
    # when the list stores UniChar items.
    start = slice.start
    stop = slice.stop
    len1 = lst.ll_length()
    len2 = getstrlen(s)
    debug_assert(start >= 0, "unexpectedly negative str slice start")
    debug_assert(start <= len2, "str slice start larger than str length")
    debug_assert(stop >= start, "str slice stop smaller than start")
    if stop > len2:
        stop = len2
    newlength = len1 + stop - start
    lst._ll_resize_ge(newlength)
    i = start
    j = len1
    while i < stop:
        c = getstritem(s, i)
        if listItemType(lst) is UniChar:
            c = unichr(ord(c))
        lst.ll_setitem_fast(j, c)
        i += 1
        j += 1
def ll_extend_with_str_slice_minusone(lst, s, getstrlen, getstritem):
    # Append the characters s[:-1] (all but the last) to 'lst'.
    # Characters are widened to unichars when the list stores UniChar items.
    len1 = lst.ll_length()
    len2m1 = getstrlen(s) - 1
    debug_assert(len2m1 >= 0, "empty string is sliced with [:-1]")
    newlength = len1 + len2m1
    lst._ll_resize_ge(newlength)
    i = 0
    j = len1
    while i < len2m1:
        c = getstritem(s, i)
        if listItemType(lst) is UniChar:
            c = unichr(ord(c))
        lst.ll_setitem_fast(j, c)
        i += 1
        j += 1
def ll_extend_with_char_count(lst, char, count):
    # Append 'count' copies of 'char' to 'lst'; a non-positive count is
    # a no-op.  The char is widened to a unichar when the list stores
    # UniChar items.
    if count <= 0:
        return
    len1 = lst.ll_length()
    newlength = len1 + count
    lst._ll_resize_ge(newlength)
    j = len1
    if listItemType(lst) is UniChar:
        char = unichr(ord(char))
    while j < newlength:
        lst.ll_setitem_fast(j, char)
        j += 1
def ll_listslice_startonly(RESLIST, l1, start):
    """Return a new RESLIST holding a copy of l1[start:]."""
    len1 = l1.ll_length()
    debug_assert(start >= 0, "unexpectedly negative list slice start")
    debug_assert(start <= len1, "list slice start larger than list length")
    newlength = len1 - start
    result = RESLIST.ll_newlist(newlength)
    i = 0
    while i < newlength:
        result.ll_setitem_fast(i, l1.ll_getitem_fast(start + i))
        i += 1
    return result
def ll_listslice(RESLIST, l1, slice):
    """Return a new RESLIST holding a copy of l1[slice.start:slice.stop]
    (the stop bound is clamped to the list length)."""
    start = slice.start
    stop = slice.stop
    length = l1.ll_length()
    debug_assert(start >= 0, "unexpectedly negative list slice start")
    debug_assert(start <= length, "list slice start larger than list length")
    debug_assert(stop >= start, "list slice stop smaller than start")
    if stop > length:
        stop = length
    newlength = stop - start
    result = RESLIST.ll_newlist(newlength)
    i = 0
    while i < newlength:
        result.ll_setitem_fast(i, l1.ll_getitem_fast(start + i))
        i += 1
    return result
def ll_listslice_minusone(RESLIST, l1):
    """Return a new RESLIST holding a copy of l1[:-1]."""
    newlength = l1.ll_length() - 1
    debug_assert(newlength >= 0, "empty list is sliced with [:-1]")
    result = RESLIST.ll_newlist(newlength)
    i = 0
    while i < newlength:
        result.ll_setitem_fast(i, l1.ll_getitem_fast(i))
        i += 1
    return result
def ll_listdelslice_startonly(l, start):
    """Implement 'del l[start:]': overwrite the dropped slots when the
    item type has a null value, then shrink the list to 'start' items."""
    debug_assert(start >= 0, "del l[start:] with unexpectedly negative start")
    debug_assert(start <= l.ll_length(), "del l[start:] with start > len(l)")
    null = ll_null_item(l)
    if null is not None:
        i = l.ll_length() - 1
        while i >= start:
            l.ll_setitem_fast(i, null)
            i -= 1
    l._ll_resize_le(start)
def ll_listdelslice(l, slice):
    """Implement 'del l[slice.start:slice.stop]' (stop clamped to the
    list length): move the tail down, null out the now-unused slots when
    the item type has a null value, then shrink the list."""
    start = slice.start
    stop = slice.stop
    length = l.ll_length()
    debug_assert(start >= 0, "del l[start:x] with unexpectedly negative start")
    debug_assert(start <= length, "del l[start:x] with start > len(l)")
    debug_assert(stop >= start, "del l[x:y] with x > y")
    if stop > length:
        stop = length
    deleted = stop - start
    newlength = length - deleted
    # move items [stop:length) down to position 'start'
    i = start
    while i < newlength:
        l.ll_setitem_fast(i, l.ll_getitem_fast(i + deleted))
        i += 1
    null = ll_null_item(l)
    if null is not None:
        i = length - 1
        while i >= newlength:
            l.ll_setitem_fast(i, null)
            i -= 1
    l._ll_resize_le(newlength)
def ll_listsetslice(l1, slice, l2):
    """Implement 'l1[slice.start:slice.stop] = l2'.  The target slice
    must already contain exactly len(l2) items: RPython does not allow
    a resizing slice assignment here."""
    count = l2.ll_length()
    start = slice.start
    debug_assert(start >= 0, "l[start:x] = l with unexpectedly negative start")
    debug_assert(start <= l1.ll_length(), "l[start:x] = l with start > len(l)")
    debug_assert(count == slice.stop - start,
                 "setslice cannot resize lists in RPython")
    # XXX but it should be easy enough to support, soon
    i = 0
    while i < count:
        l1.ll_setitem_fast(start + i, l2.ll_getitem_fast(i))
        i += 1
# ____________________________________________________________
#
# Comparison.
def ll_listeq(l1, l2, eqfn):
    """Element-wise list equality.  'eqfn' is the item comparison
    function, or None to compare items with '=='.  Two null lists are
    equal; a null list never equals a non-null one."""
    if not l1 and not l2:
        return True
    if not l1 or not l2:
        return False
    length = l1.ll_length()
    if length != l2.ll_length():
        return False
    i = 0
    while i < length:
        item1 = l1.ll_getitem_fast(i)
        item2 = l2.ll_getitem_fast(i)
        if eqfn is None:
            if item1 != item2:
                return False
        elif not eqfn(item1, item2):
            return False
        i += 1
    return True
def ll_listcontains(lst, obj, eqfn):
    """Return True if 'obj' occurs in 'lst'.  'eqfn' is the item
    comparison function, or None to compare with '=='."""
    length = lst.ll_length()
    i = 0
    while i < length:
        item = lst.ll_getitem_fast(i)
        if eqfn is None:
            if item == obj:
                return True
        elif eqfn(item, obj):
            return True
        i += 1
    return False
def ll_listindex(lst, obj, eqfn):
    """Return the index of the first occurrence of 'obj' in 'lst', or
    raise ValueError if it is absent.  'eqfn' is the item comparison
    function, or None to compare with '=='."""
    length = lst.ll_length()
    i = 0
    while i < length:
        item = lst.ll_getitem_fast(i)
        if eqfn is None:
            if item == obj:
                return i
        elif eqfn(item, obj):
            return i
        i += 1
    raise ValueError # can't say 'list.index(x): x not in list'
def ll_listremove(lst, obj, eqfn):
    # Remove the first occurrence of 'obj' from 'lst'; 'eqfn' is the item
    # comparison function (None means '==').
    index = ll_listindex(lst, obj, eqfn) # raises ValueError if obj not in lst
    ll_delitem_nonneg(dum_nocheck, lst, index)
ll_listremove.oopspec = 'list.remove(obj)'
def ll_inplace_mul(l, factor):
    """Implement 'l *= factor' in place and return 'l'.  A factor <= 0
    empties the list; otherwise the original items are replicated
    'factor' times."""
    length = l.ll_length()
    if factor < 0:
        factor = 0
    resultlen = length * factor
    res = l
    res._ll_resize(resultlen)
    #res._ll_resize_ge(resultlen)
    # the first 'length' items are already in place; copy them into each
    # further chunk of 'length' slots
    base = length
    while base < resultlen:
        i = 0
        while i < length:
            res.ll_setitem_fast(base + i, l.ll_getitem_fast(i))
            i += 1
        base += length
    return res
def ll_mul(RESLIST, l, factor):
    """Return a new RESLIST equal to 'l' repeated 'factor' times
    (an empty list when factor <= 0)."""
    length = l.ll_length()
    if factor < 0:
        factor = 0
    resultlen = length * factor
    result = RESLIST.ll_newlist(resultlen)
    base = 0
    while base < resultlen:
        i = 0
        while i < length:
            result.ll_setitem_fast(base + i, l.ll_getitem_fast(i))
            i += 1
        base += length
    return result
| Python |
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rlib.objectmodel import CDefinedIntSymbolic
from pypy.objspace.flow.model import Constant, Variable
from pypy.objspace.flow.model import FunctionGraph, Block, Link
class CPyTypeInterface(object):
    """Description of how an RPython class is to be exported as a CPython
    type: the exported type name, extra class attributes, and whether the
    type may be subclassed from Python."""

    def __init__(self, name, objects={}, subclassable=False):
        # the exported name of the type
        self.name = name
        # a dict {name: pyobjectptr()} for general class attributes
        # (not for special methods!)
        self.objects = objects.copy()
        self.subclassable = subclassable

    def _freeze_(self):
        # mark instances as frozen prebuilt constants for the annotator
        return True

    def emulate(self, original_class):
        "Build a type object that emulates 'self'."
        # Used untranslated: produces an rpython_object subclass whose
        # class attributes are the unwrapped values from self.objects.
        assert isinstance(original_class, type)
        d = {'__slots__': [], '_rpython_class_': original_class}
        for name, value in self.objects.items():
            assert lltype.typeOf(value) == PyObjPtr
            assert isinstance(value._obj, lltype._pyobject)
            d[name] = value._obj.value
        t = type(self.name, (rpython_object,), d)
        return t
def cpy_export(cpytype, obj):
    # Export RPython instance 'obj' as a CPython object of type 'cpytype'.
    # Placeholder only: the real implementation is supplied at translation
    # time by the matching ExtRegistryEntry below.
    raise NotImplementedError("only works in translated versions")
def cpy_import(rpytype, obj):
    # Convert CPython object 'obj' back to an RPython instance of
    # 'rpytype'.  Placeholder only: implemented at translation time by
    # the matching ExtRegistryEntry below.
    raise NotImplementedError("only works in translated versions")
def cpy_typeobject(cpytype, cls):
    # Return the CPython type object built for the RPython class 'cls'.
    # Placeholder only: implemented at translation time by the matching
    # ExtRegistryEntry below.
    raise NotImplementedError("only works in translated versions")
def cpy_allocate(cls, cpytype):
    # Allocate a new instance of the RPython class 'cls' carrying the
    # given CPython type.  Placeholder only: implemented at translation
    # time by the matching ExtRegistryEntry below.
    raise NotImplementedError("only works in translated versions")
# ____________________________________________________________
# Implementation
class Entry(ExtRegistryEntry):
    """Annotation/rtyping hooks implementing cpy_export()."""
    _about_ = cpy_export

    def compute_result_annotation(self, s_cpytype, s_obj):
        # Record that the instance's class is exported with 'cpytype' and
        # annotate the result as a generic object.
        from pypy.annotation.model import SomeObject
        from pypy.annotation.model import SomeInstance
        assert isinstance(s_obj, SomeInstance)
        assert s_cpytype.is_constant()
        cpytype = s_cpytype.const
        attach_cpy_flavor(s_obj.classdef, cpytype)
        return SomeObject()

    def specialize_call(self, hop):
        # The export is just a pointer cast to PyObject*.
        from pypy.rpython.lltypesystem import lltype
        s_obj = hop.args_s[1]
        r_inst = hop.args_r[1]
        v_inst = hop.inputarg(r_inst, arg=1)
        return hop.genop('cast_pointer', [v_inst],
                         resulttype = lltype.Ptr(lltype.PyObject))
class Entry(ExtRegistryEntry):
    """Annotation/rtyping hooks implementing cpy_import()."""
    _about_ = cpy_import

    def compute_result_annotation(self, s_rpytype, s_obj):
        # The result is an instance of the constant RPython class.
        from pypy.annotation.bookkeeper import getbookkeeper
        from pypy.annotation.model import SomeInstance
        assert s_rpytype.is_constant()
        rpytype = s_rpytype.const
        bk = getbookkeeper()
        return SomeInstance(bk.getuniqueclassdef(rpytype))

    def specialize_call(self, hop):
        # The import is a pointer cast from PyObject* to the instance
        # repr; only valid for 'cpy'-flavored instance types.
        from pypy.annotation.model import SomeInstance
        from pypy.rpython.robject import pyobj_repr
        s_rpytype = hop.args_s[0]
        assert s_rpytype.is_constant()
        rpytype = s_rpytype.const
        classdef = hop.rtyper.annotator.bookkeeper.getuniqueclassdef(rpytype)
        s_inst = SomeInstance(classdef)
        r_inst = hop.rtyper.getrepr(s_inst)
        assert r_inst.lowleveltype.TO._gckind == 'cpy'
        v_obj = hop.inputarg(pyobj_repr, arg=1)
        return hop.genop('cast_pointer', [v_obj],
                         resulttype = r_inst.lowleveltype)
class Entry(ExtRegistryEntry):
    """Annotation/rtyping hooks implementing cpy_typeobject()."""
    _about_ = cpy_typeobject

    def compute_result_annotation(self, s_cpytype, s_cls):
        # Mark the (constant) class as exported and annotate the result
        # as a generic object.
        from pypy.annotation.model import SomeObject
        assert s_cls.is_constant()
        assert s_cpytype.is_constant()
        cpytype = s_cpytype.const
        [classdesc] = s_cls.descriptions
        classdef = classdesc.getuniqueclassdef()
        attach_cpy_flavor(classdef, cpytype)
        return SomeObject()

    def specialize_call(self, hop):
        # Build (or fetch from cache) the PyTypeObject for the class and
        # return it as a prebuilt constant.
        from pypy.rpython.rclass import getinstancerepr
        s_cls = hop.args_s[1]
        assert s_cls.is_constant()
        [classdesc] = s_cls.descriptions
        classdef = classdesc.getuniqueclassdef()
        r_inst = getinstancerepr(hop.rtyper, classdef)
        cpytype = build_pytypeobject(r_inst)
        return hop.inputconst(PyObjPtr, cpytype)
class Entry(ExtRegistryEntry):
    """Annotation/rtyping hooks implementing cpy_allocate()."""
    _about_ = cpy_allocate

    def compute_result_annotation(self, s_cls, s_cpytype):
        # The result is an instance of the constant RPython class.
        from pypy.annotation.model import SomeObject, SomeInstance
        assert s_cls.is_constant()
        [classdesc] = s_cls.descriptions
        classdef = classdesc.getuniqueclassdef()
        return SomeInstance(classdef)

    def specialize_call(self, hop):
        # Allocate a fresh instance, passing the runtime cpytype through
        # to the instance repr's constructor.
        from pypy.rpython.rclass import getinstancerepr
        s_cls = hop.args_s[0]
        assert s_cls.is_constant()
        [classdesc] = s_cls.descriptions
        classdef = classdesc.getuniqueclassdef()
        r_inst = getinstancerepr(hop.rtyper, classdef)
        vinst = r_inst.new_instance(hop.llops, v_cpytype = hop.args_v[1])
        return vinst
def attach_cpy_flavor(classdef, cpytype):
    """Record 'cpytype' as the exported CPython type of 'classdef'.
    Every class in the MRO gets at least a '_cpy_exported_type_'
    attribute (None when not itself exported); re-attaching must use
    the same cpytype."""
    for parentdef in classdef.getmro():
        if not hasattr(parentdef, '_cpy_exported_type_'):
            parentdef._cpy_exported_type_ = None
    previous = classdef._cpy_exported_type_
    if previous is None:
        classdef._cpy_exported_type_ = cpytype
    else:
        assert previous == cpytype
# Low-level declarations mirroring CPython's C-level structures.
PyObjPtr = lltype.Ptr(lltype.PyObject)
# Empty placeholder structs matching the CPython typedef names; they are
# allocated and referenced from tp_as_* slots but declare no fields here.
PyNumberMethods = lltype.Struct('PyNumberMethods',
    hints={'c_name': 'PyNumberMethods', 'external': True, 'typedef': True})
PyMappingMethods = lltype.Struct('PyMappingMethods',
    hints={'c_name': 'PyMappingMethods', 'external': True, 'typedef': True})
PySequenceMethods = lltype.Struct('PySequenceMethods',
    hints={'c_name': 'PySequenceMethods', 'external': True, 'typedef': True})
# Low-level layout of CPython's PyTypeObject.  Slots that this module does
# not fill are declared as plain Signed placeholders; only the slots used
# below (tp_name, tp_basicsize, tp_dealloc, tp_flags, tp_new, tp_as_*) have
# real pointer types.  A forward reference is needed because tp_new's
# signature mentions PY_TYPE_OBJECT itself.
PY_TYPE_OBJECT = lltype.PyForwardReference()
PY_TYPE_OBJECT.become(lltype.PyStruct(
    'PyTypeObject',
    ('head',            lltype.PyObject),
    ('c_ob_size',       lltype.Signed),
    ('c_tp_name',       lltype.Ptr(lltype.FixedSizeArray(lltype.Char, 1))),
    ('c_tp_basicsize',  lltype.Signed),
    ('c_tp_itemsize',   lltype.Signed),
    ('c_tp_dealloc',    lltype.Ptr(lltype.FuncType([PyObjPtr],
                                                   lltype.Void))),
    ('c_tp_print',      lltype.Signed),
    ('c_tp_getattr',    lltype.Signed),
    ('c_tp_setattr',    lltype.Signed),   # in
    ('c_tp_compare',    lltype.Signed),
    ('c_tp_repr',       lltype.Signed),   # progress
    ('c_tp_as_number',  lltype.Ptr(PyNumberMethods)),
    ('c_tp_as_sequence',lltype.Ptr(PySequenceMethods)),
    ('c_tp_as_mapping', lltype.Ptr(PyMappingMethods)),
    ('c_tp_hash',       lltype.Signed),
    ('c_tp_call',       lltype.Signed),
    ('c_tp_str',        lltype.Signed),
    ('c_tp_getattro',   lltype.Signed),
    ('c_tp_setattro',   lltype.Signed),
    ('c_tp_as_buffer',  lltype.Signed),
    ('c_tp_flags',      lltype.Signed),
    ('c_tp_doc',        lltype.Signed),
    ('c_tp_traverse',   lltype.Signed),
    ('c_tp_clear',      lltype.Signed),
    ('c_tp_richcompare',lltype.Signed),
    ('c_tp_weaklistoffset',lltype.Signed),
    ('c_tp_iter',       lltype.Signed),
    ('c_tp_iternext',   lltype.Signed),
    ('c_tp_methods',    lltype.Signed),
    ('c_tp_members',    lltype.Signed),
    ('c_tp_getset',     lltype.Signed),
    ('c_tp_base',       lltype.Signed),
    ('c_tp_dict',       PyObjPtr),
    ('c_tp_descr_get',  lltype.Signed),
    ('c_tp_descr_set',  lltype.Signed),
    ('c_tp_dictoffset', lltype.Signed),
    ('c_tp_init',       lltype.Signed),
    ('c_tp_alloc',      lltype.Signed),
                  #lltype.Ptr(lltype.FuncType([lltype.Ptr(PY_TYPE_OBJECT),
                  #                            lltype.Signed],
                  #                           PyObjPtr))),
    ('c_tp_new',        lltype.Ptr(lltype.FuncType([lltype.Ptr(PY_TYPE_OBJECT),
                                                    PyObjPtr,
                                                    PyObjPtr],
                                                   PyObjPtr))),
    ('c_tp_free',       lltype.Signed),
                  #lltype.Ptr(lltype.FuncType([llmemory.Address],
                  #                           lltype.Void))),
    hints={'c_name': 'PyTypeObject', 'external': True, 'typedef': True, 'inline_head': True}))
# XXX 'c_name' should be 'PyTypeObject' but genc inserts 'struct' :-(
def ll_tp_dealloc(p):
    # tp_dealloc slot for exported types: free the CPy-flavored instance.
    addr = llmemory.cast_ptr_to_adr(p)
    # Warning: this relies on an optimization in gctransformer, which will
    # not insert any incref/decref for 'p'. That would lead to infinite
    # recursion, as the refcnt of 'p' is already zero!
    from pypy.rpython.lltypesystem.rclass import CPYOBJECT
    llop.gc_deallocate(lltype.Void, CPYOBJECT, addr)
def build_pytypeobject(r_inst):
    """Build (or fetch from the rtyper's cache) the prebuilt PyTypeObject
    for the instance repr 'r_inst'.  Returns a PyObjPtr-cast pointer to
    the type object, or a null PyObject pointer when no class in the MRO
    was registered with attach_cpy_flavor()."""
    rtyper = r_inst.rtyper
    cache = rtyper.classdef_to_pytypeobject
    try:
        return cache[r_inst.classdef]
    except KeyError:
        # find the CPyTypeInterface attached to this class or a parent
        for parentdef in r_inst.classdef.getmro():
            cpytype = parentdef._cpy_exported_type_
            if cpytype is not None:
                break
        else:
            # for classes that cannot be exported at all
            return lltype.nullptr(lltype.PyObject)

        from pypy.rpython.lltypesystem.rclass import CPYOBJECTPTR
        from pypy.rpython.rtyper import LowLevelOpList
        typetype = lltype.pyobjectptr(type)

        # XXX default tp_new should go away
        # make the graph of tp_new manually
        v1 = Variable('tp');   v1.concretetype = lltype.Ptr(PY_TYPE_OBJECT)
        v2 = Variable('args'); v2.concretetype = PyObjPtr
        v3 = Variable('kwds'); v3.concretetype = PyObjPtr
        block = Block([v1, v2, v3])
        llops = LowLevelOpList(None)
        v4 = r_inst.new_instance(llops, v_cpytype = v1)
        v5 = llops.genop('cast_pointer', [v4], resulttype = PyObjPtr)
        block.operations = list(llops)
        tp_new_graph = FunctionGraph('ll_tp_new', block)
        block.closeblock(Link([v5], tp_new_graph.returnblock))
        tp_new_graph.getreturnvar().concretetype = v5.concretetype

        # build the PyTypeObject structure
        pytypeobj = lltype.malloc(PY_TYPE_OBJECT, flavor='cpy',
                                  extra_args=(typetype,))
        # tp_name needs a null-terminated char array
        name = cpytype.name
        T = lltype.FixedSizeArray(lltype.Char, len(name)+1)
        p = lltype.malloc(T, immortal=True)
        for i in range(len(name)):
            p[i] = name[i]
        p[len(name)] = '\x00'
        pytypeobj.c_tp_name = lltype.direct_arrayitems(p)
        pytypeobj.c_tp_basicsize = llmemory.sizeof(r_inst.lowleveltype.TO)
        if cpytype.subclassable and False: # XXX deallocation of subclass object segfaults!
            pytypeobj.c_tp_flags = CDefinedIntSymbolic('''(Py_TPFLAGS_DEFAULT |
                Py_TPFLAGS_CHECKTYPES | Py_TPFLAGS_BASETYPE)''')
        else:
            pytypeobj.c_tp_flags = CDefinedIntSymbolic('''(Py_TPFLAGS_DEFAULT |
                Py_TPFLAGS_CHECKTYPES)''')
        pytypeobj.c_tp_new = rtyper.type_system.getcallable(tp_new_graph)
        pytypeobj.c_tp_dealloc = rtyper.annotate_helper_fn(ll_tp_dealloc,
                                                           [PyObjPtr])
        pytypeobj.c_tp_as_number = lltype.malloc(PyNumberMethods, immortal=True)
        pytypeobj.c_tp_as_sequence = lltype.malloc(PySequenceMethods, immortal=True)
        pytypeobj.c_tp_as_mapping = lltype.malloc(PyMappingMethods, immortal=True)
        result = lltype.cast_pointer(PyObjPtr, pytypeobj)

        # the llsetup function that will store the 'objects' into the
        # type's tp_dict
        Py_TPFLAGS_HEAPTYPE = CDefinedIntSymbolic('Py_TPFLAGS_HEAPTYPE')
        if cpytype.objects:
            objects = [(lltype.pyobjectptr(name), value)
                       for name, value in cpytype.objects.items() if name != '__new__']
            if '__new__' in cpytype.objects:
                new = cpytype.objects['__new__']._obj.value
                objects.append((lltype.pyobjectptr('__new__'),
                                lltype.pyobjectptr(staticmethod(new))))

            def ll_type_setup(p):
                # run at module-load time: temporarily pretend the type is
                # a heap type so setattr on it is allowed, fill tp_dict,
                # then restore the original flags
                tp = lltype.cast_pointer(lltype.Ptr(PY_TYPE_OBJECT), p)
                old_flags = tp.c_tp_flags
                tp.c_tp_flags |= Py_TPFLAGS_HEAPTYPE
                for name, value in objects:
                    llop.setattr(PyObjPtr, tp, name, value)
                tp.c_tp_flags = old_flags
            result._obj.setup_fnptr = rtyper.annotate_helper_fn(ll_type_setup,
                                                                [PyObjPtr])
        cache[r_inst.classdef] = result
        return result
# To make this a Py_TPFLAGS_BASETYPE, we need to have a tp_new that does
# something different for subclasses: it needs to allocate a bit more
# for CPython's GC (see PyObject_GC_Malloc); it needs to Py_INCREF the
# type if it's a heap type; and it needs to PyObject_GC_Track() the object.
# Also, tp_dealloc needs to untrack the object.
# ____________________________________________________________
# Emulation support, to have user-defined classes and instances
# work nicely on top of CPython running the CPyObjSpace
class rpython_meta(type):
    # Metaclass tagging the wrapper classes built by
    # CPyTypeInterface.emulate(); the ExtRegistryEntry below keys on it
    # to translate prebuilt emulated type objects.
    pass
class rpython_object(object):
    """NOT_RPYTHON
    Wrapper object, for emulation.
    """
    __metaclass__ = rpython_meta
    __slots__ = ('data',)
# Capture the 'data' slot descriptor, then delete the attribute from the
# class so the slot is reachable only through 'rpython_data' below and is
# hidden from normal attribute access on wrapper instances.
rpython_data = rpython_object.data
del rpython_object.data
def init_rpython_data(wrapperobj, value):
    """NOT_RPYTHON
    Set the wrapper object's hidden 'data' slot to point to the original
    RPython instance 'value'.
    """
    rpython_data.__set__(wrapperobj, value)
def get_rpython_data(wrapperobj):
    """NOT_RPYTHON
    Get the original RPython instance from the wrapper object.
    """
    return rpython_data.__get__(wrapperobj)
class Entry(ExtRegistryEntry):
    """Support for translating prebuilt emulated type objects."""
    _type_ = rpython_meta

    def get_ll_pyobjectptr(self, rtyper):
        # An emulated *class* translates to the PyTypeObject built for
        # the original RPython class it wraps.
        from pypy.rpython.rclass import getinstancerepr
        emulated_cls = self.instance
        rpython_cls = emulated_cls._rpython_class_
        classdef = rtyper.annotator.bookkeeper.getuniqueclassdef(rpython_cls)
        r_inst = getinstancerepr(rtyper, classdef)
        return build_pytypeobject(r_inst)
class Entry(ExtRegistryEntry):
    """Support for translating prebuilt emulated type objects."""
    # NOTE(review): despite the docstring this entry matches *instances*
    # of the emulated types (via _metatype_), not the types themselves.
    _metatype_ = rpython_meta

    def get_ll_pyobjectptr(self, rtyper):
        # An emulated *instance* translates to the converted constant of
        # the RPython object hidden in its 'data' slot.
        from pypy.rpython.rclass import getinstancerepr
        wrapperobj = self.instance
        rpython_obj = get_rpython_data(wrapperobj)
        rpython_cls = rpython_obj.__class__
        classdef = rtyper.annotator.bookkeeper.getuniqueclassdef(rpython_cls)
        r_inst = getinstancerepr(rtyper, classdef)
        pyobj = r_inst.convert_const(rpython_obj)
        return lltype.cast_pointer(PyObjPtr, pyobj)
| Python |
import types
import sys
from pypy.annotation.pairtype import pair, pairtype
from pypy.annotation import model as annmodel
from pypy.annotation import description
from pypy.objspace.flow.model import Constant
from pypy.rpython.lltypesystem.lltype import \
typeOf, Void, Bool, nullptr, frozendict, Ptr, Struct, malloc
from pypy.rpython.error import TyperError
from pypy.rpython.rmodel import Repr, inputconst, HalfConcreteWrapper, CanBeNull, \
mangle, inputdesc, warning, impossible_repr
from pypy.rpython import rclass
from pypy.rpython import robject
from pypy.rpython import callparse
def small_cand(rtyper, s_pbc):
    # Decide whether this function PBC qualifies for the compact
    # 'small function set' representation: more than one function but
    # fewer than the translation.withsmallfuncsets limit, a type system
    # that provides SmallFunctionSetPBCRepr, a single-row call table,
    # and (recursively) a qualifying superset if there is one.
    if 1 < len(s_pbc.descriptions) < rtyper.getconfig().translation.withsmallfuncsets and \
       hasattr(rtyper.type_system.rpbc, 'SmallFunctionSetPBCRepr'):
        callfamily = s_pbc.descriptions.iterkeys().next().getcallfamily()
        concretetable, uniquerows = get_concrete_calltable(rtyper, callfamily)
        if len(uniquerows) == 1 and (not s_pbc.subset_of or small_cand(rtyper, s_pbc.subset_of)):
            return True
    return False
class __extend__(annmodel.SomePBC):
    def rtyper_makerepr(self, rtyper):
        # Select the Repr class for this PBC annotation, based on the
        # common kind of its descriptions.
        if self.isNone():
            return none_frozen_pbc_repr
        kind = self.getKind()
        if issubclass(kind, description.FunctionDesc):
            sample = self.descriptions.keys()[0]
            callfamily = sample.querycallfamily()
            if callfamily and callfamily.total_calltable_size > 0:
                if sample.overridden:
                    getRepr = OverriddenFunctionPBCRepr
                else:
                    getRepr = rtyper.type_system.rpbc.FunctionsPBCRepr
                    if small_cand(rtyper, self):
                        getRepr = rtyper.type_system.rpbc.SmallFunctionSetPBCRepr
            else:
                # function(s) that are never called: frozen constant(s)
                getRepr = getFrozenPBCRepr
        elif issubclass(kind, description.ClassDesc):
            # user classes
            getRepr = rtyper.type_system.rpbc.ClassesPBCRepr
            # XXX what about this?
##          elif type(x) is type and x.__module__ in sys.builtin_module_names:
##              # special case for built-in types, seen in faking
##              getRepr = getPyObjRepr
        elif issubclass(kind, description.MethodDesc):
            getRepr = rtyper.type_system.rpbc.MethodsPBCRepr
        elif issubclass(kind, description.FrozenDesc):
            getRepr = getFrozenPBCRepr
        elif issubclass(kind, description.MethodOfFrozenDesc):
            getRepr = rtyper.type_system.rpbc.MethodOfFrozenPBCRepr
        else:
            raise TyperError("unexpected PBC kind %r"%(kind,))
##      elif isinstance(x, builtin_descriptor_type):
##          # strange built-in functions, method objects, etc. from fake.py
##          getRepr = getPyObjRepr
        return getRepr(rtyper, self)

    def rtyper_makekey(self):
        # Cache key for the repr: class, can_be_None flag, the sorted
        # descriptions, plus the key of the superset PBC if any.
        lst = list(self.descriptions)
        lst.sort()
        if self.subset_of:
            t = self.subset_of.rtyper_makekey()
        else:
            t = ()
        return tuple([self.__class__, self.can_be_None]+lst)+t
##builtin_descriptor_type = (
## type(len), # type 'builtin_function_or_method'
## type(list.append), # type 'method_descriptor'
## type(type(None).__repr__), # type 'wrapper_descriptor'
## type(type.__dict__['__dict__']), # type 'getset_descriptor'
## type(type.__dict__['__flags__']), # type 'member_descriptor'
## )
# ____________________________________________________________
class ConcreteCallTableRow(dict):
    """A row in a concrete call table."""
    # Maps funcdesc -> low-level callable; also carries 'fntype' (the
    # common function pointer type of the row) and later 'attrname'
    # (the struct field name, or None for a single-row table).
def build_concrete_calltable(rtyper, callfamily):
    """Build a complete call table of a call family
    with concrete low-level function objs.
    """
    concretetable = {}   # (shape,index): row, maybe with duplicates
    uniquerows = []      # list of rows, without duplicates

    def lookuprow(row):
        # a 'matching' row is one that has the same llfn, expect
        # that it may have more or less 'holes'
        for existingindex, existingrow in enumerate(uniquerows):
            if row.fntype != existingrow.fntype:
                continue   # not the same pointer type, cannot match
            for funcdesc, llfn in row.items():
                if funcdesc in existingrow:
                    if llfn != existingrow[funcdesc]:
                        break   # mismatch
            else:
                # potential match, unless the two rows have no common funcdesc
                merged = ConcreteCallTableRow(row)
                merged.update(existingrow)
                merged.fntype = row.fntype
                if len(merged) == len(row) + len(existingrow):
                    pass   # no common funcdesc, not a match
                else:
                    return existingindex, merged
        raise LookupError

    def addrow(row):
        # add a row to the table, potentially merging it with an existing row
        try:
            index, merged = lookuprow(row)
        except LookupError:
            uniquerows.append(row)   # new row
        else:
            if merged == uniquerows[index]:
                pass   # already exactly in the table
            else:
                del uniquerows[index]
                addrow(merged)   # add the potentially larger merged row

    # convert each annotation-level row (funcdesc -> graph) into a
    # concrete row (funcdesc -> low-level callable)
    concreterows = {}
    for shape, rows in callfamily.calltables.items():
        for index, row in enumerate(rows):
            concreterow = ConcreteCallTableRow()
            for funcdesc, graph in row.items():
                llfn = rtyper.getcallable(graph)
                concreterow[funcdesc] = llfn
            assert len(concreterow) > 0
            concreterow.fntype = typeOf(llfn)   # 'llfn' from the loop above
            # (they should all have the same type)
            concreterows[shape, index] = concreterow

    for row in concreterows.values():
        addrow(row)

    # map each (shape, index) back to the merged unique row it ended up in
    for (shape, index), row in concreterows.items():
        existingindex, biggerrow = lookuprow(row)
        row = uniquerows[existingindex]
        assert biggerrow == row   # otherwise, addrow() is broken
        concretetable[shape, index] = row

    # name the rows: a single row needs no struct field name
    if len(uniquerows) == 1:
        uniquerows[0].attrname = None
    else:
        for finalindex, row in enumerate(uniquerows):
            row.attrname = 'variant%d' % finalindex

    return concretetable, uniquerows
def get_concrete_calltable(rtyper, callfamily):
    """Get a complete call table of a call family
    with concrete low-level function objs.

    The result is cached per call family on the rtyper; a cached table is
    only valid while the family has not grown since it was built.
    """
    cache = rtyper.concrete_calltables
    try:
        concretetable, uniquerows, oldsize = cache[callfamily]
    except KeyError:
        concretetable, uniquerows = build_concrete_calltable(rtyper, callfamily)
        cache[callfamily] = concretetable, uniquerows, callfamily.total_calltable_size
    else:
        if oldsize != callfamily.total_calltable_size:
            raise TyperError("call table was unexpectedly extended")
    return concretetable, uniquerows
class AbstractFunctionsPBCRepr(CanBeNull, Repr):
"""Representation selected for a PBC of function(s)."""
def __init__(self, rtyper, s_pbc):
self.rtyper = rtyper
self.s_pbc = s_pbc
self.callfamily = s_pbc.descriptions.iterkeys().next().getcallfamily()
if len(s_pbc.descriptions) == 1 and not s_pbc.can_be_None:
# a single function
self.lowleveltype = Void
else:
concretetable, uniquerows = get_concrete_calltable(self.rtyper,
self.callfamily)
self.concretetable = concretetable
self.uniquerows = uniquerows
if len(uniquerows) == 1:
row = uniquerows[0]
self.lowleveltype = row.fntype
else:
# several functions, each with several specialized variants.
# each function becomes a pointer to a Struct containing
# pointers to its variants.
self.lowleveltype = self.setup_specfunc()
self.funccache = {}
def get_s_callable(self):
return self.s_pbc
def get_r_implfunc(self):
return self, 0
def get_s_signatures(self, shape):
funcdesc = self.s_pbc.descriptions.iterkeys().next()
return funcdesc.get_s_signatures(shape)
## def function_signatures(self):
## if self._function_signatures is None:
## self._function_signatures = {}
## for func in self.s_pbc.prebuiltinstances:
## if func is not None:
## self._function_signatures[func] = getsignature(self.rtyper,
## func)
## assert self._function_signatures
## return self._function_signatures
def convert_desc(self, funcdesc):
# get the whole "column" of the call table corresponding to this desc
try:
return self.funccache[funcdesc]
except KeyError:
pass
if self.lowleveltype is Void:
result = HalfConcreteWrapper(self.get_unique_llfn)
else:
llfns = {}
found_anything = False
for row in self.uniquerows:
if funcdesc in row:
llfn = row[funcdesc]
found_anything = True
else:
# missing entry -- need a 'null' of the type that matches
# this row
llfn = self.rtyper.type_system.null_callable(row.fntype)
llfns[row.attrname] = llfn
if not found_anything:
raise TyperError("%r not in %r" % (funcdesc,
self.s_pbc.descriptions))
if len(self.uniquerows) == 1:
result = llfn # from the loop above
else:
# build a Struct with all the values collected in 'llfns'
result = self.create_specfunc()
for attrname, llfn in llfns.items():
setattr(result, attrname, llfn)
self.funccache[funcdesc] = result
return result
def convert_const(self, value):
if isinstance(value, types.MethodType) and value.im_self is None:
value = value.im_func # unbound method -> bare function
if self.lowleveltype is Void:
return HalfConcreteWrapper(self.get_unique_llfn)
if value is None:
null = self.rtyper.type_system.null_callable(self.lowleveltype)
return null
funcdesc = self.rtyper.annotator.bookkeeper.getdesc(value)
return self.convert_desc(funcdesc)
def convert_to_concrete_llfn(self, v, shape, index, llop):
"""Convert the variable 'v' to a variable referring to a concrete
low-level function. In case the call table contains multiple rows,
'index' and 'shape' tells which of its items we are interested in.
"""
assert v.concretetype == self.lowleveltype
if self.lowleveltype is Void:
assert len(self.s_pbc.descriptions) == 1
# lowleveltype wouldn't be Void otherwise
funcdesc, = self.s_pbc.descriptions
row_of_one_graph = self.callfamily.calltables[shape][index]
graph = row_of_one_graph[funcdesc]
llfn = self.rtyper.getcallable(graph)
return inputconst(typeOf(llfn), llfn)
elif len(self.uniquerows) == 1:
return v
else:
# 'v' is a Struct pointer, read the corresponding field
row = self.concretetable[shape, index]
cname = inputconst(Void, row.attrname)
return self.get_specfunc_row(llop, v, cname, row.fntype)
def get_unique_llfn(self):
# try to build a unique low-level function. Avoid to use
# whenever possible! Doesn't work with specialization, multiple
# different call sites, etc.
if self.lowleveltype is not Void:
raise TyperError("cannot pass multiple functions here")
assert len(self.s_pbc.descriptions) == 1
# lowleveltype wouldn't be Void otherwise
funcdesc, = self.s_pbc.descriptions
if len(self.callfamily.calltables) != 1:
raise TyperError("cannot pass a function with various call shapes")
table, = self.callfamily.calltables.values()
graphs = []
for row in table:
if funcdesc in row:
graphs.append(row[funcdesc])
if not graphs:
raise TyperError("cannot pass here a function that is not called")
graph = graphs[0]
if graphs != [graph]*len(graphs):
raise TyperError("cannot pass a specialized function here")
llfn = self.rtyper.getcallable(graph)
return inputconst(typeOf(llfn), llfn)
def rtype_simple_call(self, hop):
return self.call('simple_call', hop)
def rtype_call_args(self, hop):
return self.call('call_args', hop)
    def call(self, opname, hop):
        """Common implementation of rtype_simple_call/rtype_call_args:
        pick the call-table row for this call site, then emit either a
        'direct_call' (known constant function) or an 'indirect_call'
        (runtime function pointer)."""
        bk = self.rtyper.annotator.bookkeeper
        args = bk.build_args(opname, hop.args_s[1:])
        s_pbc = hop.args_s[0]   # possibly more precise than self.s_pbc
        descs = s_pbc.descriptions.keys()
        shape, index = description.FunctionDesc.variant_for_call_site(bk, self.callfamily, descs, args)
        row_of_graphs = self.callfamily.calltables[shape][index]
        anygraph = row_of_graphs.itervalues().next()  # pick any witness
        vfn = hop.inputarg(self, arg=0)
        vlist = [self.convert_to_concrete_llfn(vfn, shape, index,
                                               hop.llops)]
        vlist += callparse.callparse(self.rtyper, anygraph, hop, opname)
        rresult = callparse.getrresult(self.rtyper, anygraph)
        hop.exception_is_here()
        if isinstance(vlist[0], Constant):
            v = hop.genop('direct_call', vlist, resulttype = rresult)
        else:
            # attach the list of candidate graphs as an extra Void argument
            # of the indirect_call, for later analysis passes
            vlist.append(hop.inputconst(Void, row_of_graphs.values()))
            v = hop.genop('indirect_call', vlist, resulttype = rresult)
        if hop.r_result is impossible_repr:
            return None      # see test_always_raising_methods
        else:
            return hop.llops.convertvar(v, rresult, hop.r_result)
class __extend__(pairtype(AbstractFunctionsPBCRepr, AbstractFunctionsPBCRepr)):
    # conversion between two function-PBC representations

    def convert_from_to((r_fpbc1, r_fpbc2), v, llops):
        # this check makes sense because both source and dest repr are FunctionsPBCRepr
        if r_fpbc1.lowleveltype == r_fpbc2.lowleveltype:
            return v
        if r_fpbc1.lowleveltype is Void:
            # source is a compile-time constant: materialize it
            return inputconst(r_fpbc2, r_fpbc1.s_pbc.const)
        if r_fpbc2.lowleveltype is Void:
            # delay the conversion until the unique function is needed
            wrapper = HalfConcreteWrapper(r_fpbc2.get_unique_llfn)
            return inputconst(Void, wrapper)
        return NotImplemented
class OverriddenFunctionPBCRepr(Repr):
    """Representation for a single function whose implementation is
    overridden by a special-case rtyping rule (see rspecialcase)."""

    def __init__(self, rtyper, s_pbc):
        self.rtyper = rtyper
        self.s_pbc = s_pbc
        assert len(s_pbc.descriptions) == 1
        self.lowleveltype = Void

    def rtype_simple_call(self, hop):
        # dispatch to the special-case implementation registered elsewhere
        from pypy.rpython.rspecialcase import rtype_call_specialcase
        return rtype_call_specialcase(hop)
def getPyObjRepr(rtyper, s_pbc):
    # fallback: represent the PBC as a generic CPython object
    return robject.pyobj_repr
def getFrozenPBCRepr(rtyper, s_pbc):
    """Select (and cache on the rtyper) the repr for a SomePBC of frozen,
    non-callable prebuilt constants: a single-constant repr, a repr for
    several constants sharing an attribute access set, or a repr for
    unrelated constants that can only be compared with 'is'."""
    descs = s_pbc.descriptions.keys()
    assert len(descs) >= 1
    if len(descs) == 1 and not s_pbc.can_be_None:
        return SingleFrozenPBCRepr(descs[0])
    else:
        access = descs[0].queryattrfamily()
        for desc in descs[1:]:
            access1 = desc.queryattrfamily()
            if access1 is not access:
                # no common access set: fall back to identity-only repr,
                # cached under the key 'unrelated'
                try:
                    return rtyper.pbc_reprs['unrelated']
                except KeyError:
                    rpbc = rtyper.type_system.rpbc
                    result = rpbc.MultipleUnrelatedFrozenPBCRepr(rtyper)
                    rtyper.pbc_reprs['unrelated'] = result
                    return result
        # all descriptions share 'access': cache the repr per access set
        try:
            return rtyper.pbc_reprs[access]
        except KeyError:
            result = rtyper.type_system.rpbc.MultipleFrozenPBCRepr(rtyper,
                                                                   access)
            rtyper.pbc_reprs[access] = result
            rtyper.add_pendingsetup(result)
            return result
class SingleFrozenPBCRepr(Repr):
    """Representation selected for a single non-callable pre-built constant."""
    lowleveltype = Void

    def __init__(self, frozendesc):
        self.frozendesc = frozendesc

    def rtype_getattr(_, hop):
        # the attribute value must be a compile-time constant here
        if not hop.s_result.is_constant():
            raise TyperError("getattr on a constant PBC returns a non-constant")
        return hop.inputconst(hop.r_result, hop.s_result.const)

    def convert_desc(self, frozendesc):
        assert frozendesc is self.frozendesc
        return object()  # lowleveltype is Void

    def getstr(self):
        # memoized so the annotator sees a constant string per repr
        return str(self.frozendesc)
    getstr._annspecialcase_ = 'specialize:memo'

    def ll_str(self, x):
        return self.getstr()
class AbstractMultipleUnrelatedFrozenPBCRepr(CanBeNull, Repr):
    """For a SomePBC of frozen PBCs that have no common access set.
    The only possible operation on such a thing is comparison with 'is'."""

    def __init__(self, rtyper):
        self.rtyper = rtyper
        # frozendesc -> converted placeholder, built lazily
        self.converted_pbc_cache = {}

    def convert_desc(self, frozendesc):
        try:
            return self.converted_pbc_cache[frozendesc]
        except KeyError:
            r = self.rtyper.getrepr(annmodel.SomePBC([frozendesc]))
            if r.lowleveltype is Void:
                # must create a new empty structure, as a placeholder
                pbc = self.create_instance()
            else:
                pbc = r.convert_desc(frozendesc)
            convpbc = self.convert_pbc(pbc)
            self.converted_pbc_cache[frozendesc] = convpbc
            return convpbc

    def convert_const(self, pbc):
        if pbc is None:
            return self.null_instance()
        if isinstance(pbc, types.MethodType) and pbc.im_self is None:
            # NOTE(review): 'value' is assigned but never used below; the
            # comment suggests 'pbc' was meant to be rebound to the bare
            # function -- confirm intended behavior before changing
            value = pbc.im_func  # unbound method -> bare function
        frozendesc = self.rtyper.annotator.bookkeeper.getdesc(pbc)
        return self.convert_desc(frozendesc)

    def rtype_getattr(_, hop):
        # only constant attribute results are supported on frozen PBCs
        if not hop.s_result.is_constant():
            raise TyperError("getattr on a constant PBC returns a non-constant")
        return hop.inputconst(hop.r_result, hop.s_result.const)
class AbstractMultipleFrozenPBCRepr(AbstractMultipleUnrelatedFrozenPBCRepr):
    """For a SomePBC of frozen PBCs with a common attribute access set."""

    def _setup_repr_fields(self):
        # build the (mangled_name, lowleveltype) field list for the
        # structure holding the attributes, sorted for determinism
        fields = []
        self.fieldmap = {}
        if self.access_set is not None:
            attrlist = self.access_set.attrs.keys()
            attrlist.sort()
            for attr in attrlist:
                s_value = self.access_set.attrs[attr]
                r_value = self.rtyper.getrepr(s_value)
                mangled_name = mangle('pbc', attr)
                fields.append((mangled_name, r_value.lowleveltype))
                self.fieldmap[attr] = mangled_name, r_value
        return fields

    def convert_desc(self, frozendesc):
        """Build (and cache) the low-level structure for 'frozendesc',
        filling in the attribute fields from the desc's attrcache."""
        if (self.access_set is not None and
            frozendesc not in self.access_set.descs):
            raise TyperError("not found in PBC access set: %r" % (frozendesc,))
        try:
            return self.pbc_cache[frozendesc]
        except KeyError:
            self.setup()
            result = self.create_instance()
            # cache before filling the fields, so recursive references
            # to the same desc find the partially-built instance
            self.pbc_cache[frozendesc] = result
            for attr, (mangled_name, r_value) in self.fieldmap.items():
                if r_value.lowleveltype is Void:
                    continue
                try:
                    thisattrvalue = frozendesc.attrcache[attr]
                except KeyError:
                    warning("Desc %r has no attribute %r" % (frozendesc, attr))
                    continue
                llvalue = r_value.convert_const(thisattrvalue)
                setattr(result, mangled_name, llvalue)
            return result

    def rtype_getattr(self, hop):
        if hop.s_result.is_constant():
            return hop.inputconst(hop.r_result, hop.s_result.const)

        attr = hop.args_s[1].const
        vpbc, vattr = hop.inputargs(self, Void)
        v_res = self.getfield(vpbc, attr, hop.llops)
        mangled_name, r_res = self.fieldmap[attr]
        return hop.llops.convertvar(v_res, r_res, hop.r_result)
class __extend__(pairtype(AbstractMultipleFrozenPBCRepr, AbstractMultipleFrozenPBCRepr)):

    def convert_from_to((r_pbc1, r_pbc2), v, llops):
        # identical access sets imply identical structures: no-op conversion
        if r_pbc1.access_set == r_pbc2.access_set:
            return v
        return NotImplemented
class __extend__(pairtype(SingleFrozenPBCRepr, AbstractMultipleFrozenPBCRepr)):

    def convert_from_to((r_pbc1, r_pbc2), v, llops):
        # widen a single frozen constant into a member of an access set
        frozendesc1 = r_pbc1.frozendesc
        access = frozendesc1.queryattrfamily()
        if access is r_pbc2.access_set:
            return inputdesc(r_pbc2, frozendesc1)
        return NotImplemented
class __extend__(pairtype(AbstractMultipleUnrelatedFrozenPBCRepr,
                          SingleFrozenPBCRepr)):

    def convert_from_to((r_pbc1, r_pbc2), v, llops):
        # narrowing to a single known constant: the value becomes Void
        return inputconst(Void, r_pbc2.frozendesc)
class MethodOfFrozenPBCRepr(Repr):
    """Representation selected for a PBC of method object(s) of frozen PBCs.
    It assumes that all methods are the same function bound to different PBCs.
    The low-level representation can then be a pointer to that PBC."""

    def __init__(self, rtyper, s_pbc):
        self.rtyper = rtyper
        self.funcdesc = s_pbc.descriptions.keys()[0].funcdesc

        # a hack to force the underlying function to show up in call_families
        # (generally not needed, as normalizecalls() should ensure this,
        # but needed for bound methods that are ll helpers)
        # XXX sort this out
        #call_families = rtyper.annotator.getpbccallfamilies()
        #call_families.find((None, self.function))

        if s_pbc.can_be_none():
            raise TyperError("unsupported: variable of type "
                             "method-of-frozen-PBC or None")

        # collect the possible 'im_self' frozen descs; they all must be
        # bound versions of the same underlying function
        im_selves = []
        for desc in s_pbc.descriptions:
            assert desc.funcdesc is self.funcdesc
            im_selves.append(desc.frozendesc)

        self.s_im_self = annmodel.SomePBC(im_selves)
        self.r_im_self = rtyper.getrepr(self.s_im_self)
        self.lowleveltype = self.r_im_self.lowleveltype

    def get_s_callable(self):
        return annmodel.SomePBC([self.funcdesc])

    def get_r_implfunc(self):
        r_func = self.rtyper.getrepr(self.get_s_callable())
        return r_func, 1

    def convert_desc(self, mdesc):
        if mdesc.funcdesc is not self.funcdesc:
            raise TyperError("not a method bound on %r: %r" % (self.funcdesc,
                                                               mdesc))
        return self.r_im_self.convert_desc(mdesc.frozendesc)

    def convert_const(self, method):
        mdesc = self.rtyper.annotator.bookkeeper.getdesc(method)
        return self.convert_desc(mdesc)

    def rtype_simple_call(self, hop):
        return self.redispatch_call(hop, call_args=False)

    def rtype_call_args(self, hop):
        return self.redispatch_call(hop, call_args=True)

    def redispatch_call(self, hop, call_args):
        # rewrite method(args...) into function(im_self, args...) and
        # let the rtyper dispatch the resulting plain call
        # XXX obscure, try to refactor...
        s_function = annmodel.SomePBC([self.funcdesc])
        hop2 = hop.copy()
        hop2.args_s[0] = self.s_im_self   # make the 1st arg stand for 'im_self'
        hop2.args_r[0] = self.r_im_self   # (same lowleveltype as 'self')
        if isinstance(hop2.args_v[0], Constant):
            boundmethod = hop2.args_v[0].value
            hop2.args_v[0] = Constant(boundmethod.im_self)
        if call_args:
            hop2.swap_fst_snd_args()
            _, s_shape = hop2.r_s_popfirstarg() # temporarely remove shape
            adjust_shape(hop2, s_shape)
        # a marker that would crash if actually used...
        c = Constant("obscure-don't-use-me")
        hop2.v_s_insertfirstarg(c, s_function)   # insert 'function'
        # now hop2 looks like simple_call(function, self, args...)
        return hop2.dispatch()
class __extend__(pairtype(MethodOfFrozenPBCRepr, MethodOfFrozenPBCRepr)):

    def convert_from_to((r_from, r_to), v, llops):
        # the lowleveltype is just the 'im_self' repr: delegate to it
        return pair(r_from.r_im_self, r_to.r_im_self).convert_from_to(v, llops)
# __ None ____________________________________________________
class NoneFrozenPBCRepr(Repr):
    """Representation of the constant None; carries no data (Void)."""
    lowleveltype = Void

    def rtype_is_true(self, hop):
        # bool(None) is always False, known at compile time
        return Constant(False, Bool)

    def none_call(self, hop):
        raise TyperError("attempt to call constant None")

    rtype_simple_call = none_call
    rtype_call_args = none_call
# prebuilt singleton repr used whenever a variable is the constant None
none_frozen_pbc_repr = NoneFrozenPBCRepr()
class __extend__(pairtype(Repr, NoneFrozenPBCRepr)):

    def convert_from_to((r_from, _), v, llops):
        # converting anything to the None repr discards the value
        return inputconst(Void, None)

    def rtype_is_((robj1, rnone2), hop):
        # 'x is None' comparison, delegated to the type system
        return hop.rtyper.type_system.rpbc.rtype_is_None(robj1, rnone2, hop)
class __extend__(pairtype(NoneFrozenPBCRepr, Repr)):

    def convert_from_to((_, r_to), v, llops):
        # materialize None in the destination representation
        return inputconst(r_to, None)

    def rtype_is_((rnone1, robj2), hop):
        # 'None is x' comparison; pos=1 marks None as the first operand
        return hop.rtyper.type_system.rpbc.rtype_is_None(
                                                robj2, rnone1, hop, pos=1)
# ____________________________________________________________
class AbstractClassesPBCRepr(Repr):
    """Representation selected for a PBC of class(es).

    A constant class is represented as Void; a variable over several
    possible classes is represented as a pointer to a vtable
    (type_system.rclass.CLASSTYPE).  Calling the PBC instantiates the
    class and runs its __init__, if any.
    """

    def __init__(self, rtyper, s_pbc):
        self.rtyper = rtyper
        self.s_pbc = s_pbc
        #if s_pbc.can_be_None:
        #    raise TyperError("unsupported: variable of type "
        #                     "class-pointer or None")
        if s_pbc.is_constant():
            self.lowleveltype = Void
        else:
            self.lowleveltype = rtyper.type_system.rclass.CLASSTYPE

    def get_access_set(self, attrname):
        """Return the ClassAttrFamily corresponding to accesses to 'attrname'
        and the ClassRepr of the class which stores this attribute in
        its vtable.
        """
        classdescs = self.s_pbc.descriptions.keys()
        access = classdescs[0].queryattrfamily(attrname)
        for classdesc in classdescs[1:]:
            access1 = classdesc.queryattrfamily(attrname)
            assert access1 is access       # XXX not implemented
        if access is None:
            raise rclass.MissingRTypeAttribute(attrname)
        commonbase = access.commonbase
        class_repr = rclass.getclassrepr(self.rtyper, commonbase)
        return access, class_repr

    def convert_desc(self, desc):
        """Convert the class description 'desc' to its low-level value."""
        if desc not in self.s_pbc.descriptions:
            # bug fix: the message previously interpolated the undefined
            # name 'cls', raising NameError instead of this TyperError
            raise TyperError("%r not in %r" % (desc, self))
        if self.lowleveltype is Void:
            return desc.pyobj
        return rclass.get_type_repr(self.rtyper).convert_desc(desc)

    def convert_const(self, cls):
        """Convert the class object 'cls' (or None) to its low-level value."""
        if cls is None:
            if self.lowleveltype is Void:
                return None
            else:
                T = self.lowleveltype
                return self.rtyper.type_system.null_callable(T)
        bk = self.rtyper.annotator.bookkeeper
        classdesc = bk.getdesc(cls)
        return self.convert_desc(classdesc)

    def rtype_getattr(self, hop):
        if hop.s_result.is_constant():
            return hop.inputconst(hop.r_result, hop.s_result.const)
        else:
            # read the attribute out of the vtable field shared by the
            # attribute's access set
            attr = hop.args_s[1].const
            access_set, class_repr = self.get_access_set(attr)
            vcls, vattr = hop.inputargs(class_repr, Void)
            v_res = class_repr.getpbcfield(vcls, access_set, attr, hop.llops)
            s_res = access_set.s_value
            r_res = self.rtyper.getrepr(s_res)
            return hop.llops.convertvar(v_res, r_res, hop.r_result)

    def replace_class_with_inst_arg(self, hop, v_inst, s_inst, call_args):
        """Return a copy of 'hop' where the class argument is replaced
        by the freshly created instance."""
        hop2 = hop.copy()
        hop2.r_s_popfirstarg()   # discard the class pointer argument
        if call_args:
            _, s_shape = hop2.r_s_popfirstarg() # temporarely remove shape
            hop2.v_s_insertfirstarg(v_inst, s_inst)  # add 'instance'
            adjust_shape(hop2, s_shape)
        else:
            hop2.v_s_insertfirstarg(v_inst, s_inst)  # add 'instance'
        return hop2

    def rtype_simple_call(self, hop):
        return self.redispatch_call(hop, call_args=False)

    def rtype_call_args(self, hop):
        return self.redispatch_call(hop, call_args=True)

    def redispatch_call(self, hop, call_args):
        """Instantiate the class (single or runtime-selected) and invoke
        its __init__ with the call-site arguments, if there is one."""
        s_instance = hop.s_result
        r_instance = hop.r_result

        if len(self.s_pbc.descriptions) == 1:
            # instantiating a single class
            if self.lowleveltype is not Void:
                assert 0, "XXX None-or-1-class instantation not implemented"
            assert isinstance(s_instance, annmodel.SomeInstance)
            classdef = hop.s_result.classdef
            s_init = classdef.classdesc.s_read_attribute('__init__')
            v_init = Constant("init-func-dummy")   # this value not really used

            if (isinstance(s_init, annmodel.SomeImpossibleValue) and
                classdef.classdesc.is_exception_class() and
                classdef.has_no_attrs()):
                # special case for instanciating simple built-in
                # exceptions: always return the same prebuilt instance,
                # and ignore any arguments passed to the contructor.
                r_instance = rclass.getinstancerepr(hop.rtyper, classdef)
                example = r_instance.get_reusable_prebuilt_instance()
                hop.exception_cannot_occur()
                return hop.inputconst(r_instance.lowleveltype, example)

            v_instance = rclass.rtype_new_instance(hop.rtyper, classdef,
                                                   hop.llops, hop)
            if isinstance(v_instance, tuple):
                v_instance, must_call_init = v_instance
                if not must_call_init:
                    return v_instance
        else:
            # instantiating a class from multiple possible classes
            vtypeptr = hop.inputarg(self, arg=0)
            try:
                access_set, r_class = self.get_access_set('__init__')
            except rclass.MissingRTypeAttribute:
                s_init = annmodel.s_ImpossibleValue
            else:
                s_init = access_set.s_value
                v_init = r_class.getpbcfield(vtypeptr, access_set, '__init__',
                                             hop.llops)
            v_instance = self._instantiate_runtime_class(hop, vtypeptr, r_instance)

        if isinstance(s_init, annmodel.SomeImpossibleValue):
            assert hop.nb_args == 1, ("arguments passed to __init__, "
                                      "but no __init__!")
            hop.exception_cannot_occur()
        else:
            hop2 = self.replace_class_with_inst_arg(
                    hop, v_instance, s_instance, call_args)
            hop2.v_s_insertfirstarg(v_init, s_init)   # add 'initfunc'
            hop2.s_result = annmodel.s_None
            hop2.r_result = self.rtyper.getrepr(hop2.s_result)
            # now hop2 looks like simple_call(initfunc, instance, args...)
            hop2.dispatch()
        return v_instance
class __extend__(pairtype(AbstractClassesPBCRepr, rclass.AbstractClassRepr)):
    def convert_from_to((r_clspbc, r_cls), v, llops):
        # turn a PBC of classes to a standard pointer-to-vtable class repr
        if r_clspbc.lowleveltype == r_cls.lowleveltype:
            return v
        if r_clspbc.lowleveltype is Void:
            # constant class: materialize it in the destination repr
            return inputconst(r_cls, r_clspbc.s_pbc.const)
        # convert from ptr-to-object-vtable to ptr-to-more-precise-vtable
        assert (r_clspbc.lowleveltype ==
                r_clspbc.rtyper.type_system.rclass.CLASSTYPE)
        return r_cls.fromclasstype(v, llops)
class __extend__(pairtype(AbstractClassesPBCRepr, AbstractClassesPBCRepr)):
    def convert_from_to((r_clspbc1, r_clspbc2), v, llops):
        # this check makes sense because both source and dest repr are ClassesPBCRepr
        if r_clspbc1.lowleveltype == r_clspbc2.lowleveltype:
            return v
        if r_clspbc1.lowleveltype is Void:
            # constant source class: materialize it
            return inputconst(r_clspbc2, r_clspbc1.s_pbc.const)
        if r_clspbc2.lowleveltype is Void:
            # destination is a known constant: the value becomes Void
            return inputconst(Void, r_clspbc2.s_pbc.const)
        return NotImplemented
def adjust_shape(hop2, s_shape):
    # an extra first argument was inserted into the call: bump the
    # positional-argument count in the call-shape tuple and reinsert it
    new_shape = (s_shape.const[0]+1,) + s_shape.const[1:]
    c_shape = Constant(new_shape)
    s_shape = hop2.rtyper.annotator.bookkeeper.immutablevalue(new_shape)
    hop2.v_s_insertfirstarg(c_shape, s_shape) # reinsert adjusted shape
class AbstractMethodsPBCRepr(Repr):
    """Representation selected for a PBC of MethodDescs.
    It assumes that all the methods come from the same name and have
    been read from instances with a common base."""

    def __init__(self, rtyper, s_pbc):
        self.rtyper = rtyper
        self.s_pbc = s_pbc
        if s_pbc.isNone():
            raise TyperError("unsupported: variable of type "
                             "bound-method-object or None")
        mdescs = s_pbc.descriptions.keys()
        methodname = mdescs[0].name
        classdef = mdescs[0].selfclassdef
        flags = mdescs[0].flags
        # all descriptions must agree on the method name and flags, and
        # their self classes must share a common base
        for mdesc in mdescs[1:]:
            if mdesc.name != methodname:
                raise TyperError("cannot find a unique name under which the "
                                 "methods can be found: %r" % (
                        mdescs,))
            if mdesc.flags != flags:
                raise TyperError("inconsistent 'flags': %r versus %r" % (
                    mdesc.flags, flags))
            classdef = classdef.commonbase(mdesc.selfclassdef)
            if classdef is None:
                raise TyperError("mixing methods coming from instances of "
                                 "classes with no common base: %r" % (mdescs,))

        self.methodname = methodname
        self.classdef = classdef.locate_attribute(methodname)
        # the low-level representation is just the bound 'self' argument.
        self.s_im_self = annmodel.SomeInstance(self.classdef, flags=flags)
        self.r_im_self = rclass.getinstancerepr(rtyper, self.classdef)
        self.lowleveltype = self.r_im_self.lowleveltype

    def convert_const(self, method):
        if getattr(method, 'im_func', None) is None:
            raise TyperError("not a bound method: %r" % method)
        return self.r_im_self.convert_const(method.im_self)

    def get_r_implfunc(self):
        # the implementing function lives in the class vtable
        r_class = self.r_im_self.rclass
        mangled_name, r_func = r_class.clsfields[self.methodname]
        return r_func, 1

    def get_s_callable(self):
        return self.s_pbc

    def get_method_from_instance(self, r_inst, v_inst, llops):
        # The 'self' might have to be cast to a parent class
        # (as shown for example in test_rclass/test_method_both_A_and_B)
        return llops.convertvar(v_inst, r_inst, self.r_im_self)

    def add_instance_arg_to_hop(self, hop, call_args):
        # rewrite method(args...) so the first argument is 'im_self'
        hop2 = hop.copy()
        hop2.args_s[0] = self.s_im_self   # make the 1st arg stand for 'im_self'
        hop2.args_r[0] = self.r_im_self   # (same lowleveltype as 'self')

        if call_args:
            hop2.swap_fst_snd_args()
            _, s_shape = hop2.r_s_popfirstarg()
            adjust_shape(hop2, s_shape)
        return hop2
# ____________________________________________________________
##def getsignature(rtyper, func):
## f = rtyper.getcallable(func)
## graph = rtyper.type_system_deref(f).graph
## rinputs = [rtyper.bindingrepr(v) for v in graph.getargs()]
## if graph.getreturnvar() in rtyper.annotator.bindings:
## rresult = rtyper.bindingrepr(graph.getreturnvar())
## else:
## rresult = Void
## return f, rinputs, rresult
def samesig(funcs):
    """Return True if every function in 'funcs' has the same argspec
    (argument names, *args/**kwargs, and default values)."""
    import inspect
    reference = inspect.getargspec(funcs[0])
    for fn in funcs[1:]:
        if inspect.getargspec(fn) != reference:
            return False
    return True
# ____________________________________________________________
def commonbase(classdefs):
    """Return the nearest classdef that is a common base of all the given
    classdefs; raise TyperError when no common base exists."""
    common = classdefs[0]
    for other in classdefs[1:]:
        common = common.commonbase(other)
        if common is None:
            raise TyperError("no common base class in %r" % (classdefs,))
    return common
def allattributenames(classdef):
    """Yield (classdef, attrname) pairs for every attribute found
    anywhere along the MRO of 'classdef'."""
    for base in classdef.getmro():
        for name in base.attrs:
            yield base, name
| Python |
class TyperError(Exception):
    """Error raised during rtyping.

    When a 'where' attribute (a 2-tuple of context objects) has been
    attached to the exception, its reprs are appended to the message.
    """

    def __str__(self):
        text = Exception.__str__(self)
        if hasattr(self, 'where'):
            text += '\n.. %r\n.. %r' % self.where
        return text
class MissingRTypeOperation(TyperError):
    """Raised when no rtyping rule applies to an operation."""
    pass
| Python |
from pypy.annotation.annrpython import RPythonAnnotator
from pypy.rpython.rtyper import RPythonTyper
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.memory.support import get_address_linked_list, INT_SIZE
from pypy.rpython.memory.lladdress import raw_malloc, raw_free, NULL
from pypy.rpython.memory import lltypelayout
from pypy.rpython.memory import lltypesimulation
from pypy.rpython.memory import gc
from pypy.rpython.memory.convertlltype import FlowGraphConstantConverter
class QueryTypes(object):
    """Registry mapping low-level types to small integer typeids, and
    answering the GC's layout queries (fixed size, GC-pointer offsets,
    varsize information) about them."""

    def __init__(self):
        self.types = []             # typeid -> TYPE
        self.type_to_typeid = {}    # TYPE -> typeid

    def get_typeid(self, TYPE, nonewtype=False):
        # return the typeid of TYPE, registering a new id unless
        # 'nonewtype' forbids it
        if TYPE not in self.type_to_typeid:
            if nonewtype:
                raise Exception, "unknown type: %s" % TYPE
            index = len(self.types)
            self.type_to_typeid[TYPE] = index
            self.types.append(TYPE)
            return index
        typeid = self.type_to_typeid[TYPE]
        return typeid

    def create_query_functions(self):
        """Precompute all per-typeid answers into plain lists and return
        closures over them, so the query functions are annotatable."""
        from pypy.rpython.lltypesystem import rstr
        _is_varsize = []
        _finalizers = []
        _offsets_to_gc_pointers = []
        _fixed_size = []
        _varsize_item_sizes = []
        _varsize_offset_to_variable_part = []
        _varsize_offset_to_length = []
        _varsize_offsets_to_gcpointers_in_var_part = []
        # swap the (TYPE, typeid) pairs into (typeid, TYPE), sort by
        # typeid, then swap back: TYPEs themselves are not sortable
        tttid = zip(*zip(*self.type_to_typeid.items())[::-1])
        tttid.sort()
        tttid = zip(*zip(*tttid)[::-1])
        for TYPE, typeid in tttid:
            varsize = self.is_varsize(typeid)
            _is_varsize.append(varsize)
            _finalizers.append(None)
            _offsets_to_gc_pointers.append(self.offsets_to_gc_pointers(typeid))
            _fixed_size.append(self.fixed_size(typeid))
            if varsize:
                _varsize_item_sizes.append(self.varsize_item_sizes(typeid))
                _varsize_offset_to_variable_part.append(
                    self.varsize_offset_to_variable_part(typeid))
                _varsize_offset_to_length.append(
                    self.varsize_offset_to_length(typeid))
                _varsize_offsets_to_gcpointers_in_var_part.append(
                    lltypelayout.varsize_offsets_to_gcpointers_in_var_part(TYPE))
            else:
                # dummy entries keep the lists aligned with typeids
                _varsize_item_sizes.append(0)
                _varsize_offset_to_variable_part.append(0)
                _varsize_offset_to_length.append(0)
                _varsize_offsets_to_gcpointers_in_var_part.append([])
        # trick to make the annotator see that the list can contain functions:
        _finalizers.append(lambda addr: None)
        def is_varsize(typeid):
            return _is_varsize[typeid]
        def getfinalizer(typeid):
            return _finalizers[typeid]
        def offsets_to_gc_pointers(typeid):
            return _offsets_to_gc_pointers[typeid]
        def fixed_size(typeid):
            return _fixed_size[typeid]
        def varsize_item_sizes(typeid):
            return _varsize_item_sizes[typeid]
        def varsize_offset_to_variable_part(typeid):
            return _varsize_offset_to_variable_part[typeid]
        def varsize_offset_to_length(typeid):
            return _varsize_offset_to_length[typeid]
        def varsize_offsets_to_gcpointers_in_var_part(typeid):
            return _varsize_offsets_to_gcpointers_in_var_part[typeid]
        return (is_varsize, getfinalizer, offsets_to_gc_pointers, fixed_size,
                varsize_item_sizes, varsize_offset_to_variable_part,
                varsize_offset_to_length,
                varsize_offsets_to_gcpointers_in_var_part)

    def is_varsize(self, typeid):
        # arrays and structs whose last field is an inlined array
        assert typeid >= 0
        TYPE = self.types[typeid]
        return (isinstance(TYPE, lltype.Array) or
                (isinstance(TYPE, lltype.Struct) and
                 TYPE._arrayfld is not None))

    def getfinalizer(self, typeid):
        return None

    def offsets_to_gc_pointers(self, typeid):
        assert typeid >= 0
        return lltypelayout.offsets_to_gc_pointers(self.types[typeid])

    def fixed_size(self, typeid):
        assert typeid >= 0
        return lltypelayout.get_fixed_size(self.types[typeid])

    def varsize_item_sizes(self, typeid):
        assert typeid >= 0
        if self.is_varsize(typeid):
            return lltypelayout.get_variable_size(self.types[typeid])
        else:
            return 0

    def varsize_offset_to_variable_part(self, typeid):
        # the variable part starts right after the fixed part
        assert typeid >= 0
        if self.is_varsize(typeid):
            return lltypelayout.get_fixed_size(self.types[typeid])
        else:
            return 0

    def varsize_offset_to_length(self, typeid):
        assert typeid >= 0
        if self.is_varsize(typeid):
            return lltypelayout.varsize_offset_to_length(self.types[typeid])
        else:
            return 0

    def varsize_offsets_to_gcpointers_in_var_part(self, typeid):
        assert typeid >= 0
        if self.is_varsize(typeid):
            return lltypelayout.varsize_offsets_to_gcpointers_in_var_part(
                self.types[typeid])
        else:
            return 0

    def get_setup_query_functions(self):
        # bound-method versions of the queries, usable before the
        # annotatable closures from create_query_functions() exist
        return (self.is_varsize, self.getfinalizer,
                self.offsets_to_gc_pointers, self.fixed_size,
                self.varsize_item_sizes, self.varsize_offset_to_variable_part,
                self.varsize_offset_to_length,
                self.varsize_offsets_to_gcpointers_in_var_part)
def getfunctionptr(annotator, graphfunc):
    """Make a functionptr from the given Python function."""
    graph = annotator.bookkeeper.getdesc(graphfunc).getuniquegraph()
    # derive the low-level signature from the graph's typed variables
    llinputs = [v.concretetype for v in graph.getargs()]
    lloutput = graph.getreturnvar().concretetype
    FT = lltype.FuncType(llinputs, lloutput)
    _callable = graphfunc
    return lltypesimulation.functionptr(FT, graphfunc.func_name,
                                        graph=graph, _callable=_callable)
class GcWrapper(object):
    """Glue between the LLInterpreter and a simulated GC: converts the
    flow graphs' constants into simulated memory, forwards malloc and
    write-barrier requests, and enumerates roots for collections."""

    def __init__(self, llinterp, flowgraphs, gc_class):
        self.query_types = QueryTypes()
        self.AddressLinkedList = get_address_linked_list(3, hackishpop=True)
        # XXX there might be GCs that have headers that depend on the type
        # therefore we have to change the query functions to annotatable ones
        # later
        self.gc = gc_class(self.AddressLinkedList)
        self.gc.set_query_functions(*self.query_types.get_setup_query_functions())
        fgcc = FlowGraphConstantConverter(flowgraphs, self.gc, self.query_types)
        fgcc.convert()
        # now that all types are registered, switch to the precomputed
        # (annotatable) query functions
        self.gc.set_query_functions(*self.query_types.create_query_functions())
        self.llinterp = llinterp
        self.gc.get_roots = self.get_roots
        self.constantroots = fgcc.cvter.constantroots
        self.pseudo_root_pointers = NULL
        self.roots = []
        self.gc.setup()

    def get_arg_malloc(self, TYPE, size=0):
        # arguments for a call to the GC's malloc: (typeid, size)
        typeid = self.query_types.get_typeid(TYPE, nonewtype=True)
        return [typeid, size]

    def get_funcptr_malloc(self):
        return self.llinterp.heap.functionptr(gc.gc_interface["malloc"], "malloc",
                                              _callable=self.gc.malloc)

    def adjust_result_malloc(self, address, TYPE, size=0):
        # wrap the raw address returned by the GC into a simulated object
        result = lltypesimulation.init_object_on_address(address, TYPE, size)
        self.update_changed_addresses()
        return result

    def needs_write_barrier(self, TYPE):
        # only GC-pointer stores into GC objects need a write barrier
        return (hasattr(self.gc, "write_barrier") and
                isinstance(TYPE, lltype.Ptr) and
                isinstance(TYPE.TO, (lltype.GcStruct, lltype.GcArray)))

    def get_arg_write_barrier(self, obj, index_or_field, item):
        #XXX: quick hack to get the correct addresses, fix later
        layout = lltypelayout.get_layout(lltype.typeOf(obj).TO)
        if isinstance(lltype.typeOf(obj).TO, lltype.Array):
            assert isinstance(index_or_field, int)
            offset = layout[0] + layout[1] * index_or_field
            addr_to = obj._address + layout[0] + index_or_field * layout[1]
            return item._address, addr_to, obj._address
        else:
            offset = layout[index_or_field]
            addr_to = obj._address + offset
            return item._address, addr_to, obj._address

    def get_funcptr_write_barrier(self):
        return self.llinterp.heap.functionptr(gc.gc_interface["write_barrier"],
                                              "write_barrier",
                                              _callable=self.gc.write_barrier)

    def update_changed_addresses(self):
        # a moving collection may have relocated the roots: refresh the
        # simulated objects' addresses from the root-pointer array
        for i, root in enumerate(self.roots):
            root.__dict__['_address'] = self.pseudo_root_pointers.address[i]

    def get_roots_from_llinterp(self):
        # rebuild self.roots from the interpreter's stack plus the
        # converted constants, and (re)allocate the root-pointer array
        if self.pseudo_root_pointers != NULL:
            raw_free(self.pseudo_root_pointers)
        roots = [r for r in self.llinterp.find_roots()
                 if isinstance(r._TYPE.TO,
                               (lltype.GcStruct, lltype.GcArray))]
        self.roots = roots + self.constantroots
        self.roots = [r for r in self.roots
                      if isinstance(r._TYPE.TO,
                                    (lltype.Struct, lltype.Array))]
        if len(self.roots) == 0:
            self.pseudo_root_pointers = NULL
        else:
            self.pseudo_root_pointers = raw_malloc(len(self.roots) * INT_SIZE)
        return self.roots

    def get_roots(self):
        # hand the GC a linked list of addresses-of-root-pointers
        self.get_roots_from_llinterp()
        ll = self.AddressLinkedList()
        for i, root in enumerate(self.roots):
            self.pseudo_root_pointers.address[i] = root._address
            ll.append(self.pseudo_root_pointers + INT_SIZE * i)
        return ll
class AnnotatingGcWrapper(GcWrapper):
    """Variant of GcWrapper that annotates and rtypes the GC's own code,
    then runs the GC itself through the LLInterpreter."""

    def __init__(self, llinterp, flowgraphs, gc_class):
        super(AnnotatingGcWrapper, self).__init__(llinterp, flowgraphs, gc_class)
        # tell the real-built gc to free its memory as it is only used for
        # initialisation
        self.gc.free_memory()
        self.annotate_rtype_gc()

    def annotate_rtype_gc(self):
        # annotate and specialize functions
        gc_class = self.gc.__class__
        AddressLinkedList = self.AddressLinkedList
        def instantiate_linked_list():
            return AddressLinkedList()
        f1, f2, f3, f4, f5, f6, f7, f8 = self.query_types.create_query_functions()
        the_gc = gc_class(AddressLinkedList)
        def instantiate_gc():
            the_gc.set_query_functions(f1, f2, f3, f4, f5, f6, f7, f8)
            the_gc.setup()
            return the_gc
        func, dummy_get_roots1, dummy_get_roots2 = gc.get_dummy_annotate(
            the_gc, self.AddressLinkedList)
        self.gc.get_roots = dummy_get_roots1
        a = RPythonAnnotator()
        a.build_types(instantiate_gc, [])
        a.build_types(func, [])
        a.build_types(instantiate_linked_list, [])
        typer = RPythonTyper(a)
        typer.specialize()
        self.annotator = a

        # convert constants
        fgcc = FlowGraphConstantConverter(a.translator.graphs)
        fgcc.convert()
        self.malloc_graph = a.bookkeeper.getdesc(self.gc.malloc.im_func).getuniquegraph()
        self.write_barrier_graph = a.bookkeeper.getdesc(self.gc.write_barrier.im_func).getuniquegraph()

        # create a gc via invoking instantiate_gc
        self.gcptr = self.llinterp.eval_graph(
            a.bookkeeper.getdesc(instantiate_gc).getuniquegraph())
        GETROOTS_FUNCTYPE = lltype.typeOf(
            getfunctionptr(a, dummy_get_roots1)).TO
        setattr(self.gcptr, "inst_get_roots",
                lltypesimulation.functionptr(GETROOTS_FUNCTYPE, "get_roots",
                                             _callable=self.get_roots))
        #get funcptrs neccessary to build the result of get_roots
        self.instantiate_linked_list = getfunctionptr(
            a, instantiate_linked_list)
        self.append_linked_list = getfunctionptr(
            a, AddressLinkedList.append.im_func)
        self.pop_linked_list = getfunctionptr(
            a, AddressLinkedList.pop.im_func)
        self.gc.get_roots = None
        self.translator = a.translator
##        a.translator.view()

    def get_arg_malloc(self, TYPE, size=0):
        # the rtyped malloc also takes the gc instance as first argument
        typeid = self.query_types.get_typeid(TYPE, nonewtype=True)
        return [self.gcptr, typeid, size]

    def get_funcptr_malloc(self):
        # prepend the gc-instance argument to the declared interface
        FUNC = gc.gc_interface["malloc"]
        FUNC = lltype.FuncType([lltype.typeOf(self.gcptr)] + list(FUNC.ARGS), FUNC.RESULT)
        return self.llinterp.heap.functionptr(FUNC, "malloc",
                                              _callable=self.gc.malloc,
                                              graph=self.malloc_graph)

    def adjust_result_malloc(self, address, TYPE, size=0):
        # wrap the raw address returned by the GC into a simulated object
        result = lltypesimulation.init_object_on_address(address, TYPE, size)
        self.update_changed_addresses()
        return result

    def get_arg_write_barrier(self, obj, index_or_field, item):
        #XXX: quick hack to get the correct addresses, fix later
        layout = lltypelayout.get_layout(lltype.typeOf(obj).TO)
        if isinstance(lltype.typeOf(obj).TO, lltype.Array):
            assert isinstance(index_or_field, int)
            offset = layout[0] + layout[1] * index_or_field
            addr_to = obj._address + layout[0] + index_or_field * layout[1]
            return self.gcptr, item._address, addr_to, obj._address
        else:
            offset = layout[index_or_field]
            addr_to = obj._address + offset
            return self.gcptr, item._address, addr_to, obj._address

    def get_funcptr_write_barrier(self):
        # prepend the gc-instance argument to the declared interface
        FUNC = gc.gc_interface["write_barrier"]
        FUNC = lltype.FuncType([lltype.typeOf(self.gcptr)] + list(FUNC.ARGS), FUNC.RESULT)
        return self.llinterp.heap.functionptr(FUNC,
                                              "write_barrier",
                                              _callable=self.gc.write_barrier,
                                              graph=self.write_barrier_graph)

    def get_roots(self):
        # call the llinterpreter to construct the result in a suitable way
        self.get_roots_from_llinterp()
        ll = self.llinterp.active_frame.op_direct_call(
            self.instantiate_linked_list)
        for i, root in enumerate(self.roots):
            self.pseudo_root_pointers.address[i] = root._address
            self.llinterp.active_frame.op_direct_call(
                self.append_linked_list, ll,
                self.pseudo_root_pointers + INT_SIZE * i)
        return ll
| Python |
import py
from pypy.rpython.memory.lltypelayout import get_layout, get_fixed_size
from pypy.rpython.memory.lltypelayout import get_variable_size, sizeof
from pypy.rpython.memory.lltypelayout import primitive_to_fmt
from pypy.rpython.memory import lladdress
from pypy.rpython.lltypesystem import lltype, llmemory
log = py.log.Producer("lltypesim")
def _expose(T, address):
    """Wrap the raw 'address' as the Python-level view of a value of
    low-level type 'T': a simulatorptr for containers, pointers and
    functions, or the unwrapped primitive value itself."""
    if isinstance(T, (lltype.Struct, lltype.Array)):
        return simulatorptr(lltype.Ptr(T), address)
    elif T == lltype.Bool:
        # Bool is a Primitive; it must be special-cased before the
        # generic Primitive branch to convert the raw value to a bool
        return bool(address._load(primitive_to_fmt[T])[0])
    elif T == llmemory.Address:
        # bug fix: this is a module-level function, so the previous
        # 'self._address + offset' raised NameError; read the stored
        # address directly from 'address'
        return address.address[0]
    elif isinstance(T, lltype.Primitive):
        return address._load(primitive_to_fmt[T])[0]
    elif isinstance(T, lltype.Ptr):
        return simulatorptr(T, address.address[0])
    elif isinstance(T, lltype.PyObjectType):
        return simulatorptr(lltype.Ptr(T), address)
    elif isinstance(T, lltype.FuncType):
        return simulatorptr(lltype.Ptr(T), address)
    else:
        assert 0, "not implemented yet"
#_____________________________________________________________________________
# this class is intended to replace the _ptr class in lltype
# using the memory simulator
class simulatorptr(object):
    def __init__(self, TYPE, address):
        # store through __dict__ to bypass our own __setattr__, which
        # only accepts names of low-level struct fields
        self.__dict__['_TYPE'] = TYPE
        self.__dict__['_T'] = TYPE.TO
        self.__dict__['_address'] = address
        self.__dict__['_layout'] = get_layout(TYPE.TO)
def _zero_initialize(self, i=None):
size = sizeof(self._T, i)
self._address._store("c" * size, *(["\x00"] * size))
def _init_size(self, size):
if isinstance(self._T, lltype.Array):
self._address.signed[0] = size
elif isinstance(self._T, lltype.Struct):
if self._T._arrayfld is not None:
addr = self._address + self._layout[self._T._arrayfld]
addr.signed[0] = size
else:
assert size is None, "setting not implemented"
def __getattr__(self, field_name):
if isinstance(self._T, lltype.Struct):
offset = self._layout[field_name]
if field_name in self._T._flds:
T = self._T._flds[field_name]
base = self._layout[field_name]
if isinstance(T, lltype.Primitive):
if T == lltype.Void:
return None
elif T == llmemory.Address:
return (self._address + offset).address[0]
res = (self._address + offset)._load(primitive_to_fmt[T])[0]
if T == lltype.Bool:
res = bool(res)
return res
elif isinstance(T, lltype.Ptr):
res = _expose(T.TO, (self._address + offset).address[0])
return res
elif isinstance(T, lltype.ContainerType):
res = _expose(T, (self._address + offset))
return res
else:
assert 0, "not implemented"
if isinstance(self._T, lltype.ContainerType):
adtmeth = self._T._adtmeths.get(field_name)
if adtmeth is not None:
return adtmeth.__get__(self)
raise AttributeError, ("%r instance has no field %r" % (self._T,
field_name))
def __setattr__(self, field_name, value):
if isinstance(self._T, lltype.Struct):
if field_name in self._T._flds:
T = self._T._flds[field_name]
offset = self._layout[field_name]
if isinstance(T, lltype.Primitive):
if T == lltype.Void:
return
if T == llmemory.Address:
(self._address + offset).address[0] = value
else:
(self._address + offset)._store(primitive_to_fmt[T],
value)
return
elif isinstance(T, lltype.Ptr):
assert value._TYPE == T
(self._address + offset).address[0] = value._address
return
else:
assert 0, "not implemented"
raise AttributeError, ("%r instance has no field %r" % (self._T,
field_name))
def __getitem__(self, i):
if isinstance(self._T, lltype.FixedSizeArray):
return self.__getattr__('item%d' % i)
if isinstance(self._T, lltype.Array):
if not (0 <= i < self._address.signed[0]):
raise IndexError, "array index out of bounds"
addr = self._address + self._layout[0] + i * self._layout[1]
return _expose(self._T.OF, addr)
raise TypeError("%r instance is not an array" % (self._T,))
def __setitem__(self, i, value):
if isinstance(self._T, lltype.FixedSizeArray):
return self.__setattr__('item%d' % i, value)
if isinstance(self._T, lltype.Array):
T1 = self._T.OF
if isinstance(T1, lltype.ContainerType):
s = "cannot directly assign to container array items"
raise TypeError, s
T2 = lltype.typeOf(value)
if T2 != T1:
raise TypeError("%r items:\n"
"expect %r\n"
" got %r" % (self._T, T1, T2))
if not (0 <= i < self._address.signed[0]):
raise IndexError, "array index out of bounds"
if isinstance(T2, lltype.Ptr):
value = value._address.intaddress
addr = self._address + self._layout[0] + i * self._layout[1]
addr._store(get_layout(self._T.OF), value)
return
raise TypeError("%r instance is not an array" % (self._T,))
def _getobj(self):
assert isinstance(self._T, (lltype.FuncType, lltype.PyObjectType))
return lladdress.get_py_object(self._address)
_obj = property(_getobj)
def __call__(self, *args):
if isinstance(self._T, lltype.FuncType):
if len(args) != len(self._T.ARGS):
raise TypeError,"calling %r with wrong argument number: %r" % (self._T, args)
for a, ARG in zip(args, self._T.ARGS):
if lltype.typeOf(a) != ARG:
raise TypeError,"calling %r with wrong argument types: %r" % (self._T, args)
callb = lladdress.get_py_object(self._address)._callable
if callb is None:
raise RuntimeError,"calling undefined function"
return callb(*args)
raise TypeError("%r instance is not a function" % (self._T,))
def __len__(self):
if isinstance(self._T, lltype.Array):
return self._address.signed[0]
raise TypeError("%r instance is not an array" % (self._T,))
def __nonzero__(self):
return self._address != lladdress.NULL
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
if not isinstance(other, simulatorptr):
raise TypeError("comparing pointer with %r object" % (
type(other).__name__,))
if self._TYPE != other._TYPE:
raise TypeError("comparing %r and %r" % (self._TYPE, other._TYPE))
return self._address == other._address
def __repr__(self):
return '<simulatorptr %s to %s>' % (self._TYPE.TO, self._address)
def _cast_to(self, PTRTYPE):
CURTYPE = self._TYPE
down_or_up = lltype.castable(PTRTYPE, CURTYPE)
if down_or_up == 0:
return self
return simulatorptr(PTRTYPE, self._address)
def _cast_to_int(self):
return self._address.intaddress
def _cast_to_adr(self):
return self._address
# for now use the simulators raw_malloc
def malloc(T, n=None, immortal=False, flavor='gc'):
    """Allocate an object of type T in the simulated heap.

    n is the length of the variable part, required iff T is var-sized.
    The 'immortal' and 'flavor' arguments are accepted for interface
    compatibility and ignored by the simulator.
    """
    base_size = get_fixed_size(T)
    item_size = get_variable_size(T)
    if n is not None:
        total = base_size + n * item_size
    else:
        if item_size:
            raise TypeError("%r is variable-sized" % (T,))
        total = base_size
    addr = lladdress.raw_malloc(total)
    return init_object_on_address(addr, T, n)
def free(obj, flavor="gc"):
    """Release the simulated memory owned by obj and null out its address.

    Only valid for non-gc flavors: gc-managed objects must not be freed
    explicitly.
    """
    assert not flavor.startswith("gc")
    assert isinstance(obj, simulatorptr)
    addr = obj._address
    lladdress.raw_free(addr)
    # bypass simulatorptr.__setattr__, which writes into simulated memory
    obj.__dict__["_address"] = lladdress.NULL
def init_object_on_address(address, T, n=None):
    """Place a fresh, zero-filled object of type T at the given simulated
    address and return a simulatorptr to it (n = var-part length)."""
    ptr = simulatorptr(lltype.Ptr(T), address)
    ptr._zero_initialize(n)
    ptr._init_size(n)
    return ptr
def nullptr(T):
    """Return the simulated null pointer of type Ptr(T)."""
    PTRTYPE = lltype.Ptr(T)
    return simulatorptr(PTRTYPE, lladdress.NULL)
def functionptr(TYPE, name, **attrs):
    """Create a simulated function pointer for the given FuncType.

    The extra keyword attributes must be hashable, mirroring the
    constraint of lltype.functionptr().
    """
    if not isinstance(TYPE, lltype.FuncType):
        raise TypeError("functionptr() for FuncTypes only")
    try:
        hash(tuple(attrs.items()))
    except TypeError:
        raise TypeError("'%r' must be hashable"%attrs)
    func_obj = lltype._func(TYPE, _name=name, **attrs)
    addr = lladdress.get_address_of_object(func_obj)
    return simulatorptr(lltype.Ptr(TYPE), addr)
def pyobjectptr(obj):
    """Wrap a live Python object as a simulated PyObject pointer."""
    wrapper = lltype._pyobject(obj)
    addr = lladdress.get_address_of_object(wrapper)
    return simulatorptr(lltype.Ptr(lltype.PyObject), addr)
| Python |
import weakref
from pypy.rpython.lltypesystem import lltype, llmemory
# this is global because a header cannot be a header of more than one GcObj
header2obj = weakref.WeakKeyDictionary()
class GCHeaderBuilder(object):
    """Associates GC objects with out-of-line header structures of type HDR.

    Headers are tracked per-builder in obj2header; the reverse mapping
    lives in the module-global header2obj because a header belongs to at
    most one object.
    """

    def __init__(self, HDR):
        """NOT_RPYTHON"""
        self.HDR = HDR
        self.obj2header = weakref.WeakKeyDictionary()
        self.size_gc_header = llmemory.GCHeaderOffset(self)

    def header_of_object(self, gcptr):
        # key on the underlying _obj so that any pointer to the same
        # object resolves to the same header
        return self.obj2header[gcptr._as_obj()]

    @staticmethod
    def object_from_header(headerptr):
        return header2obj[headerptr._as_obj()]

    def get_header(self, gcptr):
        # like header_of_object(), but returns None instead of raising
        obj = gcptr._as_obj()
        return self.obj2header.get(obj, None)

    def new_header(self, gcptr):
        """Allocate and register a fresh header for gcptr's object."""
        obj = gcptr._as_obj()
        assert obj not in self.obj2header
        # sanity checks
        assert obj._TYPE._gckind == 'gc'
        assert not isinstance(obj._TYPE, lltype.GcOpaqueType)
        assert not obj._parentstructure()
        headerptr = lltype.malloc(self.HDR, immortal=True)
        self.obj2header[obj] = headerptr
        header2obj[headerptr._obj] = gcptr._as_ptr()
        return headerptr

    def _freeze_(self):
        return True # for reads of size_gc_header
| Python |
from pypy.rpython.memory import lladdress, lltypelayout
from pypy.rpython.memory.lltypesimulation import simulatorptr, sizeof
from pypy.rpython.memory.lltypesimulation import nullptr, malloc
from pypy.rpython.memory.lltypesimulation import init_object_on_address
from pypy.objspace.flow.model import traverse, Link, Constant, Block
from pypy.objspace.flow.model import Constant
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.rmodel import IntegerRepr
import types
# Kinds of Python callables that the constant converter leaves untouched
# (they are kept as-is rather than converted into simulated memory).
FUNCTIONTYPES = (types.FunctionType, types.UnboundMethodType,
                 types.BuiltinFunctionType)
def get_real_value(val_or_ptr):
    """Collapse a symbolic AddressOffset to a concrete int; any other
    value is returned unchanged."""
    if not isinstance(val_or_ptr, llmemory.AddressOffset):
        return val_or_ptr
    return lltypelayout.convert_offset_to_int(val_or_ptr)
def size_gc_header(gc, typeid):
    """Concrete (integer) size of gc's header for objects of 'typeid'."""
    symbolic_size = gc.size_gc_header(typeid)
    return get_real_value(symbolic_size)
class LLTypeConverter(object):
    """Copies lltype constants into a flat simulated memory region.

    Objects are laid out sequentially starting at 'address'; already
    converted objects are cached in self.converted so shared/recursive
    structures convert to a single copy.  When a gc is given, each
    top-level object is preceded by an immortal GC header.
    """
    def __init__(self, address, gc=None, qt=None):
        self.type_to_typeid = {}
        self.types = []
        self.converted = {}          # original _obj -> simulatorptr
        self.curraddress = address   # bump-pointer into the region
        self.constantroots = []      # all top-level converted pointers
        self.gc = gc
        self.query_types = qt
    def convert(self, val_or_ptr, inline_to_ptr=None):
        """Convert one value; inline_to_ptr, when given, is the already
        allocated destination inside an enclosing container."""
        TYPE = lltype.typeOf(val_or_ptr)
        if isinstance(TYPE, lltype.Primitive):
            assert inline_to_ptr is None
            return get_real_value(val_or_ptr)
        elif isinstance(TYPE, lltype.Array):
            return self.convert_array(val_or_ptr, inline_to_ptr)
        elif isinstance(TYPE, lltype.Struct):
            return self.convert_struct(val_or_ptr, inline_to_ptr)
        elif isinstance(TYPE, lltype.Ptr):
            return self.convert_pointer(val_or_ptr, inline_to_ptr)
        elif isinstance(TYPE, lltype.OpaqueType):
            return self.convert_object(val_or_ptr, inline_to_ptr)
        elif isinstance(TYPE, lltype.FuncType):
            return self.convert_object(val_or_ptr, inline_to_ptr)
        elif isinstance(TYPE, lltype.PyObjectType):
            return self.convert_object(val_or_ptr, inline_to_ptr)
        else:
            assert 0, "don't know about %s" % (val_or_ptr, )
    def convert_array(self, _array, inline_to_ptr):
        """Copy an lltype._array into simulated memory (or into
        inline_to_ptr when it is embedded in a struct)."""
        if _array in self.converted:
            ptr = self.converted[_array]
            assert inline_to_ptr is None or ptr == inline_to_ptr
            return ptr
        TYPE = lltype.typeOf(_array)
        arraylength = len(_array.items)
        size = sizeof(TYPE, arraylength)
        if inline_to_ptr is not None:
            ptr = inline_to_ptr
        else:
            startaddr = self.curraddress
            self.curraddress += size
            if self.gc is not None:
                # the GC header sits before the object itself
                typeid = self.query_types.get_typeid(TYPE)
                self.gc.init_gc_object_immortal(startaddr, typeid)
                startaddr += size_gc_header(self.gc, typeid)
                self.curraddress += size_gc_header(self.gc, typeid)
            ptr = init_object_on_address(startaddr, TYPE, arraylength)
            self.constantroots.append(ptr)
        # cache before converting items, so cycles terminate
        self.converted[_array] = ptr
        if isinstance(TYPE.OF, lltype.Struct):
            # struct items are converted in place, inside the array
            for i, item in enumerate(_array.items):
                self.convert(item, ptr[i])
        else:
            for i, item in enumerate(_array.items):
                if not isinstance(item, lltype._uninitialized):
                    ptr[i] = self.convert(item)
        return ptr
    def convert_struct(self, _struct, inline_to_ptr):
        """Copy an lltype._struct; substructures reconvert their parent
        first and then index into it."""
        if _struct in self.converted:
            ptr = self.converted[_struct]
            assert inline_to_ptr is None or ptr == inline_to_ptr
            return ptr
        parent = _struct._parentstructure()
        if parent is not None and inline_to_ptr is None:
            # inlined substructure: convert the parent, then find ourselves
            # inside it by field name or array index
            ptr = self.convert(parent)
            if isinstance(_struct._parent_index, str):
                return getattr(ptr, _struct._parent_index)
            else:
                return ptr[_struct._parent_index]
        TYPE = lltype.typeOf(_struct)
        if TYPE._arrayfld is not None:
            inlinedarraylength = len(getattr(_struct, TYPE._arrayfld).items)
            size = sizeof(TYPE, inlinedarraylength)
        else:
            inlinedarraylength = None
            size = sizeof(TYPE)
        if inline_to_ptr is not None:
            ptr = inline_to_ptr
        else:
            startaddr = self.curraddress
            self.curraddress += size
            if self.gc is not None:
                typeid = self.query_types.get_typeid(TYPE)
                self.gc.init_gc_object_immortal(startaddr, typeid)
                startaddr += size_gc_header(self.gc, typeid)
                self.curraddress += size_gc_header(self.gc, typeid)
            ptr = init_object_on_address(startaddr, TYPE, inlinedarraylength)
            self.constantroots.append(ptr)
        # cache before converting fields, so cycles terminate
        self.converted[_struct] = ptr
        for name in TYPE._flds:
            FIELD = getattr(TYPE, name)
            if isinstance(FIELD, (lltype.Struct, lltype.Array)):
                self.convert(getattr(_struct, name), getattr(ptr, name))
            else:
                v = _struct._getattr(name, uninitialized_ok=True)
                if not isinstance(v, lltype._uninitialized):
                    setattr(ptr, name, self.convert(v))
        return ptr
    def convert_pointer(self, _ptr, inline_to_ptr):
        # a pointer converts to a pointer at its (converted) target
        assert inline_to_ptr is None, "can't inline pointer"
        TYPE = lltype.typeOf(_ptr)
        if _ptr._obj is not None:
            return self.convert(_ptr._obj)
        else:
            return nullptr(TYPE.TO)
    def convert_object(self, _obj, inline_to_ptr):
        # opaque/func/pyobject: keep the Python object, registered with the
        # simulator so the simulated address maps back to it
        assert inline_to_ptr is None, "can't inline function or pyobject"
        return simulatorptr(lltype.Ptr(lltype.typeOf(_obj)),
                            lladdress.get_address_of_object(_obj))
def collect_constants_and_types(graphs):
    """Walk the given flow graphs and collect every non-Void Constant plus
    every concrete type of interest (argument types and malloc'ed types).

    Returns the pair (constants, types) as dicts used as sets.
    """
    found_constants = {}
    found_types = {}
    def note_args(args):
        for arg in args:
            if not isinstance(arg, Constant):
                continue
            if arg.concretetype is lltype.Void:
                continue
            found_constants[arg] = None
            found_types[arg.concretetype] = True
    for graph in graphs:
        for block in graph.iterblocks():
            note_args(block.inputargs)
            for op in block.operations:
                note_args(op.args)
                if op.opname in ("malloc", "malloc_varsize"):
                    # the malloc'ed type itself needs a typeid later
                    found_types[op.args[0].value] = True
        for link in graph.iterlinks():
            note_args(link.args)
            if hasattr(link, "llexitcase"):
                assert not isinstance(link.llexitcase, IntegerRepr)
                found_constants[Constant(link.llexitcase)] = None
    return found_constants, found_types
class FlowGraphConstantConverter(object):
    """Moves all lltype constants of a set of flow graphs into simulated
    memory, then patches the graphs to use the converted values.

    Usage: construct, then call convert().  The size of the needed memory
    region is computed up-front so a single raw_malloc suffices.
    """
    def __init__(self, graphs, gc=None, qt=None):
        self.graphs = graphs
        self.memory = lladdress.NULL
        self.cvter = None        # LLTypeConverter, created in convert_constants
        self.total_size = 0
        self.gc = gc
        self.query_types = qt
    def collect_constants_and_types(self):
        # see the module-level helper of the same name
        self.constants, self.types = collect_constants_and_types(self.graphs)
    def calculate_size(self):
        """Compute the total number of bytes the converted constants will
        occupy.  Must mirror LLTypeConverter's allocation decisions
        exactly (including GC headers)."""
        total_size = 0
        seen = {}
        candidates = [const.value for const in self.constants.iterkeys()]
        while candidates:
            cand = candidates.pop()
            if isinstance(cand, lltype._ptr):
                # only non-null pointers contribute their target
                if cand:
                    candidates.append(cand._obj)
                continue
            elif isinstance(cand, lltype.LowLevelType):
                continue
            elif isinstance(cand, FUNCTIONTYPES):
                continue
            elif isinstance(cand, str):
                continue
            elif isinstance(lltype.typeOf(cand), lltype.Primitive):
                continue
            elif cand in seen:
                continue
            elif isinstance(cand, lltype._array):
                seen[cand] = True
                length = len(cand.items)
                total_size += sizeof(cand._TYPE, length)
                if self.gc is not None:
                    typeid = self.query_types.get_typeid(cand._TYPE)
                    total_size += size_gc_header(self.gc, typeid)
                for item in cand.items:
                    candidates.append(item)
            elif isinstance(cand, lltype._struct):
                seen[cand] = True
                parent = cand._parentstructure()
                if parent is not None:
                    # inlined substructures are accounted for by the parent
                    has_parent = True
                    candidates.append(parent)
                else:
                    has_parent = False
                TYPE = cand._TYPE
                if not has_parent:
                    if TYPE._arrayfld is not None:
                        total_size += sizeof(
                            TYPE, len(getattr(cand, TYPE._arrayfld).items))
                    else:
                        total_size += sizeof(TYPE)
                    if self.gc is not None:
                        typeid = self.query_types.get_typeid(TYPE)
                        total_size += size_gc_header(self.gc, typeid)
                for name in TYPE._flds:
                    candidates.append(getattr(cand, name))
            elif isinstance(cand, lltype._opaque):
                # opaque/func/pyobject objects are stored as one word
                # (a key registered with the simulator)
                total_size += sizeof(lltype.Signed)
            elif isinstance(cand, lltype._func):
                total_size += sizeof(lltype.Signed)
            elif isinstance(cand, lltype._pyobject):
                total_size += sizeof(lltype.Signed)
            else:
                assert 0, "don't know about %s %s" % (cand, cand.__class__)
        self.total_size = total_size
    def convert_constants(self):
        """Allocate the region and convert each constant, recording the
        simulated replacement value in self.constants."""
        self.memory = lladdress.raw_malloc(self.total_size)
        self.cvter = LLTypeConverter(self.memory, self.gc, self.query_types)
        for constant in self.constants.iterkeys():
            if isinstance(constant.value, lltype.LowLevelType):
                self.constants[constant] = constant.value
            elif isinstance(constant.value, str):
                self.constants[constant] = constant.value
            elif isinstance(constant.value, FUNCTIONTYPES):
                self.constants[constant] = constant.value
            else:
                self.constants[constant] = self.cvter.convert(constant.value)
    def patch_graphs(self):
        """Rewrite all graphs in place to refer to the converted values."""
        def patch_consts(args):
            for arg in args:
                if isinstance(arg, Constant) and arg in self.constants:
                    arg.value = self.constants[arg]
        def visit(obj):
            if isinstance(obj, Link):
                patch_consts(obj.args)
                # exitcases carry raw ll values, patched separately
                if (hasattr(obj, "llexitcase") and
                    Constant(obj.llexitcase) in self.constants):
                    obj.llexitcase = self.constants[Constant(obj.llexitcase)]
            elif isinstance(obj, Block):
                for op in obj.operations:
                    patch_consts(op.args)
        for graph in self.graphs:
            traverse(visit, graph)
    def create_type_ids(self):
        # make sure every collected type has a typeid registered
        for TYPE in self.types:
            if isinstance(TYPE, (lltype.Array, lltype.Struct)):
                #assign a typeid
                self.query_types.get_typeid(TYPE)
            elif isinstance(TYPE, lltype.Ptr):
                self.query_types.get_typeid(TYPE.TO)
    def convert(self):
        """Run the whole pipeline: collect, size, convert, patch."""
        self.collect_constants_and_types()
        self.calculate_size()
        self.convert_constants()
        self.patch_graphs()
        if self.query_types is not None:
            self.create_type_ids()
| Python |
import struct
from pypy.rpython.memory.simulator import MemorySimulator, MemorySimulatorError
from pypy.rlib.rarithmetic import r_uint
from pypy.rpython.lltypesystem import llmemory
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.memory.lltypelayout import convert_offset_to_int
from pypy.rlib.objectmodel import ComputedIntSymbolic
NULL = llmemory.NULL
class _address(object):
    """An address in the memory simulator, wrapping a plain integer.

    Address 0 is canonicalized: constructing _address(0) yields the shared
    NULL object instead of a new instance (see __new__).
    """
    def __new__(cls, intaddress=0):
        if intaddress == 0:
            # NOTE(review): NULL here is llmemory.NULL (module global);
            # presumably __init__ is then skipped since NULL is not an
            # _address instance -- confirm
            return NULL
        else:
            return object.__new__(cls)
    def __init__(self, intaddress=0):
        self.intaddress = intaddress
    def __add__(self, offset):
        # offsets may be plain ints or symbolic AddressOffsets
        if isinstance(offset, int):
            return _address(self.intaddress + offset)
        else:
            assert isinstance(offset, llmemory.AddressOffset)
            return _address(self.intaddress + convert_offset_to_int(offset))
    def __sub__(self, other):
        # address - offset -> address; address - address -> int distance
        if isinstance(other, int):
            return _address(self.intaddress - other)
        elif isinstance(other, llmemory.AddressOffset):
            return _address(self.intaddress - convert_offset_to_int(other))
        else:
            assert isinstance(other, _address)
            return self.intaddress - other.intaddress
    def __cmp__(self, other):
        return cmp(self.intaddress, other.intaddress)
    def __repr__(self):
        return "<addr: %s>" % self.intaddress
    def _load(self, fmt):
        # read struct-formatted values at this address from the simulator
        return simulator.getstruct(fmt, self.intaddress)
    def _store(self, fmt, *values):
        # XXX annoyance: suddenly a Symbolic changes into a Signed?!
        from pypy.rpython.memory.lltypelayout import convert_offset_to_int
        if len(values) == 1 and isinstance(values[0], llmemory.AddressOffset):
            values = [convert_offset_to_int(values[0])]
        elif len(values) == 1 and isinstance(values[0], ComputedIntSymbolic):
            values = [values[0].compute_fn()]
        simulator.setstruct(fmt, self.intaddress, *values)
    def __nonzero__(self):
        return self.intaddress != 0
    def _cast_to_ptr(self, EXPECTED_TYPE):
        # local import to avoid a circular dependency with lltypesimulation
        from pypy.rpython.memory.lltypesimulation import simulatorptr
        return simulatorptr(EXPECTED_TYPE, self)
    def _cast_to_int(self):
        return self.intaddress
class _accessor(object):
    """Typed indexed view over a simulated address.

    Subclasses define 'format' (struct format char), 'size' (bytes per
    item) and the convert_from/convert_to value adapters.
    """
    def __init__(self, addr):
        if addr == NULL:
            raise MemorySimulatorError("trying to access NULL pointer")
        self.intaddress = addr.intaddress
    def __getitem__(self, index):
        target = self.intaddress + index * self.size
        raw = simulator.getstruct(self.format, target)
        return self.convert_from(raw[0])
    def __setitem__(self, index, value):
        target = self.intaddress + index * self.size
        simulator.setstruct(self.format, target, self.convert_to(value))
class _signed_accessor(_accessor):
    """Accessor for machine-sized signed integers ('l' struct format)."""
    format = "l"
    size = struct.calcsize("l")
    convert_from = int
    def convert_to(self, offset):
        # Symbolic AddressOffsets may be stored where a Signed lives;
        # collapse them to a concrete int first (same annoyance as in
        # _address._store).
        from pypy.rpython.memory.lltypelayout import convert_offset_to_int
        if not isinstance(offset, llmemory.AddressOffset):
            return int(offset)
        return convert_offset_to_int(offset)
class _unsigned_accessor(_accessor):
    """Accessor for machine-sized unsigned integers ('L' struct format)."""
    format = "L"
    size = struct.calcsize("L")
    convert_from = r_uint
    convert_to = long
class _char_accessor(_accessor):
    """Accessor for single bytes ('c' struct format)."""
    format = "c"
    size = struct.calcsize("c")
    convert_from = str
    convert_to = str
class _address_accessor(_accessor):
    """Accessor for addresses, stored as pointer-sized integers.

    The struct format and byte size are taken from pypy.tool.uid by
    importing them directly as the 'format'/'size' class attributes.
    """
    from pypy.tool.uid import HUGEVAL_FMT as format
    from pypy.tool.uid import HUGEVAL_BYTES as size
    convert_from = _address
    convert_to = staticmethod(lambda addr: addr.intaddress)
# Install the typed accessor views on _address, so that e.g.
# addr.signed[i] reads/writes the i-th signed word at addr.
_address.signed = property(_signed_accessor)
_address.unsigned = property(_unsigned_accessor)
_address.char = property(_char_accessor)
_address.address = property(_address_accessor)
# The single global memory simulator instance backing all _address objects.
simulator = MemorySimulator()
def raw_malloc(size):
    """Allocate 'size' bytes in the simulator; returns the new _address."""
    block = simulator.malloc(size)
    return _address(block)
def raw_malloc_usage(size):
    # The simulator has no allocation overhead: raw_malloc(size) consumes
    # exactly 'size' bytes.
    assert isinstance(size, int)
    return size
def raw_free(addr):
    """Release a block previously obtained from raw_malloc."""
    intaddr = addr.intaddress
    simulator.free(intaddr)
def raw_memclear(addr, size):
    """Zero 'size' bytes at addr; fakeaddresses are handled by llmemory."""
    from pypy.rpython.lltypesystem.llmemory import fakeaddress
    from pypy.rpython.lltypesystem.llmemory import raw_memclear as ll_memclear
    if not isinstance(addr, fakeaddress):
        simulator.memclear(addr.intaddress, size)
    else:
        ll_memclear(addr, size)
def raw_memcopy(addr1, addr2, size):
    """Copy 'size' bytes from addr1 to addr2 inside the simulator."""
    source = addr1.intaddress
    destination = addr2.intaddress
    simulator.memcopy(source, destination, size)
def get_address_of_object(obj):
    """Register a Python object with the simulator; return its address."""
    intaddr = simulator.get_address_of_object(obj)
    return _address(intaddr)
def get_py_object(address):
    """Inverse of get_address_of_object: recover the Python object."""
    intaddr = address.intaddress
    return simulator.get_py_object(intaddr)
# Make _address interchangeable with llmemory.Address at the lltype level.
_address._TYPE = llmemory.Address

# The access kinds exposed by the accessor properties above, mapped to the
# lltype type each one stores.
supported_access_types = {"signed": lltype.Signed,
                          "unsigned": lltype.Unsigned,
                          "char": lltype.Char,
                          "address": llmemory.Address,
                          }
| Python |
import py
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.objspace.flow.model import SpaceOperation, Variable, Constant, \
c_last_exception, checkgraph
from pypy.translator.unsimplify import insert_empty_block
from pypy.translator.unsimplify import insert_empty_startblock
from pypy.translator.unsimplify import starts_with_empty_block
from pypy.translator.backendopt.support import var_needsgc
from pypy.translator.backendopt import inline
from pypy.translator.backendopt import graphanalyze
from pypy.translator.backendopt.canraise import RaiseAnalyzer
from pypy.translator.backendopt.ssa import DataFlowFamilyBuilder
from pypy.annotation import model as annmodel
from pypy.rpython import rmodel, annlowlevel
from pypy.rpython.memory import gc, lladdress
from pypy.rpython.annlowlevel import MixLevelHelperAnnotator
from pypy.rpython.rtyper import LowLevelOpList
from pypy.rpython.rbuiltin import gen_cast
from pypy.rlib.rarithmetic import ovfcheck
import sets, os, sys
def var_ispyobj(var):
    """True if 'var' holds a CPython-managed PyObject pointer.

    Variables without a concretetype are assumed to be PyObjPtr.
    """
    if not hasattr(var, 'concretetype'):
        # no annotation at all: assume PyObjPtr
        return True
    ct = var.concretetype
    return isinstance(ct, lltype.Ptr) and ct.TO._gckind == 'cpy'
# Shorthand for the pointer-to-PyObject type used throughout this module.
PyObjPtr = lltype.Ptr(lltype.PyObject)
class GcHighLevelOp(object):
    """Wrapper around one SpaceOperation while it is being GC-transformed.

    Gives the per-operation gct_* handlers convenience methods to emit
    replacement operations into 'llops'.
    """
    def __init__(self, gctransformer, op, llops):
        self.gctransformer = gctransformer
        self.spaceop = op
        self.llops = llops
    def dispatch(self):
        """Invoke the transformer's gct_<opname> handler (or default) and
        do the common push_alive bookkeeping for the result."""
        gct = self.gctransformer
        opname = self.spaceop.opname
        v_result = self.spaceop.result
        meth = getattr(gct, 'gct_' + opname, gct.default)
        meth(self)
        if var_needsgc(v_result):
            gct.livevars.append(v_result)
            if var_ispyobj(v_result):
                # pyobj results of these ops are borrowed references and
                # must be incref'ed
                if opname in ('getfield', 'getarrayitem', 'same_as',
                              'cast_pointer', 'getsubstruct'):
                    # XXX more operations?
                    gct.push_alive(v_result)
            elif opname not in ('direct_call', 'indirect_call'):
                gct.push_alive(v_result)
    def rename(self, newopname):
        # re-emit the same operation under a different opname
        self.llops.append(
            SpaceOperation(newopname, self.spaceop.args, self.spaceop.result))
    def inputargs(self):
        return self.spaceop.args
    def genop(self, opname, args, resulttype=None, resultvar=None):
        """Emit a new operation; give either a result type (fresh var) or
        an explicit result variable, never both."""
        assert resulttype is None or resultvar is None
        if resultvar is None:
            return self.llops.genop(opname, args,
                                    resulttype=resulttype)
        else:
            newop = SpaceOperation(opname, args, resultvar)
            self.llops.append(newop)
            return resultvar
    def cast_result(self, var):
        """Bind 'var' to the original operation's result variable, casting
        if the concrete types differ."""
        v_result = self.spaceop.result
        resulttype = getattr(v_result, 'concretetype', PyObjPtr)
        curtype = getattr(var, 'concretetype', PyObjPtr)
        if curtype == resulttype:
            self.genop('same_as', [var], resultvar=v_result)
        else:
            v_new = gen_cast(self.llops, resulttype, var)
            assert v_new != var
            # redirect the cast's result onto the original result variable
            self.llops[-1].result = v_result
class GCTransformer(object):
    """Base class for the GC transformation of flow graphs.

    Rewrites every block's operations through gct_* handlers and inserts
    push_alive/pop_alive bookkeeping so that GC-managed values are kept
    alive exactly as long as needed.  Subclasses implement the actual
    push/pop policies (refcounting, Boehm no-ops, ...).
    """
    # set once finish_helpers() has run; no new helpers may be annotated after
    finished_helpers = False
    def __init__(self, translator, inline=False):
        self.translator = translator
        self.seen_graphs = {}
        self.minimal_transform = {}
        if translator:
            self.mixlevelannotator = MixLevelHelperAnnotator(translator.rtyper)
        else:
            self.mixlevelannotator = None
        self.inline = inline
        if translator and inline:
            self.lltype_to_classdef = translator.rtyper.lltype_to_classdef_mapping()
        self.graphs_to_inline = {}
        # MinimalGCTransformer is wired onto the class after its definition
        if self.MinimalGCTransformer:
            self.minimalgctransformer = self.MinimalGCTransformer(self)
        else:
            self.minimalgctransformer = None
    def get_lltype_of_exception_value(self):
        # without a translator, fall back to a plain PyObject pointer
        if self.translator is not None:
            exceptiondata = self.translator.rtyper.getexceptiondata()
            return exceptiondata.lltype_of_exception_value
        else:
            return lltype.Ptr(lltype.PyObject)
    def need_minimal_transform(self, graph):
        # mark a helper graph: it only gets the minimal transformation
        self.seen_graphs[graph] = True
        self.minimal_transform[graph] = True
    def inline_helpers(self, graph):
        """Inline all registered helper graphs into 'graph'."""
        if self.inline:
            raise_analyzer = RaiseAnalyzer(self.translator)
            for inline_graph in self.graphs_to_inline:
                try:
                    # XXX quite inefficient: we go over the function lots of times
                    inline.inline_function(self.translator, inline_graph, graph,
                                           self.lltype_to_classdef,
                                           raise_analyzer)
                except inline.CannotInline, e:
                    print 'CANNOT INLINE:', e
                    print '\t%s into %s' % (inline_graph, graph)
            checkgraph(graph)
    def compute_borrowed_vars(self, graph):
        """Return a predicate telling whether a variable only borrows its
        reference (and hence needs no push/pop bookkeeping)."""
        # the input args are borrowed, and stay borrowed for as long as they
        # are not merged with other values.
        var_families = DataFlowFamilyBuilder(graph).get_variable_families()
        borrowed_reps = {}
        for v in graph.getargs():
            borrowed_reps[var_families.find_rep(v)] = True
        # no support for returning borrowed values so far
        retvar = graph.getreturnvar()
        def is_borrowed(v1):
            return (var_families.find_rep(v1) in borrowed_reps
                    and v1 is not retvar)
        return is_borrowed
    def transform_block(self, block, is_borrowed):
        """Transform one block's operations and record, per exit link, how
        many pops/pushes are needed (in self.links_to_split)."""
        self.llops = LowLevelOpList()
        #self.curr_block = block
        self.livevars = [var for var in block.inputargs
                         if var_needsgc(var) and not is_borrowed(var)]
        for op in block.operations:
            hop = GcHighLevelOp(self, op, self.llops)
            hop.dispatch()
        if len(block.exits) != 0: # i.e not the return block
            assert block.exitswitch is not c_last_exception
            # variables live here but passed along no exit die in this block
            deadinallexits = sets.Set(self.livevars)
            for link in block.exits:
                deadinallexits.difference_update(sets.Set(link.args))
            for var in deadinallexits:
                self.pop_alive(var)
            for link in block.exits:
                # livecounts: +1 = pop needed across the link,
                #             -1 = push needed (borrowed -> owned)
                livecounts = dict.fromkeys(sets.Set(self.livevars) - deadinallexits, 1)
                for v, v2 in zip(link.args, link.target.inputargs):
                    if is_borrowed(v2):
                        continue
                    if v in livecounts:
                        livecounts[v] -= 1
                    elif var_needsgc(v):
                        # 'v' is typically a Constant here, but it can be
                        # a borrowed variable going into a non-borrowed one
                        livecounts[v] = -1
                self.links_to_split[link] = livecounts
        block.operations[:] = self.llops
        self.llops = None
        self.livevars = None
    def transform_graph(self, graph):
        """Transform a whole graph, splitting links where push/pop
        operations must be inserted."""
        if graph in self.minimal_transform:
            if self.minimalgctransformer:
                self.minimalgctransformer.transform_graph(graph)
            del self.minimal_transform[graph]
            return
        if graph in self.seen_graphs:
            return
        self.seen_graphs[graph] = True
        self.links_to_split = {} # link -> vars to pop_alive across the link
        # for sanity, we need an empty block at the start of the graph
        inserted_empty_startblock = False
        if not starts_with_empty_block(graph):
            insert_empty_startblock(self.translator.annotator, graph)
            inserted_empty_startblock = True
        is_borrowed = self.compute_borrowed_vars(graph)
        for block in graph.iterblocks():
            self.transform_block(block, is_borrowed)
        for link, livecounts in self.links_to_split.iteritems():
            llops = LowLevelOpList()
            for var, livecount in livecounts.iteritems():
                for i in range(livecount):
                    self.pop_alive(var, llops)
                for i in range(-livecount):
                    self.push_alive(var, llops)
            if llops:
                if link.prevblock.exitswitch is None:
                    # unconditional exit: append directly to the block
                    link.prevblock.operations.extend(llops)
                else:
                    insert_empty_block(self.translator.annotator, link, llops)
        # remove the empty block at the start of the graph, which should
        # still be empty (but let's check)
        if starts_with_empty_block(graph) and inserted_empty_startblock:
            old_startblock = graph.startblock
            graph.startblock.isstartblock = False
            graph.startblock = graph.startblock.exits[0].target
            graph.startblock.isstartblock = True
        checkgraph(graph)
        self.links_to_split = None
        # record the cleanup ops needed when an exception vanishes
        v = Variable('vanishing_exc_value')
        v.concretetype = self.get_lltype_of_exception_value()
        llops = LowLevelOpList()
        self.pop_alive(v, llops)
        graph.exc_cleanup = (v, list(llops))
        return is_borrowed # xxx for tests only
    def annotate_helper(self, ll_helper, ll_args, ll_result, inline=False):
        """Annotate an RPython helper and return a delayed function ptr."""
        assert not self.finished_helpers
        args_s = map(annmodel.lltype_to_annotation, ll_args)
        s_result = annmodel.lltype_to_annotation(ll_result)
        graph = self.mixlevelannotator.getgraph(ll_helper, args_s, s_result)
        # the produced graphs does not need to be fully transformed
        self.need_minimal_transform(graph)
        if inline:
            self.graphs_to_inline[graph] = True
        return self.mixlevelannotator.graph2delayed(graph)
    def inittime_helper(self, ll_helper, ll_args, ll_result, inline=True):
        # like annotate_helper, but wrapped as a flow-graph Constant
        ptr = self.annotate_helper(ll_helper, ll_args, ll_result, inline=inline)
        return Constant(ptr, lltype.typeOf(ptr))
    def finish_helpers(self):
        if self.translator is not None:
            self.mixlevelannotator.finish_annotate()
        self.finished_helpers = True
        if self.translator is not None:
            self.mixlevelannotator.finish_rtype()
    def finish_tables(self):
        # hook for subclasses that build runtime tables
        pass
    def finish(self):
        self.finish_helpers()
        self.finish_tables()
    def transform_generic_set(self, hop):
        """Rewrite a set-operation so the old value is popped after the
        new one is pushed (read old, push new, bare-set, pop old)."""
        opname = hop.spaceop.opname
        v_new = hop.spaceop.args[-1]
        # 'g' + opname[1:] turns setfield/setarrayitem into the getter
        v_old = hop.genop('g' + opname[1:],
                          hop.inputargs()[:-1],
                          resulttype=v_new.concretetype)
        self.push_alive(v_new)
        hop.rename('bare_' + opname)
        self.pop_alive(v_old)
    def push_alive(self, var, llops=None):
        # dispatch on whether var is a CPython PyObject or a GC pointer
        if llops is None:
            llops = self.llops
        if var_ispyobj(var):
            self.push_alive_pyobj(var, llops)
        else:
            self.push_alive_nopyobj(var, llops)
    def pop_alive(self, var, llops=None):
        if llops is None:
            llops = self.llops
        if var_ispyobj(var):
            self.pop_alive_pyobj(var, llops)
        else:
            self.pop_alive_nopyobj(var, llops)
    def push_alive_pyobj(self, var, llops):
        # normalize to PyObjPtr before emitting the incref operation
        if hasattr(var, 'concretetype') and var.concretetype != PyObjPtr:
            var = gen_cast(llops, PyObjPtr, var)
        llops.genop("gc_push_alive_pyobj", [var])
    def pop_alive_pyobj(self, var, llops):
        if hasattr(var, 'concretetype') and var.concretetype != PyObjPtr:
            var = gen_cast(llops, PyObjPtr, var)
        llops.genop("gc_pop_alive_pyobj", [var])
    def push_alive_nopyobj(self, var, llops):
        # default policy: nothing to do; refcounting subclasses override
        pass
    def pop_alive_nopyobj(self, var, llops):
        pass
    def var_needs_set_transform(self, var):
        # by default only PyObject stores need the set transformation
        return var_ispyobj(var)
    def default(self, hop):
        # operations without a gct_* handler pass through unchanged
        hop.llops.append(hop.spaceop)
    def gct_setfield(self, hop):
        if self.var_needs_set_transform(hop.spaceop.args[-1]):
            self.transform_generic_set(hop)
        else:
            hop.rename('bare_' + hop.spaceop.opname)
    gct_setarrayitem = gct_setfield
    #def gct_safe_call(self, hop):
    #    hop.rename("direct_call")
    def gct_zero_gc_pointers_inside(self, hop):
        # no-op by default; GCs needing zeroed pointers override this
        pass
class MinimalGCTransformer(GCTransformer):
    """Transformer applied to helper graphs produced by a parent
    GCTransformer: rewrites operations but inserts no push/pop
    bookkeeping at all (the helpers manage liveness themselves)."""
    def __init__(self, parenttransformer):
        GCTransformer.__init__(self, parenttransformer.translator)
        self.parenttransformer = parenttransformer
    def push_alive(self, var, llops=None):
        # deliberately no bookkeeping
        pass
    def pop_alive(self, var, llops=None):
        pass
# Wire the class-level hook read by GCTransformer.__init__ ...
GCTransformer.MinimalGCTransformer = MinimalGCTransformer
# ... and break the recursion: a minimal transformer has no minimal one.
MinimalGCTransformer.MinimalGCTransformer = None
| Python |
from pypy.rpython.memory.gctransform.transform import GCTransformer
from pypy.rpython.memory.gctransform.support import type_contains_pyobjs, \
get_rtti, _static_deallocator_body_for_type, LLTransformerOp, ll_call_destructor
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rpython import rmodel
from pypy.rlib.rarithmetic import ovfcheck
from pypy.objspace.flow.model import Constant
class BoehmGCTransformer(GCTransformer):
FINALIZER_PTR = lltype.Ptr(lltype.FuncType([llmemory.Address], lltype.Void))
    def __init__(self, translator, inline=False):
        """Set up the Boehm-specific malloc helpers and annotate them.

        The nested ll_* functions are RPython helpers that will be
        compiled into the target program; they call the boehm_* llops.
        """
        super(BoehmGCTransformer, self).__init__(translator, inline=inline)
        self.finalizer_funcptrs = {}
        # prebuilt exception instance, raised on allocation failure
        memoryError = MemoryError()
        def ll_malloc_fixedsize(size, finalizer):
            result = llop.boehm_malloc(llmemory.Address, size)
            if not result:
                raise memoryError
            if finalizer: # XXX runtime check here is bad?
                llop.boehm_register_finalizer(lltype.Void, result, finalizer)
            return result
        def ll_malloc_fixedsize_atomic(size, finalizer):
            # atomic = contains no pointers, so Boehm need not scan it
            result = llop.boehm_malloc_atomic(llmemory.Address, size)
            if not result:
                raise memoryError
            if finalizer: # XXX runtime check here is bad?
                llop.boehm_register_finalizer(lltype.Void, result, finalizer)
            return result
        # XXX, do we need/want an atomic version of this function?
        def ll_malloc_varsize_no_length(length, size, itemsize):
            # guard the size computation against overflow
            try:
                varsize = ovfcheck(itemsize * length)
                tot_size = ovfcheck(size + varsize)
            except OverflowError:
                raise memoryError
            result = llop.boehm_malloc(llmemory.Address, tot_size)
            if not result:
                raise memoryError
            return result
        def ll_malloc_varsize(length, size, itemsize, lengthoffset):
            result = ll_malloc_varsize_no_length(length, size, itemsize)
            # store the length field inside the fresh object
            (result + lengthoffset).signed[0] = length
            return result
        if self.translator:
            self.malloc_fixedsize_ptr = self.inittime_helper(
                ll_malloc_fixedsize, [lltype.Signed, self.FINALIZER_PTR], llmemory.Address)
            self.malloc_fixedsize_atomic_ptr = self.inittime_helper(
                ll_malloc_fixedsize_atomic, [lltype.Signed, self.FINALIZER_PTR], llmemory.Address)
            self.malloc_varsize_no_length_ptr = self.inittime_helper(
                ll_malloc_varsize_no_length, [lltype.Signed]*3, llmemory.Address, inline=False)
            self.malloc_varsize_ptr = self.inittime_helper(
                ll_malloc_varsize, [lltype.Signed]*4, llmemory.Address, inline=False)
            self.mixlevelannotator.finish()   # for now
            self.mixlevelannotator.backend_optimize()
def push_alive_nopyobj(self, var, llops):
pass
def pop_alive_nopyobj(self, var, llops):
pass
def gct_gc_protect(self, hop):
""" for boehm it is enough to do nothing"""
pass
def gct_gc_unprotect(self, hop):
""" for boehm it is enough to do nothing"""
pass
def gct_malloc(self, hop):
TYPE = hop.spaceop.result.concretetype.TO
assert not TYPE._is_varsize()
c_size = rmodel.inputconst(lltype.Signed, llmemory.sizeof(TYPE))
if TYPE._is_atomic():
funcptr = self.malloc_fixedsize_atomic_ptr
else:
funcptr = self.malloc_fixedsize_ptr
c_finalizer_ptr = Constant(self.finalizer_funcptr_for_type(TYPE), self.FINALIZER_PTR)
v_raw = hop.genop("direct_call",
[funcptr, c_size, c_finalizer_ptr],
resulttype=llmemory.Address)
hop.cast_result(v_raw)
# XXX In theory this is wrong:
gct_zero_malloc = gct_malloc
def gct_malloc_varsize(self, hop):
def intconst(c): return rmodel.inputconst(lltype.Signed, c)
op = hop.spaceop
TYPE = op.result.concretetype.TO
assert TYPE._is_varsize()
assert not self.finalizer_funcptr_for_type(TYPE)
if isinstance(TYPE, lltype.Struct):
ARRAY = TYPE._flds[TYPE._arrayfld]
else:
ARRAY = TYPE
assert isinstance(ARRAY, lltype.Array)
if ARRAY._hints.get('isrpystring', False):
c_const_size = intconst(llmemory.sizeof(TYPE, 1))
else:
c_const_size = intconst(llmemory.sizeof(TYPE, 0))
c_item_size = intconst(llmemory.sizeof(ARRAY.OF))
if ARRAY._hints.get("nolength", False):
v_raw = hop.genop("direct_call",
[self.malloc_varsize_no_length_ptr, op.args[-1],
c_const_size, c_item_size],
resulttype=llmemory.Address)
else:
if isinstance(TYPE, lltype.Struct):
offset_to_length = llmemory.FieldOffset(TYPE, TYPE._arrayfld) + \
llmemory.ArrayLengthOffset(ARRAY)
else:
offset_to_length = llmemory.ArrayLengthOffset(ARRAY)
v_raw = hop.genop("direct_call",
[self.malloc_varsize_ptr, op.args[-1],
c_const_size, c_item_size, intconst(offset_to_length)],
resulttype=llmemory.Address)
hop.cast_result(v_raw)
gct_zero_malloc_varsize = gct_malloc_varsize
def finalizer_funcptr_for_type(self, TYPE):
if TYPE in self.finalizer_funcptrs:
return self.finalizer_funcptrs[TYPE]
rtti = get_rtti(TYPE)
if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
destrptr = rtti._obj.destructor_funcptr
DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]
else:
destrptr = None
DESTR_ARG = None
if type_contains_pyobjs(TYPE):
if destrptr:
raise Exception("can't mix PyObjects and __del__ with Boehm")
static_body = '\n'.join(_static_deallocator_body_for_type('v', TYPE))
d = {'pop_alive': LLTransformerOp(self.pop_alive),
'PTR_TYPE':lltype.Ptr(TYPE),
'cast_adr_to_ptr': llmemory.cast_adr_to_ptr}
src = ("def ll_finalizer(addr):\n"
" v = cast_adr_to_ptr(addr, PTR_TYPE)\n"
"%s\n")%(static_body,)
exec src in d
fptr = self.annotate_helper(d['ll_finalizer'], [llmemory.Address], lltype.Void)
elif destrptr:
EXC_INSTANCE_TYPE = self.translator.rtyper.exceptiondata.lltype_of_exception_value
def ll_finalizer(addr):
exc_instance = llop.gc_fetch_exception(EXC_INSTANCE_TYPE)
v = llmemory.cast_adr_to_ptr(addr, DESTR_ARG)
ll_call_destructor(destrptr, v)
llop.gc_restore_exception(lltype.Void, exc_instance)
fptr = self.annotate_helper(ll_finalizer, [llmemory.Address], lltype.Void)
else:
fptr = lltype.nullptr(self.FINALIZER_PTR.TO)
self.finalizer_funcptrs[TYPE] = fptr
return fptr
| Python |
# calculate some statistics about the number of variables that need
# to be cared for across a call
from pypy.rpython.lltypesystem import lltype
# operations across which we want liveness statistics
relevant_ops = ["direct_call", "indirect_call", "malloc", "malloc_varsize"]
def filter_for_ptr(arg):
    """Keep any pointer variable (GC-managed or not)."""
    return isinstance(arg.concretetype, lltype.Ptr)
def filter_for_nongcptr(arg):
    """Keep only pointer variables that are *not* GC-managed."""
    return isinstance(arg.concretetype, lltype.Ptr) and not arg.concretetype._needsgc()
def relevant_gcvars_block(block, filter=filter_for_ptr):
    """For each operation of `block` whose opname is in `relevant_ops`,
    return the number of variables accepted by `filter` that are live
    across that operation (i.e. live both before and after it).

    Uses the builtin `set` type instead of the long-deprecated `sets.Set`
    class, and computes the live sets iteratively instead of via deep
    recursion over the operation list.
    """
    result = []
    def filter_ptr(args):
        return [arg for arg in args if filter(arg)]
    def live_vars_before(index):
        # everything filtered from the inputargs plus from all
        # operations strictly before `index`
        live = set(filter_ptr(block.inputargs))
        for op in block.operations[:index]:
            live.update(filter_ptr(op.args + [op.result]))
        return live
    def live_vars_after(index):
        # everything filtered from the exit links plus from all
        # operations strictly after `index`
        live = set()
        for exit in block.exits:
            live.update(filter_ptr(exit.args))
        for op in block.operations[index + 1:]:
            live.update(filter_ptr(op.args + [op.result]))
        return live
    for i, op in enumerate(block.operations):
        if op.opname not in relevant_ops:
            continue
        live_before = live_vars_before(i)
        live_after = live_vars_after(i)
        result.append(len(live_before.intersection(live_after)))
    return result
def relevant_gcvars_graph(graph, filter=filter_for_ptr):
    """Concatenate the per-block liveness counts over all blocks of `graph`."""
    counts = []
    for block in graph.iterblocks():
        counts.extend(relevant_gcvars_block(block, filter))
    return counts
def relevant_gcvars(t, filter=filter_for_ptr):
    """Concatenate the per-graph liveness counts over all graphs of the
    translator `t`."""
    counts = []
    for graph in t.graphs:
        counts += relevant_gcvars_graph(graph, filter)
    return counts
| Python |
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.annotation import model as annmodel
import os
def var_ispyobj(var):
    """Return True if `var` holds a PyObject pointer.

    Variables carrying no 'concretetype' attribute are assumed to be
    PyObjPtr.
    """
    if not hasattr(var, 'concretetype'):
        # assume PyObjPtr
        return True
    TYPE = var.concretetype
    if isinstance(TYPE, lltype.Ptr):
        return TYPE.TO._gckind == 'cpy'
    return False
# shorthand for the low-level pointer-to-PyObject type
PyObjPtr = lltype.Ptr(lltype.PyObject)
def find_gc_ptrs_in_type(TYPE):
    """Return the list of GC pointer types reachable inside TYPE,
    descending into arrays and struct fields.  Raises for GcOpaqueType,
    whose contents cannot be inspected."""
    if isinstance(TYPE, lltype.Array):
        return find_gc_ptrs_in_type(TYPE.OF)
    if isinstance(TYPE, lltype.Struct):
        collected = []
        for name in TYPE._names:
            collected.extend(find_gc_ptrs_in_type(TYPE._flds[name]))
        return collected
    if isinstance(TYPE, lltype.Ptr) and TYPE._needsgc():
        return [TYPE]
    if isinstance(TYPE, lltype.GcOpaqueType):
        # heuristic: in theory the same problem exists with OpaqueType, but
        # we use OpaqueType for other things too that we know are safely
        # empty of further gc pointers
        raise Exception("don't know what is in %r" % (TYPE,))
    return []
def type_contains_pyobjs(TYPE):
    """Return True if TYPE contains, at any nesting depth, a pointer to a
    'cpy'-flavoured (PyObject) structure.

    Fix: dropped the unused local list that the Struct branch allocated
    but never read.
    """
    if isinstance(TYPE, lltype.Array):
        return type_contains_pyobjs(TYPE.OF)
    elif isinstance(TYPE, lltype.Struct):
        for name in TYPE._names:
            if type_contains_pyobjs(TYPE._flds[name]):
                return True
        return False
    elif isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'cpy':
        return True
    else:
        return False
def get_rtti(TYPE):
    """Return the runtime type info attached to TYPE, or None when TYPE
    is not an RttiStruct or has no rtti attached."""
    if not isinstance(TYPE, lltype.RttiStruct):
        return None
    try:
        return lltype.getRuntimeTypeInfo(TYPE)
    except ValueError:
        return None
def _static_deallocator_body_for_type(v, TYPE, depth=1):
    """Yield the source lines of a deallocator body for variable `v` of
    type TYPE: a 'pop_alive(...)' line for every gc pointer reachable
    inside TYPE, with while-loops generated to walk arrays.

    `depth` controls both the indentation prefix of the emitted lines and
    the numbering of the generated temporaries (i_N, l_N, v_N).  Yields
    nothing when TYPE contains no gc pointers.
    """
    if isinstance(TYPE, lltype.Array):
        inner = list(_static_deallocator_body_for_type('v_%i'%depth, TYPE.OF, depth+1))
        if inner:
            # emit a counting loop over the array items
            yield ' '*depth + 'i_%d = 0'%(depth,)
            yield ' '*depth + 'l_%d = len(%s)'%(depth, v)
            yield ' '*depth + 'while i_%d < l_%d:'%(depth, depth)
            yield ' '*depth + ' v_%d = %s[i_%d]'%(depth, v, depth)
            for line in inner:
                yield line
            yield ' '*depth + ' i_%d += 1'%(depth,)
    elif isinstance(TYPE, lltype.Struct):
        for name in TYPE._names:
            inner = list(_static_deallocator_body_for_type(
                v + '_' + name, TYPE._flds[name], depth))
            if inner:
                # bind the field to a local before descending into it
                yield ' '*depth + v + '_' + name + ' = ' + v + '.' + name
                for line in inner:
                    yield line
    elif isinstance(TYPE, lltype.Ptr) and TYPE._needsgc():
        yield ' '*depth + 'pop_alive(%s)'%v
class LLTransformerOp(object):
    """Objects that can be called in ll functions.
    Their calls are replaced by a simple operation of the GC transformer,
    e.g. ll_pop_alive.  See LLTransformerOpEntry below for the
    annotation/specialization hooks that perform the replacement.
    """
    def __init__(self, transformer_method):
        # bound method of a GCTransformer, invoked at specialization time
        self.transformer_method = transformer_method
class LLTransformerOpEntry(ExtRegistryEntry):
    "Annotation and specialization of LLTransformerOp() instances."
    _type_ = LLTransformerOp
    def compute_result_annotation(self, s_arg):
        # a call to an LLTransformerOp annotates as returning None
        return annmodel.s_None
    def specialize_call(self, hop):
        # replace the call by whatever operations the transformer method
        # emits for the single argument
        op = self.instance # the LLTransformerOp instance
        op.transformer_method(hop.args_v[0], hop.llops)
        hop.exception_cannot_occur()
        return hop.inputconst(hop.r_result.lowleveltype, hop.s_result.const)
def ll_call_destructor(destrptr, destr_v):
    """Invoke destrptr(destr_v), swallowing any exception it raises.

    A note is written to file descriptor 2 when the destructor fails;
    errors from that write are ignored as well.
    """
    try:
        destrptr(destr_v)
        return
    except:
        pass
    try:
        os.write(2, "a destructor raised an exception, ignoring it\n")
    except:
        pass
| Python |
from pypy.rpython.memory.gctransform.transform import \
MinimalGCTransformer, var_ispyobj
from pypy.rpython.memory.gctransform.framework import \
FrameworkGCTransformer
from pypy.rpython.lltypesystem import lltype, llmemory
class StacklessFrameworkMinimalGCTransformer(MinimalGCTransformer):
    """MinimalGCTransformer variant installed by
    StacklessFrameworkGCTransformer (see its class attribute below)."""
    def gct_flavored_malloc(self, hop):
        # 'gc_nocollect' allocations are delegated to the full parent
        # transformer; every other flavor takes the default path.
        if hop.spaceop.args[0].value == 'gc_nocollect':
            return self.parenttransformer.gct_flavored_malloc(hop)
        self.default(hop)
    gct_flavored_malloc_varsize = gct_flavored_malloc
class StacklessFrameworkGCTransformer(FrameworkGCTransformer):
    """FrameworkGCTransformer variant for stackless translation: roots are
    discovered from a stack_capture()'d frame stored in a static_roots
    slot, instead of from an explicit root stack."""
    use_stackless = True
    extra_static_slots = 1 # for the stack_capture()'d frame
    MinimalGCTransformer = StacklessFrameworkMinimalGCTransformer
    def __init__(self, translator):
        FrameworkGCTransformer.__init__(self, translator)
        # and now, fun fun fun, we need to inline malloc_fixedsize
        # manually into all 'malloc' operation users, because inlining
        # it after it has been stackless transformed is both a Very
        # Bad Idea and forbidden by the fact that stackless transform
        # makes it self-recursive!  Argh.
##         self.replace_and_inline_malloc_already_now()
        # nothing left to inline during code generation
        self.inline = False
##     def replace_and_inline_malloc_already_now(self):
##         for graph in self.translator.graphs:
##             any_malloc = False
##             for block in graph.iterblocks():
##                 if block.operations:
##                     newops = []
##                     for op in block.operations:
##                         if op.opname.startswith('malloc'):
##                             any_malloc = True
##                             ops = self.replace_malloc(op, [], block)
##                             if isinstance(ops, tuple):
##                                 ops = ops[0]
##                             newops.extend(ops)
##                         else:
##                             newops.append(op)
##                     block.operations = newops
##             if any_malloc:
##                 self.inline_helpers(graph)
    def build_stack_root_iterator(self):
        """Return a StackRootIterator class whose instances walk the
        static root range and the static_roots array; a freshly
        stack_capture()'d frame is stored in the array on creation."""
        from pypy.rlib.rstack import stack_capture
        sizeofaddr = llmemory.sizeof(llmemory.Address)
        gcdata = self.gcdata
        class StackRootIterator:
            _alloc_flavor_ = 'raw'
            def setup_root_stack():
                # no explicit root stack in the stackless flavor
                pass
            setup_root_stack = staticmethod(setup_root_stack)
            need_root_stack = False
            def __init__(self):
                # capture the current stack and keep its address in the
                # last slot of static_roots (the extra_static_slots one)
                frame = llmemory.cast_ptr_to_adr(stack_capture())
                self.static_current = gcdata.static_root_start
                index = len(gcdata.static_roots)
                self.static_roots_index = index
                gcdata.static_roots[index-1] = frame
            def pop(self):
                # walk the static root address range first...
                while self.static_current != gcdata.static_root_end:
                    result = self.static_current
                    self.static_current += sizeofaddr
                    if result.address[0].address[0] != llmemory.NULL:
                        return result.address[0]
                # ...then the static_roots array itself, backwards;
                # NULL signals exhaustion
                i = self.static_roots_index
                if i > 0:
                    i -= 1
                    self.static_roots_index = i
                    p = lltype.direct_arrayitems(gcdata.static_roots)
                    p = lltype.direct_ptradd(p, i)
                    return llmemory.cast_ptr_to_adr(p)
                return llmemory.NULL
        return StackRootIterator
| Python |
import py
from pypy.rpython.memory.gctransform.transform import GCTransformer
from pypy.rpython.memory.gctransform.support import find_gc_ptrs_in_type, \
get_rtti, _static_deallocator_body_for_type, LLTransformerOp, ll_call_destructor
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.translator.backendopt.support import var_needsgc
from pypy.rpython import rmodel
from pypy.rpython.memory import lladdress
from pypy.rpython.memory.gcheader import GCHeaderBuilder
from pypy.rlib.rarithmetic import ovfcheck
from pypy.rpython.rbuiltin import gen_cast
import sys
# debugging aid used by the commented-out print_call_chain() helper below
counts = {}
## def print_call_chain(ob):
## import sys
## f = sys._getframe(1)
## stack = []
## flag = False
## while f:
## if f.f_locals.get('self') is ob:
## stack.append((f.f_code.co_name, f.f_locals.get('TYPE')))
## if not flag:
## counts[f.f_code.co_name] = counts.get(f.f_code.co_name, 0) + 1
## print counts
## flag = True
## f = f.f_back
## stack.reverse()
## for i, (a, b) in enumerate(stack):
## print ' '*i, a, repr(b)[:100-i-len(a)], id(b)
# signature shared by the deallocator helpers: object address in, nothing out
ADDRESS_VOID_FUNC = lltype.FuncType([llmemory.Address], lltype.Void)
class RefcountingGCTransformer(GCTransformer):
    """GCTransformer implementing naive reference counting.

    Each object gets a header with a 'refcount' field; push_alive/pop_alive
    become incref/decref calls, and deallocators (static or rtti-dispatched
    dynamic ones) are generated per type.
    """
    # the GC header prepended to every object
    HDR = lltype.Struct("header", ("refcount", lltype.Signed))
    def __init__(self, translator):
        """Annotate the incref/decref/malloc helpers and reset the
        per-TYPE deallocator caches."""
        super(RefcountingGCTransformer, self).__init__(translator, inline=True)
        self.gcheaderbuilder = GCHeaderBuilder(self.HDR)
        gc_header_offset = self.gcheaderbuilder.size_gc_header
        self.deallocator_graphs_needing_transforming = []
        # create incref, etc graph
        # prebuilt exception instance raised on out-of-memory
        memoryError = MemoryError()
        HDRPTR = lltype.Ptr(self.HDR)
        def ll_incref(adr):
            if adr:
                gcheader = llmemory.cast_adr_to_ptr(adr - gc_header_offset, HDRPTR)
                gcheader.refcount = gcheader.refcount + 1
        def ll_decref(adr, dealloc):
            # decrement and call 'dealloc' when the count reaches zero
            if adr:
                gcheader = llmemory.cast_adr_to_ptr(adr - gc_header_offset, HDRPTR)
                refcount = gcheader.refcount - 1
                gcheader.refcount = refcount
                if refcount == 0:
                    dealloc(adr)
        def ll_decref_simple(adr):
            # variant for objects needing no recursive deallocation:
            # free directly instead of calling a deallocator
            if adr:
                gcheader = llmemory.cast_adr_to_ptr(adr - gc_header_offset, HDRPTR)
                refcount = gcheader.refcount - 1
                if refcount == 0:
                    llop.gc_free(lltype.Void, adr)
                else:
                    gcheader.refcount = refcount
        def ll_no_pointer_dealloc(adr):
            llop.gc_free(lltype.Void, adr)
        def ll_malloc_fixedsize(size):
            # allocate zeroed memory including the header; the returned
            # address points just past the header
            size = gc_header_offset + size
            result = lladdress.raw_malloc(size)
            if not result:
                raise memoryError
            lladdress.raw_memclear(result, size)
            result += gc_header_offset
            return result
        def ll_malloc_varsize_no_length(length, size, itemsize):
            # total size = header + fixed part + length * itemsize,
            # with overflow checks
            try:
                fixsize = gc_header_offset + size
                varsize = ovfcheck(itemsize * length)
                tot_size = ovfcheck(fixsize + varsize)
            except OverflowError:
                raise memoryError
            result = lladdress.raw_malloc(tot_size)
            if not result:
                raise memoryError
            lladdress.raw_memclear(result, tot_size)
            result += gc_header_offset
            return result
        def ll_malloc_varsize(length, size, itemsize, lengthoffset):
            # as above, but additionally store 'length' into the object
            result = ll_malloc_varsize_no_length(length, size, itemsize)
            (result + lengthoffset).signed[0] = length
            return result
        if self.translator:
            self.increfptr = self.inittime_helper(
                ll_incref, [llmemory.Address], lltype.Void)
            self.decref_ptr = self.inittime_helper(
                ll_decref, [llmemory.Address, lltype.Ptr(ADDRESS_VOID_FUNC)],
                lltype.Void)
            self.decref_simple_ptr = self.inittime_helper(
                ll_decref_simple, [llmemory.Address], lltype.Void)
            self.no_pointer_dealloc_ptr = self.inittime_helper(
                ll_no_pointer_dealloc, [llmemory.Address], lltype.Void)
            self.malloc_fixedsize_ptr = self.inittime_helper(
                ll_malloc_fixedsize, [lltype.Signed], llmemory.Address)
            self.malloc_varsize_no_length_ptr = self.inittime_helper(
                ll_malloc_varsize_no_length, [lltype.Signed]*3, llmemory.Address)
            self.malloc_varsize_ptr = self.inittime_helper(
                ll_malloc_varsize, [lltype.Signed]*4, llmemory.Address)
            self.mixlevelannotator.finish() # for now
        # cache graphs:
        self.decref_funcptrs = {}
        self.static_deallocator_funcptrs = {}
        self.dynamic_deallocator_funcptrs = {}
        self.queryptr2dynamic_deallocator_funcptr = {}
    def var_needs_set_transform(self, var):
        # refcounting must instrument every gc variable assignment
        return var_needsgc(var)
    def push_alive_nopyobj(self, var, llops):
        """Emit an incref call for `var`."""
        v_adr = gen_cast(llops, llmemory.Address, var)
        llops.genop("direct_call", [self.increfptr, v_adr])
    def pop_alive_nopyobj(self, var, llops):
        """Emit a decref call for `var`, using the simple helper when the
        type needs no recursive deallocation."""
        PTRTYPE = var.concretetype
        v_adr = gen_cast(llops, llmemory.Address, var)
        dealloc_fptr = self.dynamic_deallocation_funcptr_for_type(PTRTYPE.TO)
        if dealloc_fptr is self.no_pointer_dealloc_ptr.value:
            # simple case
            llops.genop("direct_call", [self.decref_simple_ptr, v_adr])
        else:
            cdealloc_fptr = rmodel.inputconst(
                lltype.typeOf(dealloc_fptr), dealloc_fptr)
            llops.genop("direct_call", [self.decref_ptr, v_adr, cdealloc_fptr])
    def gct_gc_protect(self, hop):
        """ protect this object from gc (make it immortal) """
        self.push_alive(hop.spaceop.args[0])
    def gct_gc_unprotect(self, hop):
        """ get this object back into gc control """
        self.pop_alive(hop.spaceop.args[0])
    def gct_malloc(self, hop):
        """Rewrite a fixed-size malloc into a call to ll_malloc_fixedsize."""
        TYPE = hop.spaceop.result.concretetype.TO
        assert not TYPE._is_varsize()
        c_size = rmodel.inputconst(lltype.Signed, llmemory.sizeof(TYPE))
        v_raw = hop.genop("direct_call", [self.malloc_fixedsize_ptr, c_size],
                          resulttype=llmemory.Address)
        hop.cast_result(v_raw)
    # ll_malloc_fixedsize already raw_memclear()s, so zero-malloc is the same
    gct_zero_malloc = gct_malloc
    def gct_malloc_varsize(self, hop):
        """Rewrite a varsize malloc into a call computing the total size
        from the length argument of the original operation."""
        def intconst(c): return rmodel.inputconst(lltype.Signed, c)
        op = hop.spaceop
        TYPE = op.result.concretetype.TO
        assert TYPE._is_varsize()
        if isinstance(TYPE, lltype.Struct):
            ARRAY = TYPE._flds[TYPE._arrayfld]
        else:
            ARRAY = TYPE
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY._hints.get('isrpystring', False):
            # rpython strings reserve one extra item (presumably the
            # trailing null character -- see the 'isrpystring' hint)
            c_const_size = intconst(llmemory.sizeof(TYPE, 1))
        else:
            c_const_size = intconst(llmemory.sizeof(TYPE, 0))
        c_item_size = intconst(llmemory.sizeof(ARRAY.OF))
        if ARRAY._hints.get("nolength", False):
            v_raw = hop.genop("direct_call",
                              [self.malloc_varsize_no_length_ptr, op.args[-1],
                               c_const_size, c_item_size],
                              resulttype=llmemory.Address)
        else:
            if isinstance(TYPE, lltype.Struct):
                offset_to_length = llmemory.FieldOffset(TYPE, TYPE._arrayfld) + \
                                   llmemory.ArrayLengthOffset(ARRAY)
            else:
                offset_to_length = llmemory.ArrayLengthOffset(ARRAY)
            v_raw = hop.genop("direct_call",
                              [self.malloc_varsize_ptr, op.args[-1],
                               c_const_size, c_item_size, intconst(offset_to_length)],
                              resulttype=llmemory.Address)
        hop.cast_result(v_raw)
    gct_zero_malloc_varsize = gct_malloc_varsize
    def gct_gc_deallocate(self, hop):
        """Explicit deallocation: call the dynamic deallocator for TYPE."""
        TYPE = hop.spaceop.args[0].value
        v_addr = hop.spaceop.args[1]
        dealloc_fptr = self.dynamic_deallocation_funcptr_for_type(TYPE)
        cdealloc_fptr = rmodel.inputconst(
            lltype.typeOf(dealloc_fptr), dealloc_fptr)
        hop.genop("direct_call", [cdealloc_fptr, v_addr])
    def consider_constant(self, TYPE, value):
        """Give each prebuilt GC object a header with a huge refcount so
        that it is effectively immortal."""
        if value is not lltype.top_container(value):
            return
        if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)):
            p = value._as_ptr()
            if not self.gcheaderbuilder.get_header(p):
                hdr = self.gcheaderbuilder.new_header(p)
                hdr.refcount = sys.maxint // 2
    def static_deallocation_funcptr_for_type(self, TYPE):
        """Return (and cache) the deallocator used when the exact type is
        statically known: call the destructor if any, pop_alive() the
        embedded gc pointers, then free the object."""
        if TYPE in self.static_deallocator_funcptrs:
            return self.static_deallocator_funcptrs[TYPE]
        #print_call_chain(self)
        rtti = get_rtti(TYPE)
        if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
            destrptr = rtti._obj.destructor_funcptr
            DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]
        else:
            destrptr = None
            DESTR_ARG = None
        if destrptr is None and not find_gc_ptrs_in_type(TYPE):
            # no destructor and no gc pointers inside: share the trivial
            # free-only deallocator
            #print repr(TYPE)[:80], 'is dealloc easy'
            p = self.no_pointer_dealloc_ptr.value
            self.static_deallocator_funcptrs[TYPE] = p
            return p
        if destrptr is not None:
            body = '\n'.join(_static_deallocator_body_for_type('v', TYPE, 3))
            src = """
            def ll_deallocator(addr):
                exc_instance = llop.gc_fetch_exception(EXC_INSTANCE_TYPE)
                try:
                    v = cast_adr_to_ptr(addr, PTR_TYPE)
                    gcheader = cast_adr_to_ptr(addr - gc_header_offset, HDRPTR)
                    # refcount is at zero, temporarily bump it to 1:
                    gcheader.refcount = 1
                    destr_v = cast_pointer(DESTR_ARG, v)
                    ll_call_destructor(destrptr, destr_v)
                    refcount = gcheader.refcount - 1
                    gcheader.refcount = refcount
                    if refcount == 0:
%s
                        llop.gc_free(lltype.Void, addr)
                except:
                    pass
                llop.gc_restore_exception(lltype.Void, exc_instance)
                pop_alive(exc_instance)
                # XXX layering of exceptiontransform versus gcpolicy
            """ % (body, )
        else:
            call_del = None
            body = '\n'.join(_static_deallocator_body_for_type('v', TYPE))
            src = ('def ll_deallocator(addr):\n v = cast_adr_to_ptr(addr, PTR_TYPE)\n' +
                   body + '\n llop.gc_free(lltype.Void, addr)\n')
        # namespace in which the generated deallocator source is exec'd
        d = {'pop_alive': LLTransformerOp(self.pop_alive),
             'llop': llop,
             'lltype': lltype,
             'destrptr': destrptr,
             'gc_header_offset': self.gcheaderbuilder.size_gc_header,
             'cast_adr_to_ptr': llmemory.cast_adr_to_ptr,
             'cast_pointer': lltype.cast_pointer,
             'PTR_TYPE': lltype.Ptr(TYPE),
             'DESTR_ARG': DESTR_ARG,
             'EXC_INSTANCE_TYPE': self.translator.rtyper.exceptiondata.lltype_of_exception_value,
             'll_call_destructor': ll_call_destructor,
             'HDRPTR':lltype.Ptr(self.HDR)}
        exec src in d
        this = d['ll_deallocator']
        fptr = self.annotate_helper(this, [llmemory.Address], lltype.Void)
        self.static_deallocator_funcptrs[TYPE] = fptr
        # make sure deallocators for the pointed-to types exist as well
        for p in find_gc_ptrs_in_type(TYPE):
            self.static_deallocation_funcptr_for_type(p.TO)
        return fptr
    def dynamic_deallocation_funcptr_for_type(self, TYPE):
        """Return (and cache) the deallocator used when only the static
        type is known: query the rtti at run time and dispatch through
        gc_call_rtti_destructor."""
        if TYPE in self.dynamic_deallocator_funcptrs:
            return self.dynamic_deallocator_funcptrs[TYPE]
        #print_call_chain(self)
        rtti = get_rtti(TYPE)
        if rtti is None:
            # no rtti: the static deallocator is already exact
            p = self.static_deallocation_funcptr_for_type(TYPE)
            self.dynamic_deallocator_funcptrs[TYPE] = p
            return p
        queryptr = rtti._obj.query_funcptr
        if queryptr._obj in self.queryptr2dynamic_deallocator_funcptr:
            return self.queryptr2dynamic_deallocator_funcptr[queryptr._obj]
        RTTI_PTR = lltype.Ptr(lltype.RuntimeTypeInfo)
        QUERY_ARG_TYPE = lltype.typeOf(queryptr).TO.ARGS[0]
        gc_header_offset = self.gcheaderbuilder.size_gc_header
        HDRPTR = lltype.Ptr(self.HDR)
        def ll_dealloc(addr):
            # bump refcount to 1
            gcheader = llmemory.cast_adr_to_ptr(addr - gc_header_offset, HDRPTR)
            gcheader.refcount = 1
            v = llmemory.cast_adr_to_ptr(addr, QUERY_ARG_TYPE)
            rtti = queryptr(v)
            gcheader.refcount = 0
            llop.gc_call_rtti_destructor(lltype.Void, rtti, addr)
        fptr = self.annotate_helper(ll_dealloc, [llmemory.Address], lltype.Void)
        self.dynamic_deallocator_funcptrs[TYPE] = fptr
        self.queryptr2dynamic_deallocator_funcptr[queryptr._obj] = fptr
        return fptr
| Python |
#
| Python |
from pypy.rpython.memory.gctransform.transform import GCTransformer, var_ispyobj
from pypy.rpython.memory.gctransform.support import find_gc_ptrs_in_type, \
get_rtti, ll_call_destructor, type_contains_pyobjs
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython import rmodel
from pypy.rpython.memory import gc, lladdress
from pypy.rpython.memory.gcheader import GCHeaderBuilder
from pypy.rlib.rarithmetic import ovfcheck
from pypy.rlib.objectmodel import debug_assert
from pypy.translator.backendopt import graphanalyze
from pypy.annotation import model as annmodel
from pypy.rpython import annlowlevel
from pypy.rpython.rbuiltin import gen_cast
import sys
class CollectAnalyzer(graphanalyze.GraphAnalyzer):
    """Graph analyzer whose 'true' operations are those that may allocate
    or trigger a collection."""
    def operation_is_true(self, op):
        return op.opname in ("malloc", "zero_malloc",
                             "malloc_varsize", "zero_malloc_varsize",
                             "gc__collect", "gc_x_become")
# signature of the finalizer helpers: object address in, nothing out
ADDRESS_VOID_FUNC = lltype.FuncType([llmemory.Address], lltype.Void)
class FrameworkGCTransformer(GCTransformer):
    """GCTransformer driving one of the 'framework' collectors: exact
    root tracking via an explicit root stack plus per-type info tables."""
    use_stackless = False
    extra_static_slots = 0
    finished_tables = False
    # number of address slots in the explicit GC root stack
    root_stack_depth = 163840
    from pypy.rpython.memory.gc import MarkSweepGC as GCClass
    GC_PARAMS = {'start_heap_size': 8*1024*1024} # XXX adjust
    def __init__(self, translator):
        """Build the GC instance, its type-query callbacks and all the
        mix-level helper function pointers (mallocs, collect, statistics,
        root-stack manipulation, experimental x_* operations)."""
        from pypy.rpython.memory.support import get_address_linked_list
        super(FrameworkGCTransformer, self).__init__(translator, inline=True)
        AddressLinkedList = get_address_linked_list()
        GCClass = self.GCClass
        self.finalizer_funcptrs = {}
        self.FINALIZERTYPE = lltype.Ptr(ADDRESS_VOID_FUNC)
        class GCData(object):
            # types of the GC information tables
            OFFSETS_TO_GC_PTR = lltype.Array(lltype.Signed)
            TYPE_INFO = lltype.Struct("type_info",
                                      ("isvarsize", lltype.Bool),
                                      ("finalyzer", self.FINALIZERTYPE),
                                      ("fixedsize", lltype.Signed),
                                      ("ofstoptrs", lltype.Ptr(OFFSETS_TO_GC_PTR)),
                                      ("varitemsize", lltype.Signed),
                                      ("ofstovar", lltype.Signed),
                                      ("ofstolength", lltype.Signed),
                                      ("varofstoptrs",lltype.Ptr(OFFSETS_TO_GC_PTR)),
                                      )
            TYPE_INFO_TABLE = lltype.Array(TYPE_INFO)
        # the q_* functions close over 'gcdata'; they become the GC's
        # type-query callbacks (see set_query_functions below)
        def q_is_varsize(typeid):
            return gcdata.type_info_table[typeid].isvarsize
        def q_finalyzer(typeid):
            return gcdata.type_info_table[typeid].finalyzer
        def q_offsets_to_gc_pointers(typeid):
            return gcdata.type_info_table[typeid].ofstoptrs
        def q_fixed_size(typeid):
            return gcdata.type_info_table[typeid].fixedsize
        def q_varsize_item_sizes(typeid):
            return gcdata.type_info_table[typeid].varitemsize
        def q_varsize_offset_to_variable_part(typeid):
            return gcdata.type_info_table[typeid].ofstovar
        def q_varsize_offset_to_length(typeid):
            return gcdata.type_info_table[typeid].ofstolength
        def q_varsize_offsets_to_gcpointers_in_var_part(typeid):
            return gcdata.type_info_table[typeid].varofstoptrs
        gcdata = GCData()
        # set up dummy a table, to be overwritten with the real one in finish()
        gcdata.type_info_table = lltype.malloc(GCData.TYPE_INFO_TABLE, 0,
                                               immortal=True)
        gcdata.static_roots = lltype.malloc(lltype.Array(llmemory.Address), 0,
                                            immortal=True)
        # initialize the following two fields with a random non-NULL address,
        # to make the annotator happy. The fields are patched in finish()
        # to point to a real array (not 'static_roots', another one).
        a_random_address = llmemory.cast_ptr_to_adr(gcdata.type_info_table)
        gcdata.static_root_start = a_random_address # patched in finish()
        gcdata.static_root_end = a_random_address # patched in finish()
        self.gcdata = gcdata
        self.type_info_list = []
        self.id_of_type = {} # {LLTYPE: type_id}
        self.seen_roots = {}
        self.static_gc_roots = []
        self.addresses_of_static_ptrs_in_nongc = []
        self.offsettable_cache = {}
        self.malloc_fnptr_cache = {}
        sizeofaddr = llmemory.sizeof(llmemory.Address)
        StackRootIterator = self.build_stack_root_iterator()
        gcdata.gc = GCClass(AddressLinkedList, get_roots=StackRootIterator, **self.GC_PARAMS)
        def frameworkgc_setup():
            # run-time initialization code
            StackRootIterator.setup_root_stack()
            gcdata.gc.setup()
            gcdata.gc.set_query_functions(
                q_is_varsize,
                q_finalyzer,
                q_offsets_to_gc_pointers,
                q_fixed_size,
                q_varsize_item_sizes,
                q_varsize_offset_to_variable_part,
                q_varsize_offset_to_length,
                q_varsize_offsets_to_gcpointers_in_var_part)
        bk = self.translator.annotator.bookkeeper
        # the point of this little dance is to not annotate
        # self.gcdata.type_info_table as a constant.
        data_classdef = bk.getuniqueclassdef(GCData)
        data_classdef.generalize_attr(
            'type_info_table',
            annmodel.SomePtr(lltype.Ptr(GCData.TYPE_INFO_TABLE)))
        data_classdef.generalize_attr(
            'static_roots',
            annmodel.SomePtr(lltype.Ptr(lltype.Array(llmemory.Address))))
        data_classdef.generalize_attr(
            'static_root_start',
            annmodel.SomeAddress())
        data_classdef.generalize_attr(
            'static_root_end',
            annmodel.SomeAddress())
        annhelper = annlowlevel.MixLevelHelperAnnotator(self.translator.rtyper)
        def getfn(ll_function, args_s, s_result, inline=False,
                  minimal_transform=True):
            # annotate a helper and return a function pointer constant
            graph = annhelper.getgraph(ll_function, args_s, s_result)
            if minimal_transform:
                self.need_minimal_transform(graph)
            if inline:
                self.graphs_to_inline[graph] = True
            return annhelper.graph2const(graph)
        self.frameworkgc_setup_ptr = getfn(frameworkgc_setup, [],
                                           annmodel.s_None)
        if StackRootIterator.need_root_stack:
            self.pop_root_ptr = getfn(StackRootIterator.pop_root, [],
                                      annmodel.s_None,
                                      inline = True)
            self.push_root_ptr = getfn(StackRootIterator.push_root,
                                       [annmodel.SomeAddress()],
                                       annmodel.s_None,
                                       inline = True)
            self.incr_stack_ptr = getfn(StackRootIterator.incr_stack,
                                        [annmodel.SomeInteger()],
                                        annmodel.SomeAddress(),
                                        inline = True)
            self.decr_stack_ptr = getfn(StackRootIterator.decr_stack,
                                        [annmodel.SomeInteger()],
                                        annmodel.s_None,
                                        inline = True)
            self.save_addr_ptr = getfn(StackRootIterator.save_addr,
                                       [annmodel.SomeAddress(),
                                        annmodel.SomeInteger(),
                                        annmodel.SomeAddress()],
                                       annmodel.s_None,
                                       inline = True)
        else:
            self.push_root_ptr = None
            self.pop_root_ptr = None
            self.incr_stack_ptr = None
            self.decr_stack_ptr = None
            self.save_addr_ptr = None
        classdef = bk.getuniqueclassdef(GCClass)
        s_gc = annmodel.SomeInstance(classdef)
        s_gcref = annmodel.SomePtr(llmemory.GCREF)
        self.malloc_fixedsize_ptr = getfn(
            GCClass.malloc_fixedsize.im_func,
            [s_gc, annmodel.SomeInteger(nonneg=True),
             annmodel.SomeInteger(nonneg=True),
             annmodel.SomeBool(), annmodel.SomeBool()], s_gcref,
            inline = False)
        self.malloc_fixedsize_clear_ptr = getfn(
            GCClass.malloc_fixedsize_clear.im_func,
            [s_gc, annmodel.SomeInteger(nonneg=True),
             annmodel.SomeInteger(nonneg=True),
             annmodel.SomeBool(), annmodel.SomeBool()], s_gcref,
            inline = False)
##         self.malloc_varsize_ptr = getfn(
##             GCClass.malloc_varsize.im_func,
##             [s_gc] + [annmodel.SomeInteger(nonneg=True) for i in range(5)]
##             + [annmodel.SomeBool(), annmodel.SomeBool()], s_gcref)
        self.malloc_varsize_clear_ptr = getfn(
            GCClass.malloc_varsize_clear.im_func,
            [s_gc] + [annmodel.SomeInteger(nonneg=True) for i in range(5)]
            + [annmodel.SomeBool(), annmodel.SomeBool()], s_gcref)
        self.collect_ptr = getfn(GCClass.collect.im_func,
                                 [s_gc], annmodel.s_None)
        self.statistics_ptr = getfn(GCClass.statistics.im_func,
                                    [s_gc, annmodel.SomeInteger()],
                                    annmodel.SomeInteger())
        # experimental gc_x_* operations
        s_x_pool = annmodel.SomePtr(gc.X_POOL_PTR)
        s_x_clone = annmodel.SomePtr(gc.X_CLONE_PTR)
        # the x_*() methods use some regular mallocs that must be
        # transformed in the normal way
        self.x_swap_pool_ptr = getfn(GCClass.x_swap_pool.im_func,
                                     [s_gc, s_x_pool],
                                     s_x_pool,
                                     minimal_transform = False)
        self.x_clone_ptr = getfn(GCClass.x_clone.im_func,
                                 [s_gc, s_x_clone],
                                 annmodel.s_None,
                                 minimal_transform = False)
        self.x_become_ptr = getfn(
            GCClass.x_become.im_func,
            [s_gc, annmodel.SomeAddress(), annmodel.SomeAddress()],
            annmodel.s_None)
        annhelper.finish() # at this point, annotate all mix-level helpers
        annhelper.backend_optimize()
        self.collect_analyzer = CollectAnalyzer(self.translator)
        self.collect_analyzer.analyze_all()
        s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass)
        r_gc = self.translator.rtyper.getrepr(s_gc)
        self.c_const_gc = rmodel.inputconst(r_gc, self.gcdata.gc)
        # record the GC header fields; exposed via gc_fields() below
        HDR = self._gc_HDR = self.gcdata.gc.gcheaderbuilder.HDR
        self._gc_fields = fields = []
        for fldname in HDR._names:
            FLDTYPE = getattr(HDR, fldname)
            fields.append(('_' + fldname, FLDTYPE))
    def build_stack_root_iterator(self):
        """Return a StackRootIterator class operating on an explicit,
        raw-malloced root stack of self.root_stack_depth address slots."""
        gcdata = self.gcdata
        sizeofaddr = llmemory.sizeof(llmemory.Address)
        rootstacksize = sizeofaddr * self.root_stack_depth
        class StackRootIterator:
            _alloc_flavor_ = 'raw'
            def setup_root_stack():
                # allocate and zero the root stack, then push the
                # prebuilt static roots onto it
                stackbase = lladdress.raw_malloc(rootstacksize)
                debug_assert(bool(stackbase), "could not allocate root stack")
                lladdress.raw_memclear(stackbase, rootstacksize)
                gcdata.root_stack_top = stackbase
                gcdata.root_stack_base = stackbase
                i = 0
                while i < len(gcdata.static_roots):
                    StackRootIterator.push_root(gcdata.static_roots[i])
                    i += 1
            setup_root_stack = staticmethod(setup_root_stack)
            need_root_stack = True
            def incr_stack(n):
                # reserve n slots; returns the previous top
                top = gcdata.root_stack_top
                gcdata.root_stack_top = top + n*sizeofaddr
                return top
            incr_stack = staticmethod(incr_stack)
            def save_addr(top, k, addr):
                # store a root address into slot k of a reserved area
                top.address[k] = addr
            save_addr = staticmethod(save_addr)
            def decr_stack(n):
                gcdata.root_stack_top -= n*sizeofaddr
            decr_stack = staticmethod(decr_stack)
            def push_root(addr):
                top = gcdata.root_stack_top
                top.address[0] = addr
                gcdata.root_stack_top = top + sizeofaddr
            push_root = staticmethod(push_root)
            def pop_root():
                gcdata.root_stack_top -= sizeofaddr
            pop_root = staticmethod(pop_root)
            def __init__(self):
                self.stack_current = gcdata.root_stack_top
                self.static_current = gcdata.static_root_start
            def pop(self):
                # yield static root addresses first, then walk the root
                # stack downwards; NULL signals exhaustion
                while self.static_current != gcdata.static_root_end:
                    result = self.static_current
                    self.static_current += sizeofaddr
                    if result.address[0].address[0] != llmemory.NULL:
                        return result.address[0]
                while self.stack_current != gcdata.root_stack_base:
                    self.stack_current -= sizeofaddr
                    if self.stack_current.address[0] != llmemory.NULL:
                        return self.stack_current
                return llmemory.NULL
        return StackRootIterator
    def get_type_id(self, TYPE):
        """Return the small-integer type id of TYPE, assigning a fresh id
        (and building its type_info description dict) on first use."""
        try:
            return self.id_of_type[TYPE]
        except KeyError:
            assert not self.finished_tables
            assert isinstance(TYPE, (lltype.GcStruct, lltype.GcArray))
            # Record the new type_id description as a small dict for now.
            # It will be turned into a Struct("type_info") in finish()
            type_id = len(self.type_info_list)
            info = {}
            self.type_info_list.append(info)
            self.id_of_type[TYPE] = type_id
            offsets = offsets_to_gc_pointers(TYPE)
            info["ofstoptrs"] = self.offsets2table(offsets, TYPE)
            info["finalyzer"] = self.finalizer_funcptr_for_type(TYPE)
            if not TYPE._is_varsize():
                info["isvarsize"] = False
                info["fixedsize"] = llmemory.sizeof(TYPE)
                info["ofstolength"] = -1
            else:
                info["isvarsize"] = True
                info["fixedsize"] = llmemory.sizeof(TYPE, 0)
                if isinstance(TYPE, lltype.Struct):
                    # varsize struct: the array is an (inlined) field
                    ARRAY = TYPE._flds[TYPE._arrayfld]
                    ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
                    info["ofstolength"] = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
                    if ARRAY.OF != lltype.Void:
                        info["ofstovar"] = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
                    else:
                        info["fixedsize"] = ofs1 + llmemory.sizeof(lltype.Signed)
                    if ARRAY._hints.get('isrpystring'):
                        info["fixedsize"] = llmemory.sizeof(TYPE, 1)
                else:
                    # plain GcArray
                    ARRAY = TYPE
                    info["ofstolength"] = llmemory.ArrayLengthOffset(ARRAY)
                    if ARRAY.OF != lltype.Void:
                        info["ofstovar"] = llmemory.itemoffsetof(TYPE, 0)
                    else:
                        info["fixedsize"] = llmemory.ArrayLengthOffset(ARRAY) + llmemory.sizeof(lltype.Signed)
                assert isinstance(ARRAY, lltype.Array)
                if ARRAY.OF != lltype.Void:
                    offsets = offsets_to_gc_pointers(ARRAY.OF)
                    info["varofstoptrs"] = self.offsets2table(offsets, ARRAY.OF)
                    info["varitemsize"] = llmemory.sizeof(ARRAY.OF)
                else:
                    info["varofstoptrs"] = self.offsets2table((), lltype.Void)
                    info["varitemsize"] = llmemory.sizeof(ARRAY.OF)
            return type_id
def finalizer_funcptr_for_type(self, TYPE):
if TYPE in self.finalizer_funcptrs:
return self.finalizer_funcptrs[TYPE]
rtti = get_rtti(TYPE)
if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
destrptr = rtti._obj.destructor_funcptr
DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]
else:
destrptr = None
DESTR_ARG = None
assert not type_contains_pyobjs(TYPE), "not implemented"
if destrptr:
def ll_finalizer(addr):
v = llmemory.cast_adr_to_ptr(addr, DESTR_ARG)
ll_call_destructor(destrptr, v)
fptr = self.annotate_helper(ll_finalizer, [llmemory.Address], lltype.Void)
else:
fptr = lltype.nullptr(ADDRESS_VOID_FUNC)
self.finalizer_funcptrs[TYPE] = fptr
return fptr
def consider_constant(self, TYPE, value):
if value is not lltype.top_container(value):
return
if id(value) in self.seen_roots:
return
self.seen_roots[id(value)] = True
if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)):
typeid = self.get_type_id(TYPE)
hdrbuilder = self.gcdata.gc.gcheaderbuilder
hdr = hdrbuilder.new_header(value)
adr = llmemory.cast_ptr_to_adr(hdr)
self.gcdata.gc.init_gc_object(adr, typeid)
if find_gc_ptrs_in_type(TYPE):
adr = llmemory.cast_ptr_to_adr(value._as_ptr())
if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)):
self.static_gc_roots.append(adr)
else:
for a in gc_pointers_inside(value, adr):
self.addresses_of_static_ptrs_in_nongc.append(a)
    def gc_fields(self):
        # Return the cached list of GC-header fields; self._gc_fields is
        # initialized elsewhere in this class (outside this chunk).
        return self._gc_fields
def gc_field_values_for(self, obj):
hdr = self.gcdata.gc.gcheaderbuilder.header_of_object(obj)
HDR = self._gc_HDR
return [getattr(hdr, fldname) for fldname in HDR._names]
def offsets2table(self, offsets, TYPE):
try:
return self.offsettable_cache[TYPE]
except KeyError:
cachedarray = lltype.malloc(self.gcdata.OFFSETS_TO_GC_PTR,
len(offsets), immortal=True)
for i, value in enumerate(offsets):
cachedarray[i] = value
self.offsettable_cache[TYPE] = cachedarray
return cachedarray
    def finish_tables(self):
        """Freeze the type descriptions collected so far into low-level
        arrays and patch them into the rtyped gcdata instance.

        Returns the list of new low-level containers that the database
        must still be told about.
        """
        self.finished_tables = True
        # turn the per-type dicts into one immortal TYPE_INFO_TABLE array
        table = lltype.malloc(self.gcdata.TYPE_INFO_TABLE,
                              len(self.type_info_list), immortal=True)
        for tableentry, newcontent in zip(table, self.type_info_list):
            for key, value in newcontent.items():
                setattr(tableentry, key, value)
        self.offsettable_cache = None
        # replace the type_info_table pointer in gcdata -- at this point,
        # the database is in principle complete, so it has already seen
        # the old (empty) array. We need to force it to consider the new
        # array now. It's a bit hackish as the old empty array will also
        # be generated in the C source, but that's a rather minor problem.
        # XXX because we call inputconst already in replace_malloc, we can't
        # modify the instance, we have to modify the 'rtyped instance'
        # instead. horrors. is there a better way?
        s_gcdata = self.translator.annotator.bookkeeper.immutablevalue(
            self.gcdata)
        r_gcdata = self.translator.rtyper.getrepr(s_gcdata)
        ll_instance = rmodel.inputconst(r_gcdata, self.gcdata).value
        ll_instance.inst_type_info_table = table
        #self.gcdata.type_info_table = table
        # the array of static roots (with room for extra slots)
        ll_static_roots = lltype.malloc(lltype.Array(llmemory.Address),
                                        len(self.static_gc_roots) +
                                        self.extra_static_slots,
                                        immortal=True)
        for i in range(len(self.static_gc_roots)):
            adr = self.static_gc_roots[i]
            ll_static_roots[i] = adr
        ll_instance.inst_static_roots = ll_static_roots
        # addresses of GC pointers living inside non-GC prebuilt containers
        ll_static_roots_inside = lltype.malloc(lltype.Array(llmemory.Address),
                                               len(self.addresses_of_static_ptrs_in_nongc),
                                               immortal=True)
        for i in range(len(self.addresses_of_static_ptrs_in_nongc)):
            ll_static_roots_inside[i] = self.addresses_of_static_ptrs_in_nongc[i]
        # expose that array as a [start, end) range of addresses
        ll_instance.inst_static_root_start = llmemory.cast_ptr_to_adr(ll_static_roots_inside) + llmemory.ArrayItemsOffset(lltype.Array(llmemory.Address))
        ll_instance.inst_static_root_end = ll_instance.inst_static_root_start + llmemory.sizeof(llmemory.Address) * len(ll_static_roots_inside)
        newgcdependencies = []
        newgcdependencies.append(table)
        newgcdependencies.append(ll_static_roots)
        newgcdependencies.append(ll_static_roots_inside)
        return newgcdependencies
def gct_direct_call(self, hop):
if self.collect_analyzer.analyze(hop.spaceop):
self.push_roots(hop)
self.default(hop)
self.pop_roots(hop)
else:
self.default(hop)
gct_indirect_call = gct_direct_call
    def gct_malloc(self, hop):
        """Rewrite a (possibly flavored/zero/varsize) malloc operation into
        a direct_call to the GC's allocator, pushing the live GC roots
        around the call."""
        op = hop.spaceop
        if op.opname.startswith('flavored_'):
            flavor = op.args[0].value
            TYPE = op.args[1].value
        else:
            flavor = 'gc'
            TYPE = op.args[0].value
        # non-gc flavors keep the default transformation
        if not flavor.startswith('gc'):
            self.default(hop)
            return
        c_can_collect = rmodel.inputconst(lltype.Bool,
                                          flavor != 'gc_nocollect')
        PTRTYPE = op.result.concretetype
        assert PTRTYPE.TO == TYPE
        type_id = self.get_type_id(TYPE)
        c_type_id = rmodel.inputconst(lltype.Signed, type_id)
        info = self.type_info_list[type_id]
        c_size = rmodel.inputconst(lltype.Signed, info["fixedsize"])
        if not op.opname.endswith('_varsize'):
            #malloc_ptr = self.malloc_fixedsize_ptr
            if op.opname.startswith('zero'):
                malloc_ptr = self.malloc_fixedsize_clear_ptr
            else:
                malloc_ptr = self.malloc_fixedsize_ptr
            args = [self.c_const_gc, c_type_id, c_size, c_can_collect]
        else:
            v_length = op.args[-1]
            c_ofstolength = rmodel.inputconst(lltype.Signed, info['ofstolength'])
            c_varitemsize = rmodel.inputconst(lltype.Signed, info['varitemsize'])
            # the _clear variant is currently used for all varsize mallocs
            malloc_ptr = self.malloc_varsize_clear_ptr
##             if op.opname.startswith('zero'):
##                 malloc_ptr = self.malloc_varsize_clear_ptr
##             else:
##                 malloc_ptr = self.malloc_varsize_clear_ptr
            args = [self.c_const_gc, c_type_id, v_length, c_size,
                    c_varitemsize, c_ofstolength, c_can_collect]
        c_has_finalizer = rmodel.inputconst(
            lltype.Bool, bool(self.finalizer_funcptr_for_type(TYPE)))
        args.append(c_has_finalizer)
        self.push_roots(hop)
        v_result = hop.genop("direct_call", [malloc_ptr] + args,
                             resulttype=llmemory.GCREF)
        self.pop_roots(hop)
        hop.cast_result(v_result)
    gct_zero_malloc = gct_malloc
    gct_malloc_varsize = gct_malloc
    gct_zero_malloc_varsize = gct_malloc
    gct_flavored_malloc = gct_malloc
    gct_flavored_malloc_varsize = gct_malloc
def gct_gc__collect(self, hop):
op = hop.spaceop
self.push_roots(hop)
hop.genop("direct_call", [self.collect_ptr, self.c_const_gc],
resultvar=op.result)
self.pop_roots(hop)
def gct_gc_x_swap_pool(self, hop):
op = hop.spaceop
[v_malloced] = op.args
hop.genop("direct_call",
[self.x_swap_pool_ptr, self.c_const_gc, v_malloced],
resultvar=op.result)
def gct_gc_x_clone(self, hop):
op = hop.spaceop
[v_clonedata] = op.args
hop.genop("direct_call",
[self.x_clone_ptr, self.c_const_gc, v_clonedata],
resultvar=op.result)
def gct_gc_x_size_header(self, hop):
op = hop.spaceop
c_result = rmodel.inputconst(lltype.Signed,
self.gcdata.gc.size_gc_header())
hop.genop("same_as",
[c_result],
resultvar=op.result)
def gct_gc_x_become(self, hop):
op = hop.spaceop
[v_target, v_source] = op.args
self.push_roots(hop)
hop.genop("direct_call",
[self.x_become_ptr, self.c_const_gc, v_target, v_source],
resultvar=op.result)
self.pop_roots(hop)
def gct_zero_gc_pointers_inside(self, hop):
v_ob = hop.spaceop.args[0]
TYPE = v_ob.concretetype.TO
gen_zero_gc_pointers(TYPE, v_ob, hop.llops)
    def push_alive_nopyobj(self, var, llops):
        # intentionally a no-op: this transformer keeps variables alive via
        # push_roots/pop_roots instead of per-variable keepalive operations
        pass
    def pop_alive_nopyobj(self, var, llops):
        # intentionally a no-op; see push_alive_nopyobj
        pass
def push_roots(self, hop):
if self.push_root_ptr is None:
return
livevars = [var for var in self.livevars if not var_ispyobj(var)]
c_len = rmodel.inputconst(lltype.Signed, len(livevars) )
base_addr = hop.genop("direct_call", [self.incr_stack_ptr, c_len ],
resulttype=llmemory.Address)
for k,var in enumerate(livevars):
c_k = rmodel.inputconst(lltype.Signed, k)
v_adr = gen_cast(hop.llops, llmemory.Address, var)
hop.genop("direct_call", [self.save_addr_ptr, base_addr, c_k, v_adr])
def pop_roots(self, hop):
if self.pop_root_ptr is None:
return
livevars = [var for var in self.livevars if not var_ispyobj(var)]
c_len = rmodel.inputconst(lltype.Signed, len(livevars) )
hop.genop("direct_call", [self.decr_stack_ptr, c_len ] )
## for var in livevars[::-1]:
## # XXX specific to non-moving collectors
## hop.genop("direct_call", [self.pop_root_ptr])
## #hop.genop("gc_reload_possibly_moved", [var])
# XXX copied and modified from lltypelayout.py
def offsets_to_gc_pointers(TYPE):
    """Return the list of (symbolic) offsets, relative to the start of
    TYPE, at which GC pointers are stored.

    Recurses into inlined substructs; inlined arrays are skipped (their
    items are described separately via the per-type var-part offsets).
    """
    offsets = []
    if isinstance(TYPE, lltype.Struct):
        for name in TYPE._names:
            FIELD = getattr(TYPE, name)
            if isinstance(FIELD, lltype.Array):
                continue    # skip inlined array
            baseofs = llmemory.offsetof(TYPE, name)
            suboffsets = offsets_to_gc_pointers(FIELD)
            for s in suboffsets:
                # symbolic offsets may not support comparison with 0;
                # in that case they are simply not known to be zero
                try:
                    knownzero = s == 0
                except TypeError:
                    knownzero = False
                if knownzero:
                    offsets.append(baseofs)
                else:
                    offsets.append(baseofs + s)
        # sanity check
        #ex = lltype.Ptr(TYPE)._example()
        #adr = llmemory.cast_ptr_to_adr(ex)
        #for off in offsets:
        #    (adr + off)
    elif isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'gc':
        # a GC pointer itself: it lives at offset 0 of its own "type"
        offsets.append(0)
    return offsets
def gen_zero_gc_pointers(TYPE, v, llops):
    # Emit operations that store NULL into every GC pointer field of the
    # structure pointed to by 'v' (of type TYPE), recursing into inlined
    # substructs.
    assert isinstance(TYPE, lltype.Struct)
    for fldname in TYPE._names:
        FLDTYPE = getattr(TYPE, fldname)
        if isinstance(FLDTYPE, lltype.Ptr) and FLDTYPE._needsgc():
            c_fld = rmodel.inputconst(lltype.Void, fldname)
            c_null = rmodel.inputconst(FLDTYPE, lltype.nullptr(FLDTYPE.TO))
            llops.genop('bare_setfield', [v, c_fld, c_null])
        elif isinstance(FLDTYPE, lltype.Struct):
            c_fld = rmodel.inputconst(lltype.Void, fldname)
            v_sub = llops.genop('getsubstruct', [v, c_fld],
                                resulttype=lltype.Ptr(FLDTYPE))
            gen_zero_gc_pointers(FLDTYPE, v_sub, llops)
def gc_pointers_inside(v, adr):
    """Yield the addresses of all GC pointers stored inside the prebuilt
    container 'v', whose base address is 'adr'.

    Recurses into inlined structs and the items of inlined arrays.
    """
    t = lltype.typeOf(v)
    if isinstance(t, lltype.Struct):
        for n, t2 in t._flds.iteritems():
            if isinstance(t2, lltype.Ptr) and t2.TO._gckind == 'gc':
                yield adr + llmemory.offsetof(t, n)
            elif isinstance(t2, (lltype.Array, lltype.Struct)):
                for a in gc_pointers_inside(getattr(v, n),
                                            adr + llmemory.offsetof(t, n)):
                    yield a
    elif isinstance(t, lltype.Array):
        # bug fix: this condition used to read 't2._needsgc()', but 't2'
        # is only bound inside the Struct branch above and is undefined
        # here; the item type of the array is 't.OF'
        if isinstance(t.OF, lltype.Ptr) and t.OF._needsgc():
            for i in range(len(v.items)):
                yield adr + llmemory.itemoffsetof(t, i)
        elif isinstance(t.OF, lltype.Struct):
            for i in range(len(v.items)):
                for a in gc_pointers_inside(v.items[i],
                                            adr + llmemory.itemoffsetof(t, i)):
                    yield a
| Python |
#
| Python |
from pypy.rpython.memory.lladdress import raw_malloc, raw_free, raw_memcopy, raw_memclear
from pypy.rpython.memory.lladdress import NULL, _address, raw_malloc_usage
from pypy.rpython.memory.support import get_address_linked_list
from pypy.rpython.memory.gcheader import GCHeaderBuilder
from pypy.rpython.memory import lltypesimulation
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rlib.objectmodel import free_non_gc_object, debug_assert
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rlib.rarithmetic import ovfcheck
import sys, os
# size in bytes of one simulated machine word
int_size = lltypesimulation.sizeof(lltype.Signed)
gc_header_two_ints = 2*int_size
# opaque handle naming "a pool of GC objects", used by the experimental
# x_swap_pool()/x_clone() cloning interface below
X_POOL = lltype.GcOpaqueType('gc.pool')
X_POOL_PTR = lltype.Ptr(X_POOL)
# in/out argument structure for x_clone()
X_CLONE = lltype.GcStruct('CloneData', ('gcobjectptr', llmemory.GCREF),
                          ('pool', X_POOL_PTR))
X_CLONE_PTR = lltype.Ptr(X_CLONE)
class GCError(Exception):
    """Raised for internal garbage-collector failures."""
def get_dummy_annotate(gc, AddressLinkedList):
    """Build helper functions that exercise the GC's entry points so that
    the annotator sees them all used; never meant to run for real."""
    def dummy_annotate():
        gc.setup()
        gc.get_roots = dummy_get_roots1 # assign two different functions so
        gc.get_roots = dummy_get_roots2 # the attribute is not a constant
        a = gc.malloc(1, 2)
        b = gc.malloc(2, 3)
        gc.write_barrier(raw_malloc(1), raw_malloc(2), raw_malloc(1))
        gc.collect()
        return a - b
    def dummy_get_roots1():
        ll = AddressLinkedList()
        ll.append(NULL)
        ll.append(raw_malloc(10))
        ll.pop() #make the annotator see pop
        return ll
    def dummy_get_roots2():
        ll = AddressLinkedList()
        ll.append(raw_malloc(10))
        ll.append(NULL)
        ll.pop() #make the annotator see pop
        return ll
    return dummy_annotate, dummy_get_roots1, dummy_get_roots2
# low-level signatures of the operations every GC implementation exposes
gc_interface = {
    "malloc": lltype.FuncType((lltype.Signed, lltype.Signed), llmemory.Address),
    "collect": lltype.FuncType((), lltype.Void),
    "write_barrier": lltype.FuncType((llmemory.Address, ) * 3, lltype.Void),
    }
class GCBase(object):
    """Common base class of the GC implementations in this module."""
    _alloc_flavor_ = "raw"

    def set_query_functions(self, is_varsize, getfinalizer,
                            offsets_to_gc_pointers,
                            fixed_size, varsize_item_sizes,
                            varsize_offset_to_variable_part,
                            varsize_offset_to_length,
                            varsize_offsets_to_gcpointers_in_var_part):
        # Install the callbacks through which the GC inspects the layout
        # of the types it manages.
        self.is_varsize = is_varsize
        self.getfinalizer = getfinalizer
        self.offsets_to_gc_pointers = offsets_to_gc_pointers
        self.fixed_size = fixed_size
        self.varsize_item_sizes = varsize_item_sizes
        self.varsize_offset_to_variable_part = varsize_offset_to_variable_part
        self.varsize_offset_to_length = varsize_offset_to_length
        self.varsize_offsets_to_gcpointers_in_var_part = \
            varsize_offsets_to_gcpointers_in_var_part

    def write_barrier(self, addr, addr_to, addr_struct):
        # default barrier: just perform the pointer store
        addr_to.address[0] = addr

    def free_memory(self):
        "NOT_RPYTHON"
        # only ever invoked during setup, never at runtime
        pass

    def setup(self):
        # hook for subclasses; nothing to initialize by default
        pass
class DummyGC(GCBase):
    """A do-nothing GC: allocates raw memory and never frees anything."""
    _alloc_flavor_ = "raw"

    def __init__(self, AddressLinkedList, dummy=None, get_roots=None):
        self.get_roots = get_roots

    def malloc(self, typeid, length=0):
        # fixed part plus, for varsized types, room for 'length' items
        nbytes = self.fixed_size(typeid)
        if self.is_varsize(typeid):
            nbytes += length * self.varsize_item_sizes(typeid)
        addr = raw_malloc(nbytes)
        if not addr:
            raise memoryError
        return addr

    def collect(self):
        # call get_roots so the annotator sees it used as a function
        self.get_roots()

    def size_gc_header(self, typeid=0):
        # this GC attaches no header at all
        return 0

    def init_gc_object(self, addr, typeid):
        # nothing to initialize
        return
    init_gc_object_immortal = init_gc_object
# set to True to get debug_print output from the collector
DEBUG_PRINT = False
# shared MemoryError instance, raised on every allocation failure
memoryError = MemoryError()
class MarkSweepGC(GCBase):
    """Classic mark-and-sweep collector built on the system allocator.

    Every object carries a two-word header: its typeid (shifted left by
    one, with bit 0 used as the mark bit) and a 'next' pointer chaining
    all malloced objects together.
    """
    _alloc_flavor_ = "raw"
    HDR = lltype.ForwardReference()
    HDRPTR = lltype.Ptr(HDR)
    # need to maintain a linked list of malloced objects, since we used the
    # systems allocator and can't walk the heap
    HDR.become(lltype.Struct('header', ('typeid', lltype.Signed),
                             ('next', HDRPTR)))
    # pool bookkeeping for the experimental x_swap_pool()/x_clone() API
    POOL = lltype.GcStruct('gc_pool')
    POOLPTR = lltype.Ptr(POOL)
    POOLNODE = lltype.ForwardReference()
    POOLNODEPTR = lltype.Ptr(POOLNODE)
    POOLNODE.become(lltype.Struct('gc_pool_node', ('linkedlist', HDRPTR),
                                  ('nextnode', POOLNODEPTR)))
    def __init__(self, AddressLinkedList, start_heap_size=4096, get_roots=None):
        self.heap_usage = 0          # at the end of the latest collection
        self.bytes_malloced = 0      # since the latest collection
        self.bytes_malloced_threshold = start_heap_size
        self.total_collection_time = 0.0
        self.AddressLinkedList = AddressLinkedList
        # linked lists of all objects, split by whether they need a
        # finalizer call when they die
        self.malloced_objects = lltype.nullptr(self.HDR)
        self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
        self.get_roots = get_roots
        self.gcheaderbuilder = GCHeaderBuilder(self.HDR)
        # pools, for x_swap_pool():
        #   'curpool' is the current pool, lazily allocated (i.e. NULL means
        #   the current POOL object is not yet malloc'ed).  POOL objects are
        #   usually at the start of a linked list of objects, via the HDRs.
        #   The exception is 'curpool' whose linked list of objects is in
        #   'self.malloced_objects' instead of in the header of 'curpool'.
        #   POOL objects are never in the middle of a linked list themselves.
        self.curpool = lltype.nullptr(self.POOL)
        #   'poolnodes' is a linked list of all such linked lists.  Each
        #   linked list will usually start with a POOL object, but it can
        #   also contain only normal objects if the POOL object at the head
        #   was already freed.  The objects in 'malloced_objects' are not
        #   found via 'poolnodes'.
        self.poolnodes = lltype.nullptr(self.POOLNODE)
        self.collect_in_progress = False
        self.prev_collect_end_time = 0.0
def malloc(self, typeid, length=0):
size = self.fixed_size(typeid)
needs_finalizer = bool(self.getfinalizer(typeid))
if self.is_varsize(typeid):
itemsize = self.varsize_item_sizes(typeid)
offset_to_length = self.varsize_offset_to_length(typeid)
ref = self.malloc_varsize(typeid, length, size, itemsize,
offset_to_length, True, needs_finalizer)
else:
ref = self.malloc_fixedsize(typeid, size, True, needs_finalizer)
# XXX lots of cast and reverse-cast around, but this malloc()
# should eventually be killed
return llmemory.cast_ptr_to_adr(ref)
    def malloc_fixedsize(self, typeid, size, can_collect, has_finalizer=False):
        """Allocate a fixed-size object of the given typeid; the returned
        memory is NOT zeroed.  May trigger a collection if can_collect."""
        if can_collect and self.bytes_malloced > self.bytes_malloced_threshold:
            self.collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        # overflow-checked size accounting, done before the raw_malloc
        try:
            tot_size = size_gc_header + size
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced+usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        # initialize the header (typeid shifted left; bit 0 is the mark bit)
        # and chain the object into the appropriate list
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid = typeid << 1
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced
        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc typeid', typeid,
        #                 '->', llmemory.cast_adr_to_int(result))
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
    def malloc_fixedsize_clear(self, typeid, size, can_collect, has_finalizer=False):
        """Like malloc_fixedsize(), but the allocated memory is zeroed."""
        if can_collect and self.bytes_malloced > self.bytes_malloced_threshold:
            self.collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        # overflow-checked size accounting, done before the raw_malloc
        try:
            tot_size = size_gc_header + size
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced+usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        raw_memclear(result, tot_size)
        # initialize the header and chain the object into the right list
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid = typeid << 1
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced
        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc typeid', typeid,
        #                 '->', llmemory.cast_adr_to_int(result))
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
    def malloc_varsize(self, typeid, length, size, itemsize, offset_to_length,
                       can_collect, has_finalizer=False):
        """Allocate a variable-sized object with 'length' items; the
        memory is NOT zeroed except for the stored length field."""
        if can_collect and self.bytes_malloced > self.bytes_malloced_threshold:
            self.collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        # overflow-checked size computation: header + fixed part + items
        try:
            fixsize = size_gc_header + size
            varsize = ovfcheck(itemsize * length)
            tot_size = ovfcheck(fixsize + varsize)
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced+usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        # store the length field inside the object
        (result + size_gc_header + offset_to_length).signed[0] = length
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid = typeid << 1
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced
        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
        #                 'typeid', typeid,
        #                 '->', llmemory.cast_adr_to_int(result))
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
    def malloc_varsize_clear(self, typeid, length, size, itemsize, offset_to_length,
                             can_collect, has_finalizer=False):
        """Like malloc_varsize(), but the allocated memory is zeroed."""
        if can_collect and self.bytes_malloced > self.bytes_malloced_threshold:
            self.collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        # overflow-checked size computation: header + fixed part + items
        try:
            fixsize = size_gc_header + size
            varsize = ovfcheck(itemsize * length)
            tot_size = ovfcheck(fixsize + varsize)
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced+usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        raw_memclear(result, tot_size)
        # store the length field inside the (zeroed) object
        (result + size_gc_header + offset_to_length).signed[0] = length
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid = typeid << 1
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced
        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
        #                 'typeid', typeid,
        #                 '->', llmemory.cast_adr_to_int(result))
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
def collect(self):
# 1. mark from the roots, and also the objects that objects-with-del
# point to (using the list of malloced_objects_with_finalizer)
# 2. walk the list of objects-without-del and free the ones not marked
# 3. walk the list of objects-with-del and for the ones not marked:
# call __del__, move the object to the list of object-without-del
import time
from pypy.rpython.lltypesystem.lloperation import llop
if DEBUG_PRINT:
llop.debug_print(lltype.Void, 'collecting...')
start_time = time.time()
self.collect_in_progress = True
roots = self.get_roots()
size_gc_header = self.gcheaderbuilder.size_gc_header
## llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
## size_gc_header)
# push the roots on the mark stack
objects = self.AddressLinkedList() # mark stack
while 1:
curr = roots.pop()
if curr == NULL:
break
# roots is a list of addresses to addresses:
objects.append(curr.address[0])
# the last sweep did not clear the mark bit of static roots,
# since they are not in the malloced_objects list
gc_info = curr.address[0] - size_gc_header
hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
hdr.typeid = hdr.typeid & (~1)
free_non_gc_object(roots)
# from this point onwards, no more mallocs should be possible
old_malloced = self.bytes_malloced
self.bytes_malloced = 0
curr_heap_size = 0
freed_size = 0
# mark objects reachable by objects with a finalizer, but not those
# themselves. add their size to curr_heap_size, since they always
# survive the collection
hdr = self.malloced_objects_with_finalizer
while hdr:
next = hdr.next
typeid = hdr.typeid >> 1
gc_info = llmemory.cast_ptr_to_adr(hdr)
obj = gc_info + size_gc_header
if not hdr.typeid & 1:
self.add_reachable_to_stack(obj, objects)
addr = llmemory.cast_ptr_to_adr(hdr)
size = self.fixed_size(typeid)
if self.is_varsize(typeid):
length = (obj + self.varsize_offset_to_length(typeid)).signed[0]
size += self.varsize_item_sizes(typeid) * length
estimate = raw_malloc_usage(size_gc_header + size)
curr_heap_size += estimate
hdr = next
# mark thinks on the mark stack and put their descendants onto the
# stack until the stack is empty
while objects.non_empty(): #mark
curr = objects.pop()
gc_info = curr - size_gc_header
hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
if hdr.typeid & 1:
continue
self.add_reachable_to_stack(curr, objects)
hdr.typeid = hdr.typeid | 1
objects.delete()
# also mark self.curpool
if self.curpool:
gc_info = llmemory.cast_ptr_to_adr(self.curpool) - size_gc_header
hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
hdr.typeid = hdr.typeid | 1
# sweep: delete objects without del if they are not marked
# unmark objects without del that are marked
firstpoolnode = lltype.malloc(self.POOLNODE, flavor='raw')
firstpoolnode.linkedlist = self.malloced_objects
firstpoolnode.nextnode = self.poolnodes
prevpoolnode = lltype.nullptr(self.POOLNODE)
poolnode = firstpoolnode
while poolnode: #sweep
ppnext = llmemory.cast_ptr_to_adr(poolnode)
ppnext += llmemory.offsetof(self.POOLNODE, 'linkedlist')
hdr = poolnode.linkedlist
while hdr: #sweep
typeid = hdr.typeid >> 1
next = hdr.next
addr = llmemory.cast_ptr_to_adr(hdr)
size = self.fixed_size(typeid)
if self.is_varsize(typeid):
length = (addr + size_gc_header + self.varsize_offset_to_length(typeid)).signed[0]
size += self.varsize_item_sizes(typeid) * length
estimate = raw_malloc_usage(size_gc_header + size)
if hdr.typeid & 1:
hdr.typeid = hdr.typeid & (~1)
ppnext.address[0] = addr
ppnext = llmemory.cast_ptr_to_adr(hdr)
ppnext += llmemory.offsetof(self.HDR, 'next')
curr_heap_size += estimate
else:
freed_size += estimate
raw_free(addr)
hdr = next
ppnext.address[0] = llmemory.NULL
next = poolnode.nextnode
if not poolnode.linkedlist and prevpoolnode:
# completely empty node
prevpoolnode.nextnode = next
lltype.free(poolnode, flavor='raw')
else:
prevpoolnode = poolnode
poolnode = next
self.malloced_objects = firstpoolnode.linkedlist
self.poolnodes = firstpoolnode.nextnode
lltype.free(firstpoolnode, flavor='raw')
#llop.debug_view(lltype.Void, self.malloced_objects, self.malloced_objects_with_finalizer, size_gc_header)
end_time = time.time()
compute_time = start_time - self.prev_collect_end_time
collect_time = end_time - start_time
garbage_collected = old_malloced - (curr_heap_size - self.heap_usage)
if (collect_time * curr_heap_size >
0.02 * garbage_collected * compute_time):
self.bytes_malloced_threshold += self.bytes_malloced_threshold / 2
if (collect_time * curr_heap_size <
0.005 * garbage_collected * compute_time):
self.bytes_malloced_threshold /= 2
# Use atleast as much memory as current live objects.
if curr_heap_size > self.bytes_malloced_threshold:
self.bytes_malloced_threshold = curr_heap_size
# Cap at 1/4 GB
self.bytes_malloced_threshold = min(self.bytes_malloced_threshold,
256 * 1024 * 1024)
self.total_collection_time += collect_time
self.prev_collect_end_time = end_time
if DEBUG_PRINT:
llop.debug_print(lltype.Void,
" malloced since previous collection:",
old_malloced, "bytes")
llop.debug_print(lltype.Void,
" heap usage at start of collection: ",
self.heap_usage + old_malloced, "bytes")
llop.debug_print(lltype.Void,
" freed: ",
freed_size, "bytes")
llop.debug_print(lltype.Void,
" new heap usage: ",
curr_heap_size, "bytes")
llop.debug_print(lltype.Void,
" total time spent collecting: ",
self.total_collection_time, "seconds")
llop.debug_print(lltype.Void,
" collecting time: ",
collect_time)
llop.debug_print(lltype.Void,
" computing time: ",
collect_time)
llop.debug_print(lltype.Void,
" new threshold: ",
self.bytes_malloced_threshold)
## llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
## size_gc_header)
assert self.heap_usage + old_malloced == curr_heap_size + freed_size
self.heap_usage = curr_heap_size
hdr = self.malloced_objects_with_finalizer
self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
last = lltype.nullptr(self.HDR)
while hdr:
next = hdr.next
if hdr.typeid & 1:
hdr.next = lltype.nullptr(self.HDR)
if not self.malloced_objects_with_finalizer:
self.malloced_objects_with_finalizer = hdr
else:
last.next = hdr
hdr.typeid = hdr.typeid & (~1)
last = hdr
else:
obj = llmemory.cast_ptr_to_adr(hdr) + size_gc_header
finalizer = self.getfinalizer(hdr.typeid >> 1)
# make malloced_objects_with_finalizer consistent
# for the sake of a possible collection caused by finalizer
if not self.malloced_objects_with_finalizer:
self.malloced_objects_with_finalizer = next
else:
last.next = next
hdr.next = self.malloced_objects
self.malloced_objects = hdr
#llop.debug_view(lltype.Void, self.malloced_objects, self.malloced_objects_with_finalizer, size_gc_header)
finalizer(obj)
if not self.collect_in_progress: # another collection was caused?
llop.debug_print(lltype.Void, "outer collect interrupted "
"by recursive collect")
return
if not last:
if self.malloced_objects_with_finalizer == next:
self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
else:
# now it gets annoying: finalizer caused a malloc of something
# with a finalizer
last = self.malloced_objects_with_finalizer
while last.next != next:
last = last.next
last.next = lltype.nullptr(self.HDR)
else:
last.next = lltype.nullptr(self.HDR)
hdr = next
self.collect_in_progress = False
STAT_HEAP_USAGE = 0
STAT_BYTES_MALLOCED = 1
STATISTICS_NUMBERS = 2
    def add_reachable_to_stack(self, obj, objects):
        """Append to the mark stack 'objects' the addresses of all objects
        directly reachable from the object at address 'obj', including the
        pointers stored in its variable-sized part if any."""
        size_gc_header = self.gcheaderbuilder.size_gc_header
        gc_info = obj - size_gc_header
        hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
        typeid = hdr.typeid >> 1
        # pointers in the fixed part
        offsets = self.offsets_to_gc_pointers(typeid)
        i = 0
        while i < len(offsets):
            pointer = obj + offsets[i]
            objects.append(pointer.address[0])
            i += 1
        if self.is_varsize(typeid):
            # pointers in each item of the variable part
            offset = self.varsize_offset_to_variable_part(
                typeid)
            length = (obj + self.varsize_offset_to_length(typeid)).signed[0]
            obj += offset
            offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid)
            itemlength = self.varsize_item_sizes(typeid)
            i = 0
            while i < length:
                item = obj + itemlength * i
                j = 0
                while j < len(offsets):
                    pointer = item + offsets[j]
                    objects.append(pointer.address[0])
                    j += 1
                i += 1
def statistics(self, index):
# no memory allocation here!
if index == self.STAT_HEAP_USAGE:
return self.heap_usage
if index == self.STAT_BYTES_MALLOCED:
return self.bytes_malloced
return -1
    def size_gc_header(self, typeid=0):
        # the header size is the same for every typeid
        return self.gcheaderbuilder.size_gc_header
def init_gc_object(self, addr, typeid):
hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
hdr.typeid = typeid << 1
init_gc_object_immortal = init_gc_object
    # experimental support for thread cloning
    def x_swap_pool(self, newpool):
        """Set newpool as the current pool (create one if newpool == NULL)
        and return the previous pool.

        All malloc'ed objects are put into the current pool; this is a
        way to separate objects depending on when they were allocated.
        """
        size_gc_header = self.gcheaderbuilder.size_gc_header
        # invariant: each POOL GcStruct is at the _front_ of a linked list
        # of malloced objects.
        oldpool = self.curpool
        #llop.debug_print(lltype.Void, 'x_swap_pool',
        #                 lltype.cast_ptr_to_int(oldpool),
        #                 lltype.cast_ptr_to_int(newpool))
        if not oldpool:
            # make a fresh pool object, which is automatically inserted at the
            # front of the current list
            oldpool = lltype.malloc(self.POOL)
            addr = llmemory.cast_ptr_to_adr(oldpool)
            addr -= size_gc_header
            hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
            # put this new POOL object in the poolnodes list
            node = lltype.malloc(self.POOLNODE, flavor="raw")
            node.linkedlist = hdr
            node.nextnode = self.poolnodes
            self.poolnodes = node
        else:
            # manually insert oldpool at the front of the current list
            addr = llmemory.cast_ptr_to_adr(oldpool)
            addr -= size_gc_header
            hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
            hdr.next = self.malloced_objects
        newpool = lltype.cast_opaque_ptr(self.POOLPTR, newpool)
        if newpool:
            # newpool is at the front of the new linked list to install
            addr = llmemory.cast_ptr_to_adr(newpool)
            addr -= size_gc_header
            hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
            self.malloced_objects = hdr.next
            # invariant: now that objects in the hdr.next list are accessible
            # through self.malloced_objects, make sure they are not accessible
            # via poolnodes (which has a node pointing to newpool):
            hdr.next = lltype.nullptr(self.HDR)
        else:
            # start a fresh new linked list
            self.malloced_objects = lltype.nullptr(self.HDR)
        self.curpool = newpool
        return lltype.cast_opaque_ptr(X_POOL_PTR, oldpool)
    def x_clone(self, clonedata):
        """Recursively clone the gcobject and everything it points to,
        directly or indirectly -- but stop at objects that are not in the
        specified pool.  A new pool is built to contain the copies, and
        the 'gcobjectptr' and 'pool' fields of clonedata are adjusted to
        refer to the result."""
        CURPOOL_FLAG = sys.maxint // 2 + 1
        # install a new pool into which all the mallocs go
        curpool = self.x_swap_pool(lltype.nullptr(X_POOL))
        size_gc_header = self.gcheaderbuilder.size_gc_header
        oldobjects = self.AddressLinkedList()
        # if no pool specified, use the current pool as the 'source' pool
        oldpool = clonedata.pool or curpool
        oldpool = lltype.cast_opaque_ptr(self.POOLPTR, oldpool)
        addr = llmemory.cast_ptr_to_adr(oldpool)
        addr -= size_gc_header
        hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
        hdr = hdr.next # skip the POOL object itself
        while hdr:
            next = hdr.next
            hdr.typeid |= CURPOOL_FLAG # mark all objects from malloced_list
            hdr.next = lltype.nullptr(self.HDR) # abused to point to the copy
            oldobjects.append(llmemory.cast_ptr_to_adr(hdr))
            hdr = next
        # a stack of addresses of places that still points to old objects
        # and that must possibly be fixed to point to a new copy
        stack = self.AddressLinkedList()
        stack.append(llmemory.cast_ptr_to_adr(clonedata)
                     + llmemory.offsetof(X_CLONE, 'gcobjectptr'))
        while stack.non_empty():
            gcptr_addr = stack.pop()
            oldobj_addr = gcptr_addr.address[0]
            if not oldobj_addr:
                continue # pointer is NULL
            oldhdr = llmemory.cast_adr_to_ptr(oldobj_addr - size_gc_header,
                                              self.HDRPTR)
            typeid = oldhdr.typeid
            if not (typeid & CURPOOL_FLAG):
                continue # ignore objects that were not in the malloced_list
            newhdr = oldhdr.next # abused to point to the copy
            if not newhdr:
                # not copied yet: allocate the copy now
                typeid = (typeid & ~CURPOOL_FLAG) >> 1
                size = self.fixed_size(typeid)
                # XXX! collect() at the beginning if the free heap is low
                if self.is_varsize(typeid):
                    itemsize = self.varsize_item_sizes(typeid)
                    offset_to_length = self.varsize_offset_to_length(typeid)
                    length = (oldobj_addr + offset_to_length).signed[0]
                    newobj = self.malloc_varsize(typeid, length, size,
                                                 itemsize, offset_to_length,
                                                 False)
                    size += length*itemsize
                else:
                    newobj = self.malloc_fixedsize(typeid, size, False)
                    length = -1
                newobj_addr = llmemory.cast_ptr_to_adr(newobj)
                #llop.debug_print(lltype.Void, 'clone',
                #                 llmemory.cast_adr_to_int(oldobj_addr),
                #                 '->', llmemory.cast_adr_to_int(newobj_addr),
                #                 'typeid', typeid,
                #                 'length', length)
                newhdr_addr = newobj_addr - size_gc_header
                newhdr = llmemory.cast_adr_to_ptr(newhdr_addr, self.HDRPTR)
                saved_id = newhdr.typeid  # XXX hack needed for genc
                saved_next = newhdr.next  # where size_gc_header == 0
                raw_memcopy(oldobj_addr, newobj_addr, size)
                newhdr.typeid = saved_id
                newhdr.next = saved_next
                # schedule the copy's own pointers for fixing
                offsets = self.offsets_to_gc_pointers(typeid)
                i = 0
                while i < len(offsets):
                    pointer_addr = newobj_addr + offsets[i]
                    stack.append(pointer_addr)
                    i += 1
                if length > 0:
                    # ... including the pointers in the variable part
                    offsets = self.varsize_offsets_to_gcpointers_in_var_part(
                        typeid)
                    itemlength = self.varsize_item_sizes(typeid)
                    offset = self.varsize_offset_to_variable_part(typeid)
                    itembaseaddr = newobj_addr + offset
                    i = 0
                    while i < length:
                        item = itembaseaddr + itemlength * i
                        j = 0
                        while j < len(offsets):
                            pointer_addr = item + offsets[j]
                            stack.append(pointer_addr)
                            j += 1
                        i += 1
                oldhdr.next = newhdr
            # redirect the pointer we popped to the copy
            newobj_addr = llmemory.cast_ptr_to_adr(newhdr) + size_gc_header
            gcptr_addr.address[0] = newobj_addr
        stack.delete()
        # re-create the original linked list
        next = lltype.nullptr(self.HDR)
        while oldobjects.non_empty():
            hdr = llmemory.cast_adr_to_ptr(oldobjects.pop(), self.HDRPTR)
            hdr.typeid &= ~CURPOOL_FLAG # reset the flag
            hdr.next = next
            next = hdr
        oldobjects.delete()
        # consistency check
        addr = llmemory.cast_ptr_to_adr(oldpool)
        addr -= size_gc_header
        hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
        assert hdr.next == next
        # build the new pool object collecting the new objects, and
        # reinstall the pool that was current at the beginning of x_clone()
        clonedata.pool = self.x_swap_pool(curpool)
def add_reachable_to_stack2(self, obj, objects, target_addr, source_addr):
    """Push all GC pointers contained in 'obj' onto the mark stack 'objects',
    rewriting any pointer that equals 'target_addr' to 'source_addr' first.

    This is the x_become() variant of the tracing helper used by collect():
    identical pointer walk, plus the target->source redirection.  'obj' is
    the address of the object's data (past the GC header).
    """
    size_gc_header = self.gcheaderbuilder.size_gc_header
    gc_info = obj - size_gc_header
    hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
    if hdr.typeid & 1:
        # low bit of typeid is the mark bit: already visited
        return
    typeid = hdr.typeid >> 1
    # fixed-size part: walk the per-type list of pointer field offsets
    offsets = self.offsets_to_gc_pointers(typeid)
    i = 0
    while i < len(offsets):
        pointer = obj + offsets[i]
        # -------------------------------------------------
        # begin difference from collect
        if pointer.address[0] == target_addr:
            pointer.address[0] = source_addr
        # end difference from collect
        # -------------------------------------------------
        objects.append(pointer.address[0])
        i += 1
    if self.is_varsize(typeid):
        # variable-size part: same walk repeated for each array item
        offset = self.varsize_offset_to_variable_part(
            typeid)
        length = (obj + self.varsize_offset_to_length(typeid)).signed[0]
        obj += offset
        offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid)
        itemlength = self.varsize_item_sizes(typeid)
        i = 0
        while i < length:
            item = obj + itemlength * i
            j = 0
            while j < len(offsets):
                pointer = item + offsets[j]
                # -------------------------------------------------
                # begin difference from collect
                if pointer.address[0] == target_addr:
                    pointer.address[0] = source_addr
                ## end difference from collect
                # -------------------------------------------------
                objects.append(pointer.address[0])
                j += 1
            i += 1
def x_become(self, target_addr, source_addr):
    """Make every reference to the object at 'target_addr' point to the
    object at 'source_addr' instead, by running a full mark-and-sweep
    collection whose tracing step (add_reachable_to_stack2) rewrites the
    pointers as it visits them.

    The body below is a copy of collect() with the tracing helper swapped;
    the numbered comment block is inherited from collect().
    """
    # 1. mark from the roots, and also the objects that objects-with-del
    #    point to (using the list of malloced_objects_with_finalizer)
    # 2. walk the list of objects-without-del and free the ones not marked
    # 3. walk the list of objects-with-del and for the ones not marked:
    #    call __del__, move the object to the list of object-without-del
    import time
    from pypy.rpython.lltypesystem.lloperation import llop
    if DEBUG_PRINT:
        llop.debug_print(lltype.Void, 'collecting...')
    start_time = time.time()
    roots = self.get_roots()
    size_gc_header = self.gcheaderbuilder.size_gc_header
##     llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
##                     size_gc_header)
    # push the roots on the mark stack
    objects = self.AddressLinkedList() # mark stack
    while 1:
        curr = roots.pop()
        if curr == NULL:
            break
        # roots is a list of addresses to addresses:
        objects.append(curr.address[0])
        # the last sweep did not clear the mark bit of static roots,
        # since they are not in the malloced_objects list
        gc_info = curr.address[0] - size_gc_header
        hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
        hdr.typeid = hdr.typeid & (~1)
    free_non_gc_object(roots)
    # from this point onwards, no more mallocs should be possible
    old_malloced = self.bytes_malloced
    self.bytes_malloced = 0
    curr_heap_size = 0
    freed_size = 0
    # mark objects reachable by objects with a finalizer, but not those
    # themselves. add their size to curr_heap_size, since they always
    # survive the collection
    hdr = self.malloced_objects_with_finalizer
    while hdr:
        next = hdr.next
        typeid = hdr.typeid >> 1
        gc_info = llmemory.cast_ptr_to_adr(hdr)
        obj = gc_info + size_gc_header
        self.add_reachable_to_stack2(obj, objects, target_addr, source_addr)
        addr = llmemory.cast_ptr_to_adr(hdr)
        size = self.fixed_size(typeid)
        if self.is_varsize(typeid):
            length = (obj + self.varsize_offset_to_length(typeid)).signed[0]
            size += self.varsize_item_sizes(typeid) * length
        estimate = raw_malloc_usage(size_gc_header + size)
        curr_heap_size += estimate
        hdr = next
    # mark thinks on the mark stack and put their descendants onto the
    # stack until the stack is empty
    while objects.non_empty():  #mark
        curr = objects.pop()
        self.add_reachable_to_stack2(curr, objects, target_addr, source_addr)
        gc_info = curr - size_gc_header
        hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
        if hdr.typeid & 1:
            continue
        hdr.typeid = hdr.typeid | 1
    objects.delete()
    # also mark self.curpool
    if self.curpool:
        gc_info = llmemory.cast_ptr_to_adr(self.curpool) - size_gc_header
        hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
        hdr.typeid = hdr.typeid | 1
    # sweep: delete objects without del if they are not marked
    # unmark objects without del that are marked
    firstpoolnode = lltype.malloc(self.POOLNODE, flavor='raw')
    firstpoolnode.linkedlist = self.malloced_objects
    firstpoolnode.nextnode = self.poolnodes
    prevpoolnode = lltype.nullptr(self.POOLNODE)
    poolnode = firstpoolnode
    while poolnode:  #sweep
        # ppnext points at the field that should receive the next
        # surviving header, so freed headers are unlinked in place
        ppnext = llmemory.cast_ptr_to_adr(poolnode)
        ppnext += llmemory.offsetof(self.POOLNODE, 'linkedlist')
        hdr = poolnode.linkedlist
        while hdr:  #sweep
            typeid = hdr.typeid >> 1
            next = hdr.next
            addr = llmemory.cast_ptr_to_adr(hdr)
            size = self.fixed_size(typeid)
            if self.is_varsize(typeid):
                length = (addr + size_gc_header + self.varsize_offset_to_length(typeid)).signed[0]
                size += self.varsize_item_sizes(typeid) * length
            estimate = raw_malloc_usage(size_gc_header + size)
            if hdr.typeid & 1:
                # marked: survives; clear the mark bit and relink
                hdr.typeid = hdr.typeid & (~1)
                ppnext.address[0] = addr
                ppnext = llmemory.cast_ptr_to_adr(hdr)
                ppnext += llmemory.offsetof(self.HDR, 'next')
                curr_heap_size += estimate
            else:
                # unmarked: free it
                freed_size += estimate
                raw_free(addr)
            hdr = next
        ppnext.address[0] = llmemory.NULL
        next = poolnode.nextnode
        if not poolnode.linkedlist and prevpoolnode:
            # completely empty node
            prevpoolnode.nextnode = next
            lltype.free(poolnode, flavor='raw')
        else:
            prevpoolnode = poolnode
        poolnode = next
    self.malloced_objects = firstpoolnode.linkedlist
    self.poolnodes = firstpoolnode.nextnode
    lltype.free(firstpoolnode, flavor='raw')
    if curr_heap_size > self.bytes_malloced_threshold:
        self.bytes_malloced_threshold = curr_heap_size
    end_time = time.time()
    self.total_collection_time += end_time - start_time
    if DEBUG_PRINT:
        llop.debug_print(lltype.Void,
                         "  malloced since previous collection:",
                         old_malloced, "bytes")
        llop.debug_print(lltype.Void,
                         "  heap usage at start of collection: ",
                         self.heap_usage + old_malloced, "bytes")
        llop.debug_print(lltype.Void,
                         "  freed:                             ",
                         freed_size, "bytes")
        llop.debug_print(lltype.Void,
                         "  new heap usage:                    ",
                         curr_heap_size, "bytes")
        llop.debug_print(lltype.Void,
                         "  total time spent collecting:       ",
                         self.total_collection_time, "seconds")
##     llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
##                     size_gc_header)
    assert self.heap_usage + old_malloced == curr_heap_size + freed_size
    # call finalizers if needed
    self.heap_usage = curr_heap_size
    hdr = self.malloced_objects_with_finalizer
    self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
    while hdr:
        next = hdr.next
        if hdr.typeid & 1:
            # survivor: keep it on the with-finalizer list, clear mark
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
            hdr.typeid = hdr.typeid & (~1)
        else:
            # dead: run the finalizer, then move it to the plain list so
            # the next collection frees it without re-running __del__
            obj = llmemory.cast_ptr_to_adr(hdr) + size_gc_header
            finalizer = self.getfinalizer(hdr.typeid >> 1)
            finalizer(obj)
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        hdr = next
class SemiSpaceGC(GCBase):
    """Stop-and-copy collector over two equal-sized raw-malloced spaces.

    Objects are bump-allocated in 'tospace'; when it fills up, collect()
    flips the spaces and copies the live objects over, leaving forwarding
    pointers behind.  The two-word header stores ('forw', 'typeid'): while
    an object is forwarded, 'typeid' is negated (see set_forwarding_address)
    and 'forw' holds the new address.
    """
    _alloc_flavor_ = "raw"

    HDR = lltype.Struct('header', ('forw', lltype.Signed),
                        ('typeid', lltype.Signed))

    def __init__(self, AddressLinkedList, space_size=1024*int_size,
                 get_roots=None):
        self.bytes_malloced = 0
        self.space_size = space_size
        self.tospace = NULL
        self.top_of_space = NULL
        self.fromspace = NULL
        self.free = NULL          # bump pointer inside tospace
        self.get_roots = get_roots
        self.gcheaderbuilder = GCHeaderBuilder(self.HDR)

    def setup(self):
        # allocate both semispaces up front
        self.tospace = raw_malloc(self.space_size)
        debug_assert(bool(self.tospace), "couldn't allocate tospace")
        self.top_of_space = self.tospace + self.space_size
        self.fromspace = raw_malloc(self.space_size)
        debug_assert(bool(self.fromspace), "couldn't allocate fromspace")
        self.free = self.tospace

    def free_memory(self):
        "NOT_RPYTHON"
        raw_free(self.tospace)
        self.tospace = NULL
        raw_free(self.fromspace)
        self.fromspace = NULL

    def malloc(self, typeid, length=0):
        # generic entry point dispatching on fixed- vs variable-sized types
        size = self.fixed_size(typeid)
        if self.is_varsize(typeid):
            itemsize = self.varsize_item_sizes(typeid)
            offset_to_length = self.varsize_offset_to_length(typeid)
            ref = self.malloc_varsize(typeid, length, size, itemsize,
                                      offset_to_length, True)
        else:
            ref = self.malloc_fixedsize(typeid, size, True)
        # XXX lots of cast and reverse-cast around, but this malloc()
        # should eventually be killed
        return llmemory.cast_ptr_to_adr(ref)

    def malloc_fixedsize(self, typeid, size, can_collect):
        """Bump-allocate a fixed-size object; collect once if out of space."""
        size_gc_header = self.gcheaderbuilder.size_gc_header
        totalsize = size_gc_header + size
        if can_collect and self.free + totalsize > self.top_of_space:
            self.collect()
            #XXX need to increase the space size if the object is too big
            #for bonus points do big objects differently
            if self.free + totalsize > self.top_of_space:
                raise memoryError
        result = self.free
        self.init_gc_object(result, typeid)
        self.free += totalsize
        return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)

    def malloc_varsize(self, typeid, length, size, itemsize, offset_to_length,
                       can_collect):
        """Bump-allocate a variable-size object and store its length field."""
        try:
            varsize = ovfcheck(itemsize * length)
        except OverflowError:
            raise memoryError
        # XXX also check for overflow on the various '+' below!
        size += varsize
        size_gc_header = self.gcheaderbuilder.size_gc_header
        totalsize = size_gc_header + size
        if can_collect and self.free + totalsize > self.top_of_space:
            self.collect()
            #XXX need to increase the space size if the object is too big
            #for bonus points do big objects differently
            if self.free + totalsize > self.top_of_space:
                raise memoryError
        result = self.free
        self.init_gc_object(result, typeid)
        (result + size_gc_header + offset_to_length).signed[0] = length
        self.free += totalsize
        return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)

    def collect(self):
        """Flip the semispaces and copy all live objects (Cheney scan)."""
##         print "collecting"
        tospace = self.fromspace
        fromspace = self.tospace
        self.fromspace = fromspace
        self.tospace = tospace
        self.top_of_space = tospace + self.space_size
        roots = self.get_roots()
        scan = self.free = tospace
        while 1:
            root = roots.pop()
            if root == NULL:
                break
##             print "root", root, root.address[0]
            root.address[0] = self.copy(root.address[0])
        free_non_gc_object(roots)
        # Cheney scan: 'scan' chases 'free' until no gray objects remain
        while scan < self.free:
            curr = scan + self.size_gc_header()
            self.trace_and_copy(curr)
            scan += self.get_size(curr) + self.size_gc_header()

    def copy(self, obj):
        """Return the tospace address of 'obj', copying it if needed."""
        if not self.fromspace <= obj < self.fromspace + self.space_size:
            # not a fromspace object (e.g. a prebuilt constant)
            return self.copy_non_managed_obj(obj)
##         print "copying regularly", obj,
        if self.is_forwarded(obj):
##             print "already copied to", self.get_forwarding_address(obj)
            return self.get_forwarding_address(obj)
        else:
            newaddr = self.free
            totalsize = self.get_size(obj) + self.size_gc_header()
            raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
            self.free += totalsize
            newobj = newaddr + self.size_gc_header()
##             print "to", newobj
            self.set_forwarding_address(obj, newobj)
            return newobj

    def copy_non_managed_obj(self, obj): #umph, PBCs, not really copy
##         print "copying nonmanaged", obj
        #we have to do the tracing here because PBCs are not moved to tospace
        self.trace_and_copy(obj)
        return obj

    def trace_and_copy(self, obj):
        """Update every GC pointer inside 'obj' through copy()."""
        gc_info = obj - self.size_gc_header()
        typeid = gc_info.signed[1]
##         print "scanning", obj, typeid
        offsets = self.offsets_to_gc_pointers(typeid)
        i = 0
        while i < len(offsets):
            pointer = obj + offsets[i]
            if pointer.address[0] != NULL:
                pointer.address[0] = self.copy(pointer.address[0])
            i += 1
        if self.is_varsize(typeid):
            offset = self.varsize_offset_to_variable_part(
                typeid)
            length = (obj + self.varsize_offset_to_length(typeid)).signed[0]
            offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid)
            itemlength = self.varsize_item_sizes(typeid)
            i = 0
            while i < length:
                item = obj + offset + itemlength * i
                j = 0
                while j < len(offsets):
                    pointer = item + offsets[j]
                    if pointer.address[0] != NULL:
                        pointer.address[0] = self.copy(pointer.address[0])
                    j += 1
                i += 1

    def is_forwarded(self, obj):
        # forwarded objects carry a negated typeid (see below)
        return (obj - self.size_gc_header()).signed[1] < 0

    def get_forwarding_address(self, obj):
        return (obj - self.size_gc_header()).address[0]

    def set_forwarding_address(self, obj, newobj):
        gc_info = obj - self.size_gc_header()
        # negate the typeid (bijectively, -x-1) to flag forwarding, and
        # reuse the 'forw' word for the new address
        gc_info.signed[1] = -gc_info.signed[1] - 1
        gc_info.address[0] = newobj

    def get_size(self, obj):
        """Size in bytes of the object's data (header excluded)."""
        typeid = (obj - self.size_gc_header()).signed[1]
        size = self.fixed_size(typeid)
        if self.is_varsize(typeid):
            lenaddr = obj + self.varsize_offset_to_length(typeid)
            length = lenaddr.signed[0]
            size += length * self.varsize_item_sizes(typeid)
        return size

    def size_gc_header(self, typeid=0):
        return self.gcheaderbuilder.size_gc_header

    def init_gc_object(self, addr, typeid):
        addr.signed[0] = 0   # 'forw' word, cleared
        addr.signed[1] = typeid
    init_gc_object_immortal = init_gc_object
class DeferredRefcountingGC(GCBase):
    """Reference-counting collector with deferred zero-count processing.

    decref() does not free immediately: objects whose count drops to zero
    are queued in 'zero_ref_counts' and only deallocated during collect(),
    which first re-increfs everything reachable from the roots so that
    root-referenced objects with a stale zero entry survive.  The two-word
    header is (refcount, typeid); a negative typeid marks an object already
    scheduled for deallocation.
    """
    _alloc_flavor_ = "raw"

    def __init__(self, AddressLinkedList, max_refcount_zero=50, get_roots=None):
        self.zero_ref_counts = None
        self.AddressLinkedList = AddressLinkedList
        self.length_zero_ref_counts = 0
        self.max_refcount_zero = max_refcount_zero
        #self.set_query_functions(None, None, None, None, None, None, None)
        self.get_roots = get_roots
        self.collecting = False

    def setup(self):
        self.zero_ref_counts = self.AddressLinkedList()

    def malloc(self, typeid, length=0):
        size = self.fixed_size(typeid)
        if self.is_varsize(typeid):
            size += length * self.varsize_item_sizes(typeid)
        size_gc_header = self.size_gc_header()
        result = raw_malloc(size + size_gc_header)
##         print "mallocing %s, size %s at %s" % (typeid, size, result)
        if not result:
            raise memoryError
        result.signed[0] = 0 # refcount
        result.signed[1] = typeid
        return result + size_gc_header

    def collect(self):
        """Drain the deferred-zero queue, freeing objects that are really
        unreferenced; guarded against reentrance via self.collecting."""
        if self.collecting:
            return
        else:
            self.collecting = True
        roots = self.get_roots()
        roots_copy = self.AddressLinkedList()
        curr = roots.pop()
        while curr != NULL:
##             print "root", root, root.address[0]
##             assert self.refcount(root.address[0]) >= 0, "refcount negative"
            # temporarily incref root-referenced objects so a queued zero
            # entry does not free them below
            self.incref(curr.address[0])
            roots_copy.append(curr)
            curr = roots.pop()
        roots = roots_copy
        dealloc_list = self.AddressLinkedList()
        self.length_zero_ref_counts = 0
        while self.zero_ref_counts.non_empty():
            candidate = self.zero_ref_counts.pop()
            refcount = self.refcount(candidate)
            typeid = (candidate - self.size_gc_header()).signed[1]
            if (refcount == 0 and typeid >= 0):
                # flag as scheduled (negate typeid) so duplicates in the
                # queue are ignored
                (candidate - self.size_gc_header()).signed[1] = -typeid - 1
                dealloc_list.append(candidate)
        while dealloc_list.non_empty():
            deallocate = dealloc_list.pop()
            typeid = (deallocate - self.size_gc_header()).signed[1]
            # un-negate the typeid before deallocate() reads it
            (deallocate - self.size_gc_header()).signed[1] = -typeid - 1
            self.deallocate(deallocate)
        dealloc_list.delete()
        # undo the temporary root increfs
        while roots.non_empty():
            root = roots.pop()
            self.decref(root.address[0])
        roots.delete()
        self.collecting = False

    def write_barrier(self, addr, addr_to, addr_struct):
        # store 'addr' into the pointer field at 'addr_to', keeping counts
        self.decref(addr_to.address[0])
        addr_to.address[0] = addr
        self.incref(addr)

    def deallocate(self, obj):
        """Decref everything 'obj' points to, then free its raw memory."""
        gc_info = obj - self.size_gc_header()
        typeid = gc_info.signed[1]
##         print "deallocating", obj, typeid
        offsets = self.offsets_to_gc_pointers(typeid)
        i = 0
        while i < len(offsets):
            pointer = obj + offsets[i]
            self.decref(pointer.address[0])
            i += 1
        if self.is_varsize(typeid):
            offset = self.varsize_offset_to_variable_part(
                typeid)
            length = (obj + self.varsize_offset_to_length(typeid)).signed[0]
            offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid)
            itemlength = self.varsize_item_sizes(typeid)
            i = 0
            while i < length:
                item = obj + offset + itemlength * i
                j = 0
                while j < len(offsets):
                    pointer = item + offsets[j]
                    self.decref(pointer.address[0])
                    j += 1
                i += 1
        raw_free(gc_info)

    def incref(self, addr):
        if addr == NULL:
            return
        (addr - self.size_gc_header()).signed[0] += 1

    def decref(self, addr):
        if addr == NULL:
            return
        refcount = (addr - self.size_gc_header()).signed[0]
##         assert refcount > 0, "neg refcount"
        if refcount == 1:
            # dropping to zero: queue for deferred deallocation
            self.zero_ref_counts.append(addr)
            self.length_zero_ref_counts += 1
            if self.length_zero_ref_counts > self.max_refcount_zero:
                self.collect()
        (addr - self.size_gc_header()).signed[0] = refcount - 1

    def refcount(self, addr):
        return (addr - self.size_gc_header()).signed[0]

    def init_gc_object(self, addr, typeid):
        addr.signed[0] = 0 # refcount
        addr.signed[1] = typeid

    def init_gc_object_immortal(self, addr, typeid):
        # huge refcount so prebuilt objects never reach zero
        addr.signed[0] = sys.maxint // 2 # refcount
        addr.signed[1] = typeid

    def size_gc_header(self, typeid=0):
        return gc_header_two_ints
| Python |
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.memory.lltypelayout import sizeof
from pypy.rlib.objectmodel import free_non_gc_object
INT_SIZE = sizeof(lltype.Signed)
DEFAULT_CHUNK_SIZE = 1019
def get_address_linked_list(chunk_size=DEFAULT_CHUNK_SIZE, hackishpop=False):
    """Build and return an AddressLinkedList class: a raw-allocated stack of
    Addresses stored in chunks of 'chunk_size' entries, linked via a
    'previous' pointer.  Chunks are recycled through a process-global free
    list.  With hackishpop=True, pop() on an empty list returns NULL instead
    of reading garbage.
    """
    CHUNK = lltype.ForwardReference()
    CHUNK.become(lltype.Struct('AddressLinkedListChunk',
                               ('previous', lltype.Ptr(CHUNK)),
                               ('length', lltype.Signed),
                               ('items', lltype.FixedSizeArray(
                                   llmemory.Address, chunk_size))))
    null_chunk = lltype.nullptr(CHUNK)
    SIZEOF_CHUNK = llmemory.sizeof(CHUNK)

    class FreeList(object):
        # recycler for CHUNK structures, to avoid churning raw_malloc
        _alloc_flavor_ = "raw"

        def __init__(self):
            self.free_list = null_chunk

        def get(self):
            if not self.free_list:
                from pypy.rpython.memory.lladdress import raw_memclear
                r = lltype.malloc(CHUNK, flavor="raw")
                raw_memclear(llmemory.cast_ptr_to_adr(r), SIZEOF_CHUNK)
                return r
            result = self.free_list
            self.free_list = result.previous
            return result

        def put(self, chunk):
            chunk.previous = self.free_list
            self.free_list = chunk

    unused_chunks = FreeList()

    class AddressLinkedList(object):
        _alloc_flavor_ = "raw"

        def __init__(self):
            self.chunk = unused_chunks.get()
            self.chunk.previous = null_chunk
            self.chunk.length = 0

        def enlarge(self):
            # current chunk full: push a fresh one on top
            new = unused_chunks.get()
            new.previous = self.chunk
            new.length = 0
            self.chunk = new
            return new
        enlarge.dont_inline = True

        def shrink(self):
            # current chunk empty: drop back to the previous one
            old = self.chunk
            self.chunk = old.previous
            unused_chunks.put(old)
            return self.chunk
        shrink.dont_inline = True

        def append(self, addr):
            # NULL addresses are silently ignored
            if addr == llmemory.NULL:
                return
            chunk = self.chunk
            if chunk.length == chunk_size:
                chunk = self.enlarge()
            used_chunks = chunk.length
            chunk.length = used_chunks + 1
            chunk.items[used_chunks] = addr

        def non_empty(self):
            chunk = self.chunk
            return chunk.length != 0 or bool(chunk.previous)

        def pop(self):
            if hackishpop and not self.non_empty():
                return llmemory.NULL
            chunk = self.chunk
            if chunk.length == 0:
                chunk = self.shrink()
            used_chunks = self.chunk.length - 1
            result = chunk.items[used_chunks]
            chunk.length = used_chunks
            return result

        def delete(self):
            # return every chunk to the free list and free the wrapper
            cur = self.chunk
            while cur:
                prev = cur.previous
                unused_chunks.put(cur)
                cur = prev
            free_non_gc_object(self)

    return AddressLinkedList
| Python |
from pypy.rpython.lltypesystem import lltype, llmemory
import struct
# struct-module format character used to model each primitive lltype in the
# simulated memory layout
primitive_to_fmt = {lltype.Signed:          "l",
                    lltype.Unsigned:        "L",
                    lltype.Char:            "c",
                    lltype.UniChar:         "H", # maybe
                    lltype.Bool:            "B",
                    lltype.Float:           "d",
                    llmemory.Address:       "P",
                    }
#___________________________________________________________________________
# Utility functions that know about the memory layout of the lltypes
# in the simulation
#returns some sort of layout information that is useful for the simulatorptr
def get_layout(TYPE):
    """Return layout information for TYPE, as used by the simulatorptr.

    Primitives and pointers yield a struct format string; Structs yield a
    dict {fieldname: byte offset, "_size": total fixed size}; Arrays yield
    (length-field size, item size); opaque/function/PyObject types yield
    the placeholder "i".
    """
    layout = {}
    if isinstance(TYPE, lltype.Primitive):
        return primitive_to_fmt[TYPE]
    elif isinstance(TYPE, lltype.Ptr):
        return "P"
    elif isinstance(TYPE, lltype.Struct):
        curr = 0
        for name in TYPE._names:
            layout[name] = curr
            curr += get_fixed_size(TYPE._flds[name])
        layout["_size"] = curr
        return layout
    elif isinstance(TYPE, lltype.Array):
        return (get_fixed_size(lltype.Signed), get_fixed_size(TYPE.OF))
    elif isinstance(TYPE, lltype.OpaqueType):
        return "i"
    elif isinstance(TYPE, lltype.FuncType):
        return "i"
    elif isinstance(TYPE, lltype.PyObjectType):
        return "i"
    else:
        assert 0, "type %s not yet implemented" % (TYPE, )
def get_fixed_size(TYPE):
    """Return the byte size of the fixed part of TYPE in the simulation.

    Void is 0; primitives use struct.calcsize of their format char; an
    Array's fixed part is just its length word; opaque/function/PyObject
    types are modelled as one Unsigned.
    """
    if isinstance(TYPE, lltype.Primitive):
        if TYPE == lltype.Void:
            return 0
        return struct.calcsize(primitive_to_fmt[TYPE])
    elif isinstance(TYPE, lltype.Ptr):
        return struct.calcsize("P")
    elif isinstance(TYPE, lltype.Struct):
        return get_layout(TYPE)["_size"]
    elif isinstance(TYPE, lltype.Array):
        return get_fixed_size(lltype.Unsigned)
    elif isinstance(TYPE, lltype.OpaqueType):
        return get_fixed_size(lltype.Unsigned)
    elif isinstance(TYPE, lltype.FuncType):
        return get_fixed_size(lltype.Unsigned)
    elif isinstance(TYPE, lltype.PyObjectType):
        return get_fixed_size(lltype.Unsigned)
    assert 0, "not yet implemented"
def get_variable_size(TYPE):
    """Return the per-item byte size of TYPE's variable part (0 if TYPE has
    no variable part)."""
    if isinstance(TYPE, lltype.Array):
        return get_fixed_size(TYPE.OF)
    elif isinstance(TYPE, lltype.Primitive):
        return 0
    elif isinstance(TYPE, lltype.Struct):
        if TYPE._arrayfld is not None:
            # the variable part is the trailing inlined array field
            return get_variable_size(TYPE._flds[TYPE._arrayfld])
        else:
            return 0
    elif isinstance(TYPE, lltype.OpaqueType):
        return 0
    elif isinstance(TYPE, lltype.FuncType):
        return 0
    elif isinstance(TYPE, lltype.PyObjectType):
        return 0
    elif isinstance(TYPE, lltype.Ptr):
        return 0
    else:
        assert 0, "not yet implemented"
def sizeof(TYPE, i=None):
    """Total simulated byte size of TYPE.

    For a fixed-size TYPE, call with i=None (asserts there is no variable
    part).  For a variable-sized TYPE, 'i' is the number of items in the
    variable part.
    """
    fixed = get_fixed_size(TYPE)
    per_item = get_variable_size(TYPE)
    if i is None:
        assert per_item == 0
        return fixed
    return fixed + per_item * i
def convert_offset_to_int(offset):
    """Map a symbolic llmemory offset to a concrete integer byte offset
    under the simulated layout."""
    if isinstance(offset, llmemory.FieldOffset):
        layout = get_layout(offset.TYPE)
        return layout[offset.fldname]
    elif isinstance(offset, llmemory.CompositeOffset):
        return sum([convert_offset_to_int(item) for item in offset.offsets])
    elif type(offset) == llmemory.AddressOffset:
        # NOTE: deliberate exact-type check, not isinstance -- subclasses of
        # AddressOffset (ItemOffset, ArrayItemsOffset, ...) must fall
        # through to their own branches below
        return 0
    elif isinstance(offset, llmemory.ItemOffset):
        return sizeof(offset.TYPE) * offset.repeat
    elif isinstance(offset, llmemory.ArrayItemsOffset):
        # skip the length word preceding the items
        return get_fixed_size(lltype.Signed)
    elif isinstance(offset, llmemory.GCHeaderOffset):
        return sizeof(offset.gcheaderbuilder.HDR)
    elif isinstance(offset, llmemory.ArrayLengthOffset):
        return 0
    else:
        raise Exception("unknown offset type %r"%offset)
# _____________________________________________________________________________
# the following functions are used to find contained pointers
def offsets_to_gc_pointers(TYPE):
    """Return the list of symbolic offsets, inside a fixed-size TYPE, at
    which GC pointers are stored (recursing into inlined substructs).
    Non-struct types contain no such offsets."""
    if isinstance(TYPE, lltype.Struct):
        offsets = []
        for name in TYPE._names:
            FIELD = getattr(TYPE, name)
            if isinstance(FIELD, lltype.Ptr) and FIELD.TO._gckind == 'gc':
                offsets.append(llmemory.offsetof(TYPE, name))
            elif isinstance(FIELD, lltype.Struct):
                suboffsets = offsets_to_gc_pointers(FIELD)
                offsets += [s + llmemory.offsetof(TYPE, name) for s in suboffsets]
        return offsets
    return []
def varsize_offset_to_length(TYPE):
    """Offset of the length word of a variable-sized TYPE: 0 for a plain
    Array (length is first), or the offset of the inlined array field for a
    Struct.  NOTE(review): falls through and returns None for any other
    TYPE -- callers presumably only pass varsized types; confirm.
    """
    if isinstance(TYPE, lltype.Array):
        return 0
    elif isinstance(TYPE, lltype.Struct):
        layout = get_layout(TYPE)
        return layout[TYPE._arrayfld]
def varsize_offsets_to_gcpointers_in_var_part(TYPE):
    """Return the offsets of GC pointers inside ONE item of the variable
    part of TYPE (relative to the item's start)."""
    if isinstance(TYPE, lltype.Array):
        if isinstance(TYPE.OF, lltype.Ptr):
            # item is itself a pointer
            return [0]
        elif isinstance(TYPE.OF, lltype.Struct):
            return offsets_to_gc_pointers(TYPE.OF)
        return []
    elif isinstance(TYPE, lltype.Struct):
        # delegate to the inlined trailing array field
        return varsize_offsets_to_gcpointers_in_var_part(getattr(TYPE,
                                                                 TYPE._arrayfld))
| Python |
#
| Python |
import array
import struct
# all addresses in the simulator are just ints
# possible chars in status are:
# 'u': uninitialized
# 'i': initialized
class MemorySimulatorError(Exception):
    """Raised for any invalid access to the simulated memory (freed block,
    out-of-bounds, uninitialized read, bad free, ...)."""
    pass
class MemoryBlock(object):
    """One contiguous region of simulated memory.

    Bytes live in a char array; a parallel 'status' array tracks, per byte,
    whether it is 'u'ninitialized or 'i'nitialized, so reads of
    uninitialized memory are detected.  (Python 2 code: uses the 'c' array
    typecode and tostring/fromstring.)
    """
    def __init__(self, baseaddress, size):
        self.baseaddress = baseaddress
        self.size = size
        self.memory = array.array("c", "\x00" * size)
        self.status = array.array("c", "u" * size)
        self.freed = False

    def free(self):
        """Mark the block freed; any later access raises."""
        if self.freed:
            raise MemorySimulatorError("trying to free already freed memory")
        self.freed = True
        self.memory = None
        self.status = None

    def getbytes(self, offset, size):
        """Read 'size' initialized bytes starting at 'offset'."""
        assert offset >= 0
        if self.freed:
            raise MemorySimulatorError("trying to access free memory")
        if offset + size > self.size:
            raise MemorySimulatorError("trying to access memory between blocks")
        if "u" in self.status[offset: offset+size]:
            raise MemorySimulatorError("trying to access uninitialized memory")
        return self.memory[offset:offset+size].tostring()

    def setbytes(self, offset, value):
        """Write the string 'value' at 'offset', marking bytes initialized."""
        assert offset >= 0
        if self.freed:
            raise MemorySimulatorError("trying to access free memory")
        if offset + len(value) > self.size:
            raise MemorySimulatorError("trying to access memory between blocks")
        a = array.array("c")
        a.fromstring(value)
        s = array.array("c")
        s.fromstring("i" * len(value))
        self.memory[offset:offset + len(value)] = a
        self.status[offset:offset + len(value)] = s
        assert len(self.memory) == self.size

    def memcopy(self, offset1, other, offset2, size):
        """Copy 'size' bytes (and their init status) into 'other'."""
        # bug fix: previously there was no freed check here, so copying
        # from/to a freed block crashed on the None arrays instead of
        # raising MemorySimulatorError like every other accessor
        if self.freed or other.freed:
            raise MemorySimulatorError("trying to access free memory")
        if offset1 + size > self.size:
            raise MemorySimulatorError("trying to access memory between blocks")
        if offset2 + size > other.size:
            raise MemorySimulatorError("trying to access memory between blocks")
        other.memory[offset2:offset2+size] = self.memory[offset1:offset1+size]
        other.status[offset2:offset2+size] = self.status[offset1:offset1+size]

    def memclear(self, offset, size):
        """Zero-fill 'size' bytes at 'offset' (marks them initialized)."""
        self.setbytes(offset, "\x00" * size)
# block which stores functions and PyObects
# block which stores functions and PyObects
class ObjectBlock(object):
    """Maps arbitrary Python objects (functions, PyObjects) to small integer
    'addresses' (indices) and back.  Addresses are interned: registering the
    same object twice returns the same index."""
    def __init__(self, baseaddress, size):
        self.baseaddress = baseaddress
        self.size = size
        self.objects_to_num = {}   # object -> index
        self.objects = []          # index -> object

    def get_py_object(self, offset):
        try:
            return self.objects[offset]
        except IndexError:
            raise MemorySimulatorError, "trying to access unknown object"

    def get_address_of_object(self, obj):
        """Return the existing index of 'obj', or register it and return the
        freshly assigned one."""
        if obj in self.objects_to_num:
            return self.objects_to_num[obj]
        else:
            assert len(self.objects) <= self.size
            index = len(self.objects)
            self.objects_to_num[obj] = index
            self.objects.append(obj)
            return index

SIZE_OF_OBJECT_BLOCK = 2 ** 16 # arbitrarily chosen size
class MemorySimulator(object):
    """A flat simulated address space made of MemoryBlocks.

    Addresses are plain integers; 0 is NULL.  Addresses 4 ..
    4+SIZE_OF_OBJECT_BLOCK belong to the object block; raw mallocs are
    handed out contiguously above that and never reuse addresses.
    """
    size_of_simulated_ram = 64 * 1024 * 1024

    def __init__(self, ram_size=None):
        # NOTE(review): self.blocks[0] is a *second* ObjectBlock, distinct
        # from self.objectblock, and ObjectBlock has no getbytes/setbytes --
        # find_block() over the low address range therefore returns an
        # object that cannot serve memory accesses.  Presumably this was
        # meant to be [self.objectblock] or a placeholder; confirm before
        # changing.
        self.objectblock = ObjectBlock(4, SIZE_OF_OBJECT_BLOCK)
        self.blocks = [ObjectBlock(4, SIZE_OF_OBJECT_BLOCK)]
        self.freememoryaddress = 4 + SIZE_OF_OBJECT_BLOCK
        if ram_size is not None:
            self.size_of_simulated_ram = ram_size
        self.current_size = 0

    def find_block(self, address):
        """Binary-search self.blocks (sorted, contiguous) for the block
        containing 'address'; raise for never-allocated addresses."""
        if address >= self.freememoryaddress:
            raise MemorySimulatorError("trying to access memory not malloc'ed")
        lo = 0
        hi = len(self.blocks)
        while lo < hi:
            mid = (lo + hi) // 2
            block = self.blocks[mid]
            if address < block.baseaddress:
                hi = mid
            elif address < block.baseaddress + block.size:
                return block
            else:
                # bug fix: was 'lo = mid', which makes no progress once
                # hi == lo + 1 and would loop forever if the address fell
                # past the probed block; mid itself is already ruled out
                lo = mid + 1
        # only reachable for addresses below the first block (0..3); fall
        # back to the last probed block, whose accessors will then complain
        return self.blocks[mid]

    def malloc(self, size):
        """Allocate a fresh block (minimum 1 byte); return its base address."""
        if size == 0:
            size = 1
        # bug fix: the limit check used to run *after* the block was
        # appended and the counters bumped (and compared against
        # current_size + 2*size in effect), leaving the simulator in an
        # inconsistent state on failure; check before mutating anything
        if self.current_size + size > self.size_of_simulated_ram:
            raise MemorySimulatorError("out of memory")
        result = self.freememoryaddress
        self.blocks.append(MemoryBlock(result, size))
        self.freememoryaddress += size
        self.current_size += size
        return result

    def free(self, baseaddress):
        """Free the block that starts exactly at 'baseaddress'."""
        if baseaddress == 0:
            raise MemorySimulatorError("trying to free NULL address")
        block = self.find_block(baseaddress)
        if baseaddress != block.baseaddress:
            raise MemorySimulatorError("trying to free address not malloc'ed")
        self.current_size -= block.size
        block.free()

    def getstruct(self, fmt, address):
        """struct.unpack the bytes at 'address' using format 'fmt'."""
        block = self.find_block(address)
        offset = address - block.baseaddress
        size = struct.calcsize(fmt)
        return struct.unpack(fmt, block.getbytes(offset, size))

    def setstruct(self, fmt, address, *types):
        """struct.pack the given values and write them at 'address'."""
        block = self.find_block(address)
        offset = address - block.baseaddress
        block.setbytes(offset, struct.pack(fmt, *types))

    def memcopy(self, address1, address2, size):
        block1 = self.find_block(address1)
        block2 = self.find_block(address2)
        offset1 = address1 - block1.baseaddress
        offset2 = address2 - block2.baseaddress
        block1.memcopy(offset1, block2, offset2, size)

    def memclear(self, address, size):
        block = self.find_block(address)
        offset = address - block.baseaddress
        block.memclear(offset, size)

    def get_py_object(self, address):
        block = self.objectblock
        offset = address - block.baseaddress
        assert isinstance(block, ObjectBlock)
        return block.get_py_object(offset)

    def get_address_of_object(self, obj):
        return (self.objectblock.get_address_of_object(obj) +
                self.objectblock.baseaddress)
| Python |
from pypy.rpython.memory.convertlltype import FlowGraphConstantConverter
from pypy.rpython.memory.lltypesimulation import free
from pypy.rpython.memory.lltypesimulation import simulatorptr as _ptr
from pypy.rpython.memory.lltypesimulation import malloc, functionptr, nullptr
from pypy.rpython.memory.lltypesimulation import pyobjectptr
from pypy.rpython.memory.lladdress import raw_malloc, raw_free, raw_memcopy, raw_memclear
def raw_malloc_usage(sz):
    """In the simulation, a raw allocation of 'sz' bytes accounts for
    exactly 'sz' bytes (no rounding or bookkeeping overhead)."""
    return sz
def notimplemented(*args, **kwargs):
    """Placeholder for simulation entry points that are not implemented yet.

    Bug fix: the original did ``raise NotImplemented`` -- NotImplemented is
    the comparison-protocol *constant*, not an exception class, so raising
    it produces an unrelated TypeError.  NotImplementedError is the
    exception intended here.
    """
    raise NotImplementedError("this entry point is not implemented")
# the following names from lltype will probably have to be implemented yet:
# opaqueptr, attachRuntimeTypeInfo, getRuntimeTypeInfo,
# runtime_type_info
opaqueptr = attachRuntimeTypeInfo = notimplemented
getRuntimeTypeInfo = runtime_type_info = notimplemented
del notimplemented
def create_no_gc(llinterp, flowgraphs):
    """Convert the flow graphs' constants for the simulator, installing no
    GC wrapper at all (returns None)."""
    fgcc = FlowGraphConstantConverter(flowgraphs)
    fgcc.convert()
    return None
from pypy.rpython.memory.gc import MarkSweepGC, SemiSpaceGC
use_gc = MarkSweepGC
def create_gc(llinterp, flowgraphs):
    """Wrap the llinterpreter with the configured GC (use_gc).
    Currently disabled: skips unconditionally."""
    import py; py.test.skip("out-of-progress")
    from pypy.rpython.memory.gcwrapper import GcWrapper, AnnotatingGcWrapper
    wrapper = GcWrapper(llinterp, flowgraphs, use_gc)
    return wrapper
def create_gc_run_on_llinterp(llinterp, flowgraphs):
    """Wrap the llinterpreter with an AnnotatingGcWrapper, so the GC's own
    code is annotated and run on the llinterpreter as well."""
    from pypy.rpython.memory.gcwrapper import GcWrapper, AnnotatingGcWrapper
    wrapper = AnnotatingGcWrapper(llinterp, flowgraphs, use_gc)
    return wrapper
prepare_graphs_and_create_gc = create_no_gc
| Python |
#
| Python |
import operator
from pypy.annotation.pairtype import pairtype
from pypy.annotation import model as annmodel
from pypy.objspace.flow.model import Constant
from pypy.rpython.error import TyperError
from pypy.rpython.rmodel import Repr, IntegerRepr, inputconst
from pypy.rpython.rmodel import IteratorRepr
from pypy.rpython.rmodel import externalvsinternal
from pypy.rpython.rslice import AbstractSliceRepr
from pypy.rpython.lltypesystem.lltype import Void, Signed, Bool
from pypy.rlib.rarithmetic import intmask
from pypy.rlib.unroll import unrolling_iterable
class __extend__(annmodel.SomeTuple):
    # pairtype-style class extension: these methods are grafted onto
    # annmodel.SomeTuple to tell the rtyper how to build a repr for tuples.

    def rtyper_makerepr(self, rtyper):
        # one TupleRepr parameterized by the reprs of the item annotations
        repr_class = rtyper.type_system.rtuple.TupleRepr
        return repr_class(rtyper, [rtyper.getrepr(s_item) for s_item in self.items])

    def rtyper_makekey_ex(self, rtyper):
        # cache key: the class plus the keys of all item annotations
        keys = [rtyper.makekey(s_item) for s_item in self.items]
        return tuple([self.__class__]+keys)
# memo tables: one generated low-level helper is shared between all tuple
# types whose items use the same underlying comparison/hash functions
_gen_eq_function_cache = {}
_gen_cmp_function_cache = {}
_gen_hash_function_cache = {}
_gen_str_function_cache = {}
def gen_eq_function(items_r):
    """Generate (and memoize) a low-level equality function for tuples whose
    item reprs are 'items_r'.  Items without a specific ll equality fall
    back to operator.eq.  The loop over 'autounrolling_funclist' is unrolled
    by the RPython translator, so each item gets a direct call."""
    eq_funcs = [r_item.get_ll_eq_function() or operator.eq for r_item in items_r]
    key = tuple(eq_funcs)
    try:
        return _gen_eq_function_cache[key]
    except KeyError:
        autounrolling_funclist = unrolling_iterable(enumerate(eq_funcs))

        def ll_eq(t1, t2):
            equal_so_far = True
            for i, eqfn in autounrolling_funclist:
                if not equal_so_far:
                    return False
                attrname = 'item%d' % i
                item1 = getattr(t1, attrname)
                item2 = getattr(t2, attrname)
                equal_so_far = eqfn(item1, item2)
            return equal_so_far

        _gen_eq_function_cache[key] = ll_eq
        return ll_eq
import os
def gen_cmp_function(items_r, op_funcs, eq_funcs, strict):
    """Build (and cache) a lexicographic ll comparison for tuples.

    Each item is checked with a (strict-comparison, equality) pair of
    functions; with strict==True this also implements '!='.
    """
    cmp_funcs = list(zip(op_funcs, eq_funcs))
    key = tuple(cmp_funcs), strict
    if key in _gen_cmp_function_cache:
        return _gen_cmp_function_cache[key]
    unrolled = unrolling_iterable(enumerate(cmp_funcs))
    def ll_cmp(t1, t2):
        for index, (cmpfn, eqfn) in unrolled:
            name = 'item%d' % index
            left = getattr(t1, name)
            right = getattr(t2, name)
            if cmpfn(left, right):
                # strictly ordered already: decided
                return True
            if not eqfn(left, right):
                # neither strictly ordered nor equal: decided the other way
                return False
        # every item compared equal
        return not strict
    _gen_cmp_function_cache[key] = ll_cmp
    return ll_cmp
def gen_gt_function(items_r, strict):
    """'>' (strict=True) or '>=' (strict=False) over tuples."""
    gt_list = [r.get_ll_gt_function() or operator.gt for r in items_r]
    eq_list = [r.get_ll_eq_function() or operator.eq for r in items_r]
    return gen_cmp_function(items_r, gt_list, eq_list, strict)
def gen_lt_function(items_r, strict):
    """'<' (strict=True) or '<=' (strict=False) over tuples."""
    lt_list = [r.get_ll_lt_function() or operator.lt for r in items_r]
    eq_list = [r.get_ll_eq_function() or operator.eq for r in items_r]
    return gen_cmp_function(items_r, lt_list, eq_list, strict)
def gen_hash_function(items_r):
    """Build (and cache) the ll hash function for a tuple, combining the
    items' hashes with the same scheme CPython uses for tuples."""
    hash_funcs = [r_item.get_ll_hash_function() for r_item in items_r]
    key = tuple(hash_funcs)
    if key in _gen_hash_function_cache:
        return _gen_hash_function_cache[key]
    unrolled = unrolling_iterable(enumerate(hash_funcs))
    nitems = len(items_r)
    def ll_hash(t):
        acc = 0x345678
        mult = 1000003
        for index, hashfn in unrolled:
            item = getattr(t, 'item%d' % index)
            acc = intmask((acc ^ hashfn(item)) * intmask(mult))
            mult = mult + 82520 + 2 * nitems
        return acc
    _gen_hash_function_cache[key] = ll_hash
    return ll_hash
def gen_str_function(tuplerepr):
    """Build (and cache) the ll function rendering a tuple as '(a, b)',
    using the string-builder protocol of tuplerepr.rstr_ll."""
    items_r = tuplerepr.items_r
    str_funcs = [r_item.ll_str for r_item in items_r]
    key = tuplerepr.rstr_ll, tuple(str_funcs)
    try:
        return _gen_str_function_cache[key]
    except KeyError:
        autounrolling_funclist = unrolling_iterable(enumerate(str_funcs))
        # builder primitives from the type-system-specific rstr module
        constant = tuplerepr.rstr_ll.ll_constant
        start = tuplerepr.rstr_ll.ll_build_start
        push = tuplerepr.rstr_ll.ll_build_push
        finish = tuplerepr.rstr_ll.ll_build_finish
        length = len(items_r)
        def ll_str(t):
            if length == 0:
                return constant("()")
            # 2*length+1 slots: '(' + (separator, item) pairs + closing
            buf = start(2 * length + 1)
            push(buf, constant("("), 0)
            for i, str_func in autounrolling_funclist:
                attrname = 'item%d' % i
                item = getattr(t, attrname)
                if i > 0:
                    push(buf, constant(", "), 2 * i)
                push(buf, str_func(item), 2 * i + 1)
            if length == 1:
                # one-element tuples print with a trailing comma
                push(buf, constant(",)"), 2 * length)
            else:
                push(buf, constant(")"), 2 * length)
            return finish(buf)
        _gen_str_function_cache[key] = ll_str
        return ll_str
class AbstractTupleRepr(Repr):
    """Base repr for RPython tuples; concrete type systems subclass it
    and supply instantiate(), newtuple(), getitem_internal() etc."""
    def __init__(self, rtyper, items_r):
        # each item has an internal repr (stored in the tuple) and an
        # external one (seen by users of getitem)
        self.items_r = []
        self.external_items_r = []
        for item_r in items_r:
            external_repr, internal_repr = externalvsinternal(rtyper, item_r)
            self.items_r.append(internal_repr)
            self.external_items_r.append(external_repr)
        items_r = self.items_r
        self.fieldnames = ['item%d' % i for i in range(len(items_r))]
        self.lltypes = [r.lowleveltype for r in items_r]
        self.tuple_cache = {}
    def getitem(self, llops, v_tuple, index):
        """Generate the operations to get the index'th item of v_tuple,
        in the external repr external_items_r[index]."""
        v = self.getitem_internal(llops, v_tuple, index)
        r_item = self.items_r[index]
        r_external_item = self.external_items_r[index]
        return llops.convertvar(v, r_item, r_external_item)
    def newtuple_cached(cls, hop, items_v):
        # constant tuples become a single inputconst instead of a newtuple
        r_tuple = hop.r_result
        if hop.s_result.is_constant():
            return inputconst(r_tuple, hop.s_result.const)
        else:
            return cls.newtuple(hop.llops, r_tuple, items_v)
    newtuple_cached = classmethod(newtuple_cached)
    def _rtype_newtuple(cls, hop):
        # rtype the 'newtuple' operation itself
        r_tuple = hop.r_result
        vlist = hop.inputargs(*r_tuple.items_r)
        return cls.newtuple_cached(hop, vlist)
    _rtype_newtuple = classmethod(_rtype_newtuple)
    def convert_const(self, value):
        """Convert a prebuilt Python tuple, caching by constant items."""
        assert isinstance(value, tuple) and len(value) == len(self.items_r)
        key = tuple([Constant(item) for item in value])
        try:
            return self.tuple_cache[key]
        except KeyError:
            p = self.instantiate()
            # cache before filling the fields to handle recursive tuples
            self.tuple_cache[key] = p
            for obj, r, name in zip(value, self.items_r, self.fieldnames):
                if r.lowleveltype is not Void:
                    setattr(p, name, r.convert_const(obj))
            return p
    def compact_repr(self):
        return "TupleR %s" % ' '.join([llt._short_name() for llt in self.lltypes])
    def rtype_len(self, hop):
        # tuple length is a compile-time constant
        return hop.inputconst(Signed, len(self.items_r))
    def rtype_id(self, hop):
        raise TyperError("cannot ask for the id() of a tuple")
    def get_ll_eq_function(self):
        return gen_eq_function(self.items_r)
    def get_ll_ge_function(self):
        return gen_gt_function(self.items_r, False)
    def get_ll_gt_function(self):
        return gen_gt_function(self.items_r, True)
    def get_ll_le_function(self):
        return gen_lt_function(self.items_r, False)
    def get_ll_lt_function(self):
        return gen_lt_function(self.items_r, True)
    def get_ll_hash_function(self):
        return gen_hash_function(self.items_r)
    # ll_str is generated lazily per repr from the item str functions
    ll_str = property(gen_str_function)
    def make_iterator_repr(self):
        if len(self.items_r) == 1:
            # subclasses are supposed to set the IteratorRepr attribute
            return self.IteratorRepr(self)
        raise TyperError("can only iterate over tuples of length 1 for now")
class __extend__(pairtype(AbstractTupleRepr, IntegerRepr)):
    def rtype_getitem((r_tup, r_int), hop):
        # tuple[i]: the rtyper only supports a compile-time-constant index
        v_tuple, v_index = hop.inputargs(r_tup, Signed)
        if not isinstance(v_index, Constant):
            raise TyperError("non-constant tuple index")
        if hop.has_implicit_exception(IndexError):
            # a constant in-range index can never raise
            hop.exception_cannot_occur()
        index = v_index.value
        return r_tup.getitem(hop.llops, v_tuple, index)
class __extend__(pairtype(AbstractTupleRepr, AbstractSliceRepr)):
    def rtype_getitem((r_tup, r_slice), hop):
        # tuple[start:stop:step] with constant slice bounds: build a new,
        # shorter tuple out of the selected items
        v_tup = hop.inputarg(r_tup, arg=0)
        s_slice = hop.args_s[1]
        start, stop, step = s_slice.constant_indices()
        indices = range(len(r_tup.items_r))[start:stop:step]
        assert len(indices) == len(hop.r_result.items_r)
        items_v = [r_tup.getitem_internal(hop.llops, v_tup, i)
                   for i in indices]
        return hop.r_result.newtuple(hop.llops, hop.r_result, items_v)
class __extend__(pairtype(AbstractTupleRepr, Repr)):
    def rtype_contains((r_tup, r_item), hop):
        # 'x in (c1, c2, ...)' on a constant homogeneous tuple: rewritten
        # as a membership test in a prebuilt dict with those keys
        s_tup = hop.args_s[0]
        if not s_tup.is_constant():
            raise TyperError("contains() on non-const tuple")
        t = s_tup.const
        typ = type(t[0])
        for x in t[1:]:
            if type(x) is not typ:
                raise TyperError("contains() on mixed-type tuple "
                                 "constant %r" % (t,))
        d = {}
        for x in t:
            d[x] = None
        hop2 = hop.copy()
        # replace the tuple argument by the constant dict and re-dispatch
        _, _ = hop2.r_s_popfirstarg()
        v_dict = Constant(d)
        s_dict = hop.rtyper.annotator.bookkeeper.immutablevalue(d)
        hop2.v_s_insertfirstarg(v_dict, s_dict)
        return hop2.dispatch()
class __extend__(pairtype(AbstractTupleRepr, AbstractTupleRepr)):
    def rtype_add((r_tup1, r_tup2), hop):
        # tuple concatenation: read every item of both operands and build
        # the (constant-folded, if possible) result tuple
        v_tuple1, v_tuple2 = hop.inputargs(r_tup1, r_tup2)
        vlist = []
        for i in range(len(r_tup1.items_r)):
            vlist.append(r_tup1.getitem_internal(hop.llops, v_tuple1, i))
        for i in range(len(r_tup2.items_r)):
            vlist.append(r_tup2.getitem_internal(hop.llops, v_tuple2, i))
        return r_tup1.newtuple_cached(hop, vlist)
    rtype_inplace_add = rtype_add
    def rtype_eq((r_tup1, r_tup2), hop):
        # XXX assumes that r_tup2 is convertible to r_tup1
        v_tuple1, v_tuple2 = hop.inputargs(r_tup1, r_tup1)
        ll_eq = r_tup1.get_ll_eq_function()
        return hop.gendirectcall(ll_eq, v_tuple1, v_tuple2)
    def rtype_ge((r_tup1, r_tup2), hop):
        # XXX assumes that r_tup2 is convertible to r_tup1
        v_tuple1, v_tuple2 = hop.inputargs(r_tup1, r_tup1)
        ll_ge = r_tup1.get_ll_ge_function()
        return hop.gendirectcall(ll_ge, v_tuple1, v_tuple2)
    def rtype_gt((r_tup1, r_tup2), hop):
        # XXX assumes that r_tup2 is convertible to r_tup1
        v_tuple1, v_tuple2 = hop.inputargs(r_tup1, r_tup1)
        ll_gt = r_tup1.get_ll_gt_function()
        return hop.gendirectcall(ll_gt, v_tuple1, v_tuple2)
    def rtype_le((r_tup1, r_tup2), hop):
        # XXX assumes that r_tup2 is convertible to r_tup1
        v_tuple1, v_tuple2 = hop.inputargs(r_tup1, r_tup1)
        ll_le = r_tup1.get_ll_le_function()
        return hop.gendirectcall(ll_le, v_tuple1, v_tuple2)
    def rtype_lt((r_tup1, r_tup2), hop):
        # XXX assumes that r_tup2 is convertible to r_tup1
        v_tuple1, v_tuple2 = hop.inputargs(r_tup1, r_tup1)
        ll_lt = r_tup1.get_ll_lt_function()
        return hop.gendirectcall(ll_lt, v_tuple1, v_tuple2)
    def rtype_ne(tup1tup2, hop):
        # t1 != t2 is implemented as not (t1 == t2)
        v_res = tup1tup2.rtype_eq(hop)
        return hop.genop('bool_not', [v_res], resulttype=Bool)
    def convert_from_to((r_from, r_to), v, llops):
        # convert between two tuple reprs of the same length by converting
        # each item; identical lowleveltypes need no work at all
        if len(r_from.items_r) == len(r_to.items_r):
            if r_from.lowleveltype == r_to.lowleveltype:
                return v
            n = len(r_from.items_r)
            items_v = []
            for i in range(n):
                item_v = r_from.getitem_internal(llops, v, i)
                item_v = llops.convertvar(item_v,
                                          r_from.items_r[i],
                                          r_to.items_r[i])
                items_v.append(item_v)
            return r_from.newtuple(llops, r_to, items_v)
        return NotImplemented
    def rtype_is_((robj1, robj2), hop):
        raise TyperError("cannot compare tuples with 'is'")
class AbstractTupleIteratorRepr(IteratorRepr):
    """Base repr for tuple iterators; subclasses appear to provide
    lowleveltype, r_tuple, ll_tupleiter and ll_tuplenext (confirm)."""
    def newiter(self, hop):
        # wrap the tuple value into a fresh iterator object
        v_tup, = hop.inputargs(self.r_tuple)
        c_iterptr = hop.inputconst(Void, self.lowleveltype)
        return hop.gendirectcall(self.ll_tupleiter, c_iterptr, v_tup)
    def rtype_next(self, hop):
        v_it, = hop.inputargs(self)
        hop.has_implicit_exception(StopIteration) # record that we know about it
        hop.exception_is_here()
        v_item = hop.gendirectcall(self.ll_tuplenext, v_it)
        return hop.llops.convertvar(v_item, self.r_tuple.items_r[0],
                                    self.r_tuple.external_items_r[0])
| Python |
import sys
from pypy.annotation.pairtype import pairtype
from pypy.annotation import model as annmodel
from pypy.objspace.flow.objspace import op_appendices
from pypy.rpython.lltypesystem.lltype import Signed, Unsigned, Bool, Float, \
Void, Char, UniChar, malloc, pyobjectptr, UnsignedLongLong, \
SignedLongLong, build_number, Number, cast_primitive, typeOf
from pypy.rpython.rmodel import IntegerRepr, inputconst
from pypy.rpython.robject import PyObjRepr, pyobj_repr
from pypy.rlib.rarithmetic import intmask, r_int, r_uint, r_ulonglong, r_longlong
from pypy.rpython.error import TyperError, MissingRTypeOperation
from pypy.rpython.rmodel import log
from pypy.rlib import objectmodel
# one shared IntegerRepr per low-level integer type
_integer_reprs = {}
def getintegerrepr(lltype, prefix=None):
    """Return the cached IntegerRepr for 'lltype', creating it on first
    use (the prefix is only taken into account at creation time)."""
    if lltype in _integer_reprs:
        return _integer_reprs[lltype]
    result = _integer_reprs[lltype] = IntegerRepr(lltype, prefix)
    return result
class __extend__(annmodel.SomeInteger):
    def rtyper_makerepr(self, rtyper):
        """Map the annotation to the shared repr of its native number type."""
        return getintegerrepr(build_number(None, self.knowntype))
    def rtyper_makekey(self):
        # one repr per concrete integer type
        return self.__class__, self.knowntype
# Prebuilt reprs for the primitive integer types; the prefix selects the
# family of ll operations used ('int_add', 'uint_add', 'llong_add', ...).
signed_repr = getintegerrepr(Signed, 'int_')
signedlonglong_repr = getintegerrepr(SignedLongLong, 'llong_')
unsigned_repr = getintegerrepr(Unsigned, 'uint_')
unsignedlonglong_repr = getintegerrepr(UnsignedLongLong, 'ullong_')
class __extend__(pairtype(IntegerRepr, IntegerRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # explicit casts between the integer families; anything not listed
        # falls back to the generic 'cast_primitive'
        if r_from.lowleveltype == Signed and r_to.lowleveltype == Unsigned:
            log.debug('explicit cast_int_to_uint')
            return llops.genop('cast_int_to_uint', [v], resulttype=Unsigned)
        if r_from.lowleveltype == Unsigned and r_to.lowleveltype == Signed:
            log.debug('explicit cast_uint_to_int')
            return llops.genop('cast_uint_to_int', [v], resulttype=Signed)
        if r_from.lowleveltype == Signed and r_to.lowleveltype == SignedLongLong:
            return llops.genop('cast_int_to_longlong', [v], resulttype=SignedLongLong)
        if r_from.lowleveltype == SignedLongLong and r_to.lowleveltype == Signed:
            return llops.genop('truncate_longlong_to_int', [v], resulttype=Signed)
        return llops.genop('cast_primitive', [v], resulttype=r_to.lowleveltype)
    # arithmetic: each operation delegates to _rtype_template, which picks
    # the ll operation name and handles implicit exceptions
    def rtype_add(_, hop):
        return _rtype_template(hop, 'add')
    rtype_inplace_add = rtype_add
    def rtype_add_ovf(_, hop):
        return _rtype_template(hop, 'add_ovf')
    def rtype_sub(_, hop):
        return _rtype_template(hop, 'sub')
    rtype_inplace_sub = rtype_sub
    def rtype_sub_ovf(_, hop):
        return _rtype_template(hop, 'sub_ovf')
    def rtype_mul(_, hop):
        return _rtype_template(hop, 'mul')
    rtype_inplace_mul = rtype_mul
    def rtype_mul_ovf(_, hop):
        return _rtype_template(hop, 'mul_ovf')
    def rtype_floordiv(_, hop):
        return _rtype_template(hop, 'floordiv', [ZeroDivisionError])
    rtype_inplace_floordiv = rtype_floordiv
    def rtype_floordiv_ovf(_, hop):
        return _rtype_template(hop, 'floordiv_ovf', [ZeroDivisionError])
    # turn 'div' on integers into 'floordiv'
    rtype_div = rtype_floordiv
    rtype_inplace_div = rtype_inplace_floordiv
    rtype_div_ovf = rtype_floordiv_ovf
    # 'def rtype_truediv' is delegated to the superclass FloatRepr
    def rtype_mod(_, hop):
        return _rtype_template(hop, 'mod', [ZeroDivisionError])
    rtype_inplace_mod = rtype_mod
    def rtype_mod_ovf(_, hop):
        return _rtype_template(hop, 'mod_ovf', [ZeroDivisionError])
    def rtype_xor(_, hop):
        return _rtype_template(hop, 'xor')
    rtype_inplace_xor = rtype_xor
    def rtype_and_(_, hop):
        return _rtype_template(hop, 'and')
    rtype_inplace_and = rtype_and_
    def rtype_or_(_, hop):
        return _rtype_template(hop, 'or')
    rtype_inplace_or = rtype_or_
    def rtype_lshift(_, hop):
        return _rtype_template(hop, 'lshift', [ValueError])
    rtype_inplace_lshift = rtype_lshift
    def rtype_lshift_ovf(_, hop):
        return _rtype_template(hop, 'lshift_ovf', [ValueError])
    def rtype_rshift(_, hop):
        return _rtype_template(hop, 'rshift', [ValueError])
    rtype_inplace_rshift = rtype_rshift
    def rtype_pow(_, hop):
        raise MissingRTypeOperation("pow(int, int)"
                                    " (use float**float instead; it is too"
                                    " easy to overlook the overflow"
                                    " issues of int**int)")
    rtype_pow_ovf = rtype_pow
    rtype_inplace_pow = rtype_pow
##    def rtype_pow(_, hop, suffix=''):
##        if hop.has_implicit_exception(ZeroDivisionError):
##            suffix += '_zer'
##        s_int3 = hop.args_s[2]
##        rresult = hop.rtyper.makerepr(hop.s_result)
##        if s_int3.is_constant() and s_int3.const is None:
##            vlist = hop.inputargs(rresult, rresult, Void)[:2]
##        else:
##            vlist = hop.inputargs(rresult, rresult, rresult)
##        hop.exception_is_here()
##        return hop.genop(rresult.opprefix + 'pow' + suffix, vlist, resulttype=rresult)
##    def rtype_pow_ovf(_, hop):
##        if hop.s_result.unsigned:
##            raise TyperError("forbidden uint_pow_ovf")
##        hop.has_implicit_exception(OverflowError) # record that we know about it
##        return self.rtype_pow(_, hop, suffix='_ovf')
##    def rtype_inplace_pow(_, hop):
##        return _rtype_template(hop, 'pow', [ZeroDivisionError])
    # comparisons: eq is_ ne lt le gt ge
    def rtype_eq(_, hop):
        return _rtype_compare_template(hop, 'eq')
    rtype_is_ = rtype_eq
    def rtype_ne(_, hop):
        return _rtype_compare_template(hop, 'ne')
    def rtype_lt(_, hop):
        return _rtype_compare_template(hop, 'lt')
    def rtype_le(_, hop):
        return _rtype_compare_template(hop, 'le')
    def rtype_gt(_, hop):
        return _rtype_compare_template(hop, 'gt')
    def rtype_ge(_, hop):
        return _rtype_compare_template(hop, 'ge')
#Helper functions
def _rtype_template(hop, func, implicit_excs=[]):
    """Emit the ll operation '<prefix><func>' for a binary integer op.

    Appends the exception suffix (e.g. '_zer') for any implicit exception
    that is live at this operation, and generates the extra correction code
    needed to make division/modulo round towards negative infinity.
    (Note: implicit_excs uses a mutable default, but it is never mutated.)
    """
    if func.endswith('_ovf'):
        if hop.s_result.unsigned:
            raise TyperError("forbidden unsigned " + func)
        else:
            # record that we know about the OverflowError
            hop.has_implicit_exception(OverflowError)
    for implicit_exc in implicit_excs:
        if hop.has_implicit_exception(implicit_exc):
            appendix = op_appendices[implicit_exc]
            func += '_' + appendix
    r_result = hop.rtyper.makerepr(hop.s_result)
    if r_result.lowleveltype == Bool:
        repr = signed_repr
    else:
        repr = r_result
    vlist = hop.inputargs(repr, repr)
    hop.exception_is_here()
    prefix = repr.opprefix
    v_res = hop.genop(prefix+func, vlist, resulttype=repr)
    bothnonneg = hop.args_s[0].nonneg and hop.args_s[1].nonneg
    if prefix in ('int_', 'llong_') and not bothnonneg:
        # cpython, and rpython, assumed that integer division truncates
        # towards -infinity.  however, in C99 and most (all?) other
        # backends, integer division truncates towards 0.  so assuming
        # that, we can generate scary code that applies the necessary
        # correction in the right cases.
        # paper and pencil are encouraged for this :)
        from pypy.rpython.rbool import bool_repr
        assert isinstance(repr.lowleveltype, Number)
        c_zero = inputconst(repr.lowleveltype, repr.lowleveltype._default)
        op = func.split('_', 1)[0]
        if op == 'floordiv':
            # return (x/y) - (((x^y)<0)&((x%y)!=0));
            v_xor = hop.genop(prefix + 'xor', vlist,
                              resulttype=repr)
            v_xor_le = hop.genop(prefix + 'le', [v_xor, c_zero],
                                 resulttype=Bool)
            v_xor_le = hop.llops.convertvar(v_xor_le, bool_repr, repr)
            v_mod = hop.genop(prefix + 'mod', vlist,
                              resulttype=repr)
            v_mod_ne = hop.genop(prefix + 'ne', [v_mod, c_zero],
                                 resulttype=Bool)
            v_mod_ne = hop.llops.convertvar(v_mod_ne, bool_repr, repr)
            v_corr = hop.genop(prefix + 'and', [v_xor_le, v_mod_ne],
                               resulttype=repr)
            v_res = hop.genop(prefix + 'sub', [v_res, v_corr],
                              resulttype=repr)
        elif op == 'mod':
            # return r + y*(((x^y)<0)&(r!=0));
            v_xor = hop.genop(prefix + 'xor', vlist,
                              resulttype=repr)
            v_xor_le = hop.genop(prefix + 'le', [v_xor, c_zero],
                                 resulttype=Bool)
            v_xor_le = hop.llops.convertvar(v_xor_le, bool_repr, repr)
            v_mod_ne = hop.genop(prefix + 'ne', [v_res, c_zero],
                                 resulttype=Bool)
            v_mod_ne = hop.llops.convertvar(v_mod_ne, bool_repr, repr)
            v_corr1 = hop.genop(prefix + 'and', [v_xor_le, v_mod_ne],
                                resulttype=repr)
            v_corr = hop.genop(prefix + 'mul', [v_corr1, vlist[1]],
                               resulttype=repr)
            v_res = hop.genop(prefix + 'add', [v_res, v_corr],
                              resulttype=repr)
    v_res = hop.llops.convertvar(v_res, repr, r_result)
    return v_res
#Helper functions for comparisons
def _rtype_compare_template(hop, func):
    """Emit the ll comparison '<prefix><func>' for two integers.

    Mixing signed and unsigned operands is rejected unless both are
    known to be non-negative."""
    s_int1, s_int2 = hop.args_s
    if s_int1.unsigned or s_int2.unsigned:
        if not (s_int1.nonneg and s_int2.nonneg):
            raise TyperError("comparing a signed and an unsigned number")
    r_common = hop.rtyper.makerepr(annmodel.unionof(s_int1, s_int2)).as_int
    vlist = hop.inputargs(r_common, r_common)
    hop.exception_is_here()
    return hop.genop(r_common.opprefix + func, vlist, resulttype=Bool)
#
class __extend__(IntegerRepr):
    def convert_const(self, value):
        """Convert a prebuilt Python number to this repr's ll integer."""
        if isinstance(value, objectmodel.Symbolic):
            return value
        T = typeOf(value)
        if isinstance(T, Number) or T is Bool:
            return cast_primitive(self.lowleveltype, value)
        raise TyperError("not an integer: %r" % (value,))
    def get_ll_eq_function(self):
        # None: primitive integers compare with the native ll operations
        return None
    # the other comparisons are native as well
    get_ll_gt_function = get_ll_eq_function
    get_ll_lt_function = get_ll_eq_function
    get_ll_ge_function = get_ll_eq_function
    get_ll_le_function = get_ll_eq_function
    # (a redundant second 'def get_ll_ge_function' returning None, which
    # silently shadowed the alias above, was removed here)
    def get_ll_hash_function(self):
        return ll_hash_int
    get_ll_fasthash_function = get_ll_hash_function
    def get_ll_dummyval_obj(self, rtyper, s_value):
        # if >= 0, then all negative values are special
        if s_value.nonneg and not s_value.unsigned:
            return signed_repr # whose ll_dummy_value is -1
        else:
            return None
    ll_dummy_value = -1
    def rtype_chr(_, hop):
        # chr(n): optionally range-check, then cast to Char
        vlist = hop.inputargs(Signed)
        if hop.has_implicit_exception(ValueError):
            hop.exception_is_here()
            hop.gendirectcall(ll_check_chr, vlist[0])
        return hop.genop('cast_int_to_char', vlist, resulttype=Char)
    def rtype_unichr(_, hop):
        # unichr(n): optionally range-check, then cast to UniChar
        vlist = hop.inputargs(Signed)
        if hop.has_implicit_exception(ValueError):
            hop.exception_is_here()
            hop.gendirectcall(ll_check_unichr, vlist[0])
        return hop.genop('cast_int_to_unichar', vlist, resulttype=UniChar)
    def rtype_is_true(self, hop):
        assert self is self.as_int # rtype_is_true() is overridden in BoolRepr
        vlist = hop.inputargs(self)
        return hop.genop(self.opprefix + 'is_true', vlist, resulttype=Bool)
    # unary arithmetic operations
    def rtype_abs(self, hop):
        self = self.as_int
        vlist = hop.inputargs(self)
        if hop.s_result.unsigned:
            # abs() of an unsigned value is a no-op
            return vlist[0]
        else:
            return hop.genop(self.opprefix + 'abs', vlist, resulttype=self)
    def rtype_abs_ovf(self, hop):
        self = self.as_int
        if hop.s_result.unsigned:
            raise TyperError("forbidden uint_abs_ovf")
        else:
            vlist = hop.inputargs(self)
            hop.has_implicit_exception(OverflowError) # record we know about it
            hop.exception_is_here()
            return hop.genop(self.opprefix + 'abs_ovf', vlist, resulttype=self)
    def rtype_invert(self, hop):
        self = self.as_int
        vlist = hop.inputargs(self)
        return hop.genop(self.opprefix + 'invert', vlist, resulttype=self)
    def rtype_neg(self, hop):
        self = self.as_int
        vlist = hop.inputargs(self)
        if hop.s_result.unsigned:
            # no 'uint_neg': implement it as '0 - x'
            zero = self.lowleveltype._defl()
            vlist.insert(0, hop.inputconst(self.lowleveltype, zero))
            return hop.genop(self.opprefix + 'sub', vlist, resulttype=self)
        else:
            return hop.genop(self.opprefix + 'neg', vlist, resulttype=self)
    def rtype_neg_ovf(self, hop):
        self = self.as_int
        if hop.s_result.unsigned:
            raise TyperError("forbidden uint_neg_ovf")
        else:
            vlist = hop.inputargs(self)
            hop.has_implicit_exception(OverflowError) # record we know about it
            hop.exception_is_here()
            return hop.genop(self.opprefix + 'neg_ovf', vlist, resulttype=self)
    def rtype_pos(self, hop):
        # +x is a no-op
        self = self.as_int
        vlist = hop.inputargs(self)
        return vlist[0]
    def rtype_int(self, hop):
        if self.lowleveltype in (Unsigned, UnsignedLongLong):
            raise TyperError("use intmask() instead of int(r_uint(...))")
        vlist = hop.inputargs(Signed)
        return vlist[0]
    def rtype_float(_, hop):
        vlist = hop.inputargs(Float)
        return vlist[0]
    # version picked by specialisation based on which
    # type system rtyping is using, from <type_system>.ll_str module
    def ll_str(self, i):
        pass
    ll_str._annspecialcase_ = "specialize:ts('ll_str.ll_int_str')"
    def rtype_hex(self, hop):
        # hex(x): call the type system's int-to-hex helper (with '0x' prefix)
        self = self.as_int
        varg = hop.inputarg(self, 0)
        true = inputconst(Bool, True)
        fn = hop.rtyper.type_system.ll_str.ll_int2hex
        return hop.gendirectcall(fn, varg, true)
    def rtype_oct(self, hop):
        # oct(x): call the type system's int-to-oct helper (with '0' prefix)
        self = self.as_int
        varg = hop.inputarg(self, 0)
        true = inputconst(Bool, True)
        fn = hop.rtyper.type_system.ll_str.ll_int2oct
        return hop.gendirectcall(fn, varg, true)
def ll_identity(n):
    """Return the argument unchanged; integers hash to themselves."""
    return n
ll_hash_int = ll_identity
def ll_check_chr(n):
    """Raise ValueError unless n is a valid chr() argument (0..255)."""
    if not (0 <= n <= 255):
        raise ValueError
def ll_check_unichr(n):
    """Raise ValueError unless n is a valid unichr() argument
    (0..sys.maxunicode)."""
    if not (0 <= n <= sys.maxunicode):
        raise ValueError
#
# _________________________ Conversions _________________________
# CPython C-API function names (plus llinterp fake implementations) used to
# convert PyObjects to the corresponding ll integer types...
py_to_ll_conversion_functions = {
    UnsignedLongLong: ('RPyLong_AsUnsignedLongLong', lambda pyo: r_ulonglong(pyo._obj.value)),
    SignedLongLong: ('RPyLong_AsLongLong', lambda pyo: r_longlong(pyo._obj.value)),
    Unsigned: ('RPyLong_AsUnsignedLong', lambda pyo: r_uint(pyo._obj.value)),
    Signed: ('PyInt_AsLong', lambda pyo: int(pyo._obj.value))
}
# ...and back from ll integers to PyObjects.
ll_to_py_conversion_functions = {
    UnsignedLongLong: ('PyLong_FromUnsignedLongLong', lambda i: pyobjectptr(i)),
    SignedLongLong: ('PyLong_FromLongLong', lambda i: pyobjectptr(i)),
    Unsigned: ('PyLong_FromUnsignedLong', lambda i: pyobjectptr(i)),
    Signed: ('PyInt_FromLong', lambda i: pyobjectptr(i)),
}
class __extend__(pairtype(PyObjRepr, IntegerRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # PyObject -> ll integer via the C API function for the target type
        tolltype = r_to.lowleveltype
        fnname, callable = py_to_ll_conversion_functions[tolltype]
        return llops.gencapicall(fnname, [v],
                                 resulttype=r_to, _callable=callable)
class __extend__(pairtype(IntegerRepr, PyObjRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # ll integer -> PyObject via the C API function for the source type
        fromlltype = r_from.lowleveltype
        fnname, callable = ll_to_py_conversion_functions[fromlltype]
        return llops.gencapicall(fnname, [v],
                                 resulttype=pyobj_repr, _callable=callable)
| Python |
from pypy.annotation.pairtype import pairtype
from pypy.annotation import model as annmodel
from pypy.rpython.lltypesystem.lltype import \
Signed, Unsigned, SignedLongLong, Bool, Float, Void, pyobjectptr
from pypy.rpython.error import TyperError
from pypy.rpython.rmodel import FloatRepr
from pypy.rpython.rmodel import IntegerRepr, BoolRepr
from pypy.rpython.rstr import AbstractStringRepr
from pypy.rpython.robject import PyObjRepr, pyobj_repr
from pypy.rpython.rmodel import log
from pypy.rlib.rarithmetic import base_int
import math
class __extend__(annmodel.SomeFloat):
    def rtyper_makerepr(self, rtyper):
        # every float annotation shares the single FloatRepr instance
        return float_repr
    def rtyper_makekey(self):
        return (self.__class__,)
# the unique repr instance shared by all float annotations
float_repr = FloatRepr()
class __extend__(pairtype(FloatRepr, FloatRepr)):
    # arithmetic: each operation maps to the 'float_<op>' ll operation
    def rtype_add(_, hop):
        return _rtype_template(hop, 'add')
    rtype_inplace_add = rtype_add
    def rtype_sub(_, hop):
        return _rtype_template(hop, 'sub')
    rtype_inplace_sub = rtype_sub
    def rtype_mul(_, hop):
        return _rtype_template(hop, 'mul')
    rtype_inplace_mul = rtype_mul
    def rtype_truediv(_, hop):
        return _rtype_template(hop, 'truediv')
    rtype_inplace_truediv = rtype_truediv
    # turn 'div' on floats into 'truediv'
    rtype_div = rtype_truediv
    rtype_inplace_div = rtype_inplace_truediv
    # 'floordiv' on floats not supported in RPython
    def rtype_pow(_, hop):
        # only the two-argument form pow(x, y) / pow(x, y, None) is allowed
        s_float3 = hop.args_s[2]
        if s_float3.is_constant() and s_float3.const is None:
            vlist = hop.inputargs(Float, Float, Void)[:2]
            return hop.genop('float_pow', vlist, resulttype=Float)
        else:
            raise TyperError("cannot handle pow with three float arguments")
    def rtype_inplace_pow(_, hop):
        return _rtype_template(hop, 'pow')
    # comparisons: eq is_ ne lt le gt ge
    def rtype_eq(_, hop):
        return _rtype_compare_template(hop, 'eq')
    rtype_is_ = rtype_eq
    def rtype_ne(_, hop):
        return _rtype_compare_template(hop, 'ne')
    def rtype_lt(_, hop):
        return _rtype_compare_template(hop, 'lt')
    def rtype_le(_, hop):
        return _rtype_compare_template(hop, 'le')
    def rtype_gt(_, hop):
        return _rtype_compare_template(hop, 'gt')
    def rtype_ge(_, hop):
        return _rtype_compare_template(hop, 'ge')
class __extend__(pairtype(AbstractStringRepr, FloatRepr)):
    def rtype_mod(_, hop):
        """Rtype 'fmtstring % float' via the type system's string formatting."""
        fmt_pair = (hop.args_v[1], hop.args_r[1])
        return hop.rtyper.type_system.rstr.do_stringformat(hop, [fmt_pair])
#Helpers FloatRepr,FloatRepr
def _rtype_template(hop, func):
    """Emit the binary 'float_<func>' ll operation."""
    v_args = hop.inputargs(Float, Float)
    return hop.genop('float_' + func, v_args, resulttype=Float)
def _rtype_compare_template(hop, func):
    """Emit the boolean 'float_<func>' comparison ll operation."""
    v_args = hop.inputargs(Float, Float)
    return hop.genop('float_' + func, v_args, resulttype=Bool)
#
class __extend__(FloatRepr):
    def convert_const(self, value):
        """Convert a prebuilt constant to a ll float."""
        if not isinstance(value, (int, base_int, float)): # can be bool too
            raise TyperError("not a float: %r" % (value,))
        return float(value)
    def get_ll_eq_function(self):
        # None: primitive floats compare with the native ll operations
        return None
    get_ll_gt_function = get_ll_eq_function
    get_ll_lt_function = get_ll_eq_function
    get_ll_ge_function = get_ll_eq_function
    get_ll_le_function = get_ll_eq_function
    def get_ll_hash_function(self):
        return ll_hash_float
    def rtype_is_true(_, hop):
        vlist = hop.inputargs(Float)
        return hop.genop('float_is_true', vlist, resulttype=Bool)
    def rtype_neg(_, hop):
        vlist = hop.inputargs(Float)
        return hop.genop('float_neg', vlist, resulttype=Float)
    def rtype_pos(_, hop):
        # +x is a no-op
        vlist = hop.inputargs(Float)
        return vlist[0]
    def rtype_abs(_, hop):
        vlist = hop.inputargs(Float)
        return hop.genop('float_abs', vlist, resulttype=Float)
    def rtype_int(_, hop):
        vlist = hop.inputargs(Float)
        return hop.genop('cast_float_to_int', vlist, resulttype=Signed)
    rtype_float = rtype_pos
    # version picked by specialisation based on which
    # type system rtyping is using, from <type_system>.ll_str module
    def ll_str(self, f):
        pass
    ll_str._annspecialcase_ = "specialize:ts('ll_str.ll_float_str')"
# scale factor used to peel 31 bits at a time off the mantissa
TAKE_NEXT = float(2**31)
def ll_hash_float(f):
    """Hash a float exactly like CPython does.

    The integer case is not special-cased here; W_FloatObject is expected
    to handle that.  At the low level, floats cannot share a dict with
    ints anyway.
    """
    mantissa, exponent = math.frexp(f)
    mantissa *= TAKE_NEXT
    hipart = int(mantissa)
    mantissa = (mantissa - float(hipart)) * TAKE_NEXT
    return hipart + int(mantissa) + (exponent << 15)
#
# _________________________ Conversions _________________________
class __extend__(pairtype(IntegerRepr, FloatRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # integer -> float casts, one ll operation per source family
        if r_from.lowleveltype == Unsigned and r_to.lowleveltype == Float:
            log.debug('explicit cast_uint_to_float')
            return llops.genop('cast_uint_to_float', [v], resulttype=Float)
        if r_from.lowleveltype == Signed and r_to.lowleveltype == Float:
            log.debug('explicit cast_int_to_float')
            return llops.genop('cast_int_to_float', [v], resulttype=Float)
        if r_from.lowleveltype == SignedLongLong and r_to.lowleveltype == Float:
            log.debug('explicit cast_longlong_to_float')
            return llops.genop('cast_longlong_to_float', [v], resulttype=Float)
        return NotImplemented
class __extend__(pairtype(FloatRepr, IntegerRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # float -> integer casts, one ll operation per target family
        if r_from.lowleveltype == Float and r_to.lowleveltype == Unsigned:
            log.debug('explicit cast_float_to_uint')
            return llops.genop('cast_float_to_uint', [v], resulttype=Unsigned)
        if r_from.lowleveltype == Float and r_to.lowleveltype == Signed:
            log.debug('explicit cast_float_to_int')
            return llops.genop('cast_float_to_int', [v], resulttype=Signed)
        if r_from.lowleveltype == Float and r_to.lowleveltype == SignedLongLong:
            log.debug('explicit cast_float_to_longlong')
            return llops.genop('cast_float_to_longlong', [v], resulttype=SignedLongLong)
        return NotImplemented
class __extend__(pairtype(BoolRepr, FloatRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # bool -> float: 0.0 or 1.0
        if r_from.lowleveltype == Bool and r_to.lowleveltype == Float:
            log.debug('explicit cast_bool_to_float')
            return llops.genop('cast_bool_to_float', [v], resulttype=Float)
        return NotImplemented
class __extend__(pairtype(FloatRepr, BoolRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # float -> bool: nonzero test
        if r_from.lowleveltype == Float and r_to.lowleveltype == Bool:
            log.debug('explicit cast_float_to_bool')
            return llops.genop('float_is_true', [v], resulttype=Bool)
        return NotImplemented
class __extend__(pairtype(PyObjRepr, FloatRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # PyObject -> float via the CPython C API
        if r_to.lowleveltype == Float:
            return llops.gencapicall('PyFloat_AsDouble', [v],
                                     resulttype=Float,
                                     _callable=lambda pyo: float(pyo._obj.value))
        return NotImplemented
class __extend__(pairtype(FloatRepr, PyObjRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # float -> PyObject via the CPython C API
        if r_from.lowleveltype == Float:
            return llops.gencapicall('PyFloat_FromDouble', [v],
                                     resulttype=pyobj_repr,
                                     _callable=lambda x: pyobjectptr(x))
        return NotImplemented
| Python |
# this registry use the new interface for external functions
# all the above declarations in extfunctable should be moved here at some point.
from extfunc import register_external
# ___________________________
# math functions
import math
from pypy.rpython.lltypesystem.module import ll_math
from pypy.rpython.ootypesystem.module import ll_math as oo_math
from pypy.rpython.module import ll_os
try:
import termios
except ImportError:
pass
else:
from pypy.rpython.module import ll_termios
# the following functions all take one float, return one float
# and are part of math.h
simple_math_functions = [
    'acos', 'asin', 'atan', 'ceil', 'cos', 'cosh', 'exp', 'fabs',
    'floor', 'log', 'log10', 'sin', 'sinh', 'sqrt', 'tan', 'tanh'
    ]
# register each float->float libm function under its ll_math export name
for name in simple_math_functions:
    register_external(getattr(math, name), [float], float, "ll_math.ll_math_%s" % name)
def frexp_hook():
    """Annotation hook for math.frexp: record the helper that builds its
    (float, int) result tuple."""
    from pypy.rpython.extfunctable import record_call
    from pypy.annotation.model import SomeInteger, SomeTuple, SomeFloat
    from pypy.rpython.lltypesystem.module.ll_math import ll_frexp_result
    record_call(ll_frexp_result, (SomeFloat(), SomeInteger()), 'MATH_FREXP')
def modf_hook():
    """Annotation hook for math.modf: record the helper that builds its
    (float, float) result tuple."""
    from pypy.rpython.extfunctable import record_call
    from pypy.annotation.model import SomeTuple, SomeFloat
    from pypy.rpython.lltypesystem.module.ll_math import ll_modf_result
    record_call(ll_modf_result, (SomeFloat(), SomeFloat()), 'MATH_MODF')
# math functions with a non-trivial signature: (name, argument types,
# result type, optional annotation hook)
complex_math_functions = [
    ('frexp', [float], (float, int), frexp_hook),
    ('atan2', [float, float], float, None),
    ('fmod',  [float, float], float, None),
    ('ldexp', [float, int], float, None),
    ('modf',  [float], (float, float), modf_hook),
    ('hypot', [float, float], float, None),
    ('pow',   [float, float], float, None),
    ]
# register each one, with fake implementations for the llinterpreter
# (lltype and ootype variants) when the ll_math modules provide them
for name, args, res, hook in complex_math_functions:
    func = getattr(math, name)
    llfake = getattr(ll_math, 'll_math_%s' % name, None)
    oofake = getattr(oo_math, 'll_math_%s' % name, None)
    register_external(func, args, res, 'll_math.ll_math_%s' % name,
                      llfakeimpl=llfake, oofakeimpl=oofake,
                      annotation_hook = hook)
# ___________________________
# os.path functions
from pypy.tool.sourcetools import func_with_new_name
import os.path
# os.path.join is RPython, but we don't want to compile it directly
# because it's platform dependent. This is ok for lltype where the
# execution platform is the same as the translation platform, but not
# for ootype where the executable produced by some backends (e.g. CLI,
# JVM) are expected to run everywhere. Thus, we register it as an
# external function, but we provide a clone for lltype using
# func_with_new_name.
# XXX: I can't see any easy way to provide an oofakeimpl for the
# llinterpreter
path_functions = [
    ('join', [str, str], str),
    ]
for name, args, res in path_functions:
    func = getattr(os.path, name)
    # for lltype, the implementation is simply the CPython function itself,
    # cloned under a fresh name so it can be annotated independently
    llimpl = func_with_new_name(func, name)
    register_external(func, args, res, 'll_os_path.ll_%s' % name, llimpl=llimpl)
| Python |
from pypy.rpython import rclass
from pypy.rpython.extfunctable import standardexceptions
from pypy.annotation import model as annmodel
class AbstractExceptionData:
    """Public information for the code generators to help with exceptions."""
    standardexceptions = standardexceptions
    def __init__(self, rtyper):
        self.make_standard_exceptions(rtyper)
        # (NB. rclass identifies 'Exception' and 'object')
        r_type = rclass.getclassrepr(rtyper, None)
        r_instance = rclass.getinstancerepr(rtyper, None)
        r_type.setup()
        r_instance.setup()
        # representations (and low-level types) of an exception class and
        # of an exception instance, for use by the code generators
        self.r_exception_type = r_type
        self.r_exception_value = r_instance
        self.lltype_of_exception_type = r_type.lowleveltype
        self.lltype_of_exception_value = r_instance.lowleveltype
    def make_standard_exceptions(self, rtyper):
        """Force a ClassDef to exist for every standard exception class.
        The loop body looks like a no-op, but getuniqueclassdef() is
        called for its side effect of registering the class with the
        bookkeeper."""
        bk = rtyper.annotator.bookkeeper
        for cls in self.standardexceptions:
            classdef = bk.getuniqueclassdef(cls)
    def finish(self, rtyper):
        """Set up the class representation of every standard exception."""
        bk = rtyper.annotator.bookkeeper
        for cls in self.standardexceptions:
            classdef = bk.getuniqueclassdef(cls)
            rclass.getclassrepr(rtyper, classdef).setup()
    def make_raise_OSError(self, rtyper):
        """Annotate and return a helper implementing
        'raise OSError(errno, None)' for a given integer errno."""
        # ll_raise_OSError(errno)
        def ll_raise_OSError(errno):
            raise OSError(errno, None)
        helper_fn = rtyper.annotate_helper_fn(ll_raise_OSError, [annmodel.SomeInteger()])
        return helper_fn
| Python |
import py
import types, sys
import inspect
from pypy.objspace.flow.model import Variable, Constant, Block, Link
from pypy.objspace.flow.model import checkgraph, FunctionGraph, SpaceOperation
from pypy.annotation import model as annmodel
from pypy.annotation import description
from pypy.tool.sourcetools import has_varargs, valid_identifier
from pypy.tool.sourcetools import func_with_new_name
from pypy.rpython.error import TyperError
from pypy.rpython.rmodel import getgcflavor
from pypy.rlib.objectmodel import instantiate, ComputedIntSymbolic
def normalize_call_familes(annotator):
    """Normalize every maximal call family known to the bookkeeper and
    flag each one as normalized.  (The typo in the name is historical.)"""
    families = annotator.bookkeeper.pbc_maximal_call_families.infos()
    for family in families:
        normalize_calltable(annotator, family)
        family.normalized = True
def normalize_calltable(annotator, callfamily):
    """Try to normalize all rows of a table."""
    # 'overridden' descs (special-cased functions) cannot be normalized
    # together with others: only single-desc families are allowed then
    overridden = False
    for desc in callfamily.descs:
        if getattr(desc, 'overridden', False):
            overridden = True
    if overridden:
        if len(callfamily.descs) > 1:
            raise Exception("non-static call to overridden function")
        callfamily.overridden = True
        return
    # phase 1: make all graphs in each row agree on signature and defaults
    nshapes = len(callfamily.calltables)
    for shape, table in callfamily.calltables.items():
        for row in table:
            did_something = normalize_calltable_row_signature(annotator, shape,
                                                             row)
            if did_something:
                # rewriting signatures is only legal while the family is not
                # frozen yet, and only when a single call shape is involved
                assert not callfamily.normalized, "change in call family normalisation"
                assert nshapes == 1, "XXX call table too complex"
    # phase 2: generalize argument/result annotations across each row,
    # iterating to a fixed point (generalizing one graph can affect others)
    while True:
        progress = False
        for shape, table in callfamily.calltables.items():
            for row in table:
                progress |= normalize_calltable_row_annotation(annotator,
                                                               row.values())
        if not progress:
            return # done
        assert not callfamily.normalized, "change in call family normalisation"
def normalize_calltable_row_signature(annotator, shape, row):
    """Make every graph of one call-table row directly callable with the
    given call 'shape': graphs whose signature or defaults deviate get a
    new start block prepended that reorders keyword arguments and fills
    in default values.  Returns True if any graph was modified."""
    graphs = row.values()
    assert graphs, "no graph??"
    sig0 = graphs[0].signature
    defaults0 = graphs[0].defaults
    for graph in graphs[1:]:
        if graph.signature != sig0:
            break
        if graph.defaults != defaults0:
            break
    else:
        return False # nothing to do, all signatures already match
    shape_cnt, shape_keys, shape_star, shape_stst = shape
    assert not shape_star, "XXX not implemented"
    assert not shape_stst, "XXX not implemented"
    # for the first 'shape_cnt' arguments we need to generalize to
    # a common type
    call_nbargs = shape_cnt + len(shape_keys)
    did_something = False
    NODEFAULT = object()  # sentinel: this argument has no default value
    for graph in graphs:
        argnames, varargname, kwargname = graph.signature
        assert not varargname, "XXX not implemented"
        assert not kwargname, "XXX not implemented" # ?
        inputargs_s = [annotator.binding(v) for v in graph.getargs()]
        # argorder[k] = index, in this graph's own signature, of the k-th
        # argument as passed by a call of the given shape
        argorder = range(shape_cnt)
        for key in shape_keys:
            i = list(argnames).index(key)
            assert i not in argorder
            argorder.append(i)
        need_reordering = (argorder != range(call_nbargs))
        if need_reordering or len(graph.getargs()) != call_nbargs:
            oldblock = graph.startblock
            inlist = []
            defaults = graph.defaults or ()
            num_nondefaults = len(inputargs_s) - len(defaults)
            defaults = [NODEFAULT] * num_nondefaults + list(defaults)
            newdefaults = []
            for j in argorder:
                # fresh input variable, annotated like the one it replaces
                v = Variable(graph.getargs()[j])
                annotator.setbinding(v, inputargs_s[j])
                inlist.append(v)
                newdefaults.append(defaults[j])
            newblock = Block(inlist)
            # prepare the output args of newblock:
            # 1. collect the positional arguments
            outlist = inlist[:shape_cnt]
            # 2. add defaults and keywords
            for j in range(shape_cnt, len(inputargs_s)):
                try:
                    i = argorder.index(j)
                    v = inlist[i]
                except ValueError:
                    default = defaults[j]
                    if default is NODEFAULT:
                        raise TyperError(
                            "call pattern has %d positional arguments, "
                            "but %r takes at least %d arguments" % (
                                shape_cnt, graph.name, num_nondefaults))
                    v = Constant(default)
                outlist.append(v)
            newblock.closeblock(Link(outlist, oldblock))
            oldblock.isstartblock = False
            newblock.isstartblock = True
            graph.startblock = newblock
            # trim newdefaults down to its trailing run of real defaults.
            # NOTE(review): the slice keeps the NODEFAULT sentinel itself
            # ([i:], not [i+1:]) -- confirm consumers of graph.defaults
            # expect that.
            for i in range(len(newdefaults)-1,-1,-1):
                if newdefaults[i] is NODEFAULT:
                    newdefaults = newdefaults[i:]
                    break
            graph.defaults = tuple(newdefaults)
            graph.signature = (tuple([argnames[j] for j in argorder]),
                               None, None)
            # finished
            checkgraph(graph)
            annotator.annotated[newblock] = annotator.annotated[oldblock]
            did_something = True
    return did_something
def normalize_calltable_row_annotation(annotator, graphs):
    """Generalize the argument and result annotations of all graphs in
    one row to their common union, prepending a conversion start block to
    each graph whose own annotations were more specific.  Returns True if
    any annotation changed (another fixed-point iteration is needed)."""
    if len(graphs) <= 1:
        return False # nothing to do
    graph_bindings = {}
    for graph in graphs:
        graph_bindings[graph] = [annotator.binding(v)
                                 for v in graph.getargs()]
    # all graphs in a row must take the same number of arguments
    iterbindings = graph_bindings.itervalues()
    nbargs = len(iterbindings.next())
    for binding in iterbindings:
        assert len(binding) == nbargs
    # union of the annotations at each argument position
    generalizedargs = []
    for i in range(nbargs):
        args_s = []
        for graph, bindings in graph_bindings.items():
            args_s.append(bindings[i])
        s_value = annmodel.unionof(*args_s)
        generalizedargs.append(s_value)
    result_s = [annotator.binding(graph.getreturnvar())
                for graph in graph_bindings]
    generalizedresult = annmodel.unionof(*result_s)
    conversion = False
    for graph in graphs:
        bindings = graph_bindings[graph]
        need_conversion = (generalizedargs != bindings)
        if need_conversion:
            conversion = True
            # prepend a block whose inputs carry the generalized
            # annotations; the old start block receives the values as-is
            oldblock = graph.startblock
            inlist = []
            for j, s_value in enumerate(generalizedargs):
                v = Variable(graph.getargs()[j])
                annotator.setbinding(v, s_value)
                inlist.append(v)
            newblock = Block(inlist)
            # prepare the output args of newblock and link
            outlist = inlist[:]
            newblock.closeblock(Link(outlist, oldblock))
            oldblock.isstartblock = False
            newblock.isstartblock = True
            graph.startblock = newblock
            # finished
            checkgraph(graph)
            annotator.annotated[newblock] = annotator.annotated[oldblock]
        # convert the return value too
        if annotator.binding(graph.getreturnvar()) != generalizedresult:
            conversion = True
            annotator.setbinding(graph.getreturnvar(), generalizedresult)
    return conversion
# ____________________________________________________________
def merge_classpbc_getattr_into_classdef(rtyper):
    # code like 'some_class.attr' will record an attribute access in the
    # PBC access set of the family of classes of 'some_class'. If the classes
    # have corresponding ClassDefs, they are not updated by the annotator.
    # We have to do it now.
    all_families = rtyper.annotator.bookkeeper.classpbc_attr_families
    for attrname, access_sets in all_families.items():
        for access_set in access_sets.infos():
            descs = access_set.descs
            if len(descs) <= 1:
                continue
            if not isinstance(descs.iterkeys().next(), description.ClassDesc):
                continue
            # find the closest common ancestor of all classes involved
            classdefs = [desc.getuniqueclassdef() for desc in descs]
            commonbase = classdefs[0]
            for cdef in classdefs[1:]:
                commonbase = commonbase.commonbase(cdef)
                if commonbase is None:
                    raise TyperError("reading attribute %r: no common base "
                                     "class for %r" % (attrname, descs.keys()))
            extra_access_sets = rtyper.class_pbc_attributes.setdefault(
                commonbase, {})
            if commonbase in rtyper.class_reprs:
                # the class repr was already built: too late to register a
                # new access set, but this one must already be known
                assert access_set in extra_access_sets # minimal sanity check
                continue
            access_set.commonbase = commonbase
            if access_set not in extra_access_sets:
                counter = len(extra_access_sets)
                extra_access_sets[access_set] = attrname, counter
# ____________________________________________________________
def create_class_constructors(annotator):
    """For each family of classes that are called together, merge their
    '__init__' attribute families and union the '__init__' annotations,
    so one calling convention covers the whole family."""
    bk = annotator.bookkeeper
    call_families = bk.pbc_maximal_call_families
    for family in call_families.infos():
        if len(family.descs) <= 1:
            continue
        descs = family.descs.keys()
        if not isinstance(descs[0], description.ClassDesc):
            continue
        # Note that if classes are in the same callfamily, their __init__
        # attribute must be in the same attrfamily as well.
        change = descs[0].mergeattrfamilies(descs[1:], '__init__')
        if hasattr(descs[0].getuniqueclassdef(), 'my_instantiate_graph'):
            # instantiate graphs were already built: the family must not
            # change any more at this point
            assert not change, "after the fact change to a family of classes" # minimal sanity check
            continue
        # Put __init__ into the attr family, for ClassesPBCRepr.call()
        attrfamily = descs[0].getattrfamily('__init__')
        inits_s = [desc.s_read_attribute('__init__') for desc in descs]
        s_value = annmodel.unionof(attrfamily.s_value, *inits_s)
        attrfamily.s_value = s_value
        # ClassesPBCRepr.call() will also need instantiate() support
        for desc in descs:
            bk.needs_generic_instantiate[desc.getuniqueclassdef()] = True
# ____________________________________________________________
def create_instantiate_functions(annotator):
    """Build an 'instantiate() -> instance of C' helper graph for every
    class registered as needing generic instantiation (vtable support)."""
    for classdef in annotator.bookkeeper.needs_generic_instantiate:
        # only gc-managed instances can be instantiated generically
        assert getgcflavor(classdef) == 'gc'
        create_instantiate_function(annotator, classdef)
def create_instantiate_function(annotator, classdef):
    # build the graph of a function that looks like
    #
    # def my_instantiate():
    #     return instantiate(cls)
    #
    if hasattr(classdef, 'my_instantiate_graph'):
        return # already built for this class
    v = Variable()
    block = Block([])
    # 'instantiate1' takes no arguments: the class to instantiate is
    # implied by the annotation of the result variable below
    block.operations.append(SpaceOperation('instantiate1', [], v))
    name = valid_identifier('instantiate_'+classdef.name)
    graph = FunctionGraph(name, block)
    block.closeblock(Link([v], graph.returnblock))
    annotator.setbinding(v, annmodel.SomeInstance(classdef))
    annotator.annotated[block] = graph
    # force the result to be converted to a generic OBJECTPTR
    generalizedresult = annmodel.SomeInstance(classdef=None)
    annotator.setbinding(graph.getreturnvar(), generalizedresult)
    classdef.my_instantiate_graph = graph
    annotator.translator.graphs.append(graph)
# ____________________________________________________________
class Max(object):
    """An object that compares greater than everything else under the
    Python 2 three-way comparison protocol: __cmp__ yields 0 only when
    compared with the very same object, and 1 ('greater') otherwise."""
    def __cmp__(self, other):
        if self is other:
            return 0
        return 1
MAX = Max() # a maximum object
class TotalOrderSymbolic(ComputedIntSymbolic):
    """A symbolic integer whose concrete value is its rank in the sorted
    list of its 'peers'.  All values are assigned lazily, in one go, the
    first time any peer's compute_fn() is called."""
    def __init__(self, orderwitness, peers):
        self.orderwitness = orderwitness  # sort key (e.g. reversed mro)
        self.peers = peers                # shared, mutable list of all peers
        self.value = None                 # filled in by compute_fn()
        peers.append(self)
    def __cmp__(self, other):
        if not isinstance(other, TotalOrderSymbolic):
            return NotImplemented
        else:
            return cmp(self.orderwitness, other.orderwitness)
    def compute_fn(self):
        if self.value is None:
            # number all peers at once; appending new peers after this
            # point would trip the assertion below
            self.peers.sort()
            for i, peer in enumerate(self.peers):
                assert peer.value is None
                peer.value = i
            assert self.value is not None
        return self.value
def assign_inheritance_ids(annotator):
    # we sort the classes by lexicographic order of reversed(mro),
    # which gives a nice depth-first order.  Each classdef gets a lazy
    # [minid, maxid] pair; appending MAX to the witness makes maxid sort
    # after the minid of every subclass (presumably enabling range-based
    # subclass tests -- the consumers are not visible here).
    bk = annotator.bookkeeper
    try:
        lst = bk._inheritance_id_symbolics
    except AttributeError:
        # first call: create the shared peer list on the bookkeeper
        lst = bk._inheritance_id_symbolics = []
    for classdef in annotator.bookkeeper.classdefs:
        if not hasattr(classdef, 'minid'):
            witness = list(classdef.getmro())
            witness.reverse()
            classdef.minid = TotalOrderSymbolic(witness, lst)
            classdef.maxid = TotalOrderSymbolic(witness + [MAX], lst)
# ____________________________________________________________
def perform_normalizations(rtyper):
    """Entry point: run all inter-procedural normalizations on the
    annotated translation before rtyping starts."""
    create_class_constructors(rtyper.annotator)
    # bump the 'frozen' counter while rewriting call tables -- presumably
    # this makes further annotation changes an error (see the assertions
    # in normalize_calltable); confirm against the annotator
    rtyper.annotator.frozen += 1
    try:
        normalize_call_familes(rtyper.annotator)
        merge_classpbc_getattr_into_classdef(rtyper)
        assign_inheritance_ids(rtyper.annotator)
    finally:
        rtyper.annotator.frozen -= 1
    create_instantiate_functions(rtyper.annotator)
| Python |
import sys
from pypy.rlib.rarithmetic import r_longlong, r_uint, intmask
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rpython.lltypesystem.lltype import Signed
#XXX original SIGNED_RIGHT_SHIFT_ZERO_FILLS not taken into account
#XXX assuming HAVE_LONG_LONG (int_mul_ovf)
#XXX should int_mod and int_floordiv return an intmask(...) instead?
LONG_MAX = sys.maxint  # largest value of a native C 'long'
LONG_MIN = -sys.maxint-1  # smallest value of a native C 'long'
LLONG_MAX = r_longlong(2 ** (r_longlong.BITS-1) - 1)  # largest 'long long'
LLONG_MIN = -LLONG_MAX-1  # smallest 'long long'
def int_floordiv_zer(x, y):
    '''#define OP_INT_FLOORDIV_ZER(x,y,r,err) \
    if ((y)) { OP_INT_FLOORDIV(x,y,r,err); } \
    else FAIL_ZER(err, "integer division")
    '''
    # guard clause: a zero divisor raises, mirroring the C macro
    if not y:
        raise ZeroDivisionError("integer division")
    return llop.int_floordiv(Signed, x, y)
def uint_floordiv_zer(x, y):
    '''#define OP_UINT_FLOORDIV_ZER(x,y,r,err) \
    if ((y)) { OP_UINT_FLOORDIV(x,y,r,err); } \
    else FAIL_ZER(err, "unsigned integer division")
    '''
    # guard clause: a zero divisor raises, mirroring the C macro
    if not y:
        raise ZeroDivisionError("unsigned integer division")
    return x / y
def int_neg_ovf(x):
    """Negate a native long; the only overflowing input is LONG_MIN."""
    if x != LONG_MIN:
        return -x
    raise OverflowError("integer negate")
def llong_neg_ovf(x):
    """Negate a long long; the only overflowing input is LLONG_MIN."""
    if x != LLONG_MIN:
        return -x
    raise OverflowError("integer negate")
def int_abs_ovf(x):
    """Absolute value of a native long; abs(LONG_MIN) does not fit."""
    if x == LONG_MIN:
        raise OverflowError("integer absolute")
    if x >= 0:
        return x
    return -x
def llong_abs_ovf(x):
    """Absolute value of a long long; abs(LLONG_MIN) does not fit."""
    if x == LLONG_MIN:
        raise OverflowError("integer absolute")
    if x >= 0:
        return x
    return -x
def int_add_ovf(x, y):
    '''#define OP_INT_ADD_OVF(x,y,r,err) \
    OP_INT_ADD(x,y,r,err); \
    if ((r^(x)) >= 0 || (r^(y)) >= 0); \
    else FAIL_OVF(err, "integer addition")
    '''
    result = x + y
    # overflow happened iff the result's sign differs from both operands'
    if (result ^ x) < 0 and (result ^ y) < 0:
        raise OverflowError("integer addition")
    return result
def int_sub_ovf(x, y):
    '''#define OP_INT_SUB_OVF(x,y,r,err) \
    OP_INT_SUB(x,y,r,err); \
    if ((r^(x)) >= 0 || (r^~(y)) >= 0); \
    else FAIL_OVF(err, "integer subtraction")
    '''
    result = x - y
    # overflow iff the result's sign differs both from x's and from -y's
    if (result ^ x) < 0 and (result ^ ~y) < 0:
        raise OverflowError("integer subtraction")
    return result
def int_lshift_ovf(x, y):
    '''#define OP_INT_LSHIFT_OVF(x,y,r,err) \
    OP_INT_LSHIFT(x,y,r,err); \
    if ((x) != Py_ARITHMETIC_RIGHT_SHIFT(long, r, (y))) \
    FAIL_OVF(err, "x<<y losing bits or changing sign")
    '''
    result = x << y
    # shifting back must reproduce x exactly, else bits or the sign got lost
    if _Py_ARITHMETIC_RIGHT_SHIFT(result, y) != x:
        raise OverflowError("x<<y losing bits or changing sign")
    return result
def int_rshift_val(x, y):
    '''#define OP_INT_RSHIFT_VAL(x,y,r,err) \
    if ((y) >= 0) { OP_INT_RSHIFT(x,y,r,err); } \
    else FAIL_VAL(err, "negative shift count")
    '''
    # guard clause: negative shift counts are invalid
    if y < 0:
        raise ValueError("negative shift count")
    return _Py_ARITHMETIC_RIGHT_SHIFT(x, y)
def int_lshift_val(x, y):
    '''#define OP_INT_LSHIFT_VAL(x,y,r,err) \
    if ((y) >= 0) { OP_INT_LSHIFT(x,y,r,err); } \
    else FAIL_VAL(err, "negative shift count")
    '''
    # guard clause: negative shift counts are invalid
    if y < 0:
        raise ValueError("negative shift count")
    return x << y
def int_lshift_ovf_val(x, y):
    '''#define OP_INT_LSHIFT_OVF_VAL(x,y,r,err) \
    if ((y) >= 0) { OP_INT_LSHIFT_OVF(x,y,r,err); } \
    else FAIL_VAL(err, "negative shift count")
    '''
    # validity check first, then the overflow-checking shift
    if y < 0:
        raise ValueError("negative shift count")
    return int_lshift_ovf(x, y)
def int_floordiv_ovf(x, y):
    '''#define OP_INT_FLOORDIV_OVF(x,y,r,err) \
    if ((y) == -1 && (x) < 0 && ((unsigned long)(x) << 1) == 0) \
    FAIL_OVF(err, "integer division"); \
    OP_INT_FLOORDIV(x,y,r,err)
    '''
    # the only overflowing case is LONG_MIN // -1; '(r_uint(x) << 1) == 0'
    # detects x == LONG_MIN exactly like the C macro does
    overflow = y == -1 and x < 0 and (r_uint(x) << 1) == 0
    if overflow:
        raise OverflowError("integer division")
    return llop.int_floordiv(Signed, x, y)
def int_floordiv_ovf_zer(x, y):
    '''#define OP_INT_FLOORDIV_OVF_ZER(x,y,r,err) \
    if ((y)) { OP_INT_FLOORDIV_OVF(x,y,r,err); } \
    else FAIL_ZER(err, "integer division")
    '''
    # guard clause: zero divisor raises, then delegate the overflow check
    if not y:
        raise ZeroDivisionError("integer division")
    return int_floordiv_ovf(x, y)
def int_mod_ovf(x, y):
    '''#define OP_INT_MOD_OVF(x,y,r,err) \
    if ((y) == -1 && (x) < 0 && ((unsigned long)(x) << 1) == 0) \
    FAIL_OVF(err, "integer modulo"); \
    OP_INT_MOD(x,y,r,err)
    '''
    # same overflow condition as int_floordiv_ovf: LONG_MIN % -1
    overflow = y == -1 and x < 0 and (r_uint(x) << 1) == 0
    if overflow:
        raise OverflowError("integer modulo")
    return llop.int_mod(Signed, x, y)
def int_mod_zer(x, y):
    '''#define OP_INT_MOD_ZER(x,y,r,err) \
    if ((y)) { OP_INT_MOD(x,y,r,err); } \
    else FAIL_ZER(err, "integer modulo")
    '''
    # guard clause: zero divisor raises, mirroring the C macro
    if not y:
        raise ZeroDivisionError("integer modulo")
    return llop.int_mod(Signed, x, y)
def uint_mod_zer(x, y):
    '''#define OP_UINT_MOD_ZER(x,y,r,err) \
    if ((y)) { OP_UINT_MOD(x,y,r,err); } \
    else FAIL_ZER(err, "unsigned integer modulo")
    '''
    # guard clause: zero divisor raises, mirroring the C macro
    if not y:
        raise ZeroDivisionError("unsigned integer modulo")
    return x % y
def int_mod_ovf_zer(x, y):
    '''#define OP_INT_MOD_OVF_ZER(x,y,r,err) \
    if ((y)) { OP_INT_MOD_OVF(x,y,r,err); } \
    else FAIL_ZER(err, "integer modulo")
    '''
    # zero-divisor check first, then delegate the overflow check
    if not y:
        raise ZeroDivisionError("integer modulo")
    return int_mod_ovf(x, y)
# Helpers...
def _Py_ARITHMETIC_RIGHT_SHIFT(i, j):
    '''
    // Py_ARITHMETIC_RIGHT_SHIFT
    // C doesn't define whether a right-shift of a signed integer sign-extends
    // or zero-fills. Here a macro to force sign extension:
    // Py_ARITHMETIC_RIGHT_SHIFT(TYPE, I, J)
    // Return I >> J, forcing sign extension.
    // Requirements:
    // I is of basic signed type TYPE (char, short, int, long, or long long).
    // TYPE is one of char, short, int, long, or long long, although long long
    // must not be used except on platforms that support it.
    // J is an integer >= 0 and strictly less than the number of bits in TYPE
    // (because C doesn't define what happens for J outside that range either).
    // Caution:
    // I may be evaluated more than once.
    #ifdef SIGNED_RIGHT_SHIFT_ZERO_FILLS
    #define Py_ARITHMETIC_RIGHT_SHIFT(TYPE, I, J) \
    ((I) < 0 ? ~((~(unsigned TYPE)(I)) >> (J)) : (I) >> (J))
    #else
    #define Py_ARITHMETIC_RIGHT_SHIFT(TYPE, I, J) ((I) >> (J))
    #endif
    '''
    # Python's >> on negative ints always sign-extends, so the plain shift
    # already implements the sign-extending variant of the C macro.
    return i >> j
#XXX some code from src/int.h seems missing
#def int_mul_ovf(x, y): #HAVE_LONG_LONG version
# '''{ \
# PY_LONG_LONG lr = (PY_LONG_LONG)(x) * (PY_LONG_LONG)(y); \
# r = (long)lr; \
# if ((PY_LONG_LONG)r == lr); \
# else FAIL_OVF(err, "integer multiplication"); \
# }
# '''
# lr = r_longlong(x) * r_longlong(y);
# r = intmask(lr)
# if r_longlong(r) == lr:
# return r
# else:
# raise OverflowError("integer multiplication")
#not HAVE_LONG_LONG version
def int_mul_ovf(a, b): #long a, long b, long *longprod):
    """Multiply two native longs, raising OverflowError when the exact
    product does not fit.  Mirrors CPython's no-long-long fallback: the
    integer product is compared against the float product and accepted
    when the two agree to within 5 bits of precision."""
    exact = a * b
    approx = float(a) * float(b)
    # Fast path: nothing was lost in either computation, so no overflow.
    if float(exact) == approx:
        return exact
    # Somebody somewhere lost info.  Note that a != 0 and b != 0 here
    # (otherwise both products would be 0).  Accept the result if the
    # discrepancy is small relative to the true value, i.e.
    # absdiff/absprod <= 1/32 -- "5 good bits is close enough".
    if 32.0 * abs(float(exact) - approx) <= abs(approx):
        return exact
    raise OverflowError("integer multiplication")
| Python |
from pypy.rpython.microbench.microbench import MetaBench
# Micro-benchmarks for dictionary operations.  Each class below is consumed
# by the MetaBench metaclass: 'init' builds the object under test, 'loop' is
# one timed iteration, and the class is replaced by a generated timing
# function, so 'init' and 'loop' are deliberately plain functions (no self).
class str_dict__set_item:
    # store under two constant string keys
    __metaclass__ = MetaBench
    def init():
        return {}
    args = ['obj', 'i']
    def loop(obj, i):
        obj['foo'] = i
        obj['bar'] = i
class str_dict__get_item:
    # read back two constant string keys
    __metaclass__ = MetaBench
    def init():
        return {'foo': 0, 'bar': 1}
    args = ['obj', 'i']
    def loop(obj, i):
        return obj['foo'] + obj['bar']
class int_dict__set_item:
    # same as above, with integer keys
    __metaclass__ = MetaBench
    def init():
        return {}
    args = ['obj', 'i']
    def loop(obj, i):
        obj[42] = i
        obj[43] = i
class int_dict__get_item:
    # integer-keyed read benchmark
    __metaclass__ = MetaBench
    def init():
        return {42: 0, 43: 1}
    args = ['obj', 'i']
    def loop(obj, i):
        return obj[42] + obj[43]
class Foo:
    # plain instance type used as dictionary key below
    pass
obj1 = Foo()  # prebuilt keys shared by the obj_dict benchmarks
obj2 = Foo()
class obj_dict__set_item:
    # object-keyed store benchmark
    __metaclass__ = MetaBench
    def init():
        return {}
    args = ['obj', 'i']
    def loop(obj, i):
        obj[obj1] = i
        obj[obj2] = i
class obj_dict__get_item:
    # object-keyed read benchmark
    __metaclass__ = MetaBench
    def init():
        return {obj1: 0, obj2: 1}
    args = ['obj', 'i']
    def loop(obj, i):
        return obj[obj1] + obj[obj2]
| Python |
#!/usr/bin/env python
import sys
import autopath
from time import clock
from py.compat import subprocess
from pypy.translator.interactive import Translation
LOOPS = 10000000  # default iteration count for each benchmark
class MetaBench(type):
    """Metaclass that turns a benchmark description class into a timing
    function.  The class body must provide 'init' (builds the object
    under test), 'loop' (one iteration), 'args' (argument names passed
    to loop) and optionally 'LOOPS'.  The class statement evaluates to
    the generated function, which runs the loop LOOPS times and returns
    the elapsed time in seconds."""
    def __new__(self, cls_name, bases, cls_dict):
        # ('self' is really the metaclass here; kept as in the original)
        loop = cls_dict['loop']
        loop.dont_inline = True
        # globals for the generated function: the description's pieces
        # plus the clock used for timing
        myglob = {
            'init': cls_dict['init'],
            'loop': loop,
            'LOOPS': cls_dict.get('LOOPS', LOOPS),
            'clock': clock,
        }
        args = ', '.join(cls_dict['args'])
        source = """
def %(cls_name)s():
    obj = init()
    start = clock()
    for i in xrange(LOOPS):
        loop(%(args)s)
    return clock() - start
""" % locals()
        exec source in myglob
        func = myglob[cls_name]
        func.benchmark = True  # marker used by main() to collect benchmarks
        return func
def run_benchmark(exe):
    """Run a compiled benchmark and parse its stdout, expected to hold one
    'name : seconds' line per benchmark.  Returns a dict mapping benchmark
    name to elapsed seconds; raises SystemExit on a non-zero exit status."""
    from pypy.translator.cli.test.runtest import CliFunctionWrapper
    if isinstance(exe, CliFunctionWrapper):
        # CLI backend: 'exe' is a wrapper object, not an executable path
        stdout, stderr, retval = exe.run()
    else:
        assert isinstance(exe, str)
        bench = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = bench.communicate()
        retval = bench.wait()
    if retval != 0:
        print 'Running benchmark failed'
        print 'Standard Output:'
        print stdout
        print '-' * 40
        print 'Standard Error:'
        print stderr
        raise SystemExit(-1)
    mydict = {}
    for line in stdout.splitlines():
        name, res = line.split(':')
        mydict[name.strip()] = float(res)
    return mydict
def import_benchmarks():
modules = sys.argv[1:]
if len(modules) == 0:
# import all the microbenchs
from glob import glob
for module in glob('*.py'):
if module not in ('__init__.py', 'autopath.py', 'microbench.py'):
modules.append(module)
for module in modules:
module = module.rstrip('.py')
exec 'from %s import *' % module in globals()
def main():
    """Translate the collected benchmarks with both the C and the CLI
    backends, run both executables and print a comparison table."""
    import_benchmarks()
    # collect every function that MetaBench tagged with .benchmark
    benchmarks = []
    for name, thing in globals().iteritems():
        if getattr(thing, 'benchmark', False):
            benchmarks.append((name, thing))
    benchmarks.sort()
    def entry_point(argv):
        # standalone entry point: run each benchmark and print its timing
        for name, func in benchmarks:
            print name, ':', func()
        return 0
    t = Translation(entry_point, standalone=True, backend='c')
    c_exe = t.compile()
    t = Translation(entry_point, standalone=True, backend='cli')
    cli_exe = t.compile()
    c_res = run_benchmark(c_exe)
    cli_res = run_benchmark(cli_exe)
    print 'benchmark genc gencli ratio'
    print
    for name, _ in benchmarks:
        c_time = c_res[name]
        cli_time = cli_res[name]
        if c_time == 0:
            ratio = '%10s' % '---'
        else:
            ratio = '%10.2f' % (cli_time/c_time)
        print '%-32s %10.2f %10.2f %s' % (name, c_time, cli_time, ratio)
if __name__ == '__main__':
    main()
| Python |
from pypy.rpython.microbench.microbench import MetaBench
# Micro-benchmarks for list operations.  Each class below is consumed by
# the MetaBench metaclass: 'init' builds the object under test, 'loop' is
# one timed iteration, and the class is replaced by a generated timing
# function, so 'init' and 'loop' are deliberately plain functions (no self).
class list__append:
    __metaclass__ = MetaBench
    def init():
        return []
    args = ['obj', 'i']
    def loop(obj, i):
        obj.append(i)
class list__get_item:
    # indexed read from a list that was grown by appending
    __metaclass__ = MetaBench
    LOOPS = 100000000
    def init():
        obj = []
        for i in xrange(1000):
            obj.append(i)
        return obj
    args = ['obj', 'i']
    def loop(obj, i):
        return obj[i%1000]
class list__set_item:
    # indexed write into a list that was grown by appending
    __metaclass__ = MetaBench
    LOOPS = 100000000
    def init():
        obj = []
        for i in xrange(1000):
            obj.append(i)
        return obj
    args = ['obj', 'i']
    def loop(obj, i):
        obj[i%1000] = i
class fixed_list__get_item:
    # indexed read from a list built directly at its final size
    __metaclass__ = MetaBench
    LOOPS = 100000000
    def init():
        return [0] * 1000
    args = ['obj', 'i']
    def loop(obj, i):
        return obj[i%1000]
class fixed_list__set_item:
    # indexed write into a list built directly at its final size
    __metaclass__ = MetaBench
    LOOPS = 100000000
    def init():
        return [0] * 1000
    args = ['obj', 'i']
    def loop(obj, i):
        obj[i%1000] = i
class list__iteration__int:
    # full iteration summing integer items
    __metaclass__ = MetaBench
    LOOPS = 100000
    def init():
        obj = [0]*1000
        obj[0] = 42
        return obj
    args = ['obj']
    def loop(obj):
        tot = 0
        for item in obj:
            tot += item
        return tot
class list__iteration__string:
    # full iteration summing string lengths
    __metaclass__ = MetaBench
    LOOPS = 100000
    def init():
        obj = ['foo']*1000
        obj[0] = 'bar'
        return obj
    args = ['obj']
    def loop(obj):
        tot = 0
        for item in obj:
            tot += len(item)
        return tot
| Python |
#!/usr/bin/env python
import sys
import autopath
from time import clock
from py.compat import subprocess
from pypy.translator.interactive import Translation
LOOPS = 10000000  # default iteration count for each benchmark
class MetaBench(type):
    """Metaclass that turns a benchmark description class into a timing
    function.  The class body must provide 'init' (builds the object
    under test), 'loop' (one iteration), 'args' (argument names passed
    to loop) and optionally 'LOOPS'.  The class statement evaluates to
    the generated function, which runs the loop LOOPS times and returns
    the elapsed time in seconds."""
    def __new__(self, cls_name, bases, cls_dict):
        # ('self' is really the metaclass here; kept as in the original)
        loop = cls_dict['loop']
        loop.dont_inline = True
        # globals for the generated function: the description's pieces
        # plus the clock used for timing
        myglob = {
            'init': cls_dict['init'],
            'loop': loop,
            'LOOPS': cls_dict.get('LOOPS', LOOPS),
            'clock': clock,
        }
        args = ', '.join(cls_dict['args'])
        source = """
def %(cls_name)s():
    obj = init()
    start = clock()
    for i in xrange(LOOPS):
        loop(%(args)s)
    return clock() - start
""" % locals()
        exec source in myglob
        func = myglob[cls_name]
        func.benchmark = True  # marker used by main() to collect benchmarks
        return func
def run_benchmark(exe):
    """Run a compiled benchmark and parse its stdout, expected to hold one
    'name : seconds' line per benchmark.  Returns a dict mapping benchmark
    name to elapsed seconds; raises SystemExit on a non-zero exit status."""
    from pypy.translator.cli.test.runtest import CliFunctionWrapper
    if isinstance(exe, CliFunctionWrapper):
        # CLI backend: 'exe' is a wrapper object, not an executable path
        stdout, stderr, retval = exe.run()
    else:
        assert isinstance(exe, str)
        bench = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = bench.communicate()
        retval = bench.wait()
    if retval != 0:
        print 'Running benchmark failed'
        print 'Standard Output:'
        print stdout
        print '-' * 40
        print 'Standard Error:'
        print stderr
        raise SystemExit(-1)
    mydict = {}
    for line in stdout.splitlines():
        name, res = line.split(':')
        mydict[name.strip()] = float(res)
    return mydict
def import_benchmarks():
modules = sys.argv[1:]
if len(modules) == 0:
# import all the microbenchs
from glob import glob
for module in glob('*.py'):
if module not in ('__init__.py', 'autopath.py', 'microbench.py'):
modules.append(module)
for module in modules:
module = module.rstrip('.py')
exec 'from %s import *' % module in globals()
def main():
    """Translate the collected benchmarks with both the C and the CLI
    backends, run both executables and print a comparison table."""
    import_benchmarks()
    # collect every function that MetaBench tagged with .benchmark
    benchmarks = []
    for name, thing in globals().iteritems():
        if getattr(thing, 'benchmark', False):
            benchmarks.append((name, thing))
    benchmarks.sort()
    def entry_point(argv):
        # standalone entry point: run each benchmark and print its timing
        for name, func in benchmarks:
            print name, ':', func()
        return 0
    t = Translation(entry_point, standalone=True, backend='c')
    c_exe = t.compile()
    t = Translation(entry_point, standalone=True, backend='cli')
    cli_exe = t.compile()
    c_res = run_benchmark(c_exe)
    cli_res = run_benchmark(cli_exe)
    print 'benchmark genc gencli ratio'
    print
    for name, _ in benchmarks:
        c_time = c_res[name]
        cli_time = cli_res[name]
        if c_time == 0:
            ratio = '%10s' % '---'
        else:
            ratio = '%10.2f' % (cli_time/c_time)
        print '%-32s %10.2f %10.2f %s' % (name, c_time, cli_time, ratio)
if __name__ == '__main__':
    main()
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ is undefined when running via exec(); fall back to the
        # script path
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # walk upwards until a directory named 'part' (e.g. 'pypy') is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)
    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)
    # re-register modules already imported under a bare name so that they
    # are also reachable under their full dotted name
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): 'newname' is a bare module basename and can never
        # start with part+'.', so this filter appears to skip everything
        # and leave 'munged' empty -- compare with the master copy in
        # pypy/tool/autopath.py before relying on this loop.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            # also expose the module as an attribute of its parent package
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
""" clone master version of autopath.py into all subdirs """
from os.path import join, walk
if not this_dir.endswith(join('pypy','tool')):
raise EnvironmentError("can only clone master version "
"'%s'" % join(pypydir, 'tool',_myname))
def sync_walker(arg, dirname, fnames):
if _myname in fnames:
fn = join(dirname, _myname)
f = open(fn, 'rwb+')
try:
if f.read() == arg:
print "checkok", fn
else:
print "syncing", fn
f = open(fn, 'w')
f.write(arg)
finally:
f.close()
s = open(join(pypydir, 'tool', _myname), 'rb').read()
walk(pypydir, sync_walker, s)
_myname = 'autopath.py'  # canonical filename under which this module is cloned
# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')
if __name__ == '__main__':
    __clone()
| Python |
from pypy.rlib.objectmodel import r_dict
from pypy.rpython.microbench.microbench import MetaBench
class Obj:
    # hashable wrapper: the r_dict benchmarks below hash/compare keys
    # through the wrapped 'x' value
    def __init__(self, x):
        self.x = x
def myhash(obj):
    # hash callback for r_dict: the wrapped value itself
    return obj.x
def mycmp(obj1, obj2):
    # equality callback for r_dict: compare the wrapped values
    return obj1.x == obj2.x
class Space:
    # exposes the same callbacks as bound methods of a frozen prebuilt
    # constant, to benchmark r_dicts built from PBC methods
    def myhash(self, obj):
        return obj.x
    def mycmp(self, obj1, obj2):
        return obj1.x == obj2.x
    def _freeze_(self):
        # marks this instance as a frozen prebuilt constant for the annotator
        return True
space = Space()  # the frozen PBC instance
obj1 = Obj(1)    # prebuilt keys shared by all benchmarks below
obj2 = Obj(2)
class r_dict__set_item:
    # r_dict with plain-function callbacks: store two prebuilt keys
    # (consumed by the MetaBench metaclass, hence the self-less methods)
    __metaclass__ = MetaBench
    def init():
        return r_dict(mycmp, myhash)
    args = ['obj', 'i']
    def loop(obj, i):
        obj[obj1] = i
        obj[obj2] = i
class r_dict__get_item:
    # plain-function callbacks: read two prebuilt keys back
    __metaclass__ = MetaBench
    def init():
        res = r_dict(mycmp, myhash)
        res[obj1] = 42
        res[obj2] = 43
        return res
    args = ['obj', 'i']
    def loop(obj, i):
        return obj[obj1] + obj[obj2]
class r_dict__frozen_pbc__set_item:
    # callbacks are bound methods of the frozen PBC 'space': store two keys
    __metaclass__ = MetaBench
    def init():
        return r_dict(space.mycmp, space.myhash)
    args = ['obj', 'i']
    def loop(obj, i):
        obj[obj1] = i
        obj[obj2] = i
class r_dict__frozen_pbc__get_item:
    # frozen-PBC callbacks: read two keys back
    __metaclass__ = MetaBench
    def init():
        res = r_dict(space.mycmp, space.myhash)
        res[obj1] = 42
        res[obj2] = 43
        return res
    args = ['obj', 'i']
    def loop(obj, i):
        return obj[obj1] + obj[obj2]
| Python |
from pypy.rpython.microbench.microbench import MetaBench
# Four distinct one-argument functions: keeping several callees alive in
# FUNCS forces obj[i%4](i) below to be a genuine indirect call.
def f1(x):
    return x
def f2(x):
    return x+1
def f3(x):
    return x+2
def f4(x):
    return x+3
FUNCS = [f1, f2, f3, f4]
class indirect__call:
    # MetaBench-generated benchmark: dispatch through a function table
    __metaclass__ = MetaBench
    def init():
        return FUNCS
    args = ['obj', 'i']
    def loop(obj, i):
        return obj[i%4](i)
| Python |
"""typesystem.py -- Typesystem-specific operations for RTyper."""
from pypy.annotation.pairtype import extendabletype
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.error import TyperError
class TypeSystem(object):
    """Base class of the type-system singletons: bundles the operations
    whose implementation differs between the lltype and ootype systems."""
    __metaclass__ = extendabletype
    # whether this type system provides an 'exceptiondata' module
    offers_exceptiondata = True
    def __getattr__(self, name):
        """Lazy import to avoid circular dependencies."""
        def load(modname):
            # import pypy.rpython.<self.name>.<modname>, or None on failure
            try:
                return __import__("pypy.rpython.%s.%s" % (self.name, modname),
                                  None, None, ['__doc__'])
            except ImportError:
                return None
        if name in ('rclass', 'rpbc', 'rbuiltin', 'rtuple', 'rlist',
                    'rslice', 'rdict', 'rrange', 'rstr', 'rgeneric',
                    'll_str', 'exceptiondata'):
            mod = load(name)
            if mod is not None:
                # cache the module on the instance so that __getattr__
                # only runs once per name
                setattr(self, name, mod)
                return mod
        # unknown name, or the lazy import failed
        raise AttributeError(name)
    def derefType(self, T):
        """Return the type obtained by dereferencing T (e.g. Ptr(X) -> X)."""
        raise NotImplementedError()
    def deref(self, obj):
        """Dereference `obj' to concrete object."""
        raise NotImplementedError()
    def check_null(self, repr, hop):
        """Emit operations to check that `hop's argument is not a null object.
        """
        raise NotImplementedError()
    def null_callable(self, T):
        """null callable object of type T"""
        raise NotImplementedError()
    def getcallable(self, graph, getconcretetype=None):
        """Return callable given a Python function."""
        if getconcretetype is None:
            getconcretetype = self.getconcretetype
        llinputs = [getconcretetype(v) for v in graph.getargs()]
        lloutput = getconcretetype(graph.getreturnvar())
        typ, constr = self.callable_trait
        FT = typ(llinputs, lloutput)
        # keep a reference to the original Python callable when available,
        # so the function can still be run on top of CPython
        if hasattr(graph, 'func') and callable(graph.func):
            return constr(FT, graph.name, graph = graph, _callable = graph.func)
        else:
            return constr(FT, graph.name, graph = graph)
    def getexternalcallable(self, ll_args, ll_result, name, **kwds):
        """Build a callable for an external (graph-less) function."""
        typ, constr = self.callable_trait
        FT = typ(ll_args, ll_result)
        return constr(FT, name, **kwds)
    def getconcretetype(self, v):
        """Helper called by getcallable() to get the concrete type of a variable
        in a graph."""
        raise NotImplementedError()
    def perform_normalizations(self, rtyper):
        """Prepare the annotator's internal data structures for rtyping
        with the specified type system.
        """
        # default implementation
        from pypy.rpython.normalizecalls import perform_normalizations
        perform_normalizations(rtyper)
class LowLevelTypeSystem(TypeSystem):
    # Type system based on C-like pointers-to-structs (lltype).
    name = "lltypesystem"
    callable_trait = (lltype.FuncType, lltype.functionptr)

    def derefType(self, T):
        assert isinstance(T, lltype.Ptr)
        return T.TO

    def deref(self, obj):
        assert isinstance(lltype.typeOf(obj), lltype.Ptr)
        return obj._obj

    def check_null(self, repr, hop):
        # None is a nullptr, which is false; everything else is true.
        vlist = hop.inputargs(repr)
        return hop.genop('ptr_nonzero', vlist, resulttype=lltype.Bool)

    def getconcretetype(self, v):
        # Variables without a concretetype default to a PyObject pointer.
        return getattr(v, 'concretetype', lltype.Ptr(lltype.PyObject))

    def null_callable(self, T):
        return lltype.nullptr(T.TO)

    def generic_is(self, robj1, robj2, hop):
        # Emit an identity comparison; a Void-typed side (a constant known
        # statically) borrows the other side's representation.
        roriginal1 = robj1
        roriginal2 = robj2
        if robj1.lowleveltype is lltype.Void:
            robj1 = robj2
        elif robj2.lowleveltype is lltype.Void:
            robj2 = robj1
        if (not isinstance(robj1.lowleveltype, lltype.Ptr) or
            not isinstance(robj2.lowleveltype, lltype.Ptr)):
            raise TyperError('is of instances of the non-pointers: %r, %r' % (
                roriginal1, roriginal2))
        if robj1.lowleveltype != robj2.lowleveltype:
            raise TyperError('is of instances of different pointer types: %r, %r' % (
                roriginal1, roriginal2))
        v_list = hop.inputargs(robj1, robj2)
        return hop.genop('ptr_eq', v_list, resulttype=lltype.Bool)
class ObjectOrientedTypeSystem(TypeSystem):
    # Type system based on objects and classes (ootype).
    name = "ootypesystem"
    callable_trait = (ootype.StaticMethod, ootype.static_meth)

    def derefType(self, T):
        # OO values are not pointers; "dereferencing" a type is the identity.
        assert isinstance(T, ootype.OOType)
        return T

    def deref(self, obj):
        assert isinstance(ootype.typeOf(obj), ootype.OOType)
        return obj

    def check_null(self, repr, hop):
        vlist = hop.inputargs(repr)
        return hop.genop('oononnull', vlist, resulttype=ootype.Bool)

    def null_callable(self, T):
        return ootype.null(T)

    def generic_is(self, robj1, robj2, hop):
        # Emit an identity comparison; a Void-typed side (a constant known
        # statically) borrows the other side's representation.
        roriginal1 = robj1
        roriginal2 = robj2
        if robj1.lowleveltype is lltype.Void:
            robj1 = robj2
        elif robj2.lowleveltype is lltype.Void:
            robj2 = robj1
        # allowed operands: instances/ADTs, or a pair of Class values
        if (not isinstance(robj1.lowleveltype, (ootype.Instance, ootype.BuiltinADTType)) or
            not isinstance(robj2.lowleveltype, (ootype.Instance, ootype.BuiltinADTType))) and \
           (robj1.lowleveltype is not ootype.Class or
            robj2.lowleveltype is not ootype.Class):
            raise TyperError('is of instances of the non-instances: %r, %r' % (
                roriginal1, roriginal2))

        v_list = hop.inputargs(robj1, robj2)
        # NOTE(review): resulttype uses lltype.Bool here while check_null
        # uses ootype.Bool — assumed to be the same primitive; confirm.
        return hop.genop('oois', v_list, resulttype=lltype.Bool)
# All typesystems are singletons
LowLevelTypeSystem.instance = LowLevelTypeSystem()
ObjectOrientedTypeSystem.instance = ObjectOrientedTypeSystem()

# Convenience alias: build an lltype function pointer from a flow graph.
getfunctionptr = LowLevelTypeSystem.instance.getcallable
# Multiple dispatch on type system and high-level annotation
from pypy.annotation.pairtype import pairtype
from pypy.annotation.model import SomeObject
class __extend__(pairtype(TypeSystem, SomeObject)):
    # Double dispatch on (type system, annotation) used by the RTyper to
    # select a Repr and to compute the cache key under which it is stored.
    def rtyper_makerepr((ts, s_obj), rtyper):
        return s_obj.rtyper_makerepr(rtyper)

    def rtyper_makekey((ts, s_obj), rtyper):
        # some annotations need the rtyper to compute their cache key
        if hasattr(s_obj, "rtyper_makekey_ex"):
            return s_obj.rtyper_makekey_ex(rtyper)
        return s_obj.rtyper_makekey()
| Python |
from pypy.annotation import model as annmodel
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.rmodel import Repr, HalfConcreteWrapper
from pypy.rpython.extfunctable import typetable
from pypy.rpython import rbuiltin
from pypy.rpython.module.support import init_opaque_object
from pypy.objspace.flow.model import Constant, Variable
from pypy.rpython import extregistry
from pypy.annotation.signature import annotation
from pypy.annotation.pairtype import pairtype
# ExternalObjects
class __extend__(annmodel.SomeExternalObject):
    def rtyper_makerepr(self, rtyper):
        # XXX kill with extfunctable.py
        if self.knowntype in typetable:
            # old-style external type declared via extfunctable
            return ExternalObjRepr(self.knowntype)
        else:
            # delegate to the get_repr() of the extregistered Entry class
            entry = extregistry.lookup_type(self.knowntype)
            return entry.get_repr(rtyper, self)

    def rtyper_makekey(self):
        # grab all attributes of the SomeExternalObject for the key
        # (minus constant-ness, so constants share the non-constant repr)
        attrs = lltype.frozendict(self.__dict__)
        if 'const' in attrs:
            del attrs['const']
        if 'const_box' in attrs:
            del attrs['const_box']
        return self.__class__, attrs
class ExternalObjRepr(Repr):
    """Repr for the (obsolescent) extfunctable.declaretype() case.

    If you use the extregistry instead you get to pick your own Repr.
    """

    def __init__(self, knowntype):
        self.exttypeinfo = typetable[knowntype]
        TYPE = self.exttypeinfo.get_lltype()
        self.lowleveltype = lltype.Ptr(TYPE)
        # maps Constant(python value) -> prebuilt low-level instance
        self.instance_cache = {}
        # The set of methods supported depends on 'knowntype', so we
        # cannot have rtype_method_xxx() methods directly on the
        # ExternalObjRepr class.  But we can store them in 'self' now.
        for name, extfuncinfo in self.exttypeinfo.methods.items():
            methodname = 'rtype_method_' + name
            bltintyper = rbuiltin.make_rtype_extfunc(extfuncinfo)
            setattr(self, methodname, bltintyper)

    def convert_const(self, value):
        """Convert a prebuilt Python value into its (cached) low-level form.

        Raises TyperError when the value is not an instance of the declared
        external type; None maps to the null pointer.
        """
        # BUGFIX: TyperError is not imported at module level in this file,
        # so the raise below used to fail with a NameError instead of
        # reporting the actual typing problem.
        from pypy.rpython.error import TyperError
        T = self.exttypeinfo.get_lltype()
        if value is None:
            return lltype.nullptr(T)
        if not isinstance(value, self.exttypeinfo.typ):
            raise TyperError("expected a %r: %r" % (self.exttypeinfo.typ,
                                                    value))
        key = Constant(value)
        try:
            p = self.instance_cache[key]
        except KeyError:
            p = lltype.malloc(T)
            init_opaque_object(p.obj, value)
            self.instance_cache[key] = p
        return p

    def rtype_is_true(self, hop):
        # truth value of an external object is pointer non-nullness
        vlist = hop.inputargs(self)
        return hop.genop('ptr_nonzero', vlist, resulttype=lltype.Bool)
| Python |
from pypy.annotation.pairtype import pairtype, pair
from pypy.annotation import model as annmodel
from pypy.rpython.error import TyperError
from pypy.rpython.rmodel import Repr, IntegerRepr, inputconst
from pypy.rpython.rmodel import externalvsinternal
from pypy.rpython.rlist import AbstractBaseListRepr, AbstractListRepr, \
AbstractFixedSizeListRepr, AbstractListIteratorRepr, rtype_newlist, \
rtype_alloc_and_set, ll_setitem_nonneg, ADTIList, ADTIFixedList
from pypy.rpython.rlist import dum_nocheck, dum_checkidx
from pypy.rpython.lltypesystem.rslice import SliceRepr
from pypy.rpython.lltypesystem.rslice import startstop_slice_repr, startonly_slice_repr
from pypy.rpython.lltypesystem.rslice import minusone_slice_repr
from pypy.rpython.lltypesystem.lltype import \
GcForwardReference, Ptr, GcArray, GcStruct, \
Void, Signed, malloc, typeOf, Primitive, \
Bool, nullptr, typeMethod
from pypy.rpython.lltypesystem import rstr
from pypy.rpython import robject
from pypy.rlib.objectmodel import debug_assert
# ____________________________________________________________
#
# Concrete implementation of RPython lists:
#
# struct list {
# int length;
# items_array *items;
# }
#
# 'items' points to a C-like array in memory preceded by a 'length' header,
# where each item contains a primitive value or pointer to the actual list
# item.
#
# or for fixed-size lists an array is directly used:
#
# item_t list_items[]
#
class BaseListRepr(AbstractBaseListRepr):
    # Common base for the resizable and fixed-size lltype list reprs.
    rstr_ll = rstr.LLHelpers

    def __init__(self, rtyper, item_repr, listitem=None):
        self.rtyper = rtyper
        # LIST is filled in later by _setup_repr() of the subclasses
        self.LIST = GcForwardReference()
        self.lowleveltype = Ptr(self.LIST)
        if not isinstance(item_repr, Repr):  # not computed yet, done by setup()
            assert callable(item_repr)
            self._item_repr_computer = item_repr
        else:
            self.external_item_repr, self.item_repr = externalvsinternal(rtyper, item_repr)
        self.listitem = listitem
        self.list_cache = {}
        # setup() needs to be called to finish this initialization
##        self.list_builder = ListBuilder(self)

##    def _setup_repr_final(self):
##        self.list_builder.setup()

    def null_const(self):
        return nullptr(self.LIST)

    def get_eqfunc(self):
        # equality function of the item repr, passed as a Void constant
        return inputconst(Void, self.item_repr.get_ll_eq_function())

    def make_iterator_repr(self):
        return ListIteratorRepr(self)

    def get_itemarray_lowleveltype(self):
        # Build the GcArray of items, with the ADT methods that the
        # generic rlist helpers call on it.
        ITEM = self.item_repr.lowleveltype
        ITEMARRAY = GcArray(ITEM,
                            adtmeths = ADTIFixedList({
                                 "ll_newlist": ll_fixed_newlist,
                                 "ll_length": ll_fixed_length,
                                 "ll_items": ll_fixed_items,
                                 ##"list_builder": self.list_builder,
                                 "ITEM": ITEM,
                                 "ll_getitem_fast": ll_fixed_getitem_fast,
                                 "ll_setitem_fast": ll_fixed_setitem_fast,
                            }))
        return ITEMARRAY
##class ListBuilder(object):
## """Interface to allow lazy list building by the JIT."""
## def __init__(self, list_repr):
## # This should not keep a reference to the RTyper, even indirectly via
## # the list_repr. So tmp_list_repr is replaced by None in setup().
## self.tmp_list_repr = list_repr
## def setup(self):
## # Precompute the c_newitem and c_setitem_nonneg function pointers,
## # needed below.
## list_repr = self.tmp_list_repr
## if list_repr is None:
## return # already set up
## self.tmp_list_repr = None
## if list_repr.rtyper is None:
## return # only for test_rlist, which doesn't need this anyway
## LIST = list_repr.LIST
## LISTPTR = list_repr.lowleveltype
## ITEM = list_repr.item_repr.lowleveltype
## self.LIST = LIST
## self.LISTPTR = LISTPTR
## argtypes = [Signed]
## newlist_ptr = list_repr.rtyper.annotate_helper_fn(LIST.ll_newlist,
## argtypes)
## bk = list_repr.rtyper.annotator.bookkeeper
## argtypes = [bk.immutablevalue(dum_nocheck), LISTPTR, Signed, ITEM]
## setitem_nonneg_ptr = list_repr.rtyper.annotate_helper_fn(
## ll_setitem_nonneg, argtypes)
## #self.c_dum_nocheck = inputconst(Void, dum_nocheck)
## #self.c_LIST = inputconst(Void, self.LIST)
## def build_newlist(llops, length):
## c_newlist = llops.genconst(newlist_ptr)
## c_len = llops.genconst(length)
## c_LIST = llops.genvoidconst(LIST)
## return llops.genop('direct_call',
## [c_newlist, c_LIST, c_len],
## llops.constTYPE(LISTPTR))
## def build_setitem(llops, v_list, index, v_item):
## c_setitem_nonneg = llops.genconst(setitem_nonneg_ptr)
## c_i = llops.genconst(index)
## llops.genop('direct_call', [c_setitem_nonneg,
## llops.genvoidconst(dum_nocheck),
## v_list, c_i, v_item])
## self.build_newlist = build_newlist
## self.build_setitem = build_setitem
## def build(self, llops, items_v):
## """Make the operations that would build a list containing the
## provided items."""
## v_list = self.build_newlist(llops, len(items_v))
## for i, v in enumerate(items_v):
## self.build_setitem(llops, v_list, i, v)
## return v_list
## def getlistptr(self):
## list_repr = self.tmp_list_repr
## if list_repr is not None:
## list_repr.setup()
## return list_repr.lowleveltype
## else:
## return self.LISTPTR
## def __eq__(self, other):
## if not isinstance(other, ListBuilder):
## return False
## return self.getlistptr() == other.getlistptr()
## def __ne__(self, other):
## return not (self == other)
## def __hash__(self):
## return 1 # bad but not used alone
class __extend__(pairtype(BaseListRepr, BaseListRepr)):
    def rtype_is_((r_lst1, r_lst2), hop):
        # 'lst1 is lst2' between lists of different low-level types:
        if r_lst1.lowleveltype != r_lst2.lowleveltype:
            # obscure logic, the is can be true only if both are None
            v_lst1, v_lst2 = hop.inputargs(r_lst1, r_lst2)
            return hop.gendirectcall(ll_both_none, v_lst1, v_lst2)

        return pairtype(Repr, Repr).rtype_is_(pair(r_lst1, r_lst2), hop)
class ListRepr(AbstractListRepr, BaseListRepr):
    # Resizable list: GcStruct with a 'length' field and a pointer to an
    # (over-allocated) items array.

    def _setup_repr(self):
        if 'item_repr' not in self.__dict__:
            self.external_item_repr, self.item_repr = externalvsinternal(self.rtyper, self._item_repr_computer())
        if isinstance(self.LIST, GcForwardReference):
            ITEM = self.item_repr.lowleveltype
            ITEMARRAY = self.get_itemarray_lowleveltype()
            # XXX we might think of turning length stuff into Unsigned
            self.LIST.become(GcStruct("list", ("length", Signed),
                                              ("items", Ptr(ITEMARRAY)),
                                      adtmeths = ADTIList({
                                          "ll_newlist": ll_newlist,
                                          "ll_length": ll_length,
                                          "ll_items": ll_items,
                                          ##"list_builder": self.list_builder,
                                          "ITEM": ITEM,
                                          "ll_getitem_fast": ll_getitem_fast,
                                          "ll_setitem_fast": ll_setitem_fast,
                                          "_ll_resize_ge": _ll_list_resize_ge,
                                          "_ll_resize_le": _ll_list_resize_le,
                                          "_ll_resize": _ll_list_resize,
                                      }))
                             )

    def compact_repr(self):
        return 'ListR %s' % (self.item_repr.compact_repr(),)

    def prepare_const(self, n):
        # allocate an immortal prebuilt list of exactly n items
        result = malloc(self.LIST, immortal=True)
        result.length = n
        result.items = malloc(self.LIST.items.TO, n)
        return result

    def rtype_method_append(self, hop):
        # with the 'maxlength' hint the items array is preallocated, so
        # append can skip the resizing logic entirely
        if getattr(self.listitem, 'hint_maxlength', False):
            v_lst, v_value = hop.inputargs(self, self.item_repr)
            hop.exception_cannot_occur()
            hop.gendirectcall(ll_append_noresize, v_lst, v_value)
        else:
            AbstractListRepr.rtype_method_append(self, hop)

    def rtype_hint(self, hop):
        # implements the list-comprehension optimization hints:
        # 'maxlength' preallocates, 'fence' freezes into a fixed-size list
        optimized = getattr(self.listitem, 'hint_maxlength', False)
        hints = hop.args_s[-1].const
        if 'maxlength' in hints:
            if optimized:
                s_iterable = hop.args_s[1]
                r_iterable = hop.args_r[1]
                v_list = hop.inputarg(self, arg=0)
                v_iterable = hop.inputarg(r_iterable, arg=1)
                # rebuild a hop whose single argument is the iterable, so
                # that we can ask its repr for the length
                hop2 = hop.copy()
                while hop2.nb_args > 0:
                    hop2.r_s_popfirstarg()
                hop2.v_s_insertfirstarg(v_iterable, s_iterable)
                v_maxlength = r_iterable.rtype_len(hop2)
                hop.llops.gendirectcall(ll_set_maxlength, v_list, v_maxlength)
                return v_list
        if 'fence' in hints:
            v_list = hop.inputarg(self, arg=0)
            if isinstance(hop.r_result, FixedSizeListRepr):
                if optimized and 'exactlength' in hints:
                    llfn = ll_list2fixed_exact
                else:
                    llfn = ll_list2fixed
                v_list = hop.llops.gendirectcall(llfn, v_list)
            return v_list
        return AbstractListRepr.rtype_hint(self, hop)
class FixedSizeListRepr(AbstractFixedSizeListRepr, BaseListRepr):
    # Fixed-size list: represented directly as the GcArray of items.

    def _setup_repr(self):
        if 'item_repr' not in self.__dict__:
            self.external_item_repr, self.item_repr = externalvsinternal(self.rtyper, self._item_repr_computer())
        if isinstance(self.LIST, GcForwardReference):
            ITEM = self.item_repr.lowleveltype
            ITEMARRAY = self.get_itemarray_lowleveltype()
            self.LIST.become(ITEMARRAY)

    def compact_repr(self):
        return 'FixedSizeListR %s' % (self.item_repr.compact_repr(),)

    def prepare_const(self, n):
        # allocate an immortal prebuilt array of exactly n items
        result = malloc(self.LIST, n, immortal=True)
        return result
# ____________________________________________________________
#
# Low-level methods. These can be run for testing, but are meant to
# be direct_call'ed from rtyped flow graphs, which means that they will
# get flowed and annotated, mostly with SomePtr.
# adapted C code
def _ll_list_resize_really(l, newsize):
    """
    Ensure ob_item has room for at least newsize elements, and set
    ob_size to newsize.  If newsize > ob_size on entry, the content
    of the new slots at exit is undefined heap trash; it's the caller's
    responsibility to overwrite them with sane values.
    The number of allocated elements may grow, shrink, or stay the same.
    Failure is impossible if newsize <= self.allocated on entry, although
    that partly relies on an assumption that the system realloc() never
    fails when passed a number of bytes <= the number of bytes last
    allocated (the C standard doesn't guarantee this, but it's hard to
    imagine a realloc implementation where it wouldn't be true).
    Note that self->ob_item may change, and even if newsize is less
    than ob_size on entry.
    """
    allocated = len(l.items)

    # This over-allocates proportional to the list size, making room
    # for additional growth.  The over-allocation is mild, but is
    # enough to give linear-time amortized behavior over a long
    # sequence of appends() in the presence of a poorly-performing
    # system realloc().
    # The growth pattern is:  0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ...
    ## (newsize < 9 ? 3 : 6)
    if newsize < 9:
        some = 3
    else:
        some = 6
    new_allocated = (newsize >> 3) + some + newsize
    if newsize == 0:
        new_allocated = 0
    # XXX consider to have a real realloc
    items = l.items
    newitems = malloc(typeOf(l).TO.items.TO, new_allocated)
    # copy min(l.length, new_allocated) items into the new array
    before_len = l.length
    if before_len < new_allocated:
        p = before_len - 1
    else:
        p = new_allocated - 1
    while p >= 0:
            newitems[p] = items[p]
            ITEM = typeOf(l).TO.ITEM
            if isinstance(ITEM, Ptr):
                # clear the old slot so the old array does not keep the
                # copied object alive for the GC
                items[p] = nullptr(ITEM.TO)
            p -= 1
    l.length = newsize
    l.items = newitems
# annotation enforcement: second argument must be a Signed
_ll_list_resize_really._annenforceargs_ = (None, int)
# this common case was factored out of _ll_list_resize
# to see if inlining it gives some speed-up.
def _ll_list_resize(l, newsize):
# Bypass realloc() when a previous overallocation is large enough
# to accommodate the newsize. If the newsize falls lower than half
# the allocated size, then proceed with the realloc() to shrink the list.
allocated = len(l.items)
if allocated >= newsize and newsize >= ((allocated >> 1) - 5):
l.length = newsize
else:
_ll_list_resize_really(l, newsize)
def _ll_list_resize_ge(l, newsize):
if len(l.items) >= newsize:
l.length = newsize
else:
_ll_list_resize_really(l, newsize)
def _ll_list_resize_le(l, newsize):
if newsize >= (len(l.items) >> 1) - 5:
l.length = newsize
else:
_ll_list_resize_really(l, newsize)
def ll_append_noresize(l, newitem):
    """Append assuming the items array already has room for one more
    element (used with the 'maxlength' preallocation hint)."""
    index = l.length
    l.length = index + 1
    l.ll_setitem_fast(index, newitem)
ll_append_noresize.oopspec = 'list.append(l, newitem)'
def ll_both_none(lst1, lst2):
    """True iff both list pointers are null (i.e. both lists are None)."""
    if lst1:
        return False
    return not lst2
# ____________________________________________________________
#
# Accessor methods
def ll_newlist(LIST, length):
    # Allocate a resizable list of 'length' (uninitialized) items; the
    # items array is allocated at exactly the requested size.
    debug_assert(length >= 0, "negative list length")
    l = malloc(LIST)
    l.length = length
    l.items = malloc(LIST.items.TO, length)
    return l
# bound as an ADT method on the LIST type (first argument is the type)
ll_newlist = typeMethod(ll_newlist)
ll_newlist.oopspec = 'newlist(length)'
def ll_length(l):
    """ADT method: current logical length of a resizable list."""
    return l.length
def ll_items(l):
    """ADT method: the (possibly over-allocated) items array."""
    return l.items
def ll_getitem_fast(l, index):
    # Unchecked read (beyond the debug assertion): callers must guarantee
    # 0 <= index < l.length.
    debug_assert(index < l.length, "getitem out of bounds")
    return l.ll_items()[index]
def ll_setitem_fast(l, index, item):
    # Unchecked write (beyond the debug assertion): callers must guarantee
    # 0 <= index < l.length.
    debug_assert(index < l.length, "setitem out of bounds")
    l.ll_items()[index] = item
# fixed size versions
def ll_fixed_newlist(LIST, length):
    # Allocate a fixed-size list: just the GcArray of items.
    debug_assert(length >= 0, "negative fixed list length")
    l = malloc(LIST, length)
    return l
# bound as an ADT method on the LIST type (first argument is the type)
ll_fixed_newlist = typeMethod(ll_fixed_newlist)
ll_fixed_newlist.oopspec = 'newlist(length)'
def ll_fixed_length(l):
    """ADT method: length of a fixed-size list (the array itself)."""
    return len(l)
def ll_fixed_items(l):
    """ADT method: a fixed-size list is its own items array."""
    return l
def ll_fixed_getitem_fast(l, index):
    # Unchecked read (beyond the debug assertion): callers must guarantee
    # 0 <= index < len(l).
    debug_assert(index < len(l), "fixed getitem out of bounds")
    return l[index]
def ll_fixed_setitem_fast(l, index, item):
    # Unchecked write (beyond the debug assertion): callers must guarantee
    # 0 <= index < len(l).
    debug_assert(index < len(l), "fixed setitem out of bounds")
    l[index] = item
def newlist(llops, r_list, items_v):
    # Emit the operations building a list of the given constant length:
    # one allocation plus one ll_setitem_nonneg call per item.
    LIST = r_list.LIST
    cno = inputconst(Signed, len(items_v))
    v_result = llops.gendirectcall(LIST.ll_newlist, cno)
    # dum_nocheck selects the no-bounds-check variant of setitem
    v_func = inputconst(Void, dum_nocheck)
    for i, v_item in enumerate(items_v):
        ci = inputconst(Signed, i)
        llops.gendirectcall(ll_setitem_nonneg, v_func, v_result, ci, v_item)
    return v_result
# special operations for list comprehension optimization
def ll_set_maxlength(l, n):
    # 'maxlength' hint support: preallocate the items array for n items
    # without touching l.length.
    LIST = typeOf(l).TO
    l.items = malloc(LIST.items.TO, n)
def ll_list2fixed(l):
    # 'fence' hint support: turn a resizable list into a fixed-size one,
    # reusing the items array when its allocation happens to be exact.
    n = l.length
    olditems = l.items
    if n == len(olditems):
        return olditems
    else:
        LIST = typeOf(l).TO
        newitems = malloc(LIST.items.TO, n)
        for i in range(n):
            newitems[i] = olditems[i]
        return newitems
def ll_list2fixed_exact(l):
    """'fence'+'exactlength' hint: the items array is known to hold
    exactly l.length items, so it can be returned directly."""
    return l.items
# ____________________________________________________________
#
# Iteration.
class ListIteratorRepr(AbstractListIteratorRepr):
    # Iterator over either list flavour: a small GcStruct holding the
    # list and the current index.

    def __init__(self, r_list):
        self.r_list = r_list
        self.lowleveltype = Ptr(GcStruct('listiter',
                                         ('list', r_list.lowleveltype),
                                         ('index', Signed)))
        # low-level helpers used by the generic iteration machinery
        self.ll_listiter = ll_listiter
        self.ll_listnext = ll_listnext
def ll_listiter(ITERPTR, lst):
    # Allocate a fresh iterator structure positioned at the start of lst.
    iter = malloc(ITERPTR.TO)
    iter.list = lst
    iter.index = 0
    return iter
def ll_listnext(iter):
    """Return the current item and advance the iterator.

    Raises StopIteration once the underlying list is exhausted.
    """
    lst = iter.list
    i = iter.index
    if i >= lst.ll_length():
        raise StopIteration
    iter.index = i + 1
    return lst.ll_getitem_fast(i)
| Python |
import types
import sys
from pypy.annotation.pairtype import pairtype, pair
from pypy.annotation import model as annmodel
from pypy.annotation import description
from pypy.objspace.flow.model import Constant, Variable
from pypy.rpython.lltypesystem.lltype import \
typeOf, Void, ForwardReference, Struct, Bool, Char, \
Ptr, malloc, nullptr, Array, Signed, FuncType
from pypy.rpython.rmodel import Repr, TyperError, inputconst, inputdesc, HalfConcreteWrapper
from pypy.rpython.rpbc import samesig,\
commonbase, allattributenames, adjust_shape, \
AbstractClassesPBCRepr, AbstractMethodsPBCRepr, OverriddenFunctionPBCRepr, \
AbstractMultipleFrozenPBCRepr, MethodOfFrozenPBCRepr, \
AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, \
SingleFrozenPBCRepr, none_frozen_pbc_repr, get_concrete_calltable
from pypy.rpython.lltypesystem import rclass, llmemory
from pypy.tool.sourcetools import has_varargs
from pypy.rpython import callparse
def rtype_is_None(robj1, rnone2, hop, pos=0):
    # Emit the operations testing whether hop's argument at position 'pos'
    # is None, depending on how robj1 represents its values.
    if isinstance(robj1.lowleveltype, Ptr):
        # pointer representation: None is the null pointer
        v1 = hop.inputarg(robj1, pos)
        return hop.genop('ptr_iszero', [v1], resulttype=Bool)
    elif robj1.lowleveltype == llmemory.Address:
        v1 = hop.inputarg(robj1, pos)
        cnull = hop.inputconst(llmemory.Address, robj1.null_instance())
        return hop.genop('adr_eq', [v1, cnull], resulttype=Bool)
    elif robj1 == none_frozen_pbc_repr:
        # the value can only ever be None
        return hop.inputconst(Bool, True)
    elif isinstance(robj1, SmallFunctionSetPBCRepr):
        # char-index representation: chr(0) is reserved for None
        if robj1.s_pbc.can_be_None:
            v1 = hop.inputarg(robj1, pos)
            return hop.genop('char_eq', [v1, inputconst(Char, '\000')],
                            resulttype=Bool)
        else:
            return inputconst(Bool, False)
    else:
        raise TyperError('rtype_is_None of %r' % (robj1))
# ____________________________________________________________
class MultipleFrozenPBCRepr(AbstractMultipleFrozenPBCRepr):
    """Representation selected for multiple non-callable pre-built constants."""
    def __init__(self, rtyper, access_set):
        self.rtyper = rtyper
        self.access_set = access_set
        # the actual Struct is only filled in by _setup_repr()
        self.pbc_type = ForwardReference()
        self.lowleveltype = Ptr(self.pbc_type)
        self.pbc_cache = {}

    def _setup_repr(self):
        llfields = self._setup_repr_fields()
        kwds = {'hints': {'immutable': True}}
        self.pbc_type.become(Struct('pbc', *llfields, **kwds))

    def create_instance(self):
        return malloc(self.pbc_type, immortal=True)

    def null_instance(self):
        return nullptr(self.pbc_type)

    def getfield(self, vpbc, attr, llops):
        # read the mangled struct field corresponding to attribute 'attr'
        mangled_name, r_value = self.fieldmap[attr]
        cmangledname = inputconst(Void, mangled_name)
        return llops.genop('getfield', [vpbc, cmangledname],
                           resulttype = r_value)
class MultipleUnrelatedFrozenPBCRepr(AbstractMultipleUnrelatedFrozenPBCRepr):
    """Representation selected for multiple non-callable pre-built constants
    with no common access set."""
    # represented as a raw address (only identity comparisons make sense)
    lowleveltype = llmemory.Address
    EMPTY = Struct('pbc', hints={'immutable': True})

    def convert_pbc(self, pbcptr):
        return llmemory.fakeaddress(pbcptr)

    def create_instance(self):
        return malloc(self.EMPTY, immortal=True)

    def null_instance(self):
        return llmemory.Address._defl()
class __extend__(pairtype(MultipleUnrelatedFrozenPBCRepr,
                          MultipleUnrelatedFrozenPBCRepr),
                 pairtype(MultipleUnrelatedFrozenPBCRepr,
                          SingleFrozenPBCRepr),
                 pairtype(SingleFrozenPBCRepr,
                          MultipleUnrelatedFrozenPBCRepr)):
    def rtype_is_((robj1, robj2), hop):
        # 'is' between frozen PBCs compares their addresses; both sides
        # are converted to the address-based repr first
        if isinstance(robj1, MultipleUnrelatedFrozenPBCRepr):
            r = robj1
        else:
            r = robj2
        vlist = hop.inputargs(r, r)
        return hop.genop('adr_eq', vlist, resulttype=Bool)
class __extend__(pairtype(MultipleFrozenPBCRepr,
                          MultipleUnrelatedFrozenPBCRepr)):
    def convert_from_to((robj1, robj2), v, llops):
        # a typed pbc pointer degrades to a raw address
        return llops.genop('cast_ptr_to_adr', [v], resulttype=llmemory.Address)
# ____________________________________________________________
class FunctionsPBCRepr(AbstractFunctionsPBCRepr):
    """Representation selected for a PBC of function(s)."""

    def setup_specfunc(self):
        # one struct field per calltable row, holding that row's
        # function pointer
        fields = []
        for row in self.uniquerows:
            fields.append((row.attrname, row.fntype))
        kwds = {'hints': {'immutable': True}}
        return Ptr(Struct('specfunc', *fields, **kwds))

    def create_specfunc(self):
        return malloc(self.lowleveltype.TO, immortal=True)

    def get_specfunc_row(self, llop, v, c_rowname, resulttype):
        return llop.genop('getfield', [v, c_rowname], resulttype=resulttype)
class SmallFunctionSetPBCRepr(Repr):
    # Compact representation for a small set of functions from one call
    # family: a function is encoded as a single Char index into a prebuilt
    # table of function pointers; chr(0) encodes None when the annotation
    # allows None.
    def __init__(self, rtyper, s_pbc):
        self.rtyper = rtyper
        self.s_pbc = s_pbc
        self.callfamily = s_pbc.descriptions.iterkeys().next().getcallfamily()
        concretetable, uniquerows = get_concrete_calltable(self.rtyper,
                                                          self.callfamily)
        assert len(uniquerows) == 1
        self.lowleveltype = Char
        # companion pointer-based repr, used for conversions and the table
        self.pointer_repr = FunctionsPBCRepr(rtyper, s_pbc)
        self._conversion_tables = {}
        self._dispatch_cache = {}

    def _setup_repr(self):
        if self.s_pbc.subset_of:
            # share descriptions/table with the repr of the superset
            assert self.s_pbc.can_be_None == self.s_pbc.subset_of.can_be_None
            r = self.rtyper.getrepr(self.s_pbc.subset_of)
            if r is not self:
                r.setup()
                self.descriptions = r.descriptions
                self.c_pointer_table = r.c_pointer_table
                return
        self.descriptions = list(self.s_pbc.descriptions)
        if self.s_pbc.can_be_None:
            # reserve index 0 for None
            self.descriptions.insert(0, None)
        POINTER_TABLE = Array(self.pointer_repr.lowleveltype)
        pointer_table = malloc(POINTER_TABLE, len(self.descriptions),
                               immortal=True)
        for i, desc in enumerate(self.descriptions):
            if desc is not None:
                pointer_table[i] = self.pointer_repr.convert_desc(desc)
            else:
                pointer_table[i] = self.pointer_repr.convert_const(None)
        self.c_pointer_table = inputconst(Ptr(POINTER_TABLE), pointer_table)

    def get_s_callable(self):
        return self.s_pbc

    def get_r_implfunc(self):
        return self, 0

    def get_s_signatures(self, shape):
        funcdesc = self.s_pbc.descriptions.iterkeys().next()
        return funcdesc.get_s_signatures(shape)

    def convert_desc(self, funcdesc):
        # a function is represented by its index in self.descriptions
        return chr(self.descriptions.index(funcdesc))

    def convert_const(self, value):
        if isinstance(value, types.MethodType) and value.im_self is None:
            value = value.im_func   # unbound method -> bare function
        if value is None:
            return chr(0)
        funcdesc = self.rtyper.annotator.bookkeeper.getdesc(value)
        return self.convert_desc(funcdesc)

##     def convert_to_concrete_llfn(self, v, shape, index, llop):
##         return v

    def rtype_simple_call(self, hop):
        return self.call('simple_call', hop)

    def rtype_call_args(self, hop):
        return self.call('call_args', hop)

    def dispatcher(self, shape, index, argtypes, resulttype):
        # Build (and cache) a small flow graph that switches on the Char
        # index and direct_calls the corresponding function.
        key = shape, index, tuple(argtypes), resulttype
        if key in self._dispatch_cache:
            return self._dispatch_cache[key]
        from pypy.translator.unsimplify import varoftype
        from pypy.objspace.flow.model import FunctionGraph, Link, Block, SpaceOperation
        inputargs = [varoftype(t) for t in [Char] + argtypes]
        startblock = Block(inputargs)
        startblock.exitswitch = inputargs[0]
        #startblock.operations.append(SpaceOperation('debug_pdb', [], varoftype(Void)))
        graph = FunctionGraph("dispatcher", startblock, varoftype(resulttype))
        row_of_graphs = self.callfamily.calltables[shape][index]
        links = []
        descs = list(self.s_pbc.descriptions)
        if self.s_pbc.can_be_None:
            descs.insert(0, None)
        for desc in descs:
            if desc is None:
                continue
            args_v = [varoftype(t) for t in argtypes]
            b = Block(args_v)
            llfn = self.rtyper.getcallable(row_of_graphs[desc])
            v_fn = inputconst(typeOf(llfn), llfn)
            v_result = varoftype(resulttype)
            b.operations.append(
                SpaceOperation("direct_call", [v_fn] + args_v, v_result))
            b.closeblock(Link([v_result], graph.returnblock))
            i = self.descriptions.index(desc)
            links.append(Link(inputargs[1:], b, chr(i)))
            links[-1].llexitcase = chr(i)
        startblock.closeblock(*links)
        self.rtyper.annotator.translator.graphs.append(graph)
        ll_ret = self.rtyper.type_system.getcallable(graph)
        #FTYPE = FuncType
        c_ret = self._dispatch_cache[key] = inputconst(typeOf(ll_ret), ll_ret)
        return c_ret

    def call(self, opname, hop):
        # Rtype a call through the char-index repr: parse the arguments
        # against any graph of the family, then direct_call the dispatcher.
        bk = self.rtyper.annotator.bookkeeper
        args = bk.build_args(opname, hop.args_s[1:])
        s_pbc = hop.args_s[0]   # possibly more precise than self.s_pbc
        descs = s_pbc.descriptions.keys()
        shape, index = description.FunctionDesc.variant_for_call_site(bk, self.callfamily, descs, args)
        row_of_graphs = self.callfamily.calltables[shape][index]
        anygraph = row_of_graphs.itervalues().next()  # pick any witness
        vlist = [hop.inputarg(self, arg=0)]
        vlist += callparse.callparse(self.rtyper, anygraph, hop, opname)
        rresult = callparse.getrresult(self.rtyper, anygraph)
        hop.exception_is_here()
        v_dispatcher = self.dispatcher(shape, index, [v.concretetype for v in vlist[1:]], rresult.lowleveltype)
        v_result = hop.genop('direct_call', [v_dispatcher] + vlist,
                             resulttype=rresult)
        return hop.llops.convertvar(v_result, rresult, hop.r_result)

    def rtype_is_true(self, hop):
        # only chr(0), i.e. None, is false
        if not self.s_pbc.can_be_None:
            return inputconst(Bool, True)
        else:
            v1, = hop.inputargs(self)
            return hop.genop('char_ne', [v1, inputconst(Char, '\000')],
                         resulttype=Bool)

##     def rtype_simple_call(self, hop):
##         v_index = hop.inputarg(self, arg=0)
##         v_ptr = hop.llops.convertvar(v_index, self, self.pointer_repr)
##         hop2 = hop.copy()
##         hop2.args_r[0] = self.pointer_repr
##         hop2.args_v[0] = v_ptr
##         return hop2.dispatch()

##     rtype_call_args = rtype_simple_call
class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionsPBCRepr)):
    def convert_from_to((r_set, r_ptr), v, llops):
        if r_ptr.lowleveltype is Void:
            # single known function: the value is static
            wrapper = HalfConcreteWrapper(r_ptr.get_unique_llfn)
            return inputconst(Void, wrapper)
        else:
            # index the prebuilt pointer table with the char index
            assert v.concretetype is Char
            v_int = llops.genop('cast_char_to_int', [v],
                                resulttype=Signed)
            return llops.genop('getarrayitem', [r_set.c_pointer_table, v_int],
                               resulttype=r_ptr.lowleveltype)
class __extend__(pairtype(FunctionsPBCRepr, SmallFunctionSetPBCRepr)):
    def convert_from_to((r_ptr, r_set), v, llops):
        # only possible when the source repr is a single known function
        assert r_ptr.lowleveltype is Void
        desc, = r_ptr.s_pbc.descriptions
        return inputconst(Char, r_set.convert_desc(desc))
def conversion_table(r_from, r_to):
    # Build (and cache on r_from) the Char->Char table translating indices
    # of r_from into indices of r_to.  Returns None when the mapping is
    # the identity, meaning no runtime table lookup is needed.
    if r_to in r_from._conversion_tables:
        return r_from._conversion_tables[r_to]
    else:
        t = malloc(Array(Char), len(r_from.descriptions), immortal=True)
        l = []
        for i, d in enumerate(r_from.descriptions):
            if d in r_to.descriptions:
                j = r_to.descriptions.index(d)
                l.append(j)
                t[i] = chr(j)
            else:
                # entry of r_from not present in r_to; never hit at runtime
                l.append(None)
        if l == range(len(r_from.descriptions)):
            # identity mapping: no table needed
            r = None
        else:
            r = inputconst(Ptr(Array(Char)), t)
        r_from._conversion_tables[r_to] = r
        return r
## myf = open('convlog.txt', 'w')
class __extend__(pairtype(SmallFunctionSetPBCRepr, SmallFunctionSetPBCRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # remap char indices via the precomputed table, unless the
        # mapping is the identity (c_table is None)
        c_table = conversion_table(r_from, r_to)
        if c_table:
            assert v.concretetype is Char
##             from pypy.rpython.lltypesystem.rstr import string_repr
##             s = repr(llops.rtyper.annotator.annotated.get(llops.originalblock))
##             if 'LOAD_GLOBAL' in s:
##                 import pdb; pdb.set_trace()
##             print >> myf, 'static small conv', s
##             print 'static small conv', s
##             llops.genop('debug_print',
##                         [Constant(string_repr.convert_const("dynamic small conv" + s),
##                                   string_repr.lowleveltype)])
            v_int = llops.genop('cast_char_to_int', [v],
                                resulttype=Signed)
            return llops.genop('getarrayitem', [c_table, v_int],
                               resulttype=Char)
        else:
            return v
class MethodsPBCRepr(AbstractMethodsPBCRepr):
    """Representation selected for a PBC of the form {func: classdef...}.
    It assumes that all the methods come from the same name in a base
    classdef."""

    def rtype_simple_call(self, hop):
        return self.redispatch_call(hop, call_args=False)

    def rtype_call_args(self, hop):
        return self.redispatch_call(hop, call_args=True)

    def redispatch_call(self, hop, call_args):
        # Turn method_call(self, args...) into
        # simple_call(function, self, args...) on the function looked up
        # from the instance's class.
        r_class = self.r_im_self.rclass
        mangled_name, r_func = r_class.clsfields[self.methodname]
        assert isinstance(r_func, (FunctionsPBCRepr,
                                   OverriddenFunctionPBCRepr,
                                   SmallFunctionSetPBCRepr))
        # s_func = r_func.s_pbc -- not precise enough, see
        # test_precise_method_call_1.  Build a more precise one...
        funcdescs = [desc.funcdesc for desc in hop.args_s[0].descriptions]
        s_func = annmodel.SomePBC(funcdescs, subset_of=r_func.s_pbc)
        v_im_self = hop.inputarg(self, arg=0)
        v_cls = self.r_im_self.getfield(v_im_self, '__class__', hop.llops)
        v_func = r_class.getclsfield(v_cls, self.methodname, hop.llops)

        hop2 = self.add_instance_arg_to_hop(hop, call_args)
        opname = 'simple_call'
        if call_args:
            opname = 'call_args'
        hop2.forced_opname = opname

        hop2.v_s_insertfirstarg(v_func, s_func)   # insert 'function'

        if type(hop2.args_r[0]) is SmallFunctionSetPBCRepr and type(r_func) is FunctionsPBCRepr:
            # narrow to a pointer-based repr matching the precise s_func
            hop2.args_r[0] = FunctionsPBCRepr(self.rtyper, s_func)
        else:
            hop2.args_v[0] = hop2.llops.convertvar(hop2.args_v[0], r_func, hop2.args_r[0])

        # now hop2 looks like simple_call(function, self, args...)
        return hop2.dispatch()
# ____________________________________________________________
class ClassesPBCRepr(AbstractClassesPBCRepr):
    """Representation selected for a PBC of class(es)."""
    # no __init__ here, AbstractClassesPBCRepr.__init__ is good enough
    def _instantiate_runtime_class(self, hop, vtypeptr, r_instance):
        # Generate a runtime call to ll_instantiate() on the vtable
        # pointer, then cast the generic result down to 'r_instance'.
        from pypy.rpython.lltypesystem.rbuiltin import ll_instantiate
        v_raw = hop.gendirectcall(ll_instantiate, vtypeptr)
        return hop.genop('cast_pointer', [v_raw], resulttype=r_instance)
# ____________________________________________________________
##def rtype_call_memo(hop):
## memo_table = hop.args_v[0].value
## if memo_table.s_result.is_constant():
## return hop.inputconst(hop.r_result, memo_table.s_result.const)
## fieldname = memo_table.fieldname
## assert hop.nb_args == 2, "XXX"
## r_pbc = hop.args_r[1]
## assert isinstance(r_pbc, (MultipleFrozenPBCRepr, ClassesPBCRepr))
## v_table, v_pbc = hop.inputargs(Void, r_pbc)
## return r_pbc.getfield(v_pbc, fieldname, hop.llops)
| Python |
from pypy.rpython.error import TyperError
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.rmodel import inputconst
from pypy.rpython.lltypesystem.rclass import OBJECTPTR, InstanceRepr
from pypy.rpython.annlowlevel import cachedtype
# Low-level type of the 'vable_rti' field; currently just a generic
# object pointer.
VABLERTIPTR = OBJECTPTR
class VirtualizableInstanceRepr(InstanceRepr):
    """InstanceRepr for classes that declare '_virtualizable_'.

    The instance structure of the class at the top of the virtualizable
    hierarchy grows three extra fields (vable_base, vable_rti,
    vable_access).  Reads and writes of the instance fields are
    redirected through getter/setter functions found in the optional
    ACCESS structure pointed to by 'vable_access'; when no ACCESS is
    installed, the fields are accessed directly.
    """
    def __init__(self, rtyper, classdef):
        InstanceRepr.__init__(self, rtyper, classdef)
        classdesc = classdef.classdesc
        if '_virtualizable_' in classdesc.classdict:
            # only one level of the hierarchy may declare _virtualizable_
            basedesc = classdesc.basedesc
            assert basedesc is None or basedesc.lookup('_virtualizable_') is None
            self.top_of_virtualizable_hierarchy = True
        else:
            self.top_of_virtualizable_hierarchy = False
        # caches of generated ll getter/setter helpers, keyed by mangled
        # field name
        self._setters = {}
        self._getters = {}
    def _setup_repr(self):
        llfields = []
        ACCESS = lltype.ForwardReference()
        if self.top_of_virtualizable_hierarchy:
            llfields.append(('vable_base', llmemory.Address))
            llfields.append(('vable_rti', VABLERTIPTR))
            llfields.append(('vable_access', lltype.Ptr(ACCESS)))
        InstanceRepr._setup_repr(self, llfields,
                                 hints = {'virtualizable': True},
                                 adtmeths = {'ACCESS': ACCESS})
        rbase = self.rbase
        accessors = []
        if self.top_of_virtualizable_hierarchy:
            if len(rbase.allinstancefields) != 1:
                # bug fix: the message used to be misspelled "virtulizable"
                raise TyperError("virtualizable class cannot have"
                                 " non-virtualizable base class with instance"
                                 " fields: %r" % self.classdef)
            redirected_fields = []
        else:
            accessors.append(('parent', rbase.ACCESS))
            redirected_fields = list(rbase.ACCESS.redirected_fields)
        name = self.lowleveltype.TO._name
        TOPPTR = self.get_top_virtualizable_type()
        self.my_redirected_fields = my_redirected_fields = {}
        # bug fix: the loop variable below used to be called 'name',
        # clobbering the struct name computed above before it was used
        # to name the ACCESS structure.
        for fieldname, (mangled_name, r) in self.fields.items():
            T = r.lowleveltype
            if T is lltype.Void:
                continue     # Void fields need no accessor
            GETTER = lltype.Ptr(lltype.FuncType([TOPPTR], T))
            SETTER = lltype.Ptr(lltype.FuncType([TOPPTR, T], lltype.Void))
            accessors.append(('get_'+mangled_name, GETTER))
            accessors.append(('set_'+mangled_name, SETTER))
            redirected_fields.append(mangled_name)
            my_redirected_fields[fieldname] = None
        ACCESS.become(lltype.Struct(name+'_access',
                                    hints = {'immutable': True},
                                    adtmeths = {'redirected_fields': tuple(redirected_fields)},
                                    *accessors))
        self.ACCESS = ACCESS
    def get_top_virtualizable_type(self):
        # lowleveltype of the class that declared '_virtualizable_'
        if self.top_of_virtualizable_hierarchy:
            return self.lowleveltype
        else:
            return self.rbase.get_top_virtualizable_type()
    def set_vable(self, llops, vinst, force_cast=False):
        # initialize the three 'vable_*' fields of a freshly-built
        # instance to their null values
        if self.top_of_virtualizable_hierarchy:
            if force_cast:
                vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
            for name, llvalue in (('access', lltype.nullptr(self.ACCESS)),
                                  ('base', llmemory.NULL),
                                  ('rti', lltype.nullptr(VABLERTIPTR.TO))):
                cname = inputconst(lltype.Void, 'vable_'+name)
                vvalue = inputconst(lltype.typeOf(llvalue), llvalue)
                llops.genop('setfield', [vinst, cname, vvalue])
        else:
            self.rbase.set_vable(llops, vinst, force_cast=True)
    def new_instance(self, llops, classcallhop=None, v_cpytype=None):
        # like InstanceRepr.new_instance, but also null out the
        # 'vable_*' fields
        vptr = InstanceRepr.new_instance(self, llops, classcallhop, v_cpytype)
        self.set_vable(llops, vptr)
        return vptr
    def get_getter(self, name):
        """Return (and cache) the ll helper that reads the mangled field
        'name', going through vable_access when it is installed."""
        try:
            return self._getters[name]
        except KeyError:
            pass
        TOPPTR = self.get_top_virtualizable_type()
        ACCESSPTR = lltype.Ptr(self.ACCESS)
        def ll_getter(inst):
            top = lltype.cast_pointer(TOPPTR, inst)
            access = top.vable_access
            if access:
                return getattr(lltype.cast_pointer(ACCESSPTR, access),
                               'get_'+name)(top)
            else:
                return getattr(inst, name)
        ll_getter.oopspec = 'vable.get_%s(inst)' % name
        self._getters[name] = ll_getter
        return ll_getter
    def get_setter(self, name):
        """Return (and cache) the ll helper that writes the mangled field
        'name', going through vable_access when it is installed."""
        try:
            return self._setters[name]
        except KeyError:
            pass
        TOPPTR = self.get_top_virtualizable_type()
        ACCESSPTR = lltype.Ptr(self.ACCESS)
        def ll_setter(inst, value):
            top = lltype.cast_pointer(TOPPTR, inst)
            access = top.vable_access
            if access:
                return getattr(lltype.cast_pointer(ACCESSPTR, access),
                               'set_'+name)(top, value)
            else:
                return setattr(inst, name, value)
        ll_setter.oopspec = 'vable.set_%s(inst, value)' % name
        self._setters[name] = ll_setter
        return ll_setter
    def getfield(self, vinst, attr, llops, force_cast=False, flags={}):
        """Read the given attribute (or __class__ for the type) of 'vinst'."""
        if (attr in self.my_redirected_fields
            and not flags.get('access_directly')):
            mangled_name, r = self.fields[attr]
            if force_cast:
                vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
            ll_getter = self.get_getter(mangled_name)
            return llops.gendirectcall(ll_getter, vinst)
        else:
            return InstanceRepr.getfield(self, vinst, attr, llops, force_cast)
    def setfield(self, vinst, attr, vvalue, llops, force_cast=False,
                 opname='setfield', flags={}):
        """Write the given attribute (or __class__ for the type) of 'vinst'."""
        if (attr in self.my_redirected_fields
            and not flags.get('access_directly')):
            mangled_name, r = self.fields[attr]
            if force_cast:
                vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
            ll_setter = self.get_setter(mangled_name)
            llops.gendirectcall(ll_setter, vinst, vvalue)
        else:
            InstanceRepr.setfield(self, vinst, attr, vvalue, llops, force_cast,
                                  opname)
| Python |
""" This file creates and maintains _cache/stdtypes.py, which
keeps information about C type sizes
"""
import py
import os
from pypy.translator.tool.cbuild import build_executable
from pypy.tool.udir import udir
def sizeof_c_type(c_typename, includes=None, compiler_exe=None):
    """Return the size, in bits, of the C type 'c_typename'.

    Works by compiling and running a tiny C program that prints
    sizeof(c_typename).  'includes' is an optional dict whose keys are
    extra header names to #include; 'compiler_exe' optionally selects
    the C compiler to use.
    """
    from py.compat.subprocess import PIPE, Popen
    # Bug fix: 'includes' used to be a mutable default argument ({})
    # that was mutated below, so the added headers leaked between
    # calls.  Work on a private copy instead.
    if includes is None:
        includes = {}
    else:
        includes = dict(includes)
    includes['stdio.h'] = True
    includes['sys' + os.path.sep + 'types.h'] = True
    include_string = "\n".join(["#include <%s>" % i for i in includes.keys()])
    c_source = py.code.Source('''
    // includes
    %s

    // checking code
    int main(void)
    {
       printf("%%d\\n", sizeof(%s));
       return (0);
    }
    ''' % (include_string, c_typename))
    c_file = udir.join("typetest.c")
    c_file.write(c_source)
    # build, run, and parse the printed size (in bytes); convert to bits
    c_exec = build_executable([str(c_file)], compiler_exe=compiler_exe)
    pipe = Popen(c_exec, stdout=PIPE)
    pipe.wait()
    return int(pipe.stdout.read()) * 8
# XXX add float types as well here
# List of the C type names whose sizes are probed and cached.
TYPES = []
for _base in 'char short int long'.split():
    TYPES.append(_base)
    TYPES.append('unsigned ' + _base)
TYPES.extend(['long long', 'unsigned long long', 'size_t'])
if os.name != 'nt':
    TYPES.append('mode_t')    # POSIX-only type
def newline_repr(d):
    """Format dict 'd' like repr(), but with one 'key:value' pair per line."""
    assert isinstance(d, dict)
    pairs = ["%r:%r" % pair for pair in d.items()]
    return "{\n%s,\n}" % ",\n".join(pairs)
def get_type_sizes(filename, compiler_exe=None):
    """Return the mapping {C type name: size in bits} for all of TYPES.

    The mapping is cached in 'filename'.  The cache is recomputed (by
    compiling and running small C programs, see sizeof_c_type) whenever
    the file is missing or does not cover exactly the current TYPES.
    """
    try:
        mod = {}
        # the cache file contains a single assignment: types = {...}
        exec py.path.local(filename).read() in mod
        types = mod['types']
    except (ImportError, py.error.ENOENT):
        # missing or unreadable cache file: start from scratch
        types = {}
    try:
        if py.builtin.sorted(types.keys()) != py.builtin.sorted(TYPES):
            # invalidate file
            types = {}
            raise KeyError
        return types
    except KeyError:
        # recompute every size and rewrite the cache file
        types = dict([(i, sizeof_c_type(i, compiler_exe=compiler_exe))
                      for i in TYPES])
        py.path.local(filename).write('types = ' +
                                      repr(types) + "\n")
        return types
import pypy
import py
# make sure the pypy/_cache directory exists
py.path.local(pypy.__file__).new(basename='_cache').ensure(dir=1)
from pypy.tool import autopath
# location of the persistent cache file for the type sizes
CACHE = py.magic.autopath()/'..'/'..'/'..'/'_cache'/'stdtypes.py'
# computed once at import time: {C type name: size in bits}
platform = get_type_sizes(CACHE)
| Python |
# this file contains the definitions and most extremely faked
# implementations of things relating to the description of the layout
# of objects in memory.
# sizeof, offsetof
import weakref
from pypy.rlib.objectmodel import Symbolic
from pypy.rpython.lltypesystem import lltype
class AddressOffset(Symbolic):
    """Abstract base class for symbolic offsets that can be added to
    fake addresses.  Each concrete subclass describes one step inside a
    low-level structure (a field, an array item, a GC header...)."""
    def annotation(self):
        # an offset annotates as a plain integer
        from pypy.annotation import model
        return model.SomeInteger()
    def lltype(self):
        return lltype.Signed
    def __add__(self, other):
        # offsets only add with other offsets; the result is a
        # (normalized) CompositeOffset
        if not isinstance(other, AddressOffset):
            return NotImplemented
        return CompositeOffset(self, other)
    def _raw_malloc(self, rest, zero):
        # overridden in subclasses that can start an allocation; the
        # base version only exists to produce a readable error
        raise NotImplementedError("_raw_malloc(%r, %r)" % (self, rest))
    def raw_memcopy(self, srcadr, dstsrc):
        # overridden in subclasses; second parameter is really the
        # destination address despite its name
        raise NotImplementedError("raw_memcopy(%r)" % (self,))
class ItemOffset(AddressOffset):
    """Symbolic offset covering 'repeat' items of type TYPE, i.e.
    sizeof(TYPE) * repeat."""
    def __init__(self, TYPE, repeat=1):
        self.TYPE = TYPE
        self.repeat = repeat
    def __repr__(self):
        return "<ItemOffset %r %r>" % (self.TYPE, self.repeat)
    def __mul__(self, other):
        # scaling by an integer scales the repeat count
        if not isinstance(other, int):
            return NotImplemented
        return ItemOffset(self.TYPE, self.repeat * other)
    __rmul__ = __mul__
    def __neg__(self):
        return ItemOffset(self.TYPE, -self.repeat)
    def ref(self, firstitemptr):
        # return a pointer 'repeat' items further than 'firstitemptr'
        A = lltype.typeOf(firstitemptr).TO
        if A == self.TYPE:
            # for array of containers
            parent, index = lltype.parentlink(firstitemptr._obj)
            assert parent, "%r is not within a container" % (firstitemptr,)
            assert isinstance(lltype.typeOf(parent),
                              (lltype.Array, lltype.FixedSizeArray)), (
                "%r is not within an array" % (firstitemptr,))
            if isinstance(index, str):
                assert index.startswith('item')    # itemN => N
                index = int(index[4:])
            return parent.getitem(index + self.repeat)._as_ptr()
        elif isinstance(A, lltype.FixedSizeArray) and A.OF == self.TYPE:
            # for array of primitives or pointers
            return lltype.direct_ptradd(firstitemptr, self.repeat)
        else:
            raise TypeError('got %r, expected %r' % (A, self.TYPE))
    def _raw_malloc(self, rest, zero):
        # an ItemOffset must be the last component of an allocation
        assert not rest
        if (isinstance(self.TYPE, lltype.ContainerType)
            and self.TYPE._gckind == 'gc'):
            # a single GC container: malloc it directly
            assert self.repeat == 1
            p = lltype.malloc(self.TYPE, flavor='raw', zero=zero)
            return cast_ptr_to_adr(p)
        else:
            # otherwise allocate a fixed-size array of 'repeat' items
            # and return the address of its first item
            T = lltype.FixedSizeArray(self.TYPE, self.repeat)
            p = lltype.malloc(T, flavor='raw', zero=zero)
            array_adr = cast_ptr_to_adr(p)
            return array_adr + ArrayItemsOffset(T)
    def raw_memcopy(self, srcadr, dstadr):
        # copy 'repeat' consecutive items from srcadr to dstadr
        repeat = self.repeat
        if repeat == 0:
            return
        from pypy.rpython.rctypes.rmodel import reccopy
        if isinstance(self.TYPE, lltype.ContainerType):
            PTR = lltype.Ptr(self.TYPE)
        else:
            PTR = lltype.Ptr(lltype.FixedSizeArray(self.TYPE, 1))
        while True:
            src = cast_adr_to_ptr(srcadr, PTR)
            dst = cast_adr_to_ptr(dstadr, PTR)
            reccopy(src, dst)
            repeat -= 1
            if repeat <= 0:
                break
            srcadr += ItemOffset(self.TYPE)
            dstadr += ItemOffset(self.TYPE)
class FieldOffset(AddressOffset):
    """Symbolic offset of the field 'fldname' inside the (G)cStruct TYPE."""
    def __init__(self, TYPE, fldname):
        self.TYPE = TYPE
        self.fldname = fldname
    def __repr__(self):
        return "<FieldOffset %r %r>" % (self.TYPE, self.fldname)
    def ref(self, struct):
        # return a pointer to the field inside 'struct'
        if lltype.typeOf(struct).TO != self.TYPE:
            struct = lltype.cast_pointer(lltype.Ptr(self.TYPE), struct)
        FIELD = getattr(self.TYPE, self.fldname)
        if isinstance(FIELD, lltype.ContainerType):
            return getattr(struct, self.fldname)
        else:
            return lltype.direct_fieldptr(struct, self.fldname)
    def _raw_malloc(self, rest, parenttype=None, zero=False):
        if self.fldname != self.TYPE._arrayfld:
            # for the error msg
            return AddressOffset._raw_malloc(self, rest, zero=zero)
        # offset of the variable-sized part: delegate to the next
        # component, remembering the parent structure type
        assert rest
        return rest[0]._raw_malloc(rest[1:], parenttype=parenttype or self.TYPE,
                                   zero=zero)
    def raw_memcopy(self, srcadr, dstadr):
        if self.fldname != self.TYPE._arrayfld:
            # Bug fix: 'self' was missing in this unbound-method call,
            # turning the intended NotImplementedError into a TypeError.
            return AddressOffset.raw_memcopy(self, srcadr, dstadr) #for the error msg
        # copy the whole (fixed-size part of the) structure
        PTR = lltype.Ptr(self.TYPE)
        src = cast_adr_to_ptr(srcadr, PTR)
        dst = cast_adr_to_ptr(dstadr, PTR)
        from pypy.rpython.rctypes.rmodel import reccopy
        reccopy(src, dst)
class CompositeOffset(AddressOffset):
    """The sum of several AddressOffsets, normalized at construction time."""
    def __new__(cls, *offsets):
        # flatten nested CompositeOffsets into one list
        lst = []
        for item in offsets:
            if isinstance(item, CompositeOffset):
                lst.extend(item.offsets)
            else:
                lst.append(item)
        # merge adjacent ItemOffsets over the same type
        for i in range(len(lst)-2, -1, -1):
            if (isinstance(lst[i], ItemOffset) and
                isinstance(lst[i+1], ItemOffset) and
                lst[i].TYPE == lst[i+1].TYPE):
                lst[i:i+2] = [ItemOffset(lst[i].TYPE,
                                         lst[i].repeat + lst[i+1].repeat)]
        if len(lst) == 1:
            # a single remaining component: no composite wrapper needed
            return lst[0]
        else:
            self = object.__new__(cls)
            self.offsets = lst
            return self
    def __repr__(self):
        return '< %s >' % (' + '.join([repr(item) for item in self.offsets]),)
    def __neg__(self):
        # negate and reverse the components
        ofs = [-item for item in self.offsets]
        ofs.reverse()
        return CompositeOffset(*ofs)
    def ref(self, ptr):
        # follow each component in turn
        for item in self.offsets:
            ptr = item.ref(ptr)
        return ptr
    def _raw_malloc(self, rest, zero):
        # let the first component drive the allocation
        return self.offsets[0]._raw_malloc(self.offsets[1:] + rest, zero=zero)
    def raw_memcopy(self, srcadr, dstadr):
        # copy component by component, advancing both addresses
        for o in self.offsets[:-1]:
            o.raw_memcopy(srcadr, dstadr)
            srcadr += o
            dstadr += o
        o = self.offsets[-1]
        o.raw_memcopy(srcadr, dstadr)
class ArrayItemsOffset(AddressOffset):
    """Symbolic offset from the start of array TYPE to its first item
    (i.e. past the length field)."""
    def __init__(self, TYPE):
        self.TYPE = TYPE
    def __repr__(self):
        return '< ArrayItemsOffset %r >' % (self.TYPE,)
    def ref(self, arrayptr):
        assert lltype.typeOf(arrayptr).TO == self.TYPE
        if isinstance(self.TYPE.OF, lltype.ContainerType):
            return arrayptr[0]
        else:
            return lltype.direct_arrayitems(arrayptr)
    def _raw_malloc(self, rest, parenttype=None, zero=False):
        # 'rest' may carry a trailing ItemOffset giving the item count
        if rest:
            assert len(rest) == 1
            assert isinstance(rest[0], ItemOffset)
            assert self.TYPE.OF == rest[0].TYPE
            count = rest[0].repeat
        else:
            count = 0
        if self.TYPE._hints.get('isrpystring'):
            count -= 1  # because malloc() will give us the extra char for free
        p = lltype.malloc(parenttype or self.TYPE, count,
                          immortal = self.TYPE._gckind == 'raw',
                          zero = zero)
        return cast_ptr_to_adr(p)
    def raw_memcopy(self, srcadr, dstadr):
        # should really copy the length field, but we can't
        pass
class ArrayLengthOffset(AddressOffset):
    """Symbolic offset of the length field of array TYPE."""
    def __init__(self, TYPE):
        self.TYPE = TYPE
    def __repr__(self):
        return '< ArrayLengthOffset %r >' % (self.TYPE,)
    def ref(self, arrayptr):
        # return an internal fake pointer to the array's length field
        assert lltype.typeOf(arrayptr).TO == self.TYPE
        return lltype._arraylenref._makeptr(arrayptr._obj, arrayptr._solid)
class GCHeaderOffset(AddressOffset):
    """Symbolic offset from a GC header to the object that follows it."""
    def __init__(self, gcheaderbuilder):
        self.gcheaderbuilder = gcheaderbuilder
    def __repr__(self):
        return '< GCHeaderOffset >'
    def __neg__(self):
        return GCHeaderAntiOffset(self.gcheaderbuilder)
    def ref(self, headerptr):
        # map the header pointer back to its GC object
        gcptr = self.gcheaderbuilder.object_from_header(headerptr)
        return gcptr
    def _raw_malloc(self, rest, zero):
        assert rest
        if isinstance(rest[0], GCHeaderAntiOffset):
            # header+antiheader cancel out
            return rest[1]._raw_malloc(rest[2:], zero=zero) # just for fun
        # allocate the object itself, then attach a new header to it
        gcobjadr = rest[0]._raw_malloc(rest[1:], zero=zero)
        headerptr = self.gcheaderbuilder.new_header(gcobjadr.ptr)
        return cast_ptr_to_adr(headerptr)
    def raw_memcopy(self, srcadr, dstadr):
        from pypy.rpython.rctypes.rmodel import reccopy
        reccopy(srcadr.ptr, dstadr.ptr)
class GCHeaderAntiOffset(AddressOffset):
    """The negative of GCHeaderOffset: goes from a GC object back to
    its header."""
    def __init__(self, gcheaderbuilder):
        self.gcheaderbuilder = gcheaderbuilder
    def __repr__(self):
        return '< GCHeaderAntiOffset >'
    def __neg__(self):
        return GCHeaderOffset(self.gcheaderbuilder)
    def ref(self, gcptr):
        # map the GC object pointer to its header
        headerptr = self.gcheaderbuilder.header_of_object(gcptr)
        return headerptr
    def _raw_malloc(self, rest, zero):
        # only meaningful when immediately followed by a GCHeaderOffset,
        # with which it cancels out
        assert len(rest) >= 2
        assert isinstance(rest[0], GCHeaderOffset)
        return rest[1]._raw_malloc(rest[2:], zero=zero)
# ____________________________________________________________
def sizeof(TYPE, n=None):
    """Return the symbolic size of TYPE; 'n' gives the length of the
    variable-sized part, and must be None for fixed-size types."""
    if n is None:
        # fixed-size case
        assert not TYPE._is_varsize()
        return ItemOffset(TYPE)
    # variable-size case
    if isinstance(TYPE, lltype.Array):
        return itemoffsetof(TYPE, n)
    if isinstance(TYPE, lltype.Struct):
        arrayfld = TYPE._arrayfld
        return FieldOffset(TYPE, arrayfld) + \
               itemoffsetof(TYPE._flds[arrayfld], n)
    raise Exception("don't know how to take the size of a %r"%TYPE)
def offsetof(TYPE, fldname):
    """Return the symbolic offset of field 'fldname' within struct TYPE."""
    assert fldname in TYPE._flds
    return FieldOffset(TYPE, fldname)
def itemoffsetof(TYPE, n=0):
    """Return the symbolic offset of the n'th item of array TYPE."""
    return ArrayItemsOffset(TYPE) + ItemOffset(TYPE.OF) * n
# -------------------------------------------------------------
class fakeaddress(object):
    """A fake 'raw address': really a wrapper around a normalized
    low-level pointer (or None for the null address).  Supports
    adding/subtracting AddressOffsets and comparing for equality."""
    # NOTE: the 'ptr' in the addresses must be normalized.
    # Use cast_ptr_to_adr() instead of directly fakeaddress() if unsure.
    def __init__(self, ptr):
        self.ptr = ptr or None # null ptr => None
    def __repr__(self):
        if self.ptr is None:
            s = 'NULL'
        else:
            s = str(self.ptr)
        return '<fakeaddr %s>' % (s,)
    def __add__(self, other):
        # address + offset: follow the offset's ref() chain
        if isinstance(other, AddressOffset):
            if self.ptr is None:
                raise NullAddressError("offset from NULL address")
            return fakeaddress(other.ref(self.ptr))
        if other == 0:
            return self
        return NotImplemented
    def __sub__(self, other):
        # address - offset == address + (-offset)
        if isinstance(other, AddressOffset):
            return self + (-other)
        if other == 0:
            return self
        return NotImplemented
    def __nonzero__(self):
        return self.ptr is not None
    def __eq__(self, other):
        # two fakeaddresses are equal when they point to the same
        # underlying _obj (or are both NULL)
        if isinstance(other, fakeaddress):
            obj1 = self.ptr
            obj2 = other.ptr
            if obj1 is not None: obj1 = obj1._obj
            if obj2 is not None: obj2 = obj2._obj
            return obj1 == obj2
        else:
            return NotImplemented
    def __ne__(self, other):
        if isinstance(other, fakeaddress):
            return not (self == other)
        else:
            return NotImplemented
    def ref(self):
        # dereference; raises on the NULL address
        if not self:
            raise NullAddressError
        return self.ptr
##    def get(self):
##        return self.ref().get()
##    def set(self, value):
##        self.ref().set(value)
    def _cast_to_ptr(self, EXPECTED_TYPE):
        # convert back to a typed pointer (NULL maps to a null pointer)
        if self:
            PTRTYPE = lltype.typeOf(self.ptr)
            if (isinstance(EXPECTED_TYPE.TO, lltype.OpaqueType) or
                isinstance(PTRTYPE.TO, lltype.OpaqueType)):
                return lltype.cast_opaque_ptr(EXPECTED_TYPE, self.ptr)
            else:
                # regular case
                return lltype.cast_pointer(EXPECTED_TYPE, self.ptr)
        else:
            return lltype.nullptr(EXPECTED_TYPE.TO)
##            if (isinstance(ref, _arrayitemref) and
##                isinstance(EXPECTED_TYPE.TO, lltype.FixedSizeArray) and
##                ref.type() == EXPECTED_TYPE.TO.OF):
##                # special case that requires direct_arrayitems
##                p_items = lltype.direct_arrayitems(ref.array)
##                return lltype.direct_ptradd(p_items, ref.index)
##            elif (isinstance(ref, _structfieldref) and
##                  isinstance(EXPECTED_TYPE.TO, lltype.FixedSizeArray) and
##                  ref.type() == EXPECTED_TYPE.TO.OF):
##                # special case that requires direct_fieldptr
##                return lltype.direct_fieldptr(ref.struct,
##                                              ref.fieldname)
##            else:
##                result = ref.get()
##                if (isinstance(EXPECTED_TYPE.TO, lltype.OpaqueType) or
##                    isinstance(lltype.typeOf(result).TO, lltype.OpaqueType)):
##                    return lltype.cast_opaque_ptr(EXPECTED_TYPE, result)
##                else:
##                    # regular case
##                    return lltype.cast_pointer(EXPECTED_TYPE, result)
    def _cast_to_int(self):
        # NULL casts to 0
        if self:
            return self.ptr._cast_to_int()
        else:
            return 0
# ____________________________________________________________
class NullAddressError(Exception):
    # raised when dereferencing or offsetting the NULL fakeaddress
    pass
class DanglingPointerError(Exception):
    # raised (where enabled) when a weak address outlives its target
    pass
# the null fake address
NULL = fakeaddress(None)
NULL.intaddress = 0 # this is to make memory.lladdress more happy
# 'Address' as a low-level primitive type, with NULL as default value
Address = lltype.Primitive("Address", NULL)
# GCREF is similar to Address but it is GC-aware
GCREF = lltype.Ptr(lltype.GcOpaqueType('GCREF'))
class _fakeaccessor(object):
    """Base class emulating the typed accessors 'addr.signed[i]',
    'addr.char[i]', etc. on fake addresses.  Subclasses fix the TYPE
    attribute and may override the raw read/write primitives."""
    def __init__(self, addr):
        self.addr = addr
    def _itemptr(self, index):
        # locate the pointer 'index' items away from the base address
        ptr = self.addr.ref()
        if index != 0:
            ptr = lltype.direct_ptradd(ptr, index)
        return ptr
    def __getitem__(self, index):
        return self.read_from_ptr(self._itemptr(index))
    def __setitem__(self, index, value):
        assert lltype.typeOf(value) == self.TYPE
        self.write_into_ptr(self._itemptr(index), value)
    def read_from_ptr(self, ptr):
        value = ptr[0]
        assert lltype.typeOf(value) == self.TYPE
        return value
    def write_into_ptr(self, ptr, value):
        ptr[0] = value
class _signed_fakeaccessor(_fakeaccessor):
    # backs 'addr.signed[i]': reads and writes Signed values
    TYPE = lltype.Signed
class _char_fakeaccessor(_fakeaccessor):
    # backs 'addr.char[i]': reads and writes Char values
    TYPE = lltype.Char
class _address_fakeaccessor(_fakeaccessor):
    # backs 'addr.address[i]': reads and writes Address values,
    # converting between pointers and fakeaddresses as needed
    TYPE = Address
    def read_from_ptr(self, ptr):
        value = ptr[0]
        if isinstance(value, lltype._ptr):
            # stored as a pointer: convert to a fakeaddress
            return value._cast_to_adr()
        elif lltype.typeOf(value) == Address:
            return value
        else:
            raise TypeError(value)
    def write_into_ptr(self, ptr, value):
        TARGETTYPE = lltype.typeOf(ptr).TO.OF
        if TARGETTYPE == Address:
            pass
        elif isinstance(TARGETTYPE, lltype.Ptr):
            # target slot holds a typed pointer: convert the address
            value = cast_adr_to_ptr(value, TARGETTYPE)
        else:
            raise TypeError(TARGETTYPE)
        ptr[0] = value
# install the typed accessors as properties of fakeaddress
fakeaddress.signed = property(_signed_fakeaccessor)
fakeaddress.char = property(_char_fakeaccessor)
fakeaddress.address = property(_address_fakeaccessor)
fakeaddress._TYPE = Address
# the obtained address will not keep the object alive. e.g. if the object is
# only reachable through an address, it might get collected
def cast_ptr_to_adr(obj):
    """Convert a low-level pointer to a (normalized) fakeaddress."""
    assert isinstance(lltype.typeOf(obj), lltype.Ptr)
    return obj._cast_to_adr()
def cast_adr_to_ptr(adr, EXPECTED_TYPE):
    """Convert a fakeaddress back to a pointer of EXPECTED_TYPE."""
    return adr._cast_to_ptr(EXPECTED_TYPE)
def cast_adr_to_int(adr):
    """Convert a fakeaddress to an integer (0 for NULL)."""
    return adr._cast_to_int()
def cast_int_to_adr(int):
    # the reverse direction is not supported on fake addresses
    # (note: the parameter shadows the builtin 'int')
    raise NotImplementedError("cast_int_to_adr")
# ____________________________________________________________
class fakeweakaddress(object):
    """Fake 'weak address': holds a weakref to an object (or None for
    the null weak address) plus a stable integer id for cast_to_int."""
    def __init__(self, ob):
        if ob is not None:
            self.ref = weakref.ref(ob)
            # umpf
            from pypy.rpython.memory import lltypesimulation
            if isinstance(ob, (lltype._ptr,lltypesimulation.simulatorptr)):
                self.id = ob._cast_to_int()
            else:
                self.id = id(ob)
        else:
            self.ref = None
    def get(self):
        # return the referenced object, or None if null (or collected)
        if self.ref is None:
            return None
        ob = self.ref()
        # xxx stop-gap
        #if ob is None:
        #    raise DanglingPointerError
        return ob
    def __repr__(self):
        if self.ref is None:
            s = 'NULL'
        else:
            s = str(self.ref)
        return '<fakeweakaddr %s>' % (s,)
    def cast_to_int(self):
        # this is not always the behaviour that is really happening
        # but make sure that nobody depends on it
        return self.id ^ ~3
# 'WeakGcAddress' as a low-level primitive type, defaulting to the
# null weak address
WeakGcAddress = lltype.Primitive("WeakGcAddress",
                                 fakeweakaddress(None))
def cast_ptr_to_weakadr(obj):
    """Wrap a low-level pointer into a fakeweakaddress."""
    # XXX this is missing the normalizations done by _ptr._cast_to_adr()
    assert isinstance(lltype.typeOf(obj), lltype.Ptr)
    return fakeweakaddress(obj)
def cast_weakadr_to_ptr(adr, EXPECTED_TYPE):
    """Unwrap a fakeweakaddress; a dead or null one becomes a null ptr."""
    result = adr.get()
    if result is None:
        return lltype.nullptr(EXPECTED_TYPE.TO)
    return result
fakeweakaddress._TYPE = WeakGcAddress
# the null weak address
WEAKNULL = fakeweakaddress(None)
# ____________________________________________________________
def raw_malloc(size):
    """Allocate raw (non-zeroed) memory described by the symbolic
    'size' and return its fakeaddress."""
    if isinstance(size, AddressOffset):
        return size._raw_malloc([], zero=False)
    raise NotImplementedError(size)
def raw_free(adr):
    """Free the raw memory at fakeaddress 'adr'."""
    # try to free the whole object if 'adr' is the address of the header
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    try:
        objectptr = GCHeaderBuilder.object_from_header(adr.ptr)
    except KeyError:
        pass
    else:
        # free the attached GC object first, then fall through to free
        # the header itself
        raw_free(cast_ptr_to_adr(objectptr))
    assert isinstance(adr.ref()._obj, lltype._parentable)
    adr.ptr._as_obj()._free()
def raw_malloc_usage(size):
    """Return 'size' as a concrete integer number of simulated bytes."""
    if isinstance(size, AddressOffset):
        # ouah
        from pypy.rpython.memory.lltypelayout import convert_offset_to_int
        size = convert_offset_to_int(size)
    return size
def raw_memclear(adr, size):
    """Zero out 'size' worth of memory starting at 'adr'."""
    if not isinstance(size, AddressOffset):
        raise NotImplementedError(size)
    assert lltype.typeOf(adr) == Address
    # implemented by copying from a freshly-allocated zeroed block
    zeroadr = size._raw_malloc([], zero=True)
    size.raw_memcopy(zeroadr, adr)
def raw_memcopy(source, dest, size):
    """Copy 'size' worth of memory from 'source' to 'dest'."""
    assert lltype.typeOf(source) == Address
    assert lltype.typeOf(dest)   == Address
    size.raw_memcopy(source, dest)
# ____________________________________________________________
# opaque placeholder type for one reserved position inside a fake arena
ARENA_ITEM = lltype.OpaqueType('ArenaItem')
class _arena(object):
    """A fake arena: a lazily-grown sequence of _arenaitem placeholders."""
    #_cache = weakref.WeakKeyDictionary() # {obj: _arenaitem}
    def __init__(self, rng, zero):
        self.rng = rng
        self.zero = zero
        self.items = []
    def getitemaddr(self, n):
        # grow the item list on demand up to index 'n'
        items = self.items
        while len(items) <= n:
            items.append(_arenaitem(self, len(items)))
        return fakeaddress(items[n]._as_ptr())
class _arenaitem(lltype._container):
    """One position inside a fake arena; allocates its backing storage
    the first time reserve() is called."""
    _TYPE = ARENA_ITEM
    def __init__(self, arena, nr):
        self.arena = arena
        self.nr = nr
        self.reserved_size = None     # symbolic size, set by reserve()
    def reserve(self, size):
        if self.reserved_size is None:
            # first reservation: allocate the backing storage
            # xxx check that we are not larger than unitsize*n
            itemadr = raw_malloc(size)
            self.container = itemadr.ptr._obj
            # Bug fix: 'reserved_size' was never assigned, so the assert
            # branch below was unreachable and a repeated reserve()
            # silently re-allocated the item.
            self.reserved_size = size
            #_arena._cache[itemadr.ptr._obj] = self
        else:
            assert size == self.reserved_size
class ArenaRange(AddressOffset):
    """Symbolic size of an arena of 'n' units of size 'unitsize'."""
    def __init__(self, unitsize, n):
        self.unitsize = unitsize
        self.n = n
    def _raw_malloc(self, rest, zero=False):
        # allocating an arena returns the address of its first item
        assert not rest
        arena = _arena(self, zero=zero)
        return arena.getitemaddr(0)
def arena(TYPE, n):
    """Return the symbolic size of an arena of 'n' items of type TYPE."""
    return ArenaRange(sizeof(TYPE), n)
def bump(adr, size):
    """Reserve 'size' at arena position 'adr' and return the address of
    the next arena position (i.e. just past the reservation)."""
    item = cast_adr_to_ptr(adr, lltype.Ptr(ARENA_ITEM))._obj
    item.reserve(size)
    return item.arena.getitemaddr(item.nr + 1)
| Python |
# only for the LLInterpreter. Don't use directly.
from pypy.rpython.lltypesystem.lltype import pyobjectptr, malloc, free
from pypy.rpython.lltypesystem.llmemory import raw_malloc, raw_free
from pypy.rpython.lltypesystem.llmemory import raw_memclear, raw_memcopy
from pypy.rpython.lltypesystem.llmemory import raw_malloc_usage
| Python |
from pypy.annotation.pairtype import pairtype
from pypy.rpython.rmodel import inputconst
from pypy.rpython.robject import PyObjRepr, pyobj_repr
from pypy.rpython.rtuple import AbstractTupleRepr, AbstractTupleIteratorRepr
from pypy.rpython.lltypesystem.lltype import \
Ptr, GcStruct, Void, Signed, malloc, typeOf, nullptr
from pypy.rpython.lltypesystem.rtupletype import TUPLE_TYPE
from pypy.rpython.lltypesystem import rstr
# ____________________________________________________________
#
# Concrete implementation of RPython tuples:
#
# struct tuple {
# type0 item0;
# type1 item1;
# type2 item2;
# ...
# }
class TupleRepr(AbstractTupleRepr):
    """lltypesystem repr of an RPython tuple: a GcStruct with one
    'itemN' field per tuple item (or Void for the empty tuple)."""
    rstr_ll = rstr.LLHelpers
    def __init__(self, rtyper, items_r):
        AbstractTupleRepr.__init__(self, rtyper, items_r)
        self.lowleveltype = TUPLE_TYPE(self.lltypes)
    def newtuple(cls, llops, r_tuple, items_v):
        # Generate the operations building a new tuple from items_v.
        # items_v should have the lowleveltype of the internal reprs
        if len(r_tuple.items_r) == 0:
            return inputconst(Void, ())    # a Void empty tuple
        c1 = inputconst(Void, r_tuple.lowleveltype.TO)
        v_result = llops.genop('malloc', [c1], resulttype = r_tuple.lowleveltype)
        for i in range(len(r_tuple.items_r)):
            cname = inputconst(Void, r_tuple.fieldnames[i])
            llops.genop('setfield', [v_result, cname, items_v[i]])
        return v_result
    newtuple = classmethod(newtuple)
    def instantiate(self):
        if len(self.items_r) == 0:
            return dum_empty_tuple     # PBC placeholder for an empty tuple
        else:
            return malloc(self.lowleveltype.TO)
    def rtype_bltn_list(self, hop):
        # implement list(some_tuple): build a list and copy each item
        # into it, converting to the list's item repr
        from pypy.rpython.lltypesystem import rlist
        nitems = len(self.items_r)
        vtup = hop.inputarg(self, 0)
        LIST = hop.r_result.lowleveltype.TO
        cno = inputconst(Signed, nitems)
        vlist = hop.gendirectcall(LIST.ll_newlist, cno)
        v_func = hop.inputconst(Void, rlist.dum_nocheck)
        for index in range(nitems):
            name = self.fieldnames[index]
            ritem = self.items_r[index]
            cname = hop.inputconst(Void, name)
            vitem = hop.genop('getfield', [vtup, cname], resulttype = ritem)
            vitem = hop.llops.convertvar(vitem, ritem, hop.r_result.item_repr)
            cindex = inputconst(Signed, index)
            hop.gendirectcall(rlist.ll_setitem_nonneg, v_func, vlist, cindex, vitem)
        return vlist
    def getitem_internal(self, llops, v_tuple, index):
        """Return the index'th item, in internal repr."""
        name = self.fieldnames[index]
        llresult = self.lltypes[index]
        cname = inputconst(Void, name)
        return llops.genop('getfield', [v_tuple, cname], resulttype = llresult)
def rtype_newtuple(hop):
    # hook called by the rtyper for 'newtuple' operations
    return TupleRepr._rtype_newtuple(hop)
# module-level alias for the tuple-building classmethod
newtuple = TupleRepr.newtuple
# placeholder PBC returned by TupleRepr.instantiate() for empty tuples
def dum_empty_tuple(): pass
#
# _________________________ Conversions _________________________
class __extend__(pairtype(PyObjRepr, TupleRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # Convert a CPython tuple object into an RPython tuple: fetch
        # each item through the C API and convert it to the target
        # item repr, then rebuild the tuple.
        vlist = []
        for i in range(len(r_to.items_r)):
            ci = inputconst(Signed, i)
            v_item = llops.gencapicall('PyTuple_GetItem_WithIncref', [v, ci],
                                       resulttype = pyobj_repr)
            v_converted = llops.convertvar(v_item, pyobj_repr,
                                           r_to.items_r[i])
            vlist.append(v_converted)
        return r_to.newtuple(llops, r_to, vlist)
class __extend__(pairtype(TupleRepr, PyObjRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # Convert an RPython tuple into a CPython tuple object: create
        # the PyTuple, then read each field, convert it to a PyObject
        # and store it through the C API.
        ci = inputconst(Signed, len(r_from.items_r))
        v_result = llops.gencapicall('PyTuple_New', [ci],
                                     resulttype = pyobj_repr)
        for i in range(len(r_from.items_r)):
            cname = inputconst(Void, r_from.fieldnames[i])
            v_item = llops.genop('getfield', [v, cname],
                                 resulttype = r_from.external_items_r[i].lowleveltype)
            v_converted = llops.convertvar(v_item, r_from.external_items_r[i],
                                           pyobj_repr)
            ci = inputconst(Signed, i)
            llops.gencapicall('PyTuple_SetItem_WithIncref', [v_result, ci,
                                                             v_converted])
        return v_result
# ____________________________________________________________
#
# Iteration.
class Length1TupleIteratorRepr(AbstractTupleIteratorRepr):
    """Iterator repr for tuples of length 1: a small GcStruct holding
    the tuple, nulled out once the single item has been yielded."""
    def __init__(self, r_tuple):
        self.r_tuple = r_tuple
        self.lowleveltype = Ptr(GcStruct('tuple1iter',
                                         ('tuple', r_tuple.lowleveltype)))
        self.ll_tupleiter = ll_tupleiter
        self.ll_tuplenext = ll_tuplenext
# register as the iterator repr used by TupleRepr
TupleRepr.IteratorRepr = Length1TupleIteratorRepr
def ll_tupleiter(ITERPTR, tuple):
    """Create a fresh iterator structure holding 'tuple'."""
    result = malloc(ITERPTR.TO)
    result.tuple = tuple
    return result
def ll_tuplenext(iter):
    """Yield the single item of a length-1 tuple iterator, then raise
    StopIteration on any further call."""
    # for iterating over length 1 tuples only!
    t = iter.tuple
    if not t:
        raise StopIteration
    # null out the stored tuple so the next call stops the iteration
    iter.tuple = nullptr(typeOf(t).TO)
    return t.item0
| Python |
# Helper to build the lowleveltype corresponding to an RPython tuple.
# This is not in rtuple.py so that it can be imported without bringing
# the whole rtyper in.
from pypy.rpython.lltypesystem.lltype import Void, Ptr, GcStruct
def TUPLE_TYPE(field_lltypes):
    """Build the low-level type of an RPython tuple whose items have
    the given low-level types."""
    if not field_lltypes:
        return Void    # empty tuple
    fields = [('item%d' % i, TYPE) for i, TYPE in enumerate(field_lltypes)]
    extras = {'hints': {'immutable': True,
                        'noidentity': True}}
    return Ptr(GcStruct('tuple%d' % len(field_lltypes), *fields, **extras))
| Python |
from pypy.objspace.flow.model import Constant
from pypy.rpython.rclass import getclassrepr, getinstancerepr, get_type_repr
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.lltypesystem.rclass import InstanceRepr, CLASSTYPE
from pypy.rpython.lltypesystem.rclass import MissingRTypeAttribute
from pypy.rpython.lltypesystem.rclass import ll_issubclass_const
from pypy.rpython.rmodel import TyperError, inputconst
class TaggedInstanceRepr(InstanceRepr):
def __init__(self, rtyper, classdef, unboxedclassdef):
InstanceRepr.__init__(self, rtyper, classdef)
self.unboxedclassdef = unboxedclassdef
self.is_parent = unboxedclassdef is not classdef
def _setup_repr(self):
InstanceRepr._setup_repr(self)
flds = self.allinstancefields.keys()
flds.remove('__class__')
if self.is_parent:
if flds:
raise TyperError("%r is a base class of an UnboxedValue,"
"so it cannot have fields: %r" % (
self.classdef, flds))
else:
if len(flds) != 1:
raise TyperError("%r must have exactly one field: %r" % (
self.classdef, flds))
self.specialfieldname = flds[0]
def new_instance(self, llops, classcallhop=None):
if self.is_parent:
raise TyperError("don't instantiate %r, it is a parent of an "
"UnboxedValue class" % (self.classdef,))
if classcallhop is None:
raise TyperError("must instantiate %r by calling the class" % (
self.classdef,))
hop = classcallhop
if not (hop.spaceop.opname == 'simple_call' and hop.nb_args == 2):
raise TyperError("must instantiate %r with a simple class call" % (
self.classdef,))
v_value = hop.inputarg(lltype.Signed, arg=1)
c_one = hop.inputconst(lltype.Signed, 1)
hop.exception_is_here()
v2 = hop.genop('int_lshift_ovf', [v_value, c_one],
resulttype = lltype.Signed)
v2p1 = hop.genop('int_add', [v2, c_one],
resulttype = lltype.Signed)
v_instance = hop.genop('cast_int_to_ptr', [v2p1],
resulttype = self.lowleveltype)
return v_instance, False # don't call __init__
def convert_const_exact(self, value):
self.setup()
number = value.getvalue()
return ll_int_to_unboxed(self.lowleveltype, number)
def getvalue_from_unboxed(self, llops, vinst):
assert not self.is_parent
v2 = llops.genop('cast_ptr_to_int', [vinst], resulttype=lltype.Signed)
c_one = inputconst(lltype.Signed, 1)
return llops.genop('int_rshift', [v2, c_one], resulttype=lltype.Signed)
def gettype_from_unboxed(self, llops, vinst):
unboxedclass_repr = getclassrepr(self.rtyper, self.unboxedclassdef)
cunboxedcls = inputconst(CLASSTYPE, unboxedclass_repr.getvtable())
if self.is_parent:
# If the lltype of vinst shows that it cannot be a tagged value,
# we can directly read the typeptr. Otherwise, call a helper that
# checks if the tag bit is set in the pointer.
unboxedinstance_repr = getinstancerepr(self.rtyper,
self.unboxedclassdef)
try:
lltype.castable(unboxedinstance_repr.lowleveltype,
vinst.concretetype)
except lltype.InvalidCast:
can_be_tagged = False
else:
can_be_tagged = True
vinst = llops.genop('cast_pointer', [vinst],
resulttype=self.common_repr())
if can_be_tagged:
return llops.gendirectcall(ll_unboxed_getclass, vinst,
cunboxedcls)
else:
ctypeptr = inputconst(lltype.Void, 'typeptr')
return llops.genop('getfield', [vinst, ctypeptr],
resulttype = CLASSTYPE)
else:
return cunboxedcls
def getfield(self, vinst, attr, llops, force_cast=False, flags={}):
if not self.is_parent and attr == self.specialfieldname:
return self.getvalue_from_unboxed(llops, vinst)
elif attr == '__class__':
return self.gettype_from_unboxed(llops, vinst)
else:
raise MissingRTypeAttribute(attr)
    def rtype_type(self, hop):
        """rtype the type(x) operation for a possibly-tagged value."""
        [vinst] = hop.inputargs(self)
        return self.gettype_from_unboxed(hop.llops, vinst)
    def rtype_setattr(self, hop):
        # only for UnboxedValue.__init__(), which is not actually called;
        # emits a dummy no-argument operation (presumably dropped later
        # in the translation pipeline -- not visible from here)
        hop.genop('UnboxedValue_setattr', [])
    def ll_str(self, i):
        """Low-level str() of 'i': tagged values (odd when cast to an
        integer) print their unboxed payload; real instances defer to
        the generic InstanceRepr implementation."""
        if lltype.cast_ptr_to_int(i) & 1:
            from pypy.rpython.lltypesystem import rstr
            from pypy.rpython.rint import signed_repr
            llstr1 = signed_repr.ll_str(ll_unboxed_to_int(i))
            return rstr.ll_strconcat(rstr.unboxed_instance_str_prefix,
                                     rstr.ll_strconcat(llstr1,
                                                 rstr.unboxed_instance_str_suffix))
        else:
            return InstanceRepr.ll_str(self, i)
    def rtype_isinstance(self, hop):
        """isinstance(x, C) for a possibly-tagged x: the answer for
        tagged values is decided at rtyping time; real instances use a
        runtime subclass-range check."""
        if not hop.args_s[1].is_constant():
            raise TyperError("isinstance() too complicated")
        [classdesc] = hop.args_s[1].descriptions
        classdef = classdesc.getuniqueclassdef()
        class_repr = get_type_repr(self.rtyper)
        instance_repr = self.common_repr()
        v_obj, v_cls = hop.inputargs(instance_repr, class_repr)
        cls = v_cls.value
        # statically-known answer for tagged values
        answer = self.unboxedclassdef.issubclass(classdef)
        c_answer_if_unboxed = hop.inputconst(lltype.Bool, answer)
        minid = hop.inputconst(lltype.Signed, cls.subclassrange_min)
        maxid = hop.inputconst(lltype.Signed, cls.subclassrange_max)
        return hop.gendirectcall(ll_unboxed_isinstance_const, v_obj,
                                 minid, maxid, c_answer_if_unboxed)
def ll_int_to_unboxed(PTRTYPE, value):
    # Encode 'value' as a tagged pointer: shift left by one and set the
    # low tag bit.
    tagged = value * 2 + 1
    return lltype.cast_int_to_ptr(PTRTYPE, tagged)
def ll_unboxed_to_int(p):
    # Decode a tagged pointer: drop the tag bit with an arithmetic
    # right shift.
    as_int = lltype.cast_ptr_to_int(p)
    return as_int >> 1
def ll_unboxed_getclass(instance, class_if_unboxed):
    # A set low bit means 'instance' is a tagged value rather than a
    # real pointer; its class is then statically known.
    if lltype.cast_ptr_to_int(instance) & 1:
        return class_if_unboxed
    return instance.typeptr
def ll_unboxed_isinstance_const(obj, minid, maxid, answer_if_unboxed):
    # NULL is never an instance of anything
    if not obj:
        return False
    # tagged value: the answer was precomputed at rtyping time
    if lltype.cast_ptr_to_int(obj) & 1:
        return answer_if_unboxed
    else:
        # real instance: subclass-range check against the vtable
        return ll_issubclass_const(obj.typeptr, minid, maxid)
| Python |
from pypy.annotation import model as annmodel
from pypy.rpython.lltypesystem import rclass
from pypy.rpython.lltypesystem.lltype import \
Array, malloc, Ptr, PyObject, pyobjectptr, \
FuncType, functionptr, Signed
from pypy.rpython.exceptiondata import AbstractExceptionData
from pypy.rpython.extfunctable import standardexceptions
from pypy.annotation.classdef import FORCE_ATTRIBUTES_INTO_CLASSES
class ExceptionData(AbstractExceptionData):
    """Public information for the code generators to help with exceptions."""
    def make_helpers(self, rtyper):
        # create helper functionptrs
        self.fn_exception_match = self.make_exception_matcher(rtyper)
        self.fn_type_of_exc_inst = self.make_type_of_exc_inst(rtyper)
        self.fn_pyexcclass2exc = self.make_pyexcclass2exc(rtyper)
        self.fn_raise_OSError = self.make_raise_OSError(rtyper)
    def make_exception_matcher(self, rtyper):
        # ll_exception_matcher(real_exception_vtable, match_exception_vtable)
        s_typeptr = annmodel.SomePtr(self.lltype_of_exception_type)
        helper_fn = rtyper.annotate_helper_fn(rclass.ll_issubclass, [s_typeptr, s_typeptr])
        return helper_fn
    def make_type_of_exc_inst(self, rtyper):
        # ll_type_of_exc_inst(exception_instance) -> exception_vtable
        s_excinst = annmodel.SomePtr(self.lltype_of_exception_value)
        helper_fn = rtyper.annotate_helper_fn(rclass.ll_type, [s_excinst])
        return helper_fn
    def make_pyexcclass2exc(self, rtyper):
        # ll_pyexcclass2exc(python_exception_class) -> exception_instance
        # Builds a prebuilt table mapping CPython exception classes to
        # reusable RPython exception instances, plus a helper that scans
        # it (subclasses before base classes) at run time.
        table = {}
        Exception_def = rtyper.annotator.bookkeeper.getuniqueclassdef(Exception)
        for clsdef in rtyper.class_reprs:
            if (clsdef and clsdef is not Exception_def
                and clsdef.issubclass(Exception_def)):
                if not hasattr(clsdef.classdesc, 'pyobj'):
                    continue
                cls = clsdef.classdesc.pyobj
                if cls in self.standardexceptions and cls not in FORCE_ATTRIBUTES_INTO_CLASSES:
                    is_standard = True
                    assert not clsdef.attrs, (
                        "%r should not have grown attributes" % (cls,))
                else:
                    is_standard = (cls.__module__ == 'exceptions'
                                   and not clsdef.attrs)
                if is_standard:
                    example = self.get_standard_ll_exc_instance(rtyper, clsdef)
                    table[cls] = example
                #else:
                #    assert cls.__module__ != 'exceptions', (
                #        "built-in exceptions should not grow attributes")
        r_inst = rclass.getinstancerepr(rtyper, None)
        r_inst.setup()
        # fallback instance returned for unknown exception classes
        default_excinst = malloc(self.lltype_of_exception_value.TO,
                                 immortal=True)
        default_excinst.typeptr = r_inst.rclass.getvtable()
        # build the table in order base classes first, subclasses last
        sortedtable = []
        def add_class(cls):
            if cls in table:
                for base in cls.__bases__:
                    add_class(base)
                sortedtable.append((cls, table[cls]))
                del table[cls]
        # note: table.keys() is a list copy (Py2), safe while add_class
        # deletes entries from 'table'
        for cls in table.keys():
            add_class(cls)
        assert table == {}
        #print sortedtable
        A = Array(('pycls', Ptr(PyObject)),
                  ('excinst', self.lltype_of_exception_value))
        pycls2excinst = malloc(A, len(sortedtable), immortal=True)
        for i in range(len(sortedtable)):
            cls, example = sortedtable[i]
            pycls2excinst[i].pycls = pyobjectptr(cls)
            pycls2excinst[i].excinst = example
        FUNCTYPE = FuncType([Ptr(PyObject), Ptr(PyObject)], Signed)
        PyErr_GivenExceptionMatches = functionptr(
            FUNCTYPE, "PyErr_GivenExceptionMatches", external="C",
            _callable=lambda pyobj1, pyobj2:
                      int(issubclass(pyobj1._obj.value, pyobj2._obj.value)))
        initial_value_of_i = len(pycls2excinst)-1
        def ll_pyexcclass2exc(python_exception_class):
            """Return an RPython instance of the best approximation of the
            Python exception identified by its Python class.
            """
            # scan backwards: subclasses come last in the table, so the
            # most specific match wins
            i = initial_value_of_i
            while i >= 0:
                if PyErr_GivenExceptionMatches(python_exception_class,
                                               pycls2excinst[i].pycls):
                    return pycls2excinst[i].excinst
                i -= 1
            return default_excinst
        s_pyobj = annmodel.SomePtr(Ptr(PyObject))
        helper_fn = rtyper.annotate_helper_fn(ll_pyexcclass2exc, [s_pyobj])
        return helper_fn
    def get_standard_ll_exc_instance(self, rtyper, clsdef):
        """Return the reusable prebuilt instance for a standard
        (attribute-less) exception class, cast to the object type."""
        r_inst = rclass.getinstancerepr(rtyper, clsdef)
        example = r_inst.get_reusable_prebuilt_instance()
        example = rclass.ll_cast_to_object(example)
        return example
| Python |
import py
from pypy.rlib.rarithmetic import r_int, r_uint, intmask
from pypy.rlib.rarithmetic import r_ulonglong, r_longlong, base_int
from pypy.rlib.rarithmetic import normalizedinttype
from pypy.rlib.objectmodel import Symbolic
from pypy.tool.uid import Hashable
from pypy.tool.tls import tlsobject
from types import NoneType
from sys import maxint
import weakref
log = py.log.Producer('lltype')
TLS = tlsobject()
class _uninitialized(object):
def __init__(self, TYPE):
self.TYPE = TYPE
def __repr__(self):
return '<Uninitialized %r>'%(self.TYPE,)
def saferecursive(func, defl):
    """Wrap 'func' so that a recursive re-invocation with the same
    arguments (compared by identity) returns 'defl' instead of
    recursing forever.  The in-progress set lives in thread-local
    state (TLS)."""
    def safe(*args):
        seeing = getattr(TLS, 'seeing', None)
        if seeing is None:
            seeing = TLS.seeing = {}
        key = (func,) + tuple([id(arg) for arg in args])
        if key in seeing:
            return defl
        seeing[key] = True
        try:
            return func(*args)
        finally:
            del seeing[key]
    return safe
#safe_equal = saferecursive(operator.eq, True)
def safe_equal(x, y):
    # a specialized version for performance
    # Recursion-safe equality: if a comparison of the same (x, y) pair
    # (by identity) is already in progress on this thread, assume True.
    try:
        seeing = TLS.seeing_eq
    except AttributeError:
        seeing = TLS.seeing_eq = {}
    seeingkey = (id(x), id(y))
    if seeingkey in seeing:
        return True
    seeing[seeingkey] = True
    try:
        return x == y
    finally:
        del seeing[seeingkey]
class frozendict(dict):
    """A dict usable as a hash key: hashed as the sorted tuple of its
    items.  (Mutation is not actually prevented; instances are expected
    not to change once they are used as keys.)"""

    def __hash__(self):
        # sorted() accepts any iterable of items and avoids mutating a
        # temporary list in place; it also works whether items() returns
        # a list (Py2) or a view.
        return hash(tuple(sorted(self.items())))
class LowLevelType(object):
    """Abstract base of all low-level types.  Instances compare and
    hash structurally (same class + equal __dict__), with recursion
    protection for self-referencing types."""
    # the following line prevents '__cached_hash' to be in the __dict__ of
    # the instance, which is needed for __eq__() and __hash__() to work.
    __slots__ = ['__dict__', '__cached_hash']
    def __eq__(self, other):
        # structural equality via the recursion-safe dict comparison
        return self.__class__ is other.__class__ and (
            self is other or safe_equal(self.__dict__, other.__dict__))
    def __ne__(self, other):
        return not (self == other)
    _is_compatible = __eq__
    def _enforce(self, value):
        # check that 'value' is exactly of this lltype and return it
        if typeOf(value) != self:
            raise TypeError
        return value
    def __hash__(self):
        # cannot use saferecursive() -- see test_lltype.test_hash().
        # NB. the __cached_hash should neither be used nor updated
        # if we enter with hash_level > 0, because the computed
        # __hash__ can be different in this situation.
        hash_level = 0
        try:
            hash_level = TLS.nested_hash_level
            if hash_level == 0:
                return self.__cached_hash
        except AttributeError:
            pass
        if hash_level >= 3:
            # cut the recursion: deeply nested types all hash to 0
            return 0
        items = self.__dict__.items()
        items.sort()
        TLS.nested_hash_level = hash_level + 1
        try:
            result = hash((self.__class__,) + tuple(items))
        finally:
            TLS.nested_hash_level = hash_level
        if hash_level == 0:
            # only the outermost call caches (see NB above)
            self.__cached_hash = result
        return result
    # due to this dynamic hash value, we should forbid
    # pickling, until we have an algorithm for that.
    # but we just provide a tag for external help.
    __hash_is_not_constant__ = True
    def __repr__(self):
        return '<%s>' % (self,)
    def __str__(self):
        return self.__class__.__name__
    def _short_name(self):
        return str(self)
    def _defl(self, parent=None, parentindex=None):
        # default value of this type; overridden by subclasses
        raise NotImplementedError
    def _allocate(self, initialization, parent=None, parentindex=None):
        # value to store on allocation; overridden by subclasses
        assert initialization in ('raw', 'malloc', 'example')
        raise NotImplementedError
    def _freeze_(self):
        # '_freeze_' protocol: instances are prebuilt immutable constants
        return True
    def _inline_is_varsize(self, last):
        return False
    def _is_atomic(self):
        return False
    def _is_varsize(self):
        return False
# sentinel for 'no adt method found' (None could be a valid value)
NFOUND = object()
class ContainerType(LowLevelType):
    """Base of all types with inner structure (structs, arrays,
    functions, opaques); the pointee side of a Ptr."""
    _adtmeths = {}
    def _inline_is_varsize(self, last):
        raise TypeError, "%r cannot be inlined in structure" % self
    def _install_extras(self, adtmeths={}, hints={}):
        # attach abstract-data-type methods and arbitrary hints
        self._adtmeths = frozendict(adtmeths)
        self._hints = frozendict(hints)
    def __getattr__(self, name):
        adtmeth = self._adtmeths.get(name, NFOUND)
        if adtmeth is not NFOUND:
            if getattr(adtmeth, '_type_method', False):
                # bind type methods to the type itself
                return adtmeth.__get__(self)
            else:
                return adtmeth
        self._nofield(name)
    def _nofield(self, name):
        raise AttributeError("no field %r" % name)
class Struct(ContainerType):
_gckind = 'raw'
def __init__(self, name, *fields, **kwds):
self._name = self.__name__ = name
flds = {}
names = []
self._arrayfld = None
for name, typ in fields:
if name.startswith('_'):
raise NameError, ("%s: field name %r should not start with "
"an underscore" % (self._name, name,))
names.append(name)
if name in flds:
raise TypeError("%s: repeated field name" % self._name)
flds[name] = typ
if isinstance(typ, ContainerType) and typ._gckind != 'raw':
if name == fields[0][0] and typ._gckind == self._gckind:
pass # can inline a XxContainer as 1st field of XxStruct
else:
raise TypeError("%s: cannot inline %s container %r" % (
self._name, typ._gckind, typ))
# look if we have an inlined variable-sized array as the last field
if fields:
for name, typ in fields[:-1]:
typ._inline_is_varsize(False)
first = False
name, typ = fields[-1]
if typ._inline_is_varsize(True):
self._arrayfld = name
self._flds = frozendict(flds)
self._names = tuple(names)
self._install_extras(**kwds)
def _first_struct(self):
if self._names:
first = self._names[0]
FIRSTTYPE = self._flds[first]
if (isinstance(FIRSTTYPE, (Struct, PyObjectType)) and
self._gckind == FIRSTTYPE._gckind):
return first, FIRSTTYPE
return None, None
def _inline_is_varsize(self, last):
if self._arrayfld:
raise TypeError("cannot inline a var-sized struct "
"inside another container")
return False
def _is_atomic(self):
for typ in self._flds.values():
if not typ._is_atomic():
return False
return True
def _is_varsize(self):
return self._arrayfld is not None
def __getattr__(self, name):
try:
return self._flds[name]
except KeyError:
return ContainerType.__getattr__(self, name)
def _nofield(self, name):
raise AttributeError, 'struct %s has no field %r' % (self._name,
name)
def _names_without_voids(self):
names_without_voids = [name for name in self._names if self._flds[name] is not Void]
return names_without_voids
def _str_fields_without_voids(self):
return ', '.join(['%s: %s' % (name, self._flds[name])
for name in self._names_without_voids(False)])
_str_fields_without_voids = saferecursive(_str_fields_without_voids, '...')
def _str_without_voids(self):
return "%s %s { %s }" % (self.__class__.__name__,
self._name, self._str_fields_without_voids())
def _str_fields(self):
return ', '.join(['%s: %s' % (name, self._flds[name])
for name in self._names])
_str_fields = saferecursive(_str_fields, '...')
def __str__(self):
# -- long version --
#return "%s %s { %s }" % (self.__class__.__name__,
# self._name, self._str_fields())
# -- short version --
return "%s %s { %s }" % (self.__class__.__name__, self._name,
', '.join(self._names))
def _short_name(self):
return "%s %s" % (self.__class__.__name__, self._name)
## def _defl(self, parent=None, parentindex=None):
## return _struct(self, parent=parent, parentindex=parentindex)
def _allocate(self, initialization, parent=None, parentindex=None):
return _struct(self, initialization=initialization,
parent=parent, parentindex=parentindex)
def _container_example(self):
if self._arrayfld is None:
n = None
else:
n = 1
return _struct(self, n, initialization='example')
class RttiStruct(Struct):
    """A Struct that can carry runtime type information: an opaque
    RuntimeTypeInfo object plus optional query and destructor
    function pointers."""
    _runtime_type_info = None
    def _attach_runtime_type_info_funcptr(self, funcptr, destrptr):
        # lazily create the opaque rtti object for this struct
        if self._runtime_type_info is None:
            self._runtime_type_info = opaqueptr(RuntimeTypeInfo, name=self._name, about=self)._obj
        if funcptr is not None:
            # query function: Ptr(compatible struct) -> Ptr(RuntimeTypeInfo)
            T = typeOf(funcptr)
            if (not isinstance(T, Ptr) or
                not isinstance(T.TO, FuncType) or
                len(T.TO.ARGS) != 1 or
                T.TO.RESULT != Ptr(RuntimeTypeInfo) or
                castable(T.TO.ARGS[0], Ptr(self)) < 0):
                raise TypeError("expected a runtime type info function "
                                "implementation, got: %s" % funcptr)
            self._runtime_type_info.query_funcptr = funcptr
        if destrptr is not None :
            # destructor: Ptr(compatible struct) -> Void
            T = typeOf(destrptr)
            if (not isinstance(T, Ptr) or
                not isinstance(T.TO, FuncType) or
                len(T.TO.ARGS) != 1 or
                T.TO.RESULT != Void or
                castable(T.TO.ARGS[0], Ptr(self)) < 0):
                raise TypeError("expected a destructor function "
                                "implementation, got: %s" % destrptr)
            self._runtime_type_info.destructor_funcptr = destrptr
class GcStruct(RttiStruct):
    """An RttiStruct with the 'gc' gc-kind (garbage-collected)."""
    _gckind = 'gc'
class PyStruct(RttiStruct):
    """An RttiStruct with the 'cpy' gc-kind; must start with a PyObject
    (or another PyStruct) header as its first field."""
    _gckind = 'cpy'
    def __init__(self, name, *fields, **kwds):
        RttiStruct.__init__(self, name, *fields, **kwds)
        if self._first_struct() == (None, None):
            raise TypeError("a PyStruct must have another PyStruct or "
                            "PyObject as first field")
# map gc-kind flavor name -> corresponding Struct subclass
STRUCT_BY_FLAVOR = {'raw': Struct,
                    'gc':  GcStruct,
                    'cpy': PyStruct}
class Array(ContainerType):
    """A variable-sized array type.  Built either from a single item
    type, or from (fieldname, TYPE) pairs describing an anonymous
    per-item struct."""
    _gckind = 'raw'
    __name__ = 'array'
    _anonym_struct = False
    def __init__(self, *fields, **kwds):
        if len(fields) == 1 and isinstance(fields[0], LowLevelType):
            self.OF = fields[0]
        else:
            # several fields: items are anonymous inline structs
            self.OF = Struct("<arrayitem>", *fields)
            self._anonym_struct = True
        if isinstance(self.OF, ContainerType) and self.OF._gckind != 'raw':
            raise TypeError("cannot have a %s container as array item type"
                            % (self.OF._gckind,))
        self.OF._inline_is_varsize(False)
        self._install_extras(**kwds)
    def _inline_is_varsize(self, last):
        # arrays may only be inlined as the trailing field of a struct
        if not last:
            raise TypeError("cannot inline an array in another container"
                            " unless as the last field of a structure")
        return True
    def _is_atomic(self):
        return self.OF._is_atomic()
    def _is_varsize(self):
        return True
    def _str_fields(self):
        if isinstance(self.OF, Struct):
            of = self.OF
            if self._anonym_struct:
                return "{ %s }" % of._str_fields()
            else:
                return "%s { %s }" % (of._name, of._str_fields())
        else:
            return str(self.OF)
    _str_fields = saferecursive(_str_fields, '...')
    def __str__(self):
        return "%s of %s " % (self.__class__.__name__,
                              self._str_fields(),)
    def _short_name(self):
        return "%s %s" % (self.__class__.__name__,
                          self.OF._short_name(),)
    _short_name = saferecursive(_short_name, '...')
    def _container_example(self):
        return _array(self, 1, initialization='example')
class GcArray(Array):
    """An Array with the 'gc' gc-kind; unlike raw arrays it can never
    be inlined inside a structure."""
    _gckind = 'gc'
    def _inline_is_varsize(self, last):
        raise TypeError("cannot inline a GC array inside a structure")
class FixedSizeArray(Struct):
    # behaves more or less like a Struct with fields item0, item1, ...
    # but also supports __getitem__(), __setitem__(), __len__().
    def __init__(self, OF, length, **kwds):
        fields = [('item%d' % i, OF) for i in range(length)]
        super(FixedSizeArray, self).__init__('array%d' % length, *fields,
                                             **kwds)
        # OF: the item type; length: the (fixed) number of items
        self.OF = OF
        self.length = length
        if isinstance(self.OF, ContainerType) and self.OF._gckind != 'raw':
            raise TypeError("cannot have a %s container as array item type"
                            % (self.OF._gckind,))
        self.OF._inline_is_varsize(False)
    def _str_fields(self):
        return str(self.OF)
    _str_fields = saferecursive(_str_fields, '...')
    def __str__(self):
        return "%s of %d %s " % (self.__class__.__name__,
                                 self.length,
                                 self._str_fields(),)
    def _short_name(self):
        return "%s %d %s" % (self.__class__.__name__,
                             self.length,
                             self.OF._short_name(),)
    _short_name = saferecursive(_short_name, '...')
    def _first_struct(self):
        # don't consider item0 as an inlined first substructure
        return None, None
class FuncType(ContainerType):
    """A low-level function type: argument lltypes (ARGS) and a result
    lltype (RESULT)."""
    _gckind = 'raw'
    __name__ = 'func'
    def __init__(self, args, result):
        for arg in args:
            assert isinstance(arg, LowLevelType)
            # -- disabled the following check for the benefits of rctypes --
            #if isinstance(arg, ContainerType):
            #    raise TypeError, "function arguments can only be primitives or pointers"
        self.ARGS = tuple(args)
        assert isinstance(result, LowLevelType)
        if isinstance(result, ContainerType):
            raise TypeError, "function result can only be primitive or pointer"
        self.RESULT = result
    def __str__(self):
        args = ', '.join(map(str, self.ARGS))
        return "Func ( %s ) -> %s" % (args, self.RESULT)
    __str__ = saferecursive(__str__, '...')
    def _short_name(self):
        args = ', '.join([ARG._short_name() for ARG in self.ARGS])
        return "Func(%s)->%s" % (args, self.RESULT._short_name())
    _short_name = saferecursive(_short_name, '...')
    def _container_example(self):
        # example function: returns the default value of RESULT
        def ex(*args):
            return self.RESULT._defl()
        return _func(self, _callable=ex)
    def _trueargs(self):
        # the arguments that actually exist at run time (Void ones don't)
        return [arg for arg in self.ARGS if arg is not Void]
class OpaqueType(ContainerType):
    """A container whose content is not visible at this level, only
    identified by a tag string."""
    _gckind = 'raw'
    def __init__(self, tag):
        self.tag = tag
        self.__name__ = tag
    def __str__(self):
        return "%s (opaque)" % self.tag
    def _inline_is_varsize(self, last):
        return False    # OpaqueType can be inlined
    def _container_example(self):
        return _opaque(self)
    def _defl(self, parent=None, parentindex=None):
        return _opaque(self, parent=parent, parentindex=parentindex)
    def _allocate(self, initialization, parent=None, parentindex=None):
        return self._defl(parent=parent, parentindex=parentindex)
RuntimeTypeInfo = OpaqueType("RuntimeTypeInfo")
class GcOpaqueType(OpaqueType):
    """An OpaqueType with the 'gc' gc-kind; cannot be inlined."""
    def __str__(self):
        return "%s (gcopaque)" % self.tag
    def _inline_is_varsize(self, last):
        raise TypeError, "%r cannot be inlined in structure" % self
class PyObjectType(ContainerType):
    """The type of a CPython object header ('cpy' gc-kind)."""
    _gckind = 'cpy'
    __name__ = 'PyObject'
    def __str__(self):
        return "PyObject"
    def _inline_is_varsize(self, last):
        return False
    def _defl(self, parent=None, parentindex=None):
        return _pyobjheader(parent, parentindex)
    def _allocate(self, initialization, parent=None, parentindex=None):
        return self._defl(parent=parent, parentindex=parentindex)
PyObject = PyObjectType()
class ForwardReference(ContainerType):
    """Placeholder container type, later turned in place into the real
    type by become(): the instance mutates its own __class__ and
    __dict__, so every existing reference sees the real type."""
    _gckind = 'raw'
    def become(self, realcontainertype):
        if not isinstance(realcontainertype, ContainerType):
            raise TypeError("ForwardReference can only be to a container, "
                            "not %r" % (realcontainertype,))
        if realcontainertype._gckind != self._gckind:
            raise TypeError("become() gives conflicting gckind, use the "
                            "correct XxForwardReference")
        # in-place metamorphosis into the real type
        self.__class__ = realcontainertype.__class__
        self.__dict__ = realcontainertype.__dict__
    def __hash__(self):
        # the hash would change after become(), so forbid it
        raise TypeError("%r object is not hashable" % self.__class__.__name__)
class GcForwardReference(ForwardReference):
    """ForwardReference to a garbage-collected ('gc') container."""
    _gckind = 'gc'
class PyForwardReference(ForwardReference):
    """ForwardReference to a CPython-object ('cpy') container."""
    _gckind = 'cpy'
class FuncForwardReference(ForwardReference):
    """ForwardReference with the 'prebuilt' gc-kind."""
    _gckind = 'prebuilt'
# map gc-kind flavor name -> corresponding ForwardReference subclass
FORWARDREF_BY_FLAVOR = {'raw': ForwardReference,
                        'gc':  GcForwardReference,
                        'cpy': PyForwardReference,
                        'prebuilt': FuncForwardReference}
class Primitive(LowLevelType):
    """A non-container low-level type (integer, char, float, ...) with
    an inline default value."""
    def __init__(self, name, default):
        self._name = self.__name__ = name
        self._default = default
    def __str__(self):
        return self._name
    def _defl(self, parent=None, parentindex=None):
        return self._default
    def _allocate(self, initialization, parent=None, parentindex=None):
        # non-example allocations start out uninitialized, except Void
        if self is not Void and initialization != 'example':
            return _uninitialized(self)
        else:
            return self._default
    def _is_atomic(self):
        return True
    def _example(self, parent=None, parentindex=None):
        return self._default
class Number(Primitive):
    """A Primitive integer type backed by a Python integer class
    'type'; 'cast' (default: the class itself) converts arbitrary
    values into that class."""
    def __init__(self, name, type, cast=None):
        Primitive.__init__(self, name, type())
        self._type = type
        if cast is None:
            self._cast = type
        else:
            self._cast = cast
    def normalized(self):
        # the canonical machine-word-sized equivalent of this number type
        return build_number(None, normalizedinttype(self._type))
# cache of Number lltypes keyed by their Python integer class;
# plain 'int' and r_int share the same Signed lltype
_numbertypes = {int: Number("Signed", int, intmask)}
_numbertypes[r_int] = _numbertypes[int]
def build_number(name, type):
    """Return the (cached) Number lltype for the Python integer class
    'type', creating and registering it under 'name' if needed.
    With name=None, only already-known types are accepted."""
    if type in _numbertypes:
        return _numbertypes[type]
    if name is None:
        raise ValueError('No matching lowlevel type for %r' % type)
    number = _numbertypes[type] = Number(name, type)
    return number
# the predefined primitive low-level types
Signed           = build_number("Signed", int)
Unsigned         = build_number("Unsigned", r_uint)
SignedLongLong   = build_number("SignedLongLong", r_longlong)
UnsignedLongLong = build_number("UnsignedLongLong", r_ulonglong)
Float    = Primitive("Float", 0.0)
Char     = Primitive("Char", '\x00')
Bool     = Primitive("Bool", False)
Void     = Primitive("Void", None)
UniChar  = Primitive("UniChar", u'\x00')
class Ptr(LowLevelType):
    """Pointer-to-container low-level type; self.TO is the pointee."""
    __name__ = property(lambda self: '%sPtr' % self.TO.__name__)
    def __init__(self, TO):
        if not isinstance(TO, ContainerType):
            raise TypeError, ("can only point to a Container type, "
                              "not to %s" % (TO,))
        self.TO = TO
    def _needsgc(self):
        # XXX deprecated interface
        return self.TO._gckind not in ('raw', 'prebuilt')
    def __str__(self):
        return '* %s' % (self.TO, )
    def _short_name(self):
        return 'Ptr %s' % (self.TO._short_name(), )
    def _is_atomic(self):
        # atomic only if the pointee does not need gc tracking
        return self.TO._gckind == 'raw'
    def _defl(self, parent=None, parentindex=None):
        # default value: the null pointer
        return _ptr(self, None)
    def _allocate(self, initialization, parent=None, parentindex=None):
        if initialization == 'example':
            return _ptr(self, None)
        elif initialization == 'malloc' and self._needsgc():
            # gc pointer fields come out of malloc() as NULL
            return _ptr(self, None)
        else:
            return _uninitialized(self)
    def _example(self):
        o = self.TO._container_example()
        return _ptr(self, o, solid=True)
# ____________________________________________________________
def typeOf(val):
    """Return the low-level type of 'val': its _TYPE attribute if it
    has one, otherwise an lltype inferred from its Python type."""
    try:
        return val._TYPE
    except AttributeError:
        tp = type(val)
        if tp is _uninitialized:
            raise UninitializedMemoryAccess("typeOf uninitialized value")
        if tp is NoneType:
            return Void      # maybe
        if tp is int:
            return Signed
        if tp is long:
            # longs fitting a machine word map to Signed
            if -maxint-1 <= val <= maxint:
                return Signed
            else:
                return SignedLongLong
        if tp is bool:
            return Bool
        if issubclass(tp, base_int):
            return build_number(None, tp)
        if tp is float:
            return Float
        if tp is str:
            assert len(val) == 1    # only single characters are lltypes
            return Char
        if tp is unicode:
            assert len(val) == 1
            return UniChar
        if issubclass(tp, Symbolic):
            return val.lltype()
        raise TypeError("typeOf(%r object)" % (tp.__name__,))
# converters applied by cast_primitive() for non-Number target types
_to_primitive = {
    Char: chr,
    UniChar: unichr,
    Float: float,
    Bool: bool,
    }
def cast_primitive(TGT, value):
    """Cast 'value' (of some Primitive lltype) to the Primitive lltype
    TGT.  Char/UniChar sources go through their ordinal; Float sources
    are truncated through long().  Raises TypeError for non-primitive
    operands or unsupported targets."""
    ORIG = typeOf(value)
    if not isinstance(TGT, Primitive) or not isinstance(ORIG, Primitive):
        # bug fix: the message used to read "can only primitive to
        # primitive", missing the verb
        raise TypeError("can only cast primitive to primitive")
    if ORIG == TGT:
        return value
    if ORIG == Char or ORIG == UniChar:
        value = ord(value)
    elif ORIG == Float:
        value = long(value)
    cast = _to_primitive.get(TGT)
    if cast is not None:
        return cast(value)
    if isinstance(TGT, Number):
        return TGT._cast(value)
    raise TypeError("unsupported cast")
def _cast_whatever(TGT, value):
    """Generic cast dispatcher: pick the right cast helper (primitive,
    opaque, pointer or address) for ORIG -> TGT."""
    from pypy.rpython.lltypesystem import llmemory
    ORIG = typeOf(value)
    if ORIG == TGT:
        return value
    if (isinstance(TGT, Primitive) and
        isinstance(ORIG, Primitive)):
        return cast_primitive(TGT, value)
    elif isinstance(TGT, Ptr):
        if isinstance(ORIG, Ptr):
            if (isinstance(TGT.TO, OpaqueType) or
                isinstance(ORIG.TO, OpaqueType)):
                return cast_opaque_ptr(TGT, value)
            else:
                return cast_pointer(TGT, value)
        elif ORIG == llmemory.Address:
            return llmemory.cast_adr_to_ptr(value, TGT)
    elif TGT == llmemory.Address and isinstance(ORIG, Ptr):
        return llmemory.cast_ptr_to_adr(value)
    raise TypeError("don't know how to cast from %r to %r" % (ORIG, TGT))
def erasedType(T):
    """Follow the chain of inlined first substructures of a
    Ptr-to-Struct type and return the pointer type to the innermost
    one (the common 'header' type); other types pass through."""
    while isinstance(T, Ptr) and isinstance(T.TO, Struct):
        fieldname, FIRSTTYPE = T.TO._first_struct()
        if fieldname is None:
            return T
        T = Ptr(FIRSTTYPE)
    return T
class InvalidCast(TypeError):
    """Raised when a pointer cast is not allowed by the type system."""
    pass
def _castdepth(OUTSIDE, INSIDE):
    """Number of first-substructure levels from OUTSIDE down to
    INSIDE: 0 if they are equal, -1 if INSIDE is not reachable along
    the chain of inlined first fields."""
    if OUTSIDE == INSIDE:
        return 0
    depth = 0
    cur = OUTSIDE
    while isinstance(cur, Struct):
        first, FIRSTTYPE = cur._first_struct()
        if first is None:
            return -1
        depth += 1
        if FIRSTTYPE == INSIDE:
            return depth
        cur = getattr(cur, first)
    return -1
def castable(PTRTYPE, CURTYPE):
    """Check that a pointer of type CURTYPE may be cast to PTRTYPE.
    Returns how many first-substructure levels the cast goes *down*
    (>= 0) or, negated, how many it goes *up*; raises InvalidCast or
    TypeError if the cast is impossible."""
    if CURTYPE.TO._gckind != PTRTYPE.TO._gckind:
        raise TypeError("cast_pointer() cannot change the gc status: %s to %s"
                        % (CURTYPE, PTRTYPE))
    if CURTYPE == PTRTYPE:
        return 0
    if (not isinstance(CURTYPE.TO, (Struct, PyObjectType)) or
        not isinstance(PTRTYPE.TO, (Struct, PyObjectType))):
        raise InvalidCast(CURTYPE, PTRTYPE)
    CURSTRUC = CURTYPE.TO
    PTRSTRUC = PTRTYPE.TO
    d = _castdepth(CURSTRUC, PTRSTRUC)
    if d >= 0:
        return d
    u = _castdepth(PTRSTRUC, CURSTRUC)
    if u == -1:
        raise InvalidCast(CURTYPE, PTRTYPE)
    return -u
def cast_pointer(PTRTYPE, ptr):
    """Cast 'ptr' to the pointer type PTRTYPE (up or down the chain of
    inlined first substructures)."""
    CURTYPE = typeOf(ptr)
    if not (isinstance(CURTYPE, Ptr) and isinstance(PTRTYPE, Ptr)):
        raise TypeError("can only cast pointers to other pointers")
    return ptr._cast_to(PTRTYPE)
def cast_opaque_ptr(PTRTYPE, ptr):
    """Cast to or from an opaque pointer type: 'hide' a concrete
    container behind an opaque object or 'reveal' it again.  The
    gc-kind must be preserved and null pointers stay null."""
    CURTYPE = typeOf(ptr)
    if not isinstance(CURTYPE, Ptr) or not isinstance(PTRTYPE, Ptr):
        raise TypeError, "can only cast pointers to other pointers"
    if CURTYPE == PTRTYPE:
        return ptr
    if CURTYPE.TO._gckind != PTRTYPE.TO._gckind:
        raise TypeError("cast_opaque_ptr() cannot change the gc status: "
                        "%s to %s" % (CURTYPE, PTRTYPE))
    if (isinstance(CURTYPE.TO, OpaqueType)
        and not isinstance(PTRTYPE.TO, OpaqueType)):
        # opaque -> concrete: reveal the hidden container
        if not ptr:
            return nullptr(PTRTYPE.TO)
        try:
            container = ptr._obj.container
        except AttributeError:
            raise InvalidCast("%r does not come from a container" % (ptr,))
        solid = getattr(ptr._obj, 'solid', False)
        p = _ptr(Ptr(typeOf(container)), container, solid)
        return cast_pointer(PTRTYPE, p)
    elif (not isinstance(CURTYPE.TO, OpaqueType)
          and isinstance(PTRTYPE.TO, OpaqueType)):
        # concrete -> opaque: hide the container
        if not ptr:
            return nullptr(PTRTYPE.TO)
        return opaqueptr(PTRTYPE.TO, 'hidden', container = ptr._obj,
                                               solid     = ptr._solid)
    elif (isinstance(CURTYPE.TO, OpaqueType)
          and isinstance(PTRTYPE.TO, OpaqueType)):
        # opaque -> another opaque: re-hide the same container
        if not ptr:
            return nullptr(PTRTYPE.TO)
        try:
            container = ptr._obj.container
        except AttributeError:
            raise InvalidCast("%r does not come from a container" % (ptr,))
        return opaqueptr(PTRTYPE.TO, 'hidden',
                         container = container,
                         solid     = ptr._obj.solid)
    else:
        raise TypeError("invalid cast_opaque_ptr(): %r -> %r" %
                        (CURTYPE, PTRTYPE))
def direct_fieldptr(structptr, fieldname):
    """Get a pointer to a field in the struct.  The resulting
    pointer is actually of type Ptr(FixedSizeArray(FIELD, 1)).
    It can be used in a regular getarrayitem(0) or setarrayitem(0)
    to read or write to the field.
    """
    CURTYPE = typeOf(structptr).TO
    if not isinstance(CURTYPE, Struct):
        raise TypeError, "direct_fieldptr: not a struct"
    if fieldname not in CURTYPE._flds:
        raise TypeError, "%s has no field %r" % (CURTYPE, fieldname)
    if not structptr:
        raise RuntimeError("direct_fieldptr: NULL argument")
    # the interior pointer shares the underlying container's storage
    return _subarray._makeptr(structptr._obj, fieldname, structptr._solid)
def direct_arrayitems(arrayptr):
    """Get a pointer to the first item of the array.  The resulting
    pointer is actually of type Ptr(FixedSizeArray(ITEM, 1)) but can
    be used in a regular getarrayitem(n) or direct_ptradd(n) to access
    further elements.
    """
    CURTYPE = typeOf(arrayptr).TO
    if not isinstance(CURTYPE, (Array, FixedSizeArray)):
        raise TypeError, "direct_arrayitems: not an array"
    if not arrayptr:
        raise RuntimeError("direct_arrayitems: NULL argument")
    # interior pointer to item 0, sharing the array's storage
    return _subarray._makeptr(arrayptr._obj, 0, arrayptr._solid)
def direct_ptradd(ptr, n):
    """Shift a pointer forward or backward by n items.  The pointer must
    have been built by direct_arrayitems().
    """
    if not ptr:
        raise RuntimeError("direct_ptradd: NULL argument")
    if not isinstance(ptr._obj, _subarray):
        raise TypeError("direct_ptradd: only for direct_arrayitems() ptrs")
    # rebuild an interior pointer at the shifted index within the parent
    parent, base = parentlink(ptr._obj)
    return _subarray._makeptr(parent, base + n, ptr._solid)
def _expose(val, solid=False):
    """Return 'val' unchanged if it is a primitive value, or wrap it in
    a _ptr if it is a container, so callers only ever see pointers and
    primitives."""
    T = typeOf(val)
    if not isinstance(T, ContainerType):
        return val
    return _ptr(Ptr(T), val, solid=solid)
def parentlink(container):
    """Return (parent, index-in-parent) for an inlined container, or
    (None, None) for a top-level one."""
    parent = container._parentstructure()
    if parent is None:
        return None, None
    return parent, container._parent_index
def top_container(container):
    """Follow _parentstructure() links up to the outermost container
    and return it."""
    current = container
    parent = current._parentstructure()
    while parent is not None:
        current = parent
        parent = current._parentstructure()
    return current
def normalizeptr(p):
    """Normalize a pointer for comparison: cast it to its normalized
    container, un-hiding opaques; null pointers become None and
    primitives pass through unchanged."""
    # If p is a pointer, returns the same pointer casted to the largest
    # containing structure (for the cast where p points to the header part).
    # Also un-hides pointers to opaque. Null pointers become None.
    assert not isinstance(p, _container)  # pointer or primitive
    T = typeOf(p)
    if not isinstance(T, Ptr):
        return p      # primitive
    if not p:
        return None   # null pointer
    container = p._obj._normalizedcontainer()
    if container is not p._obj:
        p = _ptr(Ptr(typeOf(container)), container, p._solid)
    return p
class DelayedPointer(Exception):
    """Raised when dereferencing a not-yet-resolved ('delayed!')
    pointer (see _ptr._getobj)."""
    pass
class UninitializedMemoryAccess(Exception):
    """Raised when reading a value that is still an _uninitialized
    placeholder (see typeOf)."""
    pass
class _ptr(object):
__slots__ = ('_TYPE', '_T',
'_weak', '_solid',
'_obj0', '__weakref__')
def _set_TYPE(self, TYPE):
_ptr._TYPE.__set__(self, TYPE)
def _set_T(self, T):
_ptr._T.__set__(self, T)
def _set_weak(self, weak):
_ptr._weak.__set__(self, weak)
def _set_solid(self, solid):
_ptr._solid.__set__(self, solid)
def _set_obj0(self, obj):
_ptr._obj0.__set__(self, obj)
def _togckind(self):
return self._T._gckind
def _needsgc(self):
# XXX deprecated interface
return self._TYPE._needsgc() # xxx other rules?
def __init__(self, TYPE, pointing_to, solid=False):
self._set_TYPE(TYPE)
self._set_T(TYPE.TO)
self._set_weak(False)
self._setobj(pointing_to, solid)
def _become(self, other):
assert self._TYPE == other._TYPE
assert not self._weak
self._setobj(other._obj, other._solid)
def __eq__(self, other):
if not isinstance(other, _ptr):
raise TypeError("comparing pointer with %r object" % (
type(other).__name__,))
if self._TYPE != other._TYPE:
raise TypeError("comparing %r and %r" % (self._TYPE, other._TYPE))
return self._obj == other._obj
def __ne__(self, other):
return not (self == other)
def _same_obj(self, other):
return self._obj == other._obj
def __hash__(self):
raise TypeError("pointer objects are not hashable")
def __nonzero__(self):
try:
return self._obj is not None
except DelayedPointer:
return True # assume it's not a delayed null
# _setobj, _getobj and _obj0 are really _internal_ implementations details of _ptr,
# use _obj if necessary instead !
def _setobj(self, pointing_to, solid=False):
if pointing_to is None:
obj0 = None
elif (solid or self._T._gckind != 'raw' or
isinstance(self._T, FuncType)):
obj0 = pointing_to
else:
self._set_weak(True)
obj0 = weakref.ref(pointing_to)
self._set_solid(solid)
self._set_obj0(obj0)
def _getobj(self):
obj = self._obj0
if obj is not None:
if self._weak:
obj = obj()
if obj is None:
raise RuntimeError("accessing already garbage collected %r"
% (self._T,))
if isinstance(obj, _container):
obj._check()
elif isinstance(obj, str) and obj.startswith("delayed!"):
raise DelayedPointer
return obj
_obj = property(_getobj)
def __getattr__(self, field_name): # ! can only return basic or ptr !
if isinstance(self._T, Struct):
if field_name in self._T._flds:
o = self._obj._getattr(field_name)
return _expose(o, self._solid)
if isinstance(self._T, ContainerType):
try:
adtmeth = self._T._adtmeths[field_name]
except KeyError:
pass
else:
try:
getter = adtmeth.__get__
except AttributeError:
return adtmeth
else:
return getter(self)
raise AttributeError("%r instance has no field %r" % (self._T,
field_name))
#def _setfirst(self, p):
# if isinstance(self._T, Struct) and self._T._names:
# if not isinstance(p, _ptr) or not isinstance(p._obj, _struct):
# raise InvalidCast(typeOf(p), typeOf(self))
# field_name = self._T._names[0]
# T1 = self._T._flds[field_name]
# T2 = typeOf(p._obj)
# if T1 != T2:
# raise InvalidCast(typeOf(p), typeOf(self))
# setattr(self._obj, field_name, p._obj)
# p._obj._setparentstructure(self._obj, 0)
# return
# raise TypeError("%r instance has no first field" % (self._T,))
    def __setattr__(self, field_name, val):
        """Write a struct field; the value's low-level type must match
        the field's declared type exactly."""
        if isinstance(self._T, Struct):
            if field_name in self._T._flds:
                T1 = self._T._flds[field_name]
                T2 = typeOf(val)
                if T1 == T2:
                    setattr(self._obj, field_name, val)
                else:
                    raise TypeError("%r instance field %r:\n"
                                    "expects %r\n"
                                    "  got %r" % (self._T, field_name, T1, T2))
                return
        raise AttributeError("%r instance has no field %r" % (self._T,
                                                              field_name))
def __getitem__(self, i): # ! can only return basic or ptr !
if isinstance(self._T, (Array, FixedSizeArray)):
start, stop = self._obj.getbounds()
if not (start <= i < stop):
if isinstance(i, slice):
raise TypeError("array slicing not supported")
raise IndexError("array index out of bounds")
o = self._obj.getitem(i)
return _expose(o, self._solid)
raise TypeError("%r instance is not an array" % (self._T,))
def __setitem__(self, i, val):
if isinstance(self._T, (Array, FixedSizeArray)):
T1 = self._T.OF
if isinstance(T1, ContainerType):
raise TypeError("cannot directly assign to container array items")
T2 = typeOf(val)
if T2 != T1:
raise TypeError("%r items:\n"
"expect %r\n"
" got %r" % (self._T, T1, T2))
start, stop = self._obj.getbounds()
if not (start <= i < stop):
if isinstance(i, slice):
raise TypeError("array slicing not supported")
raise IndexError("array index out of bounds")
self._obj.setitem(i, val)
return
raise TypeError("%r instance is not an array" % (self._T,))
def __len__(self):
if isinstance(self._T, (Array, FixedSizeArray)):
if self._T._hints.get('nolength', False):
raise TypeError("%r instance has no length attribute" %
(self._T,))
return self._obj.getlength()
raise TypeError("%r instance is not an array" % (self._T,))
def _fixedlength(self):
length = len(self) # always do this, for the checking
if isinstance(self._T, FixedSizeArray):
return length
else:
return None
def __iter__(self):
# this is a work-around for the 'isrpystring' hack in __getitem__,
# which otherwise causes list(p) to include the extra \x00 character.
for i in range(len(self)):
yield self[i]
    def __repr__(self):
        # angle-bracketed version of __str__
        return '<%s>' % (self,)
    def __str__(self):
        """'* <target>'; freed raw memory prints as '* DEAD <T>'."""
        try:
            return '* %s' % (self._obj, )
        except RuntimeError:
            return '* DEAD %s' % self._T
        except DelayedPointer:
            return '* %s' % (self._obj0,)
def __call__(self, *args):
if isinstance(self._T, FuncType):
if len(args) != len(self._T.ARGS):
raise TypeError,"calling %r with wrong argument number: %r" % (self._T, args)
for a, ARG in zip(args, self._T.ARGS):
if typeOf(a) != ARG:
raise TypeError,"calling %r with wrong argument types: %r" % (self._T, args)
callb = self._obj._callable
if callb is None:
raise RuntimeError,"calling undefined function"
return callb(*args)
raise TypeError("%r instance is not a function" % (self._T,))
    def _cast_to(self, PTRTYPE):
        """Cast this pointer along the first-field inlining chain.

        castable() returns 0 for a no-op cast, a positive count to go
        down (into the first inlined substructure), or a negative count
        to widen up to an enclosing parent structure.
        """
        CURTYPE = self._TYPE
        down_or_up = castable(PTRTYPE, CURTYPE)
        if down_or_up == 0:
            return self
        if not self: # null pointer cast
            return PTRTYPE._defl()
        if isinstance(self._obj, int):
            # cast_int_to_ptr() result: just re-wrap the integer
            return _ptr(PTRTYPE, self._obj, solid=True)
        if down_or_up > 0:
            # narrowing: follow the first field repeatedly
            p = self
            while down_or_up:
                p = getattr(p, typeOf(p).TO._names[0])
                down_or_up -= 1
            return _ptr(PTRTYPE, p._obj, solid=self._solid)
        # widening: climb the parent chain, checking at each step that we
        # really are the first inlined substructure of the parent
        u = -down_or_up
        struc = self._obj
        while u:
            parent = struc._parentstructure()
            if parent is None:
                raise RuntimeError("widening to trash: %r" % self)
            PARENTTYPE = struc._parent_type
            if getattr(parent, PARENTTYPE._names[0]) is not struc:
                raise InvalidCast(CURTYPE, PTRTYPE) # xxx different exception perhaps?
            struc = parent
            u -= 1
        if PARENTTYPE != PTRTYPE.TO:
            raise RuntimeError("widening %r inside %r instead of %r" % (CURTYPE, PARENTTYPE, PTRTYPE.TO))
        return _ptr(PTRTYPE, struc, solid=self._solid)
    def _cast_to_int(self):
        """Simulate casting this pointer to an integer 'address'."""
        if not self:
            return 0 # NULL pointer
        obj = self._obj
        if isinstance(obj, int):
            return obj # special case for cast_int_to_ptr() results
        obj = normalizeptr(self)._obj
        result = intmask(obj._getid())
        # assume that id() returns an addressish value which is
        # not zero and aligned to at least a multiple of 4
        assert result != 0 and (result & 3) == 0
        return result
    def _cast_to_adr(self):
        """Return a llmemory.fakeaddress for this pointer."""
        from pypy.rpython.lltypesystem import llmemory
        if isinstance(self._T, FuncType):
            return llmemory.fakeaddress(self)
        elif isinstance(self._obj, _subarray):
            # NOTE: a plain fakeaddress of the subarray pointer; see the
            # commented-out offset-based alternative below
            return llmemory.fakeaddress(self)
##            # return an address built as an offset in the whole array
##            parent, parentindex = parentlink(self._obj)
##            T = typeOf(parent)
##            addr = llmemory.fakeaddress(normalizeptr(_ptr(Ptr(T), parent)))
##            addr += llmemory.itemoffsetof(T, parentindex)
##            return addr
        else:
            # normal case
            return llmemory.fakeaddress(normalizeptr(self))
    def _as_ptr(self):
        # a _ptr already is one
        return self
    def _as_obj(self):
        # unwrap to the underlying container object
        return self._obj
assert not '__dict__' in dir(_ptr)
class _container(object):
    """Abstract base for simulated containers (structs, arrays,
    functions, opaques).  Defaults model a parentless, always-alive
    object."""
    __slots__ = ()
    def _parentstructure(self):
        # no enclosing structure by default
        return None
    def _check(self):
        # liveness check: nothing can be freed by default
        pass
    def _as_ptr(self):
        return _ptr(Ptr(self._TYPE), self, True)
    def _as_obj(self):
        return self
    def _normalizedcontainer(self):
        return self
    def _getid(self):
        return id(self)
class _parentable(_container):
    """A container that may live inlined inside a parent structure.

    Keeps only a weakref to its parent by default; a strong reference is
    kept when both share one allocation (first inlined field, same gc
    kind), so the parent cannot die before the child.
    """
    _kind = "?"
    __slots__ = ('_TYPE',
                 '_parent_type', '_parent_index', '_keepparent',
                 '_wrparent',
                 '__weakref__',
                 '_dead',
                 '_ctypes_storage')
    def __init__(self, TYPE):
        self._wrparent = None
        self._TYPE = TYPE
        self._dead = False
        self._ctypes_storage = None
    def _free(self):
        self._check()    # no double-frees
        self._dead = True
        self._ctypes_storage = None
    def _setparentstructure(self, parent, parentindex):
        self._wrparent = weakref.ref(parent)
        self._parent_type = typeOf(parent)
        self._parent_index = parentindex
        if (isinstance(self._parent_type, Struct)
            and parentindex in (self._parent_type._names[0], 0)
            and self._TYPE._gckind == typeOf(parent)._gckind):
            # keep strong reference to parent, we share the same allocation
            self._keepparent = parent
    def _parentstructure(self):
        # dereference the parent weakref, checking it is still alive
        if self._wrparent is not None:
            parent = self._wrparent()
            if parent is None:
                raise RuntimeError("accessing sub%s %r,\n"
                                   "but already garbage collected parent %r"
                                   % (self._kind, self, self._parent_type))
            parent._check()
            return parent
        return None
    def _check(self):
        # dead (freed) containers and collected parents are both errors
        if self._dead:
            raise RuntimeError("accessing freed %r" % self._TYPE)
        self._parentstructure()
    def _normalizedcontainer(self):
        # if we are the first inlined substructure of a structure,
        # return the whole (larger) structure instead
        container = self
        while True:
            parent, index = parentlink(container)
            if parent is None:
                break
            T = typeOf(parent)
            if not isinstance(T, Struct) or T._first_struct()[0] != index:
                break
            container = parent
        return container
    def _setup_extra_args(self):
        pass
    def __eq__(self, other):
        if not isinstance(other, _parentable):
            return False
        if (self._ctypes_storage is not None and
            other._ctypes_storage is not None):
            # both mapped to ctypes memory: compare the storages
            return self._ctypes_storage._eq(other._ctypes_storage)
        else:
            return self is other
    def __ne__(self, other):
        return not (self == other)
def _struct_variety(flds, cache={}):
    """Return (and memoize) a _struct subclass whose __slots__ are
    exactly the given field names.

    The mutable default 'cache' is intentional: it memoizes across
    calls, keyed by the sorted field-name tuple.
    """
    tag = tuple(sorted(flds))
    if tag not in cache:
        class _struct1(_struct):
            __slots__ = list(tag)
        cache[tag] = _struct1
    return cache[tag]
#for pickling support:
def _get_empty_instance_of_struct_variety(flds):
    """Pickling support: recreate an uninitialized _struct of the right
    variety without running __init__."""
    return object.__new__(_struct_variety(flds))
class _struct(_parentable):
    """Simulated instance of a Struct type.

    The concrete Python class is a per-field-set 'variety' created by
    _struct_variety(), so that __slots__ matches the struct's fields.
    """
    _kind = "structure"
    __slots__ = ()
    def __new__(self, TYPE, n=None, initialization=None, parent=None, parentindex=None):
        # NOTE: 'self' here is really the class (historical naming)
        my_variety = _struct_variety(TYPE._names)
        return object.__new__(my_variety)
    def __init__(self, TYPE, n=None, initialization=None, parent=None, parentindex=None):
        """Allocate all fields; 'n' sizes the single inlined
        variable-sized array field, if any."""
        _parentable.__init__(self, TYPE)
        if n is not None and TYPE._arrayfld is None:
            raise TypeError("%r is not variable-sized" % (TYPE,))
        if n is None and TYPE._arrayfld is not None:
            raise TypeError("%r is variable-sized" % (TYPE,))
        first, FIRSTTYPE = TYPE._first_struct()
        for fld, typ in TYPE._flds.items():
            if fld == TYPE._arrayfld:
                value = _array(typ, n, initialization=initialization, parent=self, parentindex=fld)
            else:
                value = typ._allocate(initialization=initialization, parent=self, parentindex=fld)
            setattr(self, fld, value)
        if parent is not None:
            self._setparentstructure(parent, parentindex)
    def __repr__(self):
        return '<%s>' % (self,)
    def _str_fields(self):
        # render at most a handful of fields, eliding the middle of
        # structs with more than 10 fields
        fields = []
        names = self._TYPE._names
        if len(names) > 10:
            names = names[:5] + names[-1:]
            skipped_after = 5
        else:
            skipped_after = None
        for name in names:
            T = self._TYPE._flds[name]
            if isinstance(T, Primitive):
                reprvalue = repr(getattr(self, name, '<uninitialized>'))
            else:
                reprvalue = '...'
            fields.append('%s=%s' % (name, reprvalue))
        if skipped_after:
            fields.insert(skipped_after, '(...)')
        return ', '.join(fields)
    def __str__(self):
        return 'struct %s { %s }' % (self._TYPE._name, self._str_fields())
    def _getattr(self, field_name, uninitialized_ok=False):
        # field read with an uninitialized-memory check
        r = getattr(self, field_name)
        if isinstance(r, _uninitialized) and not uninitialized_ok:
            raise UninitializedMemoryAccess("%r.%s"%(self, field_name))
        return r
    def __getattr__(self, field_name):
        # only reached for missing attributes: maybe ctypes-backed storage
        if self._ctypes_storage is not None:
            return self._ctypes_storage._getattr(field_name)
        raise AttributeError(field_name)
    def __setattr__(self, field_name, value):
        # private attributes always go through normal assignment; real
        # fields are redirected to the ctypes storage when present
        if field_name.startswith('_') or self._ctypes_storage is None:
            _parentable.__setattr__(self, field_name, value)
        else:
            self._ctypes_storage._setattr(field_name, value)
    # for FixedSizeArray kind of structs:
    def getlength(self):
        assert isinstance(self._TYPE, FixedSizeArray)
        return self._TYPE.length
    def getbounds(self):
        return 0, self.getlength()
    def getitem(self, index, uninitialized_ok=False):
        # FixedSizeArray items are stored as fields named 'item0', ...
        assert isinstance(self._TYPE, FixedSizeArray)
        return self._getattr('item%d' % index, uninitialized_ok)
    def setitem(self, index, value):
        assert isinstance(self._TYPE, FixedSizeArray)
        setattr(self, 'item%d' % index, value)
    def _setup_extra_args(self, *args):
        # forward extra malloc() args to the first inlined substructure
        fieldname, FIELDTYPE = self._TYPE._first_struct()
        if fieldname is not None:
            getattr(self, fieldname)._setup_extra_args(*args)
        else:
            assert not args
class _array(_parentable):
    """Simulated instance of an Array type; items live in a Python
    list.  Arrays with the 'isrpystring' hint expose one extra read-only
    null terminator past the end."""
    _kind = "array"
    __slots__ = ('items',)
    def __init__(self, TYPE, n, initialization=None, parent=None, parentindex=None):
        if not isinstance(n, int):
            raise TypeError, "array length must be an int"
        if n < 0:
            raise ValueError, "negative array length"
        _parentable.__init__(self, TYPE)
        self.items = [TYPE.OF._allocate(initialization=initialization, parent=self, parentindex=j)
                      for j in range(n)]
        if parent is not None:
            self._setparentstructure(parent, parentindex)
    def __repr__(self):
        return '<%s>' % (self,)
    def _str_item(self, item):
        # '#' marks an uninitialized item
        if isinstance(item, _uninitialized):
            return '#'
        if isinstance(self._TYPE.OF, Struct):
            of = self._TYPE.OF
            if self._TYPE._anonym_struct:
                return "{%s}" % item._str_fields()
            else:
                return "%s {%s}" % (of._name, item._str_fields())
        else:
            return repr(item)
    def __str__(self):
        # elide the middle of arrays longer than 20 items
        items = self.items
        if len(items) > 20:
            items = items[:12] + items[-5:]
            skipped_at = 12
        else:
            skipped_at = None
        items = [self._str_item(item) for item in items]
        if skipped_at:
            items.insert(skipped_at, '(...)')
        return 'array [ %s ]' % (', '.join(items),)
    def getlength(self):
        return len(self.items)
    def getbounds(self):
        stop = len(self.items)
        if self._TYPE._hints.get('isrpystring', False):
            # special hack for the null terminator
            assert self._TYPE.OF == Char
            stop += 1
        return 0, stop
    def getitem(self, index, uninitialized_ok=False):
        # reads go to the ctypes storage when the array is mapped there
        if self._ctypes_storage is not None:
            return self._ctypes_storage._getitem(index)
        try:
            v = self.items[index]
            if isinstance(v, _uninitialized) and not uninitialized_ok:
                raise UninitializedMemoryAccess("%r[%s]"%(self, index))
            return v
        except IndexError:
            if (self._TYPE._hints.get('isrpystring', False) and
                index == len(self.items)):
                # special hack for the null terminator
                assert self._TYPE.OF == Char
                return '\x00'
            raise
    def setitem(self, index, value):
        if self._ctypes_storage is not None:
            self._ctypes_storage._setitem(index, value)
            return
        try:
            self.items[index] = value
        except IndexError:
            if (self._TYPE._hints.get('isrpystring', False) and
                index == len(self.items)):
                # special hack for the null terminator: can overwrite it
                # with another null
                assert value == '\x00'
                return
            raise
assert not '__dict__' in dir(_array)
assert not '__dict__' in dir(_struct)
class _subarray(_parentable): # only for cast_subarray_pointer()
                              # and cast_structfield_pointer()
    """Length-1 array view over one array item or one struct field.

    _parent_index is either an int base offset (array-item case) or a
    string field name (struct-field case).
    """
    _kind = "subarray"
    _cache = weakref.WeakKeyDictionary()  # parentarray -> {subarrays}
    def __init__(self, TYPE, parent, baseoffset_or_fieldname):
        _parentable.__init__(self, TYPE)
        self._setparentstructure(parent, baseoffset_or_fieldname)
    def __repr__(self):
        return '<_subarray at %r in %r>' % (self._parent_index,
                                            self._parentstructure())
    def getlength(self):
        assert isinstance(self._TYPE, FixedSizeArray)
        return self._TYPE.length
    def getbounds(self):
        baseoffset = self._parent_index
        if isinstance(baseoffset, str):
            return 0, 1     # structfield case
        # array case: bounds expressed relative to the base offset
        start, stop = self._parentstructure().getbounds()
        return start - baseoffset, stop - baseoffset
    def getitem(self, index, uninitialized_ok=False):
        baseoffset = self._parent_index
        if isinstance(baseoffset, str):
            assert index == 0
            fieldname = baseoffset    # structfield case
            return getattr(self._parentstructure(), fieldname)
        else:
            return self._parentstructure().getitem(baseoffset + index,
                                                   uninitialized_ok=uninitialized_ok)
    def setitem(self, index, value):
        baseoffset = self._parent_index
        if isinstance(baseoffset, str):
            assert index == 0
            fieldname = baseoffset    # structfield case
            setattr(self._parentstructure(), fieldname, value)
        else:
            self._parentstructure().setitem(baseoffset + index, value)
    def _makeptr(parent, baseoffset_or_fieldname, solid=False):
        # memoized: one _subarray per (parent, offset-or-fieldname)
        cache = _subarray._cache.setdefault(parent, {})
        try:
            subarray = cache[baseoffset_or_fieldname]
        except KeyError:
            PARENTTYPE = typeOf(parent)
            if isinstance(baseoffset_or_fieldname, str):
                # for direct_fieldptr
                ITEMTYPE = getattr(PARENTTYPE, baseoffset_or_fieldname)
            else:
                # for direct_arrayitems
                ITEMTYPE = PARENTTYPE.OF
            ARRAYTYPE = FixedSizeArray(ITEMTYPE, 1)
            subarray = _subarray(ARRAYTYPE, parent, baseoffset_or_fieldname)
            cache[baseoffset_or_fieldname] = subarray
        return _ptr(Ptr(subarray._TYPE), subarray, solid)
    _makeptr = staticmethod(_makeptr)
    def _getid(self):
        raise NotImplementedError('_subarray._getid()')
class _arraylenref(_parentable):
    """Pseudo-reference to the length field of an array.
    Only used internally by llmemory to implement ArrayLengthOffset.
    """
    _kind = "arraylenptr"
    _cache = weakref.WeakKeyDictionary()  # array -> _arraylenref
    def __init__(self, array):
        # presents itself as a one-item array of Signed
        TYPE = FixedSizeArray(Signed, 1)
        _parentable.__init__(self, TYPE)
        self.array = array
    def getlength(self):
        return 1
    def getbounds(self):
        return 0, 1
    def getitem(self, index, uninitialized_ok=False):
        assert index == 0
        return self.array.getlength()
    def setitem(self, index, value):
        # the length is read-only: only a no-op rewrite is accepted
        assert index == 0
        if value != self.array.getlength():
            raise Exception("can't change the length of an array")
    def _makeptr(array, solid=False):
        # memoized: one _arraylenref per array
        try:
            lenref = _arraylenref._cache[array]
        except KeyError:
            lenref = _arraylenref(array)
            _arraylenref._cache[array] = lenref
        return _ptr(Ptr(lenref._TYPE), lenref, solid)
    _makeptr = staticmethod(_makeptr)
    def _getid(self):
        raise NotImplementedError('_arraylenref._getid()')
class _func(_container):
    """Simulated low-level function; extra attributes come in **attrs
    (commonly 'graph' and/or '_callable')."""
    def __init__(self, TYPE, **attrs):
        self._TYPE = TYPE
        self._name = "?"
        self._callable = None
        self.__dict__.update(attrs)
    def __repr__(self):
        return '<%s>' % (self,)
    def __str__(self):
        return "fn %s" % self._name
    def __eq__(self, other):
        # structural equality over all attributes
        return (self.__class__ is other.__class__ and
                self.__dict__ == other.__dict__)
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        return hash(frozendict(self.__dict__))
    def _getid(self):
        # prefer an id tied to the graph or the callable when available
        if hasattr(self, 'graph'):
            return id(self.graph)
        elif self._callable:
            return id(self._callable)
        else:
            return id(self)
class _opaque(_parentable):
    """Simulated opaque value; may wrap a real container via the
    optional 'container' attribute (supplied through **attrs)."""
    def __init__(self, TYPE, parent=None, parentindex=None, **attrs):
        _parentable.__init__(self, TYPE)
        self._name = "?"
        self.__dict__.update(attrs)
        if parent is not None:
            self._setparentstructure(parent, parentindex)
    def __repr__(self):
        return '<%s>' % (self,)
    def __str__(self):
        return "%s %s" % (self._TYPE.__name__, self._name)
    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        if hasattr(self, 'container') and hasattr(other, 'container'):
            # compare what the opaques wrap, after normalization
            obj1 = self.container._normalizedcontainer()
            obj2 = other.container._normalizedcontainer()
            return obj1 == obj2
        else:
            return self is other
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        # hash consistently with __eq__: by the wrapped container
        if hasattr(self, 'container'):
            obj = self.container._normalizedcontainer()
            return hash(obj)
        else:
            return _parentable.__hash__(self)
    def _normalizedcontainer(self):
        # if we are an opaque containing a normal Struct/GcStruct,
        # unwrap it
        if hasattr(self, 'container'):
            return self.container._normalizedcontainer()
        else:
            return _parentable._normalizedcontainer(self)
class _pyobject(Hashable, _container):
    """Simulated PyObject wrapping a real Python value (in 'value',
    inherited from Hashable)."""
    __slots__ = []   # or we get in trouble with pickling
    _TYPE = PyObject
    def __repr__(self):
        return '<%s>' % (self,)
    def __str__(self):
        return "pyobject %s" % (Hashable.__str__(self),)
    def _getid(self):
        return id(self.value)
class _pyobjheader(_parentable):
    """PyObject header part, inlinable at the start of other structures."""
    def __init__(self, parent=None, parentindex=None):
        _parentable.__init__(self, PyObject)
        if parent is not None:
            self._setparentstructure(parent, parentindex)
    # the extra attributes 'ob_type' and 'setup_fnptr' are
    # not set by __init__(), but by malloc(extra_args=(...))
    def _setup_extra_args(self, ob_type, setup_fnptr=None):
        assert typeOf(ob_type) == Ptr(PyObject)
        self.ob_type = ob_type
        self.setup_fnptr = setup_fnptr
    def __repr__(self):
        return '<%s>' % (self,)
    def __str__(self):
        return "pyobjheader of type %r" % (getattr(self, 'ob_type', '???'),)
def malloc(T, n=None, flavor='gc', immortal=False, extra_args=(), zero=False):
    """Simulate malloc() of a Struct or Array type 'T'.

    'n' sizes a variable-sized structure/array; 'flavor' is 'gc', 'raw'
    or 'cpy'; immortal or non-gc results give solid pointers (never
    weakly referenced).  Raises TypeError for other types or for a gc
    malloc of a non-GC, non-immortal structure.
    (raise statements converted to the call form used elsewhere in this
    file; behavior unchanged.)
    """
    if zero or immortal or flavor == 'cpy':
        initialization = 'example'
    elif flavor == 'raw':
        initialization = 'raw'
    else:
        initialization = 'malloc'
    if isinstance(T, Struct):
        o = _struct(T, n, initialization=initialization)
    elif isinstance(T, Array):
        o = _array(T, n, initialization=initialization)
    else:
        raise TypeError("malloc for Structs and Arrays only")
    if T._gckind != 'gc' and not immortal and flavor.startswith('gc'):
        raise TypeError("gc flavor malloc of a non-GC non-immortal structure")
    o._setup_extra_args(*extra_args)
    solid = immortal or not flavor.startswith('gc') # immortal or non-gc case
    return _ptr(Ptr(T), o, solid)
def free(p, flavor):
    """Free a raw-flavor allocation; gc-flavor pointers cannot be freed.
    (raise statements converted to the call form; behavior unchanged.)"""
    if flavor.startswith('gc'):
        raise TypeError("gc flavor free")
    T = typeOf(p)
    if not isinstance(T, Ptr) or p._togckind() != 'raw':
        raise TypeError("free(): only for pointers to non-gc containers")
    p._obj0._free()
def functionptr(TYPE, name, **attrs):
    """Build a Ptr(FuncType) to a fresh _func named 'name'.

    **attrs must all be hashable (they take part in _func.__hash__).
    (raise statements converted to the call form; behavior unchanged.)
    """
    if not isinstance(TYPE, FuncType):
        raise TypeError("functionptr() for FuncTypes only")
    try:
        hash(tuple(attrs.items()))
    except TypeError:
        raise TypeError("'%r' must be hashable"%attrs)
    o = _func(TYPE, _name=name, **attrs)
    return _ptr(Ptr(TYPE), o)
def nullptr(T):
    """Return the NULL pointer of type Ptr(T)."""
    PTRTYPE = Ptr(T)
    return PTRTYPE._defl()
def opaqueptr(TYPE, name, **attrs):
    """Build a solid Ptr(OpaqueType) wrapping a fresh _opaque.
    (raise statement converted to the call form; behavior unchanged.)"""
    if not isinstance(TYPE, OpaqueType):
        raise TypeError("opaqueptr() for OpaqueTypes only")
    o = _opaque(TYPE, _name=name, **attrs)
    return _ptr(Ptr(TYPE), o, solid=True)
def pyobjectptr(obj):
    """Wrap a real Python object as a Ptr(PyObject)."""
    o = _pyobject(obj)
    return _ptr(Ptr(PyObject), o)
def cast_ptr_to_int(ptr):
    """Delegate to the pointer's own _cast_to_int()."""
    return ptr._cast_to_int()
def cast_int_to_ptr(PTRTYPE, oddint):
    """Rebuild a pointer of type PTRTYPE from an integer; only odd
    values are accepted (even ones cannot come from pointer tricks)."""
    assert oddint & 1, "only odd integers can be cast back to ptr"
    return _ptr(PTRTYPE, oddint, solid=True)
def attachRuntimeTypeInfo(GCSTRUCT, funcptr=None, destrptr=None):
    """Attach (or update) the runtime type info of a RttiStruct and
    return a pointer to it.
    (raise statement converted to the call form; behavior unchanged.)"""
    if not isinstance(GCSTRUCT, RttiStruct):
        raise TypeError("expected a RttiStruct: %s" % GCSTRUCT)
    GCSTRUCT._attach_runtime_type_info_funcptr(funcptr, destrptr)
    return _ptr(Ptr(RuntimeTypeInfo), GCSTRUCT._runtime_type_info)
def getRuntimeTypeInfo(GCSTRUCT):
    """Return the rtti pointer previously attached to GCSTRUCT;
    ValueError if none was attached.
    (raise statements converted to the call form; behavior unchanged.)"""
    if not isinstance(GCSTRUCT, RttiStruct):
        raise TypeError("expected a RttiStruct: %s" % GCSTRUCT)
    if GCSTRUCT._runtime_type_info is None:
        raise ValueError("no attached runtime type info for GcStruct %s" %
                         GCSTRUCT._name)
    return _ptr(Ptr(RuntimeTypeInfo), GCSTRUCT._runtime_type_info)
def runtime_type_info(p):
    """Return the rtti of the exact (top-most) type of the structure 'p'
    points into, cross-checked against the attached query function when
    one is present.
    (raise statements converted to the call form; behavior unchanged.)
    """
    T = typeOf(p)
    if not isinstance(T, Ptr) or not isinstance(T.TO, RttiStruct):
        raise TypeError("runtime_type_info on non-RttiStruct pointer: %s" % p)
    struct = p._obj
    top_parent = top_container(struct)
    result = getRuntimeTypeInfo(top_parent._TYPE)
    static_info = getRuntimeTypeInfo(T.TO)
    query_funcptr = getattr(static_info._obj, 'query_funcptr', None)
    if query_funcptr is not None:
        # sanity check: the query function must agree with the parent walk
        T = typeOf(query_funcptr).TO.ARGS[0]
        result2 = query_funcptr(cast_pointer(T, p))
        if result != result2:
            raise RuntimeError("runtime type-info function for %s:\n"
                               "  returned: %s,\n"
                               "should have been: %s" % (p, result2, result))
    return result
def isCompatibleType(TYPE1, TYPE2):
    """Delegate compatibility checking to TYPE1."""
    return TYPE1._is_compatible(TYPE2)
def enforce(TYPE, value):
    """Delegate value coercion/checking to TYPE."""
    return TYPE._enforce(value)
# mark type ADT methods
def typeMethod(func):
    """Decorator: flag 'func' as a type ADT method and hand it back."""
    setattr(func, '_type_method', True)
    return func
class staticAdtMethod(object):
    """Like staticmethod(), but for ADT methods.

    The difference is only that this version compares and hashes by the
    wrapped object, unlike CPython's staticmethod.
    """
    def __init__(self, obj):
        self.obj = obj

    def __get__(self, inst, typ=None):
        # binding is a no-op: always hand back the wrapped object
        return self.obj

    def __hash__(self):
        return hash(self.obj)

    def __eq__(self, other):
        if isinstance(other, staticAdtMethod):
            return self.obj == other.obj
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, staticAdtMethod):
            return self.obj != other.obj
        return NotImplemented
def dissect_ll_instance(v, t=None, memo=None):
    """Yield (lltype, value) pairs for 'v' and everything reachable from
    it: the pointed-to container, parent structures, struct fields and
    array items.  'memo' prevents infinite recursion on cycles (by id).
    """
    if memo is None:
        memo = {}
    if id(v) in memo:
        return
    memo[id(v)] = True
    if t is None:
        t = typeOf(v)
    yield t, v
    if isinstance(t, Ptr):
        if v._obj:
            for i in dissect_ll_instance(v._obj, t.TO, memo):
                yield i
    elif isinstance(t, Struct):
        parent = v._parentstructure()
        if parent:
            for i in dissect_ll_instance(parent, typeOf(parent), memo):
                yield i
        # (an unused local 'f = getattr(t, n)' was removed here)
        for n in t._flds:
            for i in dissect_ll_instance(getattr(v, n), t._flds[n], memo):
                yield i
    elif isinstance(t, Array):
        for item in v.items:
            for i in dissect_ll_instance(item, t.OF, memo):
                yield i
| Python |
import sys
import math
from pypy.tool.sourcetools import func_with_new_name
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.lltypesystem.lloperation import opimpls
# ____________________________________________________________
# Implementation of the 'canfold' operations
# implementations of ops from flow.operation
# op-name suffixes of comparison-like operations: their result is a
# Bool and must not be passed through intmask()
ops_returning_a_bool = {'gt': True, 'ge': True,
                        'lt': True, 'le': True,
                        'eq': True, 'ne': True,
                        'is_true': True}
# op-name suffixes of one-argument operations
ops_unary = {'is_true': True, 'neg': True, 'abs': True, 'invert': True}
# global synonyms for some types
from pypy.rlib.rarithmetic import intmask
from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong
# maps a primitive-op type prefix to the Python type used to check args
type_by_name = {
    'int': int,
    'float': float,
    'uint': r_uint,
    'llong': r_longlong,
    'ullong': r_ulonglong,
    }
def no_op(x):
    """Identity: used when an op result needs no adjustment."""
    result = x
    return result
def get_primitive_op_src(fullopname):
    """Build an 'op_xxx' implementation for a primitive op name of the
    form '<typ>_<op>' (e.g. 'int_add', 'char_lt').

    The underlying callable comes from opimpls; int results (except
    comparisons) are truncated with intmask().  Raises AssertionError
    for unknown op or type names.
    """
    assert '_' in fullopname, "%s: not a primitive op" % (fullopname,)
    typname, opname = fullopname.split('_', 1)
    if opname not in opimpls and (opname + '_') in opimpls:
        func = opimpls[opname + '_']     # or_, and_
    else:
        assert opname in opimpls, "%s: not a primitive op" % (fullopname,)
        func = opimpls[opname]

    if typname == 'char':
        # char_lt, char_eq, ...
        def op_function(x, y):
            if not isinstance(x, str) or len(x) != 1:
                # bug fix: the format string has two placeholders, so it
                # must get two values (the extra 'typname' used to make
                # the raise itself crash with a formatting TypeError)
                raise TypeError("%r arg must be a char, got %r instead" % (
                    fullopname, type(x).__name__))
            if not isinstance(y, str) or len(y) != 1:
                raise TypeError("%r arg must be a char, got %r instead" % (
                    fullopname, type(y).__name__))
            return func(x, y)

    else:
        if typname == 'int' and opname not in ops_returning_a_bool:
            adjust_result = intmask
        else:
            adjust_result = no_op
        assert typname in type_by_name, "%s: not a primitive op" % (
            fullopname,)
        argtype = type_by_name[typname]

        if opname in ops_unary:
            def op_function(x):
                if not isinstance(x, argtype):
                    raise TypeError("%r arg must be %s, got %r instead" % (
                        fullopname, typname, type(x).__name__))
                return adjust_result(func(x))
        else:
            def op_function(x, y):
                if not isinstance(x, argtype):
                    raise TypeError("%r arg 1 must be %s, got %r instead" % (
                        fullopname, typname, type(x).__name__))
                if not isinstance(y, argtype):
                    raise TypeError("%r arg 2 must be %s, got %r instead" % (
                        fullopname, typname, type(y).__name__))
                return adjust_result(func(x, y))

    return func_with_new_name(op_function, 'op_' + fullopname)
def checkptr(ptr):
    """Guard: raise TypeError unless 'ptr' is an lltype pointer."""
    T = lltype.typeOf(ptr)
    if not isinstance(T, lltype.Ptr):
        raise TypeError("arg must be a pointer, got %r instead" % (T,))
def checkadr(adr):
    """Guard: raise TypeError unless 'adr' is an llmemory.Address."""
    T = lltype.typeOf(adr)
    if T is not llmemory.Address:
        raise TypeError("arg must be an address, got %r instead" % (T,))
def op_ptr_eq(ptr1, ptr2):
checkptr(ptr1)
checkptr(ptr2)
return ptr1 == ptr2
def op_ptr_ne(ptr1, ptr2):
checkptr(ptr1)
checkptr(ptr2)
return ptr1 != ptr2
def op_ptr_nonzero(ptr1):
checkptr(ptr1)
return bool(ptr1)
def op_ptr_iszero(ptr1):
checkptr(ptr1)
return not bool(ptr1)
def op_getsubstruct(obj, field):
checkptr(obj)
# check the difference between op_getfield and op_getsubstruct:
assert isinstance(getattr(lltype.typeOf(obj).TO, field),
lltype.ContainerType)
return getattr(obj, field)
def op_getarraysubstruct(array, index):
checkptr(array)
result = array[index]
return result
# the diff between op_getarrayitem and op_getarraysubstruct
# is the same as between op_getfield and op_getsubstruct
def op_getarraysize(array):
checkptr(array)
return len(array)
def op_direct_fieldptr(obj, field):
checkptr(obj)
assert isinstance(field, str)
return lltype.direct_fieldptr(obj, field)
def op_direct_arrayitems(obj):
checkptr(obj)
return lltype.direct_arrayitems(obj)
def op_direct_ptradd(obj, index):
checkptr(obj)
assert isinstance(index, int)
return lltype.direct_ptradd(obj, index)
def op_bool_not(b):
    """Fold bool_not; the argument must be a real bool."""
    assert type(b) is bool
    if b:
        return False
    return True
def op_int_add(x, y):
assert isinstance(x, (int, llmemory.AddressOffset))
assert isinstance(y, (int, llmemory.AddressOffset))
return intmask(x + y)
def op_int_mul(x, y):
assert isinstance(x, (int, llmemory.AddressOffset))
assert isinstance(y, (int, llmemory.AddressOffset))
return intmask(x * y)
def op_int_floordiv(x, y):
    """C-style integer division: quotient truncated toward zero."""
    assert isinstance(x, int)
    assert isinstance(y, int)
    q = x // y
    # Python floors toward -inf; bump by one when the signs differ and
    # the division is inexact, to truncate toward zero instead
    if (x ^ y) < 0 and x % y != 0:
        q += 1
    return q
def op_int_mod(x, y):
    """C-style remainder: takes the sign of the dividend."""
    assert isinstance(x, int)
    assert isinstance(y, int)
    r = x % y
    # Python's remainder has the divisor's sign; undo the adjustment
    # when the operand signs differ and the remainder is nonzero
    if (x ^ y) < 0 and r != 0:
        r -= y
    return r
def op_llong_floordiv(x, y):
assert isinstance(x, r_longlong)
assert isinstance(y, r_longlong)
r = x//y
if x^y < 0 and x%y != 0:
r += 1
return r
def op_llong_mod(x, y):
assert isinstance(x, r_longlong)
assert isinstance(y, r_longlong)
r = x%y
if x^y < 0 and x%y != 0:
r -= y
return r
def op_same_as(x):
return x
def op_cast_primitive(TYPE, value):
assert isinstance(lltype.typeOf(value), lltype.Primitive)
return lltype.cast_primitive(TYPE, value)
op_cast_primitive.need_result_type = True
def op_cast_int_to_float(i):
assert type(i) is int
return float(i)
def op_cast_uint_to_float(u):
assert type(u) is r_uint
return float(u)
def op_cast_longlong_to_float(i):
assert type(i) is r_longlong
# take first 31 bits
li = float(int(i & r_longlong(0x7fffffff)))
ui = float(int(i >> 31)) * float(0x80000000)
return ui + li
def op_cast_int_to_char(b):
    """Fold cast_int_to_char: integer -> one-character string."""
    assert type(b) is int
    result = chr(b)
    return result
def op_cast_bool_to_int(b):
    """Fold cast_bool_to_int: True -> 1, False -> 0."""
    assert type(b) is bool
    if b:
        return 1
    return 0
def op_cast_bool_to_uint(b):
assert type(b) is bool
return r_uint(int(b))
def op_cast_bool_to_float(b):
    """Fold cast_bool_to_float: True -> 1.0, False -> 0.0."""
    assert type(b) is bool
    if b:
        return 1.0
    return 0.0
def op_cast_float_to_uint(f):
assert type(f) is float
return r_uint(int(f))
def op_cast_float_to_longlong(f):
assert type(f) is float
r = float(0x100000000)
small = f / r
high = int(small)
truncated = int((small - high) * r)
return r_longlong(high) * 0x100000000 + truncated
def op_cast_char_to_int(b):
    """Fold cast_char_to_int: one-character string -> ordinal."""
    assert type(b) is str and len(b) == 1
    result = ord(b)
    return result
def op_cast_unichar_to_int(b):
assert type(b) is unicode and len(b) == 1
return ord(b)
def op_cast_int_to_unichar(b):
assert type(b) is int
return unichr(b)
def op_cast_int_to_uint(b):
assert type(b) is int
return r_uint(b)
def op_cast_uint_to_int(b):
assert type(b) is r_uint
return intmask(b)
def op_cast_int_to_longlong(b):
assert type(b) is int
return r_longlong(b)
def op_truncate_longlong_to_int(b):
assert type(b) is r_longlong
assert -sys.maxint-1 <= b <= sys.maxint
return int(b)
def op_float_pow(b, c):
    """Fold float_pow via math.pow (kept, not '**': they differ on some
    corner cases such as 0.0 ** -1.0)."""
    assert type(b) is float
    assert type(c) is float
    result = math.pow(b, c)
    return result
def op_cast_pointer(RESTYPE, obj):
checkptr(obj)
return lltype.cast_pointer(RESTYPE, obj)
op_cast_pointer.need_result_type = True
def op_cast_ptr_to_weakadr(ptr):
checkptr(ptr)
return llmemory.cast_ptr_to_weakadr(ptr)
def op_cast_weakadr_to_ptr(TYPE, wadr):
assert lltype.typeOf(wadr) == llmemory.WeakGcAddress
return llmemory.cast_weakadr_to_ptr(wadr, TYPE)
op_cast_weakadr_to_ptr.need_result_type = True
def op_cast_weakadr_to_int(wadr):
assert lltype.typeOf(wadr) == llmemory.WeakGcAddress
return wadr.cast_to_int()
def op_cast_ptr_to_adr(ptr):
checkptr(ptr)
return llmemory.cast_ptr_to_adr(ptr)
def op_cast_adr_to_ptr(TYPE, adr):
checkadr(adr)
return llmemory.cast_adr_to_ptr(adr, TYPE)
op_cast_adr_to_ptr.need_result_type = True
def op_cast_adr_to_int(adr):
checkadr(adr)
return llmemory.cast_adr_to_int(adr)
def op_cast_int_to_adr(int):
return llmemory.cast_int_to_adr(int)
##def op_cast_int_to_adr(x):
## assert type(x) is int
## return llmemory.cast_int_to_adr(x)
def op_unichar_eq(x, y):
assert isinstance(x, unicode) and len(x) == 1
assert isinstance(y, unicode) and len(y) == 1
return x == y
def op_unichar_ne(x, y):
assert isinstance(x, unicode) and len(x) == 1
assert isinstance(y, unicode) and len(y) == 1
return x != y
def op_adr_lt(addr1, addr2):
checkadr(addr1)
checkadr(addr2)
return addr1 < addr2
def op_adr_le(addr1, addr2):
checkadr(addr1)
checkadr(addr2)
return addr1 <= addr2
def op_adr_eq(addr1, addr2):
checkadr(addr1)
checkadr(addr2)
return addr1 == addr2
def op_adr_ne(addr1, addr2):
checkadr(addr1)
checkadr(addr2)
return addr1 != addr2
def op_adr_gt(addr1, addr2):
checkadr(addr1)
checkadr(addr2)
return addr1 > addr2
def op_adr_ge(addr1, addr2):
checkadr(addr1)
checkadr(addr2)
return addr1 >= addr2
def op_adr_add(addr, offset):
checkadr(addr)
assert lltype.typeOf(offset) is lltype.Signed
return addr + offset
def op_adr_sub(addr, offset):
checkadr(addr)
assert lltype.typeOf(offset) is lltype.Signed
return addr - offset
def op_adr_delta(addr1, addr2):
checkadr(addr1)
checkadr(addr2)
return addr1 - addr2
def op_getfield(p, name):
checkptr(p)
if not lltype.typeOf(p).TO._hints.get('immutable'):
raise TypeError("cannot fold getfield on mutable struct")
return getattr(p, name)
def op_getarrayitem(p, index):
    """Fold getarrayitem: only allowed on immutable arrays."""
    checkptr(p)
    if not lltype.typeOf(p).TO._hints.get('immutable'):
        # error message fixed: this op is getarrayitem, not getfield
        raise TypeError("cannot fold getarrayitem on mutable array")
    return p[index]
# ____________________________________________________________
def get_op_impl(opname):
    """Return the op_<opname> function defined in this module,
    generating the primitive ones on demand."""
    impl = globals().get('op_' + opname)
    if impl is None:
        return get_primitive_op_src(opname)
    return impl
| Python |
from pypy.rpython.lltypesystem.lltype import GcArray, Array, Char, malloc
from pypy.rlib.rarithmetic import r_uint
CHAR_ARRAY = GcArray(Char)
def ll_int_str(repr, i):
    """Low-level str(int): identical to the decimal conversion."""
    return ll_int2dec(i)
def ll_int2dec(i):
    """Convert a signed integer to its decimal low-level string."""
    from pypy.rpython.lltypesystem.rstr import mallocstr
    temp = malloc(CHAR_ARRAY, 20)    # scratch buffer for the digits
    len = 0    # NOTE: shadows the builtin; kept as-is (RPython-style code)
    sign = 0
    if i < 0:
        sign = 1
        i = r_uint(-i)    # unsigned negation also handles -sys.maxint-1
    else:
        i = r_uint(i)
    if i == 0:
        len = 1
        temp[0] = '0'
    else:
        # emit digits least-significant first into 'temp'
        while i:
            temp[len] = chr(i%10+ord('0'))
            i //= 10
            len += 1
    len += sign
    result = mallocstr(len)
    result.hash = 0
    if sign:
        result.chars[0] = '-'
        j = 1
    else:
        j = 0
    # copy the digits back most-significant first
    while j < len:
        result.chars[j] = temp[len-j-1]
        j += 1
    return result
# digit lookup table '0'..'9','a'..'f', as an immortal low-level array
hex_chars = malloc(Array(Char), 16, immortal=True)
for i in range(16):
    hex_chars[i] = "%x"%i
def ll_int2hex(i, addPrefix):
    """Convert an integer to hexadecimal, optionally '0x'-prefixed."""
    from pypy.rpython.lltypesystem.rstr import mallocstr
    temp = malloc(CHAR_ARRAY, 20)    # scratch buffer for the digits
    len = 0    # NOTE: shadows the builtin; kept as-is (RPython-style code)
    sign = 0
    if i < 0:
        sign = 1
        i = r_uint(-i)
    else:
        i = r_uint(i)
    if i == 0:
        len = 1
        temp[0] = '0'
    else:
        # emit hex digits least-significant first
        while i:
            temp[len] = hex_chars[i & 0xf]
            i >>= 4
            len += 1
    len += sign
    if addPrefix:
        len += 2
    result = mallocstr(len)
    result.hash = 0
    j = 0
    if sign:
        result.chars[0] = '-'
        j = 1
    if addPrefix:
        result.chars[j] = '0'
        result.chars[j+1] = 'x'
        j += 2
    # copy the digits back most-significant first
    while j < len:
        result.chars[j] = temp[len-j-1]
        j += 1
    return result
def ll_int2oct(i, addPrefix):
    """Render 'i' in octal; prepend '0' when 'addPrefix' is true.
    Zero is special-cased to plain '0' (never '00').

    Fix: the local buffer-length variable used to be called 'len',
    shadowing the builtin; renamed to 'length'.
    """
    from pypy.rpython.lltypesystem.rstr import mallocstr
    if i == 0:
        result = mallocstr(1)
        result.hash = 0
        result.chars[0] = '0'
        return result
    temp = malloc(CHAR_ARRAY, 25)
    length = 0
    sign = 0
    if i < 0:
        sign = 1
        i = r_uint(-i)   # unsigned negate, safe for the most negative value
    else:
        i = r_uint(i)
    # produce the octal digits in reverse order, 3 bits at a time;
    # octal digits are a subset of hex_chars
    while i:
        temp[length] = hex_chars[i & 0x7]
        i >>= 3
        length += 1
    length += sign
    if addPrefix:
        length += 1   # room for the leading '0'
    result = mallocstr(length)
    result.hash = 0
    j = 0
    if sign:
        result.chars[0] = '-'
        j = 1
    if addPrefix:
        result.chars[j] = '0'
        j += 1
    # copy the digits back in their final order
    while j < length:
        result.chars[j] = temp[length - j - 1]
        j += 1
    return result
def ll_float_str(repr, f):
    """Low-level str(f): format the float through the C-style '%f'
    conversion implemented by ll_strtod.  'repr' is unused here but is
    part of the ll_*_str calling convention."""
    from pypy.rpython.lltypesystem.module.ll_strtod import Implementation
    from pypy.rpython.lltypesystem.rstr import percent_f
    return Implementation.ll_strtod_formatd(percent_f, f)
| Python |
import sys
import ctypes
import ctypes.util
from pypy.rpython.lltypesystem import lltype
from pypy.tool.uid import fixid
def uaddressof(obj):
    """Return the address of the ctypes object 'obj', passed through
    fixid() (presumably to normalize negative ids on 32-bit platforms
    for display -- see pypy.tool.uid)."""
    return fixid(ctypes.addressof(obj))
# Maps an lltype low-level type to its ctypes equivalent; pre-seeded with
# the primitive types and filled lazily by get_ctypes_type() below.
_ctypes_cache = {
    lltype.Signed: ctypes.c_long,
    lltype.Char: ctypes.c_ubyte,
    }
def build_ctypes_struct(S, max_n=None):
    """Build a ctypes Structure subclass mirroring the lltype Struct 'S'.

    If 'max_n' is not None, the variable-sized tail array (S._arrayfld)
    is given room for max_n items; otherwise the struct is built with a
    zero-length tail and reallocated by _malloc() when the real length
    is known.
    """
    fields = []
    for fieldname in S._names:
        FIELDTYPE = S._flds[fieldname]
        if max_n is not None and fieldname == S._arrayfld:
            # the inlined variable-sized tail array
            cls = build_ctypes_array(FIELDTYPE, max_n)
        else:
            cls = get_ctypes_type(FIELDTYPE)
        fields.append((fieldname, cls))

    class CStruct(ctypes.Structure):
        _fields_ = fields

        def _malloc(cls, n=None):
            # allocate storage; 'n' is the length of the variable-sized
            # tail, mandatory iff the struct has one
            if S._arrayfld is None:
                if n is not None:
                    raise TypeError("%r is not variable-sized" % (S,))
                storage = cls()
                return storage
            else:
                if n is None:
                    raise TypeError("%r is variable-sized" % (S,))
                # rebuild the ctypes class with the tail sized to 'n'
                biggercls = build_ctypes_struct(S, n)
                bigstruct = biggercls()
                array = getattr(bigstruct, S._arrayfld)
                if hasattr(array, 'length'):
                    array.length = n
                return bigstruct
        _malloc = classmethod(_malloc)

        def _getattr(self, field_name):
            # read a field, converting back to the lltype world
            T = getattr(S, field_name)
            cobj = getattr(self, field_name)
            return ctypes2lltype(T, cobj)

        def _setattr(self, field_name, value):
            # write a field, converting from the lltype world
            cobj = lltype2ctypes(value)
            setattr(self, field_name, cobj)

        def _eq(self, other):
            # identity: same underlying memory
            return ctypes.addressof(self) == ctypes.addressof(other)

    CStruct.__name__ = 'ctypes_%s' % (S,)
    if max_n is not None:
        # remember the general (zero-length-tail) type for pointer
        # normalization in lltype2ctypes()
        CStruct._normalized_ctype = get_ctypes_type(S)
    return CStruct
def build_ctypes_array(A, max_n=0):
    """Build a ctypes Structure subclass mirroring the lltype Array 'A',
    with room for 'max_n' items.

    Unless A carries the 'nolength' hint, the items are preceded by a
    'length' field, matching the lltype array layout.

    Fix: use the call form raise TypeError(...) instead of the old
    comma form, for consistency with the rest of this file.
    """
    assert max_n >= 0
    ITEM = A.OF
    ctypes_item = get_ctypes_type(ITEM)

    class CArray(ctypes.Structure):
        if not A._hints.get('nolength'):
            _fields_ = [('length', ctypes.c_int),
                        ('items', max_n * ctypes_item)]
        else:
            _fields_ = [('items', max_n * ctypes_item)]

        def _malloc(cls, n=None):
            # arrays are always variable-sized: 'n' is mandatory
            if not isinstance(n, int):
                raise TypeError("array length must be an int")
            biggercls = build_ctypes_array(A, n)
            bigarray = biggercls()
            if hasattr(bigarray, 'length'):
                bigarray.length = n
            return bigarray
        _malloc = classmethod(_malloc)

        def _indexable(self, index):
            # view the items through an on-the-fly array type big enough
            # to reach 'index', allowing accesses beyond the declared max_n
            PtrType = ctypes.POINTER((index+1) * ctypes_item)
            p = ctypes.cast(ctypes.pointer(self.items), PtrType)
            return p.contents

        def _getitem(self, index, boundscheck=True):
            if boundscheck:
                items = self.items
            else:
                items = self._indexable(index)
            cobj = items[index]
            return ctypes2lltype(ITEM, cobj)

        def _setitem(self, index, value, boundscheck=True):
            if boundscheck:
                items = self.items
            else:
                items = self._indexable(index)
            cobj = lltype2ctypes(value)
            items[index] = cobj

        def _eq(self, other):
            # identity: same underlying memory
            return ctypes.addressof(self) == ctypes.addressof(other)

    CArray.__name__ = 'ctypes_%s*%d' % (A, max_n)
    if max_n > 0:
        # remember the general (zero-length) type for pointer
        # normalization in lltype2ctypes()
        CArray._normalized_ctype = get_ctypes_type(A)
    return CArray
def get_ctypes_type(T):
    """Return the ctypes type mirroring the lltype type 'T', building
    and caching it on first use."""
    if T not in _ctypes_cache:
        if isinstance(T, lltype.Ptr):
            built = ctypes.POINTER(get_ctypes_type(T.TO))
        elif isinstance(T, lltype.Struct):
            built = build_ctypes_struct(T)
        elif isinstance(T, lltype.Array):
            built = build_ctypes_array(T)
        else:
            raise NotImplementedError(T)
        _ctypes_cache[T] = built
    return _ctypes_cache[T]
def convert_struct(container):
    """Move the data of the lltype struct 'container' into a freshly
    allocated ctypes struct, attached as container._ctypes_storage.
    Uninitialized fields are left alone."""
    STRUCT = container._TYPE
    cls = get_ctypes_type(STRUCT)
    cstruct = cls._malloc()
    container._ctypes_storage = cstruct
    for field_name in STRUCT._names:
        field_value = getattr(container, field_name)
        if not isinstance(field_value, lltype._uninitialized):
            setattr(cstruct, field_name, lltype2ctypes(field_value))
    # from now on reads/writes must go through the ctypes storage
    remove_regular_struct_content(container)
def remove_regular_struct_content(container):
    """Drop the regular field attributes of 'container'; its data now
    lives in the attached ctypes storage."""
    for fname in container._TYPE._names:
        delattr(container, fname)
def convert_array(container):
    """Move the items of the lltype array 'container' into a freshly
    allocated ctypes array, attached as container._ctypes_storage.
    Uninitialized items are left alone."""
    ARRAY = container._TYPE
    cls = get_ctypes_type(ARRAY)
    carray = cls._malloc(container.getlength())
    container._ctypes_storage = carray
    for i in range(container.getlength()):
        item_value = container.items[i]    # fish fish
        if not isinstance(item_value, lltype._uninitialized):
            carray.items[i] = lltype2ctypes(item_value)
    # from now on reads/writes must go through the ctypes storage
    remove_regular_array_content(container)
def remove_regular_array_content(container):
    """Blank out the regular items of 'container'; its data now lives
    in the attached ctypes storage."""
    n = container.getlength()
    for idx in range(n):
        container.items[idx] = None
class _array_of_unknown_length(lltype._parentable):
    """Container for an Array with the 'nolength' hint seen through a
    ctypes pointer: the real length is unknown, so no bounds checking
    can be done -- every index is accepted."""
    _kind = "array"

    __slots__ = ()

    def __repr__(self):
        return '<C array at 0x%x>' % (uaddressof(self._ctypes_storage),)

    def getbounds(self):
        # we have no clue, so we allow whatever index
        return 0, sys.maxint

    def getitem(self, index, uninitialized_ok=False):
        # reads go straight to the ctypes storage, unchecked
        return self._ctypes_storage._getitem(index, boundscheck=False)

    def setitem(self, index, value):
        # writes go straight to the ctypes storage, unchecked
        self._ctypes_storage._setitem(index, value, boundscheck=False)
# ____________________________________________________________
def lltype2ctypes(llobj, normalize=True):
    """Convert the lltype object 'llobj' to its ctypes equivalent.
    'normalize' should only be False in tests, where we want to
    inspect the resulting ctypes object manually.
    """
    T = lltype.typeOf(llobj)
    if isinstance(T, lltype.Ptr):
        container = llobj._obj
        if container._ctypes_storage is None:
            # first time this container crosses into ctypes: move its
            # data into freshly built ctypes storage
            if isinstance(T.TO, lltype.Struct):
                convert_struct(container)
            elif isinstance(T.TO, lltype.Array):
                convert_array(container)
            else:
                raise NotImplementedError(T)
        storage = container._ctypes_storage
        p = ctypes.pointer(storage)
        if normalize and hasattr(storage, '_normalized_ctype'):
            # cast a pointer to a concretely-sized struct/array back to
            # the general declared type
            p = ctypes.cast(p, ctypes.POINTER(storage._normalized_ctype))
        return p

    if T is lltype.Char:
        return ord(llobj)   # chars travel as small integers

    return llobj   # other primitives pass through unchanged
def ctypes2lltype(T, cobj):
    """Convert the ctypes object 'cobj' to its lltype equivalent.
    'T' is the expected lltype type.
    """
    if T is lltype.Char:
        llobj = chr(cobj)
    elif isinstance(T, lltype.Ptr):
        # build a fresh lltype container whose storage is the ctypes
        # object itself
        if isinstance(T.TO, lltype.Struct):
            # XXX var-sized structs
            container = lltype._struct(T.TO)
            container._ctypes_storage = cobj.contents
            remove_regular_struct_content(container)
        elif isinstance(T.TO, lltype.Array):
            if T.TO._hints.get('nolength', False):
                container = _array_of_unknown_length(T.TO)
                container._ctypes_storage = cobj.contents
            else:
                raise NotImplementedError("array with an explicit length")
        else:
            raise NotImplementedError(T)
        llobj = lltype._ptr(T, container, solid=True)
    else:
        llobj = cobj   # primitives pass through unchanged
    assert lltype.typeOf(llobj) == T
    return llobj
# __________ the standard C library __________
# Load the platform's standard C library: the MS C runtime on Windows,
# otherwise whatever find_library('c') locates (e.g. libc.so.6).
if sys.platform == 'win32':
    standard_c_lib = ctypes.cdll.LoadLibrary('msvcrt.dll')
else:
    standard_c_lib = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
# ____________________________________________
def get_ctypes_callable(funcptr):
    """Locate and configure the ctypes function object for the external
    function that 'funcptr' refers to.

    Looks in the libraries listed on the function object, or in the
    standard C library when none are given.  Raises NotImplementedError
    when the function cannot be resolved (inline C source snippet,
    symbol not found, or an argument/result type that get_ctypes_type()
    does not support).
    """
    if getattr(funcptr._obj, 'source', None) is not None:
        # give up - for tests with an inlined bit of C code
        raise NotImplementedError("cannot call a C function defined in "
                                  "a custom C source snippet")
    FUNCTYPE = lltype.typeOf(funcptr).TO
    funcname = funcptr._obj._name
    libraries = getattr(funcptr._obj, 'libraries', None)
    if not libraries:
        cfunc = getattr(standard_c_lib, funcname, None)
    else:
        cfunc = None
        for libname in libraries:
            libpath = ctypes.util.find_library(libname)
            if libpath:
                clib = ctypes.cdll.LoadLibrary(libpath)
                cfunc = getattr(clib, funcname, None)
                if cfunc is not None:
                    break
    if cfunc is None:
        # function name not found in any of the libraries
        if not libraries:
            place = 'the standard C library'
        elif len(libraries) == 1:
            place = 'library %r' % (libraries[0],)
        else:
            place = 'any of the libraries %r' % (libraries,)
        raise NotImplementedError("function %r not found in %s" % (
            funcname, place))
    # get_ctypes_type() can raise NotImplementedError too
    cfunc.argtypes = [get_ctypes_type(T) for T in FUNCTYPE.ARGS]
    cfunc.restype = get_ctypes_type(FUNCTYPE.RESULT)
    return cfunc
def make_callable_via_ctypes(funcptr):
    """Attach a _callable to 'funcptr' that invokes the real C function
    through ctypes.  If the function cannot be resolved now, the failure
    is deferred: calling the function raises the NotImplementedError."""
    try:
        cfunc = get_ctypes_callable(funcptr)
    except NotImplementedError, e:
        # remember the error and re-raise it lazily on each call
        def invoke_via_ctypes(*argvalues):
            raise NotImplementedError, e
    else:
        RESULT = lltype.typeOf(funcptr).TO.RESULT
        def invoke_via_ctypes(*argvalues):
            # convert the arguments, call, convert the result back
            cargs = [lltype2ctypes(value) for value in argvalues]
            cres = cfunc(*cargs)
            return ctypes2lltype(RESULT, cres)
    funcptr._obj._callable = invoke_via_ctypes
| Python |
import sys
import types
from pypy.annotation.pairtype import pairtype, pair
from pypy.objspace.flow.model import Constant
from pypy.rpython.error import TyperError
from pypy.rpython.rmodel import Repr, inputconst, warning, mangle
from pypy.rpython.rclass import AbstractClassRepr,\
AbstractInstanceRepr,\
MissingRTypeAttribute,\
getclassrepr, getinstancerepr,\
get_type_repr, rtype_new_instance
from pypy.rpython.lltypesystem.lltype import \
Ptr, Struct, GcStruct, malloc, \
cast_pointer, cast_ptr_to_int, castable, nullptr, \
RuntimeTypeInfo, getRuntimeTypeInfo, typeOf, \
Array, Char, Void, attachRuntimeTypeInfo, \
FuncType, Bool, Signed, functionptr, FuncType, PyObject
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.robject import PyObjRepr, pyobj_repr
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.annotation import model as annmodel
from pypy.rlib.objectmodel import UnboxedValue
from pypy.rlib.rarithmetic import intmask
#
# There is one "vtable" per user class, with the following structure:
# A root class "object" has:
#
# struct object_vtable {
# // struct object_vtable* parenttypeptr; not used any more
# RuntimeTypeInfo * rtti;
# Signed subclassrange_min; //this is also the id of the class itself
# Signed subclassrange_max;
# array { char } * name;
# struct object * instantiate();
# }
#
# Every other class X, with parent Y, has the structure:
#
# struct vtable_X {
# struct vtable_Y super; // inlined
# ... // extra class attributes
# }
# The type of the instances is:
#
# struct object { // for the root class
# struct object_vtable* typeptr;
# }
#
# struct X {
# struct Y super; // inlined
# ... // extra instance attributes
# }
#
# there's also a nongcobject
# The root vtable and instance structures described in the big comment
# above.  OBJECT_VTABLE is a ForwardReference because OBJECT's 'typeptr'
# field needs CLASSTYPE before the vtable fields can be spelled out.
OBJECT_VTABLE = lltype.ForwardReference()
CLASSTYPE = Ptr(OBJECT_VTABLE)
OBJECT = GcStruct('object', ('typeptr', CLASSTYPE),
                  hints = {'immutable': True, 'shouldntbenull': True})
OBJECTPTR = Ptr(OBJECT)
OBJECT_VTABLE.become(Struct('object_vtable',
                            #('parenttypeptr', CLASSTYPE),
                            ('subclassrange_min', Signed),
                            ('subclassrange_max', Signed),
                            ('rtti', Ptr(RuntimeTypeInfo)),
                            ('name', Ptr(Array(Char))),
                            ('instantiate', Ptr(FuncType([], OBJECTPTR))),
                            hints = {'immutable': True}))

# non-gc case
NONGCOBJECT = Struct('nongcobject', ('typeptr', CLASSTYPE))
# NOTE(review): Ptr(OBJECT) looks like it was meant to be
# Ptr(NONGCOBJECT) -- confirm before relying on this name
NONGCOBJECTPTR = Ptr(OBJECT)

# cpy case (XXX try to merge the typeptr with the ob_type)
CPYOBJECT = lltype.PyStruct('cpyobject', ('head', PyObject),
                            ('typeptr', CLASSTYPE))
CPYOBJECTPTR = Ptr(CPYOBJECT)

# root instance structure for each flavor
OBJECT_BY_FLAVOR = {'gc': OBJECT,
                    'raw': NONGCOBJECT,
                    'cpy': CPYOBJECT}

# user-level flavor name -> low-level malloc flavor
LLFLAVOR = {'gc' : 'gc',
            'raw' : 'raw',
            'cpy' : 'cpy',
            'stack': 'raw',
            }
# flavors whose vtables carry runtime type info
RTTIFLAVORS = ('gc', 'cpy')
def cast_vtable_to_typeptr(vtable):
    """Walk up the inlined 'super' chain until the pointer has the root
    OBJECT_VTABLE type, and return it."""
    result = vtable
    while typeOf(result).TO != OBJECT_VTABLE:
        result = result.super
    return result
class ClassRepr(AbstractClassRepr):
    """Low-level repr of a class object: a pointer to its vtable Struct
    (see the layout comment at the top of this module)."""

    def __init__(self, rtyper, classdef):
        AbstractClassRepr.__init__(self, rtyper, classdef)
        if classdef is None:
            # 'object' root type
            self.vtable_type = OBJECT_VTABLE
        else:
            # filled in by _setup_repr() once the base class is known
            self.vtable_type = lltype.ForwardReference()
        self.lowleveltype = Ptr(self.vtable_type)

    def _setup_repr(self):
        """Build the vtable Struct and the name -> (mangled_name, repr)
        maps for class attributes, methods, and PBC attributes."""
        # NOTE: don't store mutable objects like the dicts below on 'self'
        # before they are fully built, to avoid strange bugs in case
        # of recursion where other code would use these
        # partially-initialized dicts.
        clsfields = {}
        pbcfields = {}
        allmethods = {}
        if self.classdef is not None:
            # class attributes
            llfields = []
            attrs = self.classdef.attrs.items()
            attrs.sort()   # deterministic vtable field order
            for name, attrdef in attrs:
                if attrdef.readonly:
                    s_value = attrdef.s_value
                    s_unboundmethod = self.prepare_method(s_value)
                    if s_unboundmethod is not None:
                        allmethods[name] = True
                        s_value = s_unboundmethod
                    r = self.rtyper.getrepr(s_value)
                    mangled_name = 'cls_' + name
                    clsfields[name] = mangled_name, r
                    llfields.append((mangled_name, r.lowleveltype))
            # attributes showing up in getattrs done on the class as a PBC
            extra_access_sets = self.rtyper.class_pbc_attributes.get(
                self.classdef, {})
            for access_set, (attr, counter) in extra_access_sets.items():
                r = self.rtyper.getrepr(access_set.s_value)
                mangled_name = mangle('pbc%d' % counter, attr)
                pbcfields[access_set, attr] = mangled_name, r
                llfields.append((mangled_name, r.lowleveltype))
            #
            self.rbase = getclassrepr(self.rtyper, self.classdef.basedef)
            self.rbase.setup()
            kwds = {'hints': {'immutable': True}}
            vtable_type = Struct('%s_vtable' % self.classdef.name,
                                 ('super', self.rbase.vtable_type),
                                 *llfields, **kwds)
            self.vtable_type.become(vtable_type)
            allmethods.update(self.rbase.allmethods)
        self.clsfields = clsfields
        self.pbcfields = pbcfields
        self.allmethods = allmethods
        self.vtable = None   # built lazily by getvtable()

#    def convert_const(self, value):
#        if not isinstance(value, (type, types.ClassType)):
#            raise TyperError("not a class: %r" % (value,))
#        try:
#            subclassdef = self.rtyper.annotator.getuserclasses()[value]
#        except KeyError:
#            raise TyperError("no classdef: %r" % (value,))
#        if self.classdef is not None:
#            if self.classdef.commonbase(subclassdef) != self.classdef:
#                raise TyperError("not a subclass of %r: %r" % (
#                    self.classdef.cls, value))
#        #
#        return getclassrepr(self.rtyper, subclassdef).getvtable()

    def getvtable(self, cast_to_typeptr=True):
        """Return a ptr to the vtable of this type."""
        if self.vtable is None:
            self.vtable = malloc(self.vtable_type, immortal=True)
            self.setup_vtable(self.vtable, self)
        #
        vtable = self.vtable
        if cast_to_typeptr:
            vtable = cast_vtable_to_typeptr(vtable)
        return vtable
    getruntime = getvtable

    def setup_vtable(self, vtable, rsubcls):
        """Initialize the 'self' portion of the 'vtable' belonging to the
        given subclass."""
        if self.classdef is None:
            # initialize the 'subclassrange_*' and 'name' fields
            if rsubcls.classdef is not None:
                #vtable.parenttypeptr = rsubcls.rbase.getvtable()
                vtable.subclassrange_min = rsubcls.classdef.minid
                vtable.subclassrange_max = rsubcls.classdef.maxid
            else: #for the root class
                vtable.subclassrange_min = 0
                vtable.subclassrange_max = sys.maxint
            rinstance = getinstancerepr(self.rtyper, rsubcls.classdef)
            rinstance.setup()
            if rinstance.gcflavor in RTTIFLAVORS:
                vtable.rtti = getRuntimeTypeInfo(rinstance.object_type)
            if rsubcls.classdef is None:
                name = 'object'
            else:
                name = rsubcls.classdef.shortname
            # the class name, stored NUL-terminated
            vtable.name = malloc(Array(Char), len(name)+1, immortal=True)
            for i in range(len(name)):
                vtable.name[i] = name[i]
            vtable.name[len(name)] = '\x00'
            if hasattr(rsubcls.classdef, 'my_instantiate_graph'):
                graph = rsubcls.classdef.my_instantiate_graph
                vtable.instantiate = self.rtyper.getcallable(graph)
            #else: the classdef was created recently, so no instantiate()
            #      could reach it
        else:
            # setup class attributes: for each attribute name at the level
            # of 'self', look up its value in the subclass rsubcls
            def assign(mangled_name, value):
                # note: 'r' is the repr of the current field, rebound by
                # the loops below before each call to assign()
                if isinstance(value, Constant) and isinstance(value.value, staticmethod):
                    value = Constant(value.value.__get__(42))   # staticmethod => bare function
                llvalue = r.convert_desc_or_const(value)
                setattr(vtable, mangled_name, llvalue)
            mro = list(rsubcls.classdef.getmro())
            for fldname in self.clsfields:
                mangled_name, r = self.clsfields[fldname]
                if r.lowleveltype is Void:
                    continue
                value = rsubcls.classdef.classdesc.read_attribute(fldname, None)
                if value is not None:
                    assign(mangled_name, value)
            # extra PBC attributes
            for (access_set, attr), (mangled_name, r) in self.pbcfields.items():
                if rsubcls.classdef.classdesc not in access_set.descs:
                    continue   # only for the classes in the same pbc access set
                if r.lowleveltype is Void:
                    continue
                attrvalue = rsubcls.classdef.classdesc.read_attribute(attr, None)
                if attrvalue is not None:
                    assign(mangled_name, attrvalue)
            # then initialize the 'super' portion of the vtable
            self.rbase.setup_vtable(vtable.super, rsubcls)

    #def fromparentpart(self, v_vtableptr, llops):
    #    """Return the vtable pointer cast from the parent vtable's type
    #    to self's vtable type."""

    def fromtypeptr(self, vcls, llops):
        """Return the type pointer cast to self's vtable type."""
        self.setup()
        castable(self.lowleveltype, vcls.concretetype) # sanity check
        return llops.genop('cast_pointer', [vcls],
                           resulttype=self.lowleveltype)
    fromclasstype = fromtypeptr

    def getclsfield(self, vcls, attr, llops):
        """Read the given attribute of 'vcls'."""
        if attr in self.clsfields:
            mangled_name, r = self.clsfields[attr]
            v_vtable = self.fromtypeptr(vcls, llops)
            cname = inputconst(Void, mangled_name)
            return llops.genop('getfield', [v_vtable, cname], resulttype=r)
        else:
            # not defined at this level: try the base class
            if self.classdef is None:
                raise MissingRTypeAttribute(attr)
            return self.rbase.getclsfield(vcls, attr, llops)

    def setclsfield(self, vcls, attr, vvalue, llops):
        """Write the given attribute of 'vcls'."""
        if attr in self.clsfields:
            mangled_name, r = self.clsfields[attr]
            v_vtable = self.fromtypeptr(vcls, llops)
            cname = inputconst(Void, mangled_name)
            llops.genop('setfield', [v_vtable, cname, vvalue])
        else:
            # not defined at this level: try the base class
            if self.classdef is None:
                raise MissingRTypeAttribute(attr)
            self.rbase.setclsfield(vcls, attr, vvalue, llops)

    def getpbcfield(self, vcls, access_set, attr, llops):
        """Read a class attribute accessed through the given PBC
        access set."""
        if (access_set, attr) not in self.pbcfields:
            raise TyperError("internal error: missing PBC field")
        mangled_name, r = self.pbcfields[access_set, attr]
        v_vtable = self.fromtypeptr(vcls, llops)
        cname = inputconst(Void, mangled_name)
        return llops.genop('getfield', [v_vtable, cname], resulttype=r)

    def rtype_issubtype(self, hop):
        """Rtype issubtype(); when the right-hand class is a compile-time
        constant, fold it into a subclassrange_min/max range check."""
        class_repr = get_type_repr(self.rtyper)
        v_cls1, v_cls2 = hop.inputargs(class_repr, class_repr)
        if isinstance(v_cls2, Constant):
            cls2 = v_cls2.value
            # XXX re-implement the following optimization
##            if cls2.subclassrange_max == cls2.subclassrange_min:
##                # a class with no subclass
##                return hop.genop('ptr_eq', [v_cls1, v_cls2], resulttype=Bool)
##            else:
            minid = hop.inputconst(Signed, cls2.subclassrange_min)
            maxid = hop.inputconst(Signed, cls2.subclassrange_max)
            return hop.gendirectcall(ll_issubclass_const, v_cls1, minid,
                                     maxid)
        else:
            v_cls1, v_cls2 = hop.inputargs(class_repr, class_repr)
            return hop.gendirectcall(ll_issubclass, v_cls1, v_cls2)
# ____________________________________________________________
class InstanceRepr(AbstractInstanceRepr):
    """Low-level repr of the instances of a user class: a pointer to a
    structure whose first field inlines the parent class's structure
    (see the layout comment at the top of this module)."""

    def __init__(self, rtyper, classdef, gcflavor='gc'):
        AbstractInstanceRepr.__init__(self, rtyper, classdef)
        if classdef is None:
            # the root: OBJECT (or its raw/cpy equivalent)
            self.object_type = OBJECT_BY_FLAVOR[LLFLAVOR[gcflavor]]
        else:
            ForwardRef = lltype.FORWARDREF_BY_FLAVOR[LLFLAVOR[gcflavor]]
            self.object_type = ForwardRef()
        self.prebuiltinstances = {}   # { id(x): (x, _ptr) }
        self.lowleveltype = Ptr(self.object_type)
        self.gcflavor = gcflavor

    def _setup_repr(self, llfields=None, hints=None, adtmeths=None):
        """Build the instance structure and the attribute-name maps."""
        # NOTE: don't store mutable objects like the dicts below on 'self'
        # before they are fully built, to avoid strange bugs in case
        # of recursion where other code would use these
        # partially-initialized dicts.
        self.rclass = getclassrepr(self.rtyper, self.classdef)
        fields = {}
        allinstancefields = {}
        if self.classdef is None:
            fields['__class__'] = 'typeptr', get_type_repr(self.rtyper)
        else:
            # instance attributes
            if llfields is None:
                llfields = []
            attrs = self.classdef.attrs.items()
            attrs.sort()   # deterministic field order
            for name, attrdef in attrs:
                if not attrdef.readonly:
                    r = self.rtyper.getrepr(attrdef.s_value)
                    mangled_name = 'inst_' + name
                    fields[name] = mangled_name, r
                    llfields.append((mangled_name, r.lowleveltype))
            #
            # hash() support
            if self.rtyper.needs_hash_support(self.classdef):
                from pypy.rpython import rint
                fields['_hash_cache_'] = 'hash_cache', rint.signed_repr
                llfields.append(('hash_cache', Signed))

            self.rbase = getinstancerepr(self.rtyper, self.classdef.basedef,
                                         self.gcflavor)
            self.rbase.setup()
            #
            # PyObject wrapper support
            if self.has_wrapper and '_wrapper_' not in self.rbase.allinstancefields:
                fields['_wrapper_'] = 'wrapper', pyobj_repr
                llfields.append(('wrapper', Ptr(PyObject)))

            MkStruct = lltype.STRUCT_BY_FLAVOR[LLFLAVOR[self.gcflavor]]
            if adtmeths is None:
                adtmeths = {}
            if hints is None:
                hints = {}
            if '_immutable_' in self.classdef.classdesc.classdict:
                hints = hints.copy()
                hints['immutable'] = True
            object_type = MkStruct(self.classdef.name,
                                   ('super', self.rbase.object_type),
                                   hints=hints,
                                   adtmeths=adtmeths,
                                   *llfields)
            self.object_type.become(object_type)
            allinstancefields.update(self.rbase.allinstancefields)
        allinstancefields.update(fields)
        self.fields = fields
        self.allinstancefields = allinstancefields
        if self.gcflavor in RTTIFLAVORS:
            attachRuntimeTypeInfo(self.object_type)

    def _setup_repr_final(self):
        """Attach the runtime type info function and, when the class
        defines __del__, a destructor function pointer."""
        if self.gcflavor in RTTIFLAVORS:
            if (self.classdef is not None and
                self.classdef.classdesc.lookup('__del__') is not None):
                s_func = self.classdef.classdesc.s_read_attribute('__del__')
                source_desc = self.classdef.classdesc.lookup('__del__')
                source_classdef = source_desc.getclassdef(None)
                source_repr = getinstancerepr(self.rtyper, source_classdef)
                assert len(s_func.descriptions) == 1
                funcdesc = s_func.descriptions.keys()[0]
                graph = funcdesc.getuniquegraph()
                FUNCTYPE = FuncType([Ptr(source_repr.object_type)], Void)
                destrptr = functionptr(FUNCTYPE, graph.name,
                                       graph=graph,
                                       _callable=graph.func)
            else:
                destrptr = None
            OBJECT = OBJECT_BY_FLAVOR[LLFLAVOR[self.gcflavor]]
            self.rtyper.attachRuntimeTypeInfoFunc(self.object_type,
                                                  ll_runtime_type_info,
                                                  OBJECT, destrptr)

    def common_repr(self): # -> object or nongcobject reprs
        return getinstancerepr(self.rtyper, None, self.gcflavor)

    def null_instance(self):
        """Return the null pointer of this instance type."""
        return nullptr(self.object_type)

    def upcast(self, result):
        """Cast 'result' up to this repr's pointer type."""
        return cast_pointer(self.lowleveltype, result)

    def create_instance(self):
        """Allocate a prebuilt instance of this type (interp-level)."""
        if self.gcflavor == 'cpy':
            from pypy.rpython import rcpy
            extra_args = (rcpy.build_pytypeobject(self),)
        else:
            extra_args = ()
        return malloc(self.object_type, flavor=self.gcflavor,
                      extra_args=extra_args)

    def has_wrapper(self):
        # True when instances carry a CPython wrapper object
        return self.classdef is not None and (
            self.rtyper.needs_wrapper(self.classdef.classdesc.pyobj))
    has_wrapper = property(has_wrapper)

    def get_ll_hash_function(self):
        """Return the ll hash function, built (and cached) at the first
        level in the hierarchy that declares hash support."""
        if self.classdef is None:
            raise TyperError, 'missing hash support flag in classdef'
        if self.rtyper.needs_hash_support(self.classdef):
            try:
                return self._ll_hash_function
            except AttributeError:
                INSPTR = self.lowleveltype
                def _ll_hash_function(ins):
                    return ll_inst_hash(cast_pointer(INSPTR, ins))
                self._ll_hash_function = _ll_hash_function
                return _ll_hash_function
        else:
            return self.rbase.get_ll_hash_function()

    def initialize_prebuilt_instance(self, value, classdef, result):
        """Fill the low-level structure 'result' from the interp-level
        prebuilt instance 'value', one inheritance level at a time."""
        if self.classdef is not None:
            # recursively build the parent part of the instance
            self.rbase.initialize_prebuilt_instance(value, classdef,
                                                    result.super)
            # then add instance attributes from this level
            for name, (mangled_name, r) in self.fields.items():
                if r.lowleveltype is Void:
                    llattrvalue = None
                elif name == '_hash_cache_': # hash() support
                    llattrvalue = hash(value)
                else:
                    try:
                        attrvalue = getattr(value, name)
                    except AttributeError:
                        # fall back to the class-level default, if any
                        attrvalue = self.classdef.classdesc.read_attribute(name, None)
                        if attrvalue is None:
                            warning("prebuilt instance %r has no attribute %r" % (
                                value, name))
                            llattrvalue = r.lowleveltype._defl()
                        else:
                            llattrvalue = r.convert_desc_or_const(attrvalue)
                    else:
                        llattrvalue = r.convert_const(attrvalue)
                setattr(result, mangled_name, llattrvalue)
        else:
            # OBJECT part
            rclass = getclassrepr(self.rtyper, classdef)
            result.typeptr = rclass.getvtable()

    def getfieldrepr(self, attr):
        """Return the repr used for the given attribute."""
        if attr in self.fields:
            mangled_name, r = self.fields[attr]
            return r
        else:
            if self.classdef is None:
                raise MissingRTypeAttribute(attr)
            return self.rbase.getfieldrepr(attr)

    def getfield(self, vinst, attr, llops, force_cast=False, flags={}):
        """Read the given attribute (or __class__ for the type) of 'vinst'."""
        if attr in self.fields:
            mangled_name, r = self.fields[attr]
            cname = inputconst(Void, mangled_name)
            if force_cast:
                vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
            return llops.genop('getfield', [vinst, cname], resulttype=r)
        else:
            # defined in a parent class: recurse with a pointer cast
            if self.classdef is None:
                raise MissingRTypeAttribute(attr)
            return self.rbase.getfield(vinst, attr, llops, force_cast=True,
                                       flags=flags)

    def setfield(self, vinst, attr, vvalue, llops, force_cast=False,
                 opname='setfield', flags={}):
        """Write the given attribute (or __class__ for the type) of 'vinst'."""
        if attr in self.fields:
            mangled_name, r = self.fields[attr]
            cname = inputconst(Void, mangled_name)
            if force_cast:
                vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
            llops.genop(opname, [vinst, cname, vvalue])
            # XXX this is a temporary hack to clear a dead PyObject
        else:
            # defined in a parent class: recurse with a pointer cast
            if self.classdef is None:
                raise MissingRTypeAttribute(attr)
            self.rbase.setfield(vinst, attr, vvalue, llops, force_cast=True,
                                opname=opname, flags=flags)

    def new_instance(self, llops, classcallhop=None, v_cpytype=None):
        """Build a new instance, without calling __init__."""
        mallocop = 'malloc'
        ctype = inputconst(Void, self.object_type)
        vlist = [ctype]
        if self.classdef is not None:
            flavor = self.gcflavor
            if flavor != 'gc': # not default flavor
                mallocop = 'flavored_malloc'
                vlist.insert(0, inputconst(Void, flavor))
                if flavor == 'cpy':
                    if v_cpytype is None:
                        from pypy.rpython import rcpy
                        cpytype = rcpy.build_pytypeobject(self)
                        v_cpytype = inputconst(Ptr(PyObject), cpytype)
                    vlist.append(v_cpytype)
        vptr = llops.genop(mallocop, vlist,
                           resulttype = Ptr(self.object_type))
        ctypeptr = inputconst(CLASSTYPE, self.rclass.getvtable())
        self.setfield(vptr, '__class__', ctypeptr, llops)
        # initialize instance attributes from their defaults from the class
        if self.classdef is not None:
            flds = self.allinstancefields.keys()
            flds.sort()
            for fldname in flds:
                if fldname == '__class__':
                    continue
                mangled_name, r = self.allinstancefields[fldname]
                if r.lowleveltype is Void:
                    continue
                if fldname == '_hash_cache_':
                    value = Constant(0, Signed)
                else:
                    value = self.classdef.classdesc.read_attribute(fldname, None)
                if value is not None:
                    cvalue = inputconst(r.lowleveltype,
                                        r.convert_desc_or_const(value))
                    # NOTE(review): the dict below lands in the positional
                    # 'force_cast' slot of setfield(); presumably it was
                    # meant as flags={'access_directly': True} -- confirm
                    self.setfield(vptr, fldname, cvalue, llops,
                                  {'access_directly': True})
        return vptr

    def rtype_type(self, hop):
        """Rtype type(x): read the typeptr, guarding against None when
        the annotation allows it."""
        if hop.s_result.is_constant():
            return hop.inputconst(hop.r_result, hop.s_result.const)
        instance_repr = self.common_repr()
        vinst, = hop.inputargs(instance_repr)
        if hop.args_s[0].can_be_none():
            return hop.gendirectcall(ll_inst_type, vinst)
        else:
            return instance_repr.getfield(vinst, '__class__', hop.llops)

    def rtype_getattr(self, hop):
        """Rtype x.attr: instance field, method, or class attribute."""
        attr = hop.args_s[1].const
        vinst, vattr = hop.inputargs(self, Void)
        if attr == '__class__' and hop.r_result.lowleveltype is Void:
            # special case for when the result of '.__class__' is a constant
            [desc] = hop.s_result.descriptions
            return hop.inputconst(Void, desc.pyobj)
        if attr in self.allinstancefields:
            return self.getfield(vinst, attr, hop.llops,
                                 flags=hop.args_s[0].flags)
        elif attr in self.rclass.allmethods:
            # special case for methods: represented as their 'self' only
            # (see MethodsPBCRepr)
            return hop.r_result.get_method_from_instance(self, vinst,
                                                         hop.llops)
        else:
            # class attribute: fetch it through the vtable
            vcls = self.getfield(vinst, '__class__', hop.llops)
            return self.rclass.getclsfield(vcls, attr, hop.llops)

    def rtype_setattr(self, hop):
        """Rtype x.attr = value."""
        attr = hop.args_s[1].const
        r_value = self.getfieldrepr(attr)
        vinst, vattr, vvalue = hop.inputargs(self, Void, r_value)
        self.setfield(vinst, attr, vvalue, hop.llops,
                      flags=hop.args_s[0].flags)

    def rtype_is_true(self, hop):
        """Truth value of an instance: non-null pointer."""
        vinst, = hop.inputargs(self)
        return hop.genop('ptr_nonzero', [vinst], resulttype=Bool)

    def ll_str(self, i): # doesn't work for non-gc classes!
        from pypy.rpython.lltypesystem import rstr
        from pypy.rpython.lltypesystem.ll_str import ll_int2hex
        from pypy.rlib.rarithmetic import r_uint
        if not i:
            return rstr.null_str
        instance = cast_pointer(OBJECTPTR, i)
        uid = r_uint(cast_ptr_to_int(i))
        nameLen = len(instance.typeptr.name)
        # the vtable name is NUL-terminated; drop the trailing '\x00'
        nameString = rstr.mallocstr(nameLen-1)
        i = 0
        while i < nameLen - 1:
            nameString.chars[i] = instance.typeptr.name[i]
            i += 1
        # assemble "<prefix><classname><infix><hex-address><suffix>"
        res = rstr.instance_str_prefix
        res = rstr.ll_strconcat(res, nameString)
        res = rstr.ll_strconcat(res, rstr.instance_str_infix)
        res = rstr.ll_strconcat(res, ll_int2hex(uid, False))
        res = rstr.ll_strconcat(res, rstr.instance_str_suffix)
        return res

    def rtype_isinstance(self, hop):
        """Rtype isinstance(); when the class is a compile-time constant,
        fold it into a subclassrange_min/max range check."""
        class_repr = get_type_repr(hop.rtyper)
        instance_repr = self.common_repr()

        v_obj, v_cls = hop.inputargs(instance_repr, class_repr)
        if isinstance(v_cls, Constant):
            cls = v_cls.value
            # XXX re-implement the following optimization
            #if cls.subclassrange_max == cls.subclassrange_min:
            #    # a class with no subclass
            #    return hop.gendirectcall(rclass.ll_isinstance_exact, v_obj, v_cls)
            #else:
            minid = hop.inputconst(Signed, cls.subclassrange_min)
            maxid = hop.inputconst(Signed, cls.subclassrange_max)
            return hop.gendirectcall(ll_isinstance_const, v_obj, minid, maxid)
        else:
            return hop.gendirectcall(ll_isinstance, v_obj, v_cls)
def buildinstancerepr(rtyper, classdef, gcflavor='gc'):
    """Pick and build the right kind of InstanceRepr for 'classdef':
    virtualizable, tagged (UnboxedValue subclasses), or the plain one."""
    if classdef is None:
        unboxed = []
        virtualizable = False
    else:
        # subclasses that are tagged (unboxed) values
        unboxed = [subdef for subdef in classdef.getallsubdefs()
                   if subdef.classdesc.pyobj is not None and
                      issubclass(subdef.classdesc.pyobj, UnboxedValue)]
        virtualizable = classdef.classdesc.read_attribute('_virtualizable_',
                                                          Constant(False)).value
    if virtualizable:
        # virtualizables are gc-only and incompatible with unboxing
        assert len(unboxed) == 0
        assert gcflavor == 'gc'
        from pypy.rpython.lltypesystem import rvirtualizable
        return rvirtualizable.VirtualizableInstanceRepr(rtyper, classdef)
    elif len(unboxed) == 0:
        return InstanceRepr(rtyper, classdef, gcflavor)
    else:
        # the UnboxedValue class and its parent classes need a
        # special repr for their instances
        if len(unboxed) != 1:
            raise TyperError("%r has several UnboxedValue subclasses" % (
                classdef,))
        assert gcflavor == 'gc'
        from pypy.rpython.lltypesystem import rtagged
        return rtagged.TaggedInstanceRepr(rtyper, classdef, unboxed[0])
class __extend__(pairtype(InstanceRepr, InstanceRepr)):
    """Conversion and comparison rules between two instance reprs."""

    def convert_from_to((r_ins1, r_ins2), v, llops):
        """Cast 'v' from r_ins1's type to r_ins2's type when they are in
        the same hierarchy; NotImplemented otherwise."""
        # which is a subclass of which?
        if r_ins1.classdef is None or r_ins2.classdef is None:
            basedef = None
        else:
            basedef = r_ins1.classdef.commonbase(r_ins2.classdef)
        if basedef == r_ins2.classdef:
            # r_ins1 is an instance of the subclass: converting to parent
            v = llops.genop('cast_pointer', [v],
                            resulttype = r_ins2.lowleveltype)
            return v
        elif basedef == r_ins1.classdef:
            # r_ins2 is an instance of the subclass: potentially unsafe
            # casting, but we do it anyway (e.g. the annotator produces
            # such casts after a successful isinstance() check)
            v = llops.genop('cast_pointer', [v],
                            resulttype = r_ins2.lowleveltype)
            return v
        else:
            return NotImplemented

    def rtype_is_((r_ins1, r_ins2), hop):
        """Pointer identity, after converting both sides to a common
        base repr."""
        if r_ins1.gcflavor != r_ins2.gcflavor:
            # obscure logic, the is can be true only if both are None
            v_ins1, v_ins2 = hop.inputargs(r_ins1.common_repr(), r_ins2.common_repr())
            return hop.gendirectcall(ll_both_none, v_ins1, v_ins2)
        if r_ins1.classdef is None or r_ins2.classdef is None:
            basedef = None
        else:
            basedef = r_ins1.classdef.commonbase(r_ins2.classdef)
        r_ins = getinstancerepr(r_ins1.rtyper, basedef, r_ins1.gcflavor)
        return pairtype(Repr, Repr).rtype_is_(pair(r_ins, r_ins), hop)
    rtype_eq = rtype_is_

    def rtype_ne(rpair, hop):
        """x != y is the negation of x == y."""
        v = rpair.rtype_eq(hop)
        return hop.genop("bool_not", [v], resulttype=Bool)
#
# _________________________ Conversions for CPython _________________________
# part I: wrapping, destructor, preserving object identity
def call_destructor(thing, repr):
    # RPython-level destructor hook: annotated as a helper (see
    # into_cobject) and invoked when the wrapping PyCObject dies.
    ll_call_destructor(thing, repr)
def ll_call_destructor(thang, repr):
    """Placeholder body; the real code is generated at rtyping time by the
    ExtRegistryEntry registered below."""
    return 42                     # will be mapped
class Entry(ExtRegistryEntry):
    """Rtyping of ll_call_destructor(): clear the '_wrapper_' field (if
    present) and drop the GC protection on the dying instance."""
    _about_ = ll_call_destructor
    s_result_annotation = None

    def specialize_call(self, hop):
        v_inst, c_spec = hop.inputargs(*hop.args_r)
        repr = c_spec.value
        if repr.has_wrapper:
            null = hop.inputconst(Ptr(PyObject), nullptr(PyObject))
            # XXX this bare_setfield is needed because we cannot do refcount operations
            # XXX on a dead object.  Actually this is an abuse.  Instead,
            # XXX we should consider a different operation for 'uninitialized fields'
            repr.setfield(v_inst, '_wrapper_', null, hop.llops,
                          opname='bare_setfield')
        # balance the gc_protect done in into_cobject()
        hop.genop('gc_unprotect', [v_inst])
def create_pywrapper(thing, repr):
    # Build a fresh CPython-level wrapper for 'thing'; the real work is
    # generated by the Entry registered for ll_create_pywrapper.
    return ll_create_pywrapper(thing, repr)
def ll_create_pywrapper(thing, repr):
    """Placeholder body; replaced during rtyping by the ExtRegistryEntry
    registered below."""
    return 42
def into_cobject(v_inst, repr, llops):
    # Pack the instance pointer into a PyCObject whose destructor is
    # call_destructor; the instance is gc_protect-ed so the wrapper keeps
    # it alive until the CObject is freed.
    llops.genop('gc_protect', [v_inst])
    ARG = repr.lowleveltype
    reprPBC = llops.rtyper.annotator.bookkeeper.immutablevalue(repr)
    # annotate call_destructor specialized for this instance repr
    fp_dtor = llops.rtyper.annotate_helper_fn(call_destructor, [ARG, reprPBC])
    FUNC = FuncType([ARG, Void], Void)
    c_dtor = inputconst(Ptr(FUNC), fp_dtor)
    return llops.gencapicall('PyCObject_FromVoidPtr', [v_inst, c_dtor], resulttype=pyobj_repr)
def outof_cobject(v_obj, repr, llops):
    # Extract the instance pointer back out of a PyCObject and protect it
    # from the GC while it is manipulated from C level.
    v_inst = llops.gencapicall('PyCObject_AsVoidPtr', [v_obj], resulttype=repr)
    llops.genop('gc_protect', [v_inst])
    return v_inst
class Entry(ExtRegistryEntry):
    """Rtyping of ll_create_pywrapper(): allocate a CPython instance of
    the user's class, attach the CObject as '__self__', and cache the
    wrapper in the instance's '_wrapper_' field when available."""
    _about_ = ll_create_pywrapper
    s_result_annotation = annmodel.SomePtr(Ptr(PyObject))

    def specialize_call(self, hop):
        v_inst, c_spec = hop.inputargs(*hop.args_r)
        repr = c_spec.value
        v_res = into_cobject(v_inst, repr, hop.llops)
        v_cobj = v_res
        c_cls = hop.inputconst(pyobj_repr, repr.classdef.classdesc.pyobj)
        c_0 = hop.inputconst(Signed, 0)
        # allocate an (empty) CPython-level instance of the class
        v_res = hop.llops.gencapicall('PyType_GenericAlloc', [c_cls, c_0],
                                      resulttype=pyobj_repr)
        c_self = hop.inputconst(pyobj_repr, '__self__')
        hop.genop('setattr', [v_res, c_self, v_cobj], resulttype=pyobj_repr)
        if repr.has_wrapper:
            repr.setfield(v_inst, '_wrapper_', v_res, hop.llops)
            hop.genop('gc_unprotect', [v_res]) # yes a weak ref
        return v_res
def fetch_pywrapper(thing, repr):
    # Return the cached CPython wrapper for 'thing' (NULL when absent);
    # real code generated by the Entry for ll_fetch_pywrapper.
    return ll_fetch_pywrapper(thing, repr)
def ll_fetch_pywrapper(thing, repr):
    """Placeholder body; replaced during rtyping by the ExtRegistryEntry
    registered below."""
    return 42
class Entry(ExtRegistryEntry):
    """Rtyping of ll_fetch_pywrapper(): read the cached wrapper back out
    of the '_wrapper_' field, or produce NULL when the instance repr has
    no such field."""
    _about_ = ll_fetch_pywrapper
    s_result_annotation = annmodel.SomePtr(Ptr(PyObject))

    def specialize_call(self, hop):
        v_inst, c_spec = hop.inputargs(*hop.args_r)
        repr = c_spec.value
        if repr.has_wrapper:
            return repr.getfield(v_inst, '_wrapper_', hop.llops)
        else:
            # no cache field: always report "no wrapper yet"
            null = hop.inputconst(Ptr(PyObject), nullptr(PyObject))
            return null
def ll_wrap_object(obj, repr):
    """Return the CPython wrapper for 'obj', creating one on first use."""
    wrapper = fetch_pywrapper(obj, repr)
    if wrapper:
        return wrapper
    return create_pywrapper(obj, repr)
class __extend__(pairtype(InstanceRepr, PyObjRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # Wrap an RPython instance into a CPython object.  When the
        # instance repr has a '_wrapper_' cache field, reuse the cached
        # wrapper; otherwise always build a new one.
        c_repr = inputconst(Void, r_from)
        if r_from.has_wrapper:
            return llops.gendirectcall(ll_wrap_object, v, c_repr)
        else:
            return llops.gendirectcall(create_pywrapper, v, c_repr)
# part II: unwrapping, creating the instance
class __extend__(pairtype(PyObjRepr, InstanceRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # Unwrap a CPython object back into its RPython instance.
        if r_to.has_wrapper:
            init, context = llops.rtyper.get_wrapping_hint(r_to.classdef)
            if context is init:
                # saving an extra __new__ method, we create the instance on __init__
                v_inst = r_to.new_instance(llops)
                v_cobj = into_cobject(v_inst, r_to, llops)
                c_self = inputconst(pyobj_repr, '__self__')
                llops.genop('setattr', [v, c_self, v_cobj], resulttype=pyobj_repr)
                r_to.setfield(v_inst, '_wrapper_', v, llops)
                llops.genop('gc_unprotect', [v])
                return v_inst
        # if we don't have a wrapper field, we just don't support __init__
        # normal path: fetch the CObject stored under '__self__'
        c_self = inputconst(pyobj_repr, '__self__')
        v = llops.genop('getattr', [v, c_self], resulttype=r_from)
        return outof_cobject(v, r_to, llops)
# ____________________________________________________________
#
# Low-level implementation of operations on classes and instances
# doesn't work for non-gc stuff!
def ll_cast_to_object(obj):
    # Upcast any instance pointer to the root OBJECT type.
    return cast_pointer(OBJECTPTR, obj)
# doesn't work for non-gc stuff!
def ll_type(obj):
    # Read the runtime type (vtable pointer) out of an instance.
    return cast_pointer(OBJECTPTR, obj).typeptr
def ll_issubclass(subcls, cls):
    """Subclass test using the precomputed subclass-id ranges: 'subcls'
    is a subclass of 'cls' iff its range-min falls inside cls's range."""
    subid = subcls.subclassrange_min
    return cls.subclassrange_min <= subid and subid <= cls.subclassrange_max
def ll_issubclass_const(subcls, minid, maxid):
    """Subclass test against a constant class, with its id range already
    extracted into 'minid'/'maxid'."""
    subid = subcls.subclassrange_min
    return minid <= subid and subid <= maxid
def ll_isinstance(obj, cls): # obj should be cast to OBJECT or NONGCOBJECT
    """isinstance() on an instance pointer: NULL is an instance of
    nothing; otherwise delegate to the range-based subclass test."""
    if obj:
        return ll_issubclass(obj.typeptr, cls)
    return False
def ll_isinstance_const(obj, minid, maxid):
    """isinstance() against a constant class whose subclass-id range is
    already known; NULL is never an instance."""
    if obj:
        return ll_issubclass_const(obj.typeptr, minid, maxid)
    return False
def ll_isinstance_exact(obj, cls):
    """type(obj) is exactly 'cls' (no subclasses); NULL never matches."""
    if not obj:
        return False
    return obj.typeptr == cls
def ll_runtime_type_info(obj):
    # Fetch the runtime-type-info object stored in the instance's vtable.
    return obj.typeptr.rtti
def ll_inst_hash(ins):
    """Lazily-computed identity hash, cached in the 'hash_cache' field;
    a cache value of 0 means "not computed yet"."""
    if not ins:
        return 0        # for None
    cached = ins.hash_cache
    if cached == 0:
        cached = intmask(id(ins))
        ins.hash_cache = cached
    return cached
def ll_inst_type(obj):
    # type(x) at the low level: read the vtable pointer.
    if obj:
        return obj.typeptr
    else:
        # type(None) -> NULL (for now)
        return nullptr(typeOf(obj).TO.typeptr.TO)
def ll_both_none(ins1, ins2):
    """True only when both instance pointers are NULL (i.e. both None)."""
    return not (ins1 or ins2)
# ____________________________________________________________
_missing = object()   # sentinel: distinguishes "no default passed" from default=None
def fishllattr(inst, name, default=_missing):
    """Testing helper: read field 'name' from a low-level instance,
    walking up the chain of 'super' sub-structures until the field is
    found.  Raises AttributeError when absent, unless a default is given."""
    fieldname = 'inst_' + name
    p = widest = lltype.normalizeptr(inst)
    while True:
        try:
            return getattr(p, fieldname)
        except AttributeError:
            # field not at this level: climb to the parent structure
            try:
                p = p.super
            except AttributeError:
                break
    if default is _missing:
        raise AttributeError("%s has no field %s" % (lltype.typeOf(widest),
                                                     name))
    return default
def feedllattr(inst, name, llvalue):
    """Testing helper: store 'llvalue' into field 'name' of a low-level
    instance, walking up the chain of 'super' sub-structures.  Always
    raises AttributeError when the field does not exist anywhere."""
    fieldname = 'inst_' + name
    p = widest = lltype.normalizeptr(inst)
    while True:
        try:
            return setattr(p, fieldname, llvalue)
        except AttributeError:
            # field not at this level: climb to the parent structure
            try:
                p = p.super
            except AttributeError:
                break
    raise AttributeError("%s has no field %s" % (lltype.typeOf(widest),
                                                 name))
| Python |
from pypy.annotation.pairtype import pairtype
from pypy.annotation import model as annmodel
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.lltypesystem import rclass
from pypy.rpython.lltypesystem.rdict import rtype_r_dict
from pypy.rlib import objectmodel
from pypy.rpython.rmodel import TyperError, Constant
from pypy.rpython.robject import pyobj_repr
from pypy.rpython.rbool import bool_repr
def rtype_builtin_isinstance(hop):
    # Rtype a call to isinstance().
    if hop.s_result.is_constant():
        # the annotator already knows the answer
        return hop.inputconst(lltype.Bool, hop.s_result.const)
    if hop.args_r[0] == pyobj_repr or hop.args_r[1] == pyobj_repr:
        # fall back to CPython's isinstance() on wrapped PyObjects
        v_obj, v_typ = hop.inputargs(pyobj_repr, pyobj_repr)
        c = hop.inputconst(pyobj_repr, isinstance)
        v = hop.genop('simple_call', [c, v_obj, v_typ], resulttype = pyobj_repr)
        return hop.llops.convertvar(v, pyobj_repr, bool_repr)

    if hop.args_s[1].is_constant() and hop.args_s[1].const == list:
        if hop.args_s[0].knowntype != list:
            raise TyperError("isinstance(x, list) expects x to be known statically to be a list or None")
        # 'x' is statically a list-or-None, so the check degenerates to
        # a NULL-pointer test
        rlist = hop.args_r[0]
        vlist = hop.inputarg(rlist, arg=0)
        cnone = hop.inputconst(rlist, None)
        return hop.genop('ptr_ne', [vlist, cnone], resulttype=lltype.Bool)

    # general case: delegate to the instance repr
    assert isinstance(hop.args_r[0], rclass.InstanceRepr)
    return hop.args_r[0].rtype_isinstance(hop)
def ll_instantiate(typeptr):   # NB. used by rpbc.ClassesPBCRepr as well
    """Create an instance of the class described by 'typeptr' by calling
    the 'instantiate' function stored in its vtable."""
    instantiate_fn = typeptr.instantiate
    return instantiate_fn()
def rtype_instantiate(hop):
    # Rtype objectmodel.instantiate(cls): build an instance of 'cls'
    # without running __init__.
    s_class = hop.args_s[0]
    assert isinstance(s_class, annmodel.SomePBC)
    if len(s_class.descriptions) != 1:
        # instantiate() on a variable class
        vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper))
        v_inst = hop.gendirectcall(ll_instantiate, vtypeptr)
        return hop.genop('cast_pointer', [v_inst],   # v_type implicit in r_result
                         resulttype = hop.r_result.lowleveltype)
    # constant class: emit a direct new_instance
    classdef = s_class.descriptions.keys()[0].getuniqueclassdef()
    return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops)
def rtype_builtin_hasattr(hop):
    """Rtype a call to hasattr().

    Supported only when the annotator already knows the answer (constant
    result) or when the object is a wrapped PyObject, in which case the
    call is delegated to CPython's own hasattr().  Anything else raises
    TyperError.
    """
    if hop.s_result.is_constant():
        return hop.inputconst(lltype.Bool, hop.s_result.const)
    if hop.args_r[0] == pyobj_repr:
        v_obj, v_name = hop.inputargs(pyobj_repr, pyobj_repr)
        c = hop.inputconst(pyobj_repr, hasattr)
        v = hop.genop('simple_call', [c, v_obj, v_name], resulttype = pyobj_repr)
        return hop.llops.convertvar(v, pyobj_repr, bool_repr)
    # fixed typo in the error message ("suported" -> "supported")
    raise TyperError("hasattr is only supported on a constant or on PyObject")
def rtype_builtin___import__(hop):
    # Rtype __import__() by delegating to CPython's __import__ via the
    # pyobj representation; all arguments are converted to PyObjects.
    args_v = hop.inputargs(*[pyobj_repr for ign in hop.args_r])
    c = hop.inputconst(pyobj_repr, __import__)
    return hop.genop('simple_call', [c] + args_v, resulttype = pyobj_repr)
# Dispatch table: maps a built-in (or built-in-like) callable to the
# function that knows how to rtype a call to it.
BUILTIN_TYPER = {}
BUILTIN_TYPER[objectmodel.instantiate] = rtype_instantiate
BUILTIN_TYPER[isinstance] = rtype_builtin_isinstance
BUILTIN_TYPER[hasattr] = rtype_builtin_hasattr
BUILTIN_TYPER[__import__] = rtype_builtin___import__
BUILTIN_TYPER[objectmodel.r_dict] = rtype_r_dict
| Python |
"""
The table of all LL operations.
"""
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.objspace.flow.model import roproperty
class LLOp(object):
    """Descriptor for one low-level operation: a bundle of boolean
    properties consulted by the annotator, optimizers and backends, plus
    an optional directly-runnable implementation (for constant folding
    and the llinterpreter)."""

    def __init__(self, sideeffects=True, canfold=False, canraise=(),
                 pyobj=False, canunwindgc=False, canrun=False, oo=False,
                 tryfold=False):
        # self.opname = ... (set afterwards)

        if canfold:
            sideeffects = False

        # The operation has no side-effects: it can be removed
        # if its result is not used
        self.sideeffects = sideeffects

        # Can be safely constant-folded: no side-effects
        # and always gives the same result for given args
        self.canfold = canfold

        # Can *try* to fold the operation, but it may raise on you
        self.tryfold = tryfold or canfold

        # Exceptions that can be raised
        self.canraise = canraise
        assert isinstance(canraise, tuple)
        assert not canraise or not canfold

        # The operation manipulates PyObjects
        self.pyobj = pyobj

        # The operation can unwind the stack in a stackless gc build
        self.canunwindgc = canunwindgc
        if canunwindgc:
            # make sure StackException shows up in canraise (unless a
            # broader Exception entry already covers it)
            if (StackException not in self.canraise and
                Exception not in self.canraise):
                self.canraise += (StackException,)

        # The operation can be run directly with __call__
        self.canrun = canrun or canfold

        # The operation belongs to the ootypesystem
        self.oo = oo

    # __________ make the LLOp instances callable from LL helpers __________

    __name__ = property(lambda self: 'llop_'+self.opname)

    def __call__(self, RESULTTYPE, *args):
        # llop is meant to be rtyped and not called directly, unless it is
        # a canfold=True operation
        fold = self.fold
        if getattr(fold, 'need_result_type', False):
            val = fold(RESULTTYPE, *args)
        else:
            val = fold(*args)
        if RESULTTYPE is not lltype.Void:
            val = lltype.enforce(RESULTTYPE, val)
        return val

    def get_fold_impl(self):
        # Lazily resolve the folding implementation and cache it on self
        # (see the roproperty below).
        global lltype                 #  <- lazy import hack, worth an XXX
        from pypy.rpython.lltypesystem import lltype
        if self.canrun:
            if self.oo:
                from pypy.rpython.ootypesystem.ooopimpl import get_op_impl
            else:
                from pypy.rpython.lltypesystem.opimpl import get_op_impl
            op_impl = get_op_impl(self.opname)
        else:
            error = TypeError("cannot constant-fold operation %r" % (
                self.opname,))
            def op_impl(*args):
                raise error
        # cache the implementation function into 'self'
        self.fold = op_impl
        return op_impl
    fold = roproperty(get_fold_impl)

    def is_pure(self, *ARGTYPES):
        # Pure = result depends only on the arguments.
        return (self.canfold or                # canfold => pure operation
                self is llop.debug_assert or   # debug_assert is pure enough
                                               # reading from immutable
                (self in (llop.getfield, llop.getarrayitem) and
                 ARGTYPES[0].TO._hints.get('immutable')))
def enum_ops_without_sideeffects(raising_is_ok=False):
    """Enumerate operations that have no side-effects
    (see also enum_foldable_ops)."""
    for opname, opdesc in LL_OPERATIONS.iteritems():
        if opdesc.sideeffects:
            continue
        if raising_is_ok or not opdesc.canraise:
            yield opname
def enum_foldable_ops(_ignored=None):
    """Enumerate operations that can be constant-folded."""
    for opname, opdesc in LL_OPERATIONS.iteritems():
        if not opdesc.canfold:
            continue
        # sanity: a foldable op must never raise
        assert not opdesc.canraise
        yield opname
class Entry(ExtRegistryEntry):
    "Annotation and rtyping of LLOp instances, which are callable."
    _type_ = LLOp

    def compute_result_annotation(self, RESULTTYPE, *args):
        # The first argument must be a constant low-level type; the call
        # annotates to the corresponding annotation.
        from pypy.annotation.model import lltype_to_annotation
        assert RESULTTYPE.is_constant()
        return lltype_to_annotation(RESULTTYPE.const)

    def specialize_call(self, hop):
        # Emit the named operation directly, dropping the RESULTTYPE
        # argument (args_r[0]) which only carries the result type.
        op = self.instance    # the LLOp object that was called
        args_v = [hop.inputarg(r, i+1) for i, r in enumerate(hop.args_r[1:])]
        hop.exception_is_here()
        return hop.genop(op.opname, args_v, resulttype=hop.r_result.lowleveltype)
class StackException(Exception):
    """Base for internal exceptions possibly used by the stackless
    implementation."""
    # NOTE: LLOp.__init__ automatically adds this to 'canraise' for every
    # canunwindgc=True operation.
# ____________________________________________________________
#
# This list corresponds to the operations implemented by the LLInterpreter.
# Note that many exception-raising operations can be replaced by calls
# to helper functions in pypy.rpython.raisingops.raisingops.
# ***** Run test_lloperation after changes. *****
LL_OPERATIONS = {
    # Maps operation name -> LLOp descriptor; see the LLOp class for the
    # meaning of each flag.

    'direct_call':          LLOp(canraise=(Exception,)),
    'indirect_call':        LLOp(canraise=(Exception,)),

    # __________ numeric operations __________

    'bool_not':             LLOp(canfold=True),

    'char_lt':              LLOp(canfold=True),
    'char_le':              LLOp(canfold=True),
    'char_eq':              LLOp(canfold=True),
    'char_ne':              LLOp(canfold=True),
    'char_gt':              LLOp(canfold=True),
    'char_ge':              LLOp(canfold=True),

    'unichar_eq':           LLOp(canfold=True),
    'unichar_ne':           LLOp(canfold=True),

    'int_is_true':          LLOp(canfold=True),
    'int_neg':              LLOp(canfold=True),
    'int_neg_ovf':          LLOp(canraise=(OverflowError,), tryfold=True),
    'int_abs':              LLOp(canfold=True),
    'int_abs_ovf':          LLOp(canraise=(OverflowError,), tryfold=True),
    'int_invert':           LLOp(canfold=True),

    'int_add':              LLOp(canfold=True),
    'int_sub':              LLOp(canfold=True),
    'int_mul':              LLOp(canfold=True),
    'int_floordiv':         LLOp(canfold=True),
    'int_floordiv_zer':     LLOp(canraise=(ZeroDivisionError,), tryfold=True),
    'int_mod':              LLOp(canfold=True),
    'int_mod_zer':          LLOp(canraise=(ZeroDivisionError,), tryfold=True),
    'int_lt':               LLOp(canfold=True),
    'int_le':               LLOp(canfold=True),
    'int_eq':               LLOp(canfold=True),
    'int_ne':               LLOp(canfold=True),
    'int_gt':               LLOp(canfold=True),
    'int_ge':               LLOp(canfold=True),
    'int_and':              LLOp(canfold=True),
    'int_or':               LLOp(canfold=True),
    'int_lshift':           LLOp(canfold=True),
    'int_lshift_val':       LLOp(canraise=(ValueError,), tryfold=True),
    'int_rshift':           LLOp(canfold=True),
    'int_rshift_val':       LLOp(canraise=(ValueError,), tryfold=True),
    'int_xor':              LLOp(canfold=True),

    'int_add_ovf':          LLOp(canraise=(OverflowError,), tryfold=True),
    'int_sub_ovf':          LLOp(canraise=(OverflowError,), tryfold=True),
    'int_mul_ovf':          LLOp(canraise=(OverflowError,), tryfold=True),
    'int_floordiv_ovf':     LLOp(canraise=(OverflowError,), tryfold=True),
    'int_floordiv_ovf_zer': LLOp(canraise=(OverflowError, ZeroDivisionError),
                                 tryfold=True),
    'int_mod_ovf':          LLOp(canraise=(OverflowError,), tryfold=True),
    'int_mod_ovf_zer':      LLOp(canraise=(OverflowError, ZeroDivisionError),
                                 tryfold=True),
    'int_lshift_ovf':       LLOp(canraise=(OverflowError,), tryfold=True),
    'int_lshift_ovf_val':   LLOp(canraise=(OverflowError, ValueError,),
                                 tryfold=True),

    'uint_is_true':         LLOp(canfold=True),
    'uint_invert':          LLOp(canfold=True),

    'uint_add':             LLOp(canfold=True),
    'uint_sub':             LLOp(canfold=True),
    'uint_mul':             LLOp(canfold=True),
    'uint_floordiv':        LLOp(canfold=True),
    'uint_floordiv_zer':    LLOp(canraise=(ZeroDivisionError,), tryfold=True),
    'uint_mod':             LLOp(canfold=True),
    'uint_mod_zer':         LLOp(canraise=(ZeroDivisionError,), tryfold=True),
    'uint_lt':              LLOp(canfold=True),
    'uint_le':              LLOp(canfold=True),
    'uint_eq':              LLOp(canfold=True),
    'uint_ne':              LLOp(canfold=True),
    'uint_gt':              LLOp(canfold=True),
    'uint_ge':              LLOp(canfold=True),
    'uint_and':             LLOp(canfold=True),
    'uint_or':              LLOp(canfold=True),
    'uint_lshift':          LLOp(canfold=True),
    'uint_lshift_val':      LLOp(canraise=(ValueError,), tryfold=True),
    'uint_rshift':          LLOp(canfold=True),
    'uint_rshift_val':      LLOp(canraise=(ValueError,), tryfold=True),
    'uint_xor':             LLOp(canfold=True),

    'float_is_true':        LLOp(canfold=True),
    'float_neg':            LLOp(canfold=True),
    'float_abs':            LLOp(canfold=True),

    'float_add':            LLOp(canfold=True),
    'float_sub':            LLOp(canfold=True),
    'float_mul':            LLOp(canfold=True),
    'float_truediv':        LLOp(canfold=True),
    'float_lt':             LLOp(canfold=True),
    'float_le':             LLOp(canfold=True),
    'float_eq':             LLOp(canfold=True),
    'float_ne':             LLOp(canfold=True),
    'float_gt':             LLOp(canfold=True),
    'float_ge':             LLOp(canfold=True),
    'float_pow':            LLOp(canfold=True),

    'llong_is_true':        LLOp(canfold=True),
    'llong_neg':            LLOp(canfold=True),
    'llong_neg_ovf':        LLOp(canraise=(OverflowError,), tryfold=True),
    'llong_abs':            LLOp(canfold=True),
    'llong_abs_ovf':        LLOp(canraise=(OverflowError,), tryfold=True),
    'llong_invert':         LLOp(canfold=True),

    'llong_add':            LLOp(canfold=True),
    'llong_sub':            LLOp(canfold=True),
    'llong_mul':            LLOp(canfold=True),
    'llong_floordiv':       LLOp(canfold=True),
    'llong_floordiv_zer':   LLOp(canraise=(ZeroDivisionError,), tryfold=True),
    'llong_mod':            LLOp(canfold=True),
    'llong_mod_zer':        LLOp(canraise=(ZeroDivisionError,), tryfold=True),
    'llong_lt':             LLOp(canfold=True),
    'llong_le':             LLOp(canfold=True),
    'llong_eq':             LLOp(canfold=True),
    'llong_ne':             LLOp(canfold=True),
    'llong_gt':             LLOp(canfold=True),
    'llong_ge':             LLOp(canfold=True),
    'llong_and':            LLOp(canfold=True),
    'llong_or':             LLOp(canfold=True),
    'llong_lshift':         LLOp(canfold=True),
    'llong_lshift_val':     LLOp(canraise=(ValueError,), tryfold=True),
    'llong_rshift':         LLOp(canfold=True),
    'llong_rshift_val':     LLOp(canraise=(ValueError,), tryfold=True),
    'llong_xor':            LLOp(canfold=True),

    'ullong_is_true':       LLOp(canfold=True),
    'ullong_invert':        LLOp(canfold=True),

    'ullong_add':           LLOp(canfold=True),
    'ullong_sub':           LLOp(canfold=True),
    'ullong_mul':           LLOp(canfold=True),
    'ullong_floordiv':      LLOp(canfold=True),
    'ullong_floordiv_zer':  LLOp(canraise=(ZeroDivisionError,), tryfold=True),
    'ullong_mod':           LLOp(canfold=True),
    'ullong_mod_zer':       LLOp(canraise=(ZeroDivisionError,), tryfold=True),
    'ullong_lt':            LLOp(canfold=True),
    'ullong_le':            LLOp(canfold=True),
    'ullong_eq':            LLOp(canfold=True),
    'ullong_ne':            LLOp(canfold=True),
    'ullong_gt':            LLOp(canfold=True),
    'ullong_ge':            LLOp(canfold=True),
    'ullong_and':           LLOp(canfold=True),
    'ullong_or':            LLOp(canfold=True),
    'ullong_lshift':        LLOp(canfold=True),
    'ullong_lshift_val':    LLOp(canraise=(ValueError,), tryfold=True),
    'ullong_rshift':        LLOp(canfold=True),
    'ullong_rshift_val':    LLOp(canraise=(ValueError,), tryfold=True),
    'ullong_xor':           LLOp(canfold=True),

    'cast_primitive':       LLOp(canfold=True),
    'cast_bool_to_int':     LLOp(canfold=True),
    'cast_bool_to_uint':    LLOp(canfold=True),
    'cast_bool_to_float':   LLOp(canfold=True),
    'cast_char_to_int':     LLOp(canfold=True),
    'cast_unichar_to_int':  LLOp(canfold=True),
    'cast_int_to_char':     LLOp(canfold=True),
    'cast_int_to_unichar':  LLOp(canfold=True),
    'cast_int_to_uint':     LLOp(canfold=True),
    'cast_int_to_float':    LLOp(canfold=True),
    'cast_int_to_longlong': LLOp(canfold=True),
    'cast_uint_to_int':     LLOp(canfold=True),
    'cast_uint_to_float':   LLOp(canfold=True),
    'cast_longlong_to_float':LLOp(canfold=True),
    'cast_float_to_int':    LLOp(canraise=(OverflowError,), tryfold=True),
    'cast_float_to_uint':   LLOp(canfold=True),    # XXX need OverflowError?
    'cast_float_to_longlong':LLOp(canfold=True),
    'truncate_longlong_to_int':LLOp(canfold=True),

    # __________ pointer operations __________

    'malloc':               LLOp(canraise=(MemoryError,), canunwindgc=True),
    'zero_malloc':          LLOp(canraise=(MemoryError,), canunwindgc=True),
    'malloc_varsize':       LLOp(canraise=(MemoryError,), canunwindgc=True),
    'zero_malloc_varsize':  LLOp(canraise=(MemoryError,), canunwindgc=True),
    'zero_gc_pointers_inside': LLOp(),
    'flavored_malloc':      LLOp(canraise=(MemoryError,)),
    'flavored_malloc_varsize' : LLOp(canraise=(MemoryError,)),
    'flavored_free':        LLOp(),
    'getfield':             LLOp(sideeffects=False, canrun=True),
    'getarrayitem':         LLOp(sideeffects=False, canrun=True),
    'getarraysize':         LLOp(canfold=True),
    'getsubstruct':         LLOp(canfold=True),
    'getarraysubstruct':    LLOp(canfold=True),
    'setfield':             LLOp(),
    'bare_setfield':        LLOp(),
    'setarrayitem':         LLOp(),
    'bare_setarrayitem':    LLOp(),
    'cast_pointer':         LLOp(canfold=True),
    'ptr_eq':               LLOp(canfold=True),
    'ptr_ne':               LLOp(canfold=True),
    'ptr_nonzero':          LLOp(canfold=True),
    'ptr_iszero':           LLOp(canfold=True),
    'cast_ptr_to_int':      LLOp(sideeffects=False),
    'cast_int_to_ptr':      LLOp(sideeffects=False),
    'direct_fieldptr':      LLOp(canfold=True),
    'direct_arrayitems':    LLOp(canfold=True),
    'direct_ptradd':        LLOp(canfold=True),
    'cast_opaque_ptr':      LLOp(sideeffects=False),

    # __________ address operations __________

    'boehm_malloc':         LLOp(),
    'boehm_malloc_atomic':  LLOp(),
    'boehm_register_finalizer': LLOp(),
    'raw_malloc':           LLOp(),
    'raw_malloc_usage':     LLOp(sideeffects=False),
    'raw_free':             LLOp(),
    'raw_memclear':         LLOp(),
    'raw_memcopy':          LLOp(),
    'raw_load':             LLOp(sideeffects=False),
    'raw_store':            LLOp(),
    'adr_add':              LLOp(canfold=True),
    'adr_sub':              LLOp(canfold=True),
    'adr_delta':            LLOp(canfold=True),
    'adr_lt':               LLOp(canfold=True),
    'adr_le':               LLOp(canfold=True),
    'adr_eq':               LLOp(canfold=True),
    'adr_ne':               LLOp(canfold=True),
    'adr_gt':               LLOp(canfold=True),
    'adr_ge':               LLOp(canfold=True),
    'adr_call':             LLOp(canraise=(Exception,)),
    'cast_ptr_to_adr':      LLOp(canfold=True),
    'cast_adr_to_ptr':      LLOp(canfold=True),
    'cast_ptr_to_weakadr':  LLOp(canfold=True),
    'cast_weakadr_to_ptr':  LLOp(canfold=True),
    'cast_weakadr_to_int':  LLOp(canfold=True),
    'cast_adr_to_int':      LLOp(canfold=True),
    'cast_int_to_adr':      LLOp(canfold=True),   # not implemented in llinterp

    # __________ used by the JIT ________

    'call_boehm_gc_alloc':  LLOp(canraise=(MemoryError,)),

    # __________ GC operations __________

    'gc__collect':          LLOp(canunwindgc=True),
    'gc_free':              LLOp(),
    'gc_fetch_exception':   LLOp(),
    'gc_restore_exception': LLOp(),
    'gc_call_rtti_destructor': LLOp(),
    'gc_deallocate':        LLOp(),
    'gc_push_alive_pyobj':  LLOp(),
    'gc_pop_alive_pyobj':   LLOp(),
    'gc_protect':           LLOp(),
    'gc_unprotect':         LLOp(),
    'gc_reload_possibly_moved': LLOp(),
    # experimental operations in support of thread cloning, only
    # implemented by the Mark&Sweep GC
    'gc_x_swap_pool':       LLOp(canraise=(MemoryError,), canunwindgc=True),
    'gc_x_clone':           LLOp(canraise=(MemoryError, RuntimeError),
                                 canunwindgc=True),
    'gc_x_size_header':     LLOp(),
    # this one is even more experimental; only implemented with the
    # Mark&Sweep GC, and likely only useful when combined with
    # stackless:
    'gc_x_become':          LLOp(canraise=(RuntimeError,), canunwindgc=True),

    # NOTE NOTE NOTE! don't forget *** canunwindgc=True *** for anything that
    # can go through a stack unwind, in particular anything that mallocs!

    # __________ stackless operation(s) __________

    'yield_current_frame_to_caller': LLOp(canraise=(StackException,)),
    #                               can always unwind, not just if stackless gc

    'resume_point':         LLOp(canraise=(Exception,)),
    'resume_state_create':  LLOp(canraise=(MemoryError,), canunwindgc=True),
    'resume_state_invoke':  LLOp(canraise=(Exception, StackException)),

    # __________ misc operations __________

    'keepalive':            LLOp(),
    'same_as':              LLOp(canfold=True),
    'hint':                 LLOp(),
    'is_early_constant':    LLOp(sideeffects=False),
    'check_no_more_arg':    LLOp(canraise=(Exception,)),
    'check_self_nonzero':   LLOp(canraise=(Exception,)),
    'decode_arg':           LLOp(canraise=(Exception,)),
    'decode_arg_def':       LLOp(canraise=(Exception,)),
    'getslice':             LLOp(canraise=(Exception,)),
    'check_and_clear_exc':  LLOp(),

    # __________ debugging __________
    'debug_view':           LLOp(),
    'debug_print':          LLOp(),
    'debug_pdb':            LLOp(),
    'debug_assert':         LLOp(tryfold=True),
    'debug_fatalerror':     LLOp(),

    # __________ instrumentation _________
    'instrument_count':     LLOp(),

    # __________ ootype operations __________
    'new':                  LLOp(oo=True, canraise=(Exception,)),
    'runtimenew':           LLOp(oo=True, canraise=(Exception,)),
    'oonewcustomdict':      LLOp(oo=True, canraise=(Exception,)),
    'oosetfield':           LLOp(oo=True),
    'oogetfield':           LLOp(oo=True, sideeffects=False),
    'oosend':               LLOp(oo=True, canraise=(Exception,)),
    'ooupcast':             LLOp(oo=True, canfold=True),
    'oodowncast':           LLOp(oo=True, canfold=True),
    'oononnull':            LLOp(oo=True, canfold=True),
    'oois':                 LLOp(oo=True, canfold=True),
    'instanceof':           LLOp(oo=True, canfold=True),
    'classof':              LLOp(oo=True, canfold=True),
    'subclassof':           LLOp(oo=True, canfold=True),
    'ooidentityhash':       LLOp(oo=True, sideeffects=False),
    'oostring':             LLOp(oo=True, sideeffects=False),
    'ooparse_int':          LLOp(oo=True, canraise=(ValueError,)),
    'ooparse_float':        LLOp(oo=True, canraise=(ValueError,)),
    'oohash':               LLOp(oo=True, sideeffects=False),

    # _____ read frame var support ___
    'get_frame_base':       LLOp(sideeffects=False),
    'frame_info':           LLOp(sideeffects=False),
}
# ***** Run test_lloperation after changes. *****
# __________ operations on PyObjects __________
from pypy.objspace.flow.operation import FunctionByName
# Register one pyobj-flavored operation for every flow-space operation
# (they all go through CPython and can raise anything).
opimpls = FunctionByName.copy()
opimpls['is_true'] = bool
for opname in opimpls:
    LL_OPERATIONS[opname] = LLOp(canraise=(Exception,), pyobj=True)
LL_OPERATIONS['simple_call'] = LLOp(canraise=(Exception,), pyobj=True)
del opname, FunctionByName
# ____________________________________________________________
# Post-processing

# Stick the opnames into the LLOp instances
for opname, opdesc in LL_OPERATIONS.iteritems():
    opdesc.opname = opname
del opname, opdesc
# Also export all operations in an attribute-based namespace.
# Example usage from LL helpers: z = llop.int_add(Signed, x, y)

class LLOP(object):
    def _freeze_(self):
        # mark the singleton as a prebuilt constant for the translator
        return True
llop = LLOP()

for opname, opdesc in LL_OPERATIONS.iteritems():
    setattr(llop, opname, opdesc)
del opname, opdesc
| Python |
from pypy.annotation.pairtype import pairtype
from pypy.annotation import model as annmodel
from pypy.objspace.flow.model import Constant
from pypy.rpython.rdict import AbstractDictRepr, AbstractDictIteratorRepr,\
rtype_newdict, dum_variant, dum_keys, dum_values, dum_items
from pypy.rpython.lltypesystem import lltype
from pypy.rlib.rarithmetic import r_uint
from pypy.rlib.objectmodel import hlinvoke
from pypy.rpython import robject
from pypy.rlib import objectmodel
from pypy.rpython import rmodel
# ____________________________________________________________
#
# generic implementation of RPython dictionary, with parametric DICTKEY and
# DICTVALUE types.
#
# XXX for immutable dicts, the array should be inlined and
# num_pristine_entries and everused are not needed.
#
# struct dictentry {
# DICTKEY key;
# bool f_valid; # (optional) the entry is filled
# bool f_everused; # (optional) the entry is or has ever been filled
# DICTVALUE value;
# int f_hash; # (optional) key hash, if hard to recompute
# }
#
# struct dicttable {
# int num_items;
# int num_pristine_entries; # never used entries
# Array *entries;
# (Function DICTKEY, DICTKEY -> bool) *fnkeyeq;
# (Function DICTKEY -> int) *fnkeyhash;
# }
#
#
class DictRepr(AbstractDictRepr):
    def __init__(self, rtyper, key_repr, value_repr, dictkey, dictvalue,
                 custom_eq_hash=None):
        self.rtyper = rtyper
        # DICT is a forward reference: the real GcStruct is filled in by
        # _setup_repr() once the key/value reprs are available.
        self.DICT = lltype.GcForwardReference()
        self.lowleveltype = lltype.Ptr(self.DICT)
        self.custom_eq_hash = custom_eq_hash is not None
        if not isinstance(key_repr, rmodel.Repr):  # not computed yet, done by setup()
            # a callable that will produce the repr later
            assert callable(key_repr)
            self._key_repr_computer = key_repr
        else:
            self.external_key_repr, self.key_repr = self.pickkeyrepr(key_repr)
        if not isinstance(value_repr, rmodel.Repr):  # not computed yet, done by setup()
            assert callable(value_repr)
            self._value_repr_computer = value_repr
        else:
            self.external_value_repr, self.value_repr = self.pickrepr(value_repr)
        self.dictkey = dictkey
        self.dictvalue = dictvalue
        # cache of prebuilt dicts already converted by convert_const()
        self.dict_cache = {}
        self._custom_eq_hash_repr = custom_eq_hash
        # setup() needs to be called to finish this initialization
    def _externalvsinternal(self, rtyper, item_repr):
        # NOTE(review): the 'rtyper' argument is ignored in favour of
        # self.rtyper -- presumably they are always the same object;
        # confirm against the callers before relying on the parameter.
        return rmodel.externalvsinternal(self.rtyper, item_repr)
    def _setup_repr(self):
        """Finish the lazy initialization: compute the key/value reprs if
        they were given as callables, then fill in the DICT forward
        reference with the concrete 'dicttable' GcStruct, choosing the
        cheapest encoding for the per-entry valid/everused state."""
        if 'key_repr' not in self.__dict__:
            key_repr = self._key_repr_computer()
            self.external_key_repr, self.key_repr = self.pickkeyrepr(key_repr)
        if 'value_repr' not in self.__dict__:
            self.external_value_repr, self.value_repr = self.pickrepr(self._value_repr_computer())
        if isinstance(self.DICT, lltype.GcForwardReference):
            self.DICTKEY = self.key_repr.lowleveltype
            self.DICTVALUE = self.value_repr.lowleveltype

            # compute the shape of the DICTENTRY structure
            entryfields = []
            entrymeths = {
                # GC pointers must be reset to NULL when an entry dies
                'must_clear_key': (isinstance(self.DICTKEY, lltype.Ptr)
                                   and self.DICTKEY._needsgc()),
                'must_clear_value': (isinstance(self.DICTVALUE, lltype.Ptr)
                                     and self.DICTVALUE._needsgc()),
                }

            # * the key
            entryfields.append(("key", self.DICTKEY))

            # * if NULL is not a valid ll value for the key or the value
            #   field of the entry, it can be used as a marker for
            #   never-used entries.  Otherwise, we need an explicit flag.
            s_key = self.dictkey.s_value
            s_value = self.dictvalue.s_value
            nullkeymarker = not self.key_repr.can_ll_be_null(s_key)
            nullvaluemarker = not self.value_repr.can_ll_be_null(s_value)
            dummykeyobj = self.key_repr.get_ll_dummyval_obj(self.rtyper,
                                                            s_key)
            dummyvalueobj = self.value_repr.get_ll_dummyval_obj(self.rtyper,
                                                                s_value)

            # * the state of the entry - trying to encode it as dummy objects
            if nullkeymarker and dummykeyobj:
                # all the state can be encoded in the key
                entrymeths['everused'] = ll_everused_from_key
                entrymeths['dummy_obj'] = dummykeyobj
                entrymeths['valid'] = ll_valid_from_key
                entrymeths['mark_deleted'] = ll_mark_deleted_in_key
                # the key is overwritten by 'dummy' when the entry is deleted
                entrymeths['must_clear_key'] = False

            elif nullvaluemarker and dummyvalueobj:
                # all the state can be encoded in the value
                entrymeths['everused'] = ll_everused_from_value
                entrymeths['dummy_obj'] = dummyvalueobj
                entrymeths['valid'] = ll_valid_from_value
                entrymeths['mark_deleted'] = ll_mark_deleted_in_value
                # value is overwritten by 'dummy' when entry is deleted
                entrymeths['must_clear_value'] = False

            else:
                # we need a flag to know if the entry was ever used
                # (we cannot use a NULL as a marker for this, because
                # the key and value will be reset to NULL to clear their
                # reference)
                entryfields.append(("f_everused", lltype.Bool))
                entrymeths['everused'] = ll_everused_from_flag

                # can we still rely on a dummy obj to mark deleted entries?
                if dummykeyobj:
                    entrymeths['dummy_obj'] = dummykeyobj
                    entrymeths['valid'] = ll_valid_from_key
                    entrymeths['mark_deleted'] = ll_mark_deleted_in_key
                    # key is overwritten by 'dummy' when entry is deleted
                    entrymeths['must_clear_key'] = False
                elif dummyvalueobj:
                    entrymeths['dummy_obj'] = dummyvalueobj
                    entrymeths['valid'] = ll_valid_from_value
                    entrymeths['mark_deleted'] = ll_mark_deleted_in_value
                    # value is overwritten by 'dummy' when entry is deleted
                    entrymeths['must_clear_value'] = False
                else:
                    # worst case: a second explicit flag
                    entryfields.append(("f_valid", lltype.Bool))
                    entrymeths['valid'] = ll_valid_from_flag
                    entrymeths['mark_deleted'] = ll_mark_deleted_in_flag

            # * the value
            entryfields.append(("value", self.DICTVALUE))

            # * the hash, if needed
            if self.custom_eq_hash:
                fasthashfn = None
            else:
                fasthashfn = self.key_repr.get_ll_fasthash_function()
            if fasthashfn is None:
                # hash is expensive to recompute: cache it in the entry
                entryfields.append(("f_hash", lltype.Signed))
                entrymeths['hash'] = ll_hash_from_cache
            else:
                entrymeths['hash'] = ll_hash_recomputed
                entrymeths['fasthashfn'] = fasthashfn

            # Build the lltype data structures
            self.DICTENTRY = lltype.Struct("dictentry", adtmeths=entrymeths,
                                           *entryfields)
            self.DICTENTRYARRAY = lltype.GcArray(self.DICTENTRY)
            fields = [ ("num_items", lltype.Signed),
                       ("num_pristine_entries", lltype.Signed),
                       ("entries", lltype.Ptr(self.DICTENTRYARRAY)) ]
            if self.custom_eq_hash:
                self.r_rdict_eqfn, self.r_rdict_hashfn = self._custom_eq_hash_repr()
                fields.extend([ ("fnkeyeq", self.r_rdict_eqfn.lowleveltype),
                                ("fnkeyhash", self.r_rdict_hashfn.lowleveltype) ])
                adtmeths = {
                    'keyhash': ll_keyhash_custom,
                    'keyeq': ll_keyeq_custom,
                    'r_rdict_eqfn': self.r_rdict_eqfn,
                    'r_rdict_hashfn': self.r_rdict_hashfn,
                    'paranoia': True,
                    }
            else:
                # figure out which functions must be used to hash and compare
                ll_keyhash = self.key_repr.get_ll_hash_function()
                ll_keyeq = self.key_repr.get_ll_eq_function()  # can be None
                ll_keyhash = lltype.staticAdtMethod(ll_keyhash)
                if ll_keyeq is not None:
                    ll_keyeq = lltype.staticAdtMethod(ll_keyeq)
                adtmeths = {
                    'keyhash': ll_keyhash,
                    'keyeq': ll_keyeq,
                    'paranoia': False,
                    }
            adtmeths['KEY'] = self.DICTKEY
            adtmeths['VALUE'] = self.DICTVALUE
            self.DICT.become(lltype.GcStruct("dicttable", adtmeths=adtmeths,
                                             *fields))
    def convert_const(self, dictobj):
        """Convert a prebuilt Python dict (or objectmodel.r_dict) into its
        low-level DICT structure, caching the result so that the same
        constant object is converted only once."""
        # get object from bound dict methods
        #dictobj = getattr(dictobj, '__self__', dictobj)
        if dictobj is None:
            return lltype.nullptr(self.DICT)
        if not isinstance(dictobj, (dict, objectmodel.r_dict)):
            raise TyperError("expected a dict: %r" % (dictobj,))
        try:
            key = Constant(dictobj)
            return self.dict_cache[key]
        except KeyError:
            self.setup()
            l_dict = ll_newdict_size(self.DICT, len(dictobj))
            # cache the (still empty) low-level dict first, so that a
            # recursive reference to the same constant terminates
            self.dict_cache[key] = l_dict
            r_key = self.key_repr
            r_value = self.value_repr
            if isinstance(dictobj, objectmodel.r_dict):
                # custom eq/hash dict: also convert the two functions,
                # unless their low-level type is Void (nothing to store)
                if self.r_rdict_eqfn.lowleveltype != lltype.Void:
                    l_fn = self.r_rdict_eqfn.convert_const(dictobj.key_eq)
                    l_dict.fnkeyeq = l_fn
                if self.r_rdict_hashfn.lowleveltype != lltype.Void:
                    l_fn = self.r_rdict_hashfn.convert_const(dictobj.key_hash)
                    l_dict.fnkeyhash = l_fn
                # r_dict keys are wrapped in containers that carry the hash
                for dictkeycontainer, dictvalue in dictobj._dict.items():
                    llkey = r_key.convert_const(dictkeycontainer.key)
                    llvalue = r_value.convert_const(dictvalue)
                    ll_dict_insertclean(l_dict, llkey, llvalue,
                                        dictkeycontainer.hash)
                return l_dict
            else:
                for dictkey, dictvalue in dictobj.items():
                    llkey = r_key.convert_const(dictkey)
                    llvalue = r_value.convert_const(dictvalue)
                    ll_dict_insertclean(l_dict, llkey, llvalue,
                                        l_dict.keyhash(llkey))
                return l_dict
    def rtype_len(self, hop):
        # len(d)
        v_dict, = hop.inputargs(self)
        return hop.gendirectcall(ll_dict_len, v_dict)
    def rtype_is_true(self, hop):
        # truth value of d (None and empty dicts are false)
        v_dict, = hop.inputargs(self)
        return hop.gendirectcall(ll_dict_is_true, v_dict)
    def make_iterator_repr(self, *variant):
        # repr for iter(d) / iterkeys / itervalues / iteritems
        return DictIteratorRepr(self, *variant)
def rtype_method_get(self, hop):
v_dict, v_key, v_default = hop.inputargs(self, self.key_repr,
self.value_repr)
hop.exception_cannot_occur()
v_res = hop.gendirectcall(ll_get, v_dict, v_key, v_default)
return self.recast_value(hop.llops, v_res)
def rtype_method_setdefault(self, hop):
v_dict, v_key, v_default = hop.inputargs(self, self.key_repr,
self.value_repr)
hop.exception_cannot_occur()
v_res = hop.gendirectcall(ll_setdefault, v_dict, v_key, v_default)
return self.recast_value(hop.llops, v_res)
    def rtype_method_copy(self, hop):
        # d.copy() -> shallow copy with the same repr
        v_dict, = hop.inputargs(self)
        hop.exception_cannot_occur()
        return hop.gendirectcall(ll_copy, v_dict)
    def rtype_method_update(self, hop):
        # d1.update(d2); both dicts share this repr
        v_dic1, v_dic2 = hop.inputargs(self, self)
        hop.exception_cannot_occur()
        return hop.gendirectcall(ll_update, v_dic1, v_dic2)
    def _rtype_method_kvi(self, hop, spec):
        # Common implementation of keys()/values()/items(): 'spec' is one
        # of the dum_* markers, passed as a Void constant so that ll_kvi
        # gets specialized into three distinct functions.
        v_dic, = hop.inputargs(self)
        r_list = hop.r_result
        v_func = hop.inputconst(lltype.Void, spec)
        cLIST = hop.inputconst(lltype.Void, r_list.lowleveltype.TO)
        hop.exception_cannot_occur()
        return hop.gendirectcall(ll_kvi, v_dic, cLIST, v_func)
    def rtype_method_keys(self, hop):
        return self._rtype_method_kvi(hop, dum_keys)
    def rtype_method_values(self, hop):
        return self._rtype_method_kvi(hop, dum_values)
    def rtype_method_items(self, hop):
        return self._rtype_method_kvi(hop, dum_items)
    # The three iter* methods build a variant-specific iterator repr.
    def rtype_method_iterkeys(self, hop):
        hop.exception_cannot_occur()
        return DictIteratorRepr(self, "keys").newiter(hop)
    def rtype_method_itervalues(self, hop):
        hop.exception_cannot_occur()
        return DictIteratorRepr(self, "values").newiter(hop)
    def rtype_method_iteritems(self, hop):
        hop.exception_cannot_occur()
        return DictIteratorRepr(self, "items").newiter(hop)
    def rtype_method_clear(self, hop):
        # d.clear() -> reset to an empty minimal table
        v_dict, = hop.inputargs(self)
        hop.exception_cannot_occur()
        return hop.gendirectcall(ll_clear, v_dict)
class __extend__(pairtype(DictRepr, rmodel.Repr)):
    # d[key], del d[key], d[key] = value, key in d.
    # With a custom eq/hash pair, user code may raise anything, so the
    # exception annotations below differ between the two cases.
    def rtype_getitem((r_dict, r_key), hop):
        v_dict, v_key = hop.inputargs(r_dict, r_dict.key_repr)
        if not r_dict.custom_eq_hash:
            hop.has_implicit_exception(KeyError) # record that we know about it
        hop.exception_is_here()
        v_res = hop.gendirectcall(ll_dict_getitem, v_dict, v_key)
        return r_dict.recast_value(hop.llops, v_res)
    def rtype_delitem((r_dict, r_key), hop):
        v_dict, v_key = hop.inputargs(r_dict, r_dict.key_repr)
        if not r_dict.custom_eq_hash:
            hop.has_implicit_exception(KeyError) # record that we know about it
        hop.exception_is_here()
        return hop.gendirectcall(ll_dict_delitem, v_dict, v_key)
    def rtype_setitem((r_dict, r_key), hop):
        v_dict, v_key, v_value = hop.inputargs(r_dict, r_dict.key_repr, r_dict.value_repr)
        if r_dict.custom_eq_hash:
            hop.exception_is_here()
        else:
            hop.exception_cannot_occur()
        hop.gendirectcall(ll_dict_setitem, v_dict, v_key, v_value)
    def rtype_contains((r_dict, r_key), hop):
        v_dict, v_key = hop.inputargs(r_dict, r_dict.key_repr)
        return hop.gendirectcall(ll_contains, v_dict, v_key)
class __extend__(pairtype(DictRepr, DictRepr)):
def convert_from_to((r_dict1, r_dict2), v, llops):
# check that we don't convert from Dicts with
# different key/value types
if r_dict1.dictkey is None or r_dict2.dictkey is None:
return NotImplemented
if r_dict1.dictkey is not r_dict2.dictkey:
return NotImplemented
if r_dict1.dictvalue is None or r_dict2.dictvalue is None:
return NotImplemented
if r_dict1.dictvalue is not r_dict2.dictvalue:
return NotImplemented
return v
# ____________________________________________________________
#
# Low-level methods. These can be run for testing, but are meant to
# be direct_call'ed from rtyped flow graphs, which means that they will
# get flowed and annotated, mostly with SomePtr.
# Entry adt-methods: depending on the key/value types, "ever used",
# "valid" and the hash may be stored explicitly (f_* fields) or derived
# from a reserved dummy value in the key or value slot.
def ll_everused_from_flag(entry):
    return entry.f_everused
def ll_everused_from_key(entry):
    # a NULL key means the entry was never used
    return bool(entry.key)
def ll_everused_from_value(entry):
    # a NULL value means the entry was never used
    return bool(entry.value)
def ll_valid_from_flag(entry):
    return entry.f_valid
def ll_mark_deleted_in_flag(entry):
    entry.f_valid = False
def ll_valid_from_key(entry):
    # valid = used at some point and key is not the deleted marker
    ENTRY = lltype.typeOf(entry).TO
    dummy = ENTRY.dummy_obj.ll_dummy_value
    return entry.everused() and entry.key != dummy
def ll_mark_deleted_in_key(entry):
    ENTRY = lltype.typeOf(entry).TO
    dummy = ENTRY.dummy_obj.ll_dummy_value
    entry.key = dummy
def ll_valid_from_value(entry):
    # valid = used at some point and value is not the deleted marker
    ENTRY = lltype.typeOf(entry).TO
    dummy = ENTRY.dummy_obj.ll_dummy_value
    return entry.everused() and entry.value != dummy
def ll_mark_deleted_in_value(entry):
    ENTRY = lltype.typeOf(entry).TO
    dummy = ENTRY.dummy_obj.ll_dummy_value
    entry.value = dummy
def ll_hash_from_cache(entry):
    return entry.f_hash
def ll_hash_recomputed(entry):
    # fast-hashable keys: recompute instead of caching the hash
    ENTRY = lltype.typeOf(entry).TO
    return ENTRY.fasthashfn(entry.key)
def ll_keyhash_custom(d, key):
    # call the user-supplied hash function stored in the dict
    DICT = lltype.typeOf(d).TO
    return hlinvoke(DICT.r_rdict_hashfn, d.fnkeyhash, key)
def ll_keyeq_custom(d, key1, key2):
    # call the user-supplied equality function stored in the dict
    DICT = lltype.typeOf(d).TO
    return hlinvoke(DICT.r_rdict_eqfn, d.fnkeyeq, key1, key2)
def ll_dict_len(d):
    # len(d); does not count deleted entries
    return d.num_items
def ll_dict_is_true(d):
    # check if a dict is True, allowing for None
    return bool(d) and d.num_items != 0
def ll_dict_getitem(d, key):
    """Return d[key]; raise KeyError when the key is absent."""
    entry = ll_dict_lookup(d, key, d.keyhash(key))
    if not entry.valid():
        raise KeyError
    return entry.value
ll_dict_getitem.oopspec = 'dict.getitem(d, key)'
ll_dict_getitem.oopargcheck = lambda d, key: bool(d)
def ll_dict_setitem(d, key, value):
    """d[key] = value.  Resizes the table when the count of pristine
    (never-used) entries drops to a third of the table size."""
    hash = d.keyhash(key)
    entry = ll_dict_lookup(d, key, hash)
    everused = entry.everused()
    valid = entry.valid()
    # set up the new entry
    ENTRY = lltype.typeOf(entry).TO
    entry.value = value
    if valid:
        # overwriting an existing key: only the value changes
        return
    entry.key = key
    if hasattr(ENTRY, 'f_hash'): entry.f_hash = hash
    if hasattr(ENTRY, 'f_valid'): entry.f_valid = True
    d.num_items += 1
    if not everused:
        # taking over a pristine slot (not a deleted one)
        if hasattr(ENTRY, 'f_everused'): entry.f_everused = True
        d.num_pristine_entries -= 1
        if d.num_pristine_entries <= len(d.entries) / 3:
            ll_dict_resize(d)
ll_dict_setitem.oopspec = 'dict.setitem(d, key, value)'
def ll_dict_insertclean(d, key, value, hash):
    # Internal routine used by ll_dict_resize() to insert an item which is
    # known to be absent from the dict. This routine also assumes that
    # the dict contains no deleted entries. This routine has the advantage
    # of never calling d.keyhash() and d.keyeq(), so it cannot call back
    # to user code. ll_dict_insertclean() doesn't resize the dict, either.
    entry = ll_dict_lookup_clean(d, hash)
    ENTRY = lltype.typeOf(entry).TO
    entry.value = value
    entry.key = key
    # only set the bookkeeping fields that exist for this entry layout
    if hasattr(ENTRY, 'f_hash'): entry.f_hash = hash
    if hasattr(ENTRY, 'f_valid'): entry.f_valid = True
    if hasattr(ENTRY, 'f_everused'): entry.f_everused = True
    d.num_items += 1
    d.num_pristine_entries -= 1
def ll_dict_delitem(d, key):
    """del d[key]; raise KeyError when the key is absent.  Shrinks the
    table when it becomes mostly empty."""
    entry = ll_dict_lookup(d, key, d.keyhash(key))
    if not entry.valid():
        raise KeyError
    entry.mark_deleted()
    d.num_items -= 1
    # clear the key and the value if they are GC pointers
    ENTRY = lltype.typeOf(entry).TO
    if ENTRY.must_clear_key:
        key = entry.key # careful about destructor side effects:
                        # keep key alive until entry.value has also
                        # been zeroed (if it must be)
        entry.key = lltype.nullptr(ENTRY.key.TO)
    if ENTRY.must_clear_value:
        entry.value = lltype.nullptr(ENTRY.value.TO)
    num_entries = len(d.entries)
    if num_entries > DICT_INITSIZE and d.num_items < num_entries / 4:
        ll_dict_resize(d)
def ll_dict_resize(d):
    """Reallocate the entry table and re-insert every valid entry,
    discarding deleted-entry markers in the process."""
    old_entries = d.entries
    old_size = len(old_entries)
    # make a 'new_size' estimate and shrink it if there are many
    # deleted entry markers
    new_size = old_size * 2
    while new_size > DICT_INITSIZE and d.num_items < new_size / 4:
        new_size /= 2
    d.entries = lltype.malloc(lltype.typeOf(old_entries).TO, new_size, zero=True)
    d.num_items = 0
    d.num_pristine_entries = new_size
    i = 0
    while i < old_size:
        entry = old_entries[i]
        if entry.valid():
            ll_dict_insertclean(d, entry.key, entry.value, entry.hash())
        i += 1
# ------- a port of CPython's dictobject.c's lookdict implementation -------
PERTURB_SHIFT = 5
def ll_dict_lookup(d, key, hash):
    """Open-addressing probe: return the entry for 'key' (valid entry on
    hit), or the first reusable/pristine slot on miss.  Custom keyeq may
    run user code, so under DICT.paranoia the probe restarts if the dict
    was mutated during the comparison."""
    DICT = lltype.typeOf(d).TO
    entries = d.entries
    mask = len(entries) - 1
    i = r_uint(hash & mask)
    # do the first try before any looping
    entry = entries[i]
    if entry.valid():
        checkingkey = entry.key
        if checkingkey == key:
            return entry   # found the entry
        if d.keyeq is not None and entry.hash() == hash:
            # correct hash, maybe the key is e.g. a different pointer to
            # an equal object
            found = d.keyeq(checkingkey, key)
            if DICT.paranoia:
                if (entries != d.entries or
                    not entry.valid() or entry.key != checkingkey):
                    # the compare did major nasty stuff to the dict: start over
                    return ll_dict_lookup(d, key, hash)
            if found:
                return entry   # found the entry
        freeslot = lltype.nullptr(lltype.typeOf(entry).TO)
    elif entry.everused():
        # deleted entry: remember it as a candidate slot for insertion
        freeslot = entry
    else:
        return entry    # pristine entry -- lookup failed
    # In the loop, a deleted entry (everused and not valid) is by far
    # (factor of 100s) the least likely outcome, so test for that last.
    perturb = r_uint(hash)
    while 1:
        i = ((i << 2) + i + perturb + 1) & mask
        entry = entries[i]
        if not entry.everused():
            # miss: prefer an earlier deleted slot, if any
            return freeslot or entry
        elif entry.valid():
            checkingkey = entry.key
            if checkingkey == key:
                return entry
            if d.keyeq is not None and entry.hash() == hash:
                # correct hash, maybe the key is e.g. a different pointer to
                # an equal object
                found = d.keyeq(checkingkey, key)
                if DICT.paranoia:
                    if (entries != d.entries or
                        not entry.valid() or entry.key != checkingkey):
                        # the compare did major nasty stuff to the dict:
                        # start over
                        return ll_dict_lookup(d, key, hash)
                if found:
                    return entry   # found the entry
        elif not freeslot:
            freeslot = entry
        perturb >>= PERTURB_SHIFT
def ll_dict_lookup_clean(d, hash):
    # a simplified version of ll_dict_lookup() which assumes that the
    # key is new, and the dictionary doesn't contain deleted entries.
    # It only finds the next free slot for the given hash.
    entries = d.entries
    mask = len(entries) - 1
    i = r_uint(hash & mask)
    entry = entries[i]
    perturb = r_uint(hash)
    while entry.everused():
        # same probing sequence as ll_dict_lookup()
        i = ((i << 2) + i + perturb + 1) & mask
        entry = entries[i]
        perturb >>= PERTURB_SHIFT
    return entry
# ____________________________________________________________
#
# Irregular operations.
DICT_INITSIZE = 8
def ll_newdict(DICT):
    """Create a fresh, empty dict with the minimal table size."""
    d = lltype.malloc(DICT)
    d.entries = lltype.malloc(DICT.entries.TO, DICT_INITSIZE, zero=True)
    d.num_items = 0
    d.num_pristine_entries = DICT_INITSIZE
    return d
ll_newdict.oopspec = 'newdict()'
def ll_newdict_size(DICT, length_estimate):
    """Create a fresh, empty dict sized for about 'length_estimate' items.

    The table size 'n' is the smallest power of two >= DICT_INITSIZE that
    keeps the table at most roughly 2/3 full for the estimated item count.
    """
    length_estimate = (length_estimate // 2) * 3
    n = DICT_INITSIZE
    while n < length_estimate:
        n *= 2
    d = lltype.malloc(DICT)
    d.entries = lltype.malloc(DICT.entries.TO, n, zero=True)
    d.num_items = 0
    # Bug fix: the pristine-entry count must match the actual table size
    # 'n', not DICT_INITSIZE.  The old value broke the invariant
    # "pristine == slots never written" for any pre-sized dict, making
    # ll_dict_setitem's resize heuristic fire far too early.
    d.num_pristine_entries = n
    return d
ll_newdict_size.oopspec = 'newdict()'
def rtype_r_dict(hop):
    """rtype a call to objectmodel.r_dict(eqfn, hashfn): allocate the dict
    and store the two user functions into it (unless their low-level type
    is Void, in which case there is nothing to store)."""
    r_dict = hop.r_result
    if not r_dict.custom_eq_hash:
        raise TyperError("r_dict() call does not return an r_dict instance")
    v_eqfn, v_hashfn = hop.inputargs(r_dict.r_rdict_eqfn,
                                     r_dict.r_rdict_hashfn)
    cDICT = hop.inputconst(lltype.Void, r_dict.DICT)
    hop.exception_cannot_occur()
    v_result = hop.gendirectcall(ll_newdict, cDICT)
    if r_dict.r_rdict_eqfn.lowleveltype != lltype.Void:
        cname = hop.inputconst(lltype.Void, 'fnkeyeq')
        hop.genop('setfield', [v_result, cname, v_eqfn])
    if r_dict.r_rdict_hashfn.lowleveltype != lltype.Void:
        cname = hop.inputconst(lltype.Void, 'fnkeyhash')
        hop.genop('setfield', [v_result, cname, v_hashfn])
    return v_result
# ____________________________________________________________
#
# Iteration.
class DictIteratorRepr(AbstractDictIteratorRepr):
    """Repr for dict iterators; 'variant' selects keys/values/items."""
    def __init__(self, r_dict, variant="keys"):
        self.r_dict = r_dict
        self.variant = variant
        # iterator state: the dict being walked and the next entry index
        self.lowleveltype = lltype.Ptr(lltype.GcStruct('dictiter',
                                         ('dict', r_dict.lowleveltype),
                                         ('index', lltype.Signed)))
        self.ll_dictiter = ll_dictiter
        self.ll_dictnext = ll_dictnext
def ll_dictiter(ITERPTR, d):
    """Allocate and initialise an iterator positioned at the start of d."""
    result = lltype.malloc(ITERPTR.TO)
    result.dict = d
    result.index = 0
    return result
def ll_dictnext(iter, func, RETURNTYPE):
    """Advance the iterator and return the next key/value/item, depending
    on the dum_* marker 'func'.  Raises StopIteration at the end (and for
    an already-exhausted iterator, whose 'dict' field is NULL)."""
    dict = iter.dict
    if dict:
        entries = dict.entries
        index = iter.index
        entries_len = len(entries)
        while index < entries_len:
            entry = entries[index]
            index = index + 1
            if entry.valid():
                iter.index = index
                if RETURNTYPE is lltype.Void:
                    # caller ignores the result (plain 'for x in d' on keys
                    # of Void type)
                    return None
                elif func is dum_items:
                    r = lltype.malloc(RETURNTYPE.TO)
                    r.item0 = recast(RETURNTYPE.TO.item0, entry.key)
                    r.item1 = recast(RETURNTYPE.TO.item1, entry.value)
                    return r
                elif func is dum_keys:
                    return entry.key
                elif func is dum_values:
                    return entry.value
    # clear the reference to the dict and prevent restarts
    iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO)
    raise StopIteration
# _____________________________________________________________
# methods
def ll_get(dict, key, default):
    """dict.get(key, default): return the stored value, or 'default'
    when the key is absent.  Never raises."""
    entry = ll_dict_lookup(dict, key, dict.keyhash(key))
    if not entry.valid():
        return default
    return entry.value
def ll_setdefault(dict, key, default):
    """dict.setdefault(key, default): insert 'default' when the key is
    absent; return the value now associated with the key."""
    entry = ll_dict_lookup(dict, key, dict.keyhash(key))
    if not entry.valid():
        ll_dict_setitem(dict, key, default)
        return default
    return entry.value
def ll_copy(dict):
    """Shallow copy: duplicate the table, all bookkeeping fields and, for
    r_dicts, the custom eq/hash function pointers."""
    DICT = lltype.typeOf(dict).TO
    dictsize = len(dict.entries)
    d = lltype.malloc(DICT)
    d.entries = lltype.malloc(DICT.entries.TO, dictsize, zero=True)
    d.num_items = dict.num_items
    d.num_pristine_entries = dict.num_pristine_entries
    if hasattr(DICT, 'fnkeyeq'):   d.fnkeyeq   = dict.fnkeyeq
    if hasattr(DICT, 'fnkeyhash'): d.fnkeyhash = dict.fnkeyhash
    i = 0
    while i < dictsize:
        d_entry = d.entries[i]
        entry = dict.entries[i]
        ENTRY = lltype.typeOf(entry).TO
        # copy every slot verbatim, including deleted/pristine markers
        d_entry.key = entry.key
        if hasattr(ENTRY, 'f_valid'):    d_entry.f_valid    = entry.f_valid
        if hasattr(ENTRY, 'f_everused'): d_entry.f_everused = entry.f_everused
        d_entry.value = entry.value
        if hasattr(ENTRY, 'f_hash'):     d_entry.f_hash     = entry.f_hash
        i += 1
    return d
def ll_clear(d):
    """Remove every item, resetting the table to its minimal size."""
    if (len(d.entries) == DICT_INITSIZE and
            d.num_pristine_entries == DICT_INITSIZE):
        return   # already empty, with a minimal fully-pristine table
    DICT = lltype.typeOf(d).TO
    d.entries = lltype.malloc(DICT.entries.TO, DICT_INITSIZE, zero=True)
    d.num_items = 0
    d.num_pristine_entries = DICT_INITSIZE
def ll_update(dic1, dic2):
    """dic1.update(dic2): copy every valid entry of dic2 into dic1."""
    entries = dic2.entries
    limit = len(entries)
    idx = 0
    while idx < limit:
        entry = entries[idx]
        if entry.valid():
            ll_dict_setitem(dic1, entry.key, entry.value)
        idx += 1
# this is an implementation of keys(), values() and items()
# in a single function.
# note that by specialization on func, three different
# and very efficient functions are created.
def recast(P, v):
    """Cast v to the pointer type P; non-pointer types pass through."""
    if not isinstance(P, lltype.Ptr):
        return v
    return lltype.cast_pointer(P, v)
def ll_kvi(dic, LIST, func):
    """Build the list for keys()/values()/items(); 'func' is one of the
    dum_* markers and specializes this into three distinct functions."""
    res = LIST.ll_newlist(dic.num_items)
    entries = dic.entries
    dlen = len(entries)
    items = res.ll_items()
    i = 0     # index into the dict's entry table
    p = 0     # index into the result list
    while i < dlen:
        entry = entries[i]
        if entry.valid():
            ELEM = lltype.typeOf(items).TO.OF
            if ELEM is not lltype.Void:
                if func is dum_items:
                    r = lltype.malloc(ELEM.TO)
                    r.item0 = recast(ELEM.TO.item0, entry.key)
                    r.item1 = recast(ELEM.TO.item1, entry.value)
                    items[p] = r
                elif func is dum_keys:
                    items[p] = recast(ELEM, entry.key)
                elif func is dum_values:
                    items[p] = recast(ELEM, entry.value)
            p += 1
        i += 1
    return res
def ll_contains(d, key):
    # 'key in d'
    entry = ll_dict_lookup(d, key, d.keyhash(key))
    return entry.valid()
ll_contains.oopspec = 'dict.contains(d, key)'
ll_contains.oopargcheck = lambda d, key: bool(d)
| Python |
import math
from pypy.rpython.lltypesystem import lltype, rtupletype
# low-level tuple types for the two math functions that return pairs
FREXP_RESULT = rtupletype.TUPLE_TYPE([lltype.Float, lltype.Signed]).TO
MODF_RESULT = rtupletype.TUPLE_TYPE([lltype.Float, lltype.Float]).TO
def ll_frexp_result(mantissa, exponent):
    # pack math.frexp's (float, int) pair into a low-level tuple
    tup = lltype.malloc(FREXP_RESULT)
    tup.item0 = mantissa
    tup.item1 = exponent
    return tup
def ll_modf_result(fracpart, intpart):
    # pack math.modf's (float, float) pair into a low-level tuple
    tup = lltype.malloc(MODF_RESULT)
    tup.item0 = fracpart
    tup.item1 = intpart
    return tup
def ll_math_frexp(x):
    # math.frexp, returning an lltype tuple instead of a Python tuple
    mantissa, exponent = math.frexp(x)
    return ll_frexp_result(mantissa, exponent)
def ll_math_modf(x):
    # math.modf, returning an lltype tuple instead of a Python tuple
    fracpart, intpart = math.modf(x)
    return ll_modf_result(fracpart, intpart)
| Python |
from pypy.rlib import rarithmetic
from pypy.rpython.module.support import LLSupport
from pypy.tool.staticmethods import ClassMethods
class Implementation:
    """Low-level string<->float helpers; both are flagged as suggested
    primitives so backends may substitute a native implementation."""
    def ll_strtod_formatd(fmt, x):
        # format the float x according to the %-style format string fmt
        return LLSupport.to_rstr(rarithmetic.formatd(LLSupport.from_rstr(fmt), x))
    ll_strtod_formatd.suggested_primitive = True
    ll_strtod_formatd = staticmethod(ll_strtod_formatd)
    def ll_strtod_parts_to_float(sign, beforept, afterpt, exponent):
        # reassemble "<sign><beforept>.<afterpt>e<exponent>" into a float
        return rarithmetic.parts_to_float(LLSupport.from_rstr(sign),
                                          LLSupport.from_rstr(beforept),
                                          LLSupport.from_rstr(afterpt),
                                          LLSupport.from_rstr(exponent))
    ll_strtod_parts_to_float.suggested_primitive = True
    ll_strtod_parts_to_float = staticmethod(ll_strtod_parts_to_float)
| Python |
from pypy.rpython.module.support import LLSupport
from pypy.rpython.module.ll_os_path import BaseOsPath
class Implementation(BaseOsPath, LLSupport):
    # lltype-backed os.path helpers: all behaviour comes from BaseOsPath,
    # using LLSupport's low-level string conversions
    pass
| Python |
import os, errno
from pypy.rpython.module.support import LLSupport
from pypy.rpython.module.support import ll_strcpy
from pypy.rpython.module.ll_os import BaseOS
from pypy.rpython.lltypesystem import lltype, rtupletype
from pypy.rlib.rarithmetic import intmask
# low-level tuple types for os.stat(), os.pipe() and os.waitpid() results
STAT_RESULT = rtupletype.TUPLE_TYPE([lltype.Signed]*10).TO
PIPE_RESULT = rtupletype.TUPLE_TYPE([lltype.Signed]*2).TO
WAITPID_RESULT = rtupletype.TUPLE_TYPE([lltype.Signed]*2).TO
class Implementation(BaseOS, LLSupport):
    """lltype implementations of os-level helpers for the rtyper."""
    def ll_stat_result(stat0, stat1, stat2, stat3, stat4,
                       stat5, stat6, stat7, stat8, stat9):
        # pack the ten stat fields into a STAT_RESULT tuple, truncating
        # each to a Signed with intmask()
        tup = lltype.malloc(STAT_RESULT)
        tup.item0 = intmask(stat0)
        tup.item1 = intmask(stat1)
        tup.item2 = intmask(stat2)
        tup.item3 = intmask(stat3)
        tup.item4 = intmask(stat4)
        tup.item5 = intmask(stat5)
        tup.item6 = intmask(stat6)
        tup.item7 = intmask(stat7)
        tup.item8 = intmask(stat8)
        tup.item9 = intmask(stat9)
        return tup
    ll_stat_result = staticmethod(ll_stat_result)
    def ll_pipe_result(fd1, fd2):
        # pack the two pipe file descriptors into a PIPE_RESULT tuple
        tup = lltype.malloc(PIPE_RESULT)
        tup.item0 = fd1
        tup.item1 = fd2
        return tup
    ll_pipe_result = staticmethod(ll_pipe_result)
    def ll_os_read(cls, fd, count):
        """Read up to 'count' bytes from fd into a fresh rpy string,
        shrinking the result to the number of bytes actually read."""
        from pypy.rpython.lltypesystem.rstr import mallocstr
        if count < 0:
            raise OSError(errno.EINVAL, None)
        buffer = mallocstr(count)
        n = cls.ll_read_into(fd, buffer)
        if n != count:
            # short read: copy into a correctly-sized string
            s = mallocstr(n)
            ll_strcpy(s, buffer, n)
            buffer = s
        return buffer
    def ll_os_readlink(cls, path):
        """os.readlink(path), retrying with a larger buffer until the
        result fits."""
        from pypy.rpython.lltypesystem.rstr import mallocstr
        bufsize = 1023
        while 1:
            buffer = mallocstr(bufsize)
            n = cls.ll_readlink_into(cls, path, buffer)
            if n < bufsize:
                break
            bufsize *= 4     # overflow, try again with a bigger buffer
        s = mallocstr(n)
        ll_strcpy(s, buffer, n)
        return s
    def ll_waitpid_result(fd1, fd2):
        # pack waitpid's (pid, status) pair -- the parameter names are
        # historical -- into a WAITPID_RESULT tuple
        tup = lltype.malloc(WAITPID_RESULT)
        tup.item0 = fd1
        tup.item1 = fd2
        return tup
    ll_waitpid_result = staticmethod(ll_waitpid_result)
| Python |
from pypy.rpython.rslice import AbstractSliceRepr
from pypy.rpython.lltypesystem.lltype import \
GcStruct, Signed, Ptr, Void, malloc, PyObject, nullptr
from pypy.annotation.pairtype import pairtype
from pypy.rpython.robject import PyObjRepr, pyobj_repr
from pypy.rpython.rmodel import inputconst, PyObjPtr, IntegerRepr
# ____________________________________________________________
#
# Concrete implementation of RPython slice objects:
#
# - if stop is None, use only a Signed
# - if stop is not None:
#
# struct slice {
# Signed start;
# Signed stop;
# // step is always 1
# }
# low-level layout used only for start/stop slices (step is always 1)
SLICE = GcStruct("slice", ("start", Signed), ("stop", Signed),
                 hints = {'immutable': True})
class SliceRepr(AbstractSliceRepr):
    pass
# the three concrete slice reprs, distinguished by lowleveltype:
startstop_slice_repr = SliceRepr()
startstop_slice_repr.lowleveltype = Ptr(SLICE)
startonly_slice_repr = SliceRepr()
startonly_slice_repr.lowleveltype = Signed        # just the start value
minusone_slice_repr = SliceRepr()
minusone_slice_repr.lowleveltype = Void # only for [:-1]
# ____________________________________________________________
def ll_newslice(start, stop):
    """Allocate a start/stop slice structure."""
    result = malloc(SLICE)
    result.start = start
    result.stop = stop
    return result
# ____________________________________________________________
#
# limited support for casting into PyObject
# stuff like this should go into one file maybe
class __extend__(pairtype(SliceRepr, PyObjRepr)):
    # convert a low-level slice to a CPython slice object via PySlice_New;
    # missing components are passed as NULL
    def convert_from_to((r_from, r_to), v, llops):
        null = inputconst(Ptr(PyObject), nullptr(PyObject))
        def pyint(v):
            return llops.gencapicall('PyInt_FromLong', [v], resulttype=r_to)
        v_step = v_start = v_stop = null
        if r_from.lowleveltype is Signed:
            # startonly slice: only 'start' is meaningful
            v_start = pyint(v)
        elif r_from.lowleveltype is Void:
            # the constant [:-1] slice
            v_stop = inputconst(r_to, -1)
        else:
            v_start = pyint(llops.genop('getfield', [v, inputconst(Void, 'start')],
                                        resulttype=Signed))
            v_stop = pyint(llops.genop('getfield', [v, inputconst(Void, 'stop')],
                                       resulttype=Signed))
        return llops.gencapicall('PySlice_New',
                                 [v_start, v_stop, v_step],
                                 resulttype = pyobj_repr)
| Python |
from pypy.rpython.lltypesystem.lltype import Ptr, GcStruct, Signed, malloc, Void
from pypy.rpython.rrange import AbstractRangeRepr, AbstractRangeIteratorRepr
# ____________________________________________________________
#
# Concrete implementation of RPython lists that are returned by range()
# and never mutated afterwards:
#
# struct range {
# Signed start, stop; // step is always constant
# }
#
# struct rangest {
# Signed start, stop, step; // rare case, for completeness
# }
def ll_length(l):
    """Number of items produced by range(l.start, l.stop, l.step).

    Handles positive and negative step; step is never zero (enforced at
    construction time by ll_newrangest).
    """
    step = l.step
    if step > 0:
        span = l.stop - l.start
    else:
        span = l.start - l.stop
        step = -step
    if span <= 0:
        return 0
    return (span - 1) // step + 1
def ll_getitem_fast(l, index):
    """Item at 'index' (assumed non-negative and in bounds)."""
    return index * l.step + l.start
# fully general range struct (rare case: non-constant step)
RANGEST = GcStruct("range", ("start", Signed), ("stop", Signed), ("step", Signed),
                    adtmeths = {
                        "ll_length":ll_length,
                        "ll_getitem_fast":ll_getitem_fast,
                    },
                    hints = {'immutable': True})
# iterator over a RANGEST: mutable 'next' cursor plus stop/step
RANGESTITER = GcStruct("range", ("next", Signed), ("stop", Signed), ("step", Signed))
class RangeRepr(AbstractRangeRepr):
    """Repr for range() results that are never mutated.

    When 'step' is a compile-time constant it is stored as an adt method
    on a two-field struct; otherwise the three-field RANGEST is used."""
    RANGEST = Ptr(RANGEST)
    RANGESTITER = Ptr(RANGESTITER)
    getfield_opname = "getfield"
    def __init__(self, step, *args):
        # constant-step variant: step baked into the struct's adtmeths
        self.RANGE = Ptr(GcStruct("range", ("start", Signed), ("stop", Signed),
                        adtmeths = {
                            "ll_length":ll_length,
                            "ll_getitem_fast":ll_getitem_fast,
                            "step":step,
                        },
                        hints = {'immutable': True}))
        self.RANGEITER = Ptr(GcStruct("range", ("next", Signed), ("stop", Signed)))
        AbstractRangeRepr.__init__(self, step, *args)
        self.ll_newrange = ll_newrange
        self.ll_newrangest = ll_newrangest
    def make_iterator_repr(self):
        return RangeIteratorRepr(self)
def ll_newrange(RANGE, start, stop):
    """Allocate a constant-step range struct with the given bounds."""
    rng = malloc(RANGE.TO)
    rng.start = start
    rng.stop = stop
    return rng
def ll_newrangest(start, stop, step):
    """Allocate a fully general range struct; ValueError on zero step
    (matching CPython's range())."""
    if not step:
        raise ValueError
    rng = malloc(RANGEST)
    rng.start = start
    rng.stop = stop
    rng.step = step
    return rng
class RangeIteratorRepr(AbstractRangeIteratorRepr):
    def __init__(self, *args):
        AbstractRangeIteratorRepr.__init__(self, *args)
        self.ll_rangeiter = ll_rangeiter
def ll_rangeiter(ITERPTR, rng):
    """Allocate an iterator for 'rng'; the step field is only present
    (and copied) in the general RANGESTITER variant."""
    iter = malloc(ITERPTR.TO)
    iter.next = rng.start
    iter.stop = rng.stop
    if ITERPTR.TO is RANGESTITER:
        iter.step = rng.step
    return iter
| Python |
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.annotation.model import lltype_to_annotation
from pypy.rlib.objectmodel import Symbolic, CDefinedIntSymbolic
from pypy.rlib import rarithmetic
class CConstant(Symbolic):
    """ A C-level constant, maybe #define, rendered directly.
    """
    def __init__(self, c_name, TP):
        # c_name: the literal C identifier; TP: its low-level type
        self.c_name = c_name
        self.TP = TP
    def annotation(self):
        return lltype_to_annotation(self.TP)
    def lltype(self):
        return self.TP
def llexternal(name, args, result, _callable=None, sources=[], includes=[],
               libraries=[], include_dirs=[]):
    """Declare an external C function as a low-level function pointer.

    Without an explicit _callable, untranslated tests go through a
    ctypes-based caller set up lazily by ll2ctypes."""
    ext_type = lltype.FuncType(args, result)
    funcptr = lltype.functionptr(ext_type, name, external='C',
                                 sources=tuple(sources),
                                 includes=tuple(includes),
                                 libraries=tuple(libraries),
                                 include_dirs=tuple(include_dirs),
                                 _callable=_callable)
    if _callable is None:
        from pypy.rpython.lltypesystem import ll2ctypes
        ll2ctypes.make_callable_via_ctypes(funcptr)
    return funcptr
def setup():
    """ creates necessary c-level types

    For every platform integer type, install into this module's globals
    an r_* bounded-integer class and an upper-case lltype number type
    (e.g. 'unsigned int' -> r_uint / UINT)."""
    from pypy.rpython.lltypesystem.rfficache import platform
    for name, bits in platform.items():
        if name.startswith('unsigned'):
            name = 'u' + name[9:]
            signed = False
        else:
            signed = True
        name = name.replace(' ', '')
        llname = name.upper()
        inttype = rarithmetic.build_int('r_' + name, signed, bits)
        globals()['r_' + name] = inttype
        globals()[llname] = lltype.build_number(llname, inttype)
setup()
def CStruct(name, *fields, **kwds):
    """ A small helper to create external C structure, not the
    pypy one
    """
    # copy the hints so the caller's dict is not mutated
    hints = dict(kwds.get('hints', {}))
    hints['external'] = 'C'
    hints['c_name'] = name
    kwds['hints'] = hints
    # Hack: prefix all attribute names with 'c_' to cope with names starting
    # with '_'. The genc backend removes the 'c_' prefixes...
    prefixed = [('c_' + fname, ftype) for fname, ftype in fields]
    return lltype.Ptr(lltype.Struct(name, *prefixed, **kwds))
# the C-level 'errno' variable, rendered directly by the backend
c_errno = CConstant('errno', lltype.Signed)
# void *
VOIDP = lltype.Ptr(lltype.FixedSizeArray(lltype.Void, 1))
# char *
CCHARP = lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True}))
# int *
INTP = lltype.Ptr(lltype.Array(lltype.Signed, hints={'nolength': True}))
# various type mapping
# str -> char*
def str2charp(s):
    """ str -> char*

    Raw-malloc'ed and NUL-terminated; the caller owns the buffer and
    must release it with free_charp()."""
    array = lltype.malloc(CCHARP.TO, len(s) + 1, flavor='raw')
    for i in range(len(s)):
        array[i] = s[i]
    array[len(s)] = '\x00'
    return array
def free_charp(cp):
    # release a buffer obtained from str2charp()
    lltype.free(cp, flavor='raw')
# char* -> str
# doesn't free char*
def charp2str(cp):
    """Copy a NUL-terminated char buffer into a string.
    The buffer is NOT freed."""
    chars = []
    i = 0
    while True:
        c = cp[i]
        if c == '\x00':
            break
        chars.append(c)
        i += 1
    return "".join(chars)
# char**
CCHARPP = lltype.Ptr(lltype.Array(CCHARP, hints={'nolength': True}))
def liststr2charpp(l):
    """ list[str] -> char**, NULL terminated

    Each element and the outer array are raw-malloc'ed; release the whole
    structure with free_charpp()."""
    array = lltype.malloc(CCHARPP.TO, len(l) + 1, flavor='raw')
    for i in range(len(l)):
        array[i] = str2charp(l[i])
    array[len(l)] = lltype.nullptr(CCHARP.TO)
    return array
def free_charpp(ref):
    """ frees list of char**, NULL terminated

    Frees each contained string first, then the outer array itself."""
    i = 0
    while ref[i]:
        free_charp(ref[i])
        i += 1
    lltype.free(ref, flavor='raw')
| Python |
from weakref import WeakValueDictionary
from pypy.annotation.pairtype import pairtype
from pypy.rpython.error import TyperError
from pypy.rlib.objectmodel import malloc_zero_filled, we_are_translated
from pypy.rlib.objectmodel import debug_assert
from pypy.rpython.robject import PyObjRepr, pyobj_repr
from pypy.rlib.rarithmetic import _hash_string
from pypy.rpython.rmodel import inputconst, IntegerRepr
from pypy.rpython.rstr import AbstractStringRepr,AbstractCharRepr,\
AbstractUniCharRepr, AbstractStringIteratorRepr,\
AbstractLLHelpers
from pypy.rpython.lltypesystem import ll_str
from pypy.rpython.lltypesystem.lltype import \
GcStruct, Signed, Array, Char, UniChar, Ptr, malloc, \
Bool, Void, GcArray, nullptr, pyobjectptr
# ____________________________________________________________
#
# Concrete implementation of RPython strings:
#
# struct str {
# hash: Signed
# chars: array of Char
# }
# RPython string: cached hash (0 = not computed yet) + immutable chars
STR = GcStruct('rpy_string', ('hash',  Signed),
               ('chars', Array(Char, hints={'immutable': True,
                                            'isrpystring': True})))
SIGNED_ARRAY = GcArray(Signed)
# weak cache: prebuilt constants map to a single low-level string each
CONST_STR_CACHE = WeakValueDictionary()
class StringRepr(AbstractStringRepr):
    """Repr for RPython strings backed by the STR GcStruct."""
    lowleveltype = Ptr(STR)
    def __init__(self, *args):
        AbstractStringRepr.__init__(self, *args)
        self.ll = LLHelpers
    def convert_const(self, value):
        """Convert a prebuilt Python str constant to a low-level STR,
        caching the result (weakly) so each constant converts once."""
        if value is None:
            return nullptr(STR)
        #value = getattr(value, '__self__', value)  # for bound string methods
        if not isinstance(value, str):
            raise TyperError("not a str: %r" % (value,))
        try:
            return CONST_STR_CACHE[value]
        except KeyError:
            p = mallocstr(len(value))
            for i in range(len(value)):
                p.chars[i] = value[i]
            p.hash = 0
            self.ll.ll_strhash(p)   # precompute the hash
            CONST_STR_CACHE[value] = p
            return p
    def make_iterator_repr(self):
        return string_iterator_repr
    def can_ll_be_null(self, s_value):
        # whether the low-level value may be the NULL pointer
        if self is string_repr:
            return s_value.can_be_none()
        else:
            return True     # for CharRepr/UniCharRepr subclasses,
                            # where NULL is always valid: it is chr(0)
    def _list_length_items(self, hop, v_lst, LIST):
        # helper for operations taking a list of strings (e.g. join)
        LIST = LIST.TO
        v_length = hop.gendirectcall(LIST.ll_length, v_lst)
        v_items = hop.gendirectcall(LIST.ll_items, v_lst)
        return v_length, v_items
# single characters reuse the string machinery but are plain Char values
class CharRepr(AbstractCharRepr, StringRepr):
    lowleveltype = Char
class UniCharRepr(AbstractUniCharRepr):
    lowleveltype = UniChar
class __extend__(pairtype(PyObjRepr, AbstractStringRepr)):
    # CPython string object -> low-level STR: allocate a STR of the right
    # size and let the C helper fill in the character array
    def convert_from_to((r_from, r_to), v, llops):
        v_len = llops.gencapicall('PyString_Size', [v], resulttype=Signed)
        cstr = inputconst(Void, STR)
        v_result = llops.genop('malloc_varsize', [cstr, v_len],
                               resulttype=Ptr(STR))
        cchars = inputconst(Void, "chars")
        v_chars = llops.genop('getsubstruct', [v_result, cchars],
                              resulttype=Ptr(STR.chars))
        llops.gencapicall('PyString_ToLLCharArray', [v, v_chars])
        string_repr = llops.rtyper.type_system.rstr.string_repr
        v_result = llops.convertvar(v_result, string_repr, r_to)
        return v_result
class __extend__(pairtype(AbstractStringRepr, PyObjRepr)):
    # low-level STR -> CPython string object, via the C helper taking a
    # char array plus its length
    def convert_from_to((r_from, r_to), v, llops):
        string_repr = llops.rtyper.type_system.rstr.string_repr
        v = llops.convertvar(v, r_from, string_repr)
        cchars = inputconst(Void, "chars")
        v_chars = llops.genop('getsubstruct', [v, cchars],
                              resulttype=Ptr(STR.chars))
        v_size = llops.genop('getarraysize', [v_chars],
                             resulttype=Signed)
        # xxx put in table
        return llops.gencapicall('PyString_FromLLCharArrayAndSize',
                                 [v_chars, v_size],
                                 resulttype=pyobj_repr,
                                 _callable= lambda chars, sz: pyobjectptr(''.join(chars)))
def mallocstr(length):
    """Allocate an uninitialised STR of the given length, making sure
    the cached 'hash' field starts at 0 (the not-yet-computed marker)
    even when malloc does not zero-fill."""
    debug_assert(length >= 0, "negative string length")
    r = malloc(STR, length)
    if not we_are_translated() or not malloc_zero_filled:
        r.hash = 0
    return r
mallocstr._annspecialcase_ = 'specialize:semierased'
# ____________________________________________________________
#
# Low-level methods. These can be run for testing, but are meant to
# be direct_call'ed from rtyped flow graphs, which means that they will
# get flowed and annotated, mostly with SomePtr.
#
def ll_construct_restart_positions(s, l):
    # Construct the array of possible restarting positions over the first
    # 'l' chars of 's' (the KMP-style failure table: T[i] is the length
    # of the longest proper prefix of s[:i+1] that is also its suffix).
    # T = Array_of_ints [-1..len2]
    # T[-1] = -1 s2.chars[-1] is supposed to be unequal to everything else
    T = malloc( SIGNED_ARRAY, l)
    T[0] = 0
    i = 1
    j = 0
    while i<l:
        if s.chars[i] == s.chars[j]:
            j += 1
            T[i] = j
            i += 1
        elif j>0:
            # fall back to the previous candidate border
            j = T[j-1]
        else:
            T[i] = 0
            i += 1
            j = 0
    return T
class LLHelpers(AbstractLLHelpers):
def ll_char_mul(ch, times):
if times < 0:
times = 0
newstr = mallocstr(times)
j = 0
while j < times:
newstr.chars[j] = ch
j += 1
return newstr
    def ll_strlen(s):
        # len(s)
        return len(s.chars)
    def ll_stritem_nonneg(s, i):
        # s[i] with i already known to be non-negative
        chars = s.chars
        debug_assert(i>=0, "negative str getitem index")
        debug_assert(i<len(chars), "str getitem index out of bound")
        return chars[i]
    def ll_chr2str(ch):
        # build a one-character string from a Char
        s = mallocstr(1)
        s.chars[0] = ch
        return s
    def ll_strhash(s):
        # unlike CPython, there is no reason to avoid to return -1
        # but our malloc initializes the memory to zero, so we use zero as the
        # special non-computed-yet value.
        x = s.hash
        if x == 0:
            x = _hash_string(s.chars)
            s.hash = x
        return x
    ll_strhash._pure_function_ = True # it's pure but it does not look like it
    def ll_strfasthash(s):
        return s.hash     # assumes that the hash is already computed
def ll_strconcat(s1, s2):
len1 = len(s1.chars)
len2 = len(s2.chars)
newstr = mallocstr(len1 + len2)
j = 0
while j < len1:
newstr.chars[j] = s1.chars[j]
j += 1
i = 0
while i < len2:
newstr.chars[j] = s2.chars[i]
i += 1
j += 1
return newstr
def ll_strip(s, ch, left, right):
s_len = len(s.chars)
if s_len == 0:
return emptystr
lpos = 0
rpos = s_len - 1
if left:
while lpos < rpos and s.chars[lpos] == ch:
lpos += 1
if right:
while lpos < rpos and s.chars[rpos] == ch:
rpos -= 1
r_len = rpos - lpos + 1
result = mallocstr(r_len)
i = 0
j = lpos
while i < r_len:
result.chars[i] = s.chars[j]
i += 1
j += 1
return result
def ll_upper(s):
s_chars = s.chars
s_len = len(s_chars)
if s_len == 0:
return emptystr
i = 0
result = mallocstr(s_len)
while i < s_len:
ch = s_chars[i]
if 'a' <= ch <= 'z':
ch = chr(ord(ch) - 32)
result.chars[i] = ch
i += 1
return result
def ll_lower(s):
s_chars = s.chars
s_len = len(s_chars)
if s_len == 0:
return emptystr
i = 0
result = mallocstr(s_len)
while i < s_len:
ch = s_chars[i]
if 'A' <= ch <= 'Z':
ch = chr(ord(ch) + 32)
result.chars[i] = ch
i += 1
return result
def ll_join(s, length, items):
s_chars = s.chars
s_len = len(s_chars)
num_items = length
if num_items == 0:
return emptystr
itemslen = 0
i = 0
while i < num_items:
itemslen += len(items[i].chars)
i += 1
result = mallocstr(itemslen + s_len * (num_items - 1))
res_chars = result.chars
res_index = 0
i = 0
item_chars = items[i].chars
item_len = len(item_chars)
j = 0
while j < item_len:
res_chars[res_index] = item_chars[j]
j += 1
res_index += 1
i += 1
while i < num_items:
j = 0
while j < s_len:
res_chars[res_index] = s_chars[j]
j += 1
res_index += 1
item_chars = items[i].chars
item_len = len(item_chars)
j = 0
while j < item_len:
res_chars[res_index] = item_chars[j]
j += 1
res_index += 1
i += 1
return result
def ll_strcmp(s1, s2):
if not s1 and not s2:
return True
if not s1 or not s2:
return False
chars1 = s1.chars
chars2 = s2.chars
len1 = len(chars1)
len2 = len(chars2)
if len1 < len2:
cmplen = len1
else:
cmplen = len2
i = 0
while i < cmplen:
diff = ord(chars1[i]) - ord(chars2[i])
if diff != 0:
return diff
i += 1
return len1 - len2
def ll_streq(s1, s2):
if s1 == s2: # also if both are NULLs
return True
if not s1 or not s2:
return False
len1 = len(s1.chars)
len2 = len(s2.chars)
if len1 != len2:
return False
j = 0
chars1 = s1.chars
chars2 = s2.chars
while j < len1:
if chars1[j] != chars2[j]:
return False
j += 1
return True
def ll_startswith(s1, s2):
len1 = len(s1.chars)
len2 = len(s2.chars)
if len1 < len2:
return False
j = 0
chars1 = s1.chars
chars2 = s2.chars
while j < len2:
if chars1[j] != chars2[j]:
return False
j += 1
return True
def ll_endswith(s1, s2):
len1 = len(s1.chars)
len2 = len(s2.chars)
if len1 < len2:
return False
j = 0
chars1 = s1.chars
chars2 = s2.chars
offset = len1 - len2
while j < len2:
if chars1[offset + j] != chars2[j]:
return False
j += 1
return True
def ll_find_char(s, ch, start, end):
i = start
while i < end:
if s.chars[i] == ch:
return i
i += 1
return -1
def ll_rfind_char(s, ch, start, end):
i = end
while i > start:
i -= 1
if s.chars[i] == ch:
return i
return -1
def ll_count_char(s, ch, start, end):
count = 0
i = start
while i < end:
if s.chars[i] == ch:
count += 1
i += 1
return count
def ll_find(cls, s1, s2, start, end):
"""Knuth Morris Prath algorithm for substring match"""
len1 = len(s1.chars)
if end > len1:
end = len1
len2 = len(s2.chars)
if len2 == 1:
return cls.ll_find_char(s1, s2.chars[0], start, end)
if len2 == 0:
if (end-start) < 0:
return -1
return start
T = ll_construct_restart_positions(s2, len2)
# Now the find algorithm
i = 0
m = start
while m+i<end:
if s1.chars[m+i]==s2.chars[i]:
i += 1
if i==len2:
return m
else:
# mismatch, go back to the last possible starting pos
if i==0:
m += 1
else:
e = T[i-1]
m = m + i - e
i = e
return -1
ll_find = classmethod(ll_find)
def ll_rfind(cls, s1, s2, start, end):
"""Reversed version of ll_find()"""
len2 = len(s2.chars)
if len2 == 1:
return cls.ll_rfind_char(s1, s2.chars[0], start, end)
if len2 == 0:
len1 = len(s1.chars)
if end > len(s1.chars):
return len1
return end
# Construct the array of possible restarting positions
T = malloc( SIGNED_ARRAY, len2 )
T[0] = 1
i = 1
j = 1
while i<len2:
if s2.chars[len2-i-1] == s2.chars[len2-j]:
j += 1
T[i] = j
i += 1
elif j>1:
j = T[j-2]
else:
T[i] = 1
i += 1
j = 1
# Now the find algorithm
i = 1
m = end
while m-i>=start:
if s1.chars[m-i]==s2.chars[len2-i]:
if i==len2:
return m-i
i += 1
else:
# mismatch, go back to the last possible starting pos
if i==1:
m -= 1
else:
e = T[i-2]
m = m - i + e
i = e
return -1
ll_rfind = classmethod(ll_rfind)
def ll_count(cls, s1, s2, start, end):
"""Knuth Morris Prath algorithm for substring match"""
# XXX more code should be shared with ll_find
len1 = len(s1.chars)
if end > len1:
end = len1
len2 = len(s2.chars)
if len2 == 1:
return cls.ll_count_char(s1, s2.chars[0], start, end)
if len2 == 0:
if (end-start) < 0:
return 0
return end - start + 1
T = ll_construct_restart_positions(s2, len2)
# Now the find algorithm
i = 0
m = start
result = 0
while m+i<end:
if s1.chars[m+i]==s2.chars[i]:
i += 1
if i==len2:
result += 1
i = 0
m += len2
continue
# mismatch, go back to the last possible starting pos
if i==0:
m += 1
else:
e = T[i-1]
m = m + i - e
i = e
return result
ll_count = classmethod(ll_count)
def ll_join_strs(length, items):
num_items = length
itemslen = 0
i = 0
while i < num_items:
itemslen += len(items[i].chars)
i += 1
result = mallocstr(itemslen)
res_chars = result.chars
res_index = 0
i = 0
while i < num_items:
item_chars = items[i].chars
item_len = len(item_chars)
j = 0
while j < item_len:
res_chars[res_index] = item_chars[j]
j += 1
res_index += 1
i += 1
return result
def ll_join_chars(length, chars):
num_chars = length
result = mallocstr(num_chars)
res_chars = result.chars
i = 0
while i < num_chars:
res_chars[i] = chars[i]
i += 1
return result
def ll_stringslice_startonly(s1, start):
len1 = len(s1.chars)
newstr = mallocstr(len1 - start)
j = 0
while start < len1:
newstr.chars[j] = s1.chars[start]
start += 1
j += 1
return newstr
def ll_stringslice(s1, slice):
start = slice.start
stop = slice.stop
if stop >= len(s1.chars):
if start == 0:
return s1
stop = len(s1.chars)
newstr = mallocstr(stop - start)
j = 0
while start < stop:
newstr.chars[j] = s1.chars[start]
start += 1
j += 1
return newstr
def ll_stringslice_minusone(s1):
newlen = len(s1.chars) - 1
newstr = mallocstr(newlen)
j = 0
while j < newlen:
newstr.chars[j] = s1.chars[j]
j += 1
return newstr
def ll_split_chr(LIST, s, c):
chars = s.chars
strlen = len(chars)
count = 1
i = 0
while i < strlen:
if chars[i] == c:
count += 1
i += 1
res = LIST.ll_newlist(count)
items = res.ll_items()
i = 0
j = 0
resindex = 0
while j < strlen:
if chars[j] == c:
item = items[resindex] = mallocstr(j - i)
newchars = item.chars
k = i
while k < j:
newchars[k - i] = chars[k]
k += 1
resindex += 1
i = j + 1
j += 1
item = items[resindex] = mallocstr(j - i)
newchars = item.chars
k = i
while k < j:
newchars[k - i] = chars[k]
k += 1
resindex += 1
return res
def ll_replace_chr_chr(s, c1, c2):
length = len(s.chars)
newstr = mallocstr(length)
src = s.chars
dst = newstr.chars
j = 0
while j < length:
c = src[j]
if c == c1:
c = c2
dst[j] = c
j += 1
return newstr
def ll_contains(s, c):
chars = s.chars
strlen = len(chars)
i = 0
while i < strlen:
if chars[i] == c:
return True
i += 1
return False
def ll_int(s, base):
if not 2 <= base <= 36:
raise ValueError
chars = s.chars
strlen = len(chars)
i = 0
#XXX: only space is allowed as white space for now
while i < strlen and chars[i] == ' ':
i += 1
if not i < strlen:
raise ValueError
#check sign
sign = 1
if chars[i] == '-':
sign = -1
i += 1
elif chars[i] == '+':
i += 1;
# skip whitespaces between sign and digits
while i < strlen and chars[i] == ' ':
i += 1
#now get digits
val = 0
oldpos = i
while i < strlen:
c = ord(chars[i])
if ord('a') <= c <= ord('z'):
digit = c - ord('a') + 10
elif ord('A') <= c <= ord('Z'):
digit = c - ord('A') + 10
elif ord('0') <= c <= ord('9'):
digit = c - ord('0')
else:
break
if digit >= base:
break
val = val * base + digit
i += 1
if i == oldpos:
raise ValueError # catch strings like '+' and '+ '
#skip trailing whitespace
while i < strlen and chars[i] == ' ':
i += 1
if not i == strlen:
raise ValueError
return sign * val
# interface to build strings:
# x = ll_build_start(n)
# ll_build_push(x, next_string, 0)
# ll_build_push(x, next_string, 1)
# ...
# ll_build_push(x, next_string, n-1)
# s = ll_build_finish(x)
def ll_build_start(parts_count):
return malloc(TEMP, parts_count)
def ll_build_push(builder, next_string, index):
builder[index] = next_string
def ll_build_finish(builder):
return LLHelpers.ll_join_strs(len(builder), builder)
def ll_constant(s):
return string_repr.convert_const(s)
ll_constant._annspecialcase_ = 'specialize:memo'
def do_stringformat(cls, hop, sourcevarsrepr):
s_str = hop.args_s[0]
assert s_str.is_constant()
s = s_str.const
things = cls.parse_fmt_string(s)
size = inputconst(Signed, len(things)) # could be unsigned?
cTEMP = inputconst(Void, TEMP)
vtemp = hop.genop("malloc_varsize", [cTEMP, size],
resulttype=Ptr(TEMP))
# XXX hash
r_tuple = hop.args_r[1]
v_tuple = hop.args_v[1]
argsiter = iter(sourcevarsrepr)
InstanceRepr = hop.rtyper.type_system.rclass.InstanceRepr
for i, thing in enumerate(things):
if isinstance(thing, tuple):
code = thing[0]
vitem, r_arg = argsiter.next()
if not hasattr(r_arg, 'll_str'):
raise TyperError("ll_str unsupported for: %r" % r_arg)
if code == 's' or (code == 'r' and isinstance(r_arg, InstanceRepr)):
vchunk = hop.gendirectcall(r_arg.ll_str, vitem)
elif code == 'd':
assert isinstance(r_arg, IntegerRepr)
#vchunk = hop.gendirectcall(r_arg.ll_str, vitem)
vchunk = hop.gendirectcall(ll_str.ll_int2dec, vitem)
elif code == 'f':
#assert isinstance(r_arg, FloatRepr)
vchunk = hop.gendirectcall(r_arg.ll_str, vitem)
elif code == 'x':
assert isinstance(r_arg, IntegerRepr)
vchunk = hop.gendirectcall(ll_str.ll_int2hex, vitem,
inputconst(Bool, False))
elif code == 'o':
assert isinstance(r_arg, IntegerRepr)
vchunk = hop.gendirectcall(ll_str.ll_int2oct, vitem,
inputconst(Bool, False))
else:
raise TyperError, "%%%s is not RPython" % (code, )
else:
from pypy.rpython.lltypesystem.rstr import string_repr
vchunk = inputconst(string_repr, thing)
i = inputconst(Signed, i)
hop.genop('setarrayitem', [vtemp, i, vchunk])
hop.exception_cannot_occur() # to ignore the ZeroDivisionError of '%'
return hop.gendirectcall(cls.ll_join_strs, size, vtemp)
do_stringformat = classmethod(do_stringformat)
# scratch array type used by the string-building and %-formatting helpers
TEMP = GcArray(Ptr(STR))
# TODO: make the public interface of the rstr module cleaner
ll_strconcat = LLHelpers.ll_strconcat
ll_join = LLHelpers.ll_join
do_stringformat = LLHelpers.do_stringformat
# the singleton repr instances for str / char / unichar
string_repr = StringRepr()
char_repr = CharRepr()
unichar_repr = UniCharRepr()
char_repr.ll = LLHelpers
unichar_repr.ll = LLHelpers
# shared prebuilt empty string, returned by many helpers above
emptystr = string_repr.convert_const("")
class StringIteratorRepr(AbstractStringIteratorRepr):
    """Repr of an iterator over a string: a small GcStruct holding the
    string and the current index."""
    lowleveltype = Ptr(GcStruct('stringiter',
                                ('string', string_repr.lowleveltype),
                                ('index', Signed)))
    def __init__(self):
        # the abstract base dispatches to these module-level helpers
        self.ll_striter = ll_striter
        self.ll_strnext = ll_strnext
def ll_striter(string):
    """Create a fresh low-level iterator positioned at the start of
    'string'."""
    # local renamed from 'iter' to avoid shadowing the builtin
    it = malloc(string_iterator_repr.lowleveltype.TO)
    it.string = string
    it.index = 0
    return it
def ll_strnext(iter):
    """Return the next character from the iterator and advance it.

    Raises StopIteration once the underlying string is exhausted.
    """
    i = iter.index
    s_chars = iter.string.chars
    if i >= len(s_chars):
        raise StopIteration
    iter.index = i + 1
    return s_chars[i]
# singleton repr used by ll_striter above
string_iterator_repr = StringIteratorRepr()
# these should be in rclass, but circular imports prevent (also it's
# not that insane that a string constant is built in this file).
# Prebuilt pieces used when str()-ing instances, e.g. "<Foo object at 0x...>"
instance_str_prefix = string_repr.convert_const("<")
instance_str_infix = string_repr.convert_const(" object at 0x")
instance_str_suffix = string_repr.convert_const(">")
null_str = string_repr.convert_const("NULL")
unboxed_instance_str_prefix = string_repr.convert_const("<unboxed ")
unboxed_instance_str_suffix = string_repr.convert_const(">")
percent_f = string_repr.convert_const("%f")
| Python |
from pypy.rpython.rgeneric import AbstractGenericCallableRepr
from pypy.rpython.lltypesystem.lltype import Ptr, FuncType
class GenericCallableRepr(AbstractGenericCallableRepr):
    """lltypesystem repr of a generic callable: a Ptr to a FuncType built
    from the argument and result reprs."""
    def create_low_leveltype(self):
        # collect the low-level types of all arguments, then wrap them
        arg_types = []
        for r_arg in self.args_r:
            arg_types.append(r_arg.lowleveltype)
        return Ptr(FuncType(arg_types, self.r_result.lowleveltype))
| Python |
from pypy.interpreter.argument import Arguments, ArgErr
from pypy.annotation import model as annmodel
from pypy.rpython import rtuple
from pypy.rpython.error import TyperError
from pypy.rpython.lltypesystem import lltype
class CallPatternTooComplex(TyperError):
    """Raised when a call uses an argument pattern (e.g. '**', or '*' with
    a non-tuple) that cannot be expressed in RPython."""
    pass
def getrinputs(rtyper, graph):
    """Return the list of reprs of the input arguments to the 'graph'."""
    reprs = []
    for v_arg in graph.getargs():
        reprs.append(rtyper.bindingrepr(v_arg))
    return reprs
def getrresult(rtyper, graph):
    """Return the repr of the result variable of the 'graph'.

    Falls back to Void when the return variable has no annotation at all.
    """
    v_result = graph.getreturnvar()
    if v_result not in rtyper.annotator.bindings:
        return lltype.Void
    return rtyper.bindingrepr(v_result)
def getsig(rtyper, graph):
    """Return the complete 'signature' of the graph: a tuple of the
    abstract signature, the defaults, the input reprs and the result repr."""
    rinputs = getrinputs(rtyper, graph)
    rresult = getrresult(rtyper, graph)
    return (graph.signature, graph.defaults, rinputs, rresult)
def callparse(rtyper, graph, hop, opname, r_self=None):
    """Parse the arguments of 'hop' when calling the given 'graph'.

    Uses the interpreter-level Arguments machinery (with holder objects
    instead of wrapped values) to match the call site against the graph's
    signature, then emits one Variable per formal argument.  'r_self',
    if given, replaces the repr of the first (bound) argument.
    """
    rinputs = getrinputs(rtyper, graph)
    space = RPythonCallsSpace()
    def args_h(start):
        # wrap each call-site argument (from index 'start' on) in a holder
        return [VarHolder(i, hop.args_s[i])
                for i in range(start, hop.nb_args)]
    if r_self is None:
        start = 1
    else:
        start = 0
        rinputs[0] = r_self
    if opname == "simple_call":
        arguments = Arguments(space, args_h(start))
    elif opname == "call_args":
        # args_s[start] is the constant shape describing */** structure
        arguments = Arguments.fromshape(space,
                                        hop.args_s[start].const, # shape
                                        args_h(start+1))
    # parse the arguments according to the function we are calling
    signature = graph.signature
    defs_h = []
    if graph.defaults:
        for x in graph.defaults:
            defs_h.append(ConstHolder(x))
    try:
        holders = arguments.match_signature(signature, defs_h)
    except ArgErr, e:
        raise TyperError, "signature mismatch: %s" % e.getmsg(graph.name)
    assert len(holders) == len(rinputs), "argument parsing mismatch"
    # finally emit a Variable of the expected repr for each formal argument
    vlist = []
    for h,r in zip(holders, rinputs):
        v = h.emit(r, hop)
        vlist.append(v)
    return vlist
class Holder(object):
    """Base class for call-argument holders: stand-ins for values that can
    be turned ('emitted') into flow-graph Variables of a requested repr.
    Emission is cached per repr so each conversion happens only once."""
    def is_tuple(self):
        return False
    def emit(self, repr, hop):
        """Return (and cache) the Variable of the given repr."""
        if not hasattr(self, '_cache'):
            self._cache = {}
        cache = self._cache
        if repr not in cache:
            cache[repr] = self._emit(repr, hop)
        return cache[repr]
class VarHolder(Holder):
    """Holds the 'num'th Variable of the call site, annotated 's_obj'."""
    def __init__(self, num, s_obj):
        self.num = num
        self.s_obj = s_obj
    def is_tuple(self):
        return isinstance(self.s_obj, annmodel.SomeTuple)
    def items(self):
        """Split a tuple-annotated variable into per-item holders."""
        assert self.is_tuple()
        return tuple([ItemHolder(self, i)
                      for i in range(len(self.s_obj.items))])
    def _emit(self, repr, hop):
        return hop.inputarg(repr, arg=self.num)
    def access(self, hop):
        """Return (repr, Variable) using the call site's own repr."""
        r = hop.args_r[self.num]
        return r, self.emit(r, hop)
class ConstHolder(Holder):
    """Holds a compile-time constant value (e.g. a default argument)."""
    def __init__(self, value):
        self.value = value
    def is_tuple(self):
        return type(self.value) is tuple
    def items(self):
        # a constant tuple's items are themselves constants
        assert self.is_tuple()
        return self.value
    def _emit(self, repr, hop):
        return hop.inputconst(repr, self.value)
class NewTupleHolder(Holder):
    """Holds a tuple built at the call site out of other holders.

    As an optimization, if the holders are exactly all the items of one
    underlying tuple holder (the f(*t) pattern), that original holder is
    returned instead of wrapping the items again.
    """
    def __new__(cls, holders):
        # detect "all items of the same holder, in full": reuse it directly
        for h in holders:
            if not isinstance(h, ItemHolder) or not h.holder == holders[0].holder:
                break
        else:
            if 0 < len(holders) == len(holders[0].holder.items()):
                return holders[0].holder
        inst = Holder.__new__(cls)
        inst.holders = tuple(holders)
        return inst
    def is_tuple(self):
        return True
    def items(self):
        return self.holders
    def _emit(self, repr, hop):
        # emit each item with the repr the tuple expects, then build it
        assert isinstance(repr, rtuple.AbstractTupleRepr)
        tupleitems_v = []
        for h in self.holders:
            v = h.emit(repr.items_r[len(tupleitems_v)], hop)
            tupleitems_v.append(v)
        vtuple = repr.newtuple(hop.llops, repr, tupleitems_v)
        return vtuple
class ItemHolder(Holder):
    """Holds one item (by index) of a tuple held by another holder."""
    def __init__(self, holder, index):
        self.holder = holder
        self.index = index
    def _emit(self, repr, hop):
        # fetch the tuple, extract our item, convert it to the wanted repr
        r_tup, v_tuple = self.holder.access(hop)
        n = self.index
        v_item = r_tup.getitem_internal(hop, v_tuple, n)
        return hop.llops.convertvar(v_item, r_tup.items_r[n], repr)
# for parsing call arguments
class RPythonCallsSpace:
    """Pseudo Object Space providing almost no real operation.
    For the Arguments class: if it really needs other operations, it means
    that the call pattern is too complex for R-Python.
    """
    w_tuple = NewTupleHolder
    def newtuple(self, items):
        return NewTupleHolder(items)
    def newdict(self):
        # '**' keyword unpacking is not expressible in RPython
        raise CallPatternTooComplex("'**' argument")
    def unpackiterable(self, it, expected_length=None):
        # only genuine tuples can be '*'-unpacked
        if not it.is_tuple():
            raise CallPatternTooComplex("'*' argument must be a tuple")
        items = it.items()
        if expected_length is not None and expected_length != len(items):
            raise ValueError
        return items
    def is_w(self, one, other):
        return one is other
    def type(self, item):
        return type(item)
| Python |
from pypy.rpython.rctypes.implementation import CTypesCallEntry, CTypesObjEntry
from pypy.annotation.model import SomeString
from ctypes import c_char_p
class CallEntry(CTypesCallEntry):
    "Annotation and rtyping of calls to c_char_p."
    _about_ = c_char_p
    def specialize_call(self, hop):
        # c_char_p() or c_char_p(some_string): allocate the box and,
        # if an initializer was given, copy the RPython string into it
        string_repr = hop.rtyper.type_system.rstr.string_repr
        r_char_p = hop.r_result
        hop.exception_cannot_occur()
        v_result = r_char_p.allocate_instance(hop.llops)
        if len(hop.args_s):
            v_value, = hop.inputargs(string_repr)
            r_char_p.setstring(hop.llops, v_result, v_value)
        return v_result
class ObjEntry(CTypesObjEntry):
    "Annotation and rtyping of c_char_p instances."
    _type_ = c_char_p
    # reading '.value' may yield None (NULL pointer), hence can_be_None
    s_return_trick = SomeString(can_be_None=True)
    def get_field_annotation(self, s_char_p, fieldname):
        # 'value' is the only supported attribute
        assert fieldname == 'value'
        return self.s_return_trick
    def get_repr(self, rtyper, s_char_p):
        from pypy.rpython.rctypes import rchar_p
        return rchar_p.CCharPRepr(rtyper, s_char_p, rchar_p.CCHARP)
| Python |
from pypy.objspace.flow.model import Constant
from pypy.annotation.pairtype import pairtype
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.rmodel import Repr, IntegerRepr, inputconst
from pypy.rpython.error import TyperError
from pypy.rpython.rbuiltin import BuiltinFunctionRepr
class TypeRepr(BuiltinFunctionRepr):
    """Repr of a (constant) ctypes type object itself, treated like a
    builtin callable."""
    def __init__(self, s_ctype):
        assert s_ctype.s_self is None
        # only constant ctypes classes can be rtyped
        if not s_ctype.is_constant():
            raise TyperError("non-constant ctypes type object")
        ctype = s_ctype.const
        BuiltinFunctionRepr.__init__(self, ctype)
class __extend__(pairtype(TypeRepr, IntegerRepr)):
    # 'ctype * n' builds a var-sized array type at runtime; since the ctype
    # is a constant, the result is fully described by the repeat count.
    def rtype_mul((r_ctype, r_int), hop):
        v_ctype, v_repeatcount = hop.inputargs(r_ctype, lltype.Signed)
        assert isinstance(v_ctype, Constant)
        return v_repeatcount
class VarSizedTypeRepr(Repr):
    """Repr of the var-sized array type built at runtime as 'ctype*int'.
    The ctype must be a real constant ctype, so the var-sized type can
    be represented as just the runtime length.
    """
    lowleveltype = lltype.Signed
    def rtype_simple_call(self, hop):
        # calling the runtime type: allocate an array of the given length
        # and initialize it from the remaining positional arguments
        r_array = hop.r_result
        args_r = [self] + [r_array.r_item] * (hop.nb_args-1)
        args_v = hop.inputargs(*args_r)
        v_repeatcount = args_v[0]
        hop.exception_cannot_occur()
        v_result = r_array.allocate_instance_varsize(hop.llops, v_repeatcount)
        r_array.initializeitems(hop.llops, v_result, args_v[1:])
        return v_result
| Python |
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rpython.rctypes.implementation import CTypesCallEntry, CTypesObjEntry
from pypy.annotation.model import SomeCTypesObject
from pypy.rpython.lltypesystem import lltype
from ctypes import pointer, POINTER, byref, c_int
# the common metaclass of all POINTER(...) classes
PointerType = type(POINTER(c_int))
class CallEntry(CTypesCallEntry):
    "Annotation and rtyping of calls to POINTER types."
    _type_ = PointerType
    def specialize_call(self, hop):
        # delegate calls to the logic for calls to ctypes.pointer()
        return PointerFnEntry.specialize_call(hop)
class ObjEntry(CTypesObjEntry):
    "Annotation and rtyping of pointer instances."
    _metatype_ = PointerType
    def get_field_annotation(self, s_pointer, fieldname):
        # only '.contents' is supported; the pointee does not own its memory
        assert fieldname == "contents"
        ptrtype = self.type
        assert s_pointer.knowntype == ptrtype
        return SomeCTypesObject(ptrtype._type_, ownsmemory=False)
    def get_repr(self, rtyper, s_pointer):
        from pypy.rpython.rctypes.rpointer import PointerRepr
        return PointerRepr(rtyper, s_pointer)
class PointerFnEntry(ExtRegistryEntry):
    "Annotation and rtyping of calls to ctypes.pointer()."
    _about_ = pointer
    def compute_result_annotation(self, s_arg):
        # pointer(x) yields a POINTER(type(x)) box that owns its memory
        assert isinstance(s_arg, SomeCTypesObject)
        ctype = s_arg.knowntype
        result_ctype = POINTER(ctype)
        return SomeCTypesObject(result_ctype, ownsmemory=True)
    def specialize_call(hop):
        # allocate the pointer box; if an argument was given, point at it
        r_ptr = hop.r_result
        hop.exception_cannot_occur()
        v_result = r_ptr.allocate_instance(hop.llops)
        if len(hop.args_s):
            v_contentsbox, = hop.inputargs(r_ptr.r_contents)
            r_ptr.setcontents(hop.llops, v_result, v_contentsbox)
        return v_result
    # staticmethod so CallEntry.specialize_call above can delegate to it
    specialize_call = staticmethod(specialize_call)
# byref() is equivalent to pointer() -- the difference is only an
# optimization that is useful in ctypes but not in rctypes.
PointerFnEntry._register_value(byref)
class POINTERFnEntry(ExtRegistryEntry):
    "Annotation and rtyping of calls to ctypes.POINTER(): constant-folded."
    _about_ = POINTER
    def compute_result_annotation(self, s_arg):
        from pypy.annotation.bookkeeper import getbookkeeper
        from atype import SomeVarSizedCTypesType
        if isinstance(s_arg, SomeVarSizedCTypesType):
            # POINTER(varsized_array_type): given that rctypes performs
            # no index checking, this pointer-to-array type is equivalent
            # to a pointer to an array of whatever size.
            # ('0' is a bad idea, though, as FixedSizeArrays of length 0
            # tend to say they have impossible items.)
            RESTYPE = POINTER(s_arg.ctype_array._type_ * 1)
        else:
            # POINTER(constant_ctype) returns the constant annotation
            # corresponding to the POINTER(ctype).
            assert s_arg.is_constant(), (
                "POINTER(%r): argument must be constant" % (s_arg,))
            RESTYPE = POINTER(s_arg.const)
        return getbookkeeper().immutablevalue(RESTYPE)
    def specialize_call(self, hop):
        # the whole call folds away into a Void constant
        assert hop.s_result.is_constant()
        hop.exception_cannot_occur()
        return hop.inputconst(lltype.Void, hop.s_result.const)
| Python |
from pypy.rpython.lltypesystem import lltype
from pypy.annotation.pairtype import pairtype
from pypy.rpython.rmodel import IntegerRepr, inputconst
from pypy.rpython.rctypes.rmodel import CTypesRefRepr
from pypy.objspace.flow.model import Constant
from pypy.rpython.rslice import AbstractSliceRepr
from pypy.rpython.lltypesystem.rstr import string_repr
from pypy.rpython.error import TyperError
class StringBufRepr(CTypesRefRepr):
    """Repr of ctypes string buffers (create_string_buffer): a boxed
    var-sized array of Char with string-like read/write access."""
    def rtype_len(self, hop):
        # len(buf) is the allocated size of the underlying char array
        [v_stringbuf] = hop.inputargs(self)
        v_array = self.get_c_data(hop.llops, v_stringbuf)
        return hop.genop('getarraysize', [v_array],
                         resulttype = lltype.Signed)
    def rtype_getattr(self, hop):
        # supports '.value' (NUL-terminated) and '.raw' (whole buffer)
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        v_box = hop.inputarg(self, 0)
        hop.exception_cannot_occur()
        if s_attr.const == 'value':
            from pypy.rpython.rctypes.rarray import ll_chararrayvalue
            return hop.gendirectcall(ll_chararrayvalue, v_box)
        elif s_attr.const == 'raw':
            return hop.gendirectcall(ll_stringbufraw, v_box)
        else:
            raise TyperError("StringBufRepr has no attribute %r" % (
                s_attr.const,))
    def rtype_setattr(self, hop):
        # both '.value' and '.raw' assignment copy the string in
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        assert s_attr.const in ('value', 'raw')
        v_box, v_attr, v_value = hop.inputargs(self, lltype.Void, string_repr)
        hop.gendirectcall(ll_stringbuf_setvalue_from_string, v_box, v_value)
    def get_c_data_of_item(self, llops, v_stringbuf, v_index):
        # address of the index'th char, as a pointer-to-one-char
        v_array = self.get_c_data(llops, v_stringbuf)
        v_char_p = llops.genop('direct_arrayitems', [v_array],
                               resulttype = ONE_CHAR_PTR)
        if isinstance(v_index, Constant) and v_index.value == 0:
            pass     # skip direct_ptradd
        else:
            v_char_p = llops.genop('direct_ptradd', [v_char_p, v_index],
                                   resulttype = ONE_CHAR_PTR)
        return v_char_p
# "pointer to a single char" type used by get_c_data_of_item above
ONE_CHAR_PTR = lltype.Ptr(lltype.FixedSizeArray(lltype.Char, 1))
class __extend__(pairtype(StringBufRepr, IntegerRepr)):
    # buf[i] -> Char, and buf[i] = ch, with no bounds checking beyond
    # what the backend array operations provide
    def rtype_getitem((r_stringbuf, r_int), hop):
        v_stringbuf, v_index = hop.inputargs(r_stringbuf, lltype.Signed)
        v_array = r_stringbuf.get_c_data(hop.llops, v_stringbuf)
        hop.exception_cannot_occur()
        return hop.genop('getarrayitem', [v_array, v_index],
                         resulttype = lltype.Char)
    def rtype_setitem((r_stringbuf, r_int), hop):
        v_stringbuf, v_index, v_item = hop.inputargs(r_stringbuf,
                                                     lltype.Signed,
                                                     lltype.Char)
        v_array = r_stringbuf.get_c_data(hop.llops, v_stringbuf)
        hop.exception_cannot_occur()
        hop.genop('setarrayitem', [v_array, v_index, v_item])
class __extend__(pairtype(StringBufRepr, AbstractSliceRepr)):
    # buf[start:] and buf[start:stop] return fresh RPython strings
    def rtype_getitem((r_stringbuf, r_slice), hop):
        rs = r_stringbuf.rtyper.type_system.rslice
        if r_slice == rs.startonly_slice_repr:
            v_stringbuf, v_start = hop.inputargs(r_stringbuf, rs.startonly_slice_repr)
            v_array = r_stringbuf.get_c_data(hop.llops, v_stringbuf)
            return hop.gendirectcall(ll_slice_startonly, v_array, v_start)
        if r_slice == rs.startstop_slice_repr:
            v_stringbuf, v_slice = hop.inputargs(r_stringbuf, rs.startstop_slice_repr)
            v_array = r_stringbuf.get_c_data(hop.llops, v_stringbuf)
            return hop.gendirectcall(ll_slice, v_array, v_slice)
        raise TyperError('getitem does not support slices with %r' % (r_slice,))
def ll_slice_startonly(sbuf, start):
    """sbuf[start:] -- delegate to the general start/stop helper."""
    return ll_slice_start_stop(sbuf, start, len(sbuf))
def ll_slice(sbuf, slice):
    """sbuf[slice.start:slice.stop] -- delegate to the general helper."""
    return ll_slice_start_stop(sbuf, slice.start, slice.stop)
def ll_slice_start_stop(sbuf, start, stop):
    """Copy sbuf[start:stop] into a fresh rstr string, applying the usual
    Python slice semantics for negative and out-of-range indices."""
    length = len(sbuf)
    # negative indices count from the end, clamped at 0
    if start < 0:
        start = max(length + start, 0)
    if stop < 0:
        stop = length + stop
    stop = min(max(stop, 0), length)
    # an inverted range yields the empty string
    if start > stop:
        start = stop
    newlength = stop - start
    newstr = lltype.malloc(string_repr.lowleveltype.TO, newlength)
    newstr.hash = 0     # mark the hash as not-yet-computed
    i = 0
    while i < newlength:
        newstr.chars[i] = sbuf[start + i]
        i += 1
    return newstr
def ll_stringbuf_setvalue_from_string(box, s):
    # Copy the string into the stringbuf. In ctypes the final \x00 is
    # copied unless the string has exactly the same size as the stringbuf.
    # We do the same, but unlike ctypes don't raise ValueError if the
    # string is longer than the stringbuf; we just truncate instead.
    # There is no support for setattr raising exceptions in RPython so far.
    dst = box.c_data
    count = len(s.chars) + 1        # + 1 to include the trailing \x00
    if count > len(dst):
        count = len(dst)            # truncate to the buffer size
    i = 0
    while i < count:
        dst[i] = s.chars[i]
        i += 1
def ll_stringbufraw(box):
    """Return the whole buffer content, including NULs, as a fresh
    RPython string (the '.raw' attribute)."""
    p = box.c_data
    length = len(p)
    newstr = lltype.malloc(string_repr.lowleveltype.TO, length)
    newstr.hash = 0   # hash not computed yet
    for i in range(length):
        newstr.chars[i] = p[i]
    return newstr
# low-level type of the string buffer's character data
STRBUFTYPE = lltype.Array(lltype.Char)
| Python |
from ctypes import Structure, Union
from pypy.annotation.model import SomeCTypesObject, SomeInteger
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rpython.rctypes.implementation import CTypesCallEntry, CTypesObjEntry
from pypy.rpython.lltypesystem import lltype
# the metaclasses of ctypes Structure and Union subclasses
StructType = type(Structure)
UnionType = type(Union)
# XXX this also implements Unions, but they are not properly emulated
# by the llinterpreter. They work in the generated C code, though.
def offsetof(Struct, fieldname):
    """Return the byte offset of 'fieldname' inside the ctypes structure
    class 'Struct' (like C's offsetof macro)."""
    field_descriptor = getattr(Struct, fieldname)
    return field_descriptor.offset
class OffsetOfFnEntry(ExtRegistryEntry):
    "Annotation and rtyping of calls to offsetof()"
    _about_ = offsetof
    def compute_result_annotation(self, s_Struct, s_fieldname):
        # both arguments must be constants, so the offset itself is a
        # compile-time constant nonnegative integer
        assert s_Struct.is_constant()
        assert s_fieldname.is_constant()
        ofs = offsetof(s_Struct.const, s_fieldname.const)
        assert ofs >= 0
        s_result = SomeInteger(nonneg=True)
        s_result.const = ofs
        return s_result
    def specialize_call(self, hop):
        # fold the call into the precomputed constant
        ofs = hop.s_result.const
        return hop.inputconst(lltype.Signed, ofs)
# ____________________________________________________________
class CallEntry(CTypesCallEntry):
    "Annotation and rtyping of calls to structure types."
    _type_ = StructType, UnionType
    def specialize_call(self, hop, **kwds_i):
        """Rtype 'MyStruct(...)': allocate the box and initialize the
        fields given positionally and/or by keyword.  'kwds_i' maps
        'i_<fieldname>' to the index of the corresponding argument."""
        from pypy.rpython.error import TyperError
        r_struct = hop.r_result
        hop.exception_cannot_occur()
        v_result = r_struct.allocate_instance(hop.llops)
        index_by_name = {}
        name_by_index = {}
        # collect the keyword arguments
        for key, index in kwds_i.items():
            assert key.startswith('i_')
            name = key[2:]
            assert index not in name_by_index
            index_by_name[name] = index
            name_by_index[index] = name
        # add the positional arguments
        ctype = self.instance
        fieldsiter = iter(ctype._fields_)
        for i in range(hop.nb_args):
            if i not in name_by_index:
                # match each positional argument with the next field that
                # was not already given by keyword
                try:
                    name, _ = fieldsiter.next()
                except StopIteration:
                    raise TyperError("too many arguments in struct construction")
                if name in index_by_name:
                    raise TyperError("multiple values for field %r" % (name,))
                index_by_name[name] = i
                name_by_index[i] = name
        # initialize the fields from the arguments, as far as they are present
        for name, _ in ctype._fields_:
            if name in index_by_name:
                index = index_by_name[name]
                v_valuebox = hop.inputarg(r_struct.r_fields[name], arg=index)
                r_struct.setfield(hop.llops, v_result, name, v_valuebox)
        return v_result
class ObjEntry(CTypesObjEntry):
    "Annotation and rtyping of structure instances."
    _metatype_ = StructType, UnionType
    def get_field_annotation(self, s_struct, fieldname):
        # look the field up in the declared _fields_; the sub-object does
        # not own its memory (it lives inside the structure)
        for name, ctype in self.type._fields_:
            if name == fieldname:
                s_result = SomeCTypesObject(ctype, ownsmemory=False)
                return s_result.return_annotation()
        raise AttributeError('%r has no field %r' % (self.type, fieldname))
    def get_repr(self, rtyper, s_struct):
        from pypy.rpython.rctypes.rstruct import StructRepr
        # exactly one of the two must hold; StructRepr handles both cases
        is_struct = isinstance(self.type, StructType)
        is_union = isinstance(self.type, UnionType)
        assert is_struct ^ is_union
        return StructRepr(rtyper, s_struct, is_union)
| Python |
"""
Helpers to access the C-level 'errno' variable.
"""
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.annotation import model as annmodel
from ctypes import pythonapi, py_object
##def setfromerrno(exc=OSError):
## """Raise an exception of the given class with the last failed C library
## function's errno."""
## pythonapi.PyErr_SetFromErrno(py_object(exc))
def geterrno():
    """Return the current 'errno' value.

    Trick: ask CPython to raise OSError from the current errno via
    PyErr_SetFromErrno, then read the errno back off the exception.
    """
    try:
        pythonapi.PyErr_SetFromErrno(py_object(OSError))
    except OSError, e:
        return e.errno
    else:
        # PyErr_SetFromErrno always sets an exception
        raise RuntimeError("setfromerrno() should have raised")
class GetErrnoFnEntry(ExtRegistryEntry):
    "Annotation and rtyping of calls to geterrno()"
    _about_ = geterrno
    def compute_result_annotation(self):
        return annmodel.SomeInteger()
    def specialize_call(self, hop):
        # translated programs call the C-level 'geterrno' helper directly;
        # when interpreted, fall back to the Python geterrno above
        from pypy.rpython.lltypesystem import lltype
        hop.exception_cannot_occur()
        return hop.llops.gencapicall('geterrno', [],
                                     resulttype = lltype.Signed,
                                     includes = (),
                                     _callable = geterrno)
| Python |
from pypy.annotation.model import SomeCTypesObject
from pypy.annotation import model as annmodel
from pypy.annotation.pairtype import pairtype
from pypy.rpython.error import TyperError
from pypy.rpython.rctypes.implementation import CTypesEntry
from pypy.rpython.lltypesystem import lltype
import ctypes
# the common metaclass of all ctypes function-pointer classes
CFuncPtrType = type(ctypes.CFUNCTYPE(None))
class SomeCTypesFunc(annmodel.SomeBuiltin):
    """Stands for a known constant ctypes function.  Variables containing
    potentially multiple ctypes functions are regular SomeCTypesObjects.
    This is a separate annotation because some features are only supported
    for calls to constant functions, like _rctypes_pyerrchecker_ and
    functions with no declared argtypes.  It also produces better code:
    a direct_call instead of an indirect_call.
    """
    def normalized(self):
        # degrade to the general (non-constant) annotation of the same
        # normalized CFUNCTYPE signature
        ctype = normalized_func_ctype(self.const)
        return cto_union(ctype, ctype)      # -> SomeCTypesObject
class __extend__(pairtype(SomeCTypesFunc, SomeCTypesFunc)):
    # merging two constant functions loses constness: the result is a
    # general SomeCTypesObject of their common normalized signature
    def union((ctf1, ctf2)):
        ctype1 = normalized_func_ctype(ctf1.const)
        ctype2 = normalized_func_ctype(ctf2.const)
        return cto_union(ctype1, ctype2)
class __extend__(pairtype(SomeCTypesFunc, SomeCTypesObject)):
    # constant function merged with a general ctypes object
    def union((ctf1, cto2)):
        ctype1 = normalized_func_ctype(ctf1.const)
        return cto_union(ctype1, cto2.knowntype)
class __extend__(pairtype(SomeCTypesObject, SomeCTypesFunc)):
    # symmetric case of the union above
    def union((cto1, ctf2)):
        ctype2 = normalized_func_ctype(ctf2.const)
        return cto_union(cto1.knowntype, ctype2)
def normalized_func_ctype(cfuncptr):
    """Return the canonical CFUNCTYPE(restype, *argtypes) class for the
    given ctypes function pointer; requires declared argtypes."""
    argtypes = getattr(cfuncptr, 'argtypes', None)
    if argtypes is None:
        raise annmodel.UnionError("cannot merge two ctypes functions "
                                  "without declared argtypes")
    return ctypes.CFUNCTYPE(cfuncptr.restype, *argtypes)
def cto_union(ctype1, ctype2):
    """Merge two normalized function ctypes into a SomeCTypesObject.

    Only identical signatures can be merged; anything else raises
    UnionError during annotation.
    """
    if ctype1 == ctype2:
        return SomeCTypesObject(ctype1, ownsmemory=True)
    raise annmodel.UnionError("a ctypes function object can only be "
                              "merged with another function with the same "
                              "signature")
class CallEntry(CTypesEntry):
    """Annotation and rtyping of calls to external functions
    declared with ctypes.
    """
    _metatype_ = CFuncPtrType
    def compute_annotation(self):
        # A constant ctypes function annotates as SomeCTypesFunc, carrying
        # the bound result-annotation computer defined below.
        #self.ctype_object_discovered()
        func = self.instance
        analyser = self.compute_result_annotation
        methodname = getattr(func, '__name__', None)
        return SomeCTypesFunc(analyser, methodname=methodname)
    def get_instance_sample(self):
        # Return a ctypes function object whose attributes (restype) can
        # be inspected: the registered instance if any, else a NULL sample.
        if self.instance is not None:
            return self.instance
        else:
            return self.type() # a sample NULL function object
    def compute_result_annotation(self, *args_s):
        """
        Answer the annotation of the external function's result
        """
        cfuncptr = self.get_instance_sample()
        result_ctype = cfuncptr.restype
        if result_ctype is None:
            # restype=None means the C function returns void
            return None
        if result_ctype is ctypes.py_object:
            raise Exception("ctypes functions cannot have restype=py_object; "
                            "set their restype to a subclass of py_object "
                            "and call apyobject.register_py_object_subclass")
        #... because then in ctypes you don't get automatic unwrapping.
        # That would not be annotatable, for the same reason that
        # reading the .value attribute of py_object is not annotatable
        s_result = SomeCTypesObject(result_ctype, ownsmemory=True)
        # unwrap to the plain RPython-level annotation (e.g. int for c_int)
        return s_result.return_annotation()
    ## def object_seen(self, bookkeeper):
    ##     "Called when the annotator sees this ctypes function object."
    ##     # if the function is a Python callback, emulate a call to it
    ##     # so that the callback is properly annotated
    ##     if hasattr(self.instance, 'callback'):
    ##         callback = self.instance.callback
    ##         argtypes = self.instance.argtypes
    ##         restype = self.instance.restype
    ##         s_callback = bookkeeper.immutablevalue(callback)
    ##         # the input arg annotations, which are automatically unwrapped
    ##         args_s = [bookkeeper.valueoftype(ctype).return_annotation()
    ##                   for ctype in argtypes]
    ##         uniquekey = (callback, argtypes, restype)
    ##         s_res = bookkeeper.emulate_pbc_call(uniquekey, s_callback, args_s)
    ##         # check the result type
    ##         if restype is None:
    ##             s_expected = annmodel.s_None
    ##         else:
    ##             s_expected = bookkeeper.valueoftype(restype)
    ##         # can also return the unwrapped version of the ctype,
    ##         # e.g. an int instead of a c_int
    ##         s_orelse = s_expected.return_annotation()
    ##         assert s_expected.contains(s_res) or s_orelse.contains(s_res), (
    ##             "%r should return a %s but returned %s" % (callback,
    ##                                                        restype,
    ##                                                        s_res))
    def specialize_call(self, hop):
        from pypy.rpython.rctypes.rfunc import get_funcptr_constant
        from pypy.rpython.rctypes.rfunc import rtype_funcptr_call
        cfuncptr = self.instance
        # Build a constant low-level function pointer and rtype the call
        # as a direct_call; an optional per-function hook declared via
        # _rctypes_pyerrchecker_ is passed along to run after the call.
        v_funcptr, args_r, r_res = get_funcptr_constant(hop.rtyper, cfuncptr,
                                                        hop.args_s)
        pyerrchecker = getattr(cfuncptr, '_rctypes_pyerrchecker_', None)
        return rtype_funcptr_call(hop, v_funcptr, args_r, r_res, pyerrchecker)
    def get_repr(self, rtyper, s_funcptr):
        # for variables containing ctypes function pointers
        from pypy.rpython.rctypes.rfunc import CFuncPtrRepr
        return CFuncPtrRepr(rtyper, s_funcptr)
| Python |
from ctypes import ARRAY, c_int
from pypy.rpython.error import TyperError
from pypy.rpython.lltypesystem.rstr import string_repr, emptystr
from pypy.rpython.rmodel import IntegerRepr, inputconst
from pypy.rpython.rslice import AbstractSliceRepr
from pypy.rpython.lltypesystem import lltype
from pypy.annotation.pairtype import pairtype
from pypy.rpython.rctypes.rmodel import CTypesRefRepr, CTypesValueRepr
from pypy.rpython.rctypes.rmodel import genreccopy_arrayitem, reccopy, C_ZERO
from pypy.rpython.rctypes.rprimitive import PrimitiveRepr
from pypy.rpython.rctypes.rpointer import PointerRepr
from pypy.rpython.rctypes.aarray import VarSizedArrayType
from pypy.annotation.model import SomeCTypesObject
from pypy.objspace.flow.model import Constant
# metatype shared by all ctypes array classes (e.g. c_int * 10)
ArrayType = type(ARRAY(c_int, 10))
class ArrayRepr(CTypesRefRepr):
    """Repr for ctypes array objects (fixed-length or var-sized)."""
    def __init__(self, rtyper, s_array):
        array_ctype = s_array.knowntype
        item_ctype = array_ctype._type_
        # var-sized arrays have no compile-time length
        if isinstance(array_ctype, VarSizedArrayType):
            self.length = None
        else:
            self.length = array_ctype._length_
        # Find the repr and low-level type of items from their ctype
        self.r_item = rtyper.getrepr(SomeCTypesObject(item_ctype,
                                                      ownsmemory=False))
        # Here, self.c_data_type == self.ll_type
        if self.length is not None:
            c_data_type = lltype.FixedSizeArray(self.r_item.ll_type,
                                                self.length)
        else:
            c_data_type = lltype.Array(self.r_item.ll_type,
                                       hints={'nolength': True})
        super(ArrayRepr, self).__init__(rtyper, s_array, c_data_type)
    def get_content_keepalive_type(self):
        "An extra array of keepalives, one per item."
        item_keepalive_type = self.r_item.get_content_keepalive_type()
        if not item_keepalive_type:
            return None
        elif self.length is not None:
            return lltype.FixedSizeArray(item_keepalive_type, self.length)
        else:
            raise NotImplementedError("XXX not supported yet: "
                                      "var-sized arrays of pointers")
    def initialize_const(self, p, value):
        # NOTE(review): iterates range(self.length) — looks like constant
        # var-sized arrays (length None) are not expected here; confirm.
        for i in range(self.length):
            llitem = self.r_item.convert_const(value[i])
            if isinstance(self.r_item, CTypesRefRepr):
                # ByRef case
                reccopy(llitem.c_data, p.c_data[i])
            else:
                # ByValue case
                p.c_data[i] = llitem.c_data[0]
    def rtype_getattr(self, hop):
        # only '.value' on char arrays is supported (reads as a string)
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        assert s_attr.const == 'value'
        assert self.r_item.ll_type == lltype.Char # .value: char arrays only
        hop.exception_cannot_occur()
        if self.length == 0:
            # zero-length array: the result can only be the empty string
            return hop.inputconst(lltype.typeOf(emptystr), emptystr)
        else:
            v_box = hop.inputarg(self, 0)
            return hop.gendirectcall(ll_chararrayvalue, v_box)
    def get_c_data_of_item(self, llops, v_array, v_index):
        # Return a pointer to the raw data of item v_index.
        v_c_array = self.get_c_data(llops, v_array)
        if isinstance(self.r_item, CTypesRefRepr):
            # ByRef case
            return llops.genop('getarraysubstruct', [v_c_array, v_index],
                               lltype.Ptr(self.r_item.c_data_type))
        else:
            # ByValue case
            P = lltype.Ptr(lltype.FixedSizeArray(self.r_item.ll_type, 1))
            v_items = llops.genop('direct_arrayitems', [v_c_array],
                                  resulttype = P)
            if isinstance(v_index, Constant) and v_index.value == 0:
                pass # skip direct_ptradd
            else:
                v_items = llops.genop('direct_ptradd', [v_items, v_index],
                                      resulttype = P)
            return v_items
    def get_item_value(self, llops, v_array, v_index):
        # ByValue case only
        assert isinstance(self.r_item, CTypesValueRepr)
        v_c_array = self.get_c_data(llops, v_array)
        return llops.genop('getarrayitem', [v_c_array, v_index],
                           resulttype = self.r_item.ll_type)
    ## def set_item_value(self, llops, v_array, v_index, v_newvalue):
    ##     # ByValue case only
    ##     assert isinstance(self.r_item, CTypesValueRepr)
    ##     v_c_array = self.get_c_data(llops, v_array)
    ##     llops.genop('setarrayitem', [v_c_array, v_index, v_newvalue])
    def setitem(self, llops, v_array, v_index, v_item):
        # Store v_item into the array: copy the raw data, then keep the
        # matching keepalive entry in sync (when one exists).
        v_newvalue = self.r_item.get_c_data_or_value(llops, v_item)
        # copy the new value (which might be a whole structure)
        v_c_array = self.get_c_data(llops, v_array)
        genreccopy_arrayitem(llops, v_newvalue, v_c_array, v_index)
        # copy the keepalive information too
        v_keepalive_array = self.getkeepalive(llops, v_array)
        if v_keepalive_array is not None:
            v_newkeepalive = self.r_item.getkeepalive(llops, v_item)
            genreccopy_arrayitem(llops, v_newkeepalive,
                                 v_keepalive_array, v_index)
    def initializeitems(self, llops, v_array, items_v):
        # Fill a fresh array from a sequence of item variables.
        for i, v_item in enumerate(items_v):
            c_index = inputconst(lltype.Signed, i)
            self.setitem(llops, v_array, c_index, v_item)
class __extend__(pairtype(ArrayRepr, IntegerRepr)):
    def rtype_getitem((r_array, r_int), hop):
        # array[i]: read one item, either by value or by reference
        v_array, v_index = hop.inputargs(r_array, lltype.Signed)
        hop.exception_cannot_occur()
        if isinstance(r_array.r_item, PrimitiveRepr):
            # primitive case (optimization; the below also works in this case)
            # NB. this optimization is invalid for PointerReprs! See for
            # example: a[0].contents = ... to change the first pointer of
            # an array of pointers.
            v_value = r_array.get_item_value(hop.llops, v_array, v_index)
            return r_array.r_item.return_value(hop.llops, v_value)
        else:
            # ByRef case
            v_c_data = r_array.get_c_data_of_item(hop.llops, v_array, v_index)
            return r_array.r_item.return_c_data(hop.llops, v_c_data)
    def rtype_setitem((r_array, r_int), hop):
        # array[i] = item
        v_array, v_index, v_item = hop.inputargs(r_array, lltype.Signed,
                                                 r_array.r_item)
        hop.exception_cannot_occur()
        r_array.setitem(hop.llops, v_array, v_index, v_item)
class __extend__(pairtype(ArrayRepr, AbstractSliceRepr)):
    def rtype_getitem((r_array, r_slic), hop):
        # array[start:stop] is only supported on char arrays and returns
        # an RPython string
        rs = hop.rtyper.type_system.rslice
        hop.exception_cannot_occur()
        if r_slic == rs.startstop_slice_repr:
            # slicing: char array only
            assert r_array.r_item.ll_type == lltype.Char
            if r_array.length == 0:
                # zero-length array: any slice is the empty string
                return hop.inputconst(lltype.typeOf(emptystr), emptystr)
            v_array, v_slice = hop.inputargs(r_array, rs.startstop_slice_repr)
            return hop.gendirectcall(ll_chararrayslice, v_array, v_slice)
        raise TyperError('getitem does not support slices with %r' % (r_slic,))
class __extend__(pairtype(ArrayRepr, PointerRepr)):
def convert_from_to((r_from, r_to), v, llops):
# XXX keepalives
r_temp = r_to.r_memoryowner
v_owned_box = r_temp.allocate_instance(llops)
v_c_array = r_from.get_c_data_of_item(llops, v, C_ZERO)
r_temp.setvalue(llops, v_owned_box, v_c_array)
return llops.convertvar(v_owned_box, r_temp, r_to)
def ll_chararrayvalue(box):
    # Low-level helper implementing '.value' on a char array box: copy
    # the characters up to the first NUL (or the full array) into a
    # fresh RPython string.
    from pypy.rpython.rctypes import rchar_p
    p = box.c_data
    length = rchar_p.ll_strnlen(lltype.direct_arrayitems(p), len(p))
    newstr = lltype.malloc(string_repr.lowleveltype.TO, length)
    newstr.hash = 0
    for i in range(length):
        newstr.chars[i] = p[i]
    return newstr
def ll_chararrayslice(box, slice):
    # Low-level helper implementing char_array[start:stop]: copy the
    # requested range into a fresh RPython string.
    from pypy.rpython.rctypes import rchar_p
    chars = box.c_data
    begin = slice.start
    length = slice.stop - begin
    assert length >= 0
    result = lltype.malloc(string_repr.lowleveltype.TO, length)
    result.hash = 0
    for i in range(length):
        result.chars[i] = chars[begin + i]
    return result
| Python |
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rpython.rctypes.implementation import CTypesCallEntry, CTypesObjEntry
from pypy.annotation.model import SomeInteger, SomeCTypesObject
from ctypes import c_void_p, c_int, POINTER, cast, c_char, c_char_p
from pypy.rpython.rctypes.astringbuf import StringBufferType
from pypy.rpython.rctypes.afunc import CFuncPtrType, SomeCTypesFunc
# metatype shared by all ctypes pointer classes produced by POINTER()
PointerType = type(POINTER(c_int))
class CallEntry(CTypesCallEntry):
    "Annotation and rtyping of calls to c_void_p."
    _about_ = c_void_p
    def specialize_call(self, hop):
        # c_void_p() or c_void_p(integer): allocate a fresh box; when an
        # integer is given, cast it to an Address and store it.
        r_void_p = hop.r_result
        hop.exception_cannot_occur()
        v_result = r_void_p.allocate_instance(hop.llops)
        if hop.args_r:
            if hop.args_s[0].is_constant() and hop.args_s[0].const is None:
                pass # c_void_p(None) == c_void_p()
            else:
                from pypy.rpython.lltypesystem import lltype, llmemory
                [v_intadr] = hop.inputargs(lltype.Signed) # xxx id-sized
                v_adr = hop.genop('cast_int_to_adr', [v_intadr],
                                  resulttype = llmemory.Address)
                r_void_p.setvalue(hop.llops, v_result, v_adr)
        return v_result
class ObjEntry(CTypesObjEntry):
    "Annotation and rtyping of c_void_p instances."
    _type_ = c_void_p
    def get_field_annotation(self, s_void_p, fieldname):
        # only the '.value' attribute is supported; it reads as an integer
        assert fieldname == "value"
        return SomeInteger() # xxx id-sized
    def get_repr(self, rtyper, s_void_p):
        from pypy.rpython.rctypes.rvoid_p import CVoidPRepr
        from pypy.rpython.lltypesystem import llmemory
        # represented at the low level as a raw Address
        return CVoidPRepr(rtyper, s_void_p, llmemory.Address)
class CastFnEntry(ExtRegistryEntry):
    "Annotation and rtyping of calls to ctypes.cast()"
    _about_ = cast
    def checkptr(self, ctype):
        # cast() is limited to pointer-like ctypes: POINTER(...) types,
        # c_void_p, c_char_p and function pointers
        assert (isinstance(ctype, PointerType) or
                ctype in (c_void_p, c_char_p) or
                isinstance(ctype, CFuncPtrType)), (
            "cast(): can only cast between pointers so far, not %r" % (ctype,))
    def compute_result_annotation(self, s_arg, s_type):
        # the target ctype (second argument) must be a compile-time constant
        assert s_type.is_constant(), (
            "cast(p, %r): argument 2 must be constant" % (s_type,))
        type = s_type.const
        self.checkptr(type)
        if (s_arg.knowntype == StringBufferType or
            isinstance(s_arg, SomeCTypesFunc)):
            pass
        else:
            self.checkptr(s_arg.knowntype)
        return SomeCTypesObject(type, ownsmemory=True)
    def specialize_call(self, hop):
        from pypy.rpython.rctypes.rpointer import PointerRepr
        from pypy.rpython.rctypes.rvoid_p import CVoidPRepr
        from pypy.rpython.rctypes.rchar_p import CCharPRepr
        from pypy.rpython.rctypes.rstringbuf import StringBufRepr
        from pypy.rpython.rctypes.rfunc import CFuncPtrRepr
        from pypy.rpython.lltypesystem import lltype, llmemory
        r_arg = hop.args_r[0]
        if isinstance(hop.args_s[0], SomeCTypesFunc):
            # cast(const_cfuncptr, c_void_p): force the const_cfuncptr
            # to become a general non-constant SomeCTypesObject
            s_arg = hop.args_s[0].normalized()
            r_arg = hop.rtyper.getrepr(s_arg)
        assert isinstance(r_arg, (PointerRepr, CVoidPRepr, CCharPRepr,
                                  StringBufRepr, CFuncPtrRepr))
        targetctype = hop.args_s[1].const
        v_box, c_targetctype = hop.inputargs(r_arg, lltype.Void)
        if isinstance(r_arg, StringBufRepr):
            # a string buffer casts as the address of its first character
            v_index = hop.inputconst(lltype.Signed, 0)
            v_adr = r_arg.get_c_data_of_item(hop.llops, v_box, v_index)
        else:
            v_adr = r_arg.getvalue(hop.llops, v_box)
        # normalize the source value to a raw Address before re-casting
        if v_adr.concretetype != llmemory.Address:
            v_adr = hop.genop('cast_ptr_to_adr', [v_adr],
                              resulttype = llmemory.Address)
        if targetctype == c_void_p:
            # cast to void
            v_result = v_adr
        else:
            # cast to pointer
            v_result = hop.genop('cast_adr_to_ptr', [v_adr],
                                 resulttype = hop.r_result.ll_type)
        hop.exception_cannot_occur()
        return hop.r_result.cast_return_value(hop.llops, v_result)
| Python |
from pypy.rpython.rmodel import Repr, inputconst
from pypy.rpython.error import TyperError
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.annotation.model import SomeCTypesObject
from pypy.annotation.pairtype import pairtype
class CTypesRepr(Repr):
    "Base class for the Reprs representing ctypes object."
    # Attributes that are types:
    #
    # * 'ctype' is the ctypes type.
    #
    # * 'll_type' is the low-level type representing the raw C data,
    #   like Signed or Array(...).
    #
    # * 'c_data_type' is a low-level container type that also represents
    #   the raw C data; the difference is that we can take
    #   an lltype pointer to it. For primitives or pointers
    #   this is a FixedSizeArray with a single item of
    #   type 'll_type'. Otherwise, c_data_type == ll_type.
    #
    # * 'lowleveltype' is the Repr's choosen low-level type for the RPython
    #   variables. It's a Ptr to a GcStruct. This is a box
    #   traked by our GC around the raw 'c_data_type'-shaped
    #   data.
    #
    # * 'r_memoryowner.lowleveltype' is the lowleveltype of the repr for the
    #   same ctype but for ownsmemory=True.
    def __init__(self, rtyper, s_ctypesobject, ll_type):
        # s_ctypesobject: the annotation to represent
        # ll_type: the low-level type representing the raw
        # data, which is then embedded in a box.
        ctype = s_ctypesobject.knowntype
        self.rtyper = rtyper
        self.ctype = ctype
        self.ll_type = ll_type
        self.ownsmemory = s_ctypesobject.ownsmemory
        self.c_data_type = self.get_c_data_type(ll_type)
        fields = []
        content_keepalive_type = self.get_content_keepalive_type()
        if content_keepalive_type:
            fields.append(( "keepalive", content_keepalive_type ))
        if self.ownsmemory:
            # memory-owning box: the raw data is inlined in the GcStruct
            self.r_memoryowner = self
            fields.append(( "c_data", self.c_data_type ))
        else:
            # memory-aliasing box: it points to data owned by another box,
            # which is kept alive through 'c_data_owner_keepalive'
            s_memoryowner = SomeCTypesObject(ctype, ownsmemory=True)
            self.r_memoryowner = rtyper.getrepr(s_memoryowner)
            fields += [
                ( "c_data_owner_keepalive", self.r_memoryowner.lowleveltype ),
                ( "c_data", lltype.Ptr(self.c_data_type) ),
                ]
        self.lowleveltype = lltype.Ptr(
            lltype.GcStruct( "CtypesBox_%s" % (ctype.__name__,),
                *fields
            )
        )
        self.const_cache = {} # store generated const values+original value
    def get_content_keepalive_type(self):
        """Return the type of the extra keepalive field used for the content
        of this object."""
        return None
    def ctypecheck(self, value):
        # is 'value' an instance of this repr's ctype?
        return isinstance(value, self.ctype)
    def convert_const(self, value):
        # Convert a prebuilt Python-level value into a (cached) ll box.
        if self.ctypecheck(value):
            key = "by_id", id(value)
            keepalive = value
        else:
            if self.ownsmemory:
                raise TyperError("convert_const(%r) but repr owns memory" % (
                    value,))
            key = "by_value", value
            keepalive = None
        try:
            return self.const_cache[key][0]
        except KeyError:
            self.setup()
            p = lltype.malloc(self.r_memoryowner.lowleveltype.TO, zero=True)
            self.initialize_const(p, value)
            if self.ownsmemory:
                result = p
            else:
                # we must return a non-memory-owning box that keeps the
                # memory-owning box alive
                result = lltype.malloc(self.lowleveltype.TO, zero=True)
                result.c_data = p.c_data # initialize c_data pointer
                result.c_data_owner_keepalive = p
            # the cache also holds 'keepalive' so the original ctypes
            # object cannot be freed while its ll constant is in use
            self.const_cache[key] = result, keepalive
            return result
    def get_c_data(self, llops, v_box):
        # Return a Ptr to the raw data of the box (inlined substruct for
        # owners, stored pointer field for aliases).
        if self.ownsmemory:
            inputargs = [v_box, inputconst(lltype.Void, "c_data")]
            return llops.genop('getsubstruct', inputargs,
                               lltype.Ptr(self.c_data_type) )
        else:
            inputargs = [v_box, inputconst(lltype.Void, "c_data")]
            return llops.genop('getfield', inputargs,
                               lltype.Ptr(self.c_data_type) )
    def get_c_data_owner(self, llops, v_box):
        # Return the box that owns the raw data (possibly v_box itself).
        if self.ownsmemory:
            return v_box
        else:
            inputargs = [v_box, inputconst(lltype.Void,
                                           "c_data_owner_keepalive")]
            return llops.genop('getfield', inputargs,
                               self.r_memoryowner.lowleveltype)
    def allocate_instance(self, llops):
        # Generate the malloc of a new zero-initialized box.
        TYPE = self.lowleveltype.TO
        if TYPE._is_varsize():
            raise TyperError("allocating array with unknown length")
        c1 = inputconst(lltype.Void, TYPE)
        return llops.genop("zero_malloc", [c1], resulttype=self.lowleveltype)
    def allocate_instance_varsize(self, llops, v_length):
        # Same as allocate_instance() but for var-sized boxes.
        TYPE = self.lowleveltype.TO
        if not TYPE._is_varsize():
            raise TyperError("allocating non-array with a specified length")
        c1 = inputconst(lltype.Void, TYPE)
        return llops.genop("zero_malloc_varsize", [c1, v_length],
                           resulttype=self.lowleveltype)
    def allocate_instance_ref(self, llops, v_c_data, v_c_data_owner=None):
        """Only if self.ownsmemory is false. This allocates a new instance
        and initialize its c_data pointer."""
        if self.ownsmemory:
            raise TyperError("allocate_instance_ref: %r owns its memory" % (
                self,))
        v_box = self.allocate_instance(llops)
        inputargs = [v_box, inputconst(lltype.Void, "c_data"), v_c_data]
        llops.genop('setfield', inputargs)
        if v_c_data_owner is not None:
            assert (v_c_data_owner.concretetype ==
                    self.r_memoryowner.lowleveltype)
            inputargs = [v_box,
                         inputconst(lltype.Void, "c_data_owner_keepalive"),
                         v_c_data_owner]
            llops.genop('setfield', inputargs)
        return v_box
    def return_c_data(self, llops, v_c_data):
        """Turn a raw C pointer to the data into a memory-alias box.
        Used when the data is returned from an operation or C function call.
        Special-cased in PrimitiveRepr.
        """
        # XXX add v_c_data_owner
        return self.allocate_instance_ref(llops, v_c_data)
    def getkeepalive(self, llops, v_box):
        # Read the optional 'keepalive' field of the box; returns None
        # when this repr declared no keepalive field at all.
        try:
            TYPE = self.lowleveltype.TO.keepalive
        except AttributeError:
            return None
        else:
            if isinstance(TYPE, lltype.ContainerType):
                TYPE = lltype.Ptr(TYPE)
                opname = 'getsubstruct'
            else:
                opname = 'getfield'
            c_name = inputconst(lltype.Void, 'keepalive')
            return llops.genop(opname, [v_box, c_name],
                               resulttype = TYPE)
class __extend__(pairtype(CTypesRepr, CTypesRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        """Transparent conversion from the memory-owned to the memory-aliased
        version of the same ctypes repr."""
        if (r_from.ctype == r_to.ctype and
            r_from.ownsmemory and not r_to.ownsmemory):
            v_c_data = r_from.get_c_data(llops, v)
            # the alias box also keeps the owner box 'v' alive
            v_result = r_to.allocate_instance_ref(llops, v_c_data, v)
            # copy of the 'keepalive' field over
            v_keepalive = r_from.getkeepalive(llops, v)
            if v_keepalive is not None:
                genreccopy_structfield(llops, v_keepalive,
                                       v_result, 'keepalive')
            return v_result
        else:
            return NotImplemented
class CTypesRefRepr(CTypesRepr):
    """Base class for ctypes repr that have some kind of by-reference
    semantics, like structures and arrays."""
    def get_c_data_type(self, ll_type):
        # by-ref data is itself a container: we can point directly at it
        assert isinstance(ll_type, lltype.ContainerType)
        return ll_type
    def get_c_data_or_value(self, llops, v_box):
        # for by-ref objects, "c_data or value" is the c_data pointer
        return self.get_c_data(llops, v_box)
class CTypesValueRepr(CTypesRepr):
    """Base class for ctypes repr that have some kind of by-value
    semantics, like primitives and pointers."""
    def get_c_data_type(self, ll_type):
        # a one-item array, so that we can take a pointer to the value
        return lltype.FixedSizeArray(ll_type, 1)
    def getvalue_from_c_data(self, llops, v_c_data):
        return llops.genop('getarrayitem', [v_c_data, C_ZERO],
                           resulttype=self.ll_type)
    def setvalue_inside_c_data(self, llops, v_c_data, v_value):
        llops.genop('setarrayitem', [v_c_data, C_ZERO, v_value])
    def getvalue(self, llops, v_box):
        """Reads from the 'value' field of the raw data."""
        v_c_data = self.get_c_data(llops, v_box)
        return self.getvalue_from_c_data(llops, v_c_data)
    def setvalue(self, llops, v_box, v_value):
        """Writes to the 'value' field of the raw data."""
        v_c_data = self.get_c_data(llops, v_box)
        self.setvalue_inside_c_data(llops, v_c_data, v_value)
    # for by-value objects, "c_data or value" is simply the value
    get_c_data_or_value = getvalue
    def initialize_const(self, p, value):
        # accept either a ctypes instance or an already-unwrapped value
        if self.ctypecheck(value):
            value = value.value
        p.c_data[0] = value
    def return_value(self, llops, v_value):
        # like return_c_data(), but when the input is only the value
        # field instead of the c_data pointer
        r_temp = self.r_memoryowner
        v_owned_box = r_temp.allocate_instance(llops)
        r_temp.setvalue(llops, v_owned_box, v_value)
        return llops.convertvar(v_owned_box, r_temp, self)
    def cast_return_value(self, llops, v_value):
        # like return_value(), but used for the cast function
        return self.return_value(llops, v_value)
    def rtype_is_true(self, hop):
        # truth(box) == truth of the stored value
        [v_box] = hop.inputargs(self)
        v_value = self.getvalue(hop.llops, v_box)
        return hop.gendirectcall(ll_is_true, v_value)
# ____________________________________________________________
def ll_is_true(x):
    """Low-level helper: the truth value of x, as a bool."""
    return not not x
# prebuilt constant Signed 0, used as the index into one-item data arrays
C_ZERO = inputconst(lltype.Signed, 0)
def reccopy(source, dest):
    # copy recursively a structure or array onto another.
    # Works directly on prebuilt ll containers (not on generated code);
    # both operands must have exactly the same lltype.
    T = lltype.typeOf(source).TO
    assert T == lltype.typeOf(dest).TO
    if isinstance(T, (lltype.Array, lltype.FixedSizeArray)):
        assert source._obj.getlength() == dest._obj.getlength()
        ITEMTYPE = T.OF
        for i in range(source._obj.getlength()):
            if isinstance(ITEMTYPE, lltype.ContainerType):
                subsrc = source[i]
                subdst = dest[i]
                reccopy(subsrc, subdst)
            else:
                # this is a hack XXX de-hack this
                # (direct _obj access tolerates uninitialized items)
                llvalue = source._obj.getitem(i, uninitialized_ok=True)
                dest._obj.setitem(i, llvalue)
    elif isinstance(T, lltype.Struct):
        for name in T._names:
            FIELDTYPE = getattr(T, name)
            if isinstance(FIELDTYPE, lltype.ContainerType):
                subsrc = getattr(source, name)
                subdst = getattr(dest, name)
                reccopy(subsrc, subdst)
            else:
                # this is a hack XXX de-hack this
                llvalue = source._obj._getattr(name, uninitialized_ok=True)
                setattr(dest._obj, name, llvalue)
    else:
        raise TypeError(T)
def reccopy_arrayitem(source, destarray, destindex):
    """Copy 'source' into destarray[destindex] (prebuilt containers).

    Primitive items are assigned directly; container items are copied
    recursively with reccopy().
    """
    ITEMTYPE = lltype.typeOf(destarray).TO.OF
    if isinstance(ITEMTYPE, lltype.Primitive):
        destarray[destindex] = source
        return
    reccopy(source, destarray[destindex])
def genreccopy(llops, v_source, v_dest):
    # helper to generate the llops that copy recursively a structure
    # or array onto another. 'v_source' and 'v_dest' can also be pairs
    # (v, i) to mean the ith item of the array that v points to.
    T = v_source.concretetype.TO
    assert T == v_dest.concretetype.TO
    if isinstance(T, lltype.FixedSizeArray):
        # XXX don't do that if the length is large
        # (the copy is fully unrolled, one genop per item)
        ITEMTYPE = T.OF
        for i in range(T.length):
            c_i = inputconst(lltype.Signed, i)
            if isinstance(ITEMTYPE, lltype.ContainerType):
                RESTYPE = lltype.Ptr(ITEMTYPE)
                v_subsrc = llops.genop('getarraysubstruct', [v_source, c_i],
                                       resulttype = RESTYPE)
                v_subdst = llops.genop('getarraysubstruct', [v_dest, c_i],
                                       resulttype = RESTYPE)
                genreccopy(llops, v_subsrc, v_subdst)
            else:
                v_value = llops.genop('getarrayitem', [v_source, c_i],
                                      resulttype = ITEMTYPE)
                llops.genop('setarrayitem', [v_dest, c_i, v_value])
    elif isinstance(T, lltype.Array):
        raise NotImplementedError("XXX genreccopy() for arrays")
    elif isinstance(T, lltype.Struct):
        # copy field by field, recursing into sub-containers
        for name in T._names:
            FIELDTYPE = getattr(T, name)
            cname = inputconst(lltype.Void, name)
            if isinstance(FIELDTYPE, lltype.ContainerType):
                RESTYPE = lltype.Ptr(FIELDTYPE)
                v_subsrc = llops.genop('getsubstruct', [v_source, cname],
                                       resulttype = RESTYPE)
                v_subdst = llops.genop('getsubstruct', [v_dest, cname],
                                       resulttype = RESTYPE)
                genreccopy(llops, v_subsrc, v_subdst)
            else:
                v_value = llops.genop('getfield', [v_source, cname],
                                      resulttype = FIELDTYPE)
                llops.genop('setfield', [v_dest, cname, v_value])
    else:
        raise TypeError(T)
def genreccopy_arrayitem(llops, v_source, v_destarray, v_destindex):
    """Generate llops copying v_source into v_destarray[v_destindex]."""
    ITEMTYPE = v_destarray.concretetype.TO.OF
    if not isinstance(ITEMTYPE, lltype.ContainerType):
        # primitive item: a plain setarrayitem is enough
        llops.genop('setarrayitem', [v_destarray, v_destindex, v_source])
        return
    # container item: fetch the inlined sub-structure and copy recursively
    v_target = llops.genop('getarraysubstruct', [v_destarray, v_destindex],
                           resulttype = lltype.Ptr(ITEMTYPE))
    genreccopy(llops, v_source, v_target)
def genreccopy_structfield(llops, v_source, v_deststruct, fieldname):
    """Generate llops copying v_source into the named field of v_deststruct."""
    c_fieldname = inputconst(lltype.Void, fieldname)
    FIELDTYPE = getattr(v_deststruct.concretetype.TO, fieldname)
    if not isinstance(FIELDTYPE, lltype.ContainerType):
        # primitive field: a plain setfield is enough
        llops.genop('setfield', [v_deststruct, c_fieldname, v_source])
        return
    # container field: fetch the inlined sub-structure and copy recursively
    v_target = llops.genop('getsubstruct', [v_deststruct, c_fieldname],
                           resulttype = lltype.Ptr(FIELDTYPE))
    genreccopy(llops, v_source, v_target)
| Python |
from pypy.rpython.rmodel import inputconst
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.lltypesystem.rstr import CharRepr, UniCharRepr
from pypy.annotation.pairtype import pairtype
from pypy.rpython.rmodel import IntegerRepr, FloatRepr
from pypy.rpython.error import TyperError
from pypy.rpython.rctypes.rmodel import CTypesValueRepr
class PrimitiveRepr(CTypesValueRepr):
    """Repr for primitive ctypes objects (c_int, c_char, c_double, ...)."""
    def __init__(self, rtyper, s_ctypesobject, ll_type):
        CTypesValueRepr.__init__(self, rtyper, s_ctypesobject, ll_type)
        # number types are presented to RPython code in their
        # 'normalized' form (e.g. Signed for all signed int sizes)
        if isinstance(ll_type, lltype.Number):
            normalized_lltype = ll_type.normalized()
        else:
            normalized_lltype = ll_type
        self.value_repr = rtyper.getprimitiverepr(ll_type)
        self.normalized_value_repr = rtyper.getprimitiverepr(normalized_lltype)
    def return_c_data(self, llops, v_c_data):
        """Read out the atomic data from a raw C pointer.
        Used when the data is returned from an operation or C function call.
        """
        v_value = self.getvalue_from_c_data(llops, v_c_data)
        return self.return_value(llops, v_value)
    def return_value(self, llops, v_value):
        # like return_c_data(), but when the input is only the value
        # field instead of the c_data pointer
        return llops.convertvar(v_value, self.value_repr,
                                self.normalized_value_repr)
    def rtype_getattr(self, hop):
        # reading '.value' unwraps the primitive
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        assert s_attr.const == 'value'
        v_primitive = hop.inputarg(self, 0)
        hop.exception_cannot_occur()
        v_c_data = self.get_c_data(hop.llops, v_primitive)
        return self.return_c_data(hop.llops, v_c_data)
    def rtype_setattr(self, hop):
        # writing '.value' stores the (converted) primitive
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        assert s_attr.const == 'value'
        v_primitive, v_attr, v_value = hop.inputargs(self, lltype.Void,
                                                     self.ll_type)
        self.setvalue(hop.llops, v_primitive, v_value)
    def rtype_is_true(self, hop):
        # chars test as ord(c) != 0, everything else as bool(value)
        [v_box] = hop.inputargs(self)
        v_value = self.return_value(hop.llops, self.getvalue(hop.llops, v_box))
        if v_value.concretetype in (lltype.Char, lltype.UniChar):
            llfn = ll_c_char_is_true
        else:
            llfn = ll_is_true
        return hop.gendirectcall(llfn, v_value)
    def initialize_const(self, p, value):
        # accept either a ctypes instance or an already-unwrapped value
        if isinstance(value, self.ctype):
            value = value.value
        p.c_data[0] = lltype.cast_primitive(self.ll_type, value)
def ll_is_true(x):
    """Low-level truth test used for non-char primitives."""
    return not not x
def ll_c_char_is_true(x):
    """Low-level truth test for chars: true unless the NUL character."""
    return ord(x) != 0
class __extend__(pairtype(IntegerRepr, PrimitiveRepr),
                 pairtype(FloatRepr, PrimitiveRepr),
                 pairtype(CharRepr, PrimitiveRepr),
                 pairtype(UniCharRepr, PrimitiveRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # Implicit conversion of a plain RPython primitive into a ctypes
        # primitive box.
        # first convert 'v' to the precise expected low-level type
        r_input = r_to.rtyper.getprimitiverepr(r_to.ll_type)
        v = llops.convertvar(v, r_from, r_input)
        # allocate a memory-owning box to hold a copy of the ll value 'v'
        r_temp = r_to.r_memoryowner
        v_owned_box = r_temp.allocate_instance(llops)
        r_temp.setvalue(llops, v_owned_box, v)
        # return this box possibly converted to the expected output repr,
        # which might be a memory-aliasing box
        return llops.convertvar(v_owned_box, r_temp, r_to)
| Python |
from pypy.rpython.rmodel import inputconst
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.rstr import AbstractStringRepr
from pypy.rpython.lltypesystem.rstr import string_repr
from pypy.rpython.rctypes.rmodel import CTypesValueRepr, C_ZERO
from pypy.rpython.rctypes.rarray import ArrayRepr
from pypy.rpython.rctypes.rstringbuf import StringBufRepr
from pypy.annotation.pairtype import pairtype
from ctypes import c_char, c_char_p, cast
class CCharPRepr(CTypesValueRepr):
    """Repr for c_char_p: a char pointer plus an RPython-string keepalive."""
    def return_c_data(self, llops, v_c_data):
        """Read out the RPython string from a raw C pointer.
        Used when the data is returned from an operation or C function call.
        """
        v_char_p = self.getvalue_from_c_data(llops, v_c_data)
        return llops.gendirectcall(ll_charp2str, v_char_p)
    def return_value(self, llops, v_value):
        # like return_c_data(), but when the input is only the value
        # field instead of the c_data pointer
        return llops.gendirectcall(ll_charp2str, v_value)
    def cast_return_value(self, llops, v_value):
        # This should not return a string but a char pointer
        return CTypesValueRepr.return_value(self, llops, v_value)
    def get_content_keepalive_type(self):
        "An extra keepalive used for the RPython string."
        return string_repr.lowleveltype
    def getstring(self, llops, v_box):
        # read '.value' as an RPython string (see ll_getstring)
        return llops.gendirectcall(ll_getstring, v_box)
    def setstring(self, llops, v_box, v_str):
        # store an RPython string into the box (see ll_setstring)
        llops.gendirectcall(ll_setstring, v_box, v_str)
    def convert_const(self, value):
        if value is not None and not isinstance(value, (str, c_char_p)):
            # maybe an array of characters? cast to a c_char_p
            assert type(value)._type_ == c_char
            value = cast(value, c_char_p)
        return super(CCharPRepr, self).convert_const(value)
    def initialize_const(self, p, string):
        if isinstance(string, c_char_p):
            string = string.value
        llstring = string_repr.convert_const(string)
        ll_setstring(p, llstring)
    def rtype_getattr(self, hop):
        # only '.value' is supported
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        assert s_attr.const == 'value'
        v_char_p = hop.inputarg(self, 0)
        hop.exception_cannot_occur()
        return self.getstring(hop.llops, v_char_p)
    def rtype_setattr(self, hop):
        # only '.value' is supported
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        assert s_attr.const == 'value'
        v_char_p, v_attr, v_value = hop.inputargs(self, lltype.Void,
                                                  string_repr)
        self.setstring(hop.llops, v_char_p, v_value)
class __extend__(pairtype(AbstractStringRepr, CCharPRepr)):
def convert_from_to((r_from, r_to), v, llops):
# r_from could be char_repr: first convert it to string_repr
v = llops.convertvar(v, r_from, string_repr)
r_temp = r_to.r_memoryowner
v_owned_box = r_temp.allocate_instance(llops)
r_temp.setstring(llops, v_owned_box, v)
return llops.convertvar(v_owned_box, r_temp, r_to)
class __extend__(pairtype(ArrayRepr, CCharPRepr)):
def convert_from_to((r_from, r_to), v, llops):
if r_from.r_item.ctype != c_char:
return NotImplemented
# warning: no keepalives, only for short-lived conversions like
# in argument passing
r_temp = r_to.r_memoryowner
v_owned_box = r_temp.allocate_instance(llops)
v_c_array = r_from.get_c_data_of_item(llops, v, C_ZERO)
r_temp.setvalue(llops, v_owned_box, v_c_array)
return llops.convertvar(v_owned_box, r_temp, r_to)
class __extend__(pairtype(StringBufRepr, CCharPRepr)):
def convert_from_to((r_from, r_to), v, llops):
# warning: no keepalives, only for short-lived conversions like
# in argument passing
r_temp = r_to.r_memoryowner
v_owned_box = r_temp.allocate_instance(llops)
v_c_array = r_from.get_c_data_of_item(llops, v, C_ZERO)
r_temp.setvalue(llops, v_owned_box, v_c_array)
return llops.convertvar(v_owned_box, r_temp, r_to)
# XXX some code duplication above
# low-level type of a raw char pointer (pointer to a one-char array)
CCHARP = lltype.Ptr(lltype.FixedSizeArray(lltype.Char, 1))
def ll_strlen(p):
    """Length of the null-terminated char data at 'p' (NUL excluded)."""
    length = 0
    while ord(p[length]) != 0:
        length += 1
    return length
def ll_strnlen(p, maxlen):
    """Like ll_strlen(), but never look beyond the first maxlen chars."""
    length = 0
    while length < maxlen:
        if ord(p[length]) == 0:
            break
        length += 1
    return length
def ll_str2charp(s):
    # Return a char pointer aliasing the characters of the RPython string
    # 's' (no copy; the caller must keep 's' alive).
    return lltype.direct_arrayitems(s.chars)
def ll_charp2str(p):
    # Copy the null-terminated data at 'p' into a fresh RPython string;
    # a NULL pointer maps to the null string pointer.
    if not p:
        return lltype.nullptr(string_repr.lowleveltype.TO)
    length = ll_strlen(p)
    newstr = lltype.malloc(string_repr.lowleveltype.TO, length)
    newstr.hash = 0
    for i in range(length):
        newstr.chars[i] = p[i]
    return newstr
def ll_getstring(box):
    # Read the '.value' of a c_char_p box as an RPython string.  If the
    # stored pointer still aliases the keepalive string and that string
    # has no embedded NUL, the keepalive string itself is returned;
    # otherwise the null-terminated data is copied into a fresh string.
    p = box.c_data[0]
    if p:
        if box.keepalive and ll_str2charp(box.keepalive) == p:
            maxlen = len(box.keepalive.chars)
            length = ll_strnlen(p, maxlen)
            if length == maxlen:
                # no embedded zero in the string
                return box.keepalive
        else:
            length = ll_strlen(p)
        newstr = lltype.malloc(string_repr.lowleveltype.TO, length)
        newstr.hash = 0
        for i in range(length):
            newstr.chars[i] = p[i]
        return newstr
    else:
        return lltype.nullptr(string_repr.lowleveltype.TO)
def ll_setstring(box, string):
    """Point the char* in 'box' at the characters of 'string'.

    A NULL string stores a NULL 'char *'.  The string itself is saved in
    box.keepalive so it stays alive while the raw pointer may be used.
    """
    if not string:
        box.c_data[0] = lltype.nullptr(CCHARP.TO)
    else:
        box.c_data[0] = ll_str2charp(string)
    box.keepalive = string
| Python |
from pypy.rpython.rmodel import inputconst
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.rctypes.rmodel import CTypesRefRepr, CTypesValueRepr
from pypy.rpython.rctypes.rmodel import genreccopy_structfield, reccopy
from pypy.rpython.rctypes.rprimitive import PrimitiveRepr
from pypy.annotation.model import SomeCTypesObject
class StructRepr(CTypesRefRepr):
    """Repr for instances of ctypes Structure (and, with is_union=True,
    Union) subclasses.

    The C-level data is an lltype.Struct with one field per entry of the
    ctype's _fields_; field names are mangled with cmangle() because
    lltype.Struct rejects names starting with '_'.
    """

    def __init__(self, rtyper, s_struct, is_union=False):
        struct_ctype = s_struct.knowntype
        # Find the repr and low-level type of the fields from their ctype
        self.r_fields = {}
        llfields = []
        for name, field_ctype in struct_ctype._fields_:
            # fields alias the parent structure's storage, hence
            # ownsmemory=False
            r_field = rtyper.getrepr(SomeCTypesObject(field_ctype,
                                                      ownsmemory=False))
            self.r_fields[name] = r_field
            llfields.append((cmangle(name), r_field.ll_type))
        # Here, self.c_data_type == self.ll_type
        external = getattr(struct_ctype, '_external_', False)
        extras = {'hints': {'c_name': struct_ctype.__name__,
                            'external': external}}
        if is_union:
            extras['hints']['union'] = True
        c_data_type = lltype.Struct(struct_ctype.__name__, *llfields, **extras)
        super(StructRepr, self).__init__(rtyper, s_struct, c_data_type)

    def get_content_keepalive_type(self):
        "An extra struct of keepalives, one per field."
        keepalives = []
        for name, field_ctype in self.ctype._fields_:
            r_field = self.r_fields[name]
            field_keepalive_type = r_field.get_content_keepalive_type()
            if field_keepalive_type:
                keepalives.append((cmangle(name), field_keepalive_type))
        if not keepalives:
            return None     # no field needs any keepalive
        else:
            return lltype.Struct('keepalives', *keepalives)

    def initialize_const(self, p, value):
        # Fill the prebuilt low-level constant 'p' from the ctypes
        # structure instance 'value', field by field.
        for name, r_field in self.r_fields.items():
            llitem = r_field.convert_const(getattr(value, name))
            if isinstance(r_field, CTypesRefRepr):
                # ByRef case: copy the whole sub-structure's content
                reccopy(llitem.c_data, getattr(p.c_data, cmangle(name)))
            else:
                # ByValue case: copy the single value
                setattr(p.c_data, cmangle(name), llitem.c_data[0])

    def get_c_data_of_field(self, llops, v_struct, fieldname):
        # Return a pointer to the raw C data of the given field.
        v_c_struct = self.get_c_data(llops, v_struct)
        r_field = self.r_fields[fieldname]
        c_fieldname = inputconst(lltype.Void, cmangle(fieldname))
        if isinstance(r_field, CTypesRefRepr):
            # ByRef case: the field is itself a structure
            return llops.genop('getsubstruct', [v_c_struct, c_fieldname],
                               lltype.Ptr(r_field.c_data_type))
        else:
            # ByValue case: expose the field as a pointer to a
            # one-element array of its ll_type
            P = lltype.Ptr(lltype.FixedSizeArray(r_field.ll_type, 1))
            return llops.genop('direct_fieldptr', [v_c_struct, c_fieldname],
                               resulttype=P)

    def get_field_value(self, llops, v_struct, fieldname):
        # ByValue case only: read the field's value directly.
        r_field = self.r_fields[fieldname]
        assert isinstance(r_field, CTypesValueRepr)
        v_c_struct = self.get_c_data(llops, v_struct)
        c_fieldname = inputconst(lltype.Void, cmangle(fieldname))
        return llops.genop('getfield', [v_c_struct, c_fieldname],
                           resulttype=r_field.ll_type)

##    def set_field_value(self, llops, v_struct, fieldname, v_newvalue):
##        # ByValue case only
##        r_field = self.r_fields[fieldname]
##        assert isinstance(r_field, CTypesValueRepr)
##        v_c_struct = self.get_c_data(llops, v_struct)
##        c_fieldname = inputconst(lltype.Void, fieldname)
##        llops.genop('setfield', [v_c_struct, c_fieldname, v_newvalue])

    def rtype_getattr(self, hop):
        # s.name: the attribute name must be a compile-time constant.
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        name = s_attr.const
        r_field = self.r_fields[name]
        v_struct, v_attr = hop.inputargs(self, lltype.Void)
        hop.exception_cannot_occur()
        if isinstance(r_field, PrimitiveRepr):
            # primitive case (optimization; the below also works in this case)
            # NB. this optimization is invalid for PointerReprs!  See for
            # example: s.p.contents = ... to change the pointer field 'p'
            # of 's'.
            v_value = self.get_field_value(hop.llops, v_struct, name)
            return r_field.return_value(hop.llops, v_value)
        else:
            # ByRef case
            v_c_data = self.get_c_data_of_field(hop.llops, v_struct, name)
            return r_field.return_c_data(hop.llops, v_c_data)

    def rtype_setattr(self, hop):
        # s.name = item: delegated to setfield() below.
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        name = s_attr.const
        r_field = self.r_fields[name]
        v_struct, v_attr, v_item = hop.inputargs(self, lltype.Void, r_field)
        self.setfield(hop.llops, v_struct, name, v_item)

    def setfield(self, llops, v_struct, name, v_item):
        # Copy 'v_item' (value or whole substructure) into the field,
        # together with its keepalive information if any.
        r_field = self.r_fields[name]
        v_newvalue = r_field.get_c_data_or_value(llops, v_item)
        # copy the new value (which might be a whole substructure)
        v_c_struct = self.get_c_data(llops, v_struct)
        genreccopy_structfield(llops, v_newvalue, v_c_struct, cmangle(name))
        # copy the keepalive information too
        v_newkeepalive = r_field.getkeepalive(llops, v_item)
        if v_newkeepalive is not None:
            v_keepalive_struct = self.getkeepalive(llops, v_struct)
            genreccopy_structfield(llops, v_newkeepalive,
                                   v_keepalive_struct, cmangle(name))
def cmangle(name):
    """Mangle a ctypes field name for use inside an lltype.Struct.

    Names starting with '_' are not allowed in lltype.Struct, so every
    field name uniformly receives a 'c_' prefix.
    """
    prefix = 'c_'
    return prefix + name
| Python |
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rpython.rctypes.implementation import CTypesObjEntry
from pypy.annotation.model import SomeCTypesObject, SomeString, SomeInteger
from pypy.rlib.rarithmetic import r_uint
from ctypes import create_string_buffer, c_char, sizeof
######################################################################
# NOTE: astringbuf and rstringbuf should be removed and replaced #
# with a regular var-sized array of char, now that we #
# support var-sized arrays. #
######################################################################
class StringBufferType(object):
    """Placeholder for the result type of create_string_buffer(),
    which cannot be represented as a regular ctypes type because
    the length is not an annotation-time constant.
    """
    # looks like a ctypes char array, but with no fixed _length_
    _type_ = c_char
    #_length_ = unspecified
class CreateStringBufferFnEntry(ExtRegistryEntry):
    "Annotation and rtyping of calls to ctypes.create_string_buffer()"
    _about_ = create_string_buffer

    def compute_result_annotation(self, s_length):
        # only the create_string_buffer(length) form is accepted; the
        # argument must annotate as a (possibly unsigned) integer
        if s_length.knowntype not in (int, r_uint):
            raise Exception("only supports create_string_buffer(length)")
        return SomeCTypesObject(StringBufferType, ownsmemory=True)

    def specialize_call(self, hop):
        from pypy.rpython.lltypesystem import lltype
        [v_length] = hop.inputargs(lltype.Signed)
        r_stringbuf = hop.r_result
        hop.exception_cannot_occur()
        # malloc a zero-initialized variable-sized buffer of 'v_length'
        # items
        return hop.genop("zero_malloc_varsize", [
            hop.inputconst(lltype.Void, r_stringbuf.lowleveltype.TO),
            v_length,
            ], resulttype=r_stringbuf.lowleveltype,
        )
class ObjEntry(CTypesObjEntry):
    "Annotation and rtyping of instances of the pseudo-ctype StringBufferType"
    _type_ = StringBufferType

    def get_field_annotation(self, s_array, fieldname):
        # only the '.value' and '.raw' attributes are supported; both
        # annotate as a non-None RPython string
        assert fieldname in ('value', 'raw')
        return SomeString()     # can_be_None = False

    def get_repr(self, rtyper, s_stringbuf):
        # imported here, presumably to avoid an import cycle -- TODO confirm
        from pypy.rpython.rctypes import rstringbuf
        return rstringbuf.StringBufRepr(rtyper, s_stringbuf,
                                        rstringbuf.STRBUFTYPE)
class SizeOfFnEntry(ExtRegistryEntry):
    "Annotation and rtyping of calls to ctypes.sizeof()"
    _about_ = sizeof

    def compute_result_annotation(self, s_arg):
        # sizes are always non-negative integers
        return SomeInteger(nonneg=True)

    def specialize_call(self, hop):
        from pypy.rpython.lltypesystem import lltype, llmemory
        from pypy.rpython.error import TyperError
        [s_arg] = hop.args_s
        [r_arg] = hop.args_r
        hop.exception_cannot_occur()
        if isinstance(s_arg, SomeCTypesObject):
            if s_arg.knowntype is StringBufferType:
                # sizeof(string_buffer) == len(string_buffer)
                return r_arg.rtype_len(hop)
        else:
            # sizeof(some_ctype): the type must be a compile-time constant
            if not s_arg.is_constant():
                raise TyperError("ctypes.sizeof(non_constant_type)")
            # XXX check that s_arg.const is really a ctypes type
            ctype = s_arg.const
            s_arg = SomeCTypesObject(ctype, ownsmemory=True)
            r_arg = hop.rtyper.getrepr(s_arg)
        # generic case: the static size of the repr's low-level type
        return hop.inputconst(lltype.Signed, llmemory.sizeof(r_arg.ll_type))
| Python |
from pypy.rpython.rmodel import IntegerRepr, inputconst
from pypy.rpython.error import TyperError
from pypy.rpython.lltypesystem import lltype
from pypy.annotation.pairtype import pairtype
from pypy.rpython.rctypes.rmodel import CTypesValueRepr, genreccopy
from pypy.annotation.model import SomeCTypesObject
from pypy.objspace.flow.model import Constant
class PointerRepr(CTypesValueRepr):
    """Repr for instances of ctypes POINTER(X) types.

    The boxed value is a raw pointer to the target's C data; the box also
    carries a 'keepalive' field holding the memory-owning box of the data
    pointed to, so the target is not freed while the pointer is alive.
    """

    def __init__(self, rtyper, s_pointer):
        # For recursive types, getting the r_contents is delayed until
        # _setup_repr().
        ll_contents = lltype.Ptr(lltype.ForwardReference())
        self.keepalive_box_type = lltype.GcForwardReference()
        super(PointerRepr, self).__init__(rtyper, s_pointer, ll_contents)

    def _setup_repr(self):
        # Find the repr and low-level type of the contents from its ctype,
        # and resolve the forward references made in __init__().
        rtyper = self.rtyper
        ref_ctype = self.ctype._type_
        self.r_contents = rtyper.getrepr(SomeCTypesObject(ref_ctype,
                                                          ownsmemory=False))
        if isinstance(self.ll_type.TO, lltype.ForwardReference):
            self.ll_type.TO.become(self.r_contents.c_data_type)
        if isinstance(self.keepalive_box_type, lltype.GcForwardReference):
            self.keepalive_box_type.become(
                self.r_contents.r_memoryowner.lowleveltype.TO)

    def get_content_keepalive_type(self):
        "Keepalive for the box that holds the data that 'self' points to."
        return lltype.Ptr(self.keepalive_box_type)

    def setkeepalive(self, llops, v_box, v_owner):
        # generate: v_box.keepalive = v_owner
        inputargs = [v_box, inputconst(lltype.Void, 'keepalive'),
                     v_owner]
        llops.genop('setfield', inputargs)

    def initialize_const(self, p, ptr):
        # Fill the prebuilt low-level constant 'p' from the ctypes
        # pointer 'ptr'.
        if not ptr:  # NULL pointer, or literal None passed as argument to
            return   # functions expecting pointers
        llcontents = self.r_contents.convert_const(ptr.contents)
        p.c_data[0] = llcontents.c_data
        # the following line is probably pointless, as 'llcontents' will be
        # an immortal global constant just like 'p', but better safe than sorry
        p.keepalive = llcontents.c_data_owner_keepalive

    def setcontents(self, llops, v_ptr, v_contentsbox):
        # Make 'v_ptr' point at the data of 'v_contentsbox', and store
        # the owner of that data in the keepalive field.
        v_c_data = self.r_contents.get_c_data(llops, v_contentsbox)
        v_owner = self.r_contents.get_c_data_owner(llops, v_contentsbox)
        self.setvalue(llops, v_ptr, v_c_data)
        self.setkeepalive(llops, v_ptr, v_owner)

    def rtype_getattr(self, hop):
        # only 'ptr.contents' is supported
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        assert s_attr.const == 'contents'
        v_ptr = hop.inputarg(self, 0)
        v_c_ptr = self.getvalue(hop.llops, v_ptr)
        hop.exception_cannot_occur()
        # wrap the raw pointer in a new memory-aliasing contents box
        return self.r_contents.allocate_instance_ref(hop.llops, v_c_ptr)

    def rtype_setattr(self, hop):
        # only 'ptr.contents = box' is supported
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        assert s_attr.const == 'contents'
        v_ptr, v_attr, v_newcontents = hop.inputargs(self, lltype.Void,
                                                     self.r_contents)
        self.setcontents(hop.llops, v_ptr, v_newcontents)
class __extend__(pairtype(PointerRepr, IntegerRepr)):
    def rtype_getitem((r_ptr, _), hop):
        # p[i]: return (a box aliasing) the i-th item past the pointer.
        self = r_ptr
        v_ptr, v_index = hop.inputargs(self, lltype.Signed)
        v_c_ptr = self.getvalue(hop.llops, v_ptr)
        hop.exception_cannot_occur()
        if isinstance(v_index, Constant) and v_index.value == 0:
            pass  # skip direct_ptradd
        else:
            v_c_ptr = hop.genop('direct_ptradd', [v_c_ptr, v_index],
                                resulttype=r_ptr.ll_type)
        return self.r_contents.return_c_data(hop.llops, v_c_ptr)

    def rtype_setitem((r_ptr, _), hop):
        # p[0] = x is not the same as p.contents.value = x
        # it makes a copy of the data in 'x' just like rarray.rtype_setitem()
        self = r_ptr
        v_ptr, v_index, v_contentsbox = hop.inputargs(self, lltype.Signed,
                                                      self.r_contents)
        v_new_c_data = self.r_contents.get_c_data(hop.llops, v_contentsbox)
        v_target = self.getvalue(hop.llops, v_ptr)
        # only a constant index of 0 is accepted
        if hop.args_s[1].is_constant() and hop.args_s[1].const == 0:
            pass
        else:
            # not supported by ctypes either
            raise TyperError("assignment to pointer[x] with x != 0")
        # copy the whole structure's content over
        hop.exception_cannot_occur()
        genreccopy(hop.llops, v_new_c_data, v_target)
| Python |
from pypy.annotation.pairtype import pairtype
from pypy.rpython.rmodel import inputconst
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.rctypes.rmodel import CTypesValueRepr
from pypy.rpython.robject import PyObjRepr, pyobj_repr
from pypy.rpython import extregistry
class CTypesPyObjRepr(CTypesValueRepr):
    """Repr for ctypes py_object instances: the boxed value is a
    'PyObject*'."""

    def convert_const(self, value):
        if value is None:
            # a None py_object becomes a NULL box pointer
            return lltype.nullptr(self.lowleveltype.TO)
        else:
            return super(CTypesPyObjRepr, self).convert_const(value)

    def initialize_const(self, p, value):
        # Fill the prebuilt low-level constant 'p' with a PyObject*.
        if isinstance(value, self.ctype):
            value = value.value   # unwrap py_object(x) -> x
        if extregistry.is_registered(value):
            entry = extregistry.lookup(value)
            if hasattr(entry, 'get_ll_pyobjectptr'):
                # the registered entry knows how to build its own PyObject
                p.c_data[0] = entry.get_ll_pyobjectptr(self.rtyper)
                return
        p.c_data[0] = lltype.pyobjectptr(value)

    def rtype_getattr(self, hop):
        # only for 'allow_someobjects' annotations
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        assert s_attr.const == 'value'
        v_pyobj = hop.inputarg(self, 0)
        hop.exception_cannot_occur()
        return self.getvalue(hop.llops, v_pyobj)

    def rtype_setattr(self, hop):
        # box.value = obj: store the new PyObject* into the box
        s_attr = hop.args_s[1]
        assert s_attr.is_constant()
        assert s_attr.const == 'value'
        v_pyobj, v_attr, v_newvalue = hop.inputargs(self, lltype.Void,
                                                    pyobj_repr)
        self.setvalue(hop.llops, v_pyobj, v_newvalue)

    def rtype_is_true(self, hop):
        # bool(box): delegated to the ll helper below
        [v_box] = hop.inputargs(self)
        return hop.gendirectcall(ll_pyobjbox_is_true, v_box)
def ll_pyobjbox_is_true(box):
    """Truth value of a py_object box: the box itself and the 'PyObject*'
    it holds must both be non-NULL."""
    if not box:
        return False
    return bool(box.c_data[0])
class __extend__(pairtype(CTypesPyObjRepr, PyObjRepr)):
    # conversion used by wrapper.py in genc when returning a py_object
    # from a function exposed in a C extension module
    def convert_from_to((r_from, r_to), v, llops):
        # unboxing: just read the PyObject* out of the box
        return r_from.getvalue(llops, v)
class __extend__(pairtype(PyObjRepr, CTypesPyObjRepr)):
# conversion used by wrapper.py in genc when passing a py_object
# argument into a function exposed in a C extension module
def convert_from_to((r_from, r_to), v, llops):
# allocate a memory-owning box to hold a copy of the 'PyObject*'
r_temp = r_to.r_memoryowner
v_owned_box = r_temp.allocate_instance(llops)
r_temp.setvalue(llops, v_owned_box, v)
# return this box possibly converted to the expected output repr,
# which might be a memory-aliasing box
return llops.convertvar(v_owned_box, r_temp, r_to)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.