code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#!/usr/bin/env python
#
# Pyrex -- Main Program, Unix
#
from Pyrex.Compiler.Main import main

# Guard the entry point so importing this launcher (e.g. from tools or
# tests) does not immediately run the compiler; executing it as a script
# behaves exactly as before.
if __name__ == '__main__':
    main(command_line = 1)
| Python |
#!/usr/bin/env python
#
# Pyrex -- Main Program, Unix
#
# Thin launcher: hand control straight to the Pyrex compiler driver and
# tell it to interpret sys.argv as the command line.
import Pyrex.Compiler.Main
Pyrex.Compiler.Main.main(command_line = 1)
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    # Locate the directory containing this file; __file__ is undefined
    # when run interactively, so fall back to the script path.
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # Walk upwards until a path component equal to `part` is found; the
    # while/else raises only when the loop exhausts without break.
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)
    # `head` is now the parent of the `part` directory; force it to the
    # front of sys.path (dropping any previous occurrence first).
    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)
    # Collect already-imported top-level modules that physically live
    # below pypy_root so they can be re-registered under dotted names.
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): `newname` is a bare file basename and so can never
        # start with "part." -- this test looks like it was meant to apply
        # to `fn`; confirm against the master autopath.py before changing.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            # Derive the dotted module path from the location on disk.
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod
    # Register each collected module under its dotted name as well, and
    # hang it off its parent package so both import spellings share one
    # module object.
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
""" clone master version of autopath.py into all subdirs """
from os.path import join, walk
if not this_dir.endswith(join('pypy','tool')):
raise EnvironmentError("can only clone master version "
"'%s'" % join(pypydir, 'tool',_myname))
def sync_walker(arg, dirname, fnames):
if _myname in fnames:
fn = join(dirname, _myname)
f = open(fn, 'rwb+')
try:
if f.read() == arg:
print "checkok", fn
else:
print "syncing", fn
f = open(fn, 'w')
f.write(arg)
finally:
f.close()
s = open(join(pypydir, 'tool', _myname), 'rb').read()
walk(pypydir, sync_walker, s)
# File name this module is expected to have on disk (used by __clone).
_myname = 'autopath.py'
# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')
# Running the master copy directly re-clones it into all subdirectories.
if __name__ == '__main__':
    __clone()
| Python |
# Subclasses disutils.command.build_ext,
# replacing it with a Pyrex version that compiles pyx->c
# before calling the original build_ext command.
# July 2002, Graham Fawcett
# Modified by Darrell Gallion <dgallion1@yahoo.com>
# to allow inclusion of .c files along with .pyx files.
# Pyrex is (c) Greg Ewing.
import distutils.command.build_ext
import Pyrex.Compiler.Main
from Pyrex.Compiler.Errors import PyrexError
from distutils.dep_util import newer
import os
import sys
def replace_suffix(path, new_suffix):
    """Return *path* with its extension replaced by *new_suffix*."""
    root, _old_ext = os.path.splitext(path)
    return root + new_suffix
class build_ext (distutils.command.build_ext.build_ext):
    # Drop-in replacement for distutils' build_ext: translates any .pyx
    # (Pyrex) sources to C/C++ first by hooking swig_sources().

    description = "compile Pyrex scripts, then build C/C++ extensions (compile/link to build directory)"

    def finalize_options (self):
        # Delegate to the stock distutils implementation.
        distutils.command.build_ext.build_ext.finalize_options(self)
        # The following hack should no longer be needed.
        if 0:
            # compiling with mingw32 gets an "initializer not a constant" error
            # doesn't appear to happen with MSVC!
            # so if we are compiling with mingw32,
            # switch to C++ mode, to avoid the problem
            if self.compiler == 'mingw32':
                self.swig_cpp = 1

    def swig_sources (self, sources):
        # distutils calls this hook to preprocess sources; we hijack it
        # to compile .pyx files and return the generated C/C++ names.
        # NOTE(review): returns None (not a list) when there are no
        # extensions, mirroring the early-out of the stock command.
        if not self.extensions:
            return
        # collect the names of the source (.pyx) files
        pyx_sources = []
        # NOTE(review): the assignment above is dead code -- immediately
        # rebound by the comprehension below.
        pyx_sources = [source for source in sources if source.endswith('.pyx')]
        other_sources = [source for source in sources if not source.endswith('.pyx')]
        # Old-style conditional expression: '.cpp' when building as C++.
        extension = self.swig_cpp and '.cpp' or '.c'
        for pyx in pyx_sources:
            # should I raise an exception if it doesn't exist?
            if os.path.exists(pyx):
                source = pyx
                #target = source.replace('.pyx', extension)
                target = replace_suffix(source, extension)
                # Only recompile when the .pyx is newer or --force given.
                if newer(source, target) or self.force:
                    self.pyrex_compile(source)
                    if self.swig_cpp:
                        # rename .c to .cpp (Pyrex always builds .c ...)
                        if os.path.exists(target):
                            os.unlink(target)
                        #os.rename(source.replace('.pyx', '.c'), target)
                        os.rename(replace_suffix(source, '.c'), target)
                        # massage the cpp file
                        self.c_to_cpp(target)
        # Hand distutils the generated C/C++ names plus untouched sources.
        return [replace_suffix(src, extension) for src in pyx_sources] + other_sources

    def pyrex_compile(self, source):
        # Run the Pyrex compiler; abort the whole build on any error.
        result = Pyrex.Compiler.Main.compile(source)
        if result.num_errors <> 0:
            sys.exit(1)

    def c_to_cpp(self, filename):
        """touch up the Pyrex generated c/cpp files to meet mingw32/distutils requirements."""
        # Drop 'staticforward' declarations (invalid in C++) ...
        f = open(filename, 'r')
        lines = [line for line in f.readlines() if not line.startswith('staticforward PyTypeObject __pyx_type_')]
        f.close()
        # ... and wrap everything after the first line in extern "C" so
        # the symbols keep C linkage when compiled as C++.
        f = open(filename, 'w')
        lines.insert(1, 'extern "C" {\n')
        lines.append('}\n')
        f.write(''.join(lines))
        f.close()
| Python |
# July 2002, Graham Fawcett
#
# this hack was inspired by the way Thomas Heller got py2exe
# to appear as a distutil command
#
# we replace distutils.command.build_ext with our own version
# and keep the old one under the module name _build_ext,
# so that *our* build_ext can make use of it.
from build_ext import build_ext
| Python |
###############################################
#
# Odds and ends for debugging
#
###############################################
def print_call_chain(*args):
import sys
print " ".join(map(str, args))
f = sys._getframe(2)
while f:
name = f.f_code.co_name
s = f.f_locals.get('self', None)
if s:
c = getattr(s, "__class__", None)
if c:
name = "%s.%s" % (c.__name__, name)
print "Called from:", name, f.f_lineno
f = f.f_back
print "-" * 70
| Python |
import py
class Directory(py.test.collect.Directory):
    # py.test collector that deliberately collects nothing, disabling
    # test collection in this directory.
    def run(self):
        # Return an empty list of collected items.
        return []
| Python |
#
# Pyrex -- Misc Mac-specific things
#
import os, MacOS, macfs
def open_new_file(path):
    # On the Mac, try to preserve Finder position
    # of previously existing file.
    fsspec = macfs.FSSpec(path)
    # Snapshot the old Finder info, if the file exists at all.
    try:
        old_finfo = fsspec.GetFInfo()
    except MacOS.Error, e:
        #print "MacUtils.open_new_file:", e ###
        old_finfo = None
    # Remove any existing file so open() creates a fresh one.
    try:
        os.unlink(path)
    except OSError:
        pass
    file = open(path, "w")
    new_finfo = fsspec.GetFInfo()
    if old_finfo:
        #print "MacUtils.open_new_file:", path ###
        #print "...old file info =", old_finfo.Creator, old_finfo.Type, old_finfo.Location ###
        #print "...new file info =", new_finfo.Creator, new_finfo.Type, new_finfo.Location ###
        # Restore the Finder window position and flags of the old file.
        new_finfo.Location = old_finfo.Location
        new_finfo.Flags = old_finfo.Flags
    # Make darn sure the type and creator are right. There seems
    # to be a bug in MacPython 2.2 that screws them up sometimes.
    new_finfo.Creator = "R*ch"
    new_finfo.Type = "TEXT"
    fsspec.SetFInfo(new_finfo)
    return file
| Python |
#
# Pyrex - Darwin system interface
#
# Module-level switch: set non-zero to echo compiler/linker command lines.
verbose = 0

import os
from Pyrex.Utils import replace_suffix
from Pyrex.Compiler.Errors import PyrexError

# Header search path for the framework Python install on Mac OS X.
py_include_dirs = [
    "/Library/Frameworks/Python.framework/Headers"
]

# External tool names and their fixed option lists, split into argv form.
compiler = "gcc"
compiler_options = \
    "-g -c -fno-strict-aliasing -Wno-long-double -no-cpp-precomp " \
    "-mno-fused-madd -fno-common -dynamic" \
    .split()

linker = "gcc"
linker_options = \
    "-Wl,-F.,-w -bundle -framework Python" \
    .split()
class CCompilerError(PyrexError):
    """Raised when the external C compiler or linker exits non-zero."""
    pass
def c_compile(c_file, verbose_flag = 0):
# Compile the given C source file to produce
# an object file. Returns the pathname of the
# resulting file.
c_file = os.path.join(os.getcwd(), c_file)
o_file = replace_suffix(c_file, ".o")
include_options = []
for dir in py_include_dirs:
include_options.append("-I%s" % dir)
args = [compiler] + compiler_options + include_options + [c_file, "-o", o_file]
if verbose_flag or verbose:
print " ".join(args)
status = os.spawnvp(os.P_WAIT, compiler, args)
if status <> 0:
raise CCompilerError("C compiler returned status %s" % status)
return o_file
def c_link(obj_file, verbose_flag = 0):
    # Convenience wrapper: link a single object file (see c_link_list).
    return c_link_list([obj_file], verbose_flag)
def c_link_list(obj_files, verbose_flag = 0):
# Link the given object files into a dynamically
# loadable extension file. Returns the pathname
# of the resulting file.
out_file = replace_suffix(obj_files[0], ".so")
args = [linker] + linker_options + obj_files + ["-o", out_file]
if verbose_flag or verbose:
print " ".join(args)
status = os.spawnvp(os.P_WAIT, linker, args)
if status <> 0:
raise CCompilerError("Linker returned status %s" % status)
return out_file
| Python |
"""Suite Misc Suite: Suite that adds additional features to the Application.
Level 1, version 1
Generated from MPW:MPW Shell
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'misc'
class MPW_Misc_Suite:
    """Generated aetools suite mix-in; relies on the host application
    class providing a send() method for AppleEvent dispatch."""

    def DoScript(self, _object, _attributes={}, **_arguments):
        """DoScript: Execute an MPW command, any command that could be executed from the command line can be sent as a script.
        Required argument: The script to execute
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'misc'
        _subcode = 'dosc'
        # This event takes no optional keyword parameters.
        if _arguments: raise TypeError, 'No optional args expected'
        # '----' is the AppleEvent direct-object parameter key.
        _arguments['----'] = _object
        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        # 'errn' in the reply signals an AppleEvent-level error.
        if _arguments.has_key('errn'):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']
#
# Indices of types declared in this module
#
# (This generated suite declares no classes, properties, comparison
# operators or enumerations, so every index dictionary is empty.)
_classdeclarations = {
}

_propdeclarations = {
}

_compdeclarations = {
}

_enumdeclarations = {
}
| Python |
"""Suite Standard Suite: Common terms for most applications
Level 1, version 1
Generated from Macintosh HD:System 8.0:Finder
AETE/AEUT resource version 0/144, language 0, script 0
"""
import aetools
import MacOS
_code = 'core'
class Finder_Std_Suite:
_argmap_class_info = {
'_in' : 'wrcd',
}
def class_info(self, _object=None, _attributes={}, **_arguments):
"""class info: Get information about an object class
Required argument: the object class about which information is requested
Keyword argument _in: the human language and script system in which to return information
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: a record containing the object's properties and elements
"""
_code = 'core'
_subcode = 'qobj'
aetools.keysubst(_arguments, self._argmap_class_info)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_close = {
'saving' : 'savo',
'saving_in' : 'kfil',
}
def close(self, _object, _attributes={}, **_arguments):
"""close: Close an object
Required argument: the object to close
Keyword argument saving: specifies whether changes should be saved before closing
Keyword argument saving_in: the file in which to save the object
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'core'
_subcode = 'clos'
aetools.keysubst(_arguments, self._argmap_close)
_arguments['----'] = _object
aetools.enumsubst(_arguments, 'savo', _Enum_savo)
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_count = {
'each' : 'kocl',
}
def count(self, _object, _attributes={}, **_arguments):
"""count: Return the number of elements of a particular class within an object
Required argument: the object whose elements are to be counted
Keyword argument each: the class of the elements to be counted
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: the number of elements
"""
_code = 'core'
_subcode = 'cnte'
aetools.keysubst(_arguments, self._argmap_count)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_data_size = {
'as' : 'rtyp',
}
def data_size(self, _object, _attributes={}, **_arguments):
"""data size: Return the size in bytes of an object
Required argument: the object whose data size is to be returned
Keyword argument as: the data type for which the size is calculated
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: the size of the object in bytes
"""
_code = 'core'
_subcode = 'dsiz'
aetools.keysubst(_arguments, self._argmap_data_size)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def delete(self, _object, _attributes={}, **_arguments):
"""delete: Delete an element from an object
Required argument: the element to delete
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'core'
_subcode = 'delo'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_duplicate = {
'to' : 'insh',
'replacing' : 'alrp',
'routing_suppressed' : 'rout',
}
def duplicate(self, _object, _attributes={}, **_arguments):
"""duplicate: Duplicate object(s)
Required argument: the object(s) to duplicate
Keyword argument to: the new location for the object(s)
Keyword argument replacing: Specifies whether or not to replace items in the destination that have the same name as items being duplicated
Keyword argument routing_suppressed: Specifies whether or not to autoroute items (default is false). Only applies when copying to the system folder.
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the duplicated object(s)
"""
_code = 'core'
_subcode = 'clon'
aetools.keysubst(_arguments, self._argmap_duplicate)
_arguments['----'] = _object
aetools.enumsubst(_arguments, 'alrp', _Enum_bool)
aetools.enumsubst(_arguments, 'rout', _Enum_bool)
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_event_info = {
'_in' : 'wrcd',
}
def event_info(self, _object, _attributes={}, **_arguments):
"""event info: Get information about the Apple events in a suite
Required argument: the event class of the Apple events for which to return information
Keyword argument _in: the human language and script system in which to return information
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: a record containing the events and their parameters
"""
_code = 'core'
_subcode = 'gtei'
aetools.keysubst(_arguments, self._argmap_event_info)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def exists(self, _object, _attributes={}, **_arguments):
"""exists: Verify if an object exists
Required argument: the object in question
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: true if it exists, false if not
"""
_code = 'core'
_subcode = 'doex'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_get = {
'as' : 'rtyp',
}
def get(self, _object, _attributes={}, **_arguments):
"""get: Get the data for an object
Required argument: the object whose data is to be returned
Keyword argument as: the desired types for the data, in order of preference
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: the data from the object
"""
_code = 'core'
_subcode = 'getd'
aetools.keysubst(_arguments, self._argmap_get)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_make = {
'new' : 'kocl',
'at' : 'insh',
'to' : 'to ',
'with_data' : 'data',
'with_properties' : 'prdt',
}
def make(self, _no_object=None, _attributes={}, **_arguments):
"""make: Make a new element
Keyword argument new: the class of the new element
Keyword argument at: the location at which to insert the element
Keyword argument to: when creating an alias file, the original item to create an alias to
Keyword argument with_data: the initial data for the element
Keyword argument with_properties: the initial values for the properties of the element
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the new object(s)
"""
_code = 'core'
_subcode = 'crel'
aetools.keysubst(_arguments, self._argmap_make)
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_move = {
'to' : 'insh',
'replacing' : 'alrp',
'positioned_at' : 'mvpl',
'routing_suppressed' : 'rout',
}
def move(self, _object, _attributes={}, **_arguments):
"""move: Move object(s) to a new location
Required argument: the object(s) to move
Keyword argument to: the new location for the object(s)
Keyword argument replacing: Specifies whether or not to replace items in the destination that have the same name as items being moved
Keyword argument positioned_at: Gives a list (in local window coordinates) of positions for the destination items
Keyword argument routing_suppressed: Specifies whether or not to autoroute items (default is false). Only applies when moving to the system folder.
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the object(s) after they have been moved
"""
_code = 'core'
_subcode = 'move'
aetools.keysubst(_arguments, self._argmap_move)
_arguments['----'] = _object
aetools.enumsubst(_arguments, 'alrp', _Enum_bool)
aetools.enumsubst(_arguments, 'mvpl', _Enum_list)
aetools.enumsubst(_arguments, 'rout', _Enum_bool)
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_open = {
'using' : 'usin',
'with_properties' : 'prdt',
}
def open(self, _object, _attributes={}, **_arguments):
"""open: Open the specified object(s)
Required argument: list of objects to open
Keyword argument using: the application file to open the object with
Keyword argument with_properties: the initial values for the properties, to be sent along with the open event sent to the application that opens the direct object
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'odoc'
aetools.keysubst(_arguments, self._argmap_open)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def _print(self, _object, _attributes={}, **_arguments):
"""print: Print the specified object(s)
Required argument: list of objects to print
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'pdoc'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_quit = {
'saving' : 'savo',
}
def quit(self, _no_object=None, _attributes={}, **_arguments):
"""quit: Quit the Finder (direct parameter ignored)
Keyword argument saving: specifies whether to save currently open documents (not supported by Finder)
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'quit'
aetools.keysubst(_arguments, self._argmap_quit)
if _no_object != None: raise TypeError, 'No direct arg expected'
aetools.enumsubst(_arguments, 'savo', _Enum_savo)
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_save = {
'_in' : 'kfil',
'as' : 'fltp',
}
def save(self, _object, _attributes={}, **_arguments):
"""save: Save an object (Not supported by Finder)
Required argument: the object to save
Keyword argument _in: the file in which to save the object (not supported by Finder)
Keyword argument as: the file type of the document in which to save the data (not supported by Finder)
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'core'
_subcode = 'save'
aetools.keysubst(_arguments, self._argmap_save)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_set = {
'to' : 'data',
}
def set(self, _object, _attributes={}, **_arguments):
"""set: Set an object's data
Required argument: the object to change
Keyword argument to: the new value
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'core'
_subcode = 'setd'
aetools.keysubst(_arguments, self._argmap_set)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_suite_info = {
'_in' : 'wrcd',
}
def suite_info(self, _object, _attributes={}, **_arguments):
"""suite info: Get information about event suite(s)
Required argument: the suite for which to return information
Keyword argument _in: the human language and script system in which to return information
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: a record containing the suites and their versions
"""
_code = 'core'
_subcode = 'gtsi'
aetools.keysubst(_arguments, self._argmap_suite_info)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.has_key('errn'):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
class application(aetools.ComponentItem):
"""application - An application program"""
want = 'capp'
class about_this_computer(aetools.NProperty):
"""about this computer - the "About this Computer" dialog and the list of running processes displayed in it"""
which = 'abbx'
want = 'obj '
class apple_menu_items_folder(aetools.NProperty):
"""apple menu items folder - the special folder named "Apple Menu Items," the contents of which appear in the Apple menu"""
which = 'amnu'
want = 'obj '
class clipboard(aetools.NProperty):
"""clipboard - the Finder's clipboard window"""
which = 'pcli'
want = 'obj '
class control_panels_folder(aetools.NProperty):
"""control panels folder - the special folder named 'Control Panels'"""
which = 'ctrl'
want = 'obj '
class desktop(aetools.NProperty):
"""desktop - the desktop"""
which = 'desk'
want = 'obj '
class extensions_folder(aetools.NProperty):
"""extensions folder - the special folder named 'Extensions'"""
which = 'extn'
want = 'obj '
class file_sharing(aetools.NProperty):
"""file sharing - Is file sharing on?"""
which = 'fshr'
want = 'bool'
class Finder_preferences(aetools.NProperty):
"""Finder preferences - Various preferences that apply to the Finder as a whole"""
which = 'pfrp'
want = 'obj '
class fonts_folder(aetools.NProperty):
"""fonts folder - the special folder named 'Fonts'"""
which = 'ffnt'
want = 'obj '
class frontmost(aetools.NProperty):
"""frontmost - Is the Finder the frontmost process?"""
which = 'pisf'
want = 'bool'
class insertion_location(aetools.NProperty):
"""insertion location - the container in which a new folder would appear if "New Folder" was selected"""
which = 'pins'
want = 'obj '
class largest_free_block(aetools.NProperty):
"""largest free block - the largest free block of process memory available to launch an application"""
which = 'mfre'
want = 'long'
class preferences_folder(aetools.NProperty):
"""preferences folder - the special folder named 'Preferences'"""
which = 'pref'
want = 'obj '
class product_version(aetools.NProperty):
"""product version - the version of the System software running on this computer"""
which = 'ver2'
want = 'itxt'
class selection(aetools.NProperty):
"""selection - the selection visible to the user"""
which = 'sele'
want = 'obj '
class sharing_starting_up(aetools.NProperty):
"""sharing starting up - Is file sharing in the process of starting up?"""
which = 'fsup'
want = 'bool'
class shutdown_items_folder(aetools.NProperty):
"""shutdown items folder - the special folder named 'Shutdown Items'"""
which = 'shdf'
want = 'obj '
class startup_items_folder(aetools.NProperty):
"""startup items folder - the special folder named 'Startup Items'"""
which = 'strt'
want = 'obj '
class system_folder(aetools.NProperty):
"""system folder - the System folder"""
which = 'macs'
want = 'obj '
class temporary_items_folder(aetools.NProperty):
"""temporary items folder - the special folder named "Temporary Items" (invisible)"""
which = 'temp'
want = 'obj '
class version(aetools.NProperty):
"""version - the version of the Finder"""
which = 'vers'
want = 'itxt'
class view_preferences(aetools.NProperty):
"""view preferences - backwards compatibility with Finder Scripting Extension. DEPRECATED -- not supported after Finder 8.0"""
which = 'pvwp'
want = 'obj '
class visible(aetools.NProperty):
"""visible - Is the Finder's layer visible?"""
which = 'pvis'
want = 'bool'
# element 'dsut' as ['indx', 'name']
# element 'alia' as ['indx', 'name']
# element 'appf' as ['indx', 'name', 'ID ']
# element 'clpf' as ['indx', 'name']
# element 'lwnd' as ['indx', 'name']
# element 'ctnr' as ['indx', 'name']
# element 'cwnd' as ['indx', 'name']
# element 'dwnd' as ['indx', 'name']
# element 'ccdv' as ['indx', 'name']
# element 'dafi' as ['indx', 'name']
# element 'cdsk' as ['indx', 'name']
# element 'cdis' as ['indx', 'name', 'ID ']
# element 'docf' as ['indx', 'name']
# element 'file' as ['indx', 'name']
# element 'cfol' as ['indx', 'name', 'ID ']
# element 'fntf' as ['indx', 'name']
# element 'fsut' as ['indx', 'name']
# element 'iwnd' as ['indx', 'name']
# element 'cobj' as ['indx', 'name']
# element 'sctr' as ['indx', 'name']
# element 'swnd' as ['indx', 'name']
# element 'sndf' as ['indx', 'name']
# element 'qwnd' as ['indx', 'name']
# element 'stcs' as ['indx', 'name']
# element 'ctrs' as ['indx', 'name']
# element 'cwin' as ['indx', 'name']
class file(aetools.ComponentItem):
"""file - A file"""
want = 'file'
class creator_type(aetools.NProperty):
"""creator type - the OSType identifying the application that created the item"""
which = 'fcrt'
want = 'type'
class file_type_obsolete(aetools.NProperty):
"""file type obsolete - the OSType identifying the type of data contained in the item (DEPRECATED - for use with scripts compiled before Finder 8.0. Will be removed in the next release)"""
which = 'fitp'
want = 'type'
class file_type(aetools.NProperty):
"""file type - the OSType identifying the type of data contained in the item"""
which = 'asty'
want = 'type'
class locked_obsolete(aetools.NProperty):
"""locked obsolete - Is the file locked? (DEPRECATED - for use with scripts compiled before Finder 8.0. Will be removed in the next release)"""
which = 'islk'
want = 'bool'
class locked(aetools.NProperty):
"""locked - Is the file locked?"""
which = 'aslk'
want = 'bool'
# repeated property product_version the version of the product (visible at the top of the "Get Info" window)
class stationery(aetools.NProperty):
"""stationery - Is the file a stationery pad?"""
which = 'pspd'
want = 'bool'
# repeated property version the version of the file (visible at the bottom of the "Get Info" window)
files = file
# Generated glue classes for the Finder's "window" AppleScript class.
class window(aetools.ComponentItem):
    """window - A window"""
    want = 'cwin'
class collapsed(aetools.NProperty):
    """collapsed - Is the window collapsed (only applies to non-pop-up windows)?"""
    which = 'wshd'
    want = 'bool'
class popup(aetools.NProperty):
    """popup - Is the window is a pop-up window?"""
    which = 'drwr'
    want = 'bool'
class pulled_open(aetools.NProperty):
    """pulled open - Is the window pulled open (only applies to pop-up windows)?"""
    which = 'pull'
    want = 'bool'
# repeated property visible Is the window visible (always true for Finder windows)?
class zoomed_full_size(aetools.NProperty):
    """zoomed full size - Is the window zoomed to the full size of the screen? (can only be set, not read)"""
    which = 'zumf'
    want = 'bool'

# Plural alias used by aetools element lookups.
windows = window
# XXXX application element 'dsut' not found!!
# XXXX application element 'alia' not found!!
# XXXX application element 'appf' not found!!
# XXXX application element 'clpf' not found!!
# XXXX application element 'lwnd' not found!!
# XXXX application element 'ctnr' not found!!
# XXXX application element 'cwnd' not found!!
# XXXX application element 'dwnd' not found!!
# XXXX application element 'ccdv' not found!!
# XXXX application element 'dafi' not found!!
# XXXX application element 'cdsk' not found!!
# XXXX application element 'cdis' not found!!
# XXXX application element 'docf' not found!!
# XXXX application element 'cfol' not found!!
# XXXX application element 'fntf' not found!!
# XXXX application element 'fsut' not found!!
# XXXX application element 'iwnd' not found!!
# XXXX application element 'cobj' not found!!
# XXXX application element 'sctr' not found!!
# XXXX application element 'swnd' not found!!
# XXXX application element 'sndf' not found!!
# XXXX application element 'qwnd' not found!!
# XXXX application element 'stcs' not found!!
# XXXX application element 'ctrs' not found!!
# Wire the generated property/element tables onto the suite's classes;
# aetools consults these dictionaries when resolving object specifiers
# at runtime.
application._propdict = {
    'about_this_computer' : about_this_computer,
    'apple_menu_items_folder' : apple_menu_items_folder,
    'clipboard' : clipboard,
    'control_panels_folder' : control_panels_folder,
    'desktop' : desktop,
    'extensions_folder' : extensions_folder,
    'file_sharing' : file_sharing,
    'Finder_preferences' : Finder_preferences,
    'fonts_folder' : fonts_folder,
    'frontmost' : frontmost,
    'insertion_location' : insertion_location,
    'largest_free_block' : largest_free_block,
    'preferences_folder' : preferences_folder,
    'product_version' : product_version,
    'selection' : selection,
    'sharing_starting_up' : sharing_starting_up,
    'shutdown_items_folder' : shutdown_items_folder,
    'startup_items_folder' : startup_items_folder,
    'system_folder' : system_folder,
    'temporary_items_folder' : temporary_items_folder,
    'version' : version,
    'view_preferences' : view_preferences,
    'visible' : visible,
}
application._elemdict = {
    'file' : file,
    'window' : window,
}
file._propdict = {
    'creator_type' : creator_type,
    'file_type_obsolete' : file_type_obsolete,
    'file_type' : file_type,
    'locked_obsolete' : locked_obsolete,
    'locked' : locked,
    'product_version' : product_version,
    'stationery' : stationery,
    'version' : version,
}
file._elemdict = {
}
window._propdict = {
    'collapsed' : collapsed,
    'popup' : popup,
    'pulled_open' : pulled_open,
    'visible' : visible,
    'zoomed_full_size' : zoomed_full_size,
}
window._elemdict = {
}

# XXXX enum list not found!!
# XXXX enum bool not found!!
# XXXX enum savo not found!!

#
# Indices of types declared in this module
#
_classdeclarations = {
    'cwin' : window,
    'file' : file,
    'capp' : application,
}

# Maps 4-char AppleEvent property codes back to the property classes above.
_propdeclarations = {
    'amnu' : apple_menu_items_folder,
    'pvwp' : view_preferences,
    'extn' : extensions_folder,
    'pins' : insertion_location,
    'fshr' : file_sharing,
    'aslk' : locked,
    'drwr' : popup,
    'fcrt' : creator_type,
    'pcli' : clipboard,
    'asty' : file_type,
    'strt' : startup_items_folder,
    'islk' : locked_obsolete,
    'pvis' : visible,
    'pref' : preferences_folder,
    'pisf' : frontmost,
    'sele' : selection,
    'temp' : temporary_items_folder,
    'pull' : pulled_open,
    'abbx' : about_this_computer,
    'wshd' : collapsed,
    'pspd' : stationery,
    'fitp' : file_type_obsolete,
    'pfrp' : Finder_preferences,
    'desk' : desktop,
    'fsup' : sharing_starting_up,
    'mfre' : largest_free_block,
    'ctrl' : control_panels_folder,
    'zumf' : zoomed_full_size,
    'shdf' : shutdown_items_folder,
    'ffnt' : fonts_folder,
    'macs' : system_folder,
    'ver2' : product_version,
    'vers' : version,
}

_compdeclarations = {
}

_enumdeclarations = {
}
| Python |
"""Suite Misc Suite: Suite that adds additional features to the Application.
Level 1, version 1
Generated from Macintosh HD:Desktop Folder:ToolServer 3.4.1:ToolServer
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'misc'
class TS_Misc_Suite:
    """AppleEvent glue for ToolServer's Misc suite (event 'misc'/'dosc')."""

    def DoScript(self, _object, _attributes={}, **_arguments):
        """DoScript: Execute an MPW command, any command that could be executed from the command line can be sent as a script.
        Required argument: The script to execute
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'misc'
        _subcode = 'dosc'

        if _arguments: raise TypeError, 'No optional args expected'
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        #if _arguments.has_key('errn'):
        #    raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        #if _arguments.has_key('----'):
        #    return _arguments['----']
        # Unlike the standard generated glue (commented out above), decode
        # the reply by hand so the caller gets everything in one tuple:
        # (errn, stat, stdout, stderr).
        errn = 0
        stat = 0
        stdout = ""
        stderr = ""
        if _arguments.has_key('errn'):
            errn = _arguments['errn']
            if errn:
                errn = aetools.decodeerror(_arguments)
        if _arguments.has_key('stat'):
            stat = _arguments['stat']
        if _arguments.has_key('----'):
            stdout = _arguments['----']
        if _arguments.has_key('diag'):
            stderr = _arguments['diag']
        return (errn, stat, stdout, stderr)
#
# Indices of types declared in this module
#
# This suite declares no classes, properties, comparisons or enums.
_classdeclarations = {
}

_propdeclarations = {
}

_compdeclarations = {
}

_enumdeclarations = {
}
| Python |
#
# Simple Apple-event driven Python interpreter
#
import os, sys, traceback
from cStringIO import StringIO
from MiniAEFrame import AEServer, MiniApplication
class PythonServer(AEServer, MiniApplication):
    """Minimal AppleEvent server that runs Python scripts on request."""

    def __init__(self):
        MiniApplication.__init__(self)
        AEServer.__init__(self)
        # Core events: launch ('oapp'), quit, and the 'dosc'
        # script-running event handled by doscript() below.
        self.installaehandler('aevt', 'oapp', ignore)
        self.installaehandler('aevt', 'quit', quit)
        self.installaehandler('misc', 'dosc', doscript)
def ignore(**kwds):
    """Handler for events that need no action (e.g. 'open application')."""
    return None
def quit(**kwds):
    """Handler for the 'quit' event: stop the server's main loop."""
    server._quit()
def doscript(args, **kwds):
    """Handle a 'dosc' event: run a Python script and capture its output.

    args is either a filename string or [filename, arg, ...].
    Returns [exit_status, stdout_text, stderr_text].
    """
    print "doscript:", repr(args) ###
    stat = 0
    output = ""
    errput = ""
    #print "Normalising args" ###
    if type(args) == type(""):
        args = [args]
    #print "Setting sys.argv" ###
    sys.argv = args
    #print "Finding script directory and module file" ###
    dir = os.path.dirname(args[0])
    dir = os.path.join(start_dir, dir)
    pyfile = os.path.basename(args[0])
    mod = os.path.splitext(pyfile)[0]
    #print "dir:", repr(dir) ###
    #print "mod:", repr(mod) ###
    os.chdir(dir)
    # Reset sys.path for each run, with the script's directory in front.
    sys.path = start_path[:]
    sys.path[0] = dir
    #print "path:", sys.path ###
    try:
        # Capture everything the script writes to stdout/stderr.
        sys.stdout = StringIO()
        sys.stderr = StringIO()
        try:
            #sys.__stdout__.write("Path: %s\n" % sys.path) ###
            #sys.__stdout__.write("Importing: %s\n" % mod) ###
            try:
                # Run the script by importing it as a module.
                __import__(mod)
            except KeyboardInterrupt:
                raise
            except SystemExit, exc:
                #sys.__stdout__.write("Caught a SystemExit\n") ###
                # sys.exit() inside the script: recover its status code.
                try:
                    stat = int(str(exc))
                except ValueError:
                    stat = 1
                #sys.__stdout__.write("stat = %s\n" % stat) ###
            except:
                traceback.print_exc()
                stat = 1
            #sys.__stdout__.write("Done the import\n") ###
        finally:
            output = sys.stdout.getvalue()
            #sys.__stdout__.write("Output:\n%s" % output) ###
            errput = sys.stderr.getvalue()
    finally:
        # NOTE(review): stderr is restored to sys.__stdout__, not
        # sys.__stderr__ — possibly deliberate on classic MacOS, but
        # looks suspect; confirm before changing.
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stdout__
        pass
    return [stat, output, errput]
# Module entry point: remember the starting directory and module search
# path (doscript resets both per run), then serve AppleEvents until quit.
start_dir = os.getcwd()
start_path = sys.path[:]

server = PythonServer()
#print "Open for business"
try:
    server.mainloop()
except:
    traceback.print_exc()
    #sys.exit(1)
#print "Closing shop"
| Python |
#
# Pyrex -- Mac system interface
#
import os, sys, string
import aetools
from aetools import TalkTo
from StdSuites.Standard_Suite import Standard_Suite_Events as Standard_Suite
from Pyrex.Utils import replace_suffix
from Pyrex.Compiler.Errors import PyrexError
# MPW tool names and options used to build Pyrex extensions via ToolServer.
c_compiler = "MWCPPC"
c_optimizations = "off"
#c_linker = "PPCLink"
c_linker = "MWLinkPPC"
shared_lib_suffix = ".slb"

# Python installation layout (colon-separated classic Mac paths).
#py_home = "Python2.2:Home:"
py_home = sys.exec_prefix

py_include_dirs = (
    py_home + "Include:",
    py_home + "Mac:Include:"
)

pythoncore = py_home + "PythonCore"

# CodeWarrior runtime libraries to link against.
mwlibdir = "MPW:Interfaces&Libraries:Libraries:MWPPCLibraries:"

libraries = (
    #mwlibdir + "'MSL C.PPC.Lib'",
    #mwlibdir + "'MSL RuntimePPC.Lib'",
    mwlibdir + "'MSL ShLibRuntime.Lib'",
    mwlibdir + "InterfaceLib",
    #mwlibdir + "MathLib",
)
class CCompilerError(PyrexError):
    """Raised when compiling or linking via ToolServer fails."""
    pass
#---------------- ToolServer ---------------------------
from TS_Misc_Suite import TS_Misc_Suite
class ToolServer(Standard_Suite, TS_Misc_Suite, TalkTo):
    """AppleEvent proxy for the MPW ToolServer application."""
    pass
def send_toolserver_command(cmd):
    """Send *cmd* to ToolServer (creator 'MPSX', launching it if needed)
    and return the raw DoScript reply tuple."""
    toolserver = ToolServer('MPSX', start = 1)
    return toolserver.DoScript(cmd)
def do_toolserver_command(command):
    """Run *command* via ToolServer; echo its output and return the status.

    Raises CCompilerError on AppleEvent failure or a ToolServer error.
    """
    try:
        result = send_toolserver_command(command)
    except aetools.Error, e:
        raise CCompilerError("Apple Event error: %s" % e)
    errn, stat, stdout, stderr = result
    if errn:
        raise CCompilerError("ToolServer error: %s" % errn)
    # ToolServer emits classic Mac CR line endings; convert for Python.
    stdout = string.replace(stdout, "\r", "\n")
    stderr = string.replace(stderr, "\r", "\n")
    if stdout:
        #print "<<< Begin ToolServer StdOut >>>"
        sys.stderr.write(stdout)
        #print "<<< End ToolServer StdOut >>>"
    if stderr:
        #print "<<< Begin ToolServer StdErr >>>"
        sys.stderr.write(stderr)
        #print "<<< End ToolServer StdErr >>>"
    return stat
#-------------------------------------------------------
def c_compile(c_file):
    # Compile the given C source file to produce
    # an object file. Returns the pathname of the
    # resulting file.
    c_file = os.path.join(os.getcwd(), c_file)
    #print "c_compile: c_file =", repr(c_file) ###
    c_file_dir = os.path.dirname(c_file)
    o_file = replace_suffix(c_file, ".o")
    # Search the source's own directory plus the Python include dirs.
    include_options = ["-i %s" % c_file_dir]
    for dir in py_include_dirs:
        include_options.append("-i %s" % dir)
    command = "%s -opt %s -nomapcr -w off -r %s %s -o %s" % (
        c_compiler,
        c_optimizations,
        string.join(include_options),
        c_file,
        o_file,
        #e_file
        )
    #print "...command =", repr(command) ###
    stat = do_toolserver_command(command)
    if stat:
        raise CCompilerError("C compiler returned status %s" % stat)
    return o_file
def c_link(obj_file):
    """Link a single object file into a shared library; see c_link_list."""
    return c_link_list([obj_file])
def c_link_list(obj_files):
    # Link the given object files into a dynamically
    # loadable extension file. Returns the pathname
    # of the resulting file.
    out_file = replace_suffix(obj_files[0], shared_lib_suffix)
    command = "%s -xm s -export all %s %s %s -o %s" % (
        c_linker,
        string.join(obj_files),
        pythoncore,
        string.join(libraries),
        out_file)
    stat = do_toolserver_command(command)
    if stat:
        raise CCompilerError("Linker returned status %s" % stat)
    return out_file
def test_c_compile(link = 0):
    """Compile the .c files named on the command line; non-.c arguments
    are passed through as already-built objects.  Optionally link."""
    objs = []
    for arg in sys.argv[1:]:
        if arg.endswith(".c"):
            try:
                obj = c_compile(arg)
            except PyrexError, e:
                #print "Caught a PyrexError:" ###
                #print repr(e) ###
                print "%s.%s:" % (e.__class__.__module__,
                    e.__class__.__name__), e
                sys.exit(1)
        else:
            obj = arg
        objs.append(obj)
    if link:
        c_link_list(objs)
| Python |
"Apple Event suite for pyserver."
import aetools
import MacOS
_code = 'misc'
class PS_Misc_Suite:
    """AppleEvent glue for pyserver's 'misc'/'dosc' DoScript event."""

    def DoScript(self, _object, _attributes={}, **_arguments):
        """DoScript: Execute a Python file, optionally with command line args.
        Required argument: filename.py or [filename.py, arg, ...]
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'misc'
        _subcode = 'dosc'

        if _arguments: raise TypeError, 'No optional args expected'
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.has_key('errn'):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']
#
# Indices of types declared in this module
#
# This suite declares no classes, properties, comparisons or enums.
_classdeclarations = {
}

_propdeclarations = {
}

_compdeclarations = {
}

_enumdeclarations = {
}
| Python |
#
# Pyrex -- Things that don't belong
# anywhere else in particular
#
import os, sys
def replace_suffix(path, newsuf):
    """Return *path* with its extension replaced by *newsuf*.

    A path without an extension simply gets *newsuf* appended.
    """
    root = os.path.splitext(path)[0]
    return root + newsuf
def default_open_new_file(path):
    """Open *path* for writing, truncating any existing file."""
    return open(path, "w")

# On classic MacOS, creating the output file also needs platform-specific
# handling, so a Mac replacement is substituted there.
if sys.platform == "mac":
    from Pyrex.Mac.MacUtils import open_new_file
else:
    open_new_file = default_open_new_file
| Python |
#
# Pyrex - Command Line Parsing
#
import sys
# Command-line help text.  NOTE(review): -C/--compile and -X/--link are
# accepted by parse_command_line but not documented here.
usage = """\
Usage: pyrexc [options] sourcefile...
Options:
  -v, --version                  Display version number of pyrex compiler
  -l, --create-listing           Write error messages to a listing file
  -I, --include-dir <directory>  Search for include files in named directory
  -o, --output-file <filename>   Specify name of generated C file"""

def bad_usage():
    """Print the usage message to stderr and exit with status 1."""
    print >>sys.stderr, usage
    sys.exit(1)
def parse_command_line(args):
    """Parse pyrexc's command line.

    Returns (options, sources): a CompilationOptions instance and the
    list of source file names.  Exits via bad_usage() on malformed
    arguments, and with an error when -l or -o is combined with more
    than one source file.
    """
    from Pyrex.Compiler.Main import \
        CompilationOptions, default_options
    def pop_arg():
        # Next argument, or die with the usage message if exhausted.
        if args:
            return args.pop(0)
        else:
            bad_usage()
    def get_param(option):
        # Option value is either attached ("-Idir") or the next argument.
        tail = option[2:]
        if tail:
            return tail
        else:
            return pop_arg()
    options = CompilationOptions(default_options)
    sources = []
    while args:
        if args[0].startswith("-"):
            option = pop_arg()
            if option in ("-v", "--version"):
                options.show_version = 1
            elif option in ("-l", "--create-listing"):
                options.use_listing_file = 1
            elif option in ("-C", "--compile"):
                options.c_only = 0
            elif option in ("-X", "--link"):
                options.c_only = 0
                options.obj_only = 0
            elif option.startswith("-I"):
                options.include_path.append(get_param(option))
            elif option == "--include-dir":
                options.include_path.append(pop_arg())
            elif option in ("-o", "--output-file"):
                options.output_file = pop_arg()
            else:
                bad_usage()
        else:
            sources.append(pop_arg())
    if options.use_listing_file and len(sources) > 1:
        # Bug fix: this guard is about the -l listing file, but the
        # message used to blame -o.  (sys.stderr.write also works on
        # both Python 2 and 3, unlike "print >>".)
        sys.stderr.write(
            "pyrexc: Only one source file allowed when using -l\n")
        sys.exit(1)
    if getattr(options, "output_file", None) and len(sources) > 1:
        # A single explicit output file cannot serve several sources.
        sys.stderr.write(
            "pyrexc: Only one source file allowed when using -o\n")
        sys.exit(1)
    return options, sources
| Python |
# Compiler debugging switches (set nonzero to enable tracing).
debug_disposal_code = 0
debug_temp_alloc = 0
debug_coercion = 0
| Python |
#
# Pyrex Scanner - Lexical Definitions
#
# Changing anything in this file will cause Lexicon.pickle
# to be rebuilt next time pyrexc is run.
#
# Prefix characters allowed on string literals (char and raw).
string_prefixes = "cCrR"

def make_lexicon():
    """Build and return the Plex Lexicon used by the Pyrex scanner."""
    from Pyrex.Plex import \
        Str, Any, AnyBut, AnyChar, Rep, Rep1, Opt, Bol, Eol, Eof, \
        TEXT, IGNORE, State, Lexicon
    from Scanning import Method

    letter = Any("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_")
    digit = Any("0123456789")
    octdigit = Any("01234567")
    hexdigit = Any("0123456789ABCDEFabcdef")
    indentation = Bol + Rep(Any(" \t"))

    #resword = apply(Str, reserved_words)

    decimal = Rep1(digit)
    dot = Str(".")
    exponent = Any("Ee") + Opt(Any("+-")) + decimal
    decimal_fract = (decimal + dot + Opt(decimal)) | (dot + decimal)

    name = letter + Rep(letter | digit)
    intconst = decimal | (Str("0x") + Rep1(hexdigit))
    fltconst = (decimal_fract + Opt(exponent)) | (decimal + exponent)
    imagconst = (intconst | fltconst) + Any("jJ")

    sq_string = (
        Str("'") +
        Rep(AnyBut("\\\n'") | (Str("\\") + AnyChar)) +
        Str("'")
    )

    dq_string = (
        Str('"') +
        Rep(AnyBut('\\\n"') | (Str("\\") + AnyChar)) +
        Str('"')
    )

    non_sq = AnyBut("'") | (Str('\\') + AnyChar)
    tsq_string = (
        Str("'''")
        + Rep(non_sq | (Str("'") + non_sq) | (Str("''") + non_sq))
        + Str("'''")
    )

    non_dq = AnyBut('"') | (Str('\\') + AnyChar)
    tdq_string = (
        Str('"""')
        + Rep(non_dq | (Str('"') + non_dq) | (Str('""') + non_dq))
        + Str('"""')
    )

    stringlit = Opt(Any(string_prefixes)) + (sq_string | dq_string | tsq_string | tdq_string)
    # A string prefix plus opening quote; the string body is scanned in
    # one of the per-flavour State()s below.
    beginstring = Opt(Any(string_prefixes)) + (Str("'") | Str('"') | Str("'''") | Str('"""'))
    two_oct = octdigit + octdigit
    three_oct = octdigit + octdigit + octdigit
    two_hex = hexdigit + hexdigit
    escapeseq = Str("\\") + (two_oct | three_oct | two_hex | AnyChar)

    bra = Any("([{")
    ket = Any(")]}")
    punct = Any(":,;+-*/|&<>=.%`~^?")
    diphthong = Str("==", "<>", "!=", "<=", ">=", "<<", ">>", "**",
        "+=", "-=", "*=", "/=", "%=", "&=", "|=", "^=",
        "<<=", ">>=", "**=")
    spaces = Rep1(Any(" \t\f"))
    comment = Str("#") + Rep(AnyBut("\n"))
    escaped_newline = Str("\\\n")
    lineterm = Eol + Opt(Str("\n"))

    return Lexicon([
        #(resword, TEXT),
        (name, 'IDENT'),
        (intconst, 'INT'),
        (fltconst, 'FLOAT'),
        (imagconst, 'IMAG'),
        (punct | diphthong, TEXT),
        (bra, Method('open_bracket_action')),
        (ket, Method('close_bracket_action')),
        (lineterm, Method('newline_action')),

        #(stringlit, 'STRING'),
        (beginstring, Method('begin_string_action')),

        (comment, IGNORE),
        (spaces, IGNORE),
        (escaped_newline, IGNORE),

        # Start of line: measure indentation, skip blank lines, handle EOF.
        State('INDENT', [
            (Opt(spaces) + Opt(comment) + lineterm, IGNORE),
            (indentation, Method('indentation_action')),
            (Eof, Method('eof_action'))
        ]),

        # One state per string flavour; each yields CHARS/ESCAPE tokens
        # until the matching closing quote triggers end_string_action.
        State('SQ_STRING', [
            (escapeseq, 'ESCAPE'),
            (Rep1(AnyBut("'\"\n\\")), 'CHARS'),
            (Str('"'), 'CHARS'),
            (Str("\n"), Method('unclosed_string_action')),
            (Str("'"), Method('end_string_action')),
            (Eof, 'EOF')
        ]),

        State('DQ_STRING', [
            (escapeseq, 'ESCAPE'),
            (Rep1(AnyBut('"\n\\')), 'CHARS'),
            (Str("'"), 'CHARS'),
            (Str("\n"), Method('unclosed_string_action')),
            (Str('"'), Method('end_string_action')),
            (Eof, 'EOF')
        ]),

        State('TSQ_STRING', [
            (escapeseq, 'ESCAPE'),
            (Rep1(AnyBut("'\"\n\\")), 'CHARS'),
            (Any("'\""), 'CHARS'),
            (Str("\n"), 'NEWLINE'),
            (Str("'''"), Method('end_string_action')),
            (Eof, 'EOF')
        ]),

        State('TDQ_STRING', [
            (escapeseq, 'ESCAPE'),
            (Rep1(AnyBut('"\'\n\\')), 'CHARS'),
            (Any("'\""), 'CHARS'),
            (Str("\n"), 'NEWLINE'),
            (Str('"""'), Method('end_string_action')),
            (Eof, 'EOF')
        ]),

        (Eof, Method('eof_action'))
        ],

        # FIXME: Plex 1.9 needs different args here from Plex 1.1.4
        #debug_flags = scanner_debug_flags,
        #debug_file = scanner_dump_file
        )
| Python |
# Pyrex release version string.
version = '0.9.2.1'
| Python |
#
# Pyrex Parser
#
import os, re
from string import join, replace
from types import ListType, TupleType
from Scanning import PyrexScanner
import Nodes
import ExprNodes
from Errors import error, InternalError
def p_ident(s, message = "Expected an identifier"):
    """Consume an IDENT token and return its text.

    If the current token is not an identifier, report *message*
    through the scanner's error handler.
    """
    if s.sy == 'IDENT':
        ident = s.systring
        s.next()
        return ident
    s.error(message)
def p_ident_list(s):
    """Parse a comma-separated list of identifiers; return their names."""
    names = []
    while s.sy == 'IDENT':
        names.append(s.systring)
        s.next()
        if s.sy == ',':
            s.next()
        else:
            break
    return names
#------------------------------------------
#
# Expressions
#
#------------------------------------------
def p_binop_expr(s, ops, p_sub_expr):
    """Parse a left-associative chain of binary operators from *ops*,
    using *p_sub_expr* for the operands."""
    #print "p_binop_expr:", ops, p_sub_expr ###
    n1 = p_sub_expr(s)
    #print "p_binop_expr(%s):" % p_sub_expr, s.sy ###
    while s.sy in ops:
        op = s.sy
        pos = s.position()
        s.next()
        n2 = p_sub_expr(s)
        n1 = ExprNodes.binop_node(pos, op, n1, n2)
    return n1

#test: and_test ('or' and_test)* | lambdef

def p_simple_expr(s):
    """Parse a non-tuple expression ('or' chains are built
    right-associatively)."""
    #return p_binop_expr(s, ('or',), p_and_test)
    return p_rassoc_binop_expr(s, ('or',), p_and_test)

def p_rassoc_binop_expr(s, ops, p_subexpr):
    """Parse a right-associative chain of operators from *ops*."""
    n1 = p_subexpr(s)
    if s.sy in ops:
        pos = s.position()
        op = s.sy
        s.next()
        n2 = p_rassoc_binop_expr(s, ops, p_subexpr)
        n1 = ExprNodes.binop_node(pos, op, n1, n2)
    return n1

#and_test: not_test ('and' not_test)*

def p_and_test(s):
    """Parse an 'and' chain (right-associatively, like 'or')."""
    #return p_binop_expr(s, ('and',), p_not_test)
    return p_rassoc_binop_expr(s, ('and',), p_not_test)
#not_test: 'not' not_test | comparison
def p_not_test(s):
    """not_test: 'not' not_test | comparison"""
    if s.sy == 'not':
        pos = s.position()
        s.next()
        return ExprNodes.NotNode(pos, operand = p_not_test(s))
    else:
        return p_comparison(s)

#comparison: expr (comp_op expr)*
#comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'

def p_comparison(s):
    """Parse a (possibly chained) comparison expression."""
    n1 = p_bit_expr(s)
    if s.sy in comparison_ops:
        pos = s.position()
        op = p_cmp_op(s)
        n2 = p_bit_expr(s)
        n1 = ExprNodes.PrimaryCmpNode(pos,
            operator = op, operand1 = n1, operand2 = n2)
        if s.sy in comparison_ops:
            # Chained comparison, e.g. a < b < c.
            n1.cascade = p_cascaded_cmp(s)
    return n1

def p_cascaded_cmp(s):
    """Parse the trailing links of a chained comparison."""
    pos = s.position()
    op = p_cmp_op(s)
    n2 = p_bit_expr(s)
    result = ExprNodes.CascadedCmpNode(pos,
        operator = op, operand2 = n2)
    if s.sy in comparison_ops:
        result.cascade = p_cascaded_cmp(s)
    return result

def p_cmp_op(s):
    """Parse a comparison operator, normalising '<>' to '!=' and the
    two-word forms to 'not_in' / 'is_not'."""
    if s.sy == 'not':
        s.next()
        s.expect('in')
        op = 'not_in'
    elif s.sy == 'is':
        s.next()
        if s.sy == 'not':
            s.next()
            op = 'is_not'
        else:
            op = 'is'
    else:
        op = s.sy
        s.next()
    if op == '<>':
        op = '!='
    return op

# Tokens that can begin a comparison operator.
comparison_ops = (
    '<', '>', '==', '>=', '<=', '<>', '!=',
    'in', 'is', 'not'
)
#expr: xor_expr ('|' xor_expr)*
def p_bit_expr(s):
    """expr: xor_expr ('|' xor_expr)*"""
    return p_binop_expr(s, ('|',), p_xor_expr)

def p_xor_expr(s):
    """xor_expr: and_expr ('^' and_expr)*"""
    return p_binop_expr(s, ('^',), p_and_expr)

def p_and_expr(s):
    """and_expr: shift_expr ('&' shift_expr)*"""
    return p_binop_expr(s, ('&',), p_shift_expr)

def p_shift_expr(s):
    """shift_expr: arith_expr (('<<'|'>>') arith_expr)*"""
    return p_binop_expr(s, ('<<', '>>'), p_arith_expr)

def p_arith_expr(s):
    """arith_expr: term (('+'|'-') term)*"""
    return p_binop_expr(s, ('+', '-'), p_term)

def p_term(s):
    """term: factor (('*'|'/'|'%') factor)*"""
    return p_binop_expr(s, ('*', '/', '%'), p_factor)

#factor: ('+'|'-'|'~'|'&'|typecast|sizeof) factor | power

def p_factor(s):
    """Parse unary operators, C address-of, typecasts and sizeof."""
    sy = s.sy
    if sy in ('+', '-', '~'):
        op = s.sy
        pos = s.position()
        s.next()
        return ExprNodes.unop_node(pos, op, p_factor(s))
    elif sy == '&':
        # C-style address-of operator.
        pos = s.position()
        s.next()
        arg = p_factor(s)
        return ExprNodes.AmpersandNode(pos, operand = arg)
    elif sy == "<":
        return p_typecast(s)
    elif sy == 'IDENT' and s.systring == "sizeof":
        return p_sizeof(s)
    else:
        return p_power(s)
def p_typecast(s):
    """Parse a Pyrex typecast: '<' type '>' factor."""
    # s.sy == "<"
    pos = s.position()
    s.next()
    base_type = p_c_base_type(s)
    declarator = p_c_declarator(s, empty = 1)
    s.expect(">")
    operand = p_factor(s)
    return ExprNodes.TypecastNode(pos,
        base_type = base_type,
        declarator = declarator,
        operand = operand)

def p_sizeof(s):
    """Parse sizeof(type) or sizeof(expression)."""
    # s.sy == ident "sizeof"
    pos = s.position()
    s.next()
    s.expect('(')
    if looking_at_type(s):
        base_type = p_c_base_type(s)
        declarator = p_c_declarator(s, empty = 1)
        node = ExprNodes.SizeofTypeNode(pos,
            base_type = base_type, declarator = declarator)
    else:
        operand = p_simple_expr(s)
        node = ExprNodes.SizeofVarNode(pos, operand = operand)
    s.expect(')')
    return node
#power: atom trailer* ('**' factor)*
def p_power(s):
    """power: atom trailer* ('**' factor)*"""
    n1 = p_atom(s)
    while s.sy in ('(', '[', '.'):
        n1 = p_trailer(s, n1)
    if s.sy == '**':
        # '**' associates to the right via the recursion through p_factor.
        pos = s.position()
        s.next()
        n2 = p_factor(s)
        n1 = ExprNodes.binop_node(pos, '**', n1, n2)
    return n1

#trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME

def p_trailer(s, node1):
    """Parse one call, subscript or attribute trailer applied to node1."""
    pos = s.position()
    if s.sy == '(':
        return p_call(s, node1)
    elif s.sy == '[':
        return p_index(s, node1)
    else: # s.sy == '.'
        s.next()
        name = p_ident(s)
        return ExprNodes.AttributeNode(pos,
            obj = node1, attribute = name)
# arglist: argument (',' argument)* [',']
# argument: [test '='] test # Really [keyword '='] test
def p_call(s, function):
    """Parse a call trailer.

    Plain positional calls produce a SimpleCallNode; anything with
    keywords, *args or **kwargs produces a GeneralCallNode.
    """
    # s.sy == '('
    pos = s.position()
    s.next()
    positional_args = []
    keyword_args = []
    star_arg = None
    starstar_arg = None
    while s.sy not in ('*', '**', ')'):
        arg = p_simple_expr(s)
        if s.sy == '=':
            s.next()
            if not arg.is_name:
                s.error("Expected an identifier before '='",
                    pos = arg.pos)
            # Re-interpret the name before '=' as a keyword string.
            keyword = ExprNodes.StringNode(arg.pos,
                value = arg.name)
            arg = p_simple_expr(s)
            keyword_args.append((keyword, arg))
        else:
            if keyword_args:
                s.error("Non-keyword arg following keyword arg",
                    pos = arg.pos)
            positional_args.append(arg)
        if s.sy <> ',':
            break
        s.next()
    if s.sy == '*':
        s.next()
        star_arg = p_simple_expr(s)
        if s.sy == ',':
            s.next()
    if s.sy == '**':
        s.next()
        starstar_arg = p_simple_expr(s)
        if s.sy == ',':
            s.next()
    s.expect(')')
    if not (keyword_args or star_arg or starstar_arg):
        return ExprNodes.SimpleCallNode(pos,
            function = function,
            args = positional_args)
    else:
        # Build explicit tuple/dict argument nodes for the general case.
        arg_tuple = None
        keyword_dict = None
        if positional_args or not star_arg:
            arg_tuple = ExprNodes.TupleNode(pos,
                args = positional_args)
        if star_arg:
            star_arg_tuple = ExprNodes.AsTupleNode(pos, arg = star_arg)
            if arg_tuple:
                arg_tuple = ExprNodes.binop_node(pos,
                    operator = '+', operand1 = arg_tuple,
                    operand2 = star_arg_tuple)
            else:
                arg_tuple = star_arg_tuple
        if keyword_args:
            keyword_dict = ExprNodes.DictNode(pos,
                key_value_pairs = keyword_args)
        return ExprNodes.GeneralCallNode(pos,
            function = function,
            positional_args = arg_tuple,
            keyword_args = keyword_dict,
            starstar_arg = starstar_arg)
#lambdef: 'lambda' [varargslist] ':' test
#subscriptlist: subscript (',' subscript)* [',']
def p_index(s, base):
    """Parse a subscript trailer.

    A single start:stop slice becomes a SliceIndexNode; everything else
    becomes an IndexNode (with a TupleNode index for multiple subscripts).
    """
    # s.sy == '['
    pos = s.position()
    s.next()
    subscripts = p_subscript_list(s)
    if len(subscripts) == 1 and len(subscripts[0]) == 2:
        start, stop = subscripts[0]
        result = ExprNodes.SliceIndexNode(pos,
            base = base, start = start, stop = stop)
    else:
        indexes = make_slice_nodes(pos, subscripts)
        if len(indexes) == 1:
            index = indexes[0]
        else:
            index = ExprNodes.TupleNode(pos, args = indexes)
        result = ExprNodes.IndexNode(pos,
            base = base, index = index)
    s.expect(']')
    return result

def p_subscript_list(s):
    """Parse a comma-separated list of subscripts."""
    items = [p_subscript(s)]
    while s.sy == ',':
        s.next()
        if s.sy == ']':
            break
        items.append(p_subscript(s))
    return items
#subscript: '.' '.' '.' | test | [test] ':' [test] [':' [test]]
def p_subscript(s):
    # Parse a subscript and return a list of
    # 1, 2 or 3 ExprNodes, depending on how
    # many slice elements were encountered.
    pos = s.position()
    if s.sy == '.':
        expect_ellipsis(s)
        return [ExprNodes.EllipsisNode(pos)]
    else:
        start = p_slice_element(s, (':',))
        if s.sy <> ':':
            return [start]
        s.next()
        stop = p_slice_element(s, (':', ',', ']'))
        if s.sy <> ':':
            return [start, stop]
        s.next()
        step = p_slice_element(s, (':', ',', ']'))
        return [start, stop, step]

def p_slice_element(s, follow_set):
    # Simple expression which may be missing iff
    # it is followed by something in follow_set.
    if s.sy not in follow_set:
        return p_simple_expr(s)
    else:
        return None

def expect_ellipsis(s):
    """Consume the three dots of an ellipsis subscript ('...')."""
    s.expect('.')
    s.expect('.')
    s.expect('.')
def make_slice_nodes(pos, subscripts):
    # Convert a list of subscripts as returned
    # by p_subscript_list into a list of ExprNodes,
    # creating SliceNodes for elements with 2 or
    # more components.
    result = []
    for subscript in subscripts:
        if len(subscript) == 1:
            result.append(subscript[0])
        else:
            result.append(make_slice_node(pos, *subscript))
    return result

def make_slice_node(pos, start, stop = None, step = None):
    """Build a SliceNode, substituting NoneNode for absent bounds."""
    if not start:
        start = ExprNodes.NoneNode(pos)
    if not stop:
        stop = ExprNodes.NoneNode(pos)
    if not step:
        step = ExprNodes.NoneNode(pos)
    return ExprNodes.SliceNode(pos,
        start = start, stop = stop, step = step)
#atom: '(' [testlist] ')' | '[' [listmaker] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING+
def p_atom(s):
    """Parse an atomic expression: parenthesised expression or tuple,
    list, dict, backquote, numeric/string literal, name, or NULL."""
    pos = s.position()
    sy = s.sy
    if sy == '(':
        s.next()
        if s.sy == ')':
            result = ExprNodes.TupleNode(pos, args = [])
        else:
            result = p_expr(s)
        s.expect(')')
        return result
    elif sy == '[':
        return p_list_maker(s)
    elif sy == '{':
        return p_dict_maker(s)
    elif sy == '`':
        return p_backquote_expr(s)
    elif sy == 'INT':
        digits = s.systring
        if digits[:2] == "0x":
            # Hexadecimal literal.
            value = long(digits[2:], 16)
        elif digits[:1] == "0":
            # Leading zero: octal literal.
            value = int(digits, 8)
        else:
            value = int(s.systring)
        s.next()
        return ExprNodes.IntNode(pos, value = value)
    elif sy == 'FLOAT':
        value = float(s.systring)
        s.next()
        return ExprNodes.FloatNode(pos, value = value)
    elif sy == 'IMAG':
        # Strip the trailing 'j'/'J' from imaginary literals.
        value = float(s.systring[:-1])
        s.next()
        return ExprNodes.ImagNode(pos, value = value)
    elif sy == 'STRING' or sy == 'BEGIN_STRING':
        kind, value = p_cat_string_literal(s)
        if kind == 'c':
            return ExprNodes.CharNode(pos, value = value)
        else:
            return ExprNodes.StringNode(pos, value = value)
    elif sy == 'IDENT':
        name = s.systring
        s.next()
        if name == "None":
            return ExprNodes.NoneNode(pos)
        else:
            return ExprNodes.NameNode(pos, name=name)
    elif sy == 'NULL':
        s.next()
        return ExprNodes.NullNode(pos)
    else:
        s.error("Expected an identifier or literal")
def p_cat_string_literal(s):
    """Parse one or more adjacent string literals and concatenate them.

    Returns (kind, value) where kind in ('', 'c', 'r').  Char literals
    (kind 'c') may not participate in concatenation.
    """
    kind, value = p_string_literal(s)
    if kind != 'c':
        strings = [value]
        while s.sy == 'STRING' or s.sy == 'BEGIN_STRING':
            next_kind, next_value = p_string_literal(s)
            if next_kind == 'c':
                # Bug fix: this used to call self.error(), but there is
                # no 'self' in a module-level function, so hitting this
                # branch raised NameError instead of a syntax error.
                s.error(
                    "Cannot concatenate char literal with another string or char literal")
            strings.append(next_value)
        value = ''.join(strings)
    return kind, value
def p_opt_string_literal(s):
    """Parse a string literal if one is present, else return None."""
    if s.sy == 'STRING' or s.sy == 'BEGIN_STRING':
        return p_string_literal(s)
    else:
        return None
def p_string_literal(s):
    # A single string or char literal.
    # Returns (kind, value) where kind in ('', 'c', 'r')
    if s.sy == 'STRING':
        value = unquote(s.systring)
        s.next()
        # NOTE(review): returns a bare value rather than the (kind, value)
        # pair documented above.  The 'STRING' token rule is commented out
        # in the lexicon, so this branch appears to be dead — confirm
        # before relying on it.
        return value
    # s.sy == 'BEGIN_STRING'
    pos = s.position()
    #is_raw = s.systring[:1].lower() == "r"
    kind = s.systring[:1].lower()
    if kind not in "cr":
        kind = ''
    chars = []
    while 1:
        s.next()
        sy = s.sy
        #print "p_string_literal: sy =", sy, repr(s.systring) ###
        if sy == 'CHARS':
            systr = s.systring
            if len(systr) == 1 and systr in "'\"\n":
                # Escape bare quotes/newlines so the value is C-safe.
                chars.append('\\')
            chars.append(systr)
        elif sy == 'ESCAPE':
            systr = s.systring
            if kind == 'r':
                # Raw string: keep escapes literal (double the backslash).
                if systr == '\\\n':
                    chars.append(r'\\\n')
                else:
                    chars.append('\\' + systr)
            else:
                c = systr[1]
                if c in "'\"\\abfnrtv01234567":
                    chars.append(systr)
                elif c == 'x':
                    # Pad hex escapes to three digits for C.
                    chars.append('\\x0' + systr[2:])
                elif c == '\n':
                    # Backslash-newline: line continuation, dropped.
                    pass
                else:
                    # Unknown escape: keep just the escaped character.
                    chars.append(systr[1:])
        elif sy == 'NEWLINE':
            chars.append(r'\n')
        elif sy == 'END_STRING':
            break
        elif sy == 'EOF':
            s.error("Unclosed string literal", pos = pos)
        else:
            s.error(
                "Unexpected token %r:%r in string literal" %
                (sy, s.systring))
    s.next()
    value = join(chars, '')
    #print "p_string_literal: value =", repr(value) ###
    return kind, value
def unquote(s):
    """Convert a scanned Python string literal (quotes and optional 'r'
    prefix included) into an escaped C string body.

    Raw strings get every backslash doubled; otherwise escape sequences
    are normalised and embedded double quotes / newlines are escaped.
    """
    is_raw = 0
    if s[:1].lower() == "r":
        is_raw = 1
        s = s[1:]
    # Strip triple or single quoting.
    q = s[:3]
    if q == '"""' or q == "'''":
        s = s[3:-3]
    else:
        s = s[1:-1]
    if is_raw:
        # Every backslash is literal; escape it for C, and turn embedded
        # newlines into backslash-continued lines.
        s = s.replace('\\', '\\\\')
        s = s.replace('\n', '\\\n')
    else:
        # Split into double quotes, newlines, escape sequences
        # and spans of regular chars.  (A stray debug print of the
        # split result used to live here; removed.)
        l1 = re.split(r'((?:\\[0-7]{1,3})|(?:\\x[0-9A-Fa-f]{2})|(?:\\.)|(?:\\\n)|(?:\n)|")', s)
        l2 = []
        for item in l1:
            if item == '"' or item == '\n':
                # Must be escaped to survive inside a C string literal.
                l2.append('\\' + item)
            elif item == '\\\n':
                # Backslash-newline: line continuation, dropped.
                pass
            elif item[:1] == '\\':
                if len(item) == 2:
                    if item[1] in '"\\abfnrtv':
                        l2.append(item)
                    else:
                        # Unknown single-char escape: keep just the char.
                        l2.append(item[1])
                elif item[1:2] == 'x':
                    # Pad hex escapes to three digits for C.
                    l2.append('\\x0' + item[2:])
                else:
                    # octal escape
                    l2.append(item)
            else:
                l2.append(item)
        s = "".join(l2)
    return s
def p_list_maker(s):
    """Parse a list display: '[' exprs ']'."""
    # s.sy == '['
    pos = s.position()
    s.next()
    exprs = p_simple_expr_list(s)
    s.expect(']')
    return ExprNodes.ListNode(pos, args = exprs)
#dictmaker: test ':' test (',' test ':' test)* [',']
def p_dict_maker(s):
    """dictmaker: test ':' test (',' test ':' test)* [',']"""
    # s.sy == '{'
    pos = s.position()
    s.next()
    items = []
    while s.sy <> '}':
        key = p_simple_expr(s)
        s.expect(':')
        value = p_simple_expr(s)
        items.append((key, value))
        if s.sy <> ',':
            break
        s.next()
    s.expect('}')
    return ExprNodes.DictNode(pos, key_value_pairs = items)

def p_backquote_expr(s):
    """Parse a backquote (repr) expression: '`' expr '`'."""
    # s.sy == '`'
    pos = s.position()
    s.next()
    arg = p_expr(s)
    s.expect('`')
    return ExprNodes.BackquoteNode(pos, arg = arg)
#testlist: test (',' test)* [',']
def p_simple_expr_list(s):
    """Parse a comma-separated list of simple expressions."""
    exprs = []
    while s.sy not in expr_terminators:
        exprs.append(p_simple_expr(s))
        if s.sy <> ',':
            break
        s.next()
    return exprs

def p_expr(s):
    """Parse a full expression; a comma-separated list becomes a tuple."""
    pos = s.position()
    expr = p_simple_expr(s)
    if s.sy == ',':
        s.next()
        exprs = [expr] + p_simple_expr_list(s)
        return ExprNodes.TupleNode(pos, args = exprs)
    else:
        return expr

# Tokens that terminate an expression list.
expr_terminators = (')', ']', '}', ':', '=', 'NEWLINE')
#-------------------------------------------------------
#
# Statements
#
#-------------------------------------------------------
def p_global_statement(s):
    """Parse a 'global' statement (the keyword token is current)."""
    # assume s.sy == 'global'
    pos = s.position()
    s.next()
    names = p_ident_list(s)
    return Nodes.GlobalNode(pos, names = names)
def p_expression_or_assignment(s):
    """Parse an expression statement, an augmented assignment, or a
    (possibly cascaded and/or parallel) ordinary assignment."""
    expr_list = [p_expr(s)]
    if s.sy in augassign_ops:
        n1 = expr_list[0]
        op = s.sy[:-1]
        pos = s.position()
        s.next()
        n2 = p_expr(s)
        # Parse a limited form of augmented assignment:
        # 'name += expr' --> 'name = name + y'
        # with a specially marked binop node.
        # Augmented assignment to more complex expressions isn't supported yet.
        if isinstance(n1, ExprNodes.NameNode):
            n1copy = ExprNodes.NameNode(n1.pos, name = n1.name)
        else:
            s.error("not implemented: augmented assignment to an expression more complex than a variable name")
        binop = ExprNodes.binop_node(pos, op, n1, n2)
        binop.inplace = 1
        return Nodes.SingleAssignmentNode(pos, lhs = n1copy, rhs = binop)
    # Collect the LHSs of a cascaded assignment (a = b = ... = rhs).
    while s.sy == '=':
        s.next()
        expr_list.append(p_expr(s))
    if len(expr_list) == 1:
        expr = expr_list[0]
        return Nodes.ExprStatNode(expr.pos, expr = expr)
    else:
        expr_list_list = []
        flatten_parallel_assignments(expr_list, expr_list_list)
        nodes = []
        for expr_list in expr_list_list:
            lhs_list = expr_list[:-1]
            rhs = expr_list[-1]
            if len(lhs_list) == 1:
                node = Nodes.SingleAssignmentNode(rhs.pos,
                    lhs = lhs_list[0], rhs = rhs)
            else:
                node = Nodes.CascadedAssignmentNode(rhs.pos,
                    lhs_list = lhs_list, rhs = rhs)
            nodes.append(node)
        if len(nodes) == 1:
            return nodes[0]
        else:
            #return Nodes.StatListNode(nodes[0].pos, stats = nodes)
            return Nodes.ParallelAssignmentNode(nodes[0].pos, stats = nodes)

# Augmented assignment operators recognised above.
augassign_ops = (
    '+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=',
    '<<=', '>>=', '**='
)
def flatten_parallel_assignments(input, output):
    """Rearrange one (possibly cascaded) assignment into element-wise
    assignments.

    |input| holds the LHS expression nodes followed by the RHS. When
    all of them are sequence constructors with the same number of
    arguments, the assignment is split into one assignment per element,
    recursively; otherwise |input| is appended to |output| unchanged.
    """
    size = find_parallel_assignment_size(input)
    if size < 0:
        output.append(input)
        return
    for i in range(size):
        element_exprs = [expr.args[i] for expr in input]
        flatten_parallel_assignments(element_exprs, output)
def find_parallel_assignment_size(input):
    """Return the common argument count if every expression node in
    |input| is a sequence constructor of the same size, else -1.
    Reports an error (and returns -1) when the last node's size
    disagrees with an earlier one."""
    for expr in input:
        if not expr.is_sequence_constructor:
            return -1
    rhs_size = len(input[-1].args)
    for lhs in input[:-1]:
        lhs_size = len(lhs.args)
        if lhs_size != rhs_size:
            error(lhs.pos, "Unpacking sequence of wrong size (expected %d, got %d)"
                % (lhs_size, rhs_size))
            return -1
    return rhs_size
def p_print_statement(s):
    # Parse a 'print' statement.  s.sy == 'print' on entry.
    # ewc records a trailing comma, which suppresses the final newline.
    pos = s.position()
    s.next()
    if s.sy == '>>':
        s.error("'print >>' not yet implemented")
    args = []
    ewc = 0
    if s.sy not in ('NEWLINE', 'EOF'):
        args.append(p_simple_expr(s))
        while s.sy == ',':
            s.next()
            if s.sy in ('NEWLINE', 'EOF'):
                ewc = 1
                break
            args.append(p_simple_expr(s))
    return Nodes.PrintStatNode(pos,
        args = args, ends_with_comma = ewc)
def p_del_statement(s):
    """Parse a 'del' statement (current token is 'del')."""
    stat_pos = s.position()
    s.next()
    targets = p_simple_expr_list(s)
    return Nodes.DelStatNode(stat_pos, args = targets)
def p_pass_statement(s, with_newline = 0):
    """Parse a 'pass' statement, optionally requiring a trailing newline."""
    stat_pos = s.position()
    s.expect('pass')
    if with_newline:
        s.expect_newline("Expected a newline")
    return Nodes.PassStatNode(stat_pos)
def p_break_statement(s):
    """Parse a 'break' statement (current token is 'break')."""
    stat_pos = s.position()
    s.next()
    return Nodes.BreakStatNode(stat_pos)
def p_continue_statement(s):
    """Parse a 'continue' statement (current token is 'continue')."""
    stat_pos = s.position()
    s.next()
    return Nodes.ContinueStatNode(stat_pos)
def p_return_statement(s):
    """Parse a 'return' statement with an optional value expression."""
    stat_pos = s.position()
    s.next()
    if s.sy in statement_terminators:
        value = None
    else:
        value = p_expr(s)
    return Nodes.ReturnStatNode(stat_pos, value = value)
def p_raise_statement(s):
    # Parse a 'raise' statement with up to three comma-separated
    # operands (type, value, traceback).  s.sy == 'raise' on entry.
    pos = s.position()
    s.next()
    exc_type = None
    exc_value = None
    exc_tb = None
    if s.sy not in statement_terminators:
        exc_type = p_simple_expr(s)
        if s.sy == ',':
            s.next()
            exc_value = p_simple_expr(s)
            if s.sy == ',':
                s.next()
                exc_tb = p_simple_expr(s)
    return Nodes.RaiseStatNode(pos,
        exc_type = exc_type,
        exc_value = exc_value,
        exc_tb = exc_tb)
def p_import_statement(s):
    # Parse an 'import' or 'cimport' statement with one or more
    # comma-separated dotted names.  s.sy in ('import', 'cimport').
    pos = s.position()
    kind = s.sy
    s.next()
    items = [p_dotted_name(s, as_allowed = 1)]
    while s.sy == ',':
        s.next()
        items.append(p_dotted_name(s, as_allowed = 1))
    stats = []
    for pos, target_name, dotted_name, as_name in items:
        # 'cimport' gets a dedicated node; a plain 'import' becomes an
        # assignment of an ImportNode result to the local name.
        if kind == 'cimport':
            stat = Nodes.CImportStatNode(pos,
                module_name = dotted_name,
                as_name = as_name)
        else:
            stat = Nodes.SingleAssignmentNode(pos,
                lhs = ExprNodes.NameNode(pos,
                    name = as_name or target_name),
                rhs = ExprNodes.ImportNode(pos,
                    module_name = ExprNodes.StringNode(pos,
                        value = dotted_name),
                    name_list = None))
        stats.append(stat)
    return Nodes.StatListNode(pos, stats = stats)
def p_cinline_statement(s):
    # Parse a 'cinline' statement: an expression giving literal C code
    # to be emitted verbatim.  s.sy == 'cinline' on entry.
    # (Removed a dead local 'kind = s.sy' that was never used.)
    pos = s.position()
    s.next()
    string = p_simple_expr(s)
    return Nodes.InlineStatNode(pos, string=string)
def p_from_import_statement(s):
    # Parse a 'from ... import ...' or 'from ... cimport ...'
    # statement.  s.sy == 'from' on entry.
    pos = s.position()
    s.next()
    (dotted_name_pos, _, dotted_name, _) = \
        p_dotted_name(s, as_allowed = 0)
    if s.sy in ('import', 'cimport'):
        kind = s.sy
        s.next()
    else:
        s.error("Expected 'import' or 'cimport'")
    if s.sy == '*':
        s.error("'import *' not supported")
    imported_names = [p_imported_name(s)]
    while s.sy == ',':
        s.next()
        imported_names.append(p_imported_name(s))
    if kind == 'cimport':
        # Names cimported from another module become usable as type names.
        for (name_pos, name, as_name) in imported_names:
            local_name = as_name or name
            s.add_type_name(local_name)
        return Nodes.FromCImportStatNode(pos,
            module_name = dotted_name,
            imported_names = imported_names)
    else:
        # Build the name list passed to the import, and the (source
        # name, target NameNode) pairs for the assignments.
        imported_name_strings = []
        items = []
        for (name_pos, name, as_name) in imported_names:
            imported_name_strings.append(
                ExprNodes.StringNode(name_pos, value = name))
            items.append(
                (name,
                 ExprNodes.NameNode(name_pos,
                    name = as_name or name)))
        import_list = ExprNodes.ListNode(
            imported_names[0][0], args = imported_name_strings)
        return Nodes.FromImportStatNode(pos,
            module = ExprNodes.ImportNode(dotted_name_pos,
                module_name = ExprNodes.StringNode(dotted_name_pos,
                    value = dotted_name),
                name_list = import_list),
            items = items)
def p_imported_name(s):
    """Parse one 'name [as alias]' item of a from-import list.
    Returns (pos, name, as_name-or-None)."""
    name_pos = s.position()
    imported = p_ident(s)
    alias = p_as_name(s)
    return (name_pos, imported, alias)
def p_dotted_name(s, as_allowed):
    """Parse a dotted name, optionally followed by 'as alias'.
    Returns (pos, first_component, full_dotted_name, as_name-or-None)."""
    name_pos = s.position()
    first = p_ident(s)
    parts = [first]
    while s.sy == '.':
        s.next()
        parts.append(p_ident(s))
    alias = None
    if as_allowed:
        alias = p_as_name(s)
    return (name_pos, first, ".".join(parts), alias)
def p_as_name(s):
    """Parse an optional 'as name' clause; return the name or None."""
    if not (s.sy == 'IDENT' and s.systring == 'as'):
        return None
    s.next()
    return p_ident(s)
def p_assert_statement(s):
    """Parse an 'assert' statement with an optional message expression."""
    stat_pos = s.position()
    s.next()
    condition = p_simple_expr(s)
    message = None
    if s.sy == ',':
        s.next()
        message = p_simple_expr(s)
    return Nodes.AssertStatNode(stat_pos, cond = condition, value = message)
# Tokens which terminate a simple statement.
statement_terminators = (';', 'NEWLINE', 'EOF')
def p_if_statement(s):
    """Parse an 'if' statement together with its 'elif'/'else' clauses."""
    stat_pos = s.position()
    s.next()
    clauses = [p_if_clause(s)]
    while s.sy == 'elif':
        s.next()
        clauses.append(p_if_clause(s))
    else_body = p_else_clause(s)
    return Nodes.IfStatNode(stat_pos,
        if_clauses = clauses, else_clause = else_body)
def p_if_clause(s):
    """Parse the condition and suite of one 'if'/'elif' clause."""
    clause_pos = s.position()
    condition = p_simple_expr(s)
    suite = p_suite(s)
    return Nodes.IfClauseNode(clause_pos,
        condition = condition, body = suite)
def p_else_clause(s):
    """Parse an optional 'else' clause; return its suite or None."""
    if s.sy != 'else':
        return None
    s.next()
    return p_suite(s)
def p_while_statement(s):
    """Parse a 'while' statement with an optional 'else' clause."""
    stat_pos = s.position()
    s.next()
    condition = p_simple_expr(s)
    loop_body = p_suite(s)
    else_body = p_else_clause(s)
    return Nodes.WhileStatNode(stat_pos,
        condition = condition, body = loop_body,
        else_clause = else_body)
def p_for_statement(s):
    # Parse either a Python 'for ... in ...' loop or a Pyrex
    # 'for ... from lo <= i <= hi' integer loop.  s.sy == 'for'.
    pos = s.position()
    s.next()
    target = p_for_target(s)
    if s.sy == 'in':
        s.next()
        iterator = p_for_iterator(s)
        body = p_suite(s)
        else_clause = p_else_clause(s)
        return Nodes.ForInStatNode(pos,
            target = target,
            iterator = iterator,
            body = body,
            else_clause = else_clause)
    elif s.sy == 'from':
        # for-from: bound1 rel1 name rel2 bound2, where the target name
        # must be repeated and both relations must point the same way.
        s.next()
        bound1 = p_bit_expr(s)
        rel1 = p_for_from_relation(s)
        name2_pos = s.position()
        name2 = p_ident(s)
        rel2_pos = s.position()
        rel2 = p_for_from_relation(s)
        bound2 = p_bit_expr(s)
        if not target.is_name:
            error(target.pos,
                "Target of for-from statement must be a variable name")
        elif name2 <> target.name:
            error(name2_pos,
                "Variable name in for-from range does not match target")
        if rel1[0] <> rel2[0]:
            error(rel2_pos,
                "Relation directions in for-from do not match")
        body = p_suite(s)
        else_clause = p_else_clause(s)
        return Nodes.ForFromStatNode(pos,
            target = target,
            bound1 = bound1,
            relation1 = rel1,
            relation2 = rel2,
            bound2 = bound2,
            body = body,
            else_clause = else_clause)
def p_for_from_relation(s):
    """Parse one of the comparison operators allowed in a for-from
    statement ('<', '<=', '>' or '>=') and return it."""
    if s.sy in inequality_relations:
        op = s.sy
        s.next()
        return op
    else:
        # Fixed error message: it previously read "'<', '<=', '>' '>='",
        # missing the comma between the last two alternatives.
        s.error("Expected one of '<', '<=', '>', '>='")
# The relational operators accepted by p_for_from_relation.
inequality_relations = ('<', '<=', '>', '>=')
def p_for_target(s):
    # Parse the assignment target of a for statement; a comma-
    # separated series of targets is wrapped into a TupleNode.
    pos = s.position()
    expr = p_bit_expr(s)
    if s.sy == ',':
        s.next()
        exprs = [expr]
        while s.sy <> 'in':
            exprs.append(p_bit_expr(s))
            if s.sy <> ',':
                break
            s.next()
        return ExprNodes.TupleNode(pos, args = exprs)
    else:
        return expr
def p_for_iterator(s):
    """Parse the iterable part of a for-in statement."""
    iter_pos = s.position()
    sequence = p_expr(s)
    return ExprNodes.IteratorNode(iter_pos, sequence = sequence)
def p_try_statement(s):
    # Parse a 'try' statement: either try/except[/else] or
    # try/finally.  s.sy == 'try' on entry.
    pos = s.position()
    s.next()
    body = p_suite(s)
    except_clauses = []
    else_clause = None
    if s.sy in ('except', 'else'):
        while s.sy == 'except':
            except_clauses.append(p_except_clause(s))
        if s.sy == 'else':
            s.next()
            else_clause = p_suite(s)
        return Nodes.TryExceptStatNode(pos,
            body = body, except_clauses = except_clauses,
            else_clause = else_clause)
    elif s.sy == 'finally':
        s.next()
        finally_clause = p_suite(s)
        return Nodes.TryFinallyStatNode(pos,
            body = body, finally_clause = finally_clause)
    else:
        s.error("Expected 'except' or 'finally'")
def p_except_clause(s):
    # Parse one 'except [pattern [, target]]:' clause of a try
    # statement.  s.sy == 'except' on entry.
    pos = s.position()
    s.next()
    exc_type = None
    exc_value = None
    if s.sy <> ':':
        exc_type = p_simple_expr(s)
        if s.sy == ',':
            s.next()
            exc_value = p_simple_expr(s)
    body = p_suite(s)
    return Nodes.ExceptClauseNode(pos,
        pattern = exc_type, target = exc_value, body = body)
def p_include_statement(s, level):
    # Parse an 'include "file"' statement: the named file is located
    # via the compilation context, scanned with a nested PyrexScanner,
    # and parsed as a statement list at the given level.
    pos = s.position()
    s.next() # 'include'
    _, include_file_name = p_string_literal(s)
    s.expect_newline("Syntax error in include statement")
    include_file_path = s.context.find_include_file(include_file_name, pos)
    if include_file_path:
        f = open(include_file_path, "r")
        s2 = PyrexScanner(f, include_file_path, s)
        try:
            tree = p_statement_list(s2, level)
        finally:
            f.close()
        return tree
    else:
        # find_include_file reports the error itself; return no tree.
        return None
def p_simple_statement(s):
    # Dispatch on the current token to the parser for one simple
    # (single-line) statement; anything unrecognised is treated as an
    # expression or assignment.
    #print "p_simple_statement:", s.sy, s.systring ###
    if s.sy == 'global':
        node = p_global_statement(s)
    elif s.sy == 'print':
        node = p_print_statement(s)
    elif s.sy == 'del':
        node = p_del_statement(s)
    elif s.sy == 'break':
        node = p_break_statement(s)
    elif s.sy == 'continue':
        node = p_continue_statement(s)
    elif s.sy == 'return':
        node = p_return_statement(s)
    elif s.sy == 'raise':
        node = p_raise_statement(s)
    elif s.sy in ('import', 'cimport'):
        node = p_import_statement(s)
    elif s.sy == 'cinline':
        node = p_cinline_statement(s)
    elif s.sy == 'from':
        node = p_from_import_statement(s)
    elif s.sy == 'assert':
        node = p_assert_statement(s)
    elif s.sy == 'pass':
        node = p_pass_statement(s)
    else:
        node = p_expression_or_assignment(s)
    return node
def p_simple_statement_list(s):
    # Parse a series of simple statements on one line
    # separated by semicolons.  A single statement is returned
    # as-is; several are wrapped in a StatListNode.
    stat = p_simple_statement(s)
    if s.sy == ';':
        stats = [stat]
        while s.sy == ';':
            #print "p_simple_statement_list: maybe more to follow" ###
            s.next()
            if s.sy in ('NEWLINE', 'EOF'):
                break
            stats.append(p_simple_statement(s))
        stat = Nodes.StatListNode(stats[0].pos, stats = stats)
    s.expect_newline("Syntax error in simple statement list")
    return stat
def p_statement(s, level, cdef_flag = 0, visibility = 'private'):
    # Parse one statement.  'level' describes the enclosing context
    # ('module', 'module_pxd', 'function', 'class', 'c_class',
    # 'c_class_pxd', 'property', 'other') and restricts which
    # statements are legal there.
    #print "p_statement:", s.sy, s.systring ###
    if s.sy == 'ctypedef':
        if level not in ('module', 'module_pxd'):
            s.error("ctypedef statement not allowed here")
        return p_ctypedef_statement(s, level, visibility)
    if s.sy == 'cdef':
        cdef_flag = 1
        s.next()
    if cdef_flag:
        if level not in ('module', 'module_pxd', 'function', 'c_class', 'c_class_pxd'):
            s.error('cdef statement not allowed here')
        return p_cdef_statement(s, level, visibility)
    elif s.sy == 'def':
        if level not in ('module', 'class', 'c_class', 'property'):
            s.error('def statement not allowed here')
        return p_def_statement(s)
    elif s.sy == 'class':
        if level <> 'module':
            s.error("class definition not allowed here")
        return p_class_statement(s)
    elif s.sy == 'include':
        if level not in ('module', 'module_pxd'):
            s.error("include statement not allowed here")
        return p_include_statement(s, level)
    elif level == 'c_class' and s.sy == 'IDENT' and s.systring == 'property':
        return p_property_decl(s)
    else:
        # Extension type bodies may contain only declarations and 'pass'.
        if level in ('c_class', 'c_class_pxd'):
            if s.sy == 'pass':
                return p_pass_statement(s, with_newline = 1)
            else:
                s.error("Executable statement not allowed here")
        if s.sy == 'if':
            return p_if_statement(s)
        elif s.sy == 'while':
            return p_while_statement(s)
        elif s.sy == 'for':
            return p_for_statement(s)
        elif s.sy == 'try':
            return p_try_statement(s)
        else:
            return p_simple_statement_list(s)
def p_statement_list(s, level,
        cdef_flag = 0, visibility = 'private'):
    """Parse newline-separated statements until DEDENT or EOF."""
    #print "p_statement_list:", s.sy, s.systring ###
    start_pos = s.position()
    parsed = []
    while s.sy not in ('DEDENT', 'EOF'):
        parsed.append(p_statement(s, level,
            cdef_flag = cdef_flag, visibility = visibility))
    return Nodes.StatListNode(start_pos, stats = parsed)
def p_suite(s, level = 'other', cdef_flag = 0,
        visibility = 'private', with_doc = 0):
    # Parse the suite following a ':' — either an indented block, or
    # simple statements on the same line.  When with_doc is true, a
    # leading doc string is extracted and (doc, body) is returned;
    # otherwise just the body.
    pos = s.position()
    s.expect(':')
    doc = None
    stmts = []
    if s.sy == 'NEWLINE':
        s.next()
        s.expect_indent()
        if with_doc:
            doc = p_doc_string(s)
        body = p_statement_list(s,
            level = level,
            cdef_flag = cdef_flag,
            visibility = visibility)
        s.expect_dedent()
    else:
        # One-line suite: only allowed at levels that permit
        # executable statements; declaration levels get only 'pass'.
        if level in ('module', 'class', 'function', 'other'):
            body = p_simple_statement_list(s)
        else:
            body = p_pass_statement(s)
            s.expect_newline("Syntax error in declarations")
    if with_doc:
        return doc, body
    else:
        return body
def p_c_base_type(s, self_flag = 0):
    """Parse a C base type.  self_flag is true when this is the base
    type for the self argument of a C method of an extension type."""
    if s.sy == '(':
        return p_c_complex_base_type(s)
    return p_c_simple_base_type(s, self_flag)
def p_c_complex_base_type(s):
    """Parse a parenthesised base type plus declarator (current token is '(')."""
    start_pos = s.position()
    s.next()
    inner_type = p_c_base_type(s)
    inner_decl = p_c_declarator(s, empty = 1)
    s.expect(')')
    return Nodes.CComplexBaseTypeNode(start_pos,
        base_type = inner_type, declarator = inner_decl)
def p_c_simple_base_type(s, self_flag):
#print "p_c_simple_base_type: self_flag =", self_flag
is_basic = 0
signed = 1
longness = 0
pos = s.position()
module_path = []
if looking_at_base_type(s):
#print "p_c_simple_base_type: looking_at_base_type at", s.position()
is_basic = 1
signed = p_signed_or_unsigned(s)
longness = p_short_or_long(s)
if s.sy == 'IDENT' and s.systring in basic_c_type_names:
name = s.systring
s.next()
else:
name = 'int'
elif s.looking_at_type_name() or looking_at_dotted_name(s):
#print "p_c_simple_base_type: looking_at_type_name at", s.position()
name = s.systring
s.next()
while s.sy == '.':
module_path.append(name)
s.next()
name = p_ident(s)
else:
#print "p_c_simple_base_type: not looking at type at", s.position()
name = None
return Nodes.CSimpleBaseTypeNode(pos,
name = name, module_path = module_path,
is_basic_c_type = is_basic, signed = signed,
longness = longness, is_self_arg = self_flag)
def looking_at_type(s):
    # True if the current token could begin a type denotation.
    return looking_at_base_type(s) or s.looking_at_type_name()
def looking_at_base_type(s):
    # True if the current token is a basic C type keyword or modifier.
    #print "looking_at_base_type?", s.sy, s.systring, s.position()
    return s.sy == 'IDENT' and s.systring in base_type_start_words
def looking_at_dotted_name(s):
    # True if the current identifier is followed by a '.'.  Needs one
    # token of lookahead, which is undone with put_back().
    if s.sy == 'IDENT':
        name = s.systring
        s.next()
        result = s.sy == '.'
        s.put_back('IDENT', name)
        return result
    else:
        return 0
# Keywords that can begin a basic C type denotation.
base_type_start_words = (
    "char", "short", "int", "long", "float", "double",
    "void", "signed", "unsigned"
)
# Names of the basic C types themselves.
basic_c_type_names = (
    "void", "char", "int", "float", "double"
)
def p_signed_or_unsigned(s):
    """Consume an optional 'signed'/'unsigned' keyword.
    Returns 1 for signed (the default), 0 for unsigned."""
    if s.sy == 'IDENT':
        if s.systring == 'signed':
            s.next()
            return 1
        if s.systring == 'unsigned':
            s.next()
            return 0
    return 1
def p_short_or_long(s):
    """Consume 'short' or a run of 'long' keywords.
    Returns -1 for short, otherwise the number of 'long's seen."""
    if s.sy == 'IDENT' and s.systring == 'short':
        s.next()
        return -1
    count = 0
    while s.sy == 'IDENT' and s.systring == 'long':
        count += 1
        s.next()
    return count
def p_opt_cname(s):
    """Parse an optional C name given as a string literal; None if absent."""
    literal = p_opt_string_literal(s)
    if not literal:
        return None
    _, cname = literal
    return cname
def p_c_declarator(s, empty = 0, is_type = 0, cmethod_flag = 0):
    # Parse a C declarator: pointer prefixes, an optional
    # (parenthesised or named) core, and array/function suffixes.
    # If empty is true, the declarator must be
    # empty, otherwise we don't care.
    # If cmethod_flag is true, then if this declarator declares
    # a function, it's a C method of an extension type.
    pos = s.position()
    if s.sy == '*':
        s.next()
        base = p_c_declarator(s, empty, is_type, cmethod_flag)
        result = Nodes.CPtrDeclaratorNode(pos,
            base = base)
    elif s.sy == '**': # scanner returns this as a single token
        s.next()
        base = p_c_declarator(s, empty, is_type, cmethod_flag)
        result = Nodes.CPtrDeclaratorNode(pos,
            base = Nodes.CPtrDeclaratorNode(pos,
                base = base))
    else:
        if s.sy == '(':
            s.next()
            result = p_c_declarator(s, empty, is_type, cmethod_flag)
            s.expect(')')
        else:
            if s.sy == 'IDENT':
                name = s.systring
                if is_type:
                    # A ctypedef declarator introduces a new type name.
                    s.add_type_name(name)
                if empty:
                    error(s.position(), "Declarator should be empty")
                s.next()
                cname = p_opt_cname(s)
            else:
                name = ""
                cname = None
            result = Nodes.CNameDeclaratorNode(pos,
                name = name, cname = cname)
        # Apply array and function suffixes, innermost first.
        while s.sy in ('[', '('):
            if s.sy == '[':
                s.next()
                if s.sy <> ']':
                    dim = p_expr(s)
                else:
                    dim = None
                s.expect(']')
                result = Nodes.CArrayDeclaratorNode(pos,
                    base = result, dimension = dim)
            else: # sy == '('
                s.next()
                args = p_c_arg_list(s, in_pyfunc = 0, cmethod_flag = cmethod_flag)
                ellipsis = p_optional_ellipsis(s)
                s.expect(')')
                exc_val, exc_check = p_exception_value_clause(s)
                result = Nodes.CFuncDeclaratorNode(pos,
                    base = result, args = args, has_varargs = ellipsis,
                    exception_value = exc_val, exception_check = exc_check)
                # Only the innermost function suffix is a C method.
                cmethod_flag = 0
    return result
def p_exception_value_clause(s):
    # Parse an optional 'except' clause after a C function declarator.
    # Returns (exception_value, exception_check) where '*' means
    # always-check with no sentinel and '?' marks a checked sentinel.
    exc_val = None
    exc_check = 0
    if s.sy == 'except':
        s.next()
        if s.sy == '*':
            exc_check = 1
            s.next()
        else:
            if s.sy == '?':
                exc_check = 1
                s.next()
            exc_val = p_exception_value(s)
    return exc_val, exc_check
def p_exception_value(s):
    # Parse the sentinel value of an 'except' clause: an optionally
    # negated int or float literal, or NULL.  The sign is folded into
    # the literal's systring before parsing the atom.
    sign = ""
    if s.sy == "-":
        sign = "-"
        s.next()
    if s.sy in ('INT', 'FLOAT', 'NULL'):
        s.systring = sign + s.systring
        return p_atom(s)
    else:
        s.error("Exception value must be an int or float literal or NULL")
# Tokens that end a C argument list, and those that may trail one.
c_arg_list_terminators = ('*', '**', '.', ')')
c_arg_list_trailers = ('.', '*', '**')
def p_c_arg_list(s, in_pyfunc, cmethod_flag = 0):
    # Parse a comma-separated list of C argument declarations.
    # cmethod_flag is passed through only for the first argument
    # (the potential 'self' of a C method).
    args = []
    if s.sy not in c_arg_list_terminators:
        args.append(p_c_arg_decl(s, in_pyfunc, cmethod_flag))
        while s.sy == ',':
            s.next()
            if s.sy in c_arg_list_trailers:
                break
            args.append(p_c_arg_decl(s, in_pyfunc))
    return args
def p_optional_ellipsis(s):
    """Consume a '...' if present; return 1 if found, else 0."""
    if s.sy != '.':
        return 0
    expect_ellipsis(s)
    return 1
def p_c_arg_decl(s, in_pyfunc, cmethod_flag = 0):
    # Parse one C argument declaration: base type, declarator, an
    # optional 'not None' qualifier (Python functions only) and an
    # optional default value.
    pos = s.position()
    not_none = 0
    default = None
    base_type = p_c_base_type(s, cmethod_flag)
    declarator = p_c_declarator(s)
    if s.sy == 'not':
        s.next()
        if s.sy == 'IDENT' and s.systring == 'None':
            s.next()
        else:
            s.error("Expected 'None'")
        if not in_pyfunc:
            error(pos, "'not None' only allowed in Python functions")
        not_none = 1
    if s.sy == '=':
        s.next()
        default = p_simple_expr(s)
    return Nodes.CArgDeclNode(pos,
        base_type = base_type,
        declarator = declarator,
        not_none = not_none,
        default = default)
def p_cdef_statement(s, level, visibility = 'private'):
    # Parse the remainder of a 'cdef' statement: an extern block,
    # extension type, struct/union/enum definition, bare 'pass', or a
    # C function/variable declaration.
    pos = s.position()
    visibility = p_visibility(s, visibility)
    if visibility == 'extern' and s.sy in ('from' ,':'):
        return p_cdef_extern_block(s, level, pos)
    elif s.sy == 'class':
        if level not in ('module', 'module_pxd'):
            error(pos, "Extension type definition not allowed here")
        return p_c_class_definition(s, level, pos, visibility = visibility)
    elif s.sy == 'IDENT' and s.systring in struct_union_or_enum:
        if level not in ('module', 'module_pxd'):
            error(pos, "C struct/union/enum definition not allowed here")
        if visibility == 'public':
            error(pos, "Public struct/union/enum definition not implemented")
        if s.systring == "enum":
            return p_c_enum_definition(s, pos)
        else:
            return p_c_struct_or_union_definition(s, pos)
    elif s.sy == 'pass':
        node = p_pass_statement(s)
        s.expect_newline('Expected a newline')
        return node
    else:
        return p_c_func_or_var_declaration(s, level, pos, visibility)
def p_cdef_extern_block(s, level, pos):
    """Parse a 'cdef extern from ...' block; '*' means no include file."""
    s.expect('from')
    if s.sy == '*':
        s.next()
        header = None
    else:
        _, header = p_string_literal(s)
    block_body = p_suite(s, level, cdef_flag = 1, visibility = 'extern')
    return Nodes.CDefExternNode(pos,
        include_file = header,
        body = block_body)
# Keywords introducing a C compound type definition.
struct_union_or_enum = (
    "struct", "union", "enum"
)
def p_c_enum_definition(s, pos, typedef_flag = 0):
    # Parse a C enum definition, either anonymous or named (a name
    # becomes a type name).  Items may appear on one line or in an
    # indented block.  s.sy == ident 'enum' on entry.
    s.next()
    if s.sy == 'IDENT':
        name = s.systring
        s.next()
        s.add_type_name(name)
        cname = p_opt_cname(s)
    else:
        name = None
        cname = None
    items = None
    s.expect(':')
    items = []
    if s.sy <> 'NEWLINE':
        p_c_enum_line(s, items)
    else:
        s.next() # 'NEWLINE'
        s.expect_indent()
        while s.sy not in ('DEDENT', 'EOF'):
            p_c_enum_line(s, items)
        s.expect_dedent()
    return Nodes.CEnumDefNode(pos, name = name, cname = cname,
        items = items, typedef_flag = typedef_flag)
def p_c_enum_line(s, items):
    # Parse one line of comma-separated enum items, appending each
    # to |items|; a trailing comma before the newline is allowed.
    p_c_enum_item(s, items)
    while s.sy == ',':
        s.next()
        if s.sy in ('NEWLINE', 'EOF'):
            break
        p_c_enum_item(s, items)
    s.expect_newline("Syntax error in enum item list")
def p_c_enum_item(s, items):
    # Parse one enum item: name, optional C name, optional '=' value.
    pos = s.position()
    name = p_ident(s)
    cname = p_opt_cname(s)
    value = None
    if s.sy == '=':
        s.next()
        value = p_simple_expr(s)
    items.append(Nodes.CEnumDefItemNode(pos,
        name = name, cname = cname, value = value))
def p_c_struct_or_union_definition(s, pos, typedef_flag = 0):
    # Parse a C struct or union definition; the name becomes a type
    # name.  A ':' introduces an indented attribute block, otherwise
    # the definition is a forward declaration.
    # s.sy == ident 'struct' or 'union' on entry.
    kind = s.systring
    s.next()
    name = p_ident(s)
    cname = p_opt_cname(s)
    s.add_type_name(name)
    attributes = None
    if s.sy == ':':
        s.next()
        s.expect('NEWLINE')
        s.expect_indent()
        attributes = []
        while s.sy <> 'DEDENT':
            attributes.append(
                p_c_func_or_var_declaration(s, level = 'other', pos = s.position()))
        s.expect_dedent()
    else:
        s.expect_newline("Syntax error in struct or union definition")
    return Nodes.CStructOrUnionDefNode(pos,
        name = name, cname = cname, kind = kind, attributes = attributes,
        typedef_flag = typedef_flag)
def p_visibility(s, prev_visibility):
    # Parse an optional visibility keyword ('extern', 'public',
    # 'readonly'), checking it does not conflict with one already
    # given; returns the effective visibility.
    pos = s.position()
    visibility = prev_visibility
    if s.sy == 'IDENT' and s.systring in ('extern', 'public', 'readonly'):
        visibility = s.systring
        if prev_visibility <> 'private' and visibility <> prev_visibility:
            s.error("Conflicting visibility options '%s' and '%s'"
                % (prev_visibility, visibility))
        s.next()
    return visibility
def p_c_func_or_var_declaration(s, level, pos, visibility = 'private'):
    # Parse either a C function definition (declarator followed by a
    # ':' suite) or one or more C variable declarations sharing a
    # base type.
    cmethod_flag = level in ('c_class', 'c_class_pxd')
    base_type = p_c_base_type(s)
    declarator = p_c_declarator(s, cmethod_flag = cmethod_flag)
    if s.sy == ':':
        if level not in ('module', 'c_class'):
            s.error("C function definition not allowed here")
        suite = p_suite(s, 'function')
        result = Nodes.CFuncDefNode(pos,
            visibility = visibility,
            base_type = base_type,
            declarator = declarator,
            body = suite)
    else:
        if level == 'module_pxd' and visibility <> 'extern':
            error(pos,
                "Only 'extern' C function or variable declaration allowed in .pxd file")
        declarators = [declarator]
        while s.sy == ',':
            s.next()
            declarator = p_c_declarator(s, cmethod_flag = cmethod_flag)
            declarators.append(declarator)
        s.expect_newline("Syntax error in C variable declaration")
        result = Nodes.CVarDefNode(pos,
            visibility = visibility,
            base_type = base_type,
            declarators = declarators)
    return result
def p_ctypedef_statement(s, level, visibility = 'private'):
    # Parse a 'ctypedef' statement: a typedef'd extension type,
    # struct/union/enum, or plain base-type + declarator.
    # s.sy == 'ctypedef' on entry.
    pos = s.position()
    s.next()
    visibility = p_visibility(s, visibility)
    if s.sy == 'class':
        return p_c_class_definition(s, level, pos,
            visibility = visibility,
            typedef_flag = 1)
    elif s.sy == 'IDENT' and s.systring in ('struct', 'union', 'enum'):
        if s.systring == 'enum':
            return p_c_enum_definition(s, pos, typedef_flag = 1)
        else:
            return p_c_struct_or_union_definition(s, pos, typedef_flag = 1)
    else:
        base_type = p_c_base_type(s)
        declarator = p_c_declarator(s, is_type = 1)
        s.expect_newline("Syntax error in ctypedef statement")
        return Nodes.CTypeDefNode(pos,
            base_type = base_type, declarator = declarator)
def p_def_statement(s):
    # Parse a Python 'def' statement: name, argument list with
    # optional * and ** arguments, and the function suite.
    # s.sy == 'def' on entry.
    # (Cleanup: removed a stray semicolon after s.expect('(') and a
    # dead 'args = []' initializer that was immediately overwritten.)
    pos = s.position()
    s.next()
    name = p_ident(s)
    s.expect('(')
    args = p_c_arg_list(s, in_pyfunc = 1)
    star_arg = None
    starstar_arg = None
    if s.sy == '*':
        s.next()
        star_arg = p_py_arg_decl(s)
        if s.sy == ',':
            s.next()
            if s.sy == '**':
                s.next()
                starstar_arg = p_py_arg_decl(s)
    elif s.sy == '**':
        s.next()
        starstar_arg = p_py_arg_decl(s)
    s.expect(')')
    doc, body = p_suite(s, 'function', with_doc = 1)
    return Nodes.DefNode(pos, name = name, args = args,
        star_arg = star_arg, starstar_arg = starstar_arg,
        doc = doc, body = body)
def p_py_arg_decl(s):
    """Parse the name of a * or ** argument in a def statement."""
    arg_pos = s.position()
    arg_name = p_ident(s)
    return Nodes.PyArgDeclNode(arg_pos, name = arg_name)
def p_class_statement(s):
    # Parse a Python class definition with an optional base list.
    # s.sy == 'class' on entry.
    pos = s.position()
    s.next()
    class_name = p_ident(s)
    if s.sy == '(':
        s.next()
        base_list = p_simple_expr_list(s)
        s.expect(')')
    else:
        base_list = []
    doc, body = p_suite(s, 'class', with_doc = 1)
    return Nodes.PyClassDefNode(pos,
        name = class_name,
        bases = ExprNodes.TupleNode(pos, args = base_list),
        doc = doc, body = body)
def p_c_class_definition(s, level, pos,
        visibility = 'private', typedef_flag = 0):
    # Parse an extension type ('cdef class') definition: optionally
    # qualified name (extern only), optional 'as' alias, at most one
    # base class, optional [object/type] name options, and an optional
    # body suite.  s.sy == 'class' on entry.
    s.next()
    module_path = []
    class_name = p_ident(s)
    while s.sy == '.':
        s.next()
        module_path.append(class_name)
        class_name = p_ident(s)
    if module_path and visibility <> 'extern':
        error(pos, "Qualified class name only allowed for 'extern' C class")
    if module_path and s.sy == 'IDENT' and s.systring == 'as':
        s.next()
        as_name = p_ident(s)
    else:
        as_name = class_name
    # The (possibly aliased) class name becomes usable as a type name.
    s.add_type_name(as_name)
    objstruct_name = None
    typeobj_name = None
    base_class_module = None
    base_class_name = None
    if s.sy == '(':
        s.next()
        base_class_path = [p_ident(s)]
        while s.sy == '.':
            s.next()
            base_class_path.append(p_ident(s))
        if s.sy == ',':
            s.error("C class may only have one base class")
        s.expect(')')
        base_class_module = ".".join(base_class_path[:-1])
        base_class_name = base_class_path[-1]
    if s.sy == '[':
        if visibility not in ('public', 'extern'):
            error(s.position(), "Name options only allowed for 'public' or 'extern' C class")
        objstruct_name, typeobj_name = p_c_class_options(s)
    if s.sy == ':':
        if level == 'module_pxd':
            body_level = 'c_class_pxd'
        else:
            body_level = 'c_class'
        doc, body = p_suite(s, body_level, with_doc = 1)
    else:
        s.expect_newline("Syntax error in C class definition")
        doc = None
        body = None
    # Consistency checks between visibility and the options supplied.
    if visibility == 'extern':
        if not module_path:
            error(pos, "Module name required for 'extern' C class")
        if typeobj_name:
            error(pos, "Type object name specification not allowed for 'extern' C class")
    elif visibility == 'public':
        if not objstruct_name:
            error(pos, "Object struct name specification required for 'public' C class")
        if not typeobj_name:
            error(pos, "Type object name specification required for 'public' C class")
    return Nodes.CClassDefNode(pos,
        visibility = visibility,
        typedef_flag = typedef_flag,
        module_name = ".".join(module_path),
        class_name = class_name,
        as_name = as_name,
        base_class_module = base_class_module,
        base_class_name = base_class_name,
        objstruct_name = objstruct_name,
        typeobj_name = typeobj_name,
        in_pxd = level == 'module_pxd',
        doc = doc,
        body = body)
def p_c_class_options(s):
    # Parse the bracketed name options of a C class definition:
    # [object OBJSTRUCT, type TYPEOBJ] in either order.
    objstruct_name = None
    typeobj_name = None
    s.expect('[')
    while 1:
        if s.sy <> 'IDENT':
            break
        if s.systring == 'object':
            s.next()
            objstruct_name = p_ident(s)
        elif s.systring == 'type':
            s.next()
            typeobj_name = p_ident(s)
        if s.sy <> ',':
            break
        s.next()
    s.expect(']', "Expected 'object' or 'type'")
    return objstruct_name, typeobj_name
def p_property_decl(s):
    """Parse a 'property' block inside an extension type."""
    prop_pos = s.position()
    s.next() # skip 'property'
    prop_name = p_ident(s)
    doc, body = p_suite(s, 'property', with_doc = 1)
    return Nodes.PropertyNode(prop_pos, name = prop_name, doc = doc, body = body)
def p_doc_string(s):
    """Parse an optional doc string; return its value or None."""
    if s.sy not in ('STRING', 'BEGIN_STRING'):
        return None
    _, value = p_cat_string_literal(s)
    if s.sy != 'EOF':
        s.expect_newline("Syntax error in doc string")
    return value
def p_module(s, pxd):
    # Parse a whole module (.pyx) or declaration file (.pxd):
    # an optional doc string followed by a statement list to EOF.
    s.add_type_name("object")
    pos = s.position()
    doc = p_doc_string(s)
    if pxd:
        level = 'module_pxd'
    else:
        level = 'module'
    body = p_statement_list(s, level)
    if s.sy <> 'EOF':
        s.error("Syntax error in statement [%s,%s]" % (
            repr(s.sy), repr(s.systring)))
    return Nodes.ModuleNode(pos, doc = doc, body = body)
#----------------------------------------------
#
# Debugging
#
#----------------------------------------------
def print_parse_tree(f, node, level, key = None):
    # Debugging aid: write an indented dump of a parse tree (tuples,
    # Node instances or lists) to file object f.  'level' controls the
    # indent depth; 'key' labels the node with its attribute name.
    ind = "  " * level
    if node:
        f.write(ind)
        if key:
            f.write("%s: " % key)
        t = type(node)
        if t == TupleType:
            f.write("(%s @ %s\n" % (node[0], node[1]))
            for i in xrange(2, len(node)):
                print_parse_tree(f, node[i], level+1)
            f.write("%s)\n" % ind)
            return
        elif isinstance(node, Node):
            try:
                tag = node.tag
            except AttributeError:
                tag = node.__class__.__name__
            f.write("%s @ %s\n" % (tag, node.pos))
            for name, value in node.__dict__.items():
                if name <> 'tag' and name <> 'pos':
                    print_parse_tree(f, value, level+1, name)
            return
        elif t == ListType:
            f.write("[\n")
            for i in xrange(len(node)):
                print_parse_tree(f, node[i], level+1)
            f.write("%s]\n" % ind)
            return
    f.write("%s%s\n" % (ind, node))
| Python |
#
# Pyrex - Errors
#
import sys
from Pyrex.Utils import open_new_file
class PyrexError(Exception):
    """Base class for all errors reported by the Pyrex compiler."""
    pass
class CompileError(PyrexError):
    """An error in the user's source code.

    position, when given, is a (filename, line, column) tuple used to
    prefix the formatted message.
    """
    def __init__(self, position = None, message = ""):
        self.position = position
        self.message = message
        if position:
            prefix = "%s:%d:%d: " % position
        else:
            prefix = ""
        Exception.__init__(self, prefix + message)
class InternalError(Exception):
    """A bug in the compiler itself; should never reach the user."""
    def __init__(self, message):
        Exception.__init__(self,
            "Internal compiler error: %s" % message)
# Global error-reporting state, (re)initialised by open_listing_file().
listing_file = None   # file the error listing is written to, if any
num_errors = 0        # count of errors reported so far
echo_file = None      # stream errors are echoed to (e.g. stderr), if any
def open_listing_file(path, echo_to_stderr = 1):
    # Begin a new error listing. If path is None, no file
    # is opened, the error counter is just reset.
    # When echo_to_stderr is true, errors are also written to stderr.
    global listing_file, num_errors, echo_file
    if path is not None:
        listing_file = open_new_file(path)
    else:
        listing_file = None
    if echo_to_stderr:
        echo_file = sys.stderr
    else:
        echo_file = None
    num_errors = 0
def close_listing_file():
    """Close and forget the current error listing file, if any."""
    global listing_file
    if listing_file:
        listing_file.close()
    listing_file = None
def error(position, message):
    # Report a compile error: write it to the listing file and/or echo
    # stream, bump the global error count, and return the CompileError
    # object (which the caller may choose to raise).
    #print "Errors.error:", repr(position), repr(message) ###
    global num_errors
    err = CompileError(position, message)
    line = "%s\n" % err
    if listing_file:
        listing_file.write(line)
    if echo_file:
        echo_file.write(line)
    num_errors = num_errors + 1
    return err
| Python |
#
# Pyrex - Parse tree nodes for expressions
#
from string import join
from Errors import error, InternalError
import Naming
from Nodes import Node
import PyrexTypes
from PyrexTypes import py_object_type
import Symtab
import Options
from Pyrex.Debugging import print_call_chain
from DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
class ExprNode(Node):
    """Abstract base class for all expression nodes in the parse tree.

    Subclasses declare the attribute names of their child expression
    nodes in the 'subexprs' class attribute; the framework methods
    defined here use that list to drive the analysis and code
    generation phases described in the comments below.
    """
    # subexprs [string] Class var holding names of subexpr node attrs
    # type PyrexType Type of the result
    # result string C code fragment
    # is_temp boolean Result is in a temporary variable
    # is_sequence_constructor
    # boolean Is a list or tuple constructor expression
    # saved_subexpr_nodes
    # [ExprNode or [ExprNode or None] or None]
    # Cached result of subexpr_nodes()
    # The Analyse Expressions phase for expressions is split
    # into two sub-phases:
    #
    # Analyse Types
    # Determines the result type of the expression based
    # on the types of its sub-expressions, and inserts
    # coercion nodes into the expression tree where needed.
    # Marks nodes which will need to have temporary variables
    # allocated.
    #
    # Allocate Temps
    # Allocates temporary variables where needed, and fills
    # in the result field of each node.
    #
    # ExprNode provides some convenience routines which
    # perform both of the above phases. These should only
    # be called from statement nodes, and only when no
    # coercion nodes need to be added around the expression
    # being analysed. In that case, the above two phases
    # should be invoked separately.
    #
    # Framework code in ExprNode provides much of the common
    # processing for the various phases. It makes use of the
    # 'subexprs' class attribute of ExprNodes, which should
    # contain a list of the names of attributes which can
    # hold sub-nodes or sequences of sub-nodes.
    #
    # The framework makes use of a number of abstract methods.
    # Their responsibilities are as follows.
    #
    # Declaration Analysis phase
    #
    # analyse_target_declaration
    # Called during the Analyse Declarations phase to analyse
    # the LHS of an assignment or argument of a del statement.
    # Nodes which cannot be the LHS of an assignment need not
    # implement it.
    #
    # Expression Analysis phase
    #
    # analyse_types
    # - Call analyse_types on all sub-expressions.
    # - Check operand types, and wrap coercion nodes around
    # sub-expressions where needed.
    # - Set the type of this node.
    # - If a temporary variable will be required for the
    # result, set the is_temp flag of this node.
    #
    # analyse_target_types
    # Called during the Analyse Types phase to analyse
    # the LHS of an assignment or argument of a del
    # statement. Similar responsibilities to analyse_types.
    #
    # allocate_temps
    # - Call allocate_temps for all sub-nodes.
    # - Call allocate_temp for this node.
    # - If a temporary was allocated, call release_temp on
    # all sub-expressions.
    #
    # A default implementation of allocate_temps is
    # provided which uses the following abstract method:
    #
    # result_code
    # - Return a C code fragment evaluating to
    # the result. This is only called when the
    # result is not a temporary.
    #
    # check_const
    # - Check that this node and its subnodes form a
    # legal constant expression. If so, do nothing,
    # otherwise call not_const.
    #
    # The default implementation of check_const
    # assumes that the expression is not constant.
    #
    # check_const_addr
    # - Same as check_const, except check that the
    # expression is a C lvalue whose address is
    # constant. Otherwise, call addr_not_const.
    #
    # The default implementation of calc_const_addr
    # assumes that the expression is not a constant
    # lvalue.
    #
    # Code Generation phase
    #
    # generate_evaluation_code
    # - Call generate_evaluation_code for sub-expressions.
    # - Perform the functions of generate_result_code
    # (see below).
    # - If result is temporary, call generate_disposal_code
    # on all sub-expressions.
    #
    # A default implementation of generate_evaluation_code
    # is provided which uses the following abstract method:
    #
    # generate_result_code
    # - Generate any C statements necessary to calculate
    # the result of this node from the results of its
    # sub-expressions.
    #
    # generate_assignment_code
    # Called on the LHS of an assignment.
    # - Call generate_evaluation_code for sub-expressions.
    # - Generate code to perform the assignment.
    # - If the assignment absorbed a reference, call
    # generate_post_assignment_code on the RHS,
    # otherwise call generate_disposal_code on it.
    #
    # generate_deletion_code
    # Called on an argument of a del statement.
    # - Call generate_evaluation_code for sub-expressions.
    # - Generate code to perform the deletion.
    # - Call generate_disposal_code on all sub-expressions.
    #
    # result_as_extension_type
    # Normally, the results of all nodes whose type
    # is a Python object, either generic or an extension
    # type, are returned as a generic Python object, so
    # that they can be passed directly to Python/C API
    # routines. This method is called to obtain the
    # result as the actual type of the node. It is only
    # called when the type is known to actually be an
    # extension type, and nodes whose result can never
    # be an extension type need not implement it.
    #
    # Class-level defaults; subclasses override as needed.
    is_sequence_constructor = 0
    is_attribute = 0
    saved_subexpr_nodes = None
    is_temp = 0
    def not_implemented(self, method_name):
        # Report (with a call chain for debugging) that a subclass
        # failed to override a required abstract method.
        print_call_chain(method_name, "not implemented") ###
        raise InternalError(
            "%s.%s not implemented" %
                (self.__class__.__name__, method_name))
    def is_lvalue(self):
        return 0
    def is_ephemeral(self):
        # An ephemeral node is one whose result is in
        # a Python temporary and we suspect there are no
        # other references to it. Certain operations are
        # disallowed on such values, since they are
        # likely to result in a dangling pointer.
        return self.type.is_pyobject and self.is_temp
    def subexpr_nodes(self):
        # Extract a list of subexpression nodes based
        # on the contents of the subexprs class attribute.
        # The result is cached in saved_subexpr_nodes.
        if self.saved_subexpr_nodes is None:
            nodes = []
            for name in self.subexprs:
                item = getattr(self, name)
                if item:
                    if isinstance(item, ExprNode):
                        nodes.append(item)
                    else:
                        # Attribute holds a sequence of sub-nodes.
                        nodes.extend(item)
            self.saved_subexpr_nodes = nodes
        return self.saved_subexpr_nodes
    # ------------- Declaration Analysis ----------------
    def analyse_target_declaration(self, env):
        error(self.pos, "Cannot assign to or delete this")
    # ------------- Expression Analysis ----------------
    def analyse_const_expression(self, env):
        # Called during the analyse_declarations phase of a
        # constant expression. Analyses the expression's type,
        # checks whether it is a legal const expression,
        # and determines its value.
        self.analyse_types(env)
        self.allocate_temps(env)
        self.check_const()
    def analyse_expressions(self, env):
        # Convenience routine performing both the Type
        # Analysis and Temp Allocation phases for a whole
        # expression.
        self.analyse_types(env)
        self.allocate_temps(env)
    def analyse_target_expression(self, env):
        # Convenience routine performing both the Type
        # Analysis and Temp Allocation phases for the LHS of
        # an assignment.
        self.analyse_target_types(env)
        self.allocate_target_temps(env)
    def analyse_boolean_expression(self, env):
        # Analyse expression and coerce to a boolean.
        self.analyse_types(env)
        bool = self.coerce_to_boolean(env)
        bool.allocate_temps(env)
        return bool
    def analyse_temp_boolean_expression(self, env):
        # Analyse boolean expression and coerce result into
        # a temporary. This is used when a branch is to be
        # performed on the result and we won't have an
        # opportunity to ensure disposal code is executed
        # afterwards. By forcing the result into a temporary,
        # we ensure that all disposal has been done by the
        # time we get the result.
        self.analyse_types(env)
        bool = self.coerce_to_boolean(env)
        temp_bool = bool.coerce_to_temp(env)
        temp_bool.allocate_temps(env)
        return temp_bool
    # --------------- Type Analysis ------------------
    def analyse_as_module(self, env):
        # If this node can be interpreted as a reference to a
        # cimported module, return its scope, else None.
        return None
    def analyse_as_extension_type(self, env):
        # If this node can be interpreted as a reference to an
        # extension type, return its type, else None.
        return None
    def analyse_types(self, env):
        self.not_implemented("analyse_types")
    def analyse_target_types(self, env):
        self.analyse_types(env)
    def check_const(self):
        self.not_const()
    def not_const(self):
        error(self.pos, "Not allowed in a constant expression")
    def check_const_addr(self):
        self.addr_not_const()
    def addr_not_const(self):
        error(self.pos, "Address is not constant")
    # ----------------- Result Allocation -----------------
    def result_in_temp(self):
        # Return true if result is in a temporary owned by
        # this node or one of its subexpressions. Overridden
        # by certain nodes which can share the result of
        # a subnode.
        return self.is_temp
    def allocate_target_temps(self, env):
        # Perform allocate_temps for the LHS of an assignment.
        if debug_temp_alloc:
            print self, "Allocating target temps"
        self.allocate_subexpr_temps(env)
        self.result = self.target_code()
    def allocate_temps(self, env, result = None):
        # Allocate temporary variables for this node and
        # all its sub-expressions. If a result is specified,
        # this must be a temp node and the specified variable
        # is used as the result instead of allocating a new
        # one.
        if debug_temp_alloc:
            print self, "Allocating temps"
        self.allocate_subexpr_temps(env)
        self.allocate_temp(env, result)
        if self.is_temp:
            self.release_subexpr_temps(env)
    def allocate_subexpr_temps(self, env):
        # Allocate temporary variables for all sub-expressions
        # of this node.
        if debug_temp_alloc:
            print self, "Allocating temps for:", self.subexprs
        for node in self.subexpr_nodes():
            if node:
                if debug_temp_alloc:
                    print self, "Allocating temps for", node
                node.allocate_temps(env)
    def allocate_temp(self, env, result = None):
        # If this node requires a temporary variable for its
        # result, allocate one, otherwise set the result to
        # a C code fragment. If a result is specified,
        # this must be a temp node and the specified variable
        # is used as the result instead of allocating a new
        # one.
        if debug_temp_alloc:
            print self, "Allocating temp"
        if result:
            if not self.is_temp:
                raise InternalError("Result forced on non-temp node")
            self.result = result
        elif self.is_temp:
            type = self.type
            if not type.is_void:
                if type.is_pyobject:
                    # All Python objects share generic PyObject* temps.
                    type = PyrexTypes.py_object_type
                self.result = env.allocate_temp(type)
            else:
                self.result = None
            if debug_temp_alloc:
                print self, "Allocated result", self.result
                #print_call_chain(self, "allocated temp", self.result)
        else:
            self.result = self.result_code()
    def target_code(self):
        # Return code fragment for use as LHS of a C assignment.
        return self.result_code()
    def result_code(self):
        self.not_implemented("result_code")
    def release_target_temp(self, env):
        # Release temporaries used by LHS of an assignment.
        self.release_subexpr_temps(env)
    def release_temp(self, env):
        # If this node owns a temporary result, release it,
        # otherwise release results of its sub-expressions.
        if self.is_temp:
            if debug_temp_alloc:
                print self, "Releasing result", self.result
            env.release_temp(self.result)
        else:
            self.release_subexpr_temps(env)
    def release_subexpr_temps(self, env):
        # Release the results of all sub-expressions of
        # this node.
        for node in self.subexpr_nodes():
            if node:
                node.release_temp(env)
    # ---------------- Code Generation -----------------
    def make_owned_reference(self, code):
        # If result is a pyobject, make sure we own
        # a reference to it.
        #if self.type.is_pyobject and not self.is_temp:
        if self.type.is_pyobject and not self.result_in_temp():
            #code.put_incref(self.result, self.type)
            code.put_incref(self.result, py_object_type)
    def generate_evaluation_code(self, code):
        # Generate code to evaluate this node and
        # its sub-expressions, and dispose of any
        # temporary results of its sub-expressions.
        self.generate_subexpr_evaluation_code(code)
        self.generate_result_code(code)
        if self.is_temp:
            self.generate_subexpr_disposal_code(code)
    def generate_subexpr_evaluation_code(self, code):
        for node in self.subexpr_nodes():
            node.generate_evaluation_code(code)
    def generate_result_code(self, code):
        self.not_implemented("generate_result_code")
    def generate_disposal_code(self, code):
        # If necessary, generate code to dispose of
        # temporary Python reference.
        if self.is_temp:
            if self.type.is_pyobject:
                code.put_decref_clear(self.result, self.type)
        else:
            self.generate_subexpr_disposal_code(code)
    def generate_subexpr_disposal_code(self, code):
        # Generate code to dispose of temporary results
        # of all sub-expressions.
        for node in self.subexpr_nodes():
            node.generate_disposal_code(code)
    def generate_post_assignment_code(self, code):
        # Same as generate_disposal_code except that
        # assignment will have absorbed a reference to
        # the result if it is a Python object.
        if self.is_temp:
            if self.type.is_pyobject:
                code.putln("%s = 0;" % self.result)
        else:
            self.generate_subexpr_disposal_code(code)
    def generate_assignment_code(self, rhs, code):
        # Stub method for nodes which are not legal as
        # the LHS of an assignment. An error will have
        # been reported earlier.
        pass
    def generate_deletion_code(self, code):
        # Stub method for nodes that are not legal as
        # the argument of a del statement. An error
        # will have been reported earlier.
        pass
    # ----------------- Coercion ----------------------
    def coerce_to(self, dst_type, env):
        # Coerce the result so that it can be assigned to
        # something of type dst_type. If processing is necessary,
        # wraps this node in a coercion node and returns that.
        # Otherwise, returns this node unchanged.
        #
        # This method is called during the analyse_expressions
        # phase of the src_node's processing.
        src = self
        src_type = self.type
        # NOTE(review): the two locals below are currently unused.
        src_is_py_type = src_type.is_pyobject
        dst_is_py_type = dst_type.is_pyobject
        if dst_type.is_pyobject:
            if not src.type.is_pyobject:
                src = CoerceToPyTypeNode(src, env)
            if not src.type.subtype_of(dst_type):
                # Runtime type check needed (e.g. extension type target).
                src = PyTypeTestNode(src, dst_type, env)
        elif src.type.is_pyobject:
            src = CoerceFromPyTypeNode(dst_type, src, env)
        else: # neither src nor dst are py types
            if not dst_type.assignable_from(src_type):
                error(self.pos, "Cannot assign type '%s' to '%s'" %
                    (src.type, dst_type))
        return src
    def coerce_to_pyobject(self, env):
        return self.coerce_to(PyrexTypes.py_object_type, env)
    def coerce_to_boolean(self, env):
        # Coerce result to something acceptable as
        # a boolean value.
        type = self.type
        if type.is_pyobject or type.is_ptr or type.is_float:
            return CoerceToBooleanNode(self, env)
        else:
            if not type.is_int:
                error(self.pos,
                    "Type '%s' not acceptable as a boolean" % type)
            return self
    def coerce_to_integer(self, env):
        # If not already some C integer type, coerce to longint.
        if self.type.is_int:
            return self
        else:
            return self.coerce_to(PyrexTypes.c_long_type, env)
    def coerce_to_temp(self, env):
        # Ensure that the result is in a temporary.
        if self.result_in_temp():
            return self
        else:
            return CoerceToTempNode(self, env)
    def coerce_to_simple(self, env):
        # Ensure that the result is simple (see is_simple).
        if self.is_simple():
            return self
        else:
            return self.coerce_to_temp(env)
    def is_simple(self):
        # A node is simple if its result is something that can
        # be referred to without performing any operations, e.g.
        # a constant, local var, C global var, struct member
        # reference, or temporary.
        return self.result_in_temp()
class AtomicExprNode(ExprNode):
    # Abstract base class for expression nodes which have
    # no sub-expressions.
    subexprs = []  # no child expression attributes
class PyConstNode(AtomicExprNode):
    # Abstract base class for constant Python values.
    # Subclasses supply a 'value' class attribute holding the
    # C name of the singleton object (e.g. "Py_None").
    def is_simple(self):
        return 1
    def analyse_types(self, env):
        self.type = PyrexTypes.py_object_type
    def result_code(self):
        return self.value
    def generate_result_code(self, code):
        # Nothing to do: the singleton already exists at runtime.
        pass
class NoneNode(PyConstNode):
    # The constant value None
    value = "Py_None"  # C name of the None singleton
class EllipsisNode(PyConstNode):
    # '...' in a subscript list.
    value = "Py_Ellipsis"  # C name of the Ellipsis singleton
class ConstNode(AtomicExprNode):
    # Abstract base type for literal constant nodes.
    #
    # value string C code fragment
    is_literal = 1
    def is_simple(self):
        return 1
    def analyse_types(self, env):
        pass # Types are held in class variables
    def check_const(self):
        # Literals are always legal constant expressions.
        pass
    def result_code(self):
        return str(self.value)
    def generate_result_code(self, code):
        # Nothing to do: the literal is emitted inline via result_code.
        pass
class NullNode(ConstNode):
    # The C null pointer constant.
    type = PyrexTypes.c_null_ptr_type
    value = "0"
class CharNode(ConstNode):
    # A C character literal.
    type = PyrexTypes.c_char_type
    def result_code(self):
        # Emit as a C character constant.
        return "'%s'" % self.value
class IntNode(ConstNode):
    # An integer literal, represented as a C long.
    type = PyrexTypes.c_long_type
class FloatNode(ConstNode):
    # A float literal, represented as a C double.
    type = PyrexTypes.c_double_type
class StringNode(ConstNode):
    # A string literal.
    #
    # entry Symtab.Entry
    type = PyrexTypes.c_char_ptr_type
    def analyse_types(self, env):
        # Register the literal with the module so a C string
        # constant is emitted for it.
        self.entry = env.add_string_const(self.value)
    def coerce_to(self, dst_type, env):
        # Arrange for a Python version of the string to be pre-allocated
        # when coercing to a Python type.
        if dst_type.is_pyobject and not self.type.is_pyobject:
            node = self.as_py_string_node(env)
        else:
            node = self
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)
    def as_py_string_node(self, env):
        # Return a new StringNode with the same entry as this node
        # but whose type is a Python type instead of a C type.
        entry = self.entry
        env.add_py_string(entry)
        return StringNode(self.pos, entry = entry, type = py_object_type)
    def result_code(self):
        if self.type.is_pyobject:
            return self.entry.pystring_cname
        else:
            return self.entry.cname
class ImagNode(AtomicExprNode):
    # An imaginary number literal; evaluated at runtime with
    # PyComplex_FromDoubles.
    #
    # value float imaginary part
    def analyse_types(self, env):
        self.is_temp = 1
        self.type = PyrexTypes.py_object_type
    def generate_evaluation_code(self, code):
        stmt = "%s = PyComplex_FromDoubles(0.0, %s); if (!%s) %s" % (
            self.result, self.value,
            self.result, code.error_goto(self.pos))
        code.putln(stmt)
class NameNode(AtomicExprNode):
# Reference to a local or global variable name.
#
# name string Python name of the variable
# entry Entry Symbol table entry
is_name = 1
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module.
# Returns the module scope, or None.
entry = env.lookup(self.name)
if entry and entry.as_module:
return entry.as_module
return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type.
# Returns the extension type, or None.
entry = env.lookup(self.name)
if entry and entry.is_type and entry.type.is_extension_type:
return entry.type
return None
def analyse_target_declaration(self, env):
self.entry = env.lookup_here(self.name)
if not self.entry:
self.entry = env.declare_var(self.name,
PyrexTypes.py_object_type, self.pos)
def analyse_types(self, env):
self.entry = env.lookup(self.name)
if not self.entry:
self.entry = env.declare_builtin(self.name, self.pos)
self.analyse_entry(env)
def analyse_entry(self, env):
self.check_identifier_kind()
self.type = self.entry.type
# Reference to C array turns into pointer to first element.
if self.type.is_array:
self.type = self.type.element_ptr_type()
if self.entry.is_pyglobal or self.entry.is_builtin:
assert self.type.is_pyobject, "Python global or builtin not a Python object"
self.is_temp = 1
if Options.intern_names:
env.use_utility_code(get_name_interned_utility_code)
else:
env.use_utility_code(get_name_utility_code)
def analyse_target_types(self, env):
self.check_identifier_kind()
if self.is_lvalue():
self.type = self.entry.type
else:
error(self.pos, "Assignment to non-lvalue '%s'"
% self.name)
self.type = PyrexTypes.error_type
def check_identifier_kind(self):
entry = self.entry
if not (entry.is_const or entry.is_variable
or entry.is_builtin or entry.is_cfunction):
if self.entry.as_variable:
self.entry = self.entry.as_variable
else:
error(self.pos,
"'%s' is not a constant, variable or function identifier" % self.name)
def is_simple(self):
# If it's not a C variable, it'll be in a temp.
return 1
def calculate_target_results(self, env):
pass
def check_const(self):
entry = self.entry
if not (entry.is_const or entry.is_cfunction):
self.not_const()
def check_const_addr(self):
entry = self.entry
if not (entry.is_cglobal or entry.is_cfunction):
self.addr_not_const()
def is_lvalue(self):
return self.entry.is_variable and \
not self.entry.type.is_array and \
not self.entry.is_readonly
def is_ephemeral(self):
# Name nodes are never ephemeral, even if the
# result is in a temporary.
return 0
def result_code(self):
if self.entry is None:
return "<error>" # There was an error earlier
result = self.entry.cname
if self.type.is_extension_type and \
not self.entry.is_declared_generic:
result = "((PyObject *)%s)" % result
return result
def result_as_extension_type(self):
if self.entry is None:
return "<error>" # There was an error earlier
#if not self.entry.is_self_arg:
if not self.entry.is_declared_generic:
return self.entry.cname
else:
return "((%s)%s)" % (
self.type.declaration_code(""),
self.entry.cname)
def generate_result_code(self, code):
if not hasattr(self, 'entry'):
error(self.pos, "INTERNAL ERROR: NameNode has no entry attribute during code generation")
entry = self.entry
if entry is None:
return # There was an error earlier
if entry.is_pyglobal or entry.is_builtin:
if entry.is_builtin:
namespace = Naming.builtins_cname
else: # entry.is_pyglobal
namespace = entry.namespace_cname
if Options.intern_names:
#assert entry.interned_cname is not None
code.putln(
'%s = __Pyx_GetName(%s, %s); if (!%s) %s' % (
self.result,
namespace,
entry.interned_cname,
self.result,
code.error_goto(self.pos)))
else:
code.putln(
'%s = __Pyx_GetName(%s, "%s"); if (!%s) %s' % (
self.result,
namespace,
self.entry.name,
self.result,
code.error_goto(self.pos)))
def generate_assignment_code(self, rhs, code):
entry = self.entry
if entry is None:
return # There was an error earlier
if entry.is_pyglobal:
namespace = self.entry.namespace_cname
if Options.intern_names:
code.putln(
'if (PyObject_SetAttr(%s, %s, %s) < 0) %s' % (
namespace,
entry.interned_cname,
rhs.result,
code.error_goto(self.pos)))
else:
code.putln(
'if (PyObject_SetAttrString(%s, "%s", %s) < 0) %s' % (
namespace,
entry.name,
rhs.result,
code.error_goto(self.pos)))
if debug_disposal_code:
print "NameNode.generate_assignment_code:"
print "...generating disposal code for", rhs
rhs.generate_disposal_code(code)
else:
if self.type.is_pyobject:
rhs.make_owned_reference(code)
code.put_decref(self.result, self.type)
code.putln('%s = %s;' % (self.entry.cname, rhs.result))
if debug_disposal_code:
print "NameNode.generate_assignment_code:"
print "...generating post-assignment code for", rhs
rhs.generate_post_assignment_code(code)
def generate_deletion_code(self, code):
if self.entry is None:
return # There was an error earlier
if not self.entry.is_pyglobal:
error(self.pos, "Deletion of local or C global name not supported")
return
code.putln(
'if (PyObject_DelAttrString(%s, "%s") < 0) %s' % (
Naming.module_cname,
self.entry.name,
code.error_goto(self.pos)))
class BackquoteNode(ExprNode):
    # `expr` -- backquote (repr) expression.
    #
    # arg ExprNode
    subexprs = ['arg']
    def analyse_types(self, env):
        self.arg.analyse_types(env)
        self.arg = self.arg.coerce_to_pyobject(env)
        self.is_temp = 1
        self.type = PyrexTypes.py_object_type
    def generate_result_code(self, code):
        stmt = "%s = PyObject_Repr(%s); if (!%s) %s" % (
            self.result, self.arg.result,
            self.result, code.error_goto(self.pos))
        code.putln(stmt)
class ImportNode(ExprNode):
    # Used as part of import statement implementation.
    # Implements result =
    #   __import__(module_name, globals(), None, name_list)
    #
    # module_name StringNode dotted name of module
    # name_list ListNode or None names to be imported
    subexprs = ['module_name', 'name_list']
    def analyse_types(self, env):
        self.module_name.analyse_types(env)
        self.module_name = self.module_name.coerce_to_pyobject(env)
        if self.name_list:
            self.name_list.analyse_types(env)
        self.is_temp = 1
        self.type = PyrexTypes.py_object_type
        env.use_utility_code(import_utility_code)
    def generate_result_code(self, code):
        # Pass a null name list when no explicit names were given.
        name_list_code = self.name_list.result if self.name_list else "0"
        stmt = "%s = __Pyx_Import(%s, %s); if (!%s) %s" % (
            self.result, self.module_name.result, name_list_code,
            self.result, code.error_goto(self.pos))
        code.putln(stmt)
class IteratorNode(ExprNode):
    # Implements the hidden "result = iter(sequence)" step of a
    # for statement.
    #
    # sequence ExprNode
    subexprs = ['sequence']
    def analyse_types(self, env):
        self.sequence.analyse_types(env)
        self.sequence = self.sequence.coerce_to_pyobject(env)
        self.is_temp = 1
        self.type = PyrexTypes.py_object_type
    def generate_result_code(self, code):
        stmt = "%s = PyObject_GetIter(%s); if (!%s) %s" % (
            self.result, self.sequence.result,
            self.result, code.error_goto(self.pos))
        code.putln(stmt)
class NextNode(AtomicExprNode):
    # Used as part of for statement implementation.
    # Implements result = iterator.next()
    # Created during analyse_types phase.
    # The iterator is not owned by this node.
    #
    # iterator ExprNode
    def __init__(self, iterator, env):
        self.pos = iterator.pos
        self.iterator = iterator
        self.type = PyrexTypes.py_object_type
        self.is_temp = 1
    def generate_result_code(self, code):
        # Emits: fetch next item; on NULL either propagate the error
        # or break out of the enclosing for loop on plain exhaustion.
        # NOTE(review): the generated 'break' assumes this code is
        # placed directly inside the loop emitted for the statement.
        code.putln(
            "%s = PyIter_Next(%s);" % (
                self.result,
                self.iterator.result))
        code.putln(
            "if (!%s) {" %
                self.result)
        code.putln(
                "if (PyErr_Occurred()) %s" %
                    code.error_goto(self.pos))
        code.putln(
                "break;")
        code.putln(
            "}")
class ExcValueNode(AtomicExprNode):
    # Created during the analyse_types phase of an ExceptClauseNode
    # to fetch the current exception value.
    def __init__(self, pos, env):
        ExprNode.__init__(self, pos)
        self.is_temp = 1
        self.type = PyrexTypes.py_object_type
        env.use_utility_code(get_exception_utility_code)
    def generate_result_code(self, code):
        stmt = "%s = __Pyx_GetExcValue(); if (!%s) %s" % (
            self.result, self.result, code.error_goto(self.pos))
        code.putln(stmt)
class TempNode(AtomicExprNode):
    # Node created during analyse_types phase
    # of some nodes to hold a temporary value.
    def __init__(self, pos, type, env):
        ExprNode.__init__(self, pos)
        self.type = type
        self.is_temp = 1
    def generate_result_code(self, code):
        # Nothing to generate; the owning node fills in the temp.
        pass
class PyTempNode(TempNode):
    # TempNode holding a Python value.
    def __init__(self, pos, env):
        TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
class IndexNode(ExprNode):
    # Sequence indexing.
    #
    # base ExprNode
    # index ExprNode
    subexprs = ['base', 'index']
    def analyse_target_declaration(self, env):
        pass
    def analyse_types(self, env):
        # Python-object bases use the generic item protocol; C
        # pointer/array bases compile to direct element access.
        self.base.analyse_types(env)
        self.index.analyse_types(env)
        if self.base.type.is_pyobject:
            self.index = self.index.coerce_to_pyobject(env)
            self.type = PyrexTypes.py_object_type
            self.is_temp = 1
        else:
            if self.base.type.is_ptr or self.base.type.is_array:
                self.type = self.base.type.base_type
            else:
                error(self.pos,
                    "Attempting to index non-array type '%s'" %
                        self.base.type)
                self.type = PyrexTypes.error_type
            if self.index.type.is_pyobject:
                self.index = self.index.coerce_to(
                    PyrexTypes.c_int_type, env)
            if not self.index.type.is_int:
                error(self.pos,
                    "Invalid index type '%s'" %
                        self.index.type)
    def check_const_addr(self):
        self.base.check_const_addr()
        self.index.check_const()
    def is_lvalue(self):
        return 1
    def result_code(self):
        # C-level element access; only used for non-pyobject bases.
        return "(%s[%s])" % (
            self.base.result, self.index.result)
    def generate_result_code(self, code):
        if self.type.is_pyobject:
            code.putln(
                "%s = PyObject_GetItem(%s, %s); if (!%s) %s" % (
                    self.result,
                    self.base.result,
                    self.index.result,
                    self.result,
                    code.error_goto(self.pos)))
    def generate_assignment_code(self, rhs, code):
        self.generate_subexpr_evaluation_code(code)
        if self.type.is_pyobject:
            # PyObject_SetItem does not steal the reference, so the
            # RHS temp is disposed of normally afterwards.
            code.putln(
                "if (PyObject_SetItem(%s, %s, %s) < 0) %s" % (
                    self.base.result,
                    self.index.result,
                    rhs.result,
                    code.error_goto(self.pos)))
            self.generate_subexpr_disposal_code(code)
        else:
            code.putln(
                "%s = %s;" % (
                    self.result, rhs.result))
        rhs.generate_disposal_code(code)
    def generate_deletion_code(self, code):
        self.generate_subexpr_evaluation_code(code)
        code.putln(
            "if (PyObject_DelItem(%s, %s) < 0) %s" % (
                self.base.result,
                self.index.result,
                code.error_goto(self.pos)))
        self.generate_subexpr_disposal_code(code)
class SliceIndexNode(ExprNode):
    # 2-element slice indexing
    #
    # base ExprNode
    # start ExprNode or None
    # stop ExprNode or None
    subexprs = ['base', 'start', 'stop']
    def analyse_target_declaration(self, env):
        pass
    def analyse_types(self, env):
        # The base must be a Python object; bounds are C ints for
        # the PySequence_*Slice API.
        self.base.analyse_types(env)
        if self.start:
            self.start.analyse_types(env)
        if self.stop:
            self.stop.analyse_types(env)
        self.base = self.base.coerce_to_pyobject(env)
        c_int = PyrexTypes.c_int_type
        if self.start:
            self.start = self.start.coerce_to(c_int, env)
        if self.stop:
            self.stop = self.stop.coerce_to(c_int, env)
        self.type = PyrexTypes.py_object_type
        self.is_temp = 1
    def generate_result_code(self, code):
        code.putln(
            "%s = PySequence_GetSlice(%s, %s, %s); if (!%s) %s" % (
                self.result,
                self.base.result,
                self.start_code(),
                self.stop_code(),
                self.result,
                code.error_goto(self.pos)))
    def generate_assignment_code(self, rhs, code):
        self.generate_subexpr_evaluation_code(code)
        code.putln(
            "if (PySequence_SetSlice(%s, %s, %s, %s) < 0) %s" % (
                self.base.result,
                self.start_code(),
                self.stop_code(),
                rhs.result,
                code.error_goto(self.pos)))
        self.generate_subexpr_disposal_code(code)
        rhs.generate_disposal_code(code)
    def generate_deletion_code(self, code):
        self.generate_subexpr_evaluation_code(code)
        code.putln(
            "if (PySequence_DelSlice(%s, %s, %s) < 0) %s" % (
                self.base.result,
                self.start_code(),
                self.stop_code(),
                code.error_goto(self.pos)))
        self.generate_subexpr_disposal_code(code)
    def start_code(self):
        # Missing start bound defaults to 0.
        if self.start:
            return self.start.result
        else:
            return "0"
    def stop_code(self):
        # Missing stop bound defaults to the largest positive int.
        if self.stop:
            return self.stop.result
        else:
            return "0x7fffffff"
    def result_code(self):
        # self.result is not used, but this method must exist
        return "<unused>"
class SliceNode(ExprNode):
    # start:stop:step in a subscript list; builds a Python
    # slice object at runtime with PySlice_New.
    #
    # start ExprNode
    # stop ExprNode
    # step ExprNode
    subexprs = ['start', 'stop', 'step']
    def analyse_types(self, env):
        # Analyse all three bounds first, then coerce each to a
        # Python object, preserving the original phase order.
        for bound in (self.start, self.stop, self.step):
            bound.analyse_types(env)
        self.start = self.start.coerce_to_pyobject(env)
        self.stop = self.stop.coerce_to_pyobject(env)
        self.step = self.step.coerce_to_pyobject(env)
        self.is_temp = 1
        self.type = PyrexTypes.py_object_type
    def generate_result_code(self, code):
        stmt = "%s = PySlice_New(%s, %s, %s); if (!%s) %s" % (
            self.result,
            self.start.result,
            self.stop.result,
            self.step.result,
            self.result,
            code.error_goto(self.pos))
        code.putln(stmt)
class SimpleCallNode(ExprNode):
# Function call without keyword, * or ** args.
#
# function ExprNode
# args [ExprNode]
# arg_tuple ExprNode or None used internally
# self ExprNode or None used internally
# coerced_self ExprNode or None used internally
subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
self = None
coerced_self = None
arg_tuple = None
def analyse_types(self, env):
function = self.function
function.is_called = 1
self.function.analyse_types(env)
if function.is_attribute and function.entry and function.entry.is_cmethod:
# Take ownership of the object from which the attribute
# was obtained, because we need to pass it as 'self'.
self.self = function.obj
function.obj = CloneNode(self.self)
if self.function.type.is_pyobject:
self.arg_tuple = TupleNode(self.pos, args = self.args)
self.args = None
self.arg_tuple.analyse_types(env)
self.type = PyrexTypes.py_object_type
self.is_temp = 1
else:
for arg in self.args:
arg.analyse_types(env)
if self.self and self.function.type.args:
# Coerce 'self' to the type expected by the method.
expected_type = self.function.type.args[0].type
self.coerced_self = CloneNode(self.self).coerce_to(
expected_type, env)
# Insert coerced 'self' argument into argument list.
self.args.insert(0, self.coerced_self)
self.analyse_c_function_call(env)
def analyse_c_function_call(self, env):
func_type = self.function.type
# Coerce function pointer to function
if func_type.is_ptr:
func_type = func_type.base_type
self.function.type = func_type
# Check function type
if not func_type.is_cfunction:
if not func_type.is_error:
error(self.pos, "Calling non-function type '%s'" %
func_type)
self.type = PyrexTypes.error_type
self.result = "<error>"
return
# Check no. of args
expected_nargs = len(func_type.args)
actual_nargs = len(self.args)
if actual_nargs < expected_nargs \
or (not func_type.has_varargs and actual_nargs > expected_nargs):
expected_str = str(expected_nargs)
if func_type.has_varargs:
expected_str = "at least " + expected_str
error(self.pos,
"Call with wrong number of arguments (expected %s, got %s)"
% (expected_str, actual_nargs))
self.args = None
self.type = PyrexTypes.error_type
self.result = "<error>"
return
# Coerce arguments
for i in range(expected_nargs):
formal_type = func_type.args[i].type
self.args[i] = self.args[i].coerce_to(formal_type, env)
for i in range(expected_nargs, actual_nargs):
if self.args[i].type.is_pyobject:
error(self.args[i].pos,
"Python object cannot be passed as a varargs parameter")
# Calc result type and code fragment
self.type = func_type.return_type
if self.type.is_pyobject \
or func_type.exception_value is not None \
or func_type.exception_check:
self.is_temp = 1
    def result_code(self):
        # Result as a generic PyObject * expression (extension-type
        # results get a cast inside c_call_code).
        return self.c_call_code(as_extension_type = 0)
    def result_as_extension_type(self):
        # Result left at its specific extension type (no PyObject * cast).
        return self.c_call_code(as_extension_type = 1)
    def c_call_code(self, as_extension_type):
        # Build the C expression for the call.  Each formal argument
        # gets the actual argument's code, with extension-type formals
        # taken in their uncast form (plus an explicit cast when the
        # declared types differ); extra varargs are appended as-is.
        if self.args is None or not self.function.type.is_cfunction:
            return "<error>"
        formal_args = self.function.type.args
        arg_list_code = []
        for (formal_arg, actual_arg) in \
            zip(formal_args, self.args):
                if formal_arg.type.is_extension_type:
                    arg_code = actual_arg.result_as_extension_type()
                    if not formal_arg.type.same_as(actual_arg.type):
                        arg_code = "((%s)%s)" % (
                            formal_arg.type.declaration_code(""),
                            arg_code)
                else:
                    arg_code = actual_arg.result
                arg_list_code.append(arg_code)
        for actual_arg in self.args[len(formal_args):]:
            arg_list_code.append(actual_arg.result)
        result = "%s(%s)" % (self.function.result,
            join(arg_list_code, ","))
        # Unless the caller asked for the raw extension type, wrap an
        # extension-type result in a PyObject * cast.
        if self.type.is_extension_type and not as_extension_type:
            result = "((PyObject *)%s)" % result
        return result
    def generate_result_code(self, code):
        #print_call_chain("SimpleCallNode.generate_result_code") ###
        # Emit either a Python-level call (PyObject_CallObject with the
        # pre-built argument tuple) or a C-level call with the error
        # checks implied by the function's declared exception behaviour.
        if self.function.type.is_pyobject:
            code.putln(
                "%s = PyObject_CallObject(%s, %s); if (!%s) %s" % (
                    self.result,
                    self.function.result,
                    self.arg_tuple.result,
                    self.result,
                    code.error_goto(self.pos)))
        elif self.function.type.is_cfunction:
            exc_checks = []
            if self.type.is_pyobject:
                # Python result: NULL signals an exception.
                exc_checks.append("!%s" % self.result)
            else:
                exc_val = self.function.type.exception_value
                exc_check = self.function.type.exception_check
                if exc_val is not None:
                    exc_checks.append("%s == %s" % (self.result, exc_val))
                if exc_check:
                    exc_checks.append("PyErr_Occurred()")
            # NOTE(review): if is_temp is set but exc_checks is empty,
            # this would emit "if () ..." which is invalid C; presumably
            # is_temp implies at least one check here -- confirm.
            if self.is_temp or exc_checks:
                if self.result:
                    lhs = "%s = " % self.result
                else:
                    lhs = ""
                code.putln(
                    "%s%s; if (%s) %s" % (
                        lhs,
                        self.c_call_code(as_extension_type = 0),
                        " && ".join(exc_checks),
                        code.error_goto(self.pos)))
class GeneralCallNode(ExprNode):
    # General Python function call, including keyword,
    # * and ** arguments.
    #
    # function         ExprNode
    # positional_args  ExprNode          Tuple of positional arguments
    # keyword_args     ExprNode or None  Dict of keyword arguments
    # starstar_arg     ExprNode or None  Dict of extra keyword args
    subexprs = ['function', 'positional_args', 'keyword_args', 'starstar_arg']
    def analyse_types(self, env):
        # Analyse every subexpression, then force the call participants
        # into Python-object form; the result is always a temp PyObject.
        self.function.analyse_types(env)
        self.positional_args.analyse_types(env)
        for optional in (self.keyword_args, self.starstar_arg):
            if optional:
                optional.analyse_types(env)
        self.function = self.function.coerce_to_pyobject(env)
        self.positional_args = \
            self.positional_args.coerce_to_pyobject(env)
        if self.starstar_arg:
            self.starstar_arg = \
                self.starstar_arg.coerce_to_pyobject(env)
        self.type = PyrexTypes.py_object_type
        self.is_temp = 1
    def generate_result_code(self, code):
        # If both an explicit keyword dict and a ** dict are present,
        # merge the ** entries into the explicit dict first.
        if self.keyword_args and self.starstar_arg:
            code.putln(
                "if (PyDict_Update(%s, %s) < 0) %s" % (
                    self.keyword_args.result,
                    self.starstar_arg.result,
                    code.error_goto(self.pos)))
        keyword_code = None
        if self.keyword_args:
            keyword_code = self.keyword_args.result
        elif self.starstar_arg:
            keyword_code = self.starstar_arg.result
        # With keywords we must use PyEval_CallObjectWithKeywords;
        # otherwise the simpler PyObject_CallObject suffices.
        if keyword_code:
            call_code = "PyEval_CallObjectWithKeywords(%s, %s, %s)" % (
                self.function.result,
                self.positional_args.result,
                keyword_code)
        else:
            call_code = "PyObject_CallObject(%s, %s)" % (
                self.function.result,
                self.positional_args.result)
        code.putln(
            "%s = %s; if (!%s) %s" % (
                self.result,
                call_code,
                self.result,
                code.error_goto(self.pos)))
class AsTupleNode(ExprNode):
    # Convert argument to tuple. Used for normalising
    # the * argument of a function call.
    #
    # arg    ExprNode
    subexprs = ['arg']
    def analyse_types(self, env):
        # The operand must be a Python object; the result is a fresh
        # tuple held in a temp.
        self.arg.analyse_types(env)
        self.arg = self.arg.coerce_to_pyobject(env)
        self.type = PyrexTypes.py_object_type
        self.is_temp = 1
    def generate_result_code(self, code):
        res = self.result
        code.putln(
            "%s = PySequence_Tuple(%s); if (!%s) %s" % (
                res,
                self.arg.result,
                res,
                code.error_goto(self.pos)))
class AttributeNode(ExprNode):
    # obj.attribute
    #
    # obj          ExprNode
    # attribute    string
    #
    # Used internally:
    #
    # is_py_attr           boolean   Is a Python getattr operation
    # member               string    C name of struct member
    # is_called            boolean   Function call is being done on result
    # entry                Entry     Symbol table entry of attribute
    # interned_attr_cname  string    C name of interned attribute name
    is_attribute = 1
    subexprs = ['obj']
    type = PyrexTypes.error_type
    result = "<error>"
    entry = None
    is_called = 0
    def analyse_target_declaration(self, env):
        pass
    def analyse_target_types(self, env):
        self.analyse_types(env, target = 1)
    def analyse_types(self, env, target = 0):
        # Try the special interpretations first (a C name cimported from
        # another module, or an unbound C method of an extension type);
        # fall back to an ordinary attribute reference.
        if self.analyse_as_cimported_attribute(env, target):
            return
        if not target and self.analyse_as_unbound_cmethod(env):
            return
        self.analyse_as_ordinary_attribute(env, target)
    def analyse_as_cimported_attribute(self, env, target):
        # Try to interpret this as a reference to an imported
        # C const, type, var or function. If successful, mutates
        # this node into a NameNode and returns 1, otherwise
        # returns 0.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and (
                entry.is_cglobal or entry.is_cfunction
                or entry.is_type or entry.is_const):
                    self.mutate_into_name_node(env, entry, target)
                    return 1
        return 0
    def analyse_as_unbound_cmethod(self, env):
        # Try to interpret this as a reference to an unbound
        # C method of an extension type. If successful, mutates
        # this node into a NameNode and returns 1, otherwise
        # returns 0.
        type = self.obj.analyse_as_extension_type(env)
        if type:
            entry = type.scope.lookup_here(self.attribute)
            if entry and entry.is_cmethod:
                # Create a temporary entry describing the C method
                # as an ordinary function.
                ubcm_entry = Symtab.Entry(entry.name,
                    "%s->%s" % (type.vtabptr_cname, entry.cname),
                    entry.type)
                ubcm_entry.is_cfunction = 1
                ubcm_entry.func_cname = entry.func_cname
                self.mutate_into_name_node(env, ubcm_entry, None)
                return 1
        return 0
    def analyse_as_extension_type(self, env):
        # Try to interpret this as a reference to an extension type
        # in a cimported module. Returns the extension type, or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and entry.is_type and entry.type.is_extension_type:
                return entry.type
        return None
    def analyse_as_module(self, env):
        # Try to interpret this as a reference to a cimported module
        # in another cimported module. Returns the module scope, or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and entry.as_module:
                return entry.as_module
        return None
    def mutate_into_name_node(self, env, entry, target):
        # Mutate this node into a NameNode and complete the
        # analyse_types phase.
        # Note: reassigning __class__ changes this node's type in
        # place, so existing references to it see the NameNode.
        self.__class__ = NameNode
        self.name = self.attribute
        self.entry = entry
        del self.obj
        del self.attribute
        if target:
            NameNode.analyse_target_types(self, env)
        else:
            NameNode.analyse_entry(self, env)
    def analyse_as_ordinary_attribute(self, env, target):
        self.obj.analyse_types(env)
        self.analyse_attribute(env)
        if self.entry and self.entry.is_cmethod and not self.is_called:
            error(self.pos, "C method can only be called")
        # Reference to C array turns into pointer to first element.
        if self.type.is_array:
            self.type = self.type.element_ptr_type()
        if self.is_py_attr:
            if not target:
                self.is_temp = 1
    def analyse_attribute(self, env):
        # Look up attribute and set self.type and self.member.
        self.is_py_attr = 0
        self.member = self.attribute
        if self.obj.type.is_string:
            self.obj = self.obj.coerce_to_pyobject(env)
        obj_type = self.obj.type
        # Choose the C member-access operator for the base object.
        if obj_type.is_ptr:
            obj_type = obj_type.base_type
            self.op = "->"
        elif obj_type.is_extension_type:
            self.op = "->"
        else:
            self.op = "."
        if obj_type.has_attributes:
            entry = None
            if obj_type.attributes_known():
                entry = obj_type.scope.lookup_here(self.attribute)
            else:
                error(self.pos,
                    "Cannot select attribute of incomplete type '%s'"
                        % obj_type)
                obj_type = PyrexTypes.error_type
            self.entry = entry
            if entry:
                if entry.is_variable or entry.is_cmethod:
                    self.type = entry.type
                    self.member = entry.cname
                    return
                else:
                    # If it's not a variable or C method, it must be a Python
                    # method of an extension type, so we treat it like a Python
                    # attribute.
                    pass
        # If we get here, the base object is not a struct/union/extension
        # type, or it is an extension type and the attribute is either not
        # declared or is declared as a Python method. Treat it as a Python
        # attribute reference.
        if obj_type.is_pyobject:
            self.type = PyrexTypes.py_object_type
            self.is_py_attr = 1
            if Options.intern_names:
                self.interned_attr_cname = env.intern(self.attribute)
        else:
            if not obj_type.is_error:
                error(self.pos,
                    "Object of type '%s' has no attribute '%s'" %
                        (obj_type, self.attribute))
    def is_simple(self):
        # Simple if the result is already in a temp or the base
        # object itself is simple; NameNode path after mutation.
        if self.obj:
            return self.result_in_temp() or self.obj.is_simple()
        else:
            return NameNode.is_simple(self)
    def is_lvalue(self):
        if self.obj:
            return 1
        else:
            return NameNode.is_lvalue(self)
    def is_ephemeral(self):
        if self.obj:
            return ExprNode.is_ephemeral(self)
        else:
            return NameNode.is_ephemeral(self)
    def result_code(self):
        return self.select_code()
    def result_as_extension_type(self):
        return self.uncast_select_code()
    def select_code(self):
        # Member selection expression, cast to PyObject * when the
        # member is an extension type.
        code = self.uncast_select_code()
        if self.type.is_extension_type:
            code = "((PyObject *)%s)" % code
        return code
    def uncast_select_code(self):
        # Member selection without any PyObject * cast.  C methods are
        # fetched through the vtable struct of the extension type.
        obj_type = self.obj.type
        if obj_type.is_extension_type:
            obj_code = self.obj.result_as_extension_type()
        else:
            obj_code = self.obj.result
        if self.entry and self.entry.is_cmethod:
            return "((struct %s *)%s%s%s)->%s" % (
                obj_type.vtabstruct_cname, obj_code, self.op,
                obj_type.vtabslot_cname, self.member)
        else:
            return "%s%s%s" % (obj_code, self.op, self.member)
    def generate_result_code(self, code):
        # Python attribute access: interned PyObject_GetAttr when name
        # interning is on, PyObject_GetAttrString otherwise.
        if self.is_py_attr:
            if Options.intern_names:
                code.putln(
                    '%s = PyObject_GetAttr(%s, %s); if (!%s) %s' % (
                        self.result,
                        self.obj.result,
                        self.interned_attr_cname,
                        self.result,
                        code.error_goto(self.pos)))
            else:
                code.putln(
                    '%s = PyObject_GetAttrString(%s, "%s"); if (!%s) %s' % (
                        self.result,
                        self.obj.result,
                        self.attribute,
                        self.result,
                        code.error_goto(self.pos)))
    def generate_assignment_code(self, rhs, code):
        # Assignment target: Python attributes go through SetAttr;
        # C members are assigned directly, with reference management
        # when the member is a Python object.
        self.obj.generate_evaluation_code(code)
        if self.is_py_attr:
            if Options.intern_names:
                code.putln(
                    'if (PyObject_SetAttr(%s, %s, %s) < 0) %s' % (
                        self.obj.result,
                        self.interned_attr_cname,
                        rhs.result,
                        code.error_goto(self.pos)))
            else:
                code.putln(
                    'if (PyObject_SetAttrString(%s, "%s", %s) < 0) %s' % (
                        self.obj.result,
                        self.attribute,
                        rhs.result,
                        code.error_goto(self.pos)))
            rhs.generate_disposal_code(code)
        else:
            select_code = self.select_code()
            if self.type.is_pyobject:
                # New value must own a reference; old value is decrefed.
                rhs.make_owned_reference(code)
                code.put_decref(select_code, self.type)
            code.putln(
                "%s = %s;" % (
                    select_code,
                    rhs.result))
            rhs.generate_post_assignment_code(code)
        self.obj.generate_disposal_code(code)
    def generate_deletion_code(self, code):
        self.obj.generate_evaluation_code(code)
        if self.is_py_attr:
            code.putln(
                'if (PyObject_DelAttrString(%s, "%s") < 0) %s' % (
                    self.obj.result,
                    self.attribute,
                    code.error_goto(self.pos)))
        else:
            error(self.pos, "Cannot delete C attribute of extension type")
        self.obj.generate_disposal_code(code)
#-------------------------------------------------------------------
#
# Constructor nodes
#
#-------------------------------------------------------------------
class SequenceNode(ExprNode):
    # Base class for list and tuple constructor nodes.
    # Contains common code for performing sequence unpacking.
    #
    # args                    [ExprNode]
    # unpacked_items          [ExprNode] or None
    # coerced_unpacked_items  [ExprNode] or None
    subexprs = ['args']
    is_sequence_constructor = 1
    unpacked_items = None
    def analyse_target_declaration(self, env):
        for arg in self.args:
            arg.analyse_target_declaration(env)
    def analyse_types(self, env):
        # As an rvalue: all items become Python objects; the constructed
        # sequence lives in a temp.
        for i in range(len(self.args)):
            arg = self.args[i]
            arg.analyse_types(env)
            self.args[i] = arg.coerce_to_pyobject(env)
        self.type = PyrexTypes.py_object_type
        self.is_temp = 1
    def analyse_target_types(self, env):
        # As an assignment target: one Python temp per item to receive
        # the unpacked value, plus a node coercing it to the target type.
        self.unpacked_items = [] # PyTempNode(self.pos, env)
        self.coerced_unpacked_items = []
        for arg in self.args:
            arg.analyse_target_types(env)
            #node = CloneNode(self.unpacked_item)
            unpacked_item = PyTempNode(self.pos, env)
            coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
            self.unpacked_items.append(unpacked_item)
            self.coerced_unpacked_items.append(coerced_unpacked_item)
        self.type = PyrexTypes.py_object_type
        env.use_utility_code(unpacking_utility_code)
    def allocate_target_temps(self, env):
        for arg in self.args:
            arg.allocate_target_temps(env)
        for node in self.coerced_unpacked_items:
            node.allocate_temps(env)
    def release_target_temp(self, env):
        for arg in self.args:
            arg.release_target_temp(env)
        for node in self.coerced_unpacked_items:
            node.release_temp(env)
    def generate_result_code(self, code):
        self.generate_operation_code(code)
    def generate_assignment_code(self, rhs, code):
        # Unpack the rhs item by item via __Pyx_UnpackItem, assign each
        # coerced value to its target, then verify the iterator is
        # exhausted with __Pyx_EndUnpack.
        for i in range(len(self.args)):
            unpack_result = self.unpacked_items[i].result
            code.putln(
                "%s = __Pyx_UnpackItem(%s, %s); if (!%s) %s" % (
                    unpack_result,
                    rhs.result,
                    i,
                    unpack_result,
                    code.error_goto(self.pos)))
            value_node = self.coerced_unpacked_items[i]
            value_node.generate_evaluation_code(code)
            self.args[i].generate_assignment_code(value_node, code)
        code.putln(
            "if (__Pyx_EndUnpack(%s, %s) < 0) %s" % (
                rhs.result,
                len(self.args),
                code.error_goto(self.pos)))
        if debug_disposal_code:
            print "UnpackNode.generate_assignment_code:"
            print "...generating disposal code for", rhs
        rhs.generate_disposal_code(code)
class TupleNode(SequenceNode):
    # Tuple constructor.
    def generate_operation_code(self, code):
        # Allocate the tuple, then fill each slot.  PyTuple_SET_ITEM
        # steals a reference, so non-temp arguments get an incref to
        # provide the reference that is stolen.
        code.putln(
            "%s = PyTuple_New(%s); if (!%s) %s" % (
                self.result,
                len(self.args),
                self.result,
                code.error_goto(self.pos)))
        for index, arg in enumerate(self.args):
            if not arg.result_in_temp():
                code.put_incref(arg.result, arg.type)
            code.putln(
                "PyTuple_SET_ITEM(%s, %s, %s);" % (
                    self.result,
                    index,
                    arg.result))
    def generate_subexpr_disposal_code(self, code):
        # We call generate_post_assignment_code here instead
        # of generate_disposal_code, because values were stored
        # in the tuple using a reference-stealing operation.
        for arg in self.args:
            arg.generate_post_assignment_code(code)
class ListNode(SequenceNode):
    # List constructor.
    def generate_operation_code(self, code):
        # Create the list, then fill slots with the reference-stealing
        # PyList_SET_ITEM; non-temp arguments are increffed first to
        # provide the reference that is stolen.
        code.putln("%s = PyList_New(%s); if (!%s) %s" %
            (self.result,
            len(self.args),
            self.result,
            code.error_goto(self.pos)))
        for index, arg in enumerate(self.args):
            if not arg.result_in_temp():
                code.put_incref(arg.result, arg.type)
            code.putln("PyList_SET_ITEM(%s, %s, %s);" %
                (self.result,
                index,
                arg.result))
    def generate_subexpr_disposal_code(self, code):
        # We call generate_post_assignment_code here instead
        # of generate_disposal_code, because values were stored
        # in the list using a reference-stealing operation.
        for arg in self.args:
            arg.generate_post_assignment_code(code)
class DictNode(ExprNode):
    # Dictionary constructor.
    #
    # key_value_pairs  [(ExprNode, ExprNode)]
    def analyse_types(self, env):
        # All keys and values are coerced to Python objects; the dict
        # itself is a temp.
        new_pairs = []
        for key, value in self.key_value_pairs:
            key.analyse_types(env)
            value.analyse_types(env)
            key = key.coerce_to_pyobject(env)
            value = value.coerce_to_pyobject(env)
            new_pairs.append((key, value))
        self.key_value_pairs = new_pairs
        self.type = PyrexTypes.py_object_type
        self.is_temp = 1
    def allocate_temps(self, env):
        # Custom method used here because key-value
        # pairs are evaluated and used one at a time.
        # Each pair's temps are released immediately so later
        # pairs can reuse them.
        self.allocate_temp(env)
        for key, value in self.key_value_pairs:
            key.allocate_temps(env)
            value.allocate_temps(env)
            key.release_temp(env)
            value.release_temp(env)
    def generate_evaluation_code(self, code):
        # Custom method used here because key-value
        # pairs are evaluated and used one at a time.
        code.putln(
            "%s = PyDict_New(); if (!%s) %s" % (
                self.result,
                self.result,
                code.error_goto(self.pos)))
        for key, value in self.key_value_pairs:
            key.generate_evaluation_code(code)
            value.generate_evaluation_code(code)
            code.putln(
                "if (PyDict_SetItem(%s, %s, %s) < 0) %s" % (
                    self.result,
                    key.result,
                    value.result,
                    code.error_goto(self.pos)))
            key.generate_disposal_code(code)
            value.generate_disposal_code(code)
class ClassNode(ExprNode):
    # Helper class used in the implementation of Python
    # class definitions. Constructs a class object given
    # a name, tuple of bases and class dictionary.
    #
    # name         ExprNode           Name of the class
    # bases        ExprNode           Base class tuple
    # dict         ExprNode           Class dict (not owned by this node)
    # doc          ExprNode or None   Doc string
    # module_name  string             Name of defining module
    subexprs = ['name', 'bases', 'doc']
    def analyse_types(self, env):
        # Name and (optional) doc string become Python objects; the
        # class object is created at runtime and held in a temp.
        self.name.analyse_types(env)
        self.name = self.name.coerce_to_pyobject(env)
        self.bases.analyse_types(env)
        if self.doc:
            self.doc.analyse_types(env)
            self.doc = self.doc.coerce_to_pyobject(env)
        self.module_name = env.global_scope().module_name
        self.type = PyrexTypes.py_object_type
        self.is_temp = 1
        env.use_utility_code(create_class_utility_code)
    def generate_result_code(self, code):
        # Install the doc string into the class dict first, then build
        # the class object with the __Pyx_CreateClass utility function.
        if self.doc:
            code.putln(
                'if (PyDict_SetItemString(%s, "__doc__", %s) < 0) %s' % (
                    self.dict.result,
                    self.doc.result,
                    code.error_goto(self.pos)))
        code.putln(
            '%s = __Pyx_CreateClass(%s, %s, %s, "%s"); if (!%s) %s' % (
                self.result,
                self.bases.result,
                self.dict.result,
                self.name.result,
                self.module_name,
                self.result,
                code.error_goto(self.pos)))
class UnboundMethodNode(ExprNode):
    # Helper class used in the implementation of Python
    # class definitions. Constructs an unbound method
    # object from a class and a function.
    #
    # class_cname   string     C var holding the class object
    # function      ExprNode   Function object
    subexprs = ['function']
    def analyse_types(self, env):
        self.function.analyse_types(env)
        self.type = PyrexTypes.py_object_type
        self.is_temp = 1
    def generate_result_code(self, code):
        # PyMethod_New(func, NULL, class) produces an unbound method.
        res = self.result
        code.putln(
            "%s = PyMethod_New(%s, 0, %s); if (!%s) %s" % (
                res,
                self.function.result,
                self.class_cname,
                res,
                code.error_goto(self.pos)))
class PyCFunctionNode(AtomicExprNode):
    # Helper class used in the implementation of Python
    # class definitions. Constructs a PyCFunction object
    # from a PyMethodDef struct.
    #
    # pymethdef_cname   string   PyMethodDef structure
    def analyse_types(self, env):
        # The function object is created at runtime, so a temp
        # Python reference is always needed.
        self.type = PyrexTypes.py_object_type
        self.is_temp = 1
    def generate_result_code(self, code):
        res = self.result
        code.putln(
            "%s = PyCFunction_New(&%s, 0); if (!%s) %s" % (
                res,
                self.pymethdef_cname,
                res,
                code.error_goto(self.pos)))
#-------------------------------------------------------------------
#
# Unary operator nodes
#
#-------------------------------------------------------------------
class UnopNode(ExprNode):
    # Base class for unary operator nodes.
    #
    # operator  string
    # operand   ExprNode
    #
    # Processing during analyse_expressions phase:
    #
    #   analyse_c_operation
    #     Called when the operand is not a pyobject.
    #     - Check operand type and coerce if needed.
    #     - Determine result type and result code fragment.
    #     - Allocate temporary for result if needed.
    subexprs = ['operand']
    def analyse_types(self, env):
        # Python-object operands go through the Python C API (result is
        # a temp PyObject); C operands are handled by the subclass.
        self.operand.analyse_types(env)
        if self.is_py_operation():
            self.coerce_operand_to_pyobject(env)
            self.type = PyrexTypes.py_object_type
            self.is_temp = 1
        else:
            self.analyse_c_operation(env)
    def check_const(self):
        self.operand.check_const()
    def is_py_operation(self):
        return self.operand.type.is_pyobject
    def coerce_operand_to_pyobject(self, env):
        self.operand = self.operand.coerce_to_pyobject(env)
    def generate_result_code(self, code):
        if self.operand.type.is_pyobject:
            self.generate_py_operation_code(code)
        else:
            if self.is_temp:
                self.generate_c_operation_code(code)
    def generate_py_operation_code(self, code):
        # Emit a call to the PyNumber_* function chosen by the subclass.
        function = self.py_operation_function()
        code.putln(
            "%s = %s(%s); if (!%s) %s" % (
                self.result,
                function,
                self.operand.result,
                self.result,
                code.error_goto(self.pos)))
    def type_error(self):
        if not self.operand.type.is_error:
            error(self.pos, "Invalid operand type for '%s' (%s)" %
                (self.operator, self.operand.type))
        self.type = PyrexTypes.error_type
class NotNode(ExprNode):
    # 'not' operator
    #
    # operand   ExprNode
    subexprs = ['operand']
    def analyse_types(self, env):
        # Operand is forced to a C boolean; the negation is computed
        # inline in result_code, so no temp is needed.
        self.operand.analyse_types(env)
        self.operand = self.operand.coerce_to_boolean(env)
        self.type = PyrexTypes.c_int_type
    def result_code(self):
        return "(!%s)" % self.operand.result
    def generate_result_code(self, code):
        # Nothing to emit: the whole expression lives in result_code.
        pass
class UnaryPlusNode(UnopNode):
    # unary '+' operator
    operator = '+'
    def analyse_c_operation(self, env):
        # Unary plus is a no-op on C numeric values.
        self.type = self.operand.type
    def py_operation_function(self):
        return "PyNumber_Positive"
    def result_code(self):
        # Pass the operand's code through unchanged.
        return self.operand.result
class UnaryMinusNode(UnopNode):
    # unary '-' operator
    operator = '-'
    def analyse_c_operation(self, env):
        # Only C numeric operands may be negated at the C level.
        if not self.operand.type.is_numeric:
            self.type_error()
            return
        self.type = self.operand.type
    def py_operation_function(self):
        return "PyNumber_Negative"
    def result_code(self):
        return "(-%s)" % self.operand.result
class TildeNode(UnopNode):
    # unary '~' operator
    # Consistency fix: define the class-level operator attribute like
    # the sibling unary nodes (UnaryPlusNode, UnaryMinusNode) do, so
    # UnopNode.type_error can always format self.operator even if the
    # node was constructed without an operator keyword.
    operator = '~'
    def analyse_c_operation(self, env):
        # Bitwise complement is only defined for C integer types.
        if self.operand.type.is_int:
            self.type = self.operand.type
        else:
            self.type_error()
    def py_operation_function(self):
        return "PyNumber_Invert"
    def result_code(self):
        return "(~%s)" % self.operand.result
class AmpersandNode(ExprNode):
    # The C address-of operator.
    #
    # operand   ExprNode
    subexprs = ['operand']
    def analyse_types(self, env):
        # The operand must be an addressable C lvalue (or a C function);
        # Python objects have no meaningful address at this level.
        self.operand.analyse_types(env)
        argtype = self.operand.type
        if not (argtype.is_cfunction or self.operand.is_lvalue()):
            self.error("Taking address of non-lvalue")
            return
        if argtype.is_pyobject:
            self.error("Cannot take address of Python variable")
            return
        self.type = PyrexTypes.c_ptr_type(argtype)
    def check_const(self):
        self.operand.check_const_addr()
    def error(self, mess):
        # Report the error and poison this node's type and result.
        error(self.pos, mess)
        self.type = PyrexTypes.error_type
        self.result = "<error>"
    def result_code(self):
        return "(&%s)" % self.operand.result
    def generate_result_code(self, code):
        # Nothing to emit: the whole expression lives in result_code.
        pass
# Maps a unary operator symbol to the node class implementing it.
unop_node_classes = {
    "+": UnaryPlusNode,
    "-": UnaryMinusNode,
    "~": TildeNode,
}
def unop_node(pos, operator, operand):
    # Factory: construct the unary operator node of the class
    # appropriate for the given operator symbol.
    node_class = unop_node_classes[operator]
    return node_class(pos, operator = operator, operand = operand)
class TypecastNode(ExprNode):
    # C type cast
    #
    # base_type   CBaseTypeNode
    # declarator  CDeclaratorNode
    # operand     ExprNode
    subexprs = ['operand']
    def analyse_types(self, env):
        # Resolve the target type from the declarator.  Casting a
        # temporary Python object to a non-Python type would leave a
        # dangling pointer, so that is an error.  Casting a non-Python
        # value to a Python object needs a temp to own the new reference.
        base_type = self.base_type.analyse(env)
        _, self.type = self.declarator.analyse(base_type, env)
        self.operand.analyse_types(env)
        to_py = self.type.is_pyobject
        from_py = self.operand.type.is_pyobject
        if from_py and not to_py and self.operand.is_ephemeral():
            error(self.pos, "Casting temporary Python object to non-Python type")
        #if to_py:
        if to_py and not from_py:
            self.is_temp = 1
    def check_const(self):
        self.operand.check_const()
    def result_code(self):
        if self.type.is_pyobject:
            cast = "PyObject *"
        else:
            cast = self.type.declaration_code("")
        return "((%s)%s)" % (cast, self.operand.result)
    def result_as_extension_type(self):
        # Cast expressed with the specific extension type, not PyObject *.
        return "((%s)%s)" % (
            self.type.declaration_code(""),
            self.operand.result)
    def generate_result_code(self, code):
        # Only the non-Python -> Python case emits code: store the value
        # in the temp and take a new reference to it.
        if self.is_temp:
            code.putln(
                "%s = (PyObject *)%s;" % (
                    self.result,
                    #self.type.declaration_code(""),
                    self.operand.result))
            code.put_incref(self.result, self.type)
class SizeofNode(ExprNode):
    # Abstract base class for sizeof(x) expression nodes.
    # sizeof is a C compile-time construct, so it is always const
    # and generates no code of its own.
    def check_const(self):
        pass
    def generate_result_code(self, code):
        pass
class SizeofTypeNode(SizeofNode):
    # C sizeof function applied to a type
    #
    # base_type   CBaseTypeNode
    # declarator  CDeclaratorNode
    subexprs = []
    def analyse_types(self, env):
        # Resolve the named type and verify that sizeof() applies to it.
        base_type = self.base_type.analyse(env)
        _, arg_type = self.declarator.analyse(base_type, env)
        self.arg_type = arg_type
        if arg_type.is_pyobject:
            error(self.pos, "Cannot take sizeof Python object")
        elif arg_type.is_void:
            error(self.pos, "Cannot take sizeof void")
        elif not arg_type.is_complete():
            # Bug fix: this branch previously formatted the undefined
            # name 'arg_code' (only defined in result_code), raising a
            # NameError instead of reporting the compile error.
            error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
        self.type = PyrexTypes.c_int_type
    def result_code(self):
        arg_code = self.arg_type.declaration_code("")
        return "(sizeof(%s))" % arg_code
class SizeofVarNode(SizeofNode):
    # C sizeof function applied to a variable
    #
    # operand   ExprNode
    subexprs = ['operand']
    def analyse_types(self, env):
        # sizeof(expr) yields a C int regardless of the operand type.
        self.operand.analyse_types(env)
        self.type = PyrexTypes.c_int_type
    def result_code(self):
        return "(sizeof(%s))" % self.operand.result
    def generate_result_code(self, code):
        # Nothing to emit: the whole expression lives in result_code.
        pass
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
class BinopNode(ExprNode):
    # Base class for binary operator nodes.
    #
    # operator  string
    # operand1  ExprNode
    # operand2  ExprNode
    #
    # Processing during analyse_expressions phase:
    #
    #   analyse_c_operation
    #     Called when neither operand is a pyobject.
    #     - Check operand types and coerce if needed.
    #     - Determine result type and result code fragment.
    #     - Allocate temporary for result if needed.
    subexprs = ['operand1', 'operand2']
    def analyse_types(self, env):
        # Python-level operations coerce both operands to PyObject and
        # produce a temp; otherwise the subclass analyses the C form.
        self.operand1.analyse_types(env)
        self.operand2.analyse_types(env)
        if self.is_py_operation():
            self.coerce_operands_to_pyobjects(env)
            self.type = PyrexTypes.py_object_type
            self.is_temp = 1
        else:
            self.analyse_c_operation(env)
    def is_py_operation(self):
        return (self.operand1.type.is_pyobject
            or self.operand2.type.is_pyobject)
    def coerce_operands_to_pyobjects(self, env):
        self.operand1 = self.operand1.coerce_to_pyobject(env)
        self.operand2 = self.operand2.coerce_to_pyobject(env)
    def check_const(self):
        self.operand1.check_const()
        self.operand2.check_const()
    def generate_result_code(self, code):
        # Python path: call the PyNumber_* function; PyNumber_Power
        # takes a third (modulus) argument, passed as Py_None.
        if self.operand1.type.is_pyobject:
            function = self.py_operation_function()
            if self.operator == "**":
                extra_args = ", Py_None"
            else:
                extra_args = ""
            code.putln(
                "%s = %s(%s, %s%s); if (!%s) %s" % (
                    self.result,
                    function,
                    self.operand1.result,
                    self.operand2.result,
                    extra_args,
                    self.result,
                    code.error_goto(self.pos)))
        else:
            if self.is_temp:
                self.generate_c_operation_code(code)
    def type_error(self):
        if not (self.operand1.type.is_error
                or self.operand2.type.is_error):
            error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
                (self.operator, self.operand1.type,
                    self.operand2.type))
        self.type = PyrexTypes.error_type
class NumBinopNode(BinopNode):
    # Binary operation taking numeric arguments.
    # inplace: when true, the Python-level call uses the
    # PyNumber_InPlace* variant of the operator function.
    inplace = 0
    def analyse_c_operation(self, env):
        type1 = self.operand1.type
        type2 = self.operand2.type
        self.type = self.compute_c_result_type(type1, type2)
        if not self.type:
            self.type_error()
    def compute_c_result_type(self, type1, type2):
        # Default C result: the wider of the two numeric types, or
        # None (-> type error) if the operands are not acceptable.
        if self.c_types_okay(type1, type2):
            return PyrexTypes.widest_numeric_type(type1, type2)
        else:
            return None
    def c_types_okay(self, type1, type2):
        return type1.is_numeric and type2.is_numeric
    def result_code(self):
        return "(%s %s %s)" % (
            self.operand1.result,
            self.operator,
            self.operand2.result)
    def py_operation_function(self):
        # Swap in the in-place variant for augmented assignments.
        function = self.py_functions[self.operator]
        if self.inplace:
            function = function.replace("PyNumber_", "PyNumber_InPlace")
        return function
    # Python C API function for each supported operator.
    py_functions = {
        "|":        "PyNumber_Or",
        "^":        "PyNumber_Xor",
        "&":        "PyNumber_And",
        "<<":       "PyNumber_Lshift",
        ">>":       "PyNumber_Rshift",
        "+":        "PyNumber_Add",
        "-":        "PyNumber_Subtract",
        "*":        "PyNumber_Multiply",
        "/":        "PyNumber_Divide",
        "%":        "PyNumber_Remainder",
        "**":       "PyNumber_Power"
    }
class IntBinopNode(NumBinopNode):
    # Binary operation taking integer arguments.
    def c_types_okay(self, type1, type2):
        # Restrict the C-level operation to integer operands.
        return type1.is_int and type2.is_int
class AddNode(NumBinopNode):
    # '+' operator.
    def is_py_operation(self):
        # String concatenation is delegated to Python.
        if self.operand1.type.is_string \
            and self.operand2.type.is_string:
                return 1
        return NumBinopNode.is_py_operation(self)
    def compute_c_result_type(self, type1, type2):
        # Pointer arithmetic: ptr + int (in either order) keeps the
        # pointer type; otherwise fall back to numeric widening.
        if type1.is_ptr and type2.is_int:
            return type1
        if type1.is_int and type2.is_ptr:
            return type2
        return NumBinopNode.compute_c_result_type(self, type1, type2)
class SubNode(NumBinopNode):
    # '-' operator.
    def compute_c_result_type(self, type1, type2):
        # ptr - int keeps the pointer type; ptr - ptr is an integer
        # difference; anything else falls back to numeric widening.
        if type1.is_ptr and type2.is_int:
            return type1
        if type1.is_ptr and type2.is_ptr:
            return PyrexTypes.c_int_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)
class MulNode(NumBinopNode):
    # '*' operator.
    def is_py_operation(self):
        # String repetition (string * int, in either order) must go
        # through Python.
        type1 = self.operand1.type
        type2 = self.operand2.type
        repeats_string = (type1.is_string and type2.is_int) \
            or (type2.is_string and type1.is_int)
        if repeats_string:
            return 1
        return NumBinopNode.is_py_operation(self)
class ModNode(IntBinopNode):
    # '%' operator.
    def is_py_operation(self):
        # '%' with a string operand is Python string formatting.
        if self.operand1.type.is_string:
            return 1
        if self.operand2.type.is_string:
            return 1
        return IntBinopNode.is_py_operation(self)
class PowNode(NumBinopNode):
    # '**' operator.
    def analyse_types(self, env):
        # Record that the generated module needs the C pow() function.
        env.pow_function_used = 1
        NumBinopNode.analyse_types(self, env)
    def compute_c_result_type(self, type1, type2):
        # C-level exponentiation goes through pow(), which returns double.
        if not self.c_types_okay(type1, type2):
            return None
        return PyrexTypes.c_double_type
    def result_code(self):
        return "pow(%s, %s)" % (
            self.operand1.result, self.operand2.result)
class BoolBinopNode(ExprNode):
    # Short-circuiting boolean operation.
    #
    # operator   string
    # operand1   ExprNode
    # operand2   ExprNode
    # temp_bool  ExprNode     used internally
    temp_bool = None
    subexprs = ['operand1', 'operand2', 'temp_bool']
    def analyse_types(self, env):
        # Mixed/Python operands: result is a PyObject, with an extra C
        # int temp to hold the truth value of operand1.  Pure C operands:
        # both are coerced to C booleans and the result is a C int.
        self.operand1.analyse_types(env)
        self.operand2.analyse_types(env)
        if self.operand1.type.is_pyobject or \
            self.operand2.type.is_pyobject:
                self.operand1 = self.operand1.coerce_to_pyobject(env)
                self.operand2 = self.operand2.coerce_to_pyobject(env)
                self.temp_bool = TempNode(self.pos,
                    PyrexTypes.c_int_type, env)
                self.type = PyrexTypes.py_object_type
        else:
            self.operand1 = self.operand1.coerce_to_boolean(env)
            self.operand2 = self.operand2.coerce_to_boolean(env)
            self.type = PyrexTypes.c_int_type
        # For what we're about to do, it's vital that
        # both operands be temp nodes.
        self.operand1 = self.operand1.coerce_to_temp(env) #CTT
        self.operand2 = self.operand2.coerce_to_temp(env)
        # coerce_to_simple does not seem to be sufficient
        #self.operand1 = self.operand1.coerce_to_simple(env)
        #self.operand2 = self.operand2.coerce_to_simple(env)
        self.is_temp = 1
    def allocate_temps(self, env, result = None):
        # We don't need both operands at the same time, and
        # one of the operands will also be our result. So we
        # use an allocation strategy here which results in
        # this node and both its operands sharing the same
        # result variable. This allows us to avoid some
        # assignments and increfs/decrefs that would otherwise
        # be necessary.
        self.allocate_temp(env, result)
        self.operand1.allocate_temps(env, self.result)
        if self.temp_bool:
            self.temp_bool.allocate_temp(env)
            self.temp_bool.release_temp(env)
        self.operand2.allocate_temps(env, self.result)
        # We haven't called release_temp on either operand,
        # because although they are temp nodes, they don't own
        # their result variable. And because they are temp
        # nodes, any temps in their subnodes will have been
        # released before their allocate_temps returned.
        # Therefore, they contain no temp vars that need to
        # be released.
    def check_const(self):
        self.operand1.check_const()
        self.operand2.check_const()
    def result_code(self):
        return "(%s %s %s)" % (
            self.operand1.result,
            self.py_to_c_op[self.operator],
            self.operand2.result)
    # C spelling of the Python boolean operators.
    py_to_c_op = {'and': "&&", 'or': "||"}
    def generate_evaluation_code(self, code):
        # Short-circuit: only evaluate operand2 when operand1 does not
        # decide the outcome ('and' -> operand1 true; 'or' -> false).
        # Both operands write into the shared result variable.
        self.operand1.generate_evaluation_code(code)
        test_result = self.generate_operand1_test(code)
        if self.operator == 'and':
            sense = ""
        else:
            sense = "!"
        code.putln(
            "if (%s%s) {" % (
                sense,
                test_result))
        self.operand1.generate_disposal_code(code)
        self.operand2.generate_evaluation_code(code)
        code.putln(
            "}")
    def generate_operand1_test(self, code):
        # Generate code to test the truth of the first operand.
        if self.type.is_pyobject:
            test_result = self.temp_bool.result
            code.putln(
                "%s = PyObject_IsTrue(%s); if (%s < 0) %s" % (
                    test_result,
                    self.operand1.result,
                    test_result,
                    code.error_goto(self.pos)))
        else:
            test_result = self.operand1.result
        return test_result
class CmpNode:
    # Mixin class containing code common to PrimaryCmpNodes
    # and CascadedCmpNodes.
    def is_python_comparison(self):
        # A comparison is Python-level if any operand (including in the
        # cascade) is a Python object, or the operator is in/not_in.
        return (self.has_python_operands()
            or (self.cascade and self.cascade.is_python_comparison())
            or self.operator in ('in', 'not_in'))
    def check_types(self, env, operand1, op, operand2):
        # NOTE(review): the message formats self.operator rather than
        # the op argument; for cascaded comparisons these may differ --
        # confirm which operator should be reported.
        if not self.types_okay(operand1, op, operand2):
            error(self.pos, "Invalid types for '%s' (%s, %s)" %
                (self.operator, operand1.type, operand2.type))
    def types_okay(self, operand1, op, operand2):
        # C-level comparisons are allowed between compatible pointers
        # (or a pointer and a null pointer) and between numeric types,
        # except that 'is'/'is_not' do not apply to numbers.
        type1 = operand1.type
        type2 = operand2.type
        if type1.is_error or type2.is_error:
            return 1
        if type1.is_pyobject: # type2 will be, too
            return 1
        elif type1.is_ptr:
            return type1.is_null_ptr or type2.is_null_ptr \
                or type1.same_as(type2)
        elif (type1.is_numeric and type2.is_numeric
                and op not in ('is', 'is_not')):
            return 1
        else:
            return 0
    def generate_operation_code(self, code, result,
            operand1, op , operand2):
        # Emit one comparison step: membership tests use
        # PySequence_Contains, Python comparisons go through
        # PyObject_Cmp, everything else is a plain C comparison.
        if op == 'in' or op == 'not_in':
            code.putln(
                "%s = PySequence_Contains(%s, %s); if (%s < 0) %s" % (
                    result,
                    operand2.result,
                    operand1.result,
                    result,
                    code.error_goto(self.pos)))
            if op == 'not_in':
                code.putln(
                    "%s = !%s;" % (
                        result, result))
        elif (operand1.type.is_pyobject
                and op not in ('is', 'is_not')):
            code.putln(
                "if (PyObject_Cmp(%s, %s, &%s) < 0) %s" % (
                    operand1.result,
                    operand2.result,
                    result,
                    code.error_goto(self.pos)))
            # PyObject_Cmp yields -1/0/1; compare that against 0 with
            # the original operator to get the boolean result.
            code.putln(
                "%s = %s %s 0;" % (
                    result, result, op))
        else:
            code.putln("%s = %s %s %s;" % (
                result,
                operand1.result,
                self.c_operator(op),
                operand2.result))
    def c_operator(self, op):
        # Map Python identity operators to their C spelling.
        if op == 'is':
            return "=="
        elif op == 'is_not':
            return "!="
        else:
            return op
class PrimaryCmpNode(ExprNode, CmpNode):
    #  Non-cascaded comparison, or the first comparison of a
    #  cascaded sequence.
    #
    #  operator    string
    #  operand1    ExprNode
    #  operand2    ExprNode
    #  cascade     CascadedCmpNode
    #
    #  The subexprs mechanism is deliberately not used here: the
    #  cascading machinery is too complicated for it, so every
    #  framework method relying on it is overridden below.

    cascade = None

    def analyse_types(self, env):
        self.operand1.analyse_types(env)
        self.operand2.analyse_types(env)
        if self.cascade:
            self.cascade.analyse_types(env, self.operand2)
        self.is_pycmp = self.is_python_comparison()
        if self.is_pycmp:
            self.coerce_operands_to_pyobjects(env)
        if self.cascade:
            # The shared operand must be safely re-usable by the
            # next link of the cascade.
            #self.operand2 = self.operand2.coerce_to_temp(env) #CTT
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)
        self.check_operand_types(env)
        self.type = PyrexTypes.c_int_type
        if self.is_pycmp or self.cascade:
            self.is_temp = 1

    def check_operand_types(self, env):
        self.check_types(env, self.operand1, self.operator, self.operand2)
        if self.cascade:
            self.cascade.check_operand_types(env, self.operand2)

    def has_python_operands(self):
        return self.operand1.type.is_pyobject or self.operand2.type.is_pyobject

    def coerce_operands_to_pyobjects(self, env):
        self.operand1 = self.operand1.coerce_to_pyobject(env)
        self.operand2 = self.operand2.coerce_to_pyobject(env)
        if self.cascade:
            self.cascade.coerce_operands_to_pyobjects(env)

    def allocate_subexpr_temps(self, env):
        for operand in (self.operand1, self.operand2):
            operand.allocate_temps(env)
        if self.cascade:
            self.cascade.allocate_subexpr_temps(env)

    def release_subexpr_temps(self, env):
        for operand in (self.operand1, self.operand2):
            operand.release_temp(env)
        if self.cascade:
            self.cascade.release_subexpr_temps(env)

    def check_const(self):
        self.operand1.check_const()
        self.operand2.check_const()
        if self.cascade:
            # A cascaded comparison is never a compile-time constant.
            self.not_const()

    def result_code(self):
        c_op = self.c_operator(self.operator)
        return "(%s %s %s)" % (self.operand1.result, c_op, self.operand2.result)

    def generate_evaluation_code(self, code):
        self.operand1.generate_evaluation_code(code)
        self.operand2.generate_evaluation_code(code)
        if self.is_temp:
            self.generate_operation_code(code, self.result,
                self.operand1, self.operator, self.operand2)
            if self.cascade:
                self.cascade.generate_evaluation_code(code,
                    self.result, self.operand2)
            self.operand1.generate_disposal_code(code)
            self.operand2.generate_disposal_code(code)

    def generate_subexpr_disposal_code(self, code):
        # Only ever called for a non-cascaded comparison, so just
        # the two main operands need disposing of.
        for operand in (self.operand1, self.operand2):
            operand.generate_disposal_code(code)
class CascadedCmpNode(Node, CmpNode):
    #  Not a complete expression node: it hangs off the side of
    #  another comparison node, shares its left operand with that
    #  node, and shares its result with the PrimaryCmpNode at the
    #  head of the chain.
    #
    #  operator    string
    #  operand2    ExprNode
    #  cascade     CascadedCmpNode

    cascade = None

    def analyse_types(self, env, operand1):
        self.operand2.analyse_types(env)
        if self.cascade:
            self.cascade.analyse_types(env, self.operand2)

    def check_operand_types(self, env, operand1):
        self.check_types(env, operand1, self.operator, self.operand2)
        if self.cascade:
            self.cascade.check_operand_types(env, self.operand2)

    def has_python_operands(self):
        # The left operand belongs to the previous link in the chain.
        return self.operand2.type.is_pyobject

    def coerce_operands_to_pyobjects(self, env):
        self.operand2 = self.operand2.coerce_to_pyobject(env)
        if self.cascade:
            self.cascade.coerce_operands_to_pyobjects(env)

    def coerce_cascaded_operands_to_temp(self, env):
        if self.cascade:
            #self.operand2 = self.operand2.coerce_to_temp(env) #CTT
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)

    def allocate_subexpr_temps(self, env):
        self.operand2.allocate_temps(env)
        if self.cascade:
            self.cascade.allocate_subexpr_temps(env)

    def release_subexpr_temps(self, env):
        self.operand2.release_temp(env)
        if self.cascade:
            self.cascade.release_subexpr_temps(env)

    def generate_evaluation_code(self, code, result, operand1):
        # Only evaluated when the comparison so far has succeeded.
        code.putln("if (%s) {" % result)
        self.operand2.generate_evaluation_code(code)
        self.generate_operation_code(code, result,
            operand1, self.operator, self.operand2)
        if self.cascade:
            self.cascade.generate_evaluation_code(
                code, result, self.operand2)
        # Cascaded cmp result is always temp
        self.operand2.generate_disposal_code(code)
        code.putln("}")
# Maps each binary operator, as written in Pyrex source, to the
# ExprNode subclass that implements it (used by binop_node below).
binop_node_classes = {
    "or":       BoolBinopNode,
    "and":      BoolBinopNode,
    "|":        IntBinopNode,
    "^":        IntBinopNode,
    "&":        IntBinopNode,
    "<<":       IntBinopNode,
    ">>":       IntBinopNode,
    "+":        AddNode,
    "-":        SubNode,
    "*":        MulNode,
    "/":        NumBinopNode,
    "%":        ModNode,
    "**":       PowNode
}
def binop_node(pos, operator, operand1, operand2):
    # Construct a binop node of the class appropriate for the
    # given operator. Raises KeyError for an unknown operator.
    node_class = binop_node_classes[operator]
    return node_class(pos,
        operator = operator,
        operand1 = operand1,
        operand2 = operand2)
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
class CoercionNode(ExprNode):
    #  Abstract base class for coercion nodes.
    #
    #  arg       ExprNode       node being coerced

    subexprs = ['arg']

    def __init__(self, arg):
        # Coercion nodes are created during analyse_types, so part
        # of that phase's work happens here; the node inherits its
        # source position from the expression being coerced.
        self.pos = arg.pos
        self.arg = arg
        if debug_coercion:
            # Compile-time debugging trace.
            print self, "Coercing", self.arg
class CastNode(CoercionNode):
    #  Wraps a node in a C type cast.

    def __init__(self, arg, new_type):
        CoercionNode.__init__(self, arg)
        self.type = new_type

    def result_code(self):
        # The argument's result wrapped in a C cast expression.
        decl = self.type.declaration_code("")
        return "((%s)%s)" % (decl, self.arg.result)

    def result_as_extension_type(self):
        return self.result

    def generate_result_code(self, code):
        # A cast is a purely compile-time wrapper: no code of its own.
        self.arg.generate_result_code(code)
class PyTypeTestNode(CoercionNode):
    #  Checks at run time that a generic Python object is an
    #  instance of a particular extension type. The result of the
    #  argument node is borrowed, not copied.

    def __init__(self, arg, dst_type, env):
        #  The arg is known to be a Python object, and
        #  the dst_type is known to be an extension type.
        assert dst_type.is_extension_type, "PyTypeTest on non extension type"
        CoercionNode.__init__(self, arg)
        self.type = dst_type
        env.use_utility_code(type_test_utility_code)

    def result_in_temp(self):
        return self.arg.result_in_temp()

    def is_ephemeral(self):
        return self.arg.is_ephemeral()

    def result_code(self):
        return self.arg.result

    def result_as_extension_type(self):
        # The borrowed result, cast to the target extension type.
        return "((%s)%s)" % (
            self.type.declaration_code(""), self.arg.result)

    def generate_result_code(self, code):
        if not self.type.typeobj_is_available():
            # Without a type object name we have nothing to test against.
            error(self.pos, "Cannot test type of extern C class "
                "without type object name specification")
            return
        code.putln(
            "if (!__Pyx_TypeTest(%s, %s)) %s" % (
                self.result,
                self.type.typeptr_cname,
                code.error_goto(self.pos)))

    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)
class CoerceToPyTypeNode(CoercionNode):
    #  Converts a value of a C data type to a Python object.

    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = PyrexTypes.py_object_type
        if not arg.type.to_py_function:
            error(arg.pos,
                "Cannot convert '%s' to Python object" % arg.type)
        # The new Python reference always lives in a temporary.
        self.is_temp = 1

    def generate_result_code(self, code):
        conversion = self.arg.type.to_py_function
        code.putln('%s = %s(%s); if (!%s) %s' % (
            self.result,
            conversion,
            self.arg.result,
            self.result,
            code.error_goto(self.pos)))
class CoerceFromPyTypeNode(CoercionNode):
    #  Converts a Python object to a value of a C data type.
    #  Reports an error at construction time if no conversion
    #  function exists for the target type, or if a char * would
    #  be obtained from a temporary Python value (which would
    #  leave a dangling pointer).

    def __init__(self, result_type, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = result_type
        if not result_type.from_py_function:
            error(arg.pos,
                "Cannot convert Python object to '%s'" % result_type)
        if self.type.is_string and self.arg.is_ephemeral():
            error(arg.pos,
                "Obtaining char * from temporary Python value")
        self.is_temp = 1

    def generate_result_code(self, code):
        # The conversion function signals failure through the
        # exception state, so test PyErr_Occurred afterwards.
        # (Fix: removed the unused local 'opnd' present before.)
        function = self.type.from_py_function
        code.putln('%s = %s(%s); if (PyErr_Occurred()) %s' % (
            self.result,
            function,
            self.arg.result,
            code.error_goto(self.pos)))
class CoerceToBooleanNode(CoercionNode):
    #  Used when a result needs to be consumed in a boolean
    #  context.

    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = PyrexTypes.c_int_type
        if arg.type.is_pyobject:
            # Python objects need a run-time truth test into a temp.
            self.is_temp = 1

    def check_const(self):
        if self.is_temp:
            self.not_const()
        self.arg.check_const()

    def result_code(self):
        return "(%s != 0)" % self.arg.result

    def generate_result_code(self, code):
        # C-level arguments need no code; their result_code is the test.
        if not self.arg.type.is_pyobject:
            return
        code.putln(
            "%s = PyObject_IsTrue(%s); if (%s < 0) %s" % (
                self.result,
                self.arg.result,
                self.result,
                code.error_goto(self.pos)))
class CoerceToTempNode(CoercionNode):
    #  Forces the result of another node to be stored in a
    #  temporary. Only used when the argument node's result is
    #  not already in a temporary.

    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = self.arg.type
        self.is_temp = 1

    def generate_result_code(self, code):
        # The argument's evaluation code has already been emitted by
        # the generic generate_subexpr_evaluation_code, so only the
        # copy (plus a new reference for Python objects) remains.
        code.putln("%s = %s;" % (self.result, self.arg.result))
        if self.type.is_pyobject:
            code.put_incref(self.result, self.type)
class CloneNode(CoercionNode):
    #  Used when the result of another node must be consumed
    #  multiple times. The argument node's result must be in a
    #  temporary. The result is "borrowed" from the argument node,
    #  so no evaluation or disposal code is generated here; the
    #  original owner of the argument node remains responsible for
    #  both.

    subexprs = [] # Arg is not considered a subexpr

    def __init__(self, arg):
        CoercionNode.__init__(self, arg)
        self.type = arg.type

    def result_code(self):
        return self.arg.result

    def result_as_extension_type(self):
        return self.arg.result_as_extension_type()

    def generate_evaluation_code(self, code):
        pass

    def generate_result_code(self, code):
        pass
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
get_name_utility_code = \
"""
static PyObject *__Pyx_GetName(PyObject *dict, char *name) {
PyObject *result;
result = PyObject_GetAttrString(dict, name);
if (!result)
PyErr_SetString(PyExc_NameError, name);
return result;
}
"""
get_name_interned_utility_code = \
"""
static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
PyObject *result;
result = PyObject_GetAttr(dict, name);
if (!result)
PyErr_SetObject(PyExc_NameError, name);
return result;
}
"""
#------------------------------------------------------------------------------------
import_utility_code = \
"""
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) {
PyObject *__import__ = 0;
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
__import__ = PyObject_GetAttrString(%(BUILTINS)s, "__import__");
if (!__import__)
goto bad;
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(%(GLOBALS)s);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
module = PyObject_CallFunction(__import__, "OOOO",
name, global_dict, empty_dict, list);
bad:
Py_XDECREF(empty_list);
Py_XDECREF(__import__);
Py_XDECREF(empty_dict);
return module;
}
""" % {
"BUILTINS": Naming.builtins_cname,
"GLOBALS": Naming.module_cname,
}
#------------------------------------------------------------------------------------
get_exception_utility_code = \
"""
static PyObject *__Pyx_GetExcValue(void) {
PyObject *type = 0, *value = 0, *tb = 0;
PyObject *result = 0;
PyThreadState *tstate = PyThreadState_Get();
PyErr_Fetch(&type, &value, &tb);
PyErr_NormalizeException(&type, &value, &tb);
if (PyErr_Occurred())
goto bad;
if (!value) {
value = Py_None;
Py_INCREF(value);
}
Py_XDECREF(tstate->exc_type);
Py_XDECREF(tstate->exc_value);
Py_XDECREF(tstate->exc_traceback);
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
result = value;
Py_XINCREF(result);
type = 0;
value = 0;
tb = 0;
bad:
Py_XDECREF(type);
Py_XDECREF(value);
Py_XDECREF(tb);
return result;
}
"""
#------------------------------------------------------------------------------------
unpacking_utility_code = \
"""
static void __Pyx_UnpackError(void) {
PyErr_SetString(PyExc_ValueError, "unpack sequence of wrong size");
}
static PyObject *__Pyx_UnpackItem(PyObject *seq, int i) {
PyObject *item;
if (!(item = PySequence_GetItem(seq, i))) {
if (PyErr_ExceptionMatches(PyExc_IndexError))
__Pyx_UnpackError();
}
return item;
}
static int __Pyx_EndUnpack(PyObject *seq, int i) {
PyObject *item;
if (item = PySequence_GetItem(seq, i)) {
Py_DECREF(item);
__Pyx_UnpackError();
return -1;
}
PyErr_Clear();
return 0;
}
"""
#------------------------------------------------------------------------------------
type_test_utility_code = \
"""
static int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (!type) {
PyErr_Format(PyExc_SystemError, "Missing type object");
return 0;
}
if (obj == Py_None || PyObject_TypeCheck(obj, type))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %s to %s",
obj->ob_type->tp_name, type->tp_name);
return 0;
}
"""
#------------------------------------------------------------------------------------
create_class_utility_code = \
"""
static PyObject *__Pyx_CreateClass(
PyObject *bases, PyObject *dict, PyObject *name, char *modname)
{
PyObject *py_modname;
PyObject *result = 0;
py_modname = PyString_FromString(modname);
if (!py_modname)
goto bad;
if (PyDict_SetItemString(dict, "__module__", py_modname) < 0)
goto bad;
result = PyClass_New(bases, dict, name);
bad:
Py_XDECREF(py_modname);
return result;
}
"""
#------------------------------------------------------------------------------------
| Python |
#
# Pyrex - Symbol Table
#
import re
from Errors import error, InternalError
import Options
import Naming
from PyrexTypes import c_int_type, \
py_object_type, c_char_array_type, \
CEnumType, CStructOrUnionType, PyExtensionType
from TypeSlots import \
pyfunction_signature, pymethod_signature, \
get_special_method_signature, get_property_accessor_signature
# Matches strings that look like Python identifiers; used to decide
# whether a string literal may be interned. Only anchored at the end
# ($) because match() implicitly anchors the start.
identifier_pattern = re.compile(r"[A-Za-z_][A-Za-z0-9_]*$")
class Entry:
    #  A symbol table entry in a Scope or ModuleNamespace.
    #
    #  name             string    Python name of entity
    #  cname            string    C name of entity
    #  type             PyrexType Type of entity
    #  doc              string    Doc string
    #  init             string    Initial value
    #  visibility       'private' or 'public' or 'extern'
    #  is_builtin       boolean   Is a Python builtin name
    #  is_cglobal       boolean   Is a C global variable
    #  is_pyglobal      boolean   Is a Python module-level variable
    #                               or class attribute during
    #                               class construction
    #  is_variable      boolean   Is a variable
    #  is_cfunction     boolean   Is a C function
    #  is_cmethod       boolean   Is a C method of an extension type
    #  is_type          boolean   Is a type definition
    #  is_const         boolean   Is a constant
    #  is_property      boolean   Is a property of an extension type
    #  doc_cname        string or None  C const holding the docstring
    #  getter_cname     string    C func for getting property
    #  setter_cname     string    C func for setting or deleting property
    #  is_self_arg      boolean   Is the "self" arg of an exttype method
    #  is_readonly      boolean   Can't be assigned to
    #  func_cname       string    C func implementing Python func
    #  pos              position  Source position where declared
    #  namespace_cname  string    If is_pyglobal, the C variable
    #                               holding its home namespace
    #  pymethdef_cname  string    PyMethodDef structure
    #  signature        Signature Arg & return types for Python func
    #  init_to_none     boolean   True if initial value should be None
    #  as_variable      Entry     Alternative interpretation of extension
    #                               type name as a variable
    #  xdecref_cleanup  boolean   Use Py_XDECREF for error cleanup
    #  in_cinclude      boolean   Suppress C declaration code
    #  enum_values      [Entry]   For enum types, list of values
    #  qualified_name   string    "modname.funcname" etc.
    #  is_declared_generic  boolean  Declared as PyObject * although its
    #                                  type is an extension type
    #  as_module        None      Module scope, if a cimported module
    #  is_inherited     boolean   Inherited attribute of an extension type
    #  interned_cname   string    C name of interned name string
    #  pystring_cname   string    C name of Python version of string literal
    #  is_interned      boolean   For string const entries, value is interned

    # Flags describing what kind of thing the entry is.
    is_builtin = 0
    is_cglobal = 0
    is_pyglobal = 0
    is_variable = 0
    is_cfunction = 0
    is_cmethod = 0
    is_type = 0
    is_const = 0
    is_property = 0
    is_self_arg = 0
    is_declared_generic = 0
    is_readonly = 0
    is_inherited = 0
    is_interned = 0
    # Miscellaneous defaults, overridden per entry as needed.
    borrowed = 0
    init = ""
    visibility = 'private'
    doc_cname = None
    getter_cname = None
    setter_cname = None
    func_cname = None
    doc = None
    init_to_none = 0
    as_variable = None
    xdecref_cleanup = 0
    in_cinclude = 0
    as_module = None
    interned_cname = None
    pystring_cname = None

    def __init__(self, name, cname, type, pos = None, init = None):
        self.name = name
        self.cname = cname
        self.type = type
        self.pos = pos
        self.init = init
class Scope:
    #  Abstract base class for all symbol table scopes.
    #
    #  name              string             Unqualified name
    #  outer_scope       Scope or None      Enclosing scope
    #  entries           {string : Entry}   Python name to entry, non-types
    #  const_entries     [Entry]            Constant entries
    #  sue_entries       [Entry]            Struct/union/enum entries
    #  arg_entries       [Entry]            Function argument entries
    #  var_entries       [Entry]            User-defined variable entries
    #  pyfunc_entries    [Entry]            Python function entries
    #  cfunc_entries     [Entry]            C function entries
    #  c_class_entries   [Entry]            All extension type entries
    #  temp_entries      [Entry]            Temporary variable entries
    #  free_temp_entries [Entry]            Temp variables currently unused
    #  temp_counter      integer            Counter for naming temp vars
    #  cname_to_entry    {string : Entry}   Temp cname to entry mapping
    #  pow_function_used boolean            The C pow() function is used
    #  return_type       PyrexType or None  Return type of function owning scope
    #  is_py_class_scope boolean            Is a Python class scope
    #  is_c_class_scope  boolean            Is an extension type scope
    #  scope_prefix      string             Disambiguator for C names
    #  in_cinclude       boolean            Suppress C declaration code
    #  qualified_name    string             "modname" or "modname.classname"
    #  pystring_entries  [Entry]            String const entries newly used as
    #                                         Python strings in this scope

    is_py_class_scope = 0
    is_c_class_scope = 0
    scope_prefix = ""
    in_cinclude = 0

    def __init__(self, name, outer_scope, parent_scope):
        # The outer_scope is the next scope in the lookup chain.
        # The parent_scope is used to derive the qualified name of this scope.
        self.name = name
        self.outer_scope = outer_scope
        self.parent_scope = parent_scope
        # Length-prefixed mangling keeps concatenated name prefixes
        # unambiguous.
        mangled_name = "%d%s_" % (len(name), name)
        qual_scope = self.qualifying_scope()
        if qual_scope:
            self.qualified_name = qual_scope.qualify_name(name)
            self.scope_prefix = qual_scope.scope_prefix + mangled_name
        else:
            self.qualified_name = name
            self.scope_prefix = mangled_name
        self.entries = {}
        self.const_entries = []
        self.sue_entries = []
        self.arg_entries = []
        self.var_entries = []
        self.pyfunc_entries = []
        self.cfunc_entries = []
        self.c_class_entries = []
        self.defined_c_classes = []
        self.imported_c_classes = {}
        self.temp_entries = []
        self.free_temp_entries = []
        #self.pending_temp_entries = [] # TEMPORARY
        self.temp_counter = 1
        self.cname_to_entry = {}
        self.pow_function_used = 0
        self.string_to_entry = {}
        self.pystring_entries = []

    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.qualified_name)

    def intern(self, name):
        # Interning is managed by the module-level scope.
        return self.global_scope().intern(name)

    def qualifying_scope(self):
        # Scope used to derive this scope's qualified name.
        #return self.outer_scope
        return self.parent_scope

    def mangle(self, prefix, name = None):
        # Mangle a name into a C identifier unique to this scope.
        if name:
            return "%s%s%s" % (prefix, self.scope_prefix, name)
        else:
            return self.parent_scope.mangle(prefix, self.name)

    def mangle_internal(self, name):
        # Mangle an internal name so as not to clash with any
        # user-defined name in this scope.
        prefix = "%s%s_" % (Naming.pyrex_prefix, name)
        return self.mangle(prefix)
        #return self.parent_scope.mangle(prefix, self.name)

    def global_scope(self):
        # Return the module-level scope containing this scope.
        return self.outer_scope.global_scope()

    def declare(self, name, cname, type, pos):
        # Create new entry, and add to dictionary if
        # name is not None. Reports an error if already
        # declared. (Note: local 'dict' shadows the builtin.)
        dict = self.entries
        if name and dict.has_key(name):
            error(pos, "'%s' redeclared" % name)
        entry = Entry(name, cname, type, pos = pos)
        entry.in_cinclude = self.in_cinclude
        if name:
            entry.qualified_name = self.qualify_name(name)
            dict[name] = entry
        return entry

    def qualify_name(self, name):
        return "%s.%s" % (self.qualified_name, name)

    #def undeclare(self, name):
    #    del self.entries[name]

    def declare_const(self, name, type, value, pos, cname = None):
        # Add an entry for a named constant.
        if not cname:
            if self.in_cinclude:
                cname = name
            else:
                cname = self.mangle(Naming.enum_prefix, name)
        entry = self.declare(name, cname, type, pos)
        entry.is_const = 1
        entry.value = value
        return entry

    def declare_type(self, name, type, pos,
            cname = None, visibility = 'private'):
        # Add an entry for a type definition.
        if not cname:
            cname = name
        entry = self.declare(name, cname, type, pos)
        entry.visibility = visibility
        entry.is_type = 1
        return entry

    def declare_struct_or_union(self, name, kind, scope,
            typedef_flag, pos, cname = None):
        # Add an entry for a struct or union definition.
        # A forward declaration may later be completed by passing
        # a scope for the same name.
        if not cname:
            if self.in_cinclude:
                cname = name
            else:
                cname = self.mangle(Naming.type_prefix, name)
        entry = self.lookup_here(name)
        if not entry:
            type = CStructOrUnionType(name, kind, scope, typedef_flag, cname)
            entry = self.declare_type(name, type, pos, cname)
            self.sue_entries.append(entry)
        else:
            if not (entry.is_type and entry.type.is_struct_or_union):
                error(pos, "'%s' redeclared" % name)
            elif scope and entry.type.scope:
                error(pos, "'%s' already defined" % name)
            else:
                self.check_previous_typedef_flag(entry, typedef_flag, pos)
                if scope:
                    entry.type.scope = scope
        if not scope and not entry.type.scope:
            self.check_for_illegal_incomplete_ctypedef(typedef_flag, pos)
        return entry

    def check_previous_typedef_flag(self, entry, typedef_flag, pos):
        # A name must not be redeclared switching between 'cdef'
        # and 'ctypedef'.
        if typedef_flag <> entry.type.typedef_flag:
            error(pos, "'%s' previously declared using '%s'" % (
                entry.name, ("cdef", "ctypedef")[entry.type.typedef_flag]))

    def declare_enum(self, name, pos, cname, typedef_flag):
        # Add an entry for an enum definition; an anonymous enum
        # is treated as plain C int.
        if name:
            if not cname:
                if self.in_cinclude:
                    cname = name
                else:
                    cname = self.mangle(Naming.type_prefix, name)
            type = CEnumType(name, cname, typedef_flag)
        else:
            type = c_int_type
        entry = self.declare_type(name, type, pos, cname = cname)
        entry.enum_values = []
        self.sue_entries.append(entry)
        return entry

    def declare_var(self, name, type, pos,
            cname = None, visibility = 'private', is_cdef = 0):
        # Add an entry for a variable.
        if not cname:
            if visibility <> 'private':
                cname = name
            else:
                cname = self.mangle(Naming.var_prefix, name)
        entry = self.declare(name, cname, type, pos)
        entry.is_variable = 1
        entry.visibility = visibility
        return entry

    def declare_builtin(self, name, pos):
        # Builtin declaration is delegated towards the module scope.
        return self.outer_scope.declare_builtin(name, pos)

    def declare_pyfunction(self, name, pos):
        # Add an entry for a Python function.
        entry = self.declare_var(name, py_object_type, pos)
        entry.signature = pyfunction_signature
        self.pyfunc_entries.append(entry)
        return entry

    def register_pyfunction(self, entry):
        self.pyfunc_entries.append(entry)

    def declare_cfunction(self, name, type, pos,
            cname = None, visibility = 'private', defining = 0):
        # Add an entry for a C function.
        if not cname:
            if visibility <> 'private':
                cname = name
            else:
                cname = self.mangle(Naming.func_prefix, name)
        entry = self.add_cfunction(name, type, pos, cname, visibility)
        entry.func_cname = cname
        return entry

    def add_cfunction(self, name, type, pos, cname, visibility):
        # Add a C function entry without giving it a func_cname.
        entry = self.declare(name, cname, type, pos)
        entry.is_cfunction = 1
        entry.visibility = visibility
        self.cfunc_entries.append(entry)
        return entry

    def find(self, name, pos):
        # Look up name, report error if not found.
        entry = self.lookup(name)
        if entry:
            return entry
        else:
            error(pos, "'%s' is not declared" % name)

    def lookup(self, name):
        # Look up name in this scope or an enclosing one.
        # Return None if not found.
        return (self.lookup_here(name)
            or (self.outer_scope and self.outer_scope.lookup(name))
            or None)

    def lookup_here(self, name):
        # Look up in this scope only, return None if not found.
        return self.entries.get(name, None)

    def lookup_target(self, name):
        # Look up name in this scope only. Declare as Python
        # variable if not found.
        entry = self.lookup_here(name)
        if not entry:
            entry = self.declare_var(name, py_object_type, None)
        return entry

    def add_string_const(self, value):
        # Add an entry for a string constant.
        cname = self.new_const_cname()
        entry = Entry("", cname, c_char_array_type, init = value)
        self.const_entries.append(entry)
        return entry

    def get_string_const(self, value):
        # Get entry for string constant. Returns an existing
        # one if possible, otherwise creates a new one.
        # String constants are pooled at module level.
        genv = self.global_scope()
        entry = genv.string_to_entry.get(value)
        if not entry:
            entry = self.add_string_const(value)
            genv.string_to_entry[value] = entry
        return entry

    def add_py_string(self, entry):
        # If not already done, allocate a C name for a Python version of
        # a string literal, and add it to the list of Python strings to
        # be created at module init time. If the string resembles a
        # Python identifier, it will be interned.
        if not entry.pystring_cname:
            value = entry.init
            if identifier_pattern.match(value):
                entry.pystring_cname = self.intern(value)
                entry.is_interned = 1
            else:
                entry.pystring_cname = entry.cname + "p"
            self.pystring_entries.append(entry)
            self.global_scope().all_pystring_entries.append(entry)

    def new_const_cname(self):
        # Create a new globally-unique name for a constant.
        # Delegated to the module scope.
        return self.global_scope().new_const_cname()

    def allocate_temp(self, type):
        # Allocate a temporary variable of the given type from the
        # free list if available, otherwise create a new one.
        # Returns the cname of the variable.
        for entry in self.free_temp_entries:
            if entry.type == type:
                self.free_temp_entries.remove(entry)
                return entry.cname
        n = self.temp_counter
        self.temp_counter = n + 1
        cname = "%s%d" % (Naming.pyrex_prefix, n)
        entry = Entry("", cname, type)
        if type.is_pyobject:
            # Python object temps start out null for error cleanup.
            entry.init = "0"
        self.cname_to_entry[entry.cname] = entry
        self.temp_entries.append(entry)
        return entry.cname

    def allocate_temp_pyobject(self):
        # Allocate a temporary PyObject variable.
        return self.allocate_temp(py_object_type)

    def release_temp(self, cname):
        # Release a temporary variable for re-use.
        if not cname: # can happen when type of an expr is void
            return
        entry = self.cname_to_entry[cname]
        if entry in self.free_temp_entries:
            raise InternalError("Temporary variable %s released more than once"
                % cname)
        self.free_temp_entries.append(entry)

    def recycle_pending_temps(self):
        # Obsolete
        pass

    def use_utility_code(self, new_code):
        # Utility code is accumulated at module level.
        self.global_scope().use_utility_code(new_code)

    def generate_library_function_declarations(self, code):
        # Generate extern decls for C library funcs used.
        if self.pow_function_used:
            code.putln("extern double pow(double, double);")

    def defines_any(self, names):
        # Test whether any of the given names are
        # defined in this scope.
        for name in names:
            if name in self.entries:
                return 1
        return 0
class BuiltinScope(Scope):
    #  The builtin namespace.

    def __init__(self):
        Scope.__init__(self, "__builtin__", None, None)

    def declare_builtin(self, name, pos):
        # Builtin names are declared on first use; the C name is
        # the same as the Python name.
        entry = self.declare(name, name, py_object_type, pos)
        entry.is_builtin = 1
        return entry
class ModuleScope(Scope):
# module_name string Python name of the module
# module_cname string C name of Python module object
# #module_dict_cname string C name of module dict object
# method_table_cname string C name of method table
# doc string Module doc string
# doc_cname string C name of module doc string
# const_counter integer Counter for naming constants
# utility_code_used [string] Utility code to be included
# default_entries [Entry] Function argument default entries
# python_include_files [string] Standard Python headers to be included
# include_files [string] Other C headers to be included
# string_to_entry {string : Entry} Map string const to entry
# context Context
# parent_module Scope Parent in the import namespace
# module_entries {string : Entry} For cimport statements
# type_names {string : 1} Set of type names (used during parsing)
# pxd_file_loaded boolean Corresponding .pxd file has been processed
# cimported_modules [ModuleScope] Modules imported with cimport
# intern_map {string : string} Mapping from Python names to interned strs
# interned_names [string] Interned names pending generation of declarations
# all_pystring_entries [Entry] Python string consts from all scopes
def __init__(self, name, parent_module, context):
self.parent_module = parent_module
outer_scope = context.find_submodule("__builtin__")
Scope.__init__(self, name, outer_scope, parent_module)
self.module_name = name
self.context = context
self.module_cname = Naming.module_cname
self.module_dict_cname = Naming.moddict_cname
self.method_table_cname = Naming.methtable_cname
self.doc = ""
self.doc_cname = Naming.moddoc_cname
self.const_counter = 1
self.utility_code_used = []
self.default_entries = []
self.module_entries = {}
self.python_include_files = ["Python.h", "structmember.h"]
self.include_files = []
self.type_names = {}
self.pxd_file_loaded = 0
self.cimported_modules = []
self.intern_map = {}
self.interned_names = []
self.all_pystring_entries = []
def qualifying_scope(self):
return self.parent_module
def global_scope(self):
return self
def declare_builtin(self, name, pos):
entry = Scope.declare_builtin(self, name, pos)
entry.interned_cname = self.intern(name)
return entry
def intern(self, name):
intern_map = self.intern_map
cname = intern_map.get(name)
if not cname:
cname = Naming.interned_prefix + name
intern_map[name] = cname
self.interned_names.append(name)
return cname
def find_module(self, module_name, pos):
# Find a module in the import namespace, interpreting
# relative imports relative to this module's parent.
# Finds and parses the module's .pxd file if the module
# has not been referenced before.
return self.global_scope().context.find_module(
module_name, relative_to = self.parent_module, pos = pos)
def find_submodule(self, name):
# Find and return scope for a submodule of this module,
# creating a new empty one if necessary. Doesn't parse .pxd.
scope = self.lookup_submodule(name)
if not scope:
scope = ModuleScope(name,
parent_module = self, context = self.context)
self.module_entries[name] = scope
return scope
def lookup_submodule(self, name):
# Return scope for submodule of this module, or None.
return self.module_entries.get(name, None)
def add_include_file(self, filename):
if filename not in self.python_include_files \
and filename not in self.include_files:
self.include_files.append(filename)
def add_imported_module(self, scope):
if scope not in self.cimported_modules:
self.cimported_modules.append(scope)
def add_imported_entry(self, name, entry, pos):
if entry not in self.entries:
self.entries[name] = entry
else:
error(pos, "'%s' redeclared" % name)
def declare_module(self, name, scope, pos):
# Declare a cimported module. This is represented as a
# Python module-level variable entry with a module
# scope attached to it. Reports an error and returns
# None if previously declared as something else.
entry = self.lookup_here(name)
if entry:
if not (entry.is_pyglobal and not entry.as_module):
error(pos, "'%s' redeclared" % name)
return None
else:
entry = self.declare_var(name, py_object_type, pos)
entry.as_module = scope
self.cimported_modules.append(scope)
return entry
    def declare_var(self, name, type, pos,
            cname = None, visibility = 'private', is_cdef = 0):
        # Add an entry for a global variable. If it is a Python
        # object type, and not declared with cdef, it will live
        # in the module dictionary, otherwise it will be a C
        # global variable.
        entry = Scope.declare_var(self, name, type, pos,
            cname, visibility, is_cdef)
        if not visibility in ('private', 'public', 'extern'):
            error(pos, "Module-level variable cannot be declared %s" % visibility)
        if not is_cdef:
            # A non-cdef module-level name must be a plain Python
            # object slot in the module dict; anything else here is
            # a compiler invariant violation, not a user error.
            if not (type.is_pyobject and not type.is_extension_type):
                raise InternalError(
                    "Non-cdef global variable is not a generic Python object")
            entry.is_pyglobal = 1
            entry.namespace_cname = self.module_cname
            if Options.intern_names:
                entry.interned_cname = self.intern(name)
        else:
            entry.is_cglobal = 1
            self.var_entries.append(entry)
        return entry
def declare_global(self, name, pos):
entry = self.lookup_here(name)
if not entry:
self.declare_var(name, py_object_type, pos)
def add_default_value(self, type):
# Add an entry for holding a function argument
# default value.
cname = self.new_const_cname()
entry = Entry("", cname, type)
self.default_entries.append(entry)
return entry
def new_const_cname(self):
# Create a new globally-unique name for a constant.
n = self.const_counter
self.const_counter = n + 1
return "%s%d" % (Naming.const_prefix, n)
def use_utility_code(self, new_code):
# Add string to list of utility code to be included,
# if not already there (tested using 'is').
for old_code in self.utility_code_used:
if old_code is new_code:
return
self.utility_code_used.append(new_code)
def declare_c_class(self, name, pos, defining, implementing,
module_name, base_type, objstruct_cname, typeobj_cname,
visibility, typedef_flag):
#
#print "declare_c_class:", name
#print "...visibility =", visibility
#
# Look for previous declaration as a type
#
entry = self.lookup_here(name)
if entry:
type = entry.type
if not (entry.is_type and type.is_extension_type):
entry = None # Will cause an error when we redeclare it
else:
self.check_previous_typedef_flag(entry, typedef_flag, pos)
if base_type <> type.base_type:
error(pos, "Base type does not match previous declaration")
#
# Make a new entry if needed
#
if not entry:
type = PyExtensionType(name, typedef_flag, base_type)
if visibility == 'extern':
type.module_name = module_name
else:
type.module_name = self.qualified_name
type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
entry = self.declare_type(name, type, pos, visibility = visibility)
if objstruct_cname:
type.objstruct_cname = objstruct_cname
elif not entry.in_cinclude:
type.objstruct_cname = self.mangle(Naming.objstruct_prefix, name)
else:
error(entry.pos,
"Object name required for 'public' or 'extern' C class")
self.attach_var_entry_to_c_class(entry)
self.c_class_entries.append(entry)
#
# Check for re-definition and create scope if needed
#
if not type.scope:
if defining or implementing:
scope = CClassScope(name = name, outer_scope = self,
visibility = visibility)
if base_type:
scope.declare_inherited_c_attributes(base_type.scope)
type.set_scope(scope)
else:
self.check_for_illegal_incomplete_ctypedef(typedef_flag, pos)
else:
if defining and type.scope.defined:
error(pos, "C class '%s' already defined" % name)
elif implementing and type.scope.implemented:
error(pos, "C class '%s' already implemented" % name)
#
# Fill in options, checking for compatibility with any previous declaration
#
if implementing: # So that filenames in runtime exceptions refer to
entry.pos = pos # the .pyx file and not the .pxd file
if entry.visibility <> visibility:
error(pos, "Declaration of '%s' as '%s' conflicts with previous "
"declaration as '%s'" % (class_name, visibility, entry.visibility))
if objstruct_cname:
if type.objstruct_cname and type.objstruct_cname <> objstruct_cname:
error(pos, "Object struct name differs from previous declaration")
type.objstruct_cname = objstruct_cname
if typeobj_cname:
if type.typeobj_cname and type.typeobj_cname <> typeobj_cname:
error(pos, "Type object name differs from previous declaration")
type.typeobj_cname = typeobj_cname
#
# Return new or existing entry
#
return entry
    def check_for_illegal_incomplete_ctypedef(self, typedef_flag, pos):
        # A forward-declared extension type may not use 'ctypedef'
        # unless it comes from a C include file.
        if typedef_flag and not self.in_cinclude:
            error(pos, "Forward-referenced type must use 'cdef', not 'ctypedef'")
    def check_c_classes(self):
        # Performs post-analysis checking and finishing up of extension types
        # being implemented in this module. This is called only for the main
        # .pyx file scope, not for cimported .pxd scopes.
        #
        # Checks all extension types declared in this scope to
        # make sure that:
        #
        #    * The extension type is implemented
        #    * All required object and type names have been specified or generated
        #    * All non-inherited C methods are implemented
        #
        # Also carries out the following:
        #    * Allocates vtable-related names if needed.
        #
        debug_check_c_classes = 0
        if debug_check_c_classes:
            print "Scope.check_c_classes: checking scope", self.qualified_name
        for entry in self.c_class_entries:
            if debug_check_c_classes:
                print "...entry", entry.name, entry
                print "......type =", entry.type
                print "......visibility =", entry.visibility
            type = entry.type
            name = entry.name
            visibility = entry.visibility
            # Check defined
            if not type.scope:
                error(entry.pos, "C class '%s' is declared but not defined" % name)
            # Generate typeobj_cname
            if visibility <> 'extern' and not type.typeobj_cname:
                type.typeobj_cname = self.mangle(Naming.typeobj_prefix, name)
            ## Generate typeptr_cname
            #type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
            # Check C methods defined
            if type.scope:
                for method_entry in type.scope.cfunc_entries:
                    if not method_entry.is_inherited and not method_entry.func_cname:
                        error(method_entry.pos, "C method '%s' is declared but not defined" %
                            method_entry.name)
            ## Generate var entry
            #self.attach_var_entry_to_c_class(entry)
            # Allocated vtable-related names if necessary
            #print "ModuleScope.check_c_classes:", type ###
            # A type needs a vtable slot if its base has one (it must
            # reuse the inherited slot) or if it declares C methods.
            if type.base_type and type.base_type.vtabslot_cname:
                #print "...allocating vtabslot_cname because base type has one" ###
                type.vtabslot_cname = "%s.%s" % (
                    Naming.obj_base_cname, type.base_type.vtabslot_cname)
            elif type.scope and type.scope.cfunc_entries:
                #print "...allocating vtabslot_cname because there are C methods" ###
                type.vtabslot_cname = Naming.vtabslot_cname
            if type.vtabslot_cname:
                #print "...allocating other vtable related cnames" ###
                type.vtabstruct_cname = self.mangle(Naming.vtabstruct_prefix, entry.name)
                type.vtabptr_cname = self.mangle(Naming.vtabptr_prefix, entry.name)
                type.vtable_cname = self.mangle(Naming.vtable_prefix, entry.name)
def attach_var_entry_to_c_class(self, entry):
# The name of an extension class has to serve as both a type
# name and a variable name holding the type object. It is
# represented in the symbol table by a type entry with a
# variable entry attached to it. For the variable entry,
# we use a read-only C global variable whose name is an
# expression that refers to the type object.
var_entry = Entry(name = entry.name,
type = py_object_type,
pos = entry.pos,
cname = "((PyObject*)%s)" % entry.type.typeptr_cname)
var_entry.is_variable = 1
var_entry.is_cglobal = 1
var_entry.is_readonly = 1
entry.as_variable = var_entry
class LocalScope(Scope):
    # Namespace of a function body: arguments and local variables.

    def __init__(self, name, outer_scope):
        Scope.__init__(self, name, outer_scope, outer_scope)

    def mangle(self, prefix, name):
        # Local names need no scope qualification.
        return prefix + name

    def declare_arg(self, name, type, pos):
        # Add an entry for an argument of a function.
        cname = self.mangle(Naming.var_prefix, name)
        entry = self.declare(name, cname, type, pos)
        entry.is_variable = 1
        if type.is_pyobject:
            entry.init = "0"
        #entry.borrowed = 1 # Not using borrowed arg refs for now
        self.arg_entries.append(entry)
        return entry

    def declare_var(self, name, type, pos,
            cname = None, visibility = 'private', is_cdef = 0):
        # Add an entry for a local variable.
        if visibility in ('public', 'readonly'):
            error(pos, "Local variable cannot be declared %s" % visibility)
        entry = Scope.declare_var(self, name, type, pos,
            cname, visibility, is_cdef)
        entry.init_to_none = type.is_pyobject
        self.var_entries.append(entry)
        return entry

    def declare_global(self, name, pos):
        # Pull entry from global scope into local scope.
        if self.lookup_here(name):
            # BUG FIX: the original passed the literal "'%s' redeclared"
            # without "% name", so the message was never formatted.
            error(pos, "'%s' redeclared" % name)
        else:
            entry = self.global_scope().lookup_target(name)
            self.entries[name] = entry
class StructOrUnionScope(Scope):
    # Namespace of a C struct or union.

    def __init__(self):
        Scope.__init__(self, "?", None, None)

    def declare_var(self, name, type, pos,
            cname = None, visibility = 'private', is_cdef = 0):
        # Add an entry for a struct/union member.  Python object
        # members and non-private visibility are errors, but the
        # entry is still created so analysis can continue.
        # (Deprecated '<>' replaced with '!=' — removed in Python 3.)
        if not cname:
            cname = name
        entry = self.declare(name, cname, type, pos)
        entry.is_variable = 1
        self.var_entries.append(entry)
        if type.is_pyobject:
            error(pos,
                "C struct/union member cannot be a Python object")
        if visibility != 'private':
            error(pos,
                "C struct/union member cannot be declared %s" % visibility)
        return entry
class ClassScope(Scope):
    # Abstract base class for the namespace of a Python class
    # or extension type.
    #
    # class_name    string          Pyrex name of the class
    # scope_prefix  string          Additional prefix for names
    #                                 declared in the class
    # doc           string or None  Doc string

    def __init__(self, name, outer_scope):
        Scope.__init__(self, name, outer_scope, outer_scope)
        self.class_name = name
        self.doc = None

    def add_string_const(self, value):
        # String constants live at module level; delegate outward.
        return self.outer_scope.add_string_const(value)
class PyClassScope(ClassScope):
    # Namespace of a Python class.
    #
    # class_dict_cname  string  C variable holding class dict
    # class_obj_cname   string  C variable holding class object

    is_py_class_scope = 1

    def declare_var(self, name, type, pos,
            cname = None, visibility = 'private', is_cdef = 0):
        # Add an entry for a class attribute; it lives in the
        # namespace of the class object.
        entry = Scope.declare_var(self, name, type, pos,
            cname, visibility, is_cdef)
        entry.is_pyglobal = 1
        entry.namespace_cname = self.class_obj_cname
        if Options.intern_names:
            entry.interned_cname = self.intern(name)
        return entry

    # Temporaries and default values are managed by the enclosing
    # scope; forward those requests outward.

    def allocate_temp(self, type):
        return self.outer_scope.allocate_temp(type)

    def release_temp(self, cname):
        self.outer_scope.release_temp(cname)

    def recycle_pending_temps(self):
        self.outer_scope.recycle_pending_temps()

    def add_default_value(self, type):
        return self.outer_scope.add_default_value(type)
class CClassScope(ClassScope):
    # Namespace of an extension type.
    #
    # parent_type            CClassType
    # #typeobj_cname         string or None
    # #objstruct_cname       string
    # method_table_cname     string
    # member_table_cname     string
    # getset_table_cname     string
    # has_pyobject_attrs     boolean  Any PyObject attributes?
    # public_attr_entries    boolean  public/readonly attrs
    # property_entries       [Entry]
    # defined                boolean  Defined in .pxd file
    # implemented            boolean  Defined in .pyx file
    # inherited_var_entries  [Entry]  Adapted var entries from base class
    #
    # (Change from the original: the deprecated '<>' operator,
    # removed in Python 3, is replaced with '!='.)

    is_c_class_scope = 1

    def __init__(self, name, outer_scope, visibility):
        ClassScope.__init__(self, name, outer_scope)
        # Extern types get their tables from the C include file,
        # so table cnames are only generated for non-extern types.
        if visibility != 'extern':
            self.method_table_cname = outer_scope.mangle(Naming.methtab_prefix, name)
            self.member_table_cname = outer_scope.mangle(Naming.memtab_prefix, name)
            self.getset_table_cname = outer_scope.mangle(Naming.gstab_prefix, name)
        self.has_pyobject_attrs = 0
        self.public_attr_entries = []
        self.property_entries = []
        self.inherited_var_entries = []
        self.defined = 0
        self.implemented = 0

    def needs_gc(self):
        # If the type or any of its base types have Python-valued
        # C attributes, then it needs to participate in GC.
        return self.has_pyobject_attrs or \
            (self.parent_type.base_type and \
                self.parent_type.base_type.scope.needs_gc())

    def declare_var(self, name, type, pos,
            cname = None, visibility = 'private', is_cdef = 0):
        # Add an entry for a C attribute of the extension type.
        # Attributes may only be added in the definition (.pxd)
        # part, and may not reuse special method names.
        if self.defined:
            error(pos,
                "C attributes cannot be added in implementation part of"
                " extension type")
        if get_special_method_signature(name):
            error(pos,
                "The name '%s' is reserved for a special method."
                    % name)
        if not cname:
            cname = name
        entry = self.declare(name, cname, type, pos)
        entry.visibility = visibility
        entry.is_variable = 1
        self.var_entries.append(entry)
        if type.is_pyobject:
            self.has_pyobject_attrs = 1
        if visibility not in ('private', 'public', 'readonly'):
            error(pos,
                "Attribute of extension type cannot be declared %s" % visibility)
        if visibility in ('public', 'readonly'):
            # Python-visible attributes need a PyMemberDef typecode.
            if type.pymemberdef_typecode:
                self.public_attr_entries.append(entry)
            else:
                error(pos,
                    "C attribute of type '%s' cannot be accessed from Python" % type)
        if visibility == 'public' and type.is_extension_type:
            error(pos,
                "Non-generic Python attribute cannot be exposed for writing from Python")
        return entry

    def declare_pyfunction(self, name, pos):
        # Add an entry for a method.
        entry = self.declare(name, name, py_object_type, pos)
        special_sig = get_special_method_signature(name)
        if special_sig:
            entry.signature = special_sig
            # Special methods don't get put in the method table
        else:
            entry.signature = pymethod_signature
            self.pyfunc_entries.append(entry)
        return entry

    def declare_cfunction(self, name, type, pos,
            cname = None, visibility = 'private', defining = 0):
        # Add an entry for a C method.  The first argument must be
        # 'self' of the parent extension type.  A method may be
        # declared in the .pxd part and later defined in the .pyx
        # part with a matching signature.
        args = type.args
        if not args:
            error(pos, "C method has no self argument")
        elif not args[0].type.same_as(self.parent_type):
            error(pos, "Self argument of C method does not match parent type")
        entry = self.lookup_here(name)
        if entry:
            if not entry.is_cfunction:
                error(pos, "'%s' redeclared" % name)
            else:
                if defining and entry.func_cname:
                    error(pos, "'%s' already defined" % name)
                if not entry.type.same_as(type, as_cmethod = 1):
                    error(pos, "Signature does not match previous declaration")
        else:
            if self.defined:
                error(pos,
                    "C method '%s' not previously declared in definition part of"
                    " extension type" % name)
            entry = self.add_cfunction(name, type, pos, cname or name, visibility)
        if defining:
            entry.func_cname = self.mangle(Naming.func_prefix, name)
        return entry

    def add_cfunction(self, name, type, pos, cname, visibility):
        # Add a cfunction entry without giving it a func_cname.
        entry = ClassScope.add_cfunction(self, name, type, pos, cname, visibility)
        entry.is_cmethod = 1
        return entry

    def declare_property(self, name, doc, pos):
        # Add an entry for a property, with its own scope holding
        # the accessor methods.
        entry = self.declare(name, name, py_object_type, pos)
        entry.is_property = 1
        entry.doc = doc
        entry.scope = PropertyScope(name,
            outer_scope = self.global_scope(), parent_scope = self)
        entry.scope.parent_type = self.parent_type
        self.property_entries.append(entry)
        return entry

    def declare_inherited_c_attributes(self, base_scope):
        # Declare entries for all the C attributes of an
        # inherited type, with cnames modified appropriately
        # to work with this type.
        def adapt(cname):
            return "%s.%s" % (Naming.obj_base_cname, base_entry.cname)
        for base_entry in \
                base_scope.inherited_var_entries + base_scope.var_entries:
            entry = self.declare(base_entry.name, adapt(base_entry.cname),
                base_entry.type, None)
            entry.is_variable = 1
            self.inherited_var_entries.append(entry)
        for base_entry in base_scope.cfunc_entries:
            entry = self.add_cfunction(base_entry.name, base_entry.type, None,
                adapt(base_entry.cname), base_entry.visibility)
            entry.is_inherited = 1
class PropertyScope(Scope):
    # Scope holding the __get__, __set__ and __del__ methods for
    # a property of an extension type.
    #
    # parent_type  PyExtensionType  The type to which the property belongs

    def declare_pyfunction(self, name, pos):
        # Add an entry for a property accessor method.  Only the
        # three recognised accessor names are accepted.
        signature = get_property_accessor_signature(name)
        if not signature:
            error(pos, "Only __get__, __set__ and __del__ methods allowed "
                "in a property declaration")
            return None
        entry = self.declare(name, name, py_object_type, pos)
        entry.signature = signature
        return entry
| Python |
#
# Pyrex Top Level
#
import sys
if sys.version_info[:2] < (2, 2):
print >>sys.stderr, "Sorry, Pyrex requires Python 2.2 or later"
sys.exit(1)
import os
from time import time
import Version
from Scanning import PyrexScanner
import Errors
from Errors import PyrexError, CompileError, error
import Parsing
from Symtab import BuiltinScope, ModuleScope
import Code
from Pyrex.Utils import replace_suffix
verbose = 0
class Context:
    #  This class encapsulates the context needed for compiling
    #  one or more Pyrex implementation files along with their
    #  associated and imported declaration files. It includes
    #  the root of the module import namespace and the list
    #  of directories to search for include files.
    #
    #  modules               {string : ModuleScope}
    #  include_directories   [string]

    def __init__(self, include_directories):
        # The __builtin__ scope is always present and is the outer
        # scope of every module scope.
        self.modules = {"__builtin__" : BuiltinScope()}
        self.include_directories = include_directories

    def find_module(self, module_name,
            relative_to = None, pos = None, need_pxd = 1):
        #  Finds and returns the module scope corresponding to
        #  the given relative or absolute module name. If this
        #  is the first time the module has been requested, finds
        #  the corresponding .pxd file and process it.
        #  If relative_to is not None, it must be a module scope,
        #  and the module will first be searched for relative to
        #  that module, provided its name is not a dotted name.
        debug_find_module = 0
        if debug_find_module:
            print "Context.find_module: module_name =", module_name, \
                "relative_to =", relative_to, "pos =", pos, "need_pxd =", need_pxd
        scope = None
        pxd_pathname = None
        if "." not in module_name and relative_to:
            if debug_find_module:
                print "...trying relative import"
            scope = relative_to.lookup_submodule(module_name)
            if not scope:
                # Only accept the relative interpretation if a
                # matching .pxd file actually exists.
                qualified_name = relative_to.qualify_name(module_name)
                pxd_pathname = self.find_pxd_file(qualified_name, pos)
                if pxd_pathname:
                    scope = relative_to.find_submodule(module_name)
        if not scope:
            if debug_find_module:
                print "...trying absolute import"
            # Walk the dotted name down from the top-level namespace,
            # creating intermediate module scopes as needed.
            scope = self
            for name in module_name.split("."):
                scope = scope.find_submodule(name)
        if debug_find_module:
            print "...scope =", scope
        if not scope.pxd_file_loaded:
            if debug_find_module:
                print "...pxd not loaded"
            # Mark loaded up front so a circular cimport cannot
            # recurse into parsing the same .pxd again.
            scope.pxd_file_loaded = 1
            if not pxd_pathname:
                if debug_find_module:
                    print "...looking for pxd file"
                pxd_pathname = self.find_pxd_file(module_name, pos)
                if debug_find_module:
                    print "......found ", pxd_pathname
                if not pxd_pathname and need_pxd:
                    error(pos, "'%s.pxd' not found" % module_name)
            if pxd_pathname:
                try:
                    if debug_find_module:
                        print "Context.find_module: Parsing", pxd_pathname
                    pxd_tree = self.parse(pxd_pathname, scope.type_names, pxd = 1)
                    pxd_tree.analyse_declarations(scope)
                except CompileError:
                    # Errors have already been reported through the
                    # error listing machinery; keep going.
                    pass
        return scope

    def find_pxd_file(self, module_name, pos):
        # Search include directories for the .pxd file
        # corresponding to the given (full) module name.
        pxd_filename = "%s.pxd" % module_name
        return self.search_include_directories(pxd_filename, pos)

    def find_include_file(self, filename, pos):
        # Search list of include directories for filename.
        # Reports an error and returns None if not found.
        path = self.search_include_directories(filename, pos)
        if not path:
            error(pos, "'%s' not found" % filename)
        return path

    def search_include_directories(self, filename, pos):
        # Search the list of include directories for the given
        # file name. If a source file position is given, first
        # searches the directory containing that file. Returns
        # None if not found, but does not report an error.
        dirs = self.include_directories
        if pos:
            here_dir = os.path.dirname(pos[0])
            dirs = [here_dir] + dirs
        for dir in dirs:
            path = os.path.join(dir, filename)
            if os.path.exists(path):
                return path
        return None

    def lookup_submodule(self, name):
        # Look up a top-level module. Returns None if not found.
        return self.modules.get(name, None)

    def find_submodule(self, name):
        # Find a top-level module, creating a new one if needed.
        scope = self.lookup_submodule(name)
        if not scope:
            scope = ModuleScope(name,
                parent_module = None, context = self)
            self.modules[name] = scope
        return scope

    def parse(self, source_filename, type_names, pxd):
        # Parse the given source file and return a parse tree.
        # Raises CompileError if any errors were reported.
        f = open(source_filename, "r")
        s = PyrexScanner(f, source_filename,
            type_names = type_names, context = self)
        try:
            tree = Parsing.p_module(s, pxd)
        finally:
            f.close()
        if Errors.num_errors > 0:
            raise CompileError
        return tree

    def extract_module_name(self, path):
        # Get the module name out of a source file pathname.
        _, tail = os.path.split(path)
        name, _ = os.path.splitext(tail)
        return name

    def compile(self, source, options = None):
        # Compile a Pyrex implementation file in this context
        # and return a CompilationResult.
        if not options:
            options = default_options
        result = CompilationResult()
        cwd = os.getcwd()
        source = os.path.join(cwd, source)
        if options.use_listing_file:
            result.listing_file = replace_suffix(source, ".lis")
            Errors.open_listing_file(result.listing_file,
                echo_to_stderr = options.errors_to_stderr)
        else:
            Errors.open_listing_file(None)
        if options.output_file:
            result.c_file = os.path.join(cwd, options.output_file)
        else:
            result.c_file = replace_suffix(source, ".c")
        module_name = self.extract_module_name(source)
        initial_pos = (source, 1, 0)
        scope = self.find_module(module_name, pos = initial_pos, need_pxd = 0)
        try:
            tree = self.parse(source, scope.type_names, pxd = 0)
            tree.process_implementation(scope, result)
        except CompileError:
            result.c_file = None
        Errors.close_listing_file()
        result.num_errors = Errors.num_errors
        if result.num_errors > 0:
            result.c_file = None
        # Optionally compile and link the generated C, when the
        # platform provides c_compile/c_link (Mac builds only).
        if result.c_file and not options.c_only and c_compile:
            result.object_file = c_compile(result.c_file)
            if not options.obj_only and c_link:
                result.extension_file = c_link(result.object_file)
        return result
#------------------------------------------------------------------------
#
# Main Python entry point
#
#------------------------------------------------------------------------
class CompilationOptions:
    """
    Options to the Pyrex compiler:

    show_version      boolean   Display version number
    use_listing_file  boolean   Generate a .lis file
    errors_to_stderr  boolean   Echo errors to stderr when using .lis
    include_path      [string]  Directories to search for include files
    output_file       string    Name of generated .c file

    Any additional keyword options are stored as attributes verbatim.
    """

    def __init__(self, defaults = None, **kw):
        self.include_path = []
        if defaults:
            self.__dict__.update(defaults.__dict__)
            # BUG FIX: copying another options object used to alias its
            # include_path list, so appending include dirs to the new
            # options mutated the source object (typically the shared
            # default_options).  Take our own copy of the list.
            self.include_path = self.include_path[:]
        self.__dict__.update(kw)
class CompilationResult:
    """
    Results from the Pyrex compiler:

    c_file          string or None   The generated C source file
    h_file          string or None   The generated C header file
    i_file          string or None   The generated .pxi file
    listing_file    string or None   File of error messages
    object_file     string or None   Result of compiling the C file
    extension_file  string or None   Result of linking the object file
    num_errors      integer          Number of compilation errors
    """

    def __init__(self):
        self.c_file = None
        self.h_file = None
        self.i_file = None
        self.listing_file = None
        self.object_file = None
        self.extension_file = None
        # FIX: num_errors is documented above and read by callers,
        # but was never initialised here; give it a sane default so
        # a result that never went through compile() is still valid.
        self.num_errors = 0
def compile(source, options = None, c_compile = 0, c_link = 0):
    """
    compile(source, options = default_options)

    Compile the given Pyrex implementation file and return
    a CompilationResult object describing what was produced.
    """
    if not options:
        options = default_options
    if c_compile or c_link:
        # BUG FIX: the original set c_only/obj_only directly on the
        # caller's options object — mutating the shared default_options
        # when none were passed.  Work on a copy instead.
        options = CompilationOptions(options)
    if c_compile:
        options.c_only = 0
    if c_link:
        options.obj_only = 0
    context = Context(options.include_path)
    return context.compile(source, options)
#------------------------------------------------------------------------
#
# Main command-line entry point
#
#------------------------------------------------------------------------
def main(command_line = 0):
    # Command-line and programmatic entry point.  With command_line
    # true, parses sys.argv via CmdLine; otherwise treats all of
    # sys.argv[1:] as source files compiled with default_options.
    # Exits with status 1 if any source failed to compile.
    args = sys.argv[1:]
    any_failures = 0
    if command_line:
        from CmdLine import parse_command_line
        options, sources = parse_command_line(args)
    else:
        options = default_options
        sources = args
    if options.show_version:
        print >>sys.stderr, "Pyrex version %s" % Version.version
    context = Context(options.include_path)
    for source in sources:
        try:
            result = context.compile(source, options)
            if result.num_errors > 0:
                any_failures = 1
        except PyrexError, e:
            # Report and continue with the remaining sources.
            print >>sys.stderr, e
            any_failures = 1
    if any_failures:
        sys.exit(1)
#------------------------------------------------------------------------
#
# Set the default options depending on the platform
#
#------------------------------------------------------------------------
# Shared default options used whenever a caller does not supply any.
default_options = CompilationOptions(
    show_version = 0,
    use_listing_file = 0,
    errors_to_stderr = 1,
    c_only = 1,
    obj_only = 1,
    output_file = None)

# Only the classic Mac and Darwin ports know how to drive a C
# compiler/linker themselves; elsewhere compilation stops at the
# generated .c file (c_compile/c_link left as None).
if sys.platform == "mac":
    from Pyrex.Mac.MacSystem import c_compile, c_link, CCompilerError
    default_options.use_listing_file = 1
elif sys.platform == "darwin":
    from Pyrex.Mac.DarwinSystem import c_compile, c_link, CCompilerError
else:
    c_compile = None
    c_link = None
| Python |
#
# Pyrex - Tables describing slots in the type object
# and associated know-how.
#
import Naming
import PyrexTypes
class Signature:
    # Method slot signature descriptor.
    #
    # has_dummy_arg     boolean
    # has_generic_args  boolean
    # fixed_arg_format  string
    # ret_format        string
    # error_value       string
    #
    # The formats are strings made up of the following
    # characters:
    #
    #   'O'  Python object
    #   'T'  Python object of the type of 'self'
    #   'v'  void
    #   'p'  void *
    #   'P'  void **
    #   'i'  int
    #   'I'  int *
    #   'l'  long
    #   's'  char *
    #   'S'  char **
    #   'r'  int used only to signal exception
    #   '-'  dummy 'self' argument (not used)
    #   '*'  rest of args passed as generic Python
    #        arg tuple and kw dict (must be last
    #        char in format string)

    format_map = {
        'O': PyrexTypes.py_object_type,
        'v': PyrexTypes.c_void_type,
        'p': PyrexTypes.c_void_ptr_type,
        'P': PyrexTypes.c_void_ptr_ptr_type,
        'i': PyrexTypes.c_int_type,
        'I': PyrexTypes.c_int_ptr_type,
        'l': PyrexTypes.c_long_type,
        's': PyrexTypes.c_char_ptr_type,
        'S': PyrexTypes.c_char_ptr_ptr_type,
        'r': PyrexTypes.c_returncode_type,
        # 'T', '-' and '*' are handled otherwise
        # and are not looked up in here
    }

    error_value_map = {
        'O': "0",
        'i': "-1",
        'l': "-1",
        'r': "-1",
    }

    def __init__(self, arg_format, ret_format):
        # Strip the leading '-' (dummy self) and trailing '*'
        # (generic args) markers, recording their presence.
        self.has_dummy_arg = 0
        self.has_generic_args = 0
        if arg_format.startswith('-'):
            self.has_dummy_arg = 1
            arg_format = arg_format[1:]
        if arg_format.endswith('*'):
            self.has_generic_args = 1
            arg_format = arg_format[:-1]
        self.fixed_arg_format = arg_format
        self.ret_format = ret_format
        self.error_value = self.error_value_map.get(ret_format)

    def num_fixed_args(self):
        # Number of explicitly typed (non-generic) arguments.
        return len(self.fixed_arg_format)

    def is_self_arg(self, i):
        # Is fixed argument i the 'self' argument?
        return self.fixed_arg_format[i] == 'T'

    def fixed_arg_type(self, i):
        # Pyrex type of fixed argument i.
        return self.format_map[self.fixed_arg_format[i]]

    def return_type(self):
        # Pyrex type of the return value.
        return self.format_map[self.ret_format]
class SlotDescriptor:
    # Abstract base class for type slot descriptors.
    #
    # slot_name                   string   Member name of the slot in the type object
    # is_initialised_dynamically  boolean  Initialised by code in the module init function
    #
    # (Change from the original: the deprecated '<>' operator,
    # removed in Python 3, is replaced with '!='.)

    def __init__(self, slot_name, dynamic = 0):
        self.slot_name = slot_name
        self.is_initialised_dynamically = dynamic

    def generate(self, scope, code):
        # Emit this slot's line of the static type-object initialiser.
        # Some C implementations have trouble statically initialising
        # a global with a pointer to an extern function, so dynamic
        # slots are emitted as 0 here and filled in by the module
        # init function (see generate_dynamic_init_code).
        if self.is_initialised_dynamically:
            value = 0
        else:
            value = self.slot_code(scope)
        code.putln("%s, /*%s*/" % (value, self.slot_name))

    def generate_dynamic_init_code(self, scope, code):
        # Emit the module-init-time assignment for a dynamic slot.
        if self.is_initialised_dynamically:
            value = self.slot_code(scope)
            if value != "0":
                code.putln("%s.%s = %s;" % (
                    scope.parent_type.typeobj_cname,
                    self.slot_name,
                    value
                    )
                )
class FixedSlot(SlotDescriptor):
    # Descriptor for a type slot with a fixed value.
    #
    # value string

    def __init__(self, slot_name, value):
        SlotDescriptor.__init__(self, slot_name)
        self.value = value

    def slot_code(self, scope):
        # The value is independent of the scope.
        return self.value
class EmptySlot(FixedSlot):
    # Descriptor for a type slot whose value is always 0.

    def __init__(self, slot_name):
        FixedSlot.__init__(self, slot_name, "0")
class GCDependentSlot(SlotDescriptor):
    # Descriptor for a slot whose value depends on whether
    # the type participates in GC.

    def __init__(self, slot_name, no_gc_value, gc_value, dynamic = 0):
        SlotDescriptor.__init__(self, slot_name, dynamic)
        self.no_gc_value = no_gc_value
        self.gc_value = gc_value

    def slot_code(self, scope):
        # Types with Python-object attributes participate in GC.
        if not scope.has_pyobject_attrs:
            return self.no_gc_value
        return self.gc_value
class MethodSlot(SlotDescriptor):
    # Type slot descriptor for a user-definable method.
    #
    # signature    Signature
    # method_name  string          The __xxx__ name of the method
    # default      string or None  Default value of the slot

    def __init__(self, signature, slot_name, method_name, default = None):
        SlotDescriptor.__init__(self, slot_name)
        # (Redundant re-assignment of self.slot_name removed; the
        # base class constructor already stores it.)
        self.signature = signature
        self.method_name = method_name
        self.default = default
        # Register the slot so get_special_method_signature() can
        # recognise the method name.
        method_name_to_slot[method_name] = self

    def slot_code(self, scope):
        # Use the user's method if defined in this scope, else 0.
        entry = scope.lookup_here(self.method_name)
        if entry:
            return entry.func_cname
        else:
            return "0"
class InternalMethodSlot(SlotDescriptor):
    # Type slot descriptor for a method which is always
    # synthesized by Pyrex.
    #
    # slot_name string Member name of the slot in the type object

    def __init__(self, slot_name):
        SlotDescriptor.__init__(self, slot_name)

    def slot_code(self, scope):
        # The synthesized function is named after the slot itself.
        return scope.mangle_internal(self.slot_name)
class SyntheticSlot(InternalMethodSlot):
    # Type slot descriptor for a synthesized method which
    # dispatches to one or more user-defined methods depending
    # on its arguments. If none of the relevant methods are
    # defined, the method will not be synthesized and an
    # alternative default value will be placed in the type
    # slot.

    def __init__(self, slot_name, user_methods, default_value):
        InternalMethodSlot.__init__(self, slot_name)
        self.user_methods = user_methods
        self.default_value = default_value

    def slot_code(self, scope):
        # Fall back to the default unless at least one of the
        # dispatched-to user methods is actually defined.
        if not scope.defines_any(self.user_methods):
            return self.default_value
        return InternalMethodSlot.slot_code(self, scope)
class TypeFlagsSlot(SlotDescriptor):
    # Descriptor for the type flags slot.

    def slot_code(self, scope):
        # Every extension type gets the default, cross-type-comparison
        # and subclassable flags; GC support is added only when the
        # type has Python-object attributes.
        flags = ["Py_TPFLAGS_DEFAULT", "Py_TPFLAGS_CHECKTYPES", "Py_TPFLAGS_BASETYPE"]
        if scope.has_pyobject_attrs:
            flags.append("Py_TPFLAGS_HAVE_GC")
        return "|".join(flags)
class DocStringSlot(SlotDescriptor):
    # Descriptor for the docstring slot.

    def slot_code(self, scope):
        # Emit a C string literal for the docstring, or 0 if absent.
        if scope.doc is None:
            return "0"
        return '"%s"' % scope.doc
class SuiteSlot(SlotDescriptor):
    # Descriptor for a substructure of the type object.
    #
    # sub_slots [SlotDescriptor]

    def __init__(self, sub_slots, slot_type, slot_name):
        SlotDescriptor.__init__(self, slot_name)
        self.sub_slots = sub_slots
        self.slot_type = slot_type
        # Register globally so all substructure definitions can be
        # generated alongside the type object.
        substructures.append(self)

    def substructure_cname(self, scope):
        return "%s%s_%s" % (Naming.pyrex_prefix, self.slot_name, scope.class_name)

    def slot_code(self, scope):
        # The main type object refers to the substructure by address.
        return "&" + self.substructure_cname(scope)

    def generate_substructure(self, scope, code):
        # Emit the static definition of the substructure itself.
        code.putln("")
        code.putln(
            "static %s %s = {" % (
                self.slot_type,
                self.substructure_cname(scope)))
        for subslot in self.sub_slots:
            subslot.generate(scope, code)
        code.putln("};")
# Registry of all SuiteSlot instances, appended to by SuiteSlot.__init__;
# used when generating the type object's substructure definitions.
substructures = []   # List of all SuiteSlot instances
class MethodTableSlot(SlotDescriptor):
    # Slot descriptor for the method table.

    def slot_code(self, scope):
        # The method table cname is allocated by the class scope.
        return scope.method_table_cname
class MemberTableSlot(SlotDescriptor):
    # Slot descriptor for the table of Python-accessible attributes.

    def slot_code(self, scope):
        # Only emit a member table if there are public/readonly attrs.
        if not scope.public_attr_entries:
            return "0"
        return scope.member_table_cname
class GetSetSlot(SlotDescriptor):
    # Slot descriptor for the table of attribute get & set methods.

    def slot_code(self, scope):
        # Only emit a getset table if the type declares properties.
        if not scope.property_entries:
            return "0"
        return scope.getset_table_cname
class BaseClassSlot(SlotDescriptor):
    # Descriptor for the tp_base slot.  The base type object is not
    # known until run time, so the slot is marked dynamic and filled
    # in by generated module-init code instead of statically.
    def __init__(self, name):
        SlotDescriptor.__init__(self, name, dynamic = 1)

    def generate_dynamic_init_code(self, scope, code):
        base_type = scope.parent_type.base_type
        if not base_type:
            return
        code.putln("%s.%s = %s;" % (
            scope.parent_type.typeobj_cname,
            self.slot_name,
            base_type.typeptr_cname))
# The following dictionary maps __xxx__ method names to slot descriptors.
# It is populated as a side effect of constructing the MethodSlot
# instances below (presumably in MethodSlot.__init__ -- not visible in
# this chunk; confirm against the MethodSlot class).
method_name_to_slot = {}
## The following slots are (or could be) initialised with an
## extern function pointer.
#
#slots_initialised_from_extern = (
# "tp_free",
#)
#------------------------------------------------------------------------------------------
#
# Utility functions for accessing slot table data structures
#
#------------------------------------------------------------------------------------------
def get_special_method_signature(name):
    # Given a method name, return its Signature if it is a special
    # (__xxx__) method known to the slot table, else None.
    slot = method_name_to_slot.get(name)
    if slot:
        return slot.signature
    return None
def get_property_accessor_signature(name):
    # Return the Signature of the accessor method (__get__, __set__,
    # __del__) for an extension type property, or None if 'name' is
    # not a recognised accessor name.
    return property_accessor_signatures.get(name)
#------------------------------------------------------------------------------------------
#
# Signatures for generic Python functions and methods.
#
#------------------------------------------------------------------------------------------
# NOTE(review): format codes inferred from usage throughout this table --
# confirm against the Signature class: '-' = no self argument, 'T' = the
# self argument, '*' = remaining arguments as tuple/keywords, 'O' =
# PyObject * result.
pyfunction_signature = Signature("-*", "O")
pymethod_signature = Signature("T*", "O")
#------------------------------------------------------------------------------------------
#
# Signatures for the various kinds of function that
# can appear in the type object and its substructures.
#
#------------------------------------------------------------------------------------------
# Each signature pairs an argument-format string with a return-format
# string; the C typedef each one corresponds to is quoted alongside.
# NOTE(review): inferred meanings of the less obvious codes -- 'r' =
# int return with error indication, 'i' = int, 'l' = long, 'v' = void,
# 'P' = void **, 'S' = char **, 'I' = int * -- confirm against the
# Signature class.
unaryfunc = Signature("T", "O")            # typedef PyObject * (*unaryfunc)(PyObject *);
binaryfunc = Signature("OO", "O")          # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
ibinaryfunc = Signature("TO", "O")         # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
ternaryfunc = Signature("OOO", "O")        # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
iternaryfunc = Signature("TOO", "O")       # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
callfunc = Signature("T*", "O")            # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
inquiry = Signature("T", "i")              # typedef int (*inquiry)(PyObject *);
                                           # typedef int (*coercion)(PyObject **, PyObject **);
intargfunc = Signature("Ti", "O")          # typedef PyObject *(*intargfunc)(PyObject *, int);
intintargfunc = Signature("Tii", "O")      # typedef PyObject *(*intintargfunc)(PyObject *, int, int);
intobjargproc = Signature("TiO", 'r')      # typedef int(*intobjargproc)(PyObject *, int, PyObject *);
intintobjargproc = Signature("TiiO", 'r')  # typedef int(*intintobjargproc)(PyObject *, int, int, PyObject *);
intintargproc = Signature("Tii", 'r')
objargfunc = Signature("TO", "O")
objobjargproc = Signature("TOO", 'r')      # typedef int (*objobjargproc)(PyObject *, PyObject *, PyObject *);
getreadbufferproc = Signature("TiP", 'r')  # typedef int (*getreadbufferproc)(PyObject *, int, void **);
getwritebufferproc = Signature("TiP", 'r') # typedef int (*getwritebufferproc)(PyObject *, int, void **);
getsegcountproc = Signature("TI", 'r')     # typedef int (*getsegcountproc)(PyObject *, int *);
getcharbufferproc = Signature("TiS", 'r')  # typedef int (*getcharbufferproc)(PyObject *, int, const char **);
objargproc = Signature("TO", 'r')          # typedef int (*objobjproc)(PyObject *, PyObject *);
                                           # typedef int (*visitproc)(PyObject *, void *);
                                           # typedef int (*traverseproc)(PyObject *, visitproc, void *);
destructor = Signature("T", "v")           # typedef void (*destructor)(PyObject *);
# printfunc = Signature("TFi", 'r')        # typedef int (*printfunc)(PyObject *, FILE *, int);
                                           # typedef PyObject *(*getattrfunc)(PyObject *, char *);
getattrofunc = Signature("TO", "O")        # typedef PyObject *(*getattrofunc)(PyObject *, PyObject *);
                                           # typedef int (*setattrfunc)(PyObject *, char *, PyObject *);
setattrofunc = Signature("TOO", 'r')       # typedef int (*setattrofunc)(PyObject *, PyObject *, PyObject *);
delattrofunc = Signature("TO", 'r')
cmpfunc = Signature("TO", "i")             # typedef int (*cmpfunc)(PyObject *, PyObject *);
reprfunc = Signature("T", "O")             # typedef PyObject *(*reprfunc)(PyObject *);
hashfunc = Signature("T", "l")             # typedef long (*hashfunc)(PyObject *);
                                           # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
richcmpfunc = Signature("OOi", "O")        # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
getiterfunc = Signature("T", "O")          # typedef PyObject *(*getiterfunc) (PyObject *);
iternextfunc = Signature("T", "O")         # typedef PyObject *(*iternextfunc) (PyObject *);
descrgetfunc = Signature("TOO", "O")       # typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *);
descrsetfunc = Signature("TOO", 'r')       # typedef int (*descrsetfunc) (PyObject *, PyObject *, PyObject *);
descrdelfunc = Signature("TO", 'r')
initproc = Signature("T*", 'r')            # typedef int (*initproc)(PyObject *, PyObject *, PyObject *);
                                           # typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *);
                                           # typedef PyObject *(*allocfunc)(struct _typeobject *, int);
#------------------------------------------------------------------------------------------
#
# Signatures for accessor methods of properties.
#
#------------------------------------------------------------------------------------------
# Maps property accessor method names to their expected signatures;
# consulted by get_property_accessor_signature above.
property_accessor_signatures = {
    '__get__': Signature("T", "O"),
    '__set__': Signature("TO", 'r'),
    '__del__': Signature("T", 'r')
}
#------------------------------------------------------------------------------------------
#
# Descriptor tables for the slots of the various type object
# substructures, in the order they appear in the structure.
#
#------------------------------------------------------------------------------------------
# Entries must stay in the exact order of the corresponding C struct
# fields, since SuiteSlot.generate_substructure emits them positionally.
PyNumberMethods = (
    MethodSlot(binaryfunc, "nb_add", "__add__"),
    MethodSlot(binaryfunc, "nb_subtract", "__sub__"),
    MethodSlot(binaryfunc, "nb_multiply", "__mul__"),
    MethodSlot(binaryfunc, "nb_divide", "__div__"),
    MethodSlot(binaryfunc, "nb_remainder", "__mod__"),
    MethodSlot(binaryfunc, "nb_divmod", "__divmod__"),
    MethodSlot(ternaryfunc, "nb_power", "__pow__"),
    MethodSlot(unaryfunc, "nb_negative", "__neg__"),
    MethodSlot(unaryfunc, "nb_positive", "__pos__"),
    MethodSlot(unaryfunc, "nb_absolute", "__abs__"),
    MethodSlot(inquiry, "nb_nonzero", "__nonzero__"),
    MethodSlot(unaryfunc, "nb_invert", "__invert__"),
    MethodSlot(binaryfunc, "nb_lshift", "__lshift__"),
    MethodSlot(binaryfunc, "nb_rshift", "__rshift__"),
    MethodSlot(binaryfunc, "nb_and", "__and__"),
    MethodSlot(binaryfunc, "nb_xor", "__xor__"),
    MethodSlot(binaryfunc, "nb_or", "__or__"),
    EmptySlot("nb_coerce"),
    MethodSlot(unaryfunc, "nb_int", "__int__"),
    MethodSlot(unaryfunc, "nb_long", "__long__"),
    MethodSlot(unaryfunc, "nb_float", "__float__"),
    MethodSlot(unaryfunc, "nb_oct", "__oct__"),
    MethodSlot(unaryfunc, "nb_hex", "__hex__"),
    # Added in release 2.0
    MethodSlot(ibinaryfunc, "nb_inplace_add", "__iadd__"),
    MethodSlot(ibinaryfunc, "nb_inplace_subtract", "__isub__"),
    MethodSlot(ibinaryfunc, "nb_inplace_multiply", "__imul__"),
    MethodSlot(ibinaryfunc, "nb_inplace_divide", "__idiv__"),
    MethodSlot(ibinaryfunc, "nb_inplace_remainder", "__imod__"),
    MethodSlot(ternaryfunc, "nb_inplace_power", "__ipow__"), # NOT iternaryfunc!!!
    MethodSlot(ibinaryfunc, "nb_inplace_lshift", "__ilshift__"),
    MethodSlot(ibinaryfunc, "nb_inplace_rshift", "__irshift__"),
    MethodSlot(ibinaryfunc, "nb_inplace_and", "__iand__"),
    MethodSlot(ibinaryfunc, "nb_inplace_xor", "__ixor__"),
    MethodSlot(ibinaryfunc, "nb_inplace_or", "__ior__"),
    # Added in release 2.2
    # The following require the Py_TPFLAGS_HAVE_CLASS flag
    MethodSlot(binaryfunc, "nb_floor_divide", "__floordiv__"),
    MethodSlot(binaryfunc, "nb_true_divide", "__truediv__"),
    MethodSlot(ibinaryfunc, "nb_inplace_floor_divide", "__ifloordiv__"),
    MethodSlot(ibinaryfunc, "nb_inplace_true_divide", "__itruediv__"),
)
# Most sequence slots are left empty because the generic number or
# mapping slot handles the operation instead (see per-entry comments).
PySequenceMethods = (
    MethodSlot(inquiry, "sq_length", "__len__"), # EmptySlot("sq_length"), # mp_length used instead
    EmptySlot("sq_concat"), # nb_add used instead
    EmptySlot("sq_repeat"), # nb_multiply used instead
    SyntheticSlot("sq_item", ["__getitem__"], "0"), #EmptySlot("sq_item"), # mp_subscript used instead
    MethodSlot(intintargfunc, "sq_slice", "__getslice__"),
    EmptySlot("sq_ass_item"), # mp_ass_subscript used instead
    SyntheticSlot("sq_ass_slice", ["__setslice__", "__delslice__"], "0"),
    MethodSlot(cmpfunc, "sq_contains", "__contains__"),
    EmptySlot("sq_inplace_concat"), # nb_inplace_add used instead
    EmptySlot("sq_inplace_repeat"), # nb_inplace_multiply used instead
)
PyMappingMethods = (
    MethodSlot(inquiry, "mp_length", "__len__"),
    MethodSlot(objargfunc, "mp_subscript", "__getitem__"),
    SyntheticSlot("mp_ass_subscript", ["__setitem__"], "0"),
)
PyBufferProcs = (
    MethodSlot(getreadbufferproc, "bf_getreadbuffer", "__getreadbuffer__"),
    MethodSlot(getwritebufferproc, "bf_getwritebuffer", "__getwritebuffer__"),
    MethodSlot(getsegcountproc, "bf_getsegcount", "__getsegcount__"),
    MethodSlot(getcharbufferproc, "bf_getcharbuffer", "__getcharbuffer__"),
)
#------------------------------------------------------------------------------------------
#
# The main slot table. This table contains descriptors for all the
# top-level type slots, beginning with tp_dealloc, in the order they
# appear in the type object.
#
#------------------------------------------------------------------------------------------
# Order is significant: it must match the field order of PyTypeObject,
# starting at tp_dealloc, since slots are emitted positionally.
slot_table = (
    InternalMethodSlot("tp_dealloc"),
    EmptySlot("tp_print"), #MethodSlot(printfunc, "tp_print", "__print__"),
    EmptySlot("tp_getattr"),
    EmptySlot("tp_setattr"),
    MethodSlot(cmpfunc, "tp_compare", "__cmp__"),
    MethodSlot(reprfunc, "tp_repr", "__repr__"),
    SuiteSlot(PyNumberMethods, "PyNumberMethods", "tp_as_number"),
    SuiteSlot(PySequenceMethods, "PySequenceMethods", "tp_as_sequence"),
    SuiteSlot(PyMappingMethods, "PyMappingMethods", "tp_as_mapping"),
    MethodSlot(hashfunc, "tp_hash", "__hash__"),
    MethodSlot(callfunc, "tp_call", "__call__"),
    MethodSlot(reprfunc, "tp_str", "__str__"),
    SyntheticSlot("tp_getattro", ["__getattr__"], "0"), #"PyObject_GenericGetAttr"),
    SyntheticSlot("tp_setattro", ["__setattr__", "__delattr__"], "0"), #"PyObject_GenericSetAttr"),
    SuiteSlot(PyBufferProcs, "PyBufferProcs", "tp_as_buffer"),
    TypeFlagsSlot("tp_flags"),
    DocStringSlot("tp_doc"),
    InternalMethodSlot("tp_traverse"),
    InternalMethodSlot("tp_clear"),
    # Later -- synthesize a method to split into separate ops?
    MethodSlot(richcmpfunc, "tp_richcompare", "__richcmp__"),
    EmptySlot("tp_weaklistoffset"),
    MethodSlot(getiterfunc, "tp_iter", "__iter__"),
    MethodSlot(iternextfunc, "tp_iternext", "__next__"),
    MethodTableSlot("tp_methods"),
    MemberTableSlot("tp_members"),
    GetSetSlot("tp_getset"),
    BaseClassSlot("tp_base"), #EmptySlot("tp_base"),
    EmptySlot("tp_dict"),
    SyntheticSlot("tp_descr_get", ["__get__"], "0"),
    SyntheticSlot("tp_descr_set", ["__set__", "__delete__"], "0"),
    EmptySlot("tp_dictoffset"),
    MethodSlot(initproc, "tp_init", "__init__"),
    EmptySlot("tp_alloc"), #FixedSlot("tp_alloc", "PyType_GenericAlloc"),
    InternalMethodSlot("tp_new"),
    # Some versions of Python 2.2 inherit the wrong value for tp_free when the
    # type has GC but the base type doesn't, so we explicitly set it ourselves
    # in that case.
    GCDependentSlot("tp_free", "0", "_PyObject_GC_Del", dynamic = 1),
    EmptySlot("tp_is_gc"),
    EmptySlot("tp_bases"),
    EmptySlot("tp_mro"),
    EmptySlot("tp_cache"),
    EmptySlot("tp_subclasses"),
    EmptySlot("tp_weaklist"),
)
#------------------------------------------------------------------------------------------
#
# Descriptors for special methods which don't appear directly
# in the type object or its substructures. These methods are
# called from slot functions synthesized by Pyrex.
#
#------------------------------------------------------------------------------------------
# These MethodSlots have an empty slot name, so they occupy no entry in
# the type object; they are created only for their registration side
# effect (presumably recording the name -> signature mapping in
# method_name_to_slot -- confirm against MethodSlot.__init__), so that
# get_special_method_signature recognises these method names.
MethodSlot(initproc, "", "__new__")
MethodSlot(destructor, "", "__dealloc__")
MethodSlot(objobjargproc, "", "__setitem__")
MethodSlot(objargproc, "", "__delitem__")
MethodSlot(intintobjargproc, "", "__setslice__")
MethodSlot(intintargproc, "", "__delslice__")
MethodSlot(getattrofunc, "", "__getattr__")
MethodSlot(setattrofunc, "", "__setattr__")
MethodSlot(delattrofunc, "", "__delattr__")
MethodSlot(descrgetfunc, "", "__get__")
MethodSlot(descrsetfunc, "", "__set__")
MethodSlot(descrdelfunc, "", "__delete__")
| Python |
#
# Pyrex - Parse tree nodes
#
import os, string, sys, time
import Code
from Errors import error, InternalError
import Naming
import PyrexTypes
from PyrexTypes import py_object_type, error_type
from Symtab import ModuleScope, LocalScope, \
StructOrUnionScope, PyClassScope, CClassScope
import TypeSlots
import Version
from Pyrex.Utils import open_new_file, replace_suffix
import Options
from DebugFlags import debug_disposal_code
class Node:
    # Base class of all parse tree nodes.
    #
    # pos        (string, int, int)   Source file position
    # is_name    boolean              Is a NameNode
    # is_literal boolean              Is a ConstNode
    #
    # Parse tree processing happens in three phases, applied in order
    # to all the statements in a given scope-block:
    #
    # (1) analyse_declarations
    #       Make symbol table entries for all declarations at the
    #       current level, both explicit (def, cdef, etc.) and implicit
    #       (assignment to an otherwise undeclared name).
    #
    # (2) analyse_expressions
    #       Determine the result types of expressions and fill in the
    #       'type' attribute of each ExprNode.  Insert coercion nodes
    #       into the tree where needed to convert to and from Python
    #       objects.  Allocate temporary locals for intermediate
    #       results.  Fill in the 'result' attribute of each ExprNode
    #       with a C code fragment.
    #
    # (3) generate_code
    #       Emit C code for all declarations, statements and
    #       expressions.  Recursively applies the 3 processing phases
    #       to the bodies of functions.

    is_name = 0
    is_literal = 0

    def __init__(self, pos, **kw):
        # Record the source position; any extra keyword arguments
        # become attributes of the node.
        self.pos = pos
        for key, value in kw.items():
            setattr(self, key, value)

    def analyse_declarations(self, env):
        # Phase 1: by default a node declares nothing.
        pass

    def analyse_expressions(self, env):
        raise InternalError("analyse_expressions not implemented for %s" % \
            self.__class__.__name__)

    def generate_code(self, code):
        raise InternalError("generate_code not implemented for %s" % \
            self.__class__.__name__)
class BlockNode:
    # Mixin class for nodes representing a declaration block:
    # emits the static C declarations a block needs (constants,
    # interned names, Python string literals).
    def generate_const_definitions(self, env, code):
        if env.const_entries:
            code.putln("")
            #code.put_var_declarations(env.const_entries, static = 1)
            for entry in env.const_entries:
                # Interned constants are declared elsewhere
                # (see generate_interned_name_decls).
                if not entry.is_interned:
                    code.put_var_declaration(entry)
    def generate_interned_name_decls(self, env, code):
        # Flush accumulated interned names from the global scope
        # and generate declarations for them.
        genv = env.global_scope()
        intern_map = genv.intern_map
        names = genv.interned_names
        if names:
            code.putln("")
            for name in names:
                code.putln(
                    "static PyObject *%s;" % intern_map[name])
            # Clear the shared list in place so the same names are not
            # declared again by a later flush.
            del names[:]
    def generate_py_string_decls(self, env, code):
        # Declare a static PyObject * for each Python string literal
        # used in this scope.
        entries = env.pystring_entries
        if entries:
            code.putln("")
            for entry in entries:
                code.putln(
                    "static PyObject *%s;" % entry.pystring_cname)
class ModuleNode(Node, BlockNode):
# doc string or None
# body StatListNode
    def analyse_declarations(self, env):
        # Phase 1 for the whole module: record the module docstring
        # and analyse the declarations of the module body.
        env.doc = self.doc
        self.body.analyse_declarations(env)
    def process_implementation(self, env, result):
        # Run all processing phases on an implementation (.pyx) file
        # and emit the .c output plus (optionally) .h/.pxi files.
        self.analyse_declarations(env)
        env.check_c_classes()
        self.body.analyse_expressions(env)
        # Module-level code has no return value.
        env.return_type = PyrexTypes.c_void_type
        self.generate_c_code(env, result)
        self.generate_h_code(env, result)
    def generate_h_code(self, env, result):
        # Generate the public .h header and .pxi include file, but only
        # if the module actually exposes any public vars, functions or
        # extension types.
        public_vars_and_funcs = []
        public_extension_types = []
        for entry in env.var_entries:
            if entry.visibility == 'public':
                public_vars_and_funcs.append(entry)
        for entry in env.cfunc_entries:
            if entry.visibility == 'public':
                public_vars_and_funcs.append(entry)
        for entry in env.c_class_entries:
            if entry.visibility == 'public':
                public_extension_types.append(entry)
        if public_vars_and_funcs or public_extension_types:
            #import os
            #outname_base, _ = os.path.splitext(result.c_file)
            #result.h_file = outname_base + ".h"
            #result.i_file = outname_base + ".pxi"
            result.h_file = replace_suffix(result.c_file, ".h")
            result.i_file = replace_suffix(result.c_file, ".pxi")
            # NOTE(review): the writers are never explicitly closed here;
            # presumably CCodeWriter/PyrexCodeWriter flush on destruction
            # -- confirm against Code.py.
            h_code = Code.CCodeWriter(result.h_file)
            i_code = Code.PyrexCodeWriter(result.i_file)
            for entry in public_vars_and_funcs:
                # C declaration for the header, Pyrex declaration for
                # the .pxi file.
                h_code.putln("extern %s;" %
                    entry.type.declaration_code(
                        entry.cname, dll_linkage = "DL_IMPORT"))
                i_code.putln("cdef extern %s" %
                    entry.type.declaration_code(entry.cname, pyrex = 1))
            for entry in public_extension_types:
                self.generate_cclass_header_code(entry.type, h_code)
                self.generate_cclass_include_code(entry.type, i_code)
            # Declare the module init entry point for users of the header.
            h_code.putln("extern DL_IMPORT(void) init%s(void);" % env.module_name)
            #result.h_file_generated = 1
            #result.i_file_generated = 1
    def generate_cclass_header_code(self, type, h_code):
        # Emit the .h declarations for a public extension type: the
        # type object and its instance struct.
        h_code.putln("extern DL_IMPORT(PyTypeObject) %s;" % type.typeobj_cname)
        self.generate_obj_struct_definition(type, h_code)
    def generate_cclass_include_code(self, type, i_code):
        # Emit the .pxi (Pyrex include) declaration for a public
        # extension type, listing its instance attributes.
        i_code.putln("cdef extern class %s.%s:" % (
            type.module_name, type.name))
        i_code.indent()
        for entry in type.scope.var_entries:
            i_code.putln("cdef %s" %
                entry.type.declaration_code(entry.cname, pyrex = 1))
        i_code.dedent()
    def generate_c_code(self, env, result):
        # Generate the complete .c file.  The emission order matters:
        # preamble and declarations for all referenced modules first,
        # then this module's implementation, then the tables and the
        # module init function.
        modules = []
        self.find_referenced_modules(env, modules, {})
        code = Code.CCodeWriter(result.c_file)
        code.init_labels()
        self.generate_module_preamble(env, modules, code)
        for module in modules:
            self.generate_declarations_for_module(module, code)
        code.putln("")
        code.putln("/* Implementation of %s */" % env.qualified_name)
        self.generate_const_definitions(env, code)
        self.generate_interned_name_decls(env, code)
        self.generate_py_string_decls(env, code)
        self.body.generate_function_definitions(env, code)
        self.generate_interned_name_table(env, code)
        self.generate_py_string_table(env, code)
        self.generate_typeobj_definitions(env, code)
        self.generate_method_table(env, code)
        # modules[:-1] excludes env itself, which find_referenced_modules
        # appended last.
        self.generate_module_init_func(modules[:-1], env, code)
        self.generate_filename_table(code)
        self.generate_utility_functions(env, code)
        result.c_file_generated = 1
def find_referenced_modules(self, env, module_list, modules_seen):
for imported_module in env.cimported_modules:
if imported_module not in modules_seen:
modules_seen[imported_module] = 1
self.find_referenced_modules(imported_module, module_list, modules_seen)
module_list.append(env)
    def generate_module_preamble(self, env, cimported_modules, code):
        # Emit the top of the .c file: banner comment, #includes,
        # utility predeclarations, and the module-level static globals.
        code.putln('/* Generated by Pyrex %s on %s */' % (
            Version.version, time.asctime()))
        code.putln('')
        for filename in env.python_include_files:
            code.putln('#include "%s"' % filename)
        # Fallback for pre-2.3 Pythons that spell the type LONG_LONG.
        code.putln("#ifndef PY_LONG_LONG")
        code.putln(" #define PY_LONG_LONG LONG_LONG")
        code.putln("#endif")
        self.generate_includes(env, cimported_modules, code)
        #for filename in env.include_files:
        #   code.putln('#include "%s"' % filename)
        code.putln('')
        code.put(utility_function_predeclarations)
        if Options.intern_names:
            code.putln(get_name_interned_predeclaration)
        else:
            code.putln(get_name_predeclaration)
        code.putln('')
        code.putln('static PyObject *%s;' % env.module_cname)
        code.putln('static PyObject *%s;' % Naming.builtins_cname)
        code.putln('static int %s;' % Naming.lineno_cname)
        code.putln('static char *%s;' % Naming.filename_cname)
        code.putln('staticforward char **%s;' % Naming.filetable_cname)
        if env.doc:
            # NOTE(review): the docstring is embedded unescaped; quotes,
            # backslashes or newlines in it would break the generated C
            # -- confirm whether env.doc is pre-escaped upstream.
            code.putln('')
            code.putln('static char %s[] = "%s";' % (env.doc_cname, env.doc))
def generate_includes(self, env, cimported_modules, code):
includes = env.include_files[:]
for module in cimported_modules:
for filename in module.include_files:
if filename not in includes:
includes.append(filename)
for filename in includes:
code.putln('#include "%s"' % filename)
    def generate_filename_table(self, code):
        # Emit the static table of source file names (used by the
        # generated error-reporting code), plus the pointer variable
        # predeclared as 'staticforward' in the preamble.
        code.putln("")
        code.putln("static char *%s[] = {" % Naming.filenames_cname)
        if code.filename_list:
            for filename in code.filename_list:
                filename = os.path.basename(filename)
                # Escape so the name is a valid C string literal.
                escaped_filename = filename.replace("\\", "\\\\").replace('"', r'\"')
                code.putln('"%s",' %
                    escaped_filename)
        else:
            # Some C compilers don't like an empty array
            code.putln("0")
        code.putln("};")
        code.putln("statichere char **%s = %s;" %
            (Naming.filetable_cname, Naming.filenames_cname))
    def generate_declarations_for_module(self, env, code):
        # Emit all C declarations needed from one module: type
        # predeclarations, type definitions, globals and C function
        # prototypes, in dependency order.
        code.putln("")
        code.putln("/* Declarations from %s */" % env.qualified_name)
        self.generate_type_predeclarations(env, code)
        self.generate_type_definitions(env, code)
        self.generate_global_declarations(env, code)
        self.generate_cfunction_predeclarations(env, code)
    def generate_type_predeclarations(self, env, code):
        # Currently nothing to emit; hook kept for symmetry with the
        # other generate_* phases called from
        # generate_declarations_for_module.
        pass
    def generate_type_definitions(self, env, code):
        # Generate definitions of structs/unions/enums.
        for entry in env.sue_entries:
            # Types from a cinclude are declared by the included
            # header, not by us.
            if not entry.in_cinclude:
                type = entry.type
                if type.is_struct_or_union:
                    self.generate_struct_union_definition(type, code)
                else:
                    self.generate_enum_definition(entry, code)
        # Generate extension type object struct definitions.
        for entry in env.c_class_entries:
            if not entry.in_cinclude:
                self.generate_typeobject_predeclaration(entry, code)
                self.generate_obj_struct_definition(entry.type, code)
                self.generate_exttype_vtable_struct(entry, code)
                self.generate_exttype_vtabptr_declaration(entry, code)
def sue_header_footer(self, type, kind, name):
if type.typedef_flag:
header = "typedef %s {" % kind
footer = "} %s;" % name
else:
header = "%s %s {" % (kind, name)
footer = "};"
return header, footer
    def generate_struct_union_definition(self, type, code):
        # Emit the C definition of a struct or union, one member per
        # attribute entry.  A type with no scope was only forward
        # declared and gets no definition.
        if type.scope:
            header, footer = \
                self.sue_header_footer(type, type.kind, type.cname)
            code.putln("")
            code.putln(header)
            for attr in type.scope.var_entries:
                code.putln(
                    "%s;" %
                        attr.type.declaration_code(attr.cname))
            code.putln(footer)
    def generate_enum_definition(self, entry, code):
        # Emit the C definition of an enum.  Enumerators whose value
        # equals their own name are emitted without an explicit
        # "= value" initialiser.
        type = entry.type
        name = entry.cname or entry.name or ""
        header, footer = \
            self.sue_header_footer(type, "enum", name)
        code.putln("")
        code.putln(header)
        for value_entry in entry.enum_values:
            if value_entry.value == value_entry.name:
                code.putln(
                    "%s," %
                        value_entry.cname)
            else:
                code.putln(
                    "%s = %s," % (
                        value_entry.cname,
                        value_entry.value))
        code.putln(footer)
    def generate_typeobject_predeclaration(self, entry, code):
        # Forward-declare the type object with linkage matching its
        # visibility (imported, exported, or module-private).
        code.putln("")
        name = entry.type.typeobj_cname
        if name:
            if entry.visibility == 'extern' and not entry.in_cinclude:
                code.putln("extern DL_IMPORT(PyTypeObject) %s;" % name)
            elif entry.visibility == 'public':
                code.putln("DL_EXPORT(PyTypeObject) %s;" % name)
            else:
                code.putln("staticforward PyTypeObject %s;" % name)
    def generate_exttype_vtable_struct(self, entry, code):
        # Generate struct declaration for an extension type's vtable.
        type = entry.type
        scope = type.scope
        if type.vtabstruct_cname:
            code.putln("")
            code.putln(
                "struct %s {" %
                    type.vtabstruct_cname)
            # Embed the base type's vtable first so a pointer to this
            # vtable can be used as a pointer to the base vtable.
            if type.base_type and type.base_type.vtabstruct_cname:
                code.putln("struct %s %s;" % (
                    type.base_type.vtabstruct_cname,
                    Naming.obj_base_cname))
            # Inherited methods already live in the embedded base
            # vtable; only new methods get their own function pointer.
            for method_entry in scope.cfunc_entries:
                if not method_entry.is_inherited:
                    code.putln(
                        "%s;" % method_entry.type.declaration_code("(*%s)" % method_entry.name))
            code.putln(
                "};")
    def generate_exttype_vtabptr_declaration(self, entry, code):
        # Generate declaration of pointer to an extension type's vtable.
        type = entry.type
        if type.vtabptr_cname:
            code.putln("static struct %s *%s;" % (
                type.vtabstruct_cname,
                type.vtabptr_cname))
    def generate_obj_struct_definition(self, type, code):
        # Generate object struct definition for an
        # extension type: base struct (or PyObject_HEAD), optional
        # vtable pointer, then the instance attributes.
        if not type.scope:
            return # Forward declared but never defined
        header, footer = \
            self.sue_header_footer(type, "struct", type.objstruct_cname)
        code.putln("")
        code.putln(header)
        base_type = type.base_type
        if base_type:
            code.putln(
                "%s%s %s;" % (
                    # Tuple indexing: prefix "struct " only when the
                    # base struct is not typedef'ed.
                    ("struct ", "")[base_type.typedef_flag],
                    base_type.objstruct_cname,
                    Naming.obj_base_cname))
        else:
            code.putln(
                "PyObject_HEAD")
        # A vtable slot is only added here if a base type doesn't
        # already provide one (the base's slot is reused otherwise).
        if type.vtabslot_cname and not (type.base_type and type.base_type.vtabslot_cname):
            code.putln(
                "struct %s *%s;" % (
                    type.vtabstruct_cname,
                    type.vtabslot_cname))
        for attr in type.scope.var_entries:
            code.putln(
                "%s;" %
                    attr.type.declaration_code(attr.cname))
        code.putln(footer)
    def generate_global_declarations(self, env, code):
        # Emit static pointers to the runtime type objects of the
        # module's extension types, plus the module's global and
        # default-value variables.
        code.putln("")
        for entry in env.c_class_entries:
            code.putln("static PyTypeObject *%s = 0;" %
                entry.type.typeptr_cname)
        code.put_var_declarations(env.var_entries, static = 1,
            dll_linkage = "DL_EXPORT")
        code.put_var_declarations(env.default_entries, static = 1)
    def generate_cfunction_predeclarations(self, env, code):
        # Emit prototypes for all C functions declared in this module,
        # with storage class and linkage derived from their visibility.
        for entry in env.cfunc_entries:
            if not entry.in_cinclude:
                if entry.visibility == 'public':
                    dll_linkage = "DL_EXPORT"
                else:
                    dll_linkage = None
                header = entry.type.declaration_code(entry.cname,
                    dll_linkage = dll_linkage)
                # ('<>' is the Python 2 inequality operator.)
                if entry.visibility <> 'private':
                    storage_class = ""
                else:
                    storage_class = "static "
                code.putln("%s%s; /*proto*/" % (
                    storage_class,
                    header))
    def generate_typeobj_definitions(self, env, code):
        # For every extension type defined (not merely imported) by
        # this module, emit its internal slot functions, its tables
        # and finally the type object definition itself.
        modname = env.module_name
        for entry in env.c_class_entries:
            #print "generate_typeobj_definitions:", entry.name
            #print "...visibility =", entry.visibility
            if entry.visibility <> 'extern':
                type = entry.type
                scope = type.scope
                if scope: # could be None if there was an error
                    self.generate_exttype_vtable(scope, code)
                    self.generate_new_function(scope, code)
                    self.generate_dealloc_function(scope, code)
                    self.generate_traverse_function(scope, code)
                    self.generate_clear_function(scope, code)
                    # Synthesized slot wrappers are only needed when the
                    # user defined the corresponding special methods.
                    if scope.defines_any(["__getitem__"]):
                        self.generate_getitem_int_function(scope, code)
                    if scope.defines_any(["__setitem__", "__delitem__"]):
                        self.generate_ass_subscript_function(scope, code)
                    if scope.defines_any(["__setslice__", "__delslice__"]):
                        self.generate_ass_slice_function(scope, code)
                    if scope.defines_any(["__getattr__"]):
                        self.generate_getattro_function(scope, code)
                    if scope.defines_any(["__setattr__", "__delattr__"]):
                        self.generate_setattro_function(scope, code)
                    if scope.defines_any(["__get__"]):
                        self.generate_descr_get_function(scope, code)
                    if scope.defines_any(["__set__", "__delete__"]):
                        self.generate_descr_set_function(scope, code)
                    self.generate_property_accessors(scope, code)
                    self.generate_method_table(scope, code)
                    self.generate_member_table(scope, code)
                    self.generate_getset_table(scope, code)
                    self.generate_typeobj_definition(modname, entry, code)
    def generate_exttype_vtable(self, scope, code):
        # Generate the definition of an extension type's vtable.
        type = scope.parent_type
        if type.vtable_cname:
            code.putln("static struct %s %s;" % (
                type.vtabstruct_cname,
                type.vtable_cname))
    def generate_self_cast(self, scope, code):
        # Emit a local variable 'p' holding the generic PyObject
        # argument 'o' cast to this extension type's struct pointer.
        type = scope.parent_type
        code.putln(
            "%s = (%s)o;" % (
                type.declaration_code("p"),
                type.declaration_code("")))
    def generate_new_function(self, scope, code):
        # Generate the tp_new slot function: allocate (or delegate to
        # the base type's tp_new), set the vtable pointer, initialise
        # all Python object attributes to None, then call the user's
        # __new__ if there is one.
        base_type = scope.parent_type.base_type
        code.putln("")
        code.putln(
            "static PyObject *%s(PyTypeObject *t, PyObject *a, PyObject *k) {"
                % scope.mangle_internal("tp_new"))
        if base_type:
            code.putln(
                "PyObject *o = %s->tp_new(t, a, k);" %
                    base_type.typeptr_cname)
        else:
            code.putln(
                "PyObject *o = (*t->tp_alloc)(t, 0);")
        self.generate_self_cast(scope, code)
        type = scope.parent_type
        if type.vtabslot_cname:
            # NOTE(review): the generated C assigns through a cast on the
            # left-hand side ("(struct X *)p->y = z;"), which is not valid
            # ISO C -- confirm which compilers this is expected to target.
            code.putln("(struct %s *)p->%s = %s;" % (
                type.vtabstruct_cname,
                type.vtabslot_cname,
                type.vtabptr_cname))
        for entry in scope.var_entries:
            if entry.type.is_pyobject:
                code.put_init_var_to_py_none(entry, "p->%s")
        entry = scope.lookup_here("__new__")
        if entry:
            # If the user's __new__ fails, drop the new object; the
            # cleared 'o' (null) is then returned as the error value.
            code.putln(
                "if (%s(o, a, k) < 0) {" %
                    entry.func_cname)
            code.put_decref_clear("o", py_object_type);
            code.putln(
                "}")
        code.putln(
            "return o;")
        code.putln(
            "}")
    def generate_dealloc_function(self, scope, code):
        # Generate the tp_dealloc slot function: call the user's
        # __dealloc__ (if any), release all Python object attributes,
        # then chain to the base type's dealloc or free the memory.
        base_type = scope.parent_type.base_type
        code.putln("")
        code.putln(
            "static void %s(PyObject *o) {"
                % scope.mangle_internal("tp_dealloc"))
        self.generate_self_cast(scope, code)
        self.generate_usr_dealloc_call(scope, code)
        for entry in scope.var_entries:
            if entry.type.is_pyobject:
                code.put_xdecref("p->%s" % entry.cname, entry.type)
        if base_type:
            code.putln(
                "%s->tp_dealloc(o);" %
                    base_type.typeptr_cname)
        else:
            code.putln(
                "(*o->ob_type->tp_free)(o);")
        code.putln(
            "}")
    def generate_usr_dealloc_call(self, scope, code):
        # Emit the call to the user's __dealloc__ method, if defined.
        # The generated C temporarily bumps the refcount so the object
        # is not re-deallocated if __dealloc__ resurrects a reference,
        # and saves/restores any pending exception around the call.
        entry = scope.lookup_here("__dealloc__")
        if entry:
            code.putln(
                "{")
            code.putln(
                "PyObject *etype, *eval, *etb;")
            code.putln(
                "PyErr_Fetch(&etype, &eval, &etb);")
            code.putln(
                "++o->ob_refcnt;")
            code.putln(
                "%s(o);" %
                    entry.func_cname)
            # __dealloc__ cannot propagate an exception; report it as
            # unraisable instead.
            code.putln(
                "if (PyErr_Occurred()) PyErr_WriteUnraisable(o);")
            code.putln(
                "--o->ob_refcnt;")
            code.putln(
                "PyErr_Restore(etype, eval, etb);")
            code.putln(
                "}")
def generate_traverse_function(self, scope, code):
base_type = scope.parent_type.base_type
code.putln("")
code.putln(
"static int %s(PyObject *o, visitproc v, void *a) {"
% scope.mangle_internal("tp_traverse"))
code.putln(
"int e;")
self.generate_self_cast(scope, code)
if base_type:
code.putln(
"%s->tp_traverse(o, v, a);" %
base_type.typeptr_cname)
for entry in scope.var_entries:
if entry.type.is_pyobject:
var_code = "p->%s" % entry.cname
code.putln(
"if (%s) {"
% var_code)
if entry.type.is_extension_type:
var_code = "((PyObject*)%s)" % var_code
code.putln(
"e = (*v)(%s, a); if (e) return e;"
% var_code)
code.putln(
"}")
code.putln(
"return 0;")
code.putln(
"}")
    def generate_clear_function(self, scope, code):
        # Generate the tp_clear slot function for GC support: clear the
        # base type's attributes, then reset every Python object
        # attribute of this type to None.
        # NOTE(review): the base type's tp_clear return value is
        # discarded -- confirm that is acceptable for the targeted
        # Python versions.
        base_type = scope.parent_type.base_type
        code.putln("")
        code.putln(
            "static int %s(PyObject *o) {"
                % scope.mangle_internal("tp_clear"))
        self.generate_self_cast(scope, code)
        if base_type:
            code.putln(
                "%s->tp_clear(o);" %
                    base_type.typeptr_cname)
        for entry in scope.var_entries:
            if entry.type.is_pyobject:
                # Drop the old reference, then re-point the attribute
                # at None so the object stays in a consistent state.
                name = "p->%s" % entry.cname
                code.put_xdecref(name, entry.type)
                #code.put_init_to_py_none(name)
                code.put_init_var_to_py_none(entry, "p->%s")
        code.putln(
            "return 0;")
        code.putln(
            "}")
    def generate_getitem_int_function(self, scope, code):
        # This function is put into the sq_item slot when
        # a __getitem__ method is present. It converts its
        # argument to a Python integer and calls mp_subscript.
        code.putln(
            "static PyObject *%s(PyObject *o, int i) {" %
                scope.mangle_internal("sq_item"))
        code.putln(
                "PyObject *r;")
        code.putln(
                "PyObject *x = PyInt_FromLong(i); if(!x) return 0;")
        code.putln(
                "r = o->ob_type->tp_as_mapping->mp_subscript(o, x);")
        code.putln(
                "Py_DECREF(x);")
        code.putln(
                "return r;")
        code.putln(
            "}")
    def generate_ass_subscript_function(self, scope, code):
        # Setting and deleting an item are both done through
        # the ass_subscript method, so we dispatch to user's __setitem__
        # or __delitem__, or raise an exception.  (A NULL value argument
        # in the generated C means deletion.)
        base_type = scope.parent_type.base_type
        set_entry = scope.lookup_here("__setitem__")
        del_entry = scope.lookup_here("__delitem__")
        code.putln("")
        code.putln(
            "static int %s(PyObject *o, PyObject *i, PyObject *v) {" %
                scope.mangle_internal("mp_ass_subscript"))
        code.putln(
                "if (v) {")
        if set_entry:
            code.putln(
                    "return %s(o, i, v);" %
                        set_entry.func_cname)
        else:
            # No user method: try the base type's slot, then fail.
            self.generate_guarded_basetype_call(
                base_type, "tp_as_mapping", "mp_ass_subscript", "o, i, v", code)
            code.putln(
                    "PyErr_Format(PyExc_NotImplementedError,")
            code.putln(
                    '  "Subscript assignment not supported by %s", o->ob_type->tp_name);')
            code.putln(
                    "return -1;")
        code.putln(
                "}")
        code.putln(
                "else {")
        if del_entry:
            code.putln(
                    "return %s(o, i);" %
                        del_entry.func_cname)
        else:
            self.generate_guarded_basetype_call(
                base_type, "tp_as_mapping", "mp_ass_subscript", "o, i, v", code)
            code.putln(
                    "PyErr_Format(PyExc_NotImplementedError,")
            code.putln(
                    '  "Subscript deletion not supported by %s", o->ob_type->tp_name);')
            code.putln(
                    "return -1;")
        code.putln(
                "}")
        code.putln(
            "}")
    def generate_guarded_basetype_call(
            self, base_type, substructure, slot, args, code):
        # Emit C code that forwards the call to the base type's slot
        # (optionally inside a substructure such as tp_as_mapping),
        # guarded by null checks; falls through when the base type has
        # no such slot.  Emits nothing when there is no base type.
        if base_type:
            base_tpname = base_type.typeptr_cname
            if substructure:
                code.putln(
                    "if (%s->%s && %s->%s->%s)" % (
                        base_tpname, substructure, base_tpname, substructure, slot))
                code.putln(
                    "  return %s->%s->%s(%s);" % (
                        base_tpname, substructure, slot, args))
            else:
                code.putln(
                    "if (%s->%s)" % (
                        base_tpname, slot))
                code.putln(
                    "  return %s->%s(%s);" % (
                        base_tpname, slot, args))
    def generate_ass_slice_function(self, scope, code):
        """Generate the C sq_ass_slice slot function. Setting and
        deleting a slice both arrive through this slot (v is NULL for
        deletion), so dispatch to the user's __setslice__ or
        __delslice__, fall back on the base type's slot if any, or
        raise NotImplementedError."""
        # Setting and deleting a slice are both done through
        # the ass_slice method, so we dispatch to user's __setslice__
        # or __delslice__, or raise an exception.
        base_type = scope.parent_type.base_type
        set_entry = scope.lookup_here("__setslice__")
        del_entry = scope.lookup_here("__delslice__")
        code.putln("")
        code.putln(
            "static int %s(PyObject *o, int i, int j, PyObject *v) {" %
                scope.mangle_internal("sq_ass_slice"))
        code.putln(
            "if (v) {")
        if set_entry:
            code.putln(
                "return %s(o, i, j, v);" %
                    set_entry.func_cname)
        else:
            self.generate_guarded_basetype_call(
                base_type, "tp_as_sequence", "sq_ass_slice", "o, i, j, v", code)
            code.putln(
                "PyErr_Format(PyExc_NotImplementedError,")
            code.putln(
                ' "2-element slice assignment not supported by %s", o->ob_type->tp_name);')
            code.putln(
                "return -1;")
        code.putln(
            "}")
        code.putln(
            "else {")
        if del_entry:
            code.putln(
                "return %s(o, i, j);" %
                    del_entry.func_cname)
        else:
            self.generate_guarded_basetype_call(
                base_type, "tp_as_sequence", "sq_ass_slice", "o, i, j, v", code)
            code.putln(
                "PyErr_Format(PyExc_NotImplementedError,")
            code.putln(
                ' "2-element slice deletion not supported by %s", o->ob_type->tp_name);')
            code.putln(
                "return -1;")
        code.putln(
            "}")
        code.putln(
            "}")
    def generate_getattro_function(self, scope, code):
        """Generate the C tp_getattro slot function for a type with a
        user __getattr__."""
        # First try to get the attribute using PyObject_GenericGetAttr.
        # If that raises an AttributeError, call the user's __getattr__
        # method.
        entry = scope.lookup_here("__getattr__")
        code.putln("")
        code.putln(
            "static PyObject *%s(PyObject *o, PyObject *n) {"
                % scope.mangle_internal("tp_getattro"))
        code.putln(
            "PyObject *v = PyObject_GenericGetAttr(o, n);")
        # Only AttributeError triggers the fallback; other errors
        # propagate unchanged.
        code.putln(
            "if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {")
        code.putln(
            "PyErr_Clear();")
        code.putln(
            "v = %s(o, n);" %
                entry.func_cname)
        code.putln(
            "}")
        code.putln(
            "return v;")
        code.putln(
            "}")
    def generate_setattro_function(self, scope, code):
        """Generate the C tp_setattro slot function. Setting and
        deleting an attribute both arrive through this slot (v is NULL
        for deletion), so dispatch to the user's __setattr__ or
        __delattr__, or fall back on the base type's slot /
        PyObject_GenericSetAttr."""
        # Setting and deleting an attribute are both done through
        # the setattro method, so we dispatch to user's __setattr__
        # or __delattr__ or fall back on PyObject_GenericSetAttr.
        base_type = scope.parent_type.base_type
        set_entry = scope.lookup_here("__setattr__")
        del_entry = scope.lookup_here("__delattr__")
        code.putln("")
        code.putln(
            "static int %s(PyObject *o, PyObject *n, PyObject *v) {" %
                scope.mangle_internal("tp_setattro"))
        code.putln(
            "if (v) {")
        if set_entry:
            code.putln(
                "return %s(o, n, v);" %
                    set_entry.func_cname)
        else:
            self.generate_guarded_basetype_call(
                base_type, None, "tp_setattro", "o, n, v", code)
            code.putln(
                "return PyObject_GenericSetAttr(o, n, v);")
        code.putln(
            "}")
        code.putln(
            "else {")
        if del_entry:
            code.putln(
                "return %s(o, n);" %
                    del_entry.func_cname)
        else:
            self.generate_guarded_basetype_call(
                base_type, None, "tp_setattro", "o, n, v", code)
            code.putln(
                "return PyObject_GenericSetAttr(o, n, 0);")
        code.putln(
            "}")
        code.putln(
            "}")
    def generate_descr_get_function(self, scope, code):
        """Generate the C tp_descr_get slot function, delegating to the
        user's __get__."""
        # The __get__ function of a descriptor object can be
        # called with NULL for the second or third arguments
        # under some circumstances, so we replace them with
        # None in that case.
        user_get_entry = scope.lookup_here("__get__")
        code.putln("")
        code.putln(
            "static PyObject *%s(PyObject *o, PyObject *i, PyObject *c) {" %
                scope.mangle_internal("tp_descr_get"))
        code.putln(
            "PyObject *r = 0;")
        code.putln(
            "if (!i) i = Py_None;")
        code.putln(
            "if (!c) c = Py_None;")
        #code.put_incref("i", py_object_type)
        #code.put_incref("c", py_object_type)
        code.putln(
            "r = %s(o, i, c);" %
                user_get_entry.func_cname)
        #code.put_decref("i", py_object_type)
        #code.put_decref("c", py_object_type)
        code.putln(
            "return r;")
        code.putln(
            "}")
    def generate_descr_set_function(self, scope, code):
        """Generate the C tp_descr_set slot function. Setting and
        deleting both arrive through this slot (v is NULL for deletion),
        so dispatch to the user's __set__ or __delete__, fall back on
        the base type's slot if any, or raise NotImplementedError."""
        # Setting and deleting are both done through the __set__
        # method of a descriptor, so we dispatch to user's __set__
        # or __delete__ or raise an exception.
        base_type = scope.parent_type.base_type
        user_set_entry = scope.lookup_here("__set__")
        user_del_entry = scope.lookup_here("__delete__")
        code.putln("")
        code.putln(
            "static int %s(PyObject *o, PyObject *i, PyObject *v) {" %
                scope.mangle_internal("tp_descr_set"))
        code.putln(
            "if (v) {")
        if user_set_entry:
            code.putln(
                "return %s(o, i, v);" %
                    user_set_entry.func_cname)
        else:
            self.generate_guarded_basetype_call(
                base_type, None, "tp_descr_set", "o, i, v", code)
            code.putln(
                'PyErr_SetString(PyExc_NotImplementedError, "__set__");')
            code.putln(
                "return -1;")
        code.putln(
            "}")
        code.putln(
            "else {")
        if user_del_entry:
            code.putln(
                "return %s(o, i);" %
                    user_del_entry.func_cname)
        else:
            self.generate_guarded_basetype_call(
                base_type, None, "tp_descr_set", "o, i, v", code)
            code.putln(
                'PyErr_SetString(PyExc_NotImplementedError, "__delete__");')
            code.putln(
                "return -1;")
        code.putln(
            "}")
        code.putln(
            "}")
def generate_property_accessors(self, cclass_scope, code):
for entry in cclass_scope.property_entries:
property_scope = entry.scope
if property_scope.defines_any(["__get__"]):
self.generate_property_get_function(entry, code)
if property_scope.defines_any(["__set__", "__del__"]):
self.generate_property_set_function(entry, code)
    def generate_property_get_function(self, property_entry, code):
        """Generate the C getter for a property, delegating to the
        property's __get__ method. Also records the mangled getter
        name on the entry for later use in the getset table."""
        property_scope = property_entry.scope
        property_entry.getter_cname = property_scope.parent_scope.mangle(
            Naming.prop_get_prefix, property_entry.name)
        get_entry = property_scope.lookup_here("__get__")
        code.putln("")
        code.putln(
            "static PyObject *%s(PyObject *o, void *x) {" %
                property_entry.getter_cname)
        code.putln(
            "return %s(o);" %
                get_entry.func_cname)
        code.putln(
            "}")
    def generate_property_set_function(self, property_entry, code):
        """Generate the C setter for a property. Setting and deleting
        both come through this function (v is NULL for deletion), so
        dispatch to __set__ or __del__ or raise NotImplementedError.
        Also records the mangled setter name on the entry."""
        property_scope = property_entry.scope
        property_entry.setter_cname = property_scope.parent_scope.mangle(
            Naming.prop_set_prefix, property_entry.name)
        set_entry = property_scope.lookup_here("__set__")
        del_entry = property_scope.lookup_here("__del__")
        code.putln("")
        code.putln(
            "static int %s(PyObject *o, PyObject *v, void *x) {" %
                property_entry.setter_cname)
        code.putln(
            "if (v) {")
        if set_entry:
            code.putln(
                "return %s(o, v);" %
                    set_entry.func_cname)
        else:
            code.putln(
                'PyErr_SetString(PyExc_NotImplementedError, "__set__");')
            code.putln(
                "return -1;")
        code.putln(
            "}")
        code.putln(
            "else {")
        if del_entry:
            code.putln(
                "return %s(o);" %
                    del_entry.func_cname)
        else:
            code.putln(
                'PyErr_SetString(PyExc_NotImplementedError, "__del__");')
            code.putln(
                "return -1;")
        code.putln(
            "}")
        code.putln(
            "}")
    def generate_typeobj_definition(self, modname, entry, code):
        """Generate the static PyTypeObject definition for an extension
        type, preceded by its slot substructures, with every slot
        filled in from the slot table."""
        type = entry.type
        scope = type.scope
        for suite in TypeSlots.substructures:
            suite.generate_substructure(scope, code)
        code.putln("")
        # Public types are exported from the DLL; others are static.
        if entry.visibility == 'public':
            header = "DL_EXPORT(PyTypeObject) %s = {"
        else:
            header = "statichere PyTypeObject %s = {"
        #code.putln(header % scope.parent_type.typeobj_cname)
        code.putln(header % type.typeobj_cname)
        code.putln(
            "PyObject_HEAD_INIT(0)")
        code.putln(
            "0, /*ob_size*/")
        code.putln(
            '"%s.%s", /*tp_name*/' % (
                modname, scope.class_name))
        if type.typedef_flag:
            objstruct = type.objstruct_cname
        else:
            #objstruct = "struct %s" % scope.parent_type.objstruct_cname
            objstruct = "struct %s" % type.objstruct_cname
        code.putln(
            "sizeof(%s), /*tp_basicsize*/" %
                objstruct)
        code.putln(
            "0, /*tp_itemsize*/")
        for slot in TypeSlots.slot_table:
            slot.generate(scope, code)
        code.putln(
            "};")
    def generate_method_table(self, env, code):
        """Generate the PyMethodDef table for all Python-visible
        functions in the given scope, terminated by a zero entry."""
        code.putln("")
        code.putln(
            "static struct PyMethodDef %s[] = {" %
                env.method_table_cname)
        for entry in env.pyfunc_entries:
            code.put_pymethoddef(entry, ",")
        code.putln(
            "{0, 0, 0, 0}")
        code.putln(
            "};")
    def generate_member_table(self, env, code):
        """Generate the PyMemberDef table exposing the public attributes
        of an extension type; emits nothing when there are none."""
        #print "ModuleNode.generate_member_table: scope =", env ###
        if env.public_attr_entries:
            code.putln("")
            code.putln(
                "static struct PyMemberDef %s[] = {" %
                    env.member_table_cname)
            type = env.parent_type
            if type.typedef_flag:
                objstruct = type.objstruct_cname
            else:
                objstruct = "struct %s" % type.objstruct_cname
            for entry in env.public_attr_entries:
                type_code = entry.type.pymemberdef_typecode
                # 'readonly' visibility maps to the READONLY flag.
                if entry.visibility == 'readonly':
                    flags = "READONLY"
                else:
                    flags = "0"
                code.putln('{"%s", %s, %s, %s, 0},' % (
                    entry.name,
                    type_code,
                    "offsetof(%s, %s)" % (objstruct, entry.name),
                    flags))
            code.putln(
                "{0, 0, 0, 0, 0}")
            code.putln(
                "};")
    def generate_getset_table(self, env, code):
        """Generate the PyGetSetDef table for the scope's properties;
        emits nothing when there are none."""
        if env.property_entries:
            code.putln("")
            code.putln(
                "static struct PyGetSetDef %s[] = {" %
                    env.getset_table_cname)
            for entry in env.property_entries:
                # Missing getter/setter/doc slots become NULL ("0").
                code.putln(
                    '{"%s", %s, %s, %s, 0},' % (
                        entry.name,
                        entry.getter_cname or "0",
                        entry.setter_cname or "0",
                        entry.doc_cname or "0"))
            code.putln(
                "{0, 0, 0, 0, 0}")
            code.putln(
                "};")
    def generate_interned_name_table(self, env, code):
        """Generate the table mapping C string constants to the interned
        name objects created at module-init time; emits nothing when
        no names are interned."""
        items = env.intern_map.items()
        if items:
            # Sort for deterministic output across runs.
            items.sort()
            code.putln("")
            code.putln(
                "static __Pyx_InternTabEntry %s[] = {" %
                    Naming.intern_tab_cname)
            for (name, cname) in items:
                code.putln(
                    '{&%s, "%s"},' % (
                        cname,
                        name))
            code.putln(
                "{0, 0}")
            code.putln(
                "};")
    def generate_py_string_table(self, env, code):
        """Generate the table of Python string objects to be created
        from C string constants at module-init time; emits nothing
        when there are none."""
        entries = env.all_pystring_entries
        if entries:
            code.putln("")
            code.putln(
                "static __Pyx_StringTabEntry %s[] = {" %
                    Naming.stringtab_cname)
            for entry in entries:
                code.putln(
                    "{&%s, %s, sizeof(%s)}," % (
                        entry.pystring_cname,
                        entry.cname,
                        entry.cname))
            code.putln(
                "{0, 0, 0}")
            code.putln(
                "};")
    def generate_module_init_func(self, imported_modules, env, code):
        """Generate the module's init<name>() entry point: creates the
        module object, interns names, initialises strings and globals,
        imports/readies extension types, then executes the module body.
        Errors during init are reported via a traceback entry."""
        code.putln("")
        header = "DL_EXPORT(void) init%s(void)" % env.module_name
        code.putln("%s; /*proto*/" % header)
        code.putln("%s {" % header)
        code.put_var_declarations(env.temp_entries)
        env.generate_library_function_declarations(code)
        self.generate_module_creation_code(env, code)
        self.generate_intern_code(env, code)
        self.generate_string_init_code(env, code)
        self.generate_global_init_code(env, code)
        for module in imported_modules:
            self.generate_type_import_code_for_module(module, env, code)
        self.generate_type_init_code(env, code)
        self.body.generate_execution_code(code)
        code.putln("return;")
        # Error exit: clean up temporaries and add a traceback frame
        # naming the module.
        code.put_label(code.error_label)
        code.put_var_xdecrefs(env.temp_entries)
        code.putln('__Pyx_AddTraceback("%s");' % (env.module_name))
        env.use_utility_code(traceback_utility_code)
        code.putln('}')
def generate_module_creation_code(self, env, code):
# Generate code to create the module object and
# install the builtins.
if env.doc:
doc = env.doc_cname
else:
doc = "0"
code.putln(
'%s = Py_InitModule4("%s", %s, %s, 0, PYTHON_API_VERSION);' % (
env.module_cname,
env.module_name,
env.method_table_cname,
doc))
code.putln(
"if (!%s) %s;" % (
env.module_cname,
code.error_goto(self.pos)));
code.putln(
'%s = PyImport_AddModule("__builtin__");' %
Naming.builtins_cname)
code.putln(
"if (!%s) %s;" % (
Naming.builtins_cname,
code.error_goto(self.pos)));
code.putln(
'if (PyObject_SetAttrString(%s, "__builtins__", %s) < 0) %s;' % (
env.module_cname,
Naming.builtins_cname,
code.error_goto(self.pos)))
def generate_intern_code(self, env, code):
if env.intern_map:
env.use_utility_code(init_intern_tab_utility_code);
code.putln(
"if (__Pyx_InternStrings(%s) < 0) %s;" % (
Naming.intern_tab_cname,
code.error_goto(self.pos)))
    def generate_string_init_code(self, env, code):
        """Generate the call that initialises the module's Python string
        table; emits nothing when no strings are used."""
        if env.all_pystring_entries:
            env.use_utility_code(init_string_tab_utility_code)
            code.putln(
                "if (__Pyx_InitStrings(%s) < 0) %s;" % (
                    Naming.stringtab_cname,
                    code.error_goto(self.pos)))
def generate_global_init_code(self, env, code):
# Generate code to initialise global PyObject *
# variables to None.
for entry in env.var_entries:
if entry.visibility <> 'extern':
if entry.type.is_pyobject:
code.put_init_var_to_py_none(entry)
def generate_type_import_code_for_module(self, module, env, code):
# Generate type import code for all extension types in
# an imported module.
if module.c_class_entries:
for entry in module.c_class_entries:
self.generate_type_import_code(env, entry, code)
    def generate_type_init_code(self, env, code):
        """Generate initialisation code for every extension type in the
        scope: import code for extern types, vtable/ready/typeptr code
        for types defined in this module."""
        # Generate type import code for extern extension types
        # and type ready code for non-extern ones.
        for entry in env.c_class_entries:
            if entry.visibility == 'extern':
                self.generate_type_import_code(env, entry, code)
            else:
                self.generate_exttype_vtable_init_code(entry, code)
                self.generate_type_ready_code(env, entry, code)
                self.generate_typeptr_assignment_code(entry, code)
    def use_type_import_utility_code(self, env):
        """Pull in the runtime helpers needed to import extension types
        from other modules."""
        # Imported here to avoid a circular import at module load time.
        import ExprNodes
        env.use_utility_code(type_import_utility_code)
        env.use_utility_code(ExprNodes.import_utility_code)
    def generate_type_import_code(self, env, entry, code):
        """Generate code to import the type object of an extension type
        defined in another module, and extract its C method table
        pointer if it has one."""
        type = entry.type
        if type.typedef_flag:
            objstruct = type.objstruct_cname
        else:
            objstruct = "struct %s" % type.objstruct_cname
        # sizeof(objstruct) lets the runtime check the struct layout
        # matches the exporting module's.
        code.putln('%s = __Pyx_ImportType("%s", "%s", sizeof(%s)); if (!%s) %s' % (
            type.typeptr_cname,
            type.module_name,
            type.name,
            objstruct,
            type.typeptr_cname,
            code.error_goto(entry.pos)))
        self.use_type_import_utility_code(env)
        if type.vtabptr_cname:
            # The imported type has a C method table: fetch it from
            # the type's tp_dict.
            code.putln(
                "if (__Pyx_GetVtable(%s->tp_dict, &%s) < 0) %s" % (
                    type.typeptr_cname,
                    type.vtabptr_cname,
                    code.error_goto(entry.pos)))
            env.use_utility_code(get_vtable_utility_code)
def generate_type_ready_code(self, env, entry, code):
# Generate a call to PyType_Ready for an extension
# type defined in this module.
type = entry.type
typeobj_cname = type.typeobj_cname
scope = type.scope
if scope: # could be None if there was an error
if entry.visibility <> 'extern':
for slot in TypeSlots.slot_table:
slot.generate_dynamic_init_code(scope, code)
code.putln(
"if (PyType_Ready(&%s) < 0) %s" % (
typeobj_cname,
code.error_goto(entry.pos)))
if type.vtable_cname:
code.putln(
"if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
typeobj_cname,
type.vtabptr_cname,
code.error_goto(entry.pos)))
env.use_utility_code(set_vtable_utility_code)
code.putln(
'if (PyObject_SetAttrString(%s, "%s", (PyObject *)&%s) < 0) %s' % (
Naming.module_cname,
scope.class_name,
typeobj_cname,
code.error_goto(entry.pos)))
    def generate_exttype_vtable_init_code(self, entry, code):
        """Generate code to initialise the C method table (vtable) of an
        extension type; emits nothing for types without a vtable."""
        type = entry.type
        if type.vtable_cname:
            code.putln(
                "%s = &%s;" % (
                    type.vtabptr_cname,
                    type.vtable_cname))
            if type.base_type and type.base_type.vtabptr_cname:
                # Start from a copy of the base type's vtable so
                # inherited methods are already filled in.
                code.putln(
                    "%s.%s = *%s;" % (
                        type.vtable_cname,
                        Naming.obj_base_cname,
                        type.base_type.vtabptr_cname))
            for meth_entry in type.scope.cfunc_entries:
                #if not meth_entry.is_inherited:
                if meth_entry.func_cname:
                    code.putln(
                        "%s.%s = (void *)%s;" % (
                            type.vtable_cname,
                            meth_entry.cname,
                            meth_entry.func_cname))
def generate_typeptr_assignment_code(self, entry, code):
# Generate code to initialise the typeptr of an extension
# type defined in this module to point to its type object.
type = entry.type
if type.typeobj_cname:
code.putln(
"%s = &%s;" % (
type.typeptr_cname, type.typeobj_cname))
def generate_utility_functions(self, env, code):
code.putln("")
code.putln("/* Runtime support code */")
for utility_code in env.utility_code_used:
code.put(utility_code)
class StatListNode(Node):
    """A sequence of statements; every compilation phase is simply
    delegated to each child statement in order.

    stats   [StatNode]
    """
    def analyse_declarations(self, env):
        for child in self.stats:
            child.analyse_declarations(env)
    def analyse_expressions(self, env):
        for child in self.stats:
            child.analyse_expressions(env)
    def generate_function_definitions(self, env, code):
        for child in self.stats:
            child.generate_function_definitions(env, code)
    def generate_execution_code(self, code):
        for child in self.stats:
            # Record the source position before each statement so C
            # line markers stay in sync.
            code.mark_pos(child.pos)
            child.generate_execution_code(code)
class StatNode(Node):
    """Abstract base class for all statement nodes."""
    #
    # Code generation for statements is split into the following subphases:
    #
    # (1) generate_function_definitions
    #       Emit C code for the definitions of any structs,
    #       unions, enums and functions defined in the current
    #       scope-block.
    #
    # (2) generate_execution_code
    #       Emit C code for executable statements.
    #
    def generate_function_definitions(self, env, code):
        # Default: most statements define no functions.
        pass
    def generate_execution_code(self, code):
        # Concrete statement classes must override this.
        raise InternalError("generate_execution_code not implemented for %s" % \
            self.__class__.__name__)
class InlineStatNode(StatNode):
    """A block of verbatim C code supplied by the user; emitted as-is
    into the generated output."""
    def analyse_declarations(self, env):
        pass
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        # Emit the raw C text exactly as written.
        code.putln(str(self.string.value))
        #raise InternalError("generate_code not implemented for %s" % \
        #    self.__class__.__name__)
class CDefExternNode(StatNode):
    """A 'cdef extern' block: declarations made inside it live in C
    code included from elsewhere, so they are analysed with the
    scope's in_cinclude flag raised.

    include_file   string or None
    body           StatNode
    """
    def analyse_declarations(self, env):
        if self.include_file:
            env.add_include_file(self.include_file)
        was_in_cinclude = env.in_cinclude
        env.in_cinclude = 1
        self.body.analyse_declarations(env)
        env.in_cinclude = was_in_cinclude
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        pass
class CDeclaratorNode(Node):
    """Abstract base class for one part of a C declaration."""
    #
    # Processing during analyse_declarations phase:
    #
    #   analyse
    #      Returns (name, type) pair where name is the
    #      CNameDeclaratorNode of the name being declared
    #      and type is the type it is being declared as.
    #
    pass
class CNameDeclaratorNode(CDeclaratorNode):
    # name   string         The Pyrex name being declared
    # cname  string or None  C name, if specified
    def analyse(self, base_type, env):
        # Innermost declarator: the accumulated type is final, so
        # return this name node together with it.
        return self, base_type
class CPtrDeclaratorNode(CDeclaratorNode):
    """Declarator for a C pointer: wraps the accumulated type in a
    pointer type and recurses into the inner declarator.

    base   CDeclaratorNode
    """
    def analyse(self, base_type, env):
        if base_type.is_pyobject:
            # Report the error but keep analysing so later phases
            # still see a usable type.
            error(self.pos,
                "Pointer base type cannot be a Python object")
        return self.base.analyse(PyrexTypes.c_ptr_type(base_type), env)
class CArrayDeclaratorNode(CDeclaratorNode):
    # base       CDeclaratorNode
    # dimension  ExprNode
    def analyse(self, base_type, env):
        # Wrap base_type in a C array type (validating the dimension
        # and element type) and recurse into the inner declarator.
        if self.dimension:
            self.dimension.analyse_const_expression(env)
            if not self.dimension.type.is_int:
                error(self.dimension.pos, "Array dimension not integer")
            #size = self.dimension.value
            size = self.dimension.result
        else:
            # Unsized array, e.g. extern declarations.
            size = None
        if not base_type.is_complete():
            error(self.pos,
                "Array element type '%s' is incomplete" % base_type)
        if base_type.is_pyobject:
            error(self.pos,
                "Array element cannot be a Python object")
        array_type = PyrexTypes.c_array_type(base_type, size)
        return self.base.analyse(array_type, env)
class CFuncDeclaratorNode(CDeclaratorNode):
    # base             CDeclaratorNode
    # args             [CArgDeclNode]
    # has_varargs      boolean
    # exception_value  ConstNode
    # exception_check  boolean    True if PyErr_Occurred check needed
    def analyse(self, return_type, env):
        # Build a CFuncType from the argument declarations, varargs
        # flag and exception clause, then recurse into the inner
        # declarator with it.
        func_type_args = []
        for arg_node in self.args:
            name_declarator, type = arg_node.analyse(env)
            name = name_declarator.name
            if name_declarator.cname:
                error(self.pos,
                    "Function argument cannot have C name specification")
            # Turn *[] argument into **
            if type.is_array:
                type = PyrexTypes.c_ptr_type(type.base_type)
            # Catch attempted C-style func(void) decl
            if type.is_void:
                error(arg_node.pos, "Function argument cannot be void")
            func_type_args.append(
                PyrexTypes.CFuncTypeArg(name, type, arg_node.pos))
            if arg_node.default:
                error(arg_node.pos, "C function argument cannot have default value")
        exc_val = None
        exc_check = 0
        # Functions returning Python objects signal errors through the
        # return value, so an explicit exception clause is disallowed.
        if return_type.is_pyobject \
            and (self.exception_value or self.exception_check):
                error(self.pos,
                    "Exception clause not allowed for function returning Python object")
        else:
            if self.exception_value:
                self.exception_value.analyse_const_expression(env)
                exc_val = self.exception_value.result
                if not return_type.assignable_from(self.exception_value.type):
                    error(self.exception_value.pos,
                        "Exception value incompatible with function return type")
            exc_check = self.exception_check
        func_type = PyrexTypes.CFuncType(
            return_type, func_type_args, self.has_varargs,
            exception_value = exc_val, exception_check = exc_check)
        return self.base.analyse(func_type, env)
class CArgDeclNode(Node):
    """Item in a function declaration argument list."""
    #
    # base_type      CBaseTypeNode
    # declarator     CDeclaratorNode
    # not_none       boolean      Tagged with 'not None'
    # default        ExprNode or None
    # default_entry  Symtab.Entry  Entry for the variable holding the default value
    # is_self_arg    boolean      Is the "self" arg of an extension type method
    is_self_arg = 0
    def analyse(self, env):
        # Returns the (name_declarator, type) pair for this argument.
        base_type = self.base_type.analyse(env)
        return self.declarator.analyse(base_type, env)
class CBaseTypeNode(Node):
    """Abstract base class for C base type nodes."""
    #
    # Processing during analyse_declarations phase:
    #
    #   analyse
    #     Returns the type.
    pass
class CSimpleBaseTypeNode(CBaseTypeNode):
    # name             string
    # module_path      [string]    Qualifying name components
    # is_basic_c_type  boolean
    # signed           boolean
    # longness         integer
    # is_self_arg      boolean     Is self argument of C method
    def analyse(self, env):
        # Return type descriptor: a basic C type, the generic 'object'
        # type, an implicit self/object type, or a user-declared type
        # looked up through the (possibly qualified) name. Falls back
        # on error_type after reporting an error.
        type = None
        if self.is_basic_c_type:
            type = PyrexTypes.simple_c_type(self.signed, self.longness, self.name)
            if not type:
                error(self.pos, "Unrecognised type modifier combination")
        elif self.name == "object" and not self.module_path:
            type = py_object_type
        elif self.name is None:
            # Untyped argument: a bare 'self' in a C class method gets
            # the enclosing extension type; anything else is object.
            if self.is_self_arg and env.is_c_class_scope:
                type = env.parent_type
            else:
                type = py_object_type
        else:
            # Walk the qualifying module path, then look the name up
            # in the final scope reached.
            scope = env
            for name in self.module_path:
                entry = scope.find(name, self.pos)
                if entry and entry.as_module:
                    scope = entry.as_module
                else:
                    if entry:
                        error(self.pos, "'%s' is not a cimported module" % name)
                    scope = None
                    break
            if scope:
                entry = scope.find(self.name, self.pos)
                if entry and entry.is_type:
                    type = entry.type
                else:
                    error(self.pos, "'%s' is not a type identifier" % self.name)
        if type:
            return type
        else:
            # An error was already reported; keep going with error_type.
            return PyrexTypes.error_type
class CComplexBaseTypeNode(CBaseTypeNode):
    """A base type written as a type plus a declarator (e.g. inside a
    cast or typedef): analyse the base type, then run it through the
    declarator and return the resulting type.

    base_type    CBaseTypeNode
    declarator   CDeclaratorNode
    """
    def analyse(self, env):
        analysed_base = self.base_type.analyse(env)
        name_node, result_type = self.declarator.analyse(analysed_base, env)
        return result_type
class CVarDefNode(StatNode):
    """C variable definition or forward/extern function declaration."""
    #
    # visibility   'private' or 'public' or 'extern'
    # base_type    CBaseTypeNode
    # declarators  [CDeclaratorNode]
    def analyse_declarations(self, env, dest_scope = None):
        # Declare each declarator in dest_scope (defaults to env).
        # Declarators with function type are declared as C functions,
        # everything else as variables.
        if not dest_scope:
            dest_scope = env
        base_type = self.base_type.analyse(env)
        for declarator in self.declarators:
            name_declarator, type = declarator.analyse(base_type, env)
            if not type.is_complete():
                # Incomplete types are only permitted for extern arrays.
                if not (self.visibility == 'extern' and type.is_array):
                    error(declarator.pos,
                        "Variable type '%s' is incomplete" % type)
            if self.visibility == 'extern' and type.is_pyobject:
                error(declarator.pos,
                    "Python object cannot be declared extern")
            name = name_declarator.name
            cname = name_declarator.cname
            if type.is_cfunction:
                dest_scope.declare_cfunction(name, type, declarator.pos,
                    cname = cname, visibility = self.visibility)
            else:
                dest_scope.declare_var(name, type, declarator.pos,
                    cname = cname, visibility = self.visibility, is_cdef = 1)
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        pass
class CStructOrUnionDefNode(StatNode):
    """Definition of a C struct or union type."""
    # name          string
    # cname         string or None
    # kind          "struct" or "union"
    # typedef_flag  boolean
    # attributes    [CVarDefNode] or None
    # entry         Entry
    def analyse_declarations(self, env):
        scope = None
        # A non-None attribute list (even an empty one) means the
        # struct/union is being defined here, not just declared, so
        # it needs its own member scope.
        if self.attributes is not None:
            scope = StructOrUnionScope()
        self.entry = env.declare_struct_or_union(
            self.name, self.kind, scope, self.typedef_flag, self.pos,
            self.cname)
        if self.attributes is not None:
            for attr in self.attributes:
                attr.analyse_declarations(env, scope)
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        pass
class CEnumDefNode(StatNode):
    """Definition of a C enum type."""
    # name          string or None
    # cname         string or None
    # items         [CEnumDefItemNode]
    # typedef_flag  boolean
    # entry         Entry
    def analyse_declarations(self, env):
        # Declare the enum type first so the items can refer to it.
        self.entry = env.declare_enum(self.name, self.pos,
            cname = self.cname, typedef_flag = self.typedef_flag)
        for item in self.items:
            item.analyse_declarations(env, self.entry)
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        pass
class CEnumDefItemNode(StatNode):
    """One item of a C enum definition."""
    # name   string
    # cname  string or None
    # value  ExprNode or None
    def analyse_declarations(self, env, enum_entry):
        if self.value:
            self.value.analyse_const_expression(env)
            value = self.value.result
        else:
            # No explicit value: let the C compiler assign it; the
            # item's own name serves as its C value expression.
            value = self.name
        entry = env.declare_const(self.name, enum_entry.type,
            value, self.pos, cname = self.cname)
        enum_entry.enum_values.append(entry)
class CTypeDefNode(StatNode):
    """A ctypedef statement: declares a new type name for the analysed
    base-type/declarator combination.

    base_type    CBaseTypeNode
    declarator   CDeclaratorNode
    """
    def analyse_declarations(self, env):
        analysed_base = self.base_type.analyse(env)
        name_node, declared_type = self.declarator.analyse(analysed_base, env)
        env.declare_type(name_node.name, declared_type, self.pos,
            cname = name_node.cname)
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        pass
class FuncDefNode(StatNode, BlockNode):
    """Base class for function definition nodes; drives the common
    code-generation sequence (header, declarations, argument handling,
    body, error and return epilogues) for all function kinds."""
    #
    # return_type   PyrexType
    # #filename     string        C name of filename string const
    # entry         Symtab.Entry
    def analyse_expressions(self, env):
        pass
    def generate_function_definitions(self, env, code):
        # Generate C code for header and body of function
        genv = env.global_scope()
        # Each function body is analysed in its own local scope.
        lenv = LocalScope(name = self.entry.name, outer_scope = genv)
        #lenv.function_name = self.function_name()
        lenv.return_type = self.return_type
        #self.filename = lenv.get_filename_const(self.pos)
        code.init_labels()
        self.declare_arguments(lenv)
        self.body.analyse_declarations(lenv)
        self.body.analyse_expressions(lenv)
        # Code for nested function definitions would go here
        # if we supported them, which we probably won't.
        # ----- Top-level constants used by this function
        self.generate_interned_name_decls(lenv, code)
        self.generate_py_string_decls(lenv, code)
        #code.putln("")
        #code.put_var_declarations(lenv.const_entries, static = 1)
        self.generate_const_definitions(lenv, code)
        # ----- Function header
        code.putln("")
        self.generate_function_header(code,
            with_pymethdef = env.is_py_class_scope)
        # ----- Local variable declarations
        self.generate_argument_declarations(lenv, code)
        code.put_var_declarations(lenv.var_entries)
        init = ""
        # Non-void functions need a local to hold the return value.
        if not self.return_type.is_void:
            code.putln(
                "%s%s;" %
                    (self.return_type.declaration_code(
                        Naming.retval_cname),
                    init))
        code.put_var_declarations(lenv.temp_entries)
        self.generate_keyword_list(code)
        # ----- Extern library function declarations
        lenv.generate_library_function_declarations(code)
        # ----- Fetch arguments
        self.generate_argument_parsing_code(code)
        self.generate_argument_increfs(lenv, code)
        #self.generate_stararg_getting_code(code)
        self.generate_argument_conversion_code(code)
        # ----- Initialise local variables
        for entry in lenv.var_entries:
            if entry.type.is_pyobject and entry.init_to_none:
                code.put_init_var_to_py_none(entry)
        # ----- Check types of arguments
        self.generate_argument_type_tests(code)
        # ----- Function body
        self.body.generate_execution_code(code)
        # ----- Default return value
        code.putln("")
        if self.return_type.is_pyobject:
            cast = self.return_type.is_extension_type
            lhs = Naming.retval_cname
            code.put_init_to_py_none(cast, lhs)
        else:
            val = self.return_type.default_value
            if val:
                code.putln("%s = %s;" % (Naming.retval_cname, val))
        code.putln("goto %s;" % code.return_label)
        # ----- Error cleanup
        code.put_label(code.error_label)
        code.put_var_xdecrefs(lenv.temp_entries)
        err_val = self.error_value()
        exc_check = self.caller_will_check_exceptions()
        if err_val is not None or exc_check:
            code.putln(
                '__Pyx_AddTraceback("%s");' %
                    self.entry.qualified_name)
            if err_val is not None:
                code.putln(
                    "%s = %s;" % (
                        Naming.retval_cname,
                        err_val))
        else:
            # No way to report the error through the return value:
            # write it out as an unraisable exception instead.
            code.putln(
                '__Pyx_WriteUnraisable("%s");' %
                    self.entry.qualified_name)
            env.use_utility_code(unraisable_exception_utility_code)
        # ----- Return cleanup
        code.put_label(code.return_label)
        code.put_var_decrefs(lenv.var_entries)
        code.put_var_decrefs(lenv.arg_entries)
        self.put_stararg_decrefs(code)
        if not self.return_type.is_void:
            retval_code = Naming.retval_cname
            if self.return_type.is_extension_type:
                # Cast back from PyObject * to the declared return type.
                retval_code = "((%s)%s) " % (
                    self.return_type.declaration_code(""),
                    retval_code)
            code.putln("return %s;" % retval_code)
        code.putln("}")
    def put_stararg_decrefs(self, code):
        pass
    def declare_argument(self, env, arg):
        # Declare one argument in the local scope, checking that its
        # type is usable as an argument type.
        if arg.type.is_void:
            error(arg.pos, "Invalid use of 'void'")
        elif not arg.type.is_complete() and not arg.type.is_array:
            error(arg.pos,
                "Argument type '%s' is incomplete" % arg.type)
        return env.declare_arg(arg.name, arg.type, arg.pos)
    def generate_argument_increfs(self, env, code):
        # Turn borrowed argument refs into owned refs.
        # This is necessary, because if the argument is
        # assigned to, it will be decrefed.
        for entry in env.arg_entries:
            code.put_var_incref(entry)
    def generate_execution_code(self, code):
        pass
class CFuncDefNode(FuncDefNode):
    """C function definition."""
    #
    # visibility   'private' or 'public' or 'extern'
    # base_type    CBaseTypeNode
    # declarator   CDeclaratorNode
    # body         StatListNode
    #
    # type         CFuncType    (set during analyse_declarations)
    def unqualified_name(self):
        return self.entry.name
    def analyse_declarations(self, env):
        base_type = self.base_type.analyse(env)
        name_declarator, type = self.declarator.analyse(base_type, env)
        # Remember the actual type according to the function header
        # written here, because the type in the symbol table entry
        # may be different if we're overriding a C method inherited
        # from the base type of an extension type.
        self.type = type
        if not type.is_cfunction:
            error(self.pos,
                "Suite attached to non-function declaration")
        name = name_declarator.name
        cname = name_declarator.cname
        self.entry = env.declare_cfunction(
            name, type, self.pos,
            cname = cname, visibility = self.visibility,
            defining = self.body is not None)
        self.return_type = type.return_type
    def declare_arguments(self, env):
        # C functions require every argument to be named.
        for arg in self.type.args:
            if not arg.name:
                error(arg.pos, "Missing argument name")
            self.declare_argument(env, arg)
    def generate_function_header(self, code, with_pymethdef):
        # Emit the C function header: 'public' functions get DL_EXPORT
        # linkage, 'private' ones are declared static.
        arg_decls = []
        type = self.type
        for arg in type.args:
            arg_decls.append(arg.declaration_code())
        if type.has_varargs:
            arg_decls.append("...")
        if not arg_decls:
            arg_decls = ["void"]
        entity = "%s(%s)" % (self.entry.func_cname,
            string.join(arg_decls, ","))
        if self.visibility == 'public':
            dll_linkage = "DL_EXPORT"
        else:
            dll_linkage = None
        header = self.return_type.declaration_code(entity,
            dll_linkage = dll_linkage)
        # '!=' replaces the obsolete '<>' operator (removed in
        # Python 3); behaviour is identical.
        if self.visibility != 'private':
            storage_class = ""
        else:
            storage_class = "static "
        code.putln("%s%s {" % (
            storage_class,
            header))
    def generate_argument_declarations(self, env, code):
        # Arguments already declared in function header
        pass
    def generate_keyword_list(self, code):
        pass
    def generate_argument_parsing_code(self, code):
        pass
    def generate_argument_conversion_code(self, code):
        pass
    def generate_argument_type_tests(self, code):
        pass
    def error_value(self):
        # The C error-return value for this function, or None if
        # errors cannot be signalled through the return value.
        if self.return_type.is_pyobject:
            return "0"
        else:
            return self.entry.type.exception_value
    def caller_will_check_exceptions(self):
        return self.entry.type.exception_check
class PyArgDeclNode(Node):
    """Argument which must be a Python object (used for * and **
    arguments)."""
    #
    # name   string
    # entry  Symtab.Entry
    pass
class DefNode(FuncDefNode):
    #  A Python function definition.
    #
    #  name          string                 the Python name of the function
    #  args          [CArgDeclNode]         formal arguments
    #  star_arg      PyArgDeclNode or None  * argument
    #  starstar_arg  PyArgDeclNode or None  ** argument
    #  doc           string or None
    #  body          StatListNode
    #
    #  The following subnode is constructed internally
    #  when the def statement is inside a Python class definition.
    #
    #  assmt   AssignmentNode   Function construction/assignment
    assmt = None
    def analyse_declarations(self, env):
        # Resolve the declared type of each formal argument, then
        # declare the function itself in the enclosing scope and
        # match it against its slot signature.
        for arg in self.args:
            base_type = arg.base_type.analyse(env)
            name_declarator, type = \
                arg.declarator.analyse(base_type, env)
            arg.name = name_declarator.name
            if name_declarator.cname:
                error(self.pos,
                    "Python function argument cannot have C name specification")
            arg.type = type.as_argument_type()
            arg.hdr_type = None
            arg.needs_conversion = 0
            arg.needs_type_test = 0
            arg.is_generic = 1
            if arg.not_none and not arg.type.is_extension_type:
                error(self.pos,
                    "Only extension type arguments can have 'not None'")
        self.declare_pyfunction(env)
        self.analyse_signature(env)
        self.return_type = self.entry.signature.return_type()
        if self.star_arg or self.starstar_arg:
            env.use_utility_code(get_starargs_utility_code)
    def analyse_signature(self, env):
        # Match declared arguments against the fixed-arity part of the
        # slot signature, recording per-argument conversion and
        # runtime type-test requirements.
        any_type_tests_needed = 0
        sig = self.entry.signature
        nfixed = sig.num_fixed_args()
        for i in range(nfixed):
            if i < len(self.args):
                arg = self.args[i]
                arg.is_generic = 0
                if sig.is_self_arg(i):
                    arg.is_self_arg = 1
                    arg.hdr_type = arg.type = env.parent_type
                    arg.needs_conversion = 0
                else:
                    arg.hdr_type = sig.fixed_arg_type(i)
                    if not arg.type.same_as(arg.hdr_type):
                        # PyObject-to-PyObject mismatch needs only a
                        # runtime type test; otherwise a C conversion.
                        if arg.hdr_type.is_pyobject and arg.type.is_pyobject:
                            arg.needs_type_test = 1
                            any_type_tests_needed = 1
                        else:
                            arg.needs_conversion = 1
                if arg.needs_conversion:
                    arg.hdr_cname = Naming.arg_prefix + arg.name
                else:
                    arg.hdr_cname = Naming.var_prefix + arg.name
            else:
                self.bad_signature()
                return
        if nfixed < len(self.args):
            if not sig.has_generic_args:
                self.bad_signature()
            for arg in self.args:
                if arg.is_generic and arg.type.is_extension_type:
                    arg.needs_type_test = 1
                    any_type_tests_needed = 1
        if any_type_tests_needed:
            env.use_utility_code(arg_type_test_utility_code)
    def bad_signature(self):
        # Report a mismatch between the declared argument count and
        # the count expected by the slot signature.
        sig = self.entry.signature
        expected_str = "%d" % sig.num_fixed_args()
        if sig.has_generic_args:
            expected_str = expected_str + " or more"
        name = self.name
        if name.startswith("__") and name.endswith("__"):
            desc = "Special method"
        else:
            desc = "Method"
        error(self.pos,
            "%s %s has wrong number of arguments "
            "(%d declared, %s expected)" % (
                desc, self.name, len(self.args), expected_str))
    def declare_pyfunction(self, env):
        # Create the symbol table entry and the C names for the
        # function, its doc string and its PyMethodDef struct.
        self.entry = env.declare_pyfunction(self.name, self.pos)
        self.entry.doc = self.doc
        self.entry.func_cname = \
            Naming.func_prefix + env.scope_prefix + self.name
        self.entry.doc_cname = \
            Naming.funcdoc_prefix + env.scope_prefix + self.name
        self.entry.pymethdef_cname = \
            Naming.pymethdef_prefix + env.scope_prefix + self.name
    def declare_arguments(self, env):
        # Declare a local variable for each argument; an argument
        # needing conversion gets a separate variable for the
        # converted value.
        for arg in self.args:
            if not arg.name:
                error(arg.pos, "Missing argument name")
            if arg.needs_conversion:
                arg.entry = env.declare_var(arg.name, arg.type, arg.pos)
                if arg.type.is_pyobject:
                    arg.entry.init = "0"
                    arg.entry.init_to_none = 0
            else:
                arg.entry = self.declare_argument(env, arg)
            arg.entry.is_self_arg = arg.is_self_arg
            if arg.hdr_type:
                if arg.is_self_arg or \
                    (arg.type.is_extension_type and not arg.hdr_type.is_extension_type):
                        arg.entry.is_declared_generic = 1
        self.declare_python_arg(env, self.star_arg)
        self.declare_python_arg(env, self.starstar_arg)
    def declare_python_arg(self, env, arg):
        # Declare the variable holding a * or ** argument
        # (always a Python object).
        if arg:
            arg.entry = env.declare_var(arg.name,
                PyrexTypes.py_object_type, arg.pos)
            arg.entry.init = "0"
            arg.entry.init_to_none = 0
            arg.entry.xdecref_cleanup = 1
    def analyse_expressions(self, env):
        self.analyse_default_values(env)
        if env.is_py_class_scope:
            # A def inside a Python class also assigns the function
            # object to a class attribute.
            self.synthesize_assignment_node(env)
    def analyse_default_values(self, env):
        # Analyse and coerce default value expressions; each gets a
        # module-level storage slot for the evaluated default.
        for arg in self.args:
            if arg.default:
                if arg.is_generic:
                    arg.default.analyse_types(env)
                    arg.default = arg.default.coerce_to(arg.type, env)
                    arg.default.allocate_temps(env)
                    arg.default_entry = env.add_default_value(arg.type)
                else:
                    error(arg.pos,
                        "This argument cannot have a default value")
                    arg.default = None
    def synthesize_assignment_node(self, env):
        # Build "name = unbound_method(class, pycfunction)" for a def
        # statement inside a Python class definition.
        import ExprNodes
        self.assmt = SingleAssignmentNode(self.pos,
            lhs = ExprNodes.NameNode(self.pos, name = self.name),
            rhs = ExprNodes.UnboundMethodNode(self.pos,
                class_cname = env.class_obj_cname,
                function = ExprNodes.PyCFunctionNode(self.pos,
                    pymethdef_cname = self.entry.pymethdef_cname)))
        self.assmt.analyse_declarations(env)
        self.assmt.analyse_expressions(env)
    def generate_function_header(self, code, with_pymethdef):
        # Emit the prototype, doc string constant and (optionally)
        # PyMethodDef for the generated C function, then open its body.
        arg_code_list = []
        sig = self.entry.signature
        if sig.has_dummy_arg:
            arg_code_list.append(
                "PyObject *%s" % Naming.self_cname)
        for arg in self.args:
            if not arg.is_generic:
                if arg.is_self_arg:
                    arg_code_list.append("PyObject *%s" % arg.hdr_cname)
                else:
                    arg_code_list.append(
                        arg.hdr_type.declaration_code(arg.hdr_cname))
        if sig.has_generic_args:
            # Generic args are received as the usual args tuple and
            # keyword dict pair.
            arg_code_list.append(
                "PyObject *%s, PyObject *%s"
                    % (Naming.args_cname, Naming.kwds_cname))
        arg_code = ", ".join(arg_code_list)
        dc = self.return_type.declaration_code(self.entry.func_cname)
        header = "static %s(%s)" % (dc, arg_code)
        code.putln("%s; /*proto*/" % header)
        if self.entry.doc:
            code.putln(
                'static char %s[] = "%s";' % (
                    self.entry.doc_cname,
                    self.entry.doc))
        if with_pymethdef:
            code.put(
                "static PyMethodDef %s = " %
                    self.entry.pymethdef_cname)
            code.put_pymethoddef(self.entry, ";")
        code.putln("%s {" % header)
    def generate_argument_declarations(self, env, code):
        for arg in self.args:
            if arg.is_generic: # or arg.needs_conversion:
                code.put_var_declaration(arg.entry)
    def generate_keyword_list(self, code):
        # Emit the null-terminated keyword name list used by
        # PyArg_ParseTupleAndKeywords (and __Pyx_GetStarArgs).
        if self.entry.signature.has_generic_args:
            code.put(
                "static char *%s[] = {" %
                    Naming.kwdlist_cname)
            for arg in self.args:
                if arg.is_generic:
                    code.put(
                        '"%s",' %
                            arg.name)
            code.putln(
                "0};")
    def generate_argument_parsing_code(self, code):
        # Generate PyArg_ParseTuple call for generic
        # arguments, if any.
        if self.entry.signature.has_generic_args:
            arg_addrs = []
            arg_formats = []
            default_seen = 0
            for arg in self.args:
                arg_entry = arg.entry
                if arg.is_generic:
                    if arg.default:
                        # Pre-assign the stored default; parsing will
                        # overwrite it if the argument was passed.
                        code.putln(
                            "%s = %s;" % (
                                arg_entry.cname,
                                arg.default_entry.cname))
                        if not default_seen:
                            # "|" marks the start of optional args in
                            # the ParseTuple format string.
                            arg_formats.append("|")
                        default_seen = 1
                    elif default_seen:
                        error(arg.pos, "Non-default argument following default argument")
                    arg_addrs.append("&" + arg_entry.cname)
                    format = arg_entry.type.parsetuple_format
                    if format:
                        arg_formats.append(format)
                    else:
                        error(arg.pos,
                            "Cannot convert Python object argument to type '%s'"
                                % arg.type)
            argformat = '"%s"' % string.join(arg_formats, "")
            has_starargs = self.star_arg is not None or self.starstar_arg is not None
            if has_starargs:
                self.generate_stararg_getting_code(code)
            pt_arglist = [Naming.args_cname, Naming.kwds_cname, argformat,
                    Naming.kwdlist_cname] + arg_addrs
            pt_argstring = string.join(pt_arglist, ", ")
            code.put(
                'if (!PyArg_ParseTupleAndKeywords(%s)) ' %
                    pt_argstring)
            error_return_code = "return %s;" % self.error_value()
            if has_starargs:
                # On parse failure, release the new args/kwds objects
                # made by __Pyx_GetStarArgs and any extracted * / **
                # objects before returning.
                code.putln("{")
                code.put_xdecref(Naming.args_cname, py_object_type)
                code.put_xdecref(Naming.kwds_cname, py_object_type)
                self.generate_arg_xdecref(self.star_arg, code)
                self.generate_arg_xdecref(self.starstar_arg, code)
                code.putln(error_return_code)
                code.putln("}")
            else:
                code.putln(error_return_code)
    def put_stararg_decrefs(self, code):
        if self.star_arg or self.starstar_arg:
            code.put_xdecref(Naming.args_cname, py_object_type)
            code.put_xdecref(Naming.kwds_cname, py_object_type)
    def generate_arg_xdecref(self, arg, code):
        if arg:
            code.put_var_xdecref(arg.entry)
    def arg_address(self, arg):
        # C address of the variable for a * or ** arg, or 0 if absent.
        if arg:
            return "&%s" % arg.entry.cname
        else:
            return 0
    def generate_stararg_getting_code(self, code):
        # Emit the __Pyx_GetStarArgs call that splits excess
        # positional and keyword arguments into the * and ** objects.
        if self.star_arg or self.starstar_arg:
            if not self.entry.signature.has_generic_args:
                error(self.pos, "This method cannot have * or ** arguments")
            star_arg_addr = self.arg_address(self.star_arg)
            starstar_arg_addr = self.arg_address(self.starstar_arg)
            code.putln(
                "if (__Pyx_GetStarArgs(&%s, &%s, %s, %s, %s, %s) < 0) return %s;" % (
                    Naming.args_cname,
                    Naming.kwds_cname,
                    Naming.kwdlist_cname,
                    len(self.args) - self.entry.signature.num_fixed_args(),
                    star_arg_addr,
                    starstar_arg_addr,
                    self.error_value()))
    def generate_argument_conversion_code(self, code):
        # Generate code to convert arguments from
        # signature type to declared type, if needed.
        for arg in self.args:
            if arg.needs_conversion:
                self.generate_arg_conversion(arg, code)
    def generate_arg_conversion(self, arg, code):
        # Generate conversion code for one argument.
        old_type = arg.hdr_type
        new_type = arg.type
        if old_type.is_pyobject:
            self.generate_arg_conversion_from_pyobject(arg, code)
        elif new_type.is_pyobject:
            self.generate_arg_conversion_to_pyobject(arg, code)
        else:
            if new_type.assignable_from(old_type):
                code.putln(
                    "%s = %s;" % (arg.entry.cname, arg.hdr_cname))
            else:
                error(arg.pos,
                    "Cannot convert argument from '%s' to '%s'" %
                        (old_type, new_type))
    def generate_arg_conversion_from_pyobject(self, arg, code):
        # Convert a Python object header argument to its declared C type.
        new_type = arg.type
        func = new_type.from_py_function
        if func:
            code.putln("%s = %s(%s); if (PyErr_Occurred()) %s" % (
                arg.entry.cname,
                func,
                arg.hdr_cname,
                code.error_goto(arg.pos)))
        else:
            error(arg.pos,
                "Cannot convert Python object argument to type '%s'"
                    % new_type)
    def generate_arg_conversion_to_pyobject(self, arg, code):
        # Convert a C header argument to a new Python object reference.
        old_type = arg.hdr_type
        func = old_type.to_py_function
        if func:
            code.putln("%s = %s(%s); if (!%s) %s" % (
                arg.entry.cname,
                func,
                arg.hdr_cname,
                arg.entry.cname,
                code.error_goto(arg.pos)))
        else:
            error(arg.pos,
                "Cannot convert argument of type '%s' to Python object"
                    % old_type)
    def generate_argument_type_tests(self, code):
        # Generate type tests for args whose signature
        # type is PyObject * and whose declared type is
        # a subtype thereof.
        for arg in self.args:
            if arg.needs_type_test:
                self.generate_arg_type_test(arg, code)
    def generate_arg_type_test(self, arg, code):
        # Generate type test for one argument.
        if arg.type.typeobj_is_available():
            typeptr_cname = arg.type.typeptr_cname
            arg_code = "((PyObject *)%s)" % arg.entry.cname
            code.putln(
                'if (!__Pyx_ArgTypeTest(%s, %s, %d, "%s")) %s' % (
                    arg_code,
                    typeptr_cname,
                    not arg.not_none,
                    arg.name,
                    code.error_goto(arg.pos)))
        else:
            error(arg.pos, "Cannot test type of extern C class "
                "without type object name specification")
    def generate_execution_code(self, code):
        # Evaluate and store argument default values
        for arg in self.args:
            default = arg.default
            if default:
                default.generate_evaluation_code(code)
                default.make_owned_reference(code)
                code.putln(
                    "%s = %s;" % (
                        arg.default_entry.cname,
                        default.result))
                if default.is_temp and default.type.is_pyobject:
                    # The default slot now owns the reference; zero the
                    # temp so it is not disposed of again.
                    code.putln(
                        "%s = 0;" %
                            default.result)
        # For Python class methods, create and store function object
        if self.assmt:
            self.assmt.generate_execution_code(code)
    def error_value(self):
        # C value returned from the generated function to signal an error.
        return self.entry.signature.error_value
    def caller_will_check_exceptions(self):
        return 1
class PyClassDefNode(StatNode):
    #  A Python class definition.
    #
    #  name     string         Name of the class
    #  doc      string or None
    #  body     StatNode       Attribute definition code
    #  entry    Symtab.Entry
    #  scope    PyClassScope
    #
    #  The following subnodes are constructed internally:
    #
    #  dict      DictNode   Class dictionary
    #  classobj  ClassNode  Class object
    #  target    NameNode   Variable to assign class object to
    def __init__(self, pos, name, bases, doc, body):
        StatNode.__init__(self, pos)
        self.name = name
        self.doc = doc
        self.body = body
        import ExprNodes
        # Synthesize the nodes which build the class dictionary,
        # create the class object, and bind it to its name.
        self.dict = ExprNodes.DictNode(pos, key_value_pairs = [])
        if self.doc:
            doc_node = ExprNodes.StringNode(pos, value = self.doc)
        else:
            doc_node = None
        self.classobj = ExprNodes.ClassNode(pos,
            name = ExprNodes.StringNode(pos, value = name),
            bases = bases, dict = self.dict, doc = doc_node)
        self.target = ExprNodes.NameNode(pos, name = name)
    def analyse_declarations(self, env):
        self.target.analyse_target_declaration(env)
    def analyse_expressions(self, env):
        self.dict.analyse_expressions(env)
        self.classobj.analyse_expressions(env)
        genv = env.global_scope()
        # The class body is analysed in a fresh PyClassScope which
        # records the C names of the class dict and class object.
        cenv = PyClassScope(name = self.name, outer_scope = genv)
        cenv.class_dict_cname = self.dict.result
        cenv.class_obj_cname = self.classobj.result
        self.scope = cenv
        self.body.analyse_declarations(cenv)
        self.body.analyse_expressions(cenv)
        self.target.analyse_target_expression(env)
        self.dict.release_temp(env)
        self.classobj.release_temp(env)
        self.target.release_target_temp(env)
        env.recycle_pending_temps()
    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(
            self.scope, code)
    def generate_execution_code(self, code):
        # Populate the dict (via the body), build the class object,
        # assign it to its name, then dispose of the dict temp.
        self.dict.generate_evaluation_code(code)
        self.classobj.generate_evaluation_code(code)
        self.body.generate_execution_code(code)
        self.target.generate_assignment_code(self.classobj, code)
        self.dict.generate_disposal_code(code)
class CClassDefNode(StatNode):
    #  An extension type definition.
    #
    #  visibility         'private' or 'public' or 'extern'
    #  typedef_flag       boolean
    #  module_name        string or None    For import of extern type objects
    #  class_name         string            Unqualified name of class
    #  as_name            string or None    Name to declare as in this scope
    #  base_class_module  string or None    Module containing the base class
    #  base_class_name    string or None    Name of the base class
    #  objstruct_name     string or None    Specified C name of object struct
    #  typeobj_name       string or None    Specified C name of type object
    #  in_pxd             boolean           Is in a .pxd file
    #  doc                string or None
    #  body               StatNode or None
    #  entry              Symtab.Entry
    #  base_type          PyExtensionType or None
    def analyse_declarations(self, env):
        #print "CClassDefNode.analyse_declarations:", self.class_name
        #print "...visibility =", self.visibility
        #print "...module_name =", self.module_name
        if env.in_cinclude and not self.objstruct_name:
            error(self.pos, "Object struct name specification required for "
                "C class defined in 'extern from' block")
        # Resolve the base class, if any, checking that it names a
        # complete extension type.
        self.base_type = None
        if self.base_class_name:
            if self.base_class_module:
                base_class_scope = env.find_module(self.base_class_module, self.pos)
            else:
                base_class_scope = env
            if base_class_scope:
                base_class_entry = base_class_scope.find(self.base_class_name, self.pos)
                if base_class_entry:
                    if not base_class_entry.is_type:
                        error(self.pos, "'%s' is not a type name" % self.base_class_name)
                    elif not base_class_entry.type.is_extension_type:
                        error(self.pos, "'%s' is not an extension type" % self.base_class_name)
                    elif not base_class_entry.type.is_complete():
                        error(self.pos, "Base class '%s' is incomplete" % self.base_class_name)
                    else:
                        self.base_type = base_class_entry.type
        has_body = self.body is not None
        self.entry = env.declare_c_class(
            name = self.class_name,
            pos = self.pos,
            defining = has_body and self.in_pxd,
            implementing = has_body and not self.in_pxd,
            module_name = self.module_name,
            base_type = self.base_type,
            objstruct_cname = self.objstruct_name,
            typeobj_cname = self.typeobj_name,
            visibility = self.visibility,
            typedef_flag = self.typedef_flag)
        scope = self.entry.type.scope
        if self.doc:
            scope.doc = self.doc
        if has_body:
            self.body.analyse_declarations(scope)
            if self.in_pxd:
                scope.defined = 1
            else:
                scope.implemented = 1
    def analyse_expressions(self, env):
        if self.body:
            self.body.analyse_expressions(env)
    def generate_function_definitions(self, env, code):
        if self.body:
            self.body.generate_function_definitions(
                self.entry.type.scope, code)
    def generate_execution_code(self, code):
        # This is needed to generate evaluation code for
        # default values of method arguments.
        if self.body:
            self.body.generate_execution_code(code)
class PropertyNode(StatNode):
    #  Definition of a property in an extension type.
    #
    #  name   string
    #  doc    string or None    Doc string
    #  body   StatListNode
    def analyse_declarations(self, env):
        # Register the property; on success, attach its doc string
        # constant and analyse the accessor bodies in the property's
        # own scope.
        prop_entry = env.declare_property(self.name, self.doc, self.pos)
        if not prop_entry:
            return
        if self.doc:
            prop_entry.doc_cname = env.get_string_const(self.doc).cname
        self.body.analyse_declarations(prop_entry.scope)
    def analyse_expressions(self, env):
        self.body.analyse_expressions(env)
    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(env, code)
    def generate_execution_code(self, code):
        # A property definition emits no module-level execution code.
        pass
class GlobalNode(StatNode):
    #  Global variable declaration.
    #
    #  names   [string]
    def analyse_declarations(self, env):
        # Mark each listed name as referring to a module-level variable.
        for var_name in self.names:
            env.declare_global(var_name, self.pos)
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        # Purely declarative; no C code is emitted.
        pass
class ExprStatNode(StatNode):
    #  An expression evaluated as a statement, for its side effects.
    #
    #  expr   ExprNode
    def analyse_expressions(self, env):
        expr = self.expr
        expr.analyse_expressions(env)
        expr.release_temp(env)
        env.recycle_pending_temps() # TEMPORARY
    def generate_execution_code(self, code):
        expr = self.expr
        expr.generate_evaluation_code(code)
        if expr.result and not expr.is_temp:
            # A non-temp result has not been emitted yet; emit it as a
            # C statement so its side effects take place.
            code.putln("%s;" % expr.result)
        expr.generate_disposal_code(code)
class AssignmentNode(StatNode):
    #  Abstract base class for assignment nodes.
    #
    #  The analyse_expressions and generate_execution_code
    #  phases of assignments are split into two sub-phases
    #  each, to enable all the right hand sides of a
    #  parallel assignment to be evaluated before assigning
    #  to any of the left hand sides.
    def analyse_expressions(self, env):
        self.analyse_expressions_1(env)
        self.analyse_expressions_2(env)
    def generate_execution_code(self, code):
        self.generate_rhs_evaluation_code(code)
        self.generate_assignment_code(code)
class SingleAssignmentNode(AssignmentNode):
    #  The simplest case:
    #
    #    a = b
    #
    #  lhs   ExprNode   Left hand side
    #  rhs   ExprNode   Right hand side
    def analyse_declarations(self, env):
        self.lhs.analyse_target_declaration(env)
    def analyse_expressions_1(self, env, use_temp = 0):
        # Phase 1: analyse and coerce the RHS; with use_temp the value
        # is forced into a temporary (used by parallel assignment).
        self.rhs.analyse_types(env)
        self.lhs.analyse_target_types(env)
        self.rhs = self.rhs.coerce_to(self.lhs.type, env)
        if use_temp:
            self.rhs = self.rhs.coerce_to_temp(env)
        self.rhs.allocate_temps(env)
    def analyse_expressions_2(self, env):
        # Phase 2: allocate and release temps for the target side.
        self.lhs.allocate_target_temps(env)
        self.lhs.release_target_temp(env)
        self.rhs.release_temp(env)
    def generate_rhs_evaluation_code(self, code):
        self.rhs.generate_evaluation_code(code)
    def generate_assignment_code(self, code):
        self.lhs.generate_assignment_code(self.rhs, code)
class CascadedAssignmentNode(AssignmentNode):
    #  An assignment with multiple left hand sides:
    #
    #    a = b = c
    #
    #  lhs_list   [ExprNode]   Left hand sides
    #  rhs        ExprNode     Right hand sides
    #
    #  Used internally:
    #
    #  coerced_rhs_list   [ExprNode]   RHS coerced to type of each LHS
    def analyse_declarations(self, env):
        for lhs in self.lhs_list:
            lhs.analyse_target_declaration(env)
    def analyse_expressions_1(self, env, use_temp = 0):
        # Phase 1: the shared RHS is evaluated once, either into a
        # temp or as a simple (safely re-readable) expression.
        self.rhs.analyse_types(env)
        if use_temp:
            self.rhs = self.rhs.coerce_to_temp(env)
        else:
            self.rhs = self.rhs.coerce_to_simple(env)
        self.rhs.allocate_temps(env)
    def analyse_expressions_2(self, env):
        # Phase 2: each target gets its own CloneNode of the RHS,
        # coerced to that target's type.
        from ExprNodes import CloneNode
        self.coerced_rhs_list = []
        for lhs in self.lhs_list:
            lhs.analyse_target_types(env)
            rhs = CloneNode(self.rhs)
            rhs = rhs.coerce_to(lhs.type, env)
            self.coerced_rhs_list.append(rhs)
            rhs.allocate_temps(env)
            lhs.allocate_target_temps(env)
            lhs.release_target_temp(env)
            rhs.release_temp(env)
        self.rhs.release_temp(env)
    def generate_rhs_evaluation_code(self, code):
        self.rhs.generate_evaluation_code(code)
    def generate_assignment_code(self, code):
        for i in range(len(self.lhs_list)):
            lhs = self.lhs_list[i]
            rhs = self.coerced_rhs_list[i]
            rhs.generate_evaluation_code(code)
            lhs.generate_assignment_code(rhs, code)
            # Assignment has disposed of the cloned RHS
        self.rhs.generate_disposal_code(code)
class ParallelAssignmentNode(AssignmentNode):
    #  A combined packing/unpacking assignment:
    #
    #    a, b, c = d, e, f
    #
    #  The parser has already rewritten this into the sequence
    #
    #    a = d ; b = e ; c = f
    #
    #  but every right hand side must be evaluated before any
    #  left hand side is assigned to.
    #
    #  stats   [AssignmentNode]   The constituent assignments
    def analyse_declarations(self, env):
        for assignment in self.stats:
            assignment.analyse_declarations(env)
    def analyse_expressions(self, env):
        # Two passes: force every RHS into a temp first, then handle
        # the target sides.
        for assignment in self.stats:
            assignment.analyse_expressions_1(env, use_temp = 1)
        for assignment in self.stats:
            assignment.analyse_expressions_2(env)
    def generate_execution_code(self, code):
        # Evaluate all right hand sides before performing any
        # of the assignments.
        for assignment in self.stats:
            assignment.generate_rhs_evaluation_code(code)
        for assignment in self.stats:
            assignment.generate_assignment_code(code)
class PrintStatNode(StatNode):
    #  print statement
    #
    #  args              [ExprNode]
    #  ends_with_comma   boolean
    def analyse_expressions(self, env):
        for i, arg in enumerate(self.args):
            arg.analyse_types(env)
            arg = arg.coerce_to_pyobject(env)
            arg.allocate_temps(env)
            arg.release_temp(env)
            # Store the coerced node back in place of the original.
            self.args[i] = arg
        env.recycle_pending_temps() # TEMPORARY
        env.use_utility_code(printing_utility_code)
    def generate_execution_code(self, code):
        # Each item is printed via the __Pyx_PrintItem helper; a final
        # newline is emitted unless the statement ended with a comma.
        for arg in self.args:
            arg.generate_evaluation_code(code)
            code.putln(
                "if (__Pyx_PrintItem(%s) < 0) %s" % (
                    arg.result,
                    code.error_goto(self.pos)))
            arg.generate_disposal_code(code)
        if not self.ends_with_comma:
            code.putln(
                "if (__Pyx_PrintNewline() < 0) %s" %
                    code.error_goto(self.pos))
class DelStatNode(StatNode):
    #  del statement
    #
    #  args   [ExprNode]
    def analyse_declarations(self, env):
        for target in self.args:
            target.analyse_target_declaration(env)
    def analyse_expressions(self, env):
        # Only Python objects support deletion.
        for target in self.args:
            target.analyse_target_expression(env)
            if not target.type.is_pyobject:
                error(target.pos, "Deletion of non-Python object")
        env.recycle_pending_temps() # TEMPORARY
    def generate_execution_code(self, code):
        for target in self.args:
            # Non-Python targets were already reported during analysis.
            if target.type.is_pyobject:
                target.generate_deletion_code(code)
class PassStatNode(StatNode):
    #  pass statement -- needs no analysis and emits no code.
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        pass
class BreakStatNode(StatNode):
    #  break statement: a jump to the enclosing loop's break label.
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        if code.break_label:
            code.putln("goto %s;" % code.break_label)
        else:
            error(self.pos, "break statement not inside loop")
class ContinueStatNode(StatNode):
    #  continue statement: a jump to the enclosing loop's continue label.
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        # A goto cannot leave the try part of a C try/finally
        # emulation, and continue is only meaningful inside a loop.
        if code.in_try_finally:
            error(self.pos, "continue statement inside try of try...finally")
            return
        if not code.continue_label:
            error(self.pos, "continue statement not inside loop")
            return
        code.putln("goto %s;" % code.continue_label)
class ReturnStatNode(StatNode):
    #  return statement
    #
    #  value         ExprNode or None
    #  return_type   PyrexType
    def analyse_expressions(self, env):
        return_type = env.return_type
        self.return_type = return_type
        if not return_type:
            error(self.pos, "Return not inside a function body")
            return
        if self.value:
            self.value.analyse_types(env)
            if return_type.is_void or return_type.is_returncode:
                error(self.value.pos,
                    "Return with value in void function")
            else:
                self.value = self.value.coerce_to(env.return_type, env)
                self.value.allocate_temps(env)
                self.value.release_temp(env)
        else:
            if (not return_type.is_void
                and not return_type.is_pyobject
                and not return_type.is_returncode):
                    error(self.pos, "Return value required")
    def generate_execution_code(self, code):
        # Store the result in the function's retval variable and jump
        # to the common return label.
        if not self.return_type:
            # error reported earlier
            return
        if self.value:
            self.value.generate_evaluation_code(code)
            if self.value.type.is_pyobject and not self.value.is_temp:
                # Returning a borrowed reference: take ownership first.
                code.put_incref(self.value.result, self.value.type)
            if self.return_type.is_extension_type:
                cast = "(%s)" % self.return_type.declaration_code("")
            else:
                cast = ""
            code.putln(
                "%s = %s%s;" % (
                    Naming.retval_cname,
                    cast,
                    self.value.result))
            self.value.generate_post_assignment_code(code)
        else:
            # No explicit value: None for Python return types, the
            # type's default value for error-return-code functions.
            if self.return_type.is_pyobject:
                code.putln(
                    "%s = Py_None; Py_INCREF(%s);" % (
                        Naming.retval_cname,
                        Naming.retval_cname))
            elif self.return_type.is_returncode:
                code.putln(
                    "%s = %s;" % (
                        Naming.retval_cname,
                        self.return_type.default_value))
        code.putln(
            "goto %s;" %
                code.return_label)
class RaiseStatNode(StatNode):
    #  raise statement
    #
    #  exc_type    ExprNode or None
    #  exc_value   ExprNode or None
    #  exc_tb      ExprNode or None
    def analyse_expressions(self, env):
        # Coerce each supplied component to a Python object; a bare
        # "raise" uses the re-raise helper instead of the raise helper.
        if self.exc_type:
            self.exc_type.analyse_types(env)
            self.exc_type = self.exc_type.coerce_to_pyobject(env)
            self.exc_type.allocate_temps(env)
        if self.exc_value:
            self.exc_value.analyse_types(env)
            self.exc_value = self.exc_value.coerce_to_pyobject(env)
            self.exc_value.allocate_temps(env)
        if self.exc_tb:
            self.exc_tb.analyse_types(env)
            self.exc_tb = self.exc_tb.coerce_to_pyobject(env)
            self.exc_tb.allocate_temps(env)
        if self.exc_type:
            self.exc_type.release_temp(env)
        if self.exc_value:
            self.exc_value.release_temp(env)
        if self.exc_tb:
            self.exc_tb.release_temp(env)
        env.recycle_pending_temps() # TEMPORARY
        if not (self.exc_type or self.exc_value or self.exc_tb):
            env.use_utility_code(reraise_utility_code)
        else:
            env.use_utility_code(raise_utility_code)
    def generate_execution_code(self, code):
        # Evaluate whichever components are present; a missing
        # component is passed to __Pyx_Raise as a null pointer ("0").
        if self.exc_type:
            self.exc_type.generate_evaluation_code(code)
            type_code = self.exc_type.result
        else:
            # Was the integer 0, unlike the string "0" used for the
            # other two slots; it only produced correct C by accident
            # of "%s" formatting. Use the string form consistently.
            type_code = "0"
        if self.exc_value:
            self.exc_value.generate_evaluation_code(code)
            value_code = self.exc_value.result
        else:
            value_code = "0"
        if self.exc_tb:
            self.exc_tb.generate_evaluation_code(code)
            tb_code = self.exc_tb.result
        else:
            tb_code = "0"
        if self.exc_type or self.exc_value or self.exc_tb:
            code.putln(
                "__Pyx_Raise(%s, %s, %s);" % (
                    type_code,
                    value_code,
                    tb_code))
        else:
            code.putln(
                "__Pyx_ReRaise();")
        # Dispose of whichever component expressions were evaluated.
        for component in (self.exc_type, self.exc_value, self.exc_tb):
            if component:
                component.generate_disposal_code(code)
        # Raising always transfers control to the error handler.
        code.putln(
            code.error_goto(self.pos))
class AssertStatNode(StatNode):
    #  assert statement
    #
    #  cond    ExprNode
    #  value   ExprNode or None
    def analyse_expressions(self, env):
        self.cond = self.cond.analyse_boolean_expression(env)
        if self.value:
            self.value.analyse_types(env)
            self.value = self.value.coerce_to_pyobject(env)
            self.value.allocate_temps(env)
        self.cond.release_temp(env)
        if self.value:
            self.value.release_temp(env)
        env.recycle_pending_temps() # TEMPORARY
    def generate_execution_code(self, code):
        # Emits: if (!cond) { set AssertionError (with the optional
        # value as argument) and jump to the error handler }.
        self.cond.generate_evaluation_code(code)
        if self.value:
            self.value.generate_evaluation_code(code)
        code.putln(
            "if (!%s) {" %
                self.cond.result)
        if self.value:
            code.putln(
                "PyErr_SetObject(PyExc_AssertionError, %s);" %
                    self.value.result)
        else:
            code.putln(
                "PyErr_SetNone(PyExc_AssertionError);")
        code.putln(
            code.error_goto(self.pos))
        code.putln(
            "}")
        self.cond.generate_disposal_code(code)
        if self.value:
            self.value.generate_disposal_code(code)
class IfStatNode(StatNode):
    #  if statement
    #
    #  if_clauses    [IfClauseNode]
    #  else_clause   StatNode or None
    def analyse_declarations(self, env):
        for clause in self.if_clauses:
            clause.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)
    def analyse_expressions(self, env):
        for clause in self.if_clauses:
            clause.analyse_expressions(env)
        if self.else_clause:
            self.else_clause.analyse_expressions(env)
    def generate_execution_code(self, code):
        # Each clause jumps to end_label after its body, producing
        # if / else-if chain semantics in the generated C.
        end_label = code.new_label()
        for clause in self.if_clauses:
            clause.generate_execution_code(code, end_label)
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(end_label)
class IfClauseNode(Node):
    #  An "if" or "elif" clause within an if statement.
    #
    #  condition   ExprNode
    #  body        StatNode
    def analyse_declarations(self, env):
        self.condition.analyse_declarations(env)
        self.body.analyse_declarations(env)
    def analyse_expressions(self, env):
        self.condition = \
            self.condition.analyse_temp_boolean_expression(env)
        self.condition.release_temp(env)
        env.recycle_pending_temps() # TEMPORARY
        self.body.analyse_expressions(env)
    def generate_execution_code(self, code, end_label):
        # Evaluate the condition; when it holds, run the body and
        # skip past the remaining clauses of the statement.
        self.condition.generate_evaluation_code(code)
        code.putln("if (%s) {" % self.condition.result)
        self.body.generate_execution_code(code)
        code.putln("goto %s;" % end_label)
        code.putln("}")
class WhileStatNode(StatNode):
    #  while statement
    #
    #  condition     ExprNode
    #  body          StatNode
    #  else_clause   StatNode
    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)
    def analyse_expressions(self, env):
        self.condition = \
            self.condition.analyse_temp_boolean_expression(env)
        self.condition.release_temp(env)
        env.recycle_pending_temps() # TEMPORARY
        self.body.analyse_expressions(env)
        if self.else_clause:
            self.else_clause.analyse_expressions(env)
    def generate_execution_code(self, code):
        # Generated shape:
        #   while (1) { continue_label: if (!cond) break; body }
        #   /*else*/ { ... }
        #   break_label:
        # so "break" skips the else clause while "continue" re-tests
        # the condition.
        old_loop_labels = code.new_loop_labels()
        code.putln(
            "while (1) {")
        code.put_label(code.continue_label)
        self.condition.generate_evaluation_code(code)
        code.putln(
            "if (!%s) break;" %
                self.condition.result)
        self.body.generate_execution_code(code)
        code.putln("}")
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(break_label)
class ForInStatNode(StatNode):
    #  for statement
    #
    #  target        ExprNode
    #  iterator      IteratorNode
    #  body          StatNode
    #  else_clause   StatNode
    #  item          NextNode       used internally
    def analyse_declarations(self, env):
        self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)
    def analyse_expressions(self, env):
        import ExprNodes
        self.iterator.analyse_expressions(env)
        self.target.analyse_target_types(env)
        # "item" fetches the next value from the iterator, coerced to
        # the target's type.
        self.item = ExprNodes.NextNode(self.iterator, env)
        self.item = self.item.coerce_to(self.target.type, env)
        self.item.allocate_temps(env)
        self.target.allocate_target_temps(env)
        self.item.release_temp(env)
        self.target.release_target_temp(env)
        env.recycle_pending_temps() # TEMPORARY
        self.body.analyse_expressions(env)
        self.iterator.release_temp(env)
        env.recycle_pending_temps() # TEMPORARY
        if self.else_clause:
            self.else_clause.analyse_expressions(env)
    def generate_execution_code(self, code):
        # Emits an infinite C for-loop; termination on iterator
        # exhaustion presumably comes from the code generated by the
        # NextNode (break on StopIteration) -- confirm in ExprNodes.
        old_loop_labels = code.new_loop_labels()
        self.iterator.generate_evaluation_code(code)
        code.putln(
            "for (;;) {")
        code.put_label(code.continue_label)
        self.item.generate_evaluation_code(code)
        self.target.generate_assignment_code(self.item, code)
        self.body.generate_execution_code(code)
        code.putln(
            "}")
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(break_label)
        self.iterator.generate_disposal_code(code)
class ForFromStatNode(StatNode):
    #  for name from expr rel name rel expr
    #
    #  target        NameNode
    #  bound1        ExprNode
    #  relation1     string
    #  relation2     string
    #  bound2        ExprNode
    #  body          StatNode
    #  else_clause   StatNode or None
    #
    #  Used internally:
    #
    #  loopvar_name       string
    #  py_loopvar_node    PyTempNode or None
    def analyse_declarations(self, env):
        self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)
    def analyse_expressions(self, env):
        import ExprNodes
        self.target.analyse_target_types(env)
        self.bound1.analyse_types(env)
        self.bound2.analyse_types(env)
        self.bound1 = self.bound1.coerce_to_integer(env)
        self.bound2 = self.bound2.coerce_to_integer(env)
        if not (self.bound2.is_name or self.bound2.is_literal):
            # The upper bound is re-evaluated on each iteration of the
            # generated C loop, so force it into a temp unless it is
            # trivially re-readable.
            self.bound2 = self.bound2.coerce_to_temp(env)
        target_type = self.target.type
        if not (target_type.is_pyobject
            or target_type.assignable_from(PyrexTypes.c_int_type)):
                error(self.target.pos,
                    "Cannot assign integer to variable of type '%s'" % target_type)
        if target_type.is_int:
            # Loop directly on the target's own C variable.
            self.loopvar_name = self.target.entry.cname
            self.py_loopvar_node = None
        else:
            # Loop on a hidden C temp and assign its value, converted
            # to a Python object, to the target each iteration.
            c_loopvar_node = ExprNodes.TempNode(self.pos,
                PyrexTypes.c_long_type, env)
            c_loopvar_node.allocate_temps(env)
            self.loopvar_name = c_loopvar_node.result
            self.py_loopvar_node = \
                ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
        self.bound1.allocate_temps(env)
        self.bound2.allocate_temps(env)
        if self.py_loopvar_node:
            self.py_loopvar_node.allocate_temps(env)
        self.target.allocate_target_temps(env)
        self.target.release_target_temp(env)
        if self.py_loopvar_node:
            self.py_loopvar_node.release_temp(env)
        self.body.analyse_expressions(env)
        if self.py_loopvar_node:
            c_loopvar_node.release_temp(env)
        if self.else_clause:
            self.else_clause.analyse_expressions(env)
        self.bound1.release_temp(env)
        self.bound2.release_temp(env)
        env.recycle_pending_temps() # TEMPORARY
    def generate_execution_code(self, code):
        old_loop_labels = code.new_loop_labels()
        self.bound1.generate_evaluation_code(code)
        self.bound2.generate_evaluation_code(code)
        # relation1 determines the initial offset and loop direction,
        # relation2 the continuation test (see relation_table).
        offset, incop = self.relation_table[self.relation1]
        code.putln(
            "for (%s = %s%s; %s %s %s; %s%s) {" % (
                self.loopvar_name,
                self.bound1.result, offset,
                self.loopvar_name, self.relation2, self.bound2.result,
                incop, self.loopvar_name))
        if self.py_loopvar_node:
            self.py_loopvar_node.generate_evaluation_code(code)
            self.target.generate_assignment_code(self.py_loopvar_node, code)
        self.body.generate_execution_code(code)
        code.put_label(code.continue_label)
        code.putln("}")
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(break_label)
        self.bound1.generate_disposal_code(code)
        self.bound2.generate_disposal_code(code)
    relation_table = {
        # {relop : (initial offset, increment op)}
        '<=': ("", "++"),
        '<' : ("+1", "++"),
        '>=': ("", "--"),
        '>' : ("-1", "--")
    }
class TryExceptStatNode(StatNode):
    #  try .. except statement
    #
    #  body             StatNode
    #  except_clauses   [ExceptClauseNode]
    #  else_clause      StatNode or None
    #  cleanup_list     [Entry]             temps to clean up on error

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        for except_clause in self.except_clauses:
            except_clause.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.body.analyse_expressions(env)
        #  Snapshot of the temps free after the body; these get XDECREF'd
        #  and cleared at the error label in generate_execution_code.
        self.cleanup_list = env.free_temp_entries[:]
        for except_clause in self.except_clauses:
            except_clause.analyse_expressions(env)
        if self.else_clause:
            self.else_clause.analyse_expressions(env)

    def generate_execution_code(self, code):
        #  Errors raised inside the try body jump to our own error label,
        #  where the handler code is emitted; afterwards the enclosing
        #  error label is restored.
        old_error_label = code.new_error_label()
        our_error_label = code.error_label
        end_label = code.new_label()
        code.putln(
            "/*try:*/ {")
        self.body.generate_execution_code(code)
        code.putln(
            "}")
        #  From here on, errors propagate to the enclosing handler again.
        code.error_label = old_error_label
        if self.else_clause:
            code.putln(
                "/*else:*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln(
                "}")
        code.putln(
            "goto %s;" %
                end_label)
        code.put_label(our_error_label)
        code.put_var_xdecrefs_clear(self.cleanup_list)
        default_clause_seen = 0
        for except_clause in self.except_clauses:
            if not except_clause.pattern:
                default_clause_seen = 1
            else:
                if default_clause_seen:
                    error(except_clause.pos, "Default except clause not last")
            except_clause.generate_handling_code(code, end_label)
        if not default_clause_seen:
            #  No bare "except:" clause, so an unmatched exception falls
            #  through to the enclosing error handler.
            code.putln(
                "goto %s;" %
                    code.error_label)
        code.put_label(end_label)
class ExceptClauseNode(Node):
    #  Part of try ... except statement.
    #
    #  pattern        ExprNode or None   exception type to match
    #                                    (None = bare "except:", catches all)
    #  target         ExprNode or None   assignment target for caught value
    #  body           StatNode
    #  match_flag     string             C name of int temp receiving the
    #                                    PyErr_ExceptionMatches() result
    #  exc_value      ExcValueNode       used internally
    #  function_name  string             qualified name of enclosing function
    #                                    (used for the traceback entry)

    def analyse_declarations(self, env):
        if self.target:
            self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)

    def analyse_expressions(self, env):
        import ExprNodes
        #  (An unused local "genv = env.global_scope()" was removed here.)
        self.function_name = env.qualified_name
        if self.pattern:
            self.pattern.analyse_expressions(env)
            self.pattern = self.pattern.coerce_to_pyobject(env)
            self.match_flag = env.allocate_temp(PyrexTypes.c_int_type)
            self.pattern.release_temp(env)
            env.release_temp(self.match_flag)
        self.exc_value = ExprNodes.ExcValueNode(self.pos, env)
        self.exc_value.allocate_temps(env)
        if self.target:
            self.target.analyse_target_expression(env)
        self.exc_value.release_temp(env)
        if self.target:
            self.target.release_target_temp(env)
        env.recycle_pending_temps() # TEMPORARY
        self.body.analyse_expressions(env)

    def generate_handling_code(self, code, end_label):
        code.mark_pos(self.pos)
        if self.pattern:
            self.pattern.generate_evaluation_code(code)
            code.putln(
                "%s = PyErr_ExceptionMatches(%s);" % (
                    self.match_flag,
                    self.pattern.result))
            self.pattern.generate_disposal_code(code)
            code.putln(
                "if (%s) {" %
                    self.match_flag)
        else:
            #  Bare "except:" matches unconditionally.
            code.putln(
                "/*except:*/ {")
        code.putln(
            '__Pyx_AddTraceback("%s");' % (self.function_name))
        # We always have to fetch the exception value even if
        # there is no target, because this also normalises the
        # exception and stores it in the thread state.
        self.exc_value.generate_evaluation_code(code)
        if self.target:
            self.target.generate_assignment_code(self.exc_value, code)
        else:
            self.exc_value.generate_disposal_code(code)
        self.body.generate_execution_code(code)
        code.putln(
            "goto %s;"
            % end_label)
        code.putln(
            "}")
class TryFinallyStatNode(StatNode):
    #  try ... finally statement
    #
    #  body             StatNode
    #  finally_clause   StatNode
    #  cleanup_list     [Entry]      temps to clean up on error
    #  exc_vars         3*(string,)  temps to hold saved exception
    #
    #  The plan is that we funnel all continue, break
    #  return and error gotos into the beginning of the
    #  finally block, setting a variable to remember which
    #  one we're doing. At the end of the finally block, we
    #  switch on the variable to figure out where to go.
    #  In addition, if we're doing an error, we save the
    #  exception on entry to the finally block and restore
    #  it on exit.

    disallow_continue_in_try_finally = 0
    # There doesn't seem to be any point in disallowing
    # continue in the try block, since we have no problem
    # handling it.

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        self.finally_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.body.analyse_expressions(env)
        #  Temps free after the body; cleared at the error landing pad
        #  (see put_error_catcher).
        self.cleanup_list = env.free_temp_entries[:]
        #  Three object temps to hold the fetched (type, value, traceback)
        #  across the finally clause, plus an int temp for the line number.
        self.exc_vars = (
            env.allocate_temp(PyrexTypes.py_object_type),
            env.allocate_temp(PyrexTypes.py_object_type),
            env.allocate_temp(PyrexTypes.py_object_type))
        self.lineno_var = \
            env.allocate_temp(PyrexTypes.c_int_type)
        self.finally_clause.analyse_expressions(env)
        #  NOTE(review): lineno_var is never released here, only the
        #  exc_vars are — possibly a leaked temp slot; confirm.
        for var in self.exc_vars:
            env.release_temp(var)

    def generate_execution_code(self, code):
        old_error_label = code.error_label
        old_labels = code.all_new_labels()
        new_labels = code.get_all_labels()
        new_error_label = code.error_label
        catch_label = code.new_label()
        code.putln(
            "/*try:*/ {")
        if self.disallow_continue_in_try_finally:
            was_in_try_finally = code.in_try_finally
            code.in_try_finally = 1
        self.body.generate_execution_code(code)
        if self.disallow_continue_in_try_finally:
            code.in_try_finally = was_in_try_finally
        code.putln(
            "}")
        code.putln(
            "/*finally:*/ {")
        #  __pyx_why records which exit path (break/continue/return/error)
        #  brought control into the finally block; 0 = normal fall-through.
        code.putln(
                "int __pyx_why;")
        #code.putln(
        #        "PyObject *%s, *%s, *%s;" %
        #            self.exc_vars)
        #code.putln(
        #        "int %s;" %
        #            self.lineno_var)
        code.putln(
                "__pyx_why = 0; goto %s;" %
                    catch_label)
        #  Emit one landing pad per live label inside the try body; each
        #  records its index in __pyx_why and jumps to the finally code.
        #  (Note: "<>" is legacy Python 2 syntax for "!=".)
        for i in range(len(new_labels)):
            if new_labels[i] and new_labels[i] <> "<try>":
                if new_labels[i] == new_error_label:
                    #  The error path additionally saves the live exception.
                    self.put_error_catcher(code,
                        new_error_label, i+1, catch_label)
                else:
                    code.putln(
                        "%s: __pyx_why = %s; goto %s;" % (
                            new_labels[i],
                            i+1,
                            catch_label))
        code.put_label(catch_label)
        code.set_all_labels(old_labels)
        self.finally_clause.generate_execution_code(code)
        #  After the finally clause, dispatch to the recorded exit path.
        code.putln(
            "switch (__pyx_why) {")
        for i in range(len(old_labels)):
            if old_labels[i]:
                if old_labels[i] == old_error_label:
                    #  The error path re-raises the saved exception first.
                    self.put_error_uncatcher(code, i+1, old_error_label)
                else:
                    code.putln(
                        "case %s: goto %s;" % (
                            i+1,
                            old_labels[i]))
        code.putln(
            "}")
        code.putln(
            "}")

    def put_error_catcher(self, code, error_label, i, catch_label):
        #  Emit the landing pad for errors raised in the try body: record
        #  the exit code, clear stale temps, save the live exception and
        #  current line number, then jump into the finally code.
        code.putln(
            "%s: {" %
                error_label)
        code.putln(
                "__pyx_why = %s;" %
                    i)
        code.put_var_xdecrefs_clear(self.cleanup_list)
        code.putln(
                "PyErr_Fetch(&%s, &%s, &%s);" %
                    self.exc_vars)
        code.putln(
                "%s = %s;" % (
                    self.lineno_var, Naming.lineno_cname))
        code.putln(
                "goto %s;" %
                    catch_label)
        code.putln(
            "}")

    def put_error_uncatcher(self, code, i, error_label):
        #  Emit the switch case that re-raises the saved exception after
        #  the finally clause has run.
        code.putln(
            "case %s: {" %
                i)
        code.putln(
                "PyErr_Restore(%s, %s, %s);" %
                    self.exc_vars)
        code.putln(
                "%s = %s;" % (
                    Naming.lineno_cname, self.lineno_var))
        #  PyErr_Restore takes over the references held in exc_vars, so
        #  the variables are zeroed rather than decref'd.
        for var in self.exc_vars:
            code.putln(
                "%s = 0;" %
                    var)
        code.putln(
                "goto %s;" %
                    error_label)
        code.putln(
            "}")
class CImportStatNode(StatNode):
    #  cimport statement
    #
    #  module_name   string           Qualified name of module being imported
    #  as_name       string or None   Name specified in "as" clause, if any

    def analyse_declarations(self, env):
        #  Register the cimported module (and, for dotted names, every
        #  intermediate package) in the appropriate scopes.
        module_scope = env.find_module(self.module_name, self.pos)
        if "." not in self.module_name:
            env.declare_module(self.as_name or self.module_name,
                module_scope, self.pos)
        else:
            #  Dotted name: walk the package path, declaring each
            #  submodule inside its parent as we go.
            parts = self.module_name.split(".")
            top_module_scope = env.context.find_submodule(parts[0])
            scope = top_module_scope
            for part in parts[1:]:
                child_scope = scope.find_submodule(part)
                scope.declare_module(part, child_scope, self.pos)
                scope = child_scope
            if self.as_name:
                #  "cimport a.b.c as x" binds x to the innermost module.
                env.declare_module(self.as_name, scope, self.pos)
            else:
                #  "cimport a.b.c" binds only the top-level package name.
                env.declare_module(parts[0], top_module_scope, self.pos)

    def analyse_expressions(self, env):
        pass

    def generate_execution_code(self, code):
        pass
class FromCImportStatNode(StatNode):
    #  from ... cimport statement
    #
    #  module_name     string                  Qualified name of module
    #  imported_names  [(pos, name, as_name)]  Names to be imported

    def analyse_declarations(self, env):
        #  Look up each requested name in the cimported module and bind
        #  it (under its as-name, if given) in the current scope.
        module_scope = env.find_module(self.module_name, self.pos)
        env.add_imported_module(module_scope)
        for pos, name, as_name in self.imported_names:
            entry = module_scope.find(name, pos)
            if not entry:
                #  Presumably find() reported the failure; nothing to bind.
                continue
            env.add_imported_entry(as_name or name, entry, pos)

    def analyse_expressions(self, env):
        pass

    def generate_execution_code(self, code):
        pass
class FromImportStatNode(StatNode):
    #  from ... import statement
    #
    #  module          ImportNode            evaluates to the imported module
    #  items           [(string, NameNode)]  (name in module, assignment target)
    #  interned_items  [(string, NameNode)]  same, with names interned
    #  item            PyTempNode            used internally

    def analyse_declarations(self, env):
        for _, target in self.items:
            target.analyse_target_declaration(env)

    def analyse_expressions(self, env):
        import ExprNodes
        self.module.analyse_expressions(env)
        #  One shared temp holds each attribute value while it is being
        #  assigned to its target.
        self.item = ExprNodes.PyTempNode(self.pos, env)
        self.item.allocate_temp(env)
        self.interned_items = []
        for name, target in self.items:
            if Options.intern_names:
                self.interned_items.append((env.intern(name), target))
            target.analyse_target_expression(env)
            target.release_temp(env)
        self.module.release_temp(env)
        self.item.release_temp(env)
        env.recycle_pending_temps() # TEMPORARY

    def generate_execution_code(self, code):
        self.module.generate_evaluation_code(code)
        if Options.intern_names:
            #  Interned variant: attribute lookup by PyObject * name.
            for cname, target in self.interned_items:
                code.putln(
                    '%s = PyObject_GetAttr(%s, %s); if (!%s) %s' % (
                        self.item.result,
                        self.module.result,
                        cname,
                        self.item.result,
                        code.error_goto(self.pos)))
                target.generate_assignment_code(self.item, code)
        else:
            #  Plain variant: attribute lookup by C string name.
            for name, target in self.items:
                code.putln(
                    '%s = PyObject_GetAttrString(%s, "%s"); if (!%s) %s' % (
                        self.item.result,
                        self.module.result,
                        name,
                        self.item.result,
                        code.error_goto(self.pos)))
                target.generate_assignment_code(self.item, code)
        self.module.generate_disposal_code(code)
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
utility_function_predeclarations = \
"""
typedef struct {PyObject **p; char *s;} __Pyx_InternTabEntry; /*proto*/
typedef struct {PyObject **p; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
static PyObject *__Pyx_UnpackItem(PyObject *, int); /*proto*/
static int __Pyx_EndUnpack(PyObject *, int); /*proto*/
static int __Pyx_PrintItem(PyObject *); /*proto*/
static int __Pyx_PrintNewline(void); /*proto*/
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
static void __Pyx_ReRaise(void); /*proto*/
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/
static PyObject *__Pyx_GetExcValue(void); /*proto*/
static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, char *name); /*proto*/
static int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
static int __Pyx_GetStarArgs(PyObject **args, PyObject **kwds,\
char *kwd_list[], int nargs, PyObject **args2, PyObject **kwds2); /*proto*/
static void __Pyx_WriteUnraisable(char *name); /*proto*/
static void __Pyx_AddTraceback(char *funcname); /*proto*/
static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name, long size); /*proto*/
static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
static int __Pyx_GetVtable(PyObject *dict, void **vtabptr); /*proto*/
static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, char *modname); /*proto*/
static int __Pyx_InternStrings(__Pyx_InternTabEntry *t); /*proto*/
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
"""
#  Two variants of the module-namespace lookup helper: one taking a
#  char * name, the other a PyObject * name (presumably selected by
#  Options.intern_names — confirm at the use site).
get_name_predeclaration = \
"static PyObject *__Pyx_GetName(PyObject *dict, char *name); /*proto*/"
get_name_interned_predeclaration = \
"static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/"
#------------------------------------------------------------------------------------
printing_utility_code = \
r"""
static PyObject *__Pyx_GetStdout(void) {
PyObject *f = PySys_GetObject("stdout");
if (!f) {
PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout");
}
return f;
}
static int __Pyx_PrintItem(PyObject *v) {
PyObject *f;
if (!(f = __Pyx_GetStdout()))
return -1;
if (PyFile_SoftSpace(f, 1)) {
if (PyFile_WriteString(" ", f) < 0)
return -1;
}
if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0)
return -1;
if (PyString_Check(v)) {
char *s = PyString_AsString(v);
int len = PyString_Size(v);
if (len > 0 &&
isspace(Py_CHARMASK(s[len-1])) &&
s[len-1] != ' ')
PyFile_SoftSpace(f, 0);
}
return 0;
}
static int __Pyx_PrintNewline(void) {
PyObject *f;
if (!(f = __Pyx_GetStdout()))
return -1;
if (PyFile_WriteString("\n", f) < 0)
return -1;
PyFile_SoftSpace(f, 0);
return 0;
}
"""
#------------------------------------------------------------------------------------
# The following function is based on do_raise() from ceval.c.
raise_utility_code = \
"""
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
Py_XINCREF(type);
Py_XINCREF(value);
Py_XINCREF(tb);
/* First, check the traceback argument, replacing None with NULL. */
if (tb == Py_None) {
Py_DECREF(tb);
tb = 0;
}
else if (tb != NULL && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
/* Next, replace a missing value with None */
if (value == NULL) {
value = Py_None;
Py_INCREF(value);
}
/* Next, repeatedly, replace a tuple exception with its first item */
while (PyTuple_Check(type) && PyTuple_Size(type) > 0) {
PyObject *tmp = type;
type = PyTuple_GET_ITEM(type, 0);
Py_INCREF(type);
Py_DECREF(tmp);
}
if (PyString_Check(type))
;
else if (PyClass_Check(type))
; /*PyErr_NormalizeException(&type, &value, &tb);*/
else if (PyInstance_Check(type)) {
/* Raising an instance. The value should be a dummy. */
if (value != Py_None) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
else {
/* Normalize to raise <class>, <instance> */
Py_DECREF(value);
value = type;
type = (PyObject*) ((PyInstanceObject*)type)->in_class;
Py_INCREF(type);
}
}
else {
/* Not something you can raise. You get an exception
anyway, just not what you specified :-) */
PyErr_Format(PyExc_TypeError,
"exceptions must be strings, classes, or "
"instances, not %s", type->ob_type->tp_name);
goto raise_error;
}
PyErr_Restore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
"""
#------------------------------------------------------------------------------------
reraise_utility_code = \
"""
static void __Pyx_ReRaise(void) {
PyThreadState *tstate = PyThreadState_Get();
PyObject *type = tstate->exc_type;
PyObject *value = tstate->exc_value;
PyObject *tb = tstate->exc_traceback;
Py_XINCREF(type);
Py_XINCREF(value);
Py_XINCREF(tb);
PyErr_Restore(type, value, tb);
}
"""
#------------------------------------------------------------------------------------
arg_type_test_utility_code = \
"""
static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, char *name) {
if (!type) {
PyErr_Format(PyExc_SystemError, "Missing type object");
return 0;
}
if ((none_allowed && obj == Py_None) || PyObject_TypeCheck(obj, type))
return 1;
PyErr_Format(PyExc_TypeError,
"Argument '%s' has incorrect type (expected %s, got %s)",
name, type->tp_name, obj->ob_type->tp_name);
return 0;
}
"""
#------------------------------------------------------------------------------------
#
#  __Pyx_GetStarArgs splits the args tuple and kwds dict into two parts
#  each, one part suitable for passing to PyArg_ParseTupleAndKeywords,
#  and the other containing any extra arguments. On success, replaces
#  the borrowed references *args and *kwds with references to a new
#  tuple and dict, and passes back new references in *args2 and *kwds2.
#  Does not touch any of its arguments on failure.
#
#  Any of *kwds, args2 and kwds2 may be 0 (but not args or kwds). If
#  *kwds == 0, it is not changed. If kwds2 == 0 and *kwds != 0, a new
#  reference to the same dictionary is passed back in *kwds.
#
#  Fixed here:
#  * the NULL check after PyDict_New() tested *kwds* instead of the
#    newly created dict *kwds1*;
#  * the cleanup code dereferenced args2/kwds2 without first checking
#    that they are non-NULL, although the contract above allows either
#    to be 0 (Py_XDECREF already tolerates a NULL value).
#
get_starargs_utility_code = \
"""
static int __Pyx_GetStarArgs(
    PyObject **args,
    PyObject **kwds,
    char *kwd_list[],
    int nargs,
    PyObject **args2,
    PyObject **kwds2)
{
    PyObject *x = 0, *args1 = 0, *kwds1 = 0;
    if (args2)
        *args2 = 0;
    if (kwds2)
        *kwds2 = 0;
    if (args2) {
        args1 = PyTuple_GetSlice(*args, 0, nargs);
        if (!args1)
            goto bad;
        *args2 = PyTuple_GetSlice(*args, nargs, PyTuple_Size(*args));
        if (!*args2)
            goto bad;
    }
    else {
        args1 = *args;
        Py_INCREF(args1);
    }
    if (kwds2) {
        if (*kwds) {
            char **p;
            kwds1 = PyDict_New();
            if (!kwds1)
                goto bad;
            *kwds2 = PyDict_Copy(*kwds);
            if (!*kwds2)
                goto bad;
            for (p = kwd_list; *p; p++) {
                x = PyDict_GetItemString(*kwds, *p);
                if (x) {
                    if (PyDict_SetItemString(kwds1, *p, x) < 0)
                        goto bad;
                    if (PyDict_DelItemString(*kwds2, *p) < 0)
                        goto bad;
                }
            }
        }
        else {
            *kwds2 = PyDict_New();
            if (!*kwds2)
                goto bad;
        }
    }
    else {
        kwds1 = *kwds;
        Py_XINCREF(kwds1);
    }
    *args = args1;
    *kwds = kwds1;
    return 0;
bad:
    Py_XDECREF(args1);
    Py_XDECREF(kwds1);
    if (args2)
        Py_XDECREF(*args2);
    if (kwds2)
        Py_XDECREF(*kwds2);
    return -1;
}
"""
#------------------------------------------------------------------------------------
unraisable_exception_utility_code = \
"""
static void __Pyx_WriteUnraisable(char *name) {
PyObject *old_exc, *old_val, *old_tb;
PyObject *ctx;
PyErr_Fetch(&old_exc, &old_val, &old_tb);
ctx = PyString_FromString(name);
PyErr_Restore(old_exc, old_val, old_tb);
if (!ctx)
ctx = Py_None;
PyErr_WriteUnraisable(ctx);
}
"""
#------------------------------------------------------------------------------------
traceback_utility_code = \
"""
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static void __Pyx_AddTraceback(char *funcname) {
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
PyObject *py_globals = 0;
PyObject *empty_tuple = 0;
PyObject *empty_string = 0;
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
py_srcfile = PyString_FromString(%(FILENAME)s);
if (!py_srcfile) goto bad;
py_funcname = PyString_FromString(funcname);
if (!py_funcname) goto bad;
py_globals = PyModule_GetDict(%(GLOBALS)s);
if (!py_globals) goto bad;
empty_tuple = PyTuple_New(0);
if (!empty_tuple) goto bad;
empty_string = PyString_FromString("");
if (!empty_string) goto bad;
py_code = PyCode_New(
0, /*int argcount,*/
0, /*int nlocals,*/
0, /*int stacksize,*/
0, /*int flags,*/
empty_string, /*PyObject *code,*/
empty_tuple, /*PyObject *consts,*/
empty_tuple, /*PyObject *names,*/
empty_tuple, /*PyObject *varnames,*/
empty_tuple, /*PyObject *freevars,*/
empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
%(LINENO)s, /*int firstlineno,*/
empty_string /*PyObject *lnotab*/
);
if (!py_code) goto bad;
py_frame = PyFrame_New(
PyThreadState_Get(), /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
py_globals, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
py_frame->f_lineno = %(LINENO)s;
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
Py_XDECREF(empty_tuple);
Py_XDECREF(empty_string);
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
""" % {
'FILENAME': Naming.filename_cname,
'LINENO': Naming.lineno_cname,
'GLOBALS': Naming.module_cname
}
#------------------------------------------------------------------------------------
type_import_utility_code = \
"""
static PyTypeObject *__Pyx_ImportType(char *module_name, char *class_name,
long size)
{
PyObject *py_module_name = 0;
PyObject *py_class_name = 0;
PyObject *py_name_list = 0;
PyObject *py_module = 0;
PyObject *result = 0;
py_module_name = PyString_FromString(module_name);
if (!py_module_name)
goto bad;
py_class_name = PyString_FromString(class_name);
if (!py_class_name)
goto bad;
py_name_list = PyList_New(1);
if (!py_name_list)
goto bad;
Py_INCREF(py_class_name);
if (PyList_SetItem(py_name_list, 0, py_class_name) < 0)
goto bad;
py_module = __Pyx_Import(py_module_name, py_name_list);
if (!py_module)
goto bad;
result = PyObject_GetAttr(py_module, py_class_name);
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%s.%s is not a type object",
module_name, class_name);
goto bad;
}
if (((PyTypeObject *)result)->tp_basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%s.%s does not appear to be the correct type object",
module_name, class_name);
goto bad;
}
goto done;
bad:
Py_XDECREF(result);
result = 0;
done:
Py_XDECREF(py_module_name);
Py_XDECREF(py_class_name);
Py_XDECREF(py_name_list);
return (PyTypeObject *)result;
}
"""
#------------------------------------------------------------------------------------
set_vtable_utility_code = \
"""
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
PyObject *pycobj = 0;
int result;
pycobj = PyCObject_FromVoidPtr(vtable, 0);
if (!pycobj)
goto bad;
if (PyDict_SetItemString(dict, "__pyx__vtable__", pycobj) < 0)
goto bad;
result = 0;
goto done;
bad:
result = -1;
done:
Py_XDECREF(pycobj);
return result;
}
"""
#------------------------------------------------------------------------------------
#  Retrieves a C method-table pointer stored by __Pyx_SetVtable.
#  Fixed here:
#  * "pycobj" was used without being declared, so the generated C
#    could not compile;
#  * the lookup key now matches the "__pyx__vtable__" key under which
#    __Pyx_SetVtable stores the table (it previously looked up
#    "__pyx_vtable__" and so could never find it).
get_vtable_utility_code = \
"""
static int __Pyx_GetVtable(PyObject *dict, void **vtabptr) {
    PyObject *pycobj = 0;
    int result;
    pycobj = PyObject_GetItemString(dict, "__pyx__vtable__");
    if (!pycobj)
        goto bad;
    *vtabptr = PyCObject_AsVoidPtr(pycobj);
    if (!*vtabptr)
        goto bad;
    result = 0;
    goto done;
bad:
    result = -1;
done:
    Py_XDECREF(pycobj);
    return result;
}
"""
#------------------------------------------------------------------------------------
#  Interns every string in a NULL-terminated table, storing each interned
#  object through the entry's slot pointer.  (Dropped the stray trailing
#  semicolon after the string literal, matching the file's other
#  assignments.)
init_intern_tab_utility_code = \
"""
static int __Pyx_InternStrings(__Pyx_InternTabEntry *t) {
    while (t->p) {
        *t->p = PyString_InternFromString(t->s);
        if (!*t->p)
            return -1;
        ++t;
    }
    return 0;
}
"""
#------------------------------------------------------------------------------------
#  Builds a Python string for every entry of a NULL-terminated table;
#  t->n is the byte count including the terminating NUL, hence "n - 1".
#  (Dropped the stray trailing semicolon after the string literal,
#  matching the file's other assignments.)
init_string_tab_utility_code = \
"""
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
    while (t->p) {
        *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
        if (!*t->p)
            return -1;
        ++t;
    }
    return 0;
}
"""
#------------------------------------------------------------------------------------
| Python |
#
# Pyrex - Types
#
import string
import Naming
class PyrexType:
    #
    #  Common base class of all Pyrex types.
    #
    #  is_pyobject           boolean     Is a Python object type
    #  is_extension_type     boolean     Is a Python extension type
    #  is_numeric            boolean     Is a C numeric type
    #  is_int                boolean     Is a C integer type
    #  is_float              boolean     Is a C floating point type
    #  is_void               boolean     Is the C void type
    #  is_array              boolean     Is a C array type
    #  is_ptr                boolean     Is a C pointer type
    #  is_null_ptr           boolean     Is the type of NULL
    #  is_cfunction          boolean     Is a C function type
    #  is_struct_or_union    boolean     Is a C struct or union type
    #  is_enum               boolean     Is a C enum type
    #  is_string             boolean     Is a C char * type
    #  is_returncode         boolean     Is used only to signal exceptions
    #  is_error              boolean     Is the dummy error type
    #  has_attributes        boolean     Has C dot-selectable attributes
    #  default_value         string      Initial value
    #  parsetuple_format     string      Format char for PyArg_ParseTuple
    #  pymemberdef_typecode  string      Type code for PyMemberDef struct
    #
    #  declaration_code(entity_code,
    #      for_display = 0, dll_linkage = None, pyrex = 0)
    #    Returns a code fragment declaring an entity of this type,
    #    given a code fragment for the entity.
    #    * If for_display, the result is for a human-readable error
    #      message; otherwise it must be valid C code.
    #    * If dll_linkage is not None, it must be 'DL_IMPORT' or
    #      'DL_EXPORT' and is added to the base type part of the
    #      declaration.
    #    * If pyrex = 1, the result is for a 'cdef extern' statement
    #      in a Pyrex include file.
    #
    #  assignable_from(src_type)
    #    Whether a variable of this type can be assigned a value
    #    of type src_type.
    #
    #  same_as(other_type)
    #    Whether this type represents the same type as other_type.
    #
    #  as_argument_type():
    #    Coerces array type into pointer type for use as a formal
    #    argument type.
    #

    #  Every flag defaults to "no"; subclasses switch on the ones
    #  that apply to them.
    is_pyobject = 0
    is_extension_type = 0
    is_numeric = 0
    is_int = 0
    is_float = 0
    is_void = 0
    is_array = 0
    is_ptr = 0
    is_null_ptr = 0
    is_cfunction = 0
    is_struct_or_union = 0
    is_enum = 0
    is_string = 0
    is_returncode = 0
    is_error = 0
    has_attributes = 0
    default_value = ""
    parsetuple_format = ""
    pymemberdef_typecode = None

    def literal_code(self, value):
        #  C source fragment representing a literal value of this type.
        return str(value)

    def __str__(self):
        return self.declaration_code("", for_display = 1).strip()

    def same_as(self, other_type):
        #  error_type matches any type (presumably so that one reported
        #  error does not cascade into bogus follow-up errors).
        if self is other_type:
            return True
        return other_type is error_type

    def subtype_of(self, other_type):
        #  By default, a type is a subtype only of itself.
        return self.same_as(other_type)

    def assignable_from(self, src_type):
        #  By default, assignment requires identical types.
        return self.same_as(src_type)

    def as_argument_type(self):
        #  Arrays override this to decay to a pointer type.
        return self

    def is_complete(self):
        #  A type is incomplete if it is an unsized array, a struct
        #  whose attributes are not defined, etc.
        return 1
class PyObjectType(PyrexType):
    #
    #  Base class for all Python object types (reference-counted).
    #

    is_pyobject = 1
    default_value = "0"
    parsetuple_format = "O"
    pymemberdef_typecode = "T_OBJECT"

    def __str__(self):
        return "Python object"

    def __repr__(self):
        return "PyObjectType"

    def assignable_from(self, src_type):
        #  Anything may be assigned; a conversion will be attempted.
        return 1

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex:
            return "object %s" % entity_code
        return "%s *%s" % (public_decl("PyObject", dll_linkage), entity_code)
class PyExtensionType(PyObjectType):
    #
    #  A Python extension type.
    #
    #  name              string
    #  scope             CClassScope      Attribute namespace
    #  visibility        string
    #  typedef_flag      boolean
    #  base_type         PyExtensionType or None
    #  module_name       string or None   Qualified name of defining module
    #  objstruct_cname   string           Name of PyObject struct
    #  typeobj_cname     string or None   C code fragment referring to type object
    #  typeptr_cname     string or None   Name of pointer to external type object
    #  vtabslot_cname    string           Name of C method table member
    #  vtabstruct_cname  string           Name of C method table struct
    #  vtabptr_cname     string           Name of pointer to C method table
    #  vtable_cname      string           Name of C method table definition

    is_extension_type = 1
    has_attributes = 1

    def __init__(self, name, typedef_flag, base_type):
        self.name = name
        self.typedef_flag = typedef_flag
        self.base_type = base_type
        #  Everything below gets filled in later, as the type's scope
        #  and C-level names become known.
        self.scope = None
        self.module_name = None
        self.objstruct_cname = None
        self.typeobj_cname = None
        self.typeptr_cname = None
        self.vtabslot_cname = None
        self.vtabstruct_cname = None
        self.vtabptr_cname = None
        self.vtable_cname = None

    def set_scope(self, scope):
        self.scope = scope
        if scope:
            scope.parent_type = self

    def subtype_of(self, other_type):
        if not other_type.is_extension_type:
            #  Every extension type is a subtype of the generic
            #  Python object type.
            return other_type is py_object_type
        if self is other_type:
            return True
        return self.base_type and self.base_type.subtype_of(other_type)

    def typeobj_is_available(self):
        #  Do we have a pointer to the type object?
        return self.typeptr_cname

    def typeobj_is_imported(self):
        #  If we don't know the C name of the type object but we do
        #  know which module it's defined in, it will be imported.
        return self.typeobj_cname is None and self.module_name is not None

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex:
            return "%s %s" % (self.name, entity_code)
        if self.typedef_flag:
            #  A typedef'd struct is referred to by its bare name.
            base = public_decl(self.objstruct_cname, dll_linkage)
        else:
            base = public_decl("struct %s" % self.objstruct_cname, dll_linkage)
        return "%s *%s" % (base, entity_code)

    def attributes_known(self):
        return self.scope is not None

    def __str__(self):
        return self.name

    def __repr__(self):
        if self.typedef_flag:
            suffix = ".typedef_flag=1"
        else:
            suffix = ""
        return "PyExtensionType(%s%s)" % (self.scope.class_name, suffix)
class CType(PyrexType):
    #
    #  Base class for all C types (non-reference-counted).
    #
    #  to_py_function     string     C function for converting to Python object
    #  from_py_function   string     C function for constructing from Python object
    #
    #  None means no conversion in that direction is available.
    to_py_function = None
    from_py_function = None
class CSimpleType(CType):
    #
    #  Base class for all unstructured C types.
    #
    #  Purely a classification marker; adds nothing to CType.
    pass
class CVoidType(CSimpleType):
    #  The C void type.  Never complete, so it is only usable behind
    #  a pointer.

    is_void = 1

    def __repr__(self):
        return "<CVoidType>"

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        return "%s %s" % (public_decl("void", dll_linkage), entity_code)

    def is_complete(self):
        return 0
class CNumericType(CType):
    #
    #  Base class for all C numeric types.
    #
    #  rank      integer     Relative size
    #  signed    boolean
    #

    is_numeric = 1
    default_value = "0"

    #  rank -> PyArg_ParseTuple format char; '?' marks ranks that
    #  PyArg_ParseTuple cannot handle directly.
    parsetuple_formats = "chilLfd?"

    def __init__(self, rank, pymemberdef_typecode = None):
        self.rank = rank
        fmt = self.parsetuple_formats[rank]
        if fmt == '?':
            fmt = None
        self.parsetuple_format = fmt
        self.pymemberdef_typecode = pymemberdef_typecode

    def sign_string(self):
        #  "unsigned " prefix for unsigned types, "" otherwise.
        if self.signed:
            return ""
        return "unsigned "

    def __repr__(self):
        return "<CNumericType %s%s>" % (
            self.sign_string(), rank_to_type_name[self.rank])

    def assignable_from(self, src_type):
        #  Any numeric type converts implicitly, as in C.
        return src_type.is_numeric or src_type is error_type

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        base = public_decl(
            self.sign_string() + rank_to_type_name[self.rank], dll_linkage)
        return "%s %s" % (base, entity_code)
class CIntType(CNumericType):
    #  C integer type (char/short/int/long family); converts to and
    #  from Python ints.

    is_int = 1
    typedef_flag = 0
    to_py_function = "PyInt_FromLong"
    from_py_function = "PyInt_AsLong"

    def __init__(self, rank, signed, pymemberdef_typecode = None, is_returncode = 0):
        #  is_returncode: value is used only to signal exceptions.
        CNumericType.__init__(self, rank, pymemberdef_typecode)
        self.signed = signed
        self.is_returncode = is_returncode
class CLongLongType(CIntType):
    #  C long long: too wide for a Python int, so converts via Python long.
    to_py_function = "PyLong_FromLongLong"
    from_py_function = "PyLong_AsLongLong"
class CULongLongType(CIntType):
    #  C unsigned long long; converts via Python long.
    to_py_function = "PyLong_FromUnsignedLongLong"
    from_py_function = "PyLong_AsUnsignedLongLong"
class CFloatType(CNumericType):
    #  C floating point type; converts to and from Python floats.
    is_float = 1
    signed = 1
    to_py_function = "PyFloat_FromDouble"
    from_py_function = "PyFloat_AsDouble"
class CArrayType(CType):
    #  base_type     CType              Element type
    #  size          integer or None    Number of elements (None = unsized)

    is_array = 1

    def __init__(self, base_type, size):
        self.base_type = base_type
        self.size = size
        if base_type is c_char_type:
            #  A char array behaves as a string type.
            self.is_string = 1

    def __repr__(self):
        return "CArrayType(%s,%s)" % (self.size, repr(self.base_type))

    def same_as(self, other_type):
        if other_type.is_array:
            match = self.base_type.same_as(other_type.base_type)
            if match:
                return match
        return other_type is error_type

    def assignable_from(self, src_type):
        #  Can't assign to a variable of an array type.
        return 0

    def element_ptr_type(self):
        return c_ptr_type(self.base_type)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if self.size is None:
            dimension = ""
        else:
            dimension = self.size
        return self.base_type.declaration_code(
            "(%s[%s])" % (entity_code, dimension),
            for_display, dll_linkage, pyrex)

    def as_argument_type(self):
        #  As in C, an array formal argument decays to a pointer.
        return c_ptr_type(self.base_type)

    def is_complete(self):
        return self.size is not None
class CPtrType(CType):
    #  base_type     CType    Referenced type

    is_ptr = 1
    #  NOTE(review): other types use the strings "" / "0" for
    #  default_value; this is the integer 0.  Harmless under
    #  %s-formatting, but confirm before "fixing" (truthiness differs).
    default_value = 0

    def __init__(self, base_type):
        self.base_type = base_type

    def __repr__(self):
        return "CPtrType(%s)" % repr(self.base_type)

    def same_as(self, other_type):
        if other_type.is_ptr:
            match = self.base_type.same_as(other_type.base_type)
            if match:
                return match
        return other_type is error_type

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        return self.base_type.declaration_code(
            "(*%s)" % entity_code,
            for_display, dll_linkage, pyrex)

    def assignable_from(self, other_type):
        #  Checks ordered as in C pointer-compatibility rules.
        if other_type is error_type:
            return 1
        if self.base_type.is_cfunction and other_type.is_cfunction:
            #  Function pointers must have matching signatures.
            return self.base_type.same_as(other_type)
        if not other_type.is_ptr:
            return 0
        if self.base_type.is_void:
            #  void * accepts any pointer.
            return 1
        if other_type.is_null_ptr:
            #  NULL is assignable to any pointer type.
            return 1
        return self.base_type.same_as(other_type.base_type)
class CNullPtrType(CPtrType):
    # Type of the NULL constant; assignable to any pointer type
    # (see CPtrType.assignable_from).
    is_null_ptr = 1
class CFuncType(CType):
    # C function type.
    #
    # return_type      CType
    # args             [CFuncTypeArg]
    # has_varargs      boolean
    # exception_value  string
    # exception_check  boolean  True if PyErr_Occurred check needed

    is_cfunction = 1

    def __init__(self, return_type, args, has_varargs,
            exception_value = None, exception_check = 0):
        self.return_type = return_type
        self.args = args
        self.has_varargs = has_varargs
        self.exception_value = exception_value
        self.exception_check = exception_check

    def __repr__(self):
        arg_reprs = map(repr, self.args)
        if self.has_varargs:
            arg_reprs.append("...")
        return "CFuncType(%s,[%s])" % (
            repr(self.return_type),
            string.join(arg_reprs, ","))

    def same_c_signature_as(self, other_type, as_cmethod = 0):
        # Compare C-level signatures only (argument types, varargs flag,
        # return type), ignoring exception behaviour.
        if other_type is error_type:
            return 1
        if not other_type.is_cfunction:
            return 0
        nargs = len(self.args)
        if nargs <> len(other_type.args):
            return 0
        # When comparing C method signatures, the first argument
        # is exempt from compatibility checking (the proper check
        # is performed elsewhere).
        for i in range(as_cmethod, nargs):
            if not self.args[i].type.same_as(
                    other_type.args[i].type):
                return 0
        if self.has_varargs <> other_type.has_varargs:
            return 0
        if not self.return_type.same_as(other_type.return_type):
            return 0
        return 1

    def same_exception_signature_as(self, other_type):
        return self.exception_value == other_type.exception_value \
            and self.exception_check == other_type.exception_check

    def same_as(self, other_type, as_cmethod = 0):
        # Full equality: C signature plus exception behaviour.
        return self.same_c_signature_as(other_type, as_cmethod) \
            and self.same_exception_signature_as(other_type)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        arg_decl_list = []
        for arg in self.args:
            arg_decl_list.append(
                arg.type.declaration_code("", for_display, pyrex = pyrex))
        if self.has_varargs:
            arg_decl_list.append("...")
        arg_decl_code = string.join(arg_decl_list, ",")
        if not arg_decl_code and not pyrex:
            # C requires an explicit 'void' for an empty parameter list.
            arg_decl_code = "void"
        # The exception clause is only shown in Pyrex-style display form.
        exc_clause = ""
        if for_display:
            if self.exception_value and self.exception_check:
                exc_clause = " except? %s" % self.exception_value
            elif self.exception_value:
                exc_clause = " except %s" % self.exception_value
            elif self.exception_check:
                exc_clause = " except *"
        return self.return_type.declaration_code(
            "(%s(%s)%s)" % (entity_code, arg_decl_code, exc_clause),
            for_display, dll_linkage, pyrex)
class CFuncTypeArg:
    # One argument of a C function type.
    #
    # name   string
    # cname  string           C name (prefixed form of 'name')
    # type   PyrexType
    # pos    source file position

    def __init__(self, name, type, pos):
        self.name = name
        self.cname = Naming.var_prefix + name
        self.type = type
        self.pos = pos

    def __repr__(self):
        return "%s:%s" % (self.name, repr(self.type))

    def declaration_code(self, for_display = 0):
        return self.type.declaration_code(self.cname, for_display)
class CStructOrUnionType(CType):
    # C struct or union type.
    #
    # name          string
    # cname         string
    # kind          string  "struct" or "union"
    # scope         StructOrUnionScope, or None if incomplete
    # typedef_flag  boolean

    is_struct_or_union = 1
    has_attributes = 1

    def __init__(self, name, kind, scope, typedef_flag, cname):
        self.name = name
        self.cname = cname
        self.kind = kind
        self.scope = scope
        self.typedef_flag = typedef_flag

    def __repr__(self):
        if self.typedef_flag:
            flag = ",typedef_flag=1"
        else:
            flag = ""
        return "CStructOrUnionType(%s,%s%s)" % (self.name, self.cname, flag)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex:
            return "%s %s" % (self.name, entity_code)
        # C form: a plain typedef name, or 'struct/union cname'.
        if for_display:
            base = self.name
        elif self.typedef_flag:
            base = self.cname
        else:
            base = "%s %s" % (self.kind, self.cname)
        return "%s %s" % (public_decl(base, dll_linkage), entity_code)

    def is_complete(self):
        # Complete once the member scope has been parsed.
        return self.scope is not None

    def attributes_known(self):
        return self.is_complete()
class CEnumType(CIntType):
    # C enum type, treated as an int.
    #
    # name          string
    # cname         string or None
    # typedef_flag  boolean

    is_enum = 1
    signed = 1
    rank = 2   # same rank as plain int

    def __init__(self, name, cname, typedef_flag):
        self.name = name
        self.cname = cname
        self.values = []    # names of the enum's members
        self.typedef_flag = typedef_flag

    def __repr__(self):
        if self.typedef_flag:
            flag = ",typedef_flag=1"
        else:
            flag = ""
        return "CEnumType(%s,%s%s)" % (self.name, self.cname, flag)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex:
            return "%s %s" % (self.cname, entity_code)
        # C form: a plain typedef name, or 'enum cname'.
        if self.typedef_flag:
            base = self.cname
        else:
            base = "enum %s" % self.cname
        return "%s %s" % (public_decl(base, dll_linkage), entity_code)
class CStringType:
    # Mixin giving string behaviour to the char-array and
    # char-pointer C types.

    is_string = 1
    to_py_function = "PyString_FromString"
    from_py_function = "PyString_AsString"

    def literal_code(self, value):
        # Render the value as a double-quoted C string literal.
        return '"%s"' % value
class CCharArrayType(CStringType, CArrayType):
    # C 'char []' type.
    parsetuple_format = "s"
    pymemberdef_typecode = "T_STRING_INPLACE"

    def __init__(self, size):
        CArrayType.__init__(self, c_char_type, size)
class CCharPtrType(CStringType, CPtrType):
    # C 'char *' type.
    parsetuple_format = "s"
    pymemberdef_typecode = "T_STRING"

    def __init__(self):
        CPtrType.__init__(self, c_char_type)
class ErrorType(PyrexType):
    # Used to prevent propagation of error messages: compares
    # the same as any type and produces placeholder code.

    is_error = 1
    exception_value = "0"
    exception_check = 0
    to_py_function = "dummy"
    from_py_function = "dummy"

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        return "<error>"

    def same_as(self, other_type):
        # The error type is compatible with everything.
        return 1
# ----- Shared instances of the predefined types -----

py_object_type = PyObjectType()

c_void_type = CVoidType()
c_void_ptr_type = CPtrType(c_void_type)
c_void_ptr_ptr_type = CPtrType(c_void_ptr_type)

# Signed integer types: CIntType(rank, signed, pymemberdef_typecode).
c_char_type = CIntType(0, 1, "T_CHAR")
c_short_type = CIntType(1, 1, "T_SHORT")
c_int_type = CIntType(2, 1, "T_INT")
c_long_type = CIntType(3, 1, "T_LONG")
c_longlong_type = CLongLongType(4, 1)

# Unsigned integer types.
c_uchar_type = CIntType(0, 0, "T_UBYTE")
c_ushort_type = CIntType(1, 0, "T_USHORT")
c_uint_type = CIntType(2, 0, "T_UINT")
c_ulong_type = CIntType(3, 0, "T_ULONG")
c_ulonglong_type = CULongLongType(4, 0)

# Floating point types.
c_float_type = CFloatType(5, "T_FLOAT")
c_double_type = CFloatType(6, "T_DOUBLE")
c_longdouble_type = CFloatType(7)

c_null_ptr_type = CNullPtrType(c_void_type)
c_char_array_type = CCharArrayType(None)
c_char_ptr_type = CCharPtrType()
c_char_ptr_ptr_type = CPtrType(c_char_ptr_type)
c_int_ptr_type = CPtrType(c_int_type)

# Int type used for a function's error-return value.
c_returncode_type = CIntType(2, 1, "T_INT", is_returncode = 1)

error_type = ErrorType()

# Rank at which the numeric ranks switch from integer to floating point.
lowest_float_rank = 5

# C type name for each numeric rank.
rank_to_type_name = (
    "char", # 0
    "short", # 1
    "int", # 2
    "long", # 3
    "PY_LONG_LONG", # 4
    "float", # 5
    "double", # 6
    "long double", # 7
)

# Lookup table used by widest_numeric_type.
sign_and_rank_to_type = {
    #(signed, rank)
    (0, 0, ): c_uchar_type,
    (0, 1): c_ushort_type,
    (0, 2): c_uint_type,
    (0, 3): c_ulong_type,
    (0, 4): c_ulonglong_type,
    (1, 0): c_char_type,
    (1, 1): c_short_type,
    (1, 2): c_int_type,
    (1, 3): c_long_type,
    (1, 4): c_longlong_type,
    (1, 5): c_float_type,
    (1, 6): c_double_type,
    (1, 7): c_longdouble_type,
}

# Lookup table used by simple_c_type; longness is -1 (short),
# 0 (plain), 1 (long) or 2 (long long).
modifiers_and_name_to_type = {
    #(signed, longness, name)
    (0, 0, "char"): c_uchar_type,
    (0, -1, "int"): c_ushort_type,
    (0, 0, "int"): c_uint_type,
    (0, 1, "int"): c_ulong_type,
    (0, 2, "int"): c_ulonglong_type,
    (1, 0, "void"): c_void_type,
    (1, 0, "char"): c_char_type,
    (1, -1, "int"): c_short_type,
    (1, 0, "int"): c_int_type,
    (1, 1, "int"): c_long_type,
    (1, 2, "int"): c_longlong_type,
    (1, 0, "float"): c_float_type,
    (1, 0, "double"): c_double_type,
    (1, 1, "double"): c_longdouble_type,
    (1, 0, "object"): py_object_type,
}
def widest_numeric_type(type1, type2):
    # Given two numeric types, return the narrowest type
    # encompassing both of them.
    # NOTE(review): only type1's signedness is consulted below, so for
    # integer results the answer can depend on operand order (e.g.
    # widest(uint, int) != widest(int, uint)) -- confirm this is the
    # intended widening rule before relying on it.
    signed = type1.signed
    rank = max(type1.rank, type2.rank)
    if rank >= lowest_float_rank:
        # Floating point results are always looked up as signed.
        signed = 1
    return sign_and_rank_to_type[signed, rank]
def simple_c_type(signed, longness, name):
    # Find type descriptor for simple type given name and modifiers.
    # signed: 0 or 1; longness: -1 (short), 0, 1 (long), 2 (long long);
    # name: base type name such as "int", "char", "double" or "object".
    # Returns None if arguments don't make sense.
    return modifiers_and_name_to_type.get((signed, longness, name))
def c_array_type(base_type, size):
    # Construct a C array type, using the string-aware char-array
    # type when the element type is char.
    if base_type is c_char_type:
        return CCharArrayType(size)
    return CArrayType(base_type, size)
def c_ptr_type(base_type):
    # Construct a C pointer type, reusing the shared char* instance
    # when the base type is char.
    if base_type is c_char_type:
        return c_char_ptr_type
    return CPtrType(base_type)
def public_decl(base, dll_linkage):
    # Wrap a base declaration in a DLL import/export macro, if any.
    if not dll_linkage:
        return base
    return "%s(%s)" % (dll_linkage, base)
| Python |
#
# Pyrex - Compilation-wide options
#
intern_names = 1 # Intern global variable and attribute names
| Python |
#
# Pyrex - Code output module
#
import Naming
from Pyrex.Utils import open_new_file
class CCodeWriter:
    # Writes generated C code, tracking indentation, control-flow
    # labels and source-position markers.
    #
    # f file output file
    # level int indentation level
    # bol bool beginning of line?
    # marker string comment to emit before next line
    # return_label string function return point label
    # error_label string error catch point label
    # continue_label string loop continue point label
    # break_label string loop break point label
    # label_counter integer counter for naming labels
    # in_try_finally boolean inside try of try...finally
    # filename_table {string : int} for finding filename table indexes
    # filename_list [string] filenames in filename table order

    in_try_finally = 0

    def __init__(self, outfile_name):
        self.f = open_new_file(outfile_name)
        self.level = 0
        self.bol = 1
        self.marker = None
        self.label_counter = 1
        self.error_label = None
        self.filename_table = {}
        self.filename_list = []

    def putln(self, code = ""):
        # Write a complete line, emitting any pending position marker first.
        if self.marker and self.bol:
            self.emit_marker()
        if code:
            self.put(code)
        self.f.write("\n");
        self.bol = 1

    def emit_marker(self):
        # Emit the pending source-position marker as a C comment.
        self.f.write("\n");
        self.indent()
        self.f.write("/* %s */\n" % self.marker)
        self.marker = None

    def put(self, code):
        # Write a code fragment, adjusting the indentation level by the
        # fragment's brace balance: closing braces dedent before the
        # fragment is written, opening braces indent after it.
        dl = code.count("{") - code.count("}")
        if dl < 0:
            self.level += dl
        if self.bol:
            self.indent()
        self.f.write(code)
        self.bol = 0
        if dl > 0:
            self.level += dl

    def increase_indent(self):
        self.level = self.level + 1

    def decrease_indent(self):
        self.level = self.level - 1

    def begin_block(self):
        self.putln("{")
        self.increase_indent()

    def end_block(self):
        self.decrease_indent()
        self.putln("}")

    def indent(self):
        self.f.write(" " * self.level)

    def mark_pos(self, pos):
        # Remember a source position (file, line, col); it is emitted
        # as a comment before the next line written.
        file, line, col = pos
        self.marker = '"%s":%s' % (file, line)

    def init_labels(self):
        # Reset label state at the start of a function.
        self.label_counter = 0
        self.return_label = self.new_label()
        self.new_error_label()
        self.continue_label = None
        self.break_label = None

    def new_label(self):
        n = self.label_counter
        self.label_counter = n + 1
        return "%s%d" % (Naming.label_prefix, n)

    def new_error_label(self):
        # Install a fresh error label, returning the old one so the
        # caller can restore it later.
        old_err_lbl = self.error_label
        self.error_label = self.new_label()
        return old_err_lbl

    def get_loop_labels(self):
        return (
            self.continue_label,
            self.break_label)

    def set_loop_labels(self, labels):
        (self.continue_label,
            self.break_label) = labels

    def new_loop_labels(self):
        # Fresh continue/break labels for a new loop; returns the old pair.
        old_labels = self.get_loop_labels()
        self.set_loop_labels(
            (self.new_label(),
            self.new_label()))
        return old_labels

    def get_all_labels(self):
        return (
            self.continue_label,
            self.break_label,
            self.return_label,
            self.error_label)

    def set_all_labels(self, labels):
        (self.continue_label,
            self.break_label,
            self.return_label,
            self.error_label) = labels

    def all_new_labels(self):
        # Replace every currently-active label with a fresh one,
        # returning the old set. Labels that are None stay None.
        old_labels = self.get_all_labels()
        new_labels = []
        for old_label in old_labels:
            if old_label:
                new_labels.append(self.new_label())
            else:
                new_labels.append(old_label)
        self.set_all_labels(new_labels)
        return old_labels

    def put_label(self, lbl):
        # The trailing ';' keeps the label legal at the end of a block.
        self.putln("%s:;" % lbl)

    def put_var_declarations(self, entries, static = 0, dll_linkage = None):
        # Declare every entry not originating from a C include file.
        for entry in entries:
            if not entry.in_cinclude:
                self.put_var_declaration(entry, static, dll_linkage)

    def put_var_declaration(self, entry, static = 0, dll_linkage = None):
        if entry.visibility == 'extern':
            self.put("extern ")
        elif static and entry.visibility <> 'public':
            self.put("static ")
        if entry.visibility <> 'public':
            # DLL linkage macros only apply to public entries.
            dll_linkage = None
        self.put(entry.type.declaration_code(entry.cname,
            dll_linkage = dll_linkage))
        if entry.init is not None:
            self.put(" = %s" % entry.type.literal_code(entry.init))
        self.putln(";")

    def entry_as_pyobject(self, entry):
        # Reference to the entry's variable, cast to PyObject * where
        # its C object-struct type differs from plain PyObject.
        type = entry.type
        if (not entry.is_self_arg and not entry.type.is_complete()) \
                or (entry.type.is_extension_type and entry.type.base_type):
            return "(PyObject *)" + entry.cname
        else:
            return entry.cname

    def as_pyobject(self, cname, type):
        # Like entry_as_pyobject, but for a bare cname/type pair.
        if type.is_extension_type and type.base_type:
            return "(PyObject *)" + cname
        else:
            return cname

    def put_incref(self, cname, type):
        self.putln("Py_INCREF(%s);" % self.as_pyobject(cname, type))

    def put_decref(self, cname, type):
        self.putln("Py_DECREF(%s);" % self.as_pyobject(cname, type))

    def put_var_incref(self, entry):
        if entry.type.is_pyobject:
            self.putln("Py_INCREF(%s);" % self.entry_as_pyobject(entry))

    def put_decref_clear(self, cname, type):
        self.putln("Py_DECREF(%s); %s = 0;" % (
            self.as_pyobject(cname, type), cname))

    def put_xdecref(self, cname, type):
        self.putln("Py_XDECREF(%s);" % self.as_pyobject(cname, type))

    def put_xdecref_clear(self, cname, type):
        self.putln("Py_XDECREF(%s); %s = 0;" % (
            self.as_pyobject(cname, type), cname))

    def put_var_decref(self, entry):
        if entry.type.is_pyobject:
            self.putln("Py_DECREF(%s);" % self.entry_as_pyobject(entry))

    def put_var_xdecref(self, entry):
        if entry.type.is_pyobject:
            self.putln("Py_XDECREF(%s);" % self.entry_as_pyobject(entry))

    def put_var_xdecref_clear(self, entry):
        if entry.type.is_pyobject:
            self.putln("Py_XDECREF(%s); %s = 0;" % (
                self.entry_as_pyobject(entry), entry.cname))

    def put_var_decrefs(self, entries):
        # Entries flagged xdecref_cleanup may legitimately hold NULL.
        for entry in entries:
            if entry.xdecref_cleanup:
                self.put_var_xdecref(entry)
            else:
                self.put_var_decref(entry)

    def put_var_xdecrefs(self, entries):
        for entry in entries:
            self.put_var_xdecref(entry)

    def put_var_xdecrefs_clear(self, entries):
        for entry in entries:
            self.put_var_xdecref_clear(entry)

    def put_init_to_py_none(self, cast, cname):
        # Initialise a variable to Py_None, casting through void * when
        # the variable's C type is not PyObject *.
        if cast:
            self.putln("%s = (void *)Py_None; Py_INCREF((PyObject *)%s);" % (cname, cname))
        else:
            self.putln("%s = Py_None; Py_INCREF(%s);" % (cname, cname))

    def put_init_var_to_py_none(self, entry, template = "%s"):
        code = template % entry.cname
        cast = entry.type.is_extension_type
        self.put_init_to_py_none(cast, code)

    def put_pymethoddef(self, entry, term):
        # Emit one PyMethodDef table row, terminated by 'term'.
        if entry.doc:
            doc_code = entry.doc_cname
        else:
            doc_code = 0
        self.putln(
            '{"%s", (PyCFunction)%s, METH_VARARGS|METH_KEYWORDS, %s}%s' % (
                entry.name,
                entry.func_cname,
                doc_code,
                term))

    def error_goto(self, pos):
        # C fragment recording the error position and jumping to the
        # current error label.
        return "{%s = %s[%s]; %s = %s; goto %s;}" % (
            Naming.filename_cname,
            Naming.filetable_cname,
            self.lookup_filename(pos[0]),
            Naming.lineno_cname,
            pos[1],
            self.error_label)

    def lookup_filename(self, filename):
        # Index of the filename in the module's filename table,
        # adding it if not already present.
        try:
            index = self.filename_table[filename]
        except KeyError:
            index = len(self.filename_list)
            self.filename_list.append(filename)
            self.filename_table[filename] = index
        return index
class PyrexCodeWriter:
    # Writes Pyrex source code with simple indentation tracking.
    #
    # f file output file
    # level int indentation level

    def __init__(self, outfile_name):
        self.f = open_new_file(outfile_name)
        self.level = 0

    def putln(self, code):
        # Emit one line at the current indentation level.
        self.f.write(" " * self.level + code + "\n")

    def indent(self):
        self.level = self.level + 1

    def dedent(self):
        self.level = self.level - 1
| Python |
#
# Pyrex Scanner
#
#import pickle
import cPickle as pickle
import os
import stat
import sys
from time import time
from Pyrex import Plex
from Pyrex.Plex import Scanner
from Pyrex.Plex.Errors import UnrecognizedInput
from Errors import CompileError, error
from Lexicon import string_prefixes, make_lexicon
# None for old (unversioned) Plex; affects lexicon pickling below.
plex_version = getattr(Plex, '_version', None)
#print "Plex version:", plex_version ###

# Debug/diagnostic switches for the scanner.
debug_scanner = 0
trace_scanner = 0
#scanner_dump_file = open("Lexicon_dump.txt", "w")
scanner_debug_flags = 0
scanner_dump_file = None

# Lexicon pickling behaviour.
binary_lexicon_pickle = 1       # pickle protocol passed to pickle.dump
notify_lexicon_unpickling = 0   # print timing when unpickling
notify_lexicon_pickling = 1     # print timing when pickling

lexicon = None  # lazily built/unpickled Plex lexicon; see get_lexicon()
#-----------------------------------------------------------------
def hash_source_file(path):
# Try to calculate a hash code for the given source file.
# Returns an empty string if the file cannot be accessed.
#print "Hashing", path ###
import md5
try:
try:
f = open(path)
text = f.read()
except IOError, e:
print "Unable to hash scanner source file (%s)" % e
return ""
finally:
f.close()
# Normalise spaces/tabs. We don't know what sort of
# space-tab substitution the file may have been
# through, so we replace all spans of spaces and
# tabs by a single space.
import re
text = re.sub("[ \t]+", " ", text)
hash = md5.new(text).hexdigest()
return hash
def open_pickled_lexicon(expected_hash):
    # Try to open the pickled lexicon file and verify that it matches
    # the source file. Returns the opened file, positioned just after
    # the stored hash, if successful; otherwise None.
    # Uses the global 'lexicon_pickle' path set by try_to_unpickle_lexicon.
    f = None
    result = None
    if os.path.exists(lexicon_pickle):
        try:
            f = open(lexicon_pickle, "rb")
            actual_hash = pickle.load(f)
            if actual_hash == expected_hash:
                result = f
                f = None    # ownership of the open file passes to the caller
            else:
                print "Lexicon hash mismatch:" ###
                print " expected", expected_hash ###
                print " got ", actual_hash ###
        except IOError, e:
            print "Warning: Unable to read pickled lexicon", lexicon_pickle
            print e
    if f:
        f.close()
    return result
def try_to_unpickle_lexicon():
    # Load the lexicon from Lexicon.pickle if its recorded hash matches
    # the current Lexicon.py source. Sets the module globals 'lexicon',
    # 'lexicon_pickle' and 'lexicon_hash' as a side effect.
    global lexicon, lexicon_pickle, lexicon_hash
    dir = os.path.dirname(__file__)
    source_file = os.path.join(dir, "Lexicon.py")
    lexicon_hash = hash_source_file(source_file)
    lexicon_pickle = os.path.join(dir, "Lexicon.pickle")
    f = open_pickled_lexicon(expected_hash = lexicon_hash)
    if f:
        if notify_lexicon_unpickling:
            t0 = time()
            print "Unpickling lexicon..."
        lexicon = pickle.load(f)
        f.close()
        if notify_lexicon_unpickling:
            t1 = time()
            print "Done (%.2f seconds)" % (t1 - t0)
def create_new_lexicon():
    # Build the lexicon from scratch, reporting the time taken,
    # and store it in the module global 'lexicon'.
    global lexicon
    t0 = time()
    print "Creating lexicon..."
    lexicon = make_lexicon()
    t1 = time()
    print "Done (%.2f seconds)" % (t1 - t0)
def pickle_lexicon():
    # Save the lexicon together with its source hash so later runs can
    # validate and reuse it. Failure to open the pickle file is
    # reported but not fatal.
    f = None
    try:
        f = open(lexicon_pickle, "wb")
    except IOError:
        print "Warning: Unable to save pickled lexicon in", lexicon_pickle
    if f:
        if notify_lexicon_pickling:
            t0 = time()
            print "Pickling lexicon..."
        # The hash is stored first so open_pickled_lexicon can check it
        # before unpickling the (large) lexicon itself.
        pickle.dump(lexicon_hash, f, binary_lexicon_pickle)
        pickle.dump(lexicon, f, binary_lexicon_pickle)
        f.close()
        if notify_lexicon_pickling:
            t1 = time()
            print "Done (%.2f seconds)" % (t1 - t0)
def get_lexicon():
    # Return the shared lexicon, unpickling or rebuilding it on first
    # use. Pickling/unpickling is only attempted when Plex has no
    # _version attribute (plex_version is None).
    global lexicon
    if not lexicon and plex_version is None:
        try_to_unpickle_lexicon()
    if not lexicon:
        create_new_lexicon()
        if plex_version is None:
            pickle_lexicon()
    return lexicon
#------------------------------------------------------------------
# Keywords recognised by the Pyrex scanner. Only used to build
# PyrexScanner.resword_dict, so order is irrelevant. ("in" was
# previously listed twice; the duplicate has been removed.)
reserved_words = [
    "global", "include", "ctypedef", "cdef", "def", "class",
    "print", "del", "pass", "break", "continue", "return",
    "raise", "import", "exec", "try", "except", "finally",
    "while", "if", "elif", "else", "for", "in", "assert",
    "and", "or", "not", "is", "lambda", "from",
    "NULL", "cimport", "cinline"
]
class Method:
    # Plex action helper: when called with (stream, text), invokes
    # the stream's method of the stored name on the text.

    def __init__(self, name):
        self.name = name
        # Plex uses __name__ when tracing actions.
        self.__name__ = name

    def __call__(self, stream, text):
        bound = getattr(stream, self.name)
        return bound(text)
#def make_lexicon():
# import Lexicon
# return Lexicon.lexicon
#------------------------------------------------------------------
def build_resword_dict():
    # Map each reserved word to 1 for fast membership testing.
    d = {}
    for w in reserved_words:
        d[w] = 1
    return d
#------------------------------------------------------------------
class PyrexScanner(Scanner):
    # Scanner for Pyrex source, layered on the Plex Scanner.
    # Tracks indentation, bracket nesting and string states, and keeps
    # the current token in self.sy (type) / self.systring (text).

    resword_dict = build_resword_dict()

    def __init__(self, file, filename, parent_scanner = None,
            type_names = None, context = None):
        Scanner.__init__(self, get_lexicon(), file, filename)
        if parent_scanner:
            # Included files share the parent's context and type names.
            self.context = parent_scanner.context
            self.type_names = parent_scanner.type_names
        else:
            self.context = context
            self.type_names = type_names
        self.trace = trace_scanner
        self.indentation_stack = [0]
        self.indentation_char = None
        self.bracket_nesting_level = 0
        self.begin('INDENT')
        self.sy = ''
        self.next()

    def current_level(self):
        # Current indentation level (top of the indentation stack).
        return self.indentation_stack[-1]

    def open_bracket_action(self, text):
        self.bracket_nesting_level = self.bracket_nesting_level + 1
        return text

    def close_bracket_action(self, text):
        self.bracket_nesting_level = self.bracket_nesting_level - 1
        return text

    def newline_action(self, text):
        # Newlines inside brackets do not produce NEWLINE tokens.
        if self.bracket_nesting_level == 0:
            self.begin('INDENT')
            self.produce('NEWLINE', '')

    # Lexical state to enter for each kind of string delimiter.
    string_states = {
        "'": 'SQ_STRING',
        '"': 'DQ_STRING',
        "'''": 'TSQ_STRING',
        '"""': 'TDQ_STRING'
    }

    def begin_string_action(self, text):
        # Strip a string prefix character (e.g. raw/c prefix) if present.
        if text[:1] in string_prefixes:
            text = text[1:]
        self.begin(self.string_states[text])
        self.produce('BEGIN_STRING')

    def end_string_action(self, text):
        self.begin('')
        self.produce('END_STRING')

    def unclosed_string_action(self, text):
        self.end_string_action(text)
        self.error("Unclosed string literal")

    def indentation_action(self, text):
        self.begin('')
        # Indentation within brackets should be ignored.
        #if self.bracket_nesting_level > 0:
        # return
        # Check that tabs and spaces are being used consistently.
        if text:
            c = text[0]
            #print "Scanner.indentation_action: indent with", repr(c) ###
            if self.indentation_char is None:
                self.indentation_char = c
                #print "Scanner.indentation_action: setting indent_char to", repr(c)
            else:
                if self.indentation_char <> c:
                    self.error("Mixed use of tabs and spaces")
            if text.replace(c, "") <> "":
                self.error("Mixed use of tabs and spaces")
        # Figure out how many indents/dedents to do
        current_level = self.current_level()
        new_level = len(text)
        #print "Changing indent level from", current_level, "to", new_level ###
        if new_level == current_level:
            return
        elif new_level > current_level:
            #print "...pushing level", new_level ###
            self.indentation_stack.append(new_level)
            self.produce('INDENT', '')
        else:
            while new_level < self.current_level():
                #print "...popping level", self.indentation_stack[-1] ###
                self.indentation_stack.pop()
                self.produce('DEDENT', '')
            #print "...current level now", self.current_level() ###
            if new_level <> self.current_level():
                self.error("Inconsistent indentation")

    def eof_action(self, text):
        # Flush pending DEDENTs, then produce the EOF token.
        while len(self.indentation_stack) > 1:
            self.produce('DEDENT', '')
            self.indentation_stack.pop()
        self.produce('EOF', '')

    def next(self):
        # Advance to the next token, leaving it in self.sy/self.systring.
        try:
            sy, systring = self.read()
        except UnrecognizedInput:
            self.error("Unrecognized character")
        if sy == 'IDENT' and systring in self.resword_dict:
            # Reserved words become their own token type.
            sy = systring
        self.sy = sy
        self.systring = systring
        if debug_scanner:
            _, line, col = self.position()
            if not self.systring or self.sy == self.systring:
                t = self.sy
            else:
                t = "%s %s" % (self.sy, self.systring)
            print "--- %3d %2d %s" % (line, col, t)

    def put_back(self, sy, systring):
        # Push the current token back and make (sy, systring) current.
        self.unread(self.sy, self.systring)
        self.sy = sy
        self.systring = systring

    def unread(self, token, value):
        # This method should be added to Plex
        self.queue.insert(0, (token, value))

    def add_type_name(self, name):
        # Record that 'name' is a type name from now on.
        self.type_names[name] = 1

    def looking_at_type_name(self):
        return self.sy == 'IDENT' and self.systring in self.type_names

    def error(self, message, pos = None):
        if pos is None:
            pos = self.position()
        if self.sy == 'INDENT':
            error(pos, "Possible inconsistent indentation")
        raise error(pos, message)

    def expect(self, what, message = None):
        # Consume the expected token or report an error.
        if self.sy == what:
            self.next()
        else:
            if message:
                self.error(message)
            else:
                self.error("Expected '%s'" % what)

    def expect_indent(self):
        self.expect('INDENT',
            "Expected an increase in indentation level")

    def expect_dedent(self):
        self.expect('DEDENT',
            "Expected a decrease in indentation level")

    def expect_newline(self, message):
        # Expect either a newline or end of file
        if self.sy <> 'EOF':
            self.expect('NEWLINE', message)
| Python |
#
# Pyrex - C naming conventions
#
#
# Prefixes for generating C names.
# Collected here to facilitate ensuring uniqueness.
#
# Common prefix for every generated C name.
pyrex_prefix = "__pyx_"

# Prefixes composed with a user-supplied or generated name.
arg_prefix = pyrex_prefix + "arg_"
funcdoc_prefix = pyrex_prefix + "doc_"
enum_prefix = pyrex_prefix + "e_"
func_prefix = pyrex_prefix + "f_"
gstab_prefix = pyrex_prefix + "getsets_"
prop_get_prefix = pyrex_prefix + "getprop_"
const_prefix = pyrex_prefix + "k"
label_prefix = pyrex_prefix + "L"
pymethdef_prefix = pyrex_prefix + "mdef_"
methtab_prefix = pyrex_prefix + "methods_"
memtab_prefix = pyrex_prefix + "members_"
interned_prefix = pyrex_prefix + "n_"
objstruct_prefix = pyrex_prefix + "obj_"
typeptr_prefix = pyrex_prefix + "ptype_"
prop_set_prefix = pyrex_prefix + "setprop_"
type_prefix = pyrex_prefix + "t_"
typeobj_prefix = pyrex_prefix + "type_"
var_prefix = pyrex_prefix + "v_"
vtable_prefix = pyrex_prefix + "vtable_"
vtabptr_prefix = pyrex_prefix + "vtabptr_"
vtabstruct_prefix = pyrex_prefix + "vtabstruct_"

# Fixed C names used in every generated module.
args_cname = pyrex_prefix + "args"
kwdlist_cname = pyrex_prefix + "argnames"
obj_base_cname = pyrex_prefix + "base"
builtins_cname = pyrex_prefix + "b"
moddict_cname = pyrex_prefix + "d"
filename_cname = pyrex_prefix + "filename"
filetable_cname = pyrex_prefix + "f"
filenames_cname = pyrex_prefix + "filenames"
intern_tab_cname = pyrex_prefix + "intern_tab"
kwds_cname = pyrex_prefix + "kwds"
lineno_cname = pyrex_prefix + "lineno"
module_cname = pyrex_prefix + "m"
moddoc_cname = pyrex_prefix + "mdoc"
methtable_cname = pyrex_prefix + "methods"
retval_cname = pyrex_prefix + "r"
self_cname = pyrex_prefix + "self"
stringtab_cname = pyrex_prefix + "string_tab"
vtabslot_cname = pyrex_prefix + "vtab"
| Python |
#=======================================================================
#
# Python Lexical Analyser
#
# Actions for use in token specifications
#
#=======================================================================
class Action:
    # Root of the Plex action hierarchy. By default two actions
    # are "the same" only if they are the same object.

    def same_as(self, other):
        return self is other


class Return(Action):
    """Plex action returning a fixed |value| as the token's value."""

    value = None

    def __init__(self, value):
        self.value = value

    def perform(self, token_stream, text):
        # The token's value is fixed, regardless of its text.
        return self.value

    def same_as(self, other):
        if not isinstance(other, Return):
            return False
        return self.value == other.value

    def __repr__(self):
        return "Return(" + repr(self.value) + ")"


class Call(Action):
    """Plex action invoking a user function on (token_stream, text)."""

    function = None

    def __init__(self, function):
        self.function = function

    def perform(self, token_stream, text):
        return self.function(token_stream, text)

    def same_as(self, other):
        # Identity of the callable, not equality, decides sameness.
        if not isinstance(other, Call):
            return False
        return self.function is other.function

    def __repr__(self):
        return "Call(" + self.function.__name__ + ")"


class Begin(Action):
    """Plex action switching the Scanner into state |state_name|.

    See the docstring of Plex.Lexicon for more information.
    """

    state_name = None

    def __init__(self, state_name):
        self.state_name = state_name

    def perform(self, token_stream, text):
        token_stream.begin(self.state_name)

    def same_as(self, other):
        if not isinstance(other, Begin):
            return False
        return self.state_name == other.state_name

    def __repr__(self):
        return "Begin(" + self.state_name + ")"


class Ignore(Action):
    """Plex action causing the associated token to be discarded.

    See the docstring of Plex.Lexicon for more information.
    """

    def perform(self, token_stream, text):
        return None

    def __repr__(self):
        return "IGNORE"


class Text(Action):
    """Plex action returning a token's matched text as its value.

    See the docstring of Plex.Lexicon for more information.
    """

    def perform(self, token_stream, text):
        return text

    def __repr__(self):
        return "TEXT"


# Shared singleton instances; their docstrings mirror the classes'.
IGNORE = Ignore()
IGNORE.__doc__ = Ignore.__doc__

TEXT = Text()
TEXT.__doc__ = Text.__doc__
| Python |
#=======================================================================
#
# Python Lexical Analyser
#
#
# Scanning an input stream
#
#=======================================================================
import Errors
from Regexps import BOL, EOL, EOF
class Scanner:
"""
A Scanner is used to read tokens from a stream of characters
using the token set specified by a Plex.Lexicon.
Constructor:
Scanner(lexicon, stream, name = '')
See the docstring of the __init__ method for details.
Methods:
See the docstrings of the individual methods for more
information.
read() --> (value, text)
Reads the next lexical token from the stream.
position() --> (name, line, col)
Returns the position of the last token read using the
read() method.
begin(state_name)
Causes scanner to change state.
produce(value [, text])
Causes return of a token value to the caller of the
Scanner.
"""
lexicon = None # Lexicon
stream = None # file-like object
name = ''
buffer = ''
buf_start_pos = 0 # position in input of start of buffer
next_pos = 0 # position in input of next char to read
cur_pos = 0 # position in input of current char
cur_line = 1 # line number of current char
cur_line_start = 0 # position in input of start of current line
start_pos = 0 # position in input of start of token
start_line = 0 # line number of start of token
start_col = 0 # position in line of start of token
text = None # text of last token read
initial_state = None # Node
state_name = '' # Name of initial state
queue = None # list of tokens to be returned
trace = 0
    def __init__(self, lexicon, stream, name = ''):
        """
        Scanner(lexicon, stream, name = '')

        |lexicon| is a Plex.Lexicon instance specifying the lexical tokens
        to be recognised.

        |stream| can be a file object or anything which implements a
        compatible read() method.

        |name| is optional, and may be the name of the file being
        scanned or any other identifying string.
        """
        self.lexicon = lexicon
        self.stream = stream
        self.name = name
        self.queue = []
        self.initial_state = None
        self.begin('')          # enter the default lexical state
        self.next_pos = 0
        self.cur_pos = 0
        self.cur_line_start = 0
        self.cur_char = BOL     # begin-of-line pseudo-character
        self.input_state = 1
    def read(self):
        """
        Read the next lexical token from the stream and return a
        tuple (value, text), where |value| is the value associated with
        the token as specified by the Lexicon, and |text| is the actual
        string read from the stream. Returns (None, '') on end of file.
        """
        queue = self.queue
        # Keep scanning until at least one token has been produced;
        # an action may return None (e.g. IGNORE) and produce nothing.
        while not queue:
            self.text, action = self.scan_a_token()
            if action is None:
                # End of file.
                self.produce(None)
                self.eof()
            else:
                value = action.perform(self, self.text)
                if value is not None:
                    self.produce(value)
        result = queue[0]
        del queue[0]
        return result
    def scan_a_token(self):
        """
        Read the next input sequence recognised by the machine
        and return (text, action). Returns ('', None) on end of
        file.
        """
        self.start_pos = self.cur_pos
        self.start_line = self.cur_line
        self.start_col = self.cur_pos - self.cur_line_start
        # if self.trace:
        # action = self.run_machine()
        # else:
        # action = self.run_machine_inlined()
        action = self.run_machine_inlined()
        if action:
            if self.trace:
                print "Scanner: read: Performing", action, "%d:%d" % (
                    self.start_pos, self.cur_pos)
            # Slice the matched text out of the buffer.
            base = self.buf_start_pos
            text = self.buffer[self.start_pos - base : self.cur_pos - base]
            return (text, action)
        else:
            # Nothing matched: either we are at a line/file boundary,
            # or the input is genuinely unrecognisable.
            if self.cur_pos == self.start_pos:
                if self.cur_char == EOL:
                    self.next_char()
                if not self.cur_char or self.cur_char == EOF:
                    return ('', None)
            raise Errors.UnrecognizedInput(self, self.state_name)
    def run_machine(self):
        """
        Run the machine until no more transitions are possible.
        """
        # Non-inlined reference implementation; the scanner normally
        # uses run_machine_inlined instead.
        self.state = self.initial_state
        self.backup_state = None
        while self.transition():
            pass
        return self.back_up()
    def run_machine_inlined(self):
        """
        Inlined version of run_machine for speed.
        """
        # Copy instance state into locals for fast access inside the
        # loop; written back to the instance on exit. The #@fast/#@slow
        # and #TRACE# markers are significant and must be preserved.
        state = self.initial_state
        cur_pos = self.cur_pos
        cur_line = self.cur_line
        cur_line_start = self.cur_line_start
        cur_char = self.cur_char
        input_state = self.input_state
        next_pos = self.next_pos
        buffer = self.buffer
        buf_start_pos = self.buf_start_pos
        buf_len = len(buffer)
        backup_state = None
        trace = self.trace
        while 1:
            if trace: #TRACE#
                print "State %d, %d/%d:%s -->" % ( #TRACE#
                    state['number'], input_state, cur_pos, repr(cur_char)), #TRACE#
            # Begin inlined self.save_for_backup()
            #action = state.action #@slow
            action = state['action'] #@fast
            if action:
                # Remember the most recent accepting state so we can
                # back up to it if the machine later blocks.
                backup_state = (
                    action, cur_pos, cur_line, cur_line_start, cur_char, input_state, next_pos)
            # End inlined self.save_for_backup()
            c = cur_char
            #new_state = state.new_state(c) #@slow
            new_state = state.get(c, -1) #@fast
            if new_state == -1: #@fast
                new_state = c and state.get('else') #@fast
            if new_state:
                if trace: #TRACE#
                    print "State %d" % new_state['number'] #TRACE#
                state = new_state
                # Begin inlined: self.next_char()
                # input_state is a small state machine inserting the
                # EOL/BOL/EOF pseudo-characters around real input.
                if input_state == 1:
                    cur_pos = next_pos
                    # Begin inlined: c = self.read_char()
                    buf_index = next_pos - buf_start_pos
                    if buf_index < buf_len:
                        c = buffer[buf_index]
                        next_pos = next_pos + 1
                    else:
                        # Refill the buffer, discarding everything
                        # before the start of the current token.
                        discard = self.start_pos - buf_start_pos
                        data = self.stream.read(0x1000)
                        buffer = self.buffer[discard:] + data
                        self.buffer = buffer
                        buf_start_pos = buf_start_pos + discard
                        self.buf_start_pos = buf_start_pos
                        buf_len = len(buffer)
                        buf_index = buf_index - discard
                        if data:
                            c = buffer[buf_index]
                            next_pos = next_pos + 1
                        else:
                            c = ''
                    # End inlined: c = self.read_char()
                    if c == '\n':
                        cur_char = EOL
                        input_state = 2
                    elif not c:
                        cur_char = EOL
                        input_state = 4
                    else:
                        cur_char = c
                elif input_state == 2:
                    cur_char = '\n'
                    input_state = 3
                elif input_state == 3:
                    cur_line = cur_line + 1
                    cur_line_start = cur_pos = next_pos
                    cur_char = BOL
                    input_state = 1
                elif input_state == 4:
                    cur_char = EOF
                    input_state = 5
                else: # input_state = 5
                    cur_char = ''
                # End inlined self.next_char()
            else: # not new_state
                if trace: #TRACE#
                    print "blocked" #TRACE#
                # Begin inlined: action = self.back_up()
                if backup_state:
                    (action, cur_pos, cur_line, cur_line_start,
                        cur_char, input_state, next_pos) = backup_state
                else:
                    action = None
                break # while 1
                # End inlined: action = self.back_up()
        # Write the locals back to the instance.
        self.cur_pos = cur_pos
        self.cur_line = cur_line
        self.cur_line_start = cur_line_start
        self.cur_char = cur_char
        self.input_state = input_state
        self.next_pos = next_pos
        if trace: #TRACE#
            if action: #TRACE#
                print "Doing", action #TRACE#
        return action
# def transition(self):
# self.save_for_backup()
# c = self.cur_char
# new_state = self.state.new_state(c)
# if new_state:
# if self.trace:
# print "Scanner: read: State %d: %s --> State %d" % (
# self.state.number, repr(c), new_state.number)
# self.state = new_state
# self.next_char()
# return 1
# else:
# if self.trace:
# print "Scanner: read: State %d: %s --> blocked" % (
# self.state.number, repr(c))
# return 0
# def save_for_backup(self):
# action = self.state.get_action()
# if action:
# if self.trace:
# print "Scanner: read: Saving backup point at", self.cur_pos
# self.backup_state = (
# action, self.cur_pos, self.cur_line, self.cur_line_start,
# self.cur_char, self.input_state, self.next_pos)
# def back_up(self):
# backup_state = self.backup_state
# if backup_state:
# (action, self.cur_pos, self.cur_line, self.cur_line_start,
# self.cur_char, self.input_state, self.next_pos) = backup_state
# if self.trace:
# print "Scanner: read: Backing up to", self.cur_pos
# return action
# else:
# return None
def next_char(self):
    # Advance the scanner by one input symbol.  input_state is a small
    # state machine that, around each newline, delivers the sequence
    # EOL, '\n', BOL, and at end of stream EOL, EOF, '' (forever).
    input_state = self.input_state
    if self.trace:
        print "Scanner: next:", " "*20, "[%d] %d" % (input_state, self.cur_pos),
    if input_state == 1:
        # Normal reading state.
        self.cur_pos = self.next_pos
        c = self.read_char()
        if c == '\n':
            self.cur_char = EOL
            self.input_state = 2
        elif not c:
            # End of stream: deliver EOL first, then EOF.
            self.cur_char = EOL
            self.input_state = 4
        else:
            self.cur_char = c
    elif input_state == 2:
        # EOL has been delivered; now deliver the literal newline.
        self.cur_char = '\n'
        self.input_state = 3
    elif input_state == 3:
        # Newline consumed: bump line counter, deliver BOL, resume.
        self.cur_line = self.cur_line + 1
        self.cur_line_start = self.cur_pos = self.next_pos
        self.cur_char = BOL
        self.input_state = 1
    elif input_state == 4:
        self.cur_char = EOF
        self.input_state = 5
    else: # input_state = 5
        # Past end of file: keep delivering the empty sentinel.
        self.cur_char = ''
    if self.trace:
        print "--> [%d] %d %s" % (input_state, self.cur_pos, repr(self.cur_char))
# def read_char(self):
# """
# Get the next input character, filling the buffer if necessary.
# Returns '' at end of file.
# """
# next_pos = self.next_pos
# buf_index = next_pos - self.buf_start_pos
# if buf_index == len(self.buffer):
# discard = self.start_pos - self.buf_start_pos
# data = self.stream.read(0x1000)
# self.buffer = self.buffer[discard:] + data
# self.buf_start_pos = self.buf_start_pos + discard
# buf_index = buf_index - discard
# if not data:
# return ''
# c = self.buffer[buf_index]
# self.next_pos = next_pos + 1
# return c
def position(self):
    """
    Location of the last token returned by read(), as a tuple
    (name, line, col): |name| is the name given to the Scanner
    constructor, |line| is 1-based, |col| is the 0-based column of
    the token's first character.
    """
    name = self.name
    line = self.start_line
    col = self.start_col
    return (name, line, col)
def begin(self, state_name):
    """Switch the scanner into the lexicon state named |state_name|."""
    self.initial_state = self.lexicon.get_initial_state(state_name)
    self.state_name = state_name
def produce(self, value, text = None):
    """
    Called from an action procedure: queue |value| to be returned as
    a token value from read(), paired with |text| (or with the scanned
    text when |text| is None).  May be invoked several times within a
    single action; queued tokens are then handed out one per read()
    call before scanning resumes.
    """
    token_text = text
    if token_text is None:
        token_text = self.text
    self.queue.append((value, token_text))
def eof(self):
    """
    Override this method if you want something to be done at
    end of file.
    """
    # Default is a no-op.  read() calls this once, when the machine
    # returns no action at end of input (just after queuing the final
    # None token).
# For backward compatibility:
# expose produce() under the name 'yield' as well.  setattr is needed
# because 'yield' is a reserved word and cannot appear in a def statement.
setattr(Scanner, "yield", Scanner.produce)
| Python |
#=======================================================================
#
# Python Lexical Analyser
#
# Classes for building NFAs and DFAs
#
#=======================================================================
import string
import sys
from sys import maxint
from types import TupleType
from Transitions import TransitionMap
LOWEST_PRIORITY = -sys.maxint
class Machine:
    """A collection of Nodes representing an NFA or DFA."""
    states = None              # [Node]
    next_state_number = 1
    initial_states = None      # {(name, bol): Node}

    def __init__(self):
        self.states = []
        self.initial_states = {}

    def __del__(self):
        # Break the reference cycles between states on teardown.
        for node in self.states:
            node.destroy()

    def new_state(self):
        """Add a new state to the machine and return it."""
        node = Node()
        number = self.next_state_number
        self.next_state_number = number + 1
        node.number = number
        self.states.append(node)
        return node

    def new_initial_state(self, name):
        """Create a new state and register it as the initial state for |name|."""
        node = self.new_state()
        self.make_initial_state(name, node)
        return node

    def make_initial_state(self, name, state):
        self.initial_states[name] = state

    def get_initial_state(self, name):
        return self.initial_states[name]

    def dump(self, file):
        """Write a debug listing of the whole machine to |file|."""
        file.write("Plex.Machine:\n")
        if self.initial_states is not None:
            file.write(" Initial states:\n")
            for (name, state) in self.initial_states.items():
                file.write(" '%s': %d\n" % (name, state.number))
        for node in self.states:
            node.dump(file)
class Node:
    """A state of an NFA or DFA."""
    transitions = None       # TransitionMap: event -> set of successor states
    action = None            # Action performed if this state accepts
    action_priority = None   # integer priority of |action|
    number = 0               # for debug output
    epsilon_closure = None   # used by nfa_to_dfa()

    def __init__(self):
        # Preinitialise the list of empty transitions, because
        # the nfa-to-dfa algorithm needs it
        #self.transitions = {'':[]}
        self.transitions = TransitionMap()
        self.action_priority = LOWEST_PRIORITY

    def destroy(self):
        # Drop references to break cycles so the graph can be collected.
        self.transitions = None
        self.action = None
        self.epsilon_closure = None

    def add_transition(self, event, new_state):
        self.transitions.add(event, new_state)

    def link_to(self, state):
        """Add an epsilon-move from this state to another state."""
        self.add_transition('', state)

    def set_action(self, action, priority):
        """Make this an accepting state with the given action. If
        there is already an action, choose the action with highest
        priority."""
        if priority > self.action_priority:
            self.action = action
            self.action_priority = priority

    def get_action(self):
        return self.action

    def get_action_priority(self):
        return self.action_priority

    def is_accepting(self):
        return self.action is not None

    def __str__(self):
        return "State %d" % self.number

    def dump(self, file):
        """Write a debug listing of this state to |file|."""
        # (Removed an unused local 'import string' that was here.)
        # Header
        file.write(" State %d:\n" % self.number)
        # Transitions
        self.transitions.dump(file)
        # Action
        action = self.action
        priority = self.action_priority
        if action is not None:
            file.write(" %s [priority %d]\n" % (action, priority))
class FastMachine:
    """
    FastMachine is a deterministic machine represented in a way that
    allows fast scanning: each state is a dict mapping an input event
    (single character or special symbol) directly to the next state.
    """
    initial_states = None   # {state_name: state}
    states = None           # [state]
    # where state = {event:state, 'else':state, 'action':Action}
    next_number = 1         # for debugging

    # Template for a fresh state: all special events initially lead nowhere.
    new_state_template = {
        '':None, 'bol':None, 'eol':None, 'eof':None, 'else':None
    }

    def __init__(self, old_machine = None):
        """
        Create an empty machine, or convert |old_machine| (a Node-based
        DFA) into the fast dict-based representation.
        """
        self.initial_states = initial_states = {}
        self.states = []
        if old_machine:
            self.old_to_new = old_to_new = {}
            # First allocate a new state for every old one ...
            for old_state in old_machine.states:
                new_state = self.new_state()
                old_to_new[old_state] = new_state
            for name, old_state in old_machine.initial_states.items():
                initial_states[name] = old_to_new[old_state]
            # ... then copy the transitions and actions across.  Each
            # old transition set holds at most one target (DFA property).
            for old_state in old_machine.states:
                new_state = old_to_new[old_state]
                for event, old_state_set in old_state.transitions.items():
                    if old_state_set:
                        new_state[event] = old_to_new[old_state_set.keys()[0]]
                    else:
                        new_state[event] = None
                new_state['action'] = old_state.action

    def __del__(self):
        # Clear the state dicts to break their mutual references.
        for state in self.states:
            state.clear()

    def new_state(self, action = None):
        """Create, register and return a new state dict."""
        number = self.next_number
        self.next_number = number + 1
        result = self.new_state_template.copy()
        result['number'] = number
        result['action'] = action
        self.states.append(result)
        return result

    def make_initial_state(self, name, state):
        self.initial_states[name] = state

    def add_transitions(self, state, event, new_state):
        """
        Add a transition from |state| to |new_state| on |event|, which
        is either a (code0, code1) character-code range or a special
        event name.
        """
        if type(event) == TupleType:
            code0, code1 = event
            if code0 == -maxint:
                # An open-ended range is represented by 'else'.
                state['else'] = new_state
            elif code1 != maxint:
                # Enumerate the characters of a finite range explicitly.
                while code0 < code1:
                    state[chr(code0)] = new_state
                    code0 = code0 + 1
            # NOTE(review): a range with a finite start and an open end
            # (code1 == maxint) is silently ignored here -- presumably
            # such ranges never occur after DFA construction; verify.
        else:
            state[event] = new_state

    def get_initial_state(self, name):
        return self.initial_states[name]

    def dump(self, file):
        """Write a debug listing of the whole machine to |file|."""
        file.write("Plex.FastMachine:\n")
        file.write(" Initial states:\n")
        for name, state in self.initial_states.items():
            file.write(" %s: %s\n" % (repr(name), state['number']))
        for state in self.states:
            self.dump_state(state, file)

    def dump_state(self, state, file):
        # (Removed an unused local 'import string' that was here.)
        # Header
        file.write(" State %d:\n" % state['number'])
        # Transitions
        self.dump_transitions(state, file)
        # Action
        action = state['action']
        if action is not None:
            file.write(" %s\n" % action)

    def dump_transitions(self, state, file):
        """Write |state|'s outgoing transitions, grouped into char ranges."""
        chars_leading_to_state = {}
        special_to_state = {}
        for (c, s) in state.items():
            if len(c) == 1:
                chars = chars_leading_to_state.get(id(s), None)
                if chars is None:
                    chars = []
                    chars_leading_to_state[id(s)] = chars
                chars.append(c)
            elif len(c) <= 4:
                special_to_state[c] = s
        ranges_to_state = {}
        # (Loop variable renamed: it used to shadow the |state| parameter.)
        for target in self.states:
            char_list = chars_leading_to_state.get(id(target), None)
            if char_list:
                ranges = self.chars_to_ranges(char_list)
                ranges_to_state[ranges] = target
        ranges_list = ranges_to_state.keys()
        ranges_list.sort()
        for ranges in ranges_list:
            key = self.ranges_to_string(ranges)
            target = ranges_to_state[ranges]
            file.write(" %s --> State %d\n" % (key, target['number']))
        for key in ('bol', 'eol', 'eof', 'else'):
            target = special_to_state.get(key, None)
            if target:
                file.write(" %s --> State %d\n" % (key, target['number']))

    def chars_to_ranges(self, char_list):
        """Collapse a list of characters into a tuple of (lo, hi) ranges."""
        char_list.sort()
        i = 0
        n = len(char_list)
        result = []
        while i < n:
            c1 = ord(char_list[i])
            c2 = c1
            i = i + 1
            while i < n and ord(char_list[i]) == c2 + 1:
                i = i + 1
                c2 = c2 + 1
            result.append((chr(c1), chr(c2)))
        return tuple(result)

    def ranges_to_string(self, range_list):
        return string.join(map(self.range_to_string, range_list), ",")

    def range_to_string(self, char_range):
        # Unpacked explicitly (was a Python-2-only tuple parameter).
        c1, c2 = char_range
        if c1 == c2:
            return repr(c1)
        else:
            return "%s..%s" % (repr(c1), repr(c2))
##
## (Superseded by Machines.FastMachine)
##
## class StateTableMachine:
## """
## StateTableMachine is an alternative representation of a Machine
## that can be run more efficiently.
## """
## initial_states = None # {state_name:state_index}
## states = None # [([state] indexed by char code, Action)]
## special_map = {'bol':256, 'eol':257, 'eof':258}
## def __init__(self, m):
## """
## Initialise StateTableMachine from Machine |m|.
## """
## initial_states = self.initial_states = {}
## states = self.states = [None]
## old_to_new = {}
## i = 1
## for old_state in m.states:
## new_state = ([0] * 259, old_state.get_action())
## states.append(new_state)
## old_to_new[old_state] = i # new_state
## i = i + 1
## for name, old_state in m.initial_states.items():
## initial_states[name] = old_to_new[old_state]
## for old_state in m.states:
## new_state_index = old_to_new[old_state]
## new_table = states[new_state_index][0]
## transitions = old_state.transitions
## for c, old_targets in transitions.items():
## if old_targets:
## old_target = old_targets[0]
## new_target_index = old_to_new[old_target]
## if len(c) == 1:
## a = ord(c)
## else:
## a = self.special_map[c]
## new_table[a] = states[new_target_index]
## def dump(self, f):
## f.write("Plex.StateTableMachine:\n")
## f.write(" Initial states:\n")
## for name, index in self.initial_states.items():
## f.write(" %s: State %d\n" % (
## repr(name), id(self.states[index])))
## for i in xrange(1, len(self.states)):
## table, action = self.states[i]
## f.write(" State %d:" % i)
## if action:
## f.write("%s" % action)
## f.write("\n")
## f.write(" %s\n" % map(id,table))
| Python |
#=======================================================================
#
# Python Lexical Analyser
#
# Exception classes
#
#=======================================================================
import exceptions
class PlexError(exceptions.Exception):
    """Base class for all errors raised by Plex."""
    message = ""  # default message text; subclasses may override
class PlexTypeError(PlexError, TypeError):
    """Plex error that is also a TypeError (argument of wrong type)."""
    pass
class PlexValueError(PlexError, ValueError):
    """Plex error that is also a ValueError (argument of wrong value)."""
    pass
class InvalidRegex(PlexError):
    """Raised when a regular expression is constructed incorrectly."""
    pass
class InvalidToken(PlexError):
    """Error in a token specification, tagged with the token's number."""
    def __init__(self, token_number, message):
        full_message = "Token number %d: %s" % (token_number, message)
        PlexError.__init__(self, full_message)
class InvalidScanner(PlexError):
    """Raised when a Scanner is constructed with invalid arguments."""
    pass
class AmbiguousAction(PlexError):
    # Fixed message; __init__ deliberately takes no arguments so the
    # class-level text below is always used.
    message = "Two tokens with different actions can match the same string"
    def __init__(self):
        pass
class UnrecognizedInput(PlexError):
    """Raised when the scanner blocks without recognising a token."""
    scanner = None
    position = None     # (name, line, col) captured at raise time
    state_name = None

    def __init__(self, scanner, state_name):
        self.scanner = scanner
        self.position = scanner.position()
        self.state_name = state_name

    def __str__(self):
        details = self.position + (repr(self.state_name),)
        return "'%s', line %d, char %d: Token not recognised in state %s" % details
| Python |
#
# Plex - Transition Maps
#
# This version represents state sets direcly as dicts
# for speed.
#
from copy import copy
import string
from sys import maxint
from types import TupleType
class TransitionMap:
    """
    A TransitionMap maps an input event to a set of states.
    An input event is one of: a range of character codes,
    the empty string (representing an epsilon move), or one
    of the special symbols BOL, EOL, EOF.
    For characters, this implementation compactly represents
    the map by means of a list:
    [code_0, states_0, code_1, states_1, code_2, states_2,
        ..., code_n-1, states_n-1, code_n]
    where |code_i| is a character code, and |states_i| is a
    set of states corresponding to characters with codes |c|
    in the range |code_i| <= |c| <= |code_i+1|.
    The following invariants hold:
        n >= 1
        code_0 == -maxint
        code_n == maxint
        code_i < code_i+1 for i in 0..n-1
        states_0 == states_n-1
    Mappings for the special events '', BOL, EOL, EOF are
    kept separately in a dictionary.
    """
    map = None      # The list of codes and states
    special = None  # Mapping for special events

    def __init__(self, map = None, special = None):
        # Start with a single all-covering range mapped to the empty set.
        if not map:
            map = [-maxint, {}, maxint]
        if not special:
            special = {}
        self.map = map
        self.special = special
        #self.check() ###

    def add(self, event, new_state,
            TupleType = TupleType):
        """
        Add transition to |new_state| on |event|.
        """
        # (TupleType is bound as a default argument so the lookup is a
        # fast local access inside this frequently-called method.)
        if type(event) == TupleType:
            code0, code1 = event
            # Ensure split points exist at both ends of the range, then
            # add the state to every sub-range set in between.
            i = self.split(code0)
            j = self.split(code1)
            map = self.map
            while i < j:
                map[i + 1][new_state] = 1
                i = i + 2
        else:
            self.get_special(event)[new_state] = 1

    def add_set(self, event, new_set,
                TupleType = TupleType):
        """
        Add transitions to the states in |new_set| on |event|.
        """
        if type(event) == TupleType:
            code0, code1 = event
            i = self.split(code0)
            j = self.split(code1)
            map = self.map
            while i < j:
                map[i + 1].update(new_set)
                i = i + 2
        else:
            self.get_special(event).update(new_set)

    def get_epsilon(self,
                    none = None):
        """
        Return the mapping for epsilon, or None.
        """
        return self.special.get('', none)

    def items(self,
              len = len):
        """
        Return the mapping as a list of ((code1, code2), state_set) and
        (special_event, state_set) pairs.
        """
        result = []
        map = self.map
        else_set = map[1]
        i = 0
        n = len(map) - 1
        code0 = map[0]
        while i < n:
            set = map[i + 1]
            code1 = map[i + 2]
            # Report a range if either its own set or the default
            # (leftmost) set is non-empty.
            if set or else_set:
                result.append(((code0, code1), set))
            code0 = code1
            i = i + 2
        for event, set in self.special.items():
            if set:
                result.append((event, set))
        return result

    # ------------------- Private methods --------------------

    def split(self, code,
              len = len, maxint = maxint):
        """
        Search the list for the position of the split point for |code|,
        inserting a new split point if necessary. Returns index |i| such
        that |code| == |map[i]|.
        """
        # We use a funky variation on binary search.
        map = self.map
        hi = len(map) - 1
        # Special case: code == map[-1]
        if code == maxint:
            return hi
        # General case
        lo = 0
        # loop invariant: map[lo] <= code < map[hi] and hi - lo >= 2
        # (indices step by 2 because codes occupy the even slots)
        while hi - lo >= 4:
            # Find midpoint truncated to even index
            mid = ((lo + hi) / 2) & ~1
            if code < map[mid]:
                hi = mid
            else:
                lo = mid
        # map[lo] <= code < map[hi] and hi - lo == 2
        if map[lo] == code:
            return lo
        else:
            # Insert a new split point, duplicating the set of the
            # range being split so both halves start out identical.
            map[hi:hi] = [code, map[hi - 1].copy()]
            #self.check() ###
            return hi

    def get_special(self, event):
        """
        Get state set for special event, adding a new entry if necessary.
        """
        special = self.special
        set = special.get(event, None)
        if not set:
            set = {}
            special[event] = set
        return set

    # --------------------- Conversion methods -----------------------

    def __str__(self):
        map_strs = []
        map = self.map
        n = len(map)
        i = 0
        while i < n:
            code = map[i]
            if code == -maxint:
                code_str = "-inf"
            elif code == maxint:
                code_str = "inf"
            else:
                code_str = str(code)
            map_strs.append(code_str)
            i = i + 1
            if i < n:
                map_strs.append(state_set_str(map[i]))
                i = i + 1
        special_strs = {}
        for event, set in self.special.items():
            special_strs[event] = state_set_str(set)
        return "[%s]+%s" % (
            string.join(map_strs, ","),
            special_strs
        )

    # --------------------- Debugging methods -----------------------

    def check(self):
        """Check data structure integrity."""
        if not self.map[-3] < self.map[-1]:
            print self
            assert 0

    def dump(self, file):
        map = self.map
        i = 0
        n = len(map) - 1
        while i < n:
            self.dump_range(map[i], map[i + 2], map[i + 1], file)
            i = i + 2
        for event, set in self.special.items():
            if set:
                if not event:
                    event = 'empty'
                self.dump_trans(event, set, file)

    def dump_range(self, code0, code1, set, file):
        # Render one [code0, code1) range in a human-friendly form.
        if set:
            if code0 == -maxint:
                if code1 == maxint:
                    k = "any"
                else:
                    k = "< %s" % self.dump_char(code1)
            elif code1 == maxint:
                k = "> %s" % self.dump_char(code0 - 1)
            elif code0 == code1 - 1:
                k = self.dump_char(code0)
            else:
                k = "%s..%s" % (self.dump_char(code0),
                    self.dump_char(code1 - 1))
            self.dump_trans(k, set, file)

    def dump_char(self, code):
        if 0 <= code <= 255:
            return repr(chr(code))
        else:
            return "chr(%d)" % code

    def dump_trans(self, key, set, file):
        file.write(" %s --> %s\n" % (key, self.dump_set(set)))

    def dump_set(self, set):
        return state_set_str(set)
#
# State set manipulation functions
#
#def merge_state_sets(set1, set2):
# for state in set2.keys():
# set1[state] = 1
def state_set_str(set):
    """
    Return a printable representation of a set of states, e.g. "[S1,S2]".
    |set| is a dict whose keys are Node-like objects with a .number.
    """
    state_list = set.keys()
    str_list = []
    for state in state_list:
        str_list.append("S%d" % state.number)
    # Use the str method join rather than the long-deprecated
    # string.join module function; behaviour is identical.
    return "[%s]" % ",".join(str_list)
| Python |
#=======================================================================
#
# Python Lexical Analyser
#
# Regular Expressions
#
#=======================================================================
import array
import string
import types
from sys import maxint
import Errors
#
# Constants
#
# Special input symbols used as transition events alongside ordinary
# character codes.
BOL = 'bol'   # beginning of line
EOL = 'eol'   # end of line
EOF = 'eof'   # end of file
nl_code = ord('\n')
#
# Helper functions
#
def chars_to_ranges(s):
    """
    Return a list of character codes consisting of pairs
    [code1a, code1b, code2a, code2b,...] which cover all
    the characters in |s|.  Each pair covers codes
    code_a <= c < code_b.  Duplicate characters in |s| are tolerated.
    """
    char_list = list(s)
    char_list.sort()
    i = 0
    n = len(char_list)
    result = []
    while i < n:
        code1 = ord(char_list[i])
        code2 = code1 + 1
        i = i + 1
        # Extend the range over consecutive codes, skipping duplicates.
        # (The previous version incremented code2 unconditionally, which
        # wrongly widened the range when |s| contained repeated
        # characters, e.g. "aab" produced a range also covering 'c'.)
        while i < n and ord(char_list[i]) <= code2:
            if ord(char_list[i]) == code2:
                code2 = code2 + 1
            i = i + 1
        result.append(code1)
        result.append(code2)
    return result
def uppercase_range(code1, code2):
    """
    Return the upper-case image of the part of [code1, code2) that
    falls within 'a'..'z', or None if that part is empty.
    """
    lo = max(code1, ord('a'))
    hi = min(code2, ord('z') + 1)
    if lo >= hi:
        return None
    shift = ord('A') - ord('a')
    return (lo + shift, hi + shift)
def lowercase_range(code1, code2):
    """
    Return the lower-case image of the part of [code1, code2) that
    falls within 'A'..'Z', or None if that part is empty.
    """
    lo = max(code1, ord('A'))
    hi = min(code2, ord('Z') + 1)
    if lo >= hi:
        return None
    shift = ord('a') - ord('A')
    return (lo + shift, hi + shift)
def CodeRanges(code_list):
    """
    Build an RE matching any character in any of the ranges described
    by |code_list|, a flat list of code pairs as produced by
    chars_to_ranges().
    """
    alternatives = []
    i = 0
    while i < len(code_list):
        alternatives.append(CodeRange(code_list[i], code_list[i + 1]))
        i = i + 2
    return apply(Alt, tuple(alternatives))
def CodeRange(code1, code2):
    """
    An RE matching any character whose code |c| satisfies
    |code1| <= |c| < |code2|.
    """
    if not (code1 <= nl_code < code2):
        return RawCodeRange(code1, code2)
    # The range straddles newline: split around it so that '\n' is
    # matched through the special RawNewline RE.
    return Alt(RawCodeRange(code1, nl_code),
               RawNewline,
               RawCodeRange(nl_code + 1, code2))
#
# Abstract classes
#
class RE:
    """RE is the base class for regular expression constructors.
    The following operators are defined on REs:
    re1 + re2 is an RE which matches |re1| followed by |re2|
    re1 | re2 is an RE which matches either |re1| or |re2|
    """
    nullable = 1   # True if this RE can match 0 input symbols
    match_nl = 1   # True if this RE can match a string ending with '\n'
    str = None     # Set to a string to override the class's __str__ result

    def build_machine(self, machine, initial_state, final_state,
                      match_bol, nocase):
        """
        This method should add states to |machine| to implement this
        RE, starting at |initial_state| and ending at |final_state|.
        If |match_bol| is true, the RE must be able to match at the
        beginning of a line. If nocase is true, upper and lower case
        letters should be treated as equivalent.
        """
        # Bug fix: this used to raise exceptions.UnimplementedMethod,
        # but the 'exceptions' module is not imported here (and has no
        # such class), so it actually raised a NameError.
        raise NotImplementedError("%s.build_machine not implemented" %
            self.__class__.__name__)

    def build_opt(self, m, initial_state, c):
        """
        Given a state |initial_state| of machine |m|, return a new state
        reachable from it on character |c| or epsilon.
        """
        s = m.new_state()
        initial_state.link_to(s)
        initial_state.add_transition(c, s)
        return s

    def __add__(self, other):
        return Seq(self, other)

    def __or__(self, other):
        return Alt(self, other)

    def __str__(self):
        if self.str:
            return self.str
        else:
            return self.calc_str()

    def check_re(self, num, value):
        # Validate that constructor argument |num| is an RE instance.
        if not isinstance(value, RE):
            self.wrong_type(num, value, "Plex.RE instance")

    def check_string(self, num, value):
        if type(value) != type(''):
            self.wrong_type(num, value, "string")

    def check_char(self, num, value):
        self.check_string(num, value)
        if len(value) != 1:
            raise Errors.PlexValueError("Invalid value for argument %d of Plex.%s."
                "Expected a string of length 1, got: %s" % (
                    num, self.__class__.__name__, repr(value)))

    def wrong_type(self, num, value, expected):
        """Raise a PlexTypeError describing a bad constructor argument."""
        if type(value) == types.InstanceType:
            got = "%s.%s instance" % (
                value.__class__.__module__, value.__class__.__name__)
        else:
            got = type(value).__name__
        # Bug fix: added the missing closing parenthesis to the message.
        raise Errors.PlexTypeError("Invalid type for argument %d of Plex.%s "
            "(expected %s, got %s)" % (
                num, self.__class__.__name__, expected, got))
#
# Primitive RE constructors
# -------------------------
#
# These are the basic REs from which all others are built.
#
## class Char(RE):
## """
## Char(c) is an RE which matches the character |c|.
## """
## nullable = 0
## def __init__(self, char):
## self.char = char
## self.match_nl = char == '\n'
## def build_machine(self, m, initial_state, final_state, match_bol, nocase):
## c = self.char
## if match_bol and c <> BOL:
## s1 = self.build_opt(m, initial_state, BOL)
## else:
## s1 = initial_state
## if c == '\n' or c == EOF:
## s1 = self.build_opt(m, s1, EOL)
## if len(c) == 1:
## code = ord(self.char)
## s1.add_transition((code, code+1), final_state)
## if nocase and is_letter_code(code):
## code2 = other_case_code(code)
## s1.add_transition((code2, code2+1), final_state)
## else:
## s1.add_transition(c, final_state)
## def calc_str(self):
## return "Char(%s)" % repr(self.char)
def Char(c):
    """
    An RE which matches the single character |c|, which may also be
    one of the special symbols BOL, EOL or EOF.
    """
    if len(c) != 1:
        result = SpecialSymbol(c)
    else:
        result = CodeRange(ord(c), ord(c) + 1)
    result.str = "Char(%s)" % repr(c)
    return result
class RawCodeRange(RE):
    """
    RawCodeRange(code1, code2) is a low-level RE which matches any character
    with a code |c| in the range |code1| <= |c| < |code2|, where the range
    does not include newline. For internal use only.
    """
    nullable = 0
    match_nl = 0
    range = None             # (code, code)
    uppercase_range = None   # (code, code) or None
    lowercase_range = None   # (code, code) or None

    def __init__(self, code1, code2):
        self.range = (code1, code2)
        # Precompute the other-case images for nocase matching.
        self.uppercase_range = uppercase_range(code1, code2)
        self.lowercase_range = lowercase_range(code1, code2)

    def build_machine(self, m, initial_state, final_state, match_bol, nocase):
        if match_bol:
            initial_state = self.build_opt(m, initial_state, BOL)
        initial_state.add_transition(self.range, final_state)
        if nocase:
            # Case-insensitive: also accept the other-case images.
            if self.uppercase_range:
                initial_state.add_transition(self.uppercase_range, final_state)
            if self.lowercase_range:
                initial_state.add_transition(self.lowercase_range, final_state)

    def calc_str(self):
        # Bug fix: the codes are stored in self.range; the attributes
        # self.code1/self.code2 never existed, so the old version raised
        # AttributeError when formatted.
        return "CodeRange(%d,%d)" % self.range
class _RawNewline(RE):
    """
    RawNewline is a low-level RE which matches a newline character.
    For internal use only.
    """
    nullable = 0
    match_nl = 1
    def build_machine(self, m, initial_state, final_state, match_bol, nocase):
        if match_bol:
            initial_state = self.build_opt(m, initial_state, BOL)
        # A newline implies an end-of-line event immediately before it.
        s = self.build_opt(m, initial_state, EOL)
        s.add_transition((nl_code, nl_code + 1), final_state)

# Shared singleton instance used by CodeRange() when a range spans '\n'.
RawNewline = _RawNewline()
class SpecialSymbol(RE):
    """
    SpecialSymbol(sym) is an RE which matches the special input
    symbol |sym|, which is one of BOL, EOL or EOF.
    """
    nullable = 0
    match_nl = 0
    sym = None  # one of BOL, EOL, EOF
    def __init__(self, sym):
        self.sym = sym
    def build_machine(self, m, initial_state, final_state, match_bol, nocase):
        # Sequences 'bol bol' and 'bol eof' are impossible, so only need
        # to allow for bol if sym is eol
        if match_bol and self.sym == EOL:
            initial_state = self.build_opt(m, initial_state, BOL)
        initial_state.add_transition(self.sym, final_state)
class Seq(RE):
    """Seq(re1, re2, re3...) is an RE which matches |re1| followed by
    |re2| followed by |re3|..."""

    def __init__(self, *re_list):
        nullable = 1
        for i in xrange(len(re_list)):
            re = re_list[i]
            self.check_re(i, re)
            # A sequence is nullable only if every component is nullable.
            nullable = nullable and re.nullable
        self.re_list = re_list
        self.nullable = nullable
        i = len(re_list)
        match_nl = 0
        # Scan backwards: the sequence can end with a newline if some
        # newline-matching component is followed only by nullable ones.
        while i:
            i = i - 1
            re = re_list[i]
            if re.match_nl:
                match_nl = 1
                break
            if not re.nullable:
                break
        self.match_nl = match_nl

    def build_machine(self, m, initial_state, final_state, match_bol, nocase):
        re_list = self.re_list
        if len(re_list) == 0:
            # Empty sequence: a single epsilon move suffices.
            initial_state.link_to(final_state)
        else:
            # Chain the components through intermediate states.
            s1 = initial_state
            n = len(re_list)
            for i in xrange(n):
                if i < n - 1:
                    s2 = m.new_state()
                else:
                    s2 = final_state
                re = re_list[i]
                re.build_machine(m, s1, s2, match_bol, nocase)
                s1 = s2
                # The next component starts at beginning-of-line if this
                # one can end in a newline, or if nothing has necessarily
                # been consumed yet (all components so far nullable).
                match_bol = re.match_nl or (match_bol and re.nullable)

    def calc_str(self):
        return "Seq(%s)" % string.join(map(str, self.re_list), ",")
class Alt(RE):
    """Alt(re1, re2, re3...) is an RE which matches either |re1| or
    |re2| or |re3|..."""

    def __init__(self, *re_list):
        self.re_list = re_list
        nullable = 0
        match_nl = 0
        # Partition the alternatives: nullable ones must keep their
        # epsilon path in build_machine, non-nullable ones can share a
        # possibly BOL-guarded entry state.
        nullable_res = []
        non_nullable_res = []
        i = 1
        for re in re_list:
            self.check_re(i, re)
            if re.nullable:
                nullable_res.append(re)
                nullable = 1
            else:
                non_nullable_res.append(re)
            if re.match_nl:
                match_nl = 1
            i = i + 1
        self.nullable_res = nullable_res
        self.non_nullable_res = non_nullable_res
        self.nullable = nullable
        self.match_nl = match_nl

    def build_machine(self, m, initial_state, final_state, match_bol, nocase):
        for re in self.nullable_res:
            re.build_machine(m, initial_state, final_state, match_bol, nocase)
        if self.non_nullable_res:
            if match_bol:
                # Insert the BOL guard once, shared by all non-nullable
                # alternatives.
                initial_state = self.build_opt(m, initial_state, BOL)
            for re in self.non_nullable_res:
                re.build_machine(m, initial_state, final_state, 0, nocase)

    def calc_str(self):
        return "Alt(%s)" % string.join(map(str, self.re_list), ",")
class Rep1(RE):
    """Rep1(re) is an RE which matches one or more repetitions of |re|."""

    def __init__(self, re):
        self.check_re(1, re)
        self.re = re
        self.nullable = re.nullable
        self.match_nl = re.match_nl

    def build_machine(self, m, initial_state, final_state, match_bol, nocase):
        # Build a loop: initial -> s1 -[re]-> s2, with s2 linking back
        # to s1 (repeat) and on to final_state (stop).
        s1 = m.new_state()
        s2 = m.new_state()
        initial_state.link_to(s1)
        # Repetitions after the first may start at BOL if |re| itself
        # can end with a newline.
        self.re.build_machine(m, s1, s2, match_bol or self.re.match_nl, nocase)
        s2.link_to(s1)
        s2.link_to(final_state)

    def calc_str(self):
        return "Rep1(%s)" % self.re
class SwitchCase(RE):
    """
    SwitchCase(re, nocase) matches exactly what |re| matches, but forces
    case handling according to |nocase|: true means case is ignored,
    false means it is significant (overriding any enclosing NoCase()).
    """
    re = None
    nocase = None

    def __init__(self, re, nocase):
        self.re = re
        self.nocase = nocase
        self.nullable = re.nullable
        self.match_nl = re.match_nl

    def build_machine(self, m, initial_state, final_state, match_bol, nocase):
        # Ignore the inherited |nocase| flag and impose our own.
        self.re.build_machine(m, initial_state, final_state, match_bol,
            self.nocase)

    def calc_str(self):
        if self.nocase:
            return "NoCase(%s)" % self.re
        return "Case(%s)" % self.re
#
# Composite RE constructors
# -------------------------
#
# These REs are defined in terms of the primitive REs.
#
Empty = Seq()
Empty.__doc__ = \
"""
Empty is an RE which matches the empty string.
"""
Empty.str = "Empty"
def Str1(s):
    """
    An RE matching exactly the literal string |s|, built as a sequence
    of single-character REs.
    """
    char_res = []
    for c in s:
        char_res.append(Char(c))
    result = apply(Seq, tuple(char_res))
    result.str = "Str(%s)" % repr(s)
    return result
def Str(*strs):
    """
    Str(s) is an RE which matches the literal string |s|.
    Str(s1, s2, s3, ...) matches any one of |s1|, |s2|, |s3|, ...
    """
    if len(strs) == 1:
        return Str1(strs[0])
    alternatives = tuple(map(Str1, strs))
    result = apply(Alt, alternatives)
    result.str = "Str(%s)" % string.join(map(repr, strs), ",")
    return result
def Any(s):
    """
    An RE matching any one character that appears in the string |s|.
    """
    result = CodeRanges(chars_to_ranges(s))
    result.str = "Any(%s)" % repr(s)
    return result
def AnyBut(s):
    """
    An RE matching any character (newline included) that does NOT
    appear in the string |s|.
    """
    # Complement the ranges of |s| by bracketing them with sentinels.
    code_list = chars_to_ranges(s)
    code_list.insert(0, -maxint)
    code_list.append(maxint)
    result = CodeRanges(code_list)
    result.str = "AnyBut(%s)" % repr(s)
    return result
AnyChar = AnyBut("")
AnyChar.__doc__ = \
"""
AnyChar is an RE which matches any single character (including a newline).
"""
AnyChar.str = "AnyChar"
def Range(s1, s2 = None):
    """
    Range(c1, c2): an RE matching any single character from |c1|
    through |c2| inclusive.
    Range(s): |s| is a string of even length; matches any character in
    the ranges s[0]-s[1], s[2]-s[3], ...
    """
    if not s2:
        pieces = []
        for i in range(0, len(s1), 2):
            pieces.append(CodeRange(ord(s1[i]), ord(s1[i + 1]) + 1))
        result = apply(Alt, tuple(pieces))
        result.str = "Range(%s)" % repr(s1)
    else:
        result = CodeRange(ord(s1), ord(s2) + 1)
        result.str = "Range(%s,%s)" % (s1, s2)
    return result
def Opt(re):
    """
    Opt(re) is an RE which matches either |re| or the empty string.
    """
    expr = Alt(re, Empty)
    expr.str = "Opt(%s)" % re
    return expr


def Rep(re):
    """
    Rep(re) is an RE which matches zero or more repetitions of |re|.
    """
    # Zero-or-more is "optional one-or-more".
    expr = Opt(Rep1(re))
    expr.str = "Rep(%s)" % re
    return expr


def NoCase(re):
    """
    NoCase(re) is an RE which matches the same strings as RE, but treating
    upper and lower case letters as equivalent.
    """
    return SwitchCase(re, nocase = 1)


def Case(re):
    """
    Case(re) is an RE which matches the same strings as RE, but treating
    upper and lower case letters as distinct, i.e. it cancels the effect
    of any enclosing NoCase().
    """
    return SwitchCase(re, nocase = 0)
#
# RE Constants
#
# Position-marker REs: Char() applied to the special BOL/EOL/EOF codes
# (defined elsewhere in this module) rather than to ordinary characters.
Bol = Char(BOL)
Bol.__doc__ = \
"""
Bol is an RE which matches the beginning of a line.
"""
Bol.str = "Bol"

Eol = Char(EOL)
Eol.__doc__ = \
"""
Eol is an RE which matches the end of a line.
"""
Eol.str = "Eol"

Eof = Char(EOF)
Eof.__doc__ = \
"""
Eof is an RE which matches the end of the file.
"""
Eof.str = "Eof"
| Python |
#=======================================================================
#
# Python Lexical Analyser
#
# Traditional Regular Expression Syntax
#
#=======================================================================
from Regexps import *
from Errors import PlexError
class RegexpSyntaxError(PlexError):
    """Raised when the traditional-syntax regexp string is malformed."""
    pass


def re(s):
    """
    Convert traditional string representation of regular expression |s|
    into Plex representation.
    """
    # Deliberately shares its name with the stdlib 're' module: it is the
    # public conversion entry point of this module.
    return REParser(s).parse_re()
class REParser:
    """
    Recursive-descent parser converting a traditional regular expression
    string into a Plex RE, using one character of lookahead.

    Attributes:
      s   -- the regexp string being parsed
      i   -- index of the current character
      c   -- current character ('' once the input is exhausted)
      end -- true when the input is exhausted

    (Modernised: '<>' -> '!=', apply() -> extended call syntax,
    string.join() -> str.join(), xrange -> range; behaviour unchanged.)
    """

    def __init__(self, s):
        self.s = s
        self.i = -1
        self.end = 0
        self.next()   # load the first character

    def parse_re(self):
        """Parse the whole string; raise if trailing input remains."""
        re = self.parse_alt()
        if not self.end:
            self.error("Unexpected %s" % repr(self.c))
        return re

    def parse_alt(self):
        """Parse a set of alternative regexps separated by '|'."""
        re = self.parse_seq()
        if self.c == '|':
            re_list = [re]
            while self.c == '|':
                self.next()
                re_list.append(self.parse_seq())
            re = Alt(*re_list)
        return re

    def parse_seq(self):
        """Parse a sequence of regexps."""
        re_list = []
        while not self.end and self.c not in "|)":
            re_list.append(self.parse_mod())
        return Seq(*re_list)

    def parse_mod(self):
        """Parse a primitive regexp followed by *, +, ? modifiers."""
        re = self.parse_prim()
        while not self.end and self.c in "*+?":
            if self.c == '*':
                re = Rep(re)
            elif self.c == '+':
                re = Rep1(re)
            else: # self.c == '?'
                re = Opt(re)
            self.next()
        return re

    def parse_prim(self):
        """Parse a primitive regexp: ., ^, $, (...), [...] or a character."""
        c = self.get()
        if c == '.':
            re = AnyBut("\n")
        elif c == '^':
            re = Bol
        elif c == '$':
            re = Eol
        elif c == '(':
            re = self.parse_alt()
            self.expect(')')
        elif c == '[':
            re = self.parse_charset()
            self.expect(']')
        else:
            if c == '\\':
                # Backslash escapes the following character.
                c = self.get()
            re = Char(c)
        return re

    def parse_charset(self):
        """Parse a charset. Does not include the surrounding []."""
        char_list = []
        invert = 0
        if self.c == '^':
            invert = 1
            self.next()
        if self.c == ']':
            # A ']' appearing first in the set stands for itself.
            char_list.append(']')
            self.next()
        while not self.end and self.c != ']':
            c1 = self.get()
            if self.c == '-' and self.lookahead(1) != ']':
                # Expand an inclusive character range c1-c2.
                self.next()
                c2 = self.get()
                for a in range(ord(c1), ord(c2) + 1):
                    char_list.append(chr(a))
            else:
                char_list.append(c1)
        chars = "".join(char_list)
        if invert:
            return AnyBut(chars)
        else:
            return Any(chars)

    def next(self):
        """Advance to the next char."""
        s = self.s
        i = self.i = self.i + 1
        if i < len(s):
            self.c = s[i]
        else:
            self.c = ''
            self.end = 1

    def get(self):
        """Return the current char and advance; error at end of input."""
        if self.end:
            self.error("Premature end of string")
        c = self.c
        self.next()
        return c

    def lookahead(self, n):
        """Look ahead n chars."""
        j = self.i + n
        if j < len(self.s):
            return self.s[j]
        else:
            return ''

    def expect(self, c):
        """
        Expect to find character |c| at current position.
        Raises an exception otherwise.
        """
        if self.c == c:
            self.next()
        else:
            self.error("Missing %s" % repr(c))

    def error(self, mess):
        """Raise exception to signal syntax error in regexp."""
        raise RegexpSyntaxError("Syntax error in regexp %s at position %d: %s" % (
            repr(self.s), self.i, mess))
| Python |
#=======================================================================
#
# Python Lexical Analyser
#
# Converting NFA to DFA
#
#=======================================================================
import Machines
from Machines import LOWEST_PRIORITY
from Transitions import TransitionMap
def nfa_to_dfa(old_machine, debug = None):
    """
    Given a nondeterministic Machine, return a new equivalent
    Machine which is deterministic.

    old_machine -- the NFA (a Machines.Machine) to convert.
    debug       -- optional file-like object; when supplied, the
                   old-state -> new-state mapping is dumped to it.
    """
    # We build a new machine whose states correspond to sets of states
    # in the old machine. Initially we add a new state corresponding to
    # the epsilon-closure of each initial old state. Then we give transitions
    # to each new state which are the union of all transitions out of any
    # of the corresponding old states. The new state reached on a given
    # character is the one corresponding to the set of states reachable
    # on that character from any of the old states. As new combinations of
    # old states are created, new states are added as needed until closure
    # is reached.
    new_machine = Machines.FastMachine()
    state_map = StateMap(new_machine)
    # Seed the process using the initial states of the old machine.
    # Make the corresponding new states into initial states of the new
    # machine with the same names.
    for (key, old_state) in old_machine.initial_states.items():
        new_state = state_map.old_to_new(epsilon_closure(old_state))
        new_machine.make_initial_state(key, new_state)
    # Tricky bit here: we add things to the end of this list while we're
    # iterating over it. The iteration stops when closure is achieved.
    for new_state in new_machine.states:
        transitions = TransitionMap()
        # Union the non-epsilon transitions of every member old state.
        for old_state in state_map.new_to_old(new_state).keys():
            for event, old_target_states in old_state.transitions.items():
                if event and old_target_states:
                    transitions.add_set(event, set_epsilon_closure(old_target_states))
        # Map each reachable old-state set to a (possibly fresh) DFA state;
        # old_to_new() appends fresh states to new_machine.states above.
        for event, old_states in transitions.items():
            new_machine.add_transitions(new_state, event, state_map.old_to_new(old_states))
    if debug:
        debug.write("\n===== State Mapping =====\n")
        state_map.dump(debug)
    return new_machine
def set_epsilon_closure(state_set):
    """
    Given a set of states, return the union of the epsilon
    closures of its member states.
    """
    # State sets are modelled as dicts-used-as-sets ({state: 1}), so the
    # union is simply a dict update per member closure.
    closure = {}
    for member in state_set.keys():
        closure.update(epsilon_closure(member))
    return closure
def epsilon_closure(state):
    """
    Return the set of states reachable from the given state
    by epsilon moves.
    """
    # Cache the result on the state itself; the cache entry is stored
    # before it is filled in.
    result = state.epsilon_closure
    if result is None:
        result = {}
        state.epsilon_closure = result
        add_to_epsilon_closure(result, state)
    return result


def add_to_epsilon_closure(state_set, state):
    """
    Recursively add to |state_set| states reachable from the given state
    by epsilon moves.
    """
    # The membership test doubles as the recursion guard, so cycles of
    # epsilon moves terminate.
    if not state_set.get(state, 0):
        state_set[state] = 1
        state_set_2 = state.transitions.get_epsilon()
        if state_set_2:
            for state2 in state_set_2.keys():
                add_to_epsilon_closure(state_set, state2)
class StateMap:
    """
    Helper class used by nfa_to_dfa() to map back and forth between
    sets of states from the old machine and states of the new machine.
    """
    new_machine = None       # Machine
    old_to_new_dict = None   # {(old_state,...) : new_state}
    new_to_old_dict = None   # {id(new_state) : old_state_set}

    def __init__(self, new_machine):
        self.new_machine = new_machine
        self.old_to_new_dict = {}
        self.new_to_old_dict= {}

    def old_to_new(self, old_state_set):
        """
        Return the state of the new machine corresponding to the
        set of old machine states represented by |state_set|. A new
        state will be created if necessary. If any of the old states
        are accepting states, the new state will be an accepting state
        with the highest priority action from the old states.
        """
        key = self.make_key(old_state_set)
        new_state = self.old_to_new_dict.get(key, None)
        # NOTE(review): this truthiness test assumes real states are never
        # falsy; a falsy state object would be re-created here -- confirm.
        if not new_state:
            action = self.highest_priority_action(old_state_set)
            new_state = self.new_machine.new_state(action)
            self.old_to_new_dict[key] = new_state
            self.new_to_old_dict[id(new_state)] = old_state_set
            #for old_state in old_state_set.keys():
                #new_state.merge_actions(old_state)
        return new_state

    def highest_priority_action(self, state_set):
        # Scan the member states for the action with the numerically
        # greatest priority; None when no state carries an action.
        best_action = None
        best_priority = LOWEST_PRIORITY
        for state in state_set.keys():
            priority = state.action_priority
            if priority > best_priority:
                best_action = state.action
                best_priority = priority
        return best_action

#	def old_to_new_set(self, old_state_set):
#		"""
#		Return the new state corresponding to a set of old states as
#		a singleton set.
#		"""
#		return {self.old_to_new(old_state_set):1}

    def new_to_old(self, new_state):
        """Given a new state, return a set of corresponding old states."""
        return self.new_to_old_dict[id(new_state)]

    def make_key(self, state_set):
        """
        Convert a set of states into a uniquified
        sorted tuple suitable for use as a dictionary key.
        """
        # Python 2: keys() returns a list, sorted in place.
        lst = state_set.keys()
        lst.sort()
        return tuple(lst)

    def dump(self, file):
        """Write a human-readable new-state <- old-states table to |file|."""
        from Transitions import state_set_str
        for new_state in self.new_machine.states:
            old_state_set = self.new_to_old_dict[id(new_state)]
            file.write(" State %s <-- %s\n" % (
                new_state['number'], state_set_str(old_state_set)))
| Python |
#
# Get time in platform-dependent way
#
import os
from sys import platform, exit, stderr
# Select a time() implementation for the current platform; |timekind|
# records whether it measures wall-clock ("real") or CPU time ("cpu").
if platform == 'mac':
    import MacOS
    def time():
        # Classic MacOS ticks are 60ths of a second of real time.
        return MacOS.GetTicks() / 60.0
    timekind = "real"
elif hasattr(os, 'times'):
    def time():
        # Sum of user and system CPU time for this process.
        t = os.times()
        return t[0] + t[1]
    timekind = "cpu"
else:
    # No usable clock: report and abort at import time.
    stderr.write(
        "Don't know how to get time on platform %s\n" % repr(platform))
    exit(1)
| Python |
#=======================================================================
#
# Python Lexical Analyser
#
#=======================================================================
"""
The Plex module provides lexical analysers with similar capabilities
to GNU Flex. The following classes and functions are exported;
see the attached docstrings for more information.
Scanner For scanning a character stream under the
direction of a Lexicon.
Lexicon For constructing a lexical definition
to be used by a Scanner.
Str, Any, AnyBut, AnyChar, Seq, Alt, Opt, Rep, Rep1,
Bol, Eol, Eof, Empty
Regular expression constructors, for building pattern
definitions for a Lexicon.
State For defining scanner states when creating a
Lexicon.
TEXT, IGNORE, Begin
Actions for associating with patterns when
creating a Lexicon.
"""
from Actions import TEXT, IGNORE, Begin
from Lexicons import Lexicon, State
from Regexps import RE, Seq, Alt, Rep1, Empty, Str, Any, AnyBut, AnyChar, Range
from Regexps import Opt, Rep, Bol, Eol, Eof, Case, NoCase
from Scanners import Scanner
| Python |
#=======================================================================
#
# Python Lexical Analyser
#
# Lexical Analyser Specification
#
#=======================================================================
import types
import Actions
import DFA
import Errors
import Machines
import Regexps
# debug_flags for Lexicon constructor: bit-mask values, OR them together.
# Tested via (debug_flags & 1) / (debug_flags & 2) in Lexicon.__init__.
DUMP_NFA = 1   # dump the constructed NFA to the debug file
DUMP_DFA = 2   # dump the converted DFA to the debug file
class State:
    """
    This class is used as part of a Plex.Lexicon specification to
    introduce a user-defined state.

    Constructor:

       State(name, token_specifications)
    """

    # Class-level defaults; filled in per instance by the constructor.
    name = None
    tokens = None

    def __init__(self, name, tokens):
        self.name, self.tokens = name, tokens
class Lexicon:
    """
    Lexicon(specification) builds a lexical analyser from the given
    |specification|. The specification consists of a list of
    specification items. Each specification item may be either:

       1) A token definition, which is a tuple:

             (pattern, action)

          The |pattern| is a regular expression built using the
          constructors defined in the Plex module.

          The |action| is the action to be performed when this pattern
          is recognised (see below).

       2) A state definition:

             State(name, tokens)

          where |name| is a character string naming the state,
          and |tokens| is a list of token definitions as
          above. The meaning and usage of states is described
          below.

    Actions
    -------

    The |action| in a token specification may be one of three things:

       1) A function, which is called as follows:

             function(scanner, text)

          where |scanner| is the relevant Scanner instance, and |text|
          is the matched text. If the function returns anything
          other than None, that value is returned as the value of the
          token. If it returns None, scanning continues as if the IGNORE
          action were specified (see below).

       2) One of the following special actions:

          IGNORE means that the recognised characters will be treated as
                 white space and ignored. Scanning will continue until
                 the next non-ignored token is recognised before returning.

          TEXT   causes the scanned text itself to be returned as the
                 value of the token.

       3) Any other value, which is returned as the value of the token.

    States
    ------

    At any given time, the scanner is in one of a number of states.
    Associated with each state is a set of possible tokens. When scanning,
    only tokens associated with the current state are recognised.

    There is a default state, whose name is the empty string. Token
    definitions which are not inside any State definition belong to
    the default state.

    The initial state of the scanner is the default state. The state can
    be changed in one of two ways:

       1) Using Begin(state_name) as the action of a token.

       2) Calling the begin(state_name) method of the Scanner.

    To change back to the default state, use '' as the state name.
    """

    machine = None   # Machine
    tables = None    # StateTableMachine

    def __init__(self, specifications, debug = None, debug_flags = 7, timings = None):
        if type(specifications) <> types.ListType:
            raise Errors.InvalidScanner("Scanner definition is not a list")
        if timings:
            from Timing import time
            total_time = 0.0
            time1 = time()
        # Build an NFA with one initial state per scanner state; tokens
        # outside any State() hang off the default ('') initial state.
        nfa = Machines.Machine()
        default_initial_state = nfa.new_initial_state('')
        token_number = 1
        for spec in specifications:
            if isinstance(spec, State):
                user_initial_state = nfa.new_initial_state(spec.name)
                for token in spec.tokens:
                    self.add_token_to_machine(
                        nfa, user_initial_state, token, token_number)
                    token_number = token_number + 1
            elif type(spec) == types.TupleType:
                self.add_token_to_machine(
                    nfa, default_initial_state, spec, token_number)
                token_number = token_number + 1
            else:
                raise Errors.InvalidToken(
                    token_number,
                    "Expected a token definition (tuple) or State instance")
        if timings:
            time2 = time()
            total_time = total_time + (time2 - time1)
            time3 = time()
        if debug and (debug_flags & 1):
            debug.write("\n============= NFA ===========\n")
            nfa.dump(debug)
        # Determinise; the debug file is passed through only when both
        # the NFA and DFA dump bits (1|2) are set.
        dfa = DFA.nfa_to_dfa(nfa, debug = (debug_flags & 3) == 3 and debug)
        if timings:
            time4 = time()
            total_time = total_time + (time4 - time3)
        if debug and (debug_flags & 2):
            debug.write("\n============= DFA ===========\n")
            dfa.dump(debug)
        if timings:
            # time1..time4 are bound only on the |timings| path, which is
            # the only path that reaches these lines.
            timings.write("Constructing NFA : %5.2f\n" % (time2 - time1))
            timings.write("Converting to DFA: %5.2f\n" % (time4 - time3))
            timings.write("TOTAL : %5.2f\n" % total_time)
        self.machine = dfa

    def add_token_to_machine(self, machine, initial_state, token_spec, token_number):
        """Compile one (pattern, action) pair into states of |machine|."""
        try:
            (re, action_spec) = self.parse_token_definition(token_spec)
            # Disabled this -- matching empty strings can be useful
            #if re.nullable:
            #	raise Errors.InvalidToken(
            #		token_number, "Pattern can match 0 input symbols")
            if isinstance(action_spec, Actions.Action):
                action = action_spec
            elif callable(action_spec):
                # Plain callables are wrapped so the scanner can invoke them.
                action = Actions.Call(action_spec)
            else:
                # Any other value is returned verbatim as the token value.
                action = Actions.Return(action_spec)
            final_state = machine.new_state()
            re.build_machine(machine, initial_state, final_state,
                             match_bol = 1, nocase = 0)
            # Earlier tokens win: priority decreases as token_number grows.
            final_state.set_action(action, priority = -token_number)
        except Errors.PlexError, e:
            # Re-raise with the offending token number prepended.
            raise e.__class__("Token number %d: %s" % (token_number, e))

    def parse_token_definition(self, token_spec):
        """Validate a token tuple and split it into (pattern, action)."""
        if type(token_spec) <> types.TupleType:
            raise Errors.InvalidToken("Token definition is not a tuple")
        if len(token_spec) <> 2:
            raise Errors.InvalidToken("Wrong number of items in token definition")
        pattern, action = token_spec
        if not isinstance(pattern, Regexps.RE):
            raise Errors.InvalidToken("Pattern is not an RE instance")
        return (pattern, action)

    def get_initial_state(self, name):
        """Return the DFA initial state for scanner state |name|."""
        return self.machine.get_initial_state(name)
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    # Locate the directory containing this file; __file__ is unset when
    # run as the main script, hence the argv[0] fallback.
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # Walk upwards until a path component named |part| is found.
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)
    pypy_root = os.path.join(head, '')
    # Move the package root's parent to the front of sys.path.
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)
    # Re-register already-imported top-level modules under their fully
    # dotted names so both spellings refer to the same module object.
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): a basename never contains '.', so this startswith
        # test looks like it skips almost everything -- confirm intent.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            # Make the module reachable as an attribute of its parent
            # package as well.
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir

def __clone():
    """ clone master version of autopath.py into all subdirs """
    from os.path import join, walk
    if not this_dir.endswith(join('pypy','tool')):
        raise EnvironmentError("can only clone master version "
                               "'%s'" % join(pypydir, 'tool',_myname))
    def sync_walker(arg, dirname, fnames):
        # os.path.walk callback: rewrite any autopath.py whose content
        # differs from the master copy passed in as |arg|.
        if _myname in fnames:
            fn = join(dirname, _myname)
            # NOTE(review): 'rwb+' is not a standard mode string -- verify.
            f = open(fn, 'rwb+')
            try:
                if f.read() == arg:
                    print "checkok", fn
                else:
                    print "syncing", fn
                    f = open(fn, 'w')
                    f.write(arg)
            finally:
                f.close()
    s = open(join(pypydir, 'tool', _myname), 'rb').read()
    walk(pypydir, sync_walker, s)

_myname = 'autopath.py'

# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    __clone()
| Python |
#! /usr/bin/env python
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
# Default number of benchmark passes.
LOOPS = 50000

# use a global instance instead of globals
class G:pass
g = G()

import sys
from time import clock

__version__ = "1.1"

# Enumeration constants 1..5, mirroring the original Ada/C benchmark.
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
    """Benchmark record type: a plain data holder with a shallow copy()."""

    def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
                 IntComp = 0, StringComp = ""):
        self.PtrComp = PtrComp
        self.Discr = Discr
        self.EnumComp = EnumComp
        self.IntComp = IntComp
        self.StringComp = StringComp

    def copy(self):
        """Return a new Record carrying the same five field values."""
        duplicate = Record(self.PtrComp, self.Discr, self.EnumComp,
                           self.IntComp, self.StringComp)
        return duplicate
TRUE = 1
FALSE = 0

def main(loops=LOOPS):
    """Run the benchmark and print the pystones/second rating."""
    benchtime, stones = pystones(abs(loops))
    # A negative loop count benchmarks |loops| passes but prints nothing.
    if loops >= 0:
        print "Pystone(%s) time for %d passes = %g" % \
              (__version__, loops, benchtime)
        print "This machine benchmarks at %g pystones/second" % stones

def pystones(loops=LOOPS):
    """Return (benchtime, pystones_per_second) for |loops| passes."""
    return Proc0(loops)

# Module-global benchmark state, kept on the single G instance |g|
# rather than as module-level globals.
g.IntGlob = 0
g.BoolGlob = FALSE
g.Char1Glob = '\0'
g.Char2Glob = '\0'
g.Array1Glob = [0]*51
# 51x51 matrix: each row is copied so the rows are independent lists.
g.Array2Glob = map(lambda x: x[:], [g.Array1Glob]*51)
g.PtrGlb = None
g.PtrGlbNext = None
def Proc0(loops=LOOPS):
    """Main benchmark driver: run |loops| passes, return (benchtime, rate)."""
    #global IntGlob
    #global BoolGlob
    #global Char1Glob
    #global Char2Glob
    #global Array1Glob
    #global Array2Glob
    #global PtrGlb
    #global PtrGlbNext
    # First, time an empty loop of the same shape to subtract loop overhead.
    starttime = clock()
    #for i in range(loops):
    # this is bad with very large values of loops
    # XXX xrange support?
    i = 0
    while i < loops:
        i += 1
    # the above is most likely to vanish in C :-(
    nulltime = clock() - starttime
    # Set up the global record chain and arrays used by the Procs.
    g.PtrGlbNext = Record()
    g.PtrGlb = Record()
    g.PtrGlb.PtrComp = g.PtrGlbNext
    g.PtrGlb.Discr = Ident1
    g.PtrGlb.EnumComp = Ident3
    g.PtrGlb.IntComp = 40
    g.PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
    String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
    g.Array2Glob[8][7] = 10
    EnumLoc = None # addition for flow space
    starttime = clock()
    #for i in range(loops):
    # this is bad with very large values of loops
    # XXX xrange support?
    i = 0
    while i < loops:
        # One benchmark pass: the fixed mix of Proc/Func calls from the
        # original Dhrystone-derived benchmark.
        Proc5()
        Proc4()
        IntLoc1 = 2
        IntLoc2 = 3
        String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
        EnumLoc = Ident2
        g.BoolGlob = not Func2(String1Loc, String2Loc)
        while IntLoc1 < IntLoc2:
            IntLoc3 = 5 * IntLoc1 - IntLoc2
            IntLoc3 = Proc7(IntLoc1, IntLoc2)
            IntLoc1 = IntLoc1 + 1
        Proc8(g.Array1Glob, g.Array2Glob, IntLoc1, IntLoc3)
        g.PtrGlb = Proc1(g.PtrGlb)
        CharIndex = 'A'
        while CharIndex <= g.Char2Glob:
            if EnumLoc == Func1(CharIndex, 'C'):
                EnumLoc = Proc6(Ident1)
            CharIndex = chr(ord(CharIndex)+1)
        IntLoc3 = IntLoc2 * IntLoc1
        IntLoc2 = IntLoc3 / IntLoc1
        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
        IntLoc1 = Proc2(IntLoc1)
        i += 1
    # Loop overhead measured above is subtracted from the elapsed time.
    benchtime = clock() - starttime - nulltime
    if benchtime < 1E-8:
        benchtime = 1E-8 # time too short, meaningless results anyway
    return benchtime, (loops / benchtime)
def Proc1(PtrParIn):
    """Exercise record copying and pointer-style aliasing on PtrParIn."""
    # NextRecord aliases PtrParIn.PtrComp (both name the fresh copy).
    PtrParIn.PtrComp = NextRecord = g.PtrGlb.copy()
    PtrParIn.IntComp = 5
    NextRecord.IntComp = PtrParIn.IntComp
    NextRecord.PtrComp = PtrParIn.PtrComp
    NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
    if NextRecord.Discr == Ident1:
        NextRecord.IntComp = 6
        NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
        NextRecord.PtrComp = g.PtrGlb.PtrComp
        NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
    else:
        PtrParIn = NextRecord.copy()
    # Break the self-reference so the record chain is not cyclic
    # (the version 1.1 memory-leak fix noted in the module docstring).
    NextRecord.PtrComp = None
    return PtrParIn

def Proc2(IntParIO):
    """Adjust IntParIO based on the global Char1Glob / IntGlob state."""
    IntLoc = IntParIO + 10
    EnumLoc = None # addition for flow space
    # NOTE: terminates only via the Char1Glob == 'A' branch setting
    # EnumLoc; Proc5() sets Char1Glob to 'A' before each pass.
    while 1:
        if g.Char1Glob == 'A':
            IntLoc = IntLoc - 1
            IntParIO = IntLoc - g.IntGlob
            EnumLoc = Ident1
        if EnumLoc == Ident1:
            break
    return IntParIO

def Proc3(PtrParOut):
    """Return PtrGlb's pointer component; update IntGlob/IntComp globals."""
    #global IntGlob
    if g.PtrGlb is not None:
        PtrParOut = g.PtrGlb.PtrComp
    else:
        g.IntGlob = 100
    # NOTE(review): when g.PtrGlb is None this line would raise
    # AttributeError; in practice Proc0 assigns PtrGlb first -- confirm.
    g.PtrGlb.IntComp = Proc7(10, g.IntGlob)
    return PtrParOut

def Proc4():
    """Set Char2Glob to 'B'; the boolean computation is intentionally unused."""
    #global Char2Glob
    BoolLoc = g.Char1Glob == 'A'
    BoolLoc = BoolLoc or g.BoolGlob
    g.Char2Glob = 'B'

def Proc5():
    """Reset Char1Glob and BoolGlob to their per-pass starting values."""
    #global Char1Glob
    #global BoolGlob
    g.Char1Glob = 'A'
    g.BoolGlob = FALSE

def Proc6(EnumParIn):
    """Map an enumeration value to another, per the benchmark's fixed table."""
    EnumParOut = EnumParIn
    if not Func3(EnumParIn):
        EnumParOut = Ident4
    if EnumParIn == Ident1:
        EnumParOut = Ident1
    elif EnumParIn == Ident2:
        if g.IntGlob > 100:
            EnumParOut = Ident1
        else:
            EnumParOut = Ident4
    elif EnumParIn == Ident3:
        EnumParOut = Ident2
    elif EnumParIn == Ident4:
        pass
    elif EnumParIn == Ident5:
        EnumParOut = Ident3
    return EnumParOut
def Proc7(IntParI1, IntParI2):
    """Return IntParI2 + (IntParI1 + 2), as in the original C benchmark."""
    increment = IntParI1 + 2
    return IntParI2 + increment
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
    """Exercise 1-D and 2-D array indexing; sets IntGlob to 5."""
    #global IntGlob
    IntLoc = IntParI1 + 5
    Array1Par[IntLoc] = IntParI2
    Array1Par[IntLoc+1] = Array1Par[IntLoc]
    Array1Par[IntLoc+30] = IntLoc
    for IntIndex in range(IntLoc, IntLoc+2):
        Array2Par[IntLoc][IntIndex] = IntLoc
    Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
    Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
    g.IntGlob = 5

def Func1(CharPar1, CharPar2):
    """Return Ident1 when the two characters differ, else Ident2."""
    CharLoc1 = CharPar1
    CharLoc2 = CharLoc1
    if CharLoc2 != CharPar2:
        return Ident1
    else:
        return Ident2

def Func2(StrParI1, StrParI2):
    """String-comparison exercise; returns TRUE/FALSE per the benchmark."""
    IntLoc = 1
    # NOTE(review): if the first Func1 call never returns Ident1, CharLoc
    # stays unbound and the comparisons below would raise -- this mirrors
    # the original benchmark's control flow; confirm inputs guarantee it.
    while IntLoc <= 1:
        if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
            CharLoc = 'A'
            IntLoc = IntLoc + 1
    if CharLoc >= 'W' and CharLoc <= 'Z':
        IntLoc = 7
    if CharLoc == 'X':
        return TRUE
    else:
        if StrParI1 > StrParI2:
            IntLoc = IntLoc + 7
            return TRUE
        else:
            return FALSE

def Func3(EnumParIn):
    """Return TRUE iff the enumeration value is Ident3."""
    EnumLoc = EnumParIn
    if EnumLoc == Ident3: return TRUE
    return FALSE

def error(msg):
    """Print an error plus usage to stderr and exit with status 100."""
    print >> sys.stderr, msg,
    print >> sys.stderr, "usage: %s [number_of_loops]" % sys.argv[0]
    sys.exit(100)

def entrypoint(loops=None):
    """Command-line entry point; parses the optional loop-count argument."""
    import string # just a little test
    print string.replace("import works", "s", "x")
    if loops is None:
        loops = LOOPS # initialize early, for slow space
        nargs = len(sys.argv) - 1
        if nargs > 1:
            error("%d arguments are too many;" % nargs)
        elif nargs == 1:
            try: loops = int(sys.argv[1])
            except ValueError:
                error("Invalid argument %r;" % sys.argv[1])
        else:
            # Under PyPy object spaces a much smaller pass count is used.
            if hasattr(sys, 'pypy_objspaceclass'):
                loops = LOOPS / 2000 # XXX rough estimate, adjust
    main(loops)

if __name__ == '__main__':
    entrypoint()
| Python |
#! /usr/bin/env python
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
# Default number of benchmark passes.
LOOPS = 50000

# use a global instance instead of globals
class G:pass
g = G()

import sys
from time import clock

__version__ = "1.1"

# Enumeration constants 1..5, mirroring the original Ada/C benchmark.
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
    """Benchmark record type: a plain data holder with a shallow copy()."""

    def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
                 IntComp = 0, StringComp = ""):
        self.PtrComp = PtrComp
        self.Discr = Discr
        self.EnumComp = EnumComp
        self.IntComp = IntComp
        self.StringComp = StringComp

    def copy(self):
        """Return a new Record carrying the same five field values."""
        duplicate = Record(self.PtrComp, self.Discr, self.EnumComp,
                           self.IntComp, self.StringComp)
        return duplicate
TRUE = 1
FALSE = 0

def main(loops=LOOPS):
    """Run the benchmark and print the pystones/second rating."""
    benchtime, stones = pystones(abs(loops))
    # A negative loop count benchmarks |loops| passes but prints nothing.
    if loops >= 0:
        print "Pystone(%s) time for %d passes = %g" % \
              (__version__, loops, benchtime)
        print "This machine benchmarks at %g pystones/second" % stones

def pystones(loops=LOOPS):
    """Return (benchtime, pystones_per_second) for |loops| passes."""
    return Proc0(loops)

# Module-global benchmark state, kept on the single G instance |g|
# rather than as module-level globals.
g.IntGlob = 0
g.BoolGlob = FALSE
g.Char1Glob = '\0'
g.Char2Glob = '\0'
g.Array1Glob = [0]*51
# 51x51 matrix: each row is copied so the rows are independent lists.
g.Array2Glob = map(lambda x: x[:], [g.Array1Glob]*51)
g.PtrGlb = None
g.PtrGlbNext = None
def Proc0(loops=LOOPS):
    """Main benchmark driver: run |loops| passes, return (benchtime, rate)."""
    #global IntGlob
    #global BoolGlob
    #global Char1Glob
    #global Char2Glob
    #global Array1Glob
    #global Array2Glob
    #global PtrGlb
    #global PtrGlbNext
    # First, time an empty loop of the same shape to subtract loop overhead.
    starttime = clock()
    #for i in range(loops):
    # this is bad with very large values of loops
    # XXX xrange support?
    i = 0
    while i < loops:
        i += 1
    # the above is most likely to vanish in C :-(
    nulltime = clock() - starttime
    # Set up the global record chain and arrays used by the Procs.
    g.PtrGlbNext = Record()
    g.PtrGlb = Record()
    g.PtrGlb.PtrComp = g.PtrGlbNext
    g.PtrGlb.Discr = Ident1
    g.PtrGlb.EnumComp = Ident3
    g.PtrGlb.IntComp = 40
    g.PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
    String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
    g.Array2Glob[8][7] = 10
    EnumLoc = None # addition for flow space
    starttime = clock()
    #for i in range(loops):
    # this is bad with very large values of loops
    # XXX xrange support?
    i = 0
    while i < loops:
        # One benchmark pass: the fixed mix of Proc/Func calls from the
        # original Dhrystone-derived benchmark.
        Proc5()
        Proc4()
        IntLoc1 = 2
        IntLoc2 = 3
        String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
        EnumLoc = Ident2
        g.BoolGlob = not Func2(String1Loc, String2Loc)
        while IntLoc1 < IntLoc2:
            IntLoc3 = 5 * IntLoc1 - IntLoc2
            IntLoc3 = Proc7(IntLoc1, IntLoc2)
            IntLoc1 = IntLoc1 + 1
        Proc8(g.Array1Glob, g.Array2Glob, IntLoc1, IntLoc3)
        g.PtrGlb = Proc1(g.PtrGlb)
        CharIndex = 'A'
        while CharIndex <= g.Char2Glob:
            if EnumLoc == Func1(CharIndex, 'C'):
                EnumLoc = Proc6(Ident1)
            CharIndex = chr(ord(CharIndex)+1)
        IntLoc3 = IntLoc2 * IntLoc1
        IntLoc2 = IntLoc3 / IntLoc1
        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
        IntLoc1 = Proc2(IntLoc1)
        i += 1
    # Loop overhead measured above is subtracted from the elapsed time.
    benchtime = clock() - starttime - nulltime
    if benchtime < 1E-8:
        benchtime = 1E-8 # time too short, meaningless results anyway
    return benchtime, (loops / benchtime)
def Proc1(PtrParIn):
    """Exercise record copying and pointer-style aliasing on PtrParIn."""
    # NextRecord aliases PtrParIn.PtrComp (both name the fresh copy).
    PtrParIn.PtrComp = NextRecord = g.PtrGlb.copy()
    PtrParIn.IntComp = 5
    NextRecord.IntComp = PtrParIn.IntComp
    NextRecord.PtrComp = PtrParIn.PtrComp
    NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
    if NextRecord.Discr == Ident1:
        NextRecord.IntComp = 6
        NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
        NextRecord.PtrComp = g.PtrGlb.PtrComp
        NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
    else:
        PtrParIn = NextRecord.copy()
    # Break the self-reference so the record chain is not cyclic
    # (the version 1.1 memory-leak fix noted in the module docstring).
    NextRecord.PtrComp = None
    return PtrParIn

def Proc2(IntParIO):
    """Adjust IntParIO based on the global Char1Glob / IntGlob state."""
    IntLoc = IntParIO + 10
    EnumLoc = None # addition for flow space
    # NOTE: terminates only via the Char1Glob == 'A' branch setting
    # EnumLoc; Proc5() sets Char1Glob to 'A' before each pass.
    while 1:
        if g.Char1Glob == 'A':
            IntLoc = IntLoc - 1
            IntParIO = IntLoc - g.IntGlob
            EnumLoc = Ident1
        if EnumLoc == Ident1:
            break
    return IntParIO

def Proc3(PtrParOut):
    """Return PtrGlb's pointer component; update IntGlob/IntComp globals."""
    #global IntGlob
    if g.PtrGlb is not None:
        PtrParOut = g.PtrGlb.PtrComp
    else:
        g.IntGlob = 100
    # NOTE(review): when g.PtrGlb is None this line would raise
    # AttributeError; in practice Proc0 assigns PtrGlb first -- confirm.
    g.PtrGlb.IntComp = Proc7(10, g.IntGlob)
    return PtrParOut

def Proc4():
    """Set Char2Glob to 'B'; the boolean computation is intentionally unused."""
    #global Char2Glob
    BoolLoc = g.Char1Glob == 'A'
    BoolLoc = BoolLoc or g.BoolGlob
    g.Char2Glob = 'B'

def Proc5():
    """Reset Char1Glob and BoolGlob to their per-pass starting values."""
    #global Char1Glob
    #global BoolGlob
    g.Char1Glob = 'A'
    g.BoolGlob = FALSE

def Proc6(EnumParIn):
    """Map an enumeration value to another, per the benchmark's fixed table."""
    EnumParOut = EnumParIn
    if not Func3(EnumParIn):
        EnumParOut = Ident4
    if EnumParIn == Ident1:
        EnumParOut = Ident1
    elif EnumParIn == Ident2:
        if g.IntGlob > 100:
            EnumParOut = Ident1
        else:
            EnumParOut = Ident4
    elif EnumParIn == Ident3:
        EnumParOut = Ident2
    elif EnumParIn == Ident4:
        pass
    elif EnumParIn == Ident5:
        EnumParOut = Ident3
    return EnumParOut
def Proc7(IntParI1, IntParI2):
    """Return IntParI2 + (IntParI1 + 2), as in the original C benchmark."""
    increment = IntParI1 + 2
    return IntParI2 + increment
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
    """Exercise 1-D and 2-D array indexing; sets IntGlob to 5."""
    #global IntGlob
    IntLoc = IntParI1 + 5
    Array1Par[IntLoc] = IntParI2
    Array1Par[IntLoc+1] = Array1Par[IntLoc]
    Array1Par[IntLoc+30] = IntLoc
    for IntIndex in range(IntLoc, IntLoc+2):
        Array2Par[IntLoc][IntIndex] = IntLoc
    Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
    Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
    g.IntGlob = 5

def Func1(CharPar1, CharPar2):
    """Return Ident1 when the two characters differ, else Ident2."""
    CharLoc1 = CharPar1
    CharLoc2 = CharLoc1
    if CharLoc2 != CharPar2:
        return Ident1
    else:
        return Ident2

def Func2(StrParI1, StrParI2):
    """String-comparison exercise; returns TRUE/FALSE per the benchmark."""
    IntLoc = 1
    # NOTE(review): if the first Func1 call never returns Ident1, CharLoc
    # stays unbound and the comparisons below would raise -- this mirrors
    # the original benchmark's control flow; confirm inputs guarantee it.
    while IntLoc <= 1:
        if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
            CharLoc = 'A'
            IntLoc = IntLoc + 1
    if CharLoc >= 'W' and CharLoc <= 'Z':
        IntLoc = 7
    if CharLoc == 'X':
        return TRUE
    else:
        if StrParI1 > StrParI2:
            IntLoc = IntLoc + 7
            return TRUE
        else:
            return FALSE

def Func3(EnumParIn):
    """Return TRUE iff the enumeration value is Ident3."""
    EnumLoc = EnumParIn
    if EnumLoc == Ident3: return TRUE
    return FALSE

def error(msg):
    """Print an error plus usage to stderr and exit with status 100."""
    print >> sys.stderr, msg,
    print >> sys.stderr, "usage: %s [number_of_loops]" % sys.argv[0]
    sys.exit(100)

def entrypoint(loops=None):
    """Command-line entry point; parses the optional loop-count argument."""
    import string # just a little test
    print string.replace("import works", "s", "x")
    if loops is None:
        loops = LOOPS # initialize early, for slow space
        nargs = len(sys.argv) - 1
        if nargs > 1:
            error("%d arguments are too many;" % nargs)
        elif nargs == 1:
            try: loops = int(sys.argv[1])
            except ValueError:
                error("Invalid argument %r;" % sys.argv[1])
        else:
            # Under PyPy object spaces a much smaller pass count is used.
            if hasattr(sys, 'pypy_objspaceclass'):
                loops = LOOPS / 2000 # XXX rough estimate, adjust
    main(loops)

if __name__ == '__main__':
    entrypoint()
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ is unset when the module is exec'd; fall back to argv[0]
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # walk upwards until a directory named *part* is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)
    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    # move the parent of the pypy tree to the front of sys.path
    sys.path.insert(0, head)
    # re-register already-imported top-level modules that physically
    # live inside the pypy tree under their fully dotted module name
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                # full dotted name exists but attribute is missing
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
""" clone master version of autopath.py into all subdirs """
from os.path import join, walk
if not this_dir.endswith(join('pypy','tool')):
raise EnvironmentError("can only clone master version "
"'%s'" % join(pypydir, 'tool',_myname))
def sync_walker(arg, dirname, fnames):
if _myname in fnames:
fn = join(dirname, _myname)
f = open(fn, 'rwb+')
try:
if f.read() == arg:
print "checkok", fn
else:
print "syncing", fn
f = open(fn, 'w')
f.write(arg)
finally:
f.close()
s = open(join(pypydir, 'tool', _myname), 'rb').read()
walk(pypydir, sync_walker, s)
# name under which this helper is cloned into subdirectories
_myname = 'autopath.py'
# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')
if __name__ == '__main__':
    # running the master copy directly re-clones it everywhere
    __clone()
| Python |
"""Snippets for translation
This module holds various snippets, to be used by translator
unittests.
We define argument types as default arguments to the snippet
functions.
"""
# Type-declaration tuples used as argument defaults throughout this
# module: the translator's annotator reads them as "this parameter may
# be any of these types".
numtype = (int, float)
anytype = (int, float, str)
seqtype = (list, tuple)
def if_then_else(cond=anytype, x=anytype, y=anytype):
if cond:
return x
else:
return y
def my_gcd(a=numtype, b=numtype):
    # Euclid's algorithm; the defaults declare annotator input types
    # (this module is translator test input -- keep the shape as-is).
    r = a % b
    while r:
        a = b
        b = r
        r = a % b
    return b
def is_perfect_number(n=int):
    # True iff n equals the sum of its proper divisors (6, 28, ...).
    div = 1
    sum = 0
    while div < n:
        if n % div == 0:
            sum += div
        div += 1
    return n == sum
def my_bool(x=int):
return not not x
def my_contains(seq=seqtype, elem=anytype):
return elem in seq
def is_one_or_two(n=int):
return n in [1, 2]
def two_plus_two():
"""Array test"""
array = [0] * 3
array[0] = 2
array[1] = 2
array[2] = array[0] + array[1]
return array[2]
def get_set_del_slice(l=list):
del l[:1]
del l[-1:]
del l[2:4]
l[:1] = [3]
l[-1:] = [9]
l[2:4] = [8,11]
return l[:2], l[5:], l[3:5]
def sieve_of_eratosthenes():
    """Sieve of Eratosthenes
    This one is from an infamous benchmark, "The Great Computer
    Language Shootout".
    URL is: http://www.bagley.org/~doug/shootout/

    Returns the number of primes <= 8192.
    """
    flags = [True] * (8192+1)
    count = 0
    i = 2
    while i <= 8192:
        if flags[i]:
            # i is prime: mark every multiple of i as composite
            k = i + i
            while k <= 8192:
                flags[k] = False
                k = k + i
            count = count + 1
        i = i + 1
    return count
def simple_func(i=numtype):
return i + 1
def while_func(i=numtype):
total = 0
while i > 0:
total = total + i
i = i - 1
return total
def nested_whiles(i=int, j=int):
s = ''
z = 5
while z > 0:
z = z - 1
u = i
while u < j:
u = u + 1
s = s + '.'
s = s + '!'
return s
def poor_man_range(i=int):
lst = []
while i > 0:
i = i - 1
lst.append(i)
lst.reverse()
return lst
def poor_man_rev_range(i=int):
lst = []
while i > 0:
i = i - 1
lst += [i]
return lst
def simple_id(x=anytype):
return x
def branch_id(cond=anytype, a=anytype, b=anytype):
while 1:
if cond:
return a
else:
return b
def builtinusage():
return pow(2, 2)
def yast(lst=seqtype):
total = 0
for z in lst:
total = total + z
return total
def time_waster(n=int):
"""Arbitrary test function"""
i = 0
x = 1
while i < n:
j = 0
while j <= i:
j = j + 1
x = x + (i & j)
i = i + 1
return x
def half_of_n(n=int):
"""Slice test"""
i = 0
lst = range(n)
while lst:
lst = lst[1:-1]
i = i + 1
return i
def int_id(x=int):
i = 0
while i < x:
i = i + 1
return i
def greet(target=str):
"""String test"""
hello = "hello"
return hello + target
def choose_last():
"""For loop test"""
set = ["foo", "bar", "spam", "egg", "python"]
choice = ""
for choice in set:
pass
return choice
def poly_branch(x=int):
if x:
y = [1,2,3]
else:
y = ['a','b','c']
z = y
return z*2
def s_and(x=anytype, y=anytype):
if x and y:
return 'yes'
else:
return 'no'
def break_continue(x=numtype):
result = []
i = 0
while 1:
i = i + 1
try:
if i&1:
continue
if i >= x:
break
finally:
result.append(i)
i = i + 1
return result
def reverse_3(lst=seqtype):
try:
a, b, c = lst
except:
return 0, 0, 0
return c, b, a
def finallys(lst=seqtype):
x = 1
try:
x = 2
try:
x = 3
a, = lst
x = 4
except KeyError:
return 5
except ValueError:
return 6
b, = lst
x = 7
finally:
x = 8
return x
def finally2(o, k):
try:
o[k] += 1
finally:
o[-1] = 'done'
def bare_raise(o, ignore):
try:
return o[5]
except:
if not ignore:
raise
def factorial(n=int):
if n <= 1:
return 1
else:
return n * factorial(n-1)
def factorial2(n=int): # analysed in a different order
if n > 1:
return n * factorial2(n-1)
else:
return 1
def _append_five(lst):
lst += [5]
def call_five():
a = []
_append_five(a)
return a
def _append_six(lst):
lst += [6]
def call_five_six():
a = []
_append_five(a)
_append_six(a)
return a
def call_unpack_56():
a = call_five_six()
return len(a), a[0], a[1]
def forty_two():
return 42
def never_called():
return "booo"
def constant_result():
if forty_two():
return "yadda"
else:
return never_called()
class CallablePrebuiltConstant(object):
def __call__(self):
return 42
callable_prebuilt_constant = CallablePrebuiltConstant()
def call_cpbc():
return callable_prebuilt_constant()
class E1(Exception):
pass
class E2(Exception):
pass
def raise_choose(n):
if n == 1:
raise E1
elif n == 2:
raise E2
elif n == -1:
raise Exception
return 0
def try_raise_choose(n=int):
try:
raise_choose(n)
except E1:
return 1
except E2:
return 2
except Exception:
return -1
return 0
def do_try_raise_choose():
r = []
for n in [-1,0,1,2]:
r.append(try_raise_choose(n))
return r
# INHERITANCE / CLASS TESTS
class C(object): pass
def build_instance():
c = C()
return c
def set_attr():
c = C()
c.a = 1
c.a = 2
return c.a
def merge_setattr(x):
if x:
c = C()
c.a = 1
else:
c = C()
return c.a
class D(C): pass
class E(C): pass
def inheritance1():
d = D()
d.stuff = ()
e = E()
e.stuff = -12
e.stuff = 3
lst = [d, e]
return d.stuff, e.stuff
def inheritance2():
d = D()
d.stuff = (-12, -12)
e = E()
e.stuff = (3, "world")
return _getstuff(d), _getstuff(e)
class F:
pass
class G(F):
def m(self, x):
return self.m2(x)
def m2(self, x):
return D(), x
class H(F):
def m(self, y):
self.attr = 1
return E(), y
def knownkeysdict(b=anytype):
if b:
d = {'a': 0}
d['b'] = b
d['c'] = 'world'
else:
d = {'b': -123}
return d['b']
def generaldict(key=str, value=int, key2=str, value2=int):
d = {key: value}
d[key2] = value2
return d[key or key2]
def prime(n=int):
return len([i for i in range(1,n+1) if n%i==0]) == 2
class A0:
pass
class A1(A0):
clsattr = 123
class A2(A1):
clsattr = 456
class A3(A2):
clsattr = 789
class A4(A3):
pass
class A5(A0):
clsattr = 101112
def classattribute(flag=int):
if flag == 1:
x = A1()
elif flag == 2:
x = A2()
elif flag == 3:
x = A3()
elif flag == 4:
x = A4()
else:
x = A5()
return x.clsattr
class Z:
def my_method(self):
return self.my_attribute
class WithInit:
def __init__(self, n):
self.a = n
class WithMoreInit(WithInit):
def __init__(self, n, m):
WithInit.__init__(self, n)
self.b = m
def simple_method(v=anytype):
z = Z()
z.my_attribute = v
return z.my_method()
def with_init(v=int):
z = WithInit(v)
return z.a
def with_more_init(v=int, w=bool):
z = WithMoreInit(v, w)
if z.b:
return z.a
else:
return -z.a
global_z = Z()
global_z.my_attribute = 42
def global_instance():
return global_z.my_method()
def call_Z_my_method(z):
return z.my_method
def somepbc_simplify():
z = Z()
call_Z_my_method(global_z)
call_Z_my_method(z)
global_c = C()
global_c.a = 1
def global_newstyle_instance():
return global_c
global_rl = []
global_rl.append(global_rl)
def global_recursive_list():
return global_rl
class MI_A(object):
a = 1
class MI_B(MI_A):
b = 2
class MI_C(MI_A):
c = 3
class MI_D(MI_B, MI_C):
d = 4
def multiple_inheritance():
i = MI_D()
return i.a + i.b + i.c + i.d
class CBase(object):
pass
class CSub1(CBase):
def m(self):
self.x = 42
return self.x
class CSub2(CBase):
def m(self):
self.x = 'world'
return self.x
def methodcall_is_precise(cond):
if cond:
x = CSub1()
else:
x = CSub2()
x.m()
return CSub1().m()
def flow_type_info(i):
if isinstance(i, int):
a = i + 1
else:
a = len(str(i))
return a
def flow_usertype_info(ob):
if isinstance(ob, WithInit):
return ob
else:
return WithMoreInit(1, 2)
def flow_identity_info(x=object, y=object):
if x is None:
if y is None:
return (x, y)
else:
return (x, None)
else:
if y is None:
return (None, y)
else:
return (None, None)
def star_args(x, y, *args):
return x + args[0]
def call_star_args(z):
return star_args(z, 5, 10, 15, 20)
def call_star_args_multiple(z):
a = star_args(z, 5, 10)
b = star_args(z, 5, 10, 15)
c = star_args(z, 5, 10, 15, 20)
return a+b+c
def default_args(x, y=2, z=3L):
return x+y+z
def call_default_args(u):
return default_args(111, u)
def default_and_star_args(x, y=2, z=3, *more):
return x+y+z+len(more)
def call_default_and_star_args(u):
return (default_and_star_args(111, u),
default_and_star_args(-1000, -2000, -3000, -4000, -5000))
def call_with_star(z):
return default_args(-20, *z)
def call_with_keyword(z):
return default_args(-20, z=z)
def call_very_complex(z, args, kwds):
return default_args(-20, z=z, *args, **kwds)
def powerset(setsize=int):
"""Powerset
This one is from a Philippine Pythonista Hangout, an modified
version of Andy Sy's code.
list.append is modified to list concatenation, and powerset
is pre-allocated and stored, instead of printed.
URL is: http://lists.free.net.ph/pipermail/python/2002-November/
"""
set = range(setsize)
maxcardinality = pow(2, setsize)
bitmask = 0L
powerset = [None] * maxcardinality
ptr = 0
while bitmask < maxcardinality:
bitpos = 1L
index = 0
subset = []
while bitpos < maxcardinality:
if bitpos & bitmask:
subset = subset + [set[index]]
index += 1
bitpos <<= 1
powerset[ptr] = subset
ptr += 1
bitmask += 1
return powerset
def harmonic(n):
    """Return the n-th harmonic number H(n) = 1 + 1/2 + ... + 1/n.

    Summing from the smallest term upwards (i runs from n down to 1)
    improves floating-point accuracy.  Fix: the original body added
    ``1.0 / n`` on every iteration, so it returned 1.0 for every
    positive n instead of the harmonic number.
    """
    result = 0.0
    for i in range(n, 0, -1):
        result += 1.0 / i
    return result
# specifically for geninterp testing
def t_isinstance(x, y):
return isinstance(x, (int, long)) and isinstance(y, int)
def t_issubclass(x, y):
return issubclass(type(x), (int, long)) and issubclass(type(y), int)
def t_neg_long():
return -132L
# --------------------(Currently) Non runnable Functions ---------------------
def _somebug1(n=int):
l = []
v = l.append
while n:
l[7] = 5 # raises an exception
break
return v
def _inheritance_nonrunnable():
d = D()
d.stuff = (-12, -12)
e = E()
e.stuff = (3, "world")
return C().stuff
def _getstuff(x):
return x.stuff
# --------------------(Currently) Non compilable Functions ---------------------
class BadInit(object):
def update(self, k):
self.k = 1
def __init__(self, v):
return
self.update(**{'k':v})
def read(self):
return self.k
global_bi = BadInit(1)
def global_badinit():
return global_bi.read()
def _attrs():
def b(): pass
b.f = 4
b.g = 5
return b.f + b.g
def _methodcall1(cond):
if cond:
x = G()
else:
x = H()
return x.m(42)
def func1():
pass
def func2():
pass
def mergefunctions(cond):
if cond:
x = func1
else:
x = func2
return x
def func_producing_exception():
raise ValueError, "this might e.g. block the caller"
def funccallsex():
return func_producing_exception()
def func_arg_unpack():
a,b = 3, "hello"
return a
class APBC:
def __init__(self):
self.answer = 42
apbc = APBC()
apbc.answer = 7
def preserve_pbc_attr_on_instance(cond):
if cond:
x = APBC()
else:
x = apbc
return x.answer
class APBCS(object):
__slots__ = ['answer']
def __init__(self):
self.answer = 42
apbcs = APBCS()
apbcs.answer = 7
def preserve_pbc_attr_on_instance_with_slots(cond):
if cond:
x = APBCS()
else:
x = apbcs
return x.answer
def is_and_knowntype(x):
if x is None:
return x
else:
return None
def isinstance_and_knowntype(x):
if isinstance(x, APBC):
return x
else:
return apbc
def simple_slice(x):
return x[:10]
def simple_iter(x):
return iter(x)
def simple_zip(x,y):
return zip(x,y)
def dict_copy(d):
return d.copy()
def dict_update(x):
d = {x:x}
d.update({1:2})
return d
def dict_keys():
d = {"a" : 1}
return d.keys()
def dict_keys2():
d = {"a" : 1}
keys = d.keys()
d[1] = 12
return keys
def dict_values():
d = {"a" : "a"}
return d.values()
def dict_values2():
d = {"a" : "a"}
values = d.values()
d[1] = 12
return values
def dict_items():
d = {'a' : 1}
return d.items()
class Exc(Exception):
pass
def exception_deduction0(x):
pass
def exception_deduction():
try:
exception_deduction0(2)
except Exc, e:
return e
return Exc()
def always_raising(x):
raise ValueError
def witness(x):
pass
def exception_deduction_with_raise1(x):
try:
exception_deduction0(2)
if x:
raise Exc()
except Exc, e:
witness(e)
return e
return Exc()
def exception_deduction_with_raise2(x):
try:
exception_deduction0(2)
if x:
raise Exc
except Exc, e:
witness(e)
return e
return Exc()
def exception_deduction_with_raise3(x):
try:
exception_deduction0(2)
if x:
raise Exc, Exc()
except Exc, e:
witness(e)
return e
return Exc()
def slice_union(x):
if x:
return slice(1)
else:
return slice(0, 10, 2)
def exception_deduction_we_are_dumb():
a = 1
try:
exception_deduction0(2)
except Exc, e:
a += 1
return e
return Exc()
class Exc2(Exception):
pass
def nested_exception_deduction():
try:
exception_deduction0(1)
except Exc, e:
try:
exception_deduction0(2)
except Exc2, f:
return (e, f)
return (e, Exc2())
return (Exc(), Exc2())
class Exc3(Exception):
def m(self):
return 1
class Exc4(Exc3):
def m(self):
return 1
class Sp:
def o(self):
raise Exc3
class Mod:
def __init__(self, s):
self.s = s
def p(self):
s = self.s
try:
s.o()
except Exc3, e:
return e.m()
return 0
class Mod3:
def __init__(self, s):
self.s = s
def p(self):
s = self.s
try:
s.o()
except Exc4, e1:
return e1.m()
except Exc3, e2:
try:
return e2.m()
except Exc4, e3:
return e3.m()
return 0
mod = Mod(Sp())
mod3 = Mod3(Sp())
def exc_deduction_our_exc_plus_others():
return mod.p()
def exc_deduction_our_excs_plus_others():
return mod3.p()
def call_two_funcs_but_one_can_only_raise(n):
fn = [witness, always_raising][n]
return fn(n)
class BltinCode:
def __init__(self, func, framecls):
self.func = func
self.framecls = framecls
def call(self, x):
return self.framecls(self).run(x)
class BltinFrame:
def __init__(self, code):
self.code = code
def bltin_code_frame_f(x):
return x
def bltin_code_frame_g(x):
return x
class FBltinFrame(BltinFrame):
def run(self, x):
return self.code.func(x)
class GBltinFrame(BltinFrame):
def run(self, x):
return self.code.func(x)
bltin_code_for_f = BltinCode(bltin_code_frame_f, FBltinFrame)
bltin_code_for_g = BltinCode(bltin_code_frame_g, GBltinFrame)
def bltin_code_frame_confusion():
a = bltin_code_for_f.call(0)
a1 = bltin_code_for_f.call(1)
b = bltin_code_for_g.call("a")
b1 = bltin_code_for_g.call("b")
return (a,a1,b,b1)
# reorg
class BltinCodeReorg:
def __init__(self, framecls):
self.framecls = framecls
def call(self, x):
frame = self.framecls()
frame.set(x)
return frame.run()
class BltinFrameReorg:
def __init__(self):
pass
def set(self,x):
pass
def run(self):
pass
class FBltinFrameReorg(BltinFrameReorg):
def set(self, x):
self.arg = int(x)
def run(self):
return bltin_code_frame_f(self.arg)
class GBltinFrameReorg(BltinFrameReorg):
def set(self, x):
self.arg = str(x)
def run(self):
return bltin_code_frame_g(self.arg)
bltin_code_for_f_reorg = BltinCodeReorg(FBltinFrameReorg)
bltin_code_for_g_reorg = BltinCodeReorg(GBltinFrameReorg)
def bltin_code_frame_reorg():
a = bltin_code_for_f_reorg.call(0)
a1 = bltin_code_for_f_reorg.call(1)
b = bltin_code_for_g_reorg.call("a")
b1 = bltin_code_for_g_reorg.call("b")
return (a,a1,b,b1)
# constant instances with __init__ vs. __new__
class Thing1:
def __init__(self):
self.thingness = 1
thing1 = Thing1()
def one_thing1():
return thing1
class Thing2(long):
def __new__(t,v):
return long.__new__(t,v*2)
thing2 = Thing2(2)
def one_thing2():
return thing2
# propagation of fresh instances through attributes
class Stk:
def __init__(self):
self.itms = []
def push(self, v):
self.itms.append(v)
class EC:
def __init__(self):
self.stk = Stk()
def enter(self, f):
self.stk.push(f)
def propagation_of_fresh_instances_through_attrs(x):
e = EC()
e.enter(x)
# same involving recursion
class R:
def __init__(self, n):
if n>0:
self.r = R(n-1)
else:
self.r = None
self.n = n
if self.r:
self.m = self.r.n
else:
self.m = -1
def make_r(n):
return R(n)
class B:
pass
class Even(B):
def __init__(self,n):
if n > 0:
self.x = [Odd(n-1)]
self.y = self.x[0].x
else:
self.x = []
self.y = []
class Odd(B):
def __init__(self,n):
self.x = [Even(n-1)]
self.y = self.x[0].x
def make_eo(n):
if n%2 == 0:
return Even(n)
else:
return Odd(n)
# shows that we care about the expanded structure in front of changes to attributes involving only
# instances rev numbers
class Box:
pass
class Box2:
pass
class Box3(Box2):
pass
def flow_rev_numbers(n):
bx3 = Box3()
bx3.x = 1
bx = Box()
bx.bx3 = bx3
if n >0:
z = bx.bx3.x
if n >0:
bx2 = Box2()
bx2.x = 3
return z
raise Exception
# class specialization
class PolyStk:
_annspecialcase_ = "specialize:ctr_location"
def __init__(self):
self.itms = []
def push(self, v):
self.itms.append(v)
def top(self):
return self.itms[-1]
def class_spec():
istk = PolyStk()
istk.push(1)
sstk = PolyStk()
sstk.push("a")
istk.push(2)
sstk.push("b")
#if not isinstance(istk, PolyStk):
# return "confused"
return istk.top(), sstk.top()
from pypy.rlib.rarithmetic import ovfcheck, ovfcheck_lshift
def add_func(i=numtype):
try:
return ovfcheck(i + 1)
except OverflowError:
raise
from sys import maxint
def div_func(i=numtype):
try:
return ovfcheck((-maxint-1) // i)
except (OverflowError, ZeroDivisionError):
raise
def mul_func(x=numtype, y=numtype):
try:
return ovfcheck(x * y)
except OverflowError:
raise
def mod_func(i=numtype):
try:
return ovfcheck((-maxint-1) % i)
except OverflowError:
raise
except ZeroDivisionError:
raise
def rshift_func(i=numtype):
try:
return (-maxint-1) >> i
except ValueError:
raise
class hugelmugel(OverflowError):pass
def hugo(a, b, c):pass
def lshift_func(i=numtype):
try:
hugo(2, 3, 5)
return ovfcheck_lshift((-maxint-1), i)
except (hugelmugel, OverflowError, StandardError, ValueError):
raise
def unary_func(i=numtype):
try:
return ovfcheck(-i), ovfcheck(abs(i-1))
except: raise
# XXX it would be nice to get it right without an exception
# handler at all, but then we need to do much harder parsing
| Python |
import sys
import shutil
import py
from py.compat import subprocess
from pypy.config.config import Config
from pypy.translator.oosupport.genoo import GenOO
from pypy.translator.oosupport.treebuilder import build_trees
from pypy.translator.backendopt.ssa import SSI_to_SSA
from pypy.translator.cli import conftest
from pypy.translator.cli.ilgenerator import IlasmGenerator
from pypy.translator.cli.function import Function, log
from pypy.translator.cli.class_ import Class
from pypy.translator.cli.option import getoption
from pypy.translator.cli.database import LowLevelDatabase
from pypy.translator.cli.cts import CTS
from pypy.translator.cli.opcodes import opcodes
from pypy.translator.cli.sdk import SDK
from pypy.translator.cli.rte import get_pypy_dll
from pypy.translator.cli.support import Tee
from pypy.translator.cli.prebuiltnodes import get_prebuilt_nodes
from pypy.translator.cli.stackopt import StackOptGenerator
from pypy.translator.cli import query
from pypy.translator.cli import constant
try:
set
except NameError:
from sets import Set as set
#USE_STACKOPT = True and not getoption('nostackopt')
USE_STACKOPT = False
class GenCli(GenOO):
    """CLI (.NET) backend code generator.

    Drives translation of an RPython program to a textual IL source
    file and (optionally) assembles it into an executable with ilasm.
    """
    TypeSystem = CTS
    Function = Function
    opcodes = opcodes
    Database = LowLevelDatabase
    log = log

    ConstantGenerator = constant.StaticFieldConstGenerator
    InstanceConst = constant.CLIInstanceConst
    RecordConst = constant.CLIRecordConst
    ClassConst = constant.CLIClassConst
    ListConst = constant.CLIListConst
    StaticMethodConst = constant.CLIStaticMethodConst
    CustomDictConst = constant.CLICustomDictConst
    DictConst = constant.CLIDictConst
    WeakRefConst = constant.CLIWeakRefConst

    def __init__(self, tmpdir, translator, entrypoint, config=None):
        GenOO.__init__(self, tmpdir, translator, entrypoint, config)
        for node in get_prebuilt_nodes(translator, self.db):
            self.db.pending_node(node)
        self.assembly_name = entrypoint.get_name()
        self.tmpfile = tmpdir.join(self.assembly_name + '.il')
        self.const_stat = str(tmpdir.join('const_stat'))

        if translator.config.translation.backendopt.stack_optimization:
            for graph in translator.graphs:
                SSI_to_SSA(graph)
                build_trees(graph)

    def generate_source(self):
        """Emit the IL source file and return its path."""
        GenOO.generate_source(self)
        self.db.const_count.dump(self.const_stat)
        query.savedesc()
        return self.tmpfile.strpath

    def create_assembler(self):
        # Returns the IL emitter; tees output to stdout when requested.
        out = self.tmpfile.open('w')
        if getoption('stdout'):
            out = Tee(sys.stdout, out)

        if USE_STACKOPT:
            return StackOptGenerator(out, self.assembly_name, self.config)
        else:
            return IlasmGenerator(out, self.assembly_name, self.config)

    def build_exe(self):
        """Assemble the generated IL into an executable.

        Returns the output filename, or None when only source output
        was requested (--source).
        """
        if getoption('source'):
            return None

        pypy_dll = get_pypy_dll() # get or recompile pypy.dll
        shutil.copy(pypy_dll, self.tmpdir.strpath)

        ilasm = SDK.ilasm()
        tmpfile = self.tmpfile.strpath
        # Mono's ilasm occasionally deadlocks; the timeout keeps
        # automated test runs from blocking forever.
        self._exec_helper(ilasm, [tmpfile]+self.entrypoint.ilasm_flags(),
                          'ilasm failed to assemble (%s):\n%s\n%s',
                          timeout = 900)
        self.outfile = self.entrypoint.output_filename(tmpfile)
        if getoption('verify'):
            peverify = SDK.peverify()
            # fix: was the bare name 'outfile' (NameError at runtime)
            self._exec_helper(peverify, [self.outfile],
                              'peverify failed to verify (%s):\n%s\n%s')
        return self.outfile

    def _exec_helper(self, helper, args, msg, timeout=None):
        # Run an external tool; on POSIX an optional timeout is
        # enforced by wrapping the command in the watchdog script.
        args = [helper] + args
        if timeout and not sys.platform.startswith('win'):
            import os
            from pypy.tool import autopath
            watchdog = os.path.join(autopath.pypydir, 'tool', 'watchdog.py')
            args[:0] = [sys.executable, watchdog, str(float(timeout))]
        proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        retval = proc.wait()
        assert retval == 0, msg % (args[0], stdout, stderr)
| Python |
import cPickle as pickle
import os.path
from py.compat import subprocess
from pypy.rpython.ootypesystem import ootype
from pypy.translator.cli.rte import Query
from pypy.translator.cli.sdk import SDK
from pypy.translator.cli.support import log
from pypy.translator.cli.dotnet import CLR, CliNamespace, CliClass,\
NativeInstance, _overloaded_static_meth, _static_meth, OverloadingResolver
# Caches: CLI class name -> CliClass, OOType descriptor string -> TYPE,
# and class name -> pickled-description (persisted by savedesc()).
ClassCache = {}
OOTypeCache = {}
Descriptions = {}
# Pre-seed the cache: ROOT and System.Array stand-ins only need an
# _INSTANCE attribute, so a throwaway object is enough.
class Dummy: pass
fake_root = Dummy()
fake_root._INSTANCE = ootype.ROOT
ClassCache['ROOT'] = fake_root
ClassCache['System.Array'] = fake_root
del fake_root
del Dummy
def _descfilename(filename):
if filename is None:
curdir = os.path.dirname(__file__)
return os.path.join(curdir, 'query-descriptions')
else:
return filename
def savedesc(filename=None):
    """Pickle the global Descriptions cache to *filename* (or the
    default location).

    Fix: close the file even when pickle.dump raises, so the handle
    cannot leak on serialization errors.
    """
    f = open(_descfilename(filename), 'wb')
    try:
        pickle.dump(Descriptions, f, protocol=-1)
    finally:
        f.close()
def loaddesc(filename=None):
    """Load the pickled Descriptions cache from *filename* (or the
    default location), replacing the current contents.

    A missing file is silently ignored; a corrupt file logs a warning
    and leaves the cache untouched.  Fix: the original never closed
    the file handle; also 'exits' -> 'exists' in the warning message.
    """
    filename = _descfilename(filename)
    if not os.path.exists(filename):
        return
    f = open(filename, 'rb')
    try:
        try:
            newdesc = pickle.load(f)
        except pickle.UnpicklingError:
            log.WARNING('query-descriptions file exists, but failed to unpickle')
            return
    finally:
        f.close()
    Descriptions.clear()
    Descriptions.update(newdesc)
def getattr_ex(target, attr):
    """getattr() that follows dotted paths, e.g. 'a.b.c'."""
    for component in attr.split('.'):
        target = getattr(target, component)
    return target
def setattr_ex(target, attr, value):
    """setattr() that follows dotted paths: every component except the
    last is resolved with getattr_ex(), the last one is assigned."""
    if '.' not in attr:
        setattr(target, attr, value)
        return
    path, leaf = attr.rsplit('.', 1)
    setattr(getattr_ex(target, path), leaf, value)
def load_class_or_namespace(name):
    # Resolve *name* to a CliClass or CliNamespace: use the pickled
    # description cache when possible, otherwise shell out to the
    # query tool; the built object is attached onto CLR at the
    # corresponding dotted path.
    try:
        desc = Descriptions[name]
    except KeyError:
        desc = query_description(name)
        Descriptions[name] = desc
    res = desc.build()
    setattr_ex(CLR, name, res)
    return res
def query_description(name):
    # Run the external query tool on *name* and parse its answer.
    # Exit status 0: the tool's stdout is executable Python that fills
    # in a ClassDesc; 1: hard error; 2: not a loadable type, assume a
    # namespace.  Other statuses fall through and return None --
    # presumably unreachable; verify against the query tool (TODO).
    log.query('Loading description for %s' % name)
    arglist = SDK.runtime() + [Query.get(), name]
    query = subprocess.Popen(arglist, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True)
    stdout, stderr = query.communicate()
    retval = query.wait()
    if retval == 0:
        cls = ClassDesc()
        exec stdout in cls.__dict__
        del cls.__dict__['__builtins__']
        return cls
    elif retval == 1:
        raise RuntimeError, 'query.exe failed with this message:\n%s' % stderr
    elif retval == 2:
        # can't load type, assume it's a namespace
        return NamespaceDesc(name)
def load_class_maybe(name):
    """Return the cached entry for *name*, loading it on demand.

    Internal array types (System.Array+InternalArray*) are aliased to
    the generic System.Array cache entry instead of being queried.
    """
    if name.startswith('System.Array+InternalArray'):
        alias = ClassCache['System.Array']
        ClassCache[name] = alias
        return alias
    if name in ClassCache:
        return ClassCache[name]
    return load_class_or_namespace(name)
class Desc:
    # Abstract base for picklable descriptions of CLI entities.
    def build(self):
        # Subclasses construct and return the described object.
        raise NotImplementedError
    def __eq__(self, other):
        # Structural equality so cached descriptions compare by content.
        return self.__dict__ == other.__dict__
    def __hash__(self):
        # Deliberately unhashable: descriptions are mutable values.
        raise TypeError
class NamespaceDesc(Desc):
    # Description of a plain CLI namespace (no type information).
    def __init__(self, name):
        self.name = name
    def build(self):
        return CliNamespace(self.name)
class ClassDesc(Desc):
    # Description of a CLI class.  Its attributes (FullName, BaseType,
    # Methods, ...) are filled in by exec'ing the query tool's output
    # into the instance __dict__ (see query_description).
    def build(self):
        """Construct and cache the NativeInstance OOTYPE and its
        CliClass wrapper, loading superclasses and dependencies."""
        assert self.Assembly.startswith('mscorlib') # TODO: support external assemblies
        namespace, name = self.FullName.rsplit('.', 1)

        # construct OOTYPE and CliClass
        if self.FullName == 'System.Type':
            # we need to special case System.Type because it contains
            # circular dependencies, since its superclasses have got
            # methods which take System.Type as parameters
            BASETYPE = None
        else:
            load_class_maybe(self.BaseType)
            BASETYPE = ClassCache[self.BaseType]._INSTANCE
        TYPE = NativeInstance('[mscorlib]', namespace, name, BASETYPE, {}, {})
        TYPE._isArray = self.IsArray
        if self.IsArray:
            load_class_maybe(self.ElementType)
            TYPE._ELEMENT = ClassCache[self.ElementType]._INSTANCE
        Class = CliClass(TYPE, {})
        # register in the caches *before* loading dependencies, so
        # recursive references back to this class resolve
        OOTypeCache[self.OOType] = TYPE
        ClassCache[self.FullName] = Class

        if BASETYPE is None:
            # deferred superclass hookup for the System.Type case
            load_class_maybe(self.BaseType)
            BASETYPE = ClassCache[self.BaseType]._INSTANCE
            TYPE._set_superclass(BASETYPE)

        # render dependencies
        for name in self.Depend:
            load_class_maybe(name)

        # add both static and instance methods
        static_meths = self.group_methods(self.StaticMethods, _overloaded_static_meth,
                                          _static_meth, ootype.StaticMethod)
        meths = self.group_methods(self.Methods, ootype.overload, ootype.meth, ootype.Meth)
        Class._add_methods(static_meths)
        TYPE._add_methods(meths)
        return Class

    def group_methods(self, methods, overload, meth, Meth):
        # Bundle (name, args, result) triples into per-name overload
        # groups using the given constructors.
        groups = {}
        for name, args, result in methods:
            groups.setdefault(name, []).append((args, result))

        res = {}
        attrs = dict(resolver=OverloadingResolver)
        for name, methlist in groups.iteritems():
            TYPES = [self.get_method_type(Meth, args, result) for (args, result) in methlist]
            meths = [meth(TYPE) for TYPE in TYPES]
            res[name] = overload(*meths, **attrs)
        return res

    def get_method_type(self, Meth, args, result):
        ARGS = [self.get_ootype(arg) for arg in args]
        RESULT = self.get_ootype(result)
        return Meth(ARGS, RESULT)

    def get_ootype(self, t):
        # a bit messy, but works: 'ootype.X' names a primitive type,
        # anything else is a key into the OOType cache
        if t.startswith('ootype.'):
            _, name = t.split('.')
            return getattr(ootype, name)
        else:
            return OOTypeCache[t]
loaddesc() ## automatically loads the cached Dependencies
| Python |
from pypy.rpython.lltypesystem.lltype import Signed, Unsigned, Void, Bool, Float
from pypy.rpython.lltypesystem.lltype import SignedLongLong, UnsignedLongLong
from pypy.rlib.objectmodel import CDefinedIntSymbolic
from pypy.rpython.ootypesystem import ootype
from pypy.translator.oosupport.metavm import Generator
from pypy.translator.oosupport.constant import push_constant
from pypy.objspace.flow import model as flowmodel
from pypy.translator.cli.support import string_literal
def isnan(v):
    # NaN is the only value that compares unequal to itself; the second
    # clause presumably guards against constant-folding by translation
    # backends -- TODO confirm (it is always False for ordinary floats).
    return v != v*1.0 or (v == 1.0 and v == 2.0)
def isinf(v):
    # +/-inf are the only nonzero values left unchanged by doubling.
    return v!=0 and (v == v*2)
class CodeGenerator(object):
    """Write indented, block-structured text to a file-like object.

    Tracks the current indentation level; openblock()/closeblock()
    emit the configured delimiters and adjust the level.
    """

    def __init__(self, out, indentstep = 4, startblock = '{', endblock = '}'):
        self._out = out
        self._indent = 0
        self._bol = True            # True when the next write starts a line
        self._indentstep = indentstep
        self._startblock = startblock
        self._endblock = endblock

    def write(self, s, indent = 0):
        # Leading spaces are emitted only at the beginning of a line.
        total = self._indent + indent * self._indentstep
        if self._bol:
            self._out.write(' ' * total)
        self._out.write(s)
        self._bol = bool(s) and s.endswith('\n')

    def writeline(self, s=''):
        self.write(s)
        self.write('\n')

    def openblock(self):
        self.writeline(self._startblock)
        self._indent += self._indentstep

    def closeblock(self):
        self._indent -= self._indentstep
        self.writeline(self._endblock)
class IlasmGenerator(object):
"""
Generate IL code by writing to a file and compiling it with ilasm
"""
def __init__(self, outfile, name, config):
self.out = outfile
self.config = config
self.code = CodeGenerator(self.out)
self.code.writeline('.assembly extern mscorlib {}')
self.code.writeline('.assembly extern pypylib {}')
self.code.writeline('.assembly %s {}' % name)
self.code.writeline('.field static object last_exception') # XXX
def close(self):
self.out.close()
def begin_namespace(self, name):
self.code.writeline('.namespace ' + name)
self.code.openblock()
def end_namespace(self):
self.code.closeblock()
def write(self, s, indent=0):
self.code.write(s, indent)
def writeline(self, s=''):
self.code.writeline(s)
def openblock(self):
self.code.openblock()
def closeblock(self):
self.code.closeblock()
def begin_class(self, name, base=None, sealed=False, interfaces=(), abstract=False,
beforefieldinit=False, serializable=False):
if base is None:
base = '[mscorlib]System.Object'
s = ''
if abstract:
s += 'abstract '
if sealed:
s += 'sealed '
if beforefieldinit:
s += 'beforefieldinit '
if serializable:
s += 'serializable '
self.code.writeline('.class public %s %s extends %s' % (s, name, base))
if interfaces:
self.code.writeline(' implements %s' % ', '.join(interfaces))
self.code.openblock()
def end_class(self):
self.code.closeblock()
def field(self, name, type_, static = False):
if static:
s = 'static'
else:
s = ''
self.code.writeline('.field public %s %s %s' % (s, type_, name))
def begin_function(self, name, arglist, returntype, is_entrypoint = False, *args, **kwds):
# TODO: .maxstack
self.func_name = name
runtime = kwds.get('runtime', False)
if runtime:
method_type = 'runtime'
else:
method_type = 'il'
attributes = ' '.join(args)
arglist = ', '.join(['%s %s' % arg for arg in arglist])
self.code.writeline('.method public %s %s %s(%s) %s managed' %\
(attributes, returntype, name, arglist, method_type))
self.code.openblock()
if is_entrypoint:
self.code.writeline('.entrypoint')
self.code.writeline('.maxstack 32')
self.stderr('start %s' % name, self.config.translation.cli.trace_calls
and name!='.ctor' and method_type!='runtime')
def end_function(self):
self.flush()
self.code.closeblock()
def begin_try(self):
self.writeline('.try')
self.openblock()
def end_try(self):
self.closeblock()
def begin_catch(self, type_):
self.writeline('catch ' + type_)
self.openblock()
def end_catch(self):
self.closeblock()
def locals(self, vars):
varlist = ', '.join(['%s %s' % var for var in vars])
self.code.write('.locals init (')
self.code.write(varlist)
self.code.writeline(')')
    def label(self, lbl):
        # Emit 'lbl:' on its own line, outdented by one level.
        self.code.writeline()
        self.code.write(lbl + ':', indent=-1)
        self.code.writeline()
    def leave(self, lbl):
        # Exit a protected (try/catch) region, jumping to lbl.
        self.opcode('leave', lbl)
    def branch(self, lbl):
        # Unconditional branch.
        self.opcode('br', lbl)
def branch_if(self, cond, lbl):
if cond:
opcode = 'brtrue'
else:
opcode = 'brfalse'
self.opcode(opcode, lbl)
    def call(self, func):
        # Call a static function given its full signature string.
        self.opcode('call', func)
    def call_method(self, meth, virtual):
        # Call an instance method; use callvirt for virtual dispatch.
        if virtual:
            self.opcode('callvirt instance', meth)
        else:
            self.opcode('call instance', meth)
    def new(self, class_):
        # Instantiate via the given constructor signature.
        self.opcode('newobj', class_)
    def set_field(self, field_data ):
        # field_data is a (field_type, class_name, field_name) triple.
        self.opcode('stfld', '%s %s::%s' % field_data )
    def get_field(self, field_data):
        self.opcode('ldfld', '%s %s::%s' % field_data )
    def throw(self):
        self.opcode('throw')
    def pop(self):
        self.opcode('pop')
    def ret(self):
        self.opcode('ret')
    def castclass(self, cts_type):
        self.opcode('castclass', cts_type)
    def load_self(self):
        # 'self' is always argument 0 of an instance method.
        self.opcode('ldarg.0')
    def load_arg(self,v):
        self.opcode('ldarg', repr(v.name))
    def load_local(self,v):
        self.opcode('ldloc', repr(v.name))
    def switch(self, targets):
        # Jump table: 'switch(lbl0, lbl1, ...)'.
        cmd = 'switch(%s)' % ', '.join(targets)
        self.opcode(cmd)
def load_const(self,type_,v):
if type_ is Void:
pass
elif type_ is Bool:
self.opcode('ldc.i4', str(int(v)))
elif type_ is Float:
self.opcode('ldc.r8', repr(v))
elif type_ in (Signed, Unsigned):
self.opcode('ldc.i4', str(v))
elif type_ in (SignedLongLong, UnsignedLongLong):
self.opcode('ldc.i8', str(v))
    def store_local (self, v):
        # Store the top of the stack into local variable v.
        self.opcode('stloc', repr(v.name))
    def store_static_constant(self, cts_type, CONST_NAMESPACE, CONST_CLASS, name):
        # Store into the static field holding a precomputed constant.
        self.opcode('stsfld', '%s %s.%s::%s' % (cts_type, CONST_NAMESPACE, CONST_CLASS, name))
    def load_static_constant(self, cts_type, CONST_NAMESPACE, CONST_CLASS, name):
        self.opcode('ldsfld', '%s %s.%s::%s' % (cts_type, CONST_NAMESPACE, CONST_CLASS, name))
    def load_static_field(self, cts_type, name):
        self.opcode('ldsfld', '%s %s' % (cts_type, name))
    def emit(self, opcode, *args):
        # Generic passthrough used by the metavm generator.
        self.opcode(opcode,*args)
    def begin_link(self):
        pass
    def opcode(self, opcode, *args):
        # Write 'opcode arg1 arg2 ...' on a single line.
        self.code.write(opcode + ' ')
        self.code.writeline(' '.join(map(str, args)))
def stderr(self, msg, cond=True):
from pypy.translator.cli.support import string_literal
if cond:
self.call('class [mscorlib]System.IO.TextWriter class [mscorlib]System.Console::get_Error()')
self.opcode('ldstr', string_literal(msg))
self.call_method('void class [mscorlib]System.IO.TextWriter::WriteLine(string)', virtual=True)
    def add_comment(self, text):
        # Emit an ilasm '//' comment line.
        self.code.writeline('// %s' % text)
    def flush(self):
        # Hook for subclasses that buffer the emitted opcodes.
        pass
# Concrete int values emitted for the known CDefinedIntSymbolic
# expressions (see CLIBaseGenerator.push_primitive_constant below).
DEFINED_INT_SYMBOLICS = {'MALLOC_ZERO_FILLED': 1,
                         '0 /* we are not jitted here */': 0}
class CLIBaseGenerator(Generator):
    """ Implements those parts of the metavm generator that are not
    tied to any particular function."""
    def __init__(self, db, ilasm):
        self.ilasm = ilasm
        self.db = db
        self.cts = db.genoo.TypeSystem(db)
    def pop(self, TYPE):
        # TYPE is ignored: popping is the same for every CLI type.
        self.ilasm.opcode('pop')
    def add_comment(self, text):
        self.ilasm.add_comment(text)
    def function_signature(self, graph, func_name=None):
        return self.cts.graph_to_signature(graph, False, func_name)
    def op_signature(self, op, func_name):
        return self.cts.op_to_signature(op, func_name)
    def class_name(self, TYPE):
        # NOTE(review): implicitly returns None for types that are neither
        # Instance nor Record -- confirm callers never pass anything else.
        if isinstance(TYPE, ootype.Instance):
            return self.db.class_name(TYPE)
        elif isinstance(TYPE, ootype.Record):
            return self.db.get_record_name(TYPE)
    def emit(self, instr, *args):
        self.ilasm.opcode(instr, *args)
    def call_graph(self, graph, func_name=None):
        # Schedule the graph for rendering (unless it maps to a suggested
        # primitive) and emit the call to it.
        if func_name is None: # else it is a suggested primitive
            self.db.pending_function(graph)
        func_sig = self.function_signature(graph, func_name)
        self.ilasm.call(func_sig)
    def call_op(self, op, func_name):
        func_sig = self.op_signature(op, func_name)
        self.ilasm.call(func_sig)
    def call_signature(self, signature):
        self.ilasm.call(signature)
    def cast_to(self, lltype):
        cts_type = self.cts.lltype_to_cts(lltype, False)
        self.ilasm.opcode('castclass', cts_type)
    def new(self, obj):
        self.ilasm.new(self.cts.ctor_name(obj))
    def field_name(self, obj, field):
        # Return the fully qualified 'fieldtype class::name' string for a
        # field, looked up through the class hierarchy of obj.
        INSTANCE, type_ = obj._lookup_field(field)
        assert type_ is not None, 'Cannot find the field %s in the object %s' % (field, obj)
        class_name = self.class_name(INSTANCE)
        field_type = self.cts.lltype_to_cts(type_)
        field = self.cts.escape_name(field)
        return '%s %s::%s' % (field_type, class_name, field)
    def set_field(self, obj, name):
        self.ilasm.opcode('stfld ' + self.field_name(obj, name))
    def get_field(self, obj, name):
        self.ilasm.opcode('ldfld ' + self.field_name(obj, name))
    def call_method(self, obj, name):
        # TODO: use callvirt only when strictly necessary
        signature, virtual = self.cts.method_signature(obj, name)
        self.ilasm.call_method(signature, virtual)
    def downcast(self, TYPE):
        type = self.cts.lltype_to_cts(TYPE)
        return self.ilasm.opcode('isinst', type)
    def instantiate(self):
        # Instantiate the System.Type on top of the stack via reflection.
        self.call_signature('object [pypylib]pypy.runtime.Utils::RuntimeNew(class [mscorlib]System.Type)')
    def load(self, v):
        # Only flow-graph constants are loaded here; variables are handled
        # by the per-function generator.
        if isinstance(v, flowmodel.Constant):
            push_constant(self.db, v.concretetype, v.value, self)
        else:
            assert False
    def isinstance(self, class_name):
        # NB: shadows the builtin only as an attribute of this class;
        # methods below still see the builtin isinstance.
        self.ilasm.opcode('isinst', class_name)
    def branch_unconditionally(self, target_label):
        self.ilasm.branch(target_label)
    def branch_conditionally(self, cond, target_label):
        self.ilasm.branch_if(cond, target_label)
    def branch_if_equal(self, target_label):
        self.ilasm.opcode('beq', target_label)
    def push_primitive_constant(self, TYPE, value):
        # Emit the ldc.*/ldstr opcode pushing the given primitive constant.
        ilasm = self.ilasm
        if TYPE is ootype.Void:
            pass
        elif TYPE is ootype.Bool:
            ilasm.opcode('ldc.i4', str(int(value)))
        elif TYPE is ootype.Char or TYPE is ootype.UniChar:
            ilasm.opcode('ldc.i4', ord(value))
        elif TYPE is ootype.Float:
            if isinf(value):
                # raw byte encodings for the IEEE-754 special values,
                # which repr() cannot express portably
                ilasm.opcode('ldc.r8', '(00 00 00 00 00 00 f0 7f)')
            elif isnan(value):
                ilasm.opcode('ldc.r8', '(00 00 00 00 00 00 f8 ff)')
            else:
                ilasm.opcode('ldc.r8', repr(value))
        elif isinstance(value, CDefinedIntSymbolic):
            # must be checked before the Signed/Unsigned case below
            ilasm.opcode('ldc.i4', DEFINED_INT_SYMBOLICS[value.expr])
        elif TYPE in (ootype.Signed, ootype.Unsigned):
            ilasm.opcode('ldc.i4', str(value))
        elif TYPE in (ootype.SignedLongLong, ootype.UnsignedLongLong):
            ilasm.opcode('ldc.i8', str(value))
        elif TYPE is ootype.String:
            if value._str is None:
                ilasm.opcode('ldnull')
            else:
                ilasm.opcode("ldstr", string_literal(value._str))
        else:
            assert False, "Unexpected constant type"
    def dup(self, TYPE):
        # TYPE is ignored: dup is the same for every CLI type.
        self.ilasm.opcode('dup')
| Python |
from pypy.translator.cli.conftest import option
# fallback values used when the option object has no such attribute
_defaultopt = dict(wd = False, source = False, nostop = False, stdout = False, nostackopt = False)
def getoption(name):
    """Return the value of command-line option *name*, falling back to
    _defaultopt for options that were not set."""
    return getattr(option, name, _defaultopt.get(name))
| Python |
"""
Translate between PyPy ootypesystem and .NET Common Type System
"""
import exceptions
from pypy.rpython.lltypesystem.lltype import SignedLongLong, UnsignedLongLong
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.lltypesystem.llmemory import WeakGcAddress
from pypy.translator.cli.option import getoption
from pypy.translator.cli import oopspec
try:
set
except NameError:
from sets import Set as set
from pypy.tool.ansi_print import ansi_log
import py
log = py.log.Producer("cli")
py.log.setconsumer("cli", ansi_log)
# CLI names of the runtime-support classes; '%s' slots are filled with the
# cts names of the generic type parameters.
WEAKREF = '[mscorlib]System.WeakReference'
PYPY_LIST = '[pypylib]pypy.runtime.List`1<%s>'
PYPY_LIST_OF_VOID = '[pypylib]pypy.runtime.ListOfVoid'
PYPY_DICT = '[pypylib]pypy.runtime.Dict`2<%s, %s>'
PYPY_DICT_OF_VOID = '[pypylib]pypy.runtime.DictOfVoid`2<%s, int32>'
PYPY_DICT_VOID_VOID = '[pypylib]pypy.runtime.DictVoidVoid'
PYPY_DICT_ITEMS_ITERATOR = '[pypylib]pypy.runtime.DictItemsIterator`2<%s, %s>'
PYPY_STRING_BUILDER = '[pypylib]pypy.runtime.StringBuilder'
# base mapping from primitive/builtin ootypes to their cts names; used as
# the fallback by CTS.lltype_to_cts below
_lltype_to_cts = {
    ootype.Void: 'void',
    ootype.Signed: 'int32',
    ootype.Unsigned: 'unsigned int32',
    SignedLongLong: 'int64',
    UnsignedLongLong: 'unsigned int64',
    ootype.Bool: 'bool',
    ootype.Float: 'float64',
    ootype.Char: 'char',
    ootype.UniChar: 'char',
    ootype.Class: 'class [mscorlib]System.Type',
    ootype.String: 'string',
    ootype.StringBuilder: 'class ' + PYPY_STRING_BUILDER,
    WeakGcAddress: 'class ' + WEAKREF,
    # maps generic types to their ordinal
    ootype.List.SELFTYPE_T: 'class ' + (PYPY_LIST % '!0'),
    ootype.List.ITEMTYPE_T: '!0',
    ootype.Dict.SELFTYPE_T: 'class ' + (PYPY_DICT % ('!0', '!1')),
    ootype.Dict.KEYTYPE_T: '!0',
    ootype.Dict.VALUETYPE_T: '!1',
    ootype.DictItemsIterator.SELFTYPE_T: 'class ' + (PYPY_DICT_ITEMS_ITERATOR % ('!0', '!1')),
    ootype.DictItemsIterator.KEYTYPE_T: '!0',
    ootype.DictItemsIterator.VALUETYPE_T: '!1',
    }
# Python exception classes mapped to their CLI counterparts
_pyexception_to_cts = {
    exceptions.Exception: '[mscorlib]System.Exception',
    exceptions.OverflowError: '[mscorlib]System.OverflowException'
    }
def _get_from_dict(d, key, error):
try:
return d[key]
except KeyError:
if getoption('nostop'):
log.WARNING(error)
return key
else:
assert False, error
class CTS(object):
    """Translate ootypes and flow-graph entities to their CLI (ilasm)
    textual representation."""
    # every ilasm reserved word; identifiers clashing with one of these
    # must be quoted by escape_name() before being emitted
    ILASM_KEYWORDS = set(["at", "as", "implicitcom", "implicitres",
    "noappdomain", "noprocess", "nomachine", "extern", "instance",
    "explicit", "default", "vararg", "unmanaged", "cdecl", "stdcall",
    "thiscall", "fastcall", "marshal", "in", "out", "opt", "retval",
    "static", "public", "private", "family", "initonly",
    "rtspecialname", "specialname", "assembly", "famandassem",
    "famorassem", "privatescope", "literal", "notserialized", "value",
    "not_in_gc_heap", "interface", "sealed", "abstract", "auto",
    "sequential", "ansi", "unicode", "autochar", "bestfit",
    "charmaperror", "import", "serializable", "nested", "lateinit",
    "extends", "implements", "final", "virtual", "hidebysig",
    "newslot", "unmanagedexp", "pinvokeimpl", "nomangle", "ole",
    "lasterr", "winapi", "native", "il", "cil", "optil", "managed",
    "forwardref", "runtime", "internalcall", "synchronized",
    "noinlining", "custom", "fixed", "sysstring", "array", "variant",
    "currency", "syschar", "void", "bool", "int8", "int16", "int32",
    "int64", "float32", "float64", "error", "unsigned", "uint",
    "uint8", "uint16", "uint32", "uint64", "decimal", "date", "bstr",
    "lpstr", "lpwstr", "lptstr", "objectref", "iunknown", "idispatch",
    "struct", "safearray", "int", "byvalstr", "tbstr", "lpvoid",
    "any", "float", "lpstruct", "null", "ptr", "vector", "hresult",
    "carray", "userdefined", "record", "filetime", "blob", "stream",
    "storage", "streamed_object", "stored_object", "blob_object",
    "cf", "clsid", "method", "class", "pinned", "modreq", "modopt",
    "typedref", "type","refany", "wchar", "char", "fromunmanaged",
    "callmostderived", "bytearray", "with", "init", "to", "catch",
    "filter", "finally", "fault", "handler", "tls", "field",
    "request", "demand", "assert", "deny", "permitonly", "linkcheck",
    "inheritcheck", "reqmin", "reqopt", "reqrefuse", "prejitgrant",
    "prejitdeny", "noncasdemand", "noncaslinkdemand",
    "noncasinheritance", "readonly", "nometadata", "algorithm",
    "fullorigin", "nan", "inf", "publickey", "enablejittracking",
    "disablejitoptimizer", "preservesig", "beforefieldinit",
    "alignment", "nullref", "valuetype", "compilercontrolled",
    "reqsecobj", "enum", "object", "string", "true", "false", "is",
    "on", "off", "add", "and", "arglist", "beq", "bge", "bgt", "ble",
    "blt", "bne", "box", "br", "break", "brfalse", "brnull", "brtrue",
    "call", "calli", "callvirt", "castclass", "ceq", "cgt",
    "ckfinite", "clt", "conf", "constrained", "conv", "cpblk",
    "cpobj", "div", "dup", "endfault", "endfilter", "endfinally",
    "initblk", "initobj", "isinst", "jmp", "ldarg", "ldarga", "ldc",
    "ldelem", "ldelema", "ldfld", "ldflda", "ldftn", "ldind", "ldlen",
    "ldloc", "ldloca", "ldnull", "ldobj", "ldsfld", "ldsflda",
    "ldstr", "ldtoken", "ldvirtftn", "leave", "localloc", "mkrefany",
    "mul", "neg", "newarr", "newobj", "nop", "not", "or", "pop",
    "readonly", "refanytype", "refanyval", "rem", "ret", "rethrow",
    "shl", "shr", "sizeof", "starg", "stelem", "stfld", "stind",
    "stloc", "stobj", "stsfld", "sub", "switch", "tail", "throw",
    "unaligned", "unbox", "volatile", "xor"])
    def __init__(self, db):
        self.db = db
    def __class(self, result, include_class):
        # Prepend the 'class ' keyword when a class reference is requested.
        if include_class:
            return 'class ' + result
        else:
            return result
    def escape_name(self, name):
        """Mangle the name if it's an ilasm reserved word"""
        if name in self.ILASM_KEYWORDS:
            return "'%s'" % name
        else:
            return name
    def lltype_to_cts(self, t, include_class=True):
        # Map a low-level/oo type to its CLI type name; include_class
        # controls whether reference types get the 'class ' prefix.
        if t is ootype.ROOT:
            return self.__class('[mscorlib]System.Object', include_class)
        elif isinstance(t, lltype.Ptr) and isinstance(t.TO, lltype.OpaqueType):
            return self.__class('[mscorlib]System.Object', include_class)
        elif isinstance(t, ootype.Instance):
            NATIVE_INSTANCE = t._hints.get('NATIVE_INSTANCE', None)
            if NATIVE_INSTANCE:
                return self.__class(NATIVE_INSTANCE._name, include_class)
            else:
                # schedule the class for rendering and use its chosen name
                name = self.db.pending_class(t)
                return self.__class(name, include_class)
        elif isinstance(t, ootype.Record):
            name = self.db.pending_record(t)
            return self.__class(name, include_class)
        elif isinstance(t, ootype.StaticMethod):
            delegate = self.db.record_delegate(t)
            return self.__class(delegate, include_class)
        elif isinstance(t, ootype.List):
            item_type = self.lltype_to_cts(t._ITEMTYPE)
            if item_type == 'void': # special case: List of Void
                return self.__class(PYPY_LIST_OF_VOID, include_class)
            return self.__class(PYPY_LIST % item_type, include_class)
        elif isinstance(t, ootype.Dict):
            key_type = self.lltype_to_cts(t._KEYTYPE)
            value_type = self.lltype_to_cts(t._VALUETYPE)
            if value_type == 'void': # special cases: Dict with voids
                if key_type == 'void':
                    return self.__class(PYPY_DICT_VOID_VOID, include_class)
                else:
                    return self.__class(PYPY_DICT_OF_VOID % key_type, include_class)
            return self.__class(PYPY_DICT % (key_type, value_type), include_class)
        elif isinstance(t, ootype.DictItemsIterator):
            key_type = self.lltype_to_cts(t._KEYTYPE)
            value_type = self.lltype_to_cts(t._VALUETYPE)
            if key_type == 'void':
                key_type = 'int32' # placeholder
            if value_type == 'void':
                value_type = 'int32' # placeholder
            return self.__class(PYPY_DICT_ITEMS_ITERATOR % (key_type, value_type), include_class)
        # primitive/builtin types (or, with --nostop, unknown ones)
        return _get_from_dict(_lltype_to_cts, t, 'Unknown type %s' % t)
    def llvar_to_cts(self, var):
        # -> (cts type, name) of a flow-graph variable
        return self.lltype_to_cts(var.concretetype), var.name
    def llconst_to_cts(self, const):
        # -> (cts type, value) of a flow-graph constant
        return self.lltype_to_cts(const.concretetype), const.value
    def ctor_name(self, t):
        # Signature of the default constructor of type t.
        return 'instance void %s::.ctor()' % self.lltype_to_cts(t)
    def graph_to_signature(self, graph, is_method = False, func_name = None):
        # Full ilasm signature of the function in *graph*: Void arguments
        # are filtered out, and the implicit 'self' is dropped for methods.
        ret_type, ret_var = self.llvar_to_cts(graph.getreturnvar())
        func_name = func_name or graph.name
        func_name = self.escape_name(func_name)
        namespace = getattr(graph.func, '_namespace_', None)
        if namespace:
            func_name = '%s::%s' % (namespace, func_name)
        args = [arg for arg in graph.getargs() if arg.concretetype is not ootype.Void]
        if is_method:
            args = args[1:]
        arg_types = [self.lltype_to_cts(arg.concretetype) for arg in args]
        arg_list = ', '.join(arg_types)
        return '%s %s(%s)' % (ret_type, func_name, arg_list)
    def op_to_signature(self, op, func_name):
        # Like graph_to_signature, but for a single flow-graph operation;
        # op.args[0] (the function itself) is skipped.
        ret_type, ret_var = self.llvar_to_cts(op.result)
        func_name = self.escape_name(func_name)
        args = [arg for arg in op.args[1:]
                    if arg.concretetype is not ootype.Void]
        arg_types = [self.lltype_to_cts(arg.concretetype) for arg in args]
        arg_list = ', '.join(arg_types)
        return '%s %s(%s)' % (ret_type, func_name, arg_list)
    def method_signature(self, TYPE, name_or_desc):
        """Return (signature, virtual_flag) for the method *name_or_desc*
        of *TYPE*, which is either an Instance or a builtin type."""
        # TODO: use callvirt only when strictly necessary
        if isinstance(TYPE, ootype.Instance):
            if isinstance(name_or_desc, ootype._overloaded_meth_desc):
                name = name_or_desc.name
                METH = name_or_desc.TYPE
                virtual = True
            else:
                name = name_or_desc
                owner, meth = TYPE._lookup(name)
                METH = meth._TYPE
                virtual = getattr(meth, '_virtual', True)
            class_name = self.db.class_name(TYPE)
            full_name = 'class %s::%s' % (class_name, name)
            returntype = self.lltype_to_cts(METH.RESULT)
            arg_types = [self.lltype_to_cts(ARG) for ARG in METH.ARGS if ARG is not ootype.Void]
            arg_list = ', '.join(arg_types)
            return '%s %s(%s)' % (returntype, full_name, arg_list), virtual
        elif isinstance(TYPE, (ootype.BuiltinType, ootype.StaticMethod)):
            assert isinstance(name_or_desc, str)
            name = name_or_desc
            if isinstance(TYPE, ootype.StaticMethod):
                METH = TYPE
            else:
                METH = oopspec.get_method(TYPE, name)
            class_name = self.lltype_to_cts(TYPE)
            if isinstance(TYPE, ootype.Dict):
                KEY = TYPE._KEYTYPE
                VALUE = TYPE._VALUETYPE
                name = name_or_desc # NOTE(review): redundant, already assigned above
                if KEY is ootype.Void and VALUE is ootype.Void and name == 'll_get_items_iterator':
                    # ugly, ugly special case
                    ret_type = 'class ' + PYPY_DICT_ITEMS_ITERATOR % ('int32', 'int32')
                elif VALUE is ootype.Void and METH.RESULT is ootype.Dict.VALUETYPE_T:
                    ret_type = 'void'
                else:
                    ret_type = self.lltype_to_cts(METH.RESULT)
                    ret_type = dict_of_void_ll_copy_hack(TYPE, ret_type)
            else:
                ret_type = self.lltype_to_cts(METH.RESULT)
            # drop arguments that are Void either directly or after the
            # generic type parameters have been instantiated
            generic_types = getattr(TYPE, '_generic_types', {})
            arg_types = [self.lltype_to_cts(arg) for arg in METH.ARGS if
                         arg is not ootype.Void and \
                         generic_types.get(arg, arg) is not ootype.Void]
            arg_list = ', '.join(arg_types)
            return '%s %s::%s(%s)' % (ret_type, class_name, name, arg_list), False
        else:
            assert False
def dict_of_void_ll_copy_hack(TYPE, ret_type):
    # XXX: ugly hack to make the ll_copy signature correct when
    # CustomDict is special-cased to DictOfVoid.
    if not isinstance(TYPE, ootype.CustomDict):
        return ret_type
    if TYPE._VALUETYPE is not ootype.Void:
        return ret_type
    return ret_type.replace('Dict`2', 'DictOfVoid`2')
| Python |
"""
Support for an automatically compiled Run Time Environment.
The source of the RTE is in the src/ directory.
"""
import os
import os.path
import shutil
import py
from py.compat import subprocess
from pypy.translator.cli.sdk import SDK
from pypy.tool.ansi_print import ansi_log
log = py.log.Producer("cli")
py.log.setconsumer("cli", ansi_log)
class Target:
    """A compilation target of the RTE: a set of sources under SRC_DIR
    that are (re)compiled on demand by get()."""
    SOURCES = []       # source files, relative to SRC_DIR
    OUTPUT = None      # name of the produced binary
    ALIAS = None       # optional extra copy of the output
    FLAGS = []         # extra compiler flags
    DEPENDENCIES = []  # other Target classes that must be built first
    SRC_DIR = os.path.join(os.path.dirname(__file__), 'src/')
    def _filename(cls, name, path=None):
        # Absolute path of *name* inside SRC_DIR.
        rel_path = os.path.join(cls.SRC_DIR, name)
        return os.path.abspath(rel_path)
    _filename = classmethod(_filename)
    def get_COMPILER(cls):
        return SDK.csc()
    get_COMPILER = classmethod(get_COMPILER)
    def get(cls):
        """Build the target (and its dependencies) if out of date and
        return the absolute path of the output file."""
        for dep in cls.DEPENDENCIES:
            dep.get()
        sources = [cls._filename(src) for src in cls.SOURCES]
        out = cls._filename(cls.OUTPUT)
        alias = cls._filename(cls.ALIAS or cls.OUTPUT)
        recompile = True
        try:
            src_mtime = max([os.stat(src).st_mtime for src in sources])
            alias_mtime = os.stat(alias).st_mtime
            if src_mtime <= alias_mtime:
                recompile = False
        except OSError:
            # missing output (or source): force a compilation
            pass
        if recompile:
            cls.compile(sources, out)
        return out
    get = classmethod(get)
    def compile(cls, sources, out):
        """Run the compiler on *sources*, producing *out* (and the ALIAS
        copy, if any)."""
        log.red("Compiling %s" % (cls.ALIAS or cls.OUTPUT))
        oldcwd = os.getcwd()
        os.chdir(cls.SRC_DIR)
        try:
            compiler = subprocess.Popen([cls.get_COMPILER()] + cls.FLAGS + ['/out:%s' % out] + sources,
                                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = compiler.communicate()
            retval = compiler.wait()
            assert retval == 0, 'Failed to compile %s: the compiler said:\n %s' % (cls.OUTPUT, stderr)
            if cls.ALIAS is not None:
                alias = cls._filename(cls.ALIAS)
                shutil.copy(out, alias)
        finally:
            # restore the cwd even when the compilation fails; previously a
            # failed assertion above left the whole process chdir'd into
            # SRC_DIR
            os.chdir(oldcwd)
    compile = classmethod(compile)
class MainStub(Target):
    # Stub executable assembled from IL source; referenced by the
    # pypylib build below ('/r:main.exe').
    SOURCES = ['stub/main.il']
    OUTPUT = 'main.exe'
    def get_COMPILER(cls):
        # IL sources are assembled with ilasm, not the C# compiler.
        return SDK.ilasm()
    get_COMPILER = classmethod(get_COMPILER)
class PyPyLibDLL(Target):
    # The runtime-support library used by translated code.
    SOURCES = ['pypylib.cs', 'll_os.cs', 'll_os_path.cs', 'errno.cs', 'll_math.cs']
    OUTPUT = 'pypylib.dll'
    FLAGS = ['/t:library', '/unsafe', '/r:main.exe']
    DEPENDENCIES = [MainStub]
class Query(Target):
    # Helper executable used to extract descriptions of .NET assemblies.
    SOURCES = ['query.cs']
    OUTPUT = 'query.exe'
    def compile(cls, sources, out):
        # assume that if query.exe need to be recompiled the descriptions cache is invalid
        from pypy.translator.cli.query import _descfilename
        filename = _descfilename(None)
        if os.path.exists(filename):
            os.remove(filename)
        # delegate to the generic compilation (im_func so cls is forwarded)
        Target.compile.im_func(cls, sources, out)
    compile = classmethod(compile)
class Support(Target):
    # Support library used by the test machinery.
    SOURCES = ['support.cs']
    OUTPUT = 'support.dll'
    FLAGS = ['/t:library']
def get_pypy_dll():
    """Build pypylib.dll if needed and return its absolute path."""
    return PyPyLibDLL.get()
if __name__ == '__main__':
    get_pypy_dll()
| Python |
import types
from pypy.rpython.ootypesystem import ootype
from pypy.translator.cli.cts import CTS
from pypy.translator.cli.node import Node
IEQUALITY_COMPARER = 'class [mscorlib]System.Collections.Generic.IEqualityComparer`1<%s>'
class EqualityComparer(Node):
    """Render a CLI class implementing IEqualityComparer`1<KEY_TYPE>,
    delegating Equals and GetHashCode to RPython graphs supplied by
    *eq_args* and *hash_args*."""
    count = 0  # class-level counter used to generate unique class names

    def __init__(self, db, KEY_TYPE, eq_args, hash_args):
        self.db = db
        self.cts = CTS(db)
        self.KEY_TYPE = KEY_TYPE
        self.key_type = self.cts.lltype_to_cts(KEY_TYPE)
        self.eq_args = eq_args
        self.hash_args = hash_args
        self.name = 'EqualityComparer_%d' % EqualityComparer.count
        EqualityComparer.count += 1

    def get_ctor(self):
        # Signature of this comparer's default constructor.
        return 'instance void %s::.ctor()' % self.name

    def render(self, ilasm):
        # Emit the whole class: ctor plus the two interface methods.
        self.ilasm = ilasm
        IEqualityComparer = IEQUALITY_COMPARER % self.key_type
        ilasm.begin_class(self.name, interfaces=[IEqualityComparer])
        self._ctor()
        self._method('Equals', [(self.key_type, 'x'), (self.key_type, 'y')],
                     'bool', self.eq_args)
        self._method('GetHashCode', [(self.key_type, 'x')], 'int32', self.hash_args)
        ilasm.end_class()

    def _ctor(self):
        # Trivial constructor just chaining to System.Object's one.
        self.ilasm.begin_function('.ctor', [], 'void', False, 'specialname', 'rtspecialname', 'instance')
        self.ilasm.opcode('ldarg.0')
        self.ilasm.call('instance void [mscorlib]System.Object::.ctor()')
        self.ilasm.opcode('ret')
        self.ilasm.end_function()

    def _method(self, name, arglist, return_type, fn_args):
        # fn_args is either a plain function (with .graph attached) or a
        # (fn, obj, method_name) triple coming from a bound method.
        self.ilasm.begin_function(name, arglist, return_type, False,
                                  'final', 'virtual', 'hidebysig', 'newslot',
                                  'instance', 'default')
        # idiomatic isinstance instead of direct type comparison
        if isinstance(fn_args, types.FunctionType):
            assert len(fn_args.self_arg) <= 1
            if len(fn_args.self_arg) == 1:
                assert fn_args.graph.getargs()[0].concretetype is ootype.Void
            self._call_function(fn_args.graph, len(arglist))
        else:
            fn, obj, method_name = fn_args
            # fn is a HalfConcreteWrapper
            sm = fn.value.concretize().value
            if method_name.value is None:
                self._call_function(sm.graph, len(arglist))
            else:
                assert False, 'XXX'
        self.ilasm.end_function()

    def _call_function(self, graph, n_args):
        # Forward the n_args CLI arguments (self is arg 0) to the graph
        # and return its result.
        self.db.pending_function(graph)
        for arg in range(1, n_args+1):
            self.ilasm.opcode('ldarg', arg)
        signature = self.cts.graph_to_signature(graph)
        self.ilasm.call(signature)
        self.ilasm.opcode('ret')
| Python |
from pypy.translator.cli.ilgenerator import IlasmGenerator
class StackOptMixin(object):
    """Mixin for an IL generator: buffers all emitted operations and runs
    a small peephole pass over them at flush() time, removing useless
    stloc/ldloc pairs that merely rename values on the evaluation stack."""
    def __init__(self, *args):
        self.super = super(StackOptMixin, self)
        self.super.__init__(*args)
        self._reset()
    def _reset(self):
        # forget everything buffered for the previous function
        self.pending_ops = []
        self.mapping = {} # varname --> (opcode, args) needed to load it
    def opcode(self, op, *args):
        self.pending_ops.append((op, args))
    def writeline(self, s=''):
        # non-opcode output is tagged 'SUPER' and replayed verbatim at flush
        self.pending_ops.append(('SUPER', ('writeline', s)))
    def write(self, s, indent=0):
        self.pending_ops.append(('SUPER', ('write', s, indent)))
    def openblock(self):
        self.pending_ops.append(('SUPER', ('openblock',)))
    def closeblock(self):
        self.pending_ops.append(('SUPER', ('closeblock',)))
    def label(self, lbl):
        self.pending_ops.append(('LABEL', (lbl,)))
    def locals(self, vars):
        # defer the .locals declaration: emit_locals() will later declare
        # only the locals that survived the optimization
        self.pending_ops.append(('LOCALS', ()))
        self.pending_locals = dict([(varname, vartype) for (vartype, varname) in vars])
    def emit_locals(self):
        # takes only locals used by at least one stloc
        locals = {}
        for item in self.pending_ops:
            if item is None:
                continue
            if item[0] == 'stloc':
                op, (varname,) = item
                # strip the quotes added by escape_name for ilasm keywords
                if varname[0] == "'" == varname[-1] == "'":
                    varname = varname[1:-1]
                locals[varname] = self.pending_locals[varname]
        vars = [(vartype, varname) for (varname, vartype) in locals.iteritems()]
        self.super.locals(vars)
    def _varname(self, op, args):
        # key identifying the value produced/consumed by a load or store
        if op in ('ldloc', 'ldarg', 'stloc'):
            return args[0]
        elif op.startswith('ld'):
            return ('PLACEHOLDER', op, args)
        else:
            assert False, "undefined varname of %s" % op
    def _is_load(self, op):
        return op is not None and op.startswith('ld')
    def _is_simple_load(self, op):
        # loads without side effects, safe to remove/duplicate
        return op is not None and (op.startswith('ldloc') or
                                   op.startswith('ldarg') or
                                   op.startswith('ldsfld') or
                                   op.startswith('ldc'))
    def _optimize(self):
        self._collect_stats()
        self._remove_renaming()
    def _collect_stats(self):
        # count, per local, how many times it is written and read
        assign_count = {}
        read_count = {}
        for item in self.pending_ops:
            if item is None:
                continue
            op, args = item
            if op == 'stloc':
                varname, = args
                assign_count[varname] = assign_count.get(varname, 0) + 1
            elif op == 'ldloc':
                varname, = args
                read_count[varname] = read_count.get(varname, 0) + 1
        self.assign_count = assign_count
        self.read_count = read_count
    def _remove_renaming(self):
        # removed entries are replaced by None so that indexes into
        # pending_ops stay stable during the scan
        assign_count = self.assign_count
        read_count = self.read_count
        prev_op, prev_args = None, None
        for i, (op, args) in enumerate(self.pending_ops):
            if op == 'stloc' and self._is_simple_load(prev_op):
                # ldloc x, stloc x0 --> remove both, map x0 to x
                varname, = args
                if assign_count[varname] == 1:
                    self.mapping[varname] = self.mapping.get(self._varname(prev_op, prev_args), (prev_op, prev_args))
                    self.pending_ops[i-1] = None
                    self.pending_ops[i] = None
                    op, args = None, None # to prevent the next opcode thinking the previous was a store
            elif op == 'ldloc':
                if prev_op == 'stloc' and args == prev_args and read_count[args[0]] == 1:
                    # stloc x, ldloc x --> remove both
                    self.pending_ops[i-1] = None
                    self.pending_ops[i] = None
                    op, args = None, None # to prevent the next opcode thinking the previous was a load
                else:
                    # ldloc x, stloc x1, ..., ldloc x1 --> ..., ldloc x
                    try:
                        self.pending_ops[i] = self.mapping[self._varname(op, args)]
                    except KeyError:
                        pass
            prev_op, prev_args = op, args
    def flush(self):
        # Optimize the buffered operations, then replay them all through
        # the underlying generator.
        self._optimize()
        for item in self.pending_ops:
            if item is None:
                continue
            opcode, args = item
            if opcode == 'SUPER':
                method = args[0]
                getattr(self.super, method)(*args[1:])
            elif opcode == 'LABEL':
                self.super.label(*args)
            elif opcode == 'LOCALS':
                self.emit_locals()
            else:
                self.super.opcode(opcode, *args)
        self._reset()
# IlasmGenerator with the stack-optimizing peephole pass enabled.
class StackOptGenerator(StackOptMixin, IlasmGenerator):
    pass
| Python |
try:
set
except NameError:
from sets import Set as set
from pypy.objspace.flow import model as flowmodel
from pypy.rpython.lltypesystem.lltype import Void
from pypy.rpython.ootypesystem import ootype
from pypy.translator.oosupport.treebuilder import SubOperation
from pypy.translator.oosupport.function import Function as OOFunction
from pypy.translator.oosupport.constant import push_constant
from pypy.translator.cli.option import getoption
from pypy.translator.cli.cts import CTS
from pypy.translator.cli.opcodes import opcodes
from pypy.translator.cli.metavm import InstructionList, Generator
from pypy.translator.cli.node import Node
from pypy.translator.cli.class_ import Class
from pypy.translator.cli.support import log
from pypy.translator.cli.ilgenerator import CLIBaseGenerator
# Select the exception model: last_exception-flag based (True) or native
# CLI try/catch blocks (False); see the two handler classes below.
USE_LAST = False
class NativeExceptionHandler(object):
    """Exception-handling model based on native CLI try/catch blocks."""
    def begin_try(self):
        self.ilasm.begin_try()
    def end_try(self, target_label):
        # a 'leave' is required to exit a protected region
        self.ilasm.leave(target_label)
        self.ilasm.end_try()
    def begin_catch(self, llexitcase):
        # catch the CLI class corresponding to the low-level exception class
        ll_meta_exc = llexitcase
        ll_exc = ll_meta_exc._inst.class_._INSTANCE
        cts_exc = self.cts.lltype_to_cts(ll_exc, False)
        self.ilasm.begin_catch(cts_exc)
    def end_catch(self, target_label):
        self.ilasm.leave(target_label)
        self.ilasm.end_catch()
    def render_raise_block(self, block):
        # the exception instance is the 2nd input arg of a raise block
        exc = block.inputargs[1]
        self.load(exc)
        self.ilasm.opcode('throw')
    def store_exception_and_link(self, link):
        if self._is_raise_block(link.target):
            # the exception value is on the stack, use it as the 2nd target arg
            assert len(link.args) == 2
            assert len(link.target.inputargs) == 2
            self.store(link.target.inputargs[1])
        else:
            # the exception value is on the stack, store it in the proper place
            if isinstance(link.last_exception, flowmodel.Variable):
                # also extract the exception meta-class from the instance
                self.ilasm.opcode('dup')
                self.store(link.last_exc_value)
                self.ilasm.get_field(('class Object_meta', 'Object', 'meta'))
                self.store(link.last_exception)
            else:
                self.store(link.last_exc_value)
        self._setup_link(link)
class LastExceptionHandler(object):
    """Exception-handling model based on a global 'last_exception' static
    field, checked after each call, instead of native CLI exception
    blocks."""
    in_try = False
    def begin_try(self):
        self.in_try = True
        self.ilasm.opcode('// begin_try')
    def end_try(self, target_label):
        # fall through to target_label only when no exception is pending
        self.ilasm.opcode('ldsfld', 'object last_exception')
        self.ilasm.opcode('brnull', target_label)
        self.ilasm.opcode('// end try')
        self.in_try = False
    def begin_catch(self, llexitcase):
        # test whether last_exception is an instance of the catch class;
        # if not, jump to the next catch label
        self.ilasm.label(self.current_label('catch'))
        ll_meta_exc = llexitcase
        ll_exc = ll_meta_exc._inst.class_._INSTANCE
        cts_exc = self.cts.lltype_to_cts(ll_exc, False)
        self.ilasm.opcode('ldsfld', 'object last_exception')
        self.isinstance(cts_exc)
        self.ilasm.opcode('dup')
        # NOTE(review): hand-computed byte offset skipping the pop+br pair
        # below -- fragile if those opcodes ever change
        self.ilasm.opcode('brtrue.s', 6)
        self.ilasm.opcode('pop')
        self.ilasm.opcode('br', self.next_label('catch'))
        # here is the target of the above brtrue.s
        self.ilasm.opcode('ldnull')
        self.ilasm.opcode('stsfld', 'object last_exception')
    def end_catch(self, target_label):
        self.ilasm.opcode('br', target_label)
    def store_exception_and_link(self, link):
        if self._is_raise_block(link.target):
            # the exception value is on the stack, use it as the 2nd target arg
            assert len(link.args) == 2
            assert len(link.target.inputargs) == 2
            self.store(link.target.inputargs[1])
        else:
            # the exception value is on the stack, store it in the proper place
            if isinstance(link.last_exception, flowmodel.Variable):
                self.ilasm.opcode('dup')
                self.store(link.last_exc_value)
                self.ilasm.get_field(('class Object_meta', 'Object', 'meta'))
                self.store(link.last_exception)
            else:
                self.store(link.last_exc_value)
        self._setup_link(link)
    def before_last_blocks(self):
        # final catch label: no handler matched
        self.ilasm.label(self.current_label('catch'))
        self.ilasm.opcode('nop')
    def render_raise_block(self, block):
        # store the exception into last_exception, then unwind the frame
        # by returning a dummy value (or branching to the return block)
        exc = block.inputargs[1]
        self.load(exc)
        self.ilasm.opcode('stsfld', 'object last_exception')
        if not self.return_block: # must be a void function
            TYPE = self.graph.getreturnvar().concretetype
            default = TYPE._defl()
            if default is not None: # concretetype is Void
                try:
                    self.db.constant_generator.push_primitive_constant(self, TYPE, default)
                except AssertionError:
                    self.ilasm.opcode('ldnull') # :-(
            self.ilasm.opcode('ret')
        else:
            self.ilasm.opcode('br', self._get_block_name(self.return_block))
    def _render_op(self, op):
        # after any call rendered outside a try block, propagate a pending
        # exception immediately
        OOFunction._render_op(self, op)
        if op.opname in ('direct_call', 'oosend', 'indirect_call') and not self.in_try:
            self._premature_return()
    def _render_sub_op(self, sub_op):
        OOFunction._render_sub_op(self, sub_op)
        if sub_op.op.opname in ('direct_call', 'oosend', 'indirect_call') and not self.in_try:
            self._premature_return(need_pop=sub_op.op.result is not ootype.Void)
    def _premature_return(self, need_pop=False):
        # NOTE(review): need_pop is currently unused -- confirm intent
        try:
            return_block = self._get_block_name(self.graph.returnblock)
        except KeyError:
            # no rendered return block: return a dummy default value
            self.ilasm.opcode('//premature return')
            self.ilasm.opcode('ldsfld', 'object last_exception')
            TYPE = self.graph.getreturnvar().concretetype
            default = TYPE._defl()
            if default is None: # concretetype is Void
                self.ilasm.opcode('brfalse.s', 1)
                self.ilasm.opcode('ret')
            else:
                self.ilasm.opcode('brfalse.s', 3) # ??
                try:
                    self.db.constant_generator.push_primitive_constant(self, TYPE, default)
                except AssertionError:
                    self.ilasm.opcode('ldnull') # :-(
                self.ilasm.opcode('ret')
        else:
            self.ilasm.opcode('ldsfld', 'object last_exception')
            self.ilasm.opcode('brtrue', return_block)
# pick the exception model selected by the USE_LAST flag above
if USE_LAST:
    ExceptionHandler = LastExceptionHandler
else:
    ExceptionHandler = NativeExceptionHandler
class Function(ExceptionHandler, OOFunction, Node, CLIBaseGenerator):
def __init__(self, *args, **kwargs):
OOFunction.__init__(self, *args, **kwargs)
self._set_args()
self._set_locals()
namespace = getattr(self.graph.func, '_namespace_', None)
str
if namespace:
if '.' in namespace:
self.namespace, self.classname = namespace.rsplit('.', 1)
else:
self.namespace = None
self.classname = namespace
else:
self.namespace = None
self.classname = None
    def _create_generator(self, ilasm):
        return self # Function implements the Generator interface
    def record_ll_meta_exc(self, ll_meta_exc):
        # record the type only if it doesn't belong to a native_class
        ll_exc = ll_meta_exc._inst.class_._INSTANCE
        NATIVE_INSTANCE = ll_exc._hints.get('NATIVE_INSTANCE', None)
        if NATIVE_INSTANCE is None:
            OOFunction.record_ll_meta_exc(self, ll_meta_exc)
    def begin_render(self):
        # Emit the namespace/class/method headers and declare the locals.
        returntype, returnvar = self.cts.llvar_to_cts(self.graph.getreturnvar())
        if self.is_method:
            args = self.args[1:] # self is implicit
            meth_type = 'virtual' # TODO: mark as virtual only when strictly necessary
        else:
            args = self.args
            meth_type = 'static'
        if self.namespace:
            self.ilasm.begin_namespace(self.namespace)
        if self.classname:
            self.ilasm.begin_class(self.classname)
        self.ilasm.begin_function(self.name, args, returntype, self.is_entrypoint, meth_type)
        self.ilasm.locals(self.locals)
    def end_render(self):
        # Close everything opened by begin_render, in reverse order.
        self.ilasm.end_function()
        if self.classname:
            self.ilasm.end_class()
        if self.namespace:
            self.ilasm.end_namespace()
    def set_label(self, label):
        self.ilasm.label(label)
    def render_return_block(self, block):
        # load the return value (if any) and emit 'ret'
        return_var = block.inputargs[0]
        if return_var.concretetype is not Void:
            self.load(return_var)
        self.ilasm.opcode('ret')
# XXX: this method should be moved into oosupport, but other
# backends are not ready :-(
def render_bool_switch(self, block):
assert len(block.exits) == 2
for link in block.exits:
if link.exitcase:
link_true = link
else:
link_false = link
true_label = self.next_label('link_true')
self.generator.load(block.exitswitch)
self.generator.branch_conditionally(link.exitcase, true_label)
self._follow_link(link_false) # if here, the exitswitch is false
self.set_label(true_label)
self._follow_link(link_true) # if here, the exitswitch is true
def render_numeric_switch(self, block):
if block.exitswitch.concretetype in (ootype.SignedLongLong, ootype.UnsignedLongLong):
# TODO: it could be faster to check is the values fit in
# 32bit, and perform a cast in that case
self.render_numeric_switch_naive(block)
return
cases = {}
naive = False
for link in block.exits:
if link.exitcase == "default":
default = link, self.next_label('switch')
else:
if block.exitswitch.concretetype in (ootype.Char, ootype.UniChar):
value = ord(link.exitcase)
else:
value = link.exitcase
if value < 0:
naive = True
break
cases[value] = link, self.next_label('switch')
try:
max_case = max(cases.keys())
except ValueError:
max_case = 0
if max_case > 3*len(cases) + 10: # the switch is very sparse, better to use the naive version
naive = True
if naive:
self.render_numeric_switch_naive(block)
return
targets = []
for i in xrange(max_case+1):
link, lbl = cases.get(i, default)
targets.append(lbl)
self.generator.load(block.exitswitch)
self.ilasm.switch(targets)
self.render_switch_case(*default)
for link, lbl in cases.itervalues():
self.render_switch_case(link, lbl)
def render_switch_case(self, link, label):
target_label = self._get_block_name(link.target)
self.set_label(label)
self._setup_link(link)
self.generator.branch_unconditionally(target_label)
# Those parts of the generator interface that are function
# specific
def load(self, v):
if isinstance(v, flowmodel.Variable):
if v.concretetype is ootype.Void:
return # ignore it
if v.name in self.argset:
selftype, selfname = self.args[0]
if self.is_method and v.name == selfname:
self.ilasm.load_self() # special case for 'self'
else:
self.ilasm.load_arg(v)
else:
self.ilasm.load_local(v)
elif isinstance(v, SubOperation):
self._render_sub_op(v)
else:
super(Function, self).load(v)
def store(self, v):
if isinstance(v, flowmodel.Variable):
if v.concretetype is not Void:
self.ilasm.store_local(v)
else:
assert False
| Python |
import py
# Extra py.test command-line options understood by the pypy-cli test
# suite; the parsed values are exposed through 'option' and imported by
# the tests to tune IL generation, compilation and execution.
Option = py.test.config.Option
option = py.test.config.addoptions\
("pypy-cli options",
 Option('--source', action="store_true", dest="source", default=False,
        help="only generate IL source, don't compile"),
 Option('--wd', action="store_true", dest="wd", default=False,
        help="store temporary files in the working directory"),
 Option('--stdout', action="store_true", dest="stdout", default=False,
        help="print the generated IL code to stdout, too"),
 Option('--nostop', action="store_true", dest="nostop", default=False,
        help="don't stop on warning. The generated IL code could not compile"),
 Option('--nowrap', action="store_true", dest="nowrap", default=False,
        help="don't wrap exceptions but let them to flow out of the entry point"),
 Option('--verify', action="store_true", dest="verify", default=False,
        help="check that compiled executables are verifiable"),
 Option('--norun', action='store_true', dest="norun", default=False,
        help="don't run the compiled executable"),
 Option('--nostackopt', action='store_true', dest='nostackopt', default=False,
        help="don't optimize stack load/store operations"),
 )
| Python |
from pypy.translator.cli.cts import CTS
from pypy.translator.cli.database import LowLevelDatabase
from pypy.translator.cli.node import Node
from pypy.rpython.ootypesystem import ootype
def get_entrypoint(graph):
    """Select the entry point node for *graph*.

    A graph whose single argument is a List(String) gets a standalone
    'main'; anything else is wrapped in the test-harness entry point.
    """
    from pypy.translator.cli.test.runtest import TestEntryPoint
    arglist = graph.getargs()
    if arglist:
        ARG0 = arglist[0].concretetype
    else:
        ARG0 = None
    if isinstance(ARG0, ootype.List) and ARG0._ITEMTYPE is ootype.String:
        return StandaloneEntryPoint(graph)
    return TestEntryPoint(graph)
class BaseEntryPoint(Node):
    """Behaviour common to every entry point node."""

    def set_db(self, db):
        """Attach the database and build a matching type system."""
        self.db = db
        self.cts = CTS(db)

    def ilasm_flags(self):
        """Extra flags for ilasm; none by default."""
        return []

    def output_filename(self, il_filename):
        """Name of the assembled output for the given .il file."""
        return il_filename.replace('.il', '.exe')
class StandaloneEntryPoint(BaseEntryPoint):
    """
    This class produces a 'main' method that converts the argv into a
    List of Strings and passes it to the real entry point.
    """
    def __init__(self, graph_to_call):
        self.graph = graph_to_call
    def get_name(self):
        return 'main'
    def render(self, ilasm):
        # the real entry point must take exactly one List(String) argument
        try:
            ARG0 = self.graph.getargs()[0].concretetype
        except IndexError:
            ARG0 = None
        assert isinstance(ARG0, ootype.List) and ARG0._ITEMTYPE is ootype.String,\
               'Wrong entry point signature: List(String) expected'
        ilasm.begin_function('main', [('string[]', 'argv')], 'void', True, 'static')
        ilasm.new('instance void class [pypylib]pypy.runtime.List`1<string>::.ctor()')
        # fake argv[0]: the CLI argv does not contain the executable name,
        # so use the entry assembly's location instead
        ilasm.opcode('dup')
        ilasm.call('class [mscorlib]System.Reflection.Assembly class [mscorlib]System.Reflection.Assembly::GetEntryAssembly()')
        ilasm.call_method('string class [mscorlib]System.Reflection.Assembly::get_Location()', True)
        ilasm.call_method('void class [mscorlib]System.Collections.Generic.List`1<string>::Add(!0)', True)
        # add real argv
        ilasm.opcode('dup')
        ilasm.opcode('ldarg.0')
        ilasm.call_method('void class [mscorlib]System.Collections.Generic.List`1<string>::'
                          'AddRange(class [mscorlib]System.Collections.Generic.IEnumerable`1<!0>)', True)
        ilasm.call(self.cts.graph_to_signature(self.graph))
        ilasm.opcode('pop') # XXX: return this value, if it's an int32
        ilasm.opcode('ret')
        ilasm.end_function()
        # make sure the wrapped graph itself gets rendered
        self.db.pending_function(self.graph)
class DllEntryPoint(BaseEntryPoint):
    """Entry point used when the target is a .NET dll: no 'main' method,
    just a set of exported graphs scheduled for rendering."""

    def __init__(self, name, graphs):
        self.name = name
        self.graphs = graphs

    def get_name(self):
        return self.name

    def ilasm_flags(self):
        # tell ilasm to produce a library instead of an executable
        return ['/dll']

    def output_filename(self, il_filename):
        return il_filename.replace('.il', '.dll')

    def render(self, ilasm):
        # emits no IL of its own: only schedules the exported graphs
        for exported_graph in self.graphs:
            self.db.pending_function(exported_graph)
| Python |
from pypy.rpython.ootypesystem import ootype
from pypy.translator.cli.node import Node
from pypy.translator.cli.cts import CTS
from pypy.translator.oosupport.constant import push_constant
from pypy.translator.cli.ilgenerator import CLIBaseGenerator
try:
set
except NameError:
from sets import Set as set
class Class(Node):
    """Graph node rendering an ootype INSTANCE as a CLI class."""
    def __init__(self, db, INSTANCE, namespace, name):
        self.db = db
        self.cts = db.genoo.TypeSystem(db)
        self.INSTANCE = INSTANCE
        self.namespace = namespace
        self.name = name
    def dependencies(self):
        # the superclass must be rendered as well
        if not self.is_root(self.INSTANCE):
            self.db.pending_class(self.INSTANCE._superclass)
    def __hash__(self):
        return hash(self.INSTANCE)
    def __eq__(self, other):
        return self.INSTANCE == other.INSTANCE
    def __ne__(self, other):
        return not self == other
    def is_root(INSTANCE):
        # the hierarchy root maps to System.Object and is never rendered
        return INSTANCE._superclass is None
    is_root = staticmethod(is_root)
    def get_name(self):
        return self.name
    def __repr__(self):
        return '<Class %s>' % self.name
    def get_base_class(self):
        base_class = self.INSTANCE._superclass
        if self.is_root(base_class):
            return '[mscorlib]System.Object'
        else:
            return self.db.class_name(base_class)
    def is_abstract(self):
        # XXX: abstractness detection is disabled; everything after the
        # unconditional 'return False' below is intentionally dead code,
        # kept for reference.
        return False # XXX
        # if INSTANCE has an abstract method, the class is abstract
        method_names = set()
        for m_name, m_meth in self.INSTANCE._methods.iteritems():
            if not hasattr(m_meth, 'graph'):
                return True
            method_names.add(m_name)
        # if superclasses have abstract methods not overriden by
        # INSTANCE, the class is abstract
        abstract_method_names = set()
        cls = self.INSTANCE._superclass
        while cls is not None:
            abstract_method_names.update(cls._methods.keys())
            cls = cls._superclass
        not_overriden = abstract_method_names.difference(method_names)
        if not_overriden:
            return True
        return False
    def render(self, ilasm):
        """Emit the full IL class definition: fields, ctor, methods."""
        if self.is_root(self.INSTANCE):
            return
        self.ilasm = ilasm
        self.gen = CLIBaseGenerator(self.db, ilasm)
        if self.namespace:
            ilasm.begin_namespace(self.namespace)
        ilasm.begin_class(self.name, self.get_base_class(), abstract=self.is_abstract())
        for f_name, (f_type, f_default) in self.INSTANCE._fields.iteritems():
            cts_type = self.cts.lltype_to_cts(f_type)
            f_name = self.cts.escape_name(f_name)
            # Void fields carry no data and are not emitted
            if cts_type != 'void':
                ilasm.field(f_name, cts_type)
        self._ctor()
        self._toString()
        for m_name, m_meth in self.INSTANCE._methods.iteritems():
            if hasattr(m_meth, 'graph'):
                # if the first argument's type is not a supertype of
                # this class, this method is not really used by the
                # class: don't render it, else there would be a type
                # mismatch.
                args =  m_meth.graph.getargs()
                SELF = args[0].concretetype
                if not ootype.isSubclass(self.INSTANCE, SELF):
                    continue
                f = self.db.genoo.Function(self.db, m_meth.graph, m_name, is_method = True)
                f.render(ilasm)
            else:
                # abstract method: emit a virtual stub returning a
                # default value (null or zero)
                METH = m_meth._TYPE
                arglist = [(self.cts.lltype_to_cts(ARG), 'v%d' % i)
                           for i, ARG in enumerate(METH.ARGS)
                           if ARG is not ootype.Void]
                returntype = self.cts.lltype_to_cts(METH.RESULT)
                ilasm.begin_function(m_name, arglist, returntype, False, 'virtual') #, 'abstract')
                ilasm.add_comment('abstract method')
                if isinstance(METH.RESULT, ootype.OOType):
                    ilasm.opcode('ldnull')
                else:
                    push_constant(self.db, METH.RESULT, 0, self.gen)
                ilasm.opcode('ret')
                ilasm.end_function()
        ilasm.end_class()
        if self.namespace:
            ilasm.end_namespace()
    def _ctor(self):
        """Emit the default constructor: call the base ctor, then
        initialize every non-void field with its declared default."""
        self.ilasm.begin_function('.ctor', [], 'void', False, 'specialname', 'rtspecialname', 'instance')
        self.ilasm.opcode('ldarg.0')
        self.ilasm.call('instance void %s::.ctor()' % self.get_base_class())
        # set default values for fields
        default_values = self.INSTANCE._fields.copy()
        default_values.update(self.INSTANCE._overridden_defaults)
        for f_name, (F_TYPE, f_default) in default_values.iteritems():
            # the field may be declared in a superclass: find its owner
            INSTANCE_DEF, _ = self.INSTANCE._lookup_field(f_name)
            cts_type = self.cts.lltype_to_cts(F_TYPE)
            f_name = self.cts.escape_name(f_name)
            if cts_type != 'void':
                self.ilasm.opcode('ldarg.0')
                push_constant(self.db, F_TYPE, f_default, self.gen)
                class_name = self.db.class_name(INSTANCE_DEF)
                self.ilasm.set_field((cts_type, class_name, f_name))
        self.ilasm.opcode('ret')
        self.ilasm.end_function()
    def _toString(self):
        # delegate ToString to the pypylib helper used by the test suite
        self.ilasm.begin_function('ToString', [], 'string', False, 'virtual', 'instance', 'default')
        self.ilasm.opcode('ldarg.0')
        self.ilasm.call('string class [pypylib]pypy.test.Result::InstanceToPython(object)')
        self.ilasm.ret()
        self.ilasm.end_function()
| Python |
#! /usr/bin/env python
"""
Usage: carbonpython.py <module-name> [dll-name]
Compiles an RPython module into a .NET dll.
"""
import sys
import new
import types
import os.path
import inspect
from pypy.translator.driver import TranslationDriver
from pypy.translator.cli.entrypoint import DllEntryPoint
class DllDef:
    """Describes a .NET dll to be built from a set of RPython functions.

    name       -- name of the dll (without extension)
    namespace  -- default namespace for the exported functions
    functions  -- optional list of (function, annotation) pairs
    """
    def __init__(self, name, namespace, functions=None):
        self.name = name
        self.namespace = namespace
        # NB: the default used to be the mutable literal 'functions=[]',
        # which is shared between every instance created without an
        # explicit list; use the None-sentinel idiom instead.
        if functions is None:
            functions = []
        self.functions = functions # [(function, annotation), ...]
        self.driver = TranslationDriver()
        self.driver.config.translation.ootype.mangle = False
        self.driver.setup_library(self)

    def add_function(self, func, inputtypes):
        """Register one more function to export."""
        self.functions.append((func, inputtypes))

    def get_entrypoint(self, bk):
        """Build the DllEntryPoint from the cached graphs of the
        registered functions ('bk' is the annotator bookkeeper)."""
        graphs = [bk.getdesc(f).cachedgraph(None) for f, _ in self.functions]
        return DllEntryPoint(self.name, graphs)

    def compile(self):
        """Run the translation driver up to the 'compile_cli' goal."""
        # give every exported function the dll-level namespace, unless it
        # already carries its own
        for func, _ in self.functions:
            if not hasattr(func, '_namespace_'):
                func._namespace_ = self.namespace
        self.driver.proceed(['compile_cli'])
class export(object):
    """Decorator marking a function as exported to the generated dll.

    Two usages are supported:
      @export                      -- no input types
      @export(int, str, ...)      -- with input type annotations; an
                                      optional keyword 'namespace' sets
                                      the function's '_namespace_'.
    """
    def __new__(cls, *args, **kwds):
        # bare usage: @export applied directly to the function
        if len(args) == 1 and isinstance(args[0], types.FunctionType):
            func = args[0]
            func._inputtypes_ = ()
            return func
        # parametrized usage: build the decorator instance.  Do NOT
        # forward args to object.__new__: it rejects extra arguments
        # (they are delivered to __init__ by type.__call__ anyway).
        return object.__new__(cls)

    def __init__(self, *args, **kwds):
        self.inputtypes = args
        self.namespace = kwds.pop('namespace', None)
        if len(kwds) > 0:
            raise TypeError("unexpected keyword argument: '%s'" % list(kwds)[0])

    def __call__(self, func):
        func._inputtypes_ = self.inputtypes
        if self.namespace is not None:
            func._namespace_ = self.namespace
        return func
def is_exported(obj):
    """True if *obj* is a function or unbound method marked by export()."""
    if not isinstance(obj, (types.FunctionType, types.UnboundMethodType)):
        return False
    return hasattr(obj, '_inputtypes_')
def collect_entrypoints(dic):
    """Scan a module dict and return the (callable, inputtypes) pairs of
    every exported function, recursing into classes."""
    entrypoints = []
    for obj in dic.itervalues():
        if is_exported(obj):
            entrypoints.append((obj, obj._inputtypes_))
        elif isinstance(obj, (types.ClassType, type)):
            entrypoints.extend(collect_class_entrypoints(obj))
    return entrypoints
def collect_class_entrypoints(cls):
try:
__init__ = cls.__init__
if not is_exported(__init__):
return []
except AttributeError:
return []
entrypoints = [(wrap_init(cls, __init__), __init__._inputtypes_)]
for item in cls.__dict__.itervalues():
if item is not __init__.im_func and is_exported(item):
inputtypes = (cls,) + item._inputtypes_
entrypoints.append((wrap_method(item), inputtypes))
return entrypoints
def getarglist(meth):
    """Return the list of positional argument names of *meth*.

    *args, **kwds and default values are not supported yet and are
    rejected with an assertion.
    """
    try:
        argspec = inspect.getargspec(meth)
    except AttributeError:
        # inspect.getargspec was removed in Python 3.11; fall back to
        # getfullargspec, whose first four fields are compatible
        argspec = inspect.getfullargspec(meth)
    arglist, starargs, kwargs, defaults = argspec[:4]
    assert starargs is None, '*args not supported yet'
    assert kwargs is None, '**kwds not supported yet'
    assert defaults is None, 'default values not supported yet'
    return arglist
def wrap_init(cls, meth):
    """Build a module-level factory '__internal__ctor' that instantiates
    *cls*; its signature mirrors __init__ without 'self'."""
    arglist = getarglist(meth)[1:] # discard self
    args = ', '.join(arglist)
    source = 'def __internal__ctor(%s): return %s(%s)' % (
        args, cls.__name__, args)
    mydict = {cls.__name__: cls}
    print(source)  # debugging aid, shows the generated wrapper
    # call form of exec: valid on both Python 2 and Python 3
    exec(source, mydict)
    return mydict['__internal__ctor']
def wrap_method(meth, is_init=False):
    """Build a module-level function '__internal__<name>' that invokes
    *meth* on its first argument (the instance).

    NOTE: 'is_init' is currently unused.
    """
    arglist = getarglist(meth)
    name = '__internal__%s' % meth.func_name
    selfvar = arglist[0]
    args = ', '.join(arglist)
    params = ', '.join(arglist[1:])
    source = 'def %s(%s): return %s.%s(%s)' % (
        name, args, selfvar, meth.func_name, params)
    mydict = {}
    # debugging aid, shows the generated wrapper
    print source
    exec source in mydict
    return mydict[name]
def compile_dll(filename, dllname=None, copy_dll=True):
    """Compile the RPython module in *filename* into a .NET dll.

    dllname   -- output name (defaults to the module name, '.dll' stripped)
    copy_dll  -- copy the produced dll next to the source when True
    """
    dirname, name = os.path.split(filename)
    if dllname is None:
        dllname, _ = os.path.splitext(name)
    elif dllname.endswith('.dll'):
        dllname, _ = os.path.splitext(dllname)
    module = new.module(dllname)
    sys.path.insert(0, dirname)
    try:
        execfile(filename, module.__dict__)
    finally:
        # restore sys.path even when the module fails to load
        sys.path.pop(0)
    # read '_namespace_' only *after* the module has executed: reading it
    # before (as the old code did) always yielded the fallback value
    namespace = module.__dict__.get('_namespace_', dllname)
    dll = DllDef(dllname, namespace)
    dll.functions = collect_entrypoints(module.__dict__)
    dll.compile()
    if copy_dll:
        dll.driver.copy_cli_dll()
def main(argv):
    """Command-line driver: carbonpython.py <module-name> [dll-name]."""
    if len(argv) == 2:
        filename = argv[1]
        dllname = None
    elif len(argv) == 3:
        filename = argv[1]
        dllname = argv[2]
    else:
        # sys.stderr.write instead of 'print >>': valid on py2 and py3
        sys.stderr.write(__doc__ + '\n')
        sys.exit(2)
    if not filename.endswith('.py'):
        filename += '.py'
    if not os.path.exists(filename):
        sys.stderr.write("Cannot find file %s\n" % filename)
        sys.exit(1)
    compile_dll(filename, dllname)

if __name__ == '__main__':
    main(sys.argv)
| Python |
from pypy.translator.cli.function import Function
try:
set
except NameError:
from sets import Set as set
class Helper(Function):
    """A support function rendered as a static method of the
    'pypy.runtime.Helpers' class instead of a free-standing method."""
    def render(self, ilasm):
        # wrap the normal function rendering in the Helpers class
        ilasm.begin_namespace('pypy.runtime')
        ilasm.begin_class('Helpers')
        Function.render(self, ilasm)
        ilasm.end_class()
        ilasm.end_namespace()
def raise_RuntimeError():
    """Helper entry point: unconditionally raise RuntimeError."""
    raise RuntimeError

def raise_OverflowError():
    """Helper entry point: unconditionally raise OverflowError."""
    raise OverflowError

def raise_ValueError():
    """Helper entry point: unconditionally raise ValueError."""
    raise ValueError

def raise_ZeroDivisionError():
    """Helper entry point: unconditionally raise ZeroDivisionError."""
    raise ZeroDivisionError

# (helper, annotation) pairs: every helper takes no arguments
HELPERS = [(helper, []) for helper in (raise_RuntimeError,
                                       raise_OverflowError,
                                       raise_ValueError,
                                       raise_ZeroDivisionError)]
def _build_helpers(translator, db):
    """Annotate/specialize the HELPERS functions and wrap the resulting
    graphs in Helper nodes."""
    functions = set()
    for fn, annotation in HELPERS:
        functions.add(fn)
        translator.annotator.build_types(fn, annotation)
    translator.rtyper.specialize_more_blocks()
    res = []
    # find the graphs that belong to our helper functions
    for graph in translator.graphs:
        func = getattr(graph, 'func', None)
        if func in functions:
            res.append(Helper(db, graph, func.func_name))
    return res
def get_prebuilt_nodes(translator, db):
    """Return all prebuilt helper nodes: the HELPERS plus the rtyper's
    raise_OSError support function."""
    prebuilt_nodes = _build_helpers(translator, db)
    raise_OSError_graph = translator.rtyper.exceptiondata.fn_raise_OSError.graph
    prebuilt_nodes.append(Helper(db, raise_OSError_graph, 'raise_OSError'))
    return prebuilt_nodes
| Python |
from pypy.translator.cli import oopspec
from pypy.rpython.ootypesystem import ootype
from pypy.translator.oosupport.metavm import Generator, InstructionList, MicroInstruction,\
PushAllArgs, StoreResult, GetField, SetField, DownCast
from pypy.translator.cli.comparer import EqualityComparer
from pypy.translator.cli.cts import WEAKREF
from pypy.translator.cli.dotnet import _static_meth, NativeInstance
# pypylib class providing the "methods" of ootype.String as static functions
STRING_HELPER_CLASS = '[pypylib]pypy.runtime.String'
class _Call(MicroInstruction):
    """Renders a 'direct_call' operation, dispatching on the callee kind:
    native .NET static method, RPython graph (possibly an oopspec'ed
    method call), or primitive builtin."""
    def render(self, generator, op):
        callee = op.args[0].value
        if isinstance(callee, _static_meth):
            self._render_native_function(generator, callee, op.args)
        elif hasattr(callee, "graph"):
            graph = callee.graph
            # oopspec may turn the call into a method send
            method_name = oopspec.get_method_name(graph, op)
            if method_name is None:
                self._render_function(generator, graph, op.args)
            else:
                self._render_method(generator, method_name, op.args[1:])
        else:
            self._render_primitive_function(generator, callee, op)
    def _load_arg_or_null(self, generator, arg):
        # Void arguments cannot be loaded; a Void constant None stands
        # for a null reference
        if arg.concretetype is ootype.Void:
            if arg.value is None:
                generator.ilasm.opcode('ldnull') # special-case: use None as a null value
            else:
                assert False, "Don't know how to load this arg"
        else:
            generator.load(arg)
    def _render_native_function(self, generator, funcdesc, args):
        """Call a real .NET static method described by *funcdesc*."""
        for func_arg in args[1:]: # push parameters
            self._load_arg_or_null(generator, func_arg)
        cts = generator.cts
        ret_type = cts.lltype_to_cts(funcdesc._TYPE.RESULT)
        arg_types = [cts.lltype_to_cts(arg) for arg in funcdesc._TYPE.ARGS if arg is not ootype.Void]
        arg_list = ', '.join(arg_types)
        signature = '%s %s::%s(%s)' % (ret_type, funcdesc._cls._name, funcdesc._name, arg_list)
        generator.call_signature(signature)
    def _render_function(self, generator, graph, args):
        """Call an RPython graph; functions marked 'suggested_primitive'
        are redirected to their C#-implemented pypylib counterpart."""
        primitive = getattr(graph.func, 'suggested_primitive', False)
        for func_arg in args[1:]: # push parameters
            generator.load(func_arg)
        if primitive:
            _, module = graph.func.__module__.rsplit('.', 1)
            func_name = '[pypylib]pypy.builtin.%s::%s' % (module, graph.func.func_name)
            generator.call_graph(graph, func_name)
        else:
            generator.call_graph(graph)
    def _render_method(self, generator, method_name, args):
        """Send *method_name* to args[0]; String methods are special-cased
        into plain helper functions."""
        this = args[0]
        native = isinstance(this.concretetype, NativeInstance)
        for arg in args: # push parametes
            if native:
                self._load_arg_or_null(generator, arg)
            else:
                generator.load(arg)
        # XXX: very hackish, need refactoring
        if this.concretetype is ootype.String:
            # special case for string: don't use methods, but plain functions
            METH = this.concretetype._METHODS[method_name]
            cts = generator.cts
            ret_type = cts.lltype_to_cts(METH.RESULT)
            arg_types = [cts.lltype_to_cts(arg) for arg in METH.ARGS if arg is not ootype.Void]
            arg_types.insert(0, cts.lltype_to_cts(ootype.String))
            arg_list = ', '.join(arg_types)
            signature = '%s %s::%s(%s)' % (ret_type, STRING_HELPER_CLASS, method_name, arg_list)
            generator.call_signature(signature)
        else:
            generator.call_method(this.concretetype, method_name)
            # special case: DictItemsIterator(XXX,
            # Void).ll_current_value needs to return an int32 because
            # we can't use 'void' as a parameter of a Generic. This
            # means that after the call to ll_current_value there will
            # be a value on the stack, and we need to explicitly pop
            # it.
            if isinstance(this.concretetype, ootype.DictItemsIterator) and \
               ((this.concretetype._VALUETYPE is ootype.Void and \
                 method_name == 'll_current_value') or \
                (this.concretetype._KEYTYPE is ootype.Void and \
                 method_name == 'll_current_key')):
                generator.ilasm.pop()
    def _render_primitive_function(self, generator, callee, op):
        """Call a builtin implemented in pypylib's pypy.builtin namespace."""
        for func_arg in op.args[1:]: # push parameters
            self._load_arg_or_null(generator, func_arg)
        module, name = callee._name.split(".")
        func_name = '[pypylib]pypy.builtin.%s::%s' % (module, name)
        generator.call_op(op, func_name)
class _CallMethod(_Call):
    """Renders an 'oosend': args[0] holds the method, the rest the
    receiver and parameters."""
    def render(self, generator, op):
        v_method = op.args[0]
        self._render_method(generator, v_method.value, op.args[1:])
class _IndirectCall(_Call):
    """Renders an indirect call through a StaticMethod value: invoke the
    delegate's 'Invoke' method."""
    def render(self, generator, op):
        # the last argument only carries analysis information: drop it
        args_without_hint = op.args[:-1]
        self._render_method(generator, 'Invoke', args_without_hint)
class _RuntimeNew(MicroInstruction):
    """Instantiates a class whose System.Type is only known at runtime,
    via the pypylib RuntimeNew helper."""
    def render(self, generator, op):
        v_runtimetype = op.args[0]
        generator.load(v_runtimetype)
        generator.call_signature('object [pypylib]pypy.runtime.Utils::RuntimeNew(class [mscorlib]System.Type)')
        # the helper returns a plain object: downcast to the result type
        generator.cast_to(op.result.concretetype)
class _OOString(MicroInstruction):
    """Renders 'oostring': converts a value to its string representation
    (second argument is the numeric base)."""
    def render(self, generator, op):
        v_value, v_base = op.args
        VALUETYPE = v_value.concretetype
        # instances are passed to the helper as plain objects
        if isinstance(VALUETYPE, ootype.Instance):
            cts_type = 'object'
        else:
            cts_type = generator.cts.lltype_to_cts(VALUETYPE)
        generator.load(v_value)
        generator.load(v_base)
        generator.call_signature('string [pypylib]pypy.runtime.Utils::OOString(%s, int32)' % cts_type)
class _NewCustomDict(MicroInstruction):
    """Renders the creation of a custom dict: builds an IEqualityComparer
    from the user-supplied eq/hash functions and passes it to the dict
    constructor."""
    def render(self, generator, op):
        DICT = op.args[0].value
        # args[1:4] describe the eq function, args[4:7] the hash function
        comparer = EqualityComparer(generator.db, DICT._KEYTYPE,
                                    (op.args[1], op.args[2], op.args[3]),
                                    (op.args[4], op.args[5], op.args[6]))
        generator.db.pending_node(comparer)
        dict_type = generator.cts.lltype_to_cts(DICT)
        generator.ilasm.new(comparer.get_ctor())
        generator.ilasm.new('instance void %s::.ctor(class'
                            '[mscorlib]System.Collections.Generic.IEqualityComparer`1<!0>)'
                            % dict_type)
class _CastWeakAdrToPtr(MicroInstruction):
    """Dereferences a weak reference and downcasts the target to the
    expected result type."""
    def render(self, generator, op):
        TARGET = op.result.concretetype
        target_cts = generator.cts.lltype_to_cts(TARGET)
        generator.load(op.args[0])
        generator.ilasm.call_method('object class %s::get_Target()' % WEAKREF, True)
        generator.ilasm.opcode('castclass', target_cts)
class MapException(MicroInstruction):
    """Wraps an instruction (or instruction list) in a try/catch that
    converts the listed CLI exceptions into their RPython equivalents.

    'mapping' is a list of (cli_exception_name, rpython_exception_name)
    pairs.  Rendering depends on the active exception model: native CLI
    exceptions re-raise the mapped type, the 'last_exception' model
    stores it in the static last_exception field instead.
    """
    COUNT = 0 # counter used to generate unique label names
    def __init__(self, instr, mapping):
        # a bare string is shorthand for push-args / instr / store-result
        if isinstance(instr, str):
            self.instr = InstructionList([PushAllArgs, instr, StoreResult])
        else:
            self.instr = InstructionList(instr)
        self.mapping = mapping
    def render(self, generator, op):
        from pypy.translator.cli.function import LastExceptionHandler
        if isinstance(generator, LastExceptionHandler):
            self.render_last(generator, op)
        else:
            self.render_native(generator, op)
    def render_native(self, generator, op):
        """Native model: catch each CLI exception and throw the mapped
        RPython exception in its place."""
        ilasm = generator.ilasm
        label = '__check_block_%d' % MapException.COUNT
        MapException.COUNT += 1
        ilasm.begin_try()
        self.instr.render(generator, op)
        ilasm.leave(label)
        ilasm.end_try()
        for cli_exc, py_exc in self.mapping:
            ilasm.begin_catch(cli_exc)
            ilasm.new('instance void class %s::.ctor()' % py_exc)
            ilasm.opcode('throw')
            ilasm.end_catch()
        ilasm.label(label)
        ilasm.opcode('nop')
    def render_last(self, generator, op):
        """last_exception model: store the mapped exception into the
        static 'last_exception' field instead of throwing it."""
        ilasm = generator.ilasm
        stdflow = '__check_block_%d' % MapException.COUNT
        MapException.COUNT += 1
        premature_return = '__check_block_%d' % MapException.COUNT
        MapException.COUNT += 1
        ilasm.begin_try()
        self.instr.render(generator, op)
        ilasm.leave(stdflow)
        ilasm.end_try()
        for cli_exc, py_exc in self.mapping:
            ilasm.begin_catch(cli_exc)
            ilasm.new('instance void class %s::.ctor()' % py_exc)
            ilasm.opcode('stsfld', 'object last_exception')
            ilasm.leave(stdflow)
            ilasm.end_catch()
        ilasm.label(stdflow)
        ilasm.opcode('nop')
class _Box(MicroInstruction):
    """Boxes a primitive value into a System.Object."""
    def render(self, generator, op):
        v_value = op.args[0]
        generator.load(v_value)
        value_cts = generator.cts.lltype_to_cts(v_value.concretetype)
        generator.ilasm.opcode('box', value_cts)
class _Unbox(MicroInstruction):
    """Unboxes a System.Object into the primitive type given by the
    Void-typed second argument."""
    def render(self, generator, op):
        v_obj, v_type = op.args
        assert v_type.concretetype is ootype.Void
        target_cts = generator.cts.lltype_to_cts(v_type.value)
        generator.load(v_obj)
        generator.ilasm.opcode('unbox.any', target_cts)
class _NewArray(MicroInstruction):
    """Allocates a native CLI array: args are (element class, length)."""
    def render(self, generator, op):
        v_type, v_length = op.args
        assert v_type.concretetype is ootype.Void
        ELEMENT = v_type.value._INSTANCE
        element_tok = generator.cts.lltype_to_cts(ELEMENT)
        generator.load(v_length)
        generator.ilasm.opcode('newarr', element_tok)
class _GetArrayElem(MicroInstruction):
    """Reads array[index] via the 'ldelem' opcode."""
    def render(self, generator, op):
        v_array, v_index = op.args
        generator.load(v_array)
        generator.load(v_index)
        result_cts = generator.cts.lltype_to_cts(op.result.concretetype)
        generator.ilasm.opcode('ldelem', result_cts)
class _SetArrayElem(MicroInstruction):
    """Stores a value into a CLI array: array[index] = elem."""
    def render(self, generator, op):
        v_array, v_index, v_elem = op.args
        generator.load(v_array)
        generator.load(v_index)
        # a Void constant None stands for a null reference
        if v_elem.concretetype is ootype.Void and v_elem.value is None:
            generator.ilasm.opcode('ldnull')
        else:
            generator.load(v_elem)
        # NOTE(review): 'stelem' is emitted with the cts of the *array*
        # type, not the element type -- presumably lltype_to_cts yields a
        # usable token for arrays here; confirm against cts.lltype_to_cts.
        elemtype = generator.cts.lltype_to_cts(v_array.concretetype)
        generator.ilasm.opcode('stelem', elemtype)
class _TypeOf(MicroInstruction):
    """Pushes the System.Type object of a class known at compile time
    (the CLI equivalent of C#'s 'typeof')."""
    def render(self, generator, op):
        v_class, = op.args
        assert v_class.concretetype is ootype.Void
        instance_name = v_class.value._INSTANCE._name
        generator.ilasm.opcode('ldtoken', instance_name)
        generator.ilasm.call('class [mscorlib]System.Type class [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)')
# Prebuilt singleton instances of the micro-instructions above; the CLI
# opcode table maps low-level operations onto these objects.
Call = _Call()
CallMethod = _CallMethod()
IndirectCall = _IndirectCall()
RuntimeNew = _RuntimeNew()
OOString = _OOString()
NewCustomDict = _NewCustomDict()
CastWeakAdrToPtr = _CastWeakAdrToPtr()
Box = _Box()
Unbox = _Unbox()
NewArray = _NewArray()
GetArrayElem = _GetArrayElem()
SetArrayElem = _SetArrayElem()
TypeOf = _TypeOf()
| Python |
import types
from pypy.annotation.pairtype import pair, pairtype
from pypy.annotation.model import SomeObject, SomeInstance, SomeOOInstance, SomeInteger, s_None,\
s_ImpossibleValue, lltype_to_annotation, annotation_to_lltype, SomeChar, SomeString, SomePBC
from pypy.annotation.binaryop import _make_none_union
from pypy.annotation import model as annmodel
from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong
from pypy.rpython.error import TyperError
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rpython.rmodel import Repr
from pypy.rpython.rint import IntegerRepr
from pypy.rpython.ootypesystem.rootype import OOInstanceRepr
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.ootypesystem.ootype import meth, overload, Meth, StaticMethod
from pypy.translator.cli.support import PythonNet
## Annotation model
class SomeCliClass(SomeObject):
    """Annotation of a (constant) CliClass object."""
    def getattr(self, s_attr):
        # Class.attr denotes a static method of the .NET class
        assert self.is_constant()
        assert s_attr.is_constant()
        return SomeCliStaticMethod(self.const, s_attr.const)
    def simple_call(self, *s_args):
        # calling the class builds an instance of its low-level INSTANCE
        assert self.is_constant()
        return SomeOOInstance(self.const._INSTANCE)
    def rtyper_makerepr(self, rtyper):
        return CliClassRepr(self.const)
    def rtyper_makekey(self):
        return self.__class__, self.const
class SomeCliStaticMethod(SomeObject):
    """Annotation of a static method bound to a .NET class."""
    def __init__(self, cli_class, meth_name):
        self.cli_class = cli_class
        self.meth_name = meth_name
    def simple_call(self, *args_s):
        # delegate overload resolution/annotation to the owning class
        return self.cli_class._ann_static_method(self.meth_name, args_s)
    def rtyper_makerepr(self, rtyper):
        return CliStaticMethodRepr(self.cli_class, self.meth_name)
    def rtyper_makekey(self):
        return self.__class__, self.cli_class, self.meth_name
# Annotation of indexing a native CLI array with an integer.
class __extend__(pairtype(SomeOOInstance, SomeInteger)):
    def getitem((ooinst, index)):
        # only native arrays support indexing
        if ooinst.ootype._isArray:
            return SomeOOInstance(ooinst.ootype._ELEMENT)
        return s_ImpossibleValue
    def setitem((ooinst, index), s_value):
        if ooinst.ootype._isArray:
            # storing None (a null reference) is always allowed
            if s_value is annmodel.s_None:
                return s_None
            ELEMENT = ooinst.ootype._ELEMENT
            VALUE = s_value.ootype
            assert ootype.isSubclass(VALUE, ELEMENT)
            return s_None
        return s_ImpossibleValue
## Rtyper model
class CliClassRepr(Repr):
    """Repr of a CliClass: the class itself is a compile-time constant,
    hence lowleveltype is Void."""
    lowleveltype = ootype.Void
    def __init__(self, cli_class):
        self.cli_class = cli_class
    def rtype_getattr(self, hop):
        # static methods are resolved at compile time: emit a Void const
        return hop.inputconst(ootype.Void, self.cli_class)
    def rtype_simple_call(self, hop):
        # TODO: resolve constructor overloading
        INSTANCE = hop.args_r[0].cli_class._INSTANCE
        cINST = hop.inputconst(ootype.Void, INSTANCE)
        vlist = hop.inputargs(*hop.args_r)[1:] # discard the first argument
        hop.exception_is_here()
        return hop.genop("new", [cINST]+vlist, resulttype=hop.r_result.lowleveltype)
class CliStaticMethodRepr(Repr):
    """Repr of a bound static method of a .NET class; lowleveltype is
    Void because the method identity is a compile-time constant."""
    lowleveltype = ootype.Void
    def __init__(self, cli_class, meth_name):
        self.cli_class = cli_class
        self.meth_name = meth_name
    def _build_desc(self, args_v):
        # resolve the overloading from the concrete argument types
        ARGS = tuple([v.concretetype for v in args_v])
        return self.cli_class._lookup(self.meth_name, ARGS)
    def rtype_simple_call(self, hop):
        vlist = []
        for i, repr in enumerate(hop.args_r[1:]):
            vlist.append(hop.inputarg(repr, i+1))
        resulttype = hop.r_result.lowleveltype
        desc = self._build_desc(vlist)
        cDesc = hop.inputconst(ootype.Void, desc)
        return hop.genop("direct_call", [cDesc] + vlist, resulttype=resulttype)
# Rtyping of array indexing on native CLI arrays.
class __extend__(pairtype(OOInstanceRepr, IntegerRepr)):
    def rtype_getitem((r_inst, r_int), hop):
        if not r_inst.lowleveltype._isArray:
            raise TyperError("getitem() on a non-array instance")
        v_array, v_index = hop.inputargs(r_inst, ootype.Signed)
        hop.exception_is_here()
        return hop.genop('cli_getelem', [v_array, v_index], hop.r_result.lowleveltype)
    def rtype_setitem((r_inst, r_int), hop):
        if not r_inst.lowleveltype._isArray:
            raise TyperError("setitem() on a non-array instance")
        vlist = hop.inputargs(*hop.args_r)
        hop.exception_is_here()
        return hop.genop('cli_setelem', vlist, hop.r_result.lowleveltype)
# Rtyping of len() on native CLI arrays.
class __extend__(OOInstanceRepr):
    def rtype_len(self, hop):
        if not self.lowleveltype._isArray:
            # raise TyperError (not the builtin TypeError), consistent
            # with the getitem/setitem helpers above
            raise TyperError("len() on a non-array instance")
        vlist = hop.inputargs(*hop.args_r)
        hop.exception_cannot_occur()
        return hop.genop('cli_arraylength', vlist, hop.r_result.lowleveltype)
## OOType model
class OverloadingResolver(ootype.OverloadingResolver):
    """Overloading resolver aware of .NET-specific conversions."""
    def _can_convert_from_to(self, ARG1, ARG2):
        if ARG1 is ootype.Void and isinstance(ARG2, NativeInstance):
            return True # ARG1 could be None, that is always convertible to a NativeInstance
        else:
            return ootype.OverloadingResolver._can_convert_from_to(self, ARG1, ARG2)
    def annotation_to_lltype(cls, ann):
        # map chars and strings to the builtin ootypes explicitly
        if isinstance(ann, SomeChar):
            return ootype.Char
        elif isinstance(ann, SomeString):
            return ootype.String
        else:
            return annotation_to_lltype(ann)
    annotation_to_lltype = classmethod(annotation_to_lltype)
    def lltype_to_annotation(cls, TYPE):
        # inverse of annotation_to_lltype, plus NativeInstance support
        if isinstance(TYPE, NativeInstance):
            return SomeOOInstance(TYPE)
        elif TYPE is ootype.Char:
            return SomeChar()
        elif TYPE is ootype.String:
            return SomeString()
        else:
            return lltype_to_annotation(TYPE)
    lltype_to_annotation = classmethod(lltype_to_annotation)
class _static_meth(object):
def __init__(self, TYPE):
self._TYPE = TYPE
def _set_attrs(self, cls, name):
self._cls = cls
self._name = name
def _get_desc(self, ARGS):
#assert ARGS == self._TYPE.ARGS
return self
class _overloaded_static_meth(object):
def __init__(self, *overloadings, **attrs):
resolver = attrs.pop('resolver', OverloadingResolver)
assert not attrs
self._resolver = resolver(overloadings)
def _set_attrs(self, cls, name):
for meth in self._resolver.overloadings:
meth._set_attrs(cls, name)
def _get_desc(self, ARGS):
meth = self._resolver.resolve(ARGS)
assert isinstance(meth, _static_meth)
return meth._get_desc(ARGS)
class NativeInstance(ootype.Instance):
    """ootype Instance describing a native .NET class."""
    def __init__(self, assembly, namespace, name, superclass,
                 fields={}, methods={}, _is_root=False, _hints = {}):
        # NOTE(review): no separator between assembly and namespace --
        # 'assembly' presumably carries its own delimiters (e.g.
        # '[mscorlib]'); confirm at the call sites.
        fullname = '%s%s.%s' % (assembly, namespace, name)
        self._namespace = namespace
        self._classname = name
        ootype.Instance.__init__(self, fullname, superclass, fields, methods, _is_root, _hints)
## RPython interface definition
class CliClass(object):
    """RPython-level handle of a .NET class: records its low-level
    INSTANCE type and static methods, and proxies attribute access and
    instantiation to PythonNet when executed on top of CPython."""
    def __init__(self, INSTANCE, static_methods):
        self._name = INSTANCE._name
        self._INSTANCE = INSTANCE
        self._static_methods = {}
        self._add_methods(static_methods)
    def __repr__(self):
        return '<%s>' % (self,)
    def __str__(self):
        return '%s(%s)' % (self.__class__.__name__, self._INSTANCE._name)
    def _add_methods(self, methods):
        # register the methods and tell each one its owner and name
        self._static_methods.update(methods)
        for name, meth in methods.iteritems():
            meth._set_attrs(self, name)
    def _lookup(self, meth_name, ARGS):
        # resolve a static-method overloading by argument types
        meth = self._static_methods[meth_name]
        return meth._get_desc(ARGS)
    def _ann_static_method(self, meth_name, args_s):
        # annotate a call to the named static method
        meth = self._static_methods[meth_name]
        return meth._resolver.annotate(args_s)
    def _load_class(self):
        # import the real class through PythonNet, walking the namespace
        names = self._INSTANCE._namespace.split('.')
        names.append(self._INSTANCE._classname)
        obj = PythonNet
        for name in names:
            obj = getattr(obj, name)
        self._PythonNet_class = obj
    def __getattr__(self, attr):
        # only registered static methods are forwarded to PythonNet
        if attr in self._static_methods:
            self._load_class()
            return getattr(self._PythonNet_class, attr)
        else:
            raise AttributeError
    def __call__(self, *args):
        # instantiating the CliClass instantiates the real .NET class
        self._load_class()
        return self._PythonNet_class(*args)
class Entry(ExtRegistryEntry):
    """Annotation support: a CliClass constant annotates to
    SomeCliClass; calling it yields an instance of its INSTANCE."""
    _type_ = CliClass

    def compute_annotation(self):
        return SomeCliClass()

    def compute_result_annotation(self):
        return SomeOOInstance(self.instance._INSTANCE)
class CliNamespace(object):
    """Lazy proxy of a .NET namespace: attribute access loads classes
    or sub-namespaces on demand."""
    def __init__(self, name):
        self._name = name

    def __fullname(self, name):
        if self._name is None:
            return name
        else:
            return '%s.%s' % (self._name, name)

    def __getattr__(self, attr):
        from pypy.translator.cli.query import load_class_or_namespace
        # .NET namespaces are not entities by themselves but just parts
        # of the FullName of a class. This implies that there is no way
        # to ask .NET whether a particular name is a namespace; there
        # are many names that are clearly not namespaces such as
        # im_self and _freeze_, but there is no general rule and we
        # have to guess. For now, the heuristic simply checks whether
        # the first char of the name is an uppercase letter.
        if attr[0].isalpha() and attr[0] == attr[0].upper():
            # we assume it's a class or namespace
            name = self.__fullname(attr)
            load_class_or_namespace(name)
            assert attr in self.__dict__
            return getattr(self, attr)
        else:
            raise AttributeError
# root of the lazily-populated .NET namespace hierarchy
CLR = CliNamespace(None)

# lltypes that box() can convert to a System.Object as value types
BOXABLE_TYPES = [ootype.Signed, ootype.Unsigned, ootype.SignedLongLong,
                 ootype.UnsignedLongLong, ootype.Bool, ootype.Float,
                 ootype.Char, ootype.String]
class BoxedSpace:
    """Registry mapping integer handles to arbitrary objects.

    Used by box()/unbox() at interpretation time: an object is stored
    under a fresh handle, later retrieved through that same handle.
    """
    objects = {}
    index = 0

    def put(cls, obj):
        # hand out the next free handle and remember the object under it
        handle = cls.index
        cls.index = handle + 1
        cls.objects[handle] = obj
        return handle
    put = classmethod(put)

    def get(cls, handle):
        # look up a previously stored object by its handle
        return cls.objects[handle]
    get = classmethod(get)
def box(x):
    """Convert the RPython value *x* into a System.Object.

    At interpretation time primitives are wrapped through the
    corresponding PythonNet class.  Dispatch is on the *exact* type of
    x, so e.g. a bool is handled by the bool case, not the int one.
    """
    t = type(x)
    if t is int:
        return CLR.System.Int32(x)
    elif t is r_uint:
        return CLR.System.UInt32(x)
    elif t is r_longlong:
        return CLR.System.Int64(x)
    elif t is r_ulonglong:
        return CLR.System.UInt64(x)
    elif t is bool:
        return CLR.System.Boolean(x)
    elif t is float:
        return CLR.System.Double(x)
    elif t is str or t is unicode:
        # single characters map to System.Char, longer strings to String
        if len(x) == 1:
            return CLR.System.Char(x)
        else:
            return CLR.System.String(x)
    elif isinstance(x, PythonNet.System.Object):
        return x
    elif x is None:
        return None
    else:
        # cast RPython instances to System.Object is trivial when
        # translated but not when interpreting, because Python for
        # .NET doesn't support passing arbitrary Python objects to
        # .NET. To solve, we store them in the BoxedSpace, then we
        # return an opaque object, which will be used by unbox to
        # retrieve the original RPython instance.
        index = BoxedSpace.put(x)
        res = PythonNet.pypy.test.ObjectWrapper(index)
        return res
def unbox(x, TYPE):
    """Convert the System.Object *x* back to a value of the given TYPE.

    When TYPE is a Python class, returns None if x is not an instance
    of it.
    """
    if isinstance(x, PythonNet.pypy.test.ObjectWrapper):
        # x was boxed through the BoxedSpace; fetch the original object
        x = BoxedSpace.get(x.index)

    if isinstance(TYPE, (type, types.ClassType)):
        # we need to check the TYPE and return None if it fails
        if isinstance(x, TYPE):
            return x
        else:
            return None

    # TODO: do the typechecking also in the other cases

    # this is a workaround against a pythonnet limitation: you can't
    # directly get the, e.g., python int from the System.Int32 object:
    # a simple way to do this is to put it into an ArrayList and
    # retrieve the value.
    tmp = PythonNet.System.Collections.ArrayList()
    tmp.Add(x)
    return tmp[0]
class Entry(ExtRegistryEntry):
    """Annotation/rtyping support for box()."""
    _about_ = box

    def compute_result_annotation(self, x_s):
        can_be_None = getattr(x_s, 'can_be_None', False)
        return SomeOOInstance(CLR.System.Object._INSTANCE, can_be_None=can_be_None)

    def specialize_call(self, hop):
        v_obj, = hop.inputargs(*hop.args_r)
        hop.exception_cannot_occur()
        TYPE = v_obj.concretetype
        if (TYPE is ootype.String or isinstance(TYPE, (ootype.Instance, ootype.BuiltinType, NativeInstance))):
            # reference types only need an upcast to System.Object
            return hop.genop('ooupcast', [v_obj], hop.r_result.lowleveltype)
        else:
            # value types are boxed by the backend ('clibox')
            # NOTE(review): TyperError is presumably imported elsewhere
            # in this module -- it does not appear in the visible imports
            if TYPE not in BOXABLE_TYPES:
                raise TyperError, "Can't box values of type %s" % v_obj.concretetype
            return hop.genop('clibox', [v_obj], hop.r_result.lowleveltype)
class Entry(ExtRegistryEntry):
    """Annotation/rtyping support for unbox()."""
    _about_ = unbox

    def compute_result_annotation(self, x_s, type_s):
        assert isinstance(x_s, SomeOOInstance)
        assert x_s.ootype == CLR.System.Object._INSTANCE
        assert type_s.is_constant()
        TYPE = type_s.const
        if isinstance(TYPE, (type, types.ClassType)):
            # it's a user-defined class, so we return SomeInstance
            # can_be_None == True because it can always return None, if it fails
            classdef = self.bookkeeper.getuniqueclassdef(TYPE)
            return SomeInstance(classdef, can_be_None=True)
        else:
            assert TYPE in BOXABLE_TYPES
            return OverloadingResolver.lltype_to_annotation(TYPE)

    def specialize_call(self, hop):
        v_obj, v_type = hop.inputargs(*hop.args_r)
        if v_type.value is ootype.String or isinstance(v_type.value, (type, types.ClassType)):
            # reference types: a downcast is enough
            return hop.genop('oodowncast', [v_obj], hop.r_result.lowleveltype)
        else:
            # value types are unboxed by the backend ('cliunbox')
            return hop.genop('cliunbox', [v_obj, v_type], hop.r_result.lowleveltype)
# cache of exception classes already built, keyed by cliClass name
native_exc_cache = {}

def NativeException(cliClass):
    """Return (and cache) the Python exception class corresponding to
    the given native CLI exception class."""
    key = cliClass._name
    if key not in native_exc_cache:
        native_exc_cache[key] = _create_NativeException(cliClass)
    return native_exc_cache[key]
def _create_NativeException(cliClass):
    """Build the exception class for *cliClass*: the real .NET class
    when running under pythonnet, a fake Exception subclass otherwise."""
    from pypy.translator.cli.query import getattr_ex
    TYPE = cliClass._INSTANCE
    if PythonNet.__name__ == 'CLR':
        # we are using pythonnet -- use the .NET class
        name = '%s.%s' % (TYPE._namespace, TYPE._classname)
        res = getattr_ex(PythonNet, name)
    else:
        # we are not using pythonnet -- create a fake class
        res = types.ClassType(TYPE._classname, (Exception,), {})
        res._rpython_hints = {'NATIVE_INSTANCE': TYPE}
    return res
def native_exc(exc):
    """Identity at run time; its Entry below makes the annotator view
    *exc* as the native instance recorded on its class."""
    return exc
class Entry(ExtRegistryEntry):
    """Annotation/rtyping support for native_exc()."""
    _about_ = native_exc

    def compute_result_annotation(self, exc_s):
        # the NATIVE_INSTANCE hint is set by _create_NativeException
        assert isinstance(exc_s, SomeInstance)
        cls = exc_s.classdef.classdesc.pyobj
        assert issubclass(cls, Exception)
        NATIVE_INSTANCE = cls._rpython_hints['NATIVE_INSTANCE']
        return SomeOOInstance(NATIVE_INSTANCE)

    def specialize_call(self, hop):
        # no conversion needed at the ll level
        v_obj, = hop.inputargs(*hop.args_r)
        return hop.genop('same_as', [v_obj], hop.r_result.lowleveltype)
def new_array(type, length):
    """Interpretation-time stand-in for creating a CLI array of the
    given element type: a list with *length* empty (None) slots."""
    return [None for _ in range(length)]
def init_array(type, *args):
    """Interpretation-time stand-in for creating an initialized CLI
    array.

    PythonNet doesn't provide a straightforward way to create arrays,
    so we fake it with a plain list holding the given elements.
    """
    return [item for item in args]
class Entry(ExtRegistryEntry):
    """Annotation/rtyping support for new_array()."""
    _about_ = new_array

    def compute_result_annotation(self, type_s, length_s):
        from pypy.translator.cli.query import load_class_maybe
        assert type_s.is_constant()
        assert isinstance(length_s, SomeInteger)
        # the result is an array of the given element class
        TYPE = type_s.const._INSTANCE
        fullname = '%s.%s[]' % (TYPE._namespace, TYPE._classname)
        cliArray = load_class_maybe(fullname)
        return SomeOOInstance(cliArray._INSTANCE)

    def specialize_call(self, hop):
        c_type, v_length = hop.inputargs(*hop.args_r)
        hop.exception_cannot_occur()
        return hop.genop('cli_newarray', [c_type, v_length], hop.r_result.lowleveltype)
class Entry(ExtRegistryEntry):
    """Annotation/rtyping support for init_array()."""
    _about_ = init_array

    def compute_result_annotation(self, type_s, *args_s):
        from pypy.translator.cli.query import load_class_maybe
        assert type_s.is_constant()
        TYPE = type_s.const._INSTANCE
        # every element must be of the declared item type
        for i, arg_s in enumerate(args_s):
            if TYPE is not arg_s.ootype:
                raise TypeError, 'Wrong type of arg #%d: %s expected, %s found' % \
                      (i, TYPE, arg_s.ootype)
        fullname = '%s.%s[]' % (TYPE._namespace, TYPE._classname)
        cliArray = load_class_maybe(fullname)
        return SomeOOInstance(cliArray._INSTANCE)

    def specialize_call(self, hop):
        # emit a cli_newarray followed by one cli_setelem per element
        vlist = hop.inputargs(*hop.args_r)
        c_type, v_elems = vlist[0], vlist[1:]
        c_length = hop.inputconst(ootype.Signed, len(v_elems))
        hop.exception_cannot_occur()
        v_array = hop.genop('cli_newarray', [c_type, c_length], hop.r_result.lowleveltype)
        for i, v_elem in enumerate(v_elems):
            c_index = hop.inputconst(ootype.Signed, i)
            hop.genop('cli_setelem', [v_array, c_index, v_elem], ootype.Void)
        return v_array
def typeof(cliClass):
    """Interpretation-time equivalent of the C# typeof operator: return
    the System.Type object for *cliClass* (via PythonNet)."""
    TYPE = cliClass._INSTANCE
    name = '%s.%s' % (TYPE._namespace, TYPE._classname)
    return PythonNet.System.Type.GetType(name)
class Entry(ExtRegistryEntry):
    """Annotation/rtyping support for typeof()."""
    _about_ = typeof

    def compute_result_annotation(self, cliClass_s):
        # use the absolute import path, consistently with the other
        # Entry classes in this module: the bare 'from query import
        # ...' relied on implicit relative imports
        from pypy.translator.cli.query import load_class_maybe
        assert cliClass_s.is_constant()
        cliType = load_class_maybe('System.Type')
        return SomeOOInstance(cliType._INSTANCE)

    def specialize_call(self, hop):
        v_type, = hop.inputargs(*hop.args_r)
        return hop.genop('cli_typeof', [v_type], hop.r_result.lowleveltype)
| Python |
from pypy.rpython.ootypesystem import ootype
from pypy.translator.cli.cts import CTS
from pypy.translator.cli.node import Node
class Delegate(Node):
    """Render a StaticMethod type as a CLI delegate class, i.e. a
    sealed subclass of System.MulticastDelegate."""
    def __init__(self, db, TYPE, name):
        self.cts = CTS(db)
        self.TYPE = TYPE
        self.name = name

    def __eq__(self, other):
        # two Delegate nodes are the same iff they wrap the same TYPE
        return self.TYPE == other.TYPE

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.TYPE)

    def get_name(self):
        return self.name

    def dependencies(self):
        # record we know about result and argument types
        self.cts.lltype_to_cts(self.TYPE.RESULT)
        for ARG in self.TYPE.ARGS:
            self.cts.lltype_to_cts(ARG)

    def render(self, ilasm):
        """Emit the delegate class: only .ctor and Invoke need to be
        declared; their bodies are supplied by the runtime
        (runtime=True)."""
        TYPE = self.TYPE
        ilasm.begin_class(self.name, '[mscorlib]System.MulticastDelegate', sealed=True)
        ilasm.begin_function('.ctor',
                             [('object', "'object'"), ('native int', "'method'")],
                             'void',
                             False,
                             'hidebysig', 'specialname', 'rtspecialname', 'instance', 'default',
                             runtime=True)
        ilasm.end_function()

        # Void arguments are dropped from the Invoke signature
        resulttype = self.cts.lltype_to_cts(TYPE.RESULT)
        arglist = [(self.cts.lltype_to_cts(ARG), '') for ARG in TYPE.ARGS if ARG is not ootype.Void]
        ilasm.begin_function('Invoke', arglist, resulttype, False,
                             'virtual', 'hidebysig', 'instance', 'default',
                             runtime=True)
        ilasm.end_function()
        ilasm.end_class()
| Python |
class Node(object):
    """Abstract base for everything the CLI backend renders: a node can
    report its name, register its dependencies and emit ilasm code.
    The default implementations do nothing."""

    def get_name(self):
        """Return the node's name (default: nothing)."""
        return None

    def dependencies(self):
        """Record the types/nodes this node depends on (default: nothing)."""
        return None

    def render(self, ilasm):
        """Emit this node's code through *ilasm* (default: nothing)."""
        return None
| Python |
from pypy.rpython.ootypesystem import ootype
from pypy.translator.cli.node import Node
from pypy.translator.cli.cts import CTS
class Record(Node):
    """Render an ootype Record as a CLI class: one field per non-Void
    record field, plus ToString/Equals/GetHashCode implementations."""

    def __init__(self, db, record, name):
        self.db = db
        self.cts = CTS(db)
        self.record = record
        self.name = name

    def __hash__(self):
        return hash(self.record)

    def __eq__(self, other):
        # two Record nodes are the same iff they wrap the same record
        return self.record == other.record

    def __ne__(self, other):
        return not self == other

    def get_name(self):
        return self.name

    def get_base_class(self):
        return '[mscorlib]System.Object'

    def render(self, ilasm):
        """Emit the whole class: fields, ctor and object-protocol
        methods."""
        self.ilasm = ilasm
        ilasm.begin_class(self.name, self.get_base_class())
        for f_name, (FIELD_TYPE, f_default) in self.record._fields.iteritems():
            f_name = self.cts.escape_name(f_name)
            cts_type = self.cts.lltype_to_cts(FIELD_TYPE)
            # Void fields are not materialized
            if cts_type != 'void':
                ilasm.field(f_name, cts_type)
        self._ctor()
        self._toString()
        self._equals()
        self._getHashCode()
        ilasm.end_class()

    def _ctor(self):
        # default constructor: just delegate to the base class ctor
        self.ilasm.begin_function('.ctor', [], 'void', False, 'specialname', 'rtspecialname', 'instance')
        self.ilasm.opcode('ldarg.0')
        self.ilasm.call('instance void %s::.ctor()' % self.get_base_class())
        self.ilasm.opcode('ret')
        self.ilasm.end_function()

    def _toString(self):
        # only for testing purposes, and only if the Record represents a tuple
        from pypy.translator.cli.test.runtest import format_object
        for f_name in self.record._fields:
            if not f_name.startswith('item'):
                return # it's not a tuple
        # build "(item0, item1, ..., )" by successive string::Concat calls
        self.ilasm.begin_function('ToString', [], 'string', False, 'virtual', 'instance', 'default')
        self.ilasm.opcode('ldstr', '"("')
        for i in xrange(len(self.record._fields)):
            f_name = 'item%d' % i
            FIELD_TYPE, f_default = self.record._fields[f_name]
            if FIELD_TYPE is ootype.Void:
                continue
            self.ilasm.opcode('ldarg.0')
            f_type = self.cts.lltype_to_cts(FIELD_TYPE)
            self.ilasm.get_field((f_type, self.name, f_name))
            format_object(FIELD_TYPE, self.cts, self.ilasm)
            self.ilasm.call('string string::Concat(string, string)')
            self.ilasm.opcode('ldstr ", "')
            self.ilasm.call('string string::Concat(string, string)')
        self.ilasm.opcode('ldstr ")"')
        self.ilasm.call('string string::Concat(string, string)')
        self.ilasm.opcode('ret')
        self.ilasm.end_function()

    def _equals(self):
        # field by field comparison, and-ing the partial results
        record_type = self.cts.lltype_to_cts(self.record, include_class=False)
        class_record_type = self.cts.lltype_to_cts(self.record, include_class=True)
        self.ilasm.begin_function('Equals', [('object', 'obj')], 'bool',
                                  False, 'virtual', 'instance', 'default')
        self.ilasm.locals([(class_record_type, 'self')])
        self.ilasm.opcode('ldarg.1')
        self.ilasm.opcode('castclass', record_type)
        self.ilasm.opcode('stloc.0')
        equal = 'bool [pypylib]pypy.runtime.Utils::Equal<%s>(!!0, !!0)'
        self.ilasm.opcode('ldc.i4', '1')
        for f_name, (FIELD_TYPE, default) in self.record._fields.iteritems():
            if FIELD_TYPE is ootype.Void:
                continue
            f_type = self.cts.lltype_to_cts(FIELD_TYPE)
            f_name = self.cts.escape_name(f_name)
            self.ilasm.opcode('ldarg.0')
            self.ilasm.get_field((f_type, record_type, f_name))
            self.ilasm.opcode('ldloc.0')
            self.ilasm.get_field((f_type, record_type, f_name))
            self.ilasm.call(equal % f_type)
            self.ilasm.opcode('and')
        self.ilasm.opcode('ret')
        self.ilasm.end_function()

    def _getHashCode(self):
        # return the hash of the first field. XXX: it can lead to a bad distribution
        record_type = self.cts.lltype_to_cts(self.record, include_class=False)
        self.ilasm.begin_function('GetHashCode', [], 'int32', False, 'virtual', 'instance', 'default')
        gethash = 'int32 [pypylib]pypy.runtime.Utils::GetHashCode<%s>(!!0)'
        if self.record._fields:
            f_name, (FIELD_TYPE, default) = self.record._fields.iteritems().next()
            if FIELD_TYPE is ootype.Void:
                # no usable field: constant hash 0
                self.ilasm.opcode('ldc.i4.0')
            else:
                f_name = self.cts.escape_name(f_name)
                f_type = self.cts.lltype_to_cts(FIELD_TYPE)
                self.ilasm.opcode('ldarg.0')
                self.ilasm.get_field((f_type, record_type, f_name))
                self.ilasm.call(gethash % f_type)
        else:
            # empty record: constant hash 0
            self.ilasm.opcode('ldc.i4.0')
        self.ilasm.opcode('ret')
        self.ilasm.end_function()
| Python |
import os.path
import platform
import py
class AbstractSDK(object):
    """Locate the tools of a .NET SDK; the current test is skipped when
    a required helper is not on PATH."""
    def _check_helper(cls, helper):
        # return *helper* if found on PATH, otherwise skip the test
        if py.path.local.sysfind(helper) is None:
            py.test.skip("%s is not on your path." % helper)
        else:
            return helper
    _check_helper = classmethod(_check_helper)

    def runtime(cls):
        """Return the command prefix used to run a CLI executable."""
        for item in cls.RUNTIME:
            cls._check_helper(item)
        return cls.RUNTIME
    runtime = classmethod(runtime)

    def ilasm(cls):
        """Return the IL assembler executable name."""
        return cls._check_helper(cls.ILASM)
    ilasm = classmethod(ilasm)

    def csc(cls):
        """Return the C# compiler executable name."""
        return cls._check_helper(cls.CSC)
    csc = classmethod(csc)

    def peverify(cls):
        """Return the IL verifier executable name."""
        return cls._check_helper(cls.PEVERIFY)
    peverify = classmethod(peverify)
class MicrosoftSDK(AbstractSDK):
    # executables run natively on Windows: no runtime prefix needed
    RUNTIME = []
    ILASM = 'ilasm'
    CSC = 'csc'
    PEVERIFY = 'peverify'
class MonoSDK(AbstractSDK):
    # executables must be run through the mono runtime
    RUNTIME = ['mono']
    ILASM = 'ilasm2'
    CSC = 'gmcs'
    PEVERIFY = 'peverify' # it's not part of mono, but we get a meaningful skip message
def key_as_dict(handle):
    """Read all the values of the given open registry key into a dict
    (Windows only)."""
    import _winreg
    i = 0
    res = {}
    while True:
        try:
            name, value, type_ = _winreg.EnumValue(handle, i)
            res[name] = value
            i += 1
        except WindowsError:
            # EnumValue raises when there are no more values
            break
    return res
def find_mono_on_windows():
    """Return the path of mono's bin directory on Windows, looked up in
    the registry; None when not on Windows or mono is not installed."""
    if platform.system() != 'Windows':
        return None
    import _winreg
    try:
        hMono = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "Software\\Novell\\Mono")
    except WindowsError: # mono seems not installed
        return None
    try:
        mono = key_as_dict(hMono)
    finally:
        # bug fix: the handle used to leak when DefaultCLR was missing
        hMono.Close()
    mono_version = mono.get('DefaultCLR', None)
    if mono_version is None:
        return None

    hMono_data = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "Software\\Novell\\Mono\\%s" % mono_version)
    try:
        mono_data = key_as_dict(hMono_data)
    finally:
        # bug fix: this handle was never closed
        hMono_data.Close()
    mono_dir = str(mono_data['SdkInstallRoot'])
    return os.path.join(mono_dir, 'bin')
def get_default_SDK():
    """Pick the SDK class for the current platform: Microsoft's on
    Windows (preferring mono's ilasm2 when installed), mono elsewhere."""
    if platform.system() != 'Windows':
        return MonoSDK
    sdk = MicrosoftSDK
    # if present, use mono ilasm2 instead of MS ilasm
    mono_bin = find_mono_on_windows()
    if mono_bin is not None:
        sdk.ILASM = os.path.join(mono_bin, 'ilasm2.bat')
    return sdk

SDK = get_default_SDK()
| Python |
from pypy.rpython.ootypesystem import ootype
def get_method_name(graph, op):
    """If *op* is a call to a function whose oopspec describes a method
    of a builtin type (e.g. 'list.Add(...)'), return that method name,
    else None."""
    try:
        oopspec = graph.func.oopspec
    except AttributeError:
        return None

    # TODO: handle parsing of arguments; by now it is assumed that
    # builtin methods take the same arguments of the corresponding
    # ll_* function.
    full_name, _ = oopspec.split('(', 1)

    if len(full_name.split('.')) != 2:
        return None

    try:
        type_name, method_name = full_name.split('.')
    except ValueError:
        return None

    try:
        type_ = BUILTIN_TYPES[type_name]
    except KeyError:
        return None

    # op.args[1] is the receiver; it must really be of the builtin type
    this = op.args[1]
    if isinstance(this.concretetype, type_) and method_name in BUILTIN_METHODS[type_]:
        return method_name
    else:
        return None # explicit is better than implicit :-)
def get_method(TYPE, name):
    """Look up the method *name* on *TYPE*, falling back to the
    backend-provided BUILTIN_METHODS table."""
    # special case: when having List of Void, look at the concrete
    # methods, not the generic ones
    if isinstance(TYPE, ootype.List) and TYPE._ITEMTYPE is ootype.Void:
        meths = TYPE._METHODS
    else:
        meths = TYPE._GENERIC_METHODS
    try:
        return meths[name]
    except KeyError:
        return BUILTIN_METHODS[type(TYPE)][name]
# oopspec type names recognized by get_method_name
BUILTIN_TYPES = {
    'list': ootype.List
    }

# per-type table of methods supplied by the backend itself
BUILTIN_METHODS = {
    ootype.List : {
        'Add': ootype.Meth([ootype.List.ITEMTYPE_T], ootype.Void)
    }
}
| Python |
import py
from pypy.rpython.ootypesystem import ootype
from pypy.translator.cli.rte import Support
from pypy.tool.ansi_print import ansi_log
# logging setup for the cli backend
log = py.log.Producer("cli")
py.log.setconsumer("cli", ansi_log)

try:
    # load pythonnet and our support assembly; when pythonnet is not
    # available, PythonNet becomes a stub that skips the current test
    # on any attribute access
    import CLR as PythonNet
    PythonNet.System.Reflection.Assembly.LoadFile(Support.get())
except ImportError:
    class _PythonNet:
        __name__ = None
        def __getattr__(self, attr):
            py.test.skip('Must use pythonnet for being able to access .NET libraries')
    PythonNet = _PythonNet()
    del _PythonNet
# some code has been stolen from genc
def string_literal(s):
def char_repr(c):
if c in '\\"': return '\\' + c
if ' ' <= c < '\x7F': return c
if c == '\n': return '\\n'
if c == '\t': return '\\t'
raise ValueError
def line_repr(s):
return ''.join([char_repr(c) for c in s])
def array_repr(s):
return ' '.join(['%x 00' % ord(c) for c in s]+['00'])
try:
return '"%s"' % line_repr(s)
except ValueError:
return "bytearray ( %s )" % array_repr(s)
class Tee(object):
    """File-like object that duplicates every write to several
    underlying files."""

    def __init__(self, *args):
        self.outfiles = args

    def write(self, s):
        """Write *s* to every underlying file."""
        for outfile in self.outfiles:
            outfile.write(s)

    def close(self):
        """Close every underlying file, except sys.stdout."""
        # bug fix: 'sys' does not appear among this module's imports,
        # so referencing it here raised NameError; import it locally
        import sys
        for outfile in self.outfiles:
            if outfile is not sys.stdout:
                outfile.close()
class Counter:
    """Collect counts of labelled events and dump them, sorted by
    label, to a file."""

    def __init__(self):
        self.counters = {}

    def inc(self, *label):
        """Increment the counter identified by the given label tuple."""
        self.counters[label] = self.counters.get(label, 0) + 1

    def dump(self, filename):
        """Write one 'label: count' line per counter to *filename*,
        sorted by label."""
        # open() instead of the Python2-only file() builtin; close the
        # file even if a write fails
        f = open(filename, 'w')
        try:
            for key in sorted(self.counters):
                label = ', '.join([str(item) for item in key])
                f.write('%s: %d\n' % (label, self.counters[key]))
        finally:
            f.close()
# values of the os.O_* flags on Windows (NT): _patch_os installs them
# so that flag constants baked into the generated code do not depend
# on the platform pypy is compiled on
NT_OS = dict(
    O_RDONLY = 0x0000,
    O_WRONLY = 0x0001,
    O_RDWR = 0x0002,
    O_APPEND = 0x0008,
    O_CREAT = 0x0100,
    O_TRUNC = 0x0200,
    O_TEXT = 0x4000,
    O_BINARY = 0x8000
    )
def _patch_os(defs=None):
"""
Modify the value of some attributes of the os module to be sure
they are the same on every platform pypy is compiled on. Returns a
dictionary containing the original values that can be passed to
patch_os to rollback to the original values.
"""
import os
if defs is None:
defs = NT_OS
olddefs = {}
for name, value in defs.iteritems():
try:
olddefs[name] = getattr(os, name)
except AttributeError:
pass
setattr(os, name, value)
return olddefs
def patch():
    """Patch the os module for compilation; returns a 1-tuple holding
    the state to pass back to unpatch()."""
    olddefs = _patch_os()
    return olddefs,
def unpatch(olddefs):
    """Restore the os attributes previously saved by patch()."""
    _patch_os(olddefs)
| Python |
from pypy.translator.cli.metavm import Call, CallMethod, \
IndirectCall, GetField, SetField, OOString, DownCast, NewCustomDict,\
CastWeakAdrToPtr, MapException, Box, Unbox, NewArray, GetArrayElem, SetArrayElem,\
TypeOf
from pypy.translator.oosupport.metavm import PushArg, PushAllArgs, StoreResult, InstructionList,\
New, RuntimeNew, CastTo, PushPrimitive
from pypy.translator.cli.cts import WEAKREF
from pypy.rpython.ootypesystem import ootype
# some useful instruction patterns
Not = ['ldc.i4.0', 'ceq']   # negate the boolean on top of the stack
DoNothing = [PushAllArgs]   # just push the arguments (StoreResult added below)
Ignore = []                 # emit nothing at all
def _not(op):
    # run 'op' and negate its boolean result
    return [PushAllArgs, op]+Not

def _abs(type_):
    # absolute value through System.Math.Abs for the given CLI type
    return [PushAllArgs, 'call %s class [mscorlib]System.Math::Abs(%s)' % (type_, type_), StoreResult]

def _check_ovf(op):
    # wrap 'op' so that a CLI OverflowException becomes OverflowError
    mapping = [('[mscorlib]System.OverflowException', 'exceptions.OverflowError')]
    return [MapException(op, mapping)]

def _check_zer(op):
    # wrap 'op' so that a CLI DivideByZeroException becomes ZeroDivisionError
    mapping = [('[mscorlib]System.DivideByZeroException', 'exceptions.ZeroDivisionError')]
    return [MapException(op, mapping)]
# Map from ll operation names to a list of micro-instructions, or to a
# plain string which stands for [PushAllArgs, <string>, StoreResult];
# a value of None means "not implemented".  The normalization loop
# below turns every entry into an InstructionList.
opcodes = {
    # __________ object oriented operations __________
    'new': [New],
    'runtimenew': [RuntimeNew],
    'oosetfield': [SetField],
    'oogetfield': [GetField],
    'oosend': [CallMethod],
    'ooupcast': DoNothing,
    'oodowncast': [DownCast],
    'clibox': [Box],
    'cliunbox': [Unbox],
    'cli_newarray': [NewArray],
    'cli_getelem': [GetArrayElem],
    'cli_setelem': [SetArrayElem],
    'cli_typeof': [TypeOf],
    'cli_arraylength': 'ldlen',
    'oois': 'ceq',
    'oononnull': [PushAllArgs, 'ldnull', 'ceq']+Not,
    'instanceof': [CastTo, 'ldnull', 'cgt.un'],
    'subclassof': [PushAllArgs, 'call bool [pypylib]pypy.runtime.Utils::SubclassOf(class [mscorlib]System.Type, class[mscorlib]System.Type)'],
    'ooidentityhash': [PushAllArgs, 'callvirt instance int32 object::GetHashCode()'],
    'oohash': [PushAllArgs, 'callvirt instance int32 object::GetHashCode()'],
    'oostring': [OOString],
    'ooparse_int': [PushAllArgs, 'call int32 [pypylib]pypy.runtime.Utils::OOParseInt(string, int32)'],
    'ooparse_float': [PushAllArgs, 'call float64 [pypylib]pypy.runtime.Utils::OOParseFloat(string)'],
    'oonewcustomdict': [NewCustomDict],
    'same_as': DoNothing,
    'hint': [PushArg(0), StoreResult],
    'direct_call': [Call],
    'indirect_call': [IndirectCall],
    'cast_ptr_to_weakadr': [PushAllArgs, 'newobj instance void class %s::.ctor(object)' % WEAKREF],
    'cast_weakadr_to_ptr': [CastWeakAdrToPtr],
    'gc__collect': 'call void class [mscorlib]System.GC::Collect()',
    'resume_point': Ignore,
    'debug_assert': Ignore,

    # __________ numeric operations __________

    'bool_not': [PushAllArgs]+Not,

    'char_lt': 'clt',
    'char_le': _not('cgt'),
    'char_eq': 'ceq',
    'char_ne': _not('ceq'),
    'char_gt': 'cgt',
    'char_ge': _not('clt'),

    'unichar_eq': 'ceq',
    'unichar_ne': _not('ceq'),

    'int_is_true': [PushAllArgs, 'ldc.i4.0', 'cgt.un'],
    'int_neg': 'neg',
    'int_neg_ovf': _check_ovf(['ldc.i4.0', PushAllArgs, 'sub.ovf', StoreResult]),
    'int_abs': _abs('int32'),
    'int_abs_ovf': _check_ovf(_abs('int32')),
    'int_invert': 'not',

    'int_add': 'add',
    'int_sub': 'sub',
    'int_mul': 'mul',
    'int_floordiv': 'div',
    'int_floordiv_zer': _check_zer('div'),
    'int_mod': 'rem',
    'int_lt': 'clt',
    'int_le': _not('cgt'),
    'int_eq': 'ceq',
    'int_ne': _not('ceq'),
    'int_gt': 'cgt',
    'int_ge': _not('clt'),
    'int_and': 'and',
    'int_or': 'or',
    'int_lshift': 'shl',
    'int_rshift': 'shr',
    'int_xor': 'xor',
    'int_add_ovf': _check_ovf('add.ovf'),
    'int_sub_ovf': _check_ovf('sub.ovf'),
    'int_mul_ovf': _check_ovf('mul.ovf'),
    'int_floordiv_ovf': 'div', # these can't overflow!
    'int_mod_ovf': 'rem',
    'int_lt_ovf': 'clt',
    'int_le_ovf': _not('cgt'),
    'int_eq_ovf': 'ceq',
    'int_ne_ovf': _not('ceq'),
    'int_gt_ovf': 'cgt',
    'int_ge_ovf': _not('clt'),
    'int_and_ovf': 'and',
    'int_or_ovf': 'or',
    # shift through int64 so the overflow check can see the lost bits
    'int_lshift_ovf': _check_ovf([PushArg(0),'conv.i8',PushArg(1), 'shl',
                                  'conv.ovf.i4', StoreResult]),
    'int_lshift_ovf_val': _check_ovf([PushArg(0),'conv.i8',PushArg(1), 'shl',
                                      'conv.ovf.i4', StoreResult]),
    'int_rshift_ovf': 'shr', # these can't overflow!
    'int_xor_ovf': 'xor',
    'int_floordiv_ovf_zer': _check_zer('div'),
    'int_mod_ovf_zer': _check_zer('rem'),
    'int_mod_zer': _check_zer('rem'),

    'uint_is_true': [PushAllArgs, 'ldc.i4.0', 'cgt.un'],
    'uint_invert': 'not',

    'uint_add': 'add',
    'uint_sub': 'sub',
    'uint_mul': 'mul',
    'uint_div': 'div.un',
    'uint_truediv': None, # TODO
    'uint_floordiv': 'div.un',
    'uint_mod': 'rem.un',
    'uint_lt': 'clt.un',
    'uint_le': _not('cgt.un'),
    'uint_eq': 'ceq',
    'uint_ne': _not('ceq'),
    'uint_gt': 'cgt.un',
    'uint_ge': _not('clt.un'),
    'uint_and': 'and',
    'uint_or': 'or',
    'uint_lshift': 'shl',
    'uint_rshift': 'shr.un',
    'uint_xor': 'xor',

    'float_is_true': [PushAllArgs, 'ldc.r8 0', 'ceq']+Not,
    'float_neg': 'neg',
    'float_abs': _abs('float64'),

    'float_add': 'add',
    'float_sub': 'sub',
    'float_mul': 'mul',
    'float_truediv': 'div',
    'float_lt': 'clt',
    'float_le': _not('cgt'),
    'float_eq': 'ceq',
    'float_ne': _not('ceq'),
    'float_gt': 'cgt',
    'float_ge': _not('clt'),
    'float_pow': [PushAllArgs, 'call float64 [mscorlib]System.Math::Pow(float64, float64)'],

    'llong_is_true': [PushAllArgs, 'ldc.i8 0', 'cgt.un'],
    'llong_neg': 'neg',
    'llong_neg_ovf': _check_ovf(['ldc.i8 0', PushAllArgs, 'sub.ovf', StoreResult]),
    'llong_abs': _abs('int64'),
    'llong_abs_ovf': _check_ovf(_abs('int64')),
    'llong_invert': 'not',

    'llong_add': 'add',
    'llong_sub': 'sub',
    'llong_mul': 'mul',
    'llong_div': 'div',
    'llong_truediv': None, # TODO
    'llong_floordiv': 'div',
    'llong_floordiv_zer': _check_zer('div'),
    'llong_mod': 'rem',
    'llong_mod_zer': _check_zer('rem'),
    'llong_lt': 'clt',
    'llong_le': _not('cgt'),
    'llong_eq': 'ceq',
    'llong_ne': _not('ceq'),
    'llong_gt': 'cgt',
    'llong_ge': _not('clt'),
    'llong_and': 'and',
    'llong_or': 'or',
    'llong_lshift': 'shl',
    'llong_rshift': [PushAllArgs, 'conv.i4', 'shr'],
    'llong_xor': 'xor',

    'ullong_is_true': [PushAllArgs, 'ldc.i8 0', 'cgt.un'],
    'ullong_invert': 'not',

    'ullong_add': 'add',
    'ullong_sub': 'sub',
    'ullong_mul': 'mul',
    'ullong_div': 'div.un',
    'ullong_truediv': None, # TODO
    'ullong_floordiv': 'div.un',
    'ullong_mod': 'rem.un',
    'ullong_lt': 'clt.un',
    'ullong_le': _not('cgt.un'),
    'ullong_eq': 'ceq',
    'ullong_ne': _not('ceq'),
    'ullong_gt': 'cgt.un',
    'ullong_ge': _not('clt.un'),

    # when casting from bool we want that every truth value is casted
    # to 1: we can't simply DoNothing, because the CLI stack could
    # contains a truth value not equal to 1, so we should use the !=0
    # trick.
    'cast_bool_to_int': [PushAllArgs, 'ldc.i4.0', 'ceq']+Not,
    'cast_bool_to_uint': [PushAllArgs, 'ldc.i4.0', 'ceq']+Not,
    'cast_bool_to_float': [PushAllArgs, 'ldc.i4 0', 'ceq']+Not+['conv.r8'],
    'cast_char_to_int': DoNothing,
    'cast_unichar_to_int': DoNothing,
    'cast_int_to_char': DoNothing,
    'cast_int_to_unichar': DoNothing,
    'cast_int_to_uint': DoNothing,
    'cast_int_to_float': 'conv.r8',
    'cast_int_to_longlong': 'conv.i8',
    'cast_uint_to_int': DoNothing,
    'cast_uint_to_float': [PushAllArgs, 'conv.u8', 'conv.r8'],
    'cast_float_to_int': 'conv.i4',
    'cast_float_to_uint': 'conv.u4',
    'cast_longlong_to_float': 'conv.r8',
    'cast_float_to_longlong': 'conv.i8',
    'truncate_longlong_to_int': 'conv.i4',
    'is_early_constant': [PushPrimitive(ootype.Bool, False)]
}
# normalization pass: plain strings become the standard
# [PushAllArgs, op, StoreResult] sequence; every other non-None list
# gets a StoreResult appended unless it already has one, is Ignore, or
# starts with a MapException (which handles the result itself); then
# everything is wrapped into an InstructionList
for key, value in opcodes.iteritems():
    if type(value) is str:
        value = InstructionList([PushAllArgs, value, StoreResult])
    elif value is not None:
        if value is not Ignore and StoreResult not in value and not isinstance(value[0], MapException):
            value.append(StoreResult)
        value = InstructionList(value)

    opcodes[key] = value
| Python |
#! /usr/bin/env python
"""
Usage: carbonpython.py <module-name> [dll-name]
Compiles an RPython module into a .NET dll.
"""
import sys
import new
import types
import os.path
import inspect
from pypy.translator.driver import TranslationDriver
from pypy.translator.cli.entrypoint import DllEntryPoint
class DllDef:
    """Description of a .NET dll to be compiled out of RPython
    functions."""

    def __init__(self, name, namespace, functions=None):
        # bug fix: 'functions' used to default to a mutable [] shared
        # between all instances (and mutated by add_function); default
        # to None and create a fresh list instead
        if functions is None:
            functions = []
        self.name = name
        self.namespace = namespace
        self.functions = functions # [(function, annotation), ...]
        self.driver = TranslationDriver()
        self.driver.config.translation.ootype.mangle = False
        self.driver.setup_library(self)

    def add_function(self, func, inputtypes):
        """Register *func* together with its input annotation."""
        self.functions.append((func, inputtypes))

    def get_entrypoint(self, bk):
        """Build the DllEntryPoint from the translation graphs of the
        registered functions."""
        graphs = [bk.getdesc(f).cachedgraph(None) for f, _ in self.functions]
        return DllEntryPoint(self.name, graphs)

    def compile(self):
        """Translate and compile the dll."""
        # add all functions to the appropriate namespace
        for func, _ in self.functions:
            if not hasattr(func, '_namespace_'):
                func._namespace_ = self.namespace
        self.driver.proceed(['compile_cli'])
class export(object):
    """Decorator marking a function as exported in the generated dll.

    Usable either bare (@export, no input types) or parameterized with
    the input types as positional arguments plus an optional
    'namespace' keyword.
    """
    def __new__(self, *args, **kwds):
        if len(args) == 1 and isinstance(args[0], types.FunctionType):
            # bare @export usage: mark the function and return it as-is
            func = args[0]
            func._inputtypes_ = ()
            return func
        # bug fix: object.__new__ must not receive the extra arguments
        # (they are consumed by __init__); passing them along is an
        # error on modern Pythons
        return object.__new__(self)

    def __init__(self, *args, **kwds):
        self.inputtypes = args
        self.namespace = kwds.pop('namespace', None)
        if len(kwds) > 0:
            # list(kwds)[0] instead of kwds.keys()[0]: dict views are
            # not indexable on Python 3
            raise TypeError("unexpected keyword argument: '%s'" % list(kwds)[0])

    def __call__(self, func):
        func._inputtypes_ = self.inputtypes
        if self.namespace is not None:
            func._namespace_ = self.namespace
        return func
def is_exported(obj):
    """True if *obj* is a function (or unbound method) that was marked
    by the export decorator."""
    return isinstance(obj, (types.FunctionType, types.UnboundMethodType)) \
           and hasattr(obj, '_inputtypes_')
def collect_entrypoints(dic):
    """Collect the exported functions (and the exported methods of the
    classes) found in the namespace dict *dic*."""
    entrypoints = []
    for item in dic.itervalues():
        if is_exported(item):
            entrypoints.append((item, item._inputtypes_))
        elif isinstance(item, types.ClassType) or isinstance(item, type):
            # old- and new-style classes alike
            entrypoints += collect_class_entrypoints(item)
    return entrypoints
def collect_class_entrypoints(cls):
    """Collect the exported methods of *cls*; the class takes part in
    the dll only when its __init__ is itself exported."""
    try:
        __init__ = cls.__init__
        if not is_exported(__init__):
            return []
    except AttributeError:
        return []

    entrypoints = [(wrap_init(cls, __init__), __init__._inputtypes_)]
    for item in cls.__dict__.itervalues():
        if item is not __init__.im_func and is_exported(item):
            # exported methods implicitly take the class as first arg
            inputtypes = (cls,) + item._inputtypes_
            entrypoints.append((wrap_method(item), inputtypes))
    return entrypoints
def getarglist(meth):
    """Return the list of argument names of *meth*; varargs, kwargs
    and default values are not supported (yet)."""
    arglist, starargs, kwargs, defaults = inspect.getargspec(meth)
    assert starargs is None, '*args not supported yet'
    assert kwargs is None, '**kwds not supported yet'
    assert defaults is None, 'default values not supported yet'
    return arglist
def wrap_init(cls, meth):
    """Build a plain function instantiating *cls*, to be exported as
    the dll-level constructor (generated via exec)."""
    arglist = getarglist(meth)[1:] # discard self
    args = ', '.join(arglist)
    source = 'def __internal__ctor(%s): return %s(%s)' % (
        args, cls.__name__, args)
    mydict = {cls.__name__: cls}
    print source
    exec source in mydict
    return mydict['__internal__ctor']
def wrap_method(meth, is_init=False):
    """Build a plain function calling the method *meth* on its first
    argument, to be exported as a dll-level function.

    NOTE(review): the is_init parameter is currently unused here.
    """
    arglist = getarglist(meth)
    name = '__internal__%s' % meth.func_name
    selfvar = arglist[0]
    args = ', '.join(arglist)
    params = ', '.join(arglist[1:])
    source = 'def %s(%s): return %s.%s(%s)' % (
        name, args, selfvar, meth.func_name, params)
    mydict = {}
    print source
    exec source in mydict
    return mydict[name]
def compile_dll(filename, dllname=None, copy_dll=True):
    """Execute the RPython module in *filename* and compile its
    exported entrypoints into a .NET dll."""
    dirname, name = os.path.split(filename)
    if dllname is None:
        dllname, _ = os.path.splitext(name)
    elif dllname.endswith('.dll'):
        dllname, _ = os.path.splitext(dllname)

    module = new.module(dllname)
    sys.path.insert(0, dirname)
    execfile(filename, module.__dict__)
    sys.path.pop(0)
    # bug fix: _namespace_ must be looked up *after* executing the
    # module; it used to be read from the freshly created (still
    # empty) module dict, so a module-level _namespace_ could never
    # take effect
    namespace = module.__dict__.get('_namespace_', dllname)

    dll = DllDef(dllname, namespace)
    dll.functions = collect_entrypoints(module.__dict__)
    dll.compile()
    if copy_dll:
        dll.driver.copy_cli_dll()
def main(argv):
    """Command-line entry point: carbonpython.py <module-name> [dll-name]."""
    if len(argv) == 2:
        filename = argv[1]
        dllname = None
    elif len(argv) == 3:
        filename = argv[1]
        dllname = argv[2]
    else:
        # wrong number of arguments: print the usage string and bail out
        print >> sys.stderr, __doc__
        sys.exit(2)

    if not filename.endswith('.py'):
        filename += '.py'
    if not os.path.exists(filename):
        print >> sys.stderr, "Cannot find file %s" % filename
        sys.exit(1)
    compile_dll(filename, dllname)

if __name__ == '__main__':
    main(sys.argv)
| Python |
import operator
import string
from pypy.translator.cli.function import Function, log
from pypy.translator.cli.class_ import Class
from pypy.translator.cli.record import Record
from pypy.translator.cli.delegate import Delegate
from pypy.translator.cli.comparer import EqualityComparer
from pypy.translator.cli.node import Node
from pypy.translator.cli.support import string_literal, Counter
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.ootypesystem.module import ll_os
from pypy.translator.cli.opcodes import opcodes
from pypy.translator.cli import dotnet
from pypy.rlib.objectmodel import CDefinedIntSymbolic
from pypy.translator.oosupport.database import Database as OODatabase
try:
    set
except NameError:
    # compatibility with Python < 2.4, where set is not a builtin
    from sets import Set as set
# records that have a prebuilt implementation in pypylib; these are
# reused by pending_record instead of rendering a fresh class
BUILTIN_RECORDS = {
    ootype.Record({"item0": ootype.Signed, "item1": ootype.Signed}):
    '[pypylib]pypy.runtime.Record_Signed_Signed',
    ootype.Record({"item0": ootype.Float, "item1": ootype.Signed}):
    '[pypylib]pypy.runtime.Record_Float_Signed',
    ootype.Record({"item0": ootype.Float, "item1": ootype.Float}):
    '[pypylib]pypy.runtime.Record_Float_Float',
    ll_os.STAT_RESULT: '[pypylib]pypy.runtime.Record_Stat_Result',
    }
class LowLevelDatabase(OODatabase):
    def __init__(self, genoo):
        """Initialize the database and its bookkeeping mappings."""
        OODatabase.__init__(self, genoo)
        self.classes = {} # INSTANCE --> class_name
        self.classnames = set() # (namespace, name)
        self.recordnames = {} # RECORD --> name
        self.functions = {} # graph --> function_name
        self.methods = {} # graph --> method_name
        self.consts = {}  # value --> AbstractConst
        self.delegates = {} # StaticMethod --> type_name
        self.const_count = Counter() # store statistics about constants
    def next_count(self):
        # fresh progressive number, used to build unique names
        return self.unique()
    def _default_record_name(self, RECORD):
        """Build a class name out of the record's field names and
        types, with CLI-unfriendly characters replaced by '_'."""
        trans = string.maketrans('[]<>(), :', '_________')
        name = ['Record']
        # XXX: refactor this: we need a proper way to ensure unique names
        for f_name, (FIELD_TYPE, f_default) in RECORD._fields.iteritems():
            type_name = FIELD_TYPE._short_name().translate(trans)
            name.append(f_name)
            name.append(type_name)
        return '__'.join(name)
def _default_class_name(self, INSTANCE):
parts = INSTANCE._name.rsplit('.', 1)
if len(parts) == 2:
return parts
else:
return None, parts[0]
def pending_function(self, graph, functype=None):
if functype is None:
function = self.genoo.Function(self, graph)
else:
function = functype(self, graph)
self.pending_node(function)
return function.get_name()
def pending_class(self, INSTANCE):
try:
return self.classes[INSTANCE]
except KeyError:
pass
if isinstance(INSTANCE, dotnet.NativeInstance):
self.classes[INSTANCE] = INSTANCE._name
return INSTANCE._name
else:
namespace, name = self._default_class_name(INSTANCE)
name = self.get_unique_class_name(namespace, name)
if namespace is None:
full_name = name
else:
full_name = '%s.%s' % (namespace, name)
self.classes[INSTANCE] = full_name
cls = Class(self, INSTANCE, namespace, name)
self.pending_node(cls)
return full_name
def pending_record(self, RECORD):
try:
return BUILTIN_RECORDS[RECORD]
except KeyError:
pass
try:
return self.recordnames[RECORD]
except KeyError:
pass
name = self._default_record_name(RECORD)
name = self.get_unique_class_name(None, name)
self.recordnames[RECORD] = name
r = Record(self, RECORD, name)
self.pending_node(r)
return name
def record_function(self, graph, name):
self.functions[graph] = name
def graph_name(self, graph):
# XXX: graph name are not guaranteed to be unique
return self.functions.get(graph, None)
def get_unique_class_name(self, namespace, name):
base_name = name
i = 0
while (namespace, name) in self.classnames:
name = '%s_%d' % (base_name, i)
i+= 1
self.classnames.add((namespace, name))
return name
def class_name(self, INSTANCE):
try:
NATIVE_INSTANCE = INSTANCE._hints['NATIVE_INSTANCE']
return NATIVE_INSTANCE._name
except KeyError:
return self.classes[INSTANCE]
def get_record_name(self, RECORD):
try:
return BUILTIN_RECORDS[RECORD]
except KeyError:
return self.recordnames[RECORD]
def record_delegate(self, TYPE):
try:
return self.delegates[TYPE]
except KeyError:
name = 'StaticMethod__%d' % len(self.delegates)
self.delegates[TYPE] = name
self.pending_node(Delegate(self, TYPE, name))
return name
| Python |
"""
___________________________________________________________________________
CLI Constants
This module extends the oosupport/constant.py to be specific to the
CLI. Most of the code in this file is in the constant generators, which
determine how constants are stored and loaded (static fields, lazy
initialization, etc), but some constant classes have been overloaded or
extended to allow for special handling.
The CLI implementation is broken into three sections:
* Constant Generators: different generators implementing different
techniques for loading constants (Static fields, singleton fields, etc)
* Mixins: mixins are used to add a few CLI-specific methods to each
constant class. Basically, any time I wanted to extend a base class
(such as AbstractConst or DictConst), I created a mixin, and then
mixed it in to each sub-class of that base-class.
* Subclasses: here are the CLI specific classes. Eventually, these
probably wouldn't need to exist at all (the JVM doesn't have any,
for example), or could simply have empty bodies and exist to
combine a mixin and the generic base class. For now, though, they
contain the create_pointer() and initialize_data() routines.
"""
from pypy.translator.oosupport.constant import \
push_constant, WeakRefConst, StaticMethodConst, CustomDictConst, \
ListConst, ClassConst, InstanceConst, RecordConst, DictConst, \
BaseConstantGenerator
from pypy.translator.cli.ilgenerator import CLIBaseGenerator
from pypy.rpython.ootypesystem import ootype
from pypy.translator.cli.comparer import EqualityComparer
from pypy.rpython.lltypesystem import lltype
from pypy.translator.cli.cts import PYPY_DICT_OF_VOID, WEAKREF
# all generated constants live in a single class, pypy.runtime.Constants
CONST_NAMESPACE = 'pypy.runtime'
CONST_CLASSNAME = 'Constants'
CONST_CLASS = '%s.%s' % (CONST_NAMESPACE, CONST_CLASSNAME)
# when set, the initialization code traces its progress on stderr
DEBUG_CONST_INIT = False
DEBUG_CONST_INIT_VERBOSE = False
# when set, InstanceFieldConstGenerator (de)serializes constants.dat
SERIALIZE = False
# ______________________________________________________________________
# Constant Generators
#
# Different generators implementing different techniques for loading
# constants (Static fields, singleton fields, etc)
class CLIBaseConstGenerator(BaseConstantGenerator):
    """
    Base of all CLI constant generators. It implements the oosupport
    constant generator in terms of the CLI interface.
    """

    def __init__(self, db):
        BaseConstantGenerator.__init__(self, db)
        self.cts = db.genoo.TypeSystem(db)

    def _begin_gen_constants(self, ilasm, all_constants):
        # remember the assembler and open the class all constants live in
        self.ilasm = ilasm
        self.begin_class()
        return CLIBaseGenerator(self.db, ilasm)

    def _end_gen_constants(self, gen, numsteps):
        assert gen.ilasm is self.ilasm
        self.end_class()

    def begin_class(self):
        self.ilasm.begin_namespace(CONST_NAMESPACE)
        self.ilasm.begin_class(CONST_CLASSNAME, beforefieldinit=True)

    def end_class(self):
        self.ilasm.end_class()
        self.ilasm.end_namespace()

    def _declare_const(self, gen, const):
        # by default every constant becomes a static field
        self.ilasm.field(const.name, const.get_type(), static=True)

    def downcast_constant(self, gen, const, EXPECTED_TYPE):
        # emit an explicit downcast to the type the caller expects
        expected_cts_type = self.cts.lltype_to_cts(EXPECTED_TYPE)
        gen.ilasm.opcode('castclass', expected_cts_type)
# common base for the generators that store constants in fields, either
# static fields or instance fields of a singleton object
class FieldConstGenerator(CLIBaseConstGenerator):
    pass
class StaticFieldConstGenerator(FieldConstGenerator):
    """Keeps every constant in a static field of the Constants class,
    filled in by the class constructor via a series of step methods."""

    # _________________________________________________________________
    # OOSupport interface

    def push_constant(self, gen, const):
        gen.ilasm.load_static_constant(const.get_type(), CONST_NAMESPACE,
                                       CONST_CLASSNAME, const.name)

    def _push_constant_during_init(self, gen, const):
        # inside the initializer we can name the field directly
        full_name = '%s::%s' % (CONST_CLASS, const.name)
        gen.ilasm.opcode('ldsfld %s %s' % (const.get_type(), full_name))

    def _store_constant(self, gen, const):
        gen.ilasm.store_static_constant(const.get_type(), CONST_NAMESPACE,
                                        CONST_CLASSNAME, const.name)

    # _________________________________________________________________
    # CLI interface

    def _declare_step(self, gen, stepnum):
        gen.ilasm.begin_function('step%d' % stepnum, [], 'void',
                                 False, 'static')

    def _close_step(self, gen, stepnum):
        gen.ilasm.ret()
        gen.ilasm.end_function()

    def _end_gen_constants(self, gen, numsteps):
        # emit the class constructor, which simply runs every step method
        self.ilasm.begin_function('.cctor', [], 'void', False, 'static',
                                  'specialname', 'rtspecialname', 'default')
        self.ilasm.stderr('CONST: initialization starts', DEBUG_CONST_INIT)
        for step in range(numsteps):
            self.ilasm.stderr('CONST: step %d of %d' % (step, numsteps),
                              DEBUG_CONST_INIT)
            step_name = 'step%d' % step
            self.ilasm.call('void %s::%s()' % (CONST_CLASS, step_name))
        self.ilasm.stderr('CONST: initialization completed', DEBUG_CONST_INIT)
        self.ilasm.ret()
        self.ilasm.end_function()
        super(StaticFieldConstGenerator, self)._end_gen_constants(
            gen, numsteps)
class InstanceFieldConstGenerator(FieldConstGenerator):
    """Keeps every constant in an *instance* field of a singleton
    Constants object, created by the class constructor.

    Fixes over the previous revision:
      - _declare_const's parameter was named 'all_constants' while its
        body used the undefined name 'const' (NameError).
      - _end_gen_constants called super(StaticFieldConstGenerator, self),
        but this class does not inherit from StaticFieldConstGenerator,
        so the call raised TypeError; it must name this very class.
    """

    # _________________________________________________________________
    # OOSupport interface

    def push_constant(self, gen, const):
        # load the singleton instance, then read the constant's field
        gen.ilasm.opcode('ldsfld class %s %s::Singleton' % (CONST_CLASS, CONST_CLASS))
        gen.ilasm.opcode('ldfld %s %s::%s' % (const.get_type(), CONST_CLASS, const.name))

    def _push_constant_during_init(self, gen, const):
        # during initialization, we load the 'this' pointer from our
        # argument rather than the singleton argument
        gen.ilasm.opcode('ldarg.0')
        gen.ilasm.opcode('ldfld %s %s::%s' % (const.get_type(), CONST_CLASS, const.name))

    def _pre_store_constant(self, gen, const):
        gen.ilasm.opcode('ldarg.0')

    def _store_constant(self, gen, const):
        gen.ilasm.set_field((const.get_type(), CONST_CLASS, const.name))

    # _________________________________________________________________
    # CLI interface

    def _declare_const(self, gen, const):
        # bug fix: parameter renamed from 'all_constants' to 'const',
        # which is the name the body actually uses (cfr. the base class)
        gen.ilasm.field(const.name, const.get_type(), static=False)

    def _declare_step(self, gen, stepnum):
        gen.ilasm.begin_function('step%d' % stepnum, [], 'void', False)

    def _close_step(self, gen, stepnum):
        gen.ilasm.ret()
        gen.ilasm.end_function()

    def _end_gen_constants(self, gen, numsteps):
        ilasm = gen.ilasm
        # instance constructor: chain to object's ctor, publish 'this'
        # as the singleton, then run every initialization step
        ilasm.begin_function('.ctor', [], 'void', False, 'specialname', 'rtspecialname', 'instance')
        ilasm.opcode('ldarg.0')
        ilasm.call('instance void object::.ctor()')
        ilasm.opcode('ldarg.0')
        ilasm.opcode('stsfld class %s %s::Singleton' % (CONST_CLASS, CONST_CLASS))
        for i in range(numsteps):
            step_name = 'step%d' % i
            ilasm.opcode('ldarg.0')
            ilasm.call('instance void %s::%s()' % (CONST_CLASS, step_name))
        ilasm.ret()
        ilasm.end_function()
        # declare&init the Singleton containing the constants
        ilasm.field('Singleton', 'class %s' % CONST_CLASS, static=True)
        ilasm.begin_function('.cctor', [], 'void', False, 'static', 'specialname', 'rtspecialname', 'default')
        if SERIALIZE:
            self._serialize_ctor()
        else:
            self._plain_ctor()
        ilasm.end_function()
        # bug fix: used to be super(StaticFieldConstGenerator, self),
        # which is not an ancestor of this class (TypeError at runtime)
        super(InstanceFieldConstGenerator, self)._end_gen_constants(gen, numsteps)

    def _plain_ctor(self):
        # unconditionally instantiate the singleton
        self.ilasm.new('instance void class %s::.ctor()' % CONST_CLASS)
        self.ilasm.pop()
        self.ilasm.ret()

    def _serialize_ctor(self):
        # try to deserialize a previously saved constants.dat; fall back
        # to normal initialization (and save the result) when that fails
        self.ilasm.opcode('ldstr "constants.dat"')
        self.ilasm.call('object [pypylib]pypy.runtime.Utils::Deserialize(string)')
        self.ilasm.opcode('dup')
        self.ilasm.opcode('brfalse initialize')
        self.ilasm.stderr('Constants deserialized successfully')
        self.ilasm.opcode('stsfld class %s %s::Singleton' % (CONST_CLASS, CONST_CLASS))
        self.ilasm.ret()
        self.ilasm.label('initialize')
        self.ilasm.pop()
        self.ilasm.stderr('Cannot deserialize constants... initialize them!')
        self.ilasm.new('instance void class %s::.ctor()' % CONST_CLASS)
        self.ilasm.opcode('ldstr "constants.dat"')
        self.ilasm.call('void [pypylib]pypy.runtime.Utils::Serialize(object, string)')
        self.ilasm.ret()
class LazyConstGenerator(StaticFieldConstGenerator):
    """Initializes each constant lazily: every constant gets a
    get_NAME() accessor that creates and caches it on first use.

    Fixes over the previous revision: _declare_const used the undefined
    name 'ilasm' instead of 'self.ilasm', and passed CONST_CLASS (which
    is already namespace-qualified) where the bare class name
    CONST_CLASSNAME is expected (cfr. the superclass' push_constant).
    """

    def push_constant(self, ilasm, const):
        getter_name = '%s::%s' % (CONST_CLASS, 'get_%s' % const.name)
        ilasm.call('%s %s()' % (const.get_type(), getter_name))

    def _create_pointers(self, gen, all_constants):
        # overload to do nothing since we handle everything in lazy fashion
        pass

    def _initialize_data(self, gen, all_constants):
        # overload to do nothing since we handle everything in lazy fashion
        pass

    def _declare_const(self, gen, const):
        # Declare the field
        super(LazyConstGenerator, self)._declare_const(gen, const)
        # Create the method for accessing the field
        getter_name = 'get_%s' % const.name
        type_ = const.get_type()
        ilasm = self.ilasm   # bug fix: 'ilasm' used to be an undefined name
        ilasm.begin_function(getter_name, [], type_, False, 'static')
        ilasm.load_static_constant(type_, CONST_NAMESPACE, CONST_CLASSNAME,
                                   const.name)
        # if it's already initialized, just return it
        ilasm.opcode('dup')
        ilasm.opcode('brfalse', 'initialize')
        ilasm.opcode('ret')
        # else, initialize!
        ilasm.label('initialize')
        ilasm.opcode('pop') # discard the null value we know is on the stack
        # NOTE(review): instantiate/init are assumed to accept the raw
        # ilasm object -- confirm against the oosupport constant classes
        const.instantiate(ilasm)
        ilasm.opcode('dup') # two dups because const.init pops the value at the end
        ilasm.opcode('dup')
        ilasm.store_static_constant(type_, CONST_NAMESPACE, CONST_CLASSNAME,
                                    const.name)
        const.init(ilasm)
        ilasm.opcode('ret')
        ilasm.end_function()
# ______________________________________________________________________
# Mixins
#
# Mixins are used to add a few CLI-specific methods to each constant
# class. Basically, any time I wanted to extend a base class (such as
# AbstractConst or DictConst), I created a mixin, and then mixed it in
# to each sub-class of that base-class. Kind of awkward.
class CLIBaseConstMixin(object):
    """ A mix-in with a few extra methods the CLI backend uses """

    def get_type(self, include_class=True):
        """ Returns the CLI type for this constant's representation """
        return self.cts.lltype_to_cts(self.value._TYPE, include_class)

    def push_inline(self, gen, TYPE):
        """ Overload the oosupport version so that we use the CLI opcode
        for pushing NULL """
        # only null constants can be pushed inline by default
        assert self.is_null()
        gen.ilasm.opcode('ldnull')
class CLIDictMixin(CLIBaseConstMixin):
    """Mix-in for dict constants; dicts whose values are Void need a
    special runtime class on the CLI, initialized from the keys only."""

    def _check_for_void_dict(self, gen):
        """If the dict's values are Void, emit the initialization inline
        and return True; otherwise return False and do nothing."""
        if self.value._TYPE._VALUETYPE is not ootype.Void:
            return False
        # removed: the previous revision also computed the cts types of
        # the value type here, but never used them
        KEYTYPE = self.value._TYPE._KEYTYPE
        keytype = self.cts.lltype_to_cts(KEYTYPE)
        keytype_T = self.cts.lltype_to_cts(self.value._TYPE.KEYTYPE_T)
        gen.add_comment(' CLI Dictionary w/ void value')
        class_name = PYPY_DICT_OF_VOID % keytype
        for key in self.value._dict:
            gen.ilasm.opcode('dup')
            push_constant(self.db, KEYTYPE, key, gen)
            meth = 'void class %s::ll_set(%s)' % (class_name, keytype_T)
            gen.ilasm.call_method(meth, False)
        return True

    def initialize_data(self, gen):
        # special case: dict of void, ignore the values
        if self._check_for_void_dict(gen):
            return
        return super(CLIDictMixin, self).initialize_data(gen)
# ______________________________________________________________________
# Constant Classes
#
# Here we overload a few methods, and mix in the base classes above.
# Note that the mix-ins go first so that they overload methods where
# required.
#
# Eventually, these probably wouldn't need to exist at all (the JVM
# doesn't have any, for example), or could simply have empty bodies
# and exist to combine a mixin and the generic base class. For now,
# though, they contain the create_pointer() and initialize_data()
# routines. In order to get rid of them, we would need to implement
# the generator interface in the CLI.
class CLIRecordConst(CLIBaseConstMixin, RecordConst):
    def create_pointer(self, gen):
        # keep statistics about how many Record constants we emit
        self.db.const_count.inc('Record')
        super(CLIRecordConst, self).create_pointer(gen)
class CLIInstanceConst(CLIBaseConstMixin, InstanceConst):
    def create_pointer(self, gen):
        # count both the total and the per-type number of instances
        self.db.const_count.inc('Instance')
        self.db.const_count.inc('Instance', self.OOTYPE())
        super(CLIInstanceConst, self).create_pointer(gen)
class CLIClassConst(CLIBaseConstMixin, ClassConst):
    """Constant meta-class object, pushed inline via ldtoken."""

    def is_inline(self):
        return True

    def push_inline(self, gen, EXPECTED_TYPE):
        if self.is_null():
            # the mixin handles the null case by emitting ldnull
            super(CLIClassConst, self).push_inline(gen, EXPECTED_TYPE)
            return
        INSTANCE = self.value._INSTANCE
        gen.ilasm.opcode('ldtoken', self.db.class_name(INSTANCE))
        gen.ilasm.call('class [mscorlib]System.Type class [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)')
class CLIListConst(CLIBaseConstMixin, ListConst):
    """Constant list; lists containing only zeroes need no explicit
    initialization because the runtime already zero-fills them."""

    def _do_not_initialize(self):
        # Check if it is a list of all zeroes:
        try:
            if self.value._list == [0] * len(self.value._list):
                return True
        except Exception:
            # bug fix: this was a bare 'except:', which would also
            # swallow KeyboardInterrupt/SystemExit; the check is only
            # best-effort, so any ordinary failure falls through
            pass
        return super(CLIListConst, self)._do_not_initialize()

    def create_pointer(self, gen):
        # record statistics: total lists, per item type, per length
        self.db.const_count.inc('List')
        self.db.const_count.inc('List', self.value._TYPE._ITEMTYPE)
        self.db.const_count.inc('List', len(self.value._list))
        super(CLIListConst, self).create_pointer(gen)
class CLIDictConst(CLIDictMixin, DictConst):
    def create_pointer(self, gen):
        # count both the total and the per-(key, value)-type dicts
        self.db.const_count.inc('Dict')
        self.db.const_count.inc('Dict', self.value._TYPE._KEYTYPE, self.value._TYPE._VALUETYPE)
        super(CLIDictConst, self).create_pointer(gen)
class CLICustomDictConst(CLIDictMixin, CustomDictConst):
    """Constant dict with user-supplied equality and hash functions,
    wrapped into an IEqualityComparer implementation."""

    def record_dependencies(self):
        if not self.value:
            return
        key_eq = self.value._dict.key_eq
        key_hash = self.value._dict.key_hash   # avoid shadowing builtin hash
        self.comparer = EqualityComparer(self.db, self.value._TYPE._KEYTYPE,
                                         key_eq, key_hash)
        self.db.pending_node(self.comparer)
        super(CLICustomDictConst, self).record_dependencies()

    def create_pointer(self, gen):
        assert not self.is_null()
        # instantiate the comparer and hand it to the dictionary's ctor
        gen.ilasm.new(self.comparer.get_ctor())
        class_name = self.get_type()
        gen.ilasm.new('instance void %s::.ctor(class '
                      '[mscorlib]System.Collections.Generic.IEqualityComparer`1<!0>)'
                      % class_name)
        self.db.const_count.inc('CustomDict')
        self.db.const_count.inc('CustomDict', self.value._TYPE._KEYTYPE,
                                self.value._TYPE._VALUETYPE)
class CLIStaticMethodConst(CLIBaseConstMixin, StaticMethodConst):
    """Constant function pointer, materialized as a delegate instance."""

    def create_pointer(self, gen):
        assert not self.is_null()
        # bind the delegate to a null 'this' plus the function's address
        signature = self.cts.graph_to_signature(self.value.graph)
        gen.ilasm.opcode('ldnull')
        gen.ilasm.opcode('ldftn', signature)
        gen.ilasm.new('instance void class %s::.ctor(object, native int)'
                      % self.delegate_type)
        self.db.const_count.inc('StaticMethod')

    def initialize_data(self, gen):
        # nothing to do: create_pointer already set up the delegate fully
        return
class CLIWeakRefConst(CLIBaseConstMixin, WeakRefConst):
    def create_pointer(self, gen):
        # a weakref is created pointing to null; the real target is
        # installed later by initialize_data()
        gen.ilasm.opcode('ldnull')
        gen.ilasm.new('instance void %s::.ctor(object)' % self.get_type())
        self.db.const_count.inc('WeakRef')

    def get_type(self, include_class=True):
        # the static type is always the pypylib weakref wrapper class
        return 'class ' + WEAKREF

    def initialize_data(self, gen):
        if self.value is not None:
            push_constant(self.db, self.value._TYPE, self.value, gen)
            gen.ilasm.call_method(
                'void %s::set_Target(object)' % self.get_type(), True)
            return True
| Python |
import os
import platform
import py
from py.compat import subprocess
from pypy.tool.udir import udir
from pypy.translator.translator import TranslationContext
from pypy.rpython.test.tool import BaseRtypingTest, OORtypeMixin
from pypy.rpython.lltypesystem.lltype import typeOf
from pypy.rpython.ootypesystem import ootype
from pypy.annotation.model import lltype_to_annotation
from pypy.translator.backendopt.all import backend_optimizations
from pypy.translator.backendopt.checkvirtual import check_virtual_methods
from pypy.rpython.ootypesystem import ootype
from pypy.translator.cli.option import getoption
from pypy.translator.cli.gencli import GenCli
from pypy.translator.cli.function import Function
from pypy.translator.cli.node import Node
from pypy.translator.cli.cts import CTS
from pypy.translator.cli.database import LowLevelDatabase
from pypy.translator.cli.sdk import SDK
from pypy.translator.cli.entrypoint import BaseEntryPoint
from pypy.translator.cli.support import patch, unpatch
# number of decimal digits used when comparing float results
FLOAT_PRECISION = 8

def check(func, annotation, args):
    """Compile func with gencli and verify that the compiled version
    computes the same result as the plain Python one."""
    mono = compile_function(func, annotation)
    expected = func(*args)
    actual = mono(*args)
    if type(expected) is float:
        # floats may differ in the last digits: round before comparing
        assert round(expected, FLOAT_PRECISION) == round(actual, FLOAT_PRECISION)
    else:
        assert expected == actual
def format_object(TYPE, cts, ilasm):
    """Emit IL converting the value on the stack into a string holding
    a valid Python expression (via pypylib's Result.ToPython)."""
    if TYPE is ootype.Void:
        # void results are rendered as the literal string "None"
        ilasm.opcode('ldstr "None"')
        return
    if isinstance(TYPE, (ootype.BuiltinType, ootype.Instance, ootype.StaticMethod)) and TYPE is not ootype.String:
        # complex values go through the generic object overload
        type_ = 'object'
    else:
        type_ = cts.lltype_to_cts(TYPE)
    ilasm.call('string class [pypylib]pypy.test.Result::ToPython(%s)' % type_)
class TestEntryPoint(BaseEntryPoint):
    """
    This class produces a 'main' method that converts its arguments
    to int32, pass them to another method and prints out the result.
    """

    def __init__(self, graph_to_call, wrap_exceptions=False):
        # graph_to_call: flow graph of the function under test
        # wrap_exceptions: when True, exceptions are caught and printed
        # in a format the test machinery can parse back
        self.graph = graph_to_call
        self.wrap_exceptions = wrap_exceptions

    def get_name(self):
        return 'main'

    def render(self, ilasm):
        # Emit the IL for 'main'; the emission order below is
        # significant (try/catch regions, labels and branch targets
        # must match exactly).
        ilasm.begin_function('main', [('string[]', 'argv')], 'void', True, 'static')
        RETURN_TYPE = self.graph.getreturnvar().concretetype
        return_type = self.cts.lltype_to_cts(RETURN_TYPE)
        if return_type != 'void':
            ilasm.locals([(return_type, 'res')])
        if self.wrap_exceptions:
            ilasm.begin_try()
        # convert string arguments to their true type
        for i, arg in enumerate(self.graph.getargs()):
            ilasm.opcode('ldarg.0')
            ilasm.opcode('ldc.i4.%d' % i)
            ilasm.opcode('ldelem.ref')
            arg_type, arg_var = self.cts.llvar_to_cts(arg)
            ilasm.call('%s class [mscorlib]System.Convert::%s(string)' %
                       (arg_type, self.__convert_method(arg_type)))
        # call the function and convert the result to a string containing a valid python expression
        ilasm.call(self.cts.graph_to_signature(self.graph))
        if return_type != 'void':
            ilasm.opcode('stloc', 'res')
        if self.wrap_exceptions:
            ilasm.leave('check_last_exception')
        else:
            ilasm.leave('print_result')
        if self.wrap_exceptions:
            ilasm.end_try()
            # catch both native and rpython-level exceptions
            for exc in ('[mscorlib]System.Exception', 'exceptions.Exception'):
                ilasm.begin_catch(exc)
                if getoption('nowrap'):
                    ilasm.opcode('throw')
                else:
                    ilasm.call('string class [pypylib]pypy.test.Result::FormatException(object)')
                    ilasm.call('void class [mscorlib]System.Console::WriteLine(string)')
                    ilasm.leave('return')
                ilasm.end_catch()
            ilasm.label('check_last_exception')
            ilasm.opcode('ldsfld', 'object last_exception')
            ilasm.opcode('brnull', 'print_result')
            # there is a pending exception
            ilasm.opcode('ldsfld', 'object last_exception')
            ilasm.call('string class [pypylib]pypy.test.Result::FormatException(object)')
            ilasm.call('void class [mscorlib]System.Console::WriteLine(string)')
            ilasm.opcode('br', 'return')
        ilasm.label('print_result')
        if return_type != 'void':
            ilasm.opcode('ldloc', 'res')
        format_object(RETURN_TYPE, self.cts, ilasm)
        ilasm.call('void class [mscorlib]System.Console::WriteLine(string)')
        ilasm.label('return')
        ilasm.opcode('ret')
        ilasm.end_function()
        # make sure the tested graph itself gets generated too
        self.db.pending_function(self.graph)

    def __convert_method(self, arg_type):
        # map a CLI type name to the System.Convert method that parses it
        _conv = {
            'int32': 'ToInt32',
            'unsigned int32': 'ToUInt32',
            'int64': 'ToInt64',
            'unsigned int64': 'ToUInt64',
            'bool': 'ToBoolean',
            'float64': 'ToDouble',
            'char': 'ToChar',
            }
        try:
            return _conv[arg_type]
        except KeyError:
            assert False, 'Input type %s not supported' % arg_type
def compile_function(func, annotation=[], graph=None, backendopt=True):
    """Translate func to CLI, build the executable and return a
    CliFunctionWrapper around it.

    Bug fix: unpatch() is now guaranteed to run even when translation
    or the build fails, so the functions patched by patch() no longer
    leak into subsequent tests on error.
    """
    olddefs = patch()
    try:
        gen = _build_gen(func, annotation, graph, backendopt)
        gen.generate_source()
        exe_name = gen.build_exe()
    finally:
        unpatch(*olddefs) # restore original values
    return CliFunctionWrapper(exe_name)
def _build_gen(func, annotation, graph=None, backendopt=True):
    # Annotate, rtype (ootype) and optionally backend-optimize func,
    # then return a GenCli instance ready to generate the sources.
    try:
        func = func.im_func
    except AttributeError:
        pass
    t = TranslationContext()
    if graph is not None:
        # the caller supplied a hand-crafted graph: annotate it directly
        graph.func = func
        ann = t.buildannotator()
        inputcells = [ann.typeannotation(a) for a in annotation]
        ann.build_graph_types(graph, inputcells)
        t.graphs.insert(0, graph)
    else:
        ann = t.buildannotator()
        ann.build_types(func, annotation)
    if getoption('view'):
        t.view()
    t.buildrtyper(type_system="ootype").specialize()
    if backendopt:
        check_virtual_methods(ootype.ROOT)
        backend_optimizations(t)
    main_graph = t.graphs[0]
    if getoption('view'):
        t.view()
    if getoption('wd'):
        # --wd: generate the sources in the current working directory
        tmpdir = py.path.local('.')
    else:
        tmpdir = udir
    return GenCli(tmpdir, t, TestEntryPoint(main_graph, True))
class CliFunctionWrapper(object):
    """Callable wrapper around a compiled executable: runs it with the
    CLI runtime and parses its stdout back into Python values."""

    def __init__(self, exe_name):
        # exe_name may be None when compilation was disabled
        self._exe = exe_name

    def run(self, *args):
        """Execute the program; return (stdout, stderr, exit status)."""
        if self._exe is None:
            py.test.skip("Compilation disabled")
        if getoption('norun'):
            py.test.skip("Execution disabled")
        cmdline = SDK.runtime() + [self._exe] + map(str, args)
        env = os.environ.copy()
        env['LANG'] = 'C'   # keep the program's output un-localized
        mono = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, env=env)
        stdout, stderr = mono.communicate()
        return stdout, stderr, mono.wait()

    def __call__(self, *args):
        stdout, stderr, retval = self.run(*args)
        assert retval == 0, stderr
        res = eval(stdout.strip())
        if isinstance(res, tuple):
            res = StructTuple(res) # so tests can access tuple elements with .item0, .item1, etc.
        elif isinstance(res, list):
            res = OOList(res)
        return res
class StructTuple(tuple):
    """Tuple whose elements are also reachable as .item0, .item1, ..."""

    def __getattr__(self, name):
        prefix = 'item'
        if not name.startswith(prefix):
            raise AttributeError(name)
        return self[int(name[len(prefix):])]
class OOList(list):
    """List exposing the rpython low-level list interface used by tests."""

    def ll_length(self):
        return len(self)

    def ll_getitem_fast(self, index):
        return self[index]
class InstanceWrapper:
    # Stands for an rpython instance returned by the compiled program:
    # only the name of its class survives the trip through stdout.
    def __init__(self, class_name):
        self.class_name = class_name
class ExceptionWrapper:
    """Stands for an exception raised by the compiled program; only the
    name of the exception class survives the trip through stdout."""

    def __init__(self, class_name):
        self.class_name = class_name

    def __repr__(self):
        return 'ExceptionWrapper(%r)' % (self.class_name,)
class CliTest(BaseRtypingTest, OORtypeMixin):
def __init__(self):
self._func = None
self._ann = None
self._cli_func = None
def _compile(self, fn, args, ann=None, backendopt=True):
if ann is None:
ann = [lltype_to_annotation(typeOf(x)) for x in args]
if self._func is fn and self._ann == ann:
return self._cli_func
else:
self._cli_func = compile_function(fn, ann, backendopt=backendopt)
self._func = fn
self._ann = ann
return self._cli_func
def _skip_win(self, reason):
if platform.system() == 'Windows':
py.test.skip('Windows --> %s' % reason)
def _skip_powerpc(self, reason):
if platform.processor() == 'powerpc':
py.test.skip('PowerPC --> %s' % reason)
def interpret(self, fn, args, annotation=None, backendopt=True):
f = self._compile(fn, args, annotation, backendopt)
res = f(*args)
if isinstance(res, ExceptionWrapper):
raise res
return res
def interpret_raises(self, exception, fn, args):
import exceptions # needed by eval
try:
self.interpret(fn, args)
except ExceptionWrapper, ex:
assert issubclass(eval(ex.class_name), exception)
else:
assert False, 'function did raise no exception at all'
def float_eq(self, x, y):
diff = abs(x-y)
return diff/x < 10**-FLOAT_PRECISION
def is_of_type(self, x, type_):
return True # we can't really test the type
def ll_to_string(self, s):
return s
def ll_to_list(self, l):
return l
def ll_to_tuple(self, t):
return t
def class_name(self, value):
return value.class_name.split(".")[-1]
def is_of_instance_type(self, val):
return isinstance(val, InstanceWrapper)
def read_attr(self, obj, name):
py.test.skip('read_attr not supported on gencli tests')
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ may be undefined (e.g. when exec'd); fall back to argv[0]
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))

    # walk up the directory tree until we find the 'part' directory
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)

    # make the parent of the pypy root the first sys.path entry
    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)

    # Collect modules that were imported from inside the pypy tree under
    # a bare (top-level) name, so they can be aliased under their fully
    # qualified name as well and both spellings share one module object.
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod

    # install the aliases and hook each module into its parent package
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)

    return partdir, this_dir
def __clone():
""" clone master version of autopath.py into all subdirs """
from os.path import join, walk
if not this_dir.endswith(join('pypy','tool')):
raise EnvironmentError("can only clone master version "
"'%s'" % join(pypydir, 'tool',_myname))
def sync_walker(arg, dirname, fnames):
if _myname in fnames:
fn = join(dirname, _myname)
f = open(fn, 'rwb+')
try:
if f.read() == arg:
print "checkok", fn
else:
print "syncing", fn
f = open(fn, 'w')
f.write(arg)
finally:
f.close()
s = open(join(pypydir, 'tool', _myname), 'rb').read()
walk(pypydir, sync_walker, s)
# name under which this script clones itself into subdirectories
_myname = 'autopath.py'
# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')
# running this file directly re-copies it into every pypy subdirectory
if __name__ == '__main__':
    __clone()
| Python |
#
| Python |
from pypy.translator.cli.carbonpython import export

@export(int, int)
def sum(a, b):
    """Return the sum of two integers (exported to the CLI assembly).

    NOTE: intentionally shadows the builtin `sum`; the exported name
    is part of the generated assembly's interface.
    """
    return a+b
| Python |
#! /usr/bin/env python
# Convenience launcher: set up sys.path via autopath, then run py.test.
if __name__ == '__main__':
    import tool.autopath
    import py
    py.test.cmdline.main()
| Python |
from pypy.rlib.objectmodel import specialize, debug_assert
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.jit.codegen.model import AbstractRGenOp, GenLabel, GenBuilder
from pypy.jit.codegen.model import GenVar, GenConst, CodeGenSwitch
from pypy.jit.codegen.llgraph import llimpl
from pypy.rpython.lltypesystem.rclass import fishllattr
from pypy.rpython.module.support import LLSupport
class LLVar(GenVar):
    # Wraps a low-level flow-graph variable for the llgraph backend.
    def __init__(self, v):
        self.v = v    # the underlying llimpl variable

    def __repr__(self):
        # RGenOp is defined later in this module
        return repr(RGenOp.reveal(self))
class LLConst(GenConst):
    # Wraps a low-level constant for the llgraph backend.
    def __init__(self, v):
        self.v = v    # the underlying llimpl constant

    @specialize.arg(1)
    def revealconst(self, T):
        # return the constant's value as the concrete (ll) type T
        return llimpl.revealconst(T, self.v)

    def revealconstrepr(self):
        # human-readable representation of the constant's value
        return LLSupport.from_rstr(llimpl.revealconstrepr(self.v))

    def __repr__(self):
        # RGenOp is defined later in this module
        return repr(RGenOp.reveal(self))
_gv_TYPE_cache = {}

def gv_TYPE(TYPE):
    """Return the (memoized) LLConst wrapping the low-level type TYPE."""
    gv = _gv_TYPE_cache.get(TYPE)
    if gv is None:
        gv = LLConst(llimpl.constTYPE(TYPE))
        _gv_TYPE_cache[TYPE] = gv
    return gv

# pre-built constants for the most frequently used types
gv_Void = gv_TYPE(lltype.Void)
gv_Signed = gv_TYPE(lltype.Signed)
gv_Bool = gv_TYPE(lltype.Bool)
gv_dummy_placeholder = LLConst(llimpl.dummy_placeholder)
gv_Address = gv_TYPE(llmemory.Address)
class LLLabel(GenLabel):
    # A jump target: block b inside graph g.
    def __init__(self, b, g):
        self.b = b    # the llimpl block
        self.g = g    # the graph containing it
class LLPlace:
    """Holds a value v plus some backend bookkeeping info."""

    # flipped to True elsewhere once the place has been absorbed
    absorbed = False

    def __init__(self, v, info):
        self.info = info
        self.v = v
class LLFlexSwitch(CodeGenSwitch):
    # A switch whose set of cases can be extended at run time.
    def __init__(self, rgenop, b, g, args_gv):
        self.rgenop = rgenop
        self.b = b            # the switch block
        self.gv_f = g         # the graph being generated
        self.cases_gv = []
        self.args_gv = args_gv

    def add_case(self, gv_case):
        # Attach a new case and return a builder positioned on the fresh
        # block that implements it.
        self.cases_gv.append(gv_case) # not used so far, but keeps ptrs alive
        l_case = llimpl.add_case(self.b, gv_case.v)
        b = llimpl.closelinktofreshblock(l_case, self.args_gv, self.l_default)
        builder = LLBuilder(self.rgenop, self.gv_f, b)
        debug_assert(self.rgenop.currently_writing is None or
                     # special case: we stop replaying and add a case after
                     # a call to flexswitch() on a replay builder
                     self.rgenop.currently_writing.is_default_builder,
                     "add_case: currently_writing elsewhere")
        self.rgenop.currently_writing = builder
        return builder

    def _add_default(self):
        # Create the default target; must run before add_case(), which
        # links new case blocks to self.l_default.
        l_default = llimpl.add_default(self.b)
        self.l_default = l_default
        b = llimpl.closelinktofreshblock(l_default, self.args_gv, None)
        builder = LLBuilder(self.rgenop, self.gv_f, b)
        debug_assert(self.rgenop.currently_writing is None,
                     "_add_default: currently_writing elsewhere")
        self.rgenop.currently_writing = builder
        builder.is_default_builder = True
        return builder
class LLBuilder(GenBuilder):
    """Builder generating operations into flow-graph blocks (llgraph backend).

    A builder writes into one block at a time (self.b); rgenop's
    'currently_writing' slot is used by the debug_assert checks to make
    sure only one builder is actively writing at any moment.
    """
    jumped_from = None          # builder we were forked off by _jump(), if any
    is_default_builder = False
    def __init__(self, rgenop, g, block):
        self.rgenop = rgenop
        self.gv_f = g           # opaque constant wrapping the function being built
        self.b = block          # block currently being filled (or nullblock)
    def end(self):
        # finalize the whole graph; no builder may still be writing
        debug_assert(self.rgenop.currently_writing is None,
                     "end: currently_writing")
        llimpl.end(self.gv_f)
    @specialize.arg(1)
    def genop1(self, opname, gv_arg):
        # generate a one-argument operation; the result type is guessed
        debug_assert(self.rgenop.currently_writing is self,
                     "genop1: bad currently_writing")
        return LLVar(llimpl.genop(self.b, opname, [gv_arg], llimpl.guess))
    @specialize.arg(1)
    def genraisingop1(self, opname, gv_arg):
        # like genop1, also returning a flag: was an exception set?
        debug_assert(self.rgenop.currently_writing is self,
                     "genraisingop1: bad currently_writing")
        gv_res = LLVar(llimpl.genop(self.b, opname, [gv_arg], llimpl.guess))
        gv_exc = LLVar(llimpl.genop(self.b, "check_and_clear_exc", [],
                                    gv_Bool.v))
        return gv_res, gv_exc
    @specialize.arg(1)
    def genop2(self, opname, gv_arg1, gv_arg2):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop2: bad currently_writing")
        return LLVar(llimpl.genop(self.b, opname, [gv_arg1, gv_arg2],
                                  llimpl.guess))
    @specialize.arg(1)
    def genraisingop2(self, opname, gv_arg1, gv_arg2):
        debug_assert(self.rgenop.currently_writing is self,
                     "genraisingop2: bad currently_writing")
        gv_res = LLVar(llimpl.genop(self.b, opname, [gv_arg1, gv_arg2],
                                    llimpl.guess))
        gv_exc = LLVar(llimpl.genop(self.b, "check_and_clear_exc", [],
                                    gv_Bool.v))
        return gv_res, gv_exc
    def genop_call(self, (ARGS_gv, gv_RESULT, _), gv_callable, args_gv):
        # the first argument is the sigtoken triple built by RGenOp.sigToken()
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_call: bad currently_writing")
        vars_gv = [gv_callable]
        j = 0
        for i in range(len(ARGS_gv)):
            if ARGS_gv[i] is gv_Void:
                # Void arguments are not present in args_gv; use a placeholder
                gv_arg = gv_dummy_placeholder
            else:
                gv_arg = LLVar(llimpl.cast(self.b, ARGS_gv[i].v, args_gv[j].v))
                j += 1
            vars_gv.append(gv_arg)
        if gv_callable.is_const:
            v = llimpl.genop(self.b, 'direct_call', vars_gv, gv_RESULT.v)
        else:
            # indirect_call takes an extra trailing Void argument
            vars_gv.append(gv_dummy_placeholder)
            v = llimpl.genop(self.b, 'indirect_call', vars_gv, gv_RESULT.v)
        return LLVar(v)
    def genop_getfield(self, (gv_name, gv_PTRTYPE, gv_FIELDTYPE), gv_ptr):
        # the first argument is the fieldToken() triple
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_getfield: bad currently_writing")
        vars_gv = [llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr.v), gv_name.v]
        return LLVar(llimpl.genop(self.b, 'getfield', vars_gv,
                                  gv_FIELDTYPE.v))
    def genop_setfield(self, (gv_name, gv_PTRTYPE, gv_FIELDTYPE), gv_ptr,
                       gv_value):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_setfield: bad currently_writing")
        vars_gv = [llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr.v),
                   gv_name.v,
                   llimpl.cast(self.b, gv_FIELDTYPE.v, gv_value.v)]
        return LLVar(llimpl.genop(self.b, 'setfield', vars_gv,
                                  gv_Void.v))
    def genop_getsubstruct(self, (gv_name, gv_PTRTYPE, gv_FIELDTYPE), gv_ptr):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_getsubstruct: bad currently_writing")
        vars_gv = [llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr.v), gv_name.v]
        return LLVar(llimpl.genop(self.b, 'getsubstruct', vars_gv,
                                  gv_FIELDTYPE.v))
    def genop_getarrayitem(self, gv_ITEMTYPE, gv_ptr, gv_index):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_getarrayitem: bad currently_writing")
        vars_gv = [gv_ptr.v, gv_index.v]
        return LLVar(llimpl.genop(self.b, 'getarrayitem', vars_gv,
                                  gv_ITEMTYPE.v))
    def genop_getarraysubstruct(self, gv_ITEMTYPE, gv_ptr, gv_index):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_getarraysubstruct: bad currently_writing")
        vars_gv = [gv_ptr.v, gv_index.v]
        return LLVar(llimpl.genop(self.b, 'getarraysubstruct', vars_gv,
                                  gv_ITEMTYPE.v))
    def genop_setarrayitem(self, gv_ITEMTYPE, gv_ptr, gv_index, gv_value):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_setarrayitem: bad currently_writing")
        vars_gv = [gv_ptr.v, gv_index.v, gv_value.v]
        return LLVar(llimpl.genop(self.b, 'setarrayitem', vars_gv,
                                  gv_Void.v))
    def genop_getarraysize(self, gv_ITEMTYPE, gv_ptr):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_getarraysize: bad currently_writing")
        return LLVar(llimpl.genop(self.b, 'getarraysize', [gv_ptr.v],
                                  gv_Signed.v))
    def genop_malloc_fixedsize(self, (gv_TYPE, gv_PTRTYPE)):
        # the argument is the allocToken() pair
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_malloc_fixedsize: bad currently_writing")
        vars_gv = [gv_TYPE.v]
        return LLVar(llimpl.genop(self.b, 'malloc', vars_gv,
                                  gv_PTRTYPE.v))
    def genop_malloc_varsize(self, (gv_TYPE, gv_PTRTYPE), gv_length):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_malloc_varsize: bad currently_writing")
        vars_gv = [gv_TYPE.v, gv_length.v]
        return LLVar(llimpl.genop(self.b, 'malloc_varsize', vars_gv,
                                  gv_PTRTYPE.v))
    def genop_same_as(self, gv_TYPE, gv_value):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_same_as: bad currently_writing")
        gv_value = llimpl.cast(self.b, gv_TYPE.v, gv_value.v)
        return LLVar(llimpl.genop(self.b, 'same_as', [gv_value], gv_TYPE.v))
    def genop_ptr_iszero(self, gv_PTRTYPE, gv_ptr):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_ptr_iszero: bad currently_writing")
        gv_ptr = llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr.v)
        return LLVar(llimpl.genop(self.b, 'ptr_iszero', [gv_ptr], gv_Bool.v))
    def genop_ptr_nonzero(self, gv_PTRTYPE, gv_ptr):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_ptr_nonzero: bad currently_writing")
        gv_ptr = llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr.v)
        return LLVar(llimpl.genop(self.b, 'ptr_nonzero', [gv_ptr], gv_Bool.v))
    def genop_ptr_eq(self, gv_PTRTYPE, gv_ptr1, gv_ptr2):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_ptr_eq: bad currently_writing")
        gv_ptr1 = llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr1.v)
        gv_ptr2 = llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr2.v)
        return LLVar(llimpl.genop(self.b, 'ptr_eq', [gv_ptr1, gv_ptr2],
                                  gv_Bool.v))
    def genop_ptr_ne(self, gv_PTRTYPE, gv_ptr1, gv_ptr2):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_ptr_ne: bad currently_writing")
        gv_ptr1 = llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr1.v)
        gv_ptr2 = llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr2.v)
        return LLVar(llimpl.genop(self.b, 'ptr_ne', [gv_ptr1, gv_ptr2],
                                  gv_Bool.v))
    def genop_cast_int_to_ptr(self, gv_PTRTYPE, gv_int):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_cast_int_to_ptr: bad currently_writing")
        return LLVar(llimpl.genop(self.b, 'cast_int_to_ptr', [gv_int],
                                  gv_PTRTYPE.v))
    def _newblock(self, kinds):
        # start a fresh block with one input variable per kind
        self.b = newb = llimpl.newblock()
        return [LLVar(llimpl.geninputarg(newb, kind.v)) for kind in kinds]
    def enter_next_block(self, kinds, args_gv):
        # close the current block, open a fresh one, and rebind args_gv
        # in-place to the new block's input variables
        debug_assert(self.rgenop.currently_writing is self,
                     "enter_next_block: bad currently_writing")
        lnk = llimpl.closeblock1(self.b)
        newb_args_gv = self._newblock(kinds)
        llimpl.closelink(lnk, args_gv, self.b)
        for i in range(len(args_gv)):
            args_gv[i] = newb_args_gv[i]
        return LLLabel(self.b, self.gv_f)
    def finish_and_goto(self, args_gv, target):
        # close the current block with an unconditional jump to 'target'
        lnk = llimpl.closeblock1(self.b)
        llimpl.closelink(lnk, args_gv, target.b)
        self._close()
    def finish_and_return(self, sigtoken, gv_returnvar):
        # gv_returnvar may be None (Void result): use the placeholder then
        gv_returnvar = gv_returnvar or gv_dummy_placeholder
        lnk = llimpl.closeblock1(self.b)
        llimpl.closereturnlink(lnk, gv_returnvar.v, self.gv_f)
        self._close()
    def _jump(self, l_jump, l_no_jump, args_for_jump_gv):
        # keep writing along the l_no_jump link; return a paused builder
        # for the l_jump side
        debug_assert(self.rgenop.currently_writing is self,
                     "_jump: bad currently_writing")
        self.b = llimpl.closelinktofreshblock(l_no_jump, None, None)
        b2 = llimpl.closelinktofreshblock(l_jump, args_for_jump_gv, None)
        later_builder = LLBuilder(self.rgenop, self.gv_f, llimpl.nullblock)
        later_builder.later_block = b2
        later_builder.jumped_from = self
        return later_builder
    def jump_if_true(self, gv_cond, args_for_jump_gv):
        l_false, l_true = llimpl.closeblock2(self.b, gv_cond.v)
        return self._jump(l_true, l_false, args_for_jump_gv)
    def jump_if_false(self, gv_cond, args_for_jump_gv):
        l_false, l_true = llimpl.closeblock2(self.b, gv_cond.v)
        return self._jump(l_false, l_true, args_for_jump_gv)
    def flexswitch(self, gv_switchvar, args_gv):
        # close the block as a switch and hand control to an LLFlexSwitch
        llimpl.closeblockswitch(self.b, gv_switchvar.v)
        flexswitch = LLFlexSwitch(self.rgenop, self.b, self.gv_f, args_gv)
        self._close()
        return (flexswitch, flexswitch._add_default())
    def _close(self):
        # stop writing with this builder
        debug_assert(self.rgenop.currently_writing is self,
                     "_close: bad currently_writing")
        self.rgenop.currently_writing = None
        self.b = llimpl.nullblock
    def start_writing(self):
        # resume a paused builder: adopt the block saved in later_block
        assert self.b == llimpl.nullblock
        if self.jumped_from:
            # the builder we were forked from must be paused/closed by now
            assert self.jumped_from.b == llimpl.nullblock
        assert self.later_block != llimpl.nullblock
        self.b = self.later_block
        self.later_block = llimpl.nullblock
        debug_assert(self.rgenop.currently_writing is None,
                     "start_writing: currently_writing")
        self.rgenop.currently_writing = self
    def pause_writing(self, args_gv):
        # close the current block; return a fresh paused builder that will
        # continue from here once start_writing() is called on it
        lnk = llimpl.closeblock1(self.b)
        b2 = llimpl.closelinktofreshblock(lnk, args_gv, None)
        self._close()
        later_builder = LLBuilder(self.rgenop, self.gv_f, llimpl.nullblock)
        later_builder.later_block = b2
        return later_builder
    def show_incremental_progress(self):
        llimpl.show_incremental_progress(self.gv_f)

    # read_frame_var support
    def genop_get_frame_base(self):
        debug_assert(self.rgenop.currently_writing is self,
                     "genop_get_frame_base: bad currently_writing")
        return LLVar(llimpl.genop(self.b, 'get_frame_base', [],
                                  gv_Address.v))
    def get_frame_info(self, vars):
        debug_assert(self.rgenop.currently_writing is self,
                     "get_frame_info: bad currently_writing")
        return llimpl.get_frame_info(self.b, vars)
    def alloc_frame_place(self, gv_TYPE, gv_initial_value=None):
        # reserve a slot in the frame, initialized to gv_initial_value
        # (or the zero of gv_TYPE)
        debug_assert(self.rgenop.currently_writing is self,
                     "alloc_frame_place: bad currently_writing")
        if gv_initial_value is None:
            gv_initial_value = self.rgenop.genzeroconst(gv_TYPE)
        gv_initial_value = llimpl.cast(self.b, gv_TYPE.v, gv_initial_value.v)
        v = LLVar(llimpl.genop(self.b, 'same_as', [gv_initial_value],
                               gv_TYPE.v))
        return LLPlace(v, llimpl.get_frame_info(self.b, [v]))
    def genop_absorb_place(self, gv_TYPE, place):
        # NOTE(review): the assert message below says 'alloc_frame_place';
        # it looks like a copy-paste from the method above — confirm.
        debug_assert(self.rgenop.currently_writing is self,
                     "alloc_frame_place: bad currently_writing")
        debug_assert(not place.absorbed, "place already absorbed")
        place.absorbed = True
        return place.v
class RGenOp(AbstractRGenOp):
    """Code generator interface implemented on top of flow graphs (llimpl).

    Only one builder may be actively writing at any time; the
    'currently_writing' slot tracks it for the debug_assert checks
    in LLBuilder.
    """
    gv_Void = gv_Void
    currently_writing = None

    def newgraph(self, (ARGS_gv, gv_RESULT, gv_FUNCTYPE), name):
        # the first argument is the sigtoken triple built by sigToken()
        gv_func = llimpl.newgraph(gv_FUNCTYPE.v, name)
        builder = LLBuilder(self, gv_func, llimpl.nullblock)
        builder.later_block = llimpl.getstartblock(gv_func)
        inputargs_gv = [LLVar(llimpl.getinputarg(builder.later_block, i))
                        for i in range(len(ARGS_gv))]
        return builder, LLConst(gv_func), inputargs_gv

    @staticmethod
    @specialize.genconst(0)
    def genconst(llvalue):
        return LLConst(llimpl.genconst(llvalue))

    @staticmethod
    def erasedType(T):
        return lltype.erasedType(T)

    @staticmethod
    @specialize.memo()
    def kindToken(T):
        # token describing a low-level type
        return gv_TYPE(T)

    @staticmethod
    @specialize.memo()
    def fieldToken(T, name):
        # token for getfield/setfield: (field name, pointer type, field type)
        assert name in T._flds
        FIELDTYPE = getattr(T, name)
        if isinstance(FIELDTYPE, lltype.ContainerType):
            FIELDTYPE = lltype.Ptr(FIELDTYPE)
        return (LLConst(llimpl.constFieldName(name)),
                gv_TYPE(lltype.Ptr(T)),
                gv_TYPE(FIELDTYPE))

    @staticmethod
    @specialize.memo()
    def allocToken(TYPE):
        # token for malloc: (type, pointer-to-type)
        return (gv_TYPE(TYPE),
                gv_TYPE(lltype.Ptr(TYPE)))

    varsizeAllocToken = allocToken

    @staticmethod
    @specialize.memo()
    def arrayToken(A):
        # token for array operations: the (possibly boxed) item type
        ITEMTYPE = A.OF
        if isinstance(ITEMTYPE, lltype.ContainerType):
            ITEMTYPE = lltype.Ptr(ITEMTYPE)
        return gv_TYPE(ITEMTYPE)

    @staticmethod
    @specialize.memo()
    def sigToken(FUNCTYPE):
        # token for calls: (argument types, result type, function type)
        return ([gv_TYPE(A) for A in FUNCTYPE.ARGS],
                gv_TYPE(FUNCTYPE.RESULT),
                gv_TYPE(FUNCTYPE))

    constPrebuiltGlobal = genconst

    @staticmethod
    def genzeroconst(gv_TYPE):
        return LLConst(llimpl.genzeroconst(gv_TYPE.v))

    def replay(self, label, kinds):
        # restart generating operations at 'label'
        builder = LLBuilder(self, label.g, llimpl.nullblock)
        args_gv = builder._newblock(kinds)
        debug_assert(self.currently_writing is None,
                     "replay: currently_writing")
        self.currently_writing = builder
        return builder, args_gv

    #def stop_replay(self, endblock, kinds):
    #    return [LLVar(llimpl.getinputarg(endblock.b, i))
    #            for i in range(len(kinds))]

    # not RPython, just for debugging.  Specific to llgraph.
    @staticmethod
    def reveal(gv):
        if hasattr(gv, 'v'):
            v = gv.v
        else:
            v = fishllattr(gv, 'v')
        return llimpl.reveal(v)

    @staticmethod
    @specialize.arg(0)
    def read_frame_var(T, base, info, index):
        return llimpl.read_frame_var(T, base, info, index)

    @staticmethod
    @specialize.arg(0)
    def write_frame_place(T, base, place, value):
        llimpl.write_frame_var(base, place.info, 0, value)

    @staticmethod
    @specialize.arg(0)
    def read_frame_place(T, base, place):
        return llimpl.read_frame_var(T, base, place.info, 0)

    @staticmethod
    def get_python_callable(FUNC, gv):
        # return a closure that will run the graph on the llinterp
        from pypy.jit.codegen.llgraph.llimpl import testgengraph
        ptr = gv.revealconst(FUNC)
        graph = ptr._obj.graph
        def runner(*args):
            return testgengraph(graph, list(args))
        return runner
rgenop = RGenOp() # no real point in using a full class in llgraph
| Python |
"""
Functions that generate flow graphs and operations.
The functions below produce L2 graphs, but they define an interface
that can be used to produce any other kind of graph.
"""
from pypy.rpython.lltypesystem import lltype, llmemory, rtupletype
from pypy.objspace.flow import model as flowmodel
from pypy.translator.simplify import eliminate_empty_blocks
from pypy.translator.unsimplify import varoftype
from pypy.rpython.module.support import init_opaque_object
from pypy.rpython.module.support import to_opaque_object, from_opaque_object
from pypy.rpython.module.support import LLSupport
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rpython.llinterp import LLInterpreter
from pypy.rpython.lltypesystem.rclass import fishllattr
from pypy.rpython.lltypesystem.lloperation import llop
# for debugging, sanity checks in non-RPython code
reveal = from_opaque_object
def isptrtype(gv_type):
    """Return True if the opaque type constant wraps a pointer type."""
    const = from_opaque_object(gv_type)
    return isinstance(const.value, lltype.Ptr)
def newblock():
    """Create a fresh, empty flow-graph block and return it as an opaque object."""
    return to_opaque_object(flowmodel.Block([]))
def newgraph(gv_FUNCTYPE, name):
    """Create a new, empty FunctionGraph for the given low-level FUNCTYPE
    and return (as an opaque constant) a function pointer to it."""
    FUNCTYPE = from_opaque_object(gv_FUNCTYPE).value
    # 'name' is just a way to track things
    if not isinstance(name, str):
        name = LLSupport.from_rstr(name)
    inputargs = []
    erasedinputargs = []
    for ARG in FUNCTYPE.ARGS:
        # one variable with the precise type, for the start block...
        v = flowmodel.Variable()
        v.concretetype = ARG
        inputargs.append(v)
        # ...and one with the erased type, for the real entry block
        v = flowmodel.Variable()
        v.concretetype = lltype.erasedType(ARG)
        erasedinputargs.append(v)
    startblock = flowmodel.Block(inputargs)
    # insert an exploding operation here which is removed by
    # builder.end() to ensure that builder.end() is actually called.
    startblock.operations.append(
        flowmodel.SpaceOperation("debug_assert",
                                 [flowmodel.Constant(False, lltype.Bool),
                                  flowmodel.Constant("you didn't call builder.end()?",
                                                     lltype.Void)],
                                 varoftype(lltype.Void)))
    return_var = flowmodel.Variable()
    return_var.concretetype = FUNCTYPE.RESULT
    graph = flowmodel.FunctionGraph(name, startblock, return_var)
    # prereturnblock casts the erased result back to the precise type
    v1 = flowmodel.Variable()
    v1.concretetype = lltype.erasedType(FUNCTYPE.RESULT)
    graph.prereturnblock = flowmodel.Block([v1])
    casting_link(graph.prereturnblock, [v1], graph.returnblock)
    # the generated code starts at substartblock, with erased input types
    substartblock = flowmodel.Block(erasedinputargs)
    casting_link(graph.startblock, inputargs, substartblock)
    fptr = lltype.functionptr(FUNCTYPE, name,
                              graph=graph)
    return genconst(fptr)
def _getgraph(gv_func):
    """Fetch the FunctionGraph behind an opaque function constant."""
    return from_opaque_object(gv_func).value._obj.graph
def end(gv_func):
    """Finish building: finalize and sanity-check the graph of gv_func."""
    _buildgraph(_getgraph(gv_func))
def getstartblock(gv_func):
    """Return (as opaque) the block following the initial casting block."""
    graph = _getgraph(gv_func)
    [startlink] = graph.startblock.exits
    return to_opaque_object(startlink.target)
def geninputarg(block, gv_CONCRETE_TYPE):
    """Append a fresh input variable of the (erased) given type to 'block'."""
    blk = from_opaque_object(block)
    assert not blk.operations, "block already contains operations"
    assert blk.exits == [], "block already closed"
    CONCRETE_TYPE = from_opaque_object(gv_CONCRETE_TYPE).value
    var = flowmodel.Variable()
    var.concretetype = lltype.erasedType(CONCRETE_TYPE)
    blk.inputargs.append(var)
    return to_opaque_object(var)
def getinputarg(block, i):
    """Return (as opaque) the i-th input variable of 'block'."""
    blk = from_opaque_object(block)
    return to_opaque_object(blk.inputargs[i])
def _inputvars(vars):
    """Convert a (possibly low-level) list of opaque objects into a list of
    real Constants/Variables; missing entries become the dummy placeholder."""
    newvars = []
    if not isinstance(vars, list):
        # an RPython-level list: go through its ll_* accessors
        n = vars.ll_length()
        vars = vars.ll_items()
        for i in range(n):
            v = vars[i]
            if not v:
                v = dummy_placeholder
            else:
                v = fishllattr(v, 'v', v)
            newvars.append(v)
    else:
        # a plain Python list of GenVarOrConsts (or opaque objects)
        for v in vars:
            if not v:
                v = dummy_placeholder
            else:
                v = getattr(v, 'v', v)
            newvars.append(v)
    res = []
    for v1 in newvars:
        v = from_opaque_object(v1)
        assert isinstance(v, (flowmodel.Constant, flowmodel.Variable))
        res.append(v)
    return res
def cast(block, gv_TYPE, gv_var):
    """Return gv_var cast to TYPE (if needed); the cast operation, if any,
    is appended to 'block'."""
    TYPE = from_opaque_object(gv_TYPE).value
    v = from_opaque_object(gv_var)
    if TYPE != v.concretetype:
        if TYPE is llmemory.GCREF or v.concretetype is llmemory.GCREF:
            lltype.cast_opaque_ptr(TYPE, v.concretetype._defl()) # sanity check
            opname = 'cast_opaque_ptr'
        else:
            # only erased-vs-precise pointer casts are expected here
            assert v.concretetype == lltype.erasedType(TYPE)
            opname = 'cast_pointer'
        block = from_opaque_object(block)
        v2 = flowmodel.Variable()
        v2.concretetype = TYPE
        op = flowmodel.SpaceOperation(opname, [v], v2)
        block.operations.append(op)
        v = v2
    return to_opaque_object(v)
def erasedvar(v, block):
    """Return 'v', or a copy of it cast to the erased version of its type;
    the cast operation (if needed) is appended to 'block'."""
    ERASED = lltype.erasedType(v.concretetype)
    if ERASED == v.concretetype:
        return v
    castvar = flowmodel.Variable()
    castvar.concretetype = ERASED
    block.operations.append(
        flowmodel.SpaceOperation("cast_pointer", [v], castvar))
    return castvar
def genop(block, opname, vars_gv, gv_RESULT_TYPE):
    """Append a SpaceOperation 'opname' to 'block' and return its (erased)
    result variable as an opaque object."""
    # 'opname' is a constant string
    # gv_RESULT_TYPE comes from constTYPE
    if not isinstance(opname, str):
        opname = LLSupport.from_rstr(opname)
    block = from_opaque_object(block)
    assert block.exits == [], "block already closed"
    opvars = _inputvars(vars_gv)
    if gv_RESULT_TYPE is guess:
        # infer the result type by folding the operation on example values
        RESULT_TYPE = guess_result_type(opname, opvars)
    elif isinstance(gv_RESULT_TYPE, lltype.LowLevelType):
        RESULT_TYPE = gv_RESULT_TYPE
    else:
        RESULT_TYPE = from_opaque_object(gv_RESULT_TYPE).value
    v = flowmodel.Variable()
    v.concretetype = RESULT_TYPE
    op = flowmodel.SpaceOperation(opname, opvars, v)
    block.operations.append(op)
    return to_opaque_object(erasedvar(v, block))
# Hard-coded result types for operations whose result cannot be determined
# by folding on example values (see guess_result_type()).
RESULT_TYPES = {
    'cast_ptr_to_int': lltype.Signed,
    }
def guess_result_type(opname, opvars):
    """Guess the result type of 'opname' by folding it on example values
    built from the argument types (non-RPython helper)."""
    # strip the -checked suffixes: the result type is that of the plain op
    if opname.endswith('_zer'):   # h
        opname = opname[:-4]      # a
    if opname.endswith('_ovf'):   # c
        opname = opname[:-4]      # k
    if opname in RESULT_TYPES:
        return RESULT_TYPES[opname]
    op = getattr(llop, opname)
    need_result_type = getattr(op.fold, 'need_result_type', False)
    assert not need_result_type, ("cannot guess the result type of %r"
                                  % (opname,))
    examples = []
    for v in opvars:
        example = v.concretetype._example()
        if isinstance(v.concretetype, lltype.Primitive):
            if example == 0:
                example = type(example)(1)     # to avoid ZeroDivisionError
        examples.append(example)
    try:
        result = op.fold(*examples)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception, e:
        assert 0, "failed to guess the type of %s: %s" % (opname, e)
    return lltype.typeOf(result)
def genconst(llvalue):
    """Wrap a low-level value as a flow-graph Constant (with erased type)
    and return it as an opaque object."""
    T = lltype.typeOf(llvalue)
    T1 = lltype.erasedType(T)
    if T1 != T:
        llvalue = lltype.cast_pointer(T1, llvalue)
    v = flowmodel.Constant(llvalue)
    v.concretetype = T1
    if v.concretetype == lltype.Void: # XXX genconst should not really be used for Void constants
        assert not isinstance(llvalue, str) and not isinstance(llvalue, lltype.LowLevelType)
    return to_opaque_object(v)
def genzeroconst(gv_TYPE):
    """Return an opaque Constant: the all-zero value of the (erased) type."""
    TYPE = lltype.erasedType(from_opaque_object(gv_TYPE).value)
    zero = flowmodel.Constant(TYPE._defl())
    zero.concretetype = TYPE
    return to_opaque_object(zero)
def _generalcast(T, value):
    """Cast 'value' to low-level type T, picking the right cast primitive."""
    if isinstance(T, lltype.Ptr):
        return lltype.cast_pointer(T, value)
    if T == llmemory.Address:
        return llmemory.cast_ptr_to_adr(value)
    # primitive target: first turn addresses/pointers into integers
    SRC = lltype.typeOf(value)
    if SRC is llmemory.Address:
        value = llmemory.cast_adr_to_int(value)
    elif isinstance(SRC, lltype.Ptr):
        value = lltype.cast_ptr_to_int(value)
    return lltype.cast_primitive(T, value)
def revealconst(T, gv_value):
    """Extract a constant's payload, cast to low-level type T."""
    const = from_opaque_object(gv_value)
    assert isinstance(const, flowmodel.Constant)
    return _generalcast(T, const.value)
def revealconstrepr(gv_value):
    """Return repr() of the constant's payload as an RPython string."""
    const = from_opaque_object(gv_value)
    return LLSupport.to_rstr(repr(const.value))
def isconst(gv_value):
    """True if the opaque object wraps a Constant (not a Variable)."""
    return isinstance(from_opaque_object(gv_value), flowmodel.Constant)
# XXX
# temporary interface; it's unclear if genop itself should change to
# ease distinguishing Void special args from the rest. Or there
# should be variation for the ops involving them
def placeholder(dummy):
    """Wrap 'dummy' as a Void constant, used for Void special arguments."""
    const = flowmodel.Constant(dummy)
    const.concretetype = lltype.Void
    return to_opaque_object(const)
def constFieldName(name):
    """Wrap a field name string as a Void constant."""
    assert isinstance(name, str)
    const = flowmodel.Constant(name)
    const.concretetype = lltype.Void
    return to_opaque_object(const)
def constTYPE(TYPE):
    """Wrap a low-level type as a Void constant."""
    assert isinstance(TYPE, lltype.LowLevelType)
    const = flowmodel.Constant(TYPE)
    const.concretetype = lltype.Void
    return to_opaque_object(const)
def closeblock1(block):
    """Close 'block' with a single unconditional (not yet targeted) link."""
    blk = from_opaque_object(block)
    link = flowmodel.Link([], None)
    blk.closeblock(link)
    return to_opaque_object(link)
def closeblock2(block, exitswitch):
    """Close 'block' on a boolean exitswitch; return (false_link, true_link)."""
    blk = from_opaque_object(block)
    switchvar = from_opaque_object(exitswitch)
    assert isinstance(switchvar, flowmodel.Variable)
    blk.exitswitch = switchvar
    links = []
    for case in (False, True):
        link = flowmodel.Link([], None)
        link.exitcase = case
        link.llexitcase = case
        links.append(link)
    blk.closeblock(*links)
    return pseudotuple(to_opaque_object(links[0]),
                       to_opaque_object(links[1]))
# state for getcolor(): enumerates the hue fractions 1/2, 1/4, 3/4, 1/8, ...
_color_num = 1
_color_den = 2
def getcolor():
    """Return a pastel '#rrggbb' color string, different on each call."""
    global _color_den, _color_num
    import colorsys
    hue = _color_num / float(_color_den)
    # advance through the fractions 1/2, 1/4, 3/4, 1/8, 3/8, 5/8, ...
    _color_num += 2
    if _color_num > _color_den:
        _color_num = 1
        _color_den *= 2
    r, g, b = colorsys.hsv_to_rgb(hue, 0.10, 1.0)
    return '#%02x%02x%02x' % (int(r * 255), int(g * 255), int(b * 255))
def closeblockswitch(block, exitswitch):
    """Close 'block' as a switch on 'exitswitch' (no case links attached yet)."""
    blk = from_opaque_object(block)
    blk.blockcolor = getcolor()
    switchvar = from_opaque_object(exitswitch)
    assert isinstance(switchvar, flowmodel.Variable)
    if isinstance(switchvar.concretetype, lltype.Ptr):
        # XXX hack!  switch on the integer value of the pointer instead
        intvar = varoftype(lltype.Signed)
        blk.operations.append(flowmodel.SpaceOperation(
            'cast_ptr_to_int', [switchvar], intvar))
        switchvar = intvar
    blk.exitswitch = switchvar
    blk.closeblock()
def add_case(block, exitcase):
    """Attach a new case link to a closed switch block, keeping any
    'default' link in last position; return the new link (opaque)."""
    blk = from_opaque_object(block)
    case_const = from_opaque_object(exitcase)
    assert isinstance(case_const, flowmodel.Constant)
    assert isinstance(blk.exitswitch, flowmodel.Variable)
    case_link = flowmodel.Link([], None)
    exitvalue = case_const.value
    if isinstance(lltype.typeOf(exitvalue), lltype.Ptr):
        # XXX hack!  switches compare the integer value of pointers
        exitvalue = lltype.cast_ptr_to_int(exitvalue)
    case_link.exitcase = exitvalue
    case_link.llexitcase = exitvalue
    if blk.exits and blk.exits[-1].exitcase == 'default':
        # insert just before the trailing default link
        exits = blk.exits[:-1] + (case_link,) + blk.exits[-1:]
    else:
        exits = blk.exits + (case_link,)
    blk.recloseblock(*exits)
    return to_opaque_object(case_link)
def add_default(block):
    """Attach the trailing 'default' link to a closed switch block and
    return it (opaque).

    Raises ValueError if the block already has a default link.
    """
    block = from_opaque_object(block)
    assert isinstance(block.exitswitch, flowmodel.Variable)
    default_link = flowmodel.Link([], None)
    default_link.exitcase = 'default'
    default_link.llexitcase = None
    if block.exits and block.exits[-1].exitcase == 'default':
        # previously raised a bare ValueError with no explanation
        raise ValueError("switch block already has a 'default' link")
    exits = block.exits + (default_link,)
    block.recloseblock(*exits)
    return to_opaque_object(default_link)
class pseudotuple(object):
    # something that looks both like a hl and a ll tuple
    def __init__(self, *items):
        # _TYPE makes it acceptable where a low-level tuple is expected
        self._TYPE = rtupletype.TUPLE_TYPE(
            [lltype.typeOf(item) for item in items])
        for i, item in enumerate(items):
            setattr(self, 'item%d' % i, item)   # ll-style field access
        self._items = items
    def __iter__(self):
        # hl-style unpacking
        return iter(self._items)
def _closelink(link, vars, targetblock):
    """Finish an open link (or the graph itself) so that it jumps to
    'targetblock', passing 'vars' as arguments."""
    if isinstance(link, flowmodel.Link):
        assert link.target is None   # link already closed
        blockvars = dict.fromkeys(link.prevblock.getvariables())
        for v in vars:
            if isinstance(v, flowmodel.Variable):
                assert v in blockvars    # link using vars not from prevblock!
            else:
                assert isinstance(v, flowmodel.Constant)
        assert ([v.concretetype for v in vars] ==
                [v.concretetype for v in targetblock.inputargs])
        link.args[:] = vars
        link.target = targetblock
    elif isinstance(link, flowmodel.FunctionGraph):
        # 'link' is the whole graph: make targetblock its start block
        graph = link
        graph.startblock = targetblock
        targetblock.isstartblock = True
    else:
        raise TypeError
def closelink(link, vars, targetblock):
    """Point an open link (opaque) at 'targetblock', passing 'vars' along it."""
    real_link = from_opaque_object(link)
    real_target = from_opaque_object(targetblock)
    _closelink(real_link, _inputvars(vars), real_target)
def closereturnlink(link, returnvar, gv_func):
    """Close 'link' by sending 'returnvar' to the graph's pre-return block."""
    retvar = from_opaque_object(returnvar)
    real_link = from_opaque_object(link)
    graph = _getgraph(gv_func)
    _closelink(real_link, [retvar], graph.prereturnblock)
def closelinktofreshblock(link, inputargs=None, otherlink=None):
    """Close 'link' with a brand new target block and return that block.

    The new block's input variables default to all variables of the
    previous block; the link's arguments default to the same, unless
    'otherlink' is given, in which case they are copied from it."""
    link = from_opaque_object(link)
    prevblockvars = link.prevblock.getvariables()
    # the next block's inputargs come from 'inputargs' if specified
    if inputargs is None:
        inputvars = prevblockvars
    else:
        inputvars = _inputvars(inputargs)
        inputvars = dict.fromkeys(inputvars).keys()   # remove duplicates
    # the link's arguments are the same as the inputvars, except
    # if otherlink is specified, in which case they are copied from otherlink
    if otherlink is None:
        linkvars = list(inputvars)
    else:
        otherlink = from_opaque_object(otherlink)
        linkvars = list(otherlink.args)
    # check linkvars for consistency
    existing_vars = dict.fromkeys(prevblockvars)
    for v in inputvars:
        assert isinstance(v, flowmodel.Variable)
    for v in linkvars:
        assert v in existing_vars
    nextblock = flowmodel.Block(inputvars)
    link.args = linkvars
    link.target = nextblock
    return to_opaque_object(nextblock)
def casting_link(source, sourcevars, target):
    """Close 'source' with a link to 'target', inserting cast_pointer
    operations wherever the variable types do not match exactly."""
    assert len(sourcevars) == len(target.inputargs)
    linkargs = []
    for srcvar, dstvar in zip(sourcevars, target.inputargs):
        if srcvar.concretetype == dstvar.concretetype:
            linkargs.append(srcvar)
            continue
        castvar = flowmodel.Variable()
        castvar.concretetype = dstvar.concretetype
        source.operations.append(
            flowmodel.SpaceOperation('cast_pointer', [srcvar], castvar))
        linkargs.append(castvar)
    source.closeblock(flowmodel.Link(linkargs, target))
# ____________________________________________________________
class PseudoRTyper(object):
    """Minimal stand-in for a real RTyper, sufficient for LLInterpreter."""
    def __init__(self):
        from pypy.rpython.typesystem import LowLevelTypeSystem
        self.type_system = LowLevelTypeSystem.instance
def fixduplicatevars(graph):
    """Give every not-yet-processed block of 'graph' fresh input variables.

    rgenop-produced graphs may reuse one Variable in several blocks; the
    flow model wants them distinct, so rename them block by block.  The
    set of already-handled blocks is remembered on the graph itself.
    """
    done = getattr(graph, '_llimpl_blocks_already_renamed', None)
    if done is None:
        done = graph._llimpl_blocks_already_renamed = {}
    for blk in graph.iterblocks():
        if blk in done:
            continue
        renaming = {}
        for oldvar in blk.inputargs:
            newvar = flowmodel.Variable(oldvar)
            newvar.concretetype = oldvar.concretetype
            renaming[oldvar] = newvar
        blk.renamevariables(renaming)
        done[blk] = True
def _buildgraph(graph):
    """Finalize a graph built by rgenop: drop the sentinel operation,
    rename duplicated variables, and sanity-check the result."""
    # the sentinel debug_assert was inserted by newgraph() to catch a
    # missing builder.end() call
    assert graph.startblock.operations[0].opname == 'debug_assert'
    del graph.startblock.operations[0]
    # rgenop makes graphs that use the same variable in several blocks,
    fixduplicatevars(graph)                             # fix this now
    flowmodel.checkgraph(graph)
    eliminate_empty_blocks(graph)
    # we cannot call join_blocks(graph) here!  It has a subtle problem:
    # it copies operations between blocks without renaming op.result.
    # See test_promotion.test_many_promotions for a failure.
    graph.rgenop = True
    return graph
def buildgraph(graph, FUNCTYPE):
    """Unwrap the opaque graph and finalize it (FUNCTYPE is unused here)."""
    return _buildgraph(from_opaque_object(graph))
def testgengraph(gengraph, args, viewbefore=False, executor=LLInterpreter):
    """Run 'gengraph' on the llinterpreter with the given arguments."""
    if viewbefore:
        gengraph.show()
    interp = executor(PseudoRTyper())
    return interp.eval_graph(gengraph, args)
def runblock(graph, FUNCTYPE, args,
             viewbefore=False, executor=LLInterpreter):
    """Finalize the opaque graph, then run it on the llinterpreter."""
    finished = buildgraph(graph, FUNCTYPE)
    return testgengraph(finished, args, viewbefore, executor)
def show_incremental_progress(gv_func):
    """Debug helper: display the (possibly unfinished) graph when the
    --view option is active; always checks graph consistency."""
    from pypy import conftest
    graph = _getgraph(gv_func)
    fixduplicatevars(graph)
    flowmodel.checkgraph(graph)
    if conftest.option.view:
        eliminate_empty_blocks(graph)
        graph.show()
# ____________________________________________________________
# RTyping of the above functions
from pypy.rpython.extfunctable import declareptrtype
blocktypeinfo = declareptrtype(flowmodel.Block, "Block")
consttypeinfo = declareptrtype(flowmodel.Constant, "ConstOrVar")
vartypeinfo = declareptrtype(flowmodel.Variable, "ConstOrVar")
vartypeinfo.set_lltype(consttypeinfo.get_lltype()) # force same lltype
linktypeinfo = declareptrtype(flowmodel.Link, "Link")
graphtypeinfo = declareptrtype(flowmodel.FunctionGraph, "FunctionGraph")
CONSTORVAR = lltype.Ptr(consttypeinfo.get_lltype())
BLOCK = lltype.Ptr(blocktypeinfo.get_lltype())
LINK = lltype.Ptr(linktypeinfo.get_lltype())
GRAPH = lltype.Ptr(graphtypeinfo.get_lltype())
# support constants and types
nullvar = lltype.nullptr(CONSTORVAR.TO)
nullblock = lltype.nullptr(BLOCK.TO)
nulllink = lltype.nullptr(LINK.TO)
nullgraph = lltype.nullptr(GRAPH.TO)
gv_Void = constTYPE(lltype.Void)
dummy_placeholder = placeholder(None)
guess = placeholder('guess')
# helpers
def setannotation(func, annotation, specialize_as_constant=False):
    """Register annotation/specialization rules for calls to 'func'.

    'annotation' is either None, a SomeObject, or a callable computing
    the result annotation from the argument annotations.  If
    'specialize_as_constant' is true, calls are constant-folded at
    specialization time; otherwise they become direct_calls to 'func'."""
    class Entry(ExtRegistryEntry):
        "Annotation and specialization for calls to 'func'."
        _about_ = func
        if annotation is None or isinstance(annotation, annmodel.SomeObject):
            s_result_annotation = annotation
        else:
            def compute_result_annotation(self, *args_s):
                return annotation(*args_s)
        if specialize_as_constant:
            def specialize_call(self, hop):
                llvalue = func(hop.args_s[0].const)
                return hop.inputconst(lltype.typeOf(llvalue), llvalue)
        else:
            # specialize as direct_call
            def specialize_call(self, hop):
                FUNCTYPE = lltype.FuncType([r.lowleveltype for r in hop.args_r],
                                           hop.r_result.lowleveltype)
                args_v = hop.inputargs(*hop.args_r)
                funcptr = lltype.functionptr(FUNCTYPE, func.__name__,
                                             _callable=func, _debugexc=True)
                cfunc = hop.inputconst(lltype.Ptr(FUNCTYPE), funcptr)
                return hop.genop('direct_call', [cfunc] + args_v, hop.r_result)
# annotations
from pypy.annotation import model as annmodel
s_ConstOrVar = annmodel.SomePtr(CONSTORVAR)#annmodel.SomeExternalObject(flowmodel.Variable)
s_Link = annmodel.SomePtr(LINK)#annmodel.SomeExternalObject(flowmodel.Link)
s_LinkPair = annmodel.SomeTuple([s_Link, s_Link])
s_Block = annmodel.SomePtr(BLOCK)
s_Graph = annmodel.SomePtr(GRAPH)
setannotation(newblock, s_Block)
setannotation(newgraph, s_ConstOrVar)
setannotation(getstartblock, s_Block)
setannotation(geninputarg, s_ConstOrVar)
setannotation(getinputarg, s_ConstOrVar)
setannotation(genop, s_ConstOrVar)
setannotation(end, None)
setannotation(genconst, s_ConstOrVar)
setannotation(genzeroconst, s_ConstOrVar)
setannotation(cast, s_ConstOrVar)
setannotation(revealconst, lambda s_T, s_gv: annmodel.lltype_to_annotation(
s_T.const))
from pypy.rpython.lltypesystem.rstr import STR
setannotation(revealconstrepr, annmodel.SomePtr(lltype.Ptr(STR)))
setannotation(isconst, annmodel.SomeBool())
setannotation(closeblock1, s_Link)
setannotation(closeblock2, s_LinkPair)
setannotation(closeblockswitch, None)
setannotation(add_case, s_Link)
setannotation(add_default, s_Link)
setannotation(closelink, None)
setannotation(closereturnlink, None)
setannotation(closelinktofreshblock, s_Block)
setannotation(isptrtype, annmodel.SomeBool())
# XXX(for now) void constant constructors
setannotation(constFieldName, s_ConstOrVar, specialize_as_constant=True)
setannotation(constTYPE, s_ConstOrVar, specialize_as_constant=True)
#setannotation(placeholder, s_ConstOrVar, specialize_as_constant=True)
setannotation(show_incremental_progress, None)
# read frame var support
def get_frame_info(block, vars_gv):
    """Record 'vars_gv' in a dummy 'frame_info' operation appended to
    'block' and return an opaque GCREF pointing back at that operation."""
    genop(block, 'frame_info', vars_gv, lltype.Void)
    blk = from_opaque_object(block)
    recorded_op = blk.operations[-1]
    return lltype.opaqueptr(llmemory.GCREF.TO, 'frame_info',
                            info=recorded_op)
setannotation(get_frame_info, annmodel.SomePtr(llmemory.GCREF))
def read_frame_var(T, base, info, index):
    """Read (as type T) the index-th variable recorded in 'info' from the
    llinterp frame referenced by 'base'."""
    v = info._obj.info.args[index]
    if isinstance(v, flowmodel.Constant):
        val = v.value
    else:
        val = base.ptr.bindings[v]
    return _generalcast(T, val)
setannotation(read_frame_var, lambda s_T, s_base, s_info, s_index:
annmodel.lltype_to_annotation(s_T.const))
def write_frame_var(base, info, index, value):
    """Overwrite the index-th variable recorded in 'info' in the llinterp
    frame referenced by 'base'."""
    v = info._obj.info.args[index]
    assert isinstance(v, flowmodel.Variable)
    base.ptr.bindings[v] = _generalcast(v.concretetype, value)
setannotation(write_frame_var, None)
| Python |
"""
Processor auto-detection
"""
import sys, os
class ProcessorAutodetectError(Exception):
    """Raised when the host processor cannot be identified or is unsupported."""
    pass
def autodetect():
    """Return a canonical processor name ('i386' or 'ppc') for the host.

    Tries the 'platform' module first, then falls back to sys.platform
    heuristics and 'uname -m'.  Raises ProcessorAutodetectError when the
    machine cannot be determined or is not supported.
    """
    mach = None
    try:
        import platform
        mach = platform.machine()
    except ImportError:
        pass
    if not mach:
        # use a distinct local name; previously this shadowed the
        # 'platform' module imported just above
        platform_name = sys.platform.lower()
        if platform_name.startswith('win'):   # assume an Intel Windows
            return 'i386'
        # assume we have 'uname'
        mach = os.popen('uname -m', 'r').read().strip()
        if not mach:
            raise ProcessorAutodetectError("cannot run 'uname -m'")
    if mach == 'x86_64' and sys.maxint == 2147483647:
        mach = 'x86'     # it's a 64-bit processor but in 32-bits mode, maybe
    try:
        return {'i386': 'i386',
                'i486': 'i386',
                'i586': 'i386',
                'i686': 'i386',
                'i86pc': 'i386',    # Solaris/Intel
                'x86':   'i386',    # Apple
                'Power Macintosh': 'ppc',
                }[mach]
    except KeyError:
        raise ProcessorAutodetectError("unsupported processor '%s'" % mach)
| Python |
from pypy.rlib.objectmodel import specialize
class NotConstant(Exception):
    """Raised by revealconst() when the value is not a compile-time constant."""
    pass
# all the following classes will be subclassed by each backend.
class GenVarOrConst(object):
    '''Instances of this "doubly abstract" class contain values,
    either run-time values or compile time constants.'''
    @specialize.arg(1)
    def revealconst(self, T):
        '''Return a value of low-level type T, or raise NotConstant.
        Some simple conversion may be required, e.g. casting an address to a
        pointer, but not converting a float to an integer.'''
        # default implementation: nothing is constant
        raise NotConstant(self)
class GenVar(GenVarOrConst):
    # a run-time value: revealconst() always raises NotConstant
    is_const = False
class GenConst(GenVarOrConst):
    # a compile-time constant value
    is_const = True
# a word about "tokens":
# several llops take Void arguments, for example the fieldname of a
# getfield. these need to be represented in some way during code
# generation, in the getfield example it might be the offset and size
# of the field in the structure. but this is not enough in general,
# because on the powerpc you need to know if the value should be
# loaded into a general purpose or floating point register.
# for this kind of possibly machine dependent information, we have the
# concept of "token". the tokens are created by specialize.memo()ed
# staticmethods on the RGenOp class, in particular fieldToken,
# allocToken, varsizeAllocToken, kindToken and sigToken. See their
# docstrings for more.
# as they are memo-specialized, these methods can be full Python
# inside, but each method must always return the same type so the jit
# can store the results in a list, for example (each backend can
# decide what this type is independently, though)
class GenBuilder(object):
    '''Instances of GenBuilder -- generally referred to as "builders"
    -- are responsible for actually generating machine code. One
    instance is responsible for one chunk of memory, and when it is
    filled or the generated code jumps away the builder is
    thrown away.'''
    # the genop methods should emit the machine code for a single llop.
    # for most llops, the genop1 and genop2 methods suffice, but some
    # (generally those that take Void arguments, or depend on the
    # types of the arguments) require special attention, and these are
    # handled by the genop_OPNAME methods.
    # the gv_* arguments are instances of GenVarOrConst

##    @specialize.arg(1)
##    def genop1(self, opname, gv_arg):

##    @specialize.arg(1)
##    def genop2(self, opname, gv_arg1, gv_arg2):

##    @specialize.arg(1)
##    def genraisingop1(self, opname, gv_arg):
##        return a pair (gv_result, gv_flag_set_if_exception)

##    @specialize.arg(1)
##    def genraisingop2(self, opname, gv_arg1, gv_arg2):
##        return a pair (gv_result, gv_flag_set_if_exception)

##    def genop_getfield(self, fieldtoken, gv_ptr):
##    def genop_setfield(self, fieldtoken, gv_ptr, gv_value):
##    def genop_getsubstruct(self, fieldtoken, gv_ptr):
##    def genop_getarrayitem(self, arraytoken, gv_ptr, gv_index):
##    def genop_getarraysize(self, arraytoken, gv_ptr):
##    def genop_setarrayitem(self, arraytoken, gv_ptr, gv_index, gv_value):
##    def genop_malloc_fixedsize(self, alloctoken):
##    def genop_malloc_varsize(self, varsizealloctoken, gv_size):
##    def genop_call(self, sigtoken, gv_fnptr, args_gv):
##    def genop_same_as(self, kindtoken, gv_x):
##    def genop_debug_pdb(self):    # may take an args_gv later

##    def genop_ptr_iszero(self, kindtoken, gv_ptr)
##    def genop_ptr_nonzero(self, kindtoken, gv_ptr)
##    def genop_ptr_eq(self, kindtoken, gv_ptr1, gv_ptr2)
##    def genop_ptr_ne(self, kindtoken, gv_ptr1, gv_ptr2)
##    def genop_cast_int_to_ptr(self, kindtoken, gv_int)

    # the other thing that happens for a given chunk is entering and
    # leaving basic blocks inside it.

    def enter_next_block(self, kinds, args_gv):
        '''Called before generating the code for a basic block.
        zip(kinds, args_gv) gives the kindtoken and GenVarOrConst for
        each inputarg of the block.

        The Obscure Bit: args_gv must be mutated in place until it is a
        list of unique GenVars.  So GenConsts must be replaced with
        GenVars, and duplicate GenVars must be made unique.  Optionally,
        *all* GenVars can be replaced with new GenVars, for example if
        the same value might live in different locations (registers,
        places on the stack) in different basic blocks.

        Returns an instance of GenLabel that can later be jumped to.
        '''
        raise NotImplementedError

    def jump_if_false(self, gv_condition, args_for_jump_gv):
        '''Make a fresh builder, insert in the current block a
        check of gv_condition and a conditional jump to the new block
        that is taken if gv_condition is false and return the new
        builder.

        The current builder stays open.  To make the backend\'s life
        easier it must be closed before the fresh builder is used at
        all, and the first thing to call on the latter is
        start_writing().

        args_for_jump_gv lists the variables that need to be live
        after the jump is taken.  The list can contain duplicates
        (which the backend should ignore) but no constants.
        '''
        raise NotImplementedError

    def jump_if_true(self, gv_condition, args_for_jump_gv):
        '''See above, with the obvious difference :)'''
        raise NotImplementedError

    def finish_and_return(self, sigtoken, gv_returnvar):
        '''Emit the epilogue code for the function, and the code to
        return gv_returnvar.  This "closes" the current builder.'''
        raise NotImplementedError

    def finish_and_goto(self, outputargs_gv, target):
        '''Insert an unconditional jump to target.

        outputargs_gv is a list of GenVarOrConsts which corresponds to Link.args
        target is an instance of GenLabel.

        This must insert code to make sure that the values in
        outputargs_gv go where the target block expects them to be.

        This "closes" the current builder.
        '''
        raise NotImplementedError

    def flexswitch(self, gv_exitswitch, args_gv):
        '''The Fun Stuff.

        Generates a switch on the value of gv_exitswitch that can have
        cases added to it later, i.e. even after it\'s been executed a
        few times.

        args_gv is the list of live variables.  It\'s the list of
        variables that can be used in each switch case.  The list can
        contain duplicates (which the backend should ignore) but no
        constants.

        Returns a tuple:
        - an instance of CodeGenSwitch (see below)
        - a new builder for the default case, that will be jumped to
          when the switched-on GenVar does not take the value of any case.

        This "closes" the current builder.
        '''
        raise NotImplementedError

    def show_incremental_progress(self):
        '''Give some indication of the code that this instance has generated.
        So far, the machine code backends don\'t actually do anything for this.
        '''

    def log(self, msg):
        '''Optional method: prints or logs the position of the generated code
        along with the given msg.
        '''

    def pause_writing(self, args_gv):
        '''Optional method: Called when the builder will not be used for a
        while.  This allows the builder to be freed.  The pause_writing()
        method returns the next builder, on which you will have to call
        start_writing() before you continue.

        args_gv lists the variables that need to stay live.  The list can
        contain duplicates (which the backend should ignore) but no
        constants.'''
        # Default implementation: no pausing support, keep using self.
        return self

    def start_writing(self):
        '''Start a builder returned by jump_if_xxx(), or resumes a paused
        builder.'''

    # read frame var support

    def genop_get_frame_base(self):
        '''Generate an operation that reads the current stack frame pointer.
        The pointer can later be passed to read_frame_var() and
        write_frame_place().  This returns a GenVar.
        '''
        raise NotImplementedError

    def get_frame_info(self, vars_gv):
        '''Return a constant object that describes where the variables are
        inside the stack frame.  The result should be correct for the
        current basic block.  It forces the listed variables to live in the
        stack instead of being allocated to registers (or at least to be
        copied into the stack when get_frame_info is called; a copy is ok
        because there is no way to change the value of a variable).
        '''
        raise NotImplementedError

    def alloc_frame_place(self, kind, gv_initial_value=None):
        '''Reserve a "place" in the frame stack where called functions
        can write to, with write_frame_place().  The place is not valid
        any more after the current basic block.
        Return value: any object representing the place.
        '''
        raise NotImplementedError

    def genop_absorb_place(self, kind, place):
        '''Absorb a place.  This turns it into a regular variable,
        containing the last value written into that place.  The place
        itself is no longer a valid target for write_frame_place()
        afterwards.
        Return value: a fresh GenVar.
        '''
        raise NotImplementedError
class GenLabel(object):
    '''A "smart" label.  Represents an address of the start of a basic
    block and the location of the inputargs on entry to that block.'''
class AbstractRGenOp(object):
    '''An RGenOp instance is responsible for coordinating the
    generation of machine code for a given architecture.
    Conceptually at least, instances do not have much state, although
    pratically they have some state relating to management of buffers
    being written to.
    '''

    def newgraph(self, sigtoken, name):
        """Begin code generation for a new function, which signature
        described by sigtoken.  Returns a new builder, entrypoint,
        inputargs_gv where the new builder is for the startblock,
        entrypoint is the address of the new function as GenConst and
        inputargs_gv is the location of each argument on entry to the
        function.  name is for debugging purposes.  The fresh builder
        is initially paused, you must call start_writing() before
        actually putting operations in it.
        """
        raise NotImplementedError

    # all staticmethods commented out for the sake of the annotator

    #@specialize.genconst(0)
    #def genconst(self, llvalue):
    #    """Convert an llvalue to an instance of (a subclass of)
    #    GenConst.  The difference between this and
    #    constPrebuiltGlobal is that this method can use storage
    #    associated with the current RGenOp, i.e. self.  If self is
    #    thrown away, it's safe for anything that this method has
    #    returned to disappear too."""
    #    raise NotImplementedError

    #@staticmethod
    #@specialize.genconst(0)
    #def constPrebuiltGlobal(llvalue):
    #    """Convert an llvalue to an instance of (a subclass of) GenConst.
    #    This is for immortal prebuilt data."""
    #    raise NotImplementedError

    #@staticmethod
    #def genzeroconst(kind):
    #    """Get a GenConst containing the value 0 (or NULL) of the
    #    correct kind."""
    #    raise NotImplementedError

    def replay(self, label, kinds):
        '''Return a builder that will "generate" exactly the same code
        as was already generated, starting from label.  kinds is a
        list of kindTokens for the inputargs associated with label.
        The purpose of this is to reconstruct the knowledge of the
        locations of the GenVars at some later point in the code, any
        code actually generated during replaying is thrown away.'''
        raise NotImplementedError

    #@staticmethod
    #def erasedType(T):
    #    '''Return the canonical type T2 such that kindToken(T) == kindToken(T2).
    #    For example, it\'s common to erase all Ptrs to llmemory.GCREF.
    #    '''

    #@staticmethod
    #@specialize.memo()
    #def fieldToken(T, name):
    #    """Return a token describing the location and type of the field 'name'
    #    within the Struct T."""
    #    raise NotImplementedError

    #@staticmethod
    #@specialize.memo()
    #def allocToken(T):
    #    """Return a token describing the size of the fixed-size type T."""
    #    raise NotImplementedError

    #@staticmethod
    #@specialize.memo()
    #def varsizeAllocToken(T):
    #    """Return a token describing the size of the var-size type T,
    #    i.e. enough information to, when given the item count,
    #    compute how much memory to allocate."""
    #    raise NotImplementedError

    #@staticmethod
    #@specialize.memo()
    #def arrayToken(A):
    #    """Return a token describing the Array A, enough information
    #    to read and write the length, find the base of the items
    #    array and find the size of each item."""
    #    raise NotImplementedError

    #@staticmethod
    #@specialize.memo()
    #def kindToken(T):
    #    """Return a token that describes how to store the low-level
    #    type T.  For example, on PowerPC this might just indicate
    #    whether values of type T live in the FPU or not."""
    #    raise NotImplementedError

    #@staticmethod
    #@specialize.memo()
    #def sigToken(FUNCTYPE):
    #    """Return a token describing the signature of FUNCTYPE."""
    #    raise NotImplementedError

    #@staticmethod
    #@specialize.arg(0)
    #def read_frame_var(T, base, info, index):
    #    """Read from the stack frame of a caller.  The 'base' is the
    #    frame stack pointer captured by the operation generated by
    #    genop_get_frame_base().  The 'info' is the object returned by
    #    get_frame_info(); we are looking for the index-th variable
    #    in the list passed to get_frame_info()."""

    #@staticmethod
    #@specialize.arg(0)
    #def write_frame_place(T, base, place, value):
    #    """Write into a place in the stack frame of a caller.  The
    #    'base' is the frame stack pointer captured by the operation
    #    generated by genop_get_frame_base()."""

    #@staticmethod
    #@specialize.arg(0)
    #def read_frame_place(T, base, place):
    #    """Read from a place in the stack frame of a caller.  The
    #    'base' is the frame stack pointer captured by the operation
    #    generated by genop_get_frame_base()."""

    @staticmethod
    def get_python_callable(FUNC, gv):
        """NOT_RPYTHON
        Turn a GenConst containing the address of a function into a
        Python callable object, for testing purposes.
        """
        from pypy.rpython.lltypesystem import lltype
        from ctypes import cast, c_void_p, CFUNCTYPE, c_int, c_float
        def _to_ctypes(t): #limited type support for now
            # NOTE(review): lltype.Float is a C double; mapping it to
            # c_float may lose precision -- confirm against the backends.
            if t == lltype.Float:
                return c_float
            if t == lltype.Void:
                return None
            # everything else (ints, pointers) is squeezed into c_int
            return c_int
        ctypestypes = [_to_ctypes(t) for t in FUNC.TO.ARGS]
        ctypesres = _to_ctypes(FUNC.TO.RESULT)
        return cast(c_void_p(gv.value), CFUNCTYPE(ctypesres, *ctypestypes))
class CodeGenSwitch(object):
    '''A CodeGenSwitch is a flexible switch on a given GenVar that can have cases added
    to it "later", i.e. after it has been executed a few times.'''

    def add_case(self, gv_case):
        '''Make a new builder that will be jumped to when the
        switched-on GenVar takes the value of the GenConst gv_case.'''
        raise NotImplementedError
# ____________________________________________________________
dummy_var = GenVar()  # shared placeholder returned by all replay operations
class ReplayFlexSwitch(CodeGenSwitch):
    """Flex-switch stand-in used while replaying: no real code is
    generated, so every case continues in the one replay builder."""

    def __init__(self, replay_builder):
        self.replay_builder = replay_builder

    def add_case(self, gv_case):
        # The case value is irrelevant during replay.
        return self.replay_builder
class ReplayBuilder(GenBuilder):
    """A builder that "generates" nothing: every operation returns the
    shared dummy_var.  Used to replay already-generated code (see
    AbstractRGenOp.replay) where results are thrown away."""

    def __init__(self, rgenop):
        self.rgenop = rgenop

    def end(self):
        pass

    @specialize.arg(1)
    def genop1(self, opname, gv_arg):
        return dummy_var

    @specialize.arg(1)
    def genraisingop1(self, opname, gv_arg):
        # (result, exception-flag) pair, both dummies
        return dummy_var, dummy_var

    @specialize.arg(1)
    def genop2(self, opname, gv_arg1, gv_arg2):
        return dummy_var

    @specialize.arg(1)
    def genraisingop2(self, opname, gv_arg1, gv_arg2):
        return dummy_var, dummy_var

    def genop_ptr_iszero(self, kind, gv_ptr):
        return dummy_var

    def genop_ptr_nonzero(self, kind, gv_ptr):
        return dummy_var

    def genop_ptr_eq(self, kind, gv_ptr1, gv_ptr2):
        return dummy_var

    def genop_ptr_ne(self, kind, gv_ptr1, gv_ptr2):
        return dummy_var

    def genop_getfield(self, fieldtoken, gv_ptr):
        return dummy_var

    def genop_setfield(self, fieldtoken, gv_ptr, gv_value):
        return dummy_var

    def genop_getsubstruct(self, fieldtoken, gv_ptr):
        return dummy_var

    def genop_getarrayitem(self, arraytoken, gv_ptr, gv_index):
        return dummy_var

    def genop_getarraysubstruct(self, arraytoken, gv_ptr, gv_index):
        return dummy_var

    def genop_getarraysize(self, arraytoken, gv_ptr):
        return dummy_var

    def genop_setarrayitem(self, arraytoken, gv_ptr, gv_index, gv_value):
        return dummy_var

    def genop_malloc_fixedsize(self, size):
        return dummy_var

    def genop_malloc_varsize(self, varsizealloctoken, gv_size):
        return dummy_var

    def genop_call(self, sigtoken, gv_fnptr, args_gv):
        return dummy_var

    def genop_same_as(self, kind, gv_x):
        return dummy_var

    def genop_debug_pdb(self):    # may take an args_gv later
        pass

    def enter_next_block(self, kinds, args_gv):
        # no label to return while replaying
        return None

    def jump_if_false(self, gv_condition, args_gv):
        return self

    def jump_if_true(self, gv_condition, args_gv):
        return self

    def finish_and_return(self, sigtoken, gv_returnvar):
        pass

    def finish_and_goto(self, outputargs_gv, target):
        pass

    def flexswitch(self, gv_exitswitch, args_gv):
        flexswitch = ReplayFlexSwitch(self)
        return flexswitch, self

    def show_incremental_progress(self):
        pass

    def genop_get_frame_base(self):
        return dummy_var

    def get_frame_info(self, vars_gv):
        return None

    def alloc_frame_place(self, kind, gv_initial_value=None):
        return None

    def genop_absorb_place(self, kind, place):
        return dummy_var
| Python |
'''
Use this file to hide differences between llvm 1.x and 2.x .
'''
from pypy.jit.codegen.llvm.llvmjit import llvm_version
# Pick the spelling of opcodes/types for the detected llvm dialect; rgenop.py
# imports all of these names unconditionally.
if llvm_version() < 2.0:
    # llvm 1.x spells comparisons 'set<cond>' and has one 'cast' instruction
    # instead of the 2.x family of conversion opcodes.
    icmp = scmp = ucmp = fcmp = 'set'
    inttoptr = trunc = zext = bitcast = 'cast'
    shr_prefix = ['', '']   # 1.x has plain 'shr', no logical/arithmetic prefix
    i1 = 'bool'
    i8 = 'ubyte'
    i16 = 'short'
    i32 = 'int'
    i64 = 'long'
    f64 = 'double'  # bugfix: was missing, but rgenop.py imports f64 -> ImportError
    define = ''
    globalprefix = '%'
else: # >= 2.0
    icmp = 'icmp '
    scmp = 'icmp s'
    ucmp = 'icmp u'
    fcmp = 'fcmp o'
    inttoptr = 'inttoptr'
    trunc = 'trunc'
    zext = 'zext'
    bitcast = 'bitcast'
    shr_prefix = ['l', 'a']  # lshr / ashr
    define = 'define'
    globalprefix = '@'
    i1 = 'i1'
    i8 = 'i8'
    i16 = 'i16'
    i32 = 'i32'
    i64 = 'i64'
    f64 = 'double'
| Python |
from distutils.core import setup
from distutils.extension import Extension
from os import popen
# Build the llvm C API helper library by running "python setup.py build_ext -i" here.
def _llvm_config(args):
    # Query llvm-config once and split the single output line into flags.
    return popen('llvm-config --' + args).readline().split()

cxxflags = _llvm_config('cxxflags')
ldflags = _llvm_config('ldflags')
libs = _llvm_config('libs all')

opts = dict(
    name='libllvmjit',
    sources=['lib/libllvmjit.cpp'],
    libraries=[],
    include_dirs=['include'] + [f[2:] for f in cxxflags if f.startswith('-I')],
    library_dirs=[f[2:] for f in ldflags if f.startswith('-L')],
    define_macros=[(f[2:], None) for f in cxxflags if f.startswith('-D')],
    extra_objects=libs)

ext_modules = Extension(**opts)
setup(name=opts['name'], ext_modules=[ext_modules])
| Python |
import py, os
from pypy.rlib.objectmodel import specialize
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rlib.rarithmetic import intmask, r_uint
from pypy.jit.codegen.model import AbstractRGenOp, GenLabel, GenBuilder
from pypy.jit.codegen.model import GenVar, GenConst, CodeGenSwitch
from pypy.jit.codegen.llvm import llvmjit
from pypy.rlib.objectmodel import we_are_translated
from pypy.jit.codegen.i386.rgenop import gc_malloc_fnaddr
from pypy.jit.codegen.llvm.conftest import option
from pypy.jit.codegen.llvm.genvarorconst import count, Var, BoolConst, CharConst,\
IntConst, UIntConst, FloatConst, AddrConst
from pypy.jit.codegen.llvm.logger import logger, log
from pypy.jit.codegen.llvm.cast import cast
from pypy.jit.codegen.llvm.compatibility import icmp, scmp, ucmp, fcmp,\
trunc, zext, bitcast, inttoptr, shr_prefix, define, globalprefix,\
i1, i8, i16, i32, f64
# Derived llvm type names used throughout this module.
pi8 = i8 + '*'    # pointer to byte
pi32 = i32 + '*'  # pointer to 32-bit integer
u32 = i32         # llvm has no unsigned types; signedness lives in the opcodes

# Test-driver flags (see conftest).
LINENO = option.lineno
PRINT_SOURCE = option.print_source
PRINT_DEBUG = option.print_debug
class ParseException(Exception):
    # NOTE(review): no raise site is visible in this chunk; presumably
    # signals failures while parsing the generated llvm source -- confirm.
    pass
class Block(GenLabel):
    """Abstract base for the emitted-code blocks below; each subclass
    renders itself as llvm assembly lines via writecode()."""

    def writecode(self, lines):
        raise NotImplementedError
class BasicBlock(Block):
    """An llvm basic block.

    The source text is stored in the 'asm' list of lines.
    The phinodes dict is updated by branches and is eventually
    turned into 'phi' instructions by the writecode() method.
    """

    def __init__(self, rgenop, label, inputargtypes):
        self.rgenop = rgenop
        self.label = label
        self.inputargs = [Var(type) for type in inputargtypes]
        self.phinodes = {}  # dict {source block: [source vars]}
        self.asm = []
        # register so that writecode() is eventually called on every block
        rgenop.blocklist.append(self)

    def getinputargtypes(self):
        return [var.type for var in self.inputargs]

    def add_incoming_link(self, sourceblock, sourcevars):
        """Record that 'sourceblock' jumps here carrying 'sourcevars'."""
        # check the types for debugging
        sourcevartypes = [var.type for var in sourcevars]
        targetvartypes = [var.type for var in self.inputargs]
        if sourcevartypes != targetvartypes:
            logger.dump('assert fails on: sourcevartypes(%s) != targetvartypes(%s)' % (
                sourcevartypes, targetvartypes))
            self.rgenop._dump_partial_lines()
        assert sourcevartypes == targetvartypes

        # Check if the source block jumps to 'self' from multiple
        # positions: in this case we need an intermediate block,
        # because a phi node can only list each predecessor once.
        if sourceblock in self.phinodes:
            tmplabel = count.newlabel()
            tmpblock = BasicBlock(self.rgenop, tmplabel, targetvartypes)
            tmpblock.add_incoming_link(sourceblock, sourcevars)
            sourceblock = tmpblock
            sourcevars = tmpblock.inputargs

        # Add this source for the phi nodes
        self.phinodes[sourceblock] = list(sourcevars)

    def writecode(self, lines):
        """Append this block's label, phi instructions and body to 'lines'."""
        lines.append(self.label + ':')
        for i in range(len(self.inputargs)):
            targetvar = self.inputargs[i]
            mergelist = []
            for sourceblock, sourcevars in self.phinodes.iteritems():
                mergelist.append("[%s,%%%s]" % (sourcevars[i].operand2(),
                                                sourceblock.label))
            lines.append(' %s=phi %s %s' % (targetvar.operand2(),
                                            targetvar.type,
                                            ', '.join(mergelist)))
        lines.extend(self.asm)
class PrologueBlock(Block):
    """Emits the function header ('define ... {') and an immediate
    branch to the real start block."""

    label = 'LP'

    def __init__(self, sigtoken, name):
        self.name = name
        self.sigtoken = sigtoken   # (argtypes, restype) pair
        argtypes, restype = sigtoken
        self.inputargs = [Var(type) for type in argtypes]
        # self.startblocklabel set by newgraph()

    def writecode(self, lines):
        argtypes, restype = self.sigtoken
        lines.append('%s %s %s%s(%s){' % (
            define, restype, globalprefix, self.name,
            ','.join([v.operand() for v in self.inputargs])))
        lines.append(self.label + ':')
        lines.append(' br label %%%s' % (self.startblocklabel,))
class EpilogueBlock(Block):
    """Terminates the function definition with the closing brace."""

    def writecode(self, lines):
        lines.append('}')
class FlexSwitch(Block):
    """An llvm 'switch' whose cases can still be added after the code
    around it has been generated (see CodeGenSwitch)."""

    def __init__(self, rgenop, builder, gv_exitswitch):
        log('%s FlexSwitch.__init__ %s' % (builder.block.label, gv_exitswitch.operand()))
        self.rgenop = rgenop
        self.builder = builder
        self.gv_exitswitch = gv_exitswitch
        self.default_label = None
        self.cases = []   # list of 'value,label %...' strings
        self.rgenop.blocklist.append(self)

    def add_case(self, gv_case):
        # Fork a builder for the new case and record its target label.
        targetbuilder = self.builder._fork()
        self.cases.append('%s,label %%%s' % (gv_case.operand(), targetbuilder.nextlabel))
        log('%s FlexSwitch.add_case %s => %s' % (
            self.builder.block.label, gv_case.operand(), targetbuilder.nextlabel))
        targetbuilder.start_writing()
        return targetbuilder

    def _add_default(self):
        # Same as add_case but for the fall-through target.
        targetbuilder = self.builder._fork()
        self.default_label = targetbuilder.nextlabel
        log('%s FlexSwitch.add_default => %s' % (
            self.builder.block.label, targetbuilder.nextlabel))
        targetbuilder.start_writing()
        return targetbuilder

    def writecode(self, lines):
        #note: gv_exitswitch should be an integer! (cast might be required here)
        lines.append(' switch %s,label %%%s [%s]' % (
            self.gv_exitswitch.operand(), self.default_label, ' '.join(self.cases)))
class Builder(GenBuilder):
def __init__(self, rgenop, coming_from):
    self.rgenop = rgenop
    self.nextlabel = count.newlabel()   # the label of the next block
    self.block = coming_from            # the old block that jumped here

def _fork(self):
    # New builder that branches off the current block; the caller is
    # responsible for emitting the actual br instruction.
    targetbuilder = Builder(self.rgenop, coming_from=self.block)
    log('%s Builder._fork => %s' % (self.block.label, targetbuilder.nextlabel))
    return targetbuilder

def _close(self):
    self.block = None

def end(self):
    self.rgenop.end() # XXX Hack to be removed!

def pause_writing(self, args_gv):
    # Terminate the current block with a br to a yet-unwritten label;
    # start_writing() will later open that block.
    log('%s Builder.pause_writing' % self.block.label)
    assert self.asm is not None
    self.nextlabel = count.newlabel() # for the next block
    self.asm.append(' br label %%%s' % (self.nextlabel,))
    self.asm = None   # catch accidental writes while paused
    return self

def start_writing(self):
    # Open the block named by self.nextlabel and link it to the block
    # we are coming from (no live variables carried over here).
    log('%s Builder.start_writing' % self.nextlabel)
    assert self.nextlabel is not None
    coming_from = self.block
    # prepare the next block
    nextblock = BasicBlock(self.rgenop, self.nextlabel, [])
    self.block = nextblock
    self.asm = nextblock.asm
    self.nextlabel = None
    nextblock.add_incoming_link(coming_from, [])
# ----------------------------------------------------------------
# The public Builder interface
@specialize.arg(1)
def genop1(self, opname, gv_arg):
    # Dispatch a one-operand llop to the matching op_<opname> method.
    log('%s Builder.genop1 %s %s' % (
        self.block.label, opname, gv_arg.operand()))
    genmethod = getattr(self, 'op_' + opname)
    return genmethod(gv_arg)

@specialize.arg(1)
def genop2(self, opname, gv_arg1, gv_arg2):
    # Dispatch a two-operand llop to the matching op_<opname> method.
    log('%s Builder.genop2 %s %s,%s' % (
        self.block.label, opname, gv_arg1.operand(), gv_arg2.operand()))
    genmethod = getattr(self, 'op_' + opname)
    return genmethod(gv_arg1, gv_arg2)
def _rgenop2_generic(self, llvm_opcode, gv_arg1, gv_arg2, restype=None):
    """Emit '<res> = <opcode> <arg1>,<arg2>' and return the fresh result
    Var (of gv_arg1's type unless restype overrides it)."""
    log('%s Builder._rgenop2_generic %s %s,%s' % (
        self.block.label, llvm_opcode, gv_arg1.operand(), gv_arg2.operand2()))
    restype = restype or gv_arg1.type
    gv_result = Var(restype)
    self.asm.append(' %s=%s %s,%s' % (
        gv_result.operand2(), llvm_opcode, gv_arg1.operand(), gv_arg2.operand2()))
    return gv_result
# --- arithmetic and bitwise operations -------------------------------
def op_int_add(self, gv_x, gv_y):  return self._rgenop2_generic('add' , gv_x, gv_y)
def op_int_sub(self, gv_x, gv_y):  return self._rgenop2_generic('sub' , gv_x, gv_y)
def op_int_mul(self, gv_x, gv_y):  return self._rgenop2_generic('mul' , gv_x, gv_y)

def op_int_floordiv(self, gv_x, gv_y):
    # pick udiv/sdiv from the signedness of the left operand
    return self._rgenop2_generic('us'[gv_x.signed] + 'div', gv_x, gv_y)

def op_int_mod(self, gv_x, gv_y):
    return self._rgenop2_generic('us'[gv_x.signed] + 'rem' , gv_x, gv_y)

def op_int_and(self, gv_x, gv_y):  return self._rgenop2_generic('and' , gv_x, gv_y)
def op_int_or(self, gv_x, gv_y):   return self._rgenop2_generic('or'  , gv_x, gv_y)
def op_int_xor(self, gv_x, gv_y):  return self._rgenop2_generic('xor' , gv_x, gv_y)

def op_int_lshift(self, gv_x, gv_y):
    # llvm wants an i8 shift amount, so truncate gv_y first
    gv_y_i8 = Var(i8)
    self.asm.append(' %s=%s %s to %s' % (
        gv_y_i8.operand2(), trunc, gv_y.operand(), i8))
    gv_result = Var(gv_x.type)
    self.asm.append(' %s=shl %s,%s' % (
        gv_result.operand2(), gv_x.operand(), gv_y_i8.operand()))
    return gv_result

def op_int_rshift(self, gv_x, gv_y):
    # shr_prefix picks lshr/ashr (llvm 2.x) by signedness of gv_x
    gv_y_i8 = Var(i8)
    self.asm.append(' %s=%s %s to %s' % (
        gv_y_i8.operand2(), trunc, gv_y.operand(), i8))
    gv_result = Var(gv_x.type)
    self.asm.append(' %s=%sshr %s,%s' % (
        gv_result.operand2(), shr_prefix[gv_x.signed], gv_x.operand(), gv_y_i8.operand()))
    return gv_result

# unsigned/float variants share the same llvm opcodes
op_uint_add = op_float_add = op_int_add
op_uint_sub = op_float_sub = op_int_sub
op_uint_mul = op_float_mul = op_int_mul
op_uint_floordiv = op_int_floordiv
op_uint_mod = op_int_mod
op_uint_and = op_int_and
op_uint_or = op_int_or
op_uint_xor = op_int_xor
op_uint_lshift = op_int_lshift
op_uint_rshift = op_int_rshift

def op_float_truediv(self, gv_x, gv_y):  return self._rgenop2_generic('fdiv', gv_x, gv_y)
def op_float_neg(self, gv_x):  return self._rgenop2_generic('sub', FloatConst(0.0), gv_x)

# --- comparisons (results are i1) ------------------------------------
def op_int_lt(self, gv_x, gv_y):
    return self._rgenop2_generic(scmp + 'lt', gv_x, gv_y, i1)

def op_int_le(self, gv_x, gv_y):
    return self._rgenop2_generic(scmp + 'le', gv_x, gv_y, i1)

def op_int_eq(self, gv_x, gv_y):
    return self._rgenop2_generic(icmp + 'eq' , gv_x, gv_y, i1)

def op_int_ne(self, gv_x, gv_y):
    return self._rgenop2_generic(icmp + 'ne' , gv_x, gv_y, i1)

def op_int_gt(self, gv_x, gv_y):
    return self._rgenop2_generic(scmp + 'gt', gv_x, gv_y, i1)

def op_int_ge(self, gv_x, gv_y):
    return self._rgenop2_generic(scmp + 'ge', gv_x, gv_y, i1)

def op_uint_lt(self, gv_x, gv_y):
    return self._rgenop2_generic(ucmp + 'lt', gv_x, gv_y, i1)

def op_uint_le(self, gv_x, gv_y):
    return self._rgenop2_generic(ucmp + 'le', gv_x, gv_y, i1)

def op_uint_gt(self, gv_x, gv_y):
    return self._rgenop2_generic(ucmp + 'gt', gv_x, gv_y, i1)

def op_uint_ge(self, gv_x, gv_y):
    return self._rgenop2_generic(ucmp + 'ge', gv_x, gv_y, i1)

def op_float_lt(self, gv_x, gv_y):
    return self._rgenop2_generic(fcmp + 'lt', gv_x, gv_y, i1)

def op_float_le(self, gv_x, gv_y):
    return self._rgenop2_generic(fcmp + 'le', gv_x, gv_y, i1)

def op_float_eq(self, gv_x, gv_y):
    return self._rgenop2_generic(fcmp + 'eq', gv_x, gv_y, i1)

def op_float_ne(self, gv_x, gv_y):
    return self._rgenop2_generic(fcmp + 'ne', gv_x, gv_y, i1)

def op_float_gt(self, gv_x, gv_y):
    return self._rgenop2_generic(fcmp + 'gt', gv_x, gv_y, i1)

def op_float_ge(self, gv_x, gv_y):
    return self._rgenop2_generic(fcmp + 'ge', gv_x, gv_y, i1)

# char/unichar/ptr comparisons reuse the int/uint implementations
op_unichar_eq = op_ptr_eq = op_uint_eq = op_int_eq
op_unichar_ne = op_ptr_ne = op_uint_ne = op_int_ne

op_char_lt = op_uint_lt
op_char_le = op_uint_le
op_char_eq = op_uint_eq
op_char_ne = op_uint_ne
op_char_gt = op_uint_gt
op_char_ge = op_uint_ge
def _rgenop1_generic(self, llvm_opcode, gv_x, restype=None):
    """Emit '<res> = <opcode> <arg>' and return the fresh result Var
    (of gv_x's type unless restype overrides it)."""
    log('%s Builder._rgenop1_generic %s %s' % (
        self.block.label, llvm_opcode, gv_x.operand()))
    restype = restype or gv_x.type
    gv_result = Var(restype)
    self.asm.append(' %s=%s %s' % (
        gv_result.operand2(), llvm_opcode, gv_x.operand()))
    # bugfix: the return line was corrupted text pasted from _abs
    # ('return gv_resulgv_comp.operand(), t'); return the result Var,
    # mirroring _rgenop2_generic.
    return gv_result
# negation and bitwise-not expressed through two-operand ops
def op_int_neg(self, gv_x):     return self._rgenop2_generic('sub', IntConst(0), gv_x)
def op_int_invert(self, gv_x):  return self._rgenop2_generic('xor', gv_x, IntConst(-1))
def op_uint_invert(self, gv_x): return self._rgenop2_generic('xor', gv_x, UIntConst((1<<32)-1))
def _abs(self, gv_x, nullstr='0'):
    """abs(gv_x) via compare-against-zero, negate, and select.
    nullstr is the textual zero of the operand's type ('0', '0.0', 'null')."""
    gv_comp = Var(i1)
    gv_abs_pos = Var(gv_x.type)
    gv_result = Var(gv_x.type)
    # integer-like zeros get a signed icmp, anything else an fcmp
    if nullstr == 'null' or nullstr == '0':
        cmp = scmp
    else:
        cmp = fcmp
    self.asm.append(' %s=%sge %s,%s' % (
        gv_comp.operand2(), cmp, gv_x.operand(), nullstr))
    self.asm.append(' %s=sub %s %s,%s' % (
        gv_abs_pos.operand2(), gv_x.type, nullstr, gv_x.operand2()))
    self.asm.append(' %s=select %s,%s,%s' % (
        gv_result.operand2(), gv_comp.operand(), gv_x.operand(), gv_abs_pos.operand()))
    return gv_result

op_int_abs = _abs
def op_float_abs(self, gv_x):   return self._abs(gv_x, '0.0')
def op_bool_not(self, gv_x):
    # invert an i1 with 'select cond, false, true'
    gv_result = Var(i1)
    self.asm.append(' %s=select %s,%s false,%s true' % (
        gv_result.operand2(), gv_x.operand(), i1, i1))
    return gv_result
#XXX 'cast' has been replaced by many sext/zext/uitofp/... opcodes in the upcoming llvm 2.0.
#The lines upto /XXX should be refactored to do the right thing
def genop_same_as(self, kind, gv_x): #XXX why do we need a 'kind' here?
    """Copy gv_x into a Var.  Constants are materialized with a cast
    (inttoptr for pointer types, bitcast otherwise); Vars pass through."""
    if gv_x.is_const: # must always return a var
        restype = gv_x.type
        gv_result = Var(restype)
        v = gv_x.operand2()
        if restype[-1] == '*':
            # pointer constants come in as integers (null -> 0)
            cst = inttoptr
            t = i32
            if v == 'null':
                v = '0'
        else:
            cst = bitcast
            t = restype
        self.asm.append(' %s=%s %s %s to %s ;1' % (
            gv_result.operand2(), cst, t, v, restype))
        return gv_result
    else:
        return gv_x
def _cast_to(self, gv_x, restype=None):
    """Widening/pointer cast of gv_x to restype (zext, bitcast or
    inttoptr as appropriate); same-type casts degrade to same_as."""
    t = gv_x.type
    restype = restype or t
    if restype is t:
        return self.genop_same_as(None, gv_x)
    gv_result = Var(restype)
    if restype[-1] == '*':
        if gv_x.is_const:
            cst = inttoptr   # constant pointers come in as i32 values
            t = i32
        else:
            cst = bitcast
    else:
        cst = zext
    self.asm.append(' %s=%s %s %s to %s ;2' % (
        gv_result.operand2(), cst, t, gv_x.operand2(), restype))
    return gv_result
def _trunc_to(self, gv_x, restype=None):
    """Narrowing cast of gv_x to restype via 'trunc'; same-type casts
    degrade to same_as."""
    restype = restype or gv_x.type
    if restype is gv_x.type:
        return self.genop_same_as(None, gv_x)
    gv_result = Var(restype)
    self.asm.append(' %s=%s %s to %s' % (
        gv_result.operand2(), trunc, gv_x.operand(), restype))
    return gv_result
# typed shorthands over _cast_to/_trunc_to
def _cast_to_bool(self, gv_x):      return self._cast_to(gv_x, i1)
def _cast_to_char(self, gv_x):      return self._cast_to(gv_x, i8)
def _cast_to_unichar(self, gv_x):   return self._cast_to(gv_x, i32)
def _cast_to_int(self, gv_x):       return self._cast_to(gv_x, i32)
def _cast_to_uint(self, gv_x):      return self._cast_to(gv_x, u32)
def _cast_to_float(self, gv_x):     return self._cast_to(gv_x, f64)

def _trunc_to_bool(self, gv_x):     return self._trunc_to(gv_x, i1)
def _trunc_to_char(self, gv_x):     return self._trunc_to(gv_x, i8)
def _trunc_to_unichar(self, gv_x):  return self._trunc_to(gv_x, i32)
def _trunc_to_int(self, gv_x):      return self._trunc_to(gv_x, i32)
def _trunc_to_uint(self, gv_x):     return self._trunc_to(gv_x, u32)
def _trunc_to_float(self, gv_x):    return self._trunc_to(gv_x, f64)

# llop cast table.
# NOTE(review): the float->int family goes through 'trunc'/'zext', which
# in llvm 2.x are integer-only opcodes (fptosi/uitofp would be needed) --
# see the XXX above; flagged here, not changed.
op_cast_char_to_bool    = _trunc_to_bool
op_cast_unichar_to_bool = _trunc_to_bool
op_cast_int_to_bool     = _trunc_to_bool
op_cast_uint_to_bool    = _trunc_to_bool
op_cast_float_to_bool   = _trunc_to_bool

op_cast_bool_to_char    = _cast_to_char
op_cast_unichar_to_char = _trunc_to_char
op_cast_int_to_char     = _trunc_to_char
op_cast_uint_to_char    = _trunc_to_char
op_cast_float_to_char   = _trunc_to_char

op_cast_bool_to_unichar  = _cast_to_unichar
op_cast_char_to_unichar  = _cast_to_unichar
op_cast_int_to_unichar   = _cast_to_unichar
op_cast_uint_to_unichar  = _cast_to_unichar
op_cast_float_to_unichar = _trunc_to_unichar

op_cast_bool_to_int    = _cast_to_int
op_cast_char_to_int    = _cast_to_int
op_cast_unichar_to_int = _cast_to_int
op_cast_uint_to_int    = _cast_to_int
op_cast_float_to_int   = _trunc_to_int

op_cast_bool_to_uint    = _cast_to_uint
op_cast_char_to_uint    = _cast_to_uint
op_cast_unichar_to_uint = _cast_to_uint
op_cast_int_to_uint     = _cast_to_uint
op_cast_float_to_uint   = _trunc_to_uint

op_cast_bool_to_float    = _cast_to_float
op_cast_char_to_float    = _cast_to_float
op_cast_unichar_to_float = _cast_to_float
op_cast_int_to_float     = _cast_to_float
op_cast_uint_to_float    = _cast_to_float
#/XXX
def enter_next_block(self, kinds, args_gv):
    """Close the current block with a br and open a new BasicBlock whose
    inputargs replace args_gv in place (see GenBuilder docs).  Returns
    the new block, which doubles as the GenLabel."""
    assert self.nextlabel is None
    coming_from = self.block
    newlabel = count.newlabel()
    # we still need to properly terminate the current block
    # (with a br to go to the next block)
    # see: http://llvm.org/docs/LangRef.html#terminators
    self.asm.append(' br label %%%s' % (newlabel,))
    # prepare the next block
    nextblock = BasicBlock(self.rgenop, newlabel, kinds)
    log('%s Builder enter block %s' % (
        nextblock.label, [v.operand() for v in nextblock.inputargs]))
    self.block = nextblock
    self.asm = nextblock.asm
    # link the two blocks together and update args_gv
    nextblock.add_incoming_link(coming_from, args_gv)
    for i in range(len(args_gv)):
        args_gv[i] = nextblock.inputargs[i]
    return self.block
def jump_if_false(self, gv_condition, args_for_jump_gv):
    # Conditional br: false -> forked builder, true -> our next block.
    log('%s Builder.jump_if_false %s' % (self.block.label, gv_condition.operand()))
    targetbuilder = self._fork()
    self.nextlabel = count.newlabel()
    self.asm.append(' br %s,label %%%s,label %%%s' % (
        gv_condition.operand(), self.nextlabel, targetbuilder.nextlabel))
    self.start_writing()
    return targetbuilder

def jump_if_true(self, gv_condition, args_for_jump_gv):
    # Same as jump_if_false with the branch targets swapped.
    log('%s Builder.jump_if_true %s' % (self.block.label, gv_condition.operand()))
    targetbuilder = self._fork()
    self.nextlabel = count.newlabel()
    self.asm.append(' br %s,label %%%s,label %%%s' % (
        gv_condition.operand(), targetbuilder.nextlabel, self.nextlabel))
    self.start_writing()
    return targetbuilder
def _is_false(self, gv_x, nullstr='0'):
log('%s Builder._is_false %s' % (self.block.label, gv_x.operand()))
gv_result = Var(i1)
if nullstr == 'null' or nullstr == '0':
cmp = icmp
else:
cmp = fcmp
self.asm.append(' %s=%seq %s,%s' % (
gv_result.operand2(), cmp, gv_x.operand(), nullstr))
return gv_result
def _is_true(self, gv_x, nullstr='0'):
log('%s Builder._is_true %s' % (self.block.label, gv_x.operand()))
gv_result = Var(i1)
if nullstr == 'null' or nullstr == '0':
cmp = icmp
else:
cmp = fcmp
self.asm.append(' %s=%sne %s,%s' % (
gv_result.operand2(), cmp, gv_x.operand(), nullstr))
return gv_result
op_bool_is_true = op_char_is_true = op_unichar_is_true = op_int_is_true =\
op_uint_is_true = _is_true
    # pointer null tests compare against 'null' instead of the integer '0'
    def op_ptr_nonzero(self, gv_x): return self._is_true(gv_x, 'null')
    def op_ptr_iszero(self, gv_x): return self._is_false(gv_x, 'null')
    # the genop_* variants simply ignore the 'kind' argument
    def genop_ptr_iszero(self, kind, gv_ptr):
        return self.op_ptr_iszero(gv_ptr)
    def genop_ptr_nonzero(self, kind, gv_ptr):
        return self.op_ptr_nonzero(gv_ptr)
    def genop_ptr_eq(self, kind, gv_ptr1, gv_ptr2):
        return self.op_ptr_eq(gv_ptr1, gv_ptr2)
    def genop_ptr_ne(self, kind, gv_ptr1, gv_ptr2):
        return self.op_ptr_ne(gv_ptr1, gv_ptr2)
    def op_float_is_true(self, gv_x): return self._is_true(gv_x, '0.0') #XXX fails for doubles
    def genop_getfield(self, fieldtoken, gv_ptr):
        """Load a struct field: getelementptr by byte offset, cast to a
        pointer-to-fieldtype, then load through it."""
        offset, fieldtype = fieldtoken
        log('%s Builder.genop_getfield (%d,%s) %s' % (
            self.block.label, offset, fieldtype, gv_ptr.operand()))
        gv_ptr_var = self._as_var(gv_ptr)
        gv_p = Var(gv_ptr.type)
        self.asm.append(' %s=getelementptr %s,%s %s' % (
            gv_p.operand2(), gv_ptr_var.operand(), i32, offset))
        gv_p2 = self._cast_to(gv_p, fieldtype + '*')
        gv_result = Var(fieldtype)
        self.asm.append(' %s=load %s' % (
            gv_result.operand2(), gv_p2.operand()))
        return gv_result
    def genop_setfield(self, fieldtoken, gv_ptr, gv_value):
        """Store gv_value into the struct field at (offset, fieldtype)."""
        offset, fieldtype = fieldtoken
        log('%s Builder.genop_setfield (%d,%s) %s=%s' % (
            self.block.label, offset, fieldtype, gv_ptr.operand(), gv_value.operand()))
        gv_ptr_var = self._as_var(gv_ptr)
        gv_p = Var(gv_ptr.type)
        self.asm.append(' %s=getelementptr %s,%s %s' % (
            gv_p.operand2(), gv_ptr_var.operand(), i32, offset))
        gv_p2 = self._cast_to(gv_p, fieldtype + '*')
        self.asm.append(' store %s,%s' % (
            gv_value.operand(), gv_p2.operand()))
    def genop_getsubstruct(self, fieldtoken, gv_ptr):
        """Return a pointer to an inlined substructure (pure address
        computation, no load)."""
        offset, fieldtype = fieldtoken
        log('%s Builder.genop_getsubstruct (%d,%s) %s' % (
            self.block.label, offset, fieldtype, gv_ptr.operand()))
        gv_ptr_var = self._as_var(gv_ptr)
        gv_sub = Var(gv_ptr.type)
        self.asm.append(' %s=getelementptr %s,%s %d' % (
            gv_sub.operand2(), gv_ptr_var.operand(), i32, offset))
        return gv_sub
    def genop_getarraysubstruct(self, arraytoken, gv_ptr, gv_index):
        """Return a pointer (i8*) to the array item at gv_index.

        Work in progress: the i386 backend's equivalent computed the item
        address with MOV/LEA; here we add the byte offset from _itemaddr
        via getelementptr.
        """
        #XXX WIP
        log('%s Builder.genop_getarraysubstruct %s,%s,%s' % (
            self.block.label, arraytoken, gv_ptr.operand(), gv_index.operand()))
        array_length_offset, array_items_offset, item_size, item_type = arraytoken
        op_size = self._itemaddr(arraytoken, gv_index)
        gv_ptr_var = self._as_var(gv_ptr)
        gv_result = Var(pi8)
        self.asm.append(' %s=getelementptr %s,%s' % (
            gv_result.operand2(), gv_ptr_var.operand(), op_size.operand()))
        return gv_result
    def genop_getarraysize(self, arraytoken, gv_ptr):
        """Load the array's length field (an i32 at array_length_offset)."""
        log('%s Builder.genop_getarraysize %s,%s' % (
            self.block.label, arraytoken, gv_ptr.operand()))
        array_length_offset, array_items_offset, item_size, item_type = arraytoken
        gv_ptr_var = self._as_var(gv_ptr)
        gv_p = Var(gv_ptr_var.type)
        self.asm.append(' %s=getelementptr %s,%s %s' % (
            gv_p.operand2(), gv_ptr_var.operand(), i32, array_length_offset))
        gv_p2 = self._cast_to(gv_p, pi32)
        gv_result = Var(i32)
        self.asm.append(' %s=load %s' % (
            gv_result.operand2(), gv_p2.operand()))
        return gv_result
def _as_var(self, gv):
if gv.is_const:
gv_var = Var(gv.type)
#XXX provide correct cast here
self.asm.append(' %s=%s %s %s to %s' % (
gv_var.operand2(), inttoptr, i32, gv.operand2(), gv_var.type))
return gv_var
return gv
    def genop_getarrayitem(self, arraytoken, gv_ptr, gv_index):
        """Load array[gv_index]: advance to the items area, cast to
        item_type*, index into it, then load."""
        array_length_offset, array_items_offset, item_size, item_type = arraytoken
        log('%s Builder.genop_getarrayitem %s,%s[%s]' % (
            self.block.label, arraytoken, gv_ptr.operand(), gv_index.operand()))
        gv_ptr_var = self._as_var(gv_ptr)
        gv_p = Var(gv_ptr_var.type)
        self.asm.append(' %s=getelementptr %s,%s %s' % (
            gv_p.operand2(), gv_ptr_var.operand(), i32, array_items_offset))
        gv_p2 = self._cast_to(gv_p, item_type + '*')
        gv_p3 = Var(gv_p2.type)
        self.asm.append(' %s=getelementptr %s,%s' % (
            gv_p3.operand2(), gv_p2.operand(), gv_index.operand()))
        gv_result = Var(item_type)
        self.asm.append(' %s=load %s' % (
            gv_result.operand2(), gv_p3.operand()))
        return gv_result
    def genop_setarrayitem(self, arraytoken, gv_ptr, gv_index, gv_value):
        """Store gv_value into array[gv_index] (mirrors genop_getarrayitem)."""
        array_length_offset, array_items_offset, item_size, item_type = arraytoken
        log('%s Builder.genop_setarrayitem %s,%s[%s]=%s' % (
            self.block.label, arraytoken, gv_ptr.operand(), gv_index.operand(), gv_value.operand()))
        gv_ptr_var = self._as_var(gv_ptr)
        gv_p = Var(gv_ptr_var.type)
        self.asm.append(' %s=getelementptr %s,%s %s' % (
            gv_p.operand2(), gv_ptr_var.operand(), i32, array_items_offset))
        gv_p2 = self._cast_to(gv_p, item_type + '*')
        gv_p3 = Var(gv_p2.type)
        self.asm.append(' %s=getelementptr %s,%s' % (
            gv_p3.operand2(), gv_p2.operand(), gv_index.operand()))
        self.asm.append(' store %s,%s' % (
            gv_value.operand(), gv_p3.operand()))
    def genop_malloc_fixedsize(self, size):
        """Allocate 'size' bytes through the GC's malloc (called through a
        hard-coded function address) and return the fresh i8* result."""
        log('%s Builder.genop_malloc_fixedsize %s' % (
            self.block.label, str(size)))
        gv_gc_malloc_fnaddr = Var('%s (%s)*' % (pi8, i32))
        gv_result = Var(pi8)
        #or use addGlobalFunctionMapping in libllvmjit.restart()
        self.asm.append(' %s=%s %s %d to %s ;gc_malloc_fnaddr' % (
            gv_gc_malloc_fnaddr.operand2(), inttoptr, i32,
            gc_malloc_fnaddr(), gv_gc_malloc_fnaddr.type))
        self.asm.append(' %s=call %s(%s %d)' % (
            gv_result.operand2(), gv_gc_malloc_fnaddr.operand(), i32, size))
        return gv_result
def _itemaddr(self, arraytoken, gv_index):
length_offset, items_offset, item_size, item_type = arraytoken
gv_size2 = Var(i32) #i386 uses self.itemaddr here
self.asm.append(' %s=mul %s,%d' % (
gv_size2.operand2(), gv_index.operand(), item_size))
gv_size3 = Var(i32)
self.asm.append(' %s=add %s,%d' % (
gv_size3.operand2(), gv_size2.operand(), items_offset))
return gv_size3
    def genop_malloc_varsize(self, varsizealloctoken, gv_size):
        """Allocate a variable-sized object: gc_malloc(_itemaddr(size)) and
        then store gv_size into the object's length field."""
        log('%s Builder.genop_malloc_varsize %s,%s' % (
            self.block.label, varsizealloctoken, gv_size.operand()))
        length_offset, items_offset, item_size, item_type = varsizealloctoken
        gv_gc_malloc_fnaddr = Var('%s (%s)*' % (pi8, i32))
        #or use addGlobalFunctionMapping in libllvmjit.restart()
        self.asm.append(' %s=%s %s %d to %s ;gc_malloc_fnaddr (varsize)' % (
            gv_gc_malloc_fnaddr.operand2(), inttoptr, i32,
            gc_malloc_fnaddr(), gv_gc_malloc_fnaddr.type))
        op_size = self._itemaddr(varsizealloctoken, gv_size)
        gv_result = Var(pi8)
        self.asm.append(' %s=call %s(%s)' % (
            gv_result.operand2(), gv_gc_malloc_fnaddr.operand(), op_size.operand()))
        gv_p = Var(gv_result.type)
        self.asm.append(' %s=getelementptr %s,%s %s' % (
            gv_p.operand2(), gv_result.operand(), i32, length_offset))
        gv_p2 = self._cast_to(gv_p, pi32) #warning: length field hardcoded as int here
        self.asm.append(' store %s, %s' % (gv_size.operand(), gv_p2.operand()))
        return gv_result
def _funcsig_type(self, args_gv, restype):
return '%s (%s)' % (restype, ','.join([a.type for a in args_gv]))
    def genop_call(self, sigtoken, gv_fnptr, args_gv):
        """Emit a call instruction.

        Constant function addresses (AddrConst) are first materialized with
        an inttoptr cast; otherwise the callee's signature is looked up in
        rgenop.funcsig.  Constant pointer arguments are materialized the
        same way before being passed.
        """
        log('%s Builder.genop_call %s,%s,%s' % (
            self.block.label, sigtoken, gv_fnptr, [v.operand() for v in args_gv]))
        argtypes, restype = sigtoken
        if isinstance(gv_fnptr, AddrConst):
            gv_fn = Var(self._funcsig_type(args_gv, restype) + '*')
            self.asm.append(' %s=%s %s %s to %s' % (
                gv_fn.operand2(), inttoptr, i32, gv_fnptr.operand2(), gv_fn.type))
            funcsig = gv_fn.operand()
        else:
            try:
                funcsig = self.rgenop.funcsig[gv_fnptr.get_integer_value()]
            except KeyError:
                funcsig = 'TODO: funcsig here'
                py.test.skip('call an address directly not supported yet')
        args_gv2 = []
        for v in args_gv:
            if v.is_const and v.type[-1] == '*': #or use some kind of 'inline' cast (see LangRef)
                t = Var(v.type)
                self.asm.append(' %s=%s %s %s to %s' % (
                    t.operand2(), inttoptr, i32, v.operand2(), v.type))
                v = t
            args_gv2.append(v)
        gv_returnvar = Var(restype)
        self.asm.append(' %s=call %s(%s)' % (
            gv_returnvar.operand2(),
            funcsig,
            ','.join([v.operand() for v in args_gv2])))
        return gv_returnvar
def finish_and_return(self, sigtoken, gv_returnvar):
log('%s Builder.finish_and_return %s,%s' % (
self.block.label, sigtoken, gv_returnvar.operand()))
self.asm.append(' ret ' + gv_returnvar.operand())
self._close()
    def finish_and_goto(self, outputargs_gv, target):
        """Branch to 'target' and register outputargs_gv as the phi inputs
        for that incoming edge, then close this builder."""
        # 'target' is a label, which for the llvm backend is a Block
        log('%s Builder.finish_and_goto' % self.block.label)
        gv = [v.operand() for v in outputargs_gv]
        log('%s Builder.finish_and_goto %s,%s' % (
            self.block.label, gv, target.label))
        self.asm.append(' br label %%%s' % (target.label,))
        target.add_incoming_link(self.block, outputargs_gv)
        self._close()
def flexswitch(self, gv_exitswitch, args_gv):
log('%s Builder.flexswitch %s' % (self.block.label, gv_exitswitch.operand()))
flexswitch = FlexSwitch(self.rgenop, self, gv_exitswitch)
return flexswitch, flexswitch._add_default()
class RLLVMGenOp(AbstractRGenOp):
    """RGenOp implementation that emits LLVM assembly source and compiles
    it at the end of each graph through the llvmjit library."""

    funcsig = {} #HACK for looking up function signatures
    funcused = {} #we rename functions when encountered multiple times (for test_branching_compile)

    def check_no_open_mc(self):
        return True

    def _dump_partial_lines(self): #what we've generated so far
        # debugging helper: dump the (numbered) asm produced so far
        asmlines = []
        for block in self.blocklist:
            block.writecode(asmlines)
        asmlines = ['%s ;%d' % (asmlines[i], i+1) for i in range(len(asmlines))]
        asm_string = '\n'.join(asmlines)
        logger.dump(asm_string)

    def end(self):
        """Finish the current graph: assemble all blocks into one LLVM
        source string, parse + optimize + JIT it, and patch the entrypoint
        constant with the real function address."""
        log(' RLLVMGenOp.end')
        self.blocklist.append(EpilogueBlock())
        asmlines = []
        for block in self.blocklist:
            block.writecode(asmlines)
        if LINENO:
            asmlines = ['%s ;%d' % (asmlines[i], i+1) for i in range(len(asmlines))]
        asm_string = '\n'.join(asmlines)
        self.blocklist = None
        if PRINT_SOURCE:
            print asm_string
        logger.dump(asm_string)
        parse_ok = llvmjit.parse(asm_string)
        if not parse_ok:
            raise ParseException()
        llvmjit.transform(3) #optimize module (should be on functions actually)
        function = llvmjit.getNamedFunction(self.name)
        entrypoint = llvmjit.getPointerToFunctionAsInt(function)
        # XXX or directly cast the ctypes ptr to int with:
        #     ctypes.cast(ptr, c_void_p).value
        # re-key the signature under the real address of the compiled code
        self.funcsig[entrypoint] = self.funcsig[self.gv_entrypoint.value]
        self.gv_entrypoint.value = entrypoint

    # ----------------------------------------------------------------
    # the public RGenOp interface

    def newgraph(self, sigtoken, name):
        """Start a new function graph; returns (builder, entrypoint, args)."""
        if name in self.funcused:
            self.funcused[name] += 1
            name = '%s_%d' % (name, self.funcused[name])
        else:
            self.funcused[name] = 0
        log(' RLLVMGenOp.newgraph %s,%s' % (sigtoken, name))
        prologueblock = PrologueBlock(sigtoken, name)
        self.blocklist = [prologueblock]
        builder = Builder(self, coming_from=prologueblock)
        prologueblock.startblocklabel = builder.nextlabel
        argtypes, restype = sigtoken
        n = len(self.funcsig) * 2 + 1 #+1 so we recognize these pre compilation 'pointers'
        self.name = name
        self.funcsig[n] = '%s %s%s' % (restype, globalprefix, name)
        self.gv_entrypoint = IntConst(n) #note: updated by Builder.end() (i.e after compilation)
        args = list(prologueblock.inputargs)
        return builder, self.gv_entrypoint, args

    @specialize.genconst(1)
    def genconst(self, llvalue):
        """Wrap an lltype value into the matching *Const class."""
        T = lltype.typeOf(llvalue)
        if T is llmemory.Address:
            return AddrConst(llvalue)
        elif T is lltype.Bool:
            return BoolConst(lltype.cast_primitive(lltype.Bool, llvalue))
        elif T is lltype.Char:
            return CharConst(lltype.cast_primitive(lltype.Char, llvalue))
        elif T is lltype.Unsigned:
            return UIntConst(lltype.cast_primitive(lltype.Unsigned, llvalue))
        elif T is lltype.Float:
            return FloatConst(lltype.cast_primitive(lltype.Float, llvalue))
        elif isinstance(T, lltype.Primitive):
            return IntConst(lltype.cast_primitive(lltype.Signed, llvalue))
        elif isinstance(T, lltype.Ptr):
            lladdr = llmemory.cast_ptr_to_adr(llvalue)
            #if T.TO._gckind == 'gc':
            #    self.keepalive_gc_refs.append(lltype.cast_opaque_ptr(llmemory.GCREF, llvalue))
            return AddrConst(lladdr)
        else:
            msg = 'XXX not implemented'
            logger.dump(msg)
            assert 0, msg

    # attached later: constPrebuiltGlobal = global_rgenop.genconst

    @staticmethod
    def genzeroconst(kind):
        # canonical zero for each kind, shared via the zero_consts table
        return zero_consts[kind]

    @staticmethod
    @specialize.memo()
    def kindToken(T):
        # turn the type T into the llvm approximation that we'll use here
        # XXX incomplete
        if isinstance(T, lltype.Ptr) or T is llmemory.Address:
            return pi8
        elif T is lltype.Bool:
            return i1
        elif T is lltype.Char:
            return i8
        elif T is lltype.Unsigned:
            return u32
        elif T is lltype.Float:
            return f64
        else:
            return i32 #Signed/UniChar/Void

    @staticmethod
    @specialize.memo()
    def fieldToken(T, name):
        # (byte offset, llvm field type) for struct field access
        FIELD = getattr(T, name)
        if isinstance(FIELD, lltype.ContainerType):
            fieldtype = pi8 # not useful for getsubstruct
        else:
            fieldtype = RLLVMGenOp.kindToken(FIELD)
        return (llmemory.offsetof(T, name), fieldtype)

    @staticmethod
    @specialize.memo()
    def allocToken(T):
        return llmemory.sizeof(T)

    @staticmethod
    @specialize.memo()
    def varsizeAllocToken(T):
        #XXX TODO
        if isinstance(T, lltype.Array):
            return RLLVMGenOp.arrayToken(T)
        else:
            # var-sized structs: shift the array token by the offset of
            # the embedded array field
            arrayfield = T._arrayfld
            ARRAYFIELD = getattr(T, arrayfield)
            arraytoken = RLLVMGenOp.arrayToken(ARRAYFIELD)
            length_offset, items_offset, item_size, item_type = arraytoken
            arrayfield_offset = llmemory.offsetof(T, arrayfield)
            return (arrayfield_offset+length_offset,
                    arrayfield_offset+items_offset,
                    item_size,
                    item_type)

    @staticmethod
    @specialize.memo()
    def arrayToken(A):
        # (length offset, items offset, item size, llvm item type)
        return (llmemory.ArrayLengthOffset(A),
                llmemory.ArrayItemsOffset(A),
                llmemory.ItemOffset(A.OF),
                RLLVMGenOp.kindToken(A.OF))

    @staticmethod
    @specialize.memo()
    def sigToken(FUNCTYPE):
        # ([arg kinds], result kind)
        argtypes = [RLLVMGenOp.kindToken(T) for T in FUNCTYPE.ARGS]
        restype = RLLVMGenOp.kindToken(FUNCTYPE.RESULT)
        return (argtypes, restype)

    @staticmethod
    def erasedType(T):
        if T is llmemory.Address:
            return llmemory.Address
        if isinstance(T, lltype.Primitive):
            return lltype.Signed
        elif isinstance(T, lltype.Ptr):
            return llmemory.GCREF
        else:
            msg = 'XXX not implemented'
            logger.dump(msg)
            assert 0, msg
# singleton used for prebuilt global constants
global_rgenop = RLLVMGenOp()
RLLVMGenOp.constPrebuiltGlobal = global_rgenop.genconst

# canonical zero constant for every LLVM kind, used by genzeroconst()
zero_consts = {
    pi8: AddrConst(llmemory.NULL),
    i1: BoolConst(False),
    i8: CharConst('\x00'),
    u32: UIntConst(r_uint(0)),
    f64: FloatConst(0.0),
    i32: IntConst(0),
    }
| Python |
import py
from pypy.jit.codegen import detect_cpu
#XXX Should check here if llvm supports a JIT for this platform (perhaps using lli?)
class Directory(py.test.collect.Directory):
    """Test collector that currently skips the whole jit.codegen.llvm suite."""
    def run(self):
        py.test.skip("skipping jit.codegen.llvm for now")
        # NOTE: everything below is unreachable while the unconditional skip
        # above is active; it used to restrict these tests to i386 CPUs.
#        try:
#            processor = detect_cpu.autodetect()
#        except detect_cpu.ProcessorAutodetectError, e:
#            py.test.skip(str(e))
#        else:
#            if processor != 'i386':
#                py.test.skip('detected a %r CPU' % (processor,))
        return super(Directory, self).run()
# extra command-line options for the llvm backend test runs
Option = py.test.config.Option
option = py.test.config.addoptions("llvm options",
        Option('--lineno', action="store_true", default=False,
               dest="lineno",
               help="add linenumbers to the generated code"),
        Option('--print-source', action="store_true", default=False,
               dest="print_source",
               help="print generated sources"),
        Option('--print-debug', action="store_true", default=False,
               dest="print_debug",
               help="print debug information"))
| Python |
import py
from pypy.jit.codegen.llvm.genvarorconst import Var, BoolConst, CharConst,\
IntConst, UIntConst, FloatConst, AddrConst
from pypy.jit.codegen.llvm.compatibility import icmp, scmp, ucmp, fcmp, inttoptr,\
trunc, zext, bitcast, inttoptr, shr_prefix, define, i1, i8, i16, i32, f64
def cast(osrc, dst):
print src, '->', dst
| Python |
'''
Another go at using the LLVM JIT as a codegenerator for PyPy.
For now we use the LLVM C++ API as little as possible!
In future we might talk directly to the LLVM C++ API.
This file contains the ctypes specification to use the llvmjit library!
'''
import autopath
from pypy.rpython.rctypes import implementation
from pypy.rpython.rctypes.tool.util import load_library
from ctypes import _CFuncPtr, _FUNCFLAG_CDECL
from ctypes import *
import os
# (re)build libllvmjit.so inside its own directory, then load it with ctypes
newdir = os.path.dirname(__file__)
path = os.path.join(newdir, 'libllvmjit.so')
curdir = os.getcwd()
if newdir:
    os.chdir(newdir)

#With py.test --session=R the master server rsyncs the .so library too!?!
#So we always need to recompile the library if its platform (output of file libllvmjit.so)
#differs from the current (remote) platform.
#note: we can't do this in global scope because that will only be executed on the master server.
#os.system('rm -rf libllvmjit.so build')

#We might want to generate an up-to-date version of the library always so running (tests)
#on a clean checkout will produce correct results.
os.system('python setup.py build_ext -i')

os.chdir(curdir)
if not os.path.exists(path):
    import py
    py.test.skip("libllvmjit.so compilation failed (no llvm headers or llvm version not up to date?)")

#load the actual library
llvmjit = load_library(os.path.abspath(path))
class _FuncPtr(_CFuncPtr):
    """cdecl function-pointer class bound against libllvmjit's directory."""
    _flags_ = _FUNCFLAG_CDECL
    # aaarghdistutilsunixaaargh (may need something different for standalone builds...)
    libraries = (os.path.join(os.path.dirname(path), 'llvmjit'),)
# make ctypes use our cdecl pointer class for every symbol of this library
llvmjit._FuncPtr = _FuncPtr

# oldest LLVM release these bindings are known to work with
MINIMAL_VERSION = 2.0
def llvm_version():
    """Return the installed LLVM version as a float (major.minor).

    Runs 'llvm-as -version' and parses the first 'X.Y' occurrence in its
    output.  The previous implementation concatenated *all* digits in the
    output and divided by ten, which misparses multi-part versions
    (e.g. '2.1.1' -> 21.1) and raised ValueError when no digit was found.
    Returns 0.0 when no version number can be located (e.g. llvm-as is
    not installed), which compares as "too old" against MINIMAL_VERSION.
    """
    import re
    output = os.popen('llvm-as -version 2>&1').read()
    match = re.search(r'(\d+\.\d+)', output)
    if match is None:
        return 0.0
    return float(match.group(1))
#ensure stable state (XXX this doesn't seem to get called when translated! We do it in parse!)
llvmjit.restart()

# exposed functions: declare restype/argtypes for each C entry point
restart = llvmjit.restart

transform = llvmjit.transform
transform.restype = c_int
transform.argtypes = [c_int]

parse = llvmjit.parse
parse.restype = c_int
parse.argtypes = [c_char_p]

getNamedFunction = llvmjit.getNamedFunction
getNamedFunction.restype = c_void_p
getNamedFunction.argtypes = [c_char_p]

getNamedGlobal = llvmjit.getNamedGlobal
getNamedGlobal.restype = c_void_p
getNamedGlobal.argtypes = [c_char_p]

getPointerToFunction = llvmjit.getPointerToFunction
getPointerToFunction.restype = c_void_p
getPointerToFunction.argtypes = [c_void_p]

# NOTE(review): ctypes caches attribute lookups on a CDLL, so this is the
# *same* function object as getPointerToFunction above; assigning
# restype = c_int below therefore also overrides the c_void_p restype set
# above.  Confirm this aliasing is intended.
getPointerToFunctionAsInt = llvmjit.getPointerToFunction
getPointerToFunctionAsInt.restype = c_int
getPointerToFunctionAsInt.argtypes = [c_void_p]

freeMachineCodeForFunction = llvmjit.freeMachineCodeForFunction
freeMachineCodeForFunction.restype = c_int
freeMachineCodeForFunction.argtypes = [c_void_p]

recompile = llvmjit.recompile
recompile.restype = c_int
recompile.argtypes = [c_void_p]

execute = llvmjit.execute
execute.restype = c_int
execute.argtypes = [c_void_p, c_int]

get_global_data = llvmjit.get_global_data
get_global_data.restype = c_int
get_global_data.argtypes = []

set_global_data = llvmjit.set_global_data
set_global_data.argtypes = [c_int]

get_pointer_to_global_data = llvmjit.get_pointer_to_global_data
get_pointer_to_global_data.restype = POINTER(c_int)
get_pointer_to_global_data.argtypes = []

get_pointer_to_global_function = llvmjit.get_pointer_to_global_function
get_pointer_to_global_function.restype = c_void_p
get_pointer_to_global_function.argtypes = []

addGlobalMapping = llvmjit.addGlobalMapping
addGlobalMapping.argtypes = [c_void_p, c_void_p]
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))

    # walk upwards until a directory named 'part' is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)

    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)

    # re-register already-imported top-level modules that physically live
    # inside the pypy tree under their fully dotted names, so that later
    # dotted imports reuse the same module objects
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod

    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
                if not hasattr(sys.modules[prename], postname):
                    setattr(sys.modules[prename], postname, mod)

    return partdir, this_dir
def __clone():
    """ clone master version of autopath.py into all subdirs """
    from os.path import join, walk
    if not this_dir.endswith(join('pypy','tool')):
        raise EnvironmentError("can only clone master version "
                               "'%s'" % join(pypydir, 'tool',_myname))

    def sync_walker(arg, dirname, fnames):
        # overwrite every autopath.py found under pypydir with the master
        # copy ('arg'), unless it is already identical
        if _myname in fnames:
            fn = join(dirname, _myname)
            f = open(fn, 'rwb+')
            try:
                if f.read() == arg:
                    print "checkok", fn
                else:
                    print "syncing", fn
                    f = open(fn, 'w')
                    f.write(arg)
            finally:
                f.close()
    s = open(join(pypydir, 'tool', _myname), 'rb').read()
    walk(pypydir, sync_walker, s)
_myname = 'autopath.py'

# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    __clone()
| Python |
import py, os
from pypy.rlib.objectmodel import we_are_translated
from pypy.jit.codegen.llvm.conftest import option
# cache the --print-debug command-line flag once at import time
PRINT_DEBUG = option.print_debug
class Logger:
    """Append JIT log lines to the file named by $PYPYJITLOG.

    The file is opened lazily on first use; logging is disabled for good
    after the first failure (missing env var or unopenable file)."""
    enabled = True
    log_fd = -1

    def _freeze_(self):
        # reset the machine_code_dumper global instance to its default state
        if self.log_fd >= 0:
            os.close(self.log_fd)
            self.__dict__.clear()
        return False

    def open(self):
        """Return True when the log file is (now) open, opening it on demand."""
        if not self.enabled:
            return False
        if self.log_fd < 0:
            # check the environment for a file name
            from pypy.rlib.ros import getenv
            s = getenv('PYPYJITLOG')
            if not s:
                self.enabled = False
                return False
            try:
                flags = os.O_WRONLY|os.O_CREAT|os.O_TRUNC
                self.log_fd = os.open(s, flags, 0666)
            except OSError:
                os.write(2, "could not create log file\n")
                self.enabled = False
                return False
            # log the executable name
            from pypy.jit.codegen.hlinfo import highleveljitinfo
            if highleveljitinfo.sys_executable:
                os.write(self.log_fd, 'SYS_EXECUTABLE %s\n' % (
                    highleveljitinfo.sys_executable,))
        return True

    def dump(self, s):
        """Write one line to the log file; no-op while logging is disabled."""
        if not self.open():
            return
        os.write(self.log_fd, str(s) + '\n')
logger = Logger()   # module-level singleton shared by the whole backend
def log(s):
    # echo to stdout when --print-debug was given (untranslated runs only),
    # and always forward the line to the PYPYJITLOG logger
    if PRINT_DEBUG and not we_are_translated():
        print str(s)
    logger.dump(s)
| Python |
import py, os
from pypy.rlib.objectmodel import specialize
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rlib.rarithmetic import intmask
from pypy.jit.codegen.model import GenVar, GenConst
from pypy.jit.codegen.llvm.logger import logger
from pypy.jit.codegen.llvm.compatibility import i1, i8, i16, i32, f64
# derived LLVM type strings
pi8 = i8 + '*'    # 'i8*', generic byte pointer
pi32 = i32 + '*'  # 'i32*'
u32 = i32         # LLVM spells unsigned like i32; signedness lives in the ops
class Count(object):
    """Global counters handing out fresh SSA variable numbers and
    basic-block label names."""
    n_vars = 0
    n_labels = 0

    def newlabel(self):
        """Return the next unused label: 'L0', 'L1', ..."""
        fresh = 'L%d' % (self.n_labels,)
        self.n_labels += 1
        return fresh
count = Count()   # shared module-level counter instance
class Var(GenVar):
    """An SSA variable, globally numbered through 'count'."""
    def __init__(self, type):
        self.n = count.n_vars     # unique sequence number
        self.type = type          # LLVM type string, e.g. 'i32'
        # only the signed-int and float kinds use signed operations
        self.signed = type is i32 or type is f64
        count.n_vars += 1

    def operand(self):
        """Typed operand text, e.g. 'i32 %v7'."""
        return '%s %s' % (self.type, self.operand2())

    def operand2(self):
        """Bare SSA name, e.g. '%v7'."""
        return '%%v%d' % (self.n,)
class GenericConst(GenConst):
    """Base class for immediates; subclasses provide type/signed/operand2
    and get_integer_value."""
    def operand(self):
        return '%s %s' % (self.type, self.operand2())

    @specialize.arg(1)
    def revealconst(self, T):
        # reconstruct an lltype value of type T from the stored integer
        if isinstance(T, lltype.Ptr):
            return lltype.cast_int_to_ptr(T, self.get_integer_value())
        elif T is llmemory.Address:
            return llmemory.cast_int_to_adr(self.get_integer_value())
        else:
            return lltype.cast_primitive(T, self.get_integer_value())
class BoolConst(GenericConst):
    """Immediate of LLVM type i1, rendered as 'true' or 'false'."""
    type = i1
    signed = False

    def __init__(self, value):
        self.value = bool(value)

    def operand2(self):
        return {True: 'true', False: 'false'}[self.value]

    def get_integer_value(self):
        return int(self.value)
class CharConst(GenericConst):
    """Immediate of LLVM type i8; the character is stored as its ordinal."""
    type = i8
    signed = False

    def __init__(self, value):
        self.value = ord(value)

    def operand2(self):
        return str(self.value)

    def get_integer_value(self):
        return self.value
class UniCharConst(GenericConst):
    """Immediate for unichar values.

    NOTE(review): unlike CharConst this stores the unicode object itself
    (no ord()), yet get_integer_value() applies int() to it, which raises
    for non-digit characters.  Looks inconsistent/unexercised -- confirm
    before relying on it."""
    type = i32
    signed = True

    def __init__(self, value):
        self.value = unicode(value)

    def operand2(self):
        return '%s' % self.value

    def get_integer_value(self):
        return int(self.value)
class IntConst(GenericConst):
    """Signed i32 immediate."""
    type = i32
    signed = True

    def __init__(self, value):
        self.value = int(value)

    def operand2(self):
        return '%d' % (self.value,)

    def get_integer_value(self):
        return self.value
class UIntConst(GenericConst):
    """Unsigned immediate (type u32, which is spelled i32 in LLVM)."""
    type = u32
    signed = False

    def __init__(self, value):
        # value is kept as given -- presumably an r_uint; confirm at callers
        self.value = value

    def operand2(self):
        return str(self.value)

    def get_integer_value(self):
        # intmask folds the unsigned value back into a signed machine int
        return intmask(self.value)
class FloatConst(GenericConst):
    """f64 immediate; revealconst only supports lltype.Float."""
    type = f64
    signed = True

    def __init__(self, value):
        self.value = float(value)

    def operand2(self):
        return str(self.value)

    @specialize.arg(1)
    def revealconst(self, T):
        assert T is lltype.Float
        return self.value
class AddrConst(GenConst):
    """Address immediate (i8*); renders as 'null' for the NULL address."""
    type = pi8
    signed = False
    addr = llmemory.NULL #have 'addr' even when not instantiated

    def __init__(self, addr):
        self.addr = addr

    def operand(self):
        return '%s %s' % (self.type, self.operand2())

    def operand2(self):
        addr = self.addr
        s = str(llmemory.cast_adr_to_int(addr))
        if s == '0':
            s = 'null'
        return s

    @specialize.arg(1)
    def revealconst(self, T):
        if T is llmemory.Address:
            return self.addr
        elif isinstance(T, lltype.Ptr):
            return llmemory.cast_adr_to_ptr(self.addr, T)
        elif T is lltype.Signed:
            return llmemory.cast_adr_to_int(self.addr)
        else:
            msg = 'XXX not implemented'
            logger.dump(msg)
            assert 0, msg
| Python |
import py

# command-line options shared by all codegen backend test runs
Option = py.test.config.Option
option = py.test.config.addoptions("codegen options",
        Option('--trap', action="store_true", default=False,
               dest="trap",
               help="generate a breakpoint instruction at the start"))
| Python |
import py
from pypy.jit.codegen.model import AbstractRGenOp, GenLabel, GenBuilder
from pypy.jit.codegen.model import GenVar, GenConst, CodeGenSwitch
from pypy.jit.codegen.model import ReplayBuilder, dummy_var
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.lltypesystem import lloperation
from pypy.rpython.extfunc import register_external
from pypy.rlib.objectmodel import specialize, we_are_translated
from pypy.jit.codegen.conftest import option
from ctypes import POINTER, cast, c_void_p, c_int, CFUNCTYPE
from pypy.jit.codegen.ppc import codebuf
from pypy.jit.codegen.ppc.instruction import rSP, rFP, rSCRATCH, gprs
from pypy.jit.codegen.ppc import instruction as insn
from pypy.jit.codegen.ppc.regalloc import RegisterAllocation
from pypy.jit.codegen.ppc.emit_moves import emit_moves, emit_moves_safe
from pypy.jit.codegen.ppc.ppcgen.rassemblermaker import make_rassembler
from pypy.jit.codegen.ppc.ppcgen.ppc_assembler import MyPPCAssembler
from pypy.jit.codegen.i386.rgenop import gc_malloc_fnaddr
from pypy.rpython.annlowlevel import llhelper
class RPPCAssembler(make_rassembler(MyPPCAssembler)):
    """RPython-friendly PPC assembler; emitted words go into self.mc."""
    def emit(self, value):
        self.mc.write(value)
_PPC = RPPCAssembler   # short alias used throughout this module

_flush_icache = None   # lazily-loaded C helper; see flush_icache()
def flush_icache(base, size):
    """Flush the CPU instruction cache for the range [base, base+size).

    The C helper _flush_icache.c is compiled and imported lazily on first
    call and cached in the module global.  (Idiom fix: None is a singleton,
    so compare with 'is None' rather than '== None'.)
    """
    global _flush_icache
    if _flush_icache is None:
        cpath = py.magic.autopath().dirpath().join('_flush_icache.c')
        _flush_icache = cpath._getpymodule()._flush_icache
    _flush_icache(base, size)
# expose flush_icache to the RTyper as an external function
register_external(flush_icache, [int, int], None, "LL_flush_icache")

NSAVEDREGISTERS = 19   # callee-saved GPRs saved in every prologue

DEBUG_TRAP = option.trap
DEBUG_PRINT = option.debug_print

_var_index = [0]       # single-cell mutable counter for Var numbering
class Var(GenVar):
    """A virtual register; identified only by a sequence number, the real
    location is chosen later by the register allocator."""
    conditional = False
    def __init__(self):
        # magic index only makes repr() stable and unique
        self.__magic_index = _var_index[0]
        _var_index[0] += 1
    def __repr__(self):
        return "v%d" % self.__magic_index
    def fits_in_uimm(self):
        # a Var is never an immediate
        return False
    def fits_in_simm(self):
        return False
class ConditionVar(Var):
    """Var that originated as the result of a conditional operation,
    e.g. a == b."""
    conditional = True
class IntConst(GenConst):
    """Immediate machine-word constant."""
    def __init__(self, value):
        self.value = value
    def __repr__(self):
        return 'IntConst(%d)'%self.value
    @specialize.arg(1)
    def revealconst(self, T):
        # convert back to an lltype value of type T
        if isinstance(T, lltype.Ptr):
            return lltype.cast_int_to_ptr(T, self.value)
        elif T is llmemory.Address:
            return llmemory.cast_int_to_adr(self.value)
        else:
            return lltype.cast_primitive(T, self.value)
    def load(self, insns, var):
        # queue an abstract instruction materializing the constant in 'var'
        insns.append(
            insn.Insn_GPR__IMM(_PPC.load_word,
                               var, [self]))
    def load_now(self, asm, loc):
        # emit code immediately, either into a register or a frame slot
        if loc.is_register:
            assert isinstance(loc, insn.GPR)
            asm.load_word(loc.number, self.value)
        else:
            #print 'load_now to', loc.offset
            asm.load_word(rSCRATCH, self.value)
            asm.stw(rSCRATCH, rFP, loc.offset)
    def fits_in_simm(self):
        # conservative: also rejects -2**15, which would actually fit simm16
        return abs(self.value) < 2**15
    def fits_in_uimm(self):
        return 0 <= self.value < 2**16
class AddrConst(GenConst):
    """Immediate address constant."""
    def __init__(self, addr):
        self.addr = addr
    @specialize.arg(1)
    def revealconst(self, T):
        if T is llmemory.Address:
            return self.addr
        elif isinstance(T, lltype.Ptr):
            return llmemory.cast_adr_to_ptr(self.addr, T)
        elif T is lltype.Signed:
            return llmemory.cast_adr_to_int(self.addr)
        else:
            assert 0, "XXX not implemented"
    def fits_in_simm(self):
        # addresses are never encoded as immediates
        return False
    def fits_in_uimm(self):
        return False
    def load(self, insns, var):
        # materialize via the integer representation of the address
        i = IntConst(llmemory.cast_adr_to_int(self.addr))
        insns.append(
            insn.Insn_GPR__IMM(RPPCAssembler.load_word,
                               var, [i]))
    def load_now(self, asm, loc):
        # emit code immediately, either into a register or a frame slot
        value = llmemory.cast_adr_to_int(self.addr)
        if loc.is_register:
            assert isinstance(loc, insn.GPR)
            asm.load_word(loc.number, value)
        else:
            #print 'load_now to', loc.offset
            asm.load_word(rSCRATCH, value)
            asm.stw(rSCRATCH, rFP, loc.offset)
class JumpPatchupGenerator(object):
    """Callback target for emit_moves(): turns abstract (target, source)
    location pairs into concrete Move/Spill/Unspill instructions."""

    def __init__(self, insns, allocator):
        self.insns = insns
        self.allocator = allocator

    def emit_move(self, tarloc, srcloc):
        # srcvar is recovered by reverse lookup only under DEBUG_PRINT;
        # otherwise None is passed to Spill/Unspill -- presumably the var
        # is only used for debug output there (confirm in instruction.py)
        srcvar = None
        if DEBUG_PRINT:
            for v, loc in self.allocator.var2loc.iteritems():
                if loc is srcloc:
                    srcvar = v
                    break
        emit = self.insns.append
        if tarloc == srcloc:
            return
        if tarloc.is_register and srcloc.is_register:
            assert isinstance(tarloc, insn.GPR)
            if isinstance(srcloc, insn.GPR):
                emit(insn.Move(tarloc, srcloc))
            else:
                assert isinstance(srcloc, insn.CRF)
                emit(srcloc.move_to_gpr(tarloc.number))
        elif tarloc.is_register and not srcloc.is_register:
            emit(insn.Unspill(srcvar, tarloc, srcloc))
        elif not tarloc.is_register and srcloc.is_register:
            emit(insn.Spill(srcvar, srcloc, tarloc))
        elif not tarloc.is_register and not srcloc.is_register:
            # stack-to-stack move goes through the scratch register gprs[0]
            emit(insn.Unspill(srcvar, insn.gprs[0], srcloc))
            emit(insn.Spill(srcvar, insn.gprs[0], tarloc))

    def create_fresh_location(self):
        return self.allocator.spill_slot()
class StackInfo(Var):
    """Placeholder for a stack location whose offset is assigned later."""
    # not really a Var at all, but needs to be mixable with Consts....
    # offset will be assigned later
    offset = 0
    pass
def prepare_for_jump(insns, sourcevars, src2loc, target, allocator):
    """Emit the location shuffle required before jumping to 'target'.

    Var arguments go through emit_moves (which resolves move cycles);
    constant arguments are loaded into their target locations afterwards,
    once no move can clobber them.
    """
    tar2src = {}     # tar var -> src var
    tar2loc = {}

    # construct mapping of targets to sources; note that "target vars"
    # and "target locs" are the same thing right now
    targetlocs = target.arg_locations
    tarvars = []

##     if DEBUG_PRINT:
##         print targetlocs
##         print allocator.var2loc

    for i in range(len(targetlocs)):
        tloc = targetlocs[i]
        src = sourcevars[i]
        if isinstance(src, Var):
            tar2loc[tloc] = tloc
            tar2src[tloc] = src
            tarvars.append(tloc)
        if not tloc.is_register:
            # reserve the target stack slot so the allocator won't reuse it
            if tloc in allocator.free_stack_slots:
                allocator.free_stack_slots.remove(tloc)

    gen = JumpPatchupGenerator(insns, allocator)
    emit_moves(gen, tarvars, tar2src, tar2loc, src2loc)

    # constants last: loaded only after all var moves are done
    for i in range(len(targetlocs)):
        tloc = targetlocs[i]
        src = sourcevars[i]
        if not isinstance(src, Var):
            insns.append(insn.Load(tloc, src))
class Label(GenLabel):
    """A jump target inside generated code.  startaddr/arg_locations are
    filled in later by insn.Label.allocate/emit; min_stack_offset == 1
    means "not yet known" (see Builder.finish_and_goto)."""
    def __init__(self, args_gv):
        self.args_gv = args_gv
        #self.startaddr = startaddr
        #self.arg_locations = arg_locations
        self.min_stack_offset = 1
# our approach to stack layout:
# on function entry, the stack looks like this:
# ....
# | parameter area |
# | linkage area | <- rSP points to the last word of the linkage area
# +----------------+
# we set things up like so:
# | parameter area |
# | linkage area | <- rFP points to where the rSP was
# | saved registers |
# | local variables |
# +-----------------+ <- rSP points here, and moves around between basic blocks
# points of note (as of 2006-11-09 anyway :-):
# 1. we currently never spill to the parameter area (should fix?)
# 2. we always save all callee-save registers
# 3. as each basic block can move the SP around as it sees fit, we index
# into the local variables area from the FP (frame pointer; it is not
# usual on the PPC to have a frame pointer, but there's no reason we
# can't have one :-)
class Builder(GenBuilder):
    """Accumulates 'insn' objects for one basic block, then register-
    allocates and assembles them into machine code."""

    def __init__(self, rgenop):
        self.rgenop = rgenop
        self.asm = RPPCAssembler()
        self.asm.mc = None              # machine-code block; set by _open()
        self.insns = []                 # pending instructions for this block
        self.initial_spill_offset = 0   # rFP-relative offset on block entry
        self.initial_var2loc = None     # var -> location mapping on entry
        self.max_param_space = -1       # bytes of outgoing call arguments, -1 if no call
        self.final_jump_addr = 0        # addr of patchable jump left by pause_writing()
        self.start = 0
        self.closed = True              # True while no mc is open for writing
        self.patch_start_here = 0       # addr of jump to patch in maybe_patch_start_here()

    # ----------------------------------------------------------------
    # the public Builder interface:

    def end(self):
        pass
    # generic operation entry points: dispatch on the (RPython-constant)
    # opname to the matching op_* / raisingop_* method below
    @specialize.arg(1)
    def genop1(self, opname, gv_arg):
        #print opname, 'on', id(self)
        genmethod = getattr(self, 'op_' + opname)
        r = genmethod(gv_arg)
        #print '->', id(r)
        return r
    @specialize.arg(1)
    def genop2(self, opname, gv_arg1, gv_arg2):
        #print opname, 'on', id(self)
        genmethod = getattr(self, 'op_' + opname)
        r = genmethod(gv_arg1, gv_arg2)
        #print '->', id(r)
        return r
    @specialize.arg(1)
    def genraisingop2(self, opname, gv_arg1, gv_arg2):
        genmethod = getattr(self, 'raisingop_' + opname)
        r = genmethod(gv_arg1, gv_arg2)
        return r
    @specialize.arg(1)
    def genraisingop1(self, opname, gv_arg):
        genmethod = getattr(self, 'raisingop_' + opname)
        r = genmethod(gv_arg)
        return r
    # pointer operations ignore their kind token on this backend
    def genop_ptr_iszero(self, kind, gv_ptr):
        return self.op_ptr_iszero(gv_ptr)
    def genop_ptr_nonzero(self, kind, gv_ptr):
        return self.op_ptr_nonzero(gv_ptr)
    def genop_ptr_eq(self, kind, gv_ptr1, gv_ptr2):
        return self.op_ptr_eq(gv_ptr1, gv_ptr2)
    def genop_ptr_ne(self, kind, gv_ptr1, gv_ptr2):
        return self.op_ptr_ne(gv_ptr1, gv_ptr2)
    def genop_call(self, sigtoken, gv_fnptr, args_gv):
        """Generate a call through a function pointer; arguments are
        marshalled into the PPC argument registers/parameter area."""
        self.insns.append(insn.SpillCalleeSaves())
        for i in range(len(args_gv)):
            self.insns.append(insn.LoadArg(i, args_gv[i]))
        gv_result = Var()
        # remember how much parameter space the largest call needs
        # (4 bytes per argument word); used by _stack_size()
        self.max_param_space = max(self.max_param_space, len(args_gv)*4)
        self.insns.append(insn.CALL(gv_result, gv_fnptr))
        return gv_result
    def genop_getfield(self, fieldtoken, gv_ptr):
        # pick the load of the right width (byte/halfword/word)
        fieldoffset, fieldsize = fieldtoken
        opcode = {1:_PPC.lbz, 2:_PPC.lhz, 4:_PPC.lwz}[fieldsize]
        return self._arg_simm_op(gv_ptr, IntConst(fieldoffset), opcode)
    def genop_setfield(self, fieldtoken, gv_ptr, gv_value):
        # NOTE(review): gv_result is returned without any instruction
        # ever writing to it -- presumably callers ignore setfield's
        # result; confirm before relying on it
        gv_result = Var()
        fieldoffset, fieldsize = fieldtoken
        opcode = {1:_PPC.stb, 2:_PPC.sth, 4:_PPC.stw}[fieldsize]
        self.insns.append(
            insn.Insn_None__GPR_GPR_IMM(opcode,
                                        [gv_value, gv_ptr, IntConst(fieldoffset)]))
        return gv_result
    def genop_getsubstruct(self, fieldtoken, gv_ptr):
        # address of an inlined substructure: just pointer + offset
        return self._arg_simm_op(gv_ptr, IntConst(fieldtoken[0]), _PPC.addi)
    def genop_getarrayitem(self, arraytoken, gv_ptr, gv_index):
        _, _, itemsize = arraytoken
        # indexed-load form, used when the offset does not fit a simm16
        opcode = {1:_PPC.lbzx,
                  2:_PPC.lhzx,
                  4:_PPC.lwzx}[itemsize]
        # immediate-load form for small constant offsets
        opcodei = {1:_PPC.lbz,
                   2:_PPC.lhz,
                   4:_PPC.lwz}[itemsize]
        gv_itemoffset = self.itemoffset(arraytoken, gv_index)
        return self._arg_arg_op_with_simm(gv_ptr, gv_itemoffset, opcode, opcodei)
    def genop_getarraysubstruct(self, arraytoken, gv_ptr, gv_index):
        # address of an array item (items are structs): pointer + offset
        _, _, itemsize = arraytoken
        assert itemsize == 4
        gv_itemoffset = self.itemoffset(arraytoken, gv_index)
        return self._arg_arg_op_with_simm(gv_ptr, gv_itemoffset, _PPC.add, _PPC.addi,
                                          commutative=True)
    def genop_getarraysize(self, arraytoken, gv_ptr):
        lengthoffset, _, _ = arraytoken
        return self._arg_simm_op(gv_ptr, IntConst(lengthoffset), _PPC.lwz)
    def genop_setarrayitem(self, arraytoken, gv_ptr, gv_index, gv_value):
        _, _, itemsize = arraytoken
        gv_itemoffset = self.itemoffset(arraytoken, gv_index)
        gv_result = Var()
        if gv_itemoffset.fits_in_simm():
            # store with immediate displacement
            opcode = {1:_PPC.stb,
                      2:_PPC.sth,
                      4:_PPC.stw}[itemsize]
            self.insns.append(
                insn.Insn_None__GPR_GPR_IMM(opcode,
                                            [gv_value, gv_ptr, gv_itemoffset]))
        else:
            # store with register displacement
            opcode = {1:_PPC.stbx,
                      2:_PPC.sthx,
                      4:_PPC.stwx}[itemsize]
            self.insns.append(
                insn.Insn_None__GPR_GPR_GPR(opcode,
                                            [gv_value, gv_ptr, gv_itemoffset]))
    def genop_malloc_fixedsize(self, alloctoken):
        # malloc is just a call to the GC's allocation function;
        # the "1" is a fake one-argument sigtoken (COUGH)
        return self.genop_call(1, # COUGH
                               IntConst(gc_malloc_fnaddr()),
                               [IntConst(alloctoken)])
    def genop_malloc_varsize(self, varsizealloctoken, gv_size):
        # allocate items+header, then store the length field
        gv_itemoffset = self.itemoffset(varsizealloctoken, gv_size)
        gv_result = self.genop_call(1, # COUGH
                                    IntConst(gc_malloc_fnaddr()),
                                    [gv_itemoffset])
        lengthoffset, _, _ = varsizealloctoken
        self.insns.append(
            insn.Insn_None__GPR_GPR_IMM(_PPC.stw,
                                        [gv_size, gv_result, IntConst(lengthoffset)]))
        return gv_result
    def genop_same_as(self, kindtoken, gv_arg):
        # force a constant into a fresh Var; a Var is already fine as-is
        if not isinstance(gv_arg, Var):
            gv_result = Var()
            gv_arg.load(self.insns, gv_result)
            return gv_result
        else:
            return gv_arg
    def genop_cast_int_to_ptr(self, ptrkindtoken, gv_int):
        # ints and pointers share representation on this backend
        return gv_int
    ## def genop_debug_pdb(self): # may take an args_gv later

    def genop_get_frame_base(self):
        # expose the frame pointer so frame vars/places can be read later
        gv_result = Var()
        self.insns.append(
            insn.LoadFramePointer(gv_result))
        return gv_result
    def get_frame_info(self, vars_gv):
        """Return a list parallel to vars_gv: StackInfo for each Var
        (copied into a known stack slot), the GenConst itself otherwise."""
        result = []
        for v in vars_gv:
            if isinstance(v, Var):
                place = StackInfo()
                self.insns.append(insn.CopyIntoStack(place, v))
                result.append(place)
            else:
                result.append(None)
        return result
    def alloc_frame_place(self, kind, gv_initial_value=None):
        # reserve a stack slot that outer code can poke values into
        place = StackInfo()
        if gv_initial_value is None:
            gv_initial_value = AddrConst(llmemory.NULL)
        self.insns.append(insn.CopyIntoStack(place, gv_initial_value))
        return place
    def genop_absorb_place(self, kind, place):
        # read a frame place back into a fresh Var
        var = Var()
        self.insns.append(insn.CopyOffStack(var, place))
        return var
    def enter_next_block(self, kinds, args_gv):
        """Close over the current live values and produce a Label that
        later blocks can jump to.  args_gv is updated in place so that
        every entry is a distinct Var."""
        if DEBUG_PRINT:
            print 'enter_next_block1', args_gv
        seen = {}
        for i in range(len(args_gv)):
            gv = args_gv[i]
            if isinstance(gv, Var):
                if gv in seen:
                    # duplicated Var: copy it so each label arg is unique
                    new_gv = self._arg_op(gv, _PPC.mr)
                    args_gv[i] = new_gv
                seen[gv] = True
            else:
                # constant: materialize it into a Var
                new_gv = Var()
                gv.load(self.insns, new_gv)
                args_gv[i] = new_gv
        if DEBUG_PRINT:
            print 'enter_next_block2', args_gv
        r = Label(args_gv)
        self.insns.append(insn.Label(r))
        return r
    def jump_if_false(self, gv_condition, args_gv):
        return self._jump(gv_condition, False, args_gv)
    def jump_if_true(self, gv_condition, args_gv):
        return self._jump(gv_condition, True, args_gv)
    def finish_and_return(self, sigtoken, gv_returnvar):
        """End the block by returning gv_returnvar to our caller."""
        self.insns.append(insn.Return(gv_returnvar))
        self.allocate_and_emit([])

        # standard epilogue:

        # restore old SP
        self.asm.lwz(rSP, rSP, 0)
        # restore all callee-save GPRs
        self.asm.lmw(gprs[32-NSAVEDREGISTERS].number, rSP, -4*(NSAVEDREGISTERS+1))
        # restore Condition Register
        self.asm.lwz(rSCRATCH, rSP, 4)
        self.asm.mtcr(rSCRATCH)
        # restore Link Register and jump to it
        self.asm.lwz(rSCRATCH, rSP, 8)
        self.asm.mtlr(rSCRATCH)
        self.asm.blr()

        self._close()

    def finish_and_goto(self, outputargs_gv, target):
        """End the block by jumping to 'target', moving the output args
        into the locations the target expects."""
        if target.min_stack_offset == 1:
            # target's stack layout not known yet: detach and reattach
            # so it gets filled in first
            self.pause_writing(outputargs_gv)
            self.start_writing()
        allocator = self.allocate(outputargs_gv)
        if DEBUG_PRINT:
            before_moves = len(self.insns)
            print outputargs_gv
            print target.args_gv
        allocator.spill_offset = min(allocator.spill_offset, target.min_stack_offset)
        prepare_for_jump(
            self.insns, outputargs_gv, allocator.var2loc, target, allocator)
        if DEBUG_PRINT:
            print 'moves:'
            for i in self.insns[before_moves:]:
                print ' ', i
        self.emit(allocator)
        here_size = self._stack_size(allocator.spill_offset)
        there_size = self._stack_size(target.min_stack_offset)
        if here_size != there_size:
            # grow/shrink our frame to match the target's expectation
            self.emit_stack_adjustment(there_size)
            if self.rgenop.DEBUG_SCRIBBLE:
                if here_size > there_size:
                    offsets = range(there_size, here_size, 4)
                else:
                    offsets = range(here_size, there_size, 4)
                for offset in offsets:
                    self.asm.load_word(rSCRATCH, 0x23456789)
                    self.asm.stw(rSCRATCH, rSP, -offset)
        # absolute jump to the target block
        self.asm.load_word(rSCRATCH, target.startaddr)
        self.asm.mtctr(rSCRATCH)
        self.asm.bctr()
        self._close()
    def flexswitch(self, gv_exitswitch, args_gv):
        """End the block with an extensible switch on gv_exitswitch."""
        # make sure the exitswitch ends the block in a register:
        crresult = Var()
        self.insns.append(insn.FakeUse(crresult, gv_exitswitch))
        allocator = self.allocate_and_emit(args_gv)
        # reserve room for the first case (7 instructions) plus the
        # default jump -- see the "a switch can take 7 instructions"
        # comment before class FlexSwitch
        switch_mc = self.asm.mc.reserve(7 * 5 + 4)
        self._close()
        result = FlexSwitch(self.rgenop, switch_mc,
                            allocator.loc_of(gv_exitswitch),
                            allocator.loc_of(crresult),
                            allocator.var2loc,
                            allocator.spill_offset)
        return result, result.add_default()
    def start_writing(self):
        """(Re)open this builder for writing machine code; undo a
        previous pause_writing() by patching its jump-away stub."""
        if not self.closed:
            return self
        assert self.asm.mc is None
        if self.final_jump_addr != 0:
            # we were paused: continue where the patchable jump points
            mc = self.rgenop.open_mc()
            target = mc.tell()
            if target == self.final_jump_addr + 16:
                # we happen to be continuing directly after the jump
                # stub: just back up over its last instruction
                mc.setpos(mc.getpos()-4)
            else:
                # patch the stub's load_word to point at the new code
                self.asm.mc = self.rgenop.ExistingCodeBlock(
                    self.final_jump_addr, self.final_jump_addr+8)
                self.asm.load_word(rSCRATCH, target)
                flush_icache(self.final_jump_addr, 8)
            self._code_start = mc.tell()
            self.asm.mc = mc
            self.final_jump_addr = 0
            self.closed = False
            return self
        else:
            self._open()
            self.maybe_patch_start_here()
            return self
    def maybe_patch_start_here(self):
        # if someone recorded a jump that should land at our start,
        # patch its load_word now that we know our address
        if self.patch_start_here:
            mc = self.asm.mc
            self.asm.mc = self.rgenop.ExistingCodeBlock(
                self.patch_start_here, self.patch_start_here+8)
            self.asm.load_word(rSCRATCH, mc.tell())
            flush_icache(self.patch_start_here, 8)
            self.asm.mc = mc
            self.patch_start_here = 0
    def pause_writing(self, args_gv):
        """Emit everything pending, leave a patchable absolute jump
        (load_word + mtctr + bctr) and release the code block."""
        allocator = self.allocate_and_emit(args_gv)
        self.initial_var2loc = allocator.var2loc
        self.initial_spill_offset = allocator.spill_offset
        self.insns = []
        self.max_param_space = -1
        self.final_jump_addr = self.asm.mc.tell()
        self.closed = True
        # two nops == room for the load_word that start_writing()
        # patches in later
        self.asm.nop()
        self.asm.nop()
        self.asm.mtctr(rSCRATCH)
        self.asm.bctr()
        self._close()
        return self
    # ----------------------------------------------------------------
    # ppc-specific interface:

    def itemoffset(self, arraytoken, gv_index):
        """Return a Var holding startoffset + gv_index*itemsize, the
        byte offset of an array item from the array's base pointer."""
        # if gv_index is constant, this can return a constant...
        lengthoffset, startoffset, itemsize = arraytoken
        gv_offset = Var()
        self.insns.append(
            insn.Insn_GPR__GPR_IMM(RPPCAssembler.mulli,
                                   gv_offset, [gv_index, IntConst(itemsize)]))
        gv_itemoffset = Var()
        self.insns.append(
            insn.Insn_GPR__GPR_IMM(RPPCAssembler.addi,
                                   gv_itemoffset, [gv_offset, IntConst(startoffset)]))
        return gv_itemoffset
    def _write_prologue(self, sigtoken):
        """Emit the standard function prologue and return the list of
        input-argument Vars, mapped to their ABI locations."""
        numargs = sigtoken     # for now
        if DEBUG_TRAP:
            self.asm.trap()
        inputargs = [Var() for i in range(numargs)]
        assert self.initial_var2loc is None
        self.initial_var2loc = {}
        # first 8 arguments arrive in r3..r10 per the PPC ABI
        for arg in inputargs[:8]:
            self.initial_var2loc[arg] = gprs[3+len(self.initial_var2loc)]
        if len(inputargs) > 8:
            # remaining arguments are in the caller's parameter area
            for i in range(8, len(inputargs)):
                arg = inputargs[i]
                self.initial_var2loc[arg] = insn.stack_slot(24 + 4 * len(self.initial_var2loc))
        self.initial_spill_offset = self._var_offset(0)

        # Standard prologue:

        # Minimum stack space = 24+params+lv+4*GPRSAVE+8*FPRSAVE
        #   params = stack space for parameters for functions we call
        #   lv = stack space for local variables
        #   GPRSAVE = the number of callee-save GPRs we save, currently
        #             NSAVEDREGISTERS which is 19, i.e. all of them
        #   FPRSAVE = the number of callee-save FPRs we save, currently 0
        # Initially, we set params == lv == 0 and allow each basic block to
        # ensure it has enough space to continue.

        minspace = self._stack_size(self._var_offset(0))
        # save Link Register
        self.asm.mflr(rSCRATCH)
        self.asm.stw(rSCRATCH, rSP, 8)
        # save Condition Register
        self.asm.mfcr(rSCRATCH)
        self.asm.stw(rSCRATCH, rSP, 4)
        # save the callee-save GPRs
        self.asm.stmw(gprs[32-NSAVEDREGISTERS].number, rSP, -4*(NSAVEDREGISTERS + 1))
        # set up frame pointer
        self.asm.mr(rFP, rSP)
        # save stack pointer into linkage area and set stack pointer for us.
        self.asm.stwu(rSP, rSP, -minspace)

        if self.rgenop.DEBUG_SCRIBBLE:
            # write junk into all non-argument, non rFP or rSP registers
            self.asm.load_word(rSCRATCH, 0x12345678)
            for i in range(min(11, 3+len(self.initial_var2loc)), 32):
                self.asm.load_word(i, 0x12345678)
            # scribble the part of the stack between
            # self._var_offset(0) and minspace
            for offset in range(self._var_offset(0), -minspace, -4):
                self.asm.stw(rSCRATCH, rFP, offset)
            # and then a bit more
            for offset in range(-minspace-4, -minspace-200, -4):
                self.asm.stw(rSCRATCH, rFP, offset)

        return inputargs
    def _var_offset(self, v):
        """v represents an offset into the local variable area in bytes;
        this returns the offset relative to rFP"""
        return -(4*NSAVEDREGISTERS+4+v)

    def _stack_size(self, lv):
        """ Returns the required stack size to store all data, assuming
        that there are 'param' bytes of parameters for callee functions and
        'lv' is the largest (wrt to abs() :) rFP-relative byte offset of
        any variable on the stack.  Plus 4 because the rFP actually points
        into our caller's linkage area."""
        assert lv <= 0
        if self.max_param_space >= 0:
            # 24 bytes linkage area + at least 32 bytes parameter area
            param = max(self.max_param_space, 32) + 24
        else:
            param = 0
        # round up to the ABI-mandated 16-byte stack alignment
        return ((4 + param - lv + 15) & ~15)

    def _open(self):
        # grab a machine-code block to write into
        self.asm.mc = self.rgenop.open_mc()
        self._code_start = self.asm.mc.tell()
        self.closed = False

    def _close(self):
        # flush the icache over what we wrote and release the block
        _code_stop = self.asm.mc.tell()
        code_size = _code_stop - self._code_start
        flush_icache(self._code_start, code_size)
        self.rgenop.close_mc(self.asm.mc)
        self.asm.mc = None
    def allocate_and_emit(self, live_vars_gv):
        allocator = self.allocate(live_vars_gv)
        return self.emit(allocator)

    def allocate(self, live_vars_gv):
        """Run register allocation over the pending instructions."""
        assert self.initial_var2loc is not None
        allocator = RegisterAllocation(
            self.rgenop.freeregs,
            self.initial_var2loc,
            self.initial_spill_offset)
        self.insns = allocator.allocate_for_insns(self.insns)
        return allocator

    def emit(self, allocator):
        """Assemble the (already allocated) instructions, growing the
        stack frame first if this block spilled more than its entry
        state provided for."""
        in_size = self._stack_size(self.initial_spill_offset)
        our_size = self._stack_size(allocator.spill_offset)
        if in_size != our_size:
            assert our_size > in_size
            self.emit_stack_adjustment(our_size)
            if self.rgenop.DEBUG_SCRIBBLE:
                for offset in range(in_size, our_size, 4):
                    self.asm.load_word(rSCRATCH, 0x23456789)
                    self.asm.stw(rSCRATCH, rSP, -offset)
        if self.rgenop.DEBUG_SCRIBBLE:
            # scribble over every register/slot not holding a live value
            locs = {}
            for _, loc in self.initial_var2loc.iteritems():
                locs[loc] = True
            regs = insn.gprs[3:]
            for reg in regs:
                if reg not in locs:
                    self.asm.load_word(reg.number, 0x3456789)
            self.asm.load_word(0, 0x3456789)
            for offset in range(self._var_offset(0),
                                self.initial_spill_offset,
                                -4):
                if insn.stack_slot(offset) not in locs:
                    self.asm.stw(0, rFP, offset)
        for insn_ in self.insns:
            insn_.emit(self.asm)
        # propagate the final spill depth to everyone who jumps here
        for label in allocator.labels_to_tell_spill_offset_to:
            label.min_stack_offset = allocator.spill_offset
        for builder in allocator.builders_to_tell_spill_offset_to:
            builder.initial_spill_offset = allocator.spill_offset
        return allocator
    def emit_stack_adjustment(self, newsize):
        # the ABI requires that at all times that r1 is valid, in the
        # sense that it must point to the bottom of the stack and that
        # executing SP <- *(SP) repeatedly walks the stack.
        # this code satisfies this, although there is a 1-instruction
        # window where such walking would find a strange intermediate
        # "frame"
        self.asm.addi(rSCRATCH, rFP, -newsize)
        self.asm.sub(rSCRATCH, rSCRATCH, rSP)

        # this is a pure debugging check that we avoid the situation
        # where *(r1) == r1 which would violates the ABI rules listed
        # above. after a while it can be removed or maybe made
        # conditional on some --option passed to py.test
        self.asm.tweqi(rSCRATCH, 0)

        # atomically move SP and store the back-chain word
        self.asm.stwux(rSP, rSP, rSCRATCH)
        self.asm.stw(rFP, rSP, 0)
    # helpers appending one 'insn' of the given shape and returning the
    # fresh result Var

    def _arg_op(self, gv_arg, opcode):
        # result <- opcode(arg)
        gv_result = Var()
        self.insns.append(
            insn.Insn_GPR__GPR(opcode, gv_result, gv_arg))
        return gv_result

    def _arg_arg_op(self, gv_x, gv_y, opcode):
        # result <- opcode(x, y), both operands in registers
        gv_result = Var()
        self.insns.append(
            insn.Insn_GPR__GPR_GPR(opcode, gv_result, [gv_x, gv_y]))
        return gv_result

    def _arg_simm_op(self, gv_x, gv_imm, opcode):
        # result <- opcode(x, imm), imm must fit a signed 16-bit field
        assert gv_imm.fits_in_simm()
        gv_result = Var()
        self.insns.append(
            insn.Insn_GPR__GPR_IMM(opcode, gv_result, [gv_x, gv_imm]))
        return gv_result

    def _arg_uimm_op(self, gv_x, gv_imm, opcode):
        # result <- opcode(x, imm), imm must fit an unsigned 16-bit field
        assert gv_imm.fits_in_uimm()
        gv_result = Var()
        self.insns.append(
            insn.Insn_GPR__GPR_IMM(opcode, gv_result, [gv_x, gv_imm]))
        return gv_result

    def _arg_arg_op_with_simm(self, gv_x, gv_y, opcode, opcodei,
                              commutative=False):
        # use the immediate form when one operand is a small constant
        if gv_y.fits_in_simm():
            return self._arg_simm_op(gv_x, gv_y, opcodei)
        elif gv_x.fits_in_simm() and commutative:
            return self._arg_simm_op(gv_y, gv_x, opcodei)
        else:
            return self._arg_arg_op(gv_x, gv_y, opcode)

    def _arg_arg_op_with_uimm(self, gv_x, gv_y, opcode, opcodei,
                              commutative=False):
        # same, for instructions taking an unsigned immediate
        if gv_y.fits_in_uimm():
            return self._arg_uimm_op(gv_x, gv_y, opcodei)
        elif gv_x.fits_in_uimm() and commutative:
            return self._arg_uimm_op(gv_y, gv_x, opcodei)
        else:
            return self._arg_arg_op(gv_x, gv_y, opcode)

    def _identity(self, gv_arg):
        # no-op conversion: representation is unchanged
        return gv_arg
    # maps a comparison name to (bit index within the CR field, whether
    # the branch sense must be negated)
    cmp2info = {
        #      bit-in-crf  negated
        'gt': (    1,         0   ),
        'lt': (    0,         0   ),
        'le': (    1,         1   ),
        'ge': (    0,         1   ),
        'eq': (    2,         0   ),
        'ne': (    2,         1   ),
        }

    # the same, for when the operands were swapped (x cmp y == y cmp' x)
    cmp2info_flipped = {
        #      bit-in-crf  negated
        'gt': (    0,         0   ),
        'lt': (    1,         0   ),
        'le': (    0,         1   ),
        'ge': (    1,         1   ),
        'eq': (    2,         0   ),
        'ne': (    2,         1   ),
        }

    def _compare(self, op, gv_x, gv_y):
        """Signed comparison; result is a ConditionVar living in a CR
        field.  Uses the immediate form when either operand fits."""
        #print "op", op
        gv_result = ConditionVar()
        if gv_y.fits_in_simm():
            self.insns.append(
                insn.CMPWI(self.cmp2info[op], gv_result, [gv_x, gv_y]))
        elif gv_x.fits_in_simm():
            # swap operands, so use the flipped condition table
            self.insns.append(
                insn.CMPWI(self.cmp2info_flipped[op], gv_result, [gv_y, gv_x]))
        else:
            self.insns.append(
                insn.CMPW(self.cmp2info[op], gv_result, [gv_x, gv_y]))
        return gv_result

    def _compare_u(self, op, gv_x, gv_y):
        # unsigned variant of _compare
        gv_result = ConditionVar()
        if gv_y.fits_in_uimm():
            self.insns.append(
                insn.CMPWLI(self.cmp2info[op], gv_result, [gv_x, gv_y]))
        elif gv_x.fits_in_uimm():
            self.insns.append(
                insn.CMPWLI(self.cmp2info_flipped[op], gv_result, [gv_y, gv_x]))
        else:
            self.insns.append(
                insn.CMPWL(self.cmp2info[op], gv_result, [gv_x, gv_y]))
        return gv_result
    def _jump(self, gv_condition, if_true, args_gv):
        # conditional exit: the new builder is the not-taken path
        targetbuilder = self.rgenop.newbuilder()
        self.insns.append(
            insn.Jump(gv_condition, targetbuilder, if_true, args_gv))
        return targetbuilder

    def _ov(self):
        """Extract the XER summary-overflow bit into a Var (0 or 1),
        for the raisingop_* variants."""
        # mfxer rFOO
        # extrwi rBAR, rFOO, 1, 1
        gv_xer = Var()
        self.insns.append(
            insn.Insn_GPR(_PPC.mfxer, gv_xer))
        gv_ov = Var()
        self.insns.append(insn.Extrwi(gv_ov, gv_xer, 1, 1))
        return gv_ov
    def op_bool_not(self, gv_arg):
        # booleans are 0/1, so xor with 1 flips them
        return self._arg_uimm_op(gv_arg, self.rgenop.genconst(1), RPPCAssembler.xori)

    def op_int_is_true(self, gv_arg):
        return self._compare('ne', gv_arg, self.rgenop.genconst(0))

    def op_int_neg(self, gv_arg):
        return self._arg_op(gv_arg, _PPC.neg)

    def raisingop_int_neg_ovf(self, gv_arg):
        # 'o' variants set XER overflow; second result is the ovf flag
        gv_result = self._arg_op(gv_arg, _PPC.nego)
        gv_ov = self._ov()
        return (gv_result, gv_ov)

    def op_int_abs(self, gv_arg):
        # branchless abs: sign = x >> 31; result = (x ^ sign) - sign
        gv_sign = self._arg_uimm_op(gv_arg, self.rgenop.genconst(31), _PPC.srawi)
        gv_maybe_inverted = self._arg_arg_op(gv_arg, gv_sign, _PPC.xor)
        return self._arg_arg_op(gv_sign, gv_maybe_inverted, _PPC.subf)

    def raisingop_int_abs_ovf(self, gv_arg):
        gv_sign = self._arg_uimm_op(gv_arg, self.rgenop.genconst(31), _PPC.srawi)
        gv_maybe_inverted = self._arg_arg_op(gv_arg, gv_sign, _PPC.xor)
        gv_result = self._arg_arg_op(gv_sign, gv_maybe_inverted, _PPC.subfo)
        return (gv_result, self._ov())

    def op_int_invert(self, gv_arg):
        return self._arg_op(gv_arg, _PPC.not_)

    def op_int_add(self, gv_x, gv_y):
        return self._arg_arg_op_with_simm(gv_x, gv_y, _PPC.add, _PPC.addi,
                                          commutative=True)

    def raisingop_int_add_ovf(self, gv_x, gv_y):
        gv_result = self._arg_arg_op(gv_x, gv_y, _PPC.addo)
        gv_ov = self._ov()
        return (gv_result, gv_ov)

    def op_int_sub(self, gv_x, gv_y):
        return self._arg_arg_op_with_simm(gv_x, gv_y, _PPC.sub, _PPC.subi)

    def raisingop_int_sub_ovf(self, gv_x, gv_y):
        gv_result = self._arg_arg_op(gv_x, gv_y, _PPC.subo)
        gv_ov = self._ov()
        return (gv_result, gv_ov)

    def op_int_mul(self, gv_x, gv_y):
        return self._arg_arg_op_with_simm(gv_x, gv_y, _PPC.mullw, _PPC.mulli,
                                          commutative=True)

    def raisingop_int_mul_ovf(self, gv_x, gv_y):
        gv_result = self._arg_arg_op(gv_x, gv_y, _PPC.mullwo)
        gv_ov = self._ov()
        return (gv_result, gv_ov)

    def op_int_floordiv(self, gv_x, gv_y):
        return self._arg_arg_op(gv_x, gv_y, _PPC.divw)

##     def op_int_floordiv_zer(self, gv_x, gv_y):

    def op_int_mod(self, gv_x, gv_y):
        # x mod y == x - (x floordiv y) * y
        gv_dividend = self.op_int_floordiv(gv_x, gv_y)
        gv_z = self.op_int_mul(gv_dividend, gv_y)
        return self.op_int_sub(gv_x, gv_z)

##     def op_int_mod_zer(self, gv_x, gv_y):

    def op_int_lt(self, gv_x, gv_y):
        return self._compare('lt', gv_x, gv_y)

    def op_int_le(self, gv_x, gv_y):
        return self._compare('le', gv_x, gv_y)

    def op_int_eq(self, gv_x, gv_y):
        return self._compare('eq', gv_x, gv_y)

    def op_int_ne(self, gv_x, gv_y):
        return self._compare('ne', gv_x, gv_y)

    def op_int_gt(self, gv_x, gv_y):
        return self._compare('gt', gv_x, gv_y)

    def op_int_ge(self, gv_x, gv_y):
        return self._compare('ge', gv_x, gv_y)

    # chars and unichars are plain (zero-padded) integers here
    op_char_lt = op_int_lt
    op_char_le = op_int_le
    op_char_eq = op_int_eq
    op_char_ne = op_int_ne
    op_char_gt = op_int_gt
    op_char_ge = op_int_ge

    op_unichar_eq = op_int_eq
    op_unichar_ne = op_int_ne
    def op_int_and(self, gv_x, gv_y):
        return self._arg_arg_op(gv_x, gv_y, _PPC.and_)

    def op_int_or(self, gv_x, gv_y):
        return self._arg_arg_op_with_uimm(gv_x, gv_y, _PPC.or_, _PPC.ori,
                                          commutative=True)

    def op_int_lshift(self, gv_x, gv_y):
        if gv_y.fits_in_simm():
            # Python semantics: shifting by >= wordsize yields 0
            if abs(gv_y.value) >= 32:
                return self.rgenop.genconst(0)
            else:
                return self._arg_uimm_op(gv_x, gv_y, _PPC.slwi)
        # computing x << y when you don't know y is <=32
        # (we can assume y >= 0 though)
        # here's the plan:
        #
        # z = nltu(y, 32) (as per cwg)
        # w = x << y
        # r = w&z
        # (the subfic/addze pair communicates through the carry bit;
        # gv_a is only "used" via that side effect)
        gv_a = self._arg_simm_op(gv_y, self.rgenop.genconst(32), _PPC.subfic)
        gv_b = self._arg_op(gv_y, _PPC.addze)
        gv_z = self._arg_arg_op(gv_b, gv_y, _PPC.subf)
        gv_w = self._arg_arg_op(gv_x, gv_y, _PPC.slw)
        return self._arg_arg_op(gv_z, gv_w, _PPC.and_)

##     def op_int_lshift_val(self, gv_x, gv_y):

    def op_int_rshift(self, gv_x, gv_y):
        if gv_y.fits_in_simm():
            if abs(gv_y.value) >= 32:
                # arithmetic shift by >= 31 saturates to the sign bits
                gv_y = self.rgenop.genconst(31)
            return self._arg_simm_op(gv_x, gv_y, _PPC.srawi)
        # computing x >> y when you don't know y is <=32
        # (we can assume y >= 0 though)
        # here's the plan:
        #
        # ntlu_y_32 = nltu(y, 32) (as per cwg)
        # o = srawi(x, 31) & ~ntlu_y_32
        # w = (x >> y) & ntlu_y_32
        # r = w|o
        gv_a = self._arg_uimm_op(gv_y, self.rgenop.genconst(32), _PPC.subfic)
        gv_b = self._arg_op(gv_y, _PPC.addze)
        gv_ntlu_y_32 = self._arg_arg_op(gv_b, gv_y, _PPC.subf)
        gv_c = self._arg_uimm_op(gv_x, self.rgenop.genconst(31), _PPC.srawi)
        gv_o = self._arg_arg_op(gv_c, gv_ntlu_y_32, _PPC.andc_)
        gv_e = self._arg_arg_op(gv_x, gv_y, _PPC.sraw)
        gv_w = self._arg_arg_op(gv_e, gv_ntlu_y_32, _PPC.and_)
        return self._arg_arg_op(gv_o, gv_w, _PPC.or_)

##     def op_int_rshift_val(self, gv_x, gv_y):

    def op_int_xor(self, gv_x, gv_y):
        return self._arg_arg_op_with_uimm(gv_x, gv_y, _PPC.xor, _PPC.xori,
                                          commutative=True)
    ## various int_*_ovfs

    # most unsigned ops share implementation with the signed ones;
    # only division/modulo/compare/rshift differ
    op_uint_is_true = op_int_is_true
    op_uint_invert = op_int_invert
    op_uint_add = op_int_add
    op_uint_sub = op_int_sub
    op_uint_mul = op_int_mul

    def op_uint_floordiv(self, gv_x, gv_y):
        return self._arg_arg_op(gv_x, gv_y, _PPC.divwu)

##     def op_uint_floordiv_zer(self, gv_x, gv_y):

    def op_uint_mod(self, gv_x, gv_y):
        # x mod y == x - (x floordiv y) * y, unsigned
        gv_dividend = self.op_uint_floordiv(gv_x, gv_y)
        gv_z = self.op_uint_mul(gv_dividend, gv_y)
        return self.op_uint_sub(gv_x, gv_z)

##     def op_uint_mod_zer(self, gv_x, gv_y):

    def op_uint_lt(self, gv_x, gv_y):
        return self._compare_u('lt', gv_x, gv_y)

    def op_uint_le(self, gv_x, gv_y):
        return self._compare_u('le', gv_x, gv_y)

    def op_uint_eq(self, gv_x, gv_y):
        return self._compare_u('eq', gv_x, gv_y)

    def op_uint_ne(self, gv_x, gv_y):
        return self._compare_u('ne', gv_x, gv_y)

    def op_uint_gt(self, gv_x, gv_y):
        return self._compare_u('gt', gv_x, gv_y)

    def op_uint_ge(self, gv_x, gv_y):
        return self._compare_u('ge', gv_x, gv_y)

    op_uint_and = op_int_and
    op_uint_or = op_int_or

    op_uint_lshift = op_int_lshift

##     def op_uint_lshift_val(self, gv_x, gv_y):

    def op_uint_rshift(self, gv_x, gv_y):
        if gv_y.fits_in_simm():
            # logical shift by >= wordsize yields 0
            if abs(gv_y.value) >= 32:
                return self.rgenop.genconst(0)
            else:
                return self._arg_simm_op(gv_x, gv_y, _PPC.srwi)
        # computing x << y when you don't know y is <=32
        # (we can assume y >=0 though, i think)
        # here's the plan:
        #
        # z = ngeu(y, 32) (as per cwg)
        # w = x >> y
        # r = w&z
        gv_a = self._arg_simm_op(gv_y, self.rgenop.genconst(32), _PPC.subfic)
        gv_b = self._arg_op(gv_y, _PPC.addze)
        gv_z = self._arg_arg_op(gv_b, gv_y, _PPC.subf)
        gv_w = self._arg_arg_op(gv_x, gv_y, _PPC.srw)
        return self._arg_arg_op(gv_z, gv_w, _PPC.and_)

##     def op_uint_rshift_val(self, gv_x, gv_y):

    op_uint_xor = op_int_xor
# ... floats ...
# ... llongs, ullongs ...
# here we assume that booleans are always 1 or 0 and chars are
# always zero-padded.
op_cast_bool_to_int = _identity
op_cast_bool_to_uint = _identity
## def op_cast_bool_to_float(self, gv_arg):
op_cast_char_to_int = _identity
op_cast_unichar_to_int = _identity
op_cast_int_to_char = _identity
op_cast_int_to_unichar = _identity
op_cast_int_to_uint = _identity
## def op_cast_int_to_float(self, gv_arg):
## def op_cast_int_to_longlong(self, gv_arg):
op_cast_uint_to_int = _identity
## def op_cast_uint_to_float(self, gv_arg):
## def op_cast_float_to_int(self, gv_arg):
## def op_cast_float_to_uint(self, gv_arg):
## def op_truncate_longlong_to_int(self, gv_arg):
# many pointer operations are genop_* special cases above
op_ptr_eq = op_int_eq
op_ptr_ne = op_int_ne
op_ptr_nonzero = op_int_is_true
op_ptr_ne = op_int_ne
op_ptr_eq = op_int_eq
def op_ptr_iszero(self, gv_arg):
return self._compare('eq', gv_arg, self.rgenop.genconst(0))
op_cast_ptr_to_int = _identity
# ... address operations ...
@specialize.arg(0)
def cast_int_to_whatever(T, value):
    """Convert a machine word back to a value of low-level type T
    (pointer, address or primitive)."""
    if isinstance(T, lltype.Ptr):
        return lltype.cast_int_to_ptr(T, value)
    elif T is llmemory.Address:
        return llmemory.cast_int_to_adr(value)
    else:
        return lltype.cast_primitive(T, value)
@specialize.arg(0)
def cast_whatever_to_int(T, value):
    """Convert a value of low-level type T (pointer, address or
    primitive) to a machine word."""
    if isinstance(T, lltype.Ptr):
        return lltype.cast_ptr_to_int(value)
    elif T is llmemory.Address:
        return llmemory.cast_adr_to_int(value)
    else:
        return lltype.cast_primitive(lltype.Signed, value)
class RPPCGenOp(AbstractRGenOp):
    """The PowerPC implementation of the RGenOp machine-code backend."""

    # the set of registers we consider available for allocation
    # we can artificially restrict it for testing purposes
    freeregs = {
        insn.GP_REGISTER:insn.gprs[3:],
        insn.FP_REGISTER:insn.fprs,
        insn.CR_FIELD:insn.crfs,
        insn.CT_REGISTER:[insn.ctr]}
    DEBUG_SCRIBBLE = option.debug_scribble   # fill unused regs/slots with junk
    MC_SIZE = 65536                          # size of one machine-code block

    def __init__(self):
        self.mcs = []   # machine code blocks where no-one is currently writing
        self.keepalive_gc_refs = []   # roots for GC pointers baked into code
    # ----------------------------------------------------------------
    # the public RGenOp interface

    def newgraph(self, sigtoken, name):
        """Start a new function; returns (builder, entrypoint constant,
        list of input-argument Vars)."""
        numargs = sigtoken          # for now
        builder = self.newbuilder()
        builder._open()
        entrypoint = builder.asm.mc.tell()
        inputargs_gv = builder._write_prologue(sigtoken)
        return builder, IntConst(entrypoint), inputargs_gv
    @specialize.genconst(1)
    def genconst(self, llvalue):
        """Wrap a low-level value as a GenConst (IntConst/AddrConst)."""
        T = lltype.typeOf(llvalue)
        if T is llmemory.Address:
            return AddrConst(llvalue)
        elif isinstance(T, lltype.Primitive):
            return IntConst(lltype.cast_primitive(lltype.Signed, llvalue))
        elif isinstance(T, lltype.Ptr):
            lladdr = llmemory.cast_ptr_to_adr(llvalue)
            if T.TO._gckind == 'gc':
                # keep the object alive: its address is baked into code
                self.keepalive_gc_refs.append(lltype.cast_opaque_ptr(llmemory.GCREF, llvalue))
            return AddrConst(lladdr)
        else:
            assert 0, "XXX not implemented"

##     @staticmethod
##     @specialize.genconst(0)
##     def constPrebuiltGlobal(llvalue):

    @staticmethod
    def genzeroconst(kind):
        # the shared NULL constant; kind is ignored here
        return zero_const

    def replay(self, label, kinds):
        return ReplayBuilder(self), [dummy_var] * len(kinds)
    # token computation: memoized static helpers turning lltype
    # declarations into the simple tuples/ints the builder consumes

    @staticmethod
    def erasedType(T):
        if T is llmemory.Address:
            return llmemory.Address
        if isinstance(T, lltype.Primitive):
            return lltype.Signed
        elif isinstance(T, lltype.Ptr):
            return llmemory.GCREF
        else:
            assert 0, "XXX not implemented"

    @staticmethod
    @specialize.memo()
    def fieldToken(T, name):
        # (byte offset of field, field size in bytes)
        FIELD = getattr(T, name)
        if isinstance(FIELD, lltype.ContainerType):
            fieldsize = 0      # not useful for getsubstruct
        else:
            fieldsize = llmemory.sizeof(FIELD)
        return (llmemory.offsetof(T, name), fieldsize)

    @staticmethod
    @specialize.memo()
    def allocToken(T):
        return llmemory.sizeof(T)

    @staticmethod
    @specialize.memo()
    def varsizeAllocToken(T):
        if isinstance(T, lltype.Array):
            return RPPCGenOp.arrayToken(T)
        else:
            # var-sized structs: shift the inlined array's offsets by
            # the offset of the array field within the struct
            arrayfield = T._arrayfld
            ARRAYFIELD = getattr(T, arrayfield)
            arraytoken = RPPCGenOp.arrayToken(ARRAYFIELD)
            length_offset, items_offset, item_size = arraytoken
            arrayfield_offset = llmemory.offsetof(T, arrayfield)
            return (arrayfield_offset+length_offset,
                    arrayfield_offset+items_offset,
                    item_size)

    @staticmethod
    @specialize.memo()
    def arrayToken(A):
        # (length offset, items offset, item size)
        return (llmemory.ArrayLengthOffset(A),
                llmemory.ArrayItemsOffset(A),
                llmemory.ItemOffset(A.OF))

    @staticmethod
    @specialize.memo()
    def kindToken(T):
        if T is lltype.Float:
            py.test.skip("not implemented: floats in the i386^WPPC back-end")
        return None                   # for now

    @staticmethod
    @specialize.memo()
    def sigToken(FUNCTYPE):
        return len(FUNCTYPE.ARGS)     # for now
    @staticmethod
    @specialize.arg(0)
    def read_frame_var(T, base, info, index):
        """Read from the stack frame of a caller.  The 'base' is the
        frame stack pointer captured by the operation generated by
        genop_get_frame_base().  The 'info' is the object returned by
        get_frame_info(); we are looking for the index-th variable
        in the list passed to get_frame_info()."""
        place = info[index]
        if isinstance(place, StackInfo):
            #print '!!!', base, place.offset
            #print '???', [peek_word_at(base + place.offset + i)
            #              for i in range(-64, 65, 4)]
            # offset 0 would mean the slot was never assigned
            assert place.offset != 0
            value = peek_word_at(base + place.offset)
            return cast_int_to_whatever(T, value)
        else:
            # it was a constant to begin with
            assert isinstance(place, GenConst)
            return place.revealconst(T)

    @staticmethod
    @specialize.arg(0)
    def write_frame_place(T, base, place, value):
        # store into a slot reserved by alloc_frame_place()
        assert place.offset != 0
        value = cast_whatever_to_int(T, value)
        poke_word_into(base + place.offset, value)

    @staticmethod
    @specialize.arg(0)
    def read_frame_place(T, base, place):
        value = peek_word_at(base + place.offset)
        return cast_int_to_whatever(T, value)

    def check_no_open_mc(self):
        pass
    # ----------------------------------------------------------------
    # ppc-specific interface:

    MachineCodeBlock = codebuf.OwningMachineCodeBlock
    ExistingCodeBlock = codebuf.ExistingCodeBlock

    def open_mc(self):
        # reuse a released block if available, else allocate a new one
        if self.mcs:
            return self.mcs.pop()
        else:
            return self.MachineCodeBlock(self.MC_SIZE)   # XXX supposed infinite for now

    def close_mc(self, mc):
##         from pypy.jit.codegen.ppc.ppcgen.asmfunc import get_ppcgen
##         print '!!!!', cast(mc._data, c_void_p).value
##         print '!!!!', mc._data.contents[0]
##         get_ppcgen().flush2(cast(mc._data, c_void_p).value,
##                             mc._size*4)
        self.mcs.append(mc)

    def newbuilder(self):
        return Builder(self)
# a switch can take 7 instructions:
# load_word rSCRATCH, gv_case.value (really two instructions)
# cmpw crf, rSWITCH, rSCRATCH
# load_word rSCRATCH, targetaddr (again two instructions)
# mtctr rSCRATCH
# beqctr crf
# yay RISC :/
class FlexSwitch(CodeGenSwitch):
    """A switch statement to which cases can keep being added; each case
    is a compare-and-branch sequence written into a reserved code area."""

    # a fair part of this code could likely be shared with the i386
    # backend.

    def __init__(self, rgenop, mc, switch_reg, crf, var2loc, initial_spill_offset):
        self.rgenop = rgenop
        self.crf = crf                  # condition-register field to compare in
        self.switch_reg = switch_reg    # register holding the switch value
        self.var2loc = var2loc          # entry state for the case builders
        self.initial_spill_offset = initial_spill_offset
        self.asm = RPPCAssembler()
        self.asm.mc = mc
        self.default_target_addr = 0    # 0 until add_default() is called
    def add_case(self, gv_case):
        """Add one constant case; returns a builder for its code."""
        targetbuilder = self.rgenop.newbuilder()
        targetbuilder._open()
        targetbuilder.initial_var2loc = self.var2loc
        targetbuilder.initial_spill_offset = self.initial_spill_offset
        target_addr = targetbuilder.asm.mc.tell()
        p = self.asm.mc.getpos()
        # that this works depends a bit on the fixed length of the
        # instruction sequences we use to jump around.  if the code is
        # ever updated to use the branch-relative instructions (a good
        # idea, btw) this will need to be thought about again
        try:
            self._add_case(gv_case, target_addr)
        except codebuf.CodeBlockOverflow:
            # the reserved area is full: chain to a fresh one through an
            # unconditional jump, then retry
            self.asm.mc.setpos(p)
            base = self.asm.mc.tell()
            mc = self.rgenop.open_mc()
            newmc = mc.reserve(7 * 5 + 4)
            self.rgenop.close_mc(mc)
            new_addr = newmc.tell()
            self.asm.load_word(rSCRATCH, new_addr)
            self.asm.mtctr(rSCRATCH)
            self.asm.bctr()
            size = self.asm.mc.tell() - base
            flush_icache(base, size)
            self.asm.mc = newmc
            self._add_case(gv_case, target_addr)
        return targetbuilder
def _add_case(self, gv_case, target_addr):
asm = self.asm
base = self.asm.mc.tell()
assert isinstance(gv_case, GenConst)
gv_case.load_now(asm, insn.gprs[0])
asm.cmpw(self.crf.number, rSCRATCH, self.switch_reg.number)
asm.load_word(rSCRATCH, target_addr)
asm.mtctr(rSCRATCH)
asm.bcctr(12, self.crf.number*4 + 2)
if self.default_target_addr:
self._write_default()
size = self.asm.mc.tell() - base
flush_icache(base, size)
def add_default(self):
targetbuilder = self.rgenop.newbuilder()
targetbuilder._open()
targetbuilder.initial_var2loc = self.var2loc
targetbuilder.initial_spill_offset = self.initial_spill_offset
base = self.asm.mc.tell()
self.default_target_addr = targetbuilder.asm.mc.tell()
self._write_default()
size = self.asm.mc.tell() - base
flush_icache(base, size)
return targetbuilder
def _write_default(self):
pos = self.asm.mc.getpos()
self.asm.load_word(rSCRATCH, self.default_target_addr)
self.asm.mtctr(rSCRATCH)
self.asm.bctr()
self.asm.mc.setpos(pos)
# Prebuilt singleton code generator; prebuilt global constants are
# created through it via RPPCGenOp.constPrebuiltGlobal.
global_rgenop = RPPCGenOp()
RPPCGenOp.constPrebuiltGlobal = global_rgenop.genconst
def peek_word_at(addr):
    """Read one signed word from 'addr'.

    After translation 'addr' is a real address object; when running on
    top of CPython it is a plain integer, so ctypes is used to read the
    memory directly.  (That the untranslated case works at all "just
    happens" and is probably going to change.)
    """
    if not we_are_translated():
        from ctypes import cast, c_void_p, c_int, POINTER
        return cast(c_void_p(addr), POINTER(c_int))[0]
    return addr.signed[0]
def poke_word_into(addr, value):
    """Write the signed word 'value' at 'addr'.

    After translation 'addr' is a real address object; when running on
    top of CPython it is a plain integer, so ctypes is used to write
    the memory directly.  (That the untranslated case works at all
    "just happens" and is probably going to change.)
    """
    if not we_are_translated():
        from ctypes import cast, c_void_p, c_int, POINTER
        cast(c_void_p(addr), POINTER(c_int))[0] = value
        return
    addr.signed[0] = value
# Prebuilt constant representing the NULL address.
zero_const = AddrConst(llmemory.NULL)
| Python |
from pypy.jit.codegen.ppc.instruction import \
gprs, fprs, crfs, ctr, \
NO_REGISTER, GP_REGISTER, FP_REGISTER, CR_FIELD, CT_REGISTER, \
CMPInsn, Spill, Unspill, stack_slot, \
rSCRATCH
from pypy.jit.codegen.ppc.conftest import option
# When the conftest --debug-print option is set, the register allocator
# below dumps verbose tracing to stdout.
DEBUG_PRINT = option.debug_print
class RegisterAllocation:
    """Assign machine registers to Vars for a list of instructions.

    Works in a single forward pass over the instructions, spilling the
    least-recently-used variable of the required register class to a
    stack slot when no register is free.  The rewritten instruction
    stream (including inserted Spill/Unspill and constant-load
    instructions) accumulates in self.insns.
    """
    def __init__(self, freeregs, initial_mapping, initial_spill_offset):
        if DEBUG_PRINT:
            print
            print "RegisterAllocation __init__", initial_mapping.items()
        self.insns = []   # output list of instructions
        # registers with dead values
        self.freeregs = {}
        for regcls in freeregs:
            # copy, so the caller's lists are not mutated
            self.freeregs[regcls] = freeregs[regcls][:]
        self.var2loc = {} # maps Vars to AllocationSlots
        self.lru = []     # least-recently-used list of vars; first is oldest.
                          # contains all vars in registers, and no vars on stack
        self.spill_offset = initial_spill_offset # where to put next spilled
                                                 # value, relative to rFP,
                                                 # measured in bytes
        self.free_stack_slots = [] # a free list for stack slots
        # go through the initial mapping and initialize the data structures
        for var, loc in initial_mapping.iteritems():
            self.set(var, loc)
            if loc.is_register:
                if loc.alloc in self.freeregs[loc.regclass]:
                    self.freeregs[loc.regclass].remove(loc.alloc)
                self.lru.append(var)
            else:
                # already-spilled values must lie within the current frame
                assert loc.offset >= self.spill_offset
        self.labels_to_tell_spill_offset_to = []
        self.builders_to_tell_spill_offset_to = []

    def set(self, var, loc):
        """Record that 'var' now lives at 'loc' (var must be unplaced)."""
        assert var not in self.var2loc
        self.var2loc[var] = loc

    def forget(self, var, loc):
        """Remove the record that 'var' lives at 'loc'."""
        assert self.var2loc[var] is loc
        del self.var2loc[var]

    def loc_of(self, var):
        """Return the current location of 'var'."""
        return self.var2loc[var]

    def spill_slot(self):
        """ Returns an unused stack location. """
        if self.free_stack_slots:
            return self.free_stack_slots.pop()
        else:
            self.spill_offset -= 4
            return stack_slot(self.spill_offset)

    def spill(self, reg, argtospill):
        """Move 'argtospill' from register 'reg' onto the stack,
        emitting the Spill instruction (non-GP values go through gpr 0,
        as shown by the move_to_gpr(0) below)."""
        if argtospill in self.lru:
            self.lru.remove(argtospill)
        self.forget(argtospill, reg)
        spillslot = self.spill_slot()
        if reg.regclass != GP_REGISTER:
            self.insns.append(reg.move_to_gpr(0))
            reg = gprs[0]
        self.insns.append(Spill(argtospill, reg, spillslot))
        self.set(argtospill, spillslot)

    def _allocate_reg(self, regclass, newarg):
        """Obtain a register of 'regclass' for 'newarg', spilling the
        least-recently-used variable of that class if none is free."""
        # check if there is a register available
        freeregs = self.freeregs[regclass]
        if freeregs:
            reg = freeregs.pop().make_loc()
            self.set(newarg, reg)
            if DEBUG_PRINT:
                print "allocate_reg: Putting %r into fresh register %r" % (newarg, reg)
            return reg
        # if not, find something to spill: oldest var of the right class
        for i in range(len(self.lru)):
            argtospill = self.lru[i]
            reg = self.loc_of(argtospill)
            assert reg.is_register
            if reg.regclass == regclass:
                del self.lru[i]
                break
        else:
            assert 0
        # Move the value we are spilling onto the stack, both in the
        # data structures and in the instructions:
        self.spill(reg, argtospill)
        if DEBUG_PRINT:
            print "allocate_reg: Spilled %r from %r to %r." % (argtospill, reg, self.loc_of(argtospill))
        # update data structures to put newarg into the register
        reg = reg.alloc.make_loc()
        self.set(newarg, reg)
        if DEBUG_PRINT:
            print "allocate_reg: Put %r in stolen reg %r." % (newarg, reg)
        return reg

    def _promote(self, arg):
        """Mark 'arg' as most recently used."""
        if arg in self.lru:
            self.lru.remove(arg)
        self.lru.append(arg)

    def allocate_for_insns(self, insns):
        """Run allocation over 'insns'; return the final instruction
        list (self.insns) with spill code inserted."""
        from pypy.jit.codegen.ppc.rgenop import Var
        insns2 = []
        # make a pass through the instructions, loading constants into
        # Vars where needed.
        for insn in insns:
            newargs = []
            for arg in insn.reg_args:
                if not isinstance(arg, Var):
                    newarg = Var()
                    arg.load(insns2, newarg)
                    newargs.append(newarg)
                else:
                    newargs.append(arg)
            # NOTE: mutates insn.reg_args in place
            insn.reg_args[0:len(newargs)] = newargs
            insns2.append(insn)
        # Walk through instructions in forward order
        for insn in insns2:
            if DEBUG_PRINT:
                print "Processing instruction"
                print insn
                print "LRU list was:", self.lru
                print 'located at', [self.loc_of(a) for a in self.lru]
            # put things into the lru
            for arg in insn.reg_args:
                self._promote(arg)
            if insn.result:
                self._promote(insn.result)
            if DEBUG_PRINT:
                print "LRU list is now:", self.lru
                print 'located at', [self.loc_of(a) for a in self.lru if a is not insn.result]
            # We need to allocate a register for each used
            # argument that is not already in one
            for i in range(len(insn.reg_args)):
                arg = insn.reg_args[i]
                argcls = insn.reg_arg_regclasses[i]
                if DEBUG_PRINT:
                    print "Allocating register for", arg, "..."
                argloc = self.loc_of(arg)
                if DEBUG_PRINT:
                    print "currently in", argloc
                if not argloc.is_register:
                    # It has no register now because it has been spilled
                    self.forget(arg, argloc)
                    newargloc = self._allocate_reg(argcls, arg)
                    if DEBUG_PRINT:
                        print "unspilling to", newargloc
                    self.insns.append(Unspill(arg, newargloc, argloc))
                    self.free_stack_slots.append(argloc)
                elif argloc.regclass != argcls:
                    # it's in the wrong kind of register
                    # (this code is excessively confusing)
                    self.forget(arg, argloc)
                    self.freeregs[argloc.regclass].append(argloc.alloc)
                    if argloc.regclass != GP_REGISTER:
                        if argcls == GP_REGISTER:
                            gpr = self._allocate_reg(GP_REGISTER, arg).number
                        else:
                            gpr = rSCRATCH
                        self.insns.append(
                            argloc.move_to_gpr(gpr))
                    else:
                        gpr = argloc.number
                    if argcls != GP_REGISTER:
                        newargloc = self._allocate_reg(argcls, arg)
                        self.insns.append(
                            newargloc.move_from_gpr(gpr))
                else:
                    if DEBUG_PRINT:
                        print "it was in ", argloc
                    pass
            # Need to allocate a register for the destination
            assert not insn.result or insn.result not in self.var2loc
            if insn.result_regclass != NO_REGISTER:
                if DEBUG_PRINT:
                    print "Allocating register for result %r..." % (insn.result,)
                resultreg = self._allocate_reg(insn.result_regclass, insn.result)
            insn.allocate(self)
            if DEBUG_PRINT:
                print insn
                print
            self.insns.append(insn)
        #print 'allocation done'
        #for i in self.insns:
        #    print i
        #print self.var2loc
        return self.insns
| Python |
import os
from pypy.jit.codegen.ppc.ppcgen import form
# don't be fooled by the fact that there's some separation between a
# generic assembler class and a PPC assembler class... there's
# certainly a RISC dependency in here, and quite possibly a PPC
# dependency or two too. I personally don't care :)
class AssemblerException(Exception):
    """Raised for assembly-time errors, e.g. duplicate label names."""
class Assembler(object):
    """Accumulate instruction objects and assemble them to words.

    Labels are byte offsets (4 bytes per instruction).  Label-valued
    instruction fields are resolved to instruction-relative offsets at
    assembly time.
    """
    def __init__(self):
        self.insts = []    # instruction objects, in emission order
        self.labels = {}   # label name -> byte offset
        self.rlabels = {}  # byte offset -> list of label names

    def label(self, name):
        """Define label 'name' at the current position."""
        if name in self.labels:
            raise AssemblerException, "duplicate label '%s'"%(name,)
        self.labels[name] = len(self.insts)*4
        self.rlabels.setdefault(len(self.insts)*4, []).append(name)

    def labelname(self, base="L"):
        """Return a not-yet-defined label name of the form base + N."""
        i = 0
        while 1:
            ln = base + str(i)
            if ln not in self.labels:
                return ln
            i += 1

    def assemble0(self, dump=os.environ.has_key('PPY_DEBUG')):
        """Resolve label fields and return the list of instruction words.

        NOTE(review): this mutates inst.fields in place, so calling
        assemble0 twice would apply the relative adjustment twice.
        The 'dump' default is captured once, at import time, from the
        PPY_DEBUG environment variable.
        """
        for i, inst in enumerate(self.insts):
            for f in inst.lfields:
                # label fields become offsets relative to this instruction
                l = self.labels[inst.fields[f]] - 4*i
                inst.fields[f] = l
        buf = []
        for inst in self.insts:
            buf.append(inst.assemble())
        if dump:
            for i in range(len(buf)):
                inst = self.disassemble(buf[i], self.rlabels, i*4)
                for lab in self.rlabels.get(4*i, []):
                    print "%s:"%(lab,)
                print "\t%4d %s"%(4*i, inst)
        return buf

    def assemble(self, dump=os.environ.has_key('PPY_DEBUG')):
        """Assemble to an executable AsmCode object (cache flushed)."""
        insns = self.assemble0(dump)
        from pypy.jit.codegen.ppc.ppcgen import asmfunc
        c = asmfunc.AsmCode(len(insns)*4)
        for i in insns:
            c.emit(i)
        c.flush_cache()
        return c

    def get_idescs(cls):
        """Return [(name, IDesc)] for all instruction descriptors on cls."""
        r = []
        for name in dir(cls):
            a = getattr(cls, name)
            if isinstance(a, form.IDesc):
                r.append((name, a))
        return r
    get_idescs = classmethod(get_idescs)

    def disassemble(cls, inst, labels={}, pc=0):
        """Disassemble one instruction word using the best-matching
        (highest-scoring) instruction descriptor; None if no match.

        NOTE(review): mutable default 'labels={}' — read-only here, but
        it is handed to idesc.disassemble; verify that does not mutate.
        """
        matches = []
        idescs = cls.get_idescs()
        for name, idesc in idescs:
            m = idesc.match(inst)
            if m > 0:
                matches.append((m, idesc, name))
        if matches:
            score, idesc, name = max(matches)
            return idesc.disassemble(name, inst, labels, pc)
    disassemble = classmethod(disassemble)
| Python |
def lookup(sym):
    """Return the address of symbol 'sym' in the running process.

    On first call this compiles/loads the _ppcgen helper module and
    rebinds the module-global 'lookup' to the platform's symbol-lookup
    primitive, so subsequent calls skip this setup entirely.
    """
    global lookup
    import py
    _ppcgen = py.magic.autopath().dirpath().join('_ppcgen.c')._getpymodule()
    try:
        # NSLookupAndBindSymbol (Darwin dyld) wants a leading underscore
        from _ppcgen import NSLookupAndBindSymbol
        def lookup(sym):
            return NSLookupAndBindSymbol('_' + sym)
    except ImportError:
        # elsewhere: plain dlsym
        from _ppcgen import dlsym as lookup
    return lookup(sym)
| Python |
from pypy.jit.codegen.ppc.ppcgen.ppc_assembler import MyPPCAssembler
from pypy.jit.codegen.ppc.ppcgen.symbol_lookup import lookup
from pypy.jit.codegen.ppc.ppcgen.regname import *
def load_arg(code, argi, typecode):
    """Emit code to fetch and unwrap Python-level argument 'argi'.

    The argument tuple pointer is expected in r4; the unwrapped value
    is left in r3+argi ('i'/'O') or the corresponding FP register
    ('f').  Typecode 'i' expects a PyInt, 'f' a PyFloat, 'O' passes the
    object pointer through unchanged.  A type mismatch branches to the
    "argserror" label; an unknown typecode raises immediately.

    Note: loading argument 1 overwrites r4 itself, so callers load it
    last (see make_func / wrap).
    """
    rD = r3+argi
    # fetch the argi'th item of the argument tuple (pointed to by r4)
    code.lwz(rD, r4, 12 + 4*argi)
    if typecode == 'i':
        # type-check: ob_type must be PyInt_Type, else goto argserror
        code.load_word(r0, lookup("PyInt_Type"))
        code.lwz(r31, rD, 4) # XXX ick!
        code.cmpw(r0, r31)
        code.bne("argserror")
        # unwrap: load ob_ival
        code.lwz(rD, rD, 8)
    elif typecode == 'f':
        # type-check: ob_type must be PyFloat_Type, else goto argserror
        code.load_word(r0, lookup("PyFloat_Type"))
        code.lwz(r31, rD, 4)
        code.cmpw(r0, r31)
        code.bne("argserror")
        # unwrap: load ob_fval into the matching FP register
        code.lfd(rD-2, rD, 8)
    elif typecode != "O":
        # was: raise Exception, "erk" -- archaic comma-raise syntax and
        # an uninformative message; this form works on Python 2 and 3
        raise Exception("unknown argument typecode %r" % (typecode,))
# Sentinel label marking the "fast" entry point of a wrapped function,
# i.e. the point past the argument-unwrapping prologue.
FAST_ENTRY_LABEL = "FAST-ENTRY-LABEL"
def make_func(code, retcode, signature, localwords=0):
    """code shouldn't contain prologue/epilogue (or touch r31)

    Wrap the assembled body 'code' in a CPython calling-convention
    wrapper: unwrap the argument tuple according to 'signature' (one
    typecode per argument, see load_arg), call the body at its fast
    entry, and box the result according to 'retcode' ('i' -> PyInt,
    'f' -> PyFloat).  Returns an executable AsmCode with the extra
    attribute FAST_ENTRY_LABEL (offset of the fast entry point).
    """
    stacksize = 80 + 4*localwords
    argcount = len(signature)
    ourcode = MyPPCAssembler()
    # prologue: save LR and r31.., allocate the stack frame
    ourcode.mflr(r0)
    ourcode.stmw(r31, r1, -4)
    ourcode.stw(r0, r1, 8)
    ourcode.stwu(r1, r1, -stacksize)
    # check the argument count against the signature
    ourcode.lwz(r3, r4, 8)
    ourcode.cmpwi(r3, argcount)
    ourcode.bne("argserror")
    assert argcount < 9
    # argument 1 is loaded last: loading it clobbers r4 (= r3+1),
    # which still holds the argument tuple pointer (see load_arg)
    if argcount > 0:
        load_arg(ourcode, 0, signature[0])
    for i in range(2, argcount):
        load_arg(ourcode, i, signature[i])
    if argcount > 1:
        load_arg(ourcode, 1, signature[1])
    ourcode.bl(FAST_ENTRY_LABEL)
    # box the return value
    if retcode == 'i':
        s = lookup("PyInt_FromLong")
        ourcode.load_word(r0, s)
        ourcode.mtctr(r0)
        ourcode.bctrl()
    elif retcode == 'f':
        s = lookup("PyFloat_FromDouble")
        ourcode.load_word(r0, s)
        ourcode.mtctr(r0)
        ourcode.bctrl()
    # epilogue: tear down the frame, restore LR and r31.., return
    ourcode.label("epilogue")
    ourcode.lwz(r0, r1, stacksize + 8)
    ourcode.addi(r1, r1, stacksize)
    ourcode.mtlr(r0)
    ourcode.lmw(r31, r1, -4)
    ourcode.blr()
    # error path: set a TypeError and return NULL (r3 = 0)
    err_set = lookup("PyErr_SetObject")
    exc = lookup("PyExc_TypeError")
    ourcode.label("argserror")
    ourcode.load_word(r5, err_set)
    ourcode.mtctr(r5)
    ourcode.load_from(r3, exc)
    ourcode.mr(r4, r3)
    ourcode.bctrl()
    ourcode.li(r3, 0)
    ourcode.b("epilogue")
    ourcode.label(FAST_ENTRY_LABEL)
    # append the body, shifting its labels past the wrapper code
    # err, should be an Assembler method:
    l = {}
    for k in code.labels:
        l[k] = code.labels[k] + 4*len(ourcode.insts)
    # NOTE(review): r starts as a *copy* of code.rlabels, so the
    # un-shifted offsets survive alongside the shifted ones — looks
    # unintended; verify.
    r = code.rlabels.copy()
    for k in code.rlabels:
        r[k + 4*len(ourcode.insts)] = code.rlabels[k]
    ourcode.insts.extend(code.insts)
    ourcode.labels.update(l)
    ourcode.rlabels.update(r)
    r = ourcode.assemble()
    r.FAST_ENTRY_LABEL = ourcode.labels[FAST_ENTRY_LABEL]
    return r
def wrap(funcname, retcode, signature):
    """Build a CPython-callable wrapper around C function 'funcname'.

    Like make_func, but the callee is looked up by name in the running
    process at wrap time (via lookup) instead of being supplied as
    assembled code.  'signature' is one typecode per argument (see
    load_arg); 'retcode' selects result boxing ('i' -> PyInt,
    'f' -> PyFloat).  Returns an executable AsmCode.
    """
    argcount = len(signature)
    ourcode = MyPPCAssembler()
    # prologue: save LR and r31.., allocate an 80-byte frame
    ourcode.mflr(r0)
    ourcode.stmw(r31, r1, -4)
    ourcode.stw(r0, r1, 8)
    ourcode.stwu(r1, r1, -80)
    # check the argument count against the signature
    ourcode.lwz(r3, r4, 8)
    ourcode.cmpwi(r3, argcount)
    ourcode.bne("argserror")
    assert argcount < 9
    # argument 1 is loaded last: loading it clobbers r4 (= r3+1),
    # which still holds the argument tuple pointer (see load_arg)
    if argcount > 0:
        load_arg(ourcode, 0, signature[0])
    for i in range(2, argcount):
        load_arg(ourcode, i, signature[i])
    if argcount > 1:
        load_arg(ourcode, 1, signature[1])
    # call the target function through CTR
    ourcode.load_word(r0, lookup(funcname))
    ourcode.mtctr(r0)
    ourcode.bctrl()
    # box the return value
    if retcode == 'i':
        s = lookup("PyInt_FromLong")
        ourcode.load_word(r0, s)
        ourcode.mtctr(r0)
        ourcode.bctrl()
    elif retcode == 'f':
        s = lookup("PyFloat_FromDouble")
        ourcode.load_word(r0, s)
        ourcode.mtctr(r0)
        ourcode.bctrl()
    # epilogue: tear down the 80-byte frame (80 + 8 = 88), return
    ourcode.label("epilogue")
    ourcode.lwz(r0, r1, 88)
    ourcode.addi(r1, r1, 80)
    ourcode.mtlr(r0)
    ourcode.lmw(r31, r1, -4)
    ourcode.blr()
    # error path: set a TypeError and return NULL (r3 = 0)
    err_set = lookup("PyErr_SetObject")
    exc = lookup("PyExc_TypeError")
    ourcode.label("argserror")
    ourcode.load_word(r5, err_set)
    ourcode.mtctr(r5)
    ourcode.load_from(r3, exc)
    ourcode.mr(r4, r3)
    ourcode.bctrl()
    ourcode.li(r3, 0)
    ourcode.b("epilogue")
    return ourcode.assemble()
| Python |
import py
import mmap, struct
# Lazily-compiled _ppcgen extension module; populated by get_ppcgen().
_ppcgen = None
def get_ppcgen():
    """Compile (once) and return the _ppcgen helper extension module."""
    global _ppcgen
    if _ppcgen is not None:
        return _ppcgen
    _ppcgen = py.magic.autopath().dirpath().join('_ppcgen.c')._getpymodule()
    return _ppcgen
class AsmCode(object):
    """A chunk of executable memory that instruction words are written into."""

    def __init__(self, size):
        # anonymous private mapping, writable *and* executable so the
        # assembled code can run in place
        flags = mmap.MAP_ANON | mmap.MAP_PRIVATE
        prot = mmap.PROT_WRITE | mmap.PROT_READ | mmap.PROT_EXEC
        self.code = mmap.mmap(-1, size, flags, prot)

    def emit(self, insn):
        """Append one 32-bit instruction word at the current position."""
        self.code.write(struct.pack('i', insn))

    def __call__(self, *args):
        """Execute the buffer via the _ppcgen helper, passing 'args'."""
        return get_ppcgen().mmap_exec(self.code, args)

    def flush_cache(self):
        # ensure the instruction cache sees the freshly written words
        get_ppcgen().mmap_flush(self.code)
| Python |
# only a small file, but there's some hairy stuff in here!
"""
>>> f = Field('test', 16, 31)
>>> f
<Field 'test'>
>>> f.encode(65535)
65535
>>> f.encode(65536)
Traceback (most recent call last):
File \"<stdin>\", line 1, in ?
File \"field.py\", line 25, in encode
raise ValueError(\"field '%s' can't accept value %s\"
ValueError: field 'test' can't accept value 65536
>>>
"""
class Field(object):
    """One named bit-field of a 32-bit instruction word.

    'left'..'right' are bit positions in PowerPC-style numbering
    (bit 0 is the most significant), as implied by the
    (32 - right - 1) shifts below.
    """
    def __init__(self, name, left, right, signedness=False, valclass=int):
        self.name = name
        self.left = left
        self.right = right
        width = self.right - self.left + 1
        # mask applies before shift!
        self.mask = 2**width - 1
        self.signed = signedness == 'signed'
        self.valclass = valclass
    def __repr__(self):
        return '<Field %r>'%(self.name,)
    def encode(self, value):
        """Return 'value' shifted into this field's bit positions,
        after type- and range-checking it."""
        # NOTE(review): this test is reversed relative to the usual
        # isinstance(value, self.valclass) — verify it is intentional.
        if not issubclass(self.valclass, type(value)):
            raise ValueError("field '%s' takes '%s's, not '%s's"
                             %(self.name, self.valclass.__name__, type(value).__name__))
        if not self.signed and value < 0:
            raise ValueError("field '%s' is unsigned and can't accept value %d"
                             %(self.name, value))
        # range check (one bit less of headroom for signed fields)
        # that this does the right thing is /not/ obvious (but true!)
        if ((value >> 31) ^ value) & ~(self.mask >> self.signed):
            raise ValueError("field '%s' can't accept value %s"
                             %(self.name, value))
        value &= self.mask
        value = long(value)
        value <<= (32 - self.right - 1)
        if value & 0x80000000L:
            # force the result back into a (possibly negative) 32-bit int
            # yuck:
            return ~int((~value)&0xFFFFFFFFL)
        else:
            return int(value)
    def decode(self, inst):
        """Extract this field's value from the instruction word 'inst',
        sign-extending when the field is signed."""
        mask = self.mask
        v = (inst >> 32 - self.right - 1) & mask
        if self.signed and (~mask >> 1) & mask & v:
            v = ~(~v&mask)
        return self.valclass(v)
    def r(self, v, labels, pc):
        # disassembly hook; the base field ignores labels/pc
        return self.decode(v)
if __name__=='__main__':
    # run the doctest embedded in the module docstring above
    import doctest
    doctest.testmod()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.