code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
"""
A simple standalone target for the javascript interpreter.
"""
import sys
from pypy.rlib.streamio import open_file_as_stream
from pypy.lang.js.interpreter import *
from pypy.lang.js.jsobj import ExecutionReturned
# __________ Entry point __________
interp = Interpreter()
def entry_point(argv):
if len(argv) == 2:
f = open_file_as_stream(argv[1])
interp.run(load_source(f.readall()))
return 0
elif argv[0] == 'foo':
raise ExecutionReturned(None)
else:
print "Usage: %s jsourcefile" % argv[0]
return 1
# _____ Define and setup target ___
def target(driver, args):
    """Translation-toolchain hook: name the executable and return the
    entry point.  The None annotation marks a standalone argv target."""
    driver.exe_name = 'js-%(backend)s'
    return entry_point, None
if __name__ == '__main__':
    entry_point(sys.argv)
| Python |
# Ported from a Java benchmark whose history is :
# This is adapted from a benchmark written by John Ellis and Pete Kovac
# of Post Communications.
# It was modified by Hans Boehm of Silicon Graphics.
#
# This is no substitute for real applications. No actual application
# is likely to behave in exactly this way. However, this benchmark was
# designed to be more representative of real applications than other
# Java GC benchmarks of which we are aware.
# It attempts to model those properties of allocation requests that
# are important to current GC techniques.
# It is designed to be used either to obtain a single overall performance
# number, or to give a more detailed estimate of how collector
# performance varies with object lifetimes. It prints the time
# required to allocate and collect balanced binary trees of various
# sizes. Smaller trees result in shorter object lifetimes. Each cycle
# allocates roughly the same amount of memory.
# Two data structures are kept around during the entire process, so
# that the measured performance is representative of applications
# that maintain some live in-memory data. One of these is a tree
# containing many pointers. The other is a large array containing
# double precision floating point numbers. Both should be of comparable
# size.
#
# The results are only really meaningful together with a specification
# of how much memory was used. It is possible to trade memory for
# better time performance. This benchmark should be run in a 32 MB
# heap, though we don't currently know how to enforce that uniformly.
#
# Unlike the original Ellis and Kovac benchmark, we do not attempt
# measure pause times. This facility should eventually be added back
# in. There are several reasons for omitting it for now. The original
# implementation depended on assumptions about the thread scheduler
# that don't hold uniformly. The results really measure both the
# scheduler and GC. Pause time measurements tend to not fit well with
# current benchmark suites. As far as we know, none of the current
# commercial Java implementations seriously attempt to minimize GC pause
# times.
#
# Known deficiencies:
# - No way to check on memory use
# - No cyclic data structures
# - No attempt to measure variation with object size
# - Results are sensitive to locking cost, but we dont
# check for proper locking
import os, time
def println(s):
    """Write s plus a newline straight to file descriptor 1 (unbuffered)."""
    os.write(1, "%s\n" % s)
class Node(object):
    """A binary-tree node; both children default to None."""

    def __init__(self, l=None, r=None):
        self.left, self.right = l, r
kStretchTreeDepth = 18 # about 16Mb (for Java); depth of the throwaway "stretch" tree
kLongLivedTreeDepth = 16 # about 4Mb (for Java); tree kept alive for the whole run
kArraySize = 500000 # about 4Mb; size of the long-lived float array
kMinTreeDepth = 4 # smallest tree depth timed by main()
kMaxTreeDepth = 16 # largest tree depth timed by main()
def tree_size(i):
    """Number of nodes in a full binary tree of depth i."""
    return 2 ** (i + 1) - 1
def num_iters(i):
    """Iterations for depth i, chosen so every depth allocates roughly
    as many nodes in total as the stretch tree."""
    return 2 * tree_size(kStretchTreeDepth) / tree_size(i)
def populate(depth, node):
    """Build tree top down, assigning to older objects."""
    if depth <= 0:
        return
    depth -= 1
    node.left = Node()
    node.right = Node()
    populate(depth, node.left)
    populate(depth, node.right)
def make_tree(depth):
    """Build tree bottom-up: a leaf at depth 0, two subtrees otherwise."""
    if depth <= 0:
        return Node()
    return Node(make_tree(depth - 1), make_tree(depth - 1))
def print_diagnostics():
    """Placeholder: would report free/total memory if we had a way to ask."""
    return None
def time_construction(depth):
    """Time building num_iters(depth) trees of the given depth, once
    top-down (populate) and once bottom-up (make_tree), and print both
    timings in milliseconds."""
    niters = num_iters(depth)
    println("Creating %d trees of depth %d" % (niters, depth))
    t_start = time.time()
    for i in range(niters):
        temp_tree = Node()
        populate(depth, temp_tree)
        temp_tree = None # drop the tree so it becomes garbage
    t_finish = time.time()
    # fixed "constrution" -> "construction" typo in both report lines
    println("\tTop down construction took %f ms" % ((t_finish-t_start)*1000.))
    t_start = time.time()
    for i in range(niters):
        temp_tree = make_tree(depth)
        temp_tree = None
    t_finish = time.time()
    println("\tBottom up construction took %f ms" % ((t_finish-t_start)*1000.))
def main():
    """GCBench driver: stretch the heap once, set up long-lived data,
    then time tree construction at increasing depths."""
    println("Garbage Collector Test")
    println(" Stretching memory with a binary tree of depth %d" % kStretchTreeDepth)
    print_diagnostics()
    t_start = time.time()
    temp_tree = make_tree(kStretchTreeDepth)
    temp_tree = None
    # Create a long lived object
    println(" Creating a long-lived binary tree of depth %d" % kLongLivedTreeDepth)
    long_lived_tree = Node()
    populate(kLongLivedTreeDepth, long_lived_tree)
    # Create long-lived array, filling half of it
    println(" Creating a long-lived array of %d doubles" % kArraySize)
    array = [0.0] * kArraySize
    i = 1
    while i < kArraySize/2:
        array[i] = 1.0/i
        i += 1
    print_diagnostics()
    for d in range(kMinTreeDepth, kMaxTreeDepth+1, 2):
        time_construction(d)
    # sanity check: the long-lived data must have survived all the GC work
    if long_lived_tree is None or array[1000] != 1.0/1000:
        println("Failed")
    t_finish = time.time()
    print_diagnostics()
    println("Completed in %f ms." % ((t_finish-t_start)*1000.))
if __name__ == '__main__':
    main()
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ unset (e.g. under exec): fall back to the script path
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # walk upwards until a path component named `part` is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)
    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    # make sure the found parent wins over any other copy on sys.path
    sys.path.insert(0, head)
    # Re-register already-imported top-level modules that actually live
    # inside the pypy tree under their dotted names as well.
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): a file basename rarely contains '.', so this test
        # appears to skip nearly every module -- verify the intent
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
                if not hasattr(sys.modules[prename], postname):
                    setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
""" clone master version of autopath.py into all subdirs """
from os.path import join, walk
if not this_dir.endswith(join('pypy','tool')):
raise EnvironmentError("can only clone master version "
"'%s'" % join(pypydir, 'tool',_myname))
def sync_walker(arg, dirname, fnames):
if _myname in fnames:
fn = join(dirname, _myname)
f = open(fn, 'rwb+')
try:
if f.read() == arg:
print "checkok", fn
else:
print "syncing", fn
f = open(fn, 'w')
f.write(arg)
finally:
f.close()
s = open(join(pypydir, 'tool', _myname), 'rb').read()
walk(pypydir, sync_walker, s)
_myname = 'autopath.py'
# set guaranteed attributes: pypydir (pypy root) and this_dir (directory
# holding this copy); importing this module has that side effect
pypydir, this_dir = __dirinfo('pypy')
if __name__ == '__main__':
    __clone()
| Python |
import os, sys
from pypy.translator.test import rpystone
from pypy.translator.goal import richards
import pypy.interpreter.gateway # needed before sys, order of imports !!!
from pypy.module.sys.version import svn_revision
# __________ Entry point __________
VERSION = svn_revision() # interpreter svn revision, baked into the report
# note that we have %f but no length specifiers in RPython
def pystones_main(loops):
    """Run rpystone with abs(loops) passes and report the timing on fd 1.

    A negative loop count runs silently (s stays '').  The loops==12345
    branch presumably just lets the annotator see a recursive call --
    verify before removing.
    """
    benchtime, stones = rpystone.pystones(abs(loops))
    s = '' # annotator happiness
    if loops >= 0:
        s = ("RPystone(%s) time for %d passes = %f" %
             (VERSION, loops, benchtime) + '\n' + (
             "This machine benchmarks at %f pystones/second" % stones))
    os.write(1, s)
    if loops == 12345:
        pystones_main(loops-1)
def richards_main(iterations):
    """Run the RPython Richards benchmark and print total/average times.

    Writes everything via os.write (RPython-friendly); returns early on
    an incorrect benchmark result."""
    s = "Richards benchmark (RPython) starting...\n"
    os.write(1, s)
    result, startTime, endTime = richards.entry_point(iterations)
    if not result:
        os.write(2, "Incorrect results!\n")
        return
    os.write(1, "finished.\n")
    total_s = endTime - startTime
    avg = total_s * 1000 / iterations
    os.write(1, "Total time for %d iterations: %f secs\n" %(iterations, total_s))
    os.write(1, "Average time per iteration: %f ms\n" %(avg))
DEF_PYSTONE = 10000000 # default pass count for pystone
DEF_RICHARDS = 1000 # default iteration count for richards
def entry_point(argv):
    """Parse argv: any prefix of 'pystone'/'richards' selects the
    benchmark, an integer argument overrides its default size.
    Returns a process exit code (0 ok, 1 bad argument)."""
    proc = pystones_main
    default = DEF_PYSTONE
    n = 0
    for s in argv[1:]:
        s = s.lower()
        if 'pystone'.startswith(s):
            proc = pystones_main
            default = DEF_PYSTONE
        elif 'richards'.startswith(s):
            proc = richards_main
            default = DEF_RICHARDS
        else:
            try:
                n = abs(int(s))
            except ValueError:
                os.write(2, '"%s" is neither a valid option (pystone, richards)'
                            ' nor an integer\n' % s)
                return 1
    if not n:
        n = default # no explicit size given: use the benchmark's default
    proc(n)
    return 0
# _____ Define and setup target ___
def target(*args):
    """Toolchain hook; the None annotation means a standalone argv target."""
    return (entry_point, None)
"""
Why is this a stand-alone target?
The above target specifies None as the argument types list.
This is a case treated specially in the driver.py . If the list
of input types is empty, it is meant to be a list of strings,
actually implementing argv of the executable.
""" | Python |
from pypy.translator.goal import richards
entry_point = richards.entry_point
# _____ Define and setup target ___
def target(*args):
    """Toolchain hook: translate richards.entry_point taking one int."""
    return (entry_point, [int])
def get_llinterp_args():
    """Argument list used when running the target under the llinterp."""
    return [1]
# _____ Run translated _____
def run(c_entry_point):
    """Smoke-test a translated entry point: 500 compiled iterations,
    then 5 interpreted ones on CPython for comparison."""
    print "Translated:"
    richards.main(c_entry_point, iterations=500)
    print "CPython:"
    richards.main(iterations=5)
| Python |
#! /usr/bin/env python
# App-level version of py.py.
# See test/test_app_main.
"""
options:
-i inspect interactively after running script
-O dummy optimization flag for compatibility with C Python
-c CMD program passed in as CMD (terminates option list)
-S do not 'import site' on initialization
-u unbuffered binary stdout and stderr
-h, --help show this help message and exit
-m library module to be run as a script (terminates option list)
--version print the PyPy version
--info print translation information about this PyPy executable
"""
import sys, os
DEBUG = False # dump exceptions before calling the except hook
originalexcepthook = sys.__excepthook__ # saved so errors can be reported even if user code breaks sys.excepthook
def run_toplevel(f, *fargs, **fkwds):
    """Calls f() and handle all OperationErrors.
    Intended use is to run the main program or one interactive statement.
    run_toplevel() handles details like forwarding exceptions to
    sys.excepthook(), catching SystemExit, printing a newline after
    sys.stdout if needed, etc.
    Returns True on success, False when an exception was reported.
    """
    try:
        # run it
        f(*fargs, **fkwds)
        # we arrive here if no exception is raised. stdout cosmetics...
        try:
            stdout = sys.stdout
            softspace = stdout.softspace
        except AttributeError:
            pass
            # Don't crash if user defined stdout doesn't have softspace
        else:
            if softspace:
                stdout.write('\n')
    except SystemExit, e:
        # exit if we catch a w_SystemExit
        exitcode = e.code
        if exitcode is None:
            exitcode = 0
        else:
            try:
                exitcode = int(exitcode)
            except:
                # not an integer: print it to stderr
                try:
                    stderr = sys.stderr
                except AttributeError:
                    pass # too bad
                else:
                    print >> stderr, exitcode
                exitcode = 1
        # re-raise with the normalized integer code for the caller
        raise SystemExit(exitcode)
    except:
        etype, evalue, etraceback = sys.exc_info()
        try:
            # extra debugging info in case the code below goes very wrong
            if DEBUG and hasattr(sys, 'stderr'):
                s = getattr(etype, '__name__', repr(etype))
                print >> sys.stderr, "debug: exception-type: ", s
                print >> sys.stderr, "debug: exception-value:", str(evalue)
                tbentry = etraceback
                if tbentry:
                    # report the innermost traceback frame
                    while tbentry.tb_next:
                        tbentry = tbentry.tb_next
                    lineno = tbentry.tb_lineno
                    filename = tbentry.tb_frame.f_code.co_filename
                    print >> sys.stderr, "debug: exception-tb: %s:%d" % (
                        filename, lineno)
            # set the sys.last_xxx attributes
            sys.last_type = etype
            sys.last_value = evalue
            sys.last_traceback = etraceback
            # call sys.excepthook
            hook = getattr(sys, 'excepthook', originalexcepthook)
            hook(etype, evalue, etraceback)
            return False # done
        except:
            try:
                stderr = sys.stderr
            except AttributeError:
                pass # too bad
            else:
                print >> stderr, 'Error calling sys.excepthook:'
                originalexcepthook(*sys.exc_info())
                print >> stderr
                print >> stderr, 'Original exception was:'
            # we only get here if sys.excepthook didn't do its job
            originalexcepthook(etype, evalue, etraceback)
            return False
    return True # success
# ____________________________________________________________
# Option parsing
def print_info():
try:
options = sys.pypy_translation_info
except AttributeError:
print >> sys.stderr, 'no translation information found'
else:
optitems = options.items()
optitems.sort()
for name, value in optitems:
print ' %51s: %s' % (name, value)
def print_help():
print 'usage: %s [options]' % (sys.executable,)
print __doc__
def print_error(msg):
    """Report a bad command line on stderr, with a usage reminder."""
    exe = sys.executable
    print >> sys.stderr, msg
    print >> sys.stderr, 'usage: %s [options]' % (exe,)
    print >> sys.stderr, 'Try `%s -h` for more information.' % (exe,)
def set_unbuffered_io():
    """Rebind std{in,out,err} to unbuffered binary streams (-u option)."""
    if os.name == 'nt':
        raise NotImplementedError("binary stdin/stdout not implemented "
                                  "on Windows")
    sys.stdin = sys.__stdin__ = os.fdopen(0, 'rb', 0)
    sys.stdout = sys.__stdout__ = os.fdopen(1, 'wb', 0)
    sys.stderr = sys.__stderr__ = os.fdopen(2, 'wb', 0)
# ____________________________________________________________
# Main entry point
AUTOSUBPATH = 'share' + os.sep + 'pypy-%d.%d' # library dir under an installed prefix
def entry_point(executable, argv):
    """Interpreter startup: locate the library path, parse the command
    line, then run -c / -m / a script / stdin, possibly followed by an
    interactive console.  Returns the process exit code."""
    # find the full path to the executable, assuming that if there is no '/'
    # in the provided one then we must look along the $PATH
    if os.sep not in executable:
        path = os.getenv('PATH')
        if path:
            for dir in path.split(os.pathsep):
                fn = os.path.join(dir, executable)
                if os.path.isfile(fn):
                    executable = fn
                    break
    sys.executable = os.path.abspath(executable)
    # set up a sys.path that depends on the local machine: walk from the
    # executable's directory upwards, trying the directory itself and the
    # share/pypy-X.Y subdirectory at each level
    autosubpath = AUTOSUBPATH % sys.pypy_version_info[:2]
    search = executable
    while 1:
        dirname = resolvedirof(search)
        if dirname == search:
            # not found! let's hope that the compiled-in path is ok
            print >> sys.stderr, ('debug: WARNING: library path not found, '
                                  'using compiled-in sys.path')
            break
        newpath = sys.pypy_initial_path(dirname)
        if newpath is None:
            newpath = sys.pypy_initial_path(os.path.join(dirname, autosubpath))
            if newpath is None:
                search = dirname # walk to the parent directory
                continue
        sys.path = newpath # found!
        break
    go_interactive = False
    run_command = False
    import_site = True
    i = 0
    run_module = False
    run_stdin = False
    # option parsing: stops at the first non-option (the script name)
    while i < len(argv):
        arg = argv[i]
        if not arg.startswith('-'):
            break
        if arg == '-i':
            go_interactive = True
        elif arg == '-c':
            if i+1 >= len(argv):
                print_error('Argument expected for the -c option')
                return 2
            run_command = True
            break
        elif arg == '-u':
            set_unbuffered_io()
        elif arg == '-O':
            pass # dummy flag, for compatibility with CPython
        elif arg == '--version':
            print sys.version
            return 0
        elif arg == '--info':
            print_info()
            return 0
        elif arg == '-h' or arg == '--help':
            print_help()
            return 0
        elif arg == '-S':
            import_site = False
        elif arg == '-':
            run_stdin = True
            break # not an option but a file name representing stdin
        elif arg == '-m':
            i += 1
            if i >= len(argv):
                print_error('Argument expected for the -m option')
                return 2
            run_module = True
            break
        elif arg == '--':
            i += 1
            break # terminates option list
        else:
            print_error('unrecognized option %r' % (arg,))
            return 2
        i += 1
    sys.argv = argv[i:]
    if not sys.argv:
        # no arguments at all: read the program from stdin
        sys.argv.append('')
        run_stdin = True
    # with PyPy in top of CPython we can only have around 100
    # but we need more in the translated PyPy for the compiler package
    sys.setrecursionlimit(5000)
    mainmodule = type(sys)('__main__')
    sys.modules['__main__'] = mainmodule
    if import_site:
        try:
            import site
        except:
            print >> sys.stderr, "'import site' failed"
    # set up the Ctrl-C => KeyboardInterrupt signal handler, if the
    # signal module is available
    try:
        import signal
    except ImportError:
        pass
    else:
        signal.signal(signal.SIGINT, signal.default_int_handler)
        if hasattr(signal, "SIGPIPE"):
            signal.signal(signal.SIGPIPE, signal.SIG_IGN)
    def is_interactive():
        # -i on the command line, or $PYTHONINSPECT in the environment
        return go_interactive or os.getenv('PYTHONINSPECT')
    success = True
    try:
        if run_command:
            cmd = sys.argv.pop(1)
            def run_it():
                exec cmd in mainmodule.__dict__
            success = run_toplevel(run_it)
        elif run_module:
            def run_it():
                import runpy
                runpy.run_module(sys.argv[0], None, '__main__', True)
            success = run_toplevel(run_it)
        elif run_stdin:
            if is_interactive() or sys.stdin.isatty():
                # interactive session: banner, then $PYTHONSTARTUP if set
                print_banner()
                python_startup = os.getenv('PYTHONSTARTUP')
                if python_startup:
                    try:
                        startup = open(python_startup).read()
                    except IOError:
                        pass
                    else:
                        def run_it():
                            co_python_startup = compile(startup,
                                                        python_startup,
                                                        'exec')
                            exec co_python_startup in mainmodule.__dict__
                        run_toplevel(run_it)
                go_interactive = True
            else:
                # piped stdin: compile and run it as a program
                def run_it():
                    co_stdin = compile(sys.stdin.read(), '<stdin>', 'exec')
                    exec co_stdin in mainmodule.__dict__
                mainmodule.__file__ = '<stdin>'
                success = run_toplevel(run_it)
        else:
            # run a script file; its directory goes first on sys.path
            mainmodule.__file__ = sys.argv[0]
            scriptdir = resolvedirof(sys.argv[0])
            sys.path.insert(0, scriptdir)
            success = run_toplevel(execfile, sys.argv[0], mainmodule.__dict__)
        if is_interactive():
            # prefer the pyrepl console when its dependencies import
            try:
                import _curses
                import termios
                from pyrepl.python_reader import main
                from pyrepl import cmdrepl
                #import pdb
                #pdb.Pdb = cmdrepl.replize(pdb.Pdb, 1)
            except ImportError:
                success = run_toplevel(interactive_console, mainmodule)
            else:
                main(print_banner=False)
                success = True
    except SystemExit, e:
        return e.code
    else:
        return not success
def resolvedirof(filename):
    """Directory containing `filename`, following a symlink at the last
    path component (recursively)."""
    try:
        filename = os.path.abspath(filename)
    except OSError:
        pass
    dirname = os.path.dirname(filename)
    if not os.path.islink(filename):
        return dirname
    try:
        link_target = os.readlink(filename)
    except OSError:
        return dirname
    return resolvedirof(os.path.join(dirname, link_target))
def print_banner():
print 'Python %s on %s' % (sys.version, sys.platform)
print ('Type "help", "copyright", "credits" or '
'"license" for more information.')
def interactive_console(mainmodule):
    """Minimal read-eval-print loop over mainmodule's namespace."""
    # some parts of code.py are copied here because it seems to be impossible
    # to start an interactive console without printing at least one line
    # of banner
    import code
    console = code.InteractiveConsole(mainmodule.__dict__)
    try:
        import readline # best-effort: enables line editing if present
    except ImportError:
        pass
    more = 0 # true while a multi-line statement is still open
    while 1:
        try:
            if more:
                prompt = sys.ps2
            else:
                prompt = sys.ps1
            try:
                line = raw_input(prompt)
            except EOFError:
                # Ctrl-D: finish the session
                console.write("\n")
                break
            else:
                more = console.push(line)
        except KeyboardInterrupt:
            # Ctrl-C: discard the pending statement and keep going
            console.write("\nKeyboardInterrupt\n")
            console.resetbuffer()
            more = 0
if __name__ == '__main__':
    import autopath
    # obscure! try removing the following line, see how it crashes, and
    # guess why...
    ImStillAroundDontForgetMe = sys.modules['__main__']
    sys.ps1 = '>>>> '
    sys.ps2 = '.... '
    # debugging only: emulate the sys.pypy_* attributes that a translated
    # PyPy would provide, so app_main can be exercised on top of CPython
    def pypy_initial_path(s):
        from pypy.module.sys.state import getinitialpath
        try:
            return getinitialpath(s)
        except OSError:
            return None
    from pypy.module.sys.version import PYPY_VERSION
    sys.pypy_version_info = PYPY_VERSION
    sys.pypy_initial_path = pypy_initial_path
    sys.exit(entry_point(sys.argv[0], sys.argv[1:]))
    #sys.exit(entry_point('app_main.py', sys.argv[1:]))
| Python |
from pypy.module._demo import Module, demo
from pypy.objspace.cpy.ann_policy import CPyAnnotatorPolicy
from pypy.objspace.cpy.objspace import CPyObjSpace
from pypy.objspace.cpy.wrappable import reraise
import pypy.rpython.rctypes.implementation
from pypy.interpreter.error import OperationError
space = CPyObjSpace()
module = Module(space, space.wrap('_demo'))
w_moduledict = module.getdict()
def __init__(mod):
    """Extension-module initializer: stash the CPython-level module dict
    under '__rpython__' and replace its contents with the interp-level
    module dict, keeping __name__, __doc__ and RPythonError."""
    w_mod = CPyObjSpace.W_Object(mod)
    try:
##        space.appexec([w_mod, w_moduledict],
##        '''(mod, newdict):
##            old = mod.__dict__.copy()
##            mod.__dict__.clear()
##            mod.__dict__['__rpython__'] = old
##            for key in ['__name__', '__doc__', 'RPythonError']:
##                if key in old:
##                    mod.__dict__[key] = old[key]
##            mod.__dict__.update(newdict)
##        ''')
        # the same at interp-level:
        w_moddict = space.getattr(w_mod, space.wrap('__dict__'))
        w_old = space.call_method(w_moddict, 'copy')
        space.call_method(w_moddict, 'clear')
        space.setitem(w_moddict, space.wrap('__rpython__'), w_old)
        for key in ['__name__', '__doc__', 'RPythonError']:
            w_key = space.wrap(key)
            try:
                w1 = space.getitem(w_old, w_key)
            except OperationError:
                pass # key absent from the old dict: nothing to preserve
            else:
                space.setitem(w_moddict, w_key, w1)
        space.call_method(w_moddict, 'update', w_moduledict)
    except OperationError, e:
        # convert the interp-level error into a CPython exception
        reraise(e)
__init__.allow_someobjects = True
# _____ Define and setup target ___
def target(driver, args):
    """Toolchain hook: build the '_demo' CPython extension module."""
    driver.extmod_name = '_demo'
    return (__init__, [object], CPyAnnotatorPolicy(space))
if __name__ == '__main__':
    # manual timing check, run on top of CPython
    import sys
    if len(sys.argv) <= 1:
        N = 500000
    else:
        N = int(sys.argv[1])
    print 'Timing for %d iterations...' % N
    print demo.measuretime(space, N, space.W_Object(int)), 'seconds'
| Python |
# tiny sanity-check script: prints a begin marker, 6*7, and an end marker
print '--- beginning of app_example.py ---'
print 6*7
print '--- end of app_example.py ---'
| Python |
# patches for the Boehm GC for PyPy under Windows
"""
How to build a pypy compatible version of the Boehm collector
for Windows and Visual Studio .net 2003.
First of all, download the official Boehm collector suite
from http://www.hpl.hp.com/personal/Hans_Boehm/gc/gc_source/gc.tar.gz
At the time of writing (2005-10-06) this contains version gc6.5 .
Unpack this folder somewhere, for instance to "d:\tmp".
Change to this folder using
d:
cd \tmp\gc6.5
Then copy the file NT_THREADS_MAKEFILE to Makefile:
copy NT_THREADS_MAKEFILE Makefile
This file is the general-purpose gc dll makefile. For some internal
reasons, this file's defaults are bad for PyPy. The early initialisation
in DllMain() inhibits the changes necessary for PyPy. Use this script to
do a patch: (assuming that you have d:\pypy\dist\pypy\translator\goal)
python d:\pypy\dist\pypy\translator\goal\win32\gc_patch_windows.py
Now, your makefile is patched a little bit. In particular,
ALL_INTERIOR_POINTERS is now undefined, which PyPy wants to have
NO_GETENV is specified, since we don't want dependencies
and the name of the .lib and .dll files is changed to gc_pypy.???
Now you need to build your gc, either as a debug or as a release
build. First of all, make sure that you have your environment prepared.
Please note that you will need to use Microsoft's cmd, as cygwin bash
doesn't correctly handle the batch file in the next step.
With my setup, I have to do
"e:\Programme\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"
After that, you can either build a release or a debug gc.
After a successful build, you need to enable gc_pypy.dll for your compiler.
There are many ways to install this. The following recommendation just
works without changing your environment variables. I think this is the
easiest way possible, but this is a matter of taste. What I did is:
nmake CFG="gc - Win32 Release"
After the build, you will find a gc_pypy.dll file in the Release folder.
Copy this file to c:\windows\system32 or any other folder that is always
in your PATH variable.
Also, copy Release\gc_pypy.lib to (in my case)
"e:\Programme\Microsoft Visual Studio .NET 2003\Vc7\lib";
finally, copy d:\tmp\gc6.5\include to
"e:\Programme\Microsoft Visual Studio .NET 2003\Vc7\include"
and rename this folder to "gc", so that "gc/gc.h" is valid.
That's all, folks!
In case of a debug build, replace "Release" by "Debug", and also copy
gc_pypy.pdb to your lib folder. This allows you to use source-level
debugging. Please note: If you want to both build the default gc.dll
and gc_pypy.dll, please delete the Debug resp. Release folders in
between. The generated .sbr files are in the way.
Please use the above recipe and report any bugs to me.
In case of trouble, I also can provide you with pre-built dlls.
Note: We also could have solved this by including the gc source
into the PyPy build. This may or may not become necessary if something
changes dramatically, again. As long as this is not needed, I prefer
this simple solution.
Summary transcript of the steps involved: (please adjust paths)
d:
cd \tmp\gc6.5
copy NT_THREADS_MAKEFILE Makefile
python d:\pypy\dist\pypy\translator\goal\win32\gc_patch_windows.py
"e:\Programme\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"
nmake CFG="gc - Win32 Release"
copy Release\gc_pypy.dll c:\windows\system32
copy Release\gc_pypy.lib "e:\Programme\Microsoft Visual Studio .NET 2003\Vc7\lib"
mkdir "e:\Programme\Microsoft Visual Studio .NET 2003\Vc7\include\gc"
copy include "e:\Programme\Microsoft Visual Studio .NET 2003\Vc7\include\gc"
cheers - chris
"""
REPLACE = {
    '"ALL_INTERIOR_POINTERS"': '"NO_GETENV"',
}
# every gc.* output file becomes gc_pypy.* so the patched build cannot
# clash with a stock gc.dll installation
for ending in "lib exp map pdb bsc dll pch".split():
    REPLACE["gc.%s" % ending] = "gc_pypy.%s" % ending
def change_settings(src):
    """Return src with every REPLACE key substituted; raise ValueError
    when a key is missing (wrong or already-patched makefile)."""
    for old, new in REPLACE.items():
        replaced = src.replace(old, new)
        if replaced == src:
            raise ValueError("this makefile does not contain %s" % old)
        src = replaced
    return src
def find_file():
    """Return the makefile's name in the current directory, matched
    case-insensitively; raise ValueError when there is none."""
    import os
    for name in os.listdir("."):
        if name.lower() == 'makefile':
            return name
    raise ValueError('Makefile not found')
try:
    # locate the makefile in the current directory, patch it in place
    name = find_file()
    source = change_settings(file(name).read())
    file(name, "w").write(source)
    print "Updated your Makefile to fit PyPy's needs. Your lib will be named gc_pypy.dll"
    print "and gc_pypy.lib. Please put them into appropriate places, see __doc__."
except:
    # anything went wrong: show the full instructions, then re-raise
    print __doc__
    raise
| Python |
import autopath
from pypy.config.pypyoption import get_pypy_config
from pypy.translator.goal import translate
from pypy.translator.goal import targetpypystandalone
from pypy.translator.driver import TranslationDriver
import os, sys, traceback, random
def longoptfromname(config, name):
    """Return the shortest '--long-option' spelling registered for the
    config option `name`."""
    from pypy.config.makerestdoc import get_cmdline
    # begin horror (reaching into the config implementation internals)
    h, n = config._cfgimpl_get_home_by_path(name)
    opt = getattr(h._cfgimpl_descr, n)
    # end horror
    cmdline = get_cmdline(opt.cmdline, name)
    assert cmdline is not None
    shortest_long_option = 'X'*1000 # sentinel longer than any real option
    for cmd in cmdline.split():
        if cmd.startswith('--') and len(cmd) < len(shortest_long_option):
            shortest_long_option = cmd
    return shortest_long_option
def exe_name_from_options(config, opts):
    """Build a descriptive executable name, 'pypy-<backend>-<rev>[-opts]',
    from the translation config and the option overrides in `opts`."""
    from pypy.module.sys.version import svn_revision
    backend = config.translation.backend
    if not backend:
        backend = 'c' # default backend
    rev = svn_revision()
    nameparts = []
    for opt, v in opts.iteritems():
        if opt == 'translation.backend':
            backend = v
        optname = longoptfromname(config, opt).strip('-')
        if v is False:
            optname = 'no-' + optname
        elif v is not True:
            optname += '=' + str(v)
        nameparts.append(optname)
    suffix = ''
    if nameparts:
        def k(s):
            # sort key ignoring the 'no-' prefix so on/off variants group
            if s.startswith('no-'):
                return s[3:]
            else:
                return s
        nameparts.sort(key=k)
        suffix = '-' + '-'.join(nameparts)
    return 'pypy-%s-%d%s'%(backend, rev, suffix)
def _build(config, exe_name):
    """Drive one translation: compile `exe_name` from targetpypystandalone."""
    driver = TranslationDriver.from_targetspec(targetpypystandalone.__dict__,
                                              config=config)
    driver.exe_name = exe_name
    driver.compile()
def build_pypy_with_options(basedir, opts):
    """Fork and translate one PyPy with the option overrides `opts`.

    The child redirects stdin to /dev/null and stdout/stderr to
    '<exe_name>-log', then exits 0/1; the parent waits and returns
    (exe_name, result-string).
    """
    config = get_pypy_config(translate.OVERRIDES, translating=True)
    try:
        config.set(**opts)
    except:
        # invalid option combination: report it without attempting a build
        return exe_name_from_options(config, opts), "didn't configure"
    exe_name = os.path.join(basedir, exe_name_from_options(config, opts))
    print exe_name,
    sys.stdout.flush()
    pid = os.fork()
    if pid == 0:
        # child process: silence stdin, log all output, then build
        logfile = open(exe_name + '-log', 'w')
        davenull = os.open('/dev/null', os.O_RDONLY)
        os.dup2(davenull, 0)
        os.dup2(logfile.fileno(), 1)
        os.dup2(logfile.fileno(), 2)
        try:
            try:
                r = _build(config, exe_name)
            finally:
                logfile.close()
        except:
            os._exit(1)
        else:
            os._exit(0)
    else:
        # parent: the child's exit status is the build verdict
        pid, status = os.waitpid(pid, 0)
        if status:
            r = 'failed'
        else:
            r = 'succeeded'
    print r
    return exe_name, r
def get_options(fname):
    """Parse an options file into an iterator of option dicts.

    Each line has the form ``option.name: val1, val2, ...``; the
    cross-product of all lines' values is generated, one dict per
    combination.

    NOTE(security): option values are eval()'ed, so the options file
    must be trusted input.
    """
    def gen_opts(sofar, remaining):
        # recursively extend `sofar` with one (key, value) pick per line
        if not remaining:
            yield sofar
        else:
            for (k, v) in remaining[0]:
                d2 = sofar.copy()
                d2[k] = v
                for d in gen_opts(d2, remaining[1:]):
                    yield d
    options = []
    for line in open(fname):
        # (removed a dead, never-used `l = []` local from the original)
        optname, options_ = line.split(':')
        options.append([(optname.strip(), eval(optval.strip()))
                        for optval in options_.split(',')])
    return gen_opts({}, options)
if __name__ == '__main__':
    # usage: multibuild.py <basedir> <optionsfile>
    basedir = sys.argv[1]
    optionsfile = sys.argv[2]
    results = []
    options = list(get_options(optionsfile))
    # build in random order -- presumably so interrupted runs still cover
    # a varied sample of the option space
    random.shuffle(options)
    for opts in options:
        results.append(build_pypy_with_options(basedir, opts))
    out = open(os.path.join(basedir, 'results'), 'w')
    for exe, r in results:
        print >>out, exe, r
| Python |
# functions to query information out of the translator and annotator from the debug prompt of translate
import types
import re
import pypy.annotation.model as annmodel
import pypy.objspace.flow.model as flowmodel
class typerep(object):
    """Hashable summary of an object's type, also recording whether a
    method object was bound (im_self/__self__ set) or unbound."""

    def __init__(self, x):
        self.typ = getattr(x, '__class__', type(x))
        if hasattr(x, 'im_self'):
            self.bound = x.im_self is not None
        elif hasattr(x, '__self__'):
            self.bound = x.__self__ is not None
        else:
            self.bound = None

    def __hash__(self):
        return hash(self.typ)

    def __cmp__(self, other):
        # order by type name, then boundness, then the type itself
        mine = (self.typ.__name__, self.bound, self.typ)
        theirs = (other.typ.__name__, other.bound, other.typ)
        return cmp(mine, theirs)

    def __str__(self):
        name = self.typ.__name__
        if self.bound is None:
            s = name
        elif self.bound:
            s = 'bound-%s' % name
        else:
            s = 'unbound-%s' % name
        if self.typ.__module__ == '__builtin__':
            s = "*%s*" % s
        return s
def typereps(bunch):
    """Deduplicated, sorted list of typereps for the objects in bunch."""
    seen = dict.fromkeys([typerep(obj) for obj in bunch])
    reps = seen.keys()
    reps.sort()
    return reps
def roots(classes):
    """Reduce `classes` to its independent hierarchy roots.

    Returns (notbound, roots): `notbound` records whether None appeared
    in `classes`.  With exactly one real class left, that class itself
    is returned; otherwise a tuple of mutually unrelated base classes.
    """
    work = list(classes)
    res = []
    notbound = False
    while None in work:
        work.remove(None)
        notbound = True
    if len(work) == 1:
        # bug fix: return the surviving class (work[0]); classes[0]
        # could still be one of the Nones we just filtered out
        return notbound, work[0]
    while work:
        cand = work.pop()
        for cls in work:
            if issubclass(cls, cand):
                continue
            if issubclass(cand, cls):
                # cls is more general: it becomes the candidate root
                cand = cls
                continue
        res.append(cand)
        work = [cls for cls in work if not issubclass(cls, cand)]
    # sanity check: the surviving roots must be pairwise unrelated
    for x in res:
        for y in res:
            if x != y:
                assert not issubclass(x, y), "%s %s %s" % (classes, x,y)
                assert not issubclass(y, x), "%s %s %s" % (classes, x,y)
    return notbound, tuple(res)
def callablereps(bunch):
    """Summarize (clsdef, func) pairs: class roots plus callable typereps."""
    funcs = [fn for _, fn in bunch]
    classes = [cd and cd.cls for cd, _ in bunch]
    return roots(classes), tuple(typereps(funcs))
def prettyfunc(func):
    """Render a function as '(module:firstline)[Class.]name' for display."""
    module = getattr(func, '__module__', None) or '?'
    where = "(%s:%s)" % (module, func.func_code.co_firstlineno)
    name = getattr(func, '__name__', None) or 'UNKNOWN'
    cls = getattr(func, 'class_', None)
    if cls:
        name = "%s.%s" % (cls.__name__, name)
    return where + name
def prettycallable((cls, obj)):
    """Render a (class-info, callable) pair as 'cls::obj' for display.

    `cls` may be None, a class, a tuple of root classes, or a
    (notbound, classes) pair as produced by roots()."""
    if cls is None or cls == (True, ()):
        cls = None
    else:
        notbound = False
        if isinstance(cls, tuple) and isinstance(cls[0], bool):
            notbound, cls = cls
        if isinstance(cls, tuple):
            cls = "[%s]" % '|'.join([x.__name__ for x in cls])
        else:
            cls = cls.__name__
        if notbound:
            cls = "_|%s" % cls # leading '_|' marks unbound access
    if isinstance(obj, types.FunctionType):
        obj = prettyfunc(obj)
    elif isinstance(obj, tuple):
        obj = "[%s]" % '|'.join([str(x) for x in obj])
    else:
        obj = str(obj)
        if obj.startswith('<'):
            obj = obj[1:-1] # strip the surrounding '<...>' of default reprs
    if cls is None:
        return str(obj)
    else:
        return "%s::%s" % (cls, obj)
def prettybunch(bunch):
    """One-line description of a bunch: its lone element, or its types."""
    if len(bunch) == 1:
        parts = ["one", iter(bunch).next()]
    else:
        parts = ["of type(s)"] + typereps(bunch)
    return ' '.join([str(part) for part in parts])
def pbcaccess(translator):
    """Print, for each PBC access set, how many objects it contains and
    which attribute names are accessed on them."""
    annotator = translator.annotator
    for inf in annotator.getpbcaccesssets().root_info.itervalues():
        objs = inf.objects
        print len(objs), prettybunch(objs), inf.attrs.keys()
# PBCs
def pbcs(translator):
    """Break down the annotator's prebuilt constants by kind and print counts."""
    bk = translator.annotator.bookkeeper
    xs = bk.pbccache.keys()
    funcs = [x for x in xs if isinstance(x, types.FunctionType)]
    staticmethods = [x for x in xs if isinstance(x, staticmethod)]
    # bound vs. unbound methods are distinguished by im_self
    binstancemethods = [x for x in xs if isinstance(x, types.MethodType) and x.im_self]
    ubinstancemethods = [x for x in xs if isinstance(x, types.MethodType) and not x.im_self]
    typs = [x for x in xs if isinstance(x, (type, types.ClassType))]
    rest = [x for x in xs if not isinstance(x, (types.FunctionType, staticmethod, types.MethodType, type, types.ClassType))]
    for objs in (funcs, staticmethods, binstancemethods, ubinstancemethods, typs, rest):
        print len(objs), prettybunch(objs)
# mutable captured "constants")
def mutables(translator):
    """Print a summary of the mutable constants seen during annotation."""
    bk = translator.annotator.bookkeeper
    xs = bk.seen_mutable.keys()
    print len(xs), prettybunch(xs)
def prettypatt(patts):
    """Render call-shape tuples as readable patterns.

    Each entry is (positional_count, keyword_names, has_star, has_starstar);
    the list is sorted in place, and each shape becomes e.g. '(+2, a=, *)'.
    """
    patts.sort()
    rendered = []
    for sh_cnt, sh_ks, sh_st, sh_stst in patts:
        pieces = ["+%d" % sh_cnt]
        pieces.extend(["%s=" % kw for kw in sh_ks])
        if sh_st:
            pieces.append('*')
        if sh_stst:
            pieces.append('**')
        rendered.append("(%s)" % ', '.join(pieces))
    return ' '.join(rendered)
def pbccallsanity(translator):
    """Cross-check the PBC callables table and print a categorized census.

    Asserts invariants about types and bound methods in the table, then
    prints counts of: classes, bound methods from/not from PBCs, and
    functions grouped by how many call entries they have.
    """
    callb = translator.annotator.getpbccallables()
    bk = translator.annotator.bookkeeper
    typs = [x for x in callb if isinstance(x, (type, types.ClassType))]
    for t in typs:
        # classes must appear exactly once, unbound
        assert len(callb[t]) == 1
        assert callb[t] == {(None,t): True}
    print len(typs), "of ",prettycallable(callablereps([(None, Exception), (None, object)]))
    ubm = [x for x in callb if isinstance(x, types.MethodType) and x.im_self is None]
    assert len(ubm) == 0
    bm = [x for x in callb if isinstance(x, types.MethodType) and x.im_self is not None]
    frompbc = 0
    notfrompbc = []
    for b in bm:
        assert len(callb[b]) == 1
        assert callb[b] == {(None,b): True}
        if b.im_class in bk.pbctypes or (b.im_class is None and b.im_self in bk.pbccache):
            frompbc += 1
        else:
            notfrompbc.append(b)
    # throw-away class only used to obtain a bound-method sample for labels
    class A:
        def m():
            pass
    print frompbc, "of", prettycallable(callablereps([(None, A().m)])), "from PBCs"
    print len(bm)-frompbc, "of", prettycallable(callablereps([(None, A().m)])), "not from PBCs"
    if len(notfrompbc) < 40:
        for b in notfrompbc:
            print " "*4, prettycallable((None, b))
    fs = [x for x in callb if isinstance(x, types.FunctionType)]
    # every callable must fall in exactly one of the categories above
    assert len(fs) + len(typs) + frompbc + len(notfrompbc) == len(callb)
    plain = []
    r = []
    for x in fs:
        if len(callb[x]) == 1 and callb[x].keys()[0][0] == None:
            r.extend(callb[x].keys())
            plain.append(x)
    print len(plain), "of", prettycallable(callablereps(r))
    r = []
    for x in fs:
        if x not in plain and len(callb[x]) == 1:
            r.extend(callb[x].keys())
    print len(r), "of", prettycallable(callablereps(r))
    r = []
    b_nb = []
    for x in fs:
        # functions used both bound and unbound (two entries, one class-less)
        if len(callb[x]) == 2 and [1 for clsdef, f in callb[x].keys() if clsdef is None]:
            r.extend(callb[x].keys())
            b_nb.append(x)
    print len(r), "of", prettycallable(callablereps(r))
    print "- other -"
    for x in fs:
        if len(callb[x]) >= 2 and x not in b_nb:
            print ' '.join([prettycallable((classdef and classdef.cls, func)) for (classdef,func) in callb[x].keys()])
def pretty_els(objs):
    """Format (classdef, obj) pairs; multiple entries get brace-wrapped."""
    rendered = [prettycallable((classdef and classdef.cls, obj))
                for classdef, obj in objs]
    joined = ' '.join(rendered)
    if len(rendered) == 1:
        return joined
    return "{%s}" % joined
def pbccall(translator):
    """Print statistics about PBC call families, grouped by call pattern."""
    fams = translator.annotator.getpbccallfamilies().root_info.itervalues()
    one_pattern_fams = {}
    rest = []
    for fam in fams:
        shapes = fam.patterns
        if len(shapes) != 1:
            rest.append((len(fam.objects), fam.objects, shapes.keys()))
        else:
            # family always called with a single shape: bucket by
            # (callable kinds, pattern) and count families and objects
            kinds = callablereps(fam.objects)
            flavor = tuple(kinds), shapes.keys()[0]
            cntrs = one_pattern_fams.setdefault(flavor, [0,0])
            cntrs[0] += 1
            cntrs[1] += len(fam.objects)
    def pretty_nfam(nfam):
        # grammar helper for the family count
        if nfam == 1:
            return "1 family"
        else:
            return "%d families" % nfam
    def pretty_nels(kinds, nels, nfam):
        # grammar helper for the element count
        if nels == 1 or nels == nfam:
            return "one %s" % prettycallable(kinds)
        else:
            return "in total %d %s" % (nels, prettycallable(kinds))
    items = one_pattern_fams.items()
    items.sort(lambda a,b: cmp((a[0][1],a[1][1]), (b[0][1],b[1][1]))) # sort by pattern and then by els
    for (kinds, patt), (nfam, nels) in items:
        print pretty_nfam(nfam), "with", pretty_nels(kinds, nels, nfam), "with one call-pattern:", prettypatt([patt])
    print "- many patterns -"
    manycallb = False
    rest.sort(lambda a,b: cmp((a[0],a[2]), (b[0],b[2])))
    for n, objs, patts in rest:
        if len(objs) > 1 and not manycallb:
            manycallb = True
            print " - many callables, many patterns -"
        print "family of", pretty_els(objs), "with call-patterns:", prettypatt(patts)
def pbcbmsanity(translator):
callb = translator.annotator.getpbccallables()
bk = translator.annotator.bookkeeper
bmeths = [x for x in callb if isinstance(x, types.MethodType) and x.im_self is not None]
print "%d bound-methods" % len(bmeths)
fams = translator.annotator.getpbccallfamilies()
plural_bm_families = {}
one_el = 0
for bm in bmeths:
notpbc = bm.im_self not in bk.pbccache
freestanding = bm.im_func in callb
if notpbc or freestanding:
print "! %s," % bm,
if notpbc:
print "of non-PBC %s,",
if freestanding:
print "found freestanding too"
bm_fam = fams[(None, bm)]
if len(bm_fam.objects) == 1:
one_el += 1
else:
plural_bm_families[bm_fam] = True
print "%d families of one bound-method" % one_el
print "%d families with more than just one bound-method" % len(plural_bm_families)
for bm_fam in plural_bm_families:
print pretty_els(bm_fam.objects)
return plural_bm_families
class Counters(dict):
    """Dict keyed by annotation outcomes.

    Outcomes involving annmodel.SomeObject compare by __eq__ but
    presumably do not hash consistently with that equality — TODO confirm;
    lookups therefore fall back to a linear scan to locate the equal key
    already stored, and use that object as the real key.
    """
    def __getitem__(self, outcome):
        if (isinstance(outcome, annmodel.SomeObject) or
            isinstance(outcome, tuple) and outcome and
            isinstance(outcome[0], annmodel.SomeObject)):
            # linear scan: canonicalize to the equal key already present
            for k in self.iterkeys():
                if k == outcome:
                    outcome = k
                    break
            else:
                raise KeyError
        return dict.__getitem__(self, outcome)
    def get(self, outcome, defl):
        # dict.get with the canonicalizing __getitem__ above
        try:
            return self[outcome]
        except KeyError:
            return defl
    def __setitem__(self, outcome, c):
        if (isinstance(outcome, annmodel.SomeObject) or
            isinstance(outcome, tuple) and outcome and
            isinstance(outcome[0], annmodel.SomeObject)):
            # same canonicalization; a miss simply stores under the new key
            for k in self.iterkeys():
                if k == outcome:
                    outcome = k
                    break
        return dict.__setitem__(self, outcome, c)
def keyrepr(k):
    """Stringify a classification key; tuples are rendered recursively."""
    if not isinstance(k, tuple):
        return str(k)
    return "(%s)" % ', '.join([keyrepr(part) for part in k])
def statsfor(t, category):
    """Print an outcome histogram for one bookkeeper stats category."""
    stats = t.annotator.bookkeeper.stats
    for_category = stats.classify[category]
    print "%s total = %d" % (category, len(for_category))
    counters = Counters()
    for pos, outcome in for_category.iteritems():
        counters[outcome] = counters.get(outcome, 0) + 1
    # column width for aligned output; fall back to unaligned if too wide
    w = max([len(keyrepr(o)) for o in counters.keys()])+1
    if w < 60:
        for outcome, n in counters.iteritems():
            print "%*s | %d" % (w, keyrepr(outcome), n)
    else:
        for outcome, n in counters.iteritems():
            print "%s | %d" % (keyrepr(outcome), n)
def statsforstrformat(t):
    """Tally %%-format conversion specifiers against annotated argument types."""
    stats = t.annotator.bookkeeper.stats
    stats = stats.classify['strformat']
    result = {}
    for fmt, args in stats.itervalues():
        # conversion specifiers, including 'l'-modified ones like %ld
        fmts = re.findall("%l?.", fmt)
        if not isinstance(args, tuple):
            args = (args,)
        for f, a in zip(fmts, args):
            result[(f,a)] = result.get((f,a), 0) + 1
    for (f,a), c in result.iteritems():
        print "%s %s %d" % (f, keyrepr(a), c)
def statbuiltins(t):
    """Run statsfor() on every stats category recording a builtin call."""
    stats = t.annotator.bookkeeper.stats.classify
    for k in stats:
        if k.startswith('__builtin__'):
            statsfor(t, k)
def dicts(t):
    """Print the distinct (key, value) annotation pairs of all SomeDicts."""
    ann = t.annotator
    r = []
    def sdicts():
        # all SomeDict annotations, from bindings and the immutable cache
        for so in ann.bindings.itervalues():
            if isinstance(so, annmodel.SomeDict):
                yield so
        for so in ann.bookkeeper.immutable_cache.itervalues():
            if isinstance(so, annmodel.SomeDict):
                yield so
    for so in sdicts():
        sk, sv = so.dictdef.dictkey.s_value, so.dictdef.dictvalue.s_value
        # linear duplicate check (annotations compare by ==, not hash)
        for x in r:
            if x == (sk, sv):
                break
        else:
            r.append((sk, sv))
    for x in r:
        print x
# debug helper
def tryout(f, *args):
    """Debug helper: call f(*args), dumping any traceback instead of raising."""
    import traceback
    try:
        f(*args)
    except:
        # deliberately catches everything -- this is interactive debugging aid
        traceback.print_exc()
def graph_footprint(graph):
    """Return (block count, link count, operation count) for a flow graph."""
    class Counter:
        # mutable cell shared with the visitor closure below
        blocks = 0
        links = 0
        ops = 0
    count = Counter()
    def visit(block):
        if isinstance(block, flowmodel.Block):
            count.blocks += 1
            count.ops += len(block.operations)
        elif isinstance(block, flowmodel.Link):
            count.links += 1
    flowmodel.traverse(visit, graph)
    return count.blocks, count.links, count.ops
# better used before backends opts
def duplication(t):
    """Report function names compiled more than once into same-shaped graphs."""
    d = {}
    funcs = t.flowgraphs.keys()
    print len(funcs)
    for f in funcs:
        # same code object + same graph footprint == likely duplicate graphs
        fingerprint = f.func_code, graph_footprint(t.flowgraphs[f])
        d.setdefault(fingerprint ,[]).append(f)
    l = []
    for fingerprint, funcs in d.iteritems():
        if len(funcs) > 1:
            l.append((fingerprint[0].co_name, len(funcs)))
    l.sort()
    for name, c in l:
        print name, c
def backcalls(t):
    """Find back-edges (recursion cycles) in the translator call graph.

    Classic DFS coloring: GRAY marks functions on the current path, so a
    GRAY successor closes a cycle.  Prints each witness path and returns
    the list of (caller, callee) back-edges.
    """
    g = {}
    for caller, callee in t.callgraph.itervalues():
        g.setdefault(caller,[]).append(callee)
    back = []
    color = {}
    WHITE, GRAY, BLACK = 0,1,2
    def visit(fcur,witness=[]):
        color[fcur] = GRAY
        # dict.fromkeys de-duplicates the successor list
        for f in dict.fromkeys(g.get(fcur, [])):
            fcolor = color.get(f, WHITE)
            if fcolor == WHITE:
                visit(f,witness+[f])
            elif fcolor == GRAY:
                print "*", witness, f
                back.append((fcur, f))
        color[fcur] = BLACK
    visit(t.entrypoint, [t.entrypoint])
    return back
#
def worstblocks_topten(t, n=10):
    """Print the n most-often-reflown annotator blocks (needs debug collecting)."""
    from pypy.tool.ansi_print import ansi_print
    ann = t.annotator
    h = [(count, block) for block, count in ann.reflowcounter.iteritems()]
    h.sort()
    if not h:
        print "annotator should have been run with debug collecting enabled"
        return
    print
    # 36 is the ANSI color code used for the frame
    ansi_print(',----------------------- Top %d Most Reflown Blocks -----------------------.' % n, 36)
    for i in range(n):
        if not h:
            break
        count, block = h.pop()
        ansi_print(' #%3d: reflown %d times |' % (i+1, count), 36)
        t.about(block)
    ansi_print("`----------------------------------------------------------------------------'", 36)
    print
| Python |
"""
A simple standalone target for the scheme interpreter.
"""
import autopath
import sys
from pypy.rlib.streamio import open_file_as_stream
from pypy.lang.scheme.ssparser import parse
from pypy.lang.scheme.object import SchemeQuit, ExecutionContext
# __________ Entry point __________
def entry_point(argv):
    """RPython entry point: evaluate a Scheme source file given on argv.

    Returns 0 on success (or on (quit)), 1 on usage error.
    """
    if len(argv) == 2:
        f = open_file_as_stream(argv[1])
        t = parse(f.readall())
        ctx = ExecutionContext()
        try:
            for sexpr in t:
                w_retval = sexpr.eval(ctx)
                print w_retval.to_string()
        except SchemeQuit, e:
            # raised by the (quit) primitive; a normal way to stop
            return 0
        return 0
    else:
        print "Usage: %s schemesourcefile" % argv[0]
        return 1
# _____ Define and setup target ___
def target(driver, args):
    """Translation-toolchain hook: set the executable name, expose entry_point."""
    driver.exe_name = 'ss-%(backend)s'
    return entry_point, None
# Allow running the interpreter untranslated, on top of CPython.
if __name__ == '__main__':
    entry_point(sys.argv)
| Python |
"""
A simple deeply recursive target.
The target below specifies None as the argument types list.
This is a case treated specially in driver.py . If the list
of input types is empty, it is meant to be a list of strings,
actually implementing argv of the executable.
"""
import os, sys
def debug(msg):
    """Write a debug line straight to stderr (fd 2), bypassing sys.stdout."""
    os.write(2, "debug: " + msg + '\n')
# __________ Entry point __________
def ackermann(x, y):
    """Two-argument Ackermann function.

    Deliberately deeply recursive -- this target exists to stress the
    translated executable's recursion handling.
    """
    if x == 0:
        # base case: A(0, y) = y + 1
        return y + 1
    if y == 0:
        # A(x, 0) = A(x - 1, 1)
        return ackermann(x - 1, 1)
    # A(x, y) = A(x - 1, A(x, y - 1))
    return ackermann(x - 1, ackermann(x, y - 1))
def entry_point(argv):
    """Compute ackermann(3, 12) and report the result on stderr; always 0."""
    debug(str(ackermann(3, 12)) + "\n")
    return 0
# _____ Define and setup target ___
def target(*args):
    # None as the argument-type list means: pass the executable's argv
    # (special-cased in driver.py, see the module docstring).
    return entry_point, None
| Python |
from pypy.translator.goal import targetrpystonex
LOOPS = 2000000  # pystone iteration count baked into this target
# __________ Entry point __________
# _____ Define and setup target _____
# _____ Run translated _____
# make_target_definition builds all three callables for the given loop count.
(entry_point,
 target,
 run) = targetrpystonex.make_target_definition(LOOPS)
def get_llinterp_args():
    """Arguments passed to entry_point when run on the llinterpreter."""
    return [1]
| Python |
#! /usr/bin/env python
# Nightly build/benchmark driver: shared configuration and environment setup.
import os
homedir = os.getenv('HOME')
os.environ['PATH'] += ':/usr/local/bin:/usr/local/llvm/cfrontend/ppc/llvm-gcc/bin:'+homedir+'/bin'
import autopath
import py
import time, os, sys, stat
from pypy.translator.llvm.buildllvm import Builder
os.umask(022) # allow everyone to read/execute the produced pypy-c's
# working directory used by the translator session for this user
tmpdir = py.std.tempfile.gettempdir() + '/usession-' + os.environ['USER'] + '/'
cflags = "-O3"
lflags = "-lgc -lm -lpthread"
dry_run = False  # when True, run() only echoes commands
def run(cmd):
    """Echo and execute a shell command; return os.system's status (0 = OK).

    Honors the module-level dry_run flag: when set, only prints the command.
    """
    print 'RUN:', cmd
    sys.stdout.flush()
    result = 0 #OK
    if not dry_run:
        result = os.system(cmd) #note: result is system dependent but works on Linux the way we want
    return result
def update_pypy():
    """svn-update the pypy-dist working copy."""
    os.chdir(homedir + '/projects/pypy-dist')
    run('/usr/local/bin/svn up 2>&1')
def update_llvm():
    """cvs-update the LLVM checkout and rebuild its tools."""
    os.chdir(homedir + '/projects/llvm')
    run('cvs -q up 2>&1')
    # -k: keep going past failures; -j3: parallel build
    run('make -k -j3 tools-only 2>&1')
def compile_llvm_variants(revision, features):
    """Build the LLVM executables: bitcode, C-backend (profiled), and x86."""
    ll2bc(revision, features)
    # richards is used as the profile workload for the PGO build
    bc2c_exe(revision, features, 'from richards import *;main(iterations=1)')
    bc2x86_exe(revision, features, 'llvm')
def ll2bc(revision, features):
    """Archive pypy.ll, then assemble+optimize it into pypy.bc (also archived)."""
    if features:
        features = '-' + features
    cmd = 'cp %spypy.ll pypy/translator/goal/archive/pypy%s-%s.ll' % (tmpdir, features, revision)
    run(cmd)
    # use the same optimization flags the LLVM backend itself would use
    opts = Builder(None).optimizations()
    cmd = '~/bin/llvm-as < %spypy.ll | ~/bin/opt %s -f -o %spypy.bc' % (
        tmpdir, opts, tmpdir)
    run(cmd)
    cmd = 'cp %spypy.bc pypy/translator/goal/archive/pypy%s-%s.bc' % (tmpdir, features, revision)
    run(cmd)
def bc2c_exe(revision, features, profile_command=None):
    """Lower pypy.bc through llc's C backend and gcc into an executable.

    If profile_command is given, additionally build a profile-guided
    variant (-fprofile-generate, run the command, then -fprofile-use).
    """
    if features:
        features = '-' + features
    filename = "pypy-llvm-%s%s-c" % (revision, features)
    b = tmpdir + filename
    run("~/bin/llc %spypy.bc -march=c -f -o %s.c" % (tmpdir, b))
    run("cp %s.c pypy/translator/goal/archive" % b)
    run("gcc %s.c %s -S -o %s.s" % (b, cflags, b))
    run("cp %s.s pypy/translator/goal/archive" % b)
    run("gcc %s.s %s -o %s" % (b, lflags, b))
    run("cp %s pypy/translator/goal" % b)
    if profile_command:
        # pass 1: instrumented build, then run the workload to collect profiles
        run("gcc %s.c -fprofile-generate %s -S -o %s.s" % (b, cflags, b))
        run("gcc %s.s -fprofile-generate %s -o %s" % (b, lflags, b))
        run("%s -c '%s'" % (b, profile_command))
        # pass 2: rebuild using the collected profile data
        run("gcc %s.c -fprofile-use %s -S -o %s.s" % (b, cflags, b))
        run("cp %s.s pypy/translator/goal/archive/%s-prof.s" % (b, filename))
        run("gcc %s.s -fprofile-use %s -o %s" % (b, lflags, b))
        run("cp %s pypy/translator/goal/%s-prof" % (b, filename))
def bc2x86_exe(revision, features, name_extra, llc_extra_options=''):
    """Lower pypy.bc straight to native assembly via llc, then link with gcc."""
    if features:
        features = '-' + features
    b = "%spypy-llvm-%s%s-%s" % (tmpdir, revision, features, name_extra)
    cmd = "~/bin/llc %spypy.bc %s -f -o %s.s" % (tmpdir, llc_extra_options, b)
    run(cmd)
    cmd = 'cp %s.s pypy/translator/goal/archive' % b
    run(cmd)
    cmd = "gcc %s.s %s -o %s" % (b, lflags, b)
    run(cmd)
    cmd = "cp %s pypy/translator/goal" % b
    run(cmd)
def compile(backend):
    """Translate pypy with one backend spec and archive the result.

    The spec is 'backend[--feature...]'; features starting with '_' are
    target options, others are translate.py options.  LLVM gets special
    treatment (bitcode variants); other backends just rename the produced
    executable to include revision and features.
    """
    try:
        backend, features = backend.split('--', 1)
        # '--foo' -> translate.py option, '--_foo' -> target option
        featureoptions = ''.join([" --" + f for f in features.split('--') if f[0] != '_'])
        targetoptions = ''.join([" --" + f[1:] for f in features.split('--') if f[0] == '_'])
    except:
        # no '--' in the spec: plain backend without features
        features = ''
        featureoptions = ''
        targetoptions = ''
    if backend == 'llvm':
        translateoptions = ' --source --raisingop2direct_call'
    else:
        translateoptions = ''
    def normalize(f):
        # normalized feature names used in the archived filename
        if f.startswith('_'):
            f = f[1:]
        if f.startswith('profopt'):
            f = 'prof'
        return f
    features = '--'.join([normalize(f) for f in features.split('--')])
    os.chdir(homedir + '/projects/pypy-dist/pypy/translator/goal')
    run('/usr/local/bin/python translate.py --backend=%(backend)s%(featureoptions)s%(translateoptions)s --text --batch targetpypystandalone.py %(targetoptions)s 2>&1' % locals())
    if backend == 'llvm':
        run('mv %s/entry_point.ll %s/pypy.ll' % (tmpdir, tmpdir))
    os.chdir(homedir + '/projects/pypy-dist')
    try:
        revision = '%d' % (py.path.svnwc('.').info().rev,)
    except:
        revision = 'unknown'
    basename = homedir + '/projects/pypy-dist/pypy/translator/goal/' + 'pypy-' + backend
    realname = basename + '-' + revision
    if features:
        realname += "-" + features
    if backend == 'llvm': #create llvm exectutable from the current source
        compile_llvm_variants(revision, features)
    elif os.path.exists(basename): #copy executable
        run("mv %s %s" % (basename, realname))
        if backend == 'cli':
            # CLI also produces a companion data directory
            basename_dir = basename + '-data'
            realname_dir = realname + '-data'
            run("mv %s %s" % (basename_dir, realname_dir))
    #pypy = open(basename, 'rb').read()
    #if len(pypy) > 0:
    #    open(realname, 'wb').write(pypy)
    #os.chmod(realname, stat.S_IRWXU)
    #os.unlink(basename)
def get_load():
    """Return the stripped output of `uptime` (current system load line)."""
    pipe = os.popen('uptime', 'r')
    output = pipe.read().strip()
    pipe.close()
    return output
def benchmark():
    """Run bench-unix.py and, on success, wrap its output in benchmark.html.

    Records uname and the system load before/after so benchmark numbers
    can be judged against machine activity.
    """
    os.chdir(homedir + '/projects/pypy-dist/pypy/translator/goal')
    uname = os.popen('uname -a', 'r').read()
    startload = get_load()
#    result = run('/usr/local/bin/withlock /tmp/cpu_cycles_lock /usr/local/bin/python bench-unix.py 2>&1 | tee benchmark.txt' % locals())
    result = run('/usr/local/bin/python bench-unix.py 2>&1 | tee benchmark.txt' % locals())
    endload = get_load()
    if not dry_run and result == 0:
        f = open('benchmark.html', 'w')
        print >> f, "<html><body>"
        print >> f, "<pre>"
        print >> f, "uname -a:", uname
        print >> f, "Benchmark started:", startload
        print >> f, " ended:", endload
        print >> f
        f.write(open('benchmark.txt').read())
        print >> f, "</pre>"
        print >> f, "</body></html>"
        f.close()
def main(backends=[]):
    """Update sources, compile every backend spec, then benchmark.

    With no explicit specs, a built-in list is used (lines starting with
    '#' are commented out; '_'-prefixed features are target options).
    Note: backends=[] is only compared and rebound, never mutated.
    """
    if backends == []: #_ prefix means target specific option, # prefix to outcomment
        backends = [backend.strip() for backend in """
        llvm--_faassen
        c
        c--stackless--_faassen
        c--_faassen
        c--thread
        c--_objspace=taint
        c--_allworkingmodules
        c--_objspace-std-withtproxy--_faassen
        c--gc=framework--_faassen
        c--_objspace-std-withrope
        cli
        """.split('\n') if backend.strip() and not backend.strip().startswith('#')]
    print time.ctime()
    for backend in backends:
        if backend.startswith('llvm'):
            update_llvm()
            break
    update_pypy()
    for backend in backends:
        try:
            compile(backend)
        except:
            # NOTE(review): 'raise' makes the 'pass' below unreachable;
            # the try/except is currently a no-op left as a toggle point.
            raise
            pass
    benchmark()
    print time.ctime()
    print 80*'-'
if __name__ == '__main__':
    args = sys.argv[1:]
    if args and args[0] == '--benchmark-only':
        benchmark()
    else:
        if args and args[0] == '--dry-run':
            del args[0]
            # module-level flag consumed by run()
            dry_run = True
        main(args)
| Python |
from pypy.translator.gensupp import NameManager
from pypy.translator.squeak.node import FunctionNode, ClassNode, SetupNode
from pypy.translator.squeak.node import MethodNode, SetterNode, GetterNode
from pypy.rpython.ootypesystem.ootype import Record
try:
    set
except NameError:
    # Python < 2.4 compatibility: fall back to the old 'sets' module.
    from sets import Set as set
class GenSqueak:
    """Generate Squeak (Smalltalk) source from the translator's flow graphs.

    Maintains a work queue of code nodes (functions, classes, accessors,
    ...); a node may push its unmet dependencies and re-queue itself, so
    definitions come out in dependency order in the .st fileout.
    """

    def __init__(self, sqdir, translator, modname=None):
        self.sqdir = sqdir
        self.translator = translator
        self.modname = (modname or
                        translator.graphs[0].name)
        # NameManager guarantees uniqueness of generated Squeak names
        self.name_manager = NameManager(number_sep="")
        self.unique_name_mapping = {}
        self.pending_nodes = []
        self.generated_nodes = set()
        self.constant_insts = {}

    def gen(self):
        """Emit the whole program to '<entrygraph>.st'; return the filename."""
        graph = self.translator.graphs[0]
        self.pending_nodes.append(FunctionNode(self, graph))
        self.filename = '%s.st' % graph.name
        file = self.sqdir.join(self.filename).open('w')
        self.gen_source(file)
        # The setup node needs the constants collected while rendering
        # everything above, hence the second pass.
        self.pending_nodes.append(SetupNode(self, self.constant_insts))
        self.gen_source(file)
        file.close()
        return self.filename

    def gen_source(self, file):
        # drain the work queue (nodes may push new work while rendering)
        while self.pending_nodes:
            node = self.pending_nodes.pop()
            self.gen_node(node, file)

    def gen_node(self, node, f):
        # postpone this node if any dependency has not been rendered yet
        for dep in node.dependencies():
            if dep not in self.generated_nodes:
                self.pending_nodes.append(node)
                self.schedule_node(dep)
                return
        self.generated_nodes.add(node)
        for line in node.render():
            print >> f, line
        print >> f, ""

    def schedule_node(self, node):
        if node not in self.generated_nodes:
            if node in self.pending_nodes:
                # We move the node to the front so we can enforce
                # the generation of dependencies.
                self.pending_nodes.remove(node)
            self.pending_nodes.append(node)

    def unique_func_name(self, funcgraph, schedule=True):
        function = funcgraph.func
        squeak_func_name = self.unique_name(function, function.__name__)
        if schedule:
            self.schedule_node(FunctionNode(self, funcgraph))
        return squeak_func_name

    def unique_method_name(self, INSTANCE, method_name, schedule=True):
        # XXX it's actually more complicated than that because of
        # inheritance ...
        squeak_method_name = self.unique_name(
            (INSTANCE, method_name), method_name)
        if schedule:
            self.schedule_node(MethodNode(self, INSTANCE, method_name))
        return squeak_method_name

    def unique_class_name(self, INSTANCE):
        # NOTE(review): schedule_node() returns None, so class_node is unused.
        class_node = self.schedule_node(ClassNode(self, INSTANCE))
        if isinstance(INSTANCE, Record): # XXX quick hack
            class_name = "Record"
        else:
            class_name = INSTANCE._name.split(".")[-1]
        squeak_class_name = self.unique_name(INSTANCE, class_name)
        # generated classes all live in the 'Py' namespace prefix
        return "Py%s" % squeak_class_name

    def unique_field_name(self, INSTANCE, field_name, schedule=True):
        # XXX nameclashes with superclasses must be considered, too.
        while not INSTANCE._fields.has_key(field_name):
            # This is necessary to prevent a field from having different
            # unique names in different subclasses.
            INSTANCE = INSTANCE._superclass
        if schedule:
            # Generating getters and setters for all fields by default which
            # is potentially a waste, but easier for now.
            self.schedule_node(SetterNode(self, INSTANCE, field_name))
            self.schedule_node(GetterNode(self, INSTANCE, field_name))
        return self.unique_name(
            (INSTANCE, "field", field_name), field_name)

    def unique_var_name(self, variable):
        return self.unique_name(variable, variable.name)

    def unique_name(self, key, basename):
        # XXX should account for squeak keywords here
        if self.unique_name_mapping.has_key(key):
            unique = self.unique_name_mapping[key]
        else:
            camel_basename = camel_case(basename)
            unique = self.name_manager.uniquename(camel_basename)
            self.unique_name_mapping[key] = unique
        return unique
def camel_case(identifier):
    """Convert a dotted/underscored identifier to camelCase ('a.b_c' -> 'aBC')."""
    words = identifier.replace(".", "_").split('_')
    head, tail = words[0], words[1:]
    return head + ''.join([w.capitalize() for w in tail])
| Python |
import py
# py.test option hook: extra command-line switches for the Squeak backend tests.
Option = py.test.config.Option
option = py.test.config.addoptions("pypy-squeak options",
        Option('--showsqueak', action="store_true", dest="showsqueak",
               default=False, help="don't run squeak headless, for debugging"),
    )
| Python |
from pypy.objspace.flow.model import Constant, Variable
from pypy.rpython.ootypesystem import ootype
class AbstractCode:
    """Marker base class for all Squeak code-fragment AST nodes."""
    pass
class Message(AbstractCode):
    """A Squeak message selector; short non-alphanumeric names are infix."""
    def __init__(self, name):
        self.name = name
        self.infix = False
        if len(name) <= 2 and not name.isalnum():
            # Binary infix selector, e.g. "+"
            self.infix = True
    def with_args(self, args):
        # bundle the selector with argument expressions
        return MessageWithArgs(self, args)
    def send_to(self, receiver, args):
        # convenience: build the full send in one step
        return self.with_args(args).send_to(receiver)
class MessageWithArgs(AbstractCode):
    """A message selector bundled with its argument expressions."""
    def __init__(self, message, args):
        self.message = message
        self.args = args
    def send_to(self, receiver):
        return SentMessage(self, receiver)
class SentMessage(AbstractCode):
    """A complete message send: a receiver plus a message-with-args."""
    def __init__(self, message_wargs, receiver):
        self.message_wargs = message_wargs
        self.receiver = receiver
    def assign_to(self, result):
        # wrap the send as the right-hand side of an assignment
        return Assignment(result, self)
class Assignment(AbstractCode):
    """A Squeak ':=' assignment of rvalue to lvalue."""
    def __init__(self, lvalue, rvalue):
        self.lvalue = lvalue
        self.rvalue = rvalue
class Self(AbstractCode):
    """The Squeak 'self' pseudo-variable."""
    pass
class Field(AbstractCode):
    """Reference to an instance variable by name."""
    def __init__(self, name):
        self.name = name
class CustomVariable(AbstractCode):
    """An explicitly named variable (not derived from the flow graph)."""
    def __init__(self, name):
        self.name = name
class CodeFormatter:
    """Turn codeformatter AST nodes (and flow-graph Variables/Constants)
    into Squeak source strings, dispatching on the node's class name."""

    def __init__(self, gen=None): # XXX get rid of default argument
        self.gen = gen

    def format(self, code):
        """Main entry point: format any AST node, flow-graph arg, or constant."""
        if isinstance(code, Variable) or isinstance(code, Constant):
            return self.format_arg(code)
        elif isinstance(code, AbstractCode):
            # dispatch to format_<ClassName>
            type_name = code.__class__.__name__
            method = getattr(self, "format_%s" % type_name)
            return method(code)
        else:
            # Assume it's a constant value to be formatted
            return self.name_constant(code)

    def format_arg(self, arg):
        """Formats Variables and Constants."""
        if isinstance(arg, Variable):
            return self.gen.unique_var_name(arg)
        elif isinstance(arg, Constant):
            if isinstance(arg.concretetype, ootype.Instance):
                # XXX fix this
                # register the constant so SetupNode can emit it later
                const_id = self.gen.unique_name(
                    arg, "const_%s" % self.format_Instance(arg.value._TYPE))
                self.gen.constant_insts[arg] = const_id
                return "(PyConstants getConstant: '%s')" % const_id
            elif arg.concretetype == ootype.Char or arg.concretetype == ootype.UniChar:
                # XXX temporary
                return str(ord(arg.value))
            else:
                return self.name_constant(arg.value)
        else:
            raise TypeError, "No representation for argument %r" % (arg,)

    def name_constant(self, value):
        """Map a plain Python/ootype constant to its Squeak literal."""
        if isinstance(value, bool):
            return str(value).lower()
        elif isinstance(value, (ootype.Instance, ootype.Record)):
            return self.format_Instance(value)
        elif value is None:
            return "nil"
        elif isinstance(value, (int, float)):
            return str(value)
        elif isinstance(value, ootype._class):
            return self.format_Instance(value._INSTANCE)
        elif isinstance(value, (ootype._instance, ootype._view)):
            return self.format_Instance(value._TYPE)
        elif isinstance(value, ootype._static_meth):
            return self.gen.unique_func_name(value.graph)
        else:
            raise TypeError, "can't format constant (%s)" % value

    def format_Instance(self, INSTANCE):
        # None stands for the implicit root; map it to Squeak's Object
        if INSTANCE is None:
            return "Object"
        else:
            return self.gen.unique_class_name(INSTANCE)

    def format_Self(self, _):
        return "self"

    def format_Field(self, field):
        return field.name

    def format_CustomVariable(self, var):
        return var.name

    def format_MessageWithArgs(self, message):
        name = message.message.name
        arg_count = len(message.args)
        if arg_count == 0:
            return name
        elif message.message.infix:
            assert arg_count == 1
            return "%s %s" % (name, self.format(message.args[0]))
        else:
            # keyword message: 'name: a with: b with: c'
            parts = [name]
            if arg_count > 1:
                parts += ["with"] * (arg_count - 1)
            return " ".join(["%s: %s" % (p, self.format(a))
                    for (p, a) in zip(parts, message.args)])

    def format_SentMessage(self, smessage):
        return "(%s %s)" % (self.format(smessage.receiver),
                self.format_MessageWithArgs(smessage.message_wargs))

    def format_Assignment(self, ass):
        return "%s := %s" % (self.format(ass.lvalue), self.format(ass.rvalue))
| Python |
import datetime
from pypy.objspace.flow.model import Constant, Variable, c_last_exception
from pypy.translator.backendopt.removenoops import remove_unaryops
from pypy.translator.squeak.opformatter import OpFormatter
from pypy.translator.squeak.codeformatter import CodeFormatter, Message
from pypy.translator.squeak.codeformatter import Field, Assignment, CustomVariable
from pypy.rpython.ootypesystem.ootype import Instance, Class, Record, ROOT, _view
from pypy.rpython.ootypesystem.ootype import dynamicType, oodowncast
class CodeNode:
    """Base class for generated code units; identity/equality is by hash_key,
    which lets the generator deduplicate scheduled nodes."""
    def __hash__(self):
        return hash(self.hash_key)
    def __eq__(self, other):
        return isinstance(other, CodeNode) \
                and self.hash_key == other.hash_key
    def render_fileout_header(self, class_name, category):
        # Squeak fileout chunk header, stamped 'pypy <timestamp>'
        return "!%s methodsFor: '%s' stamp: 'pypy %s'!" % (
            class_name, category,
            datetime.datetime.now().strftime("%m/%d/%Y %H:%M"))
class ClassNode(CodeNode):
    """Renders a Squeak class definition for an ootype Instance or Record.

    host_base, when given, names an existing Squeak class to subclass
    instead of the rendered ootype superclass.
    """
    def __init__(self, gen, INSTANCE, class_vars=None, host_base=None):
        self.gen = gen
        self.INSTANCE = INSTANCE
        self.class_vars = [] # XXX should probably go away
        if class_vars is not None:
            self.class_vars = class_vars
        self.host_base = host_base
        self.hash_key = INSTANCE
        # We can treat Instances and Records uniformly, this looks
        # slightly hackish but just works.
        if isinstance(INSTANCE, Record):
            self.host_base = "Object"
    def dependencies(self):
        deps = []
        if self.host_base is None \
                and self.INSTANCE._superclass is not None: # not root or record
            deps.append(ClassNode(self.gen, self.INSTANCE._superclass))
        return deps
    def render(self):
        codef = CodeFormatter(self.gen)
        if self.host_base is None:
            superclass = codef.format_Instance(self.INSTANCE._superclass)
        else:
            superclass = self.host_base
        yield "%s subclass: #%s" % \
                (superclass, codef.format_Instance(self.INSTANCE))
        fields = [self.gen.unique_field_name(self.INSTANCE, f) for f in
            self.INSTANCE._fields.iterkeys()]
        yield " instanceVariableNames: '%s'" % ' '.join(fields)
        yield " classVariableNames: '%s'" % ' '.join(self.class_vars)
        yield " poolDictionaries: ''"
        yield " category: 'PyPy-Test'!"
class LoopFinder:
    """Depth-first walk over a flow graph recording loop-head blocks.

    A block with an exitswitch that can be re-entered from below is a
    loop head; such blocks end up as keys of self.loops (value True).
    """
    def __init__(self, startblock):
        self.loops = {}
        self.parents = {startblock: startblock}
        self.temps = {}
        self.seen = []
        self.visit_Block(startblock)
    def visit_Block(self, block, switches=None):
        # BUG FIX: 'switches' previously defaulted to a mutable list ([]).
        # That single list was shared by every LoopFinder instance and kept
        # accumulating switch blocks across graphs, so loop heads from one
        # graph leaked into the analysis of the next.  Use the standard
        # None-sentinel idiom to get a fresh list per traversal.
        if switches is None:
            switches = []
        #self.temps.has_key()
        self.seen.append(block)
        if block.exitswitch:
            # conditional block: a candidate loop head on the current path
            switches.append(block)
            self.parents[block] = block
        for link in block.exits:
            self.visit_Link(link, switches)
    def visit_Link(self, link, switches):
        # a link back to a switch block on the current path closes a loop
        if link.target in switches:
            self.loops[link.target] = True
        if not link.target in self.seen:
            self.parents[link.target] = self.parents[link.prevblock]
            self.visit_Block(link.target, switches)
class CallableNode(CodeNode):
    """Shared rendering logic for functions and methods.

    Turns a flow graph into Squeak source: LoopFinder pre-computes which
    blocks head loops (emitted as whileTrue: blocks); exception branching
    is mapped onto a synthesized OperationError Squeak exception.
    """
    # Squeak-side exception carrying the RPython exception type and value.
    OPERATION_ERROR = Instance("OperationError", ROOT,
            fields={"type": Class, "value": ROOT})
    def dependencies(self):
        return [ClassNode(self.gen, self.OPERATION_ERROR, host_base="Exception")]
    def render_body(self, startblock):
        self.codef = CodeFormatter(self.gen)
        self.loops = LoopFinder(startblock).loops
        args = self.arguments(startblock)
        message = Message(self.unique_name).with_args(args)
        yield self.codef.format(message)
        # XXX should declare local variables here
        for line in self.render_block(startblock):
            yield " %s" % line
        yield '! !'
    def render_return(self, args):
        if len(args) == 2:
            # exception
            yield self.render_exception(args[0], args[1])
        else:
            # regular return block
            retval = self.codef.format(args[0])
            yield "^%s" % retval
    def render_exception(self, exception_class, exception_value):
        exc_cls = self.codef.format(exception_class)
        exc_val = self.codef.format(exception_value)
        return "((%s new) type: %s; value: %s) signal." \
                % (self.codef.format_Instance(self.OPERATION_ERROR),
                   exc_cls, exc_val)
    def render_link(self, link):
        """Assign link args to the target block's inputargs, then render it."""
        block = link.target
        if link.args:
            for i in range(len(link.args)):
                yield '%s := %s.' % \
                        (self.codef.format(block.inputargs[i]),
                         self.codef.format(link.args[i]))
        for line in self.render_block(block):
            yield line
    def render_block(self, block):
        if self.loops.has_key(block):
            if not self.loops[block]:
                # loop head already emitted on a previous visit
                yield '"skip1"'
                return
            # open the condition block of a whileTrue: loop
            yield "["
        if block.exitswitch is c_last_exception:
            yield "["
        formatter = OpFormatter(self.gen, self)
        for op in block.operations:
            yield "%s." % formatter.format(op)
        if len(block.exits) == 0:
            # return block: its inputargs hold the result (or exc type+value)
            for line in self.render_return(block.inputargs):
                yield line
            return
        elif block.exitswitch is None:
            # single-exit block
            assert len(block.exits) == 1
            for line in self.render_link(block.exits[0]):
                yield line
        elif block.exitswitch is c_last_exception:
            # exception branching
            # wuah. ugly!
            codef = formatter.codef
            exc_var = self.gen.unique_name(("var", "exception"), "exception")
            yield "] on: %s do: [:%s |" \
                    % (codef.format(self.OPERATION_ERROR), exc_var)
            exc_exits = []
            non_exc_exit = None
            for exit in block.exits:
                if exit.exitcase is None:
                    non_exc_exit = exit
                else:
                    exc_exits.append(exit)
            # emit a nested ifTrue:/ifFalse: chain testing the exception type
            for exit in exc_exits:
                yield "(%s type isKindOf: %s) ifTrue: [" \
                        % (exc_var, codef.format(dynamicType(exit.llexitcase)))
                if exit.last_exception is not None:
                    yield "%s := %s type." \
                            % (codef.format(exit.last_exception), exc_var)
                if exit.last_exc_value is not None:
                    yield "%s := %s value." \
                            % (codef.format(exit.last_exc_value), exc_var)
                for line in self.render_link(exit):
                    yield line
                yield "] ifFalse: ["
            for exit in exc_exits:
                yield "]"
            yield "]."
            for line in self.render_link(non_exc_exit):
                yield line
        else:
            #exitswitch
            if self.loops.has_key(block):
                if self.loops[block]:
                    # first visit of the loop head: emit the whileTrue: form
                    # and mark it so the back-edge visit yields "skip1"
                    self.loops[block] = False
                    yield "%s] whileTrue: [" % self.codef.format(block.exitswitch)
                    for line in self.render_link(block.exits[True]):
                        yield " %s" % line
                    yield "]."
                    for line in self.render_link(block.exits[False]):
                        yield "%s" % line
            else:
                yield "%s ifTrue: [" % self.codef.format(block.exitswitch)
                for line in self.render_link(block.exits[True]):
                    yield " %s" % line
                yield "] ifFalse: ["
                for line in self.render_link(block.exits[False]):
                    yield " %s" % line
                yield "]"
    def apply_backendopt(self, graph):
        # strip no-op 'same_as' operations before rendering
        remove_unaryops(graph, ["same_as"])
class MethodNode(CallableNode):
    """Renders one RPython method as a Squeak instance-side method."""
    def __init__(self, gen, INSTANCE, method_name):
        self.gen = gen
        self.INSTANCE = INSTANCE
        self.name = method_name
        self.unique_name = gen.unique_method_name(
            INSTANCE, method_name, schedule=False)
        self.self = None # Will be set upon rendering
        self.hash_key = (INSTANCE, method_name)
    def dependencies(self):
        return CallableNode.dependencies(self) \
                + [ClassNode(self.gen, self.INSTANCE)]
    def arguments(self, startblock):
        # Omit the explicit self
        return startblock.inputargs[1:]
    def render(self):
        codef = CodeFormatter(self.gen)
        yield self.render_fileout_header(
            codef.format(self.INSTANCE), "methods")
        graph = self.INSTANCE._methods[self.name].graph
        self.apply_backendopt(graph)
        # remember the graph's self variable for the op formatter
        self.self = graph.startblock.inputargs[0]
        for line in self.render_body(graph.startblock):
            yield line
class FunctionNode(CallableNode):
    """Renders a plain RPython function as a class-side method on PyFunctions."""
    # all functions become class-side methods of this synthetic class
    FUNCTIONS = Instance("Functions", ROOT)
    def __init__(self, gen, graph):
        self.gen = gen
        self.graph = graph
        self.unique_name = gen.unique_func_name(graph, schedule=False)
        self.self = None
        self._class_name = gen.unique_class_name(self.FUNCTIONS)
        self.hash_key = graph
    def dependencies(self):
        return CallableNode.dependencies(self) \
                + [ClassNode(self.gen, self.FUNCTIONS)]
    def arguments(self, startblock):
        return startblock.inputargs
    def render(self):
        yield self.render_fileout_header(
            "%s class" % self._class_name, "functions")
        self.apply_backendopt(self.graph)
        for line in self.render_body(self.graph.startblock):
            yield line
class AccessorNode(CodeNode):
    """Common base for generated field accessor methods (see SetterNode
    and GetterNode)."""

    def __init__(self, gen, INSTANCE, field_name):
        self.gen = gen
        self.INSTANCE = INSTANCE
        self.field_name = field_name
        self.unique_name = gen.unique_field_name(
                INSTANCE, field_name, schedule=False)
        self.codef = CodeFormatter(gen)
        # Getter and setter for the same field must hash differently,
        # hence self.__class__ is part of the key.
        self.hash_key = (INSTANCE, field_name, self.__class__)

    def dependencies(self):
        return [ClassNode(self.gen, self.INSTANCE)]
class SetterNode(AccessorNode):
    """Renders a Smalltalk setter method 'field: value' that stores the
    argument into the instance variable of the same unique name."""

    def render(self):
        yield self.render_fileout_header(
                self.codef.format(self.INSTANCE), "accessors")
        arg_name = self.gen.unique_name((SetterNode, "arg"), "value")
        yield "%s: %s" % (self.unique_name, arg_name)
        yield " %s := %s" % (self.unique_name, arg_name)
        yield "! !"
class GetterNode(AccessorNode):
    """Renders a Smalltalk getter method answering the instance variable
    of the same unique name."""

    def render(self):
        yield self.render_fileout_header(
                self.codef.format(self.INSTANCE), "accessors")
        yield self.unique_name
        yield " ^%s" % self.unique_name
        yield "! !"
class HelperNode(CodeNode):
    """Emits a hand-written Smalltalk helper method on the class side of
    the artificial 'Helpers' class, and knows how to invoke it."""

    HELPERS = Instance("Helpers", ROOT)

    def __init__(self, gen, message, code):
        self.gen = gen
        self.message = message
        self.code = code
        self._class_name = gen.unique_class_name(self.HELPERS)
        self.hash_key = ("helper", code)

    def apply(self, args):
        # Build the expression sending this helper's message to Helpers.
        return self.message.send_to(self.HELPERS, args)

    def dependencies(self):
        return [ClassNode(self.gen, self.HELPERS)]

    def render(self):
        # XXX should not use explicit name "PyHelpers" here
        yield self.render_fileout_header(
                "%s class" % self._class_name, "helpers")
        body = self.code.strip()
        for code_line in body.split("\n"):
            yield code_line
        yield "! !"
class FieldInitializerNode(CodeNode):
    """Emits a 'fieldInit' method that assigns every field of INSTANCE
    from one positional argument each (a0, a1, ...)."""

    def __init__(self, gen, INSTANCE):
        self.gen = gen
        self.INSTANCE = INSTANCE
        self.hash_key = ("fieldinit", INSTANCE)

    def dependencies(self):
        return [ClassNode(self.gen, self.INSTANCE)]

    def render(self):
        fmt = CodeFormatter(self.gen)
        yield self.render_fileout_header(
                fmt.format(self.INSTANCE), "initializers")
        fields = self.INSTANCE._allfields()
        args = []
        for index in range(len(fields)):
            args.append(CustomVariable("a%s" % index))
        yield fmt.format(Message("fieldInit").with_args(args))
        for field_name, arg in zip(fields.keys(), args):
            unique_field = self.gen.unique_field_name(
                    self.INSTANCE, field_name)
            assignment = Assignment(Field(unique_field), arg)
            yield " %s." % fmt.format(assignment)
        yield "! !"
class SetupNode(CodeNode):
    """Renders two class-side methods on the artificial 'Constants'
    class: #setupConstants, which instantiates and field-initializes
    every prebuilt constant into a Dictionary, and #getConstant:, which
    looks a constant up by its id string."""

    CONSTANTS = Instance("Constants", ROOT)

    def __init__(self, gen, constants):
        self.gen = gen
        self.constants = constants  # maps constant -> its id (used as key)
        self._class_name = gen.unique_class_name(self.CONSTANTS)
        self.hash_key = "setup"

    def dependencies(self):
        # Important: Field initializers for the *runtime* type
        return [FieldInitializerNode(self.gen, dynamicType(c.value))
                for c in self.constants.iterkeys()] + \
                [ClassNode(self.gen, self.CONSTANTS, class_vars=["Constants"])]

    def render(self):
        codef = CodeFormatter(self.gen)
        # XXX use CodeFormatter throughout here
        yield self.render_fileout_header(
                "%s class" % self._class_name, "internals")
        message = Message("setupConstants")
        yield codef.format(message.with_args([]))
        yield " Constants := Dictionary new."
        for const, const_id in self.constants.iteritems():
            # Instantiate the constant with "new" and initialize all of
            # its fields through the generated fieldInit method.
            INST = dynamicType(const.value)
            inst = oodowncast(INST, const.value)
            field_names = INST._allfields().keys()
            field_values = [getattr(inst, f) for f in field_names]
            new = Message("new").send_to(INST, [])
            init_message = Message("fieldInit").send_to(new, field_values)
            yield " Constants at: '%s' put: %s." \
                    % (const_id, codef.format(init_message))
        yield "! !"
        yield ""
        yield self.render_fileout_header(
                "%s class" % self._class_name, "internals")
        arg = CustomVariable("constId")
        get_message = Message("getConstant")
        yield codef.format(get_message.with_args([arg]))
        yield " ^ Constants at: constId"
        yield "! !"
| Python |
from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong
from pypy.translator.squeak.codeformatter import CodeFormatter
from pypy.translator.squeak.codeformatter import Message, Self, Assignment, Field
def _setup_int_masks():
    """Generates code for helpers to mask the various integer types.

    Returns a dict mapping a type name ("int", "uint", "ullong") to a
    (helper_name, smalltalk_code) pair.  The helpers truncate Smalltalk's
    arbitrary-precision integers back into the value range of the
    corresponding RPython type.
    """
    masks = {}
    # NB: behaviour of signed long longs is undefined on overflow
    # (which is why no "llong" helper is generated here).
    for name, r_type in ("int", r_int), ("uint", r_uint), ("ullong", r_ulonglong):
        helper_name = "mask%s" % name.capitalize()
        if name[0] == "u":
            # Unsigned integer type: simply truncate to the type's width.
            code = """%s: i
^ i bitAnd: %s""" % (helper_name, r_type.MASK)
        else:
            # Signed integer type: in-range values pass through; out of
            # range values wrap around (two's complement behaviour).
            code = """%s: i
(i <= %s) & (i >= %s) ifTrue: [^i].
(i < 0) ifTrue: [^i bitAnd: %s]
ifFalse: [^(((i negated) - 1) bitAnd: %s) negated - 1]
""" % (helper_name, r_type.MASK>>1, -(r_type.MASK>>1)-1,
       r_type.MASK>>1, r_type.MASK>>1)
        masks[name] = helper_name, code
    return masks
class OpFormatter:
    """Translates a single flow-graph operation into Smalltalk code.

    Operations are looked up, in order, in:
      1. the 'ops' table (direct unary message translations),
      2. the 'number_ops' table (arithmetic ops, whose opnames carry a
         type prefix such as "int_add"),
      3. an op_<opname> method defined on this class.
    Anything not found raises NotImplementedError.
    """

    # Operations translating directly to a single message send.
    ops = {
        'new':        'new',
        'runtimenew': 'new',
        'classof':    'class',
        'bool_not':   'not',
        'cast_int_to_float': 'asFloat',
        # XXX this potentially incorrect (may return LargeIntegers)
        'cast_float_to_int': 'truncated',
    }

    # Arithmetic/comparison operations, keyed without their type prefix.
    # (Bug fix: the original dict listed 'eq' twice; only one entry kept.)
    number_ops = {
        'abs':       'abs',
        'is_true':   'isZero not',
        'neg':       'negated',
        'invert':    'bitInvert',

        'add':       '+',
        'sub':       '-',
        'mul':       '*',
        'floordiv':  'quo',
        'truediv':   '/ asFloat',
        'mod':       r'\\',
        'eq':        '=',
        'ne':        '~=',
        'lt':        '<',
        'le':        '<=',
        'gt':        '>',
        'ge':        '>=',
        'and':       'bitAnd',
        'or':        'bitOr',
        'lshift':    '<<',
        'rshift':    '>>',
        'xor':       'bitXor',
        # XXX need to support x_ovf ops
    }

    number_opprefixes = "int", "uint", "llong", "ullong",\
            "float", "char", "unichar"

    # Results of these ops must be masked back into the type's range.
    wrapping_ops = "neg", "invert", "add", "sub", "mul", "lshift"

    # Operations that render as a plain assignment (see noop() below).
    noops = "same_as", "ooupcast", "oodowncast", "cast_char_to_int", \
            "cast_unichar_to_int", "cast_int_to_unichar", \
            "cast_int_to_char", "cast_int_to_longlong", \
            "truncate_longlong_to_int"

    int_masks = _setup_int_masks()

    def __init__(self, gen, node):
        self.gen = gen
        self.node = node
        self.codef = CodeFormatter(gen)

    def format(self, op):
        """Return the Smalltalk code (a string) for operation 'op'."""
        if op.opname in self.ops:
            name = self.ops[op.opname]
            sent = Message(name).send_to(op.args[0], op.args[1:])
            return self.codef.format(sent.assign_to(op.result))
        opname_parts = op.opname.split("_")
        if opname_parts[0] in self.number_opprefixes:
            return self.format_number_op(
                    op, opname_parts[0], "_".join(opname_parts[1:]))
        op_method = getattr(self, "op_%s" % op.opname, None)
        if op_method is not None:
            return self.codef.format(op_method(op))
        else:
            raise NotImplementedError(
                        "operation not supported: %s" % op.opname)

    def format_number_op(self, op, ptype, opname):
        """Format an arithmetic operation, masking wrapping results."""
        # A table entry may contain several selectors sent in sequence,
        # e.g. "isZero not".
        messages = self.number_ops[opname].split()
        msg = Message(messages[0])
        sent_message = msg.send_to(op.args[0], op.args[1:])
        for add_message in messages[1:]:
            sent_message = Message(add_message).send_to(sent_message, [])
        if opname in self.wrapping_ops \
                and ptype in self.int_masks:
            sent_message = self.apply_mask_helper(sent_message, ptype)
        return self.codef.format(sent_message.assign_to(op.result))

    def apply_mask_helper(self, receiver, mask_type_name):
        """Wrap 'receiver' in a call to the mask helper for the type."""
        # XXX how do i get rid of this import?
        from pypy.translator.squeak.node import HelperNode
        mask_name, mask_code = self.int_masks[mask_type_name]
        helper = HelperNode(self.gen, Message(mask_name), mask_code)
        result = helper.apply([receiver])
        self.gen.schedule_node(helper)
        return result

    def op_oosend(self, op):
        # args[0] is the method name, args[1] the receiver, rest the args.
        message_name = op.args[0].value
        message_name = self.gen.unique_method_name(
                op.args[1].concretetype, message_name)
        if op.args[1] == self.node.self:
            receiver = Self()
        else:
            receiver = op.args[1]
        sent_message = Message(message_name).send_to(receiver, op.args[2:])
        return sent_message.assign_to(op.result)

    def op_oogetfield(self, op):
        INST = op.args[0].concretetype
        field_name = self.gen.unique_field_name(INST, op.args[1].value)
        if op.args[0] == self.node.self:
            # Private field access
            # Could also directly substitute op.result with name
            # everywhere for optimization.
            rvalue = Field(field_name)
        else:
            # Public field access via the generated getter
            rvalue = Message(field_name).send_to(op.args[0], [])
        return Assignment(op.result, rvalue)

    def op_oosetfield(self, op):
        # Note that the result variable is never used
        INST = op.args[0].concretetype
        field_name = self.gen.unique_field_name(INST, op.args[1].value)
        field_value = op.args[2]
        if op.args[0] == self.node.self:
            # Private field access
            return Assignment(Field(field_name), field_value)
        else:
            # Public field access via the generated setter
            return Message(field_name).send_to(op.args[0], [field_value])

    def op_direct_call(self, op):
        # XXX how do i get rid of this import?
        from pypy.translator.squeak.node import FunctionNode
        function_name = self.gen.unique_func_name(op.args[0].value.graph)
        msg = Message(function_name).send_to(FunctionNode.FUNCTIONS, op.args[1:])
        return msg.assign_to(op.result)

    def cast_bool(self, op, true_repr, false_repr):
        """Common helper for casting a bool to a numeric literal."""
        msg = Message("ifTrue: [%s] ifFalse: [%s]" % (true_repr, false_repr))
        return msg.send_to(op.args[0], []).assign_to(op.result)

    def op_cast_bool_to_int(self, op):
        return self.cast_bool(op, "1", "0")
    op_cast_bool_to_uint = op_cast_bool_to_int

    def op_cast_bool_to_float(self, op):
        return self.cast_bool(op, "1.0", "0.0")

    def masking_cast(self, op, mask):
        cast = self.apply_mask_helper(op.args[0], mask)
        return Assignment(op.result, cast)

    def op_cast_int_to_uint(self, op):
        return self.masking_cast(op, "uint")

    def op_cast_uint_to_int(self, op):
        return self.masking_cast(op, "int")

    def op_cast_float_to_uint(self, op):
        truncated = Message("truncated").send_to(op.args[0], [])
        return Assignment(op.result, self.apply_mask_helper(truncated, "uint"))

    def noop(self, op):
        # Identity operations become a plain assignment.
        return Assignment(op.result, op.args[0])
# Install an op_<name> alias for every no-op listed in OpFormatter.noops;
# they all render as a simple assignment via OpFormatter.noop.
for opname in OpFormatter.noops:
    setattr(OpFormatter, "op_%s" % opname, OpFormatter.noop)
| Python |
import os
import py
from pypy.tool.udir import udir
from pypy.translator.squeak.gensqueak import GenSqueak, camel_case
from pypy.translator.translator import TranslationContext
from pypy import conftest
from pypy.translator.squeak import conftest as sqconftest
def compile_function(func, annotation=None, graph=None):
    """Translate 'func' to Squeak and wrap it for execution via squeak.

    'annotation' is the list of argument types used for annotation
    (defaults to no arguments).  Fixes the mutable-default-argument
    anti-pattern of the original signature; passing a list explicitly
    still works as before.
    """
    if annotation is None:
        annotation = []
    return SqueakFunction(func, annotation, graph)
def squeak_checks():
    """Skip the current test unless a usable Squeak setup is present."""
    # The pipes-based communication below only works on posix systems.
    try:
        import posix
    except ImportError:
        py.test.skip("Squeak tests only work on Unix right now.")
    if py.path.local.sysfind("squeak") is None:
        py.test.skip("Squeak is not on your path.")
    if os.environ.get("SQUEAK_IMAGE") is None:
        py.test.skip("Squeak tests expect the SQUEAK_IMAGE environment "
                     "variable to point to an image.")
# For now use pipes to communicate with squeak. This is very flaky
# and only works for posix systems. At some later point we'll
# probably need a socket based solution for this.
#
# The script below is handed to squeak on the command line: it files in
# the generated source (command line attribute 3), invokes the selector
# named by attribute 4 with the remaining attributes as integer
# arguments, writes the result to stdout and quits.
startup_script = """
| stdout src selector result arguments arg i |
src := Smalltalk getSystemAttribute: 3.
FileStream fileIn: src.
selector := (Smalltalk getSystemAttribute: 4) asSymbol.
arguments := OrderedCollection new.
i := 4.
[(arg := Smalltalk getSystemAttribute: (i := i + 1)) notNil]
whileTrue: [arguments add: arg asInteger].
PyConstants setupConstants.
result := (PyFunctions perform: selector withArguments: arguments asArray).
stdout := StandardFileStream fileNamed: '/dev/stdout'.
stdout nextPutAll: result asString.
Smalltalk snapshot: false andQuit: true.
"""
class SqueakFunction:
    """Wraps an RPython function: translates it to Squeak source on
    construction and, when called, runs it inside a squeak image and
    returns the process' stdout as a string."""

    def __init__(self, func, annotation, graph=None):
        squeak_checks()
        self._func = func
        self._gen = self._build(func, annotation, graph)

    def _build(self, func, annotation, graph=None):
        # Translate the (unbound) function to Squeak source inside udir.
        try:
            func = func.im_func
        except AttributeError:
            pass
        t = TranslationContext()
        if graph is not None:
            # Annotate a pre-built flow graph instead of flowing 'func'.
            graph.func = func
            ann = t.buildannotator()
            inputcells = [ann.typeannotation(a) for a in annotation]
            ann.build_graph_types(graph, inputcells)
            t.graphs.insert(0, graph)
        else:
            t.buildannotator().build_types(func, annotation)
        t.buildrtyper(type_system="ootype").specialize()
        self.graph = t.graphs[0]
        if conftest.option.view:
            t.viewcg()
        gen = GenSqueak(udir, t)
        gen.gen()
        return gen

    def _write_startup(self):
        # Write the startup script once; later runs reuse the file.
        startup_st = udir.join("startup.st")
        try:
            # Erm, py.path.local has no "exists" method?
            startup_st.stat()
        except py.error.ENOENT:
            f = startup_st.open("w")
            f.write(startup_script)
            f.close()
        return startup_st

    def _symbol(self, arg_count):
        # Build the Smalltalk selector for the entry function, e.g.
        # "f" for zero arguments, "f:" for one, "f:with:" for two, etc.
        name = camel_case(self._func.__name__)
        if arg_count == 0:
            return name
        else:
            parts = [name]
            if arg_count > 1:
                parts += ["with"] * (arg_count - 1)
            return "%s:%s" % (parts[0], "".join([p + ":" for p in parts[1:]]))

    def __call__(self, *args):
        # NB: only integers arguments are supported currently
        startup_st = self._write_startup()
        options = "-headless"
        if sqconftest.option.showsqueak:
            options = ""
        # Arguments 3.. of the squeak command line are picked up by
        # startup_script above.
        cmd = 'squeak %s -- %s %s "%s" %s' \
              % (options, startup_st, udir.join(self._gen.filename),
                 self._symbol(len(args)),
                 " ".join(['"%s"' % a for a in args]))
        squeak_process = os.popen(cmd)
        result = squeak_process.read()
        assert squeak_process.close() is None # exit status was 0
        return result
| Python |
class A:
    """Trivial sample class used by the Squeak backend tests."""

    def m(self, i):
        # Answer the successor of i.
        return i + 1
def f(i):
    """Trivial sample function used by the Squeak backend tests."""
    return 1 + i
| Python |
"""
Backend for the JVM.
"""
import sys
import py
from py.compat import subprocess
from pypy.tool.udir import udir
from pypy.translator.translator import TranslationContext
from pypy.translator.oosupport.genoo import GenOO
from pypy.translator.jvm.generator import JasminGenerator
from pypy.translator.jvm.option import getoption
from pypy.translator.jvm.database import Database
from pypy.translator.jvm.log import log
from pypy.translator.jvm.node import EntryPoint, Function
from pypy.translator.jvm.opcodes import opcodes
from pypy.rpython.ootypesystem import ootype
from pypy.translator.jvm.constant import \
JVMConstantGenerator, JVMStaticMethodConst, JVMCustomDictConst, \
JVMWeakRefConst
from pypy.translator.jvm.prebuiltnodes import create_interlink_node
class JvmError(Exception):
    """ Indicates an error occurred in JVM backend """

    def pretty_print(self):
        # Default pretty-printing: just the exception message.
        print(str(self))
class JvmSubprogramError(JvmError):
    """ Indicates an error occurred running some program """

    def __init__(self, res, args, stdout, stderr):
        self.res = res        # exit code of the subprogram
        self.args = args      # argument list it was invoked with
        self.stdout = stdout  # captured standard output
        self.stderr = stderr  # captured standard error

    def __str__(self):
        return "Error code %d running %s" % (self.res, repr(self.args))

    def pretty_print(self):
        JvmError.pretty_print(self)
        print("vvv Stdout vvv\n")
        print(self.stdout)
        print("vvv Stderr vvv\n")
        print(self.stderr)
class JvmGeneratedSource(object):
    """
    An object which represents the generated sources. Contains methods
    to find out where they are located, to compile them, and to execute
    them.

    For those interested in the location of the files, the following
    attributes exist:
    tmpdir --- root directory from which all files can be found (py.path obj)
    javadir --- the directory containing *.java (py.path obj)
    classdir --- the directory where *.class will be generated (py.path obj)
    package --- a string with the name of the package (i.e., 'java.util')

    The following attributes also exist to find the state of the sources:
    compiled --- True once the sources have been compiled successfully
    """

    def __init__(self, tmpdir, package):
        """
        'tmpdir' --- the base directory where the sources are located
        'package' --- the package the sources are in; if package is pypy.jvm,
        then we expect to find the sources in $tmpdir/pypy/jvm
        'jfiles' --- list of files we need to run jasmin on
        """
        self.tmpdir = tmpdir
        self.package = package
        self.compiled = False
        self.jasmin_files = None

        # Compute directory where .j files are
        self.javadir = self.tmpdir
        for subpkg in package.split('.'):
            self.javadir = self.javadir.join(subpkg)

        # Compute directory where .class files should go
        self.classdir = self.javadir

    def set_jasmin_files(self, jfiles):
        self.jasmin_files = jfiles

    def _invoke(self, args, allow_stderr):
        """Run the subprogram 'args'.  Raises JvmSubprogramError on a
        non-zero exit code, or on any stderr output unless
        'allow_stderr' is true.  Returns (stdout, stderr)."""
        # On Windows the javac/jasmin entry points are scripts that can
        # only be spawned through the shell.
        # Bug fix: the original tested sys.platform == 'nt', which is
        # never true -- sys.platform is 'win32' on Windows ('nt' is the
        # value of os.name) -- so shell=True was never selected.
        if sys.platform == 'win32':
            shell = True
        else:
            shell = False
        subp = subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            shell=shell, universal_newlines=True)
        stdout, stderr = subp.communicate()
        res = subp.wait()
        if res or (not allow_stderr and stderr):
            raise JvmSubprogramError(res, args, stdout, stderr)
        return stdout, stderr

    def _compile_helper(self, clsnms):
        # HACK: compile the Java helper classes.  Should eventually
        # use rte.py.  Only classes whose .class file is missing are
        # recompiled.
        tocompile = []
        for clsnm in clsnms:
            pypycls = self.classdir.join(clsnm + '.class')
            if not pypycls.check():
                tocompile.append(clsnm)
        if tocompile:
            thisdir = py.magic.autopath().dirpath()
            javasrcs = [str(thisdir.join('src/pypy', clsnm + '.java')) for
                        clsnm in tocompile]
            self._invoke([getoption('javac'),
                          '-nowarn',
                          '-d', str(self.classdir)] +
                         javasrcs,
                         True)

    def compile(self):
        """
        Compiles the .java sources into .class files, ready for execution.
        """
        jascmd = [getoption('jasmin'), '-d', str(self.javadir)]

        def split_list(files):
            "Split the files list into manageable pieces"
            # - On Windows 2000, commands in .bat are limited to 2047 chars.
            # - But the 'jasmin' script contains a line like
            #     path_to_jre/java -jar path_to_jasmin/jasmin.jar $*
            #   So we limit the length of arguments files to:
            MAXLINE = 1500

            chunk = []
            chunklen = 0
            for f in files:
                # Account for the space between items
                chunklen += len(f) + 1
                if chunklen > MAXLINE:
                    yield chunk
                    chunk = []
                    chunklen = len(f)
                chunk.append(f)
            if chunk:
                yield chunk

        for files in split_list(self.jasmin_files):
            print("Invoking jasmin on %s" % files)
            self._invoke(jascmd + files, False)
            print("... completed!")

        self.compiled = True
        self._compile_helper(('Callback',
                              'CustomDict',
                              'DictItemsIterator',
                              'Equals',
                              'Filter',
                              'FilterIterator',
                              'FilterSet',
                              'HashCode',
                              'Interlink',
                              'PyPy',
                              ))

    def _make_str(self, a):
        # ootype strings must be unwrapped to their underlying str.
        if isinstance(a, ootype._string):
            return a._str
        return str(a)

    def execute(self, args):
        """
        Executes the compiled sources in a separate process.  Returns the
        output as a string.  The 'args' are provided as arguments,
        and will be converted to strings.
        """
        assert self.compiled
        strargs = [self._make_str(a) for a in args]
        cmd = [getoption('java'),
               '-cp',
               str(self.javadir),
               self.package + ".Main"] + strargs
        print("Invoking java to run the code")
        stdout, stderr = self._invoke(cmd, True)
        print("...done!")
        sys.stderr.write(stderr)
        return stdout
def generate_source_for_function(func, annotation):
    """
    Given a Python function and some hints about its argument types,
    generates JVM sources that call it and print the result.  Returns
    the JvmGeneratedSource object.
    """
    # Unwrap bound/unbound methods down to the plain function.
    if hasattr(func, 'im_func'):
        func = func.im_func
    t = TranslationContext()
    annotator = t.buildannotator()
    annotator.build_types(func, annotation)
    t.buildrtyper(type_system="ootype").specialize()
    main_graph = t.graphs[0]
    if getoption('view'):
        t.view()
    if getoption('wd'):
        tmpdir = py.path.local('.')
    else:
        tmpdir = udir
    jvm = GenJvm(tmpdir, t, EntryPoint(main_graph, True, True))
    return jvm.generate_source()
def detect_missing_support_programs():
    """Skip the current test if jasmin, javac or java is missing."""
    # Checked in the same order as the original: jasmin, javac, java.
    for exechelper in (getoption('jasmin'), getoption('javac'),
                       getoption('java')):
        if py.path.local.sysfind(exechelper) is None:
            py.test.skip("%s is not on your path" % exechelper)
class GenJvm(GenOO):
    """ Master object which guides the JVM backend along.  To use,
    create with appropriate parameters and then invoke
    generate_source().  *You can not use one of these objects more than
    once.* """

    TypeSystem = lambda X, db: db # TypeSystem and Database are the same object
    Function = Function
    Database = Database
    opcodes = opcodes
    log = log

    ConstantGenerator = JVMConstantGenerator
    CustomDictConst   = JVMCustomDictConst
    StaticMethodConst = JVMStaticMethodConst
    WeakRefConst = JVMWeakRefConst

    def __init__(self, tmpdir, translator, entrypoint):
        """
        'tmpdir' --- where the generated files will go.  In fact, we will
        put our binaries into the directory pypy/jvm
        'translator' --- a TranslationContext object
        'entrypoint' --- if supplied, an object with a render method
        """
        GenOO.__init__(self, tmpdir, translator, entrypoint)
        create_interlink_node(self.db)
        self.jvmsrc = JvmGeneratedSource(tmpdir, getoption('package'))

    def generate_source(self):
        """ Creates the sources, and returns a JvmGeneratedSource object
        for manipulating them """
        GenOO.generate_source(self)
        # Tell the source object which files to run jasmin on:
        self.jvmsrc.set_jasmin_files(self.db.jasmin_files())
        return self.jvmsrc

    def create_assembler(self):
        """ Creates and returns a Generator object according to the
        configuration.  Right now, however, there is only one kind of
        generator: JasminGenerator """
        # (Removed a stray leftover debugging print statement here.)
        return JasminGenerator(
            self.db, self.jvmsrc.javadir, self.jvmsrc.package)
| Python |
from pypy.translator.jvm.conftest import option
# Fallback values returned by getoption() below when the py.test
# 'option' object does not define the attribute (e.g. when the code is
# used outside of a py.test run).
# Not sure why this is needed.  Sure that it shouldn't be, even.
_default_values = {
    'javac':'javac',
    'java':'java',
    'jasmin':'jasmin',
    'noasm':False,
    'package':'pypy',
    'wd':False,
    'norun':False,
    'trace':False,
    'byte-arrays':False
    }
def getoption(name):
    """Return the value of command-line option 'name', falling back to
    _default_values when the option object does not provide it."""
    try:
        return getattr(option, name)
    except AttributeError:
        return _default_values[name]
| Python |
import py
Option = py.test.config.Option

# Command line options understood by the pypy-jvm test runner.
option = py.test.config.addoptions(
    "pypy-jvm options",

    Option('--java', action='store', dest='java', default='java',
           help='Define the java executable to use'),
    Option('--javac', action='store', dest='javac', default='javac',
           help='Define the javac executable to use'),
    # Bug fix: this option previously used dest='java', default='java'
    # (a copy-paste error), which clobbered the --java option above; it
    # must use its own 'jasmin' destination and default, matching the
    # fallback in option.py.
    Option('--jasmin', action='store', dest='jasmin', default='jasmin',
           help='Define the jasmin script to use'),
    Option('--noassemble', action='store_true', dest="noasm", default=False,
           help="don't assemble jasmin files"),
    Option('--package', action='store', dest='package', default='pypy',
           help='Package to output generated classes into'),
    Option('--trace', action='store_true', dest='trace', default=False,
           help='Trace execution of generated code'),
    Option('--byte-arrays', action='store_true', dest='byte-arrays',
           default=False, help='Use byte arrays rather than native strings'),
    )
| Python |
"""
Definition and some basic translations between PyPy ootypesystem and
JVM type system.
Here are some tentative non-obvious decisions:
Signed scalar types mostly map as is.
Unsigned scalar types are a problem; the basic idea is to store them
as signed values, but execute special code when working with them. Another
option would be to use classes, or to use the "next larger" type and remember to use appropriate modulos. The jury is out on
this. Another idea would be to add a variant type system that does
not have unsigned values, and write the required helper and conversion
methods in RPython --- then it could be used for multiple backends.
Python strings are mapped to byte arrays, not Java Strings, since
Python strings are really sequences of bytes, not unicode code points.
Jury is out on this as well; this is not the approach taken by cli,
for example.
Python Unicode strings, on the other hand, map directly to Java Strings.
WeakRefs can hopefully map to Java Weak References in a straight
forward fashion.
Collections can hopefully map to Java collections instances. Note
that JVM does not have an idea of generic typing at its lowest level
(well, they do have signature attributes, but those don't really count
for much).
"""
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.ootypesystem import ootype
from pypy.translator.jvm.option import getoption
from pypy.translator.jvm.log import log
# ___________________________________________________________________________
# Type Descriptors
#
# Internal representations of types for the JVM. Generally speaking,
# only the generator code should deal with these and even it tries to
# avoid them except write before dumping to the output file.
class JvmTypeDescriptor(str):
    """
    An internal class representing JVM type descriptors, which are
    essentially Java's short hand for types.  This is the lowest level
    of our representation for types and are mainly used when defining
    the types of fields or arguments to methods.  The grammar for type
    descriptors can be read about here:

    http://java.sun.com/docs/books/vmspec/2nd-edition/html/ClassFile.doc.html

    We use this class also to represent method descriptors, which define
    a set of argument and return types.
    """
    def is_scalar(self):
        # A scalar is anything that is neither a class ('L...;') nor
        # an array ('[...').
        first = self[0]
        return first != 'L' and first != '['
    def is_reference(self):
        return not self.is_scalar()
    def is_array(self):
        return self.startswith('[')
    def is_method(self):
        return self.startswith('(')
    def class_name(self):
        """ Converts a descriptor like Ljava/lang/Object; to
        full class name java.lang.Object """
        return self.int_class_name().replace('/', '.')
    def int_class_name(self):
        """ Converts a descriptor like Ljava/lang/Object; to
        internal class name java/lang/Object """
        assert self[0] == 'L' and self[-1] == ';'
        return self[1:-1]
    def type_width(self):
        """ Returns number of JVM words this type takes up.  JVM words
        are a theoretically abstract quantity that basically
        represents 32 bits; so most types are 1, but longs and doubles
        are 2. """
        if self[0] in ('J', 'D'):
            return 2
        return 1
# JVM type functions
def desc_for_array_of(jdescr):
    """ Returns a JvmTypeDescriptor representing an array of 'jdescr',
    which must itself be a JvmTypeDescriptor """
    assert isinstance(jdescr, JvmTypeDescriptor)
    return JvmTypeDescriptor('['+jdescr)
def desc_for_class(classnm):
    """ Returns a JvmTypeDescriptor representing the class 'classnm',
    which should be a fully qualified java class name (i.e.,
    'java.lang.String') """
    internal_name = classnm.replace('.', '/')
    return JvmTypeDescriptor('L%s;' % internal_name)
def desc_for_method(argtypes, rettype):
    """ A Java method has a descriptor, which is a string specifying
    its argument and return types.  This function converts a list of
    argument types (JvmTypes) and the return type (also a JvmType)
    into one of these descriptor strings. """
    joined_args = "".join(argtypes)
    return JvmTypeDescriptor("(%s)%s" % (joined_args, rettype))
# ______________________________________________________________________
# Basic JVM Types
class JvmType(object):
    """
    The JvmType interface defines the interface for type objects
    that we return in the database in various places.
    """
    def __init__(self, descriptor):
        """ 'descriptor' should be a jvm.generator.JvmTypeDescriptor object
        for this type """
        self.descriptor = descriptor  # public
        self.name = None              # public, string like "java.lang.Object"
                                      # (None for scalars and arrays)

    def lookup_field(self, fieldnm):
        """ If the class has a field named 'fieldnm', returns a
        jvmgen.Field or jvmgen.Property object that represents it and can
        be used with the interpreter to load/store it.  If no such field
        exists, or this is not a class, then raises KeyError. """
        # Bug fix: this previously raised the undefined name
        # 'NotImplementedException', i.e. a NameError at runtime.
        raise NotImplementedError

    def lookup_method(self, methodnm):
        """ Returns a jvm.generator.Method object representing the method
        with the given name, or raises KeyError if that method does not
        exist on this type. """
        raise NotImplementedError

    def __repr__(self):
        return "%s<%s>" % (self.__class__.__name__, self.descriptor)
class JvmClassType(JvmType):
    """
    Base class used for all class instances.  Kind of an abstract class;
    instances of this class do not support field or method lookup and
    only work to obtain the descriptor.  We use it on occasion for classes
    like java.lang.Object etc.
    """
    def __init__(self, classnm, throwable=False):
        JvmType.__init__(self, desc_for_class(classnm))
        self.name = classnm         # public; String, like 'java.lang.Object'
        self.throwable = throwable  # public; boolean

    def lookup_field(self, fieldnm):
        raise KeyError(fieldnm)     # we treat as opaque type

    def lookup_method(self, methodnm):
        # Bug fix: this previously raised KeyError(fieldnm), an
        # undefined name, turning every method lookup into a NameError.
        raise KeyError(methodnm)    # we treat as opaque type
class JvmInterfaceType(JvmClassType):
    # Like JvmClassType, but denotes a Java interface rather than a
    # concrete class.
    pass
# Prebuilt JvmClassType instances for the standard Java library classes
# used by the generated code:
jIntegerClass = JvmClassType('java.lang.Integer')
jLongClass = JvmClassType('java.lang.Long')
jDoubleClass = JvmClassType('java.lang.Double')
jByteClass = JvmClassType('java.lang.Byte')
jCharClass = JvmClassType('java.lang.Character')
jBoolClass = JvmClassType('java.lang.Boolean')
jThrowable = JvmClassType('java.lang.Throwable', throwable=True)
jObject = JvmClassType('java.lang.Object')
jString = JvmClassType('java.lang.String')
jCharSequence = JvmClassType('java.lang.CharSequence')
jArrays = JvmClassType('java.util.Arrays')
jMap = JvmInterfaceType('java.util.Map')
jHashMap = JvmClassType('java.util.HashMap')
jIterator = JvmClassType('java.util.Iterator')
jClass = JvmClassType('java.lang.Class')
jStringBuilder = JvmClassType('java.lang.StringBuilder')
jPrintStream = JvmClassType('java.io.PrintStream')
jMath = JvmClassType('java.lang.Math')
jList = JvmInterfaceType('java.util.List')
jArrayList = JvmClassType('java.util.ArrayList')
jWeakRef = JvmClassType('java.lang.ref.WeakReference')
# ... and for the PyPy runtime support classes:
jPyPy = JvmClassType('pypy.PyPy')
jPyPyExcWrap = JvmClassType('pypy.ExceptionWrapper')
jPyPyConst = JvmClassType('pypy.Constant')
jPyPyMain = JvmClassType('pypy.Main')
jPyPyDictItemsIterator = JvmClassType('pypy.DictItemsIterator')
jPyPyInterlink = JvmClassType('pypy.Interlink')
jPyPyCustomDict = JvmClassType('pypy.CustomDict')
jArithmeticException = JvmClassType('java.lang.ArithmeticException', throwable=True)
class JvmScalarType(JvmType):
    """
    Subclass used for all scalar type instances.
    """
    def __init__(self, descrstr, boxtype, unboxmethod):
        JvmType.__init__(self, JvmTypeDescriptor(descrstr))
        # The boxed counterpart (e.g. java.lang.Integer for 'I') and the
        # name of its unboxing method (e.g. 'intValue').
        self.box_type = boxtype
        self.unbox_method = unboxmethod
    def lookup_field(self, fieldnm):
        raise KeyError(fieldnm)         # Scalar objects have no fields
    def lookup_method(self, methodnm):
        raise KeyError(methodnm)        # Scalar objects have no methods
# The scalar types themselves, each with its boxed class and the name
# of the method that unboxes it again (None for void):
jVoid = JvmScalarType('V', None, None)
jInt = JvmScalarType('I', jIntegerClass, 'intValue')
jLong = JvmScalarType('J', jLongClass, 'longValue')
jBool = JvmScalarType('Z', jBoolClass, 'booleanValue')
jDouble = JvmScalarType('D', jDoubleClass, 'doubleValue')
jByte = JvmScalarType('B', jByteClass, 'byteValue')
jChar = JvmScalarType('C', jCharClass, 'charValue')
class JvmArrayType(JvmType):
    """
    Subclass used for all array instances.
    """
    def __init__(self, elemtype):
        JvmType.__init__(self, desc_for_array_of(elemtype.descriptor))
        self.element_type = elemtype
    def lookup_field(self, fieldnm):
        raise KeyError(fieldnm)  # TODO adjust interface to permit opcode here
    def lookup_method(self, methodnm):
        raise KeyError(methodnm) # Arrays have no methods
# Commonly used array types:
jByteArray = JvmArrayType(jByte)
jObjectArray = JvmArrayType(jObject)
jStringArray = JvmArrayType(jString)
class Generifier(object):
    """
    A utility class for working with generic methods in the OOTYPE
    system.  You instantiate it with a given type, and you can ask it
    for the actual or erased types of any method of that type.
    """

    def __init__(self, OOTYPE):
        self.OOTYPE = OOTYPE

        # Make a hashtable mapping the generic parameter to a tuple:
        #    (actual type, erased type)
        self.generics = {}

        if hasattr(self.OOTYPE, 'SELFTYPE_T'):
            # The self type parameter: actual and erased are both OOTYPE.
            self.generics[self.OOTYPE.SELFTYPE_T] = (self.OOTYPE,self.OOTYPE)

        # Item/key/value type parameters all erase to ROOT.
        for pname,pval in (('ITEMTYPE_T', '_ITEMTYPE'),
                           ('KEYTYPE_T', '_KEYTYPE'),
                           ('VALUETYPE_T', '_VALUETYPE')):
            if hasattr(self.OOTYPE, pname):
                placeholder = getattr(self.OOTYPE, pname)
                placeholder_val = getattr(self.OOTYPE, pval)
                self.generics[placeholder] = (placeholder_val, ootype.ROOT)

    def full_types(self, method_name):
        """
        Returns a tuple of argument and return types for the method
        named 'method_name'.  These are the actual generic types.  The
        set method for a list of strings, for example, might return:
          ( [INT, STRING], VOID )
        """
        GENMETH = self.OOTYPE._GENERIC_METHODS[method_name]
        ARGS, RESULT = (GENMETH.ARGS, GENMETH.RESULT)
        # Non-generic types fall through unchanged: the default 1-tuple
        # (X,) makes index [0] return X itself.
        ARGS = [self.generics.get(X,(X,))[0] for X in ARGS]
        RESULT = self.generics.get(RESULT, (RESULT,))[0]
        return (ARGS, RESULT)

    def erased_types(self, method_name):
        """
        Returns a tuple of argument and return types for the method
        named 'method_name'.  These are the erased generic types.  The
        set method for a list of strings, for example, might return:
          ( [INT, OBJECT], VOID )
        """
        GENMETH = self.OOTYPE._GENERIC_METHODS[method_name]
        ARGS, RESULT = (GENMETH.ARGS, GENMETH.RESULT)
        # Non-generic types erase to themselves: the default 2-tuple
        # (None, X) makes index [1] return X itself.
        ARGS = [self.generics.get(X,(None,X))[1] for X in ARGS]
        RESULT = self.generics.get(RESULT, (None,RESULT))[1]
        return (ARGS, RESULT)
return (ARGS, RESULT)
# ______________________________________________________________________
# Java Callback Interfaces
#
# A list of interfaces which static functions that we generate will
# automatically implement if applicable.  See the pypy/Callback.java,
# node.py/StaticMethodInterface for more information.

jCallbackInterfaces = [] # collects all of the defined JvmCallbackInterfaces
class JvmCallbackInterface(JvmInterfaceType):
    """A Java interface implemented by generated static functions whose
    signature matches (see matches()); registers itself globally in
    jCallbackInterfaces on creation."""

    def __init__(self, name, jargtypes, jrettype):
        JvmInterfaceType.__init__(self, name)
        self.java_argument_types = jargtypes
        self.java_return_type = jrettype
        jCallbackInterfaces.append(self) # add to global list

    def matches(self, jargtypes, jrettype):
        """ Given a set of argument types and a return type for some
        static function defined by the user, returns true if this
        JvmCallbackInterface applies.  Note that the types don't have
        to match exactly: we assume that (in the list of arguments)
        jObject is used as a wildcard, and some adaptation code may
        have to be inserted."""
        if jrettype != self.java_return_type:
            return False
        if len(jargtypes) != len(self.java_argument_types):
            return False
        for expected, actual in zip(self.java_argument_types, jargtypes):
            # hack: assume jObject means any type
            if expected != jObject and expected != actual:
                return False
        return True
# Callback interfaces used by PyPy's custom-dict support: user-supplied
# hashing and equality functions implement these.
jPyPyHashCode = JvmCallbackInterface('pypy.HashCode', [jObject], jInt)
jPyPyEquals = JvmCallbackInterface('pypy.Equals', [jObject, jObject], jBool)
| Python |
from pypy.objspace.flow import model as flowmodel
from pypy.translator.oosupport.metavm import Generator
from pypy.rpython.ootypesystem import ootype
from pypy.rlib.objectmodel import CDefinedIntSymbolic
from pypy.translator.oosupport.constant import push_constant
import pypy.translator.jvm.typesystem as jvmtype
from pypy.translator.jvm.typesystem import \
JvmType, jString, jInt, jLong, jDouble, jBool, jString, \
jPyPy, jVoid, jMath, desc_for_method, jPrintStream, jClass, jChar, \
jObject, jByteArray, jPyPyExcWrap, jIntegerClass, jLongClass, \
jDoubleClass, jCharClass, jStringBuilder, JvmScalarType, jArrayList, \
jObjectArray, jPyPyInterlink, jPyPyCustomDict, jPyPyEquals, \
jPyPyHashCode, jMap, jWeakRef
# ___________________________________________________________________________
# Miscellaneous helper functions
def _isnan(v):
    # NaN is the only float that compares unequal to itself.  The extra
    # clauses are redundant under plain CPython (always False);
    # NOTE(review): presumably they exist to keep the flow-space /
    # RPython translation happy -- confirm before simplifying.
    return v != v*1.0 or (v == 1.0 and v == 2.0)
def _isinf(v):
    # Only an infinity is unchanged by doubling; the v!=0 guard excludes
    # zero, for which 0*2 == 0 as well.
    return v!=0 and (v == v*2)
def _unsigned_to_signed_32(val):
    """Reinterpret the unsigned 32-bit value 'val' (a Python int,
    possibly a long) as the signed Python integer the JVM will see:
    the JVM only has signed integer slots, so unsigned values are
    stored in signed slots."""
    if val > 0x7FFFFFFF:
        return int(_two_comp_32(val))
    return int(val)
def _unsigned_to_signed_64(val):
    """64-bit counterpart of _unsigned_to_signed_32."""
    if val > 0x7FFFFFFFFFFFFFFF:
        return _two_comp_64(val)
    return val
def _two_comp_32(val):
    """Return the (negative) 32-bit two's complement of 'val'."""
    return -((val ^ 0xFFFFFFFF) + 1)
def _two_comp_64(val):
    """Return the (negative) 64-bit two's complement of 'val'."""
    return -((val ^ 0xFFFFFFFFFFFFFFFF) + 1)
# ___________________________________________________________________________
# JVM Opcodes:
#
# Map from symbolic name to an instance of the Opcode class
class Opcode(object):
    """A single JVM opcode, identified by its jasmin mnemonic.

    'flags' is always None at present; it is kept only because older
    code may still inspect the attribute.
    """
    def __init__(self, jvmstr):
        """
        jvmstr is the name for jasmin printouts
        """
        self.jvmstr = jvmstr
        self.flags = None  # unused; see class docstring
    def __repr__(self):
        # Bug fix: this used '%x' % self.flags, which raised TypeError
        # because flags is always None; use %r so repr() always works.
        return "<Opcode %s:%r>" % (self.jvmstr, self.flags)
    def specialize(self, args):
        """ Process the argument list according to the various flags.
        Returns a tuple (OPCODE, ARGS) where OPCODE is a string representing
        the new opcode, and ARGS is a list of arguments or empty tuple.
        Most of these do not do anything. """
        return (self.jvmstr, args)
class IntConstOpcode(Opcode):
    """The ICONST opcode: specializes itself to the short iconst_<n>
    forms for small integer constants."""
    def specialize(self, args):
        assert len(args) == 1
        value = args[0]
        if value == -1:
            return self.jvmstr + "_m1", ()
        if 0 <= value <= 5:
            return "%s_%d" % (self.jvmstr, value), ()
        # Non obvious: outside the iconst_<n> range we fall back to
        # pushing the constant via LDC.
        return "ldc", args
class VarOpcode(Opcode):
    """An opcode taking a local-variable index as argument; indices 0-3
    get the dedicated short forms (e.g. iload_0)."""
    def specialize(self, args):
        assert len(args) == 1
        index = args[0]
        if 0 <= index <= 3:
            return "%s_%d" % (self.jvmstr, index), ()
        return Opcode.specialize(self, args)
class IntClassNameOpcode(Opcode):
    """An opcode whose argument is a JvmType instance; it is rewritten
    into the internal (slash-separated) class name before emission."""
    def specialize(self, args):
        internal_name = args[0].descriptor.int_class_name()
        return self.jvmstr, [internal_name]
class OpcodeFamily(object):
    """
    Many opcodes in JVM have variants that depend on the type of the
    operands; for example, one must choose the correct ALOAD, ILOAD,
    or DLOAD depending on whether one is loading a reference, integer,
    or double variable respectively.  Each instance of this class
    defines one 'family' of opcodes, such as the LOAD family shown
    above, and produces Opcode objects specific to a particular type.
    """
    def __init__(self, opcclass, suffix):
        """
        opcclass --- the Opcode subclass to use when instantiating a
                     particular opcode
        suffix --- common suffix of the family ('load', 'store', ...)
        """
        self.opcode_class = opcclass
        self.suffix = suffix
        self.cache = {}   # prefix -> Opcode instance (one per member)
    def _o(self, prefix):
        # Memoized construction of the opcode 'prefix + suffix'.
        try:
            return self.cache[prefix]
        except KeyError:
            self.cache[prefix] = obj = self.opcode_class(
                prefix+self.suffix)
            return obj
    def for_type(self, argtype):
        """ Returns a customized opcode of this family appropriate to
        'argtype', a JvmType object. """
        desc = argtype.descriptor
        # These are always true:
        if desc[0] == 'L': return self._o("a")   # Objects
        if desc[0] == '[': return self._o("a")   # Arrays
        if desc == 'I':    return self._o("i")   # Integers
        if desc == 'J':    return self._o("l")   # Longs
        if desc == 'D':    return self._o("d")   # Doubles
        if desc == 'V':    return self._o("")    # Void [used by RETURN]
        # Chars/Bytes/Booleans are normally represented as ints
        # in the JVM, but some opcodes are different.  They use a
        # different OpcodeFamily (see ArrayOpcodeFamily for ex)
        if desc == 'C':    return self._o("i")   # Characters
        if desc == 'B':    return self._o("i")   # Bytes
        if desc == 'Z':    return self._o("i")   # Boolean
        # Bug fix: this was 'assert False' followed by an unreachable
        # 'raise NotImplementedError'; under python -O the assert is
        # stripped.  Raise unconditionally instead (same exception type
        # as the assert produced under normal runs).
        raise AssertionError("Unknown argtype=%s" % repr(argtype))
class ArrayOpcodeFamily(OpcodeFamily):
    """ Opcode family for the array access instructions, which (unlike
    most opcodes) distinguish long/double/char/byte element types. """
    # descriptor -> opcode prefix, for the cases that differ from the
    # base family's int-like handling
    _array_prefixes = (('J', 'l'),   # longs
                       ('D', 'd'),   # doubles
                       ('C', 'c'),   # characters
                       ('B', 'b'),   # bytes
                       ('Z', 'b'))   # booleans (accessed as bytes)
    def for_type(self, argtype):
        desc = argtype.descriptor
        for match, prefix in self._array_prefixes:
            if desc == match:
                return self._o(prefix)
        return OpcodeFamily.for_type(self, argtype)
# Define the opcodes for IFNE, IFEQ, IFLT, IF_ICMPLT, etc.  The IFxx
# variants compare a single integer arg against 0, and the IF_ICMPxx
# variants compare 2 integer arguments against each other.  The Opcode
# objects are installed as module-level globals (IFNE, IF_ICMPNE, ...).
for cmpop in ('ne', 'eq', 'lt', 'gt', 'le', 'ge'):
    ifop = "if" + cmpop
    if_icmpop = "if_icmp" + cmpop
    globals()[ifop.upper()] = Opcode(ifop)
    globals()[if_icmpop.upper()] = Opcode(if_icmpop)
# Compare references, either against NULL or against each other
IFNULL = Opcode('ifnull')
IFNONNULL = Opcode('ifnonnull')
IF_ACMPEQ = Opcode('if_acmpeq')
IF_ACMPNE = Opcode('if_acmpne')
# Method invocation
INVOKESTATIC = Opcode('invokestatic')
INVOKEVIRTUAL = Opcode('invokevirtual')
INVOKESPECIAL = Opcode('invokespecial')
INVOKEINTERFACE = Opcode('invokeinterface')
# Other opcodes
LDC = Opcode('ldc') # single-word types
LDC2 = Opcode('ldc2_w') # double-word types: doubles and longs
GOTO = Opcode('goto')
# Constant-pushing opcodes
ICONST = IntConstOpcode('iconst')
ICONST_0 = Opcode('iconst_0') # sometimes convenient to refer to this directly
ACONST_NULL=Opcode('aconst_null')
DCONST_0 = Opcode('dconst_0')
DCONST_1 = Opcode('dconst_1')
LCONST_0 = Opcode('lconst_0')
LCONST_1 = Opcode('lconst_1')
# Field access
GETFIELD = Opcode('getfield')
PUTFIELD = Opcode('putfield')
GETSTATIC = Opcode('getstatic')
PUTSTATIC = Opcode('putstatic')
CHECKCAST = IntClassNameOpcode('checkcast')
# Integer arithmetic / bitwise operations
INEG = Opcode('ineg')
IXOR = Opcode('ixor')
IADD = Opcode('iadd')
ISUB = Opcode('isub')
IMUL = Opcode('imul')
IDIV = Opcode('idiv')
IREM = Opcode('irem')
IAND = Opcode('iand')
IOR = Opcode('ior')
ISHL = Opcode('ishl')
ISHR = Opcode('ishr')
IUSHR = Opcode('iushr')
# Long/double comparisons (push -1/0/1)
LCMP = Opcode('lcmp')
DCMPG = Opcode('dcmpg')
DCMPL = Opcode('dcmpl')
NOP = Opcode('nop')
# Numeric conversions
I2D = Opcode('i2d')
I2L = Opcode('i2l')
D2I= Opcode('d2i')
#D2L= Opcode('d2l') #PAUL
L2I = Opcode('l2i')
L2D = Opcode('l2d')
ATHROW = Opcode('athrow')
# Double arithmetic
DNEG = Opcode('dneg')
DADD = Opcode('dadd')
DSUB = Opcode('dsub')
DMUL = Opcode('dmul')
DDIV = Opcode('ddiv')
DREM = Opcode('drem')
# Long arithmetic / bitwise operations
LNEG = Opcode('lneg')
LADD = Opcode('ladd')
LSUB = Opcode('lsub')
LMUL = Opcode('lmul')
LDIV = Opcode('ldiv')
LREM = Opcode('lrem')
LAND = Opcode('land')
LOR = Opcode('lor')
LXOR = Opcode('lxor')
LSHL = Opcode('lshl')
LSHR = Opcode('lshr')
LUSHR = Opcode('lushr')
# Object creation and operand-stack management
NEW = IntClassNameOpcode('new')
DUP = Opcode('dup')
DUP2 = Opcode('dup2')
DUP_X1 = Opcode('dup_x1')
POP = Opcode('pop')
POP2 = Opcode('pop2')
SWAP = Opcode('swap')
INSTANCEOF= IntClassNameOpcode('instanceof')
# Loading/storing local variables
LOAD = OpcodeFamily(VarOpcode, "load")
STORE = OpcodeFamily(VarOpcode, "store")
RETURN = OpcodeFamily(Opcode, "return")
# Loading/storing from arrays
# *NOTE*: This family is characterized by the type of the ELEMENT,
# not the type of the ARRAY.
#
# Also: here I break from convention by naming the objects ARRLOAD
# rather than ALOAD, even though the suffix is 'aload'. This is to
# avoid confusion with the ALOAD opcode.
ARRLOAD = ArrayOpcodeFamily(Opcode, "aload")
ARRSTORE = ArrayOpcodeFamily(Opcode, "astore")
# ___________________________________________________________________________
# Labels
#
# We use a class here just for sanity checks and debugging print-outs.
class Label(object):
    """A branch target; exists mainly for sanity checks and readable
    debug output."""
    def __init__(self, number, desc):
        """ number is a unique integer
        desc is a short, descriptive string that is a valid java identifier """
        # e.g. Label(3, "begin_try") -> "begin_try_3"
        self.label = "%s_%s" % (desc, number)
    def __repr__(self):
        return "Label(%s)" % (self.label,)
    def jasmin_syntax(self):
        # labels appear in jasmin output exactly as their name
        return self.label
# ___________________________________________________________________________
# Methods
#
# "Method" objects describe all the information needed to invoke a
# method. We create one for each node.Function object, as well as for
# various helper methods (defined below). To invoke a method, you
# push its arguments and then use generator.emit(methobj) where
# methobj is its Method instance.
class Method(object):
    """Full description of how to invoke one JVM method.

    Build instances via the shorthands c() (constructor), v() (virtual
    or interface) and s() (static).  To invoke a method, push its
    arguments and then pass the Method object to generator.emit().
    """

    @staticmethod
    def c(classty, argtypes):
        """Shorthand for the constructor (<init>) of class 'classty'."""
        return Method(classty.name, "<init>", argtypes, jVoid,
                      opcode=INVOKESPECIAL)

    @staticmethod
    def v(classty, methnm, argtypes, rettype):
        """
        Shorthand to create a virtual (or interface) method.
        'classty' - JvmType object for the class
        'methnm' - name of the method (Python string)
        'argtypes' - list of JvmType objects, one for each argument but
        not the this ptr
        'rettype' - JvmType for return type
        """
        assert argtypes is not None
        assert rettype is not None
        if isinstance(classty, jvmtype.JvmInterfaceType):
            opc = INVOKEINTERFACE
        else:
            assert isinstance(classty, jvmtype.JvmClassType)
            opc = INVOKEVIRTUAL
        return Method(classty.name, methnm, argtypes, rettype, opcode=opc)

    @staticmethod
    def s(classty, methnm, argtypes, rettype):
        """
        Shorthand to create a static method; arguments as for v().
        """
        assert isinstance(classty, JvmType)
        return Method(classty.name, methnm, argtypes, rettype)

    def __init__(self, classnm, methnm, argtypes, rettype, opcode=INVOKESTATIC):
        self.opcode = opcode
        self.class_name = classnm        # String, e.g. "java.lang.Math"
        self.method_name = methnm        # String, e.g. "abs"
        self.argument_types = argtypes   # list of JvmTypes
        self.return_type = rettype       # JvmType
        # Precompute the JVM method descriptor, a string like "(II)I":
        self.descriptor = desc_for_method(
            [a.descriptor for a in argtypes], rettype.descriptor)

    def invoke(self, gen):
        gen._instr(self.opcode, self)

    def is_static(self):
        return self.opcode == INVOKESTATIC

    def jasmin_syntax(self):
        res = "%s/%s%s" % (self.class_name.replace('.','/'),
                           self.method_name,
                           self.descriptor)
        # A weird, inexplicable quirk of Jasmin syntax is that it requires
        # the number of arguments (including the receiver) after an
        # invokeinterface call:
        if self.opcode == INVOKEINTERFACE:
            res += " %d" % (len(self.argument_types)+1,)
        return res
# Commonly used methods on java.lang.Object and on the boxed types
OBJHASHCODE = Method.v(jObject, 'hashCode', (), jInt)
OBJTOSTRING = Method.v(jObject, 'toString', (), jString)
OBJEQUALS = Method.v(jObject, 'equals', (jObject,), jBool)
INTTOSTRINGI = Method.s(jIntegerClass, 'toString', (jInt,), jString)
LONGTOSTRINGL = Method.s(jLongClass, 'toString', (jLong,), jString)
DOUBLETOSTRINGD = Method.s(jDoubleClass, 'toString', (jDouble,), jString)
CHARTOSTRINGC = Method.s(jCharClass, 'toString', (jChar,), jString)
# java.lang.Math functions plus the overflow-checking ("_ovf") helpers
# on the PyPy support class
MATHIABS = Method.s(jMath, 'abs', (jInt,), jInt)
IABSOVF = Method.s(jPyPy, 'abs_ovf', (jInt,), jInt)
MATHLABS = Method.s(jMath, 'abs', (jLong,), jLong)
LABSOVF = Method.s(jPyPy, 'abs_ovf', (jLong,), jLong)
MATHDABS = Method.s(jMath, 'abs', (jDouble,), jDouble)
INEGOVF = Method.s(jPyPy, 'negate_ovf', (jInt,), jInt)
LNEGOVF = Method.s(jPyPy, 'negate_ovf', (jLong,), jLong)
IADDOVF = Method.s(jPyPy, 'add_ovf', (jInt, jInt), jInt)
LADDOVF = Method.s(jPyPy, 'add_ovf', (jLong, jLong), jLong)
ISUBOVF = Method.s(jPyPy, 'sub_ovf', (jInt, jInt), jInt)
LSUBOVF = Method.s(jPyPy, 'sub_ovf', (jLong, jLong), jLong)
IMULOVF = Method.s(jPyPy, 'multiply_ovf', (jInt, jInt), jInt)
LMULOVF = Method.s(jPyPy, 'multiply_ovf', (jLong, jLong), jLong)
MATHFLOOR = Method.s(jMath, 'floor', (jDouble,), jDouble)
IFLOORDIVOVF = Method.s(jPyPy, 'floordiv_ovf', (jInt, jInt), jInt)
LFLOORDIVOVF = Method.s(jPyPy, 'floordiv_ovf', (jLong, jLong), jLong)
IFLOORDIVZEROVF = Method.s(jPyPy, 'floordiv_zer_ovf', (jInt, jInt), jInt)
LFLOORDIVZEROVF = Method.s(jPyPy, 'floordiv_zer_ovf', (jLong, jLong), jLong)
IREMOVF = Method.s(jPyPy, 'mod_ovf', (jInt, jInt), jInt)
LREMOVF = Method.s(jPyPy, 'mod_ovf', (jLong, jLong), jLong)
MATHDPOW = Method.s(jMath, 'pow', (jDouble, jDouble), jDouble)
PRINTSTREAMPRINTSTR = Method.v(jPrintStream, 'print', (jString,), jVoid)
CLASSFORNAME = Method.s(jClass, 'forName', (jString,), jClass)
CLASSISASSIGNABLEFROM = Method.v(jClass, 'isAssignableFrom', (jClass,), jBool)
# Conversion/serialization helpers on the PyPy support class
PYPYAPPEND = Method.s(jPyPy, 'append',
                      (jStringBuilder, jString), jVoid)
PYPYUINTCMP = Method.s(jPyPy, 'uint_cmp', (jInt,jInt,), jInt)
PYPYULONGCMP = Method.s(jPyPy, 'ulong_cmp', (jLong,jLong), jInt)
PYPYUINTTODOUBLE = Method.s(jPyPy, 'uint_to_double', (jInt,), jDouble)
PYPYDOUBLETOUINT = Method.s(jPyPy, 'double_to_uint', (jDouble,), jInt)
PYPYDOUBLETOLONG = Method.s(jPyPy, 'double_to_long', (jDouble,), jLong) #PAUL
PYPYLONGBITWISENEGATE = Method.s(jPyPy, 'long_bitwise_negate', (jLong,), jLong)
PYPYSTRTOINT = Method.s(jPyPy, 'str_to_int', (jString,), jInt)
PYPYSTRTOUINT = Method.s(jPyPy, 'str_to_uint', (jString,), jInt)
PYPYSTRTOLONG = Method.s(jPyPy, 'str_to_long', (jString,), jLong)
PYPYSTRTOULONG = Method.s(jPyPy, 'str_to_ulong', (jString,), jLong)
PYPYSTRTOBOOL = Method.s(jPyPy, 'str_to_bool', (jString,), jBool)
PYPYSTRTODOUBLE = Method.s(jPyPy, 'str_to_double', (jString,), jDouble)
PYPYSTRTOCHAR = Method.s(jPyPy, 'str_to_char', (jString,), jChar)
PYPYDUMP = Method.s(jPyPy, 'dump', (jString,), jVoid)
PYPYDUMPEXCWRAPPER = Method.s(jPyPy, 'dump_exc_wrapper', (jObject,), jVoid)
PYPYSERIALIZEBOOLEAN = Method.s(jPyPy, 'serialize_boolean', (jBool,), jString)
PYPYSERIALIZEUINT = Method.s(jPyPy, 'serialize_uint', (jInt,), jString)
PYPYSERIALIZEVOID = Method.s(jPyPy, 'serialize_void', (), jString)
PYPYESCAPEDCHAR = Method.s(jPyPy, 'escaped_char', (jChar,), jString)
PYPYESCAPEDSTRING = Method.s(jPyPy, 'escaped_string', (jString,), jString)
PYPYSERIALIZEOBJECT = Method.s(jPyPy, 'serializeObject', (jObject,), jString)
PYPYRUNTIMENEW = Method.s(jPyPy, 'RuntimeNew', (jClass,), jObject)
PYPYSTRING2BYTES = Method.s(jPyPy, 'string2bytes', (jString,), jByteArray)
PYPYARRAYTOLIST = Method.s(jPyPy, 'array_to_list', (jObjectArray,), jArrayList)
OBJECTGETCLASS = Method.v(jObject, 'getClass', (), jClass)
CLASSGETNAME = Method.v(jClass, 'getName', (), jString)
CUSTOMDICTMAKE = Method.s(jPyPyCustomDict, 'make',
                          (jPyPyEquals, jPyPyHashCode), jPyPyCustomDict)
# ___________________________________________________________________________
# Fields
#
# Field objects encode information about fields.
class Field(object):
    """Describes one (public) field of a JVM class: enough information
    to emit loads and stores of it."""
    def __init__(self, classnm, fieldnm, jtype, static, OOTYPE=None):
        # All fields are public
        self.class_name = classnm    # String, e.g. "java.lang.Math"
        self.field_name = fieldnm    # String, e.g. "someField"
        self.OOTYPE = OOTYPE         # OOTYPE equivalent of JvmType, may be None
        self.jtype = jtype           # JvmType
        self.is_static = static      # True or False
    def load(self, gen):
        # getstatic for static fields, getfield otherwise
        if self.is_static:
            gen._instr(GETSTATIC, self)
        else:
            gen._instr(GETFIELD, self)
    def store(self, gen):
        # putstatic for static fields, putfield otherwise
        if self.is_static:
            gen._instr(PUTSTATIC, self)
        else:
            gen._instr(PUTFIELD, self)
    def jasmin_syntax(self):
        # e.g. "java/lang/System/out Ljava/io/PrintStream;"
        return "%s/%s %s" % (self.class_name.replace('.','/'),
                             self.field_name,
                             self.jtype.descriptor)
class Property(object):
    """
    An object which acts like a Field, but when a value is loaded or
    stored it actually invokes accessor methods.
    """
    def __init__(self, field_name, get_method, put_method, OOTYPE=None):
        self.get_method = get_method
        self.put_method = put_method
        self.field_name = field_name
        self.OOTYPE = OOTYPE
        # Synthesize the Field attributes from the get_method/put_method:
        self.class_name = get_method.class_name
        assert put_method.class_name == self.class_name
        self.jtype = get_method.return_type
        # Bug fix: Method.is_static is a *method*; the old code stored
        # the bound method itself (always truthy), so every Property
        # looked static to code doing 'if fieldobj.is_static:'.  Call
        # it so is_static is a bool, matching Field.
        self.is_static = get_method.is_static()
    def load(self, gen):
        self.get_method.invoke(gen)
    def store(self, gen):
        self.put_method.invoke(gen)
    # jasmin_syntax is not needed, since this object itself never appears
    # as an argument to an Opcode
# Well-known static fields (all on java.lang classes, except the
# interlink slot on the PyPy support class)
SYSTEMOUT = Field('java.lang.System', 'out', jPrintStream, True)
SYSTEMERR = Field('java.lang.System', 'err', jPrintStream, True)
DOUBLENAN = Field('java.lang.Double', 'NaN', jDouble, True)
DOUBLEPOSINF = Field('java.lang.Double', 'POSITIVE_INFINITY', jDouble, True)
DOUBLENEGINF = Field('java.lang.Double', 'NEGATIVE_INFINITY', jDouble, True)
PYPYINTERLINK= Field(jPyPy.name, 'interlink', jPyPyInterlink, True)
# ___________________________________________________________________________
# Generator State
class ClassState(object):
    """ Tracks the state of one class definition, from begin_class()
    until end_class().  The 'file' attribute (the output stream used by
    out()) is attached externally before out() is called. """
    def __init__(self, classty, superclassty):
        self.class_type = classty            # JvmType being defined
        self.superclass_type = superclassty  # JvmType of its superclass
    def out(self, arg):
        # write raw text to the class's output file
        self.file.write(arg)
class FunctionState(object):
    """ Tracks the state of one function definition, from
    begin_function() until end_function(): the mapping of variables to
    JVM frame slots plus the running slot and instruction counters. """
    def __init__(self):
        self.next_offset = 0          # next free local-variable slot
        self.local_vars = {}          # variable object -> slot index
        self.function_arguments = []  # list of (jtype, slot index)
        self.instr_counter = 0
    def add_var(self, jvar, jtype):
        """ Adds new entry for variable 'jvar', of java type 'jtype' """
        idx = self.next_offset
        # longs and doubles occupy two slots; type_width() knows which
        self.next_offset += jtype.descriptor.type_width()
        if jvar:
            assert jvar not in self.local_vars # never been added before
            self.local_vars[jvar] = idx
        self.function_arguments.append((jtype, idx))
        return idx
    def var_offset(self, jvar, jtype):
        """ Returns offset for variable 'jvar', of java type 'jtype',
        allocating a new slot on first use """
        try:
            return self.local_vars[jvar]
        except KeyError:
            return self.add_var(jvar, jtype)
# ___________________________________________________________________________
# Generator
class JVMGenerator(Generator):
    """ Base class for all JVM generators. Invokes a small set of '_'
    methods which indicate which opcodes to emit; these can be
    translated by a subclass into Jasmin assembly, binary output, etc.
    Must be inherited from to specify a particular output format;
    search for the string 'unimplemented' to find the methods that
    must be overloaded. """
    def __init__(self, db):
        # db: the translation database (maps OOTYPEs/graphs to JVM
        # equivalents elsewhere via lltype_to_cts() etc.)
        self.db = db
        self.label_counter = 0   # counter backing unique_label()
        self.curclass = None     # ClassState while inside begin/end_class
        self.curfunc = None      # FunctionState while inside begin/end_function
    # __________________________________________________________________
    # JVM specific methods to be overloaded by a subclass
    #
    # If the name does not begin with '_', it will be called from
    # outside the generator.
    def begin_class(self, classty, superclsty,
                    abstract=False, interface=False):
        """
        Begins a class declaration. Overall flow of class declaration
        looks like:
        begin_class()
        {implements()}
        {add_field()}
        begin_constructor()...end_constructor()
        [begin_function()...end_function()]
        end_class()
        Where items in brackets may appear anywhere from 0 to inf times.
        classty --- JvmType for the class
        superclsty --- JvmType for the superclass
        """
        assert not self.curclass   # no nested class definitions
        self.curclass = ClassState(classty, superclsty)
        self._begin_class(abstract, interface)
    def end_class(self):
        self._end_class()
        # leaving class scope also invalidates any current function
        self.curclass = None
        self.curfunc = None
    def current_type(self):
        """ Returns the jvm type we are currently defining.  If
        begin_class() has not been called, this raises (curclass is
        None). """
        return self.curclass.class_type
    def _begin_class(self, abstract, interface):
        """ Main implementation of begin_class """
        raise NotImplementedError
    def _end_class(self):
        """ Main implementation of end_class """
        raise NotImplementedError
    def implements(self, jinterface):
        """
        Indicates that the current class implements the interface
        jinterface, which should be a JvmType.
        """
        raise NotImplementedError
    def add_field(self, fobj):
        """
        fobj: a Field object
        """
        unimplemented # deliberate NameError: subclass must override
    def begin_constructor(self):
        """
        Emits the constructor for this class, which merely invokes the
        parent constructor (taken from self.curclass.superclass_type).
        """
        self.begin_function("<init>", [], [self.current_type()], jVoid)
        self.load_jvm_var(self.current_type(), 0)
        jmethod = Method(self.curclass.superclass_type.name, "<init>",
                         (), jVoid, opcode=INVOKESPECIAL)
        jmethod.invoke(self)
    def end_constructor(self):
        self.return_val(jVoid)
        self.end_function()
    def begin_j_function(self, cls_obj, method_obj, abstract=False):
        """
        A convenience function that invokes begin_function() with the
        appropriate arguments to define a method on class 'cls_obj' that
        could be invoked with 'method_obj'.
        """
        return self.begin_function(method_obj.method_name,
                                   [],
                                   [cls_obj]+method_obj.argument_types,
                                   method_obj.return_type,
                                   static=method_obj.is_static(),
                                   abstract=abstract)
    def begin_function(self, funcname, argvars, argtypes, rettype,
                       static=False, abstract=False):
        """
        funcname --- name of the function
        argvars --- list of objects passed to load() that represent arguments;
        should be in order, or () if load() will not be used
        argtypes --- JvmType for each argument [INCLUDING this]
        rettype --- JvmType for the return value
        static --- keyword, if true then a static func is generated
        This function also defines the scope for variables passed to
        load()/store().
        """
        # Compute the indices of each argument in the local variables
        # for the function.  Note that some arguments take up two slots
        # depending on their type [this is computed by type_width()]
        assert not self.curfunc   # no nested function definitions
        self.curfunc = FunctionState()
        for idx, ty in enumerate(argtypes):
            if idx < len(argvars): var = argvars[idx]
            else: var = None
            self.curfunc.add_var(var, ty)
        # Prepare a map for the local variable indices we will add
        # Let the subclass do the rest of the work; note that it does
        # not need to know the argvars parameter, so don't pass it
        self._begin_function(funcname, argtypes, rettype, static, abstract)
    def _begin_function(self, funcname, argtypes, rettype, static, abstract):
        """
        Main implementation of begin_function.  The begin_function()
        does some generic handling of args.
        """
        unimplemented # deliberate NameError: subclass must override
    def end_function(self):
        self._end_function()
        self.curfunc = None
    def _end_function(self):
        unimplemented # deliberate NameError: subclass must override
    def mark(self, lbl):
        """ Marks the point that a label indicates. """
        unimplemented # deliberate NameError: subclass must override
    def _instr(self, opcode, *args):
        """ Emits an instruction with the given opcode and arguments.
        The correct opcode and their types depends on the opcode. """
        unimplemented # deliberate NameError: subclass must override
    def return_val(self, jtype):
        """ Returns a value from top of stack of the JvmType 'jtype' """
        self._instr(RETURN.for_type(jtype))
    def load_class_name(self):
        """ Loads the name of the *Java* class of the object on the top of
        the stack as a Java string.  Note that the result for a PyPy
        generated class will look something like 'pypy.some.pkg.cls' """
        self.emit(OBJECTGETCLASS)
        self.emit(CLASSGETNAME)
    def load_string(self, str):
        """ Pushes a Java version of a Python string onto the stack.
        'str' should be a Python string encoded in UTF-8 (I think) """
        # Create an escaped version of str, quoted for jasmin output:
        def escape(char):
            if char == '"': return r'\"'
            if char == '\n': return r'\n'
            if char == "\\": return r'\\'
            if ord(char) > 127: return r'\u%04x' % ord(char)
            return char
        res = ('"' +
               "".join(escape(c) for c in str) +
               '"')
        # Use LDC to load the Java version:
        # XXX --- support byte arrays here? Would be trickier!
        self._instr(LDC, res)
    def load_jvm_var(self, jvartype, varidx):
        """ Loads from jvm slot #varidx, which is expected to hold a value of
        type jvartype """
        assert varidx < self.curfunc.next_offset
        opc = LOAD.for_type(jvartype)
        # ('load_jvm_jar' in this debug comment looks like a typo for
        # 'load_jvm_var'; it only affects emitted comments, left as-is)
        self.add_comment(" load_jvm_jar: jvartype=%s varidx=%s" % (
            repr(jvartype), repr(varidx)))
        self._instr(opc, varidx)
    def store_jvm_var(self, vartype, varidx):
        """ Stores into jvm slot #varidx a value of type vartype taken
        from the top of the stack """
        self.add_comment(" store_jvm_jar: vartype=%s varidx=%s" % (
            repr(vartype), repr(varidx)))
        self._instr(STORE.for_type(vartype), varidx)
    def load_from_array(self, elemtype):
        """ Loads something from an array; the result will be of type 'elemtype'
        (and hence the array is of type 'array_of(elemtype)'), where
        'elemtype' is a JvmType.  Assumes that the array ref and index are
        already pushed onto stack (in that order). """
        self._instr(ARRLOAD.for_type(elemtype))
def store_to_array(self, elemtype):
""" Stores something into an array; the result will be of type
'elemtype' (and hence the array is of type
'array_of(elemtype)'), where 'elemtype' is a JvmType. Assumes
that the array ref, index, and value are already pushed onto
stack (in that order)."""
self._instr(ARRLOAD.for_type(elemtype))
    def unique_label(self, desc, mark=False):
        """ Returns an opaque, unique label object that can be passed an
        argument for branching opcodes, or the mark instruction.
        'desc' should be a comment describing the use of the label.
        It is for decorative purposes only and should be a valid C
        identifier.
        'mark' --- if True, then also calls self.mark() with the new lbl """
        res = Label(self.label_counter, desc)
        self.label_counter += 1   # guarantees uniqueness
        if mark:
            self.mark(res)
        return res
    def load_this_ptr(self):
        """ Convenience method.  Be sure you only call it from a
        virtual method, not static methods (slot 0 is 'this' only in
        virtual methods). """
        self.load_jvm_var(jObject, 0)
    def load_function_argument(self, index):
        """ Convenience method.  Loads function argument #index; note that
        the this pointer is index #0. """
        jtype, jidx = self.curfunc.function_arguments[index]
        self.load_jvm_var(jtype, jidx)
    def prepare_generic_argument(self, ITEMTYPE):
        # Adapt a value of OOTYPE 'ITEMTYPE' on the stack for a generic
        # (java.lang.Object-typed) parameter slot.
        jty = self.db.lltype_to_cts(ITEMTYPE)
        self.prepare_generic_argument_with_jtype(jty)
    def prepare_generic_argument_with_jtype(self, jty):
        # Void arguments are passed as null; scalar values must be boxed.
        if jty is jvmtype.jVoid:
            self.emit(ACONST_NULL)
        elif isinstance(jty, JvmScalarType):
            self.box_value(jty)
    def prepare_generic_result(self, ITEMTYPE):
        # Adapt a generic (Object-typed) result on the stack back to the
        # concrete JVM type for OOTYPE 'ITEMTYPE'.
        jresty = self.db.lltype_to_cts(ITEMTYPE)
        self.prepare_generic_result_with_jtype(jresty)
    def prepare_generic_result_with_jtype(self, jresty):
        if jresty is jvmtype.jVoid:
            self.emit(POP)
        elif isinstance(jresty, JvmScalarType):
            # Perform any un-boxing required:
            self.downcast_jtype(jresty.box_type)
            self.unbox_value(jresty)
        else:
            # Perform any casting required:
            self.downcast_jtype(jresty)
    def box_value(self, jscalartype):
        """ Assuming that a value of type jscalartype is on the stack,
        boxes it into an Object (via the box type's static valueOf()). """
        jclasstype = jscalartype.box_type
        jmethod = Method.s(jclasstype, 'valueOf', (jscalartype,), jclasstype)
        self.emit(jmethod)
    def unbox_value(self, jscalartype):
        """ Assuming that a boxed value of type jscalartype is on the stack,
        unboxes it (via the box type's intValue()/doubleValue()/etc.). """
        jclasstype = jscalartype.box_type
        jmethod = Method.v(
            jclasstype, jscalartype.unbox_method, (), jscalartype)
        self.emit(jmethod)
    def swap(self):
        """ Swaps the two words highest on the stack. """
        self.emit(SWAP)
    # __________________________________________________________________
    # Exception Handling
    def begin_try(self):
        """
        Begins a try/catch region.  Must be followed by a call to end_try()
        after the code w/in the try region is complete.
        """
        self.begintrylbl = self.unique_label("begin_try", mark=True)
    def end_try(self):
        """
        Ends a try/catch region.  Must be followed immediately
        by a call to begin_catch().
        """
        self.endtrylbl = self.unique_label("end_try", mark=True)
    def begin_catch(self, jexcclsty):
        """
        Begins a catch region corresponding to the last try; there can
        be more than one call to begin_catch, in which case the last
        try region is reused.
        'jexcclsty' --- a JvmType for the class of exception to be caught
        """
        catchlbl = self.unique_label("catch", mark=True)
        self.try_catch_region(
            jexcclsty, self.begintrylbl, self.endtrylbl, catchlbl)
    def end_catch(self):
        """
        Ends a catch region.
        (Included for CLI compatibility; no-op here.)
        """
        return
    def try_catch_region(self, jexcclsty, trystartlbl, tryendlbl, catchlbl):
        """
        Indicates a try/catch region.
        Either invoked directly, or from the begin_catch() routine:
        the latter is invoked by the oosupport code.
        'jexcclsty' --- a JvmType for the class of exception to be caught
        'trystartlbl', 'tryendlbl' --- labels marking the beginning and end
        of the try region
        'catchlbl' --- label marking beginning of catch region
        """
        unimplemented # deliberate NameError: subclass must override
    # Dispatch table: OOTYPE -> (comparison opcode or None, branch opcode)
    # used by compare_values() below.
    # NOTE(review): after LCMP/DCMPG a result of zero means the values
    # are *equal*, so IFEQ would jump to unequal_lbl when they are equal
    # -- this looks inverted relative to compare_values' docstring
    # (cf. the IF_ICMPNE entries and the IFEQ-after-OBJEQUALS fallback,
    # which jump on *inequality*); confirm before relying on it.
    _equals = {
        ootype.Void: (None,None),
        ootype.SignedLongLong: (LCMP,IFEQ),
        ootype.UnsignedLongLong: (LCMP,IFEQ),
        ootype.Float: (DCMPG,IFEQ),
        ootype.Signed: (None,IF_ICMPNE),
        ootype.Unsigned: (None,IF_ICMPNE),
        ootype.Bool: (None,IF_ICMPNE),
        ootype.Char: (None,IF_ICMPNE),
        ootype.UniChar: (None,IF_ICMPNE),
        }
    def compare_values(self, OOTYPE, unequal_lbl):
        """ Assumes that two instances of OOTYPE are pushed on the stack;
        compares them and jumps to 'unequal_lbl' if they are unequal """
        if OOTYPE in self._equals:
            i1, i2 = self._equals[OOTYPE]
            if i1: self.emit(i1)
            if i2: self.emit(i2, unequal_lbl)
            return
        # any other type falls back to Object.equals()
        self.emit(OBJEQUALS)
        self.emit(IFEQ, unequal_lbl)
    # Dispatch table: OOTYPE -> opcode (or None) that reduces the value
    # on the stack to an int hash; used by hash_value() below.
    _hash = {
        ootype.Void: ICONST_0,
        ootype.SignedLongLong: L2I,
        ootype.UnsignedLongLong: L2I,
        ootype.Float: D2I,
        ootype.Signed: None,
        ootype.Unsigned: None,
        ootype.Bool: None,
        ootype.Char: None,
        ootype.UniChar: None,
        }
    def hash_value(self, OOTYPE):
        """ Assumes that an instance of OOTYPE is pushed on the stack.
        When finished, an int will be on the stack as a hash value. """
        if OOTYPE in self._hash:
            i1 = self._hash[OOTYPE]
            if i1: self.emit(i1)
            return
        # any other type falls back to Object.hashCode()
        self.emit(OBJHASHCODE)
    # __________________________________________________________________
    # Generator methods and others that are invoked by MicroInstructions
    #
    # These translate into calls to the above methods.
    def emit(self, instr, *args):
        """ 'instr' in our case must be either a string, in which case
        it is the name of a method to invoke, or an Opcode/Method/Field/
        Property object (defined above)."""
        if isinstance(instr, str):
            return getattr(self, instr)(*args)
        if isinstance(instr, Opcode):
            return self._instr(instr, *args)
        if isinstance(instr, Method):
            return instr.invoke(self)
        if isinstance(instr, Field) or isinstance(instr, Property):
            return instr.load(self)
        raise Exception("Unknown object in call to emit(): "+repr(instr))
    def _var_data(self, v):
        # Returns (jvm type, frame slot index) for flow variable 'v'.
        # Determine java type:
        jty = self.db.lltype_to_cts(v.concretetype)
        # Determine index in stack frame slots:
        # note that arguments and locals can be treated the same here
        return jty, self.curfunc.var_offset(v, jty)
    def load(self, value):
        """ Pushes 'value' (a flowmodel Variable or Constant) onto the
        stack. """
        if isinstance(value, flowmodel.Variable):
            jty, idx = self._var_data(value)
            return self.load_jvm_var(jty, idx)
        if isinstance(value, flowmodel.Constant):
            return push_constant(self.db, value.concretetype, value.value, self)
        raise Exception('Unexpected type for v in load(): '+
                        repr(value.concretetype) + " v=" + repr(value))
def store(self, v):
# Ignore Void values
if v.concretetype is ootype.Void:
return
if isinstance(v, flowmodel.Variable):
jty, idx = self._var_data(v)
return self.store_jvm_var(jty, idx)
raise Exception('Unexpected type for v in store(): '+v)
def set_field(self, CONCRETETYPE, fieldname):
clsobj = self.db.pending_class(CONCRETETYPE)
fieldobj = clsobj.lookup_field(fieldname)
fieldobj.store(self)
def get_field(self, CONCRETETYPE, fieldname):
clsobj = self.db.pending_class(CONCRETETYPE)
fieldobj = clsobj.lookup_field(fieldname)
fieldobj.load(self)
    def downcast(self, TYPE):
        """ Emits a checked cast of the top of the stack to ootype TYPE. """
        jtype = self.db.lltype_to_cts(TYPE)
        self.downcast_jtype(jtype)
    def downcast_jtype(self, jtype):
        """ Emits a CHECKCAST of the top of the stack to JVM type 'jtype'. """
        self._instr(CHECKCAST, jtype)
    def instanceof(self, TYPE):
        """ Tests whether the top of the stack is an instance of ootype
        TYPE, consuming it and leaving an int 0/1. """
        jtype = self.db.lltype_to_cts(TYPE)
        self._instr(INSTANCEOF, jtype)
    # included for compatibility with oosupport, but instanceof_jtype
    # follows our naming convention better
    def isinstance(self, jtype):
        # Alias; delegates directly to instanceof_jtype.
        return self.instanceof_jtype(jtype)
    def instanceof_jtype(self, jtype):
        """ Like instanceof(), but takes a JVM type rather than an ootype. """
        self._instr(INSTANCEOF, jtype)
    def branch_unconditionally(self, target_label):
        """ oosupport-interface name for an unconditional jump. """
        self.goto(target_label)
    def branch_conditionally(self, cond, target_label):
        """ Consumes the int on the stack; branches to target_label when
        it is true (if 'cond') or false (if 'not cond'). """
        if cond:
            self._instr(IFNE, target_label)
        else:
            self._instr(IFEQ, target_label)
    def branch_if_equal(self, target_label):
        """ Consumes two ints; branches to target_label if they are equal. """
        self._instr(IF_ICMPEQ, target_label)
    def call_graph(self, graph):
        """ Invokes the (possibly not yet generated) function for 'graph'. """
        mthd = self.db.pending_function(graph)
        mthd.invoke(self)
    def call_method(self, OOCLASS, method_name):
        """ Invokes 'method_name' on the OOCLASS instance on the stack,
        adapting the result when the JVM-level (generic) return type
        differs from the intended ootype return type. """
        clsobj = self.db.pending_class(OOCLASS)
        mthd = clsobj.lookup_method(method_name)
        mthd.invoke(self)
        # Check if we have to convert the result type at all:
        gener = jvmtype.Generifier(OOCLASS)
        RETTYPE = gener.full_types(method_name)[1]
        jrettype = self.db.lltype_to_cts(RETTYPE)
        if jrettype != mthd.return_type:
            # if the intended return type is not the same as the
            # actual return type in the JVM (mthd.return_type),
            # we have to "deal with it"
            self.prepare_generic_result(RETTYPE)
    def call_primitive(self, graph):
        """ Invokes the static helper on the jPyPy class that implements
        the primitive described by 'graph'. """
        argtypes, rettype = self.db.types_for_graph(graph)
        mthd = Method.s(jPyPy, graph.func.func_name, argtypes, rettype)
        self.emit(mthd)
    def call_oostring(self, OOTYPE):
        """ Invokes pypy.oostring() on the value of OOTYPE plus an int
        already on the stack, leaving a string (converted to a byte
        array when the db is configured to use byte arrays). """
        cts_type = self.db.lltype_to_cts(OOTYPE)
        # treat all objects the same:
        if isinstance(cts_type, jvmtype.JvmClassType):
            cts_type = jObject
        mthd = Method.s(jPyPy, 'oostring', [cts_type, jInt], jString)
        self.emit(mthd)
        if self.db.using_byte_array:
            self.emit(PYPYSTRING2BYTES)
    def new(self, TYPE):
        """ Instantiates an object of ootype TYPE, leaving it on the stack. """
        jtype = self.db.lltype_to_cts(TYPE)
        self.new_with_jtype(jtype)
    def new_with_jtype(self, jtype):
        """ Instantiates 'jtype' via its no-argument constructor,
        leaving the new object on the stack (hence the DUP: one copy is
        consumed by the constructor call). """
        ctor = Method.c(jtype, ())
        self.emit(NEW, jtype)
        self.emit(DUP)
        self.emit(ctor)
    def instantiate(self):
        """ Emits the PYPYRUNTIMENEW helper; presumably consumes a class
        object on the stack (see getclassobject) and pushes a fresh
        instance -- confirm against the helper's definition. """
        self.emit(PYPYRUNTIMENEW)
    def getclassobject(self, OOINSTANCE):
        """ Pushes the java.lang.Class object for OOINSTANCE's JVM type,
        looked up by name via Class.forName. """
        jvmtype = self.db.lltype_to_cts(OOINSTANCE)
        self.load_string(jvmtype.name)
        CLASSFORNAME.invoke(self)
    def dup(self, OOTYPE):
        """ Duplicates the top stack entry, which has ootype OOTYPE. """
        jvmtype = self.db.lltype_to_cts(OOTYPE)
        self.dup_jtype(jvmtype)
def dup_jtype(self, jvmtype):
if jvmtype.descriptor.type_width() == 1:
self.emit(DUP)
else:
self.emit(DUP2)
def pop(self, OOTYPE):
jvmtype = self.db.lltype_to_cts(OOTYPE)
if jvmtype.descriptor.type_width() == 1:
self.emit(POP)
else:
self.emit(POP2)
    def push_null(self, OOTYPE):
        """ Pushes the null reference; OOTYPE is ignored. """
        self.emit(ACONST_NULL)
    # Symbolic integer constants (CDefinedIntSymbolic) we can resolve to
    # a concrete value at translation time.
    DEFINED_INT_SYMBOLICS = {'MALLOC_ZERO_FILLED':1}
    def push_primitive_constant(self, TYPE, value):
        """ Pushes the constant 'value' of primitive (or string) ootype
        TYPE onto the stack.  Void constants produce no code. """
        if TYPE is ootype.Void:
            return
        elif isinstance(value, CDefinedIntSymbolic):
            self.emit(ICONST, self.DEFINED_INT_SYMBOLICS[value.expr])
        elif TYPE in (ootype.Bool, ootype.Signed):
            self.emit(ICONST, int(value))
        elif TYPE is ootype.Unsigned:
            # Converts the unsigned int into its corresponding signed value
            # and emits it using ICONST.
            self.emit(ICONST, _unsigned_to_signed_32(value))
        elif TYPE is ootype.Char or TYPE is ootype.UniChar:
            self.emit(ICONST, ord(value))
        elif TYPE is ootype.SignedLongLong:
            self._push_long_constant(long(value))
        elif TYPE is ootype.UnsignedLongLong:
            # Same trick as Unsigned: reinterpret the bits as signed.
            self._push_long_constant(_unsigned_to_signed_64(value))
        elif TYPE is ootype.Float:
            self._push_double_constant(float(value))
        elif TYPE is ootype.String:
            if value == ootype.null(ootype.String):
                self.emit(ACONST_NULL)
            else:
                self.load_string(str(value._str))
def _push_long_constant(self, value):
if value == 0:
self.emit(LCONST_0)
elif value == 1:
self.emit(LCONST_1)
else:
self.emit(LDC2, value)
    def _push_double_constant(self, value):
        """ Pushes a double constant, with special cases for NaN,
        +/-infinity (loaded from predefined static fields), 0.0 and
        1.0 (short opcodes). """
        if _isnan(value):
            DOUBLENAN.load(self)
        elif _isinf(value):
            if value > 0: DOUBLEPOSINF.load(self)
            else: DOUBLENEGINF.load(self)
        elif value == 0.0:
            self.emit(DCONST_0)
        elif value == 1.0:
            self.emit(DCONST_1)
        else:
            # Big hack to avoid exponential notation:
            # NOTE(review): fixed-point formatting with 22 digits may
            # lose precision for very large/small doubles -- confirm.
            self.emit(LDC2, "%22.22f" % value)
    def prepare_cast_ptr_to_weak_address(self):
        """
        To cast a pointer to a weak ref is a 2-step process.
        First, invoke this routine.  Then, invoke what is needed
        to push the value, then invoke finalize_cast_ptr_to_weak_address
        """
        # NEW+DUP: leave an extra reference to the uninitialized WeakRef
        # so that one survives the constructor call in the finalize step.
        self.emit(NEW, jWeakRef)
        self.emit(DUP)
    def finalize_cast_ptr_to_weak_address(self, OOTYPE):
        """
        After prepare_cast_ptr_to_weak_address has been called, and the
        ptr to cast has been pushed, you can invoke this routine.
        OOTYPE should be the type of value which was pushed.
        The result will be that at the top of the stack is a weak reference.
        """
        # Box the value if necessary so it fits the (Object) constructor.
        self.prepare_generic_argument(OOTYPE)
        ctor = Method.c(jWeakRef, (jObject,))
        self.emit(ctor)
    def cast_weak_address_to_ptr(self, OOTYPE):
        """
        If a weak ref is at the top of the stack, yields the object
        that this weak ref is a pointer to.  OOTYPE is the kind of object
        you had a weak reference to.
        """
        get_mthd = Method.v(jWeakRef, 'get', (), jObject)
        self.emit(get_mthd)
        # get() returns Object; unbox/downcast back to OOTYPE.
        self.prepare_generic_result(OOTYPE)
# __________________________________________________________________
# Methods invoked directly by strings in jvm/opcode.py
    def throw(self):
        """ Throw the object from top of the stack as an exception """
        # athrow requires the reference on the stack to be a Throwable.
        self._instr(ATHROW)
    def iabs(self):
        """ Replaces the int on the stack with its absolute value
        (via the MATHIABS helper method). """
        MATHIABS.invoke(self)
    def dbl_abs(self):
        """ Replaces the double on the stack with its absolute value
        (via the MATHDABS helper method). """
        MATHDABS.invoke(self)
    def bitwise_negate(self):
        """ Invert all the bits in the "int" on the top of the stack """
        # x ^ -1 == ~x; the JVM has no dedicated bitwise-not opcode.
        self._instr(ICONST, -1)
        self._instr(IXOR)
    def goto(self, label):
        """ Jumps unconditionally """
        self._instr(GOTO, label)
    def goto_if_true(self, label):
        """ Jumps if the top of stack is true (consumes the int). """
        self._instr(IFNE, label)
    def goto_if_false(self, label):
        """ Jumps if the top of stack is false (consumes the int). """
        self._instr(IFEQ, label)
##### Comparison methods
    def _compare_op(self, cmpopcode):
        """
        Converts a comparison operation into a boolean value on the
        stack.  For example, compare_op(IFEQ) emits the instructions
        to perform a logical inversion [because it is true if the
        instruction equals zero].  Consumes as many operands from the
        stack as the cmpopcode consumes, typically 1 or 2.
        """
        midlbl = self.unique_label('cmpop')
        endlbl = self.unique_label('cmpop')
        # branch taken -> push 1; fall through -> push 0.
        self._instr(cmpopcode, midlbl)
        self._instr(ICONST, 0)
        self._instr(GOTO, endlbl)
        self.mark(midlbl)
        self._instr(ICONST, 1)
        self.mark(endlbl)
is_null = lambda self: self._compare_op(IFNULL)
is_not_null = lambda self: self._compare_op(IFNONNULL)
ref_is_eq = lambda self: self._compare_op(IF_ACMPEQ)
ref_is_neq = lambda self: self._compare_op(IF_ACMPNEQ)
logical_not = lambda self: self._compare_op(IFEQ)
equals_zero = logical_not
not_equals_zero = lambda self: self._compare_op(IFNE)
equals = lambda self: self._compare_op(IF_ICMPEQ)
not_equals = lambda self: self._compare_op(IF_ICMPNE)
less_than = lambda self: self._compare_op(IF_ICMPLT)
greater_than = lambda self: self._compare_op(IF_ICMPGT)
less_equals = lambda self: self._compare_op(IF_ICMPLE)
greater_equals = lambda self: self._compare_op(IF_ICMPGE)
    def _uint_compare_op(self, cmpopcode):
        """ Unsigned comparison: a helper reduces the two uints to a
        signed cmp-style result, then the one-operand test applies. """
        PYPYUINTCMP.invoke(self)
        self._compare_op(cmpopcode)
    # Equality is bit-identical for signed and unsigned ints:
    u_equals = equals
    u_not_equals = not_equals
    u_less_than = lambda self: self._uint_compare_op(IFLT)
    u_greater_than = lambda self: self._uint_compare_op(IFGT)
    u_less_equals = lambda self: self._uint_compare_op(IFLE)
    u_greater_equals = lambda self: self._uint_compare_op(IFGE)
    def _dbl_compare_op(self, cmpopcode):
        """ Double comparison: DCMPG reduces two doubles to -1/0/1,
        then the one-operand int test applies. """
        # XXX --- NaN behavior?
        self.emit(DCMPG)
        self._compare_op(cmpopcode)
    dbl_equals = lambda self: self._dbl_compare_op(IFEQ)
    dbl_not_equals = lambda self: self._dbl_compare_op(IFNE)
    dbl_less_than = lambda self: self._dbl_compare_op(IFLT)
    dbl_greater_than = lambda self: self._dbl_compare_op(IFGT)
    dbl_less_equals = lambda self: self._dbl_compare_op(IFLE)
    dbl_greater_equals = lambda self: self._dbl_compare_op(IFGE)
    def _long_compare_op(self, cmpopcode):
        """ Signed long comparison: LCMP reduces two longs to -1/0/1,
        then the one-operand int test applies. """
        self.emit(LCMP)
        self._compare_op(cmpopcode)
    long_equals = lambda self: self._long_compare_op(IFEQ)
    long_not_equals = lambda self: self._long_compare_op(IFNE)
    long_less_than = lambda self: self._long_compare_op(IFLT)
    long_greater_than = lambda self: self._long_compare_op(IFGT)
    long_less_equals = lambda self: self._long_compare_op(IFLE)
    long_greater_equals = lambda self: self._long_compare_op(IFGE)
    def _ulong_compare_op(self, cmpopcode):
        """ Unsigned long comparison via the PYPYULONGCMP helper. """
        PYPYULONGCMP.invoke(self)
        self._compare_op(cmpopcode)
    # Equality is bit-identical for signed and unsigned longs:
    ulong_equals = long_equals
    ulong_not_equals = long_not_equals
    ulong_less_than = lambda self: self._ulong_compare_op(IFLT)
    ulong_greater_than = lambda self: self._ulong_compare_op(IFGT)
    ulong_less_equals = lambda self: self._ulong_compare_op(IFLE)
    ulong_greater_equals = lambda self: self._ulong_compare_op(IFGE)
class JasminGenerator(JVMGenerator):
    # Concrete JVMGenerator that writes JasminXT assembler source, one
    # .j file per generated class, into 'outdir'.
    def __init__(self, db, outdir, package):
        # 'package' is accepted but not used here.
        JVMGenerator.__init__(self, db)
        self.outdir = outdir
    def _begin_class(self, abstract, interface):
        """
        Invoked by begin_class.  It is expected that self.curclass will
        be set when this method is invoked.
        abstract: True if the class to generate is abstract
        interface: True if the 'class' to generate is an interface
        """
        iclassnm = self.current_type().descriptor.int_class_name()
        isuper = self.curclass.superclass_type.descriptor.int_class_name()
        jfile = self.outdir.join("%s.j" % iclassnm)
        jfile.dirpath().ensure(dir=True)
        self.curclass.file = jfile.open('w')
        self.db.add_jasmin_file(str(jfile))
        # Determine the "declaration string"
        if interface: decl_str = "interface"
        else: decl_str = "class"
        # Write the JasminXT header
        fields = ["public"]
        if abstract: fields.append('abstract')
        self.curclass.out(".%s %s %s\n" % (
            decl_str, " ".join(fields), iclassnm))
        self.curclass.out(".super %s\n" % isuper)
    def _end_class(self):
        self.curclass.file.close()
    def close(self):
        # All classes must have been ended before closing the generator.
        assert self.curclass is None
    def add_comment(self, comment):
        # ';' starts a comment in Jasmin syntax.
        self.curclass.out(" ; %s\n" % comment)
    def implements(self, jinterface):
        self.curclass.out(
            '.implements ' + jinterface.descriptor.int_class_name() + '\n')
    def add_field(self, fobj):
        kw = ['public']
        if fobj.is_static: kw.append('static')
        self.curclass.out('.field %s %s %s\n' % (
            " ".join(kw), fobj.field_name, fobj.jtype.descriptor))
    def _begin_function(self, funcname, argtypes, rettype, static, abstract):
        # Instance methods: drop the implicit 'this' from the descriptor.
        if not static: argtypes = argtypes[1:]
        # Throws clause? Only use RuntimeExceptions?
        kw = ['public']
        if static: kw.append('static')
        if abstract: kw.append('abstract')
        self.curclass.out('.method %s %s(%s)%s\n' % (
            " ".join(kw),
            funcname,
            "".join([a.descriptor for a in argtypes]),
            rettype.descriptor))
        self._abstract_method = abstract
    def _end_function(self):
        # Abstract methods have no code, hence no .limit directives.
        if not self._abstract_method:
            self.curclass.out('.limit stack 100\n') # HACK, track max offset
            self.curclass.out('.limit locals %d\n' % self.curfunc.next_offset)
        self.curclass.out('.end method\n')
    def mark(self, lbl):
        """ Marks the point that a label indicates. """
        assert isinstance(lbl, Label)
        self.curclass.out(' %s:\n' % lbl.jasmin_syntax())
        # We count labels as instructions because ASM does:
        self.curfunc.instr_counter += 1
    def _instr(self, opcode, *args):
        # Let the opcode pick a concrete form for these arguments.
        jvmstr, args = opcode.specialize(args)
        def jasmin_syntax(arg):
            if hasattr(arg, 'jasmin_syntax'): return arg.jasmin_syntax()
            return str(arg)
        strargs = [jasmin_syntax(arg) for arg in args]
        instr_text = '%s %s' % (jvmstr, " ".join(strargs))
        # Emit a .line directive carrying the instruction counter
        # (not an actual source line).
        self.curclass.out(' .line %d\n' % self.curfunc.instr_counter)
        self.curclass.out(' %-60s\n' % (instr_text,))
        self.curfunc.instr_counter+=1
    def try_catch_region(self, jexcclsty, trystartlbl, tryendlbl, catchlbl):
        self.curclass.out(' .catch %s from %s to %s using %s\n' % (
            jexcclsty.descriptor.int_class_name(),
            trystartlbl.jasmin_syntax(),
            tryendlbl.jasmin_syntax(),
            catchlbl.jasmin_syntax()))
| Python |
"""
Special methods which we hand-generate, such as toString(), equals(), and hash().
These are generally added to methods listing of node.Class, and the
only requirement is that they must have a render(self, gen) method.
"""
import pypy.translator.jvm.generator as jvmgen
import pypy.translator.jvm.typesystem as jvmtype
from pypy.rpython.ootypesystem import ootype, rclass
class BaseDumpMethod(object):
    # Common machinery for generating a toString() method: render()
    # builds a StringBuilder, lets the subclass's _render_guts() append
    # text to it, and returns the resulting String.
    def __init__(self, db, OOCLASS, clsobj):
        self.db = db
        self.OOCLASS = OOCLASS
        self.clsobj = clsobj
        self.name = "toString"
        # Instance method: the only "argument" is the receiver itself.
        self.jargtypes = [clsobj]
        self.jrettype = jvmtype.jString
    def _print_field_value(self, fieldnm, FIELDOOTY):
        # Stack: [sb] -> dup the builder, push the string form of
        # this.<fieldnm>, then append it.
        self.gen.emit(jvmgen.DUP)
        self.gen.load_this_ptr()
        fieldobj = self.clsobj.lookup_field(fieldnm)
        fieldobj.load(self.gen)
        dumpmethod = self.db.toString_method_for_ootype(FIELDOOTY)
        self.gen.emit(dumpmethod)
        self.gen.emit(jvmgen.PYPYAPPEND)
    def _print(self, str):
        # Appends the literal string to the StringBuilder on the stack.
        self.gen.emit(jvmgen.DUP)
        self.gen.load_string(str)
        self.gen.emit(jvmgen.PYPYAPPEND)
    def render(self, gen):
        self.gen = gen
        gen.begin_function(
            self.name, (), self.jargtypes, self.jrettype, static=False)
        gen.new_with_jtype(jvmtype.jStringBuilder)
        self._render_guts(gen)
        gen.emit(jvmgen.OBJTOSTRING)
        gen.emit(jvmgen.RETURN.for_type(jvmtype.jString))
        gen.end_function()
        self.gen = None
class InstanceDumpMethod(BaseDumpMethod):
    # toString() for instances, in the form:
    #   InstanceWrapper('classname', {"field": value, ...})
    def _render_guts(self, gen):
        clsobj = self.clsobj
        genprint = self._print
        # Start the dump
        genprint("InstanceWrapper(")
        genprint("'" + self.OOCLASS._name + "', ")
        genprint("{")
        for fieldnm, (FIELDOOTY, fielddef) in self.OOCLASS._fields.iteritems():
            if FIELDOOTY is ootype.Void: continue
            genprint('"'+fieldnm+'":')
            # NOTE(review): prints at translation time, not runtime --
            # looks like leftover debugging; confirm before removing.
            print "fieldnm=%r fieldty=%r" % (fieldnm, FIELDOOTY)
            # Print the value of the field:
            self._print_field_value(fieldnm, FIELDOOTY)
        # Dump close
        genprint("})")
class RecordDumpMethod(BaseDumpMethod):
    # toString() for tuple-like records:  StructTuple((v0,v1,...))
    def _render_guts(self, gen):
        clsobj = self.clsobj
        genprint = self._print
        # We only render records that represent tuples:
        # In that case, the field names look like item0, item1, etc
        # Otherwise, we just do nothing... this is because we
        # never return records that do not represent tuples from
        # a testing function
        for f_name in self.OOCLASS._fields:
            if not f_name.startswith('item'):
                return
        # Start the dump
        genprint("StructTuple((")
        numfields = len(self.OOCLASS._fields)
        for i in range(numfields):
            f_name = 'item%d' % i
            FIELD_TYPE, f_default = self.OOCLASS._fields[f_name]
            if FIELD_TYPE is ootype.Void:
                continue
            # Print the value of the field:
            self._print_field_value(f_name, FIELD_TYPE)
            # Trailing comma keeps the text a valid Python tuple literal.
            genprint(',')
        # Decrement indent and dump close
        genprint("))")
class ConstantStringDumpMethod(BaseDumpMethod):
    """ A dump method whose toString() always yields one fixed,
    single-quoted string. """
    def __init__(self, clsobj, str):
        # 'str' (shadowing the builtin) kept for interface compatibility.
        BaseDumpMethod.__init__(self, None, None, clsobj)
        self.constant_string = str
    def _render_guts(self, gen):
        self._print("'%s'" % self.constant_string)
class DeepEqualsMethod(object):
    # Generates equals(Object) comparing field-by-field (after an
    # instanceof check) rather than by reference identity.
    def __init__(self, db, OOCLASS, clsobj):
        self.db = db
        self.OOCLASS = OOCLASS
        self.clsobj = clsobj
        self.name = "equals"
        # Receiver plus the Object being compared against.
        self.jargtypes = [clsobj, jvmtype.jObject]
        self.jrettype = jvmtype.jBool
    def render(self, gen):
        self.gen = gen
        gen.begin_function(
            self.name, (), self.jargtypes, self.jrettype, static=False)
        # Label to branch to should the items prove to be unequal
        unequal_lbl = gen.unique_label('unequal')
        gen.add_comment('check that the argument is of the correct type')
        gen.load_jvm_var(self.clsobj, 1)
        gen.instanceof(self.OOCLASS)
        gen.goto_if_false(unequal_lbl)
        gen.add_comment('Cast it to the right type:')
        # Overwrites slot 1 with the downcast argument for later loads.
        gen.load_jvm_var(self.clsobj, 1)
        gen.downcast(self.OOCLASS)
        gen.store_jvm_var(self.clsobj, 1)
        # If so, compare field by field
        for fieldnm, (FIELDOOTY, fielddef) in self.OOCLASS._fields.iteritems():
            if FIELDOOTY is ootype.Void: continue
            fieldobj = self.clsobj.lookup_field(fieldnm)
            gen.add_comment('Compare field %s of type %s' % (fieldnm, FIELDOOTY))
            # Load the field from both this and the argument:
            gen.load_jvm_var(self.clsobj, 0)
            gen.emit(fieldobj)
            gen.load_jvm_var(self.clsobj, 1)
            gen.emit(fieldobj)
            # And compare them:
            gen.compare_values(FIELDOOTY, unequal_lbl)
        # Return true or false as appropriate
        gen.push_primitive_constant(ootype.Bool, True)
        gen.return_val(jvmtype.jBool)
        gen.mark(unequal_lbl)
        gen.push_primitive_constant(ootype.Bool, False)
        gen.return_val(jvmtype.jBool)
        gen.end_function()
class DeepHashMethod(object):
    # Generates hashCode() as the XOR of the hashes of all non-Void
    # fields (consistent with the field-wise equals() above).
    def __init__(self, db, OOCLASS, clsobj):
        self.db = db
        self.OOCLASS = OOCLASS
        self.clsobj = clsobj
        self.name = "hashCode"
        self.jargtypes = [clsobj]
        self.jrettype = jvmtype.jInt
    def render(self, gen):
        self.gen = gen
        gen.begin_function(
            self.name, (), self.jargtypes, self.jrettype, static=False)
        # Initial hash: 0
        gen.push_primitive_constant(ootype.Signed, 0)
        # Get hash of each field
        for fieldnm, (FIELDOOTY, fielddef) in self.OOCLASS._fields.iteritems():
            if FIELDOOTY is ootype.Void: continue
            fieldobj = self.clsobj.lookup_field(fieldnm)
            gen.add_comment('Hash field %s of type %s' % (fieldnm, FIELDOOTY))
            # Load the field and hash it:
            gen.load_jvm_var(self.clsobj, 0)
            gen.emit(fieldobj)
            gen.hash_value(FIELDOOTY)
            # XOR that with the main hash
            gen.emit(jvmgen.IXOR)
        # Return the final hash
        gen.return_val(jvmtype.jInt)
        gen.end_function()
| Python |
"""
Log for the JVM backend
Do the following:
from pypy.translator.jvm.log import log
"""
from pypy.tool.ansi_print import ansi_log
import py
# Producer emits messages under the "jvm" keyword; ansi_log renders
# them in color on the console.
log = py.log.Producer("jvm")
py.log.setconsumer("jvm", ansi_log)
| Python |
from pypy.translator.translator import graphof
# ___________________________________________________________________________
def throwZeroDivisionError():
    """ RPython helper; its name starts with 'throw' so that
    create_interlink_node() below collects and translates it. """
    raise ZeroDivisionError
def throwIndexError():
    """ RPython helper; its name starts with 'throw' so that
    create_interlink_node() below collects and translates it. """
    raise IndexError
def throwOverflowError():
    """ RPython helper; its name starts with 'throw' so that
    create_interlink_node() below collects and translates it. """
    raise OverflowError
# ___________________________________________________________________________
def create_interlink_node(db):
    """ Translates the create_interlink_impl() function and returns
    a jvmgen.Method object that allows it to be called. """
    translator = db.genoo.translator
    # Collect every module-level helper whose name begins with 'throw'.
    HELPERS = [val for nm, val in globals().items() if nm.startswith('throw')]
    # Annotate and rtype all helpers before requesting their graphs.
    for func in HELPERS:
        translator.annotator.build_types(func, [])
    translator.rtyper.specialize_more_blocks()
    helpers = {}
    for func in HELPERS:
        graph = graphof(translator, func)
        helpers[func.func_name] = db.pending_function(graph)
    db.create_interlink_node(helpers)
| Python |
from pypy.translator.oosupport.metavm import MicroInstruction
from pypy.translator.jvm.typesystem import JvmScalarType, JvmClassType
import pypy.translator.jvm.generator as jvmgen
import pypy.translator.jvm.typesystem as jvmtype
class _IndirectCall(MicroInstruction):
    # Renders an indirect call: op.args[0] is a function-pointer value
    # whose interface type declares a single 'invoke' method.
    def render(self, gen, op):
        interface = gen.db.lltype_to_cts(op.args[0].concretetype)
        method = interface.lookup_method('invoke')
        gen.emit(method)
# Singleton instance used by the opcode table.
IndirectCall = _IndirectCall()
class _JvmCallMethod(MicroInstruction):
    # Renders a call to a method on a JVM-native object, inserting
    # boxing/unboxing where the JVM signature differs from the ootype
    # signature.
    def _invoke_method(self, gen, db, jmethod, jactargs, args, jactres, res):
        for arg, jmthdty in zip(args, jactargs):
            # Load the argument on the stack:
            gen.load(arg)
            # Perform any boxing required:
            jargty = db.lltype_to_cts(arg.concretetype)
            if jargty != jmthdty:
                gen.prepare_generic_argument(arg.concretetype)
        gen.emit(jmethod)
        # Perform any unboxing required:
        jresty = db.lltype_to_cts(res.concretetype)
        if jresty != jactres:
            gen.prepare_generic_result(res.concretetype)
    def render(self, gen, op):
        method = op.args[0] # a FlowConstant string...
        this = op.args[1]
        # Locate the java method we will be invoking:
        thisjtype = gen.db.lltype_to_cts(this.concretetype)
        jmethod = thisjtype.lookup_method(method.value)
        # Ugly: if jmethod ends up being a static method, then
        # peel off the first argument
        jactargs = jmethod.argument_types
        if jmethod.is_static():
            jactargs = jactargs[1:]
        # Iterate through the arguments, inserting casts etc as required
        gen.load(this)
        self._invoke_method(gen, gen.db, jmethod,
                            jactargs, op.args[2:],
                            jmethod.return_type, op.result)
# Singleton instance used by the opcode table.
JvmCallMethod = _JvmCallMethod()
class TranslateException(MicroInstruction):
    """ Translates an exception into a call of a method on the PyPy object """
    def __init__(self, jexc, pexcmthd, inst):
        """
        jexc: the JvmType of the exception
        pexcmthd: the name of the method on the PyPy object to call.
        The PyPy method must take no arguments, return void, and must
        always throw an exception in practice.
        inst: the MicroInstruction to wrap in the try/catch region.
        """
        self.java_exc = jexc
        self.pypy_method = jvmgen.Method.s(
            jvmtype.jPyPy, pexcmthd, [], jvmtype.jVoid)
        self.instruction = inst
    def render(self, gen, op):
        trylbl = gen.unique_label('translate_exc_begin')
        catchlbl = gen.unique_label('translate_exc_catch')
        donelbl = gen.unique_label('translate_exc_done')
        # try {
        gen.mark(trylbl)
        self.instruction.render(gen, op)
        gen.goto(donelbl)
        # } catch (JavaExceptionType) {
        gen.mark(catchlbl)
        # Discard the caught Java exception object before the call.
        gen.emit(jvmgen.POP)
        gen.emit(self.pypy_method)
        # Note: these instructions will never execute, as we expect
        # the pypy_method to throw an exception and not to return.  We
        # need them here to satisfy the Java verifier, however, as it
        # does not know that the pypy_method will never return.
        gen.emit(jvmgen.ACONST_NULL)
        gen.emit(jvmgen.ATHROW)
        # }
        gen.mark(donelbl)
        gen.try_catch_region(self.java_exc, trylbl, catchlbl, catchlbl)
class _NewCustomDict(MicroInstruction):
    # Builds a custom dictionary whose eq/hash behavior is given by two
    # function values (each either a standalone function or a bound
    # method), then calls the CUSTOMDICTMAKE helper.
    def _load_func(self, gen, fn, obj, method_name):
        db = gen.db
        if fn.value:
            # Standalone function: find the delegate class and
            # instantiate it.
            assert method_name.value is None
            smimpl = fn.value.concretize().value   # ootype._static_meth
            db.record_delegate(smimpl._TYPE)       # _TYPE is a StaticMethod
            ty = db.record_delegate_standalone_func_impl(smimpl.graph)
            gen.new_with_jtype(ty)
        else:
            # Bound method: create a wrapper bound to the given
            # object, using the "bind()" static method that bound
            # method wrapper classes have.
            INSTANCE = obj.concretetype
            method_name = method_name.value
            ty = db.record_delegate_bound_method_impl(INSTANCE, method_name)
            gen.load(obj)
            gen.emit(ty.bind_method)
    def render(self, generator, op):
        # args[1:4] describe the equality function, args[4:7] the hash.
        self._load_func(generator, *op.args[1:4])
        self._load_func(generator, *op.args[4:7])
        generator.emit(jvmgen.CUSTOMDICTMAKE)
# Singleton instance used by the opcode table.
NewCustomDict = _NewCustomDict()
class _CastPtrToWeakAddress(MicroInstruction):
    # Wraps a pointer value in a weak reference and stores it into the
    # result variable (two-phase protocol: see the generator methods).
    def render(self, generator, op):
        arg = op.args[0]
        generator.prepare_cast_ptr_to_weak_address()
        generator.load(arg)
        generator.finalize_cast_ptr_to_weak_address(arg.concretetype)
        generator.store(op.result)
# Singleton instance used by the opcode table.
CastPtrToWeakAddress = _CastPtrToWeakAddress()
class _CastWeakAddressToPtr(MicroInstruction):
    """ Replaces the weak reference on the stack with the object it
    refers to, cast to the operation's result type. """
    def render(self, generator, op):
        generator.cast_weak_address_to_ptr(op.result.concretetype)
CastWeakAddressToPtr = _CastWeakAddressToPtr()
| Python |
"""
Nodes describe Java structures that we are building. They know how to
render themselves so as to build the java structure they describe.
They are entered onto the database worklist as we go.
Some nodes describe parts of the JVM structure that already exist ---
for example, there are nodes that are used to describe built-in JVM
types like String, etc. In this case, they are never placed on the
database worklist, and don't know how to render themselves (since they
don't have to).
Nodes representing classes that we will build also implement the JvmType
interface defined by database.JvmType.
"""
from pypy.objspace.flow import \
model as flowmodel
from pypy.rpython.lltypesystem import \
lltype
from pypy.rpython.ootypesystem import \
ootype, rclass
from pypy.translator.jvm.typesystem import \
JvmClassType, jString, jStringArray, jVoid, jThrowable, jInt, jPyPyMain, \
jObject, JvmType, jStringBuilder, jPyPyInterlink, jCallbackInterfaces, \
JvmInterfaceType
from pypy.translator.jvm.opcodes import \
opcodes
from pypy.translator.jvm.option import \
getoption
from pypy.translator.jvm.methods import \
BaseDumpMethod, InstanceDumpMethod, RecordDumpMethod, ConstantStringDumpMethod
from pypy.translator.oosupport.function import \
Function as OOFunction
from pypy.translator.oosupport.constant import \
push_constant
import pypy.translator.jvm.generator as jvmgen
class Node(object):
    # Base class for worklist entries: records its database and can
    # report dependencies (none, by default).
    def set_db(self, db):
        self.db = db
    def dependencies(self):
        pass
class EntryPoint(Node):
    """
    A special node that generates the pypy.Main class which has a static
    main method.  Can be configured with a number of options for internal
    testing (see __init__)
    """
    def __init__(self, graph, expandargs, printresult):
        """
        'graph' --- The initial graph to invoke from main()
        'expandargs' --- controls whether the arguments passed to main()
        are passed as a list, or expanded to match each argument to the graph
        'printresult' --- controls whether the result is printed to stdout
        when the program finishes
        The 'expandargs' option deserves explanation:
        it will be false for a standalone build, because in that
        case we want to convert the String[] array that main() receives
        into a corresponding python List of string objects.
        it will (generally) be true when compiling individual
        functions, in which case we might be compiling an entry
        point with a signature like (a:int,b:float) in which case
        argv[1] should be converted to an integer, and argv[2]
        should be converted to a float.
        """
        self.graph = graph
        self.expand_arguments = expandargs
        self.print_result = printresult
        pass
    # XXX --- perhaps this table would be better placed in typesystem.py
    # so as to constrain the knowledge of lltype and ootype
    # Maps an argument's ootype to the helper that parses it from a
    # java.lang.String command-line argument (None: no conversion).
    _type_conversion_methods = {
        ootype.Signed:jvmgen.PYPYSTRTOINT,
        ootype.Unsigned:jvmgen.PYPYSTRTOUINT,
        lltype.SignedLongLong:jvmgen.PYPYSTRTOLONG,
        lltype.UnsignedLongLong:jvmgen.PYPYSTRTOULONG,
        ootype.Bool:jvmgen.PYPYSTRTOBOOL,
        ootype.Float:jvmgen.PYPYSTRTODOUBLE,
        ootype.Char:jvmgen.PYPYSTRTOCHAR,
        ootype.UniChar:jvmgen.PYPYSTRTOCHAR,
        ootype.String:None
        }
    def render(self, gen):
        gen.begin_class(jPyPyMain, jObject)
        gen.begin_function(
            'main', (), [jStringArray], jVoid, static=True)
        # First thing we do is setup the PyPy helper.  For now this is
        # a static variable of the PyPy class, though that precludes
        # running multiple translations.
        gen.new_with_jtype(gen.db.interlink_class)
        jvmgen.PYPYINTERLINK.store(gen)
        if self.print_result:
            gen.begin_try()
        # Handle arguments:
        if self.expand_arguments:
            # Convert each entry into the array to the desired type by
            # invoking an appropriate helper function on each one
            for i, arg in enumerate(self.graph.getargs()):
                jty = self.db.lltype_to_cts(arg.concretetype)
                gen.load_jvm_var(jStringArray, 0)
                gen.emit(jvmgen.ICONST, i)
                gen.load_from_array(jString)
                conv = self._type_conversion_methods[arg.concretetype]
                if conv: gen.emit(conv)
        else:
            # Convert the array of strings to a List<String> as the
            # python method expects
            arg0 = self.graph.getargs()[0]
            assert isinstance(arg0.concretetype, ootype.List), str(arg0.concretetype)
            assert arg0.concretetype._ITEMTYPE is ootype.String
            gen.load_jvm_var(jStringArray, 0)
            gen.emit(jvmgen.PYPYARRAYTOLIST)
        # Generate a call to this method
        gen.emit(self.db.pending_function(self.graph))
        # Print result?
        #
        #   Use the dump method for non-exceptional results
        #
        #   For exceptions, just print the runtime type
        #
        if self.print_result:
            done_printing = gen.unique_label('done_printing')
            RESOOTYPE = self.graph.getreturnvar().concretetype
            dumpmethod = self.db.toString_method_for_ootype(RESOOTYPE)
            gen.add_comment('Invoking dump method for result of type '
                            +str(RESOOTYPE))
            gen.emit(dumpmethod)      # generate the string
            gen.emit(jvmgen.PYPYDUMP) # dump to stdout
            gen.goto(done_printing)
            gen.end_try()
            jexc = self.db.exception_root_object()
            gen.begin_catch(jexc)
            gen.emit(jvmgen.PYPYDUMPEXCWRAPPER) # dumps to stdout
            gen.end_catch()
            gen.mark(done_printing)
        # And finish up
        gen.return_val(jVoid)
        gen.end_function()
        gen.end_class()
class Function(object):
    """ A generic interface for Function objects; these objects can
    be added as methods of classes and rendered.  This class serves
    only as documentation. """
    # A "name" attribute must be defined
    name = None
    def render(self, gen):
        """ Uses the gen argument, a jvmgen.Generator, to create the
        appropriate JVM assembly for this method. """
        raise NotImplementedError
    def method(self):
        """ Returns a jvmgen.Method object that would allow this
        function to be invoked. """
        raise NotImplementedError
class GetterFunction(Function):
    # Renders a getter: loads 'field_obj' from the receiver and
    # returns it.
    def __init__(self, db, cls_obj, method_obj, field_obj):
        self.db = db
        self.name = method_obj.method_name
        self.cls_obj = cls_obj
        self.method_obj = method_obj
        self.field_obj = field_obj
    def method(self):
        return self.method_obj
    def render(self, gen):
        gen.begin_function(
            self.method_obj.method_name, [],
            [self.cls_obj], self.field_obj.jtype)
        gen.load_this_ptr()
        self.field_obj.load(gen)
        gen.return_val(self.field_obj.jtype)
        gen.end_function()
class PutterFunction(Function):
    # Renders a setter: stores the method's single argument into
    # 'field_obj' on the receiver and returns void.
    def __init__(self, db, cls_obj, method_obj, field_obj):
        self.db = db
        # Fix: define 'name' as the Function interface requires
        # ("A 'name' attribute must be defined"), matching GetterFunction.
        self.name = method_obj.method_name
        self.cls_obj = cls_obj
        self.method_obj = method_obj
        self.field_obj = field_obj
    def method(self):
        return self.method_obj
    def render(self, gen):
        gen.begin_function(
            self.method_obj.method_name, [],
            [self.cls_obj, self.field_obj.jtype], jVoid)
        gen.load_this_ptr()
        # Argument 1 is the value to store (0 is 'this').
        gen.load_function_argument(1)
        self.field_obj.store(gen)
        gen.return_val(jVoid)
        gen.end_function()
class GraphFunction(OOFunction, Function):
""" Represents a function that is generated from a graph. """
def __init__(self, db, classty, name, jargtypes,
jrettype, graph, is_static):
"""
classty: the JvmClassType object this is a part of (even static
functions have a class)
name: the name of the function
jargtypes: JvmType of each argument
jrettype: JvmType this function returns
graph: the graph representing the body of the function
is_static: boolean flag indicate whether func is static (!)
"""
OOFunction.__init__(self, db, graph, name, not is_static)
self.classty = classty
self.jargtypes = jargtypes
self.jrettype = jrettype
self._block_labels = {}
def method(self):
""" Returns a jvmgen.Method that can invoke this function """
if not self.is_method:
ctor = jvmgen.Method.s
startidx = 0
else:
ctor = jvmgen.Method.v
startidx = 1
return ctor(self.classty, self.name,
self.jargtypes[startidx:], self.jrettype)
def begin_render(self):
# Prepare argument lists for begin_function call
lltype_to_cts = self.db.lltype_to_cts
jargvars = []
jargtypes = []
for arg in self.graph.getargs():
if arg.concretetype is ootype.Void: continue
jargvars.append(arg)
jargtypes.append(lltype_to_cts(arg.concretetype))
# Determine return type
jrettype = lltype_to_cts(self.graph.getreturnvar().concretetype)
self.ilasm.begin_function(
self.name, jargvars, jargtypes, jrettype, static=not self.is_method)
def end_render(self):
self.ilasm.end_function()
def _create_generator(self, ilasm):
# JVM doesn't distinguish
return ilasm
def _get_block_name(self, block):
if block in self._block_labels:
return self._block_labels[block]
blocklbl = self.ilasm.unique_label('BasicBlock')
self._block_labels[block] = blocklbl
return blocklbl
def set_label(self, blocklbl):
self.ilasm.mark(blocklbl)
def begin_try(self):
self.ilasm.begin_try()
def end_try(self, exit_label):
self.ilasm.branch_unconditionally(exit_label)
self.ilasm.end_try()
def begin_catch(self, llexitcase):
ll_meta_exc = llexitcase
ll_exc = ll_meta_exc._inst.class_._INSTANCE
jtype = self.cts.lltype_to_cts(ll_exc)
assert jtype.throwable # SHOULD only try to catch subtypes of Exception
self.ilasm.begin_catch(jtype)
def end_catch(self, exit_lbl):
self.ilasm.goto(exit_lbl)
self.ilasm.end_catch()
def store_exception_and_link(self, link):
if self._is_raise_block(link.target):
# the exception value is on the stack, use it as the 2nd target arg
assert len(link.args) == 2
assert len(link.target.inputargs) == 2
self.ilasm.store(link.target.inputargs[1])
else:
# the exception value is on the stack, store it in the proper place
if isinstance(link.last_exception, flowmodel.Variable):
self.ilasm.emit(jvmgen.DUP)
self.ilasm.store(link.last_exc_value)
fld = self.db.lltype_to_cts(rclass.OBJECT).lookup_field('meta')
self.ilasm.emit(fld)
self.ilasm.store(link.last_exception)
else:
self.ilasm.store(link.last_exc_value)
self._setup_link(link)
def render_return_block(self, block):
return_var = block.inputargs[0]
return_ty = self.db.lltype_to_cts(return_var.concretetype)
if return_var.concretetype is not ootype.Void:
self.ilasm.load(return_var)
self.ilasm.return_val(return_ty)
    def render_raise_block(self, block):
        """Emit code that throws the exception held in the block's
        second input variable."""
        exc = block.inputargs[1]
        self.ilasm.load(exc)
        # Check whether the static type is known to be throwable.
        # If not, emit a CHECKCAST to the base exception type.
        # According to Samuele, no non-Exceptions should be thrown,
        # but this is not enforced by the RTyper or annotator.
        jtype = self.db.lltype_to_cts(exc.concretetype)
        if not jtype.throwable:
            self.ilasm.downcast_jtype(self.db.exception_root_object())
        self.ilasm.throw()
def _trace(self, str):
jvmgen.SYSTEMERR.load(self.generator)
self.generator.load_string(str)
jvmgen.PRINTSTREAMPRINTSTR.invoke(self.generator)
    def _is_printable(self, res):
        # True if 'res' has a type we know how to serialize for tracing.
        # First test: identity/equality match against builtin ootypes.
        # NOTE(review): ootype.Instance here is a class, not a type
        # instance, so that entry can never match in the 'in' test below;
        # it is covered by the isinstance() test that follows -- confirm
        # before removing.
        if res.concretetype in (
            ootype.Instance,
            ootype.Signed,
            ootype.Unsigned,
            ootype.SignedLongLong,
            ootype.UnsignedLongLong,
            ootype.Bool,
            ootype.Float,
            ootype.Char,
            ootype.UniChar,
            ootype.String,
            ootype.StringBuilder,
            ootype.Class):
            return True
        # Second test: structural types (instances, records, containers).
        if isinstance(res.concretetype, (
            ootype.Instance,
            ootype.Record,
            ootype.List,
            ootype.Dict,
            ootype.DictItemsIterator)):
            return True
        return False
    def _trace_value(self, prompt, res):
        # Emit code printing "<prompt>: <value>" to System.err, using the
        # toString helper matching res's ootype; no-op for unprintable types.
        if res and self._is_printable(res):
            jmethod = self.db.toString_method_for_ootype(
                res.concretetype)
            self._trace(" "+prompt+": ")
            self.generator.emit(jvmgen.SYSTEMERR)
            self.generator.load(res)
            self.generator.emit(jmethod)
            self.generator.emit(jvmgen.PRINTSTREAMPRINTSTR)
            self._trace("\n")
    def _render_op(self, op):
        # Emit one flow-graph operation; with --trace, also emit runtime
        # code that prints the operation, its arguments and its result.
        self.generator.add_comment(str(op))
        if getoption('trace'):
            self._trace(str(op)+"\n")
            for i, arg in enumerate(op.args):
                self._trace_value('Arg %02d' % i, arg)
        OOFunction._render_op(self, op)
        if getoption('trace'):
            self._trace_value('Result', op.result)
class StaticMethodInterface(Node, JvmClassType):
    """
    We generate an abstract base class when we need function pointers,
    which correspond to constants of StaticMethod ootype.  We need a
    different interface for each different set of argument/return
    types.  These abstract base classes look like:

        abstract class Foo {
            public abstract ReturnType invoke(Arg1, Arg2, ...);
        }

    Depending on the signature of Arg1, Arg2, and ReturnType, this
    abstract class may have additional methods and may implement
    interfaces such as PyPy.Equals or PyPy.HashCode.  This is to allow
    it to interface with the standalone Java code.  See
    the pypy.Callback interface for more information.
    """
    def __init__(self, name, jargtypes, jrettype):
        """
        name: fully qualified name of the generated class
        jargtypes: list of JvmTypes, one per argument
        jrettype: JvmType of the return value
        """
        JvmClassType.__init__(self, name)
        assert isinstance(jrettype, JvmType)
        # 'self' is prepended as the this-pointer type of invoke():
        self.java_argument_types = [self] + list(jargtypes)
        self.java_return_type = jrettype
        self.dump_method = ConstantStringDumpMethod(
            self, "StaticMethodInterface")
        self.invoke_method_obj = jvmgen.Method.v(
            self, 'invoke',
            self.java_argument_types[1:], self.java_return_type)
    def lookup_field(self, fieldnm):
        raise KeyError(fieldnm) # no fields
    def lookup_method(self, methodnm):
        """ Given the method name, returns a jvmgen.Method object """
        assert isinstance(self.java_return_type, JvmType)
        if methodnm == 'invoke':
            return self.invoke_method_obj
        raise KeyError(methodnm) # only one method
    def render(self, gen):
        """Emit the abstract class; when a matching Java callback
        interface exists, also implement it, possibly via an adaptor
        invoke() bridging the signatures."""
        assert isinstance(self.java_return_type, JvmType)
        # Scan through the jCallbackInterfaces and look for any
        # that apply.
        for jci in jCallbackInterfaces:
            if jci.matches(self.java_argument_types[1:], self.java_return_type):
                break
        else:
            jci = None
        gen.begin_class(self, jObject, abstract=True)
        if jci: gen.implements(jci)
        gen.begin_constructor()
        gen.end_constructor()
        gen.begin_function('invoke', [], self.java_argument_types,
                           self.java_return_type, abstract=True)
        gen.end_function()
        # Because methods in the JVM are identified by both their name
        # and static signature, we need to create a dummy "invoke"
        # method if the Java callback interface argument types don't
        # match the actual types for this method.  For example, the
        # equals interface has the static signature
        # "(Object,Object)=>boolean", but there may be static methods
        # with some signature "(X,Y)=>boolean" where X and Y are other
        # types.  In that case, we create an adaptor method like:
        #
        #   boolean invoke(Object x, Object y) {
        #     return invoke((X)x, (Y)y);
        #   }
        if (jci and
            (jci.java_argument_types != self.java_argument_types[1:] or
             jci.java_return_type != self.java_return_type)):
            jci_jargs = [self] + list(jci.java_argument_types)
            jci_ret = jci.java_return_type
            gen.begin_function('invoke', [], jci_jargs, jci_ret)
            idx = 0
            for jci_arg, self_arg in zip(jci_jargs, self.java_argument_types):
                gen.load_jvm_var(jci_arg, idx)
                if jci_arg != self_arg:
                    # narrow the interface-typed argument to ours
                    gen.prepare_generic_result_with_jtype(self_arg)
                idx += jci_arg.descriptor.type_width()
            gen.emit(self.invoke_method_obj)
            assert jci_ret == self.java_return_type # no variance here currently
            gen.return_val(jci_ret)
            gen.end_function()
        gen.end_class()
class StaticMethodImplementation(Node, JvmClassType):
    """
    In addition to the StaticMethodInterface, we must generate an
    implementation for each specific method that is called.  These
    implementation objects look like:

        class Bar extends Foo {
            public ReturnType invoke(Arg1, Arg2) {
                return SomeStaticClass.StaticMethod(Arg1, Arg2);
            }
        }

    If the bound_to_jty argument is not None, then this class
    represents a bound method, and looks something like:

        class Bar extends Foo {
            Qux bound_to;
            public static Bar bind(Qux to) {
                Bar b = new Bar();
                b.bound_to = to;
                return b;
            }
            public ReturnType invoke(Arg1, Arg2) {
                return bound_to.SomeMethod(Arg1, Arg2);
            }
        }
    """
    def __init__(self, name, super_class, bound_to_jty, impl_method):
        # name: fully qualified name for the generated class
        # super_class: the StaticMethodInterface being implemented
        # bound_to_jty: receiver JvmType for bound methods, or None
        # impl_method: jvmgen.Method that invoke() delegates to
        JvmClassType.__init__(self, name)
        self.super_class = super_class
        self.impl_method = impl_method
        self.dump_method = ConstantStringDumpMethod(
            self, "StaticMethodImplementation")
        if bound_to_jty:
            self.bound_to_jty = bound_to_jty
            self.bound_to_fld = jvmgen.Field(
                self.name, 'bound_to', bound_to_jty, False)
            self.bind_method = jvmgen.Method.s(
                self, 'bind', (self.bound_to_jty,), self)
        else:
            self.bound_to_jty = None
            self.bound_to_fld = None
            self.bind_method = None
    def lookup_field(self, fieldnm):
        # Only the 'bound_to' field lives here; defer everything else.
        if self.bound_to_fld and fieldnm == self.bound_to_fld.name:
            return self.bound_to_fld
        return self.super_class.lookup_field(fieldnm)
    def lookup_method(self, methodnm):
        # Only the static 'bind' method lives here; defer everything else.
        if self.bind_method and methodnm == 'bind':
            return self.bind_method
        return self.super_class.lookup_method(methodnm)
    def render(self, gen):
        """Emit the implementation class: optional bound_to field and
        bind() factory, plus the concrete invoke()."""
        gen.begin_class(self, self.super_class)
        if self.bound_to_fld:
            gen.add_field(self.bound_to_fld)
        gen.begin_constructor()
        gen.end_constructor()
        # Emit the "bind" function which creates an instance if there is
        # a bound field:
        if self.bound_to_jty:
            assert self.bound_to_fld and self.bind_method
            gen.begin_function(
                'bind', [], (self.bound_to_jty,), self, static=True)
            gen.new_with_jtype(self)
            gen.emit(jvmgen.DUP)
            gen.load_jvm_var(self.bound_to_jty, 0)
            self.bound_to_fld.store(gen)
            gen.return_val(self)
            gen.end_function()
        # Emit the invoke() function, which just re-pushes the
        # arguments and then invokes either the (possibly static)
        # method self.impl_method.  Note that if we are bound to an
        # instance, we push that as the this pointer for
        # self.impl_method.
        gen.begin_function('invoke', [],
                           self.super_class.java_argument_types,
                           self.super_class.java_return_type)
        if self.bound_to_fld:
            gen.load_jvm_var(self, 0)
            gen.emit(self.bound_to_fld)
        for i in range(len(self.super_class.java_argument_types)):
            if not i: continue # skip the this ptr
            gen.load_function_argument(i)
        gen.emit(self.impl_method)
        gen.return_val(self.super_class.java_return_type)
        gen.end_function()
        gen.end_class()
class Interface(Node, JvmInterfaceType):
    """
    Represents an interface to be generated.  The only class that we
    currently generate into an interface is ootype.ROOT.
    """
    def __init__(self, name):
        # NOTE(review): calls JvmClassType.__init__ even though the base
        # here is JvmInterfaceType -- presumably the latter derives from
        # JvmClassType; confirm in typesystem.py.
        JvmClassType.__init__(self, name)
        self.super_class = jObject
        self.rendered = False    # has render() been invoked yet?
        self.properties = {}     # field name -> Property
        self.methods = {}        # method name -> jvmgen.Method
    def lookup_field(self, fieldnm):
        # Right now, we don't need inheritance between interfaces.
        return self.properties[fieldnm]
    def lookup_method(self, methodnm):
        # Right now, we don't need inheritance between interfaces.
        return self.methods[methodnm]
    def add_property(self, prop):
        self.properties[prop.field_name] = prop
    def add_method(self, method):
        self.methods[method.name] = method
    def render(self, gen):
        """Emit the interface: one abstract method per registered method,
        plus getter/putter pairs for each property."""
        self.rendered = True
        gen.begin_class(self, self.super_class, interface=True)
        def emit_method(method):
            gen.begin_j_function(self, method, abstract=True)
            gen.end_function()
        for method in self.methods.values():
            emit_method(method)
        for prop in self.properties.values():
            emit_method(prop.get_method)
            emit_method(prop.put_method)
        gen.end_class()
class Class(Node, JvmClassType):
    """ Represents a class to be emitted.  Note that currently, classes
    are emitted all in one shot, not piecemeal. """
    def __init__(self, name, supercls=None):
        """
        'name' should be a fully qualified Java class name like
        "java.lang.String", supercls is a Class object
        """
        JvmClassType.__init__(self, name)
        self.rendered = False       # has rendering occurred?
        self.abstract = False       # is this an abstract class?
        self.fields = {}            # maps field name to jvmgen.Field object
        self.interfaces = []        # list of JvmTypes
        self.methods = {}           # maps method name to a Function object*
        self.abstract_methods = {}  # maps method name to jvmgen.Method object
        self.set_super_class(supercls)
        # * --- actually maps to an object that defines the
        # attributes: name, method() and render().  Usually, this is a
        # Function object, but in some subclasses it is not.
    def simple_name(self):
        # Return the class name without its package prefix.
        dot = self.name.rfind('.')
        if dot == -1: return self.name
        return self.name[dot+1:]
    def set_super_class(self, supercls):
        self.super_class = supercls
        # Throwability is inherited:
        if self.super_class and self.super_class.throwable:
            self.throwable = True
    def add_field(self, fieldobj, fielddef):
        """ Creates a new field accessed via the jvmgen.Field
        descriptor 'fieldobj'.  'fielddef' is the field's default value,
        pushed by the constructor.  Must be called before render()."""
        assert not self.rendered and isinstance(fieldobj, jvmgen.Field)
        self.fields[fieldobj.field_name] = (fieldobj, fielddef)
    def add_interface(self, inter):
        assert not self.rendered and isinstance(inter, JvmType)
        self.interfaces.append(inter)
    def lookup_field(self, fieldnm):
        # Search this class first, then the superclass chain.
        if fieldnm in self.fields:
            return self.fields[fieldnm][0]
        return self.super_class.lookup_field(fieldnm)
    def lookup_method(self, methodnm):
        """ Given the method name, returns a jvmgen.Method object """
        if methodnm in self.methods:
            return self.methods[methodnm].method()
        if methodnm in self.abstract_methods:
            return self.abstract_methods[methodnm]
        return self.super_class.lookup_method(methodnm)
    def add_method(self, func):
        """ Creates a new method in this class, represented by the
        Function object 'func'.  Must be called before render();
        intended to be invoked by the database.  Note that some of these
        'methods' may actually represent static functions. """
        self.methods[func.name] = func
    def add_abstract_method(self, jmethod):
        """ Adds an abstract method to our list of methods; jmethod should
        be a jvmgen.Method object.  Marks the whole class abstract. """
        assert jmethod.method_name not in self.methods
        self.abstract = True
        self.abstract_methods[jmethod.method_name] = jmethod
    def render(self, gen):
        """Emit the class: interfaces, fields, a constructor that assigns
        field defaults, concrete methods, and abstract method stubs."""
        self.rendered = True
        gen.begin_class(self, self.super_class, abstract=self.abstract)
        for inter in self.interfaces:
            gen.implements(inter)
        for field, fielddef in self.fields.values():
            gen.add_field(field)
        # Emit the constructor:
        gen.begin_constructor()
        # set default values for fields
        for field, f_default in self.fields.values():
            if field.jtype is not jVoid:
                gen.load_jvm_var(self, 0) # load this ptr
                # load default value of field
                push_constant(gen.db, field.OOTYPE, f_default, gen)
                field.store(gen) # store value into field
        gen.end_constructor()
        for method in self.methods.values():
            method.render(gen)
        for method in self.abstract_methods.values():
            gen.begin_j_function(self, method, abstract=True)
            gen.end_function()
        gen.end_class()
class InterlinkFunction(Function):
    """
    A method of the generated Interlink helper class.  Each instance
    emits a zero-argument virtual method whose body simply delegates
    to a given static helper function.
    """
    def __init__(self, interlink, name, helper):
        """
        interlink: JvmType of the Interlink implementation class
        name: name the generated method will have
        helper: jvmgen.Method for the static helper to delegate to
        """
        self.interlink = interlink
        self.name = name
        self.helper = helper
        self.method_obj = jvmgen.Method.v(interlink, self.name, [], jVoid)
    def method(self):
        """Return the jvmgen.Method descriptor for this function."""
        return self.method_obj
    def render(self, gen):
        """Emit the method body: call the helper, then return void."""
        gen.begin_function(self.name, (), [self.interlink], jVoid)
        gen.emit(self.helper)
        gen.return_val(jVoid)
        gen.end_function()
| Python |
"""
Mapping from OOType opcodes to JVM MicroInstructions. Most of these
come from the oosupport directory.
"""
from pypy.translator.oosupport.metavm import \
PushArg, PushAllArgs, StoreResult, InstructionList, New, DoNothing, Call,\
SetField, GetField, DownCast, RuntimeNew, OOString, CastTo
from pypy.translator.jvm.metavm import \
IndirectCall, JvmCallMethod, TranslateException, NewCustomDict, \
CastPtrToWeakAddress, CastWeakAddressToPtr
import pypy.translator.jvm.generator as jvmgen
import pypy.translator.jvm.typesystem as jvmtype
def _proc(val):
    """Normalize one opcode-table entry into an InstructionList.

    A list entry is taken as-is; anything else gets the standard
    PushAllArgs / StoreResult wrapping around it."""
    if isinstance(val, list):
        return InstructionList(val)
    return InstructionList((PushAllArgs, val, StoreResult))
def _check_zer(op):
    # Wrap 'op' so that a JVM ArithmeticException (division by zero)
    # is translated into a PyPy ZeroDivisionError.
    return [TranslateException(
        jvmtype.jArithmeticException,
        'throwZeroDivisionError',
        _proc(op))]
def _check_ovf(op):
    # Placeholder: overflow checking is not wrapped yet, the operation
    # is returned unchanged (callers mark the intent in the table).
    return op
# This table maps the opcodes to micro-ops for processing them.
# It is post-processed by _proc (see below): non-list entries get the
# standard PushAllArgs/StoreResult wrapping; string entries name a
# helper emitted by the generator.
_opcodes = {
    # __________ object oriented operations __________
    'new': [New, StoreResult],
    'runtimenew': [RuntimeNew, StoreResult],
    'oosetfield': [SetField],
    'oogetfield': [GetField, StoreResult],
    'oosend': [JvmCallMethod, StoreResult],
    'ooupcast': DoNothing,
    'oodowncast': [DownCast, StoreResult],
    'oois': 'ref_is_eq',
    'oononnull': 'is_not_null',
    'instanceof': [CastTo, StoreResult],
    'subclassof': [PushAllArgs, jvmgen.SWAP, jvmgen.CLASSISASSIGNABLEFROM, StoreResult],
    'ooidentityhash': [PushAllArgs, jvmgen.OBJHASHCODE, StoreResult],
    'oohash': [PushAllArgs, jvmgen.OBJHASHCODE, StoreResult],
    'oostring': [OOString, StoreResult],
    #'ooparse_int': [PushAllArgs, 'call int32 [pypylib]pypy.runtime.Utils::OOParseInt(string, int32)'],
    'oonewcustomdict': [NewCustomDict, StoreResult],
    #
    'same_as': DoNothing,
    'hint': [PushArg(0), StoreResult],
    'direct_call': [Call, StoreResult],
    'indirect_call': [PushAllArgs, IndirectCall, StoreResult],
    'cast_ptr_to_weakadr': [CastPtrToWeakAddress],
    'cast_weakadr_to_ptr': CastWeakAddressToPtr,
    #'gc__collect': 'call void class [mscorlib]System.GC::Collect()',
    #'resume_point': Ignore,
    'debug_assert': [], # TODO: implement?
    # __________ numeric operations __________
    'bool_not': 'logical_not',
    'char_lt': 'less_than',
    'char_le': 'less_equals',
    'char_eq': 'equals',
    'char_ne': 'not_equals',
    'char_gt': 'greater_than',
    'char_ge': 'greater_equals',
    'unichar_eq': 'equals',
    'unichar_ne': 'not_equals',
    'int_is_true': 'not_equals_zero',
    'int_neg': jvmgen.INEG,
    'int_neg_ovf': jvmgen.INEGOVF,
    'int_abs': 'iabs',
    'int_abs_ovf': jvmgen.IABSOVF,
    'int_invert': 'bitwise_negate',
    'int_add': jvmgen.IADD,
    'int_sub': jvmgen.ISUB,
    'int_mul': jvmgen.IMUL,
    'int_floordiv': jvmgen.IDIV,
    'int_floordiv_zer': _check_zer(jvmgen.IDIV),
    'int_mod': jvmgen.IREM,
    'int_lt': 'less_than',
    'int_le': 'less_equals',
    'int_eq': 'equals',
    'int_ne': 'not_equals',
    'int_gt': 'greater_than',
    'int_ge': 'greater_equals',
    'int_and': jvmgen.IAND,
    'int_or': jvmgen.IOR,
    'int_lshift': jvmgen.ISHL,
    'int_rshift': jvmgen.ISHR,
    'int_xor': jvmgen.IXOR,
    'int_add_ovf': jvmgen.IADDOVF,
    'int_sub_ovf': jvmgen.ISUBOVF,
    'int_mul_ovf': jvmgen.IMULOVF,
    'int_floordiv_ovf': jvmgen.IDIV, # these can't overflow!
    'int_mod_zer': _check_zer(jvmgen.IREM),
    'int_mod_ovf': jvmgen.IREMOVF,
    'int_lt_ovf': 'less_than',
    'int_le_ovf': 'less_equals',
    'int_eq_ovf': 'equals',
    'int_ne_ovf': 'not_equals',
    'int_gt_ovf': 'greater_than',
    'int_ge_ovf': 'greater_equals',
    'int_and_ovf': jvmgen.IAND,
    'int_or_ovf': jvmgen.IOR,
    'int_lshift_ovf': _check_ovf(jvmgen.ISHL),
    'int_lshift_ovf_val': _check_ovf(jvmgen.ISHL), # VAL??
    'int_rshift_ovf': jvmgen.ISHR, # these can't overflow!
    'int_xor_ovf': jvmgen.IXOR,
    'int_floordiv_ovf_zer': _check_zer(jvmgen.IDIV),
    'int_mod_ovf_zer': _check_zer(jvmgen.IREMOVF),
    'uint_is_true': 'not_equals_zero',
    'uint_invert': 'bitwise_negate',
    'uint_add': jvmgen.IADD,
    'uint_sub': jvmgen.ISUB,
    'uint_mul': jvmgen.IMUL,
    'uint_div': jvmgen.IDIV, # valid?
    'uint_truediv': None, # TODO
    'uint_floordiv': jvmgen.IDIV, # valid?
    'uint_mod': jvmgen.IREM, # valid?
    'uint_lt': 'u_less_than',
    'uint_le': 'u_less_equals',
    'uint_eq': 'u_equals',
    'uint_ne': 'u_not_equals',
    'uint_gt': 'u_greater_than',
    'uint_ge': 'u_greater_equals',
    'uint_and': jvmgen.IAND,
    'uint_or': jvmgen.IOR,
    'uint_lshift': jvmgen.ISHL,
    'uint_rshift': jvmgen.IUSHR,
    'uint_xor': jvmgen.IXOR,
    'float_is_true': [PushAllArgs,
                      jvmgen.DCONST_0,
                      'dbl_not_equals'],
    'float_neg': jvmgen.DNEG,
    'float_abs': 'dbl_abs',
    'float_add': jvmgen.DADD,
    'float_sub': jvmgen.DSUB,
    'float_mul': jvmgen.DMUL,
    'float_truediv': jvmgen.DDIV,
    'float_pow': jvmgen.MATHDPOW,
    'float_lt': 'dbl_less_than',
    'float_le': 'dbl_less_equals',
    'float_eq': 'dbl_equals',
    'float_ne': 'dbl_not_equals',
    'float_gt': 'dbl_greater_than',
    'float_ge': 'dbl_greater_equals',
    'llong_is_true': [PushAllArgs,
                      jvmgen.LCONST_0,
                      'long_not_equals'],
    'llong_neg': jvmgen.LNEG,
    'llong_neg_ovf': jvmgen.LNEGOVF,
    'llong_abs': jvmgen.MATHLABS,
    'llong_abs_ovf': jvmgen.LABSOVF,
    'llong_invert': jvmgen.PYPYLONGBITWISENEGATE,
    'llong_add': jvmgen.LADD,
    'llong_sub': jvmgen.LSUB,
    'llong_mul': jvmgen.LMUL,
    'llong_div': jvmgen.LDIV,
    'llong_truediv': None, # TODO
    'llong_floordiv': jvmgen.LDIV,
    'llong_floordiv_zer': _check_zer(jvmgen.LDIV),
    'llong_mod': jvmgen.LREM,
    'llong_mod_zer': _check_zer(jvmgen.LREM),
    'llong_lt': 'long_less_than',
    'llong_le': 'long_less_equals',
    'llong_eq': 'long_equals',
    'llong_ne': 'long_not_equals',
    'llong_gt': 'long_greater_than',
    'llong_ge': 'long_greater_equals',
    'llong_and': jvmgen.LAND,
    'llong_or': jvmgen.LOR,
    'llong_lshift': jvmgen.LSHL,
    'llong_rshift': jvmgen.LSHR,
    'llong_xor': jvmgen.LXOR,
    'llong_floordiv_ovf': jvmgen.LDIV, # these can't overflow!
    'llong_mod_ovf': jvmgen.LREMOVF,
    'ullong_is_true': [PushAllArgs,
                       jvmgen.LCONST_0,
                       'long_not_equals'],
    'ullong_invert': jvmgen.PYPYLONGBITWISENEGATE,
    'ullong_add': jvmgen.LADD,
    'ullong_sub': jvmgen.LSUB,
    'ullong_mul': jvmgen.LMUL,
    'ullong_div': jvmgen.LDIV, # valid?
    'ullong_truediv': None, # TODO
    'ullong_floordiv': jvmgen.LDIV, # valid?
    'ullong_mod': jvmgen.LREM, # valid?
    'ullong_mod_zer': _check_zer(jvmgen.LREM),
    'ullong_lt': 'ulong_less_than',
    'ullong_le': 'ulong_less_equals',
    'ullong_eq': 'ulong_equals',
    'ullong_ne': 'ulong_not_equals',
    'ullong_gt': 'ulong_greater_than',
    'ullong_ge': 'ulong_greater_equals',
    # when casting from bool we want that every truth value is casted
    # to 1: we can't simply DoNothing, because the CLI stack could
    # contains a truth value not equal to 1, so we should use the !=0
    # trick. #THIS COMMENT NEEDS TO BE VALIDATED AND UPDATED
    'cast_bool_to_int': DoNothing,
    'cast_bool_to_uint': DoNothing,
    'cast_bool_to_float': [PushAllArgs, 'not_equals_zero', jvmgen.I2D],
    'cast_char_to_int': DoNothing,
    'cast_unichar_to_int': DoNothing,
    'cast_int_to_char': DoNothing,
    'cast_int_to_unichar': DoNothing,
    'cast_int_to_uint': DoNothing,
    'cast_int_to_float': jvmgen.I2D,
    'cast_int_to_longlong': jvmgen.I2L,
    'cast_uint_to_int': DoNothing,
    'cast_uint_to_float': jvmgen.PYPYUINTTODOUBLE,
    'cast_float_to_int': jvmgen.D2I,
    #'cast_float_to_longlong': jvmgen.D2L, #PAUL
    'cast_float_to_longlong': jvmgen.PYPYDOUBLETOLONG, #PAUL
    'cast_float_to_uint': jvmgen.PYPYDOUBLETOUINT,
    'truncate_longlong_to_int': jvmgen.L2I,
    'cast_longlong_to_float': jvmgen.L2D,
}
# Build the public table by normalizing every entry with _proc(),
# then drop the raw table.
opcodes = dict([(_key, _proc(_entry)) for _key, _entry in _opcodes.items()])
del _opcodes
| Python |
"""
The database centralizes information about the state of our translation,
and the mapping between the OOTypeSystem and the Java type system.
"""
from cStringIO import StringIO
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.ootypesystem import ootype, rclass
from pypy.translator.jvm import typesystem as jvmtype
from pypy.translator.jvm import node, methods
from pypy.translator.jvm.option import getoption
import pypy.translator.jvm.generator as jvmgen
from pypy.translator.jvm.generator import Method, Property, Field
import pypy.translator.jvm.constant as jvmconst
from pypy.translator.jvm.typesystem import \
jStringBuilder, jInt, jVoid, jString, jChar, jPyPyConst, jObject, \
jThrowable
from pypy.translator.jvm.builtin import JvmBuiltInType
from pypy.rpython.lltypesystem.llmemory import WeakGcAddress
from pypy.translator.oosupport.database import Database as OODatabase
# ______________________________________________________________________
# Database object
class Database(OODatabase):
    def __init__(self, genoo):
        # 'genoo' is forwarded to the oosupport Database base class.
        OODatabase.__init__(self, genoo)
        # Private attributes:
        self._jasmin_files = [] # list of strings --- .j files we made
        self._classes = {} # Maps ootype class objects to node.Class objects,
                           # and JvmType objects as well
        self._functions = {}      # graph -> jvmgen.Method
        # (jargtypes, jrettype) -> node.StaticMethodInterface
        self._delegates = {}
        # (INSTANCE, method_name) -> node.StaticMethodImplementation
        self._bound_methods = {}
        self._function_names = {} # graph --> function_name
        self._constants = {}      # flowmodel.Variable --> jvmgen.Const
        # Special fields for the Object class, see _translate_Object
        self._object_interf = None
        self._object_impl = None
        self._object_exc_impl = None
# _________________________________________________________________
# Java String vs Byte Array
#
# We allow the user to configure whether Python strings are stored
# as Java strings, or as byte arrays. The latter saves space; the
# former may be faster.
using_byte_array = False
# XXX have to fill this in
# _________________________________________________________________
# Miscellaneous
def _uniq(self, nm):
return nm + "_" + str(self.unique())
    def _pkg(self, nm):
        # Qualify 'nm' with the output package name from the options.
        return "%s.%s" % (getoption('package'), nm)
    def class_name(self, TYPE):
        # Return the fully qualified Java class name for ootype TYPE.
        jtype = self.lltype_to_cts(TYPE)
        assert isinstance(jtype, jvmtype.JvmClassType)
        return jtype.name
    def add_jasmin_file(self, jfile):
        """ Adds to the list of files we need to run jasmin on """
        self._jasmin_files.append(jfile)
    def jasmin_files(self):
        """ Returns list of files we need to run jasmin on """
        return self._jasmin_files
    def is_Object(self, OOTYPE):
        # True for the special Instance named "Object"; it gets custom
        # treatment in _translate_Object below.
        return isinstance(OOTYPE, ootype.Instance) and OOTYPE._name == "Object"
    # _________________________________________________________________
    # Node Creation
    #
    # Creates nodes that represents classes, functions, simple constants.
    def create_interlink_node(self, methods):
        """ This is invoked by create_interlink_node() in
        jvm/prebuiltnodes.py.  It creates a Class node that will
        be an instance of the Interlink interface, which is used
        to allow the static java code to throw PyPy exceptions and the
        like.

        The 'methods' argument should be a dictionary whose keys are
        method names and whose entries are jvmgen.Method objects which
        the corresponding method should invoke.

        (Note: this parameter shadows the 'methods' module imported at
        the top of the file; the module is not used here.) """
        nm = self._pkg(self._uniq('InterlinkImplementation'))
        cls = node.Class(nm, supercls=jObject)
        for method_name, helper in methods.items():
            cls.add_method(node.InterlinkFunction(cls, method_name, helper))
        cls.add_interface(jvmtype.jPyPyInterlink)
        self.interlink_class = cls
        self.pending_node(cls)
def types_for_graph(self, graph):
"""
Given a graph, returns a tuple like so:
( (java argument types...), java return type )
For example, if the graph took two strings and returned a bool,
then the return would be:
( (jString, jString), jBool )
"""
argtypes = [arg.concretetype for arg in graph.getargs()
if arg.concretetype is not ootype.Void]
jargtypes = tuple([self.lltype_to_cts(argty) for argty in argtypes])
rettype = graph.getreturnvar().concretetype
jrettype = self.lltype_to_cts(rettype)
return jargtypes, jrettype
    def _function_for_graph(self, classobj, funcnm, is_static, graph):
        """
        Creates a node.Function object for a particular graph.  Adds
        the method to 'classobj', which should be a node.Class object.
        'is_static' selects a static vs. virtual JVM method.
        """
        jargtypes, jrettype = self.types_for_graph(graph)
        funcobj = node.GraphFunction(
            self, classobj, funcnm, jargtypes, jrettype, graph, is_static)
        return funcobj
    def _translate_record(self, OOTYPE):
        """Translate an ootype Record into a node.Class with generated
        toString/equals/hash helpers; results are cached in self._classes."""
        assert OOTYPE is not ootype.ROOT
        # Create class object if it does not already exist:
        if OOTYPE in self._classes:
            return self._classes[OOTYPE]
        # Create the class object first
        clsnm = self._pkg(self._uniq('Record'))
        clsobj = node.Class(clsnm, jObject)
        self._classes[OOTYPE] = clsobj
        # Add fields:
        self._translate_class_fields(clsobj, OOTYPE)
        # generate toString
        dump_method = methods.RecordDumpMethod(self, OOTYPE, clsobj)
        clsobj.add_method(dump_method)
        # generate equals and hash
        equals_method = methods.DeepEqualsMethod(self, OOTYPE, clsobj)
        clsobj.add_method(equals_method)
        hash_method = methods.DeepHashMethod(self, OOTYPE, clsobj)
        clsobj.add_method(hash_method)
        self.pending_node(clsobj)
        return clsobj
    def _translate_Object(self, OBJ):
        """
        We handle the class 'Object' quite specially: we translate it
        into an interface with two implementations.  One
        implementation serves as the root of most objects, and the
        other as the root for all exceptions.
        """
        assert self.is_Object(OBJ)
        assert OBJ._superclass == ootype.ROOT
        # Have we already translated Object?
        if self._object_interf: return self._object_interf
        # Create the interface and two implementations:
        def gen_name(): return self._pkg(self._uniq(OBJ._name))
        internm, implnm, exc_implnm = gen_name(), gen_name(), gen_name()
        self._object_interf = node.Interface(internm)
        self._object_impl = node.Class(implnm, supercls=jObject)
        self._object_exc_impl = node.Class(exc_implnm, supercls=jThrowable)
        self._object_impl.add_interface(self._object_interf)
        self._object_exc_impl.add_interface(self._object_interf)
        # Translate the fields into properties on the interface,
        # and into actual fields on the implementations.
        # (The helper closures below are invoked within each loop
        # iteration, so late binding of fieldnm/fieldty is not an issue.)
        for fieldnm, (FIELDOOTY, fielddef) in OBJ._fields.iteritems():
            if FIELDOOTY is ootype.Void: continue
            fieldty = self.lltype_to_cts(FIELDOOTY)
            # Currently use hacky convention of _jvm_FieldName for the name
            methodnm = "_jvm_"+fieldnm
            def getter_method_obj(node):
                return Method.v(node, methodnm+"_g", [], fieldty)
            def putter_method_obj(node):
                return Method.v(node, methodnm+"_p", [fieldty], jVoid)
            # Add get/put methods to the interface:
            prop = Property(
                fieldnm,
                getter_method_obj(self._object_interf),
                putter_method_obj(self._object_interf),
                OOTYPE=FIELDOOTY)
            self._object_interf.add_property(prop)
            # Generate implementations:
            def generate_impl(clsobj):
                clsnm = clsobj.name
                fieldobj = Field(clsnm, fieldnm, fieldty, False, FIELDOOTY)
                clsobj.add_field(fieldobj, fielddef)
                clsobj.add_method(node.GetterFunction(
                    self, clsobj, getter_method_obj(clsobj), fieldobj))
                clsobj.add_method(node.PutterFunction(
                    self, clsobj, putter_method_obj(clsobj), fieldobj))
            generate_impl(self._object_impl)
            generate_impl(self._object_exc_impl)
        # Ensure that we generate all three classes.
        self.pending_node(self._object_interf)
        self.pending_node(self._object_impl)
        self.pending_node(self._object_exc_impl)
    def _translate_superclass_of(self, OOSUB):
        """
        Invoked to translate OOSUB's super class.  Normally just invokes
        pending_class, but we treat "Object" differently so that we can
        make all exceptions descend from Throwable.
        """
        OOSUPER = OOSUB._superclass
        if not self.is_Object(OOSUPER):
            return self.pending_class(OOSUPER)
        self._translate_Object(OOSUPER)   # ensure this has been done
        # exceptions descend from the Throwable-rooted implementation:
        if OOSUB._name == "exceptions.Exception":
            return self._object_exc_impl
        return self._object_impl
    def _translate_instance(self, OOTYPE):
        """Translate an ootype Instance into a node.Class (cached in
        self._classes), including its fields, methods and a debug
        'dump' method."""
        assert isinstance(OOTYPE, ootype.Instance)
        assert OOTYPE is not ootype.ROOT
        # Create class object if it does not already exist:
        if OOTYPE in self._classes:
            return self._classes[OOTYPE]
        # Create the class object first
        clsnm = self._pkg(self._uniq(OOTYPE._name))
        clsobj = node.Class(clsnm)
        self._classes[OOTYPE] = clsobj
        # Resolve super class
        assert OOTYPE._superclass
        supercls = self._translate_superclass_of(OOTYPE)
        clsobj.set_super_class(supercls)
        # TODO --- mangle field and method names?  Must be
        # deterministic, or use hashtable to avoid conflicts between
        # classes?
        # Add fields:
        self._translate_class_fields(clsobj, OOTYPE)
        # Add methods:
        for mname, mimpl in OOTYPE._methods.iteritems():
            if not hasattr(mimpl, 'graph'):
                # Abstract method
                METH = mimpl._TYPE
                arglist = [self.lltype_to_cts(ARG) for ARG in METH.ARGS
                           if ARG is not ootype.Void]
                returntype = self.lltype_to_cts(METH.RESULT)
                clsobj.add_abstract_method(jvmgen.Method.v(
                    clsobj, mname, arglist, returntype))
            else:
                # if the first argument's type is not a supertype of
                # this class it means that this method is not really
                # used by the class: don't render it, else there would
                # be a type mismatch.
                args = mimpl.graph.getargs()
                SELF = args[0].concretetype
                if not ootype.isSubclass(OOTYPE, SELF): continue
                mobj = self._function_for_graph(
                    clsobj, mname, False, mimpl.graph)
                clsobj.add_method(mobj)
        # currently, we always include a special "dump" method for debugging
        # purposes
        dump_method = node.InstanceDumpMethod(self, OOTYPE, clsobj)
        clsobj.add_method(dump_method)
        self.pending_node(clsobj)
        return clsobj
    def _translate_class_fields(self, clsobj, OOTYPE):
        # Add one jvmgen.Field per non-Void ootype field, remembering the
        # field's default value for the generated constructor.
        for fieldnm, (FIELDOOTY, fielddef) in OOTYPE._fields.iteritems():
            if FIELDOOTY is ootype.Void: continue
            fieldty = self.lltype_to_cts(FIELDOOTY)
            clsobj.add_field(
                jvmgen.Field(clsobj.name, fieldnm, fieldty, False, FIELDOOTY),
                fielddef)
    def pending_class(self, OOTYPE):
        # Translating the type implicitly queues its class node (if any).
        return self.lltype_to_cts(OOTYPE)
    def pending_function(self, graph):
        """
        This is invoked when a standalone function is to be compiled.
        It creates a class named after the function with a single
        method, invoke().  This class is added to the worklist.
        Returns a jvmgen.Method object that allows this function to be
        invoked.  Results are cached per graph.
        """
        if graph in self._functions:
            return self._functions[graph]
        classnm = self._pkg(self._uniq(graph.name))
        classobj = node.Class(classnm, self.pending_class(ootype.ROOT))
        funcobj = self._function_for_graph(classobj, "invoke", True, graph)
        classobj.add_method(funcobj)
        self.pending_node(classobj)
        res = self._functions[graph] = funcobj.method()
        return res
    def record_delegate(self, TYPE):
        """
        Creates and returns a StaticMethodInterface type; this type
        represents an abstract base class for functions with a given
        signature, represented by TYPE, a ootype.StaticMethod
        instance.
        """
        # Translate argument/return types into java types, check if
        # we already have such a delegate:
        jargs = tuple([self.lltype_to_cts(ARG) for ARG in TYPE.ARGS
                       if ARG is not ootype.Void])
        jret = self.lltype_to_cts(TYPE.RESULT)
        return self.record_delegate_sig(jargs, jret)
    def record_delegate_sig(self, jargs, jret):
        """
        Like record_delegate, but the signature is in terms of java
        types.  jargs is a list of JvmTypes, one for each argument,
        and jret is a JvmType.  Note that jargs does NOT include an
        entry for the this pointer of the resulting object.  Delegates
        are cached per (jargs, jret) signature.
        """
        key = (jargs, jret)
        if key in self._delegates:
            return self._delegates[key]
        # TODO: Make an intelligent name for this interface by
        # mangling the list of parameters
        name = self._pkg(self._uniq('Delegate'))
        # Create a new one if we do not:
        interface = node.StaticMethodInterface(name, jargs, jret)
        self._delegates[key] = interface
        self.pending_node(interface)
        return interface
    def record_delegate_standalone_func_impl(self, graph):
        """
        Creates a class with an invoke() method that invokes the given
        graph.  This object can be used as a function pointer.  It
        will extend the appropriate delegate for the graph's
        signature.
        """
        jargtypes, jrettype = self.types_for_graph(graph)
        super_class = self.record_delegate_sig(jargtypes, jrettype)
        pfunc = self.pending_function(graph)
        implnm = self._pkg(self._uniq(graph.name+'_delegate'))
        n = node.StaticMethodImplementation(implnm, super_class, None, pfunc)
        self.pending_node(n)
        return n
def record_delegate_bound_method_impl(self, INSTANCE, method_name):
    """
    Creates an object with an invoke() method which invokes
    a method named method_name on an instance of INSTANCE.
    The result is cached per (INSTANCE, method_name) pair.
    """
    key = (INSTANCE, method_name)
    if key in self._bound_methods:
        return self._bound_methods[key]
    # _lookup returns (INSTANCE, meth); the method's _TYPE is the
    # StaticMethod signature used to pick the delegate base class
    METH_TYPE = INSTANCE._lookup(method_name)[1]._TYPE
    super_class = self.record_delegate(METH_TYPE)
    self_class = self.lltype_to_cts(INSTANCE)
    mthd_obj = self_class.lookup_method(method_name)
    implnm = self._pkg(self._uniq(
        self_class.simple_name()+"_"+method_name+"_delegate"))
    n = self._bound_methods[key] = node.StaticMethodImplementation(
        implnm, super_class, self_class, mthd_obj)
    self.pending_node(n)
    return n
# _________________________________________________________________
# toString functions
#
# Obtains an appropriate method for serializing an object of
# any type.
# Maps scalar OOTYPEs to the jvmgen Method that converts a value of
# that type to a string; any type not listed here falls back to
# jvmgen.PYPYSERIALIZEOBJECT (see toString_method_for_ootype()).
_toString_methods = {
    ootype.Signed:jvmgen.INTTOSTRINGI,
    ootype.Unsigned:jvmgen.PYPYSERIALIZEUINT,
    ootype.SignedLongLong:jvmgen.LONGTOSTRINGL,
    ootype.Float:jvmgen.DOUBLETOSTRINGD,
    ootype.Bool:jvmgen.PYPYSERIALIZEBOOLEAN,
    ootype.Void:jvmgen.PYPYSERIALIZEVOID,
    ootype.Char:jvmgen.PYPYESCAPEDCHAR,
    ootype.String:jvmgen.PYPYESCAPEDSTRING,
    }
def toString_method_for_ootype(self, OOTYPE):
    """
    Assuming that an instance of type OOTYPE is pushed on the
    stack, returns a Method object that you can invoke.  This method
    will return a string representing the contents of that type.
    Do something like:
    > gen.load(var)
    > mthd = db.toString_method_for_ootype(var.concretetype)
    > mthd.invoke(gen)
    to print the value of 'var'.
    """
    try:
        return self._toString_methods[OOTYPE]
    except KeyError:
        # anything without a dedicated serializer goes through the
        # generic object serializer
        return jvmgen.PYPYSERIALIZEOBJECT
# _________________________________________________________________
# Type translation functions
#
# Functions which translate from OOTypes to JvmType instances.
# FIX --- JvmType and their Class nodes should not be different.
def escape_name(self, nm):
    """Return nm unchanged; JVM identifiers need no escaping here."""
    # invoked by oosupport/function.py; our names don't need escaping?
    return nm
def llvar_to_cts(self, llv):
    """ Returns a tuple (JvmType, str) with the translated type
    and name of the given flow-graph variable llv """
    return self.lltype_to_cts(llv.concretetype), llv.name
# Dictionary for scalar types; in this case, if we see the key, we
# will return the value
# Scalar OOTYPEs translate directly to the JvmType given here
# (exact-key lookup; see lltype_to_cts()).
ootype_to_scalar = {
    ootype.Void:             jvmtype.jVoid,
    ootype.Signed:           jvmtype.jInt,
    ootype.Unsigned:         jvmtype.jInt,
    ootype.SignedLongLong:   jvmtype.jLong,
    ootype.UnsignedLongLong: jvmtype.jLong,
    ootype.Bool:             jvmtype.jBool,
    ootype.Float:            jvmtype.jDouble,
    ootype.Char:             jvmtype.jChar,    # byte would be sufficient, but harder
    ootype.UniChar:          jvmtype.jChar,
    ootype.Class:            jvmtype.jClass,
    ootype.ROOT:             jvmtype.jObject,  # treat like a scalar
    WeakGcAddress:           jvmtype.jWeakRef,
    }
# Dictionary for non-scalar types; in this case, if we see the key, we
# will return a JvmBuiltInType based on the value
# Built-in container OOTYPEs: matching entries are wrapped in a
# JvmBuiltInType around the JvmType given here (matched either by the
# exact type or by its class; see lltype_to_cts()).
ootype_to_builtin = {
    ootype.String:           jvmtype.jString,
    ootype.StringBuilder:    jvmtype.jStringBuilder,
    ootype.List:             jvmtype.jArrayList,
    ootype.Dict:             jvmtype.jHashMap,
    ootype.DictItemsIterator:jvmtype.jPyPyDictItemsIterator,
    ootype.CustomDict:       jvmtype.jPyPyCustomDict,
    }
def lltype_to_cts(self, OOT):
    """ Returns an instance of JvmType corresponding to
    the given OOType.

    Resolution order: scalar table, opaque pointers, built-in table
    (exact type first, then its class), then Instances, Records and
    StaticMethods.  Asserts on anything else.
    """
    # Handle built-in types:
    if OOT in self.ootype_to_scalar:
        return self.ootype_to_scalar[OOT]
    # BUG FIX: this branch previously tested 'isinstance(t.TO, ...)'
    # and returned the bare name 'jObject'; 't' was never defined and
    # 'jObject' is only available here as 'jvmtype.jObject', so taking
    # the branch raised NameError.
    if isinstance(OOT, lltype.Ptr) and isinstance(OOT.TO, lltype.OpaqueType):
        return jvmtype.jObject
    if OOT in self.ootype_to_builtin:
        return JvmBuiltInType(self, self.ootype_to_builtin[OOT], OOT)
    if OOT.__class__ in self.ootype_to_builtin:
        return JvmBuiltInType(
            self, self.ootype_to_builtin[OOT.__class__], OOT)
    # Handle non-built-in-types:
    if isinstance(OOT, ootype.Instance):
        if self.is_Object(OOT):
            return self._translate_Object(OOT)
        return self._translate_instance(OOT)
    if isinstance(OOT, ootype.Record):
        return self._translate_record(OOT)
    if isinstance(OOT, ootype.StaticMethod):
        return self.record_delegate(OOT)
    assert False, "Untranslatable type %s!" % OOT
def exception_root_object(self):
    """
    Returns a JvmType representing the version of Object that
    serves as the root of all exceptions.
    """
    # translating rclass.OBJECT populates self._object_interf and
    # self._object_exc_impl as a side effect
    self.lltype_to_cts(rclass.OBJECT)
    assert self._object_interf
    return self._object_exc_impl
# _________________________________________________________________
# Uh....
#
# These functions are invoked by the code in oosupport, but I
# don't think we need them or use them otherwise.
def record_function(self, graph, name):
    # remember the name chosen for this graph (queried by graph_name)
    self._function_names[graph] = name
def graph_name(self, graph):
    """Return the recorded name for graph, or None if unknown."""
    # XXX: graph names are not guaranteed to be unique
    try:
        return self._function_names[graph]
    except KeyError:
        return None
| Python |
from pypy.rpython.ootypesystem import ootype
from pypy.objspace.flow import model as flowmodel
from pypy.translator.jvm.generator import \
Field, Method, CUSTOMDICTMAKE
from pypy.translator.oosupport.constant import \
BaseConstantGenerator, RecordConst, InstanceConst, ClassConst, \
StaticMethodConst, CustomDictConst, WeakRefConst, push_constant
from pypy.translator.jvm.typesystem import \
jPyPyConst, jObject, jVoid, jWeakRef
# ___________________________________________________________________________
# Constant Generator
class JVMConstantGenerator(BaseConstantGenerator):
    """Emits the jPyPyConst class whose static fields hold all program
    constants; initialization is split over several constant_init_N
    static methods, which <clinit> calls in order."""

    # _________________________________________________________________
    # Constant Operations
    #
    # We store constants in static fields of the jPyPyConst class.

    def _init_constant(self, const):
        # Determine the Java type of the constant: some constants
        # (weakrefs) do not have an OOTYPE, so if it returns None use
        # jtype()
        JFIELDOOTY = const.OOTYPE()
        if not JFIELDOOTY: jfieldty = const.jtype()
        else: jfieldty = self.db.lltype_to_cts(JFIELDOOTY)
        # a static (True) field on jPyPyConst, named after the constant
        const.fieldobj = Field(jPyPyConst.name, const.name, jfieldty, True)

    def push_constant(self, gen, const):
        # load the constant's static field onto the stack
        const.fieldobj.load(gen)

    def _store_constant(self, gen, const):
        const.fieldobj.store(gen)

    # _________________________________________________________________
    # Constant Generation

    def _begin_gen_constants(self, gen, all_constants):
        gen.begin_class(jPyPyConst, jObject)
        return gen

    def _declare_const(self, gen, const):
        gen.add_field(const.fieldobj)

    def _declare_step(self, gen, stepnum):
        # each initialization step becomes its own static method, to
        # keep any single method from growing too large
        next_nm = "constant_init_%d" % stepnum
        gen.begin_function(next_nm, [], [], jVoid, True)

    def _close_step(self, gen, stepnum):
        gen.return_val(jVoid)
        gen.end_function()    # end constant_init_N where N == stepnum

    def _end_gen_constants(self, gen, numsteps):
        # The static init code just needs to call constant_init_0 up
        # to constant_init_<numsteps-1>, in order
        gen.begin_function('<clinit>', [], [], jVoid, True)
        for x in range(numsteps):
            m = Method.s(jPyPyConst, "constant_init_%d" % x, [], jVoid)
            gen.emit(m)
        gen.return_val(jVoid)
        gen.end_function()
        gen.end_class()
class JVMStaticMethodConst(StaticMethodConst):
    """Constant function pointer: realized at runtime as an instance of
    the delegate class generated for the function's graph (or a null
    reference for the null function pointer)."""

    def record_dependencies(self):
        # the null function pointer needs no delegate class
        if self.value is ootype.null(self.value._TYPE):
            self.delegate_impl = None
            return
        StaticMethodConst.record_dependencies(self)
        self.delegate_impl = self.db.record_delegate_standalone_func_impl(
            self.value.graph)

    def create_pointer(self, gen):
        if self.delegate_impl:
            gen.new_with_jtype(self.delegate_impl)
        else:
            gen.push_null(jObject)

    def initialize_data(self, ilasm):
        # nothing to initialize beyond the pointer itself
        return
class JVMCustomDictConst(CustomDictConst):
    """Constant custom dict: requires delegate classes for its key
    equality and key hash functions, combined by CUSTOMDICTMAKE."""

    def record_dependencies(self):
        # Near as I can tell, self.value is an ootype._custom_dict,
        # key_eq is a Python function and graph is, well, a method
        # graph that seems to be added to the function pointer
        # somewhere.  Adapted from cli/constant.py
        self.eq_jcls = self.db.record_delegate_standalone_func_impl(
            self.value._dict.key_eq.graph)
        self.hash_jcls = self.db.record_delegate_standalone_func_impl(
            self.value._dict.key_hash.graph)

    def create_pointer(self, gen):
        # push the eq and hash delegates, then build the dict from them
        gen.new_with_jtype(self.eq_jcls)
        gen.new_with_jtype(self.hash_jcls)
        gen.emit(CUSTOMDICTMAKE)
class JVMWeakRefConst(WeakRefConst):
    """Constant weak reference; has no OOTYPE, hence the jtype() hook."""

    # Ensure that weak refs are initialized last:
    PRIORITY = 200

    def jtype(self):
        # weakref constants have no OOTYPE; _init_constant() falls back
        # to this method for the field type
        return jWeakRef

    def create_pointer(self, gen):
        gen.prepare_cast_ptr_to_weak_address()
        if not self.value:
            TYPE = ootype.ROOT
            gen.push_null(TYPE)
        else:
            TYPE = self.value._TYPE
            push_constant(self.db, self.value._TYPE, self.value, gen)
        gen.finalize_cast_ptr_to_weak_address(TYPE)

    def initialize_data(self, gen):
        # discard the value left on the stack; nothing else to set up
        gen.pop(ootype.ROOT)
        return True
| Python |
from pypy.translator.jvm import typesystem as jvmtype
from pypy.translator.jvm import generator as jvmgen
from pypy.rpython.ootypesystem import ootype
from pypy.translator.jvm.typesystem import \
jInt, jVoid, jStringBuilder, jString, jPyPy, jChar, jArrayList, jObject, \
jBool, jHashMap, jPyPyDictItemsIterator, Generifier, jCharSequence, \
jPyPyCustomDict
# ______________________________________________________________________
# Mapping of built-in OOTypes to JVM types
class JvmBuiltInType(jvmtype.JvmClassType):
    """
    Represents built-in types to JVM.  May optionally be associated
    with an OOTYPE; if it is, then we will support lookup of the OOTYPE
    methods and will re-map them as needed to the JVM equivalents.
    """

    def __init__(self, db, classty, OOTYPE):
        jvmtype.JvmClassType.__init__(self, classty.name)
        self.db = db
        self.OOTYPE = OOTYPE
        # Generifier supplies the erased signatures used below
        self.gen = Generifier(OOTYPE)

    def __eq__(self, other):
        # equality by class + JVM class name
        return isinstance(other, JvmBuiltInType) and other.name == self.name

    def __hash__(self):
        return hash(self.name)

    def lookup_field(self, fieldnm):
        """ Given a field name, returns a jvmgen.Field object """
        _, FIELDTY = self.OOTYPE._lookup_field(fieldnm)
        jfieldty = self.db.lltype_to_cts(FIELDTY)
        return jvmgen.Field(
            self.descriptor.class_name(), fieldnm, jfieldty, False)

    def lookup_method(self, methodnm):
        """ Given the method name, returns a jvmgen.Method object """
        # Look for a shortcut method in our table of remappings:
        try:
            key = (self.OOTYPE.__class__, methodnm)
            return built_in_methods[key]
        except KeyError: pass
        # Otherwise, determine the Method object automagically
        # First, map the OOTYPE arguments and results to
        # the java types they will be at runtime.  Note that
        # we must use the erased types for this.
        ARGS, RESULT = self.gen.erased_types(methodnm)
        jargtypes = [self.db.lltype_to_cts(P) for P in ARGS]
        jrettype = self.db.lltype_to_cts(RESULT)
        if self.OOTYPE.__class__ in bridged_objects:
            # Bridged objects are ones where we have written a java class
            # that has methods with the correct names and types already
            return jvmgen.Method.v(self, methodnm, jargtypes, jrettype)
        else:
            # By default, we assume it is a static method on the PyPy
            # object, that takes an instance of this object as the first
            # argument.  The other arguments we just convert to java versions,
            # except for generics.
            jargtypes = [self] + jargtypes
            return jvmgen.Method.s(jPyPy, methodnm, jargtypes, jrettype)
# When we lookup a method on a BuiltInClassNode, we first check the
# 'built_in_methods' and 'bridged_objects' tables. This allows us to
# redirect to other methods if we like.
# OOTYPE classes whose methods are implemented by a hand-written java
# class with matching names/signatures (see JvmBuiltInType.lookup_method)
bridged_objects = (
    ootype.DictItemsIterator,
    )

# Direct remappings from (OOTYPE class, ll method name) to the concrete
# JVM method to call instead; consulted before the automatic mapping.
built_in_methods = {
    # Note: String and StringBuilder are rebound in ootype, and thus
    # .__class__ is required
    (ootype.StringBuilder.__class__, "ll_allocate"):
    jvmgen.Method.v(jStringBuilder, "ensureCapacity", (jInt,), jVoid),
    (ootype.StringBuilder.__class__, "ll_build"):
    jvmgen.Method.v(jStringBuilder, "toString", (), jString),
    (ootype.String.__class__, "ll_streq"):
    jvmgen.Method.v(jString, "equals", (jObject,), jBool),
    (ootype.String.__class__, "ll_strlen"):
    jvmgen.Method.v(jString, "length", (), jInt),
    (ootype.String.__class__, "ll_stritem_nonneg"):
    jvmgen.Method.v(jString, "charAt", (jInt,), jChar),
    (ootype.String.__class__, "ll_startswith"):
    jvmgen.Method.v(jString, "startsWith", (jString,), jBool),
    (ootype.String.__class__, "ll_endswith"):
    jvmgen.Method.v(jString, "endsWith", (jString,), jBool),
    (ootype.String.__class__, "ll_strcmp"):
    jvmgen.Method.v(jString, "compareTo", (jString,), jInt),
    (ootype.String.__class__, "ll_upper"):
    jvmgen.Method.v(jString, "toUpperCase", (), jString),
    (ootype.String.__class__, "ll_lower"):
    jvmgen.Method.v(jString, "toLowerCase", (), jString),
    (ootype.String.__class__, "ll_contains"):
    jvmgen.Method.v(jString, "contains", (jCharSequence,), jBool),
    (ootype.String.__class__, "ll_replace_chr_chr"):
    jvmgen.Method.v(jString, "replace", (jChar, jChar), jString),
    (ootype.Dict, "ll_set"):
    jvmgen.Method.v(jHashMap, "put", (jObject, jObject), jObject),
    (ootype.Dict, "ll_get"):
    jvmgen.Method.v(jHashMap, "get", (jObject,), jObject),
    (ootype.Dict, "ll_contains"):
    jvmgen.Method.v(jHashMap, "containsKey", (jObject,), jBool),
    (ootype.Dict, "ll_length"):
    jvmgen.Method.v(jHashMap, "size", (), jInt),
    (ootype.Dict, "ll_clear"):
    jvmgen.Method.v(jHashMap, "clear", (), jVoid),
    (ootype.CustomDict, "ll_set"):
    jvmgen.Method.v(jPyPyCustomDict, "put", (jObject, jObject), jObject),
    (ootype.CustomDict, "ll_get"):
    jvmgen.Method.v(jPyPyCustomDict, "get", (jObject,), jObject),
    (ootype.CustomDict, "ll_contains"):
    jvmgen.Method.v(jPyPyCustomDict, "containsKey", (jObject,), jBool),
    (ootype.CustomDict, "ll_length"):
    jvmgen.Method.v(jPyPyCustomDict, "size", (), jInt),
    (ootype.CustomDict, "ll_clear"):
    jvmgen.Method.v(jPyPyCustomDict, "clear", (), jVoid),
    (ootype.List, "ll_length"):
    jvmgen.Method.v(jArrayList, "size", (), jInt),
    (ootype.List, "ll_getitem_fast"):
    jvmgen.Method.v(jArrayList, "get", (jInt,), jObject),
    }
| Python |
import os
import platform
import py
from py.compat import subprocess
from pypy.tool.udir import udir
from pypy.rpython.test.tool import BaseRtypingTest, OORtypeMixin
from pypy.rpython.lltypesystem.lltype import typeOf
from pypy.rpython.ootypesystem import ootype
from pypy.annotation.model import lltype_to_annotation
from pypy.translator.translator import TranslationContext
from pypy.translator.jvm.genjvm import \
generate_source_for_function, JvmError, detect_missing_support_programs
from pypy.translator.jvm.option import getoption
FLOAT_PRECISION = 8
# CLI duplicate. Needs JVMtest support -Paul
# check function depends on this function
#def compile_function(func, annotation=[], graph=None, backendopt=True):
# olddefs = patch()
# gen = _build_gen(func, annotation, graph, backendopt)
# gen.generate_source()
# exe_name = gen.build_exe()
# unpatch(*olddefs) # restore original values
# return CliFunctionWrapper(exe_name)
# CLI duplicate. Needs compile_function support (see above) -Paul
# check is used in test_overflow
#def check(func, annotation, args):
# mono = compile_function(func, annotation)
# res1 = func(*args)
# res2 = mono(*args)
#
# if type(res1) is float:
# assert round(res1, FLOAT_PRECISION) == round(res2, FLOAT_PRECISION)
# else:
# assert res1 == res2
class StructTuple(tuple):
    """Tuple whose elements are also reachable as .item0, .item1, ...
    so tests can address structured results by field position."""

    def __getattr__(self, attr):
        prefix = 'item'
        if not attr.startswith(prefix):
            raise AttributeError(attr)
        # 'itemN' -> self[N]
        return self[int(attr[len(prefix):])]
# CLI duplicate
class OOList(list):
    """List exposing the ootype list accessors used by the tests."""

    def ll_length(self):
        # identical to len(self); named per the ootype convention
        return self.__len__()

    def ll_getitem_fast(self, index):
        return self.__getitem__(index)
# CLI duplicate
class ExceptionWrapper:
    """Represents an exception raised by generated JVM code, identified
    by its de-mangled class name."""

    def __init__(self, class_name):
        # We put all of our classes into some package like 'pypy':
        # strip the initial 'pypy.' that results from the class name,
        # and we append a number to make the class name unique.  Strip
        # those.
        pkg = getoption('package')+'.'
        assert class_name.startswith(pkg)
        uniqidx = class_name.rindex('_')
        self.class_name = class_name[len(pkg):uniqidx]

    def __repr__(self):
        return 'ExceptionWrapper(%s)' % repr(self.class_name)
class InstanceWrapper:
    """Record describing an instance value returned from JVM execution:
    the class name plus a list of (field-name, value) pairs."""

    def __init__(self, class_name, fields):
        self.class_name = class_name
        # fields is a list of (name, value) tuples
        self.fields = fields

    def __repr__(self):
        return 'InstanceWrapper({0}, {1!r})'.format(self.class_name, self.fields)
# CLI could-be duplicate
class JvmGeneratedSourceWrapper(object):
    """Callable wrapper around a JvmGeneratedSource: calling it runs the
    compiled program and evals its printed result back into Python
    objects (tuples/lists get the test-friendly wrappers)."""

    def __init__(self, gensrc):
        """ gensrc is an instance of JvmGeneratedSource """
        self.gensrc = gensrc

    def __call__(self, *args):
        if not self.gensrc.compiled:
            py.test.skip("Assembly disabled")
        if getoption('norun'):
            py.test.skip("Execution disabled")
        # the generated program prints a Python expression on stdout
        resstr = self.gensrc.execute(args)
        print "resstr=%s" % repr(resstr)
        res = eval(resstr)
        if isinstance(res, tuple):
            res = StructTuple(res) # so tests can access tuple elements with .item0, .item1, etc.
        elif isinstance(res, list):
            res = OOList(res)
        return res
class JvmTest(BaseRtypingTest, OORtypeMixin):
    """rtyping-test mixin that compiles functions to JVM source, runs
    them, and caches the generated source between identical requests."""

    def __init__(self):
        self._func = None
        self._ann = None
        self._jvm_src = None

    def _compile(self, fn, args, ann=None):
        if ann is None:
            ann = [lltype_to_annotation(typeOf(x)) for x in args]
        # reuse the previous compilation when fn and annotation match
        if self._func is fn and self._ann == ann:
            return JvmGeneratedSourceWrapper(self._jvm_src)
        else:
            self._func = fn
            self._ann = ann
            self._jvm_src = generate_source_for_function(fn, ann)
            if not getoption('noasm'):
                self._jvm_src.compile()
            return JvmGeneratedSourceWrapper(self._jvm_src)

    def _skip_win(self, reason):
        if platform.system() == 'Windows':
            py.test.skip('Windows --> %s' % reason)

    def _skip_powerpc(self, reason):
        if platform.processor() == 'powerpc':
            py.test.skip('PowerPC --> %s' % reason)

    def interpret(self, fn, args, annotation=None):
        """Compile fn, run it with args, and return the parsed result;
        re-raises ExceptionWrapper results, pretty-prints JvmError."""
        detect_missing_support_programs()
        try:
            src = self._compile(fn, args, annotation)
            res = src(*args)
            if isinstance(res, ExceptionWrapper):
                raise res
            return res
        except JvmError, e:
            e.pretty_print()
            raise

    def interpret_raises(self, exception, fn, args):
        import exceptions # needed by eval
        try:
            self.interpret(fn, args)
        except ExceptionWrapper, ex:
            # the wrapper carries the exception class name as a string
            assert issubclass(eval(ex.class_name), exception)
        else:
            assert False, 'function did not raise any exception at all'

    def float_eq(self, x, y):
        return round(x, FLOAT_PRECISION) == round(y, FLOAT_PRECISION)

    def ll_to_string(self, s):
        return s

    def ll_to_list(self, l):
        return l

    def class_name(self, value):
        return value.class_name.split(".")[-1]

    def is_of_instance_type(self, val):
        return isinstance(val, InstanceWrapper)

    def read_attr(self, obj, name):
        py.test.skip("read_attr not supported on JVM")
        # TODO --- this "almost works": I think the problem is that our
        # dump methods don't dump fields of the super class??
        #return obj.fields["o"+name]
| Python |
"""
Turning function dependencies into linear order
-----------------------------------------------
The purpose of this module is to calculate a good linear
ordering of functions, according to call transition
statistics.
Every node has some connections to other nodes, expressed
in terms of transition frequencies. As a starting point,
one could assign every node its own dimension. All transitions
would therefore be orthogonal to each other. The resulting
vector space would be quite huge.
Instead, we use a different approach:
For a node having t transitions, we order the transitions
by decreasing frequencies. The initial position of each
node is this t-dimensional vector.
The distance between two nodes along a transition is the
Euclidean distance of the intersecion of the nodes dimensions.
The transition frequencies define a weight for each transition.
The weighted distance between two nodes
"""
from math import sqrt
import random
def zipextend(v1, v2):
    """Zip v1 and v2 after padding the shorter one with 0.0 entries.

    Note: when v1 is the shorter list it is padded IN PLACE (callers
    rely on that mutation); v2 is only ever padded on a copy.
    """
    diff = len(v2) - len(v1)
    if diff > 0:
        v1 += [0.0] * diff           # extends the caller's list, by design
    elif diff < 0:
        v2 = v2 + [0.0] * (-diff)    # pads a fresh copy, caller unaffected
    return zip(v1, v2)
class Vector:
    """A small list-backed vector used by the layout algorithm.

    Offers in-place arithmetic via the __inplace_*__ helpers, copying
    operators (+, -, *, /), a Euclidean norm, and plain sequence-style
    access to the coordinates.
    """

    def __init__(self, seq=None):
        self.coords = list(seq or [])

    # ---- in-place arithmetic ------------------------------------
    def __inplace_add__(self, other):
        self.coords = [a + b for a, b in zipextend(self.coords, other.coords)]

    def __inplace_sub__(self, other):
        self.coords = [a - b for a, b in zipextend(self.coords, other.coords)]

    def __inplace_mul__(self, scalar):
        if isinstance(scalar, Vector):
            # dot product; plain zip is fine here, missing entries
            # would multiply with zero anyway
            self.coords = [a * b for a, b in zip(self.coords, scalar.coords)]
        else:
            # scalar product
            self.coords = [a * scalar for a in self.coords]

    def __inplace_div__(self, scalar):
        self.coords = [a / scalar for a in self.coords]

    # ---- copying operators --------------------------------------
    def _copy(self):
        # fresh Vector with its own coordinate list
        return Vector(self.coords)

    def __add__(self, other):
        out = self._copy()
        out.__inplace_add__(other)
        return out

    def __sub__(self, other):
        out = self._copy()
        out.__inplace_sub__(other)
        return out

    def __mul__(self, scalar):
        out = self._copy()
        out.__inplace_mul__(scalar)
        return out

    def __div__(self, scalar):
        out = self._copy()
        out.__inplace_div__(scalar)
        return out

    def __neg__(self):
        return Vector(-c for c in self.coords)

    # ---- metrics ------------------------------------------------
    def norm2(self):
        cs = self.coords
        if len(cs) == 1:
            # cheap special case for the very common 1-d vectors
            return abs(cs[0])
        return sqrt(sum(c * c for c in cs))

    def getdim(self):
        return len(self.coords)

    # ---- coordinate access --------------------------------------
    def __getitem__(self, idx):
        return self.coords[idx]

    def __setitem__(self, idx, value):
        self.coords[idx] = value

    def __iter__(self):
        return iter(self.coords)

    def __repr__(self):
        return 'Vector({0!r})'.format(self.coords)
class SpaceNode:
    """A function node embedded in the layout space: a position Vector
    plus weighted relations to neighbouring nodes."""

    def __init__(self, node):
        self.func = node.func
        self.name = node.name

    def setup(self, relations, weights, initpos):
        self.relations = relations
        self.weights = weights
        self.position = initpos

    def distance(self, other):
        # using the nice property of zip to give the minimum length
        return (other.position - self.position).norm2()

    def scale(self, factor):
        self.position *= factor

    def shift(self, delta):
        self.position += delta

    def shiftx(self, deltax):
        # move only along the x-axis
        self.position[0] += deltax

    def lonelyness(self):
        # get the square norm of weighted distances
        lonely = []
        for weight, relative in zip(self.weights, self.relations):
            lonely.append(weight * self.distance(relative))
        return Vector(lonely).norm2()

    def forcevector(self):
        """Weighted sum of the difference vectors toward all relations."""
        vec = Vector()
        # NOTE(review): k is computed but never used — dead code?
        k = sum(self.weights)
        for w, rel in zip(self.weights, self.relations):
            tmp = rel.position - self.position
            tmp *= w
            vec += tmp
        return vec
class SpaceGraph:
    """A set of SpaceNodes positioned in a vector space.

    Provides iterative relaxation (do_correction), renormalization of
    the layout (normalize), and finally a linear ordering of the nodes
    along the x-axis (order)."""

    # one shared pseudo-random stream; the fixed seed keeps runs reproducible
    random = random.Random(42).random

    def __init__(self, simgraph):
        self.nodes = []
        self.addgraph(simgraph)
        self.lastdim = 0 # calculated by normalize
        self.subgraphs = []

    def addgraph(self, simgraph):
        """Clone a SimGraph's nodes into SpaceNodes, remapping their
        relations onto the clones."""
        mapping = {}
        for simnode in simgraph.nodes:
            mapping[simnode] = SpaceNode(simnode)
        i = len(self.nodes)
        self.nodes += [mapping[simnode] for simnode in simgraph.nodes]
        for simnode in simgraph.nodes:
            relations, weights = simnode.get_relations()
            relations = [mapping[rel] for rel in relations]
            node = mapping[simnode]
            # extreme simplification:
            # use just one dimension
            # scramble as much as possible to avoid
            # starting in a local minimum
            #node.setup(relations, weights, Vector([i]))
            node.setup(relations, weights, Vector([self.random()]))
            i += 1
        # component partition is stale now; recomputed on demand
        self.subgraphs = []

    def xminmax(self, nodes=None):
        # smallest and largest x coordinate over the given nodes
        # (defaults to all nodes)
        nodes = nodes or self.nodes
        xaxis = [node.position[0] for node in nodes]
        xmin = min(xaxis)
        xmax = max(xaxis)
        return float(xmin), float(xmax)

    def compute_subgraphs(self):
        """Partition the nodes into connected components."""
        nodes = {}
        for node in self.nodes:
            nodes[node] = node
        self.subgraphs = []
        while nodes:
            for node in nodes:
                break      # pick an arbitrary remaining node
            todo = [node]
            del nodes[node]
            # breadth-first sweep over the relations
            for node in todo:
                for rel in node.relations:
                    if rel in nodes:
                        del nodes[rel]
                        todo.append(rel)
            self.subgraphs.append(todo)

    def order_subgraphs(self):
        # biggest components first; ties broken by the first node's name
        sgs = [ (-len(sg), sg[0].name, sg) for sg in self.subgraphs]
        sgs.sort()
        self.subgraphs = [sg for lng, name, sg in sgs]

    def normalize(self):
        # identify disjoint subgraphs.
        # for every subgraph:
        # move the graph center to zero
        # scale the graph to make the x-axis as long as the number of nodes.
        # shift all graphs to be in disjoint intervals on the x-axis.
        if not self.subgraphs:
            self.compute_subgraphs()
            self.order_subgraphs()
        def distort(nodes):
            # stretch collapsed x-axis
            for i, node in enumerate(nodes):
                node.position[0] = i
            return nodes
        def norm_subgraph(nodes, start):
            # normalize a subgraph, return the dimensionality as side effect
            xmin, xmax = self.xminmax(nodes)
            xwidth = xmax - xmin
            if not xwidth: # degenerated
                if len(nodes) > 1:
                    return norm_subgraph(distort(nodes), start)
                factor = 1.0
            else:
                factor = (len(nodes) - 1) / xwidth
            mean = Vector()
            for node in nodes:
                mean += node.position
            mean /= len(nodes)
            shift = -mean
            dim = shift.getdim()
            for node in nodes:
                node.shift(shift)
                node.scale(factor)
            # place this subgraph's x-interval right after 'start'
            shiftx = start - (xmin + shift[0]) * factor
            for node in nodes:
                node.shiftx(shiftx)
            return dim
        start = 0.0
        dim = 0
        for nodes in self.subgraphs:
            dim = max(dim, norm_subgraph(nodes, start))
            start += len(nodes)
        self.lastdim = dim

    def do_correction(self, korr=0.0002):
        """Move every node a small step along its force vector."""
        forcevecs = [node.forcevector() for node in self.nodes]
        for node, forcevec in zip(self.nodes, forcevecs):
            corrvec = forcevec * korr
            node.shift(corrvec)

    def squeeze_dim(self):
        # progressively dampen higher dimensions so that the layout
        # collapses toward the x-axis
        scale = []
        ndim = self.lastdim
        for i in range(ndim):
            scale.append( 1.01 ** -i )
        scale = Vector(scale)
        for node in self.nodes:
            node.scale(scale)

    def lonelyness2(self):
        # square norm of lonelynesses
        lonely = []
        for node in self.nodes:
            lonely.append(node.lonelyness())
        return Vector(lonely).norm2()

    def lonelyness(self):
        # sum norm of lonelynesses
        lonely = 0.0
        for node in self.nodes:
            lonely += node.lonelyness()
        return lonely / len(self.nodes)

    def order(self):
        """Nodes sorted by x coordinate, name breaking ties."""
        sorter = [(node.position[0], node.name, node) for node in self.nodes]
        sorter.sort()
        return [node for x, x, node in sorter]

    def display(self):
        for node in self.order():
            print node.name, node.lonelyness(), node.position
if __name__ == '__main__':
    from pypy.translator.locality.simulation import SimGraph
    def test():
        # six functions forming a single call cycle
        def a(): b()
        def b(): c()
        def c(): d()
        def d(): e()
        def e(): f()
        def f(): a()
        sim = SimGraph([a, b, c, d, e, f])
        sim.sim_all(0.9, 50)
        return sim
    def test_singleton():
        # one isolated node with no relations at all
        def s(): pass
        sim = SimGraph([s])
        sim.sim_all(0.9, 50)
        return sim
    # build a space graph from several disjoint simulated graphs
    g = SpaceGraph(test())
    g.addgraph(test())
    g.addgraph(test())
    g.addgraph(test_singleton())
| Python |
"""
Simulation of function calls
----------------------------
The purpose of this module is to simulate function calls
in the call-graph of a program, to gather information
about frequencies of transitions between functions.
The following SimNode/SimGraph classes show an example of the
simulation performed. They can be subclassed to connect them
to client structures like flowgraphs.
- SimGraph.run was used to get an obviously correct reference implementation.
- SimGraph.sim_all simulates the calls of the run method. The results are
exactly the same, although the computation time ir orders of magnitudes
smaller, and the SimGraph.simulate method is able to handle recursions
and function call probabilities which are fractions.
"""
class SimNode:
    """A node in the simulated call graph; knows its callees and counts
    the (possibly weighted) calls it receives."""

    def __init__(self, sim, func):
        self.sim = sim
        self.func = func
        self.name = self._get_name(func)
        self.callees = []
        self._callers = None # computed
        self.calls = 0

    def __repr__(self):
        return '(%s)' % self.name

    def __cmp__(self, other):
        # Python 2 ordering: by name between SimNodes, by identity
        # against anything else
        if isinstance(other, self.__class__):
            return cmp(self.name, other.name)
        return cmp(id(self), id(other))

    def __hash__(self):
        return id(self)

    def _get_name(self, func):
        # to be overridden
        return func.__name__

    def _find_callee_names(self):
        # to be overridden
        return self.func.func_code.co_names

    def call(self):
        # reference implementation: really walk the call graph
        self.calls += 1
        for i in range(self.sim.repetitions_per_call):
            for func in self.callees:
                self.sim.record_transition(self, func)
                func.call()

    def clear(self):
        self.calls = 0

    def simulate_call(self, weight=1):
        self.calls += weight

    # calls and returns are symmetric.  We provide a callers
    # interface that is computed on demand.
    def _get_callers(self):
        if not self.sim._callers_computed:
            self.sim._compute_callers()
        return self.callers
    # NOTE(review): relies on classic-class semantics — the instance
    # attribute 'callers' assigned by SimGraph._compute_callers shadows
    # this property on instances.
    callers = property(_get_callers)

    def get_relations(self):
        # get callees and callers with frequency, ordered
        # by decreasing frequency and then by name.
        ret = []
        for node in self.callees:
            freq = self.sim.transitions[ (self, node) ]
            ret.append( (-freq, node) )
        for node in self.callers:
            freq = self.sim.transitions[ (node, self) ]
            ret.append( (-freq, node) )
        # if there is nothing, link it to itself
        if not ret:
            ret.append( (-1, self) )
        ret.sort()
        freqs, nodes = zip(*ret)
        return nodes, [-freq for freq in freqs]
class SimGraph:
    """Simulated call graph: drives its SimNodes either by really
    calling (run/run_all) or by propagating weighted pending calls
    (simulate/sim_all), recording transition counts either way."""

    def __init__(self, funcnodes, nodefactory=SimNode, clientdata=None):
        self.nodes = []
        self.transitions = {}
        self.pending = {}
        self.clientdata = clientdata
        name2node = {}
        for func in funcnodes:
            node = nodefactory(self, func)
            name2node[node.name] = node
            self.nodes.append(node)
        self._names_width = self._find_names_width()
        # wire up callee edges and zero-initialize transition counts
        for node in self.nodes:
            for name in node._find_callee_names():
                callee = name2node[name]
                node.callees.append(callee)
                self.transitions[ (node, callee) ] = 0
        self._callers_computed = False

    def _find_names_width(self):
        # longest node name, used to align the display() columns
        n = 0
        for node in self.nodes:
            n = max(n, len(node.name))
        return n

    def record_transition(self, caller, callee, weight=1):
        self.transitions[ (caller, callee) ] += weight

    def run(self, reps=1, root=0):
        # reference implementation: actually walk the graph from root
        self._callers_computed = False
        self.repetitions_per_call = reps
        root = self.nodes[root]
        root.call()

    def run_all(self, reps=1):
        for root in range(len(self.nodes)):
            self.run(reps, root)

    def clear(self):
        for key in self.transitions:
            self.transitions[key] = 0
        for node in self.nodes:
            node.clear()
        self.pending.clear()

    def display(self):
        d = {'w': max(self._names_width, 6) }
        print '%%%(w)ds %%%(w)gs repetition' % d % ('caller', 'callee')
        for caller, callee, reps in self.get_state():
            print '%%%(w)ds %%%(w)gs %%6g' % d % (caller, callee, reps)
        print '%%%(w)gs calls' % d % 'node'
        for node in self.nodes:
            print '%%%(w)gs %%6g' % d % (node.name, node.calls)

    def get_state(self):
        # sorted (caller-name, callee-name, count) triples; suitable
        # for comparing the two simulation strategies
        lst = []
        for (caller, callee), reps in self.transitions.items():
            lst.append( (caller.name, callee.name, reps) )
        lst.sort()
        return lst

    def simulate(self, call_prob=1, root=None):
        # simulating runs by not actually calling, but shooting
        # the transitions in a weighted manner.
        # this allows us to handle recursions as well.
        # first, stimulate nodes if no transitions are pending
        self._callers_computed = False
        if not self.pending:
            if root is not None:
                startnodes = [self.nodes[root]]
            else:
                startnodes = self.nodes
            for node in startnodes:
                self.pending[node] = 1
        # perform a single step of simulated calls.
        pending = {}
        for caller, ntrans in self.pending.items():
            caller.simulate_call(ntrans)
            for callee in caller.callees:
                self.record_transition(caller, callee, ntrans * call_prob)
                pending[callee] = pending.get(callee, 0) + ntrans * call_prob
        self.pending = pending

    def sim_all(self, call_prob=1, maxrun=None, root=None):
        # simulate and stop after maxrun loops
        self.simulate(call_prob, root)
        i = 0
        while self.pending:
            self.simulate(call_prob)
            i += 1
            if maxrun and i >= maxrun:
                break

    def _compute_callers(self):
        # derive the reverse edges from the recorded transitions
        nodes = {}
        for node in self.nodes:
            nodes[node] = node
            node.callers = []
        returns = [ (callee, caller)
                    for caller, callee in self.transitions.keys()]
        returns.sort()
        for callee, caller in returns:
            nodes[callee].callers.append(caller)
# sample functions for proof of correctness
def test(debug=False):
    """Proof of correctness: run_all and sim_all must produce
    identical transition counts on a small sample graph."""
    def a(): b(); c(); d()
    def b(): c(); d()
    def c(): pass
    def d(): c(); e()
    def e(): c()
    sim = SimGraph([a, b, c, d, e])
    if debug:
        # expose the sample functions/graph for interactive poking
        globals().update(locals())
    sim.clear()
    for prob in 1, 3, 2:
        sim.clear()
        sim.run_all(prob)
        state1 = sim.get_state()
        sim.clear()
        sim.sim_all(prob)
        state2 = sim.get_state()
        assert state1 == state2
    return sim

if __name__ == '__main__':
    test()
| Python |
"""
CallTree
An approach to do static call analysis in the PyPy backend
to produce a somewhat locality-of-reference optimized
ordering of the function objects in the generated source.
In extent to that, it is planned to produce a non-optimized
binary from instrumented source code, run some sample
applications and optimize according to transition statistics.
This step will only be done if the first approach shows any
improvement.
Sketch of the algorithm:
------------------------
In a first pass, we inspect all function nodes for direct_call
opcodes and record the callees, if they are constants.
(Variables will later be tried to find out by re-using the
information in the translator).
We then run a simulation of calls.
See pypy/translator/locality/simulation.py.
After that, a poly-dimensional model is computed and morphed
into a one-dimensional ordering.
See pypy/translator/locality/projection.py.
"""
from pypy.objspace.flow.model import Variable, Constant
from pypy.translator.locality.support import log
from pypy.translator.locality.simulation import SimNode, SimGraph
from pypy.translator.locality.projection import SpaceNode, SpaceGraph
class FlowSimNode(SimNode):
    """SimNode specialized for flow-graph function nodes."""

    def _get_name(self, func):
        # funcnodes carry their name directly
        return func.name

    def _find_callee_names(self):
        # sim.clientdata maps each funcnode to the funcnodes it calls
        names = []
        for callee in self.sim.clientdata[self.func]:
            names.append(callee.name)
        return names
class CallTree:
    """Static call-graph analysis over the backend's function nodes:
    collects direct-call edges, simulates call frequencies, and derives
    a locality-friendly linear ordering of the functions."""

    def __init__(self, funcnodes, database):
        self.nodes = funcnodes
        self.database = database
        self.graphs2nodes = self._build_graph2nodes()
        self.calls = {}
        for node in self.nodes:
            self.calls[node] = self.find_callees(node)

    def _build_graph2nodes(self):
        dic = {}
        for node in self.nodes:
            dic[node.obj.graph] = node
        return dic

    def find_callees(self, node):
        """Collect the funcnodes directly called from node's graph;
        indirect calls (through variables) are only logged for now."""
        graph = node.obj.graph
        res = []
        if node.obj._callable in self.database.externalfuncs:
            s = "skipped external function %s" % node.obj._callable.__name__
            log.calltree.findCallees(s)
            return res
        for block in graph.iterblocks():
            for op in block.operations:
                if op.opname == 'direct_call':
                    fnarg = op.args[0]
                    fnptr = fnarg.value
                    fn = fnptr._obj
                    # NOTE(review): rebinding 'graph' shadows the outer
                    # variable; harmless today since the outer one is
                    # not read again afterwards.
                    graph = fn.graph
                    try:
                        callednode = self.graphs2nodes[graph]
                    except KeyError:
                        s = "No node found for graph %s" % graph.name
                        log.calltree.findCallees(s)
                        continue
                    else:
                        res.append(callednode)
                elif op.opname == 'indirect_call':
                    s = "Node %s calls Variable %s" % (node.name, op.args[0])
                    log.calltree.findCallees(s)
        return res

    def simulate(self):
        log.simulate('building SimGraph for simulation...')
        sim = SimGraph(self.nodes, FlowSimNode, self.calls)
        log.simulate('simulating...')
        # NOTE(review): call probability 1.9 is greater than 1.0; the
        # self-tests in simulation/projection use 0.9 — confirm this
        # value is intentional.
        sim.sim_all(1.9, 50)
        self.statgraph = sim

    def optimize(self):
        log.topology('building SpaceGraph for topological sort...')
        sg = SpaceGraph(self.statgraph)
        steps = 500
        try:
            for i in range(steps):
                for k in range(10):
                    sg.do_correction()
                sg.normalize()
                s = "step %d of %d lonelyness = %g" % (i+1, steps, sg.lonelyness())
                log.topology(s)
        except KeyboardInterrupt:
            # NOTE(review): 'i' is unbound here if the interrupt lands
            # before the first iteration of the loop.
            log.topology("aborted after %d steps" % (i+1))
        self.topology = sg
        log.topology("done.")

    def ordered_funcnodes(self):
        nodes = self.topology.order()
        ret = [node.func for node in nodes]
        return ret
| Python |
# logging
import py
from pypy.tool.ansi_print import ansi_log

# single log producer shared by the whole 'locality' package; output is
# routed through py.log with ANSI colouring
log = py.log.Producer("locality")
py.log.setconsumer("locality", ansi_log)
| Python |
# | Python |
import types
from pypy.objspace.flow.model import FunctionGraph
from pypy.rpython.lltypesystem import lltype
from pypy.translator.c.support import cdecl
from pypy.rpython.lltypesystem.rstr import STR, mallocstr
from pypy.rpython.lltypesystem import rstr
from pypy.rpython.lltypesystem import rlist
from pypy.rpython.module import ll_time, ll_os
from pypy.rpython.module import ll_stackless, ll_stack
from pypy.rpython.lltypesystem.module.ll_os import STAT_RESULT, PIPE_RESULT
from pypy.rpython.lltypesystem.module.ll_os import WAITPID_RESULT
from pypy.rpython.lltypesystem.module.ll_os import Implementation as impl
from pypy.rpython.lltypesystem.module import ll_strtod
from pypy.rlib import ros
try:
from pypy.module.thread.rpython import ll_thread
except ImportError:
ll_thread = None
# table of functions hand-written in src/ll_*.h
# Note about *.im_func: The annotator and the rtyper expect direct
# references to functions, so we cannot insert classmethods here.

EXTERNALS = {
    impl.ll_read_into: 'LL_read_into',  # it's a staticmethod
    impl.ll_os_write.im_func: 'LL_os_write',
    impl.ll_os_close.im_func: 'LL_os_close',
    impl.ll_os_access.im_func: 'LL_os_access',
    impl.ll_os_stat.im_func: 'LL_os_stat',
    impl.ll_os_fstat.im_func: 'LL_os_fstat',
    impl.ll_os_lstat.im_func: 'LL_os_lstat',
    impl.ll_os_lseek.im_func: 'LL_os_lseek',
    impl.ll_os_isatty.im_func: 'LL_os_isatty',
    impl.ll_os_ftruncate.im_func: 'LL_os_ftruncate',
    impl.ll_os_strerror.im_func: 'LL_os_strerror',
    impl.ll_os_system.im_func: 'LL_os_system',
    impl.ll_os_unlink.im_func: 'LL_os_unlink',
    impl.ll_os_getcwd.im_func: 'LL_os_getcwd',
    impl.ll_os_chdir.im_func: 'LL_os_chdir',
    impl.ll_os_mkdir.im_func: 'LL_os_mkdir',
    impl.ll_os_rmdir.im_func: 'LL_os_rmdir',
    impl.ll_os_putenv.im_func: 'LL_os_putenv',
    impl.ll_os_unsetenv.im_func: 'LL_os_unsetenv',
    impl.ll_os_environ.im_func: 'LL_os_environ',
    impl.ll_os_opendir.im_func: 'LL_os_opendir',
    impl.ll_os_readdir.im_func: 'LL_os_readdir',
    impl.ll_os_closedir.im_func: 'LL_os_closedir',
    impl.ll_os_pipe.im_func: 'LL_os_pipe',
    impl.ll_os_chmod.im_func: 'LL_os_chmod',
    impl.ll_os_rename.im_func: 'LL_os_rename',
    impl.ll_os_umask.im_func: 'LL_os_umask',
    impl.ll_os_getpid.im_func: 'LL_os_getpid',
    impl.ll_os_kill.im_func: 'LL_os_kill',
    impl.ll_os_link.im_func: 'LL_os_link',
    impl.ll_os_symlink.im_func: 'LL_os_symlink',
    impl.ll_readlink_into: 'LL_readlink_into',
    impl.ll_os_fork.im_func: 'LL_os_fork',
    impl.ll_os_spawnv.im_func: 'LL_os_spawnv',
    impl.ll_os_waitpid.im_func: 'LL_os_waitpid',
    impl.ll_os__exit.im_func: 'LL_os__exit',
    ll_time.ll_time_clock: 'LL_time_clock',
    ll_time.ll_time_sleep: 'LL_time_sleep',
    ll_time.ll_time_time: 'LL_time_time',
    ll_strtod.Implementation.ll_strtod_parts_to_float:
        'LL_strtod_parts_to_float',
    ll_strtod.Implementation.ll_strtod_formatd:
        'LL_strtod_formatd',
    ll_stackless.ll_stackless_switch: 'LL_stackless_switch',
    ll_stackless.ll_stackless_stack_frames_depth: 'LL_stackless_stack_frames_depth',
    ll_stack.ll_stack_unwind: 'LL_stack_unwind',
    ll_stack.ll_stack_too_big: 'LL_stack_too_big',
}

# thread primitives are registered only when the 'thread' module could
# be imported (see the try/except around the ll_thread import above)
if ll_thread: EXTERNALS.update({
    ll_thread.ll_newlock: 'LL_thread_newlock',
    ll_thread.ll_acquirelock: 'LL_thread_acquirelock',
    ll_thread.ll_releaselock: 'LL_thread_releaselock',
    ll_thread.ll_fused_releaseacquirelock: 'LL_thread_fused_releaseacquirelock',
    ll_thread.ll_thread_start: 'LL_thread_start',
    ll_thread.ll_thread_get_ident: 'LL_thread_get_ident',
    })

#______________________________________________________
# insert 'simple' math functions into EXTERNALs table:
# XXX: messy, messy, messy
# this interacts in strange ways with node.select_function_code_generators,
# because it fakes to be an ll_* function.

math_functions = [
    'acos', 'asin', 'atan', 'ceil', 'cos', 'cosh', 'exp', 'fabs',
    'floor', 'log', 'log10', 'sin', 'sinh', 'sqrt', 'tan', 'tanh',
    'frexp', 'pow', 'atan2', 'fmod', 'ldexp', 'modf', 'hypot'
]

import math

# note: the keys here are strings, not function objects, because these
# entries fake being ll_* functions (see the XXX above)
for name in math_functions:
    EXTERNALS['ll_math.ll_math_%s' % name] = 'LL_math_%s' % name
EXTERNALS['LL_flush_icache'] = 'LL_flush_icache'
#______________________________________________________
def find_list_of_str(rtyper):
    """Return the low-level LIST type used for list-of-string, or None
    if the program never built such a list."""
    for repr_candidate in rtyper.reprs.itervalues():
        if (isinstance(repr_candidate, rlist.ListRepr) and
            repr_candidate.item_repr is rstr.string_repr):
            return repr_candidate.lowleveltype.TO
    return None
def predeclare_common_types(db, rtyper):
    """Yield (well-known C name, lltype) pairs that the hand-written C
    sources expect to find predeclared."""
    from pypy.rpython.lltypesystem.module import ll_math
    # Common types
    yield ('RPyString', STR)
    list_of_str = find_list_of_str(rtyper)
    if list_of_str is not None:
        yield ('RPyListOfString', list_of_str)
    yield ('RPyFREXP_RESULT', ll_math.FREXP_RESULT)
    yield ('RPyMODF_RESULT', ll_math.MODF_RESULT)
    yield ('RPySTAT_RESULT', STAT_RESULT)
    yield ('RPyPIPE_RESULT', PIPE_RESULT)
    yield ('RPyWAITPID_RESULT', WAITPID_RESULT)
def predeclare_utility_functions(db, rtyper):
    """Yield (name, graph) pairs for the common utility helpers called
    by the hand-written C sources (RPyString_New, _RPyListOfString_*).

    The helpers encode their argument types as default values; each one
    is annotated on the fly and cached in db.helper2ptr.
    """
    # Common utility functions
    def RPyString_New(length=lltype.Signed):
        return mallocstr(length)

    # !!!
    # be extremely careful passing a gc tracked object
    # from such an helper result to another one
    # as argument, this could result in leaks
    # Such result should be only from C code
    # returned directly as results

    LIST_OF_STR = find_list_of_str(rtyper)
    if LIST_OF_STR is not None:
        p = lltype.Ptr(LIST_OF_STR)

        # (an identical duplicate definition of _RPyListOfString_New
        # was removed here)
        def _RPyListOfString_New(length=lltype.Signed):
            return LIST_OF_STR.ll_newlist(length)

        def _RPyListOfString_SetItem(l=p,
                                     index=lltype.Signed,
                                     newstring=lltype.Ptr(STR)):
            rlist.ll_setitem_nonneg(rlist.dum_nocheck, l, index, newstring)

        def _RPyListOfString_GetItem(l=p,
                                     index=lltype.Signed):
            return rlist.ll_getitem_fast(l, index)

        def _RPyListOfString_Length(l=p):
            return rlist.ll_length(l)

    # pick up every helper function defined above
    for fname, f in locals().items():
        if isinstance(f, types.FunctionType):
            # XXX this is painful :(
            if (LIST_OF_STR, fname) in db.helper2ptr:
                yield (fname, db.helper2ptr[LIST_OF_STR, fname])
            else:
                # hack: the defaults give the type of the arguments
                graph = rtyper.annotate_helper(f, f.func_defaults)
                db.helper2ptr[LIST_OF_STR, fname] = graph
                yield (fname, graph)
def get_extfunc_helper_ptrs(db, rtyper):
    """Annotate every helper implicitly called by external functions,
    record it in db.helper2ptr and yield (name, fptr) pairs."""
    for func, args, symb in db.translator._implicitly_called_by_externals:
        fptr = rtyper.annotate_helper(func, args)
        db.helper2ptr[func] = fptr
        yield (func.__name__, fptr)
def predeclare_extfunc_helpers(db, rtyper):
    """Yield the previously-annotated helpers (see
    get_extfunc_helper_ptrs) plus an LL_NEED_<SYMB> flag for each."""
    for func, args, symb in db.translator._implicitly_called_by_externals:
        yield (func.__name__, db.helper2ptr[func])
        yield ('LL_NEED_' + symb, 1)
def predeclare_extfuncs(db, rtyper):
    """Yield (C name, funcptr) pairs for every external function used,
    preceded by one LL_NEED_<MODNAME> flag per ll_* module seen.

    The LL_NEED_* define makes it possible to isolate in-development
    externals and headers.
    """
    def module_name(c_name):
        # 'LL_os_stat' -> 'os'; 'LL__exit'-style names -> '_exit' prefix
        frags = c_name[3:].split('_')
        if frags[0] == '':
            return '_' + frags[1]
        return frags[0]

    seen_modules = {}
    for func, funcobj in db.externalfuncs.items():
        c_name = EXTERNALS[func]
        modname = module_name(c_name)
        if modname not in seen_modules:
            seen_modules[modname] = True
            yield 'LL_NEED_%s' % modname.upper(), 1
        yield c_name, funcobj._as_ptr()
def predeclare_exception_data(db, rtyper):
    """Yield (C name, object) pairs for all exception-related types,
    helper functions and prebuilt exception instances."""
    # Exception-related types and constants
    exceptiondata = rtyper.getexceptiondata()
    exctransformer = db.exctransformer
    yield ('RPYTHON_EXCEPTION_VTABLE', exceptiondata.lltype_of_exception_type)
    yield ('RPYTHON_EXCEPTION', exceptiondata.lltype_of_exception_value)
    yield ('RPYTHON_EXCEPTION_MATCH', exceptiondata.fn_exception_match)
    yield ('RPYTHON_TYPE_OF_EXC_INST', exceptiondata.fn_type_of_exc_inst)
    yield ('RPYTHON_RAISE_OSERROR', exceptiondata.fn_raise_OSError)
    if not db.standalone:
        # only needed when embedding in CPython
        yield ('RPYTHON_PYEXCCLASS2EXC', exceptiondata.fn_pyexcclass2exc)
    yield ('_RPyExceptionOccurred', exctransformer._rpyexc_occured_ptr.value)
    yield ('RPyExceptionOccurred1', exctransformer.rpyexc_occured_ptr.value)
    yield ('RPyFetchExceptionType', exctransformer.rpyexc_fetch_type_ptr.value)
    yield ('RPyFetchExceptionValue', exctransformer.rpyexc_fetch_value_ptr.value)
    yield ('RPyClearException', exctransformer.rpyexc_clear_ptr.value)
    yield ('RPyRaiseException', exctransformer.rpyexc_raise_ptr.value)
    # one prebuilt instance per standard exception class
    for pyexccls in exceptiondata.standardexceptions:
        exc_llvalue = exceptiondata.fn_pyexcclass2exc(
            lltype.pyobjectptr(pyexccls))
        # strange naming here because the macro name must be
        # a substring of PyExc_%s
        name = pyexccls.__name__
        if pyexccls.__module__ != 'exceptions':
            name = '%s_%s' % (pyexccls.__module__.replace('.', '__'), name)
        yield ('RPyExc_%s' % name, exc_llvalue)
def predeclare_all(db, rtyper):
    """Chain all the predeclare_* generators, in declaration order."""
    sources = [predeclare_common_types,
               predeclare_utility_functions,
               predeclare_exception_data,
               predeclare_extfunc_helpers,
               predeclare_extfuncs]
    for source in sources:
        for item in source(db, rtyper):
            yield item
def get_all(db, rtyper):
    """Like predeclare_all() but annotating the helpers (via
    get_extfunc_helper_ptrs) and yielding only the objects, not names."""
    sources = [predeclare_common_types,
               predeclare_utility_functions,
               predeclare_exception_data,
               get_extfunc_helper_ptrs,
               predeclare_extfuncs]
    for source in sources:
        for item in source(db, rtyper):
            yield item[1]
# ____________________________________________________________
def do_the_getting(db, rtyper):
    """Force every predeclared object into the database (after letting
    the rtyper specialize any newly-annotated helper blocks)."""
    all_decls = list(get_all(db, rtyper))
    rtyper.specialize_more_blocks()
    for decl in all_decls:
        if isinstance(decl, lltype.LowLevelType):
            db.gettype(decl)
        elif isinstance(decl, FunctionGraph):
            db.get(rtyper.getcallable(decl))
        else:
            db.get(decl)
def pre_include_code_lines(db, rtyper):
    # generate some #defines that go before the #include to provide
    # predeclared well-known names for constant objects, functions and
    # types.  These names are then used by the #included files, like
    # g_exception.h.
    def define_line(c_name, lowlevelobj):
        llname = db.get(lowlevelobj)
        assert '\n' not in llname
        return '#define\t%s\t%s' % (c_name, llname)

    def typedef_line(c_typename, lowleveltype):
        return 'typedef %s;' % cdecl(db.gettype(lowleveltype), c_typename)

    yield '#define HAVE_RTYPER'
    for c_name, obj in list(predeclare_all(db, rtyper)):
        if isinstance(obj, lltype.LowLevelType):
            yield typedef_line(c_name, obj)
        elif isinstance(obj, FunctionGraph):
            yield define_line(c_name, rtyper.getcallable(obj))
        else:
            yield define_line(c_name, obj)
| Python |
from pypy.rpython.lltypesystem.lltype import *
class SymbolTable:
    """For debugging purposes only. This collects the information
    needed to know the byte-layout of the data structures generated in
    a C source.
    """

    def __init__(self):
        self.next_number = 0    # next index into the debug offsets table
        self.next_pointer = 0   # next index into the debug globals table
        self.lltypes = {}       # {LLTYPE: index of its first debug offset}
        self.globals = {}       # {global name: (pointer index, LLTYPE)}
        self.module = None      # compiled module, set by attach()

    def attach(self, module):
        # bind this symbol table to the compiled extension module
        self.module = module
        module.__symboltable__ = self

    def generate_type_info(self, db, defnode):
        """Record the offsets of 'defnode' and re-yield its debug
        offset expressions, counting them."""
        self.lltypes[defnode.LLTYPE] = self.next_number
        for number_expr in defnode.debug_offsets():
            self.next_number += 1
            yield number_expr

    def generate_global_info(self, db, node):
        """Register global 'node' and return its C pointer name."""
        self.globals[node.name] = self.next_pointer, node.T
        self.next_pointer += 1
        return node.ptrname

    # __________ public interface (mapping-like) __________

    def keys(self):
        return self.globals.keys()

    def __getitem__(self, globalname_or_address):
        """Look up a global by name or by address; returns a debugptr."""
        if isinstance(globalname_or_address, str):
            # bug fix: this used to read self.globals[globalname],
            # an undefined name (NameError)
            ptrindex, T = self.globals[globalname_or_address]
            address = self.module.debuginfo_global(ptrindex)
        else:
            # reverse lookup: find the global living at this address
            for ptrindex, T in self.globals.values():
                address = self.module.debuginfo_global(ptrindex)
                if address == globalname_or_address:
                    break
            else:
                raise KeyError("no global object at 0x%x" %
                               (globalname_or_address,))
        return debugptr(Ptr(T), address, self)

    def __iter__(self):
        return self.globals.iterkeys()
def getsymboltable(module):
    """Return the SymbolTable attached to 'module' (accepts either a
    module object or a module name)."""
    if not isinstance(module, str):
        return module.__symboltable__
    return __import__(module).__symboltable__
# ____________________________________________________________

import struct

# struct-module format codes used to decode each primitive lltype
# from raw process memory
PrimitiveTag = {
    Signed: 'l',
    Unsigned: 'L',
    Float: 'd',
    Char: 'c',
    Bool: 'b',
}
# size in bytes of a pointer; NOTE(review): this assumes the compiled
# module shares the pointer size of this interpreter process — confirm
ptr_size = struct.calcsize('P')
class debugptr:
    """Read-only proxy for a pointer into the compiled module's memory.

    Field offsets come from the debug tables recorded in the
    SymbolTable; raw memory is read through module.debuginfo_peek().
    """

    def __init__(self, PTRTYPE, address, symtable):
        self._TYPE = PTRTYPE        # the lltype Ptr type of this pointer
        self._address = address     # raw address value
        self._symtable = symtable   # owning SymbolTable

    def __eq__(self, other):
        # pointers compare by address only
        return self._address == other._address

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        raise TypeError("pointer objects are not hashable")

    def __repr__(self):
        addr = self._address
        if addr < 0:
            # display negative addresses in unsigned form
            addr += 256 ** ptr_size
        return '<debugptr %s to 0x%x>' % (self._TYPE.TO, addr)

    def __nonzero__(self):
        # NULL pointers are false
        return self._address != 0

    def _nth_offset(self, n):
        # n'th entry of the debug offsets table for our pointed-to type
        index = self._symtable.lltypes[self._TYPE.TO]
        return self._symtable.module.debuginfo_offset(index + n)

    def _read(self, FIELD_TYPE, offset):
        """Read a value of lltype FIELD_TYPE at self._address + offset."""
        if not self:  # NULL
            raise ValueError, 'dereferencing NULL pointer'
        module = self._symtable.module
        address = self._address + offset
        if isinstance(FIELD_TYPE, ContainerType):
            # inlined sub-structure: no read, just re-point
            return debugptr(Ptr(FIELD_TYPE), address, self._symtable)
        elif isinstance(FIELD_TYPE, Primitive):
            if FIELD_TYPE is Void:
                return None
            tag = PrimitiveTag[FIELD_TYPE]
            size = struct.calcsize(tag)
            data = module.debuginfo_peek(address, size)
            result, = struct.unpack(tag, data)
            return result
        elif isinstance(FIELD_TYPE, Ptr):
            # read a stored pointer and wrap it in another debugptr
            data = module.debuginfo_peek(address, ptr_size)
            result, = struct.unpack('P', data)
            return debugptr(FIELD_TYPE, result, self._symtable)
        else:
            raise TypeError("unknown type %r" % (FIELD_TYPE,))

    def __getattr__(self, name):
        # attribute access reads the struct field of the same name
        STRUCT = self._TYPE.TO
        if not name.startswith('_') and isinstance(STRUCT, Struct):
            try:
                field_index = list(STRUCT._names).index(name)
            except ValueError:
                pass
            else:
                FIELD_TYPE = STRUCT._flds[name]
                offset = self._nth_offset(field_index)
                return self._read(FIELD_TYPE, offset)
        # fall back to ADT methods attached to the container type
        if isinstance(self._TYPE.TO, ContainerType):
            adtmeth = self._TYPE.TO._adtmeths.get(name)
            if adtmeth is not None:
                return adtmeth.__get__(self)
        raise AttributeError, name

    def __len__(self):
        # offset 0 of an Array's debug info is its length field offset,
        # or -1 when the array stores no length
        ARRAY = self._TYPE.TO
        if isinstance(ARRAY, Array):
            length_offset = self._nth_offset(0)
            if length_offset == -1:
                raise TypeError, "array has no stored length: %r" % (ARRAY,)
            return self._read(Signed, length_offset)
        raise TypeError, "not an array: %r" % (ARRAY,)

    def __getitem__(self, index):
        ARRAY = self._TYPE.TO
        if isinstance(ARRAY, Array):
            length_offset = self._nth_offset(0)
            if length_offset == -1:
                pass  # just assume the access is within bounds
            elif not (0 <= index < len(self)):
                raise IndexError("array index out of bounds")
            # offsets 1 and 2 give the positions of items [0] and [1];
            # their difference is the item stride
            item0_offset = self._nth_offset(1)
            item1_offset = self._nth_offset(2)
            offset = item0_offset + (item1_offset - item0_offset) * index
            return self._read(ARRAY.OF, offset)
        raise TypeError, "not an array: %r" % (ARRAY,)
| Python |
from __future__ import generators
from pypy.translator.c.support import USESLOTS # set to False if necessary while refactoring
from pypy.translator.c.support import cdecl, ErrorValue
from pypy.translator.c.support import llvalue_from_constant, gen_assignments
from pypy.translator.c.support import c_string_constant
from pypy.objspace.flow.model import Variable, Constant, Block
from pypy.objspace.flow.model import c_last_exception, copygraph
from pypy.rpython.lltypesystem.lltype import Ptr, PyObject, Void, Bool, Signed
from pypy.rpython.lltypesystem.lltype import Unsigned, SignedLongLong, Float
from pypy.rpython.lltypesystem.lltype import UnsignedLongLong, Char, UniChar
from pypy.rpython.lltypesystem.lltype import pyobjectptr, ContainerType
from pypy.rpython.lltypesystem.lltype import Struct, Array, FixedSizeArray
from pypy.rpython.lltypesystem.lltype import ForwardReference, FuncType
from pypy.rpython.lltypesystem.llmemory import Address, WeakGcAddress
from pypy.translator.backendopt.ssa import SSI_to_SSA
PyObjPtr = Ptr(PyObject)  # default lltype for variables without a concretetype
LOCALVAR = 'l_%s'         # naming pattern for C local variables
# NOTE(review): not referenced in this chunk — presumably a debug switch
# for the gctransformer inlining done in patch_graph(); confirm usage
KEEP_INLINED_GRAPHS = False
class FunctionCodeGenerator(object):
"""
Collects information about a function which we have to generate
from a flow graph.
"""
if USESLOTS:
__slots__ = """graph db gcpolicy
exception_policy
more_ll_values
vars
lltypes
functionname
currentblock
blocknum
oldgraph""".split()
    def __init__(self, graph, db, exception_policy=None, functionname=None):
        """Prepare code generation for 'graph'.

        Applies, in order, the stackless, exception and gc
        transformations (they mutate the graph in place), then makes
        sure the database knows the type of every variable used.
        """
        self.graph = graph
        self.db = db
        self.gcpolicy = db.gcpolicy
        self.exception_policy = exception_policy
        self.functionname = functionname
        # apply the stackless transformation
        if db.stacklesstransformer:
            db.stacklesstransformer.transform_graph(graph)
        # apply the exception transformation
        if self.db.exctransformer:
            self.db.exctransformer.create_exception_handling(self.graph)
        # apply the gc transformation
        if self.db.gctransformer:
            self.db.gctransformer.transform_graph(self.graph)
        #self.graph.show()
        self.collect_var_and_types()
        for v in self.vars:
            T = getattr(v, 'concretetype', PyObjPtr)
            # obscure: skip forward references and hope for the best
            # (needed for delayed function pointers)
            if isinstance(T, Ptr) and T.TO.__class__ == ForwardReference:
                continue
            db.gettype(T)  # force the type to be considered by the database
    def collect_var_and_types(self):
        #
        # collect all variables and constants used in the body,
        # and get their types now
        #
        # NOTE: cannot use dictionaries with Constants as keys, because
        #       Constants may hash and compare equal but have different
        #       lltypes — hence the id()-based dedup below
        #
        mix = [self.graph.getreturnvar()]
        self.more_ll_values = []
        for block in self.graph.iterblocks():
            mix.extend(block.inputargs)
            for op in block.operations:
                mix.extend(op.args)
                mix.append(op.result)
            for link in block.exits:
                mix.extend(link.getextravars())
                mix.extend(link.args)
                if hasattr(link, 'llexitcase'):
                    self.more_ll_values.append(link.llexitcase)
                elif link.exitcase is not None:
                    mix.append(Constant(link.exitcase))
        if self.exception_policy == "CPython":
            # the CPython policy keeps extra cleanup operations on the graph
            v, exc_cleanup_ops = self.graph.exc_cleanup
            mix.append(v)
            for cleanupop in exc_cleanup_ops:
                mix.extend(cleanupop.args)
                mix.append(cleanupop.result)
        # de-duplicate by identity while preserving first-seen order
        uniquemix = []
        seen = {}
        for v in mix:
            if id(v) not in seen:
                uniquemix.append(v)
                seen[id(v)] = True
        self.vars = uniquemix
def name(self, cname): #virtual
return cname
def patch_graph(self, copy_graph):
graph = self.graph
if self.db.gctransformer and self.db.gctransformer.inline:
if copy_graph:
graph = copygraph(graph, shallow=True)
self.db.gctransformer.inline_helpers(graph)
return graph
    def implementation_begin(self):
        """Enter 'implementation mode': patch a copy of the graph, run
        SSA conversion, number the blocks and cache the C type of every
        variable.  Must be paired with implementation_end()."""
        self.oldgraph = self.graph
        self.graph = self.patch_graph(copy_graph=True)
        SSI_to_SSA(self.graph)
        self.collect_var_and_types()
        self.blocknum = {}
        for block in self.graph.iterblocks():
            self.blocknum[block] = len(self.blocknum)
        db = self.db
        lltypes = {}
        for v in self.vars:
            T = getattr(v, 'concretetype', PyObjPtr)
            typename = db.gettype(T)
            # keyed by id(v): Constants may compare equal across lltypes
            lltypes[id(v)] = T, typename
        self.lltypes = lltypes

    def implementation_end(self):
        # drop the caches built by implementation_begin() and restore
        # the original (unpatched) graph
        self.lltypes = None
        self.vars = None
        self.blocknum = None
        self.currentblock = None
        self.graph = self.oldgraph
        del self.oldgraph
def argnames(self):
return [LOCALVAR % v.name for v in self.graph.getargs()]
def allvariables(self):
return [v for v in self.vars if isinstance(v, Variable)]
def allconstants(self):
return [c for c in self.vars if isinstance(c, Constant)]
def allconstantvalues(self):
for c in self.vars:
if isinstance(c, Constant):
yield llvalue_from_constant(c)
for llvalue in self.more_ll_values:
yield llvalue
def lltypemap(self, v):
T, typename = self.lltypes[id(v)]
return T
def lltypename(self, v):
T, typename = self.lltypes[id(v)]
return typename
def expr(self, v, special_case_void=True):
if isinstance(v, Variable):
if self.lltypemap(v) is Void and special_case_void:
return '/* nothing */'
else:
return LOCALVAR % v.name
elif isinstance(v, Constant):
value = llvalue_from_constant(v)
if value is None and not special_case_void:
return 'nothing'
else:
return self.db.get(value)
else:
raise TypeError, "expr(%r)" % (v,)
def error_return_value(self):
returnlltype = self.lltypemap(self.graph.getreturnvar())
return self.db.get(ErrorValue(returnlltype))
def return_with_error(self):
if self.exception_policy == "CPython":
assert self.lltypemap(self.graph.getreturnvar()) == PyObjPtr
v, exc_cleanup_ops = self.graph.exc_cleanup
vanishing_exc_value = self.expr(v)
yield 'RPyConvertExceptionToCPython(%s);' % vanishing_exc_value
for cleanupop in exc_cleanup_ops:
for line in self.gen_op(cleanupop):
yield line
yield 'return %s; ' % self.error_return_value()
# ____________________________________________________________
def cfunction_declarations(self):
# declare the local variables, excluding the function arguments
seen = {}
for a in self.graph.getargs():
seen[a.name] = True
result_by_name = []
for v in self.allvariables():
name = v.name
if name not in seen:
seen[name] = True
result = cdecl(self.lltypename(v), LOCALVAR % name) + ';'
if self.lltypemap(v) is Void:
continue #result = '/*%s*/' % result
result_by_name.append((v._name, result))
result_by_name.sort()
return [result for name, result in result_by_name]
# ____________________________________________________________
    def cfunction_body(self):
        """Yield the C statements forming the body of the function.

        Emits one labelled chunk per flow-graph block, followed by the
        code implementing the block's exit(s): return, unconditional
        goto, a two-way test, or a C switch.
        """
        graph = self.graph
        # generate the body of each block
        for block in graph.iterblocks():
            self.currentblock = block
            myblocknum = self.blocknum[block]
            yield ''
            yield 'block%d:' % myblocknum
            for i, op in enumerate(block.operations):
                for line in self.gen_op(op):
                    yield line
            if len(block.exits) == 0:
                assert len(block.inputargs) == 1
                # regular return block
                if self.exception_policy == "CPython":
                    assert self.lltypemap(self.graph.getreturnvar()) == PyObjPtr
                    yield 'if (RPyExceptionOccurred()) {'
                    yield '\tRPyConvertExceptionToCPython();'
                    yield '\treturn NULL;'
                    yield '}'
                retval = self.expr(block.inputargs[0])
                if self.exception_policy != "exc_helper":
                    yield 'RPY_DEBUG_RETURN();'
                yield 'return %s;' % retval
                continue
            elif block.exitswitch is None:
                # single-exit block
                assert len(block.exits) == 1
                for op in self.gen_link(block.exits[0]):
                    yield op
            else:
                assert block.exitswitch != c_last_exception
                # block ending in a switch on a value
                TYPE = self.lltypemap(block.exitswitch)
                if TYPE in (Bool, PyObjPtr):
                    # two-way test: emit 'if's for every exit except the
                    # first one, which becomes the fall-through case
                    expr = self.expr(block.exitswitch)
                    for link in block.exits[:0:-1]:
                        assert link.exitcase in (False, True)
                        if TYPE == Bool:
                            if not link.exitcase:
                                expr = '!' + expr
                        elif TYPE == PyObjPtr:
                            yield 'assert(%s == Py_True || %s == Py_False);' % (
                                expr, expr)
                            if link.exitcase:
                                expr = '%s == Py_True' % expr
                            else:
                                expr = '%s == Py_False' % expr
                        yield 'if (%s) {' % expr
                        for op in self.gen_link(link):
                            yield '\t' + op
                        yield '}'
                    link = block.exits[0]
                    assert link.exitcase in (False, True)
                    #yield 'assert(%s == %s);' % (self.expr(block.exitswitch),
                    #                    self.genc.nameofvalue(link.exitcase, ct))
                    for op in self.gen_link(link):
                        yield op
                elif TYPE in (Signed, Unsigned, SignedLongLong,
                              UnsignedLongLong, Char, UniChar):
                    # integer/char switch: map onto a real C switch
                    defaultlink = None
                    expr = self.expr(block.exitswitch)
                    yield 'switch (%s) {' % self.expr(block.exitswitch)
                    for link in block.exits:
                        if link.exitcase == 'default':
                            defaultlink = link
                            continue
                        yield 'case %s:' % self.db.get(link.llexitcase)
                        for op in self.gen_link(link):
                            yield '\t' + op
                        yield 'break;'
                    # Emit default case
                    yield 'default:'
                    if defaultlink is None:
                        yield '\tassert(!"bad switch!!");'
                    else:
                        for op in self.gen_link(defaultlink):
                            yield '\t' + op
                    yield '}'
                else:
                    raise TypeError("exitswitch type not supported"
                                    " Got %r" % (TYPE,))
def gen_link(self, link, linklocalvars=None):
"Generate the code to jump across the given Link."
is_alive = {}
linklocalvars = linklocalvars or {}
assignments = []
for a1, a2 in zip(link.args, link.target.inputargs):
a2type, a2typename = self.lltypes[id(a2)]
if a2type is Void:
continue
if a1 in linklocalvars:
src = linklocalvars[a1]
else:
src = self.expr(a1)
dest = LOCALVAR % a2.name
assignments.append((a2typename, dest, src))
for line in gen_assignments(assignments):
yield line
yield 'goto block%d;' % self.blocknum[link.target]
    def gen_op(self, op):
        """Yield the C line(s) implementing the single operation 'op'.

        Resolution order: 'gc_*' operations may be handled by the
        gcpolicy; other operations by an OP_<OPNAME> method on self;
        otherwise fall back to a C macro of the same name taking the
        args followed by the result lvalue.
        """
        macro = 'OP_%s' % op.opname.upper()
        if op.opname.startswith('gc_'):
            meth = getattr(self.gcpolicy, macro, None)
            if meth:
                line = meth(self, op)
        else:
            meth = getattr(self, macro, None)
            if meth:
                line = meth(op)
        if meth is None:
            # generic fallback: expand to a C macro invocation
            lst = [self.expr(v) for v in op.args]
            lst.append(self.expr(op.result))
            line = '%s(%s);' % (macro, ', '.join(lst))
        if "\n" not in line:
            yield line
        else:
            # some handlers return several lines at once
            for line in line.splitlines():
                yield line
# ____________________________________________________________
# the C preprocessor cannot handle operations taking a variable number
# of arguments, so here are Python methods that do it
def OP_NEWLIST(self, op):
args = [self.expr(v) for v in op.args]
r = self.expr(op.result)
if len(args) == 0:
return 'OP_NEWLIST0(%s);' % (r, )
else:
args.insert(0, '%d' % len(args))
return 'OP_NEWLIST((%s), %s);' % (', '.join(args), r)
def OP_NEWDICT(self, op):
args = [self.expr(v) for v in op.args]
r = self.expr(op.result)
if len(args) == 0:
return 'OP_NEWDICT0(%s);' % (r, )
else:
assert len(args) % 2 == 0
args.insert(0, '%d' % (len(args)//2))
return 'OP_NEWDICT((%s), %s);' % (', '.join(args), r)
def OP_NEWTUPLE(self, op):
args = [self.expr(v) for v in op.args]
r = self.expr(op.result)
args.insert(0, '%d' % len(args))
return 'OP_NEWTUPLE((%s), %s);' % (', '.join(args), r)
def OP_SIMPLE_CALL(self, op):
args = [self.expr(v) for v in op.args]
r = self.expr(op.result)
args.append('NULL')
return 'OP_SIMPLE_CALL((%s), %s);' % (', '.join(args), r)
def OP_CALL_ARGS(self, op):
args = [self.expr(v) for v in op.args]
r = self.expr(op.result)
return 'OP_CALL_ARGS((%s), %s);' % (', '.join(args), r)
    def generic_call(self, FUNC, fnexpr, args_v, v_result):
        """Build the C statement calling expression 'fnexpr' of type
        FUNC with arguments 'args_v', storing into 'v_result'."""
        args = []
        assert len(args_v) == len(FUNC.TO.ARGS)
        for v, ARGTYPE in zip(args_v, FUNC.TO.ARGS):
            if ARGTYPE is Void:
                continue  # skip 'void' argument
            args.append(self.expr(v))
            # special case for rctypes: by-value container args:
            if isinstance(ARGTYPE, ContainerType):
                args[-1] = '*%s' % (args[-1],)
        line = '%s(%s);' % (fnexpr, ', '.join(args))
        if self.lltypemap(v_result) is not Void:
            # skip assignment of 'void' return value
            r = self.expr(v_result)
            line = '%s = %s' % (r, line)
        return line
def OP_DIRECT_CALL(self, op):
fn = op.args[0]
return self.generic_call(fn.concretetype, self.expr(fn),
op.args[1:], op.result)
def OP_INDIRECT_CALL(self, op):
fn = op.args[0]
return self.generic_call(fn.concretetype, self.expr(fn),
op.args[1:-1], op.result)
def OP_ADR_CALL(self, op):
ARGTYPES = [v.concretetype for v in op.args[1:]]
RESTYPE = op.result.concretetype
FUNC = Ptr(FuncType(ARGTYPES, RESTYPE))
typename = self.db.gettype(FUNC)
fnaddr = op.args[0]
fnexpr = '((%s)%s)' % (cdecl(typename, ''), self.expr(fnaddr))
return self.generic_call(FUNC, fnexpr, op.args[1:], op.result)
# low-level operations
def generic_get(self, op, sourceexpr):
T = self.lltypemap(op.result)
newvalue = self.expr(op.result, special_case_void=False)
result = ['%s = %s;' % (newvalue, sourceexpr)]
result = '\n'.join(result)
if T is Void:
result = '/* %s */' % result
return result
def generic_set(self, op, targetexpr):
newvalue = self.expr(op.args[2], special_case_void=False)
result = ['%s = %s;' % (targetexpr, newvalue)]
T = self.lltypemap(op.args[2])
result = '\n'.join(result)
if T is Void:
result = '/* %s */' % result
return result
def OP_GETFIELD(self, op, ampersand=''):
assert isinstance(op.args[1], Constant)
STRUCT = self.lltypemap(op.args[0]).TO
structdef = self.db.gettypedefnode(STRUCT)
expr = ampersand + structdef.ptr_access_expr(self.expr(op.args[0]),
op.args[1].value)
return self.generic_get(op, expr)
def OP_SETFIELD(self, op):
assert isinstance(op.args[1], Constant)
STRUCT = self.lltypemap(op.args[0]).TO
structdef = self.db.gettypedefnode(STRUCT)
expr = structdef.ptr_access_expr(self.expr(op.args[0]),
op.args[1].value)
return self.generic_set(op, expr)
OP_BARE_SETFIELD = OP_SETFIELD
def OP_GETSUBSTRUCT(self, op):
RESULT = self.lltypemap(op.result).TO
if isinstance(RESULT, FixedSizeArray):
return self.OP_GETFIELD(op, ampersand='')
else:
return self.OP_GETFIELD(op, ampersand='&')
def OP_GETARRAYSIZE(self, op):
ARRAY = self.lltypemap(op.args[0]).TO
if isinstance(ARRAY, FixedSizeArray):
return '%s = %d;' % (self.expr(op.result),
ARRAY.length)
else:
return '%s = %s->length;' % (self.expr(op.result),
self.expr(op.args[0]))
def OP_GETARRAYITEM(self, op):
ARRAY = self.lltypemap(op.args[0]).TO
items = self.expr(op.args[0])
if not isinstance(ARRAY, FixedSizeArray):
items += '->items'
return self.generic_get(op, '%s[%s]' % (items,
self.expr(op.args[1])))
def OP_SETARRAYITEM(self, op):
ARRAY = self.lltypemap(op.args[0]).TO
items = self.expr(op.args[0])
if not isinstance(ARRAY, FixedSizeArray):
items += '->items'
return self.generic_set(op, '%s[%s]' % (items,
self.expr(op.args[1])))
OP_BARE_SETARRAYITEM = OP_SETARRAYITEM
def OP_GETARRAYSUBSTRUCT(self, op):
ARRAY = self.lltypemap(op.args[0]).TO
items = self.expr(op.args[0])
if not isinstance(ARRAY, FixedSizeArray):
items += '->items'
return '%s = %s + %s;' % (self.expr(op.result),
items,
self.expr(op.args[1]))
def OP_PTR_NONZERO(self, op):
return '%s = (%s != NULL);' % (self.expr(op.result),
self.expr(op.args[0]))
def OP_PTR_ISZERO(self, op):
return '%s = (%s == NULL);' % (self.expr(op.result),
self.expr(op.args[0]))
def OP_PTR_EQ(self, op):
return '%s = (%s == %s);' % (self.expr(op.result),
self.expr(op.args[0]),
self.expr(op.args[1]))
def OP_PTR_NE(self, op):
return '%s = (%s != %s);' % (self.expr(op.result),
self.expr(op.args[0]),
self.expr(op.args[1]))
def OP_BOEHM_MALLOC(self, op):
return 'OP_BOEHM_ZERO_MALLOC(%s, %s, void*, 0, 0);' % (self.expr(op.args[0]),
self.expr(op.result))
def OP_BOEHM_MALLOC_ATOMIC(self, op):
return 'OP_BOEHM_ZERO_MALLOC(%s, %s, void*, 1, 0);' % (self.expr(op.args[0]),
self.expr(op.result))
def OP_BOEHM_REGISTER_FINALIZER(self, op):
return 'GC_REGISTER_FINALIZER(%s, (GC_finalization_proc)%s, NULL, NULL, NULL);' \
% (self.expr(op.args[0]), self.expr(op.args[1]))
def OP_RAW_MALLOC(self, op):
eresult = self.expr(op.result)
esize = self.expr(op.args[0])
return "OP_RAW_MALLOC(%s, %s, void *);" % (esize, eresult)
def OP_FLAVORED_MALLOC(self, op):
# XXX this function should DIE!
TYPE = self.lltypemap(op.result).TO
typename = self.db.gettype(TYPE)
eresult = self.expr(op.result)
esize = 'sizeof(%s)' % cdecl(typename, '')
erestype = cdecl(typename, '*')
flavor = op.args[0].value
if flavor == "raw":
return "OP_RAW_MALLOC(%s, %s, %s);" % (esize, eresult, erestype)
elif flavor == "stack":
return "OP_STACK_MALLOC(%s, %s, %s);" % (esize, eresult, erestype)
elif flavor == "cpy":
cpytype = self.expr(op.args[2])
return "OP_CPY_MALLOC(%s, %s, %s);" % (cpytype, eresult, erestype)
else:
raise NotImplementedError
def OP_FLAVORED_MALLOC_VARSIZE(self, op):
# XXX this function should DIE!, at least twice over
# XXX I know this working in just one case, probably makes
# sense to assert it here, rest is just copied
flavor = op.args[0].value
assert flavor == 'raw'
TYPE = self.lltypemap(op.result).TO
assert isinstance(TYPE, Array)
assert TYPE._hints.get('nolength', False)
# </obscure hack>
typename = self.db.gettype(TYPE)
lenfld = 'length'
nodedef = self.db.gettypedefnode(TYPE)
if isinstance(TYPE, Struct):
arfld = TYPE._arrayfld
lenfld = "%s.length" % nodedef.c_struct_field_name(arfld)
VARPART = TYPE._flds[TYPE._arrayfld]
else:
VARPART = TYPE
assert isinstance(VARPART, Array)
itemtypename = self.db.gettype(VARPART.OF)
elength = self.expr(op.args[2])
eresult = self.expr(op.result)
erestype = cdecl(typename, '*')
if VARPART.OF is Void: # strange
esize = 'sizeof(%s)' % (cdecl(typename, ''),)
result = '{\n'
else:
itemtype = cdecl(itemtypename, '')
result = 'IF_VARSIZE_OVERFLOW(%s, %s, %s)\nelse {\n' % (
elength,
itemtype,
eresult)
esize = 'sizeof(%s)-sizeof(%s)+%s*sizeof(%s)' % (
cdecl(typename, ''),
itemtype,
elength,
itemtype)
# ctypes Arrays have no length field
if not VARPART._hints.get('nolength', False):
result += '\nif(%s) %s->%s = %s;' % (eresult, eresult, lenfld, elength)
if flavor == "raw":
result += "OP_RAW_MALLOC(%s, %s, %s);" % (esize, eresult, erestype)
elif flavor == "stack":
result += "OP_STACK_MALLOC(%s, %s, %s);" % (esize, eresult, erestype)
elif flavor == "cpy":
xxx # this will never work, as I don't know which arg it would be
# tests, tests, tests....
cpytype = self.expr(op.args[2])
result += "OP_CPY_MALLOC(%s, %s, %s);" % (cpytype, eresult, erestype)
else:
raise NotImplementedError
result += '\n}'
return result
    def OP_FLAVORED_FREE(self, op):
        """Emit the free operation matching a flavored malloc
        ('raw' or 'cpy'; 'stack' storage needs no explicit free)."""
        flavor = op.args[0].value
        if flavor == "raw":
            return "OP_RAW_FREE(%s, %s)" % (self.expr(op.args[1]),
                                            self.expr(op.result))
        elif flavor == "cpy":
            return "OP_CPY_FREE(%s)" % (self.expr(op.args[1]),)
        else:
            raise NotImplementedError
    def OP_DIRECT_FIELDPTR(self, op):
        """Emit C taking the address of a field (getfield with '&')."""
        return self.OP_GETFIELD(op, ampersand='&')
    def OP_DIRECT_ARRAYITEMS(self, op):
        """Emit C yielding a pointer to the first item of array args[0]."""
        ARRAY = self.lltypemap(op.args[0]).TO
        items = self.expr(op.args[0])
        if not isinstance(ARRAY, FixedSizeArray):
            # non-fixed-size arrays keep the payload in '->items'
            items += '->items'
        return '%s = %s;' % (self.expr(op.result), items)
    def OP_DIRECT_PTRADD(self, op):
        """Emit plain C pointer arithmetic: result = ptr + offset."""
        return '%s = %s + %s;' % (self.expr(op.result),
                                  self.expr(op.args[0]),
                                  self.expr(op.args[1]))
    def OP_CAST_POINTER(self, op):
        """Emit a C cast of args[0] to the result's C type."""
        TYPE = self.lltypemap(op.result)
        typename = self.db.gettype(TYPE)
        result = []
        result.append('%s = (%s)%s;' % (self.expr(op.result),
                                        cdecl(typename, ''),
                                        self.expr(op.args[0])))
        return '\t'.join(result)
    # these three are all plain C casts as far as the backend is concerned
    OP_CAST_PTR_TO_ADR = OP_CAST_POINTER
    OP_CAST_ADR_TO_PTR = OP_CAST_POINTER
    OP_CAST_OPAQUE_PTR = OP_CAST_POINTER
    def OP_CAST_PTR_TO_WEAKADR(self, op):
        """Emit HIDE_POINTER(): disguise a pointer as a weak address so the
        Boehm collector does not treat it as a strong reference."""
        return '%s = HIDE_POINTER(%s);' % (self.expr(op.result),
                                           self.expr(op.args[0]))
    def OP_CAST_WEAKADR_TO_PTR(self, op):
        """Emit REVEAL_POINTER(): recover the pointer hidden by
        OP_CAST_PTR_TO_WEAKADR, cast to the result's C type."""
        TYPE = self.lltypemap(op.result)
        assert TYPE != PyObjPtr
        typename = self.db.gettype(TYPE)
        return '%s = (%s)REVEAL_POINTER(%s);' % (self.expr(op.result),
                                                 cdecl(typename, ''),
                                                 self.expr(op.args[0]))
    def OP_CAST_INT_TO_PTR(self, op):
        """Emit a C cast from an integer to the result pointer type."""
        TYPE = self.lltypemap(op.result)
        typename = self.db.gettype(TYPE)
        return "%s = (%s)%s;" % (self.expr(op.result), cdecl(typename, ""),
                                 self.expr(op.args[0]))
def OP_SAME_AS(self, op):
result = []
TYPE = self.lltypemap(op.result)
assert self.lltypemap(op.args[0]) == TYPE
if TYPE is not Void:
result.append('%s = %s;' % (self.expr(op.result),
self.expr(op.args[0])))
return '\t'.join(result)
def OP_HINT(self, op):
hints = op.args[1].value
return '%s\t/* hint: %r */' % (self.OP_SAME_AS(op), hints)
def OP_KEEPALIVE(self, op): # xxx what should be the sematics consequences of this
return "/* kept alive: %s */ ;" % self.expr(op.args[0], special_case_void=False)
#address operations
    def OP_RAW_STORE(self, op):
        """Emit C storing 'value' at (TYPE*)addr + offset.

        args: [0]=address, [1]=constant lltype of the stored value,
        [2]=offset (in units of TYPE), [3]=value.
        """
        addr = self.expr(op.args[0])
        TYPE = op.args[1].value
        offset = self.expr(op.args[2])
        value = self.expr(op.args[3])
        # turn the C type of TYPE into a pointer-to-TYPE declaration
        typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '')
        return "*(((%(typename)s) %(addr)s ) + %(offset)s) = %(value)s;" % locals()
    def OP_RAW_LOAD(self, op):
        """Emit C loading the result from (TYPE*)addr + offset.

        args: [0]=address, [1]=constant lltype of the loaded value,
        [2]=offset (in units of TYPE).
        """
        addr = self.expr(op.args[0])
        TYPE = op.args[1].value
        offset = self.expr(op.args[2])
        result = self.expr(op.result)
        typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '')
        return "%(result)s = *(((%(typename)s) %(addr)s ) + %(offset)s);" % locals()
    def OP_CAST_PRIMITIVE(self, op):
        """Emit a C cast between primitive types.

        Char/UniChar sources are first cast to an unsigned C type so the
        conversion does not sign-extend.
        """
        TYPE = self.lltypemap(op.result)
        val = self.expr(op.args[0])
        ORIG = self.lltypemap(op.args[0])
        if ORIG is Char:
            val = "(unsigned char)%s" % val
        elif ORIG is UniChar:
            val = "(unsigned long)%s" % val
        result = self.expr(op.result)
        typename = cdecl(self.db.gettype(TYPE), '')
        return "%(result)s = (%(typename)s)(%(val)s);" % locals()
    def OP_RESUME_POINT(self, op):
        """Stackless resume points leave only a marker comment in the C."""
        return '/* resume point %s */'%(op.args[0],)
    def OP_DEBUG_PRINT(self, op):
        """Emit an fprintf(stderr, ...) implementing debug_print.

        A printf format string is built from the argument types; constant
        strings and chars are inlined directly into the format (with '%'
        escaped), everything else becomes a %d/%f/%p/%s/%c placeholder
        plus an entry in the argument list.
        """
        # XXX
        from pypy.rpython.lltypesystem.rstr import STR
        format = []
        argv = []
        for arg in op.args:
            T = arg.concretetype
            if T == Ptr(STR):
                if isinstance(arg, Constant):
                    # inline the constant string into the format
                    format.append(''.join(arg.value.chars).replace('%', '%%'))
                else:
                    format.append('%s')
                    argv.append('RPyString_AsString(%s)' % self.expr(arg))
                continue
            elif T == Signed:
                format.append('%d')
            elif T == Float:
                format.append('%f')
            elif isinstance(T, Ptr) or T in (Address, WeakGcAddress):
                format.append('%p')
            elif T == Char:
                if isinstance(arg, Constant):
                    format.append(arg.value.replace('%', '%%'))
                    continue
                format.append('%c')
            else:
                raise Exception("don't know how to debug_print %r" % (T,))
            argv.append(self.expr(arg))
        # arguments are separated by spaces, output ends with a newline;
        # the trailing \000 is included in the C string constant
        return "fprintf(stderr, %s%s);" % (
            c_string_constant(' '.join(format) + '\n\000'),
            ''.join([', ' + s for s in argv]))
    def OP_DEBUG_ASSERT(self, op):
        """Emit RPyAssert(condition, message)."""
        return 'RPyAssert(%s, %s);' % (self.expr(op.args[0]),
                                       c_string_constant(op.args[1].value))
    def OP_DEBUG_FATALERROR(self, op):
        """Emit C printing the message to stderr and calling abort()."""
        # XXX
        from pypy.rpython.lltypesystem.rstr import STR
        msg = op.args[0]
        assert msg.concretetype == Ptr(STR)
        argv = []
        if isinstance(msg, Constant):
            msg = c_string_constant(''.join(msg.value.chars))
        else:
            msg = 'RPyString_AsString(%s)' % self.expr(msg)
        return 'fprintf(stderr, "%%s\\n", %s); abort();' % msg
    def OP_INSTRUMENT_COUNT(self, op):
        """Emit INSTRUMENT_COUNT(label) and record the highest counter
        label seen so the counter table can be sized."""
        counter_label = op.args[1].value
        self.db.instrument_ncounter = max(self.db.instrument_ncounter,
                                          counter_label+1)
        counter_label = self.expr(op.args[1])
        return 'INSTRUMENT_COUNT(%s);' % counter_label
def OP_IS_EARLY_CONSTANT(self, op):
return self.expr(op.result) + ' = 0;' # Allways false
# sanity check: when __slots__ is in use the class must not have grown a __dict__
assert not USESLOTS or '__dict__' not in dir(FunctionCodeGenerator)
| Python |
from __future__ import generators
import autopath, os, sys, __builtin__, marshal, zlib
import py
from types import FunctionType, CodeType, InstanceType, ClassType
from pypy.objspace.flow.model import Variable, Constant, FunctionGraph
from pypy.annotation.description import NoStandardGraph
from pypy.translator.gensupp import builtin_base, builtin_type_base
from pypy.translator.c.support import log
from pypy.translator.c.wrapper import gen_wrapper, new_method_graph
from pypy.translator.tool.raymond import should_expose
from pypy.rlib.rarithmetic import r_int, r_uint
from pypy.rpython.lltypesystem.lltype import pyobjectptr, LowLevelType
from pypy.rpython import extregistry
# XXX maybe this can be done more elegantly:
# needed to convince should_translate_attr
# to fill the space instance.
# Should this be registered with the annotator?
from pypy.interpreter.baseobjspace import ObjSpace
class PyObjMaker:
"""Handles 'PyObject*'; factored out from LowLevelDatabase.
This class contains all the nameof_xxx() methods that allow a wild variety
of Python objects to be 'pickled' as Python source code that will
reconstruct them.
"""
    def __init__(self, namespace, db, translator=None):
        """Set up the naming machinery.

        namespace -- unique-name manager shared with the C database
        db        -- the LowLevelDatabase this maker serves
        translator -- optional Translator (needed for wrapper generation)
        """
        self.namespace = namespace
        self.db = db
        self.translator = translator
        self.initcode = [      # list of lines for the module's initxxx()
            'import new, types, sys',
            ]
        self.latercode = []    # list of generators generating extra lines
                               #   for later in initxxx() -- for recursive
                               #   objects
        self.debugstack = ()   # linked list of nested nameof()
        self.wrappers = {}     # {'pycfunctionvariable': ('name', 'wrapperfn')}
        self.import_hints = {} # I don't seem to need it any longer.
        # leaving the import support intact, doesn't hurt.
        self.name_for_meth = {} # get nicer wrapper names
        self.is_method = {}
        self.use_true_methods = False # may be overridden
    def nameof(self, obj, debug=None):
        """Return the exported C name of the container node for obj.

        Maintains self.debugstack (a linked list of nested nameof() calls)
        so naming failures can be traced back through recursion.
        """
        if debug:
            stackentry = debug, obj
        else:
            stackentry = obj
        self.debugstack = (self.debugstack, stackentry)
        try:
            try:
                self.translator.rtyper # check for presence
                entry = extregistry.lookup(obj)
                getter = entry.get_ll_pyobjectptr
            except (KeyError, AttributeError):
                # common case: 'p' is a _pyobject
                p = pyobjectptr(obj)
            else:
                # 'p' should be a PyStruct pointer, i.e. a _pyobjheader
                p = getter(self.translator.rtyper)
            node = self.db.getcontainernode(p._obj)
        finally:
            # pop our entry and check the stack was balanced
            self.debugstack, x = self.debugstack
            assert x is stackentry
        return node.exported_name
    def computenameof(self, obj):
        """Dispatch to the appropriate nameof_<typename>() method for obj.

        Objects whose builtin base is object/int/long but whose type is
        not exactly that base are treated as user-defined instances (or
        graphs); everything else is dispatched on the MRO of type(obj).
        """
        obj_builtin_base = builtin_base(obj)
        if obj_builtin_base in (object, int, long) and type(obj) is not obj_builtin_base:
            if isinstance(obj, FunctionGraph):
                return self.nameof_graph(obj)
            # assume it's a user defined thingy
            return self.nameof_instance(obj)
        else:
            # walk the MRO looking for a nameof_<classname> handler
            for cls in type(obj).__mro__:
                meth = getattr(self,
                               'nameof_' + cls.__name__.replace(' ', ''),
                               None)
                if meth:
                    break
            else:
                raise Exception, "nameof(%r)" % (obj,)
            return meth(obj)
    def uniquename(self, basename):
        """Return a fresh global name derived from basename."""
        return self.namespace.uniquename(basename)
    def initcode_python(self, name, pyexpr):
        """Append init code assigning the evaluation of pyexpr to name."""
        # generate init code that will evaluate the given Python expression
        #self.initcode.append("print 'setting up', %r" % name)
        self.initcode.append("%s = %s" % (name, pyexpr))
def nameof_object(self, value):
if isinstance(object, property):
return self.nameof_property(value)
if type(value) is not object:
raise Exception, "nameof(%r)" % (value,)
name = self.uniquename('g_object')
self.initcode_python(name, "object()")
return name
def nameof_NoneType(self, value):
assert value is None
name = self.uniquename('g_None')
self.initcode_python(name, "None")
return name
def nameof_bool(self, value):
assert value is False or value is True
if value:
name = 'True'
else:
name = 'False'
name = self.uniquename('g_' + name)
self.initcode_python(name, repr(value))
return name
    def nameof_module(self, value):
        """Name a module by emitting an __import__ in the init code.

        Non-builtin modules (those with a .py/.pyc/.pyo __file__) get a
        warning comment, since importing them at init time may not work
        in the translated environment.
        """
        easy = value is os or not hasattr(value, "__file__") or \
               not (value.__file__.endswith('.pyc') or
                    value.__file__.endswith('.py') or
                    value.__file__.endswith('.pyo'))
        name = self.uniquename('mod%s'%value.__name__)
        if not easy:
            self.initcode.append('######## warning ########')
            self.initcode.append('## %r is not a builtin module (probably :)' %value)
        access = "__import__(%r)" % value.__name__
        # this is an inlined version of my_import, see sect. 2.1 of the python docs
        for submodule in value.__name__.split('.')[1:]:
            access += '.' + submodule
        self.initcode_python(name, access)
        return name
def _import_module(self, modname):
mod = __import__(modname)
for submodule in modname.split('.')[1:]:
mod = getattr(mod, submodule)
return mod
def _find_in_module(self, obj, mod):
if hasattr(obj, '__name__') and obj.__name__ in mod.__dict__:
return obj.__name__
for key, value in mod.__dict__.iteritems():
if value is obj:
return key
raise ImportError, 'object %r cannot be found in %r' % (obj, mod)
def nameof_int(self, value):
if value >= 0:
name = 'gint_%d' % value
else:
name = 'gint_minus%d' % abs(value)
name = self.uniquename(name)
self.initcode_python(name, repr(value))
return name
def nameof_long(self, value):
if value >= 0:
name = 'glong%d' % value
else:
name = 'glong_minus%d' % abs(value)
name = self.uniquename(name)
self.initcode_python(name, repr(value))
return name
def nameof_float(self, value):
name = 'gfloat_%s' % value
name = (name.replace('-', 'minus')
.replace('.', 'dot'))
name = self.uniquename(name)
self.initcode_python(name, repr(value))
return name
    def nameof_str(self, value):
        """Name a string constant; first 32 chars seed the global name."""
        name = self.uniquename('gstr_' + value[:32])
        self.initcode_python(name, repr(value))
        return name
    def nameof_unicode(self, value):
        """Name a unicode constant.

        NOTE(review): str() on a non-ASCII prefix would raise here --
        presumably only ASCII unicode constants occur; verify with callers.
        """
        name = self.uniquename('guni_' + str(value[:32]))
        self.initcode_python(name, repr(value))
        return name
    def skipped_function(self, func):
        """Emit a placeholder for a function we do not translate.

        The placeholder raises NotImplementedError when called; a warning
        with the original source location is logged.
        """
        # debugging only!  Generates a placeholder for missing functions
        # that raises an exception when called.
        if self.translator.annotator.frozen:
            warning = 'NOT GENERATING'
        else:
            warning = 'skipped'
        printable_name = '(%s:%d) %s' % (
            func.func_globals.get('__name__', '?'),
            func.func_code.co_firstlineno,
            func.__name__)
        log.WARNING("%s %s" % (warning, printable_name))
        name = self.uniquename('gskippedfunc_' + func.__name__)
        self.initcode.append('def %s(*a,**k):' % name)
        self.initcode.append(' raise NotImplementedError' )
    def shouldskipfunc(self, func):
        """Decide whether func should get a placeholder instead of a wrapper.

        Returns "NOT_RPYTHON" (truthy) for functions explicitly marked in
        their docstring, True for functions the annotator never saw called,
        False otherwise.
        """
        if isinstance(func, (staticmethod, classmethod)):
            # unwrap to the underlying function; 42 is a dummy instance
            func = func.__get__(42)
        try: func = func.im_func
        except AttributeError: pass
        if isinstance(func, FunctionType):
            ann = self.translator.annotator
            if ann is None:
                if (func.func_doc and
                    func.func_doc.lstrip().startswith('NOT_RPYTHON')):
                    return "NOT_RPYTHON" # True
            else:
                if not ann.bookkeeper.getdesc(func).querycallfamily():
                    return True
        return False
    def nameof_function(self, func):
        """Name a function by generating a PyCFunction wrapper for it.

        Import-hinted functions are imported instead; skipped/unsupported
        functions get a raising placeholder.
        """
        assert self.translator is not None, (
            "the Translator must be specified to build a PyObject "
            "wrapper for %r" % (func,))
        # shortcut imports
        if func in self.import_hints:
            return self.import_function(func)
        # look for skipped functions
        if self.shouldskipfunc(func):
            return self.skipped_function(func)
        try:
            fwrapper = gen_wrapper(func, self.translator,
                                   newname=self.name_for_meth.get(func, func.__name__),
                                   as_method=func in self.is_method)
        except NoStandardGraph:
            return self.skipped_function(func)
        pycfunctionobj = self.uniquename('gfunc_' + func.__name__)
        self.wrappers[pycfunctionobj] = func.__name__, self.db.get(fwrapper), func.__doc__
        return pycfunctionobj
    def import_function(self, func):
        """Name a function by importing it from its (hinted) home module."""
        name = self.uniquename('impfunc_' + func.__name__)
        modulestr = self.import_hints[func] or func.__module__
        module = self._import_module(modulestr)
        modname = self.nameof(module)
        obname = self._find_in_module(func, module)
        self.initcode_python(name, '%s.%s' % (modname, obname))
        return name
    def nameof_staticmethod(self, sm):
        """Name a staticmethod by naming its function and re-wrapping it."""
        # XXX XXX XXXX
        # 42.5 is a dummy 'instance' just to unwrap the descriptor
        func = sm.__get__(42.5)
        name = self.uniquename('gsm_' + func.__name__)
        functionname = self.nameof(func)
        self.initcode_python(name, 'staticmethod(%s)' % functionname)
        return name
    def nameof_instancemethod(self, meth):
        """Name a bound or unbound instance method.

        Unbound methods reduce to their underlying function; bound ones
        are rebuilt with new.instancemethod in the init code.
        """
        if meth.im_self is None:
            # no error checking here
            return self.nameof(meth.im_func)
        else:
            ob = self.nameof(meth.im_self)
            func = self.nameof(meth.im_func)
            typ = self.nameof(meth.im_class)
            name = self.uniquename('gmeth_'+meth.im_func.__name__)
            self.initcode_python(name, 'new.instancemethod(%s, %s, %s)' % (
                func, ob, typ))
            return name
    nameof_method = nameof_instancemethod   # when run on top of PyPy
    def should_translate_attr(self, pbc, attr):
        """Decide whether attribute attr of prebuilt constant pbc should be
        reproduced in the generated init code.

        Without an annotator (or for ObjSpace instances) everything not in
        NOT_RPYTHON_ATTRIBUTES is kept; otherwise only attributes the
        annotator knows about on the class are kept.
        """
        ann = self.translator.annotator
        if ann is None or isinstance(pbc, ObjSpace):
            ignore = getattr(pbc.__class__, 'NOT_RPYTHON_ATTRIBUTES', [])
            if attr in ignore:
                return False
            else:
                return "probably"   # True
        classdef = ann.bookkeeper.immutablevalue(pbc).classdef
        if classdef and classdef.about_attribute(attr) is not None:
            return True
        return False
    def nameof_instance(self, instance):
        """Name a prebuilt user-defined instance.

        Emits init code that recreates the instance via __new__ (new-style
        classes) or new.instance (old-style), then lazily fills in its
        translated attributes via the initinstance() generator.
        """
        if extregistry.is_registered(instance):
            return extregistry.lookup(instance).genc_pyobj(self)
        if instance in self.import_hints:
            return self.import_instance(instance)
        klass = instance.__class__
        if issubclass(klass, LowLevelType):
            raise Exception, 'nameof_LowLevelType(%r)' % (instance,)
        name = self.uniquename('ginst_' + klass.__name__)
        cls = self.nameof(klass)
        if hasattr(klass, '__base__'):
            base_class = builtin_base(instance)
            base = self.nameof(base_class)
        else:
            base_class = None
            base = cls
        def initinstance():
            # runs later (see self.later) so recursive references work
            content = instance.__dict__.items()
            content.sort()
            for key, value in content:
                if self.should_translate_attr(instance, key):
                    line = '%s.%s = %s' % (name, key, self.nameof(value))
                    yield line
        if hasattr(instance,'__reduce_ex__'):
            # recover the constructor state (e.g. for int/long subclasses)
            import copy_reg
            reduced = instance.__reduce_ex__()
            assert reduced[0] is copy_reg._reconstructor,"not clever enough"
            assert reduced[1][1] is base_class, "not clever enough for %r vs. %r" % (base_class, reduced)
            state = reduced[1][2]
        else:
            state = None
        self.initcode.append('if isinstance(%s, type):' % cls)
        if state is not None:
            self.initcode.append(' %s = %s.__new__(%s, %r)' % (name, base, cls, state))
        else:
            self.initcode.append(' %s = %s.__new__(%s)' % (name, base, cls))
        self.initcode.append('else:')
        self.initcode.append(' %s = new.instance(%s)' % (name, cls))
        self.later(initinstance())
        return name
def import_instance(self, inst):
klass = inst.__class__
name = self.uniquename('impinst_' + klass.__name__)
modulestr = self.import_hints[inst] or klass.__module__
module = self._import_module(modulestr)
modname = self.nameof(module)
obname = self._find_in_module(func, module)
self.initcode_python(name, '%s.%s' % (modname, obname))
return name
    def nameof_builtin_function_or_method(self, func):
        """Name a builtin function or a bound builtin method.

        Free builtin functions are located by scanning sys.modules for a
        C-level (non-.py) module that exposes them; bound methods are
        reached through the name of their __self__.
        """
        if func.__self__ is None:
            # builtin function
            # where does it come from? Python2.2 doesn't have func.__module__
            for modname, module in sys.modules.items():
                if hasattr(module, '__file__'):
                    if (module.__file__.endswith('.py') or
                        module.__file__.endswith('.pyc') or
                        module.__file__.endswith('.pyo')):
                        continue    # skip non-builtin modules
                if func is getattr(module, func.__name__, None):
                    break
            else:
                raise Exception, '%r not found in any built-in module' % (func,)
            name = self.uniquename('gbltin_' + func.__name__)
            if modname == '__builtin__':
                self.initcode_python(name, func.__name__)
            else:
                modname = self.nameof(module)
                self.initcode_python(name, '%s.%s' % (modname, func.__name__))
        else:
            # builtin (bound) method
            name = self.uniquename('gbltinmethod_' + func.__name__)
            selfname = self.nameof(func.__self__)
            self.initcode_python(name, '%s.%s' % (selfname, func.__name__))
        return name
    def nameof_classobj(self, cls):
        """Name a class by emitting an equivalent class statement.

        Standard exception classes are referenced directly by name;
        other classes are re-created with the right metaclass and their
        translated attributes filled in later via initclassobj().
        """
        if self.translator.rtyper.needs_wrapper(cls):
            return self.wrap_exported_class(cls)
        if cls.__doc__ and cls.__doc__.lstrip().startswith('NOT_RPYTHON'):
            raise Exception, "%r should never be reached" % (cls,)
        if cls in self.import_hints:
            return self.import_classobj(cls)
        metaclass = "type"
        if issubclass(cls, Exception):
            if (cls.__module__ == 'exceptions' or
                cls is py.magic.AssertionError):
                name = self.uniquename('gexc_' + cls.__name__)
                self.initcode_python(name, cls.__name__)
                return name
            #else:
            #    # exceptions must be old-style classes (grr!)
            #    metaclass = "&PyClass_Type"
        # For the moment, use old-style classes exactly when the
        # pypy source uses old-style classes, to avoid strange problems.
        if not isinstance(cls, type):
            assert type(cls) is ClassType
            metaclass = "types.ClassType"
        name = self.uniquename('gcls_' + cls.__name__)
        basenames = [self.nameof(base) for base in cls.__bases__]
        def initclassobj():
            # runs later (see self.later) so recursive references work
            content = cls.__dict__.items()
            content.sort()
            ignore = getattr(cls, 'NOT_RPYTHON_ATTRIBUTES', [])
            for key, value in content:
                if key.startswith('__'):
                    # we do not expose __del__, because it would be called twice
                    if key in ['__module__', '__doc__', '__dict__', '__del__',
                               '__weakref__', '__repr__', '__metaclass__']:
                        continue
                    # XXX some __NAMES__ are important... nicer solution sought
                    #raise Exception, "unexpected name %r in class %s"%(key, cls)
                if key in ignore:
                    continue
                skip = self.shouldskipfunc(value)
                if skip:
                    if skip != 'NOT_RPYTHON':
                        log.WARNING("skipped class function: %r" % value)
                    continue
                yield '%s.%s = %s' % (name, key, self.nameof(value))
            baseargs = ", ".join(basenames)
            if baseargs:
                baseargs = '(%s)' % baseargs
        self.initcode.append('class %s%s:' % (cls.__name__, baseargs))
        self.initcode.append(' __metaclass__ = %s' % metaclass)
        self.initcode.append('%s = %s' % (name, cls.__name__))
        self.later(initclassobj())
        return name
    nameof_class = nameof_classobj   # for Python 2.2
    def import_classobj(self, cls):
        """Name a class by importing it from its (hinted) home module."""
        name = self.uniquename('impcls_' + cls.__name__)
        modulestr = self.import_hints[cls] or cls.__module__
        module = self._import_module(modulestr)
        modname = self.nameof(module)
        obname = self._find_in_module(cls, module)
        self.initcode_python(name, '%s.%s' % (modname, obname))
        return name
typename_mapping = {
InstanceType: 'types.InstanceType',
type(None): 'type(None)',
CodeType: 'types.CodeType',
type(sys): 'type(new)',
r_int: 'int', # XXX
r_uint: 'int', # XXX
# XXX more hacks
# type 'builtin_function_or_method':
type(len): 'type(len)',
# type 'method_descriptor':
type(list.append): 'type(list.append)',
# type 'wrapper_descriptor':
type(type(None).__repr__): 'type(type(None).__repr__)',
# type 'getset_descriptor':
type(type.__dict__['__dict__']): "type(type.__dict__['__dict__'])",
# type 'member_descriptor':
type(type.__dict__['__basicsize__']): "type(type.__dict__['__basicsize__'])",
}
    def nameof_type(self, cls):
        """Name a type object: builtins by name (or via typename_mapping),
        user-defined types via nameof_classobj."""
        if cls.__module__ != '__builtin__':
            return self.nameof_classobj(cls)   # user-defined type
        name = self.uniquename('gtype_%s' % cls.__name__)
        if getattr(__builtin__, cls.__name__, None) is cls:
            expr = cls.__name__    # type available from __builtin__
        else:
            expr = self.typename_mapping[cls]
        self.initcode_python(name, expr)
        return name
def nameof_tuple(self, tup):
name = self.uniquename('g%dtuple' % len(tup))
args = [self.nameof(x) for x in tup]
args = ', '.join(args)
if args:
args += ','
self.initcode_python(name, '(%s)' % args)
return name
    def nameof_list(self, lis):
        """Name a list: create it empty now, append items later so
        recursive references to the list itself work."""
        name = self.uniquename('g%dlist' % len(lis))
        def initlist():
            for i in range(len(lis)):
                item = self.nameof(lis[i])
                yield '%s.append(%s)' % (name, item)
        self.initcode_python(name, '[]')
        self.later(initlist())
        return name
    def nameof_dict(self, dic):
        """Name a dict: create it empty now, fill entries later so
        recursive references work.  Refuses module-globals dicts."""
        assert dic is not __builtins__
        assert '__builtins__' not in dic, 'Seems to be the globals of %s' % (
            dic.get('__name__', '?'),)
        name = self.uniquename('g%ddict' % len(dic))
        def initdict():
            for k in dic:
                if type(k) is str:
                    # plain string keys can be inlined with %r
                    yield '%s[%r] = %s' % (name, k, self.nameof(dic[k]))
                else:
                    yield '%s[%s] = %s' % (name, self.nameof(k),
                                           self.nameof(dic[k]))
        self.initcode_python(name, '{}')
        self.later(initdict())
        return name
# strange prebuilt instances below, don't look too closely
# XXX oh well.
    def nameof_member_descriptor(self, md):
        """Name any descriptor by looking it up in its class's __dict__."""
        name = self.uniquename('gdescriptor_%s_%s' % (
            md.__objclass__.__name__, md.__name__))
        cls = self.nameof(md.__objclass__)
        self.initcode_python(name, '%s.__dict__[%r]' % (cls, md.__name__))
        return name
    # the same lookup works for all descriptor flavors
    nameof_getset_descriptor  = nameof_member_descriptor
    nameof_method_descriptor  = nameof_member_descriptor
    nameof_wrapper_descriptor = nameof_member_descriptor
def nameof_file(self, fil):
if fil is sys.stdin:
name = self.uniquename("gsys_stdin")
self.initcode_python(name, "sys.stdin")
return name
if fil is sys.stdout:
name = self.uniquename("gsys_stdout")
self.initcode_python(name, "sys.stdout")
return name
if fil is sys.stderr:
name = self.uniquename("gsys_stderr")
self.initcode_python(name, "sys.stderr")
return name
raise Exception, 'Cannot translate an already-open file: %r' % (fil,)
    def later(self, gen):
        """Queue a generator of init-code lines to be drained later
        (allows naming recursive data structures)."""
        self.latercode.append((gen, self.debugstack))
    def collect_initcode(self):
        """Drain all queued 'later' generators into self.initcode.
        Draining may queue further generators, hence the while loop."""
        while self.latercode:
            gen, self.debugstack = self.latercode.pop()
            #self.initcode.extend(gen) -- eats TypeError! bad CPython!
            for line in gen:
                self.initcode.append(line)
            self.debugstack = ()
    def getfrozenbytecode(self):
        """Compile the accumulated init code and return (marshalled
        code object, original source).

        The real init code is marshalled and zlib-compressed, then wrapped
        in a tiny stub that decompresses and exec's it at import time.
        """
        self.initcode.append('')
        source = '\n'.join(self.initcode)
        del self.initcode[:]
        co = compile(source, '<initcode>', 'exec')
        originalsource = source
        small = zlib.compress(marshal.dumps(co))
        source = """if 1:
            import zlib, marshal
            exec marshal.loads(zlib.decompress(%r))""" % small
        # Python 2.2 SyntaxError without newline: Bug #501622
        source += '\n'
        co = compile(source, '<initcode>', 'exec')
        del source
        return marshal.dumps(co), originalsource
# ____________________________________________________________-
# addition for true extension module building
    def wrap_exported_class(self, cls):
        """Name a class exposed through the extension-module wrapper.

        Emits a fresh new-style class (slots limited to __self__ for the
        PyCObject backing it) and later fills it with wrapped methods and
        properties; hidden or skipped functions are filtered out.
        """
        name = self.uniquename('gwcls_' + cls.__name__)
        basenames = [self.nameof(base) for base in cls.__bases__]
        # we merge the class dicts for more speed
        def merge_classdicts(cls):
            # first definition along the MRO wins; 'object' itself excluded
            dic = {}
            for cls in cls.mro()[:-1]:
                for key, value in cls.__dict__.items():
                    if key not in dic:
                        dic[key] = value
            return dic
        def initclassobj():
            # runs later (see self.later) so recursive references work
            content = merge_classdicts(cls).items()
            content.sort()
            init_seen = False
            for key, value in content:
                if key.startswith('__'):
                    # we do not expose __del__, because it would be called twice
                    if key in ['__module__', '__dict__', '__doc__', '__del__',
                               '__weakref__', '__repr__', '__metaclass__']:
                        continue
                if self.shouldskipfunc(value):
                    log.WARNING("skipped class function: %r" % value)
                    continue
                if isinstance(value, FunctionType):
                    func = value
                    fname = '%s.%s' % (cls.__name__, func.__name__)
                    if not should_expose(func):
                        log.REMARK('method %s hidden from wrapper' % fname)
                        continue
                    if func.__name__ == '__init__':
                        init_seen = True
                    # there is the problem with exposed classes inheriting from
                    # classes which are internal. We need to create a new wrapper
                    # for every class which uses an inherited __init__, because
                    # this is the context where we create the instance.
                    ann = self.translator.annotator
                    clsdef = ann.bookkeeper.getuniqueclassdef(cls)
                    graph = ann.bookkeeper.getdesc(func).getuniquegraph()
                    if ann.binding(graph.getargs()[0]).classdef is not clsdef:
                        value = new_method_graph(graph, clsdef, fname, self.translator)
                    self.name_for_meth[value] = fname
                    if self.use_true_methods:
                        self.is_method[value] = True
                elif isinstance(value, property):
                    fget, fset, fdel, doc = value.fget, value.fset, value.fdel, value.__doc__
                    for f in fget, fset, fdel:
                        if f and self.use_true_methods:
                            self.is_method[f] = True
                    stuff = [self.nameof(x) for x in fget, fset, fdel, doc]
                    yield '%s.%s = property(%s, %s, %s, %s)' % ((name, key) +
                                                                tuple(stuff))
                    continue
                yield '%s.%s = %s' % (name, key, self.nameof(value))
            if not init_seen:
                log.WARNING('No __init__ found for %s - you cannot build instances' %
                            cls.__name__)
        baseargs = ", ".join(basenames)
        if baseargs:
            baseargs = '(%s)' % baseargs
        a = self.initcode.append
        a('class %s%s:' % (name, baseargs) )
        if cls.__doc__:
            a(' %r' % str(cls.__doc__) )
        a(' __metaclass__ = type')
        a(' __slots__ = ["__self__"] # for PyCObject')
        self.later(initclassobj())
        return name
    def nameof_graph(self, g):
        """Name a FunctionGraph directly by generating its PyCFunction
        wrapper (parallel to nameof_function, but graph-based)."""
        newname=self.name_for_meth.get(g, g.func.__name__)
        fwrapper = gen_wrapper(g, self.translator, newname=newname,
                               as_method=g in self.is_method)
        pycfunctionobj = self.uniquename('gfunc_' + newname)
        self.wrappers[pycfunctionobj] = g.func.__name__, self.db.get(fwrapper), g.func.__doc__
        return pycfunctionobj
    def nameof_property(self, p):
        """Name a property object by naming its fget/fset/fdel/doc and
        rebuilding it with property() in the init code."""
        fget, fset, fdel, doc = p.fget, p.fset, p.fdel, p.__doc__
        for f in fget, fset, fdel:
            if f and self.use_true_methods:
                # mark accessors so their wrappers are generated as methods
                self.is_method[f] = True
        stuff = [self.nameof(x) for x in fget, fset, fdel, doc]
        name = self.uniquename('gprop')
        expr = 'property(%s, %s, %s, %s)' % (tuple(stuff))
        self.initcode_python(name, expr)
        return name
| Python |
from __future__ import generators
from pypy.rpython.lltypesystem.lltype import typeOf, Void
from pypy.translator.c.support import USESLOTS # set to False if necessary while refactoring
from pypy.translator.c.support import cdecl, somelettersfrom
class CExternalFunctionCodeGenerator(object):
    """Generates the C body for a call to an external (suggested_primitive)
    function: call it, then convert any pending CPython exception."""
    if USESLOTS:
        __slots__ = """db fnptr FUNCTYPE argtypenames resulttypename""".split()

    def __init__(self, fnptr, db):
        """fnptr is the low-level function pointer; db the C database."""
        self.fnptr = fnptr
        self.db = db
        self.FUNCTYPE = typeOf(fnptr)
        assert Void not in self.FUNCTYPE.ARGS
        self.argtypenames = [db.gettype(T) for T in self.FUNCTYPE.ARGS]
        self.resulttypename = db.gettype(self.FUNCTYPE.RESULT)

    def name(self, cname):  #virtual
        return cname

    def argnames(self):
        """Generate short, type-derived C parameter names (e.g. 'l0')."""
        return ['%s%d' % (somelettersfrom(self.argtypenames[i]), i)
                for i in range(len(self.argtypenames))]

    def allconstantvalues(self):
        return []

    def implementation_begin(self):
        pass

    def cfunction_declarations(self):
        """Yield the local declarations (just 'result' if non-Void)."""
        if self.FUNCTYPE.RESULT is not Void:
            yield '%s;' % cdecl(self.resulttypename, 'result')

    def cfunction_body(self):
        """Yield the call plus exception-conversion code."""
        try:
            convert_params = self.fnptr.convert_params
        except AttributeError:
            # default: pass the C argument names through unchanged
            convert_params = lambda backend, args: [arg for _,arg in args]
        call = '%s(%s)' % (self.fnptr._name, ', '.join(convert_params("c", zip(self.FUNCTYPE.ARGS, self.argnames()))))
        if self.FUNCTYPE.RESULT is not Void:
            yield 'result = %s;' % call
            yield 'if (PyErr_Occurred()) RPyConvertExceptionFromCPython();'
            yield 'return result;'
        else:
            yield '%s;' % call
            yield 'if (PyErr_Occurred()) RPyConvertExceptionFromCPython();'

    def implementation_end(self):
        pass

# sanity check: with __slots__ active the class must not have grown a __dict__
assert not USESLOTS or '__dict__' not in dir(CExternalFunctionCodeGenerator)
| Python |
from pypy.objspace.flow.model import Variable, Constant
from pypy.objspace.flow.model import Block, Link, FunctionGraph, checkgraph
from pypy.rpython.lltypesystem.lltype import \
Ptr, PyObject, typeOf, Signed, FuncType, functionptr, nullptr, Void
from pypy.rpython.rtyper import LowLevelOpList
from pypy.rpython.rmodel import inputconst, PyObjPtr
from pypy.rpython.robject import pyobj_repr
from pypy.interpreter.pycode import CO_VARARGS
from pypy.rpython.typesystem import getfunctionptr
from pypy.annotation.model import s_None, SomeInstance
from pypy.translator.backendopt.inline import simple_inline_function
ALWAYS_INLINE = False
def gen_wrapper(func, translator, newname=None, as_method=False):
"""generate a wrapper function for 'func' that can be put in a
PyCFunction object. The wrapper has signature
PyObject *pyfn_xxx(PyObject *self, PyObject *args, PyObject* kw);
"""
# The basic idea is to produce a flow graph from scratch, using the
# help of the rtyper for the conversion of the arguments after they
# have been decoded.
# get the fully typed low-level pointer to the function, if available
do_inline = ALWAYS_INLINE
if translator.annotator is None:
# get the graph from the translator, "push it back" so that it's
# still available for further buildflowgraph() calls
graph = translator.buildflowgraph(func)
translator._prebuilt_graphs[func] = graph
else:
if isinstance(func, FunctionGraph):
graph = func
func = graph.func
# in this case we want to inline for sure, because we
# created this extra graph with a single call-site.
do_inline = True
else:
bk = translator.annotator.bookkeeper
graph = bk.getdesc(func).getuniquegraph()
f = getfunctionptr(graph)
FUNCTYPE = typeOf(f).TO
nb_positional_args = func.func_code.co_argcount
vararg = bool(func.func_code.co_flags & CO_VARARGS)
assert len(FUNCTYPE.ARGS) == nb_positional_args + vararg
newops = LowLevelOpList(translator.rtyper)
# "def wrapper(self, args, kwds)"
vself = Variable('self')
vargs = Variable('args')
vkwds = Variable('kwds')
vfname = Constant(func.func_name, PyObjPtr)
# avoid incref/decref on the arguments: 'self' and 'kwds' can be NULL
vself.concretetype = PyObjPtr
vargs.concretetype = PyObjPtr
vkwds.concretetype = PyObjPtr
varguments = []
varnames = func.func_code.co_varnames
func_defaults = func.func_defaults or ()
if as_method:
nb_positional_args -= 1
varnames = varnames[1:]
for i in range(nb_positional_args):
# "argument_i = decode_arg(fname, i, name, vargs, vkwds)" or
# "argument_i = decode_arg_def(fname, i, name, vargs, vkwds, default)"
vlist = [vfname,
inputconst(Signed, i),
Constant(varnames[i], PyObjPtr),
vargs,
vkwds]
try:
default_value = func_defaults[i - nb_positional_args]
except IndexError:
opname = 'decode_arg'
else:
opname = 'decode_arg_def'
vlist.append(Constant(default_value, PyObjPtr))
v = newops.genop(opname, vlist, resulttype=PyObjPtr)
#v.set_name('a', i)
varguments.append(v)
if vararg:
# "vararg = vargs[n:]"
vlist = [vargs,
Constant(nb_positional_args, PyObjPtr),
Constant(None, PyObjPtr),
]
vararg = newops.genop('getslice', vlist, resulttype=PyObjPtr)
#vararg.set_name('vararg', 0)
varguments.append(vararg)
else:
# "check_no_more_arg(fname, n, vargs)"
vlist = [vfname,
inputconst(Signed, nb_positional_args),
vargs,
]
newops.genop('check_no_more_arg', vlist, resulttype=Signed)
# use the rtyper to produce the conversions
inputargs = f._obj.graph.getargs()
if as_method:
varguments.insert(0, vself)
vlist = [vfname, vself]
newops.genop('check_self_nonzero', vlist, resulttype=Signed)
for i in range(len(varguments)):
if FUNCTYPE.ARGS[i] != PyObjPtr:
# "argument_i = type_conversion_operations(argument_i)"
rtyper = translator.rtyper
assert rtyper is not None, (
"needs the rtyper to perform argument conversions")
r_arg = rtyper.bindingrepr(inputargs[i])
# give the rtyper a chance to know which function we are wrapping
rtyper.set_wrapper_context(func)
varguments[i] = newops.convertvar(varguments[i],
r_from = pyobj_repr,
r_to = r_arg)
rtyper.set_wrapper_context(None)
# "result = direct_call(func, argument_0, argument_1, ..)"
vlist = [inputconst(typeOf(f), f)] + varguments
vresult = newops.genop('direct_call', vlist, resulttype=FUNCTYPE.RESULT)
if FUNCTYPE.RESULT != PyObjPtr:
# convert "result" back to a PyObject
rtyper = translator.rtyper
assert rtyper is not None, (
"needs the rtyper to perform function result conversions")
r_result = rtyper.bindingrepr(f._obj.graph.getreturnvar())
vresult = newops.convertvar(vresult,
r_from = r_result,
r_to = pyobj_repr)
# "return result"
block = Block([vself, vargs, vkwds])
wgraph = FunctionGraph('pyfn_' + (newname or func.func_name), block)
translator.update_call_graph(wgraph, graph, object())
translator.graphs.append(wgraph)
block.operations[:] = newops
block.closeblock(Link([vresult], wgraph.returnblock))
wgraph.getreturnvar().concretetype = PyObjPtr
checkgraph(wgraph)
if translator.rtyper is not None:
# the above convertvar()s may have created and annotated new helpers
# that need to be specialized now
translator.rtyper.specialize_more_blocks()
if do_inline:
simple_inline_function(translator, graph, wgraph)
return functionptr(FuncType([PyObjPtr,
PyObjPtr,
PyObjPtr],
PyObjPtr),
wgraph.name,
graph = wgraph,
exception_policy = "CPython")
def new_method_graph(graph, clsdef, newname, translator):
    """Wrap 'graph' as a method of the class described by 'clsdef'.

    Builds a new flow graph whose first argument is an instance of the
    class 'clsdef' describes; the instance is converted to the
    representation that the original graph's 'self' expects, and the
    original function is then invoked with a single direct_call.  The
    new graph returns Void, is registered with the translator, and is
    intended to be inlined into its (single) call site.

    Returns the new FunctionGraph.
    """
    ann = translator.annotator
    rtyper = translator.rtyper
    # low-level function pointer for the graph being wrapped
    f = getfunctionptr(graph)
    newops = LowLevelOpList(translator.rtyper)
    callargs = graph.getargs()[:]
    # make sure every argument carries a concrete low-level type
    for v in callargs:
        if not hasattr(v, 'concretetype'):
            v.concretetype = PyObjPtr
    # replace the original 'self' with a fresh variable annotated as an
    # instance of clsdef; convert it back to the original 'self'
    # representation just before the call
    v_self_old = callargs.pop(0)
    v_self = Variable(v_self_old.name)
    binding = SomeInstance(clsdef)
    v_self.concretetype = rtyper.getrepr(binding).lowleveltype
    ann.setbinding(v_self, binding)
    v_self_call = newops.convertvar(v_self,
                                    r_from = rtyper.bindingrepr(v_self),
                                    r_to = rtyper.bindingrepr(v_self_old))
    vlist = [inputconst(typeOf(f), f)] + [v_self_call] + callargs
    newops.genop('direct_call', vlist, resulttype=Void)
    # "return result"
    funcargs = [v_self] + callargs
    block = Block(funcargs)
    newgraph = FunctionGraph(newname, block)
    translator.update_call_graph(newgraph, graph, object())
    translator.graphs.append(newgraph)
    block.operations[:] = newops
    block.closeblock(Link([inputconst(Void, None)], newgraph.returnblock))
    # the wrapper returns nothing: annotate and type its result as None/Void
    vres = newgraph.getreturnvar()
    ann.setbinding(vres, s_None)
    vres.concretetype = Void
    checkgraph(newgraph)
    # pretend to be the same function, as we actually
    # will become inlined.
    newgraph.func = graph.func
    # the convertvar() calls above may have created new helpers that
    # still need to be specialized
    translator.rtyper.specialize_more_blocks()
    # not sure if we want this all the time?
    if ALWAYS_INLINE:
        simple_inline_function(translator, graph, newgraph)
    return newgraph
| Python |
import sys
from pypy.translator.c.support import cdecl
from pypy.translator.c.node import ContainerNode
from pypy.rpython.lltypesystem.lltype import \
typeOf, Ptr, ContainerType, RttiStruct, \
RuntimeTypeInfo, getRuntimeTypeInfo, top_container
from pypy.rpython.memory.gctransform import \
refcounting, boehm, framework, stacklessframework
from pypy.rpython.lltypesystem import lltype, llmemory
class BasicGcPolicy(object):
    """Common base class for the genc GC policies.

    A policy instance is attached to the database.  It decides which GC
    header fields structures and arrays get, which C libraries and
    startup code the chosen collector requires, and how a few
    GC-related low-level operations are rendered as C source.
    """
    requires_stackless = False

    def __init__(self, db, thread_enabled=False):
        self.db = db
        self.thread_enabled = thread_enabled

    # --- GC header layout: by default, no header at all ---

    def common_gcheader_definition(self, defnode):
        return []

    def common_gcheader_initdata(self, defnode):
        return []

    def struct_gcheader_definition(self, defnode):
        return self.common_gcheader_definition(defnode)

    def struct_gcheader_initdata(self, defnode):
        return self.common_gcheader_initdata(defnode)

    def array_gcheader_definition(self, defnode):
        return self.common_gcheader_definition(defnode)

    def array_gcheader_initdata(self, defnode):
        return self.common_gcheader_initdata(defnode)

    def struct_after_definition(self, defnode):
        return []

    # --- C build and startup hooks ---

    def gc_libraries(self):
        return []

    def pre_pre_gc_code(self):
        # code that goes before the include of g_prerequisite.h
        return []

    def pre_gc_code(self):
        return ['typedef void *GC_hidden_pointer;']

    def gc_startup_code(self):
        return []

    # --- per-type setup hooks, overridden by concrete policies ---

    def struct_setup(self, structdefnode, rtti):
        return None

    def array_setup(self, arraydefnode):
        return None

    def rtti_type(self):
        return ''

    # --- rendering of the GC operations on borrowed PyObjects ---

    def OP_GC_PUSH_ALIVE_PYOBJ(self, funcgen, op):
        pyexpr = funcgen.expr(op.args[0])
        # 'self' and 'kwds' of wrapper functions can be NULL
        if pyexpr == 'NULL':
            return ''
        return 'Py_XINCREF(%s);' % pyexpr

    def OP_GC_POP_ALIVE_PYOBJ(self, funcgen, op):
        pyexpr = funcgen.expr(op.args[0])
        return 'Py_XDECREF(%s);' % pyexpr
class RefcountingInfo:
    # database-level name of the static deallocator function for the
    # structure; filled in by RefcountingGcPolicy.struct_setup()
    static_deallocator = None
from pypy.rlib.objectmodel import CDefinedIntSymbolic
class RefcountingGcPolicy(BasicGcPolicy):
    """GC policy that emits explicit reference-counting code."""
    transformerclass = refcounting.RefcountingGCTransformer

    def common_gcheader_definition(self, defnode):
        # the refcount header, as a list of (fieldname, TYPE) pairs
        transformer = defnode.db.gctransformer
        if transformer is None:
            return []
        HDR = transformer.HDR
        return [(fldname, HDR._flds[fldname]) for fldname in HDR._names]

    def common_gcheader_initdata(self, defnode):
        # initial header field values for a prebuilt object
        transformer = defnode.db.gctransformer
        if transformer is None:
            return []
        hdr = transformer.gcheaderbuilder.header_of_object(
            top_container(defnode.obj))
        return [getattr(hdr, fldname) for fldname in transformer.HDR._names]

    # for structs

    def struct_setup(self, structdefnode, rtti):
        # a struct with runtime type info gets a static deallocator
        if rtti is None:
            return
        transformer = structdefnode.db.gctransformer
        fptr = transformer.static_deallocation_funcptr_for_type(
            structdefnode.STRUCT)
        structdefnode.gcinfo = RefcountingInfo()
        structdefnode.gcinfo.static_deallocator = structdefnode.db.get(fptr)

    # for arrays

    def array_setup(self, arraydefnode):
        pass

    # for the rtti node

    def rtti_type(self):
        return 'void (@)(void *)'     # void dealloc_xx(struct xx *)

    def rtti_node_factory(self):
        return RefcountingRuntimeTypeInfo_OpaqueNode

    # rendering of the GC operations

    def OP_GC_CALL_RTTI_DESTRUCTOR(self, funcgen, op):
        exprs = [funcgen.expr(v) for v in op.args]
        return '%s(%s);' % (exprs[0], ', '.join(exprs[1:]))

    def OP_GC_FREE(self, funcgen, op):
        exprs = [funcgen.expr(v) for v in op.args]
        return 'OP_FREE(%s);' % (exprs[0], )

    def OP_GC_FETCH_EXCEPTION(self, funcgen, op):
        result = funcgen.expr(op.result)
        return ('%s = RPyFetchExceptionValue();\n'
                'RPyClearException();') % (result, )

    def OP_GC_RESTORE_EXCEPTION(self, funcgen, op):
        vexc = funcgen.expr(op.args[0])
        return 'if (%s != NULL) RPyRaiseException(RPYTHON_TYPE_OF_EXC_INST(%s), %s);' % (vexc, vexc, vexc)

    def OP_GC__COLLECT(self, funcgen, op):
        # no global collection with plain reference counting
        return ''
class RefcountingRuntimeTypeInfo_OpaqueNode(ContainerNode):
    """Database node for a RuntimeTypeInfo under refcounting: rendered
    directly as the static deallocator function of the structure it
    describes."""
    nodekind = 'refcnt rtti'
    globalcontainer = True
    typename = 'void (@)(void *)'

    def __init__(self, db, T, obj):
        assert T == RuntimeTypeInfo
        assert isinstance(obj.about, RttiStruct)
        self.db = db
        self.T = T
        self.obj = obj
        defnode = db.gettypedefnode(obj.about)
        self.implementationtypename = self.typename
        # reuse the deallocator name computed by struct_setup()
        self.name = defnode.gcinfo.static_deallocator
        self.ptrname = '((void (*)(void *)) %s)' % (self.name,)

    def enum_dependencies(self):
        return []

    def implementation(self):
        # nothing to emit: the deallocator is generated elsewhere
        return []
class BoehmInfo:
    # finalizer function for the structure, or None
    # NOTE(review): not assigned anywhere in this file — presumably
    # filled in by the Boehm GC transformer; confirm before relying on it
    finalizer = None

    # for MoreExactBoehmGcPolicy
    malloc_exact = False
class BoehmGcPolicy(BasicGcPolicy):
    """GC policy relying on the Boehm-Demers-Weiser conservative GC."""
    transformerclass = boehm.BoehmGCTransformer

    def array_setup(self, arraydefnode):
        pass

    def struct_setup(self, structdefnode, rtti):
        pass

    def rtti_type(self):
        return BoehmGcRuntimeTypeInfo_OpaqueNode.typename

    def rtti_node_factory(self):
        return BoehmGcRuntimeTypeInfo_OpaqueNode

    def gc_libraries(self):
        # the Boehm library is built under a different name on Windows
        if sys.platform == 'win32':
            return ['gc_pypy']
        return ['gc']

    def pre_pre_gc_code(self):
        if sys.platform == "linux2":
            yield "#define _REENTRANT 1"
            yield "#define GC_LINUX_THREADS 1"
        if sys.platform != "win32":
            # GC_REDIRECT_TO_LOCAL is not supported on Win32 by gc6.8
            yield "#define GC_REDIRECT_TO_LOCAL 1"
        yield "#define GC_I_HIDE_POINTERS 1"
        yield '#include <gc/gc.h>'
        yield '#define USING_BOEHM_GC'

    def pre_gc_code(self):
        return []

    def gc_startup_code(self):
        # GC_all_interior_pointers is left at its default on Windows
        if sys.platform != 'win32':
            yield 'GC_all_interior_pointers = 0;'
        yield 'GC_init();'

    def OP_GC_FETCH_EXCEPTION(self, funcgen, op):
        result = funcgen.expr(op.result)
        return ('%s = RPyFetchExceptionValue();\n'
                'RPyClearException();') % (result, )

    def OP_GC_RESTORE_EXCEPTION(self, funcgen, op):
        vexc = funcgen.expr(op.args[0])
        return 'if (%s != NULL) RPyRaiseException(RPYTHON_TYPE_OF_EXC_INST(%s), %s);' % (vexc, vexc, vexc)

    def OP_GC__COLLECT(self, funcgen, op):
        return 'GC_gcollect(); GC_invoke_finalizers();'
class BoehmGcRuntimeTypeInfo_OpaqueNode(ContainerNode):
    """Database node for a RuntimeTypeInfo under Boehm: a dummy global
    char whose address identifies the type."""
    nodekind = 'boehm rtti'
    globalcontainer = True
    typename = 'char @'

    def __init__(self, db, T, obj):
        assert T == RuntimeTypeInfo
        assert isinstance(obj.about, RttiStruct)
        self.db = db
        self.T = T
        self.obj = obj
        defnode = db.gettypedefnode(obj.about)
        self.implementationtypename = self.typename
        self.name = self.db.namespace.uniquename('g_rtti_v_'+ defnode.barename)
        self.ptrname = '(&%s)' % (self.name,)

    def enum_dependencies(self):
        return []

    def implementation(self):
        # only the address matters, the content is never read
        yield 'char %s /* uninitialized */;' % self.name
# same dummy-char rtti node as for Boehm; only the node kind label differs
class FrameworkGcRuntimeTypeInfo_OpaqueNode(BoehmGcRuntimeTypeInfo_OpaqueNode):
    nodekind = 'framework rtti'
class MoreExactBoehmGcPolicy(BoehmGcPolicy):
    """Experimental variant of BoehmGcPolicy that hands Boehm an exact
    layout descriptor (a bitmap of the words that may hold pointers)
    for the structures where one can be computed.  Kept as a separate
    class to avoid breaking the plain Boehm policy."""

    def __init__(self, db, thread_enabled=False):
        super(MoreExactBoehmGcPolicy, self).__init__(db, thread_enabled)
        # maps STRUCT -> its defnode, for every exactly-described struct
        self.exactly_typed_structs = {}

    def get_descr_name(self, defnode):
        # XXX somewhat illegal way of introducing a name
        return '%s__gc_descr__' % (defnode.name, )

    def pre_pre_gc_code(self):
        for line in super(MoreExactBoehmGcPolicy, self).pre_pre_gc_code():
            yield line
        yield "#include <gc/gc_typed.h>"

    def struct_setup(self, structdefnode, rtti):
        STRUCT = structdefnode.STRUCT
        if STRUCT._is_atomic():
            # no pointers inside at all: nothing worth describing
            exact = False
        elif STRUCT._is_varsize():
            # describable only when the variable tail holds no pointers
            exact = STRUCT._flds[STRUCT._arrayfld]._is_atomic()
        else:
            exact = True
        if exact:
            if structdefnode.gcinfo is None:
                structdefnode.gcinfo = BoehmInfo()
            structdefnode.gcinfo.malloc_exact = True
            self.exactly_typed_structs[STRUCT] = structdefnode

    def struct_after_definition(self, defnode):
        if defnode.gcinfo and defnode.gcinfo.malloc_exact:
            yield 'GC_descr %s;' % (self.get_descr_name(defnode), )

    def gc_startup_code(self):
        for line in super(MoreExactBoehmGcPolicy, self).gc_startup_code():
            yield line
        # build a GC_descr per exactly-typed struct: set one bit for
        # each word that may contain a pointer
        for STRUCT, defnode in self.exactly_typed_structs.iteritems():
            cname = defnode.gettype().replace("@", "")
            yield "{"
            yield "GC_word T_bitmap[GC_BITMAP_SIZE(%s)] = {0};" % (cname, )
            for fldname in STRUCT._flds:
                if getattr(STRUCT, fldname) == lltype.Void:
                    continue
                yield "GC_set_bit(T_bitmap, GC_WORD_OFFSET(%s, %s));" % (
                    cname, defnode.c_struct_field_name(fldname))
            yield "%s = GC_make_descriptor(T_bitmap, GC_WORD_LEN(%s));" % (
                self.get_descr_name(defnode), cname)
            yield "}"
# to get an idea of what the generated code looks like with no refcounting/GC at all
class NoneGcPolicy(BoehmGcPolicy):
    # borrow the no-op versions that RefcountingGcPolicy inherits from
    # BasicGcPolicy (empty library list, no startup code); .im_func
    # extracts the plain function so it can be re-bound on this class
    gc_libraries = RefcountingGcPolicy.gc_libraries.im_func
    gc_startup_code = RefcountingGcPolicy.gc_startup_code.im_func

    def pre_pre_gc_code(self):
        yield '#define USING_NO_GC'
class FrameworkGcPolicy(BasicGcPolicy):
    """GC policy for the collectors implemented in RPython itself:
    the headers and collection code come from the framework GC
    transformer."""
    transformerclass = framework.FrameworkGCTransformer

    def struct_setup(self, structdefnode, rtti):
        # a struct with a destructor needs its destructor and finalizer
        # helpers registered with the database
        if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
            destrptr = rtti._obj.destructor_funcptr
            # make sure this is seen by the database early, i.e. before
            # finish_helpers() on the gctransformer
            self.db.get(destrptr)
            # the following, on the other hand, will only discover ll_finalizer
            # helpers. The get() sees and records a delayed pointer. It is
            # still important to see it so that it can be followed as soon as
            # the mixlevelannotator resolves it.
            gctransf = self.db.gctransformer
            fptr = gctransf.finalizer_funcptr_for_type(structdefnode.STRUCT)
            self.db.get(fptr)

    def array_setup(self, arraydefnode):
        pass

    def rtti_type(self):
        return FrameworkGcRuntimeTypeInfo_OpaqueNode.typename

    def rtti_node_factory(self):
        return FrameworkGcRuntimeTypeInfo_OpaqueNode

    def pre_pre_gc_code(self):
        yield '#define USING_FRAMEWORK_GC'

    def gc_startup_code(self):
        # call the frameworkgc_setup helper produced by the transformer
        fnptr = self.db.gctransformer.frameworkgc_setup_ptr.value
        yield '%s();' % (self.db.get(fnptr),)

    def OP_GC_RELOAD_POSSIBLY_MOVED(self, funcgen, op):
        args = [funcgen.expr(v) for v in op.args]
        # XXX this more or less assumes mark-and-sweep gc
        return ''
        # proper return value for moving GCs:
        # %s = %s; /* for moving GCs */' % (args[1], args[0])

    def common_gcheader_definition(self, defnode):
        # header layout is entirely decided by the GC transformer
        return defnode.db.gctransformer.gc_fields()

    def common_gcheader_initdata(self, defnode):
        o = top_container(defnode.obj)
        return defnode.db.gctransformer.gc_field_values_for(o)
class StacklessFrameworkGcPolicy(FrameworkGcPolicy):
    # like FrameworkGcPolicy, but with the stackless-aware transformer;
    # requires the stackless transformation to be enabled
    transformerclass = stacklessframework.StacklessFrameworkGCTransformer
    requires_stackless = True
# string names used elsewhere to select one of the policy classes above
name_to_gcpolicy = {
    'boehm': BoehmGcPolicy,
    'exact_boehm': MoreExactBoehmGcPolicy,
    'ref': RefcountingGcPolicy,
    'none': NoneGcPolicy,
    'framework': FrameworkGcPolicy,
    'stacklessgc': StacklessFrameworkGcPolicy,
}
| Python |
from __future__ import generators
from pypy.rpython.lltypesystem.lltype import \
Struct, Array, FixedSizeArray, FuncType, PyObjectType, typeOf, \
GcStruct, GcArray, RttiStruct, PyStruct, ContainerType, \
parentlink, Ptr, PyObject, Void, OpaqueType, Float, \
RuntimeTypeInfo, getRuntimeTypeInfo, Char, _subarray, _pyobjheader
from pypy.rpython.lltypesystem.llmemory import WeakGcAddress
from pypy.translator.c.funcgen import FunctionCodeGenerator
from pypy.translator.c.external import CExternalFunctionCodeGenerator
from pypy.translator.c.support import USESLOTS # set to False if necessary while refactoring
from pypy.translator.c.support import cdecl, forward_cdecl, somelettersfrom
from pypy.translator.c.support import c_char_array_constant
from pypy.translator.c.primitive import PrimitiveType, isinf, isnan
from pypy.translator.c import extfunc
def needs_gcheader(T):
    """Return True if containers of type T must carry their own GC
    header fields.  Only GC-managed containers qualify; a GcStruct
    whose first field is an inlined substructure is excluded, because
    that substructure already carries the header."""
    if not (isinstance(T, ContainerType) and T._gckind == 'gc'):
        return False
    if isinstance(T, GcStruct) and T._first_struct() != (None, None):
        # gcheader already in the first field
        return False
    return True
class defaultproperty(object):
    """Non-data descriptor wrapping a getter function.

    Unlike property(), this defines no __set__, so the first plain
    assignment to the attribute on an instance shadows the descriptor
    from then on — handy for lazily computed, cacheable values.
    """

    def __init__(self, fget):
        self.fget = fget

    def __get__(self, obj, cls=None):
        # accessed on the class itself: hand back the descriptor
        if obj is None:
            return self
        return self.fget(obj)
class StructDefNode:
    """Database node describing how an lltype Struct maps onto a C
    struct: chooses the C name, computes the field list (with a GC
    header when needed) and emits the C definition."""
    typetag = 'struct'
    is_external = False
    def __init__(self, db, STRUCT, varlength=1):
        # varlength != 1 selects the variant of a var-sized struct
        # specialized to that fixed length (used for prebuilt instances)
        self.db = db
        self.STRUCT = STRUCT
        self.LLTYPE = STRUCT
        self.varlength = varlength
        if varlength == 1:
            basename = STRUCT._name
            with_number = True
        else:
            # name the fixed-length variant after the generic node
            basename = db.gettypedefnode(STRUCT).barename
            basename = '%s_len%d' % (basename, varlength)
            with_number = False
        if STRUCT._hints.get('union'):
            self.typetag = 'union'
            assert STRUCT._gckind == 'raw'   # not supported: "GcUnion"
        if STRUCT._hints.get('typedef'):
            self.typetag = ''
            assert STRUCT._hints.get('external')
        if self.STRUCT._hints.get('external'):      # XXX hack
            self.is_external = True
        if STRUCT._hints.get('c_name'):
            # an externally defined C name: use it verbatim, and map
            # field names verbatim as well
            self.barename = self.name = STRUCT._hints['c_name']
            self.c_struct_field_name = self.verbatim_field_name
        else:
            (self.barename,
             self.name) = db.namespace.uniquename(basename,
                                                  with_number=with_number,
                                                  bare=True)
            self.prefix = somelettersfrom(STRUCT._name) + '_'
        self.dependencies = {}

    def setup(self):
        # this computes self.fields
        if self.STRUCT._hints.get('external'):      # XXX hack
            self.fields = None    # external definition only
            return
        self.fields = []
        db = self.db
        STRUCT = self.STRUCT
        varlength = self.varlength
        if needs_gcheader(self.STRUCT):
            # the GC policy may prepend header fields
            for fname, T in db.gcpolicy.struct_gcheader_definition(self):
                self.fields.append((fname, db.gettype(T, who_asks=self)))
        for name in STRUCT._names:
            T = self.c_struct_field_type(name)
            if name == STRUCT._arrayfld:
                # the variable-sized tail is specialized to varlength
                typename = db.gettype(T, varlength=self.varlength,
                                         who_asks=self)
            else:
                typename = db.gettype(T, who_asks=self)
            self.fields.append((self.c_struct_field_name(name), typename))
        self.gcinfo   # force it to be computed

    def computegcinfo(self):
        # let the gcpolicy do its own setup
        self.gcinfo = None    # unless overwritten below
        rtti = None
        STRUCT = self.STRUCT
        if isinstance(STRUCT, RttiStruct):
            try:
                rtti = getRuntimeTypeInfo(STRUCT)
            except ValueError:
                # no runtime type info attached to this struct
                pass
        if self.varlength == 1:
            self.db.gcpolicy.struct_setup(self, rtti)
        return self.gcinfo
    # computed lazily on first access, then cached by plain assignment
    gcinfo = defaultproperty(computegcinfo)

    def gettype(self):
        return '%s %s @' % (self.typetag, self.name)

    def c_struct_field_name(self, name):
        # occasionally overridden in __init__():
        #     self.c_struct_field_name = self.verbatim_field_name
        return self.prefix + name

    def verbatim_field_name(self, name):
        if name.startswith('c_'):   # produced in this way by rctypes
            return name[2:]
        else:
            # field names have to start with 'c_' or be meant for names that
            # vanish from the C source, like 'head' if 'inline_head' is set
            raise ValueError("field %r should not be accessed in this way" % (
                name,))

    def c_struct_field_type(self, name):
        return self.STRUCT._flds[name]

    def access_expr(self, baseexpr, fldname):
        # C expression accessing field 'fldname' of struct expr 'baseexpr'
        if self.STRUCT._hints.get('inline_head'):
            first, FIRST = self.STRUCT._first_struct()
            if fldname == first:
                # "invalid" cast according to C99 but that's what CPython
                # requires and does all the time :-/
                return '(*(%s) &(%s))' % (cdecl(self.db.gettype(FIRST), '*'),
                                          baseexpr)
        fldname = self.c_struct_field_name(fldname)
        return '%s.%s' % (baseexpr, fldname)

    def ptr_access_expr(self, baseexpr, fldname):
        # same as access_expr(), but 'baseexpr' is a pointer to the struct
        if self.STRUCT._hints.get('inline_head'):
            first, FIRST = self.STRUCT._first_struct()
            if fldname == first:
                # "invalid" cast according to C99 but that's what CPython
                # requires and does all the time :-/
                return '(*(%s) %s)' % (cdecl(self.db.gettype(FIRST), '*'),
                                       baseexpr)
        fldname = self.c_struct_field_name(fldname)
        return '%s->%s' % (baseexpr, fldname)

    def definition(self):
        """Yield the lines of the C struct/union definition."""
        if self.fields is None:   # external definition only
            return
        yield '%s %s {' % (self.typetag, self.name)
        is_empty = True
        for name, typename in self.fields:
            line = '%s;' % cdecl(typename, name)
            if typename == PrimitiveType[Void]:
                # Void fields appear only as comments
                line = '/* %s */' % line
            else:
                is_empty = False
            yield '\t' + line
        if is_empty:
            # an empty struct is not valid C; pad it
            yield '\t' + 'char _dummy; /* this struct is empty */'
        yield '};'
        for line in self.db.gcpolicy.struct_after_definition(self):
            yield line

    def visitor_lines(self, prefix, on_field):
        # generate C lines applying 'on_field' to every field of the
        # struct expression 'prefix'
        STRUCT = self.STRUCT
        for name in STRUCT._names:
            FIELD_T = self.c_struct_field_type(name)
            cname = self.c_struct_field_name(name)
            for line in on_field('%s.%s' % (prefix, cname),
                                 FIELD_T):
                yield line

    def debug_offsets(self):
        # generate number exprs giving the offset of the elements in the struct
        STRUCT = self.STRUCT
        for name in STRUCT._names:
            FIELD_T = self.c_struct_field_type(name)
            if FIELD_T is Void:
                yield '-1'
            else:
                try:
                    cname = self.c_struct_field_name(name)
                except ValueError:
                    # fields that vanish from the C source have no offset
                    yield '-1'
                else:
                    yield 'offsetof(%s %s, %s)' % (self.typetag,
                                                   self.name, cname)
class ArrayDefNode:
    """Database node describing how an lltype Array maps onto a C
    struct containing a length field and an inlined items[] array."""
    typetag = 'struct'

    def __init__(self, db, ARRAY, varlength=1):
        # varlength != 1 selects a variant specialized to a fixed
        # number of items (for prebuilt arrays)
        self.db = db
        self.ARRAY = ARRAY
        self.LLTYPE = ARRAY
        original_varlength = varlength
        self.gcfields = []
        if ARRAY._hints.get('isrpystring'):
            varlength += 1   # for the NUL char terminator at the end of the string
        self.varlength = varlength
        if original_varlength == 1:
            basename = 'array'
            with_number = True
        else:
            # name the fixed-length variant after the generic node
            basename = db.gettypedefnode(ARRAY).barename
            basename = '%s_len%d' % (basename, varlength)
            with_number = False
        (self.barename,
         self.name) = db.namespace.uniquename(basename, with_number=with_number,
                                              bare=True)
        self.dependencies = {}

    def setup(self):
        db = self.db
        ARRAY = self.ARRAY
        self.itemtypename = db.gettype(ARRAY.OF, who_asks=self)
        self.gcinfo    # force it to be computed
        if needs_gcheader(ARRAY):
            # the GC policy may prepend header fields
            for fname, T in db.gcpolicy.array_gcheader_definition(self):
                self.gcfields.append((fname, db.gettype(T, who_asks=self)))

    def computegcinfo(self):
        # let the gcpolicy do its own setup
        self.gcinfo = None    # unless overwritten below
        if self.varlength == 1:
            self.db.gcpolicy.array_setup(self)
        return self.gcinfo
    # computed lazily on first access, then cached by plain assignment
    gcinfo = defaultproperty(computegcinfo)

    def gettype(self):
        return 'struct %s @' % self.name

    def access_expr(self, baseexpr, index):
        return '%s.items[%d]' % (baseexpr, index)

    def ptr_access_expr(self, baseexpr, index):
        return '%s->items[%d]' % (baseexpr, index)

    def definition(self):
        """Yield the lines of the C struct definition for the array."""
        gcpolicy = self.db.gcpolicy
        yield 'struct %s {' % self.name
        for fname, typename in self.gcfields:
            yield '\t' + cdecl(typename, fname) + ';'
        if not self.ARRAY._hints.get('nolength', False):
            yield '\tlong length;'
        line = '%s;' % cdecl(self.itemtypename, 'items[%d]'% self.varlength)
        if self.ARRAY.OF is Void:    # strange
            line = '/* %s */' % line
        yield '\t' + line
        yield '};'

    def visitor_lines(self, prefix, on_item):
        # generate a C loop applying 'on_item' to every item of the
        # array expression 'prefix'
        ARRAY = self.ARRAY
        # we need a unique name for this C variable, or at least one that does
        # not collide with the expression in 'prefix'
        i = 0
        varname = 'p0'
        while prefix.find(varname) >= 0:
            i += 1
            varname = 'p%d' % i
        body = list(on_item('(*%s)' % varname, ARRAY.OF))
        if body:
            yield '{'
            yield '\t%s = %s.items;' % (cdecl(self.itemtypename, '*' + varname),
                                        prefix)
            yield '\t%s = %s + %s.length;' % (cdecl(self.itemtypename,
                                                    '*%s_end' % varname),
                                              varname,
                                              prefix)
            yield '\twhile (%s != %s_end) {' % (varname, varname)
            for line in body:
                yield '\t\t' + line
            yield '\t\t%s++;' % varname
            yield '\t}'
            yield '}'

    def debug_offsets(self):
        # generate three offsets for debugging inspection
        if not self.ARRAY._hints.get('nolength', False):
            yield 'offsetof(struct %s, length)' % (self.name,)
        else:
            yield '-1'
        if self.ARRAY.OF is not Void:
            yield 'offsetof(struct %s, items[0])' % (self.name,)
            yield 'offsetof(struct %s, items[1])' % (self.name,)
        else:
            yield '-1'
            yield '-1'
class FixedSizeArrayDefNode:
    """Database node for FixedSizeArray types, which are rendered as
    plain C arrays 'ITEM name[length]' rather than as a struct."""
    gcinfo = None
    name = None
    typetag = 'struct'

    def __init__(self, db, FIXEDARRAY):
        self.db = db
        self.FIXEDARRAY = FIXEDARRAY
        self.LLTYPE = FIXEDARRAY
        self.dependencies = {}
        self.itemtypename = db.gettype(FIXEDARRAY.OF, who_asks=self)

    def setup(self):
        """Loops are forbidden by ForwardReference.become() because
        there is no way to declare them in C."""

    def gettype(self):
        FIXEDARRAY = self.FIXEDARRAY
        return self.itemtypename.replace('@', '(@)[%d]' % FIXEDARRAY.length)

    def getptrtype(self):
        return self.itemtypename.replace('@', '*@')

    def access_expr(self, baseexpr, index):
        if not isinstance(index, int):
            # field names are 'item0', 'item1', ... -> extract the index
            assert index.startswith('item')
            index = int(index[4:])
        return '%s[%d]' % (baseexpr, index)
    ptr_access_expr = access_expr

    def definition(self):
        return []    # no declaration is needed

    def visitor_lines(self, prefix, on_item):
        # generate a C loop applying 'on_item' to every item of the
        # fixed-size array expression 'prefix'
        FIXEDARRAY = self.FIXEDARRAY
        # we need a unique name for this C variable, or at least one that does
        # not collide with the expression in 'prefix'
        i = 0
        varname = 'p0'
        while prefix.find(varname) >= 0:
            i += 1
            varname = 'p%d' % i
        body = list(on_item('(*%s)' % varname, FIXEDARRAY.OF))
        if body:
            yield '{'
            yield '\t%s = %s;' % (cdecl(self.itemtypename, '*' + varname),
                                  prefix)
            yield '\t%s = %s + %d;' % (cdecl(self.itemtypename,
                                             '*%s_end' % varname),
                                       varname,
                                       FIXEDARRAY.length)
            yield '\twhile (%s != %s_end) {' % (varname, varname)
            for line in body:
                yield '\t\t' + line
            yield '\t\t%s++;' % varname
            yield '\t}'
            yield '}'

    def debug_offsets(self):
        # XXX not implemented
        return []
class ExtTypeOpaqueDefNode:
    """Type definition node for the OpaqueTypes created by
    pypy.rpython.extfunctable.ExtTypeInfo; rendered as the C struct
    RPyOpaque_<tag>."""
    typetag = 'struct'

    def __init__(self, db, T):
        self.db = db
        self.T = T
        self.name = 'RPyOpaque_%s' % (T.tag,)
        self.dependencies = {}

    def setup(self):
        pass

    def definition(self):
        # the struct itself is defined in the hand-written C support code
        return []
# ____________________________________________________________
class ContainerNode(object):
    """Base class for database nodes representing one concrete
    low-level container (struct/array/function/...) to be emitted as
    C data.  Computes the C name and the pointer expression used to
    reference the container from other code."""
    if USESLOTS:
        __slots__ = """db T obj
                       typename implementationtypename
                       name ptrname
                       globalcontainer""".split()

    def __init__(self, db, T, obj):
        self.db = db
        self.T = T
        self.obj = obj
        #self.dependencies = {}
        self.typename = db.gettype(T)  #, who_asks=self)
        # the implementation type is specialized to the actual length
        # for var-sized containers
        self.implementationtypename = db.gettype(T, varlength=self.getlength())
        parent, parentindex = parentlink(obj)
        if parent is None:
            self.name = db.namespace.uniquename('g_' + self.basename())
            self.globalcontainer = True
        else:
            # inlined inside a parent container: the "name" is the
            # access expression into the parent's C object
            self.globalcontainer = False
            parentnode = db.getcontainernode(parent)
            defnode = db.gettypedefnode(parentnode.T)
            self.name = defnode.access_expr(parentnode.name, parentindex)
        self.ptrname = '(&%s)' % self.name
        if self.typename != self.implementationtypename:
            # cast the length-specialized implementation back to the
            # generic pointer type
            ptrtypename = db.gettype(Ptr(T))
            self.ptrname = '((%s)(void*)%s)' % (cdecl(ptrtypename, ''),
                                                self.ptrname)

    def is_thread_local(self):
        return hasattr(self.T, "_hints") and self.T._hints.get('thread_local')

    def forward_declaration(self):
        yield '%s;' % (
            forward_cdecl(self.implementationtypename,
                self.name, self.db.standalone, self.is_thread_local()))

    def implementation(self):
        # render the C definition: declaration '=' initializer ';'
        lines = list(self.initializationexpr())
        lines[0] = '%s = %s' % (
            cdecl(self.implementationtypename, self.name, self.is_thread_local()),
            lines[0])
        lines[-1] += ';'
        return lines

    def startupcode(self):
        return []

    def getlength(self):
        return 1

assert not USESLOTS or '__dict__' not in dir(ContainerNode)
class StructNode(ContainerNode):
    """Container node emitting the C initializer of a prebuilt struct."""
    nodekind = 'struct'
    if USESLOTS:
        __slots__ = ()

    def basename(self):
        return self.T._name

    def enum_dependencies(self):
        for name in self.T._names:
            yield getattr(self.obj, name)

    def getlength(self):
        # length of the variable-sized tail, if any
        if self.T._arrayfld is None:
            return 1
        else:
            array = getattr(self.obj, self.T._arrayfld)
            return len(array.items)

    def initializationexpr(self, decoration=''):
        """Yield the lines of the C brace-initializer for this struct."""
        is_empty = True
        yield '{'
        defnode = self.db.gettypedefnode(self.T)
        data = []
        if needs_gcheader(self.T):
            # GC header values come first, as decided by the gcpolicy
            for i, thing in enumerate(self.db.gcpolicy.struct_gcheader_initdata(self)):
                data.append(('gcheader%d'%i, thing))
        for name in self.T._names:
            data.append((name, getattr(self.obj, name)))
        # You can only initialise the first field of a union in c
        # XXX what if later fields have some initialisation?
        if hasattr(self.T, "_hints") and self.T._hints.get('union'):
            data = data[0:1]
        for name, value in data:
            if isinstance(value, _pyobjheader):   # hack
                node = self.db.getcontainernode(value)
                lines = [node.pyobj_initexpr()]
            else:
                c_expr = defnode.access_expr(self.name, name)
                lines = generic_initializationexpr(self.db, value, c_expr,
                                                   decoration + name)
            for line in lines:
                yield '\t' + line
            if not lines[0].startswith('/*'):
                is_empty = False
        if is_empty:
            # all fields were Void comments: emit a dummy initializer
            yield '\t%s' % '0,'
        yield '}'

assert not USESLOTS or '__dict__' not in dir(StructNode)
class ArrayNode(ContainerNode):
    """Container node emitting the C initializer of a prebuilt array."""
    nodekind = 'array'
    if USESLOTS:
        __slots__ = ()

    def basename(self):
        return 'array'

    def enum_dependencies(self):
        return self.obj.items

    def getlength(self):
        return len(self.obj.items)

    def initializationexpr(self, decoration=''):
        """Yield the lines of the C brace-initializer for this array:
        optional GC header, the length, then the items."""
        yield '{'
        if needs_gcheader(self.T):
            for i, thing in enumerate(self.db.gcpolicy.array_gcheader_initdata(self)):
                lines = generic_initializationexpr(self.db, thing,
                                                   'gcheader%d'%i,
                                                   '%sgcheader%d' % (decoration, i))
                for line in lines:
                    yield line
        if self.T.OF is Void or len(self.obj.items) == 0:
            # no items to emit: just the length
            yield '\t%d' % len(self.obj.items)
            yield '}'
        elif self.T.OF == Char:
            # char arrays are emitted as one C string constant
            yield '\t%d, %s' % (len(self.obj.items),
                                c_char_array_constant(''.join(self.obj.items)))
            yield '}'
        else:
            yield '\t%d, {' % len(self.obj.items)
            for j in range(len(self.obj.items)):
                value = self.obj.items[j]
                lines = generic_initializationexpr(self.db, value,
                                                   '%s.items[%d]' % (self.name, j),
                                                   '%s%d' % (decoration, j))
                for line in lines:
                    yield '\t' + line
            yield '} }'

assert not USESLOTS or '__dict__' not in dir(ArrayNode)
class FixedSizeArrayNode(ContainerNode):
    """Container node emitting the C initializer of a prebuilt
    FixedSizeArray (rendered as a plain C array, not a struct)."""
    nodekind = 'array'
    if USESLOTS:
        __slots__ = ()

    def __init__(self, db, T, obj):
        ContainerNode.__init__(self, db, T, obj)
        if not isinstance(obj, _subarray):   # XXX hackish
            # a C array decays to a pointer by itself: no '&' needed
            self.ptrname = self.name

    def basename(self):
        return self.T._name

    def enum_dependencies(self):
        for i in range(self.obj.getlength()):
            yield self.obj.getitem(i)

    def getlength(self):
        return 1    # not variable-sized!

    def initializationexpr(self, decoration=''):
        """Yield the lines of the C brace-initializer, one per item."""
        # (the dead 'is_empty' flag copied from StructNode was removed:
        # it was assigned but never read here)
        yield '{'
        # _names == ['item0', 'item1', ...]
        for j, name in enumerate(self.T._names):
            value = getattr(self.obj, name)
            lines = generic_initializationexpr(self.db, value,
                                               '%s[%d]' % (self.name, j),
                                               '%s%d' % (decoration, j))
            for line in lines:
                yield '\t' + line
        yield '}'
def generic_initializationexpr(db, value, access_expr, decoration):
    """Return a list of C source lines initializing 'value' at the
    C location 'access_expr'.  'decoration' is a debugging label woven
    into a comment.  Some values (PyObject pointers, inf/nan floats,
    weak addresses) cannot be written as static initializers; those are
    emitted as placeholders and patched at startup."""
    if isinstance(typeOf(value), ContainerType):
        # inlined sub-container: recurse into its own initializer
        node = db.getcontainernode(value)
        lines = list(node.initializationexpr(decoration+'.'))
        lines[-1] += ','
        return lines
    else:
        comma = ','
        if typeOf(value) == Ptr(PyObject) and value:
            # cannot just write 'gxxx' as a constant in a structure :-(
            node = db.getcontainernode(value._obj)
            expr = 'NULL /*%s*/' % node.name
            node.where_to_copy_me.append('&%s' % access_expr)
        elif typeOf(value) == Float and (isinf(value) or isnan(value)):
            # non-finite floats are patched in at startup
            db.late_initializations.append(('%s' % access_expr, db.get(value)))
            expr = '0.0 /* patched later by %sinfinity */' % (
                '-+'[value > 0])
        elif typeOf(value) == WeakGcAddress and value.ref is not None:
            # weak addresses are also filled in at startup
            db.late_initializations.append(('%s' % access_expr, db.get(value)))
            expr = 'HIDE_POINTER(NULL) /* patched later */'
        else:
            expr = db.get(value)
            if typeOf(value) is Void:
                comma = ''
        expr += comma
        # append the debugging label as a comment on the first line
        i = expr.find('\n')
        if i<0: i = len(expr)
        expr = '%s\t/* %s */%s' % (expr[:i], decoration, expr[i:])
        return expr.split('\n')
# ____________________________________________________________
class FuncNode(ContainerNode):
    """Container node emitting the C code of one low-level function,
    via one or more function code generators."""
    nodekind = 'func'
    # there not so many node of this kind, slots should not
    # be necessary

    def __init__(self, db, T, obj, forcename=None):
        self.globalcontainer = True
        self.db = db
        self.T = T
        self.obj = obj
        if hasattr(obj, 'includes'):
            # external function declared by C headers: keep its name
            self.includes = obj.includes
            self.name = forcename or self.basename()
        else:
            self.name = (forcename or
                         db.namespace.uniquename('g_' + self.basename()))
        if hasattr(obj, 'libraries'):
            self.libraries = obj.libraries
        if hasattr(obj, 'include_dirs'):
            self.include_dirs = obj.include_dirs
        self.make_funcgens()
        #self.dependencies = {}
        self.typename = db.gettype(T)  #, who_asks=self)
        # a C function name already acts as a pointer
        self.ptrname = self.name

    def make_funcgens(self):
        if hasattr(self.obj, 'sources'):
            self.sources = self.obj.sources
        self.funcgens = select_function_code_generators(self.obj, self.db, self.name)
        if self.funcgens:
            argnames = self.funcgens[0].argnames() #Assume identical for all funcgens
            self.implementationtypename = self.db.gettype(self.T, argnames=argnames)

    def basename(self):
        return self.obj._name

    def enum_dependencies(self):
        if not self.funcgens:
            return []
        return self.funcgens[0].allconstantvalues() #Assume identical for all funcgens

    def forward_declaration(self):
        for funcgen in self.funcgens:
            yield '%s;' % (
                forward_cdecl(self.implementationtypename,
                    funcgen.name(self.name), self.db.standalone))

    def implementation(self):
        for funcgen in self.funcgens:
            for s in self.funcgen_implementation(funcgen):
                yield s

    def funcgen_implementation(self, funcgen):
        """Yield the complete C source of the function for 'funcgen'."""
        funcgen.implementation_begin()
        # recompute implementationtypename as the argnames may have changed
        argnames = funcgen.argnames()
        implementationtypename = self.db.gettype(self.T, argnames=argnames)
        yield '%s {' % cdecl(implementationtypename, funcgen.name(self.name))
        #
        # declare the local variables
        #
        localnames = list(funcgen.cfunction_declarations())
        lengths = [len(a) for a in localnames]
        lengths.append(9999)    # sentinel so the inner loop always stops
        start = 0
        while start < len(localnames):
            # pack the local declarations over as few lines as possible
            total = lengths[start] + 8
            end = start+1
            while total + lengths[end] < 77:
                total += lengths[end] + 1
                end += 1
            yield '\t' + ' '.join(localnames[start:end])
            start = end
        #
        # generate the body itself
        #
        bodyiter = funcgen.cfunction_body()
        for line in bodyiter:
            # performs some formatting on the generated body:
            # indent normal lines with tabs; indent labels less than the rest
            if line.endswith(':'):
                if line.startswith('err'):
                    try:
                        nextline = bodyiter.next()
                    except StopIteration:
                        nextline = ''
                    # merge this 'err:' label with the following line
                    line = '\t%s\t%s' % (line, nextline)
                else:
                    line = '    ' + line
            elif line:
                line = '\t' + line
            yield line
        yield '}'
        del bodyiter
        funcgen.implementation_end()
def select_function_code_generators(fnobj, db, functionname):
    """Return the list of code generators to use for 'fnobj', or an
    empty list when the function is implemented externally in C."""
    if hasattr(fnobj, '_external_name'):
        db.externalfuncs[fnobj._external_name] = fnobj
        return []
    if fnobj._callable in extfunc.EXTERNALS:
        # 'fnobj' is one of the ll_xyz() functions with the suggested_primitive
        # flag in pypy.rpython.module.*. The corresponding C wrappers are
        # written by hand in src/ll_*.h, and declared in extfunc.EXTERNALS.
        db.externalfuncs[fnobj._callable] = fnobj
        return []
    if getattr(fnobj._callable, 'suggested_primitive', False):
        raise ValueError("trying to compile suggested primitive %r" % (
            fnobj._callable,))
    if hasattr(fnobj, 'graph'):
        exception_policy = getattr(fnobj, 'exception_policy', None)
        return [FunctionCodeGenerator(fnobj.graph, db, exception_policy,
                                      functionname)]
    if getattr(fnobj, 'external', None) == 'C':
        # deprecated case
        if hasattr(fnobj, 'includes'):
            # assume no wrapper needed
            return []
        return [CExternalFunctionCodeGenerator(fnobj, db)]
    raise ValueError("don't know how to generate code for %r" % (fnobj,))
class ExtType_OpaqueNode(ContainerNode):
    """Container node for an external opaque type, emitted through the
    hand-written RPyOpaque_* C macros."""
    nodekind = 'rpyopaque'

    def enum_dependencies(self):
        # an opaque external object depends on nothing else
        return []

    def initializationexpr(self, decoration=''):
        yield 'RPyOpaque_INITEXPR_%s' % (self.T.tag,)

    def startupcode(self):
        """Yield the start-up call that initializes the opaque object."""
        # XXX how to make this code more generic?
        args = [self.ptrname]
        if self.T.tag == 'ThreadLock':
            # pass the current locked state of the lock as an extra flag
            lock = self.obj.externalobj
            if lock.locked():
                args.append('1')
            else:
                args.append('0')
        yield 'RPyOpaque_SETUP_%s(%s);' % (self.T.tag, ', '.join(args))
def opaquenode_factory(db, T, obj):
    """Pick the node class appropriate for the opaque type T."""
    if T == RuntimeTypeInfo:
        # the GC policy knows how to emit rtti containers
        factory = db.gcpolicy.rtti_node_factory()
        return factory(db, T, obj)
    if hasattr(T, '_exttypeinfo'):
        return ExtType_OpaqueNode(db, T, obj)
    raise Exception("don't know about %r" % (T,))
class PyObjectNode(ContainerNode):
    """Container node for a constant CPython object (a _pyobject)."""
    nodekind = 'pyobj'
    globalcontainer = True
    typename = 'PyObject @'
    implementationtypename = 'PyObject *@'

    def __init__(self, db, T, obj):
        # obj is a _pyobject here; obj.value is the underlying CPython object
        self.db = db
        self.T = T
        self.obj = obj
        name = db.pyobjmaker.computenameof(obj.value)
        self.name = name
        self.ptrname = name
        self.exported_name = name
        # a list of expressions giving places where this constant PyObject
        # must be copied. Normally just in the global variable of the same
        # name, but see also StructNode.initializationexpr() :-(
        self.where_to_copy_me = []
        if name not in db.pyobjmaker.wrappers:
            self.where_to_copy_me.append('&%s' % name)

    def enum_dependencies(self):
        # a borrowed CPython object has no further C-level dependencies
        return []

    def implementation(self):
        # no C implementation lines are generated for a PyObject constant
        return []
class PyObjHeadNode(ContainerNode):
    # Container node for a PyObject header embedded inside a larger
    # structure (a wrapped object with CPython layout).
    nodekind = 'pyobj'
    def __init__(self, db, T, obj):
        ContainerNode.__init__(self, db, T, obj)
        # expressions naming the places this object must be copied into
        self.where_to_copy_me = []
        self.exported_name = db.namespace.uniquename('cpyobj')
    def basename(self):
        raise Exception("PyObjHead should always have a parent")
    def enum_dependencies(self):
        # depend on the type object, and on the setup function if any
        yield self.obj.ob_type
        if self.obj.setup_fnptr:
            yield self.obj.setup_fnptr
    def get_setupfn_name(self):
        # name of the C setup function, or the literal 'NULL' if absent
        if self.obj.setup_fnptr:
            return self.db.get(self.obj.setup_fnptr)
        else:
            return 'NULL'
    def pyobj_initexpr(self):
        # Return the C initializer for the PyObject_HEAD part and record
        # that the real ob_type pointer must be copied in at start-up.
        parent, parentindex = parentlink(self.obj)
        typenode = self.db.getcontainernode(self.obj.ob_type._obj)
        typenode.where_to_copy_me.append('(PyObject **) & %s.ob_type' % (
            self.name,))
        # NOTE(review): only the non-inlined branch carries braces and a
        # trailing comma -- presumably because it appears nested inside a
        # larger initializer; confirm against the callers.
        if typeOf(parent)._hints.get('inline_head'):
            return 'PyObject_HEAD_INIT(NULL)'
        else:
            return '{ PyObject_HEAD_INIT(NULL) },'
def objectnode_factory(db, T, obj):
    """Dispatch to PyObjHeadNode for embedded headers, else PyObjectNode."""
    if isinstance(obj, _pyobjheader):
        cls = PyObjHeadNode
    else:
        cls = PyObjectNode
    return cls(db, T, obj)
# Maps each low-level container type to the node class (or factory
# function) used to emit it as C code.
ContainerNodeFactory = {
    Struct: StructNode,
    GcStruct: StructNode,
    PyStruct: StructNode,
    Array: ArrayNode,
    GcArray: ArrayNode,
    FixedSizeArray: FixedSizeArrayNode,
    FuncType: FuncNode,
    OpaqueType: opaquenode_factory,
    PyObjectType: objectnode_factory,
    }
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # no __file__ (e.g. interactive use): fall back to the script path
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # walk upwards until a directory named 'part' is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)
    pypy_root = os.path.join(head, '')
    # make sure the parent of the 'part' directory is first on sys.path
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)
    # collect already-imported modules living under the pypy root so they
    # can also be registered under their fully dotted names
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod
    # register the collected modules under their dotted names, attaching
    # them to their parent packages when necessary
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
""" clone master version of autopath.py into all subdirs """
from os.path import join, walk
if not this_dir.endswith(join('pypy','tool')):
raise EnvironmentError("can only clone master version "
"'%s'" % join(pypydir, 'tool',_myname))
def sync_walker(arg, dirname, fnames):
if _myname in fnames:
fn = join(dirname, _myname)
f = open(fn, 'rwb+')
try:
if f.read() == arg:
print "checkok", fn
else:
print "syncing", fn
f = open(fn, 'w')
f.write(arg)
finally:
f.close()
s = open(join(pypydir, 'tool', _myname), 'rb').read()
walk(pypydir, sync_walker, s)
# name under which this file is cloned into every test subdirectory
_myname = 'autopath.py'

# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')

# running this file directly re-clones the master copy everywhere
if __name__ == '__main__':
    __clone()
| Python |
from pypy.translator.simplify import join_blocks, cleanup_graph
from pypy.translator.unsimplify import copyvar, varoftype
from pypy.translator.unsimplify import insert_empty_block
from pypy.translator.backendopt import canraise, inline, support, removenoops
from pypy.objspace.flow.model import Block, Constant, Variable, Link, \
c_last_exception, SpaceOperation, checkgraph, FunctionGraph
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.lltypesystem import lloperation
from pypy.rpython.memory.lladdress import NULL
from pypy.rpython import rtyper
from pypy.rpython import rclass
from pypy.rpython.rmodel import inputconst
from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong
from pypy.annotation import model as annmodel
from pypy.rpython.annlowlevel import MixLevelHelperAnnotator
# For each primitive low-level type, the special value that a function
# with that return type returns to signal "an exception has been set".
PrimitiveErrorValue = {lltype.Signed: -1,
                       lltype.Unsigned: r_uint(-1),
                       lltype.SignedLongLong: r_longlong(-1),
                       lltype.UnsignedLongLong: r_ulonglong(-1),
                       lltype.Float: -1.0,
                       lltype.Char: chr(255),
                       lltype.UniChar: unichr(0xFFFF), # XXX is this always right?
                       lltype.Bool: True,
                       llmemory.Address: NULL,
                       llmemory.WeakGcAddress: llmemory.fakeweakaddress(None),
                       lltype.Void: None}
def error_value(T):
    """Return the sentinel value used to signal an exception for type T."""
    if isinstance(T, lltype.Primitive):
        return PrimitiveErrorValue[T]
    if isinstance(T, lltype.Ptr):
        # any pointer type signals the exception with a null pointer
        return lltype.nullptr(T.TO)
    assert 0, "not implemented yet"
def error_constant(T):
    """Return a flow-graph Constant holding error_value(T)."""
    value = error_value(T)
    return Constant(value, T)
class ExceptionTransformer(object):
    """Rewrites flow graphs so that exceptions are propagated explicitly:
    raising stores (type, value) into a global ExcData structure and
    returns a per-type error value; after every operation that can raise,
    a check of that structure (or of the result, for allocations) is
    inserted."""

    def __init__(self, translator):
        self.translator = translator
        self.raise_analyzer = canraise.RaiseAnalyzer(translator)
        edata = translator.rtyper.getexceptiondata()
        self.lltype_of_exception_value = edata.lltype_of_exception_value
        self.lltype_of_exception_type = edata.lltype_of_exception_type
        mixlevelannotator = MixLevelHelperAnnotator(translator.rtyper)
        l2a = annmodel.lltype_to_annotation
        # global mutable structure holding the currently-raised exception
        EXCDATA = lltype.Struct('ExcData',
            ('exc_type', self.lltype_of_exception_type),
            ('exc_value', self.lltype_of_exception_value))
        self.EXCDATA = EXCDATA
        exc_data = lltype.malloc(EXCDATA, immortal=True)
        null_type = lltype.nullptr(self.lltype_of_exception_type.TO)
        null_value = lltype.nullptr(self.lltype_of_exception_value.TO)

        # RPython helper functions, compiled into the target program,
        # that query and update exc_data:
        def rpyexc_occured():
            exc_type = exc_data.exc_type
            return bool(exc_type)
        # XXX tmp HACK for genllvm
        # llvm is strongly typed between bools and ints, which means we have no way of
        # calling rpyexc_occured() from c code with lltype.Bool
        def _rpyexc_occured():
            exc_type = exc_data.exc_type
            return bool(exc_type)
        def rpyexc_fetch_type():
            return exc_data.exc_type
        def rpyexc_fetch_value():
            return exc_data.exc_value
        def rpyexc_clear():
            exc_data.exc_type = null_type
            exc_data.exc_value = null_value
        def rpyexc_raise(etype, evalue):
            # assert(!RPyExceptionOccurred());
            exc_data.exc_type = etype
            exc_data.exc_value = evalue

        # Build typed function pointers to the helpers above; their C-level
        # names (RPyExceptionOccurred, ...) are part of the runtime ABI.
        RPYEXC_OCCURED_TYPE = lltype.FuncType([], lltype.Bool)
        rpyexc_occured_graph = mixlevelannotator.getgraph(
            rpyexc_occured, [], l2a(lltype.Bool))
        self.rpyexc_occured_ptr = Constant(lltype.functionptr(
            RPYEXC_OCCURED_TYPE, "RPyExceptionOccurred",
            graph=rpyexc_occured_graph,
            exception_policy="exc_helper"),
            lltype.Ptr(RPYEXC_OCCURED_TYPE))
        # XXX tmp HACK for genllvm
        _RPYEXC_OCCURED_TYPE = lltype.FuncType([], lltype.Signed)
        _rpyexc_occured_graph = mixlevelannotator.getgraph(
            _rpyexc_occured, [], l2a(lltype.Signed))
        self._rpyexc_occured_ptr = Constant(lltype.functionptr(
            _RPYEXC_OCCURED_TYPE, "_RPyExceptionOccurred",
            graph=_rpyexc_occured_graph,
            exception_policy="exc_helper"),
            lltype.Ptr(_RPYEXC_OCCURED_TYPE))
        RPYEXC_FETCH_TYPE_TYPE = lltype.FuncType([], self.lltype_of_exception_type)
        rpyexc_fetch_type_graph = mixlevelannotator.getgraph(
            rpyexc_fetch_type, [],
            l2a(self.lltype_of_exception_type))
        self.rpyexc_fetch_type_ptr = Constant(lltype.functionptr(
            RPYEXC_FETCH_TYPE_TYPE, "RPyFetchExceptionType",
            graph=rpyexc_fetch_type_graph,
            exception_policy="exc_helper"),
            lltype.Ptr(RPYEXC_FETCH_TYPE_TYPE))
        RPYEXC_FETCH_VALUE_TYPE = lltype.FuncType([], self.lltype_of_exception_value)
        rpyexc_fetch_value_graph = mixlevelannotator.getgraph(
            rpyexc_fetch_value, [],
            l2a(self.lltype_of_exception_value))
        self.rpyexc_fetch_value_ptr = Constant(lltype.functionptr(
            RPYEXC_FETCH_VALUE_TYPE, "RPyFetchExceptionValue",
            graph=rpyexc_fetch_value_graph,
            exception_policy="exc_helper"),
            lltype.Ptr(RPYEXC_FETCH_VALUE_TYPE))
        RPYEXC_CLEAR = lltype.FuncType([], lltype.Void)
        rpyexc_clear_graph = mixlevelannotator.getgraph(
            rpyexc_clear, [], l2a(lltype.Void))
        self.rpyexc_clear_ptr = Constant(lltype.functionptr(
            RPYEXC_CLEAR, "RPyClearException",
            graph=rpyexc_clear_graph,
            exception_policy="exc_helper"),
            lltype.Ptr(RPYEXC_CLEAR))
        RPYEXC_RAISE = lltype.FuncType([self.lltype_of_exception_type,
                                        self.lltype_of_exception_value],
                                       lltype.Void)
        rpyexc_raise_graph = mixlevelannotator.getgraph(
            rpyexc_raise, [l2a(self.lltype_of_exception_type),
                           l2a(self.lltype_of_exception_value)],
            l2a(lltype.Void))
        self.rpyexc_raise_ptr = Constant(lltype.functionptr(
            RPYEXC_RAISE, "RPyRaiseException",
            graph=rpyexc_raise_graph,
            exception_policy="exc_helper",
            jitcallkind='rpyexc_raise', # for the JIT
            ),
            lltype.Ptr(RPYEXC_RAISE))
        mixlevelannotator.finish()
        self.exc_data_ptr = exc_data
        self.cexcdata = Constant(exc_data, lltype.Ptr(EXCDATA))
        self.lltype_to_classdef = translator.rtyper.lltype_to_classdef_mapping()
        # cached null constants, used to clear the exception state
        p = lltype.nullptr(self.lltype_of_exception_type.TO)
        self.c_null_etype = Constant(p, self.lltype_of_exception_type)
        p = lltype.nullptr(self.lltype_of_exception_value.TO)
        self.c_null_evalue = Constant(p, self.lltype_of_exception_value)

    def gen_getfield(self, name, llops):
        # emit 'getfield exc_data.<name>' and return the result variable
        c_name = inputconst(lltype.Void, name)
        return llops.genop('getfield', [self.cexcdata, c_name],
                           resulttype = getattr(self.EXCDATA, name))

    def gen_setfield(self, name, v_value, llops):
        # emit 'setfield exc_data.<name> = v_value'
        c_name = inputconst(lltype.Void, name)
        llops.genop('setfield', [self.cexcdata, c_name, v_value])

    def transform_completely(self):
        # apply the transformation to every graph of the program
        for graph in self.translator.graphs:
            self.create_exception_handling(graph)

    def create_exception_handling(self, graph, always_exc_clear=False):
        """After an exception in a direct_call (or indirect_call), that is not caught
        by an explicit
        except statement, we need to reraise the exception. So after this
        direct_call we need to test if an exception had occurred. If so, we return
        from the current graph with a special value (False/-1/-1.0/null).
        Because of the added exitswitch we need an additional block.
        """
        if hasattr(graph, 'exceptiontransformed'):
            # already transformed -- make sure it was by this transformer
            assert self.exc_data_ptr._same_obj(graph.exceptiontransformed)
            return
        else:
            graph.exceptiontransformed = self.exc_data_ptr
        self.always_exc_clear = always_exc_clear
        join_blocks(graph)
        # collect the blocks before changing them
        n_need_exc_matching_blocks = 0
        n_gen_exc_checks = 0
        for block in list(graph.iterblocks()):
            need_exc_matching, gen_exc_checks = self.transform_block(graph, block)
            n_need_exc_matching_blocks += need_exc_matching
            n_gen_exc_checks += gen_exc_checks
        self.transform_except_block(graph, graph.exceptblock)
        cleanup_graph(graph)
        removenoops.remove_superfluous_keep_alive(graph)
        return n_need_exc_matching_blocks, n_gen_exc_checks

    def transform_block(self, graph, block):
        """Insert an exception check after every can-raise operation of
        'block'; return (need_exc_matching, number_of_checks_added)."""
        need_exc_matching = False
        n_gen_exc_checks = 0
        if block is graph.exceptblock:
            return need_exc_matching, n_gen_exc_checks
        elif block is graph.returnblock:
            return need_exc_matching, n_gen_exc_checks
        last_operation = len(block.operations) - 1
        if block.exitswitch == c_last_exception:
            # the last operation already has explicit exception links;
            # it is handled by insert_matching() below instead
            need_exc_matching = True
            last_operation -= 1
        elif (len(block.exits) == 1 and
              block.exits[0].target is graph.returnblock and
              len(block.operations) and
              (block.exits[0].args[0].concretetype is lltype.Void or
               block.exits[0].args[0] is block.operations[-1].result)):
            # result flows straight into the return block: the caller
            # checks for the exception anyway, so skip the last check
            last_operation -= 1
        lastblock = block
        # walk the operations backwards so that the indices into
        # block.operations stay valid while the block is being split
        for i in range(last_operation, -1, -1):
            op = block.operations[i]
            if not self.raise_analyzer.can_raise(op):
                continue
            splitlink = support.split_block_with_keepalive(block, i+1, False)
            afterblock = splitlink.target
            if lastblock is block:
                lastblock = afterblock
            self.gen_exc_check(block, graph.returnblock, afterblock)
            n_gen_exc_checks += 1
        if need_exc_matching:
            assert lastblock.exitswitch == c_last_exception
            if not self.raise_analyzer.can_raise(lastblock.operations[-1]):
                #print ("operation %s cannot raise, but has exception"
                #       " guarding in graph %s" % (lastblock.operations[-1],
                #                                  graph))
                lastblock.exitswitch = None
                lastblock.recloseblock(lastblock.exits[0])
                lastblock.exits[0].exitcase = None
            else:
                self.insert_matching(lastblock, graph)
        return need_exc_matching, n_gen_exc_checks

    def transform_except_block(self, graph, block):
        # attach an except block -- let's hope that nobody uses it
        graph.exceptblock = Block([Variable('etype'), # exception class
                                   Variable('evalue')]) # exception value
        graph.exceptblock.operations = ()
        graph.exceptblock.closeblock()
        result = Variable()
        result.concretetype = lltype.Void
        # the old except block now stores the exception into exc_data and
        # returns the error value of the graph's return type
        block.operations = [SpaceOperation(
            "direct_call", [self.rpyexc_raise_ptr] + block.inputargs, result)]
        l = Link([error_constant(graph.returnblock.inputargs[0].concretetype)], graph.returnblock)
        block.recloseblock(l)

    def insert_matching(self, block, graph):
        # replace the last (guarded) call by a call to a proxy graph and
        # inline that proxy so that proper exception-matching blocks appear
        proxygraph, op = self.create_proxy_graph(block.operations[-1])
        block.operations[-1] = op
        #non-exception case
        block.exits[0].exitcase = block.exits[0].llexitcase = None
        # use the dangerous second True flag :-)
        inliner = inline.OneShotInliner(
            self.translator, graph, self.lltype_to_classdef,
            inline_guarded_calls=True, inline_guarded_calls_no_matter_what=True,
            raise_analyzer=self.raise_analyzer)
        inliner.inline_once(block, len(block.operations)-1)
        #block.exits[0].exitcase = block.exits[0].llexitcase = False

    def create_proxy_graph(self, op):
        """ creates a graph which calls the original function, checks for
        raised exceptions, fetches and then raises them again. If this graph is
        inlined, the correct exception matching blocks are produced."""
        # XXX slightly annoying: construct a graph by hand
        # but better than the alternative
        result = copyvar(None, op.result)
        opargs = []
        inputargs = []
        callargs = []
        ARGTYPES = []
        # split the call's arguments: Variables become fresh input
        # arguments of the proxy, Constants stay embedded in the operation
        for var in op.args:
            if isinstance(var, Variable):
                v = Variable()
                v.concretetype = var.concretetype
                inputargs.append(v)
                opargs.append(v)
                callargs.append(var)
                ARGTYPES.append(var.concretetype)
            else:
                opargs.append(var)
        newop = SpaceOperation(op.opname, opargs, result)
        startblock = Block(inputargs)
        startblock.operations.append(newop)
        newgraph = FunctionGraph("dummy_exc1", startblock)
        startblock.closeblock(Link([result], newgraph.returnblock))
        newgraph.returnblock.inputargs[0].concretetype = op.result.concretetype
        self.gen_exc_check(startblock, newgraph.returnblock)
        excblock = Block([])
        # the exceptional path: fetch (and clear) exc_data, then jump to
        # the proxy graph's exceptblock with the fetched type and value
        llops = rtyper.LowLevelOpList(None)
        var_value = self.gen_getfield('exc_value', llops)
        var_type = self.gen_getfield('exc_type' , llops)
        self.gen_setfield('exc_value', self.c_null_evalue, llops)
        self.gen_setfield('exc_type', self.c_null_etype, llops)
        excblock.operations[:] = llops
        newgraph.exceptblock.inputargs[0].concretetype = self.lltype_of_exception_type
        newgraph.exceptblock.inputargs[1].concretetype = self.lltype_of_exception_value
        excblock.closeblock(Link([var_type, var_value], newgraph.exceptblock))
        startblock.exits[True].target = excblock
        startblock.exits[True].args = []
        FUNCTYPE = lltype.FuncType(ARGTYPES, op.result.concretetype)
        fptr = Constant(lltype.functionptr(FUNCTYPE, "dummy_exc1", graph=newgraph),
                        lltype.Ptr(FUNCTYPE))
        return newgraph, SpaceOperation("direct_call", [fptr] + callargs, op.result)

    def gen_exc_check(self, block, returnblock, normalafterblock=None):
        """Append the exception check after the last operation of 'block'
        and reroute the exits: False -> return error value, True -> normal
        continuation."""
        #var_exc_occured = Variable()
        #var_exc_occured.concretetype = lltype.Bool
        #block.operations.append(SpaceOperation("safe_call", [self.rpyexc_occured_ptr], var_exc_occured))
        llops = rtyper.LowLevelOpList(None)
        # for operations known to return a fresh non-null pointer on
        # success, a null result alone signals the exception
        alloc_shortcut = False
        spaceop = block.operations[-1]
        if spaceop.opname in ('malloc', 'malloc_varsize'):
            alloc_shortcut = True
        elif spaceop.opname == 'direct_call':
            fnobj = spaceop.args[0].value._obj
            if hasattr(fnobj, '_callable'):
                oopspec = getattr(fnobj._callable, 'oopspec', None)
                if oopspec and oopspec == 'newlist(length)':
                    alloc_shortcut = True
        if alloc_shortcut:
            T = spaceop.result.concretetype
            var_no_exc = llops.genop('ptr_nonzero', [spaceop.result],
                                     lltype.Bool)
        else:
            v_exc_type = self.gen_getfield('exc_type', llops)
            var_no_exc = llops.genop('ptr_iszero', [v_exc_type],
                                     lltype.Bool)
        block.operations.extend(llops)
        block.exitswitch = var_no_exc
        #exception occurred case
        l = Link([error_constant(returnblock.inputargs[0].concretetype)], returnblock)
        l.exitcase = l.llexitcase = False
        #non-exception case
        l0 = block.exits[0]
        l0.exitcase = l0.llexitcase = True
        block.recloseblock(l0, l)
        # freshly malloc'ed GC structures must have their GC pointers
        # zeroed on the normal path before anybody reads them
        insert_zeroing_op = False
        if spaceop.opname == 'malloc':
            insert_zeroing_op = True
        elif spaceop.opname == 'flavored_malloc':
            flavor = spaceop.args[0].value
            if flavor.startswith('gc'):
                insert_zeroing_op = True
        if insert_zeroing_op:
            if normalafterblock is None:
                normalafterblock = insert_empty_block(None, l0)
            v_result = spaceop.result
            if v_result in l0.args:
                result_i = l0.args.index(v_result)
                v_result_after = normalafterblock.inputargs[result_i]
            else:
                v_result_after = copyvar(None, v_result)
                l0.args.append(v_result)
                normalafterblock.inputargs.append(v_result_after)
            normalafterblock.operations.insert(
                0, SpaceOperation('zero_gc_pointers_inside',
                                  [v_result_after],
                                  varoftype(lltype.Void)))
        if self.always_exc_clear:
            # insert code that clears the exception even in the non-exceptional
            # case... this is a hint for the JIT, but pointless otherwise
            if normalafterblock is None:
                normalafterblock = insert_empty_block(None, l0)
            llops = rtyper.LowLevelOpList(None)
            self.gen_setfield('exc_value', self.c_null_evalue, llops)
            self.gen_setfield('exc_type', self.c_null_etype, llops)
            normalafterblock.operations[:0] = llops
| Python |
from stackless import *
# two fresh coroutines that will ping-pong control between each other
c1 = coroutine()
c2 = coroutine()
def f(name, n, other):
    """Switch back and forth with the coroutine 'other' n times,
    printing a trace line before and after each switch."""
    print "starting", name, n
    for i in xrange(n):
        print name, i, "switching to", other
        other.switch()
        print name, i, "back from", other
    return name
# bind both coroutines to f() with each other as partner, then kick off
# the first one; control then alternates between "eins" and "zwei"
c1.bind(f, "eins", 10, c2)
c2.bind(f, "zwei", 10, c1)
c1.switch()
| Python |
from pypy.rpython.lltypesystem import lltype
from pypy.translator.gensupp import NameManager
#
# use __slots__ declarations for node classes etc
# possible to turn it off while refactoring, experimenting
#
USESLOTS = True

# shorthand for the low-level 'pointer to PyObject' type
PyObjPtr = lltype.Ptr(lltype.PyObject)
class ErrorValue:
    """Marker wrapping a low-level type, meaning 'the error value of
    that type' in the generated C code tables."""

    def __init__(self, TYPE):
        # remember which low-level type this error value stands for
        self.TYPE = TYPE
#
# helpers
#
def cdecl(ctype, cname, is_thread_local=False):
    """
    Produce a C declaration from a 'type template' and an identifier.
    The type template must contain a '@' sign at the place where the
    name should be inserted, according to the strange C syntax rules.
    """
    # the (@) case is for functions: a plain (@) around the function name
    # is redundant, very confusing parenthesis, so drop it first
    prefix = ""
    if is_thread_local:
        prefix = "__thread "
    decl = ctype.replace('(@)', '@').replace('@', cname)
    return prefix + decl.strip()
def forward_cdecl(ctype, cname, standalone, is_thread_local=False):
    """Like cdecl(), but for a forward declaration: prefixed with
    'extern' in stand-alone builds."""
    decl = cdecl(ctype, cname)
    if is_thread_local:
        decl = "__thread " + decl
    if standalone:
        return 'extern ' + decl
    return decl
def somelettersfrom(s):
    """Derive a short lowercase abbreviation from the string s."""
    caps = [c for c in s if c.isupper()]
    if not caps:
        # no capitals at all: title-case the string to invent some
        caps = [c for c in s.title() if c.isupper()]
    lows = [c for c in s if c.islower()]
    if lows and caps:
        # use the (invented) initials, lowered
        return ''.join(caps).lower()
    return s[:2].lower()
def is_pointer_to_forward_ref(T):
    """True if T is a Ptr whose target is still an unresolved
    ForwardReference."""
    return (isinstance(T, lltype.Ptr) and
            isinstance(T.TO, lltype.ForwardReference))
def llvalue_from_constant(c):
    """Return the low-level value wrapped by the flow-graph Constant 'c'."""
    try:
        T = c.concretetype
    except AttributeError:
        # untyped constants are implicitly PyObject pointers
        T = PyObjPtr
    if T == PyObjPtr and not isinstance(c.value, lltype._ptr):
        # wrap the plain CPython object into a low-level pointer
        return lltype.pyobjectptr(c.value)
    else:
        if T == lltype.Void:
            return None
        else:
            ACTUAL_TYPE = lltype.typeOf(c.value)
            # If the type is still uncomputed, we can't make this
            # check. Something else will blow up instead, probably
            # very confusingly.
            if not is_pointer_to_forward_ref(ACTUAL_TYPE):
                assert ACTUAL_TYPE == T
            return c.value
class CNameManager(NameManager):
    """NameManager specialized for C output: reserves the C99 keywords."""
    def __init__(self, global_prefix='pypy_'):
        NameManager.__init__(self, global_prefix=global_prefix)
        # keywords cannot be reused. This is the C99 draft's list.
        self.make_reserved_names('''
           auto      enum      restrict  unsigned
           break     extern    return    void
           case      float     short     volatile
           char      for       signed    while
           const     goto      sizeof    _Bool
           continue  if        static    _Complex
           default   inline    struct    _Imaginary
           do        int       switch
           double    long      typedef
           else      register  union
           ''')
def _char_repr(c):
if c in '\\"': return '\\' + c
if ' ' <= c < '\x7F': return c
return '\\%03o' % ord(c)
def _line_repr(s):
    """C-escape every character of s and concatenate the results."""
    return ''.join(map(_char_repr, s))
def c_string_constant(s):
    '''Returns a " "-delimited string literal for C.'''
    # split into chunks of at most 64 source characters per line
    chunks = []
    for start in range(0, len(s), 64):
        chunks.append('"%s"' % _line_repr(s[start:start + 64]))
    return '\n'.join(chunks)
def c_char_array_constant(s):
    '''Returns an initializer for a constant char[N] array,
    where N is exactly len(s). This is either a " "-delimited
    string or a { }-delimited array of small integers.
    '''
    # C++ is stricter than C: a " " literal always carries an implicit
    # extra NULL terminator, so it can only stand for s if the last
    # character of s is already NULL.
    if s.endswith('\x00') and len(s) < 1024:
        return c_string_constant(s[:-1])
    rows = []
    for start in range(0, len(s), 20):
        rows.append(','.join([str(ord(c)) for c in s[start:start + 20]]))
    if len(rows) > 1:
        return '{\n%s}' % ',\n'.join(rows)
    return '{%s}' % ', '.join(rows)
##def gen_assignments(assignments):
## # Generate a sequence of assignments that is possibly reordered
## # to avoid clashes -- i.e. do the equivalent of a tuple assignment,
## # reading all sources first, writing all targets next, but optimized
## allsources = []
## src2dest = {}
## types = {}
## for typename, dest, src in assignments:
## if src != dest: # ignore 'v=v;'
## allsources.append(src)
## src2dest.setdefault(src, []).append(dest)
## types[dest] = typename
## for starting in allsources:
## # starting from some starting variable, follow a chain of assignments
## # 'vn=vn-1; ...; v3=v2; v2=v1; v1=starting;'
## v = starting
## srcchain = []
## while src2dest.get(v):
## srcchain.append(v)
## v = src2dest[v].pop(0)
## if v == starting:
## break # loop
## if not srcchain:
## continue # already done in a previous chain
## srcchain.reverse() # ['vn-1', ..., 'v2', 'v1', 'starting']
## code = []
## for pair in zip([v] + srcchain[:-1], srcchain):
## code.append('%s = %s;' % pair)
## if v == starting:
## # assignment loop 'starting=vn-1; ...; v2=v1; v1=starting;'
## typename = types[starting]
## tmpdecl = cdecl(typename, 'tmp')
## code.insert(0, '{ %s = %s;' % (tmpdecl, starting))
## code[-1] = '%s = tmp; }' % (srcchain[-2],)
## yield ' '.join(code)
def gen_assignments(assignments):
    """Yield C assignment statements for the (typename, dest, src)
    triples, reordered so every source is read before being overwritten."""
    # Generate a sequence of assignments that is possibly reordered
    # to avoid clashes -- i.e. do the equivalent of a tuple assignment,
    # reading all sources first, writing all targets next, but optimized
    srccount = {}
    dest2src = {}
    for typename, dest, src in assignments:
        if src != dest:   # ignore 'v=v;'
            srccount[src] = srccount.get(src, 0) + 1
            dest2src[dest] = src, typename
    while dest2src:
        progress = False
        # emit every assignment whose destination is no longer needed
        # as a source by any pending assignment
        for dst in dest2src.keys():
            if dst not in srccount:
                src, typename = dest2src.pop(dst)
                yield '%s = %s;' % (dst, src)
                srccount[src] -= 1
                if not srccount[src]:
                    del srccount[src]
                progress = True
        if not progress:
            # we are left with only pure disjoint cycles; break them
            while dest2src:
                dst, (src, typename) = dest2src.popitem()
                assert srccount[dst] == 1
                startingpoint = dst
                # rotate the whole cycle through a temporary variable
                tmpdecl = cdecl(typename, 'tmp')
                code = ['{ %s = %s;' % (tmpdecl, dst)]
                while src is not startingpoint:
                    code.append('%s = %s;' % (dst, src))
                    dst = src
                    src, typename = dest2src.pop(dst)
                    assert srccount[dst] == 1
                code.append('%s = tmp; }' % (dst,))
                yield ' '.join(code)
# logging
import py
from pypy.tool.ansi_print import ansi_log
# all genc messages go through the "c" log channel, colorized on a tty
log = py.log.Producer("c")
py.log.setconsumer("c", ansi_log)
| Python |
import autopath
import py
import sys
from pypy.translator.c.node import PyObjectNode, PyObjHeadNode, FuncNode
from pypy.translator.c.database import LowLevelDatabase
from pypy.translator.c.extfunc import pre_include_code_lines
from pypy.translator.gensupp import uniquemodulename, NameManager
from pypy.translator.tool.cbuild import compile_c_module
from pypy.translator.tool.cbuild import build_executable, CCompiler, ProfOpt
from pypy.translator.tool.cbuild import import_module_from_directory
from pypy.translator.tool.cbuild import check_under_under_thread
from pypy.rpython.lltypesystem import lltype
from pypy.tool.udir import udir
from pypy.tool import isolate
from pypy.translator.locality.calltree import CallTree
from pypy.translator.c.support import log, c_string_constant
from pypy.rpython.typesystem import getfunctionptr
from pypy.translator.c import gc
class CBuilder(object):
    """Base class driving C code generation: builds the database of all
    containers to emit and writes out the C source files."""
    c_source_filename = None
    _compiled = False
    symboltable = None
    modulename = None

    def __init__(self, translator, entrypoint, config, libraries=None,
                 gcpolicy=None):
        self.translator = translator
        self.entrypoint = entrypoint
        self.originalentrypoint = entrypoint
        self.gcpolicy = gcpolicy
        # a GC policy that requires stackless forces the stackless transform
        if gcpolicy is not None and gcpolicy.requires_stackless:
            config.translation.stackless = True
        self.config = config
        if libraries is None:
            libraries = []
        self.libraries = libraries
        self.exports = {}

    def build_database(self, exports=[], pyobj_options=None):
        """Create and complete the LowLevelDatabase containing everything
        reachable from the entry point (plus the extra 'exports')."""
        translator = self.translator
        gcpolicyclass = self.get_gcpolicyclass()
        if self.config.translation.stackless:
            if not self.standalone:
                raise Exception("stackless: only for stand-alone builds")
            from pypy.translator.stackless.transform import StacklessTransformer
            stacklesstransformer = StacklessTransformer(
                translator, self.originalentrypoint,
                stackless_gc=gcpolicyclass.requires_stackless)
            # the real entry point becomes the stackless wrapper
            self.entrypoint = stacklesstransformer.slp_entry_point
        else:
            stacklesstransformer = None
        db = LowLevelDatabase(translator, standalone=self.standalone,
                              gcpolicyclass=gcpolicyclass,
                              stacklesstransformer=stacklesstransformer,
                              thread_enabled=self.config.translation.thread)
        # pass extra options into pyobjmaker
        if pyobj_options:
            for key, value in pyobj_options.items():
                setattr(db.pyobjmaker, key, value)
        # we need a concrete gcpolicy to do this
        self.libraries += db.gcpolicy.gc_libraries()
        # give the gc a chance to register interest in the start-up functions it
        # need (we call this for its side-effects of db.get())
        list(db.gcpolicy.gc_startup_code())
        # build entrypoint and eventually other things to expose
        pf = self.getentrypointptr()
        pfname = db.get(pf)
        self.exports[self.entrypoint.func_name] = pf
        for obj in exports:
            # each export may be a (name, obj) tuple, a named object,
            # or anonymous (then its generated C name is used)
            if type(obj) is tuple:
                objname, obj = obj
            elif hasattr(obj, '__name__'):
                objname = obj.__name__
            else:
                objname = None
            po = self.getentrypointptr(obj)
            poname = db.get(po)
            objname = objname or poname
            if objname in self.exports:
                raise NameError, 'duplicate name in export: %s is %s and %s' % (
                    objname, db.get(self.exports[objname]), poname)
            self.exports[objname] = po
        db.complete()
        # add library dependencies declared by individual container nodes
        seen = dict.fromkeys(self.libraries)
        for node in db.globalcontainers():
            if hasattr(node, 'libraries'):
                for library in node.libraries:
                    if library not in seen:
                        self.libraries.append(library)
                        seen[library] = True
        return db

    # class-level cache: whether the C compiler supports '__thread'
    have___thread = None

    def get_gcpolicyclass(self):
        # an explicitly given gcpolicy wins over the configured one
        if self.gcpolicy is None:
            return gc.name_to_gcpolicy[self.config.translation.gc]
        return self.gcpolicy

    def generate_source(self, db=None, defines={}):
        """Write the C sources into a fresh directory under udir and
        return the path of the main .c file."""
        assert self.c_source_filename is None
        translator = self.translator
        if db is None:
            db = self.build_database()
        pf = self.getentrypointptr()
        pfname = db.get(pf)
        if self.modulename is None:
            self.modulename = uniquemodulename('testing')
        modulename = self.modulename
        targetdir = udir.ensure(modulename, dir=1)
        self.targetdir = targetdir
        defines = defines.copy()
        if self.config.translation.countmallocs:
            defines['COUNT_OP_MALLOCS'] = 1
        # probe the C compiler for __thread support only once per process
        if CBuilder.have___thread is None:
            CBuilder.have___thread = check_under_under_thread()
        if not self.standalone:
            assert not self.config.translation.instrument
            from pypy.translator.c.symboltable import SymbolTable
            # XXX fix symboltable
            #self.symboltable = SymbolTable()
            cfile, extra, extraincludes = gen_source(db, modulename, targetdir,
                                                     defines = defines,
                                                     exports = self.exports,
                                                     symboltable = self.symboltable)
        else:
            if self.config.translation.instrument:
                defines['INSTRUMENT'] = 1
            if CBuilder.have___thread:
                if not self.config.translation.no__thread:
                    defines['USE___THREAD'] = 1
            cfile, extra, extraincludes = \
                   gen_source_standalone(db, modulename, targetdir,
                                         entrypointname = pfname,
                                         defines = defines)
        self.c_source_filename = py.path.local(cfile)
        self.extrafiles = extra
        self.extraincludes = extraincludes.keys()
        if self.standalone:
            self.gen_makefile(targetdir)
        return cfile

    def generate_graphs_for_llinterp(self, db=None):
        # prepare the graphs as when the source is generated, but without
        # actually generating the source.
        if db is None:
            db = self.build_database()
        for node in db.containerlist:
            if isinstance(node, FuncNode):
                for funcgen in node.funcgens:
                    funcgen.patch_graph(copy_graph=False)
        return db
class CExtModuleBuilder(CBuilder):
    """CBuilder variant that produces a CPython extension module."""
    standalone = False
    c_ext_module = None

    def getentrypointptr(self, obj=None):
        # the entry point of an extension module is a wrapped PyObject
        if obj is None:
            obj = self.entrypoint
        return lltype.pyobjectptr(obj)

    def compile(self):
        """Compile the generated sources into an extension module."""
        assert self.c_source_filename
        assert not self._compiled
        extra_includes = self.extraincludes
        compile_c_module([self.c_source_filename] + self.extrafiles,
                         self.c_source_filename.purebasename,
                         include_dirs = [autopath.this_dir] + extra_includes,
                         libraries=self.libraries)
        self._compiled = True

    def import_module(self):
        """Import the compiled extension module into this process."""
        assert self._compiled
        assert not self.c_ext_module
        mod = import_module_from_directory(self.c_source_filename.dirpath(),
                                           self.c_source_filename.purebasename)
        self.c_ext_module = mod
        if self.symboltable:
            self.symboltable.attach(mod)   # hopefully temporary hack
        return mod

    def isolated_import(self):
        # import the compiled module in a separate child process instead
        assert self._compiled
        assert not self.c_ext_module
        self.c_ext_module = isolate.Isolate((str(self.c_source_filename.dirpath()),
                                             self.c_source_filename.purebasename))
        return self.c_ext_module

    def get_entry_point(self):
        # fetch the compiled entry-point function from the module
        assert self.c_ext_module
        return getattr(self.c_ext_module,
                       self.entrypoint.func_name)

    def cleanup(self):
        # shut down the child process when an isolated import was used
        assert self.c_ext_module
        if isinstance(self.c_ext_module, isolate.Isolate):
            isolate.close_isolate(self.c_ext_module)
class CStandaloneBuilder(CBuilder):
    """CBuilder variant producing a stand-alone executable (plus a Makefile
    mirroring the compiler configuration)."""
    standalone = True
    executable_name = None      # path of the built binary, set by compile()
    def getentrypointptr(self):
        """Return the low-level function pointer of the RPython entry point."""
        # XXX check that the entrypoint has the correct
        # signature: list-of-strings -> int
        bk = self.translator.annotator.bookkeeper
        return getfunctionptr(bk.getdesc(self.entrypoint).getuniquegraph())
    def getccompiler(self, extra_includes):
        """Build a CCompiler for the generated sources, honouring the
        instrumentation / profile-guided-optimization config options."""
        # XXX for now, we always include Python.h
        from distutils import sysconfig
        python_inc = sysconfig.get_python_inc()
        cc = self.config.translation.cc
        profbased = None
        if self.config.translation.instrumentctl is not None:
            profbased = self.config.translation.instrumentctl
        else:
            # profile-guided optimization, unless explicitly disabled
            profopt = self.config.translation.profopt
            if profopt is not None and not self.config.translation.noprofopt:
                profbased = (ProfOpt, profopt)
        return CCompiler(
            [self.c_source_filename] + self.extrafiles,
            include_dirs = [autopath.this_dir, python_inc] + extra_includes,
            libraries = self.libraries,
            compiler_exe = cc, profbased = profbased)
    def compile(self):
        """Compile and link; returns (and records) the executable's path."""
        assert self.c_source_filename
        assert not self._compiled
        compiler = self.getccompiler(extra_includes=[str(self.targetdir)] +
                                     self.extraincludes)
        if sys.platform == 'darwin':
            compiler.compile_extra.append('-mdynamic-no-pic')
        if self.config.translation.compilerflags:
            compiler.compile_extra.append(self.config.translation.compilerflags)
        if self.config.translation.linkerflags:
            compiler.link_extra.append(self.config.translation.linkerflags)
        compiler.build()
        self.executable_name = str(compiler.outputfilename)
        self._compiled = True
        return self.executable_name
    def cmdexec(self, args=''):
        """Run the compiled executable with *args*; returns its stdout."""
        assert self._compiled
        return py.process.cmdexec('"%s" %s' % (self.executable_name, args))
    def gen_makefile(self, targetdir):
        """Write a Makefile into *targetdir* that rebuilds the executable
        with the same sources/flags as getccompiler() would use."""
        def write_list(lst, prefix):
            # emit 'PREFIX item \' continuation lines; note: writes to the
            # file object 'f' opened further below (closure over a local
            # that is only bound later)
            for i, fn in enumerate(lst):
                print >> f, prefix, fn,
                if i < len(lst)-1:
                    print >> f, '\\'
                else:
                    print >> f
                prefix = ' ' * len(prefix)
        compiler = self.getccompiler(extra_includes=['.'])
        if sys.platform == 'darwin':
            compiler.compile_extra.append('-mdynamic-no-pic')
        if self.config.translation.compilerflags:
            compiler.compile_extra.append(self.config.translation.compilerflags)
        if self.config.translation.linkerflags:
            compiler.link_extra.append(self.config.translation.linkerflags)
        cfiles = []
        ofiles = []
        for fn in compiler.cfilenames:
            fn = py.path.local(fn).basename
            assert fn.endswith('.c')
            cfiles.append(fn)
            ofiles.append(fn[:-2] + '.o')
        if self.config.translation.cc:
            cc = self.config.translation.cc
        else:
            cc = 'gcc'
        if self.config.translation.profopt:
            profopt = self.config.translation.profopt
        else:
            profopt = ''
        f = targetdir.join('Makefile').open('w')
        print >> f, '# automatically generated Makefile'
        print >> f
        print >> f, 'TARGET =', py.path.local(compiler.outputfilename).basename
        print >> f
        write_list(cfiles, 'SOURCES =')
        print >> f
        write_list(ofiles, 'OBJECTS =')
        print >> f
        args = ['-l'+libname for libname in compiler.libraries]
        print >> f, 'LIBS =', ' '.join(args)
        args = ['-L'+path for path in compiler.library_dirs]
        print >> f, 'LIBDIRS =', ' '.join(args)
        args = ['-I'+path for path in compiler.include_dirs]
        write_list(args, 'INCLUDEDIRS =')
        print >> f
        print >> f, 'CFLAGS =', ' '.join(compiler.compile_extra)
        print >> f, 'LDFLAGS =', ' '.join(compiler.link_extra)
        if self.config.translation.thread:
            print >> f, 'TFLAGS = ' + '-pthread'
        else:
            print >> f, 'TFLAGS = ' + ''
        print >> f, 'PROFOPT = ' + profopt
        print >> f, 'CC = ' + cc
        print >> f
        print >> f, MAKEFILE.strip()
        f.close()
def translator2database(translator, entrypoint):
    """Build a complete LowLevelDatabase for *entrypoint*.

    Returns the pair (database, pyobject-pointer-to-entrypoint).
    """
    entrypoint_ptr = lltype.pyobjectptr(entrypoint)
    database = LowLevelDatabase(translator)
    database.get(entrypoint_ptr)
    database.complete()
    return database, entrypoint_ptr
# ____________________________________________________________
# Maximum number of source lines per generated .c file before the
# SourceGenerator splits into a fresh file (see splitnodesimpl()).
SPLIT_CRITERIA = 65535 # support VC++ 7.2
#SPLIT_CRITERIA = 32767 # enable to support VC++ 6.0
MARKER = '/*/*/' # provide an easy way to split after generating
class SourceGenerator:
    """Writes out the generated C code, optionally split across several
    .c files so each one stays below SPLIT_CRITERIA lines (needed for
    some C compilers)."""
    one_source_file = True
    def __init__(self, database, preimplementationlines=[]):
        # NOTE(review): the shared mutable default is harmless here
        # because the list is only iterated, never mutated
        self.database = database
        self.preimpl = preimplementationlines
        self.extrafiles = []
        self.path = None
        self.namespace = NameManager()
    def set_strategy(self, path):
        """Partition the global containers and decide whether to split
        the output into multiple .c files."""
        all_nodes = list(self.database.globalcontainers())
        # split off non-function nodes. We don't try to optimize these, yet.
        funcnodes = []
        othernodes = []
        for node in all_nodes:
            if node.nodekind == 'func':
                funcnodes.append(node)
            else:
                othernodes.append(node)
        # for now, only split for stand-alone programs.
        if self.database.standalone:
            self.one_source_file = False
        self.funcnodes = funcnodes
        self.othernodes = othernodes
        self.path = path
        # disabled this for a while, does worsen things
        # graph = CallTree(self.funcnodes, self.database)
        # graph.simulate()
        # graph.optimize()
        # self.funcnodes = graph.ordered_funcnodes()
    def uniquecname(self, name):
        """Return a unique .c file name derived from *name*."""
        assert name.endswith('.c')
        return self.namespace.uniquename(name[:-2]) + '.c'
    def makefile(self, name):
        """Open *name* for writing in the target dir; .c files are
        remembered in self.extrafiles for the build step."""
        log.writing(name)
        filepath = self.path.join(name)
        if name.endswith('.c'):
            self.extrafiles.append(filepath)
        return filepath.open('w')
    def getextrafiles(self):
        return self.extrafiles
    def getothernodes(self):
        return self.othernodes[:]
    def splitnodesimpl(self, basecname, nodes, nextra, nbetween,
                       split_criteria=SPLIT_CRITERIA):
        """Yield (cname, iterator-of-(node, impl_lines)) pairs, grouping
        node implementations into files of roughly split_criteria lines.
        All yielded sub-iterators share one iterator over *nodes*, so
        each node is emitted exactly once."""
        # produce a sequence of nodes, grouped into files
        # which have no more than SPLIT_CRITERIA lines
        iternodes = iter(nodes)
        done = [False]
        def subiter():
            used = nextra
            for node in iternodes:
                impl = '\n'.join(list(node.implementation())).split('\n')
                if not impl:
                    continue
                cost = len(impl) + nbetween
                yield node, impl
                del impl
                if used + cost > split_criteria:
                    # split if criteria met, unless we would produce nothing.
                    # (py2 generator idiom: StopIteration ends this subiter)
                    raise StopIteration
                used += cost
            done[0] = True
        while not done[0]:
            yield self.uniquecname(basecname), subiter()
    def gen_readable_parts_of_source(self, f):
        """Emit all declarations and implementations, either into *f*
        alone, or split across common headers plus several .c files."""
        if py.std.sys.platform != "win32":
            split_criteria_big = SPLIT_CRITERIA * 4
        else:
            split_criteria_big = SPLIT_CRITERIA
        if self.one_source_file:
            return gen_readable_parts_of_main_c_file(f, self.database,
                                                     self.preimpl)
        #
        # All declarations
        #
        database = self.database
        structdeflist = database.getstructdeflist()
        name = 'structdef.h'
        fi = self.makefile(name)
        print >> f, '#include "%s"' % name
        print >> fi, '/***********************************************************/'
        print >> fi, '/*** Structure definitions ***/'
        print >> fi
        for node in structdeflist:
            if getattr(node, 'is_external', False):
                continue
            print >> fi, '%s %s;' % (node.typetag, node.name)
        print >> fi
        for node in structdeflist:
            for line in node.definition():
                print >> fi, line
        print >> fi
        print >> fi, '/***********************************************************/'
        fi.close()
        name = 'forwarddecl.h'
        fi = self.makefile(name)
        print >> f, '#include "%s"' % name
        print >> fi, '/***********************************************************/'
        print >> fi, '/*** Forward declarations ***/'
        print >> fi
        for node in database.globalcontainers():
            for line in node.forward_declaration():
                print >> fi, line
        print >> fi
        print >> fi, '/***********************************************************/'
        fi.close()
        #
        # Implementation of functions and global structures and arrays
        #
        print >> f
        print >> f, '/***********************************************************/'
        print >> f, '/*** Implementations ***/'
        print >> f
        for line in self.preimpl:
            print >> f, line
        print >> f, '#include "src/g_include.h"'
        print >> f
        name = self.uniquecname('structimpl.c')
        print >> f, '/* %s */' % name
        fc = self.makefile(name)
        print >> fc, '/***********************************************************/'
        print >> fc, '/*** Structure Implementations ***/'
        print >> fc
        print >> fc, '#define PYPY_NOT_MAIN_FILE'
        print >> fc, '#include "common_header.h"'
        print >> fc, '#include "structdef.h"'
        print >> fc, '#include "forwarddecl.h"'
        print >> fc
        print >> fc, '#include "src/g_include.h"'
        print >> fc
        print >> fc, MARKER
        print >> fc, '/***********************************************************/'
        fc.close()
        # 11+1 header lines printed above each non-function node file
        nextralines = 11 + 1
        for name, nodeiter in self.splitnodesimpl('nonfuncnodes.c',
                                                  self.othernodes,
                                                  nextralines, 1):
            print >> f, '/* %s */' % name
            fc = self.makefile(name)
            print >> fc, '/***********************************************************/'
            print >> fc, '/*** Non-function Implementations ***/'
            print >> fc
            print >> fc, '#define PYPY_NOT_MAIN_FILE'
            print >> fc, '#include "common_header.h"'
            print >> fc, '#include "structdef.h"'
            print >> fc, '#include "forwarddecl.h"'
            print >> fc
            print >> fc, '#include "src/g_include.h"'
            print >> fc
            print >> fc, MARKER
            for node, impl in nodeiter:
                print >> fc, '\n'.join(impl)
                print >> fc, MARKER
            print >> fc, '/***********************************************************/'
            fc.close()
        # function files additionally embed the preimplementation lines
        nextralines = 8 + len(self.preimpl) + 4 + 1
        for name, nodeiter in self.splitnodesimpl('implement.c',
                                                  self.funcnodes,
                                                  nextralines, 1,
                                                  split_criteria_big):
            print >> f, '/* %s */' % name
            fc = self.makefile(name)
            print >> fc, '/***********************************************************/'
            print >> fc, '/*** Implementations ***/'
            print >> fc
            print >> fc, '#define PYPY_NOT_MAIN_FILE'
            print >> fc, '#include "common_header.h"'
            print >> fc, '#include "structdef.h"'
            print >> fc, '#include "forwarddecl.h"'
            print >> fc
            for line in self.preimpl:
                print >> fc, line
            print >> fc
            print >> fc, '#include "src/g_include.h"'
            print >> fc
            print >> fc, MARKER
            for node, impl in nodeiter:
                print >> fc, '\n'.join(impl)
                print >> fc, MARKER
            print >> fc, '/***********************************************************/'
            fc.close()
        print >> f
# this function acts as the fallback for small sources for now.
# Maybe we drop this completely if source splitting is the way
# to go. Currently, I'm quite fine with keeping a working fallback.
def gen_readable_parts_of_main_c_file(f, database, preimplementationlines=[]):
    """Emit all declarations and implementations into the single file *f*
    (the non-split fallback used for small, non-standalone sources)."""
    #
    # All declarations
    #
    structdeflist = database.getstructdeflist()
    print >> f
    print >> f, '/***********************************************************/'
    print >> f, '/*** Structure definitions ***/'
    print >> f
    for node in structdeflist:
        if node.name and not getattr(node, 'is_external', False):
            print >> f, '%s %s;' % (node.typetag, node.name)
    print >> f
    for node in structdeflist:
        for line in node.definition():
            print >> f, line
    print >> f
    print >> f, '/***********************************************************/'
    print >> f, '/*** Forward declarations ***/'
    print >> f
    for node in database.globalcontainers():
        for line in node.forward_declaration():
            print >> f, line
    #
    # Implementation of functions and global structures and arrays
    #
    print >> f
    print >> f, '/***********************************************************/'
    print >> f, '/*** Implementations ***/'
    print >> f
    for line in preimplementationlines:
        print >> f, line
    print >> f, '#include "src/g_include.h"'
    print >> f
    # separate consecutive implementations by a blank line
    blank = True
    for node in database.globalcontainers():
        if blank:
            print >> f
            blank = False
        for line in node.implementation():
            print >> f, line
            blank = True
def gen_startupcode(f, database):
    """Emit the C function RPython_StartupCode() which runs the GC
    startup code, late initializations, and every node's startup code;
    it returns a static error string or NULL on success."""
    # generate the start-up code and put it into a function
    print >> f, 'char *RPython_StartupCode(void) {'
    print >> f, '\tchar *error = NULL;'
    for line in database.gcpolicy.gc_startup_code():
        print >> f,"\t" + line
    # put float infinities in global constants, we should not have so many of them for now to make
    # a table+loop preferable
    for dest, value in database.late_initializations:
        print >> f, "\t%s = %s;" % (dest, value)
    # check the error flag between nodes, but not before the first one
    firsttime = True
    for node in database.containerlist:
        lines = list(node.startupcode())
        if lines:
            if firsttime:
                firsttime = False
            else:
                print >> f, '\tif (error) return error;'
            for line in lines:
                print >> f, '\t'+line
    print >> f, '\treturn error;'
    print >> f, '}'
def extra_information(database):
    """Collect per-node build metadata from all global containers.

    Scans every global container node for the optional attributes
    'includes', 'sources' and 'include_dirs' and merges them,
    deduplicated.

    Returns a tuple (includes, sources, include_dirs) where *includes*
    and *sources* are sorted lists and *include_dirs* is a dict used as
    a set (the keys are the directories).
    """
    includes = {}
    sources = {}
    include_dirs = {}
    for node in database.globalcontainers():
        # getattr with an empty default replaces the hasattr guard:
        # nodes without the attribute simply contribute nothing
        for include in getattr(node, 'includes', ()):
            includes[include] = True
        for source in getattr(node, 'sources', ()):
            sources[source] = True
        for include_dir in getattr(node, 'include_dirs', ()):
            include_dirs[include_dir] = True
    # sorted() over the dicts yields sorted key lists, same result as
    # the py2-only keys()/.sort() dance but portable
    return sorted(includes), sorted(sources), include_dirs
def gen_source_standalone(database, modulename, targetdir,
                          entrypointname, defines={}):
    """Generate the C sources for a stand-alone executable.

    Writes <modulename>.c plus common_header.h (and split files via
    SourceGenerator) into *targetdir*; returns the tuple
    (main filename, extra source files, extra include dirs).
    """
    assert database.standalone
    if isinstance(targetdir, str):
        targetdir = py.path.local(targetdir)
    filename = targetdir.join(modulename + '.c')
    f = filename.open('w')
    incfilename = targetdir.join('common_header.h')
    fi = incfilename.open('w')
    #
    # Header
    #
    print >> f, '#include "common_header.h"'
    print >> f
    defines['PYPY_STANDALONE'] = entrypointname
    for key, value in defines.items():
        print >> fi, '#define %s %s' % (key, value)
    print >> fi, '#define Py_BUILD_CORE /* for Windows: avoid pulling libs in */'
    print >> fi, '#include "pyconfig.h"'
    for line in database.gcpolicy.pre_pre_gc_code():
        print >> fi, line
    print >> fi, '#include "src/g_prerequisite.h"'
    for line in database.gcpolicy.pre_gc_code():
        print >> fi, line
    includes, sources, include_dirs = extra_information(database)
    for include in includes:
        print >> fi, '#include <%s>' % (include,)
    fi.close()
    preimplementationlines = list(
        pre_include_code_lines(database, database.translator.rtyper))
    #
    # 1) All declarations
    # 2) Implementation of functions and global structures and arrays
    #
    sg = SourceGenerator(database, preimplementationlines)
    sg.set_strategy(targetdir)
    sg.gen_readable_parts_of_source(f)
    # 3) start-up code
    print >> f
    gen_startupcode(f, database)
    f.close()
    # instrumentation needs the final counter count, so the header is
    # reopened and appended to after source generation
    if 'INSTRUMENT' in defines:
        fi = incfilename.open('a')
        n = database.instrument_ncounter
        print >>fi, "#define INSTRUMENT_NCOUNTER %d" % n
        fi.close()
    return filename, sg.getextrafiles() + sources, include_dirs
def gen_source(database, modulename, targetdir, defines={}, exports={},
               symboltable=None):
    """Generate the C sources for a CPython extension module.

    Writes <modulename>.c, common_header.h, frozen.py and setup.py into
    *targetdir*; returns the tuple (main filename, extra source files,
    extra include dirs).
    """
    assert not database.standalone
    if isinstance(targetdir, str):
        targetdir = py.path.local(targetdir)
    filename = targetdir.join(modulename + '.c')
    f = filename.open('w')
    incfilename = targetdir.join('common_header.h')
    fi = incfilename.open('w')
    #
    # Header
    #
    print >> f, '#include "common_header.h"'
    print >> f
    for key, value in defines.items():
        print >> fi, '#define %s %s' % (key, value)
    print >> fi, '#include "pyconfig.h"'
    for line in database.gcpolicy.pre_pre_gc_code():
        print >> fi, line
    print >> fi, '#include "src/g_prerequisite.h"'
    for line in database.gcpolicy.pre_gc_code():
        print >> fi, line
    includes, sources, include_dirs = extra_information(database)
    for include in includes:
        print >> fi, '#include <%s>' % (include,)
    fi.close()
    if database.translator is None or database.translator.rtyper is None:
        preimplementationlines = []
    else:
        preimplementationlines = list(
            pre_include_code_lines(database, database.translator.rtyper))
    #
    # 1) All declarations
    # 2) Implementation of functions and global structures and arrays
    #
    sg = SourceGenerator(database, preimplementationlines)
    sg.set_strategy(targetdir)
    sg.gen_readable_parts_of_source(f)
    #
    # Debugging info
    #
    if symboltable:
        print >> f
        print >> f, '/*******************************************************/'
        print >> f, '/*** Debugging info ***/'
        print >> f
        print >> f, 'static int debuginfo_offsets[] = {'
        for node in database.structdefnodes.values():
            for expr in symboltable.generate_type_info(database, node):
                print >> f, '\t%s,' % expr
        print >> f, '\t0 };'
        print >> f, 'static void *debuginfo_globals[] = {'
        for node in database.globalcontainers():
            if not isinstance(node, PyObjectNode):
                result = symboltable.generate_global_info(database, node)
                print >> f, '\t%s,' % (result,)
        print >> f, '\tNULL };'
        print >> f, '#include "src/debuginfo.h"'
    #
    # PyObject support (strange) code
    #
    pyobjmaker = database.pyobjmaker
    print >> f
    print >> f, '/***********************************************************/'
    print >> f, '/*** Table of global PyObjects ***/'
    print >> f
    print >> f, 'static globalobjectdef_t globalobjectdefs[] = {'
    for node in database.containerlist:
        if isinstance(node, (PyObjectNode, PyObjHeadNode)):
            for target in node.where_to_copy_me:
                print >> f, '\t{%s, "%s"},' % (target, node.exported_name)
    print >> f, '\t{ NULL, NULL }\t/* Sentinel */'
    print >> f, '};'
    print >> f
    print >> f, 'static cpyobjheaddef_t cpyobjheaddefs[] = {'
    for node in database.containerlist:
        if isinstance(node, PyObjHeadNode):
            print >> f, '\t{"%s", %s, %s},' % (node.exported_name,
                                               node.ptrname,
                                               node.get_setupfn_name())
    print >> f, '\t{ NULL, NULL, NULL }\t/* Sentinel */'
    print >> f, '};'
    print >> f
    print >> f, '/***********************************************************/'
    print >> f, '/*** Table of functions ***/'
    print >> f
    print >> f, 'static globalfunctiondef_t globalfunctiondefs[] = {'
    wrappers = pyobjmaker.wrappers.items()
    wrappers.sort()
    for globalobject_name, (base_name, wrapper_name, func_doc) in wrappers:
        print >> f, ('\t{&%s, "%s", {"%s", (PyCFunction)%s, '
                     'METH_VARARGS|METH_KEYWORDS, %s}},' % (
            globalobject_name,
            globalobject_name,
            base_name,
            wrapper_name,
            func_doc and c_string_constant(func_doc) or 'NULL'))
    print >> f, '\t{ NULL }\t/* Sentinel */'
    print >> f, '};'
    print >> f, 'static globalfunctiondef_t *globalfunctiondefsptr = &globalfunctiondefs[0];'
    print >> f
    print >> f, '/***********************************************************/'
    print >> f, '/*** Frozen Python bytecode: the initialization code ***/'
    print >> f
    print >> f, 'static char *frozen_initcode[] = {"\\'
    bytecode, originalsource = database.pyobjmaker.getfrozenbytecode()
    # keep the (uncompiled) source around for debugging
    g = targetdir.join('frozen.py').open('w')
    g.write(originalsource)
    g.close()
    def char_repr(c):
        # escape the bytecode into a C string literal, octal for
        # anything outside printable ASCII
        if c in '\\"': return '\\' + c
        if ' ' <= c < '\x7F': return c
        return '\\%03o' % ord(c)
    for i in range(0, len(bytecode), 32):
        print >> f, ''.join([char_repr(c) for c in bytecode[i:i+32]])+'\\'
        if (i+32) % 1024 == 0:
            # start a new string literal to stay below C string limits
            print >> f, '", "\\'
    print >> f, '"};'
    print >> f, "#define FROZEN_INITCODE_SIZE %d" % len(bytecode)
    print >> f
    #
    # Module initialization function
    #
    print >> f, '/***********************************************************/'
    print >> f, '/*** Module initialization function ***/'
    print >> f
    gen_startupcode(f, database)
    print >> f
    print >> f, 'MODULE_INITFUNC(%s)' % modulename
    print >> f, '{'
    print >> f, '\tSETUP_MODULE(%s);' % modulename
    for publicname, pyobjptr in exports.items():
        # some fishing needed to find the name of the obj
        pyobjnode = database.containernodes[pyobjptr._obj]
        print >> f, '\tPyModule_AddObject(m, "%s", %s);' % (publicname,
                                                            pyobjnode.name)
    print >> f, '\tcall_postsetup(m);'
    print >> f, '}'
    f.close()
    #
    # Generate a setup.py while we're at it
    #
    pypy_include_dir = autopath.this_dir
    f = targetdir.join('setup.py').open('w')
    f.write(SETUP_PY % locals())
    f.close()
    return filename, sg.getextrafiles() + sources, include_dirs
SETUP_PY = '''
from distutils.core import setup
from distutils.extension import Extension
from distutils.ccompiler import get_default_compiler
PYPY_INCLUDE_DIR = %(pypy_include_dir)r
extra_compile_args = []
if get_default_compiler() == "unix":
extra_compile_args.extend(["-Wno-unused-label",
"-Wno-unused-variable"])
setup(name="%(modulename)s",
ext_modules = [Extension(name = "%(modulename)s",
sources = ["%(modulename)s.c"],
extra_compile_args = extra_compile_args,
include_dirs = [PYPY_INCLUDE_DIR])])
'''
# Template Makefile written next to the generated sources; the make
# variables ($(TARGET), $(SOURCES), ...) are defined by gen_makefile().
# NB: the \t escapes become real tabs, which Makefile rules require.
MAKEFILE = '''
$(TARGET): $(OBJECTS)
\t$(CC) $(LDFLAGS) $(TFLAGS) -o $@ $(OBJECTS) $(LIBDIRS) $(LIBS)
%.o: %.c
\t$(CC) $(CFLAGS) -o $@ -c $< $(INCLUDEDIRS)
clean:
\trm -f $(OBJECTS) $(TARGET)
debug:
\tmake CFLAGS="-g -DRPY_ASSERT"
debug_exc:
\tmake CFLAGS="-g -DRPY_ASSERT -DDO_LOG_EXC"
debug_mem:
\tmake CFLAGS="-g -DRPY_ASSERT -DNO_OBMALLOC"
profile:
\tmake CFLAGS="-g -pg $(CFLAGS)" LDFLAGS="-pg $(LDFLAGS)"
profopt:
\tmake CFLAGS="-fprofile-generate $(CFLAGS)" LDFLAGS="-fprofile-generate $(LDFLAGS)"
\t./$(TARGET) $(PROFOPT)
\trm -f $(OBJECTS) $(TARGET)
\tmake CFLAGS="-fprofile-use $(CFLAGS)" LDFLAGS="-fprofile-use $(LDFLAGS)"
'''
| Python |
from pypy.rpython.lltypesystem.lltype import \
Primitive, Ptr, typeOf, RuntimeTypeInfo, \
Struct, Array, FuncType, PyObject, Void, \
ContainerType, OpaqueType, FixedSizeArray, _uninitialized
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.lltypesystem.llmemory import Address
from pypy.tool.sourcetools import valid_identifier
from pypy.translator.c.primitive import PrimitiveName, PrimitiveType
from pypy.translator.c.primitive import PrimitiveErrorValue
from pypy.translator.c.node import StructDefNode, ArrayDefNode
from pypy.translator.c.node import FixedSizeArrayDefNode
from pypy.translator.c.node import ContainerNodeFactory, ExtTypeOpaqueDefNode
from pypy.translator.c.support import cdecl, CNameManager, ErrorValue
from pypy.translator.c.support import log
from pypy.translator.c.extfunc import do_the_getting
from pypy import conftest
from pypy.translator.c import gc
# ____________________________________________________________
class LowLevelDatabase(object):
    """Collects every low-level container and type reachable from the
    entry point, assigns them C names, and drives the gc / exception /
    stackless transformers while completing.  The completion order is
    fragile; see the long comment inside complete()."""
    gctransformer = None
    def __init__(self, translator=None, standalone=False,
                 gcpolicyclass=None,
                 stacklesstransformer=None,
                 thread_enabled=False):
        self.translator = translator
        self.standalone = standalone
        self.stacklesstransformer = stacklesstransformer
        if gcpolicyclass is None:
            gcpolicyclass = gc.RefcountingGcPolicy
        self.gcpolicy = gcpolicyclass(self, thread_enabled)
        self.structdefnodes = {}
        self.pendingsetupnodes = []
        self.containernodes = {}
        self.containerlist = []
        self.delayedfunctionnames = {}
        self.delayedfunctionptrs = []
        self.completedcontainers = 0
        self.containerstats = {}
        self.externalfuncs = {}
        self.helper2ptr = {}
        # late_initializations is for when the value you want to
        # assign to a constant object is something C doesn't think is
        # constant
        self.late_initializations = []
        self.namespace = CNameManager()
        if not standalone:
            from pypy.translator.c.pyobj import PyObjMaker
            self.pyobjmaker = PyObjMaker(self.namespace, self, translator)
        if translator is None or translator.rtyper is None:
            self.exctransformer = None
        else:
            self.exctransformer = translator.getexceptiontransformer()
        if translator is not None:
            self.gctransformer = self.gcpolicy.transformerclass(translator)
        self.completed = False
        self.instrument_ncounter = 0
    def gettypedefnode(self, T, varlength=1):
        """Return (creating and caching if needed) the def-node for the
        struct/array/opaque type *T*; varlength > 1 keys a distinct node."""
        if varlength <= 1:
            varlength = 1 # it's C after all
            key = T
        else:
            key = T, varlength
        try:
            node = self.structdefnodes[key]
        except KeyError:
            if isinstance(T, Struct):
                if isinstance(T, FixedSizeArray):
                    node = FixedSizeArrayDefNode(self, T)
                else:
                    node = StructDefNode(self, T, varlength)
            elif isinstance(T, Array):
                node = ArrayDefNode(self, T, varlength)
            elif isinstance(T, OpaqueType) and hasattr(T, '_exttypeinfo'):
                node = ExtTypeOpaqueDefNode(self, T)
            else:
                raise Exception("don't know about %r" % (T,))
            self.structdefnodes[key] = node
            self.pendingsetupnodes.append(node)
        return node
    def gettype(self, T, varlength=1, who_asks=None, argnames=[]):
        """Return the C type expression for *T*, as a template with '@'
        marking where the declared name goes (see cdecl())."""
        if isinstance(T, Primitive):
            return PrimitiveType[T]
        elif isinstance(T, Ptr):
            if isinstance(T.TO, FixedSizeArray):
                # /me blames C
                node = self.gettypedefnode(T.TO)
                return node.getptrtype()
            else:
                typename = self.gettype(T.TO)   # who_asks not propagated
                return typename.replace('@', '*@')
        elif isinstance(T, (Struct, Array)):
            node = self.gettypedefnode(T, varlength=varlength)
            if who_asks is not None:
                who_asks.dependencies[node] = True
            return node.gettype()
        elif T == PyObject:
            return 'PyObject @'
        elif isinstance(T, FuncType):
            resulttype = self.gettype(T.RESULT)
            argtypes = []
            for i in range(len(T.ARGS)):
                if T.ARGS[i] is not Void:
                    argtype = self.gettype(T.ARGS[i])
                    try:
                        argname = argnames[i]
                    except IndexError:
                        argname = ''
                    argtypes.append(cdecl(argtype, argname))
            argtypes = ', '.join(argtypes) or 'void'
            return resulttype.replace('@', '(@)(%s)' % argtypes)
        elif isinstance(T, OpaqueType):
            if T == RuntimeTypeInfo:
                return self.gcpolicy.rtti_type()
            elif hasattr(T, '_exttypeinfo'):
                # for external types (pypy.rpython.extfunctable.declaretype())
                node = self.gettypedefnode(T, varlength=varlength)
                if who_asks is not None:
                    who_asks.dependencies[node] = True
                return 'struct %s @' % node.name
            else:
                #raise Exception("don't know about opaque type %r" % (T,))
                return 'struct %s @' % (
                    valid_identifier('pypy_opaque_' + T.tag),)
        else:
            raise Exception("don't know about type %r" % (T,))
    def getcontainernode(self, container, **buildkwds):
        """Return (creating and caching if needed) the node wrapping the
        given low-level *container* object."""
        try:
            node = self.containernodes[container]
        except KeyError:
            T = typeOf(container)
            if isinstance(T, (lltype.Array, lltype.Struct)):
                if hasattr(self.gctransformer, 'consider_constant'):
                    self.gctransformer.consider_constant(T, container)
            nodefactory = ContainerNodeFactory[T.__class__]
            node = nodefactory(self, T, container, **buildkwds)
            self.containernodes[container] = node
            self.containerlist.append(node)
            kind = getattr(node, 'nodekind', '?')
            self.containerstats[kind] = self.containerstats.get(kind, 0) + 1
            if self.completed:
                assert not node.globalcontainer
                # non-global containers are found very late, e.g. _subarrays
                # via addresses introduced by the GC transformer
        return node
    def get(self, obj):
        """Return the C expression naming *obj* (a primitive value, a
        pointer, or an ErrorValue), registering new containers on the way."""
        if isinstance(obj, ErrorValue):
            T = obj.TYPE
            if isinstance(T, Primitive):
                return PrimitiveErrorValue[T]
            elif isinstance(T, Ptr):
                return 'NULL'
            else:
                raise Exception("don't know about %r" % (T,))
        else:
            T = typeOf(obj)
            if isinstance(T, Primitive):
                return PrimitiveName[T](obj, self)
            elif isinstance(T, Ptr):
                if obj:   # test if the ptr is non-NULL
                    try:
                        container = obj._obj
                    except lltype.DelayedPointer:
                        # hack hack hack: invent a stable name now and
                        # remember the pointer for later resolution
                        name = obj._obj0
                        assert name.startswith('delayed!')
                        n = len('delayed!')
                        if len(name) == n:
                            raise
                        if id(obj) in self.delayedfunctionnames:
                            return self.delayedfunctionnames[id(obj)][0]
                        funcname = name[n:]
                        funcname = self.namespace.uniquename('g_' + funcname)
                        self.delayedfunctionnames[id(obj)] = funcname, obj
                        self.delayedfunctionptrs.append(obj)
                        return funcname
                        # /hack hack hack
                    else:
                        # hack hack hack
                        if id(obj) in self.delayedfunctionnames:
                            # this used to be a delayed function,
                            # make sure we use the same name
                            forcename = self.delayedfunctionnames[id(obj)][0]
                            node = self.getcontainernode(container,
                                                         forcename=forcename)
                            assert node.ptrname == forcename
                            return forcename
                        # /hack hack hack
                        if isinstance(container, int):
                            # special case for tagged odd-valued pointers
                            return '((%s) %d)' % (cdecl(self.gettype(T), ''),
                                                  obj._obj)
                        node = self.getcontainernode(container)
                        return node.ptrname
                else:
                    return '((%s) NULL)' % (cdecl(self.gettype(T), ''), )
            else:
                raise Exception("don't know about %r" % (obj,))
    def complete(self, show_progress=True):
        """Follow all dependencies until the database is closed,
        interleaving the transformers' 'finish' callbacks (see the long
        ordering comment below)."""
        assert not self.completed
        if self.translator and self.translator.rtyper:
            do_the_getting(self, self.translator.rtyper)
        def dump():
            # progress report; reads 'i' from the enclosing scope at
            # call time
            lst = ['%s: %d' % keyvalue
                   for keyvalue in self.containerstats.items()]
            lst.sort()
            log.event('%8d nodes [ %s ]' % (i, ' '.join(lst)))
        i = self.completedcontainers
        if show_progress:
            show_i = (i//1000 + 1) * 1000
        else:
            show_i = -1
        # The order of database completion is fragile with stackless and
        # gc transformers. Here is what occurs:
        #
        # 1. follow dependencies recursively from the entry point: data
        # structures pointing to other structures or functions, and
        # constants in functions pointing to other structures or functions.
        # Because of the mixlevelannotator, this might find delayed
        # (not-annotated-and-rtyped-yet) function pointers. They are
        # not followed at this point. User finalizers (__del__) on the
        # other hand are followed during this step too.
        #
        # 2. gctransformer.finish_helpers() - after this, all functions in
        # the program have been rtyped.
        #
        # 3. follow new dependencies. All previously delayed functions
        # should have been resolved by 2 - they are gc helpers, like
        # ll_finalize(). New FuncNodes are built for them. No more
        # FuncNodes can show up after this step.
        #
        # 4. stacklesstransform.finish() - freeze the stackless resume point
        # table.
        #
        # 5. follow new dependencies (this should be only the new frozen
        # table, which contains only numbers and already-seen function
        # pointers).
        #
        # 6. gctransformer.finish_tables() - freeze the gc types table.
        #
        # 7. follow new dependencies (this should be only the gc type table,
        # which contains only numbers and pointers to ll_finalizer
        # functions seen in step 3).
        #
        # I think that there is no reason left at this point that force
        # step 4 to be done before step 6, nor to have a follow-new-
        # dependencies step inbetween. It is important though to have step 3
        # before steps 4 and 6.
        #
        # This is implemented by interleaving the follow-new-dependencies
        # steps with calls to the next 'finish' function from the following
        # list:
        finish_callbacks = []
        if self.gctransformer:
            finish_callbacks.append(self.gctransformer.finish_helpers)
        if self.stacklesstransformer:
            finish_callbacks.append(self.stacklesstransformer.finish)
        if self.gctransformer:
            finish_callbacks.append(self.gctransformer.finish_tables)
        def add_dependencies(newdependencies):
            for value in newdependencies:
                #if isinstance(value, _uninitialized):
                # continue
                if isinstance(typeOf(value), ContainerType):
                    self.getcontainernode(value)
                else:
                    self.get(value)
        while True:
            while True:
                if hasattr(self, 'pyobjmaker'):
                    self.pyobjmaker.collect_initcode()
                while self.pendingsetupnodes:
                    lst = self.pendingsetupnodes
                    self.pendingsetupnodes = []
                    for nodedef in lst:
                        nodedef.setup()
                if i == len(self.containerlist):
                    break
                node = self.containerlist[i]
                add_dependencies(node.enum_dependencies())
                i += 1
                self.completedcontainers = i
                if i == show_i:
                    dump()
                    show_i += 1000
            if self.delayedfunctionptrs:
                lst = self.delayedfunctionptrs
                self.delayedfunctionptrs = []
                progress = False
                for fnptr in lst:
                    try:
                        fnptr._obj
                    except lltype.DelayedPointer:   # still not resolved
                        self.delayedfunctionptrs.append(fnptr)
                    else:
                        self.get(fnptr)
                        progress = True
                if progress:
                    continue   # progress - follow all dependencies again
            if finish_callbacks:
                finish = finish_callbacks.pop(0)
                newdependencies = finish()
                if newdependencies:
                    add_dependencies(newdependencies)
                continue   # progress - follow all dependencies again
            break   # database is now complete
        assert not self.delayedfunctionptrs
        self.completed = True
        if show_progress:
            dump()
    def globalcontainers(self):
        """Iterate over the nodes of global (file-scope) containers."""
        for node in self.containerlist:
            if node.globalcontainer:
                yield node
    def get_lltype_of_exception_value(self):
        """Low-level type of the 'exception value' slot."""
        if self.translator is not None and self.translator.rtyper is not None:
            exceptiondata = self.translator.rtyper.getexceptiondata()
            return exceptiondata.lltype_of_exception_value
        else:
            return Ptr(PyObject)
    def getstructdeflist(self):
        # return the StructDefNodes sorted according to dependencies
        result = []
        seen = {}
        def produce(node):
            # post-order traversal: dependencies first
            if node not in seen:
                for othernode in node.dependencies:
                    produce(othernode)
                result.append(node)
                seen[node] = True
        for node in self.structdefnodes.values():
            produce(node)
        return result
| Python |
import sys
from pypy.rlib.objectmodel import Symbolic, ComputedIntSymbolic
from pypy.rlib.objectmodel import CDefinedIntSymbolic
from pypy.rpython.lltypesystem.rffi import CConstant
from pypy.rpython.lltypesystem.lltype import *
from pypy.rpython.lltypesystem.llmemory import Address, \
AddressOffset, ItemOffset, ArrayItemsOffset, FieldOffset, \
CompositeOffset, ArrayLengthOffset, WeakGcAddress, fakeweakaddress, \
GCHeaderOffset
from pypy.translator.c.support import cdecl
# ____________________________________________________________
#
# Primitives
def name_signed(value, db):
    """Return the C source expression for a Signed constant *value*.

    Symbolic values (field/item offsets, computed constants, ...) are
    rendered as offsetof()/sizeof() expressions; concrete integers become
    'L'-suffixed literals.  While the database is still being built the
    value may be None, which is propagated unchanged.
    """
    if isinstance(value, Symbolic):
        if isinstance(value, FieldOffset):
            structnode = db.gettypedefnode(value.TYPE)
            fieldname = structnode.c_struct_field_name(value.fldname)
            return 'offsetof(%s, %s)' % (cdecl(db.gettype(value.TYPE), ''),
                                         fieldname)
        elif isinstance(value, ItemOffset):
            if value.TYPE != Void:
                return '(sizeof(%s) * %s)' % (cdecl(db.gettype(value.TYPE), ''),
                                              value.repeat)
            return '0'
        elif isinstance(value, ArrayItemsOffset):
            if isinstance(value.TYPE, FixedSizeArray):
                return '0'
            if value.TYPE.OF != Void:
                return 'offsetof(%s, items)' % (cdecl(db.gettype(value.TYPE), ''),)
            # arrays of Void have no 'items' field: data starts after header
            return 'sizeof(%s)' % (cdecl(db.gettype(value.TYPE), ''),)
        elif isinstance(value, ArrayLengthOffset):
            return 'offsetof(%s, length)' % (cdecl(db.gettype(value.TYPE), ''),)
        elif isinstance(value, CompositeOffset):
            parts = [name_signed(sub, db) for sub in value.offsets]
            return '(%s)' % ' + '.join(parts)
        elif type(value) in (AddressOffset, GCHeaderOffset):
            # exact-type checks on purpose: the subclasses above are
            # handled individually
            return '0'
        elif isinstance(value, CDefinedIntSymbolic):
            return str(value.expr)
        elif isinstance(value, ComputedIntSymbolic):
            # compute the concrete integer now and fall through to the
            # literal-rendering code below
            value = value.compute_fn()
        elif isinstance(value, CConstant):
            return value.c_name
        else:
            raise Exception("unimplemented symbolic %r" % value)
    if value is None:
        assert not db.completed
        return None
    if value == -sys.maxint-1:
        # INT_MIN cannot be spelled as a plain literal in C (blame C)
        return '(-%dL-1L)' % sys.maxint
    return '%dL' % value
def name_unsigned(value, db):
    """Render an Unsigned constant as a 'UL'-suffixed C literal."""
    assert value >= 0
    return str(value) + 'UL'
def name_unsignedlonglong(value, db):
    """Render an UnsignedLongLong constant as a 'ULL'-suffixed C literal."""
    assert value >= 0
    return str(value) + 'ULL'
def name_signedlonglong(value, db):
    """Render a SignedLongLong constant as an 'LL'-suffixed C literal."""
    return str(value) + 'LL'
def isinf(x):
    """True for +/-infinity: halving an infinity leaves it unchanged
    (zero is excluded explicitly, NaN fails the equality test)."""
    if x == 0.0:
        return False
    return x / 2 == x
# To get isnan, working x-platform and both on 2.3 and 2.4, is a
# horror. I think this works (for reasons I don't really want to talk
# about), and probably when implemented on top of pypy, too.
def isnan(v):
    # NaN is the only float that compares unequal to itself; the second
    # clause can never be true for a well-behaved float and exists only
    # as a guard for exotic platforms (kept verbatim on purpose).
    return v != v*1.0 or (v == 1.0 and v == 2.0)
def name_float(value, db):
    """Render a Float constant; infinities and NaN are expressed via
    the CPython Py_HUGE_VAL macro."""
    if isinf(value):
        if value > 0:
            return '(Py_HUGE_VAL)'
        return '(-Py_HUGE_VAL)'
    if isnan(value):
        return '(Py_HUGE_VAL/Py_HUGE_VAL)'
    return repr(value)
def name_char(value, db):
    """Render a Char as a C character literal, or as a plain decimal
    integer for non-printable characters."""
    assert type(value) is str and len(value) == 1
    if not (' ' <= value < '\x7f'):
        return str(ord(value))
    escaped = value.replace("\\", r"\\").replace("'", r"\'")
    return "'%s'" % (escaped,)
def name_bool(value, db):
    """Render a Bool as the C literal 0 or 1."""
    return str(int(value))
def name_void(value, db):
    """A Void value has no C representation; emit a placeholder comment."""
    return '/* nothing */'
def name_unichar(value, db):
    """Render a UniChar as its decimal code point."""
    assert type(value) is unicode and len(value) == 1
    return str(ord(value))
def name_address(value, db):
    """Render an Address constant; the null address becomes NULL."""
    if not value:
        return 'NULL'
    return db.get(value.ref())
def name_weakgcaddress(value, db):
    """Render a weak GC address as a HIDE_POINTER() expression; a dead
    or never-set reference becomes HIDE_POINTER(NULL)."""
    assert isinstance(value, fakeweakaddress)
    if value.ref is None:
        return 'HIDE_POINTER(NULL)'
    target = value.ref()
    assert target is not None
    return 'HIDE_POINTER(%s)' % db.get(target)
# On 64 bit machines, SignedLongLong and Signed are the same, so the
# order matters, because we want the Signed implementation.

# maps a primitive lltype to the function rendering a constant of that
# type as C source (all take (value, db))
PrimitiveName = {
    SignedLongLong: name_signedlonglong,
    Signed: name_signed,
    UnsignedLongLong: name_unsignedlonglong,
    Unsigned: name_unsigned,
    Float: name_float,
    Char: name_char,
    UniChar: name_unichar,
    Bool: name_bool,
    Void: name_void,
    Address: name_address,
    WeakGcAddress: name_weakgcaddress,
    }

# maps a primitive lltype to its C declaration template ('@' marks the
# position of the declared name, see cdecl())
PrimitiveType = {
    SignedLongLong: 'long long @',
    Signed: 'long @',
    UnsignedLongLong: 'unsigned long long @',
    Unsigned: 'unsigned long @',
    Float: 'double @',
    Char: 'char @',
    UniChar: 'unsigned int @',
    Bool: 'bool_t @',
    Void: 'void @',
    Address: 'void* @',
    WeakGcAddress: 'GC_hidden_pointer @',
    }

# maps a primitive lltype to the C expression used as the "error"
# return value for functions of that type
PrimitiveErrorValue = {
    SignedLongLong: '-1LL',
    Signed: '-1',
    UnsignedLongLong: '((unsigned long long) -1)',
    Unsigned: '((unsigned) -1)',
    Float: '-1.0',
    Char: '((char) -1)',
    UniChar: '((unsigned) -1)',
    Bool: '0 /* error */',
    Void: '/* error */',
    Address: 'NULL',
    WeakGcAddress: 'HIDE_POINTER(NULL)',
    }
def define_c_primitive(ll_type, c_name):
    """Register C rendering info (literal format, declaration template,
    error value) for an extra primitive type.  Idempotent."""
    if ll_type in PrimitiveName:
        return
    # an unsigned type turns -1 into a positive value when cast
    if ll_type._cast(-1) > 0:
        template = '((%s) %%dULL)' % c_name
    else:
        template = '((%s) %%dLL)' % c_name
    PrimitiveName[ll_type] = lambda value, db: template % value
    PrimitiveType[ll_type] = '%s @' % c_name
    PrimitiveErrorValue[ll_type] = '((%s) -1)' % c_name
# register C spellings for the rctypes arithmetic types; skipped
# entirely when ctypes is not installed
try:
    import ctypes
except ImportError:
    pass
else:
    from pypy.rpython.rctypes import rcarithmetic as rcarith
    for ll_type, c_name in [(rcarith.CByte, 'signed char'),
                            (rcarith.CUByte, 'unsigned char'),
                            (rcarith.CShort, 'short'),
                            (rcarith.CUShort, 'unsigned short'),
                            (rcarith.CInt, 'int'),
                            (rcarith.CUInt, 'unsigned int'),
                            (rcarith.CLong, 'long'),
                            (rcarith.CULong, 'unsigned long'),
                            (rcarith.CLonglong, 'long long'),
                            (rcarith.CULonglong, 'unsigned long long')]:
        define_c_primitive(ll_type, c_name)
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""

    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ is absent (e.g. interactive use); fall back to argv[0]
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))

    # walk upwards until we hit the directory actually named *part*
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)

    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    # ensure the directory containing the 'pypy' package wins on sys.path
    sys.path.insert(0, head)

    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): a file basename essentially never contains '.',
        # so this condition is always true and the aliasing code below
        # never runs -- confirm whether fn.startswith() was intended.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod

    # register dotted aliases so both spellings refer to the same module
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
    """ clone master version of autopath.py into all subdirs """
    from os.path import join, walk
    if not this_dir.endswith(join('pypy','tool')):
        raise EnvironmentError("can only clone master version "
                               "'%s'" % join(pypydir, 'tool',_myname))

    def sync_walker(arg, dirname, fnames):
        # os.path.walk callback: rewrite any out-of-date copy of autopath.py
        if _myname in fnames:
            fn = join(dirname, _myname)
            # NOTE(review): 'rwb+' is not a standard open() mode, and when
            # the file needs rewriting 'f' is rebound before being closed,
            # so the first handle leaks (only the second one reaches the
            # finally clause) -- confirm and clean up.
            f = open(fn, 'rwb+')
            try:
                if f.read() == arg:
                    print "checkok", fn
                else:
                    print "syncing", fn
                    f = open(fn, 'w')
                    f.write(arg)
            finally:
                f.close()
    s = open(join(pypydir, 'tool', _myname), 'rb').read()
    walk(pypydir, sync_walker, s)
# name of this script, used by __clone() when copying it around
_myname = 'autopath.py'

# set guaranteed attributes
# (pypydir: root of the pypy package; this_dir: directory of this file)
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    __clone()
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""

    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ is absent (e.g. interactive use); fall back to argv[0]
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))

    # walk upwards until we hit the directory actually named *part*
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)

    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    # ensure the directory containing the 'pypy' package wins on sys.path
    sys.path.insert(0, head)

    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): a file basename essentially never contains '.',
        # so this condition is always true and the aliasing code below
        # never runs -- confirm whether fn.startswith() was intended.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod

    # register dotted aliases so both spellings refer to the same module
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
    """ clone master version of autopath.py into all subdirs """
    from os.path import join, walk
    if not this_dir.endswith(join('pypy','tool')):
        raise EnvironmentError("can only clone master version "
                               "'%s'" % join(pypydir, 'tool',_myname))

    def sync_walker(arg, dirname, fnames):
        # os.path.walk callback: rewrite any out-of-date copy of autopath.py
        if _myname in fnames:
            fn = join(dirname, _myname)
            # NOTE(review): 'rwb+' is not a standard open() mode, and when
            # the file needs rewriting 'f' is rebound before being closed,
            # so the first handle leaks (only the second one reaches the
            # finally clause) -- confirm and clean up.
            f = open(fn, 'rwb+')
            try:
                if f.read() == arg:
                    print "checkok", fn
                else:
                    print "syncing", fn
                    f = open(fn, 'w')
                    f.write(arg)
            finally:
                f.close()
    s = open(join(pypydir, 'tool', _myname), 'rb').read()
    walk(pypydir, sync_walker, s)
# name of this script, used by __clone() when copying it around
_myname = 'autopath.py'

# set guaranteed attributes
# (pypydir: root of the pypy package; this_dir: directory of this file)
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    __clone()
| Python |
import sys, os
from pypy.translator.translator import TranslationContext, graphof
from pypy.translator.tool.taskengine import SimpleTaskEngine
from pypy.translator.goal import query
from pypy.annotation import model as annmodel
from pypy.annotation.listdef import s_list_of_strings
from pypy.annotation import policy as annpolicy
from py.compat import optparse
from pypy.tool.udir import udir
import py
from pypy.tool.ansi_print import ansi_log
# module-level logger for the whole translation process
log = py.log.Producer("translation")
py.log.setconsumer("translation", ansi_log)

# default configuration values used when no config object is supplied
# to TranslationDriver
DEFAULTS = {
  'translation.gc': 'ref',
  'translation.cc': None,
  'translation.profopt': None,

  'translation.thread': False, # influences GC policy

  'translation.stackless': False,
  'translation.debug': True,
  'translation.insist': False,
  'translation.backend': 'c',
  'translation.fork_before': None,
  'translation.backendopt.raisingop2direct_call' : False,
  'translation.backendopt.merge_if_blocks': True,
}
def taskdef(taskfunc, deps, title, new_state=None, expected_states=None,
            idemp=False):
    """Attach task metadata to *taskfunc* and return it.

    deps            -- list of task names this task depends on
    title           -- human-readable title used for logging
    new_state       -- accepted for compatibility; currently unused
    expected_states -- states in which running the task is legal
    idemp           -- True if the task may safely be re-run
    """
    taskfunc.task_deps = deps
    taskfunc.task_title = title
    # NOTE(review): new_state is ignored and task_newstate is always None;
    # kept as-is to preserve the historical behaviour.
    taskfunc.task_newstate = None
    # bug fix: the default used to be a shared mutable list ([]); give
    # every call its own fresh list instead
    if expected_states is None:
        expected_states = []
    taskfunc.task_expected_states = expected_states
    taskfunc.task_idempotent = idemp
    return taskfunc
# TODO:
# sanity-checks using states
# backends based on the low-level type system; everything else is ootype
_BACKEND_TO_TYPESYSTEM = {
    'c': 'lltype',
    'llvm': 'lltype'
}

def backend_to_typesystem(backend):
    """Return the type system ('lltype' or 'ootype') for *backend*."""
    try:
        return _BACKEND_TO_TYPESYSTEM[backend]
    except KeyError:
        return 'ootype'
class Instrument(Exception):
    """Control-flow exception: raised inside the forked child to request
    a re-compilation with instrumentation enabled (caught in _do())."""
    pass
class ProfInstrument(object):
    """Drives one profiling-instrumented build-and-run cycle."""
    name = "profinstrument"
    def __init__(self, datafile, compiler):
        # datafile: path that will receive the counter dump
        # compiler: the CBuilder used to rebuild the executable
        self.datafile = datafile
        self.compiler = compiler

    def first(self):
        """Build the instrumented executable."""
        self.compiler._build()

    def probe(self, exe, args):
        """Run *exe* once with counter collection enabled via the
        _INSTRUMENT_COUNTERS environment variable."""
        from py.compat import subprocess
        env = os.environ.copy()
        env['_INSTRUMENT_COUNTERS'] = str(self.datafile)
        # NOTE(review): shell=True with interpolated exe/args is shell-
        # injection prone; tolerable only because inputs are trusted here.
        subprocess.call("'%s' %s" % (exe, args), env=env, shell=True)

    def after(self):
        # xxx -- hard-exit the instrumentation child process
        os._exit(0)
class TranslationDriver(SimpleTaskEngine):
    """Task engine driving a complete translation: annotate -> rtype ->
    backend optimisations -> source generation -> compile -> run."""

    def __init__(self, setopts=None, default_goal=None,
                 disable=[],
                 exe_name=None, extmod_name=None,
                 config=None, overrides=None):
        SimpleTaskEngine.__init__(self)
        self.log = log

        if config is None:
            from pypy.config.pypyoption import get_pypy_config
            config = get_pypy_config(DEFAULTS, translating=True)
        self.config = config
        if overrides is not None:
            self.config.override(overrides)
        if setopts is not None:
            self.config.set(**setopts)

        self.exe_name = exe_name
        self.extmod_name = extmod_name
        self.done = {}
        self.disable(disable)

        if default_goal:
            default_goal, = self.backend_select_goals([default_goal])
            if default_goal in self._maybe_skip():
                default_goal = None
        self.default_goal = default_goal
        self.extra_goals = []
        self.exposed = []

        # expose tasks as bound no-argument methods (driver.compile_c(), ...)
        def expose_task(task, backend_goal=None):
            if backend_goal is None:
                backend_goal = task
            def proc():
                return self.proceed(backend_goal)
            self.exposed.append(task)
            setattr(self, task, proc)

        backend, ts = self.get_backend_and_type_system()
        for task in self.tasks:
            explicit_task = task
            parts = task.split('_')
            if len(parts) == 1:
                # tasks without a backend/typesystem suffix.
                # bug fix: this used to read "task in ('annotate')" -- a
                # missing comma made it a *substring* test on the string
                # 'annotate' instead of tuple membership.
                if task in ('annotate',):
                    expose_task(task)
            else:
                task, postfix = parts
                if task in ('rtype', 'backendopt', 'llinterpret',
                            'prehannotatebackendopt', 'hintannotate',
                            'timeshift'):
                    if ts:
                        if ts == postfix:
                            expose_task(task, explicit_task)
                    else:
                        expose_task(explicit_task)
                elif task in ('source', 'compile', 'run'):
                    if backend:
                        if backend == postfix:
                            expose_task(task, explicit_task)
                    elif ts:
                        if ts == backend_to_typesystem(postfix):
                            expose_task(explicit_task)
                    else:
                        expose_task(explicit_task)
    def set_extra_goals(self, goals):
        """Record extra goals that proceed() appends to every request."""
        self.extra_goals = goals
def get_info(self): # XXX more?
d = {'backend': self.config.translation.backend}
return d
def get_backend_and_type_system(self):
type_system = self.config.translation.type_system
backend = self.config.translation.backend
return backend, type_system
    def backend_select_goals(self, goals):
        """Resolve each abstract goal name to a concrete task name by
        trying the backend- and typesystem-specific suffixes in turn."""
        backend, ts = self.get_backend_and_type_system()
        postfixes = [''] + ['_'+p for p in (backend, ts) if p]
        l = []
        for goal in goals:
            for postfix in postfixes:
                cand = "%s%s" % (goal, postfix)
                if cand in self.tasks:
                    new_goal = cand
                    break
            else:
                # no suffix combination matched any known task
                raise Exception, "cannot infer complete goal from: %r" % goal
            l.append(new_goal)
        return l
    def disable(self, to_disable):
        """Remember goals to skip (together with everything that depends
        on them, see _maybe_skip())."""
        self._disabled = to_disable
def _maybe_skip(self):
maybe_skip = []
if self._disabled:
for goal in self.backend_select_goals(self._disabled):
maybe_skip.extend(self._depending_on_closure(goal))
return dict.fromkeys(maybe_skip).keys()
    def setup(self, entry_point, inputtypes, policy=None, extra={}, empty_translator=None):
        """Prepare the translation of *entry_point*.

        inputtypes is None for a standalone program (the entry point then
        receives a list of strings, its argv); policy is the annotator
        policy; extra is kept around for task-specific settings.
        """
        standalone = inputtypes is None
        self.standalone = standalone

        if standalone:
            # standalone entry points always take sys.argv-like input
            inputtypes = [s_list_of_strings]
        self.inputtypes = inputtypes

        if policy is None:
            policy = annpolicy.AnnotatorPolicy()
        if standalone:
            policy.allow_someobjects = False
        self.policy = policy

        self.extra = extra

        if empty_translator:
            translator = empty_translator
        else:
            translator = TranslationContext(config=self.config)

        self.entry_point = entry_point
        self.translator = translator
        self.libdef = None

        self.translator.driver_instrument_result = self.instrument_result
def setup_library(self, libdef, policy=None, extra={}, empty_translator=None):
self.setup(None, None, policy, extra, empty_translator)
self.libdef = libdef
    def instrument_result(self, args):
        """Fork a child that rebuilds and runs with instrumentation, then
        read back and return the counter array from the data file."""
        backend, ts = self.get_backend_and_type_system()
        if backend != 'c' or sys.platform == 'win32':
            raise Exception("instrumentation requires the c backend"
                            " and unix for now")
        from pypy.tool.udir import udir

        datafile = udir.join('_instrument_counters')
        makeProfInstrument = lambda compiler: ProfInstrument(datafile, compiler)

        pid = os.fork()
        if pid == 0:
            # child compiling and running with instrumentation; the
            # Instrument exception unwinds back into _do()
            self.config.translation.instrument = True
            self.config.translation.instrumentctl = (makeProfInstrument,
                                                     args)
            raise Instrument
        else:
            pid, status = os.waitpid(pid, 0)
            if os.WIFEXITED(status):
                status = os.WEXITSTATUS(status)
                if status != 0:
                    raise Exception, "instrumentation child failed: %d" % status
            else:
                raise Exception, "instrumentation child aborted"
            import array, struct
            n = datafile.size()//struct.calcsize('L')
            datafile = datafile.open('rb')
            counters = array.array('L')
            counters.fromfile(datafile, n)
            datafile.close()
            return counters
    def info(self, msg):
        """Log *msg* through the module-level translation logger."""
        log.info(msg)
    def _do(self, goal, func, *args, **kwds):
        """Run one task *func* for *goal*, with memoisation in self.done
        and handling of the Instrument control-flow exception."""
        title = func.task_title
        if goal in self.done:
            self.log.info("already done: %s" % title)
            return
        else:
            self.log.info("%s..." % title)
        instrument = False
        try:
            res = func()
        except Instrument:
            # the forked child requested an instrumented re-compilation
            instrument = True
        if not func.task_idempotent:
            self.done[goal] = True
        if instrument:
            # re-run compilation; the instrumentation hooks call
            # os._exit(), so control never returns here
            self.proceed('compile')
            assert False, 'we should not get here'
        return res
    def task_annotate(self):
        """Annotate the entry point (or the library's functions) and run
        the annotation simplifications."""
        # includes annotation and annotatation simplifications
        translator = self.translator
        policy = self.policy
        self.log.info('with policy: %s.%s' % (policy.__class__.__module__, policy.__class__.__name__))

        annmodel.DEBUG = self.config.translation.debug
        annotator = translator.buildannotator(policy=policy)

        if self.entry_point:
            s = annotator.build_types(self.entry_point, self.inputtypes)

            self.sanity_check_annotation()
            if self.standalone and s.knowntype != int:
                raise Exception("stand-alone program entry point must return an "
                                "int (and not, e.g., None or always raise an "
                                "exception).")
            annotator.simplify()
            return s
        else:
            # library mode: annotate every exported function
            assert self.libdef is not None
            for func, inputtypes in self.libdef.functions:
                annotator.build_types(func, inputtypes)
            self.sanity_check_annotation()
            annotator.simplify()
    #
    task_annotate = taskdef(task_annotate, [], "Annotating&simplifying")
    def sanity_check_annotation(self):
        """Run consistency queries over the annotated graphs and log
        anomalies (odd except-blocks, lost methods, SomeObject pollution)."""
        translator = self.translator
        irreg = query.qoutput(query.check_exceptblocks_qgen(translator))
        if not irreg:
            self.log.info("All exceptblocks seem sane")

        lost = query.qoutput(query.check_methods_qgen(translator))
        assert not lost, "lost methods, something gone wrong with the annotation of method defs"
        self.log.info("No lost method defs")

        so = query.qoutput(query.polluted_qgen(translator))
        tot = len(translator.graphs)
        percent = int(tot and (100.0*so / tot) or 0)
        # if there are a few SomeObjects even if the policy doesn't allow
        # them, it means that they were put there in a controlled way
        # and then it's not a warning.
        if not translator.annotator.policy.allow_someobjects:
            pr = self.log.info
        elif percent == 0:
            pr = self.log.info
        else:
            pr = log.WARNING
        pr("-- someobjectness %2d%% (%d of %d functions polluted by SomeObjects)" % (percent, so, tot))
    def task_rtype_lltype(self):
        """RType the annotated graphs with the lltype type system."""
        rtyper = self.translator.buildrtyper(type_system='lltype')
        # NOTE(review): 'insist' holds the *negation* of the config flag;
        # crash_on_first_typeerror is True only when insist is off --
        # confirm that this inversion is intended.
        insist = not self.config.translation.insist
        rtyper.specialize(dont_simplify_again=True,
                          crash_on_first_typeerror=insist)
    #
    task_rtype_lltype = taskdef(task_rtype_lltype, ['annotate'], "RTyping")
    RTYPE = 'rtype_lltype'
    def task_rtype_ootype(self):
        """RType the annotated graphs with the ootype type system."""
        # Maybe type_system should simply be an option used in task_rtype
        # NOTE(review): same insist/crash inversion as task_rtype_lltype.
        insist = not self.config.translation.insist
        rtyper = self.translator.buildrtyper(type_system="ootype")
        rtyper.specialize(dont_simplify_again=True,
                          crash_on_first_typeerror=insist)
    #
    task_rtype_ootype = taskdef(task_rtype_ootype, ['annotate'], "ootyping")
    OOTYPE = 'rtype_ootype'
    def task_prehannotatebackendopt_lltype(self):
        """Run backend optimisations (without inlining) to clean up the
        graphs before hint-annotation."""
        from pypy.translator.backendopt.all import backend_optimizations
        backend_optimizations(self.translator,
                              inline_threshold=0,
                              merge_if_blocks=True,
                              constfold=True,
                              raisingop2direct_call=False,
                              remove_asserts=True)
    #
    task_prehannotatebackendopt_lltype = taskdef(
        task_prehannotatebackendopt_lltype,
        [RTYPE],
        "Backendopt before Hint-annotate")
    def task_hintannotate_lltype(self):
        """Hint-annotate the portal graph for the JIT timeshifter.

        The portal function and hint-annotator policy come from the
        'portal' entry of self.extra (set up by the target).
        """
        from pypy.jit.hintannotator.annotator import HintAnnotator
        from pypy.jit.hintannotator.model import OriginFlags
        from pypy.jit.hintannotator.model import SomeLLAbstractConstant

        get_portal = self.extra['portal']
        PORTAL, POLICY = get_portal(self)
        t = self.translator
        self.portal_graph = graphof(t, PORTAL)

        hannotator = HintAnnotator(base_translator=t, policy=POLICY)
        self.hint_translator = hannotator.translator
        # every portal argument starts out as an abstract constant
        hs = hannotator.build_types(self.portal_graph,
                                    [SomeLLAbstractConstant(v.concretetype,
                                                            {OriginFlags(): True})
                                     for v in self.portal_graph.getargs()])
        count = hannotator.bookkeeper.nonstuboriggraphcount
        stubcount = hannotator.bookkeeper.stuboriggraphcount
        self.log.info("The hint-annotator saw %d graphs"
                      " (and made stubs for %d graphs)." % (count, stubcount))
        n = len(list(hannotator.translator.graphs[0].iterblocks()))
        self.log.info("portal has %d blocks" % n)
        self.hannotator = hannotator
    #
    task_hintannotate_lltype = taskdef(task_hintannotate_lltype,
                                       ['prehannotatebackendopt_lltype'],
                                       "Hint-annotate")
def task_timeshift_lltype(self):
from pypy.jit.timeshifter.hrtyper import HintRTyper
from pypy.jit.codegen import detect_cpu
cpu = detect_cpu.autodetect()
if cpu == 'i386':
from pypy.jit.codegen.i386.rgenop import RI386GenOp as RGenOp
RGenOp.MC_SIZE = 32 * 1024 * 1024
elif cpu == 'ppc':
from pypy.jit.codegen.ppc.rgenop import RPPCGenOp as RGenOp
RGenOp.MC_SIZE = 32 * 1024 * 1024
else:
raise Exception('Unsuported cpu %r'%cpu)
del self.hint_translator
ha = self.hannotator
t = self.translator
# make the timeshifted graphs
hrtyper = HintRTyper(ha, t.rtyper, RGenOp)
hrtyper.specialize(origportalgraph=self.portal_graph, view=False)
#
task_timeshift_lltype = taskdef(task_timeshift_lltype,
["hintannotate_lltype"],
"Timeshift")
    def task_backendopt_lltype(self):
        """Run the full set of lltype backend optimisations."""
        from pypy.translator.backendopt.all import backend_optimizations
        backend_optimizations(self.translator)
    #
    task_backendopt_lltype = taskdef(task_backendopt_lltype,
                                     [RTYPE,
                                      '??timeshift_lltype'],
                                     "lltype back-end optimisations")
    BACKENDOPT = 'backendopt_lltype'
    def task_backendopt_ootype(self):
        """Run the full set of ootype backend optimisations."""
        from pypy.translator.backendopt.all import backend_optimizations
        backend_optimizations(self.translator)
    #
    task_backendopt_ootype = taskdef(task_backendopt_ootype,
                                     [OOTYPE], "ootype back-end optimisations")
    OOBACKENDOPT = 'backendopt_ootype'
    def task_stackcheckinsertion_lltype(self):
        """Insert stack-overflow checks into the lltype graphs."""
        from pypy.translator.transform import insert_ll_stackcheck
        insert_ll_stackcheck(self.translator)

    task_stackcheckinsertion_lltype = taskdef(
        task_stackcheckinsertion_lltype,
        ['?'+BACKENDOPT, RTYPE, 'annotate'],
        "inserting stack checks")
    STACKCHECKINSERTION = 'stackcheckinsertion_lltype'
    def task_database_c(self):
        """Build the low-level database that feeds C source generation;
        stores the builder and database on self for task_source_c."""
        translator = self.translator
        if translator.annotator is not None:
            # no further annotation changes are allowed from here on
            translator.frozen = True

        standalone = self.standalone

        if standalone:
            from pypy.translator.c.genc import CStandaloneBuilder as CBuilder
        else:
            from pypy.translator.c.genc import CExtModuleBuilder as CBuilder
        cbuilder = CBuilder(self.translator, self.entry_point,
                            config=self.config)
        cbuilder.stackless = self.config.translation.stackless
        if not standalone:     # xxx more messy
            cbuilder.modulename = self.extmod_name
        database = cbuilder.build_database()
        self.log.info("database for generating C source was created")
        self.cbuilder = cbuilder
        self.database = database
    #
    task_database_c = taskdef(task_database_c,
                              [STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE, '?annotate'],
                              "Creating database for generating c source")
def task_source_c(self): # xxx messy
translator = self.translator
cbuilder = self.cbuilder
database = self.database
c_source_filename = cbuilder.generate_source(database)
self.log.info("written: %s" % (c_source_filename,))
#
task_source_c = taskdef(task_source_c, ['database_c'], "Generating c source")
def create_exe(self):
if self.exe_name is not None:
import shutil
exename = mkexename(self.c_entryp)
info = {'backend': self.config.translation.backend}
newexename = self.exe_name % self.get_info()
if '/' not in newexename and '\\' not in newexename:
newexename = './' + newexename
newexename = mkexename(newexename)
shutil.copy(exename, newexename)
self.c_entryp = newexename
self.log.info("created: %s" % (self.c_entryp,))
    def task_compile_c(self): # xxx messy
        """Compile the generated C source; for standalone builds this
        also installs the executable via create_exe()."""
        cbuilder = self.cbuilder
        cbuilder.compile()
        if self.standalone:
            self.c_entryp = cbuilder.executable_name
            self.create_exe()
        else:
            # extension-module mode: import it and grab the entry point
            cbuilder.import_module()
            self.c_entryp = cbuilder.get_entry_point()
    #
    task_compile_c = taskdef(task_compile_c, ['source_c'], "Compiling c source")
    def backend_run(self, backend):
        """Run the compiled result: via the shell for standalone builds,
        otherwise through the 'run' callable from self.extra."""
        c_entryp = self.c_entryp
        standalone = self.standalone
        if standalone:
            # NOTE(review): the path is passed unquoted to the shell;
            # fine for trusted build output only.
            os.system(c_entryp)
        else:
            runner = self.extra.get('run', lambda f: f())
            runner(c_entryp)
    def task_run_c(self):
        """Run the C-backend result (idempotent: may be re-run)."""
        self.backend_run('c')
    #
    task_run_c = taskdef(task_run_c, ['compile_c'],
                         "Running compiled c source",
                         idemp=True)
    def task_llinterpret_lltype(self):
        """Evaluate the entry point's graph with the LLInterpreter
        instead of compiling it, and log the result."""
        from pypy.rpython.llinterp import LLInterpreter
        py.log.setconsumer("llinterp operation", None)

        translator = self.translator
        interp = LLInterpreter(translator.rtyper)
        bk = translator.annotator.bookkeeper
        graph = bk.getdesc(self.entry_point).getuniquegraph()
        # arguments come from the optional 'get_llinterp_args' hook
        v = interp.eval_graph(graph,
                              self.extra.get('get_llinterp_args',
                                             lambda: [])())

        log.llinterpret.event("result -> %s" % v)
    #
    task_llinterpret_lltype = taskdef(task_llinterpret_lltype,
                                      [STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE],
                                      "LLInterpreting")
    def task_source_llvm(self):
        """Emit LLVM source for the translated program."""
        translator = self.translator
        if translator.annotator is None:
            raise ValueError, "llvm requires annotation."

        from pypy.translator.llvm import genllvm

        self.llvmgen = genllvm.GenLLVM(translator, self.standalone)

        llvm_filename = self.llvmgen.gen_source(self.entry_point)
        self.log.info("written: %s" % (llvm_filename,))
    #
    task_source_llvm = taskdef(task_source_llvm,
                               [STACKCHECKINSERTION, BACKENDOPT, RTYPE],
                               "Generating llvm source")
    def task_compile_llvm(self):
        """Compile the generated LLVM source (standalone executable or
        extension module)."""
        gen = self.llvmgen
        if self.standalone:
            exe_name = (self.exe_name or 'testing') % self.get_info()
            self.c_entryp = gen.compile_standalone(exe_name)
            self.create_exe()
        else:
            self.c_module, self.c_entryp = gen.compile_module()
    #
    task_compile_llvm = taskdef(task_compile_llvm,
                                ['source_llvm'],
                                "Compiling llvm source")
    def task_run_llvm(self):
        """Run the LLVM-backend result (idempotent: may be re-run)."""
        self.backend_run('llvm')
    #
    task_run_llvm = taskdef(task_run_llvm, ['compile_llvm'],
                            "Running compiled llvm source",
                            idemp=True)
    def task_source_cl(self):
        """Generate Common Lisp source via GenCL."""
        from pypy.translator.lisp.gencl import GenCL
        self.gen = GenCL(self.translator, self.entry_point)
        filename = self.gen.emitfile()
        self.log.info("Wrote %s" % (filename,))
    task_source_cl = taskdef(task_source_cl, [OOTYPE],
                             'Generating Common Lisp source')

    def task_compile_cl(self):
        """No compilation step for the Common Lisp backend (stub)."""
        pass
    task_compile_cl = taskdef(task_compile_cl, ['source_cl'],
                              'XXX')

    def task_run_cl(self):
        """Not implemented for the Common Lisp backend (stub)."""
        pass
    task_run_cl = taskdef(task_run_cl, ['compile_cl'],
                          'XXX')
def task_source_squeak(self):
from pypy.translator.squeak.gensqueak import GenSqueak
self.gen = GenSqueak(dir, self.translator)
filename = self.gen.gen()
self.log.info("Wrote %s" % (filename,))
task_source_squeak = taskdef(task_source_squeak, [OOTYPE],
'Generating Squeak source')
def task_compile_squeak(self):
pass
task_compile_squeak = taskdef(task_compile_squeak, ['source_squeak'],
'XXX')
def task_run_squeak(self):
pass
task_run_squeak = taskdef(task_run_squeak, ['compile_squeak'],
'XXX')
    def task_source_js(self):
        """Generate Javascript source for the entry point."""
        from pypy.translator.js.js import JS
        self.gen = JS(self.translator, functions=[self.entry_point],
                      stackless=self.config.translation.stackless)
        filename = self.gen.write_source()
        self.log.info("Wrote %s" % (filename,))
    task_source_js = taskdef(task_source_js,
                             [OOTYPE],
                             'Generating Javascript source')

    def task_compile_js(self):
        """Javascript needs no compilation step (stub)."""
        pass
    task_compile_js = taskdef(task_compile_js, ['source_js'],
                              'Skipping Javascript compilation')

    def task_run_js(self):
        """Running the generated Javascript is left to the user (stub)."""
        pass
    task_run_js = taskdef(task_run_js, ['compile_js'],
                          'Please manually run the generated code')
    def task_source_flex(self):
        """Generate Flex (ActionScript) source for the entry point."""
        from pypy.translator.flex.js import JS
        self.gen = JS(self.translator, functions=[self.entry_point],
                      stackless=self.config.translation.stackless)
        filename = self.gen.write_source()
        self.log.info("Wrote %s" % (filename,))
    task_source_flex = taskdef(task_source_flex,
                               [OOTYPE],
                               'Generating Flex source')

    def task_compile_flex(self):
        """Flex needs no compilation step here (stub)."""
        pass
    task_compile_flex = taskdef(task_compile_flex, ['source_flex'],
                                'Skipping Javascript compilation')

    def task_run_flex(self):
        """Running the generated Flex code is left to the user (stub)."""
        pass
    task_run_flex = taskdef(task_run_flex, ['compile_flex'],
                            'Please manually run the generated code')
    def task_source_cli(self):
        """Generate CLI (.NET) source, either for an executable entry
        point or for a library described by self.libdef."""
        from pypy.translator.cli.gencli import GenCli
        from pypy.translator.cli.entrypoint import get_entrypoint

        if self.entry_point is not None: # executable mode
            entry_point_graph = self.translator.graphs[0]
            entry_point = get_entrypoint(entry_point_graph)
        else:
            # library mode
            assert self.libdef is not None
            bk = self.translator.annotator.bookkeeper
            entry_point = self.libdef.get_entrypoint(bk)

        self.gen = GenCli(udir, self.translator, entry_point, config=self.config)
        filename = self.gen.generate_source()
        self.log.info("Wrote %s" % (filename,))
    task_source_cli = taskdef(task_source_cli, ["?" + OOBACKENDOPT, OOTYPE],
                             'Generating CLI source')
    def task_compile_cli(self):
        """Build the CLI executable and wrap it so it is callable from
        Python; restores os-module patches applied for the CLI backend."""
        from pypy.translator.cli.support import unpatch
        from pypy.translator.cli.test.runtest import CliFunctionWrapper
        filename = self.gen.build_exe()
        self.c_entryp = CliFunctionWrapper(filename)
        # restore original os values
        if hasattr(self, 'old_cli_defs'):
            unpatch(*self.old_cli_defs)

        self.log.info("Compiled %s" % filename)
        if self.standalone and self.exe_name:
            self.copy_cli_exe()
    task_compile_cli = taskdef(task_compile_cli, ['source_cli'],
                              'Compiling CLI source')
    def copy_cli_exe(self):
        """Install the CLI build: copy main.exe and pypylib.dll into a
        '<name>-data/' directory and write a mono launcher script."""
        # XXX messy
        import os.path
        import shutil

        main_exe = self.c_entryp._exe
        usession_path, main_exe_name = os.path.split(main_exe)
        pypylib_dll = os.path.join(usession_path, 'pypylib.dll')

        basename = self.exe_name % self.get_info()
        dirname = basename + '-data/'
        if '/' not in dirname and '\\' not in dirname:
            dirname = './' + dirname

        if not os.path.exists(dirname):
            os.makedirs(dirname)
        shutil.copy(main_exe, dirname)
        shutil.copy(pypylib_dll, dirname)

        # launcher script that locates the -data directory next to itself
        newexename = basename
        f = file(newexename, 'w')
        f.write("""#!/bin/bash
mono "$(dirname $0)/$(basename $0)-data/%s" "$@" # XXX doesn't work if it's placed in PATH
""" % main_exe_name)
        f.close()
        os.chmod(newexename, 0755)
    def copy_cli_dll(self):
        """Install a CLI library build: copy the generated dll plus its
        pypylib.dll and stub main.exe into the current directory."""
        import os.path
        import shutil
        dllname = self.gen.outfile
        usession_path, dll_name = os.path.split(dllname)
        pypylib_dll = os.path.join(usession_path, 'pypylib.dll')
        shutil.copy(dllname, '.')
        shutil.copy(pypylib_dll, '.')

        # main.exe is a stub but is needed right now because it's
        # referenced by pypylib.dll.  Will be removed in the future
        translator_path, _ = os.path.split(__file__)
        main_exe = os.path.join(translator_path, 'cli/src/main.exe')
        shutil.copy(main_exe, '.')
        # NOTE(review): dllname is the *source* path here, so this log
        # line joins cwd with a full path -- probably meant dll_name.
        self.log.info("Copied to %s" % os.path.join(os.getcwd(), dllname))

    def task_run_cli(self):
        """Not implemented for the CLI backend (stub)."""
        pass
    task_run_cli = taskdef(task_run_cli, ['compile_cli'],
                              'XXX')
    def task_source_jvm(self):
        """Generate JVM source for the entry point graph."""
        from pypy.translator.jvm.genjvm import GenJvm
        from pypy.translator.jvm.node import EntryPoint

        entry_point_graph = self.translator.graphs[0]
        # non-standalone builds expose the entry point as a function
        is_func = not self.standalone
        entry_point = EntryPoint(entry_point_graph, is_func, is_func)
        self.gen = GenJvm(udir, self.translator, entry_point)
        self.jvmsource = self.gen.generate_source()
        self.log.info("Wrote JVM code")
    task_source_jvm = taskdef(task_source_jvm, ["?" + OOBACKENDOPT, OOTYPE],
                             'Generating JVM source')
    def task_compile_jvm(self):
        """Compile the JVM source; the entry point becomes a callable
        that evals the subprocess output back into Python objects."""
        self.jvmsource.compile()
        # NOTE(review): eval() on the child's stdout -- safe only because
        # that output comes from our own generated code.
        self.c_entryp = lambda *args: eval(self.jvmsource.execute(args))
        self.log.info("Compiled JVM source")
    task_compile_jvm = taskdef(task_compile_jvm, ['source_jvm'],
                              'Compiling JVM source')
    def task_run_jvm(self):
        """Not implemented for the JVM backend (stub)."""
        pass
    task_run_jvm = taskdef(task_run_jvm, ['compile_jvm'],
                           'XXX')
def proceed(self, goals):
if not goals:
if self.default_goal:
goals = [self.default_goal]
else:
self.log.info("nothing to do")
return
elif isinstance(goals, str):
goals = [goals]
goals.extend(self.extra_goals)
goals = self.backend_select_goals(goals)
return self._execute(goals, task_skip = self._maybe_skip())
    def from_targetspec(targetspec_dic, config=None, args=None,
                        empty_translator=None,
                        disable=[],
                        default_goal=None):
        """Build a TranslationDriver from a targetXXX.py namespace dict:
        call its target() function and feed the result into setup()."""
        if args is None:
            args = []

        driver = TranslationDriver(config=config, default_goal=default_goal,
                                   disable=disable)
        # patch some attributes of the os module to make sure they
        # have the same value on every platform.
        backend, ts = driver.get_backend_and_type_system()
        if backend == 'cli':
            from pypy.translator.cli.support import patch
            driver.old_cli_defs = patch()

        target = targetspec_dic['target']
        spec = target(driver, args)

        try:
            entry_point, inputtypes, policy = spec
        except ValueError:
            # target() may also return just (entry_point, inputtypes)
            entry_point, inputtypes = spec
            policy = None

        driver.setup(entry_point, inputtypes,
                     policy=policy,
                     extra=targetspec_dic,
                     empty_translator=empty_translator)

        return driver
    from_targetspec = staticmethod(from_targetspec)
    def prereq_checkpt_rtype(self):
        # refuse to checkpoint (fork) once the rtyper has been imported:
        # past that point a restart would not reproduce the same state
        assert 'pypy.rpython.rmodel' not in sys.modules, (
            "cannot fork because the rtyper has already been imported")
    prereq_checkpt_rtype_lltype = prereq_checkpt_rtype
    prereq_checkpt_rtype_ootype = prereq_checkpt_rtype
    # checkpointing support
    def _event(self, kind, goal, func):
        # hook called around each task; just before running the goal named
        # by translation.fork_before, save a restartable process checkpoint
        if kind == 'pre':
            fork_before = self.config.translation.fork_before
            if fork_before:
                fork_before, = self.backend_select_goals([fork_before])
                if not fork_before in self.done and fork_before == goal:
                    # run the goal's checkpoint precondition, if one exists
                    prereq = getattr(self, 'prereq_checkpt_%s' % goal, None)
                    if prereq:
                        prereq()
                    from pypy.translator.goal import unixcheckpoint
                    unixcheckpoint.restartable_point(auto='run')
def mkexename(name):
    """Return 'name' with a normalized '.exe' suffix on Windows;
    unchanged on every other platform."""
    if sys.platform != 'win32':
        return name
    return os.path.normpath(name + '.exe')
| Python |
import autopath
from pypy.tool import testit
from pypy.tool.udir import udir
from pypy.translator.tool.cbuild import build_cfunc
from pypy.translator.test.test_cltrans import global_cl, make_cl_func
def benchmark(func):
    # compare the speed of the plain CPython function against its
    # C-compiled version (and the experimental CL backend, if available)
    try:
        func = func.im_func
    except AttributeError:
        pass
    c_func = build_cfunc(func, dot=False)
    if global_cl:
        cl_func = make_cl_func(func)
    print "generated c-func for", func.func_name
    # 100 repetitions of each variant
    t1 = timeit(100, func)
    t2 = timeit(100, c_func)
    if global_cl:
        t3 = timeit(100, cl_func)
    print "cpython func       ", t1, "seconds"
    print "pypy/pyrex/cmodule ", t2, "seconds"
    if global_cl:
        print "cl (experimental)  ", t3, "seconds", global_cl
def timeit(num, func, *args):
    """Call func(*args) 'num' times and return the elapsed wall-clock
    time in seconds."""
    from time import time
    begin = time()
    count = 0
    while count < num:
        func(*args)
        count += 1
    return time() - begin
if __name__ == '__main__':
    # quick demo: benchmark a small snippet from the test suite
    from pypy.translator.test.snippet import sieve_of_eratosthenes
    benchmark(sieve_of_eratosthenes)
| Python |
import types, sys
from pypy.annotation.model import SomeValue, debugname
from pypy.annotation.annset import AnnotationSet
from pypy.annotation.annrpython import RPythonAnnotator
# one-element mutable cell holding the current trace indentation prefix,
# shared between all traced wrappers
indent1 = ['']

def show(n):
    """Render a traced value: well-known annotation objects get a short
    tag, anything else its repr()."""
    if isinstance(n, AnnotationSet):
        return 'heap'
    if isinstance(n, RPythonAnnotator):
        return 'rpyann'
    return repr(n)
def trace(o):
if isinstance(o, types.ClassType):
for key, value in o.__dict__.items():
o.__dict__[key] = trace(value)
elif isinstance(o, types.FunctionType):
d = {'o': o, 'show': show, 'indent1': indent1, 'stderr': sys.stderr}
exec """
def %s(*args, **kwds):
indent, = indent1
rargs = [show(a) for a in args]
for kw, value in kwds.items():
rargs.append('%%s=%%r' %% (kw, value))
print >> stderr, indent + %r + '(%%s)' %% ', '.join(rargs)
indent1[0] += '| '
try:
result = o(*args, **kwds)
except Exception, e:
indent1[0] = indent
print >> stderr, indent + '+--> %%s: %%s' %% (e.__class__.__name__, e)
raise
indent1[0] = indent
if result is not None:
print >> stderr, indent + '+-->', show(result)
return result
result = %s
""" % (o.__name__, o.__name__, o.__name__) in d
return d['result']
| Python |
import pdb
import types
import code
import sys
from pypy.objspace.flow.model import FunctionGraph
class NoTTY(Exception):
    """Raised when the debugger would have to prompt but stdout is not
    an interactive terminal."""
    pass
class PdbPlusShow(pdb.Pdb):
    """A pdb.Pdb subclass with extra commands for inspecting a pypy
    translator: graph display (showg, flowg, callg, classhier),
    annotation queries (find, finddescs, attrs, attrsann, readpos) and
    an embedded interactive interpreter (interact)."""

    def __init__(self, translator):
        pdb.Pdb.__init__(self)
        self.prompt = "(Pdb+) "
        self.translator = translator
        # extra names made available in the debugging namespace, see start()
        self.exposed = {}

    def post_mortem(self, t):
        # walk to the innermost frame of the traceback and debug there
        self.reset()
        while t.tb_next is not None:
            t = t.tb_next
        self.interaction(t.tb_frame, t)

    def preloop(self):
        # refuse to start an interactive prompt without a real terminal
        if not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
            raise NoTTY("Cannot start the debugger when stdout is captured.")
        pdb.Pdb.preloop(self)

    def expose(self, d):
        """Add the names in dict d to the debugging namespace."""
        self.exposed.update(d)

    def _show(self, page):
        # display a graphpage object (opens the graph viewer)
        page.display_background()

    def _importobj(self, fullname):
        """Resolve a dotted name, importing modules along the way as
        needed; raises NameError if some component cannot be imported."""
        obj = None
        name = ''
        for comp in fullname.split('.'):
            name += comp
            obj = getattr(obj, comp, None)
            if obj is None:
                try:
                    obj = __import__(name, {}, {}, ['*'])
                except ImportError:
                    raise NameError
            name += '.'
        return obj

    # package prefixes tried in turn when resolving dotted names
    TRYPREFIXES = ['','pypy.','pypy.objspace.','pypy.interpreter.', 'pypy.objspace.std.' ]

    def _mygetval(self, arg, errmsg):
        """Evaluate arg in the current frame; print errmsg and re-raise
        on failure."""
        try:
            return eval(arg, self.curframe.f_globals,
                        self.curframe.f_locals)
        except:
            t, v = sys.exc_info()[:2]
            if isinstance(t, str):
                exc_type_name = t
            else: exc_type_name = t.__name__
            if not isinstance(arg, str):
                print '*** %s' % errmsg, "\t[%s: %s]" % (exc_type_name, v)
            else:
                print '*** %s:' % errmsg, arg, "\t[%s: %s]" % (exc_type_name, v)
            raise

    def _getobj(self, name):
        """Resolve name: first as a (possibly prefixed) dotted import,
        then as an expression in the current frame; None if both fail."""
        if '.' in name:
            for pfx in self.TRYPREFIXES:
                try:
                    return self._importobj(pfx+name)
                except NameError:
                    pass
        try:
            return self._mygetval(name, "Not found")
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except:
            pass
        return None

    def do_find(self, arg):
        """find obj [as var]
find dotted named obj, possibly using prefixing with some packages
in pypy (see help pypyprefixes); the result is assigned to var or _."""
        objarg, var = self._parse_modif(arg)
        obj = self._getobj(objarg)
        if obj is None:
            return
        print obj
        self._setvar(var, obj)

    def _parse_modif(self, arg, modif='as'):
        """Split 'something <modif> var' into (something, var); var
        defaults to '_' when the modifier is absent."""
        var = '_'
        aspos = arg.rfind(modif+' ')
        if aspos != -1:
            objarg = arg[:aspos].strip()
            var = arg[aspos+(1+len(modif)):].strip()
        else:
            objarg = arg
        return objarg, var

    def _setvar(self, var, obj):
        # bind the result in the debugged frame's locals
        self.curframe.f_locals[var] = obj

    class GiveUp(Exception):
        # raised by filter functions to abort an iteration cleanly
        pass

    def _getcdef(self, cls):
        """Return the unique ClassDef for cls, or None (e.g. for
        specialized classes)."""
        try:
            return self.translator.annotator.bookkeeper.getuniqueclassdef(cls)
        except Exception:
            print "*** cannot get classdef: likely specialized class: %s" % cls
            return None

    def _make_flt(self, expr):
        """Compile expr into a predicate flt(c) evaluated with 'cand'
        bound to c in the current frame; returns None on syntax error."""
        try:
            expr = compile(expr, '<filter>', 'eval')
        except SyntaxError:
            print "*** syntax: %s" % expr
            return None
        def flt(c):
            marker = object()
            try:
                # temporarily install the candidate as 'cand'
                old = self.curframe.f_locals.get('cand', marker)
                self.curframe.f_locals['cand'] = c
                try:
                    return self._mygetval(expr, "oops")
                except (KeyboardInterrupt, SystemExit, MemoryError):
                    raise
                except:
                    raise self.GiveUp
            finally:
                # restore (or remove) the previous 'cand' binding
                if old is not marker:
                    self.curframe.f_locals['cand'] = old
                else:
                    del self.curframe.f_locals['cand']
        return flt

    def do_finddescs(self, arg):
        """finddescs kind expr [as var]
find annotation descs of kind (ClassDesc|FunctionDesc|...)
for which expr is true, cand in it refers to
the candidate desc; the result list is assigned to var or _."""
        expr, var = self._parse_modif(arg)
        kind, expr = expr.split(None, 1)
        flt = self._make_flt(expr)
        if flt is None:
            return
        from pypy.annotation import description
        kind_cls = getattr(description, kind, None)
        if kind_cls is None:
            # also accept e.g. 'class' for 'ClassDesc'
            kind = kind.title()+'Desc'
            kind_cls = getattr(description, kind, None)
        if kind_cls is None:
            return
        descs = []
        try:
            for c in self.translator.annotator.bookkeeper.descs.itervalues():
                if isinstance(c, kind_cls) and flt(c):
                    descs.append(c)
        except self.GiveUp:
            return
        self._setvar(var, descs)

    def do_showg(self, arg):
        """showg obj
show graph for obj, obj can be an expression or a dotted name
(in which case prefixing with some packages in pypy is tried (see help pypyprefixes)).
if obj is a function or method, the localized call graph is shown;
if obj is a class or ClassDef the class definition graph is shown"""
        from pypy.annotation.classdef import ClassDef
        from pypy.translator.tool import graphpage
        translator = self.translator
        obj = self._getobj(arg)
        if obj is None:
            return
        if hasattr(obj, 'im_func'):
            obj = obj.im_func
        if isinstance(obj, types.FunctionType):
            page = graphpage.LocalizedCallGraphPage(translator, self._allgraphs(obj))
        elif isinstance(obj, FunctionGraph):
            page = graphpage.FlowGraphPage(translator, [obj])
        elif isinstance(obj, (type, types.ClassType)):
            classdef = self._getcdef(obj)
            if classdef is None:
                return
            page = graphpage.ClassDefPage(translator, classdef)
        elif isinstance(obj, ClassDef):
            page = graphpage.ClassDefPage(translator, obj)
        else:
            print "*** Nothing to do"
            return
        self._show(page)

    def _attrs(self, arg, pr):
        """Shared implementation of attrs/attrsann: resolve arg to a list
        of ClassDefs, filter their attrs and print them through pr()."""
        arg, expr = self._parse_modif(arg, 'match')
        if expr == '_':
            expr = 'True'
        obj = self._getobj(arg)
        if obj is None:
            return
        try:
            obj = list(obj)
        except:
            obj = [obj]
        clsdefs = []
        for x in obj:
            if isinstance(x, (type, types.ClassType)):
                cdef = self._getcdef(x)
                if cdef is None:
                    continue
                clsdefs.append(cdef)
            else:
                clsdefs.append(x)
        def longname(c):
            return c.name
        clsdefs.sort(lambda x,y: cmp(longname(x), longname(y)))
        flt = self._make_flt(expr)
        if flt is None:
            return
        for cdef in clsdefs:
            try:
                attrs = [a for a in cdef.attrs.itervalues() if flt(a)]
            except self.GiveUp:
                return
            if attrs:
                print "%s:" % cdef.name
                pr(attrs)

    def do_attrs(self, arg):
        """attrs obj [match expr]
list annotated attrs of class|def obj or list of classe(def)s obj,
obj can be an expression or a dotted name
(in which case prefixing with some packages in pypy is tried (see help pypyprefixes));
expr is an optional filtering expression; cand in it refer to the candidate Attribute
information object, which has a .name and .s_value."""
        def pr(attrs):
            print " " + ' '.join([a.name for a in attrs])
        self._attrs(arg, pr)

    def do_attrsann(self, arg):
        """attrsann obj [match expr]
list with their annotation annotated attrs of class|def obj or list of classe(def)s obj,
obj can be an expression or a dotted name
(in which case prefixing with some packages in pypy is tried (see help pypyprefixes));
expr is an optional filtering expression; cand in it refer to the candidate Attribute
information object, which has a .name and .s_value."""
        def pr(attrs):
            for a in attrs:
                print ' %s %s' % (a.name, a.s_value)
        self._attrs(arg, pr)

    def do_readpos(self, arg):
        """readpos obj attrname [match expr] [as var]
list the read positions of annotated attr with attrname of class or classdef obj,
obj can be an expression or a dotted name
(in which case prefixing with some packages in pypy is tried (see help pypyprefixes));
expr is an optional filtering expression; cand in it refer to the candidate read
position information, which has a .func (which can be None), a .graph and .block and .i;
the list of the read positions functions is set to var or _."""
        class Pos:
            # candidate object handed to the filter expression
            def __init__(self, graph, func, block, i):
                self.graph = graph
                self.func = func
                self.block = block
                self.i = i
        arg, var = self._parse_modif(arg, 'as')
        arg, expr = self._parse_modif(arg, 'match')
        if expr == '_':
            expr = 'True'
        args = arg.split()
        if len(args) != 2:
            print "*** expected obj attrname:", arg
            return
        arg, attrname = args
        # allow quotes around attrname
        if (attrname.startswith("'") and attrname.endswith("'")
            or attrname.startswith('"') and attrname.endswith('"')):
            attrname = attrname[1:-1]
        obj = self._getobj(arg)
        if obj is None:
            return
        if isinstance(obj, (type, types.ClassType)):
            obj = self._getcdef(obj)
            if obj is None:
                return
        attrs = obj.attrs
        if attrname not in attrs:
            print "*** bogus:", attrname
            return
        pos = attrs[attrname].read_locations
        if not pos:
            return
        flt = self._make_flt(expr)
        if flt is None:
            return
        r = {}
        try:
            for p in pos:
                graph, block, i = p
                if hasattr(graph, 'func'):
                    func = graph.func
                else:
                    func = None
                if flt(Pos(graph, func, block, i)):
                    if func is not None:
                        print func.__module__ or '?', func.__name__, block, i
                    else:
                        print graph, block, i
                    if i >= 0:
                        # show the reading operation and its arg bindings
                        op = block.operations[i]
                        print " ", op
                        print " ",
                        for arg in op.args:
                            print "%s: %s" % (arg, self.translator.annotator.binding(arg)),
                        print
                    r[func] = True
        except self.GiveUp:
            return
        self._setvar(var, r.keys())

    def do_flowg(self, arg):
        """flowg obj
show flow graph for function obj, obj can be an expression or a dotted name
(in which case prefixing with some packages in pypy is tried (see help pypyprefixes))"""
        from pypy.translator.tool import graphpage
        obj = self._getobj(arg)
        if obj is None:
            return
        if hasattr(obj, 'im_func'):
            obj = obj.im_func
        if isinstance(obj, types.FunctionType):
            graphs = self._allgraphs(obj)
        elif isinstance(obj, FunctionGraph):
            graphs = [obj]
        else:
            print "*** Not a function"
            return
        self._show(graphpage.FlowGraphPage(self.translator, graphs))

    def _allgraphs(self, func):
        """Collect all graphs generated for func (specializations from
        the funcdesc cache plus any graph whose .func is func)."""
        graphs = {}
        funcdesc = self.translator.annotator.bookkeeper.getdesc(func)
        for graph in funcdesc._cache.itervalues():
            graphs[graph] = True
        for graph in self.translator.graphs:
            if getattr(graph, 'func', None) is func:
                graphs[graph] = True
        return graphs.keys()

    def do_callg(self, arg):
        """callg obj
show localized call-graph for function obj, obj can be an expression or a dotted name
(in which case prefixing with some packages in pypy is tried (see help pypyprefixes))"""
        from pypy.translator.tool import graphpage
        obj = self._getobj(arg)
        if obj is None:
            return
        if hasattr(obj, 'im_func'):
            obj = obj.im_func
        if isinstance(obj, types.FunctionType):
            graphs = self._allgraphs(obj)
        elif isinstance(obj, FunctionGraph):
            graphs = [obj]
        else:
            print "*** Not a function"
            return
        self._show(graphpage.LocalizedCallGraphPage(self.translator, graphs))

    def do_classhier(self, arg):
        """classhier
show class hierarchy graph"""
        from pypy.translator.tool import graphpage
        self._show(graphpage.ClassHierarchyPage(self.translator))

    def do_interact(self, arg):
        """invoke a code.py sub prompt"""
        ns = self.curframe.f_globals.copy()
        ns.update(self.curframe.f_locals)
        code.interact("*interactive*", local=ns)

    def help_graphs(self):
        print "graph commands are: showg, flowg, callg, classhier, enable_graphic"

    def help_ann_other(self):
        print "other annotation related commands are: find, finddescs, attrs, attrsann, readpos"

    def help_pypyprefixes(self):
        print "these prefixes are tried for dotted names in graph commands:"
        print self.TRYPREFIXES

    # start helpers
    def start(self, tb):
        """Enter the debugger: set_trace() if tb is None, otherwise a
        post-mortem on the given traceback; exposed names are injected
        into the local namespace first."""
        if tb is None:
            fn, args = self.set_trace, ()
        else:
            fn, args = self.post_mortem, (tb,)
        try:
            t = self.translator # define environments, xxx more stuff
            # the bare exec disables CPython's static optimization of
            # locals, so that locals().update() below is effective
            exec ""
            locals().update(self.exposed)
            fn(*args)
            pass # for debugger to land
        except pdb.bdb.BdbQuit:
            pass
def pdbcatch(f):
    "A decorator that throws you in a pdbplus if the given function raises."
    from pypy.tool.sourcetools import func_with_new_name

    def catcher(*args, **kwds):
        # run f; on any exception open a post-mortem debugger first,
        # then let the exception propagate unchanged
        try:
            return f(*args, **kwds)
        except:
            import sys
            PdbPlusShow(None).post_mortem(sys.exc_info()[2])
            raise

    return func_with_new_name(catcher, f.__name__)
| Python |
from __future__ import generators
"""
"""
import autopath, os
import inspect, linecache
from pypy.objspace.flow.model import *
from pypy.objspace.flow import Space
from pypy.tool.udir import udir
from py.process import cmdexec
from pypy.interpreter.pytraceback import offset2lineno
class DotGen:
    """Accumulates the lines of a graphviz 'dot' file and can run the
    'dot' tool over the result."""

    def __init__(self, graphname, rankdir=None):
        self.graphname = safename(graphname)
        self.lines = []
        self.source = None
        self.emit("digraph %s {" % self.graphname)
        if rankdir:
            self.emit('rankdir="%s"' % rankdir)

    def generate(self, storedir=None, target='ps'):
        """Write the .dot file (under storedir, default udir) and convert
        it with 'dot'; with target=None just return the dot source."""
        source = self.get_source()
        if target is None:
            return source # unprocessed
        if storedir is None:
            storedir = udir
        pdot = storedir.join('%s.dot' % self.graphname)
        pdot.write(source)
        ptarget = pdot.new(ext=target)
        cmdexec('dot -T%s %s>%s' % (target, str(pdot), str(ptarget)))
        return ptarget

    def get_source(self):
        """Close the graph and return (and cache) the complete dot text."""
        if self.source is not None:
            return self.source
        self.emit("}")
        self.source = '\n'.join(self.lines)
        del self.lines   # emitting after this point would be a bug
        return self.source

    def emit(self, line):
        self.lines.append(line)

    def enter_subgraph(self, name):
        self.emit("subgraph %s {" % (safename(name),))

    def leave_subgraph(self):
        self.emit("}")

    def _quote(self, text):
        # escape double quotes and newlines for use in a dot attribute value
        return text.replace('"', '\\"').replace('\n', '\\n')

    def emit_edge(self, name1, name2, label="",
                  style="dashed",
                  color="black",
                  dir="forward",
                  weight="5",
                  ):
        pairs = [('label', label), ('style', style), ('color', color),
                 ('dir', dir), ('weight', weight)]
        attrs = ['%s="%s"' % (key, self._quote(value)) for key, value in pairs]
        self.emit('edge [%s];' % ", ".join(attrs))
        self.emit('%s -> %s' % (safename(name1), safename(name2)))

    def emit_node(self, name,
                  shape="diamond",
                  label="",
                  color="black",
                  fillcolor="white",
                  style="filled",
                  width="0.75",
                  ):
        pairs = [('shape', shape), ('label', label), ('color', color),
                 ('fillcolor', fillcolor), ('style', style), ('width', width)]
        attrs = ['%s="%s"' % (key, self._quote(value)) for key, value in pairs]
        self.emit('%s [%s];' % (safename(name), ", ".join(attrs)))
# background color of the per-function header box, keyed by graph tag
TAG_TO_COLORS = {
    "timeshifted":  "#cfa5f0",
    "portal":       "#cfa5f0",
    "PortalEntry":  "#84abf0",
    "PortalReentry": "#f084c2",
    }
DEFAULT_TAG_COLOR = "#a5e6f0"
# fill colors for return and exception-raising blocks
RETURN_COLOR = "green"
EXCEPT_COLOR = "#ffa000"
class FlowGraphDotGen(DotGen):
    """DotGen specialized for pypy flow graphs: renders FunctionGraphs
    and their Blocks, one dot subgraph per function graph."""
    # when True, labels additionally include source lines and block details
    VERBOSE = False

    def __init__(self, graphname, rankdir=None):
        DotGen.__init__(self, graphname.replace('.', '_'), rankdir)

    def emit_subgraph(self, name, node):
        # emit one function graph as a dot subgraph; per-subgraph state
        # (block name map, current func, name prefix) is reset here
        name = name.replace('.', '_') + '_'
        self.blocks = {id(None): '(None)'}
        self.func = None
        self.prefix = name
        self.enter_subgraph(name)
        tagcolor = TAG_TO_COLORS.get(node.tag, DEFAULT_TAG_COLOR)
        self.visit_FunctionGraph(node, tagcolor)
        for block in safe_iterblocks(node):
            self.visit_Block(block, tagcolor)
        self.leave_subgraph()

    def blockname(self, block):
        # map a block (by id) to a stable, subgraph-unique dot node name
        i = id(block)
        try:
            return self.blocks[i]
        except KeyError:
            self.blocks[i] = name = "%s_%d" % (self.prefix, len(self.blocks))
            return name

    def visit_FunctionGraph(self, funcgraph, tagcolor):
        # header box for the function, plus an edge to its start block
        name = self.prefix # +'_'+funcgraph.name
        data = funcgraph.name
        if getattr(funcgraph, 'source', None) is not None:
            source = funcgraph.source
            if self.VERBOSE:
                data += "\\n"
            else:
                data = ""
            # '\l' left-justifies each line in the dot label
            data += "\\l".join(source.split('\n'))
        if hasattr(funcgraph, 'func'):
            self.func = funcgraph.func
        self.emit_node(name, label=data, shape="box", fillcolor=tagcolor, style="filled")
        if hasattr(funcgraph, 'startblock'):
            self.emit_edge(name, self.blockname(funcgraph.startblock), 'startblock')

    def visit_Block(self, block, tagcolor):
        # do the block itself
        name = self.blockname(block)
        if not isinstance(block, Block):
            data = "BROKEN BLOCK\\n%r" % (block,)
            self.emit_node(name, label=data)
            return
        lines = []
        for op in block.operations:
            lines.extend(repr(op).split('\n'))
        lines.append("")
        numblocks = len(block.exits)
        color = "black"
        fillcolor = getattr(block, "blockcolor", "white")
        if not numblocks:
            # no exits: a return block (1 inputarg) or a raise block (2)
            shape = "box"
            if len(block.inputargs) == 1:
                lines[-1] += 'return %s' % tuple(block.inputargs)
                fillcolor= RETURN_COLOR
            elif len(block.inputargs) == 2:
                lines[-1] += 'raise %s, %s' % tuple(block.inputargs)
                fillcolor= EXCEPT_COLOR
        elif numblocks == 1:
            shape = "box"
        else:
            # branching block: highlight it
            color = "red"
            shape = "octagon"
        if block.exitswitch is not None:
            lines.append("exitswitch: %s" % block.exitswitch)
        iargs = " ".join(map(repr, block.inputargs))
        if self.VERBOSE:
            if block.exc_handler:
                eh = ' (EH)'
            else:
                eh = ''
            data = "%s%s%s\\n" % (name, block.at(), eh)
        else:
            data = "%s\\n" % (name,)
        data += "inputargs: %s\\n\\n" % (iargs,)
        if self.VERBOSE and block.operations and self.func:
            # attach the matching source line(s) of the original function
            maxoffs = max([op.offset for op in block.operations])
            if maxoffs >= 0:
                minoffs = min([op.offset for op in block.operations
                               if op.offset >= 0])
                minlineno = offset2lineno(self.func.func_code, minoffs)
                maxlineno = offset2lineno(self.func.func_code, maxoffs)
                filename = inspect.getsourcefile(self.func)
                source = "\l".join([linecache.getline(filename, line).rstrip()
                                    for line in range(minlineno, maxlineno+1)])
                if minlineno == maxlineno:
                    data = data + r"line %d:\n%s\l\n" % (minlineno, source)
                else:
                    data = data + r"lines %d-%d:\n%s\l\n" % (minlineno,
                                                             maxlineno, source)
        data = data + "\l".join(lines)
        self.emit_node(name, label=data, shape=shape, color=color, style="filled", fillcolor=fillcolor)
        # do links/exits
        for link in block.exits:
            name2 = self.blockname(link.target)
            label = " ".join(map(repr, link.args))
            if link.exitcase is not None:
                label = "%s: %s" %(repr(link.exitcase).replace('\\', '\\\\'), label)
                self.emit_edge(name, name2, label, style="dotted", color="red")
            else:
                self.emit_edge(name, name2, label, style="solid")
def make_dot(graphname, graph, storedir=None, target='ps'):
    # single-graph convenience wrapper around make_dot_graphs()
    return make_dot_graphs(graph.name, [(graphname, graph)], storedir, target)
def show_dot(graph, storedir = None, target = 'ps'):
    # render the graph and open the result in the 'gv' viewer
    name = graph.name
    fn = make_dot(name, graph, storedir, target)
    os.system('gv %s' % fn)
def make_dot_graphs(basefilename, graphs, storedir=None, target='ps'):
    """Emit several (name, graph) pairs as subgraphs of one dot file and
    run 'dot' over it; duplicate subgraph names get a numeric suffix."""
    dotgen = FlowGraphDotGen(basefilename)
    seen = {basefilename: True}
    for title, graph in graphs:
        if title in seen:
            suffix = 2
            while title + str(suffix) in seen:
                suffix += 1
            title = title + str(suffix)
        seen[title] = True
        dotgen.emit_subgraph(title, graph)
    return dotgen.generate(storedir, target)
def _makecharmap():
    # every byte defaults to an escaped two-hex-digit form ...
    mapping = dict((chr(code), '_%02X' % code) for code in range(256))
    # ... but plain ASCII alphanumerics pass through unchanged
    for ch in ('abcdefghijklmnopqrstuvwxyz'
               'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
        mapping[ch] = ch
    mapping['_'] = '__'   # '_' is the escape character, so double it
    return mapping
CHAR_MAP = _makecharmap()
del _makecharmap

def safename(name):
    # turn a random string into something that is a valid dot identifier,
    # avoiding invalid characters and prepending '_' to make sure it is
    # not a keyword
    return '_' + ''.join([CHAR_MAP[c] for c in name])
if __name__ == '__main__':
    # quick demo: build the flow graph of a tiny function and render it
    def f(x):
        i = 0
        while i < x:
            i += 1
        return i
    space = Space()
    graph = space.build_flow(f)
    make_dot('f', graph)
| Python |
"""
A quick hack to capture stdout/stderr.
"""
import os, sys
class Capture:
    """Capture the Unix-level stdout/stderr (file descriptors 1 and 2)
    into temporary files until done() is called.  Python-level
    sys.stdout/sys.stderr are re-pointed too when they wrap fds 1/2."""

    def __init__(self, mixed_out_err = False):
        "Start capture of the Unix-level stdout and stderr."
        # on platforms without the needed os primitives, capture nothing
        if (not hasattr(os, 'tmpfile') or
            not hasattr(os, 'dup') or
            not hasattr(os, 'dup2') or
            not hasattr(os, 'fdopen')):
            self.dummy = 1
        else:
            self.dummy = 0
            # make new stdout/stderr files if needed
            self.localoutfd = os.dup(1)
            self.localerrfd = os.dup(2)
            if hasattr(sys.stdout, 'fileno') and sys.stdout.fileno() == 1:
                # keep Python-level output going to the real terminal
                # via the duplicated fd (line-buffered)
                self.saved_stdout = sys.stdout
                sys.stdout = os.fdopen(self.localoutfd, 'w', 1)
            else:
                self.saved_stdout = None
            if hasattr(sys.stderr, 'fileno') and sys.stderr.fileno() == 2:
                # same for stderr (unbuffered)
                self.saved_stderr = sys.stderr
                sys.stderr = os.fdopen(self.localerrfd, 'w', 0)
            else:
                self.saved_stderr = None
            self.tmpout = os.tmpfile()
            if mixed_out_err:
                # interleave stdout and stderr in a single file
                self.tmperr = self.tmpout
            else:
                self.tmperr = os.tmpfile()
            # from here on, fds 1 and 2 write into the temp files
            os.dup2(self.tmpout.fileno(), 1)
            os.dup2(self.tmperr.fileno(), 2)

    def done(self):
        "End capture and return the captured text (stdoutfile, stderrfile)."
        if self.dummy:
            import cStringIO
            return cStringIO.StringIO(), cStringIO.StringIO()
        else:
            # restore the original fds 1 and 2
            os.dup2(self.localoutfd, 1)
            os.dup2(self.localerrfd, 2)
            if self.saved_stdout is not None:
                f = sys.stdout
                sys.stdout = self.saved_stdout
                f.close()
            else:
                os.close(self.localoutfd)
            if self.saved_stderr is not None:
                f = sys.stderr
                sys.stderr = self.saved_stderr
                f.close()
            else:
                os.close(self.localerrfd)
            # rewind so callers can read the captured text from the start
            self.tmpout.seek(0)
            self.tmperr.seek(0)
            return self.tmpout, self.tmperr
if __name__ == '__main__':
    # test
    # capture the fd-level output of a subprocess and print it back
    c = Capture()
    try:
        os.system('echo hello')
    finally:
        fout, ferr = c.done()
    print 'Output:', `fout.read()`
    print 'Error:', `ferr.read()`
| Python |
import autopath
import os, sys, inspect, re, imp
from pypy.translator.tool import stdoutcapture
import py
from pypy.tool.ansi_print import ansi_log
# all cbuild messages go through this producer, rendered with ansi colors
log = py.log.Producer("cbuild")
py.log.setconsumer("cbuild", ansi_log)
# set to a true value for extra progress prints
debug = 0
def make_module_from_pyxstring(name, dirpath, string):
dirpath = py.path.local(dirpath)
pyxfile = dirpath.join('%s.pyx' % name)
i = 0
while pyxfile.check():
pyxfile = pyxfile.new(basename='%s%d.pyx' % (name, i))
i+=1
pyxfile.write(string)
if debug: print "made pyxfile", pyxfile
cfile = make_c_from_pyxfile(pyxfile)
module = make_module_from_c(cfile)
#print "made module", module
return module
def compiler_command():
    """Return the custom C compiler command template from the PYPY_CC
    environment variable, or None if it is not set.

    e.g. for tcc, you might set PYPY_CC to "tcc -shared -o %s.so %s.c".
    """
    return os.environ.get('PYPY_CC')
def enable_fast_compilation():
    """Patch distutils' OPT config var to disable C optimization (-O0 or
    /O0), trading runtime speed for much faster compiles."""
    if sys.platform == 'win32':
        dash = '/'
    else:
        dash = '-'
    from distutils import sysconfig
    gcv = sysconfig.get_config_vars()
    opt = gcv.get('OPT') # not always existent
    if opt:
        # replace any existing -O<n> flag with -O0
        opt = re.sub('%sO\d+' % dash, '%sO0' % dash, opt)
    else:
        opt = '%sO0' % dash
    gcv['OPT'] = opt
def ensure_correct_math():
    """On Windows, add /Op (improved float consistency) to distutils'
    compile options; no-op on other platforms."""
    if sys.platform != 'win32':
        return # so far
    from distutils import sysconfig
    gcv = sysconfig.get_config_vars()
    opt = gcv.get('OPT') # not always existent
    if opt and '/Op' not in opt:
        opt += '/Op'
    gcv['OPT'] = opt
def compile_c_module(cfiles, modname, include_dirs=None, libraries=[]):
    """Compile a list of C files (py.path objects) into an extension
    module 'modname', built in-place in the first file's directory.
    Build output is captured; on error it is dumped to <modname>.errors
    and echoed to stderr."""
    #try:
    #    from distutils.log import set_threshold
    #    set_threshold(10000)
    #except ImportError:
    #    print "ERROR IMPORTING"
    #    pass
    if include_dirs is None:
        include_dirs = []
    library_dirs = []
    if sys.platform == 'darwin':    # support Fink & Darwinports
        for s in ('/sw/', '/opt/local/'):
            if s + 'include' not in include_dirs and \
               os.path.exists(s + 'include'):
                include_dirs.append(s + 'include')
            if s + 'lib' not in library_dirs and \
               os.path.exists(s + 'lib'):
                library_dirs.append(s + 'lib')
    dirpath = cfiles[0].dirpath()
    # chdir into the source directory for the duration of the build
    lastdir = dirpath.chdir()
    ensure_correct_math()
    try:
        if debug: print "modname", modname
        # capture fd-level build output (stdout and stderr interleaved)
        c = stdoutcapture.Capture(mixed_out_err = True)
        try:
            try:
                if compiler_command():
                    # GCC-ish options only
                    from distutils import sysconfig
                    gcv = sysconfig.get_config_vars()
                    cmd = compiler_command().replace('%s',
                                                     str(dirpath.join(modname)))
                    for dir in [gcv['INCLUDEPY']] + list(include_dirs):
                        cmd += ' -I%s' % dir
                    os.system(cmd)
                else:
                    from distutils.dist import Distribution
                    from distutils.extension import Extension
                    from distutils.ccompiler import get_default_compiler
                    saved_environ = os.environ.items()
                    try:
                        # distutils.core.setup() is really meant for end-user
                        # interactive usage, because it eats most exceptions and
                        # turn them into SystemExits.  Instead, we directly
                        # instantiate a Distribution, which also allows us to
                        # ignore unwanted features like config files.
                        extra_compile_args = []
                        # ensure correct math on windows
                        if sys.platform == 'win32':
                            extra_compile_args.append('/Op') # get extra precision
                        if get_default_compiler() == 'unix':
                            old_version = False
                            try:
                                g = os.popen('gcc --version', 'r')
                                verinfo = g.read()
                                g.close()
                            except (OSError, IOError):
                                pass
                            else:
                                old_version = verinfo.startswith('2')
                            if not old_version:
                                # silence warnings from generated code
                                extra_compile_args.extend(["-Wno-unused-label",
                                                           "-Wno-unused-variable"])
                        attrs = {
                            'name': "testmodule",
                            'ext_modules': [
                                Extension(modname, [str(cfile) for cfile in cfiles],
                                          include_dirs=include_dirs,
                                          library_dirs=library_dirs,
                                          extra_compile_args=extra_compile_args,
                                          libraries=libraries,)
                                ],
                            'script_name': 'setup.py',
                            'script_args': ['-q', 'build_ext', '--inplace'],
                            }
                        dist = Distribution(attrs)
                        if not dist.parse_command_line():
                            raise ValueError, "distutils cmdline parse error"
                        dist.run_commands()
                    finally:
                        # restore any environment variables distutils changed
                        for key, value in saved_environ:
                            if os.environ.get(key) != value:
                                os.environ[key] = value
            finally:
                # stop capturing and keep the build output around
                foutput, foutput = c.done()
                data = foutput.read()
                if data:
                    fdump = open("%s.errors" % modname, "w")
                    fdump.write(data)
                    fdump.close()
            # XXX do we need to do some check on fout/ferr?
            # XXX not a nice way to import a module
        except:
            print >>sys.stderr, data
            raise
    finally:
        lastdir.chdir()
def make_module_from_c(cfile, include_dirs=None, libraries=[]):
    """Compile a single C file into an extension module named after the
    file and import it."""
    cfile = py.path.local(cfile)
    modname = cfile.purebasename
    compile_c_module([cfile], modname, include_dirs, libraries)
    return import_module_from_directory(cfile.dirpath(), modname)
def import_module_from_directory(dir, modname):
    """Find and import module 'modname', searching only inside 'dir'.

    Returns the imported module object.  The file object handed back by
    imp.find_module is always closed, even when the import fails.
    """
    # 'dir' shadows the builtin; name kept for interface compatibility
    # fix: the local previously shadowed the builtin 'file' as well
    fp, pathname, description = imp.find_module(modname, [str(dir)])
    try:
        mod = imp.load_module(modname, fp, pathname, description)
    finally:
        if fp:
            fp.close()
    return mod
def make_c_from_pyxfile(pyxfile):
    """Translate a .pyx file to C with Pyrex and return the path of the
    corresponding .c file.  Pyrex errors are printed to stderr; the
    returned path may then not exist."""
    from pypy.translator.pyrex import genpyrex
    pyrexdir = os.path.dirname(genpyrex.__file__)
    # make the bundled Pyrex importable
    if pyrexdir not in sys.path:
        sys.path.insert(0, pyrexdir)
    from Pyrex.Compiler.Main import CompilationOptions, Context, PyrexError
    try:
        options = CompilationOptions(show_version = 0,
                                     use_listing_file = 0,
                                     c_only = 1,
                                     output_file = None)
        context = Context(options.include_path)
        result = context.compile(str(pyxfile), options)
        if result.num_errors > 0:
            raise ValueError, "failure %s" % result
    except PyrexError, e:
        print >>sys.stderr, e
    cfile = pyxfile.new(ext='.c')
    return cfile
def build_cfunc(func, simplify=1, dot=1, inputargtypes=None):
    """ return a pyrex-generated cfunction from the given func.

    simplify is true -> perform simplifications on the flowgraph.
    dot is true      -> generate a dot-configuration file and postscript.
    inputargtypes is a list (allowed to be empty) ->
                        then annotation will be performed before generating
                        dot/pyrex/c code.
    """
    try: func = func.im_func
    except AttributeError: pass

    # build the flow graph
    from pypy.objspace.flow import Space
    from pypy.tool.udir import udir
    space = Space()
    name = func.func_name
    funcgraph = space.build_flow(func)

    if not inputargtypes:
        # keep a copy of the source next to the generated files
        source = inspect.getsource(func)
        base = udir.join(name).new(ext='.py').write(source)

    if dot:
        from pypy.translator.tool.make_dot import FlowGraphDotGen
        dotgen = FlowGraphDotGen(name)
        dotgen.emit_subgraph(name, funcgraph)

    # apply transformations
    if simplify:
        from pypy.translator.simplify import simplify_graph
        simplify_graph(funcgraph)
        name += '_s'

    # get the pyrex generator
    from pypy.translator.pyrex.genpyrex import GenPyrex
    genpyrex = GenPyrex(funcgraph)

    # generate pyrex (without type inference)
    # apply type inference
    if inputargtypes is not None:
        genpyrex.annotate(inputargtypes)
        name += '_t'
        #a = Annotator(self.functiongraph)
        #a.build_types(input_arg_types)
        #a.simplify()
        pyxstring = genpyrex.emitcode()
        #funcgraph.source = inspect.getsource(func)
    else:
        pyxstring = genpyrex.emitcode()

    pyxheader = genpyrex.globaldeclarations()
    mod = make_module_from_pyxstring(name, udir, pyxheader + '\n' + pyxstring)

    if dot:
        if name != func.func_name:  # if some transformations have been done
            dotgen.emit_subgraph(name, funcgraph)
        dotgen.generate()
    return getattr(mod, func.func_name)
def log_spawned_cmd(spawn):
    """Wrap a distutils 'spawn' function so that every command line is
    logged through log.execute before being run."""
    def logging_spawn(cmd, *args, **kwds):
        # record the full command line, then hand it on unchanged
        log.execute(' '.join(cmd))
        return spawn(cmd, *args, **kwds)
    return logging_spawn
class ProfOpt(object):
    """Two-pass profile-guided-optimization driver around a CCompiler."""
    #XXX assuming gcc style flags for now
    name = "profopt"

    def __init__(self, compiler):
        self.compiler = compiler

    def first(self):
        """Pass 1: build an instrumented executable."""
        self.build('-fprofile-generate')

    def probe(self, exe, args):
        # 'args' is a single string typically containing spaces
        # and quotes, which represents several arguments.
        os.system("'%s' %s" % (exe, args))

    def after(self):
        """Pass 2: rebuild using the gathered profile data."""
        self.build('-fprofile-use')

    def build(self, option):
        """Run one build with 'option' temporarily appended to both the
        compile and the link flags."""
        cc = self.compiler
        cc.compile_extra.append(option)
        cc.link_extra.append(option)
        try:
            cc._build()
        finally:
            # always restore the flag lists, even on build failure
            cc.compile_extra.pop()
            cc.link_extra.pop()
class CCompiler:
    """Compile a list of C files into an executable via distutils,
    optionally driving a profile-based two-pass build (see ProfOpt)."""

    def __init__(self, cfilenames, outputfilename=None, include_dirs=[],
                 libraries=[], library_dirs=[], compiler_exe=None,
                 profbased=None):
        self.cfilenames = cfilenames
        ext = ''
        self.compile_extra = []
        self.link_extra = []
        self.libraries = list(libraries)
        self.include_dirs = list(include_dirs)
        self.library_dirs = list(library_dirs)
        self.compiler_exe = compiler_exe
        # profbased is None or a (ProfDriverClass, args_string) pair
        self.profbased = profbased
        if not sys.platform in ('win32', 'darwin'): # xxx
            # generic unix: link libm/libpthread and build threaded
            if 'm' not in self.libraries:
                self.libraries.append('m')
            if 'pthread' not in self.libraries:
                self.libraries.append('pthread')
            self.compile_extra += ['-O2', '-pthread']
            self.link_extra += ['-pthread']
        if sys.platform == 'win32':
            self.link_extra += ['/DEBUG'] # generate .pdb file
        if sys.platform == 'darwin':
            # support Fink & Darwinports
            for s in ('/sw/', '/opt/local/'):
                if s + 'include' not in self.include_dirs and \
                   os.path.exists(s + 'include'):
                    self.include_dirs.append(s + 'include')
                if s + 'lib' not in self.library_dirs and \
                   os.path.exists(s + 'lib'):
                    self.library_dirs.append(s + 'lib')
            self.compile_extra += ['-O2']
        if outputfilename is None:
            # default: first C file's name without extension
            self.outputfilename = py.path.local(cfilenames[0]).new(ext=ext)
        else:
            self.outputfilename = py.path.local(outputfilename)

    def build(self, noerr=False):
        """Run the build (plain or profile-based), capturing compiler
        output; output is dumped to <basename>.errors and, unless noerr,
        echoed to stderr when the build raises."""
        basename = self.outputfilename.new(ext='')
        try:
            try:
                c = stdoutcapture.Capture(mixed_out_err = True)
                if self.profbased is None:
                    self._build()
                else:
                    # profile-based: instrumented build, probe run, rebuild
                    ProfDriver, args = self.profbased
                    profdrv = ProfDriver(self)
                    dolog = getattr(log, profdrv.name)
                    dolog(args)
                    profdrv.first()
                    dolog('Gathering profile data from: %s %s' % (
                        str(self.outputfilename), args))
                    profdrv.probe(str(self.outputfilename),args)
                    profdrv.after()
            finally:
                foutput, foutput = c.done()
                data = foutput.read()
                if data:
                    fdump = basename.new(ext='errors').open("w")
                    fdump.write(data)
                    fdump.close()
        except:
            if not noerr:
                print >>sys.stderr, data
            raise

    def _build(self):
        # single distutils build: compile each C file in its own
        # directory, then link everything into the final executable
        from distutils.ccompiler import new_compiler
        compiler = new_compiler(force=1)
        if self.compiler_exe is not None:
            # override the configured compiler/linker binaries
            for c in '''compiler compiler_so compiler_cxx
                        linker_exe linker_so'''.split():
                compiler.executables[c][0] = self.compiler_exe
        compiler.spawn = log_spawned_cmd(compiler.spawn)
        objects = []
        for cfile in self.cfilenames:
            cfile = py.path.local(cfile)
            old = cfile.dirpath().chdir()
            try:
                res = compiler.compile([cfile.basename],
                                       include_dirs=self.include_dirs,
                                       extra_preargs=self.compile_extra)
                assert len(res) == 1
                cobjfile = py.path.local(res[0])
                assert cobjfile.check()
                objects.append(str(cobjfile))
            finally:
                old.chdir()
        compiler.link_executable(objects, str(self.outputfilename),
                                 libraries=self.libraries,
                                 extra_preargs=self.link_extra,
                                 library_dirs=self.library_dirs)
def build_executable(*args, **kwds):
    """Convenience wrapper: build an executable with CCompiler and return
    its path as a string.  Accepts the same arguments as CCompiler plus
    the extra keyword 'noerr', which is forwarded to build()."""
    noerr = kwds.pop('noerr', False)
    cc = CCompiler(*args, **kwds)
    cc.build(noerr=noerr)
    return str(cc.outputfilename)
def check_boehm_presence():
    """Return True if the Boehm GC headers and library can be compiled
    and linked against on this machine, False otherwise."""
    from pypy.tool.udir import udir
    try:
        cfile = udir.join('check_boehm.c')
        cfname = str(cfile)
        cfile = cfile.open('w')
        cfile.write("""
#include <gc/gc.h>

int main() {
  return 0;
}
""")
        cfile.close()
        if sys.platform == 'win32':
            build_executable([cfname], libraries=['gc_pypy'], noerr=True)
        else:
            build_executable([cfname], libraries=['gc'], noerr=True)
    except (KeyboardInterrupt, SystemExit):
        # consistency fix: never swallow these, same as
        # check_under_under_thread() below
        raise
    except:
        # any compile/link failure means Boehm is not available
        return False
    else:
        return True
def check_under_under_thread():
    """Return True if the C toolchain supports the '__thread' storage class
    (thread-local variables): compile __thread_test.c and run it."""
    from pypy.tool.udir import udir
    # copy the test source into the scratch directory and build it there
    cfile = py.path.local(autopath.this_dir).join('__thread_test.c')
    fsource = cfile.open('r')
    source = fsource.read()
    fsource.close()
    cfile = udir.join('__thread_test.c')
    fsource = cfile.open('w')
    fsource.write(source)
    fsource.close()
    try:
        exe = build_executable([str(cfile)],
                               noerr=True)
        # the test must not only build but also run successfully
        py.process.cmdexec(exe)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        # any compile/link/run failure means no __thread support
        return False
    else:
        return True
| Python |
from pypy.translator.translator import TranslationContext
from pypy import conftest
from py.test import raises
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.annotation import model as annmodel
from pypy.annotation.policy import AnnotatorPolicy
from pypy.rpython.lltypesystem import lltype
from pypy.rpython import robject, rclass, rint
from pypy.translator.tool.cbuild import enable_fast_compilation
from pypy.interpreter.baseobjspace import ObjSpace
import sys, types
P = False # debug printing
class RaymondAnnotatorPolicy(AnnotatorPolicy):
    """Annotator policy for building extension modules: SomeObject values
    are allowed (we interface with CPython) and imports are deferred."""
    allow_someobjects = True
    do_imports_immediately = False
# maps a special method name (e.g. '__getattr__') to a list of argument
# annotations; None entries are replaced later by the defining class
SPECIAL_METHODS = {}

def setup_special_methods():
    # populate SPECIAL_METHODS from the object-space method table
    for name, op, arity, funcs in ObjSpace.MethodTable:
        for fname in funcs:
            if fname.startswith('__'):
                ann = [None] * arity # replaced by class
                # guess the natural annotation of the second argument
                # from the special method's name
                if 'attr' in fname:
                    ann[1] = str
                elif 'item' in fname:
                    ann[1] = int
                elif 'pow' in fname:
                    ann[1] = int
                elif 'shift' in fname:
                    ann[1] = int
                if arity == 3 and '_set' in fname:
                    ann[-1] = object # the value being assigned
                SPECIAL_METHODS[fname] = ann
    # __init__ is not in the table.
    SPECIAL_METHODS['__init__'] = [None]

setup_special_methods()
def get_annotation(func, pre=[]):
    """Return the list of annotation types for func's arguments.

    'pre' supplies annotations for leading arguments (e.g. the class for
    methods); further ones come from func._initialannotation_ if present,
    and any remaining arguments default to object.  The mutable default
    is harmless: it is copied immediately.
    """
    argstypelist = pre[:]
    if hasattr(func, '_initialannotation_'):
        for spec in func._initialannotation_:
            argstypelist.append(spec)
    if len(argstypelist) == 1:
        # only the class is known: guess from special method names
        argstypelist = guess_methannotation(func, argstypelist[0])
    missing = [object] * (func.func_code.co_argcount - len(argstypelist))
    return argstypelist + missing
def guess_methannotation(func, cls):
    """Guess argument annotations for a method of 'cls': if its name is a
    known special method, use that pattern with None slots filled by cls;
    otherwise just [cls]."""
    pattern = SPECIAL_METHODS.get(func.__name__)
    if pattern is None:
        return [cls]
    return [entry or cls for entry in pattern]
def should_expose(func):
    """Expose all special methods and all names not starting with '_',
    plus anything carrying an explicit _initialannotation_."""
    name = func.__name__
    if name in SPECIAL_METHODS:
        return True
    if not name.startswith('_'):
        return True
    return must_expose(func)
def must_expose(func):
    """A function is forcibly exposed iff it carries _initialannotation_."""
    return hasattr(func, '_initialannotation_')
def get_compiled_module(func, view=conftest.option.view,
                        use_boehm=False, exports=None, expose_all=True):
    """Translate func (plus everything in 'exports') to C and return the
    compiled, imported CPython extension module.

    'exports' may contain classes, functions, or (name, obj) tuples; the
    produced module gets an __all__ listing the exported names.  With
    'view' set, the call graph is displayed at several stages.
    """
    from pypy.translator.translator import TranslationContext
    from pypy.translator.backendopt.all import backend_optimizations
    from pypy.translator.c import gc
    from pypy.translator.c.genc import CExtModuleBuilder
    global _t # allow us to view later
    _t = t = TranslationContext(do_imports_immediately=False)
    ann = t.buildannotator(policy=RaymondAnnotatorPolicy())
    rtyper = t.buildrtyper()
    bk = rtyper.annotator.bookkeeper
    if not exports:
        exports = []
    ann.build_types(func, get_annotation(func))
    pyobj_options = {}
    for obj in exports:
        if isinstance(obj, tuple):
            # (name, obj) pairs: only the object matters here
            _, obj = obj
        if isinstance(obj, type):
            # an exported new-style class: wrap it and annotate the
            # methods and properties we intend to expose
            cls = obj
            clsdef = bk.getuniqueclassdef(cls)
            rtyper.add_wrapper(clsdef)
            for obj in cls.__dict__.values():
                if isinstance(obj, types.FunctionType):
                    if should_expose(obj) and expose_all or must_expose(obj):
                        if not ann.bookkeeper.getdesc(obj).querycallfamily():
                            # not annotated, so enforce it
                            ann.build_types(obj, get_annotation(obj, [cls]), complete_now=False)
                elif isinstance(obj, property):
                    for obj in obj.fget, obj.fset, obj.fdel:
                        if obj and not ann.bookkeeper.getdesc(obj).querycallfamily():
                            ann.build_types(obj, get_annotation(obj, [cls]), complete_now=False)
        elif isinstance(obj, types.FunctionType):
            if not ann.bookkeeper.getdesc(obj).querycallfamily():
                # not annotated, so enforce it
                ann.build_types(obj, get_annotation(obj), complete_now=False)
            if obj.__name__ == '__init__':
                # an exported module __init__ turns functions into methods
                pyobj_options['use_true_methods'] = True
        elif isinstance(obj, types.ClassType):
            raise TypeError, 'old-style classes are not supported:%r' % obj
    # synthesize __all__ from the exported names (minus __init__)
    all = []
    for obj in exports:
        if isinstance(obj, tuple):
            name, obj = obj
        else:
            name = obj.__name__
        if name != '__init__':
            all.append(name)
    exports = exports + [('__all__', all)]
    # re-annotate the entry point, presumably so that the annotation left
    # open by the complete_now=False calls above is completed -- confirm
    ann.build_types(func, get_annotation(func))
    if view:
        t.viewcg()
    rtyper.specialize()
    if view:
        t.viewcg()
    t.checkgraphs()
    gcpolicy = gc.RefcountingGcPolicy
    if use_boehm:
        gcpolicy = gc.BoehmGcPolicy
    backend_optimizations(t)
    if view:
        t.viewcg()
    cbuilder = CExtModuleBuilder(t, func, config=t.config, gcpolicy=gcpolicy)
    # explicit build of database
    db = cbuilder.build_database(exports=exports, pyobj_options=pyobj_options)
    cbuilder.generate_source(db)
    if view:
        t.viewcg()
    cbuilder.compile()
    return cbuilder.import_module()
def get_compiled(func, *args, **kwds):
    """Shortcut: compile func's module and fetch the compiled function
    of the same name back out of it."""
    compiled_mod = get_compiled_module(func, *args, **kwds)
    return getattr(compiled_mod, func.__name__)
# _______________________________________________-
# stubs for special annotation/rtyping
## these are not used for production right now.
def wrap(thing):
    """Identity when run untranslated; during translation the Entry
    registered below turns this into a PyObject conversion."""
    return thing
def unwrap(pyobj, typ):
    """Untranslated: assert the type and pass the object through.
    During translation the Entry registered below turns this into a
    plain representation conversion."""
    assert isinstance(pyobj, typ)
    return pyobj
unwrap._annspecialcase_ = 'specialize:arg(1)'
# XXX
# wrapping/unwrapping should be annotatable.
# Idea: create tunnel objects which share
# annotation across SomeObjectness, sharing a key!
class Entry(ExtRegistryEntry):
    """Translation-time behaviour of unwrap(): the result is annotated as
    an instance of the class passed as second argument; rtyping is just a
    variable conversion."""
    _about_ = unwrap

    def compute_result_annotation(self, s_wrapped, s_spec):
        # this will go away, much better way found!
        assert hasattr(s_spec, 'descriptions'), 'need a class in unwrap 2nd arg'
        descs = s_spec.descriptions
        assert len(descs) == 1, 'missing specialisation, classdesc not unique!'
        for desc in descs.keys():
            classdef = desc.getuniqueclassdef()
        return annmodel.SomeInstance(classdef)

    def specialize_call(self, hop):
        v_obj = hop.inputarg(hop.args_r[0], 0)
        return hop.llops.convertvar(v_obj, hop.args_r[0], hop.r_result)
class Entry(ExtRegistryEntry):
    """Translation-time behaviour of wrap(): the result is a SomeObject,
    rtyped as a conversion to the PyObject representation."""
    _about_ = wrap
    s_result_annotation = annmodel.SomeObject()

    def specialize_call(self, hop):
        assert len(hop.args_r) == 1, 'wrap() takes exactly one argument'
        v_obj, = hop.inputargs(*hop.args_r)
        return hop.llops.convertvar(v_obj, hop.args_r[0], robject.pyobj_repr)
# _______________________________________________
# creating our own setup function for the module
# this class *can* be used for faster access.
# the compiler anyway chews quite a bit on it...
class BuiltinHelper(object):
    """Caches the __builtin__ namespace as instance attributes, so that
    translated code can reach builtins through a prebuilt object.  The
    _setup() method is generated source: one assignment per builtin."""
    # the following would be much easier if we had
    # loop unrolling right inside the flowing process
    src = []
    src.append('def _setup(self):')
    src.append(' import __builtin__ as b')
    import __builtin__
    for name in dir(__builtin__):
        obj = getattr(__builtin__, name)
        if callable(obj) and hasattr(obj, '__name__'):
            src.append(' self.%s = b.%s' % (name, obj.__name__))
    src = '\n'.join(src)
    #print src
    exec src

    def __init__(self):
        self._initialized = False

    def _freeze_(self):
        # reset before freezing; returning False tells the translator the
        # object is not a frozen constant (it is filled lazily by get_bltn)
        self._initialized = False
        return False

    # keep the class namespace clean of the codegen temporaries
    del __builtin__, name, obj, src

bltn_singleton = BuiltinHelper()

def get_bltn():
    """Return the BuiltinHelper singleton, populating it on first use."""
    if not bltn_singleton._initialized:
        bltn_singleton._setup()
        bltn_singleton._initialized = True
    return bltn_singleton
def get_methodname(funcidx):
    """Stub replaced at translation time by a call to the C helper
    'postsetup_get_methodname' (see the Entry registered below).
    Untranslated it simply returns None."""
class Entry(ExtRegistryEntry):
    """Translation-time behaviour of get_methodname(): generates a call to
    the C helper 'postsetup_get_methodname'."""
    _about_ = get_methodname
    s_result_annotation = annmodel.SomeObject()

    def specialize_call(self, hop):
        v_idx, = hop.inputargs(*hop.args_r)
        if hop.args_r[0] <> rint.signed_repr:
            # normalize the index to a Signed before the C call
            v_idx = hop.llops.convertvar(v_idx,
                                         r_from=hop.args_r[0],
                                         r_to=rint.signed_repr)
        v_res = hop.llops.gencapicall('postsetup_get_methodname', [v_idx],
                                      resulttype=robject.pyobj_repr)
        return v_res
def build_method(funcidx):
    """Stub replaced at translation time by the C helper
    'postsetup_build_method' (see the Entry registered below).
    NOTE(review): callers pass two arguments (idx, cls), so executing
    this stub untranslated would raise TypeError -- presumably it is
    only ever run translated; confirm."""
class Entry(ExtRegistryEntry):
    """Translation-time behaviour of build_method(): generates a call to
    the C helper 'postsetup_build_method' with (index, type)."""
    _about_ = build_method
    s_result_annotation = annmodel.SomeObject()

    def specialize_call(self, hop):
        v_idx, v_type = hop.inputargs(*hop.args_r)
        if hop.args_r[0] <> rint.signed_repr:
            # normalize the index to a Signed before the C call
            v_idx = hop.llops.convertvar(v_idx,
                                         r_from=hop.args_r[0],
                                         r_to=rint.signed_repr)
        assert hop.args_r[1] == robject.pyobj_repr, (
            'build_method works for Python types only')
        v_res = hop.llops.gencapicall('postsetup_build_method', [v_idx, v_type],
                                      resulttype=robject.pyobj_repr)
        return v_res
def get_typedict(cls):
    """Stub replaced at translation time by the C helper
    'postsetup_get_typedict' (see the Entry registered below).
    Untranslated it simply returns None."""
class Entry(ExtRegistryEntry):
    """Translation-time behaviour of get_typedict(): generates a call to
    the C helper 'postsetup_get_typedict'."""
    _about_ = get_typedict
    s_result_annotation = annmodel.SomeObject()

    def specialize_call(self, hop):
        v_type, = hop.inputargs(*hop.args_r)
        assert hop.args_r[0] == robject.pyobj_repr, (
            'get_typedict works for Python types only')
        v_res = hop.llops.gencapicall('postsetup_get_typedict', [v_type],
                                      resulttype=robject.pyobj_repr)
        return v_res
def __init__(mod):
    """
    this module init function walks through all exported classes
    and tries to build real methods from the functions.
    properties are re-created, too.
    """
    import types
    bltn = get_bltn()
    # use the builtins cached by BuiltinHelper -- presumably the normal
    # builtins are not directly reachable here; confirm
    hasattr = bltn.hasattr
    isinstance = bltn.isinstance
    funcs = bltn.dict() # no hashing function for PyObject
    # map each exported built-in function to its method-table index
    idx = 0
    while 1:
        name = get_methodname(idx)
        if not name:
            break
        func = getattr(mod, name)
        funcs[func] = idx
        idx += 1
    for name in mod.__all__:
        obj = getattr(mod, name)
        if isinstance(obj, type) and hasattr(obj, '__self__'):
            # an exported class: patch its type dict in place, replacing
            # plain built-in functions by real (bound) methods
            cls = obj
            dic = get_typedict(cls)
            for name, value in dic.items():
                if isinstance(value, types.BuiltinFunctionType) and value in funcs:
                    idx = funcs[value]
                    meth = build_method(idx, cls)
                    dic[name] = meth
                elif isinstance(value, property):
                    # rebuild the property from methodified accessors
                    stuff = [value.fget, value.fset, value.fdel, value.__doc__]
                    for i, fn in enumerate(stuff):
                        if fn in funcs:
                            idx = funcs[fn]
                            stuff[i] = build_method(idx, cls)
                    if not stuff[-1]:
                        # use fget's doc if we don't have one
                        stuff[-1] = getattr(stuff[0], '__doc__', None)
                    dic[name] = property(*stuff)
class ExtCompiler(object):
    """Front-end for building a CPython extension module from an RPython
    entry point plus a set of exported functions and classes."""

    def __init__(self, startupfunc, use_true_methods=True, expose_all=True):
        self.startupfunc = startupfunc
        self.expose_all = expose_all
        self.exports = {}
        if use_true_methods:
            # export the module __init__ that rebuilds real methods
            self.export(__init__)

    def export(self, obj, name=None):
        # with an explicit name, store a (name, obj) pair -- this is the
        # format get_compiled_module() unpacks
        if name:
            self.exports[name] = (name, obj)
        else:
            self.exports[obj.__name__] = obj

    def build(self, modname):
        # NOTE(review): 'modname' is unused here -- confirm intended
        mod = get_compiled_module(self.startupfunc, exports=self.exports.values(),
                                  expose_all=self.expose_all)
        return mod
| Python |
"""
General-purpose reference tracker.
Usage: call track(obj).
"""
import autopath, sys, os
import gc
from pypy.translator.tool.graphpage import GraphPage, DotGen
from pypy.tool.uid import uid
MARKER = object()
class BaseRefTrackerPage(GraphPage):
    """Graph page showing a chain of tracked objects plus everything that
    refers to, or is referred to by, each of them.  Subclasses supply
    get_referrers/get_referents and may refine node/edge labels."""

    def compute(self, objectlist):
        assert objectlist[0] is MARKER
        self.objectlist = objectlist
        dotgen = DotGen('reftracker')
        id2typename = {}
        nodes = {}
        edges = {}
        def addedge(o1, o2):
            key = (uid(o1), uid(o2))
            edges[key] = self.edgelabel(o1, o2)
        for i in range(1, len(objectlist)):
            # one labelled box per explicitly-tracked object
            typename, s, linktext = self.formatobject(objectlist[i])
            word = '0x%x' % uid(objectlist[i])
            if linktext:
                self.links[word] = linktext
            s = '<%s> %s\\n%s' % (typename, word, s)
            nodename = 'node%d' % len(nodes)
            dotgen.emit_node(nodename, label=s, shape="box")
            nodes[uid(objectlist[i])] = nodename
            for o2 in self.get_referents(objectlist[i]):
                if o2 is None:
                    continue
                addedge(objectlist[i], o2)
                id2typename[uid(o2)] = type(o2).__name__
                # drop our own reference, presumably so that this frame
                # does not show up among the referrers -- confirm
                del o2
            for o2 in self.get_referrers(objectlist[i]):
                if o2 is None:
                    continue
                if type(o2) is list and o2 and o2[0] is MARKER:
                    # skip objectlists held by tracker pages themselves
                    continue
                addedge(o2, objectlist[i])
                id2typename[uid(o2)] = type(o2).__name__
                del o2
        for ids, label in edges.items():
            # materialize nodes for neighbours that are not tracked objects
            for id1 in ids:
                if id1 not in nodes:
                    nodename = 'node%d' % len(nodes)
                    word = '0x%x' % id1
                    s = '<%s> %s' % (id2typename[id1], word)
                    dotgen.emit_node(nodename, label=s)
                    nodes[id1] = nodename
                    self.links[word] = s
            id1, id2 = ids
            dotgen.emit_edge(nodes[id1], nodes[id2], label=label)
        self.source = dotgen.generate(target=None)

    def followlink(self, word):
        # clicking on an address extends the chain with that object
        id1 = int(word, 16)
        found = None
        objectlist = self.objectlist
        for i in range(1, len(objectlist)):
            for o2 in self.get_referents(objectlist[i]):
                if uid(o2) == id1:
                    found = o2
            for o2 in self.get_referrers(objectlist[i]):
                if uid(o2) == id1:
                    found = o2
        if found is not None:
            objectlist = objectlist + [found]
        else:
            print '*** NOTE: object not found'
        return self.newpage(objectlist)

    def formatobject(self, o):
        # default label: abbreviated repr(), full repr as hover link text
        s = repr(o)
        if len(s) > 50:
            linktext = s
            s = s[:20] + ' ... ' + s[-20:]
        else:
            linktext = ''
        return type(o).__name__, s, linktext

    def edgelabel(self, o1, o2):
        # subclass hook: describe how o1 reaches o2
        return ''

    def newpage(self, objectlist):
        return self.__class__(objectlist)
class RefTrackerPage(BaseRefTrackerPage):
    """Reference tracker over live CPython objects, using the gc module."""
    get_referrers = staticmethod(gc.get_referrers)
    get_referents = staticmethod(gc.get_referents)

    def edgelabel(self, o1, o2):
        # describe how o1 reaches o2: list/tuple index, dict key, or an
        # attribute/slot name found among o1's type descriptors
        slst = []
        if type(o1) in (list, tuple):
            for i in range(len(o1)):
                if o1[i] is o2:
                    slst.append('[%d]' % i)
        elif type(o1) is dict:
            for k, v in o1.items():
                if v is o2:
                    slst.append('[%r]' % (k,))
        else:
            for basetype in type(o1).__mro__:
                for key, value in basetype.__dict__.items():
                    if (type(value) is MemberDescriptorType or
                        type(value) is AttributeType):
                        try:
                            o1value = value.__get__(o1)
                        except:
                            # descriptor not set on this instance: ignore
                            pass
                        else:
                            if o1value is o2:
                                slst.append(str(key))
        return ', '.join(slst)
def track(*objs):
    """Invoke a dot+pygame object reference tracker."""
    page = RefTrackerPage([MARKER] + list(objs))
    # drop our own reference and collect garbage first, so that the
    # display starts from a clean set of referrers
    del objs
    gc.collect()
    gc.collect()
    page.display()
def track_server(*objs, **kwds):
    """Like track(), but serve the reference-tracker page over a graph
    server instead of displaying it locally.

    Keyword argument 'port' selects the server port (default 8888).
    """
    page = RefTrackerPage([MARKER] + list(objs))
    # drop our own reference and collect garbage first, as in track()
    del objs
    gc.collect()
    gc.collect()
    # idiom fix: dict.pop with a default instead of try/except KeyError
    port = kwds.pop('port', 8888)
    from pypy.translator.tool.graphserver import run_server
    run_server(page, port)
class _A(object):
    # helper: gives us the concrete type of a __slots__ member descriptor
    __slots__ = 'a'
class _B(object):
    pass
MemberDescriptorType = type(_A.a)
# type of the __dict__ getset descriptor of ordinary classes
AttributeType = type(_B.__dict__['__dict__'])
if __name__ == '__main__':
    # manual smoke test: track a dict that is referenced from a slot
    try:
        sys.path.remove(os.getcwd())
    except ValueError:
        pass
    class A(object):
        __slots__ = ['a']
    d = {"lskjadldjslkj": "adjoiadoixmdoiemdwoi"}
    a1 = A()
    a1.a = d
    track(d)
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ may be unset (e.g. when execfile'd): fall back to argv
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))

    # walk upwards until a directory named 'part' is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)

    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    # make sure the tree root takes priority on sys.path
    sys.path.insert(0, head)

    # map already-imported top-level modules living inside the tree to
    # their fully qualified dotted names
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): 'newname' is a bare basename and can never contain
        # a dot, so this condition looks always true and the munging below
        # unreachable -- confirm whether 'part + os.sep' or similar was meant
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod

    # register each module under its dotted name too, and hook it into
    # its parent package where missing
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)

    return partdir, this_dir
def __clone():
""" clone master version of autopath.py into all subdirs """
from os.path import join, walk
if not this_dir.endswith(join('pypy','tool')):
raise EnvironmentError("can only clone master version "
"'%s'" % join(pypydir, 'tool',_myname))
def sync_walker(arg, dirname, fnames):
if _myname in fnames:
fn = join(dirname, _myname)
f = open(fn, 'rwb+')
try:
if f.read() == arg:
print "checkok", fn
else:
print "syncing", fn
f = open(fn, 'w')
f.write(arg)
finally:
f.close()
s = open(join(pypydir, 'tool', _myname), 'rb').read()
walk(pypydir, sync_walker, s)
_myname = 'autopath.py'

# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    # run as a script: propagate this file into all subdirectories
    __clone()
| Python |
import inspect, types
from pypy.objspace.flow.model import Block, Link, FunctionGraph
from pypy.objspace.flow.model import safe_iterblocks, safe_iterlinks
from pypy.translator.tool.make_dot import DotGen, make_dot, make_dot_graphs
from pypy.annotation.model import SomePBC
from pypy.annotation.description import MethodDesc
from pypy.annotation.classdef import ClassDef
from pypy.tool.uid import uid
from dotviewer.graphpage import GraphPage
class VariableHistoryGraphPage(GraphPage):
    """ A GraphPage showing the history of variable bindings. """

    def compute(self, translator, name, info, caused_by, history, func_names):
        self.linkinfo = {}
        self.translator = translator
        self.func_names = func_names
        dotgen = DotGen('binding')
        # node '0' is the current binding, drawn in red
        label = "Most recent binding of %s\\n\\n%s" % (name, nottoowide(info))
        if info.origin is not None:
            label += "\\n" + self.createlink(info.origin, 'Originated at')
        if caused_by is not None:
            label += '\\n' + self.createlink(caused_by)
        if info.caused_by_merge is not None:
            data = 'unionof%r' % (info.caused_by_merge,)
            label += '\\n%s' % nottoowide(data)
        dotgen.emit_node('0', shape="box", color="red", label=label)
        # older bindings follow, each pointing at the next-newer one
        for n, (data, caused_by) in zip(range(len(history)), history):
            label = nottoowide(data)
            if data.origin is not None:
                label += "\\n" + self.createlink(data.origin, 'Originated at')
            if caused_by is not None:
                label += '\\n' + self.createlink(caused_by)
            dotgen.emit_node(str(n+1), shape="box", label=label)
            dotgen.emit_edge(str(n+1), str(n))
        self.source = dotgen.generate(target=None)

    def createlink(self, position_key, wording='Caused by a call from'):
        # make a clickable link name unique to this (graph, block, pos)
        graph, block, pos = position_key
        basename = self.func_names.get(graph, graph.name)
        linkname = basename
        n = 1
        while self.linkinfo.get(linkname, position_key) != position_key:
            n += 1
            linkname = '%s_%d' % (basename, n)
        self.linkinfo[linkname] = position_key
        # It would be nice to get the block name somehow
        blockname = block.__class__.__name__
        self.links[linkname] = '%s, %s, position %r:\n%s' % (basename,
                        blockname, pos, block.operations[pos])
        return '%s %s' % (wording, linkname)

    def followlink(self, funcname):
        graph, block, pos = self.linkinfo[funcname]
        # It would be nice to focus on the block
        return FlowGraphPage(self.translator, [graph], self.func_names)
def graphsof(translator, func):
    """Return the list of flow graphs for func, building one if necessary."""
    if isinstance(func, (FunctionGraph, IncompleteGraph)):
        return [func] # already a graph
    graphs = []
    if translator.annotator:
        funcdesc = translator.annotator.bookkeeper.getdesc(func)
        graphs = funcdesc._cache.values()
    if not graphs:
        # build a new graph, mark it as "to be returned to the annotator the
        # next time it asks for a graph for the same function"
        # (note that this buildflowgraph() call will return the same graph
        # if called again, from the _prebuilt_graphs cache)
        graph = translator.buildflowgraph(func)
        translator._prebuilt_graphs[func] = graph
        graphs = [graph]
    return graphs
class FlowGraphPage(GraphPage):
    """ A GraphPage showing a Flow Graph (or a few flow graphs).
    """

    def compute(self, translator, functions=None, func_names=None):
        self.translator = translator
        self.annotator = getattr(translator, 'annotator', None)
        self.func_names = func_names or {}
        if functions:
            graphs = []
            for func in functions:
                graphs += graphsof(translator, func)
        else:
            graphs = self.translator.graphs
        if not graphs and hasattr(translator, 'entrypoint'):
            # nothing yet: show at least the entry point's graph
            graphs = list(graphs)
            graphs += graphsof(translator, translator.entrypoint)
        gs = [(graph.name, graph) for graph in graphs]
        gs.sort(lambda (_, g), (__ ,h): cmp(g.tag, h.tag))
        if self.annotator and self.annotator.blocked_graphs:
            # color the blocks that could not be annotated
            for block, was_annotated in self.annotator.annotated.items():
                if not was_annotated:
                    block.blockcolor = "red"
        if graphs:
            name = graphs[0].name+"_graph"
        else:
            name = 'no_graph'
        self.source = make_dot_graphs(name, gs, target=None)
        # make the dictionary of links -- one per annotated variable
        self.binding_history = {}
        self.current_value = {}
        self.caused_by = {}
        if self.annotator:
            for var, s_value in self.annotator.bindings.items():
                info = '%s: %s' % (var.name, s_value)
                annotationcolor = getattr(s_value, 'annotationcolor', None)
                self.links[var.name] = info, annotationcolor
                self.current_value[var.name] = s_value
                self.caused_by[var.name] = (
                    self.annotator.binding_caused_by.get(var))
            for var, history in self.annotator.bindingshistory.items():
                cause_history = (
                    self.annotator.binding_cause_history.get(var, []))
                self.binding_history[var.name] = zip(history, cause_history)
        from pypy.jit.hintannotator.annotator import HintAnnotator
        if isinstance(self.annotator, HintAnnotator):
            # no concretetype information to display for hint-annotation
            return
        # collect every variable appearing in the shown graphs
        vars = {}
        for graph in graphs:
            for block in safe_iterblocks(graph):
                if isinstance(block, Block):
                    for v in block.getvariables():
                        vars[v] = True
            for link in safe_iterlinks(graph):
                if isinstance(link, Link):
                    for v in link.getextravars():
                        vars[v] = True
        for var in vars:
            if hasattr(var, 'concretetype'):
                #info = self.links.get(var.name, var.name)
                #info = '(%s) %s' % (var.concretetype, info)
                info = str(var.concretetype)
                if info == 'Void': # gray out Void variables
                    info = info, (160,160,160)
                self.links[var.name] = info

    def followlink(self, varname):
        # clicking on a variable name shows its binding history
        cur_value = self.current_value[varname]
        caused_by = self.caused_by[varname]
        history = list(self.binding_history.get(varname, []))
        history.reverse()
        return VariableHistoryGraphPage(self.translator, varname, cur_value,
                                        caused_by, history, self.func_names)
class SingleGraphPage(FlowGraphPage):
    """ A GraphPage showing a single precomputed FlowGraph."""
    def compute(self, graph):
        # no translator available: reuse FlowGraphPage with just this graph
        return FlowGraphPage.compute(self, None, [graph])
def nottoowide(text, width=72):
    """Greedily wrap str(text) at word boundaries to at most 'width'
    characters per line, joining the lines with a literal backslash-n
    (the newline escape understood by dot labels)."""
    words = str(text).split(' ')
    wrapped = []
    current = words[0]
    for word in words[1:]:
        if len(current) + len(word) < width:
            current = current + ' ' + word
        else:
            wrapped.append(current)
            current = word
    wrapped.append(current)
    return '\\n'.join(wrapped)
class ClassDefPage(GraphPage):
    """A GraphPage showing the attributes of a class.
    """
    def compute(self, translator, cdef):
        self.translator = translator
        dotgen = DotGen(cdef.shortname, rankdir="LR")

        def writecdef(cdef):
            # one box per classdef: its name, then instance-level
            # attributes, then read-only (class-level) attributes
            lines = [cdef.name, '']
            attrs = cdef.attrs.items()
            attrs.sort()
            def writeadefs(prefix, classattrs):
                for name, attrdef in attrs:
                    if bool(attrdef.readonly) == bool(classattrs):
                        s_value = attrdef.s_value
                        linkname = name
                        info = s_value
                        if (classattrs and isinstance(s_value, SomePBC)
                            and s_value.getKind() == MethodDesc):
                            # display methods with a '()' suffix and a
                            # condensed description of their descs
                            name += '()'
                            info = 'SomePBC(%s)' % ', '.join(
                                ['method %s.%s' % (
                                desc.originclassdef.shortname,
                                desc.name) for desc in s_value.descriptions],)
                        lines.append(name)
                        self.links[linkname] = '%s.%s: %s' % (prefix, name, info)
            prefix = cdef.shortname
            writeadefs(prefix + '()', False)
            lines.append('')
            writeadefs(prefix, True)
            dotgen.emit_node(nameof(cdef), color="red", shape="box",
                             label='\n'.join(lines))
        # walk up the base classes, linking each one to the previous
        prevcdef = None
        while cdef is not None:
            writecdef(cdef)
            if prevcdef:
                dotgen.emit_edge(nameof(cdef), nameof(prevcdef), color="red")
            prevcdef = cdef
            cdef = cdef.basedef
        self.source = dotgen.generate(target=None)

    def followlink(self, name):
        # attribute links carry no further detail: stay on this page
        return self
class BaseTranslatorPage(GraphPage):
    """Abstract GraphPage for showing some of the call graph between functions
    and possibly the class hierarchy."""

    def allgraphs(self):
        return list(self.translator.graphs)

    def graph_name(self, *args):
        raise NotImplementedError

    def compute(self, translator, *args):
        self.translator = translator
        self.object_by_name = {}
        self.name_by_object = {}
        dotgen = DotGen(self.graph_name(*args))
        dotgen.emit('mclimit=15.0')
        self.do_compute(dotgen, *args)  # subclass hook fills the graph
        self.source = dotgen.generate(target=None)
        # link the function names to the individual flow graphs
        for name, obj in self.object_by_name.items():
            if isinstance(obj, ClassDef):
                data = repr(obj)
            elif isinstance(obj, FunctionGraph):
                graph = obj
                data = graph.name
                if hasattr(graph, 'func'):
                    data += ':%d' % graph.func.func_code.co_firstlineno
                if hasattr(graph, 'source'):
                    data += '\n%s' % graph.source.split('\n', 1)[0]
            else:
                continue
            self.links.setdefault(name, data)

    def get_blocked_graphs(self, graphs):
        """Return (as dict keys) the graphs containing blocks that the
        annotator has seen but not yet annotated."""
        translator = self.translator
        blocked_graphs = {}
        if translator.annotator:
            # don't use translator.annotator.blocked_graphs here because
            # it is not populated until the annotator finishes.
            annotated = translator.annotator.annotated
            for graph in graphs:
                for block in graph.iterblocks():
                    if annotated.get(block) is False:
                        blocked_graphs[graph] = True
        return blocked_graphs

    def compute_class_hieararchy(self, dotgen):
        # show the class hierarchy
        # NOTE(review): method name misspells 'hierarchy'; kept because
        # subclasses call it under this exact name
        if self.translator.annotator:
            dotgen.emit_node(nameof(None), color="red", shape="octagon",
                             label="Root Class\\nobject")
            for classdef in self.translator.annotator.getuserclassdefinitions():
                data = self.labelof(classdef, classdef.shortname)
                dotgen.emit_node(nameof(classdef), label=data, shape="box")
                dotgen.emit_edge(nameof(classdef.basedef), nameof(classdef))

    def labelof(self, obj, objname):
        # produce a display name unique on this page, remembering the
        # name<->object mapping in both directions
        name = objname
        i = 1
        while name in self.object_by_name:
            i += 1
            name = '%s__%d' % (objname, i)
        self.object_by_name[name] = obj
        self.name_by_object[obj] = name
        return name

    def followlink(self, name):
        if name.endswith('...'):
            # '...' links jump to that function's localized call graph
            obj = self.object_by_name[name]
            return LocalizedCallGraphPage(self.translator, [obj])
        obj = self.object_by_name[name]
        if isinstance(obj, ClassDef):
            return ClassDefPage(self.translator, obj)
        else:
            return FlowGraphPage(self.translator, [obj], self.name_by_object)
class TranslatorPage(BaseTranslatorPage):
    """A GraphPage showing the call graph between functions
    as well as the class hierarchy."""

    def graph_name(self, huge=0):
        return 'translator'

    def do_compute(self, dotgen, huge=100):
        translator = self.translator
        # show the call graph
        callgraph = translator.callgraph.values()
        graphs = self.allgraphs()
        if len(graphs) > huge:
            # too many graphs for one page: fall back to the localized
            # call-graph view centered on the first graph
            assert graphs, "no graph to show!"
            LocalizedCallGraphPage.do_compute.im_func(self, dotgen, [graphs[0]])
            return
        blocked_graphs = self.get_blocked_graphs(graphs)
        highlight_graphs = getattr(translator, 'highlight_graphs', {}) # XXX
        dotgen.emit_node('entry', fillcolor="green", shape="octagon",
                         label="Translator\\nEntry Point")
        for graph in graphs:
            data = self.labelof(graph, graph.name)
            if graph in blocked_graphs:
                kw = {'fillcolor': 'red'}
            elif graph in highlight_graphs:
                kw = {'fillcolor': '#ffcccc'}
            else:
                kw = {}
            dotgen.emit_node(nameof(graph), label=data, shape="box", **kw)
        if graphs:
            dotgen.emit_edge('entry', nameof(graphs[0]), color="green")
        for g1, g2 in callgraph: # captured above (multithreading fun)
            dotgen.emit_edge(nameof(g1), nameof(g2))
        # show the class hierarchy
        self.compute_class_hieararchy(dotgen)
class LocalizedCallGraphPage(BaseTranslatorPage):
    """A GraphPage showing the localized call graph for a function,
    that means just including direct callers and callees"""

    def graph_name(self, centers):
        if centers:
            return 'LCG_%s' % nameof(centers[0])
        else:
            return 'EMPTY'

    def do_compute(self, dotgen, centers):
        centers = dict.fromkeys(centers)
        translator = self.translator
        # collect the neighbourhood: every graph that calls, or is
        # called by, one of the center graphs
        graphs = {}
        for g1, g2 in translator.callgraph.values():
            if g1 in centers or g2 in centers:
                graphs[g1] = True
                graphs[g2] = True
        # show all edges that exist between these graphs
        for g1, g2 in translator.callgraph.values():
            if g1 in graphs and g2 in graphs:
                dotgen.emit_edge(nameof(g1), nameof(g2))
        graphs = graphs.keys()
        # show the call graph
        blocked_graphs = self.get_blocked_graphs(graphs)
        highlight_graphs = getattr(translator, 'highlight_graphs', {}) # XXX
        for graph in graphs:
            data = self.labelof(graph, graph.name)
            if graph in blocked_graphs:
                kw = {'fillcolor': 'red'}
            elif graph in highlight_graphs:
                kw = {'fillcolor': '#ffcccc'}
            else:
                kw = {}
            dotgen.emit_node(nameof(graph), label=data, shape="box", **kw)
            if graph not in centers:
                # non-center graphs get an extra '...' node linking to
                # their own localized call graph
                lcg = 'LCG_%s' % nameof(graph)
                label = data+'...'
                dotgen.emit_node(lcg, label=label)
                dotgen.emit_edge(nameof(graph), lcg)
                self.links[label] = 'go to its localized call graph'
                self.object_by_name[label] = graph
class ClassHierarchyPage(BaseTranslatorPage):
    """A GraphPage showing the class hierarchy."""
    def graph_name(self):
        return 'class_hierarchy'

    def do_compute(self, dotgen):
        translator = self.translator
        # show the class hierarchy
        self.compute_class_hieararchy(dotgen)
def nameof(obj, cache={}):
    """Return a stable, unique dot-node name for obj.

    NB. the purpose of the cache is not performance, but to ensure that
    two objects that compare equal get the same name.
    """
    if obj not in cache:
        cache[obj] = '%s__0x%x' % (getattr(obj, '__name__', ''), uid(obj))
    return cache[obj]
# ____________________________________________________________
#
# Helpers to try to show a graph when we only have a Block or a Link
def try_show(obj):
    """Best-effort display of a graph, link or block.

    For a bare Block we do not know its owning graph, so we search the
    gc referrers for either the FunctionGraph (and show it) or further
    Links/Blocks, falling back to an IncompleteGraph of all blocks found.
    """
    if isinstance(obj, FunctionGraph):
        obj.show()
    elif isinstance(obj, Link):
        try_show(obj.prevblock)
    elif isinstance(obj, Block):
        import gc
        pending = [obj] # pending blocks
        seen = {obj: True, None: True}
        # note: 'pending' grows while we iterate over it
        for x in pending:
            for y in gc.get_referrers(x):
                if isinstance(y, FunctionGraph):
                    y.show()
                    return
                elif isinstance(y, Link):
                    block = y.prevblock
                    if block not in seen:
                        pending.append(block)
                        seen[block] = True
        graph = IncompleteGraph(pending)
        SingleGraphPage(graph).display()
    else:
        raise TypeError("try_show(%r object)" % (type(obj).__name__,))
class IncompleteGraph:
    """Stand-in for a FunctionGraph when only a loose collection of
    blocks is known (see try_show)."""
    name = '(incomplete graph)'
    tag = None

    def __init__(self, bunch_of_blocks):
        self.bunch_of_blocks = bunch_of_blocks

    def iterblocks(self):
        for block in self.bunch_of_blocks:
            yield block
| Python |
class SimpleTaskEngine(object):
    """Tiny goal/dependency driver: collects task_* methods, plans an
    execution order honouring their task_deps, and runs them.

    Dependency prefixes understood in task_deps: '?dep' is a suggestion
    (can be disabled via the skip list), '??dep' is optional (honoured
    only when dep is itself among the goals).
    """

    def __init__(self):
        self._plan_cache = {}
        self.tasks = tasks = {}
        # collect all methods named task_<name> as available tasks
        for name in dir(self):
            if name.startswith('task_'):
                task_name = name[len('task_'):]
                task = getattr(self, name)
                assert callable(task)
                task_deps = getattr(task, 'task_deps', [])
                tasks[task_name] = task, task_deps

    def _plan(self, goals, skip=[]):
        """Return the goals plus their dependencies, topologically sorted
        so that dependencies run first.  Results are cached per
        (goals, skip) pair."""
        skip = [toskip for toskip in skip if toskip not in goals]
        key = (tuple(goals), tuple(skip))
        try:
            return self._plan_cache[key]
        except KeyError:
            pass
        constraints = []
        def subgoals(task_name):
            # yield the effective dependencies of a task, resolving the
            # '?' / '??' prefixes
            taskcallable, deps = self.tasks[task_name]
            for dep in deps:
                if dep.startswith('??'): # optional
                    dep = dep[2:]
                    if dep not in goals:
                        continue
                if dep.startswith('?'): # suggested
                    dep = dep[1:]
                    if dep in skip:
                        continue
                yield dep
        seen = {}
        def consider(subgoal):
            # record ordering constraints [task, dep], transitively
            if subgoal in seen:
                return
            else:
                seen[subgoal] = True
            constraints.append([subgoal])
            deps = subgoals(subgoal)
            for dep in deps:
                constraints.append([subgoal, dep])
                consider(dep)
        for goal in goals:
            consider(goal)
        #sort: repeatedly pick a task that nothing else still depends on
        plan = []
        while True:
            cands = dict.fromkeys([constr[0] for constr in constraints if constr])
            if not cands:
                break
            for cand in cands:
                for constr in constraints:
                    if cand in constr[1:]:
                        break
                else:
                    break
            else:
                # NOTE(review): message typo 'dependecy' left untouched
                # (runtime string; fixing it would change behavior)
                raise RuntimeError, "circular dependecy"
            plan.append(cand)
            for constr in constraints:
                if constr and constr[0] == cand:
                    del constr[0]
        plan.reverse()
        self._plan_cache[key] = plan
        return plan

    def _depending_on(self, goal):
        # direct reverse dependencies of goal
        l = []
        for task_name, (task, task_deps) in self.tasks.iteritems():
            if goal in task_deps:
                l.append(task_name)
        return l

    def _depending_on_closure(self, goal):
        # transitive closure of _depending_on, including goal itself
        d = {}
        def track(goal):
            if goal in d:
                return
            d[goal] = True
            for depending in self._depending_on(goal):
                track(depending)
        track(goal)
        return d.keys()

    def _execute(self, goals, *args, **kwds):
        """Plan and run the goals; returns the last task's result."""
        task_skip = kwds.get('task_skip', [])
        res = None
        for goal in self._plan(goals, skip=task_skip):
            taskcallable, _ = self.tasks[goal]
            self._event('pre', goal, taskcallable)
            try:
                res = self._do(goal, taskcallable, *args, **kwds)
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                self._error(goal)
                raise
            self._event('post', goal, taskcallable)
        return res

    # hooks for subclasses:
    def _do(self, goal, func, *args, **kwds):
        return func()

    def _event(self, kind, goal, func):
        pass

    def _error(self, goal):
        pass
| Python |
"""
Reference tracker for lltype data structures.
"""
import autopath, sys, os
import gc
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.memory.gcheader import header2obj
from pypy.translator.tool.reftracker import BaseRefTrackerPage, MARKER
from pypy.tool.uid import uid
class LLRefTrackerPage(BaseRefTrackerPage):
    """Reference-tracker page specialized for lltype data structures."""

    def compute(self, objectlist, size_gc_header):
        # size_gc_header, when not None, lets us walk from an object to
        # the GC header stored in front of it
        self.size_gc_header = size_gc_header
        return BaseRefTrackerPage.compute(self, objectlist)

    def formatobject(self, o):
        """Return (typename, body-text, '') used to label *o*'s node."""
        lines = []
        for name, value in self.enum_content(o):
            if not isinstance(value, str):
                # non-leaf values are shown by their uid
                value = '0x%x' % uid(value)
            lines.append('%s = %s' % (name, value))
        s = '\n'.join(lines)
        t = shorttypename(lltype.typeOf(o))
        return t, s, ''

    def get_referrers(self, o):
        return [] # not implemented

    def get_referents(self, o):
        """Yield the lltype objects that *o* points to."""
        for name, value in self.enum_content(o):
            if not isinstance(value, str):
                yield value

    def edgelabel(self, o1, o2):
        """Label an edge with the field name(s) of o1 that lead to o2."""
        slst = []
        for name, value in self.enum_content(o1):
            if value is o2:
                slst.append(name)
        return '/'.join(slst)

    def newpage(self, objectlist):
        return self.__class__(objectlist, self.size_gc_header)

    def normalize(self, o):
        """Map a GC header object back to the object it belongs to,
        when headers are being tracked; otherwise return *o* as-is."""
        if self.size_gc_header is not None:
            try:
                return header2obj[o]._obj
            except (KeyError, TypeError):
                pass
        return o

    def enum_content(self, o, name='', with_header=True):
        """Yield (label, value) pairs describing the content of *o*.

        Values are either strings (leaf data, formatted) or further
        lltype objects the caller may recurse into / draw edges to.
        """
        # XXX clean up
        T = lltype.typeOf(o)
        if (self.size_gc_header is not None and with_header
            and isinstance(T, lltype.ContainerType) and T._gckind == 'gc'):
            # step back to the GC header placed in front of the object
            adr = llmemory.cast_ptr_to_adr(o._as_ptr())
            adr -= self.size_gc_header
            o = adr.get()._obj
            T = lltype.typeOf(o)
        if isinstance(T, lltype.Struct):
            try:
                gcobjptr = header2obj[o]
                fmt = '(%s)'     # parenthesize the fields of a header struct
            except KeyError:
                gcobjptr = None
                fmt = '%s'
            for name in T._names:
                # note: 'name' is deliberately rebound by the inner loop
                for name, value in self.enum_content(getattr(o, name), name,
                                                     with_header=False):
                    yield fmt % (name,), value
            if gcobjptr:
                GCT = lltype.typeOf(gcobjptr)
                if self.size_gc_header is not None:
                    for sub in self.enum_content(gcobjptr._obj,
                                                 with_header=False):
                        yield sub
                else:
                    # display as a link to avoid the same data showing up
                    # twice in the graph
                    yield 'header of', gcobjptr._obj
        elif isinstance(T, lltype.Array):
            for index, o1 in enumerate(o.items):
                for sub in self.enum_content(o1, str(index)):
                    yield sub
        elif isinstance(T, lltype.Ptr):
            if not o:
                yield name, 'null'
            else:
                yield name, self.normalize(lltype.normalizeptr(o)._obj)
        elif isinstance(T, lltype.OpaqueType) and hasattr(o, 'container'):
            # opaque wrapper: recurse into the wrapped container
            T = lltype.typeOf(o.container)
            yield 'container', '<%s>' % (shorttypename(T),)
            for sub in self.enum_content(o.container, name, with_header=False):
                yield sub
        elif T == llmemory.Address:
            if not o:
                yield name, 'NULL'
            else:
                addrof = o.get()
                T1 = lltype.typeOf(addrof)
                if (isinstance(T1, lltype.Ptr) and
                    isinstance(T1.TO, lltype.Struct) and
                    addrof._obj in header2obj):
                    # address of a GC header: link to the real object
                    yield name + ' @hdr', self.normalize(addrof._obj)
                else:
                    yield name + ' @', self.normalize(o.ob._obj)
                    if o.offset:
                        yield '... offset', str(o.offset)
        else:
            # any other leaf value: just show its repr-ish string
            yield name, str(o)
def shorttypename(T):
    """Return a compact 'MetaTypeName typename' label for an lltype."""
    kind = T.__class__.__name__
    name = getattr(T, '__name__', '')
    return '%s %s' % (kind, name)
def track(*ll_objects):
    """Invoke a dot+pygame object reference tracker.

    A GCHeaderOffset passed among *ll_objects* is not tracked itself but
    supplies the gc-header size used to normalize the other objects.
    """
    roots = [MARKER]
    size_gc_header = None
    seen = {}                   # id(obj) -> obj; also keeps objects alive
    for candidate in ll_objects:
        if isinstance(candidate, llmemory.GCHeaderOffset):
            size_gc_header = candidate
        elif candidate is not None and id(candidate) not in seen:
            seen[id(candidate)] = candidate
            roots.append(candidate)
    page = LLRefTrackerPage(roots, size_gc_header)
    # auto-expand one level of references, for now
    for _ in range(1):
        page = page.content()
        for tracked in roots[1:]:       # snapshot: roots grows below
            for name, value in page.enum_content(tracked):
                if isinstance(value, str) or id(value) in seen:
                    continue
                seen[id(value)] = value
                roots.append(value)
        page = page.newpage(roots)
    page.display()
if __name__ == '__main__':
    # demo: build a small linked structure of GcStructs and an array,
    # then open the tracker on it
    try:
        # avoid picking up a local module shadowing stdlib names
        sys.path.remove(os.getcwd())
    except ValueError:
        pass
    T = lltype.GcArray(lltype.Signed)
    # S is self-referencing, so declare it via a forward reference
    S = lltype.GcForwardReference()
    S.become(lltype.GcStruct('S', ('t', lltype.Ptr(T)),
                             ('next', lltype.Ptr(S))))
    s = lltype.malloc(S)
    s.next = lltype.malloc(S)
    s.next.t = lltype.malloc(T, 5)
    s.next.t[1] = 123
    track(s)
| Python |
# empty
| Python |
import os, pickle, sys, time, re
# human-readable column titles for the stat keys that have one
STAT2TITLE = {
    'stat:st_mtime': "date",
    'exe_name': "executable",
}

def stat2title(s):
    """Map a stat key to its display title.

    'bench:<name>' keys display as plain '<name>'; anything else is
    looked up in STAT2TITLE and defaults to the key itself.
    """
    if s.startswith('bench:'):
        return s[6:]
    return STAT2TITLE.get(s, s)
class BenchmarkResultSet(object):
    """Collection of BenchmarkResult objects, keyed by executable path."""

    def __init__(self, max_results=10):
        self.benchmarks = {}            # exe path -> BenchmarkResult
        self.max_results = max_results  # per-benchmark run cap, passed on

    def result(self, exe, allowcreate=False):
        """Return the BenchmarkResult for *exe*.

        With allowcreate=False a missing entry raises KeyError; with
        allowcreate=True it is created on demand.
        """
        if exe in self.benchmarks or not allowcreate:
            return self.benchmarks[exe]
        else:
            r = self.benchmarks[exe] = BenchmarkResult(exe, self.max_results)
            return r

    def txt_summary(self, stats, **kw):
        """Render a text table of *stats*, one row per executable.

        Keyword options: 'sortby' (stat key, default mtime), 'reverse',
        'filteron' (predicate on BenchmarkResult), 'relto' (an exe path
        that benchmark values are shown relative to, as a factor).
        Returns the table as a list of strings, one per line.
        """
        sortkey = kw.get('sortby', 'stat:st_mtime')
        lst = self.benchmarks.values()
        lst.sort(key=lambda x:x.getstat(sortkey, None), reverse=kw.get('reverse', False))
        if 'filteron' in kw:
            filteron = kw['filteron']
            lst = [r for r in lst if filteron(r)]
        relto = kw.get('relto', None)
        # cells are (text, alignment) pairs: -1 left, 0 header, 1 right
        table = [[(stat2title(s),0) for s in stats]]
        for r in lst:
            row = []
            for stat in stats:
                if stat.startswith('bench:'):
                    benchname = stat[6:]
                    if r.getstat(stat, None) is None:
                        row.append(('XXX',-1))      # no result available
                    elif relto:
                        # factor relative to the reference executable;
                        # inverted when lower values are better
                        factor = self.result(relto).getstat(stat)/r.getstat(stat)
                        if not r.asc_goods[benchname]:
                            factor = 1/factor
                        s, f = r.fmtstat(stat)
                        row.append((s + ' (%6.2fx)'%factor, f))
                    else:
                        row.append(r.fmtstat(stat))
                else:
                    row.append(r.fmtstat(stat))
            table.append(row)
        # column widths: the widest cell per column
        widths = [0 for thing in stats]
        for row in table:
            for i, cell in enumerate(row):
                widths[i] = max(len(cell[0]), widths[i])
        concretetable = []
        concreterow = []
        for w, cell in zip(widths, table[0]):
            concreterow.append(cell[0].center(w))
        concretetable.append(' '.join(concreterow))
        for row in table[1:]:
            concreterow = []
            for w, cell in zip(widths, row):
                # a negative field width in %*s left-justifies the cell
                concreterow.append("%*s"%(cell[1]*w, cell[0]))
            concretetable.append(' '.join(concreterow))
        return concretetable
class BenchmarkResult(object):
def __init__(self, exe, max_results=10):
self.max_results = max_results
self.exe_stat = os.stat(exe)
self.exe_name = exe
self.codesize = os.popen('size "%s" | tail -n1 | cut -f1'%(exe,)).read().strip()
try:
self.pypy_rev = int(os.popen(
exe + ' -c "import sys; print sys.pypy_version_info[-1]" 2>/dev/null').read().strip())
except ValueError:
self.pypy_rev = -1
self.best_benchmarks = {}
self.benchmarks = {}
self.asc_goods = {}
self.run_counts = {}
def run_benchmark(self, benchmark, verbose=False):
self.asc_goods[benchmark.name] = benchmark.asc_good
if self.run_counts.get(benchmark.name, 0) > self.max_results:
return
if verbose:
print 'running', benchmark.name, 'for', self.exe_name,
sys.stdout.flush()
new_result = benchmark.run(self.exe_name)
if verbose:
print new_result
self.run_counts[benchmark.name] = self.run_counts.get(benchmark.name, 0) + 1
if new_result == '-FAILED-':
return
self.benchmarks.setdefault(benchmark.name, []).append(new_result)
if benchmark.name in self.best_benchmarks:
old_result = self.best_benchmarks[benchmark.name]
if benchmark.asc_good:
new_result = max(new_result, old_result)
else:
new_result = min(new_result, old_result)
self.best_benchmarks[benchmark.name] = new_result
def getstat(self, *args):
# oh for supplied-p!
return_default = False
if len(args) == 1:
stat, = args
else:
stat, default = args
return_default = True
if hasattr(self, stat):
return getattr(self, stat)
statkind, statdetail = stat.split(':')
if statkind == 'stat':
return getattr(self.exe_stat, statdetail)
elif statkind == 'bench':
if return_default:
return self.best_benchmarks.get(statdetail, default)
else:
return self.best_benchmarks[statdetail]
else:
1/0
def fmtstat(self, *args):
stat = args[0]
statvalue = self.getstat(*args)
if stat == 'stat:st_mtime':
return time.ctime(statvalue), -1
elif stat == 'exe_name':
return os.path.basename(statvalue), -1
elif stat.startswith('bench:'):
from pypy.translator.benchmark import benchmarks
statkind, statdetail = stat.split(':', 1)
b = benchmarks.BENCHMARKS_BY_NAME[statdetail]
return "%8.2f%s"%(statvalue, b.units), 1
elif stat == 'pypy_rev':
return str(statvalue), 1
else:
return str(statvalue), -1
def summary(self, stats):
return [self.getstat(stat) for stat in stats]
def is_stable(self, name):
try:
return self.n_results[name] >= self.max_results
except:
return False
if __name__ == '__main__':
import autopath
from pypy.translator.benchmark import benchmarks, result
import cPickle
if os.path.exists('foo.pickle'):
s = cPickle.load(open('foo.pickle', 'rb'))
else:
s = result.BenchmarkResultSet(4)
for exe in sys.argv[1:]:
r = s.result(exe)
r.run_benchmark(benchmarks.BENCHMARKS_BY_NAME['richards'])
r.run_benchmark(benchmarks.BENCHMARKS_BY_NAME['pystone'])
cPickle.dump(s, open('foo.pickle', 'wb'))
stats = ['stat:st_mtime', 'exe_name', 'bench:richards', 'bench:pystone']
for row in s.txt_summary(stats, sortby="exe_name", reverse=True, relto="/usr/local/bin/python2.4"):
print row
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ is not defined when the module is exec'd directly;
        # fall back to the script's own path
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # walk upwards until a directory component named *part* is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)
    pypy_root = os.path.join(head, '')
    # make the parent of the package directory the first sys.path entry
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)
    # also register already-imported top-level modules that physically
    # live inside the package tree under their dotted names, so both
    # spellings refer to the same module object
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): newname is derived from a file basename and so
        # normally contains no dot; this startswith(part + '.') filter
        # looks like it can never match -- confirm whether the loop
        # below is effectively dead
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                # attach the module as an attribute of its parent package
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
""" clone master version of autopath.py into all subdirs """
from os.path import join, walk
if not this_dir.endswith(join('pypy','tool')):
raise EnvironmentError("can only clone master version "
"'%s'" % join(pypydir, 'tool',_myname))
def sync_walker(arg, dirname, fnames):
if _myname in fnames:
fn = join(dirname, _myname)
f = open(fn, 'rwb+')
try:
if f.read() == arg:
print "checkok", fn
else:
print "syncing", fn
f = open(fn, 'w')
f.write(arg)
finally:
f.close()
s = open(join(pypydir, 'tool', _myname), 'rb').read()
walk(pypydir, sync_walker, s)
# the filename under which this helper is cloned around the tree
_myname = 'autopath.py'

# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    # running the master copy directly re-clones it everywhere
    __clone()
| Python |
#
| Python |
import os, sys, time, pickle, re, py
class BenchmarkFailed(Exception):
    """Raised when a benchmark run fails or its output cannot be parsed."""
    pass
# how to invoke and parse the two built-in CPython benchmarks;
# *_ASCENDING_GOOD says whether larger result values are better
PYSTONE_CMD = 'from test import pystone;pystone.main(%s)'
PYSTONE_PATTERN = 'This machine benchmarks at'
PYSTONE_ASCENDING_GOOD = True   # pystones/second: higher is better

RICHARDS_CMD = 'from richards import *;main(iterations=%d)'
RICHARDS_PATTERN = 'Average time per iteration:'
RICHARDS_ASCENDING_GOOD = False  # ms/iteration: lower is better
def get_result(txt, pattern):
    """Extract the numeric result from benchmark output *txt*.

    Finds the first line starting with *pattern* and returns the word
    immediately following the pattern, as a float.  Raises
    BenchmarkFailed when no line matches.
    """
    matching = [l for l in txt.split('\n') if l.startswith(pattern)]
    if not matching:
        raise BenchmarkFailed
    line = matching[0]
    return float(line.split()[len(pattern.split())])
class Benchmark(object):
    """A named benchmark: a runner callable plus presentation metadata."""

    def __init__(self, name, runner, asc_good, units, check=lambda:True):
        self.name = name            # public benchmark name
        self._run = runner          # callable(exe_path) -> numeric result
        self.asc_good = asc_good    # True when larger results are better
        self.units = units          # unit suffix for display ('ms', 's', ...)
        self.check = check          # preflight: can this benchmark run here?

    def run(self, exe):
        """Run against *exe*; return the result, or '-FAILED-' when the
        underlying runner raises BenchmarkFailed."""
        try:
            return self._run(exe)
        except BenchmarkFailed:
            return '-FAILED-'
def external_dependency(dirname, svnurl, revision):
    """Check out (if necessary) a given fixed revision of a svn url.

    Returns True when the checkout is present and usable, False when
    the checkout failed.  An existing checkout at the wrong revision
    aborts the whole process rather than being updated automatically.
    """
    dirpath = py.magic.autopath().dirpath().join(dirname)
    # marker file recording which revision was checked out
    revtag = dirpath.join('-svn-rev-')
    if dirpath.check():
        if not revtag.check() or int(revtag.read()) != revision:
            print >> sys.stderr, ("Out-of-date benchmark checkout!"
                                  " I won't update it automatically.")
            print >> sys.stderr, ("To continue, move away or remove the "
                                  "%r directory." % (dirname,))
            sys.exit(1)
        return True
    CMD = "svn co -r%d %s@%d %s" % (revision, svnurl, revision, dirpath)
    print >> sys.stderr, CMD
    err = os.system(CMD)
    if err != 0:
        print >> sys.stderr, "* checkout failed, skipping this benchmark"
        return False
    revtag.write(str(revision))
    return True
def run_cmd(cmd):
    """Run *cmd* in a shell with stderr merged into stdout.

    Returns the combined output; raises BenchmarkFailed (carrying the
    exit status) when the command exits non-zero.
    """
    #print "running", cmd
    pipe = os.popen(cmd + ' 2>&1')
    output = pipe.read()
    exitstatus = pipe.close()
    if exitstatus:
        raise BenchmarkFailed(exitstatus)
    return output
def run_pystone(executable='/usr/local/bin/python', n=''):
    """Run lib-python's pystone under *executable*; return pystones/sec."""
    distdir = py.magic.autopath().dirpath().dirpath().dirpath().dirpath()
    script = distdir.join('lib-python').join('2.4.1').join('test').join('pystone.py')
    output = run_cmd('"%s" "%s" %s' % (executable, script, n))
    return get_result(output, PYSTONE_PATTERN)
def run_richards(executable='/usr/local/bin/python', n=5):
    """Run goal/richards.py (*n* iterations) under *executable*;
    return the average ms per iteration."""
    script = py.magic.autopath().dirpath().dirpath().join('goal').join('richards.py')
    output = run_cmd('"%s" %s %s' % (executable, script, n))
    return get_result(output, RICHARDS_PATTERN)
def run_translate(executable='/usr/local/bin/python'):
    """Time a --no-compile translation of targetrpystonedalone under
    *executable*; return elapsed wall-clock seconds."""
    goal = py.magic.autopath().dirpath().dirpath().join('goal')
    translate = goal.join('translate.py')
    target = goal.join('targetrpystonedalone.py')
    cmdline = ('%s %s --text --batch --backendopt --no-compile %s'
               ' > /dev/null 2> /dev/null' % (executable, translate, target))
    start = time.time()
    status = os.system(cmdline)
    elapsed = time.time() - start
    if status:
        raise BenchmarkFailed(status)
    return elapsed
def run_docutils(executable='/usr/local/bin/python'):
    """Time rendering doc/translation.txt to HTML with docutils under
    *executable*; return elapsed wall-clock seconds."""
    docutilssvnpath = 'docutils' # subdir of the local dir
    translatetxt = py.magic.autopath().dirpath().dirpath().dirpath().join('doc').join('translation.txt')
    command = """import sys
sys.modules['unicodedata'] = sys # docutils need 'import unicodedata' to work, but no more...
sys.path[0:0] = ['%s', '%s/extras']
from docutils.core import publish_cmdline
publish_cmdline(writer_name='html')
"""%(docutilssvnpath, docutilssvnpath)
    T = time.time()
    pid = os.fork()
    if not pid:
        # child: redirect stdin/out/err to /dev/null, then run docutils
        # via spawnv so its exit status can be forwarded with _exit
        davenull = os.open('/dev/null', os.O_RDWR)
        os.dup2(davenull, 0)
        os.dup2(davenull, 1)
        os.dup2(davenull, 2)
        status = os.spawnv(os.P_WAIT, executable, [executable, '-c', command, str(translatetxt)])
        os._exit(status)
    else:
        # parent: wait for the child and keep its exit status
        status = os.waitpid(pid, 0)[1]
    r = time.time() - T
    if status:
        raise BenchmarkFailed(status)
    return r
def check_docutils():
    """Preflight for the docutils benchmark: deliberately disabled."""
    return False # useless benchmark - I've seen 15% of difference
                 # between two successive runs on the same machine!
    #return external_dependency('docutils',
    #                           'svn://svn.berlios.de/docutils/trunk/docutils',
    #                           4821)
def run_templess(executable='/usr/local/bin/python'):
    """Run the templess benchmark (test/oneshot.py with argument 100).

    templess is a simple templating language; to check it out use
    'svn co -r100 http://johnnydebris.net/templess/trunk templess'.
    Returns the float printed on the last non-blank output line.
    """
    here = py.magic.autopath().dirpath()
    pypath = py.__package__.getpath().dirpath()
    testscript = here.join('templess').join('test/oneshot.py')
    command = 'PYTHONPATH="%s:%s" "%s" "%s" 100' % (here, pypath,
                                                    executable, testscript)
    output = run_cmd(command)
    nonblank = [line for line in output.split('\n') if line.strip()]
    try:
        return float(nonblank[-1])
    except ValueError:
        raise BenchmarkFailed
def check_templess():
    """Ensure the templess checkout (pinned to r100) is available."""
    svnurl = 'http://johnnydebris.net/templess/trunk'
    return external_dependency('templess', svnurl, 100)
def run_gadfly(executable='/usr/local/bin/python'):
    """Run the gadfly pure-Python database test subset; return seconds.

    Expects the test runner to end with a line 'OK' preceded by a line
    whose last word is the elapsed time suffixed with 's'.
    """
    here = py.magic.autopath().dirpath()
    gadfly = here.join('gadfly')
    testscript = gadfly.join('test', 'testsubset.py')
    output = run_cmd('PYTHONPATH="%s" "%s" "%s"' % (gadfly, executable, testscript))
    nonblank = [line for line in output.split('\n') if line.strip()]
    if nonblank[-1].strip() != 'OK':
        raise BenchmarkFailed
    lastword = nonblank[-2].split()[-1]
    if not lastword.endswith('s'):
        raise BenchmarkFailed
    try:
        return float(lastword[:-1])
    except ValueError:
        raise BenchmarkFailed
def check_gadfly():
    """Ensure the gadfly checkout (pinned to r40406) is available."""
    svnurl = 'http://codespeak.net/svn/user/arigo/hack/pypy-hack/gadflyZip'
    return external_dependency('gadfly', svnurl, 40406)
def run_mako(executable='/usr/local/bin/python'):
    """Run the mako templating benchmark; return its reported seconds.

    Expects the last non-blank output line to look like 'Mako: <secs>'.
    """
    here = py.magic.autopath().dirpath()
    mako = here.join('mako')
    testscript = mako.join('examples', 'bench', 'basic.py')
    command = 'PYTHONPATH="%s" "%s" "%s" mako' % (mako.join('lib'),
                                                  executable, testscript)
    output = run_cmd(command)
    nonblank = [line for line in output.split('\n') if line.strip()]
    words = nonblank[-1].split()
    if words[0] != 'Mako:':
        raise BenchmarkFailed
    try:
        return float(words[1])
    except ValueError:
        raise BenchmarkFailed
def check_mako():
    """Ensure the mako checkout (pinned to r40235) is available."""
    svnurl = 'http://codespeak.net/svn/user/arigo/hack/pypy-hack/mako'
    return external_dependency('mako', svnurl, 40235)
def check_translate():
    """Preflight for the translate benchmark: deliberately disabled."""
    return False # XXX what should we do about the dependency on ctypes?
# the full catalogue of known benchmarks; entries with external
# dependencies carry a check_* preflight callable
BENCHMARKS = [Benchmark('richards', run_richards, RICHARDS_ASCENDING_GOOD, 'ms'),
              Benchmark('pystone', run_pystone, PYSTONE_ASCENDING_GOOD, ''),
              Benchmark('translate', run_translate, RICHARDS_ASCENDING_GOOD, 'ms', check_translate),
              Benchmark('docutils', run_docutils, RICHARDS_ASCENDING_GOOD,
                        's', check_docutils),
              Benchmark('templess', run_templess, RICHARDS_ASCENDING_GOOD,
                        's', check_templess),
              Benchmark('gadfly2', run_gadfly, RICHARDS_ASCENDING_GOOD,
                        's', check_gadfly),
              Benchmark('mako', run_mako, RICHARDS_ASCENDING_GOOD,
                        's', check_mako),
              ]

# benchmark name -> Benchmark, for direct lookup
BENCHMARKS_BY_NAME = {}
for _b in BENCHMARKS:
    BENCHMARKS_BY_NAME[_b.name] = _b
| Python |
# benchmarks on a unix machine.
import autopath
from pypy.translator.benchmark.result import BenchmarkResultSet
from pypy.translator.benchmark.benchmarks import BENCHMARKS
import os, sys, time, pickle, re, py
def get_executables(args):
    """Return *args* ordered by file modification time (oldest first),
    prefixing bare file names with './' so they can be exec'd directly.

    NOTE(review): the historical comment claimed "sorted by revision
    number (highest first)", but the key is os.path.getmtime ascending.
    """
    ordered = []
    for exe in sorted(args, key=os.path.getmtime):
        if '/' not in exe:
            exe = './' + exe
        ordered.append(exe)
    return ordered
def main(options, args):
    """Run the selected benchmarks over the executables in *args* (plus
    the system CPythons unless --no-cpython), accumulating results in
    the pickle file and printing a summary table after every round."""
    if os.path.exists(options.picklefile):
        benchmark_result = pickle.load(open(options.picklefile, 'rb'))
    else:
        benchmark_result = BenchmarkResultSet()
    # keep only requested benchmarks whose preflight check passes.
    # NOTE(review): options.benchmarks is a comma-separated string, so
    # 'in' is a substring match, not an exact name match -- confirm.
    benchmarks = []
    for b in BENCHMARKS:
        if b.name in options.benchmarks:
            if not b.check():
                print "can't run %s benchmark for some reason"%(b.name,)
            else:
                benchmarks.append(b)
    exes = get_executables(args)
    # reference interpreters, newest preferred, when found on PATH
    pythons = 'python2.5 python2.4 python2.3'.split()
    full_pythons = []
    for python in pythons:
        full_python = py.path.local.sysfind(python)
        if full_python:
            full_pythons.append(str(full_python))
    sys.stdout.flush()
    refs = {}   # NOTE(review): never used below
    if not options.nocpython:
        exes = full_pythons + exes
    # run 'runcount' rounds; the trailing None iteration only reprints
    # the final summary without running anything further
    for i in range(int(options.runcount)) + [None]:
        if i is not None:
            for exe in exes:
                for b in benchmarks:
                    benchmark_result.result(exe, allowcreate=True).run_benchmark(b, verbose=True)
        # checkpoint results to disk after every round
        pickle.dump(benchmark_result, open(options.picklefile, 'wb'))
        stats = ['stat:st_mtime', 'exe_name', 'pypy_rev']
        for b in benchmarks:
            stats.append('bench:'+b.name)
        if options.relto:
            relto = options.relto
        else:
            relto = full_pythons[0]
        for row in benchmark_result.txt_summary(stats,
                                                relto=relto,
                                                filteron=lambda r: r.exe_name in exes):
            print row
if __name__ == '__main__':
    # command-line driver: parse options, then hand off to main()
    from optparse import OptionParser
    parser = OptionParser()
    # by default run every benchmark whose preflight check passes
    default_benches = ','.join([b.name for b in BENCHMARKS if b.check()])
    parser.add_option(
        '--benchmarks', dest='benchmarks',
        default=default_benches,
        )
    parser.add_option(
        '--pickle', dest='picklefile',
        default='bench-custom.benchmark_result'
        )
    parser.add_option(
        '--runcount', dest='runcount',
        default='1',
        )
    parser.add_option(
        '--relto', dest='relto',
        default=None,
        )
    parser.add_option(
        '-v', '--verbose', action='store_true', dest='verbose',
        default=None,
        )
    parser.add_option(
        '--no-cpython', action='store_true', dest='nocpython',
        default=None,
        )
    options, args = parser.parse_args(sys.argv[1:])
    main(options, args)
| Python |
#
| Python |
from pypy.objspace.flow.model import Constant, checkgraph, c_last_exception
from pypy.rpython.rtyper import LowLevelOpList, inputconst
from pypy.translator.simplify import eliminate_empty_blocks, join_blocks
#from pypy.translator.simplify import transform_dead_op_vars
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.lltypesystem import rclass
from pypy.translator.backendopt.support import log
def remove_asserts(translator, graphs):
    """Remove raise-AssertionError paths from *graphs*, turning each
    removable assert's condition into a 'debug_assert' operation."""
    rtyper = translator.rtyper
    clsdef = translator.annotator.bookkeeper.getuniqueclassdef(AssertionError)
    r_AssertionError = rclass.getclassrepr(rtyper, clsdef)
    ll_AssertionError = r_AssertionError.convert_const(AssertionError)
    # total_count = [asserts we could not remove, asserts removed]
    total_count = [0, 0]
    for graph in graphs:
        count = 0
        morework = True
        while morework:
            morework = False
            eliminate_empty_blocks(graph)
            join_blocks(graph)
            # find links into the except block raising a constant
            # AssertionError instance
            for link in graph.iterlinks():
                if (link.target is graph.exceptblock
                    and isinstance(link.args[0], Constant)
                    and link.args[0].value == ll_AssertionError):
                    if kill_assertion_link(graph, link):
                        count += 1
                        morework = True
                        break   # graph mutated: restart the scan
                    else:
                        total_count[0] += 1
                        if translator.config.translation.verbose:
                            log.removeassert("cannot remove an assert from %s" % (graph.name,))
        if count:
            # now melt away the (hopefully) dead operation that compute
            # the condition
            total_count[1] += count
            if translator.config.translation.verbose:
                log.removeassert("removed %d asserts in %s" % (count, graph.name))
            checkgraph(graph)
            #transform_dead_op_vars(graph, translator)
    log.removeassert("Could not remove %d asserts, but removed %d asserts." % tuple(total_count))
def kill_assertion_link(graph, link):
    """Try to remove *link* (a raise-AssertionError exit) from its block.

    Returns True on success.  For a simple two-way boolean switch the
    condition is converted into a 'debug_assert' operation and the
    switch dropped; for exception-raising or multi-way switches the
    link is removed but the switch is kept.
    """
    block = link.prevblock
    exits = list(block.exits)
    if len(exits) <= 1:
        return False        # the assert is the only exit: give up
    remove_condition = len(exits) == 2
    if block.exitswitch == c_last_exception:
        if link is exits[0]:
            return False # cannot remove the non-exceptional path
    else:
        if block.exitswitch.concretetype is not lltype.Bool: # a switch
            remove_condition = False
        else:
            # common case: if <cond>: raise AssertionError
            # turn it into a debug_assert operation
            assert remove_condition
            newops = LowLevelOpList()
            if link.exitcase:
                # assert triggers when the condition is true: negate it
                v = newops.genop('bool_not', [block.exitswitch],
                                 resulttype = lltype.Bool)
            else:
                v = block.exitswitch
            msg = "assertion failed in %s" % (graph.name,)
            c_msg = inputconst(lltype.Void, msg)
            newops.genop('debug_assert', [v, c_msg])
            block.operations.extend(newops)
    exits.remove(link)
    if remove_condition:
        # condition no longer necessary
        block.exitswitch = None
        exits[0].exitcase = None
        exits[0].llexitcase = None
    block.recloseblock(*exits)
    return True
| Python |
from pypy.objspace.flow.model import Variable, mkentrymap, flatten, Block
from pypy.tool.algo.unionfind import UnionFind
class DataFlowFamilyBuilder:
    """Follow the flow of the data in the graph.  Builds a UnionFind grouping
    all the variables by families: each family contains exactly one variable
    where a value is stored into -- either by an operation or a merge -- and
    all following variables where the value is just passed unmerged into the
    next block.
    """

    def __init__(self, graph):
        # Build a list of "unification opportunities": for each block and each
        # 'n', an "opportunity" groups the block's nth input variable with
        # the nth output variable from each of the incoming links, in a list:
        # [Block, blockvar, linkvar, linkvar, linkvar...]
        opportunities = []
        opportunities_with_const = []
        for block, links in mkinsideentrymap(graph).items():
            assert links
            for n, inputvar in enumerate(block.inputargs):
                vars = [block, inputvar]
                put_in = opportunities
                for link in links:
                    var = link.args[n]
                    if not isinstance(var, Variable):
                        put_in = opportunities_with_const
                    vars.append(var)
                    # if any link provides a Constant, record this in
                    # the opportunities_with_const list instead
                put_in.append(vars)
        self.opportunities = opportunities
        self.opportunities_with_const = opportunities_with_const
        self.variable_families = UnionFind()

    def complete(self):
        """Iterate unification to a fixpoint; True if anything merged."""
        # An "opportunity" that lists exactly two distinct variables means that
        # the two variables can be unified.  We maintain the unification status
        # in 'variable_families'.  When variables are unified, it might reduce
        # the number of distinct variables and thus open other "opportunities"
        # for unification.
        variable_families = self.variable_families
        any_progress_at_all = False
        progress = True
        while progress:
            progress = False
            pending_opportunities = []
            for vars in self.opportunities:
                # current family representative of each involved variable
                repvars = [variable_families.find_rep(v1) for v1 in vars[1:]]
                repvars_without_duplicates = dict.fromkeys(repvars)
                count = len(repvars_without_duplicates)
                if count > 2:
                    # cannot unify now, but maybe later?
                    pending_opportunities.append(vars[:1] + repvars)
                elif count == 2:
                    # unify!
                    variable_families.union(*repvars_without_duplicates)
                    progress = True
            self.opportunities = pending_opportunities
            any_progress_at_all |= progress
        return any_progress_at_all

    def merge_identical_phi_nodes(self):
        """Unify block input variables that receive exactly the same set
        of incoming values; True if anything merged."""
        variable_families = self.variable_families
        any_progress_at_all = False
        progress = True
        while progress:
            progress = False
            block_phi_nodes = {}   # in the SSA sense
            for vars in self.opportunities + self.opportunities_with_const:
                block, blockvar = vars[:2]
                linksvars = vars[2:]  # from the incoming links (vars+consts)
                linksvars = [variable_families.find_rep(v) for v in linksvars]
                phi_node = (block,) + tuple(linksvars)  # ignoring n and blockvar
                if phi_node in block_phi_nodes:
                    # already seen: we have two phi nodes in the same block that
                    # get exactly the same incoming vars. Identify the results.
                    blockvar1 = block_phi_nodes[phi_node]
                    if variable_families.union(blockvar1, blockvar)[0]:
                        progress = True
                else:
                    block_phi_nodes[phi_node] = blockvar
            any_progress_at_all |= progress
        return any_progress_at_all

    def get_variable_families(self):
        """Run complete() and return the resulting UnionFind."""
        self.complete()
        return self.variable_families
def SSI_to_SSA(graph):
    """Rename the variables in a flow graph as much as possible without
    violating the SSA rule.  'SSI' means that each Variable in a flow graph is
    defined only once in the whole graph; all our graphs are SSI.  This
    function does not break that rule, but changes the 'name' of some
    Variables to give them the same 'name' as other Variables.  The result
    looks like an SSA graph.  'SSA' means that each var name appears as the
    result of an operation only once in the whole graph, but it can be
    passed to other blocks across links.
    """
    variable_families = DataFlowFamilyBuilder(graph).get_variable_families()
    # rename variables to give them the name of their family representative
    for v in variable_families.keys():
        v1 = variable_families.find_rep(v)
        if v1 != v:
            v.set_name_from(v1)
    # sanity-check that the same name is never used several times in a block
    variables_by_name = {}
    for block in flatten(graph):
        if not isinstance(block, Block):
            continue
        vars = [op.result for op in block.operations]
        for link in block.exits:
            vars += link.getextravars()
        assert len(dict.fromkeys([v.name for v in vars])) == len(vars), (
            "duplicate variable name in %r" % (block,))
        for v in vars:
            variables_by_name.setdefault(v.name, []).append(v)
    # sanity-check that variables with the same name have the same concretetype
    for vname, vlist in variables_by_name.items():
        vct = [getattr(v, 'concretetype', None) for v in vlist]
        assert vct == vct[:1] * len(vct), (
            "variables called %s have mixed concretetypes: %r" % (vname, vct))
# ____________________________________________________________
def mkinsideentrymap(graph_or_blocks):
    """Build {block: [incoming links]} restricted to links that stay
    inside the given set of blocks.

    'graph_or_blocks' is either a full FunctionGraph, or a mapping
    {block: reachable-from-outside-flag}; links into blocks flagged as
    reachable from outside are left out of the result.
    """
    if isinstance(graph_or_blocks, dict):
        blocks = graph_or_blocks
        entrymap = {}
        for source in blocks:
            for link in source.exits:
                target = link.target
                if target in blocks and not blocks[target]:
                    entrymap.setdefault(target, []).append(link)
        return entrymap
    # full graph: use the generic entry map, minus the start block
    entrymap = mkentrymap(graph_or_blocks)
    del entrymap[graph_or_blocks.startblock]
    return entrymap
def variables_created_in(block):
    """Return {var: True} for every variable *block* defines: its input
    arguments plus the result of each of its operations."""
    created = dict.fromkeys(block.inputargs, True)
    for op in block.operations:
        created[op.result] = True
    return created
def SSA_to_SSI(graph_or_blocks, annotator=None):
    """Turn a number of blocks belonging to a flow graph into valid (i.e. SSI)
    form, assuming that they are only in SSA form (i.e. they can use each
    other's variables directly, without having to pass and rename them along
    links).

    'graph_or_blocks' can be a graph, or just a dict that lists some blocks
    from a graph, as follows: {block: reachable-from-outside-flag}.
    """
    from pypy.translator.unsimplify import copyvar

    entrymap = mkinsideentrymap(graph_or_blocks)
    builder = DataFlowFamilyBuilder(graph_or_blocks)
    variable_families = builder.get_variable_families()
    del builder

    # collect every variable a block uses without defining it itself
    pending = []     # list of (block, var-used-but-not-defined)
    for block in entrymap:
        variables_created = variables_created_in(block)
        variables_used = {}
        for op in block.operations:
            for v in op.args:
                variables_used[v] = True
        variables_used[block.exitswitch] = True
        for link in block.exits:
            for v in link.args:
                variables_used[v] = True
        for v in variables_used:
            if isinstance(v, Variable):
                if v not in variables_created:
                    pending.append((block, v))

    # give each such variable a local definition, either by reusing a
    # same-family variable already defined in the block, or by threading
    # it in as a fresh input argument through all incoming links
    while pending:
        block, v = pending.pop()
        v_rep = variable_families.find_rep(v)
        variables_created = variables_created_in(block)
        if v in variables_created:
            continue # already ok
        for w in variables_created:
            w_rep = variable_families.find_rep(w)
            if v_rep is w_rep:
                # 'w' is in the same family as 'v', so we can simply
                # reuse its value for 'v'
                block.renamevariables({v: w})
                break
        else:
            # didn't find it. Add it to all incoming links.
            try:
                links = entrymap[block]
            except KeyError:
                raise Exception("SSA_to_SSI failed: no way to give a value to"
                                " %r in %r" % (v, block))
            w = copyvar(annotator, v)
            variable_families.union(v, w)
            block.renamevariables({v: w})
            block.inputargs.append(w)
            for link in links:
                link.args.append(v)
                # each predecessor must now supply 'v' in turn
                pending.append((link.prevblock, v))
| Python |
from pypy.translator.backendopt.support import log, all_operations, annotate
import pypy.rpython.raisingops.raisingops
log = log.raisingop2directcall
def is_raisingop(op):
    """Return True if 'op' is a lowered arithmetic operation that can
    raise: an int_/uint_/float_/llong_ operation whose name carries one
    of the _zer, _ovf or _val suffixes."""
    name = op.opname
    prefixed = False
    for prefix in ('int_', 'uint_', 'float_', 'llong_'):
        if name.startswith(prefix):
            prefixed = True
            break
    if not prefixed:
        return False
    for suffix in ('_zer', '_ovf', '_val'):
        if name.endswith(suffix):
            return True
    return False
def raisingop2direct_call(translator, graphs=None):
    """search for operations that could raise an exception and change that
    operation into a direct_call to a function from the raisingops directory.
    This function also needs to be annotated and specialized.

    note: this could be extended to allow for any operation to be changed into
    a direct_call to a (RPython) function!
    """
    #special_operations = "int_floordiv int_mod".split()
    if graphs is None:
        graphs = translator.graphs
    log('starting')
    seen = {}   # opname -> number of replacements performed
    for op in all_operations(graphs):
        if not is_raisingop(op):
            continue
        # the replacement helper must exist under the same name as the
        # operation in pypy.rpython.raisingops.raisingops
        func = getattr(pypy.rpython.raisingops.raisingops, op.opname, None)
        if not func:
            log.warning("%s not found" % op.opname)
            continue
        seen[op.opname] = seen.get(op.opname, 0) + 1
        # rewrite  opname(args)  into  direct_call(helper, args)
        op.args.insert(0, annotate(translator, func, op.result, op.args))
        op.opname = 'direct_call'
    #statistics...
    for k, v in seen.iteritems():
        log("%dx %s" % (v, k))
    #specialize newly annotated functions
    if seen:
        translator.rtyper.specialize_more_blocks()
    #rename some operations (that were introduced in the newly specialized graphs)
    #so this transformation becomes idempotent...
    #for op in all_operations(graphs):
    #   if op.opname in special_operations:
    #       log('renamed %s to %s_' % (op.opname, op.opname))
    #       op.opname += '_'
    #selfdiagnostics... assert that there are no more raisingops
    for op in all_operations(graphs):
        if is_raisingop(op):
            log.warning("%s not transformed" % op.opname)
    #translator.view()
    log('finished')
| Python |
from pypy.translator.simplify import get_graph
from pypy.objspace.flow.model import mkentrymap, checkgraph
# this transformation is very academical -- I had too much time
def _remove_tail_call(translator, graph, block):
print "removing tail call"
assert len(block.exits) == 1
assert block.exits[0].target is graph.returnblock
assert block.operations[-1].result == block.exits[0].args[0]
op = block.operations[-1]
block.operations = block.operations[:-1]
block.exits[0].args = op.args[1:]
block.exits[0].target = graph.startblock
def remove_tail_calls_to_self(translator, graph):
entrymap = mkentrymap(graph)
changed = False
for link in entrymap[graph.returnblock]:
block = link.prevblock
if (len(block.exits) == 1 and
len(block.operations) > 0 and
block.operations[-1].opname == 'direct_call' and
block.operations[-1].result == link.args[0]):
call = get_graph(block.operations[-1].args[0], translator)
print "getgraph", graph
if graph is graph:
_remove_tail_call(translator, graph, block)
changed = True
if changed:
from pypy.translator import simplify
checkgraph(graph)
simplify.remove_identical_vars(graph)
simplify.eliminate_empty_blocks(graph)
simplify.join_blocks(graph)
| Python |
from pypy.objspace.flow.model import Block, Constant, Variable, flatten
from pypy.objspace.flow.model import checkgraph, mkentrymap
from pypy.translator.backendopt.support import log
log = log.mergeifblocks
def is_chain_block(block, first=False):
    """Return True if 'block' can take part in a chain of equality tests
    that merge_if_blocks can fuse into one switch: the block's exitswitch
    must be the result of its last operation, which must be a primitive
    *_eq comparing exactly one Variable against exactly one Constant.
    Only the first block of a chain may contain extra operations."""
    if not block.operations:
        return False
    if not first and len(block.operations) > 1:
        return False
    op = block.operations[-1]
    if op.opname not in ('int_eq', 'uint_eq', 'llong_eq', 'ullong_eq',
                         'char_eq', 'unichar_eq'):
        return False
    if op.result != block.exitswitch:
        return False
    # exactly one side Variable, the other Constant
    if isinstance(op.args[0], Variable) and isinstance(op.args[1], Variable):
        return False
    if isinstance(op.args[0], Constant) and isinstance(op.args[1], Constant):
        return False
    return True
def merge_chain(chain, checkvar, varmap, graph):
    """Collapse a chain [(block, case-constant), ...] of equality tests on
    'checkvar' into a single multi-exit switch on the chain's first block.
    'varmap' maps variables live in later blocks of the chain back to the
    equivalent variables available in the first block."""
    def get_new_arg(var_or_const):
        # re-express a link argument in terms of first-block variables
        if isinstance(var_or_const, Constant):
            return var_or_const
        return varmap[var_or_const]
    firstblock, case = chain[0]
    # drop the final comparison: the block now switches on checkvar itself
    firstblock.operations = firstblock.operations[:-1]
    firstblock.exitswitch = checkvar
    values = {}
    links = []
    # the False exit of the last block in the chain becomes the default case
    default = chain[-1][0].exits[0]
    default.exitcase = "default"
    default.llexitcase = None
    default.args = [get_new_arg(arg) for arg in default.args]
    for block, case in chain:
        if case.value in values:
            # a value tested twice: the later test can never succeed
            log.WARNING("unreachable code with value %s in graph %s" % (
                case.value, graph))
            continue
        values[case.value] = True
        link = block.exits[1]
        links.append(link)
        link.exitcase = case.value
        link.llexitcase = case.value
        link.args = [get_new_arg(arg) for arg in link.args]
    links.append(default)
    firstblock.recloseblock(*links)
def merge_if_blocks_once(graph):
    """Convert consecutive blocks that all compare a variable (of Primitive type)
    with a constant into one block with multiple exits. The backends can in
    turn output this block as a switch statement.

    Returns True if one chain was merged (callers should iterate until
    this returns False).
    """
    candidates = [block for block in graph.iterblocks()
                      if is_chain_block(block, first=True)]
    entrymap = mkentrymap(graph)
    for firstblock in candidates:
        chain = []
        checkvars = []
        varmap = {}  # {var in a block in the chain: var in the first block}
        for var in firstblock.exits[0].args:
            varmap[var] = var
        for var in firstblock.exits[1].args:
            varmap[var] = var
        def add_to_varmap(var, newvar):
            if isinstance(var, Variable):
                varmap[newvar] = varmap[var]
            else:
                varmap[newvar] = var
        current = firstblock
        while 1:
            # check whether the chain can be extended with the block that follows the
            # False link
            checkvar = [var for var in current.operations[-1].args
                           if isinstance(var, Variable)][0]
            case = [var for var in current.operations[-1].args
                       if isinstance(var, Constant)][0]
            chain.append((current, case))
            checkvars.append(checkvar)
            falseexit = current.exits[0]
            assert not falseexit.exitcase
            trueexit = current.exits[1]
            targetblock = falseexit.target
            # the continuation must be reachable only through this False
            # link, must receive the same tested variable, and must itself
            # be a (minimal) chain block testing that variable
            if len(entrymap[targetblock]) != 1:
                break
            if checkvar not in falseexit.args:
                break
            newcheckvar = targetblock.inputargs[falseexit.args.index(checkvar)]
            if not is_chain_block(targetblock):
                break
            if newcheckvar not in targetblock.operations[0].args:
                break
            # record how the target block's input variables map back onto
            # variables of the first block
            for i, var in enumerate(trueexit.args):
                add_to_varmap(var, trueexit.target.inputargs[i])
            for i, var in enumerate(falseexit.args):
                add_to_varmap(var, falseexit.target.inputargs[i])
            current = targetblock
        if len(chain) > 1:
            break
    else:
        # for/else: no candidate yielded a chain worth merging
        return False
    merge_chain(chain, checkvars[0], varmap, graph)
    checkgraph(graph)
    return True
def merge_if_blocks(graph, verbose=True):
    """Repeatedly run merge_if_blocks_once over 'graph' until no chain of
    equality tests is left to merge; log once if anything changed."""
    merged_any = False
    while merge_if_blocks_once(graph):
        merged_any = True
    if not merged_any:
        return
    if verbose:
        log("merging blocks in %s" % (graph.name, ))
    else:
        log.dot()
| Python |
from pypy.translator.simplify import get_graph
import md5
def get_statistics(graph, translator, save_per_graph_details=None, ignore_stack_checks=False):
    """Count the graphs, blocks and operations reachable from 'graph' by
    following direct_call and indirect_call edges.

    If 'save_per_graph_details' names a file, also write one sorted line
    per graph (md5-of-bytecode, name, nblocks, nops) into it, so two
    translations can be compared.  Graphs whose name starts with
    'll_stack_check' are not followed when 'ignore_stack_checks' is set.

    Returns (num_graphs, num_blocks, num_ops).
    """
    seen_graphs = {}
    stack = [graph]
    num_graphs = 0
    num_blocks = 0
    num_ops = 0
    per_graph = {}
    while stack:
        graph = stack.pop()
        if graph in seen_graphs:
            continue
        seen_graphs[graph] = True
        num_graphs += 1
        old_num_blocks = num_blocks
        old_num_ops = num_ops
        for block in graph.iterblocks():
            num_blocks += 1
            for op in block.operations:
                if op.opname == "direct_call":
                    called_graph = get_graph(op.args[0], translator)
                    if called_graph is not None and ignore_stack_checks:
                        if called_graph.name.startswith('ll_stack_check'):
                            continue
                    if called_graph is not None:
                        stack.append(called_graph)
                elif op.opname == "indirect_call":
                    # the last argument of an indirect_call is the list of
                    # possible callee graphs, or None if unknown
                    called_graphs = op.args[-1].value
                    if called_graphs is not None:
                        stack.extend(called_graphs)
                num_ops += 1
        per_graph[graph] = (num_blocks-old_num_blocks, num_ops-old_num_ops)
    if save_per_graph_details:
        details = []
        for graph, (nblocks, nops) in per_graph.iteritems():
            try:
                code = graph.func.func_code.co_code
            except AttributeError:
                # graph not built from a Python function
                code = "None"
            hash = md5.new(code).hexdigest()
            details.append((hash, graph.name, nblocks, nops))
        details.sort()
        f = open(save_per_graph_details, "w")
        try:
            for hash, name, nblocks, nops in details:
                print >>f, hash, name, nblocks, nops
        finally:
            f.close()
    return num_graphs, num_blocks, num_ops
def print_statistics(graph, translator, save_per_graph_details=None, ignore_stack_checks=False):
    """Compute (see get_statistics) and print the number of graphs,
    blocks and operations reachable from 'graph'."""
    stats = get_statistics(graph, translator, save_per_graph_details,
                           ignore_stack_checks=ignore_stack_checks)
    num_graphs, num_blocks, num_ops = stats
    print ("Statistics:\nnumber of graphs %s\n"
           "number of blocks %s\n"
           "number of operations %s\n") % (num_graphs, num_blocks, num_ops)
| Python |
"""
Visit all known INSTANCEs to see which methods can be marked as
non-virtual: a method is marked as non-virtual when it's never
overridden in the subclasses: this means that backends can translate
oosends relative to that method into non-virtual call (or maybe
switching back to a direct_call if the backend doesn't support
non-virtual calls, such as JVM).
"""
from pypy.rpython.ootypesystem import ootype
def check_virtual_methods(INSTANCE=ootype.ROOT, super_methods=None):
    """Walk the INSTANCE hierarchy rooted at 'INSTANCE', setting the
    _virtual flag on every method: a method ends up virtual if and only
    if some subclass overrides it.

    'super_methods' maps method names to the methods inherited from the
    ancestors of INSTANCE; it is used by the recursion itself and should
    normally be omitted by external callers.
    """
    # use a None sentinel instead of a shared mutable default argument
    if super_methods is None:
        super_methods = {}
    my_methods = super_methods.copy()
    for name, method in INSTANCE._methods.iteritems():
        # tentatively non-virtual; flipped below if a subclass overrides it
        method._virtual = False
        my_methods[name] = method
        if name in super_methods:
            # this class overrides an inherited method: the inherited
            # version must be dispatched virtually
            super_methods[name]._virtual = True
    for SUB_INSTANCE in INSTANCE._subclasses:
        check_virtual_methods(SUB_INSTANCE, my_methods)
| Python |
from pypy.objspace.flow.model import Block, Variable, Constant
from pypy.rpython.lltypesystem.lltype import Void
from pypy.translator.backendopt.support import log
from pypy.translator import simplify
from pypy import conftest
def remove_unaryops(graph, opnames):
    """Removes unary low-level ops with a name appearing in the opnames list.
    Each removed operation's result variable is replaced by its single
    argument everywhere downstream in the same block, on the block's
    exits, and in its exitswitch.
    """
    positions = []
    for block in graph.iterblocks():
        for i, op in enumerate(block.operations):
            if op.opname in opnames:
                positions.append((block, i))
    while positions:
        block, index = positions.pop()
        op_result = block.operations[index].result
        op_arg = block.operations[index].args[0]
        # replace the new variable (op_result) with the old variable
        # (from all subsequent positions)
        for op in block.operations[index:]:
            if op is not None:
                for i in range(len(op.args)):
                    if op.args[i] == op_result:
                        op.args[i] = op_arg
                # an indirect_call whose callee became a Constant can turn
                # into a direct_call (dropping the graph-list argument)
                if (op.opname == "indirect_call"
                    and isinstance(op.args[0], Constant)):
                    op.opname = "direct_call"
                    op.args = op.args[:-1]
        for link in block.exits:
            for i in range(len(link.args)):
                if link.args[i] == op_result:
                    link.args[i] = op_arg
        if block.exitswitch == op_result:
            if isinstance(op_arg, Variable):
                block.exitswitch = op_arg
            else:
                # the exitswitch became a constant: keep only the live exit
                simplify.replace_exitswitch_by_constant(block, op_arg)
        block.operations[index] = None
    # remove all operations replaced by None above
    for block in graph.iterblocks():
        if block.operations:
            block.operations[:] = filter(None, block.operations)
def remove_same_as(graph):
    """Strip every 'same_as' no-op from 'graph' (see remove_unaryops)."""
    opnames_to_drop = ["same_as"]
    remove_unaryops(graph, opnames_to_drop)
def remove_duplicate_casts(graph, translator):
    """Remove redundant cast_pointer operations from 'graph':
    1. collapse chains of casts into one cast from the original pointer,
    2. reuse the result of an identical earlier cast in the same block,
    3. delete casts whose results are never used.
    Returns the number of casts removed."""
    simplify.join_blocks(graph)
    num_removed = 0
    # remove chains of casts
    for block in graph.iterblocks():
        comes_from = {}
        for op in block.operations:
            if op.opname == "cast_pointer":
                if op.args[0] in comes_from:
                    from_var = comes_from[op.args[0]]
                    comes_from[op.result] = from_var
                    if from_var.concretetype == op.result.concretetype:
                        # casting back to the source type: pure no-op
                        op.opname = "same_as"
                        op.args = [from_var]
                        num_removed += 1
                    else:
                        op.args = [from_var]
                else:
                    comes_from[op.result] = op.args[0]
    if num_removed:
        remove_same_as(graph)
    # remove duplicate casts
    for block in graph.iterblocks():
        available = {}
        for op in block.operations:
            if op.opname == "cast_pointer":
                key = (op.args[0], op.result.concretetype)
                if key in available:
                    op.opname = "same_as"
                    op.args = [available[key]]
                    num_removed += 1
                else:
                    available[key] = op.result
            elif op.opname == 'resume_point':
                # a resume point invalidates locally cached cast results
                available.clear()
    if num_removed:
        remove_same_as(graph)
    # remove casts with unused results
    for block in graph.iterblocks():
        used = {}
        for link in block.exits:
            for arg in link.args:
                used[arg] = True
        # walk backwards so a cast feeding only later-deleted casts
        # is deleted as well
        for i, op in list(enumerate(block.operations))[::-1]:
            if op.opname == "cast_pointer" and op.result not in used:
                del block.operations[i]
                num_removed += 1
            else:
                for arg in op.args:
                    used[arg] = True
    if translator.config.translation.verbose:
        log.removecasts(
            "removed %s cast_pointers in %s" % (num_removed, graph.name))
    return num_removed
def remove_superfluous_keep_alive(graph):
    """Within each block, drop duplicated 'keepalive' operations so that
    only the last keepalive of any given variable survives."""
    for block in graph.iterblocks():
        kept = {}
        indexed_ops = list(enumerate(block.operations))
        indexed_ops.reverse()
        # walk backwards: the last keepalive of each variable is met first
        for i, op in indexed_ops:
            if op.opname != "keepalive":
                continue
            v = op.args[0]
            if v in kept:
                del block.operations[i]
            else:
                kept[v] = True
##def rename_extfunc_calls(translator):
## from pypy.rpython.extfunctable import table as extfunctable
## def visit(block):
## if isinstance(block, Block):
## for op in block.operations:
## if op.opname != 'direct_call':
## continue
## functionref = op.args[0]
## if not isinstance(functionref, Constant):
## continue
## _callable = functionref.value._obj._callable
## for func, extfuncinfo in extfunctable.iteritems(): # precompute a dict?
## if _callable is not extfuncinfo.ll_function or not extfuncinfo.backend_functiontemplate:
## continue
## language, functionname = extfuncinfo.backend_functiontemplate.split(':')
## if language is 'C':
## old_name = functionref.value._obj._name[:]
## functionref.value._obj._name = functionname
## #print 'rename_extfunc_calls: %s -> %s' % (old_name, functionref.value._obj._name)
## break
## for func, graph in translator.flowgraphs.iteritems():
## traverse(visit, graph)
| Python |
from pypy.translator.backendopt.raisingop2direct_call import raisingop2direct_call
from pypy.translator.backendopt import removenoops
from pypy.translator.backendopt import inline
from pypy.translator.backendopt.malloc import remove_mallocs
from pypy.translator.backendopt.constfold import constant_fold_graph
from pypy.translator.backendopt.stat import print_statistics
from pypy.translator.backendopt.merge_if_blocks import merge_if_blocks
from pypy.translator import simplify
from pypy.translator.backendopt.escape import malloc_to_stack
from pypy.translator.backendopt import mallocprediction
from pypy.translator.backendopt.removeassert import remove_asserts
from pypy.translator.backendopt.support import log
from pypy.translator.backendopt.checkvirtual import check_virtual_methods
from pypy.objspace.flow.model import checkgraph
INLINE_THRESHOLD_FOR_TEST = 33
def get_function(dottedname):
parts = dottedname.split('.')
module = '.'.join(parts[:-1])
name = parts[-1]
try:
mod = __import__(module, {}, {}, ['__doc__'])
except ImportError, e:
raise Exception, "Import error loading %s: %s" % (dottedname, e)
try:
func = getattr(mod, name)
except AttributeError:
raise Exception, "Function %s not found in module" % dottedname
return func
def backend_optimizations(translator, graphs=None, secondary=False, **kwds):
    """Run the configured backend optimization passes over 'graphs'
    (default: all graphs of the translator).  Keyword arguments override
    the translation.backendopt config options.  'secondary' is set when
    re-running over helper graphs produced after the first pass; it
    disables profile-based inlining.
    """
    # sensible keywords are
    # raisingop2direct_call, inline_threshold, mallocs
    # merge_if_blocks, constfold, heap2stack
    # clever_malloc_removal, remove_asserts
    config = translator.config.translation.backendopt.copy(as_default=True)
    config.set(**kwds)
    if graphs is None:
        graphs = translator.graphs
    if config.print_statistics:
        print "before optimizations:"
        print_statistics(translator.graphs[0], translator, "per-graph.txt")
    if config.raisingop2direct_call:
        raisingop2direct_call(translator, graphs)
    if translator.rtyper.type_system.name == 'ootypesystem':
        check_virtual_methods()
    # remove obvious no-ops
    for graph in graphs:
        removenoops.remove_same_as(graph)
        simplify.eliminate_empty_blocks(graph)
        simplify.transform_dead_op_vars(graph, translator)
        removenoops.remove_duplicate_casts(graph, translator)
    if config.print_statistics:
        print "after no-op removal:"
        print_statistics(translator.graphs[0], translator)
    # inlining phase followed by malloc removal and constant folding
    if config.inline or config.mallocs:
        heuristic = get_function(config.inline_heuristic)
        if config.inline:
            threshold = config.inline_threshold
        else:
            threshold = 0
        inline_malloc_removal_phase(config, translator, graphs,
                                    threshold,
                                    inline_heuristic=heuristic)
        constfold(config, graphs)
    # alternative combined inlining-and-malloc-removal heuristic
    if config.clever_malloc_removal:
        threshold = config.clever_malloc_removal_threshold
        heuristic = get_function(config.clever_malloc_removal_heuristic)
        log.inlineandremove("phase with threshold factor: %s" % threshold)
        log.inlineandremove("heuristic: %s.%s" % (heuristic.__module__,
                                                  heuristic.__name__))
        count = mallocprediction.clever_inlining_and_malloc_removal(
            translator, graphs,
            threshold = threshold,
            heuristic=heuristic)
        log.inlineandremove("removed %d simple mallocs in total" % count)
        constfold(config, graphs)
        if config.print_statistics:
            print "after clever inlining and malloc removal"
            print_statistics(translator.graphs[0], translator)
    # a second inlining pass driven by instrumented call counters
    if config.profile_based_inline and not secondary:
        threshold = config.profile_based_inline_threshold
        heuristic = get_function(config.profile_based_inline_heuristic)
        inline.instrument_inline_candidates(graphs, threshold)
        counters = translator.driver_instrument_result(
            config.profile_based_inline)
        n = len(counters)
        def call_count_pred(label):
            if label >= n:
                return False
            return counters[label] > 250 # xxx introduce an option for this
        inline_malloc_removal_phase(config, translator, graphs,
                                    threshold,
                                    inline_heuristic=heuristic,
                                    call_count_pred=call_count_pred)
        constfold(config, graphs)
    if config.remove_asserts:
        remove_asserts(translator, graphs)
    if config.heap2stack:
        assert graphs is translator.graphs  # XXX for now
        malloc_to_stack(translator)
    if config.merge_if_blocks:
        log.mergeifblocks("starting to merge if blocks")
        for graph in graphs:
            merge_if_blocks(graph, translator.config.translation.verbose)
    if config.print_statistics:
        print "after if-to-switch:"
        print_statistics(translator.graphs[0], translator)
    # final sanity check of every graph
    for graph in graphs:
        checkgraph(graph)
def constfold(config, graphs):
    """Run constant folding over every graph, unless disabled in 'config'."""
    if not config.constfold:
        return
    for graph in graphs:
        constant_fold_graph(graph)
def inline_malloc_removal_phase(config, translator, graphs, inline_threshold,
inline_heuristic,
call_count_pred=None):
type_system = translator.rtyper.type_system.name
# inline functions in each other
if inline_threshold:
log.inlining("phase with threshold factor: %s" % inline_threshold)
log.inlining("heuristic: %s.%s" % (inline_heuristic.__module__,
inline_heuristic.__name__))
inline.auto_inline_graphs(translator, graphs, inline_threshold,
heuristic=inline_heuristic,
call_count_pred=call_count_pred)
if config.print_statistics:
print "after inlining:"
print_statistics(translator.graphs[0], translator)
# vaporize mallocs
if config.mallocs:
log.malloc("starting malloc removal")
remove_mallocs(translator, graphs, type_system)
if config.print_statistics:
print "after malloc removal:"
print_statistics(translator.graphs[0], translator)
| Python |
from pypy.annotation.model import setunion
from pypy.objspace.flow.model import Variable, Constant
from pypy.rpython.lltypesystem import lltype
from pypy.translator.simplify import get_graph
from pypy.rpython.rmodel import inputconst
from pypy.translator.backendopt import support
from pypy.tool.uid import uid
class CreationPoint(object):
    """One abstract creation site of a heap object, recording whether
    objects born there may escape the current function ('escapes') or be
    mutated ('changes')."""

    def __init__(self, creation_method, lltype):
        # constants pre-exist the analyzed code, so we must pessimistically
        # assume they escape and are mutated
        is_constant = (creation_method == "constant")
        self.changes = is_constant
        self.escapes = is_constant
        self.creation_method = creation_method
        self.malloced = False
        self.lltype = lltype

    def __repr__(self):
        return ("CreationPoint(<0x%x>, %r, %s, esc=%s, cha=%s)" %
                (uid(self), self.lltype, self.creation_method, self.escapes, self.changes))
class VarState(object):
    """Abstract state of one variable: the set of CreationPoints that the
    objects it may reference can originate from (kept as a dict used as
    a set)."""

    def __init__(self, crep=None):
        self.creation_points = {}
        if crep is not None:
            self.creation_points[crep] = True

    def contains(self, other):
        """True if every creation point of 'other' is also ours."""
        for point in other.creation_points:
            if point not in self.creation_points:
                return False
        return True

    def merge(self, other):
        """Return a fresh VarState covering both input states."""
        merged = VarState()
        merged.creation_points = setunion(self.creation_points,
                                          other.creation_points)
        return merged

    def setescapes(self):
        """Flag every creation point as escaping; return the ones that
        were newly flagged (for dependency propagation)."""
        newly_flagged = []
        for point in self.creation_points:
            if not point.escapes:
                newly_flagged.append(point)
            point.escapes = True
        return newly_flagged

    def setchanges(self):
        """Flag every creation point as mutated; return the ones that
        were newly flagged."""
        newly_flagged = []
        for point in self.creation_points:
            if not point.changes:
                newly_flagged.append(point)
            point.changes = True
        return newly_flagged

    def does_escape(self):
        """True if any of our creation points may escape."""
        for point in self.creation_points:
            if point.escapes:
                return True
        return False

    def does_change(self):
        """True if any of our creation points may be mutated."""
        for point in self.creation_points:
            if point.changes:
                return True
        return False

    def __repr__(self):
        body = ", ".join([repr(point) for point in self.creation_points])
        return "VarState({%s})" % (body, )
class AbstractDataFlowInterpreter(object):
    """Interprocedural escape analysis by abstract interpretation of flow
    graphs.

    Every heap variable is mapped to a VarState (a set of CreationPoints).
    Blocks are (re)flown until a fixpoint is reached; recorded dependencies
    make sure that when a creation point is later discovered to escape or
    change, the blocks whose analysis depended on it are re-scheduled.
    """
    def __init__(self, translation_context):
        self.translation_context = translation_context
        self.scheduled = {} # block: graph containing it
        self.varstates = {} # var-or-const: state
        self.creationpoints = {} # var: creationpoint
        self.constant_cps = {} # const: creationpoint
        self.dependencies = {} # creationpoint: {block: graph containing it}
        self.functionargs = {} # graph: list of state of args
        self.flown_blocks = {} # block: True

    def seen_graphs(self):
        return self.functionargs.keys()

    def getstate(self, var_or_const):
        """Return (creating it if needed) the VarState of a variable or
        constant; None for values that do not live on the heap."""
        if not isonheap(var_or_const):
            return None
        if var_or_const in self.varstates:
            return self.varstates[var_or_const]
        if isinstance(var_or_const, Variable):
            varstate = VarState()
        else:
            # constants share one CreationPoint per constant object
            if var_or_const not in self.constant_cps:
                crep = CreationPoint("constant", var_or_const.concretetype)
                self.constant_cps[var_or_const] = crep
            else:
                crep = self.constant_cps[var_or_const]
            varstate = VarState(crep)
        self.varstates[var_or_const] = varstate
        return varstate

    def getstates(self, varorconstlist):
        return [self.getstate(var) for var in varorconstlist]

    def setstate(self, var, state):
        self.varstates[var] = state

    def get_creationpoint(self, var, method="?"):
        # one CreationPoint per result variable, cached
        if var in self.creationpoints:
            return self.creationpoints[var]
        crep = CreationPoint(method, var.concretetype)
        self.creationpoints[var] = crep
        return crep

    def schedule_function(self, graph):
        """Schedule 'graph' for analysis (if not already seen), creating
        argument states as needed.  Returns (result-state, arg-states)."""
        #print "scheduling function:", graph.name
        startblock = graph.startblock
        if graph in self.functionargs:
            args = self.functionargs[graph]
        else:
            args = []
            for var in startblock.inputargs:
                if not isonheap(var):
                    varstate = None
                else:
                    crep = self.get_creationpoint(var, "arg")
                    varstate = VarState(crep)
                    self.setstate(var, varstate)
                args.append(varstate)
            self.scheduled[startblock] = graph
            self.functionargs[graph] = args
        resultstate = self.getstate(graph.returnblock.inputargs[0])
        return resultstate, args

    def flow_block(self, block, graph):
        """Abstractly execute one block and schedule its successors whose
        input states grew."""
        #print "flowing in block %s of function %s" % (block, graph.name)
        self.flown_blocks[block] = True
        if block is graph.returnblock:
            # returned values escape the function
            if isonheap(block.inputargs[0]):
                changed = self.getstate(block.inputargs[0]).setescapes()
                self.handle_changed(changed)
            return
        if block is graph.exceptblock:
            # raised exceptions (type and value) escape too
            if isonheap(block.inputargs[0]):
                changed = self.getstate(block.inputargs[0]).setescapes()
                self.handle_changed(changed)
            if isonheap(block.inputargs[1]):
                changed = self.getstate(block.inputargs[1]).setescapes()
                self.handle_changed(changed)
            return
        self.curr_block = block
        self.curr_graph = graph
        #print "inputargs", self.getstates(block.inputargs)
        for op in block.operations:
            self.flow_operation(op)
        #print "checking exits..."
        for exit in block.exits:
            #print "exit", exit
            args = self.getstates(exit.args)
            targetargs = self.getstates(exit.target.inputargs)
            #print "  newargs", args
            #print "  targetargs", targetargs
            # flow every block at least once:
            if (multicontains(targetargs, args) and
                exit.target in self.flown_blocks):
                #print "   not necessary"
                continue
            #else:
                #print "   scheduling for flowin"
            for prevstate, origstate, var in zip(args, targetargs,
                                                exit.target.inputargs):
                if not isonheap(var):
                    continue
                newstate = prevstate.merge(origstate)
                self.setstate(var, newstate)
            #print "  args", self.getstates(exit.target.inputargs)
            self.scheduled[exit.target] = graph

    def flow_operation(self, op):
        """Dispatch one operation to its op_* handler; unknown operations
        pessimistically mark all heap arguments as escaping/changing."""
        #print "handling", op
        args = self.getstates(op.args)
        #print "args:", args
        opimpl = getattr(self, 'op_'+op.opname, None)
        if opimpl is None:
            if isonheap(op.result) or filter(None, args):
                for arg in args:
                    if arg is not None:
                        changed = arg.setchanges()
                        self.handle_changed(changed)
                        changed = arg.setescapes()
                        self.handle_changed(changed)
            #raise NotImplementedError("can't handle %s" % (op.opname, ))
            #print "assuming that '%s' is irrelevant" % op
            return
        res = opimpl(op, *args)
        self.setstate(op.result, res)

    def complete(self):
        """Flow scheduled blocks until the worklist is empty (fixpoint)."""
        while self.scheduled:
            block, graph = self.scheduled.popitem()
            self.flow_block(block, graph)

    def handle_changed(self, changed):
        # re-schedule every block that depends on a newly-flagged crep
        for crep in changed:
            if crep not in self.dependencies:
                continue
            self.scheduled.update(self.dependencies[crep])

    def register_block_dependency(self, state, block=None, graph=None):
        """Record that 'block' must be reflown whenever a creation point
        of 'state' gets newly flagged (defaults to the current block)."""
        if block is None:
            block = self.curr_block
            graph = self.curr_graph
        for crep in state.creation_points:
            self.dependencies.setdefault(crep, {})[block] = graph

    def register_state_dependency(self, state1, state2):
        "state1 depends on state2: if state2 does escape/change, so does state1"
        # change state1 according to how state2 is now
        #print "registering dependency of %s on %s" % (state1, state2)
        if state2.does_escape():
            changed = state1.setescapes()  # mark all crep's as escaping
            self.handle_changed(changed)
        if state2.does_change():
            changed = state1.setchanges()  # mark all crep's as changing
            self.handle_changed(changed)
        # register a dependency of the current block on state2:
        # that means that if state2 changes the current block will be reflown
        # triggering this function again and thus updating state1
        self.register_block_dependency(state2)

    # _____________________________________________________________________
    # operation implementations
    def op_malloc(self, op, typestate):
        return VarState(self.get_creationpoint(op.result, "malloc"))

    def op_malloc_varsize(self, op, typestate, lengthstate):
        return VarState(self.get_creationpoint(op.result, "malloc_varsize"))

    def op_keepalive(self, op, state):
        return None

    def op_cast_pointer(self, op, state):
        # a cast aliases the same object: same state
        return state

    def op_setfield(self, op, objstate, fieldname, valuestate):
        changed = objstate.setchanges()
        self.handle_changed(changed)
        if valuestate is not None:
            # be pessimistic for now:
            # everything that gets stored into a structure escapes and changes
            self.handle_changed(changed)
            changed = valuestate.setchanges()
            self.handle_changed(changed)
            changed = valuestate.setescapes()
            self.handle_changed(changed)
        return None

    def op_setarrayitem(self, op, objstate, indexstate, valuestate):
        changed = objstate.setchanges()
        self.handle_changed(changed)
        if valuestate is not None:
            # everything that gets stored into a structure escapes and changes
            self.handle_changed(changed)
            changed = valuestate.setchanges()
            self.handle_changed(changed)
            changed = valuestate.setescapes()
            self.handle_changed(changed)
        return None

    def op_getarrayitem(self, op, objstate, indexstate):
        if isonheap(op.result):
            return VarState(self.get_creationpoint(op.result, "getarrayitem"))

    def op_getfield(self, op, objstate, fieldname):
        if isonheap(op.result):
            # assume that getfield creates a new value
            return VarState(self.get_creationpoint(op.result, "getfield"))

    def op_getsubstruct(self, op, objstate, fieldname):
        # since this is really an embedded struct, it has the same
        # state, the same creationpoints, etc.
        return objstate

    def op_getarraysubstruct(self, op, arraystate, indexstate):
        # since this is really a struct embedded somewhere in the array it has
        # the same state, creationpoints, etc. in most cases the resulting
        # pointer should not be used much anyway
        return arraystate

    def op_getarraysize(self, op, arraystate):
        pass

    def op_direct_call(self, op, function, *args):
        graph = get_graph(op.args[0], self.translation_context)
        if graph is None:
            for arg in args:
                if arg is None:
                    continue
                # an external function can change every parameter:
                changed = arg.setchanges()
                self.handle_changed(changed)
            funcargs = [None] * len(args)
        else:
            result, funcargs = self.schedule_function(graph)
        assert len(args) == len(funcargs)
        for localarg, funcarg in zip(args, funcargs):
            if localarg is None:
                assert funcarg is None
                continue
            if funcarg is not None:
                self.register_state_dependency(localarg, funcarg)
        if isonheap(op.result):
            # assume that a call creates a new value
            return VarState(self.get_creationpoint(op.result, "direct_call"))

    def op_indirect_call(self, op, function, *args):
        graphs = op.args[-1].value
        args = args[:-1]
        if graphs is None:
            # unknown targets: arguments may escape and change
            for localarg in args:
                if localarg is None:
                    continue
                changed = localarg.setescapes()
                self.handle_changed(changed)
                changed = localarg.setchanges()
                self.handle_changed(changed)
        else:
            for graph in graphs:
                result, funcargs = self.schedule_function(graph)
                assert len(args) == len(funcargs)
                for localarg, funcarg in zip(args, funcargs):
                    if localarg is None:
                        assert funcarg is None
                        continue
                    self.register_state_dependency(localarg, funcarg)
        if isonheap(op.result):
            # assume that a call creates a new value
            return VarState(self.get_creationpoint(op.result, "indirect_call"))

    def op_ptr_iszero(self, op, ptrstate):
        return None

    # these produce no heap state (note: this also rebinds op_keepalive
    # to op_ptr_iszero, which is equivalent -- both return None)
    op_cast_ptr_to_int = op_keepalive = op_ptr_nonzero = op_ptr_iszero

    def op_ptr_eq(self, op, ptr1state, ptr2state):
        return None

    op_ptr_ne = op_ptr_eq

    def op_same_as(self, op, objstate):
        return objstate
def isonheap(var_or_const):
    """True if the low-level type of 'var_or_const' is a heap pointer."""
    vartype = var_or_const.concretetype
    return isinstance(vartype, lltype.Ptr)
def multicontains(l1, l2):
    """Pointwise VarState.contains over two parallel state lists.
    None entries (non-heap values) must line up with None entries."""
    assert len(l1) == len(l2)
    for i, state in enumerate(l1):
        other = l2[i]
        if state is None:
            assert other is None
            continue
        if not state.contains(other):
            return False
    return True
def malloc_to_stack(t):
    """Run the escape analysis over all graphs of translator 't' and turn
    every malloc whose result provably does not escape -- and which is
    not inside a loop and whose type has no destructor -- into a stack
    ('flavored') allocation."""
    adi = AbstractDataFlowInterpreter(t)
    for graph in t.graphs:
        if graph.startblock not in adi.flown_blocks:
            adi.schedule_function(graph)
            adi.complete()
    for graph in t.graphs:
        loop_blocks = support.find_loop_blocks(graph)
        for block, op in graph.iterblockops():
            if op.opname == 'malloc':
                STRUCT = op.args[0].value
                # must not remove mallocs of structures that have a RTTI with a destructor
                try:
                    destr_ptr = lltype.getRuntimeTypeInfo(STRUCT)._obj.destructor_funcptr
                    if destr_ptr:
                        continue
                except (ValueError, AttributeError), e:
                    # no RTTI / no destructor: safe to consider
                    pass
                varstate = adi.getstate(op.result)
                assert len(varstate.creation_points) == 1
                crep = varstate.creation_points.keys()[0]
                if not crep.escapes:
                    if block not in loop_blocks:
                        print "moving object from heap to stack %s in %s" % (op, graph.name)
                        op.opname = 'flavored_malloc'
                        op.args.insert(0, inputconst(lltype.Void, 'stack'))
                    else:
                        print "%s in %s is a non-escaping malloc in a loop" % (op, graph.name)
| Python |
from pypy.translator.simplify import get_graph
from pypy.rpython.lltypesystem.lloperation import llop, LL_OPERATIONS
from pypy.rpython.lltypesystem import lltype
from pypy.translator.backendopt import graphanalyze
import py
from pypy.tool.ansi_print import ansi_log
log = py.log.Producer("canraise")
py.log.setconsumer("canraise", ansi_log)
class RaiseAnalyzer(graphanalyze.GraphAnalyzer):
    """Graph analyzer answering: can this operation (or anything it
    transitively calls) raise an exception?"""

    def operation_is_true(self, op):
        # unknown operations are conservatively assumed to raise
        if op.opname not in LL_OPERATIONS:
            log.WARNING("Unknown operation: %s" % op.opname)
            return True
        return bool(LL_OPERATIONS[op.opname].canraise)

    def analyze_exceptblock(self, block, seen=None):
        # reaching the except block means an exception is definitely live
        return True

    # backward compatible interface
    def can_raise(self, op, seen=None):
        return self.analyze(op, seen)
| Python |
import py
from pypy.rpython.lltypesystem import lltype
from pypy.translator.simplify import get_graph
from pypy.rpython.rmodel import inputconst
from pypy.tool.ansi_print import ansi_log
from pypy.annotation.model import setunion, s_ImpossibleValue
from pypy.translator.unsimplify import split_block, copyvar, insert_empty_block
from pypy.objspace.flow.model import Constant, Variable, SpaceOperation, c_last_exception
from pypy.rpython.lltypesystem import lltype
log = py.log.Producer("backendopt")
py.log.setconsumer("backendopt", ansi_log)
def graph_operations(graph):
    """Iterate over every operation of every block of 'graph'."""
    for blk in graph.iterblocks():
        for operation in blk.operations:
            yield operation
def all_operations(graphs):
    """Iterate over every operation of every graph in 'graphs'."""
    for gr in graphs:
        for blk in gr.iterblocks():
            for operation in blk.operations:
                yield operation
def annotate(translator, func, result, args):
    """Annotate the helper 'func' for the given argument/result example
    values and return a Constant function pointer to it, suitable as the
    first argument of a direct_call."""
    argtypes = [arg.concretetype for arg in args]
    graph = translator.rtyper.annotate_helper(func, argtypes)
    FUNCTYPE = lltype.FuncType(argtypes, result.concretetype)
    fptr = lltype.functionptr(FUNCTYPE, func.func_name, graph=graph)
    return inputconst(lltype.typeOf(fptr), fptr)
def var_needsgc(var):
    """True if 'var' holds a gc-managed pointer.  Variables without a
    'concretetype' are assumed to be PyObjPtr, which needs gc."""
    if not hasattr(var, 'concretetype'):
        # assume PyObjPtr
        return True
    vartype = var.concretetype
    return isinstance(vartype, lltype.Ptr) and vartype._needsgc()
def needs_conservative_livevar_calculation(block):
    """Return True if 'block' passes along a raw (non-gc) pointer that is
    not a function pointer and cannot be cast to CLASSTYPE: such values
    force a conservative live-variable computation in the exception
    transformer."""
    from pypy.rpython.lltypesystem import rclass
    vars = block.getvariables()
    assert len(block.exits) == 1
    exitingvars = block.exits[0].args
    for var in vars:
        # default to PyObjPtr for variables without a concretetype
        TYPE = getattr(var, "concretetype", lltype.Ptr(lltype.PyObject))
        if isinstance(TYPE, lltype.Ptr) and not var_needsgc(var):
            if isinstance(TYPE.TO, lltype.FuncType):
                continue
            try:
                lltype.castable(TYPE, rclass.CLASSTYPE)
            except lltype.InvalidCast:
                if var in exitingvars:
                    return True
    else:
        # for/else: the loop has no break, so this runs whenever no
        # variable triggered an early 'return True'
        return False
def generate_keepalive(vars, annotator=None):
    """Build a list of 'keepalive' operations for the given variables.

    Constants and variables of atomic concretetype are skipped.  When an
    annotator is supplied, the fresh Void result variables are bound to
    s_ImpossibleValue.
    """
    ops = []
    for v in vars:
        if isinstance(v, Constant):
            continue
        if v.concretetype._is_atomic():
            continue
        v_void = Variable()
        v_void.concretetype = lltype.Void
        if annotator is not None:
            annotator.setbinding(v_void, s_ImpossibleValue)
        ops.append(SpaceOperation('keepalive', [v], v_void))
    return ops
def split_block_with_keepalive(block, index_operation,
                               keep_alive_op_args=True,
                               annotator=None):
    """Split 'block' before operation number 'index_operation' and insert
    'keepalive' operations into the new after-block, so that gc objects
    pointed to by raw pointers stay alive across the split.

    Returns the link connecting the two halves (as split_block does).
    """
    splitlink = split_block(annotator, block, index_operation)
    afterblock = splitlink.target
    conservative_keepalives = needs_conservative_livevar_calculation(block)
    if conservative_keepalives:
        # a raw pointer possibly pointing into a gc object leaves the
        # first block: conservatively keep all its gc variables alive
        keep_alive_vars = [var for var in block.getvariables()
                           if var_needsgc(var)]
        # XXX you could maybe remove more, if the variables are kept
        # alive by something else. but this is sometimes hard to know
        for i, var in enumerate(keep_alive_vars):
            try:
                index = splitlink.args.index(var)
                newvar = afterblock.inputargs[index]
            except ValueError:
                # variable not yet passed along the link: add it
                splitlink.args.append(var)
                newvar = copyvar(annotator, var)
                afterblock.inputargs.append(newvar)
            keep_alive_vars[i] = newvar
    elif keep_alive_op_args and afterblock.operations:
        # keep alive only the gc arguments of the first operation
        # after the split
        keep_alive_vars = [var for var in afterblock.operations[0].args
                           if isinstance(var, Variable) and var_needsgc(var)]
        if len(afterblock.operations) > 1 or afterblock.exitswitch != c_last_exception:
            afterblock.operations[1:1] = generate_keepalive(keep_alive_vars,
                                                            annotator=annotator)
            keep_alive_vars = []
    else:
        keep_alive_vars = []
    pos = len(afterblock.operations)
    if afterblock.exitswitch == c_last_exception:
        pos -= 1 # insert the keepalives just before the last operation
                 # in case of exception-catching
    afterblock.operations[pos:pos] = generate_keepalive(keep_alive_vars)
    return splitlink
def find_calls_from(translator, graph):
    """Yield (block, called_graph) for every direct or indirect call
    found in 'graph'.

    Graphs whose Python function is flagged 'suggested_primitive' are
    skipped entirely.
    """
    func = getattr(graph, "func", None)
    if getattr(func, "suggested_primitive", False):
        return
    for block in graph.iterblocks():
        for op in block.operations:
            if op.opname == "direct_call":
                target = get_graph(op.args[0], translator)
                if target is not None:
                    yield block, target
            elif op.opname == "indirect_call":
                targets = op.args[-1].value
                if targets is not None:
                    for target in targets:
                        yield block, target
def find_backedges(graph, block=None, seen=None, seeing=None):
    """Return the list of back-edge links of the flow graph, found by a
    recursive depth-first walk from the start block.

    'seen' holds all blocks ever visited; 'seeing' holds the blocks on
    the current DFS stack, so a link to a block in 'seeing' closes a loop.
    """
    if block is None:
        block = graph.startblock
    if seen is None:
        seen = {block: None}
    if seeing is None:
        seeing = {}
    result = []
    seeing[block] = True
    for link in block.exits:
        target = link.target
        if target not in seen:
            seen[target] = None
            result.extend(find_backedges(graph, target, seen, seeing))
        elif target in seeing:
            # target is an ancestor on the DFS stack: back-edge
            result.append(link)
    del seeing[block]
    return result
def compute_reachability(graph):
    """Return a dict mapping each block of 'graph' to the set (as a dict
    with True values) of blocks reachable from it."""
    reachable = {}
    blocks = list(graph.iterblocks())
    for block in py.builtin.reversed(blocks): # this order should make the reuse path more likely
        reach = {}
        scheduled = [block]
        while scheduled:
            current = scheduled.pop()
            for link in current.exits:
                if link.target in reachable:
                    # target already computed: reuse its result wholesale
                    reach[link.target] = True
                    reach = setunion(reach, reachable[link.target])
                    continue
                if link.target not in reach:
                    reach[link.target] = True
                    scheduled.append(link.target)
        reachable[block] = reach
    return reachable
def find_loop_blocks(graph):
    """Return a dict mapping every block that is part of a loop to the
    loop's head block."""
    loop = {}
    reachable = compute_reachability(graph)
    for backedge in find_backedges(graph):
        head = backedge.target
        tail = backedge.prevblock
        loop[head] = head
        loop[tail] = head
        # walk forward from the loop head; every block from which the
        # loop tail is still reachable belongs to this loop
        todo = [head]
        visited = {}
        while todo:
            block = todo.pop()
            in_loop = tail in reachable[block]
            visited[block] = True
            if in_loop:
                loop[block] = head
            for link in block.exits:
                if link.target not in visited:
                    todo.append(link.target)
    return loop
def md5digest(translator):
    """Return an MD5 digest summarizing every operation of the
    translator's graphs.

    Useful as a cheap fingerprint to detect whether any graph changed.
    """
    # the 'md5' module is deprecated (and removed in Python 3);
    # prefer hashlib when available, fall back to md5 otherwise
    try:
        from hashlib import md5
    except ImportError:
        from md5 import new as md5
    m = md5()
    for op in all_operations(translator.graphs):
        m.update(op.opname + str(op.result))
        for a in op.args:
            m.update(str(a))
    # digest() already returns a fresh string; no copy needed
    return m.digest()
| Python |
from pypy.translator.backendopt.escape import AbstractDataFlowInterpreter
from pypy.translator.backendopt.all import remove_mallocs
from pypy.translator.backendopt import inline
from pypy.rpython.lltypesystem import lltype
from pypy.translator import simplify
from pypy.translator.backendopt import removenoops
from pypy.translator.backendopt.support import log
# Inlining size thresholds: 'preparation' below uses the small one,
# 'inline_and_remove' / 'clever_inlining_and_malloc_removal' the big one.
SMALL_THRESHOLD = 15
BIG_THRESHOLD = 50
def find_malloc_creps(graph, adi, translator):
    """Return the non-escaping malloc creation points of 'graph'.

    'adi' is the AbstractDataFlowInterpreter holding the escape-analysis
    state.  The result maps each such creation point to an (initially
    empty) dict of graphs it flows into.
    """
    # mapping from malloc creation point to graphs that it flows into
    malloc_creps = {}
    # find all mallocs that don't escape
    for block, op in graph.iterblockops():
        if op.opname == 'malloc':
            STRUCT = op.args[0].value
            # must not remove mallocs of structures that have a RTTI with a destructor
            try:
                destr_ptr = lltype.getRuntimeTypeInfo(
                    STRUCT)._obj.destructor_funcptr
                if destr_ptr:
                    continue
            except (ValueError, AttributeError), e:
                pass
            varstate = adi.getstate(op.result)
            assert len(varstate.creation_points) == 1
            crep = varstate.creation_points.keys()[0]
            if not crep.escapes:
                malloc_creps[crep] = {}
    return malloc_creps
def find_calls_where_creps_go(interesting_creps, graph, adi,
                              translator, seen):
    """Follow creation points into the graphs they are passed to.

    Recursively walks direct calls from 'graph', recording in 'seen' a
    mapping (called_graph, argindex) -> set of calling graphs.  Creation
    points that cannot be tracked (merged with other creation points, or
    passed to an indirect call) are removed from 'interesting_creps'
    in place.
    """
    #print "find_calls_where_creps_go", interesting_creps, graph.name
    #print seen
    # drop creps that are merged with another creation point
    for block in graph.iterblocks():
        for var in block.getvariables():
            varstate = adi.getstate(var)
            if varstate is None:
                continue
            for crep in varstate.creation_points:
                if crep in interesting_creps:
                    if len(varstate.creation_points) != 1:
                        del interesting_creps[crep]
                        break
    # drop creps that are passed into an indirect_call
    for block, op in graph.iterblockops():
        if not interesting_creps:
            return
        if op.opname == "indirect_call":
            for var in op.args[:-1]:
                varstate = adi.getstate(var)
                if varstate is None:
                    continue
                for crep in varstate.creation_points:
                    if crep in interesting_creps:
                        del interesting_creps[crep]
        elif op.opname == "direct_call":
            #print op, interesting_creps
            called_graph = simplify.get_graph(op.args[0], translator)
            interesting = {}
            for i, var in enumerate(op.args[1:]):
                #print i, var,
                varstate = adi.getstate(var)
                if varstate is None:
                    #print "no varstate"
                    continue
                if len(varstate.creation_points) == 1:
                    crep = varstate.creation_points.keys()[0]
                    if crep not in interesting_creps:
                        #print "not interesting"
                        continue
                    if called_graph is None:
                        del interesting_creps[crep]
                        #print "graph not found"
                        continue
                    if (called_graph, i) in seen:
                        seen[(called_graph, i)][graph] = True
                        #print "seen already"
                    else:
                        #print "taking", crep
                        seen[(called_graph, i)] = {graph: True}
                        arg = called_graph.startblock.inputargs[i]
                        argstate = adi.getstate(arg)
                        argcrep = [c for c in argstate.creation_points
                                    if c.creation_method == "arg"][0]
                        interesting[argcrep] = True
            #print interesting
            if interesting:
                find_calls_where_creps_go(interesting, called_graph,
                                          adi, translator, seen)
    return interesting_creps
def find_malloc_removal_candidates(t, graphs):
    """Compute which calls should be inlined so mallocs become removable.

    Returns (callgraph, caller_candidates): 'callgraph' is a list of
    (caller_graph, called_graph) pairs suitable for the auto-inliner;
    'caller_candidates' is the set (as a dict) of graphs that contain a
    non-escaping malloc.
    """
    adi = AbstractDataFlowInterpreter(t)
    for graph in graphs:
        if graph.startblock not in adi.flown_blocks:
            adi.schedule_function(graph)
            adi.complete()
    targetset = dict.fromkeys(graphs)
    caller_candidates = {}
    seen = {}
    for graph in adi.seen_graphs():
        creps = find_malloc_creps(graph, adi, t)
        #print "malloc creps", creps
        if creps:
            find_calls_where_creps_go(creps, graph, adi, t, seen)
            if creps:
                if graph in targetset:
                    caller_candidates[graph] = True
    callgraph = []
    for (called_graph, i), callers in seen.iteritems():
        for caller in callers:
            if caller in targetset:
                callgraph.append((caller, called_graph))
            else:
                log.inlineandremove.WARNING("would like to inline into"
                                            " out of target set: %r"
                                            % caller)
    return callgraph, caller_candidates
def inline_and_remove(t, graphs, threshold=BIG_THRESHOLD,
                      heuristic=inline.inlining_heuristic):
    """Inline the calls that make mallocs removable, then remove them.

    Returns the number of removed mallocs, or False when nothing could
    be inlined.
    """
    callgraph, caller_candidates = find_malloc_removal_candidates(t, graphs)
    log.inlineandremove("found %s malloc removal candidates" %
                        len(caller_candidates))
    if not callgraph:
        return False
    count = inline.auto_inlining(t, callgraph=callgraph,
                                 threshold=threshold,
                                 heuristic=heuristic)
    if not count:
        return False
    log.inlineandremove('inlined %d callsites.'% (count,))
    return remove_mallocs(t, caller_candidates.keys())
def preparation(translator, graphs, threshold=SMALL_THRESHOLD,
                heuristic=inline.inlining_heuristic):
    """One initial pass of auto-inlining followed by malloc removal.

    Returns the number of mallocs removed.
    """
    inline.auto_inline_graphs(translator, graphs, threshold,
                              heuristic=heuristic)
    count = remove_mallocs(translator, graphs)
    log.inlineandremove("preparation removed %s mallocs in total" % count)
    return count
def clever_inlining_and_malloc_removal(translator, graphs=None,
                                       threshold=BIG_THRESHOLD,
                                       heuristic=inline.inlining_heuristic):
    """Alternate inlining and malloc removal until no more progress,
    then clean up leftover keepalives and duplicate casts.

    Returns the total number of mallocs removed.
    """
    if graphs is None:
        graphs = translator.graphs
    total = 0
    while True:
        progress = inline_and_remove(translator, graphs, threshold=threshold,
                                     heuristic=heuristic)
        if not progress:
            break
        total += progress
    for graph in graphs:
        removenoops.remove_superfluous_keep_alive(graph)
        removenoops.remove_duplicate_casts(graph, translator)
    return total
| Python |
from pypy.translator.simplify import get_graph
from pypy.rpython.lltypesystem.lloperation import llop, LL_OPERATIONS
from pypy.rpython.lltypesystem import lltype
class GraphAnalyzer(object):
    """generic way to analyze graphs: recursively follow it until the first
    operation is found on which self.operation_is_true returns True"""

    def __init__(self, translator):
        self.translator = translator
        # cache: graph -> result of analyze_direct_call(graph)
        self.analyzed_calls = {}

    # methods to be overridden by subclass

    def operation_is_true(self, op):
        raise NotImplementedError("abstract base class")

    def analyze_exceptblock(self, block, seen=None):
        # whether reaching a graph's except block makes the property true
        return False

    def analyze_startblock(self, block, seen=None):
        return False

    def analyze_external_call(self, op):
        # direct_call with no graph available: assume the worst
        return True

    def analyze_link(self, graph, link):
        return False

    # general methods

    def analyze(self, op, seen=None):
        """Analyze a single operation, following calls recursively.

        Note: returns None (falsy) rather than False when the operation
        itself does not satisfy operation_is_true.
        """
        if op.opname == "direct_call":
            graph = get_graph(op.args[0], self.translator)
            if graph is None:
                return self.analyze_external_call(op)
            return self.analyze_direct_call(graph, seen)
        elif op.opname == "indirect_call":
            if op.args[-1].value is None:
                # no known call targets: assume the worst
                return True
            return self.analyze_indirect_call(op.args[-1].value, seen)
        if self.operation_is_true(op):
            return True

    def analyze_direct_call(self, graph, seen=None):
        """Analyze every operation, link and special block of 'graph'
        (and, recursively, of the graphs it calls); caches the result."""
        if graph in self.analyzed_calls:
            return self.analyzed_calls[graph]
        if seen is None:
            seen = {}
        if graph in seen:
            # recursive call: break the cycle by answering False
            # NOTE(review): caching False here for a graph still being
            # analyzed looks premature if the cycle's final answer is
            # True -- confirm intended
            self.analyzed_calls[graph] = False
            return False
        else:
            seen[graph] = True
        for block in graph.iterblocks():
            if block is graph.startblock:
                if self.analyze_startblock(block, seen):
                    self.analyzed_calls[graph] = True
                    return True
            if block is graph.exceptblock:
                if self.analyze_exceptblock(block, seen):
                    self.analyzed_calls[graph] = True
                    return True
            for op in block.operations:
                if self.analyze(op, seen):
                    self.analyzed_calls[graph] = True
                    return True
            for exit in block.exits:
                if self.analyze_link(graph, exit):
                    self.analyzed_calls[graph] = True
                    return True
        self.analyzed_calls[graph] = False
        return False

    def analyze_indirect_call(self, graphs, seen=None):
        # true as soon as any of the possible call targets is true
        for graph in graphs:
            if self.analyze_direct_call(graph, seen):
                return True
        return False

    def analyze_all(self, graphs=None):
        """Pre-analyze every operation of every graph, filling the cache."""
        if graphs is None:
            graphs = self.translator.graphs
        for graph in graphs:
            for block, op in graph.iterblockops():
                self.analyze(op)
| Python |
from pypy.objspace.flow.model import Variable, Constant, Block, Link
from pypy.objspace.flow.model import SpaceOperation, traverse
from pypy.tool.algo.unionfind import UnionFind
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.ootypesystem import ootype
from pypy.translator import simplify
from pypy.translator.backendopt import removenoops
from pypy.translator.backendopt.support import log
class LifeTime:
    """A group of Variables that may hold the same value.

    Records all creation points and use points of values in the group;
    groups are merged by the union-find in compute_lifetimes().
    """

    def __init__(self, key):
        block, var = key
        assert isinstance(var, Variable)
        # set of (block, var) pairs belonging to this group
        self.variables = {(block, var): True}
        self.creationpoints = {}  # set of ("type of creation point", ...)
        self.usepoints = {}       # set of ("type of use point", ...)

    def update(self, other):
        # merge another LifeTime into this one (called by the union-find)
        self.variables.update(other.variables)
        self.creationpoints.update(other.creationpoints)
        self.usepoints.update(other.usepoints)
class BaseMallocRemover(object):
    """Common machinery for removing non-escaping mallocs.

    A malloc'ed structure whose pointer never escapes a graph is replaced
    by one fresh variable per (flattened) field; field accesses become
    same_as operations or plain rebindings.  Subclasses specialize for
    the lltype and ootype type systems.
    """

    IDENTITY_OPS = ('same_as',)  # result is an alias of the first argument
    SUBSTRUCT_OPS = ()           # ops returning a pointer into a structure
    MALLOC_OP = None             # allocation opname, set by subclasses
    FIELD_ACCESS = {}            # opnames that read/write a single field
    SUBSTRUCT_ACCESS = {}        # opnames that take substructure pointers
    CHECK_ARRAY_INDEX = {}       # opnames whose index arg must be constant

    def __init__(self, verbose=True):
        self.verbose = verbose

    # --- hooks overridden by the type-system-specific subclasses ---

    def get_STRUCT(self, TYPE):
        raise NotImplementedError

    def visit_substruct_op(self, node, union, op):
        raise NotImplementedError

    def do_substruct_access(self, op):
        raise NotImplementedError

    def union_wrapper(self, S):
        return False

    def RTTI_dtor(self, STRUCT):
        return False

    def flatten(self, S):
        raise NotImplementedError

    def key_for_field_access(self, S, fldname):
        raise NotImplementedError

    def inline_type(self, TYPE):
        raise NotImplementedError

    def flowin(self, block, count, var, newvarsmap):
        # in this 'block', follow where the 'var' goes to and replace
        # it by a flattened-out family of variables. This family is given
        # by newvarsmap, whose keys are the 'flatnames'.
        # 'newvarsmap' is None when 'var' is created by a malloc inside
        # this very block; it is then built when the malloc op is reached.
        vars = {var: True}
        self.last_removed_access = None

        def list_newvars():
            return [newvarsmap[key] for key in self.flatnames]

        assert block.operations != ()
        self.newops = []
        for op in block.operations:
            for arg in op.args[1:]:   # should be the first arg only
                assert arg not in vars
            if op.args and op.args[0] in vars:
                self.flowin_op(op, vars, newvarsmap)
            elif op.result in vars:
                assert op.opname == self.MALLOC_OP
                assert vars == {var: True}
                progress = True
                # drop the "malloc" operation
                newvarsmap = self.flatconstants.copy()   # zero initial values
                # if there are substructures, they are now individually
                # malloc'ed in an exploded way. (They will typically be
                # removed again by the next malloc removal pass.)
                for key in self.needsubmallocs:
                    v = Variable()
                    v.concretetype = self.newvarstype[key]
                    c = Constant(v.concretetype.TO, lltype.Void)
                    if c.value == op.args[0].value:
                        progress = False   # replacing a malloc with
                                           # the same malloc!
                    newop = SpaceOperation(self.MALLOC_OP, [c], v)
                    self.newops.append(newop)
                    newvarsmap[key] = v
                count[0] += progress
            else:
                self.newops.append(op)
        assert block.exitswitch not in vars
        for link in block.exits:
            newargs = []
            for arg in link.args:
                if arg in vars:
                    # expand the removed pointer into its flattened family
                    newargs += list_newvars()
                else:
                    newargs.append(arg)
            link.args[:] = newargs
        self.insert_keepalives(list_newvars())
        block.operations[:] = self.newops

    def compute_lifetimes(self, graph):
        """Compute the static data flow of the graph: returns a list of LifeTime
        instances, each of which corresponds to a set of Variables from the graph.
        The variables are grouped in the same LifeTime if a value can pass from
        one to the other by following the links. Each LifeTime also records all
        places where a Variable in the set is used (read) or build (created).
        """
        lifetimes = UnionFind(LifeTime)

        def set_creation_point(block, var, *cp):
            _, _, info = lifetimes.find((block, var))
            info.creationpoints[cp] = True

        def set_use_point(block, var, *up):
            _, _, info = lifetimes.find((block, var))
            info.usepoints[up] = True

        def union(block1, var1, block2, var2):
            if isinstance(var1, Variable):
                lifetimes.union((block1, var1), (block2, var2))
            elif isinstance(var1, Constant):
                set_creation_point(block2, var2, "constant", var1)
            else:
                raise TypeError(var1)

        for var in graph.startblock.inputargs:
            set_creation_point(graph.startblock, var, "inputargs")
        set_use_point(graph.returnblock, graph.returnblock.inputargs[0], "return")
        set_use_point(graph.exceptblock, graph.exceptblock.inputargs[0], "except")
        set_use_point(graph.exceptblock, graph.exceptblock.inputargs[1], "except")

        def visit(node):
            if isinstance(node, Block):
                for op in node.operations:
                    if op.opname in self.IDENTITY_OPS:
                        # special-case these operations to identify their input
                        # and output variables
                        union(node, op.args[0], node, op.result)
                        continue
                    if op.opname in self.SUBSTRUCT_OPS:
                        if self.visit_substruct_op(node, union, op):
                            continue
                    for i in range(len(op.args)):
                        if isinstance(op.args[i], Variable):
                            set_use_point(node, op.args[i], "op", node, op, i)
                    set_creation_point(node, op.result, "op", node, op)
                if isinstance(node.exitswitch, Variable):
                    set_use_point(node, node.exitswitch, "exitswitch", node)

            if isinstance(node, Link):
                if isinstance(node.last_exception, Variable):
                    set_creation_point(node.prevblock, node.last_exception,
                                       "last_exception")
                if isinstance(node.last_exc_value, Variable):
                    set_creation_point(node.prevblock, node.last_exc_value,
                                       "last_exc_value")
                d = {}
                for i, arg in enumerate(node.args):
                    union(node.prevblock, arg,
                          node.target, node.target.inputargs[i])
                    if isinstance(arg, Variable):
                        if arg in d:
                            # same variable present several times in link.args
                            # consider it as a 'use' of the variable, which
                            # will disable malloc optimization (aliasing problems)
                            set_use_point(node.prevblock, arg, "dup", node, i)
                        else:
                            d[arg] = True

        traverse(visit, graph)
        return lifetimes.infos()

    def _try_inline_malloc(self, info):
        """Try to inline the mallocs creation and manipulation of the Variables
        in the given LifeTime."""
        # the values must be only ever created by a "malloc"
        lltypes = {}
        for cp in info.creationpoints:
            if cp[0] != "op":
                return False
            op = cp[2]
            if op.opname != self.MALLOC_OP:
                return False
            if not self.inline_type(op.args[0].value):
                return False
            lltypes[op.result.concretetype] = True
        # there must be a single largest malloced GcStruct;
        # all variables can point to it or to initial substructures
        if len(lltypes) != 1:
            return False
        STRUCT = self.get_STRUCT(lltypes.keys()[0])

        # must be only ever accessed via getfield/setfield/getsubstruct/
        # direct_fieldptr, or touched by keepalive or ptr_iszero/ptr_nonzero.
        # Note that same_as and cast_pointer are not recorded in usepoints.
        self.accessed_substructs = {}

        for up in info.usepoints:
            if up[0] != "op":
                return False
            kind, node, op, index = up
            if index != 0:
                return False
            if op.opname in self.CHECK_ARRAY_INDEX:
                if not isinstance(op.args[1], Constant):
                    return False    # non-constant array index
            if op.opname in self.FIELD_ACCESS:
                pass   # ok
            elif op.opname in self.SUBSTRUCT_ACCESS:
                self.do_substruct_access(op)
            else:
                return False

        # must not remove mallocs of structures that have a RTTI with a destructor
        if self.RTTI_dtor(STRUCT):
            return False

        # must not remove unions inlined as the only field of a GcStruct
        if self.union_wrapper(STRUCT):
            return False

        # success: replace each variable with a family of variables (one per field)

        # 'flatnames' is a list of (STRUCTTYPE, fieldname_in_that_struct) that
        # describes the list of variables that should replace the single
        # malloc'ed pointer variable that we are about to remove. For primitive
        # or pointer fields, the new corresponding variable just stores the
        # actual value. For substructures, if pointers to them are "equivalent"
        # to pointers to the parent structure (see equivalent_substruct()) then
        # they are just merged, and flatnames will also list the fields within
        # that substructure. Other substructures are replaced by a single new
        # variable which is a pointer to a GcStruct-wrapper; each is malloc'ed
        # individually, in an exploded way. (The next malloc removal pass will
        # get rid of them again, in the typical case.)
        self.flatnames = []
        self.flatconstants = {}
        self.needsubmallocs = []
        self.newvarstype = {}       # map {item-of-flatnames: concretetype}
        self.direct_fieldptr_key = {}
        self.flatten(STRUCT)
        assert len(self.direct_fieldptr_key) <= 1

        variables_by_block = {}
        for block, var in info.variables:
            vars = variables_by_block.setdefault(block, {})
            vars[var] = True

        count = [0]

        for block, vars in variables_by_block.items():

            # look for variables arriving from outside the block
            for var in vars:
                if var in block.inputargs:
                    i = block.inputargs.index(var)
                    newinputargs = block.inputargs[:i]
                    newvarsmap = {}
                    for key in self.flatnames:
                        newvar = Variable()
                        newvar.concretetype = self.newvarstype[key]
                        newvarsmap[key] = newvar
                        newinputargs.append(newvar)
                    newinputargs += block.inputargs[i+1:]
                    block.inputargs[:] = newinputargs
                    assert var not in block.inputargs
                    self.flowin(block, count, var, newvarsmap)

            # look for variables created inside the block by a malloc
            vars_created_here = []
            for op in block.operations:
                if op.opname == self.MALLOC_OP and op.result in vars:
                    vars_created_here.append(op.result)
            for var in vars_created_here:
                self.flowin(block, count, var, newvarsmap=None)

        return count[0]

    def remove_mallocs_once(self, graph):
        """Perform one iteration of malloc removal."""
        simplify.remove_identical_vars(graph)
        lifetimes = self.compute_lifetimes(graph)
        progress = 0
        for info in lifetimes:
            progress += self._try_inline_malloc(info)
        return progress

    def remove_simple_mallocs(self, graph):
        """Iteratively remove (inline) the mallocs that can be simplified away."""
        tot = 0
        while True:
            count = self.remove_mallocs_once(graph)
            if count:
                if self.verbose:
                    log.malloc('%d simple mallocs removed in %r' % (count, graph.name))
                else:
                    log.dot()
                tot += count
            else:
                break
        return tot
class LLTypeMallocRemover(BaseMallocRemover):
    """Malloc remover for the lltypesystem ('malloc' of GcStructs)."""

    IDENTITY_OPS = ("same_as", "cast_pointer")
    SUBSTRUCT_OPS = ("getsubstruct", "direct_fieldptr")
    MALLOC_OP = "malloc"
    FIELD_ACCESS =     dict.fromkeys(["getfield",
                                      "setfield",
                                      "keepalive",
                                      "ptr_iszero",
                                      "ptr_nonzero",
                                      "getarrayitem",
                                      "setarrayitem"])
    SUBSTRUCT_ACCESS = dict.fromkeys(["getsubstruct",
                                      "direct_fieldptr",
                                      "getarraysubstruct"])
    CHECK_ARRAY_INDEX = dict.fromkeys(["getarrayitem",
                                       "setarrayitem",
                                       "getarraysubstruct"])

    def get_STRUCT(self, TYPE):
        # the malloc result is a Ptr; strip it and insist on a GcStruct
        STRUCT = TYPE.TO
        assert isinstance(STRUCT, lltype.GcStruct)
        return STRUCT

    def visit_substruct_op(self, node, union, op):
        # substruct access equivalent to the parent pointer behaves like
        # a cast_pointer: unify input and result
        S = op.args[0].concretetype.TO
        if self.equivalent_substruct(S, op.args[1].value):
            # assumed to be similar to a cast_pointer
            union(node, op.args[0], node, op.result)
            return True
        return False

    def do_substruct_access(self, op):
        # remember that this (struct, fieldname) is accessed as a
        # substructure pointer; it will get its own wrapper malloc
        S = op.args[0].concretetype.TO
        name = op.args[1].value
        if not isinstance(name, str):      # access by index
            name = 'item%d' % (name,)
        self.accessed_substructs[S, name] = True

    def inline_type(self, TYPE):
        return True

    def equivalent_substruct(self, S, fieldname):
        # we consider a pointer to a GcStruct S as equivalent to a
        # pointer to a substructure 'S.fieldname' if it's the first
        # inlined sub-GcStruct.  As an extension we also allow a pointer
        # to a GcStruct containing just one field to be equivalent to
        # a pointer to that field only (although a mere cast_pointer
        # would not allow casting).  This is needed to malloc-remove
        # the 'wrapper' GcStructs introduced by previous passes of
        # malloc removal.
        if not isinstance(S, lltype.GcStruct):
            return False
        if fieldname != S._names[0]:
            return False
        FIELDTYPE = S._flds[fieldname]
        if isinstance(FIELDTYPE, lltype.GcStruct):
            if FIELDTYPE._hints.get('union'):
                return False
            return True
        if len(S._names) == 1:
            return True
        return False

    def union_wrapper(self, S):
        # check if 'S' is a GcStruct containing a single inlined *union* Struct
        if not isinstance(S, lltype.GcStruct):
            return False
        assert not S._hints.get('union')    # not supported: "GcUnion"
        return (len(S._names) == 1 and
                isinstance(S._flds[S._names[0]], lltype.Struct) and
                S._flds[S._names[0]]._hints.get('union'))

    def RTTI_dtor(self, STRUCT):
        # True if STRUCT has runtime type info with a destructor attached
        try:
            destr_ptr = lltype.getRuntimeTypeInfo(STRUCT)._obj.destructor_funcptr
            if destr_ptr:
                return True
        except (ValueError, AttributeError), e:
            pass
        return False

    def flatten(self, S):
        """Recursively fill flatnames/flatconstants/newvarstype/
        needsubmallocs for all fields of the GcStruct 'S'."""
        start = 0
        if S._names and self.equivalent_substruct(S, S._names[0]):
            # equivalent substructure: merge its fields into ours
            SUBTYPE = S._flds[S._names[0]]
            if isinstance(SUBTYPE, lltype.Struct):
                self.flatten(SUBTYPE)
                start = 1
            else:
                # a one-element FixedSizeArray standing for the single field
                ARRAY = lltype.FixedSizeArray(SUBTYPE, 1)
                self.direct_fieldptr_key[ARRAY, 'item0'] = S, S._names[0]
        for name in S._names[start:]:
            key = S, name
            FIELDTYPE = S._flds[name]
            if key in self.accessed_substructs:
                # accessed via a substructure pointer: needs its own
                # malloc'ed GcStruct wrapper
                self.needsubmallocs.append(key)
                self.flatnames.append(key)
                self.newvarstype[key] = lltype.Ptr(lltype.GcStruct('wrapper',
                                                          ('data', FIELDTYPE)))
            elif not isinstance(FIELDTYPE, lltype.ContainerType):
                example = FIELDTYPE._defl()
                constant = Constant(example)
                constant.concretetype = FIELDTYPE
                self.flatconstants[key] = constant
                self.flatnames.append(key)
                self.newvarstype[key] = FIELDTYPE
            #else:
            #   the inlined substructure is never accessed, drop it

    def key_for_field_access(self, S, fldname):
        """Map a (struct, field) access to its canonical flatnames key."""
        if isinstance(S, lltype.FixedSizeArray):
            if not isinstance(fldname, str):      # access by index
                fldname = 'item%d' % (fldname,)
            try:
                return self.direct_fieldptr_key[S, fldname]
            except KeyError:
                pass
        return S, fldname

    def flowin_op(self, op, vars, newvarsmap):
        """Rewrite one operation whose first argument is a removed pointer."""
        if op.opname in ("getfield", "getarrayitem"):
            S = op.args[0].concretetype.TO
            fldname = op.args[1].value
            key = self.key_for_field_access(S, fldname)
            if key in self.accessed_substructs:
                # field lives in its own wrapper: read its 'data' field
                c_name = Constant('data', lltype.Void)
                newop = SpaceOperation("getfield",
                                       [newvarsmap[key], c_name],
                                       op.result)
            else:
                newop = SpaceOperation("same_as",
                                       [newvarsmap[key]],
                                       op.result)
            self.newops.append(newop)
            self.last_removed_access = len(self.newops)
        elif op.opname in ("setfield", "setarrayitem"):
            S = op.args[0].concretetype.TO
            fldname = op.args[1].value
            key = self.key_for_field_access(S, fldname)
            assert key in newvarsmap
            if key in self.accessed_substructs:
                c_name = Constant('data', lltype.Void)
                newop = SpaceOperation("setfield",
                                 [newvarsmap[key], c_name, op.args[2]],
                                           op.result)
                self.newops.append(newop)
            else:
                # plain field: just rebind the flattened variable
                newvarsmap[key] = op.args[2]
                self.last_removed_access = len(self.newops)
        elif op.opname in ("same_as", "cast_pointer"):
            assert op.result not in vars
            vars[op.result] = True
            # Consider the two pointers (input and result) as
            # equivalent. We can, and indeed must, use the same
            # flattened list of variables for both, as a "setfield"
            # via one pointer must be reflected in the other.
        elif op.opname == 'keepalive':
            self.last_removed_access = len(self.newops)
        elif op.opname in ("getsubstruct", "getarraysubstruct",
                           "direct_fieldptr"):
            S = op.args[0].concretetype.TO
            fldname = op.args[1].value
            if op.opname == "getarraysubstruct":
                fldname = 'item%d' % fldname
            equiv = self.equivalent_substruct(S, fldname)
            if equiv:
                # exactly like a cast_pointer
                assert op.result not in vars
                vars[op.result] = True
            else:
                # do it with a getsubstruct on the independently
                # malloc'ed GcStruct
                if op.opname == "direct_fieldptr":
                    opname = "direct_fieldptr"
                else:
                    opname = "getsubstruct"
                v = newvarsmap[S, fldname]
                cname = Constant('data', lltype.Void)
                newop = SpaceOperation(opname,
                                       [v, cname],
                                       op.result)
                self.newops.append(newop)
        elif op.opname in ("ptr_iszero", "ptr_nonzero"):
            # we know the pointer is not NULL if it comes from
            # a successful malloc
            c = Constant(op.opname == "ptr_nonzero", lltype.Bool)
            newop = SpaceOperation('same_as', [c], op.result)
            self.newops.append(newop)
        else:
            raise AssertionError, op.opname

    def insert_keepalives(self, newvars):
        """Insert keepalive ops for the gc members of the flattened family,
        just after the last removed field access."""
        if self.last_removed_access is not None:
            keepalives = []
            for v in newvars:
                T = v.concretetype
                if isinstance(T, lltype.Ptr) and T._needsgc():
                    v0 = Variable()
                    v0.concretetype = lltype.Void
                    newop = SpaceOperation('keepalive', [v], v0)
                    keepalives.append(newop)
            self.newops[self.last_removed_access:self.last_removed_access] = keepalives
class OOTypeMallocRemover(BaseMallocRemover):
    """Malloc remover for the ootypesystem ('new' of Records/Instances)."""

    IDENTITY_OPS = ('same_as', 'ooupcast', 'oodowncast')
    SUBSTRUCT_OPS = ()
    MALLOC_OP = 'new'
    FIELD_ACCESS = dict.fromkeys(["oogetfield",
                                  "oosetfield",
                                  "oononnull",
                                  #"oois",  # ???
                                  #"instanceof", # ???
                                  ])
    SUBSTRUCT_ACCESS = {}
    CHECK_ARRAY_INDEX = {}

    def get_STRUCT(self, TYPE):
        # ootype values are not accessed through a Ptr: use the type itself
        return TYPE

    def union_wrapper(self, S):
        return False

    def RTTI_dtor(self, STRUCT):
        return False

    def inline_type(self, TYPE):
        # only Records and Instances can be exploded into fields
        return isinstance(TYPE, (ootype.Record, ootype.Instance))

    def _get_fields(self, TYPE):
        """Return the {name: (FIELDTYPE, default)} mapping of TYPE."""
        if isinstance(TYPE, ootype.Record):
            return TYPE._fields
        elif isinstance(TYPE, ootype.Instance):
            return TYPE._allfields()
        else:
            assert False

    def flatten(self, TYPE):
        """Fill flatnames/flatconstants/newvarstype with one entry per field."""
        for name, (FIELDTYPE, default) in self._get_fields(TYPE).iteritems():
            key = self.key_for_field_access(TYPE, name)
            constant = Constant(default)
            constant.concretetype = FIELDTYPE
            self.flatconstants[key] = constant
            self.flatnames.append(key)
            self.newvarstype[key] = FIELDTYPE

    def key_for_field_access(self, S, fldname):
        # key on the class that defines the field, so that inherited
        # fields share a single entry
        CLS, TYPE = S._lookup_field(fldname)
        return CLS, fldname

    def flowin_op(self, op, vars, newvarsmap):
        """Rewrite one operation whose first argument is a removed 'new'."""
        if op.opname == "oogetfield":
            S = op.args[0].concretetype
            fldname = op.args[1].value
            key = self.key_for_field_access(S, fldname)
            newop = SpaceOperation("same_as",
                                   [newvarsmap[key]],
                                   op.result)
            self.newops.append(newop)
            # bug fix: was assigning a dead local 'last_removed_access';
            # record it on self like the base class and LLType remover do
            self.last_removed_access = len(self.newops)
        elif op.opname == "oosetfield":
            S = op.args[0].concretetype
            fldname = op.args[1].value
            key = self.key_for_field_access(S, fldname)
            assert key in newvarsmap
            # plain rebinding of the flattened variable
            newvarsmap[key] = op.args[2]
            # bug fix: same dead-local issue as in oogetfield above
            self.last_removed_access = len(self.newops)
        elif op.opname in ("same_as", "oodowncast", "ooupcast"):
            assert op.result not in vars
            vars[op.result] = True
            # Consider the two pointers (input and result) as
            # equivalent. We can, and indeed must, use the same
            # flattened list of variables for both, as a "setfield"
            # via one pointer must be reflected in the other.
        elif op.opname == "oononnull":
            # we know the pointer is not NULL if it comes from
            # a successful malloc
            c = Constant(True, lltype.Bool)
            newop = SpaceOperation('same_as', [c], op.result)
            self.newops.append(newop)
        else:
            raise AssertionError(op.opname)

    def insert_keepalives(self, newvars):
        # no keepalives are needed in the ootypesystem
        pass
def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True):
    """Remove removable mallocs from 'graph' using the remover matching
    'type_system'; returns the number of mallocs removed."""
    if type_system == 'lltypesystem':
        remover_class = LLTypeMallocRemover
    else:
        remover_class = OOTypeMallocRemover
    return remover_class(verbose).remove_simple_mallocs(graph)
def remove_mallocs(translator, graphs=None, type_system="lltypesystem"):
    """Run malloc removal over 'graphs' (default: all translator graphs)
    and clean up the leftovers; returns the total number removed."""
    if graphs is None:
        graphs = translator.graphs
    total = 0
    for graph in graphs:
        removed = remove_simple_mallocs(
            graph, type_system=type_system,
            verbose=translator.config.translation.verbose)
        if removed:
            # remove typical leftovers from malloc removal
            removenoops.remove_same_as(graph)
            simplify.eliminate_empty_blocks(graph)
            simplify.transform_dead_op_vars(graph, translator)
            total += removed
    log.malloc("removed %d simple mallocs in total" % total)
    return total
| Python |
from pypy.objspace.flow.model import Constant, Variable, SpaceOperation
from pypy.objspace.flow.model import c_last_exception
from pypy.objspace.flow.model import mkentrymap
from pypy.translator.backendopt.support import split_block_with_keepalive
from pypy.translator.backendopt.support import log
from pypy.translator.simplify import eliminate_empty_blocks
from pypy.translator.unsimplify import insert_empty_block
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rpython.lltypesystem import lltype
def fold_op_list(operations, constants, exit_early=False, exc_catch=False):
    """Constant-fold a list of operations.

    'constants' maps Variables to Constants and is extended in place with
    every newly folded result.  If 'exit_early' is true, return the number
    of operations folded so far as soon as one operation cannot be folded;
    otherwise return the full rewritten operation list.  'exc_catch' says
    whether the block catches exceptions (its last operation is then the
    potentially-raising one and keepalives must not move past it).
    """
    newops = []
    keepalives = []
    folded_count = 0
    first_sideeffect_index = None
    for spaceop in operations:
        vargsmodif = False
        vargs = []
        args = []
        for v in spaceop.args:
            if isinstance(v, Constant):
                args.append(v.value)
            elif v in constants:
                # argument known to be constant: substitute it
                v = constants[v]
                vargsmodif = True
                args.append(v.value)
            vargs.append(v)
        try:
            op = getattr(llop, spaceop.opname)
        except AttributeError:
            # not an llop: assume it has side effects
            sideeffects = True
        else:
            if len(args) == len(vargs):
                # all arguments are constants: try to execute the op now
                RESTYPE = spaceop.result.concretetype
                try:
                    result = op(RESTYPE, *args)
                except TypeError:
                    pass
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception, e:
                    log.WARNING('constant-folding %r:' % (spaceop,))
                    log.WARNING(' %s: %s' % (e.__class__.__name__, e))
                else:
                    # success in folding this space operation
                    if spaceop.opname in fixup_op_result:
                        result = fixup_op_result[spaceop.opname](result)
                    constants[spaceop.result] = Constant(result, RESTYPE)
                    folded_count += 1
                    continue
            sideeffects = op.sideeffects
        # failed to fold an operation, exit early if requested
        if exit_early:
            return folded_count
        if spaceop.opname == 'keepalive' and first_sideeffect_index is None:
            if vargsmodif:
                continue       # keepalive(constant) is not useful
            keepalives.append(spaceop)
        else:
            if vargsmodif:
                if (spaceop.opname == 'indirect_call'
                    and isinstance(vargs[0], Constant)):
                    # target became constant: turn into a direct_call
                    # (dropping the trailing graph-list argument)
                    spaceop = SpaceOperation('direct_call', vargs[:-1],
                                             spaceop.result)
                else:
                    spaceop = SpaceOperation(spaceop.opname, vargs,
                                             spaceop.result)
            if sideeffects and first_sideeffect_index is None:
                first_sideeffect_index = len(newops)
            newops.append(spaceop)
    # end
    if exit_early:
        return folded_count
    else:
        # move the keepalives to the end of the block, which makes the life
        # of prepare_constant_fold_link() easier.  Don't put them past the
        # exception-raising operation, though.  There is also no point in
        # moving them past the first sideeffect-ing operation.
        if first_sideeffect_index is None:
            first_sideeffect_index = len(newops) - exc_catch
        newops[first_sideeffect_index:first_sideeffect_index] = keepalives
        return newops
def constant_fold_block(block):
    """Constant-fold the operations of 'block' in place.

    If the exitswitch itself folds to a constant, keep only the matching
    exit; finally apply the folded constants to the args of all exits.
    """
    known = {}
    catches_exc = block.exitswitch == c_last_exception
    block.operations = fold_op_list(block.operations, known,
                                    exc_catch=catches_exc)
    if not known:
        return
    if block.exitswitch in known:
        # the exit condition folded to a constant: only one exit survives
        switchvalue = known[block.exitswitch].value
        surviving = [outlink for outlink in block.exits
                     if outlink.llexitcase == switchvalue]
        assert len(surviving) == 1
        onlyexit = surviving[0]
        onlyexit.exitcase = None
        onlyexit.llexitcase = None
        block.exitswitch = None
        block.recloseblock(onlyexit)
    for outlink in block.exits:
        outlink.args = [known.get(v, v) for v in outlink.args]
def fixup_solid(p):
    """Fix up a folded pointer into an inlined part of a constant object."""
    # Operations returning pointers to inlined parts of a constant object
    # have to be tweaked so that the inlined part keeps the whole object alive.
    # XXX This is done with a hack. (See test_keepalive_const_*())
    container = p._obj
    assert isinstance(container, lltype._parentable)
    container._keepparent = container._parentstructure()
    # Instead of 'p', return a solid pointer, to keep the inlined part
    # itself alive.
    return container._as_ptr()
# Operations whose folded result is a pointer into the interior of a
# constant object need fixup_solid() applied to the result.
fixup_op_result = {
    "getsubstruct": fixup_solid,
    "getarraysubstruct": fixup_solid,
    "direct_fieldptr": fixup_solid,
    "direct_arrayitems": fixup_solid,
    }
def complete_constants(link, constants):
    """Complete 'constants' with pass-through bindings from 'link'.

    'constants' maps some Variables of link.target to Constants.  Input
    args of the target block that are absent from 'constants' are bound
    to the corresponding value carried by 'link' itself, so they can be
    passed directly from link.prevblock.
    """
    for passed, inputvar in zip(link.args, link.target.inputargs):
        if inputvar not in constants:
            constants[inputvar] = passed
        else:
            # already known: must agree with what the link passes
            assert constants[inputvar] is passed
def prepare_constant_fold_link(link, constants, splitblocks):
    """Fold link.target as far as possible with the incoming 'constants'.

    Records in 'splitblocks' (a dict {block: [(position, link, constants)]})
    where the target block will have to be split by rewire_links().  Also
    devirtualizes an indirect_call that immediately follows the folded
    prefix when its callee became a constant.
    """
    block = link.target
    folded_count = fold_op_list(block.operations, constants, exit_early=True)
    n = len(block.operations)
    if block.exitswitch == c_last_exception:
        n -= 1
    # is the next, non-folded operation an indirect_call?
    m = folded_count
    while m < n and block.operations[m].opname == 'keepalive':
        m += 1
    if m < n:
        nextop = block.operations[m]
        if nextop.opname == 'indirect_call' and nextop.args[0] in constants:
            # indirect_call -> direct_call
            callargs = [constants[nextop.args[0]]]
            constants1 = constants.copy()
            complete_constants(link, constants1)
            # re-emit the skipped keepalives with constants substituted
            newkeepalives = []
            for i in range(folded_count, m):
                [v] = block.operations[i].args
                v = constants1.get(v, v)
                v_void = Variable()
                v_void.concretetype = lltype.Void
                newkeepalives.append(SpaceOperation('keepalive', [v], v_void))
            for v in nextop.args[1:-1]:
                callargs.append(constants1.get(v, v))
            v_result = Variable(nextop.result)
            v_result.concretetype = nextop.result.concretetype
            constants[nextop.result] = v_result
            callop = SpaceOperation('direct_call', callargs, v_result)
            newblock = insert_empty_block(None, link, newkeepalives + [callop])
            [link] = newblock.exits
            assert link.target is block
            folded_count = m+1
    if folded_count > 0:
        splits = splitblocks.setdefault(block, [])
        splits.append((folded_count, link, constants))
def rewire_links(splitblocks, graph):
    """Split the recorded blocks and redirect each constant-carrying link
    past the operations that were already folded for it."""
    for block, splits in splitblocks.items():
        # A splitting position is given by how many operations were
        # folded with the knowledge of an incoming link's constant.
        # Various incoming links may cause various splitting positions.
        # We split the block gradually, starting from the end.
        splits.sort()
        splits.reverse()
        for position, link, constants in splits:
            assert link.target is block
            if position == len(block.operations) and block.exitswitch is None:
                # a split here would leave nothing in the 2nd part, so
                # directly rewire the links
                assert len(block.exits) == 1
                splitlink = block.exits[0]
            else:
                # split the block at the given position
                splitlink = split_block_with_keepalive(block, position)
                assert list(block.exits) == [splitlink]
            assert link.target is block
            assert splitlink.prevblock is block
            complete_constants(link, constants)
            args = [constants.get(v, v) for v in splitlink.args]
            link.args = args
            link.target = splitlink.target
def constant_diffuse(graph):
    """Diffuse constants from links into their target blocks.

    Returns the number of constants removed from links.
    """
    # if the same constants appear at the same positions in all links
    # into a block remove them from the links, remove the corresponding
    # input variables and introduce equivalent same_as at the beginning
    # of the block then try to fold the block further
    count = 0
    for block, links in mkentrymap(graph).iteritems():
        if block is graph.startblock:
            continue
        if block.exits == ():
            continue
        firstlink = links[0]
        rest = links[1:]
        diffuse = []
        for i, c in enumerate(firstlink.args):
            if not isinstance(c, Constant):
                continue
            for lnk in rest:
                if lnk.args[i] != c:
                    break
            else:
                # the same constant at position i on every incoming link
                diffuse.append((i, c))
        # walk positions backwards so the deletions below keep the
        # remaining indices valid
        diffuse.reverse()
        same_as = []
        for i, c in diffuse:
            for lnk in links:
                del lnk.args[i]
            v = block.inputargs.pop(i)
            same_as.append(SpaceOperation('same_as', [c], v))
            count += 1
        block.operations = same_as + block.operations
        if same_as:
            constant_fold_block(block)
    return count
def constant_fold_graph(graph):
    """Constant-fold 'graph' in place: first each block, then iteratively
    along the links until no further folding happens."""
    # first fold inside the blocks
    for blk in graph.iterblocks():
        if blk.operations:
            constant_fold_block(blk)
    # then fold along the links - a fixpoint process, because new links
    # with new constants show up, even though we can probably prove that
    # a single iteration is enough under some conditions, like the graph
    # is in a join_blocks() form.
    keep_going = True
    while keep_going:
        diffused = constant_diffuse(graph)
        splitblocks = {}
        for link in list(graph.iterlinks()):
            incoming = {}
            for passed, inputvar in zip(link.args, link.target.inputargs):
                if isinstance(passed, Constant):
                    incoming[inputvar] = passed
            if incoming:
                prepare_constant_fold_link(link, incoming, splitblocks)
        if splitblocks:
            rewire_links(splitblocks, graph)
        # finished when neither diffusing nor splitting made progress
        keep_going = bool(diffused) or bool(splitblocks)
| Python |
import sys
from pypy.translator.simplify import join_blocks, cleanup_graph
from pypy.translator.simplify import get_graph, get_funcobj
from pypy.translator.unsimplify import copyvar
from pypy.objspace.flow.model import Variable, Constant, Block, Link
from pypy.objspace.flow.model import SpaceOperation, c_last_exception
from pypy.objspace.flow.model import FunctionGraph
from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph
from pypy.annotation import model as annmodel
from pypy.rpython.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr
from pypy.rpython.lltypesystem.lltype import normalizeptr
from pypy.rpython.ootypesystem import ootype
from pypy.rpython import rmodel
from pypy.tool.algo import sparsemat
from pypy.translator.backendopt import removenoops
from pypy.translator.backendopt.support import log, split_block_with_keepalive
from pypy.translator.backendopt.support import find_backedges, find_loop_blocks
from pypy.translator.backendopt.canraise import RaiseAnalyzer
class CannotInline(Exception):
    """Raised when a call site cannot be inlined (e.g. recursive function
    or an exception-guarded call that might raise)."""
    pass
def get_meth_from_oosend(op):
    """Return the statically-resolved method of an 'oosend' operation.

    op.args[0] holds the method name and op.args[1] the receiver; the
    method is looked up on the receiver's concrete type.  Returns None
    when the method is virtual (cannot be resolved statically).
    """
    method_name = op.args[0].value
    INSTANCE = op.args[1].concretetype
    # use the name extracted above (the original recomputed it)
    _, meth = INSTANCE._lookup(method_name)
    # methods default to virtual unless explicitly marked otherwise
    if getattr(meth, '_virtual', True):
        return None
    return meth
class CanRaise:
    """Small marker wrapping whether an unresolvable callee can raise."""
    def __init__(self, can_raise):
        # boolean flag, read by any_call_to_raising_graphs()
        self.can_raise = can_raise
def collect_called_graphs(graph, translator, include_oosend=True):
    """Collect the possible call targets of 'graph'.

    Returns a dict whose keys are FunctionGraphs when the target could be
    resolved, CanRaise markers for oosends with only a '_can_raise' hint,
    or the raw callable Constant when nothing better is known.

    Note: the original rebound the 'graph' parameter inside the loops
    (harmless only because the iterator was already created); the inner
    variables are now distinct.
    """
    graphs_or_something = {}
    for block in graph.iterblocks():
        for op in block.operations:
            if op.opname == "direct_call":
                called = get_graph(op.args[0], translator)
                if called is not None:
                    graphs_or_something[called] = True
                else:
                    graphs_or_something[op.args[0]] = True
            if op.opname == "indirect_call":
                # the last argument of an indirect_call is the list of
                # possible target graphs, or None if unknown
                possible = op.args[-1].value
                if possible is None:
                    graphs_or_something[op.args[0]] = True
                else:
                    for called in possible:
                        graphs_or_something[called] = True
            if op.opname == 'oosend' and include_oosend:
                meth = get_meth_from_oosend(op)
                if hasattr(meth, 'graph'):
                    key = meth.graph
                elif hasattr(meth, '_can_raise'):
                    key = CanRaise(meth._can_raise)
                else:
                    key = op.args[0]
                graphs_or_something[key] = True
    return graphs_or_something
def iter_callsites(graph, calling_what):
    """Yield (called_graph, block, index) for every call site of
    'calling_what' in 'graph'.

    'calling_what' may be given either as a graph or as the original
    callable.  Only direct_call and statically-resolvable oosend
    operations are considered.
    """
    for block in graph.iterblocks():
        for i, op in enumerate(block.operations):
            if op.opname == "direct_call":
                funcobj = get_funcobj(op.args[0].value)
            elif op.opname == "oosend":
                funcobj = get_meth_from_oosend(op)
                if funcobj is None:
                    continue # cannot inline virtual methods
            else:
                continue
            graph = getattr(funcobj, 'graph', None)
            # accept a function or a graph as 'inline_func'
            if (graph is calling_what or
                getattr(funcobj, '_callable', None) is calling_what):
                yield graph, block, i
def find_callsites(graph, calling_what):
    """Return the list of all call sites of 'calling_what' in 'graph'."""
    return list(iter_callsites(graph, calling_what))
def iter_first_callsites(graph, calling_what):
    """Repeatedly yield the first call site of 'calling_what' in 'graph'."""
    # restart the iter_callsites iterator every time, since the graph might
    # have changed
    while 1:
        iterator = iter_callsites(graph, calling_what)
        yield iterator.next()
def contains_call(graph, calling_what):
    """Return True if 'graph' has at least one call site of 'calling_what'."""
    for _ in iter_callsites(graph, calling_what):
        return True
    return False
def inline_function(translator, inline_func, graph, lltype_to_classdef,
                    raise_analyzer, call_count_pred=None):
    """Inline all call sites of 'inline_func' inside 'graph'.

    Returns the number of call sites inlined; may raise CannotInline.
    """
    inliner = Inliner(translator, graph, inline_func, lltype_to_classdef,
                      raise_analyzer = raise_analyzer,
                      call_count_pred=call_count_pred)
    return inliner.inline_all()
def simple_inline_function(translator, inline_func, graph):
    """Convenience wrapper around Inliner with default analyzers."""
    inliner = Inliner(translator, graph, inline_func,
                      translator.rtyper.lltype_to_classdef_mapping(),
                      raise_analyzer = RaiseAnalyzer(translator))
    return inliner.inline_all()
def _find_exception_type(block):
    """Return (exception TYPE, raising link) for 'block', or (None, None)
    when the type cannot be determined by simple pattern matching."""
    #XXX slightly brittle: find the exception type for simple cases
    #(e.g. if you do only raise XXXError) by doing pattern matching
    currvar = block.exits[0].args[1]
    ops = block.operations
    i = len(ops)-1
    while True:
        if isinstance(currvar, Constant):
            value = currvar.value
            if isinstance(typeOf(value), ootype.Instance):
                TYPE = ootype.dynamicType(value)
            else:
                TYPE = typeOf(normalizeptr(value))
            return TYPE, block.exits[0]
        if i < 0:
            return None, None
        # walk the operations backwards, tracing where the exception value
        # comes from through cast/copy operations
        op = ops[i]
        i -= 1
        if op.opname in ("same_as", "cast_pointer", "ooupcast", "oodowncast") and op.result is currvar:
            currvar = op.args[0]
        elif op.opname == "malloc" and op.result is currvar:
            return Ptr(op.args[0].value), block.exits[0]
        elif op.opname == "new" and op.result is currvar:
            return op.args[0].value, block.exits[0]
def does_raise_directly(graph, raise_analyzer):
    """Check whether 'graph' contains operations which can raise and which
    are not exception guarded."""
    for block in graph.iterblocks():
        if block is graph.exceptblock:
            # the except block is reachable
            return True
        candidate_ops = block.operations
        if block.exitswitch == c_last_exception:
            # the last operation is exception guarded: skip it
            candidate_ops = candidate_ops[:-1]
        if any(raise_analyzer.can_raise(op) for op in candidate_ops):
            return True
    return False
def any_call_to_raising_graphs(from_graph, translator, raise_analyzer):
    """Conservatively decide whether any call made by 'from_graph' may
    raise an exception."""
    for target in collect_called_graphs(from_graph, translator):
        if isinstance(target, FunctionGraph):
            if does_raise_directly(target, raise_analyzer):
                return True
        elif isinstance(target, CanRaise):
            if target.can_raise:
                return True
        else:
            # unknown call target: assume the worst
            return True
    return False
class BaseInliner(object):
    """Inline call sites recorded in self.block_to_index into self.graph.

    self.block_to_index maps {block: {operation-index: graph-to-inline}};
    subclasses fill it and keep it up to date via search_for_calls().
    """
    def __init__(self, translator, graph, lltype_to_classdef,
                 inline_guarded_calls=False,
                 inline_guarded_calls_no_matter_what=False,
                 raise_analyzer=None,
                 call_count_pred=None):
        self.translator = translator
        self.graph = graph
        self.inline_guarded_calls = inline_guarded_calls
        # if this argument is set, the inliner will happily produce wrong code!
        # it is used by the exception transformation
        self.inline_guarded_calls_no_matter_what = inline_guarded_calls_no_matter_what
        assert raise_analyzer is not None
        self.raise_analyzer = raise_analyzer
        self.lltype_to_classdef = lltype_to_classdef
        self.call_count_pred = call_count_pred

    def inline_all(self):
        """Inline every recorded call site; return how many were inlined."""
        count = 0
        non_recursive = {}
        call_count_pred = self.call_count_pred
        while self.block_to_index:
            block, d = self.block_to_index.popitem()
            index_operation, subgraph = d.popitem()
            if d:
                # more call sites remain in this block: put it back
                self.block_to_index[block] = d
            if subgraph not in non_recursive and contains_call(subgraph, subgraph):
                raise CannotInline("inlining a recursive function")
            else:
                non_recursive[subgraph] = True
            if call_count_pred:
                # the operation just before the call is the instrumentation
                # counter inserted by instrument_inline_candidates()
                countop = block.operations[index_operation-1]
                assert countop.opname == 'instrument_count'
                assert countop.args[0].value == 'inline'
                label = countop.args[1].value
                if not call_count_pred(label):
                    continue
            self.inline_once(block, index_operation)
            count += 1
        self.cleanup()
        return count

    def get_graph_from_op(self, op):
        """Return the graph called by 'op' (a direct_call or oosend)."""
        assert op.opname in ('direct_call', 'oosend')
        if op.opname == 'direct_call':
            # fixed: use the 'op' argument instead of self.op, so this
            # works for any operation passed in, not just the current one
            return get_funcobj(op.args[0].value).graph
        else:
            return get_meth_from_oosend(op).graph

    def inline_once(self, block, index_operation):
        """Inline the single call at block.operations[index_operation]."""
        self.varmap = {}
        self._copied_blocks = {}
        self.op = block.operations[index_operation]
        self.graph_to_inline = self.get_graph_from_op(self.op)
        self.exception_guarded = False
        if (block.exitswitch == c_last_exception and
            index_operation == len(block.operations) - 1):
            self.exception_guarded = True
            if self.inline_guarded_calls:
                if (not self.inline_guarded_calls_no_matter_what and
                    does_raise_directly(self.graph_to_inline, self.raise_analyzer)):
                    raise CannotInline("can't inline because the call is exception guarded")
            elif any_call_to_raising_graphs(self.graph_to_inline,
                                            self.translator, self.raise_analyzer):
                raise CannotInline("can't handle exceptions")
        self._passon_vars = {}
        self.entrymap = mkentrymap(self.graph_to_inline)
        self.do_inline(block, index_operation)

    def search_for_calls(self, block):
        """Re-scan 'block' for call sites of self.inline_func and update
        self.block_to_index accordingly."""
        d = {}
        for i, op in enumerate(block.operations):
            if op.opname == "direct_call":
                funcobj = get_funcobj(op.args[0].value)
            elif op.opname == "oosend":
                funcobj = get_meth_from_oosend(op)
                if funcobj is None:
                    continue
            else:
                continue
            graph = getattr(funcobj, 'graph', None)
            # accept a function or a graph as 'inline_func'
            if (graph is self.inline_func or
                getattr(funcobj, '_callable', None) is self.inline_func):
                d[i] = graph
        if d:
            self.block_to_index[block] = d
        else:
            try:
                del self.block_to_index[block]
            except KeyError:
                pass

    def get_new_name(self, var):
        """Map a variable of the inlined graph to a fresh copy; Constants
        and None pass through unchanged."""
        if var is None:
            return None
        if isinstance(var, Constant):
            return var
        if var not in self.varmap:
            self.varmap[var] = copyvar(None, var)
        return self.varmap[var]

    def generate_keepalive(self, *args):
        # keepalives only make sense for the lltype backend
        from pypy.translator.backendopt.support import generate_keepalive
        if self.translator.rtyper.type_system.name == 'lltypesystem':
            return generate_keepalive(*args)
        else:
            return []

    def passon_vars(self, cache_key):
        """Return (and cache per 'cache_key') fresh copies of the variables
        that must be threaded through every inlined block."""
        if cache_key in self._passon_vars:
            return self._passon_vars[cache_key]
        result = [copyvar(None, var)
                  for var in self.original_passon_vars]
        self._passon_vars[cache_key] = result
        return result

    def copy_operation(self, op):
        """Copy one operation, renaming its variables."""
        args = [self.get_new_name(arg) for arg in op.args]
        result = SpaceOperation(op.opname, args, self.get_new_name(op.result))
        return result

    def copy_block(self, block):
        """Deep-copy 'block' (memoized), appending the pass-on variables
        to its input args."""
        if block in self._copied_blocks:
            return self._copied_blocks[block]
        args = ([self.get_new_name(var) for var in block.inputargs] +
                self.passon_vars(block))
        newblock = Block(args)
        self._copied_blocks[block] = newblock
        newblock.operations = [self.copy_operation(op) for op in block.operations]
        newblock.closeblock(*[self.copy_link(link, block) for link in block.exits])
        newblock.exitswitch = self.get_new_name(block.exitswitch)
        self.search_for_calls(newblock)
        return newblock

    def copy_link(self, link, prevblock):
        """Copy one link, renaming variables and appending pass-on vars."""
        newargs = [self.get_new_name(a) for a in link.args] + self.passon_vars(prevblock)
        newlink = Link(newargs, self.copy_block(link.target), link.exitcase)
        newlink.last_exception = self.get_new_name(link.last_exception)
        newlink.last_exc_value = self.get_new_name(link.last_exc_value)
        if hasattr(link, 'llexitcase'):
            newlink.llexitcase = link.llexitcase
        return newlink

    def find_args_in_exceptional_case(self, link, block, etype, evalue, afterblock, passon_vars):
        """Compute the args of 'link' for the exceptional exit, substituting
        the exception type/value variables and pass-on copies."""
        linkargs = []
        for arg in link.args:
            if arg == link.last_exception:
                linkargs.append(etype)
            elif arg == link.last_exc_value:
                linkargs.append(evalue)
            elif isinstance(arg, Constant):
                linkargs.append(arg)
            else:
                # afterblock.inputargs[0] is the call result, hence the -1
                index = afterblock.inputargs.index(arg)
                linkargs.append(passon_vars[index - 1])
        return linkargs

    def rewire_returnblock(self, afterblock):
        """Connect the copied returnblock of the inlined graph to
        'afterblock'."""
        copiedreturnblock = self.copy_block(self.graph_to_inline.returnblock)
        linkargs = ([copiedreturnblock.inputargs[0]] +
                    self.passon_vars(self.graph_to_inline.returnblock))
        linkfrominlined = Link(linkargs, afterblock)
        copiedreturnblock.exitswitch = None
        copiedreturnblock.recloseblock(linkfrominlined)
        assert copiedreturnblock.exits[0].target == afterblock

    def rewire_exceptblock(self, afterblock):
        #let links to exceptblock of the graph to inline go to graphs exceptblock
        copiedexceptblock = self.copy_block(self.graph_to_inline.exceptblock)
        if not self.exception_guarded:
            self.rewire_exceptblock_no_guard(afterblock, copiedexceptblock)
        else:
            # first try to match exceptions using a very simple heuristic
            self.rewire_exceptblock_with_guard(afterblock, copiedexceptblock)
            # generate blocks that do generic matching for cases when the
            # heuristic did not work
            self.generic_exception_matching(afterblock, copiedexceptblock)

    def rewire_exceptblock_no_guard(self, afterblock, copiedexceptblock):
        # find all copied links that go to copiedexceptblock
        for link in self.entrymap[self.graph_to_inline.exceptblock]:
            copiedblock = self.copy_block(link.prevblock)
            for copiedlink in copiedblock.exits:
                if copiedlink.target is copiedexceptblock:
                    copiedlink.args = copiedlink.args[:2]
                    copiedlink.target = self.graph.exceptblock
                    for a1, a2 in zip(copiedlink.args,
                                      self.graph.exceptblock.inputargs):
                        if hasattr(a2, 'concretetype'):
                            assert a1.concretetype == a2.concretetype
                        else:
                            # if self.graph.exceptblock was never used before
                            a2.concretetype = a1.concretetype

    def rewire_exceptblock_with_guard(self, afterblock, copiedexceptblock):
        # this rewiring does not always succeed. in the cases where it doesn't
        # there will be generic code inserted
        rclass = self.translator.rtyper.type_system.rclass
        exc_match = self.translator.rtyper.getexceptiondata().fn_exception_match
        for link in self.entrymap[self.graph_to_inline.exceptblock]:
            copiedblock = self.copy_block(link.prevblock)
            VALUE, copiedlink = _find_exception_type(copiedblock)
            #print copiedblock.operations
            if VALUE is None or VALUE not in self.lltype_to_classdef:
                continue
            classdef = self.lltype_to_classdef[VALUE]
            rtyper = self.translator.rtyper
            classrepr = rclass.getclassrepr(rtyper, classdef)
            vtable = classrepr.getruntime()
            var_etype = copiedlink.args[0]
            var_evalue = copiedlink.args[1]
            for exceptionlink in afterblock.exits[1:]:
                if exc_match(vtable, exceptionlink.llexitcase):
                    passon_vars = self.passon_vars(link.prevblock)
                    copiedblock.operations += self.generate_keepalive(passon_vars)
                    copiedlink.target = exceptionlink.target
                    linkargs = self.find_args_in_exceptional_case(
                        exceptionlink, link.prevblock, var_etype, var_evalue, afterblock, passon_vars)
                    copiedlink.args = linkargs
                    break

    def generic_exception_matching(self, afterblock, copiedexceptblock):
        #XXXXX don't look: insert blocks that do exception matching
        #for the cases where direct matching did not work
        exc_match = Constant(
            self.translator.rtyper.getexceptiondata().fn_exception_match)
        exc_match.concretetype = typeOf(exc_match.value)
        blocks = []
        for i, link in enumerate(afterblock.exits[1:]):
            etype = copyvar(None, copiedexceptblock.inputargs[0])
            evalue = copyvar(None, copiedexceptblock.inputargs[1])
            passon_vars = self.passon_vars(i)
            block = Block([etype, evalue] + passon_vars)
            res = Variable()
            res.concretetype = Bool
            cexitcase = Constant(link.llexitcase)
            cexitcase.concretetype = typeOf(cexitcase.value)
            args = [exc_match, etype, cexitcase]
            block.operations.append(SpaceOperation("direct_call", args, res))
            block.exitswitch = res
            linkargs = self.find_args_in_exceptional_case(link, link.target,
                                                          etype, evalue, afterblock,
                                                          passon_vars)
            l = Link(linkargs, link.target)
            l.prevblock = block
            l.exitcase = True
            l.llexitcase = True
            block.closeblock(l)
            if i > 0:
                # chain: if the previous match failed, try this one
                l = Link(blocks[-1].inputargs, block)
                l.exitcase = False
                l.llexitcase = False
                blocks[-1].recloseblock(l, *blocks[-1].exits)
            blocks.append(block)
        # the last block re-raises unconditionally
        blocks[-1].recloseblock(*blocks[-1].exits[:1])
        blocks[-1].operations = []
        blocks[-1].exitswitch = None
        blocks[-1].exits[0].exitcase = None
        del blocks[-1].exits[0].llexitcase
        linkargs = copiedexceptblock.inputargs
        copiedexceptblock.recloseblock(Link(linkargs, blocks[0]))
        copiedexceptblock.operations += self.generate_keepalive(linkargs)

    def do_inline(self, block, index_operation):
        """Actually splice the copied graph in place of the call."""
        splitlink = split_block_with_keepalive(block, index_operation)
        afterblock = splitlink.target
        # these variables have to be passed along all the links in the inlined
        # graph because the original function needs them in the blocks after
        # the inlined function
        # for every inserted block we need a new copy of these variables,
        # this copy is created with the method passon_vars
        self.original_passon_vars = [arg for arg in block.exits[0].args
                                     if isinstance(arg, Variable)]
        n = 0
        while afterblock.operations[n].opname == 'keepalive':
            n += 1
        assert afterblock.operations[n].opname == self.op.opname
        self.op = afterblock.operations.pop(n)
        #vars that need to be passed through the blocks of the inlined function
        linktoinlined = splitlink
        copiedstartblock = self.copy_block(self.graph_to_inline.startblock)
        copiedstartblock.isstartblock = False
        #find args passed to startblock of inlined function
        passon_args = []
        for arg in self.op.args[1:]:
            if isinstance(arg, Constant):
                passon_args.append(arg)
            else:
                index = afterblock.inputargs.index(arg)
                passon_args.append(linktoinlined.args[index])
        passon_args += self.original_passon_vars
        if self.op.opname == 'oosend' and not isinstance(self.op.args[1], Constant):
            # if we try to inline a graph defined in a superclass, the
            # type of 'self' on the graph differs from the current
            linkv = passon_args[0]
            inputv = copiedstartblock.inputargs[0]
            LINK_SELF = linkv.concretetype
            INPUT_SELF = inputv.concretetype
            if LINK_SELF != INPUT_SELF:
                # need to insert an upcast
                assert ootype.isSubclass(LINK_SELF, INPUT_SELF)
                v = Variable()
                v.concretetype = INPUT_SELF
                upcast = SpaceOperation('ooupcast', [linkv], v)
                block.operations.append(upcast)
                passon_args[0] = v
        #rewire blocks
        linktoinlined.target = copiedstartblock
        linktoinlined.args = passon_args
        afterblock.inputargs = [self.op.result] + afterblock.inputargs
        if self.graph_to_inline.returnblock in self.entrymap:
            self.rewire_returnblock(afterblock)
        if self.graph_to_inline.exceptblock in self.entrymap:
            self.rewire_exceptblock(afterblock)
        if self.exception_guarded:
            assert afterblock.exits[0].exitcase is None
            afterblock.recloseblock(afterblock.exits[0])
            afterblock.exitswitch = None
        self.search_for_calls(afterblock)
        self.search_for_calls(block)

    def cleanup(self):
        """ cleaning up -- makes sense to be done after inlining, because the
        inliner inserted quite some empty blocks and blocks that can be
        joined. """
        cleanup_graph(self.graph)
class Inliner(BaseInliner):
    """Inliner that inlines every call site of a single function.

    'inline_func' may be given either as a callable or as its graph.
    """
    def __init__(self, translator, graph, inline_func, lltype_to_classdef,
                 inline_guarded_calls=False,
                 inline_guarded_calls_no_matter_what=False,
                 raise_analyzer=None,
                 call_count_pred=None):
        BaseInliner.__init__(self, translator, graph, lltype_to_classdef,
                             inline_guarded_calls,
                             inline_guarded_calls_no_matter_what,
                             raise_analyzer,
                             call_count_pred)
        self.inline_func = inline_func
        # to simplify exception matching
        join_blocks(graph)
        # find callsites *after* joining blocks...
        callsites = find_callsites(graph, inline_func)
        self.block_to_index = {}
        for g, block, i in callsites:
            self.block_to_index.setdefault(block, {})[i] = g
class OneShotInliner(BaseInliner):
    """Inliner driven from outside: never searches for new call sites."""
    def search_for_calls(self, block):
        pass
# ____________________________________________________________
#
# Automatic inlining
# Per-opname cost overrides used by block_weight(); operations not listed
# here count as 1.
OP_WEIGHTS = {'same_as': 0,
              'cast_pointer': 0,
              'keepalive': 0,
              'malloc': 2,
              'yield_current_frame_to_caller': sys.maxint, # XXX bit extreme
              'resume_point': sys.maxint, # XXX bit extreme
              'instrument_count': 0,
              'debug_assert': -1,
              }
def block_weight(block, weights=OP_WEIGHTS):
    """Estimated execution cost of a single block (never negative)."""
    total = 0
    for op in block.operations:
        # calls are weighted by the number of arguments they pass
        if op.opname == "direct_call":
            total += 1.5 + len(op.args) / 2
        elif op.opname == "indirect_call":
            total += 2 + len(op.args) / 2
        elif op.opname == "oosend":
            total += 2 + len(op.args) / 2
        total += weights.get(op.opname, 1)
    if block.exitswitch is not None:
        total += 1
    return max(0, total)
def measure_median_execution_cost(graph):
    """Estimate the cost of executing 'graph' by solving a linear system
    over the blocks, where links staying inside a loop are weighted more
    heavily (0.7 spread over loop links vs 0.3 over the exits)."""
    blocks = []
    blockmap = {}
    for block in graph.iterblocks():
        blockmap[block] = len(blocks)
        blocks.append(block)
    loops = find_loop_blocks(graph)
    M = sparsemat.SparseMatrix(len(blocks))
    vector = []
    for i, block in enumerate(blocks):
        vector.append(block_weight(block))
        M[i, i] = 1
        if block.exits:
            if block not in loops:
                current_loop_start = None
            else:
                current_loop_start = loops[block]
            loop_exits = []
            for link in block.exits:
                if (link.target in loops and
                    loops[link.target] is current_loop_start):
                    loop_exits.append(link)
            if len(loop_exits) and len(loop_exits) < len(block.exits):
                # favour the loop-carrying links: 70% over loop links,
                # 30% over the remaining exits
                f = 0.3 / (len(block.exits) - len(loop_exits))
                b = 0.7 / len(loop_exits)
            else:
                b = f = 1.0 / len(block.exits)
            for link in block.exits:
                if (link.target in loops and
                    loops[link.target] is current_loop_start):
                    M[i, blockmap[link.target]] -= b
                else:
                    M[i, blockmap[link.target]] -= f
    try:
        Solution = M.solve(vector)
    except ValueError:
        # singular system: give up with an effectively-infinite cost
        return sys.maxint
    else:
        res = Solution[blockmap[graph.startblock]]
        assert res >= 0
        return res
def static_instruction_count(graph):
    """Sum of the static weights of all blocks of 'graph'."""
    return sum(block_weight(blk) for blk in graph.iterblocks())
def inlining_heuristic(graph):
    """Return (weight, fixed) for 'graph'; lower weight is inlined first."""
    # XXX ponderation factors?
    static_cost = static_instruction_count(graph)
    if static_cost >= 200:
        # too big: don't bother with the (expensive) dynamic estimate
        return static_cost, True
    dynamic_cost = 0.9999 * measure_median_execution_cost(graph)
    return dynamic_cost + static_cost, True
def inlinable_static_callers(graphs):
    """Return (parentgraph, called_graph) pairs for every statically
    resolvable call site in 'graphs' that may legally be inlined."""
    result = []
    for parentgraph in graphs:
        for block in parentgraph.iterblocks():
            for op in block.operations:
                if op.opname == "direct_call":
                    funcobj = get_funcobj(op.args[0].value)
                    graph = getattr(funcobj, 'graph', None)
                    if graph is not None:
                        # skip functions the backend implements specially
                        if getattr(getattr(funcobj, '_callable', None),
                                   'suggested_primitive', False):
                            continue
                        # skip functions explicitly marked not to inline
                        if getattr(getattr(funcobj, '_callable', None),
                                   'dont_inline', False):
                            continue
                        result.append((parentgraph, graph))
                if op.opname == "oosend":
                    meth = get_meth_from_oosend(op)
                    graph = getattr(meth, 'graph', None)
                    if graph is not None:
                        result.append((parentgraph, graph))
    return result
def instrument_inline_candidates(graphs, threshold):
    """Insert an 'instrument_count' operation before each direct_call whose
    target's static weight is <= threshold (i.e. an inlining candidate)."""
    cache = {None: False}
    def candidate(graph):
        # memoized: is this graph small enough to be an inline candidate?
        try:
            return cache[graph]
        except KeyError:
            res = static_instruction_count(graph) <= threshold
            cache[graph] = res
            return res
    n = 0
    for parentgraph in graphs:
        for block in parentgraph.iterblocks():
            ops = block.operations
            # walk backwards so that insertions do not shift the indices
            # of the operations still to be visited
            i = len(ops)-1
            while i >= 0:
                op = ops[i]
                i -= 1
                if op.opname == "direct_call":
                    funcobj = get_funcobj(op.args[0].value)
                    graph = getattr(funcobj, 'graph', None)
                    if graph is not None:
                        if getattr(getattr(funcobj, '_callable', None),
                                   'suggested_primitive', False):
                            continue
                        if getattr(getattr(funcobj, '_callable', None),
                                   'dont_inline', False):
                            continue
                        if candidate(graph):
                            tag = Constant('inline', Void)
                            label = Constant(n, Signed)
                            dummy = Variable()
                            dummy.concretetype = Void
                            count = SpaceOperation('instrument_count',
                                                   [tag, label], dummy)
                            ops.insert(i+1, count)
                            n += 1
    log.inlining("%d call sites instrumented" % n)
def auto_inlining(translator, threshold=None,
                  callgraph=None,
                  call_count_pred=None,
                  heuristic=inlining_heuristic):
    """Repeatedly inline the cheapest graphs (as ranked by 'heuristic')
    into their callers, until every remaining graph's weight reaches
    'threshold'.  Returns the total number of inlined call sites."""
    assert threshold is not None and threshold != 1
    from heapq import heappush, heappop, heapreplace, heapify
    callers = {} # {graph: {graphs-that-call-it}}
    callees = {} # {graph: {graphs-that-it-calls}}
    if callgraph is None:
        callgraph = inlinable_static_callers(translator.graphs)
    for graph1, graph2 in callgraph:
        callers.setdefault(graph2, {})[graph1] = True
        callees.setdefault(graph1, {})[graph2] = True
    # the -len(callers) change is OK
    heap = [(0.0, -len(callers[graph]), graph) for graph in callers]
    valid_weight = {}
    try_again = {}
    lltype_to_classdef = translator.rtyper.lltype_to_classdef_mapping()
    raise_analyzer = RaiseAnalyzer(translator)
    count = 0
    while heap:
        weight, _, graph = heap[0]
        if not valid_weight.get(graph):
            # lazily (re)compute the real weight of the cheapest entry
            weight, fixed = heuristic(graph)
            #print ' + cost %7.2f %50s' % (weight, graph.name)
            heapreplace(heap, (weight, -len(callers[graph]), graph))
            valid_weight[graph] = True
            if not fixed:
                try_again[graph] = True
            continue
        if weight >= threshold:
            # finished... unless some graphs not in valid_weight would now
            # have a weight below the threshold. Re-insert such graphs
            # at the start of the heap
            finished = True
            for i in range(len(heap)):
                graph = heap[i][2]
                if not valid_weight.get(graph):
                    heap[i] = (0.0, heap[i][1], graph)
                    finished = False
            if finished:
                break
            else:
                heapify(heap)
                continue
        heappop(heap)
        if callers[graph]:
            if translator.config.translation.verbose:
                log.inlining('%7.2f %50s' % (weight, graph.name))
            else:
                log.dot()
        for parentgraph in callers[graph]:
            if parentgraph == graph:
                continue
            subcount = 0
            try:
                subcount = inline_function(translator, graph, parentgraph,
                                           lltype_to_classdef, raise_analyzer,
                                           call_count_pred)
                res = bool(subcount)
            except CannotInline:
                try_again[graph] = True
                res = CannotInline
            if res is True:
                count += subcount
                # the parentgraph should now contain all calls that were
                # done by 'graph'
                for graph2 in callees.get(graph, {}):
                    callees[parentgraph][graph2] = True
                    callers[graph2][parentgraph] = True
                if parentgraph in try_again:
                    # the parentgraph was previously uninlinable, but it has
                    # been modified. Maybe now we can inline it into further
                    # parents?
                    del try_again[parentgraph]
                    heappush(heap, (0.0, -len(callers[parentgraph]), parentgraph))
                valid_weight[parentgraph] = False
    return count
def auto_inline_graphs(translator, graphs, threshold, call_count_pred=None,
                       heuristic=inlining_heuristic):
    """Run auto_inlining() over the static call graph of 'graphs', then
    clean superfluous keepalives and duplicate casts out of each graph."""
    static_callers = inlinable_static_callers(graphs)
    inlined = auto_inlining(translator, threshold,
                            callgraph=static_callers,
                            heuristic=heuristic,
                            call_count_pred=call_count_pred)
    log.inlining('inlined %d callsites.'% (inlined,))
    for graph in graphs:
        removenoops.remove_superfluous_keep_alive(graph)
        removenoops.remove_duplicate_casts(graph, translator)
| Python |
"""
Implementation of a translator from application Python to
interpreter level RPython.
The idea is that we can automatically transform application level
implementations of methods into some equivalent representation at
interpreter level. Then, the RPython to C translation might
hopefully spit out some more efficient code than always interpreting
these methods.
Note that the application level functions are treated as rpythonic,
in a sense that globals are constants, for instance. This definition
is not exact and might change.
The interface for this module is
(initfunc, newsrc) = translate_as_module(
sourcetext,
filename=None,
modname="app2interpexec",
tmpname=None)
If filename is given, it is used as a reference where
this sourcetext can be literally found, to produce
real line numbers. It cannot be just any name but
must exist and contain the source code somewhere.
modname is optional and will be put into the dictionary
to be created.
tmpname is optional. If given, a temporary file will
be created for debugging purposes.
The returned newsrc is the generated source text.
It is used in gateway.py's caching mechanism.
The initfunc result is a function named "init"+modname
It must be called with a space instance and returns
a wrapped dict which is suitable to use as a module dict,
containing all translated objects with their original names.
Integration of this module is finished.
There are no longer hand-generated source
pieces in pypy svn.
"""
from __future__ import generators
import autopath, os, sys, types
import inspect
import cPickle as pickle, __builtin__
from copy_reg import _HEAPTYPE
from pypy.objspace.flow.model import Variable, Constant, SpaceOperation
from pypy.objspace.flow.model import c_last_exception, checkgraph
from pypy.interpreter.pycode import CO_VARARGS, CO_VARKEYWORDS
from types import FunctionType, CodeType, ModuleType
from pypy.interpreter.error import OperationError
from pypy.interpreter.argument import Arguments
from pypy.translator.backendopt.ssa import SSI_to_SSA
from pypy.translator.translator import TranslationContext
from pypy.objspace.flow import FlowObjSpace
from pypy.tool.sourcetools import render_docstr, NiceCompile
from pypy.translator.gensupp import ordered_blocks, UniqueList, builtin_base, \
uniquemodulename, C_IDENTIFIER, NameManager
import pypy # __path__
import py.path
from pypy.tool.ansi_print import ansi_log
log = py.log.Producer("geninterp")
py.log.setconsumer("geninterp", ansi_log)
GI_VERSION = '1.1.23'  # bump this for substantial changes
# ____________________________________________________________

# Compatibility shim: on Python versions without the built-in
# set/frozenset, install placeholder classes so that the
# typename_mapping table below can still be constructed.
# faked_set records which case was taken.
try:
    set
except NameError:
    class fake_set(object):
        pass
    class fake_frozenset(object):
        pass
    builtin_set = fake_set
    builtin_frozenset = fake_frozenset
    faked_set = True
else:
    builtin_set = set
    builtin_frozenset = frozenset
    faked_set = False
def eval_helper(self, typename, expr):
    """Emit init code that space.eval()s *expr* and binds the wrapped
    result to a fresh 'gtype_<typename>' global; returns that name.

    The emitted helper function is generated only once (via
    uniquenameofprebuilt) and reused for every such type.  It
    pre-imports 'types' when the expression mentions it.
    """
    name = self.uniquename("gtype_%s" % typename)
    unique = self.uniquenameofprebuilt("eval_helper", eval_helper)
    self.initcode.append1(
        'def %s(expr):\n'
        '    dic = space.newdict()\n'
        '    if "types." in expr:\n'
        '        space.exec_("import types", dic, dic)\n'
        '    else:\n'
        '        space.exec_("", dic, dic)\n'
        '    return space.eval(expr, dic, dic)' % (unique, ))
    self.initcode.append1('%s = %s(%r)' % (name, unique, expr))
    return name
def unpickle_helper(self, name, value):
    """Emit init code that rebuilds *value* at app-level by unpickling
    a pickle.dumps(value, 2) snapshot, binding the result to *name*."""
    unique = self.uniquenameofprebuilt("unpickle_helper", unpickle_helper)
    self.initcode.append1(
        'def %s(value):\n'
        '    dic = space.newdict()\n'
        '    space.exec_("import cPickle as pickle", dic, dic)\n'
        '    return space.eval("pickle.loads(%%r)" %% value, dic, dic)' % unique)
    self.initcode.append1('%s = %s(%r)' % (
        name, unique, pickle.dumps(value, 2)) )
# hey, for longs we can do even easier:
def long_helper(self, name, value):
    """Emit init code that rebuilds a long constant by space.eval()ing
    its repr (no pickling needed), binding the result to *name*."""
    unique = self.uniquenameofprebuilt("long_helper", long_helper)
    self.initcode.append1(
        'def %s(value):\n'
        '    dic = space.newdict()\n'
        '    space.exec_("", dic, dic) # init __builtins__\n'
        '    return space.eval(value, dic, dic)' % unique)
    self.initcode.append1('%s = %s(%r)' % (
        name, unique, repr(value) ) )
def bltinmod_helper(self, mod):
    """Emit init code that imports builtin module *mod* at app-level and
    binds the wrapped module object to a fresh 'mod_<name>' global;
    returns that name."""
    name = self.uniquename("mod_%s" % mod.__name__)
    unique = self.uniquenameofprebuilt("bltinmod_helper", bltinmod_helper)
    self.initcode.append1(
        'def %s(name):\n'
        '    dic = space.newdict()\n'
        '    space.exec_("import %%s" %% name, dic, dic)\n'
        '    return space.eval("%%s" %% name, dic, dic)' % (unique, ))
    self.initcode.append1('%s = %s(%r)' % (name, unique, mod.__name__))
    return name
class GenRpy:
    """Translates RPythonistic application-level source into equivalent
    interpreter-level source operating on an object space."""

    def __init__(self, translator, entrypoint=None, modname=None, moddict=None):
        self.translator = translator
        if entrypoint is None:
            entrypoint = translator.entrypoint
        self.entrypoint = entrypoint
        self.modname = self.trans_funcname(modname or
                                           uniquemodulename(entrypoint))
        self.moddict = moddict # the dict if we translate a module

        # Predefined constants that need an import are rendered lazily:
        # the closures below run on first use (see nameof) and are then
        # replaced in rpynames by the name string they return.
        def late_OperationError():
            self.initcode.append1(
                'from pypy.interpreter.error import OperationError as gOperationError')
            return 'gOperationError'
        def late_Arguments():
            self.initcode.append1('from pypy.interpreter import gateway')
            return 'gateway.Arguments'

        self.rpynames = {Constant(None).key: 'space.w_None',
                         Constant(False).key: 'space.w_False',
                         Constant(True).key: 'space.w_True',
                         Constant(Ellipsis).key: 'space.w_Ellipsis',
                         Constant(NotImplemented).key: 'space.w_NotImplemented',
                         Constant(OperationError).key: late_OperationError,
                         Constant(Arguments).key: late_Arguments,
                         }
        u = UniqueList
        self.initcode = u()   # list of lines for the module's initxxx()

        self.latercode = u()  # list of generators generating extra lines
                              # for later in initxxx() -- for recursive
                              # objects
        self.namespace = NameManager()
        self.namespace.make_reserved_names('__doc__ __args__ space goto')
        self.globaldecl = []
        self.globalobjects = []
        self.pendingfunctions = []
        self.currentfunc = None
        self.debugstack = ()  # linked list of nested nameof()

        # special constructors:
        self.has_listarg = {}
        for name in "newtuple newlist newstring".split():
            self.has_listarg[name] = name

        # catching all builtins in advance, to avoid problems
        # with modified builtins

        # add a dummy _issubtype() to builtins
        if not hasattr(__builtin__, '_issubtype'):
            def _issubtype(cls1, cls2):
                raise TypeError, "this dummy should *not* be reached"
            __builtin__._issubtype = _issubtype

        # stand-ins recording the *name* of each builtin, keyed by id()
        class bltinstub:
            def __init__(self, name):
                self.__name__ = name
            def __repr__(self):
                return '<%s>' % self.__name__

        self.builtin_ids = dict( [
            (id(value), bltinstub(key))
            for key, value in __builtin__.__dict__.items()
            if callable(value) and type(value) not in [types.ClassType, type] ] )

        self.space = FlowObjSpace() # for introspection

        self.use_fast_call = True
        self.specialize_goto = False
        self._labeltable = {} # unique label names, reused per func

        self._space_arities = None
def expr(self, v, localscope, wrapped = True):
if isinstance(v, Variable):
return localscope.localname(v.name, wrapped)
elif isinstance(v, Constant):
return self.nameof(v.value,
debug=('Constant in the graph of', self.currentfunc))
else:
raise TypeError, "expr(%r)" % (v,)
def arglist(self, args, localscope):
res = [self.expr(arg, localscope) for arg in args]
return ", ".join(res)
    def oper(self, op, localscope):
        """Render one flow-graph SpaceOperation as (possibly multi-line)
        interp-level source text."""
        if op.opname == 'issubtype':
            # issubtype with a non-constant or non-class second argument
            # degenerates into a plain call of the builtin issubclass
            arg = op.args[1]
            if (not isinstance(arg, Constant)
                or not isinstance(arg.value, (type, types.ClassType))):
                op = SpaceOperation("simple_call",
                                    [Constant(issubclass)]+op.args,
                                    op.result)
        if op.opname == "simple_call":
            v = op.args[0]
            space_shortcut = self.try_space_shortcut_for_builtin(v, len(op.args)-1,
                                                                 op.args[1:])
            if space_shortcut is not None:
                # space method call
                exv = space_shortcut
                fmt = "%(res)s = %(func)s(%(args)s)"
            else:
                # import sys|__builtin__|_codecs avoid going through __import__
                if isinstance(v, Constant) and v.value is __builtin__.__import__:
                    name, glb, loc, frm_lst = op.args[1:]
                    if (isinstance(name, Constant) and name.value in ('sys', '__builtin__', '_codecs') and
                        isinstance(loc, Constant) and loc.value is None and
                        isinstance(frm_lst, Constant) and frm_lst.value is None):
                        return "%s = space.getbuiltinmodule(%r)" % (self.expr(op.result, localscope),
                                                                    name.value)
                exv = self.expr(v, localscope)
                # default for a spacecall:
                fmt = "%(res)s = space.call_function(%(func)s, %(args)s)"
                # see if we can optimize for a fast call.
                # we just do the very simple ones.
                if self.use_fast_call and (isinstance(v, Constant)
                                           and exv.startswith('gfunc_')):
                    func = v.value
                    # only plain positional signatures have a fastf_ variant
                    if (not func.func_code.co_flags & CO_VARARGS) and (
                        func.func_defaults is None):
                        fmt = "%(res)s = fastf_%(func)s(space, %(args)s)"
                        exv = exv[6:]
            return fmt % {"res" : self.expr(op.result, localscope),
                          "func": exv,
                          "args": self.arglist(op.args[1:], localscope) }
        if op.opname == "call_args":
            # call with keywords/starargs: go through Arguments.fromshape
            v = op.args[0]
            exv = self.expr(v, localscope)
            fmt = (
                "_args = %(Arg)s.fromshape(space, %(shape)s, [%(data_w)s])\n"
                "%(res)s = space.call_args(%(func)s, _args)")
            assert isinstance(op.args[1], Constant)
            shape = op.args[1].value
            # make a list out of the second shape elt.
            shape = shape[0], list(shape[1]), shape[2], shape[3]
            return fmt % {"res": self.expr(op.result, localscope),
                          "func": exv,
                          "shape": repr(shape),
                          "data_w": self.arglist(op.args[2:], localscope),
                          'Arg': self.nameof(Arguments) }
        if op.opname == "hint":
            # hints are transparent at interp-level: plain assignment
            return "%s = %s" % (self.expr(op.result, localscope),
                                self.expr(op.args[0], localscope))
        if op.opname in self.has_listarg:
            fmt = "%s = %s([%s])"
        else:
            fmt = "%s = %s(%s)"
        # special case is_true: its result is used unwrapped
        opname = op.opname
        if opname.startswith('getitem_'):
            opname = 'getitem'
        wrapped = opname != "is_true"
        oper = "space.%s" % opname
        return fmt % (self.expr(op.result, localscope, wrapped), oper,
                      self.arglist(op.args, localscope))
def large_assignment(self, left, right, margin=65):
expr = "(%s) = (%s)" % (", ".join(left), ", ".join(right))
pieces = expr.split(",")
res = [pieces.pop(0)]
for piece in pieces:
if len(res[-1])+len(piece)+1 > margin:
res[-1] += ","
res.append(piece)
else:
res[-1] += (","+piece)
return res
def large_initialize(self, vars, margin=65):
res = []
nonestr = "None"
margin -= len(nonestr)
for var in vars:
ass = var+"="
if not res or len(res[-1]) >= margin:
res.append(ass)
else:
res[-1] += ass
res = [line + nonestr for line in res]
return res
    def mklabel(self, blocknum):
        """Return the goto-label text for a block: normally repr(blocknum),
        or a prebuilt SpecTag global when specialize_goto is enabled
        (labels are cached in _labeltable and emitted once)."""
        if self.specialize_goto:
            lbname = self._labeltable.get(blocknum)
            if not lbname:
                self.initcode.append1(
                    'from pypy.objspace.flow.framestate import SpecTag')
                lbname = self.uniquename("glabel_%d" % blocknum)
                self._labeltable[blocknum] = lbname
                self.initcode.append1('%s = SpecTag()' % lbname)
            return lbname
        else:
            return repr(blocknum)
    def gen_link(self, link, localscope, blocknum, block, linklocalvars=None):
        "Generate the code to jump across the given Link."
        linklocalvars = linklocalvars or {}
        left, right = [], []
        for a1, a2 in zip(link.args, link.target.inputargs):
            if a1 in linklocalvars:
                src = linklocalvars[a1]
            else:
                src = self.expr(a1, localscope)
            dest = self.expr(a2, localscope)
            if src != dest:  # skip no-op self-assignments
                left.append(dest)
                right.append(src)
        if left: # anything at all?
            txt = "%s = %s" % (", ".join(left), ", ".join(right))
            if len(txt) <= 65: # arbitrary
                yield txt
            else:
                for line in self.large_assignment(left, right):
                    yield line
        goto = blocknum[link.target]
        yield 'goto = %s' % self.mklabel(goto)
        if goto <= blocknum[block]:
            # backward jump: restart the dispatch loop
            yield 'continue'
    def register_early(self, obj, name):
        """Pre-register *name* for *obj* before rendering its contents,
        so self-referencing containers resolve to the right global."""
        # this was needed for recursive lists.
        # note that self.latercode led to too late initialization.
        key = Constant(obj).key
        self.rpynames[key] = name
    def nameof(self, obj, debug=None, namehint=None):
        """Return (rendering it on first use) the global name for *obj*.

        Results are memoized in self.rpynames, keyed by Constant(obj).key.
        Non-string cached entries are lazy constructors run on first use.
        Rendering dispatches to the first 'nameof_<typename>' method found
        along type(obj).__mro__.
        """
        key = Constant(obj).key
        try:
            txt = self.rpynames[key]
            if type(txt) is not str:
                # this is a predefined constant, initialized on first use
                func = txt
                txt = func()
                self.rpynames[key] = txt
            return txt
        except KeyError:
            # keep a debug trail of nested nameof() calls
            if debug:
                stackentry = debug, obj
            else:
                stackentry = obj
            self.debugstack = (self.debugstack, stackentry)
            obj_builtin_base = builtin_base(obj)
            if obj_builtin_base in (object, int, long) and type(obj) is not obj_builtin_base:
                # assume it's a user defined thingy
                name = self.nameof_instance(obj)
            else:
                # shortcutting references to __builtin__
                if id(obj) in self.builtin_ids:
                    func = self.builtin_ids[id(obj)]
                    #name = self.get_nameof_builtin_func(func)
                    # the above is quicker in principle, but pulls more
                    # stuff in, so it is slower right now.
                    name = "(space.builtin.get(space.str_w(%s)))" % self.nameof(func.__name__)
                else:
                    for cls in type(obj).__mro__:
                        meth = getattr(self,
                                       'nameof_' + cls.__name__.replace(' ', ''),
                                       None)
                        if meth:
                            break
                    else:
                        raise Exception, "nameof(%r)" % (obj,)
                    code = meth.im_func.func_code
                    # pass namehint only to renderers that accept it
                    if namehint and 'namehint' in code.co_varnames[:code.co_argcount]:
                        name = meth(obj, namehint=namehint)
                    else:
                        name = meth(obj)
            self.debugstack, x = self.debugstack
            assert x is stackentry
            self.rpynames[key] = name
            return name
    def get_nameof_builtin_func(self, func):
        """Render a builtin function with a runtime fall-back wrapper.

        Currently unused (see nameof): faster in principle, but pulls in
        more machinery.
        """
        # this is a hack!
        # in some cases, like exceptions, we don't have space.builtin available,
        # so we create a fall-back...
        name = self.uniquename('gbltin_' + func.__name__)
        self.initcode.append1('''\
try:
    # see if we have space.builtin in this context
    space.builtin
except AttributeError:
    print "didn't get", %(bltin)r
    def %(name)s(space, __args__):
        w_func = space.builtin.get(%(bltin)r)
        return space.call_args(w_func, __args__)
    %(name)s = space.wrap(gateway.interp2app(%(name)s, unwrap_spec=[gateway.ObjSpace, gateway.Arguments]))
else:
    print "got it:", %(bltin)r
    %(name)s = space.builtin.get(%(bltin)r)'''
            % {'name': name, 'bltin': func.__name__} )
        return name
    def uniquename(self, basename):
        """Allocate a fresh global name derived from *basename* and record
        it in globalobjects/globaldecl."""
        name = self.namespace.uniquename(basename)
        self.globalobjects.append(name)
        self.globaldecl.append('# global object %s' % (name,))
        return name
def uniquenameofprebuilt(self, basename, obj):
# identifying an object and giving it a name,
# without the attempt to render it.
key = Constant(obj).key
try:
txt = self.rpynames[key]
except KeyError:
self.rpynames[key] = txt = self.uniquename(basename)
return txt
    def nameof_object(self, value):
        """Fallback renderer: wrap odd builtin instances via their repr,
        or build a plain object() at init time."""
        if type(value) is not object:
            # try to just wrap it?
            name = self.uniquename('g_%sinst_%r' % (type(value).__name__, value))
            self.initcode.append1('%s = space.wrap(%r)' % (name, value))
            return name
        name = self.uniquename('g_object')
        self.initcode.append('_tup = space.newtuple([])\n'
                             '%s = space.call(space.w_object, _tup)'
                             % name)
        return name
def is_module_builtin(self, mod):
if not hasattr(mod, "__file__") or mod.__file__ is None:
return True
if not (mod.__file__.endswith('.pyc') or
mod.__file__.endswith('.py') or
mod.__file__.endswith('.pyo')):
return True
if mod.__file__.endswith('*.py'): # on top of PyPy, a mixed module
return True
return False
def nameof_module(self, value):
if value is os or self.is_module_builtin(value):
return bltinmod_helper(self, value)
# we might have createda reference to a module
# that is non-standard.
# SKIPPING
return "space.w_None"
# check whether we can import
try:
import value
need_extra_path = False
except ImportError:
need_extra_path = True
name = self.uniquename('mod_%s' % value.__name__)
if need_extra_path:
self.initcode.append1('import pypy')
self.initcode.append1('import sys')
self.initcode.append1('import os')
self.initcode.append1('for pkgdir in pypy.__path__:\n'
' libdir = os.path.join(pkgdir, "lib")\n'
' if os.path.isdir(libdir):\n'
' break\n'
'else:\n'
' raise Exception, "cannot find pypy/lib directory"\n'
'sys.path.insert(0, libdir)\n')
self.initcode.append1('try:\n'
' import %s as _tmp\n'
'finally:\n'
' if libdir in sys.path:\n'
' sys.path.remove(libdir)\n' % value.__name__)
else:
self.initcode.append1('import %s as _tmp' % value.__name__)
self.initcode.append1('%s = space.wrap(_tmp)' % (name))
return name
def nameof_int(self, value):
if value >= 0:
name = 'gi_%d' % value
else:
# make sure that the type ident is completely described by
# the prefixbefore the initial '_' for easy postprocessing
name = 'gi_minus_%d' % abs(value)
name = self.uniquename(name)
self.initcode.append1('%s = space.wrap(%d)' % (name, value))
return name
    def nameof_long(self, value):
        """Wrap a long constant; values that do not fit in a machine word
        are rebuilt through long_helper via space.eval."""
        # assume we want them in hex most of the time
        if value < 256L:
            s = "%dL" % value
        else:
            s = "0x%08xL" % value
        if value >= 0:
            name = 'glong_%s' % s
        else:
            # make sure that the type ident is completely described by
            # the prefix before the initial '_'
            name = 'glong_minus_%d' % abs(value)
        name = self.uniquename(name)
        # allow literally short longs only, meaning they
        # must fit into a machine word.
        if (sys.maxint*2+1)&value == value:
            self.initcode.append1('%s = space.wrap(%s) # XXX implement long!' % (name, s))
        else:
            long_helper(self, name, value)
        return name
    def nameof_float(self, value):
        """Wrap a float constant; infinities (which satisfy 2*v == v and
        cannot be written literally) are regenerated by overflowing
        1e200*1e200 at init time."""
        name = 'gfloat_%s' % value
        name = (name.replace('-', 'minus')
                    .replace('.', 'dot'))
        name = self.uniquename(name)
        # handle overflows
        if value != 0.0 and 2*value == value:
            self.initcode.append1('float_inf = 1e200\nfloat_inf *= float_inf')
            sign = '-+'[value >= 0]
            self.initcode.append('%s = space.wrap(%sfloat_inf)' % (name, sign))
        else:
            self.initcode.append('%s = space.wrap(%r)' % (name, value))
        return name
    def nameof_str(self, value):
        """Wrap an interned string constant; the global name embeds a
        printable prefix of the value."""
        if [c for c in value if c<' ' or c>'~' or c=='"' or c=='\\']:
            # non-printable string
            namestr = repr(value)[1:-1]
        else:
            # printable string
            namestr = value
        if not namestr:
            namestr = "_emptystr_"
        name = self.uniquename('gs_' + namestr[:32])
        if len(value) < 30 and "\n" not in value:
            txt = '%s = space.new_interned_str(%r)' % (name, value)
        else:
            # long or multi-line values are rendered as docstring blocks
            txt = render_docstr(value, '%s = space.new_interned_str(\n' % name, ')')
            # a 1-tuple marks "do not split/indent" for gen_source_temp
            txt = txt, # not splitted
        self.initcode.append(txt)
        return name
    def skipped_function(self, func):
        # debugging only! Generates a placeholder for missing functions
        # that raises an exception when called.
        # NOTE(review): only comment lines are emitted below, so the name
        # is in fact left undefined in the generated module -- confirm.
        name = self.uniquename('gskippedfunc_' + func.__name__)
        self.globaldecl.append('# global decl %s' % (name, ))
        self.initcode.append('# build func %s' % name)
        return name
    def skipped_class(self, cls):
        # debugging only! Generates a placeholder for missing classes
        # that raises an exception when called.
        # NOTE(review): as with skipped_function, only comments are
        # emitted, so the name stays undefined in the generated module.
        name = self.uniquename('gskippedclass_' + cls.__name__)
        self.globaldecl.append('# global decl %s' % (name, ))
        self.initcode.append1('# build class %s' % name)
        return name
    def trans_funcname(self, s):
        """Mangle *s* into an identifier using the C_IDENTIFIER table."""
        return s.translate(C_IDENTIFIER)
    def nameof_function(self, func, namehint=''):
        """Render a plain Python function as a wrapped gateway.interp2app
        and queue its body for flow-graph translation."""
        if hasattr(func, 'geninterplevel_name'):
            return func.geninterplevel_name(self)
        if func.func_globals is None:
            # built-in functions on top of PyPy
            return self.nameof_builtin_function(func)

        printable_name = '(%s:%d) %s' % (
            self.trans_funcname(func.func_globals.get('__name__', '?')),
            func.func_code.co_firstlineno,
            func.__name__)
        # functions marked NOT_RPYTHON in their docstring are skipped
        if (func.func_doc and
            func.func_doc.lstrip().startswith('NOT_RPYTHON')):
            log.WARNING("skipped %s" % printable_name)
            return self.skipped_function(func)

        name = self.uniquename('gfunc_' + self.trans_funcname(
            namehint + func.__name__))

        positional, varargs, varkwds, defs = inspect.getargspec(func)
        if varargs is varkwds is defs is None:
            # simple positional signature: use the fast per-arg unwrap spec
            unwrap = ', '.join(['gateway.W_Root']*len(positional))
            interp_name = 'fastf_' + name[6:]
        else:
            unwrap = 'gateway.Arguments'
            interp_name = 'f_' + name[6:]

        self.initcode.append1('from pypy.interpreter import gateway')
        self.initcode.append1('%s = space.wrap(gateway.interp2app(%s, unwrap_spec=[gateway.ObjSpace, %s]))' %
                              (name, interp_name, unwrap))
        self.pendingfunctions.append(func)
        return name
    def nameof_staticmethod(self, sm):
        """Render a staticmethod by unwrapping and rendering its function."""
        # XXX XXX XXXX
        # __get__ with a dummy "instance" extracts the wrapped function
        func = sm.__get__(42.5)
        name = self.uniquename('gsm_' + func.__name__)
        functionname = self.nameof(func)
        self.initcode.append1('%s = space.wrap(%s)' % (name, functionname))
        return name
    def nameof_instancemethod(self, meth):
        """Render methods: unbound ones fall back to the plain function
        (with a class-name hint); bound ones become a getattr on the
        rendered im_self at init time."""
        if meth.im_func.func_globals is None:
            # built-in methods (bound or not) on top of PyPy
            return self.nameof_builtin_method(meth)
        if meth.im_self is None:
            # no error checking here
            return self.nameof(meth.im_func, namehint="%s_" % meth.im_class.__name__)
        else:
            ob = self.nameof(meth.im_self)
            # NOTE(review): func and typ look unused, but self.nameof()
            # has the side effect of rendering im_func and im_class into
            # the module -- do not remove without checking the output.
            func = self.nameof(meth.im_func)
            typ = self.nameof(meth.im_class)
            name = self.uniquename('gmeth_' + meth.im_func.__name__)
            funcname = self.nameof(meth.im_func.__name__)
            self.initcode.append1(
                '%s = space.getattr(%s, %s)' % (name, ob, funcname))
            return name
    nameof_method = nameof_instancemethod # when run on top of PyPy
def should_translate_attr(self, pbc, attr):
ignore = getattr(pbc.__class__, 'NOT_RPYTHON_ATTRIBUTES', [])
if attr in ignore:
return False
else:
return "probably" # True
    def later(self, gen):
        """Queue a generator of init-code lines to be drained after the
        current function, paired with the current debug stack."""
        self.latercode.append1((gen, self.debugstack))
    def nameof_instance(self, instance):
        """Render an instance: created via cls.__new__ (bypassing __init__)
        and its attributes filled in later, allowing recursive refs."""
        klass = instance.__class__
        name = self.uniquename('ginst_' + klass.__name__)
        cls = self.nameof(klass)
        if hasattr(klass, '__base__'):
            base_class = builtin_base(instance)
            base = self.nameof(base_class)
        else:
            base_class = None
            base = cls
        def initinstance():
            # attribute assignments, generated lazily via self.later()
            content = instance.__dict__.items()
            content.sort()
            for key, value in content:
                if self.should_translate_attr(instance, key):
                    try:
                        yield 'space.setattr(%s, %s, %s)' % (
                            name, self.nameof(key), self.nameof(value))
                    except:
                        log.ERROR("Problem while generating %s of %r" % (
                            name, instance))
                        raise
        self.initcode.append1("%s = space.call_method(%s, '__new__', %s)" % (
            name, cls, cls))
        self.later(initinstance())
        return name
def space_arities(self):
if self._space_arities is None:
arities = self._space_arities = {}
for name, sym, arity, specnames in self.space.MethodTable:
arities[name] = arity
arities['isinstance'] = 2
return self._space_arities
    def try_space_shortcut_for_builtin(self, v, nargs, args):
        """If *v* is a builtin that exists as a space method of matching
        arity, return 'space.<name>'; otherwise None.  'isinstance' only
        qualifies when its second argument is a constant class."""
        if isinstance(v, Constant) and id(v.value) in self.builtin_ids:
            name = self.builtin_ids[id(v.value)].__name__
            if hasattr(self.space, name):
                if self.space_arities().get(name, -1) == nargs:
                    if name != 'isinstance':
                        return "space.%s" % name
                    else:
                        arg = args[1]
                        if (isinstance(arg, Constant)
                            and isinstance(arg.value, (type, types.ClassType))):
                            return "space.isinstance"
        return None
def nameof_builtin_function_or_method(self, func):
if func.__self__ is None:
return self.nameof_builtin_function(func)
else:
return self.nameof_builtin_method(func)
    def nameof_builtin_function(self, func):
        """Render a builtin function, preferring lazy space.builtin /
        space.sys lookups over a getattr on the rendered module."""
        # builtin function
        if id(func) in self.builtin_ids:
            func = self.builtin_ids[id(func)]
            return "(space.builtin.get(space.str_w(%s)))" % self.nameof(func.__name__)
        # where does it come from? Python2.2 doesn't have func.__module__
        for modname, module in sys.modules.items():
            if not self.is_module_builtin(module):
                continue    # skip non-builtin modules
            if func is getattr(module, func.__name__, None):
                break
        else:
            raise Exception, '%r not found in any built-in module' % (func,)
        #if modname == '__builtin__':
        #    # be lazy
        #    return "(space.builtin.get(space.str_w(%s)))" % self.nameof(func.__name__)
        if modname == 'sys':
            # be lazy
            return "(space.sys.get(space.str_w(%s)))" % self.nameof(func.__name__)
        else:
            name = self.uniquename('gbltin_' + func.__name__)
            self.initcode.append1('%s = space.getattr(%s, %s)' % (
                name, self.nameof(module), self.nameof(func.__name__)))
        return name
    def nameof_builtin_method(self, meth):
        """Render a builtin bound method as a getattr on its __self__;
        unbound builtin methods (PyPy only) go via the descriptor path."""
        try:
            im_self = meth.__self__
        except AttributeError:
            im_self = meth.im_self    # on top of PyPy
        if im_self is None:
            # builtin unbound method (only on top of PyPy)
            name = self.nameof_wrapper_descriptor(meth)
        else:
            # builtin (bound) method
            name = self.uniquename('gbltinmethod_' + meth.__name__)
            self.initcode.append1('%s = space.getattr(%s, %s)' % (
                name, self.nameof(im_self), self.nameof(meth.__name__)))
        return name
    def nameof_classobj(self, cls):
        """Render a class (old- or new-style) by rebuilding it at init
        time from (name, bases, dict); the non-special attributes are
        filled in later to allow recursive references."""
        initcode = []
        printable_name = cls.__name__
        if cls.__doc__ and cls.__doc__.lstrip().startswith('NOT_RPYTHON'):
            #raise Exception, "%r should never be reached" % (cls,)
            log.WARNING("skipped class %s" % printable_name)
            return self.skipped_class(cls)

        metaclass = "space.w_type"
        name = self.uniquename('gcls_' + cls.__name__)

        if issubclass(cls, py.builtin.BaseException):
            # if cls.__module__ == 'exceptions':
            # don't rely on this, py.magic redefines AssertionError
            if getattr(__builtin__,cls.__name__,None) is cls:
                # exception are defined on the space
                return 'space.w_%s' % cls.__name__

        if not isinstance(cls, type):
            assert type(cls) is types.ClassType
            # do *not* change metaclass, but leave the
            # decision to what PyPy thinks is correct.
            # metaclass = 'space.w_classobj'

        basenames = [self.nameof(base) for base in cls.__bases__]

        def initclassobj():
            # lazily emitted attribute assignments (via self.later)
            content = cls.__dict__.items()
            content.sort()
            for key, value in content:
                if key.startswith('__'):
                    if key in ['__module__', '__doc__', '__dict__',
                               '__weakref__', '__metaclass__', '__slots__',
                               '__new__', '__del__']:
                        continue

                # redirect value through class interface, in order to
                # get methods instead of functions.
                value = getattr(cls, key)

                if isinstance(value, staticmethod) and value.__get__(1) not in self.translator.flowgraphs and self.translator.frozen:
                    log.WARNING("skipped staticmethod: %s" % value)
                    continue
##                if isinstance(value, FunctionType) and value not in self.translator.flowgraphs and self.translator.frozen:
##                    log.WARNING("skipped function: %s" % value)
##                    continue
                yield 'space.setattr(%s, %s, %s)' % (
                    name, self.nameof(key), self.nameof(value))

        baseargs = ", ".join(basenames)
        initcode.append('_dic = space.newdict()')
        # the special keys below must be present in the dict at creation
        for key, value in cls.__dict__.items():
            if key.startswith('__'):
                if key in ['__module__', '__metaclass__', '__slots__',
                           '__new__', '__del__']:
                    keyname = self.nameof(key)
                    valname = self.nameof(value)
                    initcode.append("space.setitem(_dic, %s, %s)" % (
                        keyname, valname))

        if cls.__doc__ is not None:
            sdoc = self.nameof("__doc__")
            docobj = cls.__dict__["__doc__"]
            if type(docobj) in (str, unicode):
                docstr = render_docstr(cls, "_doc = space.wrap(", ")")
                initcode.append((docstr,)) # not splitted
            else:
                initcode.append("_doc = %s" % self.nameof(docobj) )
            initcode.append("space.setitem(_dic, %s, _doc)" % (sdoc,))
        cls_name = self.nameof(cls.__name__)
        for l in initcode:
            self.initcode.append(l)
        self.initcode.append1('_bases = space.newtuple([%(bases)s])\n'
                              '_args = space.newtuple([%(name)s, _bases, _dic])\n'
                              '%(klass)s = space.call(%(meta)s, _args)'
                              % {"bases": baseargs,
                                 "klass": name,
                                 "name" : cls_name,
                                 "meta" : metaclass} )

        self.later(initclassobj())
        return name
    nameof_class = nameof_classobj # for Python 2.2
typename_mapping = {
object: 'space.w_object',
int: 'space.w_int',
long: 'space.w_long',
bool: 'space.w_bool',
list: 'space.w_list',
tuple: 'space.w_tuple',
dict: 'space.w_dict',
str: 'space.w_str',
float: 'space.w_float',
slice: 'space.w_slice',
types.InstanceType: (eval_helper, 'InstanceType', 'types.InstanceType'),
type: 'space.w_type',
complex: (eval_helper, 'complex', 'types.ComplexType'),
unicode:'space.w_unicode',
basestring: (eval_helper, 'basestring', 'basestring'),
file: (eval_helper, 'file', 'file'),
type(None): (eval_helper, 'NoneType', 'type(None)'),
CodeType: (eval_helper, 'code', 'type((lambda:42).func_code)'),
ModuleType: (eval_helper, 'ModuleType', 'types.ModuleType'),
xrange: (eval_helper, 'xrange', 'xrange'),
##r_int: 'space.w_int',
##r_uint: 'space.w_int',
type(len): (eval_helper, 'FunctionType', 'type(lambda:42)'),
# type 'method_descriptor':
# XXX small problem here:
# XXX with space.eval, we get <W_TypeObject(method)>
# XXX but with wrap, we get <W_TypeObject(instancemethod)>
type(list.append): (eval_helper, "method_descriptor", "type(list.append)"),
# type 'wrapper_descriptor':
type(type(None).__repr__): (eval_helper, "wrapper_descriptor",
"type(type(None).__repr__)"),
# type 'getset_descriptor':
# XXX here we get <W_TypeObject(FakeDescriptor)>,
# while eval gives us <W_TypeObject(GetSetProperty)>
type(type.__dict__['__dict__']): (eval_helper, "getset_descriptor",
"type(type.__dict__[\'__dict__\'])"),
# type 'member_descriptor':
# XXX this does not work in eval!
# type(type.__dict__['__basicsize__']): "cannot eval type(type.__dict__['__basicsize__'])",
# XXX there seems to be no working support for member descriptors ???
type(types.GeneratorType.gi_frame):
(eval_helper, "member_descriptor", 'type(property.fdel)'),
types.ClassType: 'space.w_classobj',
types.MethodType: (eval_helper, "instancemethod",
"type((lambda:42).__get__(42))"),
type(Ellipsis): (eval_helper, 'EllipsisType', 'types.EllipsisType'),
builtin_set: (eval_helper, "set", "set"),
builtin_frozenset: (eval_helper, "frozenset", "frozenset"),
buffer: (eval_helper, "buffer", "buffer"),
}
    def nameof_type(self, cls):
        """Render a type: table lookup first, exceptions via space.w_*,
        everything else treated as a (heap) class object."""
        if cls in self.typename_mapping:
            ret = self.typename_mapping[cls]
            if type(ret) is tuple:
                # lazily-built entry: (helper, typename, expr)
                ret = ret[0](self, ret[1], ret[2])
            return ret
        if issubclass(cls, py.builtin.BaseException):   # Python 2.5 only
            # if cls.__module__ == 'exceptions':
            # don't rely on this, py.magic redefines AssertionError
            if getattr(__builtin__,cls.__name__,None) is cls:
                # exception are defined on the space
                return 'space.w_%s' % cls.__name__
        assert cls.__module__ != '__builtin__' or cls.__flags__&_HEAPTYPE, (
            "built-in class %r not found in typename_mapping "
            "while compiling %s" % (cls, self.currentfunc and
                                    self.currentfunc.__name__ or "*no function at all*"))
        return self.nameof_classobj(cls)
def nameof_tuple(self, tup):
name = self.uniquename('g%dtuple' % len(tup))
args = [self.nameof(x) for x in tup]
args = ', '.join(args)
self.initcode.append1('%s = space.newtuple([%s])' % (name, args))
return name
    def nameof_list(self, lis):
        """Render a list constant; pre-registered so recursive lists work,
        and pre-sized via space.mul to save per-item assignments."""
        name = self.uniquename('g%dlist' % len(lis))
        # note that self.latercode led to too late initialization.
        self.register_early(lis, name)
        # try to save at least one assignment: fill with the first element
        # (unless that element is the list itself) and fix up the rest
        if lis and lis[0] is not lis:
            default = lis[0]
        else:
            default = None
        self.initcode.append('%s = space.newlist([%s])' % (name, self.nameof(default)))
        self.initcode.append('%s = space.mul(%s, %s)' % (name, name, self.nameof(len(lis))))
        for i in range(len(lis)):
            if lis[i] is not default:
                item = self.nameof(lis[i])
                self.initcode.append('space.setitem(%s, %s, %s);' % (
                    name, self.nameof(i), item))
        return name
def nameof_dict(self, dic):
assert dic is not __builtins__
name = self.uniquename('g%ddict' % len(dic))
self.register_early(dic, name)
self.initcode.append('%s = space.newdict()' % (name,))
for k in dic:
if k == '__builtins__':
continue
self.initcode.append('space.setitem(%s, %s, %s)'%(
name, self.nameof(k), self.nameof(dic[k])))
return name
    # strange prebuilt instances below, don't look too closely
    # XXX oh well.
    def nameof_member_descriptor(self, md):
        """Render any descriptor (member/getset/method/wrapper) as a
        getattr on its defining class at init time."""
        try:
            im_class = md.__objclass__
        except AttributeError:
            im_class = md.im_class    # on top of PyPy
        name = self.uniquename('gdescriptor_%s_%s' % (
            im_class.__name__, md.__name__))
        cls = self.nameof(im_class)
        self.initcode.append1('%s = space.getattr(%s, %s)' %
                              (name, cls, self.nameof(md.__name__)))
        return name
    nameof_getset_descriptor  = nameof_member_descriptor
    nameof_method_descriptor  = nameof_member_descriptor
    nameof_wrapper_descriptor = nameof_member_descriptor
def nameof_property(self, prop):
origin = prop.__doc__ # XXX quite a hack
name = self.uniquename('gprop_' + origin)
if not origin:
raise ValueError("sorry, cannot build properties"
" without a helper in __doc__")
# property is lazy loaded app-level as well, trigger it*s creation
self.initcode.append1('space.builtin.get("property") # pull it in')
globname = self.nameof(self.moddict)
self.initcode.append('space.setitem(%s, space.new_interned_str("__builtins__"), '
'space.builtin.w_dict)' % globname)
self.initcode.append('%s = space.eval("property(%s)", %s, %s)' %(
name, origin, globname, globname) )
self.initcode.append('space.delitem(%s, space.new_interned_str("__builtins__"))'
% globname)
return name
def nameof_file(self, fil):
if fil is sys.stdin:
return 'space.sys.get("stdin")'
if fil is sys.stdout:
return 'space.sys.get("stdout")'
if fil is sys.stderr:
return 'space.sys.get("stderr")'
raise Exception, 'Cannot translate an already-open file: %r' % (fil,)
    def gen_source(self, fname, ftmpname=None, file=file):
        """Write the complete generated module to *fname*.

        The source is first emitted in generation order (into ftmpname
        when given, for debugging), then its ##SECTION## pieces are
        sorted by (filename, firstlineno, function) and rewritten.
        """
        self.fname = fname
        self.ftmpname = ftmpname

        # generate unordered source file, first.
        # I prefer this over ordering everything in memory.
        fname = self.fname
        if self.ftmpname:
            fname = self.ftmpname
        f = file(fname, "w")
        # generate ordered source file
        try:
            self.f = f
            self.gen_source_temp()
        finally:
            f.close()

        def copyfile(source, target):
            # plain whole-file byte copy
            f = file(source)
            data = f.read()
            f.close()
            f = file(target, "w")
            f.write(data)
            f.close()

        def order_sections(fname):
            # reorder the ##SECTION##-separated (header, body) pairs
            # between prelude and postlude by their makekey() sort key
            sep = "\n##SECTION##\n"
            f = file(fname)
            txt = f.read()
            f.close()
            pieces = txt.split(sep)
            prelude = pieces.pop(0)
            postlude = pieces.pop()
            dic = {}
            while pieces:
                func = pieces.pop()
                head = pieces.pop()
                key = makekey(head, len(pieces))
                dic[key] = head + sep + func
            lis = dic.items()
            lis.sort()
            lis = [prelude] + [func for head, func in lis] + [postlude]
            txt = sep.join(lis)
            f = file(fname, "w")
            f.write(txt)
            f.close()

        def makekey(txt, uniqueno):
            # parse the '## name value' header lines into a sort key
            dic = {}
            for line in txt.split("\n"):
                ign, name, value = line.split(None, 2)
                dic[name] = eval(value, {})
            key = (dic["filename"], dic["firstlineno"],
                   dic["function"], uniqueno)
            return key

        order_sections(fname)
        if self.ftmpname:
            copyfile(self.ftmpname, self.fname)
    def gen_source_temp(self):
        """Write the module in generation order: header, init header,
        optional module docstring, all function sections, then the
        collected init code (docstring tuples kept unindented)."""
        f = self.f

        # header
        print >> f, self.RPY_HEADER
        print >> f

        info = {
            'modname': self.modname,
             # the side-effects of this is kick-start the process
            'entrypoint': None # self.nameof(self.entrypoint),
            }
        # header """def initmodule(space):"""
        print >> f, self.RPY_INIT_HEADER % info

        # doc
        if self.moddict and self.moddict.get("__doc__"):
            doc = self.moddict["__doc__"]
            print >> f, render_docstr(doc, "    __doc__ = \\\n")
            print >> f
            # make sure it is not rendered again
            key = Constant(doc).key
            self.rpynames[key] = "w__doc__"
            self.initcode.append("w__doc__ = space.new_interned_str(__doc__)")

        # info.entrypoint must be done *after* __doc__ is handled,
        # because nameof(entrypoint) might touch __doc__ early.
        info["entrypoint"] = self.nameof(self.entrypoint)

        # function implementations
        while self.pendingfunctions or self.latercode:
            if self.pendingfunctions:
                func = self.pendingfunctions.pop()
                self.currentfunc = func
                self.gen_rpyfunction(func)
            # collect more of the latercode after each function
            while self.latercode:
                gen, self.debugstack = self.latercode.pop()
                #self.initcode.extend(gen) -- eats TypeError! bad CPython!
                for line in gen:
                    self.initcode.append1(line)
                self.debugstack = ()
            self.gen_global_declarations()

        # set the final splitter
        print >> f, "##SECTION##"
        # footer, init code
        for codelines in self.initcode:
            # keep docstrings unindented
            indent = "    "
            if type(codelines) is tuple:
                codelines = codelines[0].split("\n", 1)
                codelines[0] = indent + codelines[0]
                indent = ""
            else:
                codelines = codelines.split("\n")
            for codeline in codelines:
                print >> f, indent + codeline

        self.gen_trailer(info, "    ")
        # do not close the file here!
def gen_trailer(self, info, indent):
    """Emit the closing part of the generated source.

    When generating a module (self.moddict is set) we just return the
    entrypoint object; otherwise the standalone RPY_INIT_FOOTER
    boilerplate is appended.
    """
    f = self.f
    if self.moddict:
        # we are generating a module, no __main__ etc.
        f.write(indent + "return %s" % self.nameof(self.entrypoint) + "\n")
        f.write("\n")
    else:
        # we should have an entrypoint function
        info['entrypointname'] = self.trans_funcname(self.entrypoint.__name__)
        f.write(self.RPY_INIT_FOOTER % info + "\n")
def gen_global_declarations(self):
    """Flush pending global declaration lines to the output file.

    self.globaldecl (if non-empty) is written as a commented section
    and cleared; self.globalobjects is cleared too — its per-name
    REGISTER_GLOBAL hook is intentionally disabled.
    """
    decls = self.globaldecl
    if decls:
        out = self.f
        out.write('# global declaration%s\n' % ('s' * (len(decls) > 1)))
        for decl in decls:
            out.write(decl + '\n')
        out.write('\n')
        del decls[:]
    objs = self.globalobjects
    for name in objs:
        pass # self.initcode.append1('# REGISTER_GLOBAL(%s)' % (name,))
    del objs[:]
def rel_filename(self, name):
    """Return *name* relative to the pypy package root, with forward
    slashes for OS independence; fall back to the unchanged name when
    it does not live under the pypy tree."""
    root = py.path.local(pypy.__path__[0])
    relative = py.path.local(name).relto(root)
    if not relative:
        return name # no success
    # make it os independent
    return relative.replace('\\', '/')
def gen_rpyfunction(self, func):
    """Generate the interp-level source of one app-level function.

    Builds the flow graph, converts it to SSA, then writes:
    a ##SECTION## marker with filename/function/firstlineno metadata
    (used later for re-ordering the sections), an optional argument-
    parsing wrapper f_<name> when the function has defaults or
    */** arguments, and the 'fast' positional-only implementation
    fastf_<name> whose body is produced by rpyfunction_body().
    """
    try:
        graph = self.translator.buildflowgraph(func, True)
    except Exception, e:
        print 20*"*", e
        print func
        raise
    SSI_to_SSA(graph)
    checkgraph(graph)

    f = self.f
    print >> f, "##SECTION##" # simple to split, afterwards
    # metadata parsed back by order_sections()/makekey (split on whitespace)
    print >> f, ("## filename    %r\n"
                 "## function    %r\n"
                 "## firstlineno %d") % (
        self.rel_filename(func.func_code.co_filename),
        func.func_code.co_name,
        func.func_code.co_firstlineno)
    print >> f, "##SECTION##"

    localscope = self.namespace.localScope()
    body = list(self.rpyfunction_body(graph, localscope))
    name_of_defaults = [self.nameof(x, debug=('Default argument of', func))
                        for x in (func.func_defaults or ())]
    self.gen_global_declarations()

    # print header
    docstr = render_docstr(func, "    ")
    cname = self.nameof(func)
    assert cname.startswith('gfunc_')
    f_name = 'f_' + cname[6:]

    ##     # collect all the local variables
    ##     graph = self.translator.getflowgraph(func)
    ##     localslst = []
    ##     def visit(node):
    ##         if isinstance(node, Block):
    ##             localslst.extend(node.getvariables())
    ##     traverse(visit, graph)
    ##     localnames = [self.expr(a, localscope) for a in uniqueitems(localslst)]

    # collect all the arguments
    vararg = varkw = None
    varargname = varkwname = None
    all_args = graph.getargs()
    p = len(all_args)
    # *args / **kwds occupy the last positions of the flow-graph args
    if func.func_code.co_flags & CO_VARKEYWORDS:
        p -= 1
        varkw = graph.getargs()[p]
        varkwname = func.func_code.co_varnames[p]
    if func.func_code.co_flags & CO_VARARGS:
        p -= 1
        vararg = graph.getargs()[p]
        varargname = func.func_code.co_varnames[p]
    positional_args = all_args[:p]

    fast_args = [self.expr(a, localscope) for a in positional_args]
    if vararg is not None:
        vararg = self.expr(vararg, localscope)
        fast_args.append(vararg)
    if varkw is not None:
        varkw = self.expr(varkw, localscope)
        fast_args.append(varkw)
    fast_name = 'fast' + f_name

    fast_set = dict(zip(fast_args, fast_args))

    # a "simple" function needs no __args__-parsing wrapper
    simple = (varargname is varkwname is None) and not name_of_defaults

    # create function declaration
    name = self.trans_funcname(func.__name__) # for <lambda>
    argstr = ", ".join(['space'] + fast_args)
    fast_function_header = ('    def %s(%s):'
                            % (name, argstr))

    def install_func(f_name, name):
        # bind the generated function under its public name
        yield ''
        yield '    %s = %s' % (f_name, name)
        #import __builtin__
        #dic = __builtin__.__dict__
        #if dic.get(name):
        #    yield 'del %s # hiding a builtin!' % name
        #else:
        #    self.initcode.append1('del m.%s' % (name,))

    def tupstr(seq):
        # render a tuple display, with trailing comma for 1-tuples
        if len(seq) == 1:
            fmt = '%s,'
        else:
            fmt = '%s'
        return fmt % ', '.join(seq)

    def tupassstr(seq):
        # left-hand side of a tuple assignment, or '' when empty
        if not seq:
            return ""
        else:
            return tupstr(seq) + " = "

    if not simple:
        # emit the wrapper that parses __args__ into positional values
        print >> f, '    def %s(space, __args__):' % (name,)
        if docstr is not None:
            print >> f, docstr
            print >> f
        print >> f, '        funcname = "%s"' % func.__name__
        kwlist = list(func.func_code.co_varnames[:func.func_code.co_argcount])
        signature = '        signature = %r' % kwlist
        signature = ", ".join([signature, repr(varargname), repr(varkwname)])
        print >> f, signature
        print >> f, '        defaults_w = [%s]' % ", ".join(name_of_defaults)
        print >> f, '        %s__args__.parse(funcname, signature, defaults_w)' % (
            tupassstr(fast_args),)
        print >> f, '        return %s(%s)' % (fast_name, ', '.join(["space"]+fast_args))
        for line in install_func(f_name, name):
            print >> f, line
        print >> f

    print >> f, fast_function_header
    if docstr is not None:
        print >> f, docstr

    ##     fast_locals = [arg for arg in localnames if arg not in fast_set]
    ##     # if goto is specialized, the false detection of
    ##     # uninitialized variables goes away.
    ##     if fast_locals and not self.specialize_goto:
    ##         print >> f
    ##         for line in self.large_initialize(fast_locals):
    ##             print >> f, "    %s" % line
    ##         print >> f

    # print the body
    for line in body:
        print >> f, line
    for line in install_func("fast"+f_name, name):
        print >> f, line
    print >> f

    # print the PyMethodDef
    # skipped
def rpyfunction_body(self, graph, localscope):
    """Yield the source lines of a function body rendered as a
    while/goto state machine: each flow-graph block becomes an
    'if goto == N:' section inside an endless loop, and links between
    blocks reassign 'goto'."""
    start = graph.startblock
    allblocks = ordered_blocks(graph)
    nblocks = len(allblocks)

    blocknum = {}
    for block in allblocks:
        blocknum[block] = len(blocknum)+1

    yield "        goto = %s # startblock" % self.mklabel(blocknum[start])
    yield "        while True:"

    def render_block(block):
        # catch_exception: the last operation is guarded by try/except
        catch_exception = block.exitswitch == c_last_exception
        regular_op = len(block.operations) - catch_exception
        # render all but maybe the last op
        for op in block.operations[:regular_op]:
            for line in self.oper(op, localscope).split("\n"):
                yield "%s" % line
        # render the last op if it is exception handled
        for op in block.operations[regular_op:]:
            yield "try:"
            for line in self.oper(op, localscope).split("\n"):
                yield "    %s" % line

        if len(block.exits) == 0:
            if len(block.inputargs) == 2: # exc_cls, exc_value
                # exceptional return block
                exc_cls = self.expr(block.inputargs[0], localscope)
                exc_val = self.expr(block.inputargs[1], localscope)
                yield "raise %s(%s, %s)" % (self.nameof(OperationError),
                                            exc_cls, exc_val)
            else:
                # regular return block
                retval = self.expr(block.inputargs[0], localscope)
                yield "return %s" % retval
            return
        elif block.exitswitch is None:
            # single-exit block
            assert len(block.exits) == 1
            for op in self.gen_link(block.exits[0], localscope, blocknum, block):
                yield "%s" % op
        elif catch_exception:
            # block catching the exceptions raised by its last operation
            # we handle the non-exceptional case first
            link = block.exits[0]
            assert link.exitcase is None
            for op in self.gen_link(link, localscope, blocknum, block):
                yield "    %s" % op
            # we must catch the exception raised by the last operation,
            # which goes to the last err%d_%d label written above.
            # Since we only have OperationError, we need to select:
            yield "except %s, e:" % (self.nameof(OperationError),)
            yield "    e.normalize_exception(space)"
            q = "if"
            for link in block.exits[1:]:
                assert issubclass(link.exitcase, py.builtin.BaseException)
                # Exception classes come unwrapped in link.exitcase
                yield "    %s e.match(space, %s):" % (q,
                                                      self.nameof(link.exitcase))
                q = "elif"
                for op in self.gen_link(link, localscope, blocknum, block, {
                            link.last_exception: 'e.w_type',
                            link.last_exc_value: 'e.w_value'}):
                    yield "        %s" % op
            yield "    else:raise # unhandled case, should not happen"
        else:
            # block ending in a switch on a value
            exits = list(block.exits)
            if len(exits) == 2 and (
                exits[0].exitcase is False and exits[1].exitcase is True):
                # order these guys like Python does
                exits.reverse()
            q = "if"
            for link in exits[:-1]:
                yield "%s %s == %s:" % (q, self.expr(block.exitswitch,
                                                     localscope),
                                        link.exitcase)
                for op in self.gen_link(link, localscope, blocknum, block):
                    yield "    %s" % op
                q = "elif"
            link = exits[-1]
            yield "else:"
            # debug only, creates lots of fluffy C code
            ##yield "    assert %s == %s" % (self.expr(block.exitswitch,
            ##                                         localscope),
            ##                               link.exitcase)
            for op in self.gen_link(exits[-1], localscope, blocknum, block):
                yield "    %s" % op

    # 'is' comparison is used when goto labels are specialized objects
    cmpop = ('==', 'is') [self.specialize_goto]
    for block in allblocks:
        blockno = blocknum[block]
        yield ""
        yield "            if goto %s %s:" % (cmpop, self.mklabel(blockno))
        for line in render_block(block):
            yield "                %s" % line
# ____________________________________________________________
RPY_HEADER = '''#!/bin/env python
# -*- coding: LATIN-1 -*-'''
RPY_SEP = "#*************************************************************"
RPY_INIT_HEADER = RPY_SEP + '''
#__name__ = %(modname)r
_geninterp_ = True
def init%(modname)s(space):
"""NOT_RPYTHON"""
'''
RPY_INIT_FOOTER = '''
# entry point: %(entrypointname)s, %(entrypoint)s
if __name__ == "__main__":
from pypy.objspace.std import StdObjSpace
from pypy.objspace.std.model import UnwrapError
space = StdObjSpace()
init%(modname)s(space)
ret = space.call(%(entrypoint)s, space.newtuple([]))
try:
print space.unwrap(ret)
except UnwrapError:
print "cannot unwrap, here the wrapped result:"
print ret
'''
# _____________________________________________________________________
# implementation of the interface that is finally only
# used: translate_as_module
import py.code
import cStringIO as StringIO
class memfile(object):
    """A minimal file replacement keeping data in a class-level store.

    mode 'w' creates a fresh in-memory buffer under *name*; mode 'r'
    re-reads a previously written buffer, falling back to the real file
    system the first time a name is seen.  close() is a no-op so the
    buffered data stays available for a later read.
    """
    _storage = {}   # class-level, shared: maps name -> StringIO buffer

    def __init__(self, name, mode="r"):
        if mode == "w":
            self._storage[name] = StringIO.StringIO()
        elif mode == "r":
            try:
                data = self._storage[name].getvalue()
            except KeyError:
                # bug fix: a missing name raises KeyError, not
                # IndexError — the original caught IndexError, so the
                # disk fallback below was unreachable
                f = open(name)
                data = f.read()
                f.close()
            # always re-create the buffer so reading starts at offset 0
            self._storage[name] = StringIO.StringIO(data)
        else:
            raise ValueError("mode %s not supported" % mode)
        self._file = self._storage[name]

    def __getattr__(self, name):
        # delegate read/write/getvalue/... to the underlying buffer
        return getattr(self._file, name)

    def close(self):
        # deliberately keep the data alive for a subsequent read
        pass
def translate_as_module(sourcetext, filename=None, modname="app2interpexec",
                        do_imports_immediately=False, tmpname=None):
    """ compile sourcetext as a module, translating to interp level.
    The result is the init function that creates the wrapped module dict,
    together with the generated source text.
    This init function needs a space as argument.
    tmpname can be passed for debugging purposes.

    Example:

    initfunc, newsrc = translate_as_module(text)
    from pypy.objspace.std import Space
    space = Space()
    dic = initfunc(space)
    # and now use the members of the dict
    """
    # create something like a module
    if type(sourcetext) is str:
        if filename is None:
            code = py.code.Source(sourcetext).compile()
        else:
            code = NiceCompile(filename)(sourcetext)
    else:
        # assume we got an already compiled source
        code = sourcetext
    dic = {'__name__': modname}
    if filename:
        dic['__file__'] = filename

    # XXX allow the app-level code to contain e.g. "import _formatting"
    for pkgdir in pypy.__path__:
        libdir = os.path.join(pkgdir, "lib")
        if os.path.isdir(libdir):
            break
    else:
        raise Exception, "cannot find pypy/lib directory"
    sys.path.insert(0, libdir)
    try:
        # temporarily install faked set/frozenset builtins if required
        if faked_set:
            import __builtin__
            __builtin__.set = fake_set
            __builtin__.frozenset = fake_frozenset
        try:
            # execute the app-level code to populate the module dict
            exec code in dic
        finally:
            if libdir in sys.path:
                sys.path.remove(libdir)

        entrypoint = dic
        t = TranslationContext(verbose=False, simplifying=True,
                               builtins_can_raise_exceptions=True,
                               list_comprehension_operations=False)
        t.no_annotator_but_do_imports_immediately = do_imports_immediately
        gen = GenRpy(t, entrypoint, modname, dic)
    finally:
        if faked_set:
            del __builtin__.set
            del __builtin__.frozenset

    # without an explicit tmpname, generate into an in-memory file
    if tmpname:
        _file = file
    else:
        _file = memfile
        tmpname = 'nada'
    out = _file(tmpname, 'w')
    gen.f = out
    try:
        if faked_set:
            import __builtin__
            __builtin__.set = fake_set
            __builtin__.frozenset = fake_frozenset
        gen.gen_source(tmpname, file=_file)
    finally:
        if faked_set:
            del __builtin__.set
            del __builtin__.frozenset
    out.close()

    # re-read, compile and execute the generated interp-level source
    f = _file(tmpname)
    newsrc = f.read()
    f.close()
    code = py.code.Source(newsrc).compile()
    dic = {'__name__': modname}
    exec code in dic
    # now we just need to return the init function,
    # which then needs to be called with the space to return the dict.
    return dic['init%s' % modname], newsrc
#___________________________________________________________________
# some testing code
# tiny app-level sample used by the smoke test below
testcode = """
def f(a, b):
    return a + b

def g():
    return f(f(1, 2), f(4, 8))
"""

if __name__ == '__main__':
    # translate the sample and dump the generated source for inspection
    res = translate_as_module(testcode, tmpname='/tmp/look.py')
| Python |
"""
Some support for genxxx implementations of source generators.
Another name could be genEric, but well...
"""
from __future__ import generators
import sys
from pypy.objspace.flow.model import Block
from pypy.objspace.flow.model import traverse
# ordering the blocks of a graph by source position
def ordered_blocks(graph):
    """Return the blocks of *graph* ordered by source position.

    Blocks are keyed primarily by the bytecode offset of their first
    operation (operation-less blocks sort last) and secondarily by the
    text of their first input variable, to make the order stable.
    """
    annotated = []

    def collect(block):
        if not isinstance(block, Block):
            return
        # first we order by offset in the code string
        if block.operations:
            offset = block.operations[0].offset
        else:
            offset = sys.maxint
        # then we order by input variable name or value
        if block.inputargs:
            tag = str(block.inputargs[0])
        else:
            tag = "dummy"
        annotated.append((offset, tag, block))

    traverse(collect, graph)
    annotated.sort()
    #for offset, tag, block in annotated:
    #    print offset, tag, block
    return [blk for offset, tag, blk in annotated]
# a unique list, similar to a list.
# append1 appends an object only if it is not there, already.
class UniqueList(list):
    """A list whose append1() only appends items not already present.

    Hashable items are tracked in an auxiliary dict for O(1) lookup;
    unhashable items fall back to a linear membership scan.
    """
    def __init__(self, *args, **kwds):
        list.__init__(self, *args, **kwds)
        self.dic = {}
        # bug fix: register any initial (hashable) elements too, so the
        # dict fast path agrees with the linear-scan fallback and
        # append1 cannot duplicate an item the list started with
        for item in self:
            try:
                self.dic[item] = 1
            except TypeError:   # not hashable; linear scan covers it
                pass

    def append1(self, arg):
        """Append *arg* unless it is already contained."""
        try:
            self.dic[arg]
        except KeyError:
            # first occurrence of this hashable item
            self.dic[arg] = 1
            list.append(self, arg)
        except TypeError: # not hashable
            if arg not in self:
                list.append(self, arg)
def builtin_base(obj):
    """Shortcut: the non-heap (builtin) base type of type(obj)."""
    return builtin_type_base(type(obj))
def builtin_type_base(typ):
    """Walk __base__ upwards until a non-heap (builtin) type is found."""
    from copy_reg import _HEAPTYPE
    base = typ
    while base.__flags__ & _HEAPTYPE:
        base = base.__base__
    return base
def c_string(s):
    """Return *s* rendered as a double-quoted C string literal.

    Backslashes and double quotes are escaped.  Bug fix: the original
    replaced '"' with '\"', which is the same one-character string, so
    embedded quotes were left unescaped and produced invalid literals;
    the replacement must be a backslash followed by a quote.
    """
    return '"%s"' % (s.replace('\\', '\\\\').replace('"', '\\"'),)
def uniquemodulename(name, SEEN={}):
    """Return '<name>_<i>' with the smallest i not yet handed out.

    The mutable default SEEN is intentional: it persists across calls
    so a module name is never reused within one Python session.
    """
    counter = 0
    while True:
        counter += 1
        candidate = '%s_%d' % (name, counter)
        if candidate in SEEN:
            continue
        SEEN[candidate] = True
        return candidate
# a translation table suitable for str.translate() to remove
# non-C characters from an identifier
C_IDENTIFIER = ''.join([(('0' <= chr(i) <= '9' or
'a' <= chr(i) <= 'z' or
'A' <= chr(i) <= 'Z') and chr(i) or '_')
for i in range(256)])
# a name manager knows about all global and local names in the
# program and keeps them disjoint. It provides ways to generate
# shorter local names with and without wrapping prefixes,
# while always keeping all globals visible.
class NameManager(object):
    """Allocate identifiers for generated code, keeping every global
    and local name in the program disjoint.

    make_reserved_names() pre-registers names that must never be
    generated; uniquename() hands out C-safe, optionally numbered
    global names; localScope() opens a nested scope (a _LocalScope).
    """
    def __init__(self, global_prefix='', number_sep='_'):
        self.seennames = {}
        self.scope = 0
        self.scopelist = []
        self.global_prefix = global_prefix
        self.number_sep = number_sep

    def make_reserved_names(self, txt):
        """add names to list of known names. If one exists already,
        then we raise an exception. This function should be called
        before generating any new names."""
        for name in txt.split():
            if name in self.seennames:
                # bug fix: the original raised NameError with a bare
                # "%s has already been seen!" format string, never
                # interpolating the offending name
                raise NameError("%s has already been seen!" % (name,))
            self.seennames[name] = 1

    def _ensure_unique(self, basename):
        # recursively append _<n> until an unseen name is produced
        n = self.seennames.get(basename, 0)
        self.seennames[basename] = n+1
        if n:
            return self._ensure_unique('%s_%d' % (basename, n))
        return basename

    def uniquename(self, basename, with_number=None, bare=False, lenmax=50):
        """Return a fresh global name derived from *basename*.

        The name is truncated to *lenmax* characters, squeezed through
        C_IDENTIFIER, optionally numbered, and prefixed with
        self.global_prefix.  With bare=True, both the bare and the
        prefixed form are returned.
        """
        basename = basename[:lenmax].translate(C_IDENTIFIER)
        n = self.seennames.get(basename, 0)
        self.seennames[basename] = n+1
        if with_number is None:
            # anonymous variable stems always get a number
            with_number = basename in ('v', 'w_')
        fmt = '%%s%s%%d' % self.number_sep
        if with_number and not basename[-1].isdigit():
            fmt = '%s%d'
        if n != 0 or with_number:
            basename = self._ensure_unique(fmt % (basename, n))
        if bare:
            return basename, self.global_prefix + basename
        else:
            return self.global_prefix + basename

    def localScope(self, parent=None):
        """Open a local scope nested below *parent* (or the manager)."""
        ret = _LocalScope(self, parent)
        while ret.scope >= len(self.scopelist):
            self.scopelist.append({})
        return ret
class _LocalScope(object):
    """track local names without hiding globals or nested locals"""

    def __init__(self, glob, parent):
        # glob: the owning NameManager; parent: the enclosing
        # _LocalScope, or the NameManager itself for a top-level scope
        self.glob = glob
        if not parent:
            parent = glob
        self.parent = parent
        self.mapping = {}
        self.usednames = {}
        self.scope = parent.scope + 1

    def uniquename(self, basename):
        """Return a name unique within this scope; sibling scopes at
        the same depth may reuse it (names per depth are cached in
        glob.scopelist so globals stay disjoint)."""
        basename = basename.translate(C_IDENTIFIER)
        glob = self.glob
        p = self.usednames.get(basename, 0)
        self.usednames[basename] = p+1
        namesbyscope = glob.scopelist[self.scope]
        namelist = namesbyscope.setdefault(basename, [])
        if p == len(namelist):
            namelist.append(glob.uniquename(basename))
        return namelist[p]

    def localname(self, name, wrapped=False):
        """modify and mangle local names"""
        if name in self.mapping:
            return self.mapping[name]
        scorepos = name.rfind("_")
        if name.startswith("v") and name[1:].isdigit():
            # anonymous flow-graph variable like 'v123'
            basename = ('v', 'w_') [wrapped]
        elif scorepos >= 0 and name[scorepos+1:].isdigit():
            basename = name[:scorepos]
            # for wrapped named things, prepend a w_
            # for other named things, prepend a l_.
            # XXX The latter is needed because tcc has a nasty parser bug that
            # produces errors if names co-incide with global typedefs,
            # if the type prefix is itself a typedef reference!
            # XXX report this bug to the tcc maintainer(s)
            # YYY drop this comment afterwards, but keep the code, it's better.
            basename = ("l_", "w_")[wrapped] + basename
        else:
            basename = name
        ret = self.uniquename(basename)
        self.mapping[name] = ret
        return ret
| Python |
from py.compat import optparse
import autopath
from pypy.translator.translator import TranslationContext
from pypy.translator import driver
# config overrides handed to TranslationDriver: backend and type system
# start unset and are filled in lazily via Translation.ensure_*()
DEFAULTS = {
  'translation.backend': None,
  'translation.type_system': None,
  'translation.verbose': True,
}
class Translation(object):
    """Convenience front-end around TranslationDriver.

    Accumulates translation options via update_options(); the backend
    and type system are fixed lazily on first use, and the driver is
    set up right before its first goal runs (via the _event hook).
    """

    def __init__(self, entry_point, argtypes=None, **kwds):
        self.driver = driver.TranslationDriver(overrides=DEFAULTS)
        self.config = self.driver.config
        self.entry_point = entry_point
        self.context = TranslationContext(config=self.config)

        # hook into driver events so setup happens lazily before the
        # first goal, while still forwarding to the driver's own hook
        driver_own_event = self.driver._event
        def _event(kind, goal, func):
            self.driver_event(kind, goal, func)
            driver_own_event(kind, goal, func)
        self.driver._event = _event
        self.driver_setup = False

        self.update_options(argtypes, kwds)
        # for t.view() to work just after construction
        graph = self.context.buildflowgraph(entry_point)
        self.context._prebuilt_graphs[entry_point] = graph

    def view(self):
        """Open the flow-graph viewer."""
        self.context.view()

    def viewcg(self):
        """Open the call-graph viewer."""
        self.context.viewcg()

    def driver_event(self, kind, goal, func):
        # called around every driver goal; 'pre' triggers lazy setup
        if kind == 'pre':
            #print goal
            self.ensure_setup()
        elif kind == 'post':
            pass

    def ensure_setup(self, argtypes=None, policy=None, standalone=False):
        """Set up the driver once; later calls only verify that the
        supplied argtypes/policy agree with the first setup."""
        if not self.driver_setup:
            if standalone:
                assert argtypes is None
            else:
                if argtypes is None:
                    argtypes = []
            self.driver.setup(self.entry_point, argtypes, policy,
                              empty_translator=self.context)
            self.ann_argtypes = argtypes
            self.ann_policy = policy
            self.driver_setup = True
        else:
            # check consistency
            if standalone:
                assert argtypes is None
                assert self.ann_argtypes is None
            elif argtypes is not None and argtypes != self.ann_argtypes:
                raise Exception("inconsistent argtype supplied")
            if policy is not None and policy != self.ann_policy:
                # bug fix: message used to read "annotation polish"
                raise Exception("inconsistent annotation policy supplied")

    def update_options(self, argtypes, kwds):
        """Fold *kwds* into the translation config; the 'policy' and
        'standalone' keys are routed to ensure_setup() instead."""
        if argtypes or kwds.get('policy') or kwds.get('standalone'):
            self.ensure_setup(argtypes, kwds.get('policy'),
                              kwds.get('standalone'))
        kwds.pop('policy', None)
        kwds.pop('standalone', None)
        self.config.translation.set(**kwds)

    def ensure_opt(self, name, value=None, fallback=None):
        """Return option *name*, setting it from *value* or *fallback*
        when still unset; raise when no value can be determined."""
        if value is not None:
            self.update_options(None, {name: value})
            return value
        val = getattr(self.config.translation, name, None)
        if fallback is not None and val is None:
            self.update_options(None, {name: fallback})
            return fallback
        if val is not None:
            return val
        raise Exception(
            "the %r option should have been specified at this point" %name)

    def ensure_type_system(self, type_system=None):
        """Choose the type system ('lltype' default) unless the chosen
        backend already forces one."""
        if self.config.translation.backend is not None:
            return self.ensure_opt('type_system')
        return self.ensure_opt('type_system', type_system, 'lltype')

    def ensure_backend(self, backend=None):
        """Fix the backend and make sure a type system is chosen."""
        backend = self.ensure_opt('backend', backend)
        self.ensure_type_system()
        return backend

    # disable some goals (steps)
    def disable(self, to_disable):
        self.driver.disable(to_disable)

    # backend independent

    def annotate(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        return self.driver.annotate()

    # type system dependent

    def rtype(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        ts = self.ensure_type_system()
        return getattr(self.driver, 'rtype_'+ts)()

    def backendopt(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        ts = self.ensure_type_system('lltype')
        return getattr(self.driver, 'backendopt_'+ts)()

    # backend dependent

    def source(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        backend = self.ensure_backend()
        getattr(self.driver, 'source_'+backend)()

    def source_c(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        self.ensure_backend('c')
        self.driver.source_c()

    def source_llvm(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        self.ensure_backend('llvm')
        self.driver.source_llvm()

    def source_js(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        self.ensure_backend('js')
        self.driver.source_js()
        # unlike the other source_* methods, returns the generated text
        return open(str(self.driver.gen.filename)).read()

    def source_cl(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        self.ensure_backend('cl')
        self.driver.source_cl()

    def compile(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        backend = self.ensure_backend()
        getattr(self.driver, 'compile_'+backend)()
        return self.driver.c_entryp

    def compile_c(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        self.ensure_backend('c')
        self.driver.compile_c()
        return self.driver.c_entryp

    def compile_llvm(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        self.ensure_backend('llvm')
        self.driver.compile_llvm()
        return self.driver.c_entryp

    def compile_cli(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        self.ensure_backend('cli')
        self.driver.compile_cli()
        return self.driver.c_entryp

    def source_cli(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        self.ensure_backend('cli')
        self.driver.source_cli()

    def compile_jvm(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        self.ensure_backend('jvm')
        self.driver.compile_jvm()
        return self.driver.c_entryp

    def source_jvm(self, argtypes=None, **kwds):
        self.update_options(argtypes, kwds)
        self.ensure_backend('jvm')
        self.driver.source_jvm()
| Python |
"""
generate Pyrex files from the flowmodel.
"""
from pypy.interpreter.baseobjspace import ObjSpace
from pypy.interpreter.argument import Arguments
from pypy.objspace.flow.model import Variable, Constant
from pypy.objspace.flow.model import mkentrymap, last_exception
from pypy.annotation.annrpython import RPythonAnnotator
from pypy.annotation.model import SomePBC
from pypy.annotation.description import MethodDesc
from pypy.annotation.classdef import ClassDef
from pypy.tool.uid import uid
import inspect
class Op:
    """Render one flow-graph operation as a line (or lines) of Pyrex.

    The constructor pre-computes the textual names of the operation's
    arguments and result; calling the instance returns the statement.
    """
    def __init__(self, operation, gen, block):
        self._str = gen._str
        self.gen = gen
        self.argnames = [self._str(arg, block) for arg in operation.args]
        self.resultname = self._str(operation.result, block)
        self.op = operation
        #op.opname

    def __call__(self):
        """Return the Pyrex source for this operation."""
        operator = self.gen.ops.get(self.op.opname, self.op.opname)
        args = self.argnames
        if not (operator[0] >= "a" and operator[0] <= "z"):
            # symbolic operator such as '+', '-', '**'
            if len(args) == 1:
                # bug fix: the original did "%s = %s %s" % (res, op) + args,
                # which both under-fills the format and adds a list to a
                # string — a guaranteed TypeError
                return "%s = %s %s" % (self.resultname, operator, args[0])
            elif len(args) == 2:
                #Inplace operators
                inp=['+=','-=','*=','/=','%=','&=','|=','^=','//=',
                     '<<=','>>=','**=']
                if operator in inp:
                    return "%s = %s; %s %s %s" % (self.resultname, args[0],
                                                  self.resultname, operator, args[1])
                else:
                    return "%s = %s %s %s" % (self.resultname, args[0], operator, args[1])
            elif len(args) == 3 and operator == "**": #special case, have to handle it manually
                # bug fix: interpolate all four values into the format
                # string instead of concatenating the leftover list
                return "%s = pow(%s, %s, %s)" % ((self.resultname,) + tuple(args))
            else:
                raise NotImplementedError("I don't know to handle the operator %s (arity %s)"
                                          % (operator, len(args)))
        else:
            # named operation: dispatch to op_<name>, default generic_op
            method = getattr(self, "op_%s" % operator, self.generic_op)
            return method()

    def ispythonident(self, s):
        """True when *s* is a plain (ASCII) Python identifier."""
        if s[0] not in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_":
            return False
        for c in s[1:]:
            if (c not in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
                         "0123456789"):
                return False
        return True

    def generic_op(self):
        """Generic handler for all operators, which I don't handle explicitly"""
        return "%s = %s(%s)" % (self.resultname, self.op.opname, ", ".join(self.argnames))

    def op_next(self):
        args = self.argnames
        return "%s = %s.next()" % (self.resultname, args[0])

    def op_contains(self):
        args = self.argnames
        return "%s = %s in %s" % (self.resultname, args[1], args[0])

    def op_getitem(self):
        direct = "%s = %s[%s]" % ((self.resultname,) + tuple(self.argnames))
        w_sequence, w_index = self.op.args
        tp = self.gen.get_type(w_index)
        if tp is int:
            return direct
        else:
            # the index could be a slice
            indexname = self.argnames[1]
            lines = []
            if tp is slice:  # XXX do this better
                lines.append('if 1:')
            else:
                lines.append('from types import SliceType')
                lines.append('if isinstance(%s, SliceType):' % indexname)
            lines.append('    assert %s.step is None' % indexname)
            lines.append('    %s = %s[%s.start:%s.stop]' % (self.resultname,
                                                            self.argnames[0],
                                                            indexname,
                                                            indexname))
            lines.append('else:')
            lines.append('    ' + direct)
            return "\n".join(lines)

    def op_newtuple(self):
        if self.argnames:
            return "%s = (%s,)" % (self.resultname, ", ".join(self.argnames))
        else:
            return "%s = ()" % self.resultname

    def op_newlist(self):
        if self.argnames:
            return "%s = [%s,]" % (self.resultname, ", ".join(self.argnames))
        else:
            return "%s = []" % self.resultname

    def op_newdict(self):
        return "%s = {}" % (self.resultname,)

    def op_newslice(self):
        a = self.argnames
        return "%s = slice(%s, %s, %s)" % (self.resultname, a[0], a[1], a[2])

    def op_call_args(self):
        # re-expand a shaped argument list into a normal call
        a = self.argnames
        shape = self.op.args[1].value
        args = Arguments.fromshape(None, shape, a[2:])
        lst = args.arguments_w[:]
        for key, value in args.kwds_w.items():
            lst.append("%s=%s" % (key, value))
        if args.w_stararg is not None:
            lst.append("*%s" % args.w_stararg)
        if args.w_starstararg is not None:
            lst.append("**%s" % args.w_starstararg)
        return "%s = %s(%s)" % (self.resultname, a[0], ", ".join(lst))

    def op_simple_call(self):
        a = self.argnames
        return "%s = %s(%s)" % (self.resultname, a[0], ", ".join(a[1:]))

    def op_setitem(self):
        a = self.argnames
        return "%s[%s] = %s" % (a[0], a[1], a[2])

    def op_getattr(self):
        args = self.argnames
        attr = self.op.args[1]
        if isinstance(attr, Constant) and self.ispythonident(attr.value):
            return "%s = %s.%s" % (self.resultname, args[0], attr.value)
        else:
            return "%s = getattr(%s)" % (self.resultname, ", ".join(args))

    def op_setattr(self):
        args = self.argnames
        attr = self.op.args[1]
        if isinstance(attr, Constant) and self.ispythonident(attr.value):
            return "%s.%s = %s" % (args[0], attr.value, args[2])
        else:
            # bug fix: '%' applied to a list does not unpack it; the
            # original raised TypeError here — convert to a tuple
            return "setattr(%s, %s, %s)" % tuple(args)

    def op_not(self):
        return "%s = not %s" % (self.resultname, self.argnames[0])

    def op_is_true(self):
        return "%s = not not %s" % (self.resultname, self.argnames[0])
class GenPyrex:
def __init__(self, functiongraph):
    """Prepare a Pyrex generator for *functiongraph*.

    Builds opname -> symbol and opname -> arity tables from the flow
    object space's MethodTable; the annotator (used for typing) is
    attached later via annotate()/setannotator().
    """
    self.functiongraph = functiongraph
    ops = {}
    oparity = {}
    for (opname, opsymbol, arity, _) in ObjSpace.MethodTable:
        ops[opname] = opsymbol
        oparity[opname] = arity
    self.ops = ops
    self.oparity = oparity
    self.annotator = None
    # maps function objects to their generated names (see getfunctionname)
    self.namecache = {}
def annotate(self, input_arg_types):
    """Run the RPython annotator over the graph for the given input
    argument types and attach the resulting annotator."""
    annotator = RPythonAnnotator()
    annotator.build_types(self.functiongraph, input_arg_types)
    self.setannotator(annotator)
def setannotator(self, annotator):
    # attach (or replace) the annotator used for all typing decisions
    self.annotator = annotator
def emitcode(self, public=True):
    """Return the complete Pyrex source for the function as one string.

    Resets the per-emission state (block ids, line buffer, indent)
    before generating.
    """
    self.blockids = {}
    #self.variablelocations = {}
    self.lines = []
    self.indent = 0
    self.gen_graph(public)
    return "\n".join(self.lines)
def putline(self, line):
    """Append *line* to the output buffer, splitting embedded newlines
    and prefixing each resulting line with the current indentation.

    NOTE(review): the indent unit was reconstructed as four spaces —
    the source paste collapsed whitespace runs; confirm against the
    original file.
    """
    prefix = "    " * self.indent
    for piece in line.split('\n'):
        self.lines.append(prefix + piece)
def gen_graph(self, public=True):
    """Emit the whole function.

    The body is generated first (into a side buffer) so that variable
    types and block ids are known; then the public wrapper (optional),
    the mangled 'cdef' header and the local declarations are emitted,
    and finally the buffered body lines are appended.
    """
    fun = self.functiongraph
    self.entrymap = mkentrymap(fun)
    currentlines = self.lines
    self.lines = []
    self.indent += 1
    self.gen_block(fun.startblock)
    self.indent -= 1

    # emit the header after the body
    functionbodylines = self.lines
    self.lines = currentlines
    inputargnames = [ " ".join(self._paramvardecl(var)) for var in fun.getargs() ]
    params = ", ".join(inputargnames)
    returntype = self.get_type(fun.getreturnvar())
    returntypename = self._gettypename(returntype)
    try:
        function_object = self.by_the_way_the_function_was # XXX!
    except AttributeError:
        def function_object(): pass # XXX!!!

    if public:
        # make the function visible from the outside
        # under its original name
        args = ', '.join([var.name for var in fun.getargs()])
        self.putline("def %s(%s):" % (fun.name.split('.')[-1], args))
        self.indent += 1
        self.putline("return %s(%s)" % (
            self.getfunctionname(function_object), args))
        self.indent -= 1

    # go ahead with the mandled header and body of the function
    self.putline("cdef %s %s(%s):" % (
        returntypename,
        self.getfunctionname(function_object),
        params))
    self.indent += 1
    #self.putline("# %r" % self.annotations)
    decllines = []
    missing_decl = []
    funargs = fun.getargs()
    # declare every typed local; collect untyped ones for a comment
    for block in self.blockids:
        for var in block.getvariables():
            if var not in funargs:
                decl = self._vardecl(var)
                if decl:
                    decllines.append(decl)
                else:
                    missing_decl.append(self.get_varname(var))
    if missing_decl:
        missing_decl.sort()
        decllines.append('# untyped variables: ' + ' '.join(missing_decl))
    decllines.sort()
    for decl in decllines:
        self.putline(decl)
    self.indent -= 1
    self.lines.extend(functionbodylines)
def get_type(self, var):
    """Return the (annotated) type of *var*.

    Constants map to their value's class — or its ClassDef when the
    annotator's bookkeeper knows one; Variables are looked up in the
    annotator.  Returns None when no annotator is attached.
    """
    if isinstance(var, Constant):
        tp = var.value.__class__
        if self.annotator and tp in self.annotator.bookkeeper.descs:
            classdesc = self.annotator.bookkeeper.descs[tp]
            return classdesc.getuniqueclassdef()
        return type(var.value)
    elif self.annotator:
        return self.annotator.gettype(var)
    else:
        return None
def get_varname(self, var):
    """Return the Pyrex name for *var*, with a type-derived prefix:
    'i_' for int/bool, 'p_' for instances of annotated classes."""
    vartype = self.get_type(var)
    if vartype in (int, bool):
        return "i_" + var.name
    if isinstance(vartype, ClassDef):
        return "p_" + var.name
    return var.name
def _paramvardecl(self, var):
    """Return the (type-name, mangled-name) pair for parameter *var*."""
    vartype = self.get_type(var)
    ctype=self._gettypename(vartype)
    return (ctype, self.get_varname(var))
def _gettypename(self, vartype):
    """Map an annotation-level type to a Pyrex type name: 'int' for
    int/bool, the generated class name for ClassDefs, 'object'
    otherwise."""
    if vartype in (int, bool):
        return "int"
    if isinstance(vartype, ClassDef):
        return self.getclassname(vartype)
    return "object"
def _vardecl(self, var):
    """Return a 'cdef <type> <name>' declaration line for *var*, or an
    empty string when the variable is a plain object (undeclared)."""
    vartype, varname = self._paramvardecl(var)
    if vartype == "object":
        return ""
    return "cdef %s %s" % (vartype, varname)
def getclassname(self,cls):
    """Return the generated name for a ClassDef.

    Exception classes keep their plain short name so that generated
    'except' clauses still match; other classes get a uid suffix.
    """
    assert isinstance(cls, ClassDef)
    name = cls.shortname
    if cls.issubclass(self.annotator.bookkeeper.getuniqueclassdef(Exception)):
        return name
    return '%s__%x' % (name, uid(cls))#self._hackname(cls)
def getfunctionname(self,func):
    """Return (and cache) the generated name for *func*."""
    # NB. the purpose of the cache is not performance, but to ensure that
    # two methods that compare equal get the same name.
    if inspect.ismethod(func) and func.im_self is None:
        func = func.im_func # consider unbound methods as plain functions
    try:
        return self.namecache[func]
    except KeyError:
        assert inspect.isfunction(func) or inspect.ismethod(func)
        name = '%s__%x' % (func.__name__, uid(func))#self._hackname(func)
        self.namecache[func] = name
        return name
def getvarname(self,var):
    # NOTE(review): _hackname is not defined anywhere in the visible
    # part of this file (it only appears in comments elsewhere), so
    # calling this would raise AttributeError — presumably dead code;
    # confirm before relying on it.
    assert inspect.isclass(var)
    return self._hackname(var)
def _str(self, obj, block):
    """Return the Pyrex expression text for *obj* (a Variable or a
    Constant) as used inside *block*."""
    if isinstance(obj, Variable):
        #self.variablelocations[obj] = block
        return self.get_varname(obj)
    elif isinstance(obj, Constant):
        import types
        if isinstance(obj.value,(types.ClassType,type)):
            bk = self.annotator.bookkeeper
            fff=self.getclassname(bk.getuniqueclassdef(obj.value))
        elif isinstance(obj.value,(types.FunctionType,
                                   types.MethodType,
                                   type)):
            # NOTE(review): 'type' is unreachable in this branch — new-
            # style classes are already caught by the branch above
            fff=self.getfunctionname(obj.value)
        elif isinstance(obj.value, types.BuiltinFunctionType):
            fff=str(obj.value.__name__)
        else:
            #fff=self._hackname(obj.value)
            fff=repr(obj.value)
            if isinstance(obj.value,( int,long)):
                # normalize bools/longs to a plain int literal
                fff = repr(int(obj.value))
        return fff
    else:
        raise TypeError("Unknown class: %s" % obj.__class__)
def gen_block(self, block):
    """Emit one flow-graph block and, recursively, its successors.

    An already-emitted block is re-entered via an inline C goto; a
    label is only written for blocks with several predecessors.  A
    block whose exitswitch is last_exception gets its final operation
    wrapped in try/except with one handler per exceptional exit.
    """
    if self.blockids.has_key(block):
        self.putline('cinline "goto Label%s;"' % self.blockids[block])
        return

    blockids = self.blockids
    blockids.setdefault(block, len(blockids))

    #the label is only written if there are multiple refs to the block
    if len(self.entrymap[block]) > 1:
        self.putline('cinline "Label%s:"' % blockids[block])

    if block.exitswitch == Constant(last_exception):
        # index of the (last) operation that is exception-guarded
        catch_exc = len(block.operations)-1
    else:
        catch_exc = None

    for i, op in zip(range(len(block.operations)), block.operations):
        if i == catch_exc:
            self.putline("try:")
            self.indent += 1
        opg = Op(op, self, block)
        self.putline(opg())
        if i == catch_exc:
            # generate all exception handlers
            self.indent -= 1
            exits = block.exits
            for exit in exits[1:]:
                self.putline("except %s, last_exc_value:" %
                             exit.exitcase.__name__)
                self.indent += 1
                self.putline("last_exception = last_exc_value.__class__")
                self.gen_link(block, exit)
                self.indent -= 1
            self.putline("else:")   # no-exception case
            self.indent += 1
            assert exits[0].exitcase is None
            self.gen_link(block, exits[0])
            self.indent -= 1
            break
    else:
        # no exception-guarded operation: plain exits
        exits = block.exits
        if len(exits) == 1:
            self.gen_link(block, exits[0])
        elif len(exits) > 1:
            # multi-way switch on the exitswitch value
            varname = self._str(block.exitswitch, block)
            for i in range(len(exits)):
                exit = exits[-i-1]   # reverse order
                cond = self._str(Constant(exit.exitcase), block)
                if i == 0:
                    self.putline("if %s == %s:" % (varname, cond))
                elif i < len(exits) - 1:
                    self.putline("elif %s == %s:" % (varname, cond))
                else:
                    self.putline("else: # %s == %s" % (varname, cond))
                self.indent += 1
                self.gen_link(block, exit)
                self.indent -= 1
        elif len(block.inputargs) == 2:   # exc_cls, exc_value
            # exceptional return block
            exc_cls = self._str(block.inputargs[0], block)
            exc_value = self._str(block.inputargs[1], block)
            self.putline("raise %s, %s" % (exc_cls, exc_value))
        else:
            # regular return block
            self.putline("return %s" % self._str(block.inputargs[0], block))
def gen_link(self, prevblock, link):
    """Emit the multiple-assignment that carries values along *link*,
    then generate (or jump to) the link's target block."""
    target = link.target
    assert len(link.args) == len(target.inputargs)
    # Drop identity assignments: passing a variable to itself needs no code.
    pairs = [(src, dst) for src, dst in zip(link.args, target.inputargs)
             if src != dst]
    if pairs:
        rhs = ", ".join([self._str(src, prevblock) for src, dst in pairs])
        lhs = ", ".join([self._str(dst, target) for src, dst in pairs])
        self.putline("%s = %s" % (lhs, rhs))
    self.gen_block(target)
def globaldeclarations(self,):
    """Generate the global class declaration for a group of functions.

    Walks every ClassDef known to the annotator, emits a 'cdef class'
    for each (attributes as typed public fields, methods as thin
    forwarders to the generated functions), and returns the whole
    declaration section as one string.  Returns '' without an annotator.
    """
    if self.annotator:
        self.lines = []
        self.indent = 0
        # Methods are discovered while scanning attribute annotations,
        # but must be emitted on the class that originally defines them.
        delay_methods={}
        for cls in self.annotator.bookkeeper.classdefs:
            if cls.basedef:
                bdef="(%s)" % (self.getclassname(cls.basedef))
            else:
                bdef=""
            self.putline("cdef class %s%s:" % (self.getclassname(cls),bdef))
            self.indent += 1
            empty = True
            for attr, attrdef in cls.attrs.items():
                s_value = attrdef.s_value
                if isinstance(s_value, SomePBC):
                    assert s_value.getKind() is MethodDesc, ("don't support "
                                      "prebuilt constants like %r" % (s_value,))
                    # Queue each method under its defining class.
                    for methdesc in s_value.descriptions:
                        meth_class = methdesc.originclassdef
                        delay_methods.setdefault(meth_class,[]).append(methdesc)
                else:
                    # Plain data attribute: emit a typed public field.
                    vartype=self._gettypename(s_value.knowntype)
                    self.putline("cdef public %s %s" % (vartype, attr))
                    empty = False
            list_methods=delay_methods.get(cls,[])
            for methdesc in list_methods:
                # XXX!
                graph = methdesc.funcdesc.getuniquegraph()
                hackedargs = ', '.join([var.name for var in graph.getargs()])
                name = graph.name.split('.')[-1]
                # Emit a forwarding method that simply calls the
                # generated function for the same graph.
                self.putline("def %s(%s):" % (name, hackedargs))
                self.indent += 1
                # XXX special case hack: cannot use 'return' in __init__
                if name == "__init__":
                    statement = ""
                else:
                    statement = "return "
                self.putline("%s%s(%s)" % (statement,
                                           self.getfunctionname(graph.func),
                                           hackedargs))
                self.indent -= 1
                empty = False
            if empty:
                # Pyrex requires a body even for an otherwise empty class.
                self.putline("pass")
            self.indent -= 1
            self.putline("")
        return '\n'.join(self.lines)
    else:
        return ''
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.