code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised.

    (Python 2 only: uses the `raise E, msg` statement and dict.iteritems.)
    """
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ is unset when the code is exec'd; fall back to argv[0]
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # walk upwards until a path component named `part` is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        # loop exhausted without finding `part` anywhere above us
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)
    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    # make sure the parent of the `part` directory is first on sys.path
    sys.path.insert(0, head)
    # register dotted-name aliases for modules that were already imported
    # under a bare (top-level) name but actually live inside the pypy tree,
    # so that e.g. `import pypy.x.y` finds the same module object
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            # also bind the module as an attribute of its parent package
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
    """ clone master version of autopath.py into all subdirs """
    from os.path import join, walk
    # refuse to run from a copy: only pypy/tool/autopath.py is the master
    if not this_dir.endswith(join('pypy','tool')):
        raise EnvironmentError("can only clone master version "
                               "'%s'" % join(pypydir, 'tool',_myname))
    def sync_walker(arg, dirname, fnames):
        # overwrite each autopath.py under the tree with `arg` (the master
        # source) unless it is already identical
        # NOTE(review): 'rwb+' is not a valid open() mode string ('rb+' was
        # probably intended) -- confirm before relying on this branch
        if _myname in fnames:
            fn = join(dirname, _myname)
            f = open(fn, 'rwb+')
            try:
                if f.read() == arg:
                    print "checkok", fn
                else:
                    print "syncing", fn
                    f = open(fn, 'w')
                    f.write(arg)
            finally:
                f.close()
    s = open(join(pypydir, 'tool', _myname), 'rb').read()
    walk(pypydir, sync_walker, s)
_myname = 'autopath.py'

# set guaranteed attributes: every importer of autopath gets pypydir and
# this_dir, and the pypy parent directory is put on sys.path as a side effect
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    # run directly: propagate this master copy to every subdirectory
    __clone()
| Python |
from pypy.rpython.extfunc import genericcallable, register_external
from pypy.translator.flex.modules.flex import add_import, Event, flexTrace
from pypy.rpython.ootypesystem.bltregistry import BasicExternal, MethodDesc
# This demonstrates:
# Global variables (getGlobal, setGlobal), necessary for callback functions to do anything useful
# Text Areas
# Adding multiple kinds of controls to a window (Button and TextArea inherit from Control, Window takes Control type)
# Inherited by TextArea and Button, so that both types can be added to the window
class Control(BasicExternal):
    # Common base class so Window.addChild (declared as taking Control)
    # accepts both TextArea and Button.
    # NOTE(review): _render_class naming TextArea here looks copy-pasted
    # from the TextArea subclass below -- confirm the intended value.
    _render_class = "mx.controls.TextArea"
    pass
add_import("mx.controls.TextArea")

class TextArea(Control):
    # Wraps the Actionscript mx.controls.TextArea control: exposed fields
    # and methods mirror the Actionscript API.
    _render_class = "mx.controls.TextArea"
    _fields = {
        'x': int,
        'y': int,
        'text':str,
        'editable':bool,
        'labelPlacement':str,
    }
    _methods = {
        'addEventListener':MethodDesc([str, genericcallable([Event])]),
        'move': MethodDesc([int, int])
    }
add_import("mx.controls.Button")

class Button(Control):
    # Wraps the Actionscript mx.controls.Button control.
    _render_class = "mx.controls.Button"
    _fields = {
        'x': int,
        'y': int,
        'label':str,
        'labelPlacement':str,
    }
    _methods = {
        'addEventListener':MethodDesc([str, genericcallable([Event])]),
        'move': MethodDesc([int, int])
    }
# These Get and Set from the globally accessible Actionscript Dictionary object (see library.as)
def getGlobal(s):
    # stub body: the real implementation is the Actionscript
    # _consts_0.getGlobal registered below
    pass

register_external(getGlobal, args=[str], export_name="_consts_0.getGlobal", result=TextArea)

def setGlobal(s):
    # stub body: the real implementation is the Actionscript
    # _consts_0.setGlobal registered below
    pass

register_external(setGlobal, args=[str, TextArea], export_name="_consts_0.setGlobal")
# Window stuff; note the use of Control
class Window(BasicExternal):
    # addChild is declared with the Control base type, so any Control
    # subclass (TextArea, Button) can be added
    _methods = {
        'addChild': MethodDesc([Control]),
    }
def castToWindow(i):
    # stub body: bound to the Actionscript _consts_0.castToWindow below
    pass

register_external(castToWindow, args=[int], result=Window,export_name="_consts_0.castToWindow")
# Callback function
def onSubmit(event):
    # copy the "In" TextArea's text into the "Out" TextArea (both were
    # stored via setGlobal in flash_main)
    getGlobal("Out").text = getGlobal("In").text
# Main function
def flash_main( x=1 ):
    """Build the demo UI: two text areas plus a button whose click
    handler (onSubmit) copies the input area into the output area."""
    # create and label the two text areas
    output_area = TextArea()
    input_area = TextArea()
    output_area.text = "Output"
    output_area.editable = False
    input_area.text = "Input"
    # register both globally so the onSubmit callback can find them
    setGlobal("Out", output_area)
    setGlobal("In", input_area)
    # the button that triggers the copy
    copy_button = Button()
    copy_button.label = "Copy Input to Output"
    copy_button.addEventListener("click", onSubmit)
    # attach everything to the window
    window = castToWindow(x)
    window.addChild(output_area)
    window.addChild(input_area)
    window.addChild(copy_button)
| Python |
"""
MoinMoin - Python Source Parser
"""
# Imports
import cgi, string, sys, cStringIO
import keyword, token, tokenize
#############################################################################
### Python Source Parser (does Highlighting)
#############################################################################
# two extra highlight categories, placed above token.NT_OFFSET so they can
# never collide with real token ids
_KEYWORD = token.NT_OFFSET + 1
_TEXT = token.NT_OFFSET + 2

# HTML color per token category
_colors = {
    token.NUMBER: '#0080C0',
    token.OP: '#0000C0',
    token.STRING: '#004080',
    tokenize.COMMENT: '#008000',
    token.NAME: '#000000',
    token.ERRORTOKEN: '#FF8080',
    _KEYWORD: '#C00000',
    _TEXT: '#000000',
}
class Parser:
    """ Send colored python source.

    Python 2 only: relies on the `string` module functions, cStringIO,
    the old tokenize callback protocol and tuple parameters.
    """

    def __init__(self, raw, out = sys.stdout):
        """ Store the source text.
        """
        self.raw = string.strip(string.expandtabs(raw))
        self.out = out

    def format(self, formatter, form):
        """ Parse and send the colored source.
        """
        # store line offsets in self.lines
        self.lines = [0, 0]
        pos = 0
        while 1:
            pos = string.find(self.raw, '\n', pos) + 1
            if not pos: break
            self.lines.append(pos)
        self.lines.append(len(self.raw))
        # parse the source and write it
        self.pos = 0
        text = cStringIO.StringIO(self.raw)
        self.out.write('<pre><font face="Lucida,Courier New">')
        try:
            # tokenize calls self(...) for every token (see __call__)
            tokenize.tokenize(text.readline, self)
        except tokenize.TokenError, ex:
            msg = ex[0]
            line = ex[1][0]
            # show the error and dump the rest of the source unformatted
            self.out.write("<h3>ERROR: %s</h3>%s\n" % (
                msg, self.raw[self.lines[line]:]))
        self.out.write('</font></pre>')

    def __call__(self, toktype, toktext, (srow,scol), (erow,ecol), line):
        """ Token handler.
        """
        # debugging aid, disabled
        if 0:
            print "type", toktype, token.tok_name[toktype], "text", toktext,
            print "start", srow,scol, "end", erow,ecol, "<br>"
        # calculate new positions
        oldpos = self.pos
        newpos = self.lines[srow] + scol
        self.pos = newpos + len(toktext)
        # handle newlines
        if toktype in [token.NEWLINE, tokenize.NL]:
            self.out.write('\n')
            return
        # send the original whitespace, if needed
        if newpos > oldpos:
            self.out.write(self.raw[oldpos:newpos])
        # skip indenting tokens
        if toktype in [token.INDENT, token.DEDENT]:
            self.pos = newpos
            return
        # map token type to a color group
        if token.LPAR <= toktype and toktype <= token.OP:
            toktype = token.OP
        elif toktype == token.NAME and keyword.iskeyword(toktext):
            toktype = _KEYWORD
        color = _colors.get(toktype, _colors[_TEXT])
        style = ''
        if toktype == token.ERRORTOKEN:
            style = ' style="border: solid 1.5pt #FF0000;"'
        # send text
        self.out.write('<font color="%s"%s>' % (color, style))
        self.out.write(cgi.escape(toktext))
        self.out.write('</font>')
if __name__ == "__main__":
    import os, sys
    print "Formatting..."
    # read the chimp example as input
    source = open('chimp.py').read()
    # write colorized version to "chimp.html"
    Parser(source, open('chimp.html', 'wt')).format(None, None)
#!/usr/bin/env python
"""
This simple example has very little to do with the pygame
chimp example, except that it will act the same (more or less)
and it uses the same resources, only they got converted to
mp3s, pngs.
"""
#Import Modules
from pypy.translator.flex.modules.flex import *
from pypy.rpython.ootypesystem.bltregistry import BasicExternal, MethodDesc
class MyRect:
    """Minimal rectangle record: position (x, y) and size (w, h)."""

    def __init__(self, x, y, w, h):
        # keep the four coordinates as separate attributes
        self.x = x
        self.y = y
        self.w = w
        self.h = h

# stage dimensions in pixels
SCREEN_W = 468
SCREEN_H = 60
class Game:
    """All sprites and animation state for the chimp demo."""

    def __init__(self):pass

    def init(self,screen):
        # wire up the window, load sprites/sounds, reset animation state
        self.screen = screen
        screen.layout = 'absolute'
        screen.setActualSize(SCREEN_W, SCREEN_H)
        screen.addEventListener('mouseMove',mousemotion)
        screen.addEventListener('enterFrame',do_loop)
        screen.addEventListener('click',chimp_whip)
        self.bg = load_sprite("py_background_png");
        self.screen.addChild(self.bg)
        self.chimp = load_sprite("py_chimp_png");
        self.screen.addChild(self.chimp)
        self.orig_y = self.chimp.y
        img2 = self.fist = load_sprite("py_fist_png")
        self.screen.addChild(img2)
        img2.move(400,img2.height/2)
        # move: chimp direction (+1/-1); spin: frames left spinning;
        # hit: frames left in the punch; hit_move: fist direction
        self.move = 1
        self.spin = 0
        self.hit = 0
        self.hit_move = 1
        self.sfx = {}
        self.sfx['whip'] = load_sound_resource("py_punch_mp3")
        self.sfx['nohit'] = load_sound_resource("py_whiff_mp3")

    def loop(self):
        # per-frame update: spin the chimp if it was hit, otherwise bounce
        # it between the screen edges; animate the fist while punching
        img = self.chimp
        if self.spin:
            self.spin -= 1
            img.rotation = self.spin*24
        else:
            img.x += self.move * 8
            if img.x > SCREEN_W-img.width:
                self.move = -1
            if img.x < 0:
                self.move = 1
        if self.hit:
            self.hit -= 1
            self.fist.y += 6 * self.hit_move
            if self.hit <= 5:
                self.hit_move = -1

    def paint(self,screen):
        # flash repaints sprites itself; nothing to do
        pass
# the single global game instance; initialized in flash_main
game = Game()

def mousemotion(e):
    # follow the mouse horizontally with the fist sprite, clamped so the
    # sprite stays fully on screen
    img = game.fist
    img_halfw = img.width / 2
    newx = e.stageX - img_halfw
    # don't reach the borders
    if e.stageX > SCREEN_W - img_halfw:
        newx = SCREEN_W - img.width
    if newx <= 0:
        newx = 0
    img.x = newx
def do_loop(e):
    # enterFrame handler: advance the animation one frame
    game.loop()
def chimp_whip(e):
    # click handler: start a punch; if the click is horizontally over the
    # chimp it is a hit (punch sound + spin), otherwise a whiff
    img = game.chimp
    game.hit = 10
    game.hit_move = 1
    game.fist.y=game.fist.height/2
    if e.stageX > img.x and e.stageX < img.x+img.width:
        game.sfx['whip'].play()
        game.spin = 20
    else:
        game.sfx['nohit'].play()
def flash_main( x=1 ):
    # entry point: bind the game to the flash window identified by x
    game.init(castToWindow(x))
| Python |
import os

def cmd(c):
    """Echo the shell command *c*, run it, and raise RuntimeError on a
    non-zero exit status."""
    print(c)
    if os.system(c):
        raise RuntimeError

def main():
    """Compile the flex output and open it in firefox; each step runs
    only if the previous one succeeded."""
    try:
        cmd('./py2flex.sh')
        cmd('~/flex/bin/mxmlc -warnings=false output.mxml')
        cmd('firefox ./output.swf')
    except RuntimeError:
        # was a bare `except: pass`, which also swallowed KeyboardInterrupt
        # and SystemExit; only a failed command should stop the chain
        pass

if __name__=='__main__': main()
| Python |
#!/usr/bin/env python
"""
This simple example has very little to do with the pygame
chimp example, except that it will act the same (more or less)
and it uses the same resources, only they got converted to
mp3s, pngs.
"""
#Import Modules
from pypy.translator.flex.modules.flex import *
from pypy.rpython.ootypesystem.bltregistry import BasicExternal, MethodDesc
class MyRect:
    """Minimal rectangle record: position (x, y) and size (w, h)."""

    def __init__(self, x, y, w, h):
        # keep the four coordinates as separate attributes
        self.x = x
        self.y = y
        self.w = w
        self.h = h

# stage dimensions in pixels
SCREEN_W = 468
SCREEN_H = 60
class Game:
    """All sprites and animation state for the chimp demo."""

    def __init__(self):pass

    def init(self,screen):
        # wire up the window, load sprites/sounds, reset animation state
        self.screen = screen
        screen.layout = 'absolute'
        screen.setActualSize(SCREEN_W, SCREEN_H)
        screen.addEventListener('mouseMove',mousemotion)
        screen.addEventListener('enterFrame',do_loop)
        screen.addEventListener('click',chimp_whip)
        self.bg = load_sprite("py_background_png");
        self.screen.addChild(self.bg)
        self.chimp = load_sprite("py_chimp_png");
        self.screen.addChild(self.chimp)
        self.orig_y = self.chimp.y
        img2 = self.fist = load_sprite("py_fist_png")
        self.screen.addChild(img2)
        img2.move(400,img2.height/2)
        # move: chimp direction (+1/-1); spin: frames left spinning;
        # hit: frames left in the punch; hit_move: fist direction
        self.move = 1
        self.spin = 0
        self.hit = 0
        self.hit_move = 1
        self.sfx = {}
        self.sfx['whip'] = load_sound_resource("py_punch_mp3")
        self.sfx['nohit'] = load_sound_resource("py_whiff_mp3")

    def loop(self):
        # per-frame update: spin the chimp if it was hit, otherwise bounce
        # it between the screen edges; animate the fist while punching
        img = self.chimp
        if self.spin:
            self.spin -= 1
            img.rotation = self.spin*24
        else:
            img.x += self.move * 8
            if img.x > SCREEN_W-img.width:
                self.move = -1
            if img.x < 0:
                self.move = 1
        if self.hit:
            self.hit -= 1
            self.fist.y += 6 * self.hit_move
            if self.hit <= 5:
                self.hit_move = -1

    def paint(self,screen):
        # flash repaints sprites itself; nothing to do
        pass
# the single global game instance; initialized in flash_main
game = Game()

def mousemotion(e):
    # follow the mouse horizontally with the fist sprite, clamped so the
    # sprite stays fully on screen
    img = game.fist
    img_halfw = img.width / 2
    newx = e.stageX - img_halfw
    # don't reach the borders
    if e.stageX > SCREEN_W - img_halfw:
        newx = SCREEN_W - img.width
    if newx <= 0:
        newx = 0
    img.x = newx
def do_loop(e):
    # enterFrame handler: advance the animation one frame
    game.loop()
def chimp_whip(e):
    # click handler: start a punch; if the click is horizontally over the
    # chimp it is a hit (punch sound + spin), otherwise a whiff
    img = game.chimp
    game.hit = 10
    game.hit_move = 1
    game.fist.y=game.fist.height/2
    if e.stageX > img.x and e.stageX < img.x+img.width:
        game.sfx['whip'].play()
        game.spin = 20
    else:
        game.sfx['nohit'].play()
def flash_main( x=1 ):
    # entry point: bind the game to the flash window identified by x
    game.init(castToWindow(x))
| Python |
from pypy.translator.flex.modules.flex import *
from pypy.rpython.ootypesystem.bltregistry import BasicExternal, MethodDesc
class Grossini():
    def load_image(self, w):
        # create an Image sprite from grossini.png and attach it to window w
        i = Image()
        i.load("grossini.png")
        w.addChild(i)
class Sonido():
    def load_sound(self):
        # load the embedded mp3 resource and play it immediately
        s = load_sound_resource("py_sal_mp3")
        s.play()
class State(object):
    # shared mutable holder for the Sonido instance; filled in flash_main
    # and read by callback
    sound = None

state = State()
def callback(event):
    # button click handler: play the sound stored in the shared state
    s = state.sound
    s.load_sound()
def flash_main( x=1 ):
    # build the demo: an image plus a button that plays a sound on click
    w = castToWindow( x )
    w.setActualSize(300,200)
    g = Grossini()
    g.load_image(w)
    s = Sonido()
    # stash the sound player where callback can reach it
    state.sound = s
    b = Button()
    b.addEventListener("click" , callback)
    b.label = "Play a sound!"
    b.x=20
    b.y=52
    w.addChild(b)
| Python |
from pypy.translator.flex.modules.flex import *
from pypy.rpython.ootypesystem.bltregistry import BasicExternal, MethodDesc
def callback(event):
    # button click handler: just trace a message
    flexTrace("hola")
class Clase:
    # minimal class demonstrating that method calls are translated
    def metodo(self):
        flexTrace("hola")
def flash_main( x=1 ):
    # hello-world demo: one button that traces "hola" when clicked
    window = castToWindow( x )
    b = Button()
    b.addEventListener( "click" , callback)
    b.label = "Hello world!"
    b.x=20
    b.y=52
    # also exercise an instance method call at startup
    a = Clase()
    a.metodo()
    window.addChild(b)
| Python |
from pypy.translator.flex.modules.flex import *
from pypy.rpython.ootypesystem.bltregistry import BasicExternal, MethodDesc
class Grossini():
    def load_image(self, w):
        # create an Image sprite from espejo.png and attach it to window w
        i = Image()
        i.load("espejo.png")
        w.addChild(i)
def flash_main( x=1 ):
    # show a single image in a 400x400 window
    w = castToWindow( x )
    w.setActualSize(400,400)
    o = Grossini()
    o.load_image(w)
| Python |
from pypy.translator.flex.modules.flex import *
from pypy.rpython.ootypesystem.bltregistry import BasicExternal, MethodDesc
class Sonido():
    def load_sound(self, w):
        # stream sal.mp3 via a URL request and play it
        s = Sound()
        r = newURLRequest("sal.mp3")
        s.load(r)
        s.play()
def flash_main( x=1 ):
    # play a sound as soon as the app starts
    w = castToWindow( x )
    o = Sonido()
    o.load_sound(w)
| Python |
from pypy.translator.flex.modules.flex import *
class Bar:
    """Holds one string; values set via the constructor are tagged
    with a trailing "(!)"."""

    def __init__(self, arg):
        # tag constructor-supplied values so their origin is visible
        self.arg = arg + "(!)"

    def setValue(self, arg):
        # unlike __init__, stores the value untagged
        self.arg = arg

    def value(self):
        return self.arg
class Foo:
    """Wraps its string in a Bar, tagging values with "@" when they come
    from the constructor and "#" when they come from setValue."""

    def __init__(self, arg):
        self.arg = Bar(arg + "@")

    def setValue(self, arg):
        self.arg = Bar(arg + "#")

    def value(self):
        # delegate to the inner Bar
        return self.arg.value()
def flash_main(a=1):
    # exercise object creation, mutation and string building in a loop,
    # tracing each intermediate value
    flexTrace("Starting! python")
    f = Foo("hola")
    for x in range(20):
        f.setValue( "doing number "+str(x)+" !!" )
        flexTrace(f.value())
    flexTrace("Im done!")
| Python |
from pypy.translator.flex.modules.flex import *
from pypy.rpython.ootypesystem.bltregistry import BasicExternal, MethodDesc
def callback(event, mess):
    # shared click handler; the message is pre-bound with partial below
    flexTrace(mess)
def flash_main( x=1 ):
    # two buttons sharing one callback, distinguished by the message
    # bound in with partial
    window = castToWindow( x )
    b = Button()
    func = partial(callback, "hola")
    b.addEventListener("click" , func)
    b.label = "Hello world!"
    b.x=20
    b.y=52
    window.addChild(b)
    # NOTE(review): the second button gets the same x/y as the first, so
    # it is drawn on top of it -- confirm whether that is intended
    b = Button()
    func = partial(callback, "chau")
    b.addEventListener("click" , func)
    b.label = "Bye bye world!"
    b.x=20
    b.y=52
    window.addChild(b)
| Python |
from pypy.rpython.extfunc import genericcallable, register_external
from pypy.rpython.ootypesystem.bltregistry import BasicExternal, MethodDesc
# For each wrapped function, we define an empty function, and then register it with an actionscript function:
# This one is for debug output, so we can see our calls working
def flexTrace(s):
    # stub body; the real implementation is the Actionscript function
    # registered below
    pass

# This function has one arg, which is of type string
# if there were a return type, it would be result=<type>
# export_name refers to the corresponding Actionscript function. In this case it's _consts_0.flexTrace, which is defined by us in library.as
register_external(flexTrace, args=[str], export_name="_consts_0.flexTrace")
# Wrapping an Actionscript class as a python class
class Date(BasicExternal):
    # This ties the Python "Date" class to the Actionscript "Date" class
    _render_class = "Date"
    # This lists the methods. The required argument for MethodDesc is a
    # list of argument types (all empty in this case).
    # The return types are specified by retval=<type>
    _methods = {
        'getDate': MethodDesc([], retval=int),
        'getFullYear': MethodDesc([], retval=int),
        'getMonth': MethodDesc([], retval=int),
        'getHours': MethodDesc([], retval=int),
        'getMinutes': MethodDesc([], retval=int),
    }
# The Date class in Actionscript doesn't require importing any libraries, but if it did, we would import the library with:
# add_import(<libname>)
# We would do it at the top level, not in flash_main
def flash_main(a=1):
    # format the current date and time via the wrapped Date class and
    # trace the result
    flexTrace("Starting! python")
    d = Date()
    # getMonth() is 0-based, so it indexes the name list directly
    month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] [d.getMonth()]
    dateString = month + " " + str(d.getDate()) + ", " + str(d.getFullYear())
    timeString = str(d.getHours()) + ":" + str(d.getMinutes())
    flexTrace("The current date is: " + dateString + " The current time is " + timeString)
    flexTrace("Im done!")
| Python |
from pypy.translator.flex.modules.flex import *
from pypy.rpython.ootypesystem.bltregistry import BasicExternal, MethodDesc
def flash_main( x=1 ):
    # show an embedded image resource in the window
    i = Image()
    i.source = load_resource("py_grossini_png")
    w = castToWindow( x )
    w.addChild(i)
| Python |
from pypy.translator.flex.modules.flex import *
from pypy.rpython.ootypesystem.bltregistry import BasicExternal, MethodDesc
from pypy.interpreter.main import run_string
import py
from pypy.objspace.std import Space
from pypy.interpreter.pycompiler import CPythonCompiler as CompilerClass
def codetest(source, functionname, args):
    """Compile and run the given code string, and then call its function
    named by 'functionname' with arguments 'args'.

    The code is compiled and executed once, here, inside a fresh
    temporary module; the return value is a zero-argument closure that
    performs the wrapped call on each invocation.
    """
    from pypy.interpreter import baseobjspace
    from pypy.interpreter import pyframe, gateway, module
    space = Space()
    # normalize indentation and make sure the source ends with a newline
    source = str(py.code.Source(source).strip()) + '\n'
    w = space.wrap
    w_code = space.builtin.call('compile',
        w(source), w('<string>'), w('exec'), w(0), w(0))
    # execute the module body so `functionname` gets defined
    tempmodule = module.Module(space, w("__temp__"))
    w_glob = tempmodule.w_dict
    space.setitem(w_glob, w("__builtins__"), space.builtin)
    code = space.unwrap(w_code)
    code.exec_code(space, w_glob, w_glob)
    wrappedargs = [w(a) for a in args]
    wrappedfunc = space.getitem(w_glob, w(functionname))
    def callit():
        return space.call_function(wrappedfunc, *wrappedargs)
    return callit
    # NOTE: an older direct-call implementation (with OperationError
    # handling) followed this return statement; it was unreachable dead
    # code and has been removed.
# compile the snippet once at module load; python_code() then runs hello()
python_code = codetest("""
def hello():
    print 'hello'
""", "hello", [])

def callback(event):
    # button click handler: run the interpreted snippet
    python_code()
def flash_main( x=1 ):
    # one button that executes interpreter-level code when clicked
    window = castToWindow( x )
    b = Button()
    b.addEventListener( "click" , callback)
    b.label = "Hello world!"
    b.x=20
    b.y=52
    window.addChild(b)
from pypy.translator.gensupp import NameManager
#from pypy.translator.js.optimize import is_optimized_function
class JavascriptNameManager(NameManager):
    """NameManager that additionally avoids Javascript reserved words and
    the global names predefined by browsers, so generated identifiers
    never shadow or clash with them."""

    def __init__(self, db):
        NameManager.__init__(self)
        self.db = db
        self.reserved = {}

        # http://javascript.about.com/library/blreserved.htm
        reserved_words = '''
            abstract as boolean break byte case catch
            char class continue const debugger default delete
            do double else enum export extends false
            final finally float for function goto if implements
            import in instanceof int interface is long
            namespace native new null package private protected
            public return short static super switch synchronized
            this throw throws transient true try typeof
            use var void volatile while with alert
            '''
        for name in reserved_words.split():
            self.reserved[name] = True

        # http://javascript.about.com/library/blclassobj.htm
        # XXX WAAAHHH!!! IE alert :( there are a lot of objects here that are
        # _not_ in standard JS, see
        # http://devedge-temp.mozilla.org/library/manuals/2000/javascript/1.5/reference/
        predefined_classes_and_objects = '''
            Anchor anchors Applet applets Area Array Body
            Button Checkbox Date document Error EvalError FileUpload
            Form forms frame frames Function Hidden History
            history Image images Link links location Math
            MimeType mimetypes navigator Number Object Option options
            Password Plugin plugins Radio RangeError ReferenceError RegExp
            Reset screen Script Select String Style StyleSheet
            Submit SyntaxError Text Textarea TypeError URIError window
            '''
        for name in predefined_classes_and_objects.split():
            self.reserved[name] = True

        # http://javascript.about.com/library/blglobal.htm
        global_properties_and_methods = '''
            _content closed Components controllers crypto defaultstatus directories
            document frames history innerHeight innerWidth length location
            locationbar menubar name navigator opener outerHeight outerWidth
            pageXOffset pageYOffset parent personalbar pkcs11 prompter screen
            screenX screenY scrollbars scrollX scrollY self statusbar
            toolbar top window
            '''
        for name in global_properties_and_methods.split():
            self.reserved[name] = True

        self.make_reserved_names(' '.join(self.reserved))
        # bug fix: the original called set() on the raw string, producing a
        # set of individual *characters*; split() yields the intended names
        self.predefined = set(predefined_classes_and_objects.split())

    def ensure_non_reserved(self, name):
        # append underscores until the name no longer collides
        while name in self.reserved:
            name += '_'
        return name
| Python |
""" Some helpers
"""
from pypy.translator.flex.modules.dom import document
def escape(s):
    """Return *s* unchanged.

    HTML-escaping (see the commented-out variant) is intentionally
    disabled for now; the hook is kept so callers need not change if it
    is re-enabled.
    """
    #return s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;"). \
    # replace("'", "\\'").replace(" ", "&nbsp;").replace("\n", "<br/>")
    return s
def create_debug_div():
    # create the <div id="debug_div"> used for traceback output and attach
    # it to the document body
    debug_div = document.createElement("div")
    debug_div.setAttribute("id", "debug_div")
    # XXX attach it somewhere...
    #body = document.getElementsByTagName('body')[0]
    document.childNodes[0].childNodes[1].appendChild(debug_div)
    return debug_div
def __show_traceback(tb, exc):
    # render a traceback into the debug div as red preformatted text;
    # tb is a list of (fun_name, args, filename, lineno) tuples and the
    # first entry is skipped
    debug_div = document.getElementById("debug_div")
    if not debug_div:
        # create div here
        debug_div = create_debug_div()
    pre_div = document.createElement("pre")
    pre_div.style.color = "#FF0000"
    debug_div.appendChild(pre_div)
    txt = document.createTextNode("")
    pre_div.appendChild(txt)
    for tb_entry in tb[1:]:
        # list of tuples...
        fun_name, args, filename, lineno = tb_entry
        # some source maybe? or so?
        line1 = escape("%s %s" % (fun_name, args))
        line2 = escape(" %s: %s\n" % (filename, lineno))
        txt.nodeValue += line1 + '\n' + line2
    txt.nodeValue += str(exc)

# marker telling the exception machinery that tracebacks are rendered here
__show_traceback.explicit_traceback = True
| Python |
""" opcode definitions
"""
from pypy.translator.oosupport.metavm import PushArg, PushAllArgs, StoreResult,\
InstructionList, New, GetField, MicroInstruction, RuntimeNew, PushPrimitive
from pypy.translator.oosupport.metavm import _GetFieldDispatcher, _SetFieldDispatcher, \
_CallDispatcher, _MethodDispatcher, SetField
from pypy.translator.flex.metavm import IsInstance, Call, CallMethod,\
CopyName, CastString, _Prefix, _CastFun, _NotImplemented, CallBuiltin,\
CallBuiltinObject, GetBuiltinField, SetBuiltinField, IndirectCall,\
CallExternalObject, SetExternalField, _CastMethod, _LoadConst,\
DiscardStack, CheckLength, fix_opcodes
from pypy.translator.flex.jsbuiltin import Builtins
from pypy.rpython.ootypesystem import ootype
# instruction list meaning "emit no code at all" for no-op opcodes
DoNothing = []

from pypy.translator.flex.log import log

# maps the names used in jsbuiltin descriptions to MicroInstruction classes
class_map = { 'Call' : Call,
    'CallMethod' : CallMethod,
    'CallBuiltinObject' : CallBuiltinObject,
    'CallBuiltin' : CallBuiltin,
    'GetBuiltinField' : GetBuiltinField,
    'GetField' : GetField,
    'SetField' : SetField,
    'SetBuiltinField' : SetBuiltinField,
    'CallExternalObject' : CallExternalObject,
    'SetExternalField' : SetExternalField,
    }

# flow-graph opcode -> rendering: a bare string is emitted as an infix
# binary operator; a list is a sequence of MicroInstructions
opcodes = {'int_mul': '*',
    'int_add': '+',
    'int_sub': '-',
    'int_sub_ovf': '-', # XXX overflow
    'int_floordiv': '/',
    'int_mod': '%',
    'int_mod_ovf': '%', # XXX: what's that?
    'int_mod_zer': '%', # XXX: fix zero stuff
    'int_and': '&',
    'int_or': '|',
    'int_xor': '^',
    'int_lshift': '<<',
    'int_lshift_ovf': '<<', # XXX overflow
    'int_rshift': '>>',
    'int_rshift_ovf': '>>', # XXX overflow
    'int_lt': '<',
    'int_le': '<=',
    'int_eq': '==',
    'int_ne': '!=',
    'int_ge': '>=',
    'int_gt': '>',
    'uint_mul': '*',
    'uint_add': '+',
    'uint_sub': '-',
    'uint_floordiv': '/',
    'uint_mod': '%',
    'uint_and': '&',
    'uint_or': '|',
    'uint_xor': '^',
    'uint_lshift': '<<',
    'uint_rshift': '>>',
    'uint_lt': '<',
    'uint_le': '<=',
    'uint_eq': '==',
    'uint_ne': '!=',
    'uint_ge': '>=',
    'uint_gt': '>',
    'unichar_lt': '<',
    'unichar_le': '<=',
    'unichar_eq': '==',
    'unichar_ne': '!=',
    'unichar_ge': '>=',
    'unichar_gt': '>',
    'char_lt': '<',
    'char_le': '<=',
    'char_eq': '==',
    'char_ne': '!=',
    'char_ge': '>=',
    'char_gt': '>',
    'float_mul': '*',
    'float_add': '+',
    'float_sub': '-',
    'float_truediv': '/',
    'float_lt': '<',
    'float_le': '<=',
    'float_eq': '==',
    'float_ne': '!=',
    'float_ge': '>=',
    'float_gt': '>',
    'ptr_eq': '==',
    'ptr_ne': '!=',
    # unary operators and casts, rendered via helper MicroInstructions
    'bool_not': [PushAllArgs,_Prefix('!')],
    'int_neg': [PushAllArgs,_Prefix('-')],
    'int_invert': [PushAllArgs,_Prefix('~')],
    'float_neg': [PushAllArgs,_Prefix('-')],
    'float_pow': [PushAllArgs,_CastFun('Math.pow',2)],
    'int_abs': [PushAllArgs,_CastFun('Math.abs',1)],
    'float_abs': [PushAllArgs,_CastFun('Math.abs',1)],
    'int_is_true': [PushAllArgs,_Prefix('!!')],
    'uint_is_true': [PushAllArgs,_Prefix('!!')],
    'float_is_true': [PushAllArgs,_Prefix('!!')],
    'is_true': [PushAllArgs,_Prefix('!!')],
    'direct_call' : [_CallDispatcher(Builtins, class_map)],
    'indirect_call' : [IndirectCall],
    'same_as' : CopyName,
    'new' : [New],
    'runtimenew' : [RuntimeNew],
    'instanceof' : [IsInstance],
    #'subclassof' : [IsSubclassOf],
    # objects
    'oosetfield' : [_SetFieldDispatcher(Builtins, class_map)],
    'oogetfield' : [_GetFieldDispatcher(Builtins, class_map)],
    'oosend' : [_MethodDispatcher(Builtins, class_map)],
    'ooupcast' : CopyName,
    'oodowncast' : CopyName,
    'oononnull' : [PushAllArgs,_Prefix('!!')],
    'oostring' : [PushArg(0),CastString],
    'ooparse_int' : [PushAllArgs,_CastFun("parseInt",2)],
    'ooparse_float' : [PushAllArgs,_CastFun("parseFloat",1)],
    'oois' : '===',
    'cast_bool_to_int': CopyName,
    'cast_bool_to_uint': CopyName,
    'cast_bool_to_float': CopyName,
    'cast_char_to_int': [PushAllArgs,_LoadConst(0),_CastMethod("charCodeAt",1)],
    'cast_unichar_to_int': [PushAllArgs,_LoadConst(0),_CastMethod("charCodeAt",1)],
    'cast_int_to_char': [PushAllArgs,_CastFun("String.fromCharCode",1)],
    'cast_int_to_unichar': [PushAllArgs,_CastFun("String.fromCharCode",1)],
    'cast_int_to_uint': CopyName,
    'cast_int_to_float': CopyName,
    'cast_int_to_longlong': CopyName,
    'cast_uint_to_int': CopyName,
    'cast_uint_to_float': CopyName,
    'cast_float_to_int': [PushAllArgs,_CastFun("Math.floor",1)],
    'cast_float_to_uint': [PushAllArgs,_CastFun("Math.floor",1)],
    'cast_float_to_longlong': [PushAllArgs,_CastFun("Math.floor",1)],
    'truncate_longlong_to_int': CopyName,
    'debug_assert' : DoNothing,
    'resume_point' : DoNothing,
    'is_early_constant': [PushPrimitive(ootype.Bool, False)],
    }

# post-process the table (wrap plain entries etc.)
fix_opcodes(opcodes)
| Python |
""" genjs class definition
"""
from pypy.translator.cli.node import Node
from pypy.translator.cli.cts import CTS
import pypy.translator.flex.asmgen as asmgen
class Class(Node):
    """Renders one Actionscript class (py/<name>.as) from an ootype
    class definition.  (Python 2 only: uses iteritems/cmp.)"""

    def __init__(self, db, classdef):
        self.db = db
        self.cts = db.genoo.TypeSystem(db)
        self.classdef = classdef
        # dotted ootype name flattened to a legal identifier
        self.name = classdef._name.replace('.', '_')#[-1]
        self.real_name = classdef._name
        if not self.is_root(classdef):
            self.parent = self.db.pending_class(classdef._superclass)
            # render order: superclasses must be emitted before subclasses
            self.order = self.parent.order + 1
        else:
            self.order = 0

    def __hash__(self):
        return hash(self.classdef)

    def __eq__(self, other):
        return self.classdef == other.classdef

    def __cmp__(self, other):
        # sort by inheritance depth (see __init__)
        return cmp(self.order, other.order)

    def is_root(classdef):
        # True for the topmost class of the hierarchy
        return classdef._superclass is None
    is_root = staticmethod(is_root)

    def get_name(self):
        return self.name

    def render(self, ilasm):
        # emit the full class: constructor (field defaults), toString,
        # and every method with a flow graph
        if self.is_root(self.classdef) or self.name == 'Object':
            return
        if self.db.class_name(self.classdef) is not None:
            return # already rendered
        self.ilasm = ilasm
        ilasm.push_gen("py/%s.as"%self.name)
        ilasm.set_push(False)
        if not self.is_root(self.classdef):
            basename = self.basename(self.classdef._superclass._name)
            if basename != 'Root':
                ilasm.begin_class(self.name, basename)
            else:
                ilasm.begin_class(self.name)
        else:
            ilasm.begin_class(self.name)
        # constructor
        ilasm.begin_function(self.name, [])
        # we need to copy here all the arguments
        self.copy_class_attributes(ilasm)
        ilasm.end_function()
        # begin to_String method
        ilasm.begin_method("toString", self.name, [])
        ilasm.load_str("'<%s object>'" % self.real_name)
        ilasm.ret()
        ilasm.end_function()
        #for f_name, (f_type, f_default) in self.classdef._fields.iteritems():
        # cts_type = self.cts.lltype_to_cts(f_type)
        #if cts_type != 'void':
        # ilasm.field(f_name, cts_type)
        for m_name, m_meth in self.classdef._methods.iteritems():
            graph = getattr(m_meth, 'graph', None)
            if m_name=="o__init__": continue
            if graph:
                f = self.db.genoo.Function(self.db, graph, m_name, is_method = True, _class = self.name)
                f.render(ilasm)
            else:
                pass
                # XXX: We want to implement an abstract method here
                self.db.pending_abstract_function(m_name)
        self.db.record_class(self.classdef, self.name)
        ilasm.set_push(True)
        ilasm.end_class()
        ilasm.pop_gen()

    def copy_class_attributes(self, ilasm):
        # emit assignments giving every field its (possibly overridden)
        # default value on `this`
        default_values = self.classdef._fields.copy()
        default_values.update(self.classdef._overridden_defaults)
        for field_name, (field_type, field_value) in default_values.iteritems():
            ilasm.load_str("this")
            self.db.load_const(field_type, field_value, ilasm)
            ilasm.set_field(None, field_name)

    def basename(self, name):
        # flatten a dotted name the same way as in __init__
        return name.replace('.', '_')#[-1]
| Python |
""" This is an example of a totally basic server for XMLHttp requests,
built on top of BaseHTTPServer.
It works like this:
you take your own implementation of Handler and subclass it
to provide whatever you like. Each request is first checked for an
appropriate method on the handler (with dots replaced by _), and this
method needs to have the attribute `exposed` set.
If the method is not there, we instead search exported_methods (an attribute
of the handler) for an appropriate JSON call. We write down the JSON which we
get as a return value (note that right now arguments can only be strings) and
pass it to the caller.
"""
import traceback
# human-readable reason phrases for the status codes this server emits
HTTP_STATUS_MESSAGES = {
    200: 'OK',
    204: 'No Content',
    301: 'Moved permanently',
    302: 'Found',
    304: 'Not modified',
    401: 'Unauthorized',
    403: 'Forbidden',
    404: 'Not found',
    500: 'Server error',
    501: 'Not implemented',
}

class HTTPError(Exception):
    """ raised on HTTP errors """

    def __init__(self, status, data=None):
        # numeric status, its reason phrase, and optional extra payload
        # (e.g. the redirect target for a 301)
        self.status = status
        self.message = HTTP_STATUS_MESSAGES[status]
        self.data = data

    def __str__(self):
        suffix = ' (%s)' % (self.data,) if self.data else ''
        return '<HTTPException %s "%s"%s>' % (self.status, self.message, suffix)
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import re
import time
import random
import os
import sys
import py
from pypy.translator.js.lib.url import parse_url
from pypy.translator.js import json
from pypy.rpython.ootypesystem.bltregistry import MethodDesc, BasicExternal,\
described
from pypy.translator.js.main import rpython2javascript
from pypy.translator.js import commproxy
commproxy.USE_MOCHIKIT = False
class Collection(object):
    """ an HTTP collection

    essentially this is a container object that has a path that ends in a
    slash, and support for PATH_INFO (so it can have (virtual or not)
    children)

    children are callable attributes of ourselves that have an 'exposed'
    attribute themselves, that accept 3 arguments: 'handler', a reference
    to the BaseHTTPHandler that handles the request (XXX should be
    abstracted?), 'path', the requested path to the object, and 'query',
    the (unparsed!) GET query string (without a preceding ?)
    """

    def traverse(self, path, orgpath):
        """ traverse path relative to self

        'path' is the path requested by the client, split on '/', but
        relative to the current object: parent Collection items may have
        removed items (they will have, actually, unless 'self' is the root
        of the website) from the beginning on traversal to 'self'

        path is split on '/', the first item is removed and used to do
        a lookup on self; if that fails a 404 is raised, if successful
        the item is used to continue traversal (if the object found is
        a Collection type) or to handle the request (if the object found
        is a callable with .exposed set to True)

        if path equals '', a lookup for 'index' is done

        can be overridden in subclasses to implement different path
        handling (PATH_INFO-like stuff)
        """
        name = path.pop(0)
        if name == '':
            name = 'index'
        # dots in URL segments map to underscores in attribute names
        name = name.replace(".", "_")
        resource = getattr(self, name, None)
        # reject missing children and non-exposed callables outright
        if (resource is None or (not isinstance(resource, Collection) and
                (not callable(resource) or
                 not getattr(resource, 'exposed', True)))):
            raise HTTPError(404)
        if path:
            if not isinstance(resource, Collection):
                raise HTTPError(500) # no PATH_INFO allowed for non-Collection
            return resource.traverse(path, orgpath)
        else:
            if isinstance(resource, Collection):
                # targeting a collection directly: redirect to its 'index'
                raise HTTPError(301, orgpath + '/')
            if not getattr(resource, 'exposed', False):
                # don't reveal what is not accessible...
                raise HTTPError(404)
            return resource
class ExportedMethods(BasicExternal, Collection):
    # base path under which these methods are rendered client-side
    _render_base_path = "exported_methods"
    def traverse(self, path, orgpath):
        """ look up a JSON-exported method on self

            Unlike Collection.traverse this neither recurses nor falls back
            to 'index': the first path segment (with '.' replaced by '_')
            must name a truthy attribute of self, otherwise a 404 is raised.
            The attribute is wrapped so that calling the returned object
            yields a ('text/json', <json-serialized return value>) tuple.
        """
        name = path.pop(0)
        name = name.replace(".", "_")
        resource = getattr(self, name, None)
        if not resource:
            raise HTTPError(404)
        # defer the actual call; serve the result as JSON
        return lambda **args : ('text/json', json.write(resource(**args)))
    # tell the JS backend to render these as XMLHttpRequest proxies
    _render_xmlhttp = True
# module-level singleton collecting all JSON-exported methods
exported_methods = ExportedMethods()
def patch_handler(handler_class):
    """ Fill in the missing 'path' attribute of every Static object
        found on handler_class, pointing it at
        <handler_class.static_dir>/<attribute name>.html
    """
    for attr_name, attr_value in handler_class.__dict__.items():
        if not isinstance(attr_value, Static):
            continue
        if attr_value.path is not None:
            continue  # already configured explicitly
        assert hasattr(handler_class, "static_dir")
        static_root = str(handler_class.static_dir)
        attr_value.path = os.path.join(static_root, attr_name + ".html")
class TestHandler(BaseHTTPRequestHandler):
    """Request handler that dispatches GET/POST either to an exposed
    method on itself or to a JSON-returning function on
    ``exported_methods``."""
    # module-level singleton shared by all requests
    exported_methods = exported_methods
    def do_GET(self):
        path, args = parse_url(self.path)
        if not path:
            path = ["index"]
        # dots in the first segment map to underscores in attribute names
        name_path = path[0].replace(".", "_")
        if len(path) > 1:
            # remaining segments are passed through as a filesystem-ish path
            rest = os.path.sep.join(path[1:])
        else:
            rest = None
        method_to_call = getattr(self, name_path, None)
        if method_to_call is None or not getattr(method_to_call, 'exposed', None):
            # fall back to the JSON-exported methods
            exec_meth = getattr(self.exported_methods, name_path, None)
            if exec_meth is None:
                self.send_error(404, "File %s not found" % path)
            else:
                # serve JSON with no-cache headers
                self.serve_data('text/json', json.write(exec_meth(**args)),
                                True)
        else:
            if rest:
                outp = method_to_call(rest, **args)
            else:
                outp = method_to_call(**args)
            # a bare string is html; a tuple is (content_type, data[, nocache])
            if isinstance(outp, (str, unicode)):
                self.serve_data('text/html', outp)
            elif isinstance(outp, tuple):
                self.serve_data(*outp)
            else:
                raise ValueError("Don't know how to serve %s" % (outp,))
    def log_message(self, format, *args):
        # XXX just discard it
        pass
    do_POST = do_GET
    def serve_data(self, content_type, data, nocache=False):
        # write a complete 200 response with the given body
        self.send_response(200)
        self.send_header("Content-type", content_type)
        self.send_header("Content-length", len(data))
        if nocache:
            self.send_nocache_headers()
        self.end_headers()
        self.wfile.write(data)
    def send_nocache_headers(self):
        # standard belt-and-braces set of anti-caching headers
        self.send_header('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT')
        self.send_header('Last-Modified',
                         time.strftime("%a, %d %b %Y %H:%M:%S GMT"))
        self.send_header('Cache-Control', 'no-cache, must-revalidate')
        self.send_header('Cache-Control', 'post-check=0, pre-check=0')
        self.send_header('Pragma', 'no-cache')
class Static(object):
    """Serve the contents of a single file.

    'path' may be None at construction time; patch_handler() fills it in
    from the handler's static_dir.  Calling the object returns the file's
    contents as a string.
    """
    exposed = True
    def __init__(self, path=None):
        self.path = path
    def __call__(self):
        # bug fix: the original left the file handle open until garbage
        # collection; close it deterministically
        f = open(str(self.path))
        try:
            return f.read()
        finally:
            f.close()
class FsFile(object):
    """Serve (and cache) the contents of a file-like 'path' object.

    Calling the instance returns an ({'Content-Type': ...}, data) tuple;
    the data is read once via path.read() and cached, unless 'debug' is
    set, in which case it is re-read on every call.
    """
    exposed = True
    debug = False
    _data = None  # class-level default; becomes the per-instance cache
    def __init__(self, path, content_type="text/html"):
        self._path = path
        self._content_type = content_type
    def __call__(self):
        if self.debug or self._data is None:
            self._data = self._path.read()
        return ({'Content-Type': self._content_type}, self._data)
class StaticDir(Collection):
    """Serve files from a directory.

    traverse() joins the remaining path segments onto self.path, reads the
    file, and returns a zero-argument callable producing either the raw
    data or a (type, data) tuple when a content type was configured.
    """
    exposed = True
    def __init__(self, path, type=None):
        self.path = path
        self.type = type
    def traverse(self, path, orgpath):
        # bug fix: the original left the file handle open until garbage
        # collection; close it deterministically
        f = open(os.path.join(str(self.path), *path))
        try:
            data = f.read()
        finally:
            f.close()
        if self.type:
            return lambda : (self.type, data)
        return lambda : data
def create_server(server_address = ('', 8000), handler=TestHandler,
                  server=HTTPServer):
    """ Create an HTTP server bound to server_address using the given
        handler and server classes, and return it (not yet serving).

        NOTE(review): the original docstring documented spawn/fork/timeout/
        port_file parameters that this function does not accept; see
        start_server_in_new_thread / start_server_in_new_process instead.
    """
    patch_handler(handler)
    httpd = server(server_address, handler)
    httpd.last_activity = time.time()  # read by the inactivity watchdog
    print "Server started, listening on %s:%s" %\
        (httpd.server_address[0],httpd.server_port)
    return httpd
def start_server_in_new_thread(server):
    """ Run server.serve_forever() in a background thread and return
        immediately. """
    import thread
    thread.start_new_thread(server.serve_forever, ())
def start_server_in_new_process(server, timeout=None):
    """ Fork: the child serves forever (optionally killing itself after
        'timeout' seconds of inactivity, tracked via server.last_activity)
        and the parent returns the child's pid.
    """
    pid = os.fork()
    if not pid:
        if timeout:
            def f(httpd):
                # watchdog thread: close the server and SIGTERM ourselves
                # once idle for longer than 'timeout' seconds
                while 1:
                    time.sleep(.3)
                    if time.time() - httpd.last_activity > timeout:
                        httpd.server_close()
                        import os
                        os.kill(os.getpid(), 15)
            import thread
            thread.start_new_thread(f, (server,))
        server.serve_forever()
        # never return through the forked child's normal control flow
        os._exit(0)
    return pid
# backwards-compatibility alias; new code should use NewHandler below
Handler = TestHandler
# deprecate TestHandler name
class NewHandler(BaseHTTPRequestHandler):
    """ BaseHTTPRequestHandler that does object publishing

        Traverses self.application (a Collection tree) to find an exposed
        callable for the requested path, calls it with the GET/POST vars,
        and writes the result back as an HTTP response.
    """
    application = None # attach web root (Collection object) here!!
    bufsize = 1024     # chunk size used when streaming file-like bodies
    def do_GET(self, send_body=True):
        """ perform a request """
        path, query = self.process_path(self.path)
        _, args = parse_url("?" + query)
        try:
            resource = self.find_resource(path)
            # XXX strange hack
            if hasattr(resource, 'im_self'):
                resource.im_self.server = self.server
            retval = resource(**args)
            # a bare string means 'html body'; otherwise expect a
            # (headers-or-content-type, data) tuple
            if isinstance(retval, str):
                headers = {'Content-Type': 'text/html'}
                data = retval
            else:
                headers, data = retval
                if isinstance(headers, str):
                    headers = {'Content-Type': headers}
        except HTTPError, e:
            status = e.status
            headers, data = self.process_http_error(e)
        except:
            # catch-all: serve the traceback as plain text.
            # NOTE(review): status is deliberately left at 200 here — the
            # client-side test machinery apparently expects the body; confirm
            exc, e, tb = sys.exc_info()
            tb_formatted = '\n'.join(traceback.format_tb(tb))
            status = 200
            data = 'An error has occurred: %s - %s\n\n%s' % (exc, e,
                                                             tb_formatted)
            headers = {'Content-Type': 'text/plain'}
            if hasattr(self.application, 'handle_error'):
                self.application.handle_error(exc, e, tb)
        else:
            status = 200
        # make sure a content type is always set
        if not 'content-type' in [k.lower() for k in headers]:
            headers['Content-Type'] = 'text/html; charset=UTF-8'
        self.response(status, headers, data, send_body)
    do_POST = do_GET
    def do_HEAD(self):
        # same as GET, but suppress the body
        return self.do_GET(False)
    def process_path(self, path):
        """ split the path in a path and a query part#

            returns a tuple (path, query), where path is a string and
            query a dictionary containing the GET vars (URL decoded and such)
        """
        path = path.split('?')
        if len(path) > 2:
            raise ValueError('illegal path %s' % (path,))
        p = path[0]
        q = len(path) > 1 and path[1] or ''
        return p, q
    def find_resource(self, path):
        """ find the resource for a given path
        """
        if not path:
            raise HTTPError(301, '/')
        assert path.startswith('/')
        chunks = path.split('/')
        chunks.pop(0) # empty item
        return self.application.traverse(chunks, path)
    def process_http_error(self, e):
        """ create the response body and headers for errors
        """
        headers = {'Content-Type': 'text/html'} # XXX need more headers here?
        if e.status in [301, 302]:
            # e.data carries the redirect target
            headers['Location'] = e.data
            body = 'Redirecting to %s' % (e.data,)
        else:
            # self.responses comes from BaseHTTPRequestHandler
            message, explain = self.responses[e.status]
            body = self.error_message_format % {'code': e.status, 'message': message,
                                                'explain': explain}
        return headers, body
    def response(self, status, headers, body, send_body=True):
        """ generate the HTTP response and send it to the client
        """
        self.send_response(status)
        if (isinstance(body, str) and
                not 'content-length' in [k.lower() for k in headers]):
            headers['Content-Length'] = len(body)
        for keyword, value in headers.iteritems():
            self.send_header(keyword, value)
        self.end_headers()
        if not send_body:
            return
        if isinstance(body, str):
            self.wfile.write(body)
        elif hasattr(body, 'read'):
            # stream file-like bodies in bufsize chunks
            while 1:
                data = body.read(self.bufsize)
                if data == '':
                    break
                self.wfile.write(data)
        else:
            raise ValueError('body is not a plain string or file-like object')
| Python |
""" Some support files for mapping urls, mostly bindings
for existing cgi stuff
"""
import cgi
import urllib
class URL(object):
    """Parsed URL: 'path' is a list of decoded segments, 'vars' a dict of
    query variables.  Compares equal to another URL or to a raw
    (path, vars) tuple, and unpacks like one via iteration.
    """
    def __init__(self, path, vars):
        self.path = path
        self.vars = vars
    def __eq__(self, other):
        if isinstance(other, URL):
            return self.path == other.path and self.vars == other.vars
        if isinstance(other, tuple):
            if len(other) != 2:
                return False
            # bug fix: the original returned the 2-tuple
            # (self.path, self.vars == other), which is always truthy;
            # compare the whole (path, vars) pair against the tuple instead
            return (self.path, self.vars) == other
        return False
    def __ne__(self, other):
        return not self == other
    def __iter__(self):
        return iter((self.path, self.vars))
def parse_url(path):
    """ Parse a/b/c?q=a into ('a', 'b', 'c') {'q':'a'}
    """
    query_vars = {}
    if '?' in path:
        path, query = path.split("?")
        for key, value in cgi.parse_qs(query).items():
            # cgi.parse_qs wraps each value in a list; unwrap singletons
            if isinstance(value, list):
                query_vars[key] = value[0]
            else:
                query_vars[key] = value
    segments = [urllib.unquote(segment)
                for segment in path.split("/") if segment]
    return URL(segments, query_vars)
| Python |
""" Various simple support functions
"""
from pypy.rpython.ootypesystem.bltregistry import described, load_dict_args,\
MethodDesc
from pypy.rpython.extfunc import genericcallable
def callback(retval=None, args={}):
    """ Variant of described decorator, which flows
    an additional argument with return value of decorated
    function, used specifically for callbacks

    'args' is either a dict mapping argument names to example values, or
    a ready-made argument list; 'retval' is the example return value fed
    to the callback's signature.
    """
    def decorator(func):
        defs = func.func_defaults
        if defs is None:
            defs = ()
        arg_names = func.func_code.co_varnames[:func.func_code.co_argcount]
        if isinstance(args, dict):
            arg_list = load_dict_args(arg_names, defs, args)
        else:
            # bug fix: copy, so decorating several functions with a shared
            # list does not keep appending 'callback' entries to the
            # caller's list
            arg_list = list(args)
        arg_list.append(("callback", genericcallable(args=[retval])))
        func._method = (func.__name__, MethodDesc(arg_list, retval))
        return func
    return decorator
import sys, new
from pypy.translator.js.main import rpython2javascript
def js_source(functions, use_pdb=True):
    """ Compile the given RPython functions to javascript source.

    The functions are collected into a synthetic '_js_src' module which
    is registered in sys.modules for the duration of the translation and
    always removed afterwards.
    """
    mod = new.module('_js_src')
    function_names = []
    for fn in functions:
        fn_name = fn.__name__
        if hasattr(mod, fn_name):
            raise ValueError("exported function name %r is duplicated"
                             % (fn_name,))
        setattr(mod, fn_name, fn)
        function_names.append(fn_name)
    sys.modules['_js_src'] = mod
    try:
        return rpython2javascript(mod, function_names, use_pdb=use_pdb)
    finally:
        del sys.modules['_js_src']
| Python |
""" genjs constant database module
"""
from pypy.rpython.ootypesystem import ootype
from pypy.translator.flex.opcodes import opcodes
from pypy.translator.flex.function import Function
from pypy.translator.flex.log import log
from pypy.translator.flex._class import Class
from pypy.translator.flex.support import JavascriptNameManager
from pypy.rpython.lltypesystem.lltype import Signed, Unsigned, Void, Bool, Float
from pypy.rpython.lltypesystem.lltype import SignedLongLong, UnsignedLongLong, typeOf
from pypy.rpython.lltypesystem.lltype import Char, UniChar
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.ootypesystem import bltregistry
from pypy.objspace.flow.model import Variable, Constant
from pypy.translator.flex.modules import dom
from pypy.translator.flex.commproxy import XmlHttp
try:
    set
except NameError:
    # pre-2.4 Pythons have no builtin set; fall back to the sets module
    from sets import Set as set
class LowLevelDatabase(object):
    """Bookkeeping for the javascript backend: pending/rendered graph and
    class nodes, constant interning, and name uniquification."""
    def __init__(self, genoo):
        self._pending_nodes = set()
        self.genoo = genoo
        self._rendered_nodes = set()
        self.classes = {} # classdef --> class_name
        self.functions = {} # graph --> function_name
        self.function_names = {} # graph --> real_name
        self.methods = {} # graph --> method_name
        self.consts = {}  # value --> const_name
        self.reverse_consts = {}
        self.const_names = set()
        self.rendered = set()
        self.const_var = Variable("_consts")
        self.name_manager = JavascriptNameManager(self)
        self.pending_consts = []
        self.cts = self.genoo.TypeSystem(self)
        self.proxies = []
    def is_primitive(self, type_):
        # primitives are rendered inline and never interned as constants
        if type_ in [Void, Bool, Float, Signed, Unsigned, SignedLongLong, UnsignedLongLong, Char, UniChar, ootype.StringBuilder] or \
            isinstance(type_,ootype.StaticMethod):
            return True
        return False
    def pending_function(self, graph):
        self.pending_node(self.genoo.Function(self, graph))
    def pending_abstract_function(self, name):
        pass
        # XXX we want to implement it at some point (maybe...)
    def pending_class(self, classdef):
        c = Class(self, classdef)
        self.pending_node(c)
        return c
    def pending_record(self, record):
        # NOTE(review): 'Record' is not imported in this module — confirm
        # this method is unused or that Record is injected elsewhere
        r = Record(self, record)
        self.pending_node(r)
        return r.get_name()
    def pending_node(self, node):
        # schedule a node for rendering unless already seen
        if node in self._pending_nodes or node in self._rendered_nodes:
            return
        self._pending_nodes.add(node)
    def record_function(self, graph, name):
        self.functions[graph] = name
    def get_uniquename(self, graph, name):
        # one stable, unique javascript name per graph
        try:
            return self.function_names[graph]
        except KeyError:
            real_name = self.name_manager.uniquename(name, lenmax=1111111)
            self.function_names[graph] = real_name
            return real_name
    def record_class(self, classdef, name):
        self.classes[classdef] = name
    def register_comm_proxy(self, proxy_const, *args):
        """ Register external object which should be rendered as
        method call
        """
        self.proxies.append(XmlHttp(proxy_const, *args))
    def graph_name(self, graph):
        return self.functions.get(graph, None)
    def class_name(self, classdef):
        return self.classes.get(classdef, None)
    def record_const(self, value, type_ = None, retval='name'):
        """Intern a constant; returns its name (retval='name'), the
        AbstractConst wrapper (retval='const'), or None for primitives."""
        if type_ is None:
            type_ = typeOf(value)
        if self.is_primitive(type_):
            return None
        const = AbstractConst.make(self, value)
        if not const:
            return None
        try:
            if retval == 'name':
                return self.consts[const]
            else:
                # raises KeyError for unseen constants, falling through
                # to the registration code below
                self.consts[const]
                return self.reverse_consts[self.consts[const]]
        except KeyError:
            if self.genoo.config.translation.verbose:
                log("New const:%r"%value)
                if isinstance(value, ootype._string):
                    log(value._str)
            else:
                log.dot()
            name = const.get_name()
            # disambiguate colliding names with a numeric suffix
            if name in self.const_names:
                name += '__%d' % len(self.consts)
            self.consts[const] = name
            self.reverse_consts[name] = const
            self.const_names.add(name)
            self.pending_consts.append((const,name))
        if retval == 'name':
            return name
        else:
            return const
    def gen_constants(self, ilasm, pending):
        # drain the pending queue, letting each constant register the
        # sub-constants it refers to (which may re-fill the queue)
        try:
            while True:
                const,name = self.pending_consts.pop()
                const.record_fields()
        except IndexError:
            pass
        if pending:
            return
        if not self.rendered:
            ilasm.begin_consts(self.const_var.name)
        def generate_constants(consts):
            # yield constants in dependency order: a constant is rendered
            # only after everything it depends on
            all_c = [const for const,name in consts.iteritems()]
            dep_ok = set()
            while len(all_c) > 0:
                const = all_c.pop()
                if const not in self.rendered:
                    to_render = True
                    if hasattr(const, 'depends_on') and const.depends_on:
                        for i in const.depends_on:
                            if i not in self.rendered and i not in dep_ok:
                                assert i.depends is None or const in i.depends
                                to_render = False
                                continue
                    if to_render and (not hasattr(const, 'depends')) or (not const.depends) or const in dep_ok:
                        yield const,consts[const]
                        self.rendered.add(const)
                    else:
                        # requeue after the dependencies
                        all_c.append(const)
                        for i in const.depends:
                            all_c.append(i)
                        dep_ok.add(const)
        # We need to keep track of fields to make sure
        # our items appear earlier than us
        to_init = []
        for const, name in generate_constants(self.consts):
            if self.genoo.config.translation.verbose:
                log("Recording %r %r"%(const,name))
            else:
                log.dot()
            ilasm.load_local(self.const_var)
            const.init(ilasm)
            ilasm.set_field(None, name)
            ilasm.store_void()
            to_init.append((const, name))
            #ilasm.field(name, const.get_type(), static=True)
        # second pass: initialize fields once every constant object exists
        for const, name in to_init:
            const.init_fields(ilasm, self.const_var, name)
    def load_const(self, type_, value, ilasm):
        # emit code that loads 'value' onto the stack
        if self.is_primitive(type_):
            ilasm.load_const(self.cts.primitive_repr(type_, value))
        else:
            try:
                return self.consts[BuiltinConst(value)]
            except KeyError:
                name = self.record_const(value)
                ilasm.load_local(self.const_var)
                ilasm.get_field(name)
            #assert False, 'Unknown constant %s' % const
#assert False, 'Unknown constant %s' % const
class AbstractConst(object):
    """Base class for interned constants.  Subclasses define get_key (the
    interning identity), get_name (javascript name stem), init (emit the
    object), init_fields (emit field initialization) and record_fields
    (register sub-constants)."""
    def __init__(self, db, const):
        self.db = db
        self.const = const
        self.cts = db.genoo.TypeSystem(db)
        self.depends = set()      # constants this one depends on
        self.depends_on = set()   # constants depending on this one
    # identity/hash delegate to the subclass-provided key
    def __hash__(self):
        return hash(self.get_key())
    def __eq__(self, other):
        return (other.__class__ is self.__class__ and
                other.get_key() == self.get_key())
    def __ne__(self, other):
        return not (self == other)
    def make(db, const):
        """Factory: wrap an ootype/bltregistry constant in the matching
        AbstractConst subclass (None for empty class constants)."""
        if isinstance(const, ootype._view):
            static_type = const._TYPE
            const = const._inst
        else:
            static_type = None
        if isinstance(const, ootype._instance):
            return InstanceConst(db, const, static_type)
        elif isinstance(const, ootype._list):
            return ListConst(db, const)
        elif isinstance(const, ootype._record):
            return RecordConst(db, const)
        elif isinstance(const, ootype._string):
            return StringConst(db, const)
        elif isinstance(const, ootype._dict):
            return DictConst(db, const)
        elif isinstance(const, bltregistry._external_inst):
            return ExtObject(db, const)
        elif isinstance(const, ootype._class):
            if const._INSTANCE:
                return ClassConst(db, const)
            else:
                return None
        else:
            assert False, 'Unknown constant: %s %r' % (const, typeOf(const))
    make = staticmethod(make)
    # default no-op hooks, overridden in subclasses
    def get_name(self):
        pass
    def get_type(self):
        pass
    def init(self, ilasm):
        pass
    def init_fields(self, ilasm, const_var, name):
        pass
    def record_fields(self):
        pass
class InstanceConst(AbstractConst):
    """Constant instance of an ootype class."""
    def __init__(self, db, obj, static_type):
        # NOTE: deliberately does not call AbstractConst.__init__
        self.depends = set()
        self.depends_on = set()
        self.db = db
        self.cts = db.genoo.TypeSystem(db)
        self.obj = obj
        if static_type is None:
            self.static_type = obj._TYPE
        else:
            self.static_type = static_type
            self.cts.lltype_to_cts(obj._TYPE) # force scheduling of obj's class
    def get_key(self):
        return self.obj
    def get_name(self):
        return self.obj._TYPE._name.replace('.', '_')
    def get_type(self):
        return self.cts.lltype_to_cts(self.static_type)
    def init(self, ilasm):
        if not self.obj:
            # null instance
            ilasm.load_void()
            return
        classdef = self.obj._TYPE
        try:
            # externals keep their original (last-segment) name
            classdef._hints['_suggested_external']
            ilasm.new(classdef._name.split(".")[-1])
        except KeyError:
            ilasm.new(classdef._name.replace(".", "_"))
    def record_fields(self):
        if not self.obj:
            return
        INSTANCE = self.obj._TYPE
        #while INSTANCE:
        for i, (_type, val) in INSTANCE._allfields().items():
            if _type is not ootype.Void:
                # intern each non-void field value and link dependencies
                name = self.db.record_const(getattr(self.obj, i), _type, 'const')
                if name is not None:
                    self.depends.add(name)
                    name.depends_on.add(self)
    def init_fields(self, ilasm, const_var, name):
        if not self.obj:
            return
        INSTANCE = self.obj._TYPE
        #while INSTANCE:
        for i, (_type, el) in INSTANCE._allfields().items():
            if _type is not ootype.Void:
                ilasm.load_local(const_var)
                self.db.load_const(_type, getattr(self.obj, i), ilasm)
                ilasm.set_field(None, "%s.%s"%(name, i))
                ilasm.store_void()
class RecordConst(AbstractConst):
    """Constant ootype record (tuple rendered as a js object)."""
    def get_name(self):
        return "const_tuple"
    def init(self, ilasm):
        if not self.const:
            ilasm.load_void()
        else:
            ilasm.new_obj()
    def record_fields(self):
        if not self.const:
            return
        # intern each item and link dependencies both ways
        for i in self.const._items:
            name = self.db.record_const(self.const._items[i], None, 'const')
            if name is not None:
                self.depends.add(name)
                name.depends_on.add(self)
    def get_key(self):
        return self.const
    def init_fields(self, ilasm, const_var, name):
        if not self.const:
            return
        #for i in self.const.__dict__["_items"]:
        for i in self.const._items:
            ilasm.load_local(const_var)
            el = self.const._items[i]
            self.db.load_const(typeOf(el), el, ilasm)
            ilasm.set_field(None, "%s.%s"%(name, i))
            ilasm.store_void()
class ListConst(AbstractConst):
    """Constant ootype list (rendered as a js array)."""
    def get_name(self):
        return "const_list"
    def init(self, ilasm):
        if not self.const:
            ilasm.load_void()
        else:
            ilasm.new_list()
    def record_fields(self):
        if not self.const:
            return
        # intern each element and link dependencies both ways
        for i in self.const._list:
            name = self.db.record_const(i, None, 'const')
            if name is not None:
                self.depends.add(name)
                name.depends_on.add(self)
    def get_key(self):
        return self.const
    def init_fields(self, ilasm, const_var, name):
        if not self.const:
            return
        # emit list[i] = <element> for every index
        for i in xrange(len(self.const._list)):
            ilasm.load_str("%s.%s"%(const_var.name, name))
            el = self.const._list[i]
            self.db.load_const(typeOf(el), el, ilasm)
            self.db.load_const(typeOf(i), i, ilasm)
            ilasm.list_setitem()
            ilasm.store_void()
class StringConst(AbstractConst):
    """Constant ootype string, interned by its character content."""
    def get_name(self):
        return "const_str"
    def get_key(self):
        return self.const._str
    def init(self, ilasm):
        if self.const:
            s = self.const._str
            # do some escaping
            #s = s.replace("\n", "\\n").replace('"', '\"')
            #s = repr(s).replace("\"", "\\\"")
            # repr() provides quoting/escaping for the js literal
            ilasm.load_str("%s" % repr(s))
        else:
            # null string
            ilasm.load_str("undefined")
    def init_fields(self, ilasm, const_var, name):
        pass
class ClassConst(AbstractConst):
    """Constant referring to a class object; interned and rendered by the
    mangled instance name."""
    def __init__(self, db, const):
        super(ClassConst, self).__init__(db, const)
        self.cts.lltype_to_cts(const._INSTANCE) # force scheduling of class
    def get_key(self):
        return self.get_name()
    def get_name(self):
        # bug fix: the original defined get_name twice; the first
        # definition (returning the literal "const_class") was dead code,
        # silently shadowed by this one
        return self.const._INSTANCE._name.replace(".", "_")
    def init(self, ilasm):
        ilasm.load_const("%s" % self.get_name())
class BuiltinConst(AbstractConst):
    """Constant referring to a builtin name; keyed by, named after, and
    rendered verbatim as that name."""
    def __init__(self, name):
        # NOTE: deliberately skips AbstractConst.__init__ — a builtin
        # carries no db/const state
        self.name = name

    def get_name(self):
        return self.name

    # the interning key of a builtin is simply its name
    get_key = get_name

    def init_fields(self, *args):
        pass

    def init(self, ilasm):
        ilasm.load_str(self.name)
class DictConst(RecordConst):
    """Constant ootype dict (rendered with js list/item assignments)."""
    def record_const(self, co):
        # intern one key or value and link dependencies both ways
        name = self.db.record_const(co, None, 'const')
        if name is not None:
            self.depends.add(name)
            name.depends_on.add(self)
    def record_fields(self):
        if not self.const:
            return
        # both keys and values may themselves be constants
        for i in self.const._dict:
            self.record_const(i)
            self.record_const(self.const._dict[i])
    def init_fields(self, ilasm, const_var, name):
        if not self.const:
            return
        # emit dict[key] = <value> for every entry
        for i in self.const._dict:
            ilasm.load_str("%s.%s"%(const_var.name, name))
            el = self.const._dict[i]
            self.db.load_const(typeOf(el), el, ilasm)
            self.db.load_const(typeOf(i), i, ilasm)
            ilasm.list_setitem()
            ilasm.store_void()
class ExtObject(AbstractConst):
    """Constant external (BasicExternal) object; rendered either as an
    XMLHttpRequest proxy or by its prebuilt _render_name."""
    def __init__(self, db, const):
        # NOTE: deliberately does not call AbstractConst.__init__
        self.db = db
        self.const = const
        self.name = self.get_name()
        self.depends = set()
        self.depends_on = set()
    def get_key(self):
        return self.name
    def get_name(self):
        # last dotted segment of the type name, minus a 2-char suffix
        return self.const._TYPE._name.split('.')[-1][:-2]
    def init(self, ilasm):
        _class = self.const._TYPE._class_
        if getattr(_class, '_render_xmlhttp', False):
            # render as an asynchronous communication proxy
            use_xml = getattr(_class, '_use_xml', False)
            base_url = getattr(_class, '_base_url', "") # XXX: should be
            method = getattr(_class, '_use_method', 'GET')
            # on per-method basis
            self.db.register_comm_proxy(self.const, self.name, use_xml, base_url, method)
            ilasm.new(self.get_name())
        else:
            # Otherwise they just exist, or it's not implemented
            if not hasattr(self.const.value, '_render_name'):
                import sys
                # NOTE(review): the adjacent string literals concatenate
                # with no separator, yielding "...render_name,don't know
                # how to render" with no space or newline — confirm intended
                sys.stderr.write(
                    "Prebuilt constant %s has no attribute _render_name,"
                    "don't know how to render" % self.const.value)
                #raise ValueError(
                #    "Prebuilt constant %s has no attribute _render_name,"
                #    "don't know how to render" % self.const.value)
                ilasm.load_str("0")
                return
            ilasm.load_str(self.const.value._render_name)
| Python |
""" tester - support module for testing js code inside python
Needs to be imported in case one wants tests involving calling BasicExternal
methods
"""
from pypy.rpython.ootypesystem.bltregistry import BasicExternal
def __getattr__(self, attr):
    """Replacement for BasicExternal.__getattribute__ (installed below).

    Plain attributes and non-registered callables pass through untouched.
    Registered methods are wrapped so that, when the last positional
    argument is a callable, it is treated as a callback: the wrapped
    method is called without it and the (callback, result) pair is queued
    on self.__callbacks for later delivery by schedule_callbacks().
    """
    val = super(BasicExternal, self).__getattribute__(attr)
    if not callable(val) or attr not in self._methods:
        return val # we don't do anything special
    # otherwise....
    def wrapper(*args, **kwargs):
        args = list(args)
        # do this only if last arg is callable
        if not (len(args) > 0 and callable(args[-1])):
            return val(*args, **kwargs)
        callback = args.pop()
        res = val(*args, **kwargs)
        if not hasattr(self, '__callbacks'):
            self.__callbacks = []
        # NOTE(review): the callback path returns None (the result is only
        # delivered later via schedule_callbacks) — confirm intended
        self.__callbacks.append((callback, res))
    wrapper.func_name = attr
    return wrapper
# monkeypatch attribute access on every BasicExternal subclass
BasicExternal.__getattribute__ = __getattr__
def schedule_callbacks(*args):
    """Deliver any pending callbacks queued on the given objects.

    Each object's '__callbacks' attribute (if present) holds
    (callback, result) pairs recorded by the patched BasicExternal
    attribute access; call each callback with its recorded result.
    """
    for obj in args:
        for pending_callback, result in getattr(obj, '__callbacks', ()):
            pending_callback(result)
| Python |
import py, os, re, subprocess
from pypy.translator.translator import TranslationContext
from pypy.translator.backendopt.all import backend_optimizations
from pypy.translator.js.js import JS
from pypy.translator.js.test.browsertest import jstest
from pypy.translator.js import conftest
from pypy.translator.js.log import log
from pypy.conftest import option
from pypy.rpython.test.tool import BaseRtypingTest, OORtypeMixin
from pypy.rlib.nonconst import NonConstant
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.llinterp import LLException
log = log.runtest
# command-line driven configuration (see conftest): run through a real
# browser and/or the TurboGears demo server
use_browsertest = conftest.option.browser
use_tg = conftest.option.tg
# first port used for tgtest servers; incremented after every run
port = 8080
class JSException(LLException):
    """Raised when the javascript interpreter reports an error."""
    pass
def _CLI_is_on_path():
    """Return whether a 'js' javascript CLI (we recommend Spidermonkey)
    is available on $PATH."""
    return py.path.local.sysfind('js') is not None
class compile_function(object):
    """Translate an RPython function to javascript and make it callable
    from tests, either through a browser, the TurboGears test server, or
    a local 'js' CLI subprocess."""
    def __init__(self, function, annotations, stackless=False, view=False, html=None, is_interactive=False, root = None, run_browser = True, policy = None):
        if not use_browsertest and not _CLI_is_on_path():
            py.test.skip('Javascript CLI (js) not found')
        self.html = html
        self.is_interactive = is_interactive
        # annotate and rtype the function, then generate javascript
        t = TranslationContext()
        if policy is None:
            from pypy.annotation.policy import AnnotatorPolicy
            policy = AnnotatorPolicy()
            policy.allow_someobjects = False
        ann = t.buildannotator(policy=policy)
        ann.build_types(function, annotations)
        if view or option.view:
            t.view()
        t.buildrtyper(type_system="ootype").specialize()
        if view or option.view:
            t.view()
        #self.js = JS(t, [function, callback_function], stackless)
        self.js = JS(t, function, stackless)
        self.js.write_source()
        if root is None and use_tg:
            from pypy.translator.js.demo.jsdemo.controllers import Root
            self.root = Root
        else:
            self.root = root
        self.run_browser = run_browser
        self.function_calls = []
    def source(self):
        # the generated javascript source
        return self.js.tmpfile.open().read()
    def _conv(self, v):
        # convert a python value to its javascript literal form
        if isinstance(v, str):
            return repr(v)
        return str(v).lower()
    def __call__(self, *kwds):
        return self.call(None, kwds)
    def call(self, entry_function, kwds):
        args = ', '.join([self._conv(kw) for kw in kwds]) #lowerstr for (py)False->(js)false, etc.
        if entry_function is None:
            entry_function = self.js.translator.graphs[0].name
        else:
            entry_function = self.js.translator.annotator.bookkeeper.getdesc(entry_function).cached_graph(None)
        function_call = "%s(%s)" % (entry_function, args)
        self.function_calls.append(function_call)
        #if self.js.stackless:
        #    function_call = "slp_entry_point('%s')" % function_call
        if use_browsertest:
            # drive a real browser through the browsertest helper
            if not use_tg:
                log("Used html: %r" % self.html)
                output = jstest(self.js.filename, function_call, use_browsertest, self.html, self.is_interactive)
            else:
                global port
                from pypy.translator.js.test.tgtest import run_tgtest
                out = run_tgtest(self, tg_root = self.root, port=port, run_browser=self.run_browser).results
                assert out[1] == 'undefined' or out[1] == ""
                output = out[0]
                port += 1
        else:
            # cmd = 'echo "load(\'%s\'); print(%s)" | js 2>&1' % (self.js.filename, function_call)
            # log(cmd)
            # output = os.popen(cmd).read().strip()
            # feed all recorded calls to the 'js' CLI; the last call's
            # result is printed wrapped in single quotes for parsing below
            js = subprocess.Popen(["js"],
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            input = "load(%r);\n" % self.js.filename.strpath
            for call in self.function_calls[:-1]:
                input += "%s;\n" % call
            input += "print(\"'\" + %s + \"'\");\n" % self.function_calls[-1]
            js.stdin.write(input)
            stdout, stderr = js.communicate()
            output = (stderr + stdout).strip()
        for s in output.split('\n'):
            log(s)
        # extract the quoted result; anything else is an error
        m = re.match("'(.*)'", output, re.DOTALL)
        if not m:
            log("Error: %s" % output)
            raise JSException(output)
        return self.reinterpret(m.group(1))
    def reinterpret(cls, s):
        """Convert the js textual result back into a python value."""
        #while s.startswith(" "):
        #    s = s[1:] # :-) quite inneficient, but who cares
        if s == 'false':
            res = False
        elif s == 'true':
            res = True
        elif s == 'undefined':
            res = None
        elif s == 'inf':
            res = 1e300 * 1e300
        elif s == 'NaN':
            res = (1e300 * 1e300) / (1e300 * 1e300)
        elif s.startswith('[') or s.startswith('('):
            # array-ish result: recurse on the comma-separated items
            l = s[1:-1].split(',')
            res = [cls.reinterpret(i) for i in l]
        else:
            try:
                res = float(s)
                if float(int(res)) == res:
                    return int(res)
            except ValueError:
                res = str(s)
        return res
    reinterpret = classmethod(reinterpret)
class JsTest(BaseRtypingTest, OORtypeMixin):
    """Rtyping-test mixin that runs functions through the javascript
    backend instead of the llinterpreter."""
    def _compile(self, _fn, args, policy=None):
        # wrap _fn in a zero-argument entry point that bakes the given
        # arguments in as NonConstants and stringifies the result
        argnames = _fn.func_code.co_varnames[:_fn.func_code.co_argcount]
        func_name = _fn.func_name
        if func_name == '<lambda>':
            func_name = 'func'
        source = py.code.Source("""
        def %s():
            from pypy.rlib.nonconst import NonConstant
            res = _fn(%s)
            if isinstance(res, type(None)):
                return None
            else:
                return str(res)"""
            % (func_name, ",".join(["%s=NonConstant(%r)" % (name, i) for
                                       name, i in zip(argnames, args)])))
        exec source.compile() in locals()
        return compile_function(locals()[func_name], [], policy=policy)
    def string_to_ll(self, s):
        return s
    def interpret(self, fn, args, policy=None):
        f = self._compile(fn, args, policy)
        res = f(*args)
        return res
    def interpret_raises(self, exception, fn, args):
        #import exceptions # needed by eval
        #try:
        #import pdb; pdb.set_trace()
        try:
            res = self.interpret(fn, args)
        except JSException, e:
            # match the expected exception class name in the js error text
            s = e.args[0]
            assert s.startswith('uncaught exception:')
            assert re.search(exception.__name__, s)
        else:
            raise AssertionError("Did not raise, returned %s" % res)
        #except ExceptionWrapper, ex:
        #    assert issubclass(eval(ex.class_name), exception)
        #else:
        #    assert False, 'function did raise no exception at all'
    def ll_to_string(self, s):
        return str(s)
    def ll_to_list(self, l):
        return l
    def ll_unpack_tuple(self, t, length):
        assert len(t) == length
        return tuple(t)
    def class_name(self, value):
        # strip the '_instance' style suffix and any module path
        return value[:-8].split('.')[-1]
    def is_of_instance_type(self, val):
        m = re.match("^<.* object>$", val)
        return bool(m)
    def read_attr(self, obj, name):
        py.test.skip('read_attr not supported on genjs tests')
def check_source_contains(compiled_function, pattern):
    """Search the generated javascript source of *compiled_function* for
    the regex *pattern*; returns the match object or None."""
    import re
    generated_source = compiled_function.js.tmpfile.open().read()
    return re.search(pattern, generated_source)
| Python |
from BaseHTTPServer import HTTPServer as BaseHTTPServer, BaseHTTPRequestHandler
import py
from os import system
from cgi import parse_qs
from sys import platform
from time import sleep
import webbrowser
from pypy.translator.js.log import log
log = log.browsertest
class HTTPServer(BaseHTTPServer):
    # allow quick restarts during tests without 'address already in use'
    allow_reuse_address = True
class config:
    """Static configuration: default port and the HTML scaffolding."""
    http_port = 10001
    # page served to the browser: embeds the generated javascript, runs
    # the testcase in runTest() and POSTs the result back via resultform
    html_page = """<html>
<head>
<script type="text/javascript">
%(jscode)s
// code for running the unittest...
function runTest() {
    var result = undefined;
    try {
        result = %(jstestcase)s;
    } catch (e) {
        try {
            result = "throw '" + e.toSource() + "'";
        } catch (dummy) {
            result = "throw 'unknown javascript exception'";
        }
    }

    if (result != undefined || !in_browser) { // if valid result (no timeout)
        handle_result(result);
    }
};

function handle_result(result) {
    var resultform = document.forms['resultform'];
    if (typeof(result) == typeof({})) {
        result = result.chars; //assume it's a rpystring
    }
    resultform.result.value = result;
    resultform.submit();
};
</script>
</head>
<body onload="runTest()">
%(jsfilename)s
<form method="post" name="resultform" id="resultform">
<input name="result" type="hidden" value="UNKNOWN" />
</form>
<div id="logdiv"></div>
</body>
</html>"""
    # served after a result was POSTed; forces a refresh so the browser
    # polls again for the next testcase
    refresh_page = """<html>
<head>
<meta http-equiv="refresh" content="0">
</head>
<body>
<pre>
// testcase: %(jstestcase)s
%(jscode)s
</pre>
</body>
</html>"""
class TestCase(object):
    """One javascript test: the source filename, its code (read eagerly),
    the expression to evaluate in the browser, and the result reported
    back by TestHandler.do_POST (None until then).
    """
    def __init__(self, jsfilename, jstestcase):
        self.jsfilename = jsfilename
        # bug fix: the original left the file handle open until garbage
        # collection; close it deterministically
        f = open(jsfilename)
        try:
            self.jscode = f.read()
        finally:
            f.close()
        self.jstestcase = jstestcase
        self.result = None
class TestHandler(BaseHTTPRequestHandler):
    """The HTTP handler class that provides the tests and handles results"""
    def do_GET(self):
        # serves /test.html with the current testcase embedded; the module
        # global 'do_status' records which phase the protocol is in and is
        # polled by BrowserTest.get_result
        global do_status
        if self.path != "/test.html":
            self.send_error(404, "File /test.html not found")
            return
        # 'jstest' is the module-global current TestCase; these locals
        # feed the %(...)s template substitution below
        jsfilename = jstest.jsfilename
        jstestcase = jstest.jstestcase
        jscode = jstest.jscode
        if self.server.html_page:
            if self.server.is_interactive:
                isinteractive = ''
            else:
                isinteractive = 'resultform.submit();'
            try:
                html_page = open(self.server.html_page).read() % locals()
            except IOError:
                log("HTML FILE WAS NOT FOUND!!!!")
                self.send_error(404, "File %s not found" % self.server.html_page)
                return
        else:
            html_page = config.html_page % locals()
        # NOTE(review): dumps the page to cwd on every request — presumably
        # a debugging aid; confirm it is still wanted
        open("html_page.html", "w").write(html_page)
        self.serve_data('text/html', html_page)
        do_status = 'do_GET'
    def do_POST(self):
        # receives the test result from the browser's form submission
        global do_status
        if self.path != "/test.html":
            self.send_error(404, "File /test.html not found")
            return
        form = parse_qs(self.rfile.read(int(self.headers['content-length'])))
        if self.server.is_interactive:
            if not form.has_key('ok'):
                jstest.result = 'Not clicked OK'
            else:
                jstest.result = 'OK'
            #assert False, "Clicked not ok"
        else:
            jstest.result = form['result'][0]
        #we force a page refresh here because of two reason:
        # 1. we don't have the next testcase ready yet
        # 2. browser should ask again when we do have a test
        jsfilename = jstest.jsfilename
        jstestcase = jstest.jstestcase
        jscode = jstest.jscode
        refresh_page = config.refresh_page % locals()
        self.serve_data('text/html', refresh_page)
        do_status = 'do_POST'
    def serve_data(self, content_type, data):
        # write a complete 200 response with the given body
        self.send_response(200)
        self.send_header("Content-type", content_type)
        self.send_header("Content-length", len(data))
        self.end_headers()
        self.wfile.write(data)
class BrowserTest(object):
    """The browser driver"""

    def start_server(self, port, html_page, is_interactive):
        # bind on all interfaces; the handler reaches these settings
        # through self.server inside TestHandler
        server_address = ('', port)
        self.httpd = HTTPServer(server_address, TestHandler)
        self.httpd.is_interactive = is_interactive
        self.httpd.html_page = html_page

    def get_result(self):
        global do_status
        do_status = None
        # serve requests until the page was fetched (do_GET fired) and the
        # result was posted back (do_POST fired); the handler methods set
        # the do_status flag
        while do_status != 'do_GET':
            self.httpd.handle_request()
        while do_status != 'do_POST':
            self.httpd.handle_request()
        return jstest.result
def jstest(jsfilename, jstestcase, browser_to_use, html_page = None, is_interactive = False):
    """Run *jstestcase* against the generated *jsfilename* in a browser and
    return the result string the page reports back.

    NOTE: this rebinds the module-global name 'jstest' to the TestCase
    instance (TestHandler reads that global), so after the first call the
    function is only reachable through pre-existing references.
    """
    global driver, jstest
    jstest = TestCase(str(jsfilename), str(jstestcase))
    try:
        # reuse the already-running server from a previous call
        driver.httpd.html_page = html_page
        driver.httpd.is_interactive = is_interactive
    except (NameError, AttributeError):
        # first call: no driver/server exists yet.  The original bare
        # 'except:' also swallowed unrelated errors; only the missing
        # driver (NameError) or missing httpd (AttributeError) is expected.
        driver = BrowserTest()
        driver.start_server(config.http_port, html_page, is_interactive)
    if browser_to_use == 'default':
        browser_to_use = None       # webbrowser.get(None) picks the default
    if browser_to_use != 'none':
        webbrowser.get(browser_to_use).open('http://localhost:%d/test.html'
                                            % config.http_port)
    result = driver.get_result()
    return result
| Python |
""" TurboGears browser testing utility
"""
import thread
import pkg_resources
pkg_resources.require("TurboGears")
import cherrypy
import os
import sys
import webbrowser
from pypy.translator.js.demo.jsdemo import controllers
# TurboGears dev config shipped one directory above the jsdemo controllers
conf_file = os.path.join(os.path.dirname(controllers.__file__), "..", "dev.cfg")
class run_tgtest(object):
    """Start a TurboGears/CherryPy server that serves the compiled
    function's JS, optionally open a browser, and collect the results."""

    def __init__(self, compiled_fun, tg_root = None, port = 8080, run_browser=True):
        def cont():
            # runs in a background thread: wait until the server is up,
            # point a browser at it, harvest the results, then shut down
            cherrypy.server.wait()
            if run_browser:
                webbrowser.open("http://localhost:%d/" % port)
            cherrypy.root.wait_for_results()
            self.results = cherrypy.root.results
            cherrypy.server.stop()
            cherrypy.server.interrupt = SystemExit()

        cherrypy.config.update(file=conf_file)
        cherrypy.config.update({'global':{'server.socketPort':port}})
        if tg_root is None:
            cherrypy.root = controllers.Root()
        else:
            cherrypy.root = tg_root()
        # hand the generated JS source and its entry-point name to the page
        cherrypy.root.jssource = compiled_fun.js.tmpfile.open().read()
        cherrypy.root.jsname = compiled_fun.js.translator.graphs[0].name
        thread.start_new_thread(cont, ())
        sys.path.insert(1, os.path.join(os.path.dirname(controllers.__file__), ".."))
        # blocks until cont() calls cherrypy.server.stop()
        cherrypy.server.start()
| Python |
import sys
from pypy.translator.llvm.log import log
from pypy.rpython.lltypesystem import lltype
# narrow the shared llvm logger to this module's "pyrex" channel
log = log.pyrex
# Mapping from RPython low-level primitive types to their C spelling,
# used when emitting the Pyrex wrapper's extern declarations.
PRIMITIVES_TO_C = {lltype.Bool: "char",
                   lltype.Float: "double",
                   lltype.Char: "char",
                   }

# Signed/Unsigned depend on the native word size of the host platform.
# 32 bit platform
if sys.maxint == 2**31-1:
    PRIMITIVES_TO_C.update({
        lltype.Signed: "int",
        lltype.Unsigned: "unsigned int" })
# 64 bit platform
elif sys.maxint == 2**63-1:
    PRIMITIVES_TO_C.update({
        lltype.Signed: "long",
        lltype.Unsigned: "unsigned long" })
else:
    assert False, "Unsupported platform"
def write_pyx_wrapper(genllvm, targetpath):
    """Write a Pyrex (.pyx) wrapper for the compiled entry point to
    *targetpath*: extern-declares the C entry point and emits a
    Python-callable <name>_wrapper doing one-time startup and an
    LLVM-exception check around each call."""
    funcgen = genllvm.entrynode

    def c_declaration():
        # C prototype of the LLVM-generated __entrypoint__ function
        returntype = PRIMITIVES_TO_C[
            funcgen.graph.returnblock.inputargs[0].concretetype]
        inputargtypes = [PRIMITIVES_TO_C[arg.concretetype]
                         for arg in funcgen.graph.startblock.inputargs]
        result = "%s __entrypoint__%s(%s)" % (returntype, funcgen.ref.lstrip("%"),
                                              ", ".join(inputargtypes))
        return result

    lines = []
    append = lines.append
    # argument names, with the leading '%' of LLVM refs stripped
    inputargs = funcgen.db.repr_arg_multi(funcgen.graph.startblock.inputargs)
    inputargs = [x.strip("%") for x in inputargs]
    # NOTE(review): the leading whitespace inside these string literals is
    # the Pyrex indentation of the generated wrapper -- do not reformat.
    append("cdef extern " + c_declaration())
    append("cdef extern int __entrypoint__raised_LLVMException()")
    append("cdef extern int Pyrex_RPython_StartupCode()")
    append("")
    append("setup = [False]")
    append("")
    append("class LLVMException(Exception):")
    append(" pass")
    append("")
    append(genllvm.db.gcpolicy.pyrex_code())
    append("def %s_wrapper(%s):" % (funcgen.ref.strip("%"), ", ".join(inputargs)))
    append(" if not setup[0]:")
    append(" startup = Pyrex_RPython_StartupCode()")
    append(" if not startup: raise Exception('Failure to startup')")
    append(" setup[0] = True")
    append(" result = __entrypoint__%s(%s)" % (funcgen.ref.strip("%"), ", ".join(inputargs)))
    append(" if __entrypoint__raised_LLVMException(): #not caught by the LLVM code itself")
    append(" raise LLVMException")
    append(" return result")
    append("")
    targetpath.write("\n".join(lines))
| Python |
import os
import sys
import types
import urllib
from pypy.objspace.flow.model import FunctionGraph
from pypy.rpython.rmodel import inputconst
from pypy.rpython.lltypesystem import lltype
from pypy.translator.llvm.codewriter import DEFAULT_CCONV
from pypy.translator.llvm.buildllvm import llvm_gcc_version
from pypy.tool.udir import udir
# LLVM symbols implemented by the C support code; get_ll() appends them to
# the caller's function list so their declarations survive stripping.
support_functions = [
    "%raisePyExc_IOError",
    "%raisePyExc_ValueError",
    "%raisePyExc_OverflowError",
    "%raisePyExc_ZeroDivisionError",
    "%raisePyExc_RuntimeError",
    "%raisePyExc_thread_error",
    "%RPyString_FromString",
    "%RPyString_AsString",
    "%RPyString_Size",
    "%RPyExceptionOccurred",
    "%LLVM_RPython_StartupCode",
    ]
def get_module_file(name):
    """Full path of *name* inside the llvm 'module' support directory."""
    moduledir = get_llvm_cpath()
    return os.path.join(moduledir, name)
def get_ll(ccode, function_names):
    """Compile the C source *ccode* with llvm-gcc and post-process the
    resulting LLVM assembly: force the default calling convention onto the
    generated functions and patch call/declare sites to match.

    function_names -- LLVM symbols to track; NOTE: the support functions
    are appended to the caller's list in place.

    Returns (declarations, implementation): the .ll text split at the
    'implementation' marker.  Raises Exception when llvm-gcc produced no
    usable output.
    """
    function_names += support_functions
    filename = str(udir.join("ccode.c"))
    f = open(filename, "w")
    try:
        f.write(ccode)
    finally:
        f.close()       # make sure the source hits disk before llvm-gcc runs
    plain = filename[:-2]       # path without the ".c" extension
    includes = get_incdirs()

    # llvm-gcc 4.x emits native asm unless told to emit LLVM assembly
    if llvm_gcc_version() < 4.0:
        emit_llvm = ''
    else:
        emit_llvm = '-emit-llvm -O0'

    # XXX localize this
    include_path = '-I/sw/include'

    cmd = "llvm-gcc %s %s %s -S %s.c -o %s.ll 2>&1" % (
        include_path, includes, emit_llvm, plain, plain)
    os.system(cmd)
    llcode = open(plain + '.ll').read()

    # strip lines
    ll_lines = []
    funcnames = dict([(k, True) for k in function_names])

    # strip declares that are in funcnames
    for line in llcode.split('\n'):
        # For some reason gcc introduces this and then we cant resolve it
        # XXX Get rid of this - when got more time on our hands
        # NOTE(review): '>= 1' skips only a non-leading occurrence of
        # "__main" -- looks deliberate but is unverified
        if line.find("__main") >= 1:
            continue

        # get rid of any of the structs that llvm-gcc introduces to struct types
        line = line.replace("%struct.", "%")

        # strip comments
        comment = line.find(';')
        if comment >= 0:
            line = line[:comment]
        line = line.rstrip()

        # find function names, declare them with the default calling convention
        if '(' in line and line[-1:] == '{':
            returntype, s = line.split(' ', 1)
            funcname, s = s.split('(', 1)
            funcnames[funcname] = True
            if line.find("internal") == -1:
                if funcname not in ["%main", "%Pyrex_RPython_StartupCode"]:
                    internal = 'internal '
                    line = '%s%s %s' % (internal, DEFAULT_CCONV, line,)
        ll_lines.append(line)

    # patch calls to function that we just declared fastcc
    ll_lines2, calltag, declaretag = [], 'call ', 'declare '
    for line in ll_lines:
        i = line.find(calltag)
        if i >= 0:
            cconv = 'ccc'
            for funcname in funcnames.iterkeys():
                if line.find(funcname) >= 0:
                    cconv = DEFAULT_CCONV
                    break
            line = "%scall %s %s" % (line[:i], cconv, line[i+len(calltag):])
        if line[:len(declaretag)] == declaretag:
            cconv = 'ccc'
            for funcname in funcnames.keys():
                if line.find(funcname) >= 0:
                    cconv = DEFAULT_CCONV
                    break
            line = "declare %s %s" % (cconv, line[len(declaretag):])
        ll_lines2.append(line)

    ll_lines2.append("declare ccc void %abort()")
    llcode = '\n'.join(ll_lines2)
    try:
        decl, impl = llcode.split('implementation')
    except ValueError:
        # llvm-gcc failed: its error output has no 'implementation' marker.
        # The original used the 'raise <string>, value' form, which is a
        # TypeError on Python >= 2.6 (string exceptions were removed) --
        # raise a real exception carrying the compiler output instead.
        raise Exception("Can't compile external function code (llcode.c): "
                        "ERROR: %s" % llcode)
    return decl, impl
def setup_externs(c_db, db):
    """Feed everything genc predeclares (types, function graphs, constants)
    into the llvm database *db*; returns the declaration list."""
    rtyper = db.translator.rtyper
    from pypy.translator.c.extfunc import predeclare_all

    # hacks to make predeclare_all work
    decls = list(predeclare_all(c_db, rtyper))

    for c_name, obj in decls:
        if isinstance(obj, lltype.LowLevelType):
            db.prepare_type(obj)
        elif isinstance(obj, FunctionGraph):
            funcptr = rtyper.getcallable(obj)
            c = inputconst(lltype.typeOf(funcptr), funcptr)
            db.prepare_arg_value(c)
        elif isinstance(lltype.typeOf(obj), lltype.Ptr):
            db.prepare_constant(lltype.typeOf(obj), obj)
        elif type(c_name) is str and type(obj) is int:
            pass #define c_name obj
        else:
            assert False, "unhandled predeclare %s %s %s" % (c_name, type(obj), obj)

    def annotatehelper(func, *argtypes):
        # NOTE(review): defined but never called in this function --
        # appears to be dead code; kept as-is
        graph = db.translator.rtyper.annotate_helper(func, argtypes)
        fptr = rtyper.getcallable(graph)
        c = inputconst(lltype.typeOf(fptr), fptr)
        db.prepare_arg_value(c)
        decls.append(("ll_" + func.func_name, graph))
        return graph.name

    return decls
def get_c_cpath():
    """Directory holding pypy.translator's C support sources."""
    from pypy.translator import translator
    cdir = os.path.dirname(translator.__file__)
    return cdir
def get_llvm_cpath():
    """Path of the 'module' directory that sits next to this file."""
    here = os.path.dirname(__file__)
    return os.path.join(here, "module")
def get_incdirs():
    """Build the '-I <dir> ' include flags passed to llvm-gcc."""
    import distutils.sysconfig
    includes = (distutils.sysconfig.EXEC_PREFIX + "/include",
                distutils.sysconfig.EXEC_PREFIX + "/include/gc",
                distutils.sysconfig.get_python_inc(),
                get_c_cpath(),
                get_llvm_cpath())
    # same result as the original '+=' loop: one "-I dir " per directory,
    # including the trailing space
    return "".join(["-I %s " % ii for ii in includes])
def generate_llfile(db, extern_decls, entrynode, standalone):
    """Assemble the C source for all external declarations and compile it
    to LLVM assembly via get_ll(); returns (declarations, implementation)."""
    ccode = []
    function_names = []

    def predeclarefn(c_name, llname):
        # map a C name onto an LLVM symbol with a #define, tracking it
        function_names.append(llname)
        assert llname[0] == "%"
        llname = llname[1:]
        assert '\n' not in llname
        ccode.append('#define\t%s\t%s\n' % (c_name, llname))

    if standalone:
        predeclarefn("__ENTRY_POINT__", entrynode.get_ref())
        ccode.append('#define ENTRY_POINT_DEFINED 1\n\n')

    for c_name, obj in extern_decls:
        if isinstance(obj, lltype.LowLevelType):
            # opaque struct forward declaration
            s = "#define %s struct %s\n%s;\n" % (c_name, c_name, c_name)
            ccode.append(s)
        elif isinstance(obj, FunctionGraph):
            funcptr = db.translator.rtyper.getcallable(obj)
            c = inputconst(lltype.typeOf(funcptr), funcptr)
            predeclarefn(c_name, db.repr_arg(c))
        elif isinstance(lltype.typeOf(obj), lltype.Ptr):
            if c_name.startswith("RPyExc_"):
                # exception objects get a raise helper prototype instead
                c_name = c_name[1:]
                ccode.append("void raise%s(char *);\n" % c_name)
            else:
                predeclarefn(c_name, db.obj2node[obj._obj].ref)
        elif type(c_name) is str and type(obj) is int:
            ccode.append("#define\t%s\t%d\n" % (c_name, obj))
        else:
            assert False, "unhandled extern_decls %s %s %s" % (c_name, type(obj), obj)

    # append protos
    ccode.append(open(get_module_file('protos.h')).read())

    # include this early to get constants and macros for any further includes
    ccode.append('#include <Python.h>\n')

    # ask gcpolicy for any code needed
    ccode.append('%s\n' % db.gcpolicy.genextern_code())

    # append our source file
    ccode.append(open(get_module_file('genexterns.c')).read())

    return get_ll("".join(ccode), function_names)
| Python |
from pypy.objspace.flow.model import Constant
from pypy.rpython.lltypesystem import lltype
from pypy.translator.llvm.log import log
from pypy.translator.llvm.structnode import getindexhelper
# narrow the shared llvm logger to this module's "opwriter" channel
log = log.opwriter
class OpRepr(object):
    """Rendered view of one flow-graph operation: the LLVM refs and types
    of its arguments and of its result, as produced by the database."""
    __slots__ = "db op retref rettype argrefs argtypes".split()

    def __init__(self, op, db):
        self.db, self.op = db, op
        # render arguments first, then the result -- same order as the
        # original code, in case the database caches while rendering
        self.argrefs = db.repr_arg_multi(op.args)
        self.argtypes = db.repr_arg_type_multi(op.args)
        self.retref = db.repr_arg(op.result)
        self.rettype = db.repr_arg_type(op.result)
class OpReprCall(OpRepr):
    """OpRepr for (in)direct calls: Void arguments are filtered out and a
    function-pointer result gets its full function-pointer type."""
    __slots__ = "db op retref rettype argrefs argtypes".split()

    def __init__(self, op, db):
        super(OpReprCall, self).__init__(op, db)
        # drop Void arguments -- they have no LLVM representation
        self.argrefs = [aref for arg, aref in zip(op.args, self.argrefs)
                        if arg.concretetype is not lltype.Void]
        self.argtypes = [atype for arg, atype in zip(op.args, self.argtypes)
                         if arg.concretetype is not lltype.Void]
        if self.db.is_function_ptr(self.op.result):
            # result is itself a function pointer: spell out its full type
            # (argtypes[0] is the callee, not part of the signature)
            self.rettype = "%s (%s)*" % (self.rettype,
                                         ", ".join(self.argtypes[1:]))
class OpReprInvoke(OpReprCall):
    """OpReprCall variant for invokes: additionally extracts the callee
    reference (or the %pypyop_* helper for non-call operations)."""
    __slots__ = "db op retref rettype argrefs argtypes functionref".split()

    def __init__(self, op, db):
        super(OpReprInvoke, self).__init__(op, db)
        if op.opname in ('direct_call', 'indirect_call'):
            # first argument is the callee; the rest are the real arguments
            self.functionref = self.argrefs[0]
            self.argrefs = self.argrefs[1:]
            self.argtypes = self.argtypes[1:]
        else:
            self.functionref = '%pypyop_' + op.opname
class OpWriter(object):
    """Translate individual flow-graph operations into LLVM 1.x
    instructions through the codewriter."""

    # opname -> llvm instruction, for plain two-operand operations
    binary_operations = {
        'float_mul' : 'mul',
        'float_add' : 'add',
        'float_sub' : 'sub',
        'float_truediv' : 'div',
        'ptr_eq' : 'seteq',
        'ptr_ne' : 'setne' }

    # generic numeric ops
    for tt in 'int llong ullong uint'.split():
        for oo in 'mul add sub and or xor'.split():
            binary_operations['%s_%s' % (tt, oo)] = oo
        binary_operations['%s_floordiv' % tt] = 'div'
        binary_operations['%s_mod' % tt] = 'rem'

    # comparison ops
    for tt in 'int llong ullong uint unichar float'.split():
        for oo in 'lt le eq ne ge gt'.split():
            binary_operations['%s_%s' % (tt, oo)] = 'set%s' % oo

    # shifts are special-cased: LLVM wants a ubyte shift amount
    shift_operations = {'int_lshift': 'shl',
                        'int_rshift': 'shr',
                        'uint_lshift': 'shl',
                        'uint_rshift': 'shr',
                        'llong_lshift': 'shl',
                        'llong_rshift': 'shr',
                        }

    # char comparisons go through an sbyte -> ubyte cast first
    char_operations = {'char_lt': 'setlt',
                       'char_le': 'setle',
                       'char_eq': 'seteq',
                       'char_ne': 'setne',
                       'char_ge': 'setge',
                       'char_gt': 'setgt'}
    def __init__(self, db, codewriter):
        self.db = db
        self.codewriter = codewriter
        # cached signed/unsigned machine-word type names
        self.word = db.get_machine_word()
        self.uword = db.get_machine_uword()
def _tmp(self, count=1):
if count == 1:
return self.db.repr_tmpvar()
else:
return [self.db.repr_tmpvar() for ii in range(count)]
def _arrayindices(self, arg):
ARRAYTYPE = arg.concretetype.TO
if isinstance(ARRAYTYPE, lltype.Array):
# skip the length field
indices = [(self.uword, 1)]
else:
assert isinstance(ARRAYTYPE, lltype.FixedSizeArray)
indices = []
return indices
    def write_operation(self, op):
        """Dispatch one flow-graph operation *op* to the matching emitter."""
        #log(op)
        if op.opname in ("direct_call", 'indirect_call'):
            opr = OpReprCall(op, self.db)
        else:
            opr = OpRepr(op, self.db)

        if op.opname.startswith('gc'):
            # gc_* operations are delegated to the active gc policy
            meth = getattr(self.db.gcpolicy, 'op' + op.opname[2:])
            meth(self.codewriter, opr)
        elif op.opname in self.binary_operations:
            self.binaryop(opr)
        elif op.opname in self.shift_operations:
            self.shiftop(opr)
        elif op.opname in self.char_operations:
            self.char_binaryop(opr)
        elif op.opname.startswith('cast_') or op.opname.startswith('truncate_'):
            if op.opname == 'cast_char_to_int':
                self.cast_char_to_int(opr)
            else:
                self.cast_primitive(opr)
        else:
            # fall back to a method named exactly after the operation
            meth = getattr(self, op.opname, None)
            if not meth:
                raise Exception, "operation %s not found" % op.opname

            # XXX bit unclean
            if self.db.genllvm.config.translation.llvm.debug:
                self.codewriter.comment(str(op))
            meth(opr)
    def _generic_pow(self, opr, onestr):
        """Expand x ** n (n a compile-time constant integer) into repeated
        multiplications; *onestr* is the literal '1' of the right type."""
        # XXX This broken as... will only work for constants
        try:
            value = "NO VALUE"
            value = opr.op.args[1].value
            operand = int(value)
        except Exception, exc:
            msg = 'XXX: Error: _generic_pow: Variable '\
                  '%s - failed to convert to int %s' % (value, str(exc))
            self.codewriter.comment(msg)
            raise Exception(msg)

        mult_type = opr.argtypes[0]
        mult_val = opr.argrefs[0]
        last_val = mult_val

        if operand < 1:
            # exponent 0 (and, incorrectly, negative exponents) -> 1
            res_val = onestr
        else:
            # chain of (operand - 1) multiplications
            res_val = mult_val
            for ii in range(operand - 1):
                res_val = self._tmp()
                self.codewriter.binaryop("mul", res_val, mult_type,
                                         last_val, mult_val)
                last_val = res_val
        # final no-op cast moves the value into the result ref
        self.codewriter.cast(opr.retref, mult_type, res_val, mult_type)
    def _skipped(self, opr):
        # emit only a comment for operations that need no generated code
        self.codewriter.comment('***Skipping operation %s()' % opr.op.opname)
    # no-ops at the LLVM level:
    keepalive = _skipped
    resume_point = _skipped

    def int_abs(self, opr):
        """abs() is implemented by a support function %pypyop_<opname>."""
        assert len(opr.argrefs) == 1
        functionref = '%pypyop_' + opr.op.opname
        self.codewriter.call(opr.retref, opr.rettype, functionref,
                             opr.argtypes, opr.argrefs)
    float_abs = int_abs
    llong_abs = int_abs

    def debug_assert(self, opr):
        # XXX could do something about assertions
        pass

    def int_pow(self, opr):
        self._generic_pow(opr, "1")
    uint_pow = int_pow

    def float_pow(self, opr):
        self._generic_pow(opr, "1.0")

    def _generic_neg(self, opr, zerostr):
        # negation emitted as (0 - x)
        self.codewriter.binaryop("sub", opr.retref, opr.argtypes[0],
                                 zerostr, opr.argrefs[0])
    def int_neg(self, opr):
        self._generic_neg(opr, "0")
    llong_neg = int_neg

    def float_neg(self, opr):
        self._generic_neg(opr, "0.0")

    def bool_not(self, opr):
        # boolean not == xor with true
        self.codewriter.binaryop("xor", opr.retref, opr.argtypes[0],
                                 opr.argrefs[0], "true")

    def int_invert(self, opr):
        # bitwise not == xor with -1
        self.codewriter.binaryop("xor", opr.retref, opr.argtypes[0],
                                 opr.argrefs[0], -1)

    def uint_invert(self, opr):
        from sys import maxint
        # xor with the all-ones unsigned word (2*maxint + 1)
        self.codewriter.binaryop("xor", opr.retref, opr.argtypes[0],
                                 opr.argrefs[0], str(maxint*2+1))
    def binaryop(self, opr):
        """Emit a plain two-operand instruction from binary_operations."""
        assert len(opr.argrefs) == 2
        name = self.binary_operations[opr.op.opname]
        self.codewriter.binaryop(name, opr.retref, opr.argtypes[0],
                                 opr.argrefs[0], opr.argrefs[1])

    def char_binaryop(self, opr):
        """Compare two chars after casting both sbytes to ubyte."""
        assert len(opr.argrefs) == 2
        name = self.char_operations[opr.op.opname]
        c1, c2 = self._tmp(2)
        self.codewriter.cast(c1, "sbyte", opr.argrefs[0], "ubyte")
        self.codewriter.cast(c2, "sbyte", opr.argrefs[1], "ubyte")
        self.codewriter.binaryop(name, opr.retref, "ubyte", c1, c2)

    def shiftop(self, opr):
        """Emit shl/shr; LLVM requires the shift amount as a ubyte."""
        op = opr.op
        name = self.shift_operations[op.opname]
        if isinstance(op.args[1], Constant):
            var = opr.argrefs[1]
        else:
            # variable shift count: cast it down to ubyte first
            var = self._tmp()
            self.codewriter.cast(var, opr.argtypes[1], opr.argrefs[1], 'ubyte')
        self.codewriter.shiftop(name, opr.retref, opr.argtypes[0],
                                opr.argrefs[0], var)
    def cast_char_to_int(self, opr):
        " works for all casts "
        # chars are sbyte; going through ubyte yields the 0..255 value
        assert len(opr.argrefs) == 1
        intermediate = self._tmp()
        self.codewriter.cast(intermediate, opr.argtypes[0],
                             opr.argrefs[0], "ubyte")
        self.codewriter.cast(opr.retref, "ubyte", intermediate, opr.rettype)

    def cast_primitive(self, opr):
        " works for all casts "
        #assert len(opr.argrefs) == 1
        self.codewriter.cast(opr.retref, opr.argtypes[0],
                             opr.argrefs[0], opr.rettype)
    same_as = cast_primitive

    def int_is_true(self, opr):
        # truth test == "!= 0"
        self.codewriter.binaryop("setne", opr.retref, opr.argtypes[0],
                                 opr.argrefs[0], "0")
    uint_is_true = int_is_true
    llong_is_true = int_is_true

    def float_is_true(self, opr):
        self.codewriter.binaryop("setne", opr.retref, opr.argtypes[0],
                                 opr.argrefs[0], "0.0")

    def ptr_nonzero(self, opr):
        self.codewriter.binaryop("setne", opr.retref, opr.argtypes[0],
                                 opr.argrefs[0], "null")

    def ptr_iszero(self, opr):
        self.codewriter.binaryop("seteq", opr.retref, opr.argtypes[0],
                                 opr.argrefs[0], "null")
    def direct_call(self, opr):
        # argrefs[0] is the callee; the rest are the actual arguments
        self.codewriter.call(opr.retref, opr.rettype, opr.argrefs[0],
                             opr.argtypes[1:], opr.argrefs[1:])

    # the following works since the extra arguments that indirect_call has
    # is of type Void, which is removed by direct_call
    indirect_call = direct_call

    def boehm_malloc(self, opr):
        self.db.gcpolicy._zeromalloc(self.codewriter, opr.retref, opr.argrefs[0], atomic=False)

    def boehm_malloc_atomic(self, opr):
        self.db.gcpolicy._zeromalloc(self.codewriter, opr.retref, opr.argrefs[0], atomic=True)

    def boehm_register_finalizer(self, opr):
        # ha, ha
        pass

    def flavored_malloc(self, opr):
        """Allocate per flavor: 'raw' -> heap malloc, 'stack' -> alloca."""
        flavor = opr.op.args[0].value
        type_ = opr.rettype[:-1]  #XXX stripping of *
        if flavor == "raw":
            self.codewriter.malloc(opr.retref, type_)
        elif flavor == "stack":
            self.codewriter.alloca(opr.retref, type_)
        else:
            raise NotImplementedError

    def flavored_malloc_varsize(self, opr):
        """Var-sized allocation; only the 'raw' flavor is implemented."""
        flavor = opr.op.args[0].value
        if flavor == "raw":
            arg_type = opr.op.args[1].value
            node = self.db.obj2node[arg_type]
            self.db.gcpolicy.var_zeromalloc(self.codewriter, opr.retref,
                                            opr.rettype, node, opr.argrefs[2],
                                            atomic=arg_type._is_atomic())
        else:
            raise NotImplementedError

    def flavored_free(self, opr):
        """Free per flavor; stack data needs no explicit free."""
        flavor = opr.op.args[0].value
        if flavor == "raw":
            self.codewriter.free(opr.argtypes[1], opr.argrefs[1])
        elif flavor == "stack":
            self.codewriter.comment('***Skipping free of stack allocated data')
        else:
            raise NotImplementedError

    def call_boehm_gc_alloc(self, opr):
        word = self.db.get_machine_word()
        self.codewriter.call(opr.retref, 'sbyte*', '%pypy_malloc',
                             [word], [opr.argrefs[0]])
    def getfield(self, opr):
        """Load a struct field: getelementptr to the field, then load.
        Void fields produce no code."""
        op = opr.op
        if opr.rettype != "void":
            index = getindexhelper(op.args[1].value,
                                   op.args[0].concretetype.TO)
            assert index != -1
            tmpvar = self._tmp()
            self.codewriter.getelementptr(tmpvar, opr.argtypes[0],
                                          opr.argrefs[0], [("uint", index)])
            self.codewriter.load(opr.retref, opr.rettype, tmpvar)
        else:
            self._skipped(opr)

    def direct_fieldptr(self, opr):
        """Pointer to a struct field, cast to a length-1 array pointer."""
        op = opr.op
        assert opr.rettype != "void"
        index = getindexhelper(op.args[1].value,
                               op.args[0].concretetype.TO)
        assert index != -1
        tmpvar = self._tmp()
        self.codewriter.getelementptr(tmpvar, opr.argtypes[0],
                                      opr.argrefs[0], [(self.uword, index)])
        # get element ptr gets a pointer to the right type, except the generated code really expected
        # an array of size 1... so we just cast it
        element_type = self.db.repr_type(op.result.concretetype.TO.OF) + '*'
        self.codewriter.cast(opr.retref, element_type, tmpvar, opr.rettype)

    def getsubstruct(self, opr):
        """Address of an embedded substruct (no load needed)."""
        index = getindexhelper(opr.op.args[1].value,
                               opr.op.args[0].concretetype.TO)
        assert opr.rettype != "void"
        indices = [(self.uword, index)]
        self.codewriter.getelementptr(opr.retref, opr.argtypes[0],
                                      opr.argrefs[0], indices)

    def setfield(self, opr):
        """Store into a struct field; void values produce no code."""
        op = opr.op
        if opr.argtypes[2] != "void":
            tmpvar = self._tmp()
            index = getindexhelper(op.args[1].value,
                                   op.args[0].concretetype.TO)
            self.codewriter.getelementptr(tmpvar, opr.argtypes[0],
                                          opr.argrefs[0], [(self.uword, index)])
            self.codewriter.store(opr.argtypes[2], opr.argrefs[2], tmpvar)
        else:
            self._skipped(opr)
    bare_setfield = setfield
    def getarrayitem(self, opr):
        """Load array[index]; void items produce no code."""
        if opr.rettype == "void":
            self._skipped(opr)
            return

        array, index = opr.argrefs
        arraytype, indextype = opr.argtypes
        tmpvar = self._tmp()
        # _arrayindices skips the length header of varsized arrays
        indices = self._arrayindices(opr.op.args[0]) + [(self.word, index)]
        self.codewriter.getelementptr(tmpvar, arraytype, array, indices)
        self.codewriter.load(opr.retref, opr.rettype, tmpvar)

    def direct_arrayitems(self, opr):
        """Pointer to item 0, cast to a length-1 array pointer."""
        assert opr.rettype != "void"

        array = opr.argrefs[0]
        arraytype = opr.argtypes[0]
        indices = self._arrayindices(opr.op.args[0]) + [(self.word, 0)]
        tmpvar = self._tmp()
        self.codewriter.getelementptr(tmpvar, arraytype, array, indices)
        # get element ptr gets a pointer to the right type, except the generated code really expected
        # an array of size 1... so we just cast it
        element_type = self.db.repr_type(opr.op.result.concretetype.TO.OF) + '*'
        self.codewriter.cast(opr.retref, element_type, tmpvar, opr.rettype)

    def direct_ptradd(self, opr):
        """Advance an item pointer by *incr* items."""
        array, incr = opr.argrefs
        arraytype, _ = opr.argtypes

        tmpvar = self._tmp()
        self.codewriter.getelementptr(tmpvar, arraytype, array, [(self.word, incr)])
        # get element ptr gets a pointer to the right type, except the generated code really expected
        # an array of size 1... so we just cast it
        element_type = self.db.repr_type(opr.op.result.concretetype.TO.OF) + '*'
        self.codewriter.cast(opr.retref, element_type, tmpvar, opr.rettype)

    def getarraysubstruct(self, opr):
        """Address of array[index] (no load)."""
        array, index = opr.argrefs
        arraytype, indextype = opr.argtypes
        indices = self._arrayindices(opr.op.args[0]) + [(self.word, index)]
        self.codewriter.getelementptr(opr.retref, arraytype, array, indices)

    def setarrayitem(self, opr):
        """Store into array[index]; void values produce no code."""
        array, index, valuevar = opr.argrefs
        arraytype, indextype, valuetype = opr.argtypes
        tmpvar = self._tmp()

        if valuetype == "void":
            self._skipped(opr)
            return

        indices = self._arrayindices(opr.op.args[0]) + [(self.word, index)]
        self.codewriter.getelementptr(tmpvar, arraytype, array, indices)
        self.codewriter.store(valuetype, valuevar, tmpvar)
    bare_setarrayitem = setarrayitem

    def getarraysize(self, opr):
        """Load the length field (index 0) of a varsized array."""
        ARRAYTYPE = opr.op.args[0].concretetype.TO
        assert isinstance(ARRAYTYPE, lltype.Array)
        tmpvar = self._tmp()
        self.codewriter.getelementptr(tmpvar, opr.argtypes[0],
                                      opr.argrefs[0], [(self.uword, 0)])
        self.codewriter.load(opr.retref, opr.rettype, tmpvar)
    def adr_delta(self, opr):
        """Difference of two addresses, computed on machine words."""
        addr1, addr2 = self._tmp(2)
        self.codewriter.cast(addr1, opr.argtypes[0], opr.argrefs[0], self.word)
        self.codewriter.cast(addr2, opr.argtypes[1], opr.argrefs[1], self.word)
        self.codewriter.binaryop("sub", opr.retref, opr.rettype, addr1, addr2)

    def _op_adr_generic(self, opr, llvm_op):
        """Address +/- integer: cast to word, operate, cast back."""
        addr, res = self._tmp(2)
        self.codewriter.cast(addr, opr.argtypes[0], opr.argrefs[0], self.word)
        self.codewriter.binaryop(llvm_op, res, self.word, addr, opr.argrefs[1])
        self.codewriter.cast(opr.retref, self.word, res, opr.rettype)

    def adr_add(self, opr):
        self._op_adr_generic(opr, "add")

    def adr_sub(self, opr):
        self._op_adr_generic(opr, "sub")

    def _op_adr_cmp(self, opr, llvm_op):
        """Compare two addresses as machine words; result must be bool."""
        addr1, addr2 = self._tmp(2)
        self.codewriter.cast(addr1, opr.argtypes[0], opr.argrefs[0], self.word)
        self.codewriter.cast(addr2, opr.argtypes[1], opr.argrefs[1], self.word)
        assert opr.rettype == "bool"
        self.codewriter.binaryop(llvm_op, opr.retref, self.word, addr1, addr2)

    def adr_eq(self, opr):
        self._op_adr_cmp(opr, "seteq")

    def adr_ne(self, opr):
        self._op_adr_cmp(opr, "setne")

    def adr_le(self, opr):
        self._op_adr_cmp(opr, "setle")

    def adr_gt(self, opr):
        self._op_adr_cmp(opr, "setgt")

    def adr_lt(self, opr):
        self._op_adr_cmp(opr, "setlt")

    def adr_ge(self, opr):
        self._op_adr_cmp(opr, "setge")

    # XXX Not sure any of this makes sense - maybe seperate policy for
    # different flavours of mallocs? Well it depend on what happens the GC
    # developments

    def raw_malloc(self, opr):
        self.codewriter.call(opr.retref, opr.rettype, "%raw_malloc",
                             opr.argtypes, opr.argrefs)

    def raw_malloc_usage(self, opr):
        # size passes through unchanged, just cast to the result type
        self.codewriter.cast(opr.retref, opr.argtypes[0], opr.argrefs[0],
                             opr.rettype)

    def raw_free(self, opr):
        self.codewriter.call(opr.retref, opr.rettype, "%raw_free",
                             opr.argtypes, opr.argrefs)

    def raw_memcopy(self, opr):
        self.codewriter.call(opr.retref, opr.rettype, "%raw_memcopy",
                             opr.argtypes, opr.argrefs)

    def raw_memclear(self, opr):
        self.codewriter.call(opr.retref, opr.rettype, "%raw_memclear",
                             opr.argtypes, opr.argrefs)
    def raw_store(self, opr):
        """Store a value at address + offset (offset counted in units of
        the value type, applied via getelementptr)."""
        arg_addr, arg_dummy, arg_incr, arg_value = opr.argrefs
        (argtype_addr, argtype_dummy,
         argtype_incr, argtype_value) = opr.argtypes

        cast_addr = self._tmp()
        addr_type = argtype_value + "*"

        # cast to the correct type before arithmetic/storing
        self.codewriter.cast(cast_addr, argtype_addr, arg_addr, addr_type)

        # pointer arithmetic
        if arg_incr:
            incr_addr = self._tmp()
            self.codewriter.getelementptr(incr_addr,
                                          addr_type,
                                          cast_addr,
                                          [(self.word, arg_incr)],
                                          getptr=False)
            cast_addr = incr_addr
        self.codewriter.store(argtype_value, arg_value, cast_addr)

    def raw_load(self, opr):
        """Load a value of the result type from address + offset."""
        arg_addr, arg_dummy, arg_incr = opr.argrefs
        argtype_addr, argtype_dummy, argtype_incr = opr.argtypes

        cast_addr = self._tmp()
        addr_type = opr.rettype + "*"

        # cast to the correct type before arithmetic/loading
        self.codewriter.cast(cast_addr, argtype_addr, arg_addr, addr_type)

        # pointer arithmetic
        if arg_incr:
            incr_addr = self._tmp()
            self.codewriter.getelementptr(incr_addr,
                                          addr_type,
                                          cast_addr,
                                          [(self.word, arg_incr)],
                                          getptr=False)
            cast_addr = incr_addr
        self.codewriter.load(opr.retref, opr.rettype, cast_addr)

    def debug_print(self, opr):
        pass # XXX

    def debug_fatalerror(self, opr):
        # XXX message?
        self.codewriter.call(None, "void", "%abort", [], [])

    def hint(self, opr):
        self.same_as(opr)

    def is_early_constant(self, opr):
        # If it gets this far it is always false
        self.codewriter.cast(opr.retref, 'bool',
                             'false', opr.rettype)
| Python |
from pypy.translator.llvm.log import log
from pypy.translator.llvm.node import LLVMNode, ConstantLLVMNode
from pypy.rpython.lltypesystem import lltype
def getindexhelper(name, struct):
    """Position of field *name* among the non-void fields of *struct*,
    or -1 when the field exists but is void (filtered out)."""
    assert name in list(struct._names)
    try:
        return struct._names_without_voids().index(name)
    except ValueError:
        return -1
# narrow the shared llvm logger to this module's "structnode" channel
log = log.structnode
class StructTypeNode(LLVMNode):
    """Type node emitting the LLVM type declaration for an lltype.Struct."""
    __slots__ = "db struct ref name".split()
    prefix = '%structtype_'

    def __init__(self, db, struct):
        assert isinstance(struct, lltype.Struct)
        self.db = db
        self.struct = struct
        name = self.struct._name
        # make_ref uniquifies the name; self.name is the ref minus prefix
        self.ref = self.make_ref(self.prefix, name)
        self.name = self.ref[len(self.prefix):]

    def __str__(self):
        return "<StructTypeNode %r>" %(self.ref,)

    def _fields(self):
        # the field values/types, void fields excluded
        return [getattr(self.struct, name)
                for name in self.struct._names_without_voids()]

    def setup(self):
        # Recurse
        for field in self._fields():
            self.db.prepare_type(field)

    # ______________________________________________________________________
    # main entry points from genllvm

    def writedatatypedecl(self, codewriter):
        fields_types = [self.db.repr_type(f) for f in self._fields()]
        codewriter.structdef(self.ref, fields_types)
class FixedSizeArrayTypeNode(StructTypeNode):
    """Type node for an lltype.FixedSizeArray, declared as an LLVM array."""
    prefix = '%fixarray_'

    def __str__(self):
        return "<FixedArrayTypeNode %r>" % self.ref

    def setup(self):
        # all items share one type -- preparing the first is sufficient
        fields = self._fields()
        if fields:
            self.db.prepare_type(fields[0])

    def writedatatypedecl(self, codewriter):
        codewriter.fixedarraydef(self.ref,
                                 self.struct.length,
                                 self.db.repr_type(self.struct.OF))
class StructVarsizeTypeNode(StructTypeNode):
    """Type node for a struct whose last field is var-sized; also owns the
    ref and declaration of the struct's constructor function."""
    __slots__ = "constructor_ref constructor_decl".split()

    def __init__(self, db, struct):
        super(StructVarsizeTypeNode, self).__init__(db, struct)
        prefix = '%new_varsizestruct_'
        self.constructor_ref = self.make_ref(prefix, self.name)
        # e.g. "%structtype_X * %new_varsizestruct_X(int %len)"
        self.constructor_decl = "%s * %s(%s %%len)" % \
                                (self.ref,
                                 self.constructor_ref,
                                 self.db.get_machine_word())

    def __str__(self):
        return "<StructVarsizeTypeNode %r>" %(self.ref,)

    # ______________________________________________________________________
    # main entry points from genllvm

    def var_malloc_info(self):
        # build up a list of indices to get to the last
        # var-sized struct (or rather the according array)
        indices_to_array = []
        current = self.struct
        while isinstance(current, lltype.Struct):
            last_pos = len(current._names_without_voids()) - 1
            # struct requires uint consts
            indices_to_array.append(("uint", last_pos))
            name = current._names_without_voids()[-1]
            current = current._flds[name]
        assert isinstance(current, lltype.Array)
        return current, indices_to_array
class StructNode(ConstantLLVMNode):
    """ A struct constant. Can simply contain
    a primitive,
    a struct,
    pointer to struct/array
    """
    __slots__ = "db value structtype ref _get_ref_cache _get_types".split()
    prefix = '%structinstance_'

    def __init__(self, db, value):
        self.db = db
        self.value = value
        self.structtype = self.value._TYPE
        # second word of the repr, e.g. "<struct X ...>" -> "X..."
        name = str(value).split()[1]
        self.ref = self.make_ref(self.prefix, name)
        self._get_ref_cache = None
        self._get_types = self._compute_types()

    def __str__(self):
        return "<StructNode %r>" % (self.ref,)

    def _compute_types(self):
        # (fieldname, fieldtype) pairs, void fields excluded
        return [(name, self.structtype._flds[name])
                for name in self.structtype._names_without_voids()]

    def _getvalues(self):
        # constant representations of all (non-void) field values
        values = []
        for name, T in self._get_types:
            value = getattr(self.value, name)
            values.append(self.db.repr_constant(value)[1])
        return values

    def setup(self):
        for name, T in self._get_types:
            assert T is not lltype.Void
            value = getattr(self.value, name)
            self.db.prepare_constant(T, value)
        # if this struct is embedded in a parent, prepare the parent too
        p, c = lltype.parentlink(self.value)
        if p is not None:
            self.db.prepare_constant(lltype.typeOf(p), p)

    def get_typerepr(self):
        return self.db.repr_type(self.structtype)

    def get_childref(self, index):
        # *index* is a field name; find its position among non-void fields
        pos = 0
        found = False
        for name in self.structtype._names_without_voids():
            if name == index:
                found = True
                break
            pos += 1
        return "getelementptr(%s* %s, int 0, uint %s)" %(
            self.get_typerepr(),
            self.get_ref(),
            pos)

    def get_ref(self):
        """ Returns a reference as used for operations in blocks. """
        # XXX cache here is **dangerous** considering it can return different values :-(
        # XXX should write a test to prove this
        #if self._get_ref_cache:
        # return self._get_ref_cache
        p, c = lltype.parentlink(self.value)
        if p is None:
            ref = self.ref
        else:
            # embedded in a parent constant: address relative to the parent
            ref = self.db.get_childref(p, c)
        #XXXself._get_ref_cache = ref
        return ref

    def get_pbcref(self, toptr):
        """ Returns a reference as used per pbc. """
        return self.get_ref()

    def constantvalue(self):
        """ Returns the constant representation for this node. """
        values = self._getvalues()
        all_values = ",\n ".join(values)
        return "%s {\n %s\n }\n" % (self.get_typerepr(), all_values)
class FixedSizeArrayNode(StructNode):
    """Constant node for a fixed-size lltype array instance."""
    prefix = '%fixarrayinstance_'

    def __init__(self, db, struct):
        super(FixedSizeArrayNode, self).__init__(db, struct)
        self.array = struct
        self.arraytype = self.structtype.OF

    def __str__(self):
        return "<FixedSizeArrayNode %r>" % (self.ref,)

    def constantvalue(self):
        """ Returns the constant representation for this node. """
        elems = ",\n  ".join(self._getvalues())
        return "%s [\n  %s\n  ]\n" % (self.get_typerepr(), elems)

    def get_ref(self):
        """Reference for use inside blocks; subarrays are addressed via a
        cast of the element pointer."""
        parent, child = lltype.parentlink(self.value)
        if parent is None:
            ref = self.ref
        else:
            ref = self.db.get_childref(parent, child)

        if isinstance(self.value, lltype._subarray):
            # ptr -> array of len 1
            ref = "cast(%s* %s to %s*)" % (self.db.repr_type(self.arraytype),
                                           ref,
                                           self.db.repr_type(lltype.typeOf(self.value)))
        return ref

    def get_childref(self, index):
        return "getelementptr(%s* %s, int 0, int %s)" % (
            self.get_typerepr(),
            self.get_ref(),
            index)

    def setup(self):
        if not isinstance(self.value, lltype._subarray):
            super(FixedSizeArrayNode, self).setup()
        else:
            # a subarray has no fields of its own; only its parent
            # container needs to be prepared
            parent, child = lltype.parentlink(self.value)
            if parent is not None:
                self.db.prepare_constant(lltype.typeOf(parent), parent)
class StructVarsizeNode(StructNode):
    """ A varsize struct constant. Can simply contain
    a primitive,
    a struct,
    pointer to struct/array
    and the last element *must* be
    an array
    OR
    a series of embedded structs, which has as its last element an array.
    """

    def __str__(self):
        return "<StructVarsizeNode %r>" % (self.ref,)

    def _getvalues(self):
        # all fields but the last are rendered normally; the trailing
        # field is the variable-sized part and is handled separately
        values = []
        for name, T in self._get_types[:-1]:
            value = getattr(self.value, name)
            values.append(self.db.repr_constant(value)[1])
        values.append(self._get_lastnoderepr())
        return values

    def _get_lastnode_helper(self):
        """Return (node, repr) for the trailing varsized field."""
        lastname, LASTT = self._get_types[-1]
        assert isinstance(LASTT, lltype.Array) or (
            isinstance(LASTT, lltype.Struct) and LASTT._arrayfld)
        value = getattr(self.value, lastname)
        return self.db.repr_constant(value)

    def _get_lastnode(self):
        return self._get_lastnode_helper()[0]

    def _get_lastnoderepr(self):
        return self._get_lastnode_helper()[1]

    def setup(self):
        super(StructVarsizeNode, self).setup()

    def get_typerepr(self):
        try:
            return self._get_typerepr_cache
        except AttributeError:
            # bug fix: this was a bare "except:", which would also swallow
            # genuine errors raised by the attribute lookup machinery; only
            # the expected first-call cache miss should be caught here.
            # last type is a special case and need to be worked out recursively
            types = self._get_types[:-1]
            types_repr = [self.db.repr_type(T) for name, T in types]
            types_repr.append(self._get_lastnode().get_typerepr())
            result = "{%s}" % ", ".join(types_repr)
            self._get_typerepr_cache = result
            return result

    def get_childref(self, index):
        """Return a getelementptr expression addressing field `index`."""
        pos = 0
        found = False
        for name in self.structtype._names_without_voids():
            if name == index:
                found = True
                break
            pos += 1
        assert found
        ref = "getelementptr(%s* %s, int 0, uint %s)" %(
            self.get_typerepr(),
            super(StructVarsizeNode, self).get_ref(),
            pos)
        return ref

    def get_ref(self):
        # the global constant is emitted with its concrete (fixed-length)
        # type; cast it back to the declared varsize type for users
        ref = super(StructVarsizeNode, self).get_ref()
        typeval = self.db.repr_type(lltype.typeOf(self.value))
        ref = "cast(%s* %s to %s*)" % (self.get_typerepr(),
                                       ref,
                                       typeval)
        return ref

    def get_pbcref(self, toptr):
        """ Returns a reference as used per pbc. """
        ref = self.ref
        p, c = lltype.parentlink(self.value)
        assert p is None, "child varsize struct are NOT needed by rtyper"

        fromptr = "%s*" % self.get_typerepr()
        refptr = "getelementptr(%s %s, int 0)" % (fromptr, ref)
        ref = "cast(%s %s to %s)" % (fromptr, refptr, toptr)
        return ref
| Python |
from pypy.translator.llvm.node import ConstantLLVMNode
from pypy.translator.llvm.log import log
from pypy.translator.c.extfunc import EXTERNALS
from pypy.rpython.lltypesystem import lltype
log = log.extfuncnode
from sys import maxint
class ExtFuncSig(object):
    """Signature of an external function as seen from the LLVM side.

    `rettype` / entries of `args` are LLVM type strings, or None meaning
    "same as the C implementation" (no cast required for that slot).
    """

    def __init__(self, rettype, args):
        self.rettype, self.args = rettype, args
# signature of external functions differ from C's implementation
# maps the LLVM-level function name to the signature expected on the
# LLVM side; a None return type or argument slot means "keep the type
# of the C implementation" for that position
ext_func_sigs = {
    "%LL_os_isatty" : ExtFuncSig("int", None),
    "%LL_stack_too_big" : ExtFuncSig("int", None),
    "%LL_os_lseek" : ExtFuncSig("int", None),
    "%LL_thread_acquirelock" : ExtFuncSig("int", [None, "int"]),
    "%LL_thread_start" : ExtFuncSig(None, ["sbyte*", "sbyte*"]),
    }

# on 64-bit platforms a few more externals need machine-word wrappers
if maxint != 2**31-1:
    ext_func_sigs["%LL_os_write"] = ExtFuncSig(None, ["int", None])
    ext_func_sigs["%LL_math_ldexp"] = ExtFuncSig(None, [None, "int"])
class SimplerExternalFuncNode(ConstantLLVMNode):
    """Node for an external function that is declared verbatim under its
    own C-level name (no signature mapping, no wrapper)."""

    def __init__(self, db, value):
        self.db = db
        self.value = value
        self.ref = "%" + value._name

    def writeglobalconstants(self, codewriter):
        # an external function contributes no global constant data
        pass

    def getdecl_parts(self):
        """Return (return type repr, list of non-void argument type reprs)."""
        FUNCTYPE = self.value._TYPE
        rettype = self.db.repr_type(FUNCTYPE.RESULT)
        argtypes = [self.db.repr_type(arg) for arg in FUNCTYPE.ARGS
                    if arg is not lltype.Void]
        return rettype, argtypes

    def getdecl(self):
        """Return the complete LLVM declaration string."""
        rettype, argtypes = self.getdecl_parts()
        return "%s %s(%s)" % (rettype, self.ref, ", ".join(argtypes))

    def writedecl(self, codewriter):
        codewriter.declare(self.getdecl())
class ExternalFuncNode(ConstantLLVMNode):
    """Node for an external function implemented by the C support code.

    When the LLVM-side signature differs from the C one (see
    ext_func_sigs, or long/ulong mismatches on 64-bit platforms),
    writeimpl() emits a small wrapper that casts the arguments and the
    return value between the two signatures.
    """

    def __init__(self, db, value, extname=None):
        self.db = db
        self.value = value
        name = value._callable.__name__
        #assert name.startswith("ll")
        self.callable = value._callable
        # EXTERNALS maps either the external's name or the callable itself
        # to the C-level function name
        if extname is not None:
            mapped_name = EXTERNALS[extname]
        else:
            mapped_name = EXTERNALS[self.callable]
        self.ref = self.make_ref("%", mapped_name)

    def setup(self):
        # make the result/argument types known to the database
        self.db.prepare_type(self.value._TYPE.RESULT)
        self.db.prepare_type_multi(self.value._TYPE._trueargs())

    def __str__(self):
        return "<ExternalFuncNode %r>" % self.ref

    def _get_wrapper(self):
        """Return the ExtFuncSig describing the wrapper to emit, or None
        when the external can be called directly."""
        wrapper = ext_func_sigs.get(self.ref, None)

        if wrapper is None and maxint != 2**31-1:
            # on 64-bit platforms, long/ulong in the LLVM signature must be
            # narrowed to int/uint to match the C implementation
            #log("ref=%s" % self.ref)
            rettype, args = self.getdecl_parts()
            conversions = False
            if rettype == "long":
                rettype = "int"
                conversions = True
            elif rettype == "ulong":
                rettype = "uint"
                conversions = True
            else:
                rettype = None
            for i, a in enumerate(args):
                if a == "long":
                    args[i] = "int"
                    conversions = True
                elif a == "ulong":
                    args[i] = "uint"
                    conversions = True
                else:
                    args[i] = None
            if conversions:
                wrapper = ExtFuncSig(rettype, args)
                #log("    rettype=%s" % str(rettype))
                #log("    args   =%s" % str(args))
        return wrapper

    def getdecl_parts(self):
        """Return (return type repr, list of non-void argument type reprs)."""
        T = self.value._TYPE
        rettype = self.db.repr_type(T.RESULT)
        argtypes = [self.db.repr_type(a) for a in T.ARGS if a is not lltype.Void]
        return rettype, argtypes

    def getdecl(self):
        rettype, argtypes = self.getdecl_parts()
        return "%s %s(%s)" % (rettype, self.ref, ", ".join(argtypes))

    def writedecl(self, codewriter):
        codewriter.declare(self.getdecl())

    def writeimpl(self, codewriter):
        """Emit the casting wrapper function, if one is needed.

        NOTE(review): the wrapper is opened under self.ref and also calls
        self.ref - presumably the definition (fastcc) and the external
        declaration (ccc) are distinguished by calling convention; verify
        against the emitted declarations."""
        wrapper = self._get_wrapper()
        if wrapper is None:
            return

        rettype, argtypes = self.getdecl_parts()
        argrefs = [self.db.repr_tmpvar() for ii in argtypes]
        # (typo 'desription' kept: it is only a local name)
        arg_desription = ", ".join([
            "%s %s" % (typ_, name)
            for typ_, name in zip(argtypes, argrefs)])

        open_decl = "%s %s(%s)" % (rettype, self.ref, arg_desription)
        codewriter.openfunc(open_decl)

        returnval = self.db.repr_tmpvar()

        # call function with this
        expected_argrefs = []
        expected_argtypes = []

        # find out what the args/types should be
        if wrapper.args is not None:
            assert len(wrapper.args) == len(argtypes)
            for expected_typ, typ, ref in zip(wrapper.args,
                                              argtypes,
                                              argrefs):
                if expected_typ is not None:
                    # cast to desired arg type
                    expected_ref = self.db.repr_tmpvar()
                    codewriter.cast(expected_ref, typ, ref, expected_typ)
                else:
                    expected_ref = ref
                    expected_typ = typ

                expected_argrefs.append(expected_ref)
                expected_argtypes.append(expected_typ)
        else:
            expected_argrefs = argrefs
            expected_argtypes = argtypes

        # find out what the return type should be
        expected_rettype = wrapper.rettype or rettype

        # call
        codewriter.call(returnval, expected_rettype, self.ref,
                        expected_argtypes, expected_argrefs)

        if wrapper.rettype:
            # cast to desired return type
            tmpval = returnval
            returnval = self.db.repr_tmpvar()
            codewriter.cast(returnval, wrapper.rettype,
                            tmpval, rettype)

        codewriter.ret(rettype, returnval)
        codewriter.closefunc()

    def writeglobalconstants(self, codewriter):
        pass
| Python |
import os
import sys
import py
from pypy.translator.llvm.log import log
from pypy.translator.llvm.pyxwrapper import write_pyx_wrapper
from pypy.translator.tool import stdoutcapture
from pypy.translator.tool.cbuild import make_c_from_pyxfile
import distutils.sysconfig
def llvm_is_on_path():
    """True when both llvm-as and llvm-gcc can be found on $PATH."""
    for toolname in ("llvm-as", "llvm-gcc"):
        if py.path.local.sysfind(toolname) is None:
            return False
    return True
def _exe_version(exe, cache={}):
try:
v = cache[exe]
except KeyError:
v = os.popen(exe + ' -version 2>&1').read()
v = ''.join([c for c in v if c.isdigit()])
v = int(v) / 10.0
cache[exe] = v
return v
llvm_version = lambda: _exe_version('llvm-as')

def postfix():
    """Return the intrinsic-name suffix required by llvm 2.0 and newer
    (e.g. '%llvm.memset' becomes '%llvm.memset.i32')."""
    if llvm_version() < 2.0:
        return ''
    return '.i32'
def _exe_version2(exe):
v = os.popen(exe + ' --version 2>&1').read()
i = v.index(')')
v = v[i+2:].split()[0].split('.')
major, minor = v[0], ''.join([c for c in v[1] if c.isdigit()])
v = float(major) + float(minor) / 10.0
return v
gcc_version = lambda: _exe_version2('gcc')
llvm_gcc_version = lambda: _exe_version2('llvm-gcc')
def compile_module(module, source_files, object_files, library_files):
    """Build a CPython extension *module* with distutils.

    Writes a throw-away "<module>_setup.py" (the %(...)s slots are filled
    from this function's locals) and runs it with build_ext --inplace.
    Raises if the subprocess fails (py.process.cmdexec checks the exit
    status).
    """
    open("%s_setup.py" % module, "w").write(str(py.code.Source(
        '''
        from distutils.core import setup
        from distutils.extension import Extension
        setup(name="%(module)s",
            ext_modules = [Extension(
                name = "%(module)s",
                sources = %(source_files)s,
                libraries = %(library_files)s,
                extra_objects = %(object_files)s)])
        ''' % locals())))
    cmd ="python %s_setup.py build_ext --inplace --force" % module
    log.build(cmd)
    py.process.cmdexec(cmd)
class Builder(object):
    """Drives the external toolchain (llvm-as/llvm-upgrade/opt, llc, as,
    gcc, distutils) to turn the generated .ll file into either a CPython
    extension module or a standalone executable.

    Commands are queued as shell strings in self.cmds and run in order
    by execute_cmds().
    """

    def __init__(self, genllvm):
        self.genllvm = genllvm
        # queued shell commands, executed by execute_cmds()
        self.cmds = []

    def optimizations(self):
        """Return the optimization flags to pass to opt, as one string."""
        if llvm_version() < 2.0:
            # ask gccas which passes it would run and reuse them verbatim
            cmd = "gccas /dev/null -o /dev/null -debug-pass=Arguments 2>&1"
            gccas_output = os.popen(cmd)
            # NOTE(review): the [17:-1] slice presumably strips a fixed
            # "Pass Arguments: " prefix and the trailing newline - verify
            # against the output of the targeted llvm release
            opts = gccas_output.read()[17:-1] + " "
        else:
            opts = '-std-compile-opts'

        # these were added by Chris Lattner for some old version of llvm
        #    opts += "-globalopt -constmerge -ipsccp -deadargelim -inline " \
        #            "-instcombine -scalarrepl -globalsmodref-aa -licm -load-vn " \
        #            "-gcse -instcombine -simplifycfg -globaldce "

        # added try to reduce the amount of excessive inlining by us, llvm and gcc
        #    opts += "-inline-threshold=175 "   # default: 200
        return opts

    def compile_bytecode(self, b):
        # run llvm assembler and optimizer
        opts = self.optimizations()
        if llvm_version() < 2.0:
            self.cmds.append("llvm-as < %s.ll | opt %s -f -o %s.bc" % (b, opts, b))
        else:
            # we generate 1.x .ll files, so upgrade these first
            self.cmds.append("llvm-upgrade < %s.ll | llvm-as | opt %s -f -o %s.bc" % (b, opts, b))

    def execute_cmds(self):
        """Run all queued shell commands; on any failure the captured
        output is dumped to '<filename>.errors' and the error re-raised."""
        c = stdoutcapture.Capture(mixed_out_err=True)
        log.build("working in", py.path.local())
        try:
            try:
                for cmd in self.cmds:
                    log.build(cmd)
                    py.process.cmdexec(cmd)
            finally:
                foutput, ferror = c.done()
        except:
            # deliberately broad: any failure (including KeyboardInterrupt)
            # leaves a .errors file behind before propagating
            data = 'OUTPUT:\n' + foutput.read() + '\n\nERROR:\n' + ferror.read()
            fdump = open("%s.errors" % self.genllvm.filename, "w")
            fdump.write(data)
            fdump.close()
            log.build(data)
            raise

    def make_module(self):
        """Build a CPython extension module from the generated .ll file;
        returns (module name, directory it was built in)."""
        llvmfile = self.genllvm.filename

        # change into dirpath and store current path to change back
        dirpath = llvmfile.dirpath()
        lastdir = py.path.local()
        dirpath.chdir()

        b = llvmfile.purebasename

        # generate the llvm bytecode from ll file
        self.compile_bytecode(b)

        library_files = self.genllvm.db.gcpolicy.gc_libraries()
        gc_libs = ' '.join(['-l' + lib for lib in library_files])
        # NOTE(review): gc_libs and gc_libs_path are computed here but not
        # used in this method (only in make_standalone) - confirm
        object_files = ["-L/sw/lib"]
        if sys.platform == 'darwin':
            libdir = '/sw/lib'
            gc_libs_path = '-L%s -ldl' % libdir
        else:
            gc_libs_path = '-static'

        use_gcc = True #self.genllvm.config.translation.llvm_via_c
        if not use_gcc:
            # native route: llc -> assembler -> object file
            self.cmds.append("llc -relocation-model=pic %s.bc -f -o %s.s" % (b, b))
            self.cmds.append("as %s.s -o %s.o" % (b, b))
            object_files.append("%s.o" % b)
        else:
            # via-C route: llc C backend, then gcc
            self.cmds.append("llc %s.bc -march=c -f -o %s.c" % (b, b))
            self.cmds.append("gcc %s.c -c -O2" % b)
            object_files.append("%s.o" % b)

        try:
            self.execute_cmds()
            # use pyrex to create module for CPython
            basename = self.genllvm.filename.purebasename + '_wrapper.pyx'
            pyxfile = self.genllvm.filename.new(basename = basename)
            write_pyx_wrapper(self.genllvm, pyxfile)
            modname = pyxfile.purebasename
            source_files = ["%s.c" % modname]
            make_c_from_pyxfile(pyxfile)
            compile_module(modname, source_files, object_files, library_files)
        finally:
            # always restore the original working directory
            lastdir.chdir()

        return modname, str(dirpath)

    def make_standalone(self, exename):
        """Build a standalone executable; returns its full path."""
        llvmfile = self.genllvm.filename

        # change into dirpath and store current path to change back
        dirpath = llvmfile.dirpath()
        lastdir = py.path.local()
        dirpath.chdir()

        b = llvmfile.purebasename

        # generate the llvm bytecode from ll file
        self.compile_bytecode(b)

        object_files = ["-L/sw/lib"]
        library_files = self.genllvm.db.gcpolicy.gc_libraries()
        gc_libs = ' '.join(['-l' + lib for lib in library_files])

        if sys.platform == 'darwin':
            libdir = '/sw/' + "/lib"
            gc_libs_path = '-L%s -ldl' % libdir
        else:
            gc_libs_path = '-static'

        source_files = []

        use_gcc = self.genllvm.config.translation.llvm_via_c
        if not use_gcc:
            # native route: llc -> as -> link with gcc
            self.cmds.append("llc %s.bc -f -o %s.s" % (b, b))
            self.cmds.append("as %s.s -o %s.o" % (b, b))
            cmd = "gcc -O3 %s.o %s %s -lm -pipe -o %s" % (b, gc_libs_path, gc_libs, exename)
            self.cmds.append(cmd)
            object_files.append("%s.o" % b)
        else:
            # via-C route, optionally with profile-guided optimization
            self.cmds.append("llc %s.bc -march=c -f -o %s.c" % (b, b))
            if (self.genllvm.config.translation.profopt is not None and
                not self.genllvm.config.translation.noprofopt):
                # build an instrumented binary, run it on the profopt
                # workload, then rebuild using the collected profile data
                cmd = "gcc -fprofile-generate %s.c -c -O3 -pipe -o %s.o" % (b, b)
                self.cmds.append(cmd)
                cmd = "gcc -fprofile-generate %s.o %s %s -lm -pipe -o %s_gen" % \
                      (b, gc_libs_path, gc_libs, exename)
                self.cmds.append(cmd)
                self.cmds.append("./%s_gen %s" % (exename, self.genllvm.config.translation.profopt))
                cmd = "gcc -fprofile-use %s.c -c -O3 -pipe -o %s.o" % (b, b)
                self.cmds.append(cmd)
                cmd = "gcc -fprofile-use %s.o %s %s -lm -pipe -o %s" % \
                      (b, gc_libs_path, gc_libs, exename)
            else:
                cmd = "gcc %s.c -c -O3 -pipe -fomit-frame-pointer" % b
                self.cmds.append(cmd)
                cmd = "gcc %s.o %s %s -lm -pipe -o %s" % (b, gc_libs_path, gc_libs, exename)
            self.cmds.append(cmd)
            source_files.append("%s.c" % b)

        try:
            self.execute_cmds()
        finally:
            lastdir.chdir()

        return str(dirpath.join(exename))
| Python |
from pypy.translator.llvm.log import log
log = log.codewriter
DEFAULT_TAIL = ''       #/tail
DEFAULT_CCONV = 'fastcc'    #ccc/fastcc
DEFAULT_LINKAGE = 'internal '       #/internal (disabled for now because of the JIT)

class CodeWriter(object):
    """Low-level emitter of LLVM 1.x assembly.

    All instruction syntax knowledge is concentrated here; the rest of
    the backend only calls these helper methods.  `tail`, `cconv` and
    `linkage` hold the defaults applied to calls and function
    definitions unless the caller overrides them per-instruction.
    """

    def __init__(self, file, db, tail=DEFAULT_TAIL, cconv=DEFAULT_CCONV,
                 linkage=DEFAULT_LINKAGE):
        self.file = file
        # machine-word type repr (used for the implicit first gep index)
        self.word_repr = db.get_machine_word()
        self.tail = tail
        self.cconv = cconv
        self.linkage = linkage

    def close(self):
        self.file.close()

    def _resolvetail(self, tail, cconv):
        """Return the 'tail ' marker if allowed for this calling convention."""
        # from: http://llvm.org/docs/LangRef.html
        # The optional "tail" marker indicates whether the callee function
        # accesses any allocas or varargs in the caller. If the "tail" marker
        # is present, the function call is eligible for tail call
        # optimization. Note that calls may be marked "tail" even if they do
        # not occur before a ret instruction.
        # bug fix: this previously read "cconv is not 'fastcc'", an
        # *identity* comparison of strings that only worked through
        # CPython's interning of identical literals; compare by value.
        if cconv != 'fastcc':
            tail_ = ''
        else:
            tail_ = tail

        if tail_:
            tail_ += ' '
        return tail_

    # keep these two internal for now - incase we try a different API
    def _append(self, line):
        self.file.write(line + '\n')

    def _indent(self, line):
        self._append("    " + line)

    def write_lines(self, lines):
        """Emit a multi-line string verbatim, one line at a time."""
        for l in lines.split("\n"):
            self._append(l)

    def comment(self, line, indent=True):
        line = ";; " + line
        if indent:
            self._indent(line)
        else:
            self._append(line)

    def header_comment(self, s):
        self.newline()
        self.comment(s)
        self.newline()

    def newline(self):
        self._append("")

    def label(self, name):
        self.newline()
        self._append("    %s:" % name)

    def globalinstance(self, name, typeandata, linkage=None):
        if linkage is None:
            linkage = self.linkage
        self._append("%s = %sglobal %s" % (name, linkage, typeandata))

    def typedef(self, name, type_):
        self._append("%s = type %s" % (name, type_))

    def structdef(self, name, typereprs):
        self.typedef(name, "{ %s }" % ", ".join(typereprs))

    def arraydef(self, name, lentype, typerepr):
        # a varsized array is a length field plus a zero-length tail
        self.typedef(name, "{ %s, [0 x %s] }" % (lentype, typerepr))

    def fixedarraydef(self, name, arraylen, typerepr):
        self.typedef(name, "[%s x %s]" % (arraylen, typerepr))

    def funcdef(self, name, rettyperepr, argtypereprs):
        self.typedef(name, "%s (%s)" % (rettyperepr,
                                        ", ".join(argtypereprs)))

    def declare(self, decl, cconv=None):
        if cconv is None:
            cconv = self.cconv
        self._append("declare %s %s" %(cconv, decl,))

    def startimpl(self):
        self.newline()
        self._append("implementation")
        self.newline()

    def br_uncond(self, blockname):
        self._indent("br label %%%s" %(blockname,))

    def br(self, cond, blockname_false, blockname_true):
        # note the argument order: false label first, but LLVM syntax
        # takes the true label first
        self._indent("br bool %s, label %%%s, label %%%s"
                     % (cond, blockname_true, blockname_false))

    def switch(self, intty, cond, defaultdest, value_labels):
        labels = ''
        for value, label in value_labels:
            labels += ' %s %s, label %%%s' % (intty, value, label)
        self._indent("switch %s %s, label %%%s [%s ]"
                     % (intty, cond, defaultdest, labels))

    def openfunc(self, decl, cconv=None, linkage=None):
        if cconv is None:
            cconv = self.cconv
        if linkage is None:
            linkage = self.linkage
        self.newline()
        self._append("%s%s %s {" % (linkage, cconv, decl,))

    def closefunc(self):
        self._append("}")

    def ret(self, type_, ref):
        if type_ == 'void':
            self._indent("ret void")
        else:
            self._indent("ret %s %s" % (type_, ref))

    def phi(self, targetvar, type_, refs, blocknames):
        assert len(refs) == len(blocknames), "phi node requires blocks"
        mergelist = ", ".join(
            ["[%s, %%%s]" % item
             for item in zip(refs, blocknames)])
        s = "%s = phi %s %s" % (targetvar, type_, mergelist)
        self._indent(s)

    def binaryop(self, name, targetvar, type_, ref1, ref2):
        self._indent("%s = %s %s %s, %s" % (targetvar, name, type_,
                                            ref1, ref2))

    def shiftop(self, name, targetvar, type_, ref1, ref2):
        # LLVM 1.x shift amounts are always ubyte
        self._indent("%s = %s %s %s, ubyte %s" % (targetvar, name, type_,
                                                  ref1, ref2))

    def cast(self, targetvar, fromtype, fromvar, targettype):
        if fromtype == 'void' and targettype == 'void':
            # nothing to emit for a void-to-void "cast"
            return
        self._indent("%(targetvar)s = cast %(fromtype)s "
                     "%(fromvar)s to %(targettype)s" % locals())

    def getelementptr(self, targetvar, type, typevar, indices, getptr=True):
        # getelementptr gives you back a value for the last thing indexed

        # what is getptr?
        # ---------------
        # All global variables in LLVM are pointers, and pointers must also be
        # dereferenced with the getelementptr instruction (hence the int 0)

        # not only that, but if we need to look into something (ie a struct)
        # then we must get the initial pointer to ourself

        if getptr:
            indices = [(self.word_repr, 0)] + list(indices)
        res = "%(targetvar)s = getelementptr %(type)s %(typevar)s, " % locals()
        res += ", ".join(["%s %s" % (t, i) for t, i in indices])
        self._indent(res)

    def load(self, target, targettype, ptr):
        self._indent("%(target)s = load %(targettype)s* %(ptr)s" % locals())

    def store(self, valuetype, value, ptr):
        l = "store %(valuetype)s %(value)s, %(valuetype)s* %(ptr)s" % locals()
        self._indent(l)

    def unwind(self):
        self._indent("unwind")

    def call(self, targetvar, returntype, functionref, argtypes, argrefs,
             tail=None, cconv=None):
        """Emit a call instruction; targetvar is unused for void returns."""
        if tail is None:
            tail = self.tail
        if cconv is None:
            cconv = self.cconv

        tail = self._resolvetail(tail, cconv)
        args = ", ".join(["%s %s" % item for item in zip(argtypes, argrefs)])

        if returntype == 'void':
            return_str = ''
        else:
            return_str = '%s = ' % targetvar

        self._indent("%s%scall %s %s %s(%s)" % (return_str,
                                                tail,
                                                cconv,
                                                returntype,
                                                functionref,
                                                args))

    def alloca(self, targetvar, vartype):
        self._indent("%s = alloca %s" % (targetvar, vartype))

    def malloc(self, targetvar, vartype, numelements=1):
        if numelements == 1:
            self._indent("%s = malloc %s" % (targetvar, vartype))
        else:
            assert numelements > 1
            self._indent("%s = malloc %s, uint %s" % (targetvar,
                                                      vartype,
                                                      numelements))

    def free(self, vartype, varref):
        self._indent("free %s %s" % (vartype, varref))
| Python |
from pypy.translator.llvm.node import LLVMNode, ConstantLLVMNode
from pypy.rpython.lltypesystem import lltype
class OpaqueTypeNode(LLVMNode):
    """Type node for an lltype.OpaqueType, emitted as an 'opaque*' typedef."""

    def __init__(self, db, opaquetype):
        assert isinstance(opaquetype, lltype.OpaqueType)
        self.db = db
        self.opaquetype = opaquetype
        self.ref = "%%RPyOpaque_%s" % (opaquetype.tag)

    def __str__(self):
        # bug fix: this previously reported itself as "<OpaqueNode ...>",
        # colliding with the repr of the unrelated OpaqueNode class below
        return "<OpaqueTypeNode %r>" %(self.ref,)

    # ______________________________________________________________________
    # main entry points from genllvm

    def writedatatypedecl(self, codewriter):
        codewriter.typedef(self.ref, "opaque*")
class ExtOpaqueTypeNode(OpaqueTypeNode):
    """Opaque type whose definition lives in external support code."""
    def writedatatypedecl(self, codewriter):
        # the external C support code provides this type - nothing to emit
        pass
class OpaqueNode(ConstantLLVMNode):
    """Constant node for a plain opaque value; it has no representable
    contents, so references to it are simply 'null'."""
    def __init__(self, db, value):
        self.db = db
        self.value = value
        self.ref = "null"

    # ______________________________________________________________________
    # main entry points from genllvm

    def writeglobalconstants(self, codewriter):
        # XXX Dummy - not sure what what we want
        pass
class ExtOpaqueNode(ConstantLLVMNode):
    """Constant node for an externally-implemented opaque instance
    (e.g. a thread lock): emitted as a zero-initialized global whose
    real contents are filled in by runtime setup code."""
    def __init__(self, db, value):
        self.db = db
        self.value = value
        prefix = '%opaqueinstance_'
        # the repr looks like '<... name ...>'; the second token is a
        # usable unique-ish name
        name = str(value).split()[1]
        self.ref = self.make_ref(prefix, name)
        self._get_ref_cache = None

    # ______________________________________________________________________
    # main entry points from genllvm

    def get_ref(self):
        """ Returns a reference as used for operations in blocks. """
        if self._get_ref_cache:
            return self._get_ref_cache
        p, c = lltype.parentlink(self.value)
        if p is None:
            ref = self.ref
        else:
            # inlined inside a parent container: address via the parent
            ref = self.db.get_childref(p, c)
        self._get_ref_cache = ref
        return ref

    def writeglobalconstants(self, codewriter):
        # XXX Dummy - not sure what what we want
        pass

    def constantvalue(self):
        # zero-initialized; writesetupcode() performs the real setup
        return "%s zeroinitializer" % self.db.repr_type(self.value._TYPE)

    def writesetupcode(self, codewriter):
        """Emit the runtime initialization call for this opaque object."""
        T = self.value._TYPE
        # XXX similar non generic hacks to genc for now
        if T.tag == 'ThreadLock':
            argrefs = [self.get_ref()]
            argtypes = [self.db.repr_type(T) + "*"]

            lock = self.value.externalobj
            argtypes.append("int")
            # second argument: the lock's initial locked state (1/0)
            if lock.locked():
                argrefs.append('1')
            else:
                argrefs.append('0')

            # XXX Check result
            codewriter.call(self.db.repr_tmpvar(),
                            "sbyte*",
                            "%RPyOpaque_LLVM_SETUP_ThreadLock",
                            argtypes, argrefs)
            # XXX Check result
| Python |
import py
from pypy.tool.ansi_print import ansi_log

# single shared 'llvm' log producer for the whole backend; output is
# routed through py.log with ANSI colouring
log = py.log.Producer('llvm')
py.log.setconsumer('llvm', ansi_log)
| Python |
def _noresult(returntype):
r = returntype.strip()
if r == 'void':
return 'void'
elif r == 'bool':
return 'bool false'
elif r in 'float double'.split():
return r + ' 0.0'
elif r in 'ubyte sbyte ushort short uint int ulong long'.split():
return r + ' 0'
return r + ' null'
def llvm_implcode(entrynode):
    """Return the exception-transformer support code specialised for the
    given entry-point node (template filled via %-formatting of locals)."""
    from pypy.translator.llvm.codewriter import DEFAULT_CCONV as cconv
    from pypy.translator.llvm.module.excsupport import exctransform_code
    # getdecl() looks like "rettype %pypy_entry(...)"; split at the first
    # '%' to separate the return type from the function name
    returntype, entrypointname = entrynode.getdecl().split('%', 1)
    noresult = _noresult(returntype)
    # the template references cconv/returntype/entrypointname/noresult
    return exctransform_code % locals()
| Python |
from pypy.objspace.flow.model import Block, Constant, Link
from pypy.objspace.flow.model import mkentrymap, c_last_exception
from pypy.rpython.lltypesystem import lltype
from pypy.translator.llvm.node import LLVMNode, ConstantLLVMNode
from pypy.translator.llvm.opwriter import OpWriter
from pypy.translator.llvm.log import log
from pypy.translator.unsimplify import remove_double_links, no_links_to_startblack
log = log.funcnode
class FuncTypeNode(LLVMNode):
    """Type node emitting the LLVM function-type typedef for an
    lltype.FuncType."""
    __slots__ = "db type_ ref".split()

    def __init__(self, db, type_):
        assert isinstance(type_, lltype.FuncType)
        self.db = db
        self.type_ = type_
        self.ref = self.make_ref('%functiontype', '')

    def __str__(self):
        return "<FuncTypeNode %r>" % self.ref

    def setup(self):
        # make the result and argument types known to the database
        self.db.prepare_type(self.type_.RESULT)
        self.db.prepare_type_multi(self.type_._trueargs())

    def writedatatypedecl(self, codewriter):
        result_repr = self.db.repr_type(self.type_.RESULT)
        arg_reprs = [self.db.repr_type(arg)
                     for arg in self.type_._trueargs()]
        codewriter.funcdef(self.ref, result_repr, arg_reprs)
class BranchException(Exception):
    """Raised when a block's exitswitch type cannot be compiled."""
    pass
class FuncNode(ConstantLLVMNode):
    """Node emitting the LLVM definition of one RPython function graph."""
    __slots__ = "db value ref graph block_to_name bad_switch_block".split()

    def __init__(self, db, value):
        self.db = db
        self.value = value
        self.ref = self.make_ref('%pypy_', value.graph.name)
        self.graph = value.graph
        # set to True when a switch without a default link is seen; we
        # then emit a shared 'badswitch' abort label at the end
        self.bad_switch_block = False

    def __str__(self):
        return "<FuncNode %r>" %(self.ref,)

    def setup(self):
        """Register every constant/variable used by the graph with db."""
        assert self.graph, "cannot traverse"
        prepare_arg = self.db.prepare_arg
        for block in self.graph.iterblocks():
            for arg in block.inputargs:
                prepare_arg(arg)
            for op in block.operations:
                for arg in op.args:
                    prepare_arg(arg)
                prepare_arg(op.result)
            # exception handling is assumed to have been transformed away
            assert block.exitswitch != c_last_exception
            for link in block.exits:
                for arg in link.args:
                    prepare_arg(arg)

    # ______________________________________________________________________
    # main entry points from genllvm

    def post_setup_transform(self):
        remove_double_links(self.db.translator.annotator, self.graph)
        no_links_to_startblack(self.graph)

    def writedecl(self, codewriter):
        codewriter.declare(self.getdecl())

    def writeimpl(self, codewriter):
        """Emit the full function definition, one label per flow block."""
        graph = self.graph
        log.writeimpl(graph.name)
        codewriter.openfunc(self.getdecl())

        self.block_to_name = {}
        for i, block in enumerate(graph.iterblocks()):
            self.block_to_name[block] = "block%s" % i
        for block in graph.iterblocks():
            codewriter.label(self.block_to_name[block])
            # start/return blocks need dedicated writers
            for name in 'startblock returnblock'.split():
                if block is getattr(graph, name):
                    getattr(self, 'write_' + name)(codewriter, block)
                    break
            else:
                self.write_block(codewriter, block)

        if self.bad_switch_block:
            # fallthrough target for switches without a default case
            codewriter.label('badswitch')
            codewriter._indent('call void %abort()')
            codewriter._indent('unreachable')
        codewriter.closefunc()

    def writeglobalconstants(self, codewriter):
        pass

    # ______________________________________________________________________
    # writing helpers for entry points

    def getdecl_parts(self):
        """Return (return type repr, function ref, rendered arg list)."""
        startblock = self.graph.startblock
        returnblock = self.graph.returnblock
        startblock_inputargs = [a for a in startblock.inputargs
                                if a.concretetype is not lltype.Void]

        inputargs = self.db.repr_arg_multi(startblock_inputargs)
        inputargtypes = self.db.repr_arg_type_multi(startblock_inputargs)
        returntype = self.db.repr_arg_type(self.graph.returnblock.inputargs[0])
        args = ["%s %s" % item for item in zip(inputargtypes, inputargs)]
        return returntype, self.ref, args

    def getdecl(self):
        returntype, ref, args = self.getdecl_parts()
        return "%s %s(%s)" % (returntype, ref, ", ".join(args))

    # ______________________________________________________________________
    # helpers for block writers

    def get_phi_data(self, block):
        data = []
        entrylinks = mkentrymap(self.graph)[block]
        entrylinks = [x for x in entrylinks if x.prevblock is not None]
        inputargs = self.db.repr_arg_multi(block.inputargs)
        inputargtypes = self.db.repr_arg_type_multi(block.inputargs)

        # for each argument in block, return a 4 tuple of
        # arg_name, arg_type, [list of names from previous blocks,
        # [corresponding list of block names]
        for ii, (arg, type_) in enumerate(zip(inputargs, inputargtypes)):
            names = self.db.repr_arg_multi([link.args[ii]
                                            for link in entrylinks])
            blocknames = [self.block_to_name[link.prevblock]
                          for link in entrylinks]
            assert len(names) == len(blocknames)
            data.append((arg, type_, names, blocknames))
        return data

    def write_block_phi_nodes(self, codewriter, block):
        for arg, type_, names, blocknames in self.get_phi_data(block):
            if type_ != "void":
                codewriter.phi(arg, type_, names, blocknames)

    def write_block_branches(self, codewriter, block):
        """Emit the terminator (br/switch) for a block."""
        assert block.exitswitch != c_last_exception

        if len(block.exits) == 1:
            codewriter.br_uncond(self.block_to_name[block.exits[0].target])
            return

        cond, condtype = self.db.repr_argwithtype(block.exitswitch)
        if block.exitswitch.concretetype == lltype.Bool:
            assert len(block.exits) == 2
            # the two exits may come in either order
            if block.exits[0].llexitcase == False:
                assert block.exits[1].llexitcase == True
                false_case = block.exits[0].target
                true_case = block.exits[1].target
            else:
                assert block.exits[0].llexitcase == True
                assert block.exits[1].llexitcase == False
                false_case = block.exits[1].target
                true_case = block.exits[0].target

            codewriter.br(cond,
                          self.block_to_name[false_case],
                          self.block_to_name[true_case])

        elif block.exitswitch.concretetype in \
            (lltype.Signed, lltype.Unsigned, lltype.SignedLongLong,
             lltype.UnsignedLongLong, lltype.Char, lltype.UniChar):
            defaultlink = None
            value_labels = []
            for link in block.exits:
                if link.exitcase == 'default':
                    defaultlink = link
                    continue

                exitcase = link.llexitcase
                if block.exitswitch.concretetype in [lltype.Char, lltype.UniChar]:
                    exitcase = ord(exitcase)
                value_labels.append( (exitcase,
                                      self.block_to_name[link.target]) )

            if defaultlink:
                defaultblockname = self.block_to_name[defaultlink.target]
            else:
                # no default: jump to the shared abort block
                defaultblockname = 'badswitch'
                self.bad_switch_block = True

            codewriter.switch(condtype, cond, defaultblockname, value_labels)

        else:
            raise BranchException("exitswitch type '%s' not supported" %
                                  block.exitswitch.concretetype)

    def write_block_operations(self, codewriter, block):
        # XXX We dont need multiple of these
        opwriter = OpWriter(self.db, codewriter)
        assert block.exitswitch != c_last_exception

        # emit operations
        for op in block.operations:
            opwriter.write_operation(op)

    # ______________________________________________________________________
    # actual block writers

    def write_startblock(self, codewriter, block):
        self.write_block_operations(codewriter, block)

        # a start block may return also
        if block.exitswitch is None and len(block.exits) == 0:
            inputarg, inputargtype = self.db.repr_argwithtype(block.inputargs[0])
            codewriter.ret(inputargtype, inputarg)
        else:
            self.write_block_branches(codewriter, block)

    def write_block(self, codewriter, block):
        self.write_block_phi_nodes(codewriter, block)
        self.write_block_operations(codewriter, block)
        self.write_block_branches(codewriter, block)

    def write_returnblock(self, codewriter, block):
        # bug fix: this invariant check was a bare expression statement
        # (the "assert" keyword was missing), so it never checked anything
        assert block.exitswitch is None and len(block.exits) == 0
        assert len(block.inputargs) == 1
        self.write_block_phi_nodes(codewriter, block)
        inputarg, inputargtype = self.db.repr_argwithtype(block.inputargs[0])
        codewriter.ret(inputargtype, inputarg)
| Python |
import sys
from pypy.rpython.lltypesystem.rstr import STR
from pypy.translator.c import gc
from pypy.translator.llvm.log import log
log = log.gc
from pypy.translator.llvm.buildllvm import postfix
def have_boehm():
    """Best-effort check that the Boehm GC library is installed under
    the interpreter's lib directory."""
    import distutils.sysconfig
    from os.path import exists
    libdir = distutils.sysconfig.EXEC_PREFIX + "/lib"
    for libname in ('/libgc.so', '/libgc.a'):
        if exists(libdir + libname):
            return True
    return False
class GcPolicy:
    """Abstract strategy object deciding how generated code allocates
    and collects memory.  Obtain concrete policies through the new()
    factory; the base class itself must never be instantiated."""
    n_malloced = 0

    def __init__(self, db):
        raise Exception('GcPolicy should not be used directly')

    def genextern_code(self):
        # extra C support code for this GC, if any
        return ''

    def gc_libraries(self):
        # names of libraries the chosen GC needs at link time
        return []

    def pyrex_code(self):
        return ''

    def get_count(self, inc=False):
        # monotonically increasing suffix used to keep temp names unique
        if inc:
            self.n_malloced += 1
        return '_%d' % self.n_malloced

    def _zeromalloc(self, codewriter, targetvar, size=1, atomic=False,
                    exc_flag=False):
        raise NotImplementedError('GcPolicy should not be used directly')

    def op_call_rtti_destructor(self, codewriter, opr):
        raise Exception('GcPolicy should not be used directly')

    def op_free(self, codewriter, opr):
        raise Exception('GcPolicy should not be used directly')

    def op_fetch_exception(self, codewriter, opr):
        raise Exception('GcPolicy should not be used directly')

    def op_restore_exception(self, codewriter, opr):
        raise Exception('GcPolicy should not be used directly')

    def op_collect(self, codewriter, opr):
        raise Exception('GcPolicy should not be used directly')

    @staticmethod
    def new(db, gcpolicy=None):
        # """ factory """
        if gcpolicy == 'boehm':
            # XXX would be nice to localise this sort of thing?
            #assert have_boehm(), 'warning: Boehm GC libary not found in /usr/lib'
            return BoehmGcPolicy(db)
        if gcpolicy == 'ref':
            return RefcountingGcPolicy(db)
        if gcpolicy in ('none', 'raw'):
            return RawGcPolicy(db)
        if gcpolicy == 'framework':
            return FrameworkGcPolicy(db)
        raise Exception('unknown gcpolicy: ' + str(gcpolicy))
class RawGcPolicy(GcPolicy):
    """'No GC' policy: allocation is plain calloc and memory is never
    collected or freed."""

    def __init__(self, db):
        self.db = db

    def genextern_code(self):
        parts = [
            '#define __GC_STARTUP_CODE__\n',
            '#define __GC_SETUP_CODE__\n',
            'char* pypy_malloc(int size) { return calloc(1, size); }\n',
            'char* pypy_malloc_atomic(int size) { return calloc(1, size); }\n',
        ]
        return ''.join(parts)

    def gc_libraries(self):
        return ['pthread']

    def _zeromalloc(self, codewriter, targetvar, size=1, atomic=False,
                    exc_flag=False):
        """ assumes malloc of word size """
        uword = self.db.get_machine_uword()
        boundary_size = 0

        # malloc_size is unsigned right now
        # malloc followed by an explicit memset to zero the block
        codewriter.malloc(targetvar, "sbyte", size)
        codewriter.call(None, 'void', '%llvm.memset' + postfix(),
                        ['sbyte*', 'ubyte', uword, uword],
                        [targetvar, 0, size, boundary_size],
                        cconv='ccc')
class BoehmGcPolicy(GcPolicy):
    """GC policy backed by the Boehm conservative collector (-lgc)."""

    def __init__(self, db, exc_useringbuf=True):
        self.db = db
        # XXX a config option...
        # exc_useringbuf: whether exception allocation may use a ring buffer;
        # currently only referenced by the disabled block in _zeromalloc below
        self.exc_useringbuf = exc_useringbuf

    def genextern_code(self):
        # boehm.h supplies the pypy_malloc/pypy_malloc_atomic wrappers
        r = ''
        r += '#include "boehm.h"\n'
        r += '#define __GC_SETUP_CODE__\n'
        return r

    def gc_libraries(self):
        return ['gc', 'pthread']

    def pyrex_code(self):
        # expose the collector's heap size to the compiled-module tests
        return '''
cdef extern int GC_get_heap_size()

def GC_get_heap_size_wrapper():
    return GC_get_heap_size()
'''

    def _zeromalloc(self, codewriter, targetvar, size=1, atomic=False,
                    exc_flag=False):
        """ assumes malloc of word size """
        boundary_size = 0
        word = self.db.get_machine_word()
        uword = self.db.get_machine_uword()
        fnname = '%pypy_malloc' + (atomic and '_atomic' or '')

## XXX (arigo) disabled the ring buffer for comparison purposes
## XXX until we know if it's a valid optimization or not
##        if self.exc_useringbuf and exc_flag:
##            fnname += '_ringbuffer'
##            # dont clear the ringbuffer data
##            atomic = False

        # malloc_size is unsigned right now
        sizeu = '%malloc_sizeu' + self.get_count()
        codewriter.cast(sizeu, word, size, uword)
        codewriter.call(targetvar, 'sbyte*', fnname, [word], [size])
        if atomic:
            # atomic (pointer-free) blocks are not cleared by Boehm,
            # so zero them here explicitly
            codewriter.call(None, 'void', '%llvm.memset' + postfix(),
                            ['sbyte*', 'ubyte', uword, uword],
                            [targetvar, 0, sizeu, boundary_size],
                            cconv='ccc')

    def op__collect(self, codewriter, opr):
        # NOTE(review): double underscore (op__collect) vs. op_collect in the
        # base class -- presumably matched against the 'gc__collect'
        # operation name; confirm against the opwriter dispatch
        codewriter.call(opr.retref, opr.rettype, "%pypy_gc__collect",
                        opr.argtypes, opr.argrefs)
class RefcountingGcPolicy(RawGcPolicy):
    """Refcounting is only partially supported here: allocation is inherited
    from RawGcPolicy (calloc), destructors are skipped with a warning."""

    def __init__(self, db, exc_useringbuf=True):
        # NOTE(review): exc_useringbuf is accepted but never stored or used
        self.db = db

    def op_call_rtti_destructor(self, codewriter, opr):
        log.WARNING("skipping op_call_rtti_destructor")

    def op_free(self, codewriter, opr):
        # explicit deallocation of a refcounted object
        assert opr.rettype == 'void' and len(opr.argtypes) == 1
        codewriter.free(opr.argtypes[0], opr.argrefs[0])
class FrameworkGcPolicy(GcPolicy):
    """Placeholder for the framework GC; startup hook still missing (XXX)."""

    def __init__(self, db):
        self.db = db

    def genextern_code(self):
        # XXX not finished: we must call the gc init function!
        return ('#define __GC_STARTUP_CODE__\n'
                '#define __GC_SETUP_CODE__\n')

    def gc_libraries(self):
        return ['pthread']
| Python |
from pypy.rpython.lltypesystem import lltype
from pypy.translator.llvm.log import log
from pypy.translator.llvm.node import LLVMNode, ConstantLLVMNode
log = log.structnode
class ArrayTypeNode(LLVMNode):
    """Type node for a varsized lltype.Array: declares the LLVM type
    { word length, [0 x item] } plus the name of its constructor helper."""
    __slots__ = "db array arraytype ref constructor_ref constructor_decl".split()

    def __init__(self, db, array):
        assert isinstance(array, lltype.Array)
        self.db = db
        self.array = array
        self.arraytype = arraytype = array.OF
        # ref is used to reference the arraytype in llvm source
        # constructor_ref is used to reference the constructor
        # for the array type in llvm source code
        # constructor_decl is used to declare the constructor
        # for the array type (see writeimpl)
        name = ""
        if isinstance(arraytype, lltype.Ptr):
            name += "ptr_"
            arraytype = arraytype.TO
        if hasattr(arraytype, "_name"):
            name += arraytype._name
        else:
            name += str(arraytype)

        self.ref = self.make_ref('%arraytype_', name)
        self.constructor_ref = self.make_ref('%new_array_', name)
        self.constructor_decl = "%s * %s(%s %%len)" % \
                                (self.ref,
                                 self.constructor_ref,
                                 self.db.get_machine_word())

    def __str__(self):
        return "<ArrayTypeNode %r>" % self.ref

    def setup(self):
        # make the item type known to the database before anything uses it
        self.db.prepare_type(self.arraytype)

    # ______________________________________________________________________
    # entry points from genllvm
    #

    def writedatatypedecl(self, codewriter):
        # emits %arraytype_X = type { word, [0 x item] }
        codewriter.arraydef(self.ref,
                            self.db.get_machine_word(),
                            self.db.repr_type(self.arraytype))

    def var_malloc_info(self):
        # (array type to allocate, no nested varsized parts)
        return self.array, ()
class VoidArrayTypeNode(LLVMNode):
    """Type node for Array(Void): only the length word is materialised."""
    __slots__ = "db array ref".split()

    def __init__(self, db, array):
        assert isinstance(array, lltype.Array)
        self.db = db
        self.array = array
        self.ref = "%arraytype_Void"

    def writedatatypedecl(self, codewriter):
        # a void array degenerates to a struct holding just its length
        codewriter.typedef(self.ref,
                           "{ %s }" % self.db.get_machine_word())
class ArrayNode(ConstantLLVMNode):
    """ An arraynode.  Elements can be
    a primitive,
    a struct,
    pointer to struct/array
    """
    __slots__ = "db value arraytype ref".split()

    def __init__(self, db, value):
        assert isinstance(lltype.typeOf(value), lltype.Array)
        self.db = db
        self.value = value
        self.arraytype = lltype.typeOf(value).OF
        prefix = '%arrayinstance'
        name = '' #str(value).split()[1]
        self.ref = self.make_ref(prefix, name)

    def __str__(self):
        return "<ArrayNode %r>" % (self.ref,)

    def setup(self):
        # make every item constant (and the parent container, if this array
        # is inlined in one) known to the database
        for item in self.value.items:
            self.db.prepare_constant(self.arraytype, item)

        p, c = lltype.parentlink(self.value)
        if p is not None:
            self.db.prepare_constant(lltype.typeOf(p), p)

    def get_length(self):
        """ returns logical length of array """
        items = self.value.items
        return len(items)

    def get_arrayvalue(self):
        # returns (physical length, "[item, item, ...]"); physical may differ
        # from the logical length in subclasses (see StrArrayNode)
        items = self.value.items
        l = len(items)
        r = "[%s]" % ", ".join([self.db.repr_constant(v)[1] for v in items])
        return l, r

    def get_typerepr(self):
        # concrete type of this instance: { word, [physicallen x item] }
        arraylen = self.get_arrayvalue()[0]
        typeval = self.db.repr_type(self.arraytype)
        return "{ %s, [%s x %s] }" % (self.db.get_machine_word(),
                                      arraylen, typeval)

    def get_ref(self):
        # reference usable in operations: the concrete fixed-length global
        # (or the slot inside its parent) cast to the generic varsized type
        typeval = self.db.repr_type(lltype.typeOf(self.value))
        p, c = lltype.parentlink(self.value)
        if p is None:
            ref = self.ref
        else:
            ref = self.db.get_childref(p, c)

        ref = "cast(%s* %s to %s*)" % (self.get_typerepr(),
                                       ref,
                                       typeval)
        return ref

    def get_pbcref(self, toptr):
        ref = self.ref
        p, c = lltype.parentlink(self.value)
        assert p is None, "child PBC arrays are NOT needed by rtyper"

        fromptr = "%s*" % self.get_typerepr()
        ref = "cast(%s %s to %s)" % (fromptr, ref, toptr)
        return ref

    def get_childref(self, index):
        # pointer to item `index` inside the [n x item] payload (field 1)
        return "getelementptr(%s* %s, int 0, uint 1, int %s)" %(
            self.get_typerepr(),
            self.ref,
            index)

    def constantvalue(self):
        physicallen, arrayrepr = self.get_arrayvalue()
        typeval = self.db.repr_type(self.arraytype)

        # first length is logical, second is physical
        value = "%s %s, [%s x %s] %s" % (self.db.get_machine_word(),
                                         self.get_length(),
                                         physicallen,
                                         typeval,
                                         arrayrepr)

        s = "%s {%s}" % (self.get_typerepr(), value)
        return s
class StrArrayNode(ArrayNode):
    """Array-of-Char constant rendered as an LLVM c"..." string literal."""
    __slots__ = "".split()

    # characters emitted verbatim inside c"..."; anything else is \xx-escaped
    printables = dict([(ord(i), None) for i in
                       ("0123456789abcdefghijklmnopqrstuvwxyz" +
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
                        "!#$%&()*+,-./:;<=>?@[]^_`{|}~ '")])

    def get_arrayvalue(self):
        chars = self.value.items
        length = len(chars)
        # guarantee NUL termination (counted in the physical length)
        if length == 0 or chars[-1] != chr(0):
            chars = chars + [chr(0)]
            length += 1
        escaped = []
        for ch in chars:
            if ord(ch) in StrArrayNode.printables:
                escaped.append(ch)
            else:
                escaped.append("\\%02x" % ord(ch))
        return length, 'c"%s"' % "".join(escaped)
class VoidArrayNode(ConstantLLVMNode):
    """Constant instance of Array(Void): only the item count is stored."""
    __slots__ = "db value ref".split()

    def __init__(self, db, value):
        assert isinstance(lltype.typeOf(value), lltype.Array)
        self.db = db
        self.value = value
        self.ref = self.make_ref('%arrayinstance', '')

    def constantvalue(self):
        word = self.db.get_machine_word()
        return "{ %s } {%s %s}" % (word,
                                   word,
                                   len(self.value.items))
| Python |
from pypy.rpython.lltypesystem import lltype
class LLVMNode(object):
    """Common base of all code-generation nodes.

    Supplies unique-name allocation plus the (mostly no-op) hooks which
    genllvm invokes on every node during each output phase."""
    __slots__ = "".split()

    # shared registry: base name -> next numeric suffix to hand out
    nodename_count = {}

    def make_name(self, name):
        """Return a unique name; repeats get a _N suffix, and names holding
        characters llvm cannot take bare (' ' or '<') are double-quoted."""
        counts = self.nodename_count
        if name not in counts:
            counts[name] = 1
            unique = name
        else:
            unique = name + '_%d' % counts[name]
            counts[name] += 1
        if " " in unique or "<" in unique:
            unique = '"%s"' % unique
        return unique

    def make_ref(self, prefix, name):
        return self.make_name(prefix + name)

    def setup(self):
        pass

    # __________________ before "implementation" ____________________

    def post_setup_transform(self):
        pass

    def writedatatypedecl(self, codewriter):
        """ write out declare names of data types
            (structs/arrays/function pointers)
        """

    def writeglobalconstants(self, codewriter):
        """ write out global values.  """

    def writedecl(self, codewriter):
        """ write function forward declarations. """

    def writecomments(self, codewriter):
        """ write operations strings for debugging purposes. """

    # __________________ after "implementation" ____________________

    def writeimpl(self, codewriter):
        """ write function implementations. """

    # ______________________________________________________________________
    # pre entry-point setup

    def writesetupcode(self, codewriter):
        pass
class ConstantLLVMNode(LLVMNode):
    """Base for nodes representing constant (prebuilt) data."""
    __slots__ = "".split()

    def get_ref(self):
        """ Returns a reference as used for operations in blocks for pbc. """
        return self.ref

    def get_childref(self, index):
        """ Returns a reference as used for operations in blocks for internals of a pbc. """
        raise AttributeError("Must be implemented in subclass")

    def get_pbcref(self, toptr):
        """ Returns a reference as a pointer used per pbc. """
        return self.ref

    def constantvalue(self):
        """ Returns the constant representation for this node. """
        raise AttributeError("Must be implemented in subclass")

    # ______________________________________________________________________
    # entry points from genllvm

    def writeglobalconstants(self, codewriter):
        # only top-level constants get a global of their own; inlined
        # children are reached through their parent container
        parent, _ = lltype.parentlink(self.value)
        if parent is None:
            codewriter.globalinstance(self.ref, self.constantvalue())
| Python |
# Globals and prototypes prepended to every generated .ll file.  The two
# %last_exception_* globals carry the exception-passing convention; the
# UWORD/POSTFIX placeholders are substituted textually later (see
# GenLLVM._set_wordsize in genllvm).
extdeclarations = """
%last_exception_type = internal global %RPYTHON_EXCEPTION_VTABLE* null
%last_exception_value = internal global %RPYTHON_EXCEPTION* null
declare ccc uint %strlen(sbyte*)
declare ccc void %llvm.memsetPOSTFIX(sbyte*, ubyte, UWORD, UWORD)
declare ccc void %llvm.memcpyPOSTFIX(sbyte*, sbyte*, UWORD, UWORD)
"""
extfunctions = """
internal fastcc sbyte* %RPyString_AsString(%RPyString* %structstring) {
%source1ptr = getelementptr %RPyString* %structstring, int 0, uint 1, uint 1
%source1 = cast [0 x sbyte]* %source1ptr to sbyte*
ret sbyte* %source1
}
internal fastcc WORD %RPyString_Size(%RPyString* %structstring) {
%sizeptr = getelementptr %RPyString* %structstring, int 0, uint 1, uint 0
%size = load WORD* %sizeptr
ret WORD %size
}
internal fastcc int %RPyExceptionOccurred() {
%tmp.0 = load %RPYTHON_EXCEPTION_VTABLE** %last_exception_type
%bool_res = setne %RPYTHON_EXCEPTION_VTABLE* %tmp.0, null
%res = cast bool %bool_res to int
ret int %res
}
internal fastcc %RPyString* %RPyString_FromString(sbyte* %s) {
%lenu = call ccc uint %strlen(sbyte* %s)
%lenuword = cast uint %lenu to UWORD
%lenword = cast uint %lenu to WORD
%rpy = call fastcc %RPyString* %pypy_RPyString_New__Signed(WORD %lenword)
%rpystrptr = getelementptr %RPyString* %rpy, int 0, uint 1, uint 1
%rpystr = cast [0 x sbyte]* %rpystrptr to sbyte*
call ccc void %llvm.memcpyPOSTFIX(sbyte* %rpystr, sbyte* %s, UWORD %lenuword, UWORD 0)
ret %RPyString* %rpy
}
internal fastcc WORD %pypyop_int_abs(WORD %x) {
block0:
%cond1 = setge WORD %x, 0
br bool %cond1, label %return_block, label %block1
block1:
%x2 = sub WORD 0, %x
br label %return_block
return_block:
%result = phi WORD [%x, %block0], [%x2, %block1]
ret WORD %result
}
internal fastcc long %pypyop_llong_abs(long %x) {
block0:
%cond1 = setge long %x, 0
br bool %cond1, label %return_block, label %block1
block1:
%x2 = sub long 0, %x
br label %return_block
return_block:
%result = phi long [%x, %block0], [%x2, %block1]
ret long %result
}
internal fastcc double %pypyop_float_abs(double %x) {
block0:
%cond1 = setge double %x, 0.0
br bool %cond1, label %return_block, label %block1
block1:
%x2 = sub double 0.0, %x
br label %return_block
return_block:
%result = phi double [%x, %block0], [%x2, %block1]
ret double %result
}
"""
# On 64-bit platforms add int->long adapter shims so 32-bit call sites can
# reach the Signed (= long) variants of these helpers and the entry point.
from sys import maxint
if maxint != 2**31-1:
    extfunctions += """
internal fastcc void %pypy_ll_raise_OSError__Signed(int %errno_0) {
    %tmp = cast int %errno_0 to long
    call fastcc void %pypy_ll_raise_OSError__Signed(long %tmp)
    ret void
}
internal fastcc void %pypy__RPyListOfString_SetItem__listPtr_Signed_rpy_stringPtr(%RPyListOfString* %l_1, int %index_0, %RPyString* %newstring_0) {
    %index_0_long = cast int %index_0 to long
    call fastcc void %pypy__RPyListOfString_SetItem__listPtr_Signed_rpy_stringPtr(%RPyListOfString* %l_1, long %index_0_long, %RPyString* %newstring_0)
    ret void
}
"""

# support code only included for standalone targets
extfunctions_standalone = """
"""
if maxint != 2**31-1:
    extfunctions_standalone += """
internal fastcc int %pypy_entry_point(%RPyListOfString* %argv) {
    %result = call fastcc long %pypy_entry_point(%RPyListOfString* %argv)
    %tmp = cast long %result to int
    ret int %tmp
}
"""
def write_raise_exc(c_name, exc_repr, codewriter):
    """Emit a %raise<c_name>(sbyte* msg) helper.

    The helper stores the prebuilt exception instance `exc_repr` (and its
    vtable) into the %last_exception_* globals and calls %unwind().  The
    msg argument is not used by the generated code.
    """
    l = """
internal fastcc void %%raise%s(sbyte* %%msg) {
    %%exception_value = cast %s to %%RPYTHON_EXCEPTION*
    %%tmp = getelementptr %%RPYTHON_EXCEPTION* %%exception_value, int 0, uint 0
    %%exception_type = load %%RPYTHON_EXCEPTION_VTABLE** %%tmp
    store %%RPYTHON_EXCEPTION_VTABLE* %%exception_type, %%RPYTHON_EXCEPTION_VTABLE** %%last_exception_type
    store %%RPYTHON_EXCEPTION* %%exception_value, %%RPYTHON_EXCEPTION** %%last_exception_value
    call fastcc void %%unwind()
    ret void
}
""" % (c_name, exc_repr)

    codewriter.write_lines(l)
| Python |
exctransform_code = '''
ccc %(returntype)s%%__entrypoint__%(entrypointname)s {
store %%RPYTHON_EXCEPTION_VTABLE* null, %%RPYTHON_EXCEPTION_VTABLE** %%last_exception_type
%%result = call %(cconv)s %(returntype)s%%%(entrypointname)s
%%tmp = load %%RPYTHON_EXCEPTION_VTABLE** %%last_exception_type
%%exc = seteq %%RPYTHON_EXCEPTION_VTABLE* %%tmp, null
br bool %%exc, label %%no_exception, label %%exception
no_exception:
ret %(returntype)s %%result
exception:
ret %(noresult)s
}
;XXX this should use the transformation data that has the same purpose
ccc int %%__entrypoint__raised_LLVMException() {
%%tmp = load %%RPYTHON_EXCEPTION_VTABLE** %%last_exception_type
%%result = cast %%RPYTHON_EXCEPTION_VTABLE* %%tmp to int
ret int %%result
}
internal fastcc void %%unwind() {
ret void
}
'''
| Python |
import time
from pypy.tool.isolate import Isolate
from pypy.translator.llvm import buildllvm
from pypy.translator.llvm.database import Database
from pypy.rpython.rmodel import inputconst
from pypy.rpython.typesystem import getfunctionptr
from pypy.rpython.lltypesystem import lltype
from pypy.tool.udir import udir
from pypy.translator.llvm.codewriter import CodeWriter
from pypy.translator.llvm import extfuncnode
from pypy.translator.llvm.module.support import \
extdeclarations, extfunctions, extfunctions_standalone, write_raise_exc
from pypy.translator.llvm.node import LLVMNode
from pypy.translator.llvm.externs2ll import setup_externs, generate_llfile
from pypy.translator.llvm.gc import GcPolicy
from pypy.translator.llvm.log import log
from pypy.translator.llvm.buildllvm import llvm_is_on_path, postfix
class GenLLVM(object):
    """Top-level driver: turns an annotated/rtyped translator into a single
    LLVM assembly (.ll) file and knows how to build/load the result."""

    # per-process counter so re-running the same entry point in one test
    # session yields distinct .ll filenames; see create_codewriter() below
    function_count = {}

    def __init__(self, translator, standalone):
        # reset counters
        LLVMNode.nodename_count = {}

        self.standalone = standalone
        self.translator = translator
        self.config = translator.config

    def gen_source(self, func):
        """Generate the full .ll source for entry point `func`.

        Returns the path of the written file."""
        self._checkpoint()
        codewriter = self.setup(func)

        # write top part of llvm file
        self.write_headers(codewriter)

        codewriter.startimpl()

        # write bottom part of llvm file
        self.write_implementations(codewriter)

        self._checkpoint('done')
        codewriter.close()
        return self.filename

    def setup(self, func):
        """ setup all nodes
            create c file for externs
            create ll file for c file
            create codewriter """

        # XXX please dont ask!
        from pypy.translator.c.genc import CStandaloneBuilder
        cbuild = CStandaloneBuilder(self.translator, func, config=self.config)
        #cbuild.stackless = self.stackless
        c_db = cbuild.generate_graphs_for_llinterp()

        self.db = Database(self, self.translator)
        # XXX hardcoded for now
        self.db.gcpolicy = GcPolicy.new(self.db, 'boehm')

        # get entry point
        entry_point = self.get_entry_point(func)
        self._checkpoint('get_entry_point')

        # set up all nodes
        self.db.setup_all()

        self.entrynode = self.db.set_entrynode(entry_point)
        self._checkpoint('setup_all all nodes')

        # set up externs nodes
        self.extern_decls = setup_externs(c_db, self.db)
        self.translator.rtyper.specialize_more_blocks()
        self.db.setup_all()
        self._checkpoint('setup_all externs')

        for node in self.db.getnodes():
            node.post_setup_transform()

        self._print_node_stats()

        # create ll file from c code
        self.generate_ll_externs()
        self._checkpoint('setup_externs')

        # open file & create codewriter
        codewriter, self.filename = self.create_codewriter()
        self._checkpoint('open file and create codewriter')
        return codewriter

    def _set_wordsize(self, s):
        # substitute the platform word-size / intrinsic-suffix placeholders
        # used in the support-code templates (module/support.py)
        s = s.replace('UWORD', self.db.get_machine_uword())
        s = s.replace( 'WORD', self.db.get_machine_word())
        s = s.replace('POSTFIX', postfix())
        return s

    def write_headers(self, codewriter):
        """Emit everything that precedes the implementation section:
        extern headers, type declarations, global data, prototypes."""
        # write external function headers
        codewriter.header_comment('External Function Headers')
        codewriter.write_lines(self.llexterns_header)

        codewriter.header_comment("Type Declarations")

        # write extern type declarations
        self.write_extern_decls(codewriter)
        self._checkpoint('write externs type declarations')

        # write node type declarations
        for typ_decl in self.db.getnodes():
            typ_decl.writedatatypedecl(codewriter)
        self._checkpoint('write data type declarations')

        codewriter.header_comment("Global Data")

        # write pbcs
        for typ_decl in self.db.getnodes():
            typ_decl.writeglobalconstants(codewriter)
        self._checkpoint('write global constants')

        codewriter.header_comment("Function Prototypes")

        # write external protos
        codewriter.write_lines(self._set_wordsize(extdeclarations))

        # write node protos
        for typ_decl in self.db.getnodes():
            typ_decl.writedecl(codewriter)

        self._checkpoint('write function prototypes')

    def write_implementations(self, codewriter):
        """Emit the implementation section: support code, exception code,
        then every node's function body."""
        codewriter.header_comment("Function Implementation")

        # write external function implementations
        codewriter.header_comment('External Function Implementation')
        codewriter.write_lines(self.llexterns_functions)
        codewriter.write_lines(self._set_wordsize(extfunctions))
        if self.standalone:
            codewriter.write_lines(self._set_wordsize(extfunctions_standalone))
        self.write_extern_impls(codewriter)
        self.write_setup_impl(codewriter)

        self._checkpoint('write support implentations')

        # write exception implementaions
        from pypy.translator.llvm.exception import llvm_implcode
        codewriter.write_lines(llvm_implcode(self.entrynode))

        # write all node implementations
        for typ_decl in self.db.getnodes():
            typ_decl.writeimpl(codewriter)

        self._checkpoint('write node implementations')

        # write entry point if there is one
        codewriter.comment("End of file")

    def get_entry_point(self, func):
        """Register `func` with the database; returns its container obj."""
        assert func is not None
        self.entrypoint = func

        bk = self.translator.annotator.bookkeeper
        ptr = getfunctionptr(bk.getdesc(func).getuniquegraph())
        c = inputconst(lltype.typeOf(ptr), ptr)
        self.db.prepare_arg_value(c)
        self.entry_func_name = func.func_name
        return c.value._obj

    def generate_ll_externs(self):
        # compile the extern C declarations into llvm source text
        self.llexterns_header, self.llexterns_functions = \
                               generate_llfile(self.db,
                                               self.extern_decls,
                                               self.entrynode,
                                               self.standalone)

    def create_codewriter(self):
        """Open a fresh .ll file in udir and wrap it in a CodeWriter."""
        # prevent running the same function twice in a test
        # (note: local `postfix` shadows the imported buildllvm.postfix here)
        if self.entry_func_name in self.function_count:
            postfix = '_%d' % self.function_count[self.entry_func_name]
            self.function_count[self.entry_func_name] += 1
        else:
            postfix = ''
            self.function_count[self.entry_func_name] = 1
        filename = udir.join(self.entry_func_name + postfix).new(ext='.ll')
        f = open(str(filename), 'w')
        return CodeWriter(f, self.db), filename

    def write_extern_decls(self, codewriter):
        # emit "%Name = type ..." for every extern type declaration
        for c_name, obj in self.extern_decls:
            if isinstance(obj, lltype.LowLevelType):
                if isinstance(obj, lltype.Ptr):
                    obj = obj.TO
                l = "%%%s = type %s" % (c_name, self.db.repr_type(obj))
                codewriter.write_lines(l)

    def write_extern_impls(self, codewriter):
        # one %raise<Exc> helper per prebuilt RPyExc_* exception instance
        for c_name, obj in self.extern_decls:
            if c_name.startswith("RPyExc_"):
                c_name = c_name[1:]
                exc_repr = self.db.repr_constant(obj)[1]
                write_raise_exc(c_name, exc_repr, codewriter)

    def write_setup_impl(self, codewriter):
        # %LLVM_RPython_StartupCode() runs every node's setup snippet;
        # returns null on success
        open_decl = "sbyte* %LLVM_RPython_StartupCode()"
        codewriter.openfunc(open_decl)
        for node in self.db.getnodes():
            node.writesetupcode(codewriter)
        codewriter.ret("sbyte*", "null")
        codewriter.closefunc()

    def compile_module(self):
        """Build and import the generated code as a python module."""
        assert not self.standalone

        modname, dirpath = buildllvm.Builder(self).make_module()
        mod, wrap_fun = self.get_module(modname, dirpath)
        return mod, wrap_fun

    def get_module(self, modname, dirpath):
        # import the built module, optionally in an isolated subprocess
        if self.config.translation.llvm.isolate:
            mod = Isolate((dirpath, modname))
        else:
            from pypy.translator.tool.cbuild import import_module_from_directory
            mod = import_module_from_directory(dirpath, modname)

        wrap_fun = getattr(mod, 'pypy_' + self.entry_func_name + "_wrapper")
        return mod, wrap_fun

    def compile_standalone(self, exe_name):
        """Build a standalone executable; returns the builder's result."""
        assert self.standalone
        return buildllvm.Builder(self).make_standalone(exe_name)

    def _checkpoint(self, msg=None):
        # timing log: with a message, report the delta since the last call;
        # without one, (re)start the clock
        if not self.config.translation.llvm.logging:
            return
        if msg:
            t = (time.time() - self.starttime)
            log('\t%s took %02dm%02ds' % (msg, t/60, t%60))
        else:
            log('GenLLVM:')
        self.starttime = time.time()

    def _print_node_stats(self):
        """run_pypy-llvm.sh [aug 29th 2006]
        before slotifying: 350Mb
        after slotifying: 300Mb, 35 minutes
        """
        # disable node stats output
        if not self.config.translation.llvm.logging:
            return

        nodecount = {}
        for node in self.db.getnodes():
            typ = type(node)
            try:
                nodecount[typ] += 1
            except:
                nodecount[typ] = 1
        stats = [(count, str(typ)) for typ, count in nodecount.iteritems()]
        stats.sort()
        for s in stats:
            log('STATS %s' % str(s))
| Python |
import sys
from pypy.translator.llvm.log import log
from pypy.translator.llvm.funcnode import FuncNode, FuncTypeNode
from pypy.translator.llvm.extfuncnode import ExternalFuncNode, SimplerExternalFuncNode
from pypy.translator.llvm.structnode import StructNode, StructVarsizeNode, \
StructTypeNode, StructVarsizeTypeNode, getindexhelper, \
FixedSizeArrayTypeNode, FixedSizeArrayNode
from pypy.translator.llvm.arraynode import ArrayNode, StrArrayNode, \
VoidArrayNode, ArrayTypeNode, VoidArrayTypeNode
from pypy.translator.llvm.opaquenode import OpaqueNode, ExtOpaqueNode, \
OpaqueTypeNode, ExtOpaqueTypeNode
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.objspace.flow.model import Constant, Variable
from pypy.rpython.memory.lladdress import NULL
from pypy.rlib.objectmodel import Symbolic, ComputedIntSymbolic
from pypy.rlib.objectmodel import CDefinedIntSymbolic
from pypy.rlib import objectmodel
from pypy.rlib import jit
log = log.database
class Database(object):
    """Registry mapping lltype types and constant containers to LLVMNode
    instances, plus helpers rendering flowgraph args/constants as LLVM
    assembly text."""

    def __init__(self, genllvm, translator):
        self.genllvm = genllvm
        self.translator = translator
        # key (type or container) -> LLVMNode
        self.obj2node = {}
        # nodes added but not yet setup() -- drained by setup_all()
        self._pendingsetup = []
        self._tmpcount = 1

        self.helper2ptr = {}

        self.primitives = Primitives(self)

    #_______debuggging______________________________________

    def dump_pbcs(self):
        # debugging helper: textual dump of every top-level constant node
        r = ""
        for k, v in self.obj2node.iteritems():

            if isinstance(k, lltype.LowLevelType):
                continue

            assert isinstance(lltype.typeOf(k), lltype.ContainerType)

            # Only dump top levels
            p, _ = lltype.parentlink(k)
            if p is None:
                ref = v.get_ref()
                pbc_ref = v.get_ref()

                r += "\ndump_pbcs %s (%s)\n" \
                     "getref -> %s \n" \
                     "pbcref -> %s \n" % (v, k, ref, pbc_ref)
        return r

    #_______setting up and preperation______________________________

    def create_constant_node(self, type_, value):
        """Pick and build the node class matching constant `value`."""
        node = None
        if isinstance(type_, lltype.FuncType):

            if getattr(value._callable, "suggested_primitive", False):
                node = ExternalFuncNode(self, value)
            elif hasattr(value, '_external_name'):
                node = ExternalFuncNode(self, value, value._external_name)
            elif getattr(value, 'external', None) == 'C':
                node = SimplerExternalFuncNode(self, value)
            else:
                node = FuncNode(self, value)

        elif isinstance(type_, lltype.FixedSizeArray):
            node = FixedSizeArrayNode(self, value)

        elif isinstance(type_, lltype.Struct):
            if type_._arrayfld:
                node = StructVarsizeNode(self, value)
            else:
                node = StructNode(self, value)

        elif isinstance(type_, lltype.Array):
            if type_.OF is lltype.Char:
                node = StrArrayNode(self, value)
            elif type_.OF is lltype.Void:
                node = VoidArrayNode(self, value)
            else:
                node = ArrayNode(self, value)

        elif isinstance(type_, lltype.OpaqueType):
            if hasattr(type_, '_exttypeinfo'):
                node = ExtOpaqueNode(self, value)
            else:
                node = OpaqueNode(self, value)

        assert node is not None, "%s not supported" % (type_)
        return node

    def addpending(self, key, node):
        """Register a new node under `key` and queue it for setup()."""
        # santity check we at least have a key of the right type
        assert (isinstance(key, lltype.LowLevelType) or
                isinstance(lltype.typeOf(key), lltype.ContainerType))

        assert key not in self.obj2node, (
            "node with key %r already known!" %(key,))

        #log("added to pending nodes:", type(key), node)

        self.obj2node[key] = node
        self._pendingsetup.append(node)

    def prepare_type(self, type_):
        """Ensure a type node exists (or is pending) for `type_`."""
        if type_ in self.obj2node:
            return

        if isinstance(type_, lltype.Primitive):
            # primitives are rendered directly by Primitives, no node needed
            pass

        elif isinstance(type_, lltype.Ptr):
            self.prepare_type(type_.TO)

        elif isinstance(type_, lltype.FixedSizeArray):
            self.addpending(type_, FixedSizeArrayTypeNode(self, type_))

        elif isinstance(type_, lltype.Struct):
            if type_._arrayfld:
                self.addpending(type_, StructVarsizeTypeNode(self, type_))
            else:
                self.addpending(type_, StructTypeNode(self, type_))

        elif isinstance(type_, lltype.FuncType):
            self.addpending(type_, FuncTypeNode(self, type_))

        elif isinstance(type_, lltype.Array):
            if type_.OF is lltype.Void:
                self.addpending(type_, VoidArrayTypeNode(self, type_))
            else:
                self.addpending(type_, ArrayTypeNode(self, type_))

        elif isinstance(type_, lltype.OpaqueType):
            if hasattr(type_, '_exttypeinfo'):
                self.addpending(type_, ExtOpaqueTypeNode(self, type_))
            else:
                self.addpending(type_, OpaqueTypeNode(self, type_))

        else:
            assert False, "need to prepare typerepr %s %s" % (type_, type(type_))

    def prepare_type_multi(self, types):
        for type_ in types:
            self.prepare_type(type_)

    def prepare_constant(self, type_, value):
        """Ensure a node exists for constant `value` (and its type)."""
        if isinstance(type_, lltype.Primitive):
            #log.prepareconstant(value, "(is primitive)")
            if type_ is llmemory.Address:
                # prepare the constant data which this address references
                assert isinstance(value, llmemory.fakeaddress)
                if value:
                    self.prepare_constant(lltype.typeOf(value.ptr), value.ptr)
            return

        if isinstance(type_, lltype.Ptr) and isinstance(value._obj, int):
            # a raw integer masquerading as a pointer needs no node
            return

        if isinstance(type_, lltype.Ptr):
            type_ = type_.TO
            value = value._obj

            #log.prepareconstant("preparing ptr", value)

            # we dont need a node for nulls
            if value is None:
                return

        # we can share data via pointers
        if value not in self.obj2node:
            self.addpending(value, self.create_constant_node(type_, value))

        # always add type (it is safe)
        self.prepare_type(type_)

    def prepare_arg_value(self, const_or_var):
        """if const_or_var is not already in a dictionary self.obj2node,
        the appropriate node gets constructed and gets added to
        self._pendingsetup and to self.obj2node"""
        if isinstance(const_or_var, Constant):
            ct = const_or_var.concretetype
            if isinstance(ct, lltype.Primitive):
                # special cases for address
                if ct is llmemory.Address:
                    fakedaddress = const_or_var.value
                    if fakedaddress:
                        ptrvalue = fakedaddress.ptr
                        ct = lltype.typeOf(ptrvalue)
                    else:
                        return
                elif ct is llmemory.WeakGcAddress:
                    return # XXX sometime soon

                else:
                    # other primitives need no node
                    return
            else:
                assert isinstance(ct, lltype.Ptr), "Preparation of non primitive and non pointer"
                ptrvalue = const_or_var.value

            value = ptrvalue._obj
            if isinstance(value, int):
                return

            # Only prepare root values at this point
            if isinstance(ct, lltype.Array) or isinstance(ct, lltype.Struct):
                p, c = lltype.parentlink(value)
                if p is None:
                    #log.prepareargvalue("skipping preparing non root", value)
                    return

            if value is not None and value not in self.obj2node:
                self.addpending(value, self.create_constant_node(ct.TO, value))
        else:
            assert isinstance(const_or_var, Variable)

    def prepare_arg(self, const_or_var):
        #log.prepare(const_or_var)
        self.prepare_type(const_or_var.concretetype)
        self.prepare_arg_value(const_or_var)

    def setup_all(self):
        # drain the queue; setup() may enqueue further nodes
        while self._pendingsetup:
            node = self._pendingsetup.pop(0)
            #log.settingup(node)
            node.setup()

    def set_entrynode(self, key):
        self.entrynode = self.obj2node[key]
        return self.entrynode

    def getnodes(self):
        return self.obj2node.itervalues()

    # __________________________________________________________
    # Representing variables and constants in LLVM source code

    def repr_arg(self, arg):
        """Render a flowgraph Constant/Variable as an LLVM operand."""
        if isinstance(arg, Constant):
            if isinstance(arg.concretetype, lltype.Primitive):
                return self.primitives.repr(arg.concretetype, arg.value)
            else:
                assert isinstance(arg.value, lltype._ptr)
                if isinstance(arg.value._obj, int):
                    # integer smuggled in a pointer: emit it as a cast
                    rt = self.repr_type(arg.concretetype)
                    v = repr(arg.value._obj)
                    return 'cast (int %s to %s)'%(v, rt)
                elif not arg.value:
                    return 'null'
                else:
                    node = self.obj2node[arg.value._obj]
                    return node.get_ref()
        else:
            assert isinstance(arg, Variable)
            return "%" + str(arg)

    def repr_arg_type(self, arg):
        assert isinstance(arg, (Constant, Variable))
        ct = arg.concretetype
        return self.repr_type(ct)

    def repr_type(self, type_):
        """Render an lltype as LLVM type text."""
        try:
            return self.obj2node[type_].ref
        except KeyError:
            if isinstance(type_, lltype.Primitive):
                return self.primitives[type_]
            elif isinstance(type_, lltype.Ptr):
                return self.repr_type(type_.TO) + '*'
            else:
                raise TypeError("cannot represent %r" %(type_,))

    def repr_argwithtype(self, arg):
        return self.repr_arg(arg), self.repr_arg_type(arg)

    def repr_arg_multi(self, args):
        return [self.repr_arg(arg) for arg in args]

    def repr_arg_type_multi(self, args):
        return [self.repr_arg_type(arg) for arg in args]

    def repr_constant(self, value):
        " returns node and repr as tuple "
        type_ = lltype.typeOf(value)
        if isinstance(type_, lltype.Primitive):
            repr = self.primitives.repr(type_, value)
            return None, "%s %s" % (self.repr_type(type_), repr)

        elif isinstance(type_, lltype.Ptr):
            toptr = self.repr_type(type_)
            value = value._obj

            # special case, null pointer
            if value is None:
                return None, "%s null" % toptr

            if isinstance(value, int):
                return None, '%s cast (int %s to %s)'%(toptr, value, toptr)

            node = self.obj2node[value]
            ref = node.get_pbcref(toptr)
            return node, "%s %s" % (toptr, ref)

        elif isinstance(type_, (lltype.Array, lltype.Struct)):
            node = self.obj2node[value]
            return node, node.constantvalue()

        elif isinstance(type_, lltype.OpaqueType):
            node = self.obj2node[value]
            if isinstance(node, ExtOpaqueNode):
                return node, node.constantvalue()

        assert False, "%s not supported" % (type(value))

    def repr_tmpvar(self):
        # fresh unique temporary name
        count = self._tmpcount
        self._tmpcount += 1
        return "%tmp_" + str(count)

    def repr_constructor(self, type_):
        return self.obj2node[type_].constructor_ref

    def repr_name(self, obj):
        " simply returns a reference to constant value "
        return self.obj2node[obj].ref

    def repr_value(self, value):
        # XXX Testing
        return self.obj2node[value].get_ref()

    # __________________________________________________________
    # Other helpers

    def get_machine_word(self):
        return self.primitives[lltype.Signed]

    def get_machine_uword(self):
        return self.primitives[lltype.Unsigned]

    def is_function_ptr(self, arg):
        if isinstance(arg, (Constant, Variable)):
            arg = arg.concretetype
            if isinstance(arg, lltype.Ptr):
                if isinstance(arg.TO, lltype.FuncType):
                    return True
        return False

    def get_childref(self, parent, child):
        node = self.obj2node[parent]
        return node.get_childref(child)
class Primitives(object):
    def __init__(self, database):
        self.database = database
        # lltype primitive -> LLVM 1.x type name
        self.types = {
            lltype.Char: "sbyte",
            lltype.Bool: "bool",
            lltype.Float: "double",
            lltype.UniChar: "uint",
            lltype.Void: "void",
            lltype.UnsignedLongLong: "ulong",
            lltype.SignedLongLong: "long",
            llmemory.Address: "sbyte*",
            llmemory.WeakGcAddress: "sbyte*",
            }

        # 32 bit platform
        if sys.maxint == 2**31-1:
            self.types.update({
                lltype.Signed: "int",
                lltype.Unsigned: "uint" })

        # 64 bit platform
        elif sys.maxint == 2**63-1:
            self.types.update({
                lltype.Signed: "long",
                lltype.Unsigned: "ulong" })

        else:
            raise Exception("Unsupported platform - unknown word size")

        # lltype primitive -> method formatting a constant of that type
        self.reprs = {
            lltype.SignedLongLong : self.repr_signed,
            lltype.Signed : self.repr_signed,
            lltype.UnsignedLongLong : self.repr_default,
            lltype.Unsigned : self.repr_default,
            lltype.Float : self.repr_float,
            lltype.Char : self.repr_char,
            lltype.UniChar : self.repr_unichar,
            lltype.Bool : self.repr_bool,
            lltype.Void : self.repr_void,
            llmemory.Address : self.repr_address,
            llmemory.WeakGcAddress : self.repr_weakgcaddress,
            }

        try:
            import ctypes
        except ImportError:
            pass
        else:
            from pypy.rpython.rctypes import rcarithmetic as rcarith

            # register the rctypes arithmetic types without overriding
            # anything already set up above
            def update(from_, type):
                if from_ not in self.types:
                    self.types[from_] = type
                if from_ not in self.reprs:
                    self.reprs[from_] = self.repr_default

            for k, v in [
                (rcarith.CByte, self.types[lltype.Char]),
                (rcarith.CUByte, 'ubyte'),
                (rcarith.CShort, 'short'),
                (rcarith.CUShort, 'ushort'),
                (rcarith.CInt, 'int'),
                (rcarith.CUInt, 'uint'),
                (rcarith.CLong, self.types[lltype.Signed]),
                (rcarith.CULong, self.types[lltype.Unsigned]),
                (rcarith.CLonglong, self.types[lltype.SignedLongLong]),
                (rcarith.CULonglong, self.types[lltype.UnsignedLongLong])]:
                update(k, v)
def __getitem__(self, key):
return self.types[key]
def repr(self, type_, value):
try:
reprfn = self.reprs[type_]
except KeyError:
raise Exception, "unsupported primitive type %r, value %r" % (type_, value)
else:
return reprfn(type_, value)
def repr_default(self, type_, value):
return str(value)
def repr_bool(self, type_, value):
return str(value).lower() #False --> false
def repr_void(self, type_, value):
return 'void'
def repr_char(self, type_, value):
x = ord(value)
if x >= 128:
r = "cast (ubyte %s to sbyte)" % x
else:
r = str(x)
return r
def repr_unichar(self, type_, value):
return str(ord(value))
def repr_float(self, type_, value):
repr = "%f" % value
# llvm requires a . when using e notation
if "e" in repr and "." not in repr:
repr = repr.replace("e", ".0e")
elif repr in ["inf", "nan"]:
# Need hex repr
import struct
packed = struct.pack("d", value)
if sys.byteorder == 'little':
packed = packed[::-1]
repr = "0x" + "".join([("%02x" % ord(ii)) for ii in packed])
return repr
def repr_address(self, type_, value):
if not value:
return 'null'
ptr = value.ptr
node, ref = self.database.repr_constant(ptr)
res = "cast(%s to sbyte*)" % (ref,)
return res
def repr_weakgcaddress(self, type_, value):
assert isinstance(value, llmemory.fakeweakaddress)
log.WARNING("XXX weakgcaddress completely ignored...")
return 'null'
def repr_signed(self, type_, value):
if isinstance(value, Symbolic):
return self.repr_symbolic(type_, value)
return str(value)
def repr_symbolic(self, type_, value):
""" returns an int value for pointer arithmetic - not sure this is the
llvm way, but well XXX need to fix adr_xxx operations """
if (type(value) == llmemory.GCHeaderOffset or
type(value) == llmemory.AddressOffset):
repr = 0
elif isinstance(value, llmemory.AddressOffset):
from_, indices, to = self.get_offset(value)
indices_as_str = ", ".join("%s %s" % (w, i) for w, i in indices)
r = self.database.repr_type
repr = "cast(%s* getelementptr(%s* null, %s) to int)" % (r(to),
r(from_),
indices_as_str)
elif isinstance(value, ComputedIntSymbolic):
# force the ComputedIntSymbolic to become a real integer value now
repr = '%d' % value.compute_fn()
elif isinstance(value, CDefinedIntSymbolic):
if value is objectmodel.malloc_zero_filled:
repr = '1'
elif value is jit._we_are_jitted:
repr = '0'
else:
raise NotImplementedError("CDefinedIntSymbolic: %r" % (value,))
else:
raise NotImplementedError("symbolic: %r" % (value,))
return repr
def get_offset(self, value, initialindices=None):
" return (from_type, (indices, ...), to_type) "
word = self.database.get_machine_word()
uword = self.database.get_machine_uword()
indices = initialindices or [(word, 0)]
if isinstance(value, llmemory.ItemOffset):
# skips over a fixed size item (eg array access)
from_ = value.TYPE
lasttype, lastvalue = indices[-1]
assert lasttype == word
indices[-1] = (word, lastvalue + value.repeat)
to = value.TYPE
elif isinstance(value, llmemory.FieldOffset):
# jumps to a field position in a struct
from_ = value.TYPE
pos = getindexhelper(value.fldname, value.TYPE)
indices.append((uword, pos))
to = getattr(value.TYPE, value.fldname)
elif isinstance(value, llmemory.ArrayLengthOffset):
# jumps to the place where the array length is stored
from_ = value.TYPE # <Array of T> or <GcArray of T>
assert isinstance(value.TYPE, lltype.Array)
indices.append((uword, 0))
to = lltype.Signed
elif isinstance(value, llmemory.ArrayItemsOffset):
# jumps to the beginning of array area
from_ = value.TYPE
if not isinstance(value.TYPE, lltype.FixedSizeArray):
indices.append((uword, 1))
indices.append((word, 0)) # go to the 1st item
to = value.TYPE.OF
elif isinstance(value, llmemory.CompositeOffset):
from_, indices, to = self.get_offset(value.offsets[0], indices)
for item in value.offsets[1:]:
_, indices, to = self.get_offset(item, indices)
else:
raise Exception("unsupported offset")
return from_, indices, to
| Python |
from __future__ import division
#function snippets
def simple1():
return 1
def simple2():
return False
def simple3(i):
c = "Hello, Stars!"
return c[i]
def simple4():
return 3 + simple1()
def simple5(b):
if b:
x = 12
else:
x = 13
return x
def simple6():
simple4()
return 1
def ackermann(n, m):
    """Compute the two-argument Ackermann function A(n, m) recursively."""
    if n == 0:
        return m + 1
    elif m == 0:
        return ackermann(n - 1, 1)
    else:
        return ackermann(n - 1, ackermann(n, m - 1))
def calling1(m):
if m > 1:
return calling2(m - 1)
return m
def calling2(m):
if m > 1:
return calling1(m - 1)
return m
def default_arguments(i1, i2=2, s1="asdf"):
    """Return i1 + i2 + len(s1); exercises default int and string args."""
    total = i1 + i2
    return total + len(s1)
def call_default_arguments(i, j):
    # call default_arguments() with one, two and three arguments so the
    # annotator sees every arity
    if j == 0:
        return default_arguments(i)
    elif j == 1:
        return default_arguments(i, 42)
    return default_arguments(i, j, "qwertyuiop")
def list_default_argument(i1, l1=[0]):
    # NOTE: the mutable default is deliberate -- this snippet exists to test
    # how the translator handles a shared list default argument that grows
    # across calls.
    l1.append(i1)
    return len(l1) + l1[-2]
def call_list_default_argument(i1):
    # always relies on the shared default list above
    return list_default_argument(i1)
def return_none():
pass
def shiftleft(i, j):
return i << j
def shiftright(i, j):
return i >> j
#float snippets
def float_f1(x):
return x + 1.2
def float_int_bool(x):
return x * (2 + True)
#array snippets
def array_simple():
a = [42]
return a[0]
def array_simple1(item):
a = [item] * 10
i = 0
v = 0
while i < 10:
v += a[i]
i += 1
return v
def array_setitem(i):
a = [1] * 10
a[i] = i
a[1] = 12
a[2] = 13
return a[i]
def array_add(a0, a1, b0, b1, i):
a = [0] * 2
b = [0] * 2
a[0] = a0
a[1] = a1
b[0] = b0
b[1] = b1
return (a + b)[i]
def double_array():
a = [15]
b = [a]
return b[0][0]
def double_array_set():
a = [15] * 3
b = [a] * 3
b[0][0] = 1
return b[1][0]
def bool_array():
a = [False] * 100
a[12] = True
return a[12]
def callee(array):
return array[0]
def array_arg(i):
a = [i - 5] * 12
return callee(a)
def array_len():
a = [1] * 10
return len(a)
def array_append(i):
a = [0] * 3
a.append(10)
return a[i]
def array_reverse(i):
a = [0] * 2
a[1] = 1
a.reverse()
return a[i]
def rangetest(i):
return range(10)[i]
def array_pop(i):
a = [0, 1, 2, 3]
return a.pop() + len(a) + a[i]
def newlist_zero_arg(i):
a = []
a.append(i)
return len(a) + a[0]
def big_array(i):
return [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17][i]
glob_array = [[i] * 5 for i in range(5)]
def access_global_array(x, y, z):
result = glob_array[x][y]
glob_array[x][y] = z
return result
def circular_list(n):
lst = []
i = 0
while i < n:
i += 1
lst = [lst]
return len(lst)
#class snippets
class A(object):
def __init__(self):
self.a = 14
self.b = False
def class_simple():
a = A()
return a.a
class B(A):
def __init__(self):
self.a = 14
self.b = False
def change(self, newa):
self.a = newa * 5
def class_simple1(newa):
b = B()
b.change(newa)
return b.a
class C(A):
def __init__(self, a):
self.a = a
self.b = 1
def class_simple2(newa):
b = B()
b.change(newa)
c = C(b)
return c.a.a
class AA(object):
x = 8
def __init__(self):
self.a = 15
self.b = 16
def g(self):
return self.a + self.b
class BB(AA):
x = 3
def g(self):
return self.a + self.a
def class_inherit1():
aa = AA()
bb = BB()
return aa.x + bb.x
def class_inherit2():
aa = AA()
bb = BB()
return aa.g() - bb.g()
class D(object):
def __init__(self, a, length):
self.a = [a] * length
self.length = length
def set_range(self):
i = 0
while i < self.length:
self.a[i] = i
i += 1
def id_int(i):
d = D(1, i + 1)
d.set_range()
return d.a[i]
class GGG(object):
pass
ggg = GGG()
ggg.a = 36
ggg.b = (1, 2, 3)
ggg.c = [1, 2, 3]
def global_instance(x):
    # reads and mutates the prebuilt global instance 'ggg'
    previous = ggg.a
    previous1 = ggg.c[-1]
    ggg.c.append(x)
    d = ggg.b[1]
    ggg.a = x
    # dead expression -- presumably kept to exercise flowgraph construction;
    # it has no effect on the result
    36 + d + 3
    return previous + d + previous1
class FFF: pass
fff = FFF()
fff.x = 10
def getset(x):
res = fff.x
fff.x = x
return res
def testgetset(y):
res1 = getset(y)
res2 = getset(y)
return res1 + res2
def degrading_func(obj):
if isinstance(obj, C):
return obj.a + obj.b
elif isinstance(obj, B):
return obj.a
return -90
def call_degrading_func(flag):
if flag:
return degrading_func(C(-37))
else:
return degrading_func(B())
circular_instance = GGG()
circular_instance.x = circular_instance
circular_instance.b = 10
def circular_classdef():
return circular_instance.x.x.x.x.x.x.x.b
#simple inheritance snippets
class AAA(object):
def __init__(self):
self.a = 1
def get(self):
return 4
def g(self):
return self.a
class BBB(AAA):
def __init__(self):
AAA.__init__(self)
self.b = 2
def get(self):
return 5
class CCC(BBB):
def __init__(self):
AAA.__init__(self)
BBB.__init__(self)
def cmethod(self):
return 13
def attribute_from_base_class():
a = AAA()
b = BBB()
return a.a + b.a + b.b
def method_of_base_class():
a = AAA()
b = BBB()
return a.get() + AAA.get(b) + b.get() + b.g()
def direct_call_of_virtual_method():
a = AAA()
b = BBB()
c = CCC()
return a.get() + b.get() + c.get()
def ifisinstance(a):
if isinstance(a, CCC):
return a.cmethod()
elif isinstance(a, BBB):
return a.b
return 1
def flow_type():
a = AAA()
b = BBB()
c = CCC()
return ifisinstance(a) + ifisinstance(b) + ifisinstance(c)
class CLA(object):
def __init__(self):
self.a = 1
class CLB(CLA):
def __init__(self):
self.a = 2
self.b = 1
def merge_classes(flag):
if flag:
a = CLA()
else:
a = CLB()
return a.a
class CLC(object):
def __init__(self, a):
self.a = a
def attribute_instance(x):
if x:
a = CLC(CLA())
else:
a = CLC(CLB())
return a.a.a
#string snippets
def string_f1(i):
    # builds "abc" * i by repeated concatenation; the quadratic += pattern
    # is intentional -- it exercises string concatenation in the translator
    j = 0
    ret = ""
    while j < i:
        ret += "abc"
        j += 1
    return ret
def string_f2(i, j):
return string_f1(i)[j]
#tuple snippets
def tuple_f1(i):
return (1, "asdf", i)[2]
def tuple_f2(i):
return (i, "test", "another one", [1, 2, 3])
def tuple_f3(i):
j, s1, s2, l = tuple_f2(i)
return j
def constant_tuple(i):
return len((1, 2, "asdf")) + i
#PBC snippets
class PBCClass(object):
def __init__(self, a):
self.a = a
self.b = 12
def _freeze_(self):
return True
def get(self, i):
return self.a[i]
pbc = PBCClass([1, 2, 3, 4])
def pbc_passing(pbc, i):
return pbc.a[i] + pbc.get(i)
def pbc_function1(i):
return pbc_passing(pbc, i)
class CIRCULAR1(object):
def __init__(self):
self.a = [1, 2, 3, 4]
self.pbc = pbc1
def get(self, i):
return self.a[i] + self.pbc.a.a[i] + self.pbc.b
class CIRCULAR2(CIRCULAR1):
def __init__(self):
pass
pbc1 = PBCClass(CIRCULAR2())
pbc1.a.pbc = pbc1
pbc1.a.a = range(4)
def pbc_function2(i):
a = CIRCULAR1()
return a.get(i)
| Python |
import py
from pypy.tool import isolate
from pypy.translator.llvm.buildllvm import llvm_is_on_path, llvm_version, gcc_version
from pypy.translator.llvm.genllvm import GenLLVM
optimize_tests = False
MINIMUM_LLVM_VERSION = 1.9
ext_modules = []
# test options
run_isolated_only = True
do_not_isolate = False
from pypy import conftest
def _cleanup(leave=0):
    """Close compiled-module isolates, keeping at most *leave* recent ones."""
    # no test should ever need more than 5 compiled functions
    doomed = ext_modules[:-leave] if leave else ext_modules[:]
    for mod in doomed:
        if isinstance(mod, isolate.Isolate):
            isolate.close_isolate(mod)
    # drop the closed entries from the module-level list in place
    if leave:
        del ext_modules[:-leave]
    else:
        del ext_modules[:]
def teardown_module(mod):
    """py.test hook: close every remaining isolate when the module is done."""
    _cleanup()
def llvm_test():
    """Skip the current test unless a recent-enough LLVM toolchain is found."""
    if not llvm_is_on_path():
        py.test.skip("could not find one of llvm-as or llvm-gcc")
    llvm_ver = llvm_version()
    if llvm_ver < MINIMUM_LLVM_VERSION:
        py.test.skip("llvm version not up-to-date (found "
                     "%.1f, should be >= %.1f)" % (llvm_ver, MINIMUM_LLVM_VERSION))
def gcc3_test():
    """Skip the current test unless gcc major version 3 is available.

    Returns True when gcc 3.x was found.  py.test.skip() raises an
    exception, so the skip branch never falls through -- the dead
    'return False' that used to follow it has been removed.
    """
    gcc_ver = gcc_version()
    if int(gcc_ver) != 3:
        # skip() raises, aborting the test right here
        py.test.skip("test required gcc version 3 (found version %.1f)" % gcc_ver)
    return True
#______________________________________________________________________________
def genllvm_compile(function,
                    annotation,
                    # debug options
                    debug=True,
                    logging=False,
                    isolate=True,
                    # pass to compile
                    optimize=True,
                    extra_opts={}):
    """Translate *function* (with argument *annotation*) through the LLVM
    backend and return (c_module, c_entrypoint).

    extra_opts: additional 'translation.*' config entries merged over the
    defaults below.  NOTE(review): the mutable default dict is only read
    (via options.update), never mutated, so sharing it is harmless here.
    """
    from pypy.translator.driver import TranslationDriver
    from pypy.config.pypyoption import get_pypy_config
    config = get_pypy_config({}, translating=True)
    options = {
        'translation.backend': 'llvm',
        'translation.llvm.debug': debug,
        'translation.llvm.logging': logging,
        'translation.llvm.isolate': isolate,
        'translation.backendopt.none': not optimize,
        'translation.gc': 'boehm',
        }
    options.update(extra_opts)
    config.set(**options)
    driver = TranslationDriver(config=config)
    driver.setup(function, annotation)
    # annotate -> rtype -> compile, optionally popping up the flowgraph
    # viewer after each phase when py.test was run with --view
    driver.annotate()
    if conftest.option.view:
        driver.translator.view()
    driver.rtype()
    if conftest.option.view:
        driver.translator.view()
    driver.compile()
    if conftest.option.view:
        driver.translator.view()
    return driver.c_module, driver.c_entryp
def compile_test(function, annotation, isolate=True, **kwds):
    " returns module and compiled function "
    # bail out early if no usable LLVM toolchain is installed
    llvm_test()
    if run_isolated_only and not isolate:
        py.test.skip("skipping not isolated test")
    # turn off isolation?
    isolate = isolate and not do_not_isolate
    # maintain only 3 isolated process (if any)
    _cleanup(leave=3)
    optimize = kwds.pop('optimize', optimize_tests)
    mod, fn = genllvm_compile(function, annotation, optimize=optimize,
                              isolate=isolate, **kwds)
    if isolate:
        # remember the isolate so teardown/_cleanup can close it later
        ext_modules.append(mod)
    return mod, fn
def compile_function(function, annotation, isolate=True, **kwds):
    " returns compiled function "
    # convenience wrapper around compile_test() that drops the module
    return compile_test(function, annotation, isolate=isolate, **kwds)[1]
| Python |
#!/usr/bin/env python
"""This script computes the relative performance between python
implementations on a set of microbenchmarks. The script usally is started
with "./microbench.py python ./pypy" where pypy is a symlink to you pypy exectable."""
import os, time, sys
microbenches = []
# import every test_*.py module from the current directory; each module's
# test_* functions become the microbenchmarks to run
for fname in os.listdir('.'):
    if not fname.startswith('test_') or not fname.endswith('.py'):
        continue
    microbench = fname[:-3]
    exec 'import ' + microbench
    microbenches.append(microbench)
def run(test_cases):
    """Execute every test_* function from the imported microbench modules.

    test_cases: list of name prefixes; when non-empty, only tests whose
    name starts with one of the prefixes are run.  Each test is repeated
    until at least MINIMUM_MICROBENCH_TIME seconds have elapsed, then the
    average time per run is printed (the output format is parsed by the
    __main__ block below -- do not change it casually).
    """
    MINIMUM_MICROBENCH_TIME = 1.0
    for microbench in microbenches:
        for k in [s for s in globals()[microbench].__dict__ if s.startswith('test_')] :
            if test_cases:
                # keep this test only if some requested prefix matches
                for tc in test_cases:
                    if k.startswith(tc):
                        break
                else:
                    continue
            testcase = microbench + '.' + k + '()'
            start = time.clock()
            n = 0
            duration = 0.0
            while duration < MINIMUM_MICROBENCH_TIME:
                exec testcase
                n += 1
                duration = time.clock() - start
            # report the average duration of a single run
            print '%s took %.2f seconds' % (testcase, duration / float(n))
if __name__ == '__main__':
    # usage: microbench.py exe1 exe2 ... [-k prefix1 prefix2 ...]
    # each executable re-runs this script; the first one is the reference
    args = sys.argv[1:]
    if '-k' in args:
        i = args.index('-k')
        executables = args[:i]
        test_cases  = args[i+1:]
        limit = '-k ' + ' '.join(test_cases)
    else:
        executables = args
        test_cases  = []
        limit = ''
    for n, exe in enumerate(executables):
        print 'exe:', exe
        data = [s for s in os.popen(exe + ' microbench.py %s 2>&1' % limit).readlines() if not s.startswith('debug:')]
        benchdata = {}
        for d in data:
            # parses run()'s "<testcase> took <duration> seconds" lines
            testcase, took, duration, seconds = d.split()
            benchdata[testcase] = float(duration)
        if n == 0:
            # first executable serves as the 1.0x baseline
            benchdata_ref = benchdata
        else:
            result = []
            for k, v in benchdata.iteritems():
                result.append( (v / benchdata_ref[k], k) )
            result.sort()
            for r in result:
                slowdown, testcase = r
                print '%5.2fx slower on %s' % (slowdown, testcase)
    if not executables:
        # no executables given: run the benchmarks in this process
        run(test_cases)
| Python |
#!/usr/bin/env python
"""This script computes the relative performance between python
implementations on a set of microbenchmarks. The script usally is started
with "./microbench.py python ./pypy" where pypy is a symlink to you pypy exectable."""
import os, time, sys
microbenches = []
for fname in os.listdir('.'):
if not fname.startswith('test_') or not fname.endswith('.py'):
continue
microbench = fname[:-3]
exec 'import ' + microbench
microbenches.append(microbench)
def run(test_cases):
MINIMUM_MICROBENCH_TIME = 1.0
for microbench in microbenches:
for k in [s for s in globals()[microbench].__dict__ if s.startswith('test_')] :
if test_cases:
for tc in test_cases:
if k.startswith(tc):
break
else:
continue
testcase = microbench + '.' + k + '()'
start = time.clock()
n = 0
duration = 0.0
while duration < MINIMUM_MICROBENCH_TIME:
exec testcase
n += 1
duration = time.clock() - start
print '%s took %.2f seconds' % (testcase, duration / float(n))
if __name__ == '__main__':
args = sys.argv[1:]
if '-k' in args:
i = args.index('-k')
executables = args[:i]
test_cases = args[i+1:]
limit = '-k ' + ' '.join(test_cases)
else:
executables = args
test_cases = []
limit = ''
for n, exe in enumerate(executables):
print 'exe:', exe
data = [s for s in os.popen(exe + ' microbench.py %s 2>&1' % limit).readlines() if not s.startswith('debug:')]
benchdata = {}
for d in data:
testcase, took, duration, seconds = d.split()
benchdata[testcase] = float(duration)
if n == 0:
benchdata_ref = benchdata
else:
result = []
for k, v in benchdata.iteritems():
result.append( (v / benchdata_ref[k], k) )
result.sort()
for r in result:
slowdown, testcase = r
print '%5.2fx slower on %s' % (slowdown, testcase)
if not executables:
run(test_cases)
| Python |
from pybench import Test
class SimpleListManipulation(Test):
version = 0.2
operations = 5* (6 + 6 + 6)
rounds = 60000
def test(self):
l = []
for i in xrange(self.rounds):
l.append(2)
l.append(3)
l.append(4)
l.append(2)
l.append(3)
l.append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
x = l[0]
x = l[1]
x = l[2]
x = l[3]
x = l[4]
x = l[5]
l.append(2)
l.append(3)
l.append(4)
l.append(2)
l.append(3)
l.append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
x = l[0]
x = l[1]
x = l[2]
x = l[3]
x = l[4]
x = l[5]
l.append(2)
l.append(3)
l.append(4)
l.append(2)
l.append(3)
l.append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
x = l[0]
x = l[1]
x = l[2]
x = l[3]
x = l[4]
x = l[5]
l.append(2)
l.append(3)
l.append(4)
l.append(2)
l.append(3)
l.append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
x = l[0]
x = l[1]
x = l[2]
x = l[3]
x = l[4]
x = l[5]
l.append(2)
l.append(3)
l.append(4)
l.append(2)
l.append(3)
l.append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
x = l[0]
x = l[1]
x = l[2]
x = l[3]
x = l[4]
x = l[5]
if len(l) > 10000:
# cut down the size
l = []
def calibrate(self):
l = []
for i in xrange(self.rounds):
pass
class ListSlicing(Test):
version = 0.4
operations = 25*(3+1+2+1)
rounds = 400
def test(self):
n = range(100)
r = range(25)
for i in xrange(self.rounds):
l = range(100)
for j in r:
m = l[50:]
m = l[:25]
m = l[50:55]
l[:3] = n
m = l[:-1]
m = l[1:]
l[-1:] = n
def calibrate(self):
n = range(100)
r = range(25)
for i in xrange(self.rounds):
l = range(100)
for j in r:
pass
class SmallLists(Test):
version = 0.3
operations = 5*(1+ 6 + 6 + 3 + 1)
rounds = 60000
def test(self):
for i in xrange(self.rounds):
l = []
l.append(2)
l.append(3)
l.append(4)
l.append(2)
l.append(3)
l.append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
l[:3] = [1,2,3]
m = l[:-1]
m = l[1:]
l[-1:] = [4,5,6]
l = []
l.append(2)
l.append(3)
l.append(4)
l.append(2)
l.append(3)
l.append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
l[:3] = [1,2,3]
m = l[:-1]
m = l[1:]
l[-1:] = [4,5,6]
l = []
l.append(2)
l.append(3)
l.append(4)
l.append(2)
l.append(3)
l.append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
l[:3] = [1,2,3]
m = l[:-1]
m = l[1:]
l[-1:] = [4,5,6]
l = []
l.append(2)
l.append(3)
l.append(4)
l.append(2)
l.append(3)
l.append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
l[:3] = [1,2,3]
m = l[:-1]
m = l[1:]
l[-1:] = [4,5,6]
l = []
l.append(2)
l.append(3)
l.append(4)
l.append(2)
l.append(3)
l.append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
l[:3] = [1,2,3]
m = l[:-1]
m = l[1:]
l[-1:] = [4,5,6]
def calibrate(self):
for i in xrange(self.rounds):
l = []
| Python |
from pybench import Test
class CreateInstances(Test):
version = 0.2
operations = 3 + 7 + 4
rounds = 60000
def test(self):
class c:
pass
class d:
def __init__(self,a,b,c):
self.a = a
self.b = b
self.c = c
class e:
def __init__(self,a,b,c=4):
self.a = a
self.b = b
self.c = c
self.d = a
self.e = b
self.f = c
for i in xrange(self.rounds):
o = c()
o1 = c()
o2 = c()
p = d(i,i,3)
p1 = d(i,i,3)
p2 = d(i,3,3)
p3 = d(3,i,3)
p4 = d(i,i,i)
p5 = d(3,i,3)
p6 = d(i,i,i)
q = e(i,i,3)
q1 = e(i,i,3)
q2 = e(i,i,3)
q3 = e(i,i)
def calibrate(self):
class c:
pass
class d:
def __init__(self,a,b,c):
self.a = a
self.b = b
self.c = c
class e:
def __init__(self,a,b,c=4):
self.a = a
self.b = b
self.c = c
self.d = a
self.e = b
self.f = c
for i in xrange(self.rounds):
pass
| Python |
try:
    unicode
except NameError:
    # no unicode support in this interpreter: make the whole module fail
    # to import so the benchmark driver skips these tests
    raise ImportError
from pybench import Test
from string import join
class ConcatUnicode(Test):
version = 0.1
operations = 10 * 5
rounds = 60000
def test(self):
# Make sure the strings are *not* interned
s = unicode(join(map(str,range(100))))
t = unicode(join(map(str,range(1,101))))
for i in xrange(self.rounds):
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
def calibrate(self):
s = unicode(join(map(str,range(100))))
t = unicode(join(map(str,range(1,101))))
for i in xrange(self.rounds):
pass
class CompareUnicode(Test):
version = 0.1
operations = 10 * 5
rounds = 150000
def test(self):
# Make sure the strings are *not* interned
s = unicode(join(map(str,range(10))))
t = unicode(join(map(str,range(10))) + "abc")
for i in xrange(self.rounds):
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
def calibrate(self):
s = unicode(join(map(str,range(10))))
t = unicode(join(map(str,range(10))) + "abc")
for i in xrange(self.rounds):
pass
class CreateUnicodeWithConcat(Test):
version = 0.1
operations = 10 * 5
rounds = 80000
def test(self):
for i in xrange(self.rounds):
s = u'om'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
def calibrate(self):
for i in xrange(self.rounds):
pass
class UnicodeSlicing(Test):
version = 0.1
operations = 5 * 7
rounds = 100000
def test(self):
s = unicode(join(map(str,range(100))))
for i in xrange(self.rounds):
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
def calibrate(self):
s = unicode(join(map(str,range(100))))
for i in xrange(self.rounds):
pass
### String methods
class UnicodeMappings(Test):
version = 0.1
operations = 3 * (5 + 4 + 2 + 1)
rounds = 10000
def test(self):
s = join(map(unichr,range(20)),'')
t = join(map(unichr,range(100)),'')
u = join(map(unichr,range(500)),'')
v = join(map(unichr,range(1000)),'')
for i in xrange(self.rounds):
s.lower()
s.lower()
s.lower()
s.lower()
s.lower()
s.upper()
s.upper()
s.upper()
s.upper()
s.upper()
s.title()
s.title()
s.title()
s.title()
s.title()
t.lower()
t.lower()
t.lower()
t.lower()
t.upper()
t.upper()
t.upper()
t.upper()
t.title()
t.title()
t.title()
t.title()
u.lower()
u.lower()
u.upper()
u.upper()
u.title()
u.title()
v.lower()
v.upper()
v.title()
def calibrate(self):
s = join(map(unichr,range(20)),'')
t = join(map(unichr,range(100)),'')
u = join(map(unichr,range(500)),'')
v = join(map(unichr,range(1000)),'')
for i in xrange(self.rounds):
pass
class UnicodePredicates(Test):
version = 0.1
operations = 5 * 9
rounds = 100000
def test(self):
data = (u'abc', u'123', u' ', u'\u1234\u2345\u3456', u'\uFFFF'*10)
len_data = len(data)
for i in xrange(self.rounds):
s = data[i % len_data]
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
def calibrate(self):
data = (u'abc', u'123', u' ', u'\u1234\u2345\u3456', u'\uFFFF'*10)
len_data = len(data)
for i in xrange(self.rounds):
s = data[i % len_data]
try:
import unicodedata
except ImportError:
pass
else:
class UnicodeProperties(Test):
version = 0.1
operations = 5 * 8
rounds = 100000
def test(self):
data = (u'a', u'1', u' ', u'\u1234', u'\uFFFF')
len_data = len(data)
digit = unicodedata.digit
numeric = unicodedata.numeric
decimal = unicodedata.decimal
category = unicodedata.category
bidirectional = unicodedata.bidirectional
decomposition = unicodedata.decomposition
mirrored = unicodedata.mirrored
combining = unicodedata.combining
for i in xrange(self.rounds):
c = data[i % len_data]
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
def calibrate(self):
data = (u'a', u'1', u' ', u'\u1234', u'\uFFFF')
len_data = len(data)
digit = unicodedata.digit
numeric = unicodedata.numeric
decimal = unicodedata.decimal
category = unicodedata.category
bidirectional = unicodedata.bidirectional
decomposition = unicodedata.decomposition
mirrored = unicodedata.mirrored
combining = unicodedata.combining
for i in xrange(self.rounds):
c = data[i % len_data]
| Python |
#!/usr/local/bin/python -O
""" A Python Benchmark Suite
"""
__copyright__="""\
Copyright (c), 1997-2001, Marc-Andre Lemburg (mal@lemburg.com)
All Rights Reserved.
Permission to use, copy, modify, and distribute this software and
its documentation for any purpose and without fee or royalty is
hereby granted, provided that the above copyright notice appear in
all copies and that both that copyright notice and this permission
notice appear in supporting documentation or portions thereof,
including modifications, that you make.
THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD
TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
SOFTWARE !
"""
__version__ = '1.0'
#
# NOTE: Use xrange for all test loops unless you want to face
# a 20MB process !
#
# All tests should have rounds set to values so that a run()
# takes between 20-50 seconds. This is to get fairly good
# clock() values. You can use option -w to speedup the tests
# by a fixed integer factor (the "warp factor").
#
import autopath
from py.xml import html
import sys,time,operator
from CommandLine import *
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
### Test baseclass
class Test:
    """ All test must have this class as baseclass. It provides
    the necessary interface to the benchmark machinery.
    The tests must set .rounds to a value high enough to let the
    test run between 20-50 seconds. This is needed because
    clock()-timing only gives rather inaccurate values (on Linux,
    for example, it is accurate to a few hundreths of a
    second). If you don't want to wait that long, use a warp
    factor larger than 1.
    It is also important to set the .operations variable to a
    value representing the number of "virtual operations" done per
    call of .run().
    If you change a test in some way, don't forget to increase
    its version number.
    """
    operations = 1      # number of operations done per test()-call
    rounds = 100000     # number of rounds per run
    is_a_test = 1       # identifier
    last_timing = (0,0,0) # last timing (real,run,calibration)
    warp = 1            # warp factor this test uses
    cruns = 20          # number of calibration runs
    overhead = None     # list of calibration timings
    version = 1.0       # version number of this test
    def __init__(self,warp=1):
        if warp > 1:
            # fewer rounds per run (Python 2 integer division)
            self.rounds = self.rounds / warp
            self.warp = warp
        self.times = []
        self.overhead = []
        # We want these to be in the instance dict, so that pickle
        # saves them
        self.version = self.version
        self.operations = self.operations
        self.rounds = self.rounds
    def run(self):
        """ Run the test in two phases: first calibrate, then
            do the actual test. Be careful to keep the calibration
            timing low w/r to the test timing.
        """
        test = self.test
        calibrate = self.calibrate
        clock = time.clock
        cruns = self.cruns
        # first calibrate: average the empty-loop overhead over cruns runs
        offset = 0.0
        for i in range(cruns):
            t = clock()
            calibrate()
            t = clock() - t
            offset = offset + t
        offset = offset / cruns
        # now the real thing
        t = clock()
        test()
        t = clock() - t
        # record (net, gross, overhead); net time feeds the statistics
        self.last_timing = (t-offset,t,offset)
        self.times.append(t-offset)
    def calibrate(self):
        """ Run the test, doing self.rounds loops, but without
            the actual test target in place.
        """
        return
    def test(self):
        """ Run the test, doing self.rounds loop
        """
        # do some tests
        return
    def stat(self):
        """ Return three values: average time per run, average time per
            operation, and average calibration overhead per run.
        """
        runs = len(self.times)
        if runs == 0:
            return 0,0
        totaltime = reduce(operator.add,self.times,0.0)
        avg = totaltime / float(runs)
        op_avg = totaltime / float(runs * self.rounds * self.operations)
        if self.overhead:
            totaloverhead = reduce(operator.add,self.overhead,0.0)
            ov_avg = totaloverhead / float(runs)
        else:
            # use self.last_timing - not too accurate
            ov_avg = self.last_timing[2]
        return avg,op_avg,ov_avg
### load Setup
import Setup
### Benchmark base class
class Benchmark:
name = '?' # Name of the benchmark
rounds = 1 # Number of rounds to run
warp = 1 # Warp factor
roundtime = 0 # Average round time
version = None # Benchmark version number (see __init__)
def __init__(self):
self.tests = {}
self.version = 0.31
def load_tests(self,setupmod,warp=1):
self.warp = warp
tests = self.tests
print 'Searching for tests...'
setupmod.__dict__.values()
for c in setupmod.__dict__.values():
if hasattr(c,'is_a_test') and c.__name__ != 'Test':
tests[c.__name__] = c(warp)
l = tests.keys()
l.sort()
for t in l:
print ' ',t
print
def run(self):
tests = self.tests.items()
tests.sort()
clock = time.clock
print 'Running %i round(s) of the suite: ' % self.rounds
print
roundtime = clock()
for i in range(self.rounds):
print ' Round %-25i real abs overhead' % (i+1)
for j in range(len(tests)):
name,t = tests[j]
print '%30s:' % name,
t.run()
print ' %.3fr %.3fa %.3fo' % t.last_timing
print ' ----------------------'
print ' Average round time: %.3f seconds' % \
((clock() - roundtime)/(i+1))
print
self.roundtime = (clock() - roundtime) / self.rounds
print
def print_stat(self, compare_to=None, hidenoise=0):
if not compare_to:
print '%-30s per run per oper. overhead' % 'Tests:'
print '-'*72
tests = self.tests.items()
tests.sort()
for name,t in tests:
avg,op_avg,ov_avg = t.stat()
print '%30s: %10.2f ms %7.2f us %7.2f ms' % \
(name,avg*1000.0,op_avg*1000000.0,ov_avg*1000.0)
print '-'*72
print '%30s: %10.2f ms' % \
('Average round time',self.roundtime * 1000.0)
else:
print '%-30s per run per oper. diff *)' % \
'Tests:'
print '-'*72
tests = self.tests.items()
tests.sort()
compatible = 1
for name,t in tests:
avg,op_avg,ov_avg = t.stat()
try:
other = compare_to.tests[name]
except KeyError:
other = None
if other and other.version == t.version and \
other.operations == t.operations:
avg1,op_avg1,ov_avg1 = other.stat()
qop_avg = (op_avg/op_avg1-1.0)*100.0
if hidenoise and abs(qop_avg) < 10:
qop_avg = ''
else:
qop_avg = '%+7.2f%%' % qop_avg
else:
qavg,qop_avg = 'n/a', 'n/a'
compatible = 0
print '%30s: %10.2f ms %7.2f us %8s' % \
(name,avg*1000.0,op_avg*1000000.0,qop_avg)
print '-'*72
if compatible and compare_to.roundtime > 0 and \
compare_to.version == self.version:
print '%30s: %10.2f ms %+7.2f%%' % \
('Average round time',self.roundtime * 1000.0,
((self.roundtime*self.warp)/
(compare_to.roundtime*compare_to.warp)-1.0)*100.0)
else:
print '%30s: %10.2f ms n/a' % \
('Average round time',self.roundtime * 1000.0)
print
print '*) measured against: %s (rounds=%i, warp=%i)' % \
(compare_to.name,compare_to.rounds,compare_to.warp)
print
def html_stat(self, compare_to=None, hidenoise=0):
if not compare_to:
table = html.table(
html.thead(
html.tr(
[ html.th(x, **{'mochi:format': y, 'align':'left'})
for (x,y) in [('Tests','str'), ('per run','float'),
('per oper.', 'float'), ('overhead', 'float')]])
),id = "sortable_table")
tests = self.tests.items()
tests.sort()
tbody = html.tbody()
for name,t in tests:
avg,op_avg,ov_avg = t.stat()
tbody.append(html.tr( html.td(name),
html.td(avg*1000.0),
html.td(op_avg*1000000.0),
html.td(ov_avg*1000.0)
))
table.append(tbody)
table.append(html.tr(
'Average round time %s' % (self.roundtime * 1000.0))
)
return table
elif isinstance(compare_to, Benchmark):
table = html.table(html.thead(
html.tr([ html.th(x, **{'mochi:format': y, 'align':'left'})
for (x,y) in [('Tests','str'), ('per run','float'),
('per oper.', 'float'), ('diff', 'float')]])),
id = "sortable_table", class_="datagrid")
tests = self.tests.items()
tests.sort()
compatible = 1
tbody = html.tbody()
for name,t in tests:
avg,op_avg,ov_avg = t.stat()
try:
other = compare_to.tests[name]
except KeyError:
other = None
if other and other.version == t.version and \
other.operations == t.operations:
avg1,op_avg1,ov_avg1 = other.stat()
qop_avg = (op_avg/op_avg1-1.0)*100.0
if hidenoise and abs(qop_avg) < 10:
qop_avg = ''
else:
qop_avg = '%+7.2f%%' % qop_avg
else:
qavg,qop_avg = 'n/a', 'n/a'
compatible = 0
tbody.append(html.tr( html.td(name),
html.td(avg*1000.0),
html.td(op_avg*1000000.0),
html.td(qop_avg)
))
if compatible and compare_to.roundtime > 0 and \
compare_to.version == self.version:
tbody.append(html.tr(
html.td('Average round time'),
html.td(self.roundtime * 1000.0),
html.td(''),
html.td('%+7.2f%%'% (((self.roundtime*self.warp)/
(compare_to.roundtime*compare_to.warp)-1.0)*100.0)
)))
else:
tbody.append(html.tr(
html.td('Average round time'),
html.td(self.roundtime * 1000.0)))
table.append(tbody)
return table
else:
table = html.table(html.thead(
html.tr([ html.th(x, **{'mochi:format': y, 'align':'left'})
for (x,y) in [('Tests','str')]+[('pypy ver','float') for z in compare_to]
])),
id = "sortable_table")
tests = self.tests.items()
tests.sort()
compatible = 1
for name,t in tests:
avg,op_avg,ov_avg = t.stat()
percent = []
for comp_to in compare_to:
try:
other = comp_to.tests[name]
except KeyError:
other = None
if other and other.version == t.version and \
other.operations == t.operations:
avg1,op_avg1,ov_avg1 = other.stat()
qop_avg = (op_avg/op_avg1-1.0)*100.0
if hidenoise and abs(qop_avg) < 10:
qop_avg = ''
else:
qop_avg = '%+7.2f%%' % qop_avg
else:
qavg,qop_avg = 'n/a', 'n/a'
compatible = 0
percent.append(qop_avg)
table.append(html.tr( html.td(name),
[html.td(qop_avg) for qop_avg in percent]
))
if compatible and compare_to.roundtime > 0 and \
compare_to.version == self.version:
table.append(html.tr(
html.td('Average round time'),
html.td(self.roundtime * 1000.0),
html.td(''),
html.td('%+7.2f%%'% (((self.roundtime*self.warp)/
(compare_to.roundtime*compare_to.warp)-1.0)*100.0)
)))
else:
table.append(html.tr(
html.td('Average round time'),
html.td(self.roundtime * 1000.0)))
return table
def print_machine():
    """ Print platform and interpreter details for the report header. """
    import platform
    print 'Machine Details:'
    print ' Platform ID: %s' % platform.platform()
    # There's a bug in Python 2.2b1+...
    # ("if 1 or" deliberately short-circuits the version check while
    # keeping the original workaround condition visible.)
    if 1 or sys.version[:6] != '2.2b1+':
        print ' Python: %s' % platform.python_version()
        print ' Compiler: %s' % platform.python_compiler()
        buildno, buildate = platform.python_build()
        print ' Build: %s (#%i)' % (buildate, buildno)
class Document(object):
    """ Minimal HTML document wrapper around py.xml.html, pre-wired
        with the MochiKit CSS/JS needed for client-side sortable
        result tables.  Append content to self.body, then call
        writetopath() to serialize.
    """

    def __init__(self, title=None):
        self.body = html.body()
        self.head = html.head()
        self.doc = html.html(self.head, self.body)
        if title is not None:
            self.head.append(
                html.meta(name="title", content=title))
        # MochiKit assets; paths are relative to wherever the report
        # file is written (see PyBenchCmdline.main, 'index.html').
        self.head.append(
            html.link(rel="Stylesheet", type="text/css", href="MochiKit-1.1/examples/sortable_tables/sortable_tables.css"))
        self.head.append(
            html.script(rel="JavaScript", type="text/javascript", src="MochiKit-1.1/lib/MochiKit/MochiKit.js"))
        self.head.append(
            html.script(rel="JavaScript", type="text/javascript", src="MochiKit-1.1/examples/sortable_tables/sortable_tables.js"))

    def writetopath(self, p):
        """ Serialize the document as UTF-8 and write it to the
            py.path object p (extension must be '.html').
        """
        assert p.ext == '.html'
        self.head.append(
            html.meta(name="Content-Type", content="text/html;charset=UTF-8")
        )
        s = self.doc.unicode().encode('utf-8')
        p.write(s)
class PyBenchCmdline(Application):
    """ Command-line front end: parses options, then runs the suite
        and/or loads, compares and displays pickled Benchmark results.
    """

    header = ("PYBENCH - a benchmark test suite for Python "
              "interpreters/compilers.")
    version = __version__
    options = [ArgumentOption('-n','number of rounds',Setup.Number_of_rounds),
               ArgumentOption('-f','save benchmark to file arg',''),
               ArgumentOption('-c','compare benchmark with the one in file arg',''),
               ArgumentOption('-l','compare benchmark with the ones in the files arg',''),
               ArgumentOption('-s','show benchmark in file arg, then exit',''),
               ArgumentOption('-w','set warp factor to arg',Setup.Warp_factor),
               SwitchOption('-d','hide noise in compares', 0),
               SwitchOption('--no-gc','disable garbage collection', 0),
               SwitchOption('-x','write html table', 0),
               ]
    about = """\
The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisms.
Examples:
python1.5 pybench.py -w 100 -f p15
python1.4 pybench.py -w 100 -f p14
python pybench.py -s p15 -c p14
"""
    copyright = __copyright__

    def main(self):
        """ Entry point called by the Application base class after
            option parsing.
        """
        rounds = self.values['-n']
        reportfile = self.values['-f']
        show_bench = self.values['-s']
        compare_to = self.values['-c']
        # NOTE(review): '-l' is parsed here but compare_to_many is
        # never used below -- the multi-baseline branch of
        # Benchmark.html_stat is unreachable from this driver.
        compare_to_many = self.values['-l']
        hidenoise = self.values['-d']
        warp = self.values['-w']
        nogc = self.values['--no-gc']
        # NOTE(review): this local shadows the module-level py.xml
        # 'html' name inside main(); harmless, since main() only uses
        # it as a boolean flag.
        html = self.values['-x']

        # Switch off GC
        if nogc:
            try:
                import gc
            except ImportError:
                nogc = 0
            else:
                # NOTE(review): re-checks the same switch nogc was
                # read from; equivalent to an unconditional disable().
                if self.values['--no-gc']:
                    gc.disable()

        print 'PYBENCH',__version__
        print
        if not compare_to:
            #print_machine()
            print

        # Load the comparison baseline, replacing the filename string
        # with the unpickled Benchmark object.
        if compare_to:
            try:
                f = open(compare_to,'rb')
                bench = pickle.load(f)
                bench.name = compare_to
                f.close()
                compare_to = bench
            except IOError:
                print '* Error opening/reading file',compare_to
                compare_to = None

        # -s: show a stored benchmark (optionally diffed) and exit.
        if show_bench:
            try:
                f = open(show_bench,'rb')
                bench = pickle.load(f)
                bench.name = show_bench
                f.close()
                print 'Benchmark: %s (rounds=%i, warp=%i)' % \
                      (bench.name,bench.rounds,bench.warp)
                print
                print "*******************************************"
                if html:
                    print "Generating HTML"
                    import py.path
                    index = py.path.local('index.html')
                    table = bench.html_stat(compare_to, hidenoise)
                    doc = Document()
                    doc.body.append(table)
                    doc.writetopath(index)
                else:
                    bench.print_stat(compare_to, hidenoise)
            except IOError:
                print '* Error opening/reading file',show_bench
                print
            return

        if reportfile:
            if nogc:
                print 'Benchmark: %s (rounds=%i, warp=%i, no GC)' % \
                      (reportfile,rounds,warp)
            else:
                print 'Benchmark: %s (rounds=%i, warp=%i)' % \
                      (reportfile,rounds,warp)
            print

        # Create benchmark object
        bench = Benchmark()
        bench.rounds = rounds
        bench.load_tests(Setup,warp)
        try:
            bench.run()
        except KeyboardInterrupt:
            print
            print '*** KeyboardInterrupt -- Aborting'
            print
            return
        bench.print_stat(compare_to)
        if html:
            print "Generating HTML"
            import py.path
            index = py.path.local('index.html')
            table = bench.html_stat(compare_to, hidenoise)
            doc = Document()
            doc.body.append(table)
            doc.writetopath(index)
        # ring bell
        sys.stderr.write('\007')

        # -f: pickle the run for later comparison.
        if reportfile:
            try:
                f = open(reportfile,'wb')
                bench.name = reportfile
                pickle.dump(bench,f)
                f.close()
            except IOError:
                print '* Error opening/writing reportfile'
if __name__ == '__main__':
    # Instantiating the Application subclass parses sys.argv and
    # invokes main() -- presumably via CommandLine.Application's
    # __init__ (defined in the project's CommandLine module).
    PyBenchCmdline()
| Python |
from pybench import Test
class CompareIntegers(Test):
    """ Time chained small-integer comparisons (<, >, ==).

        The body is deliberately unrolled: 30 groups of 5 comparisons
        per loop iteration, matching operations = 30 * 5.  Do not
        refactor the repetition into a loop -- the unrolled statements
        are exactly what is being measured.
    """
    version = 0.1
    operations = 30 * 5
    rounds = 120000

    def test(self):
        # Results are discarded; only the comparison cost matters.
        for i in xrange(self.rounds):
            # group 1
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 2
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 3
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 4
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 5
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 6
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 7
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 8
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 9
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 10
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 11
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 12
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 13
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 14
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 15
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 16
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 17
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 18
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 19
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 20
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 21
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 22
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 23
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 24
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 25
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 26
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 27
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 28
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 29
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3
            # group 30
            2 < 3
            2 > 3
            2 == 3
            2 > 3
            2 < 3

    def calibrate(self):
        # Same loop without the comparisons: measures loop overhead.
        for i in xrange(self.rounds):
            pass
class CompareFloats(Test):
    """ Time chained float-to-float comparisons (<, >, ==).

        Deliberately unrolled: 30 groups of 5 comparisons per loop
        iteration, matching operations = 30 * 5.  Do not refactor the
        repetition into a loop -- the unrolling is what is measured.
    """
    version = 0.1
    operations = 30 * 5
    rounds = 60000

    def test(self):
        # Results are discarded; only the comparison cost matters.
        for i in xrange(self.rounds):
            # group 1
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 2
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 3
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 4
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 5
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 6
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 7
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 8
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 9
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 10
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 11
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 12
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 13
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 14
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 15
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 16
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 17
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 18
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 19
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 20
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 21
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 22
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 23
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 24
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 25
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 26
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 27
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 28
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 29
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31
            # group 30
            2.1 < 3.31
            2.1 > 3.31
            2.1 == 3.31
            2.1 > 3.31
            2.1 < 3.31

    def calibrate(self):
        # Same loop without the comparisons: measures loop overhead.
        for i in xrange(self.rounds):
            pass
class CompareFloatsIntegers(Test):
    """ Time chained mixed float-to-integer comparisons (<, >, ==).

        Deliberately unrolled: 30 groups of 5 comparisons per loop
        iteration, matching operations = 30 * 5.  Do not refactor the
        repetition into a loop -- the unrolling is what is measured.
    """
    version = 0.1
    operations = 30 * 5
    rounds = 60000

    def test(self):
        # Results are discarded; only the comparison cost matters.
        for i in xrange(self.rounds):
            # group 1
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 2
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 3
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 4
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 5
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 6
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 7
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 8
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 9
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 10
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 11
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 12
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 13
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 14
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 15
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 16
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 17
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 18
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 19
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 20
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 21
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 22
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 23
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 24
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 25
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 26
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 27
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 28
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 29
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4
            # group 30
            2.1 < 4
            2.1 > 4
            2.1 == 4
            2.1 > 4
            2.1 < 4

    def calibrate(self):
        # Same loop without the comparisons: measures loop overhead.
        for i in xrange(self.rounds):
            pass
class CompareLongs(Test):
    """ Time chained Python 2 long-integer comparisons (<, >, ==).

        Deliberately unrolled: 30 groups of 5 comparisons per loop
        iteration, matching operations = 30 * 5.  Do not refactor the
        repetition into a loop -- the unrolling is what is measured.
        (The 'L' literal suffix makes this module Python 2 only.)
    """
    version = 0.1
    operations = 30 * 5
    rounds = 60000

    def test(self):
        # Results are discarded; only the comparison cost matters.
        for i in xrange(self.rounds):
            # group 1
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 2
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 3
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 4
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 5
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 6
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 7
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 8
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 9
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 10
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 11
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 12
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 13
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 14
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 15
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 16
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 17
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 18
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 19
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 20
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 21
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 22
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 23
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 24
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 25
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 26
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 27
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 28
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 29
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L
            # group 30
            1234567890L < 3456789012345L
            1234567890L > 3456789012345L
            1234567890L == 3456789012345L
            1234567890L > 3456789012345L
            1234567890L < 3456789012345L

    def calibrate(self):
        # Same loop without the comparisons: measures loop overhead.
        for i in xrange(self.rounds):
            pass
| Python |
from pybench import Test
class PythonFunctionCalls(Test):
    """ Time plain Python function calls: no-arg, positional-arg and
        default/keyword-arg calls.

        The loop body is deliberately unrolled into 5 identical groups
        of 11 calls, matching operations = 5*(1+4+4+2).  Do not
        refactor the repetition into a loop.
    """
    version = 0.3
    operations = 5*(1+4+4+2)
    rounds = 60000

    def test(self):
        # Target functions are rebound globally each run so that
        # calibrate() pays the same definition cost.
        global f,f1,g,h

        # define functions
        def f():
            pass

        def f1(x):
            pass

        def g(a,b,c):
            return a,b,c

        def h(a,b,c,d=1,e=2,f=3):
            return d,e,f

        # do calls
        for i in xrange(self.rounds):
            # group 1: 1 no-arg + 4 one-arg + 4 three-arg + 2 default/kw
            f()
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            g(i,i,i)
            g(i,i,i)
            g(i,i,i)
            g(i,i,i)
            h(i,i,3,i,i)
            h(i,i,i,2,i,3)
            # group 2
            f()
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            g(i,i,i)
            g(i,i,i)
            g(i,i,i)
            g(i,i,i)
            h(i,i,3,i,i)
            h(i,i,i,2,i,3)
            # group 3
            f()
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            g(i,i,i)
            g(i,i,i)
            g(i,i,i)
            g(i,i,i)
            h(i,i,3,i,i)
            h(i,i,i,2,i,3)
            # group 4
            f()
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            g(i,i,i)
            g(i,i,i)
            g(i,i,i)
            g(i,i,i)
            h(i,i,3,i,i)
            h(i,i,i,2,i,3)
            # group 5
            f()
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            g(i,i,i)
            g(i,i,i)
            g(i,i,i)
            g(i,i,i)
            h(i,i,3,i,i)
            h(i,i,i,2,i,3)

    def calibrate(self):
        # Same definitions and loop, but no calls: measures the
        # definition + loop overhead that test() also pays.
        global f,f1,g,h

        # define functions
        def f():
            pass

        def f1(x):
            pass

        def g(a,b,c):
            return a,b,c

        def h(a,b,c,d=1,e=2,f=3):
            return d,e,f

        # do calls
        for i in xrange(self.rounds):
            pass
###
class BuiltinFunctionCalls(Test):
    """ Time calls of localized builtins (globals, hash, cmp, range).

        The loop body is deliberately unrolled into 5 identical groups
        of 17 calls, matching operations = 5*(2+5+5+5).  Do not
        refactor the repetition into a loop.
    """
    version = 0.4
    operations = 5*(2+5+5+5)
    rounds = 30000

    def test(self):
        # localize functions (binds builtins to fast locals)
        f0 = globals
        f1 = hash
        f2 = cmp
        f3 = range

        # do calls
        for i in xrange(self.rounds):
            # group 1: 2x globals, 5x hash, 5x cmp, 5x range
            f0()
            f0()
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            # group 2
            f0()
            f0()
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            # group 3
            f0()
            f0()
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            # group 4
            f0()
            f0()
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            # group 5
            f0()
            f0()
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            f1(i)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f2(1,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)
            f3(1,3,2)

    def calibrate(self):
        # localize functions
        # NOTE(review): binds f0=dir and f2=range, not globals/cmp as
        # in test(); harmless because the loop below never calls them,
        # but worth confirming this asymmetry is intentional.
        f0 = dir
        f1 = hash
        f2 = range
        f3 = range

        # do calls
        for i in xrange(self.rounds):
            pass
###
class PythonMethodCalls(Test):
    """ Time bound-method calls on a classic-class instance: no-arg,
        positional-arg, default-arg and keyword-arg methods.

        The loop body is deliberately unrolled into 5 identical groups
        of 15 calls, matching operations = 5*(6 + 5 + 4).  Do not
        refactor the repetition into a loop.
    """
    version = 0.3
    operations = 5*(6 + 5 + 4)
    rounds = 20000

    def test(self):
        class c:
            x = 2
            s = 'string'

            def f(self):
                return self.x

            def j(self,a,b):
                self.y = a
                self.t = b
                return self.y

            def k(self,a,b,c=3):
                self.y = a
                self.s = b
                self.t = c

        o = c()
        for i in xrange(self.rounds):
            # group 1: 6x f, 5x j, 4x k
            o.f()
            o.f()
            o.f()
            o.f()
            o.f()
            o.f()
            o.j(i,i)
            o.j(i,i)
            o.j(i,2)
            o.j(i,2)
            o.j(2,2)
            o.k(i,i)
            o.k(i,2)
            o.k(i,2,3)
            o.k(i,i,c=4)
            # group 2
            o.f()
            o.f()
            o.f()
            o.f()
            o.f()
            o.f()
            o.j(i,i)
            o.j(i,i)
            o.j(i,2)
            o.j(i,2)
            o.j(2,2)
            o.k(i,i)
            o.k(i,2)
            o.k(i,2,3)
            o.k(i,i,c=4)
            # group 3
            o.f()
            o.f()
            o.f()
            o.f()
            o.f()
            o.f()
            o.j(i,i)
            o.j(i,i)
            o.j(i,2)
            o.j(i,2)
            o.j(2,2)
            o.k(i,i)
            o.k(i,2)
            o.k(i,2,3)
            o.k(i,i,c=4)
            # group 4
            o.f()
            o.f()
            o.f()
            o.f()
            o.f()
            o.f()
            o.j(i,i)
            o.j(i,i)
            o.j(i,2)
            o.j(i,2)
            o.j(2,2)
            o.k(i,i)
            o.k(i,2)
            o.k(i,2,3)
            o.k(i,i,c=4)
            # group 5
            o.f()
            o.f()
            o.f()
            o.f()
            o.f()
            o.f()
            o.j(i,i)
            o.j(i,i)
            o.j(i,2)
            o.j(i,2)
            o.j(2,2)
            o.k(i,i)
            o.k(i,2)
            o.k(i,2,3)
            o.k(i,i,c=4)

    def calibrate(self):
        # Mirrors test()'s class definition cost without the calls.
        class c:
            x = 2
            s = 'string'

            def f(self):
                return self.x

            def j(self,a,b):
                self.y = a
                self.t = b

            def k(self,a,b,c=3):
                self.y = a
                self.s = b
                self.t = c

        # NOTE(review): "o = c" (no parentheses) binds the class, not
        # an instance as test() does; harmless because the loop below
        # makes no calls, but the asymmetry looks unintentional.
        o = c
        for i in xrange(self.rounds):
            pass
###
class Recursion(Test):
    """ Time recursive Python function calls: five depth-10 recursions
        per loop iteration (operations = 5).
    """
    version = 0.3
    operations = 5
    rounds = 50000

    def test(self):
        global f

        def f(x):
            if x > 1:
                return f(x-1)
            return 1

        for i in xrange(self.rounds):
            f(10)
            f(10)
            f(10)
            f(10)
            f(10)

    def calibrate(self):
        global f

        # NOTE(review): the base case here is "x > 0" rather than
        # test()'s "x > 1"; harmless since the loop below never calls
        # f, and test() rebinds the global f before using it.
        def f(x):
            if x > 0:
                return f(x-1)
            return 1

        for i in xrange(self.rounds):
            pass
| Python |
#!python
# Setup file for pybench
#
# This file has to import all tests to be run; it is executed as
# Python source file, so you can do all kinds of manipulations here
# rather than having to edit the tests themselves.
#
# Defaults consumed by pybench.py's option table: -n (rounds) and
# -w (warp factor).
Number_of_rounds = 10
Warp_factor = 20
# Import tests
from Arithmetic import *
from Calls import *
from Constructs import *
from Lookups import *
from Instances import *
from Lists import *
from Tuples import *
from Dict import *
from Exceptions import *
from Imports import *
from Strings import *
from Numbers import *
try:
from Unicode import *
except (ImportError, SyntaxError):
pass
| Python |
#!/usr/local/bin/python -O
""" A Python Benchmark Suite
"""
__copyright__="""\
Copyright (c), 1997-2001, Marc-Andre Lemburg (mal@lemburg.com)
All Rights Reserved.
Permission to use, copy, modify, and distribute this software and
its documentation for any purpose and without fee or royalty is
hereby granted, provided that the above copyright notice appear in
all copies and that both that copyright notice and this permission
notice appear in supporting documentation or portions thereof,
including modifications, that you make.
THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD
TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
SOFTWARE !
"""
__version__ = '1.0'
#
# NOTE: Use xrange for all test loops unless you want to face
# a 20MB process !
#
# All tests should have rounds set to values so that a run()
# takes between 20-50 seconds. This is to get fairly good
# clock() values. You can use option -w to speedup the tests
# by a fixed integer factor (the "warp factor").
#
import autopath
from py.xml import html
import sys,time,operator
from CommandLine import *
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
### Test baseclass
class Test:
    """ All test must have this class as baseclass. It provides
        the necessary interface to the benchmark machinery.

        The tests must set .rounds to a value high enough to let the
        test run between 20-50 seconds. This is needed because
        clock()-timing only gives rather inaccurate values (on Linux,
        for example, it is accurate to a few hundreths of a
        second). If you don't want to wait that long, use a warp
        factor larger than 1.

        It is also important to set the .operations variable to a
        value representing the number of "virtual operations" done per
        call of .run().

        If you change a test in some way, don't forget to increase
        its version number.
    """

    operations = 1          # number of operations done per test()-call
    rounds = 100000         # number of rounds per run
    is_a_test = 1           # identifier
    last_timing = (0,0,0)   # last timing (real,run,calibration)
    warp = 1                # warp factor this test uses
    cruns = 20              # number of calibration runs
    overhead = None         # list of calibration timings
    version = 1.0           # version number of this test

    def __init__(self, warp=1):
        if warp > 1:
            # Python 2 integer division: deliberately floors rounds.
            self.rounds = self.rounds / warp
            self.warp = warp
        self.times = []
        self.overhead = []
        # We want these to be in the instance dict, so that pickle
        # saves them
        self.version = self.version
        self.operations = self.operations
        self.rounds = self.rounds

    def run(self):
        """ Run the test in two phases: first calibrate, then
            do the actual test. Be careful to keep the calibration
            timing low w/r to the test timing.
        """
        test = self.test
        calibrate = self.calibrate
        # NOTE: time.clock was removed in Python 3.8; kept here since
        # this module targets Python 2.
        clock = time.clock
        cruns = self.cruns
        # first calibrate
        offset = 0.0
        for i in range(cruns):
            t = clock()
            calibrate()
            t = clock() - t
            offset = offset + t
        offset = offset / cruns
        # now the real thing
        t = clock()
        test()
        t = clock() - t
        self.last_timing = (t-offset, t, offset)
        self.times.append(t-offset)

    def calibrate(self):
        """ Run the test, doing self.rounds loops, but without
            the actual test target in place.
        """
        return

    def test(self):
        """ Run the test, doing self.rounds loop
        """
        # do some tests
        return

    def stat(self):
        """ Return an (avg, op_avg, ov_avg) triple: average time per
            run, average time per operation and average calibration
            overhead per run.
        """
        runs = len(self.times)
        if runs == 0:
            # BUG FIX: previously returned the 2-tuple (0,0) while all
            # callers unpack three values, raising ValueError for a
            # test that was never run.
            return 0, 0, 0
        # sum(xs, 0.0) is equivalent to reduce(operator.add, xs, 0.0)
        # but also works on Python 3, where reduce is not a builtin.
        totaltime = sum(self.times, 0.0)
        avg = totaltime / float(runs)
        op_avg = totaltime / float(runs * self.rounds * self.operations)
        if self.overhead:
            totaloverhead = sum(self.overhead, 0.0)
            ov_avg = totaloverhead / float(runs)
        else:
            # use self.last_timing - not too accurate
            ov_avg = self.last_timing[2]
        return avg, op_avg, ov_avg
### load Setup
import Setup
### Benchmark base class
class Benchmark:
    """ Container for one full suite run: loads Test subclasses from a
        setup module, runs them for a number of rounds and renders
        text or HTML reports, optionally diffed against earlier
        (pickled) Benchmark objects.
    """

    name = '?'        # Name of the benchmark
    rounds = 1        # Number of rounds to run
    warp = 1          # Warp factor
    roundtime = 0     # Average round time
    version = None    # Benchmark version number (see __init__)

    def __init__(self):
        self.tests = {}
        # Result-file format version; compared in print_stat/html_stat.
        self.version = 0.31

    def load_tests(self, setupmod, warp=1):
        """ Instantiate every test class found in setupmod (anything
            carrying an .is_a_test attribute, except the Test base
            class itself) at the given warp factor.
        """
        self.warp = warp
        tests = self.tests
        print 'Searching for tests...'
        # NOTE(review): the next statement is a no-op (its result is
        # discarded); looks like leftover debugging.
        setupmod.__dict__.values()
        for c in setupmod.__dict__.values():
            if hasattr(c,'is_a_test') and c.__name__ != 'Test':
                tests[c.__name__] = c(warp)
        l = tests.keys()
        l.sort()
        for t in l:
            print ' ',t
        print

    def run(self):
        """ Run all loaded tests self.rounds times, printing progress,
            and record the average round time in self.roundtime.
        """
        tests = self.tests.items()
        tests.sort()
        clock = time.clock
        print 'Running %i round(s) of the suite: ' % self.rounds
        print
        roundtime = clock()
        for i in range(self.rounds):
            print ' Round %-25i real abs overhead' % (i+1)
            for j in range(len(tests)):
                name,t = tests[j]
                print '%30s:' % name,
                t.run()
                print ' %.3fr %.3fa %.3fo' % t.last_timing
            print ' ----------------------'
            print ' Average round time: %.3f seconds' % \
                  ((clock() - roundtime)/(i+1))
            print
        self.roundtime = (clock() - roundtime) / self.rounds
        print

    def print_stat(self, compare_to=None, hidenoise=0):
        """ Print a text report.  With compare_to (another Benchmark),
            print per-test percentage diffs instead of overhead;
            hidenoise suppresses diffs below 10%.
        """
        if not compare_to:
            print '%-30s per run per oper. overhead' % 'Tests:'
            print '-'*72
            tests = self.tests.items()
            tests.sort()
            for name,t in tests:
                avg,op_avg,ov_avg = t.stat()
                print '%30s: %10.2f ms %7.2f us %7.2f ms' % \
                      (name,avg*1000.0,op_avg*1000000.0,ov_avg*1000.0)
            print '-'*72
            print '%30s: %10.2f ms' % \
                  ('Average round time',self.roundtime * 1000.0)
        else:
            print '%-30s per run per oper. diff *)' % \
                  'Tests:'
            print '-'*72
            tests = self.tests.items()
            tests.sort()
            compatible = 1
            for name,t in tests:
                avg,op_avg,ov_avg = t.stat()
                try:
                    other = compare_to.tests[name]
                except KeyError:
                    other = None
                # Only tests of identical version and operation count
                # can be meaningfully compared.
                if other and other.version == t.version and \
                   other.operations == t.operations:
                    avg1,op_avg1,ov_avg1 = other.stat()
                    qop_avg = (op_avg/op_avg1-1.0)*100.0
                    if hidenoise and abs(qop_avg) < 10:
                        qop_avg = ''
                    else:
                        qop_avg = '%+7.2f%%' % qop_avg
                else:
                    # NOTE(review): qavg is assigned but never used.
                    qavg,qop_avg = 'n/a', 'n/a'
                    compatible = 0
                print '%30s: %10.2f ms %7.2f us %8s' % \
                      (name,avg*1000.0,op_avg*1000000.0,qop_avg)
            print '-'*72
            if compatible and compare_to.roundtime > 0 and \
               compare_to.version == self.version:
                print '%30s: %10.2f ms %+7.2f%%' % \
                      ('Average round time',self.roundtime * 1000.0,
                       ((self.roundtime*self.warp)/
                        (compare_to.roundtime*compare_to.warp)-1.0)*100.0)
            else:
                print '%30s: %10.2f ms n/a' % \
                      ('Average round time',self.roundtime * 1000.0)
            print
            print '*) measured against: %s (rounds=%i, warp=%i)' % \
                  (compare_to.name,compare_to.rounds,compare_to.warp)
        print

    def html_stat(self, compare_to=None, hidenoise=0):
        """ Build an HTML results table (a py.xml.html element tree).

            compare_to may be None (absolute figures), a Benchmark
            (pairwise percentage diffs) or a sequence of Benchmarks
            (one diff column per baseline).
        """
        if not compare_to:
            table = html.table(
                html.thead(
                    html.tr(
                        [ html.th(x, **{'mochi:format': y, 'align':'left'})
                          for (x,y) in [('Tests','str'), ('per run','float'),
                                        ('per oper.', 'float'), ('overhead', 'float')]])
                    ),id = "sortable_table")
            tests = self.tests.items()
            tests.sort()
            tbody = html.tbody()
            for name,t in tests:
                avg,op_avg,ov_avg = t.stat()
                tbody.append(html.tr( html.td(name),
                                      html.td(avg*1000.0),
                                      html.td(op_avg*1000000.0),
                                      html.td(ov_avg*1000.0)
                                      ))
            table.append(tbody)
            table.append(html.tr(
                'Average round time %s' % (self.roundtime * 1000.0))
                )
            return table
        elif isinstance(compare_to, Benchmark):
            table = html.table(html.thead(
                html.tr([ html.th(x, **{'mochi:format': y, 'align':'left'})
                          for (x,y) in [('Tests','str'), ('per run','float'),
                                        ('per oper.', 'float'), ('diff', 'float')]])),
                id = "sortable_table", class_="datagrid")
            tests = self.tests.items()
            tests.sort()
            compatible = 1
            tbody = html.tbody()
            for name,t in tests:
                avg,op_avg,ov_avg = t.stat()
                try:
                    other = compare_to.tests[name]
                except KeyError:
                    other = None
                if other and other.version == t.version and \
                   other.operations == t.operations:
                    avg1,op_avg1,ov_avg1 = other.stat()
                    qop_avg = (op_avg/op_avg1-1.0)*100.0
                    if hidenoise and abs(qop_avg) < 10:
                        qop_avg = ''
                    else:
                        qop_avg = '%+7.2f%%' % qop_avg
                else:
                    qavg,qop_avg = 'n/a', 'n/a'
                    compatible = 0
                tbody.append(html.tr( html.td(name),
                                      html.td(avg*1000.0),
                                      html.td(op_avg*1000000.0),
                                      html.td(qop_avg)
                                      ))
            if compatible and compare_to.roundtime > 0 and \
               compare_to.version == self.version:
                tbody.append(html.tr(
                    html.td('Average round time'),
                    html.td(self.roundtime * 1000.0),
                    html.td(''),
                    html.td('%+7.2f%%'% (((self.roundtime*self.warp)/
                                          (compare_to.roundtime*compare_to.warp)-1.0)*100.0)
                            )))
            else:
                tbody.append(html.tr(
                    html.td('Average round time'),
                    html.td(self.roundtime * 1000.0)))
            table.append(tbody)
            return table
        else:
            # compare_to is a sequence of Benchmark objects here.
            table = html.table(html.thead(
                html.tr([ html.th(x, **{'mochi:format': y, 'align':'left'})
                          for (x,y) in [('Tests','str')]+[('pypy ver','float') for z in compare_to]
                          ])),
                id = "sortable_table")
            tests = self.tests.items()
            tests.sort()
            compatible = 1
            for name,t in tests:
                avg,op_avg,ov_avg = t.stat()
                percent = []
                for comp_to in compare_to:
                    try:
                        other = comp_to.tests[name]
                    except KeyError:
                        other = None
                    if other and other.version == t.version and \
                       other.operations == t.operations:
                        avg1,op_avg1,ov_avg1 = other.stat()
                        qop_avg = (op_avg/op_avg1-1.0)*100.0
                        if hidenoise and abs(qop_avg) < 10:
                            qop_avg = ''
                        else:
                            qop_avg = '%+7.2f%%' % qop_avg
                    else:
                        qavg,qop_avg = 'n/a', 'n/a'
                        compatible = 0
                    percent.append(qop_avg)
                table.append(html.tr( html.td(name),
                                      [html.td(qop_avg) for qop_avg in percent]
                                      ))
            # NOTE(review): compare_to is a *list* in this branch, so
            # compare_to.roundtime / compare_to.version below raise
            # AttributeError whenever compatible is still true -- only
            # the else-path can ever succeed here.  Bug.
            if compatible and compare_to.roundtime > 0 and \
               compare_to.version == self.version:
                table.append(html.tr(
                    html.td('Average round time'),
                    html.td(self.roundtime * 1000.0),
                    html.td(''),
                    html.td('%+7.2f%%'% (((self.roundtime*self.warp)/
                                          (compare_to.roundtime*compare_to.warp)-1.0)*100.0)
                            )))
            else:
                table.append(html.tr(
                    html.td('Average round time'),
                    html.td(self.roundtime * 1000.0)))
            return table
def print_machine():
    """ Print platform and interpreter details for the report header. """
    import platform
    print 'Machine Details:'
    print ' Platform ID: %s' % platform.platform()
    # There's a bug in Python 2.2b1+...
    # ("if 1 or" deliberately short-circuits the version check while
    # keeping the original workaround condition visible.)
    if 1 or sys.version[:6] != '2.2b1+':
        print ' Python: %s' % platform.python_version()
        print ' Compiler: %s' % platform.python_compiler()
        buildno, buildate = platform.python_build()
        print ' Build: %s (#%i)' % (buildate, buildno)
class Document(object):
    """ Minimal HTML document wrapper around py.xml.html, pre-wired
        with the MochiKit CSS/JS needed for client-side sortable
        result tables.  Append content to self.body, then call
        writetopath() to serialize.
    """

    def __init__(self, title=None):
        self.body = html.body()
        self.head = html.head()
        self.doc = html.html(self.head, self.body)
        if title is not None:
            self.head.append(
                html.meta(name="title", content=title))
        # MochiKit assets; paths are relative to wherever the report
        # file is written.
        self.head.append(
            html.link(rel="Stylesheet", type="text/css", href="MochiKit-1.1/examples/sortable_tables/sortable_tables.css"))
        self.head.append(
            html.script(rel="JavaScript", type="text/javascript", src="MochiKit-1.1/lib/MochiKit/MochiKit.js"))
        self.head.append(
            html.script(rel="JavaScript", type="text/javascript", src="MochiKit-1.1/examples/sortable_tables/sortable_tables.js"))

    def writetopath(self, p):
        """ Serialize the document as UTF-8 and write it to the
            py.path object p (extension must be '.html').
        """
        assert p.ext == '.html'
        self.head.append(
            html.meta(name="Content-Type", content="text/html;charset=UTF-8")
        )
        s = self.doc.unicode().encode('utf-8')
        p.write(s)
class PyBenchCmdline(Application):

    """Command-line driver for pybench.

    Parses the options below, runs the suite and/or loads previously
    pickled Benchmark objects, and renders results as plain text or
    as a sortable HTML table (index.html).
    """

    header = ("PYBENCH - a benchmark test suite for Python "
              "interpreters/compilers.")

    version = __version__

    # ArgumentOption entries take a value; SwitchOption entries are flags.
    options = [ArgumentOption('-n','number of rounds',Setup.Number_of_rounds),
               ArgumentOption('-f','save benchmark to file arg',''),
               ArgumentOption('-c','compare benchmark with the one in file arg',''),
               ArgumentOption('-l','compare benchmark with the ones in the files arg',''),
               ArgumentOption('-s','show benchmark in file arg, then exit',''),
               ArgumentOption('-w','set warp factor to arg',Setup.Warp_factor),
               SwitchOption('-d','hide noise in compares', 0),
               SwitchOption('--no-gc','disable garbage collection', 0),
               SwitchOption('-x','write html table', 0),
               ]

    about = """\
The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisms.
Examples:
python1.5 pybench.py -w 100 -f p15
python1.4 pybench.py -w 100 -f p14
python pybench.py -s p15 -c p14
"""
    copyright = __copyright__

    def main(self):
        # Collect parsed option values.
        rounds = self.values['-n']
        reportfile = self.values['-f']
        show_bench = self.values['-s']
        compare_to = self.values['-c']
        # NOTE(review): -l is parsed here but never used below.
        compare_to_many = self.values['-l']
        hidenoise = self.values['-d']
        warp = self.values['-w']
        nogc = self.values['--no-gc']
        # NOTE(review): this local shadows the module-level ``html``
        # element builder within main(); here it only holds the -x flag.
        html = self.values['-x']

        # Switch off GC
        if nogc:
            try:
                import gc
            except ImportError:
                # Interpreter built without gc support: run with GC as-is.
                nogc = 0
            else:
                if self.values['--no-gc']:
                    gc.disable()

        print 'PYBENCH',__version__
        print

        if not compare_to:
            #print_machine()
            print

        # -c: load the pickled benchmark we are comparing against.
        if compare_to:
            try:
                f = open(compare_to,'rb')
                bench = pickle.load(f)
                bench.name = compare_to
                f.close()
                compare_to = bench
            except IOError:
                print '* Error opening/reading file',compare_to
                compare_to = None

        # -s: only display a previously saved benchmark, then exit.
        if show_bench:
            try:
                f = open(show_bench,'rb')
                bench = pickle.load(f)
                bench.name = show_bench
                f.close()
                print 'Benchmark: %s (rounds=%i, warp=%i)' % \
                      (bench.name,bench.rounds,bench.warp)
                print
                print "*******************************************"
                if html:
                    print "Generating HTML"
                    import py.path
                    index = py.path.local('index.html')
                    table = bench.html_stat(compare_to, hidenoise)
                    doc = Document()
                    doc.body.append(table)
                    doc.writetopath(index)
                else:
                    bench.print_stat(compare_to, hidenoise)
            except IOError:
                print '* Error opening/reading file',show_bench
                print
            return

        if reportfile:
            if nogc:
                print 'Benchmark: %s (rounds=%i, warp=%i, no GC)' % \
                      (reportfile,rounds,warp)
            else:
                print 'Benchmark: %s (rounds=%i, warp=%i)' % \
                      (reportfile,rounds,warp)
            print

        # Create benchmark object
        bench = Benchmark()
        bench.rounds = rounds
        bench.load_tests(Setup,warp)
        try:
            bench.run()
        except KeyboardInterrupt:
            print
            print '*** KeyboardInterrupt -- Aborting'
            print
            return
        # NOTE(review): hidenoise is not passed here, unlike the -s path
        # above -- confirm whether that difference is intentional.
        bench.print_stat(compare_to)
        if html:
            print "Generating HTML"
            import py.path
            index = py.path.local('index.html')
            table = bench.html_stat(compare_to, hidenoise)
            doc = Document()
            doc.body.append(table)
            doc.writetopath(index)
        # ring bell
        sys.stderr.write('\007')

        # -f: pickle this run for later comparison.
        if reportfile:
            try:
                f = open(reportfile,'wb')
                bench.name = reportfile
                pickle.dump(bench,f)
                f.close()
            except IOError:
                print '* Error opening/writing reportfile'
if __name__ == '__main__':
    # NOTE(review): instantiation alone appears to drive argument
    # parsing and main() -- confirm against the Application base class.
    PyBenchCmdline()
| Python |
from pybench import Test
class SimpleIntegerArithmetic(Test):
version = 0.3
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000
def test(self):
for i in xrange(self.rounds):
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in xrange(self.rounds):
pass
class SimpleFloatArithmetic(Test):
version = 0.3
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 100000
def test(self):
for i in xrange(self.rounds):
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in xrange(self.rounds):
pass
class SimpleIntFloatArithmetic(Test):
version = 0.3
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000
def test(self):
for i in xrange(self.rounds):
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in xrange(self.rounds):
pass
class SimpleLongArithmetic(Test):
version = 0.3
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 30000
def test(self):
for i in xrange(self.rounds):
a = 2220001L
b = 100001L
c = 30005L
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2220001L
b = 100001L
c = 30005L
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2220001L
b = 100001L
c = 30005L
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2220001L
b = 100001L
c = 30005L
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2220001L
b = 100001L
c = 30005L
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in xrange(self.rounds):
pass
class SimpleComplexArithmetic(Test):
version = 0.3
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 40000
def test(self):
for i in xrange(self.rounds):
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in xrange(self.rounds):
pass
| Python |
#!python
# Setup file for pybench
#
# This file has to import all tests to be run; it is executed as
# Python source file, so you can do all kinds of manipulations here
# rather than having to edit the tests themselves.
#
# Defaults
Number_of_rounds = 10   # default value of the -n command line option
Warp_factor = 20        # default value of the -w command line option
# Import tests
from Arithmetic import *
from Calls import *
from Constructs import *
from Lookups import *
from Instances import *
from Lists import *
from Tuples import *
from Dict import *
from Exceptions import *
from Imports import *
from Strings import *
from Numbers import *
try:
    from Unicode import *
except (ImportError, SyntaxError):
    # The Unicode tests are optional: interpreters built without
    # unicode support raise ImportError, and ones that cannot parse
    # u'' literals raise SyntaxError. Skip them in either case.
    pass
| Python |
from pybench import Test
# First imports:
import os
import package.submodule
class SecondImport(Test):

    """Time re-importing an already-loaded stdlib module.

    ``os`` is imported at file level, so each statement below only
    exercises the import machinery's sys.modules cache-hit path.
    """

    version = 0.1
    operations = 5 * 5          # 25 import statements per round
    rounds = 20000

    def test(self):
        for i in xrange(self.rounds):
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os

    def calibrate(self):
        # Empty loop: measures the per-round overhead that gets
        # subtracted from the test() timings.
        for i in xrange(self.rounds):
            pass
class SecondPackageImport(Test):

    """Time re-importing an already-loaded package.

    ``package`` is imported at file level (via package.submodule), so
    each statement below hits the sys.modules cache only.
    """

    version = 0.1
    operations = 5 * 5          # 25 import statements per round
    rounds = 20000

    def test(self):
        for i in xrange(self.rounds):
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package

    def calibrate(self):
        # Empty loop: per-round overhead baseline.
        for i in xrange(self.rounds):
            pass
class SecondSubmoduleImport(Test):

    """Time re-importing an already-loaded package submodule.

    Dotted imports do more bookkeeping than plain ones, which is what
    distinguishes this test from SecondPackageImport.
    """

    version = 0.1
    operations = 5 * 5          # 25 import statements per round
    rounds = 20000

    def test(self):
        for i in xrange(self.rounds):
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule

    def calibrate(self):
        # Empty loop: per-round overhead baseline.
        for i in xrange(self.rounds):
            pass
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ is undefined when run interactively; fall back to argv[0].
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))

    # Walk upwards from this file until a directory named `part` is found.
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)

    pypy_root = os.path.join(head, '')
    # Move `head` to the front of sys.path, dropping any earlier entry.
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)

    munged = {}
    # Re-register already-imported top-level modules living under the
    # pypy root under their fully dotted names as well.
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): `newname` is a bare basename with its extension
        # stripped and therefore normally contains no dot, so this
        # startswith(part + '.') test looks like it can never succeed,
        # making the munging below dead code -- confirm intent.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod

    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                # Make the module reachable as an attribute of its package.
                setattr(sys.modules[prename], postname, mod)

    return partdir, this_dir
def __clone():
""" clone master version of autopath.py into all subdirs """
from os.path import join, walk
if not this_dir.endswith(join('pypy','tool')):
raise EnvironmentError("can only clone master version "
"'%s'" % join(pypydir, 'tool',_myname))
def sync_walker(arg, dirname, fnames):
if _myname in fnames:
fn = join(dirname, _myname)
f = open(fn, 'rwb+')
try:
if f.read() == arg:
print "checkok", fn
else:
print "syncing", fn
f = open(fn, 'w')
f.write(arg)
finally:
f.close()
s = open(join(pypydir, 'tool', _myname), 'rb').read()
walk(pypydir, sync_walker, s)
_myname = 'autopath.py'   # filename this module clones itself under

# set guaranteed attributes
# __dirinfo also prepends the pypy parent directory to sys.path as a
# side effect, which is the whole point of importing autopath.
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    # Run directly from pypy/tool: propagate this file into all subdirs.
    __clone()
| Python |
from pybench import Test
class TupleSlicing(Test):
version = 0.31
operations = 3 * 25 * 10 * 7
rounds = 400
def test(self):
r = range(25)
for i in xrange(self.rounds):
t = tuple(range(100))
for j in r:
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
def calibrate(self):
r = range(25)
for i in xrange(self.rounds):
t = tuple(range(100))
for j in r:
pass
class SmallTuples(Test):

    """Time construction, unpacking, slicing and list round-tripping
    of a small 6-element tuple.

    The statement group is deliberately repeated 5 times; the
    repetition count is mirrored in `operations` below, so the body
    must stay literally unrolled.
    """

    version = 0.3
    operations = 5*(1 + 3 + 6 + 2)   # per group: 1 build, 3 unpacks, 6 slices, list+tuple
    rounds = 80000

    def test(self):
        for i in xrange(self.rounds):
            t = (1,2,3,4,5,6)
            a,b,c,d,e,f = t
            a,b,c,d,e,f = t
            a,b,c,d,e,f = t
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            l = list(t)
            t = tuple(l)
            t = (1,2,3,4,5,6)
            a,b,c,d,e,f = t
            a,b,c,d,e,f = t
            a,b,c,d,e,f = t
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            l = list(t)
            t = tuple(l)
            t = (1,2,3,4,5,6)
            a,b,c,d,e,f = t
            a,b,c,d,e,f = t
            a,b,c,d,e,f = t
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            l = list(t)
            t = tuple(l)
            t = (1,2,3,4,5,6)
            a,b,c,d,e,f = t
            a,b,c,d,e,f = t
            a,b,c,d,e,f = t
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            l = list(t)
            t = tuple(l)
            t = (1,2,3,4,5,6)
            a,b,c,d,e,f = t
            a,b,c,d,e,f = t
            a,b,c,d,e,f = t
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            a,b,c = t[:3]
            l = list(t)
            t = tuple(l)

    def calibrate(self):
        # Empty loop: per-round overhead baseline.
        for i in xrange(self.rounds):
            pass
| Python |
from pybench import Test
class DictCreation(Test):

    """Time creation of empty and small constant dictionaries.

    Each group builds 5 empty dicts and 5 three-entry literal dicts;
    the group is unrolled 5 times to match `operations`.
    """

    version = 0.3
    operations = 5*(5 + 5)   # per group: 5 empty + 5 literal dicts
    rounds = 60000

    def test(self):
        for i in xrange(self.rounds):
            d1 = {}
            d2 = {}
            d3 = {}
            d4 = {}
            d5 = {}
            d1 = {1:2,3:4,5:6}
            d2 = {2:3,4:5,6:7}
            d3 = {3:4,5:6,7:8}
            d4 = {4:5,6:7,8:9}
            d5 = {6:7,8:9,10:11}
            d1 = {}
            d2 = {}
            d3 = {}
            d4 = {}
            d5 = {}
            d1 = {1:2,3:4,5:6}
            d2 = {2:3,4:5,6:7}
            d3 = {3:4,5:6,7:8}
            d4 = {4:5,6:7,8:9}
            d5 = {6:7,8:9,10:11}
            d1 = {}
            d2 = {}
            d3 = {}
            d4 = {}
            d5 = {}
            d1 = {1:2,3:4,5:6}
            d2 = {2:3,4:5,6:7}
            d3 = {3:4,5:6,7:8}
            d4 = {4:5,6:7,8:9}
            d5 = {6:7,8:9,10:11}
            d1 = {}
            d2 = {}
            d3 = {}
            d4 = {}
            d5 = {}
            d1 = {1:2,3:4,5:6}
            d2 = {2:3,4:5,6:7}
            d3 = {3:4,5:6,7:8}
            d4 = {4:5,6:7,8:9}
            d5 = {6:7,8:9,10:11}
            d1 = {}
            d2 = {}
            d3 = {}
            d4 = {}
            d5 = {}
            d1 = {1:2,3:4,5:6}
            d2 = {2:3,4:5,6:7}
            d3 = {3:4,5:6,7:8}
            d4 = {4:5,6:7,8:9}
            d5 = {6:7,8:9,10:11}

    def calibrate(self):
        # Empty loop: per-round overhead baseline.
        for i in xrange(self.rounds):
            pass
class DictWithStringKeys(Test):
version = 0.1
operations = 5*(6 + 6)
rounds = 200000
def test(self):
d = {}
for i in xrange(self.rounds):
d['abc'] = 1
d['def'] = 2
d['ghi'] = 3
d['jkl'] = 4
d['mno'] = 5
d['pqr'] = 6
d['abc']
d['def']
d['ghi']
d['jkl']
d['mno']
d['pqr']
d['abc'] = 1
d['def'] = 2
d['ghi'] = 3
d['jkl'] = 4
d['mno'] = 5
d['pqr'] = 6
d['abc']
d['def']
d['ghi']
d['jkl']
d['mno']
d['pqr']
d['abc'] = 1
d['def'] = 2
d['ghi'] = 3
d['jkl'] = 4
d['mno'] = 5
d['pqr'] = 6
d['abc']
d['def']
d['ghi']
d['jkl']
d['mno']
d['pqr']
d['abc'] = 1
d['def'] = 2
d['ghi'] = 3
d['jkl'] = 4
d['mno'] = 5
d['pqr'] = 6
d['abc']
d['def']
d['ghi']
d['jkl']
d['mno']
d['pqr']
d['abc'] = 1
d['def'] = 2
d['ghi'] = 3
d['jkl'] = 4
d['mno'] = 5
d['pqr'] = 6
d['abc']
d['def']
d['ghi']
d['jkl']
d['mno']
d['pqr']
def calibrate(self):
d = {}
for i in xrange(self.rounds):
pass
class DictWithFloatKeys(Test):
version = 0.1
operations = 5*(6 + 6)
rounds = 200000
def test(self):
d = {}
for i in xrange(self.rounds):
d[1.234] = 1
d[2.345] = 2
d[3.456] = 3
d[4.567] = 4
d[5.678] = 5
d[6.789] = 6
d[1.234]
d[2.345]
d[3.456]
d[4.567]
d[5.678]
d[6.789]
d[1.234] = 1
d[2.345] = 2
d[3.456] = 3
d[4.567] = 4
d[5.678] = 5
d[6.789] = 6
d[1.234]
d[2.345]
d[3.456]
d[4.567]
d[5.678]
d[6.789]
d[1.234] = 1
d[2.345] = 2
d[3.456] = 3
d[4.567] = 4
d[5.678] = 5
d[6.789] = 6
d[1.234]
d[2.345]
d[3.456]
d[4.567]
d[5.678]
d[6.789]
d[1.234] = 1
d[2.345] = 2
d[3.456] = 3
d[4.567] = 4
d[5.678] = 5
d[6.789] = 6
d[1.234]
d[2.345]
d[3.456]
d[4.567]
d[5.678]
d[6.789]
d[1.234] = 1
d[2.345] = 2
d[3.456] = 3
d[4.567] = 4
d[5.678] = 5
d[6.789] = 6
d[1.234]
d[2.345]
d[3.456]
d[4.567]
d[5.678]
d[6.789]
def calibrate(self):
d = {}
for i in xrange(self.rounds):
pass
class DictWithIntegerKeys(Test):
version = 0.1
operations = 5*(6 + 6)
rounds = 200000
def test(self):
d = {}
for i in xrange(self.rounds):
d[1] = 1
d[2] = 2
d[3] = 3
d[4] = 4
d[5] = 5
d[6] = 6
d[1]
d[2]
d[3]
d[4]
d[5]
d[6]
d[1] = 1
d[2] = 2
d[3] = 3
d[4] = 4
d[5] = 5
d[6] = 6
d[1]
d[2]
d[3]
d[4]
d[5]
d[6]
d[1] = 1
d[2] = 2
d[3] = 3
d[4] = 4
d[5] = 5
d[6] = 6
d[1]
d[2]
d[3]
d[4]
d[5]
d[6]
d[1] = 1
d[2] = 2
d[3] = 3
d[4] = 4
d[5] = 5
d[6] = 6
d[1]
d[2]
d[3]
d[4]
d[5]
d[6]
d[1] = 1
d[2] = 2
d[3] = 3
d[4] = 4
d[5] = 5
d[6] = 6
d[1]
d[2]
d[3]
d[4]
d[5]
d[6]
def calibrate(self):
d = {}
for i in xrange(self.rounds):
pass
class SimpleDictManipulation(Test):
version = 0.3
operations = 5*(6 + 6 + 6 + 6)
rounds = 50000
def test(self):
d = {}
for i in xrange(self.rounds):
d[0] = 3
d[1] = 4
d[2] = 5
d[3] = 3
d[4] = 4
d[5] = 5
x = d[0]
x = d[1]
x = d[2]
x = d[3]
x = d[4]
x = d[5]
d.has_key(0)
d.has_key(2)
d.has_key(4)
d.has_key(6)
d.has_key(8)
d.has_key(10)
del d[0]
del d[1]
del d[2]
del d[3]
del d[4]
del d[5]
d[0] = 3
d[1] = 4
d[2] = 5
d[3] = 3
d[4] = 4
d[5] = 5
x = d[0]
x = d[1]
x = d[2]
x = d[3]
x = d[4]
x = d[5]
d.has_key(0)
d.has_key(2)
d.has_key(4)
d.has_key(6)
d.has_key(8)
d.has_key(10)
del d[0]
del d[1]
del d[2]
del d[3]
del d[4]
del d[5]
d[0] = 3
d[1] = 4
d[2] = 5
d[3] = 3
d[4] = 4
d[5] = 5
x = d[0]
x = d[1]
x = d[2]
x = d[3]
x = d[4]
x = d[5]
d.has_key(0)
d.has_key(2)
d.has_key(4)
d.has_key(6)
d.has_key(8)
d.has_key(10)
del d[0]
del d[1]
del d[2]
del d[3]
del d[4]
del d[5]
d[0] = 3
d[1] = 4
d[2] = 5
d[3] = 3
d[4] = 4
d[5] = 5
x = d[0]
x = d[1]
x = d[2]
x = d[3]
x = d[4]
x = d[5]
d.has_key(0)
d.has_key(2)
d.has_key(4)
d.has_key(6)
d.has_key(8)
d.has_key(10)
del d[0]
del d[1]
del d[2]
del d[3]
del d[4]
del d[5]
d[0] = 3
d[1] = 4
d[2] = 5
d[3] = 3
d[4] = 4
d[5] = 5
x = d[0]
x = d[1]
x = d[2]
x = d[3]
x = d[4]
x = d[5]
d.has_key(0)
d.has_key(2)
d.has_key(4)
d.has_key(6)
d.has_key(8)
d.has_key(10)
del d[0]
del d[1]
del d[2]
del d[3]
del d[4]
del d[5]
def calibrate(self):
d = {}
for i in xrange(self.rounds):
pass
| Python |
from pybench import Test
class IfThenElse(Test):
version = 0.31
operations = 30*3 # hard to say...
rounds = 150000
def test(self):
a,b,c = 1,2,3
for i in xrange(self.rounds):
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
if a == 1:
if b == 2:
if c != 3:
c = 3
b = 3
else:
c = 2
elif b == 3:
b = 2
a = 2
elif a == 2:
a = 3
else:
a = 1
def calibrate(self):
a,b,c = 1,2,3
for i in xrange(self.rounds):
pass
class NestedForLoops(Test):

    """Time iteration overhead of three nested for loops over
    pre-built lists (1000 x 10 x 5 empty passes per round)."""

    version = 0.3
    operations = 1000*10*5   # innermost iterations per round
    rounds = 150

    def test(self):
        l1 = range(1000)
        l2 = range(10)
        l3 = range(5)
        for i in xrange(self.rounds):
            for i in l1:
                for j in l2:
                    for k in l3:
                        pass

    def calibrate(self):
        # Same list setup as test(), but an empty round loop, so only
        # the shared setup/loop overhead is measured.
        l1 = range(1000)
        l2 = range(10)
        l3 = range(5)
        for i in xrange(self.rounds):
            pass
class ForLoops(Test):

    """Time plain for-loop iteration over a 100-element list.

    25 complete (empty-bodied) loops per round; the unrolled loops are
    the measured workload and must match `operations`.
    """

    version = 0.1
    operations = 5 * 5   # 25 loops per round
    rounds = 8000

    def test(self):
        l1 = range(100)
        for i in xrange(self.rounds):
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass
            for i in l1:
                pass

    def calibrate(self):
        # Mirror test()'s setup exactly so calibration subtracts the
        # same fixed cost. The original built range(1000) here although
        # test() iterates range(100) -- a copy/paste slip that skewed
        # the setup cost baked into the baseline.
        l1 = range(100)
        for i in xrange(self.rounds):
            pass
| Python |
from pybench import Test
class TryRaiseExcept(Test):

    """Time raising and catching exceptions.

    Two bare ``raise error`` plus three ``raise error, "something"``
    (with an argument) per round, matching `operations` = 2 + 3.
    """

    version = 0.1
    operations = 2 + 3
    rounds = 60000

    def test(self):
        error = ValueError
        for i in xrange(self.rounds):
            try:
                raise error
            except:
                pass
            try:
                raise error
            except:
                pass
            try:
                raise error,"something"
            except:
                pass
            try:
                raise error,"something"
            except:
                pass
            try:
                raise error,"something"
            except:
                pass

    def calibrate(self):
        # Same local binding, empty loop: per-round overhead baseline.
        error = ValueError
        for i in xrange(self.rounds):
            pass
class TryExcept(Test):
version = 0.1
operations = 15 * 10
rounds = 200000
def test(self):
for i in xrange(self.rounds):
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
try:
pass
except:
pass
def calibrate(self):
for i in xrange(self.rounds):
pass
| Python |
from pybench import Test
class SpecialClassAttribute(Test):
    """Benchmark setting and reading class attributes whose names begin
    with two underscores.

    NOTE(review): because this code lives in the body of
    ``SpecialClassAttribute``, the compiler name-mangles ``c.__a`` to
    ``c._SpecialClassAttribute__a`` etc.; the accesses themselves are
    still ordinary class-attribute operations.
    """
    version = 0.3
    # 5 unrolled groups of (12 writes + 12 reads) per loop iteration
    operations = 5*(12 + 12)
    rounds = 100000
    def test(self):
        # throw-away old-style class used as the attribute holder
        class c:
            pass
        for i in xrange(self.rounds):
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            c.__a = 2
            c.__b = 3
            c.__c = 4
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
            x = c.__a
            x = c.__b
            x = c.__c
    def calibrate(self):
        # loop overhead only; subtracted from test() timings by pybench
        class c:
            pass
        for i in xrange(self.rounds):
            pass
class NormalClassAttribute(Test):
    """Benchmark setting and reading plain (non-underscore) class
    attributes on a throw-away old-style class."""
    version = 0.3
    # 5 unrolled groups of (12 writes + 12 reads) per loop iteration
    operations = 5*(12 + 12)
    rounds = 100000
    def test(self):
        # throw-away old-style class used as the attribute holder
        class c:
            pass
        for i in xrange(self.rounds):
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            c.a = 2
            c.b = 3
            c.c = 4
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
            x = c.a
            x = c.b
            x = c.c
    def calibrate(self):
        # loop overhead only; subtracted from test() timings by pybench
        class c:
            pass
        for i in xrange(self.rounds):
            pass
class SpecialInstanceAttribute(Test):
    """Benchmark setting and reading dunder-style instance attributes
    (``__a__`` — leading AND trailing underscores, so no name mangling
    applies, unlike in SpecialClassAttribute)."""
    version = 0.3
    # 5 unrolled groups of (12 writes + 12 reads) per loop iteration
    operations = 5*(12 + 12)
    rounds = 100000
    def test(self):
        # single instance of a throw-away old-style class
        class c:
            pass
        o = c()
        for i in xrange(self.rounds):
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            o.__a__ = 2
            o.__b__ = 3
            o.__c__ = 4
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
            x = o.__a__
            x = o.__b__
            x = o.__c__
    def calibrate(self):
        # loop overhead only; subtracted from test() timings by pybench
        class c:
            pass
        o = c()
        for i in xrange(self.rounds):
            pass
class NormalInstanceAttribute(Test):
    """Benchmark setting and reading plain instance attributes on a
    single instance of a throw-away old-style class."""
    version = 0.3
    # 5 unrolled groups of (12 writes + 12 reads) per loop iteration
    operations = 5*(12 + 12)
    rounds = 100000
    def test(self):
        # single instance of a throw-away old-style class
        class c:
            pass
        o = c()
        for i in xrange(self.rounds):
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            o.a = 2
            o.b = 3
            o.c = 4
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
            x = o.a
            x = o.b
            x = o.c
    def calibrate(self):
        # loop overhead only; subtracted from test() timings by pybench
        class c:
            pass
        o = c()
        for i in xrange(self.rounds):
            pass
class BuiltinMethodLookup(Test):
    """Benchmark looking up (but NOT calling) bound methods of the
    built-in list and dict types.

    Each statement is a bare attribute access whose result is discarded;
    only the method-lookup cost is measured.  ``d.has_key`` is a
    Python 2-only dict method.
    """
    version = 0.3
    # 5 unrolled groups of (3 list methods + 3 dict methods, 5x each)
    operations = 5*(3*5 + 3*5)
    rounds = 70000
    def test(self):
        l = []
        d = {}
        for i in xrange(self.rounds):
            l.append
            l.append
            l.append
            l.append
            l.append
            l.insert
            l.insert
            l.insert
            l.insert
            l.insert
            l.sort
            l.sort
            l.sort
            l.sort
            l.sort
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.items
            d.items
            d.items
            d.items
            d.items
            d.get
            d.get
            d.get
            d.get
            d.get
            l.append
            l.append
            l.append
            l.append
            l.append
            l.insert
            l.insert
            l.insert
            l.insert
            l.insert
            l.sort
            l.sort
            l.sort
            l.sort
            l.sort
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.items
            d.items
            d.items
            d.items
            d.items
            d.get
            d.get
            d.get
            d.get
            d.get
            l.append
            l.append
            l.append
            l.append
            l.append
            l.insert
            l.insert
            l.insert
            l.insert
            l.insert
            l.sort
            l.sort
            l.sort
            l.sort
            l.sort
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.items
            d.items
            d.items
            d.items
            d.items
            d.get
            d.get
            d.get
            d.get
            d.get
            l.append
            l.append
            l.append
            l.append
            l.append
            l.insert
            l.insert
            l.insert
            l.insert
            l.insert
            l.sort
            l.sort
            l.sort
            l.sort
            l.sort
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.items
            d.items
            d.items
            d.items
            d.items
            d.get
            d.get
            d.get
            d.get
            d.get
            l.append
            l.append
            l.append
            l.append
            l.append
            l.insert
            l.insert
            l.insert
            l.insert
            l.insert
            l.sort
            l.sort
            l.sort
            l.sort
            l.sort
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.has_key
            d.items
            d.items
            d.items
            d.items
            d.items
            d.get
            d.get
            d.get
            d.get
            d.get
    def calibrate(self):
        # loop overhead only; subtracted from test() timings by pybench
        l = []
        d = {}
        for i in xrange(self.rounds):
            pass
| Python |
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.lltypesystem.lloperation import LL_OPERATIONS
from pypy.rlib import rarithmetic
from pypy.rpython import rclass, rmodel
from pypy.translator.backendopt import support
from pypy.objspace.flow import model
from pypy.translator import unsimplify, simplify
from pypy.translator.unsimplify import varoftype
from pypy.annotation import model as annmodel
from pypy.rpython.annlowlevel import MixLevelHelperAnnotator
from pypy.translator.stackless import code, frame
from pypy.rpython.rclass import getinstancerepr
from pypy.rpython.rbuiltin import gen_cast
from pypy.rpython.rtyper import LowLevelOpList
from pypy.rpython.module import ll_stackless, ll_stack
from pypy.rlib.objectmodel import ComputedIntSymbolic
from pypy.translator.backendopt import graphanalyze
from pypy.translator.stackless.frame import SAVED_REFERENCE, STORAGE_TYPES
from pypy.translator.stackless.frame import STORAGE_FIELDS
from pypy.translator.stackless.frame import STATE_HEADER, null_state
from pypy.translator.stackless.frame import storage_type
# Master switch: when True, the transformer records detailed statistics
# about resume points and frame-type sharing (see StacklessStats below).
SAVE_STATISTICS = False
# helper used pervasively below:
def copyvar(var):
    """Return a fresh Variable with the same concretetype as *var*.

    *var* may be a flow-model Variable (cloned via unsimplify.copyvar),
    or anything else carrying a ``concretetype``, in which case a brand
    new variable of that type is made.
    """
    if not isinstance(var, model.Variable):
        return varoftype(var.concretetype)
    return unsimplify.copyvar(None, var)
def copy_link_with_varmap(link, varmap):
    """Copy *link*, renaming its variables according to *varmap*.

    Variables not in the map must be the link's last_exception /
    last_exc_value; those get fresh copies, remembered so that repeated
    occurrences map to the same new variable.
    """
    mapping = varmap.copy()
    def rename(arg):
        if not isinstance(arg, model.Variable):
            return arg
        try:
            return mapping[arg]
        except KeyError:
            # only the exception pseudo-variables may be unmapped
            assert arg in [link.last_exception, link.last_exc_value]
            fresh = copyvar(arg)
            mapping[arg] = fresh
            return fresh
    return link.copy(rename)
if SAVE_STATISTICS:
    import cStringIO

    class StacklessStats:
        """Accumulator for statistics gathered while transforming."""
        def __init__(self):
            # resume-point counts: total, per frame type, per graph
            self.rp_count = 0
            self.rp_type_counts = {}
            self.rp_per_graph = {}
            self.rp_per_graph_type_counts = {}
            # number of save / resume operations emitted
            self.saveops = self.resumeops = 0
            # potential savings from sharing exact / erased frame types
            self.pot_exact_saves = {}
            self.total_pot_exact_saves = 0
            self.pot_erased_saves = {}
            self.total_pot_erased_saves = 0
            # operations avoided by reusing resume blocks
            self.saved_retrieval_ops = 0
            self.saved_cast_ops = 0
            self.saved_return_ops = 0
        def __repr__(self):
            # one attribute per line, sorted by name, long values truncated
            s = cStringIO.StringIO()
            print >> s, self.__class__.__name__
            for k in sorted(self.__dict__.keys()):
                r = repr(self.__dict__[k])
                if len(r) > 60:
                    r = r[:50] + '...'
                print >>s, ' '+k, r
            return s.getvalue()

    def inc(d, key):
        # increment d[key], treating a missing key as 0
        d[key] = d.get(key, 0) + 1

    def gkey(graph):
        # hashable identity key for a graph (graph names are not unique)
        return (graph.name, id(graph))
# a simple example of what the stackless transform does
#
# def func(x):
# return g() + x + 1
#
# STATE_func_0 = lltype.Struct('STATE_func_0',
# ('header', STATE_HEADER),
# ('saved_long_0', Signed))
#
# def func(x):
# state = global_state.restart_substate
# if state == -1: # normal case
# try:
# retval = g(x)
# except code.UnwindException, u:
# state = lltype.malloc(STATE_func_0, flavor='gc_nocollect')
# state.header.f_restart = <index in array of frame.FRAMEINFO>
# state.saved_long_0 = x
# code.add_frame_state(u, state.header)
# raise
# elif state == 0:
# global_state.restart_substate = -1
# state = lltype.cast_pointer(lltype.Ptr(STATE_func_0),
# global_state.top)
# global_state.top = null_state
# x = state.saved_long_0
# retval = code.fetch_retval_long() # can raise an exception
# elif state == 1:
# ...
# elif state == 2:
# ...
# else:
# abort()
# return retval + x + 1
class SymbolicRestartNumber(ComputedIntSymbolic):
    """An integer constant whose value (a restart index) may only become
    known after the constant has been created."""
    def __init__(self, label, value=None):
        ComputedIntSymbolic.__init__(self, self._getvalue)
        self.label = label
        # filled in later (see handle_resume_point) when value is None
        self.value = value
    def _getvalue(self):
        # argh, we'd like to assert-fail if value is None here, but we
        # get called too early (during databasing) for this to be
        # valid.  so we might return None and rely on the database
        # checking that this only happens before the database is
        # complete.
        return self.value
# the strategy for sharing parts of the resume code:
#
# when a function is being resumed, there are three things that need to
# be done: the values (as erased types) need to be read out of the
# frame state structure, the return value needs to be read out of
# the global_state (this is also when the check is made if we are
# resuming with an exception) and the return value might need to be
# cast to an exact type. our strategy is to do each of these things
# in separate blocks, reusing blocks where we can. the retrieval and
# cast blocks will (conceptually at least) have a switch on the
# restate subcase at the end of it.
#
# note that we order types by the index of the erased type in
# STORAGE_TYPES, to increase the chance that we can reuse the types.
#
# in simple cases this approach creates graphs that are more
# complicated than needed, so we run the graph through a few
# simplifications to join blocks and remove unused variables.
class FrameTyper:
    """Computes and caches the frame-state structure types used to save
    live variables across an unwind, together with the generated helper
    function that fills such a structure in."""
    # this class only exists independently to ease testing
    def __init__(self, stackless_gc=False, transformer=None):
        # maps erased-type-count key -> (FRAME_TYPE, saving function ptr)
        self.frametypes = {}
        self.stackless_gc = stackless_gc
        self.c_gc_nocollect = model.Constant("gc_nocollect", lltype.Void)
        self.transformer = transformer
    def _key_for_types(self, TYPES):
        # frame types are shared between saves needing the same number of
        # slots of each *erased* storage type; Void values need no slot
        counts = {}
        for EXACT_TYPE in TYPES:
            if EXACT_TYPE is lltype.Void:
                continue
            ERASED_TYPE = storage_type(EXACT_TYPE)
            counts[ERASED_TYPE] = counts.get(ERASED_TYPE, 0) + 1
        key = lltype.frozendict(counts)
        return key
    def saving_function_for_type(self, FRAME_TYPE):
        """Build the graph of a function that mallocs a FRAME_TYPE,
        stores its (erased) arguments into it, chains it onto the
        unwind exception, and records the restart number."""
        v_exception = varoftype(self.transformer.unwind_exception_type)
        v_restart = varoftype(lltype.Signed)
        save_block = model.Block([v_exception, v_restart])
        llops = LowLevelOpList()
        # under stackless GC the frame must not be moved by a collection
        if self.stackless_gc:
            v_state = llops.genop(
                'flavored_malloc',
                [self.c_gc_nocollect, model.Constant(FRAME_TYPE, lltype.Void)],
                resulttype=lltype.Ptr(FRAME_TYPE))
        else:
            v_state = llops.genop(
                'malloc',
                [model.Constant(FRAME_TYPE, lltype.Void)],
                resulttype=lltype.Ptr(FRAME_TYPE))
        for fieldname in FRAME_TYPE._names[1:]: # skip the 'header' field
            v_arg = varoftype(FRAME_TYPE._flds[fieldname])
            save_block.inputargs.append(v_arg)
            llops.genop('setfield',
                        [v_state, model.Constant(fieldname, lltype.Void), v_arg],
                        resulttype=lltype.Void)
        v_header = gen_cast(llops, lltype.Ptr(STATE_HEADER), v_state)
        llops.genop('direct_call',
                    [self.transformer.add_frame_state_ptr, v_exception, v_header],
                    resulttype=lltype.Void)
        llops.genop("setfield",
                    [v_header, self.transformer.c_f_restart_name, v_restart],
                    resulttype=lltype.Void)
        save_state_graph = model.FunctionGraph('save_' + FRAME_TYPE._name, save_block,
                                               varoftype(lltype.Void))
        save_block.operations = llops
        save_block.closeblock(model.Link([v_header], save_state_graph.returnblock))
        FUNC_TYPE = lltype.FuncType([v.concretetype for v in save_block.inputargs],
                                    lltype.Void)
        return lltype.functionptr(FUNC_TYPE, save_state_graph.name,
                                  graph=save_state_graph)
    def frame_type_for_vars(self, vars):
        """Return (FRAME_TYPE, vars sorted into storage order, saver)
        for the given live variables, creating and caching the frame
        type on first use."""
        key = self._key_for_types([v.concretetype for v in vars])
        if key not in self.frametypes:
            fields = []
            tcounts = []
            for t in STORAGE_TYPES:
                tcount = key.get(t, 0)
                tcounts.append(str(tcount))
                for j in range(tcount):
                    fname = 'state_%s_%d' % (STORAGE_FIELDS[t], j)
                    fields.append((fname, t))
            FRAME_TYPE = frame.make_state_header_type(
                "FrameState_"+'_'.join(tcounts), *fields)
            self.frametypes[key] = (FRAME_TYPE, self.saving_function_for_type(FRAME_TYPE))
        T, save_state_funcptr = self.frametypes[key]
        varsforcall = list(vars)
        # sort by erased-storage-type index so vars line up with fields
        # (NOTE: this inner 'key' function shadows the 'key' dict key above,
        # which is no longer needed at this point)
        def key(v):
            return STORAGE_TYPES.index(storage_type(v.concretetype))
        def mycmp(x, y):
            return cmp(key(x), key(y))
        varsforcall.sort(mycmp)
        return T, varsforcall, save_state_funcptr
    def ensure_frame_type_for_types(self, FRAME_TYPE):
        """Pre-register a prebuilt FRAME_TYPE (at most one saved value)
        so later requests with the same shape reuse it."""
        assert len(FRAME_TYPE._names[1:]) <= 1, "too lazy"
        if len(FRAME_TYPE._names[1:]) == 1:
            fname, = FRAME_TYPE._names[1:]
            T = FRAME_TYPE._flds[fname]
            key = self._key_for_types([T])
        else:
            key = self._key_for_types([])
        if key in self.frametypes:
            assert self.frametypes[key][0] is FRAME_TYPE
        self.frametypes[key] = (FRAME_TYPE, self.saving_function_for_type(FRAME_TYPE))
class StacklessAnalyzer(graphanalyze.GraphAnalyzer):
    """Decides which operations and external calls may unwind the stack."""

    def __init__(self, translator, unwindtype, stackless_gc):
        graphanalyze.GraphAnalyzer.__init__(self, translator)
        self.unwindtype = unwindtype
        self.stackless_gc = stackless_gc

    def operation_is_true(self, op):
        # these four pseudo-operations always count as unwinders
        if op.opname in ('yield_current_frame_to_caller',
                         'resume_point',
                         'resume_state_invoke',
                         'resume_state_create'):
            return True
        # with a stackless GC, anything that can unwind during a
        # collection counts as well
        return self.stackless_gc and LL_OPERATIONS[op.opname].canunwindgc

    def analyze_external_call(self, op):
        target = op.args[0].value._obj._callable
        return target in [ll_stack.ll_stack_unwind, ll_stack.ll_stack_capture,
                          ll_stackless.ll_stackless_stack_frames_depth,
                          ll_stackless.ll_stackless_switch]
def vars_to_save(block):
    """Collect the distinct Variables carried by *block*'s exit links,
    skipping the last operation's result and the exception
    pseudo-variables (preserving first-seen order)."""
    lastresult = block.operations[-1].result
    collected = []
    for exit in block.exits:
        for arg in exit.args:
            if not isinstance(arg, model.Variable):
                continue
            if arg is lastresult:
                continue
            if arg in collected:
                continue
            if arg in [exit.last_exception, exit.last_exc_value]:
                continue
            collected.append(arg)
    return collected
class StacklessTransformer(object):
    def __init__(self, translator, entrypoint,
                 stackless_gc=False, assert_unwind=False):
        """Set up the transformer: wrap *entrypoint* in the stackless
        main loop, and annotate (via a MixLevelHelperAnnotator) all the
        low-level helpers from pypy.translator.stackless.code that the
        transformed graphs will call.

        assert_unwind=True builds an entry point that asserts the stack
        was actually unwound (used by tests)."""
        self.translator = translator
        self.stackless_gc = stackless_gc
        self.frametyper = FrameTyper(stackless_gc, self)
        self.masterarray1 = []
        self.curr_graph = None
        self.signaturecodes = [{} for RETTYPE in frame.STORAGE_TYPES]
        # self.signaturecodes[n] is a dict {ARGTYPES: signature_index}
        # where n is the return type as an index in STORAGE_TYPES.
        # the signature_index is an arbitrary number but it encodes
        # the type of the result, i.e.
        #    n == (signature_index & storage_type_bitmask)
        bk = translator.annotator.bookkeeper
        self.unwind_exception_type = getinstancerepr(
            self.translator.rtyper,
            bk.getuniqueclassdef(code.UnwindException)).lowleveltype
        self.analyzer = StacklessAnalyzer(translator,
                                          self.unwind_exception_type,
                                          stackless_gc)
        # the point of this little dance is to not annotate
        # code.global_state.masterarray as a constant.
        data_classdef = bk.getuniqueclassdef(code.StacklessData)
        data_classdef.generalize_attr(
            'masterarray',
            annmodel.SomePtr(lltype.Ptr(frame.FRAME_INFO_ARRAY)))
        mixlevelannotator = MixLevelHelperAnnotator(translator.rtyper)
        l2a = annmodel.lltype_to_annotation
        if assert_unwind:
            def slp_entry_point(argv):
                try:
                    r = entrypoint(argv)
                except code.UnwindException, u:
                    code.slp_main_loop()
                    return code.global_state.retval_long
                else:
                    assert False, "entrypoint never unwound the stack"
                return r
            slp_entry_point.stackless_explicit = True
        else:
            def slp_entry_point(argv):
                try:
                    r = entrypoint(argv)
                except code.UnwindException, u:
                    code.slp_main_loop()
                    return code.global_state.retval_long
                return r
            slp_entry_point.stackless_explicit = True
        self.slp_entry_point = slp_entry_point
        oldgraph = bk.getdesc(entrypoint).getuniquegraph()
        s_argv = translator.annotator.binding(oldgraph.getargs()[0])
        self.slp_entry_point_ptr = mixlevelannotator.constfunc(
            slp_entry_point, [s_argv], annmodel.SomeInteger())
        unwinddef = bk.getuniqueclassdef(code.UnwindException)
        self.add_frame_state_ptr = mixlevelannotator.constfunc(
            code.add_frame_state,
            [annmodel.SomeInstance(unwinddef),
             annmodel.SomePtr(lltype.Ptr(STATE_HEADER))],
            l2a(lltype.Void))
        # helpers that read the saved return value back, one per erased type.
        # order really matters on 64 bits machines on which
        # longlong==signed; so lltype.Signed must appear *after*
        # longlong in this dict
        self.fetch_retvals = {
            lltype.Void: mixlevelannotator.constfunc(
                code.fetch_retval_void, [], annmodel.s_None),
            lltype.SignedLongLong: mixlevelannotator.constfunc(
                code.fetch_retval_longlong, [], annmodel.SomeInteger(knowntype=rarithmetic.r_longlong)),
            lltype.Signed: mixlevelannotator.constfunc(
                code.fetch_retval_long, [], annmodel.SomeInteger()),
            lltype.Float: mixlevelannotator.constfunc(
                code.fetch_retval_float, [], annmodel.SomeFloat()),
            llmemory.Address: mixlevelannotator.constfunc(
                code.fetch_retval_addr, [], annmodel.SomeAddress()),
            SAVED_REFERENCE: mixlevelannotator.constfunc(
                code.fetch_retval_ref, [], annmodel.SomePtr(SAVED_REFERENCE)),
            llmemory.WeakGcAddress: mixlevelannotator.constfunc(
                code.fetch_retval_weak, [], annmodel.SomeWeakGcAddress()),
            }
        s_StatePtr = annmodel.SomePtr(frame.OPAQUE_STATE_HEADER_PTR)
        # replacements for the low-level suggested primitives
        self.suggested_primitives = {
            ll_stackless.ll_stackless_stack_frames_depth:
                mixlevelannotator.constfunc(
                    code.stack_frames_depth, [], annmodel.SomeInteger()),
            ll_stackless.ll_stackless_switch:
                mixlevelannotator.constfunc(
                    code.ll_frame_switch, [s_StatePtr], s_StatePtr),
            ll_stack.ll_stack_unwind:
                mixlevelannotator.constfunc(
                    code.ll_stack_unwind, [], annmodel.s_None),
            ll_stack.ll_stack_capture:
                mixlevelannotator.constfunc(
                    code.ll_stack_capture, [], s_StatePtr),
            }
        self.yield_current_frame_to_caller_ptr = mixlevelannotator.constfunc(
            code.yield_current_frame_to_caller, [], s_StatePtr)
        s_hdrptr = annmodel.SomePtr(lltype.Ptr(STATE_HEADER))
        # helpers for resume_state_invoke, one per erased return type.
        # order really matters on 64 bits machines on which
        # longlong==signed; so lltype.Signed must appear *after*
        # longlong in this dict
        self.resume_afters = {
            lltype.Void: mixlevelannotator.constfunc(
                code.resume_after_void,
                [s_StatePtr, annmodel.s_None],
                annmodel.s_None),
            lltype.SignedLongLong: mixlevelannotator.constfunc(
                code.resume_after_longlong,
                [s_StatePtr, annmodel.SomeInteger(knowntype=rarithmetic.r_longlong)],
                annmodel.s_None),
            lltype.Signed: mixlevelannotator.constfunc(
                code.resume_after_long,
                [s_StatePtr, annmodel.SomeInteger()],
                annmodel.s_None),
            lltype.Float: mixlevelannotator.constfunc(
                code.resume_after_float,
                [s_StatePtr, annmodel.SomeFloat()],
                annmodel.s_None),
            llmemory.Address: mixlevelannotator.constfunc(
                code.resume_after_addr,
                [s_StatePtr, annmodel.SomeAddress()],
                annmodel.s_None),
            SAVED_REFERENCE: mixlevelannotator.constfunc(
                code.resume_after_ref,
                [s_StatePtr, annmodel.SomePtr(SAVED_REFERENCE)],
                annmodel.s_None),
            llmemory.WeakGcAddress: mixlevelannotator.constfunc(
                code.resume_after_weak,
                [s_StatePtr, annmodel.SomeWeakGcAddress()],
                annmodel.s_None),
            }
        exception_def = bk.getuniqueclassdef(Exception)
        self.resume_after_raising_ptr = mixlevelannotator.constfunc(
            code.resume_after_raising,
            [s_StatePtr, annmodel.SomeInstance(exception_def)],
            annmodel.s_None)
        self.exception_type = getinstancerepr(
            self.translator.rtyper, exception_def).lowleveltype
        mixlevelannotator.finish()
        s_global_state = bk.immutablevalue(code.global_state)
        r_global_state = translator.rtyper.getrepr(s_global_state)
        self.ll_global_state = model.Constant(
            r_global_state.convert_const(code.global_state),
            r_global_state.lowleveltype)
        self.seen_blocks = set()
        # some prebuilt constants to save memory
        self.c_restart_substate_name = model.Constant("inst_restart_substate",
                                                      lltype.Void)
        self.c_inst_top_name = model.Constant("inst_top", lltype.Void)
        self.c_f_restart_name = model.Constant("f_restart", lltype.Void)
        self.c_minus_one = model.Constant(-1, lltype.Signed)
        self.c_null_state = model.Constant(null_state,
                                           lltype.typeOf(null_state))
        self.c_gc_nocollect = model.Constant("gc_nocollect", lltype.Void)
        self.is_finished = False
        # only for sanity checking, but still very very important
        self.explicit_resume_point_data = {}
        self.symbolic_restart_numbers = {}
        # register the prebuilt restartinfos & give them names for use
        # with resume_state_create
        # the mauling of frame_typer internals should be a method on FrameTyper.
        for restartinfo in frame.RestartInfo.prebuilt:
            name = restartinfo.func_or_graph.__name__
            for i in range(len(restartinfo.frame_types)):
                label = name + '_' + str(i)
                assert label not in self.symbolic_restart_numbers
                # XXX we think this is right:
                self.symbolic_restart_numbers[label] = SymbolicRestartNumber(
                    label, len(self.masterarray1) + i)
                frame_type = restartinfo.frame_types[i]
                self.explicit_resume_point_data[label] = frame_type
                self.frametyper.ensure_frame_type_for_types(frame_type)
            self.register_restart_info(restartinfo)
        if SAVE_STATISTICS:
            translator.stackless_stats = self.stats = StacklessStats()
def transform_all(self):
for graph in self.translator.graphs:
self.transform_graph(graph)
self.finish()
    def transform_graph(self, graph):
        """Transform one graph, if it needs it: insert save/resume
        machinery around every operation that may unwind the stack."""
        self.resume_blocks = []
        # graphs marked stackless_explicit are hand-written support code
        # and must not be transformed (except for gc_nocollect mallocs)
        if hasattr(graph, 'func'):
            if getattr(graph.func, 'stackless_explicit', False):
                if self.stackless_gc:
                    self.transform_gc_nocollect(graph)
                return
        # skip graphs the analyzer proves can never unwind
        if not self.analyzer.analyze_direct_call(graph):
            return
        assert self.curr_graph is None
        self.curr_graph = graph
        # per-graph caches of reusable save / resume helper blocks
        self.curr_graph_save_blocks = {}
        self.curr_graph_resume_retrieval_blocks = {}
        self.curr_graph_resume_return_blocks = {}
        self.curr_graph_resume_cast_blocks = {}
        if SAVE_STATISTICS:
            self.stats.cur_rp_exact_types = {}
            self.stats.cur_rp_erased_types = {}
        # list() because transform_block mutates the graph as we go
        for block in list(graph.iterblocks()):
            assert block not in self.seen_blocks
            self.transform_block(block)
            self.seen_blocks.add(block)
        if self.resume_blocks:
            self.insert_resume_handling(graph)
            self.generate_restart_infos(graph)
        if SAVE_STATISTICS:
            # count how many resume points shared an exact / erased type
            pot_exact_save_count = 0
            for t, count in self.stats.cur_rp_exact_types.items():
                pot_exact_save_count += count - 1
            del self.stats.cur_rp_exact_types
            self.stats.pot_exact_saves[gkey(self.curr_graph)] = pot_exact_save_count
            self.stats.total_pot_exact_saves += pot_exact_save_count
            pot_erased_save_count = 0
            for t, count in self.stats.cur_rp_erased_types.items():
                pot_erased_save_count += count - 1
            del self.stats.cur_rp_erased_types
            self.stats.pot_erased_saves[gkey(self.curr_graph)] = pot_erased_save_count
            self.stats.total_pot_erased_saves += pot_erased_save_count
        # reset per-graph state
        self.curr_graph = None
        self.curr_graph_save_blocks = None
        self.curr_graph_resume_retrieval_blocks = None
        self.curr_graph_resume_return_blocks = None
        self.curr_graph_resume_cast_blocks = None
    def insert_resume_handling(self, graph):
        """Prepend a new start block that switches on
        global_state.restart_substate: -1 falls through to the original
        start block, other values jump to the matching resume block."""
        old_start_block = graph.startblock
        newinputargs = [unsimplify.copyvar(self.translator.annotator, v)
                        for v in old_start_block.inputargs]
        new_start_block = model.Block(newinputargs)
        v_resume_substate = varoftype(lltype.Signed)
        new_start_block.operations.append(
            model.SpaceOperation("getfield",
                                 [self.ll_global_state,
                                  self.c_restart_substate_name],
                                 v_resume_substate))
        # -1 == "not resuming": run the function normally
        not_resuming_link = model.Link(newinputargs, old_start_block, -1)
        not_resuming_link.llexitcase = -1
        resuming_links = []
        for resume_index, resume_block in enumerate(self.resume_blocks):
            resuming_links.append(
                model.Link([v_resume_substate], resume_block, resume_index))
            resuming_links[-1].llexitcase = resume_index
        new_start_block.exitswitch = v_resume_substate
        new_start_block.closeblock(not_resuming_link, *resuming_links)
        old_start_block.isstartblock = False
        new_start_block.isstartblock = True
        graph.startblock = new_start_block
        # drop now-pointless exitswitches left over from block splitting
        for block in graph.iterblocks():
            if len(block.exits) == 1 and block.exitswitch is not None:
                block.exitswitch = None
                block.exits[0].exitcase = block.exits[0].llexitcase = None
        # clean up the extra blocks and dead variables we introduced
        simplify.simplify_graph(graph, [simplify.eliminate_empty_blocks,
                                        simplify.join_blocks,
                                        simplify.transform_dead_op_vars])
    def insert_return_conversion(self, link, targettype, retvar):
        """Insert a block on *link* that casts *retvar* to *targettype*,
        and patch the new block's outgoing args to use the cast value."""
        llops = LowLevelOpList()
        newvar = gen_cast(llops, targettype, retvar)
        convertblock = unsimplify.insert_empty_block(None, link, llops)
        # begin ouch!
        for index, linkvar in enumerate(convertblock.exits[0].args):
            # does this var come from retval ?
            try:
                index1 = convertblock.inputargs.index(linkvar)
            except ValueError:   # e.g. linkvar is a Constant
                continue
            if link.args[index1] is retvar:
                # yes
                convertblock.exits[0].args[index] = newvar
        # end ouch!
    def handle_resume_point(self, block, i):
        """Turn an explicit 'resume_point' operation at index *i* into a
        resume block, registering (or filling in) the symbolic restart
        number for its label.  Returns the block after the split, or
        None if the resume point was the last operation."""
        # in some circumstances we might be able to reuse
        # an already inserted resume point
        op = block.operations[i]
        if i == len(block.operations) - 1:
            link = block.exits[0]
            nextblock = None
        else:
            link = support.split_block_with_keepalive(block, i+1)
            i = 0
            nextblock = link.target
        label = op.args[0].value
        parms = op.args[1:]
        # parms[0] is the "returns=" variable, or a None constant
        if not isinstance(parms[0], model.Variable):
            assert parms[0].value is None
            parms[0] = None
        args = vars_to_save(block)
        # every live value must be covered by the resume point's parms
        for a in args:
            if a not in parms:
                raise Exception, "not covered needed value at resume_point %r"%(label,)
        if parms[0] is not None: # returns= case
            res = parms[0]
            args = [arg for arg in args if arg is not res]
        else:
            args = args
            res = op.result
        (FRAME_TYPE, varsforcall, saver) = self.frametyper.frame_type_for_vars(parms[1:])
        # the same label may appear more than once; the frame types must agree
        if label in self.explicit_resume_point_data:
            OTHER_TYPE = self.explicit_resume_point_data[label]
            assert FRAME_TYPE == OTHER_TYPE, "inconsistent types for label %r"%(label,)
        else:
            self.explicit_resume_point_data[label] = FRAME_TYPE
        self._make_resume_handling(FRAME_TYPE, varsforcall, res, block.exits)
        restart_number = len(self.masterarray1) + len(self.resume_blocks) - 1
        # fill in (or create) the symbolic number for this label
        if label in self.symbolic_restart_numbers:
            symb = self.symbolic_restart_numbers[label]
            assert symb.value is None
            symb.value = restart_number
        else:
            symb = SymbolicRestartNumber(label, restart_number)
            self.symbolic_restart_numbers[label] = symb
        return nextblock
def handle_resume_state_create(self, block, i):
op = block.operations[i]
llops = LowLevelOpList()
label = op.args[1].value
parms = op.args[2:]
FRAME_TYPE, varsforcall, saver = self.frametyper.frame_type_for_vars(parms)
if label in self.explicit_resume_point_data:
OTHER_TYPE = self.explicit_resume_point_data[label]
assert FRAME_TYPE == OTHER_TYPE, "inconsistent types for label %r"%(label,)
else:
self.explicit_resume_point_data[label] = FRAME_TYPE
if label in self.symbolic_restart_numbers:
symb = self.symbolic_restart_numbers[label]
else:
symb = SymbolicRestartNumber(label)
self.symbolic_restart_numbers[label] = symb
# this is rather insane: we create an exception object, pass
# it to the saving function, then read the thus created state
# out of and then clear global_state.top
c_EXC = model.Constant(self.unwind_exception_type.TO, lltype.Void)
v_exc = llops.genop('malloc', [c_EXC],
resulttype = self.unwind_exception_type)
realvarsforcall = []
for v in varsforcall:
if v.concretetype != lltype.Void:
realvarsforcall.append(gen_cast(llops, storage_type(v.concretetype), v))
llops.genop('direct_call',
[model.Constant(saver, lltype.typeOf(saver)), v_exc,
model.Constant(symb, lltype.Signed)] + realvarsforcall,
resulttype = lltype.Void)
v_state = varoftype(lltype.Ptr(frame.STATE_HEADER))
v_state_hdr = llops.genop("getfield",
[self.ll_global_state, self.c_inst_top_name],
resulttype=lltype.Ptr(STATE_HEADER))
v_state = gen_cast(llops, lltype.Ptr(FRAME_TYPE), v_state_hdr)
llops.genop("setfield",
[self.ll_global_state, self.c_inst_top_name, self.c_null_state])
v_prevstate = gen_cast(llops, lltype.Ptr(frame.STATE_HEADER), op.args[0])
llops.genop('setfield', [v_state_hdr,
model.Constant('f_back', lltype.Void),
v_prevstate])
llops.append(model.SpaceOperation('cast_opaque_ptr', [v_state_hdr], op.result))
block.operations[i:i+1] = llops
    def handle_resume_state_invoke(self, block):
        """Replace a trailing 'resume_state_invoke' operation with a call
        to the matching resume_after_* helper (which always unwinds)."""
        op = block.operations[-1]
        assert op.opname == 'resume_state_invoke'
        # some commentary.
        #
        # we don't want to write 155 or so different versions of
        # resume_after_foo that appear to the annotator to return
        # different types.  we take advantage of the fact that this
        # function always raises UnwindException and have it (appear
        # to) return Void.  then to placate all the other machinery,
        # we pass a constant zero-of-the-appropriate-type along the
        # non-exceptional link (which we know will never be taken).
        # Nota Bene: only mutate a COPY of the non-exceptional link
        # because the non-exceptional link has been stored in
        # self.resume_blocks and we don't want a constant "zero" in
        # there.
        v_state = op.args[0]
        v_returning = op.args[1]
        v_raising = op.args[2]
        llops = LowLevelOpList()
        # exactly one of v_returning / v_raising is non-Void
        if v_raising.concretetype == lltype.Void:
            erased_type = storage_type(v_returning.concretetype)
            resume_after_ptr = self.resume_afters[erased_type]
            v_param = v_returning
        else:
            assert v_returning.concretetype == lltype.Void
            erased_type = self.exception_type
            resume_after_ptr = self.resume_after_raising_ptr
            v_param = v_raising
        if erased_type != v_param.concretetype:
            v_param = gen_cast(llops, erased_type, v_param)
        llops.genop('direct_call', [resume_after_ptr, v_state, v_param],
                    resulttype=lltype.Void)
        del block.operations[-1]
        block.operations.extend(llops)
        # replace op.result on the (never-taken) non-exceptional link
        # with a dummy constant of the right type
        noexclink = block.exits[0].copy()
        realrettype = op.result.concretetype
        for i, a in enumerate(noexclink.args):
            if a is op.result:
                noexclink.args[i] = model.Constant(realrettype._defl(), realrettype)
        block.recloseblock(*((noexclink,) + block.exits[1:]))
def insert_unwind_handling(self, block, i):
# for the case where we are resuming to an except:
# block we need to store here a list of links that
# might be resumed to, and in insert_resume_handling
# we need to basically copy each link onto the
# resuming block.
#
# it probably also makes sense to compute the list of
# args to save once, here, and save that too.
#
# finally, it is important that the fetch_retval
# function be called right at the end of the resuming
# block, and that it is called even if the return
# value is not again used.
edata = self.translator.rtyper.getexceptiondata()
etype = edata.lltype_of_exception_type
evalue = edata.lltype_of_exception_value
if i == len(block.operations) - 1 \
and block.exitswitch == model.c_last_exception:
link = block.exits[0]
exitcases = dict.fromkeys([l.exitcase for l in block.exits])
nextblock = None
else:
link = support.split_block_with_keepalive(block, i+1)
nextblock = link.target
block.exitswitch = model.c_last_exception
link.llexitcase = None
# add a general Exception link, because all calls can
# raise anything
v_exctype = varoftype(etype)
v_excvalue = varoftype(evalue)
newlink = model.Link([v_exctype, v_excvalue],
self.curr_graph.exceptblock,
Exception)
newlink.last_exception = v_exctype
newlink.last_exc_value = v_excvalue
newexits = list(block.exits)
newexits.append(newlink)
block.recloseblock(*newexits)
self.translator.rtyper._convert_link(block, newlink)
v_unwind_exception = varoftype(evalue)
op = block.operations[i]
args = vars_to_save(block)
save_block, varsforcall = self.generate_save_and_resume_blocks(
args, v_unwind_exception, op.result, block.exits)
newlink = model.Link(varsforcall + [v_unwind_exception],
save_block, code.UnwindException)
newlink.last_exception = model.Constant(code.UnwindException,
etype)
newlink.last_exc_value = v_unwind_exception
newexits = list(block.exits)
newexits.insert(1, newlink)
block.recloseblock(*newexits)
self.translator.rtyper._convert_link(block, newlink)
return nextblock
def transform_block(self, block):
    """Walk the operations of *block* and rewrite everything relevant to
    stackless support:

    * yield_current_frame_to_caller and calls to 'suggested primitives'
      are replaced by direct_calls to their stackless implementations;
    * resume_state_create / resume_point / resume_state_invoke are
      delegated to their specialized handlers;
    * every remaining operation that the analyzer says can unwind gets
      unwind handling inserted after it, except recognized tail calls.

    insert_unwind_handling() splits blocks, so after each insertion the
    scan restarts at i = 0 on the returned continuation block.
    """
    i = 0
    def replace_with_call(fnptr):
        # rewrite the current op in place into a direct_call to fnptr,
        # keeping the original arguments (minus the old callee)
        args = [fnptr] + op.args[1:]
        newop = model.SpaceOperation('direct_call', args, op.result)
        block.operations[i] = newop
        return newop
    while i < len(block.operations):
        stackless_op = False
        op = block.operations[i]
        if op.opname == 'yield_current_frame_to_caller':
            op = replace_with_call(self.yield_current_frame_to_caller_ptr)
            stackless_op = True
        if op.opname == 'resume_state_create':
            self.handle_resume_state_create(block, i)
            continue # go back and look at that malloc
        if (op.opname in ('direct_call', 'indirect_call')
            or self.analyzer.operation_is_true(op)):
            if op.opname == 'resume_point':
                block = self.handle_resume_point(block, i)
                if block is None:
                    return
                else:
                    i = 0
                    continue
            # trap calls to stackless-related suggested primitives
            if op.opname == 'direct_call':
                func = getattr(op.args[0].value._obj, '_callable', None)
                if func in self.suggested_primitives:
                    op = replace_with_call(self.suggested_primitives[func])
                    stackless_op = True
            if not stackless_op and not self.analyzer.analyze(op):
                # call cannot unwind: nothing to do
                i += 1
                continue
            if (not stackless_op and i == len(block.operations) - 1 and
                len(block.exits) == 1 and
                block.exits[0].target is self.curr_graph.returnblock and
                (block.exits[0].args[0].concretetype is lltype.Void or
                 block.exits[0].args[0] is op.result)):
                # tail call whose result is returned unchanged: no frame
                # needs to be saved for it
                # print "optimizing tail call %s in function %s" % (op, self.curr_graph.name)
                i += 1
                continue
            nextblock = self.insert_unwind_handling(block, i)
            if op.opname == 'resume_state_invoke':
                self.handle_resume_state_invoke(block)
            if nextblock is None:
                return
            block = nextblock
            i = 0
        else:
            i += 1
def generate_save_and_resume_blocks(self, varstosave, v_exception,
                                    v_result, links_to_resumption):
    """Create the machinery for one resume point: the save block that
    stores *varstosave* into a frame of the appropriate (erased) frame
    type, and the resume blocks that later restore them and dispatch to
    *links_to_resumption* (normal exit first, then exception cases).

    Returns (save_block, variables-to-pass-into-it).
    """
    frame_type, varsforcall, saver = self.frametyper.frame_type_for_vars(varstosave)
    if SAVE_STATISTICS:
        self.stats.rp_count += 1
        inc(self.stats.rp_type_counts, frame_type)
        inc(self.stats.rp_per_graph, gkey(self.curr_graph))
        inc(self.stats.rp_per_graph_type_counts.setdefault(gkey(self.curr_graph), {}), frame_type)
        exact_key = [v.concretetype for v in varstosave]
        exact_key.sort()
        exact_key = (tuple(exact_key), v_result.concretetype)
        inc(self.stats.cur_rp_exact_types, exact_key)
        inc(self.stats.cur_rp_erased_types, frame_type)
    varsforcall0 = varsforcall[:]
    # restart index = position this resume block will get in the final
    # masterarray (prebuilt entries come first)
    c_restart = model.Constant(len(self.masterarray1) + len(self.resume_blocks), lltype.Signed)
    varsforcall.insert(0, c_restart)
    varsforcall = [v for v in varsforcall if v.concretetype != lltype.Void]
    self._make_resume_handling(frame_type, varsforcall0,
                               v_result, links_to_resumption)
    return (self._generate_save_block(varsforcall, v_exception, saver),
            varsforcall)
def _generate_save_block(self, varsforcall, v_unwind_exception, saver):
    """Build (or fetch from the per-graph cache) the block that calls
    *saver* to store the live variables into a frame and then exits by
    re-raising the UnwindException towards the graph's exceptblock.

    Save blocks are shared between resume points whose call variables
    have identical concrete types.
    """
    conc_types = tuple([v.concretetype for v in varsforcall])
    if conc_types in self.curr_graph_save_blocks:
        return self.curr_graph_save_blocks[conc_types]
    rtyper = self.translator.rtyper
    edata = rtyper.getexceptiondata()
    etype = edata.lltype_of_exception_type
    evalue = edata.lltype_of_exception_value
    inputargs = [copyvar(v) for v in varsforcall]
    v_unwind_exception = copyvar(v_unwind_exception)
    save_state_block = model.Block(inputargs + [v_unwind_exception])
    saveops = LowLevelOpList()
    # the saver takes the exception cast to the unwind type, followed by
    # every live variable cast to its erased storage type
    v_exc = gen_cast(saveops, self.unwind_exception_type, v_unwind_exception)
    realvarsforcall = [v_exc]
    for v in inputargs:
        realvarsforcall.append(gen_cast(saveops, storage_type(v.concretetype), v))
    saveops.genop('direct_call',
                  [model.Constant(saver, lltype.typeOf(saver))] + realvarsforcall,
                  resulttype=lltype.Void)
    save_state_block.operations = saveops
    type_repr = rclass.get_type_repr(rtyper)
    c_unwindexception = model.Constant(
        type_repr.convert_const(code.UnwindException), etype)
    # the exceptblock args may not have been typed yet
    if not hasattr(self.curr_graph.exceptblock.inputargs[0], 'concretetype'):
        self.curr_graph.exceptblock.inputargs[0].concretetype = etype
    if not hasattr(self.curr_graph.exceptblock.inputargs[1], 'concretetype'):
        self.curr_graph.exceptblock.inputargs[1].concretetype = evalue
    save_state_block.closeblock(model.Link(
        [c_unwindexception, v_unwind_exception],
        self.curr_graph.exceptblock))
    self.translator.rtyper._convert_link(
        save_state_block, save_state_block.exits[0])
    if SAVE_STATISTICS:
        self.stats.saveops += len(save_state_block.operations)
    self.curr_graph_save_blocks[conc_types] = save_state_block
    return save_state_block
def _make_resume_handling(self, FRAME_TYPE, sorted_vars, v_retval, links_to_resumption):
    """Wire up the chain of blocks executed when this resume point is
    restarted: retrieve the erased fields from the frame, fetch the
    return value (re-raising any stored exception through the exception
    links), cast everything back to its exact type, and finally jump to
    the normal-exit link.  Registers the entry block in resume_blocks.
    """
    resume_substate = len(self.resume_blocks)
    erased_types = []
    for v in sorted_vars:
        if v.concretetype != lltype.Void:
            erased_types.append(storage_type(v.concretetype))
    retval_type = v_retval.concretetype
    erased_retval_type = storage_type(retval_type)
    retrieve_block, output_args = self._get_resume_retrieval_block(FRAME_TYPE, erased_types)
    return_block, switch_block = self._get_resume_return_block(
        erased_types, erased_retval_type, links_to_resumption[1:], sorted_vars)
    # retrieve -> return block, keyed on the substate index
    link = model.Link(output_args, return_block, resume_substate)
    link.llexitcase = link.exitcase
    retrieve_block.recloseblock(*(tuple(retrieve_block.exits) + (link,)))
    if erased_retval_type != lltype.Void:
        erased_types.append(erased_retval_type)
    cast_block, cast_args = self._get_resume_cast_block(
        erased_types,
        [v.concretetype for v in sorted_vars] + [retval_type])
    # switch -> cast block, again keyed on the substate index
    link = model.Link(switch_block.inputargs, cast_block, resume_substate)
    link.llexitcase = resume_substate
    switch_block.recloseblock(*(tuple(switch_block.exits) + (link,)))
    # NOTE(review): sorted_vars.index makes this O(n**2); frames are
    # expected to be small
    varmap = dict([(v, cast_args[sorted_vars.index(v)]) for v in sorted_vars])
    for k, v in varmap.items():
        assert k.concretetype == v.concretetype
    varmap[v_retval] = cast_args[-1]
    # cast -> the original normal-exit target of the call
    link = copy_link_with_varmap(links_to_resumption[0], varmap)
    link.exitcase = link.llexitcase = resume_substate
    cast_block.recloseblock(*(tuple(cast_block.exits) + (link,)))
    self.resume_blocks.append(retrieve_block)
def _make_resume_retrieval_block(self, FRAME_TYPE, erased_types):
    """Build the first block executed on resume: reset
    global_state.restart_substate, pop the top frame (clearing
    global_state.top), cast it to FRAME_TYPE and read back every saved
    erased field.

    Returns (block, [substate_var] + field_vars); the block switches on
    the substate variable, its exits to be added by the caller.
    """
    retrieve_block = model.Block([varoftype(lltype.Signed)])
    retrieve_block.exitswitch = retrieve_block.inputargs[0]
    llops = LowLevelOpList()
    llops.genop("setfield",
                [self.ll_global_state, self.c_restart_substate_name, self.c_minus_one])
    v_state_hdr = llops.genop("getfield",
                              [self.ll_global_state, self.c_inst_top_name],
                              resulttype=lltype.Ptr(STATE_HEADER))
    v_state = gen_cast(llops, lltype.Ptr(FRAME_TYPE), v_state_hdr)
    llops.genop("setfield",
                [self.ll_global_state, self.c_inst_top_name, self.c_null_state])
    output_args = [retrieve_block.inputargs[0]]
    # field 0 is the header; the remaining fields match erased_types
    assert len(FRAME_TYPE._names[1:]) == len(erased_types)
    for fieldname, TYPE in zip(FRAME_TYPE._names[1:], erased_types):
        assert FRAME_TYPE._flds[fieldname] == TYPE
        output_args.append(llops.genop("getfield",
                                       [v_state, model.Constant(fieldname, lltype.Void)],
                                       resulttype=TYPE))
    retrieve_block.operations = llops
    return retrieve_block, output_args
def _get_resume_retrieval_block(self, FRAME_TYPE, erased_types):
    """Cached front-end for _make_resume_retrieval_block, keyed on the
    tuple of erased field types (shared within the current graph)."""
    key = tuple(erased_types)
    cached = self.curr_graph_resume_retrieval_blocks.get(key)
    if cached is not None:
        if SAVE_STATISTICS:
            self.stats.saved_retrieval_ops += len(cached[0].operations)
        return cached
    retrieve_block, output_args = self._make_resume_retrieval_block(
        FRAME_TYPE, erased_types)
    self.curr_graph_resume_retrieval_blocks[key] = retrieve_block, output_args
    return retrieve_block, output_args
def _make_resume_return_block(self, erased_types, erased_retval_type, except_links, sorted_vars):
    """Build the (return_block, switch_block) pair used on resume.

    return_block calls the fetch_retval_* helper for the erased return
    type under c_last_exception: the helper re-raises any exception that
    was stored for this frame.  The normal exit carries the erased saved
    values (plus the fetched return value) into switch_block, while each
    catchable exception is routed through its own cast block into the
    target of the corresponding original except-link.
    """
    inputargs = [varoftype(lltype.Signed)] + [varoftype(t) for t in erased_types]
    return_block = model.Block(inputargs)
    return_block.exitswitch = model.c_last_exception
    llops = LowLevelOpList()
    getretval = self.fetch_retvals[erased_retval_type]
    v_retval = llops.genop("direct_call", [getretval],
                           resulttype=erased_retval_type)
    switch_block = model.Block([copyvar(v) for v in inputargs])
    switch_block.exitswitch = switch_block.inputargs[0]
    retlink = model.Link(inputargs, switch_block, None)
    if erased_retval_type != lltype.Void:
        retlink.args.append(v_retval)
        switch_block.inputargs.append(copyvar(v_retval))
    links = [retlink]
    for except_link in except_links:
        cast_block, cast_args = self._make_cast_block(
            erased_types, [v.concretetype for v in sorted_vars])
        varmap = dict([(v, cast_args[sorted_vars.index(v)]) for v in sorted_vars])
        link = model.Link(inputargs[1:], cast_block, except_link.exitcase)
        link.llexitcase = except_link.llexitcase
        # thread the exception type/value variables through the cast block
        for attr in "last_exception", "last_exc_value":
            old = getattr(except_link, attr)
            new = copyvar(old)
            setattr(link, attr, new)
            link.args.append(new)
            newnew = copyvar(new)
            cast_block.inputargs.append(newnew)
            varmap[old] = newnew
        links.append(link)
        link = copy_link_with_varmap(except_link, varmap)
        link.exitcase = link.llexitcase = None
        link.last_exception = link.last_exc_value = None
        cast_block.closeblock(link)
    return_block.operations = llops
    return_block.closeblock(*links)
    return return_block, switch_block
def _get_resume_return_block(self, erased_types, erased_retval_type, except_links, sorted_vars):
    """Cached front-end for _make_resume_return_block.

    Blocks are keyed on the erased return type, the erased saved types
    and the (exitcase, target) of each exception link; exception links
    carrying extra arguments make the block uncachable.
    """
    key = ((erased_retval_type,) + tuple(erased_types) +
           tuple([(elink.exitcase, elink.target) for elink in except_links]))
    if except_links and max([len(elink.args) for elink in except_links]) > 2:
        key = None      # not shareable
    if key is not None and key in self.curr_graph_resume_return_blocks:
        return_block, switch_block = self.curr_graph_resume_return_blocks[key]
        if SAVE_STATISTICS:
            self.stats.saved_return_ops += len(return_block.operations)
        return return_block, switch_block
    return_block, switch_block = self._make_resume_return_block(
        erased_types, erased_retval_type, except_links, sorted_vars)
    if key is not None:
        self.curr_graph_resume_return_blocks[key] = return_block, switch_block
    return return_block, switch_block
def _make_cast_block(self, erased_types, exact_types):
    """Build a block whose input is one variable per erased type and
    whose output_args hold the values cast back to exact_types; Void
    slots produce Void constants and consume no input."""
    erased_vars = [varoftype(T) for T in erased_types]
    cast_block = model.Block(erased_vars)
    cast_block.operations = LowLevelOpList()
    assert len([T for T in exact_types if T != lltype.Void]) == len(erased_vars)
    output_args = []
    consumed = 0
    for EXACT in exact_types:
        if EXACT == lltype.Void:
            output_args.append(model.Constant(None, lltype.Void))
            continue
        output_args.append(
            gen_cast(cast_block.operations, EXACT, erased_vars[consumed]))
        consumed += 1
    assert consumed == len(erased_vars)
    return cast_block, output_args
def _get_resume_cast_block(self, erased_vars, exact_types):
    """Cached variant of _make_cast_block for resume paths.

    Returns something that you should add a link to (with recloseblock),
    and the output_args to use in that link; a fresh block gets a
    leading Signed input used as its exitswitch.
    """
    key = tuple(exact_types)
    try:
        cast_block, output_args = self.curr_graph_resume_cast_blocks[key]
    except KeyError:
        cast_block, output_args = self._make_cast_block(erased_vars, exact_types)
        cast_block.inputargs.insert(0, varoftype(lltype.Signed))
        cast_block.exitswitch = cast_block.inputargs[0]
        self.curr_graph_resume_cast_blocks[key] = cast_block, output_args
    else:
        if SAVE_STATISTICS:
            self.stats.saved_cast_ops += len(cast_block.operations)
    return cast_block, output_args
def generate_restart_infos(self, graph):
    """Record a RestartInfo covering every resume block of *graph*."""
    self.register_restart_info(
        frame.RestartInfo(graph, len(self.resume_blocks)))
def register_restart_info(self, restartinfo):
    """Compress *restartinfo* and append its frame-info entries to the
    master array under construction.  Must happen before finish()."""
    assert not self.is_finished
    rtyper = self.translator.rtyper
    self.masterarray1.extend(
        restartinfo.compress(self.signaturecodes, rtyper))
def finish(self):
    """Finalize the whole transformation:

    * optionally dump the collected statistics;
    * patch each call_function_retval_xyz() placeholder graph into a
      switch over all recorded call signatures, each case performing the
      (type-erased) adr_call;
    * freeze masterarray1 into the prebuilt FRAME_INFO_ARRAY and attach
      it to the global state instance.

    Returns [masterarray] so the caller can keep it alive.
    """
    # compute the final masterarray by copying over the masterarray1,
    # which is a list of (fnaddr, info) pairs
    if SAVE_STATISTICS:
        import cPickle
        cPickle.dump(self.stats, open('stackless-stats.pickle', 'wb'))
    # fun fun fun patching the call_function_retval_xyz() functions!
    for RESTYPE, typename in frame.STORAGE_TYPES_AND_FIELDS:
        rettype_index = STORAGE_TYPES.index(RESTYPE)
        cache = self.signaturecodes[rettype_index]
        if not cache:
            continue # not used anyway, don't produce a broken empty switch
        func = getattr(code, 'call_function_retval_' + typename)
        desc = self.translator.annotator.bookkeeper.getdesc(func)
        graph = desc.getuniquegraph()
        [v_fnaddr, v_signature_index] = graph.getargs()
        # replace the placeholder startblock by a switch on the
        # signature index
        block = model.Block([v_fnaddr, v_signature_index])
        block.exitswitch = v_signature_index
        block.isstartblock = True
        graph.startblock = block
        switchlinks = []
        for ARGTYPES, signature_index in cache.items():
            # XXX because of type erasure, the following cast is
            # kind of invalid, but we hope that nobody will notice
            FUNCTYPE = lltype.Ptr(lltype.FuncType(ARGTYPES, RESTYPE))
            v_fnaddr1 = varoftype(v_fnaddr.concretetype)
            callblock = model.Block([v_fnaddr1])
            llops = LowLevelOpList()
            # call with dummy default values for each (erased) argument
            args_v = [model.Constant(TYPE._defl(), concretetype=TYPE)
                      for TYPE in ARGTYPES]
            v_res = llops.genop('adr_call', [v_fnaddr1] + args_v,
                                resulttype = RESTYPE)
            callblock.operations[:] = llops
            callblock.closeblock(model.Link([v_res], graph.returnblock))
            link = model.Link([v_fnaddr], callblock)
            link.exitcase = signature_index
            link.llexitcase = signature_index
            switchlinks.append(link)
        block.closeblock(*switchlinks)
        model.checkgraph(graph)
    self.is_finished = True
    masterarray = lltype.malloc(frame.FRAME_INFO_ARRAY,
                                len(self.masterarray1),
                                immortal=True)
    for dst, src in zip(masterarray, self.masterarray1):
        dst.fnaddr, dst.info = src
    # horrors in the same spirit as in rpython.memory.gctransform
    # (shorter, though)
    ll_global_state = self.ll_global_state.value
    ll_global_state.inst_masterarray = masterarray
    return [masterarray]
def transform_gc_nocollect(self, graph):
    """For the framework GC: in stackless_explicit graphs, rewrite every
    malloc-like operation into its 'flavored_' variant carrying the
    gc_nocollect flavor, so the mallocs won't trigger a collect."""
    for block in graph.iterblocks():
        ops = block.operations
        for index in range(len(ops)):
            op = ops[index]
            if not op.opname.startswith('malloc'):
                continue
            ops[index] = model.SpaceOperation(
                'flavored_' + op.opname,
                [self.c_gc_nocollect] + op.args,
                op.result)
| Python |
from pypy.rpython.lltypesystem import lltype, llmemory, lloperation
from pypy.tool.sourcetools import func_with_new_name
from pypy.rlib import rarithmetic
from pypy.rpython import extfunctable
from pypy.translator.stackless import frame
from pypy.translator.stackless.frame import STATE_HEADER, SAVED_REFERENCE, STORAGE_TYPES_AND_FIELDS
# frame type with no saved variables at all: just the STATE_HEADER
EMPTY_STATE = frame.make_state_header_type('empty_state')
# ____________________________________________________________
# frame type used by ll_frame_switch() and the resume_after_* helpers:
# the header plus one GC reference 'c' holding the opaque target state
SWITCH_STATE = frame.make_state_header_type('switch_state',
                                            ('c', SAVED_REFERENCE))
def ll_frame_switch(targetstate):
    """Switch execution to *targetstate*, written as a three-phase
    resume machine driven by global_state.restart_substate:

    entry (-1)  unwind the C stack, saving targetstate in a SWITCH_STATE
                frame;
    STATE 0     after the unwind: park the current chain for the future
                switch back, install the target chain and unwind again;
    STATE 1     where a previously suspended switch() resumes; returns
                the suspended chain as an opaque state pointer.
    """
    if global_state.restart_substate == -1:
        # normal entry point for a call to state.switch()
        # first unwind the stack
        u = UnwindException()
        s = lltype.malloc(SWITCH_STATE)
        s.header.f_restart = INDEX_SWITCH
        s.c = lltype.cast_opaque_ptr(SAVED_REFERENCE, targetstate)
        add_frame_state(u, s.header)
        raise u
    elif global_state.restart_substate == 0:
        # STATE 0: we didn't do anything so far, but the stack is unwound
        global_state.restart_substate = -1
        # grab the frame corresponding to ourself
        # the 'targetstate' local is garbage here, it must be read back from
        # 's.c' where we saved it by the normal entry point above
        mystate = global_state.top
        s = lltype.cast_pointer(lltype.Ptr(SWITCH_STATE), mystate)
        targetstate = lltype.cast_opaque_ptr(lltype.Ptr(STATE_HEADER), s.c)
        # prepare a new saved state for the future switch() back,
        # which will go to STATE 1 below
        sourcestate = lltype.malloc(EMPTY_STATE).header
        sourcestate.f_back = mystate.f_back
        sourcestate.f_restart = INDEX_SWITCH + 1
        global_state.top = targetstate
        global_state.retval_ref = lltype.cast_opaque_ptr(SAVED_REFERENCE,
                                                         sourcestate)
        raise UnwindException()   # this jumps to targetstate
    else:
        # STATE 1: switching back into a tasklet suspended by
        # a call to switch()
        global_state.top = frame.null_state
        global_state.restart_substate = -1
        origin_state = lltype.cast_opaque_ptr(frame.OPAQUE_STATE_HEADER_PTR,
                                              fetch_retval_ref())
        return origin_state # a normal return into the current tasklet,
                            # with the source state as return value
ll_frame_switch.stackless_explicit = True
INDEX_SWITCH = frame.RestartInfo.add_prebuilt(ll_frame_switch,
                                              [SWITCH_STATE, EMPTY_STATE])
# ____________________________________________________________
def yield_current_frame_to_caller():
    """Suspend the current frame chain and hand control back to the
    caller's caller, returning (to the target of the implicit switch)
    the suspended chain as an opaque state pointer.  Again a resume
    machine: entry unwinds; STATE 0 redirects the chains; STATE 1 runs
    when the new tasklet's outermost frame finally returns."""
    if global_state.restart_substate == -1:
        # normal entry point for yield_current_frame_to_caller()
        # first unwind the stack
        u = UnwindException()
        s = lltype.malloc(EMPTY_STATE).header
        s.f_restart = INDEX_YCFTC
        add_frame_state(u, s)
        raise u   # this goes to 'STATE 0' below
    elif global_state.restart_substate == 0:
        # STATE 0: we didn't do anything so far, but the stack is unwound
        global_state.restart_substate = -1
        ycftc_state = global_state.top
        our_caller_state = ycftc_state.f_back
        caller_state = our_caller_state.f_back
        # when our immediate caller finishes (which is later, when the
        # tasklet finishes), then we will jump to 'STATE 1' below
        endstate = lltype.malloc(EMPTY_STATE).header
        endstate.f_restart = INDEX_YCFTC + 1
        our_caller_state.f_back = endstate
        global_state.top = caller_state
        global_state.retval_ref = lltype.cast_opaque_ptr(SAVED_REFERENCE,
                                                         our_caller_state)
        raise UnwindException()  # this goes to the caller's caller
    elif global_state.restart_substate == 1:
        # STATE 1: this is a slight abuse of yield_current_frame_to_caller(),
        # as we return here when our immediate caller returns (and thus the
        # new tasklet finishes).
        global_state.restart_substate = -1
        next_state = lltype.cast_opaque_ptr(lltype.Ptr(STATE_HEADER),
                                            fetch_retval_ref())
        # return a NULL state pointer to the target of the implicit switch
        global_state.top = next_state
        global_state.retval_ref = frame.null_saved_ref
        raise UnwindException()  # this goes to the switch target given by
                                 # the 'return' at the end of our caller
    else:
        # this is never reached!  But the annotator doesn't know it,
        # so it makes the whole function be annotated as returning a random
        # non-constant STATE_HEADER pointer.
        return lltype.cast_opaque_ptr(frame.OPAQUE_STATE_HEADER_PTR,
                                      global_state.top)
yield_current_frame_to_caller.stackless_explicit = True
INDEX_YCFTC = frame.RestartInfo.add_prebuilt(yield_current_frame_to_caller,
                                             [EMPTY_STATE, EMPTY_STATE])
# ____________________________________________________________
def stack_frames_depth():
    """Return the current stack depth: entry unwinds the C stack into
    the heap, then STATE 0 simply counts the saved frames by walking the
    f_back chain."""
    if global_state.restart_substate == -1:
        # normal entry point for stack_frames_depth()
        # first unwind the stack
        u = UnwindException()
        s = lltype.malloc(EMPTY_STATE).header
        s.f_restart = INDEX_DEPTH
        add_frame_state(u, s)
        raise u    # goes to STATE 0 below
    else:
        # STATE 0: now the stack is unwound, and we can count the frames
        # in the heap
        cur = global_state.top
        global_state.top = frame.null_state
        global_state.restart_substate = -1
        depth = 0
        while cur:
            depth += 1
            cur = cur.f_back
        return depth
stack_frames_depth.stackless_explicit = True
INDEX_DEPTH = frame.RestartInfo.add_prebuilt(stack_frames_depth,
                                             [EMPTY_STATE])
# ____________________________________________________________
def ll_stack_unwind():
    """Force the C stack to be unwound into the heap and immediately
    resumed — used to reset C stack usage."""
    if global_state.restart_substate == -1:
        # normal entry point for ll_stack_unwind()
        # first unwind the stack in the usual way
        u = UnwindException()
        s = lltype.malloc(EMPTY_STATE).header
        s.f_restart = INDEX_UNWIND
        add_frame_state(u, s)
        raise u    # goes to STATE 0 below
    else:
        # STATE 0: now the stack is unwound. That was the goal.
        # Return to caller.
        global_state.top = frame.null_state
        global_state.restart_substate = -1
ll_stack_unwind.stackless_explicit = True
INDEX_UNWIND = frame.RestartInfo.add_prebuilt(ll_stack_unwind,
                                              [EMPTY_STATE])
# ____________________________________________________________
def ll_stack_capture():
    """Unwind the C stack and return (to the resumed caller) the
    caller's own saved state as an opaque pointer."""
    if global_state.restart_substate == -1:
        # normal entry point for ll_stack_capture()
        # first unwind the stack in the usual way
        u = UnwindException()
        s = lltype.malloc(EMPTY_STATE).header
        s.f_restart = INDEX_CAPTURE
        add_frame_state(u, s)
        raise u    # goes to STATE 0 below
    else:
        # STATE 0: now the stack is unwound. That was the goal.
        # Return to caller.
        cur = global_state.top
        global_state.top = frame.null_state
        global_state.restart_substate = -1
        # Pass the caller's own saved state back to it.
        # The StacklessFrameworkGCTransformer uses this for introspection.
        return lltype.cast_opaque_ptr(frame.OPAQUE_STATE_HEADER_PTR,
                                      cur.f_back)
ll_stack_capture.stackless_explicit = True
INDEX_CAPTURE = frame.RestartInfo.add_prebuilt(ll_stack_capture,
                                               [EMPTY_STATE])
def resume_after_void(state, retvalue):
    """Resume the saved chain *state* (with a Void result): entry
    unwinds; STATE 0 splices the bottom of the target chain onto our
    caller's frame and switches to it, discarding our own frame."""
    if global_state.restart_substate == -1:
        # normal entry point for a call to resume_after_void()
        # first unwind the stack
        u = UnwindException()
        s = lltype.malloc(SWITCH_STATE)
        s.header.f_restart = INDEX_RESUME_AFTER_VOID
        s.c = lltype.cast_opaque_ptr(SAVED_REFERENCE, state)
        add_frame_state(u, s.header)
        raise u
    elif global_state.restart_substate == 0:
        # STATE 0: we didn't do anything so far, but the stack is unwound
        global_state.restart_substate = -1
        # grab the frame corresponding to ourself
        # the 'state' argument is garbage here, it must be read back from
        # 's.c' where we saved it by the normal entry point above
        mystate = global_state.top
        s = lltype.cast_pointer(lltype.Ptr(SWITCH_STATE), mystate)
        targetstate = lltype.cast_opaque_ptr(lltype.Ptr(STATE_HEADER), s.c)
        # chain the resumed frames on top of our caller's frames
        resume_bottom = targetstate
        while resume_bottom.f_back:
            resume_bottom = resume_bottom.f_back
        resume_bottom.f_back = mystate.f_back
        global_state.top = targetstate
        raise UnwindException()
resume_after_void.stackless_explicit = True
INDEX_RESUME_AFTER_VOID = frame.RestartInfo.add_prebuilt(resume_after_void,
                                                         [SWITCH_STATE,
                                                          EMPTY_STATE])
def resume_after_raising(state, exception):
    """Like resume_after_void(), but stores *exception* so that the
    resumed frame's fetch_retval_*() re-raises it there."""
    if global_state.restart_substate == -1:
        # normal entry point for a call to resume_after_raising()
        # first unwind the stack
        u = UnwindException()
        s = lltype.malloc(SWITCH_STATE)
        s.header.f_restart = INDEX_RESUME_AFTER_RAISING
        s.c = lltype.cast_opaque_ptr(SAVED_REFERENCE, state)
        add_frame_state(u, s.header)
        # the exception is delivered by fetch_retval_* in the target
        global_state.exception = exception
        raise u
    elif global_state.restart_substate == 0:
        # STATE 0: we didn't do anything so far, but the stack is unwound
        global_state.restart_substate = -1
        # grab the frame corresponding to ourself
        # the 'state' argument is garbage here, it must be read back from
        # 's.c' where we saved it by the normal entry point above
        mystate = global_state.top
        s = lltype.cast_pointer(lltype.Ptr(SWITCH_STATE), mystate)
        targetstate = lltype.cast_opaque_ptr(lltype.Ptr(STATE_HEADER), s.c)
        # chain the resumed frames on top of our caller's frames
        resume_bottom = targetstate
        while resume_bottom.f_back:
            resume_bottom = resume_bottom.f_back
        resume_bottom.f_back = mystate.f_back
        global_state.top = targetstate
        raise UnwindException()
resume_after_raising.stackless_explicit = True
INDEX_RESUME_AFTER_RAISING = frame.RestartInfo.add_prebuilt(resume_after_raising,
                                                            [SWITCH_STATE,
                                                             EMPTY_STATE])
# Source template for the typed resume_after_<typename>() helpers: the
# same shape as resume_after_void() above, but stashing the value to
# deliver in the matching global_state.retval_<typename> slot.  One
# function per non-Void storage type is generated by the exec loop below.
template = """\
def resume_after_%(typename)s(state, retvalue):
    if global_state.restart_substate == -1:
        # normal entry point for a call to state.switch()
        # first unwind the stack
        u = UnwindException()
        s = lltype.malloc(SWITCH_STATE)
        s.header.f_restart = INDEX_RESUME_AFTER_%(TYPENAME)s
        s.c = lltype.cast_opaque_ptr(SAVED_REFERENCE, state)
        global_state.retval_%(typename)s = retvalue
        add_frame_state(u, s.header)
        raise u
    elif global_state.restart_substate == 0:
        # STATE 0: we didn't do anything so far, but the stack is unwound
        global_state.restart_substate = -1
        # grab the frame corresponding to ourself
        # the 'targetstate' local is garbage here, it must be read back from
        # 's.c' where we saved it by the normal entry point above
        mystate = global_state.top
        s = lltype.cast_pointer(lltype.Ptr(SWITCH_STATE), mystate)
        targetstate = lltype.cast_opaque_ptr(lltype.Ptr(STATE_HEADER), s.c)
        resume_bottom = targetstate
        while resume_bottom.f_back:
            resume_bottom = resume_bottom.f_back
        resume_bottom.f_back = mystate.f_back
        global_state.top = targetstate
        raise UnwindException()
resume_after_%(typename)s.stackless_explicit = True
INDEX_RESUME_AFTER_%(TYPENAME)s = frame.RestartInfo.add_prebuilt(resume_after_%(typename)s,
                                                                 [SWITCH_STATE,
                                                                  EMPTY_STATE])
"""
# instantiate the template once per storage type ('void' is covered by
# the hand-written resume_after_void above)
for _lltype, typename in STORAGE_TYPES_AND_FIELDS:
    if typename == 'void': continue
    exec template%dict(typename=typename, TYPENAME=typename.upper())
# ____________________________________________________________
class StacklessData:
    """Singleton holding the runtime state of the stackless support:
    the chain of saved frames, the pending restart substate, one result
    slot per erased storage type, and a pending exception."""
    def __init__(self):
        self.top = frame.null_state              # top of the saved-frame chain
        self.restart_substate = -1               # -1: normal call; >= 0: resuming
        # one return-value slot per erased storage type
        self.retval_long = 0
        self.retval_longlong = rarithmetic.r_longlong(0)
        self.retval_float = 0.0
        self.retval_addr = llmemory.NULL
        self.retval_ref = frame.null_saved_ref
        self.retval_weak = llmemory.WEAKNULL
        # exception to be re-raised by fetch_retval_* in the resumed frame
        self.exception = None
        # replaced in transform.py's finish() by the real array
        self.masterarray = lltype.malloc(frame.FRAME_INFO_ARRAY, 0,
                                         immortal=True)
global_state = StacklessData()
# the following functions are patched by transform.py in finish()
# so that they don't really do what they appear to - we discovered
# that it was not safe at all to produce this kind of C code
def define_call_function_retval(TYPE, typename):
    """Define a module-level call_function_retval_<typename>(fnaddr,
    signature_index) placeholder returning TYPE.  Its graph is replaced
    in Transformer.finish() by a switch over the recorded signatures."""
    FUNCTYPE = lltype.Ptr(lltype.FuncType([], TYPE))
    def call_function_retval_xyz(fnaddr, signature_index):
        fn = llmemory.cast_adr_to_ptr(fnaddr, FUNCTYPE)
        return fn()
    call_function_retval_xyz.stackless_explicit = True
    call_function_retval_xyz.dont_inline = True
    fnname = 'call_function_retval_' + typename
    fn = func_with_new_name(call_function_retval_xyz, fnname)
    globals()[fnname] = fn
# one placeholder per erased storage type
for _lltype, _typename in STORAGE_TYPES_AND_FIELDS:
    define_call_function_retval(_lltype, _typename)
def call_function(fn, signature_index):
    """Re-invoke the saved function address *fn*, dispatching on the low
    bits of *signature_index* (the erased return-type code) to store the
    result in the matching global_state.retval_* slot."""
    retval_code = signature_index & frame.storage_type_bitmask
    if retval_code == frame.RETVAL_VOID:
        call_function_retval_void(fn, signature_index)
    elif retval_code == frame.RETVAL_REF:
        global_state.retval_ref = (
            call_function_retval_ref(fn, signature_index))
    elif retval_code == frame.RETVAL_ADDR:
        global_state.retval_addr = (
            call_function_retval_addr(fn, signature_index))
    elif retval_code == frame.RETVAL_LONG:
        global_state.retval_long = (
            call_function_retval_long(fn, signature_index))
    elif retval_code == frame.RETVAL_FLOAT:
        global_state.retval_float = (
            call_function_retval_float(fn, signature_index))
    elif retval_code == frame.RETVAL_LONGLONG:
        global_state.retval_longlong = (
            call_function_retval_longlong(fn, signature_index))
    elif retval_code == frame.RETVAL_WEAK:
        global_state.retval_weak = (
            call_function_retval_weak(fn, signature_index))
    else:
        assert False   # unreachable: every RETVAL_* code is handled above
call_function.stackless_explicit = True
class UnwindException(lloperation.StackException):
    """Raised to empty the C stack; every transformed frame catches it,
    saves its state via add_frame_state() and re-raises."""
    def __init__(self):
        # during unwind, global_state.top points to frame that first caught
        # the UnwindException, whilst frame_bottom points to the frame
        # that most recently caught the UnwindException. In a normal
        # situation, frame_bottom is global_state.top.f_back.f_back.etc...
        # To switch manually to a different frame, code issues a regular
        # UnwindException first, to empty the C stack, and then issues a
        # second UnwindException carrying no frame states at all, after
        # setting global_state.top to the state to switch to: with
        # frame_bottom left NULL, slp_main_loop just resumes from
        # global_state.top (see ll_frame_switch, STATE 0).
        self.frame_bottom = frame.null_state
    __init__.stackless_explicit = True
def slp_main_loop():
    """
    slp_main_loop() keeps resuming the saved frames until the outermost
    one returns.  Each iteration decodes the top frame and calls back
    into its function; a new UnwindException splices its saved frames
    onto the chain, an ordinary exception is stored for the caller's
    fetch_retval_*() (or re-raised at the outermost frame), and a normal
    return pops to the previous frame.
    """
    pending = global_state.top
    while True:
        back = pending.f_back
        decoded = frame.decodestate(pending.f_restart)
        (fn, global_state.restart_substate, signature_index) = decoded
        try:
            call_function(fn, signature_index)
        except UnwindException, u:   #XXX annotation support needed
            if u.frame_bottom:
                # attach the newly saved frames in front of 'back'
                u.frame_bottom.f_back = back
            pending = global_state.top
            continue
        except Exception, e:
            if not back:
                raise   # nobody left to deliver it to
            global_state.exception = e
        else:
            if not back:
                return
        global_state.top = pending = back
slp_main_loop.stackless_explicit = True
def add_frame_state(u, frame_state):
    """Append *frame_state* to the chain of frames collected by the
    UnwindException *u* while it propagates up the C stack."""
    if u.frame_bottom:
        # frames already saved during this unwind: chain below the bottom
        u.frame_bottom.f_back = frame_state
    else:
        # first frame saved: it becomes the new top of the whole chain
        global_state.top = frame_state
    u.frame_bottom = frame_state
add_frame_state.stackless_explicit = True
def fetch_retval_void():
    """Called at a Void resume point: re-raise any exception stored for
    this frame, otherwise do nothing."""
    pending = global_state.exception
    if pending:
        global_state.exception = None
        raise pending
fetch_retval_void.stackless_explicit = True
def fetch_retval_long():
    """Return the saved Signed result, re-raising any stored exception."""
    pending = global_state.exception
    if not pending:
        return global_state.retval_long
    global_state.exception = None
    raise pending
fetch_retval_long.stackless_explicit = True
def fetch_retval_longlong():
    """Return the saved SignedLongLong result, re-raising any stored
    exception."""
    pending = global_state.exception
    if not pending:
        return global_state.retval_longlong
    global_state.exception = None
    raise pending
fetch_retval_longlong.stackless_explicit = True
def fetch_retval_float():
    """Return the saved Float result, re-raising any stored exception."""
    pending = global_state.exception
    if not pending:
        return global_state.retval_float
    global_state.exception = None
    raise pending
fetch_retval_float.stackless_explicit = True
def fetch_retval_addr():
    """Return the saved Address result (resetting the slot to NULL),
    re-raising any stored exception."""
    pending = global_state.exception
    if pending:
        global_state.exception = None
        raise pending
    result = global_state.retval_addr
    global_state.retval_addr = llmemory.NULL
    return result
fetch_retval_addr.stackless_explicit = True
def fetch_retval_ref():
    """Return the saved GC reference result (resetting the slot so the
    object is not kept alive), re-raising any stored exception."""
    pending = global_state.exception
    if pending:
        global_state.exception = None
        raise pending
    result = global_state.retval_ref
    global_state.retval_ref = frame.null_saved_ref
    return result
fetch_retval_ref.stackless_explicit = True
def fetch_retval_weak():
    """Return the saved WeakGcAddress result, re-raising any stored
    exception."""
    pending = global_state.exception
    if not pending:
        return global_state.retval_weak
    global_state.exception = None
    raise pending
fetch_retval_weak.stackless_explicit = True
| Python |
#
| Python |
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython import extfunctable
from pypy.rpython.typesystem import getfunctionptr
from pypy.rpython.annlowlevel import annotate_lowlevel_helper
from pypy.objspace.flow.model import FunctionGraph
from pypy.tool.sourcetools import compile2
from pypy.annotation import model as annmodel
from pypy.rpython.annlowlevel import MixLevelHelperAnnotator
# ____________________________________________________________
# generic data types
# the erased type used to save any GC pointer
SAVED_REFERENCE = llmemory.GCREF
null_saved_ref = lltype.nullptr(SAVED_REFERENCE.TO)
# all erased storage types with the field-name suffix used for each
# (retval_<name>, fetch_retval_<name>, call_function_retval_<name>, ...)
STORAGE_TYPES_AND_FIELDS = [
    (lltype.Void, 'void'),
    (SAVED_REFERENCE, 'ref'),
    (llmemory.Address, 'addr'),
    (lltype.SignedLongLong, 'longlong'),
    (lltype.Signed, 'long'),
    (lltype.Float, 'float'),
    (llmemory.WeakGcAddress, 'weak'),
    ]
STORAGE_TYPES = []
for _TYPE, _FIELD in STORAGE_TYPES_AND_FIELDS:
    # we do not want to add the longlong type twice on 64 bits
    # machines on which longlong is the same as signed
    if _TYPE not in STORAGE_TYPES:
        STORAGE_TYPES.append(_TYPE)
# low bits of a signature_index encode the index into STORAGE_TYPES of
# the (erased) return type
storage_type_bitmask = 0x07  # a power of two - 1
assert storage_type_bitmask >= len(STORAGE_TYPES)
STORAGE_FIELDS = dict(STORAGE_TYPES_AND_FIELDS)
del STORAGE_FIELDS[lltype.Void]
# define RETVAL_VOID, RETVAL_REF, ... as the STORAGE_TYPES indices
for (_key, _value) in STORAGE_TYPES_AND_FIELDS:
    globals()['RETVAL_' + _value.upper()] = STORAGE_TYPES.index(_key)
def storage_type(T):
    """Return the 'erased' storage type corresponding to T.

    GC pointers erase to SAVED_REFERENCE, raw pointers and addresses to
    Address, (un)signed long longs to SignedLongLong, floats stay Float,
    weak addresses stay WeakGcAddress, and every other primitive erases
    to Signed.  Void is passed through; anything else is an error.
    """
    if T is lltype.Void:
        return lltype.Void
    if isinstance(T, lltype.Ptr):
        if T._needsgc():
            return SAVED_REFERENCE
        return llmemory.Address
    if T is lltype.Float:
        return lltype.Float
    if T in [lltype.SignedLongLong, lltype.UnsignedLongLong]:
        return lltype.SignedLongLong
    if T is llmemory.Address:
        return llmemory.Address
    if T is llmemory.WeakGcAddress:
        return llmemory.WeakGcAddress
    if isinstance(T, lltype.Primitive):
        return lltype.Signed
    raise Exception("don't know about %r" % (T,))
# ____________________________________________________________
# structures for saved frame states
# common header of every saved frame: 'f_back' links to the next older
# frame, 'f_restart' is an index into the global FRAME_INFO_ARRAY
STATE_HEADER = lltype.GcStruct('state_header',
                               ('f_back', lltype.Ptr(lltype.GcForwardReference())),
                               ('f_restart', lltype.Signed))
STATE_HEADER.f_back.TO.become(STATE_HEADER)
null_state = lltype.nullptr(STATE_HEADER)
# opaque pointer type under which frame states are exposed to RPython
OPAQUE_STATE_HEADER_PTR = lltype.Ptr(
    extfunctable.frametop_type_info.get_lltype())
def make_state_header_type(name, *fields):
    """Build a frame GcStruct: the STATE_HEADER followed by *fields*."""
    return lltype.GcStruct(name,
                           ('header', STATE_HEADER),
                           *fields)
# ____________________________________________________________
# master array giving information about the restart points
# (STATE_HEADER.frameinfo is an index into this array)
# one entry per restart point; see decodestate() for the encoding of
# 'fnaddr' and 'info'
FRAME_INFO = lltype.Struct('frame_info',
                           ('fnaddr', llmemory.Address),
                           ('info', lltype.Signed))
FRAME_INFO_ARRAY = lltype.Array(FRAME_INFO)
def decodestate(index):
    """Decode masterarray[index] into (fnaddr, restartstate,
    signature_index).

    A function's entry 0 has fnaddr set and info == signature_index;
    its following resume points have a NULL fnaddr and info == their
    restart state, pointing back to entry 'index - restartstate'
    (see RestartInfo.compress()).
    """
    from pypy.translator.stackless.code import global_state
    masterarray = global_state.masterarray
    finfo = masterarray[index]
    if finfo.fnaddr:
        restartstate = 0
    else:
        restartstate = finfo.info
        finfo = masterarray[index - restartstate]
    return (finfo.fnaddr,  # function ptr
            restartstate,  # restart state within function
            finfo.info)    # signature_index
decodestate.stackless_explicit = True
class RestartInfo(object):
    """A RestartInfo is created (briefly) for each graph that contains
    a resume point.

    In addition, a RestartInfo is created for each function that needs
    to do explicit stackless manipulations
    (e.g. code.yield_current_frame_to_caller)."""

    def __init__(self, func_or_graph, resume_point_count):
        self.func_or_graph = func_or_graph
        self.resume_point_count = resume_point_count
        self.frame_types = ()

    def compress(self, signaturecodes, rtyper):
        """This returns sufficient information to be able to build the
        entries that will go in the global array of restart
        information: a list of (fnaddr, info) pairs, one per resume
        point (empty if there are none).  The first pair holds the
        function address and its signature_index; the following pairs
        have a NULL address and their restart-state number instead
        (decoded again by decodestate())."""
        if self.resume_point_count > 0:
            bk = rtyper.annotator.bookkeeper
            graph = self.func_or_graph
            if not isinstance(graph, FunctionGraph):
                graph = bk.getdesc(graph).getuniquegraph()
            funcptr = getfunctionptr(graph)
            FUNC = lltype.typeOf(funcptr).TO
            rettype_index = STORAGE_TYPES.index(storage_type(FUNC.RESULT))
            cache = signaturecodes[rettype_index]
            key = tuple([storage_type(ARG) for ARG in FUNC.ARGS])
            try:
                signature_index = cache[key]
            except KeyError:
                # allocate a new code: position in the cache, shifted
                # past the bits reserved for the return-type index
                signature_index = len(cache) * (storage_type_bitmask+1)
                signature_index |= rettype_index
                cache[key] = signature_index
            assert (signature_index & storage_type_bitmask) == rettype_index
            result = [(llmemory.cast_ptr_to_adr(funcptr), signature_index)]
            for i in range(1, self.resume_point_count):
                result.append((llmemory.NULL, i))
        else:
            result = []
        return result

    # registry of the hand-written stackless_explicit helpers, filled at
    # import time; their entries occupy the start of the masterarray
    prebuilt = []
    prebuiltindex = 0

    def add_prebuilt(cls, func, frame_types):
        """Register *func* (with one frame type per resume state) and
        return its base index in the masterarray."""
        assert func.stackless_explicit    # did you forget this flag?
        restart = cls(func, len(frame_types))
        restart.frame_types = frame_types
        n = cls.prebuiltindex
        cls.prebuilt.append(restart)
        cls.prebuiltindex += len(frame_types)
        return n
    add_prebuilt = classmethod(add_prebuilt)
| Python |
#ra
| Python |
from pypy.objspace.flow.model import *
def copyvar(annotator, v):
    """Return a fresh Variable modelled on v: same annotation binding
    (when the annotator knows v) and same concretetype if present."""
    assert isinstance(v, Variable)
    duplicate = Variable(v)
    if annotator is not None and v in annotator.bindings:
        annotator.transfer_binding(duplicate, v)
    if hasattr(v, 'concretetype'):
        duplicate.concretetype = v.concretetype
    return duplicate
def varoftype(concretetype, name=None):
    """Make a new (optionally named) Variable carrying 'concretetype'."""
    v = Variable(name)
    v.concretetype = concretetype
    return v
def insert_empty_block(annotator, link, newops=()):
    """Insert and return a new block along the given link.

    'newops' is an optional sequence of SpaceOperations to place in the
    new block.  (Fix: the default used to be a mutable list literal,
    which is shared across calls; an empty tuple is equivalent and safe.)
    """
    # collect the variables to pass through: everything carried by the
    # link plus every variable the new operations read
    vars = {}
    for v in link.args:
        if isinstance(v, Variable):
            vars[v] = True
    for op in newops:
        for v in op.args:
            if isinstance(v, Variable):
                vars.setdefault(v, True)
        # results of the new ops are born inside the new block, so they
        # must not become input arguments of it
        vars[op.result] = False
    vars = [v for v, keep in vars.items() if keep]
    mapping = {}
    for v in vars:
        mapping[v] = copyvar(annotator, v)
    newblock = Block(vars)
    newblock.operations.extend(newops)
    newblock.closeblock(Link(link.args, link.target))
    newblock.renamevariables(mapping)
    link.args[:] = vars
    link.target = newblock
    return newblock
def insert_empty_startblock(annotator, graph):
    """Prepend a no-op block in front of the graph's current start block."""
    newargs = [copyvar(annotator, v) for v in graph.startblock.inputargs]
    block = Block(newargs)
    block.closeblock(Link(newargs, graph.startblock))
    graph.startblock.isstartblock = False
    graph.startblock = block
    graph.startblock.isstartblock = True
def starts_with_empty_block(graph):
    """True if the start block does nothing but jump on unmodified args."""
    start = graph.startblock
    if start.operations:
        return False
    if start.exitswitch is not None:
        return False
    return start.exits[0].args == graph.getargs()
def split_block(annotator, block, index, _forcelink=None):
    """return a link where prevblock is the block leading up but excluding the
    index'th operation and target is a new block with the necessary variables
    passed on.  NOTE: if you call this after rtyping, you WILL need to worry
    about keepalives, you may use backendopt.support.split_block_with_keepalive.
    """
    assert 0 <= index <= len(block.operations)
    if block.exitswitch == c_last_exception:
        # cannot split *after* the exception-raising operation itself
        assert index < len(block.operations)
    #varmap is the map between names in the new and the old block
    #but only for variables that are produced in the old block and needed in
    #the new one
    varmap = {}
    vars_produced_in_new_block = {}
    def get_new_name(var):
        # rename 'var' for use inside the new block: constants and
        # variables born in the new block pass through unchanged
        if var is None:
            return None
        if isinstance(var, Constant):
            return var
        if var in vars_produced_in_new_block:
            return var
        if var not in varmap:
            varmap[var] = copyvar(annotator, var)
        return varmap[var]
    # rebuild the moved operations with renamed argument variables
    moved_operations = block.operations[index:]
    new_moved_ops = []
    for op in moved_operations:
        newop = SpaceOperation(op.opname,
                               [get_new_name(arg) for arg in op.args],
                               op.result)
        new_moved_ops.append(newop)
        vars_produced_in_new_block[op.result] = True
    moved_operations = new_moved_ops
    links = block.exits
    block.exits = None
    for link in links:
        for i, arg in enumerate(link.args):
            #last_exception and last_exc_value are considered to be created
            #when the link is entered
            if link.args[i] not in [link.last_exception, link.last_exc_value]:
                link.args[i] = get_new_name(link.args[i])
    exitswitch = get_new_name(block.exitswitch)
    #the new block gets all the attributes relevant to outgoing links
    #from block the old block
    if _forcelink is not None:
        assert index == 0
        linkargs = list(_forcelink)
    else:
        linkargs = varmap.keys()
    newblock = Block([get_new_name(v) for v in linkargs])
    newblock.operations = moved_operations
    newblock.recloseblock(*links)
    newblock.exitswitch = exitswitch
    link = Link(linkargs, newblock)
    # the old block keeps the leading operations and jumps to the new one
    block.operations = block.operations[:index]
    block.recloseblock(link)
    block.exitswitch = None
    return link
def split_block_at_start(annotator, block):
    """Split 'block' before its very first operation.

    Passing _forcelink=block.inputargs preserves the order and identity
    of the original input arguments in the second block.
    """
    return split_block(annotator, block, 0, _forcelink=block.inputargs)
def remove_direct_loops(annotator, graph):
    """Ensure that no link has common input and output variables.

    Useful for code generators: a block whose exit points straight back
    to itself would otherwise force them to worry about overwriting
    input variables while emitting a sequence of assignments.
    """
    def break_self_loop(link):
        # a link from a block directly back to itself is routed through
        # a fresh empty block, giving the assignments distinct targets
        if isinstance(link, Link) and link.prevblock is link.target:
            insert_empty_block(annotator, link)
    traverse(break_self_loop, graph)
def remove_double_links(annotator, graph):
    """Ensure no block has two incoming links from the same other block.

    This lets code generators implement argument passing along links
    with phi nodes, since an argument's value is then determined purely
    by which block control arrived from.
    """
    def split_duplicates(block):
        if not isinstance(block, Block):
            return
        seen = {}
        duplicates = []
        for link in block.exits:
            if link.target in seen:
                duplicates.append(link)
            seen[link.target] = True
        for link in duplicates:
            insert_empty_block(annotator, link)
    traverse(split_duplicates, graph)
def no_links_to_startblack(graph):
    """Ensure the start block of 'graph' has no incoming links.

    If any link targets the current start block, a fresh empty start
    block is inserted in front of it.  (Fix: the original 'break' only
    exited the inner loop, so the scan kept iterating over all remaining
    blocks after a hit had already been found.)
    """
    for block in graph.iterblocks():
        for link in block.exits:
            if link.target == graph.startblock:
                insert_empty_startblock(None, graph)
                return
| Python |
from pypy.translator.test import rpystone
from pypy.translator.c.symboltable import getsymboltable
def make_target_definition(LOOPS):
    # Build the (entry_point, target, run) triple used to translate and
    # benchmark rpystone with a fixed number of LOOPS.
    def entry_point(loops):
        # reset all of rpystone's global state so that repeated calls
        # start from a clean slate
        g = rpystone.g
        g.IntGlob = 0
        g.BoolGlob = 0
        g.Char1Glob = '\0'
        g.Char2Glob = '\0'
        for i in range(51):
            g.Array1Glob[i] = 0
        for i in range(51):
            for j in range(51):
                g.Array2Glob[i][j] = 0
        g.PtrGlb = None
        g.PtrGlbNext = None
        # id(g) lets run() locate the translated 'g' in the symbol table
        return rpystone.pystones(loops), id(g)

    def target(*args):
        return entry_point, [int]

    def run(c_entry_point):
        # benchmark the translated entry point ...
        res = c_entry_point(LOOPS)
        (benchtime, stones), _ = res
        print "translated rpystone.pystones time for %d passes = %g" % \
              (LOOPS, benchtime)
        print "This machine benchmarks at %g translated rpystone pystones/second" % (stones,)
        res = c_entry_point(50000)
        _, g_addr = res
        # ... then CPython for comparison
        print "CPython:"
        benchtime, stones = rpystone.pystones(50000)
        print "rpystone.pystones time for %d passes = %g" % \
              (50000, benchtime)
        print "This machine benchmarks at %g rpystone pystones/second" % (stones,)
        # cross-check the translated globals against the interpreted ones
        symtable = getsymboltable(c_entry_point.__module__)
        check_g_results(symtable, g_addr)

    return entry_point, target, run
def check_g_results(symtable, g_addr):
    # Compare the translated global 'g' structure (looked up by address
    # in the C symbol table) against the interpreted rpystone.g.
    try:
        g_ptr = symtable[g_addr]
    except KeyError:
        print "No low-level equivalent of structure 'g' found."
    else:
        assert g_ptr.inst_BoolGlob == rpystone.g.BoolGlob
        assert g_ptr.inst_Char1Glob == rpystone.g.Char1Glob
        assert g_ptr.inst_Char2Glob == rpystone.g.Char2Glob
        compare_array(g_ptr.inst_Array1Glob, rpystone.g.Array1Glob)
        compare_array_of_array(g_ptr.inst_Array2Glob, rpystone.g.Array2Glob)
        compare_record(g_ptr.inst_PtrGlb, rpystone.g.PtrGlb)
        compare_record(g_ptr.inst_PtrGlbNext, rpystone.g.PtrGlbNext)
def compare_array_of_array(array, pylist):
    """Assert that a low-level array of arrays matches a list of lists."""
    llitems = array.ll_items()
    assert len(llitems) == len(pylist)
    for llitem, pyitem in zip(llitems, pylist):
        compare_array(llitem, pyitem)
def compare_array(array, pylist):
    """Assert that a low-level array holds exactly the items of 'pylist'."""
    llitems = array.ll_items()
    assert len(llitems) == len(pylist)
    for llitem, pyitem in zip(llitems, pylist):
        assert llitem == pyitem
def compare_record(struct, pyrecord):
    """Assert that a low-level Record structure matches 'pyrecord';
    both may be null/None, in which case they must agree on nullness."""
    if pyrecord is None:
        assert not struct
        return
    assert struct
    compare_record(struct.inst_PtrComp, pyrecord.PtrComp)
    assert struct.inst_Discr == pyrecord.Discr
    assert struct.inst_EnumComp == pyrecord.EnumComp
    assert struct.inst_IntComp == pyrecord.IntComp
    compare_string(struct.inst_StringComp, pyrecord.StringComp)
def compare_string(str, pystr):
    """Assert that a low-level string (with a .chars array) equals 'pystr'."""
    assert len(str.chars) == len(pystr)
    for i, ch in enumerate(pystr):
        assert str.chars[i] == ch
#if __name__ == "__main__":
# # just run it without translation
# LOOPS = 50000
# target()
# run(entry_point)
| Python |
import sys
import os
# RTYPERORDER has the shape "<anything>[,<module-list-file>]"; the
# optional second part names a file of "module:function" prefixes, one
# per line, giving the order in which blocks get specialized.
# NOTE(review): os.getenv returns None when RTYPERORDER is unset, which
# would crash on .split() -- presumably this module is only imported
# when the variable is set; confirm with the importer.
RTYPERORDER = os.getenv('RTYPERORDER').split(',')
if len(RTYPERORDER) == 2:
    module_list = RTYPERORDER[1]
else:
    module_list = 'module-list'   # default filename in the current dir
lst = open(module_list, 'r')
try:
    print "reading module-list: %s" % module_list
    prefixes = lst.readlines()
finally:
    lst.close()
# keep only non-empty, non-comment lines
prefixes = [line.strip() for line in prefixes]
prefixes = [line for line in prefixes if line and not line.startswith('#')]

# sort key for blocks that match no prefix at all (sorts last)
NOMATCH = sys.maxint
def order(annotator, pending):
cache = {}
annotated = annotator.annotated
def indx(block):
func = annotated[block]
module = func.__module__
if module is None:
module = 'None'
tag = "%s:%s" % (module, func.__name__)
try:
return cache[tag]
except KeyError:
match = NOMATCH
i = 0
for pfx in prefixes:
if tag.startswith(pfx):
if match == NOMATCH:
match = i
else:
if len(pfx) > len(prefixes[match]):
match = i
i += 1
cache[tag] = match, module
return match
pending.sort(lambda blk1, blk2: cmp(indx(blk1), indx(blk2)))
cur_module = ['$']
def track(block):
module = annotated[block].__module__
if module != cur_module[0]:
print "--- Specializing blocks in module: %s" % module
cur_module[0] = module
return track
| Python |
import os, sys
#from pypy.translator.goal import richards

# path of the richards benchmark source; target() execs it N times
modfilename = os.path.join(os.path.dirname(__file__), 'richards.py')

# Number of times richards is imported in parallel.
# Can be changed on the command line, e.g.
#
#    translate.py targetvarsized.py 20
#
DEFAULT_CODE_SIZE_FACTOR = 10

# this target accepts extra command-line arguments (the factor above)
take_options = True
# __________ Entry point __________
def richards_main(fn, iterations):
    """Run one copy of the richards benchmark and report its timings.

    'fn' is the entry_point of one exec'd richards module; returns
    False when the benchmark's self-check failed, True otherwise.
    """
    os.write(1, "Richards benchmark (RPython) starting...\n")
    result, startTime, endTime = fn(iterations)
    if not result:
        os.write(2, "Incorrect results!\n")
        return False
    os.write(1, "finished.\n")
    total_s = endTime - startTime
    avg = total_s * 1000 / iterations
    os.write(1, "Total time for %d iterations: %f secs\n" %(iterations, total_s))
    os.write(1, "Average time per iteration: %f ms\n" %(avg))
    return True
def entry_point(argv):
    """Run every generated richards copy; exit status 0 on success,
    1 as soon as one copy reports incorrect results."""
    for benchfn in functions:
        if not richards_main(benchfn, 10):
            return 1
    return 0
# _____ Define and setup target ___
def target(driver, args, config):
    # Build N independent copies of the richards module by exec-ing its
    # source under fresh module dicts -- this inflates the amount of
    # code to translate ("variable sized" target).
    global modules, functions
    if len(args) == 0:
        N = DEFAULT_CODE_SIZE_FACTOR
    elif len(args) == 1:
        N = int(args[0])
    else:
        raise ValueError("too many command-line arguments")
    modules = []
    functions = []
    # read the richards source once; each copy gets its own globals
    f = open(modfilename)
    source = f.read()
    f.close()
    for i in range(N):
        d = {'__name__': 'richards%d' % i}
        exec source in d
        modules.append(d)
        functions.append(d['entry_point'])
    return entry_point, None
| Python |
# functions to query information out of the translator and annotator from the debug prompt of translate
import types
import re
import pypy.annotation.model as annmodel
import pypy.objspace.flow.model as flowmodel
# query used for sanity checks by translate
def short_binding(annotator, var):
    """One-word description of the annotation bound to 'var'
    ("?" when the annotator has no binding for it)."""
    try:
        binding = annotator.binding(var)
    except KeyError:
        return "?"
    clsname = binding.__class__.__name__
    if binding.is_constant():
        return 'const %s' % clsname
    return clsname
def graph_sig(t, g):
    """Render the annotated signature of graph 'g' as "args -> result"."""
    ann = t.annotator
    describe = lambda v: short_binding(ann, v)
    argdesc = ', '.join([describe(v) for v in g.getargs()])
    return "%s -> %s" % (argdesc, describe(g.getreturnvar()))
class Found(Exception):
    # used as a non-local "break" to abort flowmodel.traverse() early
    pass
def polluted_qgen(translator):
    """list functions with still real SomeObject variables"""
    # Generator: yields one descriptive line per graph that still has a
    # variable annotated as a plain SomeObject (degraded annotation).
    annotator = translator.annotator
    def visit(block):
        if isinstance(block, flowmodel.Block):
            for v in block.getvariables():
                s = annotator.binding(v, None)
                if s and s.__class__ == annmodel.SomeObject and s.knowntype != type:
                    # abort the traversal as soon as one is found
                    raise Found
    for g in translator.graphs:
        try:
            flowmodel.traverse(visit, g)
        except Found:
            line = "%s: %s" % (g, graph_sig(translator, g))
            yield line
def check_exceptblocks_qgen(translator):
    # Generator: sanity-check the annotations on each graph's except
    # block (exception type/value pair); yields a line per suspect graph.
    annotator = translator.annotator
    for graph in translator.graphs:
        et, ev = graph.exceptblock.inputargs
        s_et = annotator.binding(et, None)
        s_ev = annotator.binding(ev, None)
        if s_et:
            if s_et.knowntype == type:
                if s_et.__class__ == annmodel.SomeObject:
                    # acceptable only when it is exactly "the type of ev"
                    if hasattr(s_et, 'is_type_of') and s_et.is_type_of == [ev]:
                        continue
                else:
                    if s_et.__class__ == annmodel.SomePBC:
                        continue
            yield "%s exceptblock is not completely sane" % graph.name
def check_methods_qgen(translator):
    # Generator: detect methods overridden in a subclass that the
    # annotator never saw ("lost" methods); yields one line per find.
    from pypy.annotation.description import FunctionDesc, MethodDesc
    def ismeth(s_val):
        # True for a non-None PBC annotation whose kind is MethodDesc
        if not isinstance(s_val, annmodel.SomePBC):
            return False
        if s_val.isNone():
            return False
        return s_val.getKind() is MethodDesc
    bk = translator.annotator.bookkeeper
    classdefs = bk.classdefs
    # collect every classdef together with its method-valued attributes
    withmeths = []
    for clsdef in classdefs:
        meths = []
        for attr in clsdef.attrs.values():
            if ismeth(attr.s_value):
                meths.append(attr)
        if meths:
            withmeths.append((clsdef, meths))
    for clsdef, meths in withmeths:
        n = 0   # NOTE(review): unused
        subclasses = []
        for clsdef1 in classdefs:
            if clsdef1.issubclass(clsdef):
                subclasses.append(clsdef1)
        for meth in meths:
            name = meth.name
            # the function descs annotation knows for this method
            funcs = dict.fromkeys([desc.funcdesc
                                   for desc in meth.s_value.descriptions])
            for subcls in subclasses:
                if not subcls.classdesc.find_source_for(name):
                    continue
                c = subcls.classdesc.read_attribute(name)
                if isinstance(c, flowmodel.Constant):
                    if not isinstance(c.value, (types.FunctionType,
                                                types.MethodType)):
                        continue
                    c = bk.getdesc(c.value)
                if isinstance(c, FunctionDesc):
                    if c not in funcs:
                        yield "lost method: %s %s %s %s" % (name, subcls.name, clsdef.name, subcls.attrs.keys() )
def qoutput(queryg, write=None):
if write is None:
def write(s):
print s
c = 0
for bit in queryg:
write(bit)
c += 1
return c
def polluted(translator):
c = qoutput(polluted_qgen(translator))
print c
def sanity_check_methods(translator):
lost = qoutput(check_methods_qgen(translator))
print lost
| Python |
def getitem(list, index):
    """Return list[index]; any indexing error propagates to the caller."""
    item = list[index]
    return item
def entry_point(i):
    # The second call passes None on purpose: this target exists to
    # exercise how the translated code reacts to indexing a None value.
    return getitem([i, 2, 3, 4], 2) + getitem(None, i)
def target(*args):
    """Translation hook: the entry point takes a single int argument."""
    return (entry_point, [int])
def get_llinterp_args():
    """Arguments used when running this target on the llinterpreter."""
    return [1]
# _____ Run translated _____
def run(c_entry_point):
    """Invoke the compiled entry point once, with argument 0."""
    c_entry_point(0)
| Python |
# Demo script meant to be run on top of PyPy itself: print something
# simple, then exercise app-level compilation via the 'code' module.
print '--- beginning of PyPy run of app_example.py ---'
print 6*7
print "OK, we managed to print a good number, now let's try 'import code'"
print "(this will last a while, because compiling happens at app-level)"
import code
print "fine, we managed to import 'code', now let's run code.interact()"
code.interact()
| Python |
# for test_app_main
# Tiny module fixture: prints its import context so the tests can check
# how the interpreter set up __name__, __file__ and sys.argv.
import sys
print 'mymodule running'
print 'Name:', __name__
print 'File:', __file__
print 'Argv:', sys.argv
somevalue = "foobar"   # looked up by the tests after importing
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path.  If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ is missing when run interactively: fall back to argv[0]
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))

    # walk upwards until a directory named 'part' is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)

    pypy_root = os.path.join(head, '')
    # put the parent of the pypy root at the front of sys.path
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)

    # alias already-imported top-level modules that live below the root
    # under their dotted package name as well
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): a basename never contains '.', so this check
        # seems to always 'continue', leaving 'munged' empty -- confirm
        # whether the aliasing below is effectively dead code.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod

    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)

    return partdir, this_dir
def __clone():
    """ clone master version of autopath.py into all subdirs """
    from os.path import join, walk
    if not this_dir.endswith(join('pypy','tool')):
        raise EnvironmentError("can only clone master version "
                               "'%s'" % join(pypydir, 'tool',_myname))

    def sync_walker(arg, dirname, fnames):
        # 'arg' is the master file's content; rewrite out-of-date copies
        if _myname in fnames:
            fn = join(dirname, _myname)
            f = open(fn, 'rwb+')
            try:
                if f.read() == arg:
                    print "checkok", fn
                else:
                    print "syncing", fn
                    # NOTE(review): rebinding 'f' leaves the first handle
                    # unclosed until GC, and 'rwb+' is an unusual open
                    # mode -- confirm this is intended.
                    f = open(fn, 'w')
                    f.write(arg)
            finally:
                f.close()
    s = open(join(pypydir, 'tool', _myname), 'rb').read()
    walk(pypydir, sync_walker, s)
_myname = 'autopath.py'

# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    # running the master copy directly re-clones it into all subdirs
    __clone()
| Python |
#empty
| Python |
# benchmarks on a unix machine.
# to be executed in the goal folder,
# where a couple of pypy-* files is expected.
import os, sys, time, pickle

# how each benchmark is invoked inside the target interpreter, which
# output line carries the score, and whether bigger numbers are better
PYSTONE_CMD = 'from test import pystone;pystone.main(%s)'
PYSTONE_PATTERN = 'This machine benchmarks at'
PYSTONE_ASCENDING_GOOD = True

RICHARDS_CMD = 'from richards import *;main(iterations=%d)'
RICHARDS_PATTERN = 'Average time per iteration:'
RICHARDS_ASCENDING_GOOD = False
class BenchmarkResult(object):
    """Persistent per-benchmark best-result store.

    Two dicts keyed by benchmark name are pickled to 'filename': how
    many runs have been recorded ('n_results') and the best result seen
    ('best_result').  A benchmark becomes 'stable' after max_results
    recorded runs.
    """

    def __init__(self, filename, max_results=10):
        self.filename = filename
        self.max_results = max_results
        if os.path.exists(filename):
            f = open(filename, 'r')
            self.n_results = pickle.load(f)
            self.best_result = pickle.load(f)
            f.close()
            # any exception while loading the file is best reported
            # as a crash, instead of as a silent loss of all the
            # data :-/
        else:
            self.n_results = {}
            self.best_result = {}

    def is_stable(self, name):
        """True once 'name' has been benchmarked max_results times."""
        try:
            return self.n_results[name] >= self.max_results
        except KeyError:
            # never seen this benchmark yet.  (Fix: this was a bare
            # 'except:', which also swallowed unrelated errors.)
            return False

    def update(self, name, result, ascending_good):
        """Record one run of 'name' and persist the store to disk.

        'ascending_good' selects whether larger or smaller results are
        considered better.
        """
        try:
            if ascending_good:
                self.best_result[name] = max(self.best_result[name], result)
            else:
                self.best_result[name] = min(self.best_result[name], result)
        except KeyError:
            # first run of this benchmark
            self.n_results[name] = 0
            self.best_result[name] = result
        self.n_results[name] += 1
        f = open(self.filename, 'w')
        pickle.dump(self.n_results , f)
        pickle.dump(self.best_result, f)
        f.close()

    def get_best_result(self, name):
        """Best recorded result for 'name' (KeyError if never run)."""
        return self.best_result[name]
def get_result(txt, pattern):
for line in txt.split('\n'):
if line.startswith(pattern):
break
else:
print 'warning: this is no valid output'
return 99999.0
return float(line.split()[len(pattern.split())])
def run_cmd(cmd):
    """Run 'cmd' in a shell and return its combined stdout+stderr."""
    #print "running", cmd
    pipe = os.popen(cmd + ' 2>&1')
    output = pipe.read()
    return output
def run_pystone(executable='/usr/local/bin/python', n=0):
    """Run the pystone benchmark under 'executable', return its score."""
    loops = str(n) and n or ''   # 0 means "use pystone's default"
    argstr = PYSTONE_CMD % loops
    txt = run_cmd('"%s" -c "%s"' % (executable, argstr))
    return get_result(txt, PYSTONE_PATTERN)
def run_richards(executable='/usr/local/bin/python', n=5):
    """Run richards for 'n' iterations under 'executable', return the
    average milliseconds per iteration."""
    argstr = RICHARDS_CMD % n
    txt = run_cmd('"%s" -c "%s"' % (executable, argstr))
    return get_result(txt, RICHARDS_PATTERN)
def get_executables():
    """Return the pypy-* executables in the current directory, sorted
    by revision number, highest first.

    Filenames are expected to look like "pypy-<backend>-<revision>...";
    entries without a third dash-separated part are skipped.  (Fix: the
    skip used a bare 'except:', which also swallowed KeyboardInterrupt
    and genuine errors; only IndexError is expected here.)
    """
    exes = []
    for exe in [os.path.join('.', name) for name in os.listdir('.') if name.startswith('pypy-')]:
        if os.path.isdir(exe):
            continue
        try:
            exes.append((exe.split('-')[2], exe))
        except IndexError:
            pass    # skip filenames without a version number part
    exes.sort()
    exes.reverse()
    return [pair[1] for pair in exes]
def main():
    # Benchmark the reference CPython interpreters first, then every
    # pypy-* executable found in the current directory; best results are
    # cached across runs in a pickle file.
    benchmark_result = BenchmarkResult('bench-unix.benchmark_result')
    print 'date size codesize executable richards pystone'
    sys.stdout.flush()
    ref_rich, ref_stone = None, None
#    for exe in '/usr/local/bin/python2.5 python2.4 python2.3'.split():
    for exe in 'python2.4 python2.3'.split():
        v = os.popen(exe + ' -c "import sys;print sys.version.split()[0]"').read().strip()
        if not v:
            continue    # this interpreter is not installed
        r = v + '_richards'
        if not benchmark_result.is_stable(r):
            benchmark_result.update(r, run_richards(exe), RICHARDS_ASCENDING_GOOD)
        rich = benchmark_result.get_best_result(r)
        if not ref_rich:
            ref_rich = rich    # first CPython becomes the reference
        p = v + '_pystone'
        if not benchmark_result.is_stable(p):
            benchmark_result.update(p, run_pystone(exe), PYSTONE_ASCENDING_GOOD)
        stone = benchmark_result.get_best_result(p)
        if not ref_stone:
            ref_stone = stone
        # NOTE(review): 'fmt' is defined inside this loop but also used
        # by the pypy loop below -- it only exists if a CPython ran.
        fmt = '%-26s %8s %8s <a href="microbench-archive/%s.txt">%-60s</a> %6dms (%6.1fx) %6d (%6.1fx)'
        print fmt % (time.ctime(), '-', '-', 'python', 'CPython ' + v, rich, rich / ref_rich, stone, stone / ref_stone)
        sys.stdout.flush()
    for exe in get_executables():
        exename = os.path.splitext(exe)[0].lstrip('./')
        ctime = time.ctime( os.path.getmtime(exename) )
        #compute microbenchmark results (only once)
        f = '../microbench/archive/%s.txt' % exe
        if not os.path.exists(f) or os.stat(f).st_size < 100:
            os.chdir('../microbench')
            run_cmd('python2.4 ./microbench.py python2.4 "../goal/%s" > "archive/%s.txt"' % (exe, exe))
            os.chdir('../goal')
        r = exe + '_richards'
        if not benchmark_result.is_stable(r):
            #continue with our regular benchmarks
            benchmark_result.update(r, run_richards(exe, 1), RICHARDS_ASCENDING_GOOD)
        rich = benchmark_result.get_best_result(r)
        p = exe + '_pystone'
        if not benchmark_result.is_stable(p):
            benchmark_result.update(p, run_pystone(exe), PYSTONE_ASCENDING_GOOD)
        stone = benchmark_result.get_best_result(p)
        if 'pypy-cli' in exename:
            # CLI backend: the real binary is main.exe in a data dir
            dirname = exename + '-data'
            codesize = 'N/A'
            try:
                exesize = os.path.getsize(os.path.join(dirname, 'main.exe'))
            except OSError:
                exesize = 'XXX'
        else:
            codesize = os.popen('size "%s" | tail -n1 | cut -f1'%(exename,)).read().strip()
            exesize = os.path.getsize(exe)
        # NOTE(review): the pystone ratio here is ref_stone / stone while
        # the CPython rows above print stone / ref_stone -- one of the
        # two is probably inverted; confirm the intended convention.
        print fmt % (ctime, exesize, codesize, exename, exename, rich, rich / ref_rich, stone, ref_stone / stone)
        sys.stdout.flush()

if __name__ == '__main__':
    main()
| Python |
import os, sys
from pypy.translator.goal import gcbench
def entry_point(argv):
    """Run gcbench; argv[1] optionally gives a repetition count (default 1)."""
    repetitions = 1
    if len(argv) > 1:
        repetitions = int(argv[1])
    while repetitions > 0:
        gcbench.main()
        repetitions -= 1
    return 0
# _____ Define and setup target ___
def target(*args):
    """Translation hook: None argtypes means a standalone argv target."""
    return (entry_point, None)
"""
Why is this a stand-alone target?
The above target specifies None as the argument types list.
This is a case treated specially in the driver.py . If the list
of input types is empty, it is meant to be a list of strings,
actually implementing argv of the executable.
"""
| Python |
from pypy.rlib.jit import hint, we_are_jitted
def jitted():
    # residual call kept outside the JIT (see portal() below); printing
    # lets a test see whether the JIT-generated path was taken
    print "jitted"
def compute(x, y):
    """Add x and y; x is hinted to be a compile-time constant for the JIT."""
    if we_are_jitted():
        jitted()
    hint(x, concrete=True)
    return x + y
# __________ Entry point __________
def entry_point(argv):
if len(argv) <3:
return -2
r = compute(int(argv[1]), int(argv[2]))
print r
return 0
# _____ Define and setup target ___
def target(*args):
    """Translation hook: None argtypes means a standalone argv target."""
    return (entry_point, None)
def portal(drv):
    # JIT hook: return the portal function and a hint-annotator policy.
    from pypy.jit.hintannotator.annotator import HintAnnotatorPolicy
    class MyHintAnnotatorPolicy(HintAnnotatorPolicy):
        def __init__(self):
            HintAnnotatorPolicy.__init__(self, oopspec=True,
                                         novirtualcontainer=True)
        def look_inside_graph(self, graph):
            # keep jitted() out of the JIT so it only runs as a residual
            # call from machine code
            if graph.func is jitted:
                return False
            return True
    return compute, MyHintAnnotatorPolicy()
| Python |
#! /usr/bin/env python
"""
Command-line options for translate:
See below
"""
import sys, os, new
import autopath
from pypy.config.config import to_optparse, OptionDescription, BoolOption, \
ArbitraryOption, StrOption, IntOption, Config, \
ChoiceOption, OptHelpFormatter
from pypy.config.translationoption import get_combined_translation_config
# (name, help text, command-line switches, extra help) per translation
# goal; a leading '?' marks an optional goal that gets no automatic
# "--no-xxx" negation option (see goal_options()).
GOALS= [
        ("annotate", "do type inference", "-a --annotate", ""),
        ("rtype", "do rtyping", "-t --rtype", ""),
        ("prehannotatebackendopt", "backend optimize before hint-annotating",
         "--prehannotatebackendopt", ""),
        ("hintannotate", "hint-annotate", "--hintannotate", ""),
        ("timeshift", "timeshift (jit generation)", "--timeshift", ""),
        ("backendopt", "do backend optimizations", "--backendopt", ""),
        ("source", "create source", "-s --source", ""),
        ("compile", "compile", "-c --compile", " (default goal)"),
        ("?jit", "generate JIT", "--jit", ""),
        ("run", "run the resulting binary", "--run", ""),
        ("llinterpret", "interpret the rtyped flow graphs", "--llinterpret", ""),
       ]
def goal_options():
    """Build a BoolOption for every entry of GOALS and, for each
    non-optional goal, a matching "--no-xxx" negation option."""
    opts = []
    for name, doc, cmdline, extra in GOALS:
        optional = name.startswith('?')
        if optional:
            name = name[1:]
        yesdoc = doc[0].upper()+doc[1:]+extra
        opts.append(BoolOption(name, yesdoc, default=False, cmdline=cmdline,
                               negation=False))
        if not optional:
            opts.append(BoolOption("no_%s" % name, "Don't "+doc, default=False,
                                   cmdline="--no-"+name, negation=False))
    return opts
# options that drive translate.py itself, as opposed to options of the
# translation process (which live in the translation config)
translate_optiondescr = OptionDescription("translate", "XXX", [
    StrOption("targetspec", "XXX", default='targetpypystandalone',
              cmdline=None),
    BoolOption("profile",
               "cProfile (to debug the speed of the translation process)",
               default=False,
               cmdline="--profile"),
    BoolOption("batch", "Don't run interactive helpers", default=False,
               cmdline="--batch", negation=False),
    IntOption("huge", "Threshold in the number of functions after which "
                      "a local call graph and not a full one is displayed",
              default=100, cmdline="--huge"),
    BoolOption("text", "Don't start the pygame viewer", default=False,
               cmdline="--text", negation=False),
    BoolOption("help", "show this help message and exit", default=False,
               cmdline="-h --help", negation=False),
    ArbitraryOption("goals", "XXX",
                    defaultfactory=list),
    # xxx default goals ['annotate', 'rtype', 'backendopt', 'source', 'compile']
    ArbitraryOption("skipped_goals", "XXX",
                    defaultfactory=lambda: ['run']),
    OptionDescription("goal_options",
                      "Goals that should be reached during translation",
                      goal_options()),
])

# defaults that translate.py forces onto the translation configuration
OVERRIDES = {
    'translation.debug': False,
    'translation.insist': False,
    'translation.gc': 'boehm',
    'translation.backend': 'c',
    'translation.stackless': False,
    'translation.backendopt.raisingop2direct_call' : False,
    'translation.backendopt.merge_if_blocks': True,
    'translation.cc': None,
    'translation.profopt': None,
    'translation.output': None,
}
import py
# we want 2.4 expand_default functionality
optparse = py.compat.optparse
from pypy.tool.ansi_print import ansi_log
log = py.log.Producer("translation")
py.log.setconsumer("translation", ansi_log)
def load_target(targetspec):
    """Import the target module named by 'targetspec' and return its
    namespace dict."""
    log.info("Translating target as defined by %s" % targetspec)
    if not targetspec.endswith('.py'):
        targetspec += '.py'
    # make "import translate" work from inside the target module
    sys.modules['translate'] = sys.modules[__name__]
    specname = os.path.splitext(os.path.basename(targetspec))[0]
    sys.path.insert(0, os.path.dirname(targetspec))
    mod = __import__(specname)
    return mod.__dict__
def parse_options_and_load_target():
    # Parse the command line into (translateconfig, config), locate and
    # import the target module, and give the target a chance to adjust
    # both configurations.  Returns (targetspec_dic, translateconfig,
    # config, remaining_args).
    opt_parser = optparse.OptionParser(usage="%prog [options] [target] [target-specific-options]",
                                       prog="translate",
                                       formatter=OptHelpFormatter(),
                                       add_help_option=False)
    opt_parser.disable_interspersed_args()
    config = get_combined_translation_config(
                overrides=OVERRIDES, translating=True)
    to_optparse(config, parser=opt_parser, useoptions=['translation.*'])
    translateconfig = Config(translate_optiondescr)
    to_optparse(translateconfig, parser=opt_parser)
    options, args = opt_parser.parse_args()
    # set goals and skipped_goals
    reset = False
    for name, _, _, _ in GOALS:
        if name.startswith('?'):
            continue
        if getattr(translateconfig.goal_options, name):
            if name not in translateconfig.goals:
                translateconfig.goals.append(name)
        if getattr(translateconfig.goal_options, 'no_'+name):
            if name not in translateconfig.skipped_goals:
                # the first explicit --no-xxx replaces the default skips
                if not reset:
                    translateconfig.skipped_goals[:] = []
                    reset = True
                translateconfig.skipped_goals.append(name)
    if args:
        # first positional argument names the target (with or without .py)
        arg = args[0]
        args = args[1:]
        if os.path.isfile(arg+'.py'):
            assert not os.path.isfile(arg), (
                "ambiguous file naming, please rename %s" % arg)
            translateconfig.targetspec = arg
        elif os.path.isfile(arg) and arg.endswith('.py'):
            translateconfig.targetspec = arg[:-3]
        else:
            log.ERROR("Could not find target %r" % (arg, ))
            sys.exit(1)
    targetspec = translateconfig.targetspec
    targetspec_dic = load_target(targetspec)
    if args and not targetspec_dic.get('take_options', False):
        log.WARNING("target specific arguments supplied but will be ignored: %s" % ' '.join(args))
    # give the target the possibility to get its own configuration options
    # into the config
    if 'get_additional_config_options' in targetspec_dic:
        optiondescr = targetspec_dic['get_additional_config_options']()
        config = get_combined_translation_config(
                optiondescr,
                existing_config=config,
                translating=True)
    # let the target modify or prepare itself
    # based on the config
    if 'handle_config' in targetspec_dic:
        targetspec_dic['handle_config'](config)
    if 'handle_translate_config' in targetspec_dic:
        targetspec_dic['handle_translate_config'](translateconfig)
    if translateconfig.help:
        opt_parser.print_help()
        if 'print_help' in targetspec_dic:
            print "\n\nTarget specific help:\n\n"
            targetspec_dic['print_help'](config)
        print "\n\nFor detailed descriptions of the command line options see"
        print "http://codespeak.net/pypy/dist/pypy/doc/config/commandline.html"
        sys.exit(0)
    return targetspec_dic, translateconfig, config, args
def log_options(options, header="options in effect"):
    """Log every attribute of 'options', sorted by name, under a header."""
    # list options (xxx filter, filter for target)
    log('%s:' % header)
    for name in sorted(options.__dict__.keys()):
        log('%25s: %s' %(name, getattr(options, name)))
def log_config(config, header="config used"):
    """Log the whole configuration object under a header line."""
    log('%s:' % header)
    log(str(config))
def main():
    # Top-level driver: parse options, build the TranslationContext, run
    # the requested goals, and enter the debugger/graph viewer either on
    # request or on error.
    targetspec_dic, translateconfig, config, args = parse_options_and_load_target()
    from pypy.translator import translator
    from pypy.translator import driver
    from pypy.translator.tool.pdbplus import PdbPlusShow
    if translateconfig.profile:
        from cProfile import Profile
        prof = Profile()
        prof.enable()
    else:
        prof = None
    t = translator.TranslationContext(config=config)
    pdb_plus_show = PdbPlusShow(t) # need a translator to support extended commands

    def debug(got_error):
        # Called on both success and failure: dump profiler stats,
        # report the error (if any), then optionally start the
        # interactive debugger and graph page.
        if prof:
            prof.disable()
            statfilename = 'prof.dump'
            log.info('Dumping profiler stats to: %s' % statfilename)
            prof.dump_stats(statfilename)
        tb = None
        if got_error:
            import traceback
            errmsg = ["Error:\n"]
            exc, val, tb = sys.exc_info()
            errmsg.extend([" %s" % line for line in traceback.format_exception(exc, val, tb)])
            block = getattr(val, '__annotator_block', None)
            if block:
                class FileLike:
                    def write(self, s):
                        errmsg.append(" %s" % s)
                errmsg.append("Processing block:\n")
                t.about(block, FileLike())
            log.ERROR(''.join(errmsg))
        else:
            log.event('Done.')
        if translateconfig.batch:
            log.event("batch mode, not calling interactive helpers")
            return
        log.event("start debugger...")
        if not translateconfig.text:
            # show the (hint-)translator graphs in the pygame viewer
            try:
                t1 = drv.hint_translator
            except (NameError, AttributeError):
                t1 = t
            from pypy.translator.tool import graphpage
            page = graphpage.TranslatorPage(t1, translateconfig.huge)
            page.display_background()
        pdb_plus_show.start(tb)

    try:
        drv = driver.TranslationDriver.from_targetspec(targetspec_dic, config, args,
                                                       empty_translator=t,
                                                       disable=translateconfig.skipped_goals,
                                                       default_goal='compile')
        log_config(translateconfig, "translate.py configuration")
        if translateconfig.goal_options.jit:
            if 'portal' not in targetspec_dic:
                raise Exception('target has no portal defined.')
            drv.set_extra_goals(['timeshift'])
        log_config(config.translation, "translation configuration")
        pdb_plus_show.expose({'drv': drv, 'prof': prof})
        if config.translation.output:
            drv.exe_name = config.translation.output
        elif drv.exe_name is None and '__name__' in targetspec_dic:
            # default executable name derived from the target's name
            drv.exe_name = targetspec_dic['__name__'] + '-%(backend)s'
        goals = translateconfig.goals
        drv.proceed(goals)
    except SystemExit:
        raise
    except:
        debug(True)
        raise SystemExit(1)
    else:
        debug(False)

if __name__ == '__main__':
    main()
| Python |
"""
A simple standalone target.
The target below specifies None as the argument types list.
This is a case treated specially in driver.py . If the list
of input types is empty, it is meant to be a list of strings,
actually implementing argv of the executable.
"""
import os, sys
def debug(msg):
    # Write a diagnostic line straight to stderr (fd 2), bypassing
    # sys.stdout -- safe at any point of RPython startup.
    os.write(2, "debug: " + msg + '\n')
# __________ Entry point __________
def entry_point(argv):
    # Minimal RPython entry point; argv is the C-level argv list of strings.
    debug("hello world")
    return 0
# _____ Define and setup target ___
def target(*args):
    # Returning None as the types list means entry_point takes the
    # executable's argv (list of strings) -- special-cased in driver.py.
    return entry_point, None
| Python |
"""
A simple standalone target.
The target below specifies None as the argument types list.
This is a case treated specially in driver.py . If the list
of input types is empty, it is meant to be a list of strings,
actually implementing argv of the executable.
"""
import os, sys
# __________ Entry point __________
class A(object):
    # Plain attribute bag; instances get 'x', 'next' and 'b' set below.
    pass
class B(object):
    # Plain attribute bag; instances get an 'x' attribute in f().
    pass
def f(x):
    """Return a fresh B with its 'x' attribute set to x.

    When x == 0 the function recurses once with x - 1, so the caller
    still gets a B back (carrying x == -1)."""
    if x == 0:
        return f(x - 1)
    result = B()
    result.x = x
    return result
global_a = A()
def entry_point(argv):
    # Build a small linked list of A instances hanging off the prebuilt
    # global_a, exercising heap allocation and attribute access in the
    # translated binary; returns a deterministic sum as the exit code.
    a1 = A()
    a2 = A()
    a3 = A()
    a4 = A()
    global_a.next = a1
    a1.x = 1
    a2.x = 2
    a3.x = 3
    a4.x = 4
    a1.next = a2
    a2.next = a3
    a3.next = a4
    a4.next = None
    # push stuff
    global_a.b = f(len(argv))
    global_a.b.x = len(argv)
    # pop stuff
    return a1.x + a2.x + a3.x + a4.x + global_a.b.x
# _____ Define and setup target ___
def target(*args):
    # None argtypes -> entry_point receives the executable's argv list.
    return entry_point, None
| Python |
"""
A simple standalone target for the prolog interpreter.
"""
import sys
from pypy.lang.prolog.interpreter.translatedmain import repl, execute
# __________ Entry point __________
from pypy.lang.prolog.interpreter.engine import Engine
from pypy.lang.prolog.interpreter import engine, term
e = Engine()  # single prebuilt Prolog engine shared by entry_point/repl
engine.DEBUG = False  # silence interpreter tracing in the translated binary
term.DEBUG = False
def entry_point(argv):
    """Standalone entry point: optionally run a script, then start the repl.

    Returns 1 when the repl exits via SystemExit, 0 otherwise."""
    from pypy.jit.codegen.hlinfo import highleveljitinfo
    if highleveljitinfo.sys_executable is None:
        # record the executable path for the JIT machinery, once
        highleveljitinfo.sys_executable = argv[0]
    if len(argv) == 2:
        execute(e, argv[1])
    try:
        repl(e)
    except SystemExit:
        return 1
    return 0
# _____ Define and setup target ___
def handle_config(config):
    """Target hook called by translate.py with the translation config.

    Intentionally a no-op.  The original body contained the unreachable
    statement ``config.translation.stackless = True`` after the bare
    ``return`` -- the forced-stackless setting was disabled by making it
    dead code.  The dead line is removed here; the hook itself is kept
    so translate.py still finds it in the target namespace.
    """
    return
def target(driver, args):
    # Name the produced executable after the backend, e.g. pyrolog-c.
    driver.exe_name = 'pyrolog-%(backend)s'
    return entry_point, None
def portal(driver):
    # JIT portal hook, used when translating with --jit.
    from pypy.lang.prolog.interpreter.portal import get_portal
    return get_portal(driver)
if __name__ == '__main__':
    # run untranslated, on top of CPython, for testing
    entry_point(sys.argv)
| Python |
def buildcache(space):
    """Force-populate the lazy dictionaries of an object space.

    Touches the dicts of builtin/sys/math modules and of all interp-level
    and app-level types so that their caches are filled before e.g.
    freezing/translating the space.  Progress is printed per module/type.
    """
    from pypy.interpreter.typedef import interptypes
    space.builtin.getdict()
    print "*builtin*"
    w_dic = space.builtin.w_dict
    #print space.unwrap(space.call_method(w_dic,"keys"))
    space.sys.getdict()
    print "*sys*"
    w_dic = space.sys.w_dict
    #print space.unwrap(space.call_method(w_dic,"keys"))
    # others
    w_modules = space.sys.get('modules')
    def getmodule(name):
        # look an already-imported module up in sys.modules
        return space.getitem(w_modules, space.wrap(name))
    getmodule('math').getdict()
    print "*math*"
    for typedef in interptypes:
        w_typ = space.gettypeobject(typedef)
        w_typ.getdict()
        print "*%s*" % typedef.name
    for typedef in space.model.pythontypes:
        w_typ = getattr(space, 'w_' + typedef.name)
        w_typ.getdict()
        print "*%s*" % typedef.name
        #print w_typ.dict_w.keys()
    space.builtin.get('file').getdict()
    # exercise exception raising and 'exec' machinery once at app level
    space.appexec([],"""():
        try:
            raise ValueError
        except ValueError:
            pass
        exec 'pass'
    """)
    # freeze caches?
    print "cache build finished"
if __name__ == '__main__':
    # standalone use: build a fresh StdObjSpace and fill its caches
    import autopath
    from pypy.objspace.std.objspace import StdObjSpace
    space = StdObjSpace()
    buildcache(space)
| Python |
# benchmarks on a windows machine.
# to be executed in the goal folder,
# where a couple of .exe files is expected.
USE_HIGH_PRIORITY = True
# usage with high priority:
# the program will try to import subprocess.
# you can have this with python older than 2.4: copy
# subprocess into lib and change line 392 to use win32
current_result = """
executable richards pystone size (MB)
pypy-c-17439 37413 47.7x 678.4 60.7x 5.65
pypy-c-17600-lo 26352 33.6x 906.2 45.4x 6.43
pypy-c-17634-lo 20108 25.7x 1023.5 40.2x 6.42
pypy-c-17649-lo 22612 28.9x 1042.0 39.5x 6.41
pypy-c-17674-lo 19248 24.6x 1358.8 30.3x 6.40
pypy-c-17674 12402 15.8x 1941.4 21.2x 7.37
pypy-c-17439-lo 29638 37.8x 971.4 42.4x 6.49
pypy-c-17707 14095 18.0x 2092.7 19.7x 7.37
pypy-c-17707-lo 19102 24.4x 1354.7 30.4x 6.40
pypy-c-17707-lo-range 18786 24.0x 2800.8 14.7x 6.40
pypy-c-17707-range 13980 17.8x 2899.9 14.2x 7.38
pypy-c-17743 13944 17.8x 2800.3 14.7x 7.30
pypy-c-17761-samuele 13243 16.9x 2983.3 13.8x 7.69
pypy-c-17794-ref-crash 41088 52.4x 1084.5 37.9x 14.62
pypy-c-17950 12888 16.4x 3203.0 12.8x 5.49
pypy-c-18236 9263 11.8x 3702.8 11.1x 5.12
python 2.4.1 783 1.0x 41150.3 1.0x 0.96
Defaults are: --gc=boehm
'lo' indicates --lowmem
STarting with rev. 18236, gc_pypy.dll is used
"""
import os, sys, pickle, md5
try:
from subprocess import *
except ImportError:
Popen = None
PYSTONE_CMD = 'from test import pystone;pystone.main(%s)'
PYSTONE_PATTERN = 'This machine benchmarks at'
RICHARDS_CMD = 'from richards import *;main(iterations=%d)'
RICHARDS_PATTERN = 'Average time per iteration:'
def get_result(txt, pattern):
    """Extract the numeric benchmark result from program output.

    Scans *txt* line by line for the first line starting with *pattern*
    and returns, as a float, the whitespace-separated token that follows
    the pattern's own words.  Raises ValueError when no line matches.

    (Modernized from the deprecated ``raise ValueError, msg`` statement
    form to the callable form, which is valid in Python 2 and 3 alike.)
    """
    for line in txt.split('\n'):
        if line.startswith(pattern):
            # the result is the token right after the pattern's words
            return float(line.split()[len(pattern.split())])
    raise ValueError('this is no valid output: %r' % txt)
def run_cmd(cmd):
    # Fallback runner (no subprocess module available): run through
    # os.popen with stderr merged into stdout, return the full output.
    print "running", cmd
    pipe = os.popen(cmd + ' 2>&1')
    result = pipe.read()
    print "done"
    return result
def run_cmd_subprocess(cmd):
    # Preferred runner: subprocess.Popen, honouring CREATIONFLAGS so the
    # child can be started with raised priority on win32.
    print "running", cmd
    result = Popen(cmd, stdout=PIPE, creationflags=CREATIONFLAGS
                   ).communicate()[0]
    print "done"
    return result
CREATIONFLAGS = 0
if Popen:
    # subprocess is available: use it, and on win32 optionally raise the
    # child process priority so background load skews the timings less
    run_cmd = run_cmd_subprocess
    try:
        import win32con, win32api
    except ImportError:
        pass
    else:
        if USE_HIGH_PRIORITY:
            CREATIONFLAGS = win32con.HIGH_PRIORITY_CLASS
            print "configured to run under high priority"
BENCH_EXECONFIG = '_bench_windows_exe.txt'
bench_exe = None
def reference(progname):
    """Return the reference interpreter used for the benchmark baseline.

    If a '_bench_windows_exe.txt' file exists in the current directory,
    its stripped contents override *progname*.  The answer is computed
    once and cached in the module-global 'bench_exe'; subsequent calls
    ignore their argument and return the cached value.

    Bug fix: the config file handle is now closed instead of leaked.
    """
    global bench_exe
    if not bench_exe:
        if os.path.exists(BENCH_EXECONFIG):
            cfg = open(BENCH_EXECONFIG)
            progname = cfg.read().strip()
            cfg.close()
            print("using %s instead of the system default" % progname)
        bench_exe = progname
    return bench_exe
def run_version_size(executable=reference('python'), *args):
    # Report (version, total size) of the reference interpreter; the
    # default argument is evaluated once, at import time, via reference().
    # On win32 the size of the python DLL is added to the exe size.
    ver, size, dll = run_cmd('%s -c "import sys, os; print sys.version.split()[0], '
                             'os.path.getsize(sys.executable), sys.dllhandle"'
                             % executable).split()
    size = int(size)
    try:
        import win32api
    except ImportError:
        pass
    else:
        size += os.path.getsize(win32api.GetModuleFileName(int(dll)))
    return ver, size
def run_pystone(executable=reference('python'), n=0, rpy=False):
    # Run pystone and return its score.  rpy=True means the executable is
    # a translated RPython binary with pystone compiled in; otherwise the
    # benchmark is run via "-c".  n==0 picks pystone's default iterations
    # (the "str(n) and n or ''" idiom turns 0 into an empty argument).
    if rpy:
        txt = run_cmd('%s pystone' % executable)
    else:
        argstr = PYSTONE_CMD % (str(n) and n or '')
        txt = run_cmd('%s -c "%s"' % (executable, argstr))
    res = get_result(txt, PYSTONE_PATTERN)
    print res
    return res
def run_richards(executable=reference('python'), n=20, rpy=False):
    # Run richards for n iterations and return the average ms/iteration.
    # rpy=True means a translated binary with richards compiled in.
    if rpy:
        txt = run_cmd('%s richards' % executable)
    else:
        argstr = RICHARDS_CMD % n
        txt = run_cmd('%s -c "%s"' % (executable, argstr))
    res = get_result(txt, RICHARDS_PATTERN)
    print res
    return res
def get_executables():
    """Return the sorted list of *.exe files in the current directory."""
    return sorted(entry for entry in os.listdir('.')
                  if entry.endswith('.exe'))
STAT_FILE = '_bench_windows.dump'
def load_stats(statfile=STAT_FILE):
    """Load the pickled results dict from *statfile*.

    Returns {} when the file cannot be opened/read (IOError), matching
    the original behaviour.  Bug fix: the file handle is now closed
    explicitly instead of being leaked.
    """
    f = None
    try:
        f = open(statfile, 'rb')
        dic = pickle.load(f)
    except IOError:
        dic = {}
    finally:
        if f is not None:
            f.close()
    return dic
def save_stats(dic, statfile=STAT_FILE):
    """Pickle *dic* to *statfile*.

    Bug fix: the file handle is closed (via try/finally) instead of
    being leaked as in the original one-liner.
    """
    f = open(statfile, 'wb')
    try:
        pickle.dump(dic, f)
    finally:
        f.close()
HEADLINE = '''\
executable richards pystone size (MB)'''
FMT = '''\
%-27s''' + '%5d %5.1fx' + ' %9.1f %5.1fx %5.3f'
FMT2 = '''\
%-27s''' + '%5.3f %5.1f/' + ' %9.1f %5.1f/ %5.3f'
def main():
    """Benchmark every *.exe in the current directory against CPython.

    Results are keyed by the md5 of the executable's contents, so a
    rebuilt-but-identical binary is not re-benchmarked (cached results
    are loaded from / saved to the pickle produced by save_stats).
    Note: uses the py2-only 'md5' module and 'file' builtin.
    """
    print 'getting the richards reference'
    ref_rich = run_richards()
    print 'getting the pystone reference'
    ref_stone = run_pystone()
    resdic = {}
    prior = load_stats()
    for exe in get_executables():
        exename = os.path.splitext(exe)[0]
        mtime = os.path.getmtime(exe)
        size = os.path.getsize(exe)
        # small binaries are assumed to be RPython builds with the
        # benchmarks compiled in
        rpy = size < 500000
        key = md5.new(file(exe,'rb').read()).digest()
        if key in prior:
            print 'skipped', exename
            resdic[key] = prior[key][:2] + (exename, mtime, size)
        else:
            resdic[key] = (run_richards(exe, 2,rpy), run_pystone(exe, 20000, rpy),
                           exename, mtime, size)
            prior[key] = resdic[key] # save result, temporarily
            save_stats(prior)
    save_stats(resdic) # save cleaned result
    # sort rows by the pystone/richards ratio, slowest first
    res = [ (stone / rich, exe, size, rich, stone)
            for rich, stone, exe, mtime, size in resdic.values()]
    version, size = run_version_size()
    res.append( (ref_stone/ref_rich, 'python %s' % version, size, ref_rich, ref_stone) )
    res.sort()
    print HEADLINE
    for speed2, exe, size, rich, stone in res:
        # FMT shows slowdown vs CPython, FMT2 speedup, depending on side
        if speed2 <= ref_stone/ref_rich:
            print FMT % (exe, rich, rich / ref_rich, stone, ref_stone / stone,
                         size / float(1024 * 1024))
        else:
            print FMT2 % (exe, rich, ref_rich / rich, stone, stone / ref_stone,
                          size / float(1024 * 1024))
if __name__ == '__main__':
    main()
| Python |
import os, sys
from pypy.objspace.std.objspace import StdObjSpace
# XXX from pypy.annotation.model import *
# since we are execfile()'ed this would pull some
# weird objects into the globals, which we would try to pickle.
from pypy.interpreter import gateway
from pypy.interpreter.error import OperationError
from pypy.translator.goal.ann_override import PyPyAnnotatorPolicy
from pypy.translator.goal.targetpypystandalone import PyPyTarget, debug
# WARNING: this requires the annotator.
# There is no easy way to build all caches manually,
# but the annotator can do it for us for free.
try:
    this_dir = os.path.dirname(__file__)
except NameError:
    # __file__ is absent when the source is exec'd; fall back to argv[0]
    this_dir = os.path.dirname(sys.argv[0])
def debug(msg):
    # Write a diagnostic line straight to stderr (fd 2), bypassing sys.stdout.
    os.write(2, "debug: " + msg + '\n')
# __________ Entry point __________
def entry_point(argv):
    """Entry point for the two-object-spaces demo binary.

    A leading "--space2" argument (consumed here) selects the second
    prebuilt space; otherwise the first one is used.  Returns the
    app-level exit code, or 1 on an uncaught OperationError."""
    debug("entry point starting")
    for arg in argv:
        debug(" argv -> " + arg)
    if len(argv) > 1 and argv[1] == "--space2":
        del argv[1]
        space = space2
        w_entry_point = w_entry_point_2
    else:
        space = space1
        w_entry_point = w_entry_point_1
    try:
        w_executable = space.wrap(argv[0])
        w_argv = space.newlist([space.wrap(s) for s in argv[1:]])
        w_exitcode = space.call_function(w_entry_point, w_executable, w_argv)
        # try to pull it all in
        ## from pypy.interpreter import main, interactive, error
        ## con = interactive.PyPyConsole(space)
        ## con.interact()
    except OperationError, e:
        debug("OperationError:")
        debug(" operror-type: " + e.w_type.getname(space, '?'))
        debug(" operror-value: " + space.str_w(space.str(e.w_value)))
        return 1
    return space.int_w(w_exitcode)
class MultipleSpaceTarget(PyPyTarget):
    """Target building a PyPy with TWO independent object spaces.

    Each space gets sys.pypy_space set to 1 or 2 so app-level code can
    tell them apart; entry_point() above dispatches on --space2."""
    usage = "target multiple spaces standalone"
    def handle_config(self, config):
        # threads are not supported with two spaces
        config.set(**{"translation.thread": False})
    def get_entry_point(self, config):
        global space1, space2, w_entry_point_1, w_entry_point_2
        space1 = StdObjSpace(config)
        space2 = StdObjSpace(config)
        space1.setattr(space1.getbuiltinmodule('sys'),
                       space1.wrap('pypy_space'),
                       space1.wrap(1))
        space2.setattr(space2.getbuiltinmodule('sys'),
                       space2.wrap('pypy_space'),
                       space2.wrap(2))
        # manually imports app_main.py
        filename = os.path.join(this_dir, 'app_main.py')
        w_dict = space1.newdict()
        space1.exec_(open(filename).read(), w_dict, w_dict)
        w_entry_point_1 = space1.getitem(w_dict, space1.wrap('entry_point'))
        w_dict = space2.newdict()
        space2.exec_(open(filename).read(), w_dict, w_dict)
        w_entry_point_2 = space2.getitem(w_dict, space2.wrap('entry_point'))
        # sanity-check: call the entry point
        res = entry_point(["pypy", "app_basic_example.py"])
        assert res == 0
        res = entry_point(["pypy", "--space2", "app_basic_example.py"])
        assert res == 0
        return entry_point, None, PyPyAnnotatorPolicy()
# export take_options/handle_config/target/... as module-level names,
# which is the interface translate.py expects from a target module
MultipleSpaceTarget().interface(globals())
| Python |
import py
import os, sys
from pypy.objspace.std.objspace import StdObjSpace
from pypy.interpreter import gateway
from pypy.interpreter.error import OperationError
from pypy.translator.goal.ann_override import PyPyAnnotatorPolicy
from pypy.config.config import Config, to_optparse, make_dict, SUPPRESS_USAGE
from pypy.tool.option import make_objspace
thisdir = py.magic.autopath().dirpath()
app_basic_example_path = str(thisdir.join("app_basic_example.py"))
try:
    this_dir = os.path.dirname(__file__)
except NameError:
    # __file__ is absent when the source is exec'd; fall back to argv[0]
    this_dir = os.path.dirname(sys.argv[0])
def debug(msg):
    # Write a diagnostic line straight to stderr (fd 2), bypassing sys.stdout.
    os.write(2, "debug: " + msg + '\n')
# __________ Entry point __________
def create_entry_point(space, w_dict):
    """Build the C-level entry point closure over *space*.

    *w_dict* is the namespace of the exec'd app_main.py; 'entry_point'
    and 'run_toplevel' are fetched from it.  The returned function runs
    space startup, the app-level entry point, and space shutdown, and
    maps uncaught OperationErrors to exit code 1."""
    w_entry_point = space.getitem(w_dict, space.wrap('entry_point'))
    w_run_toplevel = space.getitem(w_dict, space.wrap('run_toplevel'))
    w_call_finish_gateway = space.wrap(gateway.interp2app(call_finish))
    w_call_startup_gateway = space.wrap(gateway.interp2app(call_startup))
    def entry_point(argv):
        #debug("entry point starting")
        #for arg in argv:
        #    debug(" argv -> " + arg)
        try:
            try:
                space.call_function(w_run_toplevel, w_call_startup_gateway)
                w_executable = space.wrap(argv[0])
                w_argv = space.newlist([space.wrap(s) for s in argv[1:]])
                w_exitcode = space.call_function(w_entry_point, w_executable, w_argv)
                exitcode = space.int_w(w_exitcode)
                # try to pull it all in
                ## from pypy.interpreter import main, interactive, error
                ## con = interactive.PyPyConsole(space)
                ## con.interact()
            except OperationError, e:
                debug("OperationError:")
                debug(" operror-type: " + e.w_type.getname(space, '?'))
                debug(" operror-value: " + space.str_w(space.str(e.w_value)))
                return 1
        finally:
            # always try to shut the space down cleanly, even on error
            try:
                space.call_function(w_run_toplevel, w_call_finish_gateway)
            except OperationError, e:
                debug("OperationError:")
                debug(" operror-type: " + e.w_type.getname(space, '?'))
                debug(" operror-value: " + space.str_w(space.str(e.w_value)))
                return 1
        return exitcode
    return entry_point
def call_finish(space):
    # interp-level callback invoked via run_toplevel at shutdown
    space.finish()
def call_startup(space):
    # interp-level callback invoked via run_toplevel at startup
    space.startup()
# _____ Define and setup target ___
# for now this will do for option handling
class PyPyTarget(object):
    """The main 'translate PyPy itself' target.

    Exposes the target-module interface (take_options, handle_config,
    target, portal, ...) via interface(globals()) at the bottom."""
    usage = SUPPRESS_USAGE
    take_options = True
    def opt_parser(self, config):
        # optparse parser exposing only the objspace.* options
        parser = to_optparse(config, useoptions=["objspace.*"],
                             parserkwargs={'usage': self.usage})
        return parser
    def handle_config(self, config):
        # as of revision 27081, multimethod.py uses the InstallerVersion1 by default
        # because it is much faster both to initialize and run on top of CPython.
        # The InstallerVersion2 is optimized for making a translator-friendly
        # structure for low level backends. However, InstallerVersion1 is still
        # preferable for high level backends, so we patch here.
        from pypy.objspace.std import multimethod
        if config.translation.type_system == 'lltype':
            assert multimethod.InstallerVersion1.instance_counter == 0,\
                   'The wrong Installer version has already been instatiated'
            multimethod.Installer = multimethod.InstallerVersion2
        else:
            # don't rely on the default, set again here
            assert multimethod.InstallerVersion2.instance_counter == 0,\
                   'The wrong Installer version has already been instatiated'
            multimethod.Installer = multimethod.InstallerVersion1
    def handle_translate_config(self, translateconfig):
        # remember translate.py's own config for use in target() below
        self.translateconfig = translateconfig
    def print_help(self, config):
        self.opt_parser(config).print_help()
    def get_additional_config_options(self):
        # contribute the pypy objspace options to translate.py's config
        from pypy.config.pypyoption import pypy_optiondescription
        return pypy_optiondescription
    def target(self, driver, args):
        """Main target hook: reconcile config options and build the space."""
        driver.exe_name = 'pypy-%(backend)s'
        config = driver.config
        parser = self.opt_parser(config)
        parser.parse_args(args)
        # expose the following variables to ease debugging
        global space, entry_point
        # the next three pairs keep translation options and the matching
        # objspace modules in sync, in whichever direction was set
        if config.translation.thread:
            config.objspace.usemodules.thread = True
        elif config.objspace.usemodules.thread:
            config.translation.thread = True
        if config.translation.stackless:
            config.objspace.usemodules._stackless = True
        elif config.objspace.usemodules._stackless:
            config.translation.stackless = True
        if self.translateconfig.goal_options.jit:
            config.objspace.usemodules.pypyjit = True
        elif config.objspace.usemodules.pypyjit:
            self.translateconfig.goal_options.jit = True
        if config.translation.backend == "cli":
            config.objspace.usemodules.clr = True
        # XXX did it ever work?
        #elif config.objspace.usemodules.clr:
        #    config.translation.backend == "cli"
        config.objspace.nofaking = True
        config.objspace.compiler = "ast"
        config.translating = True
        import translate
        translate.log_config(config.objspace, "PyPy config object")
        # obscure hack to stuff the translation options into the translated PyPy
        import pypy.module.sys
        options = make_dict(config)
        wrapstr = 'space.wrap(%r)' % (options)
        pypy.module.sys.Module.interpleveldefs['pypy_translation_info'] = wrapstr
        return self.get_entry_point(config)
    def portal(self, driver):
        # JIT portal hook, used when translating with --jit
        from pypy.module.pypyjit.portal import get_portal
        return get_portal(driver)
    def get_entry_point(self, config):
        # build the object space and wrap app_main's entry point
        space = make_objspace(config)
        if not config.objspace.std.oldstyle:
            # disable translation of the whole of classobjinterp.py
            StdObjSpace.setup_old_style_classes = lambda self: None
        # manually imports app_main.py
        filename = os.path.join(this_dir, 'app_main.py')
        w_dict = space.newdict()
        space.exec_(open(filename).read(), w_dict, w_dict)
        entry_point = create_entry_point(space, w_dict)
        # sanity-check: call the entry point
        res = entry_point(["pypy", app_basic_example_path])
        assert res == 0
        return entry_point, None, PyPyAnnotatorPolicy(single_space = space)
    def interface(self, ns):
        # export the bound methods as module-level names, which is the
        # interface translate.py expects from a target module
        for name in ['take_options', 'handle_config', 'print_help', 'target',
                     'handle_translate_config', 'portal',
                     'get_additional_config_options']:
            ns[name] = getattr(self, name)
PyPyTarget().interface(globals())
| Python |
#! /usr/bin/env python
"""
Command-line options for translate:
See below
"""
import sys, os, new
import autopath
from pypy.config.config import to_optparse, OptionDescription, BoolOption, \
ArbitraryOption, StrOption, IntOption, Config, \
ChoiceOption, OptHelpFormatter
from pypy.config.translationoption import get_combined_translation_config
GOALS= [
("annotate", "do type inference", "-a --annotate", ""),
("rtype", "do rtyping", "-t --rtype", ""),
("prehannotatebackendopt", "backend optimize before hint-annotating",
"--prehannotatebackendopt", ""),
("hintannotate", "hint-annotate", "--hintannotate", ""),
("timeshift", "timeshift (jit generation)", "--timeshift", ""),
("backendopt", "do backend optimizations", "--backendopt", ""),
("source", "create source", "-s --source", ""),
("compile", "compile", "-c --compile", " (default goal)"),
("?jit", "generate JIT", "--jit", ""),
("run", "run the resulting binary", "--run", ""),
("llinterpret", "interpret the rtyped flow graphs", "--llinterpret", ""),
]
def goal_options():
    """Build the BoolOptions for all GOALS.

    Each goal gets a positive option; goals not marked optional (with a
    leading '?') additionally get a matching --no-<goal> option."""
    options = []
    for name, doc, cmdline, extra in GOALS:
        optional = name.startswith('?')
        if optional:
            name = name[1:]
        options.append(BoolOption(name, doc[0].upper() + doc[1:] + extra,
                                  default=False, cmdline=cmdline,
                                  negation=False))
        if not optional:
            options.append(BoolOption("no_%s" % name, "Don't " + doc,
                                      default=False, cmdline="--no-" + name,
                                      negation=False))
    return options
translate_optiondescr = OptionDescription("translate", "XXX", [
StrOption("targetspec", "XXX", default='targetpypystandalone',
cmdline=None),
BoolOption("profile",
"cProfile (to debug the speed of the translation process)",
default=False,
cmdline="--profile"),
BoolOption("batch", "Don't run interactive helpers", default=False,
cmdline="--batch", negation=False),
IntOption("huge", "Threshold in the number of functions after which "
"a local call graph and not a full one is displayed",
default=100, cmdline="--huge"),
BoolOption("text", "Don't start the pygame viewer", default=False,
cmdline="--text", negation=False),
BoolOption("help", "show this help message and exit", default=False,
cmdline="-h --help", negation=False),
ArbitraryOption("goals", "XXX",
defaultfactory=list),
# xxx default goals ['annotate', 'rtype', 'backendopt', 'source', 'compile']
ArbitraryOption("skipped_goals", "XXX",
defaultfactory=lambda: ['run']),
OptionDescription("goal_options",
"Goals that should be reached during translation",
goal_options()),
])
OVERRIDES = {
'translation.debug': False,
'translation.insist': False,
'translation.gc': 'boehm',
'translation.backend': 'c',
'translation.stackless': False,
'translation.backendopt.raisingop2direct_call' : False,
'translation.backendopt.merge_if_blocks': True,
'translation.cc': None,
'translation.profopt': None,
'translation.output': None,
}
import py
# we want 2.4 expand_default functionality
optparse = py.compat.optparse
from pypy.tool.ansi_print import ansi_log
log = py.log.Producer("translation")
py.log.setconsumer("translation", ansi_log)
def load_target(targetspec):
    """Import the target module named by *targetspec* and return its dict.

    Also registers this module as 'translate' in sys.modules so targets
    can 'import translate', and prepends the target's directory to
    sys.path so the bare __import__ below finds it."""
    log.info("Translating target as defined by %s" % targetspec)
    if not targetspec.endswith('.py'):
        targetspec += '.py'
    thismod = sys.modules[__name__]
    sys.modules['translate'] = thismod
    specname = os.path.splitext(os.path.basename(targetspec))[0]
    sys.path.insert(0, os.path.dirname(targetspec))
    mod = __import__(specname)
    return mod.__dict__
def parse_options_and_load_target():
    """Parse the command line, import the target, and merge configs.

    Returns (targetspec_dic, translateconfig, config, args) where args
    are the leftover target-specific arguments."""
    opt_parser = optparse.OptionParser(usage="%prog [options] [target] [target-specific-options]",
                                       prog="translate",
                                       formatter=OptHelpFormatter(),
                                       add_help_option=False)
    # stop at the first positional arg so target options pass through
    opt_parser.disable_interspersed_args()
    config = get_combined_translation_config(
                overrides=OVERRIDES, translating=True)
    to_optparse(config, parser=opt_parser, useoptions=['translation.*'])
    translateconfig = Config(translate_optiondescr)
    to_optparse(translateconfig, parser=opt_parser)
    options, args = opt_parser.parse_args()
    # set goals and skipped_goals
    reset = False
    for name, _, _, _ in GOALS:
        if name.startswith('?'):
            continue
        if getattr(translateconfig.goal_options, name):
            if name not in translateconfig.goals:
                translateconfig.goals.append(name)
        if getattr(translateconfig.goal_options, 'no_'+name):
            if name not in translateconfig.skipped_goals:
                # the first --no-<goal> clears the default skip list
                if not reset:
                    translateconfig.skipped_goals[:] = []
                    reset = True
                translateconfig.skipped_goals.append(name)
    if args:
        arg = args[0]
        args = args[1:]
        if os.path.isfile(arg+'.py'):
            assert not os.path.isfile(arg), (
                "ambiguous file naming, please rename %s" % arg)
            translateconfig.targetspec = arg
        elif os.path.isfile(arg) and arg.endswith('.py'):
            translateconfig.targetspec = arg[:-3]
        else:
            log.ERROR("Could not find target %r" % (arg, ))
            sys.exit(1)
    targetspec = translateconfig.targetspec
    targetspec_dic = load_target(targetspec)
    if args and not targetspec_dic.get('take_options', False):
        log.WARNING("target specific arguments supplied but will be ignored: %s" % ' '.join(args))
    # give the target the possibility to get its own configuration options
    # into the config
    if 'get_additional_config_options' in targetspec_dic:
        optiondescr = targetspec_dic['get_additional_config_options']()
        config = get_combined_translation_config(
                optiondescr,
                existing_config=config,
                translating=True)
    # let the target modify or prepare itself
    # based on the config
    if 'handle_config' in targetspec_dic:
        targetspec_dic['handle_config'](config)
    if 'handle_translate_config' in targetspec_dic:
        targetspec_dic['handle_translate_config'](translateconfig)
    if translateconfig.help:
        opt_parser.print_help()
        if 'print_help' in targetspec_dic:
            print "\n\nTarget specific help:\n\n"
            targetspec_dic['print_help'](config)
        print "\n\nFor detailed descriptions of the command line options see"
        print "http://codespeak.net/pypy/dist/pypy/doc/config/commandline.html"
        sys.exit(0)
    return targetspec_dic, translateconfig, config, args
def log_options(options, header="options in effect"):
    """Log every attribute of *options* (sorted by name) under *header*."""
    log('%s:' % header)
    for attr in sorted(options.__dict__.keys()):
        log('%25s: %s' % (attr, getattr(options, attr)))
def log_config(config, header="config used"):
    """Write *header* followed by the full config dump to the log."""
    for message in ('%s:' % header, str(config)):
        log(message)
def main():
    """Drive a whole translation run: parse options, build a
    TranslationContext, then hand over to the TranslationDriver,
    with optional profiling and an interactive post-mortem."""
    targetspec_dic, translateconfig, config, args = parse_options_and_load_target()
    from pypy.translator import translator
    from pypy.translator import driver
    from pypy.translator.tool.pdbplus import PdbPlusShow
    if translateconfig.profile:
        from cProfile import Profile
        prof = Profile()
        prof.enable()
    else:
        prof = None
    t = translator.TranslationContext(config=config)
    pdb_plus_show = PdbPlusShow(t) # need a translator to support extended commands
    def debug(got_error):
        # Post-run hook: dump profiler stats, report success/failure, and
        # (unless in batch mode) open the graph viewer and the pdb+ shell.
        if prof:
            prof.disable()
            statfilename = 'prof.dump'
            log.info('Dumping profiler stats to: %s' % statfilename)
            prof.dump_stats(statfilename)
        tb = None
        if got_error:
            import traceback
            errmsg = ["Error:\n"]
            exc, val, tb = sys.exc_info()
            errmsg.extend([" %s" % line for line in traceback.format_exception(exc, val, tb)])
            # the annotator attaches the offending block to the exception;
            # render it into the error message when present
            block = getattr(val, '__annotator_block', None)
            if block:
                class FileLike:
                    def write(self, s):
                        errmsg.append(" %s" % s)
                errmsg.append("Processing block:\n")
                t.about(block, FileLike())
            log.ERROR(''.join(errmsg))
        else:
            log.event('Done.')
        if translateconfig.batch:
            log.event("batch mode, not calling interactive helpers")
            return
        log.event("start debugger...")
        if not translateconfig.text:
            # 'drv' is only bound once the driver below was created; the
            # NameError catch covers failures before that point
            try:
                t1 = drv.hint_translator
            except (NameError, AttributeError):
                t1 = t
            from pypy.translator.tool import graphpage
            page = graphpage.TranslatorPage(t1, translateconfig.huge)
            page.display_background()
        pdb_plus_show.start(tb)
    try:
        drv = driver.TranslationDriver.from_targetspec(targetspec_dic, config, args,
                                                       empty_translator=t,
                                                       disable=translateconfig.skipped_goals,
                                                       default_goal='compile')
        log_config(translateconfig, "translate.py configuration")
        if translateconfig.goal_options.jit:
            if 'portal' not in targetspec_dic:
                raise Exception('target has no portal defined.')
            drv.set_extra_goals(['timeshift'])
        log_config(config.translation, "translation configuration")
        pdb_plus_show.expose({'drv': drv, 'prof': prof})
        if config.translation.output:
            drv.exe_name = config.translation.output
        elif drv.exe_name is None and '__name__' in targetspec_dic:
            # default executable name: <targetname>-<backend>
            drv.exe_name = targetspec_dic['__name__'] + '-%(backend)s'
        goals = translateconfig.goals
        drv.proceed(goals)
    except SystemExit:
        raise
    except:
        debug(True)
        raise SystemExit(1)
    else:
        debug(False)
if __name__ == '__main__':
    main()
| Python |
#! /usr/bin/env python
import os
homedir = os.getenv('HOME')
os.environ['PATH'] += ':/usr/local/bin:/usr/local/llvm/cfrontend/ppc/llvm-gcc/bin:'+homedir+'/bin'
import autopath
import py
import time, os, sys, stat
from pypy.translator.llvm.buildllvm import Builder
os.umask(022) # allow everyone to read/execute the produced pypy-c's
tmpdir = py.std.tempfile.gettempdir() + '/usession-' + os.environ['USER'] + '/'
cflags = "-O3"
lflags = "-lgc -lm -lpthread"
dry_run = False
def run(cmd):
    # Shell out via os.system unless dry_run is set; echo the command first.
    print 'RUN:', cmd
    sys.stdout.flush()
    result = 0 #OK
    if not dry_run:
        result = os.system(cmd) #note: result is system dependent but works on Linux the way we want
    return result
def update_pypy():
    # svn-update the pypy checkout
    os.chdir(homedir + '/projects/pypy-dist')
    run('/usr/local/bin/svn up 2>&1')
def update_llvm():
    # cvs-update and rebuild the llvm toolchain (tools only)
    os.chdir(homedir + '/projects/llvm')
    run('cvs -q up 2>&1')
    run('make -k -j3 tools-only 2>&1')
def compile_llvm_variants(revision, features):
    # Build all llvm-based executables: .ll -> .bc, then a profiled
    # C-backend exe and a native x86 exe.
    ll2bc(revision, features)
    bc2c_exe(revision, features, 'from richards import *;main(iterations=1)')
    bc2x86_exe(revision, features, 'llvm')
def ll2bc(revision, features):
    # Assemble+optimize the generated pypy.ll into pypy.bc, archiving
    # both the .ll and the .bc under a revision/feature-tagged name.
    if features:
        features = '-' + features
    cmd = 'cp %spypy.ll pypy/translator/goal/archive/pypy%s-%s.ll' % (tmpdir, features, revision)
    run(cmd)
    opts = Builder(None).optimizations()
    cmd = '~/bin/llvm-as < %spypy.ll | ~/bin/opt %s -f -o %spypy.bc' % (
        tmpdir, opts, tmpdir)
    run(cmd)
    cmd = 'cp %spypy.bc pypy/translator/goal/archive/pypy%s-%s.bc' % (tmpdir, features, revision)
    run(cmd)
def bc2c_exe(revision, features, profile_command=None):
    # Lower pypy.bc through llc's C backend, compile with gcc, and
    # (optionally) rebuild with gcc profile-guided optimization by
    # running *profile_command* between the -generate and -use passes.
    if features:
        features = '-' + features
    filename = "pypy-llvm-%s%s-c" % (revision, features)
    b = tmpdir + filename
    run("~/bin/llc %spypy.bc -march=c -f -o %s.c" % (tmpdir, b))
    run("cp %s.c pypy/translator/goal/archive" % b)
    run("gcc %s.c %s -S -o %s.s" % (b, cflags, b))
    run("cp %s.s pypy/translator/goal/archive" % b)
    run("gcc %s.s %s -o %s" % (b, lflags, b))
    run("cp %s pypy/translator/goal" % b)
    if profile_command:
        run("gcc %s.c -fprofile-generate %s -S -o %s.s" % (b, cflags, b))
        run("gcc %s.s -fprofile-generate %s -o %s" % (b, lflags, b))
        run("%s -c '%s'" % (b, profile_command))
        run("gcc %s.c -fprofile-use %s -S -o %s.s" % (b, cflags, b))
        run("cp %s.s pypy/translator/goal/archive/%s-prof.s" % (b, filename))
        run("gcc %s.s -fprofile-use %s -o %s" % (b, lflags, b))
        run("cp %s pypy/translator/goal/%s-prof" % (b, filename))
def bc2x86_exe(revision, features, name_extra, llc_extra_options=''):
    # Lower pypy.bc to native assembly with llc and link with gcc,
    # archiving the .s file and installing the executable.
    if features:
        features = '-' + features
    b = "%spypy-llvm-%s%s-%s" % (tmpdir, revision, features, name_extra)
    cmd = "~/bin/llc %spypy.bc %s -f -o %s.s" % (tmpdir, llc_extra_options, b)
    run(cmd)
    cmd = 'cp %s.s pypy/translator/goal/archive' % b
    run(cmd)
    cmd = "gcc %s.s %s -o %s" % (b, lflags, b)
    run(cmd)
    cmd = "cp %s pypy/translator/goal" % b
    run(cmd)
def compile(backend):
    """Translate one backend spec like 'c--stackless--_faassen'.

    '--'-separated features become translate.py options; a leading '_'
    marks a target-specific option instead.  The produced executable is
    renamed to include the svn revision and feature tags."""
    try:
        # split off the feature list; the bare-except covers specs
        # without any '--' separator
        backend, features = backend.split('--', 1)
        featureoptions = ''.join([" --" + f for f in features.split('--') if f[0] != '_'])
        targetoptions = ''.join([" --" + f[1:] for f in features.split('--') if f[0] == '_'])
    except:
        features = ''
        featureoptions = ''
        targetoptions = ''
    if backend == 'llvm':
        translateoptions = ' --source --raisingop2direct_call'
    else:
        translateoptions = ''
    def normalize(f):
        # strip the target marker and shorten 'profopt...' to 'prof'
        # for use in the archived file name
        if f.startswith('_'):
            f = f[1:]
        if f.startswith('profopt'):
            f = 'prof'
        return f
    features = '--'.join([normalize(f) for f in features.split('--')])
    os.chdir(homedir + '/projects/pypy-dist/pypy/translator/goal')
    run('/usr/local/bin/python translate.py --backend=%(backend)s%(featureoptions)s%(translateoptions)s --text --batch targetpypystandalone.py %(targetoptions)s 2>&1' % locals())
    if backend == 'llvm':
        run('mv %s/entry_point.ll %s/pypy.ll' % (tmpdir, tmpdir))
    os.chdir(homedir + '/projects/pypy-dist')
    try:
        revision = '%d' % (py.path.svnwc('.').info().rev,)
    except:
        revision = 'unknown'
    basename = homedir + '/projects/pypy-dist/pypy/translator/goal/' + 'pypy-' + backend
    realname = basename + '-' + revision
    if features:
        realname += "-" + features
    if backend == 'llvm': #create llvm exectutable from the current source
        compile_llvm_variants(revision, features)
    elif os.path.exists(basename): #copy executable
        run("mv %s %s" % (basename, realname))
        if backend == 'cli':
            basename_dir = basename + '-data'
            realname_dir = realname + '-data'
            run("mv %s %s" % (basename_dir, realname_dir))
        #pypy = open(basename, 'rb').read()
        #if len(pypy) > 0:
        #    open(realname, 'wb').write(pypy)
        #os.chmod(realname, stat.S_IRWXU)
        #os.unlink(basename)
def get_load():
    """Return the stripped output of 'uptime' (current system load)."""
    pipe = os.popen('uptime', 'r')
    output = pipe.read().strip()
    pipe.close()
    return output
def benchmark():
    """Run bench-unix.py and wrap its output into benchmark.html.

    The HTML report records uname and the system load before/after so
    skewed runs (busy machine) can be spotted."""
    os.chdir(homedir + '/projects/pypy-dist/pypy/translator/goal')
    uname = os.popen('uname -a', 'r').read()
    startload = get_load()
    # result = run('/usr/local/bin/withlock /tmp/cpu_cycles_lock /usr/local/bin/python bench-unix.py 2>&1 | tee benchmark.txt' % locals())
    result = run('/usr/local/bin/python bench-unix.py 2>&1 | tee benchmark.txt' % locals())
    endload = get_load()
    if not dry_run and result == 0:
        f = open('benchmark.html', 'w')
        print >> f, "<html><body>"
        print >> f, "<pre>"
        print >> f, "uname -a:", uname
        print >> f, "Benchmark started:", startload
        print >> f, "          ended:", endload
        print >> f
        f.write(open('benchmark.txt').read())
        print >> f, "</pre>"
        print >> f, "</body></html>"
        f.close()
def main(backends=[]):
if backends == []: #_ prefix means target specific option, # prefix to outcomment
backends = [backend.strip() for backend in """
llvm--_faassen
c
c--stackless--_faassen
c--_faassen
c--thread
c--_objspace=taint
c--_allworkingmodules
c--_objspace-std-withtproxy--_faassen
c--gc=framework--_faassen
c--_objspace-std-withrope
cli
""".split('\n') if backend.strip() and not backend.strip().startswith('#')]
print time.ctime()
for backend in backends:
if backend.startswith('llvm'):
update_llvm()
break
update_pypy()
for backend in backends:
try:
compile(backend)
except:
raise
pass
benchmark()
print time.ctime()
print 80*'-'
if __name__ == '__main__':
    # Command line: --benchmark-only | [--dry-run] [backend-spec ...]
    args = sys.argv[1:]
    if args and args[0] == '--benchmark-only':
        # just re-run the benchmarks against already-built executables
        benchmark()
    else:
        if args and args[0] == '--dry-run':
            del args[0]
            # rebinds the module-level dry_run flag (we are at module scope)
            dry_run = True
        main(args)
| Python |
#! /usr/bin/env python
# App-level version of py.py.
# See test/test_app_main.
"""
options:
-i inspect interactively after running script
-O dummy optimization flag for compatibility with C Python
-c CMD program passed in as CMD (terminates option list)
-S do not 'import site' on initialization
-u unbuffered binary stdout and stderr
-h, --help show this help message and exit
-m library module to be run as a script (terminates option list)
--version print the PyPy version
--info print translation information about this PyPy executable
"""
import sys, os
DEBUG = False  # dump exceptions before calling the except hook
originalexcepthook = sys.__excepthook__
def run_toplevel(f, *fargs, **fkwds):
    """Calls f() and handle all OperationErrors.
    Intended use is to run the main program or one interactive statement.
    run_protected() handles details like forwarding exceptions to
    sys.excepthook(), catching SystemExit, printing a newline after
    sys.stdout if needed, etc.
    Returns True on success, False if an exception was reported to the
    user (in which case sys.last_type/value/traceback are also set).
    """
    try:
        # run it
        f(*fargs, **fkwds)
        # we arrive here if no exception is raised. stdout cosmetics...
        try:
            stdout = sys.stdout
            softspace = stdout.softspace
        except AttributeError:
            pass
            # Don't crash if user defined stdout doesn't have softspace
        else:
            if softspace:
                stdout.write('\n')
    except SystemExit, e:
        # exit if we catch a w_SystemExit
        exitcode = e.code
        if exitcode is None:
            exitcode = 0
        else:
            try:
                exitcode = int(exitcode)
            except:
                # not an integer: print it to stderr
                try:
                    stderr = sys.stderr
                except AttributeError:
                    pass # too bad
                else:
                    print >> stderr, exitcode
                exitcode = 1
        raise SystemExit(exitcode)
    except:
        etype, evalue, etraceback = sys.exc_info()
        try:
            # extra debugging info in case the code below goes very wrong
            if DEBUG and hasattr(sys, 'stderr'):
                s = getattr(etype, '__name__', repr(etype))
                print >> sys.stderr, "debug: exception-type: ", s
                print >> sys.stderr, "debug: exception-value:", str(evalue)
                tbentry = etraceback
                if tbentry:
                    # walk to the innermost frame for a useful file:line
                    while tbentry.tb_next:
                        tbentry = tbentry.tb_next
                    lineno = tbentry.tb_lineno
                    filename = tbentry.tb_frame.f_code.co_filename
                    print >> sys.stderr, "debug: exception-tb: %s:%d" % (
                        filename, lineno)
            # set the sys.last_xxx attributes
            sys.last_type = etype
            sys.last_value = evalue
            sys.last_traceback = etraceback
            # call sys.excepthook
            hook = getattr(sys, 'excepthook', originalexcepthook)
            hook(etype, evalue, etraceback)
            return False # done
        except:
            try:
                stderr = sys.stderr
            except AttributeError:
                pass # too bad
            else:
                print >> stderr, 'Error calling sys.excepthook:'
                originalexcepthook(*sys.exc_info())
                print >> stderr
                print >> stderr, 'Original exception was:'
        # we only get here if sys.excepthook didn't do its job
        originalexcepthook(etype, evalue, etraceback)
        return False
    return True # success
# ____________________________________________________________
# Option parsing
def print_info():
try:
options = sys.pypy_translation_info
except AttributeError:
print >> sys.stderr, 'no translation information found'
else:
optitems = options.items()
optitems.sort()
for name, value in optitems:
print ' %51s: %s' % (name, value)
def print_help():
    # usage banner; the option table lives in the module docstring
    print 'usage: %s [options]' % (sys.executable,)
    print __doc__
def print_error(msg):
    # report a bad command line on stderr, pointing at -h for details
    print >> sys.stderr, msg
    print >> sys.stderr, 'usage: %s [options]' % (sys.executable,)
    print >> sys.stderr, 'Try `%s -h` for more information.' % (sys.executable,)
def set_unbuffered_io():
    """Rebind stdin/stdout/stderr to unbuffered binary streams (-u flag)."""
    if os.name == 'nt':
        raise NotImplementedError("binary stdin/stdout not implemented "
                                  "on Windows")
    # bufsize 0 = fully unbuffered; also rebind the sys.__std*__ aliases
    sys.stdin = sys.__stdin__ = os.fdopen(0, 'rb', 0)
    sys.stdout = sys.__stdout__ = os.fdopen(1, 'wb', 0)
    sys.stderr = sys.__stderr__ = os.fdopen(2, 'wb', 0)
# ____________________________________________________________
# Main entry point
# relative location of the library when installed system-wide
AUTOSUBPATH = 'share' + os.sep + 'pypy-%d.%d'
def entry_point(executable, argv):
    """Emulate the CPython command line.

    Parses the options in `argv`, sets up sys.path and sys.argv, and
    runs the requested command (-c), module (-m), script file or stdin.
    `executable` is argv[0] of the process, used to locate the library.
    Returns the process exit code.
    """
    # find the full path to the executable, assuming that if there is no '/'
    # in the provided one then we must look along the $PATH
    if os.sep not in executable:
        path = os.getenv('PATH')
        if path:
            for dir in path.split(os.pathsep):
                fn = os.path.join(dir, executable)
                if os.path.isfile(fn):
                    executable = fn
                    break
    sys.executable = os.path.abspath(executable)
    # set up a sys.path that depends on the local machine
    autosubpath = AUTOSUBPATH % sys.pypy_version_info[:2]
    search = executable
    while 1:
        dirname = resolvedirof(search)
        if dirname == search:
            # not found! let's hope that the compiled-in path is ok
            print >> sys.stderr, ('debug: WARNING: library path not found, '
                                  'using compiled-in sys.path')
            break
        newpath = sys.pypy_initial_path(dirname)
        if newpath is None:
            newpath = sys.pypy_initial_path(os.path.join(dirname, autosubpath))
            if newpath is None:
                search = dirname    # walk to the parent directory
                continue
        sys.path = newpath      # found!
        break
    go_interactive = False
    run_command = False
    import_site = True
    i = 0
    run_module = False
    run_stdin = False
    # option parsing; the loop stops at the first non-option argument
    while i < len(argv):
        arg = argv[i]
        if not arg.startswith('-'):
            break
        if arg == '-i':
            go_interactive = True
        elif arg == '-c':
            if i+1 >= len(argv):
                print_error('Argument expected for the -c option')
                return 2
            run_command = True
            break
        elif arg == '-u':
            set_unbuffered_io()
        elif arg == '-O':
            pass
        elif arg == '--version':
            print sys.version
            return 0
        elif arg == '--info':
            print_info()
            return 0
        elif arg == '-h' or arg == '--help':
            print_help()
            return 0
        elif arg == '-S':
            import_site = False
        elif arg == '-':
            run_stdin = True
            break # not an option but a file name representing stdin
        elif arg == '-m':
            i += 1
            if i >= len(argv):
                print_error('Argument expected for the -m option')
                return 2
            run_module = True
            break
        elif arg == '--':
            i += 1
            break # terminates option list
        else:
            print_error('unrecognized option %r' % (arg,))
            return 2
        i += 1
    sys.argv = argv[i:]
    if not sys.argv:
        sys.argv.append('')
        run_stdin = True
    # with PyPy in top of CPython we can only have around 100
    # but we need more in the translated PyPy for the compiler package
    sys.setrecursionlimit(5000)
    mainmodule = type(sys)('__main__')
    sys.modules['__main__'] = mainmodule
    if import_site:
        try:
            import site
        except:
            print >> sys.stderr, "'import site' failed"
    # set up the Ctrl-C => KeyboardInterrupt signal handler, if the
    # signal module is available
    try:
        import signal
    except ImportError:
        pass
    else:
        signal.signal(signal.SIGINT, signal.default_int_handler)
        if hasattr(signal, "SIGPIPE"):
            signal.signal(signal.SIGPIPE, signal.SIG_IGN)
    def is_interactive():
        # PYTHONINSPECT set at any point during the run also triggers
        # the interactive prompt at the end
        return go_interactive or os.getenv('PYTHONINSPECT')
    success = True
    try:
        if run_command:
            cmd = sys.argv.pop(1)
            def run_it():
                exec cmd in mainmodule.__dict__
            success = run_toplevel(run_it)
        elif run_module:
            def run_it():
                import runpy
                runpy.run_module(sys.argv[0], None, '__main__', True)
            success = run_toplevel(run_it)
        elif run_stdin:
            if is_interactive() or sys.stdin.isatty():
                print_banner()
                python_startup = os.getenv('PYTHONSTARTUP')
                if python_startup:
                    try:
                        startup = open(python_startup).read()
                    except IOError:
                        pass
                    else:
                        def run_it():
                            co_python_startup = compile(startup,
                                                        python_startup,
                                                        'exec')
                            exec co_python_startup in mainmodule.__dict__
                        run_toplevel(run_it)
                go_interactive = True
            else:
                def run_it():
                    co_stdin = compile(sys.stdin.read(), '<stdin>', 'exec')
                    exec co_stdin in mainmodule.__dict__
                mainmodule.__file__ = '<stdin>'
                success = run_toplevel(run_it)
        else:
            # run a script file; its directory is prepended to sys.path
            mainmodule.__file__ = sys.argv[0]
            scriptdir = resolvedirof(sys.argv[0])
            sys.path.insert(0, scriptdir)
            success = run_toplevel(execfile, sys.argv[0], mainmodule.__dict__)
        if is_interactive():
            # prefer the pyrepl console when its dependencies are present
            try:
                import _curses
                import termios
                from pyrepl.python_reader import main
                from pyrepl import cmdrepl
                #import pdb
                #pdb.Pdb = cmdrepl.replize(pdb.Pdb, 1)
            except ImportError:
                success = run_toplevel(interactive_console, mainmodule)
            else:
                main(print_banner=False)
                success = True
    except SystemExit, e:
        return e.code
    else:
        return not success
def resolvedirof(filename):
    """Return the directory that contains *filename*.

    If *filename* itself is a symlink, follow it (recursively) and
    answer for the final target instead.  OS errors are silently
    ignored and the best available answer is returned.
    """
    try:
        fullpath = os.path.abspath(filename)
    except OSError:
        fullpath = filename
    parent = os.path.dirname(fullpath)
    if not os.path.islink(fullpath):
        return parent
    try:
        target = os.readlink(fullpath)
    except OSError:
        return parent
    return resolvedirof(os.path.join(parent, target))
def print_banner():
    # mimic CPython's interactive startup banner
    print 'Python %s on %s' % (sys.version, sys.platform)
    print ('Type "help", "copyright", "credits" or '
           '"license" for more information.')
def interactive_console(mainmodule):
    # some parts of code.py are copied here because it seems to be impossible
    # to start an interactive console without printing at least one line
    # of banner
    import code
    console = code.InteractiveConsole(mainmodule.__dict__)
    try:
        import readline
    except ImportError:
        pass
    more = 0
    while 1:
        try:
            if more:
                prompt = sys.ps2
            else:
                prompt = sys.ps1
            try:
                line = raw_input(prompt)
            except EOFError:
                console.write("\n")
                break
            else:
                # push() returns a true value while the statement is
                # incomplete and more input lines are needed
                more = console.push(line)
        except KeyboardInterrupt:
            console.write("\nKeyboardInterrupt\n")
            console.resetbuffer()
            more = 0
if __name__ == '__main__':
    # testing mode: run app_main on top of CPython, faking the sys
    # attributes that only exist in a translated PyPy
    import autopath
    # obscure! try removing the following line, see how it crashes, and
    # guess why...
    ImStillAroundDontForgetMe = sys.modules['__main__']
    sys.ps1 = '>>>> '
    sys.ps2 = '.... '
    # debugging only
    def pypy_initial_path(s):
        from pypy.module.sys.state import getinitialpath
        try:
            return getinitialpath(s)
        except OSError:
            return None
    from pypy.module.sys.version import PYPY_VERSION
    sys.pypy_version_info = PYPY_VERSION
    sys.pypy_initial_path = pypy_initial_path
    sys.exit(entry_point(sys.argv[0], sys.argv[1:]))
    #sys.exit(entry_point('app_main.py', sys.argv[1:]))
| Python |
import os, sys
def restart_process():
    """Replace the current process with a fresh run of the same command line.

    Note: the original re-imported sys inside the function although it is
    already imported at module level; the redundant import was dropped.
    """
    os.execv(sys.executable, [sys.executable] + sys.argv)
def restartable_point_fork(auto=None, extra_msg=None):
    """Interactive checkpoint based on fork().

    Prompts the user (or consumes the one-shot `auto` answer), then
    forks: the child continues past the checkpoint while the parent
    waits for it to exit and offers the prompt again -- effectively
    allowing the rest of the program to be re-run from this point.
    On platforms without fork() this simply returns.
    """
    while True:
        while True:
            if extra_msg:
                print extra_msg
            print '---> Checkpoint: cont / restart-it-all / quit / pdb ?'
            if auto:
                print 'auto-%s' % (auto,)
                line = auto
                auto = None   # the automatic answer is only used once
            else:
                try:
                    line = raw_input().strip().lower()
                except (KeyboardInterrupt, EOFError), e:
                    print '(%s ignored)' % e.__class__.__name__
                    continue
            if line in ('run', 'cont'):
                break
            if line == 'quit':
                raise SystemExit
            if line == 'pdb':
                try:
                    import pdb; pdb.set_trace()
                    dummy_for_pdb = 1 # for pdb to land
                except Exception, e:
                    print '(%s ignored)' % e.__class__.__name__
                continue
            if line == 'restart-it-all':
                restart_process()
        try:
            pid = os.fork()
        except AttributeError:
            # windows case
            return
        if pid != 0:
            # in parent
            while True:
                try:
                    pid, status = os.waitpid(pid, 0)
                except KeyboardInterrupt:
                    continue
                else:
                    break
            print
            print '_'*78
            print 'Child %d exited' % pid,
            if os.WIFEXITED(status):
                print '(exit code %d)' % os.WEXITSTATUS(status)
            elif os.WIFSIGNALED(status):
                print '(caught signal %d)' % os.WTERMSIG(status)
            else:
                print 'abnormally (status 0x%x)' % status
            continue
        # in child
        print '_'*78
        break
# special version for win32 which does not have fork() at all,
# but people can simulate it by hand using VMware
def restartable_point_nofork(auto=None):
    """Fork-less fallback: show the prompt but never actually fork."""
    # auto ignored, no way to automate VMware, yet
    restartable_point_fork(None, '+++ this system does not support fork +++\n'
           'if you have a virtual machine, you can save a snapshot now')
# pick the fork-based implementation where fork() is available
if sys.platform == 'win32':
    restartable_point = restartable_point_nofork
else:
    restartable_point = restartable_point_fork
if __name__ == '__main__':
    # tiny self-test: everything after the checkpoint can be re-run
    print 'doing stuff...'
    print 'finished'
    restartable_point()
    print 'doing more stuff'
    print 'press Enter to quit...'
    raw_input()
| Python |
import thread, time
class MonitorList(list):
    """A list that reports every append/remove; used to watch which
    worker threads are currently running."""
    def append(self, obj):
        list.append(self, obj)
        # trailing comma + embedded \n: write the whole line in one go to
        # reduce interleaving between threads
        print "running grown to %r\n" % self,
    def remove(self, obj):
        list.remove(self, obj)
        print "running shrunk to %r\n" % self,
running = MonitorList()   # names of the threads currently inside f()
def f(name, count, modulus):
    """Worker: count up to `count`, reporting every `modulus` steps."""
    running.append(name)
    print "starting %s %d %d\n" % (name, count, modulus),
    for i in xrange(count):
        if i % modulus == 0:
            print "%s %d\n" % (name, i),
    running.remove(name)
# spawn four counting threads...
thread.start_new_thread(f, ("eins", 10000000, 12345))
thread.start_new_thread(f, ("zwei", 10000000, 13579))
thread.start_new_thread(f, ("drei", 10000000, 14680))
thread.start_new_thread(f, ("vier", 10000000, 15725))
# ...busy-wait until at least one has registered itself...
while not running:
    pass
print "waiting for %r to finish\n" % running,
# ...then busy-wait until all of them have finished
while running:
    pass
print "finished waiting.\n",
| Python |
"""
A simple standalone target.
The target below specifies None as the argument types list.
This is a case treated specially in driver.py . If the list
of input types is empty, it is meant to be a list of strings,
actually implementing argv of the executable.
"""
import os, sys
def debug(msg):
    # write straight to fd 2 (stderr): usable even before/without sys.stderr
    os.write(2, "debug: " + msg + '\n')
# __________ Entry point __________
def entry_point(argv):
    # trivial standalone target: do nothing and exit successfully
    return 0
# _____ Define and setup target ___
def target(*args):
    # None as the argument-types list means "argv: list of strings"
    # (see the module docstring and driver.py)
    return entry_point, None
| Python |
# based on a Java version:
# Based on original version written in BCPL by Dr Martin Richards
# in 1981 at Cambridge University Computer Laboratory, England
# and a C++ version derived from a Smalltalk version written by
# L Peter Deutsch.
# Java version: Copyright (C) 1995 Sun Microsystems, Inc.
# Translation from C++, Mario Wolczko
# Outer loop added by Alex Jacoby
# Task IDs (indices into TaskWorkArea.taskTab)
I_IDLE = 1
I_WORK = 2
I_HANDLERA = 3
I_HANDLERB = 4
I_DEVA = 5
I_DEVB = 6
# Packet types
K_DEV = 1000
K_WORK = 1001
# Packet
BUFSIZE = 4
BUFSIZE_RANGE = range(BUFSIZE)   # reused to avoid rebuilding the range
class Packet(object):
    """A unit of work passed between tasks; packets form singly linked
    queues through the `link` attribute."""

    def __init__(self, l, i, k):
        self.link = l            # next packet in the queue, or None
        self.ident = i           # task id this packet is addressed to
        self.kind = k            # K_WORK or K_DEV
        self.datum = 0
        self.data = [0] * BUFSIZE

    def append_to(self, lst):
        """Detach self and append it at the tail of queue `lst`; return
        the (possibly new) head of the queue."""
        self.link = None
        if lst is None:
            return self
        tail = lst
        while tail.link is not None:
            tail = tail.link
        tail.link = self
        return lst
# Task Records
class TaskRec(object):
    """Marker base class for the per-task private state records."""
    pass
class DeviceTaskRec(TaskRec):
    """Private state of a device task."""
    def __init__(self):
        # packet held by the device, waiting to be echoed back
        self.pending = None
class IdleTaskRec(TaskRec):
    """Private state of the idle task."""
    def __init__(self):
        self.control = 1      # pseudo-random shift register
        self.count = 10000    # iterations before the idle task holds itself
class HandlerTaskRec(TaskRec):
    """Private state of a handler task: one queue of incoming work
    packets and one queue of incoming device packets."""

    def __init__(self):
        self.work_in = None
        self.device_in = None

    def workInAdd(self, p):
        # append p to the work queue and return the queue head
        queue = p.append_to(self.work_in)
        self.work_in = queue
        return queue

    def deviceInAdd(self, p):
        # append p to the device queue and return the queue head
        queue = p.append_to(self.device_in)
        self.device_in = queue
        return queue
class WorkerTaskRec(TaskRec):
    """Private state of the worker task."""
    def __init__(self):
        self.destination = I_HANDLERA   # handler receiving the next packet
        self.count = 0                  # position in the alphabet (1..26)
# Task
class TaskState(object):
    """Mutable status flags of a task: whether an input packet is
    pending, and whether the task is waiting and/or holding.  The
    setters return self so they can be chained at construction time."""

    def __init__(self):
        self.packetPending()

    def _flags(self, pending, waiting, holding):
        # internal helper: set all three flags at once
        self.packet_pending = pending
        self.task_waiting = waiting
        self.task_holding = holding
        return self

    def packetPending(self):
        return self._flags(True, False, False)

    def waiting(self):
        return self._flags(False, True, False)

    def running(self):
        return self._flags(False, False, False)

    def waitingWithPacket(self):
        return self._flags(True, True, False)

    def isPacketPending(self):
        return self.packet_pending

    def isTaskWaiting(self):
        return self.task_waiting

    def isTaskHolding(self):
        return self.task_holding

    def isTaskHoldingOrWaiting(self):
        if self.task_holding:
            return True
        return self.task_waiting and not self.packet_pending

    def isWaitingWithPacket(self):
        return (self.packet_pending and self.task_waiting
                and not self.task_holding)
tracing = False   # set to True to print a trace of scheduled task ids
layout = 0        # columns remaining before the next line break
def trace(a):
    """Print `a`, breaking the line every 50 items (used when tracing)."""
    global layout
    layout -= 1
    if layout <= 0:
        print
        layout = 50
    print a,
TASKTABSIZE = 10   # taskTab is indexed by task ident
class TaskWorkArea(object):
    """Global scheduler state: task table, run list and statistics."""
    def __init__(self):
        self.taskTab = [None] * TASKTABSIZE   # ident -> Task
        self.taskList = None                  # head of the task run list
        self.holdCount = 0                    # number of hold() operations
        self.qpktCount = 0                    # number of queued packets
taskWorkArea = TaskWorkArea()
class Task(TaskState):
    """A schedulable task: a node in taskWorkArea.taskList with a
    priority, an input packet queue and a task-specific state record."""
    def __init__(self,i,p,w,initialState,r):
        self.link = taskWorkArea.taskList    # next task in the run list
        self.ident = i
        self.priority = p
        self.input = w                       # queue of pending input packets
        self.packet_pending = initialState.isPacketPending()
        self.task_waiting = initialState.isTaskWaiting()
        self.task_holding = initialState.isTaskHolding()
        self.handle = r                      # task-specific TaskRec
        taskWorkArea.taskList = self
        taskWorkArea.taskTab[i] = self
    def fn(self,pkt,r):
        # subclass responsibility: process one packet, return next task
        raise NotImplementedError
    def addPacket(self,p,old):
        """Deliver packet p to this task; return the task to run next
        (this task if it just became runnable with a higher priority
        than `old`, otherwise `old`)."""
        if self.input is None:
            self.input = p
            self.packet_pending = True
            if self.priority > old.priority:
                return self
        else:
            p.append_to(self.input)
        # bugfix: this return was indented inside the 'else' branch, so
        # delivering a first packet to a lower-priority task returned
        # None and derailed the schedule() loop (canonical Richards has
        # it at this level).
        return old
    def runTask(self):
        # pop the next input packet (if we are waiting for one) and run
        # the task body
        if self.isWaitingWithPacket():
            msg = self.input
            self.input = msg.link
            if self.input is None:
                self.running()
            else:
                self.packetPending()
        else:
            msg = None
        return self.fn(msg,self.handle)
    def waitTask(self):
        self.task_waiting = True
        return self
    def hold(self):
        taskWorkArea.holdCount += 1
        self.task_holding = True
        return self.link
    def release(self,i):
        # un-hold task i; switch to it if it has the higher priority
        t = self.findtcb(i)
        t.task_holding = False
        if t.priority > self.priority:
            return t
        else:
            return self
    def qpkt(self,pkt):
        # send pkt to the task it is addressed to
        t = self.findtcb(pkt.ident)
        taskWorkArea.qpktCount += 1
        pkt.link = None
        pkt.ident = self.ident   # stamp the packet with the sender id
        return t.addPacket(pkt,self)
    def findtcb(self,id):
        t = taskWorkArea.taskTab[id]
        if t is None:
            raise Exception("Bad task id %d" % id)
        return t
# DeviceTask
class DeviceTask(Task):
    """Simulated device: echoes each queued packet back, one per run."""
    def __init__(self,i,p,w,s,r):
        Task.__init__(self,i,p,w,s,r)
    def fn(self,pkt,r):
        d = r
        assert isinstance(d, DeviceTaskRec)
        if pkt is None:
            # no new packet: forward the pending one, if any
            pkt = d.pending
            if pkt is None:
                return self.waitTask()
            else:
                d.pending = None
                return self.qpkt(pkt)
        else:
            # remember the packet and yield the processor
            d.pending = pkt
            if tracing: trace(pkt.datum)
            return self.hold()
class HandlerTask(Task):
    """Copies the characters of work packets into device packets."""
    def __init__(self,i,p,w,s,r):
        Task.__init__(self,i,p,w,s,r)
    def fn(self,pkt,r):
        h = r
        assert isinstance(h, HandlerTaskRec)
        if pkt is not None:
            # file the incoming packet into the matching queue
            if pkt.kind == K_WORK:
                h.workInAdd(pkt)
            else:
                h.deviceInAdd(pkt)
        work = h.work_in
        if work is None:
            return self.waitTask()
        count = work.datum
        if count >= BUFSIZE:
            # work packet fully consumed: send it back to the worker
            h.work_in = work.link
            return self.qpkt(work)
        dev = h.device_in
        if dev is None:
            return self.waitTask()
        # ship one character to the device
        h.device_in = dev.link
        dev.datum = work.data[count]
        work.datum = count + 1
        return self.qpkt(dev)
# IdleTask
class IdleTask(Task):
    """Lowest-priority task: releases the device tasks in a pseudo-random
    pattern driven by a shift register, then holds itself."""
    def __init__(self,i,p,w,s,r):
        # note: the p and w arguments are ignored -- priority is forced
        # to 0 and the idle task has no input queue
        Task.__init__(self,i,0,None,s,r)
    def fn(self,pkt,r):
        i = r
        assert isinstance(i, IdleTaskRec)
        i.count -= 1
        if i.count == 0:
            return self.hold()
        elif i.control & 1 == 0:
            i.control /= 2
            return self.release(I_DEVA)
        else:
            # 0xd008 are the taps of the pseudo-random shift register
            i.control = i.control/2 ^ 0xd008
            return self.release(I_DEVB)
# WorkTask
A = ord('A')   # base character for the generated work data
class WorkTask(Task):
    """Produces work packets filled with letters, alternating between
    the two handler tasks."""
    def __init__(self,i,p,w,s,r):
        Task.__init__(self,i,p,w,s,r)
    def fn(self,pkt,r):
        w = r
        assert isinstance(w, WorkerTaskRec)
        if pkt is None:
            return self.waitTask()
        # alternate the destination handler on every packet
        if w.destination == I_HANDLERA:
            dest = I_HANDLERB
        else:
            dest = I_HANDLERA
        w.destination = dest
        pkt.ident = dest
        pkt.datum = 0
        for i in BUFSIZE_RANGE: # xrange(BUFSIZE)
            # fill the buffer with consecutive letters, cycling A..Z
            w.count += 1
            if w.count > 26:
                w.count = 1
            pkt.data[i] = A + w.count - 1
        return self.qpkt(pkt)
import time
def schedule():
    """Run tasks off taskWorkArea.taskList until none is runnable."""
    t = taskWorkArea.taskList
    while t is not None:
        pkt = None
        if tracing:
            print "tcb =",t.ident
        if t.isTaskHoldingOrWaiting():
            # blocked: just move on to the next task in the list
            t = t.link
        else:
            if tracing: trace(chr(ord("0")+t.ident))
            t = t.runTask()
class Richards(object):
    """Driver for the Richards OS-scheduler benchmark."""
    def run(self, iterations):
        # each iteration rebuilds the whole task/packet graph from
        # scratch and runs the scheduler to completion
        for i in xrange(iterations):
            taskWorkArea.holdCount = 0
            taskWorkArea.qpktCount = 0
            IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec())
            wkq = Packet(None, 0, K_WORK)
            wkq = Packet(wkq , 0, K_WORK)
            WorkTask(I_WORK, 1000, wkq, TaskState().waitingWithPacket(), WorkerTaskRec())
            wkq = Packet(None, I_DEVA, K_DEV)
            wkq = Packet(wkq , I_DEVA, K_DEV)
            wkq = Packet(wkq , I_DEVA, K_DEV)
            HandlerTask(I_HANDLERA, 2000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec())
            wkq = Packet(None, I_DEVB, K_DEV)
            wkq = Packet(wkq , I_DEVB, K_DEV)
            wkq = Packet(wkq , I_DEVB, K_DEV)
            HandlerTask(I_HANDLERB, 3000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec())
            wkq = None;
            DeviceTask(I_DEVA, 4000, wkq, TaskState().waiting(), DeviceTaskRec());
            DeviceTask(I_DEVB, 5000, wkq, TaskState().waiting(), DeviceTaskRec());
            schedule()
            # the known-good counter values for one run of this setup
            if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246:
                pass
            else:
                return False
        return True
def entry_point(iterations):
    """Run the benchmark `iterations` times; return (ok, start, end)."""
    bench = Richards()
    started = time.time()
    ok = bench.run(iterations)
    finished = time.time()
    return ok, started, finished
def main(entry_point = entry_point, iterations = 10):
    """Run the benchmark, print timing statistics; return 42 on success
    and -1 if the scheduler produced wrong counter values."""
    print "Richards benchmark (Python) starting... [%r]" % entry_point
    result, startTime, endTime = entry_point(iterations)
    if not result:
        print "Incorrect results!"
        return -1
    print "finished."
    total_s = endTime - startTime
    print "Total time for %d iterations: %.2f secs" %(iterations,total_s)
    print "Average time per iteration: %.2f ms" %(total_s*1000/iterations)
    return 42
# When running on a PyPy with the JIT (and without the -nojit command
# line flag), explicitly enable the JIT on every function and method
# defined above.
try:
    import sys
    if '-nojit' in sys.argv:
        sys.argv.remove('-nojit')
        raise ImportError
    import pypyjit
except ImportError:
    pass
else:
    import types
    for item in globals().values():
        if isinstance(item, types.FunctionType):
            pypyjit.enable(item.func_code)
        elif isinstance(item, type):
            for it in item.__dict__.values():
                if isinstance(it, types.FunctionType):
                    pypyjit.enable(it.func_code)
if __name__ == '__main__':
    import sys
    # optional argument: the number of iterations (default 10)
    if len(sys.argv) >= 2:
        main(iterations = int(sys.argv[1]))
    else:
        main()
| Python |
# overrides for annotation specific to PyPy codebase
from pypy.annotation.policy import AnnotatorPolicy, Sig
# for some reason, model must be imported first,
# or we create a cycle.
from pypy.objspace.flow.model import Constant
from pypy.annotation import model as annmodel
from pypy.annotation.bookkeeper import getbookkeeper
from pypy.annotation import specialize
from pypy.interpreter import baseobjspace
def isidentifier(s):
    """Return True if `s` is a plain Python identifier (letters, digits
    and underscores, not starting with a digit)."""
    if not s:
        return False
    # map underscores to a letter so str.isalpha/isalnum accept them
    canonical = s.replace('_', 'x')
    return canonical[0].isalpha() and canonical.isalnum()
# patch - mostly for debugging, to enforce some signatures:
# space.newbool() must take a bool and return a wrapped object
baseobjspace.ObjSpace.newbool.im_func._annenforceargs_ = Sig(lambda s1,s2: s1,
                                                             bool)
class PyPyAnnotatorPolicy(AnnotatorPolicy):
    """Annotator policy with PyPy-specific specializations.

    The main feature is attribute-lookup caching: lookups of a constant
    attribute name on non-heap (builtin) types are specialized to read
    a precomputed 'cached_<attr>' slot patched onto W_TypeObject.
    Note: methods here use 'pol' where 'self' would be conventional.
    """
    allow_someobjects = False
    def __init__(pol, single_space=None):
        pol.lookups = {}         # attr names for which lookup() was specialized
        pol.lookups_where = {}   # same, for lookup_in_type_where()
        pol.pypytypes = {}       # all W_TypeObject instances seen so far
        pol.single_space = single_space
    #def override__wrap_exception_cls(pol, space, x):
    #    import pypy.objspace.std.typeobject as typeobject
    #    clsdef = getbookkeeper().getuniqueclassdef(typeobject.W_TypeObject)
    #    return annmodel.SomeInstance(clsdef, can_be_None=True)
    #
    #def override__fake_object(pol, space, x):
    #    from pypy.interpreter import typedef
    #    clsdef = getbookkeeper().getuniqueclassdef(typedef.W_Root)
    #    return annmodel.SomeInstance(clsdef)
    #
    #def override__cpy_compile(pol, self, source, filename, mode, flags):
    #    from pypy.interpreter import pycode
    #    clsdef = getbookkeeper().getuniqueclassdef(pycode.PyCode)
    #    return annmodel.SomeInstance(clsdef)
    def specialize__yield_thread(pol, funcdesc, args_s):
        # replace the annotated function by a flow graph of this stub
        def yield_thread(self):
            GIL = self.GIL
            GIL.fused_release_acquire()
        def builder(translator, func):
            return translator.buildflowgraph(yield_thread)
        return funcdesc.cachedgraph(None, builder=builder)
    def specialize__wrap(pol, funcdesc, args_s):
        # specialize space.wrap() per wrapped type; constant primitives
        # are folded into prebuilt wrapped objects
        from pypy.interpreter.baseobjspace import Wrappable
        from pypy.annotation.classdef import ClassDef
        Wrappable_def = funcdesc.bookkeeper.getuniqueclassdef(Wrappable)
        typ = args_s[1].knowntype
        if isinstance(typ, ClassDef):
            assert typ.issubclass(Wrappable_def)
            typ = Wrappable
        else:
            assert not issubclass(typ, Wrappable)
        if args_s[0].is_constant() and args_s[1].is_constant():
            if typ in (str, bool, int, float):
                space = args_s[0].const
                x = args_s[1].const
                def fold():
                    if typ is str and isidentifier(x):
                        return space.new_interned_str(x)
                    else:
                        return space.wrap(x)
                builder = specialize.make_constgraphbuilder(2, factory=fold)
                return funcdesc.cachedgraph((typ, x), builder=builder)
        return funcdesc.cachedgraph(typ)
    def attach_lookup(pol, t, attr):
        # precompute t.cached_<attr> on a non-heap type t
        cached = "cached_%s" % attr
        if not t.is_heaptype():
            setattr(t, cached, t._lookup(attr))
            return True
        return False
    def attach_lookup_in_type_where(pol, t, attr):
        # precompute t.cached_where_<attr> on a non-heap type t
        cached = "cached_where_%s" % attr
        if not t.is_heaptype():
            setattr(t, cached, t._lookup_where(attr))
            return True
        return False
    def consider_lookup(pol, bookkeeper, attr):
        # declare the cached_<attr> attribute on W_TypeObject and seed
        # it on every type seen so far
        from pypy.annotation.classdef import InstanceSource
        assert attr not in pol.lookups
        from pypy.objspace.std import typeobject
        cached = "cached_%s" % attr
        clsdef = bookkeeper.getuniqueclassdef(typeobject.W_TypeObject)
        classdesc = clsdef.classdesc
        classdesc.classdict[cached] = Constant(None)
        clsdef.add_source_for_attribute(cached, classdesc)
        for t in pol.pypytypes:
            if pol.attach_lookup(t, attr):
                source = InstanceSource(bookkeeper, t)
                clsdef.add_source_for_attribute(cached, source)
        pol.lookups[attr] = True
    def consider_lookup_in_type_where(pol, bookkeeper, attr):
        # same as consider_lookup() for the (value, where) variant
        from pypy.annotation.classdef import InstanceSource
        assert attr not in pol.lookups_where
        from pypy.objspace.std import typeobject
        cached = "cached_where_%s" % attr
        clsdef = bookkeeper.getuniqueclassdef(typeobject.W_TypeObject)
        classdesc = clsdef.classdesc
        classdesc.classdict[cached] = Constant((None, None))
        clsdef.add_source_for_attribute(cached, classdesc)
        for t in pol.pypytypes:
            if pol.attach_lookup_in_type_where(t, attr):
                source = InstanceSource(bookkeeper, t)
                clsdef.add_source_for_attribute(cached, source)
        pol.lookups_where[attr] = True
    def specialize__lookup(pol, funcdesc, args_s):
        s_space, s_obj, s_name = args_s
        if s_name.is_constant():
            # constant attribute name: generate a lookup_<attr> function
            # from the CACHED_LOOKUP template below
            attr = s_name.const
            def builder(translator, func):
                #print "LOOKUP", attr
                pol.consider_lookup(funcdesc.bookkeeper, attr)
                d = {}
                exec CACHED_LOOKUP % {'attr': attr} in d
                return translator.buildflowgraph(d['lookup_'+attr])
            return funcdesc.cachedgraph(attr, builder=builder)
        else:
            pol.lookups[None] = True
            return funcdesc.cachedgraph(None) # don't specialize
    def specialize__lookup_in_type_where(pol, funcdesc, args_s):
        s_space, s_obj, s_name = args_s
        if s_name.is_constant():
            attr = s_name.const
            def builder(translator, func):
                #print "LOOKUP_IN_TYPE_WHERE", attr
                pol.consider_lookup_in_type_where(funcdesc.bookkeeper, attr)
                d = {}
                exec CACHED_LOOKUP_IN_TYPE_WHERE % {'attr': attr} in d
                return translator.buildflowgraph(d['lookup_in_type_where_'+attr])
            return funcdesc.cachedgraph(attr, builder=builder)
        else:
            pol.lookups_where[None] = True
            return funcdesc.cachedgraph(None)
    def event(pol, bookkeeper, what, x):
        # annotator event hook: whenever a new W_TypeObject constant is
        # seen, attach all already-requested cached lookups to it
        from pypy.objspace.std import typeobject
        if isinstance(x, typeobject.W_TypeObject):
            from pypy.annotation.classdef import InstanceSource
            clsdef = bookkeeper.getuniqueclassdef(typeobject.W_TypeObject)
            pol.pypytypes[x] = True
            #print "TYPE", x
            for attr in pol.lookups:
                if attr and pol.attach_lookup(x, attr):
                    cached = "cached_%s" % attr
                    source = InstanceSource(bookkeeper, x)
                    clsdef.add_source_for_attribute(cached, source)
            for attr in pol.lookups_where:
                if attr and pol.attach_lookup_in_type_where(x, attr):
                    cached = "cached_where_%s" % attr
                    source = InstanceSource(bookkeeper, x)
                    clsdef.add_source_for_attribute(cached, source)
        return
CACHED_LOOKUP = """
from pypy.rlib.jit import hint
def lookup_%(attr)s(space, w_obj, name):
w_type = space.type(w_obj)
if not w_type.is_heaptype():
w_type = hint(w_type, deepfreeze=True)
return w_type.cached_%(attr)s
return w_type.lookup("%(attr)s")
"""
CACHED_LOOKUP_IN_TYPE_WHERE = """
from pypy.rlib.jit import hint
def lookup_in_type_where_%(attr)s(space, w_type, name):
if not w_type.is_heaptype():
w_type = hint(w_type, deepfreeze=True)
return w_type.cached_where_%(attr)s
return w_type.lookup_where("%(attr)s")
"""
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.