code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#! /usr/bin/env python
import autopath
import py
import sys
mydir = py.magic.autopath().dirpath().realpath()
from pypy.tool.pytest import htmlreport
from pypy.tool.pytest import confpath
if __name__ == '__main__':
if len(sys.argv) > 1:
testresultdir = py.path.local(sys.argv[1])
assert testresultdir.check(dir=1)
else:
testresultdir = confpath.testresultdir
assert testresultdir.check(dir=1)
try:
resultwc = py.path.svnwc(testresultdir)
print "updating", resultwc
resultwc.update()
except KeyboardInterrupt, RuntimeError:
raise
except Exception,e: #py.process.ExecutionFailed,e:
print >> sys.stderr, "Warning: ",e #Subversion update failed"
print "traversing", mydir
rep = htmlreport.HtmlReport(testresultdir)
rep.parselatest()
print "making html files"
rep.makeindex(testresultdir.join('index.html'))
| Python |
#empty
| Python |
#empty
| Python |
import autopath
import re
from os import listdir
from sys import stdin, stdout, stderr
# Directory holding the *type.py files we patch.
where = autopath.pypydir + '/objspace/std/'
# Regex fragment matching a single quote character (either kind).
quote = '(' + "'" + '|' + '"' + ')'
# Regex fragment matching a triple quote (either kind).
triplequotes = '(' + "'''" + '|' + '"""' + ')'
# Note: this will produce erroneous result if you nest triple quotes
# in your docstring.
def mk_std_filelist():
    ''' go to pypy/objs/std and get all the *type.py files, except for
    typetype.py which has to be patched by hand.'''
    return [name for name in listdir(where)
            if name.endswith('type.py') and name != 'typetype.py']
def compile_doc():
    """Build a regex matching a triple-quoted __doc__ assignment; the
    docstring body is captured in the named group 'docstring'."""
    pattern = (r"__doc__\s+=\s+" + triplequotes
               + r"(?P<docstring>.*)" + triplequotes)
    return re.compile(pattern, re.DOTALL)
def compile_typedef(typ):
    """Build a regex locating `typ`'s StdTypeDef assignment and the
    indentation/position of its __new__ line (named groups 'indent' and
    'newassign'), so a docstring can be inserted just before __new__."""
    # BUG FIX: the original pattern contained "StdTypeDef+" in a non-raw
    # string -- the stray '+' repeated the literal 'f' and the backslash
    # escapes only worked because Python leaves unknown escapes alone.
    # Use raw strings and drop the bogus quantifier.
    return re.compile(r"(?P<whitespace>\s+)"
                      + r"(?P<typeassign>" + typ
                      + r"_typedef = StdTypeDef\s*\(\s*"
                      + quote + typ + quote + r",).*"
                      + r"(?P<indent>^\s+)"
                      + r"(?P<newassign>__new__\s*=\s*newmethod)",
                      re.DOTALL | re.MULTILINE)
def get_pypydoc(sourcefile):
    """Return the pypy docstring already present in the source string
    `sourcefile`, or None if there is none."""
    match = compile_doc().search(sourcefile)
    if match is None:  # no pypy docstring present
        return None
    return match.group('docstring')
def get_cpydoc(typ):
    """Return the docstring of the builtin type named `typ` as seen by
    the running CPython, or None if no such builtin exists."""
    # relies on being run by CPython.
    try:
        return eval(typ + '.__doc__')
    except NameError:  # no CPython docstring for an unknown name
        return None
def add_docstring(typ, sourcefile):
    '''Return the source string `sourcefile` with the CPython docstring
    of `typ` spliced in just before the __new__ assignment of its
    StdTypeDef, or None if nothing was (or could be) changed.
    Diagnostics go to stderr.'''
    pypydoc = get_pypydoc(sourcefile)
    cpydoc = get_cpydoc(typ)
    if pypydoc:
        stderr.write('%s: already has a pypy docstring\n' % typ)
        return None
    elif not cpydoc:
        stderr.write('%s: does not have a cpython docstring\n' % typ)
        return None
    else:
        # trailing comma: the line is inserted inside the StdTypeDef
        # keyword-argument list
        docstring="__doc__ = '''" + cpydoc + "''',"
        typedef = compile_typedef(typ)
        newsearch = typedef.search(sourcefile)
        if not newsearch:
            stderr.write('%s: has a cpython docstring, but no __new__, to determine where to put it.\n' % typ)
            return None
        else:
            # rewrite the matched indent + __new__ line with the docstring
            # inserted in front, preserving indentation
            return re.sub(newsearch.group('indent') +
                          newsearch.group('newassign'),
                          newsearch.group('indent') +
                          docstring + '\n' +
                          newsearch.group('indent') +
                          newsearch.group('newassign'),
                          sourcefile)
if __name__ == '__main__':
    # Patch every pypy/objspace/std/*type.py file in place, adding the
    # CPython docstring where one is missing.
    filenames = mk_std_filelist()
    for f in filenames:
        inf = file(where + f).read()
        outs = add_docstring(f[:-7], inf)  # f[:-7] strips 'type.py'
        if outs is not None:
            outf = file(where + f, 'w')
            outf.write(outs)
| Python |
import sys, os, signal
import threading
def getsignalname(n):
    """Map a signal number to its symbolic 'SIG*' name, falling back to
    'signal <n>' when no known name matches."""
    for attr, num in vars(signal).items():
        if num == n and attr.startswith('SIG'):
            return attr
    return 'signal %d' % (n,)
timeout = float(sys.argv[1])
timedout = False
def childkill():
    """Timer callback: flag the run as timed out and SIGTERM the child,
    ignoring the race where the child already exited."""
    global timedout
    timedout = True
    sys.stderr.write("="*26 + "timedout" + "="*26 + "\n")
    try:
        os.kill(pid, signal.SIGTERM)
    except OSError:
        # child already gone
        pass
# Fork: the child execs the command from the command line, the parent
# arms the timeout timer and waits for the child to finish.
pid = os.fork()
if pid == 0:
    os.execvp(sys.argv[2], sys.argv[2:])
else: # parent
    t = threading.Timer(timeout, childkill)
    t.start()
    while True:
        try:
            pid, status = os.waitpid(pid, 0)
        except KeyboardInterrupt:
            # keep waiting: only child exit (or the timeout) ends the loop
            continue
        else:
            t.cancel()
            break
    if os.WIFEXITED(status):
        # propagate the child's own exit status
        sys.exit(os.WEXITSTATUS(status))
    else:
        assert os.WIFSIGNALED(status)
        sign = os.WTERMSIG(status)
        # SIGTERM after our own timeout is the expected kill path
        if timedout and sign == signal.SIGTERM:
            sys.exit(1)
        signame = getsignalname(sign)
        # NOTE(review): this banner says "timedout" although this branch is
        # reached for *any* fatal signal -- looks copy-pasted from
        # childkill(); confirm intended.
        sys.stderr.write("="*26 + "timedout" + "="*26 + "\n")
        sys.stderr.write("="*25 + " %-08s " % signame + "="*25 + "\n")
        sys.exit(1)
| Python |
#!/usr/bin/env python
# XXX needs to run on codespeak
import py
import sys
import os
base = py.path.local('/www/codespeak.net/htdocs')
def runpytest(path, outfile):
    """Run py.test on `path` under the site-wide gendoc lock, appending
    all output to `outfile`; returns the os.system() status.
    NOTE(review): `path`/`outfile` are interpolated into a shell command
    unquoted -- fine for trusted local paths, unsafe for anything else."""
    lockfile = base.join(".gendoclock")
    return os.system("/admin/bin/withlock %s py.test %s >>%s 2>&1" %(
        lockfile, path, outfile))
if __name__ == '__main__':
    results = []
    for fn in sys.argv[1:]:
        p = base.join(fn, abs=True)
        assert p.check(), p
        outfile = p.join("gendoc.log")
        # update the working copy first so the log records the revision
        wc = py.path.svnwc(p)
        wc.update()
        rev = wc.info().rev
        outfile.write("gendoc for %s revision %d\n\n" %(p, rev))
        errcode = runpytest(p, outfile)
        if errcode:
            # remember the failure; full details are in the log file
            results.append("in revision %d of %s" %(rev, p))
            results.append(" gendoc failed with %d, see %s " %(
                errcode, outfile))
            print results[-1]
    if results:
        # report all failures on stderr and exit non-zero
        for line in results:
            print >>sys.stderr, line
        sys.exit(1)
| Python |
from __future__ import division
import autopath
import py
import math
import random
import sets
exclude_files = ["__init__.py", "autopath.py", "conftest.py"]

def include_file(path):
    """Decide whether `path` takes part in the import graph: skip tests,
    tools, docs, pyrex and cache trees, plus boilerplate files."""
    name = str(path)
    for fragment in ("test", "tool", "documentation", "pyrex", "_cache"):
        if fragment in name:
            return False
    return path.basename not in exclude_files
def get_mod_from_path(path):
    """Turn a file path into a dotted pypy module name, e.g.
    .../pypy/objspace/std/foo.py -> 'pypy.objspace.std.foo'."""
    dirs = path.get("dirname")[0].split("/")
    pypyindex = dirs.index("pypy")
    # path.get("purebasename") is a one-element list: the final component
    return ".".join(dirs[pypyindex:] + path.get("purebasename"))
def find_references(path):
    """Scan a source file for top-level imports of pypy modules; return a
    list of (dotted-module-name, alias-or-None) tuples."""
    refs = []
    for line in path.open("r"):
        if line.startswith(" "): # ignore local imports to reduce graph size
            continue
        if "\\" in line: #ignore line continuations
            continue
        line = line.strip()
        line = line.split("#")[0].strip()  # drop trailing comments
        if line.startswith("import pypy."): # import pypy.bla.whatever
            if " as " not in line:
                refs.append((line[7:].strip(), None))
            else: # import pypy.bla.whatever as somethingelse
                assert line.count(" as ") == 1
                line = line.split(" as ")
                refs.append((line[0][7:].strip(), line[1].strip()))
        elif line.startswith("from ") and "pypy" in line: #from pypy.b import a
            line = line[5:]
            if " as " not in line:
                # may import several names at once: one ref per name
                line = line.split(" import ")
                what = line[1].split(",")
                for w in what:
                    refs.append((line[0].strip() + "." + w.strip(), None))
            else: # from pypy.b import a as c
                if line.count(" as ") != 1 or "," in line:
                    print"can't handle this: " + line
                    continue
                line = line.split(" as ")
                what = line[0].replace(" import ", ".").replace(" ", "")
                refs.append((what, line[1].strip()))
    return refs
def get_module(ref, imports):
    """Return the longest dotted prefix of `ref` that is a key of
    `imports`, or None when no prefix matches."""
    parts = ref.split(".")
    for length in range(len(parts), 0, -1):
        candidate = ".".join(parts[:length])
        if candidate in imports:
            return candidate
    return None
def casteljeau(points, t):
    """Evaluate the Bezier curve with control `points` at parameter `t`
    using De Casteljau's algorithm; `points` is not modified."""
    work = list(points)
    while len(work) > 1:
        # one reduction step: pairwise linear interpolation
        work = [a * (1 - t) + b * t for a, b in zip(work, work[1:])]
    return work[0]
def color(t):
    """Map t in [0, 1] to a scalar intensity: a Bezier bump with control
    points (0, 0, 1, 0, 0), normalised so color(0.5) == 1.0 (the curve's
    maximum value at t=0.5 is 0.375).

    BUG FIX: the original computed the value but never returned it (and
    also built an unused local `points`), so it always returned None.
    """
    return casteljeau([0, 0, 1, 0, 0], t) / 0.375
class ModuleGraph(object):
    """Builds the import graph of all pypy modules below `path` and can
    emit it as a graphviz .dot file."""
    def __init__(self, path):
        # imports: dotted module name -> list of (referenced module, alias)
        self.imports = {}
        self.clusters = {}
        self.mod_to_cluster = {}
        for f in path.visit("*.py"):
            if include_file(f):
                self.imports[get_mod_from_path(f)] = find_references(f)
        self.remove_object_refs()
        self.remove_double_refs()
        # incoming: module -> set of modules that import it
        self.incoming = {}
        for mod in self.imports:
            self.incoming[mod] = sets.Set()
        for mod, refs in self.imports.iteritems():
            for ref in refs:
                if ref[0] in self.incoming:
                    self.incoming[ref[0]].add(mod)
        self.remove_single_nodes()
        self.topgraph_properties = ["rankdir=LR"]
    def remove_object_refs(self):
        # reduces cases like import pypy.translator.genc.basetype.CType to
        # import pypy.translator.genc.basetype
        for mod, refs in self.imports.iteritems():
            i = 0
            while i < len(refs):
                if refs[i][0] in self.imports:
                    i += 1
                else:
                    nref = get_module(refs[i][0], self.imports)
                    if nref is None:
                        # not below any known module at all: drop the edge
                        print "removing", repr(refs[i])
                        del refs[i]
                    else:
                        refs[i] = (nref, None)
                        i += 1
    def remove_double_refs(self):
        # remove several references to the same module
        for mod, refs in self.imports.iteritems():
            i = 0
            seen_refs = sets.Set()
            while i < len(refs):
                if refs[i] not in seen_refs:
                    seen_refs.add(refs[i])
                    i += 1
                else:
                    del refs[i]
    def remove_single_nodes(self):
        # remove nodes that have no attached edges
        rem = []
        for mod, refs in self.imports.iteritems():
            if len(refs) == 0 and len(self.incoming[mod]) == 0:
                rem.append(mod)
        for m in rem:
            del self.incoming[m]
            del self.imports[m]
    def create_clusters(self):
        """Group modules into graphviz clusters by the first dotted-name
        component at which the module names start to differ."""
        self.topgraph_properties.append("compound=true;")
        self.clustered = True
        hierarchy = [sets.Set() for i in range(6)]
        for mod in self.imports:
            for i, d in enumerate(mod.split(".")):
                hierarchy[i].add(d)
        # i ends as the first level with more than one distinct name
        for i in range(6):
            if len(hierarchy[i]) != 1:
                break
        for mod in self.imports:
            cluster = mod.split(".")[i]
            if i == len(mod.split(".")) - 1:
                # the differing component is the module itself: no cluster
                continue
            if cluster not in self.clusters:
                self.clusters[cluster] = sets.Set()
            self.clusters[cluster].add(mod)
            self.mod_to_cluster[mod] = cluster
    def remove_tangling_randomly(self):
        # remove edges to nodes that have a lot incoming edges randomly
        tangled = []
        for mod, incoming in self.incoming.iteritems():
            if len(incoming) > 10:
                tangled.append(mod)
        for mod in tangled:
            # randomly pick 80% of the importers and drop their edge to mod
            remove = sets.Set()
            incoming = self.incoming[mod]
            while len(remove) < len(incoming) * 0.80:
                remove.add(random.choice(list(incoming)))
            for rem in remove:
                # NOTE(review): refs are (module, alias) tuples, so
                # comparing the alias slot [1] with a module name looks
                # wrong -- [0] seems intended; confirm before relying on it.
                for i in range(len(self.imports[rem])):
                    if self.imports[rem][i][1] == mod:
                        break
                del self.imports[rem][i]
                incoming.remove(rem)
                print "removing", mod, "<-", rem
        self.remove_single_nodes()
    def dotfile(self, dot):
        """Write the graph in graphviz syntax to the py.path `dot`.
        Module names are printed without their leading 'pypy.' prefix."""
        f = dot.open("w")
        f.write("digraph G {\n")
        for prop in self.topgraph_properties:
            f.write("\t%s\n" % prop)
        #write clusters and inter-cluster edges
        for cluster, nodes in self.clusters.iteritems():
            f.write("\tsubgraph cluster_%s {\n" % cluster)
            f.write("\t\tstyle=filled;\n\t\tcolor=lightgrey\n")
            for node in nodes:
                f.write('\t\t"%s";\n' % node[5:])
            for mod, refs in self.imports.iteritems():
                for ref in refs:
                    if mod in nodes and ref[0] in nodes:
                        f.write('\t\t"%s" -> "%s";\n' % (mod[5:], ref[0][5:]))
            f.write("\t}\n")
        #write edges between clusters
        for mod, refs in self.imports.iteritems():
            try:
                nodes = self.clusters[self.mod_to_cluster[mod]]
            except KeyError:
                nodes = sets.Set()
            for ref in refs:
                if ref[0] not in nodes:
                    f.write('\t"%s" -> "%s";\n' % (mod[5:], ref[0][5:]))
        f.write("}")
        f.close()
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
path = py.path.local(sys.argv[1])
else:
path = py.path.local(".")
gr = ModuleGraph(path)
gr.create_clusters()
dot = path.join("import_graph.dot")
gr.dotfile(dot)
| Python |
# This is where the options for py.py are defined.
import os
from pypy.config.pypyoption import get_pypy_config
from pypy.config.config import Config, OptionDescription, to_optparse
from py.compat import optparse
extra_useage = """For detailed descriptions of all the options see
http://codespeak.net/pypy/dist/pypy/doc/config/commandline.html"""
def run_tb_server(option, opt, value, parser):
    """optparse callback for -H: start the web-based traceback server."""
    from pypy.tool import tb_server
    tb_server.start()
def get_standard_options():
    """Return (config, parser): the default PyPy config plus an optparse
    parser exposing all objspace.* options and the -H traceback server."""
    config = get_pypy_config()
    parser = to_optparse(config, useoptions=["objspace.*"],
                         extra_useage=extra_useage)
    parser.add_option(
        '-H', action="callback",
        callback=run_tb_server,
        help="use web browser for traceback info")
    return config, parser
def process_options(parser, argv=None):
    """Parse argv with `parser`, stopping at the first positional
    argument, and return the list of leftover positional arguments."""
    parser.disable_interspersed_args()
    _, leftover = parser.parse_args(argv)
    return leftover
def make_config(cmdlineopt, **kwds):
    """ make a config from cmdline options (which overrides everything)
    and kwds """
    # NOTE(review): cmdlineopt is currently unused in this function body.
    config = get_pypy_config(translating=False)
    # 'objspace' and 'usemodules' are handled specially; everything else
    # is forwarded verbatim to config.set()
    objspace = kwds.pop("objspace", None)
    if objspace is not None:
        config.objspace.name = objspace
    for modname in kwds.pop("usemodules", []):
        setattr(config.objspace.usemodules, modname, True)
    config.set(**kwds)
    return config
def make_objspace(config):
    """Import and instantiate the object space class selected by
    config.objspace.name."""
    mod = __import__('pypy.objspace.%s' % config.objspace.name,
                     None, None, ['Space'])
    Space = mod.Space
    #conf.objspace.logbytecodes = True
    space = Space(config)
    return space
| Python |
import exceptions, os
from pypy.tool import slaveproc
class IsolateException(Exception):
    # raised in the master for non-builtin exceptions coming from the slave
    pass
class IsolateInvoker(object):
    """Callable proxy bound to one function name inside an Isolate;
    exists mainly so remote invocations have a nice repr."""

    def __init__(self, isolate, name):
        self.isolate = isolate
        self.name = name

    def __repr__(self):
        return "<invoker for %r . %r>" % (self.isolate.module, self.name)

    def __call__(self, *args):
        return self.isolate._invoke(self.name, args)
class Isolate(object):
"""
Isolate lets load a module in a different process,
and support invoking functions from it passing and
returning simple values
module: a dotted module name or a tuple (directory, module-name)
"""
_closed = False
def __init__(self, module):
self.module = module
self.slave = slaveproc.SlaveProcess(os.path.join(os.path.dirname(__file__),
'isolate_slave.py'))
res = self.slave.cmd(('load', module))
assert res == 'loaded'
def __getattr__(self, name):
return IsolateInvoker(self, name)
def _invoke(self, func, args):
status, value = self.slave.cmd(('invoke', (func, args)))
print 'OK'
if status == 'ok':
return value
else:
exc_type_module, exc_type_name = value
if exc_type_module == 'exceptions':
raise getattr(exceptions, exc_type_name)
else:
raise IsolateException, "%s.%s" % value
def _close(self):
if not self._closed:
self.slave.close()
self._closed = True
def __del__(self):
self._close()
def close_isolate(isolate):
    """Explicitly shut down an Isolate's slave process."""
    assert isinstance(isolate, Isolate)
    isolate._close()
| Python |
"""
A color print.
"""
import sys
from py.__.misc.terminal_helper import ansi_print
from pypy.tool.ansi_mandelbrot import Driver
class AnsiLog:
    """py.log consumer that colorizes messages per keyword and renders
    'dot' progress either as plain dots or via the mandelbrot driver."""
    wrote_dot = False # XXX sharing state with all instances
    KW_TO_COLOR = {
        # color supress
        'red': ((31,), True),
        'bold': ((1,), True),
        'WARNING': ((31,), False),
        'event': ((1,), True),
        'ERROR': ((1, 31), False),
        'info': ((35,), False),
        'stub': ((34,), False),
    }

    def __init__(self, kw_to_color={}, file=None):
        # NOTE(review): mutable default argument; only read here, so the
        # usual sharing pitfall does not bite.
        self.kw_to_color = self.KW_TO_COLOR.copy()
        self.kw_to_color.update(kw_to_color)
        self.file = file
        self.fancy = True
        self.isatty = getattr(sys.stderr, 'isatty', lambda: False)
        if self.fancy and self.isatty():
            self.mandelbrot_driver = Driver()
        else:
            self.mandelbrot_driver = None

    def __call__(self, msg):
        """Render one log message (msg exposes .keywords and .content())."""
        tty = self.isatty()
        flush = False
        newline = True
        keywords = []
        esc = []  # collected ANSI escape codes for this message
        for kw in msg.keywords:
            color, supress = self.kw_to_color.get(kw, (None, False))
            if color:
                esc.extend(color)
            if not supress:
                keywords.append(kw)
        if 'start' in keywords:
            if tty:
                # keep the cursor on the line so following dots attach
                newline = False
                flush = True
                keywords.remove('start')
        elif 'done' in keywords:
            if tty:
                print >> sys.stderr
                return
        elif 'dot' in keywords:
            if tty:
                if self.fancy:
                    if not AnsiLog.wrote_dot:
                        self.mandelbrot_driver.reset()
                    self.mandelbrot_driver.dot()
                else:
                    ansi_print(".", tuple(esc), file=self.file, newline=False, flush=flush)
                AnsiLog.wrote_dot = True
                return
        if AnsiLog.wrote_dot:
            # terminate a pending dot-progress line before normal output
            AnsiLog.wrote_dot = False
            sys.stderr.write("\n")
        esc = tuple(esc)
        for line in msg.content().splitlines():
            ansi_print("[%s] %s" %(":".join(keywords), line), esc,
                       file=self.file, newline=newline, flush=flush)
ansi_log = AnsiLog()
# ____________________________________________________________
# Nice helper
def raise_nicer_exception(*extraargs):
    """Re-raise the currently handled exception as a dynamically built
    subclass whose str() appends `extraargs`, keeping the original
    traceback intact."""
    cls, e, tb = sys.exc_info()
    str_e = str(e)
    class ExcSubclass(cls):
        def __str__(self):
            lines = [str_e]
            for extra in extraargs:
                lines.append('\t.. %r' % (extra,))
            return '\n'.join(lines)
    # make the subclass impersonate the original exception type
    ExcSubclass.__name__ = cls.__name__ + "'"
    ExcSubclass.__module__ = cls.__module__
    try:
        # mutate the live exception so the augmented __str__ is used
        e.__class__ = ExcSubclass
    except TypeError: # doesn't work any more on 2.5 :-(
        pass
    raise ExcSubclass, e, tb
| Python |
import autopath
import py
from py.__.misc.cmdline import countloc
from py.xml import raw
pypydir = py.path.local(autopath.pypydir)
def isdocfile(p):
    """True for documentation files: .txt or well-known top-level names."""
    if p.ext == '.txt':
        return True
    return p.basename in ('README', 'NOTES', 'LICENSE')
def istestfile(p):
    """Return True if `p` is a Python test file: named test_* / *_test, or
    located under a directory called 'test' within its last four path
    components.

    BUG FIX: the original fell off the end and implicitly returned None
    for non-test files; make the False explicit (callers only use the
    truth value, so this is backward-compatible)."""
    if not p.check(file=1, ext='.py'):
        return False
    pb = p.purebasename
    if pb.startswith('test_') or pb.endswith('_test'):
        return True
    if 'test' in [x.basename for x in p.parts()[-4:]]:
        return True
    return False
notistestfile = lambda x: not istestfile(x)
class relchecker:
    """Predicate: is a path inside the subtree `rel` (relative to the
    pypy root)?"""
    def __init__(self, rel):
        self.rel = rel
    def __call__(self, p):
        return p.relto(autopath.pypydir).startswith(self.rel)
def isfile(p):
    """True for regular files whose extension we count: .py, .txt, none."""
    if not p.check(file=1):
        return False
    return p.ext in ('.py', '.txt', '')
def recpypy(p):
    """Recursion filter: skip hidden directories and bundled/generated
    trees that should not count towards pypy's own totals."""
    name = p.basename
    if name[0] == '.':
        return False
    skipped = ('Pyrex', '_cache', 'unicodedata',
               'pypy-translation-snapshot')
    return name not in skipped
def getpypycounter():
    """Count lines in the whole pypy tree, honouring the filters above."""
    filecounter = countloc.FileCounter()
    root = py.path.local(autopath.pypydir)
    filecounter.addrecursive(root, isfile, rec=recpypy)
    return filecounter
class CounterModel:
    """Precomputed line/file totals (overall, test, non-test, docs) for a
    FileCounter, as consumed by the view* renderers below."""
    def __init__(self, pypycounter):
        self.counter = pypycounter
        self.totallines = pypycounter.numlines
        self.totalfiles = pypycounter.numfiles
        self.testlines = pypycounter.getnumlines(istestfile)
        self.testfiles = pypycounter.getnumfiles(istestfile)
        self.notestlines = pypycounter.getnumlines(notistestfile)
        self.notestfiles = pypycounter.getnumfiles(notistestfile)
        self.doclines = pypycounter.getnumlines(isdocfile)
        self.docfiles = pypycounter.getnumfiles(isdocfile)
#
# rendering
#
def row(*args):
    # one table row with one cell per argument; relies on the module-global
    # `html` (py.xml.html) bound in the __main__ section below
    return html.tr([html.td(arg) for arg in args])
def percent(x, y):
    """Render x as a percentage of y with two decimals, e.g. '25.00%'."""
    ratio = x / (y / 100.0)
    return "%.2f%%" % ratio
def viewlocsummary(model):
    """Summary table of line/file totals with test/doc percentages."""
    t = html.table(
        row("total number of lines", model.totallines, raw(" ")),
        row("number of testlines", model.testlines,
            percent(model.testlines, model.totallines)),
        row("number of non-testlines", model.notestlines,
            percent(model.notestlines, model.totallines)),
        row("total number of files", model.totalfiles, raw(" ")),
        row("number of testfiles", model.testfiles,
            percent(model.testfiles, model.totalfiles)),
        row("number of non-testfiles", model.notestfiles,
            percent(model.notestfiles, model.totalfiles)),
    )
    # doc rows only when there are docs at all (subdir views may have none)
    if model.docfiles:
        t.append(row("number of docfiles", model.docfiles,
                     percent(model.docfiles, model.totalfiles)))
        t.append(row("number of doclines", model.doclines,
                     percent(model.doclines, model.totallines)))
    return t
def viewloclist(model):
    """Table of (path, line count) pairs, largest first; files with
    fewer than 3 lines are skipped."""
    t = html.table()
    d = model.counter.file2numlines
    paths = d.items()
    paths.sort(lambda x,y : -cmp(x[1], y[1])) # sort by numlines
    for p, numlines in paths:
        if numlines < 3:
            continue
        t.append(row(p.relto(pypydir.dirpath()), numlines))
    return t
def viewsubdirs(model):
    """Per-subdirectory summary + file list for every first-level
    directory below the pypy root."""
    t = html.table()
    for p in pypydir.listdir():
        if p.basename in '_cache .svn'.split():
            continue
        if p.check(dir=1):
            # recount just this subtree and render its own model
            counter = countloc.FileCounter()
            counter.addrecursive(p, isfile, recpypy)
            model = CounterModel(counter)
            t.append(row(html.h2(p.relto(pypydir.dirpath()))))
            t.append(viewlocsummary(model))
            t.append(viewloclist(model))
    return t
if __name__ == '__main__':
    # output target defaults to index.html in the current directory
    if len(py.std.sys.argv) >= 2:
        target = py.path.local(py.std.sys.argv[1])
    else:
        target = py.path.local('index.html')
    print "writing source statistics to", target
    pypycounter = getpypycounter()
    model = CounterModel(pypycounter)
    rev = py.path.svnwc(autopath.pypydir).info().rev
    html = py.xml.html  # module-global used by the view helpers above
    doc = html.html(
        html.head(
            html.title("PyPy Statistics %d" % rev),
        ),
        html.body(
            html.h2("rev %d PyPy Summary of Files and Lines" % rev),
            viewlocsummary(model),
            html.h2("Details on first-level subdirectories"),
            viewsubdirs(model),
            html.h3("PyPy Full List Files and Lines"),
            viewloclist(model),
            html.p("files with less than 3 lines ignored")
        )
    )
    content = doc.unicode(indent=2).encode('utf8')
    target.write(content)
| Python |
import py
from os import system, chdir
from urllib import urlopen
log_URL = 'http://tismerysoft.de/pypy/irc-logs/'
archive_FILENAME = 'pypy.tar.gz'
tempdir = py.test.ensuretemp("irc-log")
# get compressed archive
chdir( str(tempdir))
system('wget -q %s%s' % (log_URL, archive_FILENAME))
system('tar xzf %s' % archive_FILENAME)
chdir('pypy')

# get more recent daily logs not yet in the archive
pypydir = tempdir.join('pypy')
for line in urlopen(log_URL + 'pypy/').readlines():
    # '%23' is the URL-encoded '#' of '#pypy.log.<date>'
    i = line.find('%23pypy.log.')
    if i == -1:
        continue
    filename = line[i:].split('"')[0]
    system('wget -q %spypy/%s' % (log_URL, filename))

# rename to YYYYMMDD
for log_filename in pypydir.listdir('#pypy.log.*'):
    rename_to = None
    b = log_filename.basename
    if '-' in b:
        # already dated like 2006-03-07: just drop the dashes
        rename_to = log_filename.basename.replace('-', '')
    elif len(b) == 19:
        # dated like 07Mar2006: convert month name to a number
        months= 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
        day = b[10:12]
        month = months.index(b[12:15]) + 1
        year = b[15:20]
        # NOTE(review): '%02s' pads with spaces, not zeros -- a one-digit
        # day yields ' 5' rather than '05'; confirm intended.
        rename_to = '#pypy.log.%04s%02d%02s' % (year, month, day)
    if rename_to:
        log_filename.rename(rename_to)
        #print 'RENAMED', log_filename, 'TO', rename_to

# print sorted list of filenames of daily logs
print 'irc://irc.freenode.org/pypy'
print 'date, messages, visitors'
for log_filename in pypydir.listdir('#pypy.log.*'):
    n_messages, visitors = 0, {}
    f = str(log_filename)
    for s in file(f):
        if '<' in s and '>' in s:
            # '<nick>' marks a chat message
            n_messages += 1
        elif ' joined #pypy' in s:
            v = s.split()[1]
            visitors[v] = True
    print '%04s-%02s-%02s, %d, %d' % (f[-8:-4], f[-4:-2], f[-2:], n_messages, len(visitors.keys()))
| Python |
import py
# list the release directories (skipping names containing 'x') in svn
release_URL = 'http://codespeak.net/svn/pypy/release/'
releases = [r[:-2] for r in py.std.os.popen('svn list ' + release_URL).readlines() if 'x' not in r]

# write a small CSV of release dates by scraping `svn info` per release
f = file('release_dates.txt', 'w')
print >> f, 'date, release'
for release in releases:
    for s in py.std.os.popen('svn info ' + release_URL + release).readlines():
        if s.startswith('Last Changed Date'):
            date = s.split()[3]
            print >> f, date, ',', release
            break
f.close()
| Python |
##"""Thread-local storage."""
##
##try:
## from thread import _local as tlsobject
##except ImportError: # Python < 2.4
##
## # XXX needs a real object whose attributes are visible only in
## # the thread that reads/writes them.
##
## import autopath, os
## filename = os.path.join(os.path.dirname(autopath.pypydir),
## 'lib-python', '2.4.1', '_threading_local.py')
## glob = {'__name__': '_threading_local'}
## execfile(filename, glob)
## tlsobject = glob['local']
## del glob, filename
# NOTE: a pre-2.4 thread-local fallback via _threading_local is kept
# commented out above; this stub deliberately provides plain storage.
class tlsobject(object):
    """Storage that is NOT THREAD-LOCAL AT ALL because we don't really need it
    at the moment, and it has a performance impact -- a minor one on top of 2.4,
    and an extreme one on top of 2.3 :-(((((
    """
| Python |
""" Trace object space configuration options - set with __pytrace__=1
in py.py """
from pypy.tool.traceop import ResultPrinter, ResultPrinterVerbose
def get_operations_all():
    """Return a dict (name -> name) of all traceable object-space
    operations, minus a few too-noisy internal ones."""
    from pypy.interpreter.baseobjspace import ObjSpace
    operations = dict([(r[0], r[0]) for r in ObjSpace.MethodTable])
    for name in ObjSpace.IrregularOpTable + ["get_and_call_function"]:
        operations[name] = name
    # Remove list
    for name in ["wrap", "unwrap", "interpclass_w"]:
        if name in operations:
            del operations[name]
    return operations
# Trace-option defaults, consulted by the trace object space machinery.
config = {
    # An optional filename to use for trace output. None is stdout
    "output_filename" : None,
    # Use a simple wrapped repr (fast) or try to do something more intelligent (slow)
    "repr_type_simple" : True,
    # Some internal interpreter code is written at applevel - by default
    # it is a good idea to hide this.
    "show_hidden_applevel" : False,
    # Many operations call back into the object space
    "recursive_operations" : False,
    # Show the bytecode or just the operations
    "show_bytecode" : True,
    # Indentor string used for output
    "indentor" : ' ',
    # Show wrapped values in bytecode
    "show_wrapped_consts_bytecode" : True,
    # Used to show relative position in tree
    "tree_pos_indicator" : "|-",
    # Printer class instantiated to render operation results
    "result_printer_clz" : ResultPrinter,
    # All traceable operations, keyed and valued by name
    "operations" : get_operations_all()
}
| Python |
# load opcode.py as pythonopcode from our own lib
__all__ = ['opmap', 'opname', 'HAVE_ARGUMENT',
'hasconst', 'hasname', 'hasjrel', 'hasjabs',
'haslocal', 'hascompare', 'hasfree', 'cmp_op']
def load_opcode():
    """Execute lib-python's modified opcode.py and return its namespace
    as a dict (avoids importing CPython's own opcode module)."""
    import py
    opcode_path = py.path.local(__file__).dirpath().dirpath().dirpath('lib-python/modified-2.4.1/opcode.py')
    d = {}
    execfile(str(opcode_path), d)
    return d
opcode_dict = load_opcode()
del load_opcode
# copy some stuff from opcode.py directly into our globals
for name in __all__:
if name in opcode_dict:
globals()[name] = opcode_dict[name]
opcode_method_names = ['MISSING_OPCODE'] * 256
for name, index in opmap.items():
opcode_method_names[index] = name.replace('+', '_')
# ____________________________________________________________
# RPython-friendly helpers and structures
from pypy.rlib.unroll import unrolling_iterable
class OpcodeDesc(object):
    """Static description of one bytecode: its name, dispatch method
    name, numeric index and whether it takes an argument."""
    def __init__(self, name, index):
        self.name = name
        self.methodname = opcode_method_names[index]
        self.index = index
        self.hasarg = index >= HAVE_ARGUMENT

    def _freeze_(self):
        # RPython translation hint: treat instances as prebuilt constants
        return True

    def is_enabled(self, space):
        """Check if the opcode should be enabled in the space's configuration.
        (Returns True for all standard opcodes.)"""
        opt = space.config.objspace.opcodes
        return getattr(opt, self.name, True)
    is_enabled._annspecialcase_ = 'specialize:memo'

    # for predictable results, we try to order opcodes most-used-first
    opcodeorder = [124, 125, 100, 105, 1, 131, 116, 111, 106, 83, 23, 93, 113, 25, 95, 64, 112, 66, 102, 110, 60, 92, 62, 120, 68, 87, 32, 136, 4, 103, 24, 63, 18, 65, 15, 55, 121, 3, 101, 22, 12, 80, 86, 135, 126, 90, 140, 104, 2, 33, 20, 108, 107, 31, 134, 132, 88, 30, 133, 130, 137, 141, 61, 122, 11, 40, 74, 73, 51, 96, 21, 42, 56, 85, 82, 89, 142, 77, 78, 79, 91, 76, 97, 57, 19, 43, 84, 50, 41, 99, 53, 26]

    def sortkey(self):
        # (rank in opcodeorder, index); opcodes not listed sort last
        try:
            i = self.opcodeorder.index(self.index)
        except ValueError:
            i = 1000000
        return i, self.index

    def __cmp__(self, other):
        return cmp(self.sortkey(), other.sortkey())
# opcode index -> OpcodeDesc
opdescmap = {}

class opcodedesc:
    """A namespace mapping OPCODE_NAME to OpcodeDescs."""

# populate the namespace and the index map from opmap
for name, index in opmap.items():
    desc = OpcodeDesc(name, index)
    setattr(opcodedesc, name, desc)
    opdescmap[index] = desc

# unrollable list of descs, most-used opcodes first (see OpcodeDesc.__cmp__)
lst = opdescmap.values()
lst.sort()
unrolling_opcode_descs = unrolling_iterable(lst)
# clean up module namespace
del name, index, desc, lst
| Python |
import sys, os
# this file runs some benchmarks with a pypy-c that is assumed to be
# built using the MeasuringDictImplementation.
# it should be run with pypy/translator/goal as the cwd, and you'll
# need to hack a copy of rst2html for yourself (svn docutils
# required).
# remove stale stats from a previous run (pypy-c appends to this file)
try:
    os.unlink("dictinfo.txt")
except os.error:
    pass

# benchmark name -> argument list passed to the measuring pypy-c
progs = [('pystone', ['-c', 'from test import pystone; pystone.main()']),
         ('richards', ['richards.py']),
         ('docutils', ['rst2html.py', '../../doc/coding-guide.txt', 'foo.html']),
         ('translate', ['translate.py', '--backendopt', '--no-compile', '--batch',
                        '--text', 'targetrpystonedalone.py'])
         ]

EXE = sys.argv[1]

for suffix, args in progs:
    # run each benchmark, then keep its dict statistics under a unique name
    os.spawnv(os.P_WAIT, EXE, [EXE] + args)
    os.rename('dictinfo.txt', 'dictinfo-%s.txt'%suffix)
| Python |
#
# Common entry point to access a temporary directory (for testing, etc.)
# This uses the py lib's logic to create numbered directories. The last
# three temporary directories are kept.
#
import autopath
import os
from py.path import local
udir = local.make_numbered_dir(prefix='usession-', keep=3)
| Python |
#!/usr/bin/env python
import Image
import ImageDraw
import urllib
import StringIO
import math
import sys
import colorsys
import py
pyhtml = py.xml.html
from result import (
PerfResult, PerfResultDelta, PerfResultCollection,
PerfTable
)
class Page:
    """generates a benchmark summary page

    The generated page is self contained, all images are inlined. The
    page refers to a local css file 'benchmark_report.css'.
    """
    def __init__(self, perftable=None):
        """perftable is of type PerfTable"""
        self.perftable = perftable
    def render(self):
        """return full rendered page html tree for the perftable."""
        perftable = self.perftable
        testid2collections = perftable.get_testid2collections()
        # loop to get per-revision collection and the
        # maximum delta revision collections.
        maxdeltas = []
        revdeltas = {}
        start = end = None
        for testid, collections in testid2collections.iteritems():
            if len(collections) < 2: # less than two revisions sampled
                continue
            # collections are sorted by lowest REVNO first
            delta = PerfResultDelta(collections[0], collections[-1])
            maxdeltas.append(delta)
            # record deltas on target revisions
            for col1, col2 in zip(collections, collections[1:]):
                revdelta = PerfResultDelta(col1, col2)
                l = revdeltas.setdefault(col2.revision, [])
                l.append(revdelta)
            # keep track of overall earliest and latest revision
            if start is None or delta._from.revision < start.revision:
                start = delta._from.results[0]
            if end is None or delta._to.revision > end.revision:
                end = delta._to.results[0]
        # sort by best changes first
        maxdeltas.sort(key=lambda x: x.percent)
        # generate revision reports, newest revision first
        revno_deltas = revdeltas.items()
        revno_deltas.sort()
        revno_deltas.reverse()
        revreports = []
        for revno, deltas in revno_deltas:
            # sort by best changes first
            deltas.sort(key=lambda x: x.percent)
            revreports.append(self.render_report(deltas))
        assert revreports
        # generate images
        #
        # generate the x axis, a list of revision numbers
        xaxis = perftable.list_values_of('revision')
        xaxis.sort()
        # samples of tests in the order of max_deltas test_ids
        samples = [testid2collections[delta.test_id] for delta in maxdeltas]
        # images in the order of max_deltas test_ids
        images = [self.gen_image_map(sample, xaxis) for sample in samples]
        page = pyhtml.html(
            pyhtml.head(
                pyhtml.meta(
                    name="Content-Type",
                    value="text/html; charset=latin1",
                ),
                pyhtml.link(rel="stylesheet",
                            type="text/css",
                            href="benchmark_report.css")
            ),
            pyhtml.body(
                #self.render_header(start, end),
                self.render_table(maxdeltas, images, anchors=False),
                *revreports
            )
        )
        return page
    def _revision_report_name(self, sample):
        """return anchor name for reports,
        used to link from an image to a report"""
        return 'revno_%s' % (sample.revision,)
    def _revision_test_report_name(self, sample):
        """return anchor name for per-test reports,
        used to link from an image to a report"""
        return 'revno_%s_test_id_%s' % (sample.revision, sample.test_id)
    def gen_image_map(self, samples, revisions=[]):
        """return a tuple of an inlined image and the corresponding image map

        samples is a list of PerfResultCollections
        revisions is a list of revision numbers and represents the x
        axis of the graph"""
        revision2collection = dict(((s.revision, s) for s in samples))
        revision2delta = dict()
        for col1, col2 in zip(samples, samples[1:]):
            revision2delta[col2.revision] = PerfResultDelta(col1, col2)
        max_value = max([s.min_elapsed for s in samples])
        map_name = samples[0].test_id # link between the image and the image map
        if max_value == 0:
            #nothing to draw
            return (pyhtml.span('No value greater than 0'), py.html.span(''))
        step = 3 # pixels for each revision on x axis
        xsize = (len(revisions) - 1) * step +2
        ysize = 32 # height of the image
        im = Image.new("RGB", (xsize + 2, ysize), 'white')
        draw = ImageDraw.Draw(im)
        areas = []
        for x, revno in enumerate(revisions):
            if revno not in revision2collection: # data for this revision?
                continue
            sample = revision2collection[revno]
            y = ysize - (sample.min_elapsed *(ysize -2)/max_value) #scale value
            #draw.line((x*step, y, (x+1)*step, y), fill="#888888")
            draw.rectangle((x*step+1, y, x*step + step -1, ysize),
                           fill="#BBBBBB")
            # color the bar head by significance of the change (>15%)
            head_color = "#000000"
            if revno in revision2delta:
                change = revision2delta[revno].percent
                if change < -0.15:
                    head_color = "#00FF00"
                elif change > 0.15:
                    head_color = "#FF0000"
            draw.rectangle((x*step+1, y-1, x*step + step -1, y+1),
                           fill=head_color)
            # clickable strip linking to the per-test revision report
            areas.append(
                pyhtml.area(
                    shape="rect",
                    coords= '%s,0,%s,%s' % (x*step, (x+1)*step, ysize),
                    href='#%s' % (self._revision_test_report_name(sample),),
                    title="%s Value: %s" % (sample.revision,sample.min_elapsed)
                ))
        del draw
        f = StringIO.StringIO()
        im.save(f, "GIF")
        # inline the image as a data: URI so the page is self-contained
        image_src = 'data:image/gif,%s' % (urllib.quote(f.getvalue()),)
        # NOTE(review): self._test_id is not defined in the visible part of
        # this class -- confirm it exists elsewhere.
        html_image = pyhtml.img(src=image_src,
                                alt='Benchmark graph of %s' % (self._test_id(
                                    sample)),
                                usemap='#%s' % (map_name,))
        html_map = pyhtml.map(areas, name=map_name)
        return html_image, html_map
def _color_for_change(self, delta, max_value=20):
"""return green for negative change_in_percent and red for
positve change_in_percent. If change_in_percent equals 0, then
grey is returned.
The colors range from light green to full saturated green and
light red to full saturated red. Full saturation is reached
when change_in_percent >= max_value.
"""
#rgb values are between 0 and 255
#hsv values are between 0 and 1
if len(delta._from) < 3 or len(delta._to) < 3:
return '#%02x%02x%02x' % (200,200,200) # grey
change_in_percent = delta.percent * 100
if change_in_percent < 0:
basic_color = (0,1,0) # green
else:
basic_color = (1,0,0) # red
max_value = 20
change = min(abs(change_in_percent), max_value)
h,s,v = colorsys.rgb_to_hsv(*basic_color)
rgb = colorsys.hsv_to_rgb(h, float(change) / max_value, 255)
return '#%02x%02x%02x' % rgb
    def _change_report(self, delta):
        """return a red,green or gray colored html representation of a
        PerfResultDelta object.
        """
        # raw per-run timings on both sides, shown for reference
        fromtimes = [x.elapsed_time for x in delta._from.results]
        totimes = [x.elapsed_time for x in delta._to.results]
        results = pyhtml.div(
            "r%d [%s] -> r%d[%s]" %(delta._from.revision,
                                    ", ".join(map(str, fromtimes)),
                                    delta._to.revision,
                                    ", ".join(map(str, totimes)))
            )
        # summary line tinted by _color_for_change: green improvement,
        # red regression, grey for too-few samples
        return pyhtml.td(
            pyhtml.div(
            '%+.1f%% change [%.0f - %.0f = %+.0f ms]' %(
                 delta.percent * 100,
                 delta._to.min_elapsed,
                 delta._from.min_elapsed,
                 delta.delta),
            style= "background-color: %s" % (
                       self._color_for_change(delta)),
            ),
            results,
            )
def render_revision_header(self, sample):
"""return a header for a report with informations about
committer, messages, revision date.
"""
revision_id = pyhtml.li('Revision ID: %s' % (sample.revision_id,))
revision = pyhtml.li('Revision: %s' % (sample.revision,))
date = pyhtml.li('Date: %s' % (sample.revision_date,))
logmessage = pyhtml.li('Log Message: %s' % (sample.message,))
committer = pyhtml.li('Committer: %s' % (sample.committer,))
return pyhtml.ul([date, committer, revision, revision_id, logmessage])
    def render_report(self, deltas):
        """return a report table with header.
        All deltas must have the same revision_id."""
        # drop entries without a test_id (nothing to report on)
        deltas = [d for d in deltas if d.test_id]
        # any delta's target sample carries the revision metadata
        sample = deltas[0]._to.getfastest()
        report_list = self.render_revision_header(sample)
        table = self.render_table(deltas)
        return pyhtml.div(
            # anchor so graphs can deep-link to this revision's report
            pyhtml.a(name=self._revision_report_name(sample)),
            report_list,
            table,
            )
    def render_header(self, start, end):
        """return the header of the page, sample output:
        benchmarks on bzr.dev
        from r1231 2006-04-01
        to r1888 2006-07-01
        """
        # three stacked title lines: branch nick, first and last revision
        return [
            pyhtml.div(
                'Benchmarks for %s' % (start.nick,),
                class_="titleline maintitle",
            ),
            pyhtml.div(
                'from r%s %s' % (
                    start.revision,
                    start.revision_date,
                ),
            class_="titleline",
            ),
            pyhtml.div(
                'to r%s %s' % (
                    end.revision,
                    end.revision_date,
                ),
            class_="titleline",
            ),
        ]
def _test_id(self, sample):
"""helper function, return a short form of a test_id """
return '.'.join(sample.test_id.split('.')[-2:])
    def render_table(self, deltas, images=None, anchors=True):
        """return an html table for deltas and images.
        this function is used to generate the main table and
        the table of each report"""
        classname = "main"
        if images is None:
            # per-revision report: no graph column
            classname = "report"
            images = [None] * len(deltas)
        table = []
        for delta, image in zip(deltas, images):
            row = []
            anchor = ''
            if anchors:
                # deep-link target for this (revision, test) pair
                anchor = pyhtml.a(name=self._revision_test_report_name(
                    delta._to.getfastest()))
            row.append(pyhtml.td(anchor, self._test_id(delta._to.getfastest()),
                                 class_='testid'))
            if image:
                row.append(pyhtml.td(pyhtml.div(*image)))
            row.append(self._change_report(delta))
            table.append(pyhtml.tr(*row))
        return pyhtml.table(border=1, class_=classname, *table)
def main(path_to_perf_history='../.perf_history'):
    """Read the perf history data file, render the summary page and
    write it to benchmark_report.html in the current directory."""
    try:
        perftable = PerfTable(file(path_to_perf_history).readlines())
    except IOError:
        print 'Cannot find a data file. Please specify one.'
        sys.exit(-1)
    page = Page(perftable).render()
    f = file('benchmark_report.html', 'w')
    try:
        f.write(page.unicode(indent=2).encode('latin-1'))
    finally:
        f.close()
if __name__ == '__main__':
    # optional argument: path to the perf_history file
    # NOTE(review): main() accepts a single parameter, so the 3-arg
    # form below would raise TypeError -- branch support looks
    # unfinished; verify before relying on it.
    if len(sys.argv) == 1:
        main()
    elif len(sys.argv) == 2:
        main(sys.argv[1])
    elif len(sys.argv ) == 3:
        main(*sys.argv[1:3])
    else:
        print 'Usage: benchmark_report.py [perf_history [branch]]'
| Python |
import py
class PerfResult:
    """One benchmark measurement: a single test run at one revision.

    Every constructor argument is stored as a same-named attribute.
    """
    def __init__(self, date=0.0, test_id="", revision=0.0,
                 revision_id="NONE", timestamp=0.0,
                 revision_date=0.0, elapsed_time=-1,
                 committer="", message="", nick=""):
        self.date = date
        self.test_id = test_id
        self.revision = revision
        self.revision_id = revision_id
        self.timestamp = timestamp
        self.revision_date = revision_date
        self.elapsed_time = elapsed_time
        self.committer = committer
        self.message = message
        self.nick = nick
class PerfResultCollection(object):
    """A group of PerfResult objects that share the same test_id,
    revision and revision_id (validated by check())."""

    def __init__(self, results=None):
        # keep our own copy so later append/extend cannot surprise the caller
        if results is None:
            self.results = []
        else:
            self.results = list(results)

    def __repr__(self):
        self.check()
        if not self.results:
            return "<PerfResultCollection EMPTY>"
        first = self.results[0]
        return "<PerfResultCollection test_id=%s, revno=%s>" %(
            first.test_id, first.revision)

    @property
    def min_elapsed(self):
        """Elapsed time of the fastest run in the collection."""
        return self.getfastest().elapsed_time

    def getfastest(self):
        """Return the PerfResult with the smallest elapsed_time,
        or None for an empty collection."""
        best = None
        for candidate in self.results:
            if best is None or candidate.elapsed_time < best.elapsed_time:
                best = candidate
        return best

    @property
    def test_id(self):
        # undefined on an empty collection (IndexError)
        return self.results[0].test_id

    @property
    def revision_id(self):
        return self.results[0].revision_id

    @property
    def revision(self):
        return self.results[0].revision

    def check(self):
        """Assert all results agree on identity and have distinct dates."""
        for earlier, later in zip(self.results, self.results[1:]):
            assert earlier.revision_id == later.revision_id
            assert earlier.test_id == later.test_id
            assert earlier.revision == later.revision
            assert earlier.date != later.date

    def append(self, sample):
        self.results.append(sample)
        self.check()

    def extend(self, results):
        self.results.extend(results)
        self.check()

    def __len__(self):
        return len(self.results)
class PerfResultDelta:
    """represents the difference of two PerfResultCollections"""
    def __init__(self, _from, _to=None):
        # a single non-None argument means "no change": compare the
        # collection against itself
        if _from is None:
            _from = _to
        if _to is None:
            _to = _from
        # plain lists of PerfResults are wrapped on the fly
        if isinstance(_from, list):
            _from = PerfResultCollection(_from)
        if isinstance(_to, list):
            _to = PerfResultCollection(_to)
        assert isinstance(_from, PerfResultCollection)
        assert isinstance(_to, PerfResultCollection)
        # deltas only make sense within a single test
        assert _from.test_id == _to.test_id, (_from.test_id, _to.test_id)
        self._from = _from
        self._to = _to
        self.test_id = self._to.test_id
        # absolute change of the best (minimum) elapsed time
        self.delta = self._to.min_elapsed - self._from.min_elapsed
        # relative change, guarded against division by zero
        m1 = self._from.min_elapsed
        m2 = self._to.min_elapsed
        if m1 == 0:
            self.percent = 0.0
        else:
            self.percent = float(m2-m1) / float(m1)
class PerfTable:
    """parses performance history data files and yields PerfResult objects
    through the get_results method.
    if an branch is given, it is used to get more information for each
    revision we have data from.
    """
    branch = None
    def __init__(self, iterlines = []):
        """:param iterline: lines of performance history data,
        e.g., history_file.realdlines()
        """
        # cache of revision_id -> (revision, rev, nick); only used by
        # the (currently dead) branch-based part of annotate()
        self._revision_cache = {}
        self.results = list(self.parse(iterlines))
    def parse(self, iterlines):
        """parse lines like
        --date 1152625530.0 hacker@canonical.com-20..6dc
        1906ms bzrlib....one_add_kernel_like_tree
        """
        # '--date' lines set the context for all following result lines
        date = None
        revision_id = None
        for line in iterlines:
            line = line.strip()
            if not line:
                continue
            if line.startswith('--date'):
                _, date, revision_id = line.split(None, 2)
                date = float(date)
                continue
            perfresult = PerfResult(date=date, revision_id=revision_id)
            elapsed_time, test_id = line.split(None, 1)
            # strip the trailing 'ms' unit before converting
            perfresult.elapsed_time = int(elapsed_time[:-2])
            perfresult.test_id = test_id.strip()
            yield self.annotate(perfresult)
    def add_lines(self, lines):
        """add lines of performance history data """
        self.results += list(self.parse(lines))
    def get_time_for_revision_id(self, revision_id):
        """return the data of the revision or 0"""
        if revision_id in self._revision_cache:
            return self._revision_cache[revision_id][1].timestamp
        return 0
    def get_time(self, revision_id):
        """return revision date or the date of recording the
        performance history data"""
        t = self.get_time_for_revision_id(revision_id)
        if t:
            return t
        # fall back to the recording date of the first matching result
        result = list(self.get_results(revision_ids=[revision_id],
                                       sorted_by_rev_date=False))[0]
        return result.date
    # class-level counter: shared by all PerfTable instances, used to
    # hand out fake sequential revision numbers in annotate()
    count = py.std.itertools.count()
    def annotate(self, result):
        """Try to put extra information for each revision on the
        PerfResult objects. These informations are retrieved from a
        branch object.
        """
        #if self.branch is None:
        #    return result
        # stub values: no real bzr branch is available here, so fake
        # revision metadata is attached instead
        class Branch:
            revision_id = result.revision_id
            nick = "fake"
        self.branch = Branch()
        result.revision = self.count.next()
        result.revision_date = "01/01/2007"
        result.message = "fake log message"
        result.timestamp = 1231231.0
        return result
        # NOTE(review): everything below is dead code left over from
        # the bzr-based version (note also the ``result.timstamp``
        # typo); kept verbatim, never executed.
        revision_id = result.revision_id
        if revision_id in self._revision_cache:
            revision, rev, nick = self._revision_cache[revision_id]
        else:
            revision = self.branch.revision_id_to_revno(revision_id)
            rev = self.branch.repository.get_revision(revision_id)
            nick = self.branch._get_nick()
            self._revision_cache[revision_id] = (revision, rev, nick)
        result.revision = revision
        result.committer = rev.committer
        result.message = rev.message
        result.timstamp = rev.timestamp
        # XXX no format_date, but probably this whole function
        # goes away soon
        result.revision_date = format_date(rev.timestamp, rev.timezone or 0)
        result.nick = nick
        return result
    def get_results(self, test_ids=None, revision_ids=None,
                    sorted_by_rev_date=True):
        # XXX we might want to build indexes for speed
        # NOTE(review): sorted_by_rev_date is accepted but ignored
        for result in self.results:
            if test_ids and result.test_id not in test_ids:
                continue
            if revision_ids and result.revision_id not in revision_ids:
                continue
            yield result
    def list_values_of(self, attr):
        """return a list of unique values of the specified attribute
        of PerfResult objects"""
        return dict.fromkeys((getattr(r, attr) for r in self.results)).keys()
    def get_testid2collections(self):
        """return a mapping of test_id to list of PerfResultCollection
        sorted by revision"""
        test_ids = self.list_values_of('test_id')
        testid2resultcollections = {}
        for test_id in test_ids:
            # group this test's results by revision number
            revnos = {}
            for result in self.get_results(test_ids=[test_id]):
                revnos.setdefault(result.revision, []).append(result)
            for revno, results in revnos.iteritems():
                collection = PerfResultCollection(results)
                l = testid2resultcollections.setdefault(test_id, [])
                l.append(collection)
            # sort collection list by revision number
            # NOTE(review): this re-sorts every list on each outer
            # iteration (quadratic); harmless but wasteful
            for collections in testid2resultcollections.itervalues():
                collections.sort(lambda x,y: cmp(x.revision, y.revision))
        return testid2resultcollections
| Python |
#!/usr/bin/env python
import Image
import ImageDraw
import urllib
import StringIO
import math
import sys
import colorsys
import py
pyhtml = py.xml.html
from result import (
PerfResult, PerfResultDelta, PerfResultCollection,
PerfTable
)
class Page:
"""generates a benchmark summary page
The generated page is self contained, all images are inlined. The
page refers to a local css file 'benchmark_report.css'.
"""
def __init__(self, perftable=None):
"""perftable is of type PerfTable"""
self.perftable = perftable
    def render(self):
        """return full rendered page html tree for the perftable."""
        perftable = self.perftable
        testid2collections = perftable.get_testid2collections()
        # loop to get per-revision collection and the
        # maximum delta revision collections.
        maxdeltas = []
        revdeltas = {}
        start = end = None
        for testid, collections in testid2collections.iteritems():
            if len(collections) < 2: # less than two revisions sampled
                continue
            # collections are sorted by lowest REVNO first;
            # first-vs-last gives the overall change for this test
            delta = PerfResultDelta(collections[0], collections[-1])
            maxdeltas.append(delta)
            # record deltas on target revisions
            for col1, col2 in zip(collections, collections[1:]):
                revdelta = PerfResultDelta(col1, col2)
                l = revdeltas.setdefault(col2.revision, [])
                l.append(revdelta)
            # keep track of overall earliest and latest revision
            if start is None or delta._from.revision < start.revision:
                start = delta._from.results[0]
            if end is None or delta._to.revision > end.revision:
                end = delta._to.results[0]
        # sort by best changes first
        maxdeltas.sort(key=lambda x: x.percent)
        # generate revision reports, newest revision first
        revno_deltas = revdeltas.items()
        revno_deltas.sort()
        revno_deltas.reverse()
        revreports = []
        for revno, deltas in revno_deltas:
            # sort by best changes first
            deltas.sort(key=lambda x: x.percent)
            revreports.append(self.render_report(deltas))
        assert revreports
        # generate images
        #
        # generate the x axis, a list of revision numbers
        xaxis = perftable.list_values_of('revision')
        xaxis.sort()
        # samples of tests in the order of max_deltas test_ids
        samples = [testid2collections[delta.test_id] for delta in maxdeltas]
        # images in the order of max_deltas test_ids
        images = [self.gen_image_map(sample, xaxis) for sample in samples]
        page = pyhtml.html(
            pyhtml.head(
                pyhtml.meta(
                    name="Content-Type",
                    value="text/html; charset=latin1",
                ),
                pyhtml.link(rel="stylesheet",
                            type="text/css",
                            href="benchmark_report.css")
            ),
            pyhtml.body(
                #self.render_header(start, end),
                self.render_table(maxdeltas, images, anchors=False),
                *revreports
            )
        )
        return page
def _revision_report_name(self, sample):
"""return anchor name for reports,
used to link from an image to a report"""
return 'revno_%s' % (sample.revision,)
def _revision_test_report_name(self, sample):
"""return anchor name for reports,
used to link from an image to a report"""
return 'revno_%s_test_id_%s' % (sample.revision, sample.test_id)
def gen_image_map(self, samples, revisions=[]):
"""return a tuple of an inlined image and the corresponding image map
samples is a list of PerfResultCollections
revisions is a list of revision numbers and represents the x
axis of the graph"""
revision2collection = dict(((s.revision, s) for s in samples))
revision2delta = dict()
for col1, col2 in zip(samples, samples[1:]):
revision2delta[col2.revision] = PerfResultDelta(col1, col2)
max_value = max([s.min_elapsed for s in samples])
map_name = samples[0].test_id # link between the image and the image map
if max_value == 0:
#nothing to draw
return (pyhtml.span('No value greater than 0'), py.html.span(''))
step = 3 # pixels for each revision on x axis
xsize = (len(revisions) - 1) * step +2
ysize = 32 # height of the image
im = Image.new("RGB", (xsize + 2, ysize), 'white')
draw = ImageDraw.Draw(im)
areas = []
for x, revno in enumerate(revisions):
if revno not in revision2collection: # data for this revision?
continue
sample = revision2collection[revno]
y = ysize - (sample.min_elapsed *(ysize -2)/max_value) #scale value
#draw.line((x*step, y, (x+1)*step, y), fill="#888888")
draw.rectangle((x*step+1, y, x*step + step -1, ysize),
fill="#BBBBBB")
head_color = "#000000"
if revno in revision2delta:
change = revision2delta[revno].percent
if change < -0.15:
head_color = "#00FF00"
elif change > 0.15:
head_color = "#FF0000"
draw.rectangle((x*step+1, y-1, x*step + step -1, y+1),
fill=head_color)
areas.append(
pyhtml.area(
shape="rect",
coords= '%s,0,%s,%s' % (x*step, (x+1)*step, ysize),
href='#%s' % (self._revision_test_report_name(sample),),
title="%s Value: %s" % (sample.revision,sample.min_elapsed)
))
del draw
f = StringIO.StringIO()
im.save(f, "GIF")
image_src = 'data:image/gif,%s' % (urllib.quote(f.getvalue()),)
html_image = pyhtml.img(src=image_src,
alt='Benchmark graph of %s' % (self._test_id(
sample)),
usemap='#%s' % (map_name,))
html_map = pyhtml.map(areas, name=map_name)
return html_image, html_map
def _color_for_change(self, delta, max_value=20):
"""return green for negative change_in_percent and red for
positve change_in_percent. If change_in_percent equals 0, then
grey is returned.
The colors range from light green to full saturated green and
light red to full saturated red. Full saturation is reached
when change_in_percent >= max_value.
"""
#rgb values are between 0 and 255
#hsv values are between 0 and 1
if len(delta._from) < 3 or len(delta._to) < 3:
return '#%02x%02x%02x' % (200,200,200) # grey
change_in_percent = delta.percent * 100
if change_in_percent < 0:
basic_color = (0,1,0) # green
else:
basic_color = (1,0,0) # red
max_value = 20
change = min(abs(change_in_percent), max_value)
h,s,v = colorsys.rgb_to_hsv(*basic_color)
rgb = colorsys.hsv_to_rgb(h, float(change) / max_value, 255)
return '#%02x%02x%02x' % rgb
def _change_report(self, delta):
"""return a red,green or gray colored html representation of a
PerfResultDelta object.
"""
fromtimes = [x.elapsed_time for x in delta._from.results]
totimes = [x.elapsed_time for x in delta._to.results]
results = pyhtml.div(
"r%d [%s] -> r%d[%s]" %(delta._from.revision,
", ".join(map(str, fromtimes)),
delta._to.revision,
", ".join(map(str, totimes)))
)
return pyhtml.td(
pyhtml.div(
'%+.1f%% change [%.0f - %.0f = %+.0f ms]' %(
delta.percent * 100,
delta._to.min_elapsed,
delta._from.min_elapsed,
delta.delta),
style= "background-color: %s" % (
self._color_for_change(delta)),
),
results,
)
def render_revision_header(self, sample):
"""return a header for a report with informations about
committer, messages, revision date.
"""
revision_id = pyhtml.li('Revision ID: %s' % (sample.revision_id,))
revision = pyhtml.li('Revision: %s' % (sample.revision,))
date = pyhtml.li('Date: %s' % (sample.revision_date,))
logmessage = pyhtml.li('Log Message: %s' % (sample.message,))
committer = pyhtml.li('Committer: %s' % (sample.committer,))
return pyhtml.ul([date, committer, revision, revision_id, logmessage])
def render_report(self, deltas):
"""return a report table with header.
All deltas must have the same revision_id."""
deltas = [d for d in deltas if d.test_id]
sample = deltas[0]._to.getfastest()
report_list = self.render_revision_header(sample)
table = self.render_table(deltas)
return pyhtml.div(
pyhtml.a(name=self._revision_report_name(sample)),
report_list,
table,
)
def render_header(self, start, end):
"""return the header of the page, sample output:
benchmarks on bzr.dev
from r1231 2006-04-01
to r1888 2006-07-01
"""
return [
pyhtml.div(
'Benchmarks for %s' % (start.nick,),
class_="titleline maintitle",
),
pyhtml.div(
'from r%s %s' % (
start.revision,
start.revision_date,
),
class_="titleline",
),
pyhtml.div(
'to r%s %s' % (
end.revision,
end.revision_date,
),
class_="titleline",
),
]
def _test_id(self, sample):
"""helper function, return a short form of a test_id """
return '.'.join(sample.test_id.split('.')[-2:])
def render_table(self, deltas, images=None, anchors=True):
"""return an html table for deltas and images.
this function is used to generate the main table and
the table of each report"""
classname = "main"
if images is None:
classname = "report"
images = [None] * len(deltas)
table = []
for delta, image in zip(deltas, images):
row = []
anchor = ''
if anchors:
anchor = pyhtml.a(name=self._revision_test_report_name(
delta._to.getfastest()))
row.append(pyhtml.td(anchor, self._test_id(delta._to.getfastest()),
class_='testid'))
if image:
row.append(pyhtml.td(pyhtml.div(*image)))
row.append(self._change_report(delta))
table.append(pyhtml.tr(*row))
return pyhtml.table(border=1, class_=classname, *table)
def main(path_to_perf_history='../.perf_history'):
try:
perftable = PerfTable(file(path_to_perf_history).readlines())
except IOError:
print 'Cannot find a data file. Please specify one.'
sys.exit(-1)
page = Page(perftable).render()
f = file('benchmark_report.html', 'w')
try:
f.write(page.unicode(indent=2).encode('latin-1'))
finally:
f.close()
if __name__ == '__main__':
if len(sys.argv) == 1:
main()
elif len(sys.argv) == 2:
main(sys.argv[1])
elif len(sys.argv ) == 3:
main(*sys.argv[1:3])
else:
print 'Usage: benchmark_report.py [perf_history [branch]]'
| Python |
import py
class ResultDB(object):
    """Collects BenchResult objects parsed from pickled benchmark runs."""
    def __init__(self):
        self.benchmarks = []
    def parsepickle(self, path):
        """Load a benchmark pickle (two consecutive dicts:
        id->numruns and id->besttime) and append one BenchResult
        per benchmark id."""
        f = path.open("rb")
        id2numrun = py.std.pickle.load(f)
        id2bestspeed = py.std.pickle.load(f)
        f.close()
        for id in id2numrun:
            besttime = id2bestspeed[id]
            numruns = id2numrun[id]
            print id
            bench = BenchResult(id, besttime, numruns)
            self.benchmarks.append(bench)
    def getbenchmarks(self, name=None):
        """Return all benchmarks, or only those whose name matches."""
        l = []
        for bench in self.benchmarks:
            if name is not None and name != bench.name:
                continue
            l.append(bench)
        return l
class BenchResult(object):
    """Parsed form of one pickled benchmark entry.

    The raw id encodes executable, backend, revision and test name,
    e.g. './pypy-cli-41005_richards' or '2.4.1_richards'.
    """
    def __init__(self, id, besttime, numruns):
        self._id = id
        stripped = id
        if stripped.startswith("./"):
            stripped = stripped[2:]
        if stripped.startswith("pypy"):
            # '<executable>_<testname>' with '<name>-<backend>-<revno>'
            self.executable, self.name = stripped.rsplit("_", 1)
            pieces = self.executable.split("-")
            self.backend = pieces[1]
            try:
                self.revision = int(pieces[2])
            except ValueError:
                self.revision = None
        else:
            # presumably cpython: '<version>_<testname>'
            self.revision, self.name = stripped.split("_", 1)
            self.backend = None
            self.executable = "cpython"
        self.besttime = besttime
        self.numruns = numruns
    def __repr__(self):
        return "<BenchResult %r>" %(self._id, )
if __name__ == "__main__":
    # smoke test: parse the benchmark pickle next to this script
    x = py.magic.autopath().dirpath("bench-unix.benchmark_result")
    db = ResultDB()
    db.parsepickle(x)
| Python |
#
| Python |
#!/usr/bin/env python
# XXX needs to run on codespeak
import py
import sys
import os
base = py.path.local('/www/codespeak.net/htdocs')
def runpytest(path, outfile):
    """Run py.test on *path* under the site-wide gendoc lock, appending
    stdout+stderr to *outfile*; returns the os.system() exit status.

    NOTE(review): the arguments are interpolated into a shell command
    unescaped -- acceptable only because this script passes trusted,
    locally constructed paths.
    """
    lockfile = base.join(".gendoclock")
    return os.system("/admin/bin/withlock %s py.test %s >>%s 2>&1" %(
        lockfile, path, outfile))
if __name__ == '__main__':
    results = []
    # each argument names a directory (under the htdocs base) whose
    # test suite regenerates documentation
    for fn in sys.argv[1:]:
        p = base.join(fn, abs=True)
        assert p.check(), p
        outfile = p.join("gendoc.log")
        # update the working copy so we document the latest revision
        wc = py.path.svnwc(p)
        wc.update()
        rev = wc.info().rev
        outfile.write("gendoc for %s revision %d\n\n" %(p, rev))
        errcode = runpytest(p, outfile)
        if errcode:
            results.append("in revision %d of %s" %(rev, p))
            results.append("  gendoc failed with %d, see %s " %(
                errcode, outfile))
            print results[-1]
    # non-empty results means at least one directory failed
    if results:
        for line in results:
            print >>sys.stderr, line
        sys.exit(1)
| Python |
#! /usr/bin/env python
"""
This script walks over the files and subdirs of the specified directories
('.' by default), and changes the svn properties to match the PyPy guidelines:
svn:ignore includes '*.pyc' and '*.pyo' for all directories
svn:eol-style is 'native' for *.py and *.txt files
"""
import sys, os
import autopath
import py
# Control bytes other than tab (9), lf (10), ff (12) and cr (13)
# mark a chunk of data as binary.
forbidden = [byte for byte in range(32)
             if byte not in (9, 10, 12, 13)]

def looksbinary(data, forbidden = [chr(i) for i in forbidden]):
    "Check if some data chunk appears to be binary."
    return any(control in data for control in forbidden)
def can_set_eol_style(path):
    "check to see if we could set eol-style on the path."
    data = path.read(mode='rb')
    if looksbinary(data):
        print "%s looks like a binary, ignoring" % path
        return False
    # normalize all line endings to the platform convention; if that
    # changes anything the file is rewritten in place (side effect!)
    original = data
    data = data.replace('\r\n', '\n')
    data = data.replace('\r', '\n')
    data = data.replace('\n', os.linesep)
    if data != original:
        print "*"*30
        print "---> %s <---" % path
        print ("WARNING: the file content was modified "
               "by fixing the EOL style.")
        print "*"*30
        #return False
        path.write(data, mode='wb')
        return True
    return True
def checkeolfile(path):
    "Return True if *path* is a text file whose EOL style we manage."
    managed_extensions = ('.txt', '.py', '.asc')
    return path.ext in managed_extensions
def fixdirectory(path):
    """Recursively fix svn:ignore and eol-style below *path*."""
    print "+ checking directory", path,
    fns = path.listdir(checkeolfile)
    if fns:
        # make sure '*.pyc' and '*.pyo' are in this directory's ignores
        ignores = path.propget('svn:ignore')
        newignores = ignores
        l = ignores.split('\n')
        for x in ('*.pyc', '*.pyo'):
            if x not in l:
                l.append(x)
        newignores = "\n".join(l)
        print ", setting ignores", newignores
        path.propset('svn:ignore', newignores)
    else:
        print
    for fn in fns:
        fixfile(fn)
    # recurse into versioned subdirectories, skipping symlinks
    for x in path.listdir(lambda x: x.check(dir=1, versioned=True)):
        if x.check(link=1):
            continue
        fixdirectory(x)
def fixfile(path):
    """Set svn:eol-style=native on *path* when safe; returns False for
    unversioned files."""
    # print a path relative to the cwd when possible
    x = path.localpath.relto(py.path.local())
    if not x:
        x = path.localpath
    print "checking", x,
    if path.check(versioned=0):
        return False
    oldprop = path.propget('svn:eol-style')
    if oldprop:
        print "eol-style already set (%r)" %(oldprop, )
    else:
        if can_set_eol_style(path):
            print "setting eol-style native"
            path.propset('svn:eol-style', 'native')
        else:
            print "cannot set eol-style"
if __name__ == '__main__':
if len(sys.argv) > 1:
for fname in sys.argv[1:]:
paths = [py.path.svnwc(x) for x in sys.argv[1:]]
else:
paths = [py.path.svnwc()]
for path in paths:
if path.check(link=1):
print 'ignoring link', path
elif path.check(dir=1):
fixdirectory(path)
elif path.check(file=1):
fixfile(path)
else:
print "ignoring", path
| Python |
import pypy
import py
def pypyrev(cache=[]):
    """ return subversion revision number for current pypy package.
    """
    # the mutable default argument is a deliberate per-process
    # memoization cell: the first call stores the revision, later
    # calls return it without touching the working copy
    try:
        return cache[0]
    except IndexError:
        pypydir = py.path.svnwc(pypy.__file__).dirpath()
        rev = pypydir.info().rev
        cache.append(rev)
        return rev
| Python |
import struct, sys
# This is temporary hack to run PyPy on PyPy
# until PyPy's struct module handle P format character.
# Determine the size of a machine pointer via struct's 'P' format;
# fall back to an integer format sized by sys.maxint when the struct
# module (e.g. PyPy's own) does not support 'P' yet.
try:
    HUGEVAL_FMT = 'P'
    HUGEVAL_BYTES = struct.calcsize('P')
except struct.error:
    if sys.maxint <= 2147483647:
        HUGEVAL_FMT = 'l'
        HUGEVAL_BYTES = 4
    else:
        HUGEVAL_FMT = 'q'
        HUGEVAL_BYTES = 8
# one past the largest unsigned value representable in HUGEVAL_BYTES;
# used by fixid() to wrap negative id() results into the unsigned range
HUGEVAL = 256 ** HUGEVAL_BYTES
def fixid(result):
    """Return *result* unchanged when non-negative, otherwise wrapped
    into the unsigned pointer range (id() may be negative)."""
    if result >= 0:
        return result
    return result + HUGEVAL
# before CPython 2.5, id() could return negative values, so wrap it
if sys.version_info < (2, 5):
    def uid(obj):
        """
        Return the id of an object as an unsigned number so that its hex
        representation makes sense
        """
        return fixid(id(obj))
else:
    uid = id # guaranteed to be positive from CPython 2.5 onwards
class Hashable(object):
    """
    Wrap an arbitrary object so it is always usable as a dict key:
    immutable values compare and hash by value (with the type mixed in
    so e.g. 0 and 0.0 stay distinct), mutable ones fall back to
    identity.
    """
    __slots__ = ["key", "value"]

    def __init__(self, value):
        self.value = value  # the wrapped concrete value
        # prefer value-based hashing; include the type so 0 != 0.0
        candidate = type(value), value
        try:
            hash(candidate)
        except TypeError:
            # unhashable (mutable) value: fall back to identity
            candidate = id(value)
        self.key = candidate

    def __eq__(self, other):
        return self.__class__ is other.__class__ and self.key == other.key

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.key)

    def __repr__(self):
        return '(%s)' % (self,)

    def __str__(self):
        # keep the repr short and readable
        shown = repr(self.value)
        if (shown.startswith('<') and shown.endswith('>') and
            hasattr(self.value, '__name__')):
            shown = '%s %s' % (type(self.value).__name__,
                               self.value.__name__)
        elif len(shown) > 60 or (len(shown) > 30
                                 and type(self.value) is not str):
            shown = shown[:22] + '...' + shown[-7:]
        return shown
| Python |
import autopath
import sys, imp
from pypy.tool import slaveproc
class IsolateSlave(slaveproc.Slave):
    """Slave process that loads one module on command and invokes its
    functions, reporting exceptions back by (module, name) so the
    master stays isolated from crashes."""
    # the module under test; set once by the 'load' command
    mod = None
    def do_cmd(self, cmd):
        cmd, data = cmd
        if cmd == 'load':
            assert self.mod is None
            mod = data
            if isinstance(mod, str):
                # dotted module name: plain import
                mod = __import__(mod, {}, {}, ['__doc__'])
            else:
                # (directory, name) pair: locate and load explicitly
                dir, name = mod
                file, pathname, description = imp.find_module(name, [dir])
                try:
                    mod = imp.load_module(name, file, pathname, description)
                finally:
                    if file:
                        file.close()
            self.mod = mod
            return 'loaded'
        elif cmd == 'invoke':
            assert self.mod is not None
            func, args = data
            try:
                res = getattr(self.mod, func)(*args)
            except KeyboardInterrupt:
                raise
            except:
                # report the exception type by module/name; the object
                # itself may not be picklable across the process border
                exc_type = sys.exc_info()[0]
                return ('exc', (exc_type.__module__, exc_type.__name__))
            else:
                return ('ok', res)
        else:
            return 'no-clue'

if __name__ == '__main__':
    IsolateSlave().do()
| Python |
#! /usr/bin/env python
"""
This script walks over the files and subdirs of the specified directories
('.' by default), and changes the svn properties to match the PyPy guidelines:
svn:ignore includes '*.pyc' and '*.pyo' for all directories
svn:eol-style is 'native' for *.py and *.txt files
"""
import sys, os
import autopath
import py
# Every control byte except tab (9), lf (10), ff (12) and cr (13)
# flags data as binary.
forbidden = [code for code in range(32)
             if code not in (9, 10, 12, 13)]

def looksbinary(data, forbidden = [chr(i) for i in forbidden]):
    "Check if some data chunk appears to be binary."
    for control_char in forbidden:
        if control_char in data:
            return True
    return False
def can_set_eol_style(path):
"check to see if we could set eol-style on the path."
data = path.read(mode='rb')
if looksbinary(data):
print "%s looks like a binary, ignoring" % path
return False
original = data
data = data.replace('\r\n', '\n')
data = data.replace('\r', '\n')
data = data.replace('\n', os.linesep)
if data != original:
print "*"*30
print "---> %s <---" % path
print ("WARNING: the file content was modified "
"by fixing the EOL style.")
print "*"*30
#return False
path.write(data, mode='wb')
return True
return True
def checkeolfile(path):
    "True for the extensions that should get svn:eol-style treatment."
    return path.ext in ('.asc', '.py', '.txt')
def fixdirectory(path):
print "+ checking directory", path,
fns = path.listdir(checkeolfile)
if fns:
ignores = path.propget('svn:ignore')
newignores = ignores
l = ignores.split('\n')
for x in ('*.pyc', '*.pyo'):
if x not in l:
l.append(x)
newignores = "\n".join(l)
print ", setting ignores", newignores
path.propset('svn:ignore', newignores)
else:
print
for fn in fns:
fixfile(fn)
for x in path.listdir(lambda x: x.check(dir=1, versioned=True)):
if x.check(link=1):
continue
fixdirectory(x)
def fixfile(path):
x = path.localpath.relto(py.path.local())
if not x:
x = path.localpath
print "checking", x,
if path.check(versioned=0):
return False
oldprop = path.propget('svn:eol-style')
if oldprop:
print "eol-style already set (%r)" %(oldprop, )
else:
if can_set_eol_style(path):
print "setting eol-style native"
path.propset('svn:eol-style', 'native')
else:
print "cannot set eol-style"
if __name__ == '__main__':
if len(sys.argv) > 1:
for fname in sys.argv[1:]:
paths = [py.path.svnwc(x) for x in sys.argv[1:]]
else:
paths = [py.path.svnwc()]
for path in paths:
if path.check(link=1):
print 'ignoring link', path
elif path.check(dir=1):
fixdirectory(path)
elif path.check(file=1):
fixfile(path)
else:
print "ignoring", path
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    # resolve the directory containing this file (fall back to the
    # script path when __file__ is unavailable)
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))

    # walk upwards until a directory literally named *part* is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)

    pypy_root = os.path.join(head, '')
    # move the package root to the front of sys.path
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)

    # try to re-register already-imported modules that live under the
    # package root so they are reachable under their dotted names
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): newname is a bare basename, so this
        # startswith(part + '.') check looks like it can never match;
        # verify whether the loop body below is ever reached.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod

    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)

    return partdir, this_dir
def __clone():
    """ clone master version of autopath.py into all subdirs """
    from os.path import join, walk
    # only the master copy in pypy/tool may be cloned from
    if not this_dir.endswith(join('pypy','tool')):
        raise EnvironmentError("can only clone master version "
                               "'%s'" % join(pypydir, 'tool',_myname))

    def sync_walker(arg, dirname, fnames):
        # overwrite every autopath.py found with the master text (arg)
        if _myname in fnames:
            fn = join(dirname, _myname)
            # NOTE(review): mode 'rwb+' looks dubious, and the file is
            # re-opened for writing while the first handle is still
            # open -- works by accident on common platforms; verify.
            f = open(fn, 'rwb+')
            try:
                if f.read() == arg:
                    print "checkok", fn
                else:
                    print "syncing", fn
                    f = open(fn, 'w')
                    f.write(arg)
            finally:
                f.close()
    s = open(join(pypydir, 'tool', _myname), 'rb').read()
    walk(pypydir, sync_walker, s)
_myname = 'autopath.py'

# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    # running the master copy directly re-clones it everywhere
    __clone()
| Python |
# load __future__.py constants
# load __future__.py constants
def load_module():
    """Execute the modified-2.4.1 __future__.py in this module's
    namespace so its feature constants become attributes here."""
    import py
    # <root>/lib-python/modified-2.4.1/__future__.py, located relative
    # to this file's grandparent directory
    module_path = py.path.local(__file__).dirpath().dirpath().dirpath('lib-python/modified-2.4.1/__future__.py')
    execfile(str(module_path), globals())
load_module()
del load_module
# this could be generalized, it's also in opcode.py
| Python |
# this is for use with a pypy-c build with multidicts and using the
# MeasuringDictImplementation -- it will create a file called
# 'dictinfo.txt' in the local directory and this file will turn the
# contents back into DictInfo objects.
# run with python -i !
import sys

# First pass over the dump named on the command line: records are
# separated by a dashed line; collect the attribute names of the first
# record -- they become the __slots__ of DictInfo below.
infile = open(sys.argv[1])
curr = None
slots = []
for line in infile:
    if line == '------------------\n':
        if curr:
            break
        curr = 1
    else:
        attr, val = [s.strip() for s in line.split(':')]
        slots.append(attr)

class DictInfo(object):
    # one instance per dictionary record of the dump
    __slots__ = slots

# Second pass: build a DictInfo per record, converting each value to
# float (when it contains a '.') or int.
infile = open(sys.argv[1])
infos = []
for line in infile:
    if line == '------------------\n':
        curr = object.__new__(DictInfo)
        infos.append(curr)
    else:
        attr, val = [s.strip() for s in line.split(':')]
        if '.' in val:
            val = float(val)
        else:
            val = int(val)
        setattr(curr, attr, val)
def histogram(infos, keyattr, *attrs):
    """Group `infos` by the value of attribute `keyattr`.

    Returns a sorted list of (key, [count, details]) pairs, where
    `details` maps each name in `attrs` to a {value: occurrences} dict
    for the entries in that group.
    """
    buckets = {}
    for entry in infos:
        key = getattr(entry, keyattr)
        bucket = buckets.setdefault(key, [0, {}])
        bucket[0] = bucket[0] + 1
        for attrname in attrs:
            counts = bucket[1].setdefault(attrname, {})
            value = getattr(entry, attrname)
            counts[value] = counts.get(value, 0) + 1
    return sorted(buckets.items())
def reportDictInfos():
    """Print aggregate statistics over the module-level `infos` list:
    the sum of every integer-valued slot, plus the average lifetime of
    the dictionaries that died before exit."""
    d = {}
    stillAlive = 0
    totLifetime = 0.0
    for info in infos:
        for attr in slots:
            if attr == 'maxcontents':
                continue
            v = getattr(info, attr)
            # only integer counters are summed; floats are skipped
            if not isinstance(v, int):
                continue
            d[attr] = d.get(attr, 0) + v
        # lifetime == -1.0 marks a dict that was still alive at exit
        if info.lifetime != -1.0:
            totLifetime += info.lifetime
        else:
            stillAlive += 1
    print 'read info on', len(infos), 'dictionaries'
    if stillAlive != len(infos):
        print 'average lifetime', totLifetime/(len(infos) - stillAlive),
        print '('+str(stillAlive), 'still alive at exit)'
    print d
def Rify(fname, *attributes):
    """Dump the given attributes of every DictInfo in `infos` to `fname`
    as a whitespace-separated table with a header row, suitable for
    loading into R."""
    output = open(fname, 'w')
    for attr in attributes:
        print >>output, attr,
    print >>output
    for info in infos:
        for attr in attributes:
            print >>output, getattr(info, attr),
        print >>output
if __name__ == '__main__':
    # reportDictInfos()
    # interactive stuff:
    import __builtin__
    def displayhook(v):
        # like the default sys.displayhook, but pretty-printing
        if v is not None:
            __builtin__._ = v
            pprint.pprint(v)
    sys.displayhook = displayhook
    import pprint
    try:
        import readline
    except ImportError:
        pass
    else:
        # enable tab completion for the interactive session (-i)
        import rlcompleter
        readline.parse_and_bind('tab: complete')
    if len(sys.argv) > 2:
        # optional second argument: comma-separated attribute names
        # (or 'all') to dump to R.txt
        attrs = sys.argv[2].split(',')
        if attrs == ['all']:
            attrs = slots
        Rify("R.txt", *attrs)
| Python |
#
# support code for the trace object space
#
import autopath
import sys
class Stack(list):
    """A list with stack-style helpers: push, pop and a non-raising
    top()."""

    push = list.append

    def pop(self):
        """Remove and return the topmost element (IndexError if empty)."""
        return super(Stack, self).pop(-1)

    def top(self):
        """Return the topmost element, or None when the stack is empty."""
        if self:
            return self[-1]
        return None
class ResultPrinter(object):
    """Pretty-printer for trace-space events.

    Renders frame enter/leave markers, executed bytecodes and space
    operations as an indented tree.  A stack of (show, event_type,
    frame) triples tracks nesting and whether output is currently
    suppressed (e.g. inside hidden app-level code).
    """

    def __init__(self,
                 indentor = ' ',
                 repr_type_simple = True,
                 show_bytecode = True,
                 output_filename = None,
                 tree_pos_indicator = "|-",
                 show_hidden_applevel = False,
                 recursive_operations = False,
                 show_wrapped_consts_bytecode = True,
                 **kwds
                 ):
        # output goes to stdout unless a filename is given
        if output_filename is None:
            self.out = sys.stdout
        else:
            self.out = open(output_filename, "w")
        # Configurable stuff
        self.indentor = indentor
        self.tree_pos_indicator = tree_pos_indicator
        self.show_bytecode = show_bytecode
        self.show_hidden_applevel = show_hidden_applevel
        self.recursive_operations = recursive_operations
        self.show_wrapped_consts_bytecode = show_wrapped_consts_bytecode
        # choose the value-repr function once, up front
        if repr_type_simple:
            self.repr_value = simple_repr
        else:
            self.repr_value = repr_value
        # Keeps a stack of current state to handle
        # showing of applevel and recursive operations
        self.indent_state = Stack()
        # Some printing state
        self.last_line_was_new = True

    def reset(self):
        # drop all nesting state (e.g. between two traced runs)
        self.indent_state = Stack()

    def valid_state(self):
        # printing is suppressed while the top stack entry has show=False
        state = self.indent_state.top()
        if state is not None and not state[0]:
            return False
        return True

    def print_line(self, line, new_line = True):
        # Emit one line, indented by the number of "shown" stack levels.
        # With new_line=False the next print_line continues this line.
        if self.last_line_was_new:
            indent_count = len([c for c, t, f in self.indent_state if c])
            if indent_count:
                indent = indent_count - 1
                assert (indent >= 0)
                line = (self.indentor * indent) + self.tree_pos_indicator + line
        if new_line:
            self.last_line_was_new = True
            print >>self.out, line
        else:
            print >>self.out, line,
            self.last_line_was_new = False

    def print_frame(self, print_type, frame):
        """Print an enter/leave marker for `frame` with file and line."""
        if not self.valid_state():
            return
        # Force new line if not the case
        if not self.last_line_was_new:
            print >>self.out, ""
            self.last_line_was_new = True
        code = getattr(frame, 'pycode', None)
        filename = getattr(code, 'co_filename', "")
        filename = filename.replace("\n", "\\n")
        lineno = getattr(code, 'co_firstlineno', "")
        s = " <<<< %s %s @ %s >>>>" % (print_type, filename, lineno)
        self.print_line(s)

    def print_bytecode(self, index, bytecode, space):
        """Print one executed bytecode (honours the show_bytecode and
        show_wrapped_consts_bytecode options)."""
        if not self.valid_state():
            return
        if self.show_bytecode:
            if self.show_wrapped_consts_bytecode:
                bytecode_str = repr(bytecode)
            else:
                bytecode_str = bytecode.repr_with_space(space)
            s = "%2d%s%s" % (index, (self.indentor * 2), bytecode_str)
            self.print_line(s)

    def print_op_enter(self, space, name, args):
        # "name(arg, ...)" -- the result is appended later, on the same
        # line, by print_op_leave/print_op_exc
        if not self.valid_state():
            return
        s = " " * 4
        s += "%s" % name
        s += "(" + ", ".join([self.repr_value(space, ii) for ii in args]) + ")"
        self.print_line(s, new_line=False)

    def print_op_leave(self, space, name, res):
        # "-> result", continuing the print_op_enter line when possible
        if not self.valid_state():
            return
        if self.last_line_was_new:
            s = " " * 4
        else:
            s = " "
        s += "-> %s" % self.repr_value(space, res)
        self.print_line(s)

    def print_op_exc(self, name, exc, space):
        # operation raised: "-> <raised> (exc)"
        if not self.valid_state():
            return
        if self.last_line_was_new:
            s = " " * 4
        else:
            s = " "
        s += "-> <raised> (%s)" % self.repr_value(space, exc)
        self.print_line(s)

    def print_event(self, space, event_result, event):
        """Single entry point: dispatch on the trace event type and keep
        the nesting stack in sync."""
        from pypy.objspace import trace
        if isinstance(event, trace.EnterFrame):
            frame = event.frame
            if self.show_hidden_applevel or not frame.pycode.hidden_applevel:
                show = True
            else:
                show = False
            self.indent_state.push((show, trace.EnterFrame, frame))
            self.print_frame("enter", frame)
        elif isinstance(event, trace.LeaveFrame):
            lastframe = self.indent_state.top()[2]
            assert lastframe is not None
            self.print_frame("leave", lastframe)
            self.indent_state.pop()
        elif isinstance(event, trace.ExecBytecode):
            frame = event.frame
            assert (frame == self.get_last_frame())
            # Get bytecode from frame
            disresult = event_result.getdisresult(frame)
            bytecode = disresult.getbytecode(event.index)
            self.print_bytecode(event.index, bytecode, space)
        elif isinstance(event, trace.CallBegin):
            lastframe = self.get_last_frame()
            info = event.callinfo
            show = True
            # Check if we are in applevel?
            if not self.show_hidden_applevel:
                if lastframe is None or lastframe.pycode.hidden_applevel:
                    show = False
            # Check if recursive operations?
            prev_indent_state = self.indent_state.top()
            if not self.recursive_operations and prev_indent_state is not None:
                if prev_indent_state[1] == trace.CallBegin:
                    show = False
            # operations push frame=None so get_last_frame skips them
            self.indent_state.push((show, trace.CallBegin, None))
            self.print_op_enter(space, info.name, info.args)
        elif isinstance(event, trace.CallFinished):
            info = event.callinfo
            self.print_op_leave(space, info.name, event.res)
            self.indent_state.pop()
        elif isinstance(event, trace.CallException):
            info = event.callinfo
            self.print_op_exc(info.name, event.ex, space)
            self.indent_state.pop()

    def get_last_frame(self):
        # most recent frame on the stack, skipping operation entries
        # (which push frame=None); returns None when no frame is active
        for c, t, f in self.indent_state[::-1]:
            if f is not None:
                return f
class ResultPrinterVerbose(ResultPrinter):
    """ Puts result on same line """
    # NOTE(review): despite the docstring, this variant *ends* the line
    # after printing the operation, so the result appears on its own
    # line -- confirm which behaviour is intended.

    def print_op_enter(self, space, name, args):
        if not self.valid_state():
            return
        s = " " * 4
        s += "%s" % name
        s += "(" + ", ".join([self.repr_value(space, ii) for ii in args]) + ")"
        self.print_line(s)

    def print_op_exc(self, name, exc, space):
        # exception outcome, marked with "x->" instead of "-> <raised>"
        if not self.valid_state():
            return
        if self.last_line_was_new:
            s = " " * 4
        else:
            s = " "
        s += "x-> %s" % self.repr_value(space, exc)
        self.print_line(s)
def simple_repr(space, obj):
    """Return repr(obj); results longer than 80 characters are cut to
    76 characters plus '...'.  `space` is accepted for signature
    compatibility with repr_value and is unused here."""
    text = repr(obj)
    if len(text) <= 80:
        return text
    return text[:76] + "..."
def repr_value_complex(space, obj):
    """ representations - very slow """
    from pypy.interpreter.argument import Arguments
    from pypy.interpreter.error import OperationError
    # Special case true and false (from space.is_true()) - we use a
    # different representation from a wrapped object reprs method.
    if obj is True:
        return "TRUE"
    elif obj is False:
        return "FALSE"
    # iterables render as a comma-separated list of their items
    if hasattr(obj, "__iter__"):
        return ", ".join([repr_value(space, ii) for ii in obj])
    # Special case - arguments
    if isinstance(obj, Arguments):
        args = [repr_value(space, ii) for ii in obj.arguments_w]
        if obj.kwds_w:
            args += ["%s = %s" % (k, repr_value(space, v))
                     for k, v in obj.kwds_w.items()]
        if not obj.w_stararg is None:
            args.append("*" + repr_value_complex(space, obj.w_stararg))
        if not obj.w_starstararg is None:
            args.append("**" + repr_value_complex(space, obj.w_starstararg))
        return "Args(%s)" % (", ".join(args))
    # Special case - operation error
    if isinstance(obj, OperationError):
        return "OpError(%s, %s)" % (repr_value(space, obj.w_type),
                                    repr_value(space, obj.w_value))
    # Try object repr
    try:
        return space.str_w(space.repr(obj))
    except:
        # Give up
        return repr(obj)
def repr_value(space, obj):
    # full (slow) repr, truncated to 120 characters
    return repr_value_complex(space, obj)[:120]
# __________________________________________________________________________
def perform_trace(tspace, app_func, *args_w):
    """Run app-level function `app_func` in trace space `tspace` with
    the wrapped arguments `args_w`.

    Returns (wrapped result, trace result)."""
    from pypy.interpreter.gateway import app2interp
    # NOTE(review): the Arguments import appears unused here
    from pypy.interpreter.argument import Arguments
    # Create our function
    func_gw = app2interp(app_func)
    w_func = func_gw.get_function(tspace)
    # Run the func in the trace space and return results
    tspace.settrace()
    w_result = tspace.call_function(w_func, *args_w)
    trace_result = tspace.getresult()
    tspace.settrace()
    return w_result, trace_result
if __name__ == '__main__':
    # demo: trace a small function in a std object space
    from pypy.objspace import std, trace

    # Wrap up std space, with a trace space
    tspace = trace.create_trace_space(std.Space())

    def func(x):
        # sums 0..x-1 -- just something simple to trace
        count = 0
        for ii in range(x):
            count += ii
        return count

    # Note includes lazy loading of builtins
    res = perform_trace(tspace, func, tspace.wrap(5))
    print "Result:", res
| Python |
import os, struct, marshal, sys
class Exchange(object):
    """Length-prefixed marshal message channel over two file-like
    streams: messages go out on `out` and come in on `inp`.  Each frame
    is a native-size struct 'L' length header followed by the marshal
    payload."""

    def __init__(self, inp, out):
        self.out = out
        self.inp = inp

    def send(self, data):
        """Marshal `data` and write it as one length-prefixed frame."""
        payload = marshal.dumps(data)
        header = struct.pack('L', len(payload))
        self.out.write(header + payload)
        self.out.flush()

    def recv(self):
        """Read one frame and unmarshal it; EOFError on short read."""
        header_size = struct.calcsize('L')
        header = self.inp.read(header_size)
        if len(header) < header_size:
            raise EOFError
        payload_size, = struct.unpack('L', header)
        payload = self.inp.read(payload_size)
        if len(payload) < payload_size:
            raise EOFError
        return marshal.loads(payload)

    def forceclose(self):
        """Close both streams, ignoring any errors."""
        for stream in (self.out, self.inp):
            try:
                stream.close()
            except:
                pass
class SlaveProcess(object):
    """Master side: spawns a slave interpreter via os.popen2 and talks
    to it with marshalled commands over its stdin/stdout."""
    # set once the child pipe has broken (EOF from the slave)
    _broken = False

    def __init__(self, slave_impl):
        # -u keeps the child's pipes unbuffered (not usable on win32)
        if sys.platform == 'win32':
            unbuffered = ''
        else:
            unbuffered = '-u'
        inp, out = os.popen2('%s %s %s' % (sys.executable, unbuffered, os.path.abspath(slave_impl)))
        # note the swap: we read from the child's stdout ('out') and
        # write to its stdin ('inp')
        self.exchg = Exchange(out, inp)

    def cmd(self, data):
        """Send one command and return the slave's reply; marks the
        link broken (and re-raises EOFError) if the slave died."""
        self.exchg.send(data)
        try:
            return self.exchg.recv()
        except EOFError:
            self._broken = True
            raise

    def close(self):
        # a None command asks the slave to shut down cleanly
        if not self._broken:
            assert self.cmd(None) == 'done'
        self.exchg.forceclose()
class Slave(object):
    """Child side: subclass and implement do_cmd(), then call do()."""

    def do_cmd(self, data):
        # handle one command; must return a marshallable result
        raise NotImplementedError

    def do(self):
        """Serve commands from stdin until EOF (master died) or a None
        command (clean shutdown, acknowledged with 'done')."""
        exchg = Exchange(sys.stdin, sys.stdout)
        while True:
            try:
                cmd = exchg.recv()
            except EOFError: # master died
                break
            if cmd is None:
                exchg.send('done')
                break
            result = self.do_cmd(cmd)
            exchg.send(result)
| Python |
# some analysis of global imports
"""
The idea:
compile a module's source text and walk recursively
through the code objects. Find out which globals
are used.
Then examine each 'import *' by importing that module
and looking for those globals.
Replace the 'import *' by the list found.
More advanced: If the new import has more than, say, 5 entries,
rewrite the import to use module.name throughout the source.
"""
import dis, cStringIO, sys
def disasm(code):
    """Return the dis.dis() output of `code` as a string by temporarily
    redirecting sys.stdout."""
    hold = sys.stdout
    try:
        sys.stdout = cStringIO.StringIO()
        dis.dis(code)
        return sys.stdout.getvalue()
    finally:
        sys.stdout = hold
def opsequence(code):
    """Yield (offset, opname, arg) triples parsed from the textual
    disassembly of `code`.

    `arg` is the parenthesised argument string (closing paren dropped),
    converted to int for JUMP* opcodes, or None when there is none.
    """
    for line in disasm(code).split('\n'):
        pieces = line.split('(', 1)
        if len(pieces) == 1:
            start, arg = pieces[0], None
        else:
            start, arg = pieces
        words = start.split()
        # strip leading line numbers / offsets and '>>' jump-target marks;
        # the last plain number seen is the bytecode offset
        while words and (words[0].isdigit() or words[0] == '>>'):
            word = words.pop(0)
            if word.isdigit():
                ofs = int(word)
        if not words:
            continue
        op = words[0]
        if arg:
            arg = arg[:-1] # )
        if op.startswith('JUMP'):
            arg = int(words[1])
        yield ofs, op, arg
def globalsof(code, globrefs=None, stars=None, globals=None):
    """Walk the opcodes of `code` and collect global-name usage.

    Returns (globrefs, stars, globals):
      globrefs -- {name: {code: [offsets]}} for every LOAD_GLOBAL
      stars    -- [(module, offset)] for every 'from module import *'
      globals  -- {stored name: (imported_what, imported_module)},
                  (None, None) for ordinary assignments
    The three containers may be passed in so results accumulate over
    several code objects.
    """
    # NOTE(review): names/vars appear unused
    names = code.co_names
    vars = code.co_varnames
    if globrefs is None: globrefs = {}
    if stars is None: stars = [] # do stars in order
    if globals is None: globals = {}
    # in_seq is True while we are inside an import's opcode sequence
    in_seq = False
    for ofs, op, arg in opsequence(code):
        if op == 'LOAD_GLOBAL':
            name = arg
            refs = globrefs.setdefault(name, {})
            offsets = refs.setdefault(code, [])
            offsets.append(ofs)
        elif op == 'IMPORT_NAME':
            in_seq = True
            imp_module = arg
            imp_what = None
        elif op == 'IMPORT_FROM':
            in_seq = True
            imp_what = arg
        elif op == 'STORE_NAME':
            # we are not interested in local imports, which
            # would generate a STORE_FAST
            name = arg
            if in_seq:
                globals[name] = imp_what, imp_module
                in_seq = False
            else:
                globals[name] = None, None
        elif op == 'IMPORT_STAR':
            stars.append( (imp_module, ofs) )
            in_seq = False
        else:
            in_seq = False
    return globrefs, stars, globals
def offsetmap(c):
    # create a mapping from offsets to line numbers.
    # we count lines from zero, here.
    # (decodes the co_lnotab byte pairs; assumes a Python 2 code object
    # where co_lnotab is a str, hence the ord() calls)
    tab = c.co_lnotab
    line = c.co_firstlineno - 1
    addr = 0
    res = { addr: line }
    for i in range(0, len(tab), 2):
        addr = addr + ord(tab[i])
        line = line + ord(tab[i+1])
        res[addr] = line
    return res
class Analyser:
    """Analyse one Python source file for global-name usage and
    'import *' statements (see the module docstring)."""

    def __init__(self, fname):
        self.fname = fname
        self.source = file(fname).read()
        self.starimports = []
        # code object -> {offset: lineno} (filled by analyse())
        self.codeobjects = {}
        self.globrefs, self.stars, self.globals = self.analyse()

    def analyse(self):
        """Compile the source and walk all nested code objects,
        accumulating global refs, star imports and assigned globals."""
        globrefs = {}
        stars = []
        globals = {}
        seen = {}
        code = compile(self.source, self.fname, 'exec')
        todo = [code]
        while todo:
            code = todo.pop(0)
            self.codeobjects[code] = offsetmap(code)
            globalsof(code, globrefs, stars, globals)
            seen[code] = True
            # nested code objects (functions, classes) live in co_consts
            for const in code.co_consts:
                if type(const) is type(code) and const not in seen:
                    todo.append(const)
        return globrefs, stars, globals

    def get_unknown_globals(self):
        """Names referenced as globals that are neither builtins nor
        assigned in this module -- they must come from an 'import *'."""
        from __builtin__ import __dict__ as bltin
        ret = [name for name in self.globrefs.keys()
               if name not in bltin and name not in self.globals]
        return ret

    def get_from_star(self, modname):
        # actually import the module to see what '*' would bring in
        dic = {}
        exec "from %s import *" % modname in dic
        return dic

    def resolve_star_imports(self):
        """Determine which unknown globals each 'import *' provides;
        stores self.starimports = [(offset, module, [names])]."""
        implicit = {}
        which = {}
        for star, ofs in self.stars:
            which[star] = []
            for key in self.get_from_star(star).keys():
                implicit[key] = star
        # sort out in which star import we find what.
        # note that we walked star imports in order,
        # so we are sure to resolve ambiguities correctly.
        for name in self.get_unknown_globals():
            mod = implicit[name]
            which[mod].append(name)
        imps = []
        for star, ofs in self.stars:
            imps.append( (ofs, star, which[star]) )
        self.starimports = imps

    def find_statements(self):
        """Split the source into per-statement chunks, using the line
        numbers recorded for all code objects as boundaries; stores
        self.linenos and self.statements."""
        # go through all code objects and collect
        # line numbers. This gives us all statements.
        lineset = {}
        for co, ofs2line in self.codeobjects.items():
            for ofs, line in ofs2line.items():
                lineset[line] = True
        linenos = lineset.keys()
        if 0 not in linenos:
            linenos.append(0)
        linenos.sort()
        self.linenos = linenos
        # now create statement chunks
        srclines = self.source.split('\n')
        stmts = []
        start = 0
        for lno in linenos[1:] + [sys.maxint]:
            stmt = '\n'.join(srclines[start:lno])
            stmts.append(stmt)
            start = lno
        self.statements = stmts
| Python |
#empty
| Python |
# a couple of support functions which
# help with generating Python source.
# XXX This module provides a similar, but subtly different, functionality
# XXX several times over, which used to be scattered over four modules.
# XXX We should try to generalize and single out one approach to dynamic
# XXX code compilation.
import sys, os, inspect, new
import autopath, py
def render_docstr(func, indent_str='', closing_str=''):
    """Render a docstring as a single triple-quoted source string.

    `func` is either a docstring itself or an object whose __doc__ is
    used.  Returns indent_str + quoted docstring + closing_str, or None
    when there is no docstring.  Only the first line gets `indent_str`;
    the docstring body keeps its own left alignment, so multi-line
    docstrings line up regardless of surrounding indentation.  The
    shorter of the two triple-quote styles is chosen automatically.

    (The previous docstring claimed the result was "returned as a
    1-tuple"; it is a plain string or None.)
    """
    if type(func) is not str:
        doc = func.__doc__
    else:
        doc = func
    if doc is None:
        return None
    # escape backslashes first, then whichever quote char is used
    doc = doc.replace('\\', r'\\')
    compare = []
    for q in '"""', "'''":
        txt = indent_str + q + doc.replace(q[0], "\\"+q[0]) + q + closing_str
        compare.append(txt)
    doc, doc2 = compare
    # pick the shorter rendering (fewer escaped quotes)
    doc = (doc, doc2)[len(doc2) < len(doc)]
    return doc
class NiceCompile(object):
    """ Compiling parameterized strings in a way that debuggers
        are happy. We provide correct line numbers and a real
        __file__ attribute.
    """
    def __init__(self, namespace_or_filename):
        # accepts either a filename or a namespace dict with __file__
        if type(namespace_or_filename) is str:
            srcname = namespace_or_filename
        else:
            srcname = namespace_or_filename.get('__file__')
        if not srcname:
            # assume the module was executed from the
            # command line.
            srcname = os.path.abspath(sys.argv[-1])
        self.srcname = srcname
        # map .pyc/.pyo back to the .py source file
        if srcname.endswith('.pyc') or srcname.endswith('.pyo'):
            srcname = srcname[:-1]
        if os.path.exists(srcname):
            self.srcname = srcname
            self.srctext = file(srcname).read()
        else:
            # missing source, what to do?
            self.srctext = None

    def __call__(self, src, args=None):
        """ instance NiceCompile (src, args) -- formats src with args
            and returns a code object ready for exec. Instead of <string>,
            the code object has correct co_filename and line numbers.
            Indentation is automatically corrected.
        """
        if self.srctext:
            # locate src inside the real source file so that the
            # compiled code's line numbers match the file
            try:
                p = self.srctext.index(src)
            except ValueError, e:
                e.args = "Source text not found in %s - use a raw string" % self.srcname
                raise
            prelines = self.srctext[:p].count("\n") + 1
        else:
            prelines = 0
        # adjust indented def
        for line in src.split('\n'):
            content = line.strip()
            if content and not content.startswith('#'):
                break
        # see if first line is indented
        if line and line[0].isspace():
            # fake a block
            prelines -= 1
            src = 'if 1:\n' + src
        # pad with newlines so line numbers line up with the file
        if args is not None:
            src = '\n' * prelines + src % args
        else:
            src = '\n' * prelines + src
        c = compile(src, self.srcname, "exec")
        # preserve the arguments of the code in an attribute
        # of the code's co_filename
        if self.srcname:
            srcname = MyStr(self.srcname)
            if args is not None:
                srcname.__sourceargs__ = args
            c = newcode_withfilename(c, srcname)
        return c
def getsource(object):
    """ similar to inspect.getsource, but trying to
        find the parameters of formatting generated methods and
        functions.
    """
    name = inspect.getfile(object)
    if hasattr(name, '__source__'):
        # generated code: the MyStr filename carries the source itself
        src = str(name.__source__)
    else:
        try:
            src = inspect.getsource(object)
        except IOError:
            return None
        except IndentationError:
            return None
    if hasattr(name, "__sourceargs__"):
        # re-apply the % formatting arguments used at generation time
        return src % name.__sourceargs__
    return src
## the following is stolen from py.code.source.py for now.
## XXX discuss whether and how to put this functionality
## into py.code.source.
#
# various helper functions
#
class MyStr(str):
    """ custom string which allows to add attributes. """
    # used as a co_filename that can carry __source__ / __sourceargs__
def newcode(fromcode, **kwargs):
    """Build a new code object, copying every co_* attribute from
    `fromcode` except those overridden by keyword arguments."""
    names = [x for x in dir(fromcode) if x[:3] == 'co_']
    for name in names:
        if name not in kwargs:
            kwargs[name] = getattr(fromcode, name)
    import new
    # positional order required by new.code()
    return new.code(
             kwargs['co_argcount'],
             kwargs['co_nlocals'],
             kwargs['co_stacksize'],
             kwargs['co_flags'],
             kwargs['co_code'],
             kwargs['co_consts'],
             kwargs['co_names'],
             kwargs['co_varnames'],
             kwargs['co_filename'],
             kwargs['co_name'],
             kwargs['co_firstlineno'],
             kwargs['co_lnotab'],
             kwargs['co_freevars'],
             kwargs['co_cellvars'],
    )
def newcode_withfilename(co, co_filename):
    """Return a copy of code object `co` with co_filename replaced,
    recursing into nested code objects found in co_consts."""
    newconstlist = []
    cotype = type(co)
    for c in co.co_consts:
        if isinstance(c, cotype):
            c = newcode_withfilename(c, co_filename)
        newconstlist.append(c)
    return newcode(co, co_consts = tuple(newconstlist),
                       co_filename = co_filename)
# ____________________________________________________________
import __future__
def compile2(source, filename='', mode='exec', flags=
             __future__.generators.compiler_flag, dont_inherit=0):
    """
    A version of compile() that caches the code objects it returns.

    It uses py.code.compile() to allow the source to be displayed in tracebacks.

    NOTE(review): `dont_inherit` is accepted for compile() signature
    compatibility but is neither forwarded nor part of the cache key.
    """
    key = (source, filename, mode, flags)
    try:
        co = compile2_cache[key]
        #print "***** duplicate code ******* "
        #print source
    except KeyError:
        #if DEBUG:
        co = py.code.compile(source, filename, mode, flags)
        #else:
        #    co = compile(source, filename, mode, flags)
        compile2_cache[key] = co
    return co

compile2_cache = {}
# ____________________________________________________________
def compile_template(source, resultname):
    """Compiles the source code (a string or a list/generator of lines)
    which should be a definition for a function named 'resultname'.
    The caller's global dict and local variable bindings are captured.
    """
    if not isinstance(source, py.code.Source):
        if isinstance(source, str):
            lines = [source]
        else:
            lines = list(source)
        lines.append('')
        source = py.code.Source('\n'.join(lines))
    # capture the caller's locals; they are passed in as arguments of a
    # wrapper 'container' function so the template can close over them
    caller = sys._getframe(1)
    locals = caller.f_locals
    if locals is caller.f_globals:
        # module level: no locals distinct from globals
        localnames = []
    else:
        localnames = locals.keys()
        localnames.sort()
    values = [locals[key] for key in localnames]
    source = source.putaround(
        before = "def container(%s):" % (', '.join(localnames),),
        after = "# no unindent\n return %s" % resultname)
    d = {}
    exec source.compile() in caller.f_globals, d
    container = d['container']
    return container(*values)
# ____________________________________________________________
if sys.version_info >= (2, 3):
    def func_with_new_name(func, newname):
        """Make a renamed copy of a function."""
        f = new.function(func.func_code, func.func_globals,
                         newname, func.func_defaults,
                         func.func_closure)
        if func.func_dict:
            # give the copy its own attribute dict (don't share)
            f.func_dict = {}
            f.func_dict.update(func.func_dict)
        return f
else:
    raise Exception("sorry, Python 2.2 not supported")
    # because we need to return a new function object -- impossible in 2.2,
    # cannot create functions with closures without using veeeery strange code
def _identifier_char(i):
    # identity for ASCII letters and digits, '_' for everything else
    c = chr(i)
    if '0' <= c <= '9' or 'a' <= c <= 'z' or 'A' <= c <= 'Z':
        return c
    return '_'

# 256-entry translation table for str.translate()
PY_IDENTIFIER = ''.join([_identifier_char(i) for i in range(256)])

def valid_identifier(stuff):
    """Turn `stuff` (stringified first) into a valid Python identifier:
    non-alphanumeric characters become '_', and a leading digit or an
    empty result gets a '_' prefix."""
    cleaned = str(stuff).translate(PY_IDENTIFIER)
    if (not cleaned) or ('0' <= cleaned[0] <= '9'):
        return '_' + cleaned
    return cleaned
# code-object flag bits (mirror CPython's CO_* constants)
CO_VARARGS = 0x0004
CO_VARKEYWORDS = 0x0008

def has_varargs(func):
    """True if the function (or code object) accepts *args."""
    code = getattr(func, 'func_code', func)
    return bool(code.co_flags & CO_VARARGS)

def has_varkeywords(func):
    """True if the function (or code object) accepts **kwargs."""
    code = getattr(func, 'func_code', func)
    return bool(code.co_flags & CO_VARKEYWORDS)
def nice_repr_for_func(fn, name=None):
    """Return '(module:firstlineno)name' for a function-like object.

    Missing pieces degrade gracefully: module becomes '?', the line
    number -1, and the name 'UNKNOWN'.  A 'class_' attribute, when
    present, is prefixed to the name.
    """
    module = getattr(fn, '__module__', None)
    if name is None:
        name = getattr(fn, '__name__', None)
    klass = getattr(fn, 'class_', None)
    if klass is not None and name is not None:
        name = "%s.%s" % (klass.__name__, name)
    try:
        lineno = fn.func_code.co_firstlineno
    except AttributeError:
        lineno = -1
    return "(%s:%d)%s" % (module or '?', lineno, name or 'UNKNOWN')
| Python |
class InstanceMethod(object):
    "Like types.InstanceMethod, but with a reasonable (structural) equality."

    def __init__(self, im_func, im_self, im_class):
        self.im_func = im_func
        self.im_self = im_self
        self.im_class = im_class

    def __call__(self, *args, **kwds):
        receiver = self.im_self
        if receiver is None:
            # unbound: the first positional argument must be an
            # im_class instance
            if not args or not isinstance(args[0], self.im_class):
                raise TypeError(
                    "must be called with %r instance as first argument" % (
                    self.im_class,))
            receiver, args = args[0], args[1:]
        return self.im_func(receiver, *args, **kwds)

    def __eq__(self, other):
        # structural: compares the function and the bound instance
        # (im_class deliberately not considered)
        if not isinstance(other, InstanceMethod):
            return False
        return (self.im_func == other.im_func and
                self.im_self == other.im_self)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.im_func, self.im_self))
| Python |
import autopath
import py
log = py.log.Producer("log")
logexec = py.log.Producer("exec")
import os
BASEURL = "file:///svn/pypy/release/1.0.x"
DDIR = py.path.local('/www/codespeak.net/htdocs/download/pypy')
def usage():
    """Print command-line usage and abort with exit code 1."""
    print "usage: %s [-tag .<micro>] versionbasename" %(py.std.sys.argv[0])
    raise SystemExit, 1
def cexec(cmd):
    """Log `cmd` and run it, returning its output (raises on failure)."""
    logexec(cmd)
    return py.process.cmdexec(cmd)
def maketargz(target):
    """Create <target>.tar.gz next to the `target` directory and return
    its path."""
    targz = target + ".tar.gz"
    basename = target.basename
    # tar from the parent dir so the archive contains relative paths
    old = target.dirpath().chdir()
    try:
        out = cexec("tar zcvf %(targz)s %(basename)s" % locals())
    finally:
        old.chdir()
    assert targz.check(file=1)
    assert targz.size() > 0
    return targz
def maketarbzip(target):
    """Create <target>.tar.bz2 next to the `target` directory and return
    its path."""
    # NOTE(review): the local is named 'targz' but holds the .tar.bz2 path
    targz = target + ".tar.bz2"
    basename = target.basename
    # tar from the parent dir so the archive contains relative paths
    old = target.dirpath().chdir()
    try:
        out = cexec("tar jcvf %(targz)s %(basename)s" % locals())
    finally:
        old.chdir()
    assert targz.check(file=1)
    assert targz.size() > 0
    return targz
def makezip(target):
    """Create <target>.zip (removing any stale copy first, since zip
    would otherwise update it in place) and return its path."""
    tzip = target + ".zip"
    if tzip.check(file=1):
        log("removing", tzip)
        tzip.remove()
    basename = target.basename
    old = target.dirpath().chdir()
    try:
        out = cexec("zip -r9 %(tzip)s %(basename)s" % locals())
    finally:
        old.chdir()
    assert tzip.check(file=1)
    assert tzip.size() > 0
    return tzip
def copydownload(fn):
    """Copy the archive `fn` into the public download directory DDIR."""
    log("copying to download location")
    #fn.copy(dtarget)
    ddir = DDIR
    out = cexec("cp %(fn)s %(ddir)s"
                % locals())
def forced_export(BASEURL, target, lineend="LF"):
    """svn-export BASEURL to `target` with the given native line ending
    style, removing any previous export first."""
    if target.check(dir=1):
        log("removing", target)
        target.remove()
    out = cexec("svn export --native-eol %s %s %s"
                %(lineend, BASEURL, target))
    assert target.check(dir=1)
def build_html(target):
    """Run the documentation tests inside the exported tree to generate
    the html docs, then remove the .pyc files this leaves behind."""
    docdir = target.join('pypy').join('doc')
    old = docdir.chdir()
    try:
        # Generate the html files.
        cmd = "python2.4 ../test_all.py -k -test_play1_snippets"
        logexec(cmd)
        r = os.system(cmd)
        if r:
            raise SystemExit, -1
        # Remove any .pyc files created in the process
        target.chdir()
        out = cexec("find . -name '*.pyc' -print0 | xargs -0 -r rm")
    finally:
        old.chdir()
if __name__ == '__main__':
    argc = len(py.std.sys.argv)
    if argc <= 1:
        usage()
    j = 1
    # optional "-tag .<micro>": first svn-copy the release branch to a
    # micro-version tag and build from that tag instead
    if py.std.sys.argv[1] == '-tag':
        micro = py.std.sys.argv[2]
        assert micro.startswith('.')
        NEWURL = BASEURL.replace('.x', micro)
        r = os.system("svn cp %s %s" % (BASEURL, NEWURL))
        if r:
            raise SystemExit, -1
        BASEURL = NEWURL
        j = 3
    ver = py.std.sys.argv[j]
    assert ver.startswith('pypy-')
    tmpdir = py.path.local("/tmp/pypy-release")
    target = tmpdir.join(ver)
    # LF export for the unix .tar.gz / .tar.bz2 archives
    forced_export(BASEURL, target, lineend="LF")
    build_html(target)
    target_targz = maketargz(target)
    assert target_targz.check(file=1)
    copydownload(target_targz)
    target_tarbzip = maketarbzip(target)
    assert target_tarbzip.check(file=1)
    copydownload(target_tarbzip)
    # fresh CRLF export for the windows .zip
    forced_export(BASEURL, target, lineend="CRLF")
    build_html(target)
    target_zip = makezip(target)
    assert target_zip.check(file=1)
    copydownload(target_zip)
| Python |
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import threading
import sys
server_thread = None
server_port = None
class TBRequestHandler(BaseHTTPRequestHandler):
    """HTTP handler serving rendered traceback views.

    The class-level `views` dict maps a view name (the first path
    component) to a renderer object with a .render(path) method.
    GET /quit shuts the server down.
    """
    views = {}

    def do_GET(self):
        if self.path == '/quit':
            global server_thread
            server_thread = None
            raise SystemExit
        parts = self.path[1:].split('/', 1)
        # Fall back to the default view for a bare "/".  (The original
        # code tested `if not parts:` -- which split() never yields --
        # and assigned a misspelled `tp_name`, so this default was dead.)
        if not parts or not parts[0]:
            tb_name = 'traceback'
        else:
            tb_name = parts[0]
        if tb_name not in self.views:
            self.send_response(404)
            self.send_header("Content-Type", "text/plain")
            self.end_headers()
            self.wfile.write('traceback named %r not found' % tb_name)
        else:
            tbview = self.views[tb_name]
            s = tbview.render(self.path)
            self.send_response(200)
            self.send_header("Content-Type", "text/html; charset=utf-8")
            self.end_headers()
            self.wfile.write(unicode(s).encode('utf8'))

    def log_message(self, format, *args):
        # silence the default per-request stderr logging
        pass
class TBServer(HTTPServer):
    def handle_error(self, request, client_address):
        # let shutdown/interrupt propagate instead of being swallowed;
        # everything else gets the default error report
        exc = sys.exc_info()[1]
        if isinstance(exc, (SystemExit, KeyboardInterrupt)):
            raise
        else:
            HTTPServer.handle_error(self, request, client_address)
def serve():
    """Run a TBServer on localhost, probing ports upwards from 8080
    until a free one is found; blocks serving forever.  Stores the
    chosen port in the module-level `server_port`."""
    import socket
    port = 8080
    while 1:
        try:
            server = TBServer(('localhost', port), TBRequestHandler)
        except socket.error:
            port += 1
            continue
        else:
            break
    global server_port
    server_port = port
    print "serving on", port
    server.serve_forever()
def start():
    """Start the traceback server in a background thread and return
    that thread (also stored in the module-level `server_thread`)."""
    global server_thread
    server_thread = threading.Thread(target=serve)
    server_thread.start()
    return server_thread
def stop():
    """Stop a running server by requesting its /quit URL; no-op when no
    server is running."""
    if server_thread is None:
        return
    import urllib2
    try:
        urllib2.urlopen('http://localhost:%s/quit'%(server_port,))
    except urllib2.HTTPError:
        # the /quit handler dies before answering properly
        pass
def wait_until_interrupt():
    """Block until Ctrl-C, then stop the server; no-op when no server
    is running."""
    if server_thread is None:
        return
    print "waiting"
    import signal
    try:
        signal.pause()
    except KeyboardInterrupt:
        stop()
def publish_exc(exc):
    """Register a TracebackView for `exc` and print the URL it will be
    served under; no-op when no server is running."""
    if server_thread is None:
        return
    from pypy.tool.tb_server.render import TracebackView
    x = TracebackView(exc)
    print "traceback is at http://localhost:%d/%s" % (server_port, x.name)
if __name__ == "__main__":
t = start()
wait_until_interrupt()
| Python |
from pypy.tool.tb_server.server import TBRequestHandler
import py
html = py.xml.html
import traceback
import cgi
import urllib
views = TBRequestHandler.views
class URL(object):
    """Small parsed-URL helper built on urlparse, with the query string
    pre-parsed into a {key: [values]} dict."""
    attrs='scm','netloc','path','params','query','fragment'
    attrindex = dict(zip(attrs, range(len(attrs))))
    # XXX authentication part is not parsed

    def __init__(self, string='', **kw):
        from urlparse import urlparse
        for name,value in zip(self.attrs, urlparse(string, 'http')):
            setattr(self, name, value)
        # keyword arguments override the parsed components
        self.__dict__.update(kw)
        self.query = cgi.parse_qs(self.query)

    def link_with_options(self, kw):
        """Return a relative link equal to this URL but with the query
        options in `kw` added / overridden."""
        nq = {}
        for k in self.query:
            nq[k] = self.query[k][0]
        nq.update(kw)
        query = urllib.urlencode(nq)
        from urlparse import urlunparse
        return urlunparse(('', self.netloc, self.path,
                           self.params, query, self.fragment))
class Renderer:
    """Base class: wraps render_self() in a full html page; a failure
    while rendering is shown as a traceback inside the page itself."""
    def render(self, path):
        url = URL(path)
        # path components after the view name are passed to render_self
        args = url.path.split('/')[2:]
        try:
            inner = self.render_self(url, args)
        except:
            import sys, traceback
            lines = traceback.format_exception(*sys.exc_info())
            inner = html.pre(
                py.xml.escape(''.join(
                ['Internal Rendering Error, traceback follows\n'] + lines)))
        tag = html.html(
            html.head(),
            html.body(
                inner
            )
        )
        return tag.unicode(indent=2)
class TracebackView(Renderer):
    """Renders one recorded exception; registers itself in `views`
    under a unique 'tracebackN' name so the request handler can serve
    it."""
    def __init__(self, excinfo):
        self.name = 'traceback%d' % len(views)
        views[self.name] = self
        if not isinstance(excinfo, py.code.ExceptionInfo):
            excinfo = py.code.ExceptionInfo(excinfo)
        self.excinfo = excinfo

    def render_self(self, url, args):
        """Render every traceback entry plus the exception line;
        per-entry options come from query keys like 'entry3:showlocals'."""
        lines = html.div()
        opts = {}
        for k in url.query:
            ent, opt = k.split(':')
            val = int(url.query[k][0])
            opts.setdefault(ent, {})[opt] = val
        i = 0
        for tbentry in self.excinfo.traceback:
            lines.append(self.render_tb(
                url, tbentry, i,
                **opts.get('entry' + str(i), {})))
            i += 1
        lines.append(html.pre(py.xml.escape(self.excinfo.exconly())))
        return lines

    def render_tb(self, url, tbentry, i, showlocals=0):
        """Render one traceback entry: file/line link, a toggle link for
        the locals, the source line, and optionally the local variables."""
        lines = html.pre()
        filename = tbentry.frame.code.path
        lineno = tbentry.lineno + 1
        name = tbentry.frame.code.name
        link = '/file%s?line=%d#%d' %(filename, lineno, lineno)
        lines.append(' File "%s", line %d, in %s\n'%(
            html.a(filename, href=link), lineno, name))
        # the link flips this entry's showlocals flag
        lines.append(html.a('locals', href=url.link_with_options(
            {'entry%d:showlocals' % i : 1-showlocals})))
        lines.append(' ' +
                     filename.readlines()[lineno-1].lstrip())
        if showlocals:
            # underscored names are considered internal and skipped
            for k, v in tbentry.frame.f_locals.items():
                if k[0] == '_':
                    continue
                lines.append(py.xml.escape('%s=%s\n'%(k, repr(v)[:1000])))
        return lines
def ln(lineno):
    # an anchor target for the given line number
    return html.a(name=str(lineno))
class FileSystemView(Renderer):
    """Serves any file as an html table of numbered source lines; the
    'line' query parameter highlights one line (used by the traceback
    view's file links)."""
    def render_self(self, url, args):
        fname = '/' + '/'.join(args)
        lines = html.table()
        i = 1
        hilite = int(url.query.get('line', [-1])[0])
        for line in open(fname):
            if i == hilite:
                kws = {'style': 'font-weight: bold;'}
            else:
                kws = {}
            row = html.tr(
                html.td(html.a("%03d" % i, name=str(i))),
                html.td(
                    # [:-1] drops the trailing newline
                    html.pre(py.xml.escape(line)[:-1],
                             **kws),
                ),
            )
            lines.append(row)
            i += 1
        return lines

views['file'] = FileSystemView()
| Python |
# Lazy import
def start():
    """Lazy bootstrap: on first use, import the real server module,
    rebind this module's four entry points to the real implementations,
    then start the server."""
    global start, stop, publish_exc, wait_until_interrupt
    from server import start, stop, publish_exc, wait_until_interrupt
    return start()
def stop():
    # placeholder; replaced by the real implementation once start() ran
    pass
def wait_until_interrupt():
    # placeholder; replaced by the real implementation once start() ran
    pass
def publish_exc(exc):
    # placeholder; replaced by the real implementation once start() ran
    pass
| Python |
"""
ref = UnionRef(x) -> creates a reference to x, such that ref() is x.
Two references can be merged: ref.merge(ref2) make ref and ref2 interchangeable.
After a merge, ref() is ref2(). This is done by asking the two older objects
that ref and ref2 pointed to how they should be merged. The point is that
large equivalence relations can be built this way:
>>> ref1.merge(ref2)
>>> ref3.merge(ref4)
>>> ref1() is ref4()
False
>>> ref2.merge(ref3)
>>> ref1() is ref4()
True
By default, two objects x and y are merged by calling x.update(y).
"""
import UserDict
from pypy.tool.uid import uid
class UnionRef(object):
    """Union-find reference cell: ref() returns the shared object, and
    merge() makes two references (and everything already merged into
    them) interchangeable.  Uses weighted union with path compression."""
    __slots__ = ('_obj', '_parent', '_weight')

    def __init__(self, obj):
        "Build a new reference to 'obj'."
        self._obj = obj
        self._parent = None
        self._weight = 1

    def __call__(self):
        "Return the 'obj' that self currently references."
        return self._findrep()._obj

    def _findrep(self):
        # walk up to the representative, compressing long chains
        parent = self._parent
        if not parent:
            return self
        if not parent._parent:
            return parent
        # chain longer than one link: shorten it
        chain = [self]
        while parent._parent:
            chain.append(parent)
            parent = parent._parent
        for node in chain:
            node._parent = parent
        return parent

    def merge(self, other, union=None):
        "Merge two references. After a.merge(b), a() and b() are identical."
        self = self._findrep()
        other = other._findrep()
        if self is other:
            return self
        # weighted union: the heavier tree absorbs the lighter one
        if self._weight < other._weight:
            self, other = other, self
        self._weight = self._weight + other._weight
        other._parent = self
        absorbed = other._obj
        del other._obj
        if union is None:
            self.update(absorbed)
        else:
            self._obj = union(self._obj, absorbed)
        return self

    def update(self, obj):
        "Merge 'obj' in self. Default implementation, can be overridden."
        self._obj.update(obj)

    def __hash__(self):
        raise TypeError("UnionRef objects are unhashable")

    def __eq__(self, other):
        # two refs are equal when they share a representative
        if not isinstance(other, UnionRef):
            return False
        return self._findrep() is other._findrep()

    def __ne__(self, other):
        return not (self == other)
class UnionDict(object, UserDict.DictMixin):
    """Mapping class whose items can be unified. Conceptually, instead of
    a set of (key, value) pairs, this is a set of ({keys}, value) pairs.
    The method merge(key1, key2) merges the two pairs containing, respectively,
    key1 and key2.
    """
    _slots = ('_data',)

    def __init__(self, dict=None, **kwargs):
        # every value is held behind a UnionRef so it can be merged later
        self._data = {}
        if dict is not None:
            self.update(dict)
        if kwargs:
            self.update(kwargs)

    def merge(self, key1, key2, union=None):
        "Unify the values behind key1 and key2 (see UnionRef.merge)."
        merged = self._data[key1].merge(self._data[key2], union)
        self._data[key1] = merged

    def copy(self):
        "Duplicate the dict, preserving which keys share a single value."
        duplicate = UnionDict()
        fresh_refs = {}   # old representative ref -> new ref in the copy
        for key, ref in self._data.iteritems():
            rep = ref._findrep()
            try:
                newref = fresh_refs[rep]
            except KeyError:
                newref = fresh_refs[rep] = UnionRef(rep())
            duplicate._data[key] = newref
        return duplicate

    def __repr__(self):
        return "<UnionDict at 0x%x>" % uid(self)

    def __getitem__(self, key):
        return self._data[key]()

    def __setitem__(self, key, value):
        self._data[key] = UnionRef(value)

    def __delitem__(self, key):
        del self._data[key]

    def keys(self):
        return self._data.keys()

    def has_key(self, key):
        return key in self._data

    def __contains__(self, key):
        return key in self._data

    def __iter__(self):
        return iter(self._data)

    def iteritems(self):
        for key, ref in self._data.iteritems():
            yield (key, ref())

    def clear(self):
        self._data.clear()

    def popitem(self):
        key, ref = self._data.popitem()
        return key, ref()

    def __len__(self):
        return len(self._data)
| Python |
# This is a general algorithm used by the annotator.
# union-find impl, a info object is attached to the roots
class UnionFind(object):
    """Union-find with path compression and union-by-weight.

    An optional 'info' object, built by info_factory, is attached to
    every root; merging two sets merges their infos via
    info1.update(info2).
    """

    def __init__(self, info_factory=None):
        self.link_to_parent = {}   # obj -> parent obj; roots point to themselves
        self.weight = {}           # root -> number of objects in its set
        self.info_factory = info_factory
        self.root_info = {}        # root -> attached info object (or None)

    # mapping-like [] access: obj -> info of its set
    def __getitem__(self, obj):
        if obj not in self.link_to_parent:
            raise KeyError(obj)
        ignore, rep, info = self.find(obj)
        return info

    def __contains__(self, obj):
        return obj in self.link_to_parent

    def __iter__(self):
        return iter(self.link_to_parent)

    def keys(self):
        return self.link_to_parent.keys()

    def infos(self):
        return self.root_info.values()

    def find_rep(self, obj):
        "Return the representative of obj's set."
        try:
            # fast path: obj's direct parent is already a root
            parent = self.link_to_parent[obj]
            self.root_info[parent]   # may raise KeyError
            return parent
        except KeyError:
            # general case
            ignore, rep, info = self.find(obj)
            return rep

    def find(self, obj):  # -> new_root, rep, info
        "Find (creating if needed) obj's set: (created?, representative, info)."
        if obj not in self.link_to_parent:
            # never seen before: obj becomes the root of a fresh singleton
            if self.info_factory:
                info = self.info_factory(obj)
            else:
                info = None
            self.root_info[obj] = info
            self.weight[obj] = 1
            self.link_to_parent[obj] = obj
            return True, obj, info
        # climb to the root, remembering every node crossed on the way
        trail = [obj]
        above = self.link_to_parent[obj]
        while above is not trail[-1]:
            trail.append(above)
            above = self.link_to_parent[above]
        # path compression: re-point the whole trail directly at the root
        for crossed in trail:
            self.link_to_parent[crossed] = above
        return False, above, self.root_info[above]

    def union(self, obj1, obj2):  # -> not_noop, rep, info
        "Merge the sets of obj1 and obj2; the heavier root survives."
        new1, rep1, info1 = self.find(obj1)
        new2, rep2, info2 = self.find(obj2)
        if rep1 is rep2:
            return new1 or new2, rep1, info1
        w1 = self.weight[rep1]
        w2 = self.weight[rep2]
        if w1 < w2:
            rep1, rep2, info1, info2 = rep2, rep1, info2, info1
        if info1 is not None:
            info1.update(info2)
        self.link_to_parent[rep2] = rep1
        del self.weight[rep2]
        del self.root_info[rep2]
        self.weight[rep1] = w1 + w2
        self.root_info[rep1] = info1
        return True, rep1, info1
| Python |
"""
Utilities to manipulate graphs (vertices and edges, not control flow graphs).
Convention:
'vertices' is a set of vertices (or a dict with vertices as keys);
'edges' is a dict mapping vertices to a list of edges with its source.
Note that we can usually use 'edges' as the set of 'vertices' too.
"""
class Edge:
    """One directed edge from 'source' to 'target'."""
    def __init__(self, source, target):
        self.source, self.target = source, target
    def __repr__(self):
        return repr(self.source) + ' -> ' + repr(self.target)
def make_edge_dict(edge_list):
    "Convert a flat list of edges into the official edges-by-source dict."
    mapping = {}
    for e in edge_list:
        mapping.setdefault(e.source, []).append(e)
        # targets get an entry too, so the dict doubles as the vertex set
        mapping.setdefault(e.target, [])
    return mapping
def depth_first_search(root, vertices, edges):
    """Run a DFS from 'root', restricted to 'vertices'; return the list of
    ('start', v) / ('stop', v) events in preorder/postorder."""
    visited = {}
    events = []
    def explore(vertex):
        events.append(('start', vertex))
        visited[vertex] = True
        for edge in edges[vertex]:
            nxt = edge.target
            if nxt in vertices and nxt not in visited:
                explore(nxt)
        events.append(('stop', vertex))
    explore(root)
    return events
def strong_components(vertices, edges):
    """Enumerates the strongly connected components of a graph. Each one is
    a set of vertices where any vertex can be reached from any other vertex by
    following the edges. In a tree, all strongly connected components are
    sets of size 1; larger sets are unions of cycles.
    """
    # Single DFS sweep; 'stack' accumulates vertices whose component has
    # not been closed yet.
    component_root = {}     # vertex -> current candidate root of its component
    discovery_time = {}     # vertex -> DFS preorder index
    remaining = vertices.copy()
    stack = []
    for root in vertices:
        if root in remaining:
            for event, v in depth_first_search(root, remaining, edges):
                if event == 'start':
                    del remaining[v]
                    discovery_time[v] = len(discovery_time)
                    component_root[v] = v
                    stack.append(v)
                else: # event == 'stop'
                    # on postorder: v's component root is the earliest-
                    # discovered root reachable through v's edges
                    vroot = v
                    for edge in edges[v]:
                        w = edge.target
                        if w in component_root:
                            wroot = component_root[w]
                            if discovery_time[wroot] < discovery_time[vroot]:
                                vroot = wroot
                    if vroot == v:
                        # v closes its own component: pop it off the stack
                        component = {}
                        while True:
                            w = stack.pop()
                            del component_root[w]
                            component[w] = True
                            if w == v:
                                break
                        yield component
                    else:
                        component_root[v] = vroot
def all_cycles(root, vertices, edges):
    """Enumerates cycles. Each cycle is a list of edges."""
    depth_on_path = {}   # vertex -> depth where first seen; None once finished
    path = []            # edges of the current DFS path
    cycles = []

    def walk(vertex):
        if vertex in depth_on_path:
            depth = depth_on_path[vertex]
            if depth is not None:
                # back-edge: the path suffix from 'depth' is a cycle
                cycles.append(path[depth:])
            return
        depth_on_path[vertex] = len(path)
        for edge in edges[vertex]:
            if edge.target in vertices:
                path.append(edge)
                walk(edge.target)
                path.pop()
        depth_on_path[vertex] = None   # fully explored

    walk(root)
    return cycles
def break_cycles(vertices, edges):
    """Enumerates a reasonably minimal set of edges that must be removed to
    make the graph acyclic."""
    # Greedy heuristic: inside each strongly connected component, kill the
    # edge that occurs in the most cycles, then re-queue the component
    # without that edge.  NOTE: 'graphs' is deliberately extended while
    # being iterated over -- it acts as a work list.
    graphs = [(vertices, edges)]
    for vertices, edges in graphs:
        #print ''.join(vertices),
        #print [e.source+e.target for l in edges.values() for e in l]
        for component in strong_components(vertices, edges):
            #print '-->', ''.join(component)
            # weight of an edge = number of cycles it participates in
            edge_weights = {}
            random_vertex = component.iterkeys().next()
            for cycle in all_cycles(random_vertex, component, edges):
                #print '\tcycle:', [e.source+e.target for e in cycle]
                for edge in cycle:
                    edge_weights[edge] = edge_weights.get(edge, 0) + 1
            if edge_weights:
                max_weight = max(edge_weights.values())
                for edge, weight in edge_weights.iteritems():
                    if weight == max_weight:
                        break
                # kill this edge
                yield edge
                # retry the same component with the killed edge removed
                new_edges = edges.copy()
                new_edges[edge.source] = [e for e in new_edges[edge.source]
                                          if e is not edge]
                graphs.append((component, new_edges))
| Python |
import weakref
import UserDict
class MultiWeakKeyDictionary(UserDict.DictMixin):
    """Mapping keyed by tuples of objects, each component of which is held
    only weakly: an entry goes away as soon as any of its key objects is
    garbage-collected.

    Implemented as a tree of nested WeakKeyDictionaries, one level per
    key component; the first level is a plain dict keyed by the tuple
    length (which also keeps keys of different lengths apart).
    """
    def __init__(self):
        # key length -> nested WeakKeyDictionary levels
        self._bylength = {}
    def __getitem__(self, key):
        # prepend the length so (a,) and (a, b) never collide
        key = (len(key),) + key
        d = self._bylength
        for step in key:
            d = d[step]
        return d
    def __setitem__(self, key, value):
        key = (len(key),) + key
        d = self._bylength
        for step in key[:-1]:
            try:
                d = d[step]
            except KeyError:
                # create missing intermediate levels on demand
                d[step] = newd = weakref.WeakKeyDictionary()
                d = newd
        d[key[-1]] = value
    def __delitem__(self, key):
        key = (len(key),) + key
        d = self._bylength
        for step in key[:-1]:
            d = d[step]
        del d[key[-1]]
    def keys(self):
        result = []
        def enumkeys(initialkey, d, result):
            # 'length' is the loop variable of the caller below; it holds
            # the key length currently being walked
            if len(initialkey) == length:
                result.append(initialkey)
            else:
                for key, value in d.iteritems():
                    enumkeys(initialkey + (key,), value, result)
        for length, d in self._bylength.iteritems():
            enumkeys((), d, result)
        return result
| Python |
from __future__ import division
# coefficients whose absolute value is at most EPSILON are treated as zero
EPSILON = 1E-12
class SparseMatrix:
    """Sparse matrix stored as one {column: value} dict per row; values
    within EPSILON of zero are not stored at all."""
    def __init__(self, height):
        # one {column: value} dict per row
        self.lines = [{} for row in range(height)]
    def __getitem__(self, (row, col)):
        # absent entries read as exact zero
        return self.lines[row].get(col, 0)
    def __setitem__(self, (row, col), value):
        if abs(value) > EPSILON:
            self.lines[row][col] = value
        else:
            # storing a (numerical) zero removes the entry, if any
            try:
                del self.lines[row][col]
            except KeyError:
                pass
    def copy(self):
        "Return an independent copy (each row is a fresh dict)."
        m = SparseMatrix(len(self.lines))
        for line1, line2 in zip(self.lines, m.lines):
            line2.update(line1)
        return m
    def solve(self, vector):
        """Solves 'self * [x1...xn] == vector'; returns the list [x1...xn].
        Raises ValueError if no solution or indeterminate.
        """
        # Gaussian elimination with partial pivoting.  'columns' mirrors
        # 'lines' transposed, so finding candidate pivot rows is cheap.
        vector = list(vector)
        lines = [line.copy() for line in self.lines]
        columns = [{} for i in range(len(vector))]
        for i, line in enumerate(lines):
            for j, a in line.items():
                columns[j][i] = a
        lines_left = dict.fromkeys(range(len(self.lines)))
        nrows = []
        for ncol in range(len(vector)):
            currentcolumn = columns[ncol]
            # choose, among not-yet-used rows, the one with the largest pivot
            lst = [(abs(a), i) for (i, a) in currentcolumn.items()
                   if i in lines_left]
            _, nrow = max(lst)   # ValueError -> no solution
            nrows.append(nrow)
            del lines_left[nrow]
            line1 = lines[nrow]
            maxa = line1[ncol]
            # eliminate column 'ncol' from every other candidate row
            for _, i in lst:
                if i != nrow:
                    line2 = lines[i]
                    a = line2.pop(ncol)
                    #del currentcolumn[i] -- but currentcolumn no longer used
                    factor = a / maxa
                    vector[i] -= factor*vector[nrow]
                    for col in line1:
                        if col > ncol:
                            value = line2.get(col, 0) - factor*line1[col]
                            if abs(value) > EPSILON:
                                line2[col] = columns[col][i] = value
                            else:
                                # keep row and transposed view consistent
                                line2.pop(col, 0)
                                columns[col].pop(i, 0)
        # back-substitution, walking the pivots in reverse order
        solution = [None] * len(vector)
        for i in range(len(vector)-1, -1, -1):
            row = nrows[i]
            line = lines[row]
            total = vector[row]
            for j, a in line.items():
                if j != i:
                    total -= a * solution[j]
            solution[i] = total / line[i]
        return solution
| Python |
# public interface of this module; everything else is implementation detail
__all__ = ['FSet', 'emptyset']
# Reference:
# "Implementing sets efficiently in a functional language"
# http://swiss.csail.mit.edu/~adams/BB/
# See BB.sml in the current directory.
class FSet(object):
    """Functional Set.
    Behaves like a frozenset from Python 2.4 (incomplete, though).
    This version is meant to have a better complexity than frozenset for
    operations involving a lot of single-element adds and unions.
    For example, a long chain of 'set.union([x]).union([y]).union([z])...'
    takes quadratic time with frozensets, but only n*log(n) with FSets.
    """
    # Implemented as an immutable weight-balanced binary search tree;
    # 'emptyset' is the single shared leaf sentinel.
    __slots__ = ['_left', '_value', '_right', '_count']
    def __new__(cls, items=()):
        if isinstance(items, FSet):
            return items
        items = list(items)
        if len(items) == 1:
            return node(emptyset, items[0], emptyset)
        if not items:
            return emptyset
        items.sort()
        any = items[0]
        # remove duplicates: keep x only when it differs from its predecessor
        # (items[0] compares with items[-1], equal only if ALL are equal)
        items = [x for i, x in enumerate(items) if x != items[i-1]]
        if not items:
            items.append(any)
        def maketree(start, stop):
            # build a perfectly balanced tree from the sorted slice
            if start == stop:
                return emptyset
            else:
                mid = (start+stop)//2
                return node(maketree(start, mid), items[mid],
                            maketree(mid+1, stop))
        return maketree(0, len(items))
    def __len__(self):
        return self._count
    def __repr__(self):
        return '{%s}' % (', '.join([repr(n) for n in self]),)
    def __iter__(self):
        # values come out in sorted order
        return treeiter(self)
    def union(self, other):
        return uniontree(self, FSet(other))
    def __or__(self, other):
        if not isinstance(other, FSet):
            return NotImplemented
        return uniontree(self, other)
    def __eq__(self, other):
        if not isinstance(other, FSet):
            return NotImplemented
        if self is other:
            return True
        if eqtree(self, other):
            # equal: make 'other' share our representation so that later
            # comparisons hit the fast identity path above
            other._left = self._left
            other._value = self._value
            other._right = self._right
            return True
        return False
    def __ne__(self, other):
        res = self.__eq__(other)
        if res is NotImplemented:
            return NotImplemented
        return not res
    def __hash__(self):
        return hash(tuple(self)) ^ 1043498183
    def __contains__(self, value):
        return contains(self, value)
# the unique empty FSet; built via object.__new__ because FSet.__new__
# itself needs 'emptyset' to exist already
emptyset = object.__new__(FSet)
emptyset._count = 0
# ____________________________________________________________
# creation and balancing stuff
# balance invariant: a subtree may be at most WEIGHT times heavier
# than its sibling
WEIGHT = 3
def node(left, value, right):
    "Allocate a raw tree node, bypassing FSet.__new__ entirely."
    fresh = object.__new__(FSet)
    fresh._left = left
    fresh._value = value
    fresh._right = right
    fresh._count = 1 + left._count + right._count
    return fresh
def node_balance_fast(left, value, right):
    # used when an original tree was balanced, and changed by at most
    # one element (as in adding or deleting one item).
    # At most one rotation (single or double) restores the invariant.
    ln = left._count
    rn = right._count
    if ln <= 1 and rn <= 1:
        return node(left, value, right)
    elif rn > WEIGHT * ln: # right too big
        # pick single vs double rotation by which grandchild is heavier
        if right._left._count < right._right._count:
            return single_L(left, value, right)
        else:
            return double_L(left, value, right)
    elif ln > WEIGHT * rn: # left too big
        if left._right._count < left._left._count:
            return single_R(left, value, right)
        else:
            return double_R(left, value, right)
    else:
        return node(left, value, right)
def node_balance(left, value, right):
    # General rebalancing join: 'left' and 'right' are balanced trees of
    # arbitrary relative size; produce a balanced tree of all elements
    # plus 'value'.
    if left is emptyset:
        return add(right, value)
    elif right is emptyset:
        return add(left, value)
    elif WEIGHT * left._count < right._count:
        # right much heavier: push the join down its left spine
        t = node_balance(left, value, right._left)
        return node_balance_fast(t, right._value, right._right)
    elif WEIGHT * right._count < left._count:
        t = node_balance(left._right, value, right)
        return node_balance_fast(left._left, left._value, t)
    else:
        return node(left, value, right)
def add(tree, value):
    "Return a balanced tree equal to 'tree' with 'value' inserted."
    if tree is emptyset:
        return node(emptyset, value, emptyset)
    elif value < tree._value:
        t = add(tree._left, value)
        return node_balance_fast(t, tree._value, tree._right)
    elif value == tree._value:
        # already present; trees are immutable, so reuse as-is
        return tree
    else:
        t = add(tree._right, value)
        return node_balance_fast(tree._left, tree._value, t)
def single_L(left, value, right):
    # left rotation: the right child becomes the new root
    return node(node(left, value, right._left), right._value, right._right)
def single_R(left, value, right):
    # right rotation: the left child becomes the new root
    return node(left._left, left._value, node(left._right, value, right))
def double_L(left, value, right):
    # right-left double rotation: right._left becomes the new root
    rl = right._left
    n1 = node(left, value, rl._left)
    n2 = node(rl._right, right._value, right._right)
    return node(n1, rl._value, n2)
def double_R(left, value, right):
    # left-right double rotation: left._right becomes the new root
    lr = left._right
    n1 = node(left._left, left._value, lr._left)
    n2 = node(lr._right, value, right)
    return node(n1, lr._value, n2)
# ____________________________________________________________
# union
def uniontree(tree1, tree2):
    "Return a balanced tree holding the union of the two trees' elements."
    if tree2._count <= 1:
        # tiny tree2: a plain insertion (or nothing at all)
        if tree2 is emptyset:
            return tree1
        else:
            return add(tree1, tree2._value)
    elif tree1._count <= 1:
        if tree1 is emptyset:
            return tree2
        else:
            return add(tree2, tree1._value)
    else:
        # split tree2 around tree1's root value and recurse on both halves
        left2, right2 = splittree(tree2, tree1._value)
        return node_balance(uniontree(tree1._left, left2), tree1._value,
                            uniontree(tree1._right, right2))
def splittree(tree, value):
    # Split 'tree' into two balanced trees: (elements < value,
    # elements > value).  'value' itself, if present, is dropped.
    if tree is emptyset:
        return emptyset, emptyset
    elif tree._value < value:
        t1, t2 = splittree(tree._right, value)
        return node_balance(tree._left, tree._value, t1), t2
    elif tree._value == value:
        return tree._left, tree._right
    else:
        t1, t2 = splittree(tree._left, value)
        return t1, node_balance(t2, tree._value, tree._right)
# ____________________________________________________________
# utilities
def treeiter(tree):
    # Generator yielding the tree's values in sorted (in-order) order,
    # with an explicit ancestor stack instead of recursion.
    if tree is emptyset:
        return
    path = []   # ancestors whose value and right subtree are still pending
    while True:
        # dive to the leftmost node of the current subtree
        while tree._left is not emptyset:
            path.append(tree)
            tree = tree._left
        yield tree._value
        tree = tree._right
        while tree is emptyset:
            # nothing on the right: climb back up to a pending ancestor
            if not path:
                return
            tree = path.pop()
            yield tree._value
            tree = tree._right
def eqtree(tree1, tree2):
    # Equality of element sets, regardless of tree shape: compare by
    # splitting tree2 around tree1's root value.
    if tree1 is tree2:
        return True
    if tree1._count != tree2._count:
        return False
    assert tree1 is not emptyset and tree2 is not emptyset
    left2, right2 = splittree(tree2, tree1._value)
    if left2._count + right2._count == tree2._count:
        return False # _value was not in tree2
    return eqtree(tree1._left, left2) and eqtree(tree1._right, right2)
def contains(tree, value):
    "Standard binary-search descent; 'emptyset' is the leaf sentinel."
    current = tree
    while current is not emptyset:
        if value < current._value:
            current = current._left
        elif value == current._value:
            return True
        else:
            current = current._right
    return False
# sentinel meaning "no bound on this side"
_no = object()
def checktree(tree, bmin=_no, bmax=_no):
    # Debugging helper: assert the BST ordering invariant (bmin < values
    # < bmax) and that every _count field is consistent.
    if tree is not emptyset:
        if bmin is not _no:
            assert bmin < tree._value
        if bmax is not _no:
            assert tree._value < bmax
        assert tree._count == tree._left._count + tree._right._count + 1
        checktree(tree._left, bmin, tree._value)
        checktree(tree._right, tree._value, bmax)
| Python |
#empty
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # no __file__ (e.g. running interactively): use the script's path
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # walk upwards until a directory named 'part' is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)
    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    # make sure 'part's parent directory takes priority on sys.path
    sys.path.insert(0, head)
    # alias already-imported modules living under the pypy root to their
    # full dotted names as well
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): a file basename has no dot here, so this
        # startswith(part + '.') check looks like it can never match;
        # confirm whether the aliasing loop below is effectively dead.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            # also attach the module as an attribute of its parent package
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
    """ clone master version of autopath.py into all subdirs """
    from os.path import join, walk
    # refuse to run from a copy: only pypy/tool/autopath.py is the master
    if not this_dir.endswith(join('pypy','tool')):
        raise EnvironmentError("can only clone master version "
                               "'%s'" % join(pypydir, 'tool',_myname))
    def sync_walker(arg, dirname, fnames):
        # called by os.path.walk for every directory under the pypy root;
        # 'arg' holds the master file's content
        if _myname in fnames:
            fn = join(dirname, _myname)
            # NOTE(review): mode 'rwb+' is unusual -- presumably meant as
            # read/write binary; confirm on the target Python version
            f = open(fn, 'rwb+')
            try:
                if f.read() == arg:
                    print "checkok", fn
                else:
                    print "syncing", fn
                    # reopen for a truncating write of the master content
                    f = open(fn, 'w')
                    f.write(arg)
            finally:
                f.close()
    s = open(join(pypydir, 'tool', _myname), 'rb').read()
    walk(pypydir, sync_walker, s)
_myname = 'autopath.py'
# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')
# running this file directly re-copies it into every subdirectory
if __name__ == '__main__':
    __clone()
| Python |
#empty
| Python |
import sys
import opcode
import dis
import imp
import os
import __builtin__
import time
"""
so design goal:
i want to take a pile of source code and analyze each module for the
names it defines and the modules it imports and the names it uses from
them.
then i can find things like:
- things which are just plain not used anywhere
- things which are defined in one module and only used in another
- importing of names from modules where they are just imported from
somewhere else
- cycles in the import graph
 - unnecessary imports
finding imports at top level is fairly easy, although the variety of
types of import statement can be baffling. a mini reference:
import foo
->
LOAD_CONST None
IMPORT_NAME foo
STORE_NAME foo
import foo as bar
->
LOAD_CONST None
IMPORT_NAME foo
STORE_NAME bar
from foo import bar
->
LOAD_CONST ('bar',)
IMPORT_NAME foo
IMPORT_FROM bar
STORE_NAME bar
POP_TOP
from foo import bar, baz
->
LOAD_CONST ('bar','baz')
IMPORT_NAME foo
IMPORT_FROM bar
STORE_NAME bar
IMPORT_FROM baz
STORE_NAME baz
POP_TOP
from foo.baz import bar
->
LOAD_CONST ('bar',)
IMPORT_NAME foo.baz
IMPORT_FROM bar
STORE_NAME bar
POP_TOP
import foo.bar
->
LOAD_CONST None
IMPORT_NAME foo.bar
STORE_NAME foo
(I hate this style)
there are other forms, but i don't support them (should hit an
assertion rather than silently fail).
"""
class System:
    """Whole-analysis state: processed modules plus the work queue."""
    def __init__(self):
        # dotted name -> Module, for every module already analysed
        self.modules = {}
        # dotted name -> None, for modules queued but not yet analysed
        self.pendingmodules = {}
class Scope(object):
    """One lexical scope; lookups fall back to the enclosing scope."""
    def __init__(self, parent=None):
        self.modvars = {}     # varname -> absolute module name
        self.parent = parent  # enclosing Scope, or None at top level
        self.varsources = {}  # varname -> (module name, original name)

    def mod_for_name(self, name):
        "Module bound to 'name', searching outward; None if unbound."
        scope = self
        while scope is not None:
            if name in scope.modvars:
                return scope.modvars[name]
            scope = scope.parent
        return None

    def var_source(self, name):
        "(module, original name) that 'name' was imported from, or (None, None)."
        scope = self
        while scope is not None:
            if name in scope.varsources:
                return scope.varsources[name]
            scope = scope.parent
        return None, None
class Module(object):
    """Per-module record: what it defines, imports, and who imports it."""

    def __init__(self, name, system):
        self.name = name
        self.system = system
        self._imports = {} # {modname:{name:was-it-used?}}
        self.definitions = ['__file__']
        if name == 'pypy.objspace.std.objspace':
            # objspace defines many W_* names in ways the bytecode scan
            # cannot see, so they are registered by hand here
            self.definitions.extend([
                'W_NoneObject', 'W_BoolObject', 'W_BoolObject', 'W_TypeObject',
                'W_TypeObject', 'W_TypeObject', 'W_IntObject',
                'W_StringObject', 'W_UnicodeObject', 'W_FloatObject',
                'W_TupleObject', 'W_ListObject', 'W_LongObject', 'W_SliceObject',
                'W_IntObject', 'W_FloatObject', 'W_LongObject', 'W_TupleObject',
                'W_ListObject', 'W_DictObject', 'W_SliceObject',
                'W_StringObject', 'W_UnicodeObject', 'W_SeqIterObject',
                'W_TupleObject', 'W_DictObject', 'W_DictObject'])
        self.toplevelscope = Scope()
        self.importers = []   # names of modules importing this one

    def import_(self, modname):
        # should probably handle relative imports here.
        try:
            return self._imports[modname]
        except KeyError:
            # in recursive mode, queue the imported module for analysis
            if recursive and modname not in self.system.modules:
                self.system.pendingmodules[modname] = None
            fields = self._imports[modname] = {}
            return fields
def iteropcodes(codestring):
    """Yield (op, oparg) pairs for a raw bytecode string; oparg is None
    for argument-less opcodes.  EXTENDED_ARG is not supported."""
    pos, end = 0, len(codestring)
    while pos < end:
        op = ord(codestring[pos])
        pos += 1
        assert op != opcode.EXTENDED_ARG, 'EXTENDED_ARG'
        if op < opcode.HAVE_ARGUMENT:
            yield op, None
        else:
            # two-byte little-endian argument
            arg = ord(codestring[pos]) + (ord(codestring[pos+1]) << 8)
            pos += 2
            yield op, arg
class _Op(object):
def __getattr__(self, name):
if name in opcode.opmap:
return opcode.opmap[name]
else:
raise AttributeError, name
_op_ = _Op()
# opcode groups: all the ways a local/global/closure name can be read or written
loadops = [_op_.LOAD_NAME, _op_.LOAD_GLOBAL, _op_.LOAD_FAST, _op_.LOAD_DEREF]
storeops = [_op_.STORE_NAME, _op_.STORE_FAST, _op_.STORE_DEREF, _op_.STORE_GLOBAL]
def name_for_op(code, op, oparg):
    """Return the variable name that (op, oparg) refers to in code object
    'code', looking in the right name table for each opcode family."""
    if op in (_op_.LOAD_GLOBAL, _op_.STORE_GLOBAL, _op_.LOAD_NAME, _op_.STORE_NAME):
        return code.co_names[oparg]
    if op in (_op_.LOAD_FAST, _op_.STORE_FAST):
        return code.co_varnames[oparg]
    if op in (_op_.LOAD_DEREF, _op_.STORE_DEREF):
        # cell variables come first, then free variables
        ncells = len(code.co_cellvars)
        if oparg < ncells:
            return code.co_cellvars[oparg]
        return code.co_freevars[oparg - ncells]
    assert 0, "%s is not an opcode with a name!" % (opcode.opname[op],)
def process(r, codeob, scope, toplevel=False):
    """Scan one code object, recording into Module 'r' which names it
    imports, defines and uses.  Nested code objects (functions, classes)
    are collected and scanned afterwards with a child Scope.
    """
    opcodes = list(iteropcodes(codeob.co_code))
    i = 0
    codeobjs = []
    while i < len(opcodes):
        op, oparg = opcodes[i]
        if op == _op_.IMPORT_NAME:
            # the LOAD_CONST right before IMPORT_NAME carries the fromlist
            # (see the opcode reference in the module docstring)
            preop, preoparg = opcodes[i-1]
            assert preop == _op_.LOAD_CONST, 'LOAD_CONST'
            fromlist = codeob.co_consts[preoparg]
            modname = codeob.co_names[oparg]
            if fromlist is None:
                # this is the 'import foo' case
                r.import_(modname)
                # skip LOAD_ATTRs up to the STORE; their presence tells
                # 'import foo.bar as b' apart from plain 'import foo.bar'
                seenloadattr = False
                while 1:
                    postop, postoparg = opcodes[i+1]
                    i += 1
                    if postop != _op_.LOAD_ATTR:
                        break
                    seenloadattr = True
                assert postop in storeops, 'postop'
                storename = name_for_op(codeob, postop, postoparg)
                if seenloadattr:
                    scope.modvars[storename] = modname
                else:
                    # plain 'import foo.bar' binds only the top package
                    scope.modvars[storename] = modname.split('.')[0]
            elif fromlist == ('*',):
                # 'from foo import *': only supported at module level
                assert toplevel, 'toplevel'
                if modname.startswith('pypy.'):
                    # analyse the source module first so we know exactly
                    # which names the star import brings in
                    if modname not in r.system.modules:
                        if modname in r.system.pendingmodules:
                            del r.system.pendingmodules[modname]
                        process_module(modname, r.system)
                    M = r.system.modules[modname]
                    for d in M.definitions + list(M.toplevelscope.modvars) + \
                            [a[1] for a in M.toplevelscope.varsources.itervalues()]:
                        if d[0] != '_':
                            #print '* got ', d
                            # -1 marks "brought in by *, not yet used"
                            scope.varsources[d] = modname, d
                            r.import_(modname)[d] = -1
                    r.import_(modname)['*'] = True
            else:
                # ok, this is from foo import bar
                # locate the package directory so we can tell submodules
                # from plain names below
                path = None
                try:
                    for part in modname.split('.'):
                        path = [imp.find_module(part, path)[1]]
                except ImportError:
                    path = -1
                i += 1
                for f in fromlist:
                    op, oparg = opcodes[i]
                    assert op == _op_.IMPORT_FROM, 'IMPORT_FROM'
                    assert codeob.co_names[oparg] == f, 'f'
                    i += 1
                    if path == -1:
                        # source module not found: skip over the STORE
                        i += 1
                        continue
                    op, oparg = opcodes[i]
                    i += 1
                    assert op in storeops, 'opstore'
                    storename = name_for_op(codeob, op, oparg)
                    try:
                        imp.find_module(f, path)
                    except ImportError:
                        # 'f' is an ordinary name defined in the module
                        r.import_(modname)[f] = False
                        scope.varsources[storename] = modname, f
                    else:
                        # 'f' is itself a submodule of the package
                        submod = modname + '.' + f
                        r.import_(submod)
                        scope.modvars[storename] = submod
                op, oparg = opcodes[i]
                assert op == _op_.POP_TOP, 'POP_TOP'
        elif op == _op_.STORE_NAME and toplevel or op == _op_.STORE_GLOBAL:
            # any top-level (or explicit global) binding defines a name
            r.definitions.append(codeob.co_names[oparg])
        elif op == _op_.LOAD_ATTR:
            preop, preoparg = opcodes[i-1]
            if preop in loadops:
                # attribute access on a module-bound variable counts as
                # using that attribute name from the module
                name = name_for_op(codeob, preop, preoparg)
                m = scope.mod_for_name(name)
                if m:
                    r.import_(m)[codeob.co_names[oparg]] = True
        elif op in loadops:
            # using a name that came from a 'from foo import bar'
            name = name_for_op(codeob, op, oparg)
            m, a = scope.var_source(name)
            if m:
                assert a in r.import_(m), 'a'
                r.import_(m)[a] = True
##             else:
##                 if name not in r.definitions \
##                        and scope.mod_for_name(name) is None \
##                        and scope.var_source(name) == (None, None) \
##                        and name not in __builtin__.__dict__ \
##                        and (op == LOAD_GLOBAL or toplevel):
##                     print 'where did', name, 'come from?'
        elif op in [_op_.MAKE_FUNCTION, _op_.MAKE_CLOSURE]:
            # remember nested code objects; they are scanned after this one
            preop, preoparg = opcodes[i-1]
            assert preop == _op_.LOAD_CONST, 'preop'
            codeobjs.append(codeob.co_consts[preoparg])
        i += 1
    for c in codeobjs:
        process(r, c, Scope(scope))
def find_from_dotted_name(modname):
    # Return the filesystem path for the dotted module name 'modname',
    # walking the package hierarchy with imp.find_module; prints the
    # failing name and re-raises on ImportError.
    path = None
    for part in modname.split('.'):
        try:
            path = [imp.find_module(part, path)[1]]
        except ImportError:
            print modname
            raise
    return path[0]
def process_module(dottedname, system):
    """Compile and scan one module; registers and returns its Module
    record, or returns None when the source fails to compile/import."""
    if dottedname.endswith('.py'):
        # a file path was given: derive the dotted name from it
        # (note: lstrip('./') strips ALL leading '.' and '/' characters)
        path = dottedname
        dottedname = path.lstrip('./').rstrip()[:-3].replace('/', '.')
    else:
        path = find_from_dotted_name(dottedname)
    ispackage = False
    if os.path.isdir(path):
        ispackage = True
        path += '/__init__.py'
    r = Module(dottedname, system)
    r.ispackage = ispackage
    if dottedname in system.modules:
        # already analysed (e.g. forced earlier by a star import)
        return system.modules[dottedname]
    try:
        # "U" = universal-newlines read of the source text
        code = compile(open(path, "U").read(), path, 'exec')
        process(r, code, r.toplevelscope, True)
    except (ImportError, SyntaxError), e:
        print "failed!", e
        #raise
    else:
        if dottedname in system.pendingmodules:
            print
            del system.pendingmodules[dottedname]
        system.modules[dottedname] = r
        return r
# --- stuff that uses the processed data ---
def report_unused_symbols(system):
    """Print, per pypy module, imported names that are never used, and
    names used from a module that merely re-exports them."""
    for name, mod in sorted(system.modules.iteritems()):
        printed = False
        if not 'pypy.' in name or '_cache' in name:
            continue
        u = {}   # imported module -> list of its unused fields
        for n in mod._imports:
            if n in ('autopath', '__future__'):
                continue
            usedany = False
            for field, used in mod._imports[n].iteritems():
                if n in system.modules:
                    M = system.modules[n]
                    # used but not defined in M: it was re-exported from
                    # somewhere else ('used != -1' excludes star imports)
                    if not M.ispackage and field != '*' and field not in M.definitions \
                           and used != -1:
                        if not printed:
                            print '*', name
                            printed = True
                        sourcemod, nam = M.toplevelscope.var_source(field)
                        print '  ', field, 'used from', n, 'but came from', sourcemod
                if not used:
                    u.setdefault(n, []).append(field)
                else:
                    usedany = True
            if not usedany:
                if n in u:
                    u[n].append('(i.e. entirely)')
                else:
                    u[n] = 'entirely'
        if u:
            if not printed:
                print '*', name
                printed = True
            for k, v in u.iteritems():
                print '  ', k, v
def find_cycles(system):
    """Print import cycles that cross package boundaries."""
    from pypy.tool.algo import graphlib
    # build the import graph in graphlib's vertices/edges format
    vertices = dict.fromkeys(system.modules)
    edges = {}
    for m in system.modules:
        edges[m] = []
        for n in system.modules[m]._imports:
            edges[m].append(graphlib.Edge(m, n))
    cycles = []
    for component in graphlib.strong_components(vertices, edges):
        random_vertex = component.iterkeys().next()
        cycles.extend(graphlib.all_cycles(random_vertex, component, edges))
    # keep only the cycles spanning more than one package
    ncycles = []
    for cycle in cycles:
        packs = {}
        for edge in cycle:
            package = edge.source.rsplit('.', 1)[0]
            packs[package] = True
        if len(packs) > 1:
            ncycles.append(cycle)
    cycles = ncycles
    for cycle in cycles:
        # indent follow-up lines to align under the first source name
        l = len(cycle[0].source)
        print cycle[0].source, '->', cycle[0].target
        for edge in cycle[1:]:
            print ' '*l, '->', edge.target
    print len(cycles), 'inter-package cycles'
def summary(system):
    """Print aggregate statistics over all analysed modules."""
    # float so the averages below use true division
    mcount = float(len(system.modules))
    importcount = 0
    importstars = 0
    importstarusage = 0
    defcount = 0
    importedcount = 0
    for m in system.modules:
        m = system.modules[m]
        defcount += len(m.definitions)
        importedcount += len(m.importers)
        importcount += len(m._imports)
        for n in m._imports:
            if '*' in m._imports[n]:
                importstars += 1
                # count only names actually used (value True, not -1)
                importstarusage += len([o for (o, v) in m._imports[n].iteritems() if v == True])
    print
    print 'the average module'
    print 'was imported %.2f times'%(importedcount/mcount)
    print 'imported %.2f other modules'%(importcount/mcount)
    print 'defined %.2f names'%(defcount/mcount)
    print
    print 'there were %d import *s'%(importstars)
    print 'the average one produced %.2f names that were actually used'\
          %((1.0*importstarusage)/importstars)
def not_imported(system):
    """List modules nobody imports (tests and __init__ excluded)."""
    for m, M in sorted(system.modules.iteritems()):
        if not M.importers and 'test' not in m and '__init__' not in m:
            print m
def import_stars(system):
    """For each 'from X import *', report how many of the imported names
    were actually used."""
    for m in sorted(system.modules):
        m = system.modules[m]
        for n in sorted(m._imports):
            if '*' in m._imports[n]:
                print m.name, 'imports * from', n
                # value True means used; -1 means imported-by-star only
                used = [o for (o, v) in m._imports[n].iteritems() if v == True and o != '*']
                print len(used), 'out of', len(m._imports[n]) - 1, 'names are used'
                print '  ', ', '.join(sorted(used))
def find_varargs_users(system):
    """List modules that import and actually use CO_VARARGS from pycode."""
    for m in sorted(system.modules):
        m = system.modules[m]
        if 'pypy.interpreter.pycode' in m._imports:
            if m._imports['pypy.interpreter.pycode'].get('CO_VARARGS') == True:
                print m.name
# --- HTML generation stuff ---
def file_for_module(module):
    """Open (for writing) the html file for 'module', creating any
    missing directories under importfunhtml/."""
    pieces = module.name.split('.')
    target = os.path.join('importfunhtml', *pieces) + '.html'
    parent = os.path.dirname(target)
    if not os.path.isdir(parent):
        os.makedirs(parent)
    return open(target, 'w')
def link_for_module(fromlink, module):
    """Relative href from the page at 'fromlink' to 'module's page."""
    target = module.name.replace('.', '/') + '.html'
    updirs = ['..'] * fromlink.count('/')
    if updirs:
        return '/'.join(updirs) + '/' + target
    return target
def file_for_name(module, name):
    """Open (for writing) the html file for definition 'name' inside
    'module', creating any missing directories under importfunhtml/."""
    pieces = module.name.split('.') + [name]
    target = os.path.join('importfunhtml', *pieces) + '.html'
    parent = os.path.dirname(target)
    if not os.path.isdir(parent):
        os.makedirs(parent)
    return open(target, 'w')
def link_for_name(fromlink, module, name):
    """Relative href from 'fromlink' to the page for 'module'.'name'."""
    pieces = module.name.split('.') + [name]
    target = '/'.join(pieces) + '.html'
    depth = fromlink.count('/')
    if depth:
        return '/'.join(['..'] * depth) + '/' + target
    return target
def html_for_module(module):
    """Write the HTML page for 'module', plus one page per used definition
    listing that definition's users."""
    from py.xml import html
    out = file_for_module(module)
    ourlink = link_for_module('', module)
    head = [html.title(module.name)]
    body = [html.h1(module.name)]
    # section 1: names defined here, linked to per-name pages when used
    body.append(html.p('This module defines these names:'))
    listbody = []
    defuses = {}   # definition -> list of module names using it
    for d in module.definitions:
        uses = []
        for n in sorted(module.importers):
            N = module.system.modules[n]
            if N._imports[module.name].get(d) == True:
                uses.append(n)
        if not d.startswith('_'):
            if uses:
                listbody.append(html.li(
                    html.a(d, href=link_for_name(ourlink, module, d))))
                defuses[d] = uses
            else:
                listbody.append(html.li(d))
    body.append(html.ul(listbody))
    # section 2: what this module imports, with per-name links when the
    # source module was analysed too
    body.append(html.p('This module imports the following:'))
    listbody1 = []
    for n in sorted(module._imports):
        if n in ('autopath', '__future__'):
            continue
        if n in module.system.modules:
            listbody2 = [html.a(
                n, href=link_for_module(ourlink, module.system.modules[n]))]
        else:
            listbody2 = [n]
        listbody3 = []
        for o in sorted(module._imports[n]):
            if module._imports[n][o] == True:
                if n in module.system.modules:
                    listbody3.append(
                        html.li(html.a(
                        o, href=link_for_name(ourlink, module.system.modules[n], o))))
                else:
                    listbody3.append(html.li(o))
        if listbody3:
            listbody2.append(html.ul(listbody3))
        listbody1.append(html.li(listbody2))
    body.append(html.ul(listbody1))
    # section 3: who imports this module, and which names they take
    body.append(html.p('This module is imported by the following:'))
    listbody1 = []
    for n in module.importers:
        licontents = [html.a(n, href=link_for_module(ourlink, module.system.modules[n]))]
        contents = []
        for o in sorted(module.system.modules[n]._imports[module.name]):
            contents.append(html.li(html.a(o, href=link_for_name(ourlink, module, o))))
        if contents:
            licontents.append(html.ul(contents))
        listbody1.append(html.li(licontents))
    body.append(html.ul(listbody1))
    out.write(html.html(head, body).unicode())
    # one extra page per used definition, listing the modules that use it
    for d in defuses:
        out = file_for_name(module, d)
        ourlink = link_for_name('', module, d)
        head = [html.title(module.name + '.' + d)]
        body = [html.h1([html.a(module.name, href=link_for_module(ourlink, module)), '.' + d])]
        contents = []
        for n in defuses[d]:
            N = module.system.modules[n]
            contents.append(html.li(html.a(n, href=link_for_module(ourlink, N))))
        body.append(html.p('This name is used in'))
        body.append(html.ul(contents))
        out.write(html.html(head, body).unicode())
def make_html_report(system):
    """Regenerate the 'importfunhtml' report directory for *system*.

    Any previous report directory is wiped first; one HTML page is then
    emitted per analysed module.
    """
    outdir = 'importfunhtml'
    if os.path.isdir(outdir):
        os.system('rm -rf ' + outdir)
    os.mkdir(outdir)
    for module in system.modules.itervalues():
        html_for_module(module)
# --- the driver stuff! ---
def main(*paths):
    """Analyse the given module paths and emit the HTML import report.

    Each entry of *paths* is a dotted module name or a file path; modules
    discovered while processing are queued in system.pendingmodules and
    drained until the whole import graph has been visited.
    """
    system = System()
    for path in paths:
        system.pendingmodules[path] = None
    T = time.time()
    while system.pendingmodules:
        path, d = system.pendingmodules.popitem()
        if sys.stdout.isatty():
            # '\r\033[K' rewrites the current terminal line (progress display)
            print '\r\033[K', len(system.modules), '/', len(system.pendingmodules), path,
            sys.stdout.flush()
        if '._cache' in path or '/_cache' in path:
            continue  # skip generated _cache modules
        if '/' not in path and not path.startswith('pypy.'):
            continue  # only analyse pypy modules (or explicit file paths)
        process_module(path, system)
    print
    print 'analysed', len(system.modules), 'modules in %.2f seconds'%(time.time() - T)
    print '------'
    # record importer information
    for name, mod in system.modules.iteritems():
        for n in mod._imports:
            if n in system.modules:
                system.modules[n].importers.append(name)
    make_html_report(system)
    if interactive:
        import pdb
        pdb.set_trace()
# Command-line flags; main() consults 'interactive' after the analysis run.
recursive = False
interactive = False

if __name__ == '__main__':
    # pop option flags out of argv before treating the rest as paths
    if '-r' in sys.argv:
        sys.argv.remove('-r')
        recursive = True
    if '-i' in sys.argv:
        sys.argv.remove('-i')
        interactive = True
    args = sys.argv[1:]
    if args:
        main(*args)
    else:
        # no explicit paths given: analyse every .py file under pypy/
        paths = [line[:-1] for line in os.popen("find pypy -name '*.py'")]
        main(*paths)
| Python |
import py
from py.__.misc.cmdline.countloc import get_loccount
import datetime
import time
URL = "http://codespeak.net/svn/pypy/dist"
# fresh svn working copy in a temp dir; we walk it backwards through history
tempdir = py.path.svnwc(py.test.ensuretemp("pypy-dist"))
print "checking out"
tempdir.checkout(URL)
print "done"
pypy = tempdir.join('pypy')
statistic = []
curr_rev = tempdir.info().rev
try:
    # walk revisions backwards, recording one LOC sample per calendar day
    while curr_rev > 7024: #afterwards the behaviour becomes strange :-(
        num_revs = 0
        num_files = 0
        num_testfiles = 0
        num_lines = 0
        num_testlines = 0
        curr_rev = tempdir.info(usecache=0).rev
        olddate = datetime.date(*time.gmtime(pypy.info(0).mtime)[:3])
        date = olddate
        while date == olddate:
            # step back one revision at a time until the commit date changes
            num_revs += 1
            olddate = date
            try:
                tempdir.update(rev=curr_rev - 1)
            except KeyboardInterrupt:
                raise
            except Exception, e:
                # broken update: wipe the working copy and re-check-out,
                # stepping further back past revisions that fail entirely
                print e
                tempdir.localpath.remove(1)
                tempdir.localpath.mkdir()
                while 1:
                    try:
                        tempdir._svn("co -r %r" % (curr_rev - 1), URL)
                    except KeyboardInterrupt:
                        raise
                    except Exception, e:
                        print e, curr_rev
                        curr_rev -= 1
                    else:
                        break
            info = tempdir.info(usecache=0)
            curr_rev = info.rev
            date = datetime.date(*time.gmtime(info.mtime)[:3])
        counter, num_files, num_lines, num_testfiles, num_testlines = get_loccount([pypy.localpath])
        print
        print date, num_revs, num_files, num_testfiles, num_lines, num_testlines
        statistic.append([date, num_revs, num_files, num_testfiles, num_lines, num_testlines])
        # append each sample immediately so a crash doesn't lose everything
        f = file("intermediate.txt", "a")
        print >> f, date, num_revs, num_files, num_testfiles, num_lines, num_testlines
        f.close()
finally:
    # always dump whatever was collected, even on interrupt
    import pickle
    f = file("out.txt", "w")
    pickle.dump(statistic, f)
    f.close()
| Python |
def test_something(space):
assert space.w_None is space.w_None
def app_test_something():
    """Trivial app-level check: integer equality holds."""
    assert 42 == 42
class AppTestSomething:
    """Example of an app-level test class."""

    def test_method_app(self):
        """Trivial app-level method check."""
        assert 23 == 23
class TestSomething:
def test_method(self):
assert self.space
| Python |
def f(a, b):
    """Return the sum (or concatenation) of *a* and *b*."""
    result = a + b
    return result
def g():
    """Always raise ValueError('booh') (helper for traceback tests).

    Uses the callable raise form: identical in Python 2 but, unlike the
    'raise E, msg' statement form, not removed in Python 3.
    """
    raise ValueError("booh")
class FancyException(Exception):
    """Custom exception used by the traceback test helpers."""
def h():
    """Always raise FancyException('booh') (helper for traceback tests).

    Uses the callable raise form: identical in Python 2 but, unlike the
    'raise E, msg' statement form, not removed in Python 3.
    """
    raise FancyException("booh")
def bomb():
    """Simulate a user interrupt (helper for traceback tests)."""
    raise KeyboardInterrupt()
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ is undefined when run through exec(); fall back to argv[0]
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # walk upwards until a directory named *part* is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)
    pypy_root = os.path.join(head, '')
    # make sure the parent of the package directory is first on sys.path
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)
    # collect already-imported modules that live under the package root but
    # were imported under a bare (top-level) name
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): newname is a bare file name with no dots, so this
        # startswith(part + '.') test looks like it can never match and the
        # loop below may be dead code -- verify before relying on it.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod
    # re-register those modules under their canonical dotted names
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
""" clone master version of autopath.py into all subdirs """
from os.path import join, walk
if not this_dir.endswith(join('pypy','tool')):
raise EnvironmentError("can only clone master version "
"'%s'" % join(pypydir, 'tool',_myname))
def sync_walker(arg, dirname, fnames):
if _myname in fnames:
fn = join(dirname, _myname)
f = open(fn, 'rwb+')
try:
if f.read() == arg:
print "checkok", fn
else:
print "syncing", fn
f = open(fn, 'w')
f.write(arg)
finally:
f.close()
s = open(join(pypydir, 'tool', _myname), 'rb').read()
walk(pypydir, sync_walker, s)
_myname = 'autopath.py'
# set guaranteed attributes
# (importing autopath has the side effect of fixing up sys.path)
pypydir, this_dir = __dirinfo('pypy')
if __name__ == '__main__':
    # running the master copy directly re-clones it into all subdirs
    __clone()
| Python |
"""
Caches that can freeze when the annotator needs it.
"""
#
# _freeze_() protocol:
# user-defined classes can define a method _freeze_(), which
# is called when a prebuilt instance is found. If the method
# returns True, the instance is considered immutable and becomes
# a SomePBC(). Otherwise it's just SomeInstance(). The method
# should force away any laziness that remains in the instance.
#
# Cache class:
# a cache meant to map a finite number of keys to values.
# It is normally extended lazily, until it contains all possible
# keys. The _annspecialcase_ attribute of the getorbuild() method
# forces the annotator to decode the argument's annotations,
# which must be constants or SomePBCs, actually call the
# method with all possible combinations, and gather the results.
# The idea is to completely fill the cache at annotation-time,
# using the information collected by the annotator itself about
# what the keys can actually be.
#
# Cache must be subclassed, and a _build() method provided.
# Be sure to call the parent __init__() if you override it.
#
class Cache(object):
    """Annotator-friendly lazy cache mapping keys to built values.

    Subclasses must provide _build(key); see the module comment above for
    the _freeze_()/getorbuild() protocol.
    """
    def __init__(self):
        self.content = {}      # finished key -> value mappings
        self._building = {}    # keys currently being built (cycle guard)
    def getorbuild(self, key):
        """Return the cached value for *key*, building it on first use.

        Raises if _build(key) recursively asks for the same key.
        """
        try:
            return self.content[key]
        except KeyError:
            if key in self._building:
                # call form instead of the Python-2-only 'raise E, msg'
                # statement form (identical behaviour, forward-compatible)
                raise Exception("%s recursive building of %r" % (
                    self, key))
            self._building[key] = True
            try:
                result = self._build(key)
                self.content[key] = result
            finally:
                # always clear the guard, even if _build() raised
                del self._building[key]
            self._ready(result)
            return result
    getorbuild._annspecialcase_ = "specialize:memo"
    def _ready(self, result):
        """Hook called once after a value has been built."""
        pass
    def _freeze_(self):
        # needs to be SomePBC, but otherwise we can't really freeze the
        # cache because more getorbuild() calls might be discovered later
        # during annotation.
        return True
| Python |
# NOT_RPYTHON
"""
A pure Python reimplementation of the _sre module from CPython 2.4
Copyright 2005 Nik Haldimann, licensed under the MIT license
This code is based on material licensed under CNRI's Python 1.6 license and
copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB
"""
import sys
import _sre
def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
    """Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern
    object. Actual compilation to opcodes happens in sre_compile."""
    # NOTE: the mutable defaults mirror the CPython _sre signature and are
    # only stored by SRE_Pattern, never mutated.
    return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup)
class SRE_Pattern(object):
    """App-level compiled-pattern object.

    Only stores the descriptor produced by sre_compile; every matching
    operation builds an interp-level _sre._State and drives the opcode
    interpreter (_sre._match / _sre._search) over it.
    """
    def __init__(self, pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
        # NOTE: the mutable defaults mirror the CPython signature and are
        # only stored here, never mutated.
        self.pattern = pattern
        self.flags = flags
        self.groups = groups
        self.groupindex = groupindex # Maps group names to group indices
        self._indexgroup = indexgroup # Maps indices to group names
        self._code = code
    def match(self, string, pos=0, endpos=sys.maxint):
        """If zero or more characters at the beginning of string match this
        regular expression, return a corresponding MatchObject instance. Return
        None if the string does not match the pattern."""
        state = _sre._State(string, pos, endpos, self.flags)
        if _sre._match(state, self._code):
            return SRE_Match(self, state)
        else:
            return None
    def search(self, string, pos=0, endpos=sys.maxint):
        """Scan through string looking for a location where this regular
        expression produces a match, and return a corresponding MatchObject
        instance. Return None if no position in the string matches the
        pattern."""
        state = _sre._State(string, pos, endpos, self.flags)
        if _sre._search(state, self._code):
            return SRE_Match(self, state)
        else:
            return None
    def findall(self, string, pos=0, endpos=sys.maxint):
        """Return a list of all non-overlapping matches of pattern in string."""
        matchlist = []
        state = _sre._State(string, pos, endpos, self.flags)
        while state.start <= state.end:
            state.reset()
            if not _sre._search(state, self._code):
                break
            match = SRE_Match(self, state)
            if self.groups == 0 or self.groups == 1:
                item = match.group(self.groups)
            else:
                item = match.groups("")
            matchlist.append(item)
            # advance at least one character past an empty match
            if state.string_position == state.start:
                state.start += 1
            else:
                state.start = state.string_position
        return matchlist
    def subn(self, repl, string, count=0):
        """Return the tuple (new_string, number_of_subs_made) found by replacing
        the leftmost non-overlapping occurrences of pattern with the replacement
        repl."""
        filter = repl
        if not callable(repl) and "\\" in repl:
            # handle non-literal strings ; hand it over to the template compiler
            import sre
            filter = sre._subx(self, repl)
        state = _sre._State(string, 0, sys.maxint, self.flags)
        sublist = []
        n = last_pos = 0
        while not count or n < count:
            state.reset()
            if not _sre._search(state, self._code):
                break
            if last_pos < state.start:
                # keep the unmatched text before this match
                sublist.append(string[last_pos:state.start])
            if not (last_pos == state.start and
                            last_pos == state.string_position and n > 0):
                # the above ignores empty matches on latest position
                if callable(filter):
                    sublist.append(filter(SRE_Match(self, state)))
                else:
                    sublist.append(filter)
                last_pos = state.string_position
                n += 1
            if state.string_position == state.start:
                state.start += 1
            else:
                state.start = state.string_position
        if last_pos < state.end:
            # keep the trailing unmatched text
            sublist.append(string[last_pos:state.end])
        item = "".join(sublist)
        return item, n
    def sub(self, repl, string, count=0):
        """Return the string obtained by replacing the leftmost non-overlapping
        occurrences of pattern in string by the replacement repl."""
        item, n = self.subn(repl, string, count)
        return item
    def split(self, string, maxsplit=0):
        """Split string by the occurrences of pattern."""
        splitlist = []
        state = _sre._State(string, 0, sys.maxint, self.flags)
        n = 0
        last = state.start
        while not maxsplit or n < maxsplit:
            state.reset()
            if not _sre._search(state, self._code):
                break
            if state.start == state.string_position: # zero-width match
                if last == state.end:                # or end of string
                    break
                state.start += 1
                continue
            splitlist.append(string[last:state.start])
            # add groups (if any)
            if self.groups:
                match = SRE_Match(self, state)
                splitlist.extend(list(match.groups(None)))
            n += 1
            last = state.start = state.string_position
        splitlist.append(string[last:state.end])
        return splitlist
    def finditer(self, string, pos=0, endpos=sys.maxint):
        """Return a list of all non-overlapping matches of pattern in string."""
        # iter() with a sentinel: keep calling scanner.search until None
        scanner = self.scanner(string, pos, endpos)
        return iter(scanner.search, None)
    def scanner(self, string, start=0, end=sys.maxint):
        return SRE_Scanner(self, string, start, end)
    def __copy__(self):
        raise TypeError, "cannot copy this pattern object"
    def __deepcopy__(self):
        raise TypeError, "cannot copy this pattern object"
class SRE_Scanner(object):
    """Undocumented scanner interface of sre: repeatedly match/search,
    advancing through the subject string between calls."""

    def __init__(self, pattern, string, start, end):
        self.pattern = pattern
        self._state = _sre._State(string, start, end, self.pattern.flags)

    def _match_search(self, matcher):
        # Run one matching attempt, then move the start position so the next
        # call continues after this match (or one character further on a
        # failed/empty match, to guarantee forward progress).
        state = self._state
        state.reset()
        if matcher(state, self.pattern._code):
            found = SRE_Match(self.pattern, state)
        else:
            found = None
        if found is not None and state.string_position != state.start:
            state.start = state.string_position
        else:
            state.start += 1
        return found

    def match(self):
        return self._match_search(_sre._match)

    def search(self):
        return self._match_search(_sre._search)
class SRE_Match(object):
def __init__(self, pattern, state):
self.re = pattern
self.string = state.string
self.pos = state.pos
self.endpos = state.end
self.lastindex = state.lastindex
if self.lastindex < 0:
self.lastindex = None
self.regs = state.create_regs(self.re.groups)
if pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
# The above upper-bound check should not be necessary, as the re
# compiler is supposed to always provide an _indexgroup list long
# enough. But the re.Scanner class seems to screw up something
# there, test_scanner in test_re won't work without upper-bound
# checking. XXX investigate this and report bug to CPython.
self.lastgroup = pattern._indexgroup[self.lastindex]
else:
self.lastgroup = None
def _get_index(self, group):
if isinstance(group, int):
if group >= 0 and group <= self.re.groups:
return group
else:
if self.re.groupindex.has_key(group):
return self.re.groupindex[group]
raise IndexError("no such group")
def _get_slice(self, group, default):
group_indices = self.regs[group]
if group_indices[0] >= 0:
return self.string[group_indices[0]:group_indices[1]]
else:
return default
def start(self, group=0):
"""Returns the indices of the start of the substring matched by group;
group defaults to zero (meaning the whole matched substring). Returns -1
if group exists but did not contribute to the match."""
return self.regs[self._get_index(group)][0]
def end(self, group=0):
"""Returns the indices of the end of the substring matched by group;
group defaults to zero (meaning the whole matched substring). Returns -1
if group exists but did not contribute to the match."""
return self.regs[self._get_index(group)][1]
def span(self, group=0):
"""Returns the 2-tuple (m.start(group), m.end(group))."""
return self.start(group), self.end(group)
def expand(self, template):
"""Return the string obtained by doing backslash substitution and
resolving group references on template."""
import sre
return sre._expand(self.re, self, template)
def groups(self, default=None):
"""Returns a tuple containing all the subgroups of the match. The
default argument is used for groups that did not participate in the
match (defaults to None)."""
groups = []
for indices in self.regs[1:]:
if indices[0] >= 0:
groups.append(self.string[indices[0]:indices[1]])
else:
groups.append(default)
return tuple(groups)
def groupdict(self, default=None):
"""Return a dictionary containing all the named subgroups of the match.
The default argument is used for groups that did not participate in the
match (defaults to None)."""
groupdict = {}
for key, value in self.re.groupindex.items():
groupdict[key] = self._get_slice(value, default)
return groupdict
def group(self, *args):
"""Returns one or more subgroups of the match. Each argument is either a
group index or a group name."""
if len(args) == 0:
args = (0,)
grouplist = []
for group in args:
grouplist.append(self._get_slice(self._get_index(group), None))
if len(grouplist) == 1:
return grouplist[0]
else:
return tuple(grouplist)
def __copy__():
raise TypeError, "cannot copy this pattern object"
def __deepcopy__():
raise TypeError, "cannot copy this pattern object"
| Python |
from pypy.interpreter.baseobjspace import Wrappable
from pypy.interpreter.typedef import GetSetProperty, TypeDef
from pypy.interpreter.typedef import interp_attrproperty, interp_attrproperty_w
from pypy.interpreter.gateway import interp2app, ObjSpace, W_Root
from pypy.interpreter.error import OperationError
from pypy.rlib.rarithmetic import intmask
# This can be compiled in two ways:
#
# * THREE_VERSIONS_OF_CORE=True: you get three copies of the whole
# regexp searching and matching code: for strings, for unicode strings,
# and for generic wrapped objects (like mmap.mmap or array.array).
#
# * THREE_VERSIONS_OF_CORE=False: there is only one copy of the code,
# at the cost of an indirect method call to fetch each character.
THREE_VERSIONS_OF_CORE = False
#### Constants and exposed functions
from pypy.rlib.rsre import rsre
from pypy.rlib.rsre.rsre_char import MAGIC, CODESIZE, getlower
copyright = "_sre.py 2.4 Copyright 2005 by Nik Haldimann"
def w_getlower(space, char_ord, flags):
    # app-level _sre.getlower(): lowercase *char_ord* according to *flags*
    return space.wrap(getlower(char_ord, flags))
w_getlower.unwrap_spec = [ObjSpace, int, int]
def w_getcodesize(space):
    # app-level _sre.getcodesize(): size of one pattern code unit
    return space.wrap(CODESIZE)
# use the same version of unicodedb as the standard objspace
from pypy.objspace.std.unicodeobject import unicodedb
rsre.set_unicode_db(unicodedb)
#### State classes
def make_state(space, w_string, start, end, flags):
    """Pick the W_State subclass matching w_string's type and build it."""
    # XXX maybe turn this into a __new__ method of W_State
    if space.is_true(space.isinstance(w_string, space.w_str)):
        state_cls = W_StringState
    elif space.is_true(space.isinstance(w_string, space.w_unicode)):
        state_cls = W_UnicodeState
    else:
        state_cls = W_GenericState
    state = state_cls(space, w_string, start, end, flags)
    return space.wrap(state)
make_state.unwrap_spec = [ObjSpace, W_Root, int, int, int]
class W_State(Wrappable):
    """Base class for app-level _sre match-state objects.

    Holds the subject string, the working positions and the flags; the
    actual search/match methods are pasted in by rsre.insert_sre_methods
    (here, or in the subclasses when THREE_VERSIONS_OF_CORE is enabled).

    Fix over the previous version: fset_string_position assigned to
    self.start (copy-paste from fset_start), so writing the app-level
    'string_position' attribute silently moved 'start' instead.
    """
    if not THREE_VERSIONS_OF_CORE:
        rsre.insert_sre_methods(locals(), 'all')
    def __init__(self, space, w_string, start, end, flags):
        self.space = space
        self.w_string = w_string
        length = self.unwrap_object()
        # clamp the requested range to the actual string bounds
        if start < 0:
            start = 0
        if end > length:
            end = length
        self.start = start
        self.pos = start    # records the original start position
        self.end = end
        self.flags = flags
        self.reset()
    def lower(self, char_ord):
        return getlower(char_ord, self.flags)
    # methods overridden by subclasses (the stubs are only defined when
    # rsre.insert_sre_methods did not already provide them)
    def unwrap_object(self):
        raise NotImplementedError
    if 'reset' not in locals():
        def reset(self):
            raise NotImplementedError
    if 'search' not in locals():
        def search(self, pattern_codes):
            raise NotImplementedError
    if 'match' not in locals():
        def match(self, pattern_codes):
            raise NotImplementedError
    # Accessors for the typedef
    def w_reset(self):
        self.reset()
    def w_create_regs(self, group_count):
        """Creates a tuple of index pairs representing matched groups, a format
        that's convenient for SRE_Match."""
        space = self.space
        lst = []
        for value1, value2 in self.create_regs(group_count):
            lst.append(space.newtuple([space.wrap(value1),
                                       space.wrap(value2)]))
        return space.newtuple(lst)
    w_create_regs.unwrap_spec = ['self', int]
    # note the (space, self) argument order: these are unbound getter/setter
    # functions handed to GetSetProperty below
    def fget_start(space, self):
        return space.wrap(self.start)
    def fset_start(space, self, w_value):
        self.start = space.int_w(w_value)
    def fget_string_position(space, self):
        return space.wrap(self.string_position)
    def fset_string_position(space, self, w_value):
        # bug fix: used to assign to self.start
        self.string_position = space.int_w(w_value)
getset_start = GetSetProperty(W_State.fget_start, W_State.fset_start, cls=W_State)
getset_string_position = GetSetProperty(W_State.fget_string_position,
                                        W_State.fset_string_position, cls=W_State)
# app-level type descriptor: exposes the state's attributes plus the two
# methods the pure-Python sre wrapper needs (reset, create_regs)
W_State.typedef = TypeDef("W_State",
    string = interp_attrproperty_w("w_string", W_State),
    start = getset_start,
    end = interp_attrproperty("end", W_State),
    string_position = getset_string_position,
    pos = interp_attrproperty("pos", W_State),
    lastindex = interp_attrproperty("lastindex", W_State),
    reset = interp2app(W_State.w_reset),
    create_regs = interp2app(W_State.w_create_regs),
)
class W_StringState(W_State):
    """Match state specialised for plain byte strings."""
    if THREE_VERSIONS_OF_CORE:
        rsre.insert_sre_methods(locals(), 'str')
    def unwrap_object(self):
        # cache the unwrapped string once; matching reads it via get_char_ord
        self.string = self.space.str_w(self.w_string)
        return len(self.string)
    def get_char_ord(self, p):
        return ord(self.string[p])
class W_UnicodeState(W_State):
    """Match state specialised for unicode strings."""
    if THREE_VERSIONS_OF_CORE:
        rsre.insert_sre_methods(locals(), 'unicode')
    def unwrap_object(self):
        # cache the unwrapped character list once
        self.unichars = self.space.unichars_w(self.w_string)
        return len(self.unichars)
    def get_char_ord(self, p):
        return ord(self.unichars[p])
class W_GenericState(W_State):
    """Match state for arbitrary wrapped character sequences (e.g.
    mmap.mmap or array.array); characters are fetched one at a time
    through the object space."""
    if THREE_VERSIONS_OF_CORE:
        rsre.insert_sre_methods(locals(), 'generic')
    def unwrap_object(self):
        # cannot unwrap in the general case
        space = self.space
        # some type-checking: reject non-indexables and mappings up front
        if (space.lookup(self.w_string, '__getitem__') is None or
            space.lookup(self.w_string, 'keys') is not None):
            msg = "string or sequence of characters expected"
            raise OperationError(space.w_TypeError, space.wrap(msg))
        return space.int_w(space.len(self.w_string))
    def get_char_ord(self, p):
        space = self.space
        w_char = space.getitem(self.w_string, space.wrap(p))
        return space.int_w(space.ord(w_char))
def w_search(space, w_state, w_pattern_codes):
    """App-level _sre._search: run the opcode interpreter in search mode."""
    state = space.interp_w(W_State, w_state)
    codes = [intmask(space.uint_w(w_code))
             for w_code in space.unpackiterable(w_pattern_codes)]
    return space.newbool(state.search(codes))
def w_match(space, w_state, w_pattern_codes):
    """App-level _sre._match: run the opcode interpreter in match mode."""
    state = space.interp_w(W_State, w_state)
    codes = [intmask(space.uint_w(w_code))
             for w_code in space.unpackiterable(w_pattern_codes)]
    return space.newbool(state.match(codes))
| Python |
from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
    """A pure Python reimplementation of the _sre module from CPython 2.4
    Copyright 2005 Nik Haldimann, licensed under the MIT license
    This code is based on material licensed under CNRI's Python 1.6 license and
    copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB
    """
    # entries implemented in app-level Python (see app_sre.py)
    appleveldefs = {
        'compile': 'app_sre.compile',
    }
    # entries implemented at interpreter level (see interp_sre.py)
    interpleveldefs = {
        'CODESIZE': 'space.wrap(interp_sre.CODESIZE)',
        'MAGIC': 'space.wrap(interp_sre.MAGIC)',
        'copyright': 'space.wrap(interp_sre.copyright)',
        'getlower': 'interp_sre.w_getlower',
        'getcodesize': 'interp_sre.w_getcodesize',
        '_State': 'interp_sre.make_state',
        '_match': 'interp_sre.w_match',
        '_search': 'interp_sre.w_search',
    }
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""
    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ is undefined when run through exec(); fall back to argv[0]
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # walk upwards until a directory named *part* is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)
    pypy_root = os.path.join(head, '')
    # make sure the parent of the package directory is first on sys.path
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)
    # collect already-imported modules that live under the package root but
    # were imported under a bare (top-level) name
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): newname is a bare file name with no dots, so this
        # startswith(part + '.') test looks like it can never match and the
        # loop below may be dead code -- verify before relying on it.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod
    # re-register those modules under their canonical dotted names
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
""" clone master version of autopath.py into all subdirs """
from os.path import join, walk
if not this_dir.endswith(join('pypy','tool')):
raise EnvironmentError("can only clone master version "
"'%s'" % join(pypydir, 'tool',_myname))
def sync_walker(arg, dirname, fnames):
if _myname in fnames:
fn = join(dirname, _myname)
f = open(fn, 'rwb+')
try:
if f.read() == arg:
print "checkok", fn
else:
print "syncing", fn
f = open(fn, 'w')
f.write(arg)
finally:
f.close()
s = open(join(pypydir, 'tool', _myname), 'rb').read()
walk(pypydir, sync_walker, s)
_myname = 'autopath.py'
# set guaranteed attributes
# (importing autopath has the side effect of fixing up sys.path)
pypydir, this_dir = __dirinfo('pypy')
if __name__ == '__main__':
    # running the master copy directly re-clones it into all subdirs
    __clone()
| Python |
"""Support functions for app-level _sre tests."""
import locale, _sre
from sre_constants import OPCODES, ATCODES, CHCODES
def encode_literal(string):
    """Return the opcode list that matches *string* literally."""
    opcodes = []
    for ch in string:
        opcodes.append(OPCODES["literal"])
        opcodes.append(ord(ch))
    return opcodes
def assert_match(opcodes, strings):
    """Assert that *opcodes* finds a match in each of *strings*."""
    assert_something_about_match(lambda found: found, opcodes, strings)
def assert_no_match(opcodes, strings):
    """Assert that *opcodes* finds no match in any of *strings*."""
    assert_something_about_match(lambda found: not found, opcodes, strings)
def assert_something_about_match(assert_modifier, opcodes, strings):
    """Search each string with *opcodes*; assert_modifier(result) must hold."""
    if isinstance(strings, str):
        strings = [strings]  # accept a single string for convenience
    for subject in strings:
        assert assert_modifier(search(opcodes, subject))
def search(opcodes, string):
    # compile a throwaway pattern ("ignore" is just a dummy source string)
    pattern = _sre.compile("ignore", 0, opcodes, 0, {}, None)
    return pattern.search(string)
def void_locale():
    # reset to the default "C" locale so locale-dependent matching flags
    # behave deterministically in the tests
    locale.setlocale(locale.LC_ALL, (None, None))
def assert_lower_equal(tests, flags):
    """Check _sre.getlower against (char, expected_lowercase) pairs."""
    for char, expected in tests:
        assert _sre.getlower(ord(char), flags) == ord(expected)
| Python |
from pypy.rpython.rctypes.tool import ctypes_platform
from pypy.rpython.rctypes.tool.libc import libc
import pypy.rpython.rctypes.implementation # this defines rctypes magic
from pypy.rpython.rctypes.aerrno import geterrno
from pypy.interpreter.error import OperationError
from pypy.interpreter.baseobjspace import W_Root, ObjSpace
from pypy.rlib.rarithmetic import ovfcheck_float_to_int
from ctypes import *
import math
import os
import sys
_POSIX = os.name == "posix"
_WIN = os.name == "nt"
# C header text handed to ctypes_platform so it can probe the declarations
_include = "#include <time.h>"
if _POSIX:
    _include = """%s
    #include <sys/time.h>""" % _include
class CConfig:
    # description of the C constants/types resolved at configure time
    _header_ = _include
    CLOCKS_PER_SEC = ctypes_platform.ConstantInteger("CLOCKS_PER_SEC")
    clock_t = ctypes_platform.SimpleType("clock_t", c_ulong)
    time_t = ctypes_platform.SimpleType("time_t", c_long)
    size_t = ctypes_platform.SimpleType("size_t", c_long)
if _POSIX:
    CConfig.timeval = ctypes_platform.Struct("struct timeval", [("tv_sec", c_int),
        ("tv_usec", c_int)])
    CConfig.tm = ctypes_platform.Struct("struct tm", [("tm_sec", c_int),
        ("tm_min", c_int), ("tm_hour", c_int), ("tm_mday", c_int),
        ("tm_mon", c_int), ("tm_year", c_int), ("tm_wday", c_int),
        ("tm_yday", c_int), ("tm_isdst", c_int), ("tm_gmtoff", c_long),
        ("tm_zone", c_char_p)])
elif _WIN:
    from ctypes import wintypes
    # Windows' struct tm lacks the BSD/GNU tm_gmtoff/tm_zone extensions
    CConfig.tm = ctypes_platform.Struct("struct tm", [("tm_sec", c_int),
        ("tm_min", c_int), ("tm_hour", c_int), ("tm_mday", c_int),
        ("tm_mon", c_int), ("tm_year", c_int), ("tm_wday", c_int),
        ("tm_yday", c_int), ("tm_isdst", c_int)])
    LARGE_INTEGER = wintypes.LARGE_INTEGER
    BOOL = wintypes.BOOL
    DWORD = wintypes.DWORD
class cConfig:
    # filled below with the values resolved by the C compiler
    pass
cConfig.__dict__.update(ctypes_platform.configure(CConfig))
cConfig.tm.__name__ = "_tm"
if _POSIX:
    cConfig.timeval.__name__ = "_timeval"
    timeval = cConfig.timeval
# module-level aliases for the resolved C types/constants
CLOCKS_PER_SEC = cConfig.CLOCKS_PER_SEC
clock_t = cConfig.clock_t
time_t = cConfig.time_t
size_t = cConfig.size_t
tm = cConfig.tm
has_gettimeofday = False
if hasattr(libc, "gettimeofday"):
    libc.gettimeofday.argtypes = [c_void_p, c_void_p]
    libc.gettimeofday.restype = c_int
    has_gettimeofday = True
# declare argument/result types for every libc function we call below
libc.clock.restype = clock_t
libc.time.argtypes = [POINTER(time_t)]
libc.time.restype = time_t
libc.ctime.argtypes = [POINTER(time_t)]
libc.ctime.restype = c_char_p
libc.gmtime.argtypes = [POINTER(time_t)]
libc.gmtime.restype = POINTER(tm)
libc.localtime.argtypes = [POINTER(time_t)]
libc.localtime.restype = POINTER(tm)
libc.mktime.argtypes = [POINTER(tm)]
libc.mktime.restype = time_t
libc.asctime.argtypes = [POINTER(tm)]
libc.asctime.restype = c_char_p
if _POSIX:
    libc.tzset.restype = None # tzset() returns void
elif _WIN:
    # high-resolution timer and sleep come from kernel32 on Windows
    QueryPerformanceCounter = windll.kernel32.QueryPerformanceCounter
    QueryPerformanceCounter.argtypes = [POINTER(LARGE_INTEGER)]
    QueryPerformanceCounter.restype = BOOL
    QueryPerformanceFrequency = windll.kernel32.QueryPerformanceFrequency
    QueryPerformanceFrequency.argtypes = [POINTER(LARGE_INTEGER)]
    QueryPerformanceFrequency.restype = BOOL
    Sleep = windll.kernel32.Sleep
    Sleep.argtypes = [DWORD]
    Sleep.restype = None
libc.strftime.argtypes = [c_char_p, size_t, c_char_p, POINTER(tm)]
libc.strftime.restype = size_t
def _init_accept2dyear():
return (1, 0)[bool(os.getenv("PYTHONY2K"))]
def _init_timezone():
    """Compute (timezone, daylight, tzname, altzone) for the 'time' module.

    On Windows the values stay at their zero defaults because rctypes
    cannot read exported global variables (see the commented-out code).
    """
    timezone = daylight = altzone = 0
    tzname = ["", ""]
    # pypy cant' use in_dll to access global exported variables
    # so we can't compute these attributes
    # if _WIN:
    #     cdll.msvcrt._tzset()
    #
    #     timezone = c_long.in_dll(cdll.msvcrt, "_timezone").value
    #     if hasattr(cdll.msvcrt, "altzone"):
    #         altzone = c_long.in_dll(cdll.msvcrt, "altzone").value
    #     else:
    #         altzone = timezone - 3600
    #     daylight = c_long.in_dll(cdll.msvcrt, "_daylight").value
    #     tzname = _tzname_t.in_dll(cdll.msvcrt, "_tzname")
    #     tzname = (tzname.tzname_0, tzname.tzname_1)
    if _POSIX:
        # sample local time in January and in July to detect DST rules
        YEAR = (365 * 24 + 6) * 3600
        # round the current time down to a year boundary (integer division)
        t = (((libc.time(byref(time_t(0)))) / YEAR) * YEAR)
        tt = time_t(t)
        p = libc.localtime(byref(tt)).contents
        janzone = -p.tm_gmtoff
        janname = [" ", p.tm_zone][bool(p.tm_zone)]
        tt = time_t(tt.value + YEAR / 2)
        p = libc.localtime(byref(tt)).contents
        julyzone = -p.tm_gmtoff
        julyname = [" ", p.tm_zone][bool(p.tm_zone)]
        if janzone < julyzone:
            # DST is reversed in the southern hemisphere
            timezone = julyzone
            altzone = janzone
            daylight = int(janzone != julyzone)
            tzname = [julyname, janname]
        else:
            timezone = janzone
            altzone = julyzone
            daylight = int(janzone != julyzone)
            tzname = [janname, julyname]
    return timezone, daylight, tzname, altzone
def _get_error_msg():
    """Message for the libc errno currently set (used in OperationErrors)."""
    return os.strerror(geterrno())
def _floattime():
    """ _floattime() -> computes time since the Epoch for various platforms.
    Since on some system gettimeofday may fail we fall back on ftime
    or time.
    gettimeofday() has a resolution in microseconds
    ftime() has a resolution in milliseconds and it never fails
    time() has a resolution in seconds
    """
    if has_gettimeofday:
        t = timeval()
        if libc.gettimeofday(byref(t), c_void_p(None)) == 0:
            return float(t.tv_sec) + t.tv_usec * 0.000001
    # fall back to one-second resolution if gettimeofday is absent or failed
    return float(libc.time(None))
    # elif hasattr(_libc, "ftime"):
    #     t = _timeb()
    #     _libc.ftime.argtypes = [c_void_p]
    #     _libc.ftime(byref(t))
    #     return float(t.time) + float(t.millitm) * 0.001
    # elif hasattr(_libc, "time"):
    #     t = c_long()
    #     _libc.time.argtypes = [c_void_p]
    #     _libc.time(byref(t))
    #     return t.value
if _WIN:
    # Windows-only implementation based on kernel32.Sleep (milliseconds)
    def sleep(space, w_secs):
        """sleep(seconds)

        Delay execution for a given number of seconds. The argument may be
        a floating point number for subsecond precision."""
        secs = space.float_w(w_secs)
        if secs < 0.0:
            secs = 0.0  # negative delays are treated as zero
        msecs = secs * 1000.0
        try:
            msecs = ovfcheck_float_to_int(msecs)
        except OverflowError:
            raise OperationError(space.w_OverflowError,
                                 space.wrap("sleep length is too large"))
        ul_millis = c_ulong(msecs)
        Sleep(ul_millis)
def _get_module_object(space, obj_name):
    """Fetch the attribute *obj_name* from the applevel 'time' module."""
    w_mod = space.getbuiltinmodule('time')
    return space.getattr(w_mod, space.wrap(obj_name))
def _set_module_object(space, obj_name, w_obj_value):
    """Store *w_obj_value* as attribute *obj_name* on the applevel 'time' module."""
    w_mod = space.getbuiltinmodule('time')
    space.setattr(w_mod, space.wrap(obj_name), w_obj_value)
def _get_inttime(space, w_seconds):
    """Convert an optional wrapped float to an integer time value.

    A wrapped None (the callers never pass a real interp-level None)
    selects the current time.  Raises applevel ValueError when the float
    does not fit into an int.
    """
    if space.is_w(w_seconds, space.w_None):
        secs = _floattime()
    else:
        secs = space.float_w(w_seconds)
    try:
        return ovfcheck_float_to_int(secs)
    except OverflowError:
        raise OperationError(space.w_ValueError,
                             space.wrap("time argument too large"))
def _tm_to_tuple(space, t):
    """Build an applevel struct_time from a C 'struct tm' value *t*.

    Converts the C conventions to the Python ones: 1900-based year,
    0-based month/yday, Sunday==0 weekday.
    """
    fields = [t.tm_year + 1900,
              t.tm_mon + 1,         # want january == 1
              t.tm_mday,
              t.tm_hour,
              t.tm_min,
              t.tm_sec,
              (t.tm_wday + 6) % 7,  # want monday == 0
              t.tm_yday + 1,        # want january, 1 == 1
              t.tm_isdst]
    w_struct_time = _get_module_object(space, 'struct_time')
    w_time_tuple = space.newtuple([space.wrap(f) for f in fields])
    return space.call_function(w_struct_time, w_time_tuple)
def _gettmarg(space, w_tup, allowNone=True):
    """Convert an applevel 9-item time tuple (or None) into a C 'struct tm'.

    With allowNone=True a wrapped None selects the current local time.
    Python conventions are converted to the C ones at the end: 1900-based
    year, 0-based month/yday, Sunday==0 weekday.  Two-digit years are
    accepted only while the module's 'accept2dyear' flag is true.
    Raises applevel TypeError/ValueError on malformed input."""
    if allowNone and space.is_w(w_tup, space.w_None):
        # default to the current local time
        tt = time_t(int(_floattime()))
        pbuf = libc.localtime(byref(tt))
        if not pbuf:
            raise OperationError(space.w_ValueError,
                                 space.wrap(_get_error_msg()))
        return pbuf.contents
    tup_w = space.unpackiterable(w_tup)
    if len(tup_w) != 9:
        raise OperationError(space.w_TypeError,
                             space.wrap("argument must be sequence of "
                                        "length 9, not %d" % len(tup_w)))
    buf = tm()
    y = space.int_w(tup_w[0])
    buf.tm_mon = space.int_w(tup_w[1])
    buf.tm_mday = space.int_w(tup_w[2])
    buf.tm_hour = space.int_w(tup_w[3])
    buf.tm_min = space.int_w(tup_w[4])
    buf.tm_sec = space.int_w(tup_w[5])
    buf.tm_wday = space.int_w(tup_w[6])
    buf.tm_yday = space.int_w(tup_w[7])
    buf.tm_isdst = space.int_w(tup_w[8])
    w_accept2dyear = _get_module_object(space, "accept2dyear")
    accept2dyear = space.int_w(w_accept2dyear)
    if y < 1900:
        # mimic CPython: 2-digit years allowed only when accept2dyear is set
        if not accept2dyear:
            raise OperationError(space.w_ValueError,
                                 space.wrap("year >= 1900 required"))
        if 69 <= y <= 99:
            y += 1900
        elif 0 <= y <= 68:
            y += 2000
        else:
            raise OperationError(space.w_ValueError,
                                 space.wrap("year out of range"))
    if buf.tm_wday < 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("day of week out of range"))
    # convert the Python conventions to the C 'struct tm' ones
    buf.tm_year = y - 1900
    buf.tm_mon = buf.tm_mon - 1
    buf.tm_wday = (buf.tm_wday + 1) % 7
    buf.tm_yday = buf.tm_yday - 1
    return buf
def time(space):
    """time() -> floating point number

    Return the current time in seconds since the Epoch.
    Fractions of a second may be present if the system clock provides them."""
    return space.wrap(_floattime())
if _WIN:
    # One-time cache for the win32 performance-counter state used by clock():
    # 'divisor' holds the counter frequency (0.0 until the first clock()
    # call initializes it) and 'ctrStart' the counter value at that call.
    class PCCache:
        pass
    pccache = PCCache()
    pccache.divisor = 0.0
    pccache.ctrStart = LARGE_INTEGER()
def clock(space):
    """clock() -> floating point number

    Return the CPU time or real time since the start of the process or since
    the first call to clock().  This has as much precision as the system
    records."""
    if _POSIX:
        # POSIX: CPU time from C clock(), scaled to seconds
        res = float(libc.clock()) / CLOCKS_PER_SEC
        return space.wrap(res)
    elif _WIN:
        # Windows: wall-clock time from the high-resolution performance
        # counter, lazily initialized on the first call
        if pccache.divisor == 0.0:
            freq = LARGE_INTEGER()
            res = QueryPerformanceFrequency(byref(freq))
            if not res or not freq:
                # no performance counter available: fall back to C clock()
                return space.wrap(float(libc.clock()) / CLOCKS_PER_SEC)
            pccache.divisor = float(freq.value)
            QueryPerformanceCounter(byref(pccache.ctrStart))
        now = LARGE_INTEGER()
        QueryPerformanceCounter(byref(now))
        diff = float(now.value - pccache.ctrStart.value)
        return space.wrap(diff / pccache.divisor)
def ctime(space, w_seconds=None):
    """ctime([seconds]) -> string

    Convert a time in seconds since the Epoch to a string in local time.
    This is equivalent to asctime(localtime(seconds)). When the time tuple is
    not present, current time as returned by localtime() is used."""
    when = time_t(_get_inttime(space, w_seconds))
    raw = libc.ctime(byref(when))
    if not raw:
        raise OperationError(space.w_ValueError,
                             space.wrap("unconvertible time"))
    # C ctime() appends '\n'; strip it before wrapping
    return space.wrap(raw[:-1])
ctime.unwrap_spec = [ObjSpace, W_Root]
# w_tup is (for now) an optional argument and not *args,
# because of the ext. compiler bugs in handling such arguments (*args, **kwds)
def asctime(space, w_tup=None):
    """asctime([tuple]) -> string

    Convert a time tuple to a string, e.g. 'Sat Jun 06 16:26:11 1998'.
    When the time tuple is not present, current time as returned by localtime()
    is used."""
    buf_value = _gettmarg(space, w_tup)
    p = libc.asctime(byref(buf_value))
    if not p:
        raise OperationError(space.w_ValueError,
                             space.wrap("unconvertible time"))
    # C asctime() appends '\n'; strip it before wrapping
    return space.wrap(p[:-1])  # get rid of new line
asctime.unwrap_spec = [ObjSpace, W_Root]
def gmtime(space, w_seconds=None):
    """gmtime([seconds]) -> (tm_year, tm_mon, tm_day, tm_hour, tm_min,
                          tm_sec, tm_wday, tm_yday, tm_isdst)

    Convert seconds since the Epoch to a time tuple expressing UTC (a.k.a.
    GMT).  When 'seconds' is not passed in, convert the current time instead.
    """
    # rpython does not support that a variable has two incompatible builtins
    # as value so we have to duplicate the code. NOT GOOD! see localtime() too
    when = time_t(_get_inttime(space, w_seconds))
    result = libc.gmtime(byref(when))
    if not result:
        raise OperationError(space.w_ValueError, space.wrap(_get_error_msg()))
    return _tm_to_tuple(space, result.contents)
gmtime.unwrap_spec = [ObjSpace, W_Root]
def localtime(space, w_seconds=None):
    """localtime([seconds]) -> (tm_year, tm_mon, tm_day, tm_hour, tm_min,
                             tm_sec, tm_wday, tm_yday, tm_isdst)

    Convert seconds since the Epoch to a time tuple expressing local time.
    When 'seconds' is not passed in, convert the current time instead."""
    when = time_t(_get_inttime(space, w_seconds))
    result = libc.localtime(byref(when))
    if not result:
        raise OperationError(space.w_ValueError, space.wrap(_get_error_msg()))
    return _tm_to_tuple(space, result.contents)
localtime.unwrap_spec = [ObjSpace, W_Root]
def mktime(space, w_tup):
    """mktime(tuple) -> floating point number

    Convert a time tuple in local time to seconds since the Epoch."""
    tm_value = _gettmarg(space, w_tup, allowNone=False)
    result = libc.mktime(byref(tm_value))
    if result == -1:
        # C mktime() signals an unrepresentable time by returning -1
        raise OperationError(space.w_OverflowError,
                             space.wrap("mktime argument out of range"))
    return space.wrap(float(result))
mktime.unwrap_spec = [ObjSpace, W_Root]
if _POSIX:
    def tzset(space):
        """tzset()

        Initialize, or reinitialize, the local timezone to the value stored in
        os.environ['TZ']. The TZ environment variable should be specified in
        standard Unix timezone format as documented in the tzset man page
        (eg. 'US/Eastern', 'Europe/Amsterdam'). Unknown timezones will silently
        fall back to UTC. If the TZ environment variable is not set, the local
        timezone is set to the systems best guess of wallclock time.
        Changing the TZ environment variable without calling tzset *may* change
        the local timezone used by methods such as localtime, but this behaviour
        should not be relied on"""
        libc.tzset()
        # reset timezone, altzone, daylight and tzname on the applevel module
        timezone, daylight, tzname, altzone = _init_timezone()
        _set_module_object(space, "timezone", space.wrap(timezone))
        _set_module_object(space, 'daylight', space.wrap(daylight))
        tzname_w = [space.wrap(tzname[0]), space.wrap(tzname[1])]
        _set_module_object(space, 'tzname', space.newtuple(tzname_w))
        _set_module_object(space, 'altzone', space.wrap(altzone))
    tzset.unwrap_spec = [ObjSpace]
def strftime(space, format, w_tup=None):
    """strftime(format[, tuple]) -> string

    Convert a time tuple to a string according to a format specification.
    See the library reference manual for formatting codes. When the time tuple
    is not present, current time as returned by localtime() is used."""
    buf_value = _gettmarg(space, w_tup)
    # Checks added to make sure strftime() does not crash Python by
    # indexing blindly into some array for a textual representation
    # by some bad index (fixes bug #897625).
    # No check for year since handled in gettmarg().
    if buf_value.tm_mon < 0 or buf_value.tm_mon > 11:
        raise OperationError(space.w_ValueError,
                             space.wrap("month out of range"))
    if buf_value.tm_mday < 1 or buf_value.tm_mday > 31:
        raise OperationError(space.w_ValueError,
                             space.wrap("day of month out of range"))
    if buf_value.tm_hour < 0 or buf_value.tm_hour > 23:
        raise OperationError(space.w_ValueError,
                             space.wrap("hour out of range"))
    if buf_value.tm_min < 0 or buf_value.tm_min > 59:
        raise OperationError(space.w_ValueError,
                             space.wrap("minute out of range"))
    # 61 allows for leap seconds, as in C
    if buf_value.tm_sec < 0 or buf_value.tm_sec > 61:
        raise OperationError(space.w_ValueError,
                             space.wrap("seconds out of range"))
    if buf_value.tm_yday < 0 or buf_value.tm_yday > 365:
        raise OperationError(space.w_ValueError,
                             space.wrap("day of year out of range"))
    if buf_value.tm_isdst < -1 or buf_value.tm_isdst > 1:
        raise OperationError(space.w_ValueError,
                             space.wrap("daylight savings flag out of range"))
    # retry with a doubling buffer until strftime() reports success or the
    # buffer is so large that an empty result must be genuine
    i = 1024
    while True:
        outbuf = create_string_buffer(i)
        buflen = libc.strftime(outbuf, i, format, byref(buf_value))
        if buflen > 0 or i >= 256 * len(format):
            # if the buffer is 256 times as long as the format,
            # it's probably not failing for lack of room!
            # More likely, the format yields an empty result,
            # e.g. an empty format, or %Z when the timezone
            # is unknown.
            if buflen < 0: buflen = 0 # should not occur
            return space.wrap(outbuf.value[:buflen])
        i += i
strftime.unwrap_spec = [ObjSpace, str, W_Root]
| Python |
# NOT_RPYTHON
import os
from _structseq import structseqtype, structseqfield
_POSIX = os.name == "posix"  # True on Unix-like systems; gates the select()-based sleep()
# Applevel mirror of the C 'struct tm': a struct-sequence whose fields are
# accessible both by index and by name (like CPython's time.struct_time).
# The _structseq metaclass generates the sequence behaviour from the
# structseqfield index declarations below.
class struct_time:
    __metaclass__ = structseqtype
    __module__ = 'time'
    tm_year = structseqfield(0)
    tm_mon = structseqfield(1)
    tm_mday = structseqfield(2)
    tm_hour = structseqfield(3)
    tm_min = structseqfield(4)
    tm_sec = structseqfield(5)
    tm_wday = structseqfield(6)
    tm_yday = structseqfield(7)
    tm_isdst = structseqfield(8)
if _POSIX:
    from select import select
    def sleep(secs):
        """sleep(seconds)

        Delay execution for a given number of seconds. The argument may be
        a floating point number for subsecond precision."""
        # select() with a None timeout would block forever, so mimic
        # CPython and reject None explicitly.
        if secs is None:
            raise TypeError('a float is required')
        # select() on no file descriptors is a portable sub-second sleep
        select([], [], [], secs)
def strptime(string, format="%a %b %d %H:%M:%S %Y"):
    """strptime(string, format) -> struct_time

    Parse a string to a time tuple according to a format specification.
    See the library reference manual for formatting codes
    (same as strftime())."""
    # Delegate to the pure-Python implementation shipped with the
    # CPython standard library.
    from _strptime import strptime as _parse
    return _parse(string, format)
__doc__ = """This module provides various functions to manipulate time values.
There are two standard representations of time. One is the number
of seconds since the Epoch, in UTC (a.k.a. GMT). It may be an integer
or a floating point number (to represent fractions of seconds).
The Epoch is system-defined; on Unix, it is generally January 1st, 1970.
The actual value can be retrieved by calling gmtime(0).
The other representation is a tuple of 9 integers giving local time.
The tuple items are:
year (four digits, e.g. 1998)
month (1-12)
day (1-31)
hours (0-23)
minutes (0-59)
seconds (0-59)
weekday (0-6, Monday is 0)
Julian day (day in the year, 1-366)
DST (Daylight Savings Time) flag (-1, 0 or 1)
If the DST flag is 0, the time is given in the regular time zone;
if it is 1, the time is given in the DST time zone;
if it is -1, mktime() should guess based on the date and time.
Variables:
timezone -- difference in seconds between UTC and local standard time
altzone -- difference in seconds between UTC and local DST time
daylight -- whether local time should reflect DST
tzname -- tuple of (standard time zone name, DST time zone name)
Functions:
time() -- return current time in seconds since the Epoch as a float
clock() -- return CPU time since process start as a float
sleep() -- delay for a number of seconds given as a float
gmtime() -- convert seconds since Epoch to UTC tuple
localtime() -- convert seconds since Epoch to local time tuple
asctime() -- convert time tuple to string
ctime() -- convert time in seconds to string
mktime() -- convert local time tuple to seconds since Epoch
strftime() -- convert time tuple to string according to format specification
strptime() -- parse string to time tuple according to format specification
tzset() -- change the local timezone"""
| Python |
from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
    # Mixed interp-/app-level implementation of the 'time' module
    # (the package is called 'rctime' but is exposed as 'time').
    applevel_name = 'time'

    interpleveldefs = {
        'accept2dyear': 'interp_time.accept2dyear',
        'timezone': 'interp_time.timezone',
        'daylight': 'interp_time.daylight',
        'tzname': 'interp_time.tzname',
        'altzone': 'interp_time.altzone',
        'time': 'interp_time.time',
        'clock': 'interp_time.clock',
        'ctime': 'interp_time.ctime',
        'asctime': 'interp_time.asctime',
        'gmtime': 'interp_time.gmtime',
        'localtime': 'interp_time.localtime',
        'mktime': 'interp_time.mktime',
        'strftime': 'interp_time.strftime',
    }

    def buildloaders(cls):
        # Hook called at module-build time: register the platform-dependent
        # entries and the constants that must be computed exactly once.
        from pypy.module.rctime import interp_time
        import os
        if os.name == "posix":
            # posix gets the applevel select()-based sleep and tzset()
            Module.appleveldefs['sleep'] = 'app_time.sleep'
            Module.interpleveldefs['tzset'] = 'interp_time.tzset'
        elif os.name == "nt":
            Module.interpleveldefs['sleep'] = 'interp_time.sleep'
        # this machinery is needed to expose constants
        # that have to be initialized one time only
        Module.interpleveldefs["accept2dyear"] = 'space.wrap(%r)' %\
            interp_time._init_accept2dyear()
        timezone, daylight, tzname, altzone = interp_time._init_timezone()
        Module.interpleveldefs['timezone'] = 'space.wrap(%r)' % timezone
        Module.interpleveldefs['daylight'] = 'space.wrap(%r)' % daylight
        Module.interpleveldefs['tzname'] = \
            'space.newlist([space.wrap(%r), space.wrap(%r)])' % tuple(tzname)
        Module.interpleveldefs['altzone'] = 'space.wrap(%r)' % altzone
        super(Module, cls).buildloaders()
    buildloaders = classmethod(buildloaders)

    appleveldefs = {
        'struct_time': 'app_time.struct_time',
        '__doc__': 'app_time.__doc__',
        'strptime': 'app_time.strptime',
    }
| Python |
from pypy.interpreter.error import OperationError
from pypy.interpreter.baseobjspace import ObjSpace, W_Root
from pypy.rpython.rctypes.tool import ctypes_platform
from pypy.rpython.rctypes.tool.util import find_library, load_library
import sys
from ctypes import *
class CConfig:
    # ctypes_platform configuration probe.  On every platform but OSX the
    # crypt() function lives in a separate 'crypt' library.
    _includes_ = ('unistd.h',)
    if sys.platform != 'darwin':
        cryptlib = ctypes_platform.Library('crypt')
# Pull the configured constants (including 'cryptlib' on non-OSX) into
# the module namespace.
globals().update(ctypes_platform.configure(CConfig))
if sys.platform == 'darwin':
    # On OSX crypt() is part of libc itself, not a separate library.
    dllname = find_library('c')
    assert dllname is not None
    cryptlib = cdll.LoadLibrary(dllname)
# char *crypt(const char *key, const char *salt)
c_crypt = cryptlib.crypt
c_crypt.argtypes = [c_char_p, c_char_p]
c_crypt.restype = c_char_p
def crypt(space, word, salt):
    """word will usually be a user's password. salt is a 2-character string
    which will be used to select one of 4096 variations of DES. The characters
    in salt must be either ".", "/", or an alphanumeric character. Returns
    the hashed password as a string, which will be composed of characters from
    the same alphabet as the salt."""
    # NOTE(review): crypt(3) can return NULL on failure, in which case
    # 'res' is None here and gets wrapped as-is -- confirm that is the
    # intended applevel behaviour.
    res = c_crypt(word, salt)
    return space.wrap(res)
crypt.unwrap_spec = [ObjSpace, str, str]
| Python |
from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
    """A demo built-in module based on ctypes."""
    # Single interp-level entry point; no applevel helpers are needed.
    interpleveldefs = {
        'crypt' : 'interp_crypt.crypt',
    }
    appleveldefs = {
    }
| Python |
#
| Python |
class DemoError(Exception):
    """Applevel exception raised by the _demo module."""
| Python |
from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
    """A demo built-in module based on ctypes."""
    # interp-level implementations live in demo.py; the applevel
    # exception class lives in app_demo.py
    interpleveldefs = {
        'measuretime' : 'demo.measuretime',
        'sieve' : 'demo.sieve',
        'MyType' : 'demo.W_MyType',
    }
    appleveldefs = {
        'DemoError' : 'app_demo.DemoError',
    }
| Python |
from pypy.interpreter.error import OperationError
from pypy.interpreter.baseobjspace import ObjSpace, W_Root, Wrappable
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.rpython.rctypes.tool import ctypes_platform
from pypy.rpython.rctypes.tool.libc import libc
import sys, math
from ctypes import *
# ctypes binding for the C function: time_t time(time_t *t)
time_t = ctypes_platform.getsimpletype('time_t', '#include <time.h>', c_long)
time = libc.time
time.argtypes = [POINTER(time_t)]
time.restype = time_t
def get(space, name):
    """Look up attribute *name* on the applevel '_demo' module."""
    w_demo = space.getbuiltinmodule('_demo')
    return space.getattr(w_demo, space.wrap(name))
def measuretime(space, repetitions, w_callable):
    """Call *w_callable* *repetitions* times; return the elapsed seconds.

    The result is the wall-clock difference in whole seconds as reported
    by the C time() function.  Raises the applevel _demo.DemoError when
    repetitions is not positive."""
    if repetitions <= 0:
        w_DemoError = get(space, 'DemoError')
        msg = "repetition count must be > 0"
        raise OperationError(w_DemoError, space.wrap(msg))
    t0 = time(None)
    for _ in range(repetitions):
        space.call_function(w_callable)
    t1 = time(None)
    return space.wrap(t1 - t0)
measuretime.unwrap_spec = [ObjSpace, int, W_Root]
def sieve(space, n):
    """Return the wrapped list of primes in [2, n] (sieve of Eratosthenes)."""
    candidates = range(2, n + 1)
    position = 0
    while True:
        prime = candidates[position]
        if prime > math.sqrt(n) + 1:
            # every remaining candidate is prime: wrap and return them
            wrapped = [space.newint(value) for value in candidates]
            return space.newlist(wrapped)
        # drop the multiples of 'prime' (keeping the primes found so far)
        candidates = [value for value in candidates
                      if value <= prime or value % prime != 0]
        position += 1
sieve.unwrap_spec = [ObjSpace, int]
class W_MyType(Wrappable):
    """Interp-level demo type: holds an int 'x' and exposes multiply()."""

    def __init__(self, space, x=1):
        self.space = space
        self.x = x

    def multiply(self, w_y):
        """Return self.x * y, wrapped."""
        factor = self.space.int_w(w_y)
        return self.space.wrap(self.x * factor)

    def fget_x(space, self):
        # getter for the applevel 'x' property (argument order fixed by
        # GetSetProperty with cls=W_MyType)
        return space.wrap(self.x)

    def fset_x(space, self, w_value):
        # setter for the applevel 'x' property
        self.x = space.int_w(w_value)
def mytype_new(space, w_subtype, x):
    # applevel constructor MyType(x); w_subtype is ignored (this demo
    # does not support subclassing)
    return space.wrap(W_MyType(space, x))
mytype_new.unwrap_spec = [ObjSpace, W_Root, int]

# expose 'x' as a read/write applevel attribute
getset_x = GetSetProperty(W_MyType.fget_x, W_MyType.fset_x, cls=W_MyType)

W_MyType.typedef = TypeDef('MyType',
    __new__ = interp2app(mytype_new),
    x = getset_x,
    multiply = interp2app(W_MyType.multiply),
    )
| Python |
# Interp-level implementations of the 'operator' module: each function
# simply forwards to the corresponding object-space operation.

def index(space, w_a):
    'index(a) -- Same as a.__index__().'
    return space.index(w_a)

def abs(space, w_obj):
    'abs(a) -- Same as abs(a).'
    return space.abs(w_obj)

def add(space, w_obj1, w_obj2):
    'add(a, b) -- Same as a + b'
    return space.add(w_obj1, w_obj2)

def and_(space, w_obj1, w_obj2):
    'and_(a, b) -- Same as a & b'
    return space.and_(w_obj1, w_obj2)

# attrgetter

def concat(space, w_obj1, w_obj2):
    'concat(a, b) -- Same as a + b, for a and b sequences.'
    return space.add(w_obj1, w_obj2) # XXX cPython only works on types with sequence api
                                     # we support any with __add__

def contains(space, w_obj1, w_obj2):
    'contains(a, b) -- Same as b in a (note reversed operands).'
    return space.contains(w_obj1, w_obj2)

# countOf

def delitem(space, w_obj, w_key):
    'delitem(a,b) -- Same as del a[b]'
    space.delitem(w_obj, w_key)

# delslice

def div(space, w_a, w_b):
    'div(a, b) -- Same as a / b when __future__.division is not in effect'
    return space.div(w_a, w_b)

def eq(space, w_a, w_b):
    'eq(a, b) -- Same as a==b'
    return space.eq(w_a, w_b)

def floordiv(space, w_a, w_b):
    'floordiv(a, b) -- Same as a // b.'
    return space.floordiv(w_a, w_b)

def ge(space, w_a, w_b):
    'ge(a, b) -- Same as a>=b.'
    return space.ge(w_a, w_b)

def getitem(space, w_a, w_b):
    'getitem(a, b) -- Same as a[b].'
    return space.getitem(w_a, w_b)

# getslice

def gt(space, w_a, w_b):
    'gt(a, b) -- Same as a>b.'
    return space.gt(w_a, w_b)

# indexOf

def inv(space, w_obj,):
    'inv(a) -- Same as ~a.'
    return space.invert(w_obj)

def invert(space, w_obj,):
    'invert(a) -- Same as ~a.'
    return space.invert(w_obj)

def isCallable(space, w_obj):
    'isCallable(a) -- Same as callable(a).'
    return space.callable(w_obj)

# isMappingType
# isNumberType
# isSequenceType

def is_(space, w_a, w_b):
    'is_(a,b) -- Same as a is b'
    return space.is_(w_a, w_b)

def is_not(space, w_a, w_b):
    'is_not(a, b) -- Same as a is not b'
    return space.not_(space.is_(w_a, w_b))

# itemgetter

def le(space, w_a, w_b):
    'le(a, b) -- Same as a<=b.'
    return space.le(w_a, w_b)

def lshift(space, w_a, w_b):
    'lshift(a, b) -- Same as a << b.'
    return space.lshift(w_a, w_b)

def lt(space, w_a, w_b):
    'lt(a, b) -- Same as a<b.'
    return space.lt(w_a, w_b)

def mod(space, w_a, w_b):
    'mod(a, b) -- Same as a % b.'
    return space.mod(w_a, w_b)

def mul(space, w_a, w_b):
    'mul(a, b) -- Same as a * b.'
    return space.mul(w_a, w_b)

def ne(space, w_a, w_b):
    'ne(a, b) -- Same as a!=b.'
    return space.ne(w_a, w_b)

def neg(space, w_obj,):
    'neg(a) -- Same as -a.'
    return space.neg(w_obj)

def not_(space, w_obj,):
    'not_(a) -- Same as not a.'
    return space.not_(w_obj)

def or_(space, w_a, w_b):
    'or_(a, b) -- Same as a | b.'
    return space.or_(w_a, w_b)

def pos(space, w_obj,):
    'pos(a) -- Same as +a.'
    return space.pos(w_obj)

def pow(space, w_a, w_b):
    'pow(a, b) -- Same as a**b.'
    # two-argument pow: the third (modulo) argument is None
    return space.pow(w_a, w_b, space.w_None)

# repeat

def rshift(space, w_a, w_b):
    'rshift(a, b) -- Same as a >> b.'
    return space.rshift(w_a, w_b)

# sequenceIncludes

def setitem(space, w_obj, w_key, w_value):
    'setitem(a, b, c) -- Same as a[b] = c.'
    space.setitem(w_obj, w_key, w_value)

# setslice

def sub(space, w_a, w_b):
    'sub(a, b) -- Same as a - b.'
    return space.sub(w_a, w_b)

def truediv(space, w_a, w_b):
    'truediv(a, b) -- Same as a / b when __future__.division is in effect.'
    return space.truediv(w_a, w_b)

def truth(space, w_a,):
    'truth(a) -- Return True if a is true, False otherwise.'
    return space.nonzero(w_a)

def xor(space, w_a, w_b):
    'xor(a, b) -- Same as a ^ b.'
    return space.xor(w_a, w_b)
| Python |
'''NOT_RPYTHON: because of attrgetter and itemgetter
Operator interface.
This module exports a set of operators as functions. E.g. operator.add(x,y) is
equivalent to x+y.
'''
def attrgetter(attr):
    """attrgetter(attr) -- Return a callable that fetches attr from its operand.

    Generalized to support dotted attribute paths, like CPython's
    operator.attrgetter: attrgetter('a.b')(obj) returns obj.a.b.
    Plain (undotted) names behave exactly as before."""
    names = attr.split('.')
    def f(obj):
        for name in names:
            obj = getattr(obj, name)
        return obj
    return f
def countOf(a, b):
    'countOf(a, b) -- Return the number of times b occurs in a.'
    return sum(1 for x in a if x == b)
def delslice(obj, start, end):
'delslice(a, b, c) -- Same as del a[b:c].'
if not isinstance(start, int) or not isinstance(end, int):
raise TypeError("an integer is expected")
del obj[start:end]
__delslice__ = delslice
def getslice(a, start, end):
'getslice(a, b, c) -- Same as a[b:c].'
if not isinstance(start, int) or not isinstance(end, int):
raise TypeError("an integer is expected")
return a[start:end]
__getslice__ = getslice
def indexOf(a, b):
    'indexOf(a, b) -- Return the first index of b in a.'
    for index, x in enumerate(a):
        if x == b:
            return index
    raise ValueError('sequence.index(x): x not in sequence')
# XXX the following is approximative
def isMappingType(obj):
    'isMappingType(a) -- Return True if a has a mapping type, False otherwise.'
    # XXX this is fragile and approximative anyway: duck-type on the two
    # methods every mapping is expected to have
    return hasattr(obj, 'keys') and hasattr(obj, '__getitem__')
def isNumberType(obj):
    'isNumberType(a) -- Return True if a has a numeric type, False otherwise.'
    # duck-type: anything convertible to int or float counts as a number
    return hasattr(obj, '__int__') or hasattr(obj, '__float__')
def isSequenceType(obj):
    'isSequenceType(a) -- Return True if a has a sequence type, False otherwise.'
    # duck-type: indexable objects count as sequences
    return hasattr(obj, '__getitem__')
def itemgetter(idx):
    """itemgetter(idx) -- Return a callable that fetches item idx from its operand."""
    def getter(obj):
        return obj[idx]
    return getter
def repeat(obj, num):
    'repeat(a, b) -- Return a * b, where a is a sequence, and b is an integer.'
    # reject non-integral repeat counts up-front (floats etc.)
    if not isinstance(num, (int, long)):
        raise TypeError, 'an integer is required'
    return obj * num # XXX cPython only supports objects with the sequence
                     # protocol. We support any with a __mul__
__repeat__ = repeat
def setslice(a, b, c, d):
    'setslice(a, b, c, d) -- Same as a[b:c] = d.'
    # NOTE(review): unlike delslice/getslice above, the slice bounds are
    # not validated as ints here -- confirm whether that is intentional.
    a[b:c] = d
__setslice__ = setslice
| Python |
from pypy.interpreter.mixedmodule import MixedModule
from pypy.interpreter.error import OperationError
class Module(MixedModule):
    """Operator Builtin Module. """
    # HACK! override loaders to be able to access different operations
    # under same name. I.e., operator.eq == operator.__eq__

    def __init__(self, space, w_name):
        # create_lambda exists to capture 'alsoname' by value (a direct
        # lambda in the loop would late-bind the loop variable)
        def create_lambda(name, alsoname):
            return lambda space : self.getdictvalue(space, space.wrap(alsoname))

        MixedModule.__init__(self, space, w_name)
        for name, alsoname in self.mapping.iteritems():
            self.loaders[name] = create_lambda(name, alsoname)

    appleveldefs = {}
    # helpers implemented at applevel live in app_operator.py
    app_names = ['__delslice__', '__getslice__', '__repeat__', '__setslice__',
                 'attrgetter', 'countOf', 'delslice', 'getslice', 'indexOf',
                 'isMappingType', 'isNumberType', 'isSequenceType',
                 'itemgetter','repeat', 'setslice',
                 ]
    for name in app_names:
        appleveldefs[name] = 'app_operator.%s' % name

    # the rest forwards to the object space via interp_operator.py
    interp_names = ['index', 'abs', 'add',
                    'and_', 'concat', 'contains', 'delitem', 'div', 'eq', 'floordiv',
                    'ge', 'getitem', 'gt', 'inv',
                    'invert', 'is_', 'is_not', 'isCallable', 'le',
                    'lshift', 'lt', 'mod', 'mul',
                    'ne', 'neg', 'not_', 'or_',
                    'pos', 'pow', 'rshift', 'setitem', 'sequenceIncludes',
                    'sub', 'truediv', 'truth', 'xor']
    interpleveldefs = {}
    for name in interp_names:
        interpleveldefs[name] = 'interp_operator.%s' % name

    # dunder aliases: each key is loaded from the entry named by its value
    # (resolved through the loaders hack in __init__ above)
    mapping = {
        '__abs__' : 'abs',
        '__add__' : 'add',
        '__and__' : 'and_',
        '__concat__' : 'concat',
        '__contains__' : 'contains',
        'sequenceIncludes' : 'contains',
        '__delitem__' : 'delitem',
        '__div__' : 'div',
        '__eq__' : 'eq',
        '__floordiv__' : 'floordiv',
        '__ge__' : 'ge',
        '__getitem__' : 'getitem',
        '__gt__' : 'gt',
        '__inv__' : 'inv',
        '__invert__' : 'invert',
        '__le__' : 'le',
        '__lshift__' : 'lshift',
        '__lt__' : 'lt',
        '__mod__' : 'mod',
        '__mul__' : 'mul',
        '__ne__' : 'ne',
        '__neg__' : 'neg',
        '__not__' : 'not_',
        '__or__' : 'or_',
        '__pos__' : 'pos',
        '__pow__' : 'pow',
        '__rshift__' : 'rshift',
        '__setitem__' : 'setitem',
        '__sub__' : 'sub',
        '__truediv__' : 'truediv',
        '__xor__' : 'xor',
    }
| Python |
"""NOT_RPYTHON"""
import sys
import _file
class file(object):
    """file(name[, mode[, buffering]]) -> file object

    Open a file.  The mode can be 'r', 'w' or 'a' for reading (default),
    writing or appending.  The file will be created if it doesn't exist
    when opened for writing or appending; it will be truncated when
    opened for writing.  Add a 'b' to the mode for binary files.
    Add a '+' to the mode to allow simultaneous reading and writing.
    If the buffering argument is given, 0 means unbuffered, 1 means line
    buffered, and larger numbers specify the buffer size.
    Add a 'U' to mode to open the file for input with universal newline
    support.  Any line ending in the input file will be seen as a '\n'
    in Python.  Also, a file so opened gains the attribute 'newlines';
    the value for this attribute is one of None (no newline read yet),
    '\r', '\n', '\r\n' or a tuple containing all the newline types seen.

    Note:  open() is an alias for file().
    """
    # All real I/O is delegated to a '_file' stream object; this class only
    # tracks the fd, name, mode and closed state around it.
    _closed = True   # Until the file is successfully opened

    def __init__(self, name, mode='r', buffering=-1):
        stream = _file.open_file_as_stream(name, mode, buffering)
        fd = stream.try_to_find_file_descriptor()
        assert fd != -1
        self._fdopenstream(fd, mode, buffering, name, stream)

    def fdopen(cls, fd, mode='r', buffering=-1):
        # alternate constructor wrapping an already-open file descriptor
        f = cls.__new__(cls)
        stream = _file.fdopen_as_stream(fd, mode, buffering)
        f._fdopenstream(fd, mode, buffering, '<fdopen>', stream)
        return f
    fdopen = classmethod(fdopen)

    def _fdopenstream(self, fd, mode, buffering, name, stream):
        # common initialization shared by __init__ and fdopen
        self.fd = fd
        self._name = name
        self.softspace = 0    # Required according to file object docs
        self.encoding = None  # This is not used internally by file objects
        self._closed = False
        self.stream = stream
        self._mode = mode
        if stream.flushable():
            # make sure buffered data is flushed at interpreter exit
            sys.pypy__exithandlers__[stream] = stream.flush

    def getnewlines(self):
        "end-of-line convention used in this file"
        # the stream reports the newline kinds seen so far as a bitmask:
        # 1 == '\r', 2 == '\n', 4 == '\r\n'
        newlines = self.stream.getnewlines()
        if newlines == 0:
            return None
        if newlines in [1, 2, 4]:
            if newlines == 1:
                return "\r"
            elif newlines == 2:
                return "\n"
            else:
                return "\r\n"
        result = []
        if newlines & 1:
            result.append('\r')
        if newlines & 2:
            result.append('\n')
        if newlines & 4:
            result.append('\r\n')
        return tuple(result)

    mode     = property(lambda self: self._mode,
                        doc = "file mode ('r', 'U', 'w', 'a', "
                              "possibly with 'b' or '+' added)")
    name     = property(lambda self: self._name, doc = "file name")
    closed   = property(lambda self: self._closed,
                        doc = "True if the file is closed")
    newlines = property(lambda self: self.getnewlines(),
                        doc = "end-of-line convention used in this file")

    def read(self, n=-1):
        """read([size]) -> read at most size bytes, returned as a string.

        If the size argument is negative or omitted, read until EOF is reached.
        Notice that when in non-blocking mode, less data than what was requested
        may be returned, even if no size parameter was given."""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        if not isinstance(n, (int, long)):
            raise TypeError("an integer is required")
        if n < 0:
            return self.stream.readall()
        else:
            # loop: a single stream.read(n) may legitimately return less
            # than n bytes, so accumulate until n bytes or EOF
            result = []
            while n > 0:
                data = self.stream.read(n)
                if not data:
                    break
                n -= len(data)
                result.append(data)
            return ''.join(result)

    def readline(self, size=-1):
        """readline([size]) -> next line from the file, as a string.

        Retain newline.  A non-negative size argument limits the maximum
        number of bytes to return (an incomplete line may be returned then).
        Return an empty string at EOF."""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        if not isinstance(size, (int, long)):
            raise TypeError("an integer is required")
        if size < 0:
            return self.stream.readline()
        else:
            # very inefficient unless there is a peek()
            result = []
            while size > 0:
                # "peeks" on the underlying stream to see how many characters
                # we can safely read without reading past an end-of-line
                peeked = self.stream.peek()
                pn = peeked.find("\n", 0, size)
                if pn < 0:
                    pn = min(size-1, len(peeked))
                c = self.stream.read(pn + 1)
                if not c:
                    break
                result.append(c)
                if c.endswith('\n'):
                    break
                size -= len(c)
            return ''.join(result)

    def readlines(self, size=-1):
        """readlines([size]) -> list of strings, each a line from the file.

        Call readline() repeatedly and return a list of the lines so read.
        The optional size argument, if given, is an approximate bound on the
        total number of bytes in the lines returned."""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        if not isinstance(size, (int, long)):
            raise TypeError("an integer is required")
        if size < 0:
            # iter(callable, sentinel): call readline() until it returns ""
            return list(iter(self.stream.readline, ""))
        else:
            result = []
            while size > 0:
                line = self.stream.readline()
                if not line:
                    break
                result.append(line)
                size -= len(line)
            return result

    def write(self, data):
        """write(str) -> None.  Write string str to file.

        Note that due to buffering, flush() or close() may be needed before
        the file on disk reflects the data written."""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        return self.stream.write(data)

    def writelines(self, sequence_of_strings):
        """writelines(sequence_of_strings) -> None.  Write the strings to the file.

        Note that newlines are not added.  The sequence can be any iterable object
        producing strings.  This is equivalent to calling write() for each string."""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        for line in sequence_of_strings:
            self.stream.write(line)

    def tell(self):
        """tell() -> current file position, an integer (may be a long integer)."""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        return self.stream.tell()

    def seek(self, offset, whence=0):
        """seek(offset[, whence]) -> None.  Move to new file position.

        Argument offset is a byte count.  Optional argument whence defaults to
        0 (offset from start of file, offset should be >= 0); other values are 1
        (move relative to current position, positive or negative), and 2 (move
        relative to end of file, usually negative, although many platforms allow
        seeking beyond the end of a file).  If the file is opened in text mode,
        only offsets returned by tell() are legal.  Use of other offsets causes
        undefined behavior.
        Note that not all file objects are seekable."""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        self.stream.seek(offset, whence)

    def __iter__(self):
        """Iterating over files, as in 'for line in f:', returns each line of
        the file one by one."""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        return self
    xreadlines = __iter__

    def next(self):
        """next() -> the next line in the file, or raise StopIteration"""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        line = self.stream.readline()
        if line == '':
            raise StopIteration
        return line

    def truncate(self, size=None):
        """truncate([size]) -> None.  Truncate the file to at most size bytes.

        Size defaults to the current file position, as returned by tell()."""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        if size is None:
            size = self.stream.tell()
        self.stream.truncate(size)

    def flush(self):
        """flush() -> None.  Flush the internal I/O buffer."""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        self.stream.flush()

    def close(self):
        """close() -> None or (perhaps) an integer.  Close the file.

        Sets data attribute .closed to True.  A closed file cannot be used for
        further I/O operations.  close() may be called more than once without
        error.  Some kinds of file objects (for example, opened by popen())
        may return an exit status upon closing."""
        # idempotent: the hasattr check also covers a half-constructed
        # instance whose __init__ failed before 'stream' was assigned
        if not self._closed and hasattr(self, 'stream'):
            self._closed = True
            sys.pypy__exithandlers__.pop(self.stream, None)
            self.stream.close()

    __del__ = close

    def readinto(self, a):
        """readinto() -> Undocumented.  Don't use this; it may go away."""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        from array import array
        if not isinstance(a, array):
            raise TypeError('Can only read into array objects')
        length = len(a)
        data = self.read(length)
        # keep the array at its original length, zero-padding a short read;
        # the return value is the number of bytes actually read
        del a[:]
        a.fromstring(data + '\x00' * (length-len(data)))
        return len(data)

    def fileno(self):
        '''fileno() -> integer "file descriptor".

        This is needed for lower-level file interfaces, such os.read().'''
        if self._closed:
            raise ValueError('I/O operation on closed file')
        return self.fd

    def isatty(self):
        """isatty() -> true or false.  True if the file is connected to a tty device."""
        if self._closed:
            raise ValueError('I/O operation on closed file')
        import os
        return os.isatty(self.fd)

    def __repr__(self):
        return "<%s file '%s', mode %r at 0x%x>" % (
            self._closed and 'closed' or 'open',
            self._name,
            self._mode,
            id(self))
| Python |
import py
from pypy.rlib import streamio
from errno import EINTR
from pypy.interpreter.error import OperationError
from pypy.interpreter.gateway import ObjSpace, W_Root, NoneNotWrapped, applevel
from pypy.interpreter.baseobjspace import Wrappable
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.miscutils import Action
import os
def wrap_oserror_as_ioerror(space, e):
    """Convert an interp-level OSError into an app-level IOError.

    Returns an OperationError carrying IOError(errno, message).
    """
    assert isinstance(e, OSError)
    errno = e.errno
    if errno == EINTR:
        # A signal interrupted a system call: give any installed
        # interrupt handlers a chance to run before reporting.
        # XXX: is there a better way?
        ec = space.getexecutioncontext()
        Action.perform_actions(space.pending_actions)
        Action.perform_actions(ec.pending_actions)
    try:
        msg = os.strerror(errno)
    except ValueError:
        msg = 'error %d' % errno
    w_error = space.call_function(space.w_IOError,
                                  space.wrap(errno),
                                  space.wrap(msg))
    return OperationError(space.w_IOError, w_error)
class W_Stream(Wrappable):
    """App-level wrapper around an interp-level streamio stream."""
    def __init__(self, space, stream):
        self.stream = stream
# Generate one wrapper function per entry of streamio.STREAM_METHODS.
# Each generated function delegates to the underlying stream, translating
# streamio.StreamError into an app-level ValueError and OSError into an
# app-level IOError.  The exec installs the functions into this module's
# globals so the TypeDef below can collect them by name.
for name, argtypes in streamio.STREAM_METHODS.iteritems():
    numargs = len(argtypes)
    args = ", ".join(["v%s" % i for i in range(numargs)])
    exec py.code.Source("""
    def %(name)s(self, space, %(args)s):
        try:
            return space.wrap(self.stream.%(name)s(%(args)s))
        except streamio.StreamError, e:
            raise OperationError(space.w_ValueError,
                                 space.wrap(e.message))
        except OSError, e:
            raise wrap_oserror_as_ioerror(space, e)
    %(name)s.unwrap_spec = [W_Stream, ObjSpace] + argtypes
    """ % locals()).compile() in globals()
# Expose every generated wrapper as a method of the app-level Stream type.
W_Stream.typedef = TypeDef("Stream",
    **dict([(name, interp2app(globals()[name]))
            for name, _ in streamio.STREAM_METHODS.iteritems()]))
def is_mode_ok(space, mode):
    """Raise an app-level IOError unless *mode* starts with r, w, a or U."""
    if mode and mode[0] in ('r', 'w', 'a', 'U'):
        return
    raise OperationError(
        space.w_IOError,
        space.wrap('invalid mode : %s' % mode))
def open_file_as_stream(space, path, mode="r", buffering=-1):
    # App-level entry point: open *path* via streamio and wrap the result
    # in a W_Stream.  Any OSError raised inside the try (including during
    # wrapping) is converted to an app-level IOError.
    is_mode_ok(space, mode)
    try:
        return space.wrap(W_Stream(
            space, streamio.open_file_as_stream(path, mode, buffering)))
    except OSError, e:
        raise wrap_oserror_as_ioerror(space, e)
open_file_as_stream.unwrap_spec = [ObjSpace, str, str, int]
def fdopen_as_stream(space, fd, mode="r", buffering=-1):
    # Like open_file_as_stream, but for an already-open file descriptor.
    # NOTE(review): unlike open_file_as_stream, OSError here is NOT
    # converted to an app-level IOError -- confirm whether intentional.
    is_mode_ok(space, mode)
    return space.wrap(W_Stream(
        space, streamio.fdopen_as_stream(fd, mode, buffering)))
fdopen_as_stream.unwrap_spec = [ObjSpace, int, str, int]
| Python |
# Package initialisation
from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
    """MixedModule declaring this package's app-level and interp-level names."""
    # Names implemented in application-level Python (app_file.py).
    appleveldefs = {
        "file": "app_file.file",
    }
    # Names implemented at interpreter level (interp_file.py).
    interpleveldefs = {
        "open_file_as_stream": "interp_file.open_file_as_stream",
        "fdopen_as_stream": "interp_file.fdopen_as_stream",
    }
| Python |
# Note:
# This *is* now explicitly RPython.
# Please make sure not to break this.
"""
_codecs -- Provides access to the codec registry and the builtin
codecs.
This module should never be imported directly. The standard library
module "codecs" wraps this builtin module for use within Python.
The codec registry is accessible via:
register(search_function) -> None
lookup(encoding) -> (encoder, decoder, stream_reader, stream_writer)
The builtin Unicode codecs use the following interface:
<encoding>_encode(Unicode_object[,errors='strict']) ->
(string object, bytes consumed)
<encoding>_decode(char_buffer_obj[,errors='strict']) ->
(Unicode object, bytes consumed)
<encoding>_encode() interfaces also accept non-Unicode object as
input. The objects are then converted to Unicode using
PyUnicode_FromObject() prior to applying the conversion.
These <encoding>s are available: utf_8, unicode_escape,
raw_unicode_escape, unicode_internal, latin_1, ascii (7-bit),
mbcs (on win32).
Written by Marc-Andre Lemburg (mal@lemburg.com).
Copyright (c) Corporation for National Research Initiatives.
"""
#from unicodecodec import *
import sys
#/* --- Registry ----------------------------------------------------------- */
# Module-level registry state shared by register()/lookup()/*_error():
codec_search_path = []      # registered codec search functions, in order
codec_search_cache = {}     # normalized encoding name -> 4-tuple of functions
codec_error_registry = {}   # error handler name -> callable
codec_need_encodings = [True]  # truthy until the 'encodings' package is imported
def codec_register( search_function ):
    """register(search_function)

    Register a codec search function.  A search function takes one
    argument, the encoding name in all lower case letters, and returns
    a tuple of functions (encoder, decoder, stream_reader, stream_writer).
    """
    # NOTE(review): non-callables are silently ignored here, whereas
    # CPython raises TypeError -- confirm this is intentional.
    if not callable(search_function):
        return
    codec_search_path.append(search_function)
register = codec_register
def codec_lookup(encoding):
    """lookup(encoding) -> (encoder, decoder, stream_reader, stream_writer)

    Looks up a codec tuple in the Python codec registry and returns
    a tuple of functions.
    """
    if not isinstance(encoding, str):
        raise TypeError("Encoding must be a string")
    # Normalize: spaces become hyphens, lookup is case-insensitive.
    normalized_encoding = encoding.replace(" ", "-").lower()
    result = codec_search_cache.get(normalized_encoding, None)
    if not result:
        # Lazily pull in the standard search function: importing the
        # 'encodings' package registers it via codec_register().
        if codec_need_encodings:
            import encodings
            if len(codec_search_path) == 0:
                raise LookupError("no codec search functions registered: can't find encoding")
            del codec_need_encodings[:]
        for search in codec_search_path:
            result = search(normalized_encoding)
            if result:
                if not (type(result) == tuple and len(result) == 4):
                    raise TypeError("codec search functions must return 4-tuples")
                else:
                    # Cache successful lookups for subsequent calls.
                    codec_search_cache[normalized_encoding] = result
                return result
        if not result:
            raise LookupError("unknown encoding: %s" % encoding)
    return result
lookup = codec_lookup
def encode(v, encoding=None, errors='strict'):
    """encode(obj, [encoding[,errors]]) -> object

    Encodes obj using the codec registered for encoding. encoding defaults
    to the default encoding. errors may be given to set a different error
    handling scheme. Default is 'strict' meaning that encoding errors raise
    a ValueError. Other possible values are 'ignore', 'replace' and
    'xmlcharrefreplace' as well as any other name registered with
    codecs.register_error that can handle ValueErrors.
    """
    # PEP 8: compare against None with 'is', not '==' -- also avoids
    # invoking a custom __eq__ on the argument.
    if encoding is None:
        encoding = sys.getdefaultencoding()
    if isinstance(encoding, str):
        encoder = lookup(encoding)[0]
        if encoder and isinstance(errors, str):
            res = encoder(v, errors)
            # Codec calls return (encoded_object, length); callers of
            # encode() only want the encoded object.
            return res[0]
        else:
            raise TypeError("Errors must be a string")
    else:
        raise TypeError("Encoding must be a string")
def decode(obj, encoding=None, errors='strict'):
    """decode(obj, [encoding[,errors]]) -> object

    Decodes obj using the codec registered for encoding. encoding defaults
    to the default encoding. errors may be given to set a different error
    handling scheme. Default is 'strict' meaning that encoding errors raise
    a ValueError. Other possible values are 'ignore' and 'replace'
    as well as any other name registered with codecs.register_error that is
    able to handle ValueErrors.
    """
    # PEP 8: compare against None with 'is', not '==' -- also avoids
    # invoking a custom __eq__ on the argument.
    if encoding is None:
        encoding = sys.getdefaultencoding()
    if isinstance(encoding, str):
        decoder = lookup(encoding)[1]
        if decoder and isinstance(errors, str):
            res = decoder(obj, errors)
            if not isinstance(res, tuple) or len(res) != 2:
                # Fixed message: this is the *decoder* path (it used to
                # complain about the "encoder").
                raise TypeError("decoder must return a tuple (object, integer)")
            return res[0]
        else:
            raise TypeError("Errors must be a string")
    else:
        raise TypeError("Encoding must be a string")
def latin_1_encode(obj, errors='strict'):
    """latin_1_encode(obj[, errors]) -> (string, length)"""
    encoded = PyUnicode_EncodeLatin1(obj, len(obj), errors)
    text = ''.join(encoded)
    return text, len(text)
# XXX MBCS codec might involve ctypes ?
def mbcs_decode():
    """None
    """
    # Stub: MBCS (Windows code-page) decoding is not implemented here.
    pass
def readbuffer_encode(obj, errors='strict'):
    """readbuffer_encode(obj[, errors]) -> (string, length)

    Treats the object's string form as the encoded result."""
    data = str(obj)
    return data, len(data)
def escape_encode(obj, errors='strict'):
    """escape_encode(obj[, errors]) -> (string, length)

    Returns repr(obj) stripped of its surrounding quote characters."""
    trimmed = repr(obj)[1:-1]
    return trimmed, len(trimmed)
def utf_8_decode(data, errors='strict', final=False):
    """utf_8_decode(data[, errors[, final]]) -> (unicode, bytes consumed)"""
    chars, consumed = PyUnicode_DecodeUTF8Stateful(data, len(data), errors, final)
    return u''.join(chars), consumed
def raw_unicode_escape_decode(data, errors='strict'):
    """raw_unicode_escape_decode(data[, errors]) -> (unicode, length)"""
    chars = PyUnicode_DecodeRawUnicodeEscape(data, len(data), errors)
    text = u''.join(chars)
    return text, len(text)
def utf_7_decode(data, errors='strict'):
    """utf_7_decode(data[, errors]) -> (unicode, length)"""
    chars = PyUnicode_DecodeUTF7(data, len(data), errors)
    text = u''.join(chars)
    return text, len(text)
def unicode_escape_encode(obj, errors='strict'):
    """unicode_escape_encode(obj[, errors]) -> (string, length)"""
    # The 0 disables quoting (no surrounding quote characters emitted).
    pieces = unicodeescape_string(obj, len(obj), 0)
    text = ''.join(pieces)
    return text, len(text)
def latin_1_decode(data, errors='strict'):
    """latin_1_decode(data[, errors]) -> (unicode, length)"""
    chars = PyUnicode_DecodeLatin1(data, len(data), errors)
    text = u''.join(chars)
    return text, len(text)
def utf_16_decode(data, errors='strict', final=False):
    """utf_16_decode(data[, errors[, final]]) -> (unicode, bytes consumed)"""
    chars, consumed, _byteorder = PyUnicode_DecodeUTF16Stateful(
        data, len(data), errors, 'native', final)
    return ''.join(chars), consumed
def unicode_escape_decode(data, errors='strict'):
    """unicode_escape_decode(data[, errors]) -> (unicode, length)"""
    chars = PyUnicode_DecodeUnicodeEscape(data, len(data), errors)
    text = u''.join(chars)
    return text, len(text)
def ascii_decode(data, errors='strict'):
    """ascii_decode(data[, errors]) -> (unicode, length)"""
    chars = PyUnicode_DecodeASCII(data, len(data), errors)
    text = u''.join(chars)
    return text, len(text)
def charmap_encode(obj, errors='strict', mapping='latin-1'):
    """charmap_encode(obj[, errors[, mapping]]) -> (string, length)"""
    pieces = PyUnicode_EncodeCharmap(obj, len(obj), mapping, errors)
    text = ''.join(pieces)
    return text, len(text)
# Bytes per unicode code unit on this interpreter build:
# 2 on narrow (UCS-2) builds, 4 on wide (UCS-4) builds.
if sys.maxunicode == 65535:
    unicode_bytes = 2
else:
    unicode_bytes = 4
def unicode_internal_encode( obj, errors='strict'):
    """Encode a unicode object to the internal fixed-width byte layout.

    Each code point becomes `unicode_bytes` bytes in host byte order.
    """
    if type(obj) == unicode:
        p = []
        t = [ord(x) for x in obj]
        for i in t:
            bytes = []
            # Emit the code point little-endian, then reverse on
            # big-endian hosts.
            for j in xrange(unicode_bytes):
                bytes += chr(i%256)
                i >>= 8
            if sys.byteorder == "big":
                bytes.reverse()
            p += bytes
        res = ''.join(p)
        return res, len(res)
    else:
        # XXX known placeholder: non-unicode input yields a junk string.
        res = "You can do better than this" # XXX make this right
        return res, len(res)
def unicode_internal_decode( unistr, errors='strict'):
    """Decode internal fixed-width bytes back into a unicode string.

    Unicode input is returned unchanged.  Byte input is consumed in
    groups of `unicode_bytes` in host byte order; a trailing partial
    group is silently ignored (see the while-loop bound).
    """
    if type(unistr) == unicode:
        return unistr, len(unistr)
    else:
        p = []
        i = 0
        # Choose the byte iteration order matching the host endianness.
        if sys.byteorder == "big":
            start = unicode_bytes - 1
            stop = -1
            step = -1
        else:
            start = 0
            stop = unicode_bytes
            step = 1
        while i < len(unistr)-unicode_bytes+1:
            t = 0
            h = 0
            for j in range(start, stop, step):
                t += ord(unistr[i+j])<<(h*8)
                h += 1
            i += unicode_bytes
            p += unichr(t)
        res = u''.join(p)
        return res, len(res)
def utf_16_ex_decode(data, errors='strict', byteorder=0, final=0):
    """utf_16_ex_decode(data[, errors[, byteorder[, final]]])
    -> (unicode, bytes consumed, byteorder)

    byteorder: 0 = detect/native, -1 = little-endian, other = big-endian.
    """
    if byteorder == 0:
        bm = 'native'
    elif byteorder == -1:
        bm = 'little'
    else:
        bm = 'big'
    chars, consumed, byteorder = PyUnicode_DecodeUTF16Stateful(
        data, len(data), errors, bm, final)
    return ''.join(chars), consumed, byteorder
# XXX needs error messages when the input is invalid
def escape_decode(data, errors='strict'):
"""None
"""
l = len(data)
i = 0
res = []
while i < l:
if data[i] == '\\':
i += 1
if i >= l:
raise ValueError("Trailing \\ in string")
else:
if data[i] == '\\':
res += '\\'
elif data[i] == 'n':
res += '\n'
elif data[i] == 't':
res += '\t'
elif data[i] == 'r':
res += '\r'
elif data[i] == 'b':
res += '\b'
elif data[i] == '\'':
res += '\''
elif data[i] == '\"':
res += '\"'
elif data[i] == 'f':
res += '\f'
elif data[i] == 'a':
res += '\a'
elif data[i] == 'v':
res += '\v'
elif '0' <= data[i] <= '9':
# emulate a strange wrap-around behavior of CPython:
# \400 is the same as \000 because 0400 == 256
octal = data[i:i+3]
res += chr(int(octal, 8) & 0xFF)
i += 2
elif data[i] == 'x':
hexa = data[i+1:i+3]
res += chr(int(hexa, 16))
i += 2
else:
res += data[i]
i += 1
res = ''.join(res)
return res, len(res)
def charbuffer_encode(obj, errors='strict'):
    """charbuffer_encode(obj[, errors]) -> (string, length)"""
    text = str(obj)
    return text, len(text)
def charmap_decode(data, errors='strict', mapping=None):
    """charmap_decode(data[, errors[, mapping]]) -> (unicode, length)"""
    chars = PyUnicode_DecodeCharmap(data, len(data), mapping, errors)
    text = ''.join(chars)
    return text, len(text)
def utf_7_encode(obj, errors='strict'):
    """utf_7_encode(obj[, errors]) -> (string, length)"""
    # The two zeroes disable optional Set-O and whitespace encoding.
    pieces = PyUnicode_EncodeUTF7(obj, len(obj), 0, 0, errors)
    text = ''.join(pieces)
    return text, len(text)
def mbcs_encode( obj, errors='strict'):
    """None
    """
    # Stub: MBCS (Windows code-page) encoding is not implemented here.
    pass
##    return (PyUnicode_EncodeMBCS(
##			       (obj), 
##			       len(obj),
##			       errors),
##		    len(obj))
def ascii_encode(obj, errors='strict'):
    """ascii_encode(obj[, errors]) -> (string, length)"""
    pieces = PyUnicode_EncodeASCII(obj, len(obj), errors)
    text = ''.join(pieces)
    return text, len(text)
def utf_16_encode(obj, errors='strict'):
    """utf_16_encode(obj[, errors]) -> (string, length)"""
    pieces = PyUnicode_EncodeUTF16(obj, len(obj), errors, 'native')
    text = ''.join(pieces)
    return text, len(text)
def raw_unicode_escape_encode(obj, errors='strict'):
    """raw_unicode_escape_encode(obj[, errors]) -> (string, length)"""
    pieces = PyUnicode_EncodeRawUnicodeEscape(obj, len(obj))
    text = ''.join(pieces)
    return text, len(text)
def utf_8_encode(obj, errors='strict'):
    """utf_8_encode(obj[, errors]) -> (string, length)"""
    pieces = PyUnicode_EncodeUTF8(obj, len(obj), errors)
    text = ''.join(pieces)
    return text, len(text)
def utf_16_le_encode(obj, errors='strict'):
    """utf_16_le_encode(obj[, errors]) -> (string, length)"""
    pieces = PyUnicode_EncodeUTF16(obj, len(obj), errors, 'little')
    text = ''.join(pieces)
    return text, len(text)
def utf_16_be_encode(obj, errors='strict'):
    """utf_16_be_encode(obj[, errors]) -> (string, length)"""
    pieces = PyUnicode_EncodeUTF16(obj, len(obj), errors, 'big')
    text = ''.join(pieces)
    return text, len(text)
def utf_16_le_decode(data, errors='strict', byteorder=0, final = 0):
    """utf_16_le_decode(data[, errors[, byteorder[, final]]])
    -> (unicode, bytes consumed)"""
    chars, consumed, _byteorder = PyUnicode_DecodeUTF16Stateful(
        data, len(data), errors, 'little', final)
    return u''.join(chars), consumed
def utf_16_be_decode(data, errors='strict', byteorder=0, final = 0):
    """utf_16_be_decode(data[, errors[, byteorder[, final]]])
    -> (unicode, bytes consumed)"""
    chars, consumed, _byteorder = PyUnicode_DecodeUTF16Stateful(
        data, len(data), errors, 'big', final)
    return u''.join(chars), consumed
def strict_errors(exc):
    """Error callback for 'strict': always re-raise the exception."""
    if not isinstance(exc, Exception):
        raise TypeError("codec must pass exception instance")
    raise exc
def ignore_errors(exc):
    """Error callback for 'ignore': skip the offending input entirely."""
    unicode_errors = (UnicodeEncodeError, UnicodeDecodeError,
                      UnicodeTranslateError)
    if isinstance(exc, unicode_errors):
        return u'', exc.end
    raise TypeError("don't know how to handle %.400s in error callback"%exc)
# U+FFFD, the Unicode replacement character.
Py_UNICODE_REPLACEMENT_CHARACTER = u"\ufffd"
def replace_errors(exc):
    """Error callback for 'replace': '?' on encode, U+FFFD on decode/translate."""
    if isinstance(exc, UnicodeEncodeError):
        return u'?'*(exc.end-exc.start), exc.end
    if isinstance(exc, (UnicodeTranslateError, UnicodeDecodeError)):
        return Py_UNICODE_REPLACEMENT_CHARACTER*(exc.end-exc.start), exc.end
    raise TypeError("don't know how to handle %.400s in error callback"%exc)
def xmlcharrefreplace_errors(exc):
    """Error callback: replace unencodable chars with XML numeric references."""
    if not isinstance(exc, UnicodeEncodeError):
        raise TypeError("don't know how to handle %.400s in error callback"%type(exc))
    pieces = []
    for ch in exc.object[exc.start:exc.end]:
        pieces += '&#'
        pieces += str(ord(ch))
        pieces += ';'
    return u''.join(pieces), exc.end
def backslashreplace_errors(exc):
    """Error callback: replace unencodable chars with \\xhh, \\uxxxx or \\Uxxxxxxxx."""
    if not isinstance(exc, UnicodeEncodeError):
        raise TypeError("don't know how to handle %.400s in error callback"%type(exc))
    out = []
    for c in exc.object[exc.start:exc.end]:
        out += '\\'
        code = ord(c)
        # Pick the shortest escape form that fits the code point.
        if code >= 0x00010000:
            out += 'U'
            out += "%.8x" % code
        elif code >= 0x100:
            out += 'u'
            out += "%.4x" % code
        else:
            out += 'x'
            out += "%.2x" % code
    return u''.join(out), exc.end
# ----------------------------------------------------------------------
##import sys
##""" Python implementation of CPythons builtin unicode codecs.
##
## Generally the functions in this module take a list of characters an returns
## a list of characters.
##
## For use in the PyPy project"""
## indicate whether a UTF-7 character is special i.e. cannot be directly
## encoded:
## 0 - not special
## 1 - special
## 2 - whitespace (optional)
## 3 - RFC2152 Set O (optional)
# Classification table for the 128 ASCII characters, indexed by ordinal;
# values per the legend above (0 direct, 1 special, 2 whitespace, 3 Set O).
utf7_special = [
    1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 2, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    2, 3, 3, 3, 3, 3, 3, 0, 0, 0, 3, 1, 0, 0, 0, 1,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0,
    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 3, 3, 3,
    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 1, 1,
]
# Per-ordinal cache for Latin-1 characters (lazily filled elsewhere).
unicode_latin1 = [None]*256
def lookup_error(errors):
    """lookup_error(errors) -> handler

    Return the error handler registered under the name *errors*,
    raising LookupError when no such handler exists.
    """
    if errors not in codec_error_registry:
        raise LookupError("unknown error handler name %s"%errors)
    return codec_error_registry[errors]
def register_error(errors, handler):
    """register_error(errors, handler)

    Register *handler* under the name *errors*.  The handler must be a
    callable that accepts an exception instance describing the location
    of the encoding/decoding error and returns a (replacement, new
    position) tuple.
    """
    if not callable(handler):
        raise TypeError("handler must be callable")
    codec_error_registry[errors] = handler
# Install the built-in error handlers under their standard names.
register_error("strict", strict_errors)
register_error("ignore", ignore_errors)
register_error("replace", replace_errors)
register_error("xmlcharrefreplace", xmlcharrefreplace_errors)
register_error("backslashreplace", backslashreplace_errors)
def SPECIAL(c, encodeO, encodeWS):
    """True when UTF-7 must escape character *c* (given the optional
    Set-O and whitespace encoding flags)."""
    code = ord(c)
    if code > 127 or utf7_special[code] == 1:
        return True
    if encodeWS and utf7_special[code] == 2:
        return True
    return (encodeO and (utf7_special[code] == 3))
def B64(n):
    """Map a 6-bit value to its base64 alphabet character."""
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    return alphabet[n & 0x3f]
def B64CHAR(c):
    """True when *c* belongs to the base64 alphabet (alphanumeric, '+' or '/')."""
    return c.isalnum() or c == '+' or c == '/'
def UB64(c):
    """Inverse of B64: map a base64 character back to its 6-bit value."""
    if c == '+':
        return 62
    if c == '/':
        return 63
    if c >= 'a':
        return ord(c) - 71
    if c >= 'A':
        return ord(c) - 65
    return ord(c) + 4
def ENCODE(ch, bits):
    """Emit base64 characters from the top of *ch* while >= 6 bits remain.

    Returns (characters, leftover bit count)."""
    out = []
    while bits >= 6:
        bits -= 6
        out += B64(ch >> bits)
    return out, bits
def PyUnicode_DecodeUTF7(s, size, errors):
    """Decode a UTF-7 byte string into a list of unicode characters.

    NOTE(review): for size == 0 this returns unicode('') rather than a
    list like every other path; callers that join the result tolerate
    both -- confirm before relying on the return type.
    """
    starts = s
    errmsg = ""
    inShift = 0        # inside a '+...' base64 shift sequence?
    bitsleft = 0       # pending bits accumulated from base64 input
    charsleft = 0      # pending bit buffer
    surrogate = 0
    p = []
    errorHandler = None
    exc = None
    if (size == 0):
        return unicode('')
    i = 0
    while i < size:
        ch = s[i]
        if (inShift):
            if ((ch == '-') or not B64CHAR(ch)):
                # End of the shift sequence: flush whole 16-bit units.
                inShift = 0
                i += 1
                while (bitsleft >= 16):
                    outCh = ((charsleft) >> (bitsleft-16)) & 0xffff
                    bitsleft -= 16
                    if (surrogate):
                        ##  We have already generated an error for the high surrogate
                        ##  so let's not bother seeing if the low surrogate is correct or not
                        surrogate = 0
                    elif (0xDC00 <= (outCh) and (outCh) <= 0xDFFF):
                        ##  This is a surrogate pair. Unfortunately we can't represent
                        ##  it in a 16-bit character
                        surrogate = 1
                        msg = "code pairs are not supported"
                        out, x = unicode_call_errorhandler(errors, 'utf-7', msg, s, i-1, i)
                        p += out
                        bitsleft = 0
                        break
                    else:
                        p +=  unichr(outCh )
                        #p += out
                if (bitsleft >= 6):
##                    /* The shift sequence has a partial character in it. If
##                       bitsleft < 6 then we could just classify it as padding
##                       but that is not the case here */
                    msg = "partial character in shift sequence"
                    out, x = unicode_call_errorhandler(errors, 'utf-7', msg, s, i-1, i)
##                /* According to RFC2152 the remaining bits should be zero. We
##                   choose to signal an error/insert a replacement character
##                   here so indicate the potential of a misencoded character. */
##                /* On x86, a << b == a << (b%32) so make sure that bitsleft != 0 */
##                if (bitsleft and (charsleft << (sizeof(charsleft) * 8 - bitsleft))):
##                    raise UnicodeDecodeError, "non-zero padding bits in shift sequence"
                if (ch == '-') :
                    # '+-' directly after a sequence encodes a literal '+'.
                    if ((i < size) and (s[i] == '-')) :
                        p +=  '-'
                        inShift = 1
                elif SPECIAL(ch, 0, 0) :
                    raise  UnicodeDecodeError, "unexpected special character"
                else:
                    p +=  ch
            else:
                # Accumulate 6 more bits of base64 payload.
                charsleft = (charsleft << 6) | UB64(ch)
                bitsleft += 6
                i += 1
##                /* p, charsleft, bitsleft, surrogate = */ DECODE(p, charsleft, bitsleft, surrogate);
        elif ( ch == '+' ):
            startinpos = i
            i += 1
            if (i<size and s[i] == '-'):
                i += 1
                p +=  '+'
            else:
                inShift = 1
                bitsleft = 0
        elif (SPECIAL(ch, 0, 0)):
            i += 1
            raise  UnicodeDecodeError, "unexpected special character"
        else:
            p +=  ch
            i += 1
    if (inShift) :
        #XXX This aint right
        endinpos = size
        raise UnicodeDecodeError, "unterminated shift sequence"
    return p
def PyUnicode_EncodeUTF7(s, size, encodeSetO, encodeWhiteSpace, errors):
    """Encode unicode characters to UTF-7, returning a list of chars.

    encodeSetO / encodeWhiteSpace optionally force escaping of the
    RFC 2152 Set O and whitespace characters.
    """
#    /* It might be possible to tighten this worst case */
    inShift = False     # currently emitting a base64 shift sequence?
    i = 0
    bitsleft = 0        # pending bits not yet emitted as base64
    charsleft = 0       # pending bit buffer
    out = []
    for ch in s:
        if (not inShift) :
            if (ch == '+'):
                # Literal '+' is written as '+-'.
                out +=  '+'
                out +=  '-'
            elif (SPECIAL(ch, encodeSetO, encodeWhiteSpace)):
                charsleft = ord(ch)
                bitsleft = 16
                out += '+'
                p, bitsleft = ENCODE( charsleft, bitsleft)
                out += p
                inShift = bitsleft > 0
            else:
                out += chr(ord(ch))
        else:
            if (not SPECIAL(ch, encodeSetO, encodeWhiteSpace)):
                # Leaving the shift sequence: flush leftover bits.
                out += B64((charsleft) << (6-bitsleft))
                charsleft = 0
                bitsleft = 0
##                /* Characters not in the BASE64 set implicitly unshift the sequence
##                   so no '-' is required, except if the character is itself a '-' */
                if (B64CHAR(ch) or ch == '-'):
                    out += '-'
                inShift = False
                out += chr(ord(ch))
            else:
                bitsleft += 16
                charsleft = (((charsleft) << 16) | ord(ch))
                p, bitsleft = ENCODE(charsleft, bitsleft)
                out += p
##                /* If the next character is special then we dont' need to terminate
##                   the shift sequence. If the next character is not a BASE64 character
##                   or '-' then the shift sequence will be terminated implicitly and we
##                   don't have to insert a '-'. */
                if (bitsleft == 0):
                    if (i + 1 < size):
                        ch2 = s[i+1]
                        if (SPECIAL(ch2, encodeSetO, encodeWhiteSpace)):
                            pass
                        elif (B64CHAR(ch2) or ch2 == '-'):
                            out +=  '-'
                            inShift = False
                        else:
                            inShift = False
                    else:
                        out +=  '-'
                        inShift = False
        i += 1
    if (bitsleft):
        out += B64(charsleft << (6-bitsleft) )
        out +=  '-'
    return out
unicode_empty = u''
def unicodeescape_string(s, size, quotes):
    """Build the unicode-escape representation of *s* as a list of chars.

    When *quotes* is true the output is wrapped as a repr-style
    u'...'/u"..." literal (p[1] holds the chosen quote character).
    """
    p = []
    if (quotes) :
        p += 'u'
        # Prefer single quotes unless the string contains one (and no ").
        if (s.find('\'') != -1 and s.find('"') == -1):
            p += '"' 
        else:
            p += '\''
    pos = 0
    while (pos < size):
        ch = s[pos]
        #/* Escape quotes */
        if (quotes and (ch == p[1] or ch == '\\')):
            p += '\\'
            p += chr(ord(ch))
            pos += 1
            continue
#ifdef Py_UNICODE_WIDE
        #/* Map 21-bit characters to '\U00xxxxxx' */
        elif (ord(ch) >= 0x10000):
            p += '\\'
            p += 'U'
            p += '%08x' % ord(ch)
            pos += 1
            continue        
#endif
        #/* Map UTF-16 surrogate pairs to Unicode \UXXXXXXXX escapes */
        elif (ord(ch) >= 0xD800 and ord(ch) < 0xDC00):
            pos += 1
            ch2 = s[pos]
            
            if (ord(ch2) >= 0xDC00 and ord(ch2) <= 0xDFFF):
                ucs = (((ord(ch) & 0x03FF) << 10) | (ord(ch2) & 0x03FF)) + 0x00010000
                p += '\\'
                p += 'U'
                p += '%08x' % ucs
                pos += 1
                continue
           
            #/* Fall through: isolated surrogates are copied as-is */
            pos -= 1
            
        #/* Map 16-bit characters to '\uxxxx' */
        if (ord(ch) >= 256):
            p += '\\'
            p += 'u'
            p += '%04x' % ord(ch)
            
        #/* Map special whitespace to '\t', \n', '\r' */
        elif (ch == '\t'):
            p += '\\'
            p += 't'
        
        elif (ch == '\n'):
            p += '\\'
            p += 'n'

        elif (ch == '\r'):
            p += '\\'
            p += 'r'

        #/* Map non-printable US ASCII to '\xhh' */
        elif (ch < ' ' or ch >= 0x7F) :
            p += '\\'
            p += 'x'
            p += '%02x' % ord(ch)
        #/* Copy everything else as-is */
        else:
            p += chr(ord(ch))
        pos += 1
    if (quotes):
        p += p[1]
    return p
def PyUnicode_DecodeASCII(s, size, errors):
    """Decode ASCII bytes to a list of unicode characters.

    Bytes >= 128 are routed through the registered error handler."""
    # ASCII is equivalent to the first 128 ordinals in Unicode.
    if size == 1 and ord(s) < 128:
        return [unichr(ord(s))]
    if size == 0:
        return [u'']
    decoded = []
    pos = 0
    while pos < len(s):
        c = s[pos]
        if ord(c) < 128:
            decoded += unichr(ord(c))
            pos += 1
        else:
            replacement, pos = unicode_call_errorhandler(
                errors, "ascii", "ordinal not in range(128)",
                s, pos, pos+1)
            decoded += [unichr(ord(x)) for x in replacement]
    return decoded
def PyUnicode_EncodeASCII(p, size, errors):
    # Delegate to the generic ucs1 encoder with a 128 code-point limit.
    return unicode_encode_ucs1(p, size, errors, 128)
def PyUnicode_AsASCIIString(unistr):
    """Encode a unicode object to ASCII with default (None) error handling.

    Raises TypeError when *unistr* is not a unicode instance.
    """
    if not type(unistr) == unicode:
        raise TypeError
    # Bug fix: the old code passed len(unicode) -- the length of the
    # *type* object, a TypeError at runtime -- instead of len(unistr).
    return PyUnicode_EncodeASCII(unicode(unistr),
                                 len(unistr),
                                 None)
def PyUnicode_DecodeUTF16Stateful(s, size, errors, byteorder='native', final=True):
    """Decode UTF-16 bytes into a list of unicode characters.

    Returns (chars, bytes consumed, detected byte order).  In 'native'
    mode a leading BOM selects the byte order and is skipped; explicit
    'little'/'big' modes copy a BOM through as U+FEFF.  With final=False
    a trailing incomplete code unit is left unconsumed instead of being
    reported as an error.
    """
    bo = 0       #/* assume native ordering by default */
    consumed = 0
    errmsg = ""

    # ihi/ilo: offsets of the high and low byte within one code unit.
    if sys.byteorder == 'little':
        ihi = 1
        ilo = 0
    else:
        ihi = 0
        ilo = 1

    #/* Unpack UTF-16 encoded data */

##    /* Check for BOM marks (U+FEFF) in the input and adjust current
##       byte order setting accordingly. In native mode, the leading BOM
##       mark is skipped, in all other modes, it is copied to the output
##       stream as-is (giving a ZWNBSP character). */
    q = 0
    p = []
    if byteorder == 'native':
        if (size >= 2):
            bom = (ord(s[ihi]) << 8) | ord(s[ilo])
#ifdef BYTEORDER_IS_LITTLE_ENDIAN
            if sys.byteorder == 'little':
                if (bom == 0xFEFF):
                    q += 2
                    bo = -1
                elif bom == 0xFFFE:
                    q += 2
                    bo = 1
            else:
                if bom == 0xFEFF:
                    q += 2
                    bo = 1
                elif bom == 0xFFFE:
                    q += 2
                    bo = -1
    elif byteorder == 'little':
        bo = -1
    else:
        bo = 1
        
    if (size == 0):
        return [u''], 0, bo
    
    if (bo == -1):
        #/* force LE */
        ihi = 1
        ilo = 0

    elif (bo == 1):
        #/* force BE */
        ihi = 0
        ilo = 1

    while (q < len(s)):
    
        #/* remaining bytes at the end? (size should be even) */
        if (len(s)-q<2):
            if not final:
                break
            errmsg = "truncated data"
            startinpos = q
            endinpos = len(s)
            unicode_call_errorhandler(errors, 'utf-16', errmsg, s, startinpos, endinpos, True)
#          /* The remaining input chars are ignored if the callback
##             chooses to skip the input */
    
        ch = (ord(s[q+ihi]) << 8) | ord(s[q+ilo])
        q += 2
    
        if (ch < 0xD800 or ch > 0xDFFF):
            p += unichr(ch)
            continue
    
        #/* UTF-16 code pair: */
        if (q >= len(s)):
            errmsg = "unexpected end of data"
            startinpos = q-2
            endinpos = len(s)
            unicode_call_errorhandler(errors, 'utf-16', errmsg, s, startinpos, endinpos, True)

        if (0xD800 <= ch and ch <= 0xDBFF):
            ch2 = (ord(s[q+ihi]) << 8) | ord(s[q+ilo])
            q += 2
            if (0xDC00 <= ch2 and ch2 <= 0xDFFF):
#ifndef Py_UNICODE_WIDE
                # Narrow builds keep the surrogate pair; wide builds
                # recombine it into a single code point.
                if sys.maxunicode < 65536:
                    p += unichr(ch)
                    p += unichr(ch2)
                else:
                    p += unichr((((ch & 0x3FF)<<10) | (ch2 & 0x3FF)) + 0x10000)
#endif
                continue

            else:
                errmsg = "illegal UTF-16 surrogate"
                startinpos = q-4
                endinpos = startinpos+2
                unicode_call_errorhandler(errors, 'utf-16', errmsg, s, startinpos, endinpos, True)
           
        errmsg = "illegal encoding"
        startinpos = q-2
        endinpos = startinpos+2
        unicode_call_errorhandler(errors, 'utf-16', errmsg, s, startinpos, endinpos, True)
        
    return p, q, bo
# moved out of local scope, especially because it didn't
# have any nested variables.
def STORECHAR(CH, byteorder):
    """Split a 16-bit code unit into [first byte, second byte] for the
    given byte order ('little' puts the low byte first)."""
    high = chr((CH >> 8) & 0xff)
    low = chr(CH & 0xff)
    if byteorder == 'little':
        return [low, high]
    return [high, low]
def PyUnicode_EncodeUTF16(s, size, errors, byteorder='little'):
    """Encode unicode characters to UTF-16 bytes (as a list of chars).

    In 'native' mode a BOM is emitted first.  Code points >= 0x10000
    are written as surrogate pairs.
    NOTE(review): for size == 0 this returns "" (a string) instead of a
    list like the normal path -- confirm callers tolerate both.
    """
#    /* Offsets from p for storing byte pairs in the right order. */
    p = []
    bom = sys.byteorder
    if (byteorder == 'native'):
        
        bom = sys.byteorder
        p += STORECHAR(0xFEFF, bom)
        
    if (size == 0):
        return ""

    if (byteorder == 'little' ):
        bom = 'little'
    elif (byteorder == 'big'):
        bom = 'big'


    for c in s:
        ch = ord(c)
        ch2 = 0
        if (ch >= 0x10000) :
            # Split into a UTF-16 surrogate pair.
            ch2 = 0xDC00 | ((ch-0x10000) & 0x3FF)
            ch  = 0xD800 | ((ch-0x10000) >> 10)

        p += STORECHAR(ch, bom)
        if (ch2):
            p += STORECHAR(ch2, bom)

    return p
# MBCS (Windows code-page) conversion is not implemented; both stubs
# return None.
def PyUnicode_DecodeMBCS(s, size, errors):
    pass

def PyUnicode_EncodeMBCS(p, size, errors):
    pass
def unicode_call_errorhandler(errors,  encoding, 
                reason, input, startinpos, endinpos, decode=True):
    """Invoke the registered error handler named *errors*.

    Builds a UnicodeDecodeError (decode=True) or UnicodeEncodeError for
    the failing span and returns the handler's (replacement, new
    position) pair; a negative position counts from the end of input.
    """
    
    errorHandler = lookup_error(errors)
    if decode:
        exceptionObject = UnicodeDecodeError(encoding, input, startinpos, endinpos, reason)
    else:
        exceptionObject = UnicodeEncodeError(encoding, input, startinpos, endinpos, reason)
    res = errorHandler(exceptionObject)
    if isinstance(res, tuple) and isinstance(res[0], unicode) and isinstance(res[1], int):
        newpos = res[1]
        if (newpos < 0):
            newpos = len(input) + newpos
        if newpos < 0 or newpos > len(input):
            raise IndexError( "position %d from error handler out of bounds" % newpos)
        return res[0], newpos
    else:
        raise TypeError("encoding error handler must return (unicode, int) tuple, not %s" % repr(res))
def PyUnicode_DecodeUTF8(s, size, errors):
    # Non-final decode: an incomplete trailing sequence is left
    # unconsumed rather than reported as an error.
    return PyUnicode_DecodeUTF8Stateful(s, size, errors, False)
##    /* Map UTF-8 encoded prefix byte to sequence length.  zero means
##       illegal prefix.  see RFC 2279 for details */
# Indexed by the first byte of a sequence; the value is the total number
# of bytes in that sequence (0 = invalid lead byte).
utf8_code_length = [
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 0, 0
]
def PyUnicode_DecodeUTF8Stateful(s, size, errors, final):
    """Decode UTF-8 bytes into a list of unicode characters.

    Returns (chars, position consumed).  Invalid input is routed
    through the registered error handler; with final false, a truncated
    trailing sequence is left unconsumed instead of reported.
    """
    consumed = 0
    if (size == 0):
        if not final:
            consumed = 0
        return u'', consumed
    
    p = []
    pos = 0
    while pos < size:
        ch = s[pos]
        if ord(ch) < 0x80:
            # Fast path: plain ASCII byte.
            p += ch
            pos += 1
            continue
        
        n = utf8_code_length[ord(ch)]
        startinpos =  pos 
        if (startinpos + n > size):
            if not final:
                # Truncated sequence at the end: stop, leave it unconsumed.
                break
            else:
                errmsg = "unexpected end of data"
                endinpos = size 
                res = unicode_call_errorhandler(
                                    errors, "utf8", errmsg,
                                    s,  startinpos, endinpos)
                p += res[0]
                pos = res[1]
        if n == 0:
            errmsg = "unexpected code byte"
            endinpos = startinpos+1
            res = unicode_call_errorhandler(
                                    errors, "utf8", errmsg,
                                    s,  startinpos, endinpos)
            p += res[0]
            pos = res[1]
        elif n == 1:
            errmsg = "internal error"
            endinpos = startinpos+1
            res = unicode_call_errorhandler(
                                    errors, "utf8", errmsg,
                                    s,  startinpos, endinpos)
            p += res[0]
            pos = res[1]
        elif n == 2:
            if ((ord(s[pos+1]) & 0xc0) != 0x80):
                errmsg = "invalid data"
                endinpos = startinpos+2
                res = unicode_call_errorhandler(
                                    errors, "utf8", errmsg,
                                    s,  startinpos, endinpos)
                p += res[0]
                pos = res[1]
            else:
                c = ((ord(s[pos]) & 0x1f) << 6) + (ord(s[pos+1]) & 0x3f)
                if c < 0x80:
                    # Overlong 2-byte encoding of an ASCII value.
                    errmsg = "illegal encoding"
                    endinpos = startinpos+2
                    res = unicode_call_errorhandler(
                                            errors, "utf8", errmsg,
                                            s,  startinpos, endinpos)
                    p += res[0]
                    pos = res[1]
                else:
                    p += unichr(c)
                    pos += n
                    #break
        elif n == 3:
            if ((ord(s[pos+1]) & 0xc0) != 0x80 or
                    (ord(s[pos+2]) & 0xc0) != 0x80):
                errmsg = "invalid data"
                endinpos = startinpos+3
                res = unicode_call_errorhandler(
                                    errors, "utf8", errmsg,
                                    s,  startinpos, endinpos)
                p += res[0]
                pos = res[1]
            else:
                c = ((ord(s[pos]) & 0x0f) << 12) + \
                        ((ord(s[pos+1]) & 0x3f) << 6) +\
                        (ord(s[pos+2]) & 0x3f)       
##                /* Note: UTF-8 encodings of surrogates are considered
##                   legal UTF-8 sequences;
##
##                   XXX For wide builds (UCS-4) we should probably try
##                       to recombine the surrogates into a single code
##                       unit.
##                */
                if c < 0x0800:
                    errmsg = "illegal encoding"
                    endinpos = startinpos+3
                    res = unicode_call_errorhandler(
                                            errors, "utf8", errmsg,
                                            s,  startinpos, endinpos)
                    p += res[0]
                    pos = res[1]
                else:
                    p += unichr(c)
                    pos += n
        elif n == 4:
##        case 4:
            if ((ord(s[pos+1]) & 0xc0) != 0x80 or
                    (ord(s[pos+2]) & 0xc0) != 0x80 or
                    (ord(s[pos+3]) & 0xc0) != 0x80):
                errmsg = "invalid data"
                startinpos = pos
                endinpos = startinpos+4
                res = unicode_call_errorhandler(
                                    errors, "utf8", errmsg,
                                    s,  startinpos, endinpos)
                p += res[0]
                pos = res[1]
            else:
                c = ((ord(s[pos+0]) & 0x7) << 18) + ((ord(s[pos+1]) & 0x3f) << 12) +\
                    ((ord(s[pos+2]) & 0x3f) << 6) + (ord(s[pos+3]) & 0x3f)
                #/* validate and convert to UTF-16 */
                if ((c < 0x10000) or (c > 0x10ffff)):
                    #/* minimum value allowed for 4 byte encoding */
                    #/* maximum value allowed for UTF-16 */
                    errmsg = "illegal encoding"
                    startinpos = pos
                    endinpos = startinpos+4
                    res = unicode_call_errorhandler(
                                            errors, "utf8", errmsg,
                                            s,  startinpos, endinpos)
                    p += res[0]
                    pos = res[1]
                else:
#ifdef Py_UNICODE_WIDE
                    if c < sys.maxunicode:
                        p += unichr(c)
                        pos += n
                    else:
##                /* compute and append the two surrogates: */
##                /* translate from 10000..10FFFF to 0..FFFF */
                        c -= 0x10000
            #/*  high surrogate = top 10 bits added to D800 */
                        p += unichr(0xD800 + (c >> 10))
            #/*  low surrogate = bottom 10 bits added to DC00 */
                        p += unichr(0xDC00 + (c & 0x03FF))
                        pos += n
        else:
##        default:
##            /* Other sizes are only needed for UCS-4 */
            errmsg = "unsupported Unicode code range"
            startinpos = pos
            endinpos = startinpos+n
            res = unicode_call_errorhandler(
                     errors, "utf8", errmsg,
                     s,  startinpos, endinpos)
            p += res[0]
            pos = res[1]
            
        #continue
    if not final:
        consumed = pos
    return p, pos # consumed
def PyUnicode_EncodeUTF8(s, size, errors):
    """Encode the first `size` characters of unicode string `s` as UTF-8.

    Returns a list of one-byte strings.  A valid high/low surrogate pair
    is recombined into a single 4-byte sequence; an isolated surrogate
    falls through to the ordinary 3-byte encoding.  `errors` is accepted
    for interface compatibility but never consulted here.
    """
    assert size >= 0
    out = []
    idx = 0
    while idx < size:
        code = ord(s[idx])
        idx += 1
        if code < 0x80:
            # plain ASCII: one byte
            out.append(chr(code))
        elif code < 0x0800:
            # two-byte sequence (Latin-1 and friends)
            out.append(chr(0xc0 | (code >> 6)))
            out.append(chr(0x80 | (code & 0x3f)))
        elif code < 0x10000:
            # check for a high surrogate followed by a low surrogate:
            # combine them into one UCS4 value and emit four bytes
            if 0xD800 <= code <= 0xDBFF and idx != size:
                low = ord(s[idx])
                if 0xDC00 <= low <= 0xDFFF:
                    combined = ((code - 0xD800) << 10 | (low - 0xDC00)) + 0x10000
                    idx += 1
                    out.extend(encodeUCS4(combined))
                    continue
            # regular BMP character (or an isolated surrogate): three bytes
            out.append(chr(0xe0 | (code >> 12)))
            out.append(chr(0x80 | ((code >> 6) & 0x3f)))
            out.append(chr(0x80 | (code & 0x3f)))
        else:
            # character above the BMP: four bytes
            out.extend(encodeUCS4(code))
    return out
def encodeUCS4(ch):
    """Return the 4-byte UTF-8 encoding of UCS4 code point `ch` as a
    list of four one-byte strings."""
    return [
        chr(0xf0 | (ch >> 18)),           # leading byte carries the top 3 bits
        chr(0x80 | ((ch >> 12) & 0x3f)),  # each continuation byte
        chr(0x80 | ((ch >> 6) & 0x3f)),   # carries 6 bits
        chr(0x80 | (ch & 0x3f)),
    ]
#/* --- Latin-1 Codec ------------------------------------------------------ */
def PyUnicode_DecodeLatin1(s, size, errors):
    """Decode the first `size` bytes of `s` as Latin-1.

    Latin-1 maps every byte directly to the unicode code point with the
    same ordinal, so decoding can never fail and `errors` is unused.
    Returns a list of unicode characters.
    """
    result = []
    for idx in range(size):
        result.append(unichr(ord(s[idx])))
    return result
def unicode_encode_ucs1(p, size, errors, limit):
    """Encode unicode string `p` into a list of one-byte strings,
    accepting only ordinals below `limit` (256 for latin-1, 128 for
    ascii).

    A whole run of unencodable characters is reported to the error
    handler in a single call; its replacement string is appended and
    scanning resumes at the position the handler returns.  An empty
    input (size == 0) yields [''].
    """
    if limit == 256:
        reason = "ordinal not in range(256)"
        encoding = "latin-1"
    else:
        reason = "ordinal not in range(128)"
        encoding = "ascii"
    if size == 0:
        return ['']
    out = []
    idx = 0
    while idx < len(p):
        cur = p[idx]
        if ord(cur) < limit:
            out += chr(ord(cur))
            idx += 1
        else:
            # collect the full run of unencodable characters
            runstart = idx
            runend = idx + 1
            while runend < len(p) and ord(p[runend]) >= limit:
                runend += 1
            handled = unicode_call_errorhandler(errors, encoding, reason,
                                                p, runstart, runend, False)
            out += str(handled[0])
            idx = handled[1]
    return out
def PyUnicode_EncodeLatin1(p, size, errors):
    """Encode unicode string `p` as Latin-1 (ordinals below 256)."""
    return unicode_encode_ucs1(p, size, errors, 256)
# Every character that may appear in a hexadecimal number, lower- and
# upper-case; used to scan past malformed \x/\u/\U escape sequences.
hexdigits = list('0123456789abcdef') + list('ABCDEF')
def hexescape(s, pos, digits, message, errors):
    """Parse `digits` hex digits of a \\x/\\u/\\U escape starting at
    s[pos]; return (list-of-unicode-chars, new-position).

    Malformed or truncated escapes are routed through the registered
    error handler via unicode_call_errorhandler (the `pos-2` start
    index points back at the backslash+letter that introduced the
    escape).  Code points above 0xFFFF but within the Unicode range
    are emitted as a UTF-16 surrogate pair.
    """
    # NOTE: the local name `chr` shadows the builtin chr() inside this
    # function; it holds the parsed code point.
    chr = 0
    p = []
    if (pos+digits>len(s)):
        # escape sequence runs off the end of the input
        message = "end of string in escape sequence"
        x = unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-2, len(s))
        p += x[0]
        pos = x[1]
    else:
        try:
            chr = int(s[pos:pos+digits], 16)
        except ValueError:
            # not all `digits` characters were hex: report the span up to
            # the first non-hex character
            endinpos = pos
            while s[endinpos] in hexdigits:
                endinpos += 1
            x = unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-2,
                        endinpos+1)
            p += x[0]
            pos = x[1]
        #/* when we get here, chr is a 32-bit unicode character */
        else:
            if chr <= sys.maxunicode:
                p += unichr(chr)
                pos += digits
            elif (chr <= 0x10ffff):
                # narrow build: represent the astral character as a
                # UTF-16 surrogate pair
                chr -= 0x10000L
                p += unichr(0xD800 + (chr >> 10))
                p += unichr(0xDC00 + (chr & 0x03FF))
                pos += digits
            else:
                # beyond U+10FFFF: not a legal Unicode character
                message = "illegal Unicode character"
                x = unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-2,
                        pos+1)
                p += x[0]
                pos = x[1]
    res = p
    return res, pos
def PyUnicode_DecodeUnicodeEscape(s, size, errors):
    """Decode string `s` (first `size` characters) with the
    unicode-escape codec: plain characters pass through, backslash
    escapes (\\n, octal, \\xXX, \\uXXXX, \\UXXXXXXXX, \\N{name}) are
    interpreted.  Returns a list of unicode characters.
    """
    if (size == 0):
        return u''
    p = []
    pos = 0
    while (pos < size):
        ## /* Non-escape characters are interpreted as Unicode ordinals */
        if (s[pos] != '\\') :
            p += unichr(ord(s[pos]))
            pos += 1
            continue
        ## /* \ - Escapes */
        else:
            pos += 1
            if pos >= len(s):
                # NOTE(review): the handler result is discarded here and
                # s[pos] is read right below, which would raise IndexError
                # for a trailing lone backslash -- confirm intended.
                errmessage = "\\ at end of string"
                unicode_call_errorhandler(errors, "unicodeescape", errmessage, s, pos-1, size)
            ch = s[pos]
            pos += 1
            ## /* \x escapes */
            if ch == '\\' : p += u'\\'
            elif ch == '\'': p += u'\''
            elif ch == '\"': p += u'\"'
            elif ch == 'b' : p += u'\b'
            elif ch == 'f' : p += u'\014' #/* FF */
            elif ch == 't' : p += u'\t'
            elif ch == 'n' : p += u'\n'
            elif ch == 'r' : p += u'\r'
            elif ch == 'v': p += u'\013' #break; /* VT */
            elif ch == 'a': p += u'\007' # break; /* BEL, not classic C */
            elif '0' <= ch <= '7':
                # octal escape: up to three octal digits
                x = ord(ch) - ord('0')
                if pos < size:
                    ch = s[pos]
                    if '0' <= ch <= '7':
                        pos += 1
                        x = (x<<3) + ord(ch) - ord('0')
                        if pos < size:
                            ch = s[pos]
                            if '0' <= ch <= '7':
                                pos += 1
                                x = (x<<3) + ord(ch) - ord('0')
                p += unichr(x)
            ## /* hex escapes */
            ## /* \xXX */
            elif ch == 'x':
                digits = 2
                message = "truncated \\xXX escape"
                x = hexescape(s, pos, digits, message, errors)
                p += x[0]
                pos = x[1]
            # /* \uXXXX */
            elif ch == 'u':
                digits = 4
                message = "truncated \\uXXXX escape"
                x = hexescape(s, pos, digits, message, errors)
                p += x[0]
                pos = x[1]
            # /* \UXXXXXXXX */
            elif ch == 'U':
                digits = 8
                message = "truncated \\UXXXXXXXX escape"
                x = hexescape(s, pos, digits, message, errors)
                p += x[0]
                pos = x[1]
            ## /* \N{name} */
            elif ch == 'N':
                message = "malformed \\N character escape"
                #pos += 1
                look = pos
                try:
                    import unicodedata
                except ImportError:
                    # NOTE(review): after reporting, execution continues and
                    # unicodedata.lookup below would raise NameError -- confirm.
                    message = "\\N escapes not supported (can't load unicodedata module)"
                    unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-1, size)
                if look < size and s[look] == '{':
                    #/* look for the closing brace */
                    while (look < size and s[look] != '}'):
                        look += 1
                    if (look > pos+1 and look < size and s[look] == '}'):
                        #/* found a name. look it up in the unicode database */
                        message = "unknown Unicode character name"
                        st = s[pos+1:look]
                        try:
                            chr = unicodedata.lookup("%s" % st)
                        except KeyError, e:
                            x = unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-1, look+1)
                        else:
                            x = chr, look + 1
                        p += x[0]
                        pos = x[1]
                    else:
                        # NOTE(review): handler result `x` is discarded in the
                        # two branches below; pos is not advanced past the
                        # malformed \N escape -- confirm intended.
                        x = unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-1, look+1)
                else:
                    x = unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-1, look+1)
            else:
                # unknown escape: keep the backslash and the character
                p += '\\'
                p += ch
    return p
def PyUnicode_EncodeRawUnicodeEscape(s, size):
    """Encode unicode string `s` in raw-unicode-escape form.

    Characters above the BMP become '\\UXXXXXXXX', other non-Latin-1
    characters become '\\uXXXX', everything else is copied through
    unchanged.  Returns '' for empty input, otherwise a list of
    one-character strings.
    """
    if size == 0:
        return ''
    result = []
    for ch in s:
        code = ord(ch)
        if code >= 0x10000:
            # astral character: \UXXXXXXXX (8 hex digits)
            result += '\\'
            result += 'U'
            result += '%08x' % code
        elif code >= 256:
            # BMP character outside Latin-1: \uXXXX (4 hex digits)
            result += '\\'
            result += 'u'
            result += '%04x' % code
        else:
            # Latin-1 range: copied as-is
            result += chr(code)
    return result
def charmapencode_output(c, mapping):
    """Map character ordinal `c` through `mapping` and return the
    encoded byte string.

    mapping[c] may be:
      - an integer in range(256): encoded as that single byte;
      - a string: used as-is;
      - None: position is "undefined", signalled by raising KeyError so
        the caller can invoke the error handler (same signal as a
        missing key, which propagates from the subscript itself).
    Any other value, or an out-of-range integer, raises TypeError.
    """
    rep = mapping[c]   # a missing key raises KeyError: undefined
    if isinstance(rep, int) or isinstance(rep, long):
        if rep < 256:
            return chr(rep)
        else:
            raise TypeError("character mapping must be in range(256)")
    elif isinstance(rep, str):
        return rep
    elif rep is None:
        # fix: identity comparison with None instead of `rep == None`
        raise KeyError("character maps to <undefined>")
    else:
        raise TypeError("character mapping must return integer, None or str")
def PyUnicode_EncodeCharmap(p, size, mapping='latin-1', errors='strict'):
    """Encode unicode string `p` through the given character `mapping`
    (ordinal -> byte value, string, or None).

    The default sentinel 'latin-1' delegates to the plain Latin-1
    encoder.  Characters the mapping does not cover go through the
    error handler; if even the handler's replacement cannot be mapped,
    UnicodeEncodeError is raised.
    """
    # /* Default to Latin-1 */
    if mapping == 'latin-1':
        return PyUnicode_EncodeLatin1(p, size, errors)
    if size == 0:
        return ''
    output = []
    position = 0
    while position < size:
        # try to encode the character at the current position
        try:
            output += [charmapencode_output(ord(p[position]), mapping)]
        except KeyError:
            replacement = unicode_call_errorhandler(
                errors, "charmap", "character maps to <undefined>",
                p, position, position+1, False)
            try:
                output += [charmapencode_output(ord(y), mapping)
                           for y in replacement[0]]
            except KeyError:
                raise UnicodeEncodeError("charmap", p, position, position+1,
                                         "character maps to <undefined>")
        position += 1
    return output
def PyUnicode_DecodeCharmap(s, size, mapping, errors):
    """Decode byte string `s` through `mapping` (byte ordinal ->
    integer code point, unicode string, or None for "undefined").

    A mapping of None decodes as plain Latin-1.  Undefined bytes are
    reported to the error handler and its replacement is spliced in.
    Returns a list of unicode characters.

    NOTE(review): the loop bound uses len(s) while the empty-input
    check uses `size`; sibling decoders here use `size` throughout --
    confirm whether callers ever pass size != len(s).
    """
    ## /* Default to Latin-1 */
    if mapping is None:   # fix: identity comparison instead of `== None`
        return PyUnicode_DecodeLatin1(s, size, errors)
    if (size == 0):
        return u''
    p = []
    inpos = 0
    while (inpos< len(s)):
        #/* Get mapping (char ordinal -> integer, Unicode char or None) */
        ch = s[inpos]
        try:
            x = mapping[ord(ch)]
            if isinstance(x, int):
                if x < 65536:
                    p += unichr(x)
                else:
                    raise TypeError("character mapping must be in range(65536)")
            elif isinstance(x, unicode):
                p += x
            elif not x:
                # None (or another false non-int, non-unicode value):
                # treat as undefined, same as a missing key
                raise KeyError
            else:
                raise TypeError
        except KeyError:
            x = unicode_call_errorhandler(errors, "charmap",
                   "character maps to <undefined>", s, inpos, inpos+1)
            p += x[0]
        inpos += 1
    return p
def PyUnicode_DecodeRawUnicodeEscape(s, size, errors):
    """Decode `s` with the raw-unicode-escape codec: only \\uXXXX and
    \\UXXXXXXXX escapes preceded by an odd number of backslashes are
    interpreted; everything else (including the backslashes themselves)
    passes through literally.  Returns a list of unicode characters.
    """
    if (size == 0):
        return u''
    pos = 0
    p = []
    while (pos < len(s)):
        ch = s[pos]
        #/* Non-escape characters are interpreted as Unicode ordinals */
        if (ch != '\\'):
            p += unichr(ord(ch))
            pos += 1
            continue
        # NOTE(review): `startinpos` is assigned but never used below.
        startinpos = pos
        ## /* \u-escapes are only interpreted iff the number of leading
        ##    backslashes is odd */
        # copy the whole backslash run into the output as we scan it
        bs = pos
        while pos < size:
            if (s[pos] != '\\'):
                break
            p += unichr(ord(s[pos]))
            pos += 1
        # NOTE(review): when pos >= size here the condition short-circuits
        # true, but the body still reads s[pos] -- IndexError for input
        # ending in backslashes; confirm against callers/tests.
        if (((pos - bs) & 1) == 0 or
            pos >= size or
            (s[pos] != 'u' and s[pos] != 'U')) :
            p += unichr(ord(s[pos]))
            pos += 1
            continue
        # odd backslash count and a following u/U: drop the last copied
        # backslash and interpret the escape
        p.pop(-1)
        if s[pos] == 'u':
            count = 4
        else:
            count = 8
        pos += 1
        #/* \uXXXX with 4 hex digits, \Uxxxxxxxx with 8 */
        x = 0
        try:
            x = int(s[pos:pos+count], 16)
        except ValueError:
            # NOTE(review): other call sites pass (s, startinpos, endinpos)
            # -- the extra `size` argument before `pos` here looks like a
            # mistaken argument list; confirm against
            # unicode_call_errorhandler's signature.
            res = unicode_call_errorhandler(
                    errors, "rawunicodeescape", "truncated \\uXXXX",
                    s, size, pos, pos+count)
            p += res[0]
            pos = res[1]
        else:
            #ifndef Py_UNICODE_WIDE
            if sys.maxunicode > 0xffff:
                # wide build: anything up to sys.maxunicode is acceptable
                if (x > sys.maxunicode):
                    res = unicode_call_errorhandler(
                        errors, "rawunicodeescape", "\\Uxxxxxxxx out of range",
                        s, size, pos, pos+1)
                    pos = res[1]
                    p += res[0]
                else:
                    p += unichr(x)
                    pos += count
            else:
                # narrow build
                if (x > 0x10000):
                    res = unicode_call_errorhandler(
                        errors, "rawunicodeescape", "\\Uxxxxxxxx out of range",
                        s, size, pos, pos+1)
                    pos = res[1]
                    p += res[0]
                #endif
                else:
                    p += unichr(x)
                    pos += count
    return p
| Python |
from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
    """The _codecs module: every entry is implemented at application
    level in app_codecs (see appleveldefs); nothing is implemented at
    interpreter level."""
    appleveldefs = {
         '__doc__' :  'app_codecs.__doc__',
         '__name__' :  'app_codecs.__name__',
         'ascii_decode' :  'app_codecs.ascii_decode',
         'ascii_encode' :  'app_codecs.ascii_encode',
         'charbuffer_encode' :  'app_codecs.charbuffer_encode',
         'charmap_decode' :  'app_codecs.charmap_decode',
         'charmap_encode' :  'app_codecs.charmap_encode',
         'escape_decode' :  'app_codecs.escape_decode',
         'escape_encode' :  'app_codecs.escape_encode',
         'latin_1_decode' :  'app_codecs.latin_1_decode',
         'latin_1_encode' :  'app_codecs.latin_1_encode',
         'lookup' :  'app_codecs.lookup',
         'lookup_error' :  'app_codecs.lookup_error',
         'mbcs_decode' :  'app_codecs.mbcs_decode',
         'mbcs_encode' :  'app_codecs.mbcs_encode',
         'raw_unicode_escape_decode' :  'app_codecs.raw_unicode_escape_decode',
         'raw_unicode_escape_encode' :  'app_codecs.raw_unicode_escape_encode',
         'readbuffer_encode' :  'app_codecs.readbuffer_encode',
         'register' :  'app_codecs.register',
         'register_error' :  'app_codecs.register_error',
         'unicode_escape_decode' :  'app_codecs.unicode_escape_decode',
         'unicode_escape_encode' :  'app_codecs.unicode_escape_encode',
         'unicode_internal_decode' :  'app_codecs.unicode_internal_decode',
         'unicode_internal_encode' :  'app_codecs.unicode_internal_encode',
         'utf_16_be_decode' :  'app_codecs.utf_16_be_decode',
         'utf_16_be_encode' :  'app_codecs.utf_16_be_encode',
         'utf_16_decode' :  'app_codecs.utf_16_decode',
         'utf_16_encode' :  'app_codecs.utf_16_encode',
         'utf_16_ex_decode' :  'app_codecs.utf_16_ex_decode',
         'utf_16_le_decode' :  'app_codecs.utf_16_le_decode',
         'utf_16_le_encode' :  'app_codecs.utf_16_le_encode',
         'utf_7_decode' :  'app_codecs.utf_7_decode',
         'utf_7_encode' :  'app_codecs.utf_7_encode',
         'utf_8_decode' :  'app_codecs.utf_8_decode',
         'utf_8_encode' :  'app_codecs.utf_8_encode',
         'encode': 'app_codecs.encode',
         'decode': 'app_codecs.decode'
    }
    interpleveldefs = {
    }
| Python |
"""
self cloning, automatic path configuration
copy this into any subdirectory of pypy from which scripts need
to be run, typically all of the test subdirs.
The idea is that any such script simply issues
import autopath
and this will make sure that the parent directory containing "pypy"
is in sys.path.
If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
you can directly run it which will copy itself on all autopath.py files
it finds under the pypy root directory.
This module always provides these attributes:
pypydir pypy root directory path
this_dir directory where this autopath.py resides
"""
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised.

    Also aliases any already-imported top-level modules that live under
    the pypy root into their fully-qualified dotted names in
    sys.modules, so both spellings refer to the same module object."""
    import sys, os
    try:
        # normal case: this file was imported, __file__ is set
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # executed via exec/interactive: fall back to the script path
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # walk upwards until a directory named `part` is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)
    pypy_root = os.path.join(head, '')
    # move the parent of the pypy root to the front of sys.path
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    sys.path.insert(0, head)
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): basenames rarely contain a dot, so this filter
        # looks like it skips nearly everything -- confirm intended.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod
    # register the dotted aliases and hook them onto their parents
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)
    return partdir, this_dir
def __clone():
    """ clone master version of autopath.py into all subdirs

    Walks the whole pypy tree and overwrites every autopath.py whose
    contents differ from the master copy in pypy/tool.  May only be run
    from the master copy itself."""
    from os.path import join, walk
    if not this_dir.endswith(join('pypy','tool')):
        raise EnvironmentError("can only clone master version "
                               "'%s'" % join(pypydir, 'tool',_myname))
    def sync_walker(arg, dirname, fnames):
        # callback for os.path.walk; `arg` is the master file contents
        if _myname in fnames:
            fn = join(dirname, _myname)
            # NOTE(review): mode 'rwb+' is unusual, and rebinding `f` to a
            # second open() below means only the second handle is closed
            # by the finally clause -- confirm intended.
            f = open(fn, 'rwb+')
            try:
                if f.read() == arg:
                    print "checkok", fn
                else:
                    print "syncing", fn
                    f = open(fn, 'w')
                    f.write(arg)
            finally:
                f.close()
    s = open(join(pypydir, 'tool', _myname), 'rb').read()
    walk(pypydir, sync_walker, s)
# name under which this file is cloned into every subdirectory
_myname = 'autopath.py'
# set guaranteed attributes
pypydir, this_dir = __dirinfo('pypy')
# running the master copy directly re-clones it everywhere
if __name__ == '__main__':
    __clone()
| Python |
# empty | Python |
from pypy.interpreter.baseobjspace import Wrappable
from pypy.interpreter import baseobjspace, typedef, gateway
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.function import Function
from pypy.interpreter.error import OperationError
from pypy.objspace.std.listobject import W_ListObject
from pypy.objspace.std.tupleobject import W_TupleObject
from pypy.objspace.std.stringobject import W_StringObject
from pypy.objspace.std.dictobject import W_DictObject
from pypy.module._cslib.fd import _FiniteDomain
from pypy.rlib.cslib import rconstraint as rc
class W_AbstractConstraint(baseobjspace.Wrappable):
    """Wrappable wrapper around an interp-level rconstraint constraint."""
    def __init__(self, space, constraint):
        """`constraint` is the underlying rc.AbstractConstraint instance."""
        self.space = space
        assert isinstance( constraint, rc.AbstractConstraint )
        self.constraint = constraint
    # NOTE(review): the methods below read self._variables and call
    # self.revise, neither of which is set up in this class's __init__ --
    # presumably provided by subclasses or the wrapped constraint; confirm.
    def w_affected_variables(self):
        """ Return a list of all variables affected by this constraint """
        return self.space.wrap(self._variables)
    def affected_variables(self):
        # interp-level counterpart of w_affected_variables
        return self._variables
    def w_revise(self, w_domains):
        # unwrap {w_name: domain} into {str_name: domain} and revise
        assert isinstance(w_domains, W_DictObject)
        doms = {}
        spc = self.space
        for var, dom in w_domains.content.items():
            doms[spc.str_w(var)] = dom
        return self.space.newbool(self.revise(doms))
    def w_estimate_cost(self, w_domains):
        # cost estimate = product of the affected domains' sizes
        # NOTE(review): W_AbstractDomain is not imported in this module.
        assert isinstance(w_domains, W_DictObject)
        cost = 1
        doms = w_domains.content
        for var in self._variables:
            dom = doms[self.space.wrap(var)]
            assert isinstance(dom, W_AbstractDomain)
            cost = cost * dom.size()
        return self.space.newint(cost)

W_AbstractConstraint.typedef = typedef.TypeDef(
    "W_AbstractConstraint")
class _Expression(rc.Expression):
    """A constraint represented as a python expression."""
    def __init__(self, space, w_variables, w_formula, w_filter_func):
        """variables is a list of variables which appear in the formula
        formula is a python expression that will be evaluated as a boolean"""
        self.space = space
        # unwrap the app-level variable names into interp-level strings
        variables = []
        for w_var in space.unpackiterable( w_variables ):
            variables.append( space.str_w(w_var) )
        if len(variables)==0:
            raise OperationError( space.w_ValueError,
                                  space.wrap("need at least one variable") )
        rc.Expression.__init__(self, variables )
        self.formula = self.space.str_w(w_formula)
        # self.filter_func is a function taking keyword arguments and returning a boolean
        self.w_filter_func = w_filter_func
    def filter_func(self, kwargs):
        # Build app-level keyword arguments {name: wrapped value} from the
        # value *indices* in `kwargs`, then call the app-level filter.
        # NOTE(review): self.doms is not assigned in this class --
        # presumably set by the solver machinery before filtering; confirm.
        space = self.space
        w_kwargs = space.newdict()
        for var, value in kwargs.items():
            dom = self.doms[var]
            assert isinstance( dom, _FiniteDomain )
            w_val = dom.vlist[value]
            w_kwargs.content[space.wrap(var)] = w_val
        return space.is_true(space.call(self.w_filter_func,
                                        space.newlist([]),
                                        w_kwargs))
    def __repr__(self):
        return '<%s>' % self.formula
class _BinaryExpression(rc.BinaryExpression):
    """A constraint represented as a python expression.

    Specialization for exactly two variables; memoizes filter results
    per (value-index, value-index) pair in self.kwcache."""
    def __init__(self, space, w_variables, w_formula, w_filter_func):
        """variables is a list of variables which appear in the formula
        formula is a python expression that will be evaluated as a boolean"""
        self.space = space
        variables = []
        for w_var in space.unpackiterable( w_variables ):
            variables.append( space.str_w(w_var) )
        if len(variables)==0:
            raise OperationError( space.w_ValueError,
                                  space.wrap("need at least one variable") )
        rc.BinaryExpression.__init__(self, variables )
        self.formula = self.space.str_w(w_formula)
        # self.filter_func is a function taking keyword arguments and returning a boolean
        self.w_filter_func = w_filter_func
        self.kwcache = {}
    def filter_func(self, kwargs):
        # NOTE(review): self.doms is not assigned in this class --
        # presumably set by the solver machinery before filtering; confirm.
        space = self.space
        var1 = self._variables[0]
        var2 = self._variables[1]
        arg1 = kwargs[var1]
        arg2 = kwargs[var2]
        t = (arg1,arg2)
        if t in self.kwcache:
            # already evaluated for this pair of value indices
            return self.kwcache[t]
        w_kwargs = space.newdict()
        dom = self.doms[var1]
        w_val = dom.vlist[arg1]
        w_kwargs.content[space.wrap(var1)] = w_val
        dom = self.doms[var2]
        w_val = dom.vlist[arg2]
        w_kwargs.content[space.wrap(var2)] = w_val
        res = space.is_true(space.call(self.w_filter_func,
                                       space.newlist([]),
                                       w_kwargs))
        self.kwcache[t] = res
        return res
class W_Expression(W_AbstractConstraint):
    """Wrappable expression constraint; picks the two-variable
    specialization when exactly two variables are involved."""
    def __init__(self, space, w_variables, w_formula, w_filter_func):
        if space.int_w(space.len(w_variables)) == 2:
            constraint = _BinaryExpression(space, w_variables, w_formula, w_filter_func)
        else:
            constraint = _Expression(space, w_variables, w_formula, w_filter_func)
        W_AbstractConstraint.__init__(self, space, constraint)

W_Expression.typedef = typedef.TypeDef("W_Expression",
    W_AbstractConstraint.typedef)
def interp_make_expression(space, w_variables, w_formula, w_callable):
    """Create a new Expression constraint.

    W_Expression itself chooses the binary or the general implementation
    depending on the number of variables; this factory only validates
    that the formula is an application-level string.
    """
    if not isinstance(w_formula, W_StringObject):
        w_msg = space.wrap('formula must be a string.')
        raise OperationError(space.w_TypeError, w_msg)
    return W_Expression(space, w_variables, w_formula, w_callable)
#--- Alldistinct
class _AllDistinct(rc.AllDistinct):
    """Contraint: all values must be distinct"""
    def __init__(self, space, w_variables):
        # unwrap the app-level variable names into interp-level strings
        variables = []
        for w_var in space.unpackiterable( w_variables ):
            variables.append( space.str_w(w_var) )
        if len(variables)==0:
            raise OperationError( space.w_ValueError,
                                  space.wrap("need at least one variable") )
        rc.AllDistinct.__init__(self, variables)
class W_AllDistinct(W_AbstractConstraint):
    """Wrappable all-distinct constraint over a set of variables."""
    def __init__(self, space, w_variables):
        constraint = _AllDistinct(space, w_variables)
        W_AbstractConstraint.__init__(self, space, constraint)

W_AllDistinct.typedef = typedef.TypeDef(
    "W_AllDistinct", W_AbstractConstraint.typedef)
def make_alldistinct(space, w_variables):
    """Application-level factory: build and wrap an all-distinct
    constraint over `w_variables`."""
    w_constraint = W_AllDistinct(space, w_variables)
    return space.wrap(w_constraint)
| Python |
from pypy.rlib.cslib.rpropagation import Repository, Solver
import pypy.rlib.cslib.rdistributor as rd
from pypy.module._cslib import fd
from pypy.module._cslib.constraint import W_AbstractConstraint
from pypy.interpreter.error import OperationError
from pypy.interpreter import typedef, gateway, baseobjspace
from pypy.interpreter.gateway import interp2app
from pypy.objspace.std.intobject import W_IntObject
from pypy.objspace.std.listobject import W_ListObject
from pypy.objspace.std.tupleobject import W_TupleObject
from pypy.objspace.std.stringobject import W_StringObject
from pypy.objspace.std.dictobject import W_DictObject
class _Repository(Repository):
    """Interp-level repository built from wrapped domains/constraints."""
    def __init__(self, space, w_variables, w_domains, w_constraints):
        # let's just give everything unwrapped to our parent
        doms = {}
        for var, dom in w_domains.content.items():
            assert isinstance( dom, fd.W_FiniteDomain )
            assert isinstance( var, W_StringObject )
            doms[space.str_w(var)] = dom.domain
        constraints = []
        for w_constraint in space.unpackiterable( w_constraints ):
            if not isinstance( w_constraint, W_AbstractConstraint ):
                raise OperationError( space.w_TypeError,
                                      space.wrap("constraints needs to be a sequence of constraints" ) )
            constraints.append( w_constraint.constraint )
        Repository.__init__(self, doms, constraints)
class W_Repository(baseobjspace.Wrappable):
    """Wrappable holder for the interp-level _Repository."""
    def __init__(self, space, w_variables, w_domains, w_constraints):
        self.repo = _Repository(space, w_variables, w_domains, w_constraints)

W_Repository.typedef = typedef.TypeDef("W_Repository")
def make_repo(space, w_variables, w_domains, w_constraints):
    """Application-level factory for W_Repository.

    `w_domains` must be an application-level dict mapping variable
    names to finite domains.
    """
    if isinstance(w_domains, W_DictObject):
        return W_Repository(space, w_variables, w_domains, w_constraints)
    raise OperationError(space.w_TypeError,
                         space.wrap('domains must be a dictionary'))
class W_Solver(baseobjspace.Wrappable):
    """Wrappable constraint solver using the default distributor."""
    def __init__(self, space):
        self.space = space
        self.solver = Solver(rd.DefaultDistributor())
    def w_solve(self, w_repo, w_verbosity):
        """Solve the repository and return a wrapped list of solution
        dicts mapping variable names to domain values."""
        space = self.space
        if not isinstance(w_repo, W_Repository):
            raise OperationError(space.w_TypeError,
                                 space.wrap('first argument must be a repository.'))
        if not isinstance(w_verbosity, W_IntObject):
            raise OperationError(space.w_TypeError,
                                 space.wrap('second argument must be an int.'))
        # NOTE(review): _verb is stored but not read anywhere in this class.
        self._verb = w_verbosity.intval
        sols = self.solver.solve_all(w_repo.repo)
        sols_w = []
        for sol in sols:
            # translate {var: value-index} into an app-level dict of
            # {wrapped var name: wrapped domain value}
            w_dict = space.newdict()
            for var,value in sol.items():
                domain = w_repo.repo._domains[var]
                assert isinstance( domain, fd._FiniteDomain )
                w_var = space.wrap(var)
                w_value = domain.vlist[value]
                space.setitem( w_dict, w_var, w_value )
            sols_w.append( w_dict )
        return space.newlist(sols_w)

W_Solver.typedef = typedef.TypeDef(
    'W_Solver',
    solve = interp2app(W_Solver.w_solve))

def make_solver(space):
    # application-level factory for W_Solver
    return W_Solver(space)
| Python |
from pypy.interpreter.error import OperationError
from pypy.interpreter import typedef, gateway, baseobjspace
from pypy.interpreter.gateway import interp2app
from pypy.objspace.std.listobject import W_ListObject, W_TupleObject
from pypy.objspace.std.intobject import W_IntObject
from pypy.rlib.cslib import rdomain as rd
class _FiniteDomain(rd.BaseFiniteDomain):
    """
    Variable Domain with a finite set of possible values
    """
    def __init__(self, vlist, values):
        """vlist is the list of (wrapped) values in the domain;
        values is either None (all of vlist is live) or a dict of live
        indices into vlist, which is copied."""
        #assert isinstance(w_values, W_ListObject)
        self.vlist = vlist
        self._values = {}
        if values is None:
            # every index is initially part of the domain
            for k in range(len(vlist)):
                self._values[k] = True
        else:
            self._values = values.copy()
        self._changed = False
    def get_wvalues_in_rlist(self):
        # materialize the currently-live wrapped values
        w_vals = self.vlist
        return [w_vals[idx] for idx in self._values]
    def copy(self):
        return _FiniteDomain(self.vlist, self._values)
    def intersect(self, other):
        # keep only the values present in both domains
        # (quadratic `in` scan: values are wrapped objects, so a plain
        # membership test over the list is used)
        v1 = self.get_wvalues_in_rlist()
        v2 = other.get_wvalues_in_rlist()
        inter = [v for v in v1
                 if v in v2]
        return _FiniteDomain(inter, None)
class W_FiniteDomain(baseobjspace.Wrappable):
    """Wrappable finite domain built from an app-level list of values."""
    def __init__(self, w_values, values):
        assert isinstance(w_values, W_ListObject)
        self.domain = _FiniteDomain(w_values.wrappeditems, values)
def make_fd(space, w_values):
    """Application-level factory for W_FiniteDomain.

    NOTE(review): a tuple passes this check but W_FiniteDomain.__init__
    asserts W_ListObject, so tuples would fail one level down -- confirm
    which behaviour is intended."""
    if not isinstance(w_values, W_ListObject):
        if not isinstance(w_values, W_TupleObject):
            raise OperationError(space.w_TypeError,
                                 space.wrap('first argument must be a list.'))
    return W_FiniteDomain(w_values, None)

W_FiniteDomain.typedef = typedef.TypeDef(
    "W_FiniteDomain")
| Python |
# Package initialisation
from pypy.interpreter.mixedmodule import MixedModule
#print '_csp module'
class Module(MixedModule):
    """The _csp module: constraint-satisfaction primitives, all
    implemented at interpreter level (fd, constraint, propagation)."""
    appleveldefs = {
        }
    interpleveldefs = {
        'FiniteDomain' : 'fd.make_fd',
        'AllDistinct' : 'constraint.make_alldistinct',
        '_make_expression': 'constraint.interp_make_expression',
        'Repository' : 'propagation.make_repo',
        'Solver' : 'propagation.make_solver'
        }
| Python |
from pypy.interpreter.error import OperationError
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import ObjSpace, W_Root, NoneNotWrapped, interp2app
from pypy.interpreter.baseobjspace import Wrappable
from pypy.rlib.rarithmetic import r_uint, intmask
from pypy.rlib import rrandom
import time
def descr_new__(space, w_subtype, w_anything=None):
    """Implement Random.__new__: allocate an instance of the requested
    subtype, narrow it to W_Random, initialize and wrap it."""
    w_obj = space.allocate_instance(W_Random, w_subtype)
    w_obj = space.interp_w(W_Random, w_obj)
    W_Random.__init__(w_obj, space, w_anything)
    return space.wrap(w_obj)
class W_Random(Wrappable):
    """Wrappable Mersenne-Twister random number generator backed by
    pypy.rlib.rrandom."""
    def __init__(self, space, w_anything):
        self._rnd = rrandom.Random()
        self.seed(space, w_anything)
    __init__.unwrap_spec = ['self', ObjSpace, W_Root]
    def random(self, space):
        """Return the next random float in [0.0, 1.0)."""
        return space.newfloat(self._rnd.random())
    random.unwrap_spec = ['self', ObjSpace]
    def seed(self, space, w_n=None):
        """Seed from `w_n` (int/long: absolute value; other objects:
        their hash; None: current time), split into 32-bit chunks."""
        if w_n is None:
            w_n = space.newint(int(time.time()))
        else:
            if space.is_true(space.isinstance(w_n, space.w_int)):
                w_n = space.abs(w_n)
            elif space.is_true(space.isinstance(w_n, space.w_long)):
                w_n = space.abs(w_n)
            else:
                # XXX not perfectly like CPython
                w_n = space.abs(space.hash(w_n))
        # chop the (possibly huge) integer into 32-bit chunks,
        # least-significant first, to feed init_by_array
        key = []
        w_one = space.newint(1)
        w_two = space.newint(2)
        w_thirtytwo = space.newint(32)
        # 0xffffffff
        w_masklower = space.sub(space.pow(w_two, w_thirtytwo, space.w_None),
                                w_one)
        while space.is_true(w_n):
            w_chunk = space.and_(w_n, w_masklower)
            chunk = space.uint_w(w_chunk)
            key.append(chunk)
            w_n = space.rshift(w_n, w_thirtytwo)
        if not key:
            key = [r_uint(0)]
        self._rnd.init_by_array(key)
    seed.unwrap_spec = ['self', ObjSpace, W_Root]
    def getstate(self, space):
        """Return the generator state: the N state words plus the index."""
        state = [None] * (rrandom.N + 1)
        for i in range(rrandom.N):
            state[i] = space.newint(intmask(self._rnd.state[i]))
        state[rrandom.N] = space.newint(self._rnd.index)
        return space.newtuple(state)
    getstate.unwrap_spec = ['self', ObjSpace]
    def setstate(self, space, w_state):
        """Restore a state previously produced by getstate()."""
        if not space.is_true(space.isinstance(w_state, space.w_tuple)):
            errstring = space.wrap("state vector must be tuple")
            raise OperationError(space.w_TypeError, errstring)
        if space.int_w(space.len(w_state)) != rrandom.N + 1:
            errstring = space.wrap("state vector is the wrong size")
            raise OperationError(space.w_ValueError, errstring)
        w_zero = space.newint(0)
        # independent of platfrom, since the below condition is only
        # true on 32 bit platforms anyway
        w_add = space.pow(space.newint(2), space.newint(32), space.w_None)
        for i in range(rrandom.N):
            # getstate stored the words via intmask, so negative values
            # are re-mapped back into the unsigned 32-bit range
            w_item = space.getitem(w_state, space.newint(i))
            if space.is_true(space.lt(w_item, w_zero)):
                w_item = space.add(w_item, w_add)
            self._rnd.state[i] = space.uint_w(w_item)
        w_item = space.getitem(w_state, space.newint(rrandom.N))
        self._rnd.index = space.int_w(w_item)
    setstate.unwrap_spec = ['self', ObjSpace, W_Root]
    def jumpahead(self, n):
        """Advance the underlying generator by n steps."""
        self._rnd.jumpahead(n)
    jumpahead.unwrap_spec = ['self', int]
    def getrandbits(self, space, k):
        """Return a wrapped integer with k random bits."""
        if k <= 0:
            strerror = space.wrap("number of bits must be greater than zero")
            raise OperationError(space.w_ValueError, strerror)
        # number of bytes needed, rounded up to whole 32-bit words
        # (note: the local name `bytes` shadows the builtin)
        bytes = ((k - 1) // 32 + 1) * 4
        bytesarray = [0] * bytes
        for i in range(0, bytes, 4):
            r = self._rnd.genrand32()
            if k < 32:
                # final partial word: drop the excess high bits
                r >>= (32 - k)
            bytesarray[i + 0] = r & r_uint(0xff)
            bytesarray[i + 1] = (r >> 8) & r_uint(0xff)
            bytesarray[i + 2] = (r >> 16) & r_uint(0xff)
            bytesarray[i + 3] = (r >> 24) & r_uint(0xff)
            k -= 32
        # XXX so far this is quadratic
        # assemble the bytes (little-endian) into one app-level integer
        w_result = space.newint(0)
        w_eight = space.newint(8)
        for i in range(len(bytesarray) - 1, -1, -1):
            byte = bytesarray[i]
            w_result = space.or_(space.lshift(w_result, w_eight),
                                 space.newint(intmask(byte)))
        return w_result
    getrandbits.unwrap_spec = ['self', ObjSpace, int]

W_Random.typedef = TypeDef("W_Random",
    __new__ = interp2app(descr_new__),
    random = interp2app(W_Random.random),
    seed = interp2app(W_Random.seed),
    getstate = interp2app(W_Random.getstate),
    setstate = interp2app(W_Random.setstate),
    jumpahead = interp2app(W_Random.jumpahead),
    getrandbits = interp2app(W_Random.getrandbits),
)
| Python |
import py
from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
    """The _random module: exposes the interp-level W_Random type."""
    appleveldefs = {}
    interpleveldefs = {
        'Random'        : 'interp_random.W_Random',
    }
| Python |
# this is a sketch of how one might one day be able to define a pretty simple
# ctypes-using module, suitable for feeding to the ext-compiler
from pypy.interpreter.baseobjspace import ObjSpace
from pypy.module.readline import c_readline
#------------------------------------------------------------
# exported API (see interpleveldefs in __init__.py)
#
def readline(space, prompt):
    # Display `prompt` and return one line read via GNU readline.
    return space.wrap(c_readline.c_readline(prompt))
readline.unwrap_spec = [ObjSpace, str]
def setcompleter(space, w_callback):
    """Set or remove the completer function.
    The function is called as function(text, state),
    for state in 0, 1, 2, ..., until it returns a non-string.
    It should return the next possible completion starting with 'text'.

    Currently a no-op placeholder.
    """
    # XXX set internal completion function
| Python |
from ctypes import *
from pypy.rpython.rctypes.tool.ctypes_platform import configure, Library
from pypy.interpreter.error import OperationError
from pypy.interpreter.gateway import ObjSpace, interp2app
#------------------------------------------------------------
# configuration for binding to external readline library
# through rctypes
#
class CConfig:
    """rctypes platform-configuration description for libreadline."""
    _header_ = ""
    # headers needed to resolve the readline/history symbols
    _includes_ = ["readline/readline.h", "readline/history.h"]
    readline = Library('readline')
# resolve the library through the rctypes platform configuration
cconfig = configure(CConfig)
libreadline = cconfig['readline']

# get a binding to c library functions and define their args and return types

# char *readline(char *)
c_readline = libreadline.readline
c_readline.argtypes = [c_char_p]
c_readline.restype = c_char_p

# void rl_initiliaze(void)
c_rl_initialize = libreadline.rl_initialize
c_rl_initialize.argtypes = []
c_rl_initialize.restype = None

# void using_history(void)
c_using_history = libreadline.using_history
c_using_history.argtypes = []
c_using_history.restype = None

# void add_history(const char *)
c_add_history = libreadline.add_history
c_add_history.argtypes = [c_char_p]
c_add_history.restype = None
#------------------------------------------------------------
# special initialization of readline
class ReadlineState(object):
    # last line added to history, used to suppress consecutive duplicates
    lastline = ""        # XXX possibly temporary hack

# single module-level instance holding the readline session state
readlinestate = ReadlineState()
def setup_readline(space, w_module):
    # Initialize libreadline and its history machinery.
    c_using_history()
    # XXX CPython initializes more stuff here
    c_rl_initialize()
    # install sys.__raw_input__, a hook that will be used by raw_input()
    space.setitem(space.sys.w_dict, space.wrap('__raw_input__'),
                  space.wrap(app_readline_func))
def readline_func(space, prompt):
    # Read one line via readline; EOF (NULL return) raises EOFError.
    res = c_readline(prompt)
    if res is None:
        raise OperationError(space.w_EOFError, space.w_None)
    # add non-empty, non-repeated lines to the history
    if res and res != readlinestate.lastline:
        readlinestate.lastline = res
        c_add_history(res)
    return space.wrap(res)
readline_func.unwrap_spec = [ObjSpace, str]

# app-level callable installed as sys.__raw_input__ by setup_readline()
app_readline_func = interp2app(readline_func)
| Python |
# NOT_RPYTHON
def stub(*args, **kwds):
    """Placeholder for a not-yet-implemented readline function; accepts
    anything, warns, and returns None."""
    import warnings
    warnings.warn("the 'readline' module is only a stub so far")
def stub_str(*args, **kwds):
    """Stub variant for readline APIs whose real return type is a string."""
    stub()
    result = ''
    return result
def stub_int(*args, **kwds):
stub()
return 0
| Python |
# this is a sketch of how one might one day be able to define a pretty simple
# ctypes-using module, suitable for feeding to the ext-compiler
from pypy.interpreter.mixedmodule import MixedModule
# XXX raw_input needs to check for space.readline_func and use
# it if its there
class Module(MixedModule):
    """Importing this module enables command line editing using GNU readline."""
    # the above line is the doc string of the translated module

    def setup_after_space_initialization(self):
        # bind to the C readline library and install the raw_input hook
        # once the object space is fully built (see c_readline.py)
        from pypy.module.readline import c_readline
        c_readline.setup_readline(self.space, self)

    # the only genuinely implemented entry point, at interp-level
    interpleveldefs = {
        'readline'    : 'interp_readline.readline',
    }

    # everything else is a warning-emitting stub (see app_stub.py):
    # stub returns None, stub_str returns '', stub_int returns 0
    appleveldefs = {
        'parse_and_bind':     'app_stub.stub',
        'get_line_buffer':    'app_stub.stub_str',
        'insert_text':        'app_stub.stub',
        'read_init_file':     'app_stub.stub',
        'read_history_file':  'app_stub.stub',
        'write_history_file': 'app_stub.stub',
        'clear_history':      'app_stub.stub',
        'get_history_length': 'app_stub.stub_int',
        'set_history_length': 'app_stub.stub',
        'get_current_history_length': 'app_stub.stub_int',
        'get_history_item':   'app_stub.stub_str',
        'remove_history_item': 'app_stub.stub',
        'replace_history_item': 'app_stub.stub',
        'redisplay':          'app_stub.stub',
        'set_startup_hook':   'app_stub.stub',
        'set_pre_input_hook': 'app_stub.stub',
        'set_completer':      'app_stub.stub',
        'get_completer':      'app_stub.stub',
        'get_begidx':         'app_stub.stub_int',
        'get_endidx':         'app_stub.stub_int',
        'set_completer_delims': 'app_stub.stub',
        'get_completer_delims': 'app_stub.stub_str',
        'add_history':        'app_stub.stub',
    }
| Python |
#
| Python |
import py
class Directory(py.test.collect.Directory):
    # py.test collection hook: skip all tests in this directory when
    # ctypes is unavailable, since the readline binding is built on it.
    def run(self):
        try:
            import ctypes
        except ImportError:
            py.test.skip("these tests need ctypes installed")
        return super(Directory, self).run()
| Python |
class error(Exception):
    # exposed at app-level as select.error (see __init__.py)
    pass
def as_fd(f):
    # Convert *f* to a non-negative int file descriptor.  *f* may be an
    # int/long already, or any object with a fileno() method.  The error
    # messages appear to mirror CPython's select module -- presumably
    # intentional; confirm before changing them.
    if not isinstance(f, (int, long)):
        try:
            # attribute probe only; the actual call happens below
            fileno = f.fileno
        except AttributeError:
            raise TypeError("argument must be an int, or have a fileno() method.")
        f = f.fileno()
        if not isinstance(f, (int, long)):
            raise TypeError("fileno() returned a non-integer")
    fd = int(f)
    # a long surviving int() means the value does not fit a plain int
    if fd < 0 or isinstance(fd, long):
        raise ValueError("file descriptor cannot be a negative integer (%i)"%fd)
    return fd
def select(iwtd, owtd, ewtd, timeout=None):
    """Wait until one or more file descriptors are ready for some kind of I/O.
The first three arguments are sequences of file descriptors to be waited for:
rlist -- wait until ready for reading
wlist -- wait until ready for writing
xlist -- wait for an ``exceptional condition''
If only one kind of condition is required, pass [] for the other lists.

A file descriptor is either a socket or file object, or a small integer
gotten from a fileno() method call on one of those.

The optional 4th argument specifies a timeout in seconds; it may be
a floating point number to specify fractions of seconds.  If it is absent
or None, the call will never time out.

The return value is a tuple of three lists corresponding to the first three
arguments; each contains the subset of the corresponding file descriptors
that are ready.

*** IMPORTANT NOTICE ***
On Windows, only sockets are supported; on Unix, all file descriptors.
"""
    # app-level emulation of select() on top of poll()
    from select import poll, POLLIN, POLLOUT, POLLPRI, POLLERR, POLLHUP
    # NOTE(review): 'iwtd + owtd + ewtd' assumes the arguments are lists;
    # CPython's select() accepts arbitrary sequences -- confirm.
    fddict = {}
    polldict = {}
    fd = 0
    # convert each object to its fd exactly once, keyed by identity
    for f in iwtd + owtd + ewtd:
        fddict[id(f)] = as_fd(f)
    # accumulate the poll() event mask per fd over the three lists
    # (the same fd may appear in several lists)
    for f in iwtd:
        fd = fddict[id(f)]
        polldict[fd] = polldict.get(fd, 0) | POLLIN
    for f in owtd:
        fd = fddict[id(f)]
        polldict[fd] = polldict.get(fd, 0) | POLLOUT
    for f in ewtd:
        fd = fddict[id(f)]
        polldict[fd] = polldict.get(fd, 0) | POLLPRI
    p = poll()
    for fd, mask in polldict.iteritems():
        p.register(fd, mask)
    if timeout is not None:
        if (not hasattr(timeout, '__int__') and
            not hasattr(timeout, '__float__')):
            raise TypeError('timeout must be a float or None')
        # poll() takes milliseconds, select() takes seconds
        ret = dict(p.poll(int(float(timeout) * 1000)))
    else:
        ret = dict(p.poll())
    # map the results back to the original objects; a hung-up fd counts
    # as readable, an error condition as exceptional
    iretd = [ f for f in iwtd if ret.get(fddict[id(f)], 0) & (POLLIN|POLLHUP)]
    oretd = [ f for f in owtd if ret.get(fddict[id(f)], 0) & POLLOUT]
    eretd = [ f for f in ewtd if ret.get(fddict[id(f)], 0) & (POLLERR|POLLPRI)]
    return iretd, oretd, eretd
| Python |
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.baseobjspace import Wrappable
from pypy.interpreter.gateway import W_Root, ObjSpace, interp2app
from pypy.rlib import _rsocket_ctypes as _c
from ctypes import POINTER, byref
from pypy.rpython.rctypes.aerrno import geterrno
from pypy.interpreter.error import OperationError
defaultevents = _c.POLLIN | _c.POLLOUT | _c.POLLPRI
def poll(space):
    """Returns a polling object, which supports registering and
unregistering file descriptors, and then polling them for I/O events."""
    # factory exposed at app-level as select.poll(); 'space' is unused here
    return Poll()
def as_fd_w(space, w_fd):
if not space.is_true(space.isinstance(w_fd, space.w_int)):
try:
w_fileno = space.getattr(w_fd, space.wrap('fileno'))
except OperationError, e:
if e.match(space, space.w_AttributeError):
raise OperationError(space.w_TypeError,
space.wrap("argument must be an int, or have a fileno() method."))
raise
w_fd = space.call_function(w_fileno)
if not space.is_true(space.isinstance(w_fd, space.w_int)):
raise OperationError(space.w_TypeError,
space.wrap('filneo() return a non-integer'))
fd = space.int_w(w_fd)
if fd < 0:
raise OperationError(space.w_ValueError,
space.wrap("file descriptor cannot be a negative integer (%d)"%fd))
return fd
class Poll(Wrappable):
    # Interp-level implementation of select.poll() objects.  The mapping
    # fd -> requested event mask lives in self.fddict; the poll() method
    # has two implementations chosen at import time: the C poll() function
    # on Unix, or a win32 emulation on top of WSAEventSelect.

    def __init__(self):
        # fd (int) -> requested event mask (int)
        self.fddict = {}

    def register(self, space, w_fd, events=defaultevents):
        # register a descriptor; re-registering simply replaces the mask
        fd = as_fd_w(space, w_fd)
        self.fddict[fd] = events
    register.unwrap_spec = ['self', ObjSpace, W_Root, int]

    def unregister(self, space, w_fd):
        # remove a registered descriptor; app-level KeyError if unknown
        fd = as_fd_w(space, w_fd)
        try:
            del self.fddict[fd]
        except KeyError:
            raise OperationError(space.w_KeyError,
                                 space.wrap(fd))
    unregister.unwrap_spec = ['self', ObjSpace, W_Root]

    if hasattr(_c, 'poll'):
        def poll(self, space, w_timeout=None):
            # Unix implementation: build a C array of pollfd structures,
            # call poll(), and return a list of (fd, revents) tuples for
            # every fd that reported events.  Timeout is in milliseconds;
            # None maps to -1 (block indefinitely).
            if space.is_w(w_timeout, space.w_None):
                timeout = -1
            else:
                timeout = space.int_w(w_timeout)
            numfd = len(self.fddict)
            buf = _c.create_string_buffer(_c.sizeof(_c.pollfd) * numfd)
            pollfds = _c.cast(buf, POINTER(_c.pollfd))
            i = 0
            for fd, events in self.fddict.iteritems():
                pollfds[i].fd = fd
                pollfds[i].events = events
                i += 1
            # XXX Temporary hack for releasing the GIL
            GIL = space.threadlocals.getGIL()
            if GIL is not None: GIL.release()
            ret = _c.poll(pollfds, numfd, timeout)
            if GIL is not None: GIL.acquire(True)
            if ret < 0:
                # raise select.error(errno, strerror(errno))
                errno = geterrno()
                w_module = space.getbuiltinmodule('select')
                w_errortype = space.getattr(w_module, space.wrap('error'))
                message = _c.strerror(errno)
                raise OperationError(w_errortype,
                                     space.newtuple([space.wrap(errno),
                                                     space.wrap(message)]))
            retval_w = []
            for i in range(numfd):
                pollfd = pollfds[i]
                if pollfd.revents:
                    retval_w.append(space.newtuple([space.wrap(pollfd.fd),
                                                    space.wrap(pollfd.revents)]))
            return space.newlist(retval_w)

    elif hasattr(_c, 'WSAEventSelect'):
        # win32 implementation: emulate poll() with one WSA event object
        # per socket and WSAWaitForMultipleEvents; only sockets supported
        def poll(self, space, w_timeout=None):
            numfd = len(self.fddict)
            socketevents = _c.ARRAY(_c.WSAEVENT, numfd)()
            numevents = 0
            eventdict = {}
            for fd, events in self.fddict.iteritems():
                # select desired events
                wsaEvents = 0
                if events & _c.POLLIN:
                    wsaEvents |= _c.FD_READ | _c.FD_ACCEPT | _c.FD_CLOSE
                if events & _c.POLLOUT:
                    wsaEvents |= _c.FD_WRITE | _c.FD_CONNECT | _c.FD_CLOSE
                # if no events then ignore socket
                if wsaEvents == 0:
                    continue
                # select socket for desired events
                event = _c.WSACreateEvent()
                _c.WSAEventSelect(fd, event, wsaEvents)
                eventdict[fd] = event
                socketevents[numevents] = event
                numevents += 1
            # if no sockets then return immediately
            if numevents == 0:
                return space.newlist([])
            # prepare timeout
            if space.is_w(w_timeout, space.w_None):
                timeout = -1
            else:
                timeout = space.int_w(w_timeout)
            if timeout < 0:
                timeout = _c.INFINITE
            # XXX Temporary hack for releasing the GIL
            GIL = space.threadlocals.getGIL()
            if GIL is not None: GIL.release()
            ret = _c.WSAWaitForMultipleEvents(numevents, socketevents,
                                              False, timeout, False)
            if GIL is not None: GIL.acquire(True)
            if ret == _c.WSA_WAIT_TIMEOUT:
                return space.newlist([])
            if ret < 0: # WSA_WAIT_FAILED is unsigned...
                errno = _c.geterrno()
                w_module = space.getbuiltinmodule('select')
                w_errortype = space.getattr(w_module, space.wrap('error'))
                message = _c.socket_strerror(errno)
                raise OperationError(w_errortype,
                                     space.newtuple([space.wrap(errno),
                                                     space.wrap(message)]))
            # translate the per-socket WSA network events back into POLL*
            # bits for the (fd, revents) result tuples
            retval_w = []
            info = _c.WSANETWORKEVENTS()
            for fd, event in eventdict.iteritems():
                if _c.WSAEnumNetworkEvents(fd, event, byref(info)) < 0:
                    continue
                revents = 0
                if info.lNetworkEvents & _c.FD_READ:
                    revents |= _c.POLLIN
                if info.lNetworkEvents & _c.FD_ACCEPT:
                    revents |= _c.POLLIN
                if info.lNetworkEvents & _c.FD_WRITE:
                    revents |= _c.POLLOUT
                if info.lNetworkEvents & _c.FD_CONNECT:
                    if info.iErrorCode[_c.FD_CONNECT_BIT]:
                        revents |= _c.POLLERR
                    else:
                        revents |= _c.POLLOUT
                if info.lNetworkEvents & _c.FD_CLOSE:
                    if info.iErrorCode[_c.FD_CLOSE_BIT]:
                        revents |= _c.POLLERR
                    else:
                        # a clean close reports whatever was asked for
                        if self.fddict[fd] & _c.POLLIN:
                            revents |= _c.POLLIN
                        if self.fddict[fd] & _c.POLLOUT:
                            revents |= _c.POLLOUT
                if revents:
                    retval_w.append(space.newtuple([space.wrap(fd),
                                                    space.wrap(revents)]))
                _c.WSACloseEvent(event)
            return space.newlist(retval_w)

    poll.unwrap_spec = ['self', ObjSpace, W_Root]
# Build the app-visible methods of poll objects: each interp-level method
# above carries its own unwrap_spec, which interp2app needs; the asserts
# catch a method whose spec got out of sync with its signature.
pollmethods = {}
for methodname in 'register unregister poll'.split():
    method = getattr(Poll, methodname)
    assert hasattr(method,'unwrap_spec'), methodname
    assert method.im_func.func_code.co_argcount == len(method.unwrap_spec), methodname
    pollmethods[methodname] = interp2app(method, unwrap_spec=method.unwrap_spec)
Poll.typedef = TypeDef('select.poll', **pollmethods)
| Python |
# Package initialisation
from pypy.interpreter.mixedmodule import MixedModule
import sys
class Module(MixedModule):
    # Mixed interp/app-level 'select' module: select() and the error
    # class are app-level (app_select.py), poll() is interp-level
    # (interp_select.py).  No class docstring on purpose -- it would
    # become the app-level module docstring.

    appleveldefs = {
        'error'  : 'app_select.error',
        'select' : 'app_select.select',
    }

    interpleveldefs = {
        'poll'   : 'interp_select.poll',
    }

    def buildloaders(cls):
        # Export whichever of the platform's POLL* constants exist.
        # bug fix: the list used to contain POLLWEBAND, a misspelling of
        # POLLWRBAND, so that constant was silently never exported (the
        # 'if name in constants' guard hid the typo).
        constantnames = '''
            POLLIN POLLPRI POLLOUT POLLERR POLLHUP POLLNVAL
            POLLRDNORM POLLRDBAND POLLWRNORM POLLWRBAND POLLMSG'''.split()
        from pypy.rlib._rsocket_ctypes import constants
        for name in constantnames:
            if name in constants:
                value = constants[name]
                Module.interpleveldefs[name] = "space.wrap(%r)" % value
        super(Module, cls).buildloaders()
    buildloaders = classmethod(buildloaders)
| Python |
import math
from pypy.interpreter.error import OperationError
from pypy.interpreter.gateway import ObjSpace, W_Root, NoneNotWrapped
class State:
    # Per-space cache holding the wrapped constants math.e and math.pi;
    # instantiated once per object space via space.fromcache (see get()).
    def __init__(self, space):
        self.w_e = space.wrap(math.e)
        self.w_pi = space.wrap(math.pi)
def get(space):
    # return the per-space State instance, creating it on first use
    return space.fromcache(State)
def math1(space, f, x):
    """Apply the one-argument math function *f* to *x* and return the
    wrapped result, mapping C-level errors onto the app-level
    OverflowError ("math range error") / ValueError ("math domain
    error") that the math module raises."""
    try:
        result = f(x)
    except OverflowError:
        raise OperationError(space.w_OverflowError, space.wrap("math range error"))
    except ValueError:
        raise OperationError(space.w_ValueError, space.wrap("math domain error"))
    return space.wrap(result)
# specialize on the function argument: the annotator sees a separate
# copy of math1 for each C math function
math1._annspecialcase_ = 'specialize:arg(1)'
def math1_w(space, f, x):
    """Like math1(), but return the raw (unwrapped) result of f(x);
    used by callers such as frexp/modf that need to post-process the
    C-level value before wrapping it."""
    try:
        result = f(x)
    except OverflowError:
        raise OperationError(space.w_OverflowError, space.wrap("math range error"))
    except ValueError:
        raise OperationError(space.w_ValueError, space.wrap("math domain error"))
    return result
# one specialized copy per wrapped math function
math1_w._annspecialcase_ = 'specialize:arg(1)'
def math2(space, f, x, snd):
    """Two-argument counterpart of math1(): apply f(x, snd) and wrap the
    result, with the same OverflowError/ValueError translation."""
    try:
        result = f(x, snd)
    except OverflowError:
        raise OperationError(space.w_OverflowError, space.wrap("math range error"))
    except ValueError:
        raise OperationError(space.w_ValueError, space.wrap("math domain error"))
    return space.wrap(result)
# one specialized copy per wrapped math function
math2._annspecialcase_ = 'specialize:arg(1)'
# ____________________________________________________________
# Thin interp-level wrappers around the C math library, going through
# math1/math2 for uniform error handling.  The docstrings become the
# app-level docs of the 'math' module, so their text is left untouched;
# the unwrap_spec lines tell the gateway how to unwrap the app-level
# arguments.

def pow(space, x, y):
    """pow(x,y)

    Return x**y (x to the power of y).
    """
    return math2(space, math.pow, x, y)
pow.unwrap_spec = [ObjSpace, float, float]

def cosh(space, x):
    """cosh(x)

    Return the hyperbolic cosine of x.
    """
    return math1(space, math.cosh, x)
cosh.unwrap_spec = [ObjSpace, float]

def ldexp(space, x, i):
    """ldexp(x, i) -> x * (2**i)
    """
    return math2(space, math.ldexp, x, i)
ldexp.unwrap_spec = [ObjSpace, float, int]

def hypot(space, x, y):
    """hypot(x,y)

    Return the Euclidean distance, sqrt(x*x + y*y).
    """
    return math2(space, math.hypot, x, y)
hypot.unwrap_spec = [ObjSpace, float, float]

def tan(space, x):
    """tan(x)

    Return the tangent of x (measured in radians).
    """
    return math1(space, math.tan, x)
tan.unwrap_spec = [ObjSpace, float]

def asin(space, x):
    """asin(x)

    Return the arc sine (measured in radians) of x.
    """
    return math1(space, math.asin, x)
asin.unwrap_spec = [ObjSpace, float]

def fabs(space, x):
    """fabs(x)

    Return the absolute value of the float x.
    """
    return math1(space, math.fabs, x)
fabs.unwrap_spec = [ObjSpace, float]

def floor(space, x):
    """floor(x)

    Return the floor of x as a float.
    This is the largest integral value <= x.
    """
    return math1(space, math.floor, x)
floor.unwrap_spec = [ObjSpace, float]

def sqrt(space, x):
    """sqrt(x)

    Return the square root of x.
    """
    return math1(space, math.sqrt, x)
sqrt.unwrap_spec = [ObjSpace, float]

def frexp(space, x):
    """frexp(x)

    Return the mantissa and exponent of x, as pair (m, e).
    m is a float and e is an int, such that x = m * 2.**e.
    If x is 0, m and e are both 0.  Else 0.5 <= abs(m) < 1.0.
    """
    # math1_w keeps the raw (float, int) pair so each half can be
    # wrapped separately into the result tuple
    mant, expo = math1_w(space, math.frexp, x)
    return space.newtuple([space.wrap(mant), space.wrap(expo)])
frexp.unwrap_spec = [ObjSpace, float]

# conversion factor shared by degrees() and radians()
degToRad = math.pi / 180.0

def degrees(space, x):
    """degrees(x) -> converts angle x from radians to degrees
    """
    return space.wrap(x / degToRad)
degrees.unwrap_spec = [ObjSpace, float]
def _log_any(space, w_x, base):
    # Common implementation of log() and log10().
    # base is supposed to be positive or 0.0, which means we use e
    try:
        if space.is_true(space.isinstance(w_x, space.w_long)):
            # special case to support log(extremely-large-long):
            # delegate to the bigint's own log(), which avoids the
            # overflowing float conversion
            num = space.bigint_w(w_x)
            result = num.log(base)
        else:
            x = space.float_w(w_x)
            if base == 10.0:
                # use the dedicated C log10() for better precision
                result = math.log10(x)
            else:
                result = math.log(x)
                if base != 0.0:
                    # log_base(x) == log(x) / log(base)
                    den = math.log(base)
                    result /= den
    except OverflowError:
        raise OperationError(space.w_OverflowError,
                             space.wrap('math range error'))
    except ValueError:
        raise OperationError(space.w_ValueError,
                             space.wrap('math domain error'))
    return space.wrap(result)

def log(space, w_x, w_base=NoneNotWrapped):
    """log(x[, base]) -> the logarithm of x to the given base.

    If the base not specified, returns the natural logarithm (base e) of x.
    """
    if w_base is None:
        # base 0.0 is _log_any's marker for "natural logarithm"
        base = 0.0
    else:
        base = space.float_w(w_base)
        if base <= 0.0:
            # just for raising the proper errors
            return math1(space, math.log, base)
    return _log_any(space, w_x, base)
log.unwrap_spec = [ObjSpace, W_Root, W_Root]

def log10(space, w_x):
    """log10(x) -> the base 10 logarithm of x.
    """
    return _log_any(space, w_x, 10.0)
log10.unwrap_spec = [ObjSpace, W_Root]
# More one- and two-argument wrappers; as above, the docstrings are the
# app-level math module docs and are left untouched.

def fmod(space, x, y):
    """fmod(x,y)

    Return fmod(x, y), according to platform C.  x % y may differ.
    """
    return math2(space, math.fmod, x, y)
fmod.unwrap_spec = [ObjSpace, float, float]

def atan(space, x):
    """atan(x)

    Return the arc tangent (measured in radians) of x.
    """
    return math1(space, math.atan, x)
atan.unwrap_spec = [ObjSpace, float]

def ceil(space, x):
    """ceil(x)

    Return the ceiling of x as a float.
    This is the smallest integral value >= x.
    """
    return math1(space, math.ceil, x)
ceil.unwrap_spec = [ObjSpace, float]

def sinh(space, x):
    """sinh(x)

    Return the hyperbolic sine of x.
    """
    return math1(space, math.sinh, x)
sinh.unwrap_spec = [ObjSpace, float]

def cos(space, x):
    """cos(x)

    Return the cosine of x (measured in radians).
    """
    return math1(space, math.cos, x)
cos.unwrap_spec = [ObjSpace, float]

def tanh(space, x):
    """tanh(x)

    Return the hyperbolic tangent of x.
    """
    return math1(space, math.tanh, x)
tanh.unwrap_spec = [ObjSpace, float]

def radians(space, x):
    """radians(x) -> converts angle x from degrees to radians
    """
    return space.wrap(x * degToRad)
radians.unwrap_spec = [ObjSpace, float]

def sin(space, x):
    """sin(x)

    Return the sine of x (measured in radians).
    """
    return math1(space, math.sin, x)
sin.unwrap_spec = [ObjSpace, float]

def atan2(space, y, x):
    """atan2(y, x)

    Return the arc tangent (measured in radians) of y/x.
    Unlike atan(y/x), the signs of both x and y are considered.
    """
    return math2(space, math.atan2, y, x)
atan2.unwrap_spec = [ObjSpace, float, float]

def modf(space, x):
    """modf(x)

    Return the fractional and integer parts of x.  Both results carry the sign
    of x.  The integer part is returned as a real.
    """
    # math1_w keeps the raw (frac, int) pair so each half can be wrapped
    # separately into the result tuple
    frac, intpart = math1_w(space, math.modf, x)
    return space.newtuple([space.wrap(frac), space.wrap(intpart)])
modf.unwrap_spec = [ObjSpace, float]

def exp(space, x):
    """exp(x)

    Return e raised to the power of x.
    """
    return math1(space, math.exp, x)
exp.unwrap_spec = [ObjSpace, float]

def acos(space, x):
    """acos(x)

    Return the arc cosine (measured in radians) of x.
    """
    return math1(space, math.acos, x)
acos.unwrap_spec = [ObjSpace, float]
| Python |
# Package initialisation
from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
    # Interp-level 'math' module.  No class docstring on purpose -- it
    # would become the app-level module docstring (see the readline
    # Module in this tree for the pattern).

    # nothing is implemented at app-level
    appleveldefs = {
    }

    # e and pi come from the per-space State cache (interp_math.get);
    # everything else maps to an interp_math wrapper function
    interpleveldefs = {
        'e'       : 'interp_math.get(space).w_e',
        'pi'      : 'interp_math.get(space).w_pi',
        'pow'     : 'interp_math.pow',
        'cosh'    : 'interp_math.cosh',
        'ldexp'   : 'interp_math.ldexp',
        'hypot'   : 'interp_math.hypot',
        'tan'     : 'interp_math.tan',
        'asin'    : 'interp_math.asin',
        'fabs'    : 'interp_math.fabs',
        'floor'   : 'interp_math.floor',
        'sqrt'    : 'interp_math.sqrt',
        'frexp'   : 'interp_math.frexp',
        'degrees' : 'interp_math.degrees',
        'log'     : 'interp_math.log',
        'log10'   : 'interp_math.log10',
        'fmod'    : 'interp_math.fmod',
        'atan'    : 'interp_math.atan',
        'ceil'    : 'interp_math.ceil',
        'sinh'    : 'interp_math.sinh',
        'cos'     : 'interp_math.cos',
        'tanh'    : 'interp_math.tanh',
        'radians' : 'interp_math.radians',
        'sin'     : 'interp_math.sin',
        'atan2'   : 'interp_math.atan2',
        'modf'    : 'interp_math.modf',
        'exp'     : 'interp_math.exp',
        'acos'    : 'interp_math.acos',
    }
| Python |
# ONESHOT SCRIPT (probably can go away soon)
# to generate the mixed module 'math' (see same directory)
#
# Scrapes the "name(args)" signature out of the first docstring line of
# every callable in CPython's math module, then prints interp-level
# wrapper source to stdout and the matching Module dict entries to
# stderr.  Python 2 script; run manually, output pasted into the module.
import py
import math
import re
import sys

# extracts the "(arglist)" part of a docstring signature line
rex_arg = re.compile(".*\((.*)\).*")

if __name__ == '__main__':
    # prologue of the generated interp_math module
    print py.code.Source("""
        import math
        from pypy.interpreter.gateway import ObjSpace
    """)
    names = []
    for name, func in math.__dict__.items():
        if not callable(func):
            continue   # skip constants such as e and pi
        # first docstring line, minus any "-> result" part, is the signature
        sig = func.__doc__.split('\n')[0].strip()
        sig = sig.split('->')[0].strip()
        m = rex_arg.match(sig)
        assert m
        args = m.group(1)
        args = ", ".join(args.split(','))
        # turn "f(x, y)" into "f(space, x, y)" and normalize the commas
        sig = sig.replace('(', '(space,')
        sig = ", ".join(sig.split(','))
        argc = len(args.split(','))
        # every argument is unwrapped as a float
        unwrap_spec = ['ObjSpace']
        unwrap_spec += ['float'] * argc
        unwrap_spec = ", ".join(unwrap_spec)
        # re-indent the docstring body to sit inside the generated def
        doc = func.__doc__.replace('\n', '\n       ')
        print py.code.Source('''
            def %(sig)s:
                """%(doc)s
                """
                return space.wrap(math.%(name)s(%(args)s))
            %(name)s.unwrap_spec = [%(unwrap_spec)s]
        ''' % locals())
        names.append(name)
    # the Module skeleton goes to stderr so it can be redirected separately
    print >>sys.stderr, py.code.Source("""
        # Package initialisation
        from pypy.interpreter.mixedmodule import MixedModule

        class Module(MixedModule):
            appleveldefs = {
            }
            interpleveldefs = {
    """)
    for name in names:
        space = " " * (15-len(name))
        print >>sys.stderr, (
            "       %(name)r%(space)s: 'interp_math.%(name)s'," % locals())
    print >>sys.stderr, py.code.Source("""
            }
    """)
| Python |
# NOT_RPYTHON -- flowing results in
# AttributeError: << 'FlowObjSpace' object has no attribute 'w_AttributeError'
# XXX investigate!
"""
The 'sys' module.
"""
import sys
def excepthook(exctype, value, traceback):
    """Handle an exception by displaying it with a traceback on sys.stderr."""
    # the 'traceback' parameter shadows the stdlib module of the same
    # name, so bind the module under a different local name
    import traceback as tb_module
    tb_module.print_exception(exctype, value, traceback)
def exit(exitcode=0):
    """Exit the interpreter by raising SystemExit(exitcode).
If the exitcode is omitted or None, it defaults to zero (i.e., success).
If the exitcode is numeric, it will be used as the system exit status.
If it is another kind of object, it will be printed and the system
exit status will be one (i.e., failure)."""
    # note that we cannot use SystemExit(exitcode) here.
    # The comma version leads to an extra de-tupelizing
    # in normalize_exception, which is exactly like CPython's.
    # (Python 2 'raise type, value' syntax -- deliberate, do not "fix".)
    raise SystemExit, exitcode
def exitfunc():
    """Placeholder for sys.exitfunc(), which is called when PyPy exits."""

# NOTE(review): presumably the registry of shutdown handlers, populated
# and drained elsewhere in the interpreter -- confirm against callers.
pypy__exithandlers__ = {}
#import __builtin__
def getfilesystemencoding():
    """Return the encoding used to convert Unicode filenames in
    operating system filenames.
    """
    # mbcs on Windows, UTF-8 on OS X, and None (meaning "no special
    # filesystem encoding") everywhere else
    per_platform = {"win32": "mbcs", "darwin": "utf-8"}
    return per_platform.get(sys.platform)
def callstats():
    """Not implemented."""
    # None is also what CPython's sys.callstats() returns when the
    # interpreter was built without call profiling -- presumably why it
    # was chosen here; confirm.
    return None
# current default string encoding; mutated only by setdefaultencoding()
defaultencoding = 'ascii'

def getdefaultencoding():
    """Return the current default string encoding used by the Unicode
implementation."""
    return defaultencoding
def setdefaultencoding(encoding):
    """Set the current default string encoding used by the Unicode
implementation."""
    # validate the name first: codecs.lookup raises LookupError for an
    # unknown codec, leaving the current default untouched
    import codecs
    codecs.lookup(encoding)
    global defaultencoding
    defaultencoding = encoding
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.