code stringlengths 1 1.49M | vector listlengths 0 7.38k | snippet listlengths 0 7.38k |
|---|---|---|
"""
Tutorial - Sessions
Storing session data in CherryPy applications is very easy: cherrypy
provides a dictionary called "session" that represents the session
data for the current user. If you use RAM based sessions, you can store
any kind of object into that dictionary; otherwise, you are limited to
objects that can be pickled.
"""
import cherrypy
class HitCounter:
_cp_config = {'tools.sessions.on': True}
def index(self):
# Increase the silly hit counter
count = cherrypy.session.get('count', 0) + 1
# Store the new value in the session dictionary
cherrypy.session['count'] = count
# And display a silly hit count message!
return '''
During your current session, you've viewed this
page %s times! Your life is a patio of fun!
''' % count
index.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HitCounter(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(HitCounter(), config=tutconf)
| [
[
8,
0,
0.1136,
0.2045,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.25,
0.0227,
0,
0.66,
0.2,
639,
0,
1,
0,
0,
639,
0,
0
],
[
3,
0,
0.5,
0.3864,
0,
0.66,
0.... | [
"\"\"\"\nTutorial - Sessions\n\nStoring session data in CherryPy applications is very easy: cherrypy\nprovides a dictionary called \"session\" that represents the session\ndata for the current user. If you use RAM based sessions, you can store\nany kind of object into that dictionary; otherwise, you are limited to\... |
# This is used in test_config to test unrepr of "from A import B"
thing2 = object() | [
[
14,
0,
1,
0.3333,
0,
0.66,
0,
699,
3,
0,
0,
0,
186,
10,
1
]
] | [
"thing2 = object()"
] |
"""
Tutorial - Object inheritance
You are free to derive your request handler classes from any base
class you wish. In most real-world applications, you will probably
want to create a central base class used for all your pages, which takes
care of things like printing a common page header and footer.
"""
import cherrypy
class Page:
# Store the page title in a class attribute
title = 'Untitled Page'
def header(self):
return '''
<html>
<head>
<title>%s</title>
<head>
<body>
<h2>%s</h2>
''' % (self.title, self.title)
def footer(self):
return '''
</body>
</html>
'''
# Note that header and footer don't get their exposed attributes
# set to True. This isn't necessary since the user isn't supposed
# to call header or footer directly; instead, we'll call them from
# within the actually exposed handler methods defined in this
# class' subclasses.
class HomePage(Page):
# Different title for this page
title = 'Tutorial 5'
def __init__(self):
# create a subpage
self.another = AnotherPage()
def index(self):
# Note that we call the header and footer methods inherited
# from the Page class!
return self.header() + '''
<p>
Isn't this exciting? There's
<a href="./another/">another page</a>, too!
</p>
''' + self.footer()
index.exposed = True
class AnotherPage(Page):
title = 'Another Page'
def index(self):
return self.header() + '''
<p>
And this is the amazing second page!
</p>
''' + self.footer()
index.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HomePage(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(HomePage(), config=tutconf)
| [
[
8,
0,
0.0542,
0.0964,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1205,
0.012,
0,
0.66,
0.1429,
639,
0,
1,
0,
0,
639,
0,
0
],
[
3,
0,
0.2651,
0.2289,
0,
0.66,... | [
"\"\"\"\nTutorial - Object inheritance\n\nYou are free to derive your request handler classes from any base\nclass you wish. In most real-world applications, you will probably\nwant to create a central base class used for all your pages, which takes\ncare of things like printing a common page header and footer.\n\"... |
###**********************************************###
### Unique Keychain Python Code ###
### ECE 387, Miami University, Spring 2013 ###
### Created By: Andrew Heldt, Lee Mondini and ###
### Shiloh Womack ###
###**********************************************###
import serial, sys, feedparser, pprint, time, imaplib
#Settings - Change these to match your account details
USERNAME="387keychain"
PASSWORD="keychain2013"
SERIALPORT = 'COM4'
NO_MAIL = b'm'
YES_MAIL = b'n'
# Set up COM4 - Our arduino port
try:
ser = serial.Serial(SERIALPORT, 9600)
except serial.SerialException:
print ("no device connected - exiting")
sys.exit()
# Need to figure out how to get this to keep running forever
for num in range(0,10):
obj = imaplib.IMAP4_SSL('imap.gmail.com','993')
obj.login(USERNAME,PASSWORD)
obj.select()
newmails = len(obj.search(None, 'UnSeen')[1][0].split())
time.sleep(5)
# Output data to serial port
if newmails > 0:
ser.write(YES_MAIL)
else:
ser.write(NO_MAIL)
#print data to terminal
# Close serial port
ser.close()
| [
[
1,
0,
0.1667,
0.0238,
0,
0.66,
0,
601,
0,
6,
0,
0,
601,
0,
0
],
[
14,
0,
0.2381,
0.0238,
0,
0.66,
0.125,
157,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.2619,
0.0238,
0,
0... | [
"import serial, sys, feedparser, pprint, time, imaplib",
"USERNAME=\"387keychain\"",
"PASSWORD=\"keychain2013\"",
"SERIALPORT = 'COM4'",
"NO_MAIL = b'm'",
"YES_MAIL = b'n'",
"try:\n\tser = serial.Serial(SERIALPORT, 9600)\nexcept serial.SerialException:\n\tprint (\"no device connected - exiting\")\n\tsys... |
#!/usr/bin/python
import pyglet
from anim3d import *
#3d projection setup func
def setup_gl(dims):
global angle
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
gluPerspective(40, float(dims[0])/dims[1], 0.1, 100)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
gluLookAt(0, -20, 10, 0, 0, 0, 0, 1, 0)
#dummy timer func
def tfunc(dt):
pass
#Texture related stuff
img = pyglet.image.load('../../../../blendfiles/lowpoly_colored.tga')
tex = img.get_texture()
#model related stuff
model = Model('../../../../blendfiles/lowpoly_tris.txt')
obj = AnimObject()
obj.setModel(model)
obj.setAction("run")
#windowing/pyglet stuff
w = pyglet.window.Window(640, 480)
clock = pyglet.clock.schedule_interval(tfunc, 1/30.0)
fps_display = pyglet.clock.ClockDisplay()
#opengl init stuff
glClearColor(0.2, 0.2, 0.2, 1)
glEnable(GL_DEPTH_TEST)
@w.event
def on_draw():
w.clear()
setup_gl(w.get_size())
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, tex.id)
obj.renderObject()
glPushMatrix();
glTranslatef(-5, 5, 1);
obj.renderObject();
glTranslatef(10, 5, 1);
obj.renderObject();
glTranslatef(-5, 5, 1);
obj.renderObject();
glPopMatrix();
glDisable(GL_TEXTURE_2D)
obj.cur_frame += 1
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
fps_display.draw()
pyglet.app.run()
| [
[
1,
0,
0.0303,
0.0152,
0,
0.66,
0,
182,
0,
1,
0,
0,
182,
0,
0
],
[
1,
0,
0.0455,
0.0152,
0,
0.66,
0.0625,
507,
0,
1,
0,
0,
507,
0,
0
],
[
2,
0,
0.1667,
0.1667,
0,
... | [
"import pyglet",
"from anim3d import *",
"def setup_gl(dims):\n global angle\n\n glMatrixMode(GL_PROJECTION)\n glPushMatrix()\n glLoadIdentity()\n gluPerspective(40, float(dims[0])/dims[1], 0.1, 100)\n glMatrixMode(GL_MODELVIEW)",
" glMatrixMode(GL_PROJECTION)",
" glPushMatrix()",
... |
#!/usr/bin/python
import pyglet
from anim3d import *
#3d projection setup func
def setup_gl(dims):
global angle
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
gluPerspective(40, float(dims[0])/dims[1], 0.1, 100)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
gluLookAt(0, -20, 10, 0, 0, 0, 0, 1, 0)
#dummy timer func
def tfunc(dt):
pass
#Texture related stuff
img = pyglet.image.load('../../../../blendfiles/lowpoly_colored.tga')
tex = img.get_texture()
#model related stuff
model = Model('../../../../blendfiles/lowpoly_tris.txt')
obj = AnimObject()
obj.setModel(model)
obj.setAction("run")
#windowing/pyglet stuff
w = pyglet.window.Window(640, 480)
clock = pyglet.clock.schedule_interval(tfunc, 1/30.0)
fps_display = pyglet.clock.ClockDisplay()
#opengl init stuff
glClearColor(0.2, 0.2, 0.2, 1)
glEnable(GL_DEPTH_TEST)
@w.event
def on_draw():
w.clear()
setup_gl(w.get_size())
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, tex.id)
obj.renderObject()
glPushMatrix();
glTranslatef(-5, 5, 1);
obj.renderObject();
glTranslatef(10, 5, 1);
obj.renderObject();
glTranslatef(-5, 5, 1);
obj.renderObject();
glPopMatrix();
glDisable(GL_TEXTURE_2D)
obj.cur_frame += 1
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
fps_display.draw()
pyglet.app.run()
| [
[
1,
0,
0.0303,
0.0152,
0,
0.66,
0,
182,
0,
1,
0,
0,
182,
0,
0
],
[
1,
0,
0.0455,
0.0152,
0,
0.66,
0.0625,
507,
0,
1,
0,
0,
507,
0,
0
],
[
2,
0,
0.1667,
0.1667,
0,
... | [
"import pyglet",
"from anim3d import *",
"def setup_gl(dims):\n global angle\n\n glMatrixMode(GL_PROJECTION)\n glPushMatrix()\n glLoadIdentity()\n gluPerspective(40, float(dims[0])/dims[1], 0.1, 100)\n glMatrixMode(GL_MODELVIEW)",
" glMatrixMode(GL_PROJECTION)",
" glPushMatrix()",
... |
from pyglet.gl import *
import math
class Face:
def __init__(self, ind, uv):
self.indices = ind
self.uv = uv
class KeyFrame:
def __init__(self, fp, nv):
self.verts = []
self.frame_num = int(fp.readline())
for i in xrange(nv):
(v1, v2, v3) = fp.readline().split(' ')
self.verts.append(float(v1))
self.verts.append(float(v2))
self.verts.append(float(v3))
class Action:
def __init__(self, fp, nv):
(self.name, self.num_frames) = fp.readline().split(' ')
self.num_frames = int(self.num_frames)
self.kf = []
for j in xrange(self.num_frames):
k = KeyFrame(fp, nv)
self.kf.append(k)
#function to initialize values in a list.
def init_uv(v):
return -1
class Model:
def __init__(self, fname):
fp = open(fname)
(self.nv, self.nf, self.na) = fp.readline().split(' ')
self.nv = int(self.nv)
self.nf = int(self.nf)
self.na = int(self.na)
self.indices = ()
self.uv = range(0, 2*self.nv)
self.uv = map(init_uv, self.uv)
self.repeats = ()
#load the model data
for i in xrange(self.nf):
line = fp.readline().rstrip(' \n')
indx = map(int, line.split(' '))
for j in xrange(3):
(s, t) = map(float, fp.readline().split(' '))
if(self.uv[int(indx[j])*2] < 0):
self.uv[int(indx[j])*2] = s
self.uv[int(indx[j])*2+1] = t
elif math.fabs(self.uv[int(indx[j])*2]-s) < 0.0001 and math.fabs(self.uv[int(indx[j])*2+1]-t) < 0.0001:
self.repeats += (int(indx[j]),)
self.uv += [s, t]
indx[j] = self.nv + (len(self.repeats)-1)
self.indices += (indx[0], indx[1], indx[2])
#load the animation data
self.acts = []
for i in xrange(self.na):
act = Action(fp, self.nv)
self.acts.append(act)
fp.close()
print "Model: ", fname
print "Faces: %d Anims: %d" % (self.nf, len(self.acts))
for act in self.acts:
print act.name
def linear_interop(v, v1, t):
return v + t*(v1-v)
class AnimObject:
def __init__(self):
self.model = None
self.act = None
self.cur_frame = 0
def loadModel(self, fname):
pass
def setModel(self, model):
self.model = model
def setAction(self, actname):
self.act = None
for i in xrange(self.model.na):
a = self.model.acts[i]
if(actname == a.name):
self.act = a
self.cur_frame = a.kf[0].frame_num
if(self.act == None):
self.act = self.obj.model.acts[0]
def renderObject(self):
if(self.cur_frame > self.act.kf[self.act.num_frames-1].frame_num):
self.cur_frame = self.act.kf[0].frame_num+1
n_index = 0
while (self.cur_frame > self.act.kf[n_index].frame_num):
n_index += 1
index = n_index - 1
v = self.act.kf[index].verts
v1 = self.act.kf[n_index].verts
t = (self.cur_frame - self.act.kf[index].frame_num)/float(self.act.kf[n_index].frame_num - self.act.kf[index].frame_num)
dv = map(linear_interop, v, v1, (t, ) * len(v))
for re in self.model.repeats:
dv += (dv[re*3], dv[re*3+1], dv[re*3+2])
pyglet.graphics.draw_indexed(self.model.nv + len(self.model.repeats), pyglet.gl.GL_TRIANGLES, self.model.indices, ('v3f', dv), ('t2f', self.model.uv))
| [
[
1,
0,
0.0088,
0.0088,
0,
0.66,
0,
531,
0,
1,
0,
0,
531,
0,
0
],
[
1,
0,
0.0177,
0.0088,
0,
0.66,
0.125,
526,
0,
1,
0,
0,
526,
0,
0
],
[
3,
0,
0.0487,
0.0354,
0,
0... | [
"from pyglet.gl import *",
"import math",
"class Face:\n def __init__(self, ind, uv):\n self.indices = ind \n self.uv = uv",
" def __init__(self, ind, uv):\n self.indices = ind \n self.uv = uv",
" self.indices = ind",
" self.uv = uv",
"class KeyFrame:\n ... |
#!BPY
"""
Name: '3Danim (.txt)'
Blender: 243
Group: 'Export'
Tooltip: 'export an animated text format.'
"""
__author__ = 'V Vamsi Krishna'
__version__ = '0.1'
__url__ = ["3danim project, http://code.google.com/p/3danim",
"", "blender", "blenderartists.org"]
__email__ = ["V.Vamsi Krishna, vamsikrishna.v:gmail*com", "3danim export"]
__bpydoc__ = """This script Exports animated 3d models in 3danim text format."""
import Blender
from Blender import *
from Blender.Armature.NLA import *
from Blender.Scene import *
from Blender import Window
def call_back(filename):
if not(filename.endswith(".txt")):
filename = filename + ".txt"
try:
fp = open(filename, "r")
fp.close()
result = Draw.PupMenu("SaveOver?%t|Yes|No")
if result == 2:
return
except:
print "Creating File ", filename
pass
fp = open(filename, "w")
#mesh_objs = Blender.Object.GetSelected()
mesh_objs = Scene.GetCurrent().objects
mesh_obj = mesh_objs[0]
armature_obj = mesh_objs[0]
for obj in mesh_objs:
if obj.getType() == "Mesh":
mesh_obj = obj
elif obj.getType() == "Armature":
armature_obj = obj
# return
mesh = mesh_obj.getData(False, True)
mesh = mesh.__copy__()
num_verts = len(mesh.verts)
num_faces = len(mesh.faces)
actions = GetActions()
print num_verts, num_faces, len(actions.keys())
fp.write("%d %d %d\n" % (num_verts, num_faces, len(actions.keys())))
for face in mesh.faces:
buff = ""
for v in face.verts:
print v.index,
buff += v.index.__str__() + " "
print "\n",
fp.write(buff+"\n")
if mesh.faceUV:
for uv in face.uv:
fp.write('%.3f %.3f\n' % (uv[0], uv[1]))
#get the actions devined on the armature
#get their keyframes and print the vertex coords
#do a linear interpolation of the vertieces
for key in actions.keys():
act = actions[key]
act.setActive(armature_obj)
keyframes = act.getFrameNumbers()
print act.getName(), len(keyframes)
fp.write(act.getName() + " %d" % len(keyframes) + "\n")
for frame in keyframes:
Blender.Set("curframe", frame)
print frame
fp.write("%d\n" % frame)
mesh.getFromObject(mesh_obj.name)
#-------------------------------
#for face in mesh.faces:
# for v in face.verts:
# print v.index,
# print ""
#-------------------------------
for vert in mesh.verts:
fp.write('%.3f %.3f %.3f\n' % (vert.co[0], vert.co[1], vert.co[2]))
fp.close()
defaultFileName = Blender.Get('filename')
Window.FileSelector(call_back, '3Danim Export *.txt', defaultFileName.replace('.blend', '.txt'))
| [
[
8,
0,
0.0579,
0.0632,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1053,
0.0105,
0,
0.66,
0.0769,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.1158,
0.0105,
0,
0.66... | [
"\"\"\"\nName: '3Danim (.txt)'\nBlender: 243\nGroup: 'Export'\nTooltip: 'export an animated text format.'\n\"\"\"",
"__author__ = 'V Vamsi Krishna'",
"__version__ = '0.1'",
"__url__ = [\"3danim project, http://code.google.com/p/3danim\",\n \"\", \"blender\", \"blenderartists.org\"]",
"__email__ = [\"V.V... |
bl_info = {
"name": "Export 3DAnim Format(.txt)",
"author": "V.Vamsi Krishna(vkrishna)",
"version": (1, 0),
"blender": (2, 64, 0),
"api": 40000,
"location": "File > Export > 3DAnim (.txt)",
"description": "Export 3dAnim (.txt)",
"warning": "",
"category": "Import-Export"}
import bpy
from bpy.props import *
from bpy_extras.io_utils import ExportHelper
import math
def getFrameNumbers(act):
"""
Function to return the keyframes given the
action object. We assume and use only one fcurve.
"""
fc = act.fcurves[0]
kf = []
for key in fc.keyframe_points:
kf.append(math.ceil(key.co[0]))
return kf
def call_back(operator, context, filename):
if not(filename.endswith(".txt")):
filename = filename + ".txt"
fp = open(filename, "w")
mesh_objs = context.selected_objects
mesh_obj = mesh_objs[0]
modifier = mesh_obj.modifiers[0] #get modifier associated with object
armature_obj = context.scene.objects[modifier.name]
mesh = mesh_obj.to_mesh(context.scene, True, 'RENDER')
num_verts = len(mesh.vertices)
num_faces = len(mesh.polygons)
actions = bpy.data.actions #get all actions from scene. Assume one obj per scene.
uv_layer = mesh.uv_layers.active.data
#start writing the model faces
print (num_verts, num_faces, len(actions))
fp.write("%d %d %d\n" % (num_verts, num_faces, len(actions)))
for (i,face) in enumerate(mesh.polygons):
buff = ""
for v in face.vertices:
print (v, )
buff += v.__str__() + " "
print ("\n",)
fp.write(buff+"\n")
for loop_index in range(face.loop_start, face.loop_start + face.loop_total):
uv = uv_layer[loop_index].uv
fp.write('%.3f %.3f\n' % (uv[0], uv[1]))
#remove the mesh created above.
bpy.data.meshes.remove(mesh)
#get the actions devined on the armature
#get their keyframes and print the vertex coords
#do a linear interpolation of the vertieces
for act in actions:
armature_obj.animation_data.action = act
keyframes = getFrameNumbers(act)
print (act.name, len(keyframes))
fp.write(act.name + " %d" % len(keyframes) + "\n")
for frame in keyframes:
context.scene.frame_set(frame)
print (frame)
fp.write("%d\n" % frame)
mesh = mesh_obj.to_mesh(context.scene, True, 'RENDER')
for vert in mesh.vertices:
fp.write('%.3f %.3f %.3f\n' % (vert.co[0], vert.co[1], vert.co[2]))
bpy.data.meshes.remove(mesh)
#finished writing animation data
fp.close() #close file
return {'FINISHED'}
class Export3DAnimModel(bpy.types.Operator, ExportHelper):
bl_idname = "filename.txt"
bl_label = "3DAnim Model (.txt)"
filename_ext = ".txt"
def execute(self, context):
if not self.filepath:
raise Exception("Filepath not set")
return call_back(self, context, self.filepath)
def invoke(self, context, event):
if not self.filepath:
self.filepath = bpy.path.ensure_ext(bpy.data.filepath, ".txt")
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
def menu_func(self, context):
self.layout.operator(Export3DAnimModel.bl_idname)
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func)
if __name__ == "__main__":
register()
| [
[
14,
0,
0.0509,
0.0926,
0,
0.66,
0,
767,
0,
0,
0,
0,
0,
6,
0
],
[
1,
0,
0.1111,
0.0093,
0,
0.66,
0.0909,
573,
0,
1,
0,
0,
573,
0,
0
],
[
1,
0,
0.1204,
0.0093,
0,
0... | [
"bl_info = {\n \"name\": \"Export 3DAnim Format(.txt)\",\n \"author\": \"V.Vamsi Krishna(vkrishna)\",\n \"version\": (1, 0),\n \"blender\": (2, 64, 0),\n \"api\": 40000,\n \"location\": \"File > Export > 3DAnim (.txt)\",\n \"description\": \"Export 3dAnim (.txt)\",",
"import bpy",
"from bpy... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
from os import path
from Cheetah.Template import Template
def main():
file = path.join(path.split(__file__)[0], 'home.html')
print 'Compile template %s...' % file
cc = Template.compile(source=None, file=file, returnAClass=False, moduleName='autogen', className='CompiledTemplate')
target = path.join(path.split(__file__)[0], 'autogen', '__init__.py')
print 'Writing file %s...' % target
f = open(target, 'w')
f.write(cc)
f.close()
from autogen import CompiledTemplate
CompiledTemplate(searchList=[])
print 'Compiled ok.'
if __name__ == '__main__':
main()
| [
[
14,
0,
0.1739,
0.0435,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2609,
0.0435,
0,
0.66,
0.25,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.3043,
0.0435,
0,
0.6... | [
"__author__ = 'Michael Liao (askxuefeng@gmail.com)'",
"from os import path",
"from Cheetah.Template import Template",
"def main():\n file = path.join(path.split(__file__)[0], 'home.html')\n print('Compile template %s...' % file)\n cc = Template.compile(source=None, file=file, returnAClass=False, modu... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
import datetime
from xml.parsers.expat import ParserCreate
codes = {
0 : u'龙卷风', # tornado
1 : u'热带风暴', # tropical storm
2 : u'飓风', # hurricane
3 : u'风暴', # severe thunderstorms
4 : u'雷雨', # thunderstorms
5 : u'雨夹雪', # mixed rain and snow
6 : u'雨夹冰雹', # mixed rain and sleet
7 : u'雪夹冰雹', # mixed snow and sleet
8 : u'冰毛毛雨', # freezing drizzle
9 : u'毛毛雨', # drizzle
10 : u'冰雨', # freezing rain
11 : u'阵雨', # showers
12 : u'阵雨', # showers
13 : u'小雪', # snow flurries
14 : u'小雨雪', # light snow showers
15 : u'风雪', # blowing snow
16 : u'下雪', # snow
17 : u'冰雹', # hail
18 : u'雨夹雪', # sleet
19 : u'尘土', # dust
20 : u'雾', # foggy
21 : u'霾', # haze
22 : u'烟雾', # smoky
23 : u'狂风', # blustery
24 : u'大风', # windy
25 : u'寒冷', # cold
26 : u'多云', # cloudy
27 : u'多云', # mostly cloudy (night)
28 : u'多云', # mostly cloudy (day)
29 : u'局部多云', # partly cloudy (night)
30 : u'局部多云', # partly cloudy (day)
31 : u'晴朗', # clear (night)
32 : u'晴', # sunny
33 : u'晴朗', # fair (night)
34 : u'晴朗', # fair (day)
35 : u'雨夹冰雹', # mixed rain and hail
36 : u'炎热', # hot
37 : u'局部雷雨', # isolated thunderstorms
38 : u'零星雷雨', # scattered thunderstorms
39 : u'零星雷雨', # scattered thunderstorms
40 : u'零星阵雨', # scattered showers
41 : u'大雪', # heavy snow
42 : u'零星雨夹雪', # scattered snow showers
43 : u'大雪', # heavy snow
44 : u'局部多云', # partly cloudy
45 : u'雷阵雨', # thundershowers
46 : u'小雪', # snow showers
47 : u'局部雷雨', # isolated thundershowers
3200 : u'暂无数据' # not available
}
class Wind(object):
def __init__(self, chill, direction, speed):
self.chill = chill
self.direction = direction
self.speed = speed
def __str__(self):
return r'{"chill" : %s, "direction" : %s, "speed" : %s}' % (\
self.chill or "null",
self.direction or "null",
self.speed or "null"
)
__repr__ = __str__
class Atmosphere(object):
def __init__(self, humidity, visibility, pressure, rising):
self.humidity = humidity
self.visibility = visibility
self.pressure = pressure
self.rising = rising
def __str__(self):
return r'{"humidity" : %s, "visibility" : %s, "pressure" : %s, "rising": %s}' % (\
self.humidity or "null",
self.visibility or "null",
self.pressure or "null",
self.rising or "null"
)
__repr__ = __str__
class Astronomy(object):
def __init__(self, sunrise, sunset):
self.sunrise = sunrise
self.sunset = sunset
def __str__(self):
return r'{"sunrise" : "%s", "sunset": "%s"}' % (self.sunrise, self.sunset)
__repr__ = __str__
class Forecast(object):
'<yweather:forecast day="Wed" date="30 Jun 2010" low="24" high="30" text="Mostly Cloudy" code="28" />'
def __init__(self, day, date, low, high, code):
self.day = day
self.date = date
self.low = low
self.high = high
self.code = code
def __str__(self):
return '{"date" : "%s", "day" : %s, "code" : %s, "text" : "%s", "low" : %d, "high" : %d, "image_large" : "%s", "image_small" : "%s"}' % (
self.date, self.day, self.code, codes[self.code].encode('utf-8'), self.low, self.high,
"http://weather.china.xappengine.com/static/w/img/d%s.png" % self.code,
"http://weather.china.xappengine.com/static/w/img/s%s.png" % self.code,
)
__repr__ = __str__
def index_of(list, data):
for i, item in enumerate(list):
if data==item:
return i
return None
def get_day(day):
return index_of(('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'), day)
def get_date(date):
'30 Jun 2010'
ss = date.split(' ')
month = index_of(('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), ss[1])
return datetime.date(int(ss[2]), month, int(ss[0]))
def f2c(temp):
f = float(temp)
c = (f - 32) * 5 / 9 + 0.5
return int(c)
def to_24hour(time):
' convert "4:39 pm" to "16:39" '
if time.endswith(' am'):
return time[:-3]
if time.endswith(' pm'):
time = time[:-3]
n = time.find(':')
to_24h = int(time[:n]) + 12
return "%d:%s" % (to_24h, time[n+1:])
return time
class Weather(object):
def char_data(self, text):
if self.__isLastBuildDate:
n = text.find(', ')
text = text[n+2:]
n1 = text.find(' ')
n2 = text.find(' ', n1+1)
m = text[n1+1:n2]
month = index_of(('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), m)
text = text.replace(m, str(month))
if not text.endswith(' CST'):
return
text = text[:-4]
is_pm = text.endswith(' pm')
text = text[:-3]
time = datetime.datetime.strptime(text, '%d %m %Y %I:%M')
h = time.hour
if is_pm:
h = h + 12
self.pub = '%d-%#02d-%#02d %#02d:%#02d' % (time.year, time.month, time.day, h, time.minute)
def end_element(self, name):
if name=='lastBuildDate':
self.__isLastBuildDate = False
def start_element(self, name, attrs):
if name=='lastBuildDate':
self.__isLastBuildDate = True
return
if name=='yweather:forecast':
self.forecasts.append(Forecast(
get_day(attrs['day']),
get_date(attrs['date']),
f2c(attrs['low']),
f2c(attrs['high']),
int(attrs['code'])
))
if name=='yweather:astronomy':
self.astronomy.sunrise = to_24hour(attrs['sunrise'])
self.astronomy.sunset = to_24hour(attrs['sunset'])
if name=='yweather:atmosphere':
self.atmosphere.humidity = attrs['humidity']
self.atmosphere.visibility = attrs['visibility']
self.atmosphere.pressure = attrs['pressure']
self.atmosphere.rising = attrs['rising']
if name=='yweather:wind':
self.wind.chill = attrs['chill']
self.wind.direction = attrs['direction']
self.wind.speed = attrs['speed']
def __init__(self, name, data):
self.__isLastBuildDate = False
if isinstance(name, unicode):
name = name.encode('utf-8')
self.name = name
self.pub = None
self.wind = Wind(None, None, None)
self.atmosphere = Atmosphere(None, None, None, None)
self.astronomy = Astronomy(None, None)
self.forecasts = []
parser = ParserCreate()
parser.returns_unicode = False
parser.StartElementHandler = self.start_element
parser.EndElementHandler = self.end_element
parser.CharacterDataHandler = self.char_data
parser.Parse(data)
def __str__(self):
pub = 'null'
if self.pub:
pub = r'"%s"' % self.pub
return '{"pub" : %s, "name" : "%s", "wind" : %s, "astronomy" : %s, "atmosphere" : %s, "forecasts" : %s}' \
% (pub, self.name, self.wind, self.astronomy, self.atmosphere, self.forecasts)
__repr__ = __str__
if __name__=='__main__':
import urllib
url = 'http://weather.yahooapis.com/forecastrss?u=c&w=2143712'
result = urllib.urlopen(url).read()
print Weather(result)
| [
[
14,
0,
0.0172,
0.0043,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.0258,
0.0043,
0,
0.66,
0.0714,
426,
0,
1,
0,
0,
426,
0,
0
],
[
1,
0,
0.03,
0.0043,
0,
0.6... | [
"__author__ = 'Michael Liao (askxuefeng@gmail.com)'",
"import datetime",
"from xml.parsers.expat import ParserCreate",
"codes = {\n 0 : u'龙卷风', # tornado\n 1 : u'热带风暴', # tropical storm\n 2 : u'飓风', # hurricane\n 3 : u'风暴', # severe thunderstorms\n 4 : u'雷雨', # thunderstorms... |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
##################################################
## DEPENDENCIES
import sys
import os
import os.path
import __builtin__
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.1'
__CHEETAH_versionTuple__ = (2, 4, 1, 'final', 0)
__CHEETAH_genTime__ = 1284450634.7130001
__CHEETAH_genTimestamp__ = 'Tue Sep 14 15:50:34 2010'
__CHEETAH_src__ = 'D:\\workspace\\python\\weather-china\\src\\home.html'
__CHEETAH_srcLastModified__ = 'Wed Jul 28 10:35:46 2010'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class CompiledTemplate(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(CompiledTemplate, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>\u5929\u6c14\u9884\u62a5</title>
<script type="text/javascript" src="/static/js/jquery.js"></script>
<script type="text/javascript">
var days=["\u661f\u671f\u65e5", "\u661f\u671f\u4e00", "\u661f\u671f\u4e8c", "\u661f\u671f\u4e09", "\u661f\u671f\u56db", "\u661f\u671f\u4e94", "\u661f\u671f\u516d"]
jQuery(document).ready(function() {
jQuery.getJSON("/api?city=''')
_v = VFSL([locals()]+SL+[globals(), __builtin__],"city.first_alias",True) # u'${city.first_alias}' on line 11, col 29
if _v is not None: write(_filter(_v, rawExpr=u'${city.first_alias}')) # from line 11, col 29.
write(u'''", function(data) {
var today = data.forecasts[0];
\tvar tomorrow = data.forecasts[1];
jQuery("#x-today-date").html(today.date);
jQuery("#x-tomorrow-date").html(tomorrow.date);
jQuery("#x-today-day").html(days[today.day]);
jQuery("#x-tomorrow-day").html(days[tomorrow.day]);
jQuery("#x-today-text").html(today.text);
jQuery("#x-tomorrow-text").html(tomorrow.text);
jQuery("#x-today-temp").html(today.low + " ~ " + today.high + "\xb0");
\tjQuery("#x-tomorrow-temp").html(tomorrow.low + " ~ " + tomorrow.high + "\xb0");
jQuery("#x-today-icon").css("background-image", "url(" + today.image_large + ")");
\tjQuery("#x-tomorrow-icon").css("background-image", "url(" + tomorrow.image_large + ")");
\tjQuery("#x-today-icon-small").css("background-image", "url(" + today.image_small + ")");
jQuery("#x-pub").html(data.pub);
\tif (data.wind.chill!=null)
\t jQuery("#x-wind-chill").html(data.wind.chill);
\tif (data.wind.direction!=null)
\t jQuery("#x-wind-direction").html(data.wind.direction);
\tif (data.wind.speed!=null)
\t jQuery("#x-wind-speed").html(data.wind.speed);
if (data.atmosphere.humidity!=null)
\t jQuery("#x-atmosphere-humidity").html(data.atmosphere.humidity);
if (data.atmosphere.visibility!=null)
\t jQuery("#x-atmosphere-visibility").html(data.atmosphere.visibility);
if (data.atmosphere.pressure!=null)
\t jQuery("#x-atmosphere-pressure").html(data.atmosphere.pressure);
if (data.astronomy.sunrise!=null)
\t jQuery("#x-astronomy-sunrise").html(data.astronomy.sunrise);
if (data.astronomy.sunset!=null)
\t jQuery("#x-astronomy-sunset").html(data.astronomy.sunset);
});
});
function change_city(key){
if (key=="-")
return;
location.assign("/?city=" + key);
}
</script>
<link rel="stylesheet" href="/static/css/screen.css" type="text/css" media="screen, projection">
<link rel="stylesheet" href="/static/css/print.css" type="text/css" media="print">
<!--[if lt IE 8]>
\t<link rel="stylesheet" href="/static/css/ie.css" type="text/css" media="screen, projection">
<![endif]-->
<style type="text/css">
div.w-report span.h {
\tmargin:3px 0px;
\tfont-weight:bold;
font-size:24px;
\tdisplay:inline;
}
div.w-report span.date {
\tmargin:3px 0px 3px 12px;
\tfont-weight:bold;
\tfont-size:16px;
}
div.weather-report {
\tbackground-image:url(static/img/w-bg.png);
\tbackground-repeat:no-repeat;
\tbackground-position:56px 70px;
\tmargin:0px;
\tpadding:0px;
\twidth:300px;
\theight:160px;
}
div.weather-icon {
\tbackground-image:url(static/w/img/d44.png);
\tbackground-repeat:no-repeat;
\tmargin:0px;
\tpadding:0px;
\twidth:300px;
\theight:160px;
}
div.weather-text {
\ttext-align:right;
\tmargin:0px;
\tpadding-top:76px;
\tpadding-right:20px;
}
div.weather-text p {
\tmargin:0px;
\tcolor:#FFF;
\tfont-size: 20px;
\tfont-weight: bold;
\ttext-shadow: #315895 0px -1px 1px;
\tline-height:28px;
}
</style>
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push([\'_setAccount\', \'UA-251595-22\']);
_gaq.push([\'_trackPageview\']);
(function() {
var ga = document.createElement(\'script\'); ga.type = \'text/javascript\'; ga.async = true;
ga.src = (\'https:\' == document.location.protocol ? \'https://ssl\' : \'http://www\') + \'.google-analytics.com/ga.js\';
var s = document.getElementsByTagName(\'script\')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</head>
<body style="font-size:13px">
<div class="container" style="background-color:#FFF">
<div class="span-24 last">
</div>
<div class="span-24 last">
<div id="x-today-icon-small" style="background-repeat:no-repeat; height:34; padding:10px 0px 10px 60px; background-image:url(static/w/img/s44.png)"><strong>''')
_v = VFSL([locals()]+SL+[globals(), __builtin__],"city.name",True) # u'${city.name}' on line 125, col 163
if _v is not None: write(_filter(_v, rawExpr=u'${city.name}')) # from line 125, col 163.
write(u'''</strong>
<select name="change_city" id="change_city" onchange="change_city(this.value)">
<option value="-">\u66f4\u6539\u57ce\u5e02</option>
''')
for c in VFSL([locals()]+SL+[globals(), __builtin__],"cities",True): # generated from line 128, col 1
write(u''' <option value="''')
_v = VFN(VFSL([locals()]+SL+[globals(), __builtin__],"c",True),"first_alias",False)() # u'${c.first_alias()}' on line 129, col 26
if _v is not None: write(_filter(_v, rawExpr=u'${c.first_alias()}')) # from line 129, col 26.
write(u'''">''')
_v = VFSL([locals()]+SL+[globals(), __builtin__],"c.name",True) # u'${c.name}' on line 129, col 46
if _v is not None: write(_filter(_v, rawExpr=u'${c.name}')) # from line 129, col 46.
write(u'''</option>
''')
write(u''' </select>
</div>
</div>
\t<div class="span-16">
<div class="span-16 last">
<div id="weather-today" class="w-report span-8">
<div><span class="h">\u4eca\u65e5\u5929\u6c14</span><span class="date"><span id="x-today-date"></span> <span id="x-today-day"></span></span></div>
<div class="weather-report">
<div id="x-today-icon" class="weather-icon">
<div class="weather-text">
<p id="x-today-text">Loading...</p>
<p id="x-today-temp"></p>
</div>
</div>
</div>
<div><span class="h">\u5176\u4ed6\u4fe1\u606f\uff1a</span></div>
<div style="padding:6px">
<div>\u98ce\u529b\uff1a<span id="x-wind-chill">N/A</span> \u98ce\u5411\uff1a<span id="x-wind-direction">N/A</span> \u98ce\u901f\uff1a<span id="x-wind-speed">N/A</span></div>
<div>\u80fd\u89c1\u5ea6\uff1a<span id="x-atmosphere-visibility">N/A</span> \u6e7f\u5ea6\uff1a<span id="x-atmosphere-humidity">N/A</span> \u6c14\u538b\uff1a<span id="x-atmosphere-pressure">N/A</span></div>
<div>\u65e5\u51fa\uff1a<span id="x-astronomy-sunrise">N/A</span> \u65e5\u843d\uff1a<span id="x-astronomy-sunset">N/A</span></div>
<div>\u53d1\u5e03\u4e8e\uff1a<span id="x-pub">N/A</span></div>
</div>
</div>
<div id="weather-tomorrow" class="w-report span-8 last">
<div><span class="h">\u660e\u65e5\u5929\u6c14</span><span class="date"><span id="x-tomorrow-date"></span> <span id="x-tomorrow-day"></span></span></div>
<div class="weather-report">
<div id="x-tomorrow-icon" class="weather-icon">
<div class="weather-text">
<p id="x-tomorrow-text">Loading...</p>
<p id="x-tomorrow-temp"></p>
</div>
</div>
</div>
</div>
</div>
<div class="w-report span-16 last" style="margin-top:6px">
<div><span class="h">\u5b89\u88c5Chrome\u63d2\u4ef6</span></div>
<div style="padding:6px">
<div>\u5982\u679c\u60a8\u4f7f\u7528\u7684\u662f\u652f\u6301HTML 5\u7684Google Chrome\u6d4f\u89c8\u5668\uff0c\u53ef\u4ee5<a href="https://chrome.google.com/extensions/detail/gbmkicglakjoppnghhiceacmbbaihoeh" target="_blank">\u5b89\u88c5\u6700\u65b0\u63d2\u4ef6</a>\u4ee5\u4fbf\u968f\u65f6\u83b7\u53d6\u5929\u6c14\u9884\u62a5\uff1a</div>
<div><a href="https://chrome.google.com/extensions/detail/gbmkicglakjoppnghhiceacmbbaihoeh" target="_blank"><img src="static/img/snapshot-chrome-extension.png" width="291" height="99" style="margin:12px"/></a></div>
</div>
</div>
<div class="w-report span-16 last" style="margin-top:6px">
<div><span class="h">GTalk\u673a\u5668\u4eba</span></div>
<div style="padding:6px">
<div>\u5982\u679c\u60a8\u4f7f\u7528Google Talk\uff0c\u53ef\u4ee5\u6dfb\u52a0\u673a\u5668\u4eba<strong>weather-china@appspot.com</strong>\u4e3a\u597d\u53cb\uff0c\u968f\u65f6\u5411\u4ed6\u8be2\u95ee\u5929\u6c14\u9884\u62a5\uff1a</div>
<div><img src="static/img/snapshot-xmpp.png" width="300" height="254" style="margin:12px"/></div>
</div>
</div>
</div>
<div class="span-8 last">
<script type="text/javascript"><!--
google_ad_client = "pub-6727358730461554";
/* 300x250 */
google_ad_slot = "8201905603";
google_ad_width = 300;
google_ad_height = 250;
//-->
</script>
<script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"></script>
<script type="text/javascript"><!--
google_ad_client = "pub-6727358730461554";
/* 300x250 */
google_ad_slot = "8201905603";
google_ad_width = 300;
google_ad_height = 250;
//-->
</script>
<script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"></script>
<script type="text/javascript"><!--
google_ad_client = "pub-6727358730461554";
/* 300x250 */
google_ad_slot = "8201905603";
google_ad_width = 300;
google_ad_height = 250;
//-->
</script>
<script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"></script>
</div>
<div class="span-24 last"></div>
<div class="span-24 last"><div style="text-align:center;padding:6px"><a href="http://code.google.com/p/weather-china/wiki/API" target="_blank">API\u670d\u52a1</a> | <a href="http://code.google.com/p/weather-china/issues/list" target="_blank">\u610f\u89c1\u53cd\u9988</a> | <a id="x-contact" href="#">\u8054\u7cfb\u6211\u4eec</a> | Copyright©2010</div></div>
</div>
<script type="text/javascript">
jQuery("#x-contact").attr("href", "mail" + "to:ask" + "xuefeng@" + "gm" + "ail.com");
</script>
</body>
</html>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_CompiledTemplate= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing (the methods added by
# _addCheetahPlumbingCodeToClass) to the generated class, unless an
# earlier import already installed it.
if not hasattr(CompiledTemplate, '_initCheetahAttributes'):
    templateAPIClass = getattr(CompiledTemplate, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(CompiledTemplate)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=CompiledTemplate()).run()
| [
[
1,
0,
0.0253,
0.0028,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0281,
0.0028,
0,
0.66,
0.0345,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0309,
0.0028,
0,
... | [
"import sys",
"import os",
"import os.path",
"import __builtin__",
"from os.path import getmtime, exists",
"import time",
"import types",
"from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion",
"from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTupl... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
from google.appengine.ext import db
class City(db.Model):
    """Datastore entity for a supported city.

    `code` is the numeric id passed to the weather feed query; `aliases`
    holds the lookup names (the first alias is used as the city's
    identifier in URLs and cookies).
    """
    name = db.StringProperty(required=True)
    aliases = db.StringListProperty(required=True)
    code = db.IntegerProperty(required=True)
    def first_alias(self):
        # The first alias doubles as the canonical short identifier.
        return self.aliases[0]
    def aliases_str(self):
        """Comma-separated alias list (for display, e.g. in admin pages)."""
        return ', '.join(self.aliases)
def get_city(key=None):
    """Load a City by datastore key, falling back to the default city.

    Returns the City for *key* when given and resolvable; otherwise the
    result of find_city('beijing').
    """
    found = City.get(key) if key else None
    if found is not None:
        return found
    return find_city('beijing')
def get_cities():
    """Return up to 1000 City entities ordered by their alias list."""
    query = City.all().order('aliases')
    return query.fetch(1000)
def find_city(name, return_default=True):
    """Find a City by alias first, then by name.

    When nothing matches: returns the 'beijing' city if *return_default*
    is true, else None.
    """
    for prop_filter in ('aliases =', 'name ='):
        match = City.all().filter(prop_filter, name).get()
        if match is not None:
            return match
    if return_default:
        return City.all().filter('aliases =', 'beijing').get()
    return None
def create_city(name, aliases, code):
    """Persist a new City entity and return it."""
    city = City(name=name, aliases=aliases, code=code)
    city.put()
    return city
def delete_city(key):
    """Delete the City stored under the given datastore key."""
    entity = City.get(key)
    entity.delete()
import urllib
import datetime
from xml.parsers.expat import ParserCreate
# Yahoo! Weather RSS condition codes mapped to Chinese descriptions.
# The numeric code comes from the feed's yweather elements; 3200 is the
# feed's sentinel for "not available".
codes = {
    0 : u'龙卷风', # tornado
    1 : u'热带风暴', # tropical storm
    2 : u'飓风', # hurricane
    3 : u'风暴', # severe thunderstorms
    4 : u'雷雨', # thunderstorms
    5 : u'雨夹雪', # mixed rain and snow
    6 : u'雨夹冰雹', # mixed rain and sleet
    7 : u'雪夹冰雹', # mixed snow and sleet
    8 : u'冰毛毛雨', # freezing drizzle
    9 : u'毛毛雨', # drizzle
    10 : u'冰雨', # freezing rain
    11 : u'阵雨', # showers
    12 : u'阵雨', # showers
    13 : u'小雪', # snow flurries
    14 : u'小雨雪', # light snow showers
    15 : u'风雪', # blowing snow
    16 : u'下雪', # snow
    17 : u'冰雹', # hail
    18 : u'雨夹雪', # sleet
    19 : u'尘土', # dust
    20 : u'雾', # foggy
    21 : u'霾', # haze
    22 : u'烟雾', # smoky
    23 : u'狂风', # blustery
    24 : u'大风', # windy
    25 : u'寒冷', # cold
    26 : u'多云', # cloudy
    27 : u'多云', # mostly cloudy (night)
    28 : u'多云', # mostly cloudy (day)
    29 : u'局部多云', # partly cloudy (night)
    30 : u'局部多云', # partly cloudy (day)
    31 : u'晴朗', # clear (night)
    32 : u'晴', # sunny
    33 : u'晴朗', # fair (night)
    34 : u'晴朗', # fair (day)
    35 : u'雨夹冰雹', # mixed rain and hail
    36 : u'炎热', # hot
    37 : u'局部雷雨', # isolated thunderstorms
    38 : u'零星雷雨', # scattered thunderstorms
    39 : u'零星雷雨', # scattered thunderstorms
    40 : u'零星阵雨', # scattered showers
    41 : u'大雪', # heavy snow
    42 : u'零星雨夹雪', # scattered snow showers
    43 : u'大雪', # heavy snow
    44 : u'局部多云', # partly cloudy
    45 : u'雷阵雨', # thundershowers
    46 : u'小雪', # snow showers
    47 : u'局部雷雨', # isolated thundershowers
    3200 : u'暂无数据' # not available
}
def load_rss(url):
    """Fetch *url* and return the raw response body as a byte string.

    Fix: the original leaked the connection when read() raised; the
    handle is now closed in a finally block.
    """
    f = urllib.urlopen(url)
    try:
        return f.read()
    finally:
        f.close()
class Wind(object):
    """Wind reading (chill, direction, speed); __str__ renders it as a
    JSON-style fragment with the values interpolated unquoted."""

    def __init__(self, chill, direction, speed):
        self.chill = chill
        self.direction = direction
        self.speed = speed

    def __str__(self):
        fields = (self.chill, self.direction, self.speed)
        return r'{"chill" : %s, "direction" : %s, "speed" : %s}' % fields

    __repr__ = __str__
class Atmosphere(object):
    """Atmospheric reading; __str__ renders a JSON-style fragment."""

    def __init__(self, humidity, visibility, pressure, rising):
        self.humidity = humidity
        self.visibility = visibility
        self.pressure = pressure
        self.rising = rising

    def __str__(self):
        fields = (self.humidity, self.visibility, self.pressure, self.rising)
        return r'{"humidity" : %s, "visibility" : %s, "pressure" : %s, "rising": %s}' % fields

    __repr__ = __str__
class Astronomy(object):
    """Sunrise/sunset pair; __str__ renders a JSON-style fragment."""

    def __init__(self, sunrise, sunset):
        self.sunrise = sunrise
        self.sunset = sunset

    def __str__(self):
        pair = (self.sunrise, self.sunset)
        return r'{"sunrise" : "%s", "sunset": "%s"}' % pair

    __repr__ = __str__
class Forecast(object):
    """One day's forecast, built from an element like:
    <yweather:forecast day="Wed" date="30 Jun 2010" low="24" high="30" text="Mostly Cloudy" code="28" />
    """

    def __init__(self, day, date, low, high, code):
        self.day = day
        self.date = date
        self.low = low
        self.high = high
        self.code = code

    def __str__(self):
        # Icon URLs are derived from the numeric condition code; the
        # human-readable text comes from the module-level `codes` table.
        large = "http://l.yimg.com/a/i/us/nws/weather/gr/%sd.png" % self.code
        small = "http://l.yimg.com/a/i/us/nws/weather/gr/%ss.png" % self.code
        fields = (self.date, self.day, self.code, codes[self.code],
                  self.low, self.high, large, small)
        return u'{"date" : "%s", "day" : %s, "code" : %s, "text" : "%s", "low" : %d, "high" : %d, "image_large" : "%s", "image_small" : "%s"}' % fields

    __repr__ = __str__
def index_of(list, data):
    """Return the position of *data* in *list*, or None when absent."""
    return next((pos for pos, value in enumerate(list) if data == value), None)
def get_day(day):
    """Map an English weekday abbreviation to 0-6 (Sunday first); None if unknown."""
    weekdays = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')
    return index_of(weekdays, day)
def get_date(date):
    """Parse a feed date like '30 Jun 2010' into a datetime.date."""
    parts = date.split(' ')
    months = ('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    month = index_of(months, parts[1])
    return datetime.date(int(parts[2]), month, int(parts[0]))
def to_24hour(time):
    """Convert a 12-hour time like '4:39 pm' to 24-hour '16:39'.

    Strings without an ' am'/' pm' suffix are returned unchanged.
    Fix: the original mishandled 12 o'clock ('12:05 am' stayed '12:05'
    instead of '0:05', and '12:30 pm' became the invalid '24:30').
    """
    if time.endswith(' am'):
        time = time[:-3]
        if time.startswith('12:'):
            return "0" + time[2:]  # 12:xx am is midnight -> hour 0
        return time
    if time.endswith(' pm'):
        time = time[:-3]
        n = time.find(':')
        hour = int(time[:n])
        if hour != 12:  # 12:xx pm is noon and already correct
            hour = hour + 12
        return "%d:%s" % (hour, time[n+1:])
    return time
class Weather(object):
    """Parsed Yahoo! Weather RSS feed.

    Drives an expat parser over the feed and collects the publication
    time (`pub`), wind, atmosphere, astronomy and per-day forecasts;
    __str__ renders everything as one JSON-style string.
    """
    def char_data(self, text):
        # Only the text inside <lastBuildDate> is interesting; the flag is
        # toggled by start_element/end_element.
        if self.__isLastBuildDate:
            # Drop the leading weekday ("Wed, ...").
            n = text.find(', ')
            text = text[n+2:]
            # Replace the month abbreviation with its number so strptime
            # can use %m.
            n1 = text.find(' ')
            n2 = text.find(' ', n1+1)
            m = text[n1+1:n2]
            month = index_of(('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), m)
            text = text.replace(m, str(month))
            # NOTE(review): only timestamps ending in ' CST' are accepted;
            # other timezones leave self.pub as None -- confirm intended.
            if not text.endswith(' CST'):
                return
            text = text[:-4]
            is_pm = text.endswith(' pm')
            text = text[:-3]
            time = datetime.datetime.strptime(text, '%d %m %Y %I:%M')
            h = time.hour
            # 12-hour -> 24-hour; NOTE(review): '12:xx pm' would become
            # hour 24 here -- confirm the feed never emits that.
            if is_pm:
                h = h + 12
            self.pub = '%d-%#02d-%#02d %#02d:%#02d' % (time.year, time.month, time.day, h, time.minute)
    def end_element(self, name):
        # Stop capturing character data once </lastBuildDate> closes.
        if name=='lastBuildDate':
            self.__isLastBuildDate = False
    def start_element(self, name, attrs):
        # Dispatch on the yweather elements; each one fills in a part of
        # the aggregate state built in __init__.
        if name=='lastBuildDate':
            self.__isLastBuildDate = True
            return
        if name=='yweather:forecast':
            self.forecasts.append(Forecast(
                get_day(attrs['day']),
                get_date(attrs['date']),
                int(attrs['low']),
                int(attrs['high']),
                int(attrs['code'])
            ))
        if name=='yweather:astronomy':
            # Sunrise/sunset arrive as 12-hour strings; normalize them.
            self.astronomy.sunrise = to_24hour(attrs['sunrise'])
            self.astronomy.sunset = to_24hour(attrs['sunset'])
        if name=='yweather:atmosphere':
            self.atmosphere.humidity = attrs['humidity']
            self.atmosphere.visibility = attrs['visibility']
            self.atmosphere.pressure = attrs['pressure']
            self.atmosphere.rising = attrs['rising']
        if name=='yweather:wind':
            self.wind.chill = attrs['chill']
            self.wind.direction = attrs['direction']
            self.wind.speed = attrs['speed']
    def __init__(self, data):
        """Parse *data* (raw RSS bytes) immediately on construction."""
        self.__isLastBuildDate = False
        self.pub = None
        self.wind = Wind(None, None, None)
        self.atmosphere = Atmosphere(None, None, None, None)
        self.astronomy = Astronomy(None, None)
        self.forecasts = []
        parser = ParserCreate()
        parser.returns_unicode = False
        parser.StartElementHandler = self.start_element
        parser.EndElementHandler = self.end_element
        parser.CharacterDataHandler = self.char_data
        parser.Parse(data)
    def __str__(self):
        # pub is rendered as a JSON string or the literal null; the other
        # parts rely on their own __str__/__repr__ JSON fragments.
        pub = 'null'
        if self.pub:
            pub = r'"%s"' % self.pub
        return u'{"pub" : %s, "wind" : %s, "astronomy" : %s, "atmosphere" : %s, "forecasts" : %s}' \
            % (pub, self.wind, self.astronomy, self.atmosphere, self.forecasts)
    __repr__ = __str__
class Subscriber(db.Model):
    """A weather-subscription record (no readers/writers visible in this
    module -- presumably consumed by a notification job; verify)."""
    mobile = db.StringProperty(required=True)  # subscriber's phone number
    city = db.StringProperty(required=True)    # city name or alias -- TODO confirm which
    time = db.IntegerProperty(required=True)   # delivery time; units not visible here
| [
[
14,
0,
0.0151,
0.0038,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.0226,
0.0038,
0,
0.66,
0.0455,
167,
0,
1,
0,
0,
167,
0,
0
],
[
3,
0,
0.0472,
0.0377,
0,
0... | [
"__author__ = 'Michael Liao (askxuefeng@gmail.com)'",
"from google.appengine.ext import db",
"class City(db.Model):\n name = db.StringProperty(required=True)\n aliases = db.StringListProperty(required=True)\n code = db.IntegerProperty(required=True)\n\n def first_alias(self):\n return self.al... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
import os
import cgi
import time
import logging
import simplejson
from datetime import date
from google.appengine.api import xmpp
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
from google.appengine.runtime import apiproxy_errors
from google.appengine.api import memcache
from google.appengine.api import users
from Cheetah.Template import Template
from autogen import CompiledTemplate
import weather
import store
def get_city(request):
    """Extract the remembered city alias from the request's Cookie header.

    Returns the value of the 'city' cookie, or None when the header is
    absent, empty, or carries no such cookie.
    """
    if 'Cookie' not in request.headers:
        return None
    raw = request.headers['Cookie']
    if not raw:
        return None
    for chunk in raw.split(';'):
        chunk = chunk.strip()
        if chunk.startswith('city='):
            return chunk[5:]
    return None
def fetch_weather_in_cache(city):
    """Return the weather JSON for *city*, memcached for one hour.

    Returns None when the upstream fetch fails (failures are not cached).
    """
    cache_key = str(city.code)
    cached = memcache.get(cache_key)
    if cached:
        return cached
    fresh = fetch_weather(city)
    if fresh is not None:
        memcache.set(cache_key, fresh, 3600)
    return fresh
def fetch_weather(city):
    """Fetch the RSS feed for *city* and render it as a JSON string.

    Returns None when the feed could not be retrieved.
    """
    rss = fetch_rss(city.code)
    if rss is None:
        return None
    return str(weather.Weather(city.name, rss))
def fetch_rss(code):
    """Download the Yahoo! Weather RSS for the given location *code*.

    Returns the response body, or None on fetch errors or any non-200
    status (redirects are not followed).
    """
    url = 'http://weather.yahooapis.com/forecastrss?w=%s' % code
    logging.info('Fetch RSS: %s' % url)
    try:
        response = urlfetch.fetch(url, follow_redirects=False)
    except (urlfetch.Error, apiproxy_errors.Error):
        return None
    return response.content if response.status_code == 200 else None
class XmppHandler(webapp.RequestHandler):
    """GTalk bot endpoint: replies to a chat message naming a city with
    that city's two-day forecast."""
    def post(self):
        """Handle an incoming XMPP chat message.

        The message body is treated as a city name/alias; unknown cities
        and fetch failures get apologetic replies.
        """
        message = xmpp.Message(self.request.POST)
        logging.info('XMPP from %s: %s' % (message.sender, message.body))
        name = message.body.strip().lower()
        if name=='':
            message.reply(u'''噢,啥都不输,怎么知道您要查询的城市啊?
http://weather-china.appspot.com/
''')
            return
        # Do not fall back to the default city here: an unknown name gets
        # an explicit "not found" reply instead.
        city = store.find_city(name, return_default=False)
        if city is None:
            message.reply(u''':( 噢,没有找到您要查询的城市 "%s"。
http://weather-china.appspot.com/
''' % name)
            return
        json = fetch_weather_in_cache(city)
        if json is None:
            return message.reply(u''':( 对不起,网络故障,暂时无法查询,请过几分钟再试试。
http://weather-china.appspot.com/
''')
        if isinstance(json, unicode):
            json = json.encode('utf-8')
        # NOTE(review): this reads w[u'name'], so the weather JSON is
        # expected to carry a "name" field -- confirm against the Weather
        # serializer in use.
        w = simplejson.loads(json, encoding='utf-8')
        return message.reply(
            u'''%s:
今日:%s,%s~%s度
明日:%s,%s~%s度
更详细的预报请查看 http://weather-china.appspot.com/?city=%s
''' % (
            w[u'name'],
            w[u'forecasts'][0][u'text'], w[u'forecasts'][0][u'low'], w[u'forecasts'][0][u'high'],
            w[u'forecasts'][1][u'text'], w[u'forecasts'][1][u'low'], w[u'forecasts'][1][u'high'],
            city.first_alias(),)
        )
class HomeHandler(webapp.RequestHandler):
    """Renders the home page for the requested (or remembered) city."""

    def get(self):
        started = time.time()
        # Resolution order: ?city= parameter, then cookie, then Beijing.
        name = self.request.get('city', '') or get_city(self.request) or 'beijing'
        cities = self._cached_cities()
        city = self._match(cities, name)
        if city is None:
            self.response.set_status(500)
            return
        self._remember_city(city)
        render_started = time.time()
        page = CompiledTemplate(searchList=[{'city' : city, 'cities' : cities}])
        self.response.out.write(page)
        finished = time.time()
        logging.info('Performance: %f / %f of rendering / total.' % (finished-render_started, finished-started))

    def _cached_cities(self):
        # Keep the (small) city list in memcache for an hour.
        cities = memcache.get('__cities__')
        if cities is None:
            cities = store.get_cities()
            memcache.set('__cities__', cities, 3600)
        return cities

    def _match(self, cities, name):
        # A city matches on its canonical name or any of its aliases.
        for candidate in cities:
            if candidate.name == name or name in candidate.aliases:
                return candidate
        return None

    def _remember_city(self, city):
        # Persist the chosen city in a cookie that expires three years out.
        today = date.today()
        target = date(today.year+3, today.month, today.day)
        expires = target.strftime('%a, %d-%b-%Y %H:%M:%S GMT')
        self.response.headers['Set-Cookie'] = 'city=%s; expires=%s; path=/' % (city.first_alias(), expires)
class AdminHandler(webapp.RequestHandler):
    """Admin-only UI for maintaining the city list."""

    def get(self):
        """Show the admin page, or delete a city when action=delete_city."""
        login_url = self.get_login_url()
        if login_url:
            self.redirect(login_url)
            return
        action = self.request.get('action', '')
        if action == 'delete_city':
            store.delete_city(self.request.get('key'))
            self.redirect_admin()
        elif action == '':
            cities = store.get_cities()
            root = os.path.dirname(__file__)
            page = Template(file=os.path.join(root, 'admin.html'), searchList=[{'cities' : cities}])
            self.response.out.write(page)
        else:
            self.response.set_status(400)

    def post(self):
        """Create a new city when action=create_city."""
        login_url = self.get_login_url()
        if login_url:
            self.redirect(login_url)
            return
        if self.request.get('action') == 'create_city':
            # Inputs are HTML-escaped and lowercased before storage.
            name = cgi.escape(self.request.get('name')).strip().lower()
            aliases = [cgi.escape(x).lower() for x in self.request.get_all('aliases') if x.strip()]
            code = int(self.request.get('code'))
            store.create_city(name, aliases, code)
            self.redirect_admin()
            return
        self.response.set_status(400)

    def get_login_url(self):
        """Return a login URL when the caller is not an admin, else None."""
        if users.is_current_user_admin():
            return None
        return users.create_login_url('/admin')

    def redirect_admin(self):
        # Timestamp query parameter busts caches so the list reloads fresh.
        self.redirect('/admin?t=%s' % time.time())
class ApiHandler(webapp.RequestHandler):
    """JSON(P) endpoint serving a city's forecast.

    GET parameters:
      city      -- city name or alias (ignored when extension=chrome)
      callback  -- optional JSONP callback name
      extension -- 'chrome' takes the city from the cookie instead
    """
    CACHE_TIME = 600 # 600 seconds; not referenced in this class -- TODO confirm external use
    def get(self):
        callback = ''
        c = ''
        extension = self.request.get('extension', '')
        if extension=='chrome':
            # detect city from cookie:
            c = get_city(self.request)
            if not c:
                c = 'beijing'
        else:
            callback = cgi.escape(self.request.get('callback', '').strip())
            c = cgi.escape(self.request.get('city', '')).lower()
        if not c:
            return self.send_error('MISSING_PARAMETER', 'Missing parameter \'city\'')
        city = store.find_city(c, return_default=False)
        if city is None:
            return self.send_error('CITY_NOT_FOUND', 'City not found')
        # Fix: renamed local from `weather` so it no longer shadows the
        # imported `weather` module.
        weather_json = fetch_weather_in_cache(city)
        if weather_json is None:
            return self.send_error('SERVICE_UNAVAILABLE', 'Service unavailable')
        if callback:
            if isinstance(callback, unicode):
                callback = callback.encode('utf-8')
            # JSONP: wrap the payload in the caller-supplied function name.
            self.write_json('%s(%s);' % (callback, weather_json))
        else:
            self.write_json(weather_json)
    def send_error(self, code, msg):
        """Write a JSON error envelope with a machine code and message."""
        json = '{ "error" : "%s", "message" : "%s"}' % (code, msg)
        self.write_json(json)
    def write_json(self, json):
        """Write *json* (str or unicode) as a UTF-8 JSON response."""
        if isinstance(json, unicode):
            json = json.encode('utf-8')
        self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
        self.response.out.write(json)
# URL routing table: request-path regex -> handler class.
application = webapp.WSGIApplication([
    ('^/$', HomeHandler),
    ('^/api$', ApiHandler),
    ('^/admin$', AdminHandler),
    ('^/_ah/xmpp/message/chat/$', XmppHandler),
], debug=True)
def main():
    # Entry point used by the App Engine CGI runtime.
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
| [
[
14,
0,
0.0175,
0.0044,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.0263,
0.0044,
0,
0.66,
0.0357,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0307,
0.0044,
0,
0... | [
"__author__ = 'Michael Liao (askxuefeng@gmail.com)'",
"import os",
"import cgi",
"import time",
"import logging",
"import simplejson",
"from datetime import date",
"from google.appengine.api import xmpp",
"from google.appengine.ext import webapp",
"from google.appengine.ext.webapp.util import run_... |
'''
Provides an abstract Servlet baseclass for Cheetah's Template class
'''
import sys
import os.path
# Locate a Webware servlet base class if one is installed; otherwise fall
# back to a minimal stand-in so Cheetah templates work outside Webware.
isWebwareInstalled = False
try:
    try:
        from ds.appserver.Servlet import Servlet as BaseServlet
    except:
        # NOTE: deliberately bare excepts -- any import failure (not just
        # ImportError) drops through to the next fallback.
        from WebKit.Servlet import Servlet as BaseServlet
    isWebwareInstalled = True
    # Webware's Servlet may be an old-style class; mix in object so the
    # new-style machinery (super(), descriptors) works below.
    if not issubclass(BaseServlet, object):
        class NewStyleBaseServlet(BaseServlet, object):
            pass
        BaseServlet = NewStyleBaseServlet
except:
    # No Webware at all: provide a do-nothing servlet with the same
    # lifecycle hooks (awake/sleep/shutdown).
    class BaseServlet(object):
        _reusable = 1
        _threadSafe = 0
        def awake(self, transaction):
            pass
        def sleep(self, transaction):
            pass
        def shutdown(self):
            pass
##################################################
## CLASSES
class Servlet(BaseServlet):
    """Abstract base class for Cheetah.Template.Template.

    Wraps WebKit.Servlet and adds a few of the conveniences found in
    WebKit.Page; HTTP method dispatch is left to WebKit.HTTPServlet.
    """

    transaction = None
    application = None
    request = None
    session = None

    def __init__(self, *args, **kwargs):
        super(Servlet, self).__init__(*args, **kwargs)
        # Until awake() runs we assume we are NOT being driven by WebKit.
        self._CHEETAH__isControlledByWebKit = False

    ## lifecycle hooks called by Webware during the request-response cycle

    def awake(self, transaction):
        super(Servlet, self).awake(transaction)
        # Being awoken means WebKit is driving this servlet directly.
        self._CHEETAH__isControlledByWebKit = True
        self.transaction = transaction
        response = transaction.response
        self.response = response
        self.request = transaction.request
        # Workaround for a bug in WebKit.Servlet.Servlet.serverSidePath:
        # it reads self._request even though it never defines it.  This
        # attribute WILL disappear in the future.
        self._request = transaction.request()
        self.session = transaction.session
        self.write = response().write

    def respond(self, trans=None):
        raise NotImplementedError("""\
couldn't find the template's main method. If you are using #extends
without #implements, try adding '#implements respond' to your template
definition.""")

    def sleep(self, transaction):
        super(Servlet, self).sleep(transaction)
        # Drop all per-request state so the servlet can be reused.
        for attr in ('session', 'request', '_request', 'response', 'transaction'):
            setattr(self, attr, None)

    def shutdown(self):
        pass

    def serverSidePath(self, path=None,
                       normpath=os.path.normpath,
                       abspath=os.path.abspath
                       ):
        if self._CHEETAH__isControlledByWebKit:
            return super(Servlet, self).serverSidePath(path)
        if path:
            return normpath(abspath(path.replace("\\", '/')))
        if getattr(self, '_filePath', None):
            return normpath(abspath(self._filePath))
        return None
# vim: shiftwidth=4 tabstop=4 expandtab
| [
[
8,
0,
0.0179,
0.0268,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0446,
0.0089,
0,
0.66,
0.2,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0536,
0.0089,
0,
0.66,
... | [
"'''\nProvides an abstract Servlet baseclass for Cheetah's Template class\n'''",
"import sys",
"import os.path",
"isWebwareInstalled = False",
"try:\n try:\n from ds.appserver.Servlet import Servlet as BaseServlet\n except:\n from WebKit.Servlet import Servlet as BaseServlet\n isWebwa... |
#
| [] | [] |
"Template support for Cheetah"
import imp
import os
import sys
import types

import pkg_resources
from Cheetah import Compiler
def _recompile_template(package, basename, tfile, classname):
    """Compile <basename>.tmpl from *package* and install it as a module.

    The generated template class is renamed to *basename*, attached to a
    fresh module registered in sys.modules under *classname*, and the
    module is returned.  (*tfile* is unused; kept for interface
    compatibility.)
    """
    tmpl = pkg_resources.resource_string(package, "%s.tmpl" % basename)
    c = Compiler.Compiler(source=tmpl, mainClassName='GenTemplate')
    code = str(c)
    # types.ModuleType replaces the deprecated imp.new_module (imp was
    # removed in Python 3.12); behavior is identical.
    mod = types.ModuleType(classname)
    ns = dict()
    exec(code, ns)
    tempclass = ns.get("GenTemplate",
                       ns.get('DynamicallyCompiledCheetahTemplate'))
    assert tempclass
    tempclass.__name__ = basename
    setattr(mod, basename, tempclass)
    sys.modules[classname] = mod
    return mod
class TurboCheetah:
    """TurboGears template-engine plugin backed by Cheetah.

    Loads ``*.tmpl`` templates from Python packages, recompiling them on
    the fly when their mtime changes (unless cheetah.precompiled is set).
    """
    extension = "tmpl"
    def __init__(self, extra_vars_func=None, options=None):
        """*extra_vars_func* is called per render to supply extra context."""
        if options is None:
            options = dict()
        self.get_extra_vars = extra_vars_func
        self.options = options
        # classname -> last seen template-file mtime (recompile cache).
        self.compiledTemplates = {}
        self.search_path = []
    def load_template(self, template=None,
                      template_string=None, template_file=None,
                      loadingSite=False):
        """Searches for a template along the Python path.

        Exactly one of *template* (dotted module path),
        *template_string*, or *template_file* must be given.
        Template files must end in ".tmpl" and be in legitimate packages.
        """
        given = len([_f for _f in (template, template_string, template_file) if _f])
        if given > 1:
            raise TypeError(
                "You may give only one of template, template_string, and "
                "template_file")
        if not given:
            raise TypeError(
                "You must give one of template, template_string, or "
                "template_file")
        if template:
            return self.load_template_module(template)
        elif template_string:
            return self.load_template_string(template_string)
        elif template_file:
            return self.load_template_file(template_file)
    def load_template_module(self, classname):
        """Import (and recompile when stale) the template named by the
        dotted path *classname*; returns the template class."""
        ct = self.compiledTemplates
        divider = classname.rfind(".")
        if divider > -1:
            package = classname[0:divider]
            basename = classname[divider+1:]
        else:
            raise ValueError("All templates must be in a package")
        if not self.options.get("cheetah.precompiled", False):
            tfile = pkg_resources.resource_filename(package,
                                                    "%s.%s" %
                                                    (basename,
                                                     self.extension))
            if classname in ct:
                # Seen before: recompile only when the file's mtime moved.
                mtime = os.stat(tfile).st_mtime
                if ct[classname] != mtime:
                    ct[classname] = mtime
                    del sys.modules[classname]
                    mod = _recompile_template(package, basename,
                                              tfile, classname)
                else:
                    mod = __import__(classname, dict(), dict(), [basename])
            else:
                # First use: record the mtime and compile fresh.
                ct[classname] = os.stat(tfile).st_mtime
                mod = _recompile_template(package, basename,
                                          tfile, classname)
        else:
            # Precompiled mode trusts the importable module as-is.
            mod = __import__(classname, dict(), dict(), [basename])
        tempclass = getattr(mod, basename)
        return tempclass
    def load_template_string(self, content):
        raise NotImplementedError
    def load_template_file(self, filename):
        raise NotImplementedError
    def render(self, info, format="html", fragment=False, template=None,
               template_string=None, template_file=None):
        """Render a template with *info* (plus any extra vars) in scope.

        *fragment* selects the template's fragment() output instead of
        the full respond().  *format* is accepted but unused here.
        """
        tclass = self.load_template(
            template=template, template_string=template_string,
            template_file=template_file)
        if self.get_extra_vars:
            extra = self.get_extra_vars()
        else:
            extra = {}
        tempobj = tclass(searchList=[info, extra])
        if fragment:
            return tempobj.fragment()
        else:
            return tempobj.respond()
| [
[
8,
0,
0.0091,
0.0091,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0273,
0.0091,
0,
0.66,
0.2,
509,
0,
3,
0,
0,
509,
0,
0
],
[
1,
0,
0.0455,
0.0091,
0,
0.66,
... | [
"\"Template support for Cheetah\"",
"import sys, os, imp",
"from Cheetah import Compiler",
"import pkg_resources",
"def _recompile_template(package, basename, tfile, classname):\n tmpl = pkg_resources.resource_string(package, \"%s.tmpl\" % basename)\n c = Compiler.Compiler(source=tmpl, mainClassName='... |
# Re-export the engine class so callers can simply do
# `from turbocheetah import TurboCheetah`.
from turbocheetah import cheetahsupport
TurboCheetah = cheetahsupport.TurboCheetah
# Explicit public API for `from turbocheetah import *`.
__all__ = ["TurboCheetah"]
[
1,
0,
0.2,
0.2,
0,
0.66,
0,
172,
0,
1,
0,
0,
172,
0,
0
],
[
14,
0,
0.6,
0.2,
0,
0.66,
0.5,
316,
7,
0,
0,
0,
0,
0,
0
],
[
14,
0,
1,
0.2,
0,
0.66,
1,
272,
... | [
"from turbocheetah import cheetahsupport",
"TurboCheetah = cheetahsupport.TurboCheetah",
"__all__ = [\"TurboCheetah\"]"
] |
"""
@@TR: This code is pretty much unsupported.
MondoReport.py -- Batching module for Python and Cheetah.
Version 2001-Nov-18. Doesn't do much practical yet, but the companion
testMondoReport.py passes all its tests.
-Mike Orr (Iron)
TODO: BatchRecord.prev/next/prev_batches/next_batches/query, prev.query,
next.query.
How about Report: .page(), .all(), .summary()? Or PageBreaker.
"""
import operator
# reduce() lives in functools on modern Pythons; on very old versions it
# is a builtin, so a failed import is safe to ignore.
try:
    from functools import reduce
except ImportError:
    # If functools doesn't exist, we must be on an old
    # enough version that has reduce() in builtins
    pass
# Prefer Cheetah's NameMapper for attribute/key lookup; fall back to a
# plain-Python equivalent when Cheetah is not installed.
try:
    from Cheetah.NameMapper import valueForKey as lookup_func
except ImportError:
    def lookup_func(obj, name):
        # Attribute access wins over mapping access.
        if hasattr(obj, name):
            return getattr(obj, name)
        else:
            return obj[name] # Raises KeyError.
########## PUBLIC GENERIC FUNCTIONS ##############################
class NegativeError(ValueError):
    """Raised when a value checked by isNonNegative turns out negative."""
    pass
def isNumeric(v):
    """True when *v* is an int or float (bools count, being ints)."""
    return isinstance(v, int) or isinstance(v, float)
def isNonNegative(v):
    """Check that *v* is numeric and not negative.

    Returns the isNumeric() result; raises NegativeError when *v* is a
    negative number.  Fix: the original had no return statement and
    always yielded None, making the predicate unusable as a truth test.
    """
    ret = isNumeric(v)
    if ret and v < 0:
        raise NegativeError(v)
    return ret
def isNotNone(v):
    """True unless *v* is None."""
    return not (v is None)
def Roman(n):
    """Return the Roman-numeral string for a positive integer *n*.

    Raises TypeError when *n* is not convertible to int, and ValueError
    when the value is zero or negative.
    Fix: the original built the error message with str + int
    concatenation, which raised TypeError instead of the intended
    ValueError.
    """
    n = int(n) # Raises TypeError.
    if n < 1:
        raise ValueError("roman numeral for zero or negative undefined: %d" % n)
    roman = ''
    # Peel off additive values, largest denomination first...
    while n >= 1000:
        n = n - 1000
        roman = roman + 'M'
    while n >= 500:
        n = n - 500
        roman = roman + 'D'
    while n >= 100:
        n = n - 100
        roman = roman + 'C'
    while n >= 50:
        n = n - 50
        roman = roman + 'L'
    while n >= 10:
        n = n - 10
        roman = roman + 'X'
    while n >= 5:
        n = n - 5
        roman = roman + 'V'
    while n < 5 and n >= 1:
        n = n - 1
        roman = roman + 'I'
    # ...then rewrite four-in-a-row runs as subtractive pairs (IIII -> IV).
    roman = roman.replace('DCCCC', 'CM')
    roman = roman.replace('CCCC', 'CD')
    roman = roman.replace('LXXXX', 'XC')
    roman = roman.replace('XXXX', 'XL')
    roman = roman.replace('VIIII', 'IX')
    roman = roman.replace('IIII', 'IV')
    return roman
def sum(lis):
    """Fold the sequence with + starting from 0 (shadows the builtin)."""
    total = 0
    for item in lis:
        total = operator.add(total, item)
    return total
def mean(lis):
    """Arithmetic mean as a float; 0.00 for an empty sequence."""
    count = len(lis)
    if count == 0:
        return 0.00 # Avoid ZeroDivisionError (not raised for floats anyway)
    return float(sum(lis)) / count
def median(lis):
    """Upper median: the element at index len//2 of a sorted copy."""
    ordered = sorted(lis)
    return ordered[len(ordered) // 2]
def variance(lis):
    # Placeholder: not yet implemented (sample vs population variant
    # unspecified -- TODO confirm intended semantics before implementing).
    raise NotImplementedError()
def variance_n(lis):
    # Placeholder: not yet implemented.
    raise NotImplementedError()
def standardDeviation(lis):
    # Placeholder: not yet implemented.
    raise NotImplementedError()
def standardDeviation_n(lis):
    # Placeholder: not yet implemented.
    raise NotImplementedError()
class IndexFormats:
    """Expose one subscript index in several display formats.

    ("Fifty ways to leave your lover....")
    """
    def __init__(self, index, item=None):
        self._index = index
        self._number = index + 1  # one-based ordinal
        self._item = item

    def index(self):
        """The zero-based index; also available by calling the instance."""
        return self._index
    __call__ = index

    def number(self):
        """The one-based ordinal."""
        return self._number

    def even(self):
        """True when the one-based ordinal is even."""
        return not self._number % 2

    def odd(self):
        return not self.even()

    def even_i(self):
        """True when the zero-based index is even."""
        return not self._index % 2

    def odd_i(self):
        return not self.even_i()

    def letter(self):
        """Lowercase letter form: 'a' for index 0, 'b' for 1, ..."""
        return self.Letter().lower()

    def Letter(self):
        """Uppercase letter form: 'A' for index 0, 'B' for 1, ..."""
        return chr(ord('A') + self._index)

    def roman(self):
        """Lowercase Roman numeral of the one-based ordinal."""
        return self.Roman().lower()

    def Roman(self):
        """Uppercase Roman numeral of the one-based ordinal."""
        return Roman(self._number)

    def item(self):
        """The wrapped item, if one was supplied."""
        return self._item
########## PRIVATE CLASSES ##############################
class ValuesGetterMixin:
    """Mixin that extracts (optionally projected/filtered) values from the
    wrapped list."""
    def __init__(self, origList):
        self._origList = origList

    def _getValues(self, field=None, criteria=None):
        """Return the list's values.

        When `field` is given, each element is projected through
        lookup_func; when `criteria` is given, only elements for which it
        returns true are kept.
        """
        if field:
            values = [lookup_func(element, field)
                      for element in self._origList]
        else:
            values = self._origList
        if criteria:
            values = [v for v in values if criteria(v)]
        return values
class RecordStats(IndexFormats, ValuesGetterMixin):
    """The statistics that depend on the current record.

    Combines index formatting (via IndexFormats) with whole-list access
    (via ValuesGetterMixin) for the record at `index`.
    """
    def __init__(self, origList, index):
        # Fetch the current record eagerly so a bad index fails fast.
        record = origList[index] # Raises IndexError.
        IndexFormats.__init__(self, index, record)
        ValuesGetterMixin.__init__(self, origList)
    def length(self):
        # Number of records in the underlying list.
        return len(self._origList)
    def first(self):
        # Is this the first record?
        return self._index == 0
    def last(self):
        # Is this the last record?
        return self._index >= len(self._origList) - 1
    def _firstOrLastValue(self, field, currentIndex, otherIndex):
        # True when the (optionally field-projected) value differs between
        # the current record and its neighbor; a missing neighbor counts as
        # a difference.
        # NOTE(review): when currentIndex is 0, otherIndex is -1, which
        # wraps to the *last* element instead of raising IndexError --
        # confirm whether that wrap-around is intended.
        currentValue = self._origList[currentIndex] # Raises IndexError.
        try:
            otherValue = self._origList[otherIndex]
        except IndexError:
            return True
        if field:
            currentValue = lookup_func(currentValue, field)
            otherValue = lookup_func(otherValue, field)
        return currentValue != otherValue
    def firstValue(self, field=None):
        # Does this record start a new run of equal values?
        return self._firstOrLastValue(field, self._index, self._index - 1)
    def lastValue(self, field=None):
        # Does this record end a run of equal values?
        return self._firstOrLastValue(field, self._index, self._index + 1)
    # firstPage and lastPage not implemented. Needed?
    def percentOfTotal(self, field=None, suffix='%', default='N/A', decimals=2):
        # Current value as a percentage of the numeric total; `default` is
        # returned whenever the percentage cannot be computed.
        rec = self._origList[self._index]
        if field:
            val = lookup_func(rec, field)
        else:
            val = rec
        try:
            lis = self._getValues(field, isNumeric)
        except NegativeError:
            return default
        total = sum(lis)
        if total == 0.00: # Avoid ZeroDivisionError.
            return default
        val = float(val)
        try:
            percent = (val / total) * 100
        except ZeroDivisionError:
            return default
        if decimals == 0:
            percent = int(percent)
        else:
            percent = round(percent, decimals)
        if suffix:
            return str(percent) + suffix # String.
        else:
            return percent # Numeric.
    def __call__(self): # Overrides IndexFormats.__call__
        """This instance is not callable, so we override the super method.
        """
        raise NotImplementedError()
    def prev(self):
        # Previous page descriptor, or None at the first record.
        if self._index == 0:
            return None
        else:
            length = self.length()
            start = self._index - length
            return PrevNextPage(self._origList, length, start)
    def next(self):
        # NOTE(review): this condition reduces to `self._index == 0`, which
        # looks wrong for a "next page" test -- presumably it was meant to
        # compare against the end of the list.  Confirm before relying on it.
        if self._index + self.length() == self.length():
            return None
        else:
            length = self.length()
            start = self._index + length
            return PrevNextPage(self._origList, length, start)
    def prevPages(self):
        raise NotImplementedError()
    def nextPages(self):
        raise NotImplementedError()
    prev_batches = prevPages
    next_batches = nextPages
    def summary(self):
        raise NotImplementedError()
    def _prevNextHelper(self, start, end, size, orphan, sequence):
        """Copied from Zope's DT_InSV.py's "opt" function.

        Clamps/derives (start, end, size) for batch navigation.  The bare
        `except:` clauses deliberately use IndexError probing to detect the
        end of `sequence` (kept verbatim from the Zope original).
        """
        if size < 1:
            if start > 0 and end > 0 and end >= start:
                size=end+1-start
            else: size=7
        if start > 0:
            try: sequence[start-1]
            except: start=len(sequence)
            # if start > l: start=l
            if end > 0:
                if end < start: end=start
            else:
                end=start+size-1
                try: sequence[end+orphan-1]
                except: end=len(sequence)
                # if l - end < orphan: end=l
        elif end > 0:
            try: sequence[end-1]
            except: end=len(sequence)
            # if end > l: end=l
            start=end+1-size
            if start - 1 < orphan: start=1
        else:
            start=1
            end=start+size-1
            try: sequence[end+orphan-1]
            except: end=len(sequence)
            # if l - end < orphan: end=l
        return start, end, size
class Summary(ValuesGetterMixin):
    """The summary statistics, that don't depend on the current record.

    All methods accept an optional `field` to project each element through
    lookup_func before aggregating.
    """
    def __init__(self, origList):
        ValuesGetterMixin.__init__(self, origList)

    def sum(self, field=None):
        """Total of the numeric values."""
        lis = self._getValues(field, isNumeric)
        return sum(lis)
    total = sum

    def count(self, field=None):
        """Number of non-None values."""
        lis = self._getValues(field, isNotNone)
        return len(lis)

    def min(self, field=None):
        """Smallest non-None value."""
        lis = self._getValues(field, isNotNone)
        return min(lis)  # Python builtin function min.

    def max(self, field=None):
        """Largest non-None value."""
        lis = self._getValues(field, isNotNone)
        return max(lis)  # Python builtin function max.

    def mean(self, field=None):
        """Mean of the numeric values; always returns a float."""
        lis = self._getValues(field, isNumeric)
        return mean(lis)
    average = mean

    def median(self, field=None):
        """Median of the numeric values."""
        lis = self._getValues(field, isNumeric)
        return median(lis)

    # Bug fix: the four stubs below read `raiseNotImplementedError()`
    # (missing space), which raised NameError for an undefined name
    # instead of the intended NotImplementedError.
    def variance(self, field=None):
        raise NotImplementedError()

    def variance_n(self, field=None):
        raise NotImplementedError()

    def standardDeviation(self, field=None):
        raise NotImplementedError()

    def standardDeviation_n(self, field=None):
        raise NotImplementedError()
class PrevNextPage:
    # Lightweight descriptor of an adjacent page: start/end positions
    # wrapped in IndexFormats, plus the page length.
    def __init__(self, origList, size, start):
        end = start + size
        self.start = IndexFormats(start, origList[start])
        # NOTE(review): `end` is one *past* the page's last index, so
        # origList[end] raises IndexError whenever the page reaches the end
        # of the list -- confirm whether `end - 1` was intended.
        self.end = IndexFormats(end, origList[end])
        self.length = size
########## MAIN PUBLIC CLASS ##############################
class MondoReport:
    """Paginate a list and decorate each record with statistics helpers."""
    _RecordStatsClass = RecordStats
    _SummaryClass = Summary

    def __init__(self, origlist):
        self._origList = origlist

    def page(self, size, start, overlap=0, orphan=0):
        """Return a list of (record, absStats, relStats) triples for one
        page of up to `size` records beginning at index `start`.

        `absStats` indexes the record within the whole list, `relStats`
        within the page.  `overlap` and `orphan` are not implemented and
        must be 0.

        Bug fix: iterate over the records actually present in the slice;
        the original iterated `range(size)` and raised IndexError on a
        final partial page (or when `start` was past the end).
        """
        if overlap != 0:
            raise NotImplementedError("non-zero overlap")
        if orphan != 0:
            raise NotImplementedError("non-zero orphan")
        origList = self._origList
        start = max(0, start)
        end = min(start + size, len(origList))
        mySlice = origList[start:end]
        ret = []
        for rel in range(len(mySlice)):
            abs_ = start + rel
            record = mySlice[rel]
            absStats = self._RecordStatsClass(origList, abs_)
            relStats = self._RecordStatsClass(mySlice, rel)
            ret.append((record, absStats, relStats))
        return ret
    batch = page

    def all(self):
        """One page containing every record."""
        return self.page(len(self._origList), 0, 0, 0)

    def summary(self):
        """Whole-list statistics object."""
        return self._SummaryClass(self._origList)
"""
**********************************
Return a pageful of records from a sequence, with statistics.
in : origlist, list or tuple. The entire set of records. This is
usually a list of objects or a list of dictionaries.
page, int >= 0. Which page to display.
size, int >= 1. How many records per page.
widow, int >=0. Not implemented.
orphan, int >=0. Not implemented.
base, int >=0. Number of first page (usually 0 or 1).
out: list of (o, b) pairs. The records for the current page. 'o' is
the original element from 'origlist' unchanged. 'b' is a Batch
object containing meta-info about 'o'.
exc: IndexError if 'page' or 'size' is < 1. If 'origlist' is empty or
'page' is too high, it returns an empty list rather than raising
an error.
origlist_len = len(origlist)
start = (page + base) * size
end = min(start + size, origlist_len)
ret = []
# widow, orphan calculation: adjust 'start' and 'end' up and down,
# Set 'widow', 'orphan', 'first_nonwidow', 'first_nonorphan' attributes.
for i in range(start, end):
o = origlist[i]
b = Batch(origlist, size, i)
tup = o, b
ret.append(tup)
return ret
def prev(self):
# return a PrevNextPage or None
def next(self):
# return a PrevNextPage or None
def prev_batches(self):
# return a list of SimpleBatch for the previous batches
def next_batches(self):
# return a list of SimpleBatch for the next batches
########## PUBLIC MIXIN CLASS FOR CHEETAH TEMPLATES ##############
class MondoReportMixin:
def batch(self, origList, size=None, start=0, overlap=0, orphan=0):
bat = MondoReport(origList)
return bat.batch(size, start, overlap, orphan)
def batchstats(self, origList):
bat = MondoReport(origList)
return bat.stats()
"""
# vim: shiftwidth=4 tabstop=4 expandtab textwidth=79
| [
[
8,
0,
0.0162,
0.0302,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0323,
0.0022,
0,
0.66,
0.0455,
616,
0,
1,
0,
0,
616,
0,
0
],
[
7,
0,
0.0399,
0.0129,
0,
0.66... | [
"\"\"\"\n@@TR: This code is pretty much unsupported.\n\nMondoReport.py -- Batching module for Python and Cheetah.\n\nVersion 2001-Nov-18. Doesn't do much practical yet, but the companion\ntestMondoReport.py passes all its tests.\n-Mike Orr (Iron)",
"import operator",
"try:\n from functools import reduce\nex... |
# $Id: CGITemplate.py,v 1.6 2006/01/29 02:09:59 tavis_rudd Exp $
"""A subclass of Cheetah.Template for use in CGI scripts.
Usage in a template:
#extends Cheetah.Tools.CGITemplate
#implements respond
$cgiHeaders#slurp
Usage in a template inheriting a Python class:
1. The template
#extends MyPythonClass
#implements respond
$cgiHeaders#slurp
2. The Python class
from Cheetah.Tools import CGITemplate
class MyPythonClass(CGITemplate):
def cgiHeadersHook(self):
return "Content-Type: text/html; charset=koi8-r\n\n"
To read GET/POST variables, use the .webInput method defined in
Cheetah.Utils.WebInputMixin (available in all templates without importing
anything), use Python's 'cgi' module, or make your own arrangements.
This class inherits from Cheetah.Template to make it usable in Cheetah's
single-inheritance model.
Meta-Data
================================================================================
Author: Mike Orr <iron@mso.oz.net>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.6 $
Start Date: 2001/10/03
Last Revision Date: $Date: 2006/01/29 02:09:59 $
"""
__author__ = "Mike Orr <iron@mso.oz.net>"
__revision__ = "$Revision: 1.6 $"[11:-2]
import os
from Cheetah.Template import Template
class CGITemplate(Template):
    """Methods useful in CGI scripts.

    Any class that inherits this mixin must also inherit Cheetah.Servlet.
    """
    def cgiHeaders(self):
        """Return the CGI headers when running as a CGI script, else None.

        Usage: $cgiHeaders#slurp
        Override .cgiHeadersHook() to customize the headers.
        """
        if not self.isCgi():
            return None
        return self.cgiHeadersHook()

    def cgiHeadersHook(self):
        """Override to customize the CGI headers."""
        return "Content-type: text/html\n\n"

    def isCgi(self):
        """True when invoked as a CGI script and not controlled by WebKit."""
        inCgiEnv = 'REQUEST_METHOD' in os.environ
        underWebKit = self._CHEETAH__isControlledByWebKit
        return inCgiEnv and not underWebKit
# vim: shiftwidth=4 tabstop=4 expandtab
| [
[
8,
0,
0.2532,
0.4675,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.4935,
0.013,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.5065,
0.013,
0,
0.66,
... | [
"\"\"\"A subclass of Cheetah.Template for use in CGI scripts.\n\nUsage in a template:\n #extends Cheetah.Tools.CGITemplate\n #implements respond\n $cgiHeaders#slurp\n\nUsage in a template inheriting a Python class:",
"__author__ = \"Mike Orr <iron@mso.oz.net>\"",
"__revision__ = \"$Revision: 1.6 $\"[11... |
# $Id: SiteHierarchy.py,v 1.1 2001/10/11 03:25:54 tavis_rudd Exp $
"""Create menus and crumbs from a site hierarchy.
You define the site hierarchy as lists/tuples. Each location in the hierarchy
is a (url, description) tuple. Each list has the base URL/text in the 0
position, and all the children coming after it. Any child can be a list,
representing further depth to the hierarchy. See the end of the file for an
example hierarchy.
Use Hierarchy(contents, currentURL), where contents is this hierarchy, and
currentURL is the position you are currently in. The menubar and crumbs methods
give you the HTML output.
There are methods you can override to customize the HTML output.
"""
##################################################
## DEPENDENCIES
import string
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
##################################################
## CLASSES
class Hierarchy:
    """Render a site hierarchy (nested (url, text) tuples/lists) as an
    indented menu (menuList) or a breadcrumb trail (crumbs)."""

    def __init__(self, hierarchy, currentURL, prefix='', menuCSSClass=None,
                 crumbCSSClass=None):
        """
        hierarchy is described above, currentURL should be somewhere in
        the hierarchy. prefix will be added before all of the URLs (to
        help mitigate the problems with absolute URLs), and if given,
        cssClass will be used for both links *and* nonlinks.
        """
        self._contents = hierarchy
        self._currentURL = currentURL
        if menuCSSClass:
            self._menuCSSClass = ' class="%s"' % menuCSSClass
        else:
            self._menuCSSClass = ''
        if crumbCSSClass:
            self._crumbCSSClass = ' class="%s"' % crumbCSSClass
        else:
            self._crumbCSSClass = ''
        self._prefix = prefix

    ## Main output methods

    def menuList(self, menuCSSClass=None):
        """An indented menu list"""
        if menuCSSClass:
            self._menuCSSClass = ' class="%s"' % menuCSSClass
        stream = StringIO()
        for item in self._contents[1:]:
            self._menubarRecurse(item, 0, stream)
        return stream.getvalue()

    def crumbs(self, crumbCSSClass=None):
        """The home>where>you>are crumbs"""
        if crumbCSSClass:
            self._crumbCSSClass = ' class="%s"' % crumbCSSClass
        path = []
        pos = self._contents
        while True:
            ## Linear search each level; fine for realistic hierarchies.
            foundAny = False
            path.append(pos[0])
            for item in pos[1:]:
                if self._inContents(item):
                    if isinstance(item, tuple):
                        path.append(item)
                        break
                    else:
                        pos = item
                        foundAny = True
                        break
            if not foundAny:
                break
        if len(path) == 1:
            return self.emptyCrumb()
        # str.join replaces the deprecated string.join (same output).
        links = [self.crumbLink(url, text) for url, text in path]
        return self.crumbSeperator().join(links) + self.crumbTerminator()

    ## Methods to control the Aesthetics
    # - override these methods for your own look

    def menuLink(self, url, text, indent):
        pad = ' ' * 2 * indent
        if url == self._currentURL or self._prefix + url == self._currentURL:
            return '%s<B%s>%s</B> <BR>\n' % (pad, self._menuCSSClass, text)
        return '%s<A HREF="%s%s"%s>%s</A> <BR>\n' % (
            pad, self._prefix, url, self._menuCSSClass, text)

    def crumbLink(self, url, text):
        if url == self._currentURL or self._prefix + url == self._currentURL:
            # Bug fix: the format arguments were swapped here, producing
            # '<B{text}>{cssClass}</B>' instead of a proper bold entry
            # (compare menuLink's current-URL branch).
            return '<B%s>%s</B>' % (self._crumbCSSClass, text)
        return '<A HREF="%s%s"%s>%s</A>' % (
            self._prefix, url, self._crumbCSSClass, text)

    def crumbSeperator(self):
        return ' > '

    def crumbTerminator(self):
        return ''

    def emptyCrumb(self):
        """When you are at the homepage"""
        return ''

    ## internal methods

    def _menubarRecurse(self, contents, indent, stream):
        # A tuple is a leaf; a list is (head, *children).
        if isinstance(contents, tuple):
            url, text = contents
            rest = []
        else:
            url, text = contents[0]
            rest = contents[1:]
        stream.write(self.menuLink(url, text, indent))
        # Only expand children along the branch containing the current URL.
        if self._inContents(contents):
            for item in rest:
                self._menubarRecurse(item, indent + 1, stream)

    def _inContents(self, contents):
        # Is the current URL this node or anywhere beneath it?
        if isinstance(contents, tuple):
            return self._currentURL == contents[0]
        for item in contents:
            if self._inContents(item):
                return True
        return False
##################################################
## from the command line
if __name__ == '__main__':
    # Ad-hoc demo: render the menu and crumbs for a sample hierarchy at
    # several "current" URLs and print the HTML to stdout.
    hierarchy = [('/', 'home'),
                 ('/about', 'About Us'),
                 [('/services', 'Services'),
                  [('/services/products', 'Products'),
                   ('/services/products/widget', 'The Widget'),
                   ('/services/products/wedge', 'The Wedge'),
                   ('/services/products/thimble', 'The Thimble'),
                   ],
                  ('/services/prices', 'Prices'),
                  ],
                 ('/contact', 'Contact Us'),
                 ]
    for url in ['/', '/services', '/services/products/widget', '/contact']:
        print('<p>', '='*50)
        print('<br> %s: <br>\n' % url)
        n = Hierarchy(hierarchy, url, menuCSSClass='menu', crumbCSSClass='crumb',
                      prefix='/here')
        print(n.menuList())
        print('<p>', '-'*50)
        print(n.crumbs())
| [
[
8,
0,
0.0512,
0.0843,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1145,
0.006,
0,
0.66,
0.25,
890,
0,
1,
0,
0,
890,
0,
0
],
[
7,
0,
0.1295,
0.0241,
0,
0.66,
... | [
"\"\"\"Create menus and crumbs from a site hierarchy.\n\nYou define the site hierarchy as lists/tuples. Each location in the hierarchy\nis a (url, description) tuple. Each list has the base URL/text in the 0\nposition, and all the children coming after it. Any child can be a list,\nrepresenting further depth to ... |
"""
Nothing, but in a friendly way. Good for filling in for objects you want to
hide. If $form.f1 is a RecursiveNull object, then
$form.f1.anything["you"].might("use") will resolve to the empty string.
This module was contributed by Ian Bicking.
"""
class RecursiveNull(object):
    """A friendly null object: every attribute, item and call resolves back
    to the same instance, it stringifies to '' and is falsy, so it can
    transparently stand in for objects you want to hide.
    """
    def __getattr__(self, attr):
        return self

    def __getitem__(self, item):
        return self

    def __call__(self, *args, **kwargs):
        return self

    def __str__(self):
        return ''

    def __repr__(self):
        return ''

    def __nonzero__(self):  # Python 2 truth protocol
        return 0

    # Bug fix: Python 3 ignores __nonzero__, so instances were truthy
    # there, defeating the class's purpose.  __bool__ must return a real
    # bool on Python 3.
    def __bool__(self):
        return False

    def __eq__(self, x):
        # Equal only to falsy values.
        return not x

    def __ne__(self, x):
        return bool(x)
| [
[
8,
0,
0.1429,
0.25,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
3,
0,
0.6429,
0.6786,
0,
0.66,
1,
956,
0,
8,
0,
0,
186,
0,
0
],
[
2,
1,
0.375,
0.0714,
1,
0.74,
0,... | [
"\"\"\"\nNothing, but in a friendly way. Good for filling in for objects you want to\nhide. If $form.f1 is a RecursiveNull object, then\n$form.f1.anything[\"you\"].might(\"use\") will resolve to the empty string.\n\nThis module was contributed by Ian Bicking.\n\"\"\"",
"class RecursiveNull(object):\n def __g... |
"""This package contains classes, functions, objects and packages contributed
by Cheetah users. They are not used by Cheetah itself. There is no
guarantee that this directory will be included in Cheetah releases, that
these objects will remain here forever, or that they will remain
backward-compatible.
"""
# vim: shiftwidth=5 tabstop=5 expandtab
| [
[
8,
0,
0.4375,
0.75,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
]
] | [
"\"\"\"This package contains classes, functions, objects and packages contributed\n by Cheetah users. They are not used by Cheetah itself. There is no\n guarantee that this directory will be included in Cheetah releases, that\n these objects will remain here forever, or that they will remain\n backward-co... |
# Version of this Cheetah release, as a display string and as a comparable
# tuple: (major, minor, micro, releaseType, releaseSubNum).
Version = '2.4.1'
VersionTuple = (2, 4, 1, 'final', 0)
# Oldest release considered compatible with this one (presumably for
# previously compiled templates -- verify against callers).
MinCompatibleVersion = '2.0rc6'
MinCompatibleVersionTuple = (2, 0, 0, 'candidate', 6)
####
def convertVersionStringToTuple(s):
    """Convert a version string like '2.0rc1' to a comparable tuple
    (major, minor, micro, releaseType, releaseSubNum), e.g.
    (2, 0, 0, 'candidate', 1).  Missing numeric parts default to 0.

    Release markers: 'a' -> alpha, 'b' -> beta, 'rc' -> candidate; a bare
    number is 'final'.  Tuples compare correctly because
    'alpha' < 'beta' < 'candidate' < 'final' alphabetically.

    Cleanup: uses `in` instead of `.find(...) != -1` and drops the
    original's unreachable `if len(versionNum) < 3` branch (versionNum is
    always initialized with three elements).
    """
    versionNum = [0, 0, 0]
    releaseType = 'final'
    releaseTypeSubNum = 0
    if 'a' in s:
        num, releaseTypeSubNum = s.split('a')
        releaseType = 'alpha'
    elif 'b' in s:
        num, releaseTypeSubNum = s.split('b')
        releaseType = 'beta'
    elif 'rc' in s:
        num, releaseTypeSubNum = s.split('rc')
        releaseType = 'candidate'
    else:
        num = s
    for i, part in enumerate(num.split('.')):
        versionNum[i] = int(part)
    return tuple(versionNum + [releaseType, int(releaseTypeSubNum)])
if __name__ == '__main__':
    # Smoke tests: print some conversions and assert the expected ordering
    # of alpha < beta < candidate < final releases.
    c = convertVersionStringToTuple
    print(c('2.0a1'))
    print(c('2.0b1'))
    print(c('2.0rc1'))
    print(c('2.0'))
    print(c('2.0.2'))
    assert c('0.9.19b1') < c('0.9.19')
    assert c('0.9b1') < c('0.9.19')
    assert c('2.0a2') > c('2.0a1')
    assert c('2.0b1') > c('2.0a2')
    assert c('2.0b2') > c('2.0b1')
    assert c('2.0b2') == c('2.0b2')
    assert c('2.0rc1') > c('2.0b1')
    assert c('2.0rc2') > c('2.0rc1')
    assert c('2.0rc2') > c('2.0b1')
    assert c('2.0') > c('2.0a1')
    assert c('2.0') > c('2.0b1')
    assert c('2.0') > c('2.0rc1')
    assert c('2.0.1') > c('2.0')
    assert c('2.0rc1') > c('2.0b1')
| [
[
14,
0,
0.0172,
0.0172,
0,
0.66,
0,
444,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0345,
0.0172,
0,
0.66,
0.2,
292,
0,
0,
0,
0,
0,
8,
0
],
[
14,
0,
0.069,
0.0172,
0,
0.66,... | [
"Version = '2.4.1'",
"VersionTuple = (2, 4, 1, 'final', 0)",
"MinCompatibleVersion = '2.0rc6'",
"MinCompatibleVersionTuple = (2, 0, 0, 'candidate', 6)",
"def convertVersionStringToTuple(s):\n versionNum = [0, 0, 0]\n releaseType = 'final'\n releaseTypeSubNum = 0\n if s.find('a')!=-1:\n nu... |
"""SourceReader class for Cheetah's Parser and CodeGenerator
"""
import re
import sys
EOLre = re.compile(r'[ \f\t]*(?:\r\n|\r|\n)')
EOLZre = re.compile(r'(?:\r\n|\r|\n|\Z)')
ENCODINGsearch = re.compile("coding[=:]\s*([-\w.]+)").search
class Error(Exception):
    """Raised by SourceReader for invalid positions or unknown bookmarks."""
class SourceReader(object):
    """Wrap template source text with a movable read position, bookmarks,
    and row/column reporting, for use by Cheetah's Parser/CodeGenerator.
    An optional breakPoint caps how far reads may advance into the source.
    """
    def __init__(self, src, filename=None, breakPoint=None, encoding=None):
        ## @@TR 2005-01-17: the following comes from a patch Terrel Shumway
        ## contributed to add unicode support to the reading of Cheetah source
        ## files with dynamically compiled templates. All the existing unit
        ## tests pass but, it needs more testing and some test cases of its
        ## own. My instinct is to move this up into the code that passes in the
        ## src string rather than leaving it here. As implemented here it
        ## forces all src strings to unicode, which IMO is not what we want.
        # if encoding is None:
        #    # peek at the encoding in the first two lines
        #    m = EOLZre.search(src)
        #    pos = m.end()
        #    if pos<len(src):
        #        m = EOLZre.search(src,pos)
        #        pos = m.end()
        #    m = ENCODINGsearch(src,0,pos)
        #    if m:
        #        encoding = m.group(1)
        #    else:
        #        encoding = sys.getfilesystemencoding()
        #    self._encoding = encoding
        #    if type(src) is not unicode:
        #        src = src.decode(encoding)
        ## end of Terrel's patch
        self._src = src
        self._filename = filename
        self._srcLen = len(src)
        # Default breakPoint is the end of the source string.
        if breakPoint == None:
            self._breakPoint = self._srcLen
        else:
            self.setBreakPoint(breakPoint)
        self._pos = 0
        self._bookmarks = {}
        self._posTobookmarkMap = {}
        ## collect some meta-information: the offset of every end-of-line
        ## (and, below, every beginning-of-line) up to the breakPoint.
        self._EOLs = []
        pos = 0
        while pos < len(self):
            EOLmatch = EOLZre.search(src, pos)
            self._EOLs.append(EOLmatch.start())
            pos = EOLmatch.end()
        self._BOLs = []
        for pos in self._EOLs:
            BOLpos = self.findBOL(pos)
            self._BOLs.append(BOLpos)
    def src(self):
        # The full underlying source string.
        return self._src
    def filename(self):
        return self._filename
    def __len__(self):
        # Length is the breakPoint, not the full source length.
        return self._breakPoint
    def __getitem__(self, i):
        self.checkPos(i)
        return self._src[i]
    def __getslice__(self, i, j):
        # Python 2 slicing protocol only; ignored by Python 3.
        i = max(i, 0); j = max(j, 0)
        return self._src[i:j]
    def splitlines(self):
        # Lazily computed and cached list of source lines.
        if not hasattr(self, '_srcLines'):
            self._srcLines = self._src.splitlines()
        return self._srcLines
    def lineNum(self, pos=None):
        # Zero-based line number containing pos (None if pos sits on an EOL
        # sequence between lines).
        if pos == None:
            pos = self._pos
        for i in range(len(self._BOLs)):
            if pos >= self._BOLs[i] and pos <= self._EOLs[i]:
                return i
    def getRowCol(self, pos=None):
        # One-based (row, column) for pos.
        if pos == None:
            pos = self._pos
        lineNum = self.lineNum(pos)
        BOL, EOL = self._BOLs[lineNum], self._EOLs[lineNum]
        return lineNum+1, pos-BOL+1
    def getRowColLine(self, pos=None):
        # (row, column, line-text) for pos -- used in error messages.
        if pos == None:
            pos = self._pos
        row, col = self.getRowCol(pos)
        return row, col, self.splitlines()[row-1]
    def getLine(self, pos):
        if pos == None:
            pos = self._pos
        lineNum = self.lineNum(pos)
        return self.splitlines()[lineNum]
    def pos(self):
        return self._pos
    def setPos(self, pos):
        self.checkPos(pos)
        self._pos = pos
    def validPos(self, pos):
        # A position may equal the breakPoint (one past the last readable char).
        return pos <= self._breakPoint and pos >=0
    def checkPos(self, pos):
        # Raise Error (not IndexError) for out-of-range positions.
        if not pos <= self._breakPoint:
            raise Error("pos (" + str(pos) + ") is invalid: beyond the stream's end (" +
                        str(self._breakPoint-1) + ")" )
        elif not pos >=0:
            raise Error("pos (" + str(pos) + ") is invalid: less than 0" )
    def breakPoint(self):
        return self._breakPoint
    def setBreakPoint(self, pos):
        if pos > self._srcLen:
            raise Error("New breakpoint (" + str(pos) +
                        ") is invalid: beyond the end of stream's source string (" +
                        str(self._srcLen) + ")" )
        elif not pos >= 0:
            raise Error("New breakpoint (" + str(pos) + ") is invalid: less than 0" )
        self._breakPoint = pos
    def setBookmark(self, name):
        # Record the current position under `name` (and the reverse mapping).
        self._bookmarks[name] = self._pos
        self._posTobookmarkMap[self._pos] = name
    def hasBookmark(self, name):
        return name in self._bookmarks
    def gotoBookmark(self, name):
        if not self.hasBookmark(name):
            raise Error("Invalid bookmark (" + name + ") is invalid: does not exist")
        pos = self._bookmarks[name]
        if not self.validPos(pos):
            raise Error("Invalid bookmark (" + name + ', '+
                        str(pos) + ") is invalid: pos is out of range" )
        self._pos = pos
    def atEnd(self):
        return self._pos >= self._breakPoint
    def atStart(self):
        return self._pos == 0
    def peek(self, offset=0):
        # Look at a character without consuming it.
        self.checkPos(self._pos+offset)
        pos = self._pos + offset
        return self._src[pos]
    def getc(self):
        # Return the current character; advance only if not at the end.
        pos = self._pos
        if self.validPos(pos+1):
            self._pos += 1
        return self._src[pos]
    def ungetc(self, c=None):
        # NOTE(review): this guard looks inverted -- it raises unless the
        # reader is at the start, and at the start it rewinds _pos to -1.
        # Also, strings are immutable, so `self._src[self._pos] = c` would
        # raise TypeError if that branch were ever reached.  Confirm intent.
        if not self.atStart():
            raise Error('Already at beginning of stream')
        self._pos -= 1
        if not c==None:
            self._src[self._pos] = c
    def advance(self, offset=1):
        self.checkPos(self._pos + offset)
        self._pos += offset
    def rev(self, offset=1):
        # Move the position backwards.
        self.checkPos(self._pos - offset)
        self._pos -= offset
    def read(self, offset):
        # Consume and return the next `offset` characters.
        self.checkPos(self._pos + offset)
        start = self._pos
        self._pos += offset
        return self._src[start:self._pos]
    def readTo(self, to, start=None):
        # Consume up to absolute position `to` (optionally from `start`).
        self.checkPos(to)
        if start == None:
            start = self._pos
        self._pos = to
        return self._src[start:to]
    def readToEOL(self, start=None, gobble=True):
        # Read to the end of the current line; `gobble` consumes the EOL too.
        EOLmatch = EOLZre.search(self.src(), self.pos())
        if gobble:
            pos = EOLmatch.end()
        else:
            pos = EOLmatch.start()
        return self.readTo(to=pos, start=start)
    def find(self, it, pos=None):
        if pos == None:
            pos = self._pos
        return self._src.find(it, pos )
    def startswith(self, it, pos=None):
        if self.find(it, pos) == self.pos():
            return True
        else:
            return False
    def rfind(self, it, pos):
        if pos == None:
            pos = self._pos
        return self._src.rfind(it, pos)
    def findBOL(self, pos=None):
        # Offset of the beginning of the line containing pos (handles \n and \r).
        if pos == None:
            pos = self._pos
        src = self.src()
        return max(src.rfind('\n', 0, pos)+1, src.rfind('\r', 0, pos)+1, 0)
    def findEOL(self, pos=None, gobble=False):
        if pos == None:
            pos = self._pos
        match = EOLZre.search(self.src(), pos)
        if gobble:
            return match.end()
        else:
            return match.start()
    def isLineClearToPos(self, pos=None):
        # True when only whitespace precedes pos on its line.
        if pos == None:
            pos = self.pos()
        self.checkPos(pos)
        src = self.src()
        BOL = self.findBOL()
        return BOL == pos or src[BOL:pos].isspace()
    def matches(self, strOrRE):
        # NOTE(review): `unicode` only exists on Python 2.
        if isinstance(strOrRE, (str, unicode)):
            return self.startswith(strOrRE, pos=self.pos())
        else: # assume an re object
            return strOrRE.match(self.src(), self.pos())
    def matchWhiteSpace(self, WSchars=' \f\t'):
        return (not self.atEnd()) and self.peek() in WSchars
    def getWhiteSpace(self, max=None, WSchars=' \f\t'):
        # Consume and return a run of whitespace, at most `max` characters.
        if not self.matchWhiteSpace(WSchars):
            return ''
        start = self.pos()
        breakPoint = self.breakPoint()
        if max is not None:
            breakPoint = min(breakPoint, self.pos()+max)
        while self.pos() < breakPoint:
            self.advance()
            if not self.matchWhiteSpace(WSchars):
                break
        return self.src()[start:self.pos()]
    def matchNonWhiteSpace(self, WSchars=' \f\t\n\r'):
        return self.atEnd() or not self.peek() in WSchars
    def getNonWhiteSpace(self, WSchars=' \f\t\n\r'):
        # Consume and return a run of non-whitespace characters.
        if not self.matchNonWhiteSpace(WSchars):
            return ''
        start = self.pos()
        while self.pos() < self.breakPoint():
            self.advance()
            if not self.matchNonWhiteSpace(WSchars):
                break
        return self.src()[start:self.pos()]
| [
[
8,
0,
0.0052,
0.0069,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0103,
0.0034,
0,
0.66,
0.1429,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0138,
0.0034,
0,
0.66... | [
"\"\"\"SourceReader class for Cheetah's Parser and CodeGenerator\n\"\"\"",
"import re",
"import sys",
"EOLre = re.compile(r'[ \\f\\t]*(?:\\r\\n|\\r|\\n)')",
"EOLZre = re.compile(r'(?:\\r\\n|\\r|\\n|\\Z)')",
"ENCODINGsearch = re.compile(\"coding[=:]\\s*([-\\w.]+)\").search",
"class Error(Exception):\n ... |
import gettext
_ = gettext.gettext
class I18n(object):
    """Stub implementation of the #i18n macro: looks the message up via
    gettext and re-appends the EOL when used in short form."""

    def __init__(self, parser):
        pass

    def __call__(self,
                 src,  # aka message,
                 plural=None,
                 n=None,  # should be a string naming the '$var', not $var itself
                 id=None,
                 domain=None,
                 source=None,
                 target=None,
                 comment=None,
                 # args supplied automatically by the parser when the
                 # macro is called:
                 parser=None,
                 macros=None,
                 isShortForm=False,
                 EOLCharsInShortForm=None,
                 startPos=None,
                 endPos=None,
                 ):
        """This is just a stub at this time.

        plural = the plural form of the message
        n = a sized argument to distinguish between single and plural forms
        id = msgid in the translation catalog
        domain = translation domain
        source = source lang
        target = a specific target lang
        comment = a comment to the translation team

        See the following for some ideas
        http://www.zope.org/DevHome/Wikis/DevSite/Projects/ComponentArchitecture/ZPTInternationalizationSupport

        Other notes:
        - There is no need to replicate the i18n:name attribute from
          plone / PTL, as cheetah placeholders serve the same purpose
        """
        message = _(src)
        if isShortForm and endPos < len(parser):
            return message + EOLCharsInShortForm
        return message
| [
[
1,
0,
0.0149,
0.0149,
0,
0.66,
0,
723,
0,
1,
0,
0,
723,
0,
0
],
[
14,
0,
0.0299,
0.0149,
0,
0.66,
0.5,
660,
7,
0,
0,
0,
0,
0,
0
],
[
3,
0,
0.5149,
0.9552,
0,
0.66... | [
"import gettext",
"_ = gettext.gettext",
"class I18n(object):\n def __init__(self, parser):\n pass\n\n## junk I'm playing with to test the macro framework \n# def parseArgs(self, parser, startPos):\n# parser.getWhiteSpace()\n# args = parser.getExpression(useNameMapper=False,",
" ... |
#
| [] | [] |
# $Id: ErrorCatchers.py,v 1.7 2005/01/03 19:59:07 tavis_rudd Exp $
"""ErrorCatcher class for Cheetah Templates
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
Version: $Revision: 1.7 $
Start Date: 2001/08/01
Last Revision Date: $Date: 2005/01/03 19:59:07 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.7 $"[11:-2]
import time
from Cheetah.NameMapper import NotFound
class Error(Exception):
    """Base exception for this module."""
class ErrorCatcher:
    """Base error catcher: intercepts NameMapper NotFound errors and, by
    default, echoes the raw placeholder code unchanged."""
    _exceptionsToCatch = (NotFound,)

    def __init__(self, templateObj):
        pass

    def exceptions(self):
        """The tuple of exception types this catcher intercepts."""
        return self._exceptionsToCatch

    def warn(self, exc_val, code, rawCode, lineCol):
        """Handle a caught exception; the default just echoes rawCode."""
        return rawCode
## make an alias
Echo = ErrorCatcher
class BigEcho(ErrorCatcher):
    """Echo the failed placeholder wrapped in a loud '=' banner."""
    def warn(self, exc_val, code, rawCode, lineCol):
        banner = '=' * 15
        return '%s<%s could not be found>%s' % (banner, rawCode, banner)
class KeyError(ErrorCatcher):
    """Error catcher that raises for placeholders missing from the
    searchList.

    The class name mirrors the builtin so templates can request it as
    `#errorCatcher KeyError`, but that means it shadows the builtin at
    module scope.
    """
    def warn(self, exc_val, code, rawCode, lineCol):
        # Bug fix: a bare `raise KeyError(...)` here resolved to *this*
        # class (the module-scope shadow), which is not an Exception
        # subclass -- on Python 3 raising it fails with a TypeError.
        # Fetch the real builtin explicitly (works on Python 2 and 3).
        try:
            import builtins  # Python 3
        except ImportError:
            import __builtin__ as builtins  # Python 2
        raise builtins.KeyError(
            "no '%s' in this Template Object's Search List" % rawCode)
class ListErrors(ErrorCatcher):
    """Accumulate a list of errors."""
    _timeFormat = "%c"

    def __init__(self, templateObj):
        ErrorCatcher.__init__(self, templateObj)
        self._errors = []

    def warn(self, exc_val, code, rawCode, lineCol):
        """Record the failure details plus a timestamp, then echo rawCode."""
        when = time.strftime(self._timeFormat, time.localtime(time.time()))
        self._errors.append({
            'exc_val': exc_val,
            'code': code,
            'rawCode': rawCode,
            'lineCol': lineCol,
            'time': when,
        })
        return rawCode

    def listErrors(self):
        """Return the list of errors."""
        return self._errors
| [
[
8,
0,
0.0968,
0.1452,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1774,
0.0161,
0,
0.66,
0.1,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.1935,
0.0161,
0,
0.66,
... | [
"\"\"\"ErrorCatcher class for Cheetah Templates\n\nMeta-Data\n================================================================================\nAuthor: Tavis Rudd <tavis@damnsimple.com>\nVersion: $Revision: 1.7 $\nStart Date: 2001/08/01\nLast Revision Date: $Date: 2005/01/03 19:59:07 $",
"__author__ = \"Tavis Rud... |
'''
Compiler classes for Cheetah:
ModuleCompiler aka 'Compiler'
ClassCompiler
MethodCompiler
If you are trying to grok this code start with ModuleCompiler.__init__,
ModuleCompiler.compile, and ModuleCompiler.__getattr__.
'''
import sys
import os
import os.path
from os.path import getmtime, exists
import re
import types
import time
import random
import warnings
import copy
from Cheetah.Version import Version, VersionTuple
from Cheetah.SettingsManager import SettingsManager
from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor
from Cheetah import ErrorCatchers
from Cheetah import NameMapper
from Cheetah.Parser import Parser, ParseError, specialVarRE, \
STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL, SET_MODULE, \
unicodeDirectiveRE, encodingDirectiveRE, escapedNewlineRE
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
class Error(Exception): pass
# Settings format: (key, default, docstring)
# Master list of every compiler setting; consumed below to build the
# key->default mapping DEFAULT_COMPILER_SETTINGS.
_DEFAULT_COMPILER_SETTINGS = [
    ## NameMapper / searchList behaviour
    ('useNameMapper', True, 'Enable NameMapper for dotted notation and searchList support'),
    ('useSearchList', True, 'Enable the searchList, requires useNameMapper=True, if disabled, first portion of the $variable is a global, builtin, or local variable that doesn\'t need looking up in the searchList'),
    ('allowSearchListAsMethArg', True, ''),
    ('useAutocalling', True, 'Detect and call callable objects in searchList, requires useNameMapper=True'),
    ('useStackFrames', True, 'Used for NameMapper.valueFromFrameOrSearchList rather than NameMapper.valueFromSearchList'),
    ('useErrorCatcher', False, 'Turn on the #errorCatcher directive for catching NameMapper errors, etc'),
    ('alwaysFilterNone', True, 'Filter out None prior to calling the #filter'),
    ('useFilters', True, 'If False, pass output through str()'),
    ('includeRawExprInFilterArgs', True, ''),
    ('useLegacyImportMode', True, 'All #import statements are relocated to the top of the generated Python module'),
    ('prioritizeSearchListOverSelf', False, 'When iterating the searchList, look into the searchList passed into the initializer instead of Template members first'),
    ## Generated-code / output formatting options
    ('autoAssignDummyTransactionToSelf', False, ''),
    ('useKWsDictArgForPassingTrans', True, ''),
    ('commentOffset', 1, ''),
    ('outputRowColComments', True, ''),
    ('includeBlockMarkers', False, 'Wrap #block\'s in a comment in the template\'s output'),
    ('blockMarkerStart', ('\n<!-- START BLOCK: ', ' -->\n'), ''),
    ('blockMarkerEnd', ('\n<!-- END BLOCK: ', ' -->\n'), ''),
    ('defDocStrMsg', 'Autogenerated by Cheetah: The Python-Powered Template Engine', ''),
    ('setup__str__method', False, ''),
    ('mainMethodName', 'respond', ''),
    ('mainMethodNameForSubclasses', 'writeBody', ''),
    ('indentationStep', ' ' * 4, ''),
    ('initialMethIndentLevel', 2, ''),
    ('monitorSrcFile', False, ''),
    ('outputMethodsBeforeAttributes', True, ''),
    ('addTimestampsToCompilerOutput', True, ''),
    ## Customizing the #extends directive
    ('autoImportForExtendsDirective', True, ''),
    ('handlerForExtendsDirective', None, ''),
    ## Parser hooks / directive enable-disable switches
    ('disabledDirectives', [], 'List of directive keys to disable (without starting "#")'),
    ('enabledDirectives', [], 'List of directive keys to enable (without starting "#")'),
    ('disabledDirectiveHooks', [], 'callable(parser, directiveKey)'),
    ('preparseDirectiveHooks', [], 'callable(parser, directiveKey)'),
    ('postparseDirectiveHooks', [], 'callable(parser, directiveKey)'),
    ('preparsePlaceholderHooks', [], 'callable(parser)'),
    ('postparsePlaceholderHooks', [], 'callable(parser)'),
    ('expressionFilterHooks', [], '''callable(parser, expr, exprType, rawExpr=None, startPos=None), exprType is the name of the directive, "psp" or "placeholder" The filters *must* return the expr or raise an expression, they can modify the expr if needed'''),
    ('templateMetaclass', None, 'Strictly optional, only will work with new-style basecalsses as well'),
    ('i18NFunctionName', 'self.i18n', ''),
    ## Template syntax tokens
    ('cheetahVarStartToken', '$', ''),
    ('commentStartToken', '##', ''),
    ('multiLineCommentStartToken', '#*', ''),
    ('multiLineCommentEndToken', '*#', ''),
    ('gobbleWhitespaceAroundMultiLineComments', True, ''),
    ('directiveStartToken', '#', ''),
    ('directiveEndToken', '#', ''),
    ('allowWhitespaceAfterDirectiveStartToken', False, ''),
    ('PSPStartToken', '<%', ''),
    ('PSPEndToken', '%>', ''),
    ('EOLSlurpToken', '#', ''),
    ('gettextTokens', ["_", "N_", "ngettext"], ''),
    ('allowExpressionsInExtendsDirective', False, ''),
    ('allowEmptySingleLineMethods', False, ''),
    ('allowNestedDefScopes', True, ''),
    ('allowPlaceholderFilterArgs', True, ''),
    ]
# key -> default value mapping derived from the list above (docstrings dropped)
DEFAULT_COMPILER_SETTINGS = dict([(v[0], v[1]) for v in _DEFAULT_COMPILER_SETTINGS])
class GenUtils(object):
    """An abstract baseclass for the Compiler classes that provides methods that
    perform generic utility functions or generate pieces of output code from
    information passed in by the Parser baseclass. These methods don't do any
    parsing themselves.
    """
    def genTimeInterval(self, timeString):
        """Convert a duration spec like '5s'/'2m'/'1h'/'3d'/'1w' to seconds.

        A bare number (no unit suffix) is interpreted as minutes.
        """
        ##@@ TR: need to add some error handling here
        if timeString[-1] == 's':
            interval = float(timeString[:-1])
        elif timeString[-1] == 'm':
            interval = float(timeString[:-1])*60
        elif timeString[-1] == 'h':
            interval = float(timeString[:-1])*60*60
        elif timeString[-1] == 'd':
            interval = float(timeString[:-1])*60*60*24
        elif timeString[-1] == 'w':
            interval = float(timeString[:-1])*60*60*24*7
        else: # default to minutes
            interval = float(timeString)*60
        return interval
    def genCacheInfo(self, cacheTokenParts):
        """Decipher a placeholder cachetoken into a cacheInfo dict.

        Returns {} when the token requests no caching; otherwise sets
        'type' (REFRESH_CACHE or STATIC_CACHE) and, for refresh caches,
        'interval' in seconds.
        """
        cacheInfo = {}
        if cacheTokenParts['REFRESH_CACHE']:
            cacheInfo['type'] = REFRESH_CACHE
            cacheInfo['interval'] = self.genTimeInterval(cacheTokenParts['interval'])
        elif cacheTokenParts['STATIC_CACHE']:
            cacheInfo['type'] = STATIC_CACHE
        return cacheInfo # is empty if no cache
    def genCacheInfoFromArgList(self, argList):
        """Build a cacheInfo dict from #cache directive (key, value) args.

        Quoted values are unquoted; the 'timer' key is normalized to
        'interval' (converted to seconds via genTimeInterval).
        """
        cacheInfo = {'type':REFRESH_CACHE}
        for key, val in argList:
            if val[0] in '"\'':
                # strip surrounding quotes from quoted argument values
                val = val[1:-1]
            if key == 'timer':
                key = 'interval'
                val = self.genTimeInterval(val)
            cacheInfo[key] = val
        return cacheInfo
    def genCheetahVar(self, nameChunks, plain=False):
        """Return the Python expression for a Cheetah $var.

        Dispatches to genNameMapperVar unless NameMapper is disabled or
        plain=True, in which case plain dotted notation is emitted.
        """
        # gettext marker functions (e.g. _, N_) get a dummy plain-var copy
        # emitted so string-extraction tools can find them
        if nameChunks[0][0] in self.setting('gettextTokens'):
            self.addGetTextVar(nameChunks)
        if self.setting('useNameMapper') and not plain:
            return self.genNameMapperVar(nameChunks)
        else:
            return self.genPlainVar(nameChunks)
    def addGetTextVar(self, nameChunks):
        """Output something that gettext can recognize.
        This is a harmless side effect necessary to make gettext work when it
        is scanning compiled templates for strings marked for translation.
        @@TR: another marginally more efficient approach would be to put the
        output in a dummy method that is never called.
        """
        # @@TR: this should be in the compiler not here
        self.addChunk("if False:")
        self.indent()
        # note: genPlainVar mutates its argument, hence the [:] copy
        self.addChunk(self.genPlainVar(nameChunks[:]))
        self.dedent()
    def genPlainVar(self, nameChunks):
        """Generate Python code for a Cheetah $var without using NameMapper
        (Unified Dotted Notation with the SearchList).

        NOTE: consumes (empties) the nameChunks list passed in.
        """
        nameChunks.reverse()
        chunk = nameChunks.pop()
        pythonCode = chunk[0] + chunk[2]
        while nameChunks:
            chunk = nameChunks.pop()
            pythonCode = (pythonCode + '.' + chunk[0] + chunk[2])
        return pythonCode
    def genNameMapperVar(self, nameChunks):
        """Generate valid Python code for a Cheetah $var, using NameMapper
        (Unified Dotted Notation with the SearchList).

        NOTE: consumes (empties) the nameChunks list passed in.

        nameChunks = list of var subcomponents represented as tuples
          [ (name,useAC,remainderOfExpr),
          ]
        where:
          name = the dotted name base
          useAC = where NameMapper should use autocalling on namemapperPart
          remainderOfExpr = any arglist, index, or slice
        If remainderOfExpr contains a call arglist (e.g. '(1234)') then useAC
        is False, otherwise it defaults to True. It is overridden by the global
        setting 'useAutocalling' if this setting is False.
        EXAMPLE
        ------------------------------------------------------------------------
        if the raw Cheetah Var is
          $a.b.c[1].d().x.y.z
        nameChunks is the list
          [ ('a.b.c',True,'[1]'), # A
            ('d',False,'()'), # B
            ('x.y.z',True,''), # C
          ]
        When this method is fed the list above it returns
          VFN(VFN(VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True)
        which can be represented as
          VFN(B`, name=C[0], executeCallables=(useAC and C[1]))C[2]
        where:
          VFN = NameMapper.valueForName
          VFFSL = NameMapper.valueFromFrameOrSearchList
          VFSL = NameMapper.valueFromSearchList # optionally used instead of VFFSL
          SL = self.searchList()
          useAC = self.setting('useAutocalling') # True in this example
          A = ('a.b.c',True,'[1]')
          B = ('d',False,'()')
          C = ('x.y.z',True,'')
          C` = VFN( VFN( VFFSL(SL, 'a.b.c',True)[1],
                         'd',False)(),
                    'x.y.z',True)
             = VFN(B`, name='x.y.z', executeCallables=True)
          B` = VFN(A`, name=B[0], executeCallables=(useAC and B[1]))B[2]
          A` = VFFSL(SL, name=A[0], executeCallables=(useAC and A[1]))A[2]
        Note, if the compiler setting useStackFrames=False (default is true)
        then
          A` = VFSL([locals()]+SL+[globals(), __builtin__], name=A[0], executeCallables=(useAC and A[1]))A[2]
        This option allows Cheetah to be used with Psyco, which doesn't support
        stack frame introspection.
        """
        defaultUseAC = self.setting('useAutocalling')
        useSearchList = self.setting('useSearchList')
        nameChunks.reverse()
        name, useAC, remainder = nameChunks.pop()
        if not useSearchList:
            # no searchList: first chunk resolves as a plain Python name
            firstDotIdx = name.find('.')
            if firstDotIdx != -1 and firstDotIdx < len(name):
                beforeFirstDot, afterDot = name[:firstDotIdx], name[firstDotIdx+1:]
                pythonCode = ('VFN(' + beforeFirstDot +
                              ',"' + afterDot +
                              '",' + repr(defaultUseAC and useAC) + ')'
                              + remainder)
            else:
                pythonCode = name+remainder
        elif self.setting('useStackFrames'):
            pythonCode = ('VFFSL(SL,'
                          '"'+ name + '",'
                          + repr(defaultUseAC and useAC) + ')'
                          + remainder)
        else:
            pythonCode = ('VFSL([locals()]+SL+[globals(), __builtin__],'
                          '"'+ name + '",'
                          + repr(defaultUseAC and useAC) + ')'
                          + remainder)
        ##
        # wrap each remaining chunk in a nested VFN(...) lookup
        while nameChunks:
            name, useAC, remainder = nameChunks.pop()
            pythonCode = ('VFN(' + pythonCode +
                          ',"' + name +
                          '",' + repr(defaultUseAC and useAC) + ')'
                          + remainder)
        return pythonCode
##################################################
## METHOD COMPILERS
class MethodCompiler(GenUtils):
    """Accumulates the Python source of ONE generated method.

    The parser calls the add*() methods to append code chunks; wrapCode()
    finally assembles signature + docstring + body into the method's full
    source text.  Also manages the nested cache / #call / #capture /
    #filter regions that the directives open and close.
    """
    def __init__(self, methodName, classCompiler,
                 initialMethodComment=None,
                 decorators=None):
        # the class compiler doubles as the settings manager
        self._settingsManager = classCompiler
        self._classCompiler = classCompiler
        self._moduleCompiler = classCompiler._moduleCompiler
        self._methodName = methodName
        self._initialMethodComment = initialMethodComment
        self._setupState()
        self._decorators = decorators or []
    def setting(self, key):
        return self._settingsManager.setting(key)
    def _setupState(self):
        # per-method accumulation state; reset for every new method
        self._indent = self.setting('indentationStep')
        self._indentLev = self.setting('initialMethIndentLevel')
        self._pendingStrConstChunks = []
        self._methodSignature = None
        self._methodDef = None
        self._docStringLines = []
        self._methodBodyChunks = []
        self._cacheRegionsStack = []
        self._callRegionsStack = []
        self._captureRegionsStack = []
        self._filterRegionsStack = []
        self._isErrorCatcherOn = False
        self._hasReturnStatement = False
        self._isGenerator = False
    def cleanupState(self):
        """Called by the containing class compiler instance
        """
        pass
    def methodName(self):
        return self._methodName
    def setMethodName(self, name):
        self._methodName = name
    ## methods for managing indentation
    def indentation(self):
        return self._indent * self._indentLev
    def indent(self):
        self._indentLev +=1
    def dedent(self):
        if self._indentLev:
            self._indentLev -=1
        else:
            raise Error('Attempt to dedent when the indentLev is 0')
    ## methods for final code wrapping
    def methodDef(self):
        # cached after the first wrapCode() call
        if self._methodDef:
            return self._methodDef
        else:
            return self.wrapCode()
    __str__ = methodDef
    __unicode__ = methodDef
    def wrapCode(self):
        """Assemble and cache the full source text of the method."""
        self.commitStrConst()
        methodDefChunks = (
            self.methodSignature(),
            '\n',
            self.docString(),
            self.methodBody() )
        methodDef = ''.join(methodDefChunks)
        self._methodDef = methodDef
        return methodDef
    def methodSignature(self):
        return self._indent + self._methodSignature + ':'
    def setMethodSignature(self, signature):
        self._methodSignature = signature
    def methodBody(self):
        return ''.join( self._methodBodyChunks )
    def docString(self):
        """Render the accumulated docstring lines as a triple-quoted block."""
        if not self._docStringLines:
            return ''
        ind = self._indent*2
        # embedded triple-double-quotes are converted so they can't
        # terminate the generated docstring early
        docStr = (ind + '"""\n' + ind +
                  ('\n' + ind).join([ln.replace('"""', "'''") for ln in self._docStringLines]) +
                  '\n' + ind + '"""\n')
        return docStr
    ## methods for adding code
    def addMethDocString(self, line):
        # escape % so later %-formatting of the output can't break
        self._docStringLines.append(line.replace('%', '%%'))
    def addChunk(self, chunk):
        self.commitStrConst()
        chunk = "\n" + self.indentation() + chunk
        self._methodBodyChunks.append(chunk)
    def appendToPrevChunk(self, appendage):
        self._methodBodyChunks[-1] = self._methodBodyChunks[-1] + appendage
    def addWriteChunk(self, chunk):
        self.addChunk('write(' + chunk + ')')
    def addFilteredChunk(self, chunk, filterArgs=None, rawExpr=None, lineCol=None):
        """Emit code that writes `chunk`, passed through _filter per settings."""
        if filterArgs is None:
            filterArgs = ''
        if self.setting('includeRawExprInFilterArgs') and rawExpr:
            filterArgs += ', rawExpr=%s'%repr(rawExpr)
        if self.setting('alwaysFilterNone'):
            # only single-line raw expressions are safe inside a # comment
            if rawExpr and rawExpr.find('\n')==-1 and rawExpr.find('\r')==-1:
                self.addChunk("_v = %s # %r"%(chunk, rawExpr))
                if lineCol:
                    self.appendToPrevChunk(' on line %s, col %s'%lineCol)
            else:
                self.addChunk("_v = %s"%chunk)
            if self.setting('useFilters'):
                self.addChunk("if _v is not None: write(_filter(_v%s))"%filterArgs)
            else:
                self.addChunk("if _v is not None: write(str(_v))")
        else:
            if self.setting('useFilters'):
                self.addChunk("write(_filter(%s%s))"%(chunk, filterArgs))
            else:
                self.addChunk("write(str(%s))"%chunk)
    def _appendToPrevStrConst(self, strConst):
        if self._pendingStrConstChunks:
            self._pendingStrConstChunks.append(strConst)
        else:
            self._pendingStrConstChunks = [strConst]
    def commitStrConst(self):
        """Add the code for outputting the pending strConst without chopping off
        any whitespace from it.
        """
        if not self._pendingStrConstChunks:
            return
        strConst = ''.join(self._pendingStrConstChunks)
        self._pendingStrConstChunks = []
        if not strConst:
            return
        reprstr = repr(strConst)
        i = 0
        out = []
        # keep the unicode prefix (Python 2 reprs) outside the quotes
        if reprstr.startswith('u'):
            i = 1
            out = ['u']
        body = escapedNewlineRE.sub('\\1\n', reprstr[i+1:-1])
        # emit as a triple-quoted literal using the opposite quote style
        if reprstr[i]=="'":
            out.append("'''")
            out.append(body)
            out.append("'''")
        else:
            out.append('"""')
            out.append(body)
            out.append('"""')
        self.addWriteChunk(''.join(out))
    def handleWSBeforeDirective(self):
        """Truncate the pending strConst to the beginning of the current line.
        """
        if self._pendingStrConstChunks:
            src = self._pendingStrConstChunks[-1]
            BOL = max(src.rfind('\n')+1, src.rfind('\r')+1, 0)
            if BOL < len(src):
                self._pendingStrConstChunks[-1] = src[:BOL]
    def isErrorCatcherOn(self):
        return self._isErrorCatcherOn
    def turnErrorCatcherOn(self):
        self._isErrorCatcherOn = True
    def turnErrorCatcherOff(self):
        self._isErrorCatcherOn = False
    # @@TR: consider merging the next two methods into one
    def addStrConst(self, strConst):
        self._appendToPrevStrConst(strConst)
    def addRawText(self, text):
        self.addStrConst(text)
    def addMethComment(self, comm):
        offSet = self.setting('commentOffset')
        self.addChunk('#' + ' '*offSet + comm)
    def addPlaceholder(self, expr, filterArgs, rawPlaceholder,
                       cacheTokenParts, lineCol,
                       silentMode=False):
        """Emit code for one $placeholder, with optional caching,
        error-catcher wrapping, and silent (NotFound-suppressing) mode."""
        cacheInfo = self.genCacheInfo(cacheTokenParts)
        if cacheInfo:
            cacheInfo['ID'] = repr(rawPlaceholder)[1:-1]
            self.startCacheRegion(cacheInfo, lineCol, rawPlaceholder=rawPlaceholder)
        if self.isErrorCatcherOn():
            methodName = self._classCompiler.addErrorCatcherCall(
                expr, rawCode=rawPlaceholder, lineCol=lineCol)
            expr = 'self.' + methodName + '(localsDict=locals())'
        if silentMode:
            self.addChunk('try:')
            self.indent()
            self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
            self.dedent()
            self.addChunk('except NotFound: pass')
        else:
            self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
        if self.setting('outputRowColComments'):
            self.appendToPrevChunk(' # from line %s, col %s' % lineCol + '.')
        if cacheInfo:
            self.endCacheRegion()
    def addSilent(self, expr):
        self.addChunk( expr )
    def addEcho(self, expr, rawExpr=None):
        self.addFilteredChunk(expr, rawExpr=rawExpr)
    def addSet(self, expr, exprComponents, setStyle):
        """Emit a #set assignment; global sets are rewritten to target
        self._CHEETAH__globalSetVars, module sets go to the module compiler."""
        if setStyle is SET_GLOBAL:
            (LVALUE, OP, RVALUE) = (exprComponents.LVALUE,
                                    exprComponents.OP,
                                    exprComponents.RVALUE)
            # we need to split the LVALUE to deal with globalSetVars
            splitPos1 = LVALUE.find('.')
            splitPos2 = LVALUE.find('[')
            if splitPos1 > 0 and splitPos2==-1:
                splitPos = splitPos1
            elif splitPos1 > 0 and splitPos1 < max(splitPos2, 0):
                splitPos = splitPos1
            else:
                splitPos = splitPos2
            if splitPos >0:
                primary = LVALUE[:splitPos]
                secondary = LVALUE[splitPos:]
            else:
                primary = LVALUE
                secondary = ''
            LVALUE = 'self._CHEETAH__globalSetVars["' + primary + '"]' + secondary
            expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()
        if setStyle is SET_MODULE:
            self._moduleCompiler.addModuleGlobal(expr)
        else:
            self.addChunk(expr)
    def addInclude(self, sourceExpr, includeFrom, isRaw):
        self.addChunk('self._handleCheetahInclude(' + sourceExpr +
                      ', trans=trans, ' +
                      'includeFrom="' + includeFrom + '", raw=' +
                      repr(isRaw) + ')')
    def addWhile(self, expr, lineCol=None):
        self.addIndentingDirective(expr, lineCol=lineCol)
    def addFor(self, expr, lineCol=None):
        self.addIndentingDirective(expr, lineCol=lineCol)
    def addRepeat(self, expr, lineCol=None):
        #the _repeatCount stuff here allows nesting of #repeat directives
        self._repeatCount = getattr(self, "_repeatCount", -1) + 1
        self.addFor('for __i%s in range(%s)' % (self._repeatCount, expr), lineCol=lineCol)
    def addIndentingDirective(self, expr, lineCol=None):
        """Emit `expr` (colon appended if missing) and indent the following code."""
        if expr and not expr[-1] == ':':
            expr = expr + ':'
        self.addChunk( expr )
        if lineCol:
            self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
        self.indent()
    def addReIndentingDirective(self, expr, dedent=True, lineCol=None):
        """Like addIndentingDirective, but first dedents (for else/except/finally)."""
        self.commitStrConst()
        if dedent:
            self.dedent()
        if not expr[-1] == ':':
            expr = expr + ':'
        self.addChunk( expr )
        if lineCol:
            self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
        self.indent()
    def addIf(self, expr, lineCol=None):
        """For a full #if ... #end if directive
        """
        self.addIndentingDirective(expr, lineCol=lineCol)
    def addOneLineIf(self, expr, lineCol=None):
        """For a full #if ... #end if directive
        """
        self.addIndentingDirective(expr, lineCol=lineCol)
    def addTernaryExpr(self, conditionExpr, trueExpr, falseExpr, lineCol=None):
        """For a single-line #if ... then .... else ... directive
        <condition> then <trueExpr> else <falseExpr>
        """
        self.addIndentingDirective(conditionExpr, lineCol=lineCol)
        self.addFilteredChunk(trueExpr)
        self.dedent()
        self.addIndentingDirective('else')
        self.addFilteredChunk(falseExpr)
        self.dedent()
    def addElse(self, expr, dedent=True, lineCol=None):
        # normalize Cheetah's 'else if' spelling to Python's 'elif'
        expr = re.sub(r'else[ \f\t]+if', 'elif', expr)
        self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
    def addElif(self, expr, dedent=True, lineCol=None):
        self.addElse(expr, dedent=dedent, lineCol=lineCol)
    def addUnless(self, expr, lineCol=None):
        self.addIf('if not (' + expr + ')')
    def addClosure(self, functionName, argsList, parserComment):
        """Emit a nested `def` for a #def/#block compiled as a closure."""
        argStringChunks = []
        for arg in argsList:
            chunk = arg[0]
            if not arg[1] == None:
                chunk += '=' + arg[1]
            argStringChunks.append(chunk)
        signature = "def " + functionName + "(" + ','.join(argStringChunks) + "):"
        self.addIndentingDirective(signature)
        self.addChunk('#'+parserComment)
    def addTry(self, expr, lineCol=None):
        self.addIndentingDirective(expr, lineCol=lineCol)
    def addExcept(self, expr, dedent=True, lineCol=None):
        self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
    def addFinally(self, expr, dedent=True, lineCol=None):
        self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
    def addReturn(self, expr):
        # a method can't both return a value and be a generator
        assert not self._isGenerator
        self.addChunk(expr)
        self._hasReturnStatement = True
    def addYield(self, expr):
        """Turn the method into a generator; a bare 'yield' emits the
        buffered-transaction flush boilerplate instead."""
        assert not self._hasReturnStatement
        self._isGenerator = True
        if expr.replace('yield', '').strip():
            self.addChunk(expr)
        else:
            self.addChunk('if _dummyTrans:')
            self.indent()
            self.addChunk('yield trans.response().getvalue()')
            self.addChunk('trans = DummyTransaction()')
            self.addChunk('write = trans.response().write')
            self.dedent()
            self.addChunk('else:')
            self.indent()
            self.addChunk(
                'raise TypeError("This method cannot be called with a trans arg")')
            self.dedent()
    def addPass(self, expr):
        self.addChunk(expr)
    def addDel(self, expr):
        self.addChunk(expr)
    def addAssert(self, expr):
        self.addChunk(expr)
    def addRaise(self, expr):
        self.addChunk(expr)
    def addBreak(self, expr):
        self.addChunk(expr)
    def addContinue(self, expr):
        self.addChunk(expr)
    def addPSP(self, PSP):
        """Emit an inline PSP (<% ... %>) code block.

        '=' prefix means 'write the filtered expression'; 'end' closes an
        indent level; a trailing '$' or ':' opens a new indent level.
        """
        self.commitStrConst()
        autoIndent = False
        if PSP[0] == '=':
            PSP = PSP[1:]
            if PSP:
                self.addWriteChunk('_filter(' + PSP + ')')
            return
        elif PSP.lower() == 'end':
            self.dedent()
            return
        elif PSP[-1] == '$':
            autoIndent = True
            PSP = PSP[:-1]
        elif PSP[-1] == ':':
            autoIndent = True
        for line in PSP.splitlines():
            self.addChunk(line)
        if autoIndent:
            self.indent()
    def nextCacheID(self):
        # random ID used to keep generated cache/call/capture vars unique
        return ('_'+str(random.randrange(100, 999))
                + str(random.randrange(10000, 99999)))
    def startCacheRegion(self, cacheInfo, lineCol, rawPlaceholder=None):
        """Emit the prologue of a #cache region: decide whether to recache,
        replay cached output, or start collecting into a DummyTransaction."""
        # @@TR: we should add some runtime logging to this
        ID = self.nextCacheID()
        interval = cacheInfo.get('interval', None)
        test = cacheInfo.get('test', None)
        customID = cacheInfo.get('id', None)
        if customID:
            ID = customID
        varyBy = cacheInfo.get('varyBy', repr(ID))
        self._cacheRegionsStack.append(ID) # attrib of current methodCompiler
        # @@TR: add this to a special class var as well
        self.addChunk('')
        self.addChunk('## START CACHE REGION: ID='+ID+
                      '. line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('_RECACHE_%(ID)s = False'%locals())
        self.addChunk('_cacheRegion_%(ID)s = self.getCacheRegion(regionID='%locals()
                      + repr(ID)
                      + ', cacheInfo=%r'%cacheInfo
                      + ')')
        self.addChunk('if _cacheRegion_%(ID)s.isNew():'%locals())
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        self.dedent()
        self.addChunk('_cacheItem_%(ID)s = _cacheRegion_%(ID)s.getCacheItem('%locals()
                      +varyBy+')')
        self.addChunk('if _cacheItem_%(ID)s.hasExpired():'%locals())
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        self.dedent()
        if test:
            self.addChunk('if ' + test + ':')
            self.indent()
            self.addChunk('_RECACHE_%(ID)s = True'%locals())
            self.dedent()
        self.addChunk('if (not _RECACHE_%(ID)s) and _cacheItem_%(ID)s.getRefreshTime():'%locals())
        self.indent()
        #self.addChunk('print "DEBUG"+"-"*50')
        self.addChunk('try:')
        self.indent()
        self.addChunk('_output = _cacheItem_%(ID)s.renderOutput()'%locals())
        self.dedent()
        self.addChunk('except KeyError:')
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        #self.addChunk('print "DEBUG"+"*"*50')
        self.dedent()
        self.addChunk('else:')
        self.indent()
        self.addWriteChunk('_output')
        self.addChunk('del _output')
        self.dedent()
        self.dedent()
        self.addChunk('if _RECACHE_%(ID)s or not _cacheItem_%(ID)s.getRefreshTime():'%locals())
        self.indent()
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('trans = _cacheCollector_%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _cacheCollector_%(ID)s.response().write'%locals())
        if interval:
            self.addChunk(("_cacheItem_%(ID)s.setExpiryTime(currentTime() +"%locals())
                          + str(interval) + ")")
    def endCacheRegion(self):
        """Emit the epilogue: store collected output in the cache item and
        write it to the real transaction."""
        ID = self._cacheRegionsStack.pop()
        self.addChunk('trans = _orig_trans%(ID)s'%locals())
        self.addChunk('write = trans.response().write')
        self.addChunk('_cacheData = _cacheCollector_%(ID)s.response().getvalue()'%locals())
        self.addChunk('_cacheItem_%(ID)s.setData(_cacheData)'%locals())
        self.addWriteChunk('_cacheData')
        self.addChunk('del _cacheData')
        self.addChunk('del _cacheCollector_%(ID)s'%locals())
        self.addChunk('del _orig_trans%(ID)s'%locals())
        self.dedent()
        self.addChunk('## END CACHE REGION: '+ID)
        self.addChunk('')
    def nextCallRegionID(self):
        return self.nextCacheID()
    def startCallRegion(self, functionName, args, lineCol, regionTitle='CALL'):
        """Emit the prologue of a #call region: buffer output into a
        DummyTransaction so it can be passed to the called function."""
        class CallDetails(object):
            pass
        callDetails = CallDetails()
        callDetails.ID = ID = self.nextCallRegionID()
        callDetails.functionName = functionName
        callDetails.args = args
        callDetails.lineCol = lineCol
        callDetails.usesKeywordArgs = False
        self._callRegionsStack.append((ID, callDetails)) # attrib of current methodCompiler
        self.addChunk('## START %(regionTitle)s REGION: '%locals()
                      +ID
                      +' of '+functionName
                      +' at line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
        self.addChunk('self._CHEETAH__isBuffering = True')
        self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _callCollector%(ID)s.response().write'%locals())
    def setCallArg(self, argName, lineCol):
        """Start a named #arg inside the current #call region."""
        ID, callDetails = self._callRegionsStack[-1]
        argName = str(argName)
        if callDetails.usesKeywordArgs:
            # close the previous keyword arg's collector first
            self._endCallArg()
        else:
            callDetails.usesKeywordArgs = True
            self.addChunk('_callKws%(ID)s = {}'%locals())
            self.addChunk('_currentCallArgname%(ID)s = %(argName)r'%locals())
        callDetails.currentArgname = argName
    def _endCallArg(self):
        ID, callDetails = self._callRegionsStack[-1]
        currCallArg = callDetails.currentArgname
        self.addChunk(('_callKws%(ID)s[%(currCallArg)r] ='
                       ' _callCollector%(ID)s.response().getvalue()')%locals())
        self.addChunk('del _callCollector%(ID)s'%locals())
        self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _callCollector%(ID)s.response().write'%locals())
    def endCallRegion(self, regionTitle='CALL'):
        """Emit the epilogue of a #call region: restore the real transaction
        and invoke the function with the collected output/keyword args."""
        ID, callDetails = self._callRegionsStack[-1]
        functionName, initialKwArgs, lineCol = (
            callDetails.functionName, callDetails.args, callDetails.lineCol)
        def reset(ID=ID):
            self.addChunk('trans = _orig_trans%(ID)s'%locals())
            self.addChunk('write = trans.response().write')
            self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
            self.addChunk('del _wasBuffering%(ID)s'%locals())
            self.addChunk('del _orig_trans%(ID)s'%locals())
        if not callDetails.usesKeywordArgs:
            reset()
            self.addChunk('_callArgVal%(ID)s = _callCollector%(ID)s.response().getvalue()'%locals())
            self.addChunk('del _callCollector%(ID)s'%locals())
            if initialKwArgs:
                initialKwArgs = ', '+initialKwArgs
            self.addFilteredChunk('%(functionName)s(_callArgVal%(ID)s%(initialKwArgs)s)'%locals())
            self.addChunk('del _callArgVal%(ID)s'%locals())
        else:
            if initialKwArgs:
                initialKwArgs = initialKwArgs+', '
            self._endCallArg()
            reset()
            self.addFilteredChunk('%(functionName)s(%(initialKwArgs)s**_callKws%(ID)s)'%locals())
            self.addChunk('del _callKws%(ID)s'%locals())
        self.addChunk('## END %(regionTitle)s REGION: '%locals()
                      +ID
                      +' of '+functionName
                      +' at line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('')
        self._callRegionsStack.pop() # attrib of current methodCompiler
    def nextCaptureRegionID(self):
        return self.nextCacheID()
    def startCaptureRegion(self, assignTo, lineCol):
        """Emit the prologue of a #capture region: buffer output into a
        DummyTransaction for later assignment to `assignTo`."""
        class CaptureDetails: pass
        captureDetails = CaptureDetails()
        captureDetails.ID = ID = self.nextCaptureRegionID()
        captureDetails.assignTo = assignTo
        captureDetails.lineCol = lineCol
        self._captureRegionsStack.append((ID, captureDetails)) # attrib of current methodCompiler
        self.addChunk('## START CAPTURE REGION: '+ID
                      +' '+assignTo
                      +' at line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
        self.addChunk('self._CHEETAH__isBuffering = True')
        self.addChunk('trans = _captureCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _captureCollector%(ID)s.response().write'%locals())
    def endCaptureRegion(self):
        """Emit the epilogue: assign the captured output and restore state."""
        ID, captureDetails = self._captureRegionsStack.pop()
        assignTo, lineCol = (captureDetails.assignTo, captureDetails.lineCol)
        self.addChunk('trans = _orig_trans%(ID)s'%locals())
        self.addChunk('write = trans.response().write')
        self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
        self.addChunk('%(assignTo)s = _captureCollector%(ID)s.response().getvalue()'%locals())
        self.addChunk('del _orig_trans%(ID)s'%locals())
        self.addChunk('del _captureCollector%(ID)s'%locals())
        self.addChunk('del _wasBuffering%(ID)s'%locals())
    def setErrorCatcher(self, errorCatcherName):
        """Emit code that installs (creating if necessary) the named
        ErrorCatchers class on the template instance."""
        self.turnErrorCatcherOn()
        self.addChunk('if self._CHEETAH__errorCatchers.has_key("' + errorCatcherName + '"):')
        self.indent()
        self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["' +
                      errorCatcherName + '"]')
        self.dedent()
        self.addChunk('else:')
        self.indent()
        self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["'
                      + errorCatcherName + '"] = ErrorCatchers.'
                      + errorCatcherName + '(self)'
                      )
        self.dedent()
    def nextFilterRegionID(self):
        return self.nextCacheID()
    def setTransform(self, transformer, isKlass):
        self.addChunk('trans = TransformerTransaction()')
        self.addChunk('trans._response = trans.response()')
        self.addChunk('trans._response._filter = %s' % transformer)
        self.addChunk('write = trans._response.write')
    def setFilter(self, theFilter, isKlass):
        """Emit code that switches the active output filter for a #filter
        region; the previous filter is saved for closeFilterBlock()."""
        class FilterDetails:
            pass
        filterDetails = FilterDetails()
        filterDetails.ID = ID = self.nextFilterRegionID()
        filterDetails.theFilter = theFilter
        filterDetails.isKlass = isKlass
        self._filterRegionsStack.append((ID, filterDetails)) # attrib of current methodCompiler
        self.addChunk('_orig_filter%(ID)s = _filter'%locals())
        if isKlass:
            self.addChunk('_filter = self._CHEETAH__currentFilter = ' + theFilter.strip() +
                          '(self).filter')
        else:
            if theFilter.lower() == 'none':
                self.addChunk('_filter = self._CHEETAH__initialFilter')
            else:
                # is string representing the name of a builtin filter
                self.addChunk('filterName = ' + repr(theFilter))
                self.addChunk('if self._CHEETAH__filters.has_key("' + theFilter + '"):')
                self.indent()
                self.addChunk('_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]')
                self.dedent()
                self.addChunk('else:')
                self.indent()
                self.addChunk('_filter = self._CHEETAH__currentFilter'
                              +' = \\\n\t\t\tself._CHEETAH__filters[filterName] = '
                              + 'getattr(self._CHEETAH__filtersLib, filterName)(self).filter')
                self.dedent()
    def closeFilterBlock(self):
        ID, filterDetails = self._filterRegionsStack.pop()
        #self.addChunk('_filter = self._CHEETAH__initialFilter')
        #self.addChunk('_filter = _orig_filter%(ID)s'%locals())
        self.addChunk('_filter = self._CHEETAH__currentFilter = _orig_filter%(ID)s'%locals())
class AutoMethodCompiler(MethodCompiler):
def _setupState(self):
MethodCompiler._setupState(self)
self._argStringList = [ ("self", None) ]
self._streamingEnabled = True
self._isClassMethod = None
self._isStaticMethod = None
def _useKWsDictArgForPassingTrans(self):
alreadyHasTransArg = [argname for argname, defval in self._argStringList
if argname=='trans']
return (self.methodName()!='respond'
and not alreadyHasTransArg
and self.setting('useKWsDictArgForPassingTrans'))
def isClassMethod(self):
if self._isClassMethod is None:
self._isClassMethod = '@classmethod' in self._decorators
return self._isClassMethod
def isStaticMethod(self):
if self._isStaticMethod is None:
self._isStaticMethod = '@staticmethod' in self._decorators
return self._isStaticMethod
def cleanupState(self):
MethodCompiler.cleanupState(self)
self.commitStrConst()
if self._cacheRegionsStack:
self.endCacheRegion()
if self._callRegionsStack:
self.endCallRegion()
if self._streamingEnabled:
kwargsName = None
positionalArgsListName = None
for argname, defval in self._argStringList:
if argname.strip().startswith('**'):
kwargsName = argname.strip().replace('**', '')
break
elif argname.strip().startswith('*'):
positionalArgsListName = argname.strip().replace('*', '')
if not kwargsName and self._useKWsDictArgForPassingTrans():
kwargsName = 'KWS'
self.addMethArg('**KWS', None)
self._kwargsName = kwargsName
if not self._useKWsDictArgForPassingTrans():
if not kwargsName and not positionalArgsListName:
self.addMethArg('trans', 'None')
else:
self._streamingEnabled = False
self._indentLev = self.setting('initialMethIndentLevel')
mainBodyChunks = self._methodBodyChunks
self._methodBodyChunks = []
self._addAutoSetupCode()
self._methodBodyChunks.extend(mainBodyChunks)
self._addAutoCleanupCode()
def _addAutoSetupCode(self):
if self._initialMethodComment:
self.addChunk(self._initialMethodComment)
if self._streamingEnabled and not self.isClassMethod() and not self.isStaticMethod():
if self._useKWsDictArgForPassingTrans() and self._kwargsName:
self.addChunk('trans = %s.get("trans")'%self._kwargsName)
self.addChunk('if (not trans and not self._CHEETAH__isBuffering'
' and not callable(self.transaction)):')
self.indent()
self.addChunk('trans = self.transaction'
' # is None unless self.awake() was called')
self.dedent()
self.addChunk('if not trans:')
self.indent()
self.addChunk('trans = DummyTransaction()')
if self.setting('autoAssignDummyTransactionToSelf'):
self.addChunk('self.transaction = trans')
self.addChunk('_dummyTrans = True')
self.dedent()
self.addChunk('else: _dummyTrans = False')
else:
self.addChunk('trans = DummyTransaction()')
self.addChunk('_dummyTrans = True')
self.addChunk('write = trans.response().write')
if self.setting('useNameMapper'):
argNames = [arg[0] for arg in self._argStringList]
allowSearchListAsMethArg = self.setting('allowSearchListAsMethArg')
if allowSearchListAsMethArg and 'SL' in argNames:
pass
elif allowSearchListAsMethArg and 'searchList' in argNames:
self.addChunk('SL = searchList')
elif not self.isClassMethod() and not self.isStaticMethod():
self.addChunk('SL = self._CHEETAH__searchList')
else:
self.addChunk('SL = [KWS]')
if self.setting('useFilters'):
if self.isClassMethod() or self.isStaticMethod():
self.addChunk('_filter = lambda x, **kwargs: unicode(x)')
else:
self.addChunk('_filter = self._CHEETAH__currentFilter')
self.addChunk('')
self.addChunk("#" *40)
self.addChunk('## START - generated method body')
self.addChunk('')
def _addAutoCleanupCode(self):
    """Emit the trailer of a generated method body: the END banner and,
    for non-generator methods, the buffered-output return statement."""
    for line in ('', '#' * 40, '## END - generated method body', ''):
        self.addChunk(line)
    if not self._isGenerator:
        self.addStop()
    self.addChunk('')

def addStop(self, expr=None):
    """Emit the generated return: hand back the buffered output when a
    DummyTransaction was used, otherwise an empty string.  `expr` is
    accepted for interface compatibility but unused here."""
    returnLine = 'return _dummyTrans and trans.response().getvalue() or ""'
    self.addChunk(returnLine)

def addMethArg(self, name, defVal=None):
    """Record one (name, default) pair for the method's signature."""
    self._argStringList.append((name, defVal))
def methodSignature(self):
    """Return the generated `def` line(s) for this method, preceded by
    any queued decorator lines.

    Fix: compared defaults with `not arg[1] == None`; now uses the
    idiomatic identity test `is not None` (PEP 8).
    """
    argStringChunks = []
    for name, default in self._argStringList:
        if name == 'self' and self.isClassMethod():
            name = 'cls'
        if name == 'self' and self.isStaticMethod():
            # a @staticmethod takes no self/cls argument at all
            continue
        if default is not None:
            name += '=' + default
        argStringChunks.append(name)
    argString = ', '.join(argStringChunks)

    output = []
    if self._decorators:
        output.append(''.join([self._indent + decorator + '\n'
                               for decorator in self._decorators]))
    output.append(self._indent + "def "
                  + self.methodName() + "(" +
                  argString + "):\n\n")
    return ''.join(output)
##################################################
## CLASS COMPILERS
_initMethod_initCheetah = """\
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
""".replace('\n', '\n'+' '*8)
class ClassCompiler(GenUtils):
    """Generates the Python `class` definition for one template class.

    Delegates method bodies to MethodCompiler instances managed on a
    stack while the parser walks the template; see __getattr__ for the
    one-way namespace sharing with the active MethodCompiler.
    """
    methodCompilerClass = AutoMethodCompiler
    methodCompilerClassForInit = MethodCompiler

    def __init__(self, className, mainMethodName='respond',
                 moduleCompiler=None,
                 fileName=None,
                 settingsManager=None):
        # settings are owned by the ModuleCompiler; see self.setting()
        self._settingsManager = settingsManager
        self._fileName = fileName
        self._className = className
        self._moduleCompiler = moduleCompiler
        self._mainMethodName = mainMethodName
        self._setupState()
        # the template's main output method is created up front and made
        # the active target for generated chunks
        methodCompiler = self._spawnMethodCompiler(
            mainMethodName,
            initialMethodComment='## CHEETAH: main method generated for this template')
        self._setActiveMethodCompiler(methodCompiler)
        if fileName and self.setting('monitorSrcFile'):
            self._addSourceFileMonitoring(fileName)
def setting(self, key):
    """Look up a compiler setting via the owning settings manager."""
    return self._settingsManager.setting(key)

def __getattr__(self, name):
    """Provide access to the methods and attributes of the MethodCompiler
    at the top of the activeMethods stack: one-way namespace sharing.

    WARNING: Use .setMethods to assign the attributes of the MethodCompiler
    from the methods of this class!!! or you will be assigning to attributes
    of this object instead.
    """
    if name in self.__dict__:
        return self.__dict__[name]
    elif hasattr(self.__class__, name):
        return getattr(self.__class__, name)
    elif self._activeMethodsList and hasattr(self._activeMethodsList[-1], name):
        # fall through to the active MethodCompiler
        return getattr(self._activeMethodsList[-1], name)
    else:
        raise AttributeError(name)
def _setupState(self):
    """Initialize per-class compile state: method stacks/indexes and the
    boilerplate attributes emitted into the generated class."""
    self._classDef = None
    self._decoratorsForNextMethod = []
    self._activeMethodsList = []        # stack while parsing/generating
    self._finishedMethodsList = []      # store by order
    self._methodsIndex = {}      # store by name
    self._baseClass = 'Template'
    self._classDocStringLines = []
    # printed after methods in the gen class def:
    self._generatedAttribs = ['_CHEETAH__instanceInitialized = False']
    self._generatedAttribs.append('_CHEETAH_version = __CHEETAH_version__')
    self._generatedAttribs.append(
        '_CHEETAH_versionTuple = __CHEETAH_versionTuple__')
    if self.setting('addTimestampsToCompilerOutput'):
        self._generatedAttribs.append('_CHEETAH_genTime = __CHEETAH_genTime__')
        self._generatedAttribs.append('_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__')
    self._generatedAttribs.append('_CHEETAH_src = __CHEETAH_src__')
    self._generatedAttribs.append(
        '_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__')

    if self.setting('templateMetaclass'):
        self._generatedAttribs.append('__metaclass__ = '+self.setting('templateMetaclass'))
    self._initMethChunks = []
    self._blockMetaData = {}
    self._errorCatcherCount = 0
    self._placeholderToErrorCatcherMap = {}
def cleanupState(self):
    """Close out the class: flush any still-active method compilers,
    build the generated __init__, and record the main-method marker
    attribute used by the Template plumbing."""
    while self._activeMethodsList:
        methCompiler = self._popActiveMethodCompiler()
        self._swallowMethodCompiler(methCompiler)
    self._setupInitMethod()
    if self._mainMethodName == 'respond':
        if self.setting('setup__str__method'):
            self._generatedAttribs.append('def __str__(self): return self.respond()')
    self.addAttribute('_mainCheetahMethod_for_' + self._className +
                       '= ' + repr(self._mainMethodName) )

def _setupInitMethod(self):
    """Generate the class __init__: the super().__init__ call, the
    Cheetah instance-init snippet, and any chunks queued via
    addChunkToInit().  The finished __init__ is inserted first."""
    __init__ = self._spawnMethodCompiler('__init__',
                                         klass=self.methodCompilerClassForInit)
    __init__.setMethodSignature("def __init__(self, *args, **KWs)")
    __init__.addChunk('super(%s, self).__init__(*args, **KWs)' % self._className)
    # _initMethod_initCheetah contains no %(...)s keys, so this
    # %-formatting is effectively a passthrough
    __init__.addChunk(_initMethod_initCheetah % {'className' : self._className})
    for chunk in self._initMethChunks:
        __init__.addChunk(chunk)
    __init__.cleanupState()
    self._swallowMethodCompiler(__init__, pos=0)
def _addSourceFileMonitoring(self, fileName):
    """Emit code so the generated template recompiles itself when its
    source file changes on disk (checked at the top of the main method).
    """
    # @@TR: this stuff needs auditing for Cheetah 2.0
    # the first bit is added to init
    self.addChunkToInit('self._filePath = ' + repr(fileName))
    self.addChunkToInit('self._fileMtime = ' + str(getmtime(fileName)) )

    # the rest is added to the main output method of the class ('mainMethod')
    self.addChunk('if exists(self._filePath) and ' +
                  'getmtime(self._filePath) > self._fileMtime:')
    self.indent()
    self.addChunk('self._compile(file=self._filePath, moduleName='+self._className + ')')
    self.addChunk(
        'write(getattr(self, self._mainCheetahMethod_for_' + self._className +
        ')(trans=trans))')
    self.addStop()
    self.dedent()
def setClassName(self, name):
    """Rename the generated class."""
    self._className = name

def className(self):
    """Return the name of the generated class."""
    return self._className

def setBaseClass(self, baseClassName):
    """Set the base class named in the generated class signature."""
    self._baseClass = baseClassName
def setMainMethodName(self, methodName):
    """Rename the template's main output method and re-key its index
    entry; no-op if the name is unchanged.

    Fixes: replaced the `for i in range(len(chunks))` index loop with
    enumerate() and dropped the redundant `chunkToChange in chunks`
    pre-scan (the loop already handles the absent case).
    """
    if methodName == self._mainMethodName:
        return
    ## change the name in the methodCompiler and add new reference
    mainMethod = self._methodsIndex[self._mainMethodName]
    mainMethod.setMethodName(methodName)
    self._methodsIndex[methodName] = mainMethod

    ## make sure that fileUpdate code still works properly:
    oldChunk = 'write(self.' + self._mainMethodName + '(trans=trans))'
    newChunk = 'write(self.' + methodName + '(trans=trans))'
    chunks = mainMethod._methodBodyChunks
    for i, chunk in enumerate(chunks):
        if chunk == oldChunk:
            chunks[i] = newChunk

    ## get rid of the old reference and update self._mainMethodName
    del self._methodsIndex[self._mainMethodName]
    self._mainMethodName = methodName
def setMainMethodArgs(self, argsList):
    """Append explicit (name, default) arguments to the main method's
    signature (e.g. from an #def/#block arg list)."""
    compiler = self._methodsIndex[self._mainMethodName]
    for name, default in argsList:
        compiler.addMethArg(name, default)
def _spawnMethodCompiler(self, methodName, klass=None,
                         initialMethodComment=None):
    """Create and index a new method compiler; decorators queued via
    addDecorator() are consumed by (and only by) this next method."""
    if klass is None:
        klass = self.methodCompilerClass

    decorators = self._decoratorsForNextMethod or []
    self._decoratorsForNextMethod = []
    methodCompiler = klass(methodName, classCompiler=self,
                           decorators=decorators,
                           initialMethodComment=initialMethodComment)
    self._methodsIndex[methodName] = methodCompiler
    return methodCompiler

def _setActiveMethodCompiler(self, methodCompiler):
    """Push a method compiler onto the active stack."""
    self._activeMethodsList.append(methodCompiler)

def _getActiveMethodCompiler(self):
    """Return the top of the active stack without popping."""
    return self._activeMethodsList[-1]

def _popActiveMethodCompiler(self):
    return self._activeMethodsList.pop()

def _swallowMethodCompiler(self, methodCompiler, pos=None):
    """Finalize a method compiler and move it to the finished list,
    appending by default or inserting at `pos` (used for __init__).

    Fix: compared `pos == None`; now uses the identity test `is None`.
    """
    methodCompiler.cleanupState()
    if pos is None:
        self._finishedMethodsList.append(methodCompiler)
    else:
        self._finishedMethodsList.insert(pos, methodCompiler)
    return methodCompiler
def startMethodDef(self, methodName, argsList, parserComment):
    """Open a new method: spawn its compiler, make it active, and record
    its declared arguments."""
    compiler = self._spawnMethodCompiler(
        methodName, initialMethodComment=parserComment)
    self._setActiveMethodCompiler(compiler)
    for name, default in argsList:
        compiler.addMethArg(name, default)

def _finishedMethods(self):
    """Return the finished method compilers in output order."""
    return self._finishedMethodsList

def addDecorator(self, decoratorExpr):
    """Queue a decorator line to attach to the next method defined in
    the source.  See _spawnMethodCompiler() and MethodCompiler for the
    details of how this is consumed."""
    self._decoratorsForNextMethod.append(decoratorExpr)

def addClassDocString(self, line):
    """Append one class-docstring line; '%' is escaped to '%%' because
    the docstring is later run through %-formatting."""
    escaped = line.replace('%', '%%')
    self._classDocStringLines.append(escaped)

def addChunkToInit(self, chunk):
    """Queue a code chunk for the generated __init__ method."""
    self._initMethChunks.append(chunk)
def addAttribute(self, attribExpr):
    """Add a class-attribute assignment (from an #attr directive) to the
    generated class.

    Raises ParseError if the expression contains compiled placeholder
    calls (VFN/VFFSL) -- #attr values must be simple Python literals.
    Fix: replaced `.find(...) != -1` with the idiomatic `in` test.
    """
    ## first test to make sure that the user hasn't used any fancy Cheetah syntax
    #  (placeholders, directives, etc.) inside the expression
    if 'VFN(' in attribExpr or 'VFFSL(' in attribExpr:
        raise ParseError(self,
                         'Invalid #attr directive.' +
                         ' It should only contain simple Python literals.')
    ## now add the attribute
    self._generatedAttribs.append(attribExpr)
def addSuper(self, argsList, parserComment=None):
    """Emit a super(...) call to the same-named method of the base class
    (the #super directive).  `parserComment` is accepted but unused.

    Fix: compared defaults with `not arg[1] == None`; now uses the
    idiomatic identity test `is not None`.
    """
    className = self._className  # NOTE: deliberately not self._baseClass
    methodName = self._getActiveMethodCompiler().methodName()

    argStringChunks = []
    for arg in argsList:
        chunk = arg[0]
        if arg[1] is not None:
            chunk += '=' + arg[1]
        argStringChunks.append(chunk)
    argString = ','.join(argStringChunks)

    self.addFilteredChunk(
        'super(%(className)s, self).%(methodName)s(%(argString)s)'%locals())
def addErrorCatcherCall(self, codeChunk, rawCode='', lineCol=''):
    """Wrap a placeholder expression in a generated __errorCatcherN
    method that evals it and routes exceptions through
    self._CHEETAH__errorCatcher; returns the catcher method's name.

    Catcher methods are memoized per rawCode so repeated identical
    placeholders share one catcher.
    """
    if rawCode in self._placeholderToErrorCatcherMap:
        # reuse the existing catcher for an identical placeholder
        methodName = self._placeholderToErrorCatcherMap[rawCode]
        if not self.setting('outputRowColComments'):
            self._methodsIndex[methodName].addMethDocString(
                'plus at line %s, col %s'%lineCol)
        return methodName

    self._errorCatcherCount += 1
    methodName = '__errorCatcher' + str(self._errorCatcherCount)
    self._placeholderToErrorCatcherMap[rawCode] = methodName

    catcherMeth = self._spawnMethodCompiler(
        methodName,
        klass=MethodCompiler,
        initialMethodComment=('## CHEETAH: Generated from ' + rawCode +
                              ' at line %s, col %s'%lineCol + '.')
        )
    catcherMeth.setMethodSignature('def ' + methodName +
                                   '(self, localsDict={})')
    # is this use of localsDict right?
    catcherMeth.addChunk('try:')
    catcherMeth.indent()
    catcherMeth.addChunk("return eval('''" + codeChunk +
                         "''', globals(), localsDict)")
    catcherMeth.dedent()
    # NOTE: the emitted `except ..., e:` syntax is Python 2 only
    catcherMeth.addChunk('except self._CHEETAH__errorCatcher.exceptions(), e:')
    catcherMeth.indent()
    catcherMeth.addChunk("return self._CHEETAH__errorCatcher.warn(exc_val=e, code= " +
                         repr(codeChunk) + " , rawCode= " +
                         repr(rawCode) + " , lineCol=" + str(lineCol) +")")

    catcherMeth.cleanupState()

    self._swallowMethodCompiler(catcherMeth)
    return methodName
def closeDef(self):
    """End a #def: flush pending string constants and finalize the
    active method compiler."""
    self.commitStrConst()
    methCompiler = self._popActiveMethodCompiler()
    self._swallowMethodCompiler(methCompiler)

def closeBlock(self):
    """End a #block: finalize the method, then emit an inline call to it
    so the block's output appears where it was defined."""
    self.commitStrConst()
    methCompiler = self._popActiveMethodCompiler()
    methodName = methCompiler.methodName()
    if self.setting('includeBlockMarkers'):
        endMarker = self.setting('blockMarkerEnd')
        methCompiler.addStrConst(endMarker[0] + methodName + endMarker[1])
    self._swallowMethodCompiler(methCompiler)

    #metaData = self._blockMetaData[methodName]
    #rawDirective = metaData['raw']
    #lineCol = metaData['lineCol']

    ## insert the code to call the block, caching if #cache directive is on
    codeChunk = 'self.' + methodName + '(trans=trans)'
    self.addChunk(codeChunk)

    #self.appendToPrevChunk(' # generated from ' + repr(rawDirective) )
    #if self.setting('outputRowColComments'):
    #    self.appendToPrevChunk(' at line %s, col %s' % lineCol + '.')
## code wrapping methods

def classDef(self):
    """Return the full generated class definition, building and caching
    it on first use (see wrapClassDef)."""
    if self._classDef:
        return self._classDef
    else:
        return self.wrapClassDef()

# str()/unicode() of a ClassCompiler yield the generated class source
__str__ = classDef
__unicode__ = classDef
def wrapClassDef(self):
    """Assemble the class source: signature, docstring, then the method
    and attribute sections in the order selected by the
    outputMethodsBeforeAttributes setting.  Caches the result."""
    ind = self.setting('indentationStep')
    classDefChunks = [self.classSignature(),
                      self.classDocstring(),
                      ]
    def addMethods():
        classDefChunks.extend([
            ind + '#'*50,
            ind + '## CHEETAH GENERATED METHODS',
            '\n',
            self.methodDefs(),
            ])
    def addAttributes():
        classDefChunks.extend([
            ind + '#'*50,
            ind + '## CHEETAH GENERATED ATTRIBUTES',
            '\n',
            self.attributes(),
            ])
    if self.setting('outputMethodsBeforeAttributes'):
        addMethods()
        addAttributes()
    else:
        addAttributes()
        addMethods()

    classDef = '\n'.join(classDefChunks)
    self._classDef = classDef   # cached for classDef()
    return classDef
def classSignature(self):
    """Return the `class Name(Base):` line for the generated class."""
    parts = ('class ', self.className(), '(', self._baseClass, '):')
    return ''.join(parts)
def classDocstring(self):
    """Build the generated class's docstring from the collected lines.

    The lines were %-escaped by addClassDocString(); the %-formatting
    below (keyed on 'ind') is also what turns '%%' back into '%', so the
    two steps must stay paired.
    """
    if not self._classDocStringLines:
        return ''
    ind = self.setting('indentationStep')
    docStr = ('%(ind)s"""\n%(ind)s' +
              '\n%(ind)s'.join(self._classDocStringLines) +
              '\n%(ind)s"""\n'
              ) % {'ind':ind}
    return  docStr
def methodDefs(self):
    """Return all finished method definitions joined by blank lines."""
    defs = []
    for methGen in self._finishedMethods():
        defs.append(methGen.methodDef())
    return '\n\n'.join(defs)

def attributes(self):
    """Return the generated class attributes, each indented one step."""
    ind = self.setting('indentationStep')
    attribs = ['%s%s' % (ind, attrib) for attrib in self._generatedAttribs]
    return '\n\n'.join(attribs)
class AutoClassCompiler(ClassCompiler):
    """ClassCompiler variant used by ModuleCompiler; currently adds no
    behaviour of its own."""
    pass
##################################################
## MODULE COMPILERS
class ModuleCompiler(SettingsManager, GenUtils):
    """Compiles a whole Cheetah template into Python module source.

    Drives the Parser, owns one or more ClassCompilers (one per
    generated class) on a stack, and assembles the final module text in
    wrapModuleDef().  This code targets Python 2 (basestring/unicode).
    """

    parserClass = Parser
    classCompilerClass = AutoClassCompiler

    def __init__(self, source=None, file=None,
                 moduleName='DynamicallyCompiledCheetahTemplate',
                 mainClassName=None, # string
                 mainMethodName=None, # string
                 baseclassName=None, # string
                 extraImportStatements=None, # list of strings
                 settings=None # dict
                 ):
        # NOTE(review): extraImportStatements is accepted but never used
        # in this constructor -- confirm whether it is consumed elsewhere.
        super(ModuleCompiler, self).__init__()
        if settings:
            self.updateSettings(settings)
        # disable useStackFrames if the C version of NameMapper isn't compiled
        # it's painfully slow in the Python version and bites Windows users all
        # the time:
        if not NameMapper.C_VERSION:
            if not sys.platform.startswith('java'):
                warnings.warn(
                    "\nYou don't have the C version of NameMapper installed! "
                    "I'm disabling Cheetah's useStackFrames option as it is "
                    "painfully slow with the Python version of NameMapper. "
                    "You should get a copy of Cheetah with the compiled C version of NameMapper."
                    )
            self.setSetting('useStackFrames', False)

        self._compiled = False
        self._moduleName = moduleName
        if not mainClassName:
            self._mainClassName = moduleName
        else:
            self._mainClassName = mainClassName
        self._mainMethodNameArg = mainMethodName
        if mainMethodName:
            self.setSetting('mainMethodName', mainMethodName)
        self._baseclassName = baseclassName

        self._filePath = None
        self._fileMtime = None

        # exactly one of `source` / `file` may be supplied
        if source and file:
            raise TypeError("Cannot compile from a source string AND file.")
        elif isinstance(file, basestring): # it's a filename.
            f = open(file) # Raises IOError.
            source = f.read()
            f.close()
            self._filePath = file
            self._fileMtime = os.path.getmtime(file)
        elif hasattr(file, 'read'):
            source = file.read() # Can't set filename or mtime--they're not accessible.
        elif file:
            raise TypeError("'file' argument must be a filename string or file-like object")

        if self._filePath:
            self._fileDirName, self._fileBaseName = os.path.split(self._filePath)
            self._fileBaseNameRoot, self._fileBaseNameExt = os.path.splitext(self._fileBaseName)

        if not isinstance(source, basestring):
            source = unicode(source)
            # by converting to string here we allow objects such as other Templates
            # to be passed in

        # Handle the #indent directive by converting it to other directives.
        # (Over the long term we'll make it a real directive.)
        if source == "":
            warnings.warn("You supplied an empty string for the source!", )

        else:
            # #unicode and #encoding control how a str source is decoded;
            # they are mutually exclusive
            unicodeMatch = unicodeDirectiveRE.search(source)
            encodingMatch = encodingDirectiveRE.match(source)
            if unicodeMatch:
                if encodingMatch:
                    raise ParseError(
                        self, "#encoding and #unicode are mutually exclusive! "
                        "Use one or the other.")
                source = unicodeDirectiveRE.sub('', source)
                if isinstance(source, str):
                    encoding = unicodeMatch.group(1) or 'ascii'
                    source = unicode(source, encoding)
            elif encodingMatch:
                encodings = encodingMatch.groups()
                if len(encodings):
                    encoding = encodings[0]
                    source = source.decode(encoding)
            else:
                source = unicode(source)

        if source.find('#indent') != -1: #@@TR: undocumented hack
            source = indentize(source)

        self._parser = self.parserClass(source, filename=self._filePath, compiler=self)
        self._setupCompilerState()
def __getattr__(self, name):
    """Provide one-way access to the methods and attributes of the
    ClassCompiler, and thereby the MethodCompilers as well.

    WARNING: Use .setMethods to assign the attributes of the ClassCompiler
    from the methods of this class!!! or you will be assigning to attributes
    of this object instead.
    """
    if name in self.__dict__:
        return self.__dict__[name]
    elif hasattr(self.__class__, name):
        return getattr(self.__class__, name)
    elif self._activeClassesList and hasattr(self._activeClassesList[-1], name):
        # fall through to the active ClassCompiler
        return getattr(self._activeClassesList[-1], name)
    else:
        raise AttributeError(name)
def _initializeSettings(self):
    """Start from a deep copy of the default compiler settings."""
    self.updateSettings(copy.deepcopy(DEFAULT_COMPILER_SETTINGS))

def _setupCompilerState(self):
    """Initialize module-level compile state: the class-compiler stack,
    the module header/docstring buffers, and the standard imports and
    constants written into every generated module."""
    self._activeClassesList = []
    self._finishedClassesList = []      # listed by ordered
    self._finishedClassIndex = {}  # listed by name
    self._moduleDef = None
    self._moduleShBang = '#!/usr/bin/env python'
    self._moduleEncoding = 'ascii'
    self._moduleEncodingStr = ''
    self._moduleHeaderLines = []
    self._moduleDocStringLines = []
    self._specialVars = {}
    # import lines emitted verbatim at the top of every generated module
    self._importStatements = [
        "import sys",
        "import os",
        "import os.path",
        "import __builtin__",
        "from os.path import getmtime, exists",
        "import time",
        "import types",
        "from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion",
        "from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple",
        "from Cheetah.Template import Template",
        "from Cheetah.DummyTransaction import *",
        "from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList",
        "from Cheetah.CacheRegion import CacheRegion",
        "import Cheetah.Filters as Filters",
        "import Cheetah.ErrorCatchers as ErrorCatchers",
        ]

    # names assumed to already be in scope in generated modules;
    # #extends consults this to decide whether it must add an import
    self._importedVarNames = ['sys',
                              'os',
                              'os.path',
                              'time',
                              'types',
                              'Template',
                              'DummyTransaction',
                              'NotFound',
                              'Filters',
                              'ErrorCatchers',
                              'CacheRegion',
                              ]

    # short aliases used by the generated placeholder-lookup code
    self._moduleConstants = [
        "VFFSL=valueFromFrameOrSearchList",
        "VFSL=valueFromSearchList",
        "VFN=valueForName",
        "currentTime=time.time",
        ]
def compile(self):
    """Parse the template and build the main class; afterwards the
    module source can be fetched via getModuleCode()."""
    classCompiler = self._spawnClassCompiler(self._mainClassName)
    if self._baseclassName:
        classCompiler.setBaseClass(self._baseclassName)
    self._addActiveClassCompiler(classCompiler)
    self._parser.parse()
    self._swallowClassCompiler(self._popActiveClassCompiler())
    self._compiled = True
    self._parser.cleanup()

def _spawnClassCompiler(self, className, klass=None):
    """Create a class compiler wired back to this module compiler."""
    if klass is None:
        klass = self.classCompilerClass
    classCompiler = klass(className,
                          moduleCompiler=self,
                          mainMethodName=self.setting('mainMethodName'),
                          fileName=self._filePath,
                          settingsManager=self,
                          )
    return classCompiler

def _addActiveClassCompiler(self, classCompiler):
    """Push a class compiler onto the active stack."""
    self._activeClassesList.append(classCompiler)

def _getActiveClassCompiler(self):
    """Return the top of the active stack without popping."""
    return self._activeClassesList[-1]

def _popActiveClassCompiler(self):
    return self._activeClassesList.pop()

def _swallowClassCompiler(self, classCompiler):
    """Finalize a class compiler and record it by order and by name."""
    classCompiler.cleanupState()
    self._finishedClassesList.append( classCompiler )
    self._finishedClassIndex[classCompiler.className()] = classCompiler
    return classCompiler

def _finishedClasses(self):
    """Return finished class compilers in output order."""
    return self._finishedClassesList
def importedVarNames(self):
    """Return names assumed importable/in scope in generated modules."""
    return self._importedVarNames

def addImportedVarNames(self, varNames, raw_statement=None):
    """Record names brought into scope by an import.

    In non-legacy import mode, an import that occurs while a method body
    is being generated is emitted inline as a code chunk instead of
    being registered as a module-level name.
    """
    settings = self.settings()
    if not varNames:
        return
    if not settings.get('useLegacyImportMode'):
        # NOTE(review): getattr() without a default relies on __getattr__
        # delegation to reach the active method compiler's
        # _methodBodyChunks; it raises AttributeError when none is
        # reachable -- confirm that is intended.
        if raw_statement and getattr(self, '_methodBodyChunks'):
            self.addChunk(raw_statement)
    else:
        self._importedVarNames.extend(varNames)
## methods for adding stuff to the module and class definitions

def setBaseClass(self, baseClassName):
    """Handle #extends: set the generated class's base class and, when
    necessary, add the import that makes the base class available."""
    # the main method of a subclass template is renamed so it doesn't
    # shadow the parent template's output method
    if self._mainMethodNameArg:
        self.setMainMethodName(self._mainMethodNameArg)
    else:
        self.setMainMethodName(self.setting('mainMethodNameForSubclasses'))

    if self.setting('handlerForExtendsDirective'):
        # user hook that may rewrite the base class name
        handler = self.setting('handlerForExtendsDirective')
        baseClassName = handler(compiler=self, baseClassName=baseClassName)
        self._getActiveClassCompiler().setBaseClass(baseClassName)
    elif (not self.setting('autoImportForExtendsDirective')
          or baseClassName=='object' or baseClassName in self.importedVarNames()):
        self._getActiveClassCompiler().setBaseClass(baseClassName)
        # no need to import
    else:
        ##################################################
        ## If the #extends directive contains a classname or modulename that isn't
        #  in self.importedVarNames() already, we assume that we need to add
        #  an implied 'from ModName import ClassName' where ModName == ClassName.
        #  - This is the case in WebKit servlet modules.
        #  - We also assume that the final . separates the classname from the
        #    module name.  This might break if people do something really fancy
        #    with their dots and namespaces.
        baseclasses = baseClassName.split(',')
        for klass in baseclasses:
            chunks = klass.split('.')
            if len(chunks)==1:
                self._getActiveClassCompiler().setBaseClass(klass)
                if klass not in self.importedVarNames():
                    modName = klass
                    # we assume the class name to be the module name
                    # and that it's not a builtin:
                    importStatement = "from %s import %s" % (modName, klass)
                    self.addImportStatement(importStatement)
                    self.addImportedVarNames((klass,))
            else:
                needToAddImport = True
                modName = chunks[0]
                #print chunks, ':', self.importedVarNames()
                # walk the dotted path until we hit an already-imported
                # module prefix
                for chunk in chunks[1:-1]:
                    if modName in self.importedVarNames():
                        needToAddImport = False
                        finalBaseClassName = klass.replace(modName+'.', '')
                        self._getActiveClassCompiler().setBaseClass(finalBaseClassName)
                        break
                    else:
                        modName += '.'+chunk
                if needToAddImport:
                    modName, finalClassName = '.'.join(chunks[:-1]), chunks[-1]
                    #if finalClassName != chunks[:-1][-1]:
                    if finalClassName != chunks[-2]:
                        # we assume the class name to be the module name
                        modName = '.'.join(chunks)
                    self._getActiveClassCompiler().setBaseClass(finalClassName)
                    importStatement = "from %s import %s" % (modName, finalClassName)
                    self.addImportStatement(importStatement)
                    self.addImportedVarNames( [finalClassName,] )
def setCompilerSetting(self, key, valueExpr):
    """Set one compiler setting from a #compiler directive.

    SECURITY NOTE: valueExpr comes from the template source and is
    passed to eval() -- template sources must be trusted input.
    """
    self.setSetting(key, eval(valueExpr) )
    self._parser.configureParser()

def setCompilerSettings(self, keywords, settingsStr):
    """Apply a #compiler-settings block; `keywords` select reset
    behaviour and whether settingsStr is Python source or config text."""
    KWs = keywords
    merge = True
    if 'nomerge' in KWs:
        merge = False
    # NOTE(review): `merge` is computed but never used in this method.

    if 'reset' in KWs:
        # @@TR: this is actually caught by the parser at the moment.
        # subject to change in the future
        self._initializeSettings()
        self._parser.configureParser()
        return
    elif 'python' in KWs:
        settingsReader = self.updateSettingsFromPySrcStr
        # this comes from SettingsManager
    else:
        # this comes from SettingsManager
        settingsReader = self.updateSettingsFromConfigStr
    settingsReader(settingsStr)
    self._parser.configureParser()
def setShBang(self, shBang):
    """Set the #! line of the generated module."""
    self._moduleShBang = shBang

def setModuleEncoding(self, encoding):
    """Record the encoding declared for the generated module."""
    self._moduleEncoding = encoding

def getModuleEncoding(self):
    """Return the encoding recorded for the generated module."""
    return self._moduleEncoding

def addModuleHeader(self, line):
    """Adds a header comment to the top of the generated module."""
    self._moduleHeaderLines.append(line)

def addModuleDocString(self, line):
    """Adds a line to the generated module docstring."""
    self._moduleDocStringLines.append(line)

def addModuleGlobal(self, line):
    """Adds a line of global module code.  It is inserted after the
    import statements and Cheetah default module constants."""
    self._moduleConstants.append(line)

def addSpecialVar(self, basename, contents, includeUnderscores=True):
    """Adds module __specialConstant__ to the module globals; the value
    is stripped of surrounding whitespace."""
    if includeUnderscores:
        name = '__' + basename + '__'
    else:
        name = basename
    self._specialVars[name] = contents.strip()
def addImportStatement(self, impStatement):
    """Register an import statement for the generated module, or leave
    it to be emitted inline when we're mid-method in non-legacy mode.
    Either way the imported names are recorded for #extends."""
    settings = self.settings()
    if not self._methodBodyChunks or settings.get('useLegacyImportMode'):
        # In the case where we are importing inline in the middle of a source block
        # we don't want to inadvertantly import the module at the top of the file either
        self._importStatements.append(impStatement)

    #@@TR 2005-01-01: there's almost certainly a cleaner way to do this!
    importVarNames = impStatement[impStatement.find('import') + len('import'):].split(',')
    importVarNames = [var.split()[-1] for var in importVarNames]  # handles aliases
    importVarNames = [var for var in importVarNames if not var == '*']

    self.addImportedVarNames(importVarNames, raw_statement=impStatement) #used by #extend for auto-imports
def addAttribute(self, attribName, expr):
    """Forward a class-attribute assignment (#attr) to the active class
    compiler.  Note: `expr` is expected to carry its own leading space,
    matching the original `name =<expr>` concatenation."""
    assignment = attribName + ' =' + expr
    self._getActiveClassCompiler().addAttribute(assignment)
def addComment(self, comm):
    """Route a ## comment to the right buffer based on its prefix
    ('doc:', 'doc-method:', 'doc-module:', 'doc-class:', 'header:');
    special-var comments are forwarded to addSpecialVar(); everything
    else becomes a method comment."""
    if re.match(r'#+$', comm):      # skip bar comments
        return

    specialVarMatch = specialVarRE.match(comm)
    if specialVarMatch:
        # @@TR: this is a bit hackish and is being replaced with
        # #set module varName = ...
        return self.addSpecialVar(specialVarMatch.group(1),
                                  comm[specialVarMatch.end():])
    elif comm.startswith('doc:'):
        addLine = self.addMethDocString
        comm = comm[len('doc:'):].strip()
    elif comm.startswith('doc-method:'):
        addLine = self.addMethDocString
        comm = comm[len('doc-method:'):].strip()
    elif comm.startswith('doc-module:'):
        addLine = self.addModuleDocString
        comm = comm[len('doc-module:'):].strip()
    elif comm.startswith('doc-class:'):
        addLine = self.addClassDocString
        comm = comm[len('doc-class:'):].strip()
    elif comm.startswith('header:'):
        addLine = self.addModuleHeader
        comm = comm[len('header:'):].strip()
    else:
        addLine = self.addMethComment

    for line in comm.splitlines():
        addLine(line)
## methods for module code wrapping

def getModuleCode(self):
    """Return the generated module source, compiling on first call and
    caching the assembled text."""
    if not self._compiled:
        self.compile()
    if self._moduleDef:
        return self._moduleDef
    else:
        return self.wrapModuleDef()

# str(moduleCompiler) yields the generated module source
__str__ = getModuleCode
def wrapModuleDef(self):
    """Assemble the full module source: header, docstring, imports,
    constants, special vars, the Cheetah version guard, the class
    definitions, the plumbing hookup, and the footer.  The `%%` in the
    template below survive formatting as literal `%` in the output."""
    self.addSpecialVar('CHEETAH_docstring', self.setting('defDocStrMsg'))
    self.addModuleGlobal('__CHEETAH_version__ = %r'%Version)
    self.addModuleGlobal('__CHEETAH_versionTuple__ = %r'%(VersionTuple,))
    if self.setting('addTimestampsToCompilerOutput'):
        self.addModuleGlobal('__CHEETAH_genTime__ = %r'%time.time())
        self.addModuleGlobal('__CHEETAH_genTimestamp__ = %r'%self.timestamp())
    if self._filePath:
        timestamp = self.timestamp(self._fileMtime)
        self.addModuleGlobal('__CHEETAH_src__ = %r'%self._filePath)
        self.addModuleGlobal('__CHEETAH_srcLastModified__ = %r'%timestamp)
    else:
        self.addModuleGlobal('__CHEETAH_src__ = None')
        self.addModuleGlobal('__CHEETAH_srcLastModified__ = None')

    moduleDef = """%(header)s
%(docstring)s
##################################################
## DEPENDENCIES
%(imports)s

##################################################
## MODULE CONSTANTS
%(constants)s
%(specialVars)s

if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %%s. Templates compiled before version %%s must be recompiled.'%%(
         __CHEETAH_version__, RequiredCheetahVersion))

##################################################
## CLASSES

%(classes)s

## END CLASS DEFINITION

if not hasattr(%(mainClassName)s, '_initCheetahAttributes'):
    templateAPIClass = getattr(%(mainClassName)s, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(%(mainClassName)s)

%(footer)s
""" % {'header': self.moduleHeader(),
       'docstring': self.moduleDocstring(),
       'specialVars': self.specialVars(),
       'imports': self.importStatements(),
       'constants': self.moduleConstants(),
       'classes': self.classDefs(),
       'footer': self.moduleFooter(),
       'mainClassName': self._mainClassName,
       }

    self._moduleDef = moduleDef
    return moduleDef
def timestamp(self, theTime=None):
    """Return an asctime string for `theTime` (epoch seconds), defaulting
    to the current time.

    Fix: the original tested `if not theTime`, which silently replaced a
    legitimate epoch timestamp of 0 with the current time; now only None
    triggers the default.
    """
    if theTime is None:
        theTime = time.time()
    return time.asctime(time.localtime(theTime))
def moduleHeader(self):
    """Return the #! line, the encoding line, and any queued header
    comment lines (each prefixed with '#' plus commentOffset spaces)."""
    header = '\n'.join([self._moduleShBang, self._moduleEncodingStr, ''])
    if self._moduleHeaderLines:
        offSet = self.setting('commentOffset')
        prefix = '#' + ' ' * offSet
        header += prefix + ('\n' + prefix).join(self._moduleHeaderLines) + '\n'
    return header

def moduleDocstring(self):
    """Return the generated module docstring, or '' if none was added."""
    if not self._moduleDocStringLines:
        return ''
    body = '\n'.join(self._moduleDocStringLines)
    return '"""' + body + '\n"""\n'
def specialVars(self):
    """Return `name = repr(value)` lines for the special vars, sorted by
    name."""
    theVars = self._specialVars
    lines = ['%s = %r' % (name, theVars[name]) for name in sorted(theVars)]
    return '\n'.join(lines)

def importStatements(self):
    """Return the generated module's import block."""
    return '\n'.join(self._importStatements)

def moduleConstants(self):
    """Return the generated module-constant lines."""
    return '\n'.join(self._moduleConstants)

def classDefs(self):
    """Return all finished class definitions joined by blank lines."""
    defs = [klass.classDef() for klass in self._finishedClasses()]
    return '\n\n'.join(defs)
def moduleFooter(self):
    """Return the generated module's trailer: the credits banner and a
    __main__ hook that runs the template through the command-line
    interface."""
    return """
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=%(className)s()).run()
""" % {'className':self._mainClassName}
##################################################
## Make Compiler an alias for ModuleCompiler
# `Compiler` is the public name most callers import from this module.
Compiler = ModuleCompiler
| [
[
8,
0,
0.0025,
0.0045,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0055,
0.0005,
0,
0.66,
0.0312,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.006,
0.0005,
0,
0.66,... | [
"'''\n Compiler classes for Cheetah:\n ModuleCompiler aka 'Compiler'\n ClassCompiler\n MethodCompiler\n\n If you are trying to grok this code start with ModuleCompiler.__init__,\n ModuleCompiler.compile, and ModuleCompiler.__getattr__.",
"import sys",
"import os",
"import os.path",
"from o... |
'''
Provides several CacheStore backends for Cheetah's caching framework. The
methods provided by these classes have the same semantics as those in the
python-memcached API, except for their return values:
set(key, val, time=0)
set the value unconditionally
add(key, val, time=0)
set only if the server doesn't already have this key
replace(key, val, time=0)
set only if the server already has this key
get(key, val)
returns val or raises a KeyError
delete(key)
deletes or raises a KeyError
'''
import time
from Cheetah.Utils.memcache import Client as MemcachedClient
class Error(Exception):
    """Raised when a cache-store operation's precondition fails
    (e.g. add() on an existing key)."""
    pass
class AbstractCacheStore(object):
    """Interface that every cache-store backend implements.

    The method semantics follow the python-memcached API, except that
    failures raise (Error/KeyError) instead of returning status codes.
    """

    def set(self, key, val, time=None):
        """Store val under key unconditionally."""
        raise NotImplementedError

    def add(self, key, val, time=None):
        """Store val only if key is not already present."""
        raise NotImplementedError

    def replace(self, key, val, time=None):
        """Store val only if key is already present."""
        raise NotImplementedError

    def delete(self, key):
        """Remove key, or raise KeyError."""
        raise NotImplementedError

    def get(self, key):
        """Return the stored value, or raise KeyError."""
        raise NotImplementedError
class MemoryCacheStore(AbstractCacheStore):
    """In-process, dict-backed cache store.

    Entries are stored as (value, expiryTime) tuples; expiryTime is an
    absolute epoch time, with 0 meaning 'never expires'.

    Bug fix: replace() raised when the key WAS present (the same test as
    add()), inverting the documented semantics ('set only if the server
    already has this key'); it now raises when the key is absent, with a
    corrected error message.
    """
    def __init__(self):
        self._data = {}

    def set(self, key, val, time=0):
        """Store val unconditionally."""
        self._data[key] = (val, time)

    def add(self, key, val, time=0):
        """Store val only if key is absent; otherwise raise Error."""
        if key in self._data:
            raise Error('a value for key %r is already in the cache' % key)
        self._data[key] = (val, time)

    def replace(self, key, val, time=0):
        """Store val only if key is already present; otherwise raise Error."""
        if key not in self._data:
            raise Error('no value for key %r is in the cache' % key)
        self._data[key] = (val, time)

    def delete(self, key):
        """Remove key; raises KeyError if absent."""
        del self._data[key]

    def get(self, key):
        """Return the cached value; expired or missing keys raise KeyError."""
        (val, exptime) = self._data[key]
        if exptime and time.time() > exptime:
            # lazily evict expired entries on read
            del self._data[key]
            raise KeyError(key)
        else:
            return val

    def clear(self):
        """Drop all cached entries."""
        self._data.clear()
class MemcachedCacheStore(AbstractCacheStore):
    """Cache store backed by a memcached server pool.

    Bug fixes vs. the original:
      - `servers` was a plain string (missing trailing comma made the
        parentheses a no-op); python-memcached's Client expects a
        sequence of "host:port" strings.
      - add() and replace() ended with a stray `self._data[key] = ...`
        line copied from MemoryCacheStore; this class has no _data dict,
        so every *successful* add/replace raised AttributeError.
      - replace()'s error message described add()'s failure mode.
    """
    servers = ('127.0.0.1:11211',)

    def __init__(self, servers=None, debug=False):
        if servers is None:
            servers = self.servers
        self._client = MemcachedClient(servers, debug)

    def set(self, key, val, time=0):
        """Store val unconditionally."""
        self._client.set(key, val, time)

    def add(self, key, val, time=0):
        """Store val only if key is absent; otherwise raise Error."""
        res = self._client.add(key, val, time)
        if not res:
            raise Error('a value for key %r is already in the cache' % key)

    def replace(self, key, val, time=0):
        """Store val only if key is already present; otherwise raise Error."""
        res = self._client.replace(key, val, time)
        if not res:
            raise Error('no value for key %r is in the cache' % key)

    def delete(self, key):
        """Remove key; raises KeyError if memcached reports failure."""
        res = self._client.delete(key, time=0)
        if not res:
            raise KeyError(key)

    def get(self, key):
        """Return the cached value or raise KeyError.

        NOTE: a stored value of None is indistinguishable from a miss.
        """
        val = self._client.get(key)
        if val is None:
            raise KeyError(key)
        else:
            return val

    def clear(self):
        """Flush all entries on every configured server."""
        self._client.flush_all()
| [
[
8,
0,
0.0787,
0.1481,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1574,
0.0093,
0,
0.66,
0.1667,
654,
0,
1,
0,
0,
654,
0,
0
],
[
1,
0,
0.1759,
0.0093,
0,
0.66... | [
"'''\nProvides several CacheStore backends for Cheetah's caching framework. The\nmethods provided by these classes have the same semantics as those in the\npython-memcached API, except for their return values:\n\nset(key, val, time=0)\n set the value unconditionally\nadd(key, val, time=0)",
"import time",
"fr... |
# $Id: ImportHooks.py,v 1.27 2007/11/16 18:28:47 tavis_rudd Exp $
"""Provides some import hooks to allow Cheetah's .tmpl files to be imported
directly like Python .py modules.
To use these:
import Cheetah.ImportHooks
Cheetah.ImportHooks.install()
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.27 $
Start Date: 2001/03/30
Last Revision Date: $Date: 2007/11/16 18:28:47 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.27 $"[11:-2]
import sys
import os.path
import types
import __builtin__
import new
import imp
from threading import RLock
import string
import traceback
from Cheetah import ImportManager
from Cheetah.ImportManager import DirOwner
from Cheetah.Compiler import Compiler
from Cheetah.convertTmplPathToModuleName import convertTmplPathToModuleName
_installed = False
##################################################
## HELPER FUNCS
_cacheDir = []
def setCacheDir(cacheDir):
global _cacheDir
_cacheDir.append(cacheDir)
##################################################
## CLASSES
class CheetahDirOwner(DirOwner):
_lock = RLock()
_acquireLock = _lock.acquire
_releaseLock = _lock.release
templateFileExtensions = ('.tmpl',)
def getmod(self, name):
self._acquireLock()
try:
mod = DirOwner.getmod(self, name)
if mod:
return mod
for ext in self.templateFileExtensions:
tmplPath = os.path.join(self.path, name + ext)
if os.path.exists(tmplPath):
try:
return self._compile(name, tmplPath)
except:
# @@TR: log the error
exc_txt = traceback.format_exc()
exc_txt =' '+(' \n'.join(exc_txt.splitlines()))
raise ImportError(
'Error while compiling Cheetah module'
' %(name)s, original traceback follows:\n%(exc_txt)s'%locals())
##
return None
finally:
self._releaseLock()
def _compile(self, name, tmplPath):
## @@ consider adding an ImportError raiser here
code = str(Compiler(file=tmplPath, moduleName=name,
mainClassName=name))
if _cacheDir:
__file__ = os.path.join(_cacheDir[0],
convertTmplPathToModuleName(tmplPath)) + '.py'
try:
open(__file__, 'w').write(code)
except OSError:
## @@ TR: need to add some error code here
traceback.print_exc(file=sys.stderr)
__file__ = tmplPath
else:
__file__ = tmplPath
co = compile(code+'\n', __file__, 'exec')
mod = imp.new_module(name)
mod.__file__ = co.co_filename
if _cacheDir:
mod.__orig_file__ = tmplPath # @@TR: this is used in the WebKit
# filemonitoring code
mod.__co__ = co
return mod
##################################################
## FUNCTIONS
def install(templateFileExtensions=('.tmpl',)):
"""Install the Cheetah Import Hooks"""
global _installed
if not _installed:
CheetahDirOwner.templateFileExtensions = templateFileExtensions
import __builtin__
if isinstance(__builtin__.__import__, types.BuiltinFunctionType):
global __oldimport__
__oldimport__ = __builtin__.__import__
ImportManager._globalOwnerTypes.insert(0, CheetahDirOwner)
#ImportManager._globalOwnerTypes.append(CheetahDirOwner)
global _manager
_manager=ImportManager.ImportManager()
_manager.setThreaded()
_manager.install()
def uninstall():
"""Uninstall the Cheetah Import Hooks"""
global _installed
if not _installed:
import __builtin__
if isinstance(__builtin__.__import__, types.MethodType):
__builtin__.__import__ = __oldimport__
global _manager
del _manager
if __name__ == '__main__':
install()
| [
[
8,
0,
0.0761,
0.1159,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1377,
0.0072,
0,
0.66,
0.0455,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.1449,
0.0072,
0,
0.66... | [
"\"\"\"Provides some import hooks to allow Cheetah's .tmpl files to be imported\ndirectly like Python .py modules.\n\nTo use these:\n import Cheetah.ImportHooks\n Cheetah.ImportHooks.install()\n\nMeta-Data",
"__author__ = \"Tavis Rudd <tavis@damnsimple.com>\"",
"__revision__ = \"$Revision: 1.27 $\"[11:-2]",
... |
import os.path
import string
l = ['_'] * 256
for c in string.digits + string.letters:
l[ord(c)] = c
_pathNameTransChars = string.join(l, '')
del l, c
def convertTmplPathToModuleName(tmplPath,
_pathNameTransChars=_pathNameTransChars,
splitdrive=os.path.splitdrive,
translate=string.translate,
):
return translate(splitdrive(tmplPath)[1], _pathNameTransChars)
| [
[
1,
0,
0.0667,
0.0667,
0,
0.66,
0,
79,
0,
1,
0,
0,
79,
0,
0
],
[
1,
0,
0.1333,
0.0667,
0,
0.66,
0.2,
890,
0,
1,
0,
0,
890,
0,
0
],
[
14,
0,
0.2667,
0.0667,
0,
0.66... | [
"import os.path",
"import string",
"l = ['_'] * 256",
"for c in string.digits + string.letters:\n l[ord(c)] = c",
" l[ord(c)] = c",
"_pathNameTransChars = string.join(l, '')",
"def convertTmplPathToModuleName(tmplPath,\n _pathNameTransChars=_pathNameTransChars,\n ... |
"""
Parser classes for Cheetah's Compiler
Classes:
ParseError( Exception )
_LowLevelParser( Cheetah.SourceReader.SourceReader ), basically a lexer
_HighLevelParser( _LowLevelParser )
Parser === _HighLevelParser (an alias)
"""
import os
import sys
import re
from re import DOTALL, MULTILINE
from types import StringType, ListType, TupleType, ClassType, TypeType
import time
from tokenize import pseudoprog
import inspect
import new
import traceback
from Cheetah.SourceReader import SourceReader
from Cheetah import Filters
from Cheetah import ErrorCatchers
from Cheetah.Unspecified import Unspecified
from Cheetah.Macros.I18n import I18n
# re tools
_regexCache = {}
def cachedRegex(pattern):
if pattern not in _regexCache:
_regexCache[pattern] = re.compile(pattern)
return _regexCache[pattern]
def escapeRegexChars(txt,
escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
"""Return a txt with all special regular expressions chars escaped."""
return escapeRE.sub(r'\\\1', txt)
def group(*choices): return '(' + '|'.join(choices) + ')'
def nongroup(*choices): return '(?:' + '|'.join(choices) + ')'
def namedGroup(name, *choices): return '(P:<' + name +'>' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
##################################################
## CONSTANTS & GLOBALS ##
NO_CACHE = 0
STATIC_CACHE = 1
REFRESH_CACHE = 2
SET_LOCAL = 0
SET_GLOBAL = 1
SET_MODULE = 2
##################################################
## Tokens for the parser ##
#generic
identchars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ_"
namechars = identchars + "0123456789"
#operators
powerOp = '**'
unaryArithOps = ('+', '-', '~')
binaryArithOps = ('+', '-', '/', '//', '%')
shiftOps = ('>>', '<<')
bitwiseOps = ('&', '|', '^')
assignOp = '='
augAssignOps = ('+=', '-=', '/=', '*=', '**=', '^=', '%=',
'>>=', '<<=', '&=', '|=', )
assignmentOps = (assignOp,) + augAssignOps
compOps = ('<', '>', '==', '!=', '<=', '>=', '<>', 'is', 'in',)
booleanOps = ('and', 'or', 'not')
operators = (powerOp,) + unaryArithOps + binaryArithOps \
+ shiftOps + bitwiseOps + assignmentOps \
+ compOps + booleanOps
delimeters = ('(', ')', '{', '}', '[', ']',
',', '.', ':', ';', '=', '`') + augAssignOps
keywords = ('and', 'del', 'for', 'is', 'raise',
'assert', 'elif', 'from', 'lambda', 'return',
'break', 'else', 'global', 'not', 'try',
'class', 'except', 'if', 'or', 'while',
'continue', 'exec', 'import', 'pass',
'def', 'finally', 'in', 'print',
)
single3 = "'''"
double3 = '"""'
tripleQuotedStringStarts = ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""')
tripleQuotedStringPairs = {"'''": single3, '"""': double3,
"r'''": single3, 'r"""': double3,
"u'''": single3, 'u"""': double3,
"ur'''": single3, 'ur"""': double3,
"R'''": single3, 'R"""': double3,
"U'''": single3, 'U"""': double3,
"uR'''": single3, 'uR"""': double3,
"Ur'''": single3, 'Ur"""': double3,
"UR'''": single3, 'UR"""': double3,
}
closurePairs= {')':'(',']':'[','}':'{'}
closurePairsRev= {'(':')','[':']','{':'}'}
##################################################
## Regex chunks for the parser ##
tripleQuotedStringREs = {}
def makeTripleQuoteRe(start, end):
start = escapeRegexChars(start)
end = escapeRegexChars(end)
return re.compile(r'(?:' + start + r').*?' + r'(?:' + end + r')', re.DOTALL)
for start, end in tripleQuotedStringPairs.items():
tripleQuotedStringREs[start] = makeTripleQuoteRe(start, end)
WS = r'[ \f\t]*'
EOL = r'\r\n|\n|\r'
EOLZ = EOL + r'|\Z'
escCharLookBehind = nongroup(r'(?<=\A)', r'(?<!\\)')
nameCharLookAhead = r'(?=[A-Za-z_])'
identRE=re.compile(r'[a-zA-Z_][a-zA-Z_0-9]*')
EOLre=re.compile(r'(?:\r\n|\r|\n)')
specialVarRE=re.compile(r'([a-zA-z_]+)@') # for matching specialVar comments
# e.g. ##author@ Tavis Rudd
unicodeDirectiveRE = re.compile(
r'(?:^|\r\n|\r|\n)\s*#\s{0,5}unicode[:\s]*([-\w.]*)\s*(?:\r\n|\r|\n)', re.MULTILINE)
encodingDirectiveRE = re.compile(
r'(?:^|\r\n|\r|\n)\s*#\s{0,5}encoding[:\s]*([-\w.]*)\s*(?:\r\n|\r|\n)', re.MULTILINE)
escapedNewlineRE = re.compile(r'(?<!\\)((\\\\)*)\\(n|012)')
directiveNamesAndParsers = {
# importing and inheritance
'import': None,
'from': None,
'extends': 'eatExtends',
'implements': 'eatImplements',
'super': 'eatSuper',
# output, filtering, and caching
'slurp': 'eatSlurp',
'raw': 'eatRaw',
'include': 'eatInclude',
'cache': 'eatCache',
'filter': 'eatFilter',
'echo': None,
'silent': None,
'transform': 'eatTransform',
'call': 'eatCall',
'arg': 'eatCallArg',
'capture': 'eatCapture',
# declaration, assignment, and deletion
'attr': 'eatAttr',
'def': 'eatDef',
'block': 'eatBlock',
'@': 'eatDecorator',
'defmacro': 'eatDefMacro',
'closure': 'eatClosure',
'set': 'eatSet',
'del': None,
# flow control
'if': 'eatIf',
'while': None,
'for': None,
'else': None,
'elif': None,
'pass': None,
'break': None,
'continue': None,
'stop': None,
'return': None,
'yield': None,
# little wrappers
'repeat': None,
'unless': None,
# error handling
'assert': None,
'raise': None,
'try': None,
'except': None,
'finally': None,
'errorCatcher': 'eatErrorCatcher',
# intructions to the parser and compiler
'breakpoint': 'eatBreakPoint',
'compiler': 'eatCompiler',
'compiler-settings': 'eatCompilerSettings',
# misc
'shBang': 'eatShbang',
'encoding': 'eatEncoding',
'end': 'eatEndDirective',
}
endDirectiveNamesAndHandlers = {
'def': 'handleEndDef', # has short-form
'block': None, # has short-form
'closure': None, # has short-form
'cache': None, # has short-form
'call': None, # has short-form
'capture': None, # has short-form
'filter': None,
'errorCatcher': None,
'while': None, # has short-form
'for': None, # has short-form
'if': None, # has short-form
'try': None, # has short-form
'repeat': None, # has short-form
'unless': None, # has short-form
}
##################################################
## CLASSES ##
# @@TR: SyntaxError doesn't call exception.__str__ for some reason!
#class ParseError(SyntaxError):
class ParseError(ValueError):
def __init__(self, stream, msg='Invalid Syntax', extMsg='', lineno=None, col=None):
self.stream = stream
if stream.pos() >= len(stream):
stream.setPos(len(stream) -1)
self.msg = msg
self.extMsg = extMsg
self.lineno = lineno
self.col = col
def __str__(self):
return self.report()
def report(self):
stream = self.stream
if stream.filename():
f = " in file %s" % stream.filename()
else:
f = ''
report = ''
if self.lineno:
lineno = self.lineno
row, col, line = (lineno, (self.col or 0),
self.stream.splitlines()[lineno-1])
else:
row, col, line = self.stream.getRowColLine()
## get the surrounding lines
lines = stream.splitlines()
prevLines = [] # (rowNum, content)
for i in range(1, 4):
if row-1-i <=0:
break
prevLines.append( (row-i, lines[row-1-i]) )
nextLines = [] # (rowNum, content)
for i in range(1, 4):
if not row-1+i < len(lines):
break
nextLines.append( (row+i, lines[row-1+i]) )
nextLines.reverse()
## print the main message
report += "\n\n%s\n" %self.msg
report += "Line %i, column %i%s\n\n" % (row, col, f)
report += 'Line|Cheetah Code\n'
report += '----|-------------------------------------------------------------\n'
while prevLines:
lineInfo = prevLines.pop()
report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
report += "%(row)-4d|%(line)s\n"% {'row':row, 'line':line}
report += ' '*5 +' '*(col-1) + "^\n"
while nextLines:
lineInfo = nextLines.pop()
report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
## add the extra msg
if self.extMsg:
report += self.extMsg + '\n'
return report
class ForbiddenSyntax(ParseError):
pass
class ForbiddenExpression(ForbiddenSyntax):
pass
class ForbiddenDirective(ForbiddenSyntax):
pass
class CheetahVariable(object):
def __init__(self, nameChunks, useNameMapper=True, cacheToken=None,
rawSource=None):
self.nameChunks = nameChunks
self.useNameMapper = useNameMapper
self.cacheToken = cacheToken
self.rawSource = rawSource
class Placeholder(CheetahVariable):
pass
class ArgList(object):
"""Used by _LowLevelParser.getArgList()"""
def __init__(self):
self.arguments = []
self.defaults = []
self.count = 0
def add_argument(self, name):
self.arguments.append(name)
self.defaults.append(None)
def next(self):
self.count += 1
def add_default(self, token):
count = self.count
if self.defaults[count] is None:
self.defaults[count] = ''
self.defaults[count] += token
def merge(self):
defaults = (isinstance(d, basestring) and d.strip() or None for d in self.defaults)
return list(map(None, (a.strip() for a in self.arguments), defaults))
def __str__(self):
return str(self.merge())
class _LowLevelParser(SourceReader):
"""This class implements the methods to match or extract ('get*') the basic
elements of Cheetah's grammar. It does NOT handle any code generation or
state management.
"""
_settingsManager = None
def setSettingsManager(self, settingsManager):
self._settingsManager = settingsManager
def setting(self, key, default=Unspecified):
if default is Unspecified:
return self._settingsManager.setting(key)
else:
return self._settingsManager.setting(key, default=default)
def setSetting(self, key, val):
self._settingsManager.setSetting(key, val)
def settings(self):
return self._settingsManager.settings()
def updateSettings(self, settings):
self._settingsManager.updateSettings(settings)
def _initializeSettings(self):
self._settingsManager._initializeSettings()
def configureParser(self):
"""Is called by the Compiler instance after the parser has had a
settingsManager assigned with self.setSettingsManager()
"""
self._makeCheetahVarREs()
self._makeCommentREs()
self._makeDirectiveREs()
self._makePspREs()
self._possibleNonStrConstantChars = (
self.setting('commentStartToken')[0] +
self.setting('multiLineCommentStartToken')[0] +
self.setting('cheetahVarStartToken')[0] +
self.setting('directiveStartToken')[0] +
self.setting('PSPStartToken')[0])
self._nonStrConstMatchers = [
self.matchCommentStartToken,
self.matchMultiLineCommentStartToken,
self.matchVariablePlaceholderStart,
self.matchExpressionPlaceholderStart,
self.matchDirective,
self.matchPSPStartToken,
self.matchEOLSlurpToken,
]
## regex setup ##
def _makeCheetahVarREs(self):
"""Setup the regexs for Cheetah $var parsing."""
num = r'[0-9\.]+'
interval = (r'(?P<interval>' +
num + r's|' +
num + r'm|' +
num + r'h|' +
num + r'd|' +
num + r'w|' +
num + ')'
)
cacheToken = (r'(?:' +
r'(?P<REFRESH_CACHE>\*' + interval + '\*)'+
'|' +
r'(?P<STATIC_CACHE>\*)' +
'|' +
r'(?P<NO_CACHE>)' +
')')
self.cacheTokenRE = cachedRegex(cacheToken)
silentPlaceholderToken = (r'(?:' +
r'(?P<SILENT>' +escapeRegexChars('!')+')'+
'|' +
r'(?P<NOT_SILENT>)' +
')')
self.silentPlaceholderTokenRE = cachedRegex(silentPlaceholderToken)
self.cheetahVarStartRE = cachedRegex(
escCharLookBehind +
r'(?P<startToken>'+escapeRegexChars(self.setting('cheetahVarStartToken'))+')'+
r'(?P<silenceToken>'+silentPlaceholderToken+')'+
r'(?P<cacheToken>'+cacheToken+')'+
r'(?P<enclosure>|(?:(?:\{|\(|\[)[ \t\f]*))' + # allow WS after enclosure
r'(?=[A-Za-z_])')
validCharsLookAhead = r'(?=[A-Za-z_\*!\{\(\[])'
self.cheetahVarStartToken = self.setting('cheetahVarStartToken')
self.cheetahVarStartTokenRE = cachedRegex(
escCharLookBehind +
escapeRegexChars(self.setting('cheetahVarStartToken'))
+validCharsLookAhead
)
self.cheetahVarInExpressionStartTokenRE = cachedRegex(
escapeRegexChars(self.setting('cheetahVarStartToken'))
+r'(?=[A-Za-z_])'
)
self.expressionPlaceholderStartRE = cachedRegex(
escCharLookBehind +
r'(?P<startToken>' + escapeRegexChars(self.setting('cheetahVarStartToken')) + ')' +
r'(?P<cacheToken>' + cacheToken + ')' +
#r'\[[ \t\f]*'
r'(?:\{|\(|\[)[ \t\f]*'
+ r'(?=[^\)\}\]])'
)
if self.setting('EOLSlurpToken'):
self.EOLSlurpRE = cachedRegex(
escapeRegexChars(self.setting('EOLSlurpToken'))
+ r'[ \t\f]*'
+ r'(?:'+EOL+')'
)
else:
self.EOLSlurpRE = None
def _makeCommentREs(self):
"""Construct the regex bits that are used in comment parsing."""
startTokenEsc = escapeRegexChars(self.setting('commentStartToken'))
self.commentStartTokenRE = cachedRegex(escCharLookBehind + startTokenEsc)
del startTokenEsc
startTokenEsc = escapeRegexChars(
self.setting('multiLineCommentStartToken'))
endTokenEsc = escapeRegexChars(
self.setting('multiLineCommentEndToken'))
self.multiLineCommentTokenStartRE = cachedRegex(escCharLookBehind +
startTokenEsc)
self.multiLineCommentEndTokenRE = cachedRegex(escCharLookBehind +
endTokenEsc)
def _makeDirectiveREs(self):
"""Construct the regexs that are used in directive parsing."""
startToken = self.setting('directiveStartToken')
endToken = self.setting('directiveEndToken')
startTokenEsc = escapeRegexChars(startToken)
endTokenEsc = escapeRegexChars(endToken)
validSecondCharsLookAhead = r'(?=[A-Za-z_@])'
reParts = [escCharLookBehind, startTokenEsc]
if self.setting('allowWhitespaceAfterDirectiveStartToken'):
reParts.append('[ \t]*')
reParts.append(validSecondCharsLookAhead)
self.directiveStartTokenRE = cachedRegex(''.join(reParts))
self.directiveEndTokenRE = cachedRegex(escCharLookBehind + endTokenEsc)
def _makePspREs(self):
"""Setup the regexs for PSP parsing."""
startToken = self.setting('PSPStartToken')
startTokenEsc = escapeRegexChars(startToken)
self.PSPStartTokenRE = cachedRegex(escCharLookBehind + startTokenEsc)
endToken = self.setting('PSPEndToken')
endTokenEsc = escapeRegexChars(endToken)
self.PSPEndTokenRE = cachedRegex(escCharLookBehind + endTokenEsc)
def _unescapeCheetahVars(self, theString):
"""Unescape any escaped Cheetah \$vars in the string.
"""
token = self.setting('cheetahVarStartToken')
return theString.replace('\\' + token, token)
def _unescapeDirectives(self, theString):
"""Unescape any escaped Cheetah directives in the string.
"""
token = self.setting('directiveStartToken')
return theString.replace('\\' + token, token)
def isLineClearToStartToken(self, pos=None):
return self.isLineClearToPos(pos)
def matchTopLevelToken(self):
"""Returns the first match found from the following methods:
self.matchCommentStartToken
self.matchMultiLineCommentStartToken
self.matchVariablePlaceholderStart
self.matchExpressionPlaceholderStart
self.matchDirective
self.matchPSPStartToken
self.matchEOLSlurpToken
Returns None if no match.
"""
match = None
if self.peek() in self._possibleNonStrConstantChars:
for matcher in self._nonStrConstMatchers:
match = matcher()
if match:
break
return match
def matchPyToken(self):
match = pseudoprog.match(self.src(), self.pos())
if match and match.group() in tripleQuotedStringStarts:
TQSmatch = tripleQuotedStringREs[match.group()].match(self.src(), self.pos())
if TQSmatch:
return TQSmatch
return match
def getPyToken(self):
match = self.matchPyToken()
if match is None:
raise ParseError(self)
elif match.group() in tripleQuotedStringStarts:
raise ParseError(self, msg='Malformed triple-quoted string')
return self.readTo(match.end())
def matchEOLSlurpToken(self):
if self.EOLSlurpRE:
return self.EOLSlurpRE.match(self.src(), self.pos())
def getEOLSlurpToken(self):
match = self.matchEOLSlurpToken()
if not match:
raise ParseError(self, msg='Invalid EOL slurp token')
return self.readTo(match.end())
def matchCommentStartToken(self):
return self.commentStartTokenRE.match(self.src(), self.pos())
def getCommentStartToken(self):
match = self.matchCommentStartToken()
if not match:
raise ParseError(self, msg='Invalid single-line comment start token')
return self.readTo(match.end())
def matchMultiLineCommentStartToken(self):
return self.multiLineCommentTokenStartRE.match(self.src(), self.pos())
def getMultiLineCommentStartToken(self):
match = self.matchMultiLineCommentStartToken()
if not match:
raise ParseError(self, msg='Invalid multi-line comment start token')
return self.readTo(match.end())
def matchMultiLineCommentEndToken(self):
return self.multiLineCommentEndTokenRE.match(self.src(), self.pos())
def getMultiLineCommentEndToken(self):
match = self.matchMultiLineCommentEndToken()
if not match:
raise ParseError(self, msg='Invalid multi-line comment end token')
return self.readTo(match.end())
def getCommaSeparatedSymbols(self):
"""
Loosely based on getDottedName to pull out comma separated
named chunks
"""
srcLen = len(self)
pieces = []
nameChunks = []
if not self.peek() in identchars:
raise ParseError(self)
while self.pos() < srcLen:
c = self.peek()
if c in namechars:
nameChunk = self.getIdentifier()
nameChunks.append(nameChunk)
elif c == '.':
if self.pos()+1 <srcLen and self.peek(1) in identchars:
nameChunks.append(self.getc())
else:
break
elif c == ',':
self.getc()
pieces.append(''.join(nameChunks))
nameChunks = []
elif c in (' ', '\t'):
self.getc()
else:
break
if nameChunks:
pieces.append(''.join(nameChunks))
return pieces
def getDottedName(self):
srcLen = len(self)
nameChunks = []
if not self.peek() in identchars:
raise ParseError(self)
while self.pos() < srcLen:
c = self.peek()
if c in namechars:
nameChunk = self.getIdentifier()
nameChunks.append(nameChunk)
elif c == '.':
if self.pos()+1 <srcLen and self.peek(1) in identchars:
nameChunks.append(self.getc())
else:
break
else:
break
return ''.join(nameChunks)
def matchIdentifier(self):
return identRE.match(self.src(), self.pos())
def getIdentifier(self):
match = self.matchIdentifier()
if not match:
raise ParseError(self, msg='Invalid identifier')
return self.readTo(match.end())
def matchOperator(self):
match = self.matchPyToken()
if match and match.group() not in operators:
match = None
return match
def getOperator(self):
match = self.matchOperator()
if not match:
raise ParseError(self, msg='Expected operator')
return self.readTo( match.end() )
def matchAssignmentOperator(self):
match = self.matchPyToken()
if match and match.group() not in assignmentOps:
match = None
return match
def getAssignmentOperator(self):
match = self.matchAssignmentOperator()
if not match:
raise ParseError(self, msg='Expected assignment operator')
return self.readTo( match.end() )
def matchDirective(self):
"""Returns False or the name of the directive matched.
"""
startPos = self.pos()
if not self.matchDirectiveStartToken():
return False
self.getDirectiveStartToken()
directiveName = self.matchDirectiveName()
self.setPos(startPos)
return directiveName
def matchDirectiveName(self, directiveNameChars=identchars+'0123456789-@'):
startPos = self.pos()
possibleMatches = self._directiveNamesAndParsers.keys()
name = ''
match = None
while not self.atEnd():
c = self.getc()
if not c in directiveNameChars:
break
name += c
if name == '@':
if not self.atEnd() and self.peek() in identchars:
match = '@'
break
possibleMatches = [dn for dn in possibleMatches if dn.startswith(name)]
if not possibleMatches:
break
elif (name in possibleMatches and (self.atEnd() or self.peek() not in directiveNameChars)):
match = name
break
self.setPos(startPos)
return match
def matchDirectiveStartToken(self):
return self.directiveStartTokenRE.match(self.src(), self.pos())
def getDirectiveStartToken(self):
match = self.matchDirectiveStartToken()
if not match:
raise ParseError(self, msg='Invalid directive start token')
return self.readTo(match.end())
def matchDirectiveEndToken(self):
return self.directiveEndTokenRE.match(self.src(), self.pos())
def getDirectiveEndToken(self):
match = self.matchDirectiveEndToken()
if not match:
raise ParseError(self, msg='Invalid directive end token')
return self.readTo(match.end())
def matchColonForSingleLineShortFormDirective(self):
if not self.atEnd() and self.peek()==':':
restOfLine = self[self.pos()+1:self.findEOL()]
restOfLine = restOfLine.strip()
if not restOfLine:
return False
elif self.commentStartTokenRE.match(restOfLine):
return False
else: # non-whitespace, non-commment chars found
return True
return False
def matchPSPStartToken(self):
return self.PSPStartTokenRE.match(self.src(), self.pos())
def matchPSPEndToken(self):
return self.PSPEndTokenRE.match(self.src(), self.pos())
def getPSPStartToken(self):
match = self.matchPSPStartToken()
if not match:
raise ParseError(self, msg='Invalid psp start token')
return self.readTo(match.end())
def getPSPEndToken(self):
match = self.matchPSPEndToken()
if not match:
raise ParseError(self, msg='Invalid psp end token')
return self.readTo(match.end())
def matchCheetahVarStart(self):
"""includes the enclosure and cache token"""
return self.cheetahVarStartRE.match(self.src(), self.pos())
def matchCheetahVarStartToken(self):
"""includes the enclosure and cache token"""
return self.cheetahVarStartTokenRE.match(self.src(), self.pos())
def matchCheetahVarInExpressionStartToken(self):
"""no enclosures or cache tokens allowed"""
return self.cheetahVarInExpressionStartTokenRE.match(self.src(), self.pos())
def matchVariablePlaceholderStart(self):
"""includes the enclosure and cache token"""
return self.cheetahVarStartRE.match(self.src(), self.pos())
def matchExpressionPlaceholderStart(self):
"""includes the enclosure and cache token"""
return self.expressionPlaceholderStartRE.match(self.src(), self.pos())
def getCheetahVarStartToken(self):
"""just the start token, not the enclosure or cache token"""
match = self.matchCheetahVarStartToken()
if not match:
raise ParseError(self, msg='Expected Cheetah $var start token')
return self.readTo( match.end() )
def getCacheToken(self):
try:
token = self.cacheTokenRE.match(self.src(), self.pos())
self.setPos( token.end() )
return token.group()
except:
raise ParseError(self, msg='Expected cache token')
def getSilentPlaceholderToken(self):
try:
token = self.silentPlaceholderTokenRE.match(self.src(), self.pos())
self.setPos( token.end() )
return token.group()
except:
raise ParseError(self, msg='Expected silent placeholder token')
def getTargetVarsList(self):
varnames = []
while not self.atEnd():
if self.peek() in ' \t\f':
self.getWhiteSpace()
elif self.peek() in '\r\n':
break
elif self.startswith(','):
self.advance()
elif self.startswith('in ') or self.startswith('in\t'):
break
#elif self.matchCheetahVarStart():
elif self.matchCheetahVarInExpressionStartToken():
self.getCheetahVarStartToken()
self.getSilentPlaceholderToken()
self.getCacheToken()
varnames.append( self.getDottedName() )
elif self.matchIdentifier():
varnames.append( self.getDottedName() )
else:
break
return varnames
def getCheetahVar(self, plain=False, skipStartToken=False):
"""This is called when parsing inside expressions. Cache tokens are only
valid in placeholders so this method discards any cache tokens found.
"""
if not skipStartToken:
self.getCheetahVarStartToken()
self.getSilentPlaceholderToken()
self.getCacheToken()
return self.getCheetahVarBody(plain=plain)
def getCheetahVarBody(self, plain=False):
# @@TR: this should be in the compiler
return self._compiler.genCheetahVar(self.getCheetahVarNameChunks(), plain=plain)
def getCheetahVarNameChunks(self):
"""
nameChunks = list of Cheetah $var subcomponents represented as tuples
[ (namemapperPart,autoCall,restOfName),
]
where:
namemapperPart = the dottedName base
autocall = where NameMapper should use autocalling on namemapperPart
restOfName = any arglist, index, or slice
If restOfName contains a call arglist (e.g. '(1234)') then autocall is
False, otherwise it defaults to True.
EXAMPLE
------------------------------------------------------------------------
if the raw CheetahVar is
$a.b.c[1].d().x.y.z
nameChunks is the list
[ ('a.b.c',True,'[1]'),
('d',False,'()'),
('x.y.z',True,''),
]
"""
chunks = []
while self.pos() < len(self):
rest = ''
autoCall = True
if not self.peek() in identchars + '.':
break
elif self.peek() == '.':
if self.pos()+1 < len(self) and self.peek(1) in identchars:
self.advance() # discard the period as it isn't needed with NameMapper
else:
break
dottedName = self.getDottedName()
if not self.atEnd() and self.peek() in '([':
if self.peek() == '(':
rest = self.getCallArgString()
else:
rest = self.getExpression(enclosed=True)
period = max(dottedName.rfind('.'), 0)
if period:
chunks.append( (dottedName[:period], autoCall, '') )
dottedName = dottedName[period+1:]
if rest and rest[0]=='(':
autoCall = False
chunks.append( (dottedName, autoCall, rest) )
return chunks
def getCallArgString(self,
enclosures=[], # list of tuples (char, pos), where char is ({ or [
useNameMapper=Unspecified):
""" Get a method/function call argument string.
This method understands *arg, and **kw
"""
# @@TR: this settings mangling should be removed
if useNameMapper is not Unspecified:
useNameMapper_orig = self.setting('useNameMapper')
self.setSetting('useNameMapper', useNameMapper)
if enclosures:
pass
else:
if not self.peek() == '(':
raise ParseError(self, msg="Expected '('")
startPos = self.pos()
self.getc()
enclosures = [('(', startPos),
]
argStringBits = ['(']
addBit = argStringBits.append
while True:
if self.atEnd():
open = enclosures[-1][0]
close = closurePairsRev[open]
self.setPos(enclosures[-1][1])
raise ParseError(
self, msg="EOF was reached before a matching '" + close +
"' was found for the '" + open + "'")
c = self.peek()
if c in ")}]": # get the ending enclosure and break
if not enclosures:
raise ParseError(self)
c = self.getc()
open = closurePairs[c]
if enclosures[-1][0] == open:
enclosures.pop()
addBit(')')
break
else:
raise ParseError(self)
elif c in " \t\f\r\n":
addBit(self.getc())
elif self.matchCheetahVarInExpressionStartToken():
startPos = self.pos()
codeFor1stToken = self.getCheetahVar()
WS = self.getWhiteSpace()
if not self.atEnd() and self.peek() == '=':
nextToken = self.getPyToken()
if nextToken == '=':
endPos = self.pos()
self.setPos(startPos)
codeFor1stToken = self.getCheetahVar(plain=True)
self.setPos(endPos)
## finally
addBit( codeFor1stToken + WS + nextToken )
else:
addBit( codeFor1stToken + WS)
elif self.matchCheetahVarStart():
# it has syntax that is only valid at the top level
self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr()
else:
beforeTokenPos = self.pos()
token = self.getPyToken()
if token in ('{', '(', '['):
self.rev()
token = self.getExpression(enclosed=True)
token = self.transformToken(token, beforeTokenPos)
addBit(token)
if useNameMapper is not Unspecified:
self.setSetting('useNameMapper', useNameMapper_orig) # @@TR: see comment above
return ''.join(argStringBits)
def getDefArgList(self, exitPos=None, useNameMapper=False):
""" Get an argument list. Can be used for method/function definition
argument lists or for #directive argument lists. Returns a list of
tuples in the form (argName, defVal=None) with one tuple for each arg
name.
These defVals are always strings, so (argName, defVal=None) is safe even
with a case like (arg1, arg2=None, arg3=1234*2), which would be returned as
[('arg1', None),
('arg2', 'None'),
('arg3', '1234*2'),
]
This method understands *arg, and **kw
"""
if self.peek() == '(':
self.advance()
else:
exitPos = self.findEOL() # it's a directive so break at the EOL
argList = ArgList()
onDefVal = False
# @@TR: this settings mangling should be removed
useNameMapper_orig = self.setting('useNameMapper')
self.setSetting('useNameMapper', useNameMapper)
while True:
if self.atEnd():
raise ParseError(
self, msg="EOF was reached before a matching ')'"+
" was found for the '('")
if self.pos() == exitPos:
break
c = self.peek()
if c == ")" or self.matchDirectiveEndToken():
break
elif c == ":":
break
elif c in " \t\f\r\n":
if onDefVal:
argList.add_default(c)
self.advance()
elif c == '=':
onDefVal = True
self.advance()
elif c == ",":
argList.next()
onDefVal = False
self.advance()
elif self.startswith(self.cheetahVarStartToken) and not onDefVal:
self.advance(len(self.cheetahVarStartToken))
elif self.matchIdentifier() and not onDefVal:
argList.add_argument( self.getIdentifier() )
elif onDefVal:
if self.matchCheetahVarInExpressionStartToken():
token = self.getCheetahVar()
elif self.matchCheetahVarStart():
# it has syntax that is only valid at the top level
self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr()
else:
beforeTokenPos = self.pos()
token = self.getPyToken()
if token in ('{', '(', '['):
self.rev()
token = self.getExpression(enclosed=True)
token = self.transformToken(token, beforeTokenPos)
argList.add_default(token)
elif c == '*' and not onDefVal:
varName = self.getc()
if self.peek() == '*':
varName += self.getc()
if not self.matchIdentifier():
raise ParseError(self)
varName += self.getIdentifier()
argList.add_argument(varName)
else:
raise ParseError(self)
self.setSetting('useNameMapper', useNameMapper_orig) # @@TR: see comment above
return argList.merge()
    def getExpressionParts(self,
                           enclosed=False,
                           enclosures=None, # list of tuples (char, pos), where char is ({ or [
                           pyTokensToBreakAt=None, # only works if not enclosed
                           useNameMapper=Unspecified,
                           ):
        """ Get a Cheetah expression that includes $CheetahVars and break at
        directive end tokens, the end of an enclosure, or at a specified
        pyToken.

        Returns the expression as a list of string fragments (see
        getExpression() for the joined form).
        """
        # temporarily override the 'useNameMapper' compiler setting; restored
        # at the bottom of this method
        if useNameMapper is not Unspecified:
            useNameMapper_orig = self.setting('useNameMapper')
            self.setSetting('useNameMapper', useNameMapper)

        if enclosures is None:
            enclosures = []

        srcLen = len(self)
        exprBits = []
        while True:
            if self.atEnd():
                # EOF inside an open bracket is an error; report it at the
                # position of the unmatched opener
                if enclosures:
                    open = enclosures[-1][0]
                    close = closurePairsRev[open]
                    self.setPos(enclosures[-1][1])
                    raise ParseError(
                        self, msg="EOF was reached before a matching '" + close +
                        "' was found for the '" + open + "'")
                else:
                    break

            c = self.peek()
            if c in "{([":
                # opening bracket: push (char, pos) so the closer can be
                # matched (and errors located) later
                exprBits.append(c)
                enclosures.append( (c, self.pos()) )
                self.advance()
            elif enclosed and not enclosures:
                break
            elif c in "])}":
                if not enclosures:
                    raise ParseError(self)
                open = closurePairs[c]
                if enclosures[-1][0] == open:
                    enclosures.pop()
                    exprBits.append(c)
                else:
                    # mismatched closer, e.g. '(...]'
                    open = enclosures[-1][0]
                    close = closurePairsRev[open]
                    row, col = self.getRowCol()
                    self.setPos(enclosures[-1][1])
                    raise ParseError(
                        self, msg= "A '" + c + "' was found at line " + str(row) +
                        ", col " + str(col) +
                        " before a matching '" + close +
                        "' was found\nfor the '" + open + "'")
                self.advance()

            elif c in " \f\t":
                exprBits.append(self.getWhiteSpace())
            elif self.matchDirectiveEndToken() and not enclosures:
                break
            elif c == "\\" and self.pos()+1 < srcLen:
                # a backslash is only legal here as a line continuation
                eolMatch = EOLre.match(self.src(), self.pos()+1)
                if not eolMatch:
                    self.advance()
                    raise ParseError(self, msg='Line ending expected')
                self.setPos( eolMatch.end() )
            elif c in '\r\n':
                # newlines continue the expression only inside brackets
                if enclosures:
                    self.advance()
                else:
                    break
            elif self.matchCheetahVarInExpressionStartToken():
                expr = self.getCheetahVar()
                exprBits.append(expr)
            elif self.matchCheetahVarStart():
                # it has syntax that is only valid at the top level
                self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr()
            else:
                beforeTokenPos = self.pos()
                token = self.getPyToken()
                if (not enclosures
                    and pyTokensToBreakAt
                    and token in pyTokensToBreakAt):
                    # caller asked us to stop at this token; rewind so the
                    # caller can consume it itself
                    self.setPos(beforeTokenPos)
                    break

                token = self.transformToken(token, beforeTokenPos)

                exprBits.append(token)
                if identRE.match(token):
                    if token == 'for':
                        # 'for' introduces a target list: parse it with the
                        # namemapper off, stopping at the 'in' keyword
                        expr = self.getExpression(useNameMapper=False, pyTokensToBreakAt=['in'])
                        exprBits.append(expr)
                    else:
                        exprBits.append(self.getWhiteSpace())
                        if not self.atEnd() and self.peek() == '(':
                            exprBits.append(self.getCallArgString())
        ##
        if useNameMapper is not Unspecified:
            self.setSetting('useNameMapper', useNameMapper_orig) # @@TR: see comment above
        return exprBits
def getExpression(self,
enclosed=False,
enclosures=None, # list of tuples (char, pos), where # char is ({ or [
pyTokensToBreakAt=None,
useNameMapper=Unspecified,
):
"""Returns the output of self.getExpressionParts() as a concatenated
string rather than as a list.
"""
return ''.join(self.getExpressionParts(
enclosed=enclosed, enclosures=enclosures,
pyTokensToBreakAt=pyTokensToBreakAt,
useNameMapper=useNameMapper))
    def transformToken(self, token, beforeTokenPos):
        """Takes a token from the expression being parsed and performs any
        special transformations required by Cheetah.

        At the moment only Cheetah's c'$placeholder strings' are transformed:
        the string literal is re-scanned and each embedded placeholder is
        turned into a str(...) call, the whole thing joined back together.
        """
        if token=='c' and not self.atEnd() and self.peek() in '\'"':
            nextToken = self.getPyToken()
            # NOTE(review): .upper() uppercases the *source* of the string
            # literal before eval'ing it. theStr is only used for the
            # emptiness check below (the content is re-scanned from the
            # original source), but uppercasing would corrupt lowercase
            # escape sequences such as \n -> \N. Looks suspicious — confirm
            # against upstream Cheetah.
            token = nextToken.upper()
            # SECURITY NOTE: eval() of template source — acceptable only
            # because templates are already executable code in Cheetah.
            theStr = eval(token)
            endPos = self.pos()
            if not theStr:
                # NOTE(review): returns None for an empty c'' string; the
                # caller appends the result to exprBits — verify callers
                # tolerate None here.
                return

            # skip past the opening quote(s) of the literal
            if token.startswith(single3) or token.startswith(double3):
                startPosIdx = 3
            else:
                startPosIdx = 1
            self.setPos(beforeTokenPos+startPosIdx+1)
            outputExprs = []
            strConst = ''
            # re-scan the literal's body, splitting it into plain string
            # constants and embedded $placeholders
            while self.pos() < (endPos-startPosIdx):
                if self.matchCheetahVarStart() or self.matchExpressionPlaceholderStart():
                    if strConst:
                        outputExprs.append(repr(strConst))
                        strConst = ''
                    placeholderExpr = self.getPlaceholder()
                    outputExprs.append('str('+placeholderExpr+')')
                else:
                    strConst += self.getc()
            self.setPos(endPos)
            if strConst:
                outputExprs.append(repr(strConst))
            token = "''.join(["+','.join(outputExprs)+"])"
        return token
def _raiseErrorAboutInvalidCheetahVarSyntaxInExpr(self):
match = self.matchCheetahVarStart()
groupdict = match.groupdict()
if groupdict.get('cacheToken'):
raise ParseError(
self,
msg='Cache tokens are not valid inside expressions. '
'Use them in top-level $placeholders only.')
elif groupdict.get('enclosure'):
raise ParseError(
self,
msg='Long-form placeholders - ${}, $(), $[], etc. are not valid inside expressions. '
'Use them in top-level $placeholders only.')
else:
raise ParseError(
self,
msg='This form of $placeholder syntax is not valid here.')
    def getPlaceholder(self, allowCacheTokens=False, plain=False, returnEverything=False):
        """Parse one $placeholder starting at the current position.

        Returns the generated expression string, or — when returnEverything
        is True — the tuple (expr, rawPlaceholder, lineCol, cacheTokenParts,
        filterArgs, isSilentPlaceholder).
        """
        # filtered
        for callback in self.setting('preparsePlaceholderHooks'):
            callback(parser=self)

        startPos = self.pos()
        lineCol = self.getRowCol(startPos)
        # consumes the start token ($ by default); the value itself is unused
        startToken = self.getCheetahVarStartToken()
        silentPlaceholderToken = self.getSilentPlaceholderToken()
        if silentPlaceholderToken:
            isSilentPlaceholder = True
        else:
            isSilentPlaceholder = False

        if allowCacheTokens:
            cacheToken = self.getCacheToken()
            cacheTokenParts = self.cacheTokenRE.match(cacheToken).groupdict()
        else:
            cacheTokenParts = {}

        if self.peek() in '({[':
            # long-form placeholder: ${...}, $(...) or $[...]
            pos = self.pos()
            enclosureOpenChar = self.getc()
            enclosures = [ (enclosureOpenChar, pos) ]
            self.getWhiteSpace()
        else:
            enclosures = []

        filterArgs = None
        if self.matchIdentifier():
            nameChunks = self.getCheetahVarNameChunks()
            expr = self._compiler.genCheetahVar(nameChunks[:], plain=plain)
            restOfExpr = None
            if enclosures:
                WS = self.getWhiteSpace()
                expr += WS
                if self.setting('allowPlaceholderFilterArgs') and self.peek()==',':
                    # ',filterArg1, ...' before the closing bracket; strip
                    # the surrounding parens from the captured arg string
                    filterArgs = self.getCallArgString(enclosures=enclosures)[1:-1]
                else:
                    if self.peek()==closurePairsRev[enclosureOpenChar]:
                        self.getc()
                    else:
                        restOfExpr = self.getExpression(enclosed=True, enclosures=enclosures)
                        if restOfExpr[-1] == closurePairsRev[enclosureOpenChar]:
                            restOfExpr = restOfExpr[:-1]
                        expr += restOfExpr
            rawPlaceholder = self[startPos: self.pos()]
        else:
            expr = self.getExpression(enclosed=True, enclosures=enclosures)
            # NOTE(review): enclosureOpenChar is only bound when the
            # placeholder was bracket-enclosed; this branch appears to rely
            # on non-identifier placeholders always being enclosed — confirm.
            if expr[-1] == closurePairsRev[enclosureOpenChar]:
                expr = expr[:-1]
            rawPlaceholder=self[startPos: self.pos()]

        expr = self._applyExpressionFilters(expr, 'placeholder',
                                            rawExpr=rawPlaceholder, startPos=startPos)
        for callback in self.setting('postparsePlaceholderHooks'):
            callback(parser=self)

        if returnEverything:
            return (expr, rawPlaceholder, lineCol, cacheTokenParts,
                    filterArgs, isSilentPlaceholder)
        else:
            return expr
class _HighLevelParser(_LowLevelParser):
"""This class is a StateMachine for parsing Cheetah source and
sending state dependent code generation commands to
Cheetah.Compiler.Compiler.
"""
    def __init__(self, src, filename=None, breakPoint=None, compiler=None):
        """Set up low-level parsing of *src* and bind this parser to its
        *compiler*, which also serves as the settings manager.
        """
        super(_HighLevelParser, self).__init__(src, filename=filename, breakPoint=breakPoint)
        # the compiler doubles as the settings manager; must be wired up
        # before setupState()/configureParser() read settings
        self.setSettingsManager(compiler)
        self._compiler = compiler
        self.setupState()
        self.configureParser()
    def setupState(self):
        """Initialize the parser's per-run mutable state."""
        # registered macro callbacks, keyed by macro name
        self._macros = {}
        # per-macro details (cleared/shut down in cleanup())
        self._macroDetails = {}
        # stack of currently-open closeable directives (#def, #if, ...)
        self._openDirectivesStack = []
def cleanup(self):
"""Cleanup to remove any possible reference cycles
"""
self._macros.clear()
for macroname, macroDetails in self._macroDetails.items():
macroDetails.template.shutdown()
del macroDetails.template
self._macroDetails.clear()
    def configureParser(self):
        """Extend the low-level parser configuration with the directive
        dispatch tables used by the high-level parse loop.
        """
        super(_HighLevelParser, self).configureParser()
        self._initDirectives()
    def _initDirectives(self):
        """Build the directive dispatch tables: the built-in
        directiveNamesAndParsers / endDirectiveNamesAndHandlers maps merged
        with any custom entries from the compiler settings, plus the
        closeable-directive list and the macro directives.
        """
        def normalizeParserVal(val):
            # settings may supply a handler as a method name (string), a
            # class to instantiate with this parser, a callable, or None
            if isinstance(val, (str, unicode)):
                handler = getattr(self, val)
            elif type(val) in (ClassType, TypeType):
                handler = val(self)
            elif hasattr(val, '__call__'):
                handler = val
            elif val is None:
                handler = val
            else:
                raise Exception('Invalid parser/handler value %r for %s'%(val, name))
            return handler

        normalizeHandlerVal = normalizeParserVal

        # merge built-in tables with user-supplied customizations
        _directiveNamesAndParsers = directiveNamesAndParsers.copy()
        customNamesAndParsers = self.setting('directiveNamesAndParsers', {})
        _directiveNamesAndParsers.update(customNamesAndParsers)

        _endDirectiveNamesAndHandlers = endDirectiveNamesAndHandlers.copy()
        customNamesAndHandlers = self.setting('endDirectiveNamesAndHandlers', {})
        _endDirectiveNamesAndHandlers.update(customNamesAndHandlers)

        self._directiveNamesAndParsers = {}
        for name, val in _directiveNamesAndParsers.items():
            if val in (False, 0):
                # a False/0 value disables the directive entirely
                continue
            self._directiveNamesAndParsers[name] = normalizeParserVal(val)

        self._endDirectiveNamesAndHandlers = {}
        for name, val in _endDirectiveNamesAndHandlers.items():
            if val in (False, 0):
                continue
            self._endDirectiveNamesAndHandlers[name] = normalizeHandlerVal(val)

        self._closeableDirectives = ['def', 'block', 'closure', 'defmacro',
                                     'call',
                                     'capture',
                                     'cache',
                                     'filter',
                                     'if', 'unless',
                                     'for', 'while', 'repeat',
                                     'try',
                                     ]
        for directiveName in self.setting('closeableDirectives', []):
            self._closeableDirectives.append(directiveName)

        # macro directives map a name to a callback (or a class that is
        # instantiated with this parser); #i18n is always registered
        macroDirectives = self.setting('macroDirectives', {})
        macroDirectives['i18n'] = I18n

        for macroName, callback in macroDirectives.items():
            if type(callback) in (ClassType, TypeType):
                callback = callback(parser=self)
            assert callback
            self._macros[macroName] = callback
            self._directiveNamesAndParsers[macroName] = self.eatMacroCall
def _applyExpressionFilters(self, expr, exprType, rawExpr=None, startPos=None):
"""Pipes cheetah expressions through a set of optional filter hooks.
The filters are functions which may modify the expressions or raise
a ForbiddenExpression exception if the expression is not allowed. They
are defined in the compiler setting 'expressionFilterHooks'.
Some intended use cases:
- to implement 'restricted execution' safeguards in cases where you
can't trust the author of the template.
- to enforce style guidelines
filter call signature: (parser, expr, exprType, rawExpr=None, startPos=None)
- parser is the Cheetah parser
- expr is the expression to filter. In some cases the parser will have
already modified it from the original source code form. For example,
placeholders will have been translated into namemapper calls. If you
need to work with the original source, see rawExpr.
- exprType is the name of the directive, 'psp', or 'placeholder'. All
lowercase. @@TR: These will eventually be replaced with a set of
constants.
- rawExpr is the original source string that Cheetah parsed. This
might be None in some cases.
- startPos is the character position in the source string/file
where the parser started parsing the current expression.
@@TR: I realize this use of the term 'expression' is a bit wonky as many
of the 'expressions' are actually statements, but I haven't thought of
a better name yet. Suggestions?
"""
for callback in self.setting('expressionFilterHooks'):
expr = callback(parser=self, expr=expr, exprType=exprType,
rawExpr=rawExpr, startPos=startPos)
return expr
def _filterDisabledDirectives(self, directiveName):
directiveName = directiveName.lower()
if (directiveName in self.setting('disabledDirectives')
or (self.setting('enabledDirectives')
and directiveName not in self.setting('enabledDirectives'))):
for callback in self.setting('disabledDirectiveHooks'):
callback(parser=self, directiveName=directiveName)
raise ForbiddenDirective(self, msg='This %r directive is disabled'%directiveName)
## main parse loop
    def parse(self, breakPoint=None, assertEmptyStack=True):
        """Main parse loop: dispatch on the token at the current position
        until the end of the source (or *breakPoint*) is reached.

        When a breakPoint is given, the open-directives stack is not
        checked, since the region being parsed may legitimately end inside
        an open directive (used e.g. for single-line directive forms).
        """
        if breakPoint:
            origBP = self.breakPoint()
            self.setBreakPoint(breakPoint)
            assertEmptyStack = False

        while not self.atEnd():
            # NOTE: the order of these match tests is significant
            if self.matchCommentStartToken():
                self.eatComment()
            elif self.matchMultiLineCommentStartToken():
                self.eatMultiLineComment()
            elif self.matchVariablePlaceholderStart():
                self.eatPlaceholder()
            elif self.matchExpressionPlaceholderStart():
                self.eatPlaceholder()
            elif self.matchDirective():
                self.eatDirective()
            elif self.matchPSPStartToken():
                self.eatPSP()
            elif self.matchEOLSlurpToken():
                self.eatEOLSlurpToken()
            else:
                self.eatPlainText()
        if assertEmptyStack:
            self.assertEmptyOpenDirectivesStack()
        if breakPoint:
            self.setBreakPoint(origBP)
## non-directive eat methods
def eatPlainText(self):
startPos = self.pos()
match = None
while not self.atEnd():
match = self.matchTopLevelToken()
if match:
break
else:
self.advance()
strConst = self.readTo(self.pos(), start=startPos)
strConst = self._unescapeCheetahVars(strConst)
strConst = self._unescapeDirectives(strConst)
self._compiler.addStrConst(strConst)
return match
def eatComment(self):
isLineClearToStartToken = self.isLineClearToStartToken()
if isLineClearToStartToken:
self._compiler.handleWSBeforeDirective()
self.getCommentStartToken()
comm = self.readToEOL(gobble=isLineClearToStartToken)
self._compiler.addComment(comm)
    def eatMultiLineComment(self):
        """Consume a (possibly nested) multi-line comment and pass its body
        to the compiler, optionally gobbling the surrounding whitespace so
        the comment leaves no trace in the output.
        """
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()

        self.getMultiLineCommentStartToken()
        endPos = startPos = self.pos()
        level = 1   # nesting depth of open comment start tokens
        while True:
            endPos = self.pos()
            if self.atEnd():
                break
            if self.matchMultiLineCommentStartToken():
                self.getMultiLineCommentStartToken()
                level += 1
            elif self.matchMultiLineCommentEndToken():
                self.getMultiLineCommentEndToken()
                level -= 1
            if not level:
                break
            self.advance()
        comm = self.readTo(endPos, start=startPos)

        if not self.atEnd():
            self.getMultiLineCommentEndToken()

        if (not self.atEnd()) and self.setting('gobbleWhitespaceAroundMultiLineComments'):
            restOfLine = self[self.pos():self.findEOL()]
            if not restOfLine.strip(): # WS only to EOL
                self.readToEOL(gobble=isLineClearToStartToken)

        if isLineClearToStartToken and (self.atEnd() or self.pos() > endOfFirstLine):
            self._compiler.handleWSBeforeDirective()

        self._compiler.addComment(comm)
def eatPlaceholder(self):
(expr, rawPlaceholder,
lineCol, cacheTokenParts,
filterArgs, isSilentPlaceholder) = self.getPlaceholder(
allowCacheTokens=True, returnEverything=True)
self._compiler.addPlaceholder(
expr,
filterArgs=filterArgs,
rawPlaceholder=rawPlaceholder,
cacheTokenParts=cacheTokenParts,
lineCol=lineCol,
silentMode=isSilentPlaceholder)
return
def eatPSP(self):
# filtered
self._filterDisabledDirectives(directiveName='psp')
self.getPSPStartToken()
endToken = self.setting('PSPEndToken')
startPos = self.pos()
while not self.atEnd():
if self.peek() == endToken[0]:
if self.matchPSPEndToken():
break
self.advance()
pspString = self.readTo(self.pos(), start=startPos).strip()
pspString = self._applyExpressionFilters(pspString, 'psp', startPos=startPos)
self._compiler.addPSP(pspString)
self.getPSPEndToken()
## generic directive eat methods
_simpleIndentingDirectives = '''
else elif for while repeat unless try except finally'''.split()
_simpleExprDirectives = '''
pass continue stop return yield break
del assert raise
silent echo
import from'''.split()
_directiveHandlerNames = {'import': 'addImportStatement',
'from': 'addImportStatement', }
    def eatDirective(self):
        """Dispatch the directive at the current position to its parser.

        Lookup order: a registered parser in _directiveNamesAndParsers,
        then the simple indenting directives (#for, #while, ...), then the
        simple expression directives (#pass, #import, ...).
        """
        directiveName = self.matchDirective()
        self._filterDisabledDirectives(directiveName)

        for callback in self.setting('preparseDirectiveHooks'):
            callback(parser=self, directiveName=directiveName)

        # subclasses can override the default behaviours here by providing an
        # eater method in self._directiveNamesAndParsers[directiveName]
        directiveParser = self._directiveNamesAndParsers.get(directiveName)
        if directiveParser:
            directiveParser()
        elif directiveName in self._simpleIndentingDirectives:
            handlerName = self._directiveHandlerNames.get(directiveName)
            if not handlerName:
                # default handler name, e.g. 'for' -> compiler.addFor
                handlerName = 'add'+directiveName.capitalize()
            handler = getattr(self._compiler, handlerName)
            self.eatSimpleIndentingDirective(directiveName, callback=handler)
        elif directiveName in self._simpleExprDirectives:
            handlerName = self._directiveHandlerNames.get(directiveName)
            if not handlerName:
                handlerName = 'add'+directiveName.capitalize()
            handler = getattr(self._compiler, handlerName)
            # #silent and #echo take a bare expression; the others keep the
            # directive keyword as part of the generated statement
            if directiveName in ('silent', 'echo'):
                includeDirectiveNameInExpr = False
            else:
                includeDirectiveNameInExpr = True
            expr = self.eatSimpleExprDirective(
                directiveName,
                includeDirectiveNameInExpr=includeDirectiveNameInExpr)
            handler(expr)
        ##
        for callback in self.setting('postparseDirectiveHooks'):
            callback(parser=self, directiveName=directiveName)
    def _eatRestOfDirectiveTag(self, isLineClearToStartToken, endOfFirstLinePos):
        """Consume whatever follows a directive: a trailing comment, the
        explicit end token, and/or the EOL; then let the compiler drop the
        leading whitespace if the directive owned its line.
        """
        foundComment = False
        # a comment start token is only a comment here if it does not itself
        # introduce a directive
        if self.matchCommentStartToken():
            pos = self.pos()
            self.advance()
            if not self.matchDirective():
                self.setPos(pos)
                foundComment = True
                self.eatComment() # this won't gobble the EOL
            else:
                self.setPos(pos)

        if not foundComment and self.matchDirectiveEndToken():
            self.getDirectiveEndToken()
        elif isLineClearToStartToken and (not self.atEnd()) and self.peek() in '\r\n':
            # still gobble the EOL if a comment was found.
            self.readToEOL(gobble=True)

        if isLineClearToStartToken and (self.atEnd() or self.pos() > endOfFirstLinePos):
            self._compiler.handleWSBeforeDirective()
    def _eatToThisEndDirective(self, directiveName):
        """Consume raw source up to the matching '#end directiveName' tag and
        return the text in between (the end tag itself is eaten too).
        """
        finalPos = endRawPos = startPos = self.pos()
        directiveChar = self.setting('directiveStartToken')[0]
        isLineClearToStartToken = False
        while not self.atEnd():
            # cheap first-char test before the full directive match
            if self.peek() == directiveChar:
                if self.matchDirective() == 'end':
                    endRawPos = self.pos()
                    self.getDirectiveStartToken()
                    self.advance(len('end'))
                    self.getWhiteSpace()
                    if self.startswith(directiveName):
                        if self.isLineClearToStartToken(endRawPos):
                            isLineClearToStartToken = True
                            # exclude the end tag's leading whitespace from
                            # the captured text
                            endRawPos = self.findBOL(endRawPos)
                        self.advance(len(directiveName)) # to end of directiveName
                        self.getWhiteSpace()
                        finalPos = self.pos()
                        break
            self.advance()
            finalPos = endRawPos = self.pos()

        textEaten = self.readTo(endRawPos, start=startPos)
        self.setPos(finalPos)

        endOfFirstLinePos = self.findEOL()

        if self.matchDirectiveEndToken():
            self.getDirectiveEndToken()
        elif isLineClearToStartToken and (not self.atEnd()) and self.peek() in '\r\n':
            self.readToEOL(gobble=True)

        if isLineClearToStartToken and self.pos() > endOfFirstLinePos:
            self._compiler.handleWSBeforeDirective()
        return textEaten
    def eatSimpleExprDirective(self, directiveName, includeDirectiveNameInExpr=True):
        """Parse a one-line expression directive (e.g. #silent, #import)
        and return its (filtered) expression string.
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()
        self.getDirectiveStartToken()
        if not includeDirectiveNameInExpr:
            self.advance(len(directiveName))
        startPos = self.pos()
        expr = self.getExpression().strip()
        # NOTE(review): re-derives the name from the expression's first
        # token; when includeDirectiveNameInExpr is False this is the first
        # token of the expression, not the directive name — appears to be
        # harmless since those directives are never closeable, but confirm.
        directiveName = expr.split()[0]
        expr = self._applyExpressionFilters(expr, directiveName, startPos=startPos)
        if directiveName in self._closeableDirectives:
            self.pushToOpenDirectivesStack(directiveName)
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
        return expr
    def eatSimpleIndentingDirective(self, directiveName, callback,
                                    includeDirectiveNameInExpr=False):
        """Parse a directive that opens an indented suite (#for, #while,
        #if variants, ...), supporting both the multi-line form and the
        single-line short form ('#for $x in $y: $x').

        callback is the compiler method that emits the opening statement.
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        lineCol = self.getRowCol()
        self.getDirectiveStartToken()
        # these directives keep their keyword in the generated statement, so
        # it is left in the stream for getExpression() to pick up
        if directiveName not in 'else elif for while try except finally'.split():
            self.advance(len(directiveName))
        startPos = self.pos()

        self.getWhiteSpace()

        expr = self.getExpression(pyTokensToBreakAt=[':'])
        expr = self._applyExpressionFilters(expr, directiveName, startPos=startPos)
        if self.matchColonForSingleLineShortFormDirective():
            # single-line short form: parse to EOL as the suite body
            self.advance() # skip over :
            if directiveName in 'else elif except finally'.split():
                callback(expr, dedent=False, lineCol=lineCol)
            else:
                callback(expr, lineCol=lineCol)

            self.getWhiteSpace(max=1)
            self.parse(breakPoint=self.findEOL(gobble=True))
            self._compiler.commitStrConst()
            self._compiler.dedent()
        else:
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            if directiveName in self._closeableDirectives:
                self.pushToOpenDirectivesStack(directiveName)
            callback(expr, lineCol=lineCol)
def eatEndDirective(self):
isLineClearToStartToken = self.isLineClearToStartToken()
self.getDirectiveStartToken()
self.advance(3) # to end of 'end'
self.getWhiteSpace()
pos = self.pos()
directiveName = False
for key in self._endDirectiveNamesAndHandlers.keys():
if self.find(key, pos) == pos:
directiveName = key
break
if not directiveName:
raise ParseError(self, msg='Invalid end directive')
endOfFirstLinePos = self.findEOL()
self.getExpression() # eat in any extra comment-like crap
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
if directiveName in self._closeableDirectives:
self.popFromOpenDirectivesStack(directiveName)
# subclasses can override the default behaviours here by providing an
# end-directive handler in self._endDirectiveNamesAndHandlers[directiveName]
if self._endDirectiveNamesAndHandlers.get(directiveName):
handler = self._endDirectiveNamesAndHandlers[directiveName]
handler()
elif directiveName in 'block capture cache call filter errorCatcher'.split():
if key == 'block':
self._compiler.closeBlock()
elif key == 'capture':
self._compiler.endCaptureRegion()
elif key == 'cache':
self._compiler.endCacheRegion()
elif key == 'call':
self._compiler.endCallRegion()
elif key == 'filter':
self._compiler.closeFilterBlock()
elif key == 'errorCatcher':
self._compiler.turnErrorCatcherOff()
elif directiveName in 'while for if try repeat unless'.split():
self._compiler.commitStrConst()
self._compiler.dedent()
elif directiveName=='closure':
self._compiler.commitStrConst()
self._compiler.dedent()
# @@TR: temporary hack of useSearchList
self.setSetting('useSearchList', self._useSearchList_orig)
## specific directive eat methods
    def eatBreakPoint(self):
        """Tells the parser to stop parsing at this point and completely ignore
        everything else.

        This is a debugging tool.
        """
        self.setBreakPoint(self.pos())
def eatShbang(self):
# filtered
self.getDirectiveStartToken()
self.advance(len('shBang'))
self.getWhiteSpace()
startPos = self.pos()
shBang = self.readToEOL()
shBang = self._applyExpressionFilters(shBang, 'shbang', startPos=startPos)
self._compiler.setShBang(shBang.strip())
def eatEncoding(self):
# filtered
self.getDirectiveStartToken()
self.advance(len('encoding'))
self.getWhiteSpace()
startPos = self.pos()
encoding = self.readToEOL()
encoding = self._applyExpressionFilters(encoding, 'encoding', startPos=startPos)
self._compiler.setModuleEncoding(encoding.strip())
    def eatCompiler(self):
        """Parse a '#compiler setting = value' directive (or
        '#compiler reset') and apply it to the compiler's settings.
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()
        startPos = self.pos()
        self.getDirectiveStartToken()
        self.advance(len('compiler'))   # to end of 'compiler'
        self.getWhiteSpace()

        startPos = self.pos()
        settingName = self.getIdentifier()

        if settingName.lower() == 'reset':
            self.getExpression() # gobble whitespace & junk
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
            # restore the default settings and rebuild the dispatch tables
            self._initializeSettings()
            self.configureParser()
            return

        self.getWhiteSpace()
        if self.peek() == '=':
            self.advance()
        else:
            raise ParseError(self)
        valueExpr = self.getExpression()
        endPos = self.pos()

        # @@TR: it's unlikely that anyone apply filters would have left this
        # directive enabled:
        # @@TR: fix up filtering, regardless
        self._applyExpressionFilters('%s=%r'%(settingName, valueExpr),
                                     'compiler', startPos=startPos)

        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
        try:
            self._compiler.setCompilerSetting(settingName, valueExpr)
        except:
            # report the offending source before re-raising
            sys.stderr.write('An error occurred while processing the following #compiler directive.\n')
            sys.stderr.write('----------------------------------------------------------------------\n')
            sys.stderr.write('%s\n' % self[startPos:endPos])
            sys.stderr.write('----------------------------------------------------------------------\n')
            sys.stderr.write('Please check the syntax of these settings.\n\n')
            raise
    def eatCompilerSettings(self):
        """Parse a '#compiler-settings ... #end compiler-settings' block and
        apply its body as compiler settings ('reset' restores defaults).
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()
        self.getDirectiveStartToken()
        self.advance(len('compiler-settings'))   # to end of 'settings'

        keywords = self.getTargetVarsList()
        self.getExpression()            # gobble any garbage

        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)

        if 'reset' in keywords:
            self._compiler._initializeSettings()
            self.configureParser()
            # @@TR: this implies a single-line #compiler-settings directive, and
            # thus we should parse forward for an end directive.
            # Subject to change in the future
            return
        startPos = self.pos()
        settingsStr = self._eatToThisEndDirective('compiler-settings')
        settingsStr = self._applyExpressionFilters(settingsStr, 'compilerSettings',
                                                   startPos=startPos)
        try:
            self._compiler.setCompilerSettings(keywords=keywords, settingsStr=settingsStr)
        except:
            # report the offending source before re-raising
            sys.stderr.write('An error occurred while processing the following compiler settings.\n')
            sys.stderr.write('----------------------------------------------------------------------\n')
            sys.stderr.write('%s\n' % settingsStr.strip())
            sys.stderr.write('----------------------------------------------------------------------\n')
            sys.stderr.write('Please check the syntax of these settings.\n\n')
            raise
    def eatAttr(self):
        """Parse an '#attr $name = expr' directive and register the class
        attribute with the compiler.
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        startPos = self.pos()
        self.getDirectiveStartToken()
        self.advance(len('attr'))
        self.getWhiteSpace()
        startPos = self.pos()
        # the leading $ on the attribute name is optional
        if self.matchCheetahVarStart():
            self.getCheetahVarStartToken()
        attribName = self.getIdentifier()
        self.getWhiteSpace()
        self.getAssignmentOperator()
        expr = self.getExpression()
        expr = self._applyExpressionFilters(expr, 'attr', startPos=startPos)
        self._compiler.addAttribute(attribName, expr)
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    def eatDecorator(self):
        """Parse an @decorator line, register it with the compiler, and then
        require the decorated #def/#block/#closure (or a further decorator)
        to follow immediately.
        """
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        startPos = self.pos()
        self.getDirectiveStartToken()
        #self.advance() # eat @
        startPos = self.pos()
        decoratorExpr = self.getExpression()
        decoratorExpr = self._applyExpressionFilters(decoratorExpr, 'decorator', startPos=startPos)
        self._compiler.addDecorator(decoratorExpr)
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        self.getWhiteSpace()

        directiveName = self.matchDirective()
        if not directiveName or directiveName not in ('def', 'block', 'closure', '@'):
            raise ParseError(
                self, msg='Expected #def, #block, #closure or another @decorator')
        self.eatDirective()
    def eatDef(self):
        """Parse a #def directive (shared logic lives in _eatDefOrBlock)."""
        # filtered
        self._eatDefOrBlock('def')
def eatBlock(self):
# filtered
startPos = self.pos()
methodName, rawSignature = self._eatDefOrBlock('block')
self._compiler._blockMetaData[methodName] = {
'raw': rawSignature,
'lineCol': self.getRowCol(startPos),
}
    def eatClosure(self):
        """Parse a #closure directive (shared logic lives in _eatDefOrBlock)."""
        # filtered
        self._eatDefOrBlock('closure')
    def _eatDefOrBlock(self, directiveName):
        """Shared parser for #def, #block and #closure: reads the method
        name and argument list, then hands off to the single-line or
        multi-line body parser.

        Returns (methodName, rawSignature).
        """
        # filtered
        assert directiveName in ('def', 'block', 'closure')
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        startPos = self.pos()
        self.getDirectiveStartToken()
        self.advance(len(directiveName))
        self.getWhiteSpace()
        # the leading $ on the method name is optional
        if self.matchCheetahVarStart():
            self.getCheetahVarStartToken()
        methodName = self.getIdentifier()
        self.getWhiteSpace()

        if self.peek() == '(':
            argsList = self.getDefArgList()
            self.advance()              # past the closing ')'
            # an explicit 'self' in the arg list is dropped; the generated
            # method supplies its own
            if argsList and argsList[0][0] == 'self':
                del argsList[0]
        else:
            argsList=[]

        def includeBlockMarkers():
            if self.setting('includeBlockMarkers'):
                startMarker = self.setting('blockMarkerStart')
                self._compiler.addStrConst(startMarker[0] + methodName + startMarker[1])

        # @@TR: fix up filtering
        self._applyExpressionFilters(self[startPos:self.pos()], 'def', startPos=startPos)

        if self.matchColonForSingleLineShortFormDirective():
            # single-line short form: '#def name(args): body-to-EOL'
            isNestedDef = (self.setting('allowNestedDefScopes')
                           and [name for name in self._openDirectivesStack if name=='def'])
            self.getc()
            rawSignature = self[startPos:endOfFirstLinePos]
            self._eatSingleLineDef(directiveName=directiveName,
                                   methodName=methodName,
                                   argsList=argsList,
                                   startPos=startPos,
                                   endPos=endOfFirstLinePos)
            if directiveName == 'def' and not isNestedDef:
                #@@TR: must come before _eatRestOfDirectiveTag ... for some reason
                self._compiler.closeDef()
            elif directiveName == 'block':
                includeBlockMarkers()
                self._compiler.closeBlock()
            elif directiveName == 'closure' or isNestedDef:
                self._compiler.dedent()

            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        else:
            # multi-line form; the body runs until the matching #end
            if self.peek()==':':
                self.getc()
            self.pushToOpenDirectivesStack(directiveName)
            rawSignature = self[startPos:self.pos()]
            self._eatMultiLineDef(directiveName=directiveName,
                                  methodName=methodName,
                                  argsList=argsList,
                                  startPos=startPos,
                                  isLineClearToStartToken=isLineClearToStartToken)
            if directiveName == 'block':
                includeBlockMarkers()

        return methodName, rawSignature
    def _eatMultiLineDef(self, directiveName, methodName, argsList, startPos,
                         isLineClearToStartToken=False):
        """Open a multi-line #def/#block/#closure: emit the method or
        closure header via the compiler (the body is parsed by the main
        loop until the matching #end).
        """
        # filtered in calling method
        self.getExpression()            # slurp up any garbage left at the end
        signature = self[startPos:self.pos()]
        endOfFirstLinePos = self.findEOL()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        # collapse a multi-line signature into one line for the comment
        signature = ' '.join([line.strip() for line in signature.splitlines()])
        parserComment = ('## CHEETAH: generated from ' + signature +
                         ' at line %s, col %s' % self.getRowCol(startPos)
                         + '.')

        isNestedDef = (self.setting('allowNestedDefScopes')
                       and len([name for name in self._openDirectivesStack if name=='def'])>1)
        if directiveName=='block' or (directiveName=='def' and not isNestedDef):
            self._compiler.startMethodDef(methodName, argsList, parserComment)
        else: #closure
            # @@TR: temporary hack of useSearchList (restored by the
            # matching #end handler in eatEndDirective)
            self._useSearchList_orig = self.setting('useSearchList')
            self.setSetting('useSearchList', False)
            self._compiler.addClosure(methodName, argsList, parserComment)

        return methodName
    def _eatSingleLineDef(self, directiveName, methodName, argsList, startPos, endPos):
        """Handle the single-line '#def name(args): body' short form: emit
        the header via the compiler, then parse the rest of the line as the
        body.
        """
        # filtered in calling method
        fullSignature = self[startPos:endPos]
        parserComment = ('## Generated from ' + fullSignature +
                         ' at line %s, col %s' % self.getRowCol(startPos)
                         + '.')
        isNestedDef = (self.setting('allowNestedDefScopes')
                       and [name for name in self._openDirectivesStack if name=='def'])
        if directiveName=='block' or (directiveName=='def' and not isNestedDef):
            self._compiler.startMethodDef(methodName, argsList, parserComment)
        else: #closure
            # @@TR: temporary hack of useSearchList
            useSearchList_orig = self.setting('useSearchList')
            self.setSetting('useSearchList', False)
            self._compiler.addClosure(methodName, argsList, parserComment)

        self.getWhiteSpace(max=1)
        # the rest of the current line is the body
        self.parse(breakPoint=endPos)
        if directiveName=='closure' or isNestedDef: # @@TR: temporary hack of useSearchList
            self.setSetting('useSearchList', useSearchList_orig)
def eatExtends(self):
# filtered
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLine = self.findEOL()
self.getDirectiveStartToken()
self.advance(len('extends'))
self.getWhiteSpace()
startPos = self.pos()
if self.setting('allowExpressionsInExtendsDirective'):
baseName = self.getExpression()
else:
baseName = self.getCommaSeparatedSymbols()
baseName = ', '.join(baseName)
baseName = self._applyExpressionFilters(baseName, 'extends', startPos=startPos)
self._compiler.setBaseClass(baseName) # in compiler
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
    def eatImplements(self):
        """Parse an '#implements name(args)' directive, which renames the
        template's main method and optionally sets its argument list.
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()
        self.getDirectiveStartToken()
        self.advance(len('implements'))
        self.getWhiteSpace()
        startPos = self.pos()
        methodName = self.getIdentifier()
        if not self.atEnd() and self.peek() == '(':
            argsList = self.getDefArgList()
            self.advance()              # past the closing ')'
            # an explicit 'self' is dropped; the method supplies its own
            if argsList and argsList[0][0] == 'self':
                del argsList[0]
        else:
            argsList=[]

        # @@TR: need to split up filtering of the methodname and the args
        #methodName = self._applyExpressionFilters(methodName, 'implements', startPos=startPos)
        self._applyExpressionFilters(self[startPos:self.pos()], 'implements', startPos=startPos)

        self._compiler.setMainMethodName(methodName)
        self._compiler.setMainMethodArgs(argsList)

        self.getExpression()  # throw away and unwanted crap that got added in
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
    def eatSuper(self):
        """Parse a '#super(args)' directive and have the compiler emit a
        call to the superclass implementation of the current method.
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()
        self.getDirectiveStartToken()
        self.advance(len('super'))
        self.getWhiteSpace()
        startPos = self.pos()
        if not self.atEnd() and self.peek() == '(':
            argsList = self.getDefArgList()
            self.advance()              # past the closing ')'
            # an explicit 'self' is dropped; the generated call supplies it
            if argsList and argsList[0][0] == 'self':
                del argsList[0]
        else:
            argsList=[]

        self._applyExpressionFilters(self[startPos:self.pos()], 'super', startPos=startPos)

        #parserComment = ('## CHEETAH: generated from ' + signature +
        #                 ' at line %s, col %s' % self.getRowCol(startPos)
        #                 + '.')

        self.getExpression()  # throw away and unwanted crap that got added in
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
        self._compiler.addSuper(argsList)
    def eatSet(self):
        """Handle a #set directive: '#set [local|global|module] LVALUE = RVALUE'.

        Determines the assignment scope (SET_LOCAL is the default), splits
        the statement into LVALUE / operator / RVALUE, filters the joined
        expression, and forwards everything to the compiler's addSet().
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()
        self.getDirectiveStartToken()
        self.advance(3)
        self.getWhiteSpace()
        style = SET_LOCAL
        if self.startswith('local'):
            self.getIdentifier()
            self.getWhiteSpace()
        elif self.startswith('global'):
            self.getIdentifier()
            self.getWhiteSpace()
            style = SET_GLOBAL
        elif self.startswith('module'):
            self.getIdentifier()
            self.getWhiteSpace()
            style = SET_MODULE
        startsWithDollar = self.matchCheetahVarStart()
        startPos = self.pos()
        LVALUE = self.getExpression(pyTokensToBreakAt=assignmentOps, useNameMapper=False).strip()
        OP = self.getAssignmentOperator()
        RVALUE = self.getExpression()
        expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()
        expr = self._applyExpressionFilters(expr, 'set', startPos=startPos)
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
        class Components: pass # used for 'set global'
        exprComponents = Components()
        exprComponents.LVALUE = LVALUE
        exprComponents.OP = OP
        exprComponents.RVALUE = RVALUE
        self._compiler.addSet(expr, exprComponents, style)
    def eatSlurp(self):
        """Handle a #slurp directive: swallow the rest of the current line.

        Same body as eatEOLSlurpToken below; also discards any whitespace
        that preceded the directive when the line was otherwise clear.
        """
        if self.isLineClearToStartToken():
            self._compiler.handleWSBeforeDirective()
        self._compiler.commitStrConst()
        self.readToEOL(gobble=True)
    def eatEOLSlurpToken(self):
        """Handle the end-of-line slurp token: discard through end of line."""
        if self.isLineClearToStartToken():
            self._compiler.handleWSBeforeDirective()
        self._compiler.commitStrConst()
        self.readToEOL(gobble=True)
    def eatRaw(self):
        """Handle a #raw directive: pass text through without compilation.

        Supports both the single-line short form ('#raw: text') and the
        block form terminated by '#end raw'.
        """
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        self.getDirectiveStartToken()
        self.advance(len('raw'))
        self.getWhiteSpace()
        if self.matchColonForSingleLineShortFormDirective():
            self.advance() # skip over :
            self.getWhiteSpace(max=1)
            rawBlock = self.readToEOL(gobble=False)
        else:
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            rawBlock = self._eatToThisEndDirective('raw')
        self._compiler.addRawText(rawBlock)
    def eatInclude(self):
        """Handle an #include directive.

        Recognizes the optional 'raw' flag (include verbatim) and the
        'source=' form (include from a string expression rather than a
        file), then passes the filtered source expression to addInclude().
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        self.getDirectiveStartToken()
        self.advance(len('include'))
        self.getWhiteSpace()
        includeFrom = 'file'
        isRaw = False
        if self.startswith('raw'):
            self.advance(3)
            isRaw=True
            self.getWhiteSpace()
        if self.startswith('source'):
            self.advance(len('source'))
            includeFrom = 'str'
            self.getWhiteSpace()
            if not self.peek() == '=':
                raise ParseError(self)
            self.advance()
        startPos = self.pos()
        sourceExpr = self.getExpression()
        sourceExpr = self._applyExpressionFilters(sourceExpr, 'include', startPos=startPos)
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        self._compiler.addInclude(sourceExpr, includeFrom, isRaw)
    def eatDefMacro(self):
        """Handle a #defmacro directive: define a parse-time macro.

        The macro body is compiled into a standalone Cheetah Template
        (with '@' placeholders and '%' directives) whose callMacro()
        method is registered in self._macros, and the macro name is
        registered as a new directive handled by eatMacroCall().
        """
        # @@TR: not filtered yet
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        self.getDirectiveStartToken()
        self.advance(len('defmacro'))
        self.getWhiteSpace()
        if self.matchCheetahVarStart():
            self.getCheetahVarStartToken()
        macroName = self.getIdentifier()
        self.getWhiteSpace()
        if self.peek() == '(':
            argsList = self.getDefArgList(useNameMapper=False)
            self.advance() # past the closing ')'
            if argsList and argsList[0][0] == 'self':
                del argsList[0]
        else:
            argsList=[]
        # a macro may not shadow an existing directive name
        assert macroName not in self._directiveNamesAndParsers
        # reserved arguments supplied by eatMacroCall at expansion time
        argsList.insert(0, ('src', None))
        argsList.append(('parser', 'None'))
        argsList.append(('macros', 'None'))
        argsList.append(('compilerSettings', 'None'))
        argsList.append(('isShortForm', 'None'))
        argsList.append(('EOLCharsInShortForm', 'None'))
        argsList.append(('startPos', 'None'))
        argsList.append(('endPos', 'None'))
        if self.matchColonForSingleLineShortFormDirective():
            self.advance() # skip over :
            self.getWhiteSpace(max=1)
            macroSrc = self.readToEOL(gobble=False)
            self.readToEOL(gobble=True)
        else:
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            macroSrc = self._eatToThisEndDirective('defmacro')
        #print argsList
        normalizedMacroSrc = ''.join(
            ['%def callMacro('+','.join([defv and '%s=%s'%(n, defv) or n
                                         for n, defv in argsList])
             +')\n',
             macroSrc,
             '%end def'])
        from Cheetah.Template import Template
        templateAPIClass = self.setting('templateAPIClassForDefMacro', default=Template)
        compilerSettings = self.setting('compilerSettingsForDefMacro', default={})
        searchListForMacros = self.setting('searchListForDefMacro', default=[])
        searchListForMacros = list(searchListForMacros) # copy to avoid mutation bugs
        searchListForMacros.append({'macros': self._macros,
                                    'parser': self,
                                    'compilerSettings': self.settings(),
                                    })
        templateAPIClass._updateSettingsWithPreprocessTokens(
            compilerSettings, placeholderToken='@', directiveToken='%')
        macroTemplateClass = templateAPIClass.compile(source=normalizedMacroSrc,
                                                      compilerSettings=compilerSettings)
        #print normalizedMacroSrc
        #t = macroTemplateClass()
        #print t.callMacro('src')
        #print t.generatedClassCode()
        class MacroDetails: pass
        macroDetails = MacroDetails()
        macroDetails.macroSrc = macroSrc
        macroDetails.argsList = argsList
        macroDetails.template = macroTemplateClass(searchList=searchListForMacros)
        self._macroDetails[macroName] = macroDetails
        self._macros[macroName] = macroDetails.template.callMacro
        self._directiveNamesAndParsers[macroName] = self.eatMacroCall
    def eatMacroCall(self):
        """Expand a previously-defined macro at parse time.

        Collects the macro's argument string and source block, invokes the
        macro callable with whatever reserved keyword args its signature
        accepts, then recursively parses the macro's output by temporarily
        swapping it in as the parser's source buffer.
        """
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        startPos = self.pos()
        self.getDirectiveStartToken()
        macroName = self.getIdentifier()
        macro = self._macros[macroName]
        if hasattr(macro, 'parse'):
            # the macro object takes over parsing entirely
            return macro.parse(parser=self, startPos=startPos)
        if hasattr(macro, 'parseArgs'):
            args = macro.parseArgs(parser=self, startPos=startPos)
        else:
            self.getWhiteSpace()
            args = self.getExpression(useNameMapper=False,
                                      pyTokensToBreakAt=[':']).strip()
        if self.matchColonForSingleLineShortFormDirective():
            isShortForm = True
            self.advance() # skip over :
            self.getWhiteSpace(max=1)
            srcBlock = self.readToEOL(gobble=False)
            EOLCharsInShortForm = self.readToEOL(gobble=True)
            #self.readToEOL(gobble=False)
        else:
            isShortForm = False
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            srcBlock = self._eatToThisEndDirective(macroName)
        if hasattr(macro, 'convertArgStrToDict'):
            kwArgs = macro.convertArgStrToDict(args, parser=self, startPos=startPos)
        else:
            def getArgs(*pargs, **kws):
                return pargs, kws
            # NOTE(review): exec on template-supplied argument text — this
            # assumes templates are trusted input; do not feed untrusted
            # template source through this path.
            exec('positionalArgs, kwArgs = getArgs(%(args)s)'%locals())
        assert 'src' not in kwArgs
        kwArgs['src'] = srcBlock
        # locate the underlying code object so we can inspect which of the
        # reserved keyword args the macro's signature actually accepts
        if isinstance(macro, new.instancemethod):
            co = macro.im_func.func_code
        elif (hasattr(macro, '__call__')
              and hasattr(macro.__call__, 'im_func')):
            co = macro.__call__.im_func.func_code
        else:
            co = macro.func_code
        availableKwArgs = inspect.getargs(co)[0]
        if 'parser' in availableKwArgs:
            kwArgs['parser'] = self
        if 'macros' in availableKwArgs:
            kwArgs['macros'] = self._macros
        if 'compilerSettings' in availableKwArgs:
            kwArgs['compilerSettings'] = self.settings()
        if 'isShortForm' in availableKwArgs:
            kwArgs['isShortForm'] = isShortForm
        if isShortForm and 'EOLCharsInShortForm' in availableKwArgs:
            kwArgs['EOLCharsInShortForm'] = EOLCharsInShortForm
        if 'startPos' in availableKwArgs:
            kwArgs['startPos'] = startPos
        if 'endPos' in availableKwArgs:
            kwArgs['endPos'] = self.pos()
        srcFromMacroOutput = macro(**kwArgs)
        # save parser position, then re-parse the macro's output in place
        origParseSrc = self._src
        origBreakPoint = self.breakPoint()
        origPos = self.pos()
        # add a comment to the output about the macro src that is being parsed
        # or add a comment prefix to all the comments added by the compiler
        self._src = srcFromMacroOutput
        self.setPos(0)
        self.setBreakPoint(len(srcFromMacroOutput))
        self.parse(assertEmptyStack=False)
        self._src = origParseSrc
        self.setBreakPoint(origBreakPoint)
        self.setPos(origPos)
        #self._compiler.addRawText('end')
    def eatCache(self):
        """Handle a #cache directive: open a cached-output region.

        Supports the single-line short form and the block form (closed
        later via the open-directives stack / '#end cache').
        """
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        lineCol = self.getRowCol()
        self.getDirectiveStartToken()
        self.advance(len('cache'))
        startPos = self.pos()
        argList = self.getDefArgList(useNameMapper=True)
        argList = self._applyExpressionFilters(argList, 'cache', startPos=startPos)
        def startCache():
            cacheInfo = self._compiler.genCacheInfoFromArgList(argList)
            self._compiler.startCacheRegion(cacheInfo, lineCol)
        if self.matchColonForSingleLineShortFormDirective():
            self.advance() # skip over :
            self.getWhiteSpace(max=1)
            startCache()
            self.parse(breakPoint=self.findEOL(gobble=True))
            self._compiler.endCacheRegion()
        else:
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            self.pushToOpenDirectivesStack('cache')
            startCache()
    def eatCall(self):
        """Handle a #call directive: open a call region around a function.

        Autocalling is temporarily disabled while reading the function
        reference so that '$func' is not turned into '$func()' here.
        """
        # @@TR: need to enable single line version of this
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        lineCol = self.getRowCol()
        self.getDirectiveStartToken()
        self.advance(len('call'))
        startPos = self.pos()
        useAutocallingOrig = self.setting('useAutocalling')
        self.setSetting('useAutocalling', False)
        self.getWhiteSpace()
        if self.matchCheetahVarStart():
            functionName = self.getCheetahVar()
        else:
            functionName = self.getCheetahVar(plain=True, skipStartToken=True)
        self.setSetting('useAutocalling', useAutocallingOrig)
        # @@TR: fix up filtering
        self._applyExpressionFilters(self[startPos:self.pos()], 'call', startPos=startPos)
        self.getWhiteSpace()
        args = self.getExpression(pyTokensToBreakAt=[':']).strip()
        if self.matchColonForSingleLineShortFormDirective():
            self.advance() # skip over :
            self._compiler.startCallRegion(functionName, args, lineCol)
            self.getWhiteSpace(max=1)
            self.parse(breakPoint=self.findEOL(gobble=False))
            self._compiler.endCallRegion()
        else:
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self.pushToOpenDirectivesStack("call")
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            self._compiler.startCallRegion(functionName, args, lineCol)
    def eatCallArg(self):
        """Handle an #arg directive inside a #call region.

        Registers the named argument with the compiler; a trailing ':'
        is consumed so content can follow on the same line.
        """
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        lineCol = self.getRowCol()
        self.getDirectiveStartToken()
        self.advance(len('arg'))
        startPos = self.pos()
        self.getWhiteSpace()
        argName = self.getIdentifier()
        self.getWhiteSpace()
        argName = self._applyExpressionFilters(argName, 'arg', startPos=startPos)
        self._compiler.setCallArg(argName, lineCol)
        if self.peek() == ':':
            self.getc()
        else:
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    def eatFilter(self):
        """Handle a #filter directive: switch the active output filter.

        The filter may be a Cheetah expression (isKlass=True) or a plain
        identifier naming a known filter; supports both single-line and
        block forms.
        """
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        self.getDirectiveStartToken()
        self.advance(len('filter'))
        self.getWhiteSpace()
        startPos = self.pos()
        if self.matchCheetahVarStart():
            isKlass = True
            theFilter = self.getExpression(pyTokensToBreakAt=[':'])
        else:
            isKlass = False
            theFilter = self.getIdentifier()
            self.getWhiteSpace()
        theFilter = self._applyExpressionFilters(theFilter, 'filter', startPos=startPos)
        if self.matchColonForSingleLineShortFormDirective():
            self.advance() # skip over :
            self.getWhiteSpace(max=1)
            self._compiler.setFilter(theFilter, isKlass)
            self.parse(breakPoint=self.findEOL(gobble=False))
            self._compiler.closeFilterBlock()
        else:
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self.pushToOpenDirectivesStack("filter")
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            self._compiler.setFilter(theFilter, isKlass)
    def eatTransform(self):
        """Handle a #transform directive: set an output transformer.

        Like #filter, the transformer is either an expression or a plain
        identifier; this directive has no block form here.
        """
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        self.getDirectiveStartToken()
        self.advance(len('transform'))
        self.getWhiteSpace()
        startPos = self.pos()
        if self.matchCheetahVarStart():
            isKlass = True
            transformer = self.getExpression(pyTokensToBreakAt=[':'])
        else:
            isKlass = False
            transformer = self.getIdentifier()
            self.getWhiteSpace()
        transformer = self._applyExpressionFilters(transformer, 'transform', startPos=startPos)
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        self._compiler.setTransform(transformer, isKlass)
    def eatErrorCatcher(self):
        """Handle an #errorCatcher directive: install the named catcher."""
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        self.getDirectiveStartToken()
        self.advance(len('errorCatcher'))
        self.getWhiteSpace()
        startPos = self.pos()
        errorCatcherName = self.getIdentifier()
        errorCatcherName = self._applyExpressionFilters(
            errorCatcherName, 'errorcatcher', startPos=startPos)
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        self._compiler.setErrorCatcher(errorCatcherName)
    def eatCapture(self):
        """Handle a #capture directive: capture output into a variable.

        The filtered expression names the assignment target; supports the
        single-line short form and the block form closed by '#end capture'.
        """
        # @@TR: this could be refactored to use the code in eatSimpleIndentingDirective
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        lineCol = self.getRowCol()
        self.getDirectiveStartToken()
        self.advance(len('capture'))
        startPos = self.pos()
        self.getWhiteSpace()
        expr = self.getExpression(pyTokensToBreakAt=[':'])
        expr = self._applyExpressionFilters(expr, 'capture', startPos=startPos)
        if self.matchColonForSingleLineShortFormDirective():
            self.advance() # skip over :
            self._compiler.startCaptureRegion(assignTo=expr, lineCol=lineCol)
            self.getWhiteSpace(max=1)
            self.parse(breakPoint=self.findEOL(gobble=False))
            self._compiler.endCaptureRegion()
        else:
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            self.pushToOpenDirectivesStack("capture")
            self._compiler.startCaptureRegion(assignTo=expr, lineCol=lineCol)
    def eatIf(self):
        """Handle an #if directive.

        Three shapes are recognized:
          * ternary  -- '#if cond then a else b' (both keywords present),
          * single-line short form -- '#if cond: body',
          * block form -- closed later by '#end if'.
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()
        lineCol = self.getRowCol()
        self.getDirectiveStartToken()
        startPos = self.pos()
        expressionParts = self.getExpressionParts(pyTokensToBreakAt=[':'])
        expr = ''.join(expressionParts).strip()
        expr = self._applyExpressionFilters(expr, 'if', startPos=startPos)
        isTernaryExpr = ('then' in expressionParts and 'else' in expressionParts)
        if isTernaryExpr:
            # split the parts into condition / true / false sub-expressions
            conditionExpr = []
            trueExpr = []
            falseExpr = []
            currentExpr = conditionExpr
            for part in expressionParts:
                if part.strip()=='then':
                    currentExpr = trueExpr
                elif part.strip()=='else':
                    currentExpr = falseExpr
                else:
                    currentExpr.append(part)
            conditionExpr = ''.join(conditionExpr)
            trueExpr = ''.join(trueExpr)
            falseExpr = ''.join(falseExpr)
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
            self._compiler.addTernaryExpr(conditionExpr, trueExpr, falseExpr, lineCol=lineCol)
        elif self.matchColonForSingleLineShortFormDirective():
            self.advance() # skip over :
            self._compiler.addIf(expr, lineCol=lineCol)
            self.getWhiteSpace(max=1)
            self.parse(breakPoint=self.findEOL(gobble=True))
            self._compiler.commitStrConst()
            self._compiler.dedent()
        else:
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
            self.pushToOpenDirectivesStack('if')
            self._compiler.addIf(expr, lineCol=lineCol)
## end directive handlers
    def handleEndDef(self):
        """Close a #def block.

        When nested #def scopes are allowed and another '#def' is still
        open, only dedent; otherwise tell the compiler to close the method.
        """
        isNestedDef = (self.setting('allowNestedDefScopes')
                       and [name for name in self._openDirectivesStack if name=='def'])
        if not isNestedDef:
            self._compiler.closeDef()
        else:
            # @@TR: temporary hack of useSearchList
            self.setSetting('useSearchList', self._useSearchList_orig)
            self._compiler.commitStrConst()
            self._compiler.dedent()
###
    def pushToOpenDirectivesStack(self, directiveName):
        """Record that a block directive was opened and awaits its #end."""
        assert directiveName in self._closeableDirectives
        self._openDirectivesStack.append(directiveName)
def popFromOpenDirectivesStack(self, directiveName):
if not self._openDirectivesStack:
raise ParseError(self, msg="#end found, but nothing to end")
if self._openDirectivesStack[-1] == directiveName:
del self._openDirectivesStack[-1]
else:
raise ParseError(self, msg="#end %s found, expected #end %s" %(
directiveName, self._openDirectivesStack[-1]))
def assertEmptyOpenDirectivesStack(self):
if self._openDirectivesStack:
errorMsg = (
"Some #directives are missing their corresponding #end ___ tag: %s" %(
', '.join(self._openDirectivesStack)))
raise ParseError(self, msg=errorMsg)
##################################################
## Make an alias to export
Parser = _HighLevelParser
| [
[
8,
0,
0.0019,
0.0034,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0041,
0.0004,
0,
0.66,
0.0132,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0045,
0.0004,
0,
0.66... | [
"\"\"\"\nParser classes for Cheetah's Compiler\n\nClasses:\n ParseError( Exception )\n _LowLevelParser( Cheetah.SourceReader.SourceReader ), basically a lexer\n _HighLevelParser( _LowLevelParser )\n Parser === _HighLevelParser (an alias)",
"import os",
"import sys",
"import re",
"from re import DOTALL, ... |
#!/usr/bin/env python
import os
import pprint
try:
from functools import reduce
except ImportError:
# Assume we have reduce
pass
from Cheetah import Parser
from Cheetah import Compiler
from Cheetah import Template
class Analyzer(Parser.Parser):
    """Cheetah parser subclass that tallies how often each directive appears."""
    def __init__(self, *args, **kwargs):
        self.calls = {}  # directive name -> number of occurrences
        super(Analyzer, self).__init__(*args, **kwargs)
    def eatDirective(self):
        directive = self.matchDirective()
        self.calls[directive] = self.calls.get(directive, 0) + 1
        super(Analyzer, self).eatDirective()
class AnalysisCompiler(Compiler.ModuleCompiler):
    # swap in the counting parser so every compile records directive usage
    parserClass = Analyzer
def analyze(source):
    """Compile *source* and return its {directive: count} usage mapping."""
    templateClass = Template.Template.compile(source, compilerClass=AnalysisCompiler)
    return templateClass._CHEETAH_compilerInstance._parser.calls
def main_file(f):
fd = open(f, 'r')
try:
print u'>>> Analyzing %s' % f
calls = analyze(fd.read())
return calls
finally:
fd.close()
def _find_templates(directory, suffix):
    """Yield the path of every file under *directory* ending with *suffix*.

    Uses os.path.join rather than manual 'root + os.path.sep + f'
    concatenation, which produced doubled separators when *directory*
    itself ended with a path separator.
    """
    for root, dirs, files in os.walk(directory):
        for f in files:
            if not f.endswith(suffix):
                continue
            yield os.path.join(root, f)
def _analyze_templates(iterable):
    """Lazily yield main_file() results for each template path in *iterable*."""
    return (main_file(path) for path in iterable)
def main_dir(opts):
    """Analyze every template under opts.dir and return summed directive counts.

    Uses dict.items() instead of the Python-2-only iteritems(), and
    dict.get() instead of try/except for the tally — same behavior on
    Python 2, and it also runs under Python 3.
    """
    results = _analyze_templates(_find_templates(opts.dir, opts.suffix))
    totals = {}
    for series in results:
        if not series:
            continue
        for directive, count in series.items():
            totals[directive] = totals.get(directive, 0) + count
    return totals
def main():
    """Command-line entry point: analyze one file (-f) or a directory (-d)."""
    from optparse import OptionParser
    op = OptionParser()
    op.add_option('-f', '--file', dest='file', default=None,
            help='Specify a single file to analyze')
    op.add_option('-d', '--dir', dest='dir', default=None,
            help='Specify a directory of templates to analyze')
    op.add_option('--suffix', default='tmpl', dest='suffix',
            help='Specify a custom template file suffix for the -d option (default: "tmpl")')
    opts, args = op.parse_args()
    if not opts.file and not opts.dir:
        op.print_help()
        return
    results = None
    if opts.file:
        results = main_file(opts.file)
    if opts.dir:
        results = main_dir(opts)
    pprint.pprint(results)
if __name__ == '__main__':
main()
| [
[
1,
0,
0.0306,
0.0102,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0408,
0.0102,
0,
0.66,
0.0714,
276,
0,
1,
0,
0,
276,
0,
0
],
[
7,
0,
0.0816,
0.051,
0,
0... | [
"import os",
"import pprint",
"try:\n from functools import reduce\nexcept ImportError:\n # Assume we have reduce\n pass",
" from functools import reduce",
"from Cheetah import Parser",
"from Cheetah import Compiler",
"from Cheetah import Template",
"class Analyzer(Parser.Parser):\n def... |
try:
    from ds.sys.Unspecified import Unspecified
except ImportError:
    # Fallback sentinel used when the ds package is unavailable.
    class _Unspecified:
        def __repr__(self):
            return 'Unspecified'
        __str__ = __repr__
    Unspecified = _Unspecified()
| [
[
7,
0,
0.5556,
1,
0,
0.66,
0,
0,
0,
1,
0,
0,
0,
0,
1
],
[
1,
1,
0.2222,
0.1111,
1,
0.08,
0,
219,
0,
1,
0,
0,
219,
0,
0
],
[
3,
1,
0.6667,
0.5556,
1,
0.08,
0,
... | [
"try:\n from ds.sys.Unspecified import Unspecified\nexcept ImportError:\n class _Unspecified:\n def __repr__(self):\n return 'Unspecified' \n def __str__(self):\n return 'Unspecified'",
" from ds.sys.Unspecified import Unspecified",
" class _Unspecified:\n ... |
# $Id: WebInputMixin.py,v 1.10 2006/01/06 21:56:54 tavis_rudd Exp $
"""Provides helpers for Template.webInput(), a method for importing web
transaction variables in bulk. See the docstring of webInput for full details.
Meta-Data
================================================================================
Author: Mike Orr <iron@mso.oz.net>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.10 $
Start Date: 2002/03/17
Last Revision Date: $Date: 2006/01/06 21:56:54 $
"""
__author__ = "Mike Orr <iron@mso.oz.net>"
__revision__ = "$Revision: 1.10 $"[11:-2]
from Cheetah.Utils.Misc import useOrRaise
class NonNumericInputError(ValueError): pass
##################################################
## PRIVATE FUNCTIONS AND CLASSES
class _Converter:
    """Container describing one type-conversion suffix.

    Attributes:
      name    -- converter name, used in error messages.
      func    -- factory callable, or None for no conversion.
      default -- value to use or raise when the real value is missing.
      error   -- value to use or raise when func() raises an exception.
    """
    def __init__(self, name, func, default, error):
        self.name, self.func = name, func
        self.default, self.error = default, error
def _lookup(name, func, multi, converters):
    """Look up a Webware field/cookie/value/session value.

    Returns '(realName, value)' where 'realName' is 'name' with any
    conversion suffix (e.g. ":int") stripped off.  Numeric conversion
    and single- vs multi-value handling follow the suffix and 'multi'.
    """
    # Split the conversion suffix off 'name': "height:int" -> ("height", "int").
    # A bare trailing colon ("height:") behaves like no suffix at all.
    colon = name.find(':')
    if colon == -1:
        longName = shortName = name
        ext = ''
    else:
        longName = name
        shortName = name[:colon]
        ext = name[colon+1:]
    # Fetch the raw value(s): try the suffixed name first, then the bare one.
    if longName == shortName:
        values = func(shortName, None)
    else:
        values = func(longName, None) or func(shortName, None)
    # Normalize to a list of zero or more strings.
    if values is None:
        values = []
    elif isinstance(values, str):
        values = [values]
    # Map the suffix to its _Converter object, or fail loudly.
    try:
        converter = converters[ext]
    except KeyError:
        raise TypeError("'%s' is not a valid converter name in '%s'"
                        % (ext, longName))
    # Convert each element; on failure substitute or raise converter.error.
    if converter.func is not None:
        converted = []
        for raw in values:
            try:
                converted.append(converter.func(raw))
            except (TypeError, ValueError):
                errmsg = "%s '%s' contains invalid characters" % (converter.name, raw)
                converted.append(useOrRaise(converter.error, errmsg))
        values = converted
    # Multi-value fields return the whole list; single-value fields return
    # the first element, or converter.default when nothing was submitted.
    if multi:
        return shortName, values
    if not values:
        return shortName, useOrRaise(converter.default)
    return shortName, values[0]
# vim: sw=4 ts=4 expandtab
| [
[
8,
0,
0.0735,
0.1176,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1373,
0.0098,
0,
0.66,
0.1667,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.1471,
0.0098,
0,
0.66... | [
"\"\"\"Provides helpers for Template.webInput(), a method for importing web\ntransaction variables in bulk. See the docstring of webInput for full details.\n\nMeta-Data\n================================================================================\nAuthor: Mike Orr <iron@mso.oz.net>\nLicense: This software is r... |
## statprof.py
## Copyright (C) 2004,2005 Andy Wingo <wingo at pobox dot com>
## Copyright (C) 2001 Rob Browning <rlb at defaultvalue dot org>
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this program; if not, contact:
##
## Free Software Foundation Voice: +1-617-542-5942
## 59 Temple Place - Suite 330 Fax: +1-617-542-2652
## Boston, MA 02111-1307, USA gnu@gnu.org
"""
statprof is intended to be a fairly simple statistical profiler for
python. It was ported directly from a statistical profiler for guile,
also named statprof, available from guile-lib [0].
[0] http://wingolog.org/software/guile-lib/statprof/
To start profiling, call statprof.start():
>>> start()
Then run whatever it is that you want to profile, for example:
>>> import test.pystone; test.pystone.pystones()
Then stop the profiling and print out the results:
>>> stop()
>>> display()
% cumulative self
time seconds seconds name
26.72 1.40 0.37 pystone.py:79:Proc0
13.79 0.56 0.19 pystone.py:133:Proc1
13.79 0.19 0.19 pystone.py:208:Proc8
10.34 0.16 0.14 pystone.py:229:Func2
6.90 0.10 0.10 pystone.py:45:__init__
4.31 0.16 0.06 pystone.py:53:copy
...
All of the numerical data with the exception of the calls column is
statistically approximate. In the following column descriptions, and
in all of statprof, "time" refers to execution time (both user and
system), not wall clock time.
% time
The percent of the time spent inside the procedure itself (not
counting children).
cumulative seconds
The total number of seconds spent in the procedure, including
children.
self seconds
The total number of seconds spent in the procedure itself (not
counting children).
name
The name of the procedure.
By default statprof keeps the data collected from previous runs. If you
want to clear the collected data, call reset():
>>> reset()
reset() can also be used to change the sampling frequency. For example,
to tell statprof to sample 50 times a second:
>>> reset(50)
This means that statprof will sample the call stack after every 1/50 of
a second of user + system time spent running on behalf of the python
process. When your process is idle (for example, blocking in a read(),
as is the case at the listener), the clock does not advance. For this
reason statprof is not currently not suitable for profiling io-bound
operations.
The profiler uses the hash of the code object itself to identify the
procedures, so it won't confuse different procedures with the same name.
They will show up as two different rows in the output.
Right now the profiler is quite simplistic. I cannot provide
call-graphs or other higher level information. What you see in the
table is pretty much all there is. Patches are welcome :-)
Threading
---------
Because signals only get delivered to the main thread in Python,
statprof only profiles the main thread. However because the time
reporting function uses per-process timers, the results can be
significantly off if other threads' work patterns are not similar to the
main thread's work patterns.
Implementation notes
--------------------
The profiler works by setting the unix profiling signal ITIMER_PROF to
go off after the interval you define in the call to reset(). When the
signal fires, a sampling routine is run which looks at the current
procedure that's executing, and then crawls up the stack, and for each
frame encountered, increments that frame's code object's sample count.
Note that if a procedure is encountered multiple times on a given stack,
it is only counted once. After the sampling is complete, the profiler
resets profiling timer to fire again after the appropriate interval.
Meanwhile, the profiler keeps track, via os.times(), how much CPU time
(system and user -- which is also what ITIMER_PROF tracks), has elapsed
while code has been executing within a start()/stop() block.
The profiler also tries to avoid counting or timing its own code as
much as possible.
"""
try:
import itimer
except ImportError:
raise ImportError('''statprof requires the itimer python extension.
To install it, enter the following commands from a terminal:
wget http://www.cute.fi/~torppa/py-itimer/py-itimer.tar.gz
tar zxvf py-itimer.tar.gz
cd py-itimer
sudo python setup.py install
''')
import signal
import os
__all__ = ['start', 'stop', 'reset', 'display']
###########################################################################
## Utils
def clock():
    """Return the CPU time (user + system) consumed by this process."""
    user, system = os.times()[:2]
    return user + system
###########################################################################
## Collection data structures
class ProfileState(object):
    """Mutable bookkeeping for the profiler: timers, counters, nesting."""
    def __init__(self, frequency=None):
        self.reset(frequency)
    def reset(self, frequency=None):
        """Zero all counters; optionally change the sampling frequency."""
        self.accumulated_time = 0.0     # total CPU time observed so far
        self.last_start_time = None     # clock() reading while timer active
        self.sample_count = 0           # number of sampler invocations
        if frequency:
            self.sample_interval = 1.0 / frequency
        elif not hasattr(self, 'sample_interval'):
            self.sample_interval = 1.0 / 100.0   # default: 100 Hz
        # otherwise keep whatever frequency was previously configured
        self.remaining_prof_time = None  # leftover itimer time across stop()
        self.profile_level = 0           # start()/stop() nesting depth
        self.count_calls = False         # whether to catch apply-frame
        self.gc_time_taken = 0           # gc time between start() and stop()
    def accumulate_time(self, stop_time):
        """Add the elapsed interval since the last start to the running total."""
        self.accumulated_time += stop_time - self.last_start_time
# the single module-wide profiler state instance
state = ProfileState()
## call_data := { code object: CallData }
call_data = {}
class CallData(object):
    """Per-code-object sample counters; registers itself in call_data."""
    def __init__(self, code):
        self.name = code.co_name
        self.filename = code.co_filename
        self.lineno = code.co_firstlineno
        self.call_count = 0            # incremented only when counting calls
        self.cum_sample_count = 0      # samples with this code anywhere on stack
        self.self_sample_count = 0     # samples with this code on top of stack
        call_data[code] = self         # register in the module-wide table
def get_call_data(code):
    """Return the CallData for *code*, creating and registering one if needed."""
    try:
        return call_data[code]
    except KeyError:
        # CallData.__init__ registers the new record in call_data itself
        return CallData(code)
###########################################################################
## SIGPROF handler
def sample_stack_procs(frame):
    """Record one sample: self count for the innermost frame, cumulative
    counts for every distinct code object found on the stack.

    Iterates the dict directly instead of the Python-2-only iterkeys(),
    which is identical in behavior and also valid on Python 3.
    """
    state.sample_count += 1
    get_call_data(frame.f_code).self_sample_count += 1
    # collect each code object at most once so recursive procedures are
    # not over-counted in the cumulative column
    code_seen = {}
    while frame:
        code_seen[frame.f_code] = True
        frame = frame.f_back
    for code in code_seen:
        get_call_data(code).cum_sample_count += 1
def profile_signal_handler(signum, frame):
    """SIGPROF handler: take one sample, then re-arm the profiling timer.

    Only samples while profiling is active; re-arms ITIMER_PROF as a
    one-shot so time spent inside the sampler itself is not counted.
    """
    if state.profile_level > 0:
        state.accumulate_time(clock())
        sample_stack_procs(frame)
        itimer.setitimer(itimer.ITIMER_PROF,
            state.sample_interval, 0.0)
        state.last_start_time = clock()
###########################################################################
## Profiling API
def is_active():
    """Return True while at least one start() has not been matched by stop()."""
    return state.profile_level > 0
def start():
    """Begin (or nest) profiling; outermost call installs the SIGPROF timer."""
    state.profile_level += 1
    if state.profile_level == 1:
        state.last_start_time = clock()
        # resume any interval left over from a previous stop()
        rpt = state.remaining_prof_time
        state.remaining_prof_time = None
        signal.signal(signal.SIGPROF, profile_signal_handler)
        itimer.setitimer(itimer.ITIMER_PROF,
            rpt or state.sample_interval, 0.0)
        state.gc_time_taken = 0 # dunno
def stop():
    """End one level of profiling; outermost call disarms the SIGPROF timer."""
    state.profile_level -= 1
    if state.profile_level == 0:
        state.accumulate_time(clock())
        state.last_start_time = None
        # remember how much of the interval was left so start() can resume it
        rpt = itimer.setitimer(itimer.ITIMER_PROF, 0.0, 0.0)
        signal.signal(signal.SIGPROF, signal.SIG_IGN)
        state.remaining_prof_time = rpt[0]
        state.gc_time_taken = 0 # dunno
def reset(frequency=None):
    """Discard collected data; optionally set a new sampling *frequency* (Hz)."""
    assert state.profile_level == 0, "Can't reset() while statprof is running"
    call_data.clear()
    state.reset(frequency)
###########################################################################
## Reporting API
class CallStats(object):
    """Derived, printable statistics for one profiled code object.

    Fix: the percent column was computed as 'self_samples / nsamples * 100',
    which under this module's Python 2 dialect is integer division and
    floors to 0 whenever self_samples < nsamples.  Multiplying by 100.0
    first forces float arithmetic on both Python 2 and 3.
    """
    def __init__(self, call_data):
        self_samples = call_data.self_sample_count
        cum_samples = call_data.cum_sample_count
        nsamples = state.sample_count
        secs_per_sample = state.accumulated_time / nsamples
        basename = os.path.basename(call_data.filename)
        self.name = '%s:%d:%s' % (basename, call_data.lineno, call_data.name)
        self.pcnt_time_in_proc = self_samples * 100.0 / nsamples
        self.cum_secs_in_proc = cum_samples * secs_per_sample
        self.self_secs_in_proc = self_samples * secs_per_sample
        self.num_calls = None
        self.self_secs_per_call = None
        self.cum_secs_per_call = None
    def display(self):
        """Print one formatted row: % time, cumulative secs, self secs, name."""
        print('%6.2f %9.2f %9.2f %s' % (self.pcnt_time_in_proc,
                                        self.cum_secs_in_proc,
                                        self.self_secs_in_proc,
                                        self.name))
def display():
if state.sample_count == 0:
print('No samples recorded.')
return
l = [CallStats(x) for x in call_data.itervalues()]
l = [(x.self_secs_in_proc, x.cum_secs_in_proc, x) for x in l]
l.sort(reverse=True)
l = [x[2] for x in l]
print('%5.5s %10.10s %7.7s %-8.8s' % ('% ', 'cumulative', 'self', ''))
print('%5.5s %9.9s %8.8s %-8.8s' % ("time", "seconds", "seconds", "name"))
for x in l:
x.display()
print('---')
print('Sample count: %d' % state.sample_count)
print('Total time: %f seconds' % state.accumulated_time)
| [
[
8,
0,
0.2336,
0.3257,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
7,
0,
0.4276,
0.0362,
0,
0.66,
0.0556,
0,
0,
1,
0,
0,
0,
0,
1
],
[
1,
1,
0.4145,
0.0033,
1,
0.06,
... | [
"\"\"\"\nstatprof is intended to be a fairly simple statistical profiler for\npython. It was ported directly from a statistical profiler for guile,\nalso named statprof, available from guile-lib [0].\n\n[0] http://wingolog.org/software/guile-lib/statprof/\n\nTo start profiling, call statprof.start():",
"try:\n ... |
"""This is a copy of the htmlDecode function in Webware.
@@TR: It implemented more efficiently.
"""
from Cheetah.Utils.htmlEncode import htmlCodesReversed
def htmlDecode(s, codes=htmlCodesReversed):
""" Returns the ASCII decoded version of the given HTML string. This does
NOT remove normal HTML tags like <p>. It is the inverse of htmlEncode()."""
for code in codes:
s = s.replace(code[1], code[0])
return s
| [
[
8,
0,
0.2143,
0.3571,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.5,
0.0714,
0,
0.66,
0.5,
756,
0,
1,
0,
0,
756,
0,
0
],
[
2,
0,
0.8214,
0.4286,
0,
0.66,
... | [
"\"\"\"This is a copy of the htmlDecode function in Webware.\n\n@@TR: It implemented more efficiently.\n\n\"\"\"",
"from Cheetah.Utils.htmlEncode import htmlCodesReversed",
"def htmlDecode(s, codes=htmlCodesReversed):\n \"\"\" Returns the ASCII decoded version of the given HTML string. This does\n NOT rem... |
#!/usr/bin/env python
"""
Miscellaneous functions/objects used by Cheetah but also useful standalone.
"""
import os # Used in mkdirsWithPyInitFile.
import sys # Used in die.
##################################################
## MISCELLANEOUS FUNCTIONS
def die(reason):
sys.stderr.write(reason + '\n')
sys.exit(1)
def useOrRaise(thing, errmsg=''):
"""Raise 'thing' if it's a subclass of Exception. Otherwise return it.
Called by: Cheetah.Servlet.cgiImport()
"""
if isinstance(thing, type) and issubclass(thing, Exception):
raise thing(errmsg)
return thing
def checkKeywords(dic, legalKeywords, what='argument'):
"""Verify no illegal keyword arguments were passed to a function.
in : dic, dictionary (**kw in the calling routine).
legalKeywords, list of strings, the keywords that are allowed.
what, string, suffix for error message (see function source).
out: None.
exc: TypeError if 'dic' contains a key not in 'legalKeywords'.
called by: Cheetah.Template.__init__()
"""
# XXX legalKeywords could be a set when sets get added to Python.
for k in dic.keys(): # Can be dic.iterkeys() if Python >= 2.2.
if k not in legalKeywords:
raise TypeError("'%s' is not a valid %s" % (k, what))
def removeFromList(list_, *elements):
"""Save as list_.remove(each element) but don't raise an error if
element is missing. Modifies 'list_' in place! Returns None.
"""
for elm in elements:
try:
list_.remove(elm)
except ValueError:
pass
def mkdirsWithPyInitFiles(path):
"""Same as os.makedirs (mkdir 'path' and all missing parent directories)
but also puts a Python '__init__.py' file in every directory it
creates. Does nothing (without creating an '__init__.py' file) if the
directory already exists.
"""
dir, fil = os.path.split(path)
if dir and not os.path.exists(dir):
mkdirsWithPyInitFiles(dir)
if not os.path.exists(path):
os.mkdir(path)
init = os.path.join(path, "__init__.py")
f = open(init, 'w') # Open and close to produce empty file.
f.close()
# vim: shiftwidth=4 tabstop=4 expandtab
| [
[
8,
0,
0.0448,
0.0448,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0746,
0.0149,
0,
0.66,
0.1429,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0896,
0.0149,
0,
0.66... | [
"\"\"\"\n Miscellaneous functions/objects used by Cheetah but also useful standalone.\n\"\"\"",
"import os # Used in mkdirsWithPyInitFile.",
"import sys # Used in die.",
"def die(reason):\n sys.stderr.write(reason + '\\n')\n sys.exit(1)",
" sys.stderr.write(reason + '\\n')",
"... |
"""
Indentation maker.
@@TR: this code is unsupported and largely undocumented ...
This version is based directly on code by Robert Kuzelj
<robert_kuzelj@yahoo.com> and uses his directive syntax. Some classes and
attributes have been renamed. Indentation is output via
$self._CHEETAH__indenter.indent() to prevent '_indenter' being looked up on the
searchList and another one being found. The directive syntax will
soon be changed somewhat.
"""
import re
import sys
def indentize(source):
return IndentProcessor().process(source)
class IndentProcessor(object):
"""Preprocess #indent tags."""
LINE_SEP = '\n'
ARGS = "args"
INDENT_DIR = re.compile(r'[ \t]*#indent[ \t]*(?P<args>.*)')
DIRECTIVE = re.compile(r"[ \t]*#")
WS = "ws"
WHITESPACES = re.compile(r"(?P<ws>[ \t]*)")
INC = "++"
DEC = "--"
SET = "="
CHAR = "char"
ON = "on"
OFF = "off"
PUSH = "push"
POP = "pop"
def process(self, _txt):
result = []
for line in _txt.splitlines():
match = self.INDENT_DIR.match(line)
if match:
#is indention directive
args = match.group(self.ARGS).strip()
if args == self.ON:
line = "#silent $self._CHEETAH__indenter.on()"
elif args == self.OFF:
line = "#silent $self._CHEETAH__indenter.off()"
elif args == self.INC:
line = "#silent $self._CHEETAH__indenter.inc()"
elif args == self.DEC:
line = "#silent $self._CHEETAH__indenter.dec()"
elif args.startswith(self.SET):
level = int(args[1:])
line = "#silent $self._CHEETAH__indenter.setLevel(%(level)d)" % {"level":level}
elif args.startswith('chars'):
self.indentChars = eval(args.split('=')[1])
line = "#silent $self._CHEETAH__indenter.setChars(%(level)d)" % {"level":level}
elif args.startswith(self.PUSH):
line = "#silent $self._CHEETAH__indenter.push()"
elif args.startswith(self.POP):
line = "#silent $self._CHEETAH__indenter.pop()"
else:
match = self.DIRECTIVE.match(line)
if not match:
#is not another directive
match = self.WHITESPACES.match(line)
if match:
size = len(match.group("ws").expandtabs(4))
line = ("${self._CHEETAH__indenter.indent(%(size)d)}" % {"size":size}) + line.lstrip()
else:
line = "${self._CHEETAH__indenter.indent(0)}" + line
result.append(line)
return self.LINE_SEP.join(result)
class Indenter(object):
"""
A class that keeps track of the current indentation level.
.indent() returns the appropriate amount of indentation.
"""
On = 1
Level = 0
Chars = ' '
LevelStack = []
def on(self):
self.On = 1
def off(self):
self.On = 0
def inc(self):
self.Level += 1
def dec(self):
"""decrement can only be applied to values greater zero
values below zero don't make any sense at all!"""
if self.Level > 0:
self.Level -= 1
def push(self):
self.LevelStack.append(self.Level)
def pop(self):
"""the levestack can not become -1. any attempt to do so
sets the level to 0!"""
if len(self.LevelStack) > 0:
self.Level = self.LevelStack.pop()
else:
self.Level = 0
def setLevel(self, _level):
"""the leve can't be less than zero. any attempt to do so
sets the level automatically to zero!"""
if _level < 0:
self.Level = 0
else:
self.Level = _level
def setChar(self, _chars):
self.Chars = _chars
def indent(self, _default=0):
if self.On:
return self.Chars * self.Level
return " " * _default
| [
[
8,
0,
0.0488,
0.0894,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1057,
0.0081,
0,
0.66,
0.2,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.1138,
0.0081,
0,
0.66,
... | [
"\"\"\"\nIndentation maker.\n@@TR: this code is unsupported and largely undocumented ...\n\nThis version is based directly on code by Robert Kuzelj\n<robert_kuzelj@yahoo.com> and uses his directive syntax. Some classes and\nattributes have been renamed. Indentation is output via\n$self._CHEETAH__indenter.indent()... |
"""This is a copy of the htmlEncode function in Webware.
@@TR: It implemented more efficiently.
"""
htmlCodes = [
['&', '&'],
['<', '<'],
['>', '>'],
['"', '"'],
]
htmlCodesReversed = htmlCodes[:]
htmlCodesReversed.reverse()
def htmlEncode(s, codes=htmlCodes):
""" Returns the HTML encoded version of the given string. This is useful to
display a plain ASCII text string on a web page."""
for code in codes:
s = s.replace(code[0], code[1])
return s
| [
[
8,
0,
0.1667,
0.2857,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.4524,
0.2857,
0,
0.66,
0.25,
350,
0,
0,
0,
0,
0,
5,
0
],
[
14,
0,
0.619,
0.0476,
0,
0.66,
... | [
"\"\"\"This is a copy of the htmlEncode function in Webware.\n\n\n@@TR: It implemented more efficiently.\n\n\"\"\"",
"htmlCodes = [\n ['&', '&'],\n ['<', '<'],\n ['>', '>'],\n ['\"', '"'],\n]",
"htmlCodesReversed = htmlCodes[:]",
"htmlCodesReversed.reverse()",
"def htmlEncode(s, c... |
#
| [] | [] |
import sys
import os.path
import copy as copyModule
from ConfigParser import ConfigParser
import re
from tokenize import Intnumber, Floatnumber, Number
from types import *
import types
import new
import time
from StringIO import StringIO # not cStringIO because of unicode support
import imp # used by SettingsManager.updateSettingsFromPySrcFile()
numberRE = re.compile(Number)
complexNumberRE = re.compile('[\(]*' +Number + r'[ \t]*\+[ \t]*' + Number + '[\)]*')
convertableToStrTypes = (StringType, IntType, FloatType,
LongType, ComplexType, NoneType,
UnicodeType)
##################################################
## FUNCTIONS ##
def mergeNestedDictionaries(dict1, dict2, copy=False, deepcopy=False):
"""Recursively merge the values of dict2 into dict1.
This little function is very handy for selectively overriding settings in a
settings dictionary that has a nested structure.
"""
if copy:
dict1 = copyModule.copy(dict1)
elif deepcopy:
dict1 = copyModule.deepcopy(dict1)
for key, val in dict2.iteritems():
if key in dict1 and isinstance(val, dict) and isinstance(dict1[key], dict):
dict1[key] = mergeNestedDictionaries(dict1[key], val)
else:
dict1[key] = val
return dict1
def stringIsNumber(S):
"""Return True if theString represents a Python number, False otherwise.
This also works for complex numbers and numbers with +/- in front."""
S = S.strip()
if S[0] in '-+' and len(S) > 1:
S = S[1:].strip()
match = complexNumberRE.match(S)
if not match:
match = numberRE.match(S)
if not match or (match.end() != len(S)):
return False
else:
return True
def convStringToNum(theString):
"""Convert a string representation of a Python number to the Python version"""
if not stringIsNumber(theString):
raise Error(theString + ' cannot be converted to a Python number')
return eval(theString, {}, {})
class Error(Exception):
pass
class NoDefault(object):
pass
class ConfigParserCaseSensitive(ConfigParser):
"""A case sensitive version of the standard Python ConfigParser."""
def optionxform(self, optionstr):
"""Don't change the case as is done in the default implemenation."""
return optionstr
class _SettingsCollector(object):
"""An abstract base class that provides the methods SettingsManager uses to
collect settings from config files and strings.
This class only collects settings it doesn't modify the _settings dictionary
of SettingsManager instances in any way.
"""
_ConfigParserClass = ConfigParserCaseSensitive
def readSettingsFromModule(self, mod, ignoreUnderscored=True):
"""Returns all settings from a Python module.
"""
S = {}
attrs = vars(mod)
for k, v in attrs.iteritems():
if (ignoreUnderscored and k.startswith('_')):
continue
else:
S[k] = v
return S
def readSettingsFromPySrcStr(self, theString):
"""Return a dictionary of the settings in a Python src string."""
globalsDict = {'True': (1==1),
'False': (0==1),
}
newSettings = {'self':self}
exec((theString+os.linesep), globalsDict, newSettings)
del newSettings['self']
module = new.module('temp_settings_module')
module.__dict__.update(newSettings)
return self.readSettingsFromModule(module)
def readSettingsFromConfigFileObj(self, inFile, convert=True):
"""Return the settings from a config file that uses the syntax accepted by
Python's standard ConfigParser module (like Windows .ini files).
NOTE:
this method maintains case unlike the ConfigParser module, unless this
class was initialized with the 'caseSensitive' keyword set to False.
All setting values are initially parsed as strings. However, If the
'convert' arg is True this method will do the following value
conversions:
* all Python numeric literals will be coverted from string to number
* The string 'None' will be converted to the Python value None
* The string 'True' will be converted to a Python truth value
* The string 'False' will be converted to a Python false value
* Any string starting with 'python:' will be treated as a Python literal
or expression that needs to be eval'd. This approach is useful for
declaring lists and dictionaries.
If a config section titled 'Globals' is present the options defined
under it will be treated as top-level settings.
"""
p = self._ConfigParserClass()
p.readfp(inFile)
sects = p.sections()
newSettings = {}
sects = p.sections()
newSettings = {}
for s in sects:
newSettings[s] = {}
for o in p.options(s):
if o != '__name__':
newSettings[s][o] = p.get(s, o)
## loop through new settings -> deal with global settings, numbers,
## booleans and None ++ also deal with 'importSettings' commands
for sect, subDict in newSettings.items():
for key, val in subDict.items():
if convert:
if val.lower().startswith('python:'):
subDict[key] = eval(val[7:], {}, {})
if val.lower() == 'none':
subDict[key] = None
if val.lower() == 'true':
subDict[key] = True
if val.lower() == 'false':
subDict[key] = False
if stringIsNumber(val):
subDict[key] = convStringToNum(val)
## now deal with any 'importSettings' commands
if key.lower() == 'importsettings':
if val.find(';') < 0:
importedSettings = self.readSettingsFromPySrcFile(val)
else:
path = val.split(';')[0]
rest = ''.join(val.split(';')[1:]).strip()
parentDict = self.readSettingsFromPySrcFile(path)
importedSettings = eval('parentDict["' + rest + '"]')
subDict.update(mergeNestedDictionaries(subDict,
importedSettings))
if sect.lower() == 'globals':
newSettings.update(newSettings[sect])
del newSettings[sect]
return newSettings
class SettingsManager(_SettingsCollector):
"""A mixin class that provides facilities for managing application settings.
SettingsManager is designed to work well with nested settings dictionaries
of any depth.
"""
def __init__(self):
super(SettingsManager, self).__init__()
self._settings = {}
self._initializeSettings()
def _defaultSettings(self):
return {}
def _initializeSettings(self):
"""A hook that allows for complex setting initialization sequences that
involve references to 'self' or other settings. For example:
self._settings['myCalcVal'] = self._settings['someVal'] * 15
This method should be called by the class' __init__() method when needed.
The dummy implementation should be reimplemented by subclasses.
"""
pass
## core post startup methods
def setting(self, name, default=NoDefault):
"""Get a setting from self._settings, with or without a default value."""
if default is NoDefault:
return self._settings[name]
else:
return self._settings.get(name, default)
def hasSetting(self, key):
"""True/False"""
return key in self._settings
def setSetting(self, name, value):
"""Set a setting in self._settings."""
self._settings[name] = value
def settings(self):
"""Return a reference to the settings dictionary"""
return self._settings
def copySettings(self):
"""Returns a shallow copy of the settings dictionary"""
return copyModule.copy(self._settings)
def deepcopySettings(self):
"""Returns a deep copy of the settings dictionary"""
return copyModule.deepcopy(self._settings)
def updateSettings(self, newSettings, merge=True):
"""Update the settings with a selective merge or a complete overwrite."""
if merge:
mergeNestedDictionaries(self._settings, newSettings)
else:
self._settings.update(newSettings)
## source specific update methods
def updateSettingsFromPySrcStr(self, theString, merge=True):
"""Update the settings from a code in a Python src string."""
newSettings = self.readSettingsFromPySrcStr(theString)
self.updateSettings(newSettings,
merge=newSettings.get('mergeSettings', merge) )
def updateSettingsFromConfigFileObj(self, inFile, convert=True, merge=True):
"""See the docstring for .updateSettingsFromConfigFile()
The caller of this method is responsible for closing the inFile file
object."""
newSettings = self.readSettingsFromConfigFileObj(inFile, convert=convert)
self.updateSettings(newSettings,
merge=newSettings.get('mergeSettings', merge))
def updateSettingsFromConfigStr(self, configStr, convert=True, merge=True):
"""See the docstring for .updateSettingsFromConfigFile()
"""
configStr = '[globals]\n' + configStr
inFile = StringIO(configStr)
newSettings = self.readSettingsFromConfigFileObj(inFile, convert=convert)
self.updateSettings(newSettings,
merge=newSettings.get('mergeSettings', merge))
| [
[
1,
0,
0.0034,
0.0034,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0069,
0.0034,
0,
0.66,
0.0455,
79,
0,
1,
0,
0,
79,
0,
0
],
[
1,
0,
0.0103,
0.0034,
0,
0.... | [
"import sys",
"import os.path",
"import copy as copyModule",
"from ConfigParser import ConfigParser",
"import re",
"from tokenize import Intnumber, Floatnumber, Number",
"from types import *",
"import types",
"import new",
"import time",
"from StringIO import StringIO # not cStringIO because of ... |
# $Id: _SkeletonPage.py,v 1.13 2002/10/01 17:52:02 tavis_rudd Exp $
"""A baseclass for the SkeletonPage template
Meta-Data
==========
Author: Tavis Rudd <tavis@damnsimple.com>,
Version: $Revision: 1.13 $
Start Date: 2001/04/05
Last Revision Date: $Date: 2002/10/01 17:52:02 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.13 $"[11:-2]
##################################################
## DEPENDENCIES ##
import time, types, os, sys
# intra-package imports ...
from Cheetah.Template import Template
##################################################
## GLOBALS AND CONSTANTS ##
True = (1==1)
False = (0==1)
##################################################
## CLASSES ##
class _SkeletonPage(Template):
"""A baseclass for the SkeletonPage template"""
docType = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" ' + \
'"http://www.w3.org/TR/html4/loose.dtd">'
# docType = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" ' + \
#'"http://www.w3.org/TR/xhtml1l/DTD/transitional.dtd">'
title = ''
siteDomainName = 'www.example.com'
siteCredits = 'Designed & Implemented by Tavis Rudd'
siteCopyrightName = "Tavis Rudd"
htmlTag = '<html>'
def __init__(self, *args, **KWs):
Template.__init__(self, *args, **KWs)
self._metaTags = {'HTTP-EQUIV':{'keywords': 'Cheetah',
'Content-Type': 'text/html; charset=iso-8859-1',
},
'NAME':{'generator':'Cheetah: The Python-Powered Template Engine'}
}
# metaTags = {'HTTP_EQUIV':{'test':1234}, 'NAME':{'test':1234,'test2':1234} }
self._stylesheets = {}
# stylesheets = {'.cssClassName':'stylesheetCode'}
self._stylesheetsOrder = []
# stylesheetsOrder = ['.cssClassName',]
self._stylesheetLibs = {}
# stylesheetLibs = {'libName':'libSrcPath'}
self._javascriptLibs = {}
self._javascriptTags = {}
# self._javascriptLibs = {'libName':'libSrcPath'}
self._bodyTagAttribs = {}
def metaTags(self):
"""Return a formatted vesion of the self._metaTags dictionary, using the
formatMetaTags function from Cheetah.Macros.HTML"""
return self.formatMetaTags(self._metaTags)
def stylesheetTags(self):
"""Return a formatted version of the self._stylesheetLibs and
self._stylesheets dictionaries. The keys in self._stylesheets must
be listed in the order that they should appear in the list
self._stylesheetsOrder, to ensure that the style rules are defined in
the correct order."""
stylesheetTagsTxt = ''
for title, src in self._stylesheetLibs.items():
stylesheetTagsTxt += '<link rel="stylesheet" type="text/css" href="' + str(src) + '" />\n'
if not self._stylesheetsOrder:
return stylesheetTagsTxt
stylesheetTagsTxt += '<style type="text/css"><!--\n'
for identifier in self._stylesheetsOrder:
if identifier not in self._stylesheets:
warning = '# the identifier ' + identifier + \
'was in stylesheetsOrder, but not in stylesheets'
print(warning)
stylesheetTagsTxt += warning
continue
attribsDict = self._stylesheets[identifier]
cssCode = ''
attribCode = ''
for k, v in attribsDict.items():
attribCode += str(k) + ': ' + str(v) + '; '
attribCode = attribCode[:-2] # get rid of the last semicolon
cssCode = '\n' + identifier + ' {' + attribCode + '}'
stylesheetTagsTxt += cssCode
stylesheetTagsTxt += '\n//--></style>\n'
return stylesheetTagsTxt
def javascriptTags(self):
"""Return a formatted version of the javascriptTags and
javascriptLibs dictionaries. Each value in javascriptTags
should be a either a code string to include, or a list containing the
JavaScript version number and the code string. The keys can be anything.
The same applies for javascriptLibs, but the string should be the
SRC filename rather than a code string."""
javascriptTagsTxt = []
for key, details in self._javascriptTags.iteritems():
if not isinstance(details, (list, tuple)):
details = ['', details]
javascriptTagsTxt += ['<script language="JavaScript', str(details[0]),
'" type="text/javascript"><!--\n',
str(details[0]), '\n//--></script>\n']
for key, details in self._javascriptLibs.iteritems():
if not isinstance(details, (list, tuple)):
details = ['', details]
javascriptTagsTxt += ['<script language="JavaScript', str(details[0]),
'" type="text/javascript" src="',
str(details[1]), '" />\n']
return ''.join(javascriptTagsTxt)
def bodyTag(self):
"""Create a body tag from the entries in the dict bodyTagAttribs."""
return self.formHTMLTag('body', self._bodyTagAttribs)
def imgTag(self, src, alt='', width=None, height=None, border=0):
"""Dynamically generate an image tag. Cheetah will try to convert the
src argument to a WebKit serverSidePath relative to the servlet's
location. If width and height aren't specified they are calculated using
PIL or ImageMagick if available."""
src = self.normalizePath(src)
if not width or not height:
try: # see if the dimensions can be calc'd with PIL
import Image
im = Image.open(src)
calcWidth, calcHeight = im.size
del im
if not width: width = calcWidth
if not height: height = calcHeight
except:
try: # try imageMagick instead
calcWidth, calcHeight = os.popen(
'identify -format "%w,%h" ' + src).read().split(',')
if not width: width = calcWidth
if not height: height = calcHeight
except:
pass
if width and height:
return ''.join(['<img src="', src, '" width="', str(width), '" height="', str(height),
'" alt="', alt, '" border="', str(border), '" />'])
elif width:
return ''.join(['<img src="', src, '" width="', str(width),
'" alt="', alt, '" border="', str(border), '" />'])
elif height:
return ''.join(['<img src="', src, '" height="', str(height),
'" alt="', alt, '" border="', str(border), '" />'])
else:
return ''.join(['<img src="', src, '" alt="', alt, '" border="', str(border), '" />'])
def currentYr(self):
"""Return a string representing the current yr."""
return time.strftime("%Y", time.localtime(time.time()))
def currentDate(self, formatString="%b %d, %Y"):
"""Return a string representing the current localtime."""
return time.strftime(formatString, time.localtime(time.time()))
def spacer(self, width=1,height=1):
return '<img src="spacer.gif" width="%s" height="%s" alt="" />'% (str(width), str(height))
def formHTMLTag(self, tagName, attributes={}):
"""returns a string containing an HTML <tag> """
tagTxt = ['<', tagName.lower()]
for name, val in attributes.items():
tagTxt += [' ', name.lower(), '="', str(val), '"']
tagTxt.append('>')
return ''.join(tagTxt)
def formatMetaTags(self, metaTags):
"""format a dict of metaTag definitions into an HTML version"""
metaTagsTxt = []
if 'HTTP-EQUIV' in metaTags:
for http_equiv, contents in metaTags['HTTP-EQUIV'].items():
metaTagsTxt += ['<meta http-equiv="', str(http_equiv), '" content="',
str(contents), '" />\n']
if 'NAME' in metaTags:
for name, contents in metaTags['NAME'].items():
metaTagsTxt += ['<meta name="', str(name), '" content="', str(contents),
'" />\n']
return ''.join(metaTagsTxt)
| [
[
8,
0,
0.0282,
0.0423,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0516,
0.0047,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0563,
0.0047,
0,
0.66,
... | [
"\"\"\"A baseclass for the SkeletonPage template\n\nMeta-Data\n==========\nAuthor: Tavis Rudd <tavis@damnsimple.com>,\nVersion: $Revision: 1.13 $\nStart Date: 2001/04/05\nLast Revision Date: $Date: 2002/10/01 17:52:02 $",
"__author__ = \"Tavis Rudd <tavis@damnsimple.com>\"",
"__revision__ = \"$Revision: 1.13 $\... |
# $Id: CacheRegion.py,v 1.3 2006/01/28 04:19:30 tavis_rudd Exp $
'''
Cache holder classes for Cheetah:
Cache regions are defined using the #cache Cheetah directive. Each
cache region can be viewed as a dictionary (keyed by cacheRegionID)
handling at least one cache item (the default one). It's possible to add
cacheItems in a region by using the `varyBy` #cache directive parameter as
in the following example::
#def getArticle
this is the article content.
#end def
#cache varyBy=$getArticleID()
$getArticle($getArticleID())
#end cache
The code above will generate a CacheRegion and add new cacheItem for each value
of $getArticleID().
'''
try:
from hashlib import md5
except ImportError:
from md5 import md5
import time
import Cheetah.CacheStore
class CacheItem(object):
'''
A CacheItem is a container storing:
- cacheID (string)
- refreshTime (timestamp or None) : last time the cache was refreshed
- data (string) : the content of the cache
'''
def __init__(self, cacheItemID, cacheStore):
self._cacheItemID = cacheItemID
self._cacheStore = cacheStore
self._refreshTime = None
self._expiryTime = 0
def hasExpired(self):
return (self._expiryTime and time.time() > self._expiryTime)
def setExpiryTime(self, time):
self._expiryTime = time
def getExpiryTime(self):
return self._expiryTime
def setData(self, data):
self._refreshTime = time.time()
self._cacheStore.set(self._cacheItemID, data, self._expiryTime)
def getRefreshTime(self):
return self._refreshTime
def getData(self):
assert self._refreshTime
return self._cacheStore.get(self._cacheItemID)
def renderOutput(self):
"""Can be overridden to implement edge-caching"""
return self.getData() or ""
def clear(self):
self._cacheStore.delete(self._cacheItemID)
self._refreshTime = None
class _CacheDataStoreWrapper(object):
def __init__(self, dataStore, keyPrefix):
self._dataStore = dataStore
self._keyPrefix = keyPrefix
def get(self, key):
return self._dataStore.get(self._keyPrefix+key)
def delete(self, key):
self._dataStore.delete(self._keyPrefix+key)
def set(self, key, val, time=0):
self._dataStore.set(self._keyPrefix+key, val, time=time)
class CacheRegion(object):
'''
A `CacheRegion` stores some `CacheItem` instances.
This implementation stores the data in the memory of the current process.
If you need a more advanced data store, create a cacheStore class that works
with Cheetah's CacheStore protocol and provide it as the cacheStore argument
to __init__. For example you could use
Cheetah.CacheStore.MemcachedCacheStore, a wrapper around the Python
memcached API (http://www.danga.com/memcached).
'''
_cacheItemClass = CacheItem
def __init__(self, regionID, templateCacheIdPrefix='', cacheStore=None):
self._isNew = True
self._regionID = regionID
self._templateCacheIdPrefix = templateCacheIdPrefix
if not cacheStore:
cacheStore = Cheetah.CacheStore.MemoryCacheStore()
self._cacheStore = cacheStore
self._wrappedCacheDataStore = _CacheDataStoreWrapper(
cacheStore, keyPrefix=templateCacheIdPrefix+':'+regionID+':')
self._cacheItems = {}
def isNew(self):
return self._isNew
def clear(self):
" drop all the caches stored in this cache region "
for cacheItemId in self._cacheItems.keys():
cacheItem = self._cacheItems[cacheItemId]
cacheItem.clear()
del self._cacheItems[cacheItemId]
def getCacheItem(self, cacheItemID):
""" Lazy access to a cacheItem
Try to find a cache in the stored caches. If it doesn't
exist, it's created.
Returns a `CacheItem` instance.
"""
cacheItemID = md5(str(cacheItemID)).hexdigest()
if cacheItemID not in self._cacheItems:
cacheItem = self._cacheItemClass(
cacheItemID=cacheItemID, cacheStore=self._wrappedCacheDataStore)
self._cacheItems[cacheItemID] = cacheItem
self._isNew = False
return self._cacheItems[cacheItemID]
| [
[
8,
0,
0.0809,
0.1397,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
7,
0,
0.1728,
0.0294,
0,
0.66,
0.1667,
0,
0,
1,
0,
0,
0,
0,
0
],
[
1,
1,
0.1691,
0.0074,
1,
0.64,
... | [
"'''\nCache holder classes for Cheetah:\n\nCache regions are defined using the #cache Cheetah directive. Each\ncache region can be viewed as a dictionary (keyed by cacheRegionID)\nhandling at least one cache item (the default one). It's possible to add\ncacheItems in a region by using the `varyBy` #cache directive ... |
# $Id: CheetahWrapper.py,v 1.26 2007/10/02 01:22:04 tavis_rudd Exp $
"""Cheetah command-line interface.
2002-09-03 MSO: Total rewrite.
2002-09-04 MSO: Bugfix, compile command was using wrong output ext.
2002-11-08 MSO: Another rewrite.
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com> and Mike Orr <sluggoster@gmail.com>>
Version: $Revision: 1.26 $
Start Date: 2001/03/30
Last Revision Date: $Date: 2007/10/02 01:22:04 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com> and Mike Orr <sluggoster@gmail.com>"
__revision__ = "$Revision: 1.26 $"[11:-2]
import getopt, glob, os, pprint, re, shutil, sys
import cPickle as pickle
from optparse import OptionParser
from Cheetah.Version import Version
from Cheetah.Template import Template, DEFAULT_COMPILER_SETTINGS
from Cheetah.Utils.Misc import mkdirsWithPyInitFiles
optionDashesRE = re.compile( R"^-{1,2}" )
moduleNameRE = re.compile( R"^[a-zA-Z_][a-zA-Z_0-9]*$" )
def fprintfMessage(stream, format, *args):
if format[-1:] == '^':
format = format[:-1]
else:
format += '\n'
if args:
message = format % args
else:
message = format
stream.write(message)
class Error(Exception):
pass
class Bundle:
"""Wrap the source, destination and backup paths in one neat little class.
Used by CheetahWrapper.getBundles().
"""
def __init__(self, **kw):
self.__dict__.update(kw)
def __repr__(self):
return "<Bundle %r>" % self.__dict__
##################################################
## USAGE FUNCTION & MESSAGES
def usage(usageMessage, errorMessage="", out=sys.stderr):
"""Write help text, an optional error message, and abort the program.
"""
out.write(WRAPPER_TOP)
out.write(usageMessage)
exitStatus = 0
if errorMessage:
out.write('\n')
out.write("*** USAGE ERROR ***: %s\n" % errorMessage)
exitStatus = 1
sys.exit(exitStatus)
WRAPPER_TOP = """\
__ ____________ __
\ \/ \/ /
\/ * * \/ CHEETAH %(Version)s Command-Line Tool
\ | /
\ ==----== / by Tavis Rudd <tavis@damnsimple.com>
\__________/ and Mike Orr <sluggoster@gmail.com>
""" % globals()
HELP_PAGE1 = """\
USAGE:
------
cheetah compile [options] [FILES ...] : Compile template definitions
cheetah fill [options] [FILES ...] : Fill template definitions
cheetah help : Print this help message
cheetah options : Print options help message
cheetah test [options] : Run Cheetah's regression tests
: (same as for unittest)
cheetah version : Print Cheetah version number
You may abbreviate the command to the first letter; e.g., 'h' == 'help'.
If FILES is a single "-", read standard input and write standard output.
Run "cheetah options" for the list of valid options.
"""
##################################################
## CheetahWrapper CLASS
class CheetahWrapper(object):
    """Command-line front end for Cheetah.
    Parses the sub-command and options from sys.argv, then dispatches to
    one of the command methods: compile, fill, help, options, test, version.
    """
    # Whether existing output files are copied aside before being replaced.
    MAKE_BACKUPS = True
    BACKUP_SUFFIX = ".bak"
    # Caches filled lazily by _getTemplateClass()/_getCompilerSettings().
    _templateClass = None
    _compilerSettings = None
    def __init__(self):
        # All of these are populated by main()/parseOpts(); they are kept as
        # instance state so the command methods can share them.
        self.progName = None
        self.command = None
        self.opts = None
        self.pathArgs = None
        self.sourceFiles = []
        self.searchList = []
        self.parser = None
    ##################################################
    ## MAIN ROUTINE
    def main(self, argv=None):
        """The main program controller."""
        if argv is None:
            argv = sys.argv
        # Step 1: Determine the command and arguments.
        try:
            self.progName = progName = os.path.basename(argv[0])
            self.command = command = optionDashesRE.sub("", argv[1])
            if command == 'test':
                # 'test' forwards its remaining args straight to unittest.
                self.testOpts = argv[2:]
            else:
                self.parseOpts(argv[2:])
        except IndexError:
            usage(HELP_PAGE1, "not enough command-line arguments")
        # Step 2: Call the command
        meths = (self.compile, self.fill, self.help, self.options,
            self.test, self.version)
        for meth in meths:
            methName = meth.__name__
            # Or meth.im_func.func_name
            # Or meth.func_name (Python >= 2.1 only, sometimes works on 2.0)
            methInitial = methName[0]
            # Commands may be abbreviated to their first letter ('c' == 'compile').
            if command in (methName, methInitial):
                sys.argv[0] += (" " + methName)
                # @@MO: I don't necessarily agree sys.argv[0] should be
                # modified.
                meth()
                return
        # If none of the commands matched.
        usage(HELP_PAGE1, "unknown command '%s'" % command)
    def parseOpts(self, args):
        """Parse the compile/fill command options, storing the result on
        self.opts and the remaining path arguments on self.pathArgs.
        """
        C, D, W = self.chatter, self.debug, self.warn
        self.isCompile = isCompile = self.command[0] == 'c'
        # Compiling produces Python modules; filling produces HTML by default.
        defaultOext = isCompile and ".py" or ".html"
        self.parser = OptionParser()
        pao = self.parser.add_option
        pao("--idir", action="store", dest="idir", default='', help='Input directory (defaults to current directory)')
        pao("--odir", action="store", dest="odir", default="", help='Output directory (defaults to current directory)')
        pao("--iext", action="store", dest="iext", default=".tmpl", help='File input extension (defaults: compile: .tmpl, fill: .tmpl)')
        pao("--oext", action="store", dest="oext", default=defaultOext, help='File output extension (defaults: compile: .py, fill: .html)')
        pao("-R", action="store_true", dest="recurse", default=False, help='Recurse through subdirectories looking for input files')
        pao("--stdout", "-p", action="store_true", dest="stdout", default=False, help='Send output to stdout instead of writing to a file')
        pao("--quiet", action="store_false", dest="verbose", default=True, help='Do not print informational messages to stdout')
        pao("--debug", action="store_true", dest="debug", default=False, help='Print diagnostic/debug information to stderr')
        pao("--env", action="store_true", dest="env", default=False, help='Pass the environment into the search list')
        pao("--pickle", action="store", dest="pickle", default="", help='Unpickle FILE and pass it through in the search list')
        pao("--flat", action="store_true", dest="flat", default=False, help='Do not build destination subdirectories')
        pao("--nobackup", action="store_true", dest="nobackup", default=False, help='Do not make backup files when generating new ones')
        pao("--settings", action="store", dest="compilerSettingsString", default=None, help='String of compiler settings to pass through, e.g. --settings="useNameMapper=False,useFilters=False"')
        pao('--print-settings', action='store_true', dest='print_settings', help='Print out the list of available compiler settings')
        pao("--templateAPIClass", action="store", dest="templateClassName", default=None, help='Name of a subclass of Cheetah.Template.Template to use for compilation, e.g. MyTemplateClass')
        pao("--parallel", action="store", type="int", dest="parallel", default=1, help='Compile/fill templates in parallel, e.g. --parallel=4')
        pao('--shbang', dest='shbang', default='#!/usr/bin/env python', help='Specify the shbang to place at the top of compiled templates, e.g. --shbang="#!/usr/bin/python2.6"')
        opts, files = self.parser.parse_args(args)
        self.opts = opts
        if sys.platform == "win32":
            # The Windows shell does not expand globs, so do it ourselves;
            # specs that match nothing are passed through unchanged.
            new_files = []
            for spec in files:
                file_list = glob.glob(spec)
                if file_list:
                    new_files.extend(file_list)
                else:
                    new_files.append(spec)
            files = new_files
        self.pathArgs = files
        D("""\
cheetah compile %s
Options are
%s
Files are %s""", args, pprint.pformat(vars(opts)), files)
        if opts.print_settings:
            print()
            print('>> Available Cheetah compiler settings:')
            from Cheetah.Compiler import _DEFAULT_COMPILER_SETTINGS
            listing = _DEFAULT_COMPILER_SETTINGS
            # NOTE(review): .sort() here assumes _DEFAULT_COMPILER_SETTINGS is
            # a list of tuples — confirm against Cheetah.Compiler.
            listing.sort(key=lambda l: l[0][0].lower())
            for l in listing:
                print('\t%s (default: "%s")\t%s' % l)
            sys.exit(0)
        #cleanup trailing path separators
        seps = [sep for sep in [os.sep, os.altsep] if sep]
        for attr in ['idir', 'odir']:
            for sep in seps:
                path = getattr(opts, attr, None)
                if path and path.endswith(sep):
                    path = path[:-len(sep)]
                    setattr(opts, attr, path)
                    break
        self._fixExts()
        if opts.env:
            # Environment goes first so it has search-list priority.
            self.searchList.insert(0, os.environ)
        if opts.pickle:
            f = open(opts.pickle, 'rb')
            unpickled = pickle.load(f)
            f.close()
            self.searchList.insert(0, unpickled)
    ##################################################
    ## COMMAND METHODS
    def compile(self):
        """'compile' command: translate templates to Python modules."""
        self._compileOrFill()
    def fill(self):
        """'fill' command: render templates to their output files."""
        # The import hook lets filled templates import other .tmpl files.
        from Cheetah.ImportHooks import install
        install()
        self._compileOrFill()
    def help(self):
        """'help' command: print usage to stdout and exit."""
        usage(HELP_PAGE1, "", sys.stdout)
    def options(self):
        """'options' command: print the optparse-generated option help."""
        return self.parser.print_help()
    def test(self):
        """'test' command: run Cheetah's regression test suite."""
        # @@MO: Ugly kludge.
        TEST_WRITE_FILENAME = 'cheetah_test_file_creation_ability.tmp'
        try:
            f = open(TEST_WRITE_FILENAME, 'w')
        # NOTE(review): bare except — also catches KeyboardInterrupt etc.
        except:
            sys.exit("""\
Cannot run the tests because you don't have write permission in the current
directory. The tests need to create temporary files. Change to a directory
you do have write permission to and re-run the tests.""")
        else:
            f.close()
            os.remove(TEST_WRITE_FILENAME)
        # @@MO: End ugly kludge.
        from Cheetah.Tests import Test
        import unittest
        verbosity = 1
        if '-q' in self.testOpts:
            verbosity = 0
        if '-v' in self.testOpts:
            verbosity = 2
        runner = unittest.TextTestRunner(verbosity=verbosity)
        runner.run(unittest.TestSuite(Test.suites))
    def version(self):
        """'version' command: print the Cheetah version number."""
        print(Version)
    # If you add a command, also add it to the 'meths' variable in main().
    ##################################################
    ## LOGGING METHODS
    def chatter(self, format, *args):
        """Print a verbose message to stdout. But don't if .opts.stdout is
        true or .opts.verbose is false.
        """
        if self.opts.stdout or not self.opts.verbose:
            return
        fprintfMessage(sys.stdout, format, *args)
    def debug(self, format, *args):
        """Print a debugging message to stderr, but don't if .debug is
        false.
        """
        if self.opts.debug:
            fprintfMessage(sys.stderr, format, *args)
    def warn(self, format, *args):
        """Always print a warning message to stderr.
        """
        fprintfMessage(sys.stderr, format, *args)
    def error(self, format, *args):
        """Always print an error message to stderr and exit with an error code.
        """
        fprintfMessage(sys.stderr, format, *args)
        sys.exit(1)
    ##################################################
    ## HELPER METHODS
    def _fixExts(self):
        """Normalize the input/output extensions to start with a dot."""
        assert self.opts.oext, "oext is empty!"
        iext, oext = self.opts.iext, self.opts.oext
        if iext and not iext.startswith("."):
            self.opts.iext = "." + iext
        if oext and not oext.startswith("."):
            self.opts.oext = "." + oext
    def _compileOrFill(self):
        """Shared driver for the compile and fill commands: expand the
        source file arguments into bundles, then process each bundle
        (optionally forking for --parallel).
        """
        C, D, W = self.chatter, self.debug, self.warn
        opts, files = self.opts, self.pathArgs
        if files == ["-"]:
            self._compileOrFillStdin()
            return
        elif not files and opts.recurse:
            which = opts.idir and "idir" or "current"
            C("Drilling down recursively from %s directory.", which)
            sourceFiles = []
            dir = os.path.join(self.opts.idir, os.curdir)
            # NOTE(review): os.path.walk was removed in Python 3 (use os.walk).
            os.path.walk(dir, self._expandSourceFilesWalk, sourceFiles)
        elif not files:
            usage(HELP_PAGE1, "Neither files nor -R specified!")
        else:
            sourceFiles = self._expandSourceFiles(files, opts.recurse, True)
        sourceFiles = [os.path.normpath(x) for x in sourceFiles]
        D("All source files found: %s", sourceFiles)
        bundles = self._getBundles(sourceFiles)
        D("All bundles: %s", pprint.pformat(bundles))
        if self.opts.flat:
            self._checkForCollisions(bundles)
        # In parallel mode a new process is forked for each template
        # compilation, out of a pool of size self.opts.parallel. This is not
        # really optimal in all cases (e.g. probably wasteful for small
        # templates), but seems to work well in real life for me.
        #
        # It also won't work for Windows users, but I'm not going to lose any
        # sleep over that.
        if self.opts.parallel > 1:
            bad_child_exit = 0
            pid_pool = set()
            def child_wait():
                # Reap one child and return its exit status.
                pid, status = os.wait()
                pid_pool.remove(pid)
                return os.WEXITSTATUS(status)
            while bundles:
                b = bundles.pop()
                pid = os.fork()
                if pid:
                    pid_pool.add(pid)
                else:
                    # Child process: do one bundle, then exit.
                    self._compileOrFillBundle(b)
                    sys.exit(0)
                if len(pid_pool) == self.opts.parallel:
                    bad_child_exit = child_wait()
                    if bad_child_exit:
                        break
            # Drain the remaining children, remembering the first failure.
            while pid_pool:
                child_exit = child_wait()
                if not bad_child_exit:
                    bad_child_exit = child_exit
            if bad_child_exit:
                sys.exit("Child process failed, exited with code %d" % bad_child_exit)
        else:
            for b in bundles:
                self._compileOrFillBundle(b)
    def _checkForCollisions(self, bundles):
        """Check for multiple source paths writing to the same destination
        path.
        """
        C, D, W = self.chatter, self.debug, self.warn
        isError = False
        # Map each destination to the list of sources that produce it.
        dstSources = {}
        for b in bundles:
            if b.dst in dstSources:
                dstSources[b.dst].append(b.src)
            else:
                dstSources[b.dst] = [b.src]
        keys = sorted(dstSources.keys())
        for dst in keys:
            sources = dstSources[dst]
            if len(sources) > 1:
                isError = True
                sources.sort()
                fmt = "Collision: multiple source files %s map to one destination file %s"
                W(fmt, sources, dst)
        if isError:
            what = self.isCompile and "Compilation" or "Filling"
            sys.exit("%s aborted due to collisions" % what)
    def _expandSourceFilesWalk(self, arg, dir, files):
        """Recursion extension for .expandSourceFiles().
        This method is a callback for os.path.walk().
        'arg' is a list to which successful paths will be appended.
        """
        iext = self.opts.iext
        for f in files:
            path = os.path.join(dir, f)
            if path.endswith(iext) and os.path.isfile(path):
                arg.append(path)
            elif os.path.islink(path) and os.path.isdir(path):
                # walk() does not follow symlinked dirs; recurse manually.
                os.path.walk(path, self._expandSourceFilesWalk, arg)
            # If is directory, do nothing; 'walk' will eventually get it.
    def _expandSourceFiles(self, files, recurse, addIextIfMissing):
        """Calculate source paths from 'files' by applying the
        command-line options.
        """
        C, D, W = self.chatter, self.debug, self.warn
        idir = self.opts.idir
        iext = self.opts.iext
        # NOTE(review): this rebinding shadows the 'files' parameter; the
        # loop below iterates self.pathArgs instead — confirm intended.
        files = []
        for f in self.pathArgs:
            oldFilesLen = len(files)
            D("Expanding %s", f)
            path = os.path.join(idir, f)
            pathWithExt = path + iext # May or may not be valid.
            if os.path.isdir(path):
                if recurse:
                    os.path.walk(path, self._expandSourceFilesWalk, files)
                else:
                    raise Error("source file '%s' is a directory" % path)
            elif os.path.isfile(path):
                files.append(path)
            elif (addIextIfMissing and not path.endswith(iext) and
                os.path.isfile(pathWithExt)):
                files.append(pathWithExt)
                # Do not recurse directories discovered by iext appending.
            elif os.path.exists(path):
                W("Skipping source file '%s', not a plain file.", path)
            else:
                W("Skipping source file '%s', not found.", path)
            if len(files) > oldFilesLen:
                D(" ... found %s", files[oldFilesLen:])
        return files
    def _getBundles(self, sourceFiles):
        """Build a Bundle (src/dst/bak paths) for every source file,
        honouring --flat, --idir and --odir.
        """
        flat = self.opts.flat
        idir = self.opts.idir
        iext = self.opts.iext
        nobackup = self.opts.nobackup
        odir = self.opts.odir
        oext = self.opts.oext
        idirSlash = idir + os.sep
        bundles = []
        for src in sourceFiles:
            # 'base' is the subdirectory plus basename.
            base = src
            if idir and src.startswith(idirSlash):
                base = src[len(idirSlash):]
            if iext and base.endswith(iext):
                base = base[:-len(iext)]
            basename = os.path.basename(base)
            if flat:
                dst = os.path.join(odir, basename + oext)
            else:
                dbn = basename
                if odir and base.startswith(os.sep):
                    # Absolute source path: strip any overlap with odir so
                    # the destination does not duplicate the prefix.
                    odd = odir
                    while odd != '':
                        idx = base.find(odd)
                        if idx == 0:
                            dbn = base[len(odd):]
                            if dbn[0] == '/':
                                dbn = dbn[1:]
                            break
                        odd = os.path.dirname(odd)
                        if odd == '/':
                            break
                    dst = os.path.join(odir, dbn + oext)
                else:
                    dst = os.path.join(odir, base + oext)
            bak = dst + self.BACKUP_SUFFIX
            b = Bundle(src=src, dst=dst, bak=bak, base=base, basename=basename)
            bundles.append(b)
        return bundles
    def _getTemplateClass(self):
        """Return the Template subclass named by --templateAPIClass, caching
        it; defaults to Cheetah.Template.Template.
        """
        C, D, W = self.chatter, self.debug, self.warn
        modname = None
        if self._templateClass:
            return self._templateClass
        modname = self.opts.templateClassName
        if not modname:
            return Template
        p = modname.rfind('.')
        if ':' not in modname:
            self.error('The value of option --templateAPIClass is invalid\n'
                'It must be in the form "module:class", '
                'e.g. "Cheetah.Template:Template"')
        modname, classname = modname.split(':')
        C('using --templateAPIClass=%s:%s'%(modname, classname))
        if p >= 0:
            # Dotted module path: import the leaf submodule.
            mod = getattr(__import__(modname[:p], {}, {}, [modname[p+1:]]), modname[p+1:])
        else:
            mod = __import__(modname, {}, {}, [])
        klass = getattr(mod, classname, None)
        if klass:
            self._templateClass = klass
            return klass
        else:
            self.error('**Template class specified in option --templateAPIClass not found\n'
                '**Falling back on Cheetah.Template:Template')
    def _getCompilerSettings(self):
        """Parse --settings into a keyword dict, validating the keys against
        DEFAULT_COMPILER_SETTINGS; result is cached.
        """
        if self._compilerSettings:
            return self._compilerSettings
        def getkws(**kws):
            return kws
        if self.opts.compilerSettingsString:
            try:
                # NOTE(review): exec of user-supplied text — only safe because
                # this is the user's own command line.  Also, in Python 3
                # exec() cannot rebind function locals, so the 'settings'
                # lookup below may raise NameError — confirm on py3.
                exec('settings = getkws(%s)'%self.opts.compilerSettingsString)
            except:
                self.error("There's an error in your --settings option."
                    "It must be valid Python syntax.\n"
                    +"    --settings='%s'\n"%self.opts.compilerSettingsString
                    +"  %s: %s"%sys.exc_info()[:2]
                    )
            validKeys = DEFAULT_COMPILER_SETTINGS.keys()
            # NOTE(review): 'k' is referenced outside the comprehension below;
            # that is a NameError on Python 3 — confirm intended scope.
            if [k for k in settings.keys() if k not in validKeys]:
                self.error(
                    'The --setting "%s" is not a valid compiler setting name.'%k)
            self._compilerSettings = settings
            return settings
        else:
            return {}
    def _compileOrFillStdin(self):
        """Handle the special '-' file argument: read the template from
        stdin and write the result to stdout (no backups, no bundles).
        """
        TemplateClass = self._getTemplateClass()
        compilerSettings = self._getCompilerSettings()
        if self.isCompile:
            pysrc = TemplateClass.compile(file=sys.stdin,
                compilerSettings=compilerSettings,
                returnAClass=False)
            output = pysrc
        else:
            output = str(TemplateClass(file=sys.stdin, compilerSettings=compilerSettings))
        sys.stdout.write(output)
    def _compileOrFillBundle(self, b):
        """Compile or fill a single Bundle: back up any existing output,
        create destination directories, then write the result (or stream it
        to stdout with --stdout).
        """
        C, D, W = self.chatter, self.debug, self.warn
        TemplateClass = self._getTemplateClass()
        compilerSettings = self._getCompilerSettings()
        src = b.src
        dst = b.dst
        base = b.base
        basename = b.basename
        dstDir = os.path.dirname(dst)
        what = self.isCompile and "Compiling" or "Filling"
        C("%s %s -> %s^", what, src, dst) # No trailing newline.
        if os.path.exists(dst) and not self.opts.nobackup:
            bak = b.bak
            C(" (backup %s)", bak) # On same line as previous message.
        else:
            bak = None
            C("")
        if self.isCompile:
            # Compiled templates become modules, so the name must be legal.
            if not moduleNameRE.match(basename):
                tup = basename, src
                raise Error("""\
%s: base name %s contains invalid characters. It must
be named according to the same rules as Python modules.""" % tup)
            pysrc = TemplateClass.compile(file=src, returnAClass=False,
                moduleName=basename,
                className=basename,
                commandlineopts=self.opts,
                compilerSettings=compilerSettings)
            output = pysrc
        else:
            #output = str(TemplateClass(file=src, searchList=self.searchList))
            tclass = TemplateClass.compile(file=src, compilerSettings=compilerSettings)
            output = str(tclass(searchList=self.searchList))
        if bak:
            shutil.copyfile(dst, bak)
        if dstDir and not os.path.exists(dstDir):
            if self.isCompile:
                # Output packages need __init__.py files to be importable.
                mkdirsWithPyInitFiles(dstDir)
            else:
                os.makedirs(dstDir)
        if self.opts.stdout:
            sys.stdout.write(output)
        else:
            f = open(dst, 'w')
            f.write(output)
            f.close()
# Called when invoked as `cheetah`
def _cheetah():
    """Console-script entry point for the `cheetah` command."""
    wrapper = CheetahWrapper()
    wrapper.main()
# Called when invoked as `cheetah-compile`
def _cheetah_compile():
    """Console-script entry point for `cheetah-compile`: inject the
    'compile' sub-command, then delegate to the normal main().
    """
    sys.argv.insert(1, "compile")
    wrapper = CheetahWrapper()
    wrapper.main()
##################################################
## if run from the command line
# Support direct invocation (`python CheetahWrapper.py ...`) in addition to
# the installed console scripts above.
if __name__ == '__main__': CheetahWrapper().main()
# vim: shiftwidth=4 tabstop=4 expandtab
| [
[
8,
0,
0.0127,
0.0206,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0237,
0.0016,
0,
0.66,
0.05,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0253,
0.0016,
0,
0.66,
... | [
"\"\"\"Cheetah command-line interface.\n\n2002-09-03 MSO: Total rewrite.\n2002-09-04 MSO: Bugfix, compile command was using wrong output ext.\n2002-11-08 MSO: Another rewrite.\n\nMeta-Data\n================================================================================",
"__author__ = \"Tavis Rudd <tavis@damnsim... |
'''
Cheetah is an open source template engine and code generation tool.
It can be used standalone or combined with other tools and frameworks. Web
development is its principal use, but Cheetah is very flexible and is also being
used to generate C++ game code, Java, sql, form emails and even Python code.
Homepage
http://www.cheetahtemplate.org/
Documentation
http://cheetahtemplate.org/learn.html
Mailing list
cheetahtemplate-discuss@lists.sourceforge.net
Subscribe at
http://lists.sourceforge.net/lists/listinfo/cheetahtemplate-discuss
'''
from Version import *
| [
[
8,
0,
0.475,
0.9,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
1,
0.05,
0,
0.66,
1,
444,
0,
1,
0,
0,
444,
0,
0
]
] | [
"'''\nCheetah is an open source template engine and code generation tool.\n\nIt can be used standalone or combined with other tools and frameworks. Web\ndevelopment is its principle use, but Cheetah is very flexible and is also being\nused to generate C++ game code, Java, sql, form emails and even Python code.\n\nH... |
#!/usr/bin/env python
'''
Core module of Cheetah's Unit-testing framework
TODO
================================================================================
# combo tests
# negative test cases for expected exceptions
# black-box vs clear-box testing
# do some tests that run the Template for long enough to check that the refresh code works
'''
import sys
import unittest
from Cheetah.Tests import SyntaxAndOutput
from Cheetah.Tests import NameMapper
from Cheetah.Tests import Misc
from Cheetah.Tests import Filters
from Cheetah.Tests import Template
from Cheetah.Tests import Cheps
from Cheetah.Tests import Parser
from Cheetah.Tests import Regressions
from Cheetah.Tests import Unicode
from Cheetah.Tests import CheetahWrapper
from Cheetah.Tests import Analyzer
SyntaxAndOutput.install_eols()
# Master list of test suites, one per test module; Cheps is commented out.
suites = [
    unittest.findTestCases(SyntaxAndOutput),
    unittest.findTestCases(NameMapper),
    unittest.findTestCases(Filters),
    unittest.findTestCases(Template),
    #unittest.findTestCases(Cheps),
    unittest.findTestCases(Regressions),
    unittest.findTestCases(Unicode),
    unittest.findTestCases(Misc),
    unittest.findTestCases(Parser),
    unittest.findTestCases(Analyzer),
]
# CheetahWrapper's tests fork subprocesses, which Jython cannot do.
if not sys.platform.startswith('java'):
    suites.append(unittest.findTestCases(CheetahWrapper))
if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    # Passing 'xml' on the command line switches to JUnit-style XML output.
    if 'xml' in sys.argv:
        import xmlrunner
        runner = xmlrunner.XMLTestRunner(filename='Cheetah-Tests.xml')
    results = runner.run(unittest.TestSuite(suites))
| [
[
8,
0,
0.1226,
0.1887,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2453,
0.0189,
0,
0.66,
0.0588,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.2642,
0.0189,
0,
0.66... | [
"'''\nCore module of Cheetah's Unit-testing framework\n\nTODO\n================================================================================\n# combo tests\n# negative test cases for expected exceptions\n# black-box vs clear-box testing",
"import sys",
"import unittest",
"from Cheetah.Tests import SyntaxAn... |
"""
XML Test Runner for PyUnit
"""
# Written by Sebastian Rittau <srittau@jroger.in-berlin.de> and placed in
# the Public Domain. With contributions by Paolo Borelli.
__revision__ = "$Id: /private/python/stdlib/xmlrunner.py 16654 2007-11-12T12:46:35.368945Z srittau $"
import os.path
import re
import sys
import time
import traceback
import unittest
from StringIO import StringIO
from xml.sax.saxutils import escape
from StringIO import StringIO
class _TestInfo(object):
"""Information about a particular test.
Used by _XMLTestResult.
"""
def __init__(self, test, time):
_pieces = test.id().split('.')
(self._class, self._method) = ('.'.join(_pieces[:-1]), _pieces[-1])
self._time = time
self._error = None
self._failure = None
def print_report(self, stream):
"""Print information about this test case in XML format to the
supplied stream.
"""
stream.write(' <testcase classname="%(class)s" name="%(method)s" time="%(time).4f">' % \
{
"class": self._class,
"method": self._method,
"time": self._time,
})
if self._failure != None:
self._print_error(stream, 'failure', self._failure)
if self._error != None:
self._print_error(stream, 'error', self._error)
stream.write('</testcase>\n')
def _print_error(self, stream, tagname, error):
"""Print information from a failure or error to the supplied stream."""
text = escape(str(error[1]))
stream.write('\n')
stream.write(' <%s type="%s">%s\n' \
% (tagname, issubclass(error[0], Exception) and error[0].__name__ or str(error[0]), text))
tb_stream = StringIO()
traceback.print_tb(error[2], None, tb_stream)
stream.write(escape(tb_stream.getvalue()))
stream.write(' </%s>\n' % tagname)
stream.write(' ')
# Module level functions since Python 2.3 doesn't grok decorators
def create_success(test, time):
    """Create a _TestInfo instance for a successful test."""
    info = _TestInfo(test, time)
    return info
def create_failure(test, time, failure):
    """Create a _TestInfo instance for a failed test."""
    record = _TestInfo(test, time)
    record._failure = failure
    return record
def create_error(test, time, error):
    """Create a _TestInfo instance for an erroneous test."""
    record = _TestInfo(test, time)
    record._error = error
    return record
class _XMLTestResult(unittest.TestResult):
    """A test result class that stores result as XML.
    Used by XMLTestRunner.
    """
    def __init__(self, classname):
        unittest.TestResult.__init__(self)
        self._test_name = classname
        self._start_time = None   # wall-clock start of the current test
        self._tests = []          # accumulated _TestInfo records
        self._error = None        # error info for the test in progress
        self._failure = None      # failure info for the test in progress
    def startTest(self, test):
        # Reset per-test state and start the clock.
        unittest.TestResult.startTest(self, test)
        self._error = None
        self._failure = None
        self._start_time = time.time()
    def stopTest(self, test):
        # Record the finished test as a success/failure/error _TestInfo.
        time_taken = time.time() - self._start_time
        unittest.TestResult.stopTest(self, test)
        if self._error:
            info = create_error(test, time_taken, self._error)
        elif self._failure:
            info = create_failure(test, time_taken, self._failure)
        else:
            info = create_success(test, time_taken)
        self._tests.append(info)
    def addError(self, test, err):
        # Remember the error; stopTest() turns it into a record.
        unittest.TestResult.addError(self, test, err)
        self._error = err
    def addFailure(self, test, err):
        # Remember the failure; stopTest() turns it into a record.
        unittest.TestResult.addFailure(self, test, err)
        self._failure = err
    def print_report(self, stream, time_taken, out, err):
        """Prints the XML report to the supplied stream.
        The time the tests took to perform as well as the captured standard
        output and standard error streams must be passed in.
        """
        stream.write('<testsuite errors="%(e)d" failures="%(f)d" ' % \
            { "e": len(self.errors), "f": len(self.failures) })
        stream.write('name="%(n)s" tests="%(t)d" time="%(time).3f">\n' % \
            {
                "n": self._test_name,
                "t": self.testsRun,
                "time": time_taken,
            })
        for info in self._tests:
            info.print_report(stream)
        stream.write('  <system-out><![CDATA[%s]]></system-out>\n' % out)
        stream.write('  <system-err><![CDATA[%s]]></system-err>\n' % err)
        stream.write('</testsuite>\n')
class XMLTestRunner(object):
    """A test runner that stores results in XML format compatible with JUnit.
    XMLTestRunner(stream=None) -> XML test runner
    The XML file is written to the supplied stream. If stream is None, the
    results are stored in a file called TEST-<module>.<class>.xml in the
    current working directory (if not overridden with the path property),
    where <module> and <class> are the module and class name of the test class.
    """
    def __init__(self, *args, **kwargs):
        # Keyword-only configuration: 'stream' wins over 'filename'.
        self._stream = kwargs.get('stream')
        self._filename = kwargs.get('filename')
        self._path = "."
    def run(self, test):
        """Run the given test case or test suite."""
        class_ = test.__class__
        classname = class_.__module__ + "." + class_.__name__
        if self._stream is None:
            filename = "TEST-%s.xml" % classname
            if self._filename:
                filename = self._filename
            # Bug fix: the `file` builtin was removed in Python 3; open()
            # is the portable spelling and behaves identically here.
            stream = open(os.path.join(self._path, filename), "w")
            stream.write('<?xml version="1.0" encoding="utf-8"?>\n')
        else:
            stream = self._stream
        result = _XMLTestResult(classname)
        start_time = time.time()
        # TODO: Python 2.5: Use the with statement
        # Capture stdout/stderr for inclusion in the XML report.
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        sys.stdout = StringIO()
        sys.stderr = StringIO()
        try:
            test(result)
            try:
                out_s = sys.stdout.getvalue()
            except AttributeError:
                # A test replaced sys.stdout with a non-StringIO object.
                out_s = ""
            try:
                err_s = sys.stderr.getvalue()
            except AttributeError:
                err_s = ""
        finally:
            sys.stdout = old_stdout
            sys.stderr = old_stderr
        time_taken = time.time() - start_time
        result.print_report(stream, time_taken, out_s, err_s)
        if self._stream is None:
            stream.close()
        return result
    def _set_path(self, path):
        self._path = path
    path = property(lambda self: self._path, _set_path, None,
            """The path where the XML files are stored.
            This property is ignored when the XML file is written to a file
            stream.""")
class XMLTestRunnerTest(unittest.TestCase):
    # Self-test suite for XMLTestRunner: each case runs a small throwaway
    # TestCase through the runner and compares the produced XML (with times
    # and messages normalized) against a golden string.
    def setUp(self):
        self._stream = StringIO()
    def _try_test_run(self, test_class, expected):
        """Run the test suite against the supplied test class and compare the
        XML result against the expected XML string. Fail if the expected
        string doesn't match the actual string. All time attribute in the
        expected string should have the value "0.000". All error and failure
        messages are reduced to "Foobar".
        """
        runner = XMLTestRunner(self._stream)
        runner.run(unittest.makeSuite(test_class))
        got = self._stream.getvalue()
        # Replace all time="X.YYY" attributes by time="0.000" to enable a
        # simple string comparison.
        got = re.sub(r'time="\d+\.\d+"', 'time="0.000"', got)
        # Likewise, replace all failure and error messages by a simple "Foobar"
        # string.
        got = re.sub(r'(?s)<failure (.*?)>.*?</failure>', r'<failure \1>Foobar</failure>', got)
        got = re.sub(r'(?s)<error (.*?)>.*?</error>', r'<error \1>Foobar</error>', got)
        self.assertEqual(expected, got)
    def test_no_tests(self):
        """Regression test: Check whether a test run without any tests
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            pass
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="0" time="0.000">
  <system-out><![CDATA[]]></system-out>
  <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_success(self):
        """Regression test: Check whether a test run with a successful test
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                pass
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
  <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
  <system-out><![CDATA[]]></system-out>
  <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_failure(self):
        """Regression test: Check whether a test run with a failing test
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                self.assert_(False)
        self._try_test_run(TestTest, """<testsuite errors="0" failures="1" name="unittest.TestSuite" tests="1" time="0.000">
  <testcase classname="__main__.TestTest" name="test_foo" time="0.000">
    <failure type="exceptions.AssertionError">Foobar</failure>
  </testcase>
  <system-out><![CDATA[]]></system-out>
  <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_error(self):
        """Regression test: Check whether a test run with a erroneous test
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                raise IndexError()
        self._try_test_run(TestTest, """<testsuite errors="1" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
  <testcase classname="__main__.TestTest" name="test_foo" time="0.000">
    <error type="exceptions.IndexError">Foobar</error>
  </testcase>
  <system-out><![CDATA[]]></system-out>
  <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_stdout_capture(self):
        """Regression test: Check whether a test run with output to stdout
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                print("Test")
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
  <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
  <system-out><![CDATA[Test
]]></system-out>
  <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_stderr_capture(self):
        """Regression test: Check whether a test run with output to stderr
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                sys.stderr.write('Test\n')
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
  <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
  <system-out><![CDATA[]]></system-out>
  <system-err><![CDATA[Test
]]></system-err>
</testsuite>
""")
    class NullStream(object):
        """A file-like object that discards everything written to it."""
        def write(self, buffer):
            pass
    def test_unittests_changing_stdout(self):
        """Check whether the XMLTestRunner recovers gracefully from unit tests
        that change stdout, but don't change it back properly.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                sys.stdout = XMLTestRunnerTest.NullStream()
        runner = XMLTestRunner(self._stream)
        runner.run(unittest.makeSuite(TestTest))
    def test_unittests_changing_stderr(self):
        """Check whether the XMLTestRunner recovers gracefully from unit tests
        that change stderr, but don't change it back properly.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                sys.stderr = XMLTestRunnerTest.NullStream()
        runner = XMLTestRunner(self._stream)
        runner.run(unittest.makeSuite(TestTest))
class XMLTestProgram(unittest.TestProgram):
    """unittest.TestProgram variant that defaults to the XML runner."""
    def runTests(self):
        if self.testRunner is None:
            self.testRunner = XMLTestRunner()
        unittest.TestProgram.runTests(self)
# Alias so running this module behaves like unittest.main with XML output.
main = XMLTestProgram
if __name__ == "__main__":
    main(module=None)
| [
[
8,
0,
0.0052,
0.0079,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.021,
0.0026,
0,
0.66,
0.05,
809,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.0262,
0.0026,
0,
0.66,
... | [
"\"\"\"\nXML Test Runner for PyUnit\n\"\"\"",
"__revision__ = \"$Id: /private/python/stdlib/xmlrunner.py 16654 2007-11-12T12:46:35.368945Z srittau $\"",
"import os.path",
"import re",
"import sys",
"import time",
"import traceback",
"import unittest",
"from StringIO import StringIO",
"from xml.sa... |
#!/usr/bin/env python
# -*- encoding: utf8 -*-
from Cheetah.Template import Template
from Cheetah import CheetahWrapper
from Cheetah import DummyTransaction
import imp
import os
import sys
import tempfile
import unittest
class CommandLineTest(unittest.TestCase):
    # Base class: compiles a template source string through the real
    # command-line wrapper and returns the generated template class.
    def createAndCompile(self, source):
        # NOTE(review): tempfile.mktemp() is deprecated/race-prone; the loop
        # retries until the name contains no '-' (invalid in module names).
        sourcefile = '-'
        while sourcefile.find('-') != -1:
            sourcefile = tempfile.mktemp()
        fd = open('%s.tmpl' % sourcefile, 'w')
        fd.write(source)
        fd.close()
        wrap = CheetahWrapper.CheetahWrapper()
        wrap.main(['cheetah', 'compile', '--quiet', '--nobackup', sourcefile])
        module_path, module_name = os.path.split(sourcefile)
        module = loadModule(module_name, [module_path])
        template = getattr(module, module_name)
        return template
class JBQ_UTF8_Test1(unittest.TestCase):
    # Nested templates, plain-ASCII unicode values everywhere.
    # NOTE(review): relies on the Python 2 `unicode` builtin.
    def runTest(self):
        t = Template.compile(source="""Main file with |$v|
$other""")
        otherT = Template.compile(source="Other template with |$v|")
        other = otherT()
        t.other = other
        t.v = u'Unicode String'
        t.other.v = u'Unicode String'
        assert unicode(t())
class JBQ_UTF8_Test2(unittest.TestCase):
    # Non-ASCII value in the outer template only.
    # NOTE(review): relies on the Python 2 `unicode` builtin.
    def runTest(self):
        t = Template.compile(source="""Main file with |$v|
$other""")
        otherT = Template.compile(source="Other template with |$v|")
        other = otherT()
        t.other = other
        t.v = u'Unicode String with eacute é'
        t.other.v = u'Unicode String'
        assert unicode(t())
class JBQ_UTF8_Test3(unittest.TestCase):
    # Non-ASCII values in both the outer and the nested template.
    # NOTE(review): relies on the Python 2 `unicode` builtin.
    def runTest(self):
        t = Template.compile(source="""Main file with |$v|
$other""")
        otherT = Template.compile(source="Other template with |$v|")
        other = otherT()
        t.other = other
        t.v = u'Unicode String with eacute é'
        t.other.v = u'Unicode String and an eacute é'
        assert unicode(t())
class JBQ_UTF8_Test4(unittest.TestCase):
    # Non-ASCII literal inside the template source itself (byte-string value).
    # NOTE(review): relies on the Python 2 `unicode` builtin.
    def runTest(self):
        t = Template.compile(source="""#encoding utf-8
Main file with |$v| and eacute in the template é""")
        t.v = 'Unicode String'
        assert unicode(t())
class JBQ_UTF8_Test5(unittest.TestCase):
    # Same as Test4 but with a unicode (u'') value.
    # NOTE(review): relies on the Python 2 `unicode` builtin.
    def runTest(self):
        t = Template.compile(source="""#encoding utf-8
Main file with |$v| and eacute in the template é""")
        t.v = u'Unicode String'
        assert unicode(t())
def loadModule(moduleName, path=None):
    """Return the module named *moduleName*, importing it from *path*
    (a list of directories) unless it is already in sys.modules.
    """
    if path:
        assert isinstance(path, list)
    try:
        return sys.modules[moduleName]
    except KeyError:
        pass
    fp = None
    try:
        fp, pathname, description = imp.find_module(moduleName, path)
        mod = imp.load_module(moduleName, fp, pathname, description)
    finally:
        # find_module() returns an open file handle; always close it.
        if fp:
            fp.close()
    return mod
class JBQ_UTF8_Test6(unittest.TestCase):
def runTest(self):
source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
Main file with |$v| and eacute in the template é"""
t = Template.compile(source=source)
t.v = u'Unicode String'
assert unicode(t())
class JBQ_UTF8_Test7(CommandLineTest):
def runTest(self):
source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
Main file with |$v| and eacute in the template é"""
template = self.createAndCompile(source)
template.v = u'Unicode String'
assert unicode(template())
class JBQ_UTF8_Test8(CommandLineTest):
def testStaticCompile(self):
source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
$someUnicodeString"""
template = self.createAndCompile(source)()
a = unicode(template).encode("utf-8")
self.assertEquals("Bébé", a)
def testDynamicCompile(self):
source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
$someUnicodeString"""
template = Template(source = source)
a = unicode(template).encode("utf-8")
self.assertEquals("Bébé", a)
class EncodeUnicodeCompatTest(unittest.TestCase):
"""
Taken initially from Red Hat's bugzilla #529332
https://bugzilla.redhat.com/show_bug.cgi?id=529332
"""
def runTest(self):
t = Template("""Foo ${var}""", filter='EncodeUnicode')
t.var = u"Text with some non-ascii characters: åäö"
rc = t.respond()
assert isinstance(rc, unicode), ('Template.respond() should return unicode', rc)
rc = str(t)
assert isinstance(rc, str), ('Template.__str__() should return a UTF-8 encoded string', rc)
class Unicode_in_SearchList_Test(CommandLineTest):
def test_BasicASCII(self):
source = '''This is $adjective'''
template = self.createAndCompile(source)
assert template and issubclass(template, Template)
template = template(searchList=[{'adjective' : u'neat'}])
assert template.respond()
def test_Thai(self):
# The string is something in Thai
source = '''This is $foo $adjective'''
template = self.createAndCompile(source)
assert template and issubclass(template, Template)
template = template(searchList=[{'foo' : 'bar',
'adjective' : u'\u0e22\u0e34\u0e19\u0e14\u0e35\u0e15\u0e49\u0e2d\u0e19\u0e23\u0e31\u0e1a'}])
assert template.respond()
def test_Thai_utf8(self):
utf8 = '\xe0\xb8\xa2\xe0\xb8\xb4\xe0\xb8\x99\xe0\xb8\x94\xe0\xb8\xb5\xe0\xb8\x95\xe0\xb9\x89\xe0\xb8\xad\xe0\xb8\x99\xe0\xb8\xa3\xe0\xb8\xb1\xe0\xb8\x9a'
source = '''This is $adjective'''
template = self.createAndCompile(source)
assert template and issubclass(template, Template)
template = template(searchList=[{'adjective' : utf8}])
assert template.respond()
class InlineSpanishTest(unittest.TestCase):
def setUp(self):
super(InlineSpanishTest, self).setUp()
self.template = '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Pagina del vendedor</title>
</head>
<body>
$header
<h2>Bienvenido $nombre.</h2>
<br /><br /><br />
<center>
Usted tiene $numpedidos_noconf <a href="">pedidós</a> sin confirmar.
<br /><br />
Bodega tiene fecha para $numpedidos_bodega <a href="">pedidos</a>.
</center>
</body>
</html>
'''
def test_failure(self):
""" Test a template lacking a proper #encoding tag """
self.failUnlessRaises(UnicodeDecodeError, Template, self.template, searchList=[{'header' : '',
'nombre' : '', 'numpedidos_bodega' : '',
'numpedidos_noconf' : ''}])
def test_success(self):
""" Test a template with a proper #encoding tag """
template = '#encoding utf-8\n%s' % self.template
template = Template(template, searchList=[{'header' : '',
'nombre' : '', 'numpedidos_bodega' : '',
'numpedidos_noconf' : ''}])
self.assertTrue(unicode(template))
if __name__ == '__main__':
unittest.main()
| [
[
1,
0,
0.0169,
0.0042,
0,
0.66,
0,
171,
0,
1,
0,
0,
171,
0,
0
],
[
1,
0,
0.0211,
0.0042,
0,
0.66,
0.0476,
920,
0,
1,
0,
0,
920,
0,
0
],
[
1,
0,
0.0253,
0.0042,
0,
... | [
"from Cheetah.Template import Template",
"from Cheetah import CheetahWrapper",
"from Cheetah import DummyTransaction",
"import imp",
"import os",
"import sys",
"import tempfile",
"import unittest",
"class CommandLineTest(unittest.TestCase):\n def createAndCompile(self, source):\n sourcefil... |
#!/usr/bin/env python
import unittest
from Cheetah import SettingsManager
class SettingsManagerTests(unittest.TestCase):
def test_mergeDictionaries(self):
left = {'foo' : 'bar', 'abc' : {'a' : 1, 'b' : 2, 'c' : (3,)}}
right = {'xyz' : (10, 9)}
expect = {'xyz': (10, 9), 'foo': 'bar', 'abc': {'a': 1, 'c': (3,), 'b': 2}}
result = SettingsManager.mergeNestedDictionaries(left, right)
self.assertEquals(result, expect)
if __name__ == '__main__':
unittest.main()
| [
[
1,
0,
0.15,
0.05,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.25,
0.05,
0,
0.66,
0.3333,
920,
0,
1,
0,
0,
920,
0,
0
],
[
3,
0,
0.575,
0.4,
0,
0.66,
0.66... | [
"import unittest",
"from Cheetah import SettingsManager",
"class SettingsManagerTests(unittest.TestCase):\n def test_mergeDictionaries(self):\n left = {'foo' : 'bar', 'abc' : {'a' : 1, 'b' : 2, 'c' : (3,)}}\n right = {'xyz' : (10, 9)}\n expect = {'xyz': (10, 9), 'foo': 'bar', 'abc': {'a'... |
#!/usr/bin/env python
import unittest
from Cheetah import Parser
class ArgListTest(unittest.TestCase):
def setUp(self):
super(ArgListTest, self).setUp()
self.al = Parser.ArgList()
def test_merge1(self):
'''
Testing the ArgList case results from Template.Preprocessors.test_complexUsage
'''
self.al.add_argument('arg')
expect = [('arg', None)]
self.assertEquals(expect, self.al.merge())
def test_merge2(self):
'''
Testing the ArgList case results from SyntaxAndOutput.BlockDirective.test4
'''
self.al.add_argument('a')
self.al.add_default('999')
self.al.next()
self.al.add_argument('b')
self.al.add_default('444')
expect = [(u'a', u'999'), (u'b', u'444')]
self.assertEquals(expect, self.al.merge())
def test_merge3(self):
'''
Testing the ArgList case results from SyntaxAndOutput.BlockDirective.test13
'''
self.al.add_argument('arg')
self.al.add_default("'This is my block'")
expect = [('arg', "'This is my block'")]
self.assertEquals(expect, self.al.merge())
if __name__ == '__main__':
unittest.main()
| [
[
1,
0,
0.0612,
0.0204,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.102,
0.0204,
0,
0.66,
0.3333,
920,
0,
1,
0,
0,
920,
0,
0
],
[
3,
0,
0.5306,
0.7959,
0,
0.6... | [
"import unittest",
"from Cheetah import Parser",
"class ArgListTest(unittest.TestCase):\n def setUp(self):\n super(ArgListTest, self).setUp()\n self.al = Parser.ArgList()\n\n def test_merge1(self):\n ''' \n Testing the ArgList case results from Template.Preprocessors.test_c... |
#!/usr/bin/env python
import unittest
from Cheetah import DirectiveAnalyzer
class AnalyzerTests(unittest.TestCase):
def test_set(self):
template = '''
#set $foo = "bar"
Hello ${foo}!
'''
calls = DirectiveAnalyzer.analyze(template)
self.assertEquals(1, calls.get('set'))
def test_compilersettings(self):
template = '''
#compiler-settings
useNameMapper = False
#end compiler-settings
'''
calls = DirectiveAnalyzer.analyze(template)
self.assertEquals(1, calls.get('compiler-settings'))
if __name__ == '__main__':
unittest.main()
| [
[
1,
0,
0.1034,
0.0345,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.1724,
0.0345,
0,
0.66,
0.3333,
920,
0,
1,
0,
0,
920,
0,
0
],
[
3,
0,
0.5517,
0.5862,
0,
0.... | [
"import unittest",
"from Cheetah import DirectiveAnalyzer",
"class AnalyzerTests(unittest.TestCase):\n def test_set(self):\n template = '''\n #set $foo = \"bar\"\n Hello ${foo}!\n '''\n calls = DirectiveAnalyzer.analyze(template)\n self.assertEquals(1, calls.get('set... |
#!/usr/bin/env python
import Cheetah.NameMapper
import Cheetah.Template
import sys
import unittest
majorVer, minorVer = sys.version_info[0], sys.version_info[1]
versionTuple = (majorVer, minorVer)
def isPython23():
''' Python 2.3 is still supported by Cheetah, but doesn't support decorators '''
return majorVer == 2 and minorVer < 4
class GetAttrException(Exception):
pass
class CustomGetAttrClass(object):
def __getattr__(self, name):
raise GetAttrException('FAIL, %s' % name)
class GetAttrTest(unittest.TestCase):
'''
Test for an issue occurring when __getatttr__() raises an exception
causing NameMapper to raise a NotFound exception
'''
def test_ValidException(self):
o = CustomGetAttrClass()
try:
print(o.attr)
except GetAttrException, e:
# expected
return
except:
self.fail('Invalid exception raised: %s' % e)
self.fail('Should have had an exception raised')
def test_NotFoundException(self):
template = '''
#def raiseme()
$obj.attr
#end def'''
template = Cheetah.Template.Template.compile(template, compilerSettings={}, keepRefToGeneratedCode=True)
template = template(searchList=[{'obj' : CustomGetAttrClass()}])
assert template, 'We should have a valid template object by now'
self.failUnlessRaises(GetAttrException, template.raiseme)
class InlineImportTest(unittest.TestCase):
def test_FromFooImportThing(self):
'''
Verify that a bug introduced in v2.1.0 where an inline:
#from module import class
would result in the following code being generated:
import class
'''
template = '''
#def myfunction()
#if True
#from os import path
#return 17
Hello!
#end if
#end def
'''
template = Cheetah.Template.Template.compile(template, compilerSettings={'useLegacyImportMode' : False}, keepRefToGeneratedCode=True)
template = template(searchList=[{}])
assert template, 'We should have a valid template object by now'
rc = template.myfunction()
assert rc == 17, (template, 'Didn\'t get a proper return value')
def test_ImportFailModule(self):
template = '''
#try
#import invalidmodule
#except
#set invalidmodule = dict(FOO='BAR!')
#end try
$invalidmodule.FOO
'''
template = Cheetah.Template.Template.compile(template, compilerSettings={'useLegacyImportMode' : False}, keepRefToGeneratedCode=True)
template = template(searchList=[{}])
assert template, 'We should have a valid template object by now'
assert str(template), 'We weren\'t able to properly generate the result from the template'
def test_ProperImportOfBadModule(self):
template = '''
#from invalid import fail
This should totally $fail
'''
self.failUnlessRaises(ImportError, Cheetah.Template.Template.compile, template, compilerSettings={'useLegacyImportMode' : False}, keepRefToGeneratedCode=True)
def test_AutoImporting(self):
template = '''
#extends FakeyTemplate
Boo!
'''
self.failUnlessRaises(ImportError, Cheetah.Template.Template.compile, template)
def test_StuffBeforeImport_Legacy(self):
template = '''
###
### I like comments before import
###
#extends Foo
Bar
'''
self.failUnlessRaises(ImportError, Cheetah.Template.Template.compile, template, compilerSettings={'useLegacyImportMode' : True}, keepRefToGeneratedCode=True)
class Mantis_Issue_11_Regression_Test(unittest.TestCase):
'''
Test case for bug outlined in Mantis issue #11:
Output:
Traceback (most recent call last):
File "test.py", line 12, in <module>
t.respond()
File "DynamicallyCompiledCheetahTemplate.py", line 86, in respond
File "/usr/lib64/python2.6/cgi.py", line 1035, in escape
s = s.replace("&", "&") # Must be done first!
'''
def test_FailingBehavior(self):
import cgi
template = Cheetah.Template.Template("$escape($request)", searchList=[{'escape' : cgi.escape, 'request' : 'foobar'}])
assert template
self.failUnlessRaises(AttributeError, template.respond)
def test_FailingBehaviorWithSetting(self):
import cgi
template = Cheetah.Template.Template("$escape($request)",
searchList=[{'escape' : cgi.escape, 'request' : 'foobar'}],
compilerSettings={'prioritizeSearchListOverSelf' : True})
assert template
assert template.respond()
class Mantis_Issue_21_Regression_Test(unittest.TestCase):
'''
Test case for bug outlined in issue #21
Effectively @staticmethod and @classmethod
decorated methods in templates don't
properly define the _filter local, which breaks
when using the NameMapper
'''
def runTest(self):
if isPython23():
return
template = '''
#@staticmethod
#def testMethod()
This is my $output
#end def
'''
template = Cheetah.Template.Template.compile(template)
assert template
assert template.testMethod(output='bug') # raises a NameError: global name '_filter' is not defined
class Mantis_Issue_22_Regression_Test(unittest.TestCase):
'''
Test case for bug outlined in issue #22
When using @staticmethod and @classmethod
in conjunction with the #filter directive
the generated code for the #filter is reliant
on the `self` local, breaking the function
'''
def test_NoneFilter(self):
# XXX: Disabling this test for now
return
if isPython23():
return
template = '''
#@staticmethod
#def testMethod()
#filter None
This is my $output
#end filter
#end def
'''
template = Cheetah.Template.Template.compile(template)
assert template
assert template.testMethod(output='bug')
def test_DefinedFilter(self):
# XXX: Disabling this test for now
return
if isPython23():
return
template = '''
#@staticmethod
#def testMethod()
#filter Filter
This is my $output
#end filter
#end def
'''
# The generated code for the template's testMethod() should look something
# like this in the 'error' case:
'''
@staticmethod
def testMethod(**KWS):
## CHEETAH: generated from #def testMethod() at line 3, col 13.
trans = DummyTransaction()
_dummyTrans = True
write = trans.response().write
SL = [KWS]
_filter = lambda x, **kwargs: unicode(x)
########################################
## START - generated method body
_orig_filter_18517345 = _filter
filterName = u'Filter'
if self._CHEETAH__filters.has_key("Filter"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u' This is my ')
_v = VFFSL(SL,"output",True) # u'$output' on line 5, col 32
if _v is not None: write(_filter(_v, rawExpr=u'$output')) # from line 5, col 32.
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
'''
template = Cheetah.Template.Template.compile(template)
assert template
assert template.testMethod(output='bug')
if __name__ == '__main__':
unittest.main()
| [
[
1,
0,
0.0122,
0.0041,
0,
0.66,
0,
308,
0,
1,
0,
0,
308,
0,
0
],
[
1,
0,
0.0163,
0.0041,
0,
0.66,
0.0714,
171,
0,
1,
0,
0,
171,
0,
0
],
[
1,
0,
0.0244,
0.0041,
0,
... | [
"import Cheetah.NameMapper",
"import Cheetah.Template",
"import sys",
"import unittest",
"majorVer, minorVer = sys.version_info[0], sys.version_info[1]",
"versionTuple = (majorVer, minorVer)",
"def isPython23():\n ''' Python 2.3 is still supported by Cheetah, but doesn't support decorators '''\n r... |
#!/usr/bin/env python
'''
Tests for the 'cheetah' command.
Besides unittest usage, recognizes the following command-line options:
--list CheetahWrapper.py
List all scenarios that are tested. The argument is the path
of this script.
--nodelete
Don't delete scratch directory at end.
--output
Show the output of each subcommand. (Normally suppressed.)
'''
import os
import os.path
import pdb
import re # Used by listTests.
import shutil
import sys
import tempfile
import unittest
from optparse import OptionParser
from Cheetah.CheetahWrapper import CheetahWrapper # Used by NoBackup.
try:
from subprocess import Popen, PIPE, STDOUT
class Popen4(Popen):
def __init__(self, cmd, bufsize=-1, shell=True, close_fds=True,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, **kwargs):
super(Popen4, self).__init__(cmd, bufsize=bufsize, shell=shell,
close_fds=close_fds, stdin=stdin, stdout=stdout,
stderr=stderr, **kwargs)
self.tochild = self.stdin
self.fromchild = self.stdout
self.childerr = self.stderr
except ImportError:
from popen2 import Popen4
DELETE = True # True to clean up after ourselves, False for debugging.
OUTPUT = False # Normally False, True for debugging.
BACKUP_SUFFIX = CheetahWrapper.BACKUP_SUFFIX
def warn(msg):
sys.stderr.write(msg + '\n')
class CFBase(unittest.TestCase):
"""Base class for "cheetah compile" and "cheetah fill" unit tests.
"""
srcDir = '' # Nonblank to create source directory.
subdirs = ('child', 'child/grandkid') # Delete in reverse order.
srcFiles = ('a.tmpl', 'child/a.tmpl', 'child/grandkid/a.tmpl')
expectError = False # Used by --list option.
def inform(self, message):
if self.verbose:
print(message)
def setUp(self):
"""Create the top-level directories, subdirectories and .tmpl
files.
"""
I = self.inform
# Step 1: Create the scratch directory and chdir into it.
self.scratchDir = scratchDir = tempfile.mktemp()
os.mkdir(scratchDir)
self.origCwd = os.getcwd()
os.chdir(scratchDir)
if self.srcDir:
os.mkdir(self.srcDir)
# Step 2: Create source subdirectories.
for dir in self.subdirs:
os.mkdir(dir)
# Step 3: Create the .tmpl files, each in its proper directory.
for fil in self.srcFiles:
f = open(fil, 'w')
f.write("Hello, world!\n")
f.close()
def tearDown(self):
os.chdir(self.origCwd)
if DELETE:
shutil.rmtree(self.scratchDir, True) # Ignore errors.
if os.path.exists(self.scratchDir):
warn("Warning: unable to delete scratch directory %s")
else:
warn("Warning: not deleting scratch directory %s" % self.scratchDir)
def _checkDestFileHelper(self, path, expected,
allowSurroundingText, errmsg):
"""Low-level helper to check a destination file.
in : path, string, the destination path.
expected, string, the expected contents.
allowSurroundingtext, bool, allow the result to contain
additional text around the 'expected' substring?
errmsg, string, the error message. It may contain the
following "%"-operator keys: path, expected, result.
out: None
"""
path = os.path.abspath(path)
exists = os.path.exists(path)
msg = "destination file missing: %s" % path
self.failUnless(exists, msg)
f = open(path, 'r')
result = f.read()
f.close()
if allowSurroundingText:
success = result.find(expected) != -1
else:
success = result == expected
msg = errmsg % locals()
self.failUnless(success, msg)
def checkCompile(self, path):
# Raw string to prevent "\n" from being converted to a newline.
#expected = R"write('Hello, world!\n')"
expected = "Hello, world!" # might output a u'' string
errmsg = """\
destination file %(path)s doesn't contain expected substring:
%(expected)r"""
self._checkDestFileHelper(path, expected, True, errmsg)
def checkFill(self, path):
expected = "Hello, world!\n"
errmsg = """\
destination file %(path)s contains wrong result.
Expected %(expected)r
Found %(result)r"""
self._checkDestFileHelper(path, expected, False, errmsg)
def checkSubdirPyInit(self, path):
"""Verify a destination subdirectory exists and contains an
__init__.py file.
"""
exists = os.path.exists(path)
msg = "destination subdirectory %s misssing" % path
self.failUnless(exists, msg)
initPath = os.path.join(path, "__init__.py")
exists = os.path.exists(initPath)
msg = "destination init file missing: %s" % initPath
self.failUnless(exists, msg)
def checkNoBackup(self, path):
"""Verify 'path' does not exist. (To check --nobackup.)
"""
exists = os.path.exists(path)
msg = "backup file exists in spite of --nobackup: %s" % path
self.failIf(exists, msg)
def locate_command(self, cmd):
paths = os.getenv('PATH')
if not paths:
return cmd
parts = cmd.split(' ')
paths = paths.split(':')
for p in paths:
p = p + os.path.sep + parts[0]
if os.path.isfile(p):
return ' '.join([p] + parts[1:])
return ' '.join(parts)
def assertWin32Subprocess(self, cmd):
_in, _out = os.popen4(cmd)
_in.close()
output = _out.read()
rc = _out.close()
if rc is None:
rc = 0
return rc, output
def assertPosixSubprocess(self, cmd):
cmd = self.locate_command(cmd)
process = Popen4(cmd, env=os.environ)
process.tochild.close()
output = process.fromchild.read()
status = process.wait()
process.fromchild.close()
return status, output
def assertSubprocess(self, cmd, nonzero=False):
status, output = None, None
if sys.platform == 'win32':
status, output = self.assertWin32Subprocess(cmd)
else:
status, output = self.assertPosixSubprocess(cmd)
if not nonzero:
self.failUnlessEqual(status, 0, '''Subprocess exited with a non-zero status (%d)
%s''' % (status, output))
else:
self.failIfEqual(status, 0, '''Subprocess exited with a zero status (%d)
%s''' % (status, output))
return output
def go(self, cmd, expectedStatus=0, expectedOutputSubstring=None):
"""Run a "cheetah compile" or "cheetah fill" subcommand.
in : cmd, string, the command to run.
expectedStatus, int, subcommand's expected output status.
0 if the subcommand is expected to succeed, 1-255 otherwise.
expectedOutputSubstring, string, substring which much appear
in the standard output or standard error. None to skip this
test.
out: None.
"""
output = self.assertSubprocess(cmd)
if expectedOutputSubstring is not None:
msg = "substring %r not found in subcommand output: %s" % \
(expectedOutputSubstring, cmd)
substringTest = output.find(expectedOutputSubstring) != -1
self.failUnless(substringTest, msg)
class CFIdirBase(CFBase):
"""Subclass for tests with --idir.
"""
srcDir = 'SRC'
subdirs = ('SRC/child', 'SRC/child/grandkid') # Delete in reverse order.
srcFiles = ('SRC/a.tmpl', 'SRC/child/a.tmpl', 'SRC/child/grandkid/a.tmpl')
##################################################
## TEST CASE CLASSES
class OneFile(CFBase):
def testCompile(self):
self.go("cheetah compile a.tmpl")
self.checkCompile("a.py")
def testFill(self):
self.go("cheetah fill a.tmpl")
self.checkFill("a.html")
def testText(self):
self.go("cheetah fill --oext txt a.tmpl")
self.checkFill("a.txt")
class OneFileNoExtension(CFBase):
def testCompile(self):
self.go("cheetah compile a")
self.checkCompile("a.py")
def testFill(self):
self.go("cheetah fill a")
self.checkFill("a.html")
def testText(self):
self.go("cheetah fill --oext txt a")
self.checkFill("a.txt")
class SplatTmpl(CFBase):
def testCompile(self):
self.go("cheetah compile *.tmpl")
self.checkCompile("a.py")
def testFill(self):
self.go("cheetah fill *.tmpl")
self.checkFill("a.html")
def testText(self):
self.go("cheetah fill --oext txt *.tmpl")
self.checkFill("a.txt")
class ThreeFilesWithSubdirectories(CFBase):
def testCompile(self):
self.go("cheetah compile a.tmpl child/a.tmpl child/grandkid/a.tmpl")
self.checkCompile("a.py")
self.checkCompile("child/a.py")
self.checkCompile("child/grandkid/a.py")
def testFill(self):
self.go("cheetah fill a.tmpl child/a.tmpl child/grandkid/a.tmpl")
self.checkFill("a.html")
self.checkFill("child/a.html")
self.checkFill("child/grandkid/a.html")
def testText(self):
self.go("cheetah fill --oext txt a.tmpl child/a.tmpl child/grandkid/a.tmpl")
self.checkFill("a.txt")
self.checkFill("child/a.txt")
self.checkFill("child/grandkid/a.txt")
class ThreeFilesWithSubdirectoriesNoExtension(CFBase):
def testCompile(self):
self.go("cheetah compile a child/a child/grandkid/a")
self.checkCompile("a.py")
self.checkCompile("child/a.py")
self.checkCompile("child/grandkid/a.py")
def testFill(self):
self.go("cheetah fill a child/a child/grandkid/a")
self.checkFill("a.html")
self.checkFill("child/a.html")
self.checkFill("child/grandkid/a.html")
def testText(self):
self.go("cheetah fill --oext txt a child/a child/grandkid/a")
self.checkFill("a.txt")
self.checkFill("child/a.txt")
self.checkFill("child/grandkid/a.txt")
class SplatTmplWithSubdirectories(CFBase):
def testCompile(self):
self.go("cheetah compile *.tmpl child/*.tmpl child/grandkid/*.tmpl")
self.checkCompile("a.py")
self.checkCompile("child/a.py")
self.checkCompile("child/grandkid/a.py")
def testFill(self):
self.go("cheetah fill *.tmpl child/*.tmpl child/grandkid/*.tmpl")
self.checkFill("a.html")
self.checkFill("child/a.html")
self.checkFill("child/grandkid/a.html")
def testText(self):
self.go("cheetah fill --oext txt *.tmpl child/*.tmpl child/grandkid/*.tmpl")
self.checkFill("a.txt")
self.checkFill("child/a.txt")
self.checkFill("child/grandkid/a.txt")
class OneFileWithOdir(CFBase):
def testCompile(self):
self.go("cheetah compile --odir DEST a.tmpl")
self.checkSubdirPyInit("DEST")
self.checkCompile("DEST/a.py")
def testFill(self):
self.go("cheetah fill --odir DEST a.tmpl")
self.checkFill("DEST/a.html")
def testText(self):
self.go("cheetah fill --odir DEST --oext txt a.tmpl")
self.checkFill("DEST/a.txt")
class VarietyWithOdir(CFBase):
def testCompile(self):
self.go("cheetah compile --odir DEST a.tmpl child/a child/grandkid/*.tmpl")
self.checkSubdirPyInit("DEST")
self.checkSubdirPyInit("DEST/child")
self.checkSubdirPyInit("DEST/child/grandkid")
self.checkCompile("DEST/a.py")
self.checkCompile("DEST/child/a.py")
self.checkCompile("DEST/child/grandkid/a.py")
def testFill(self):
self.go("cheetah fill --odir DEST a.tmpl child/a child/grandkid/*.tmpl")
self.checkFill("DEST/a.html")
self.checkFill("DEST/child/a.html")
self.checkFill("DEST/child/grandkid/a.html")
def testText(self):
self.go("cheetah fill --odir DEST --oext txt a.tmpl child/a child/grandkid/*.tmpl")
self.checkFill("DEST/a.txt")
self.checkFill("DEST/child/a.txt")
self.checkFill("DEST/child/grandkid/a.txt")
class RecurseExplicit(CFBase):
def testCompile(self):
self.go("cheetah compile -R child")
self.checkCompile("child/a.py")
self.checkCompile("child/grandkid/a.py")
def testFill(self):
self.go("cheetah fill -R child")
self.checkFill("child/a.html")
self.checkFill("child/grandkid/a.html")
def testText(self):
self.go("cheetah fill -R --oext txt child")
self.checkFill("child/a.txt")
self.checkFill("child/grandkid/a.txt")
class RecurseImplicit(CFBase):
def testCompile(self):
self.go("cheetah compile -R")
self.checkCompile("child/a.py")
self.checkCompile("child/grandkid/a.py")
def testFill(self):
self.go("cheetah fill -R")
self.checkFill("a.html")
self.checkFill("child/a.html")
self.checkFill("child/grandkid/a.html")
def testText(self):
self.go("cheetah fill -R --oext txt")
self.checkFill("a.txt")
self.checkFill("child/a.txt")
self.checkFill("child/grandkid/a.txt")
class RecurseExplicitWIthOdir(CFBase):
def testCompile(self):
self.go("cheetah compile -R --odir DEST child")
self.checkSubdirPyInit("DEST/child")
self.checkSubdirPyInit("DEST/child/grandkid")
self.checkCompile("DEST/child/a.py")
self.checkCompile("DEST/child/grandkid/a.py")
def testFill(self):
self.go("cheetah fill -R --odir DEST child")
self.checkFill("DEST/child/a.html")
self.checkFill("DEST/child/grandkid/a.html")
def testText(self):
self.go("cheetah fill -R --odir DEST --oext txt child")
self.checkFill("DEST/child/a.txt")
self.checkFill("DEST/child/grandkid/a.txt")
class Flat(CFBase):
def testCompile(self):
self.go("cheetah compile --flat child/a.tmpl")
self.checkCompile("a.py")
def testFill(self):
self.go("cheetah fill --flat child/a.tmpl")
self.checkFill("a.html")
def testText(self):
self.go("cheetah fill --flat --oext txt child/a.tmpl")
self.checkFill("a.txt")
class FlatRecurseCollision(CFBase):
expectError = True
def testCompile(self):
self.assertSubprocess("cheetah compile -R --flat", nonzero=True)
def testFill(self):
self.assertSubprocess("cheetah fill -R --flat", nonzero=True)
def testText(self):
self.assertSubprocess("cheetah fill -R --flat", nonzero=True)
class IdirRecurse(CFIdirBase):
def testCompile(self):
self.go("cheetah compile -R --idir SRC child")
self.checkSubdirPyInit("child")
self.checkSubdirPyInit("child/grandkid")
self.checkCompile("child/a.py")
self.checkCompile("child/grandkid/a.py")
def testFill(self):
self.go("cheetah fill -R --idir SRC child")
self.checkFill("child/a.html")
self.checkFill("child/grandkid/a.html")
def testText(self):
self.go("cheetah fill -R --idir SRC --oext txt child")
self.checkFill("child/a.txt")
self.checkFill("child/grandkid/a.txt")
class IdirOdirRecurse(CFIdirBase):
def testCompile(self):
self.go("cheetah compile -R --idir SRC --odir DEST child")
self.checkSubdirPyInit("DEST/child")
self.checkSubdirPyInit("DEST/child/grandkid")
self.checkCompile("DEST/child/a.py")
self.checkCompile("DEST/child/grandkid/a.py")
def testFill(self):
self.go("cheetah fill -R --idir SRC --odir DEST child")
self.checkFill("DEST/child/a.html")
self.checkFill("DEST/child/grandkid/a.html")
def testText(self):
self.go("cheetah fill -R --idir SRC --odir DEST --oext txt child")
self.checkFill("DEST/child/a.txt")
self.checkFill("DEST/child/grandkid/a.txt")
class IdirFlatRecurseCollision(CFIdirBase):
expectError = True
def testCompile(self):
self.assertSubprocess("cheetah compile -R --flat --idir SRC", nonzero=True)
def testFill(self):
self.assertSubprocess("cheetah fill -R --flat --idir SRC", nonzero=True)
def testText(self):
self.assertSubprocess("cheetah fill -R --flat --idir SRC --oext txt", nonzero=True)
class NoBackup(CFBase):
"""Run the command twice each time and verify a backup file is
*not* created.
"""
def testCompile(self):
self.go("cheetah compile --nobackup a.tmpl")
self.go("cheetah compile --nobackup a.tmpl")
self.checkNoBackup("a.py" + BACKUP_SUFFIX)
def testFill(self):
self.go("cheetah fill --nobackup a.tmpl")
self.go("cheetah fill --nobackup a.tmpl")
self.checkNoBackup("a.html" + BACKUP_SUFFIX)
def testText(self):
self.go("cheetah fill --nobackup --oext txt a.tmpl")
self.go("cheetah fill --nobackup --oext txt a.tmpl")
self.checkNoBackup("a.txt" + BACKUP_SUFFIX)
def listTests(cheetahWrapperFile):
"""cheetahWrapperFile, string, path of this script.
XXX TODO: don't print test where expectError is true.
"""
rx = re.compile( R'self\.go\("(.*?)"\)' )
f = open(cheetahWrapperFile)
while True:
lin = f.readline()
if not lin:
break
m = rx.search(lin)
if m:
print(m.group(1))
f.close()
def main():
global DELETE, OUTPUT
parser = OptionParser()
parser.add_option("--list", action="store", dest="listTests")
parser.add_option("--nodelete", action="store_true")
parser.add_option("--output", action="store_true")
# The following options are passed to unittest.
parser.add_option("-e", "--explain", action="store_true")
parser.add_option("-v", "--verbose", action="store_true")
parser.add_option("-q", "--quiet", action="store_true")
opts, files = parser.parse_args()
if opts.nodelete:
DELETE = False
if opts.output:
OUTPUT = True
if opts.listTests:
listTests(opts.listTests)
else:
# Eliminate script-specific command-line arguments to prevent
# errors in unittest.
del sys.argv[1:]
for opt in ("explain", "verbose", "quiet"):
if getattr(opts, opt):
sys.argv.append("--" + opt)
sys.argv.extend(files)
unittest.main()
if __name__ == '__main__':
main()
# vim: sw=4 ts=4 expandtab
| [
[
8,
0,
0.0131,
0.0209,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0244,
0.0017,
0,
0.66,
0.027,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0262,
0.0017,
0,
0.66,... | [
"'''\nTests for the 'cheetah' command.\n\nBesides unittest usage, recognizes the following command-line options:\n --list CheetahWrapper.py\n List all scenarios that are tested. The argument is the path\n of this script.\n --nodelete",
"import os",
"import os.path",
"import pdb",
"impo... |
#
| [] | [] |
#!/usr/bin/env python
import hotshot
import hotshot.stats
import os
import sys
import unittest
from test import pystone
import time
import Cheetah.NameMapper
import Cheetah.Template
# This can be turned on with the `--debug` flag when running the test
# and will cause the tests to all just dump out how long they took
# insteasd of asserting on duration
DEBUG = False
# TOLERANCE in Pystones
kPS = 1000
TOLERANCE = 0.5*kPS
class DurationError(AssertionError):
pass
_pystone_calibration_mark = None
def _pystone_calibration():
global _pystone_calibration_mark
if not _pystone_calibration_mark:
_pystone_calibration_mark = pystone.pystones(loops=pystone.LOOPS)
return _pystone_calibration_mark
def perftest(max_num_pystones, current_pystone=None):
'''
Performance test decorator based off the 'timedtest'
decorator found in this Active State recipe:
http://code.activestate.com/recipes/440700/
'''
if not isinstance(max_num_pystones, float):
max_num_pystones = float(max_num_pystones)
if not current_pystone:
current_pystone = _pystone_calibration()
def _test(function):
def wrapper(*args, **kw):
start_time = time.time()
try:
return function(*args, **kw)
finally:
total_time = time.time() - start_time
if total_time == 0:
pystone_total_time = 0
else:
pystone_rate = current_pystone[0] / current_pystone[1]
pystone_total_time = total_time / pystone_rate
global DEBUG
if DEBUG:
print('The test "%s" took: %s pystones' % (function.func_name,
pystone_total_time))
else:
if pystone_total_time > (max_num_pystones + TOLERANCE):
raise DurationError((('Test too long (%.2f Ps, '
'need at most %.2f Ps)')
% (pystone_total_time,
max_num_pystones)))
return wrapper
return _test
class DynamicTemplatePerformanceTest(unittest.TestCase):
    """Compile a tiny dynamic template repeatedly under a pystone budget."""

    loops = 10

    @perftest(1200)
    def test_BasicDynamic(self):
        template = '''
            #def foo(arg1, arg2)
                #pass
            #end def
        '''
        for _ in range(self.loops):
            compiled = Cheetah.Template.Template.compile(template)
            assert compiled
class PerformanceTest(unittest.TestCase):
    """Base class: profiles `performanceSample` with hotshot, dumping the
    top-50 hot spots when a sample hook is present."""

    iterations = 100000
    display = False
    save = False

    def runTest(self):
        prof_path = '%s.prof' % self.__class__.__name__
        self.prof = hotshot.Profile(prof_path)
        self.prof.start()
        for _ in range(self.iterations):
            sample = getattr(self, 'performanceSample', None)
            if sample is not None:
                self.display = True
                sample()
        self.prof.stop()
        self.prof.close()

        if self.display:
            print('>>> %s (%d iterations) ' % (self.__class__.__name__,
                    self.iterations))
            stats = hotshot.stats.load(prof_path)
            stats.sort_stats('time', 'calls')
            stats.print_stats(50)
        if not self.save:
            os.unlink(prof_path)
class DynamicMethodCompilationTest(PerformanceTest):
    """Profile compiling a template, instantiating it, and calling a #def
    method that returns a list element."""
    def performanceSample(self):
        template = '''
            #import sys
            #import os
            #def testMethod()
                #set foo = [1, 2, 3, 4]
                #return $foo[0]
            #end def
        '''
        template = Cheetah.Template.Template.compile(template,
                keepRefToGeneratedCode=False)
        template = template()
        value = template.testMethod()
class BunchOfWriteCalls(PerformanceTest):
    """Profile rendering a #for loop that issues ~1000 placeholder writes."""
    # Fewer profiler iterations: each sample renders a 1000-item loop.
    iterations = 1000
    def performanceSample(self):
        template = '''
            #import sys
            #import os
            #for i in range(1000)
                $i
            #end for
        '''
        template = Cheetah.Template.Template.compile(template,
                keepRefToGeneratedCode=False)
        template = template()
        value = template.respond()
        del value
class DynamicSimpleCompilationTest(PerformanceTest):
    """Profile compile + instantiate + render of a small static template."""
    def performanceSample(self):
        template = '''
            #import sys
            #import os
            #set foo = [1,2,3,4]
            Well hello there! This is basic.
            Here's an array too: $foo
        '''
        template = Cheetah.Template.Template.compile(template,
                keepRefToGeneratedCode=False)
        template = template()
        # unicode() is the Python 2 rendering entry point for a template.
        template = unicode(template)
class FilterTest(PerformanceTest):
    """Profile rendering only: the template is compiled once in setUp, so
    each sample measures placeholder filtering/str(), not compilation."""
    # Compiled-and-instantiated template, built once per test in setUp.
    template = None
    def setUp(self):
        super(FilterTest, self).setUp()
        template = '''
            #import sys
            #import os
            #set foo = [1, 2, 3, 4]
            $foo, $foo, $foo
        '''
        template = Cheetah.Template.Template.compile(template,
                keepRefToGeneratedCode=False)
        self.template = template()
    def performanceSample(self):
        # unicode() renders the template through its filter chain (Python 2).
        value = unicode(self.template)
class LongCompileTest(PerformanceTest):
    ''' Test the compilation on a sufficiently large template '''
    def compile(self, template):
        # Hook point: subclasses override to vary compiler settings.
        return Cheetah.Template.Template.compile(template, keepRefToGeneratedCode=False)

    def performanceSample(self):
        # A template with #extends, several #def blocks and a loop, to make
        # the compiler do a realistic amount of work.
        template = '''
            #import sys
            #import Cheetah.Template
            #extends Cheetah.Template.Template
            #def header()
                <center><h2>This is my header</h2></center>
            #end def
            #def footer()
                #return "Huzzah"
            #end def
            #def scripts()
                #pass
            #end def
            #def respond()
                <html>
                    <head>
                        <title>${title}</title>
                        $scripts()
                    </head>
                    <body>
                        $header()
                        #for $i in $range(10)
                            This is just some stupid page!
                            <br/>
                        #end for
                        <br/>
                        $footer()
                    </body>
                </html>
            #end def
        '''
        # Returns the compiled class so subclasses can instantiate/render it.
        return self.compile(template)
class LongCompile_CompilerSettingsTest(LongCompileTest):
    """Same long-template compile, with stack frames and autocalling on."""

    def compile(self, template):
        settings = {'useStackFrames' : True, 'useAutocalling' : True}
        return Cheetah.Template.Template.compile(template,
                keepRefToGeneratedCode=False, compilerSettings=settings)
class LongCompileAndRun(LongCompileTest):
    """Profile compiling the long template, then instantiating and rendering
    it with a searchList supplying the ${title} placeholder."""
    def performanceSample(self):
        # Parent returns the compiled class; instantiate then render it.
        template = super(LongCompileAndRun, self).performanceSample()
        template = template(searchList=[{'title' : 'foo'}])
        template = template.respond()
if __name__ == '__main__':
    # Strip our private --debug flag before unittest parses sys.argv.
    if '--debug' in sys.argv:
        DEBUG = True
        sys.argv = [arg for arg in sys.argv if arg != '--debug']
    unittest.main()
| [
[
1,
0,
0.0123,
0.0041,
0,
0.66,
0,
974,
0,
1,
0,
0,
974,
0,
0
],
[
1,
0,
0.0165,
0.0041,
0,
0.66,
0.04,
226,
0,
1,
0,
0,
226,
0,
0
],
[
1,
0,
0.0206,
0.0041,
0,
0.... | [
"import hotshot",
"import hotshot.stats",
"import os",
"import sys",
"import unittest",
"from test import pystone",
"import time",
"import Cheetah.NameMapper",
"import Cheetah.Template",
"DEBUG = False",
"kPS = 1000",
"TOLERANCE = 0.5*kPS",
"class DurationError(AssertionError):\n pass",
... |
#!/usr/bin/env python
import sys
import types
import os
import os.path
import unittest
from Cheetah.NameMapper import NotFound, valueForKey, \
valueForName, valueFromSearchList, valueFromFrame, valueFromFrameOrSearchList
class DummyClass:
    """Fixture object exercised by the NameMapper lookup tests."""
    # Looked up as 'aClass.classVar1'.
    classVar1 = 123

    def __init__(self):
        # Looked up as 'anObj.instanceVar1'.
        self.instanceVar1 = 123

    def __str__(self):
        return 'object'

    def meth(self, arg="arff"):
        return str(arg)

    def meth1(self, arg="doo"):
        # Autocalling returns the default, 'doo'.
        return arg

    def meth2(self, arg1="a1", arg2="a2"):
        # Used to verify that exceptions propagate out of lookups.
        raise ValueError

    def meth3(self):
        """Tests a bug that Jeff Johnson reported on Oct 1, 2001"""
        # NOTE: the odd try/for/if structure reproduces the original bug
        # scenario on purpose -- do not "simplify" it.
        x = 'A string'
        try:
            for i in [1, 2, 3, 4]:
                if x == 2:
                    pass
                if x == 'xx':
                    pass
            return x
        except:
            raise
def dummyFunc(arg="Scooby"):
    """Identity helper; the 'Scooby' default marks autocalled lookups."""
    return arg
def funcThatRaises():
    """Always raise ValueError; verifies that exceptions propagate from lookups."""
    raise ValueError
# The namespace every lookup test resolves dotted names against.  Keys
# cover scalars, nested dicts, classes, instances, bound methods, plain
# functions, and "empty" values (None / '').
testNamespace = {
    'aStr': 'blarg',
    'anInt': 1,
    'aFloat': 1.5,
    'aDict': {'one': 'item1',
              'two': 'item2',
              'nestedDict': {'one': 'nestedItem1',
                             'two': 'nestedItem2',
                             'funcThatRaises': funcThatRaises,
                             'aClass': DummyClass,
                             },
              'nestedFunc': dummyFunc,
              },
    'aClass': DummyClass,
    'aFunc': dummyFunc,
    'anObj': DummyClass(),
    'aMeth': DummyClass().meth1,
    'none': None,
    'emptyString': '',
    'funcThatRaises': funcThatRaises,
    }
# Expected values when autocalling is enabled: callables are invoked with
# no arguments, so their default-argument values come back.
autoCallResults = {'aFunc': 'Scooby',
                   'aMeth': 'doo',
                   }

# Expected results for dotted-name lookups, derived from testNamespace.
results = testNamespace.copy()
results.update({'anObj.meth1': 'doo',
                'aDict.one': 'item1',
                'aDict.nestedDict': testNamespace['aDict']['nestedDict'],
                'aDict.nestedDict.one': 'nestedItem1',
                'aDict.nestedDict.aClass': DummyClass,
                'aDict.nestedFunc': 'Scooby',
                'aClass.classVar1': 123,
                'anObj.instanceVar1': 123,
                'anObj.meth3': 'A string',
                })

for k in testNamespace.keys():
    # put them in the globals for the valueFromFrame tests
    exec('%s = testNamespace[k]'%k)
##################################################
## TEST BASE CLASSES

class NameMapperTest(unittest.TestCase):
    """Shared harness: subclasses override `get` to route lookups through a
    particular NameMapper entry point."""
    # Allow NotFound to count as a test failure alongside AssertionError.
    failureException = (NotFound, AssertionError)
    _testNamespace = testNamespace
    _results = results

    def namespace(self):
        return self._testNamespace

    def VFN(self, name, autocall=True):
        # Lookup via valueForName against the fixture namespace.
        return valueForName(self.namespace(), name, autocall)

    def VFS(self, searchList, name, autocall=True):
        # Lookup via valueFromSearchList.
        return valueFromSearchList(searchList, name, autocall)

    # alias to be overriden later
    get = VFN

    def check(self, name):
        """Assert that looking up `name` yields the recorded expected value."""
        got = self.get(name)
        if name in autoCallResults:
            expected = autoCallResults[name]
        else:
            expected = self._results[name]
        assert got == expected
##################################################
## TEST CASE CLASSES
class VFN(NameMapperTest):
    """Lookup battery driven through valueForName (the base class `get`).

    BUG FIX: test17/test18 were defined twice with identical bodies; the
    later definitions silently shadowed the earlier ones (dead code), so
    the duplicate pair has been removed.
    """

    def test1(self):
        """string in dict lookup"""
        self.check('aStr')

    def test2(self):
        """string in dict lookup in a loop"""
        for i in range(10):
            self.check('aStr')

    def test3(self):
        """int in dict lookup"""
        self.check('anInt')

    def test4(self):
        """int in dict lookup in a loop"""
        for i in range(10):
            self.check('anInt')

    def test5(self):
        """float in dict lookup"""
        self.check('aFloat')

    def test6(self):
        """float in dict lookup in a loop"""
        for i in range(10):
            self.check('aFloat')

    def test7(self):
        """class in dict lookup"""
        self.check('aClass')

    def test8(self):
        """class in dict lookup in a loop"""
        for i in range(10):
            self.check('aClass')

    def test9(self):
        """aFunc in dict lookup"""
        self.check('aFunc')

    def test10(self):
        """aFunc in dict lookup in a loop"""
        for i in range(10):
            self.check('aFunc')

    def test11(self):
        """aMeth in dict lookup"""
        self.check('aMeth')

    def test12(self):
        """aMeth in dict lookup in a loop"""
        for i in range(10):
            self.check('aMeth')

    def test13(self):
        """aMeth in dict lookup"""
        self.check('aMeth')

    def test14(self):
        """aMeth in dict lookup in a loop"""
        for i in range(10):
            self.check('aMeth')

    def test15(self):
        """anObj in dict lookup"""
        self.check('anObj')

    def test16(self):
        """anObj in dict lookup in a loop"""
        for i in range(10):
            self.check('anObj')

    def test17(self):
        """aDict in dict lookup"""
        self.check('aDict')

    def test18(self):
        """aDict in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict')

    def test19(self):
        """aClass.classVar1 in dict lookup"""
        self.check('aClass.classVar1')

    def test20(self):
        """aClass.classVar1 in dict lookup in a loop"""
        for i in range(10):
            self.check('aClass.classVar1')

    def test23(self):
        """anObj.instanceVar1 in dict lookup"""
        self.check('anObj.instanceVar1')

    def test24(self):
        """anObj.instanceVar1 in dict lookup in a loop"""
        for i in range(10):
            self.check('anObj.instanceVar1')

    ## tests 22, 25, and 26 removed when the underscored lookup was removed

    def test27(self):
        """anObj.meth1 in dict lookup"""
        self.check('anObj.meth1')

    def test28(self):
        """anObj.meth1 in dict lookup in a loop"""
        for i in range(10):
            self.check('anObj.meth1')

    def test29(self):
        """aDict.one in dict lookup"""
        self.check('aDict.one')

    def test30(self):
        """aDict.one in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.one')

    def test31(self):
        """aDict.nestedDict in dict lookup"""
        self.check('aDict.nestedDict')

    def test32(self):
        """aDict.nestedDict in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.nestedDict')

    def test33(self):
        """aDict.nestedDict.one in dict lookup"""
        self.check('aDict.nestedDict.one')

    def test34(self):
        """aDict.nestedDict.one in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.nestedDict.one')

    def test35(self):
        """aDict.nestedFunc in dict lookup"""
        self.check('aDict.nestedFunc')

    def test36(self):
        """aDict.nestedFunc in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.nestedFunc')

    def test37(self):
        """aDict.nestedFunc in dict lookup - without autocalling"""
        assert self.get('aDict.nestedFunc', False) == dummyFunc

    def test38(self):
        """aDict.nestedFunc in dict lookup in a loop - without autocalling"""
        for i in range(10):
            assert self.get('aDict.nestedFunc', False) == dummyFunc

    def test39(self):
        """aMeth in dict lookup - without autocalling"""
        assert self.get('aMeth', False) == self.namespace()['aMeth']

    def test40(self):
        """aMeth in dict lookup in a loop - without autocalling"""
        for i in range(10):
            assert self.get('aMeth', False) == self.namespace()['aMeth']

    def test41(self):
        """anObj.meth3 in dict lookup"""
        self.check('anObj.meth3')

    def test42(self):
        """aMeth in dict lookup in a loop"""
        for i in range(10):
            self.check('anObj.meth3')

    def test43(self):
        """NotFound test"""
        def test(self=self):
            self.get('anObj.methX')
        self.assertRaises(NotFound, test)

    def test44(self):
        """NotFound test in a loop"""
        def test(self=self):
            self.get('anObj.methX')
        for i in range(10):
            self.assertRaises(NotFound, test)

    def test45(self):
        """Other exception from meth test"""
        def test(self=self):
            self.get('anObj.meth2')
        self.assertRaises(ValueError, test)

    def test46(self):
        """Other exception from meth test in a loop"""
        def test(self=self):
            self.get('anObj.meth2')
        for i in range(10):
            self.assertRaises(ValueError, test)

    def test47(self):
        """None in dict lookup"""
        self.check('none')

    def test48(self):
        """None in dict lookup in a loop"""
        for i in range(10):
            self.check('none')

    def test49(self):
        """EmptyString in dict lookup"""
        self.check('emptyString')

    def test50(self):
        """EmptyString in dict lookup in a loop"""
        for i in range(10):
            self.check('emptyString')

    def test51(self):
        """Other exception from func test"""
        def test(self=self):
            self.get('funcThatRaises')
        self.assertRaises(ValueError, test)

    def test52(self):
        """Other exception from func test in a loop"""
        def test(self=self):
            self.get('funcThatRaises')
        for i in range(10):
            self.assertRaises(ValueError, test)

    def test53(self):
        """Other exception from func test"""
        def test(self=self):
            self.get('aDict.nestedDict.funcThatRaises')
        self.assertRaises(ValueError, test)

    def test54(self):
        """Other exception from func test in a loop"""
        def test(self=self):
            self.get('aDict.nestedDict.funcThatRaises')
        for i in range(10):
            self.assertRaises(ValueError, test)

    def test55(self):
        """aDict.nestedDict.aClass in dict lookup"""
        self.check('aDict.nestedDict.aClass')

    def test56(self):
        """aDict.nestedDict.aClass in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.nestedDict.aClass')

    def test57(self):
        """aDict.nestedDict.aClass in dict lookup - without autocalling"""
        assert self.get('aDict.nestedDict.aClass', False) == DummyClass

    def test58(self):
        """aDict.nestedDict.aClass in dict lookup in a loop - without autocalling"""
        for i in range(10):
            assert self.get('aDict.nestedDict.aClass', False) == DummyClass

    def test59(self):
        """Other exception from func test -- but without autocalling shouldn't raise"""
        self.get('aDict.nestedDict.funcThatRaises', False)

    def test60(self):
        """Other exception from func test in a loop -- but without autocalling shouldn't raise"""
        for i in range(10):
            self.get('aDict.nestedDict.funcThatRaises', False)
class VFS(VFN):
    """Repeats every VFN test via valueFromSearchList."""
    # Number/shape of namespaces in the search list; subclasses override.
    _searchListLength = 1

    def searchList(self):
        """Build a search list of the configured length and container type."""
        lng = self._searchListLength
        if lng == 1:
            return [self.namespace()]
        elif lng == 2:
            return [self.namespace(), {'dummy':1234}]
        elif lng == 3:
            # a tuple for kicks
            return ({'dummy':1234}, self.namespace(), {'dummy':1234})
        elif lng == 4:
            # a generator for more kicks
            return self.searchListGenerator()

    def searchListGenerator(self):
        """Yield namespaces lazily to exercise generator search lists."""
        class Test:
            pass
        for i in [Test(), {'dummy':1234}, self.namespace(), {'dummy':1234}]:
            yield i

    def get(self, name, autocall=True):
        # Route lookups through valueFromSearchList instead of valueForName.
        return self.VFS(self.searchList(), name, autocall)
# Same battery with progressively longer/odder search lists (see
# VFS.searchList for what each length means).
class VFS_2namespaces(VFS):
    _searchListLength = 2

class VFS_3namespaces(VFS):
    _searchListLength = 3

class VFS_4namespaces(VFS):
    _searchListLength = 4
class VFF(VFN):
    """Repeats the VFN tests through valueFromFrame (frame-locals lookup)."""

    def get(self, name, autocall=True):
        ns = self._testNamespace
        # NOTE: these locals look unused but are load-bearing --
        # valueFromFrame() resolves names from the *calling frame's*
        # locals, so they shadow the module-level fixture values.
        aStr = ns['aStr']
        aFloat = ns['aFloat']
        none = 'some'
        return valueFromFrame(name, autocall)

    def setUp(self):
        """Mod some of the data
        """
        # Copy-on-write so other test classes see the pristine fixtures.
        self._testNamespace = ns = self._testNamespace.copy()
        self._results = res = self._results.copy()
        # Values that the frame-local shadowing in get() must surface.
        ns['aStr'] = res['aStr'] = 'BLARG'
        ns['aFloat'] = res['aFloat'] = 0.1234
        res['none'] = 'some'
        # Builtins should also be reachable through frame lookup.
        res['True'] = True
        res['False'] = False
        res['None'] = None
        res['eval'] = eval

    def test_VFF_1(self):
        """Builtins"""
        self.check('True')
        self.check('None')
        self.check('False')
        assert self.get('eval', False)==eval
        assert self.get('range', False)==range
class VFFSL(VFS):
    """Repeats the VFS tests via valueFromFrameOrSearchList: frame locals
    win, then the search list, then globals."""
    _searchListLength = 1

    def setUp(self):
        """Mod some of the data
        """
        # Copy-on-write so other test classes see the pristine fixtures.
        self._testNamespace = ns = self._testNamespace.copy()
        self._results = res = self._results.copy()
        # These must be found in VFFSL()'s frame locals, not the namespace.
        ns['aStr'] = res['aStr'] = 'BLARG'
        ns['aFloat'] = res['aFloat'] = 0.1234
        res['none'] = 'some'

        del ns['anInt'] # will be picked up by globals

    def VFFSL(self, searchList, name, autocall=True):
        # NOTE: anInt/none are intentionally "unused" locals -- they are
        # what the frame-based lookup is expected to find.
        anInt = 1
        none = 'some'
        return valueFromFrameOrSearchList(searchList, name, autocall)

    def get(self, name, autocall=True):
        return self.VFFSL(self.searchList(), name, autocall)
# Frame-or-search-list battery with longer search lists.
class VFFSL_2(VFFSL):
    _searchListLength = 2

class VFFSL_3(VFFSL):
    _searchListLength = 3

class VFFSL_4(VFFSL):
    _searchListLength = 4
# Jython lacks the CPython frame introspection these variants rely on,
# so drop the frame-based test classes there.
if sys.platform.startswith('java'):
    del VFF, VFFSL, VFFSL_2, VFFSL_3, VFFSL_4
##################################################
## if run from the command line ##

if __name__ == '__main__':
    # Discover and run every test class defined above.
    unittest.main()
| [
[
1,
0,
0.0076,
0.0019,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0095,
0.0019,
0,
0.66,
0.0385,
209,
0,
1,
0,
0,
209,
0,
0
],
[
1,
0,
0.0114,
0.0019,
0,
... | [
"import sys",
"import types",
"import os",
"import os.path",
"import unittest",
"from Cheetah.NameMapper import NotFound, valueForKey, \\\n valueForName, valueFromSearchList, valueFromFrame, valueFromFrameOrSearchList",
"class DummyClass:\n classVar1 = 123\n\n def __init__(self):\n self... |
import Cheetah.Template
def render(template_file, **kwargs):
    '''
    Cheetah.Django.render() takes the template filename
    (the filename should be a file in your Django
    TEMPLATE_DIRS)

    Any additional keyword arguments passed into the
    template are propagated into the template's searchList
    '''
    # Imported lazily so importing this module does not require a
    # configured Django environment.
    import django.http
    import django.template.loader
    source, loader = django.template.loader.find_template_source(template_file)
    t = Cheetah.Template.Template(source, searchList=[kwargs])
    # FIX: use str() rather than invoking the dunder directly.
    return django.http.HttpResponse(str(t))
| [
[
1,
0,
0.0625,
0.0625,
0,
0.66,
0,
171,
0,
1,
0,
0,
171,
0,
0
],
[
2,
0,
0.5938,
0.875,
0,
0.66,
1,
24,
0,
2,
1,
0,
0,
0,
4
],
[
8,
1,
0.4688,
0.5,
1,
0.59,
0,... | [
"import Cheetah.Template",
"def render(template_file, **kwargs):\n '''\n Cheetah.Django.render() takes the template filename \n (the filename should be a file in your Django \n TEMPLATE_DIRS)\n\n Any additional keyword arguments are passed into the \n template are propogated ... |
from glob import glob
import os
from os import listdir
import os.path
import re
from tempfile import mktemp
def _escapeRegexChars(txt,
escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
return escapeRE.sub(r'\\\1', txt)
def findFiles(*args, **kw):
    """Recursively collect all files matching a glob pattern.

    Thin convenience wrapper around the FileFinder class; see its
    docstring for the accepted arguments."""
    finder = FileFinder(*args, **kw)
    return finder.files()
def replaceStrInFiles(files, theStr, repl):
    """Literal (non-regex) find/replace of 'theStr' with 'repl' across every
    file in 'files' -- string.replace() on a multi-file basis.

    The literal is regex-escaped and delegated to FindAndReplace; returns
    that class's results dictionary describing the matches found."""
    pattern = _escapeRegexChars(theStr)
    job = FindAndReplace(files, pattern, repl)
    return job.results()
def replaceRegexInFiles(files, pattern, repl):
    """Regex find/replace of 'pattern' with 'repl' across every file in
    'files' -- re.sub on a multi-file basis.

    Wrapper around FindAndReplace; returns its results dictionary
    describing the matches found."""
    job = FindAndReplace(files, pattern, repl)
    return job.results()
##################################################
## CLASSES
class FileFinder:
    """Traverses a directory tree and finds all files in it that match one of
    the specified glob patterns."""

    def __init__(self, rootPath,
                 globPatterns=('*',),
                 ignoreBasenames=('CVS', '.svn'),
                 ignoreDirs=(),
                 ):
        # rootPath: directory to walk; globPatterns: patterns matched per
        # directory; ignoreBasenames/ignoreDirs: pruned from traversal.
        self._rootPath = rootPath
        self._globPatterns = globPatterns
        self._ignoreBasenames = ignoreBasenames
        self._ignoreDirs = ignoreDirs
        self._files = []
        # Traversal happens eagerly at construction time.
        self.walkDirTree(rootPath)

    def walkDirTree(self, dir='.',
                    listdir=os.listdir,
                    isdir=os.path.isdir,
                    join=os.path.join,
                    ):
        """Recursively walk through a directory tree and find matching files."""
        # Iterative (stack-based) walk; os functions are bound as default
        # args as a lookup-speed micro-optimization of the era.
        processDir = self.processDir
        filterDir = self.filterDir

        pendingDirs = [dir]
        addDir = pendingDirs.append
        getDir = pendingDirs.pop

        while pendingDirs:
            dir = getDir()
            ## process this dir
            processDir(dir)

            ## and add sub-dirs
            for baseName in listdir(dir):
                fullPath = join(dir, baseName)
                if isdir(fullPath):
                    if filterDir(baseName, fullPath):
                        addDir( fullPath )

    def filterDir(self, baseName, fullPath):
        """A hook for filtering out certain dirs. """
        # Returns True when the directory should be descended into.
        return not (baseName in self._ignoreBasenames or
                    fullPath in self._ignoreDirs)

    def processDir(self, dir, glob=glob):
        # Collect files in `dir` matching any of the glob patterns.
        extend = self._files.extend
        for pattern in self._globPatterns:
            extend( glob(os.path.join(dir, pattern)) )

    def files(self):
        """All matched file paths, in traversal order."""
        return self._files
class _GenSubberFunc:
"""Converts a 'sub' string in the form that one feeds to re.sub (backrefs,
groups, etc.) into a function that can be used to do the substitutions in
the FindAndReplace class."""
backrefRE = re.compile(r'\\([1-9][0-9]*)')
groupRE = re.compile(r'\\g<([a-zA-Z_][a-zA-Z_]*)>')
def __init__(self, replaceStr):
self._src = replaceStr
self._pos = 0
self._codeChunks = []
self.parse()
def src(self):
return self._src
def pos(self):
return self._pos
def setPos(self, pos):
self._pos = pos
def atEnd(self):
return self._pos >= len(self._src)
def advance(self, offset=1):
self._pos += offset
def readTo(self, to, start=None):
if start == None:
start = self._pos
self._pos = to
if self.atEnd():
return self._src[start:]
else:
return self._src[start:to]
## match and get methods
def matchBackref(self):
return self.backrefRE.match(self.src(), self.pos())
def getBackref(self):
m = self.matchBackref()
self.setPos(m.end())
return m.group(1)
def matchGroup(self):
return self.groupRE.match(self.src(), self.pos())
def getGroup(self):
m = self.matchGroup()
self.setPos(m.end())
return m.group(1)
## main parse loop and the eat methods
def parse(self):
while not self.atEnd():
if self.matchBackref():
self.eatBackref()
elif self.matchGroup():
self.eatGroup()
else:
self.eatStrConst()
def eatStrConst(self):
startPos = self.pos()
while not self.atEnd():
if self.matchBackref() or self.matchGroup():
break
else:
self.advance()
strConst = self.readTo(self.pos(), start=startPos)
self.addChunk(repr(strConst))
def eatBackref(self):
self.addChunk( 'm.group(' + self.getBackref() + ')' )
def eatGroup(self):
self.addChunk( 'm.group("' + self.getGroup() + '")' )
def addChunk(self, chunk):
self._codeChunks.append(chunk)
## code wrapping methods
def codeBody(self):
return ', '.join(self._codeChunks)
def code(self):
return "def subber(m):\n\treturn ''.join([%s])\n" % (self.codeBody())
def subberFunc(self):
exec(self.code())
return subber
class FindAndReplace:
    """Find and replace all instances of 'patternOrRE' with 'replacement' for
    each file in the 'files' list. This is a multi-file version of re.sub().

    'patternOrRE' can be a raw regex pattern or
    a regex object as generated by the re module. 'replacement' can be any
    string that would work with patternOrRE.sub(replacement, fileContents).

    NOTE(review): Python 2 era code -- `basestring` and os.popen3 are gone
    in Python 3.  Files are rewritten IN PLACE.
    """
    def __init__(self, files, patternOrRE, replacement,
                 recordResults=True):

        # Normalize the pattern to a compiled regex ...
        if isinstance(patternOrRE, basestring):
            self._regex = re.compile(patternOrRE)
        else:
            self._regex = patternOrRE
        # ... and the replacement to a callable usable with regex.sub().
        if isinstance(replacement, basestring):
            self._subber = _GenSubberFunc(replacement).subberFunc()
        else:
            self._subber = replacement

        self._pattern = pattern = self._regex.pattern
        self._files = files
        self._results = {}
        self._recordResults = recordResults
        ## see if we should use pgrep to do the file matching
        self._usePgrep = False
        if (os.popen3('pgrep')[2].read()).startswith('Usage:'):
            ## now check to make sure pgrep understands the pattern
            tmpFile = mktemp()
            open(tmpFile, 'w').write('#')
            if not (os.popen3('pgrep "' + pattern + '" ' + tmpFile)[2].read()):
                # it didn't print an error msg so we're ok
                self._usePgrep = True
            os.remove(tmpFile)

        # The whole job runs eagerly at construction time.
        self._run()

    def results(self):
        # {filename: {'count': int, 'matches': [...]}} -- only populated
        # when recordResults is True.
        return self._results

    def _run(self):
        regex = self._regex
        subber = self._subDispatcher
        usePgrep = self._usePgrep
        pattern = self._pattern
        for file in self._files:
            if not os.path.isfile(file):
                continue # skip dirs etc.

            self._currFile = file
            found = False
            # 'orig' in locals() tracks whether the file has been read yet;
            # deliberate (if fragile) use of locals() as state.
            if 'orig' in locals():
                del orig
            if self._usePgrep:
                # Cheap pre-filter via the external pgrep tool.
                if os.popen('pgrep "' + pattern + '" ' + file ).read():
                    found = True
            else:
                orig = open(file).read()
                if regex.search(orig):
                    found = True
            if found:
                if 'orig' not in locals():
                    orig = open(file).read()
                # Rewrite the file in place with substitutions applied.
                new = regex.sub(subber, orig)
                open(file, 'w').write(new)

    def _subDispatcher(self, match):
        # Wraps the real subber so each match can be recorded first.
        if self._recordResults:
            if self._currFile not in self._results:
                res = self._results[self._currFile] = {}
                res['count'] = 0
                res['matches'] = []
            else:
                res = self._results[self._currFile]
            res['count'] += 1
            res['matches'].append({'contents': match.group(),
                                   'start': match.start(),
                                   'end': match.end(),
                                   }
                                  )
        return self._subber(match)
class SourceFileStats:
    """Aggregate line-count statistics (code / blank / comment) over a set
    of source files."""

    _fileStats = None

    def __init__(self, files):
        """Compute stats for every path in `files` (iterable of filenames)."""
        self._fileStats = stats = {}
        for file in files:
            stats[file] = self.getFileStats(file)

    def rawStats(self):
        """Per-file stats: {filename: {codeLines, blankLines, ...}}."""
        return self._fileStats

    def summary(self):
        """Totals across all files, with the same keys as per-file stats."""
        codeLines = 0
        blankLines = 0
        commentLines = 0
        totalLines = 0
        for fileStats in self.rawStats().values():
            codeLines += fileStats['codeLines']
            blankLines += fileStats['blankLines']
            commentLines += fileStats['commentLines']
            totalLines += fileStats['totalLines']

        stats = {'codeLines': codeLines,
                 'blankLines': blankLines,
                 'commentLines': commentLines,
                 'totalLines': totalLines,
                 }
        return stats

    def printStats(self):
        # Intentionally a no-op placeholder in the original.
        pass

    def getFileStats(self, fileName):
        """Classify each line of `fileName` as comment, blank, or code.

        BUG FIX: the original patterns required at least one leading
        whitespace character before '#' (or end-of-line), so comments
        starting in column 0 and completely empty lines were miscounted
        as code; '\\s*' fixes both, and the blank pattern is now a raw
        string to avoid the invalid-escape warning.
        """
        codeLines = 0
        blankLines = 0
        commentLines = 0
        commentLineRe = re.compile(r'\s*#.*$')
        blankLineRe = re.compile(r'\s*$')
        lines = open(fileName).read().splitlines()
        totalLines = len(lines)
        for line in lines:
            if commentLineRe.match(line):
                commentLines += 1
            elif blankLineRe.match(line):
                blankLines += 1
            else:
                codeLines += 1

        stats = {'codeLines': codeLines,
                 'blankLines': blankLines,
                 'commentLines': commentLines,
                 'totalLines': totalLines,
                 }
        return stats
| [
[
1,
0,
0.0056,
0.0028,
0,
0.66,
0,
958,
0,
1,
0,
0,
958,
0,
0
],
[
1,
0,
0.0084,
0.0028,
0,
0.66,
0.0769,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0112,
0.0028,
0,
... | [
"from glob import glob",
"import os",
"from os import listdir",
"import os.path",
"import re",
"from tempfile import mktemp",
"def _escapeRegexChars(txt,\n escapeRE=re.compile(r'([\\$\\^\\*\\+\\.\\?\\{\\}\\[\\]\\(\\)\\|\\\\])')):\n return escapeRE.sub(r'\\\\\\1', txt)",
" ret... |
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Sep 8 2010)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
# wxFormBuilder emits widget IDs by attaching them to the wx module
# itself; values start at 1000 to stay clear of the stock wx.ID_* range.
wx.ID_Window = 1000
wx.ID_Window_StatusBar = 1001
wx.ID_Window_MenuBar = 1002
wx.ID_Window_Quit = 1003
wx.ID_Window_SplitterWindow_LeftPanel = 1004
###########################################################################
## Class Window
###########################################################################
class Window ( wx.Frame ):
	"""Generated main frame (wxFormBuilder -- do not hand-edit logic): a
	splitter with a grey canvas panel on the left and a scrollable column
	of buttons on the right."""

	def __init__( self, parent ):
		wx.Frame.__init__ ( self, parent, id = wx.ID_Window, title = u"Klein", pos = wx.DefaultPosition, size = wx.Size( 705,238 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )

		self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )

		# Status bar plus a File menu with a single Quit item.
		self.mStatusBar = self.CreateStatusBar( 1, wx.ST_SIZEGRIP, wx.ID_Window_StatusBar )
		self.mMenuBar = wx.MenuBar( 0 )
		self.mFile = wx.Menu()
		self.mQuit = wx.MenuItem( self.mFile, wx.ID_Window_Quit, u"Quit", wx.EmptyString, wx.ITEM_NORMAL )
		self.mFile.AppendItem( self.mQuit )
		self.mMenuBar.Append( self.mFile, u"File" )

		self.SetMenuBar( self.mMenuBar )

		mSizer = wx.BoxSizer( wx.HORIZONTAL )

		self.mSplitterWindow = wx.SplitterWindow( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.SP_3D )
		self.mSplitterWindow.Bind( wx.EVT_IDLE, self.mSplitterWindowOnIdle )

		# Left pane: a grey canvas panel inside its own sizer.
		self.mLeftPanel = wx.Panel( self.mSplitterWindow, wx.ID_Window_SplitterWindow_LeftPanel, wx.DefaultPosition, wx.DefaultSize, 0 )
		mRightSizer = wx.BoxSizer( wx.VERTICAL )

		self.mCanvasPanel = wx.Panel( self.mLeftPanel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
		self.mCanvasPanel.SetBackgroundColour( wx.Colour( 128, 128, 128 ) )

		mRightSizer.Add( self.mCanvasPanel, 1, wx.EXPAND |wx.ALL, 5 )

		self.mLeftPanel.SetSizer( mRightSizer )
		self.mLeftPanel.Layout()
		mRightSizer.Fit( self.mLeftPanel )

		# Right pane: a vertically stacked column of generated buttons.
		self.mRightPanel = wx.Panel( self.mSplitterWindow, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.VSCROLL )
		mLeftSizer = wx.BoxSizer( wx.VERTICAL )

		self.m_button38 = wx.Button( self.mRightPanel, wx.ID_ANY, u"1", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button38, 0, wx.ALL, 5 )

		self.m_button39 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button39, 0, wx.ALL, 5 )

		self.m_button40 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button40, 0, wx.ALL, 5 )

		self.m_button41 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button41, 0, wx.ALL, 5 )

		self.m_button42 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button42, 0, wx.ALL, 5 )

		self.m_button43 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button43, 0, wx.ALL, 5 )

		self.m_button44 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button44, 0, wx.ALL, 5 )

		self.m_button45 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button45, 0, wx.ALL, 5 )

		self.m_button46 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button46, 0, wx.ALL, 5 )

		self.m_button47 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button47, 0, wx.ALL, 5 )

		self.m_button48 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button48, 0, wx.ALL, 5 )

		self.m_button49 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button49, 0, wx.ALL, 5 )

		self.m_button50 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button50, 0, wx.ALL, 5 )

		self.m_button51 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button51, 0, wx.ALL, 5 )

		self.m_button52 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button52, 0, wx.ALL, 5 )

		self.m_button53 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button53, 0, wx.ALL, 5 )

		self.m_button54 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button54, 0, wx.ALL, 5 )

		self.m_button55 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button55, 0, wx.ALL, 5 )

		self.m_button56 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button56, 0, wx.ALL, 5 )

		self.m_button57 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button57, 0, wx.ALL, 5 )

		self.m_button58 = wx.Button( self.mRightPanel, wx.ID_ANY, u"-1", wx.DefaultPosition, wx.DefaultSize, 0 )
		mLeftSizer.Add( self.m_button58, 0, wx.ALL, 5 )

		self.mRightPanel.SetSizer( mLeftSizer )
		self.mRightPanel.Layout()
		mLeftSizer.Fit( self.mRightPanel )
		self.mSplitterWindow.SplitVertically( self.mLeftPanel, self.mRightPanel, 486 )
		mSizer.Add( self.mSplitterWindow, 1, wx.EXPAND, 5 )

		self.SetSizer( mSizer )
		self.Layout()

		self.Centre( wx.BOTH )

	def __del__( self ):
		pass

	def mSplitterWindowOnIdle( self, event ):
		# Pin the initial sash position once, then detach this idle handler.
		self.mSplitterWindow.SetSashPosition( 486 )
		self.mSplitterWindow.Unbind( wx.EVT_IDLE )
# Hand-written bootstrap (not generated): create the frame and enter the
# wx event loop.
app = wx.App()
win = Window(None)
win.Show(True)
app.MainLoop()
| [
[
1,
0,
0.0704,
0.007,
0,
0.66,
0,
666,
0,
1,
0,
0,
666,
0,
0
],
[
14,
0,
0.0845,
0.007,
0,
0.66,
0.1,
180,
1,
0,
0,
0,
0,
1,
0
],
[
14,
0,
0.0915,
0.007,
0,
0.66,
... | [
"import wx",
"wx.ID_Window = 1000",
"wx.ID_Window_StatusBar = 1001",
"wx.ID_Window_MenuBar = 1002",
"wx.ID_Window_Quit = 1003",
"wx.ID_Window_SplitterWindow_LeftPanel = 1004",
"class Window ( wx.Frame ):\n\t\n\tdef __init__( self, parent ):\n\t\twx.Frame.__init__ ( self, parent, id = wx.ID_Window, title... |
""" some utility function for use in load_balance benchmark """
# MPI imports
from mpi4py import MPI
# Per-process MPI context: the world communicator plus this process's
# size and rank within it (set up at import time).
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
import sys
import os
from os.path import join, exists
import traceback
from optparse import OptionParser
# logging imports
import logging
# local imports
from pysph.base.kernels import CubicSplineKernel
from pysph.base.point import Point
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.base.particle_array import ParticleArray
from pysph.parallel.load_balancer import get_load_balancer_class
from pysph.solver.particle_generator import DensityComputationMode as Dcm
from pysph.solver.particle_generator import MassComputationMode as Mcm
from pysph.solver.basic_generators import RectangleGenerator, LineGenerator
LoadBalancer = get_load_balancer_class()
def parse_options(args=None):
"""parse commandline options from given list (default=sys.argv[1:])"""
# default values
square_width = 1.0
np_d = 50
particle_spacing = square_width / np_d
particle_radius = square_width / np_d
sph_interpolations = 1
num_iterations = 10
num_load_balance_iterations = 500
max_cell_scale = 2.0
op = OptionParser()
op.add_option('-t', '--type', dest='type', default="square",
help='type of problem to load_balance, one of "dam_break" or "square"')
op.add_option('-w', '--width', dest='square_width',
metavar='SQUARE_WIDTH')
op.add_option('-s', '--spacing', dest='particle_spacing',
metavar='PARTICLE_SPACING')
op.add_option('-r', '--radius', dest='particle_radius',
metavar='PARTICLE_RADIUS')
op.add_option('-d', '--destdir', dest='destdir',
metavar='DESTDIR')
op.add_option('-i', '--sph-interpolations', dest='sph_interpolations',
metavar='SPH_INTERPOLATIONS')
op.add_option('-n', '--num-iterations', dest='num_iterations',
metavar='NUM_ITERATIONS')
op.add_option('-l', '--num-load-balance-iterations',
dest='num_load_balance_iterations',
metavar='NUM_LOAD_BALANCE_ITERATIONS')
op.add_option('-o', '--write-vtk',
action="store_true", default=False, dest='write_vtk',
help='write a vtk file after all iterations are done')
op.add_option('-v', '--verbose',
action="store_true", default=True, dest='verbose',
help='print large amounts of debug information')
op.add_option('-c', '--max-cell-scale', dest='max_cell_scale',
metavar='MAX_CELL_SCALE',
help='specify the ratio of largest cell to smallest cell')
# parse the input arguments
args = op.parse_args()
options = args[0]
# setup the default values or the ones passed from the command line
if options.destdir is None:
print 'No destination directory specified. Using current dir'
options.destdir = ''
options.destdir = os.path.abspath(options.destdir)
# create the destination directory if it does not exist.
if not exists(options.destdir):
os.mkdir(options.destdir)
# logging
options.logger = logger = logging.getLogger()
log_filename = os.path.join(options.destdir, 'load_balance.log.%d'%rank)
if options.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(level=log_level, filename=log_filename, filemode='w')
#logger.addHandler(logging.StreamHandler())
# read the square_width to use
if options.square_width == None:
logger.warn('Using default square width of %f'%(square_width))
options.square_width = square_width
options.square_width = float(options.square_width)
# read the particle spacing
if options.particle_spacing == None:
logger.warn('Using default particle spacing of %f'%(particle_spacing))
options.particle_spacing = particle_spacing
options.particle_spacing = float(options.particle_spacing)
# read the particle radius
if options.particle_radius == None:
logger.warn('Using default particle radius of %f'%(particle_radius))
options.particle_radius = particle_radius
options.particle_radius = float(options.particle_radius)
# read the number of sph-interpolations to perform
if options.sph_interpolations == None:
logger.warn('Using default number of SPH interpolations %f'%(
sph_interpolations))
options.sph_interpolations = sph_interpolations
options.sph_interpolations = int(sph_interpolations)
# read the total number of iterations to run
if options.num_iterations == None:
logger.warn('Using default number of iterations %d'%(num_iterations))
options.num_iterations = num_iterations
options.num_iterations = int(options.num_iterations)
if options.num_load_balance_iterations == None:
logger.warn('Running %d initial load balance iterations'
%(num_load_balance_iterations))
options.num_load_balance_iterations = num_load_balance_iterations
options.num_load_balance_iterations = int(num_load_balance_iterations)
if options.max_cell_scale == None:
logger.warn('Using default max cell scale of %f'%(max_cell_scale))
options.max_cell_scale = max_cell_scale
options.max_cell_scale = float(options.max_cell_scale)
# one node zero - write this setting into a file.
if rank == 0:
settings_file = options.destdir + '/settings.dat'
f = open(settings_file, 'w')
f.write('Run with command : %s\n'%(sys.argv))
f.write('destdir = %s\n'%(options.destdir))
f.write('square_width = %f\n'%(options.square_width))
f.write('particle_spacing = %f\n'%(options.particle_spacing))
f.write('particle_radius = %f\n'%(options.particle_radius))
f.write('sph_interpolations = %d\n'%(options.sph_interpolations))
f.write('num_iterations = %d\n'%(options.num_iterations))
f.close()
return options
def create_particles(options):
    """Create the particle arrays for the requested problem type.

    For ``options.type == "square"`` a single filled square block of
    particles is returned; for ``"dam_break"`` a fluid block and the
    dam walls are returned as two arrays.  Only rank 0 generates actual
    particles; other ranks create empty arrays carrying the same
    properties (particles presumably arrive later via load balancing --
    confirm against the benchmark driver).
    """
    if options.type == "square":
        # create the square block of particles.
        start_point = Point(0, 0, 0)
        end_point = Point(options.square_width, options.square_width, 0)

        parray = ParticleArray()
        if rank == 0:
            # rank 0 generates the filled rectangle of particles
            rg = RectangleGenerator(start_point=start_point,
                                    end_point=end_point,
                                    particle_spacing_x1=options.particle_spacing,
                                    particle_spacing_x2=options.particle_spacing,
                                    density_computation_mode=Dcm.Set_Constant,
                                    particle_density=1000.0,
                                    mass_computation_mode=Mcm.Compute_From_Density,
                                    particle_h=options.particle_radius,
                                    kernel=CubicSplineKernel(2),
                                    filled=True)
            tmp = rg.get_particles()
            parray.append_parray(tmp)

        if rank != 0:
            # add some necessary properties to the particle array.
            parray.add_property({'name':'x'})
            parray.add_property({'name':'y'})
            parray.add_property({'name':'z'})
            parray.add_property({'name':'h', 'default':options.particle_radius})
            parray.add_property({'name':'rho', 'default':1000.})
            parray.add_property({'name':'pid'})
            parray.add_property({'name':'_tmp', 'default':0.0})
            parray.add_property({'name':'m'})
        else:
            # rank 0's generated array only lacks these two extras
            parray.add_property({'name':'_tmp'})
            parray.add_property({'name':'pid', 'default':0.0})

        return [parray]
    elif options.type == "dam_break":
        dam_wall = ParticleArray()
        dam_fluid = ParticleArray()

        if rank == 0:
            # geometry and discretization parameters for the dam problem
            radius = 0.2
            dam_width=10.0
            dam_height=7.0
            solid_particle_h=radius
            dam_particle_spacing=radius/9.
            solid_particle_mass=1.0
            origin_x=origin_y=0.0

            fluid_particle_h=radius
            # NOTE(review): fluid_density is never used below -- the
            # rectangle generator hard-codes particle_density=1000.
            fluid_density=1000.
            fluid_column_height=3.0
            fluid_column_width=2.0
            fluid_particle_spacing=radius

            # generate the left wall - a line
            lg = LineGenerator(particle_mass=solid_particle_mass,
                               mass_computation_mode=Mcm.Set_Constant,
                               density_computation_mode=Dcm.Ignore,
                               particle_h=solid_particle_h,
                               start_point=Point(0, 0, 0),
                               end_point=Point(0, dam_height, 0),
                               particle_spacing=dam_particle_spacing)
            tmp = lg.get_particles()
            dam_wall.append_parray(tmp)

            # generate one half of the base
            lg.start_point = Point(dam_particle_spacing, 0, 0)
            lg.end_point = Point(dam_width/2, 0, 0)
            tmp = lg.get_particles()
            dam_wall.append_parray(tmp)

            # generate particles for the left column of fluid.
            rg = RectangleGenerator(
                start_point=Point(origin_x+2.0*solid_particle_h,
                                  origin_y+2.0*solid_particle_h,
                                  0.0),
                end_point=Point(origin_x+2.0*solid_particle_h+fluid_column_width,
                                origin_y+2.0*solid_particle_h+fluid_column_height, 0.0),
                particle_spacing_x1=fluid_particle_spacing,
                particle_spacing_x2=fluid_particle_spacing,
                density_computation_mode=Dcm.Set_Constant,
                mass_computation_mode=Mcm.Compute_From_Density,
                particle_density=1000.,
                particle_h=fluid_particle_h,
                kernel=CubicSplineKernel(2),
                filled=True)
            # NOTE: this rebinds dam_fluid; the empty ParticleArray
            # created above is discarded on rank 0.
            dam_fluid = rg.get_particles()

            # generate the right wall - a line
            lg = LineGenerator(particle_mass=solid_particle_mass,
                               mass_computation_mode=Mcm.Set_Constant,
                               density_computation_mode=Dcm.Ignore,
                               particle_h=solid_particle_h,
                               start_point=Point(dam_width, 0, 0),
                               end_point=Point(dam_width, dam_height, 0),
                               particle_spacing=dam_particle_spacing)
            tmp = lg.get_particles()
            dam_wall.append_parray(tmp)

            # generate the right half of the base
            lg.start_point = Point(dam_width/2.+dam_particle_spacing, 0, 0)
            lg.end_point = Point(dam_width, 0, 0)
            tmp = lg.get_particles()
            dam_wall.append_parray(tmp)

        for parray in [dam_fluid, dam_wall]:
            if rank != 0:
                # add some necessary properties to the particle array.
                parray.add_property({'name':'x'})
                parray.add_property({'name':'y'})
                parray.add_property({'name':'z'})
                parray.add_property({'name':'h', 'default':options.particle_radius})
                parray.add_property({'name':'rho', 'default':1000.})
                parray.add_property({'name':'pid'})
                parray.add_property({'name':'_tmp', 'default':0.0})
                parray.add_property({'name':'m'})
            else:
                parray.add_property({'name':'_tmp'})
                parray.add_property({'name':'pid', 'default':0.0})
        return [dam_fluid, dam_wall]
def create_cell_manager(options):
print 'creating cell manager', options
# create a parallel cell manager.
cell_manager = ParallelCellManager(arrays_to_bin=[],
max_cell_scale=options.max_cell_scale,
dimension=2,
load_balancing=False,
initialize=False)
# enable load balancing
cell_manager.load_balancer = LoadBalancer(parallel_cell_manager=cell_manager)
cell_manager.load_balancer.skip_iteration = 1
cell_manager.load_balancer.threshold_ratio = 10.
for i,pa in enumerate(create_particles(options)):
cell_manager.arrays_to_bin.append(pa)
print 'parray %d:'%i, pa.get_number_of_particles()
cell_manager.initialize()
print 'num_particles', cell_manager.get_number_of_particles()
return cell_manager
def get_lb_args():
    """Return the list of load-balancer configurations to benchmark."""
    configs = [
        {'method': 'normal'},
        {'method': 'normal', 'adaptive': True},
        {'method': 'serial'},
        {'method': 'serial', 'adaptive': True},
        {'method': 'serial', 'distr_func': 'auto'},
        {'method': 'serial', 'distr_func': 'geometric'},
        {'method': 'serial_mkmeans', 'max_iter': 200, 'c': 0.3, 't': 0.2,
         'tr': 0.8, 'u': 0.4, 'e': 3, 'er': 6, 'r': 2.0},
        {'method': 'serial_sfc', 'sfc_func_name': 'morton'},
        {'method': 'serial_sfc', 'sfc_func_name': 'hilbert'},
        {'method': 'serial_metis'},
    ]
    return configs
def get_desc_name(lbargs):
    """Build a short descriptive name for a load-balancer argument dict.

    The name starts with the 'method' value and appends 'a' (adaptive),
    the SFC function name, and the distribution function when present,
    joined by underscores.
    """
    parts = [lbargs.get('method', '')]
    if lbargs.get('adaptive', False):
        parts.append('a')
    for key in ('sfc_func_name', 'distr_func'):
        value = lbargs.get(key)
        if value:
            parts.append(value)
    return '_'.join(parts)
| [
[
8,
0,
0.003,
0.003,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0121,
0.003,
0,
0.66,
0.0417,
985,
0,
1,
0,
0,
985,
0,
0
],
[
14,
0,
0.0152,
0.003,
0,
0.66,
... | [
"\"\"\" some utility function for use in load_balance benchmark \"\"\"",
"from mpi4py import MPI",
"comm = MPI.COMM_WORLD",
"size = comm.Get_size()",
"rank = comm.Get_rank()",
"import sys",
"import os",
"from os.path import join, exists",
"import traceback",
"from optparse import OptionParser",
... |
''' Module to run bench modules which need to be run in mpi
This module imports the given module to run, and returns the result
of the bench functions of the modules. Also results are written to
mpirunner.log file
Usage:
1. Print the result in formatted form:
$ mpiexec -n <num_procs> python mpirunner.py <bench_name>
1. Print the result dictionary in pickled form (useful in automation):
$ mpiexec -n <num_procs> python mpirunner.py p <bench_name>
'''
from mpi4py import MPI
import sys
import pickle
rank = MPI.COMM_WORLD.Get_rank()
size = MPI.COMM_WORLD.Get_size()
def mpirun(args=None):
    """Import a bench module, run its bench() and report the results.

    args -- CLI argument list; None means we were spawned via MPI and
    the bench name is broadcast from the parent communicator.  Leading
    'p' requests pickled output, 'i' disables stdout/stderr redirection.

    While the bench runs, stdout/stderr are redirected to a per-rank
    log file (mpirunner.log.<rank>) unless 'i' was given.  Only rank 0
    formats and emits the results.
    """
    pkl = False
    redir_op = True
    comm = None
    if args is None:
        # spawned under MPI: the parent broadcasts the bench name
        comm = MPI.Comm.Get_parent()
        bench_name = comm.bcast('', root=0)
    else:
        if args[0] == 'p':
            pkl = True
            bench_name = args[1]
        elif args[0] == 'i':
            redir_op = False
            bench_name = args[1]
        else:
            bench_name = args[0]

    logfile = open('mpirunner.log.%d'%rank, 'w')
    stdout_orig = sys.stdout
    stderr_orig = sys.stderr
    if redir_op:
        sys.stdout = sys.stderr = logfile
    try:
        bench_mod = __import__(bench_name)
        res = bench_mod.bench()
    finally:
        # BUGFIX: always restore the standard streams and close the log,
        # even if the bench raises -- previously an exception left
        # stdout/stderr pointing at the (leaked) log file.
        sys.stdout = stdout_orig
        sys.stderr = stderr_orig
        logfile.close()

    if rank != 0:
        return

    # format the results: a header with the bench name and process
    # count, then one right-padded "name <tab> value" line per entry
    outtext = ''
    s = bench_name.split('_',1)[1]+' %d\n'%size
    s += '#'*len(s)
    outtext += s + '\n'
    for func in res:
        for k in sorted(func.keys()):
            s = k.ljust(40) + '\t%g'%func[k]
            outtext += s + '\n'
        outtext += '\n'

    logfile = open('mpirunner.log', 'w')
    try:
        logfile.write(outtext)
    finally:
        logfile.close()

    if args is None:
        # spawned case: ship the raw results back to the parent
        comm.send(res, 0)
    elif pkl:
        sys.stdout.write(pickle.dumps(res))
    else:
        sys.stdout.write(outtext)
# Command-line entry point: forward the CLI arguments (after the script
# name) to mpirun; typically invoked via mpiexec.
if __name__ == '__main__':
    mpirun(sys.argv[1:])
| [
[
8,
0,
0.0959,
0.1781,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2055,
0.0137,
0,
0.66,
0.1429,
985,
0,
1,
0,
0,
985,
0,
0
],
[
1,
0,
0.2192,
0.0137,
0,
0.66... | [
"''' Module to run bench modules which need to be run in mpi\n\nThis module imports the given module to run, and returns the result\nof the bench functions of the modules. Also results are written to\nmpirunner.log file\n\nUsage:\n1. Print the result in formatted form:",
"from mpi4py import MPI",
"import sys",
... |
""" Time comparison for the Cython and OpenCL integrators.
We use the NBody integration example as the benchmark. Here, and all
neighbor locator is used. The setup consists of four points at the
vertices of the unit square in 2D.
"""
import numpy
from time import time
import pysph.solver.api as solver
import pysph.base.api as base
import pysph.sph.api as sph
import pyopencl as cl
# Shorthands for the locator/domain-manager enums used below.
AllPairLocatorCython = base.NeighborLocatorType.NSquareNeighborLocator
AllPairLocatorOpenCL = base.OpenCLNeighborLocatorType.AllPairNeighborLocator
DomainManager = base.DomainManagerType.DomainManager

# constants
np = 1024
tf = 1.0
dt = 0.01
nsteps = tf/dt

# generate the particles: random positions and masses.  Both particle
# arrays share the exact same data so the two solvers integrate
# identical initial conditions.
x = numpy.random.random(np)
y = numpy.random.random(np)
z = numpy.random.random(np)
m = numpy.random.random(np)

precision = "single"
ctx = solver.create_some_context()

pa1 = base.get_particle_array(name="cython", x=x, y=y, z=z, m=m)
pa2 = base.get_particle_array(name="opencl", cl_precision=precision,
                              x=x, y=y, z=z, m=m)

particles1 = base.Particles([pa1,], locator_type=AllPairLocatorCython)
particles2 = base.CLParticles([pa2, ])

kernel = base.CubicSplineKernel(dim=2)

# create the cython solver
solver1 = solver.Solver(dim=2, integrator_type=solver.EulerIntegrator)
solver1.add_operation(solver.SPHIntegration(
    sph.NBodyForce.withargs(), on_types=[0], updates=['u','v'],
    id="force")
                      )
solver1.add_operation_step(types=[0])
solver1.setup(particles1)
solver1.set_final_time(tf)
solver1.set_time_step(dt)
# print frequency one past the step count: suppress intermediate output
solver1.set_print_freq(nsteps + 1)
solver1.set_output_directory(".")

# create the OpenCL solver with the identical operation setup
solver2 = solver.Solver(dim=2, integrator_type=solver.EulerIntegrator)
solver2.add_operation(solver.SPHIntegration(
    sph.NBodyForce.withargs(), on_types=[0], updates=['u','v'],
    id="force")
                      )
solver2.add_operation_step(types=[0])
solver2.set_cl(True)
solver2.setup(particles2)
solver2.set_final_time(tf)
solver2.set_time_step(dt)
solver2.set_print_freq(nsteps + 1)
solver2.set_output_directory(".")

# time the Cython run
t1 = time()
solver1.solve()
cython_time = time() - t1

# time the OpenCL run
t1 = time()
solver2.solve()
opencl_time = time() - t1

# copy device buffers back before comparing positions
pa2.read_from_buffer()

#print pa1.x - pa2.x
# report the mean absolute x-difference between the two backends
print sum(abs(pa1.x - pa2.x))/np

print "=================================================================="
print "OpenCL execution time = %g s"%opencl_time
print "Cython execution time = %g s"%cython_time
print "Speedup = %g"%(cython_time/opencl_time)
| [
[
8,
0,
0.0446,
0.0792,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.099,
0.0099,
0,
0.66,
0.0189,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.1089,
0.0099,
0,
0.66,... | [
"\"\"\" Time comparison for the Cython and OpenCL integrators.\n\nWe use the NBody integration example as the benchmark. Here, and all\nneighbor locator is used. The setup consists of four points at the\nvertices of the unit square in 2D.\n\n\n\"\"\"",
"import numpy",
"from time import time",
"import pysph.so... |
""" Benchmark for the PySPH neighbor search functions. """
import sys
import numpy
import time
#PySPH imports
import pysph.base.api as base
def get_points(np = 10000):
    """ Return np random points with a uniform smoothing length.

    Coordinates are drawn uniformly from [-1, 1] in x, y and z.  Every
    particle gets the same h: half of a search radius taken as six
    times the estimated per-particle length scale.
    """
    # three independent uniform draws, one per coordinate axis
    x = numpy.random.random(np)*2.0 - 1.0
    y = numpy.random.random(np)*2.0 - 1.0
    z = numpy.random.random(np)*2.0 - 1.0

    # h ~ 2*vol_per_particle and rad ~ (2-3)*h  =>  rad ~ 6*h
    # NOTE(review): the 2D-style estimate pow(4.0/np, 0.5) is kept even
    # though points are generated in 3D -- confirm this is intended.
    scale = pow(4.0/np, 0.5)
    search_radius = 6 * scale
    h = numpy.ones_like(x) * search_radius * 0.5

    return x, y, z, h
def get_particle_array(x, y, z, h):
    """Package the coordinate and smoothing-length arrays as a
    base.ParticleArray."""
    data = {'x': x, 'y': y, 'z': z, 'h': h}
    pdict = {}
    for name, arr in data.items():
        pdict[name] = {'name': name, 'data': arr}
    return base.ParticleArray(**pdict)
def bin_particles(pa):
    """ Bin the particles of `pa` into cells.

    Parameters:
    -----------
    pa -- a particle array from the get_particle_array function

    Returns the base.Particles container; its construction performs
    the binning.
    """
    return base.Particles([pa,])
def cache_neighbors(particles):
    """ Force neighbor caching by querying the locator for particle 0. """
    dest = particles.arrays[0]
    locator = particles.get_neighbor_particle_locator(dest, dest, 2.0)
    locator.py_get_nearest_particles(0)
def get_stats(particles):
    """Print cell-manager statistics: the cell count plus the average
    and maximum number of particles per cell (for particle array 0)."""
    cd = particles.cell_manager.cells_dict
    ncells = len(cd)

    np_max = 0  # largest particle count seen in any single cell
    _np = 0     # running total of particles over all cells

    for cid, cell in cd.iteritems():
        # each cell keeps one index list per binned array; array 0 here
        np = cell.index_lists[0].length
        _np += np
        if np > np_max:
            np_max = np

    print "\n\n\n##############################################################"
    print "CELL MANAGER DATA"
    print "CellManager cell size ", particles.cell_manager.cell_size
    # NOTE(review): _np/ncells is integer division under Python 2 --
    # confirm truncation is intended.  Trailing comma keeps the maximum
    # on the same output line.
    print "Number of cells %d\t Particles/cell (avg) %f "%(ncells, _np/ncells),
    print " Maximum %d particles"%(np_max)
if __name__ == '__main__':

    # optional single argument: the number of particles to generate
    if len(sys.argv) > 1:
        np = sys.argv[-1]
        x,y,z,h = get_points(np = int(sys.argv[-1]))
        pa = get_particle_array(x,y,z,h)
    else:
        x,y,z,h = get_points()
        pa = get_particle_array(x,y,z,h)

    # np is re-read from the array (the string assigned above is unused)
    np = pa.get_number_of_particles()
    print "Number of particles: ", np

    # same estimate as inside get_points: search radius ~ 6 * scale
    vol_per_particle = pow(4.0/np, 0.5)
    radius = 6 * vol_per_particle
    print "Search Radius %f. "%(radius)

    # time the binning step
    t = time.time()
    particles = bin_particles(pa)
    bt = time.time() - t
    print "Time for binning: %f s" %(bt)

    # time the neighbor-caching step
    t = time.time()
    cache_neighbors(particles)
    ct = time.time() - t
    print "Time for caching neighbors: %f s" %(ct)

    print "\nTotal time %fs"%(bt + ct)
    get_stats(particles)
| [
[
8,
0,
0.0092,
0.0092,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0183,
0.0092,
0,
0.66,
0.1,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0275,
0.0092,
0,
0.66,
... | [
"\"\"\" Benchmark for the PySPH neighbor search functions. \"\"\"",
"import sys",
"import numpy",
"import time",
"import pysph.base.api as base",
"def get_points(np = 10000):\n \"\"\" Get np particles in domain [1, -1] X [-1, 1] \"\"\"\n\n x = numpy.random.random(np)*2.0 - 1.0\n y = numpy.random... |
from setuptools import find_packages, setup
from Cython.Distutils import build_ext
from numpy.distutils.extension import Extension
# Cython extension modules built by this setup script; both compile as
# C++ with optimization and all warnings enabled.
ext_modules = [Extension("cython_nnps", ["cython_nnps.pyx"],
                         language="c++",
                         extra_compile_args=["-O3", "-Wall"]
                         ),
               Extension("nnps_bench", ["nnps_bench.pyx"],
                         language="c++",
                         extra_compile_args=["-O3", "-Wall"]
                         ),
               ]

# Cython's build_ext drives the .pyx -> C++ -> module pipeline.
setup(
    name = "Cython NNPS",
    cmdclass = {'build_ext':build_ext},
    ext_modules=ext_modules
    )
| [
[
1,
0,
0.0455,
0.0455,
0,
0.66,
0,
182,
0,
2,
0,
0,
182,
0,
0
],
[
1,
0,
0.0909,
0.0455,
0,
0.66,
0.25,
413,
0,
1,
0,
0,
413,
0,
0
],
[
1,
0,
0.1364,
0.0455,
0,
0.... | [
"from setuptools import find_packages, setup",
"from Cython.Distutils import build_ext",
"from numpy.distutils.extension import Extension",
"ext_modules = [Extension(\"cython_nnps\", [\"cython_nnps.pyx\"],\n language=\"c++\",\n extra_compile_args=[\"-O3\", \"-Wa... |
"""This module compiles the specified (all) the cython .pyx files
in the specified (current) directory into python extensions
"""
import sys
import os
from setuptools import setup
from Cython.Distutils import build_ext
from numpy.distutils.extension import Extension
import numpy
def get_spcl_extn(extn):
    """ special-case extensions with specific requirements

    Currently a placeholder: the extension is returned unchanged, but
    extensions in the C++ list could have extra sources attached here.
    """
    needs_cpp_extras = ('sph_funcs', 'nnps', 'cell', 'cpp_vs_pyx',
                        'cpp_extensions', 'nnps_brute_force')
    if extn.name in needs_cpp_extras:
        # placeholder: e.g. extn.sources.append('cPoint.cpp')
        pass
    return extn
def compile_extns(extensions=None, dirname=None, inc_dirs=None):
    """compile cython extensions

    `extensions` is list of extensions to compile (None => all pyx files)
    `dirname` is directory in which extensions are found (None = current directory)
    `inc_dirs` is list of additional cython include directories

    Temporarily chdirs into `dirname` and fakes sys.argv so distutils
    runs ``build_ext --inplace``; both are restored before returning.
    """
    if dirname is None:
        dirname = os.path.abspath(os.curdir)
    olddir = os.path.abspath(os.curdir)
    os.chdir(dirname)
    if extensions is None:
        # default: every .pyx file in the directory, sorted by name
        extensions = sorted([f[:-4] for f in os.listdir(os.curdir) if f.endswith('.pyx')])
    if inc_dirs is None:
        inc_dirs = []
    # always add the sibling `source` directory of the parent dir
    inc_dirs.append(os.path.join(os.path.split(os.path.abspath(os.path.curdir))[0],'source'))
    print inc_dirs
    # pretend we were invoked as `<script> build_ext --inplace`
    sys.argvold = sys.argv[:]
    sys.argv = [__file__, 'build_ext','--inplace']

    inc_dirs = [numpy.get_include()] + inc_dirs
    cargs = []  # extra compile args (e.g. '-O3')

    # extension modules
    extns = []
    for extnname in extensions:
        extn = Extension(extnname, [extnname+".pyx"], include_dirs=inc_dirs,
                         language='c++', extra_compile_args=cargs)
        extn = get_spcl_extn(extn)
        extns.append(extn)

    setup(name='PySPH-bench',
          ext_modules = extns,
          include_package_data = True,
          cmdclass={'build_ext': build_ext},
          )

    # restore the working directory and argv
    os.chdir(olddir)
    sys.argv = sys.argvold
if __name__ == '__main__':
    # -h/--help prints usage instead of compiling anything
    if '-h' in sys.argv or '--help' in sys.argv:
        print '''usage:
    python setup.py [extension1, [extension2, [...]]]
    compiles the cython extensions present in the current directory
    '''
    elif len(sys.argv) > 1:
        # compile specified extensions
        compile_extns(sys.argv[1:])
    else:
        # compile all extensions found in current directory
        compile_extns()
| [
[
1,
0,
0.0164,
0.0164,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0328,
0.0164,
0,
0.66,
0.1429,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0656,
0.0164,
0,
... | [
"import sys",
"import os",
"from setuptools import setup",
"from Cython.Distutils import build_ext",
"from numpy.distutils.extension import Extension",
"import numpy",
"def get_spcl_extn(extn):\n \"\"\" special-case extensions with specific requirements \"\"\"\n cpp_extensions = 'sph_funcs', 'nnps... |
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
import numpy
import time
import pyopencl as cl
# enum shorthands for the OpenCL domain manager / neighbor locator
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType

# number of particles
np = 1 << 20

# number of times a single calc is evaluated
neval = 5

# 1D setup: np equispaced particles on [0, 1]; m = spacing, h = 2*m
x = numpy.linspace(0,1,np)
m = numpy.ones_like(x) * (x[1] - x[0])
h = 2*m
rho = numpy.ones_like(x)

# get the OpenCL context and device. Default to the first device
platforms = cl.get_platforms()
for platform in platforms:
    print("===============================================================")
    print("Platform name:", platform.name)
    print("Platform profile:", platform.profile)
    print("Platform vendor:", platform.vendor)
    print("Platform version:", platform.version)

    devices = platform.get_devices()
    for device in devices:
        ctx = cl.Context([device])
        print("===============================================================")
        print("Device name:", device.name)
        print("Device type:", cl.device_type.to_string(device.type))
        print("Device memory: ", device.global_mem_size//1024//1024, 'MB')
        print("Device max clock speed:", device.max_clock_frequency, 'MHz')
        print("Device compute units:", device.max_compute_units)

        # also run in double precision when the device supports it
        precision_types = ['single']
        device_extensions = device.get_info(cl.device_info.EXTENSIONS)
        if 'cl_khr_fp64' in device_extensions:
            precision_types.append('double')

        for prec in precision_types:
            print "--------------------------------------------------------"
            print """Summation Density for %g million particles using %s precision"""%(np/1e6, prec)

            pa = base.get_particle_array(cl_precision=prec,
                                         name="test", x=x,h=h,m=m,rho=rho)

            particles = base.Particles(arrays=[pa,])
            cl_particles = base.CLParticles(
                arrays=[pa,],
                domain_manager_type=CLDomain.LinkedListManager,
                cl_locator_type=CLLocator.LinkedListSPHNeighborLocator)

            kernel = base.CubicSplineKernel(dim=1)

            # create the function
            func = sph.SPHRho.get_func(pa,pa)

            # create the CLCalc object (setup time is measured)
            t1 = time.time()
            cl_calc = sph.CLCalc(particles=cl_particles,
                                 sources=[pa,],
                                 dest=pa,
                                 kernel=kernel,
                                 funcs=[func,],
                                 updates=['rho'] )
            cl_calc.reset_arrays = True

            # setup OpenCL for PySPH
            cl_calc.setup_cl(ctx)
            cl_setup_time = time.time() - t1

            # create a normal calc object
            t1 = time.time()
            calc = sph.SPHCalc(particles=particles, sources=[pa,], dest=pa,
                               kernel=kernel, funcs=[func,], updates=['rho'] )
            cython_setup_time = time.time() - t1

            # evaluate pysph on the OpenCL device!
            t1 = time.time()
            for i in range(neval):
                cl_calc.sph()
            cl_elapsed = time.time() - t1

            # Read the buffer contents
            t1 = time.time()
            pa.read_from_buffer()
            read_elapsed = time.time() - t1

            print "\nPyOpenCL setup time = %g s"%(cl_setup_time)
            print "PyOpenCL execution time = %g s" %(cl_elapsed)
            print "PyOpenCL buffer transfer time: %g s "%(read_elapsed)

            # _tmpx holds the result; copy before Cython overwrites it
            cl_rho = pa.get('_tmpx').copy()

            # Do the same thing with Cython.
            t1 = time.time()
            for i in range(neval):
                calc.sph('_tmpx')
            cython_elapsed = time.time() - t1

            print "Cython setup time = %g s"%(cython_setup_time)
            print "Cython execution time = %g s" %(cython_elapsed)

            cython_total = cython_setup_time + cython_elapsed
            opencl_total = cl_setup_time + cl_elapsed + read_elapsed

            # Compare the results
            cython_rho = pa.get('_tmpx')
            diff = sum(abs(cl_rho - cython_rho))
            print "sum(abs(cl_rho - cy_rho))/np = ", diff/np

            print "Execution speedup: %g"%(cython_elapsed/cl_elapsed)
            print "Overall Speedup: %g "%(cython_total/opencl_total)
| [
[
1,
0,
0.0079,
0.0079,
0,
0.66,
0,
212,
0,
1,
0,
0,
212,
0,
0
],
[
1,
0,
0.0159,
0.0079,
0,
0.66,
0.0667,
683,
0,
1,
0,
0,
683,
0,
0
],
[
1,
0,
0.0238,
0.0079,
0,
... | [
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"import pysph.sph.api as sph",
"import numpy",
"import time",
"import pyopencl as cl",
"CLDomain = base.DomainManagerType",
"CLLocator = base.OpenCLNeighborLocatorType",
"np = 1 << 20",
"neval = 5",
"x = numpy.linspace(0,1,np)... |
""" Benchmark example for binning particles in Cython and OpenCL """
import numpy
import numpy.random as random
from time import time
import pysph.base.api as base
import pysph.solver.api as solver
# enum shorthands for the OpenCL domain manager / neighbor locator
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType

# number of points
np = 2**20

# number of times to bin
nbins = 3

# generate the point set: random positions in the unit cube
x = random.random(np)
y = random.random(np)
z = random.random(np)

# h is twice the estimated per-particle length scale
vol_per_particle = numpy.power(1.0/np, 1.0/3.0)
h = numpy.ones_like(x) * 2 * vol_per_particle

precision = "single"
ctx = solver.create_some_context()

pa = base.get_particle_array(name="test", cl_precision=precision,
                             x=x, y=y, z=z, h=h)

# time Cython binning: rebuild nbins times, forcing a rebin each pass
t1 = time()
for i in range(nbins):
    particles = base.Particles([pa,])
    pa.set_dirty(True)
cython_time = time() - t1

# time OpenCL binning: the first bin happens during setup_cl, then
# nbins - 1 explicit domain-manager updates
t1 = time()
cl_particles = base.CLParticles(
    arrays=[pa,],
    domain_manager_type=CLDomain.LinkedListManager,
    cl_locator_type=CLLocator.LinkedListSPHNeighborLocator)

cl_particles.setup_cl(ctx)

domain_manager = cl_particles.domain_manager
for i in range(nbins - 1):
    domain_manager.is_dirty = False
    domain_manager.update()
opencl_time = time() - t1

print "================================================================"
print "Binning for %d particles using % s precision"%(np, precision)
print "PyOpenCL time = %g s"%(opencl_time)
print "Cython time = %g s"%(cython_time)
print "Speedup = %g"%(cython_time/opencl_time)
| [
[
8,
0,
0.0164,
0.0164,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0492,
0.0164,
0,
0.66,
0.0323,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0656,
0.0164,
0,
0.66... | [
"\"\"\" Benchmark example for binning particles in Cython and OpenCL \"\"\"",
"import numpy",
"import numpy.random as random",
"from time import time",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"CLDomain = base.DomainManagerType",
"CLLocator = base.OpenCLNeighborLocatorType... |
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
import numpy
import time
import pyopencl as cl
# enum shorthands for the locator / domain-manager types
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
Locator = base.NeighborLocatorType

# number of particles
np = 16384

# number of times a single calc is evaluated
neval = 1

# 1D setup: np equispaced particles on [0, 1]; m = spacing, h = 2*m
x = numpy.linspace(0,1,np)
m = numpy.ones_like(x) * (x[1] - x[0])
h = 2*m
rho = numpy.ones_like(x)

# get the OpenCL context and device. Default to the first device
platforms = cl.get_platforms()
for platform in platforms:
    print("===============================================================")
    print("Platform name:", platform.name)
    print("Platform profile:", platform.profile)
    print("Platform vendor:", platform.vendor)
    print("Platform version:", platform.version)

    devices = platform.get_devices()
    for device in devices:
        ctx = cl.Context([device])
        print("===============================================================")
        print("Device name:", device.name)
        print("Device type:", cl.device_type.to_string(device.type))
        print("Device memory: ", device.global_mem_size//1024//1024, 'MB')
        print("Device max clock speed:", device.max_clock_frequency, 'MHz')
        print("Device compute units:", device.max_compute_units)

        # also run in double precision when the device supports it
        precision_types = ['single']
        device_extensions = device.get_info(cl.device_info.EXTENSIONS)
        if 'cl_khr_fp64' in device_extensions:
            precision_types.append('double')

        for prec in precision_types:
            print "--------------------------------------------------------"
            print "NBody force comparison using %s precision"%(prec)

            pa = base.get_particle_array(cl_precision=prec,
                                         name="test", x=x,h=h,m=m,rho=rho)

            # all-pair (N^2) locators on both backends
            particles = base.Particles(
                arrays=[pa,],
                locator_type=Locator.NSquareNeighborLocator)
            cl_particles = base.CLParticles(
                arrays=[pa,],
                domain_manager_type=CLDomain.DomainManager,
                cl_locator_type=CLLocator.AllPairNeighborLocator)

            kernel = base.CubicSplineKernel(dim=1)

            # create the function
            func = sph.NBodyForce.get_func(pa,pa)

            # create the CLCalc object (setup time is measured)
            t1 = time.time()
            cl_calc = sph.CLCalc(particles=cl_particles,
                                 sources=[pa,],
                                 dest=pa,
                                 kernel=kernel,
                                 funcs=[func,],
                                 updates=['u','v','w'] )

            # setup OpenCL for PySPH
            cl_calc.setup_cl(ctx)
            cl_setup_time = time.time() - t1

            # create a normal calc object
            t1 = time.time()
            calc = sph.SPHCalc(particles=particles, sources=[pa,], dest=pa,
                               kernel=kernel, funcs=[func,],
                               updates=['u','v','w'] )
            cython_setup_time = time.time() - t1

            # evaluate pysph on the OpenCL device!
            t1 = time.time()
            for i in range(neval):
                cl_calc.sph()
            cl_elapsed = time.time() - t1

            # Read the buffer contents
            t1 = time.time()
            pa.read_from_buffer()
            read_elapsed = time.time() - t1

            print "\nPyOpenCL setup time = %g s"%(cl_setup_time)
            print "PyOpenCL execution time = %g s" %(cl_elapsed)
            print "PyOpenCL buffer transfer time: %g s "%(read_elapsed)

            # _tmpx holds the result; copy before Cython overwrites it
            cl_rho = pa.get('_tmpx').copy()

            # Do the same thing with Cython.
            t1 = time.time()
            for i in range(neval):
                calc.sph('_tmpx')
            cython_elapsed = time.time() - t1

            print "Cython setup time = %g s"%(cython_setup_time)
            print "Cython execution time = %g s" %(cython_elapsed)

            cython_total = cython_setup_time + cython_elapsed
            opencl_total = cl_setup_time + cl_elapsed + read_elapsed

            # Compare the results
            cython_rho = pa.get('_tmpx')
            diff = sum(abs(cl_rho - cython_rho))
            print "sum(abs(cl_rho - cy_rho))/np = ", diff/np

            print "Execution speedup: %g"%(cython_elapsed/cl_elapsed)
            print "Overall Speedup: %g "%(cython_total/opencl_total)
| [
[
1,
0,
0.0077,
0.0077,
0,
0.66,
0,
212,
0,
1,
0,
0,
212,
0,
0
],
[
1,
0,
0.0154,
0.0077,
0,
0.66,
0.0625,
683,
0,
1,
0,
0,
683,
0,
0
],
[
1,
0,
0.0231,
0.0077,
0,
... | [
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"import pysph.sph.api as sph",
"import numpy",
"import time",
"import pyopencl as cl",
"CLDomain = base.DomainManagerType",
"CLLocator = base.OpenCLNeighborLocatorType",
"Locator = base.NeighborLocatorType",
"np = 16384",
"nev... |
from cStringIO import StringIO
import compiler
import inspect
import textwrap
import tokenize
from compiler_unparse import unparse
class Comment(object):
    """ A contiguous block of comment lines.
    """

    is_comment = True

    def __init__(self, start_lineno, end_lineno, text):
        # First line of the block (1-indexed).
        self.start_lineno = start_lineno
        # Last line of the block, inclusive.
        self.end_lineno = end_lineno
        # Comment text including '#' characters but no leading spaces.
        self.text = text

    def add(self, string, start, end, line):
        """ Extend the block with one more comment line.
        """
        if start[0] < self.start_lineno:
            self.start_lineno = start[0]
        if end[0] > self.end_lineno:
            self.end_lineno = end[0]
        self.text += string

    def __repr__(self):
        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
                                   self.end_lineno, self.text)
class NonComment(object):
    """ A block of lines that are not part of a comment block.
    """

    is_comment = False

    def __init__(self, start_lineno, end_lineno):
        self.start_lineno = start_lineno
        self.end_lineno = end_lineno

    def add(self, string, start, end, line):
        """ Extend the block with another token's span.

        Whitespace-only tokens do not move the boundaries.
        """
        if not string.strip():
            # Entirely whitespace: ignore.
            return
        self.start_lineno = min(self.start_lineno, start[0])
        self.end_lineno = max(self.end_lineno, end[0])

    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
                               self.end_lineno)
class CommentBlocker(object):
    """ Pull out contiguous comment blocks.

    Feed a file through process_file(); afterwards search_for_comment()
    maps the first line of each code block to the block that immediately
    precedes it (returning its ``.text`` when that block is a comment).
    """

    def __init__(self):
        # Start with a dummy.
        self.current_block = NonComment(0, 0)

        # All of the blocks seen so far.
        self.blocks = []

        # The index mapping lines of code to their associated comment blocks.
        self.index = {}

    def process_file(self, file):
        """ Process a file object.
        """
        # tokenize.generate_tokens() is documented to take a readline-style
        # callable that returns '' at EOF.  The original code passed
        # ``file.next``, which raises StopIteration at EOF and does not exist
        # on Python 3 file objects; ``file.readline`` is the documented,
        # portable interface and behaves identically here.
        for token in tokenize.generate_tokens(file.readline):
            self.process_token(*token)
        self.make_index()

    def process_token(self, kind, string, start, end, line):
        """ Process a single token.
        """
        # State machine: alternate between building Comment and NonComment
        # blocks depending on the current block kind and the token kind.
        if self.current_block.is_comment:
            if kind == tokenize.COMMENT:
                self.current_block.add(string, start, end, line)
            else:
                self.new_noncomment(start[0], end[0])
        else:
            if kind == tokenize.COMMENT:
                self.new_comment(string, start, end, line)
            else:
                self.current_block.add(string, start, end, line)

    def new_noncomment(self, start_lineno, end_lineno):
        """ We are transitioning from a noncomment to a comment.
        """
        block = NonComment(start_lineno, end_lineno)
        self.blocks.append(block)
        self.current_block = block

    def new_comment(self, string, start, end, line):
        """ Possibly add a new comment.

        Only adds a new comment if this comment is the only thing on the line.
        Otherwise, it extends the noncomment block.
        """
        prefix = line[:start[1]]
        if prefix.strip():
            # Oops! Trailing comment, not a comment block.
            self.current_block.add(string, start, end, line)
        else:
            # A comment block.
            block = Comment(start[0], end[0], string)
            self.blocks.append(block)
            self.current_block = block

    def make_index(self):
        """ Make the index mapping lines of actual code to their associated
        prefix comments.
        """
        # Pair each non-comment block with whatever block preceded it.
        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
            if not block.is_comment:
                self.index[block.start_lineno] = prev

    def search_for_comment(self, lineno, default=None):
        """ Find the comment block just before the given line number.

        Returns None (or the specified default) if there is no such block.
        """
        if not self.index:
            self.make_index()
        block = self.index.get(lineno, None)
        # NonComment predecessors have no .text, so they yield the default.
        text = getattr(block, 'text', default)
        return text
def strip_comment_marker(text):
    """ Remove leading '#' markers from every line of a comment block.

    The de-hashed lines are re-joined and dedented as a unit so the common
    indentation (typically the space following '#') is removed as well.
    """
    stripped = [line.lstrip('#') for line in text.splitlines()]
    return textwrap.dedent('\n'.join(stripped))
def get_class_traits(klass):
    """ Yield all of the documentation for trait definitions on a class object.

    Yields (name, rhs, doc) triples: the assigned name, the unparsed
    right-hand side, and the de-hashed comment block preceding the
    assignment (empty string when there is none).
    """
    # FIXME: gracefully handle errors here or in the caller?
    source = inspect.getsource(klass)
    blocker = CommentBlocker()
    blocker.process_file(StringIO(source))
    class_node = compiler.parse(source).node.nodes[0]
    for stmt in class_node.code.nodes:
        # FIXME: handle other kinds of assignments?
        if not isinstance(stmt, compiler.ast.Assign):
            continue
        name = stmt.nodes[0].name
        rhs = unparse(stmt.expr).strip()
        comment = blocker.search_for_comment(stmt.lineno, default='')
        yield name, rhs, strip_comment_marker(comment)
| [
[
1,
0,
0.0063,
0.0063,
0,
0.66,
0,
764,
0,
1,
0,
0,
764,
0,
0
],
[
1,
0,
0.0127,
0.0063,
0,
0.66,
0.1,
738,
0,
1,
0,
0,
738,
0,
0
],
[
1,
0,
0.019,
0.0063,
0,
0.66... | [
"from cStringIO import StringIO",
"import compiler",
"import inspect",
"import textwrap",
"import tokenize",
"from compiler_unparse import unparse",
"class Comment(object):\n \"\"\" A comment block.\n \"\"\"\n is_comment = True\n def __init__(self, start_lineno, end_lineno, text):\n #... |
""" Turn compiler.ast structures back into executable python code.
The unparse method takes a compiler.ast tree and transforms it back into
valid python code. It is incomplete and currently only works for
import statements, function calls, function definitions, assignments, and
basic expressions.
Inspired by python-2.5-svn/Demo/parser/unparse.py
fixme: We may want to move to using _ast trees because the compiler for
them is about 6 times faster than compiler.compile.
"""
import sys
import cStringIO
from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
def unparse(ast, single_line_functions=False):
    """ Return python source text for a compiler.ast tree.

    Parameters
    ----------
    ast : compiler.ast node
        The tree to convert back into source text.
    single_line_functions : bool, optional
        If True, emit function bodies on a single line.
    """
    out = cStringIO.StringIO()
    UnparseCompilerAst(ast, out, single_line_functions)
    return out.getvalue().lstrip()
# Relative binding strength of the binary operators that __binary_op() may
# need to parenthesize.  Keys are str(node.__class__) for compiler.ast node
# classes; higher values bind tighter.
op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
                  'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
class UnparseCompilerAst:
    """ Methods in this class recursively traverse an AST and
        output source code for the abstract syntax; original formatting
        is disregarded.
    """

    #########################################################################
    # object interface.
    #########################################################################

    def __init__(self, tree, file = sys.stdout, single_line_functions=False):
        """ Unparser(tree, file=sys.stdout) -> None.

            Print the source for tree to file.
        """
        self.f = file
        # When single_line_functions is set, _Function temporarily disables
        # indentation so the whole body is emitted on one line.
        self._single_func = single_line_functions
        self._do_indent = True
        self._indent = 0
        self._dispatch(tree)
        self._write("\n")
        self.f.flush()

    #########################################################################
    # Unparser private interface.
    #########################################################################

    ### format, output, and dispatch methods ################################

    def _fill(self, text = ""):
        "Indent a piece of text, according to the current indentation level"
        if self._do_indent:
            self._write("\n"+" "*self._indent + text)
        else:
            self._write(text)

    def _write(self, text):
        "Append a piece of text to the current line."
        self.f.write(text)

    def _enter(self):
        "Print ':', and increase the indentation."
        self._write(": ")
        self._indent += 1

    def _leave(self):
        "Decrease the indentation level."
        self._indent -= 1

    def _dispatch(self, tree):
        "_dispatcher function, _dispatching tree type T to method _T."
        if isinstance(tree, list):
            for t in tree:
                self._dispatch(t)
            return
        meth = getattr(self, "_"+tree.__class__.__name__)
        # In single-line mode, bare None nodes are skipped instead of being
        # written out.
        if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
            return
        meth(tree)

    #########################################################################
    # compiler.ast unparsing methods.
    #
    # There should be one method per concrete grammar type. They are
    # organized in alphabetical order.
    #########################################################################

    def _Add(self, t):
        self.__binary_op(t, '+')

    def _And(self, t):
        # Each operand is parenthesized to preserve evaluation grouping.
        self._write(" (")
        for i, node in enumerate(t.nodes):
            self._dispatch(node)
            if i != len(t.nodes)-1:
                self._write(") and (")
        self._write(")")

    def _AssAttr(self, t):
        """ Handle assigning an attribute of an object
        """
        self._dispatch(t.expr)
        self._write('.'+t.attrname)

    def _Assign(self, t):
        """ Expression Assignment such as "a = 1".

            This only handles assignment in expressions. Keyword assignment
            is handled separately.
        """
        self._fill()
        for target in t.nodes:
            self._dispatch(target)
            self._write(" = ")
        self._dispatch(t.expr)
        if not self._do_indent:
            self._write('; ')

    def _AssName(self, t):
        """ Name on left hand side of expression.

            Treat just like a name on the right side of an expression.
        """
        self._Name(t)

    def _AssTuple(self, t):
        """ Tuple on left hand side of an expression.
        """
        # _write each elements, separated by a comma.
        for element in t.nodes[:-1]:
            self._dispatch(element)
            self._write(", ")

        # Handle the last one without writing comma
        last_element = t.nodes[-1]
        self._dispatch(last_element)

    def _AugAssign(self, t):
        """ +=,-=,*=,/=,**=, etc. operations
        """
        self._fill()
        self._dispatch(t.node)
        self._write(' '+t.op+' ')
        self._dispatch(t.expr)
        if not self._do_indent:
            self._write(';')

    def _Bitand(self, t):
        """ Bit and operation.
        """
        for i, node in enumerate(t.nodes):
            self._write("(")
            self._dispatch(node)
            self._write(")")
            if i != len(t.nodes)-1:
                self._write(" & ")

    def _Bitor(self, t):
        """ Bit or operation
        """
        for i, node in enumerate(t.nodes):
            self._write("(")
            self._dispatch(node)
            self._write(")")
            if i != len(t.nodes)-1:
                self._write(" | ")

    def _CallFunc(self, t):
        """ Function call.
        """
        self._dispatch(t.node)
        self._write("(")
        comma = False
        for e in t.args:
            if comma: self._write(", ")
            else: comma = True
            self._dispatch(e)
        if t.star_args:
            if comma: self._write(", ")
            else: comma = True
            self._write("*")
            self._dispatch(t.star_args)
        if t.dstar_args:
            if comma: self._write(", ")
            else: comma = True
            self._write("**")
            self._dispatch(t.dstar_args)
        self._write(")")

    def _Compare(self, t):
        self._dispatch(t.expr)
        for op, expr in t.ops:
            self._write(" " + op + " ")
            self._dispatch(expr)

    def _Const(self, t):
        """ A constant value such as an integer value, 3, or a string, "hello".
        """
        self._dispatch(t.value)

    def _Decorators(self, t):
        """ Handle function decorators (eg. @has_units)
        """
        for node in t.nodes:
            self._dispatch(node)

    def _Dict(self, t):
        self._write("{")
        for i, (k, v) in enumerate(t.items):
            self._dispatch(k)
            self._write(": ")
            self._dispatch(v)
            if i < len(t.items)-1:
                self._write(", ")
        self._write("}")

    def _Discard(self, t):
        """ Node for when return value is ignored such as in "foo(a)".
        """
        self._fill()
        self._dispatch(t.expr)

    def _Div(self, t):
        self.__binary_op(t, '/')

    def _Ellipsis(self, t):
        self._write("...")

    def _From(self, t):
        """ Handle "from xyz import foo, bar as baz".
        """
        # fixme: Are From and ImportFrom handled differently?
        self._fill("from ")
        self._write(t.modname)
        self._write(" import ")
        for i, (name,asname) in enumerate(t.names):
            if i != 0:
                self._write(", ")
            self._write(name)
            if asname is not None:
                self._write(" as "+asname)

    def _Function(self, t):
        """ Handle function definitions
        """
        if t.decorators is not None:
            self._fill("@")
            self._dispatch(t.decorators)
        self._fill("def "+t.name + "(")
        # Pad the defaults list so it lines up 1:1 with argnames.
        defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
        for i, arg in enumerate(zip(t.argnames, defaults)):
            self._write(arg[0])
            if arg[1] is not None:
                self._write('=')
                self._dispatch(arg[1])
            if i < len(t.argnames)-1:
                self._write(', ')
        self._write(")")
        if self._single_func:
            self._do_indent = False
        self._enter()
        self._dispatch(t.code)
        self._leave()
        self._do_indent = True

    def _Getattr(self, t):
        """ Handle getting an attribute of an object
        """
        # Arithmetic sub-expressions need parentheses before '.attr'.
        if isinstance(t.expr, (Div, Mul, Sub, Add)):
            self._write('(')
            self._dispatch(t.expr)
            self._write(')')
        else:
            self._dispatch(t.expr)

        self._write('.'+t.attrname)

    def _If(self, t):
        self._fill()

        for i, (compare,code) in enumerate(t.tests):
            if i == 0:
                self._write("if ")
            else:
                self._write("elif ")
            self._dispatch(compare)
            self._enter()
            self._fill()
            self._dispatch(code)
            self._leave()
            self._write("\n")

        if t.else_ is not None:
            self._write("else")
            self._enter()
            self._fill()
            self._dispatch(t.else_)
            self._leave()
            self._write("\n")

    def _IfExp(self, t):
        self._dispatch(t.then)
        self._write(" if ")
        self._dispatch(t.test)

        if t.else_ is not None:
            self._write(" else (")
            self._dispatch(t.else_)
            self._write(")")

    def _Import(self, t):
        """ Handle "import xyz.foo".
        """
        self._fill("import ")

        for i, (name,asname) in enumerate(t.names):
            if i != 0:
                self._write(", ")
            self._write(name)
            if asname is not None:
                self._write(" as "+asname)

    def _Keyword(self, t):
        """ Keyword value assignment within function calls and definitions.
        """
        self._write(t.name)
        self._write("=")
        self._dispatch(t.expr)

    def _List(self, t):
        self._write("[")
        for i,node in enumerate(t.nodes):
            self._dispatch(node)
            if i < len(t.nodes)-1:
                self._write(", ")
        self._write("]")

    def _Module(self, t):
        if t.doc is not None:
            self._dispatch(t.doc)
        self._dispatch(t.node)

    def _Mul(self, t):
        self.__binary_op(t, '*')

    def _Name(self, t):
        self._write(t.name)

    def _NoneType(self, t):
        self._write("None")

    def _Not(self, t):
        self._write('not (')
        self._dispatch(t.expr)
        self._write(')')

    def _Or(self, t):
        self._write(" (")
        for i, node in enumerate(t.nodes):
            self._dispatch(node)
            if i != len(t.nodes)-1:
                self._write(") or (")
        self._write(")")

    def _Pass(self, t):
        self._write("pass\n")

    def _Printnl(self, t):
        self._fill("print ")
        if t.dest:
            self._write(">> ")
            self._dispatch(t.dest)
            self._write(", ")
        comma = False
        for node in t.nodes:
            if comma: self._write(', ')
            else: comma = True
            self._dispatch(node)

    def _Power(self, t):
        self.__binary_op(t, '**')

    def _Return(self, t):
        self._fill("return ")
        if t.value:
            if isinstance(t.value, Tuple):
                text = ', '.join([ name.name for name in t.value.asList() ])
                self._write(text)
            else:
                self._dispatch(t.value)
            if not self._do_indent:
                self._write('; ')

    def _Slice(self, t):
        self._dispatch(t.expr)
        self._write("[")
        if t.lower:
            self._dispatch(t.lower)
        self._write(":")
        if t.upper:
            self._dispatch(t.upper)
        #if t.step:
        #    self._write(":")
        #    self._dispatch(t.step)
        self._write("]")

    def _Sliceobj(self, t):
        for i, node in enumerate(t.nodes):
            if i != 0:
                self._write(":")
            if not (isinstance(node, Const) and node.value is None):
                self._dispatch(node)

    def _Stmt(self, tree):
        for node in tree.nodes:
            self._dispatch(node)

    def _Sub(self, t):
        self.__binary_op(t, '-')

    def _Subscript(self, t):
        self._dispatch(t.expr)
        self._write("[")
        for i, value in enumerate(t.subs):
            if i != 0:
                self._write(",")
            self._dispatch(value)
        self._write("]")

    def _TryExcept(self, t):
        self._fill("try")
        self._enter()
        self._dispatch(t.body)
        self._leave()

        for handler in t.handlers:
            self._fill('except ')
            self._dispatch(handler[0])
            if handler[1] is not None:
                self._write(', ')
                self._dispatch(handler[1])
            self._enter()
            self._dispatch(handler[2])
            self._leave()

        if t.else_:
            self._fill("else")
            self._enter()
            self._dispatch(t.else_)
            self._leave()

    def _Tuple(self, t):
        if not t.nodes:
            # Empty tuple.
            self._write("()")
        else:
            self._write("(")

            # _write each elements, separated by a comma.
            for element in t.nodes[:-1]:
                self._dispatch(element)
                self._write(", ")

            # Handle the last one without writing comma
            last_element = t.nodes[-1]
            self._dispatch(last_element)

            self._write(")")

    def _UnaryAdd(self, t):
        self._write("+")
        self._dispatch(t.expr)

    def _UnarySub(self, t):
        self._write("-")
        self._dispatch(t.expr)

    def _With(self, t):
        self._fill('with ')
        self._dispatch(t.expr)
        if t.vars:
            # NOTE(review): t.vars.name is dispatched directly; when it is a
            # plain string this routes through _str and writes its repr
            # (quoted) -- looks suspicious but preserved as-is.
            self._write(' as ')
            self._dispatch(t.vars.name)
        self._enter()
        self._dispatch(t.body)
        self._leave()
        self._write('\n')

    def _int(self, t):
        self._write(repr(t))

    def __binary_op(self, t, symbol):
        # Check if parenthesis are needed on left side and then dispatch
        has_paren = False
        left_class = str(t.left.__class__)
        if (left_class in op_precedence.keys() and
            op_precedence[left_class] < op_precedence[str(t.__class__)]):
            has_paren = True
        if has_paren:
            self._write('(')
        self._dispatch(t.left)
        if has_paren:
            self._write(')')
        # Write the appropriate symbol for operator
        self._write(symbol)
        # Check if parenthesis are needed on the right side and then dispatch
        has_paren = False
        right_class = str(t.right.__class__)
        if (right_class in op_precedence.keys() and
            op_precedence[right_class] < op_precedence[str(t.__class__)]):
            has_paren = True
        if has_paren:
            self._write('(')
        self._dispatch(t.right)
        if has_paren:
            self._write(')')

    def _float(self, t):
        # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
        # We prefer str here.
        self._write(str(t))

    def _str(self, t):
        self._write(repr(t))

    def _tuple(self, t):
        self._write(str(t))

    #########################################################################
    # NOTE(review): the original file carried ~300 lines of commented-out
    # unparse methods ported from the _ast module here (dead code, never
    # executed).  They have been removed; retrieve them from version control
    # if an _ast-based unparser is ever resurrected.
    #########################################################################
| [
[
8,
0,
0.0076,
0.014,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0163,
0.0012,
0,
0.66,
0.1667,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0174,
0.0012,
0,
0.66,... | [
"\"\"\" Turn compiler.ast structures back into executable python code.\n\n The unparse method takes a compiler.ast tree and transforms it back into\n valid python code. It is incomplete and currently only works for\n import statements, function calls, function definitions, assignments, and\n basic expr... |
import inspect
import os
import pydoc
import docscrape
from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc
import numpydoc
import comment_eater
class SphinxTraitsDoc(SphinxClassDoc):
    """Class-docstring renderer that adds a 'Traits' section alongside the
    standard numpydoc sections (the section itself is filled in later by
    ``get_doc_object``).
    """

    def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc):
        # cls must be an actual class object, not an instance.
        if not inspect.isclass(cls):
            raise ValueError("Initialise using a class. Got %r" % cls)
        self._cls = cls

        # Normalize the module prefix so names concatenate cleanly.
        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename
        self._name = cls.__name__
        self._func_doc = func_doc

        docstring = pydoc.getdoc(cls)
        docstring = docstring.split('\n')

        # De-indent paragraph
        try:
            indent = min(len(s) - len(s.lstrip()) for s in docstring
                         if s.strip())
        except ValueError:
            # min() raises ValueError when the docstring has no non-blank
            # lines; nothing to de-indent then.
            indent = 0

        for n,line in enumerate(docstring):
            docstring[n] = docstring[n][indent:]

        self._doc = docscrape.Reader(docstring)
        # Section name -> parsed content, consumed by the base-class _parse().
        self._parsed_data = {
            'Signature': '',
            'Summary': '',
            'Description': [],
            'Extended Summary': [],
            'Parameters': [],
            'Returns': [],
            'Raises': [],
            'Warns': [],
            'Other Parameters': [],
            'Traits': [],
            'Methods': [],
            'See Also': [],
            'Notes': [],
            'References': '',
            'Example': '',
            'Examples': '',
            'index': {}
            }

        self._parse()

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Description'] + self['Extended Summary'] + ['']

    def __str__(self, indent=0, func_role="func"):
        """Render all sections in a fixed order as indented ReST text."""
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Traits', 'Methods',
                           'Returns','Raises'):
            out += self._str_param_list(param_list)
        out += self._str_see_also("obj")
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_section('Example')
        out += self._str_section('Examples')
        out = self._str_indent(out,indent)
        return '\n'.join(out)
def looks_like_issubclass(obj, classname):
    """ Return True if the object has a class or superclass with the given class
    name.

    Ignores old-style classes.

    Parameters
    ----------
    obj : class
        The class object to inspect.
    classname : str
        Name to look for, matched against ``__name__`` of the class itself
        and of everything in its MRO.

    Returns
    -------
    bool
    """
    if obj.__name__ == classname:
        return True
    # Old-style (classic) classes have no __mro__; per the docstring they must
    # be ignored, whereas the original ``obj.__mro__`` raised AttributeError.
    for klass in getattr(obj, '__mro__', ()):
        if klass.__name__ == classname:
            return True
    return False
def get_doc_object(obj, what=None):
    """Build the appropriate Sphinx doc object for *obj*.

    Classes get a SphinxTraitsDoc (with a populated 'Traits' section when
    the class looks like a HasTraits subclass); functions/methods get a
    SphinxFunctionDoc; everything else a plain SphinxDocString.
    """
    if what is None:
        # Classify the object when the caller did not say what it is.
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'

    if what in ('function', 'method'):
        return numpydoc.SphinxFunctionDoc(obj, '')
    if what != 'class':
        return numpydoc.SphinxDocString(pydoc.getdoc(obj))

    doc = SphinxTraitsDoc(obj, '', func_doc=numpydoc.SphinxFunctionDoc)
    if looks_like_issubclass(obj, 'HasTraits'):
        for name, trait, comment in comment_eater.get_class_traits(obj):
            # Exclude private traits.
            if not name.startswith('_'):
                doc['Traits'].append((name, trait, comment.splitlines()))
    return doc
def initialize(app):
try:
app.connect('autodoc-process-signature', numpydoc.mangle_signature)
except:
numpydoc.monkeypatch_sphinx_ext_autodoc()
# Monkeypatch numpydoc
numpydoc.get_doc_object = get_doc_object
fn = app.config.numpydoc_phantom_import_file
if (fn and os.path.isfile(fn)):
print "[numpydoc] Phantom importing modules from", fn, "..."
numpydoc.import_phantom_module(fn)
def setup(app):
    """Sphinx extension entry point: register numpydoc handlers on *app*."""
    app.connect('autodoc-process-docstring', numpydoc.mangle_docstrings)
    # Defer the rest of the wiring until the builder exists.
    app.connect('builder-inited', initialize)
    app.add_config_value('numpydoc_phantom_import_file', None, True)
    app.add_config_value('numpydoc_edit_link', None, True)
    app.add_directive('autosummary', numpydoc.autosummary_directive, 1, (0, 0, False))
    app.add_role('autolink', numpydoc.autolink_role)
| [
[
1,
0,
0.0071,
0.0071,
0,
0.66,
0,
878,
0,
1,
0,
0,
878,
0,
0
],
[
1,
0,
0.0142,
0.0071,
0,
0.66,
0.0909,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0213,
0.0071,
0,
... | [
"import inspect",
"import os",
"import pydoc",
"import docscrape",
"from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc",
"import numpydoc",
"import comment_eater",
"class SphinxTraitsDoc(SphinxClassDoc):\n def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc):\n if n... |
import re, textwrap
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
    """Render a numpydoc-parsed docstring as Sphinx-flavoured ReST lines.

    Every ``_str_*`` helper returns a list of output lines; ``__str__``
    stitches the sections together in a fixed order.
    """

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        """Return a Sphinx rubric heading for a section."""
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        """Return a ReST field-list marker for a section."""
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        """Indent every line in *doc* by *indent* spaces."""
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out

    def _str_signature(self):
        # Signature rendering is disabled: the original implementation had an
        # unconditional ``return ['']`` followed by unreachable code that
        # would have rendered ``self['Signature']``.  The dead branch has been
        # removed; behaviour is unchanged.
        return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        """Render a parameter-style section (**name** : type + description)."""
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param,param_type,desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc,8)
                out += ['']
        return out

    def _str_section(self, name):
        """Render a free-form text section under a rubric heading."""
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            # Skip the first two lines of the base-class rendering and indent
            # the entries under the seealso directive.
            out += self._str_indent(see_also[2:])
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out

        out += ['.. index:: %s' % idx.get('default','')]
        for section, references in idx.iteritems():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += ['   single: %s' % (', '.join(references))]
            else:
                out += ['   %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            # A bare string is promoted to a one-element list in place.
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
        return out

    def __str__(self, indent=0, func_role="func"):
        """Assemble the full ReST rendering, indented by *indent* spaces."""
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Attributes', 'Methods',
                           'Returns','Raises'):
            out += self._str_param_list(param_list)
        out += self._str_see_also("obj")
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_section('Examples')
        out = self._str_indent(out,indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    # Combines numpydoc function parsing (FunctionDoc) with the Sphinx ReST
    # rendering defined in SphinxDocString; no extra behaviour needed.
    pass
class SphinxClassDoc(SphinxDocString, ClassDoc):
    # Combines numpydoc class parsing (ClassDoc) with the Sphinx ReST
    # rendering defined in SphinxDocString; no extra behaviour needed.
    pass
| [
[
1,
0,
0.0093,
0.0093,
0,
0.66,
0,
540,
0,
2,
0,
0,
540,
0,
0
],
[
1,
0,
0.0185,
0.0093,
0,
0.66,
0.25,
91,
0,
3,
0,
0,
91,
0,
0
],
[
3,
0,
0.4907,
0.9167,
0,
0.66... | [
"import re, textwrap",
"from docscrape import NumpyDocString, FunctionDoc, ClassDoc",
"class SphinxDocString(NumpyDocString):\n # string conversion routines\n def _str_header(self, name, symbol='`'):\n return ['.. rubric:: ' + name, '']\n\n def _str_field_list(self, name):\n return [':' +... |
def exact_solution(tf=0.00076, dt=1e-4, A0=100.0, a0=1.0, npoints=101):
    """ Exact solution for the elliptical drop equations.

    Integrates the coupled ODEs for the velocity parameter ``A`` and the
    semi-axis ``a`` with forward Euler up to time ``tf``, then samples the
    resulting ellipse boundary.  (Original docstring had a doubled "the".)

    Parameters
    ----------
    tf : float
        Final time up to which the ODEs are integrated.
    dt : float
        Euler time step; must be positive or the loop never terminates.
    A0, a0 : float
        Initial values of ``A`` and ``a``.  Defaults reproduce the
        previously hard-coded setup (100 and 1.0).
    npoints : int
        Number of samples of the ellipse boundary (default 101, as before).

    Returns
    -------
    (x, y, po) : tuple
        Arrays of x and y coordinates on the ellipse boundary at time
        ``tf`` and the scalar pressure-like quantity ``po``.
    """
    import numpy

    theta = numpy.linspace(0, 2*numpy.pi, npoints)

    t = 0.0
    Anew = A0
    anew = a0
    # Forward-Euler integration.  The loop body runs at least once because
    # t starts at 0 <= tf (matches the original behaviour exactly).
    while t <= tf:
        t += dt
        Aold = Anew
        aold = anew
        Anew = Aold + dt*(Aold*Aold*(aold**4 - 1))/(aold**4 + 1)
        anew = aold + dt*(-aold * Aold)

    dadt = Anew**2 * (anew**4 - 1)/(anew**4 + 1)
    po = 0.5*-anew**2 * (dadt - Anew**2)

    return anew*numpy.cos(theta), 1/anew*numpy.sin(theta), po
#############################################################################
| [
[
2,
0,
0.4667,
0.9,
0,
0.66,
0,
167,
0,
2,
1,
0,
0,
0,
3
],
[
8,
1,
0.0667,
0.0333,
1,
0.3,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
1,
0.1,
0.0333,
1,
0.3,
0.0909,
... | [
"def exact_solution(tf=0.00076, dt=1e-4):\n \"\"\" Exact solution for the the elliptical drop equations \"\"\"\n import numpy\n \n A0 = 100\n a0 = 1.0\n\n t = 0.0",
" \"\"\" Exact solution for the the elliptical drop equations \"\"\"",
" import numpy",
" A0 = 100",
" a0 = 1.0",... |
""" An example solving stress test case """
import sys
import numpy
from numpy import pi, sin, sinh, cos, cosh
import pysph.base.api as base
import pysph.sph.api as sph
import pysph.solver.api as solver
from pysph.solver.stress_solver import StressSolver, get_particle_array
from pysph.sph.funcs import stress_funcs
from pysph.sph.api import SPHFunction
# Global PySPH application object for this example (its ``options`` attribute
# is referenced by the commented-out overrides below).
app = solver.Application()
#dt = app.options.time_step if app.options.time_step else 1e-8
# Courant number and spatial dimension for the rod problem.
CFL = 0.1
dim = 3
#tf = app.options.final_time if app.options.final_time else 1e-2
class PrintPos(object):
    """Record selected properties of one particle each solver iteration and
    append them, tab-separated, to a file every ``write_interval`` steps.
    """

    def __init__(self, particle_id, props=['x'], filename='stress.dat', write_interval=100):
        self.file = open(filename, 'w')
        header = 'i\t' + '\t'.join(props) + '\n'
        self.file.write(header)
        self.res = []
        self.props = props
        self.particle_id = particle_id
        self.write_interval = write_interval

    def function(self, solver):
        """Sample the tracked particle; flush buffered rows periodically."""
        row = [solver.count]
        arr = solver.particles.arrays[0]
        for prop in self.props:
            row.append(getattr(arr, prop)[self.particle_id])
        self.res.append(row)
        if solver.count % self.write_interval == 0:
            lines = ('\t'.join(map(str, entry)) for entry in self.res)
            self.file.write('\n'.join(lines))
            self.file.write('\n')
            self.res = []
def create_particles():
    """Create the rod ('solid') and fixed boundary ('bdry') particle arrays.

    Returns
    -------
    list
        ``[pa, pb]``: ``pa`` holds the rod particles, ``pb`` the boundary
        particles at the x < dx/2 end of the cylinder.
    """
    #x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -0.105:0.105+1e-4:dx]
    dx = 0.002 # 2mm
    R = 0.02
    xl = -0.05
    L = 0.2
    # Cartesian lattice covering the bounding box of the rod.
    x,y,z = numpy.mgrid[xl:L+dx/2:dx, -R/2:(R+dx)/2:dx, -R/2:(R+dx)/2:dx]
    x = x.ravel()
    y = y.ravel()
    z = z.ravel()
    # Keep only points inside the cylinder of radius R/2 about the x-axis.
    r2 = y**2+z**2
    keep = numpy.flatnonzero(r2<R*R/4)
    x = x[keep]
    y = y[keep]
    z = z[keep]
    # Split the cylinder: points with x < dx/2 become boundary particles.
    bdry = (x<dx/2)*1.0
    bdry_indices = numpy.flatnonzero(bdry)
    rod_indices = numpy.flatnonzero(1-bdry)
    x2 = x[bdry_indices]
    y2 = y[bdry_indices]
    z2 = z[bdry_indices]
    x = x[rod_indices]
    y = y[rod_indices]
    z = z[rod_indices]
    print 'num_particles:', len(x), 'num_bdry_particles:', len(x2)
    #print bdry, numpy.flatnonzero(bdry)
    # Per-particle mass, smoothing length and unit density; ``dim`` is the
    # module-level dimension constant (3).
    m = numpy.ones_like(x)*dx**dim
    m2 = numpy.ones_like(x2)*dx**dim
    h = numpy.ones_like(x)*1.5*dx
    h2 = numpy.ones_like(x2)*1.5*dx
    rho = numpy.ones_like(x)
    rho2 = numpy.ones_like(x2)
    p = u = x*0
    vel_max = 1
    # Initial transverse velocity profile growing along the rod axis.
    v = z*vel_max/max(z)*sin(pi*x/2/L)
    w = -y*vel_max/max(y)*sin(pi*x/2/L)
    # NOTE(review): u2, v2 and w2 are computed here but never passed to the
    # boundary particle array below -- confirm whether that is intentional.
    p2 = u2 = v2 = w2 = x2*0
    pa = get_particle_array(x=x, y=y, z=z, m=m, rho=rho, h=h, p=p, u=u, v=v, w=w,
                            name='solid',
                            )
    # Rod material constants: E (Young's modulus), nu (Poisson ratio),
    # G = E / (2(1+nu)) (shear modulus), K (bulk modulus via get_K).
    pa.constants['E'] = 1e9
    pa.constants['nu'] = 0.25
    pa.constants['G'] = pa.constants['E']/(2.0*(1+pa.constants['nu']))
    pa.constants['K'] = stress_funcs.get_K(pa.constants['G'], pa.constants['nu'])
    pa.constants['rho0'] = 1.0
    pa.constants['dr0'] = dx
    # Sound speed from the bulk modulus and reference density.
    pa.constants['c_s'] = (pa.constants['K']/pa.constants['rho0'])**0.5
    pa.cs = numpy.ones_like(x) * pa.constants['c_s']
    print 'c_s:', pa.c_s
    print 'G:', pa.G/pa.c_s**2/pa.rho0
    print 'v_f:', pa.v[-1]/pa.c_s, '(%s)'%pa.v[-1]
    print 'T:', 2*numpy.pi/(pa.E*0.02**2*(1.875/0.2)**4/(12*pa.rho0*(1-pa.nu**2)))**0.5
    pa.set(idx=numpy.arange(len(pa.x)))
    print 'Number of particles: ', len(pa.x)
    #print 'CFL:', pa.c_s*dt/dx/2
    #print 'particle_motion:', -pa.u[-1]*dt
    # boundary particle array
    pb = get_particle_array(x=x2, x0=x2, y=y2, y0=y2, z=z2, z0=z2,
                            m=m2, rho=rho2,
                            h=h2, p=p2,
                            name='bdry', type=1,
                            )
    # Softer material for the boundary: E is 100x smaller than the rod's.
    pb.constants['E'] = 1e7
    pb.constants['nu'] = 0.25
    pb.constants['G'] = pb.constants['E']/(2.0*(1+pb.constants['nu']))
    pb.constants['K'] = stress_funcs.get_K(pb.constants['G'], pb.constants['nu'])
    pb.constants['rho0'] = 1.0
    pb.constants['dr0'] = dx
    pb.constants['c_s'] = (pb.constants['K']/pb.constants['rho0'])**0.5
    pb.cs = numpy.ones_like(x2) * pb.constants['c_s']
    return [pa, pb]
class FixedBoundary(SPHFunction):
def __init__(self, source, dest, props=['x','y','z'],
values=[0,0,0], setup_arrays=True):
self.props = props[:]
self.values = values[:]
SPHFunction.__init__(self, source, dest, setup_arrays)
def set_src_dst_reads(self):
self.src_reads = self.dst_reads = self.props + [i for i in self.values if isinstance(i,str)]
def eval(self, solver):
for i,prop in enumerate(self.props):
p = self.dest.get_carray(prop)
p = p.get_npy_array()
v = self.values[i]
if isinstance(v, str):
p[:] = getattr(self.dest, v)
else:
p[:] = v
# use the solvers default cubic spline kernel
# s = StressSolver(dim=2, integrator_type=solver.RK2Integrator)
# FIXME: LeapFrog Integrator does not work
s = StressSolver(dim=3, integrator_type=solver.EulerIntegrator, xsph=0.5,
marts_eps=0.3, marts_n=4, CFL=CFL)
# can be overriden by commandline arguments
dt = 1e-8
tf = 1e-2
s.set_time_step(dt)
s.set_final_time(tf)
s.set_kernel_correction(-1)
s.pfreq = 100
app.setup(s, create_particles=create_particles)
particles = s.particles
pa, pb = particles.arrays
s.pre_step_functions.append(FixedBoundary(pb, pb, props=['x','y','z','u','v','w','rho'],
values=['x0','y0','z0',0,0,0,'rho0']))
app.run()
| [
[
8,
0,
0.0058,
0.0058,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0174,
0.0058,
0,
0.66,
0.037,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0233,
0.0058,
0,
0.66,... | [
"\"\"\" An example solving stress test case \"\"\"",
"import sys",
"import numpy",
"from numpy import pi, sin, sinh, cos, cosh",
"import pysph.base.api as base",
"import pysph.sph.api as sph",
"import pysph.solver.api as solver",
"from pysph.solver.stress_solver import StressSolver, get_particle_array... |
""" An example solving stress test case """
import sys
import numpy
from numpy import pi, sin, sinh, cos, cosh
import pysph.base.api as base
import pysph.sph.api as sph
import pysph.solver.api as solver
from pysph.solver.stress_solver import StressSolver
from pysph.sph.funcs import stress_funcs
from pysph.sph.api import SPHFunction
app = solver.Application()
#dt = app.options.time_step if app.options.time_step else 1e-8
CFL = 0.1
#tf = app.options.final_time if app.options.final_time else 1e-2
class PrintPos(object):
def __init__(self, particle_id, props=['x'], filename='stress.dat', write_interval=100):
self.file = open(filename, 'w')
self.file.write('i\t'+'\t'.join(props)+'\n')
self.res = []
self.props = props
self.particle_id = particle_id
self.write_interval = write_interval
def function(self, solver):
l = [solver.count]
for prop in self.props:
l.append(getattr(solver.particles.arrays[0], prop)[self.particle_id])
self.res.append(l)
if solver.count%self.write_interval == 0:
s = '\n'.join('\t'.join(map(str,line)) for line in self.res)
self.file.write(s)
self.file.write('\n')
self.res = []
def create_particles():
#x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -0.105:0.105+1e-4:dx]
dx = 0.002 # 2mm
xl = -0.05
L = 0.2
H = 0.02
x,y = numpy.mgrid[xl:L+dx/2:dx, -H/2:(H+dx)/2:dx]
x = x.ravel()
y = y.ravel()
bdry = (x<dx/2)*1.0
bdry_indices = numpy.flatnonzero(bdry)
print 'num_particles', len(x)
#print bdry, numpy.flatnonzero(bdry)
m = numpy.ones_like(x)*dx*dx
h = numpy.ones_like(x)*1.5*dx
rho = numpy.ones_like(x)
z = numpy.zeros_like(x)
p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
cs = numpy.ones_like(x) * 10000.0
u = -x
u *= 0.0
#v = numpy.ones_like(x)*1e-2
#v = numpy.sin(x*pi/2.0/5.0)*2.17e3
#v = numpy.sin(x*pi/2.0/5.0)*1e-1
# set the v
kL = 1.875
k = kL/L
M = sin(kL)+sinh(kL)
N = cos(kL) + cosh(kL)
Q = 2*(cos(kL)*sinh(kL) - sin(kL)*cosh(kL))
v_f = 0.01
kx = k*x
# sill need to multiply by c_s
v = v_f*(M*(cos(kx)-cosh(kx)) - N*(sin(kx)-sinh(kx)))/Q
v[bdry_indices] = 0
p *= 0
h *= 1
#u = 0.1*numpy.sin(x*pi/2.0/5.0)
#u[numpy.flatnonzero(x<0.01)] = 0
pa = base.get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v, z=z,w=z,
ubar=z, vbar=z, wbar=z,
name='solid', type=1,
sigma00=z, sigma11=z, sigma22=z,
sigma01=z, sigma12=z, sigma02=z,
MArtStress00=z, MArtStress11=z, MArtStress22=z,
MArtStress01=z, MArtStress12=z, MArtStress02=z,
bdry=bdry
)
pa.constants['E'] = 1e9
pa.constants['nu'] = 0.25
pa.constants['G'] = pa.constants['E']/(2.0*(1+pa.constants['nu']))
pa.constants['K'] = stress_funcs.get_K(pa.constants['G'], pa.constants['nu'])
pa.constants['rho0'] = 1.0
pa.constants['dr0'] = dx
pa.constants['c_s'] = (pa.constants['K']/pa.constants['rho0'])**0.5
pa.cs = numpy.ones_like(x) * pa.constants['c_s']
print 'c_s:', pa.c_s
print 'G:', pa.G/pa.c_s**2/pa.rho0
pa.v *= pa.c_s
print 'v_f:', pa.v[-1]/pa.c_s, '(%s)'%pa.v[-1]
print 'T:', 2*numpy.pi/(pa.E*0.02**2*(1.875/0.2)**4/(12*pa.rho0*(1-pa.nu**2)))**0.5
pa.set(idx=numpy.arange(len(pa.x)))
print 'Number of particles: ', len(pa.x)
#print 'CFL:', pa.c_s*dt/dx/2
#print 'particle_motion:', -pa.u[-1]*dt
# boundary particle array
x, y = numpy.mgrid[xl:dx/2:dx, H/2+dx:H/2+3.5*dx:dx]
x = x.ravel()
y = y.ravel()
x2, y2 = x, -y
x = numpy.concatenate([x,x2])
y = numpy.concatenate([y,y2])
z = numpy.zeros_like(x)
rho = numpy.ones_like(x)
m = rho*dx*dx
h = 1.5*dx*rho
pb = base.get_particle_array(x=x, x0=x, y=y, y0=y, m=m, rho=rho,
h=h, p=z, u=z, v=z, z=z,w=z,
ubar=z, vbar=z, wbar=z,
name='bdry', type=1,
sigma00=z, sigma11=z, sigma22=z,
sigma01=z, sigma12=z, sigma02=z,
MArtStress00=z, MArtStress11=z, MArtStress22=z,
MArtStress01=z, MArtStress12=z, MArtStress02=z,
)
pb.constants['E'] = 1e9
pb.constants['nu'] = 0.25
pb.constants['G'] = pb.constants['E']/(2.0*(1+pb.constants['nu']))
pb.constants['K'] = stress_funcs.get_K(pb.constants['G'], pb.constants['nu'])
pb.constants['rho0'] = 1.0
pb.constants['dr0'] = dx
pb.constants['c_s'] = (pb.constants['K']/pb.constants['rho0'])**0.5
pb.cs = numpy.ones_like(x) * pb.constants['c_s']
return [pa, pb]
class FixedBoundary(SPHFunction):
def __init__(self, source, dest, props=['x','y','z'],
values=[0,0,0], setup_arrays=True):
self.props = props[:]
self.values = values[:]
SPHFunction.__init__(self, source, dest, setup_arrays)
def set_src_dst_reads(self):
self.src_reads = self.dst_reads = self.props + [i for i in self.values if isinstance(i,str)]
def eval(self, solver):
for i,prop in enumerate(self.props):
p = self.dest.get_carray(prop)
p = p.get_npy_array()
v = self.values[i]
if isinstance(v, str):
p[:] = getattr(self.dest, v)
else:
p[:] = v
# use the solvers default cubic spline kernel
# s = StressSolver(dim=2, integrator_type=solver.RK2Integrator)
s = StressSolver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator,
xsph=0.5, marts_eps=0.3, marts_n=4, CFL=CFL)
# can be overriden by commandline arguments
dt = 1e-8
tf = 1e-2
s.set_time_step(dt)
s.set_final_time(tf)
s.set_kernel_correction(-1)
s.pfreq = 100
app.setup(s, create_particles=create_particles)
particles = s.particles
pa, pb = particles.arrays
s.pre_step_functions.append(FixedBoundary(pb, pb, props=['x','y','u','v','rho'],
values=['x0','y0',0,0,'rho0']))
app.run()
| [
[
8,
0,
0.0051,
0.0051,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0154,
0.0051,
0,
0.66,
0.0385,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0205,
0.0051,
0,
0.66... | [
"\"\"\" An example solving stress test case \"\"\"",
"import sys",
"import numpy",
"from numpy import pi, sin, sinh, cos, cosh",
"import pysph.base.api as base",
"import pysph.sph.api as sph",
"import pysph.solver.api as solver",
"from pysph.solver.stress_solver import StressSolver",
"from pysph.sph... |
""" Balls colliding in 2D """
import numpy
import pysph.base.api as base
import pysph.sph.api as sph
import pysph.solver.api as solver
import pysph.sph.funcs.stress_funcs as stress_funcs
app = solver.Application()
Solid = base.ParticleType.Solid
E = 1e7
nu = 0.3975
G = E/(2.0*(1+nu))
K = sph.get_K(G, nu)
ro = 1.0
co = numpy.sqrt(K/ro)
deltap = 0.001
fac=1e-10
print "co, ro, G = ", co, ro, G
def create_particles(two_arr=False):
#x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -0.105:0.105+1e-4:dx]
dx = 0.001 # 1mm
ri = 0.03 # 3cm inner radius
ro = 0.04 # 4cm outer radius
spacing = 0.041 # spacing = 2*5cm
x,y = numpy.mgrid[-ro:ro:dx, -ro:ro:dx]
x = x.ravel()
y = y.ravel()
d = (x*x+y*y)
keep = numpy.flatnonzero((ri*ri<=d) * (d<ro*ro))
x = x[keep]
y = y[keep]
print 'num_particles', len(x)*2
if not two_arr:
x = numpy.concatenate([x-spacing,x+spacing])
y = numpy.concatenate([y,y])
#print bdry, numpy.flatnonzero(bdry)
m = numpy.ones_like(x)*dx*dx
h = numpy.ones_like(x)*1.4*dx
rho = numpy.ones_like(x)
z = numpy.zeros_like(x)
p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
cs = numpy.ones_like(x) * 10000.0
# u is set later
v = z
u_f = 0.059
p *= 0
h *= 1
pa = base.get_particle_array(cl_precision="single",
name="ball", type=Solid,
x=x+spacing, y=y,
m=m, rho=rho, h=h,
p=p, cs=cs,
u=z, v=v)
pa.cs[:] = co
pa.u = pa.cs*u_f*(2*(x<0)-1)
pa.constants['dr0'] = dx
pa.constants["rho0"] = ro
return pa
s = solver.Solver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator)
# Add the operations
# Velocity Gradient tensor
s.add_operation(solver.SPHOperation(
sph.VelocityGradient2D.withargs(), on_types=[Solid,],
id="vgrad")
)
# Equation of state
s.add_operation(solver.SPHOperation(
sph.IsothermalEquation.withargs(ro=ro, co=co), on_types=[Solid,],
id="eos", updates=['p'])
)
# Artificial stress
s.add_operation(solver.SPHOperation(
sph.MonaghanArtificialStress.withargs(eps=0.3),
on_types=[Solid,],
id="art_stress",)
)
# density rate
s.add_operation(solver.SPHIntegration(
sph.SPHDensityRate.withargs(), on_types=[Solid,], from_types=[Solid],
id="density", updates=['rho'])
)
# momentum equation artificial viscosity
s.add_operation(solver.SPHIntegration(
sph.MonaghanArtificialViscosity.withargs(alpha=1.0, beta=1.0),
on_types=[Solid,], from_types=[Solid,],
id="avisc", updates=['u','v'])
)
# momentum equation
s.add_operation(solver.SPHIntegration(
sph.MomentumEquationWithStress2D.withargs(deltap=deltap, n=4),
on_types=[Solid,],
from_types=[Solid,], id="momentum", updates=['u','v'])
)
# s.add_operation(solver.SPHIntegration(
# sph.MonaghanArtStressAcc.withargs(n=4, deltap=deltap, rho0=ro,
# R="R_"),
# from_types=[Solid], on_types=[Solid],
# updates=['u','v'],
# id='mart_stressacc')
# )
# XSPH
s.add_operation(solver.SPHIntegration(
sph.XSPHCorrection.withargs(eps=0.5),
on_types=[Solid,], from_types=[Solid,],
id="xsph", updates=['u','v'])
)
# stress rate
s.add_operation(solver.SPHIntegration(
sph.HookesDeviatoricStressRate2D.withargs(shear_mod=G),
on_types=[Solid,],
id="stressrate")
)
# position stepping
s.add_operation(solver.SPHIntegration(
sph.PositionStepping.withargs(),
on_types=[Solid,],
id="step", updates=['x','y'])
)
app.setup(s, create_particles=create_particles)
dt = 1e-8
tf = 1e-2
s.set_time_step(dt)
s.set_final_time(tf)
s.set_kernel_correction(-1)
s.pfreq = 100
app.run()
###############################################################################
# DEBUG
s1 = solver.Solver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator)
# Velocity Gradient tensor
s1.add_operation(solver.SPHOperation(
sph.VelocityGradient2D.withargs(), on_types=[Solid,],
id="vgrad")
)
# Equation of state
s1.add_operation(solver.SPHOperation(
sph.IsothermalEquation.withargs(ro=ro, co=co), on_types=[Solid,],
id="eos", updates=['p'])
)
# density rate
s1.add_operation(solver.SPHIntegration(
sph.SPHDensityRate.withargs(), on_types=[Solid,], from_types=[Solid],
id="density", updates=['rho'])
)
# s1.add_operation(solver.SPHOperation(
# stress_funcs.MonaghanArtStressD.withargs(eps=0.3, stress="S_"),
# on_types=[Solid],
# updates=['MArtStress00','MArtStress11','MArtStress22'],
# id='mart_stress_d')
# )
# s1.add_operation(solver.SPHOperation(
# stress_funcs.MonaghanArtStressS.withargs(eps=0.3, stress="S_"),
# on_types=[Solid],
# updates=['MArtStress12','MArtStress02','MArtStress01'],
# id='mart_stress_s')
# )
# s1.add_operation(solver.SPHIntegration(
# stress_funcs.MonaghanArtStressAcc.withargs(n=4),
# from_types=[Solid], on_types=[Solid],
# updates=['u','v','w'],
# id='mart_stressacc')
# )
# momentum equation
s1.add_operation(solver.SPHIntegration(
sph.MomentumEquationWithStress2D.withargs(theta_factor=fac,
deltap=deltap, n=4,
epsp=0.3, epsm=0),
on_types=[Solid,],
from_types=[Solid,], id="momentum", updates=['u','v'])
)
# s1.add_operation(solver.SPHIntegration(
# stress_funcs.SimpleStressAcceleration.withargs(stress="S_"),
# from_types=[Solid], on_types=[Solid],
# updates=['u','v','w'],
# id='stressacc')
# )
# momentum equation artificial viscosity
s1.add_operation(solver.SPHIntegration(
sph.MonaghanArtificialVsicosity.withargs(alpha=1.0, beta=1.0, eta=0.0),
on_types=[Solid,], from_types=[Solid,],
id="avisc", updates=['u','v'])
)
# stress rate
s1.add_operation(solver.SPHIntegration(
sph.HookesDeviatoricStressRate2D.withargs(shear_mod=G),
on_types=[Solid,],
id="stressrate")
)
# position stepping
s1.add_operation(solver.SPHIntegration(
sph.PositionStepping.withargs(),
on_types=[Solid,],
id="step", updates=['x','y','z'])
)
dt = 1e-8
tf = 1e-2
s1.set_time_step(dt)
s1.set_final_time(tf)
s1.set_kernel_correction(-1)
s1.pfreq = 100
app1.setup(s1, create_particles=create_particles)
#app.run()
# can be overriden by commandline arguments
dt = 1e-8
tf = 1e-2
s.set_time_step(dt)
s.set_final_time(tf)
s.set_kernel_correction(-1)
s.pfreq = 100
app2.setup(s, create_particles=create_particles)
#print [calc.id for calc in s.integrator.calcs]
#print [calc.id for calc in s1.integrator.calcs]
# particles = s.particles
# pa = particles.arrays[0]
def check():
array1 = s.particles.arrays[0]
array2 = s1.particles.arrays[0]
props = ['x','y','u','v','rho','p']
np = array1.get_number_of_particles()
nk = array2.get_number_of_particles()
assert np == nk
for prop in props:
p = array1.get(prop)
k = array2.get(prop)
err = abs(p - k)
print prop, sum(err)/nk, max(err)
t = 0.0
while t < tf:
print "Checkking at %g ", t
check()
print
t += dt
s.set_final_time(t)
s1.set_final_time(t)
s.solve(dt)
s1.solve(dt)
| [
[
8,
0,
0.0029,
0.0029,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0088,
0.0029,
0,
0.66,
0.0167,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0146,
0.0029,
0,
0.66... | [
"\"\"\" Balls colliding in 2D \"\"\"",
"import numpy",
"import pysph.base.api as base",
"import pysph.sph.api as sph",
"import pysph.solver.api as solver",
"import pysph.sph.funcs.stress_funcs as stress_funcs",
"app = solver.Application()",
"Solid = base.ParticleType.Solid",
"E = 1e7",
"nu = 0.397... |
""" An example solving stress test case """
import numpy
import sys
import pysph.base.api as base
import pysph.solver.api as solver
from pysph.solver.stress_solver import StressSolver, get_particle_array
from pysph.sph.funcs import stress_funcs, arithmetic_funcs
from pysph.sph.api import SPHFunction
app = solver.Application()
#dt = app.options.time_step if app.options.time_step else 1e-8
#tf = app.options.final_time if app.options.final_time else 1e-2
class PrintPos(object):
''' print properties of a particle in a column format (gnuplot/np.loadtxt) '''
def __init__(self, particle_id, props=['x'], filename='stress.dat', write_interval=100):
self.file = open(filename, 'w')
self.file.write('i\t'+'\t'.join(props)+'\n')
self.res = []
self.props = props
self.particle_id = particle_id
self.write_interval = write_interval
def function(self, solver):
l = [solver.count]
for prop in self.props:
l.append(getattr(solver.particles.arrays[0], prop)[self.particle_id])
self.res.append(l)
if solver.count%self.write_interval == 0:
s = '\n'.join('\t'.join(map(str,line)) for line in self.res)
self.file.write(s)
self.file.write('\n')
self.res = []
def create_particles():
#N = 21
dx = 0.1
#x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -0.105:0.105+1e-4:dx]
x,y = numpy.mgrid[-0.2:5.01:dx, -0.2:0.21:dx]
x = x.ravel()
y = y.ravel()
bdry = (x<0.01)*1.0
print 'num_particles', len(x)
print bdry, numpy.flatnonzero(bdry)
m = numpy.ones_like(x)*dx*dx
h = numpy.ones_like(x)*1.4*dx
rho = numpy.ones_like(x)
z = numpy.zeros_like(x)
p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
cs = numpy.ones_like(x) * 10000.0
u = -x
u *= 1e0
h *= 1
v = 0.0*y
p *= 0.0
pa = get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v, z=z,w=z,
bdry=bdry)
pa.constants['E'] = 1e9
pa.constants['nu'] = 0.3
pa.constants['G'] = pa.constants['E']/(2.0*(1+pa.constants['nu']))
pa.constants['K'] = stress_funcs.get_K(pa.constants['G'], pa.constants['nu'])
pa.constants['rho0'] = 1.
pa.constants['dr0'] = dx
pa.constants['c_s'] = numpy.sqrt(pa.constants['K']/pa.constants['rho0'])
pa.cs = numpy.ones_like(x) * pa.constants['c_s']
pa.set(idx=numpy.arange(len(pa.x)))
print 'G_mu', pa.G/pa.K
print 'Number of particles: ', len(pa.x)
return pa
class FixedBoundary(SPHFunction):
def __init__(self, source, dest, particle_indices, props=['x','y','z'],
values=[0,0,0], setup_arrays=True):
self.indices = particle_indices
self.props = props
self.values = values
SPHFunction.__init__(self, source, dest, setup_arrays)
def set_src_dst_reads(self):
self.src_reads = self.dst_reads = self.props
def eval(self, solver):
for i,prop in enumerate(self.props):
p = self.dest.get(prop)
p[self.indices] = self.values[i]
CFL=None
# use the solvers default cubic spline kernel
s = StressSolver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator,
xsph=0.5, marts_eps=0.3, marts_n=4, CFL=CFL)
dt = 1e-8
tf = 1e-3
s.set_time_step(dt)
s.set_final_time(tf)
s.pfreq = 100
app.setup(s, create_particles=create_particles)
particles = s.particles
pa = particles.arrays[0]
s.pre_step_functions.append(FixedBoundary(pa, pa, props=['u'], values=[0],
particle_indices=numpy.flatnonzero(pa.bdry)))
s.pre_step_functions.append(FixedBoundary(pa, pa, props=['v'], values=[0],
particle_indices=range(len(pa.x))))
s.set_kernel_correction(-1)
app.run()
| [
[
8,
0,
0.0081,
0.0081,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0242,
0.0081,
0,
0.66,
0.04,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0323,
0.0081,
0,
0.66,
... | [
"\"\"\" An example solving stress test case \"\"\"",
"import numpy",
"import sys",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"from pysph.solver.stress_solver import StressSolver, get_particle_array",
"from pysph.sph.funcs import stress_funcs, arithmetic_funcs",
"from pysph.... |
""" An example solving stress test case : colliding rubber balls """
import sys
import numpy
from numpy import pi, sin, sinh, cos, cosh
import pysph.base.api as base
import pysph.sph.api as sph
import pysph.solver.api as solver
from pysph.solver.stress_solver import StressSolver
from pysph.sph.funcs import stress_funcs
from pysph.sph.api import SPHFunction
app = solver.Application()
#dt = app.options.time_step if app.options.time_step else 1e-8
#tf = app.options.final_time if app.options.final_time else 1e-2
def create_particles(two_arr=False):
#x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -0.105:0.105+1e-4:dx]
dx = 0.001 # 1mm
ri = 0.03 # 3cm inner radius
ro = 0.04 # 4cm outer radius
spacing = 0.041 # spacing = 2*5cm
x,y = numpy.mgrid[-ro:ro:dx, -ro:ro:dx]
x = x.ravel()
y = y.ravel()
d = (x*x+y*y)
keep = numpy.flatnonzero((ri*ri<=d) * (d<ro*ro))
x = x[keep]
y = y[keep]
print 'num_particles', len(x)*2
if not two_arr:
x = numpy.concatenate([x-spacing,x+spacing])
y = numpy.concatenate([y,y])
#print bdry, numpy.flatnonzero(bdry)
m = numpy.ones_like(x)*dx*dx
h = numpy.ones_like(x)*1.4*dx
rho = numpy.ones_like(x)
z = numpy.zeros_like(x)
p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
cs = numpy.ones_like(x) * 10000.0
# u is set later
v = z
u_f = 0.059
p *= 0
h *= 1
#u = 0.1*numpy.sin(x*pi/2.0/5.0)
#u[numpy.flatnonzero(x<0.01)] = 0
pa = base.get_particle_array(x=x+spacing, y=y, m=m, rho=rho, h=h, p=p, u=z, v=v, z=z,w=z,
ubar=z, vbar=z, wbar=z,
name='right_ball', type=1,
sigma00=z, sigma11=z, sigma22=z,
sigma01=z, sigma12=z, sigma02=z,
MArtStress00=z, MArtStress11=z, MArtStress22=z,
MArtStress01=z, MArtStress12=z, MArtStress02=z,
#bdry=bdry
)
pa.constants['E'] = 1e7
pa.constants['nu'] = 0.3975
pa.constants['G'] = pa.constants['E']/(2.0*(1+pa.constants['nu']))
pa.constants['K'] = stress_funcs.get_K(pa.constants['G'], pa.constants['nu'])
pa.constants['rho0'] = 1.0
pa.constants['dr0'] = dx
pa.constants['c_s'] = (pa.constants['K']/pa.constants['rho0'])**0.5
pa.cs = numpy.ones_like(x) * pa.constants['c_s']
print 'c_s:', pa.c_s
print 'G:', pa.G/pa.c_s**2/pa.rho0
pa.u = pa.c_s*u_f*(2*(x<0)-1)
print 'u_f:', pa.u[0]/pa.c_s, '(%s)'%pa.u[0]
pa.set(idx=numpy.arange(len(pa.x)))
print 'Number of particles: ', len(pa.x)
print 'CFL:', pa.c_s*dt/dx/2
print 'particle_motion:', abs(pa.u[0]*dt)
if two_arr:
pb = base.get_particle_array(x=x-spacing, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v, z=z,w=z,
ubar=z, vbar=z, wbar=z,
name='left_ball', type=1,
sigma00=z, sigma11=z, sigma22=z,
sigma01=z, sigma12=z, sigma02=z,
MArtStress00=z, MArtStress11=z, MArtStress22=z,
MArtStress01=z, MArtStress12=z, MArtStress02=z,
#bdry=bdry
)
pb.constants['E'] = 1e7
pb.constants['nu'] = 0.3975
pb.constants['G'] = pb.constants['E']/(2.0*1+pb.constants['nu'])
pb.constants['K'] = stress_funcs.get_K(pb.constants['G'], pb.constants['nu'])
pb.constants['rho0'] = 1.0
pb.constants['c_s'] = (pb.constants['K']/pb.constants['rho0'])**0.5
pb.cs = numpy.ones_like(x) * pb.constants['c_s']
print 'c_s:', pb.c_s
print 'G:', pb.G/pb.c_s**2/pb.rho0
print 'G_mu', pa.G/pa.K
pa.u = pa.c_s*u_f*(2*(x<0)-1)
print 'u_f:', pb.u[-1]/pb.c_s, '(%s)'%pb.u[-1]
pb.set(idx=numpy.arange(len(pb.x)))
print 'Number of particles: ', len(pb.x)
return [pa, pb]
else:
return pa
cfl = 0.1
# use the solvers default cubic spline kernel
# s = StressSolver(dim=2, integrator_type=solver.RK2Integrator)
s = StressSolver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator,
xsph=0.5, marts_eps=0.3, marts_n=4, CFL=cfl)
# can be overriden by commandline arguments
dt = 1e-8
tf = 1e-2
s.set_time_step(dt)
s.set_final_time(tf)
s.set_kernel_correction(-1)
s.pfreq = 100
app.setup(s, create_particles=create_particles)
particles = s.particles
pa = particles.arrays[0]
app.run()
| [
[
8,
0,
0.0071,
0.0071,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0213,
0.0071,
0,
0.66,
0.0435,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0284,
0.0071,
0,
0.66... | [
"\"\"\" An example solving stress test case : colliding rubber balls \"\"\"",
"import sys",
"import numpy",
"from numpy import pi, sin, sinh, cos, cosh",
"import pysph.base.api as base",
"import pysph.sph.api as sph",
"import pysph.solver.api as solver",
"from pysph.solver.stress_solver import StressS... |
""" An example solving stress test case """
import numpy
import sys
import os
import pysph.base.api as base
import pysph.solver.api as solver
from pysph.solver.stress_solver import StressSolver, get_particle_array
from pysph.sph.funcs import stress_funcs, arithmetic_funcs
from pysph.sph.api import SPHFunction
app = solver.Application()
app.opt_parse.add_option('--hfac', action='store', dest='hfac', default=None,
type='float',
help='the smoothing length as a factor of particle spacing')
app.opt_parse.add_option('--N', action='store', dest='N', default=None, type='float',
help='number of partitions (num particles=N+1)')
class PrintPos(object):
''' print properties of a particle in a column format (gnuplot/np.loadtxt) '''
def __init__(self, particle_id, props=['x'], filename='stress.dat'):
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
self.file = open(filename, 'w')
self.file.write('i\tt\t'+'\t'.join(props)+'\n')
self.res = []
self.props = props
self.particle_id = particle_id
def function(self, solver):
l = [solver.count, solver.t]
for prop in self.props:
l.append(getattr(solver.particles.arrays[0], prop)[self.particle_id])
self.res.append(l)
s = '\n'.join('\t'.join(map(str,line)) for line in self.res)
self.file.write(s)
self.file.write('\n')
self.res = []
def create_particles():
N = app.options.N or 20
N += 1
hfac = app.options.hfac or 1.2
rho0 = 1.0
#x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -0.105:0.105+1e-4:dx]
x = numpy.mgrid[0:1:1j*N]
dx = 1.0/(N-1)
x = x.ravel()
#y = y.ravel()
bdry = (x<=0)
print bdry, numpy.flatnonzero(bdry)
m = rho0*numpy.ones_like(x)*dx
h = numpy.ones_like(x)*hfac*dx
rho = rho0*numpy.ones_like(x)
y = z = numpy.zeros_like(x)
p = z
#cs = numpy.ones_like(x) * 10000.0
u = -x
u *= 0.1
pa = get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=z, z=z,w=z,
name='solid', type=1,
bdry=bdry,)
pa.constants['E'] = 1e9
pa.constants['nu'] = 0.3
pa.constants['G'] = pa.constants['E']/(2.0*(1+pa.constants['nu']))
pa.constants['K'] = stress_funcs.get_K(pa.constants['G'], pa.constants['nu'])
pa.constants['rho0'] = rho0
pa.constants['dr0'] = dx
pa.constants['c_s'] = numpy.sqrt(pa.constants['K']/pa.constants['rho0'])
pa.cs = numpy.ones_like(x) * pa.constants['c_s']
pa.set(idx=numpy.arange(len(pa.x)))
print 'G:', pa.G
print 'K', pa.K
print 'c_s', pa.c_s
print 'Number of particles: ', len(pa.x)
return pa
class FixedBoundary(SPHFunction):
def __init__(self, source, dest, particle_indices, props=['x','y','z'],
values=[0,0,0], setup_arrays=True):
self.indices = particle_indices
self.props = props
self.values = values
SPHFunction.__init__(self, source, dest, setup_arrays)
def set_src_dst_reads(self):
self.src_reads = self.dst_reads = self.props
def eval(self, solver):
for i,prop in enumerate(self.props):
self.dest.get(prop)[self.indices] = self.values[i]
# use the solvers default cubic spline kernel
s = StressSolver(dim=1, integrator_type=solver.PredictorCorrectorIntegrator, xsph=0.5, marts_eps=0.3, marts_n=4, CFL=None)
# can be overriden by commandline arguments
s.set_time_step(1e-7)
s.set_final_time(1e-3)
app.setup(s, create_particles=create_particles)
particles = s.particles
pa = particles.arrays[0]
s.pre_step_functions.append(FixedBoundary(pa, pa, props=['u','x'], values=[0,0],
particle_indices=numpy.flatnonzero(pa.bdry)))
for i in range(len(particles.arrays[0].x)):
app.command_manager.add_function(PrintPos(i, ['x','y','u','p','rho','sigma00','ubar'],
s.output_directory+'/stress%s.dat'%i).function,
interval=1)
s.set_kernel_correction(-1)
s.pfreq = 10
app.run()
sys.exit(0)
from pylab import *
pa = particles.arrays[0]
plot(pa.x, pa.y, '.', label='y')
legend(loc='best')
figure()
plot(pa.x, pa.u, '.', label='u')
legend(loc='best')
figure()
plot(pa.x, pa.ubar, '.', label='ubar')
legend(loc='best')
figure()
plot(pa.x, pa.rho, '.', label='rho')
legend(loc='best')
figure()
plot(pa.x, pa.p, '.', label='p')
legend(loc='best')
figure()
plot(pa.x, pa.sigma00, '.', label='sigma00')
legend(loc='best')
print pa.x
print pa.y
print pa.z
print pa.u
print pa.v
print pa.w
show()
| [
[
8,
0,
0.006,
0.006,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0179,
0.006,
0,
0.66,
0.0192,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0238,
0.006,
0,
0.66,
... | [
"\"\"\" An example solving stress test case \"\"\"",
"import numpy",
"import sys",
"import os",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"from pysph.solver.stress_solver import StressSolver, get_particle_array",
"from pysph.sph.funcs import stress_funcs, arithmetic_funcs",... |
""" Example file showing the use of solver controller and various interfaces
Usage:
Run this file after running the `controller_elliptical_drop.py` example file
A matplotlib plot window will open showing the current position of all
the particles and colored according to their velocities. The plot is updated
every second. This is based on the multiprocessing interface
A browser window is also opened which displays the various solver properties
and also allows you to change then. It is based on the xml-rpc interface
"""
import matplotlib
matplotlib.use('GTKAgg') # do this before importing pylab
import matplotlib.pyplot as plt
import gobject # for the gobject timer
import time
import numpy
import webbrowser
import xmlrpclib
from pysph.solver.solver_interfaces import MultiprocessingClient
def test_interface_nonblocking(controller):
print 't1', controller.get('dt')
print 't2', controller.get_dt()
task_id = controller.pause_on_next()
print task_id
time.sleep(1)
print 'count', controller.get_count()
time.sleep(1)
# main thread is stopped; count should still be same
print 'count2', controller.get_count()
controller.cont()
# main thread now still running; count should have increased
time.sleep(1)
print 'count3', controller.get_count()
task_id = controller.get_particle_array_names()
pa_names = controller.get_result(task_id) # blocking call
print 'pa_names', task_id, pa_names
print controller.get_status()
def test_interface_blocking(controller):
print 't1', controller.get('dt')
print 't2', controller.get_dt()
task_id = controller.pause_on_next()
print task_id
time.sleep(1)
print 'count', controller.get_count()
time.sleep(1)
# main thread is stopped; count should still be same
print 'count2', controller.get_count()
controller.cont()
# main thread now still running; count should have increased
time.sleep(1)
print 'count3', controller.get_count()
pa_names = controller.get_particle_array_names() # blocking call
print 'pa_names', task_id, pa_names
print controller.get_status()
def test_XMLRPC_interface(address='http://localhost:8900/'):
client = xmlrpclib.ServerProxy(address, allow_none=True)
print client.system.listMethods()
# client has all methods of `control` instance
print client.get_t()
print 'xmlrpcclient:count', client.get('count')
test_interface_blocking(client)
client.set_blocking(False)
test_interface_nonblocking(client)
client.set_blocking(True)
return client
def test_web_interface(address='http://127.0.0.1:8900/controller_elliptical_drop_client.html'):
webbrowser.open(url=address)
def test_multiprocessing_interface(address=('localhost',8800), authkey='pysph'):
client = MultiprocessingClient(address, authkey)
controller = client.controller
pa_names = controller.get_particle_array_names() # blocking call
print controller.get_named_particle_array(pa_names[0]) # blocking call
test_interface_blocking(controller)
controller.set_blocking(False)
test_interface_nonblocking(controller)
controller.set_blocking(True)
return controller
def test_plot(controller):
controller.set_blocking(True)
pa_name = controller.get_particle_array_names()[0]
pa = controller.get_named_particle_array(pa_name)
#plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
line = ax.scatter(pa.x, pa.y, c=numpy.hypot(pa.u,pa.v))
global t
t = time.time()
def update():
global t
t2 = time.time()
dt = t2 - t
t = t2
print 'count:', controller.get_count(), '\ttimer time:', dt,
pa = controller.get_named_particle_array(pa_name)
line.set_offsets(zip(pa.x, pa.y))
line.set_array(numpy.hypot(pa.u,pa.v))
fig.canvas.draw()
print '\tresult & draw time:', time.time()-t
return True
update()
# due to some gil issues in matplotlib, updates work only when
# mouse is being hovered over the plot area (or a key being pressed)
# when using python threading.Timer. Hence gobject.timeout_add
# is being used instead
gobject.timeout_add_seconds(1, update)
plt.show()
def test_main():
test_XMLRPC_interface()
controller = test_multiprocessing_interface()
test_web_interface()
test_plot(controller)
if __name__ == '__main__':
test_main()
| [
[
8,
0,
0.0353,
0.0641,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0769,
0.0064,
0,
0.66,
0.0588,
75,
0,
1,
0,
0,
75,
0,
0
],
[
8,
0,
0.0833,
0.0064,
0,
0.66,
... | [
"\"\"\" Example file showing the use of solver controller and various interfaces\n\nUsage:\n Run this file after running the `controller_elliptical_drop.py` example file\n A matplotlib plot window will open showing the current position of all\n the particles and colored according to their velocities. The p... |
""" An example solving the Elliptical drop test case with various interfaces """
import pysph.base.api as base
import pysph.solver.api as solver
app = solver.Application()
app.process_command_line(['-q', '--interactive',
'--xml-rpc=0.0.0.0:8900', '--multiproc=pysph@0.0.0.0:8800'])
s = solver.FluidSolver(dim=2, integrator_type=solver.EulerIntegrator)
app.set_solver(s, create_particles=solver.fluid_solver.get_circular_patch,
variable_h=False, name='fluid', type=0)
s.set_time_step(1e-5)
s.set_final_time(1e-1)
s.pfreq = 1000
if __name__ == '__main__':
app.run()
| [
[
8,
0,
0.0455,
0.0455,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1364,
0.0455,
0,
0.66,
0.1,
212,
0,
1,
0,
0,
212,
0,
0
],
[
1,
0,
0.1818,
0.0455,
0,
0.66,
... | [
"\"\"\" An example solving the Elliptical drop test case with various interfaces \"\"\"",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"app = solver.Application()",
"app.process_command_line(['-q', '--interactive',\n '--xml-rpc=0.0.0.0:8900', '--multiproc=pysph@0.... |
""" Simple motion. """
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
from random import randint
from numpy import random
nx = 1 << 5
dx = 0.5/nx
def create_particles_3d(**kwargs):
x, y, z = numpy.mgrid[0.25:0.75+1e-10:dx,
0.25:0.75+1e-10:dx,
0.25:0.75+1e-10:dx]
x = x.ravel()
y = y.ravel()
z = z.ravel()
np = len(x)
u = random.random(np) * 0
v = random.random(np) * 0
w = random.random(np) * 0
m = numpy.ones_like(x) * dx**3
vol_per_particle = numpy.power(0.5**3/np ,1.0/3.0)
radius = 2 * vol_per_particle
print "Using smoothing length: ", radius
h = numpy.ones_like(x) * radius
fluid = base.get_particle_array(name="fluid", type=base.Fluid,
x=x, y=y, z=z,
u=u, v=v, w=w,
m=m,h=h)
print "Number of particles: ", fluid.get_number_of_particles()
return [fluid,]
def create_particles_2d(**kwargs):
x, y = numpy.mgrid[0.25:0.75+1e-10:dx, 0.25:0.75+1e-10:dx]
x = x.ravel()
y = y.ravel()
np = len(x)
u = numpy.zeros_like(x)
v = numpy.zeros_like(x)
m = numpy.ones_like(x) * dx**2
vol_per_particle = numpy.power(0.5**2/np ,1.0/2.0)
radius = 2 * vol_per_particle
print "Using smoothing length: ", radius
h = numpy.ones_like(x) * radius
fluid = base.get_particle_array(name="fluid", type=base.Fluid,
x=x, y=y,
u=u, v=v,
m=m,
h=h)
print "Number of particles: ", fluid.get_number_of_particles()
return [fluid,]
# define an integrator
class CrazyIntegrator(solver.EulerIntegrator):
"""Crazy integrator """
def step(self, dt):
""" Step the particle properties. """
# get the current stage of the integration
k_num = self.cstep
for array in self.arrays:
np = array.get_number_of_particles()
# get the mapping for this array and this stage
to_step = self.step_props[ array.name ][k_num]
for prop in to_step:
initial_prop = to_step[ prop ][0]
step_prop = to_step[ prop ][1]
initial_arr = array.get( initial_prop )
step_arr = array.get( step_prop )
updated_array = initial_arr + step_arr * dt
# simply use periodicity for the positions
if prop in ['x', 'y', 'z']:
updated_array[numpy.where(updated_array < 0)[0]] += 1
updated_array[numpy.where(updated_array > 1)[0]] -= 1
array.set( **{prop:updated_array} )
# Increment the step by 1
self.cstep += 1
app = solver.Application()
s = solver.Solver(dim=2, integrator_type=CrazyIntegrator)
# Update the density of the particles
s.add_operation(solver.SPHOperation(
sph.SPHRho.withargs(), on_types=[base.Fluid], from_types=[base.Fluid],
updates=["rho"],
id="sd")
)
# Compute some interaction between particles
s.add_operation(solver.SPHIntegration(
sph.ArtificialPotentialForce.withargs(factorp=1.0, factorm=1.0),
on_types=[base.Fluid], from_types=[base.Fluid, base.Solid],
updates=["u","v", "w"],
id="potential")
)
# step the particles
s.add_operation(solver.SPHIntegration(
sph.PositionStepping.withargs(),
on_types=[base.Fluid],
updates=["x","y","z"],
id="step")
)
s.set_time_step(1e-2)
s.set_final_time(5)
app.setup(
solver=s,
variable_h=False,
create_particles=create_particles_2d)
cm = s.particles.cell_manager
print "Number of cells, cell size = %d, %g"%(len(cm.cells_dict), cm.cell_size)
# add a post step function to save the neighbor information every 10
# iterations
#s.post_step_functions.append( solver.SaveCellManagerData(
# s.pid, path=s.output_directory, count=50) )
app.run()
| [
[
8,
0,
0.0061,
0.0061,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0184,
0.0061,
0,
0.66,
0.0455,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0245,
0.0061,
0,
0.66... | [
"\"\"\" Simple motion. \"\"\"",
"import numpy",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"import pysph.sph.api as sph",
"from random import randint",
"from numpy import random",
"nx = 1 << 5",
"dx = 0.5/nx",
"def create_particles_3d(**kwargs):\n\n x, y, z = numpy.mg... |
""" A script to demonstrate the simplest of calculations in parallel
Setup:
------
Two particle arrays are created on two separate processors with the
following procerties:
processor 0:
x ~ [0,1], dx = 0.1, h = 0.2, m = 0.1, fval = x*x
processor 1:
x ~ [1.1, 2], dx = 0.1, h = 0.2, m = 0.1, fval = x*x
"""
# mpi imports
from mpi4py import MPI
#numpy and logging
import numpy, logging
#local pysph imports
import pysph.sph.api as sph
import pysph.solver.api as solver
from pysph.base.carray import LongArray
from pysph.base.api import Particles, get_particle_array
from pysph.base.kernels import CubicSplineKernel
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = comm.Get_rank()
if num_procs > 2:
raise SystemError, 'Start this script on less than 5 processors'
# logging setup
logger = logging.getLogger()
log_file_name = '/tmp/log_pysph_'+str(rank)
logging.basicConfig(level=logging.DEBUG, filename=log_file_name,
filemode='w')
logger.addHandler(logging.StreamHandler())
#create the particles on processor 0
if rank == 0:
x = numpy.linspace(0,1,11)
h = numpy.ones_like(x)*0.2
m = numpy.ones_like(x)*0.1
rho = numpy.ones_like(x)
fval = x*x
#create the particles on processor 1
if rank == 1:
x = numpy.linspace(1.1,2,10)
h = numpy.ones_like(x)*0.2
m = numpy.ones_like(x)*0.1
rho = numpy.ones_like(x)
fval = x*x
#create the particles in parallel without load balancing
kernel = CubicSplineKernel(dim=1)
pa = get_particle_array(x=x, h=h, m=m, fval=fval, rho=rho)
particles = Particles([pa], in_parallel=True,
load_balancing=False)
#make sure the particles need updating
particles.update()
#choose the function and the sph calc
func = sph.SPHRho(pa, pa)
calc = sph.SPHCalc(particles=particles, kernel=kernel, func=func,
updates=['rho'], integrates=False)
tmpx = pa.get('tmpx', only_real_particles=False)
logger.debug('tempx for all particles %s'%(tmpx))
#perform the summation density operation
calc.sph()
local = pa.get('local', only_real_particles=False)
logger.debug('Local indices for process %d are %s'%(rank, local))
#check for the density values on each processor
rho = pa.get('tmpx', only_real_particles=True)
logger.debug('Density for local particles on processor %d is %s '%(rank, rho))
| [
[
1,
0,
0.1,
0.1,
0,
0.66,
0,
985,
0,
1,
0,
0,
985,
0,
0
],
[
1,
0,
0.3,
0.1,
0,
0.66,
0.1667,
954,
0,
2,
0,
0,
954,
0,
0
],
[
1,
0,
0.5,
0.1,
0,
0.66,
0.3333,
... | [
"from mpi4py import MPI",
"import numpy, logging",
"import pysph.sph.api as sph",
"import pysph.solver.api as solver",
"from pysph.base.carray import LongArray",
"from pysph.base.api import Particles, get_particle_array",
"from pysph.base.kernels import CubicSplineKernel"
] |
""" The moving square test case is part of the SPHERIC benchmark
tests. Refer to the document for the test details.
Numerical Parameters:
---------------------
dx = dy = 0.005
h = 0.0065 => h/dx = 1.3
Length of Box = 10
Height of Box = 5
Number of particles = 27639 + 1669 = 29308
ro = 1000.0
Vmax = 1.0
co = 15 (15 * Vmax)
gamma = 7.0
Artificial Viscosity:
alpha = 0.5
XSPH Correction:
eps = 0.5
"""
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
DummyFluid = base.ParticleType.DummyFluid
dx = 0.05
h = 1.3*dx
ro = 1000.0
co = 15.0
gamma = 7.0
alpha = 0.5
eps = 0.5
box_length = 10.0
box_height = 5.0
square_side = 1.0
B = co*co*ro/gamma
m = ro*dx*dx
pi = numpy.pi
pi2 = pi/2.0
class MoveSquare:
def __init__(self, fname = "Motion_Body.dat"):
self.original_position = 1.5
motion = numpy.loadtxt(fname)
self.time = motion[:,0]
self.disp = motion[:,3]
def eval(self, solver):
particles = solver.particles
time = solver.time
square = particles.get_named_particle_array("square")
x = square.get('x')
new_pos = numpy.interp(time, self.time, self.disp)
displacement = new_pos - self.original_position
x += displacement
square.set(x=x)
def get_wall():
""" Get the wall particles """
left = base.Line(base.Point(), box_height, pi2)
top = base.Line(base.Point(0, box_height), box_length, 0)
right = base.Line(base.Point(box_length, box_height), box_height, pi+pi2)
bottom = base.Line(base.Point(box_length), box_length, pi)
box_geom = base.Geometry('box', [left, top, right, bottom], is_closed=True)
box_geom.mesh_geometry(dx)
box = box_geom.get_particle_array(re_orient=False)
box.m[:] = m
box.h[:] = h
return box
def get_square():
""" Get the square particle array """
left = base.Line(base.Point(1,2), square_side, pi2)
top = base.Line(base.Point(1,3), square_side, 0)
right = base.Line(base.Point(2,3), square_side, pi+pi2)
bottom = base.Line(base.Point(2,2), square_side, pi)
square_geom = base.Geometry('square', [left, top, right, bottom],
is_closed=True)
square_geom.mesh_geometry(dx)
square = square_geom.get_particle_array(name="square", re_orient=True)
square.m[:] = m
square.h[:] = h
return square
def get_fluid():
""" Get the fluid particle array """
x, y = numpy.mgrid[dx: box_length - 1e-10: dx,
dx: box_height - 1e-10: dx]
xf, yf = x.ravel(), y.ravel()
mf = numpy.ones_like(xf) * m
hf = numpy.ones_like(xf) * h
rhof = numpy.ones_like(xf) * ro
cf = numpy.ones_like(xf) * co
pf = numpy.zeros_like(xf)
fluid = base.get_particle_array(name="fluid", type=Fluid,
x=xf, y=yf, h=hf, rho=rhof, c=cf, p=pf)
# remove indices within the square
indices = []
np = fluid.get_number_of_particles()
x, y = fluid.get('x','y')
for i in range(np):
if 1.0 -dx/2 <= x[i] <= 2.0 + dx/2:
if 2.0 - dx/2 <= y[i] <= 3.0 + dx/2:
indices.append(i)
to_remove = base.LongArray(len(indices))
to_remove.set_data(numpy.array(indices))
fluid.remove_particles(to_remove)
return fluid
def get_dummy_particles():
x, y = numpy.mgrid[-5*dx: box_length + 5*dx + 1e-10: dx,
-5*dx: box_height + 5*dx + 1e-10: dx]
xd, yd = x.ravel(), y.ravel()
md = numpy.ones_like(xd) * m
hd = numpy.ones_like(xd) * h
rhod = numpy.ones_like(xd) * ro
cd = numpy.ones_like(xd) * co
pd = numpy.zeros_like(xd)
dummy_fluid = base.get_particle_array(name="dummy_fluid",
type=Fluid, x=xd, y=yd,
h=hd, rho=rhod, c=cd, p=pd)
# remove indices within the square
indices = []
np = dummy_fluid.get_number_of_particles()
x, y = dummy_fluid.get('x','y')
for i in range(np):
if -dx/2 <= x[i] <= box_length + dx/2:
if - dx/2 <= y[i] <= box_height+ dx/2:
indices.append(i)
to_remove = base.LongArray(len(indices))
to_remove.set_data(numpy.array(indices))
dummy_fluid.remove_particles(to_remove)
return dummy_fluid
def get_particles():
wall = get_wall()
square = get_square()
fluid = get_fluid()
dummy_fluid = get_dummy_particles()
return [wall, square, fluid, dummy_fluid]
app = solver.Application()
app.process_command_line()
particles = app.create_particles(False, get_particles)
s = solver.Solver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator)
# Equation of state
s.add_operation(solver.SPHOperation(
sph.TaitEquation(co=co, ro=ro),
on_types=[Fluid],
updates=['p', 'cs'],
id='eos')
)
# Continuity equation
s.add_operation(solver.SPHIntegration(
sph.SPHDensityRate(),
on_types=[Fluid], from_types=[Fluid, DummyFluid],
updates=['rho'], id='density')
)
# momentum equation
s.add_operation(solver.SPHIntegration(
sph.MomentumEquation(alpha=alpha, beta=0.0),
on_types=[Fluid], from_types=[Fluid, DummyFluid],
updates=['u','v'], id='mom')
)
# monaghan boundary force
s.add_operation(solver.SPHIntegration(
sph.MonaghanBoundaryForce(delp=dx),
on_types=[Fluid], from_types=[Solid], updates=['u','v'],
id='bforce')
)
# Position stepping and XSPH correction
s.add_operation_step([Fluid])
s.add_operation_xsph(eps=eps)
# add post step and pre step functions for movement
s.set_final_time(3.0)
s.set_time_step(1e-5)
s.post_step_functions.append(MoveSquare())
app.set_solver(s)
app.run()
| [
[
8,
0,
0.0522,
0.1007,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1082,
0.0037,
0,
0.66,
0.0238,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.1119,
0.0037,
0,
0.66... | [
"\"\"\" The moving square test case is part of the SPHERIC benchmark\ntests. Refer to the document for the test details. \n\nNumerical Parameters:\n---------------------\n\ndx = dy = 0.005\nh = 0.0065 => h/dx = 1.3",
"import numpy",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"impo... |
""" 2D Dam Break Over a dry bed. The case is described in "State of
the art classical SPH for free surface flows", Benedict D Rogers,
Robert A, Dalrymple and Alex J.C Crespo, Journal of Hydraulic
Research, Vol 48, Extra Issue (2010), pp 6-27
Setup:
------
x x !
x x !
x x !
x x !
x o o o x !
x o o x !3m
x o o o x !
x o o x !
x o o o x !
x x !
xxxxxxxxxxxxxxxxxxxxx | o -- Fluid Particles
x -- Solid Particles
-dx- dx = dy
_________4m___________
Y
|
|
|
|
|
| /Z
| /
| /
| /
| /
| /
|/_________________X
Fluid particles are placed on a staggered grid. The nodes of the grid
are located at R = l*dx i + m * dy j with a two point bias (0,0) and
(dx/2, dy/2) refered to the corner defined by R. l and m are integers
and i and j are the unit vectors alon `X` and `Y` respectively.
For the Monaghan Type Repulsive boundary condition, a single row of
boundary particles is used with a boundary spacing delp = dx = dy.
For the Dynamic Boundary Conditions, a staggered grid arrangement is
used for the boundary particles.
Numerical Parameters:
---------------------
dx = dy = 0.012m
h = 0.0156 => h/dx = 1.3
Height of Water column = 2m
Length of Water column = 1m
Number of particles = 27639 + 1669 = 29308
ro = 1000.0
co = 10*sqrt(2*9.81*2) ~ 65.0
gamma = 7.0
Artificial Viscosity:
alpha = 0.5
XSPH Correction:
eps = 0.5
"""
import warnings
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
from pysph.tools import geometry_utils as geom
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
fluid_column_height = 2.0
fluid_column_width = 1.0
container_height = 3.0
container_width = 4.0
#h = 0.0156
h = 0.0390
#h = 0.01
dx = dy = 0.03
ro = 1000.0
co = 10.0 * numpy.sqrt(2*9.81*fluid_column_height)
gamma = 7.0
alpha = 0.3
eps = 0.5
B = co*co*ro/gamma
def get_boundary_particles():
""" Get the particles corresponding to the dam and fluids """
xb1, yb1 = geom.create_2D_tank(x1=0, y1=0,
x2=container_width, y2=container_height,
dx=dx)
xb2, yb2 = geom.create_2D_tank(x1=-dx/2, y1=-dx/2,
x2=container_width, y2=container_height,
dx=dx)
xb = numpy.concatenate((xb1, xb2))
yb = numpy.concatenate((yb1, yb2))
hb = numpy.ones_like(xb)*h
mb = numpy.ones_like(xb)*dx*dy*ro*0.5
rhob = numpy.ones_like(xb) * ro
cb = numpy.ones_like(xb)*co
boundary = base.get_particle_array(cl_precision="single",
name="boundary", type=Solid,
x=xb, y=yb, h=hb, rho=rhob, cs=cb,
m=mb)
print 'Number of Boundary particles: ', len(xb)
return boundary
def get_fluid_particles():
xf1, yf1 = geom.create_2D_filled_region(x1=dx, y1=dx,
x2=fluid_column_width,
y2=fluid_column_height,
dx=dx)
xf2, yf2 = geom.create_2D_filled_region(x1=dx/2, y1=dx/2,
x2=fluid_column_width,
y2=fluid_column_height,
dx=dx)
x = numpy.concatenate((xf1, xf2))
y = numpy.concatenate((yf1, yf2))
print 'Number of fluid particles: ', len(x)
hf = numpy.ones_like(x) * h
mf = numpy.ones_like(x) * dx * dy * ro * 0.5
rhof = numpy.ones_like(x) * ro
csf = numpy.ones_like(x) * co
fluid = base.get_particle_array(cl_precision="single",
name="fluid", type=Fluid,
x=x, y=y, h=hf, m=mf, rho=rhof,
cs=csf)
return fluid
def get_particles(**args):
fluid = get_fluid_particles()
boundary = get_boundary_particles()
return [fluid, boundary]
app = solver.Application()
integrator_type = solver.PredictorCorrectorIntegrator
s = solver.Solver(dim=2, integrator_type=integrator_type)
kernel = base.CubicSplineKernel(dim=2)
# define the artificial pressure term for the momentum equation
deltap = dx
n = 4
#Equation of state
s.add_operation(solver.SPHOperation(
sph.TaitEquation.withargs(hks=False, co=co, ro=ro),
on_types=[Fluid, Solid],
updates=['p', 'cs'],
id='eos'),
)
#Continuity equation
s.add_operation(solver.SPHIntegration(
sph.SPHDensityRate.withargs(hks=False),
on_types=[Fluid, Solid], from_types=[Fluid, Solid],
updates=['rho'], id='density')
)
#momentum equation
s.add_operation(solver.SPHIntegration(
sph.MomentumEquation.withargs(alpha=alpha, beta=0.0, hks=False,
deltap=None, n=n),
on_types=[Fluid], from_types=[Fluid, Solid],
updates=['u','v'], id='mom')
)
#s.add_operation(solver.SPHIntegration(
# sph.SPHPressureGradient.withargs(),
# on_types=[Fluid], from_types=[Fluid,Solid],
# updates=['u','v'], id='pgrad')
# )
#s.add_operation(solver.SPHIntegration(
# sph.MonaghanArtificialVsicosity.withargs(alpha=alpha, beta=0.0),
# on_types=[Fluid], from_types=[Fluid,Solid],
# updates=['u','v'], id='avisc')
# )
#Gravity force
s.add_operation(solver.SPHIntegration(
sph.GravityForce.withargs(gy=-9.81),
on_types=[Fluid],
updates=['u','v'],id='gravity')
)
# Position stepping and XSPH correction operations
s.add_operation_step([Fluid])
s.add_operation_xsph(eps=eps)
dt = 1e-4
s.set_final_time(3.0)
s.set_time_step(dt)
app.setup(
solver=s,
variable_h=False, create_particles=get_particles, min_cell_size=4*h,
locator_type=base.NeighborLocatorType.SPHNeighborLocator,
domain_manager_type=base.DomainManagerType.LinkedListManager,
cl_locator_type=base.OpenCLNeighborLocatorType.LinkedListSPHNeighborLocator
)
# this tells the solver to compute the max time step dynamically
#s.time_step_function = solver.ViscousTimeStep(co=co,cfl=0.3,
# particles=s.particles)
s.time_step_function = solver.ViscousAndForceBasedTimeStep(co=co, cfl=0.3,
particles=s.particles)
if app.options.with_cl:
msg = """\n\n
You have chosen to run the example with OpenCL support. The only
integrator with OpenCL support is the forward Euler
integrator. This integrator will be used instead of the default
predictor corrector integrator for this example.\n\n
"""
warnings.warn(msg)
integrator_type = solver.EulerIntegrator
app.run()
| [
[
8,
0,
0.1374,
0.2711,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2784,
0.0037,
0,
0.66,
0.0238,
358,
0,
1,
0,
0,
358,
0,
0
],
[
1,
0,
0.2857,
0.0037,
0,
0.66... | [
"\"\"\" 2D Dam Break Over a dry bed. The case is described in \"State of\nthe art classical SPH for free surface flows\", Benedict D Rogers,\nRobert A, Dalrymple and Alex J.C Crespo, Journal of Hydraulic\nResearch, Vol 48, Extra Issue (2010), pp 6-27\n\n\nSetup:\n------",
"import warnings",
"import numpy",
"i... |
""" 2D Dam Break Over a dry bed. The case is described in "State of
the art classical SPH for free surface flows", Benedict D Rogers,
Robert A, Dalrymple and Alex J.C Crespo, Journal of Hydraulic
Research, Vol 48, Extra Issue (2010), pp 6-27
Setup:
------
x x !
x x !
x x !
x x !
x o o o x !
x o o x !3m
x o o o x !
x o o x !
x o o o x !
x x !
xxxxxxxxxxxxxxxxxxxxx | o -- Fluid Particles
x -- Solid Particles
-dx- dx = dy
_________4m___________
Y
|
|
|
|
|
| /Z
| /
| /
| /
| /
| /
|/_________________X
Fluid particles are placed on a staggered grid. The nodes of the grid
are located at R = l*dx i + m * dy j with a two point bias (0,0) and
(dx/2, dy/2) refered to the corner defined by R. l and m are integers
and i and j are the unit vectors alon `X` and `Y` respectively.
For the Monaghan Type Repulsive boundary condition, a single row of
boundary particles is used with a boundary spacing delp = dx = dy.
For the Dynamic Boundary Conditions, a staggered grid arrangement is
used for the boundary particles.
Numerical Parameters:
---------------------
dx = dy = 0.012m
h = 0.0156 => h/dx = 1.3
Height of Water column = 2m
Length of Water column = 1m
Number of particles = 27639 + 1669 = 29308
ro = 1000.0
co = 10*sqrt(2*9.81*2) ~ 65.0
gamma = 7.0
Artificial Viscosity:
alpha = 0.5
XSPH Correction:
eps = 0.5
"""
import warnings
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
#h = 0.0156
h = 0.0390
#h = 0.01
dx = dy = h/1.3
ro = 1000.0
co = 65.0
gamma = 7.0
alpha = 0.5
eps = 0.5
fluid_column_height = 2.0
fluid_column_width = 1.0
container_height = 3.0
container_width = 6.0
B = co*co*ro/gamma
def get_1D_grid(start, end, spacing):
""" Return an array of points in 1D
Parameters:
-----------
start -- the starting coordinate value
end -- the ending coordinate value
spacing -- the uniform spacing between the points
Notes:
------
Uses numpy arange to get the points!
"""
return numpy.arange(start, end+1e-10, spacing)
def get_2D_grid(start_point, end_point, spacing):
""" Return a 2D array of points by calling numpy's mgrid
Parameters:
-----------
start_point -- the starting corner point for the rectangle
end_point -- the ending corner point for the rectangle
spacing -- uniform spacing in x and y
"""
x, y = numpy.mgrid[start_point.x:end_point.x:spacing,
start_point.y:end_point.y:spacing]
x = x.ravel(); y = y.ravel()
return x, y
def get_2D_staggered_grid(bias_point_1, bias_point_2, end_point, spacing):
""" Return a staggered cartesian grid in 2D
Parameters:
-----------
bias_point_1 -- the first grid starting point
bias_point_2 -- the second grid starting point
end_point -- the maximum `x` and `y` for the grid
spacing -- uniform spacing in `x` and `y`
"""
x1, y1 = get_2D_grid(bias_point_1, end_point, spacing)
x2, y2 = get_2D_grid(bias_point_2, end_point, spacing)
x = numpy.zeros(len(x1)+len(x2), float)
y = numpy.zeros(len(x1)+len(x2), float)
x[:len(x1)] = x1; y[:len(x1)] = y1
x[len(x1):] = x2; y[len(x1):] = y2
return x, y
def get_boundary_particles():
""" Get the particles corresponding to the dam and fluids """
#left wall
ylw = get_1D_grid(0, container_height, dy)
xlw = numpy.zeros_like(ylw)
nb1 = len(ylw)
#bottom
xbs = get_1D_grid(dx, container_width+dx, dx)
ybs = numpy.zeros_like(xbs)
nb3 = len(xbs)
max_xb = numpy.max(xbs)
#staggered left wall
yslw = get_1D_grid(-dx/2, container_height, dx)
xslw = numpy.ones_like(yslw) * -dx/2
nb4 = len(yslw)
#staggered bottom
xsb = get_1D_grid(dx/2, container_width+dx+dx, dx)
ysb = numpy.ones_like(xsb) * -dy/2
nb6 = len(xsb)
max_xsb = numpy.max(xsb)
#right wall
yrw = numpy.arange(dx, container_height, dx)
xrw = numpy.ones_like(yrw) * max_xb
nb2 = len(yrw)
#staggered right wall
ysrw = numpy.arange(dy/2, container_height, dy)
xsrw = numpy.ones_like(ysrw) * max_xsb
nb5 = len(ysrw)
nb = nb1 + nb2 + nb3 + nb4 + nb5 + nb6
print "Number of Boundary Particles: ", nb
xb = numpy.zeros(nb, float)
yb = numpy.zeros(nb, float)
idx = 0
xb[:nb1] = xlw; yb[:nb1] = ylw
idx += nb1
xb[idx:idx+nb2] = xrw; yb[idx:idx+nb2] = yrw
idx += nb2
xb[idx:idx+nb3] = xbs; yb[idx:idx+nb3] = ybs
idx += nb3
xb[idx:idx+nb4] = xslw; yb[idx:idx+nb4] = yslw
idx += nb4
xb[idx:idx+nb5] = xsrw; yb[idx:idx+nb5] = ysrw
idx += nb5
xb[idx:] = xsb; yb[idx:] = ysb
hb = numpy.ones_like(xb)*h
mb = numpy.ones_like(xb)*dx*dy*ro
rhob = numpy.ones_like(xb) * ro
cb = numpy.ones_like(xb)*co
boundary = base.get_particle_array(name="boundary", type=Solid,
x=xb, y=yb, h=hb, rho=rhob, cs=cb,
m=mb)
width = max_xb
return boundary, width
def get_fluid_particles(name="fluid"):
x, y = get_2D_staggered_grid(base.Point(dx, dx), base.Point(dx/2, dx/2),
base.Point(1.0,2.0), dx)
hf = numpy.ones_like(x) * h
mf = numpy.ones_like(x) * dx * dy * ro
rhof = numpy.ones_like(x) * ro
csf = numpy.ones_like(x) * co
fluid = base.get_particle_array(name=name, type=Fluid,
x=x, y=y, h=hf, m=mf, rho=rhof, cs=csf)
return fluid
def get_particles(**args):
boundary, width = get_boundary_particles()
fluid1 = get_fluid_particles(name="fluid1")
fluid2 = get_fluid_particles(name="fluid2")
fluid2.x = width - fluid2.x
print 'Number of fluid particles: ', len(fluid1.x) + len(fluid2.x)
return [fluid1, fluid2, boundary]
app = solver.Application()
integrator_type = solver.RK2Integrator
kernel = base.HarmonicKernel(dim=2, n=3)
s = solver.Solver(dim=2, integrator_type=integrator_type)
s.default_kernel = kernel
#Equation of state
s.add_operation(solver.SPHOperation(
sph.TaitEquation.withargs(co=co, ro=ro),
on_types=[Fluid, Solid],
updates=['p', 'cs'],
id='eos')
)
#Continuity equation
s.add_operation(solver.SPHIntegration(
sph.SPHDensityRate.withargs(),
on_types=[Fluid, Solid], from_types=[Fluid, Solid],
updates=['rho'], id='density')
)
#momentum equation
s.add_operation(solver.SPHIntegration(
sph.MomentumEquation.withargs(alpha=alpha, beta=0.0),
on_types=[Fluid], from_types=[Fluid, Solid],
updates=['u','v'], id='mom')
)
#Gravity force
s.add_operation(solver.SPHIntegration(
sph.GravityForce.withargs(gy=-9.81),
on_types=[Fluid],
updates=['u','v'],id='gravity')
)
# Position stepping and XSPH correction operations
s.add_operation_step([Fluid])
s.add_operation_xsph(eps=eps)
s.set_final_time(10)
s.set_time_step(1e-4)
app.setup(
solver=s,
variable_h=False, create_particles=get_particles,
locator_type=base.NeighborLocatorType.SPHNeighborLocator,
domain_manager=base.DomainManagerType.DomainManager,
cl_locator_type=base.OpenCLNeighborLocatorType.AllPairNeighborLocator
)
if app.options.with_cl:
msg = """\n\n
You have chosen to run the example with OpenCL support. The only
integrator with OpenCL support is the forward Euler
integrator. This integrator will be used instead of the default
RK2 integrator for this example.\n\n
"""
warnings.warn(msg)
integrator_type = solver.EulerIntegrator
app.run()
| [
[
8,
0,
0.1109,
0.2189,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2249,
0.003,
0,
0.66,
0.0244,
358,
0,
1,
0,
0,
358,
0,
0
],
[
1,
0,
0.2308,
0.003,
0,
0.66,
... | [
"\"\"\" 2D Dam Break Over a dry bed. The case is described in \"State of\nthe art classical SPH for free surface flows\", Benedict D Rogers,\nRobert A, Dalrymple and Alex J.C Crespo, Journal of Hydraulic\nResearch, Vol 48, Extra Issue (2010), pp 6-27\n\n\nSetup:\n------",
"import warnings",
"import numpy",
"i... |
""" A tiny dam break problem
Setup:
------
x x !
x x !
x x !
x x !
x o o o o o x !
x o o o o o x !3m
x o o o o o x !
x o o o o o x !
x o o o o o x !
x x !
xxxxxxxxxxxxxxxxxxxxx | o -- Fluid Particles
x -- Solid Particles
-dx- dx = dy
_________4m___________
Y
|
|
|
|
|
| /Z
| /
| /
| /
| /
| /
|/_________________X
The Monaghan Type Repulsive boundary condition, with a single row of
boundary particles is used with a boundary spacing delp = dx = dy.
Numerical Parameters:
---------------------
h = 0.05
dx = dy = h/1.25 = 0.04
Height of Water column = 2m
Length of Water column = 1m
Number of fluid particles = 1250
ro = 1000.0
co = 10*sqrt(2*9.81*2) ~ 65.0
gamma = 7.0
Artificial Viscosity:
alpha = 0.5
XSPH Correction:
eps = 0.5
"""
import sys
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
h = 0.05
dx = dy = h/1.25
ro = 1000.0
co = 65.0
gamma = 7.0
alpha = 0.5
eps = 0.5
fluid_column_height = 2.0
fluid_column_width = 1.0
container_height = 3.0
container_width = 4.0
B = co*co*ro/gamma
def get_boundary_particles():
""" Get the particles corresponding to the dam and fluids """
left = base.Line(base.Point(0,0), container_height, numpy.pi/2)
bottom = base.Line(base.Point(container_width,0),
container_width, numpy.pi)
right = base.Line(base.Point(container_width,container_height),
container_height, 1.5*numpy.pi)
g = base.Geometry('box', [left, bottom, right], is_closed=False)
g.mesh_geometry(dx)
boundary = g.get_particle_array(re_orient=False,
name="boundary")
return boundary
def get_fluid_particles():
xarr = numpy.arange(dx, 1.0 + dx, dx)
yarr = numpy.arange(dx, 2.0 + dx, dx)
x,y = numpy.meshgrid( xarr, yarr )
x, y = x.ravel(), y.ravel()
print 'Number of fluid particles: ', len(x)
hf = numpy.ones_like(x) * h
mf = numpy.ones_like(x) * dx * dy * ro
rhof = numpy.ones_like(x) * ro
csf = numpy.ones_like(x) * co
fluid = base.get_particle_array(name="fluid", type=Fluid,
x=x, y=y, h=hf, m=mf, rho=rhof, cs=csf)
return fluid
def get_particles(**args):
fluid = get_fluid_particles()
boundary = get_boundary_particles()
return [fluid, boundary]
app = solver.Application()
s = solver.Solver(dim=2, integrator_type=solver.EulerIntegrator)
#Equation of state
s.add_operation(solver.SPHOperation(
sph.TaitEquation.withargs(hks=False, co=co, ro=ro),
on_types=[Fluid],
updates=['p', 'cs'],
id='eos'),
)
#Continuity equation
s.add_operation(solver.SPHIntegration(
sph.SPHDensityRate.withargs(hks=False),
on_types=[Fluid], from_types=[Fluid],
updates=['rho'], id='density')
)
#momentum equation
s.add_operation(solver.SPHIntegration(
sph.MomentumEquation.withargs(alpha=alpha, beta=0.0, hks=False),
on_types=[Fluid], from_types=[Fluid],
updates=['u','v'], id='mom')
)
#Gravity force
s.add_operation(solver.SPHIntegration(
sph.GravityForce.withargs(gy=-9.81),
on_types=[Fluid],
updates=['u','v'],id='gravity')
)
#the boundary force
s.add_operation(solver.SPHIntegration(
sph.MonaghanBoundaryForce.withargs(delp=dx),
on_types=[Fluid], from_types=[Solid], updates=['u','v'],
id='bforce')
)
# Position stepping and XSPH correction operations
s.add_operation_step([Fluid])
s.add_operation_xsph(eps=eps)
dt = 1e-4
s.set_final_time(3.0)
s.set_time_step(dt)
app.setup(
solver=s,
variable_h=False, create_particles=get_particles, min_cell_size=2*h,
locator_type=base.NeighborLocatorType.SPHNeighborLocator,
domain_manager=base.DomainManagerType.DomainManager,
cl_locator_type=base.OpenCLNeighborLocatorType.AllPairNeighborLocator
)
if app.options.with_cl:
raise RuntimeError("OpenCL support not added for MonaghanBoundaryForce!")
s.set_print_freq(1000)
app.run()
| [
[
8,
0,
0.1569,
0.3088,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3137,
0.0049,
0,
0.66,
0.0263,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.3186,
0.0049,
0,
0.66... | [
"\"\"\" A tiny dam break problem\n\n\nSetup:\n------",
"import sys",
"import numpy",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"import pysph.sph.api as sph",
"Fluid = base.ParticleType.Fluid",
"Solid = base.ParticleType.Solid",
"h = 0.05",
"dx = dy = h/1.25",
"ro = 10... |
""" 2D Dam Break Over a dry bed. The case is described in "State of
the art classical SPH for free surface flows", Benedict D Rogers,
Robert A, Dalrymple and Alex J.C Crespo, Journal of Hydraulic
Research, Vol 48, Extra Issue (2010), pp 6-27
Setup:
------
x x !
x x !
x x !
x x !
x o o o x !
x o o x !3m
x o o o x !
x o o x !
x o o o x !
x x !
xxxxxxxxxxxxxxxxxxxxx | o -- Fluid Particles
x -- Solid Particles
-dx- dx = dy
_________4m___________
Y
|
|
|
|
|
| /Z
| /
| /
| /
| /
| /
|/_________________X
Fluid particles are placed on a staggered grid. The nodes of the grid
are located at R = l*dx i + m * dy j with a two point bias (0,0) and
(dx/2, dy/2) refered to the corner defined by R. l and m are integers
and i and j are the unit vectors alon `X` and `Y` respectively.
For the Monaghan Type Repulsive boundary condition, a single row of
boundary particles is used with a boundary spacing delp = dx = dy.
For the Dynamic Boundary Conditions, a staggered grid arrangement is
used for the boundary particles.
Numerical Parameters:
---------------------
dx = dy = 0.012m
h = 0.0156 => h/dx = 1.3
Height of Water column = 2m
Length of Water column = 1m
Number of particles = 27639 + 1669 = 29308
ro = 1000.0
co = 10*sqrt(2*9.81*2) ~ 65.0
gamma = 7.0
Artificial Viscosity:
alpha = 0.5
XSPH Correction:
eps = 0.5
"""
import warnings
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
from pysph.tools import geometry_utils as geom
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
#h = 0.0156
h = 0.039
#h = 0.01
dx = dy = h/1.3
ro = 1000.0
co = 65.0
gamma = 7.0
alpha = 0.5
eps = 0.5
fluid_column_height = 2.0
fluid_column_width = 1.0
container_height = 3.0
container_width = 4.0
B = co*co*ro/gamma
def get_boundary_particles():
""" Get the particles corresponding to the dam and fluids """
xb1, yb1, zb1 = geom.create_3D_tank(0, 0, 0, container_width, container_height, container_width/2, dx)
xb2, yb2, zb2 = geom.create_3D_tank(-dx/2, -dx/2, -dx/2, container_width, container_height,
container_width/2, dx)
xb = numpy.concatenate((xb1, xb2))
yb = numpy.concatenate((yb1, yb2))
zb = numpy.concatenate((zb1, zb2))
hb = numpy.ones_like(xb)*h
mb = numpy.ones_like(xb)*dx*dy*dx*ro*0.5
rhob = numpy.ones_like(xb) * ro
cb = numpy.ones_like(xb)*co
boundary = base.get_particle_array(name="boundary", type=Solid,
x=xb, y=yb, z=zb, h=hb, rho=rhob, cs=cb,
m=mb)
print 'Number of Boundary particles: ', len(xb)
return boundary
def get_fluid_particles():
xf1, yf1, zf1 = geom.create_3D_filled_region(dx, dx, dx,fluid_column_width, fluid_column_height,
fluid_column_width/2, dx)
xf2, yf2, zf2 = geom.create_3D_filled_region(dx/2, dx/2, dx/2, fluid_column_width, fluid_column_height,
fluid_column_width/2, dx)
x = numpy.concatenate((xf1, xf2))
y = numpy.concatenate((yf1, yf2))
z = numpy.concatenate((zf1, zf2))
print 'Number of fluid particles: ', len(x)
hf = numpy.ones_like(x) * h
mf = numpy.ones_like(x) * dx * dy * dx * ro * 0.5
rhof = numpy.ones_like(x) * ro
csf = numpy.ones_like(x) * co
fluid = base.get_particle_array(name="fluid", type=Fluid,
x=x, y=y, z=z, h=hf, m=mf, rho=rhof, cs=csf)
return fluid
def get_particles(**args):
fluid = get_fluid_particles()
boundary = get_boundary_particles()
return [fluid, boundary]
app = solver.Application()
integrator_type = solver.PredictorCorrectorIntegrator
s = solver.Solver(dim=2, integrator_type=integrator_type)
kernel = base.CubicSplineKernel(dim=2)
# define the artificial pressure term for the momentum equation
deltap = -1/1.3
n = 4
#Equation of state
s.add_operation(solver.SPHOperation(
sph.TaitEquation.withargs(hks=False, co=co, ro=ro),
on_types=[Fluid, Solid],
updates=['p', 'cs'],
id='eos'),
)
#Continuity equation
s.add_operation(solver.SPHIntegration(
sph.SPHDensityRate.withargs(hks=False),
on_types=[Fluid, Solid], from_types=[Fluid, Solid],
updates=['rho'], id='density')
)
#momentum equation
# s.add_operation(solver.SPHIntegration(
# sph.MomentumEquation.withargs(alpha=alpha, beta=0.0, hks=False,
# deltap=deltap, n=n),
# on_types=[Fluid], from_types=[Fluid, Solid],
# updates=['u','v'], id='mom')
# )
s.add_operation(solver.SPHIntegration(
sph.SPHPressureGradient.withargs(),
on_types=[Fluid], from_types=[Fluid,],
updates=['u','v','z'], id='pgrad')
)
s.add_operation(solver.SPHIntegration(
sph.MonaghanArtificialViscosity.withargs(alpha=alpha, beta=0.0),
on_types=[Fluid], from_types=[Fluid,Solid],
updates=['u','v','z'], id='avisc')
)
#Gravity force
s.add_operation(solver.SPHIntegration(
sph.GravityForce.withargs(gy=-9.81),
on_types=[Fluid],
updates=['u','v','z'],id='gravity')
)
# Position stepping and XSPH correction operations
s.add_operation_step([Fluid])
s.add_operation_xsph(eps=eps)
dt = 1.25e-4
s.set_final_time(3.0)
s.set_time_step(dt)
app.setup(
solver=s,
variable_h=False, create_particles=get_particles, min_cell_size=4*h,
locator_type=base.NeighborLocatorType.SPHNeighborLocator,
domain_manager=base.DomainManagerType.DomainManager,
cl_locator_type=base.OpenCLNeighborLocatorType.AllPairNeighborLocator
)
# this tells the solver to compute the max time step dynamically
s.time_step_function = solver.ViscousTimeStep(co=co,cfl=0.3,
particles=s.particles)
if app.options.with_cl:
msg = """\n\n
You have chosen to run the example with OpenCL support. The only
integrator with OpenCL support is the forward Euler
integrator. This integrator will be used instead of the default
predictor corrector integrator for this example.\n\n
"""
warnings.warn(msg)
integrator_type = solver.EulerIntegrator
app.run()
| [
[
8,
0,
0.1431,
0.2824,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2901,
0.0038,
0,
0.66,
0.0233,
358,
0,
1,
0,
0,
358,
0,
0
],
[
1,
0,
0.2977,
0.0038,
0,
0.66... | [
"\"\"\" 2D Dam Break Over a dry bed. The case is described in \"State of\nthe art classical SPH for free surface flows\", Benedict D Rogers,\nRobert A, Dalrymple and Alex J.C Crespo, Journal of Hydraulic\nResearch, Vol 48, Extra Issue (2010), pp 6-27\n\n\nSetup:\n------",
"import warnings",
"import numpy",
"i... |
""" Dam break simulation over a wet bed.
This is part of the SPHERIC validation test cases (case 5)
(http://wiki.manchester.ac.uk/spheric/index.php/SPHERIC_Home_Page)
The main reference for this test case is 'State-of-the-art classical SPH for free-surface flows' by Moncho Gomez-Gesteira and Benedict D. Rogers and Robert
A. Dalrymple and Alex J. Crespo, Journal of Hydraulic Research Extra
Issue (2010) pp 6-27
"""
import numpy
import pysph.solver.api as solver
import pysph.base.api as base
import pysph.sph.api as sph
import pysph.tools.geometry_utils as geom
# Geometric parameters
dx = 0.005
h0 = 0.006
d = 0.0180
H = 0.15
tank_length = 0.38 + 3.0 #9.55
tank_height = 0.2
# Numerical parameters
vmax = numpy.sqrt(2*9.81*H)
co = 10.0 * vmax
ro = 1000.0
B = co*co*ro/7.0
alpha = 0.08
beta = 0.0
eps = 0.5
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
def get_boundary_particles():
""" Get the particles corresponding to the dam and fluids """
# get the tank
xt1, yt1 = geom.create_2D_tank(x1=0, y1=0,
x2=tank_length, y2=tank_height,
dx=dx)
xt2, yt2 = geom.create_2D_tank(x1=-dx/2, y1=-dx/2,
x2=tank_length + dx/2, y2=tank_height+dx/2,
dx=dx)
x = numpy.concatenate( (xt1, xt2) )
y = numpy.concatenate( (yt1, yt2) )
h = numpy.ones_like(x) * h0
m = numpy.ones_like(x) * ro*dx*dx*0.5
rho = numpy.ones_like(x) * ro
cs = numpy.ones_like(x) * co
tank = base.get_particle_array(cl_precision="single", name="tank",
type=Solid, x=x,y=y,m=m,rho=rho,h=h,cs=cs)
np = tank.get_number_of_particles()
# create the gate
y1 = numpy.arange(dx/2, tank_height+1e-4, dx/2)
x1 = numpy.ones_like(y1)*(0.38-dx/2)
y2 = numpy.arange(dx/2+dx/4, tank_height+1e-4, dx/2)
x2 = numpy.ones_like(y2)*(0.38-dx)
y3 = numpy.arange(dx/2, tank_height+1e-4, dx/2)
x3 = numpy.ones_like(y3)*(0.38-1.5*dx)
x = numpy.concatenate( (x1, x2, x3) )
y = numpy.concatenate( (y1, y2, y3) )
h = numpy.ones_like(x) * h0
m = numpy.ones_like(x) * 0.5 * dx/2 * dx/2 * ro
rho = numpy.ones_like(x) * ro
cs = numpy.ones_like(x) * co
v = numpy.ones_like(x) * 1.5
gate = base.get_particle_array(cl_precision="single", name="gate",
x=x, y=y, m=m, rho=rho, h=h, cs=cs,
v=v,
type=Solid)
np += gate.get_number_of_particles()
print "Number of solid particles = %d"%(np)
return [tank, gate]
def get_fluid_particles():
# create the dam
xf1, yf1 = geom.create_2D_filled_region(x1=dx, y1=dx,
x2=0.38-2*dx,
y2=0.15,
dx=dx)
xf2, yf2 = geom.create_2D_filled_region(x1=dx/2, y1=dx/2,
x2=0.38-2*dx,
y2=0.15,
dx=dx)
# create the bed
xf3, yf3 = geom.create_2D_filled_region(x1=0.38+dx/2, y1=dx/2,
x2=tank_length-dx, y2=d,
dx=dx)
xf4, yf4 = geom.create_2D_filled_region(x1=0.38, y1=dx,
x2=tank_length-dx/2, y2=d,
dx=dx)
x = numpy.concatenate( (xf1, xf2, xf3, xf4) )
y = numpy.concatenate( (yf1, yf2, yf3, yf4) )
hf = numpy.ones_like(x) * h0
mf = numpy.ones_like(x) * dx * dx * ro * 0.5
rhof = numpy.ones_like(x) * ro
csf = numpy.ones_like(x) * co
rhop = numpy.ones_like(x) * ro
fluid = base.get_particle_array(cl_precision="single",
name="fluid", type=Fluid,
x=x, y=y, h=hf, m=mf, rho=rhof,
cs=csf, rhop=rhop)
np = fluid.get_number_of_particles()
print "Number of fluid particles = %d"%(np)
return fluid
def get_particles(**args):
fluid = get_fluid_particles()
tank, gate = get_boundary_particles()
return [fluid, tank, gate]
app = solver.Application()
s = solver.Solver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator)
kernel = base.CubicSplineKernel(dim=2)
# define the artificial pressure term for the momentum equation
deltap = -1/1.3
n = 4
# pilot rho
s.add_operation(solver.SPHOperation(
sph.ADKEPilotRho.withargs(h0=h0),
on_types=[base.Fluid], from_types=[base.Fluid, base.Solid],
updates=['rhop'], id='adke_rho'),
)
# smoothing length update
s.add_operation(solver.SPHOperation(
sph.ADKESmoothingUpdate.withargs(h0=h0, k=0.7, eps=0.5, hks=False),
on_types=[base.Fluid], updates=['h'], id='adke'),
)
#Equation of state
s.add_operation(solver.SPHOperation(
sph.TaitEquation.withargs(hks=False, co=co, ro=ro),
on_types=[Fluid, Solid],
updates=['p', 'cs'],
id='eos'),
)
#Continuity equation
s.add_operation(solver.SPHIntegration(
sph.SPHDensityRate.withargs(hks=False),
on_types=[Fluid, Solid], from_types=[Fluid, Solid],
updates=['rho'], id='density')
)
#momentum equation
s.add_operation(solver.SPHIntegration(
sph.MomentumEquation.withargs(alpha=alpha, beta=0.0, hks=False,
deltap=deltap, n=n),
on_types=[Fluid], from_types=[Fluid, Solid],
updates=['u','v'], id='mom')
)
#s.add_operation(solver.SPHIntegration(
# sph.SPHPressureGradient.withargs(),
# on_types=[Fluid], from_types=[Fluid,Solid],
# updates=['u','v'], id='pgrad')
# )
#s.add_operation(solver.SPHIntegration(
# sph.MonaghanArtificialVsicosity.withargs(alpha=alpha, beta=0.0),
# on_types=[Fluid], from_types=[Fluid,Solid],
# updates=['u','v'], id='avisc')
# )
#Gravity force
s.add_operation(solver.SPHIntegration(
sph.GravityForce.withargs(gy=-9.81),
on_types=[Fluid],
updates=['u','v'],id='gravity')
)
# Position stepping and XSPH correction operations
s.add_operation(solver.SPHIntegration(
sph.PositionStepping.withargs(),
on_types=[base.Fluid,base.Solid],
updates=["x","y"],
id="step")
)
s.add_operation(solver.SPHIntegration(
sph.XSPHCorrection.withargs(),
on_types=[base.Fluid,], from_types=[base.Fluid,],
updates=["x","y"],
id="xsph")
)
dt = 1.25e-4
s.set_final_time(1.5)
s.set_time_step(dt)
app.setup(
solver=s,
variable_h=False, create_particles=get_particles, min_cell_size=4*h0,
locator_type=base.NeighborLocatorType.SPHNeighborLocator,
domain_manager=base.DomainManagerType.DomainManager,
cl_locator_type=base.OpenCLNeighborLocatorType.AllPairNeighborLocator
)
# this tells the solver to compute the max time step dynamically
s.time_step_function = solver.ViscousTimeStep(co=co,cfl=0.3,
particles=s.particles)
app.run()
| [
[
8,
0,
0.0211,
0.0383,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.046,
0.0038,
0,
0.66,
0.0238,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0536,
0.0038,
0,
0.66,... | [
"\"\"\" Dam break simulation over a wet bed.\n\nThis is part of the SPHERIC validation test cases (case 5)\n(http://wiki.manchester.ac.uk/spheric/index.php/SPHERIC_Home_Page)\n\nThe main reference for this test case is 'State-of-the-art classical SPH for free-surface flows' by Moncho Gomez-Gesteira and Benedict D. ... |
""" An example Script to study the behavior of Monaghan type repulsive
particles (Smoothed Particle Hydrodynamics, Reports on Progresses in
Physics)
The boundary particles are an improvement over the Lenard Jones type
repulsive boundary particles. One of the main features is that a
particle moving parallel to the wall will experience the same force.
The force exerted on a boundary particle is
f = f1(x)*f2(y) nk
where f1 is a function of the component of the projection of the
vector rab onto the tangential direction and f2 is a function of the
component of the normal projection of rab.
Each boundary particle must have therefore an associated normal and
tangent.
The setup is described as Test 1 of "Boundary Conditions Generated by
Dynamic Particles in SPH Methods" by A.J.C. Crespo and
M. Gomez-Gesteria and R.A. Dalrymple, CMC, vol 5, no 3 pp 173-184
Setup:
------
o [0, 0.3]
x x x x x x x
-----
dp
o -- fluid particle
x -- boundary particles
Y
|
| Z
| /
| /
|/_______X
The fluid particle falls under the influence of gravity and interacts
with the boundary particles. When the particle `sees` the boundary
particle for the interaction of the boundary force term, a repulsion
is activated on the fluid particle.
Behavior:
---------
We study the motion of the fluid particle in this simple configuration.
From the output files, observe the motion (`x` vs `y`) of the particle.
A state space plot of Velocity (`v`) V/S Position (`y`) should ideally
be a closed loop implying the conservation of energy.
An alternative setup could be switching off gravity and imposing an
initial velocity on the particle directed towards the boundary. We can
study the ability of the method to prevent penetration by observing
the minimum distance 'y' from the wall for increasing velocities.
Parameters:
-----------
The maximum velocity is estimated as Vmax = sqrt(2*9.81*0.3) and the
numerical sound speed is taken as 10*Vmax ~ 25.0 m/s
The reference density is taken as 1.0
h = 2.097e-2
dx = dy = h/(1.3)
g = -9.81
Running:
--------
run like so:
python monaghanbc.py --freq <print-freq> --directory ./monaghanbc
"""
import logging, numpy
import sys
import pysph.solver.api as solver
import pysph.sph.api as sph
import pysph.base.api as base
Fluid = base.ParticleType.Fluid
Solid = base.ParticleType.Solid
fname = sys.argv[0][:-3]
app = solver.Application(fname=fname)
#global variables
h = 2.097e-2
dx = dy = h/(1.3)
g = -9.81
xf = numpy.array([0])
yf = numpy.array([0.3])
hf = numpy.array([h])
mf = numpy.array([1.0])
vf = numpy.array([0.0])
cf = numpy.array([25.0])
rhof = numpy.array([1.0])
fluid = base.get_particle_array(name="fluid", type=Fluid, x=xf, y=yf,
h=hf, m=mf, rho=rhof, v=vf, cs=cf)
#generate the boundary
l = base.Line(base.Point(-.5), 1.0, 0)
g = base.Geometry('line', [l], False)
g.mesh_geometry(dx)
boundary = g.get_particle_array(re_orient=True)
boundary.m[:] = 1.0
particles = base.Particles(arrays=[fluid, boundary])
app.particles = particles
kernel = base.HarmonicKernel(dim=2, n=3)
s = solver.Solver(dim=2, integrator_type=solver.PredictorCorrectorIntegrator)
# set the kernel as the default for the solver
s.default_kernel = kernel
#Tait equation
s.add_operation(solver.SPHOperation(
sph.TaitEquation.withargs(co=25.0, ro=1.0),
on_types=[Fluid],
updates=['p','cs'],
id='eos', kernel=kernel)
)
#continuity equation
s.add_operation(solver.SPHIntegration(
sph.SPHDensityRate.withargs(), from_types=[Fluid],
on_types=[Fluid],
updates=['rho'], id='density', kernel=kernel)
)
#momentum equation
s.add_operation(solver.SPHIntegration(
sph.MomentumEquation.withargs(alpha=0.0, beta=0.0,),
on_types=[Fluid],
from_types=[Fluid],
updates=['u','v'], id='mom')
)
#gravity force
s.add_operation(solver.SPHIntegration(
sph.GravityForce.withargs(gy=-9.81),
on_types=[Fluid],
updates=['u','v'],id='gravity')
)
#the boundary force
s.add_operation(solver.SPHIntegration(
sph.MonaghanBoundaryForce.withargs(delp=dx),
on_types=[Fluid], from_types=[Solid], updates=['u','v'],
id='bforce')
)
#xsph correction
s.add_operation(solver.SPHIntegration(
sph.XSPHCorrection.withargs(eps=0.1),
from_types=[Fluid],
on_types=[Fluid], updates=['x','y'], id='xsph')
)
#Position stepping
s.add_operation(solver.SPHIntegration(
sph.PositionStepping.withargs(),
on_types=[Fluid],
updates=['x','y'], id='step')
)
s.set_final_time(1)
s.set_time_step(1e-4)
app.setup(s)
app.run()
| [
[
8,
0,
0.2083,
0.4118,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4216,
0.0049,
0,
0.66,
0.0244,
715,
0,
2,
0,
0,
715,
0,
0
],
[
1,
0,
0.4265,
0.0049,
0,
0.66... | [
"\"\"\" An example Script to study the behavior of Monaghan type repulsive\nparticles (Smoothed Particle Hydrodynamics, Reports on Progresses in\nPhysics)\n\nThe boundary particles are an improvement over the Lenard Jones type\nrepulsive boundary particles. One of the main features is that a\nparticle moving parall... |
""" A simple example in which two drops collide """
import pysph.solver.api as solver
import pysph.base.api as base
import pysph.sph.api as sph
import numpy
def get_circular_patch(name="", type=0, dx=0.05):
x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -1.05:1.05+1e-4:dx]
x = x.ravel()
y = y.ravel()
m = numpy.ones_like(x)*dx*dx
h = numpy.ones_like(x)*2*dx
rho = numpy.ones_like(x)
p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
cs = numpy.ones_like(x) * 100.0
u = 0*x
v = 0*y
indices = []
for i in range(len(x)):
if numpy.sqrt(x[i]*x[i] + y[i]*y[i]) - 1 > 1e-10:
indices.append(i)
pa = base.get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v,
cs=cs,name=name, type=type)
la = base.LongArray(len(indices))
la.set_data(numpy.array(indices))
pa.remove_particles(la)
pa.set(idx=numpy.arange(len(pa.x)))
return pa
def get_particles():
f1 = get_circular_patch("fluid1")
xlow, xhigh = min(f1.x), max(f1.x)
f1.x += 1.2*(xhigh - xlow)
f1.u[:] = -1.0
f2 = get_circular_patch("fluid2")
f2.u[:] = +1.0
print "Number of particles: ", f1.get_number_of_particles() * 2.0
return [f1,f2]
app = solver.Application()
kernel = base.CubicSplineKernel(dim=2)
s = solver.FluidSolver(dim=2,
integrator_type=solver.PredictorCorrectorIntegrator)
s.set_final_time(1.0)
s.set_time_step(1e-4)
app.setup(
solver=s,
variable_h=False, create_particles=get_particles)
app.run()
| [
[
8,
0,
0.0133,
0.0133,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.04,
0.0133,
0,
0.66,
0.0769,
683,
0,
1,
0,
0,
683,
0,
0
],
[
1,
0,
0.0533,
0.0133,
0,
0.66,
... | [
"\"\"\" A simple example in which two drops collide \"\"\"",
"import pysph.solver.api as solver",
"import pysph.base.api as base",
"import pysph.sph.api as sph",
"import numpy",
"def get_circular_patch(name=\"\", type=0, dx=0.05):\n \n x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -1.05:1.05+1e-4:dx]\n x... |
""" NBody Example """
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
import numpy
Fluid = base.ParticleType.Fluid
# number of particles, time step and final time
np = 1024
dt = 1e-2
tf = 10.0
nsteps = tf/dt
def get_particles(**kwargs):
x = numpy.random.random(np) * 2.0 - 1.0
y = numpy.random.random(np) * 2.0 - 1.0
z = numpy.random.random(np) * 2.0 - 1.0
u = numpy.random.random(np) * 2.0 - 1.0
v = numpy.random.random(np) * 2.0 - 1.0
w = numpy.random.random(np) * 2.0 - 1.0
m = numpy.random.random(np)*100
pa = base.get_particle_array(name="test", cl_precision="single",
type=Fluid, x=x, y=y, z=z, m=m, u=u,
v=v, w=w)
return pa
app = solver.Application()
s = solver.Solver(dim=3,
integrator_type=solver.EulerIntegrator)
s.add_operation(solver.SPHIntegration(
sph.NBodyForce.withargs(),
on_types=[Fluid], from_types=[Fluid],
updates=['u','v','w'], id='nbody_force')
)
s.add_operation_step([Fluid])
app.setup(
solver=s,
variable_h=False, create_particles=get_particles,
locator_type=base.NeighborLocatorType.NSquareNeighborLocator,
cl_locator_type=base.OpenCLNeighborLocatorType.AllPairNeighborLocator,
domain_manager=base.DomainManager
)
s.set_final_time(tf)
s.set_time_step(dt)
s.set_print_freq(nsteps + 1)
app.run()
| [
[
8,
0,
0.0164,
0.0164,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0492,
0.0164,
0,
0.66,
0.0526,
212,
0,
1,
0,
0,
212,
0,
0
],
[
1,
0,
0.0656,
0.0164,
0,
0.66... | [
"\"\"\" NBody Example \"\"\"",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"import pysph.sph.api as sph",
"import numpy",
"Fluid = base.ParticleType.Fluid",
"np = 1024",
"dt = 1e-2",
"tf = 10.0",
"nsteps = tf/dt",
"def get_particles(**kwargs):\n \n x = numpy.rando... |
""" Shock tube problem with the ADKE procedure of Sigalotti """
import pysph.solver.api as solver
import pysph.base.api as base
import pysph.sph.api as sph
from pysph.base.kernels import CubicSplineKernel
import numpy
Fluid = base.ParticleType.Fluid
Boundary = base.ParticleType.Boundary
# Shock tube parameters
nl = int(320 * 7.5)
nr = int(80 * 7.5)
dxl = 0.6/nl
dxr = 4*dxl
h0 = 2*dxr
eps = 0.8
k = 0.7
beta = 1.0
K = 1.0
f = 0.5
hks = False
class UpdateBoundaryParticles:
def __init__(self, particles):
self.particles = particles
def eval(self):
left = self.particles.get_named_particle_array('left')
right = self.particles.get_named_particle_array("right")
fluid = self.particles.get_named_particle_array("fluid")
left.h[:] = fluid.h[0]
right.h[:] = fluid.h[-1]
def get_fluid_particles(**kwargs):
pa = solver.shock_tube_solver.standard_shock_tube_data(
name="fluid", nl=nl, nr=nr)
pa.add_property({'name':'rhop','type':'double'})
pa.add_property({'name':'div', 'type':'double'})
pa.add_property( {'name':'q', 'type':'double'} )
return pa
def get_boundary_particles(**kwargs):
# left boundary
x = numpy.ones(50)
for i in range(50):
x[i] = -0.6 - (i+1) * dxl
m = numpy.ones_like(x) * dxl
h = numpy.ones_like(x) * 2*dxr
rho = numpy.ones_like(x)
u = numpy.zeros_like(x)
e = numpy.ones_like(x) * 2.5
p = (0.4) * rho * e
cs = numpy.sqrt( 1.4*p/rho )
left = base.get_particle_array(name="left", type=Boundary,
x=x, m=m, h=h, rho=rho, u=u,
e=e, cs=cs, p=p)
# right boundary
for i in range(50):
x[i] = 0.6 + (i + 1)*dxr
m = numpy.ones_like(x) * dxl
h = numpy.ones_like(x) * 2*dxr
rho = numpy.ones_like(x) * 0.25
u = numpy.zeros_like(x)
e = numpy.ones_like(x) * 1.795
p = (0.4) * rho * e
#cs = numpy.sqrt(0.4*e)
cs = numpy.sqrt( 1.4*p/rho )
right = base.get_particle_array(name="right", type=Boundary,
x=x, m=m, h=h, rho=rho, u=u,
e=e, cs=cs,p=p)
return [left, right]
def get_particles(**kwargs):
particles = []
particles.append(get_fluid_particles())
particles.extend(get_boundary_particles())
return particles
# Create the application
app = solver.Application()
# define the solver and kernel
#s = solver.Solver(dim=1, integrator_type=solver.RK2Integrator)
s = solver.MonaghanShockTubeSolver(dim=1, integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k,
beta=beta, K=K, f=f)
#############################################################
# ADD OPERATIONS
#############################################################
# # pilot rho
# s.add_operation(solver.SPHOperation(
# sph.ADKEPilotRho.withargs(h0=h0),
# on_types=[Fluid], from_types=[Fluid,Boundary],
# updates=['rhop'], id='adke_rho'),
# )
# # smoothing length update
# s.add_operation(solver.SPHOperation(
# sph.ADKESmoothingUpdate.withargs(h0=h0, k=k, eps=eps, hks=hks),
# on_types=[Fluid], updates=['h'], id='adke'),
# )
# # summation density
# s.add_operation(solver.SPHOperation(
# sph.SPHRho.withargs(hks=hks),
# from_types=[Fluid, Boundary], on_types=[Fluid],
# updates=['rho'], id = 'density')
# )
# # ideal gas equation
# s.add_operation(solver.SPHOperation(
# sph.IdealGasEquation.withargs(),
# on_types = [Fluid], updates=['p', 'cs'], id='eos')
# )
# # momentum equation pressure equation
# s.add_operation(solver.SPHIntegration(
# sph.SPHPressureGradient.withargs(),
# from_types=[Fluid, Boundary], on_types=[Fluid],
# updates=['u'], id='mom')
# )
# #momentum equation visc
# s.add_operation(solver.SPHIntegration(
# sph.MomentumEquationSignalBasedViscosity.withargs(beta=1.0, K=1.0),
# on_types=[base.Fluid,], from_types=[base.Fluid, base.Boundary],
# updates=['u'],
# id="momvisc")
# )
# # energy equation
# s.add_operation(solver.SPHIntegration(
# sph.EnergyEquationWithSignalBasedViscosity.withargs(beta=1.0, K=1.0, f=0.5),
# on_types=[Fluid], from_types=[Fluid, Boundary],
# updates=['e'],
# id='enr')
# )
# # position stepping
# s.add_operation(solver.SPHIntegration(
# sph.PositionStepping.withargs(),
# on_types=[base.Fluid],
# updates=['x'],
# id="step")
# )
s.set_final_time(0.15)
s.set_time_step(3e-4)
app.setup(
solver=s,
min_cell_size = 4*h0,
variable_h=True, create_particles=get_particles,
locator_type=base.NeighborLocatorType.SPHNeighborLocator
)
# add the boundary update function to the particles
s.particles.add_misc_function( UpdateBoundaryParticles(s.particles) )
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
beta=beta, K=K, f=f, hks=hks)
app.run()
| [
[
8,
0,
0.0049,
0.0049,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0146,
0.0049,
0,
0.66,
0.0323,
683,
0,
1,
0,
0,
683,
0,
0
],
[
1,
0,
0.0194,
0.0049,
0,
0.66... | [
"\"\"\" Shock tube problem with the ADKE procedure of Sigalotti \"\"\"",
"import pysph.solver.api as solver",
"import pysph.base.api as base",
"import pysph.sph.api as sph",
"from pysph.base.kernels import CubicSplineKernel",
"import numpy",
"Fluid = base.ParticleType.Fluid",
"Boundary = base.Particle... |
""" Sjogreen's test case """
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import get_shock_tube_data as get_data
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
Locator = base.NeighborLocatorType
# shock tube parameters
xl = -1.0; xr = 1.0
pl = 0.4; pr = 0.4
ul = -2.0; ur = 2.0
rhol = 1.0; rhor = 1.0
# Number of particles
nl = 400
nr = 400
np = nl + nr
# Time step constants
dt = 1e-3
tf = 0.3
# Artificial Viscosity constants
alpha = 1.0
beta = 1.0
gamma = 1.4
eta = 0.1
# ADKE Constants
eps = 0.5
k=1.0
h0 = 2.5*xr/nr
# Artificial Heat constants
g1 = 0.1
g2 = 1.0
kernel = base.CubicSplineKernel
hks=False
def get_particles(with_boundary=False, **kwargs):
adke, left, right = get_data.get_shock_tube_data(nl=nl, nr=nr, xl=xl, xr=xr,
pl=pl, pr=pr,
rhol=rhol, rhor=rhor,
ul=ul, ur=ur,
g1=g1, g2=g2, h0=h0,
gamma=gamma)
if with_boundary:
return [adke, left, right]
else:
return [adke,]
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=1, integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k, g1=g1, g2=g2,
alpha=alpha, beta=beta,gamma=gamma,
kernel=kernel, hks=hks)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=4*h0,
variable_h=True,
create_particles=get_particles,
locator_type=Locator.SPHNeighborLocator,
cl_locator_type=CLLocator.AllPairNeighborLocator,
domain_manager_type=CLDomain.DomainManager,
nl=nl, nr=nr)
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
g1=g1, g2=g2, alpha=alpha, beta=beta, hks=hks)
app.run()
| [
[
8,
0,
0.0118,
0.0118,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0353,
0.0118,
0,
0.66,
0.025,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0588,
0.0118,
0,
0.66,... | [
"\"\"\" Sjogreen's test case \"\"\"",
"import numpy",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"import get_shock_tube_data as get_data",
"CLDomain = base.DomainManagerType",
"CLLocator = base.OpenCLNeighborLocatorType",
"Locator = base.NeighborLocatorType",
"xl = -1.0; x... |
""" Standard shock tube problem by Monaghan """
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import get_shock_tube_data as data
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
Locator = base.NeighborLocatorType
kernel = base.CubicSplineKernel
hks=False
# shock tube parameters
xl = -0.1; xr = 0.1
pl = 4e-7; pr = 4e-7
ul = 1.0; ur = -1.0
rhol = 1.0; rhor = 1.0
gamma = 1.4
# Number of particles
nl = 400
nr = 400
np = nl + nr
# Time step constants
dt = 1e-6
tf = 0.1
# Artificial Viscosity constants
alpha = 1.0
beta = 1.0
gamma = 1.4
eta = 0.1
# ADKE Constants
eps = 0.4
k=0.7
h0 = 1.0*xr/nr
# Artificial Heat constants
g1 = 0.5
g2 = 1.0
def get_particles(with_boundary=False, **kwargs):
adke, left, right = data.get_shock_tube_data(nl=nl, nr=nr, xl=xl, xr=xr,
pl=pl, pr=pr,
rhol=rhol, rhor=rhor,
ul=ul, ur=ur,
g1=g1, g2=g2, h0=h0,
gamma=gamma)
if with_boundary:
return [adke, left, right]
else:
return [adke,]
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=1,
integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k, g1=g1, g2=g2,
alpha=alpha, beta=beta, gamma=gamma,
kernel=kernel, hks=hks)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=4*h0,
variable_h=True,
create_particles=get_particles,
locator_type=Locator.SPHNeighborLocator,
cl_locator_type=CLLocator.AllPairNeighborLocator,
domain_manager_type=CLDomain.DomainManager,
nl=nl, nr=nr)
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
g1=g1, g2=g2, alpha=alpha, beta=beta, hks=hks)
app.run()
| [
[
8,
0,
0.0118,
0.0118,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0353,
0.0118,
0,
0.66,
0.0244,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0588,
0.0118,
0,
0.66... | [
"\"\"\" Standard shock tube problem by Monaghan \"\"\"",
"import numpy",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"import get_shock_tube_data as data",
"CLDomain = base.DomainManagerType",
"CLLocator = base.OpenCLNeighborLocatorType",
"Locator = base.NeighborLocatorType",
... |
"""Woodward and COllela interacting blast wave."""
import numpy
import pysph.sph.api as sph
import pysph.base.api as base
import pysph.solver.api as solver
xl = 0
xr = 1.0
np = 5001
nbp = 100
dx = (xr-xl)/(np-1)
D = 1.5
h0 = D*dx
adke_eps = 0.5
adke_k = 1.0
g1 = 0.2
g2 = 0.4
alpha = 1.0
beta = 1.0
gamma = 1.4
tf = 0.04
dt = 2.5e-6
class UpdateBoundaryParticles(object):
def __init__(self, particles, dx):
self.particles = particles
self.dx = dx
def eval(self):
left = self.particles.get_named_particle_array("left")
right = self.particles.get_named_particle_array("right")
fluid = self.particles.get_named_particle_array("fluid")
left.h[:nbp] = fluid.h[:nbp]
right.h[-nbp:] = fluid.h[-nbp:]
left.u[:nbp] = -fluid.u[:nbp]
right.u[-nbp:] = -fluid.u[-nbp:]
left.e[:nbp] = fluid.e[:nbp]
right.e[-nbp:] = fluid.e[-nbp:]
left.p[:nbp] = fluid.p[:nbp]
right.p[-nbp:] = fluid.p[-nbp:]
left.rho[:nbp] = fluid.rho[:nbp]
right.rho[-nbp:] = fluid.rho[-nbp:]
left.cs[:nbp] = fluid.cs[:nbp]
right.cs[-nbp:] = fluid.cs[-nbp:]
left.q[:nbp] = fluid.q[:nbp]
right.q[-nbp:] = fluid.q[-nbp:]
def get_particles(**kwargs):
xleft = numpy.arange(xl, 0.1-dx+1e-10, dx)
pleft = numpy.ones_like(xleft) * 1000.0
xmid = numpy.arange(0.1+dx, 0.9-dx+1e-10, dx)
pmid = numpy.ones_like(xmid) * 0.01
xright = numpy.arange(0.9+dx, 1.0+1e-10, dx)
pright = numpy.ones_like(xright) * 100.0
x = numpy.concatenate( (xleft, xmid, xright) )
p = numpy.concatenate( (pleft, pmid, pright) )
rho = numpy.ones_like(x)
m = numpy.ones_like(x) * dx
h = numpy.ones_like(x) * D * dx
e = p/( rho*(gamma-1.0) )
cs = numpy.sqrt(gamma*p/rho)
u = numpy.zeros_like(x)
rhop = numpy.ones_like(x)
div = numpy.zeros_like(x)
q = g1 * h * cs
fluid = base.get_particle_array(name="fluid", type=base.Fluid,
x=x, m=m, h=h, rho=rho,
p=p, e=e, cs=cs, u=u,
rhop=rhop, div=div, q=q)
nbp = 100
x = numpy.ones(nbp)
for i in range(nbp):
x[i] = xl - (i+1)*dx
m = numpy.ones_like(x) * fluid.m[0]
p = numpy.ones_like(x) * fluid.p[0]
rho = numpy.ones_like(x) * fluid.rho[0]
h = numpy.ones_like(x) * fluid.p[0]
e = p/( (gamma-1.0)*rho )
cs = numpy.sqrt(gamma*p/rho)
div = numpy.zeros_like(x)
q = g1 * h * cs
left = base.get_particle_array(name="left", type=base.Boundary,
x=x, p=p, rho=rho, m=m, h=h,
e=e, cs=cs, div=div, q=q)
x = numpy.ones(nbp)
_xr = xr + (nbp+1)*dx
for i in range(nbp):
x[i] = _xr - i*dx
m = numpy.ones_like(x) * fluid.m[-1]
p = numpy.ones_like(x) * fluid.p[-1]
h = numpy.ones_like(x) * fluid.h[-1]
rho = numpy.ones_like(x) * fluid.rho[-1]
e = p/( (gamma-1.0)*rho )
cs = numpy.sqrt(gamma*p/rho)
div = numpy.zeros_like(x)
q = g1 * h * cs
right = base.get_particle_array(name="right", type=base.Boundary,
x=x, p=p, rho=rho, m=m, h=h,
e=e, cs=cs, div=div, q=q)
return [fluid,left,right]
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=1,
integrator_type=solver.RK2Integrator,
h0=h0, eps=adke_eps, k=adke_k, g1=g1, g2=g2,
alpha=alpha, beta=beta,gamma=gamma)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=6*h0,
variable_h=True,
create_particles=get_particles)
# add the boundary update function
s.particles.add_misc_function( UpdateBoundaryParticles(s.particles, dx) )
app.run()
| [
[
8,
0,
0.0064,
0.0064,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0191,
0.0064,
0,
0.66,
0.0345,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0318,
0.0064,
0,
0.66... | [
"\"\"\"Woodward and COllela interacting blast wave.\"\"\"",
"import numpy",
"import pysph.sph.api as sph",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"xl = 0",
"xr = 1.0",
"np = 5001",
"nbp = 100",
"dx = (xr-xl)/(np-1)",
"D = 1.5",
"h0 = D*dx",
"adke_eps = 0.5",
... |
"""1D shock tube problem which simulates the collision of two strong
shocks. The test is described in 'An adaptive SPH method for strong
shocks' by Leonardo Di. G. Sigalotti and Henri Lopez and Leonardo
Trujillo, JCP, vol 228, pp (5888-5907)
"""
import pysph.solver.api as solver
import pysph.base.api as base
import pysph.sph.api as sph
import numpy
import get_shock_tube_data as get_data
# Parameters
# Initial left/right states of the Riemann problem for the colliding
# strong shocks test (Sigalotti, Lopez & Trujillo, JCP 228): domain
# extent, pressures, velocities and (equal) densities.
xl = -1.5; xr = 1.5
pl = 460.894; pr = 46.0950
ul = 19.5975; ur = -6.19633
rhol = 5.999242; rhor = 5.999242
# Number of particles
nl = 500*3
nr = 500*3
np = nl + nr
# Time step constants
dt = 5e-6
tf = 0.035
# Artificial Viscosity constants
alpha = 1.0
beta = 1.0
gamma = 1.4  # ratio of specific heats (ideal gas)
eta = 0.1
# ADKE Constants
# eps and k tune the adaptive kernel estimate; D scales the reference
# smoothing length h0 off the nominal particle spacing dx.
eps = 0.5
k=1.0
D = 1.5
dx = 0.5/500
h0 = D*dx
# mass
m0 = rhol*dx  # uniform particle mass from left-state density and spacing
# Artificial Heat constants
g1 = 0.5
g2 = 0.5
def get_particles(with_boundary=True, **kwargs):
    """Create the particle arrays for the colliding-shock problem.

    Builds the interior (ADKE) shock-tube array from the module-level
    initial-state parameters and assigns every particle the uniform mass
    ``m0``.  The left/right boundary arrays produced by the data helper
    are discarded; ``with_boundary`` and any extra keyword arguments are
    accepted only for interface compatibility with ``app.setup`` and are
    ignored here.
    """
    fluid, _left, _right = get_data.get_shock_tube_data(
        nl=nl, nr=nr, xl=xl, xr=xr,
        pl=pl, pr=pr,
        rhol=rhol, rhor=rhor,
        ul=ul, ur=ur,
        g1=g1, g2=g2, h0=h0,
        gamma=1.4)
    # Override whatever mass the helper assigned with the uniform value.
    fluid.m[:] = m0
    return [fluid]
# Build the command-line driven application wrapper.
app = solver.Application()
# 1-D ADKE shock-tube solver, RK2 integration.  h0/eps/k control the
# adaptive smoothing length; g1/g2 the artificial heat; alpha/beta the
# artificial viscosity.  (gamma is left at the solver default here;
# presumably 1.4 — TODO confirm it matches the module constant.)
s = solver.ADKEShockTubeSolver(dim=1,
                             integrator_type=solver.RK2Integrator,
                             h0=h0, eps=eps, k=k, g1=g1, g2=g2,
                             alpha=alpha, beta=beta)
s.set_final_time(tf)
s.set_time_step(dt)
# min_cell_size is the neighbor-search bin size (4*h0 here);
# variable_h enables per-particle smoothing lengths.
app.setup(
    solver=s,
    min_cell_size=4*h0,
    variable_h=True,
    create_particles=get_particles)
# Record the run parameters next to the simulation output so results can
# be reproduced and post-processed without consulting this script.
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
            g1=g1, g2=g2, alpha=alpha, beta=beta)
app.run()
| [
[
8,
0,
0.0412,
0.0706,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0941,
0.0118,
0,
0.66,
0.0256,
683,
0,
1,
0,
0,
683,
0,
0
],
[
1,
0,
0.1059,
0.0118,
0,
0.66... | [
"\"\"\"1D shock tube problem which simulates the collision of two strong\nshocks. The test is described in 'An adaptive SPH method for strong\nshocks' by Leonardo Di. G. Sigalotti and Henri Lopez and Leonardo\nTrujillo, JCP, vol 228, pp (5888-5907)\n\n\"\"\"",
"import pysph.solver.api as solver",
"import pysph.... |
""" Robert's problem """
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import get_shock_tube_data as get_data
# Shorthand aliases for the locator/domain-manager enumeration types used
# in app.setup below.
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
Locator = base.NeighborLocatorType
# Roberts problem parameters
vc = 0.42  # velocity offset added to both initial velocities (see ul/ur)
xl = -4.8; xr = 8.0
pl = 10.33; pr = 1.0
ul = -0.81 + vc; ur = -3.44 + vc
rhol = 3.86; rhor = 1.0
# Number of particles
nl = 7500
nr = 2500
np = nl + nr
# Time step constants
dt = 1e-4
tf = 1.5
# Artificial Viscosity constants
alpha = 1.0
beta = 1.0
gamma = 1.4  # ratio of specific heats (ideal gas)
eta = 0.1
# ADKE Constants
eps = 0.1
k=1.0
h0 = 1.0*xr/nr  # reference smoothing length from the right-region spacing
m = xr/nr       # right-region particle mass (rhor = 1.0, spacing xr/nr)
dxl = abs(xl)/nl  # left-region particle spacing
ml = rhol*dxl     # left-region particle mass
# Artificial Heat constants
g1 = 0.5
g2 = 1.0
kernel = base.CubicSplineKernel
hks=False
def get_particles(with_boundary=False, **kwargs):
    """Create the particle arrays for Robert's problem.

    Builds the interior (ADKE) shock-tube array from the module-level
    initial-state parameters, then overrides the mass of the left-region
    particles (first ``nl`` entries) with ``ml`` to match their tighter
    spacing.  When ``with_boundary`` is true the left/right boundary
    arrays from the data helper are returned as well.  Extra keyword
    arguments are accepted for interface compatibility and ignored.
    """
    fluid, bleft, bright = get_data.get_shock_tube_data(
        nl=nl, nr=nr, xl=xl, xr=xr,
        pl=pl, pr=pr,
        rhol=rhol, rhor=rhor,
        ul=ul, ur=ur,
        g1=g1, g2=g2, h0=h0,
        gamma=gamma,
        m0=m)
    # Left-region particles are spaced dxl apart -> heavier mass ml.
    fluid.m[:nl] = ml
    if with_boundary:
        return [fluid, bleft, bright]
    return [fluid]
# Build the command-line driven application wrapper.
app = solver.Application()
# 1-D ADKE shock-tube solver, RK2 integration, with an explicit kernel
# choice and the hks flag (hks semantics are defined by the solver;
# TODO confirm — presumably a kernel-summation variant switch).
s = solver.ADKEShockTubeSolver(dim=1,
                               integrator_type=solver.RK2Integrator,
                               h0=h0, eps=eps, k=k, g1=g1, g2=g2,
                               alpha=alpha, beta=beta,gamma=gamma,
                               kernel=kernel, hks=hks)
s.set_final_time(tf)
s.set_time_step(dt)
# Wire the solver in, selecting the neighbor locator for the host path
# and the OpenCL locator/domain manager for the CL path.
app.setup(
    solver=s,
    min_cell_size=4*h0,
    variable_h=True,
    create_particles=get_particles,
    locator_type=Locator.SPHNeighborLocator,
    cl_locator_type=CLLocator.AllPairNeighborLocator,
    domain_manager_type=CLDomain.DomainManager,
    nl=nl, nr=nr)
# Record the run parameters next to the simulation output for
# reproducibility and post-processing.
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
            g1=g1, g2=g2, alpha=alpha, beta=beta, hks=hks)
app.run()
| [
[
8,
0,
0.0104,
0.0104,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0312,
0.0104,
0,
0.66,
0.0227,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0521,
0.0104,
0,
0.66... | [
"\"\"\" Robert's problem \"\"\"",
"import numpy",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"import get_shock_tube_data as get_data",
"CLDomain = base.DomainManagerType",
"CLLocator = base.OpenCLNeighborLocatorType",
"Locator = base.NeighborLocatorType",
"vc = 0.42",
"x... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.