code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
import FreeCAD, FreeCADGui, Part, os
from Lens import makeLens
class SingleLens:
    """FreeCAD GUI command: create a simple two-surface lens."""
    def Activated(self):
        """Invoked by FreeCAD when the command is triggered."""
        FreeCAD.Console.PrintMessage("Make single lens\n")
        makeLens()
    def GetResources(self):
        """Return icon, menu text and tooltip for the command registry."""
        import OpticsPaths
        icon = OpticsPaths.iconsPath() + "/Lens.png"
        return {'Pixmap' : icon, 'MenuText': 'Create simple lens', 'ToolTip': 'Create simple lens with two surfaces'}
FreeCADGui.addCommand('Single_Lens', SingleLens())
| Python |
class OpticsWorkbench ( Workbench ):
    """ @brief Workbench of Optics design module. Here toolbars & icons are append. """
    import OpticsPaths
    import OpticsGui
    Icon = OpticsPaths.iconsPath() + "/Ico.png"
    MenuText = "Optics design module"
    ToolTip = "Optics design module"
    def Initialize(self):
        """Register this module's toolbar and menu entries (called once by FreeCAD)."""
        # NOTE(review): elsewhere the lens command is registered as
        # 'Single_Lens' via FreeCADGui.addCommand(); confirm 'Optics_Lens'
        # is the current registration name.
        commands = ["Optics_Lens"]  # renamed from 'list' to avoid shadowing the builtin
        # ToolBar
        self.appendToolbar("OpticTools", commands)
        # Menu
        self.appendMenu("Optics design", commands)
# 'Workbench' and 'Gui' are injected by FreeCAD when this InitGui script runs.
Gui.addWorkbench(OpticsWorkbench())
| Python |
#!/usr/bin/python
# coding: koi8-r
import sys
import pygtk
pygtk.require('2.0')
import gtk
#import FreeCAD, FreeCADGui
allvals = []
class MyWin:
    """Simple PyGTK dialog: one text entry per label, plus OK / Cancel.

    On OK, the non-empty entry texts are appended to the module-level
    ``allvals`` list; in every case the GTK main loop is stopped.
    """
    def __init__(self, labels, title):
        # Top-level window with a vertical stack of label+entry rows.
        window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        window.set_title(title)
        window.connect("destroy", self.delete_event)
        vbox = gtk.VBox(False, 5)
        window.add(vbox)
        self.edits = []  # gtk.Entry widgets, one per label, in order
        for label in labels:
            box = gtk.HBox(False, 5)
            vbox.pack_start(box, True, True, 0)
            edit = gtk.Entry()
            glabel = gtk.Label(label)
            box.pack_start(glabel, False, False, 0)
            box.pack_end(edit, True, True, 0)
            self.edits.append(edit)
        # Bottom row holding the OK / Cancel buttons.
        box = gtk.HBox(False, 5)
        vbox.pack_start(box, True, True, 0)
        button = gtk.Button("OK")
        button.connect("clicked", self.ok)
        box.pack_start(button, False, False, 0)
        button = gtk.Button("Cancel")
        button.connect("clicked", self.no)
        box.pack_start(button, True, True, 0)
        window.show_all()
    def ok(self, widget):
        """OK handler: collect non-empty entry texts into allvals, stop the loop."""
        print "O.K.!"
        global allvals
        for e in self.edits:
            value = e.get_text()
            if value:
                allvals.append(value)
        gtk.main_quit()
    def no(self, widget):
        """Cancel handler: leave allvals untouched and stop the main loop."""
        print "Cancel!"
        gtk.main_quit()
    def delete_event(self, widget):
        # Window closed by the window manager: behave like Cancel.
        gtk.main_quit()
class runner:
    """Blocking dialog launcher: show a MyWin form and run the GTK loop.

    Any values the user enters end up in the module-level ``allvals`` list.
    """
    def __init__(self, alllab, atitle='Title'):
        # Build the dialog, then block until OK/Cancel stops the main loop.
        dialog = MyWin(labels=alllab, title=atitle)
        gtk.main()
#~ def getNparametersFromWindow(labels, title='Title'):
#~ MyWin(labels, title)
#~ gtk.main()
#~ # Returns a list of strings or empty list if user cancel's
#~ def getNparametersFromWindow(labels, title='Tell me more'):
#~ def callback(self, widget, data=None):
#~ print "Hello again - %s was pressed" % data
#~ def delete_event(self, widget, event, data=None):
#~ gtk.main_quit()
#~ return False
#~ def __init__(self):
#~ window = gtk.Window(gtk.WINDOW_TOPLEVEL)
#~ window.set_title("Table")
#~ window.connect("delete_event", delete_event)
#~ vbox = gtk.VBox(False, 5)
#~ window.add(vbox)
#~ box.show()
#~ for label in labels:
#~ box = gtk.HBox(False, 5)
#~ vbox.pack_start(box, True, True, 0)
#~ box.show()
#~ edit = gtk.Entry()
#~ glabel = gtk.Label(label)
#~ box.pack_start(glabel, False, False, 0)
#~ box2.pack_start(edit, True, True, 0)
#~ edits.append(edit)
#~ box = gtk.HBox(False, 5)
#~ vbox.pack_start(box, True, True, 0)
#~ box.show()
#~ button = gtk.Button("OK")
#~ button.connect("clicked", callback, "OK")
#~ box.pack_start(button, False, False, 0)
#~ button.show()
#~ button = gtk.Button("Cancel")
#~ button.connect("clicked", self.callback, "Cancel")
#~ box.pack_start(button, True, True, 0)
#~ button.show()
#~ window.show()
#~ import sys
#~ from PyQt4 import QtGui, QtCore
#~ import FreeCAD, FreeCADGui
#~
#~ appcode = -1
#~
#~ # Returns a list of strings or empty list if user cancel's
#~ def getNparametersFromWindow(labels, title='Tell me more'):
#~ global appcode
#~ app = QtGui.QApplication(sys.argv)
#~ form = QtGui.QFormLayout()
#~ edits = []
#~ for label in labels:
#~ edit = QtGui.QLineEdit() # ++ QSpinBox & QDoubleSpinBox
#~ form.addRow(label, edit)
#~ edits.append(edit)
#~ buttons = QtGui.QDialogButtonBox()
#~ buttons.setOrientation(QtCore.Qt.Horizontal)
#~ buttons.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
#~ layout = QtGui.QVBoxLayout()
#~ layout.addLayout(form)
#~ layout.addWidget(buttons)
#~
#~ global success
#~ success = False
#~ FreeCAD.Console.PrintMessage("one\n")
#~ def ok():
#~ global success
#~ FreeCAD.Console.PrintMessage("OK\n")
#~ success = True
#~ w.close()
#~ def cancel():
#~ global success
#~ FreeCAD.Console.PrintMessage("Cancel\n")
#~ success = False
#~ w.close()
#~ FreeCAD.Console.PrintMessage("two\n")
#~ QtCore.QObject.connect(buttons, QtCore.SIGNAL("accepted()"), ok);
#~ QtCore.QObject.connect(buttons, QtCore.SIGNAL("rejected()"), cancel);
#~ FreeCAD.Console.PrintMessage("three\n")
#~ w = QtGui.QDialog()
#~ w.setWindowTitle(title)
#~ w.setLayout(layout)
#~ FreeCAD.Console.PrintMessage("four\n")
#~ QtCore.QMetaObject.connectSlotsByName(w)
#~ w.show()
#~ appcode = app.exec_()
#~ while (appcode == -1):
#~ pass
#~ FreeCAD.Console.PrintMessage("five\n")
#~ if success:
#~ FreeCAD.Console.PrintMessage("ret\n")
#~ return edits
#~ FreeCAD.Console.PrintMessage("none\n")
#~ return []
#~ from PyQt4 import QtGui,QtCore
#~
#~ # Get N parameters from dialog window
#~ # Labels - an array with parameters' labels
#~ # Parameters - output array of string values
#~ # proceed - a procceeding function (when OK is pressed)
#~ # Title - window title
#~ def getNparametersFromWindow(Labels, Title="Tell me more"):
#~ RET = 0
#~ Parameters = []
#~ def hide():
#~ RET = 1
#~ del Parameters[:]
#~ dialog.hide()
#~ def proceed():
#~ RET = 1
#~ dialog.hide()
#~ dialog = QtGui.QDialog()
#~ # dialog.resize(200,300)
#~ dialog.setWindowTitle(Title)
#~ la = QtGui.QVBoxLayout(dialog)
#~ lbl = []
#~ for i in range(0, len(Labels)):
#~ lbl.append(QtGui.QLabel(Labels[i]))
#~ la.addWidget(lbl[i])
#~ Parameters.append(QtGui.QLineEdit())
#~ la.addWidget(Parameters[i])
#~ okbox = QtGui.QDialogButtonBox(dialog)
#~ okbox.setOrientation(QtCore.Qt.Horizontal)
#~ okbox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
#~ la.addWidget(okbox)
#~ QtCore.QObject.connect(okbox, QtCore.SIGNAL("accepted()"), proceed)
#~ QtCore.QObject.connect(okbox, QtCore.SIGNAL("rejected()"), hide)
#~ QtCore.QMetaObject.connectSlotsByName(dialog)
#~ dialog.show()
#~ while (RET != 1):
#~ pass
#~ return Parameters
| Python |
import FreeCAD, FreeCADGui
from NCopy import copyVec
from FreeCAD import Base
class _CopyVec:
    "Copy selected object[s] N times along vector"
    def IsActive(self):
        """Enable the command only when something is selected."""
        return len(FreeCADGui.Selection.getSelection()) > 0
    def Activated(self):
        "Multiple copy by vector"
        from getGUIparams import runner as getWinPars
        from getGUIparams import allvals as L
        FreeCAD.Console.PrintMessage("CopyVec activated!\n")
        getWinPars(["a","b","c","d"])
        # Require exactly the four values (N, dx, dy, dz); a cancelled
        # dialog leaves the list empty.
        if not (isinstance(L, (list, tuple)) and len(L) == 4):
            return
        def _text(v):
            # Bug fix: the gtk dialog appends plain strings to allvals,
            # but the old code called .text() (a Qt QLineEdit method) on
            # them, which always raised and reported "Wrong input".
            # Accept both plain strings and widgets exposing .text().
            return v.text() if hasattr(v, 'text') else v
        try:
            N = int(_text(L[0]))
            dx = float(_text(L[1]))
            dy = float(_text(L[2]))
            dz = float(_text(L[3]))
        except (ValueError, TypeError):
            FreeCAD.Console.PrintError("Wrong input! Only numbers allowed...\n")
        else:
            copyVec(N, Base.Vector(dx,dy,dz))
    def GetResources(self):
        """Return icon, menu text and tooltip for the FreeCAD command registry."""
        IconPath = FreeCAD.ConfigGet("UserAppData") + "Mod/CopyTools/NCopy.png"
        return {'Pixmap' : IconPath, 'MenuText': 'Copy Vec', 'ToolTip': 'Copy selected objects N times by given vector'}
FreeCADGui.addCommand('CopyTools_CopyVec', _CopyVec())
| Python |
# Copy objects by matrix
import math
from PyQt4 import QtGui
import FreeCAD, FreeCADGui, Part
from FreeCAD import Base
class ViewProvider:
    """Minimal FreeCAD view provider: no extra display modes, shaded by default."""
    def __init__(self, obj):
        # Register this instance as the view object's proxy.
        obj.Proxy = self
    def getDisplayModes(self, obj):
        """Return the list of extra display modes (none)."""
        return []
    def getDefaultDisplayMode(self):
        """Name of the default display mode; must appear in getDisplayModes."""
        return "Shaded"
    # (custom XPM icon hook intentionally omitted)
# copy selected object S N times with offset V
# copy selected object S N times with offset V
def copyVec(S, N, V):
    """Return the fusion of N copies of shape S, each offset by V from the last.

    For N <= 1 the original shape is returned unchanged.
    """
    if N <= 1:
        # Nothing to replicate: hand back the original shape itself.
        return S
    result = S.copy()
    moving = S.copy()   # one shape that walks along V, fused at each stop
    for _ in range(1, N):
        moving.translate(V)
        result = result.fuse(moving)
    return result
# copy selected object through a matrix
# copy selected object through a matrix
def copyMat(S, N1, N2, V1, V2):
    """Fuse an N1 x N2 grid of copies of S (rows along V1, columns along V2)."""
    row = copyVec(S, N1, V1)
    if N2 > 1:
        return copyVec(row, N2, V2)
    return row
class MCopy:
    """Parametric FreeCAD feature: fused grid of copies of a source shape.

    The grid is defined by two vectors (Vector1, Vector2) and two counts
    (N1, N2); the fused result is stored in the feature's Shape.
    """
    def __init__(self, obj, M, N, O, V1, V2):
        obj.addProperty("App::PropertyVector","Vector1","","First Vector").Vector1=V1
        obj.addProperty("App::PropertyVector","Vector2","","Second Vector").Vector2=V2
        obj.addProperty("App::PropertyInteger","N1" ,"","Number of items 1").N1=M
        obj.addProperty("App::PropertyInteger","N2" ,"","Number of items 2").N2=N
        obj.addProperty("App::PropertyLink","Source" ,"","Source shape").Source=O
        obj.Proxy = self
        # Build the initial fused grid immediately.
        obj.Shape = copyMat(O.Shape, M,N,V1,V2)
        ViewProvider(obj.ViewObject)
    def onChanged(self, fp, prop):
        # NOTE(review): these property names ("Center", "Angle", "Normal",
        # "rot") belong to RCopy, not to this feature -- looks copied;
        # confirm whether N1/N2/Vector1/Vector2 were intended here.
        if prop == "Center" or prop == "Angle" or prop == "Normal" or prop == "N" or prop == "rot":
            # Rebuild the shape but keep the user-set placement.
            oldPls = fp.Shape.Placement
            self.execute(fp)
            fp.Shape.Placement = oldPls
    def execute(self, fp):
        """Recompute the fused grid of copies from the current properties."""
        N1 = fp.N1
        N2 = fp.N2
        V1 = fp.Vector1
        V2 = fp.Vector2
        S = fp.Source.Shape
        fp.Shape = copyMat(S, N1, N2, V1, V2)
# Copy objects to a matrix' knots, defined by vectors V1, V2 and numbers N1, N2
def makeMatrixCopy(N1=1, N2=1, V1=Base.Vector(0,0,0), V2=Base.Vector(0,0,0)):
    """Create a parametric MatrixCopy feature for every selected object.

    Shows an error dialog and returns None when nothing is selected.
    The source objects are hidden after their copies are created.
    """
    sel = FreeCADGui.Selection.getSelection()
    if (not sel):
        FreeCAD.Console.PrintError("Error: you should select some objects")
        QtGui.QMessageBox.critical(None,"Wrong selection","Please select a shape object")
        return None
    doc = FreeCADGui.ActiveDocument.Document
    for Obj in sel:
        rc = doc.addObject("Part::FeaturePython","MatrixCopy")
        rc.Label = Obj.Label+" (Matrix Copy)"
        MCopy(rc, N1, N2, Obj, V1, V2)
        # Hide the source so only the matrix copy remains visible.
        sdoc = FreeCADGui.getDocument(Obj.Document.Name)
        sdoc.getObject(Obj.Name).Visibility=False
# A simple vector copy
# A simple vector copy
def makeVectorCopy(N=1, X=0, Y=0, Z=0):
    """Convenience wrapper: linear array of N copies offset by (X, Y, Z)."""
    makeMatrixCopy(N1=N, V1=Base.Vector(X, Y, Z))
| Python |
import math
from PyQt4 import QtGui
import FreeCAD, FreeCADGui, Part
from FreeCAD import Base
from RCopy import makeRadialCopy
from MCopy import makeVectorCopy, makeMatrixCopy
# Copy 1 object N times by selected trajectory (BSpline)
# Trajectory should be first selection, all other - a copying objects
# original object should not be at beginning of trajectory
def copyByTrajectory(N=4):
    """Copy one object N times along a selected trajectory (e.g. a BSpline).

    The first selected object is the trajectory, the second the object to
    copy; copies are placed at N evenly spaced (by arc length) points and
    collected into a new "BSplineCopy" group.
    """
    sel = FreeCADGui.Selection.getSelection()
    def prErr():
        # Bug fix: "traectory" typo in the user-facing message.
        FreeCAD.Console.PrintError("Error: you should select a trajectory and one object\n")
    if not sel:
        prErr(); return None
    if len(sel) != 2:
        prErr(); return None
    if N < 2:
        # Bug fix: "shold" typo in the user-facing message.
        FreeCAD.Console.PrintError("Error: N should be more than 1\n"); return None
    Traj = sel[0].Shape
    Obj = sel[1]
    step = Traj.Length / (N - 1)   # arc-length spacing between copies
    curPos = 0
    doc = FreeCAD.activeDocument()
    grp = doc.addObject("App::DocumentObjectGroup", "BSplineCopy")
    try:
        for i in range(0, N):
            v = Traj.valueAt(curPos); curPos += step
            # NOTE(review): CopyObjAt is not defined in this module --
            # confirm it is provided by one of the imported helpers.
            newobj = CopyObjAt(Obj, v)
            grp.addObject(newobj)
    except Exception:
        FreeCAD.Console.PrintError("Error: bad selection\n")
        return None
| Python |
# Copy objects by circular trajectory
import math
from PyQt4 import QtGui
import FreeCAD, FreeCADGui, Part
from FreeCAD import Base
class ViewProvider:
    """Bare-bones view provider for the radial-copy feature: shaded display only."""
    def __init__(self, obj):
        # Attach ourselves as the proxy of the view object.
        obj.Proxy = self
    def getDisplayModes(self, obj):
        """No additional display modes are offered."""
        return []
    def getDefaultDisplayMode(self):
        """Default display mode name (must be one FreeCAD already knows)."""
        return "Shaded"
    # (custom XPM icon hook intentionally omitted)
# copy selected objects along circle
# C - center of circle
# A - normal to the circle
# Ang - angle between copies
# N - number of copies (N=0 - full circle), N <= 360/Ang
# rot == False - copy selection without rotating
# moveOriToGrp == True if you want to put original into group
def copyCirc(S, C, A, Ang, N, rot):
    """Fuse N copies of shape S spaced Ang degrees apart around axis A through C.

    N == 0 means "fill the whole circle" (N becomes 360/|Ang|).  With
    rot=False each copy keeps its own orientation: the rotation about C is
    undone by a counter-rotation about the copy's placement base.
    """
    if N == 0:
        N = int(360. / math.fabs(Ang))
    fused = S.copy()
    for step in range(1, N):
        piece = S.copy()
        piece.rotate(C, A, Ang * step)
        if not rot:
            # Undo the copy's own orientation change, keeping only the
            # positional displacement around the circle.
            piece.rotate(piece.Placement.Base, A, -(Ang * step))
        fused = fused.fuse(piece)
    return fused
class RCopy:
    """Parametric FreeCAD feature: radial (circular) array of a source shape."""
    def __init__(self, obj, C,A,N,S,n,r):
        obj.addProperty("App::PropertyVector","Center","","Center").Center=C
        obj.addProperty("App::PropertyAngle","Angle" ,"","Angle").Angle=A
        obj.addProperty("App::PropertyVector","Normal" ,"","Normal to circle").Normal=N
        obj.addProperty("App::PropertyLink","Source" ,"","Source shape").Source=S
        obj.addProperty("App::PropertyInteger","N" ,"","Number of items").N=n
        obj.addProperty("App::PropertyBool","rot" ,"","Rotate objects").rot=r
        obj.Proxy = self
        # copyCirc's signature is (S, C, axis, angle, count, rot); here the
        # local N is the normal vector and A the angle, so the argument
        # order below is correct even though the letters look swapped.
        obj.Shape = copyCirc(S.Shape, C, N, A, n, r)
        ViewProvider(obj.ViewObject)
    def onChanged(self, fp, prop):
        """Validate Angle/N and rebuild the array when a parameter changes."""
        if prop == "Center" or prop == "Angle" or prop == "Normal" or prop == "N" or prop == "rot":
            # NOTE(review): Angle == 0 would divide by zero below -- confirm
            # FreeCAD never delivers that value here.
            Anga = math.fabs(fp.Angle)
            Sign = fp.Angle / Anga
            # Normalize the magnitude into [0, 360) while keeping the sign.
            Anga %= 360
            n = fp.N
            # Clamp N so the copies never wrap past a full circle.
            if (n > (360./Anga+1) or n < 0 or (n > 360./Anga and Anga < 180.)):
                fp.N = int(360./Anga)
                return  # property write presumably re-triggers onChanged
            if (fp.Angle != Anga * Sign):
                fp.Angle = Anga * Sign
                return  # property write presumably re-triggers onChanged
            # Rebuild while preserving the user-set placement.
            oldPls = fp.Shape.Placement
            self.execute(fp)
            fp.Shape.Placement = oldPls
    def execute(self, fp):
        """Recompute the radial array from the current property values."""
        S = fp.Source.Shape
        C = fp.Center
        N = fp.Normal
        A = fp.Angle
        n = fp.N
        r = fp.rot
        fp.Shape = copyCirc(S, C, N, A, n, r)
def makeRadialCopy(Center=Base.Vector(0,0,0), Angle=90, Normal=Base.Vector(0,0,1), N=4, rot=True):
    """Create a parametric RadialCopy feature for every selected object.

    N copies are spaced Angle degrees apart about Normal through Center;
    rot=False keeps each copy's own orientation.  Sources are hidden.
    """
    sel = FreeCADGui.Selection.getSelection()
    if (not sel):
        FreeCAD.Console.PrintError("Error: you should select some objects")
        QtGui.QMessageBox.critical(None,"Wrong selection","Please select a shape object")
        return None
    doc = FreeCADGui.ActiveDocument.Document
    for Obj in sel:
        rc = doc.addObject("Part::FeaturePython","RadialCopy")
        rc.Label = Obj.Label+" (Radial Copy)"
        RCopy(rc, Center, Angle, Normal, Obj, N, rot)
        # Hide the source so only the radial copy remains visible.
        sdoc = FreeCADGui.getDocument(Obj.Document.Name)
        sdoc.getObject(Obj.Name).Visibility=False
| Python |
import FreeCAD, FreeCADGui, Part
import math
from FreeCAD import Base
def CopyObj(obj, NMsuff = ""):
    """Return a document-level copy of *obj*: shape, properties, colour.

    NMsuff is appended to the copy's Label.
    """
    name = obj.Name
    newshape = obj.Shape.copy()
    t = obj.Type
    newobject = FreeCAD.ActiveDocument.addObject(t, name)
    # Bug fix: the original iterated the undefined name `s` and tried to
    # assign to a call expression (`newobject.getPropertyByName(p) = ...`),
    # which is a syntax error.  Copy properties with setattr instead.
    for p in obj.PropertiesList:
        try:
            setattr(newobject, p, obj.getPropertyByName(p))
        except Exception:
            # Best-effort: some properties are read-only; skip them.
            pass
    newobject.Shape = newshape
    # Carry the display colour over to the copy.
    colr = FreeCADGui.activeDocument().getObject(obj.Name).ShapeColor
    FreeCADGui.activeDocument().getObject(newobject.Name).ShapeColor = colr
    newobject.Label = obj.Label + NMsuff
    return newobject
# if toGRP == True, move copies to group
# if itself == True, object changed itself
# if toGRP == True, move copies to group
# if itself == True, object changed itself
def transformObj(mat, toGRP=False, itself=False, NMsuff=""):
    """Apply matrix *mat* to every selected object.

    itself=True transforms the objects in place; otherwise each object is
    copied first (label suffix NMsuff).  toGRP=True collects the results
    into a new "Scaling" document group.
    """
    sel = FreeCADGui.Selection.getSelection()
    if not sel:
        FreeCAD.Console.PrintError("Error: you should select some objects")
        return
    grp = None
    if toGRP:
        grp = FreeCAD.activeDocument().addObject("App::DocumentObjectGroup", "Scaling")
    for target in sel:
        work = target if itself else CopyObj(target, NMsuff)
        work.Shape = work.Shape.transformGeometry(mat)
        if grp is not None:
            grp.addObject(work)
# scales selected object[s] into copies
# scales selected object[s] into copies
def scale(N, toGRP=False, itself=False):
    """Uniformly scale selected object[s] by factor N (copies by default)."""
    scaling = Base.Matrix()
    scaling.scale(Base.Vector(N, N, N))
    transformObj(scaling, toGRP, itself, "scaled")
# rotates selected object[s] (angles X,Y,Z) into copies
# !!! first - rotate X, then - Y, last - Z
def rotXYZ(X,Y,Z, Center=None, toGRP=False, itself=False):
    """Rotate selected object[s] by X, Y, Z degrees about the axes, in that order.

    Center is the rotation centre; when None the shape's centre of mass is
    used.  itself=True rotates in place, otherwise copies are rotated.
    """
    sel = FreeCADGui.Selection.getSelection()
    if sel:
        if(toGRP):
            doc = FreeCAD.activeDocument()
            grp = doc.addObject("App::DocumentObjectGroup", "Rotating")
        for Obj in sel:
            if(not itself):
                newobject = CopyObj(Obj, "rotated")
            else:
                newobject = Obj
            S = newobject.Shape
            if(Center == None):
                try:
                    Center = S.CenterOfMass
                except:
                    # Shapes without mass properties fall back to the origin.
                    Center = Base.Vector(0,0,0)
            # NOTE(review): once computed, Center stays set, so every later
            # object in the selection rotates about the FIRST object's
            # centre of mass -- confirm this is intended.
            S.rotate(Center, Base.Vector(1,0,0), X)
            S.rotate(Center, Base.Vector(0,1,0), Y)
            S.rotate(Center, Base.Vector(0,0,1), Z)
            if(toGRP):
                grp.addObject(newobject)
    else:
        FreeCAD.Console.PrintError("Error: you should select some objects")
def moveXYZ(X,Y,Z, toGRP=False, itself=False):
    """Translate selected object[s] by (X, Y, Z) (copies by default).

    NOTE(review): the optional group is labelled "Scaling", apparently
    copied from scale() -- confirm before renaming; other code may rely
    on the label.
    """
    sel = FreeCADGui.Selection.getSelection()
    if not sel:
        FreeCAD.Console.PrintError("Error: you should select some objects")
        return
    grp = None
    if toGRP:
        grp = FreeCAD.activeDocument().addObject("App::DocumentObjectGroup", "Scaling")
    for target in sel:
        work = target if itself else CopyObj(target, "translated")
        work.Shape.translate(Base.Vector(X, Y, Z))
        if grp is not None:
            grp.addObject(work)
# N - number of teeth, norm - normal to gear
def cutNteeth(N, norm=Base.Vector(0,0,1)):
sel = FreeCADGui.Selection.getSelection()
def prErr():
FreeCAD.Console.PrintError("Error: you should select two objects\n")
if (not sel):
prErr(); return None
L = len(sel)
if(L != 2):
prErr(); return None
if(N < 2):
FreeCAD.Console.PrintError("Error: N shold be more than 1\n"); return None
aBase = sel[0]
Tooth = sel[1]
try:
Center = aBase.Shape.CenterOfMass
except:
Center = Base.Vector(0,0,0)
ang = 360./N
try:
for i in range(1, N+1):
aBase.Shape = aBase.Shape.cut(Tooth.Shape)
Tooth.Shape.rotate(Center, norm, ang)
FreeCAD.Console.PrintError(i+"\n")
except:
FreeCAD.Console.PrintError("Error: bad selection\n")
return None
| Python |
import FreeCAD
class CopyToolsWorkbench (Workbench):
    "Copy Tools workbench object"
    # Metadata shown in FreeCAD's workbench selector.
    MenuText = "Copy Tools"
    ToolTip = "Copy Tools workbench"
    Icon = FreeCAD.ConfigGet("UserAppData") + "Mod/CopyTools/NCopy.png"
    import CopyTools
    def Initialize(self):
        """Register toolbar and menu entries (called once by FreeCAD)."""
        # toolbar
        t_list = ["CopyTools_CopyVec"]
        self.appendToolbar("Copy Tools",t_list)
        # Menu
        t_list = ["CopyTools_CopyVec"]
        self.appendMenu("Copy Tools",t_list)
# 'Workbench' and 'Gui' are injected by FreeCAD when this InitGui script runs.
Gui.addWorkbench(CopyToolsWorkbench())
| Python |
#!/usr/bin/env python
import codecs
import re
import jinja2
import markdown
def process_slides():
    """Compile slides.md into a single HTML presentation file.

    Slides are separated by '---' lines; each slide may start with a
    ``key: value`` metadata paragraph.  Rendered slides are passed to the
    base.html Jinja2 template via locals().
    """
    with codecs.open('../../presentation-output.html', 'w', encoding='utf8') as outfile:
        md = codecs.open('slides.md', encoding='utf8').read()
        md_slides = md.split('\n---\n')
        print 'Compiled %s slides.' % len(md_slides)
        slides = []
        # Process each slide separately.
        for md_slide in md_slides:
            slide = {}
            sections = md_slide.split('\n\n')
            # Extract metadata at the beginning of the slide (look for key: value)
            # pairs.
            metadata_section = sections[0]
            metadata = parse_metadata(metadata_section)
            slide.update(metadata)
            # Skip the first paragraph only if it actually held metadata.
            remainder_index = metadata and 1 or 0
            # Get the content from the rest of the slide.
            content_section = '\n\n'.join(sections[remainder_index:])
            html = markdown.markdown(content_section)
            slide['content'] = postprocess_html(html, metadata)
            slides.append(slide)
        template = jinja2.Template(open('base.html').read())
        outfile.write(template.render(locals()))
def parse_metadata(section):
    """Given the first part of a slide, return its metadata as a dict.

    Every line of the form ``key: value`` contributes one entry; lines
    without a colon are ignored.
    """
    metadata = {}
    for raw_line in section.split('\n'):
        key, sep, value = raw_line.partition(':')
        if sep:
            metadata[key.strip()] = value.strip()
    return metadata
def postprocess_html(html, metadata):
    """Return HTML adapted to the slide template format.

    When the slide's metadata sets build_lists to 'true', every list gets
    the 'build' CSS class so items appear one at a time.
    """
    build = metadata.get('build_lists')
    if build and build == 'true':
        for tag in ('ul', 'ol'):
            html = html.replace('<%s>' % tag, '<%s class="build">' % tag)
    return html
# Script entry point: compile the slides when run directly.
if __name__ == '__main__':
    process_slides()
| Python |
import bcc
import simplejson as json
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
class Compiler(webapp.RequestHandler):
    """App Engine handler: compiles breadcrumb source POSTed to /compile."""
    # One parser instance shared by all requests to this handler class.
    parser = bcc.Parser()
    def post(self):
        # Parse the 'source' form field and return the pages as JSON.
        source = self.request.get('source')
        pages = self.parser.Parse(source)
        self.response.out.write(json.dumps({'pages': pages}))
# WSGI application mapping URL paths to handler classes.
application = webapp.WSGIApplication([
    ('/compile', Compiler),
], debug=True)
def main():
    """CGI entry point for the App Engine runtime."""
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
| Python |
from distutils.core import setup, Command
class TestCommand(Command):
    """``setup.py test``: run the unit tests found in the ``test`` module."""
    # No command-line options for this command.
    user_options = []
    def initialize_options(self):
        """Nothing to initialize."""
    def finalize_options(self):
        """Nothing to finalize."""
    def run(self):
        """Load tests from the 'test' module and run them with unittest."""
        import logging, unittest
        # Silence library logging while the tests run.
        logging.basicConfig(level=logging.FATAL)
        ts = unittest.defaultTestLoader.loadTestsFromName('test')
        tr = unittest.TextTestRunner()
        tr.run(ts)
# Package metadata for distutils; 'test' is wired to the custom TestCommand.
setup(
    name='bcc', version='1.0',
    url='https://code.google.com/p/google-breadcrumb/',
    classifiers=[
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
    ],
    requires=['json', 'pyparsing', 'Markdown'],
    py_modules=['bcc'],
    scripts=['bcc.py'],
    cmdclass={'test': TestCommand})
| Python |
# module pyparsing.py
#
# Copyright (c) 2003-2011 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.5.6"
__versionTime__ = "26 June 2011 10:53"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor',
]
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
# Python 2/3 compatibility shims (pyparsing supports both from one source).
_PY3K = sys.version_info[0] > 2
if _PY3K:
    _MAX_INT = sys.maxsize
    basestring = str
    unichr = chr
    _ustr = str
    alphas = string.ascii_lowercase + string.ascii_uppercase
else:
    _MAX_INT = sys.maxint
    range = xrange
    # Cheap set() replacement for very old Pythons without the builtin.
    set = lambda s : dict( [(c,0) for c in s] )
    alphas = string.lowercase + string.uppercase
    def _ustr(obj):
        """Drop-in replacement for str(obj) that tries to be Unicode friendly.

        It first tries str(obj).  If that fails with a UnicodeEncodeError,
        it falls back to unicode(obj).
        """
        if isinstance(obj,unicode):
            return obj
        try:
            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
            # it won't break any existing code.
            return str(obj)
        except UnicodeEncodeError:
            # The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
            # state that "The return value must be a string object". However, does a
            # unicode object (being a subclass of basestring) count as a "string
            # object"?
            # If so, then return a unicode object:
            return unicode(obj)
            # Else encode it... but how? There are many choices... :)
            # Replace unprintables with escape codes?
            #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
            # Replace unprintables with question marks?
            #return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
            # ...
    alphas = string.lowercase + string.uppercase
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len enumerate sorted reversed list tuple set any all".split():
    try:
        singleArgBuiltins.append(getattr(__builtin__,fname))
    except AttributeError:
        # Builtin absent on this Python version -- just skip it.
        continue
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()]
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
    """Empty namespace object used to group related constants."""
    pass
# Character-class strings used when building Word/CharsNotIn expressions.
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)  # literal backslash, written via chr() to avoid escaping
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions"""
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        self.loc = loc
        # With no explicit msg, pstr itself is the message text.
        if msg is None:
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem
    def __getattr__( self, aname ):
        """supported attributes by name are:
        - lineno - returns the line number of the exception text
        - col - returns the column number of the exception text
        - line - returns the line containing the exception text
        """
        # lineno/col/line are computed lazily from loc+pstr using the
        # module-level helper functions defined elsewhere in this file.
        if( aname == "lineno" ):
            return lineno( self.loc, self.pstr )
        elif( aname in ("col", "column") ):
            return col( self.loc, self.pstr )
        elif( aname == "line" ):
            return line( self.loc, self.pstr )
        else:
            raise AttributeError(aname)
    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % \
               ( self.msg, self.loc, self.lineno, self.column )
    def __repr__( self ):
        return _ustr(self)
    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
        the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            line_str = "".join( [line_str[:line_column],
                                 markerString, line_str[line_column:]])
        return line_str.strip()
    def __dir__(self):
        return "loc msg pstr parserElement lineno col line " \
               "markInputLine __str__ __repr__".split()
# Recoverable match failure: callers may backtrack and try alternatives.
class ParseException(ParseBaseException):
    """exception thrown when parse expressions don't match class;
    supported attributes by name are:
    - lineno - returns the line number of the exception text
    - col - returns the column number of the exception text
    - line - returns the line containing the exception text
    """
    pass
# Unrecoverable failure: no backtracking, parsing stops here.
class ParseFatalException(ParseBaseException):
    """user-throwable exception thrown when inconsistent parse content
    is found; stops all parsing immediately"""
    pass
class ParseSyntaxException(ParseFatalException):
    """just like C{ParseFatalException}, but thrown internally when an
    C{ErrorStop} ('-' operator) indicates that parsing is to stop immediately because
    an unbacktrackable syntax error has been found"""
    def __init__(self, pe):
        # Clone the location/message details of the originating exception.
        super(ParseSyntaxException, self).__init__(
            pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """exception thrown by C{validate()} if the grammar could be improperly recursive"""
    def __init__(self, parseElementList):
        # Keep the offending chain of parser elements for diagnostics.
        self.parseElementTrace = parseElementList
    def __str__(self):
        return "RecursiveGrammarException: %s" % (self.parseElementTrace,)
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
    """Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.<resultsName>})
       """
    #~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
    def __new__(cls, toklist, name=None, asList=True, modal=True ):
        # if the argument is already a ParseResults, return it unchanged
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        # flag consumed by __init__ so a recycled instance is not re-initialized
        retobj.__doinit = True
        return retobj

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ):
        # only initialize state the first time (see __new__ pass-through above)
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()

        # bind the results name, if one was given
        if name is not None and name:
            if not modal:
                # non-modal names accumulate all matches rather than keeping the last
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not toklist in (None,'',[]):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist

    def __getitem__( self, i ):
        # integer/slice -> positional access; otherwise named-result lookup
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                # modal name: return only the most recent match
                return self.__tokdict[i][-1][0]
            else:
                # accumulating name: return all matches
                return ParseResults([ v[0] for v in self.__tokdict[i] ])

    def __setitem__( self, k, v, isinstance=isinstance ):
        if isinstance(v,_ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,int):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            # keep a weak back-reference so getName() can consult the parent
            sub.__parent = wkref(self)

    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]

            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name in self.__tokdict:
                occurrences = self.__tokdict[name]
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]

    def __contains__( self, k ):
        return k in self.__tokdict

    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return len( self.__toklist ) > 0
    __nonzero__ = __bool__
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( self.__toklist[::-1] )
    def keys( self ):
        """Returns all named result keys."""
        return self.__tokdict.keys()

    def pop( self, index=-1 ):
        """Removes and returns item at specified index (default=last).
           Will work with either numeric indices or dict-key indicies."""
        ret = self[index]
        del self[index]
        return ret

    def get(self, key, defaultValue=None):
        """Returns named result matching the given key, or if there is no
           such name, then returns the given C{defaultValue} or C{None} if no
           C{defaultValue} is specified."""
        if key in self:
            return self[key]
        else:
            return defaultValue

    def insert( self, index, insStr ):
        """Inserts new element at location index in the list of parsed tokens."""
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary
        for name in self.__tokdict:
            occurrences = self.__tokdict[name]
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

    def items( self ):
        """Returns all named result keys and values as a list of tuples."""
        return [(k,self[k]) for k in self.__tokdict]

    def values( self ):
        """Returns all named result values."""
        return [ v[-1][0] for v in self.__tokdict.values() ]

    def __getattr__( self, name ):
        # attribute-style access to named results; unknown names yield ""
        if True: #name not in self.__slots__:
            if name in self.__tokdict:
                if name not in self.__accumNames:
                    return self.__tokdict[name][-1][0]
                else:
                    return ParseResults([ v[0] for v in self.__tokdict[name] ])
            else:
                return ""
        return None

    def __add__( self, other ):
        ret = self.copy()
        ret += other
        return ret

    def __iadd__( self, other ):
        if other.__tokdict:
            offset = len(self.__toklist)
            # shift other's named-result offsets by our current length;
            # negative offsets (meaning "last") stay pinned to the new end
            addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self

    def __radd__(self, other):
        # supports sum() with a 0 start value; NOTE(review): any other left
        # operand implicitly returns None here
        if isinstance(other,int) and other == 0:
            return self.copy()

    def __repr__( self ):
        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )

    def __str__( self ):
        out = "["
        sep = ""
        for i in self.__toklist:
            if isinstance(i, ParseResults):
                out += sep + _ustr(i)
            else:
                out += sep + repr(i)
            sep = ", "
        out += "]"
        return out

    def _asStringList( self, sep='' ):
        # flatten all tokens (recursing into nested ParseResults) to strings
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance( item, ParseResults ):
                out += item._asStringList()
            else:
                out.append( _ustr(item) )
        return out

    def asList( self ):
        """Returns the parse results as a nested list of matching tokens, all converted to strings."""
        out = []
        for res in self.__toklist:
            if isinstance(res,ParseResults):
                out.append( res.asList() )
            else:
                out.append( res )
        return out

    def asDict( self ):
        """Returns the named parse results as dictionary."""
        return dict( self.items() )

    def copy( self ):
        """Returns a new copy of a C{ParseResults} object."""
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret

    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
        nl = "\n"
        out = []
        # map token-list offsets back to their results names
        namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
                                                            for v in vlist ] )
        nextLevelIndent = indent + "  "

        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""

        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name

        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"

        out += [ nl, indent, "<", selfTag, ">" ]

        worklist = self.__toklist
        for i,res in enumerate(worklist):
            if isinstance(res,ParseResults):
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                                xmlBodyText,
                                                "</", resTag, ">" ]

        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)

    def __lookup(self,sub):
        # reverse lookup: find the results name under which sub is stored
        for k,vlist in self.__tokdict.items():
            for v,loc in vlist:
                if sub is v:
                    return k
        return None

    def getName(self):
        """Returns the results name for this token expression."""
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               self.__tokdict.values()[0][0][1] in (0,-1)):
            return self.__tokdict.keys()[0]
        else:
            return None

    def dump(self,indent='',depth=0):
        """Diagnostic method for listing out the contents of a C{ParseResults}.
           Accepts an optional C{indent} argument so that this string can be embedded
           in a nested display of other data."""
        out = []
        out.append( indent+_ustr(self.asList()) )
        keys = self.items()
        keys.sort()
        for k,v in keys:
            if out:
                out.append('\n')
            out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
            if isinstance(v,ParseResults):
                if v.keys():
                    out.append( v.dump(indent,depth+1) )
                else:
                    out.append(_ustr(v))
            else:
                out.append(_ustr(v))
        return "".join(out)

    # add support for pickle protocol
    def __getstate__(self):
        # parent weakref cannot be pickled; dereference it for the state tuple
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )

    def __setstate__(self,state):
        self.__toklist = state[0]
        (self.__tokdict,
         par,
         inAccumNames,
         self.__name) = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            self.__parent = wkref(par)
        else:
            self.__parent = None

    def __dir__(self):
        # expose named results as attributes for introspection/completion
        return dir(super(ParseResults,self)) + self.keys()
def col(loc, strg):
    """Return the 1-based column of location *loc* within *strg*,
    treating newlines as line separators.

    Note: the default parsing behavior is to expand tabs in the input string
    before parsing; see L{I{ParserElement.parseString}<ParserElement.parseString>}
    for ways to keep locations consistent when the input contains <TAB>s.
    """
    # a location sitting exactly on a newline is reported as column 1
    if loc < len(strg) and strg[loc] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc, strg):
    """Return the 1-based line number of location *loc* within *strg*,
    treating newlines as line separators.

    Note: the default parsing behavior is to expand tabs in the input string
    before parsing; see L{I{ParserElement.parseString}<ParserElement.parseString>}
    for ways to keep locations consistent when the input contains <TAB>s.
    """
    # one more than the number of newlines preceding loc
    return 1 + strg.count("\n", 0, loc)
def line(loc, strg):
    """Return the full line of text from *strg* that contains location *loc*,
    treating newlines as line separators.
    """
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    if end < 0:
        # no trailing newline: the line runs to the end of the string
        return strg[start:]
    return strg[start:end]
def _defaultStartDebugAction(instring, loc, expr):
    """Default debug callback invoked when a match attempt begins."""
    position = "(%d,%d)" % (lineno(loc, instring), col(loc, instring))
    print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + position)
def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
    """Default debug callback invoked after a successful match."""
    matched = str(toks.asList())
    print ("Matched " + _ustr(expr) + " -> " + matched)
def _defaultExceptionDebugAction(instring, loc, expr, exc):
    """Default debug callback invoked when a match attempt raises."""
    message = "Exception raised:" + _ustr(exc)
    print (message)
def nullDebugAction(*args):
    """No-op debug action, used to suppress debugging output during parsing."""
    return None
'decorator to trim function calls to match the arity of the target'
if not _PY3K:
    def _trim_arity(func, maxargs=2):
        """Wrap *func* so it can be called with up to *maxargs* leading
        arguments trimmed: parse actions may accept (s,loc,toks), (loc,toks),
        (toks), or no arguments at all.  The correct arity is discovered by
        retrying on TypeError and is then cached in ``limit``."""
        limit = [0]
        def wrapper(*args):
            while 1:
                try:
                    return func(*args[limit[0]:])
                except TypeError:
                    # NOTE(review): a TypeError raised *inside* func is
                    # indistinguishable from an arity mismatch here
                    if limit[0] <= maxargs:
                        limit[0] += 1
                        continue
                    raise
        return wrapper
else:
    def _trim_arity(func, maxargs=2):
        """Py3 variant of the arity trimmer: start with *maxargs* arguments
        trimmed and trim one fewer on each TypeError."""
        # BUGFIX: the original used a bare ``limit = maxargs`` closure
        # variable, but ``limit -= 1`` inside wrapper made ``limit`` local to
        # wrapper, so the first call raised UnboundLocalError (hence the old
        # "#~ nonlocal limit" comment).  Use a one-element list, as in the
        # Py2 branch, to get a mutable shared cell.
        limit = [maxargs]
        def wrapper(*args):
            while 1:
                try:
                    return func(*args[limit[0]:])
                except TypeError:
                    if limit[0]:
                        limit[0] -= 1
                        continue
                    raise
        return wrapper
class ParserElement(object):
    """Abstract base level parser element class."""
    # default set of characters skipped as inter-token whitespace
    DEFAULT_WHITE_CHARS = " \n\t\r"
    # when True, parse exceptions propagate with the full internal stack trace
    verbose_stacktrace = False

    def setDefaultWhitespaceChars( chars ):
        """Overrides the default whitespace chars
        """
        ParserElement.DEFAULT_WHITE_CHARS = chars
    setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
    def __init__( self, savelist=False ):
        """Initialize common parser-element state.

        @param savelist: if True, matched tokens are saved as a list in the results
        """
        self.parseAction = list()
        self.failAction = None
        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
        self.strRepr = None          # cached string representation
        self.resultsName = None
        self.saveAsList = savelist
        self.skipWhitespace = True
        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        self.copyDefaultWhiteChars = True
        self.mayReturnEmpty = False # used when checking for left-recursion
        self.keepTabs = False
        self.ignoreExprs = list()
        self.debug = False
        self.streamlined = False
        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
        self.errmsg = ""
        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
        self.debugActions = ( None, None, None ) #custom debug actions
        self.re = None
        self.callPreparse = True # used to avoid redundant calls to preParse
        self.callDuringTry = False
def copy( self ):
"""Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
    def setBreak(self,breakFlag = True):
        """Method to invoke the Python pdb debugger when this element is
           about to be parsed. Set C{breakFlag} to True to enable, False to
           disable.
        """
        if breakFlag:
            # wrap the current _parse so a breakpoint fires before each attempt
            _parseMethod = self._parse
            def breaker(instring, loc, doActions=True, callPreParse=True):
                import pdb
                pdb.set_trace()
                return _parseMethod( instring, loc, doActions, callPreParse )
            # remember the wrapped method so it can be restored later
            breaker._originalParseMethod = _parseMethod
            self._parse = breaker
        else:
            # unwrap, if this element was previously wrapped
            if hasattr(self._parse,"_originalParseMethod"):
                self._parse = self._parse._originalParseMethod
        return self
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a ParseResults object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail acton fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{ParseFatalException}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
    def _skipIgnorables( self, instring, loc ):
        """Advance loc past any text matched by this element's ignore
        expressions (e.g. comments), repeating until none of them match."""
        exprsFound = True
        while exprsFound:
            exprsFound = False
            for e in self.ignoreExprs:
                try:
                    # consume as many consecutive matches of this ignorable as possible
                    while 1:
                        loc,dummy = e._parse( instring, loc )
                        exprsFound = True
                except ParseException:
                    # this ignorable no longer matches here; try the next one
                    pass
        return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # default implementation: match nothing, leave position unchanged
        return loc, []
    def postParse( self, instring, loc, tokenlist ):
        # hook for subclasses to massage matched tokens; default is pass-through
        return tokenlist
    #~ @profile
    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
        """Core match driver: pre-parse, call parseImpl, post-parse, run parse
        actions, and fire debug/fail actions as configured.  Returns
        (new location, ParseResults)."""
        debugging = ( self.debug ) #and doActions )

        if debugging or self.failAction:
            # slower path: identical matching logic, plus debug/fail callbacks
            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
            if (self.debugActions[0] ):
                self.debugActions[0]( instring, loc, self )
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            try:
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    # ran off the end of the input - report as a normal parse failure
                    raise ParseException( instring, len(instring), self.errmsg, self )
            except ParseBaseException:
                #~ print ("Exception raised:", err)
                err = None
                if self.debugActions[2]:
                    err = sys.exc_info()[1]
                    self.debugActions[2]( instring, tokensStart, self, err )
                if self.failAction:
                    if err is None:
                        err = sys.exc_info()[1]
                    self.failAction( instring, tokensStart, self, err )
                raise
        else:
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            # mayIndexError lets subclasses that never overrun skip the try/except
            if self.mayIndexError or loc >= len(instring):
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
            else:
                loc,tokens = self.parseImpl( instring, preloc, doActions )

        tokens = self.postParse( instring, loc, tokens )

        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
        if self.parseAction and (doActions or self.callDuringTry):
            if debugging:
                try:
                    for fn in self.parseAction:
                        tokens = fn( instring, tokensStart, retTokens )
                        if tokens is not None:
                            # action returned replacement tokens - rewrap them
                            retTokens = ParseResults( tokens,
                                                      self.resultsName,
                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                      modal=self.modalResults )
                except ParseBaseException:
                    #~ print "Exception raised in user parse action:", err
                    if (self.debugActions[2] ):
                        err = sys.exc_info()[1]
                        self.debugActions[2]( instring, tokensStart, self, err )
                    raise
            else:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )

        if debugging:
            #~ print ("Matched",self,"->",retTokens.asList())
            if (self.debugActions[1] ):
                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )

        return loc, retTokens
    def tryParse( self, instring, loc ):
        """Attempt a match at loc without running parse actions; returns the
        end location, downgrading ParseFatalException to ParseException."""
        try:
            return self._parse( instring, loc, doActions=False )[0]
        except ParseFatalException:
            raise ParseException( instring, loc, self.errmsg, self)
    # this method gets repeatedly called during backtracking with the same arguments -
    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
        """Memoizing variant of _parseNoCache (packrat parsing): results and
        exceptions are cached per (element, string, location, flags)."""
        lookup = (self,instring,loc,callPreParse,doActions)
        if lookup in ParserElement._exprArgCache:
            value = ParserElement._exprArgCache[ lookup ]
            if isinstance(value, Exception):
                # cached failure: re-raise the stored exception
                raise value
            # return a copy so callers cannot mutate the cached ParseResults
            return (value[0],value[1].copy())
        else:
            try:
                value = self._parseNoCache( instring, loc, doActions, callPreParse )
                ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
                return value
            except ParseBaseException:
                pe = sys.exc_info()[1]
                ParserElement._exprArgCache[ lookup ] = pe
                raise
    # default to the non-memoizing parser; enablePackrat() swaps in _parseCache
    _parse = _parseNoCache

    # argument cache for optimizing repeated calls when backtracking through recursive expressions
    _exprArgCache = {}
    def resetCache():
        # clear the shared packrat cache (called at the start of each parse)
        ParserElement._exprArgCache.clear()
    resetCache = staticmethod(resetCache)
    # one-shot guard so packrat parsing is only installed once
    _packratEnabled = False
    def enablePackrat():
        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
           Repeated parse attempts at the same string location (which happens
           often in many complex grammars) can immediately return a cached value,
           instead of re-executing parsing/validating code.  Memoizing is done of
           both valid results and parsing exceptions.

           This speedup may break existing programs that use parse actions that
           have side-effects.  For this reason, packrat parsing is disabled when
           you first import pyparsing.  To activate the packrat feature, your
           program must call the class method C{ParserElement.enablePackrat()}.  If
           your program uses C{psyco} to "compile as you go", you must call
           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
           Python will crash.  For best results, call C{enablePackrat()} immediately
           after importing pyparsing.
        """
        if not ParserElement._packratEnabled:
            ParserElement._packratEnabled = True
            # swap the parse driver for the memoizing one, class-wide
            ParserElement._parse = ParserElement._parseCache
    enablePackrat = staticmethod(enablePackrat)
    def parseString( self, instring, parseAll=False ):
        """Execute the parse expression with the given string.
           This is the main interface to the client code, once the complete
           expression has been built.

           If you want the grammar to require that the entire input string be
           successfully parsed, then set C{parseAll} to True (equivalent to ending
           the grammar with C{StringEnd()}).

           Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
           in order to report proper column numbers in parse actions.
           If the input string contains tabs and
           the grammar uses parse actions that use the C{loc} argument to index into the
           string being parsed, you can ensure you have a consistent view of the input
           string by:
           - calling C{parseWithTabs} on your grammar before calling C{parseString}
             (see L{I{parseWithTabs}<parseWithTabs>})
           - define your parse action using the full C{(s,loc,toks)} signature, and
             reference the input string using the parse action's C{s} argument
           - explictly expand the tabs in your input string before calling
             C{parseString}
        """
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            #~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse( instring, 0 )
            if parseAll:
                # require that only trailing whitespace/ignorables remain
                loc = self.preParse( instring, loc )
                se = Empty() + StringEnd()
                se._parse( instring, loc )
        except ParseBaseException:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                exc = sys.exc_info()[1]
                raise exc
        else:
            return tokens
    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
        """Scan the input string for expression matches.  Each match will return the
           matching tokens, start location, and end location.  May be called with optional
           C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
           C{overlap} is specified, then overlapping matches will be reported.

           Note that the start and end locations are reported relative to the string
           being parsed.  See L{I{parseString}<parseString>} for more information on parsing
           strings with embedded tabs."""
        if not self.streamlined:
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()

        if not self.keepTabs:
            instring = _ustr(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while loc <= instrlen and matches < maxMatches:
                try:
                    preloc = preparseFn( instring, loc )
                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
                except ParseException:
                    # NOTE(review): if preparseFn itself raised, preloc would be
                    # unbound here (NameError) - appears preParse cannot raise
                    # ParseException in practice, but confirm
                    loc = preloc+1
                else:
                    if nextLoc > loc:
                        matches += 1
                        yield tokens, preloc, nextLoc
                        if overlap:
                            # resume scanning just past the start of this match
                            nextloc = preparseFn( instring, loc )
                            if nextloc > loc:
                                loc = nextLoc
                            else:
                                loc += 1
                        else:
                            loc = nextLoc
                    else:
                        # zero-width match: step forward to avoid an infinite loop
                        loc = preloc+1
        except ParseBaseException:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                exc = sys.exc_info()[1]
                raise exc
    def transformString( self, instring ):
        """Extension to C{scanString}, to modify matching text with modified tokens that may
           be returned from a parse action.  To use C{transformString}, define a grammar and
           attach a parse action to it that modifies the returned token list.
           Invoking C{transformString()} on a target string will then scan for matches,
           and replace the matched text patterns according to the logic in the parse
           action.  C{transformString()} returns the resulting transformed string."""
        out = []
        lastE = 0
        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
        # keep string locs straight between transformString and scanString
        self.keepTabs = True
        try:
            for t,s,e in self.scanString( instring ):
                # copy the unmatched text preceding this match verbatim
                out.append( instring[lastE:s] )
                if t:
                    if isinstance(t,ParseResults):
                        out += t.asList()
                    elif isinstance(t,list):
                        out += t
                    else:
                        out.append(t)
                lastE = e
            # trailing unmatched text
            out.append(instring[lastE:])
            out = [o for o in out if o]
            return "".join(map(_ustr,_flatten(out)))
        except ParseBaseException:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                exc = sys.exc_info()[1]
                raise exc
    def searchString( self, instring, maxMatches=_MAX_INT ):
        """Another extension to C{scanString}, simplifying the access to the tokens found
           to match the given parse expression.  May be called with optional
           C{maxMatches} argument, to clip searching after 'n' matches are found.
        """
        try:
            # collect just the token lists, discarding start/end locations
            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
        except ParseBaseException:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                exc = sys.exc_info()[1]
                raise exc
def __add__(self, other ):
"""Implementation of + operator - returns And"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""Implementation of - operator, returns C{And} with error stop"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""Implementation of - operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""Implementation of * operator, allows use of C{expr * 3} in place of
C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + ZeroOrMore(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{ZeroOrMore(expr)}
- C{expr*(1,None)} is equivalent to C{OneOrMore(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns C{MatchFirst}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns C{Or}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns C{Each}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
    def __invert__( self ):
        """Implementation of ~ operator - returns C{NotAny} (negative lookahead)"""
        return NotAny( self )
    def __call__(self, name):
        """Shortcut for C{setResultsName}, with C{listAllMatches=default}::
             userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
           could be written as::
             userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")

           If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
           passed as C{True}.
           """
        # trailing-'*' handling is delegated to setResultsName
        return self.setResultsName(name)
    def suppress( self ):
        """Suppresses the output of this C{ParserElement}; useful to keep punctuation from
           cluttering up returned output.
        """
        return Suppress( self )
def leaveWhitespace( self ):
"""Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
    def parseWithTabs( self ):
        """Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
           Must be called before C{parseString} when the input grammar contains elements that
           match C{<TAB>} characters."""
        # mutates and returns self so calls can be chained
        self.keepTabs = True
        return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other.copy() )
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
    def __str__( self ):
        # display name assigned by setName() or a subclass __init__
        return self.name
    def __repr__( self ):
        # repr mirrors str, via the module's unicode-safe helper
        return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
    def checkRecursion( self, parseElementList ):
        # leaf elements cannot recurse; expression classes override this
        pass
    def validate( self, validateTrace=[] ):
        """Check defined expressions for valid structure, check for infinite recursive definitions."""
        # NOTE(review): validateTrace is unused at this level (expression
        # subclasses use it); the mutable [] default is harmless here since
        # it is never mutated - confirm before relying on it elsewhere
        self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "rb")
file_contents = f.read()
f.close()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
    def getException(self):
        # build a fresh ParseException carrying this element's error message
        return ParseException("",0,self.errmsg,self)
    def __getattr__(self,aname):
        # lazily build and cache this element's exception object so the hot
        # "raise self.myException" path does not construct one per failure
        if aname == "myException":
            self.myException = ret = self.getException();
            return ret;
        else:
            raise AttributeError("no such attribute " + aname)
    def __eq__(self,other):
        # ParserElements compare equal on identity or identical attributes;
        # comparing against a string asks "does this expression fully parse it?"
        if isinstance(other, ParserElement):
            return self is other or self.__dict__ == other.__dict__
        elif isinstance(other, basestring):
            try:
                self.parseString(_ustr(other), parseAll=True)
                return True
            except ParseBaseException:
                return False
        else:
            # NOTE(review): this compares the super() proxy object itself with
            # ==, rather than delegating to a parent __eq__; it effectively
            # always returns False for unrelated types - confirm intended
            return super(ParserElement,self)==other
    def __ne__(self,other):
        # defined explicitly because Python 2 does not derive != from ==
        return not (self == other)
    def __hash__(self):
        # hash by identity: __eq__ above is broader than hash equality requires
        return hash(id(self))
    def __req__(self,other):
        # reflected equality helper; delegates to __eq__
        return self == other
    def __rne__(self,other):
        # reflected inequality helper; delegates to __eq__
        return not (self == other)
class Token(ParserElement):
    """Abstract C{ParserElement} subclass, for defining atomic matching patterns."""
    def __init__( self ):
        # atomic tokens never accumulate sub-results
        super(Token,self).__init__( savelist=False )
    def setName(self, name):
        # refresh the error message whenever the display name changes
        ret = super(Token,self).setName(name)
        self.errmsg = "Expected " + self.name
        return ret
class Empty(Token):
    """An empty token, will always match."""
    def __init__( self ):
        super(Empty,self).__init__()
        # zero-width: consumes no input and can never raise IndexError
        self.name = "Empty"
        self.mayIndexError = False
        self.mayReturnEmpty = True
class NoMatch(Token):
    """A token that will never match."""
    def __init__( self ):
        super(NoMatch,self).__init__()
        self.name = "NoMatch"
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.errmsg = "Unmatchable token"
    def parseImpl( self, instring, loc, doActions=True ):
        # unconditionally fail, reusing the cached exception object
        # (lazily built by ParserElement.__getattr__)
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Literal(Token):
    """Token to exactly match a specified string."""
    def __init__( self, matchString ):
        super(Literal,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            # degrade an empty Literal into an Empty token, in place
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
    # Performance tuning: this routine gets called a *lot*
    # if this is a single character match string and the first character matches,
    # short-circuit as quickly as possible, and avoid calling startswith
    #~ @profile
    def parseImpl( self, instring, loc, doActions=True ):
        if (instring[loc] == self.firstMatchChar and
            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
            return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
# internal shorthand alias for Literal
_L = Literal
class Keyword(Token):
    """Token to exactly match a specified string as a keyword, that is, it must be
       immediately followed by a non-keyword character.  Compare with C{Literal}::
         Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}.
         Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
       Accepts two optional constructor arguments in addition to the keyword string:
       C{identChars} is a string of characters that would be valid identifier characters,
       defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
       matching, default is C{False}.
    """
    # characters that may continue an identifier; a keyword must not be
    # immediately adjacent to any of them
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
    def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
        super(Keyword,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # compare uppercased input against an uppercased match string
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)
    def parseImpl( self, instring, loc, doActions=True ):
        # a keyword matches only when neither the preceding nor the following
        # character belongs to the identifier character set
        if self.caseless:
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
    def copy(self):
        # copies revert to the module-wide default keyword characters
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
    setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
    """Token to match a specified string, ignoring case of letters.
       Note: the matched results will always be in the case of the given
       match string, NOT the case of the input text.
    """
    def __init__( self, matchString ):
        # store the uppercased form as self.match for comparison
        super(CaselessLiteral,self).__init__( matchString.upper() )
        # Preserve the defining literal.
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name
    def parseImpl( self, instring, loc, doActions=True ):
        # uppercase the input slice and compare; return the original-case string
        if instring[ loc:loc+self.matchLen ].upper() == self.match:
            return loc+self.matchLen, self.returnString
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class CaselessKeyword(Keyword):
    """Caseless version of C{Keyword}; returns the match string as defined."""
    def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
    def parseImpl( self, instring, loc, doActions=True ):
        # NOTE(review): unlike Keyword.parseImpl, this does not test the
        # character *before* loc against identChars - confirm intended
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Word(Token):
    """Token for matching words composed of allowed character sets.
       Defined with string containing all allowed initial characters,
       an optional string containing allowed body characters (if omitted,
       defaults to the initial character set), and an optional minimum,
       maximum, and/or exact length.  The default value for C{min} is 1 (a
       minimum value < 1 is not valid); the default values for C{max} and C{exact}
       are 0, meaning no maximum or exact length restriction. An optional
       C{exclude} parameter can list characters that might be found in
       the input C{bodyChars} string; useful to define a word of all printables
       except for one or two characters, for instance.
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
        super(Word,self).__init__()
        # strip any excluded characters out of both character sets up front
        if excludeChars:
            initChars = ''.join([c for c in initChars if c not in excludeChars])
            if bodyChars:
                bodyChars = ''.join([c for c in bodyChars if c not in excludeChars])
        self.initCharsOrig = initChars
        self.initChars = set(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = set(bodyChars)
        else:
            # body defaults to the initial character set
            self.bodyCharsOrig = initChars
            self.bodyChars = set(initChars)
        self.maxSpecified = max > 0
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword
        # when no length limits are given, precompile an equivalent regex
        # so parseImpl can take the fast path below
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.bodyCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except:
                # fall back to the character-by-character scanner below
                self.re = None
    def parseImpl( self, instring, loc, doActions=True ):
        # fast path: use the precompiled regex when available
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
            loc = result.end()
            return loc, result.group()
        if not(instring[ loc ] in self.initChars):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # slow path: scan body characters up to the maximum length
        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1
        throwException = False
        if loc - start < self.minLen:
            throwException = True
        # with an explicit max, a following body character means the word is too long
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            throwException = True
        if self.asKeyword:
            # keyword mode: must not be embedded inside a longer word
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True
        if throwException:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except:
            pass
        if self.strRepr is None:
            def charsAsStr(s):
                # abbreviate long character sets for display
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s
            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
        return self.strRepr
class Regex(Token):
    """Token for matching strings that match a given regular expression.
       Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    """
    # used to recognize an already-compiled pattern object in the constructor
    compiledREtype = type(re.compile("[A-Z]"))
    def __init__( self, pattern, flags=0):
        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()
        if isinstance(pattern, basestring):
            if len(pattern) == 0:
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)
            self.pattern = pattern
            self.flags = flags
            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise
        elif isinstance(pattern, Regex.compiledREtype):
            # pre-compiled pattern: use it directly; flags are stored as given
            # (they cannot be applied to an already-compiled object)
            self.re = pattern
            self.pattern = \
                self.reString = str(pattern)
            self.flags = flags
        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            # expose the pattern's named groups as named results
            for k in d:
                ret[k] = d[k]
        return loc,ret
    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except:
            pass
        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)
        return self.strRepr
class QuotedString(Token):
    """Token for matching strings that are delimited by quoting characters.
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
        """
           Defined with the following parameters:
            - quoteChar - string of one or more characters defining the quote delimiting string
            - escChar - character to escape quotes, typically backslash (default=None)
            - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
            - multiline - boolean indicating whether quotes can span multiple lines (default=False)
            - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
            - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
        """
        super(QuotedString,self).__init__()
        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if len(quoteChar) == 0:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()
        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if len(endQuoteChar) == 0:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()
        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        # build a regex matching the whole quoted string in one pass
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            # single-line mode additionally excludes newlines from the body
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # allow prefixes of a multi-char end quote, as long as the next
            # character breaks the full end-quote sequence
            self.pattern += (
                '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            charset = ''.join(set(self.quoteChar[0]+self.endQuoteChar[0])).replace('^',r'\^').replace('-',r'\-')
            self.escCharReplacePattern = re.escape(self.escChar)+("([%s])" % charset)
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # cheap first-character test before attempting the full regex match
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        ret = result.group()
        if self.unquoteResults:
            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
            if isinstance(ret,basestring):
                # replace escaped characters
                if self.escChar:
                    # FIX: replacement must be a raw string - "\g<1>" in a
                    # non-raw literal relies on "\g" being an unrecognized
                    # string escape, which newer CPython versions warn on and
                    # will eventually reject.  r"\g<1>" is the re group ref.
                    ret = re.sub(self.escCharReplacePattern,r"\g<1>",ret)
                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)
        return loc, ret
    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except:
            pass
        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
        return self.strRepr
class CharsNotIn(Token):
    """Token for matching words composed of characters *not* in a given set.
       Defined with string containing all disallowed characters, and an optional
       minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
       minimum value < 1 is not valid); the default values for C{max} and C{exact}
       are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        self.skipWhitespace = False
        self.notChars = notChars
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact length overrides both min and max
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        self.mayIndexError = False
    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] in self.notChars:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # consume characters until a disallowed one or the length cap
        start = loc
        loc += 1
        notchars = self.notChars
        maxlen = min( start+self.maxLen, len(instring) )
        while loc < maxlen and \
                (instring[loc] not in notchars):
            loc += 1
        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except:
            pass
        if self.strRepr is None:
            if len(self.notChars) > 4:
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars
        return self.strRepr
class White(Token):
    """Special matching class for matching whitespace.  Normally, whitespace is ignored
       by pyparsing grammars.  This class is included when some whitespace structures
       are significant.  Define with a string containing the whitespace characters to be
       matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
       as defined for the C{Word} class."""
    # display names for each recognized whitespace character
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # the characters we match must not also be skipped as ordinary whitespace
        self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
        #~ self.leaveWhitespace()
        self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact length overrides both min and max
            self.maxLen = exact
            self.minLen = exact
    def parseImpl( self, instring, loc, doActions=True ):
        if not(instring[ loc ] in self.matchWhite):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # consume matching whitespace up to the length cap
        start = loc
        loc += 1
        maxloc = start + self.maxLen
        maxloc = min( maxloc, len(instring) )
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1
        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
class _PositionToken(Token):
    """Abstract base for zero-width tokens that assert a position in the input."""
    def __init__( self ):
        super(_PositionToken,self).__init__()
        # use the subclass name for display / error messages
        self.name=self.__class__.__name__
        self.mayReturnEmpty = True
        self.mayIndexError = False
class GoToColumn(_PositionToken):
    """Token to advance to a specific column of input text; useful for tabular report scraping."""
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        self.col = colno
    def preParse( self, instring, loc ):
        # skip whitespace (and ignorable expressions) until the target column
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        thiscol = col( loc, instring )
        if thiscol > self.col:
            # already past the target column - cannot go backwards
            raise ParseException( instring, loc, "Text not in expected column", self )
        newloc = loc + self.col - thiscol
        ret = instring[ loc: newloc ]
        return newloc, ret
class LineStart(_PositionToken):
    """Matches if current position is at the beginning of a line within the parse string"""
    def __init__( self ):
        super(LineStart,self).__init__()
        # keep "\n" out of the skippable whitespace so line boundaries survive
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected start of line"
    def preParse( self, instring, loc ):
        preloc = super(LineStart,self).preParse(instring,loc)
        # NOTE(review): a newline at the pre-parse position advances loc,
        # not preloc - confirm this asymmetry is intended
        if instring[preloc] == "\n":
            loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # succeed at offset 0, after leading whitespace-only prefix, or after "\n"
        if not( loc==0 or
            (loc == self.preParse( instring, 0 )) or
            (instring[loc-1] == "\n") ): #col(loc, instring) != 1:
            #~ raise ParseException( instring, loc, "Expected start of line" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class LineEnd(_PositionToken):
    """Matches if current position is at the end of a line within the parse string"""
    def __init__( self ):
        super(LineEnd,self).__init__()
        # keep "\n" out of the skippable whitespace so line boundaries survive
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"
    def parseImpl( self, instring, loc, doActions=True ):
        if loc<len(instring):
            if instring[loc] == "\n":
                # consume the newline and return it as the token
                return loc+1, "\n"
            else:
                #~ raise ParseException( instring, loc, "Expected end of line" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        elif loc == len(instring):
            # NOTE(review): returns loc+1, one past the end of the string -
            # presumably so a repeated LineEnd cannot loop forever; confirm
            return loc+1, []
        else:
            # loc beyond the end of the string
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class StringStart(_PositionToken):
    """Matches if current position is at the beginning of the parse string"""
    def __init__( self ):
        super(StringStart,self).__init__()
        self.errmsg = "Expected start of text"
    def parseImpl( self, instring, loc, doActions=True ):
        if loc != 0:
            # see if entire string up to here is just whitespace and ignoreables
            if loc != self.preParse( instring, 0 ):
                #~ raise ParseException( instring, loc, "Expected start of text" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class StringEnd(_PositionToken):
    """Matches if current position is at the end of the parse string"""
    def __init__( self ):
        super(StringEnd,self).__init__()
        self.errmsg = "Expected end of text"
    def parseImpl( self, instring, loc, doActions=True ):
        # The three cases below (<, ==, >) are exhaustive; the original code
        # had a fourth, unreachable else branch that duplicated the raise -
        # removed as dead code.
        if loc < len(instring):
            #~ raise ParseException( instring, loc, "Expected end of text" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        elif loc == len(instring):
            # NOTE(review): returns loc+1, one past the end - matches the
            # LineEnd behavior above; presumably prevents infinite repetition
            return loc+1, []
        else:
            # loc already past the end: succeed without advancing further
            return loc, []
class WordStart(_PositionToken):
    """Matches if the current position is at the beginning of a Word, and
       is not preceded by any character in a given set of C{wordChars}
       (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
       use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
       the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordStart,self).__init__()
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the start of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        # offset 0 always qualifies as a word start
        if loc != 0:
            # fail if the previous char is a word char, or the current one is not
            if (instring[loc-1] in self.wordChars or
                instring[loc] not in self.wordChars):
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class WordEnd(_PositionToken):
    """Matches if the current position is at the end of a Word, and
       is not followed by any character in a given set of C{wordChars}
       (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
       use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
       the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd,self).__init__()
        self.wordChars = set(wordChars)
        # whitespace must stay put so the boundary test sees the real next char
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        instrlen = len(instring)
        # end of string always qualifies as a word end
        if instrlen>0 and loc<instrlen:
            # fail if the current char is a word char, or the previous one is not
            if (instring[loc] in self.wordChars or
                instring[loc-1] not in self.wordChars):
                #~ raise ParseException( instring, loc, "Expected end of word" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
    def __init__( self, exprs, savelist = False ):
        super(ParseExpression,self).__init__(savelist)
        # normalize exprs to a list of ParserElements
        if isinstance( exprs, list ):
            self.exprs = exprs
        elif isinstance( exprs, basestring ):
            # a lone string becomes a single Literal
            self.exprs = [ Literal( exprs ) ]
        else:
            try:
                self.exprs = list( exprs )
            except TypeError:
                # not iterable: wrap the single expression
                self.exprs = [ exprs ]
        self.callPreparse = False
    def __getitem__( self, i ):
        return self.exprs[i]
    def append( self, other ):
        self.exprs.append( other )
        # cached string repr is stale once the expression list changes
        self.strRepr = None
        return self
    def leaveWhitespace( self ):
        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
           all contained expressions."""
        self.skipWhitespace = False
        # copy sub-expressions first so shared elements elsewhere are unaffected
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore( self, other ):
        # register the ignorable at this level, then propagate the stored
        # (copied/wrapped) version to every sub-expression
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self
    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except:
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr
    def streamline( self ):
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            # try flattening a nested expression of the same class on the left...
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
            # ...and then on the right
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
        return self
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret
    def validate( self, validateTrace=[] ):
        # extend the trace with self and recurse into every sub-expression
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
    def copy(self):
        # deep-copy the expression list so the copy is fully independent
        ret = super(ParseExpression,self).copy()
        ret.exprs = [e.copy() for e in self.exprs]
        return ret
class And(ParseExpression):
    """Requires all given C{ParseExpression}s to be found in the given order.
       Expressions may be separated by whitespace.
       May be constructed using the C{'+'} operator.
    """
    class _ErrorStop(Empty):
        # marker element: everything after it in the And must match, or a
        # non-backtrackable ParseSyntaxException is raised
        def __init__(self, *args, **kwargs):
            # NOTE(review): super(Empty,...) skips Empty.__init__ entirely
            # (Token.__init__ runs instead) - looks unintentional; confirm
            super(Empty,self).__init__(*args, **kwargs)
            self.leaveWhitespace()
    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # the sequence can match empty input only if every element can
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        # adopt the leading element's whitespace handling
        self.setWhitespaceChars( exprs[0].whiteChars )
        self.skipWhitespace = exprs[0].skipWhitespace
        self.callPreparse = True
    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # past the error stop: convert parse failures into
                # ParseSyntaxException so callers cannot backtrack
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException:
                    pe = sys.exc_info()[1]
                    raise ParseSyntaxException(pe)
                except IndexError:
                    raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            # keep tokens that carry either list items or named results
            if exprtokens or exprtokens.keys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #And( [ self, other ] )
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # once an element must consume input, later elements cannot recurse
            # back to the same position
            if not e.mayReturnEmpty:
                break
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
class Or(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
       If two expressions match, the expression that matches the longest string will be used.
       May be constructed using the C{'^'} operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        # the alternation can match empty input if any alternative can
        self.mayReturnEmpty = False
        for e in self.exprs:
            if e.mayReturnEmpty:
                self.mayReturnEmpty = True
                break
    def parseImpl( self, instring, loc, doActions=True ):
        # try every alternative; remember the longest successful match and the
        # failure that got the furthest into the input
        maxExcLoc = -1
        maxMatchLoc = -1
        maxException = None
        for e in self.exprs:
            try:
                loc2 = e.tryParse( instring, loc )
            except ParseException:
                # sys.exc_info() keeps this clause Python-2/3 agnostic
                err = sys.exc_info()[1]
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                if loc2 > maxMatchLoc:
                    maxMatchLoc = loc2
                    maxMatchExp = e
        if maxMatchLoc < 0:
            # nothing matched: re-raise the furthest failure
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
        # re-parse the winning alternative, this time running parse actions
        return maxMatchExp._parse( instring, loc, doActions )
    def __ixor__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #Or( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
       If two expressions match, the first one listed is the one that will match.
       May be constructed using the C{'|'} operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst,self).__init__(exprs, savelist)
        # the alternation can match empty iff any alternative can
        if exprs:
            self.mayReturnEmpty = False
            for e in self.exprs:
                if e.mayReturnEmpty:
                    self.mayReturnEmpty = True
                    break
        else:
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # First-match alternation: return the result of the first
        # alternative that parses; on total failure re-raise the exception
        # that advanced furthest into the input.
        maxExcLoc = -1
        maxException = None
        for e in self.exprs:
            try:
                ret = e._parse( instring, loc, doActions )
                return ret
            except ParseException:
                # use sys.exc_info() instead of the Python-2-only
                # "except ParseException, err" so this method matches the
                # portable idiom used by Or.parseImpl and traceParseAction
                err = sys.exc_info()[1]
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
        # only got here if no expression matched, raise exception for match that made it the furthest
        else:
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ior__(self, other ):
        # expr |= other : append another alternative (strings become Literals)
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #MatchFirst( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        # every alternative starts at the same position, so check them all
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
    """Requires all given C{ParseExpression}s to be found, but in any order.
       Expressions may be separated by whitespace.
       May be constructed using the C{'&'} operator.
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        # Each can match empty only if every contained expression can
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        self.skipWhitespace = True
        # expression grouping (required/optional/repeated) is computed
        # lazily on the first parse attempt
        self.initExprGroups = True
    def parseImpl( self, instring, loc, doActions=True ):
        if self.initExprGroups:
            # partition contained expressions by how often they must match
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            # NOTE(review): opt1 holds unwrapped inner expressions while the
            # "e not in opt1" test compares the wrapper objects - confirm
            # this mixed-level comparison is intended
            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt = self.optionals[:]
        matchOrder = []
        # repeatedly try every not-yet-matched expression until a full
        # pass makes no progress
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    matchOrder.append(e)
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False
        if tmpReqd:
            # at least one required expression never matched
            missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
        # add any unmatched Optionals, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
        # re-parse in the order actually discovered, this time with
        # parse actions enabled
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)
        # merge all results; duplicated result names are accumulated into
        # combined ParseResults rather than overwritten
        finalResults = ParseResults([])
        for r in resultlist:
            dups = {}
            for k in r.keys():
                if k in finalResults.keys():
                    tmp = ParseResults(finalResults[k])
                    tmp += ParseResults(r[k])
                    dups[k] = tmp
            finalResults += ParseResults(r)
            for k,v in dups.items():
                finalResults[k] = v
        return loc, finalResults
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
    """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens."""
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            expr = Literal(expr)
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # inherit the contained expression's parsing characteristics
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)
    def parseImpl( self, instring, loc, doActions=True ):
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)
    def leaveWhitespace( self ):
        """Disable leading-whitespace skipping for this element and its contents.

        Works on a copy of the contained expression so other users of the
        same expression object are unaffected.
        """
        self.skipWhitespace = False
        # guard before copying: the original called self.expr.copy()
        # unconditionally and raised AttributeError when self.expr was None
        # (e.g. a not-yet-defined Forward)
        if self.expr is not None:
            self.expr = self.expr.copy()
            self.expr.leaveWhitespace()
        return self
    def ignore( self, other ):
        # Suppress-wrapped ignorables are only added once; either way the
        # contained expression must ignore the same ignorable
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self
    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self
    def checkRecursion( self, parseElementList ):
        # seeing ourselves again means the grammar is left-recursive
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )
    def validate( self, validateTrace=[] ):
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )
    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except Exception:
            # narrowed from a bare except; fall through to the default repr
            pass
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """Positive lookahead: succeed only if the contained expression matches
    at the current position, without advancing the parse position.
    Always returns a null token list."""
    def __init__( self, expr ):
        super(FollowedBy, self).__init__(expr)
        # lookahead consumes nothing, so it can always "match empty"
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # propagate any parse failure; on success stay put and yield nothing
        self.expr.tryParse(instring, loc)
        return loc, []
class NotAny(ParseElementEnhance):
    """Negative lookahead: succeed only if the contained expression does
    *not* match at the current position.  Does not advance the parse
    position, does not skip leading whitespace, and always returns a null
    token list.  May be constructed using the '~' operator."""
    def __init__( self, expr ):
        super(NotAny, self).__init__(expr)
        #~ self.leaveWhitespace()
        # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
        self.skipWhitespace = False
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
    def parseImpl( self, instring, loc, doActions=True ):
        innerMatched = True
        try:
            self.expr.tryParse(instring, loc)
        except (ParseException, IndexError):
            innerMatched = False
        if innerMatched:
            # inner expression matched - exactly what we forbid
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
    def __str__( self ):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{%s}" % _ustr(self.expr)
        return self.strRepr
class ZeroOrMore(ParseElementEnhance):
    """Optional repetition of zero or more of the given expression."""
    def __init__( self, expr ):
        super(ZeroOrMore,self).__init__(expr)
        # zero repetitions is always an acceptable match
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        tokens = []
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            while 1:
                if hasIgnoreExprs:
                    # skip ignorable content (comments, etc.) between matches
                    preloc = self._skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                # only accumulate results that are non-empty by value or name
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # first failure ends the repetition; loc/tokens reflect the
            # last successful match (or the starting state for zero matches)
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
        # repeated matches are always reported as a list
        ret.saveAsList = True
        return ret
class OneOrMore(ParseElementEnhance):
    """Repetition of one or more of the given expression."""
    def parseImpl( self, instring, loc, doActions=True ):
        # the first occurrence is mandatory - let any failure propagate
        loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False)
        # subsequent occurrences are optional; the first failure stops the loop
        skippable = len(self.ignoreExprs) > 0
        try:
            while True:
                preloc = self._skipIgnorables(instring, loc) if skippable else loc
                loc, more = self.expr._parse(instring, preloc, doActions)
                # only accumulate results that are non-empty by value or name
                if more or more.keys():
                    tokens += more
        except (ParseException, IndexError):
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{%s}..." % _ustr(self.expr)
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(OneOrMore, self).setResultsName(name, listAllMatches)
        # repeated matches are always stored as a list
        ret.saveAsList = True
        return ret
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """Optional matching of the given expression.
    A default return string can also be specified, if the optional expression
    is not found.
    """
    def __init__( self, exprs, default=_optionalNotMatched ):
        super(Optional, self).__init__(exprs, savelist=False)
        self.defaultValue = default
        # by definition, absence is an acceptable match
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            # success: forward the contained expression's (loc, tokens)
            return self.expr._parse(instring, loc, doActions, callPreParse=False)
        except (ParseException, IndexError):
            pass
        # no match: substitute the default value, if one was supplied
        if self.defaultValue is _optionalNotMatched:
            return loc, []
        if self.expr.resultsName:
            tokens = ParseResults([self.defaultValue])
            tokens[self.expr.resultsName] = self.defaultValue
        else:
            tokens = [self.defaultValue]
        return loc, tokens
    def __str__( self ):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[%s]" % _ustr(self.expr)
        return self.strRepr
class SkipTo(ParseElementEnhance):
    """Token for skipping over all undefined text until the matched expression is found.
       If C{include} is set to true, the matched expression is also parsed (the skipped text
       and matched expression are returned as a 2-element list).  The C{ignore}
       argument is used to define grammars (typically quoted strings and comments) that
       might contain false matches.
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        # failOn: if this expression is seen before the target, the skip fails
        if failOn is not None and isinstance(failOn, basestring):
            self.failOn = Literal(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)
    def parseImpl( self, instring, loc, doActions=True ):
        # advance one character at a time until the target expression
        # matches, a failOn expression matches, or the input is exhausted
        startLoc = loc
        instrlen = len(instring)
        expr = self.expr
        failParse = False
        while loc <= instrlen:
            try:
                if self.failOn:
                    try:
                        self.failOn.tryParse(instring, loc)
                    except ParseBaseException:
                        pass
                    else:
                        # the forbidden expression appeared first - abort
                        failParse = True
                        raise ParseException(instring, loc, "Found expression " + str(self.failOn))
                failParse = False
                if self.ignoreExpr is not None:
                    # skip ignorable regions (e.g. quoted strings) that
                    # might contain false matches of the target
                    while 1:
                        try:
                            loc = self.ignoreExpr.tryParse(instring,loc)
                            # print "found ignoreExpr, advance to", loc
                        except ParseBaseException:
                            break
                # probe (without actions) whether the target matches here
                expr._parse( instring, loc, doActions=False, callPreParse=False )
                skipText = instring[startLoc:loc]
                if self.includeMatch:
                    # consume the target too; return skipped text + match
                    loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
                    if mat:
                        skipRes = ParseResults( skipText )
                        skipRes += mat
                        return loc, [ skipRes ]
                    else:
                        return loc, [ skipText ]
                else:
                    return loc, [ skipText ]
            except (ParseException,IndexError):
                if failParse:
                    # re-raise the failOn failure unchanged
                    raise
                else:
                    loc += 1
        # target was never found before end of input
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Forward(ParseElementEnhance):
    """Forward declaration of an expression to be defined later -
       used for recursive grammars, such as algebraic infix notation.
       When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
       Note: take care when assigning to C{Forward} not to overlook precedence of operators.
       Specifically, '|' has a lower precedence than '<<', so that::
          fwdExpr << a | b | c
       will actually be evaluated as::
          (fwdExpr << a) | b | c
       thereby leaving b and c out as parseable alternatives.  It is recommended that you
       explicitly group the values inserted into the C{Forward}::
          fwdExpr << (a | b | c)
    """
    def __init__( self, other=None ):
        # may be created empty; the real expression is supplied later via <<
        super(Forward,self).__init__( other, savelist=False )
    def __lshift__( self, other ):
        # fwd << expr : define the deferred expression and adopt its
        # parsing characteristics
        if isinstance( other, basestring ):
            other = Literal(other)
        self.expr = other
        self.mayReturnEmpty = other.mayReturnEmpty
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        # deliberately returns None so chained '<<' cannot silently misbind
        return None
    def leaveWhitespace( self ):
        # do not touch self.expr - it may not be defined yet
        self.skipWhitespace = False
        return self
    def streamline( self ):
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self
    def validate( self, validateTrace=[] ):
        # guard against infinite recursion through self-referential grammars
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        # temporarily swap in _ForwardNoRecurse so that printing a
        # recursive grammar terminates instead of recursing forever
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            # always restore the real class, even if _ustr raised
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString
    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # undefined Forward: create a new one that forwards to this one
            ret = Forward()
            ret << self
            return ret
class _ForwardNoRecurse(Forward):
    # Temporary stand-in class used by Forward.__str__ to break the
    # infinite recursion that printing a self-referential grammar would cause.
    def __str__( self ):
        return "..."
class TokenConverter(ParseElementEnhance):
    """Abstract subclass of C{ParseElementEnhance}, for converting parsed results."""
    def __init__( self, expr, savelist=False ):
        # savelist is accepted for signature compatibility but deliberately
        # not forwarded; converters manage saveAsList themselves
        super(TokenConverter,self).__init__( expr )#, savelist )
        self.saveAsList = False
class Upcase(TokenConverter):
    """Converter to upper case all matching tokens.

    Deprecated - use the C{upcaseTokens} parse action instead.
    """
    def __init__(self, *args):
        super(Upcase,self).__init__(*args)
        warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
                       DeprecationWarning,stacklevel=2)
    def postParse( self, instring, loc, tokenlist ):
        # uses the Python 2 'string' module function; tokens must be strings
        return list(map( string.upper, tokenlist ))
class Combine(TokenConverter):
    """Converter to concatenate all matching tokens to a single string.
       By default, the matching patterns must also be contiguous in the input string;
       this can be disabled by specifying C{'adjacent=False'} in the constructor.
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True
    def ignore( self, other ):
        # when adjacent, ignorables must not be skipped between the pieces
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self
    def postParse( self, instring, loc, tokenlist ):
        # join all matched tokens into a single string, keeping named results
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
        if self.resultsName and len(retToks.keys())>0:
            return [ retToks ]
        else:
            return retToks
class Group(TokenConverter):
    """Converter that nests the matched tokens as a single list element -
    useful for keeping the results of C{ZeroOrMore} and C{OneOrMore}
    expressions together."""
    def __init__( self, expr ):
        super(Group, self).__init__(expr)
        # grouped results are always kept as a list
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # wrap the whole token list as one grouped element
        return [tokenlist]
class Dict(TokenConverter):
    """Converter to return a repetitive expression as a list, but also as a dictionary.
       Each element can also be referenced using the first token in the expression as its key.
       Useful for tabular report scraping when the first column can be used as a item key.
    """
    def __init__( self, exprs ):
        super(Dict,self).__init__( exprs )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # use the first token of each sub-list as its dictionary key
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            ikey = tok[0]
            if isinstance(ikey,int):
                # integer keys are stored by their string form
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # key with no value
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple key/value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                # multi-token value: store everything after the key
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    # single unnamed value: unwrap it
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """Converter for ignoring the results of a parsed expression."""
    def postParse( self, instring, loc, tokenlist ):
        # discard every matched token
        return []
    def suppress( self ):
        # already suppressed - nothing more to do
        return self
class OnlyOnce(object):
    """Wrapper for parse actions, to ensure they are only called once."""
    def __init__(self, methodCall):
        self.callable = _trim_arity(methodCall)
        self.called = False
    def __call__(self, s, l, t):
        if self.called:
            # a second invocation fails the parse instead of re-running
            raise ParseException(s, l, "")
        # mark as called only after a successful invocation, so an action
        # that raises may still be retried
        results = self.callable(s, l, t)
        self.called = True
        return results
    def reset(self):
        """Re-arm the wrapper so the action may fire once more."""
        self.called = False
def traceParseAction(f):
    """Decorator for debugging parse actions.

    Logs entry (location and tokens) and exit (return value or exception)
    of the wrapped parse action to stderr.
    """
    f = _trim_arity(f)
    def z(*paArgs):
        # use __name__ rather than the Python-2-only func_name attribute,
        # consistent with the z.__name__ copy below
        thisFunc = f.__name__
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            # bound method: prefix with the owning class name
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception:
            exc = sys.exc_info()[1]
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
    """Helper to define a delimited list of expressions - the delimiter defaults to ','.
       By default, the list elements and delimiters can have intervening whitespace, and
       comments, but this can be overridden by passing C{combine=True} in the constructor.
       If C{combine} is set to True, the matching tokens are returned as a single token
       string, with the delimiters included; otherwise, the matching tokens are returned
       as a list of tokens, with the delimiters suppressed.
    """
    listName = "%s [%s %s]..." % (_ustr(expr), _ustr(delim), _ustr(expr))
    if combine:
        # keep delimiters and fuse everything into one token
        return Combine(expr + ZeroOrMore(delim + expr)).setName(listName)
    # drop delimiters, return elements as separate tokens
    return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(listName)
def countedArray( expr, intExpr=None ):
    """Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    """
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        # once the count is parsed, redefine the Forward to expect exactly
        # that many copies of expr (or an empty match for a zero count)
        n = t[0]
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        return []
    if intExpr is None:
        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
    else:
        # copy so the caller's expression is not modified
        intExpr = intExpr.copy()
    intExpr.setName("arrayLen")
    # callDuringTry so the redefinition also happens during lookahead parses
    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
    return ( intExpr + arrayExpr )
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
       the tokens matched in a previous expression, that is, it looks
       for a 'repeat' of a previous expression.  For example::
           first = Word(nums)
           second = matchPreviousLiteral(first)
           matchExpr = first + ":" + second
       will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
       previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
       If this is not desired, use C{matchPreviousExpr}.
       Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        # redefine rep to literally match whatever the first expression
        # just produced
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And( [ Literal(tt) for tt in tflat ] )
        else:
            # no tokens matched: repeat of nothing is an empty match
            rep << Empty()
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
       the tokens matched in a previous expression, that is, it looks
       for a 'repeat' of a previous expression.  For example::
           first = Word(nums)
           second = matchPreviousExpr(first)
           matchExpr = first + ":" + second
       will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
       expressions, will *not* match the leading C{"1:1"} in C{"1:10"};
       the expressions are evaluated first, and then compared, so
       C{"1"} is compared with C{"10"}.
       Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    # parse with a copy of the same grammar, then verify the tokens agree
    e2 = expr.copy()
    rep << e2
    def copyTokenToRepeater(s,l,t):
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            theseTokens = _flatten(t.asList())
            if theseTokens != matchTokens:
                # token mismatch: reject the repeat
                raise ParseException("",0,"")
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def _escapeRegexRangeChars(s):
    """Backslash-escape characters that are special inside a regex
    character class, and make newline/tab printable."""
    # escape these chars: ^-]
    for special in r"\^-]":
        s = s.replace(special, _bslash + special)
    s = s.replace("\n", r"\n").replace("\t", r"\t")
    return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
    """Helper to quickly define a set of alternative Literals, and makes sure to do
       longest-first testing when there is a conflict, regardless of the input order,
       but returns a C{MatchFirst} for best performance.

       Parameters:
        - strs - a string of space-delimited literals, or a list of string literals
        - caseless - (default=False) - treat all literals as caseless
        - useRegex - (default=True) - as an optimization, will generate a Regex
          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
          if creating a C{Regex} raises an exception)
    """
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal
    if isinstance(strs,(list,tuple)):
        symbols = list(strs[:])
    elif isinstance(strs,basestring):
        symbols = strs.split()
    else:
        warnings.warn("Invalid argument to oneOf, expected string or list",
                SyntaxWarning, stacklevel=2)
        # fall back to an empty symbol set; previously 'symbols' was left
        # undefined here and the loop below crashed with a NameError
        symbols = []
    # remove duplicates and reorder so that a symbol masked by an earlier
    # prefix is tried first (longest-first matching)
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1
    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            # all single characters: a character class is most efficient
            if len(symbols)==len("".join(symbols)):
                return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
            else:
                return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
        except Exception:
            # narrowed from a bare except; re.error is the expected failure
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)
    # last resort, just use MatchFirst
    return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
def dictOf( key, value ):
    """Helper to easily and clearly define a dictionary by specifying the respective patterns
       for the key and value.  Takes care of defining the C{Dict}, C{ZeroOrMore}, and C{Group} tokens
       in the proper order.  The key pattern can include delimiting markers or punctuation,
       as long as they are suppressed, thereby leaving the significant key text.  The value
       pattern can include named results, so that the C{Dict} results can include named token
       fields.
    """
    entry = Group(key + value)
    return Dict(ZeroOrMore(entry))
def originalTextFor(expr, asString=True):
    """Helper to return the original, untokenized text for a given expression.  Useful to
       restore the parsed fields of an HTML start tag into the raw tag text itself, or to
       revert separate tokens with intervening whitespace back to the original matching
       input text. Simpler to use than the parse action C{L{keepOriginalText}}, and does not
       require the inspect module to chase up the call stack.  By default, returns a
       string containing the original parsed text.

       If the optional C{asString} argument is passed as C{False}, then the return value is a
       C{ParseResults} containing any results names that were originally matched, and a
       single token containing the original matched text from the input string.  So if
       the expression passed to C{L{originalTextFor}} contains expressions with defined
       results names, you must set C{asString} to C{False} if you want to preserve those
       results name values."""
    # record the input locations just before and just after the expression
    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endlocMarker = locMarker.copy()
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        # replace all tokens with the raw matched slice
        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            # keep named results, but collapse the token list to the raw slice
            del t[:]
            t.insert(0, s[t._original_start:t._original_end])
            # drop the bookkeeping markers from the results
            del t["_original_start"]
            del t["_original_end"]
    matchExpr.setParseAction(extractText)
    return matchExpr
def ungroup(expr):
    """Helper to undo pyparsing's default grouping of And expressions, even
       if all but one are non-empty."""
    def _firstToken(t):
        # unwrap the single grouped element
        return t[0]
    return TokenConverter(expr).setParseAction(_firstToken)
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")

# building blocks for srange()'s "[...]" expression parser
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ])
# strip the leading "\x"/"\0x" prefix before the base-16 conversion; the
# previous t[0][1:] left the "x" in place, so plain \xNN escapes raised
# ValueError inside int(...,16)
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
# for octal, [1:] leaves "0nnn", which int(...,8) accepts
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
# expand a (start,end) range pair into the full run of characters
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
    r"""Helper to easily define string ranges for use in Word construction.  Borrows
       syntax from regexp '[]' string range definitions::
          srange("[0-9]")   -> "0123456789"
          srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
          srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
       The input string must be enclosed in []'s, and the returned string is the expanded
       character set joined into a single string.
       The values enclosed in the []'s may be::
          a single character
          an escaped character with a leading backslash (such as \- or \])
          an escaped hex character with a leading '\x' (\x21, which is a '!' character)
            (\0x## is also supported for backwards compatibility)
          an escaped octal character with a leading '\0' (\041, which is a '!' character)
          a range of any of the above, separated by a dash ('a-z', etc.)
          any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
    """
    try:
        return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
    except Exception:
        # narrowed from a bare except; a malformed range spec yields ""
        # (preserves the historical best-effort behavior)
        return ""
def matchOnlyAtCol(n):
    """Helper method for defining parse actions that require matching at a specific
       column in the input text.
    """
    def verifyCol(strg, locn, toks):
        # reject the match unless it begins exactly at column n
        if col(locn, strg) != n:
            raise ParseException(strg, locn, "matched token not at column %d" % n)
    return verifyCol
def replaceWith(replStr):
    """Helper method for common parse actions that simply return a literal value. Especially
       useful when used with C{transformString()}.
    """
    # the returned action ignores its (s, loc, toks) arguments entirely
    return lambda *args: [replStr]
def removeQuotes(s,l,t):
    """Helper parse action for removing quotation marks from parsed quoted strings.
       To use, add this parse action to quoted string using::
         quotedString.setParseAction( removeQuotes )
    """
    # strip the first and last characters (the surrounding quote marks)
    quoted = t[0]
    return quoted[1:-1]
def upcaseTokens(s,l,t):
    """Helper parse action to convert tokens to upper case."""
    return [_ustr(tok).upper() for tok in t]
def downcaseTokens(s,l,t):
    """Helper parse action to convert tokens to lower case."""
    return [_ustr(tok).lower() for tok in t]
def keepOriginalText(s,startLoc,t):
    """DEPRECATED - use new helper method C{originalTextFor}.
       Helper parse action to preserve original parsed text,
       overriding any nested parse actions."""
    try:
        # locate the end of the tokens via stack inspection (expensive)
        endloc = getTokensEndLoc()
    except ParseException:
        raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
    del t[:]
    # replace all tokens with the raw matched slice of the input
    t += ParseResults(s[startLoc:endloc])
    return t
def getTokensEndLoc():
    """Method to be called from within a parse action to determine the end
       location of the parsed tokens."""
    import inspect
    fstack = inspect.stack()
    try:
        # search up the stack (through intervening argument normalizers) for correct calling routine
        for f in fstack[2:]:
            if f[3] == "_parseNoCache":
                # read the parser's current location from its local variables
                endloc = f[0].f_locals["loc"]
                return endloc
        else:
            # for/else: loop exhausted without finding the parser frame
            raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
    finally:
        # break the reference cycle created by holding stack frames
        del fstack
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr,basestring):
        resname = tagStr
        # XML tags are case-sensitive; HTML tags are not
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name
    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        # XML: attribute values must be double-quoted
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        # HTML: attribute values may be quoted or bare, and may be absent
        printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")
    # results names like "startDiv"/"endDiv", derived from the tag name
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for HTML, given a tag name"""
    # HTML tags match caselessly and allow loosely quoted attributes
    return _makeTags(tagStr, xml=False)
def makeXMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for XML, given a tag name"""
    # XML tags are case-sensitive and require quoted attribute values
    return _makeTags(tagStr, xml=True)
def withAttribute(*args,**attrDict):
    """Helper to create a validating parse action for start tags built with
    C{makeXMLTags} or C{makeHTMLTags}.  Qualifies a tag match by required
    attribute values, avoiding false matches on common tags like C{<TD>}.

    Required attributes may be given either as keyword arguments
    (C{align="right"}), as an explicit dict via C{**}, or as a series of
    (name, value) tuples (use tuples for namespaced names such as
    C{("ns1:class", "Customer")}).  To require only that an attribute be
    present, pass C{withAttribute.ANY_VALUE} as its value.
    """
    if args:
        required = list(args)
    else:
        required = list(attrDict.items())
    required = [(name, value) for name, value in required]
    def pa(s,l,tokens):
        # Raise ParseException on the first missing or mismatched attribute.
        for name, expected in required:
            if name not in tokens:
                raise ParseException(s,l,"no matching attribute " + name)
            actual = tokens[name]
            if expected != withAttribute.ANY_VALUE and actual != expected:
                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
                                         (name, actual, expected))
    return pa
withAttribute.ANY_VALUE = object()
# Associativity constants for operatorPrecedence; compared by identity.
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy.  Operators may be unary or
    binary, left- or right-associative.  Parse actions can also be attached
    to operator expressions.

    Parameters:
     - baseExpr - expression representing the most basic element for the nested
     - opList - list of tuples, one for each operator precedence level in the
       expression grammar (highest precedence first); each tuple is of the form
       (opExpr, numTerms, rightLeftAssoc, parseAction), where:
        - opExpr is the pyparsing expression for the operator;
          may also be a string, which will be converted to a Literal;
          if numTerms is 3, opExpr is a tuple of two expressions, for the
          two operators separating the 3 terms
        - numTerms is the number of terms for this operator (must be 1, 2, or 3)
        - rightLeftAssoc is the indicator whether the operator is right or left
          associative, using the pyparsing-defined constants opAssoc.RIGHT and
          opAssoc.LEFT.
        - parseAction is the parse action to be associated with expressions
          matching this operator expression (the parse action tuple member
          may be omitted)
    """
    ret = Forward()
    # Innermost level: the base element, or a parenthesized full expression.
    lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
    for i,operDef in enumerate(opList):
        # Pad the tuple so a missing parse action comes through as None.
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward()#.setName("expr%d" % i)
        if rightLeftAssoc == opAssoc.LEFT:
            if arity == 1:
                # Postfix unary: one operand followed by one or more operators.
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    # opExpr of None means juxtaposition (implicit operator).
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    # Right-associative: recurse into thisExpr on the right side.
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            matchExpr.setParseAction( pa )
        # Each level matches its own operator form, or falls through to the
        # next-higher-precedence expression.
        thisExpr << ( matchExpr | lastExpr )
        lastExpr = thisExpr
    ret << lastExpr
    return ret
# Single-line quoted strings; allow doubled-quote and backslash escapes (incl. \xHH).
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
# A quoted string with a leading 'u' prefix (Python 2 unicode literal syntax).
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).

    Parameters:
     - opener - opening character for a nested list (default="("); can also be a pyparsing expression
     - closer - closing character for a nested list (default=")"); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=None)
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)

    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.

    Use the C{ignoreExpr} argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
    The default is L{quotedString}, but if no expressions are to be ignored,
    then pass C{None} for this argument.

    NOTE(review): the default ignoreExpr is a single expression object created
    at import time and shared across calls that use the default - presumably
    intentional here, but mutating it (e.g. adding parse actions) would affect
    all default-using grammars; confirm before changing.
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # Build a default content matcher: runs of non-whitespace characters
        # that are not delimiters and not the start of an ignored expression.
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                # Single-character delimiters can be excluded via CharsNotIn.
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                # Multi-character delimiters need explicit negative lookahead.
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # Recursive structure: a Group of items between delimiters, where each
    # item may itself be another nested expression.
    ret = Forward()
    if ignoreExpr is not None:
        ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.

    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
       is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
       (multiple statementWithIndentedBlock expressions within a single grammar
       should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond the
       the current level; set to False for block of left-most statements
       (default=True)

    A valid block must contain at least one C{blockStatement}.

    The three checkers below are parse actions that read/mutate the shared
    indentStack as a side effect; their relative order in the grammar matters.
    """
    def checkPeerIndent(s,l,t):
        # Statement must start at exactly the current indentation level.
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")
    def checkSubIndent(s,l,t):
        # Statement deeper than the current level opens a new block.
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")
    def checkUnindent(s,l,t):
        # Dedent must land on (or inside) an enclosing indentation level.
        if l >= len(s): return
        curCol = col(l,s)
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()
    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = Empty() + Empty().setParseAction(checkSubIndent)
    PEER   = Empty().setParseAction(checkPeerIndent)
    UNDENT = Empty().setParseAction(checkUnindent)
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # Allow backslash-continued lines inside block statements.
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr
# Extended alphabetic and punctuation ranges for 8-bit (Latin-1) characters.
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
# Generic HTML open/close tag matchers (any tag name).
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
# Common HTML character entities and their replacement text.
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
# Comma-separated list items: any run of non-comma printables, possibly with
# embedded whitespace, but never spanning a comma or line end.
_noncomma = "".join( [ c for c in printables if c != "," ] )
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
                                  Optional( Word(" \t") +
                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
    # Smoke-test demo: a toy SQL SELECT grammar run against good and bad input.
    def test( teststring ):
        # Parse one statement, printing either the token structure or the
        # error location marked with a caret.
        try:
            tokens = simpleSQL.parseString( teststring )
            tokenlist = tokens.asList()
            print (teststring + "->"   + str(tokenlist))
            print ("tokens = "         + str(tokens))
            print ("tokens.columns = " + str(tokens.columns))
            print ("tokens.tables = "  + str(tokens.tables))
            print (tokens.asXML("SQL",True))
        except ParseBaseException:
            err = sys.exc_info()[1]
            print (teststring + "->")
            print (err.line)
            print (" "*(err.column-1) + "^")
            print (err)
        print()

    # Grammar: SELECT <columns|*> FROM <tables>, identifiers upper-cased.
    selectToken    = CaselessLiteral( "select" )
    fromToken      = CaselessLiteral( "from" )
    ident          = Word( alphas, alphanums + "_$" )
    columnName     = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
    tableName      = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    tableNameList  = Group( delimitedList( tableName ) )#.setName("tables")
    simpleSQL      = ( selectToken + \
                     ( '*' | columnNameList ).setResultsName( "columns" ) + \
                     fromToken + \
                     tableNameList.setResultsName( "tables" ) )

    # Valid statements, then deliberately malformed ones to exercise errors.
    test( "SELECT * from XYZZY, ABC" )
    test( "select * from SYS.XYZZY" )
    test( "Select A from Sys.dual" )
    test( "Select AA,BB,CC from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Xelect A, B, C from Sys.dual" )
    test( "Select A, B, C frox Sys.dual" )
    test( "Select" )
    test( "Select ^^^ frox Sys.dual" )
    test( "Select A, B, C from Sys.dual, Table2   " )
| Python |
#!/usr/bin/python
"""
HeaderID Extension for Python-Markdown
======================================
Adds ability to set HTML IDs for headers.
Basic usage:
>>> import markdown
>>> text = "# Some Header # {#some_id}"
>>> md = markdown.markdown(text, ['headerid'])
>>> md
u'<h1 id="some_id">Some Header</h1>'
All header IDs are unique:
>>> text = '''
... #Header
... #Another Header {#header}
... #Third Header {#header}'''
>>> md = markdown.markdown(text, ['headerid'])
>>> md
u'<h1 id="header">Header</h1>\\n<h1 id="header_1">Another Header</h1>\\n<h1 id="header_2">Third Header</h1>'
To fit within a html template's hierarchy, set the header base level:
>>> text = '''
... #Some Header
... ## Next Level'''
>>> md = markdown.markdown(text, ['headerid(level=3)'])
>>> md
u'<h3 id="some_header">Some Header</h3>\\n<h4 id="next_level">Next Level</h4>'
Turn off auto generated IDs:
>>> text = '''
... # Some Header
... # Header with ID # { #foo }'''
>>> md = markdown.markdown(text, ['headerid(forceid=False)'])
>>> md
u'<h1>Some Header</h1>\\n<h1 id="foo">Header with ID</h1>'
Use with MetaData extension:
>>> text = '''header_level: 2
... header_forceid: Off
...
... # A Header'''
>>> md = markdown.markdown(text, ['headerid', 'meta'])
>>> md
u'<h2>A Header</h2>'
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://www.freewisdom.org/project/python-markdown/HeaderId>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown
from markdown import etree
import re
from string import ascii_lowercase, digits, punctuation
# Characters allowed in an auto-generated header id.
ID_CHARS = ascii_lowercase + digits + '-_'
# Matches an id with a numeric de-duplication suffix, e.g. "header_2".
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
class HeaderIdProcessor(markdown.blockprocessors.BlockProcessor):
    """ Replacement BlockProcessor for Header IDs.

    Parses ATX-style headers (``# Header {#id}``), honoring an optional
    explicit id and otherwise generating a unique id from the header text.
    """

    # Detect a header at start of any line in block
    RE = re.compile(r"""(^|\n)
                        (?P<level>\#{1,6})  # group('level') = string of hashes
                        (?P<header>.*?)     # group('header') = Header text
                        \#*                 # optional closing hashes
                        (?:[ \t]*\{[ \t]*\#(?P<id>[-_:a-zA-Z0-9]+)[ \t]*\})?
                        (\n|$)              #  ^^ group('id') = id attribute
                     """,
                     re.VERBOSE)

    # Ids already used in this document; reset per-run by HeaderIdExtension.reset().
    IDs = []

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()] # All lines before header
            after = block[m.end():]    # All lines after header
            if before:
                # As the header was not the first line of the block and the
                # lines before the header must be parsed first,
                # recursively parse this lines as a block.
                self.parser.parseBlocks(parent, [before])
            # Create header using named groups from RE
            start_level, force_id = self._get_meta()
            level = len(m.group('level')) + start_level
            if level > 6:
                level = 6
            h = markdown.etree.SubElement(parent, 'h%d' % level)
            h.text = m.group('header').strip()
            if m.group('id'):
                h.set('id', self._unique_id(m.group('id')))
            elif force_id:
                h.set('id', self._create_id(m.group('header').strip()))
            if after:
                # Insert remaining lines as first block for future parsing.
                blocks.insert(0, after)
        else:
            # This should never happen, but just in case...
            # NOTE(review): `message` and `CRITICAL` are not defined in this
            # module - presumably markdown's legacy logging helpers; confirm.
            message(CRITICAL, "We've got a problem header!")

    def _get_meta(self):
        """ Return meta data supported by this ext as a tuple """
        level = int(self.config['level'][0]) - 1
        force = self._str2bool(self.config['forceid'][0])
        if hasattr(self.md, 'Meta'):
            # Fixed: use `in` instead of dict.has_key (removed in Python 3;
            # `in` works identically on Python 2).
            if 'header_level' in self.md.Meta:
                level = int(self.md.Meta['header_level'][0]) - 1
            if 'header_forceid' in self.md.Meta:
                force = self._str2bool(self.md.Meta['header_forceid'][0])
        return level, force

    def _str2bool(self, s, default=False):
        """ Convert a string to a boolean value. """
        s = str(s)
        if s.lower() in ['0', 'f', 'false', 'off', 'no', 'n']:
            return False
        elif s.lower() in ['1', 't', 'true', 'on', 'yes', 'y']:
            return True
        return default

    def _unique_id(self, id):
        """ Ensure ID is unique. Append '_1', '_2'... if not """
        while id in self.IDs:
            m = IDCOUNT_RE.match(id)
            if m:
                id = '%s_%d'% (m.group(1), int(m.group(2))+1)
            else:
                id = '%s_%d'% (id, 1)
        self.IDs.append(id)
        return id

    def _create_id(self, header):
        """ Return ID from Header text. """
        h = ''
        for c in header.lower().replace(' ', '_'):
            if c in ID_CHARS:
                h += c
            elif c not in punctuation:
                # Non-id, non-punctuation characters are replaced, not dropped.
                h += '+'
        return self._unique_id(h)
class HeaderIdExtension (markdown.Extension):
    """Extension wiring HeaderIdProcessor into the block parser."""
    def __init__(self, configs):
        # Default settings; each entry is [value, help-text].
        self.config = {
            'level' : ['1', 'Base level for headers.'],
            'forceid' : ['True', 'Force all headers to have an id.']
        }
        # Caller-supplied configs override the defaults.
        for key, value in configs:
            self.setConfig(key, value)
    def extendMarkdown(self, md, md_globals):
        md.registerExtension(self)
        processor = HeaderIdProcessor(md.parser)
        processor.md = md
        processor.config = self.config
        self.processor = processor
        # Replace existing hashheader processor in place.
        md.parser.blockprocessors['hashheader'] = self.processor
    def reset(self):
        # A fresh document must not inherit the previous run's used ids.
        self.processor.IDs = []
def makeExtension(configs=None):
    """Entry point used by markdown to instantiate the headerid extension."""
    extension = HeaderIdExtension(configs=configs)
    return extension
if __name__ == "__main__":
    # Run the doctests embedded in the module docstring.
    import doctest
    doctest.testmod()
| Python |
#!/usr/bin/env python
"""
Definition List Extension for Python-Markdown
=============================================
Added parsing of Definition Lists to Python-Markdown.
A simple example:
Apple
: Pomaceous fruit of plants of the genus Malus in
the family Rosaceae.
: An american computer company.
Orange
: The fruit of an evergreen tree of the genus Citrus.
Copyright 2008 - [Waylan Limberg](http://achinghead.com)
"""
import markdown, re
from markdown import etree
class DefListProcessor(markdown.blockprocessors.BlockProcessor):
    """ Process Definition Lists. """

    # A definition line: up to 3 leading spaces, a colon, 1-3 spaces, the text.
    RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')

    def test(self, parent, block):
        # Only run on blocks containing at least one definition line.
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        # Lines before the first ':' line are the terms being defined.
        terms = [l.strip() for l in block[:m.start()].split('\n') if l.strip()]
        # Remove one level of indentation from the definition continuation.
        d, theRest = self.detab(block[m.end():])
        if d:
            d = '%s\n%s' % (m.group(2), d)
        else:
            d = m.group(2)
        sibling = self.lastChild(parent)
        # NOTE(review): if there are no terms and parent has no children,
        # `sibling.tag` raises AttributeError on None - confirm upstream
        # processors guarantee a sibling in that case.
        if not terms and sibling.tag == 'p':
            # The previous paragraph contains the terms
            state = 'looselist'
            terms = sibling.text.split('\n')
            parent.remove(sibling)
            # Acquire new sibling
            sibling = self.lastChild(parent)
        else:
            state = 'list'
        if sibling and sibling.tag == 'dl':
            # This is another item on an existing list
            dl = sibling
            if len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
                state = 'looselist'
        else:
            # This is a new list
            dl = etree.SubElement(parent, 'dl')
        # Add terms
        for term in terms:
            dt = etree.SubElement(dl, 'dt')
            dt.text = term
        # Add definition
        self.parser.state.set(state)
        dd = etree.SubElement(dl, 'dd')
        self.parser.parseBlocks(dd, [d])
        self.parser.state.reset()
        if theRest:
            # Defer unconsumed trailing text to the next parsing pass.
            blocks.insert(0, theRest)
class DefListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
    """ Process indented children of definition list items. """

    # Override the parent's item/list tags so nesting targets dd/dl.
    ITEM_TYPES = ['dd']
    LIST_TYPES = ['dl']

    def create_item(self, parent, block):
        """ Create a new dd and parse the block with it as the parent.

        Fixed: the original signature was ``create_item(parent, block)`` -
        missing ``self`` - so the base class's 3-argument call raised a
        TypeError, and ``self.parser`` inside would have been a NameError.
        """
        dd = markdown.etree.SubElement(parent, 'dd')
        self.parser.parseBlocks(dd, [block])
class DefListExtension(markdown.Extension):
    """ Add definition lists to Markdown. """
    def extendMarkdown(self, md, md_globals):
        """ Add an instance of DefListProcessor to BlockParser. """
        parser = md.parser
        # Handle indented children of dd items just after the stock indent pass.
        parser.blockprocessors.add(
            'defindent', DefListIndentProcessor(parser), '>indent')
        # Recognize the definition-list syntax itself, after unordered lists.
        parser.blockprocessors.add(
            'deflist', DefListProcessor(parser), '>ulist')
def makeExtension(configs={}):
    """Entry point used by markdown to instantiate the def_list extension."""
    extension = DefListExtension(configs=configs)
    return extension
| Python |
#!/usr/bin/python
"""
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
Basic Usage:
>>> import markdown
>>> text = '''Title: A Test Doc.
... Author: Waylan Limberg
... John Doe
... Blank_Data:
...
... The body. This is paragraph one.
... '''
>>> md = markdown.Markdown(['meta'])
>>> md.convert(text)
u'<p>The body. This is paragraph one.</p>'
>>> md.Meta
{u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']}
Make sure text without Meta Data still works (markdown < 1.6b returns a <p>).
>>> text = ' Some Code - not extra lines of meta data.'
>>> md = markdown.Markdown(['meta'])
>>> md.convert(text)
u'<pre><code>Some Code - not extra lines of meta data.\\n</code></pre>'
>>> md.Meta
{}
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
Project website: <http://www.freewisdom.org/project/python-markdown/Meta-Data>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
"""
import markdown, re
# Global Vars
# A "Key: value" meta-data line (up to 3 leading spaces allowed).
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
# A continuation line for the previous key (indented 4+ spaces).
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
class MetaExtension (markdown.Extension):
    """ Meta-Data extension for Python-Markdown. """
    def extendMarkdown(self, md, md_globals):
        """ Add MetaPreprocessor to Markdown instance. """
        # Must run before every other preprocessor so meta lines never
        # reach the body parser.
        preprocessor = MetaPreprocessor(md)
        md.preprocessors.add("meta", preprocessor, "_begin")
class MetaPreprocessor(markdown.preprocessors.Preprocessor):
    """ Get Meta-Data. """
    def run(self, lines):
        """ Parse Meta-Data and store in Markdown.Meta.

        Consumes leading "Key: value" lines (and indented continuations)
        from *lines*; returns the remaining lines unchanged.
        """
        meta = {}
        key = None
        # Fixed: guard on `lines` instead of `while 1` - a document that
        # consists solely of meta-data lines (no trailing blank line) would
        # otherwise raise IndexError on pop(0) from an empty list.
        while lines:
            line = lines.pop(0)
            if line.strip() == '':
                break # blank line - done
            m1 = META_RE.match(line)
            if m1:
                key = m1.group('key').lower().strip()
                meta[key] = [m1.group('value').strip()]
            else:
                m2 = META_MORE_RE.match(line)
                if m2 and key:
                    # Add another line to existing key
                    meta[key].append(m2.group('value').strip())
                else:
                    # Not meta-data: push the line back and stop scanning.
                    lines.insert(0, line)
                    break # no meta data - done
        self.markdown.Meta = meta
        return lines
def makeExtension(configs={}):
    """Entry point used by markdown to instantiate the meta extension."""
    extension = MetaExtension(configs=configs)
    return extension
if __name__ == "__main__":
    # Run the doctests embedded in the module docstring.
    import doctest
    doctest.testmod()
| Python |
"""
========================= IMAGE LINKS =================================
Turns paragraphs like
<~~~~~~~~~~~~~~~~~~~~~~~~
dir/subdir
dir/subdir
dir/subdir
~~~~~~~~~~~~~~
dir/subdir
dir/subdir
dir/subdir
~~~~~~~~~~~~~~~~~~~>
Into mini-photo galleries.
"""
import re, markdown
import url_manager
IMAGE_LINK = """<a href="%s"><img src="%s" title="%s"/></a>"""
SLIDESHOW_LINK = """<a href="%s" target="_blank">[slideshow]</a>"""
ALBUM_LINK = """ <a href="%s">[%s]</a>"""
class ImageLinksExtension(markdown.Extension):
    """Register the image-link preprocessor at the front of the chain."""
    def extendMarkdown(self, md, md_globals):
        preprocessor = ImageLinkPreprocessor(md)
        md.preprocessors.add("imagelink", preprocessor, "_begin")
class ImageLinkPreprocessor(markdown.preprocessors.Preprocessor):
    """Replace <~~~ ... ~~~> image blocks with rendered photo-gallery HTML."""

    def run(self, lines):
        # NOTE(review): hard-coded blog entry URL - presumably a leftover
        # from development; confirm whether this should come from config.
        url = url_manager.BlogEntryUrl(url_manager.BlogUrl("all"),
                                       "2006/08/29/the_rest_of_our")

        all_images = []          # every photo URL seen, for numbering
        blocks = []              # (unused here)
        in_image_block = False   # True while scanning inside <~~~ ... ~~~>
        new_lines = []           # output lines

        for line in lines:
            if line.startswith("<~~~~~~~"):
                # Opening marker: start collecting albums and rows.
                albums = []
                rows = []
                in_image_block = True
            if not in_image_block:
                new_lines.append(line)
            else:
                line = line.strip()
                if line.endswith("~~~~~~>") or not line:
                    # Closing marker (or blank line): emit the gallery HTML.
                    in_image_block = False
                    new_block = "<div><br/><center><span class='image-links'>\n"
                    album_url_hash = {}
                    for row in rows:
                        for photo_url, title in row:
                            new_block += " "
                            new_block += IMAGE_LINK % (photo_url,
                                                       photo_url.get_thumbnail(),
                                                       title)
                            album_url_hash[str(photo_url.get_album())] = 1
                        new_block += "<br/>"
                    new_block += "</span>"
                    new_block += SLIDESHOW_LINK % url.get_slideshow()
                    # Link every distinct album, sorted for stable output.
                    album_urls = album_url_hash.keys()
                    album_urls.sort()
                    if len(album_urls) == 1:
                        new_block += ALBUM_LINK % (album_urls[0], "complete album")
                    else :
                        for i in range(len(album_urls)) :
                            new_block += ALBUM_LINK % (album_urls[i],
                                                       "album %d" % (i + 1) )
                    new_lines.append(new_block + "</center><br/></div>")
                elif line[1:6] == "~~~~~" :
                    rows.append([])     # start a new row
                else :
                    # A photo line: "album/photo [optional title words...]".
                    parts = line.split()
                    line = parts[0]
                    title = " ".join(parts[1:])
                    album, photo = line.split("/")
                    photo_url = url.get_photo(album, photo,
                                              len(all_images)+1)
                    all_images.append(photo_url)
                    rows[-1].append((photo_url, title))
                    if not album in albums :
                        albums.append(album)
        return new_lines
def makeExtension(configs):
    """Entry point used by markdown to instantiate the imagelinks extension."""
    extension = ImageLinksExtension(configs)
    return extension
| Python |
import markdown
from markdown import etree
# Fallback feed metadata used when the extension is not configured.
DEFAULT_URL = "http://www.freewisdom.org/projects/python-markdown/"
DEFAULT_CREATOR = "Yuri Takhteyev"
DEFAULT_TITLE = "Markdown in Python"
GENERATOR = "http://www.freewisdom.org/projects/python-markdown/markdown2rss"
# Month name -> two-digit month number, used by get_time().
# NOTE(review): the key set mixes abbreviated ("Jan", "Feb") and full names -
# headings using any other form (e.g. "January", "Mar") will raise KeyError;
# confirm the expected heading format.
month_map = { "Jan" : "01",
              "Feb" : "02",
              "March" : "03",
              "April" : "04",
              "May" : "05",
              "June" : "06",
              "July" : "07",
              "August" : "08",
              "September" : "09",
              "October" : "10",
              "November" : "11",
              "December" : "12" }
def get_time(heading):
    """Parse a "Month DD, YYYY - ..." heading into an RDF timestamp at noon."""
    # Keep only the date portion and normalize separators to spaces.
    date_part = heading.split("-")[0].strip().replace(",", " ").replace(".", " ")
    month_name, day, year = date_part.split()
    numeric_month = month_map[month_name]
    return rdftime(" ".join((numeric_month, day, year, "12:00:00 AM")))
def rdftime(time):
    """Format a "M D Y h:m:s"-style string (any of ':', '/', or spaces as
    separators) as an ISO-like timestamp with a fixed -08:00 offset."""
    fields = time.replace(":", " ").replace("/", " ").split()
    return "%s-%s-%sT%s:%s:%s-08:00" % (fields[0], fields[1], fields[2],
                                        fields[3], fields[4], fields[5])
def get_date(text):
    """Placeholder: always returns the literal string "date" regardless of input."""
    placeholder = "date"
    return placeholder
class RssExtension (markdown.Extension):
    """Serialize the parsed document as an RSS 2.0 feed."""
    def extendMarkdown(self, md, md_globals):
        # Feed-level settings; each entry is [value, help-text].
        self.config = {
            'URL' : [DEFAULT_URL, "Main URL"],
            'CREATOR' : [DEFAULT_CREATOR, "Feed creator's name"],
            'TITLE' : [DEFAULT_TITLE, "Feed title"],
        }
        md.xml_mode = True
        # Tree processor that rebuilds the document as rss/channel/item.
        rss_processor = RssTreeProcessor(md)
        rss_processor.ext = self
        md.treeprocessors['rss'] = rss_processor
        md.stripTopLevelTags = 0
        md.docType = '<?xml version="1.0" encoding="utf-8"?>\n'
class RssTreeProcessor(markdown.treeprocessors.Treeprocessor):
    """Rebuild the document tree as <rss><channel>...</channel></rss>."""

    def run (self, root):
        rss = etree.Element("rss")
        rss.set("version", "2.0")
        channel = etree.SubElement(rss, "channel")
        # Channel-level metadata from the extension's config.
        for tag, text in (("title", self.ext.getConfig("TITLE")),
                          ("link", self.ext.getConfig("URL")),
                          ("description", None)):
            element = etree.SubElement(channel, tag)
            element.text = text
        for child in root:
            if child.tag in ["h1", "h2", "h3", "h4", "h5"]:
                # Each heading starts a new feed <item>.
                heading = child.text.strip()
                item = etree.SubElement(channel, "item")
                link = etree.SubElement(item, "link")
                link.text = self.ext.getConfig("URL")
                title = etree.SubElement(item, "title")
                title.text = heading
                # guid = heading reduced to its alphanumeric characters.
                guid = ''.join([x for x in heading if x.isalnum()])
                guidElem = etree.SubElement(item, "guid")
                guidElem.text = guid
                guidElem.set("isPermaLink", "false")
            elif child.tag in ["p"]:
                try:
                    # `item` is the element of the most recent heading; a <p>
                    # before any heading triggers UnboundLocalError here,
                    # which is used deliberately to skip such paragraphs.
                    description = etree.SubElement(item, "description")
                except UnboundLocalError:
                    # Item not defined - moving on
                    pass
                else:
                    if len(child):
                        content = "\n".join([etree.tostring(node)
                                             for node in child])
                    else:
                        content = child.text
                    # Stash raw CDATA so later serialization won't escape it.
                    pholder = self.markdown.htmlStash.store(
                                "<![CDATA[ %s]]>" % content)
                    description.text = pholder
        return rss
def makeExtension(configs):
    """Entry point used by markdown to instantiate the rss extension."""
    extension = RssExtension(configs)
    return extension
| Python |
#!/usr/bin/env python
"""
HTML Tidy Extension for Python-Markdown
=======================================
Runs [HTML Tidy][] on the output of Python-Markdown using the [uTidylib][]
Python wrapper. Both libtidy and uTidylib must be installed on your system.
Note than any Tidy [options][] can be passed in as extension configs. So,
for example, to output HTML rather than XHTML, set ``output_xhtml=0``. To
indent the output, set ``indent=auto`` and to have Tidy wrap the output in
``<html>`` and ``<body>`` tags, set ``show_body_only=0``.
[HTML Tidy]: http://tidy.sourceforge.net/
[uTidylib]: http://utidylib.berlios.de/
[options]: http://tidy.sourceforge.net/docs/quickref.html
Copyright (c)2008 [Waylan Limberg](http://achinghead.com)
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
* [HTML Tidy](http://utidylib.berlios.de/)
* [uTidylib](http://utidylib.berlios.de/)
"""
import markdown
import tidy
class TidyExtension(markdown.Extension):
    """Run HTML Tidy over Markdown's serialized output."""
    def __init__(self, configs):
        # Defaults match typical markdown behavior: an XHTML body fragment.
        self.config = {'output_xhtml': 1, 'show_body_only': 1}
        # User-supplied configs override the defaults where present.
        for item in configs:
            self.config[item[0]] = item[1]
    def extendMarkdown(self, md, md_globals):
        # Save options to markdown instance
        md.tidy_options = self.config
        # Add TidyProcessor to postprocessors
        md.postprocessors['tidy'] = TidyProcessor(md)
class TidyProcessor(markdown.postprocessors.Postprocessor):
    """Postprocessor that pipes the final HTML through uTidylib."""

    def run(self, text):
        # Pass text to Tidy. As Tidy does not accept unicode we need to encode
        # it and decode its return value.
        return unicode(tidy.parseString(text.encode('utf-8'),
                                        **self.markdown.tidy_options))
def makeExtension(configs=None):
    """Entry point used by markdown to instantiate the tidy extension."""
    extension = TidyExtension(configs=configs)
    return extension
| Python |
"""
========================= FOOTNOTES =================================
This section adds footnote handling to markdown. It can be used as
an example for extending python-markdown with relatively complex
functionality. While in this case the extension is included inside
the module itself, it could just as easily be added from outside the
module. Note that all markdown classes above are ignorant about
footnotes. All footnote functionality is provided separately and
then added to the markdown instance at the run time.
Footnote functionality is attached by calling extendMarkdown()
method of FootnoteExtension. The method also registers the
extension to allow it's state to be reset by a call to reset()
method.
Example:
Footnotes[^1] have a label[^label] and a definition[^!DEF].
[^1]: This is a footnote
[^label]: A footnote on "label"
[^!DEF]: The footnote for definition
"""
import re, markdown
from markdown import etree
# Unique placeholder strings, swapped for real markup in postprocessing.
FN_BACKLINK_TEXT = "zz1337820767766393qq"
NBSP_PLACEHOLDER = "qq3936677670287331zz"
# A footnote definition line: up to 3 leading spaces, "[^label]:", then text.
DEF_RE = re.compile(r'(\ ?\ ?\ ?)\[\^([^\]]*)\]:\s*(.*)')
# A line indented by one tab (or the spaces run shown) - continuation of a definition.
TABBED_RE = re.compile(r'((\t)|( ))(.*)')
class FootnoteExtension(markdown.Extension):
    """ Footnote Extension. """

    def __init__ (self, configs):
        """ Setup configs. """
        self.config = {'PLACE_MARKER':
                       ["///Footnotes Go Here///",
                        "The text string that marks where the footnotes go"],
                       'UNIQUE_IDS':
                       [False,
                        "Avoid name collisions across "
                        "multiple calls to reset()."]}
        for key, value in configs:
            self.config[key][0] = value
        # In multiple invocations, emit links that don't get tangled.
        self.unique_prefix = 0
        self.reset()

    def extendMarkdown(self, md, md_globals):
        """ Add pieces to Markdown. """
        md.registerExtension(self)
        self.parser = md.parser
        # Insert a preprocessor before ReferencePreprocessor
        md.preprocessors.add("footnote", FootnotePreprocessor(self),
                             "<reference")
        # Insert an inline pattern before ImageReferencePattern
        FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah
        md.inlinePatterns.add("footnote", FootnotePattern(FOOTNOTE_RE, self),
                              "<reference")
        # Insert a tree-processor that would actually add the footnote div
        # This must be before the inline treeprocessor so inline patterns
        # run on the contents of the div.
        md.treeprocessors.add("footnote", FootnoteTreeprocessor(self),
                              "<inline")
        # Insert a postprocessor after amp_substitute processor
        md.postprocessors.add("footnote", FootnotePostprocessor(self),
                              ">amp_substitute")

    def reset(self):
        """ Clear the footnotes on reset, and prepare for a distinct document. """
        self.footnotes = markdown.odict.OrderedDict()
        self.unique_prefix += 1

    def findFootnotesPlaceholder(self, root):
        """ Return ElementTree Element that contains Footnote placeholder.

        The boolean in the returned tuple tells the caller whether the
        marker was found in the element's text (True) or its tail (False).
        """
        def finder(element):
            for child in element:
                if child.text:
                    if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
                        return child, True
                if child.tail:
                    if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
                        return (child, element), False
                finder(child)
            return None

        res = finder(root)
        return res

    def setFootnote(self, id, text):
        """ Store a footnote for later retrieval. """
        self.footnotes[id] = text

    def makeFootnoteId(self, id):
        """ Return footnote link id. """
        if self.getConfig("UNIQUE_IDS"):
            return 'fn:%d-%s' % (self.unique_prefix, id)
        else:
            return 'fn:%s' % id

    def makeFootnoteRefId(self, id):
        """ Return footnote back-link id. """
        if self.getConfig("UNIQUE_IDS"):
            return 'fnref:%d-%s' % (self.unique_prefix, id)
        else:
            return 'fnref:%s' % id

    def makeFootnotesDiv(self, root):
        """ Return div of footnotes as et Element, or None if there are none. """
        if not self.footnotes.keys():
            return None

        div = etree.Element("div")
        div.set('class', 'footnote')
        hr = etree.SubElement(div, "hr")
        ol = etree.SubElement(div, "ol")

        for id in self.footnotes.keys():
            li = etree.SubElement(ol, "li")
            li.set("id", self.makeFootnoteId(id))
            self.parser.parseChunk(li, self.footnotes[id])
            backlink = etree.Element("a")
            backlink.set("href", "#" + self.makeFootnoteRefId(id))
            backlink.set("rev", "footnote")
            # NOTE(review): relies on markdown.odict.OrderedDict providing
            # an index() method - confirm against the bundled odict.
            backlink.set("title", "Jump back to footnote %d in the text" % \
                            (self.footnotes.index(id)+1))
            backlink.text = FN_BACKLINK_TEXT

            if li.getchildren():
                # Attach the backlink to the last paragraph, or make one.
                node = li[-1]
                if node.tag == "p":
                    node.text = node.text + NBSP_PLACEHOLDER
                    node.append(backlink)
                else:
                    p = etree.SubElement(li, "p")
                    p.append(backlink)
        return div
class FootnotePreprocessor(markdown.preprocessors.Preprocessor):
    """ Find all footnote references and store for later use. """
    def __init__(self, footnotes):
        # Parent FootnoteExtension; definitions are stored via setFootnote().
        self.footnotes = footnotes

    def run(self, lines):
        """ Strip footnote definitions out of the document's lines. """
        lines = self._handleFootnoteDefinitions(lines)
        text = "\n".join(lines)
        return text.split("\n")

    def _handleFootnoteDefinitions(self, lines):
        """
        Recursively find all footnote definitions in lines.

        Keywords:

        * lines: A list of lines of text

        Return: A list of lines with footnote definitions removed.
        """
        i, id, footnote = self._findFootnoteDefinition(lines)
        if id:
            plain = lines[:i]
            detabbed, theRest = self.detectTabbed(lines[i+1:])
            self.footnotes.setFootnote(id,
                                       footnote + "\n"
                                       + "\n".join(detabbed))
            more_plain = self._handleFootnoteDefinitions(theRest)
            return plain + [""] + more_plain
        else:
            return lines

    def _findFootnoteDefinition(self, lines):
        """
        Find the parts of a footnote definition.

        Keywords:

        * lines: A list of lines of text.

        Return: A three item tuple containing the index of the first line of a
        footnote definition, the id of the definition and the body of the
        definition.
        """
        counter = 0
        for line in lines:
            m = DEF_RE.match(line)
            if m:
                return counter, m.group(2), m.group(3)
            counter += 1
        return counter, None, None

    def detectTabbed(self, lines):
        """ Find indented text and remove indent before further processing.

        Keyword arguments:

        * lines: an array of strings

        Returns: a list of post processed items and the unused
        remainder of the original list
        """
        items = []
        i = 0 # to keep track of where we are

        def detab(line):
            # Strip one level of indent; falls through (None) for
            # unindented lines.
            match = TABBED_RE.match(line)
            if match:
                return match.group(4)

        for line in lines:
            if line.strip(): # Non-blank line
                line = detab(line)
                if line:
                    items.append(line)
                    i += 1
                    continue
                else:
                    return items, lines[i:]
            else: # Blank line: _maybe_ we are done.
                i += 1 # advance
                # Find the next non-blank line
                for j in range(i, len(lines)):
                    if lines[j].strip():
                        next_line = lines[j]; break
                else:
                    break # There is no more text; we are done.
                # Check if the next non-blank line is tabbed
                if detab(next_line): # Yes, more work to do.
                    items.append("")
                    continue
                else:
                    break # No, we are done.
        else:
            # Loop exhausted without a break: every line was consumed.
            i += 1
        return items, lines[i:]
class FootnotePattern(markdown.inlinepatterns.Pattern):
    """ InlinePattern for footnote markers in a document's body text. """

    def __init__(self, pattern, footnotes):
        markdown.inlinepatterns.Pattern.__init__(self, pattern)
        self.footnotes = footnotes

    def handleMatch(self, m):
        """ Build the <sup><a> marker element for a footnote reference. """
        id = m.group(2)
        sup = etree.Element("sup")
        sup.set('id', self.footnotes.makeFootnoteRefId(id))
        a = etree.SubElement(sup, "a")
        a.set('href', '#' + self.footnotes.makeFootnoteId(id))
        a.set('rel', 'footnote')
        # Visible marker text is the footnote's 1-based position.
        a.text = str(self.footnotes.footnotes.index(id) + 1)
        return sup
class FootnoteTreeprocessor(markdown.treeprocessors.Treeprocessor):
    """ Build and append footnote div to end of document. """
    def __init__ (self, footnotes):
        self.footnotes = footnotes

    def run(self, root):
        """ Place the footnote div at the marker, or append it to root. """
        footnotesDiv = self.footnotes.makeFootnotesDiv(root)
        if footnotesDiv is None:
            return
        result = self.footnotes.findFootnotesPlaceholder(root)
        if result:
            node, isText = result
            if isText:
                # Marker was in node's text: drop the text and make the
                # div the node's first child.  The previous code called
                # node.getchildren().insert(0, ...), which mutated a
                # throwaway list copy and silently did nothing.
                node.text = None
                node.insert(0, footnotesDiv)
            else:
                # Marker was in a child's tail: insert the div right after
                # that child.  The previous code called list.find() (no
                # such method -> AttributeError) and then referenced an
                # undefined name `fnPlaceholder`.
                child, element = node
                ind = list(element).index(child)
                element.insert(ind + 1, footnotesDiv)
                child.tail = None
        else:
            root.append(footnotesDiv)
class FootnotePostprocessor(markdown.postprocessors.Postprocessor):
    """ Replace placeholders with html entities. """
    def run(self, text):
        # Swap the stashed back-link and non-breaking-space placeholders
        # for their final output characters.
        # NOTE(review): these replacement literals look entity-decoded
        # (upstream python-markdown uses "&#8617;" and "&#160;") - confirm
        # the intended bytes before relying on the emitted characters.
        text = text.replace(FN_BACKLINK_TEXT, "↩")
        return text.replace(NBSP_PLACEHOLDER, " ")
def makeExtension(configs=None):
    """ Return an instance of the FootnoteExtension.

    *configs* is an iterable of (key, value) pairs.  A fresh empty list is
    substituted when it is omitted, avoiding the shared mutable-default
    argument the previous signature (configs=[]) carried.
    """
    return FootnoteExtension(configs=configs if configs is not None else [])
| Python |
#!/usr/bin/env python
'''
WikiLinks Extension for Python-Markdown
======================================
Converts [[WikiLinks]] to relative links. Requires Python-Markdown 2.0+
Basic usage:
>>> import markdown
>>> text = "Some text with a [[WikiLink]]."
>>> html = markdown.markdown(text, ['wikilinks'])
>>> html
u'<p>Some text with a <a class="wikilink" href="/WikiLink/">WikiLink</a>.</p>'
Whitespace behavior:
>>> markdown.markdown('[[ foo bar_baz ]]', ['wikilinks'])
u'<p><a class="wikilink" href="/foo_bar_baz/">foo bar_baz</a></p>'
>>> markdown.markdown('foo [[ ]] bar', ['wikilinks'])
u'<p>foo bar</p>'
To define custom settings the simple way:
>>> markdown.markdown(text,
... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
... )
u'<p>Some text with a <a class="foo" href="/wiki/WikiLink.html">WikiLink</a>.</p>'
Custom settings the complex way:
>>> md = markdown.Markdown(
... extensions = ['wikilinks'],
... extension_configs = {'wikilinks': [
... ('base_url', 'http://example.com/'),
... ('end_url', '.html'),
... ('html_class', '') ]},
... safe_mode = True)
>>> md.convert(text)
u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>'
Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
>>> text = """wiki_base_url: http://example.com/
... wiki_end_url: .html
... wiki_html_class:
...
... Some text with a [[WikiLink]]."""
>>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
>>> md.convert(text)
u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>'
MetaData should not carry over to next document:
>>> md.convert("No [[MetaData]] here.")
u'<p>No <a class="wikilink" href="/MetaData/">MetaData</a> here.</p>'
Define a custom URL builder:
>>> def my_url_builder(label, base, end):
... return '/bar/'
>>> md = markdown.Markdown(extensions=['wikilinks'],
... extension_configs={'wikilinks' : [('build_url', my_url_builder)]})
>>> md.convert('[[foo]]')
u'<p><a class="wikilink" href="/bar/">foo</a></p>'
From the command line:
python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
By [Waylan Limberg](http://achinghead.com/).
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
'''
import markdown
import re
def build_url(label, base, end):
    """ Build a url from the label, a base, and an end.

    Runs of spaces (optionally touching an underscore) collapse to a
    single underscore before the pieces are joined.
    """
    slug = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
    return '%s%s%s' % (base, slug, end)
class WikiLinkExtension(markdown.Extension):
    """ Wire the [[WikiLink]] inline pattern into Markdown. """

    def __init__(self, configs):
        # Extension defaults; each entry is [value, help text].
        self.config = {
            'base_url' : ['/', 'String to append to beginning or URL.'],
            'end_url' : ['/', 'String to append to end of URL.'],
            'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
            'build_url' : [build_url, 'Callable formats URL from label.'],
        }
        # User-supplied (key, value) pairs override the defaults.
        for name, val in configs:
            self.setConfig(name, val)

    def extendMarkdown(self, md, md_globals):
        self.md = md
        # Append to the end of the inline patterns.
        wikilink_re = r'\[\[([A-Za-z0-9_ -]+)\]\]'
        pattern = WikiLinks(wikilink_re, self.config)
        pattern.md = md
        md.inlinePatterns.add('wikilink', pattern, "<not_strong")
class WikiLinks(markdown.inlinepatterns.Pattern):
    """ Inline pattern that turns [[WikiLink]] text into an <a> element. """
    def __init__(self, pattern, config):
        markdown.inlinepatterns.Pattern.__init__(self, pattern)
        self.config = config

    def handleMatch(self, m):
        """ Return an <a> element for the label, or '' when it is blank. """
        if m.group(2).strip():
            base_url, end_url, html_class = self._getMeta()
            label = m.group(2).strip()
            url = self.config['build_url'][0](label, base_url, end_url)
            a = markdown.etree.Element('a')
            a.text = label
            a.set('href', url)
            if html_class:
                a.set('class', html_class)
        else:
            a = ''
        return a

    def _getMeta(self):
        """ Return meta data or config data. """
        base_url = self.config['base_url'][0]
        end_url = self.config['end_url'][0]
        html_class = self.config['html_class'][0]
        if hasattr(self.md, 'Meta'):
            # `in` replaces dict.has_key(), which is deprecated and was
            # removed in Python 3; behavior is identical here.
            if 'wiki_base_url' in self.md.Meta:
                base_url = self.md.Meta['wiki_base_url'][0]
            if 'wiki_end_url' in self.md.Meta:
                end_url = self.md.Meta['wiki_end_url'][0]
            if 'wiki_html_class' in self.md.Meta:
                html_class = self.md.Meta['wiki_html_class'][0]
        return base_url, end_url, html_class
def makeExtension(configs=None):
    """ Return the WikiLink extension.

    WikiLinkExtension.__init__ iterates *configs*, so an empty list is
    substituted for None; previously calling makeExtension() with no
    arguments raised TypeError inside the constructor.
    """
    return WikiLinkExtension(configs=configs if configs is not None else [])
# Run this module's docstring examples as doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| Python |
#!/usr/bin/python
"""
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://www.freewisdom.org/project/python-markdown/CodeHilite>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org/)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
* [Pygments](http://pygments.org/)
"""
import markdown
# --------------- CONSTANTS YOU MIGHT WANT TO MODIFY -----------------
# Use the host markdown package's tab width when it exposes one; some
# versions do not define TAB_LENGTH, so fall back to the conventional 4.
try:
    TAB_LENGTH = markdown.TAB_LENGTH
except AttributeError:
    TAB_LENGTH = 4
# ------------------ The Main CodeHilite Class ----------------------
class CodeHilite:
    """
    Determine language of source code, and pass it into the pygments
    highlighter.

    Basic Usage:
        >>> code = CodeHilite(src = 'some text')
        >>> html = code.hilite()

    * src: Source string or any object with a .readline attribute.

    * linenos: (Boolean) Turn line numbering 'on' or 'off' (off by default).

    * css_class: Set class name of wrapper div ('codehilite' by default).

    Low Level Usage:
        >>> code = CodeHilite()
        >>> code.src = 'some text' # String or anything with a .readline attr.
        >>> code.linenos = True # True or False; Turns line numbering on or off.
        >>> html = code.hilite()
    """

    def __init__(self, src=None, linenos=False, css_class="codehilite"):
        self.src = src
        self.lang = None        # filled in by _getLang() from a (mock) shebang
        self.linenos = linenos
        self.css_class = css_class

    def hilite(self):
        """
        Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter
        with optional line numbers. The output should then be styled with css
        to your liking. No styles are applied by default - only styling hooks
        (i.e.: <span class="k">).

        returns : A string of html.
        """
        self.src = self.src.strip('\n')
        self._getLang()

        try:
            from pygments import highlight
            from pygments.lexers import get_lexer_by_name, guess_lexer, \
                 TextLexer
            from pygments.formatters import HtmlFormatter
        except ImportError:
            # Pygments is unavailable: just escape and pass through.
            txt = self._escape(self.src)
            if self.linenos:
                txt = self._number(txt)
            else:
                txt = '<div class="%s"><pre>%s</pre></div>\n'% \
                      (self.css_class, txt)
            return txt
        else:
            try:
                lexer = get_lexer_by_name(self.lang)
            except ValueError:
                try:
                    # Unknown language name: let Pygments guess from content.
                    lexer = guess_lexer(self.src)
                except ValueError:
                    lexer = TextLexer()
            formatter = HtmlFormatter(linenos=self.linenos,
                                      cssclass=self.css_class)
            return highlight(self.src, lexer, formatter)

    def _escape(self, txt):
        """ basic html escaping """
        # '&' must be escaped first so other entities are not mangled.
        # The previous revision replaced each character with itself (an
        # entity-decoding casualty), making this method a no-op.
        txt = txt.replace('&', '&amp;')
        txt = txt.replace('<', '&lt;')
        txt = txt.replace('>', '&gt;')
        txt = txt.replace('"', '&quot;')
        return txt

    def _number(self, txt):
        """ Use <ol> for line numbering """
        # Fix Whitespace
        txt = txt.replace('\t', ' '*TAB_LENGTH)
        # NOTE(review): these replacement literals look entity-decoded
        # (upstream uses "&nbsp; " sequences) - confirm against upstream
        # before relying on the rendered indentation.
        txt = txt.replace(" "*4, " ")
        txt = txt.replace(" "*3, " ")
        txt = txt.replace(" "*2, " ")

        # Add line numbers; honor the configured wrapper class instead of
        # the previously hard-coded "codehilite" (keeps this path
        # consistent with hilite()'s non-numbered branch).
        lines = txt.splitlines()
        txt = '<div class="%s"><pre><ol>\n' % self.css_class
        for line in lines:
            txt += '\t<li>%s</li>\n'% line
        txt += '</ol></pre></div>\n'
        return txt

    def _getLang(self):
        """
        Determines language of a code block from shebang lines and whether
        said line should be removed or left in place. If the shebang line
        contains a path (even a single /) then it is assumed to be a real
        shebang line and left alone. However, if no path is given
        (i.e.: #!python or :::python) then it is assumed to be a mock shebang
        for language identification of a code fragment and removed from the
        code block prior to processing for code highlighting. When a mock
        shebang (i.e.: #!python) is found, line numbering is turned on. When
        colons are found in place of a shebang (i.e.: :::python), line
        numbering is left in the current state - off by default.
        """
        import re

        #split text into lines
        lines = self.src.split("\n")
        #pull first line to examine
        fl = lines.pop(0)

        c = re.compile(r'''
            (?:(?:::+)|(?P<shebang>[#]!)) # Shebang or 2 or more colons.
            (?P<path>(?:/\w+)*[/ ])?      # Zero or 1 path
            (?P<lang>[\w+-]*)             # The language
            ''', re.VERBOSE)
        # search first line for shebang
        m = c.search(fl)
        if m:
            # we have a match
            try:
                self.lang = m.group('lang').lower()
            except IndexError:
                self.lang = None
            if m.group('path'):
                # path exists - restore first line
                lines.insert(0, fl)
            if m.group('shebang'):
                # shebang exists - use line numbers
                self.linenos = True
        else:
            # No match
            lines.insert(0, fl)

        self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension -------------------------------
class HiliteTreeprocessor(markdown.treeprocessors.Treeprocessor):
    """ Hilight source code in code blocks. """

    def run(self, root):
        """ Find code blocks and store in htmlStash. """
        for block in root.getiterator('pre'):
            kids = block.getchildren()
            if len(kids) != 1 or kids[0].tag != 'code':
                continue
            code = CodeHilite(kids[0].text,
                              linenos=self.config['force_linenos'][0],
                              css_class=self.config['css_class'][0])
            placeholder = self.markdown.htmlStash.store(code.hilite(),
                                                        safe=True)
            # Swap the whole <pre> for a <p> holding the placeholder; the
            # raw-html pass later replaces that <p> with the stashed html.
            block.clear()
            block.tag = 'p'
            block.text = placeholder
class CodeHiliteExtension(markdown.Extension):
    """ Add source code hilighting to markdown codeblocks. """

    def __init__(self, configs):
        # Default settings; each entry is [value, help text].
        self.config = {
            'force_linenos' : [False, "Force line numbers - Default: False"],
            'css_class' : ["codehilite",
                           "Set class name for wrapper <div> - Default: codehilite"],
        }
        # Apply user overrides on top of the defaults.
        for name, val in configs:
            self.setConfig(name, val)

    def extendMarkdown(self, md, md_globals):
        """ Add HilitePostprocessor to Markdown instance. """
        hiliter = HiliteTreeprocessor(md)
        hiliter.config = self.config
        md.treeprocessors.add("hilite", hiliter, "_begin")
def makeExtension(configs=None):
    """ Return the CodeHilite extension.

    An empty dict replaces the previous mutable default argument
    (configs={}); behavior is unchanged for all callers.
    """
    return CodeHiliteExtension(configs=configs if configs is not None else {})
| Python |
"""
Table of Contents Extension for Python-Markdown
* * *
(c) 2008 [Jack Miller](http://codezen.org)
Dependencies:
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown
from markdown import etree
import re
class TocTreeprocessor(markdown.treeprocessors.Treeprocessor):
    # Builds a table-of-contents <div> from the document's h1-h6 headers
    # and swaps it in wherever the configured marker text (e.g. "[TOC]")
    # appears in the tree.

    # Iterator wrapper to get parent and child all at once
    def iterparent(self, root):
        # Yields (parent, child) for every parent/child pair in the tree.
        for parent in root.getiterator():
            for child in parent:
                yield parent, child
    def run(self, doc):
        # Mutates *doc* in place: assigns ids to headers, builds the TOC
        # div, and replaces the marker element with it.
        div = etree.Element("div")
        div.attrib["class"] = "toc"
        last_li = None
        # Add title to the div
        if self.config["title"][0]:
            header = etree.SubElement(div, "span")
            header.attrib["class"] = "toctitle"
            header.text = self.config["title"][0]
        # level tracks current nesting depth; list_stack[-1] is the <ul>
        # (or the div itself) that the next <li> should be appended to.
        level = 0
        list_stack=[div]
        header_rgx = re.compile("[Hh][123456]")
        # Get a list of id attributes
        used_ids = []
        for c in doc.getiterator():
            if "id" in c.attrib:
                used_ids.append(c.attrib["id"])
        for (p, c) in self.iterparent(doc):
            if not c.text:
                continue
            # To keep the output from screwing up the
            # validation by putting a <div> inside of a <p>
            # we actually replace the <p> in its entirety.
            # We do not allow the marker inside a header as that
            # would cause an endless loop of placing a new TOC
            # inside previously generated TOC.
            if c.text.find(self.config["marker"][0]) > -1 and not header_rgx.match(c.tag):
                for i in range(len(p)):
                    if p[i] == c:
                        p[i] = div
                        break
            if header_rgx.match(c.tag):
                # h1 -> level 1, h2 -> level 2, ... from the tag's digit.
                tag_level = int(c.tag[-1])
                # Pop finished sub-lists until the stack depth matches.
                while tag_level < level:
                    list_stack.pop()
                    level -= 1
                # Open a nested <ul> when the header is deeper than the
                # current level (attached to the previous <li> if any).
                if tag_level > level:
                    newlist = etree.Element("ul")
                    if last_li:
                        last_li.append(newlist)
                    else:
                        list_stack[-1].append(newlist)
                    list_stack.append(newlist)
                    level += 1
                # Do not override pre-existing ids
                if not "id" in c.attrib:
                    id = self.config["slugify"][0](c.text)
                    # De-duplicate by appending _1, _2, ... suffixes.
                    if id in used_ids:
                        ctr = 1
                        while "%s_%d" % (id, ctr) in used_ids:
                            ctr += 1
                        id = "%s_%d" % (id, ctr)
                    used_ids.append(id)
                    c.attrib["id"] = id
                else:
                    id = c.attrib["id"]
                # List item link, to be inserted into the toc div
                last_li = etree.Element("li")
                link = etree.SubElement(last_li, "a")
                link.text = c.text
                link.attrib["href"] = '#' + id
                # Optionally turn the header itself into a self-link.
                if int(self.config["anchorlink"][0]):
                    anchor = etree.SubElement(c, "a")
                    anchor.text = c.text
                    anchor.attrib["href"] = "#" + id
                    anchor.attrib["class"] = "toclink"
                    c.text = ""
                list_stack[-1].append(last_li)
class TocExtension(markdown.Extension):
    """ Register the TOC treeprocessor and its configuration. """

    def __init__(self, configs):
        # Defaults; each entry is [value, help text].
        self.config = { "marker" : ["[TOC]",
                            "Text to find and replace with Table of Contents -"
                            "Defaults to \"[TOC]\""],
                        "slugify" : [self.slugify,
                            "Function to generate anchors based on header text-"
                            "Defaults to a built in slugify function."],
                        "title" : [None,
                            "Title to insert into TOC <div> - "
                            "Defaults to None"],
                        "anchorlink" : [0,
                            "1 if header should be a self link"
                            "Defaults to 0"]}
        # User-supplied (key, value) pairs override the defaults.
        for name, val in configs:
            self.setConfig(name, val)

    # This is exactly the same as Django's slugify
    def slugify(self, value):
        """ Slugify a string, to make it URL friendly. """
        import unicodedata
        ascii_value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
        cleaned = unicode(re.sub('[^\w\s-]', '', ascii_value).strip().lower())
        return re.sub('[-\s]+','-',cleaned)

    def extendMarkdown(self, md, md_globals):
        toc_proc = TocTreeprocessor(md)
        toc_proc.config = self.config
        md.treeprocessors.add("toc", toc_proc, "_begin")
def makeExtension(configs=None):
    """ Return the TOC extension.

    An empty dict replaces the previous mutable default argument
    (configs={}); behavior is unchanged for all callers.
    """
    return TocExtension(configs=configs if configs is not None else {})
| Python |
#!/usr/bin/env python
"""
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> html
u'<p>A paragraph before a fenced code block:</p>\\n<pre><code>Fenced code block\\n</code></pre>'
Works with safe_mode also (we check this because we are using the HtmlStash):
>>> markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
u'<p>A paragraph before a fenced code block:</p>\\n<pre><code>Fenced code block\\n</code></pre>'
Include tilde's in a code block and wrap with blank lines:
>>> text = '''
... ~~~~~~~~
...
... ~~~~
...
... ~~~~~~~~'''
>>> markdown.markdown(text, extensions=['fenced_code'])
u'<pre><code>\\n~~~~\\n\\n</code></pre>'
Multiple blocks and language tags:
>>> text = '''
... ~~~~{.python}
... block one
... ~~~~
...
... ~~~~.html
... <p>block two</p>
... ~~~~'''
>>> markdown.markdown(text, extensions=['fenced_code'])
u'<pre><code class="python">block one\\n</code></pre>\\n\\n<pre><code class="html"><p>block two</p>\\n</code></pre>'
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://www.freewisdom.org/project/python-markdown/Fenced__Code__Blocks>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown, re
# Global vars
# Matches a ~~~ fence (3 or more tildes), an optional {.lang} / .lang tag,
# the enclosed code, and a matching closing fence at end of line.
FENCED_BLOCK_RE = re.compile( \
    r'(?P<fence>^~{3,})[ ]*(\{?\.(?P<lang>[a-zA-Z0-9_-]*)\}?)?[ ]*\n(?P<code>.*?)(?P=fence)[ ]*$',
    re.MULTILINE|re.DOTALL
    )
# HTML templates used when stashing a matched block.
CODE_WRAP = '<pre><code%s>%s</code></pre>'
LANG_TAG = ' class="%s"'
class FencedCodeExtension(markdown.Extension):

    def extendMarkdown(self, md, md_globals):
        """ Add FencedBlockPreprocessor to the Markdown instance. """
        preprocessor = FencedBlockPreprocessor(md)
        md.preprocessors.add('fenced_code_block', preprocessor, "_begin")
class FencedBlockPreprocessor(markdown.preprocessors.Preprocessor):
    """ Replace fenced code blocks with stashed, escaped <pre><code> html. """

    def run(self, lines):
        """ Match and store Fenced Code Blocks in the HtmlStash. """
        text = "\n".join(lines)
        while 1:
            m = FENCED_BLOCK_RE.search(text)
            if not m:
                break
            lang = ''
            if m.group('lang'):
                lang = LANG_TAG % m.group('lang')
            code = CODE_WRAP % (lang, self._escape(m.group('code')))
            placeholder = self.markdown.htmlStash.store(code, safe=True)
            # Splice the placeholder where the block was, then rescan.
            text = '%s\n%s\n%s'% (text[:m.start()], placeholder, text[m.end():])
        return text.split("\n")

    def _escape(self, txt):
        """ basic html escaping """
        # '&' must be escaped first so other entities are not mangled.
        # The previous revision replaced each character with itself (an
        # entity-decoding casualty), making this method a no-op.
        txt = txt.replace('&', '&amp;')
        txt = txt.replace('<', '&lt;')
        txt = txt.replace('>', '&gt;')
        txt = txt.replace('"', '&quot;')
        return txt
def makeExtension(configs=None):
    # Entry point for markdown's extension loader; *configs* is accepted
    # for interface compatibility but ignored - the extension has no options.
    return FencedCodeExtension()
# Run this module's docstring examples as doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| Python |
'''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
Simple Usage:
>>> import markdown
>>> text = """
... Some text with an ABBR and a REF. Ignore REFERENCE and ref.
...
... *[ABBR]: Abbreviation
... *[REF]: Abbreviation Reference
... """
>>> markdown.markdown(text, ['abbr'])
u'<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>'
Copyright 2007-2008
* [Waylan Limberg](http://achinghead.com/)
* [Seemant Kulleen](http://www.kulleen.org/)
'''
import markdown, re
from markdown import etree
# Global Vars
# Matches abbreviation definition lines like: *[ABBR]: Expanded text
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(markdown.Extension):
    """ Abbreviation Extension for Python-Markdown. """

    def extendMarkdown(self, md, md_globals):
        """ Insert AbbrPreprocessor before ReferencePreprocessor. """
        preprocessor = AbbrPreprocessor(md)
        md.preprocessors.add('abbr', preprocessor, '<reference')
class AbbrPreprocessor(markdown.preprocessors.Preprocessor):
    """ Abbreviation Preprocessor - parse text for abbr references. """

    def run(self, lines):
        '''
        Find and remove all Abbreviation references from the text.
        Each reference is set as a new AbbrPattern in the markdown instance.
        '''
        kept = []
        for line in lines:
            match = ABBR_REF_RE.match(line)
            if not match:
                kept.append(line)
                continue
            abbr = match.group('abbr').strip()
            title = match.group('title').strip()
            # Register an inline pattern for this abbreviation.
            self.markdown.inlinePatterns['abbr-%s'%abbr] = \
                AbbrPattern(self._generate_pattern(abbr), title)
        return kept

    def _generate_pattern(self, text):
        '''
        Given a string, returns an regex pattern to match that string.

        'HTML' -> r'(?P<abbr>[H][T][M][L])'

        Note: we force each char as a literal match (in brackets) as we don't
        know what they will be beforehand.
        '''
        pieces = [r'[%s]' % ch for ch in text]
        return r'(?P<abbr>\b%s\b)' % (r''.join(pieces))
class AbbrPattern(markdown.inlinepatterns.Pattern):
    """ Abbreviation inline pattern. """

    def __init__(self, pattern, title):
        markdown.inlinepatterns.Pattern.__init__(self, pattern)
        self.title = title

    def handleMatch(self, m):
        """ Wrap the matched text in an <abbr title="..."> element. """
        el = etree.Element('abbr')
        el.set('title', self.title)
        el.text = m.group('abbr')
        return el
def makeExtension(configs=None):
    # Entry point used by markdown.Markdown(extensions=['abbr']).
    return AbbrExtension(configs=configs)
# Run this module's docstring examples as doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| Python |
#!/usr/bin/env python
"""
Python-Markdown Extra Extension
===============================
A compilation of various Python-Markdown extensions that imitates
[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
Note that each of the individual extensions still need to be available
on your PYTHONPATH. This extension simply wraps them all up as a
convenience so that only one extension needs to be listed when
initiating Markdown. See the documentation for each individual
extension for specifics about that extension.
In the event that one or more of the supported extensions are not
available for import, Markdown will issue a warning and simply continue
without that extension.
There may be additional extensions that are distributed with
Python-Markdown that are not included here in Extra. Those extensions
are not part of PHP Markdown Extra, and therefore, not part of
Python-Markdown Extra. If you really would like Extra to include
additional extensions, we suggest creating your own clone of Extra
under a differant name. You could also edit the `extensions` global
variable defined below, but be aware that such changes may be lost
when you upgrade to any future version of Python-Markdown.
"""
import markdown
# The sub-extensions bundled as "extra"; each must be importable as a
# Python-Markdown extension at registration time.
extensions = ['fenced_code',
              'footnotes',
              'headerid',
              'def_list',
              'tables',
              'abbr',
              ]
class ExtraExtension(markdown.Extension):
    """ Add various extensions to Markdown class."""
    def extendMarkdown(self, md, md_globals):
        """ Register extension instances. """
        # Delegates to Markdown itself, passing our config through to each
        # of the bundled sub-extensions listed in `extensions`.
        md.registerExtensions(extensions, self.config)
def makeExtension(configs=None):
    """ Return the Extra extension.

    An empty dict replaces the previous mutable default argument
    (configs={}); dict() would reject None directly.
    """
    if configs is None:
        configs = {}
    return ExtraExtension(configs=dict(configs))
| Python |
#!/usr/bin/env Python
"""
Tables Extension for Python-Markdown
====================================
Added parsing of tables to Python-Markdown.
A simple example:
First Header | Second Header
------------- | -------------
Content Cell | Content Cell
Content Cell | Content Cell
Copyright 2009 - [Waylan Limberg](http://achinghead.com)
"""
import markdown
from markdown import etree
class TableProcessor(markdown.blockprocessors.BlockProcessor):
    """ Process Tables. """

    def test(self, parent, block):
        """ Heuristic: 3+ rows, pipes in the first two rows, and a
        separator-looking second row. """
        rows = block.split('\n')
        return (len(rows) > 2 and '|' in rows[0] and
                '|' in rows[1] and '-' in rows[1] and
                rows[1][0] in ('|', ':', '-'))

    def run(self, parent, blocks):
        """ Parse a table block and build table. """
        lines = blocks.pop(0).split('\n')
        header_row, sep_row = lines[0], lines[1]
        body_rows = lines[2:]
        # Bordered style starts each row with a pipe.
        border = header_row.startswith('|')
        # Column alignment comes from the separator row's colons.
        align = []
        for col in self._split_row(sep_row, border):
            left = col.startswith(':')
            right = col.endswith(':')
            if left and right:
                align.append('center')
            elif left:
                align.append('left')
            elif right:
                align.append('right')
            else:
                align.append(None)
        # Build table
        table = etree.SubElement(parent, 'table')
        thead = etree.SubElement(table, 'thead')
        self._build_row(header_row, thead, align, border)
        tbody = etree.SubElement(table, 'tbody')
        for line in body_rows:
            self._build_row(line, tbody, align, border)

    def _build_row(self, row, parent, align, border):
        """ Given a row of text, build table cells. """
        tr = etree.SubElement(parent, 'tr')
        if parent.tag == 'thead':
            tag = 'th'
        else:
            tag = 'td'
        cells = self._split_row(row, border)
        # Iterate align (not cells) so every row gets the same column count.
        for i, a in enumerate(align):
            c = etree.SubElement(tr, tag)
            try:
                c.text = cells[i].strip()
            except IndexError:
                c.text = ""
            if a:
                c.set('align', a)

    def _split_row(self, row, border):
        """ split a row of text into list of cells. """
        if border:
            if row.startswith('|'):
                row = row[1:]
            if row.endswith('|'):
                row = row[:-1]
        return row.split('|')
class TableExtension(markdown.Extension):
    """ Add tables to Markdown. """

    def extendMarkdown(self, md, md_globals):
        """ Add an instance of TableProcessor to BlockParser. """
        processor = TableProcessor(md.parser)
        md.parser.blockprocessors.add('table', processor, '<hashheader')
def makeExtension(configs=None):
    """ Return the tables extension.

    An empty dict replaces the previous mutable default argument
    (configs={}); behavior is unchanged for all callers.
    """
    return TableExtension(configs=configs if configs is not None else {})
| Python |
import markdown
import re
def isString(s):
    """ Check if it's string """
    # Accept both unicode and byte strings (Python 2 semantics).
    return isinstance(s, (unicode, str))
class Processor:
    """ Base class that optionally keeps a reference to the Markdown
    instance it serves; the attribute is only bound when one is given. """
    def __init__(self, markdown_instance=None):
        if markdown_instance:
            self.markdown = markdown_instance
class Treeprocessor(Processor):
    """
    Treeprocessors are run on the ElementTree object before serialization.

    Each Treeprocessor implements a "run" method that takes a pointer to an
    ElementTree, modifies it as necessary and returns an ElementTree
    object.

    Treeprocessors must extend markdown.Treeprocessor.
    """
    def run(self, root):
        """
        Subclasses override this; the base implementation does nothing.
        Return a replacement ElementTree for the root, or None after
        mutating the given tree in place.
        """
        pass
class InlineProcessor(Treeprocessor):
"""
A Treeprocessor that traverses a tree, applying inline patterns.
"""
def __init__ (self, md):
self.__placeholder_prefix = markdown.INLINE_PLACEHOLDER_PREFIX
self.__placeholder_suffix = markdown.ETX
self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
+ len(self.__placeholder_suffix)
self.__placeholder_re = re.compile(markdown.INLINE_PLACEHOLDER % r'([0-9]{4})')
self.markdown = md
def __makePlaceholder(self, type):
""" Generate a placeholder """
id = "%04d" % len(self.stashed_nodes)
hash = markdown.INLINE_PLACEHOLDER % id
return hash, id
def __findPlaceholder(self, data, index):
"""
Extract id from data string, start from index
Keyword arguments:
* data: string
* index: index, from which we start search
Returns: placeholder id and string index, after the found placeholder.
"""
m = self.__placeholder_re.search(data, index)
if m:
return m.group(1), m.end()
else:
return None, index + 1
def __stashNode(self, node, type):
""" Add node to stash """
placeholder, id = self.__makePlaceholder(type)
self.stashed_nodes[id] = node
return placeholder
def __handleInline(self, data, patternIndex=0):
"""
Process string with inline patterns and replace it
with placeholders
Keyword arguments:
* data: A line of Markdown text
* patternIndex: The index of the inlinePattern to start with
Returns: String with placeholders.
"""
if not isinstance(data, markdown.AtomicString):
startIndex = 0
while patternIndex < len(self.markdown.inlinePatterns):
data, matched, startIndex = self.__applyPattern(
self.markdown.inlinePatterns.value_for_index(patternIndex),
data, patternIndex, startIndex)
if not matched:
patternIndex += 1
return data
def __processElementText(self, node, subnode, isText=True):
"""
Process placeholders in Element.text or Element.tail
of Elements popped from self.stashed_nodes.
Keywords arguments:
* node: parent node
* subnode: processing node
* isText: bool variable, True - it's text, False - it's tail
Returns: None
"""
if isText:
text = subnode.text
subnode.text = None
else:
text = subnode.tail
subnode.tail = None
childResult = self.__processPlaceholders(text, subnode)
if not isText and node is not subnode:
pos = node.getchildren().index(subnode)
node.remove(subnode)
else:
pos = 0
childResult.reverse()
for newChild in childResult:
node.insert(pos, newChild)
def __processPlaceholders(self, data, parent):
"""
Process string with placeholders and generate ElementTree tree.
Keyword arguments:
* data: string with placeholders instead of ElementTree elements.
* parent: Element, which contains processing inline data
Returns: list with ElementTree elements with applied inline patterns.
"""
def linkText(text):
if text:
if result:
if result[-1].tail:
result[-1].tail += text
else:
result[-1].tail = text
else:
if parent.text:
parent.text += text
else:
parent.text = text
result = []
strartIndex = 0
while data:
index = data.find(self.__placeholder_prefix, strartIndex)
if index != -1:
id, phEndIndex = self.__findPlaceholder(data, index)
if id in self.stashed_nodes:
node = self.stashed_nodes.get(id)
if index > 0:
text = data[strartIndex:index]
linkText(text)
if not isString(node): # it's Element
for child in [node] + node.getchildren():
if child.tail:
if child.tail.strip():
self.__processElementText(node, child, False)
if child.text:
if child.text.strip():
self.__processElementText(child, child)
else: # it's just a string
linkText(node)
strartIndex = phEndIndex
continue
strartIndex = phEndIndex
result.append(node)
else: # wrong placeholder
end = index + len(prefix)
linkText(data[strartIndex:end])
strartIndex = end
else:
text = data[strartIndex:]
linkText(text)
data = ""
return result
def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
    """
    Check if the line fits the pattern, create the necessary
    elements, add it to stashed_nodes.

    Keyword arguments:

    * data: the text to be processed
    * pattern: the pattern to be checked
    * patternIndex: index of current pattern
    * startIndex: string index, from which we starting search

    Returns: String with placeholders instead of ElementTree elements.

    """
    match = pattern.getCompiledRegExp().match(data[startIndex:])
    leftData = data[:startIndex]

    if not match:
        # Pattern did not apply at all; report no match.
        return data, False, 0

    node = pattern.handleMatch(match)

    if node is None:
        # Matched, but produced nothing: tell the caller to continue
        # searching just past this match.
        return data, True, len(leftData) + match.span(len(match.groups()))[0]

    if not isString(node):
        if not isinstance(node.text, markdown.AtomicString):
            # We need to process current node too
            for child in [node] + node.getchildren():
                # NOTE(review): this re-tests ``node`` on every iteration;
                # it looks like it was meant to be ``isString(child)`` —
                # confirm before changing.
                if not isString(node):
                    if child.text:
                        child.text = self.__handleInline(child.text,
                                                patternIndex + 1)
                    if child.tail:
                        child.tail = self.__handleInline(child.tail,
                                                patternIndex)

    # Stash the node and splice its placeholder into the source text.
    placeholder = self.__stashNode(node, pattern.type())

    return "%s%s%s%s" % (leftData,
                         match.group(1),
                         placeholder, match.groups()[-1]), True, 0
def run(self, tree):
    """Apply inline patterns to a parsed Markdown tree.

    Iterate over ElementTree, find elements with inline tag, apply inline
    patterns and append newly created Elements to tree.  If you don't
    want to process your data with inline patterns, instead of a normal
    string, use subclass AtomicString:

        node.text = markdown.AtomicString("data won't be processed with inline patterns")

    Arguments:

    * tree: ElementTree object, representing Markdown tree.

    Returns: ElementTree object with applied inline patterns.

    """
    self.stashed_nodes = {}

    stack = [tree]

    while stack:
        currElement = stack.pop()
        insertQueue = []
        for child in currElement.getchildren():
            if child.text and not isinstance(child.text, markdown.AtomicString):
                text = child.text
                child.text = None
                # Run the inline patterns over the text, then expand the
                # resulting placeholders into real child Elements.
                lst = self.__processPlaceholders(self.__handleInline(
                                                text), child)
                # Newly created elements may themselves contain inline
                # markup, so push them onto the work stack.
                stack += lst
                insertQueue.append((child, lst))

            if child.getchildren():
                stack.append(child)

        for element, lst in insertQueue:
            if element.text:
                element.text = \
                    markdown.inlinepatterns.handleAttributes(element.text,
                                                             element)
            i = 0
            for newChild in lst:
                # Processing attributes
                if newChild.tail:
                    newChild.tail = \
                        markdown.inlinepatterns.handleAttributes(newChild.tail,
                                                                 element)
                if newChild.text:
                    newChild.text = \
                        markdown.inlinepatterns.handleAttributes(newChild.text,
                                                                 newChild)
                element.insert(i, newChild)
                i += 1
    return tree
class PrettifyTreeprocessor(Treeprocessor):
    """ Add linebreaks to the html document. """

    def _prettifyETree(self, elem):
        """ Recursively add linebreaks to ElementTree children. """

        i = "\n"
        if markdown.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
            if (not elem.text or not elem.text.strip()) \
                    and len(elem) and markdown.isBlockLevel(elem[0].tag):
                elem.text = i
            for e in elem:
                if markdown.isBlockLevel(e.tag):
                    self._prettifyETree(e)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        # NOTE(review): this repeats the tail assignment above; for
        # block-level elements it is a no-op second pass, but it also
        # covers non-block elements — confirm the duplication is intended.
        if not elem.tail or not elem.tail.strip():
            elem.tail = i

    def run(self, root):
        """ Add linebreaks to ElementTree root object. """

        self._prettifyETree(root)
        # Do <br />'s separately as they are often in the middle of
        # inline content and missed by _prettifyETree.
        brs = root.getiterator('br')
        for br in brs:
            if not br.tail or not br.tail.strip():
                br.tail = '\n'
            else:
                br.tail = '\n%s' % br.tail
| Python |
"""
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs to support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline must
capture the whole block. For this reason, they all start with
'^(.*)' and end with '(.*)$'. In the case of built-in expressions,
Pattern takes care of adding the "^(.*)" and "(.*)$".
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
import markdown
import re
from urlparse import urlparse, urlunparse
import sys
if sys.version >= "3.0":
from html import entities as htmlentitydefs
else:
import htmlentitydefs
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
NOBRACKET = r'[^\]\[]*'
# Bracketed text, allowing up to 6 levels of nested brackets.
BRK = ( r'\[('
        + (NOBRACKET + r'(\[')*6
        + (NOBRACKET+ r'\])*')*6
        + NOBRACKET + r')\]' )
NOIMG = r'(?<!\!)'                 # negative lookbehind: not preceded by "!"

BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")``
ESCAPE_RE = r'\\(.)'                             # \<
EMPHASIS_RE = r'(\*)([^\*]+)\2'                  # *emphasis*
STRONG_RE = r'(\*{2}|_{2})(.+?)\2'               # **strong**
STRONG_EM_RE = r'(\*{3}|_{3})(.+?)\2'            # ***strong***

if markdown.SMART_EMPHASIS:
    # Smart mode: underscores inside words do not trigger emphasis.
    EMPHASIS_2_RE = r'(?<!\w)(_)(\S.+?)\2(?!\w)'    # _emphasis_
else:
    EMPHASIS_2_RE = r'(_)(.+?)\2'                   # _emphasis_

LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12)?\)'''
# [text](url) or [text](<url>)

IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
# ![alttxt](http://x.com/) or ![alttxt](<http://x.com/>)
REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]'           # [Google][3]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'((^| )(\*|_)( |$))'                        # stand-alone * or _
AUTOLINK_RE = r'<((?:f|ht)tps?://[^>]*)>'          # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>'               # <me@example.com>

HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)'               # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)'               # &amp;
LINE_BREAK_RE = r'  \n'                     # two spaces at end of line
LINE_BREAK_2_RE = r'  $'                    # two spaces at end of text
def dequote(string):
    """Strip one matching pair of surrounding single or double quotes."""
    for quote in ('"', "'"):
        if string.startswith(quote) and string.endswith(quote):
            return string[1:-1]
    return string
ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}

def handleAttributes(text, parent):
    """Set values of an element based on attribute definitions ({@id=123}).

    Each ``{@key=value}`` occurrence in ``text`` is stripped from the text
    and set as an attribute on ``parent`` (newlines in the value become
    spaces). Returns the text with all definitions removed.
    """
    def attributeCallback(match):
        parent.set(match.group(1), match.group(2).replace('\n', ' '))
        # BUG FIX: ``re.sub`` requires the replacement callable to return a
        # string; returning None raised TypeError whenever an attribute
        # definition was present. Return '' so the definition is stripped.
        return ""
    return ATTR_RE.sub(attributeCallback, text)
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern:
    """Common base class that all inline patterns extend."""

    def __init__(self, pattern, markdown_instance=None):
        """Build an inline pattern around a regular expression snippet.

        Keyword arguments:

        * pattern: regular expression fragment matching the inline syntax.
        * markdown_instance: optional Markdown object, stored when given.

        """
        self.pattern = pattern
        # Anchor the fragment so every match spans the entire input.
        self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, re.DOTALL)

        # Api for Markdown to pass safe_mode into instance
        self.safe_mode = False
        if markdown_instance:
            self.markdown = markdown_instance

    def getCompiledRegExp(self):
        """Expose the pre-compiled regular expression."""
        return self.compiled_re

    def handleMatch(self, m):
        """Turn a match object into an ElementTree element (or text).

        Subclasses must override this; the base version does nothing.

        * m: a re match object produced by this pattern's expression.

        """
        pass

    def type(self):
        """Identify the pattern type by its class name."""
        return self.__class__.__name__

BasePattern = Pattern # for backward compatibility
class SimpleTextPattern (Pattern):
    """Return the plain text captured in group(2) of a Pattern."""
    def handleMatch(self, m):
        captured = m.group(2)
        # A bare placeholder prefix must not leak through as text.
        if captured == markdown.INLINE_PLACEHOLDER_PREFIX:
            return None
        return captured
class SimpleTagPattern (Pattern):
    """
    Wrap the text captured in group(3) in a single element named ``tag``.
    """
    def __init__ (self, pattern, tag):
        Pattern.__init__(self, pattern)
        self.tag = tag

    def handleMatch(self, m):
        element = markdown.etree.Element(self.tag)
        element.text = m.group(3)
        return element
class SubstituteTagPattern (SimpleTagPattern):
    """Return an empty element of type ``tag`` (no text, no children)."""
    def handleMatch (self, m):
        return markdown.etree.Element(self.tag)
class BacktickPattern (Pattern):
    """Wrap the matched, stripped text in a `<code>` element."""
    def __init__ (self, pattern):
        Pattern.__init__(self, pattern)
        self.tag = "code"

    def handleMatch(self, m):
        element = markdown.etree.Element(self.tag)
        # AtomicString keeps the code span from being re-parsed inline.
        element.text = markdown.AtomicString(m.group(3).strip())
        return element
class DoubleTagPattern (SimpleTagPattern):
    """Nest two elements (tag2 inside tag1) around the captured text.

    Useful for strong emphasis etc.

    """
    def handleMatch(self, m):
        outer_tag, inner_tag = self.tag.split(",")
        outer = markdown.etree.Element(outer_tag)
        inner = markdown.etree.SubElement(outer, inner_tag)
        inner.text = m.group(3)
        return outer
class HtmlPattern (Pattern):
    """ Store raw inline html and return a placeholder. """
    def handleMatch (self, m):
        """Stash the matched raw HTML and return its placeholder string."""
        rawhtml = m.group(2)
        # Removed the unused local ``inline = True`` that was never read.
        place_holder = self.markdown.htmlStash.store(rawhtml)
        return place_holder
class LinkPattern (Pattern):
    """ Return a link element from the given match. """
    def handleMatch(self, m):
        el = markdown.etree.Element("a")
        el.text = m.group(2)
        title = m.group(11)
        href = m.group(9)

        if href:
            if href[0] == "<":
                # Strip the surrounding angle brackets of a <url>.
                href = href[1:-1]
            el.set("href", self.sanitize_url(href.strip()))
        else:
            el.set("href", "")

        if title:
            title = dequote(title) #.replace('"', "&quot;")
            el.set("title", title)
        return el

    def sanitize_url(self, url):
        """
        Sanitize a url against xss attacks in "safe_mode".

        Rather than specifically blacklisting `javascript:alert("XSS")` and all
        its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
        safe url formats. Most urls contain a network location, however some
        are known not to (i.e.: mailto links). Script urls do not contain a
        location. Additionally, for `javascript:...`, the scheme would be
        "javascript" but some aliases will appear to `urlparse()` to have no
        scheme. On top of that relative links (i.e.: "foo/bar.html") have no
        scheme. Therefore we must check "path", "parameters", "query" and
        "fragment" for any literal colons. We don't check "scheme" for colons
        because it *should* never have any and "netloc" must allow the form:
        `username:password@host:port`.

        """
        locless_schemes = ['', 'mailto', 'news']
        scheme, netloc, path, params, query, fragment = url = urlparse(url)
        safe_url = False
        if netloc != '' or scheme in locless_schemes:
            safe_url = True

        for part in url[2:]:
            # A literal colon outside scheme/netloc could hide a script url.
            if ":" in part:
                safe_url = False

        if self.markdown.safeMode and not safe_url:
            return ''
        else:
            return urlunparse(url)
class ImagePattern(LinkPattern):
    """Build an ``img`` element from the given match."""
    def handleMatch(self, m):
        img = markdown.etree.Element("img")
        src_parts = m.group(9).split()
        if src_parts:
            src = src_parts[0]
            if src.startswith("<") and src.endswith(">"):
                src = src[1:-1]
            img.set('src', self.sanitize_url(src))
        else:
            img.set('src', "")
        if len(src_parts) > 1:
            # Everything after the url is the (possibly quoted) title.
            img.set('title', dequote(" ".join(src_parts[1:])))

        if markdown.ENABLE_ATTRIBUTES:
            truealt = handleAttributes(m.group(2), img)
        else:
            truealt = m.group(2)

        img.set('alt', truealt)
        return img
class ReferencePattern(LinkPattern):
    """Resolve a stored reference id and build the matching link element."""
    def handleMatch(self, m):
        # An explicit id ("[Google][3]") wins; otherwise fall back to the
        # link text itself ("[Google][]" -> id "google").
        ref_id = (m.group(9) or m.group(2)).lower()

        if ref_id not in self.markdown.references:
            # Undefined reference: leave the source text untouched.
            return None
        href, title = self.markdown.references[ref_id]

        return self.makeTag(href, title, m.group(2))

    def makeTag(self, href, title, text):
        """Assemble the ``a`` element for a resolved reference."""
        anchor = markdown.etree.Element('a')
        anchor.set('href', self.sanitize_url(href))
        if title:
            anchor.set('title', title)
        anchor.text = text
        return anchor
class ImageReferencePattern (ReferencePattern):
    """Resolve a stored reference id and build an ``img`` element."""
    def makeTag(self, href, title, text):
        image = markdown.etree.Element("img")
        image.set("src", self.sanitize_url(href))
        if title:
            image.set("title", title)
        image.set("alt", text)
        return image
class AutolinkPattern (Pattern):
    """Build a link Element from an autolink (`<http://example/com>`)."""
    def handleMatch(self, m):
        url = m.group(2)
        anchor = markdown.etree.Element("a")
        anchor.set('href', url)
        # AtomicString: the visible url must not be re-parsed for inline syntax.
        anchor.text = markdown.AtomicString(url)
        return anchor
class AutomailPattern (Pattern):
    """
    Return a mailto link Element given an automail link (`<foo@example.com>`),
    obfuscating the address with character entities.
    """
    def handleMatch(self, m):
        anchor = markdown.etree.Element('a')
        address = m.group(2)
        scheme = "mailto:"
        if address.startswith(scheme):
            address = address[len(scheme):]

        def codepoint2name(code):
            """Return entity definition by code, or the code if not defined."""
            entity = htmlentitydefs.codepoint2name.get(code)
            if entity:
                return "%s%s;" % (markdown.AMP_SUBSTITUTE, entity)
            else:
                return "%s#%d;" % (markdown.AMP_SUBSTITUTE, code)

        # Obfuscate the visible address one character at a time, preferring
        # named entities where they exist.
        anchor.text = markdown.AtomicString(
            ''.join([codepoint2name(ord(ch)) for ch in address]))

        # The href is obfuscated with numeric entities only.
        mailto = scheme + address
        anchor.set('href', "".join([markdown.AMP_SUBSTITUTE + '#%d;' %
                                    ord(ch) for ch in mailto]))
        return anchor
| Python |
"""
CORE MARKDOWN BLOCKPARSER
=============================================================================
This parser handles basic parsing of Markdown blocks. It doesn't concern itself
with inline elements such as **bold** or *italics*, but rather just catches
blocks, lists, quotes, etc.
The BlockParser is made up of a bunch of BlockProcessors, each handling a
different type of block. Extensions may add/replace/remove BlockProcessors
as they need to alter how markdown blocks are parsed.
"""
import re
import markdown
class BlockProcessor:
    """ Base class for block processors.

    Each subclass will provide the methods below to work with the source and
    tree. Each processor will need to define it's own ``test`` and ``run``
    methods. The ``test`` method should return True or False, to indicate
    whether the current block should be processed by this processor. If the
    test passes, the parser will call the processors ``run`` method.

    """

    def __init__(self, parser=None):
        # The BlockParser driving this processor; used for recursion & state.
        self.parser = parser

    def lastChild(self, parent):
        """ Return the last child of an etree element. """
        if len(parent):
            return parent[-1]
        else:
            return None

    def detab(self, text):
        """ Remove a tab from the front of each line of the given text.

        Returns a 2-tuple: (detabbed leading lines joined by newlines,
        the remaining lines joined by newlines).
        """
        newtext = []
        lines = text.split('\n')
        for line in lines:
            if line.startswith(' '*markdown.TAB_LENGTH):
                newtext.append(line[markdown.TAB_LENGTH:])
            elif not line.strip():
                # Blank lines stay (as empty strings) in the detabbed part.
                newtext.append('')
            else:
                # First non-indented, non-blank line ends the detabbed region.
                break
        return '\n'.join(newtext), '\n'.join(lines[len(newtext):])

    def looseDetab(self, text, level=1):
        """ Remove a tab from front of lines but allowing dedented lines. """
        lines = text.split('\n')
        for i in range(len(lines)):
            if lines[i].startswith(' '*markdown.TAB_LENGTH*level):
                lines[i] = lines[i][markdown.TAB_LENGTH*level:]
        return '\n'.join(lines)

    def test(self, parent, block):
        """ Test for block type. Must be overridden by subclasses.

        As the parser loops through processors, it will call the ``test`` method
        on each to determine if the given block of text is of that type. This
        method must return a boolean ``True`` or ``False``. The actual method of
        testing is left to the needs of that particular block type. It could
        be as simple as ``block.startswith(some_string)`` or a complex regular
        expression. As the block type may be different depending on the parent
        of the block (i.e. inside a list), the parent etree element is also
        provided and may be used as part of the test.

        Keywords:

        * ``parent``: A etree element which will be the parent of the block.
        * ``block``: A block of text from the source which has been split at
            blank lines.
        """
        pass

    def run(self, parent, blocks):
        """ Run processor. Must be overridden by subclasses.

        When the parser determines the appropriate type of a block, the parser
        will call the corresponding processor's ``run`` method. This method
        should parse the individual lines of the block and append them to
        the etree.

        Note that both the ``parent`` and ``etree`` keywords are pointers
        to instances of the objects which should be edited in place. Each
        processor must make changes to the existing objects as there is no
        mechanism to return new/different objects to replace them.

        This means that this method should be adding SubElements or adding text
        to the parent, and should remove (``pop``) or add (``insert``) items to
        the list of blocks.

        Keywords:

        * ``parent``: A etree element which is the parent of the current block.
        * ``blocks``: A list of all remaining blocks of the document.
        """
        pass
class ListIndentProcessor(BlockProcessor):
    """ Process children of list items.

    Example:
        * a list item
            process this part

            or this part

    """

    INDENT_RE = re.compile(r'^(([ ]{%s})+)'% markdown.TAB_LENGTH)
    ITEM_TYPES = ['li']
    LIST_TYPES = ['ul', 'ol']

    def test(self, parent, block):
        # Indented block, not already being detabbed, and the parent is
        # (or ends with) a list element.
        return block.startswith(' '*markdown.TAB_LENGTH) and \
               not self.parser.state.isstate('detabbed') and \
               (parent.tag in self.ITEM_TYPES or \
                   (len(parent) and parent[-1] and \
                       (parent[-1].tag in self.LIST_TYPES)
                   )
               )

    def run(self, parent, blocks):
        block = blocks.pop(0)
        level, sibling = self.get_level(parent, block)
        block = self.looseDetab(block, level)

        # Guard against re-entering this processor while parsing the child.
        self.parser.state.set('detabbed')
        if parent.tag in self.ITEM_TYPES:
            # The parent is already a li. Just parse the child block.
            self.parser.parseBlocks(parent, [block])
        elif sibling.tag in self.ITEM_TYPES:
            # The sibling is a li. Use it as parent.
            self.parser.parseBlocks(sibling, [block])
        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
            # The parent is a list (``ol`` or ``ul``) which has children.
            # Assume the last child li is the parent of this block.
            if sibling[-1].text:
                # If the parent li has text, that text needs to be moved to a p
                block = '%s\n\n%s' % (sibling[-1].text, block)
                sibling[-1].text = ''
            self.parser.parseChunk(sibling[-1], block)
        else:
            self.create_item(sibling, block)
        self.parser.state.reset()

    def create_item(self, parent, block):
        """ Create a new li and parse the block with it as the parent. """
        li = markdown.etree.SubElement(parent, 'li')
        self.parser.parseBlocks(li, [block])

    def get_level(self, parent, block):
        """ Get level of indent based on list level. """
        # Get indent level
        m = self.INDENT_RE.match(block)
        if m:
            # NOTE(review): "/" is integer division only on Python 2; use
            # "//" if this code is ever ported to Python 3.
            indent_level = len(m.group(1))/markdown.TAB_LENGTH
        else:
            indent_level = 0
        if self.parser.state.isstate('list'):
            # We're in a tightlist - so we already are at correct parent.
            level = 1
        else:
            # We're in a looselist - so we need to find parent.
            level = 0
        # Step through children of tree to find matching indent level.
        while indent_level > level:
            child = self.lastChild(parent)
            if child and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES):
                if child.tag in self.LIST_TYPES:
                    level += 1
                parent = child
            else:
                # No more child levels. If we're short of indent_level,
                # we have a code block. So we stop here.
                break
        return level, parent
class CodeBlockProcessor(BlockProcessor):
    """ Process code blocks (blocks indented by one tab). """

    def test(self, parent, block):
        return block.startswith(' '*markdown.TAB_LENGTH)

    def run(self, parent, blocks):
        sibling = self.lastChild(parent)
        block = blocks.pop(0)
        theRest = ''
        if sibling and sibling.tag == "pre" and len(sibling) \
                    and sibling[0].tag == "code":
            # The previous block was a code block. As blank lines do not start
            # new code blocks, append this block to the previous, adding back
            # linebreaks removed from the split into a list.
            code = sibling[0]
            block, theRest = self.detab(block)
            code.text = markdown.AtomicString('%s\n%s\n' % (code.text, block.rstrip()))
        else:
            # This is a new codeblock. Create the elements and insert text.
            pre = markdown.etree.SubElement(parent, 'pre')
            code = markdown.etree.SubElement(pre, 'code')
            block, theRest = self.detab(block)
            # AtomicString keeps the code from being run through inline patterns.
            code.text = markdown.AtomicString('%s\n' % block.rstrip())
        if theRest:
            # This block contained unindented line(s) after the first indented
            # line. Insert these lines as the first block of the master blocks
            # list for future processing.
            blocks.insert(0, theRest)
class BlockQuoteProcessor(BlockProcessor):
    """ Process blockquotes (lines starting with ``>``). """

    RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()] # Lines before blockquote
            # Pass lines before blockquote in recursively for parsing first.
            self.parser.parseBlocks(parent, [before])
            # Remove ``> `` from beginning of each line.
            block = '\n'.join([self.clean(line) for line in
                            block[m.start():].split('\n')])
        sibling = self.lastChild(parent)
        if sibling and sibling.tag == "blockquote":
            # Previous block was a blockquote so set that as this blocks parent
            quote = sibling
        else:
            # This is a new blockquote. Create a new parent element.
            quote = markdown.etree.SubElement(parent, 'blockquote')
        # Recursively parse block with blockquote as parent.
        self.parser.parseChunk(quote, block)

    def clean(self, line):
        """ Remove ``>`` from beginning of a line. """
        m = self.RE.match(line)
        if line.strip() == ">":
            return ""
        elif m:
            return m.group(2)
        else:
            return line
class OListProcessor(BlockProcessor):
    """ Process ordered list blocks. """

    TAG = 'ol'
    # Detect an item (``1. item``). ``group(1)`` contains contents of item.
    RE = re.compile(r'^[ ]{0,3}\d+\.[ ]+(.*)')
    # Detect items on secondary lines. they can be of either list type.
    CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ]+(.*)')
    # Detect indented (nested) items of either type
    INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ]+.*')

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        # Check for multiple items in one block.
        items = self.get_items(blocks.pop(0))
        sibling = self.lastChild(parent)
        if sibling and sibling.tag in ['ol', 'ul']:
            # Previous block was a list item, so set that as parent
            lst = sibling
            # make sure previous item is in a p.
            if len(lst) and lst[-1].text and not len(lst[-1]):
                p = markdown.etree.SubElement(lst[-1], 'p')
                p.text = lst[-1].text
                lst[-1].text = ''
            # parse first block differently as it gets wrapped in a p.
            li = markdown.etree.SubElement(lst, 'li')
            self.parser.state.set('looselist')
            firstitem = items.pop(0)
            self.parser.parseBlocks(li, [firstitem])
            self.parser.state.reset()
        else:
            # This is a new list so create parent with appropriate tag.
            lst = markdown.etree.SubElement(parent, self.TAG)
        self.parser.state.set('list')
        # Loop through items in block, recursively parsing each with the
        # appropriate parent.
        for item in items:
            if item.startswith(' '*markdown.TAB_LENGTH):
                # Item is indented. Parse with last item as parent
                self.parser.parseBlocks(lst[-1], [item])
            else:
                # New item. Create li and parse with it as parent
                li = markdown.etree.SubElement(lst, 'li')
                self.parser.parseBlocks(li, [item])
        self.parser.state.reset()

    def get_items(self, block):
        """ Break a block into list items. """
        items = []
        for line in block.split('\n'):
            m = self.CHILD_RE.match(line)
            if m:
                # This is a new item. Append
                items.append(m.group(3))
            elif self.INDENT_RE.match(line):
                # This is an indented (possibly nested) item.
                if items[-1].startswith(' '*markdown.TAB_LENGTH):
                    # Previous item was indented. Append to that item.
                    items[-1] = '%s\n%s' % (items[-1], line)
                else:
                    items.append(line)
            else:
                # This is another line of previous item. Append to that item.
                items[-1] = '%s\n%s' % (items[-1], line)
        return items
class UListProcessor(OListProcessor):
    """ Process unordered list blocks.

    Inherits all parsing behavior from OListProcessor; only the tag and
    the item-detection pattern differ.
    """

    TAG = 'ul'
    # Detect a bullet item (``* item``, ``+ item`` or ``- item``).
    RE = re.compile(r'^[ ]{0,3}[*+-][ ]+(.*)')
class HashHeaderProcessor(BlockProcessor):
    """ Process Hash Headers (``# Header`` through ``###### Header``). """

    # Detect a header at start of any line in block
    RE = re.compile(r'(^|\n)(?P<level>#{1,6})(?P<header>.*?)#*(\n|$)')

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()] # All lines before header
            after = block[m.end():]    # All lines after header
            if before:
                # As the header was not the first line of the block and the
                # lines before the header must be parsed first,
                # recursively parse this lines as a block.
                self.parser.parseBlocks(parent, [before])
            # Create header using named groups from RE
            h = markdown.etree.SubElement(parent, 'h%d' % len(m.group('level')))
            h.text = m.group('header').strip()
            if after:
                # Insert remaining lines as first block for future parsing.
                blocks.insert(0, after)
        else:
            # This should never happen, but just in case...
            # BUG FIX: ``message`` and ``CRITICAL`` were referenced as bare
            # names but live in the ``markdown`` package, so this branch
            # raised NameError instead of logging.
            markdown.message(markdown.CRITICAL, "We've got a problem header!")
class SetextHeaderProcessor(BlockProcessor):
    """Process Setext-style headers (underlined with ``===`` or ``---``)."""

    # Detect Setext-style header. Must be first 2 lines of block.
    RE = re.compile(r'^.*?\n[=-]{3,}', re.MULTILINE)

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        rows = blocks.pop(0).split('\n')
        # ``=`` underlines produce an h1, ``-`` underlines an h2.
        if rows[1].startswith('='):
            depth = 1
        else:
            depth = 2
        header = markdown.etree.SubElement(parent, 'h%d' % depth)
        header.text = rows[0].strip()
        if len(rows) > 2:
            # Re-queue any remaining lines for later parsing.
            blocks.insert(0, '\n'.join(rows[2:]))
class HRProcessor(BlockProcessor):
    """Process Horizontal Rules (``***``, ``---``, ``___`` and variants)."""

    RE = r'[ ]{0,3}(?P<ch>[*_-])[ ]?((?P=ch)[ ]?){2,}[ ]*'
    # Detect hr on any line of a block.
    SEARCH_RE = re.compile(r'(^|\n)%s(\n|$)' % RE)
    # Match a hr on a single line of text.
    MATCH_RE = re.compile(r'^%s$' % RE)

    def test(self, parent, block):
        return bool(self.SEARCH_RE.search(block))

    def run(self, parent, blocks):
        rows = blocks.pop(0).split('\n')
        # Locate the first line that is entirely a horizontal rule.
        cut = len(rows)
        for idx in range(len(rows)):
            if self.MATCH_RE.match(rows[idx]):
                cut = idx
                break
        if cut:
            # Everything before the rule must be parsed first.
            self.parser.parseBlocks(parent, ['\n'.join(rows[:cut])])
        # create hr
        markdown.etree.SubElement(parent, 'hr')
        # Queue any lines after the rule for later parsing.
        leftover = rows[cut + 1:]
        if leftover:
            blocks.insert(0, '\n'.join(leftover))
class EmptyBlockProcessor(BlockProcessor):
    """ Process blocks that start with an empty line. """

    # Detect a block that only contains whitespace
    # or only whitespace on the first line.
    RE = re.compile(r'^\s*\n')

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.match(block)
        if m:
            # Add remaining line to master blocks for later.
            blocks.insert(0, block[m.end():])
            sibling = self.lastChild(parent)
            if sibling and sibling.tag == 'pre' and sibling[0] and \
               sibling[0].tag == 'code':
                # Last block is a codeblock. Append to preserve whitespace.
                # BUG FIX: the original appended the literal text "/n/n/n"
                # (forward slashes) instead of newline characters.
                sibling[0].text = markdown.AtomicString('%s\n\n\n' % sibling[0].text)
class ParagraphProcessor(BlockProcessor):
    """Fallback processor: any leftover non-blank block becomes a paragraph."""

    def test(self, parent, block):
        # Always matches; must be registered last.
        return True

    def run(self, parent, blocks):
        chunk = blocks.pop(0)
        if not chunk.strip():
            # Blank block: throw it away.
            return
        if self.parser.state.isstate('list'):
            # Inside a tight list the text goes straight onto the parent.
            if parent.text:
                parent.text = '%s\n%s' % (parent.text, chunk)
            else:
                parent.text = chunk.lstrip()
        else:
            # Create a regular paragraph.
            paragraph = markdown.etree.SubElement(parent, 'p')
            paragraph.text = chunk.lstrip()
| Python |
import markdown
class State(list):
    """Track the current and nested state of the parser.

    A thin stack built on ``list``: each ``set`` pushes a state and each
    ``reset`` pops one, so nested blocks must balance their set/reset
    calls or the state becomes corrupted. Although every list method is
    available, only the three methods below are meant to be used.
    """

    def set(self, state):
        """Enter a new (possibly nested) state."""
        self.append(state)

    def reset(self):
        """Leave the most recently entered state."""
        self.pop()

    def isstate(self, state):
        """Check whether the innermost (current) state equals ``state``."""
        return bool(self) and self[-1] == state
class BlockParser:
    """Drive the registered BlockProcessors over markdown blocks.

    Stitches the various BlockProcessors together, looping through them
    and building an ElementTree object.
    """

    def __init__(self):
        self.blockprocessors = markdown.odict.OrderedDict()
        self.state = State()

    def parseDocument(self, lines):
        """Parse a whole markdown document into an ElementTree.

        ``lines`` is the list of source lines. A fresh root element is
        created and used as the parent for parsing; the finished
        ElementTree (not just the root Element) is returned. Call this
        only on entire documents, not pieces.
        """
        self.root = markdown.etree.Element(markdown.DOC_TAG)
        self.parseChunk(self.root, '\n'.join(lines))
        return markdown.etree.ElementTree(self.root)

    def parseChunk(self, parent, text):
        """Parse a chunk of markdown text and attach it to ``parent``.

        ``text`` may hold a single block or many; blocks are split on
        blank lines. ``parent`` is modified in place and nothing is
        returned. Extensions generally call this when they need block
        parsing.
        """
        self.parseBlocks(parent, text.split('\n\n'))

    def parseBlocks(self, parent, blocks):
        """Feed ``blocks`` through the processors until none remain.

        For each pending block the registered processors are tried in
        order; the first whose ``test`` passes gets to ``run``. Public so
        extension BlockProcessors can recursively parse nested blocks.
        """
        while blocks:
            for processor in self.blockprocessors.values():
                if processor.test(parent, blocks[0]):
                    processor.run(parent, blocks)
                    break
| Python |
# markdown/html4.py
#
# Add html4 serialization to older versions of Elementree
# Taken from ElementTree 1.3 preview with slight modifications
#
# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import markdown
ElementTree = markdown.etree.ElementTree
QName = markdown.etree.QName
Comment = markdown.etree.Comment
PI = markdown.etree.PI
ProcessingInstruction = markdown.etree.ProcessingInstruction
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta" "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# dublic core
"http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_cdata(text, encoding):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 character, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _serialize_html(write, elem, encoding, qnames, namespaces):
    """Recursively serialize *elem* as HTML via the *write* callable.

    * write: callable accepting encoded byte chunks.
    * elem: the element (or Comment/PI node) to serialize.
    * encoding: target character encoding.
    * qnames: map of raw tag/attribute names to encoded prefix:local names.
    * namespaces: uri->prefix map; only emitted on the outermost call
      (recursive calls pass None so xmlns attributes appear once).
    """
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # A None-mapped tag means "no element": emit text and children only.
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                # NOTE(review): list.sort() on elem.items() is Python 2 only;
                # under Python 3 items() returns a view — confirm target runtime.
                items.sort() # lexical order
                for k, v in items:
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v, encoding)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
                if namespaces:
                    items = namespaces.items()
                    items.sort(key=lambda x: x[1]) # sort on prefix
                    for v, k in items:
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
            write(">")
            tag = tag.lower()
            if text:
                # script/style contents must not be entity-escaped.
                if tag == "script" or tag == "style":
                    write(_encode(text, encoding))
                else:
                    write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
            # Empty (void) elements get no closing tag in HTML.
            if tag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))
def write_html(root, f,
               # keyword arguments
               encoding="us-ascii",
               default_namespace=None):
    """Serialize *root* as HTML to *f*.

    *f* may be a writable file-like object or a filename (opened "wb").
    *encoding* defaults to "us-ascii"; *default_namespace* is forwarded
    to the namespace table builder.
    """
    assert root is not None
    out = f if hasattr(f, "write") else open(f, "wb")
    if not encoding:
        encoding = "us-ascii"
    qnames, namespaces = _namespaces(root, encoding, default_namespace)
    _serialize_html(out.write, root, encoding, qnames, namespaces)
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, encoding, default_namespace=None):
    """Walk the tree under *elem* and build the serialization name tables.

    Returns (qnames, namespaces):
    * qnames maps every raw tag/attribute name seen to its *encoded*
      "prefix:local" form (None maps to None).
    * namespaces maps each namespace URI to its chosen prefix.
    """
    # identify namespaces used in this tree
    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}
    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        namespaces[default_namespace] = ""
    def encode(text):
        # Encode a computed name with the caller's target encoding.
        return text.encode(encoding)
    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                # "{uri}tag" form: resolve/assign a prefix for the uri.
                uri, tag = qname[1:].split("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        # Invent a prefix: ns0, ns1, ...
                        prefix = "ns%d" % len(namespaces)
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = encode("%s:%s" % (prefix, tag))
                else:
                    qnames[qname] = encode(tag) # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
                    raise ValueError(
                        "cannot use non-qualified names with "
                        "default_namespace option"
                        )
                qnames[qname] = encode(qname)
        except TypeError:
            _raise_serialization_error(qname)
    # populate qname and namespaces table
    try:
        iterate = elem.iter
    except AttributeError:
        iterate = elem.getiterator # cET compatibility
    for elem in iterate():
        tag = elem.tag
        if isinstance(tag, QName) and tag.text not in qnames:
            add_qname(tag.text)
        elif isinstance(tag, basestring):
            # NOTE: basestring makes this Python 2 specific.
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
def to_html_string(element, encoding=None):
    """Serialize *element* (and its subtree) and return the HTML string."""
    chunks = []
    class _Sink:
        # Minimal file-like object: write() just collects chunks.
        pass
    sink = _Sink()
    sink.write = chunks.append
    write_html(ElementTree(element).getroot(), sink, encoding)
    return "".join(chunks)
| Python |
"""
COMMAND-LINE SPECIFIC STUFF
=============================================================================
The rest of the code is specifically for handling the case where Python
Markdown is called from the command line.
"""
import markdown
import sys
import logging
from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
# Fallback program name shown in the usage text when optparse is unavailable.
EXECUTABLE_NAME_FOR_USAGE = "python markdown.py"
""" The name used in the usage statement displayed for python versions < 2.3.
(With python 2.3 and higher the usage statement is generated by optparse
and uses the actual name of the executable called.) """
# Message printed by parse_options() when optparse cannot be imported.
OPTPARSE_WARNING = """
Python 2.3 or higher required for advanced command line options.
For lower versions of Python use:
%s INPUT_FILE > OUTPUT_FILE
""" % EXECUTABLE_NAME_FOR_USAGE
def parse_options():
    """
    Define and parse `optparse` options for command-line usage.

    Returns a (options, logging_level) tuple where options is a dict whose
    keys match the keyword arguments of markdown.markdownFromFile, or
    (None, None) when parsing failed and usage/help was printed.
    """
    try:
        optparse = __import__("optparse")
    except:
        # Pre-2.3 Pythons have no optparse: support the single-argument form.
        if len(sys.argv) == 2:
            # BUG FIX: this dict is splatted into markdownFromFile(**options),
            # whose parameter is `safe_mode`, not `safe` — the old key raised
            # TypeError: unexpected keyword argument 'safe'.
            return {'input': sys.argv[1],
                    'output': None,
                    'safe_mode': False,
                    'extensions': [],
                    'encoding': None }, CRITICAL
        else:
            # Parenthesized form works identically on Python 2 and 3.
            print(OPTPARSE_WARNING)
            return None, None
    parser = optparse.OptionParser(usage="%prog INPUTFILE [options]")
    parser.add_option("-f", "--file", dest="filename", default=sys.stdout,
                      help="write output to OUTPUT_FILE",
                      metavar="OUTPUT_FILE")
    parser.add_option("-e", "--encoding", dest="encoding",
                      help="encoding for input and output files",)
    parser.add_option("-q", "--quiet", default = CRITICAL,
                      action="store_const", const=CRITICAL+10, dest="verbose",
                      help="suppress all messages")
    parser.add_option("-v", "--verbose",
                      action="store_const", const=INFO, dest="verbose",
                      help="print info messages")
    parser.add_option("-s", "--safe", dest="safe", default=False,
                      metavar="SAFE_MODE",
                      help="safe mode ('replace', 'remove' or 'escape' user's HTML tag)")
    parser.add_option("-o", "--output_format", dest="output_format",
                      default='xhtml1', metavar="OUTPUT_FORMAT",
                      help="Format of output. One of 'xhtml1' (default) or 'html4'.")
    parser.add_option("--noisy",
                      action="store_const", const=DEBUG, dest="verbose",
                      help="print debug messages")
    parser.add_option("-x", "--extension", action="append", dest="extensions",
                      help = "load extension EXTENSION", metavar="EXTENSION")
    (options, args) = parser.parse_args()
    if not len(args) == 1:
        # Exactly one positional INPUTFILE is required.
        parser.print_help()
        return None, None
    else:
        input_file = args[0]
    if not options.extensions:
        options.extensions = []
    return {'input': input_file,
            'output': options.filename,
            'safe_mode': options.safe,
            'extensions': options.extensions,
            'encoding': options.encoding,
            'output_format': options.output_format}, options.verbose
def run():
    """Run Markdown from the command line."""
    # Parse options and adjust logging level if necessary.
    options, logging_level = parse_options()
    if not options:
        sys.exit(0)
    if logging_level:
        logging.getLogger('MARKDOWN').setLevel(logging_level)
    # Hand off to the library entry point.
    markdown.markdownFromFile(**options)
| Python |
"""
POST-PROCESSORS
=============================================================================
Markdown also allows post-processors, which are similar to preprocessors in
that they need to implement a "run" method. However, they are run after core
processing.
"""
import markdown
class Processor:
    """Base class that optionally binds a Markdown instance to a processor."""
    def __init__(self, markdown_instance=None):
        # Bind only when a truthy instance is supplied; some processors
        # are used standalone and never reference self.markdown.
        if markdown_instance:
            self.markdown = markdown_instance
class Postprocessor(Processor):
    """
    Postprocessors are run after the ElementTree has been serialized back
    into text.  Each Postprocessor implements a "run" method taking the
    whole document as one text string and returning a (possibly modified)
    text string.

    Postprocessors must extend markdown.Postprocessor.
    """
    def run(self, text):
        """
        Override in subclasses: receive the html document as a single
        string and return the string to use in its place.
        """
        pass
class RawHtmlPostprocessor(Postprocessor):
    """ Restore raw html to the document. """

    def run(self, text):
        """ Iterate over html stash and restore "safe" html.

        For each stashed segment: in safe mode, unsafe html is escaped,
        removed or replaced per the safeMode setting; safe segments (or
        all segments outside safe mode) are substituted back, stripping a
        wrapping <p> that the parser may have added around the placeholder.
        """
        for i in range(self.markdown.htmlStash.html_counter):
            html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
            if self.markdown.safeMode and not safe:
                if str(self.markdown.safeMode).lower() == 'escape':
                    html = self.escape(html)
                elif str(self.markdown.safeMode).lower() == 'remove':
                    html = ''
                else:
                    html = markdown.HTML_REMOVED_TEXT
            if safe or not self.markdown.safeMode:
                text = text.replace("<p>%s</p>" %
                            (markdown.preprocessors.HTML_PLACEHOLDER % i),
                            html + "\n")
            text =  text.replace(markdown.preprocessors.HTML_PLACEHOLDER % i,
                                 html)
        return text

    def escape(self, html):
        """ Basic html escaping.

        BUG FIX: the received SOURCE had the entity strings decoded back
        into the bare characters, making every replace() a no-op (so
        'escape' safe mode passed raw html straight through).  Restored.
        """
        html = html.replace('&', '&amp;')
        html = html.replace('<', '&lt;')
        html = html.replace('>', '&gt;')
        return html.replace('"', '&quot;')
class AndSubstitutePostprocessor(Postprocessor):
    """Swap the internal ampersand placeholder back to a literal "&"."""
    def __init__(self):
        # Needs no Markdown instance, so Processor.__init__ is bypassed.
        pass
    def run(self, text):
        return text.replace(markdown.AMP_SUBSTITUTE, "&")
| Python |
"""
PRE-PROCESSORS
=============================================================================
Preprocessors work on source text before we start doing anything too
complicated.
"""
import re
import markdown
# Placeholder wrapped around each stashed HTML segment:
# STX control char + "wzxhzdk:" + running index + ETX control char.
HTML_PLACEHOLDER_PREFIX = markdown.STX+"wzxhzdk:"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%d" + markdown.ETX
class Processor:
    """Common base holding an optional reference to the Markdown instance."""
    def __init__(self, markdown_instance=None):
        # The attribute is only created for a truthy instance argument.
        if markdown_instance:
            self.markdown = markdown_instance
class Preprocessor (Processor):
    """
    Preprocessors run after the source text has been split into lines.

    Each preprocessor implements a "run" method receiving the document as
    a list of lines; it may modify the list in place or return a new one.

    Preprocessors must extend markdown.Preprocessor.
    """
    def run(self, lines):
        """
        Override in subclasses: receive the document as a list of strings
        (split on newlines) and return the (possibly modified) list.
        """
        pass
class HtmlStash:
    """
    Stashes raw HTML fragments extracted early in processing and hands
    back placeholder strings marking where each fragment belongs.
    """

    def __init__ (self):
        """ Create an empty HtmlStash. """
        self.html_counter = 0 # number of stashed inline html segments
        self.rawHtmlBlocks=[]

    def store(self, html, safe=False):
        """
        Saves an HTML segment for later reinsertion.  Returns a
        placeholder string that needs to be inserted into the
        document.

        Keyword arguments:

        * html: an html segment
        * safe: label an html segment as safe for safemode

        Returns : a placeholder string

        """
        index = self.html_counter
        self.html_counter = index + 1
        self.rawHtmlBlocks.append((html, safe))
        return HTML_PLACEHOLDER % index

    def reset(self):
        """Drop all stashed segments and restart the counter."""
        self.html_counter = 0
        self.rawHtmlBlocks = []
class HtmlBlockPreprocessor(Preprocessor):
    """Remove html blocks from the text and store them for later retrieval."""

    # Closing-tag shapes tried in order: proper "</tag>", then bare "tag>".
    right_tag_patterns = ["</%s>", "%s>"]

    def _get_left_tag(self, block):
        # Tag name of the opening tag at the start of *block*, lowercased.
        return block[1:].replace(">", " ", 1).split()[0].lower()

    def _get_right_tag(self, left_tag, block):
        # Return (tag_name, index_just_past_the_closing_tag).  Falls back to
        # the last characters of the block when no closing tag is found.
        for p in self.right_tag_patterns:
            tag = p % left_tag
            i = block.rfind(tag)
            if i > 2:
                return tag.lstrip("<").rstrip(">"), i + len(p)-2 + len(left_tag)
        return block.rstrip()[-len(left_tag)-2:-1].lower(), len(block)

    def _equal_tags(self, left_tag, right_tag):
        # True when *right_tag* plausibly closes *left_tag*.
        if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
            return True
        if ("/" + left_tag) == right_tag:
            return True
        if (right_tag == "--" and left_tag == "--"):
            return True
        elif left_tag == right_tag[1:] \
            and right_tag[0] != "<":
            return True
        else:
            return False

    def _is_oneliner(self, tag):
        # Tags that are complete on their own line (no closing tag expected).
        return (tag in ['hr', 'hr/'])

    def run(self, lines):
        # Re-join, then operate on blank-line-separated blocks.
        text = "\n".join(lines)
        new_blocks = []
        text = text.split("\n\n")
        items = []          # accumulated blocks of an open multi-block tag
        left_tag = ''
        right_tag = ''
        in_tag = False # flag: inside an unclosed block-level tag
        while text:
            block = text[0]
            if block.startswith("\n"):
                block = block[1:]
            text = text[1:]
            if block.startswith("\n"):
                block = block[1:]
            if not in_tag:
                if block.startswith("<"):
                    left_tag = self._get_left_tag(block)
                    right_tag, data_index = self._get_right_tag(left_tag, block)
                    if block[1] == "!":
                        # is a comment block
                        left_tag = "--"
                        right_tag, data_index = self._get_right_tag(left_tag, block)
                        # keep checking conditions below and maybe just append
                    if data_index < len(block) \
                        and markdown.isBlockLevel(left_tag):
                        # Trailing markdown after the closing tag: requeue it.
                        text.insert(0, block[data_index:])
                        block = block[:data_index]
                    if not (markdown.isBlockLevel(left_tag) \
                        or block[1] in ["!", "?", "@", "%"]):
                        new_blocks.append(block)
                        continue
                    if self._is_oneliner(left_tag):
                        new_blocks.append(block.strip())
                        continue
                    if block.rstrip().endswith(">") \
                        and self._equal_tags(left_tag, right_tag):
                        # Complete html block: stash it whole.
                        new_blocks.append(
                            self.markdown.htmlStash.store(block.strip()))
                        continue
                    else: #if not block[1] == "!":
                        # if is block level tag and is not complete
                        # NOTE(review): `and` binds tighter than `or` here, so
                        # this reads isBlockLevel OR (left_tag == "--" AND not
                        # closed) — confirm that precedence is the intent.
                        if markdown.isBlockLevel(left_tag) or left_tag == "--" \
                            and not block.rstrip().endswith(">"):
                            items.append(block.strip())
                            in_tag = True
                        else:
                            new_blocks.append(
                            self.markdown.htmlStash.store(block.strip()))
                        continue
                new_blocks.append(block)
            else:
                items.append(block.strip())
                right_tag, data_index = self._get_right_tag(left_tag, block)
                if self._equal_tags(left_tag, right_tag):
                    # if find closing tag
                    in_tag = False
                    new_blocks.append(
                        self.markdown.htmlStash.store('\n\n'.join(items)))
                    items = []
        if items:
            # Unclosed tag at EOF: stash whatever accumulated.
            new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items)))
            new_blocks.append('\n')
        new_text = "\n\n".join(new_blocks)
        return new_text.split("\n")
class ReferencePreprocessor(Preprocessor):
    """ Remove reference definitions from text and store for later use. """

    # Up to three leading spaces, "[id]: url", then an optional title.
    RE = re.compile(r'^(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)$', re.DOTALL)

    def run (self, lines):
        kept = []
        for line in lines:
            match = self.RE.match(line)
            if not match:
                kept.append(line)
                continue
            ref_id = match.group(2).strip().lower()
            title = match.group(4).strip() # potential title
            if not title:
                # Bare reference: store it with the empty title.
                self.markdown.references[ref_id] = (match.group(3), title)
            elif (len(title) >= 2
                  and (title[0] == title[-1] == "\""
                       or title[0] == title[-1] == "\'"
                       or (title[0] == "(" and title[-1] == ")"))):
                # Quoted/parenthesized title: store without the delimiters.
                self.markdown.references[ref_id] = (match.group(3), title[1:-1])
            else:
                # Malformed title: treat the line as ordinary text.
                kept.append(line)
        return kept
| Python |
"""
Python Markdown
===============
Python Markdown converts Markdown to HTML and can be used as a library or
called from the command line.
## Basic usage as a module:
import markdown
md = Markdown()
html = md.convert(your_text_string)
## Basic use from the command line:
markdown source.txt > destination.html
Run "markdown --help" to see more options.
## Extensions
See <http://www.freewisdom.org/projects/python-markdown/> for more
information and instructions on how to extend the functionality of
Python Markdown. Read that before you try modifying this file.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: markdown@freewisdom.org
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see docs/LICENSE for details).
"""
# Package release identifiers: display string and comparable tuple.
version = "2.0.3"
version_info = (2,0,3, "Final")
import re
import codecs
import sys
import warnings
import logging
from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
"""
CONSTANTS
=============================================================================
"""
"""
Constants you might want to modify
-----------------------------------------------------------------------------
"""
# default logging level for command-line use
COMMAND_LINE_LOGGING_LEVEL = CRITICAL
TAB_LENGTH = 4 # expand tabs to this many spaces
ENABLE_ATTRIBUTES = True # @id = xyz -> <... id="xyz">
SMART_EMPHASIS = True # this_or_that does not become this<i>or</i>that
DEFAULT_OUTPUT_FORMAT = 'xhtml1' # xhtml or html4 output
HTML_REMOVED_TEXT = "[HTML_REMOVED]" # text used instead of HTML in safe mode
# Regex of tag names treated as block-level HTML (see isBlockLevel()).
BLOCK_LEVEL_ELEMENTS = re.compile("p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
                                  "|script|noscript|form|fieldset|iframe|math"
                                  "|ins|del|hr|hr/|style|li|dt|dd|thead|tbody"
                                  "|tr|th|td")
DOC_TAG = "div" # Element used to wrap document - later removed
# Placeholders
STX = u'\u0002' # Use STX ("Start of text") for start-of-placeholder
ETX = u'\u0003' # Use ETX ("End of text") for end-of-placeholder
INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
AMP_SUBSTITUTE = STX+"amp"+ETX
"""
Constants you probably do not need to change
-----------------------------------------------------------------------------
"""
# Unicode ranges rendered right-to-left (used for bidi handling).
RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'),
                     # Hebrew (0590-05FF), Arabic (0600-06FF),
                     # Syriac (0700-074F), Arabic supplement (0750-077F),
                     # Thaana (0780-07BF), Nko (07C0-07FF).
                    (u'\u2D30', u'\u2D7F'), # Tifinagh
                   )
"""
AUXILIARY GLOBAL FUNCTIONS
=============================================================================
"""
def message(level, text):
    """ A wrapper method for logging debug messages.

    With a configured 'MARKDOWN' logger, the message is routed through it
    (exiting on severities above WARN).  Without one, severe messages
    raise MarkdownException and the rest become MarkdownWarning warnings.
    """
    logger = logging.getLogger('MARKDOWN')
    if logger.handlers:
        # The logger is configured
        logger.log(level, text)
        if level > WARN:
            sys.exit(0)
    elif level > WARN:
        raise MarkdownException(text)
    else:
        warnings.warn(text, MarkdownWarning)
def isBlockLevel(tag):
    """Check if the tag is a block level HTML tag.

    Returns the (truthy) match object when *tag* matches
    BLOCK_LEVEL_ELEMENTS, else None.
    """
    # NOTE(review): match() is unanchored at the end, so any tag merely
    # *starting* with a listed name also matches — confirm intended.
    return BLOCK_LEVEL_ELEMENTS.match(tag)
"""
MISC AUXILIARY CLASSES
=============================================================================
"""
class AtomicString(unicode):
    """A string which should not be further processed."""
    # Marker subclass of unicode (Python 2); carries no behavior of its own.
    pass
class MarkdownException(Exception):
    """ A Markdown Exception. """
    # Raised by message() for severe errors when no logger is configured.
    pass
class MarkdownWarning(Warning):
    """ A Markdown Warning. """
    # Warning category used by message() for non-fatal notices.
    pass
"""
OVERALL DESIGN
=============================================================================
Markdown processing takes place in four steps:
1. A bunch of "preprocessors" munge the input text.
2. BlockParser() parses the high-level structural elements of the
pre-processed text into an ElementTree.
3. A bunch of "treeprocessors" are run against the ElementTree. One such
treeprocessor runs InlinePatterns against the ElementTree, detecting inline
markup.
4. Some post-processors are run against the text after the ElementTree has
been serialized into text.
5. The output is written to a string.
Those steps are put together by the Markdown() class.
"""
import preprocessors
import blockprocessors
import treeprocessors
import inlinepatterns
import postprocessors
import blockparser
import etree_loader
import odict
# Extensions should use "markdown.etree" instead of "etree" (or do `from
# markdown import etree`). Do not import it by yourself.
etree = etree_loader.importETree()
# Adds the ability to output html4
import html4
class Markdown:
    """Convert Markdown to HTML.

    Holds the full processing pipeline: preprocessors, the block parser,
    inline patterns, tree processors and postprocessors, all stored in
    ordered dicts so extensions can insert entries at named positions.
    """
    def __init__(self,
                 extensions=[],
                 extension_configs={},
                 safe_mode = False,
                 output_format=DEFAULT_OUTPUT_FORMAT):
        """
        Creates a new Markdown instance.

        Keyword arguments:

        * extensions: A list of extensions.
           If they are of type string, the module mdx_name.py will be loaded.
           If they are a subclass of markdown.Extension, they will be used
           as-is.
        * extension-configs: Configuration setting for extensions.
        * safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
        * output_format: Format of output. Supported formats are:
            * "xhtml1": Outputs XHTML 1.x. Default.
            * "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
            * "html4": Outputs HTML 4
            * "html": Outputs latest supported version of HTML (currently HTML 4).
            Note that it is suggested that the more specific formats ("xhtml1"
            and "html4") be used as "xhtml" or "html" may change in the future
            if it makes sense at that time.
        """
        # NOTE(review): the [] / {} defaults are shared mutable objects; they
        # are only read here, so this is harmless as currently written.
        self.safeMode = safe_mode
        self.registeredExtensions = []
        self.docType = ""
        self.stripTopLevelTags = True
        # Preprocessors
        self.preprocessors = odict.OrderedDict()
        self.preprocessors["html_block"] = \
                preprocessors.HtmlBlockPreprocessor(self)
        self.preprocessors["reference"] = \
                preprocessors.ReferencePreprocessor(self)
        # footnote preprocessor will be inserted with "<reference"
        # Block processors - ran by the parser
        self.parser = blockparser.BlockParser()
        self.parser.blockprocessors['empty'] = \
                blockprocessors.EmptyBlockProcessor(self.parser)
        self.parser.blockprocessors['indent'] = \
                blockprocessors.ListIndentProcessor(self.parser)
        self.parser.blockprocessors['code'] = \
                blockprocessors.CodeBlockProcessor(self.parser)
        self.parser.blockprocessors['hashheader'] = \
                blockprocessors.HashHeaderProcessor(self.parser)
        self.parser.blockprocessors['setextheader'] = \
                blockprocessors.SetextHeaderProcessor(self.parser)
        self.parser.blockprocessors['hr'] = \
                blockprocessors.HRProcessor(self.parser)
        self.parser.blockprocessors['olist'] = \
                blockprocessors.OListProcessor(self.parser)
        self.parser.blockprocessors['ulist'] = \
                blockprocessors.UListProcessor(self.parser)
        self.parser.blockprocessors['quote'] = \
                blockprocessors.BlockQuoteProcessor(self.parser)
        self.parser.blockprocessors['paragraph'] = \
                blockprocessors.ParagraphProcessor(self.parser)
        #self.prePatterns = []
        # Inline patterns - Run on the tree
        self.inlinePatterns = odict.OrderedDict()
        self.inlinePatterns["backtick"] = \
                inlinepatterns.BacktickPattern(inlinepatterns.BACKTICK_RE)
        self.inlinePatterns["escape"] = \
                inlinepatterns.SimpleTextPattern(inlinepatterns.ESCAPE_RE)
        self.inlinePatterns["reference"] = \
            inlinepatterns.ReferencePattern(inlinepatterns.REFERENCE_RE, self)
        self.inlinePatterns["link"] = \
                inlinepatterns.LinkPattern(inlinepatterns.LINK_RE, self)
        self.inlinePatterns["image_link"] = \
                inlinepatterns.ImagePattern(inlinepatterns.IMAGE_LINK_RE, self)
        self.inlinePatterns["image_reference"] = \
            inlinepatterns.ImageReferencePattern(inlinepatterns.IMAGE_REFERENCE_RE, self)
        self.inlinePatterns["autolink"] = \
            inlinepatterns.AutolinkPattern(inlinepatterns.AUTOLINK_RE, self)
        self.inlinePatterns["automail"] = \
            inlinepatterns.AutomailPattern(inlinepatterns.AUTOMAIL_RE, self)
        self.inlinePatterns["linebreak2"] = \
            inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_2_RE, 'br')
        self.inlinePatterns["linebreak"] = \
            inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_RE, 'br')
        self.inlinePatterns["html"] = \
                inlinepatterns.HtmlPattern(inlinepatterns.HTML_RE, self)
        self.inlinePatterns["entity"] = \
                inlinepatterns.HtmlPattern(inlinepatterns.ENTITY_RE, self)
        self.inlinePatterns["not_strong"] = \
                inlinepatterns.SimpleTextPattern(inlinepatterns.NOT_STRONG_RE)
        self.inlinePatterns["strong_em"] = \
            inlinepatterns.DoubleTagPattern(inlinepatterns.STRONG_EM_RE, 'strong,em')
        self.inlinePatterns["strong"] = \
            inlinepatterns.SimpleTagPattern(inlinepatterns.STRONG_RE, 'strong')
        self.inlinePatterns["emphasis"] = \
            inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_RE, 'em')
        self.inlinePatterns["emphasis2"] = \
            inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_2_RE, 'em')
        # The order of the handlers matters!!!
        # Tree processors - run once we have a basic parse.
        self.treeprocessors = odict.OrderedDict()
        self.treeprocessors["inline"] = treeprocessors.InlineProcessor(self)
        self.treeprocessors["prettify"] = \
                treeprocessors.PrettifyTreeprocessor(self)
        # Postprocessors - finishing touches.
        self.postprocessors = odict.OrderedDict()
        self.postprocessors["raw_html"] = \
                postprocessors.RawHtmlPostprocessor(self)
        self.postprocessors["amp_substitute"] = \
                postprocessors.AndSubstitutePostprocessor()
        # footnote postprocessor will be inserted with ">amp_substitute"
        # Map format keys to serializers
        self.output_formats = {
            'html'  : html4.to_html_string,
            'html4' : html4.to_html_string,
            'xhtml' : etree.tostring,
            'xhtml1': etree.tostring,
        }
        self.references = {}
        self.htmlStash = preprocessors.HtmlStash()
        self.registerExtensions(extensions = extensions,
                                configs = extension_configs)
        self.set_output_format(output_format)
        self.reset()

    def registerExtensions(self, extensions, configs):
        """
        Register extensions with this instance of Markdown.

        Keyword arguments:

        * extensions: A list of extensions, which can either
           be strings or objects.  See the docstring on Markdown.
        * configs: A dictionary mapping module names to config options.
        """
        for ext in extensions:
            if isinstance(ext, basestring):
                # String entry: import and instantiate the extension module.
                ext = load_extension(ext, configs.get(ext, []))
            if isinstance(ext, Extension):
                try:
                    ext.extendMarkdown(self, globals())
                except NotImplementedError, e:
                    message(ERROR, e)
            else:
                message(ERROR, 'Extension "%s.%s" must be of type: "markdown.Extension".' \
                    % (ext.__class__.__module__, ext.__class__.__name__))

    def registerExtension(self, extension):
        """ This gets called by the extension """
        self.registeredExtensions.append(extension)

    def reset(self):
        """
        Resets all state variables so that we can start with a new text.
        """
        self.htmlStash.reset()
        self.references.clear()
        for extension in self.registeredExtensions:
            extension.reset()

    def set_output_format(self, format):
        """ Set the output format for the class instance. """
        try:
            self.serializer = self.output_formats[format.lower()]
        except KeyError:
            message(CRITICAL, 'Invalid Output Format: "%s". Use one of %s.' \
                               % (format, self.output_formats.keys()))

    def convert(self, source):
        """
        Convert markdown to serialized XHTML or HTML.

        Keyword arguments:

        * source: Source text as a Unicode string.
        """
        # Fixup the source text
        if not source.strip():
            return u""  # a blank unicode string
        try:
            source = unicode(source)
        except UnicodeDecodeError:
            message(CRITICAL, 'UnicodeDecodeError: Markdown only accepts unicode or ascii input.')
            return u""
        # Strip any stray placeholder control chars, normalize newlines,
        # collapse whitespace-only lines and expand tabs.
        source = source.replace(STX, "").replace(ETX, "")
        source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
        source = re.sub(r'\n\s+\n', '\n\n', source)
        source = source.expandtabs(TAB_LENGTH)
        # Split into lines and run the line preprocessors.
        self.lines = source.split("\n")
        for prep in self.preprocessors.values():
            self.lines = prep.run(self.lines)
        # Parse the high-level elements.
        root = self.parser.parseDocument(self.lines).getroot()
        # Run the tree-processors
        for treeprocessor in self.treeprocessors.values():
            newRoot = treeprocessor.run(root)
            if newRoot:
                root = newRoot
        # Serialize _properly_.  Strip top-level tags.
        output, length = codecs.utf_8_decode(self.serializer(root, encoding="utf-8"))
        if self.stripTopLevelTags:
            try:
                start = output.index('<%s>'%DOC_TAG)+len(DOC_TAG)+2
                end = output.rindex('</%s>'%DOC_TAG)
                output = output[start:end].strip()
            except ValueError:
                if output.strip().endswith('<%s />'%DOC_TAG):
                    # We have an empty document
                    output = ''
                else:
                    # We have a serious problem
                    message(CRITICAL, 'Failed to strip top level tags.')
        # Run the text post-processors
        for pp in self.postprocessors.values():
            output = pp.run(output)
        return output.strip()

    def convertFile(self, input=None, output=None, encoding=None):
        """Converts a markdown file and returns the HTML as a unicode string.

        Decodes the file using the provided encoding (defaults to utf-8),
        passes the file content to markdown, and outputs the html to either
        the provided stream or the file with provided name, using the same
        encoding as the source file.

        **Note:** This is the only place that decoding and encoding of unicode
        takes place in Python-Markdown.  (All other code is unicode-in /
        unicode-out.)

        Keyword arguments:

        * input: Name of source text file.
        * output: Name of output file. Writes to stdout if `None`.
        * encoding: Encoding of input and output files. Defaults to utf-8.

        """
        encoding = encoding or "utf-8"
        # Read the source
        input_file = codecs.open(input, mode="r", encoding=encoding)
        text = input_file.read()
        input_file.close()
        text = text.lstrip(u'\ufeff') # remove the byte-order mark
        # Convert
        html = self.convert(text)
        # Write to file or stdout
        # NOTE(review): despite the docstring, a `None` output is written to
        # via output.write() here — confirm callers always pass a stream.
        if isinstance(output, (str, unicode)):
            output_file = codecs.open(output, "w", encoding=encoding)
            output_file.write(html)
            output_file.close()
        else:
            output.write(html.encode(encoding))
"""
Extensions
-----------------------------------------------------------------------------
"""
class Extension:
    """ Base class for extensions to subclass. """

    def __init__(self, configs = None):
        """Create an instance of an Extention.

        Keyword arguments:

        * configs: A dict of configuration setting used by an Extension.
        """
        # BUG FIX: the default used to be a shared mutable `{}`; any
        # instance that added a key would leak it into every other
        # default-constructed extension.  Each instance now gets its own.
        self.config = configs if configs is not None else {}

    def getConfig(self, key):
        """ Return a setting for the given key or an empty string. """
        if key in self.config:
            return self.config[key][0]
        else:
            return ""

    def getConfigInfo(self):
        """ Return all config settings as a list of (key, description) tuples. """
        return [(key, self.config[key][1]) for key in self.config.keys()]

    def setConfig(self, key, value):
        """ Set a config setting for `key` with the given `value`. """
        self.config[key][0] = value

    def extendMarkdown(self, md, md_globals):
        """
        Add the various proccesors and patterns to the Markdown Instance.

        This method must be overriden by every extension.

        Keyword arguments:

        * md: The Markdown instance.

        * md_globals: Global variables in the markdown module namespace.
        """
        # BUG FIX: the two literal fragments used to concatenate with no
        # space, producing '...extendMarkdownmethod.'.
        raise NotImplementedError(
            'Extension "%s.%s" must define an "extendMarkdown" '
            'method.' % (self.__class__.__module__, self.__class__.__name__))
def load_extension(ext_name, configs = []):
    """Load extension by name, then return the module.

    The extension name may contain arguments as part of the string in the
    following format: "extname(key1=value1,key2=value2)"
    """
    # Pull any embedded config params out of the name (order-insensitive).
    configs = dict(configs)
    paren = ext_name.find("(") # find the first "("
    if paren > 0:
        arg_string = ext_name[paren + 1:-1]
        ext_name = ext_name[:paren]
        for pair in arg_string.split(","):
            key, value = pair.split("=")
            configs[key.strip()] = value.strip()
    # Candidate module names: new style lives under markdown.extensions,
    # old style is a top-level mdx_<name> module.
    ext_module = 'markdown.extensions'
    module_name_new_style = '.'.join([ext_module, ext_name])
    module_name_old_style = '_'.join(['mdx', ext_name])
    # Try loading the extention first from one place, then another
    try: # New style (markdown.extensons.<extension>)
        module = __import__(module_name_new_style, {}, {}, [ext_module])
    except ImportError:
        try: # Old style (mdx.<extension>)
            module = __import__(module_name_old_style)
        except ImportError:
            message(WARN, "Failed loading extension '%s' from '%s' or '%s'"
                % (ext_name, module_name_new_style, module_name_old_style))
            # Return None so we don't try to initiate none-existant extension
            return None
    # A successfully loaded module must expose a makeExtension() factory.
    try:
        return module.makeExtension(configs.items())
    except AttributeError:
        message(CRITICAL, "Failed to initiate extension '%s'" % ext_name)
def load_extensions(ext_names):
    """Load every extension named in `ext_names`, silently dropping failures."""
    loaded = [load_extension(name) for name in ext_names]
    return [ext for ext in loaded if ext]
"""
EXPORTED FUNCTIONS
=============================================================================
Those are the two functions we really mean to export: markdown() and
markdownFromFile().
"""
def markdown(text,
             extensions = [],
             safe_mode = False,
             output_format = DEFAULT_OUTPUT_FORMAT):
    """Render a markdown string and return the HTML as a unicode string.

    Convenience wrapper around the `Markdown` class for the common case:
    build one instance with the requested extensions and options, then
    convert `text` in a single call.

    Keyword arguments:
    * text: Markdown formatted text as Unicode or ASCII string.
    * extensions: A list of extensions or extension names (may contain config args).
    * safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
    * output_format: One of "xhtml1" (default), "xhtml", "html4" or "html".
      The specific forms ("xhtml1"/"html4") are recommended; the generic
      aliases track the latest supported version and may change over time.

    Returns: An HTML document as a string.
    """
    converter = Markdown(extensions=load_extensions(extensions),
                         safe_mode=safe_mode,
                         output_format=output_format)
    return converter.convert(text)
def markdownFromFile(input = None,
                     output = None,
                     extensions = [],
                     encoding = None,
                     safe_mode = False,
                     output_format = DEFAULT_OUTPUT_FORMAT):
    """Read markdown source from a file and write the HTML to a file or stream."""
    converter = Markdown(extensions=load_extensions(extensions),
                         safe_mode=safe_mode,
                         output_format=output_format)
    converter.convertFile(input, output, encoding)
| Python |
class OrderedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.
    Copied from Django's SortedDict with some modifications.
    """
    def __new__(cls, *args, **kwargs):
        instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
        instance.keyOrder = []
        return instance

    def __init__(self, data=None):
        """Initialize from a mapping or an iterable of (key, value) pairs."""
        if data is None:
            data = {}
        super(OrderedDict, self).__init__(data)
        if isinstance(data, dict):
            # Copy into a fresh list so later append/remove calls work and
            # we never alias the source mapping's key collection.
            self.keyOrder = list(data.keys())
        else:
            self.keyOrder = []
            for key, value in data:
                if key not in self.keyOrder:
                    self.keyOrder.append(key)

    def __deepcopy__(self, memo):
        from copy import deepcopy
        return self.__class__([(key, deepcopy(value, memo))
                               for key, value in self.iteritems()])

    def __setitem__(self, key, value):
        super(OrderedDict, self).__setitem__(key, value)
        if key not in self.keyOrder:
            self.keyOrder.append(key)

    def __delitem__(self, key):
        super(OrderedDict, self).__delitem__(key)
        self.keyOrder.remove(key)

    def __iter__(self):
        for k in self.keyOrder:
            yield k

    def pop(self, k, *args):
        result = super(OrderedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result

    def popitem(self):
        result = super(OrderedDict, self).popitem()
        self.keyOrder.remove(result[0])
        return result

    def items(self):
        """Return (key, value) pairs in insertion order, always as a list."""
        return list(zip(self.keyOrder, self.values()))

    def iteritems(self):
        for key in self.keyOrder:
            yield key, super(OrderedDict, self).__getitem__(key)

    def keys(self):
        """Return a copy of the ordered key list."""
        return self.keyOrder[:]

    def iterkeys(self):
        return iter(self.keyOrder)

    def values(self):
        """Return the values in key-insertion order."""
        return [super(OrderedDict, self).__getitem__(k) for k in self.keyOrder]

    def itervalues(self):
        for key in self.keyOrder:
            yield super(OrderedDict, self).__getitem__(key)

    def update(self, dict_):
        for k, v in dict_.items():
            self.__setitem__(k, v)

    def setdefault(self, key, default):
        if key not in self.keyOrder:
            self.keyOrder.append(key)
        return super(OrderedDict, self).setdefault(key, default)

    def value_for_index(self, index):
        """Return the value of the item at the given zero-based index."""
        return self[self.keyOrder[index]]

    def insert(self, index, key, value):
        """Insert the key, value pair before the item with the given index."""
        if key in self.keyOrder:
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            # Account for the removal shifting positions left of `index`.
            if n < index:
                index -= 1
        self.keyOrder.insert(index, key)
        super(OrderedDict, self).__setitem__(key, value)

    def copy(self):
        """Return a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        obj = self.__class__(self)
        obj.keyOrder = self.keyOrder[:]
        return obj

    def __repr__(self):
        """
        Replace the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])

    def clear(self):
        super(OrderedDict, self).clear()
        self.keyOrder = []

    def index(self, key):
        """ Return the index of a given key. """
        return self.keyOrder.index(key)

    def index_for_location(self, location):
        """ Return index or None for a given location.

        `location` is '_begin', '_end', or '<key'/'>key' meaning before/after
        an existing key.
        """
        if location == '_begin':
            i = 0
        elif location == '_end':
            i = None
        elif location.startswith('<') or location.startswith('>'):
            i = self.index(location[1:])
            if location.startswith('>'):
                if i >= len(self):
                    # last item
                    i = None
                else:
                    i += 1
        else:
            raise ValueError('Not a valid location: "%s". Location key '
                             'must start with a ">" or "<".' % location)
        return i

    def add(self, key, value, location):
        """ Insert by key location. """
        i = self.index_for_location(location)
        if i is not None:
            self.insert(i, key, value)
        else:
            self.__setitem__(key, value)

    def link(self, key, location):
        """ Change location of an existing item. """
        n = self.keyOrder.index(key)
        del self.keyOrder[n]
        i = self.index_for_location(location)
        try:
            if i is not None:
                self.keyOrder.insert(i, key)
            else:
                self.keyOrder.append(key)
        except Exception:
            # BUGFIX: the original referenced an undefined name `Error` here,
            # which would have turned any failure into a NameError and lost
            # the original exception.  Restore the key at its old position
            # to prevent data loss, then re-raise.
            self.keyOrder.insert(n, key)
            raise
| Python |
from markdown import message, CRITICAL
import sys
## Import
def importETree():
    """Import the best available implementation of ElementTree.

    Prefers the C-accelerated modules over the pure-Python ones, and the
    Python 2.5+ stdlib locations over the standalone packages.  Exits the
    process (via message()/sys.exit) when nothing importable is found or
    the found module is too old.
    """
    # (module name, is C implementation) in preference order.
    candidates = (
        ('xml.etree.cElementTree', True),    # Python 2.5+ C accelerator
        ('xml.etree.ElementTree', False),    # Python 2.5+ pure Python
        ('cElementTree', True),              # standalone C package
        ('elementtree.ElementTree', False),  # standalone pure-Python package
    )
    module = None
    accelerated = False
    for name, is_c in candidates:
        try:
            module = __import__(name, {}, {}, [name.rpartition('.')[2]])
        except ImportError:
            continue
        accelerated = is_c
        break
    if module is None:
        message(CRITICAL, "Failed to import ElementTree")
        sys.exit(1)
    if accelerated:
        if module.VERSION < "1.0":
            message(CRITICAL, "For cElementTree version 1.0 or higher is required.")
            sys.exit(1)
        return module
    if module.VERSION < "1.1":
        message(CRITICAL, "For ElementTree version 1.1 or higher is required")
        sys.exit(1)
    return module
| Python |
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""The Breadcrumb parser + compiler.
Requirements:
* PyParsing 1.5 (http://pypi.python.org/pypi/pyparsing)
* Markdown 2.0 (http://pypi.python.org/pypi/Markdown)
"""
__author__ = 'blinks@google.com (Adam Blinkinsop)'
from pyparsing import *
from xml.dom import minidom
import logging
import markdown
import re
import simplejson as json
import sys
# Shared DOM factory; BuildLink uses it to create <a> elements and text nodes.
impl = minidom.getDOMImplementation()
def main(*args, **opts):
    """Compile Breadcrumb source files into HTML output.

    Each positional argument is a filename ('-' reads stdin).  The compiled
    story is written next to the input as '<name>.html' ('out.html' when
    reading stdin).  `opts` is accepted but unused here.
    """
    # Create a parser.
    parser = Parser()
    # Parse each filename in turn.
    for filename in args:
        # Open the file, create the output filename.
        if filename != '-':
            f = open(filename, 'r')
            out = filename + '.html'
        else:
            f = sys.stdin
            out = 'out.html'
        # Perform the actual compilation.
        try:
            code = f.read()
            pages = parser.Parse(code)
        finally:
            # NOTE(review): when filename is '-', this closes sys.stdin --
            # confirm that is intended before reusing main() in-process.
            f.close()
        f = open(out, 'w')
        try:
            # Embed the parsed pages as JSON inside the HTML shell.
            f.write(TEMPLATE % {'story': json.dumps({'pages': pages})})
            print 'Compiled %r to %r.' % (filename, out)
        finally:
            f.close()
def BuildPage(s, l, t):
    """Parse action: wrap a page body in p.push() calls and attach its id.

    t[0] is the page's ('href', key, script) triple; t[1] is the compiled
    body script.  Returns {'id': key, '_value': script}.
    """
    logging.debug('BuildPage(%r, %r, %r)', s, l, t)
    _, key, script = t[0]
    body = "p.push('<p>');%s;p.push('</p>')" % t[1].strip()
    if script:
        # Run the page's setup script before emitting any output.
        body = ';'.join((script, body))
    t[1] = body
    return {'id': key, '_value': t[1]}
def BuildLink(s, l, t):
    """Parse action: turn a [text](target...) token group into an <a> tag.

    t[0] is the link text; t[1:] are ('href', key, script) triples.  With a
    single target the href is emitted directly; with several, the link calls
    the client-side choose() helper to pick one at random.  Returns the
    serialized anchor XML, or '[text]' when no targets were parsed.
    """
    logging.debug('BuildLink(%r, %r, %r)', s, l, t)
    text, hrefs = t[0], t[1:]
    if not hrefs:
        return '[%s]' % text
    doc = impl.createDocument(None, None, None)
    a = doc.createElement('a')
    try:
        # Parse the link text as XML so inline markup survives.
        dom = minidom.parseString(u'<p>%s</p>' % text)
        dom = dom.documentElement.childNodes[0]
    except Exception, err:
        # Fall back to a plain text node when the text is not valid XML.
        dom = doc.createTextNode(text)
    a.appendChild(dom)
    if len(hrefs) == 1:
        _, key, script = hrefs[0]
        if key.startswith('http:') or key.startswith('https:'):
            a.setAttribute('href', key)
        elif key.startswith('javascript'):
            a.setAttribute('href', key)
        elif script:
            a.setAttribute('href', 'javascript:%s;go(%r)' % (script, str(key)))
        else:
            a.setAttribute('href', 'javascript:go(%r)' % str(key))
    else:
        def Script(key, script):
            # Build the JavaScript statement for one candidate target.
            if key in ['http', 'https']:
                return 'window.location = %r' % str(key + ':' + script)
            elif key in ['javascript', '?']:
                return script
            elif script:
                return script + ';go(%r)' % str(key)
            else:
                return 'go(%r)' % str(key)
        # Multiple targets: choose() picks one of the wrapped functions.
        a.setAttribute('href', 'javascript:choose(%s)'
                       % ','.join('function(){%s}' % Script(key, script)
                                  for _, key, script in hrefs))
    return a.toxml()
def BuildText(s, l, t):
    """Parse action: render a run of text tokens through markdown.

    Returns None for whitespace-only runs, otherwise a JavaScript
    'p.push(...)' statement containing the rendered, ascii-escaped HTML.
    A single leading/trailing space is preserved from the original run.
    """
    logging.debug('BuildText(%r, %r, %r)', s, l, t)
    t = u''.join(unicode(x) for x in t)
    if not t.strip():
        return None
    else:
        text = markdown.markdown(t)
        # markdown wraps output in a paragraph; strip it, since BuildPage
        # supplies its own <p> wrapper around the whole body.
        if text.startswith('<p>') and text.endswith('</p>'):
            text = text[3:-4]
        # Collapse internal whitespace to single spaces.
        text = re.sub(r'\s+', ' ', text)
        if re.search(r'^\s+', t):
            text = u' ' + text
        if re.search(r'\s+$', t):
            text = text + u' '
        return u'p.push(%r)' % text.encode('ascii', 'xmlcharrefreplace')
class Parser(object):
    """A parser for Breadcrumb stories.

    A story is a sequence of pages; each page is '(id[:script])' followed
    by a markdown body with [text](target) links and <% script %> escapes.
    The grammar is built once at class level with pyparsing combinators.
    """
    def Parse(self, text):
        """Split `text` into pages and parse each one.

        Returns a list of {'id': ..., '_value': ...} dicts; on a parse
        error, a single synthetic 'error' page describing the problem.
        """
        logging.debug('Parser.Parse(%r)', text)
        try:
            results = []
            # A page starts at a '(' preceded by one or more newlines.
            pages = re.split(r'\n+\s*(?=\()', text)
            for page in pages:
                logging.debug('page == %r', page)
                if not page.strip():
                    continue
                ast = Parser.page.parseString(page.strip())
                results.append(ast[0])
            logging.debug('results == %r', results)
            return results
        except ParseException, err:
            # Log the error, with help text (for debugging, not a program fault).
            logging.error('%s:\n%s\n%s', err, err.line, ' ' * (err.column - 1) + '^')
            # Return the error to the user.
            return [{'id': 'error', '_value': ';'.join([
                'p.push("<p class=\'error\'>I found a problem while proofreading: ")',
                'p.push("<pre>")',
                'p.push(%r)' % str(err.line) + '\n',
                'p.push(%r)' % (' ' * (err.column - 1) + '^\n'),
                'p.push("</pre>")',
                'p.push(%r)' % str(err),
                'p.push("</p>")',
            ])}]
    # --- Grammar definitions (class-level pyparsing combinators) ---
    # A page/target id: letters, digits, '$' and '_'.
    id = Word(alphanums + '$_')
    script = Forward()
    LPAREN, RPAREN = Suppress('('), Suppress(')')
    # A link target: '(key)', '(key:script)', or a bare URL in parens.
    href = (LPAREN + (Regex(r'(?:https?|javascript)://[^)]+') ^ id)
            + Optional(Suppress(':') + script, default='') + RPAREN)
    href.setParseAction(lambda s, p, t: ('href', t[0], t[1]))
    # Plain text: anything up to a '[' link or a '<%' script escape.
    text = Regex(r'[^[<]+|</?\w.*?>|<(?!%)')
    LBRACK, RBRACK = Suppress('['), Suppress(']')
    link = (LBRACK + Regex(r'[^]]+') + RBRACK
            + Regex('\s*').suppress() + OneOrMore(href))
    link.setParseAction(BuildLink)
    # Script is executed or evaluated.
    script << Combine(
        ZeroOrMore(Regex('([^()/\'"%]|%(?!>))+').setName('boring chars')
                   ^ ('(' + script + ')').setName('parenthetical')
                   ^ cStyleComment
                   ^ quotedString)).setName('script')
    LESC, RESC = Suppress('<%'), Suppress('%>')
    # '<%= expr %>' pushes the value; '<% stmt %>' just executes.
    escaped = LESC + Optional('=', default=False) + script + RESC
    escaped.setName('script')
    escaped.setParseAction(lambda s, l, t: (t[0] and 'p.push(%s)' or '%s')
                           % str(t[1]).strip())
    markdown = OneOrMore(text | link)
    markdown.setParseAction(BuildText)
    body = Combine(OneOrMore(escaped ^ markdown), joinString=';')
    page = href + body + StringEnd()
    page.setParseAction(BuildPage)
# HTML shell for compiled stories.  %(story)s receives the JSON page list;
# the inline JavaScript compiles each page body into a function and provides
# the go()/choose() navigation helpers used by generated links.
TEMPLATE = """
<html>
<head>
<script src="http://www.google.com/jsapi"></script>
<script>
var story, state = {};
google.load("jquery", "1");
google.setOnLoadCallback(function() {
story = %(story)s;
story.cache = {}
// Compile the pages in the story.
for (i in story.pages) {
var body = ("var p = []; with(state) {"
+ story.pages[i]._value + "} return p.join('');");
try {
story.cache[story.pages[i].id] = new Function('state', body);
} catch (e) {
alert(e);
}
}
go(story.pages[0].id);
});
function choose() { // Call one of the arguments at random.
if (arguments.length == 0) {
alert('No arguments passed to choose().');
} else {
choice = Math.floor(Math.random() * arguments.length);
arguments[choice]();
}
}
function go(key) {
$('#view').fadeOut('fast', function() {
try {
var fn = story.cache[key];
var html;
if (fn != undefined) {
html = fn(state);
if (story.cache['Status']) {
html += '<hr />' + story.cache['Status'](state);
}
} else {
html = '<p>There is no page ' + key + '.</p>';
}
$('#view').html(html);
} catch (e) {
$('#view').html('<p>Error at ' + key + ': ' + e + '</p>');
} finally {
$('#view').fadeIn('fast');
}
});
}
</script>
</head>
<body><div id="view"><p>Just a second...</p></div></body>
</html>
"""
def Flags():
    """Parse command-line options; exit with a usage error when no scripts given."""
    from optparse import OptionParser
    option_parser = OptionParser(
        version='bcc 1.0',
        usage='usage: %prog [options] script1 [script2 [...]]',
        description='A Breadcrumb story compiler.',
        epilog='See https://code.google.com/p/google-breadcrumb/ for details.')
    options, filenames = option_parser.parse_args()
    if not filenames:
        option_parser.error('You must provide a script filename (or "-" for stdin).')
    return options, filenames
# Script entry point: parse command-line flags, then compile each named file.
if __name__ == '__main__':
    opts, args = Flags()
    main(*args, **opts.__dict__)
| Python |
"""Implementation of JSONEncoder
"""
import re
from decimal import Decimal
def _import_speedups():
try:
from simplejson import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
# Bind the C speedups (or None, None) once at import time.
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
from simplejson.decoder import PosInf
# Characters that must be escaped inside a JSON string (unicode output).
ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]')
# Characters that must be escaped when producing ASCII-only output.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Short escape sequences for the common control/special characters.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
    u'\u2028': '\\u2028',
    u'\u2029': '\\u2029',
}
# Every other control character falls back to a \uXXXX escape.
for i in range(0x20):
    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
FLOAT_REPR = repr
def encode_basestring(s):
    """Return `s` as a quoted, escaped JSON string literal (unicode)."""
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    escaped = ESCAPE.sub(lambda match: ESCAPE_DCT[match.group(0)], s)
    return u'"' + escaped + u'"'
def py_encode_basestring_ascii(s):
    """Return `s` as a quoted, ASCII-only JSON string literal."""
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        ch = match.group(0)
        try:
            return ESCAPE_DCT[ch]
        except KeyError:
            pass
        codepoint = ord(ch)
        if codepoint < 0x10000:
            #return '\\u{0:04x}'.format(codepoint)
            return '\\u%04x' % (codepoint,)
        # Code points above the BMP become a UTF-16 surrogate pair.
        codepoint -= 0x10000
        high = 0xd800 | ((codepoint >> 10) & 0x3ff)
        low = 0xdc00 | (codepoint & 0x3ff)
        #return '\\u{0:04x}\\u{1:04x}'.format(high, low)
        return '\\u%04x\\u%04x' % (high, low)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Prefer the C implementation when the _speedups extension is available.
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.
    Supports the following objects and types by default:
    +-------------------+---------------+
    | Python | JSON |
    +===================+===============+
    | dict, namedtuple | object |
    +-------------------+---------------+
    | list, tuple | array |
    +-------------------+---------------+
    | str, unicode | string |
    +-------------------+---------------+
    | int, long, float | number |
    +-------------------+---------------+
    | True | true |
    +-------------------+---------------+
    | False | false |
    +-------------------+---------------+
    | None | null |
    +-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Class-level defaults; __init__ may override via `separators`/`indent`.
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None,
            use_decimal=True, namedtuple_as_object=True,
            tuple_as_array=True):
        """Constructor for JSONEncoder, with sensible defaults.
        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None. If
        skipkeys is True, such items are simply skipped.
        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped. If
        ensure_ascii is false, the output will be unicode object.
        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.
        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such. This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.
        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.
        If indent is a string, then JSON array elements and object members
        will be pretty-printed with a newline followed by that string repeated
        for each level of nesting. ``None`` (the default) selects the most compact
        representation without any newlines. For backwards compatibility with
        versions of simplejson earlier than 2.1.0, an integer is also accepted
        and is converted to a string with that many spaces.
        If specified, separators should be a (item_separator, key_separator)
        tuple. The default is (', ', ': '). To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.
        If specified, default is a function that gets called for objects
        that can't otherwise be serialized. It should return a JSON encodable
        version of the object or raise a ``TypeError``.
        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        If use_decimal is true (the default), ``decimal.Decimal`` will
        be supported directly by the encoder. For the inverse, decode JSON
        with ``parse_float=decimal.Decimal``.
        If namedtuple_as_object is true (the default), tuple subclasses with
        ``_asdict()`` methods will be encoded as JSON objects.
        If tuple_as_array is true (the default), tuple (and subclasses) will
        be encoded as JSON arrays.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.use_decimal = use_decimal
        self.namedtuple_as_object = namedtuple_as_object
        self.tuple_as_array = tuple_as_array
        # Back-compat: integer indent means that many spaces.
        if isinstance(indent, (int, long)):
            indent = ' ' * indent
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        elif indent is not None:
            # Avoid trailing whitespace before each pretty-printed newline.
            self.item_separator = ','
        if default is not None:
            self.default = default
        self.encoding = encoding
    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).
        For example, to support arbitrary iterators, you could
        implement default like this::
            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError(repr(o) + " is not JSON serializable")
    def encode(self, o):
        """Return a JSON string representation of a Python data structure.
        >>> from simplejson import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed. The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)
    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.
        For example::
            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the encoder so byte strings are decoded first.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)
        def floatstr(o, allow_nan=self.allow_nan,
                _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on
            # the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text
        key_memo = {}
        # Use the C encoder only in one-shot, non-indented mode.
        if (_one_shot and c_make_encoder is not None
                and self.indent is None):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan, key_memo, self.use_decimal,
                self.namedtuple_as_object, self.tuple_as_array)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot, self.use_decimal,
                self.namedtuple_as_object, self.tuple_as_array)
        try:
            return _iterencode(o, 0)
        finally:
            key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
    """Encoder producing JSON that is safe to embed directly in HTML.

    The characters &, < and > must be escaped inside <script> tags, and
    entity references are not expanded there, so they are emitted as
    \\u0026, \\u003c and \\u003e instead.
    """
    def encode(self, o):
        """Encode `o`, bypassing the base class's one-shot fast paths."""
        # Override JSONEncoder.encode because it has hacks for
        # performance that make things more complicated.
        pieces = list(self.iterencode(o, True))
        joiner = '' if self.ensure_ascii else u''
        return joiner.join(pieces)
    def iterencode(self, o, _one_shot=False):
        """Yield chunks with &, < and > replaced by \\u escapes."""
        for piece in super(JSONEncoderForHTML, self).iterencode(o, _one_shot):
            yield (piece.replace('&', '\\u0026')
                        .replace('<', '\\u003c')
                        .replace('>', '\\u003e'))
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        _use_decimal, _namedtuple_as_object, _tuple_as_array,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        Decimal=Decimal,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
        ):
    """Build the pure-Python iterencode closure.

    Returns a function _iterencode(o, indent_level) that yields JSON text
    chunks for `o`.  The trailing keyword defaults bind builtins as locals
    purely for lookup speed inside the nested generators.
    """
    def _iterencode_list(lst, _current_indent_level):
        # Yield the JSON array serialization of `lst`, chunk by chunk.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # Each chunk after the first carries its leading separator.
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield buf + str(value)
            else:
                # Container or custom value: recurse into a sub-generator.
                yield buf
                if isinstance(value, list):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif (_namedtuple_as_object and isinstance(value, tuple) and
                        hasattr(value, '_asdict')):
                    chunks = _iterencode_dict(value._asdict(),
                                              _current_indent_level)
                elif _tuple_as_array and isinstance(value, tuple):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _iterencode_dict(dct, _current_indent_level):
        # Yield the JSON object serialization of `dct`, chunk by chunk.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield str(value)
            else:
                # Container or custom value: recurse into a sub-generator.
                if isinstance(value, list):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif (_namedtuple_as_object and isinstance(value, tuple) and
                        hasattr(value, '_asdict')):
                    chunks = _iterencode_dict(value._asdict(),
                                              _current_indent_level)
                elif _tuple_as_array and isinstance(value, tuple):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(o, _current_indent_level):
        # Top-level dispatch on the type of `o`.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, list):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif (_namedtuple_as_object and isinstance(o, tuple) and
                hasattr(o, '_asdict')):
            for chunk in _iterencode_dict(o._asdict(), _current_indent_level):
                yield chunk
        elif (_tuple_as_array and isinstance(o, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        elif _use_decimal and isinstance(o, Decimal):
            yield str(o)
        else:
            # Unknown type: let the user-supplied default() convert it,
            # tracking it for circular-reference detection meanwhile.
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
| Python |
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
def _import_c_scanstring():
try:
from simplejson._speedups import scanstring
return scanstring
except ImportError:
return None
# C implementation of scanstring, or None when the extension is missing.
c_scanstring = _import_c_scanstring()
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
# The struct module in Python 2.4 would get frexp() out of range here
# when an endian is specified in the format string. Fixed in Python 2.5+
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
# Module-level IEEE-754 specials shared by the decoder (and the encoder).
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
    """ValueError subclass carrying structured position information.

    Attributes:
        msg: the unformatted error message
        doc: the JSON document being parsed
        pos: start index in `doc` where parsing failed
        end: end index in `doc` where parsing failed (may be None)
        lineno, colno: line and column corresponding to `pos`
        endlineno, endcolno: line and column for `end` (None when end is None)
    """
    def __init__(self, msg, doc, pos, end=None):
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        self.lineno, self.colno = linecol(doc, pos)
        if end is None:
            self.endlineno = self.endcolno = None
        else:
            self.endlineno, self.endcolno = linecol(doc, end)
def linecol(doc, pos):
    """Return the (1-based line, column) of character offset `pos` in `doc`."""
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        # First line: the column is simply the offset itself.
        return lineno, pos
    return lineno, pos - doc.rindex('\n', 0, pos)
def errmsg(msg, doc, pos, end=None):
    """Format a decode-error message with line/column context.

    Note that this function is called from _speedups as well.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return ('%s: line %d column %d - line %d column %d (char %d - %d)'
            % (msg, lineno, colno, endlineno, endcolno, pos, end))
# Literal tokens JSON allows outside strings, mapped to their float values.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}
# A run of plain characters followed by a quote, control char, or backslash.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character backslash escape sequences.
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.
    Returns a tuple of the decoded string and the index of the character in s
    after the end quote.

    _b and _m bind the escape table and chunk matcher as locals for speed.
    """
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                #msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: keep the raw control character.
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise JSONDecodeError(msg, s, end)
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise JSONDecodeError(msg, s, end)
                uni2 = int(esc2, 16)
                # Combine the high and low surrogates into one code point.
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use speedup if available: prefer the C implementation of scanstring,
# falling back to the pure-python version above.
scanstring = c_scanstring or py_scanstring

# Matches a (possibly empty) run of insignificant JSON whitespace, and the
# corresponding character set for cheap single-character membership tests.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
        object_pairs_hook, memo=None,
        _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object from ``s`` starting just after the opening '{'.

    Returns ``(obj, end)`` where *obj* is the decoded mapping (possibly
    transformed by *object_hook* / *object_pairs_hook*, the latter taking
    priority) and *end* is the index just past the closing '}'.
    Raises JSONDecodeError on malformed input.
    """
    # Backwards compatibility
    if memo is None:
        memo = {}
    # memo interns repeated key strings so equal keys share one object.
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end)
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise JSONDecodeError("Expecting : delimiter", s, end)
        end += 1
        # Skip whitespace after ':', one char inline before falling back to
        # the regex for longer runs.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        pairs.append((key, value))
        # Skip whitespace, then require ',' (continue) or '}' (done).
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end - 1)
        # Skip whitespace before the next property name.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end - 1)
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array from ``s`` starting just after the opening '['.

    Returns ``(values, end)`` where *values* is the decoded list and *end*
    is the index just past the closing ']'.  Raises JSONDecodeError on
    malformed input.
    """
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        _append(value)
        # Skip whitespace, then require ',' (continue) or ']' (done).
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end)
        # Skip whitespace before the next element.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default).  It has no effect when decoding :class:`unicode` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.

        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`.  This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.

        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded.  By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).

        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded.  By default, this is equivalent to
        ``int(num_str)``.  This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).

        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``.  This
        can be used to raise an exception if invalid JSON numbers are
        encountered.

        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # Shared key-interning cache used by JSONObject during parsing.
        self.memo = {}
        # Built last: make_scanner snapshots the configuration above.
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)

        Raises JSONDecodeError if trailing non-whitespace data remains.
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj

    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
        return obj, end
| Python |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(o) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.2.1'
# Public API of the simplejson package.
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
    'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
# Module-level encoder configured with all default options.  dump()/dumps()
# reuse it on their fast path instead of constructing a fresh JSONEncoder;
# the fast-path condition in those functions must mirror these arguments.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
    use_decimal=True,
    namedtuple_as_object=True,
    tuple_as_array=True,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=True,
        namedtuple_as_object=True, tuple_as_array=True,
        **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If *indent* is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``True``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    If *namedtuple_as_object* is true (default: ``True``),
    :class:`tuple` subclasses with ``_asdict()`` methods will be encoded
    as JSON objects.

    If *tuple_as_array* is true (default: ``True``),
    :class:`tuple` (and subclasses) will be encoded as JSON arrays.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    # cached encoder: this predicate must match exactly how
    # _default_encoder was constructed at module level.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and use_decimal
        and namedtuple_as_object and tuple_as_array and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal,
            namedtuple_as_object=namedtuple_as_object,
            tuple_as_array=tuple_as_array,
            **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=True,
        namedtuple_as_object=True,
        tuple_as_array=True,
        **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``True``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    If *namedtuple_as_object* is true (default: ``True``),
    :class:`tuple` subclasses with ``_asdict()`` methods will be encoded
    as JSON objects.

    If *tuple_as_array* is true (default: ``True``),
    :class:`tuple` (and subclasses) will be encoded as JSON arrays.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    # cached encoder: this predicate must match exactly how
    # _default_encoder was constructed at module level.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and use_decimal
        and namedtuple_as_object and tuple_as_array and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal,
        namedtuple_as_object=namedtuple_as_object,
        tuple_as_array=tuple_as_array,
        **kw).encode(obj)
# Module-level decoder configured with all default options; loads() reuses
# it on its fast path instead of constructing a fresh JSONDecoder.
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
                               object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
        **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
    a JSON document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default).  It has no effect when decoding :class:`unicode` objects.

    Note that currently only encodings that are a superset of ASCII work,
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`.  This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`.  This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.

    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded.  By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded.  By default, this is equivalent to
    ``int(num_str)``.  This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).

    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``.  This
    can be used to raise an exception if invalid JSON numbers are
    encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.
    """
    # NOTE(review): *namedtuple_as_object* and *tuple_as_array* are accepted
    # here but never forwarded to loads() below (they are encoder options and
    # loads()/JSONDecoder would reject them) -- confirm they exist only for
    # signature symmetry with dump().
    return loads(fp.read(),
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
        use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default).  It has no effect when decoding :class:`unicode` objects.

    Note that currently only encodings that are a superset of ASCII work,
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`.  This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`.  This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.

    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded.  By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded.  By default, this is equivalent to
    ``int(num_str)``.  This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).

    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``.  This
    can be used to raise an exception if invalid JSON numbers are
    encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.
    """
    # Fast path: every option at its default means the shared module-level
    # decoder can be used as-is.
    if (cls is None and encoding is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and object_pairs_hook is None
            and not use_decimal and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Only forward hooks/parsers the caller actually supplied, so the
    # decoder's own defaults apply otherwise.
    if object_hook is not None:
        kw['object_hook'] = object_hook
    if object_pairs_hook is not None:
        kw['object_pairs_hook'] = object_pairs_hook
    if parse_float is not None:
        kw['parse_float'] = parse_float
    if parse_int is not None:
        kw['parse_int'] = parse_int
    if parse_constant is not None:
        kw['parse_constant'] = parse_constant
    if use_decimal:
        # use_decimal is shorthand for parse_float=Decimal; supplying both
        # would be ambiguous, so reject it.
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
    """Switch the C ``_speedups`` extension on or off at runtime.

    Rebinds the scanner/encoder entry points inside the decoder, encoder
    and scanner submodules, then rebuilds the cached module-level default
    decoder and encoder so they pick up the new implementations.  Intended
    for testing; *enabled* selects the C implementations when available.
    """
    import simplejson.decoder as dec
    import simplejson.encoder as enc
    import simplejson.scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if enabled:
        # Prefer C implementations, falling back to pure python.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
            enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        # Force the pure-python implementations everywhere.
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    dec.make_scanner = scan.make_scanner
    # Rebuild the cached defaults so they bind the newly selected
    # implementations rather than the old ones.
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
| Python |
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import simplejson as json
def main():
    """Validate and pretty-print JSON.

    Reads from stdin (no args), or from argv[1]; writes to stdout, or to
    argv[2] when given.  Exits via SystemExit with a usage string or the
    parse error message on failure.
    """
    if len(sys.argv) == 1:
        infile = sys.stdin
        outfile = sys.stdout
    elif len(sys.argv) == 2:
        infile = open(sys.argv[1], 'rb')
        outfile = sys.stdout
    elif len(sys.argv) == 3:
        infile = open(sys.argv[1], 'rb')
        outfile = open(sys.argv[2], 'wb')
    else:
        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
    # NOTE(review): opened files are never explicitly closed; tolerable in a
    # short-lived CLI, but a try/finally (or with-block) would be tidier.
    try:
        # OrderedDict + sort_keys gives deterministic, readable output.
        obj = json.load(infile,
                        object_pairs_hook=json.OrderedDict,
                        use_decimal=True)
    except ValueError, e:
        raise SystemExit(e)
    json.dump(obj, outfile, sort_keys=True, indent='    ', use_decimal=True)
    outfile.write('\n')


if __name__ == '__main__':
    main()
| Python |
"""JSON token scanner
"""
import re
def _import_c_make_scanner():
try:
from simplejson._speedups import make_scanner
return make_scanner
except ImportError:
return None
c_make_scanner = _import_c_make_scanner()
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
object_pairs_hook = context.object_pairs_hook
memo = context.memo
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict,
_scan_once, object_hook, object_pairs_hook, memo)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
def scan_once(string, idx):
try:
return _scan_once(string, idx)
finally:
memo.clear()
return scan_once
make_scanner = c_make_scanner or py_make_scanner
| Python |
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
    all
except NameError:
    # Pre-2.5 interpreters lack the all() builtin; provide a
    # short-circuiting equivalent.
    def all(seq):
        for item in seq:
            if not item:
                return False
        return True
class OrderedDict(dict, DictMixin):
    """Dictionary that remembers insertion order (pure-python recipe).

    Order is kept in a doubly linked list of ``[key, prev, next]`` cells:
    ``self.__map`` maps each key to its cell and ``self.__end`` is the
    sentinel node that closes the circle.
    """

    def __init__(self, *args, **kwds):
        # Same signature as dict(): at most one positional mapping/iterable
        # plus keyword items.
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__end
        except AttributeError:
            # First initialization: build the empty linked list.
            self.clear()
        self.update(*args, **kwds)

    def clear(self):
        """Remove all items and reset the order-tracking structures."""
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        # New keys are appended at the tail of the linked list; existing
        # keys keep their position.
        if key not in self:
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the cell from the doubly linked list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        """Iterate over keys in insertion order."""
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        """Iterate over keys in reverse insertion order."""
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def popitem(self, last=True):
        """Remove and return a ``(key, value)`` pair; last-inserted pair by
        default, first-inserted when *last* is false."""
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        """Pickle support: emit the ordered items, excluding the linked-list
        bookkeeping attributes from the instance dict."""
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    # Derive the bulk of the mapping API from DictMixin so everything goes
    # through the ordered __iter__/__setitem__/__delitem__ above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        """Alternate constructor: each key in *iterable* maps to *value*."""
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Two OrderedDicts are equal only if their items match in order;
        # comparison with a plain dict is order-insensitive.
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other
| Python |
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
from bcc import *
import unittest
class TestParser(unittest.TestCase):
    """Unit tests for the Breadcrumb parser."""

    def setUp(self):
        self.parser = Parser()

    def check(self, story, page_count):
        """Parse *story*, assert *page_count* pages were produced, and
        return the parsed pages for further inspection."""
        out = self.parser.Parse(story)
        # assertTrue replaces the deprecated assert_ alias.
        self.assertTrue(out is not None, story)
        self.assertEqual(len(out), page_count,
                         'generated %i pages instead of %i: %r' %
                         (len(out), page_count, out))
        return out

    def checkPage(self, page, id, _value):
        """Assert *page* carries the expected *id* and that *_value* occurs
        somewhere in its rendered '_value' field."""
        self.assertTrue('id' in page, 'page %r has no id' % page)
        self.assertTrue('_value' in page, 'page %r has no _value' % page)
        self.assertEqual(page['id'], id, 'page %r has the wrong id %r' % (page, id))
        self.assertTrue(_value in page['_value'],
                        'page %r has _value not including %r' % (page, _value))

    def testSimple(self):
        """A single page parses to one page with its text."""
        out = self.check('(1) Hello, world!', 1)
        self.checkPage(out[0], '1', 'Hello, world!')

    def testTwoPages(self):
        """Two linked pages parse; the link renders as an anchor tag."""
        out = self.check("""
        (1) Hello, world! [Continue](2)?
        (2) Hello again, world!
        """, 2)
        self.checkPage(out[0], '1', 'Hello, world!')
        self.checkPage(out[0], '1', '<a')
        self.checkPage(out[1], '2', 'Hello again, world!')

    def testTwoPagesWithHtmlInLink(self):
        """Inline HTML inside link text is preserved."""
        out = self.check("""
        (1) Hello, world! [<b>Continue</b>](2)?
        (2) Hello again, world!
        """, 2)
        self.checkPage(out[0], '1', 'Hello, world!')
        self.checkPage(out[0], '1', '<b>Continue</b>')

    def testTwoPagesWithBadHtmlInLink(self):
        """Unclosed HTML inside link text passes through unchanged."""
        out = self.check("""
        (1) Hello, world! [<button>Continue](2)?
        (2) Hello again, world!
        """, 2)
        self.checkPage(out[0], '1', 'Hello, world!')
        self.checkPage(out[0], '1', '<button>Continue')

    def testMarkdown(self):
        """Markdown in page bodies is rendered (paragraphs, emphasis)."""
        out = self.check("""
        (1) Hello, world, with EOLs!

        [Continue](2)?
        (2) Hello *again*, world!
        """, 2)
        self.checkPage(out[0], '1', "EOLs!</p>")
        self.checkPage(out[1], '2', '<em>again</em>')

    def testEOLThenNewPage(self):
        """A link at end of line still renders before the next page starts."""
        out = self.check("""
        (1) Hello, world, with EOLs; [Continue](2)
        (2) Hello *again*, world!
        """, 2)
        self.checkPage(out[0], '1', ">Continue</a>")
        self.checkPage(out[1], '2', '<em>again</em>')

    def testAttachedScripts(self):
        """Scripts attached to links and page headers are accepted."""
        out = self.check("""
        (1) Hello, world! [Continue](2: back='1')?
        (2: alert('Hi!')) Hello again, world!
        """, 2)
        self.checkPage(out[0], '1', 'Hello, world!')
        self.checkPage(out[1], '2', 'Hello again, world!')

    def testEmbeddedScripts(self):
        """<%= ... %> and <% ... %> blocks are compiled into the page."""
        out = self.check("""
        (1) Hello, <%= 'green' %> world! [Continue](2)?
        (2) Hello again, world! <% alert('Hi!') %>
        """, 2)
        self.checkPage(out[0], '1', "p.push('green')")
        self.checkPage(out[1], '2', 'alert(')

    def testLinkAfterEndOfLine(self):
        """A link split across a line break is still recognized."""
        out = self.check("""
        (1) Test
        Test [link] (2).
        (2) Test 2.
        """, 2)
        self.checkPage(out[0], '1', '<a')
        self.checkPage(out[1], '2', 'Test 2')

    def testSimpleScript(self):
        """A page-header script is carried through to the output."""
        out = self.check('(1: baz=[foo]) Hello, world!', 1)
        self.checkPage(out[0], '1', 'baz=[foo]')
        self.checkPage(out[0], '1', 'Hello, world!')

    def testScriptWithoutEOLs(self):
        """Embedded expressions compile even without surrounding newlines."""
        out = self.check("""
        (1) Testing
        Hello, <%= cruel %> world!<br/>
        Line Two.
        """, 1)
        self.checkPage(out[0], '1', ');p.push(cruel);p.push(')

    def testInterestingScript(self):
        """Scripts containing operators and '<' survive rendering."""
        out = self.check("""
        (1) Hello, [world](2: foo.happy-=.25)
        (2) Is <%= foo.happy %> < 0?
        """, 2)
        self.checkPage(out[0], '1', 'foo.happy-=.25')

    def testExternalLink(self):
        """An http:// target renders as an external anchor."""
        out = self.check("""
        (1) Hello, [world](http://google.com/)
        """, 1)
        self.checkPage(out[0], '1', '<a href=')

    def testSyntaxNotServerError(self):
        """Malformed story syntax must produce an error page, not raise.

        Bug fix: the original used a bare ``except:`` that swallowed the
        inner assertion failures and then called ``self.assertFalse()``
        with no argument (a TypeError).  Catch only real parser failures
        and report them via fail(); let assertion failures propagate.
        """
        try:
            out = self.check('(1) Hello, [world]! (http://google.com/)', 1)
        except Exception as e:
            self.fail('parser raised %r instead of producing an error page'
                      % (e,))
        self.checkPage(out[0], 'error', 'world')
if __name__ == '__main__':
    # Discover and run every TestParser case via unittest's CLI runner.
    unittest.main()
| Python |
#!/usr/bin/env python
# Demonstration client script for Freebase's OAuth implementation
#
# Author: Kurt Bollacker, May 2010 (with cribbing from Alec Flett)
# Copyright: Public Domain
#
#
import oauth,urlparse,urllib,urllib2,json
# Put your consumer key and secret here!
# NOTE(review): these are credentials -- keep real values out of version control.
CONSUMER_KEY = 'YOUR APPPLICATION CONSUMER KEY (A FREEBASE GUID)'
CONSUMER_SECRET = 'YOUR APPLICATION CONSUMER SECRET'
# Define default callback and ondecline
# (URLs the OAuth provider redirects to after the user accepts or declines).
CALLBACK_URL = 'http://MYSERVER:7777/getaccesstoken'
DECLINE_URL = 'http://MYSERVER:7777/failed'
# These URLs should work for production
#REQUEST_TOKEN_URL = 'https://api.freebase.com/api/oauth/request_token'
#ACCESS_TOKEN_URL = 'https://api.freebase.com/api/oauth/access_token'
#AUTHORIZATION_URL = 'https://www.freebase.com/signin/authorize_token'
#USER_INFO_URL='http://freebase.com/api/service/user_info'
#MQL_WRITE_URL='http://api.freebase.com/api/service/mqlwrite'
# These URLs should work for sandbox
REQUEST_TOKEN_URL = 'https://api.sandbox-freebase.com/api/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.sandbox-freebase.com/api/oauth/access_token'
AUTHORIZATION_URL = 'https://sandbox-freebase.com/signin/authorize_token'
USER_INFO_URL='http://api.sandbox-freebase.com/api/service/user_info'
MQL_WRITE_URL='http://api.sandbox-freebase.com/api/service/mqlwrite'
def fetch_url(url, http_method='GET',headers={},body=None):
''' Fetch the given URL, cleaning up headers if needed'''
headers.update({'X-Requested-With':'MyTestScript',
'X-Metaweb-Request':'True',
'Content-Type': 'application/x-www-form-urlencoded'})
if http_method=='POST' and body==None:
body=''
if body!=None:
headers['Content-Length']=str(len(body))
print "HTTP METHOD = %s\n " % http_method
print "HEADERS = %s\n " % json.dumps(headers)
print "URL = %s\n" % url
print "BODY = %s\n" % body
req=urllib2.Request(url,body,headers)
try:
response=urllib2.urlopen(req)
except urllib2.HTTPError,e:
print e
return None
print response.getcode()
if response.getcode() != 200:
print "GETCODE "+str(response.getcode())+" MSG "+response.msg+" INFO "+response.info+" READ "+response.read()
return response.read()
def get_request_token():
    '''Obtain an OAuth request token from the request-token endpoint.'''
    consumer = oauth.OAuthConsumer(CONSUMER_KEY, CONSUMER_SECRET)
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_method='GET', http_url=REQUEST_TOKEN_URL)
    oauth_request.sign_request(
        oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None)
    raw = fetch_url(REQUEST_TOKEN_URL,
                    http_method='GET',
                    headers=oauth_request.to_header())
    # Response is form-encoded: 'oauth_token=...&oauth_token_secret=...'.
    pairs = [part.split('=') for part in raw.split('&')]
    return dict(pairs)
def authorize_token_url(request_token_key, callback=CALLBACK_URL, decline=DECLINE_URL):
    '''Build the URL the user must visit to authorize the request token.'''
    return ('%s?oauth_token=%s&oauth_callback=%s&ondecline=%s'
            % (AUTHORIZATION_URL, request_token_key, callback, decline))
def get_access_token(key, secret):
    '''Exchange an authorized request token for an access token.'''
    request_token = oauth.OAuthToken(key, secret)
    consumer = oauth.OAuthConsumer(CONSUMER_KEY, CONSUMER_SECRET)
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_method='GET', token=request_token,
        http_url=ACCESS_TOKEN_URL)
    oauth_request.sign_request(
        oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, request_token)
    raw = fetch_url(ACCESS_TOKEN_URL,
                    http_method='GET',
                    headers=oauth_request.to_header())
    # Response is form-encoded: 'oauth_token=...&oauth_token_secret=...'.
    return dict(part.split('=') for part in raw.split('&'))
def freebase_user_info(key, secret):
    ''' Do a test of the Freebase user_info service '''
    access_token = oauth.OAuthToken(key, secret)
    consumer = oauth.OAuthConsumer(CONSUMER_KEY, CONSUMER_SECRET)
    signed = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_url=USER_INFO_URL, http_method='POST',
        token=access_token)
    signed.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(),
                        consumer, access_token)
    return fetch_url(USER_INFO_URL,
                     http_method='POST',
                     headers=signed.to_header())
def freebase_mqlwrite(key, secret):
    ''' Do a test of the Freebase MQL write service '''
    access_token = oauth.OAuthToken(key, secret)
    consumer = oauth.OAuthConsumer(CONSUMER_KEY, CONSUMER_SECRET)
    # Unconditionally create a fresh topic; the service fills in its id.
    queryenvelope = {"q": {"query": {"id": None,
                                     "create": "unconditional"}}}
    querybody = {'queries': json.dumps(queryenvelope)}
    signed = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_url=MQL_WRITE_URL, http_method='POST',
        token=access_token, parameters=querybody)
    signed.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(),
                        consumer, access_token)
    return fetch_url(MQL_WRITE_URL,
                     http_method='POST',
                     body=urllib.urlencode(querybody),
                     headers=signed.to_header())
if __name__=='__main__':
    # Make sure to define consumer key/secret and callback url in defaults at the top of this script.
    # Get request token
    print "Getting Request Token\n"
    reqtok=get_request_token()
    print "Our request token is = ",reqtok
    # Assemble an authorization URL
    print "Authorizing Request Token\n"
    authorizeurl=authorize_token_url(reqtok['oauth_token'])
    # Wait for the user to authorize (blocks until the user presses return)
    raw_input("Visit the URL= "+authorizeurl+" and then press return.")
    # Get our access token
    print "Getting Access Token\n"
    acctok=get_access_token(reqtok['oauth_token'],reqtok['oauth_token_secret'])
    print "Our access token is = ",acctok
    # As a test, call Freebase's API for user information and MQL write
    print "Getting Freebase User Info\n"
    print freebase_user_info(acctok['oauth_token'],acctok['oauth_token_secret'])
    print "Performing a Freebase Test Write\n"
    print freebase_mqlwrite(acctok['oauth_token'],acctok['oauth_token_secret'])
| Python |
from apiclient import discovery
from apiclient import model
import json
DEVELOPER_KEY = 'YOUR-KEY-GOES-HERE'
DEVELOPER_KEY = open('DEVELOPER_KEY').read() #hide
# Suppress the default alt= query parameter the API client would add.
model.JsonModel.alt_param = ""
freebase = discovery.build('freebase', 'v1', developerKey=DEVELOPER_KEY)
# Full-text search; print the Freebase topic id of every hit.
response = freebase.search(query='John Smith').execute()
for result in response['results']:
  print result['id']
| Python |
from apiclient import discovery
from apiclient import model
import json
DEVELOPER_KEY = ''
# Suppress the default alt= query parameter the API client would add.
model.JsonModel.alt_param = ""
freebase = discovery.build('freebase', 'v1', developerKey=DEVELOPER_KEY)
# MQL: match every topic typed /film/film; None values are filled in per match.
query = [{'id': None, 'name': None, 'type': '/film/film'}]
def do_query(cursor=""):
response = json.loads(freebase.mqlread(query=json.dumps(query), cursor=cursor).execute())
for item in response['result']:
print item['name']
return response.get("cursor")
# Page through all results until the service returns a falsy cursor.
cursor = do_query()
while cursor:
    cursor = do_query(cursor)
| Python |
#!/usr/bin/env python
# Demonstration client script for Freebase's OAuth implementation
#
# Author: Kurt Bollacker, May 2010 (with cribbing from Alec Flett)
# Copyright: Public Domain
#
#
import oauth,urlparse,urllib,urllib2,json
# Put your consumer key and secret here!
# NOTE(review): these are credentials -- keep real values out of version control.
CONSUMER_KEY = 'YOUR APPPLICATION CONSUMER KEY (A FREEBASE GUID)'
CONSUMER_SECRET = 'YOUR APPLICATION CONSUMER SECRET'
# Define default callback and ondecline
# (URLs the OAuth provider redirects to after the user accepts or declines).
CALLBACK_URL = 'http://MYSERVER:7777/getaccesstoken'
DECLINE_URL = 'http://MYSERVER:7777/failed'
# These URLs should work for production
#REQUEST_TOKEN_URL = 'https://api.freebase.com/api/oauth/request_token'
#ACCESS_TOKEN_URL = 'https://api.freebase.com/api/oauth/access_token'
#AUTHORIZATION_URL = 'https://www.freebase.com/signin/authorize_token'
#USER_INFO_URL='http://freebase.com/api/service/user_info'
#MQL_WRITE_URL='http://api.freebase.com/api/service/mqlwrite'
# These URLs should work for sandbox
REQUEST_TOKEN_URL = 'https://api.sandbox-freebase.com/api/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.sandbox-freebase.com/api/oauth/access_token'
AUTHORIZATION_URL = 'https://sandbox-freebase.com/signin/authorize_token'
USER_INFO_URL='http://api.sandbox-freebase.com/api/service/user_info'
MQL_WRITE_URL='http://api.sandbox-freebase.com/api/service/mqlwrite'
def fetch_url(url, http_method='GET',headers={},body=None):
''' Fetch the given URL, cleaning up headers if needed'''
headers.update({'X-Requested-With':'MyTestScript',
'X-Metaweb-Request':'True',
'Content-Type': 'application/x-www-form-urlencoded'})
if http_method=='POST' and body==None:
body=''
if body!=None:
headers['Content-Length']=str(len(body))
print "HTTP METHOD = %s\n " % http_method
print "HEADERS = %s\n " % json.dumps(headers)
print "URL = %s\n" % url
print "BODY = %s\n" % body
req=urllib2.Request(url,body,headers)
try:
response=urllib2.urlopen(req)
except urllib2.HTTPError,e:
print e
return None
print response.getcode()
if response.getcode() != 200:
print "GETCODE "+str(response.getcode())+" MSG "+response.msg+" INFO "+response.info+" READ "+response.read()
return response.read()
def get_request_token():
    '''Obtain an OAuth request token from the request-token endpoint.'''
    consumer = oauth.OAuthConsumer(CONSUMER_KEY, CONSUMER_SECRET)
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_method='GET', http_url=REQUEST_TOKEN_URL)
    oauth_request.sign_request(
        oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None)
    raw = fetch_url(REQUEST_TOKEN_URL,
                    http_method='GET',
                    headers=oauth_request.to_header())
    # Response is form-encoded: 'oauth_token=...&oauth_token_secret=...'.
    pairs = [part.split('=') for part in raw.split('&')]
    return dict(pairs)
def authorize_token_url(request_token_key, callback=CALLBACK_URL, decline=DECLINE_URL):
    '''Build the URL the user must visit to authorize the request token.'''
    return ('%s?oauth_token=%s&oauth_callback=%s&ondecline=%s'
            % (AUTHORIZATION_URL, request_token_key, callback, decline))
def get_access_token(key, secret):
    '''Exchange an authorized request token for an access token.'''
    request_token = oauth.OAuthToken(key, secret)
    consumer = oauth.OAuthConsumer(CONSUMER_KEY, CONSUMER_SECRET)
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_method='GET', token=request_token,
        http_url=ACCESS_TOKEN_URL)
    oauth_request.sign_request(
        oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, request_token)
    raw = fetch_url(ACCESS_TOKEN_URL,
                    http_method='GET',
                    headers=oauth_request.to_header())
    # Response is form-encoded: 'oauth_token=...&oauth_token_secret=...'.
    return dict(part.split('=') for part in raw.split('&'))
def freebase_user_info(key, secret):
    ''' Do a test of the Freebase user_info service '''
    access_token = oauth.OAuthToken(key, secret)
    consumer = oauth.OAuthConsumer(CONSUMER_KEY, CONSUMER_SECRET)
    signed = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_url=USER_INFO_URL, http_method='POST',
        token=access_token)
    signed.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(),
                        consumer, access_token)
    return fetch_url(USER_INFO_URL,
                     http_method='POST',
                     headers=signed.to_header())
def freebase_mqlwrite(key, secret):
    ''' Do a test of the Freebase MQL write service '''
    access_token = oauth.OAuthToken(key, secret)
    consumer = oauth.OAuthConsumer(CONSUMER_KEY, CONSUMER_SECRET)
    # Unconditionally create a fresh topic; the service fills in its id.
    queryenvelope = {"q": {"query": {"id": None,
                                     "create": "unconditional"}}}
    querybody = {'queries': json.dumps(queryenvelope)}
    signed = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_url=MQL_WRITE_URL, http_method='POST',
        token=access_token, parameters=querybody)
    signed.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(),
                        consumer, access_token)
    return fetch_url(MQL_WRITE_URL,
                     http_method='POST',
                     body=urllib.urlencode(querybody),
                     headers=signed.to_header())
if __name__=='__main__':
    # Make sure to define consumer key/secret and callback url in defaults at the top of this script.
    # Get request token
    print "Getting Request Token\n"
    reqtok=get_request_token()
    print "Our request token is = ",reqtok
    # Assemble an authorization URL
    print "Authorizing Request Token\n"
    authorizeurl=authorize_token_url(reqtok['oauth_token'])
    # Wait for the user to authorize (blocks until the user presses return)
    raw_input("Visit the URL= "+authorizeurl+" and then press return.")
    # Get our access token
    print "Getting Access Token\n"
    acctok=get_access_token(reqtok['oauth_token'],reqtok['oauth_token_secret'])
    print "Our access token is = ",acctok
    # As a test, call Freebase's API for user information and MQL write
    print "Getting Freebase User Info\n"
    print freebase_user_info(acctok['oauth_token'],acctok['oauth_token_secret'])
    print "Performing a Freebase Test Write\n"
    print freebase_mqlwrite(acctok['oauth_token'],acctok['oauth_token_secret'])
| Python |
from apiclient import discovery
from apiclient import model
import json
DEVELOPER_KEY = 'YOUR-KEY-GOES-HERE'
DEVELOPER_KEY = open('DEVELOPER_KEY').read() #hide
# Suppress the default alt= query parameter the API client would add.
model.JsonModel.alt_param = ""
freebase = discovery.build('freebase', 'v1', developerKey=DEVELOPER_KEY)
# MQL: match every topic typed /astronomy/planet; None values are filled in per match.
query = [{'id': None, 'name': None, 'type': '/astronomy/planet'}]
response = json.loads(freebase.mqlread(query=json.dumps(query)).execute())
for planet in response['result']:
  print planet['name']
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: psimakov@google.com (Pavel Simakov)
"""Enforces schema and verifies course files for referential integrity.
Use this script to verify referential integrity of your course definition files
before you import them into the production instance of Google AppEngine.
Here is how to use the script:
- prepare your course files
- edit the data/unit.csv file
- edit the data/lesson.csv file
- edit the assets/js/activity-*.*.js files
- edit the assets/js/assessment-*.js files
- run the script from a command line by navigating to the root
directory of the app and then typing "python tools/verify.py"
- review the report printed to the console for errors and warnings
Good luck!
"""
import csv
import json
import os
import re
from StringIO import StringIO
import sys
# Sentinel objects used as type markers inside SCHEMA below; identity
# (==/is on the sentinel) distinguishes them from real values.
BOOLEAN = object()
STRING = object()
FLOAT = object()
INTEGER = object()
CORRECT = object()
REGEX = object()
INTEGER_OR_INTEGER_LIST = object()
# Declarative schema for assessment and activity definition files.
SCHEMA = {
    'assessment': {
        'assessmentName': STRING,
        'preamble': STRING,
        'checkAnswers': BOOLEAN,
        'questionsList': [{
            'questionHTML': STRING,
            'lesson': STRING,
            'choices': [STRING, CORRECT],
            # The fractional score for each choice in this question, if it is
            # multiple-choice. Each of these values should be between 0.0 and
            # 1.0, inclusive.
            'choiceScores': [FLOAT],
            # The weight given to the entire question.
            'weight': INTEGER,
            'multiLine': BOOLEAN,
            'correctAnswerNumeric': FLOAT,
            'correctAnswerString': STRING,
            'correctAnswerRegex': REGEX}]
    }, 'activity': [
        STRING,
        {
            'questionType': 'multiple choice',
            'questionHTML': STRING,
            'choices': [[STRING, BOOLEAN, STRING]]
        }, {
            'questionType': 'multiple choice group',
            'questionGroupHTML': STRING,
            'questionsList': [{
                'questionHTML': STRING,
                'choices': [STRING],
                'correctIndex': INTEGER_OR_INTEGER_LIST,
                'multiSelect': BOOLEAN}],
            'allCorrectMinCount': INTEGER,
            'allCorrectOutput': STRING,
            'someIncorrectOutput': STRING
        }, {
            'questionType': 'freetext',
            'questionHTML': STRING,
            'correctAnswerRegex': REGEX,
            'correctAnswerOutput': STRING,
            'incorrectAnswerOutput': STRING,
            'showAnswerOutput': STRING,
            'showAnswerPrompt': STRING,
            'outputHeight': STRING
        }]}
# Single-character unit type codes used in data/unit.csv.
UNIT_TYPE_UNIT = 'U'
UNIT_TYPE_LINK = 'O'
UNIT_TYPE_ASSESSMENT = 'A'
UNIT_TYPES = [UNIT_TYPE_UNIT, UNIT_TYPE_LINK, UNIT_TYPE_ASSESSMENT]
UNIT_TYPE_NAMES = {
    UNIT_TYPE_UNIT: 'Unit',
    UNIT_TYPE_LINK: 'Link',
    UNIT_TYPE_ASSESSMENT: 'Assessment'}
# Exact header rows expected in the two CSV input files.
UNITS_HEADER = (
    'id,type,unit_id,title,release_date,now_available')
LESSONS_HEADER = (
    'unit_id,unit_title,lesson_id,lesson_title,lesson_activity,'
    'lesson_activity_name,lesson_notes,lesson_video_id,lesson_objectives')
# CSV column -> (db attribute name, conversion function); None means the
# column is validated elsewhere and not stored.
UNIT_CSV_TO_DB_CONVERTER = {
    'id': None,
    'type': ('type', unicode),
    'unit_id': ('unit_id', unicode),
    'title': ('title', unicode),
    'release_date': ('release_date', unicode),
    'now_available': ('now_available', lambda value: value == 'True')
}
LESSON_CSV_TO_DB_CONVERTER = {
    'unit_id': ('unit_id', int),
    # Field 'unit_title' is a duplicate of Unit.title. We enforce that both
    # values are the same and ignore this value altogether.
    'unit_title': None,
    'lesson_id': ('lesson_id', int),
    'lesson_title': ('title', unicode),
    'lesson_activity': ('activity', lambda value: value == 'yes'),
    'lesson_activity_name': ('activity_title', unicode),
    'lesson_video_id': ('video', unicode),
    'lesson_objectives': ('objectives', unicode),
    'lesson_notes': ('notes', unicode)
}
# Regex fragments marking regions that should be skipped by verification.
# pylint: disable-msg=anomalous-backslash-in-string
NO_VERIFY_TAG_NAME_OPEN = '<gcb-no-verify>\s*\n'
# pylint: enable-msg=anomalous-backslash-in-string
NO_VERIFY_TAG_NAME_CLOSE = '</gcb-no-verify>'
# Verbosity switches for the traversal log printed by the verifier.
OUTPUT_FINE_LOG = False
OUTPUT_DEBUG_LOG = False
class Term(object):
    """A schema term: a type sentinel plus an optional literal value."""
    def __init__(self, term_type, value=None):
        self.term_type = term_type  # one of the schema type sentinels (REGEX, CORRECT, BOOLEAN, ...)
        self.value = value
    def __eq__(self, other):
        if type(other) is not Term:
            return False
        else:
            return ((self.term_type == other.term_type) and
                    (self.value == other.value))
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__; without this method,
        # a != b fell back to identity comparison even when a == b was True.
        return not self.__eq__(other)
class SchemaException(Exception):
    """A class to represent a schema error.

    The *message* passed to __init__ may contain %-placeholders that are
    filled with formatted names of the offending value and/or expected types.
    """
    def format_primitive_value_name(self, name):
        # Maps a type sentinel back to a printable caption; other values are
        # returned unchanged.
        if name == REGEX:
            return 'REGEX(...)'
        if name == CORRECT:
            return 'CORRECT(...)'
        if name == BOOLEAN:
            return 'BOOLEAN'
        return name
    def format_primitive_type_name(self, name):
        """Formats a name for a primitive type."""
        if name == BOOLEAN:
            return 'BOOLEAN'
        if name == REGEX:
            return 'REGEX(...)'
        if name == CORRECT:
            return 'CORRECT(...)'
        if name == STRING or isinstance(name, basestring):
            return 'STRING'
        if name == FLOAT:
            return 'FLOAT'
        if name == INTEGER_OR_INTEGER_LIST:
            return 'INTEGER_OR_INTEGER_LIST'
        if name == INTEGER:
            return 'INTEGER'
        if isinstance(name, dict):
            return '{...}'
        if isinstance(name, list):
            return '[...]'
        return 'Unknown type name \'%s\'' % name.__class__.__name__
    def format_type_names(self, names):
        # A list of types formats to a list of captions; a single type to one.
        if isinstance(names, list):
            captions = []
            for name in names:
                captions.append(self.format_primitive_type_name(name))
            return captions
        else:
            return self.format_primitive_type_name(names)
    def __init__(self, message, value=None, types=None, path=None):
        # NOTE(review): truthiness tests below treat falsy values (0, '',
        # empty list) the same as "not supplied" -- confirm callers never
        # pass such values as *value* or *types*.
        prefix = ''
        if path:
            prefix = 'Error at %s\n' % path
        if types:
            if value:
                message = prefix + message % (
                    self.format_primitive_value_name(value),
                    self.format_type_names(types))
            else:
                message = prefix + message % self.format_type_names(types)
        else:
            if value:
                message = prefix + (
                    message % self.format_primitive_value_name(value))
            else:
                message = prefix + message
        super(SchemaException, self).__init__(message)
class Context(object):
    """A class that manages a stack of traversal contexts."""
    def __init__(self):
        self.parent = None  # enclosing context, or None at the root
        self.path = ['/']   # accumulated path segments from the root
    def new(self, names):
        """Derives a new context from the current one."""
        context = Context()
        context.parent = self
        context.path = list(self.path)
        if names:
            if isinstance(names, list):
                for name in names:
                    # Skip empty/None segments so the path stays clean.
                    if name:
                        context.path.append('/' + '%s' % name)
            else:
                context.path.append('/' + '%s' % names)
        return context
    def format_path(self):
        """Formats the canonical name of this context."""
        return ''.join(self.path)
class SchemaHelper(object):
    """A class that knows how to apply the schema."""
    def __init__(self):
        # Per-type visit counters; self.parse_log is (re)created by
        # check_instances_match_schema() before a traversal starts.
        self.type_stats = {}
    def visit_element(self, atype, value, context, is_terminal=True):
        """Callback for each schema element being traversed."""
        # NOTE(review): appends to self.parse_log, which only exists after
        # check_instances_match_schema() has been called -- confirm callers.
        if atype in self.type_stats:
            count = self.type_stats[atype]
        else:
            count = 0
        self.type_stats[atype] = count + 1
        if is_terminal:
            self.parse_log.append('  TERMINAL: %s %s = %s' % (
                atype, context.format_path(), value))
        else:
            self.parse_log.append('  NON-TERMINAL: %s %s' % (
                atype, context.format_path()))
    def extract_all_terms_to_depth(self, key, values, type_map):
        """Walks schema type map recursively to depth."""
        # Walks schema type map recursively to depth and creates a list of all
        # possible {key: value} pairs. The latter is a list of all non-terminal
        # and terminal terms allowed in the schema. The list of terms from this
        # method can be bound to an execution context for evaluating whether a
        # given instance's map complies with the schema.
        if key:
            type_map.update({key: key})
        if values == REGEX:
            type_map.update({'regex': lambda x: Term(REGEX, x)})
            return
        if values == CORRECT:
            type_map.update({'correct': lambda x: Term(CORRECT, x)})
            return
        if values == BOOLEAN:
            type_map.update(
                {'true': Term(BOOLEAN, True), 'false': Term(BOOLEAN, False)})
            return
        if values == STRING or values == INTEGER:
            return
        if isinstance(values, dict):
            for new_key, new_value in values.items():
                self.extract_all_terms_to_depth(new_key, new_value, type_map)
            return
        if isinstance(values, list):
            for new_value in values:
                self.extract_all_terms_to_depth(None, new_value, type_map)
            return
    def find_selectors(self, type_map):
        """Finds all type selectors."""
        # Finds all elements in the type map where both a key and a value are
        # strings. These elements are used to find one specific type map among
        # several alternative type maps.
        selector = {}
        for akey, avalue in type_map.items():
            if isinstance(akey, basestring) and isinstance(avalue, basestring):
                selector.update({akey: avalue})
        return selector
    def find_compatible_dict(self, value_map, type_map, unused_context):
        """Find the type map most compatible with the value map."""
        # A value map is considered compatible with a type map when former
        # contains the same key names and the value types as the type map.
        # special case when we have just one type; check name and type are the
        # same
        if len(type_map) == 1:
            for value_key in value_map.keys():
                for key in type_map[0].keys():
                    if value_key == key:
                        return key, type_map[0]
            raise SchemaException(
                "Expected: '%s'\nfound: %s", type_map[0].keys()[0], value_map)
        # case when we have several types to choose from
        for adict in type_map:
            dict_selector = self.find_selectors(adict)
            for akey, avalue in dict_selector.items():
                if value_map[akey] == avalue:
                    return akey, adict
        return None, None
    def check_single_value_matches_type(self, value, atype, context):
        """Checks if a single value matches a specific (primitive) type."""
        if atype == BOOLEAN:
            if isinstance(value, bool) or value.term_type == BOOLEAN:
                self.visit_element('BOOLEAN', value, context)
                return True
            else:
                raise SchemaException(
                    'Expected: \'true\' or \'false\'\nfound: %s', value)
        if isinstance(atype, basestring):
            if isinstance(value, basestring):
                self.visit_element('str', value, context)
                return True
            else:
                raise SchemaException('Expected: \'string\'\nfound: %s', value)
        if atype == STRING:
            if isinstance(value, basestring):
                self.visit_element('STRING', value, context)
                return True
            else:
                raise SchemaException('Expected: \'string\'\nfound: %s', value)
        if atype == REGEX and value.term_type == REGEX:
            self.visit_element('REGEX', value, context)
            return True
        if atype == CORRECT and value.term_type == CORRECT:
            self.visit_element('CORRECT', value, context)
            return True
        if atype == FLOAT:
            if is_number(value):
                self.visit_element('NUMBER', value, context)
                return True
            else:
                raise SchemaException('Expected: \'number\'\nfound: %s', value)
        if atype == INTEGER_OR_INTEGER_LIST:
            if is_integer(value):
                self.visit_element('INTEGER', value, context)
                return True
            if is_integer_list(value):
                self.visit_element('INTEGER_OR_INTEGER_LIST', value, context)
                return True
            raise SchemaException(
                'Expected: \'integer\' or '
                '\'array of integer\'\nfound: %s', value,
                path=context.format_path())
        if atype == INTEGER:
            if is_integer(value):
                self.visit_element('INTEGER', value, context)
                return True
            else:
                raise SchemaException(
                    'Expected: \'integer\'\nfound: %s', value,
                    path=context.format_path())
        raise SchemaException(
            'Unexpected value \'%s\'\n'
            'for type %s', value, atype, path=context.format_path())
    def check_value_list_matches_type(self, value, atype, context):
        """Checks if all items in value list match a specific type."""
        for value_item in value:
            found = False
            for atype_item in atype:
                if isinstance(atype_item, list):
                    for atype_item_item in atype_item:
                        if self.does_value_match_type(
                                value_item, atype_item_item, context):
                            found = True
                            break
                else:
                    if self.does_value_match_type(
                            value_item, atype_item, context):
                        found = True
                        break
            if not found:
                raise SchemaException(
                    'Expected: \'%s\'\nfound: %s', atype, value)
        return True
    def check_value_matches_type(self, value, atype, context):
        """Checks if single value or a list of values match a specific type."""
        if isinstance(atype, list) and isinstance(value, list):
            return self.check_value_list_matches_type(value, atype, context)
        else:
            return self.check_single_value_matches_type(value, atype, context)
    def does_value_match_type(self, value, atype, context):
        """Same as other method, but does not throw an exception."""
        try:
            return self.check_value_matches_type(value, atype, context)
        except SchemaException:
            return False
    def does_value_match_one_of_types(self, value, types, context):
        """Checks if a value matches to one of the types in the list."""
        type_names = None
        if isinstance(types, list):
            type_names = types
        if type_names:
            for i in range(0, len(type_names)):
                if self.does_value_match_type(value, type_names[i], context):
                    return True
        return False
    def does_value_match_map_of_type(self, value, types, context):
        """Checks if value matches any variation of {...} type."""
        # find all possible map types
        maps = []
        for atype in types:
            if isinstance(atype, dict):
                maps.append(atype)
        if not maps and isinstance(types, dict):
            maps.append(types)
        # check if the structure of value matches one of the maps
        if isinstance(value, dict):
            aname, adict = self.find_compatible_dict(value, maps, context)
            if adict:
                self.visit_element(
                    'dict', value, context.new(aname), is_terminal=False)
                for akey, avalue in value.items():
                    if akey not in adict:
                        raise SchemaException(
                            'Unknown term \'%s\'', akey,
                            path=context.format_path())
                    self.check_value_of_valid_type(
                        avalue, adict[akey], context.new([aname, akey]))
                return True
            raise SchemaException(
                'The value:\n  %s\n'
                'is incompatible with expected type(s):\n  %s',
                value, types, path=context.format_path())
        return False
    def format_name_with_index(self, alist, aindex):
        """A function to format a context name with an array element index."""
        # Single-element lists get no index suffix in the path.
        if len(alist) == 1:
            return ''
        else:
            return '[%s]' % aindex
    def does_value_match_list_of_types_in_order(
            self, value, types, context, target):
        """Iterates the value and types in given order and checks for match."""
        all_values_are_lists = True
        for avalue in value:
            if not isinstance(avalue, list):
                all_values_are_lists = False
        if all_values_are_lists:
            for i in range(0, len(value)):
                self.check_value_of_valid_type(value[i], types, context.new(
                    self.format_name_with_index(value, i)), in_order=True)
        else:
            if len(target) != len(value):
                # NOTE(review): % binds tighter than +, so 'found: %s.' is
                # formatted with *value* first and the leading 'Expected'
                # placeholder is later filled with len(target) -- confirm
                # this message renders as intended.
                raise SchemaException(
                    'Expected: \'%s\' values\n' + 'found: %s.' % value,
                    len(target), path=context.format_path())
            for i in range(0, len(value)):
                self.check_value_of_valid_type(value[i], target[i], context.new(
                    self.format_name_with_index(value, i)))
        return True
    def does_value_match_list_of_types_any_order(self, value, types,
                                                 context, lists):
        """Iterates the value and types, checks if they match in any order."""
        target = lists
        if not target:
            if not isinstance(types, list):
                raise SchemaException(
                    'Unsupported type %s',
                    None, types, path=context.format_path())
            target = types
        for i in range(0, len(value)):
            found = False
            for atarget in target:
                try:
                    self.check_value_of_valid_type(
                        value[i], atarget,
                        context.new(self.format_name_with_index(value, i)))
                    found = True
                    break
                except SchemaException as unused_e:
                    continue
            if not found:
                raise SchemaException(
                    'The value:\n  %s\n'
                    'is incompatible with expected type(s):\n  %s',
                    value, types, path=context.format_path())
        return True
    def does_value_match_list_of_type(self, value, types, context, in_order):
        """Checks if a value matches a variation of [...] type."""
        # Extra argument controls whether matching must be done in a specific
        # or in any order. A specific order is demanded by [[...]]] construct,
        # i.e. [[STRING, INTEGER, BOOLEAN]], while sub elements inside {...} and
        # [...] can be matched in any order.
        # prepare a list of list types
        lists = []
        for atype in types:
            if isinstance(atype, list):
                lists.append(atype)
        if len(lists) > 1:
            raise SchemaException(
                'Unable to validate types with multiple alternative '
                'lists %s', None, types, path=context.format_path())
        if isinstance(value, list):
            if len(lists) > 1:
                raise SchemaException(
                    'Allowed at most one list\nfound: %s.',
                    None, types, path=context.format_path())
            # determine if list is in order or not as hinted by double array
            # [[..]]; [STRING, NUMBER] is in any order, but [[STRING, NUMBER]]
            # demands order
            ordered = len(lists) == 1 and isinstance(types, list)
            # NOTE(review): lists[0] below raises IndexError when in_order is
            # truthy but *types* contains no nested list -- confirm callers
            # never reach this combination.
            if in_order or ordered:
                return self.does_value_match_list_of_types_in_order(
                    value, types, context, lists[0])
            else:
                return self.does_value_match_list_of_types_any_order(
                    value, types, context, lists)
        return False
    def check_value_of_valid_type(self, value, types, context, in_order=None):
        """Check if a value matches any of the given types."""
        if not (isinstance(types, list) or isinstance(types, dict)):
            self.check_value_matches_type(value, types, context)
            return
        if (self.does_value_match_list_of_type(value, types,
                                               context, in_order) or
            self.does_value_match_map_of_type(value, types, context) or
            self.does_value_match_one_of_types(value, types, context)):
            return
        raise SchemaException(
            'Unknown type %s', value, path=context.format_path())
    def check_instances_match_schema(self, values, types, name):
        """Recursively decompose 'values' to see if they match schema types."""
        self.parse_log = []
        context = Context().new(name)
        self.parse_log.append('  ROOT %s' % context.format_path())
        # pylint: disable-msg=protected-access
        values_class = values.__class__
        # pylint: enable-msg=protected-access
        # handle {..} containers
        if isinstance(types, dict):
            if not isinstance(values, dict):
                raise SchemaException(
                    'Error at \'/\': expected {...}, found %s' % (
                        values_class.__name__))
            self.check_value_of_valid_type(values, types, context.new([]))
            return
        # handle [...] containers
        if isinstance(types, list):
            if not isinstance(values, list):
                raise SchemaException(
                    'Error at \'/\': expected [...], found %s' % (
                        values_class.__name__))
            for i in range(0, len(values)):
                self.check_value_of_valid_type(
                    values[i], types, context.new('[%s]' % i))
            return
        raise SchemaException(
            'Expected an array or a dictionary.', None,
            path=context.format_path())
def escape_quote(value):
    """Return *value* coerced to unicode with single quotes backslash-escaped."""
    text = unicode(value)
    return text.replace('\'', r'\'')
class Unit(object):
    """A course unit row (one line of data/unit.csv)."""
    def __init__(self):
        self.id = 0
        self.type = ''
        self.unit_id = ''
        self.title = ''
        self.release_date = ''
        self.now_available = False
    def list_properties(self, name, output):
        """Append JavaScript assignment statements for every property to *output*."""
        emit = output.append
        # 'id' is numeric, so it is emitted unquoted.
        emit("%s['id'] = %s;" % (name, self.id))
        for field in ('type', 'unit_id', 'title', 'release_date'):
            emit("%s['%s'] = '%s';"
                 % (name, field, escape_quote(getattr(self, field))))
        # Booleans render in JavaScript style (lowercase true/false).
        emit("%s['now_available'] = %s;" % (name, str(self.now_available).lower()))
class Lesson(object):
    """A class to represent a Lesson."""
    def __init__(self):
        # Defaults mirror the columns of data/lesson.csv.
        self.unit_id = 0
        self.unit_title = ''
        self.lesson_id = 0
        self.lesson_title = ''
        self.lesson_activity = ''
        self.lesson_activity_name = ''
        self.lesson_notes = ''
        self.lesson_video_id = ''
        self.lesson_objectives = ''
    def list_properties(self, name, output):
        """Outputs all properties of the lesson."""
        # lesson_activity is stored as 'yes'/'' and exported as a JS boolean.
        activity = 'true' if self.lesson_activity == 'yes' else 'false'
        output.append('%s[\'unit_id\'] = %s;' % (name, self.unit_id))
        output.append('%s[\'unit_title\'] = \'%s\';' % (
            name, escape_quote(self.unit_title)))
        output.append('%s[\'lesson_id\'] = %s;' % (name, self.lesson_id))
        output.append('%s[\'lesson_title\'] = \'%s\';' % (
            name, escape_quote(self.lesson_title)))
        output.append('%s[\'lesson_activity\'] = %s;' % (name, activity))
        # Remaining string attributes are emitted quoted and escaped.
        for attr in ('lesson_activity_name', 'lesson_notes',
                     'lesson_video_id', 'lesson_objectives'):
            output.append('%s[\'%s\'] = \'%s\';' % (
                name, attr, escape_quote(getattr(self, attr))))
    def to_id_string(self):
        """Returns a compact 'unit.lesson.title' identifier for messages."""
        return '%s.%s.%s' % (self.unit_id, self.lesson_id, self.lesson_title)
class Assessment(object):
    """A class to represent a Assessment."""
    def __init__(self):
        # scope maps every term reachable from the assessment schema to its
        # definition; used as the evaluation namespace for assessment files.
        self.scope = {}
        SchemaHelper().extract_all_terms_to_depth(
            'assessment', SCHEMA['assessment'], self.scope)
class Activity(object):
    """A class to represent a Activity."""
    def __init__(self):
        # scope maps every term reachable from the activity schema to its
        # definition; used as the evaluation namespace for activity files.
        self.scope = {}
        SchemaHelper().extract_all_terms_to_depth(
            'activity', SCHEMA['activity'], self.scope)
def silent_echo(unused_message):
    """Default verifier logger; swallows all output."""
    pass
def echo(message):
    """Verifier logger that prints to stdout (Python 2 print statement)."""
    print message
def is_integer_list(s):
    """Returns True if s is a list whose elements are all ints.

    Note: the original wrapped this in try/except ValueError, but neither
    isinstance() nor iteration raises ValueError, so the handler was dead
    code and has been removed. An empty list returns True, as before.
    """
    return isinstance(s, list) and all(isinstance(item, int) for item in s)
def is_integer(s):
    """Returns True if s converts to an integral numeric value."""
    try:
        as_int = int(s)
        as_float = float(s)
    except Exception:  # pylint: disable-msg=broad-except
        # int('7.5') and int('abc') raise; treat any failure as "not integer".
        return False
    return as_int == as_float
def is_boolean(s):
    """Returns True if s is the literal string 'True' or 'False'.

    Note: the original wrapped the comparison in try/except ValueError, but
    '==' never raises ValueError, so the handler was dead code; removed.
    """
    return s in ('True', 'False')
def is_number(s):
    """Returns True if s can be parsed as a float."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def is_one_of(value, values):
    """Returns True if value equals one of the items in values.

    The original hand-rolled an equality loop; the 'in' operator performs
    the same '==' scan, so behavior is identical.
    """
    return value in values
def text_to_line_numbered_text(text):
    """Adds 1-based line numbers to the provided text.

    The original kept a manual counter; enumerate(..., 1) is the idiomatic
    equivalent. Output format is unchanged: lines joined with a newline plus
    the two-space continuation indent.
    """
    return '\n  '.join(
        '%s: %s' % (number, line)
        for number, line in enumerate(text.split('\n'), 1))
def set_object_attributes(target_object, names, values, converter=None):
    """Sets object attributes from provided values.

    Args:
        target_object: the object whose attributes are assigned.
        names: list of attribute names.
        values: list of raw (usually CSV string) values, same length as names.
        converter: optional dict mapping a source name to a
            (target_name, target_type) pair used for explicit conversion.

    Raises:
        SchemaException: if names and values differ in length.
    """
    if len(names) != len(values):
        raise SchemaException(
            'The number of elements must match: %s and %s' % (names, values))
    for i in range(len(names)):
        if converter:
            target_def = converter.get(names[i])
            if target_def:
                target_name = target_def[0]
                target_type = target_def[1]
                setattr(target_object, target_name, target_type(values[i]))
                continue
        if is_integer(values[i]):
            # if we are setting an attribute of an object that support
            # metadata, try to infer the target type and convert 'int' into
            # 'str' here
            target_type = None
            if hasattr(target_object.__class__, names[i]):
                attribute = getattr(target_object.__class__, names[i])
                if hasattr(attribute, 'data_type'):
                    target_type = attribute.data_type.__name__
            if target_type and (target_type == 'str' or
                                target_type == 'basestring'):
                setattr(target_object, names[i], str(values[i]))
            else:
                setattr(target_object, names[i], int(values[i]))
            continue
        if is_boolean(values[i]):
            # BUG FIX: the original used bool(values[i]), but values[i] is
            # the string 'True' or 'False' here and bool('False') is True.
            # Compare against the literal instead.
            setattr(target_object, names[i], values[i] == 'True')
            continue
        setattr(target_object, names[i], values[i])
def read_objects_from_csv_stream(stream, header, new_object, converter=None):
    """Reads objects from an open CSV stream."""
    # Buffer the whole stream so csv.reader sees a seekable text source.
    rows = csv.reader(StringIO(stream.read()))
    return read_objects_from_csv(
        rows, header, new_object, converter=converter)
def read_objects_from_csv_file(fname, header, new_object):
    """Reads objects from a CSV file, closing the file when done.

    The original passed open(fname) straight through and never closed the
    handle; try/finally guarantees the descriptor is released.
    """
    stream = open(fname)
    try:
        return read_objects_from_csv_stream(stream, header, new_object)
    finally:
        stream.close()
def read_objects_from_csv(value_rows, header, new_object, converter=None):
    """Reads objects from the rows of a CSV file.

    Args:
        value_rows: iterable of CSV rows; the first non-empty row must be
            the header row.
        header: expected comma-separated header string.
        new_object: factory producing one object per data row.
        converter: optional name -> (target_name, target_type) mapping
            passed through to set_object_attributes.

    Returns:
        A list of populated objects, one per data row.

    Raises:
        SchemaException: on empty input or a header/row size mismatch.
    """
    values = []
    for row in value_rows:
        if not row:
            continue
        values.append(row)
    # Guard against empty input: the original indexed values[0] and raised
    # an opaque IndexError when the CSV had no rows at all.
    if not values:
        raise SchemaException(
            'Error reading CSV data.\n  '
            'Found no rows; expected a header row: %s' % header)
    names = header.split(',')
    if names != values[0]:
        raise SchemaException(
            'Error reading CSV header.\n  '
            'Header row had %s element(s): %s\n  '
            'Expected header row with %s element(s): %s' % (
                len(values[0]), values[0], len(names), names))
    items = []
    for i in range(1, len(values)):
        if len(names) != len(values[i]):
            raise SchemaException(
                'Error reading CSV data row.\n  '
                'Row #%s had %s element(s): %s\n  '
                'Expected %s element(s): %s' % (
                    i, len(values[i]), values[i], len(names), names))
        # Decode string values in case they were encoded in UTF-8. The CSV
        # reader should do this automatically, but it does not. The issue is
        # discussed here: http://docs.python.org/2/library/csv.html
        decoded_values = []
        for value in values[i]:
            if isinstance(value, basestring):
                value = unicode(value.decode('utf-8'))
            decoded_values.append(value)
        item = new_object()
        set_object_attributes(item, names, decoded_values, converter=converter)
        items.append(item)
    return items
def escape_javascript_regex(text):
    """Rewrites 'correctAnswerRegex: /.../mods' literals as regex("...") calls."""
    literal_pattern = r'correctAnswerRegex([:][ ]*)([/])(.*)([/][ismx]*)'
    replacement = r'correctAnswerRegex: regex("\2\3\4")'
    return re.sub(literal_pattern, replacement, text)
def remove_javascript_single_line_comment(text):
    """Strips '//' comments (trailing and whole-line) from text.

    Trailing comments must be preceded by whitespace, which keeps URLs such
    as http://... intact.
    """
    trailing = re.compile('^(.*?)[ ]+//(.*)$', re.MULTILINE)
    whole_line = re.compile('^//(.*)$', re.MULTILINE)
    return whole_line.sub(r'', trailing.sub(r'\1', text))
def remove_javascript_multi_line_comment(text):
    """Strips '/* ... */' comments (possibly spanning lines) from text."""
    # A raw string keeps the backslashes literal, so no pylint pragma needed.
    comment = re.compile(r'/\*(.*)\*/', re.MULTILINE + re.DOTALL)
    return comment.sub(r'', text)
def parse_content_marked_no_verify(content):
    """Parses and returns a tuple of real content and no-verify text.

    Free-form JavaScript in an activity file must sit between the
    //<gcb-no-verify> ... //</gcb-no-verify> tags so the verifier can
    selectively ignore it; that span is returned separately (or None).
    """
    pattern = re.compile('%s(.*)%s' % (
        NO_VERIFY_TAG_NAME_OPEN, NO_VERIFY_TAG_NAME_CLOSE), re.DOTALL)
    match = pattern.search(content)
    noverify_text = match.group(1) if match else None
    return (pattern.sub('', content), noverify_text)
def convert_javascript_to_python(content, root_name):
    """Removes JavaScript specific syntactic constructs and returns a tuple.

    Strips no-verify spans and comments, drops the 'var' declaration of
    root_name, and wraps regex literals; returns (content, noverify_text).
    """
    content, noverify_text = parse_content_marked_no_verify(content)
    content = remove_javascript_single_line_comment(
        remove_javascript_multi_line_comment(content))
    content = escape_javascript_regex(
        content.replace('var %s = ' % root_name, '%s = ' % root_name))
    return (content, noverify_text)
def convert_javascript_file_to_python(fname, root_name):
    """Reads fname and converts its JavaScript content to Python.

    The original called open(fname).readlines() and never closed the
    handle; 'with' guarantees the descriptor is released. f.read() replaces
    the equivalent ''.join(readlines()).
    """
    with open(fname, 'r') as stream:
        content = stream.read()
    return convert_javascript_to_python(content, root_name)
def legacy_eval_python_expression_for_test(content, scope, unused_root_name):
    """Legacy content parsing function using compile/exec."""
    print 'WARNING! This code is unsafe and uses compile/exec!'
    # First compiles and then evaluates a Python script text in a restricted
    # environment using provided bindings. Returns the resulting bindings if
    # evaluation completed.
    # create a new execution scope that has only the schema terms defined;
    # remove all other languages constructs including __builtins__
    # NOTE(review): emptying __builtins__ is NOT a security boundary --
    # test_exec() in this module demonstrates a working escape.
    restricted_scope = {}
    restricted_scope.update(scope)
    restricted_scope.update({'__builtins__': {}})
    code = compile(content, '<string>', 'exec')
    # pylint: disable-msg=exec-statement
    exec code in restricted_scope
    # pylint: enable-msg=exec-statement
    return restricted_scope
def not_implemented_parse_content(
    unused_content, unused_scope, unused_root_name):
    """Placeholder parser that always fails; install a real one instead."""
    raise Exception('Not implemented.')
# by default no parser method is configured; set custom parser if you have it
# (run_all_unit_tests() temporarily swaps in the legacy exec-based parser).
parse_content = not_implemented_parse_content
def evaluate_python_expression_from_text(content, root_name, scope,
                                         noverify_text):
    """Compiles and evaluates a Python script in a restricted environment.

    Delegates to the module-level parse_content hook and attaches any
    no-verify text to the resulting bindings under 'noverify'.
    """
    bindings = parse_content(content, scope, root_name)
    if noverify_text:
        bindings['noverify'] = noverify_text
    if bindings.get(root_name) is None:
        raise Exception('Unable to find \'%s\'' % root_name)
    return bindings
def evaluate_javascript_expression_from_file(fname, root_name, scope, error):
    """Evaluates a JavaScript content file and returns its Python bindings.

    Args:
        fname: JavaScript file to read.
        root_name: top-level variable name expected in the file.
        scope: extra bindings available during evaluation.
        error: callable used to report parse failures.
    """
    (content, noverify_text) = convert_javascript_file_to_python(fname,
                                                                 root_name)
    try:
        return evaluate_python_expression_from_text(content, root_name, scope,
                                                    noverify_text)
    except:
        # Deliberately bare: report the failure (with numbered source lines)
        # through the error callback, then re-raise the original exception.
        error('Unable to parse %s in file %s\n  %s' % (
            root_name, fname, text_to_line_numbered_text(content)))
        for message in sys.exc_info():
            error(str(message))
        raise
class Verifier(object):
    """Verifies Units, Lessons, Assessments, Activities and their relations."""
    def __init__(self):
        # echo_func receives every log line; silent by default.
        self.echo_func = silent_echo
        self.schema_helper = SchemaHelper()
        self.errors = 0
        self.warnings = 0
        # Accumulates the lines of the generated course export script.
        self.export = []
    def verify_unit_fields(self, units):
        """Checks unit attribute values and emits their export lines."""
        self.export.append('units = Array();')
        for unit in units:
            if not is_one_of(unit.now_available, [True, False]):
                self.error(
                    'Bad now_available \'%s\' for unit id %s; expected '
                    '\'True\' or \'False\'' % (unit.now_available, unit.id))
            if not is_one_of(unit.type, UNIT_TYPES):
                self.error(
                    'Bad type \'%s\' for unit id %s; '
                    'expected: %s.' % (unit.type, unit.id, UNIT_TYPES))
            if unit.type == 'U':
                if not is_integer(unit.unit_id):
                    self.error(
                        'Expected integer unit_id, found %s in unit id '
                        ' %s' % (unit.unit_id, unit.id))
            self.export.append('')
            self.export.append('units[%s] = Array();' % unit.id)
            self.export.append('units[%s][\'lessons\'] = Array();' % unit.id)
            unit.list_properties('units[%s]' % unit.id, self.export)
    def verify_lesson_fields(self, lessons):
        """Checks lesson attribute values and emits their export lines."""
        for lesson in lessons:
            if not is_one_of(lesson.lesson_activity, ['yes', '']):
                self.error('Bad lesson_activity \'%s\' for lesson_id %s' % (
                    lesson.lesson_activity, lesson.lesson_id))
            self.export.append('')
            self.export.append('units[%s][\'lessons\'][%s] = Array();' % (
                lesson.unit_id, lesson.lesson_id))
            lesson.list_properties('units[%s][\'lessons\'][%s]' % (
                lesson.unit_id, lesson.lesson_id), self.export)
    def verify_unit_lesson_relationships(self, units, lessons):
        """Checks each lesson points to a unit and all lessons are in use."""
        used_lessons = []
        units.sort(key=lambda x: x.id)
        # for unit in units:
        for i in range(0, len(units)):
            unit = units[i]
            # check that unit ids are 1-based and sequential
            if unit.id != i + 1:
                self.error('Unit out of order: %s' % (unit.id))
            # get the list of lessons for each unit
            self.fine('Unit %s: %s' % (unit.id, unit.title))
            unit_lessons = []
            for lesson in lessons:
                if lesson.unit_id == unit.unit_id:
                    if lesson.unit_title != unit.title:
                        raise Exception(''.join([
                            'A unit_title of a lesson (id=%s) must match ',
                            'title of a unit (id=%s) the lesson belongs to.'
                        ]) % (lesson.lesson_id, lesson.unit_id))
                    unit_lessons.append(lesson)
                    used_lessons.append(lesson)
            # inspect all lessons for the current unit
            unit_lessons.sort(key=lambda x: x.lesson_id)
            for j in range(0, len(unit_lessons)):
                lesson = unit_lessons[j]
                # check that lesson_ids are 1-based and sequential
                if lesson.lesson_id != j + 1:
                    self.warn(
                        'Lesson lesson_id is out of order: expected %s, found '
                        ' %s (%s)' % (
                            j + 1, lesson.lesson_id, lesson.to_id_string()))
                self.fine('  Lesson %s: %s' % (
                    lesson.lesson_id, lesson.lesson_title))
        # find lessons not used by any of the units
        unused_lessons = list(lessons)
        for lesson in used_lessons:
            unused_lessons.remove(lesson)
        for lesson in unused_lessons:
            self.warn('Unused lesson_id %s (%s)' % (
                lesson.lesson_id, lesson.to_id_string()))
        # check all lessons point to known units
        for lesson in lessons:
            has = False
            for unit in units:
                if lesson.unit_id == unit.unit_id:
                    has = True
                    break
            if not has:
                self.error('Lesson has unknown unit_id %s (%s)' % (
                    lesson.unit_id, lesson.to_id_string()))
    def get_activity_as_python(self, unit_id, lesson_id):
        """Loads an activity file and returns its parsed Python bindings."""
        fname = os.path.join(
            os.path.dirname(__file__),
            '../assets/js/activity-%s.%s.js' % (unit_id, lesson_id))
        # NOTE(review): when the file is missing this falls through and
        # returns None -- callers must tolerate that.
        if not os.path.exists(fname):
            self.error('  Missing activity: %s' % fname)
        else:
            activity = evaluate_javascript_expression_from_file(
                fname, 'activity', Activity().scope, self.error)
            self.verify_activity_instance(activity, fname)
            return activity
    def verify_activities(self, lessons):
        """Loads and verifies all activities."""
        self.info('Loading activities:')
        count = 0
        for lesson in lessons:
            if lesson.lesson_activity == 'yes':
                count += 1
                activity = self.get_activity_as_python(
                    lesson.unit_id, lesson.lesson_id)
                self.export.append('')
                self.encode_activity_json(
                    activity, lesson.unit_id, lesson.lesson_id)
        self.info('Read %s activities' % count)
    def verify_assessment(self, units):
        """Loads and verifies all assessments."""
        self.export.append('')
        self.export.append('assessments = Array();')
        self.info('Loading assessment:')
        count = 0
        for unit in units:
            if unit.type == 'A':
                count += 1
                assessment_name = str(unit.unit_id)
                fname = os.path.join(
                    os.path.dirname(__file__),
                    '../assets/js/assessment-%s.js' % assessment_name)
                if not os.path.exists(fname):
                    self.error('  Missing assessment: %s' % fname)
                else:
                    assessment = evaluate_javascript_expression_from_file(
                        fname, 'assessment', Assessment().scope, self.error)
                    self.verify_assessment_instance(assessment, fname)
                    self.export.append('')
                    self.encode_assessment_json(assessment, assessment_name)
        self.info('Read %s assessments' % count)
    # NB: The exported script needs to define a gcb_regex() wrapper function
    @staticmethod
    def encode_regex(regex_str):
        """Encodes a JavaScript-style regex into a Python gcb_regex call."""
        # parse the regex into the base and modifiers. e.g., for /foo/i
        # base is 'foo' and modifiers is 'i'
        assert regex_str[0] == '/'
        # find the LAST '/' in regex_str (because there might be other
        # escaped '/' characters in the middle of regex_str)
        final_slash_index = regex_str.rfind('/')
        assert final_slash_index > 0
        base = regex_str[1:final_slash_index]
        modifiers = regex_str[final_slash_index + 1:]
        func_str = 'gcb_regex(' + repr(base) + ', ' + repr(modifiers) + ')'
        return func_str
    def encode_activity_json(self, activity_dict, unit_id, lesson_id):
        """Encodes an activity dictionary into JSON."""
        output = []
        for elt in activity_dict['activity']:
            t = type(elt)
            encoded_elt = None
            if t is str:
                encoded_elt = {'type': 'string', 'value': elt}
            elif t is dict:
                qt = elt['questionType']
                encoded_elt = {'type': qt}
                if qt == 'multiple choice':
                    choices = elt['choices']
                    encoded_choices = [[x, y.value, z] for x, y, z in choices]
                    encoded_elt['choices'] = encoded_choices
                elif qt == 'multiple choice group':
                    # everything inside are primitive types that can be encoded
                    elt_copy = dict(elt)
                    del elt_copy['questionType']  # redundant
                    encoded_elt['value'] = elt_copy
                elif qt == 'freetext':
                    for k in elt.keys():
                        if k == 'questionType':
                            continue
                        elif k == 'correctAnswerRegex':
                            encoded_elt[k] = Verifier.encode_regex(elt[k].value)
                        else:
                            # ordinary string
                            encoded_elt[k] = elt[k]
                else:
                    assert False
            else:
                assert False
            assert encoded_elt
            output.append(encoded_elt)
        # N.B.: make sure to get the string quoting right!
        code_str = "units[%s]['lessons'][%s]['activity'] = " % (
            unit_id, lesson_id) + repr(json.dumps(output)) + ';'
        self.export.append(code_str)
        if 'noverify' in activity_dict:
            self.export.append('')
            noverify_code_str = "units[%s]['lessons'][%s]['code'] = " % (
                unit_id, lesson_id) + repr(activity_dict['noverify']) + ';'
            self.export.append(noverify_code_str)
    def encode_assessment_json(self, assessment_dict, assessment_name):
        """Encodes an assessment dictionary into JSON."""
        real_dict = assessment_dict['assessment']
        output = {}
        output['assessmentName'] = real_dict['assessmentName']
        if 'preamble' in real_dict:
            output['preamble'] = real_dict['preamble']
        output['checkAnswers'] = real_dict['checkAnswers'].value
        encoded_questions_list = []
        for elt in real_dict['questionsList']:
            encoded_elt = {}
            encoded_elt['questionHTML'] = elt['questionHTML']
            if 'lesson' in elt:
                encoded_elt['lesson'] = elt['lesson']
            if 'correctAnswerNumeric' in elt:
                encoded_elt['correctAnswerNumeric'] = elt[
                    'correctAnswerNumeric']
            if 'correctAnswerString' in elt:
                encoded_elt['correctAnswerString'] = elt['correctAnswerString']
            if 'correctAnswerRegex' in elt:
                encoded_elt['correctAnswerRegex'] = Verifier.encode_regex(
                    elt['correctAnswerRegex'].value)
            if 'choices' in elt:
                encoded_choices = []
                correct_answer_index = None
                for (ind, e) in enumerate(elt['choices']):
                    if type(e) is str:
                        encoded_choices.append(e)
                    elif e.term_type == CORRECT:
                        encoded_choices.append(e.value)
                        correct_answer_index = ind
                    else:
                        raise Exception("Invalid type in 'choices'")
                encoded_elt['choices'] = encoded_choices
                encoded_elt['correctAnswerIndex'] = correct_answer_index
            encoded_questions_list.append(encoded_elt)
        output['questionsList'] = encoded_questions_list
        # N.B.: make sure to get the string quoting right!
        code_str = 'assessments[\'' + assessment_name + '\'] = ' + repr(
            json.dumps(output)) + ';'
        self.export.append(code_str)
        if 'noverify' in assessment_dict:
            self.export.append('')
            noverify_code_str = ('assessments[\'' + assessment_name +
                                 '\'] = ' + repr(assessment_dict['noverify']) +
                                 ';')
            self.export.append(noverify_code_str)
    def format_parse_log(self):
        """Returns the schema helper's parse log as one printable string."""
        return 'Parse log:\n%s' % '\n'.join(self.schema_helper.parse_log)
    def verify_assessment_instance(self, scope, fname):
        """Verifies compliance of assessment with schema."""
        if scope:
            try:
                self.schema_helper.check_instances_match_schema(
                    scope['assessment'], SCHEMA['assessment'], 'assessment')
                self.info('  Verified assessment %s' % fname)
                if OUTPUT_DEBUG_LOG:
                    self.info(self.format_parse_log())
            except SchemaException as e:
                self.error('  Error in assessment %s\n%s' % (
                    fname, self.format_parse_log()))
                raise e
        else:
            self.error('  Unable to evaluate \'assessment =\' in %s' % fname)
    def verify_activity_instance(self, scope, fname):
        """Verifies compliance of activity with schema."""
        if scope:
            try:
                self.schema_helper.check_instances_match_schema(
                    scope['activity'], SCHEMA['activity'], 'activity')
                self.info('  Verified activity %s' % fname)
                if OUTPUT_DEBUG_LOG:
                    self.info(self.format_parse_log())
            except SchemaException as e:
                self.error('  Error in activity %s\n%s' % (
                    fname, self.format_parse_log()))
                raise e
        else:
            self.error('  Unable to evaluate \'activity =\' in %s' % fname)
    def fine(self, x):
        """Logs a fine-grained (verbose) message."""
        if OUTPUT_FINE_LOG:
            self.echo_func('FINE: ' + x)
    def info(self, x):
        """Logs an informational message."""
        self.echo_func('INFO: ' + x)
    def warn(self, x):
        """Logs a warning and bumps the warning counter."""
        self.warnings += 1
        self.echo_func('WARNING: ' + x)
    def error(self, x):
        """Logs an error and bumps the error counter."""
        self.errors += 1
        self.echo_func('ERROR: ' + x)
    def load_and_verify_model(self, echo_func):
        """Loads, parses and verifies all content for a course."""
        self.echo_func = echo_func
        self.info('Started verification in: %s' % __file__)
        unit_file = os.path.join(os.path.dirname(__file__), '../data/unit.csv')
        lesson_file = os.path.join(
            os.path.dirname(__file__), '../data/lesson.csv')
        self.info('Loading units from: %s' % unit_file)
        units = read_objects_from_csv_file(unit_file, UNITS_HEADER, Unit)
        self.info('Read %s units' % len(units))
        self.info('Loading lessons from: %s' % lesson_file)
        lessons = read_objects_from_csv_file(
            lesson_file, LESSONS_HEADER, Lesson)
        self.info('Read %s lessons' % len(lessons))
        self.verify_unit_fields(units)
        self.verify_lesson_fields(lessons)
        self.verify_unit_lesson_relationships(units, lessons)
        try:
            self.verify_activities(lessons)
            self.verify_assessment(units)
        except SchemaException as e:
            self.error(str(e))
        # NOTE(review): no separator between the two literals below, so the
        # stats dict and 'Completed verification...' run together in the
        # output; test_sample_assets only does substring checks, so this is
        # benign -- confirm before "fixing".
        info = (
            'Schema usage statistics: %s'
            'Completed verification: %s warnings, %s errors.' % (
                self.schema_helper.type_stats, self.warnings, self.errors))
        self.info(info)
        return self.warnings, self.errors, info
def run_all_regex_unit_tests():
    """Executes all tests related to regular expressions."""
    # Expected values below pin the exact output of the JS-comment strippers,
    # the no-verify span parser and Verifier.encode_regex.
    # pylint: disable-msg=anomalous-backslash-in-string
    assert escape_javascript_regex(
        'correctAnswerRegex: /site:bls.gov?/i, blah') == (
            'correctAnswerRegex: regex(\"/site:bls.gov?/i\"), blah')
    assert escape_javascript_regex(
        'correctAnswerRegex: /site:http:\/\/www.google.com?q=abc/i, blah') == (
            'correctAnswerRegex: '
            'regex(\"/site:http:\/\/www.google.com?q=abc/i\"), blah')
    assert remove_javascript_multi_line_comment(
        'blah\n/*\ncomment\n*/\nblah') == 'blah\n\nblah'
    assert remove_javascript_multi_line_comment(
        'blah\nblah /*\ncomment\nblah */\nblah') == ('blah\nblah \nblah')
    assert remove_javascript_single_line_comment(
        'blah\n// comment\nblah') == 'blah\n\nblah'
    assert remove_javascript_single_line_comment(
        'blah\nblah http://www.foo.com\nblah') == (
            'blah\nblah http://www.foo.com\nblah')
    assert remove_javascript_single_line_comment(
        'blah\nblah // comment\nblah') == 'blah\nblah\nblah'
    assert remove_javascript_single_line_comment(
        'blah\nblah // comment http://www.foo.com\nblah') == (
            'blah\nblah\nblah')
    assert parse_content_marked_no_verify(
        'blah1\n// <gcb-no-verify>\n/blah2\n// </gcb-no-verify>\nblah3')[0] == (
            'blah1\n// \nblah3')
    # pylint: enable-msg=anomalous-backslash-in-string
    assert Verifier.encode_regex('/white?/i') == """gcb_regex('white?', 'i')"""
    assert (Verifier.encode_regex('/jane austen (book|books) \\-price/i') ==
            r"""gcb_regex('jane austen (book|books) \\-price', 'i')""")
    assert (Verifier.encode_regex('/Kozanji|Kozan-ji|Kosanji|Kosan-ji/i') ==
            r"""gcb_regex('Kozanji|Kozan-ji|Kosanji|Kosan-ji', 'i')""")
    assert (Verifier.encode_regex('/Big Time College Sport?/i') ==
            "gcb_regex('Big Time College Sport?', 'i')")
    assert (Verifier.encode_regex('/354\\s*[+]\\s*651/') ==
            r"""gcb_regex('354\\s*[+]\\s*651', '')""")
def run_all_schema_helper_unit_tests():
    """Executes all tests related to schema validation."""
    def assert_same(a, b):
        """Fails unless a equals b."""
        if a != b:
            raise Exception('Expected:\n  %s\nFound:\n  %s' % (a, b))
    def assert_pass(instances, types, expected_result=None):
        """Validates instances against types; optionally checks the result."""
        try:
            schema_helper = SchemaHelper()
            result = schema_helper.check_instances_match_schema(
                instances, types, 'test')
            if OUTPUT_DEBUG_LOG:
                print '\n'.join(schema_helper.parse_log)
            if expected_result:
                assert_same(expected_result, result)
        except SchemaException as e:
            if OUTPUT_DEBUG_LOG:
                print str(e)
                print '\n'.join(schema_helper.parse_log)
            raise
    def assert_fails(func):
        """Fails unless calling func raises SchemaException."""
        try:
            func()
            raise Exception('Expected to fail')
        except SchemaException as e:
            if OUTPUT_DEBUG_LOG:
                print str(e)
    def assert_fail(instances, types):
        """Fails unless validating instances raises SchemaException."""
        assert_fails(lambda: assert_pass(instances, types))
    def create_python_dict_from_js_object(js_object):
        """Converts a JavaScript object literal into a Python dict."""
        python_str, noverify = convert_javascript_to_python(
            'var x = ' + js_object, 'x')
        ret = evaluate_python_expression_from_text(
            python_str, 'x', Assessment().scope, noverify)
        return ret['x']
    # CSV tests
    units = read_objects_from_csv(
        [
            ['id', 'type', 'now_available'],
            [1, 'U', 'True'],
            [1, 'U', 'False']],
        'id,type,now_available', Unit, converter=UNIT_CSV_TO_DB_CONVERTER)
    assert units[0].now_available
    assert not units[1].now_available
    read_objects_from_csv(
        [['id', 'type'], [1, 'none']], 'id,type', Unit)
    def reader_one():
        return read_objects_from_csv(
            [['id', 'type'], [1, 'none']], 'id,type,title', Unit)
    assert_fails(reader_one)
    def reader_two():
        read_objects_from_csv(
            [['id', 'type', 'title'], [1, 'none']], 'id,type,title', Unit)
    assert_fails(reader_two)
    # context tests
    assert_same(Context().new([]).new(['a']).new(['b', 'c']).format_path(),
                ('//a/b/c'))
    # simple map tests
    assert_pass({'name': 'Bob'}, {'name': STRING})
    assert_fail('foo', 'bar')
    assert_fail({'name': 'Bob'}, {'name': INTEGER})
    assert_fail({'name': 12345}, {'name': STRING})
    assert_fail({'amount': 12345}, {'name': INTEGER})
    assert_fail({'regex': Term(CORRECT)}, {'regex': Term(REGEX)})
    assert_pass({'name': 'Bob'}, {'name': STRING, 'phone': STRING})
    assert_pass({'name': 'Bob'}, {'phone': STRING, 'name': STRING})
    assert_pass({'name': 'Bob'},
                {'phone': STRING, 'name': STRING, 'age': INTEGER})
    # mixed attributes tests
    assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]})
    assert_pass({'colors': []}, {'colors': [STRING]})
    assert_fail({'colors': {'red': 'blue'}}, {'colors': [STRING]})
    assert_fail({'colors': {'red': 'blue'}}, {'colors': [FLOAT]})
    assert_fail({'colors': ['red', 'blue', 5.5]}, {'colors': [STRING]})
    assert_fail({'colors': ['red', 'blue', {'foo': 'bar'}]},
                {'colors': [STRING]})
    assert_fail({'colors': ['red', 'blue'], 'foo': 'bar'},
                {'colors': [STRING]})
    assert_pass({'colors': ['red', 1]}, {'colors': [[STRING, INTEGER]]})
    assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING, INTEGER]]})
    assert_fail({'colors': [1, 2, 3]}, {'colors': [[STRING, INTEGER]]})
    assert_fail({'colors': ['red', 1, 5.3]}, {'colors': [[STRING, INTEGER]]})
    assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]})
    assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING]]})
    assert_fail({'colors': ['red', ['blue']]}, {'colors': [STRING]})
    assert_fail({'colors': ['red', ['blue', 'green']]}, {'colors': [STRING]})
    # required attribute tests
    assert_pass({'colors': ['red', 5]}, {'colors': [[STRING, INTEGER]]})
    assert_fail({'colors': ['red', 5]}, {'colors': [[INTEGER, STRING]]})
    assert_pass({'colors': ['red', 5]}, {'colors': [STRING, INTEGER]})
    assert_pass({'colors': ['red', 5]}, {'colors': [INTEGER, STRING]})
    assert_fail({'colors': ['red', 5, 'FF0000']},
                {'colors': [[STRING, INTEGER]]})
    # an array and a map of primitive type tests
    assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}},
                {'color': {'name': STRING, 'rgb': STRING}})
    assert_fail({'color': {'name': 'red', 'rgb': ['FF0000']}},
                {'color': {'name': STRING, 'rgb': STRING}})
    assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}},
                {'color': {'name': STRING, 'rgb': INTEGER}})
    assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}},
                {'color': {'name': STRING, 'rgb': {'hex': STRING}}})
    assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}},
                {'color': {'name': STRING, 'rgb': STRING}})
    assert_pass({'colors':
                 [{'name': 'red', 'rgb': 'FF0000'},
                  {'name': 'blue', 'rgb': '0000FF'}]},
                {'colors': [{'name': STRING, 'rgb': STRING}]})
    assert_fail({'colors':
                 [{'name': 'red', 'rgb': 'FF0000'},
                  {'phone': 'blue', 'rgb': '0000FF'}]},
                {'colors': [{'name': STRING, 'rgb': STRING}]})
    # boolean type tests
    assert_pass({'name': 'Bob', 'active': True},
                {'name': STRING, 'active': BOOLEAN})
    assert_pass({'name': 'Bob', 'active': [5, True, False]},
                {'name': STRING, 'active': [INTEGER, BOOLEAN]})
    assert_pass({'name': 'Bob', 'active': [5, True, 'false']},
                {'name': STRING, 'active': [STRING, INTEGER, BOOLEAN]})
    assert_fail({'name': 'Bob', 'active': [5, True, 'False']},
                {'name': STRING, 'active': [[INTEGER, BOOLEAN]]})
    # optional attribute tests
    assert_pass({'points':
                 [{'x': 1, 'y': 2, 'z': 3}, {'x': 3, 'y': 2, 'z': 1},
                  {'x': 2, 'y': 3, 'z': 1}]},
                {'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]})
    assert_pass({'points':
                 [{'x': 1, 'z': 3}, {'x': 3, 'y': 2}, {'y': 3, 'z': 1}]},
                {'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]})
    assert_pass({'account':
                 [{'name': 'Bob', 'age': 25, 'active': True}]},
                {'account':
                 [{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]})
    assert_pass({'account':
                 [{'name': 'Bob', 'active': True}]},
                {'account':
                 [{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]})
    # nested array tests
    assert_fail({'name': 'Bob', 'active': [5, True, 'false']},
                {'name': STRING, 'active': [[BOOLEAN]]})
    assert_fail({'name': 'Bob', 'active': [True]},
                {'name': STRING, 'active': [[STRING]]})
    assert_pass({'name': 'Bob', 'active': ['true']},
                {'name': STRING, 'active': [[STRING]]})
    assert_pass({'name': 'flowers', 'price': ['USD', 9.99]},
                {'name': STRING, 'price': [[STRING, FLOAT]]})
    assert_pass({'name': 'flowers', 'price':
                 [['USD', 9.99], ['CAD', 11.79], ['RUB', 250.23]]},
                {'name': STRING, 'price': [[STRING, FLOAT]]})
    # selector tests
    assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'},
                           {'state': 'NY', 'drink': 'wine'}]},
                {'likes': [{'state': 'CA', 'food': STRING},
                           {'state': 'NY', 'drink': STRING}]})
    assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'},
                           {'state': 'CA', 'food': 'nuts'}]},
                {'likes': [{'state': 'CA', 'food': STRING},
                           {'state': 'NY', 'drink': STRING}]})
    assert_fail({'likes': {'state': 'CA', 'drink': 'cheese'}},
                {'likes': [{'state': 'CA', 'food': STRING},
                           {'state': 'NY', 'drink': STRING}]})
    # creating from dict tests
    assert_same(create_python_dict_from_js_object('{"active": true}'),
                {'active': Term(BOOLEAN, True)})
    assert_same(create_python_dict_from_js_object(
        '{"a": correct("hello world")}'),
        {'a': Term(CORRECT, 'hello world')})
    assert_same(create_python_dict_from_js_object(
        '{correctAnswerRegex: /hello/i}'),
        {'correctAnswerRegex': Term(REGEX, '/hello/i')})
def run_example_activity_tests():
    """Parses and validates example activity file."""
    fname = os.path.join(
        os.path.dirname(__file__), '../assets/js/activity-examples.js')
    if not os.path.exists(fname):
        raise Exception('Missing file: %s', fname)
    checker = Verifier()
    checker.echo_func = echo
    parsed_activity = evaluate_javascript_expression_from_file(
        fname, 'activity', Activity().scope, checker.echo_func)
    checker.verify_activity_instance(parsed_activity, fname)
def test_exec():
    """This test shows that exec/compile are explitable, thus not safe."""
    # The payload climbs from a plain tuple up the class hierarchy to object,
    # enumerates its subclasses to find warnings.catch_warnings, and reads
    # the real __builtins__ back out of that class's module -- proving the
    # emptied-__builtins__ sandbox is escapable.
    content = """
foo = [
    c for c in ().__class__.__base__.__subclasses__()
    if c.__name__ == 'catch_warnings'
][0]()._module.__builtins__
"""
    restricted_scope = {}
    restricted_scope.update({'__builtins__': {}})
    code = compile(content, '<string>', 'exec')
    # pylint: disable-msg=exec-statement
    exec code in restricted_scope
    # pylint: enable-msg=exec-statement
    assert 'isinstance' in restricted_scope.get('foo')
def test_sample_assets():
    """Test assets shipped with the sample course."""
    unused_warnings, unused_errors, output = (
        Verifier().load_and_verify_model(echo))
    expected_stats = (
        'Schema usage statistics: {'
        '\'REGEX\': 19, \'STRING\': 415, \'NUMBER\': 1, '
        '\'BOOLEAN\': 81, \'dict\': 73, \'str\': 41, \'INTEGER\': 9, '
        '\'CORRECT\': 9}')
    expected_summary = 'Completed verification: 0 warnings, 0 errors.'
    # Both substrings must appear in the verifier's summary line.
    if expected_stats not in output or expected_summary not in output:
        raise Exception('Sample course verification failed.\n%s' % output)
def run_all_unit_tests():
    """Runs all unit tests in this module."""
    # Temporarily install the unsafe legacy exec-based parser (the tests
    # evaluate JavaScript-derived content); the finally block restores the
    # previous parser no matter what fails.
    global parse_content
    original = parse_content
    try:
        parse_content = legacy_eval_python_expression_for_test
        run_all_regex_unit_tests()
        run_all_schema_helper_unit_tests()
        run_example_activity_tests()
        test_exec()
        test_sample_assets()
    finally:
        parse_content = original
# Entry point: run the module's self-tests when executed directly.
if __name__ == '__main__':
    run_all_unit_tests()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allows export of Lessons and Units to other systems."""
__author__ = 'psimakov@google.com (Pavel Simakov)'
from datetime import datetime
import os
import verify
# Version tag stamped into the header comment of every generated export file.
RELEASE_TAG = '1.0'
def echo(unused_x):
    """No-op logger handed to the verifier to keep exports quiet."""
    pass
JS_GCB_REGEX = """
function gcb_regex(base, modifiers) {
// NB: base should already have backslashes escaped
return new RegExp(base, modifiers);
}
"""
def export_to_javascript(filename, lines, date):
    """Creates JavaScript export function from given lines and writes a file.

    Args:
        filename: string. Base output path; '.js' is appended.
        lines: list of string. Course definition statements to embed in the
            generated gcb_import() function.
        date: datetime. Timestamp recorded in the file header.
    """
    code = []
    code.append(JS_GCB_REGEX)
    code.append('function gcb_import(){')
    for line in lines:
        if line:
            code.append(' %s' % line)
        else:
            # Keep blank input lines blank rather than emitting ' '.
            code.append('')
    code.append('')
    code.append(' course = Array();')
    code.append(' course["units"] = units;')
    code.append(' course["assessments"] = assessments;')
    code.append(' return course;')
    code.append('}')
    # Fix: 'with' guarantees the handle is closed even if a write raises;
    # the previous open()/close() pair leaked the descriptor on error.
    with open('%s.js' % filename, 'w') as afile:
        afile.write('// Course Builder %s JavaScript Export on %s\n' % (
            RELEASE_TAG, date))
        afile.write('// begin\n')
        afile.write('\n'.join(code))
        afile.write('\n// end')
# Python preamble emitted at the top of every .py export; defines the
# gcb_regex() helper referenced by exported course definitions.
# NOTE(review): the snippet body below appears to have lost its indentation
# (as written it is not valid Python when the export is executed) -- this
# looks like a formatting casualty; confirm against the original source and
# restore the indents inside gcb_regex().
PYTHON_GCB_REGEX = """
import re
def gcb_regex(base, modifiers):
flags = 0
if 'i' in modifiers:
flags |= re.IGNORECASE
if 'm' in modifiers:
flags |= re.MULTILINE
return re.compile(base, flags)
"""
def export_to_python(filename, lines, date):
    """Creates Python export function from given lines and writes a file.

    Args:
        filename: string. Base output path; '.py' is appended.
        lines: list of string. Course definition statements to embed in the
            generated gcb_import() function.
        date: datetime. Timestamp recorded in the file header.
    """
    code = []
    # Array is emulated as a dict subclass so the JS-style exported lines
    # (course["units"] = ...) run unchanged under Python.
    code.append('class Array(dict):')
    code.append(' pass')
    code.append('')
    code.append('true = True')
    code.append('false = False')
    code.append(PYTHON_GCB_REGEX)
    code.append('def gcb_import():')
    for line in lines:
        code.append(' %s' % line)
    code.append('')
    code.append(' course = Array();')
    code.append(' course["units"] = units;')
    code.append(' course["assessments"] = assessments;')
    code.append(' return course;')
    # Fix: 'with' guarantees the handle is closed even if a write raises;
    # the previous open()/close() pair leaked the descriptor on error.
    with open('%s.py' % filename, 'w') as afile:
        afile.write('# Course Builder %s Python Export on %s\n' % (
            RELEASE_TAG, date))
        afile.write('# begin\n')
        afile.write('\n'.join(code))
        afile.write('\n# end')
# TODO(psimakov): implement PHP_GCB_REGEX, but it's unclear how to return a new
# regexp object in PHP. maybe see http://www.regular-expressions.info/php.html
def export_to_php(filename, lines, date):
"""Creates PHP export function from given lines and writes a file."""
code = []
code.append('function gcb_import(){')
for line in lines:
if line:
code.append(' $%s' % line)
else:
code.append('')
code.append('')
code.append(' $course = Array();')
code.append(' $course["units"] = $units;')
code.append(' $course["assessments"] = $assessments;')
code.append(' return $course;')
code.append('}')
afile = open('%s.php' % filename, 'w')
afile.write('<?php\n')
afile.write('// Course Builder %s PHP Export on %s\n' %
(RELEASE_TAG, date))
afile.write('// begin\n')
afile.write('\n'.join(code))
afile.write('\n// end')
afile.write('?>')
afile.close()
def export_to_file(filename, lines):
    """Exports lines as JavaScript, Python, and PHP sharing one timestamp."""
    stamp = datetime.utcnow()
    # All three outputs record the same generation time in their headers.
    for exporter in (export_to_javascript, export_to_python, export_to_php):
        exporter(filename, lines, stamp)
if __name__ == '__main__':
    # Verify the course model before exporting; echo discards progress text.
    print 'Export started using %s' % os.path.realpath(__file__)
    verifier = verify.Verifier()
    errors = verifier.load_and_verify_model(echo)
    # NOTE(review): elsewhere load_and_verify_model appears to be unpacked as
    # a 3-tuple; confirm a truthiness check on the raw return is intended.
    if errors:
        raise Exception('Please fix all errors reported by tools/verify.py '
                        'before continuing!')
    fname = os.path.join(os.getcwd(), 'coursebuilder_course')
    export_to_file(fname, verifier.export)
    print 'Export complete to %s' % fname
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Examples of custom extract-transform-load jobs.
Custom jobs are run via tools/etl/etl.py. You must do environment setup before
etl.py can be invoked; see its module docstring for details.
See tools/etl/etl_lib.py for documentation on writing Job subclasses.
"""
__author__ = [
'johncox@google.com',
]
import os
import sys
import appengine_config
from models import models
from tools.etl import etl_lib
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
class PrintMemcacheStats(etl_lib.Job):
    """Example job that prints remote memcache statistics.

    Usage:

        etl.py run tools.etl.examples.PrintMemcacheStats /course myapp \
            server.appspot.com

    Arguments to etl.py are documented in tools/etl/etl.py. You must do some
    environment configuration (setting up imports, mostly) before you can run
    etl.py; see the tools/etl/etl.py module-level docstring for details.
    """

    # String. Template to use when printing memcache stats; the %(...)s keys
    # match the dict returned by memcache.get_stats().
    _STATS_TEMPLATE = """Global memcache stats:
\tHits: %(hits)s
\tItems in cache: %(items)s
\tMisses: %(misses)s
\tOldest item in seconds: %(oldest_item_age)s
\tTotal bytes in cache: %(bytes)s
\tTotal bytes retrieved via get: %(byte_hits)s"""

    def main(self):
        # Custom jobs execute locally, but can talk to remote services like
        # the datastore and memcache. Here we get the same memcache stats you
        # can see in the Memcache Viewer part of App Engine's admin console.
        print self._STATS_TEMPLATE % memcache.get_stats()
class UploadFileToCourse(etl_lib.Job):
    """Example job that writes a single local file to a remote server.

    Usage:

        etl.py run tools.etl.examples.UploadFileToCourse /course myapp \
            server.appspot.com \
            --job_args='/path/to/local/file path/to/remote/file'

    Arguments to etl.py are documented in tools/etl/etl.py. Before running,
    do the usual environment configuration (mostly import setup); see the
    tools/etl/etl.py module-level docstring for details.
    """

    def _configure_parser(self):
        # Register job-specific flags on the argparse parser we inherit.
        self.parser.add_argument(
            'path', help='Absolute path of the file to upload', type=str)
        self.parser.add_argument(
            'target',
            help=('Internal Course Builder path to upload to (e.g. '
                  '"assets/img/logo.png")'), type=str)

    def main(self):
        # Arguments are already parsed into self.args by the time main()
        # runs; any validation argparse cannot express happens here.
        if not os.path.exists(self.args.path):
            sys.exit('%s does not exist' % self.args.path)
        # etl.py's own arguments are parsed into self.etl_args; they locate
        # the requested course's application context.
        app_context = etl_lib.get_context(self.etl_args.course_url_prefix)
        # Compute the absolute destination path under the bundle root.
        destination = os.path.join(
            appengine_config.BUNDLE_ROOT, self.args.target)
        with open(self.args.path) as upload:
            # Write through the context's filesystem. A real program would
            # likely also guard against overwriting existing files, etc.
            app_context.fs.impl.put(destination, upload, is_draft=False)
class WriteStudentEmailsToFile(etl_lib.Job):
    """Example job that reads student emails from remote server to local file.

    Usage:

        etl.py run tools.etl.examples.WriteStudentEmailsToFile /course myapp \
            server.appspot.com --job_args=/path/to/output_file

    Arguments to etl.py are documented in tools/etl/etl.py. You must do some
    environment configuration (setting up imports, mostly) before you can run
    etl.py; see the tools/etl/etl.py module-level docstring for details.
    """

    def _configure_parser(self):
        # Add custom arguments by manipulating self.parser.
        self.parser.add_argument(
            'path', help='Absolute path to save output to', type=str)
        self.parser.add_argument(
            '--batch_size', default=20,
            help='Number of students to download in each batch', type=int)

    def main(self):
        # By the time main() is invoked, arguments are parsed and available
        # as self.args. If you need more complicated argument validation than
        # argparse gives you, do it here.
        if self.args.batch_size < 1:
            # Fix: error message previously said '--batch size' (typo).
            sys.exit('--batch_size must be positive')
        # NOTE(review): --batch_size is validated but never used below; the
        # fetch is one call capped at 1000. Either wire it into the query or
        # drop the flag.
        if os.path.exists(self.args.path):
            sys.exit('Cannot download to %s; file exists' % self.args.path)
        # Arguments passed to etl.py are parsed into self.etl_args; use them
        # to find the requested course's namespace.
        namespace = etl_lib.get_context(
            self.etl_args.course_url_prefix).get_namespace_name()
        # Models are namespaced, so switch into the course's namespace for
        # the read. The current namespace is process-global state: restore it
        # no matter what, or later datastore operations may be corrupted.
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(namespace)
            # For this example, we'll only process the first 1000 results. A
            # keys_only query suffices: the student's email is key.name().
            keys = models.Student.all(keys_only=True).fetch(1000)
        finally:
            namespace_manager.set_namespace(old_namespace)
        # Write the results. Fix: dropped the redundant str() wrapped around
        # the concatenation -- key.name() is already a string.
        with open(self.args.path, 'w') as f:
            for key in keys:
                f.write(key.name() + '\n')
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Remote environment manager for extract-transform-load utilities."""
__author__ = [
'johncox@google.com',
]
import os
import sys
import appengine_config
# Override SERVER_SOFTWARE before doing any App Engine imports so import-time
# detection of dev mode, done against SERVER_SOFTWARE of 'Development*', fails.
# Once imports are done, this environment variable can be reset as needed (for
# tests, etc.). pylint: disable-msg=g-import-not-at-top
# Any value not starting with 'Development' works; this one documents intent.
SERVER_SOFTWARE = 'Production Emulation'
# Refuse to run inside a real production container.
if appengine_config.PRODUCTION_MODE:
    sys.exit('Running etl/tools/remote.py in production is not supported.')
os.environ['SERVER_SOFTWARE'] = SERVER_SOFTWARE
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.tools import appengine_rpc
from google.appengine.tools import remote_api_shell
# String. Used to detect appspot.com servers.
_APPSPOT_SERVER_SUFFIX = 'appspot.com'
# String. Password used when a password is not necessary.
_BOGUS_PASSWORD = 'bogus_password'
# String. Infix for google.com application ids.
_GOOGLE_APPLICATION_INFIX = 'google.com'
# String. Prefix App Engine uses for application ids in the dev appserver.
_LOCAL_APPLICATION_ID_PREFIX = 'dev~'
# String. Prefix used to detect if a server is running locally.
_LOCAL_SERVER_PREFIX = 'localhost'
# String. Prefix App Engine uses for application ids in production.
_REMOTE_APPLICATION_ID_PREFIX = 's~'
# String. Email address used unless os.environ['USER_EMAIL'] is set in tests.
_TEST_EMAIL = 'test@example.com'
# String. os.environ['SERVER_SOFTWARE'] value that indicates we're running
# under the test environment.
TEST_SERVER_SOFTWARE = 'Test'
class Error(Exception):
    """Root of this module's exception hierarchy."""
class EnvironmentAuthenticationError(Error):
    """Raised when environment setup fails because credentials were bad."""
class Environment(object):
    """Prepares the process to issue RPCs through App Engine's remote_api.

    The standard remote_api caveats apply:

    1. Requests bypass your application's handlers, so any business logic in
       those handlers is skipped; it is easy to corrupt the remote system
       this way.
    2. The code deployed remotely may not match the code running locally;
       such version skew can also corrupt the destination system.
    3. Execution is markedly slower than running in production.
    """

    def __init__(
            self, application_id, server, path='/_ah/remote_api'):
        """Constructs a new Environment.

        Args:
            application_id: string. The application id of the environment
                (myapp).
            server: string. The full name of the server to connect to
                (myurl.appspot.com).
            path: string. The URL of your app's remote api entry point.
        """
        self._application_id = application_id
        self._server = server
        self._path = path

    @staticmethod
    def _dev_appserver_auth_func():
        """Prompts for an email; the dev appserver takes a bogus password."""
        return raw_input('Email: '), _BOGUS_PASSWORD

    @staticmethod
    def _test_auth_func():
        """Returns canned credentials for the test environment."""
        return os.environ.get('USER_EMAIL', _TEST_EMAIL), _BOGUS_PASSWORD

    def _get_auth_func(self):
        """Picks the credential callback appropriate to the environment."""
        server_software = os.environ.get('SERVER_SOFTWARE', '')
        if server_software.startswith(TEST_SERVER_SOFTWARE):
            return self._test_auth_func
        if self._is_localhost():
            return self._dev_appserver_auth_func
        return remote_api_shell.auth_func

    def _get_internal_application_id(self):
        """Returns string containing App Engine's internal id representation."""
        if self._is_localhost():
            prefix = _LOCAL_APPLICATION_ID_PREFIX
        elif self._is_appspot():
            prefix = _REMOTE_APPLICATION_ID_PREFIX
        else:
            # google.com apps embed the domain: 's~google.com:<app_id>'.
            prefix = '%s%s:' % (
                _REMOTE_APPLICATION_ID_PREFIX, _GOOGLE_APPLICATION_INFIX)
        return prefix + self._application_id

    def _get_secure(self):
        """Returns True when https should be used (everything but localhost)."""
        return not self._is_localhost()

    def _is_appspot(self):
        """Returns True iff server is appspot.com."""
        return self._server.endswith(_APPSPOT_SERVER_SUFFIX)

    def _is_localhost(self):
        """Returns True if environment is dev_appserver and False otherwise."""
        return self._server.startswith(_LOCAL_SERVER_PREFIX)

    def establish(self):
        """Establishes the environment for RPC execution."""
        try:
            remote_api_stub.ConfigureRemoteApi(
                self._get_internal_application_id(), self._path,
                self._get_auth_func(), servername=self._server,
                save_cookies=True, secure=self._get_secure(),
                rpc_server_factory=appengine_rpc.HttpRpcServer)
            remote_api_stub.MaybeInvokeAuthentication()
        except AttributeError:
            # An AttributeError out of remote_api is treated as a failed
            # authentication attempt.
            raise EnvironmentAuthenticationError
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract-transform-load utility.
There are four features:
1. Download and upload of Course Builder 1.3+ data:
$ python etl.py download course /cs101 myapp server.appspot.com archive.zip
This will result in a file called archive.zip that contains the files that make
up the Course Builder 1.3+ course found at the URL /cs101 on the application
with id myapp running on the server named server.appspot.com. archive.zip will
contain assets and data files from the course along with a manifest.json
enumerating them. The format of archive.zip will change and should not be relied
upon.
For upload,
$ python etl.py upload course /cs101 myapp server.appspot.com \
--archive_path archive.zip
2. Download of datastore entities. This feature is experimental and upload is
not supported:
$ python etl.py download datastore /cs101 myapp server.appspot.com \
--archive_path archive.zip --datastore_types model1,model2
This will result in a file called archive.zip that contains a dump of all model1
and model2 instances found in the specified course, identified as above. The
archive will contain serialized data along with a manifest. The format of
archive.zip will change and should not be relied upon.
3. Deletion of all datastore entities in a single course. Delete of the course
itself not supported. To run:
$ python etl.py delete datastore /cs101 myapp server.appspot.com
Before delete commences, you will be told what entity kinds will be deleted and
you will be prompted for confirmation. Note that this process is irreversible,
and, if interrupted, may leave the course in an invalid state. Note also that it
races with writes against your datastore unless you first disable writes.
Finally, note that only the datastore entities of the kinds listed will be
deleted, and those will only be deleted from the namespace corresponding to the
target course. Custom entities you added to base Course Builder may or may not
be processed. Entities in the global namespace and those created by App Engine
will not be processed.
Deleting a course flushes caches. Because memcache does not support namespaced
flush all operations, all caches for all courses will be flushed.
4. Execution of custom jobs.
$ python etl.py run path.to.my.Job /cs101 myapp server.appspot.com \
--job_args='more_args --delegated_to my.Job'
This requires that you have written a custom class named Job found in the
directory path/to/my, relative to the Course Builder root. Job's main method
will be executed against the specified course, identified as above. See
etl_lib.Job for more information.
In order to run this script, you must add the following to the head of sys.path:
1. The absolute path of your Course Builder installation.
2. The absolute path of your App Engine SDK.
3. The absolute paths of third party libraries from the SDK used by Course
Builder:
fancy_urllib
jinja2
webapp2
webob
Their locations in the supported 1.8.2 App Engine SDK are
<sdk_path>/lib/fancy_urllib
<sdk_path>/lib/jinja2-2.6
<sdk_path>/lib/webapp2-2.5.2
<sdk_path>/lib/webob-1.2.3
where <sdk_path> is the absolute path of the 1.8.2 App Engine SDK.
4. If you are running a custom job, the absolute paths of all code required
by your custom job, unless covered above.
When running etl.py against a remote endpoint you will be prompted for a
username and password. If the remote endpoint is a development server, you may
enter any username and password. If the remote endpoint is in production, enter
your username and an application-specific password. See
http://support.google.com/accounts/bin/answer.py?hl=en&answer=185833 for help on
application-specific passwords.
Pass --help for additional usage information.
"""
__author__ = [
'johncox@google.com (John Cox)',
]
import argparse
import functools
import hashlib
import hmac
import logging
import os
import random
import re
import sys
import traceback
import zipfile
import yaml
# Placeholders for modules we'll import after setting up sys.path. This allows
# us to avoid lint suppressions at every callsite. Each is rebound to the real
# module once import setup has run.
appengine_config = None
config = None
courses = None
db = None
etl_lib = None
memcache = None
metadata = None
namespace_manager = None
remote = None
transforms = None
vfs = None
# String. Prefix for files stored in an archive.
_ARCHIVE_PATH_PREFIX = 'files'
# String. End of the path to course.json in an archive.
_COURSE_JSON_PATH_SUFFIX = 'data/course.json'
# String. End of the path to course.yaml in an archive.
_COURSE_YAML_PATH_SUFFIX = 'course.yaml'
# String. Message the user must type to confirm datastore deletion.
_DELETE_DATASTORE_CONFIRMATION_INPUT = 'YES, DELETE'
# Function that takes one arg and returns it unchanged.
_IDENTITY_TRANSFORM = lambda x: x
# Regex. Format of __internal_names__ used by datastore kinds.
_INTERNAL_DATASTORE_KIND_REGEX = re.compile(r'^__.*__$')
# Path prefix strings from local disk that will be included in the archive.
_LOCAL_WHITELIST = frozenset([_COURSE_YAML_PATH_SUFFIX, 'assets', 'data'])
# Path prefix strings that are subdirectories of the whitelist that we actually
# want to exclude because they aren't userland code and will cause conflicts.
_LOCAL_WHITELIST_EXCLUDES = frozenset(['assets/lib'])
# logging.Logger. Module logger.
_LOG = logging.getLogger('coursebuilder.tools.etl')
logging.basicConfig()
# List of string. Valid values for --log_level.
_LOG_LEVEL_CHOICES = ['DEBUG', 'ERROR', 'INFO', 'WARNING']
# String. Name of the manifest file.
_MANIFEST_FILENAME = 'manifest.json'
# String. Identifier for delete mode.
_MODE_DELETE = 'delete'
# String. Identifier for download mode.
_MODE_DOWNLOAD = 'download'
# String. Identifier for custom run mode.
_MODE_RUN = 'run'
# String. Identifier for upload mode.
_MODE_UPLOAD = 'upload'
# List of all modes.
_MODES = [_MODE_DELETE, _MODE_DOWNLOAD, _MODE_RUN, _MODE_UPLOAD]
# Int. The number of times to retry remote_api calls.
_RETRIES = 3
# String. Identifier for type corresponding to course definition data.
_TYPE_COURSE = 'course'
# String. Identifier for type corresponding to datastore entities.
_TYPE_DATASTORE = 'datastore'
# Command-line argument configuration. Positionals: mode, type,
# course_url_prefix, application_id, server; everything else is an optional
# flag whose relevance depends on mode/type.
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
    'mode', choices=_MODES,
    help='Indicates the kind of operation we are performing', type=str)
PARSER.add_argument(
    'type',
    help=(
        'Type of entity to process. If mode is %s or %s, should be one of '
        '%s or %s. If mode is %s, should be an importable dotted path to your '
        'etl_lib.Job subclass') % (
            _MODE_DOWNLOAD, _MODE_UPLOAD, _TYPE_COURSE, _TYPE_DATASTORE,
            _MODE_RUN),
    type=str)
PARSER.add_argument(
    'course_url_prefix',
    help=(
        "URL prefix of the course you want to download (e.g. '/foo' in "
        "'course:/foo:/directory:namespace'"), type=str)
PARSER.add_argument(
    'application_id',
    help="The id of the application to read from (e.g. 'myapp')", type=str)
PARSER.add_argument(
    'server',
    help=(
        'The full name of the source application to read from (e.g. '
        'myapp.appspot.com)'), type=str)
PARSER.add_argument(
    '--archive_path',
    help=(
        'Absolute path of the archive file to read or write; required if mode '
        'is %s or %s' % (_MODE_DOWNLOAD, _MODE_UPLOAD)), type=str)
PARSER.add_argument(
    '--batch_size',
    help='Number of results to attempt to retrieve per batch',
    default=20, type=int)
PARSER.add_argument(
    '--datastore_types',
    help=(
        "When type is '%s', comma-separated list of datastore model types to "
        'process; all models are processed by default' % _TYPE_DATASTORE),
    type=lambda s: s.split(','))
PARSER.add_argument(
    '--disable_remote', action='store_true',
    help=(
        'If mode is %s, pass this flag to skip authentication and remote '
        'environment setup. Should only pass for jobs that run entirely '
        'locally and do not require RPCs') % _MODE_RUN)
PARSER.add_argument(
    '--force_overwrite', action='store_true',
    help=(
        'If mode is download and type is course, forces overwrite of entities '
        'on the target system that are also present in the archive. Note that '
        'this operation is dangerous and may result in data loss'))
PARSER.add_argument(
    '--job_args', default=[],
    help=(
        'If mode is %s, string containing args delegated to etl_lib.Job '
        'subclass') % _MODE_RUN, type=lambda s: s.split())
PARSER.add_argument(
    '--log_level', choices=_LOG_LEVEL_CHOICES,
    help='Level of logging messages to emit', default='INFO',
    type=lambda s: s.upper())
PARSER.add_argument(
    '--privacy', action='store_true',
    help=(
        "When mode is '%s' and type is '%s', passing this flag will strip or "
        "obfuscate information that can identify a single user" % (
            _MODE_DOWNLOAD, _TYPE_DATASTORE)))
PARSER.add_argument(
    '--privacy_secret',
    help=(
        "When mode is '%s', type is '%s', and --privacy is passed, pass this "
        "secret to have user ids transformed with it rather than with random "
        "bits") % (_MODE_DOWNLOAD, _TYPE_DATASTORE), type=str)
class _Archive(object):
    """Manager for local archives of Course Builder data.

    The internal format of the archive may change from version to version;
    users must not depend on it.

    Archives contain assets and data from a single course, along with a
    manifest detailing the course's raw definition string, the version of
    Course Builder the course is compatible with, and the list of course
    files contained within the archive.

    # TODO(johncox): possibly obfuscate this archive so it cannot be unzipped
    # outside etl.py. Add a command-line flag for creating a zip instead. For
    # uploads, require an obfuscated archive, not a zip.
    """

    def __init__(self, path):
        """Constructs a new archive.

        Args:
            path: string. Absolute path where the archive will be written.
        """
        self._path = path
        self._zipfile = None

    @classmethod
    def get_external_path(cls, internal_path):
        """Gets external path string from results of cls.get_internal_path."""
        prefix = _ARCHIVE_PATH_PREFIX + os.sep
        assert internal_path.startswith(prefix)
        return internal_path.split(prefix)[1]

    @classmethod
    def get_internal_path(cls, external_path):
        """Get path string used in the archive from an external path string.

        Generates the path used within an archive for an asset. All assets
        (meaning all archive contents except the manifest file) must have
        their paths generated this way, and those paths must be re-translated
        to external paths via cls.get_external_path before use with systems
        external to the archive file.

        Args:
            external_path: string. Path to generate an internal archive path
                from.

        Returns:
            String. Internal archive path.
        """
        assert not external_path.startswith(_ARCHIVE_PATH_PREFIX)
        return os.path.join(
            _ARCHIVE_PATH_PREFIX, _remove_bundle_root(external_path))

    def add(self, filename, contents):
        """Adds contents to the archive.

        Args:
            filename: string. Path of the contents to add.
            contents: bytes. Contents to add.
        """
        self._zipfile.writestr(filename, contents)

    def add_local_file(self, local_filename, internal_filename):
        """Adds a file from local disk to the archive.

        Args:
            local_filename: string. Path on disk of file to add.
            internal_filename: string. Internal archive path to write to.
        """
        self._zipfile.write(local_filename, arcname=internal_filename)

    def close(self):
        """Verifies archive integrity and closes it; must close before read.

        Raises:
            IOError: if any archived member fails its CRC check.
        """
        # Fix: the result of testzip() was previously discarded, so a corrupt
        # archive passed the "integrity test" silently. testzip() returns the
        # name of the first bad member, or None when everything checks out.
        bad_entry = self._zipfile.testzip()
        if bad_entry is not None:
            raise IOError('Archive member failed CRC check: %s' % bad_entry)
        self._zipfile.close()

    def get(self, path):
        """Return the raw bytes of the archive entity found at path.

        Returns None if path is not in the archive.

        Args:
            path: string. Path of file to retrieve from the archive.

        Returns:
            Bytes of file contents.
        """
        assert self._zipfile
        try:
            return self._zipfile.read(path)
        except KeyError:
            # Missing members are reported as None, not an exception.
            pass

    def open(self, mode):
        """Opens archive in the mode given by mode string ('r', 'w', 'a')."""
        assert not self._zipfile
        self._zipfile = zipfile.ZipFile(self._path, mode)

    @property
    def manifest(self):
        """Returns the archive's manifest."""
        return _Manifest.from_json(self.get(_MANIFEST_FILENAME))

    @property
    def path(self):
        return self._path
class _Manifest(object):
    """Manifest that lists the contents and version of an archive folder."""

    def __init__(self, raw, version):
        """Constructs a new manifest.

        Args:
            raw: string. Raw course definition string.
            version: string. Version of Course Builder course this manifest
                was generated from.
        """
        self._raw = raw
        self._version = version
        self._entities = []

    @classmethod
    def from_json(cls, json):
        """Returns a manifest for the given JSON string."""
        payload = transforms.loads(json)
        manifest = cls(payload['raw'], payload['version'])
        for record in payload['entities']:
            manifest.add(_ManifestEntity(record['path'], record['is_draft']))
        return manifest

    def add(self, entity):
        self._entities.append(entity)

    def get(self, path):
        """Gets _Entity by path string; returns None if not found."""
        for candidate in self._entities:
            if candidate.path == path:
                return candidate
        return None

    @property
    def entities(self):
        # Present entries in a stable, path-sorted order.
        return sorted(self._entities, key=lambda entry: entry.path)

    @property
    def raw(self):
        return self._raw

    @property
    def version(self):
        return self._version

    def __str__(self):
        """Returns JSON representation of the manifest."""
        # NOTE(review): sort_keys=2 is merely truthy; kept verbatim.
        payload = {
            'entities': [entry.__dict__ for entry in self.entities],
            'raw': self.raw,
            'version': self.version,
        }
        return transforms.dumps(payload, indent=2, sort_keys=2)
class _ManifestEntity(object):
    """Record describing one archived file: its path and its draft flag."""

    def __init__(self, path, is_draft):
        self.path = path
        self.is_draft = is_draft
class _ReadWrapper(object):
    """Adapter exposing a file-like read() over a fixed byte string."""

    def __init__(self, data):
        """Constructs a new read wrapper.

        Args:
            data: bytes. The bytes to return on read().
        """
        self._data = data

    def read(self):
        # The whole payload is returned on every call.
        return self._data
def _confirm_delete_datastore_or_die(kind_names, namespace, title):
    """Interactively confirms the delete; dies unless the user types it."""
    prompt = (
        'You are about to delete all entities of the kinds "%(kinds)s" from '
        'the course %(title)s in namespace %(namespace)s.%(linebreak)sYou are '
        'also about to flush all caches for all courses on your production '
        'instance.%(linebreak)sYou cannot undo this operation.%(linebreak)sTo '
        'confirm, type "%(confirmation_message)s": ') % {
            'confirmation_message': _DELETE_DATASTORE_CONFIRMATION_INPUT,
            'kinds': ', '.join(kind_names),
            'linebreak': os.linesep,
            'namespace': namespace,
            'title': title,
        }
    # Anything other than the exact confirmation phrase aborts the run.
    if _raw_input(prompt) != _DELETE_DATASTORE_CONFIRMATION_INPUT:
        _die('Delete not confirmed. Aborting')
def _delete(course_url_prefix, delete_type, batch_size):
    """Routes a delete request to the handler for delete_type."""
    context = _get_context_or_die(course_url_prefix)
    if delete_type == _TYPE_COURSE:
        _delete_course()
    elif delete_type == _TYPE_DATASTORE:
        # The namespace is process-global state; restore it no matter what.
        previous_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(context.get_namespace_name())
            _delete_datastore(context, batch_size)
        finally:
            namespace_manager.set_namespace(previous_namespace)
def _delete_course():
    """Placeholder: deleting a whole course is not yet supported."""
    raise NotImplementedError()
def _delete_datastore(context, batch_size):
    """Deletes every entity of every kind in the context's namespace."""
    kind_names = _get_datastore_kinds()
    _confirm_delete_datastore_or_die(
        kind_names, context.get_namespace_name(), context.get_title())
    # Resolve every model class up front so an import failure cannot strike
    # midway through issuing delete RPCs.
    model_classes = []
    for kind_name in kind_names:
        model_classes.append(db.class_for_kind(kind_name))
    _LOG.info('Beginning datastore delete')
    for model_class in model_classes:
        _LOG.info('Deleting entities of kind %s', model_class.kind())
        _process_models(model_class, batch_size, delete=True)
    _LOG.info('Flushing all caches')
    memcache.flush_all()
    _LOG.info('Done')
def _die(message, with_trace=False):
    """Logs message (optionally with the active traceback) and exits 1."""
    if with_trace:
        # Append exception class, message, and formatted stack to the log.
        exc_class, exc_value, exc_tb = sys.exc_info()
        message = '%s%s%s%s%s%s%s' % (
            message, os.linesep,
            exc_class, os.linesep,
            exc_value, os.linesep,
            ''.join(traceback.format_tb(exc_tb)))
    _LOG.critical(message)
    sys.exit(1)
def _download(
        download_type, archive_path, course_url_prefix, datastore_types,
        batch_size, privacy_transform_fn):
    """Validates and dispatches to a specific download method."""
    archive_path = os.path.abspath(archive_path)
    context = _get_context_or_die(course_url_prefix)
    course = _get_course_from(context)
    if download_type == _TYPE_COURSE:
        _download_course(context, course, archive_path, course_url_prefix)
        return
    if download_type != _TYPE_DATASTORE:
        return
    # The namespace is process-global state; restore it no matter what.
    previous_namespace = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(context.get_namespace_name())
        _download_datastore(
            context, course, archive_path, datastore_types, batch_size,
            privacy_transform_fn)
    finally:
        namespace_manager.set_namespace(previous_namespace)
def _download_course(context, course, archive_path, course_url_prefix):
    """Writes a course's datastore and filesystem files into a new archive."""
    if course.version < courses.COURSE_MODEL_VERSION_1_3:
        _die(
            'Cannot export course made with Course Builder version < %s' % (
                courses.COURSE_MODEL_VERSION_1_3))
    archive = _Archive(archive_path)
    archive.open('w')
    manifest = _Manifest(context.raw, course.version)
    _LOG.info('Processing course with URL prefix ' + course_url_prefix)
    # Split the file set: paths served from the datastore, versus everything
    # else visible through the context (inherited files included) on disk.
    datastore_files = set(_list_all(context))
    all_files = set(_filter_filesystem_files(_list_all(
        context, include_inherited=True)))
    filesystem_files = all_files - datastore_files
    _LOG.info('Adding files from datastore')
    for external_path in datastore_files:
        internal_path = _Archive.get_internal_path(external_path)
        stream = _get_stream(context, external_path)
        # Only datastore entries can carry a draft flag; default to False.
        is_draft = False
        if stream.metadata and hasattr(stream.metadata, 'is_draft'):
            is_draft = stream.metadata.is_draft
        entity = _ManifestEntity(internal_path, is_draft)
        archive.add(internal_path, stream.read())
        manifest.add(entity)
    _LOG.info('Adding files from filesystem')
    for external_path in filesystem_files:
        with open(external_path) as f:
            internal_path = _Archive.get_internal_path(external_path)
            archive.add(internal_path, f.read())
            # Filesystem files are never drafts.
            manifest.add(_ManifestEntity(internal_path, False))
    _finalize_download(archive, manifest)
def _download_datastore(
        context, course, archive_path, datastore_types, batch_size,
        privacy_transform_fn):
    """Exports the requested datastore entity kinds into a new archive.

    Args:
        context: sites.ApplicationContext. The course being exported.
        course: courses.Course. Course object (used for manifest versioning).
        archive_path: string. Absolute path of the archive to create.
        datastore_types: list of string. Kinds to export; empty means all
            available kinds.
        batch_size: int. Number of entities fetched per batch.
        privacy_transform_fn: function. Applied to sensitive values before
            they are written out.
    """
    available_types = set(_get_datastore_kinds())
    if not datastore_types:
        datastore_types = available_types
    requested_types = set(datastore_types)
    missing_types = requested_types - available_types
    if missing_types:
        _die(
            'Requested types not found: %s%sAvailable types are: %s' % (
                ', '.join(missing_types), os.linesep,
                ', '.join(available_types)))
    found_types = requested_types & available_types
    archive = _Archive(archive_path)
    archive.open('w')
    manifest = _Manifest(context.raw, course.version)
    for found_type in found_types:
        # Entities are spooled to a temporary JSON file next to the archive,
        # copied into the archive, then the temporary file is removed.
        json_path = os.path.join(
            os.path.dirname(archive_path), '%s.json' % found_type)
        _LOG.info(
            'Adding entities of type %s to temporary file %s',
            found_type, json_path)
        json_file = transforms.JsonFile(json_path)
        json_file.open('w')
        model_map_fn = functools.partial(
            _write_model_to_json_file, json_file, privacy_transform_fn)
        _process_models(
            db.class_for_kind(found_type), batch_size,
            model_map_fn=model_map_fn)
        json_file.close()
        internal_path = _Archive.get_internal_path(
            os.path.basename(json_file.name))
        _LOG.info('Adding %s to archive', internal_path)
        archive.add_local_file(json_file.name, internal_path)
        manifest.add(_ManifestEntity(internal_path, False))
        _LOG.info('Removing temporary file ' + json_file.name)
        os.remove(json_file.name)
    _finalize_download(archive, manifest)
def _filter_filesystem_files(files):
    """Filters out unnecessary files from a local filesystem.

    If we just read from disk, we'll pick up and archive lots of files that we
    don't need to upload later, plus non-userland code that on reupload will
    shadow the system versions (views, assets/lib, etc.).

    Args:
        files: list of string. Absolute file paths.

    Returns:
        List of string. Absolute filepaths we want to archive.
    """
    filtered_files = []
    for path in files:
        relative_name = _remove_bundle_root(path)
        # Skip anything under an explicitly excluded prefix. A generator (not
        # the previous throwaway list) is fed to any(), and the positive name
        # avoids the earlier 'not any(...)' double negative.
        excluded = any(
            relative_name.startswith(e) for e in _LOCAL_WHITELIST_EXCLUDES)
        # Keep only files whose top-level directory is whitelisted.
        head_directory = relative_name.split(os.path.sep)[0]
        if not excluded and head_directory in _LOCAL_WHITELIST:
            filtered_files.append(path)
    return filtered_files
def _finalize_download(archive, manifest):
    """Writes the manifest into the archive, then closes the archive."""
    _LOG.info('Adding manifest')
    serialized_manifest = str(manifest)
    archive.add(_MANIFEST_FILENAME, serialized_manifest)
    archive.close()
    _LOG.info('Done; archive saved to ' + archive.path)
def _force_config_reload():
    """Forces a refresh of config properties from the datastore."""
    # For some reason config properties aren't being automatically pulled from
    # the datastore with the remote environment. Force an update of all of them.
    config.Registry.get_overrides(force_update=True)
def _get_context_or_die(course_url_prefix):
    """Returns the application context for the prefix, exiting if absent."""
    found_context = etl_lib.get_context(course_url_prefix)
    if not found_context:
        # _die terminates the process; there is no fallthrough.
        _die('No course found with course_url_prefix %s' % course_url_prefix)
    return found_context
def _get_privacy_transform_fn(privacy, privacy_secret):
    """Returns a transform function to use for export."""
    assert privacy_secret is not None
    if privacy:
        # Keyed HMAC so transformed values are stable but not reversible.
        return functools.partial(_hmac_sha_2_256, privacy_secret)
    return _IDENTITY_TRANSFORM
def _get_privacy_secret(privacy_secret):
"""Gets the passed privacy secret (or 128 random bits if None)."""
secret = privacy_secret
if secret is None:
secret = random.getrandbits(128)
return secret
def _get_course_from(app_context):
    """Gets a courses.Course from the given sites.ApplicationContext."""
    class _Adapter(object):
        # Course expects an object exposing an app_context attribute (normally
        # a handler); this stand-in satisfies exactly that contract.
        def __init__(self, app_context):
            self.app_context = app_context
    return courses.Course(_Adapter(app_context))
def _hmac_sha_2_256(privacy_secret, value):
    """HMAC-SHA-2-256 for use as a privacy transformation function."""
    # Both key and message are coerced to str so ints (e.g. generated 128-bit
    # secrets) and other types are accepted.
    digest = hmac.new(
        str(privacy_secret), msg=str(value), digestmod=hashlib.sha256)
    return digest.hexdigest()
def _import_entity_modules():
    """Import all entity type classes.

    We need to import main.py to make sure all known entity types are imported
    by the time the ETL code runs. If a transitive closure of main.py imports
    does not import all required classes, import them here explicitly.
    """
    # pylint: disable-msg=g-import-not-at-top,global-variable-not-assigned,
    # pylint: disable-msg=redefined-outer-name,unused-variable
    try:
        import main
    except ImportError, e:
        # The caught exception is not inspected; _die(with_trace=True) prints
        # the full traceback instead.
        _die((
            'Unable to import required modules; see tools/etl/etl.py for '
            'docs.'), with_trace=True)
def _import_modules_into_global_scope():
    """Import helper; run after _set_up_sys_path() for imports to resolve."""
    # pylint: disable-msg=g-import-not-at-top,global-variable-not-assigned,
    # pylint: disable-msg=redefined-outer-name,unused-variable
    # These names are declared global so the deferred imports below rebind the
    # module-level names the rest of this file references.
    global appengine_config
    global memcache
    global namespace_manager
    global db
    global metadata
    global config
    global courses
    global transforms
    global vfs
    global etl_lib
    global remote
    try:
        import appengine_config
        from google.appengine.api import memcache
        from google.appengine.api import namespace_manager
        from google.appengine.ext import db
        from google.appengine.ext.db import metadata
        from models import config
        from models import courses
        from models import transforms
        from models import vfs
        from tools.etl import etl_lib
        from tools.etl import remote
    except ImportError, e:
        # The caught exception is not inspected; _die(with_trace=True) prints
        # the full traceback instead.
        _die((
            'Unable to import required modules; see tools/etl/etl.py for '
            'docs.'), with_trace=True)
def _remove_bundle_root(path):
    """Removes BUNDLE_ROOT prefix from a path."""
    bundle_root = appengine_config.BUNDLE_ROOT
    if path.startswith(bundle_root):
        path = path.split(bundle_root)[1]
    # Path must not start with path separator so it is os.path.join()able.
    if path.startswith(os.path.sep):
        path = path[1:]
    return path
def _retry(message=None, times=_RETRIES):
    """Returns a decorator that automatically retries functions on error.

    Args:
        message: string or None. The optional message to log on retry.
        times: int. Number of times to retry.

    Returns:
        Function wrapper.
    """
    assert times > 0

    def decorator(fn):
        """Real decorator."""

        def wrapped(*args, **kwargs):
            # Attempt numbers run 1..times; only the final failure escapes.
            for attempt in xrange(1, times + 1):
                try:
                    return fn(*args, **kwargs)
                # We can't be more specific by default.
                # pylint: disable-msg=broad-except
                except Exception as e:
                    if message:
                        _LOG.info(message)
                    if attempt == times:
                        raise e
        return wrapped
    return decorator
@_retry(message='Clearing course cache failed; retrying')
def _clear_course_cache(context):
    """Invalidates the cached course representation."""
    courses.CachedCourse13.delete(context)  # Force update in UI.
@_retry(message='Checking if the specified course is empty failed; retrying')
def _context_is_for_empty_course(context):
    """True if course is entirely empty or contains only a course.yaml."""
    existing_files = context.fs.impl.list(
        appengine_config.BUNDLE_ROOT)
    if not existing_files:
        return True
    # A course holding nothing but its course.yaml also counts as empty.
    yaml_only = [os.path.join(
        appengine_config.BUNDLE_ROOT, _COURSE_YAML_PATH_SUFFIX)]
    return existing_files == yaml_only
@_retry(message='Getting list of datastore_types failed; retrying')
def _get_datastore_kinds():
    """Returns the user-defined datastore kind names currently present."""
    # Return only user-defined names, not __internal_appengine_names__.
    return [
        k for k in metadata.get_kinds()
        if not _INTERNAL_DATASTORE_KIND_REGEX.match(k)]
@_retry(message='Getting contents for entity failed; retrying')
def _get_stream(context, path):
    """Returns a read stream for the course file stored at path."""
    return context.fs.impl.get(path)
@_retry(message='Fetching asset list failed; retrying')
def _list_all(context, include_inherited=False):
    """Lists all file paths under the course bundle root.

    Args:
        context: sites.ApplicationContext. Course whose files are listed.
        include_inherited: bool. Also include files inherited from the
            filesystem rather than only course-owned entries.

    Returns:
        List of string absolute paths.
    """
    return context.fs.impl.list(
        appengine_config.BUNDLE_ROOT, include_inherited=include_inherited)
def _process_models(model_class, batch_size, delete=False, model_map_fn=None):
    """Fetch all rows in batches.

    Args:
        model_class: db.Model subclass. The kind to iterate over.
        batch_size: int. Number of entities fetched per batch.
        delete: bool. If True, delete each batch instead of mapping it.
        model_map_fn: function or None. Called on each fetched model when not
            deleting.
    """
    # The caller must either delete or supply a mapper. The previous
    # assertion, '(delete or model_map_fn) or (not delete and model_map_fn)',
    # reduces to exactly this expression.
    assert delete or model_map_fn
    reportable_chunk = batch_size * 10
    total_count = 0
    cursor = None
    while True:
        batch_count, cursor = _process_models_batch(
            model_class, cursor, batch_size, delete, model_map_fn)
        if not batch_count:
            break
        if not cursor:
            break
        total_count += batch_count
        # Log progress every reportable_chunk records.
        if not total_count % reportable_chunk:
            _LOG.info('Processed records: %s', total_count)
@_retry(message='Processing datastore entity batch failed; retrying')
def _process_models_batch(
        model_class, cursor, batch_size, delete, model_map_fn):
    """Processes or deletes models in batches.

    Args:
        model_class: db.Model subclass. Kind to query.
        cursor: string or None. Query cursor to resume from.
        batch_size: int. Maximum number of entities fetched in this batch.
        delete: bool. If True, delete the batch (keys-only query) instead of
            mapping it.
        model_map_fn: function. Applied to each fetched model when not
            deleting.

    Returns:
        Tuple of (count, cursor): number of entities processed, and the
        cursor for the next batch or None when this batch was empty.
    """
    # A keys-only query suffices for deletion and avoids fetching payloads.
    query = model_class.all(keys_only=delete)
    if cursor:
        query.with_cursor(start_cursor=cursor)
    count = 0
    empty = True
    results = query.fetch(limit=batch_size)
    if results:
        empty = False
        if delete:
            key_count = len(results)
            db.delete(results)
            count += key_count
        else:
            for result in results:
                model_map_fn(result)
                count += 1
    # Only hand back a cursor when this batch had results; a None cursor
    # signals the caller that iteration is done.
    cursor = None
    if not empty:
        cursor = query.cursor()
    return count, cursor
def _get_entity_dict(model, privacy_transform_fn):
    """Serializes a model to a dict, applying the privacy transform."""
    safe_key = model.safe_key(model.key(), privacy_transform_fn)
    # Only rewrite the model's fields when a real transform was requested.
    if privacy_transform_fn is not _IDENTITY_TRANSFORM:
        model = model.for_export(privacy_transform_fn)
    entity_dict = transforms.entity_to_dict(model, force_utf_8_encoding=True)
    entity_dict['key.name'] = unicode(safe_key.name())
    return entity_dict
@_retry(message='Upload failed; retrying')
def _put(context, content, path, is_draft, force_overwrite):
    """Writes one file into the course filesystem, optionally overwriting.

    Args:
        context: sites.ApplicationContext. Target course.
        content: file-like. Content to store at path.
        path: string. Path relative to the bundle root.
        is_draft: bool. Whether the stored file is flagged as a draft.
        force_overwrite: bool. If True, delete any existing file first.
    """
    path = os.path.join(appengine_config.BUNDLE_ROOT, path)
    if force_overwrite and context.fs.impl.isfile(path):
        _LOG.info('File %s found on target system; forcing overwrite', path)
        context.fs.impl.delete(path)
    # Bug fix: path was joined with BUNDLE_ROOT a second time here. That was
    # harmless only because os.path.join returns the second argument
    # unchanged when it is already absolute; use the rooted path directly.
    context.fs.impl.non_transactional_put(
        path, content, is_draft=is_draft)
def _raw_input(message):
    """raw_input wrapper scoped to the module for swapping during tests."""
    # raw_input is the Python 2 builtin; tests replace this module-level
    # wrapper rather than patching the builtin itself.
    return raw_input(message)
def _run_custom(parsed_args):
    """Imports, instantiates, and runs a user-supplied etl_lib.Job subclass.

    Args:
        parsed_args: argparse.Namespace. Parsed command-line arguments;
            parsed_args.type holds the dotted path of the Job subclass.
    """
    try:
        # Split 'package.module.ClassName' into module path and class name.
        module_name, job_class_name = parsed_args.type.rsplit('.', 1)
        module = __import__(module_name, globals(), locals(), [job_class_name])
        job_class = getattr(module, job_class_name)
        assert issubclass(job_class, etl_lib.Job)
        job = job_class(parsed_args)
    except:  # Any error means death. pylint: disable-msg=bare-except
        _die(
            'Unable to import and instantiate %s, or not of type %s' % (
                parsed_args.type, etl_lib.Job.__name__),
            with_trace=True)
    job.run()
def _upload(upload_type, archive_path, course_url_prefix, force_overwrite):
    """Resolves the course context and routes to the right upload routine."""
    _LOG.info(
        'Processing course with URL prefix %s from archive path %s',
        course_url_prefix, archive_path)
    context = _get_context_or_die(course_url_prefix)
    if upload_type == _TYPE_DATASTORE:
        _upload_datastore()
    elif upload_type == _TYPE_COURSE:
        _upload_course(
            context, archive_path, course_url_prefix, force_overwrite)
def _upload_course(context, archive_path, course_url_prefix, force_overwrite):
    """Validates an archive, then uploads its contents into a course.

    Args:
        context: sites.ApplicationContext. Target course.
        archive_path: string. Path of an archive produced by a prior download.
        course_url_prefix: string. URL prefix of the target course.
        force_overwrite: bool. Allow uploading over a non-empty course.
    """
    if not _context_is_for_empty_course(context) and not force_overwrite:
        _die(
            'Cannot upload to non-empty course with course_url_prefix %s' % (
                course_url_prefix))
    archive = _Archive(archive_path)
    try:
        archive.open('r')
    except IOError:
        _die('Cannot open archive_path ' + archive_path)
    # Sanity-check course.json and course.yaml (when present) before writing
    # anything to the target course.
    course_json = archive.get(
        _Archive.get_internal_path(_COURSE_JSON_PATH_SUFFIX))
    if course_json:
        try:
            courses.PersistentCourse13().deserialize(course_json)
        except (AttributeError, ValueError):
            _die((
                'Cannot upload archive at %s containing malformed '
                'course.json') % archive_path)
    course_yaml = archive.get(
        _Archive.get_internal_path(_COURSE_YAML_PATH_SUFFIX))
    if course_yaml:
        try:
            yaml.safe_load(course_yaml)
        except Exception:  # pylint: disable-msg=broad-except
            _die((
                'Cannot upload archive at %s containing malformed '
                'course.yaml') % archive_path)
    _LOG.info('Validation passed; beginning upload')
    count = 0
    for entity in archive.manifest.entities:
        external_path = _Archive.get_external_path(entity.path)
        _put(
            context, _ReadWrapper(archive.get(entity.path)), external_path,
            entity.is_draft, force_overwrite)
        count += 1
        _LOG.info('Uploaded ' + external_path)
    # Invalidate the cached course so the UI reflects the new content.
    _clear_course_cache(context)
    _LOG.info(
        'Done; %s entit%s uploaded', count, 'y' if count == 1 else 'ies')
def _upload_datastore():
    """Stub for possible future datastore entity uploader."""
    # Raising (rather than silently passing) makes accidental use fail loudly.
    raise NotImplementedError
def _validate_arguments(parsed_args):
    """Validate parsed args for additional constraints."""
    args = parsed_args
    if (args.mode in (_MODE_DOWNLOAD, _MODE_UPLOAD)
            and not args.archive_path):
        _die('--archive_path missing')
    if args.batch_size < 1:
        _die('--batch_size must be a positive value')
    if args.mode == _MODE_DOWNLOAD and os.path.exists(args.archive_path):
        _die(
            'Cannot download to archive path %s; file already exists' % (
                args.archive_path))
    if args.disable_remote and args.mode != _MODE_RUN:
        _die('--disable_remote supported only if mode is ' + _MODE_RUN)
    # Named predicates for the mode/type combinations the flags below demand.
    upload_course = (
        args.mode == _MODE_UPLOAD and args.type == _TYPE_COURSE)
    download_datastore = (
        args.mode == _MODE_DOWNLOAD and args.type == _TYPE_DATASTORE)
    if args.force_overwrite and not upload_course:
        _die(
            '--force_overwrite supported only if mode is %s and type is %s' % (
                _MODE_UPLOAD, _TYPE_COURSE))
    if args.privacy and not download_datastore:
        _die(
            '--privacy supported only if mode is %s and type is %s' % (
                _MODE_DOWNLOAD, _TYPE_DATASTORE))
    if args.privacy_secret and not (download_datastore and args.privacy):
        _die(
            '--privacy_secret supported only if mode is %s, type is %s, and '
            '--privacy is passed' % (_MODE_DOWNLOAD, _TYPE_DATASTORE))
def _write_model_to_json_file(json_file, privacy_transform_fn, model):
    """Serializes one model (privacy-transformed) into the open JSON file."""
    entity_dict = _get_entity_dict(model, privacy_transform_fn)
    json_file.write(transforms.dict_to_json(entity_dict, None))
def main(parsed_args, environment_class=None):
    """Performs the requested ETL operation.

    Args:
        parsed_args: argparse.Namespace. Parsed command-line arguments.
        environment_class: None or remote.Environment. Environment setup class
            used to configure the service stub map. Injectable for tests only;
            defaults to remote.Environment if not specified.
    """
    _validate_arguments(parsed_args)
    _LOG.setLevel(parsed_args.log_level.upper())
    # Imports are deferred until sys.path is configured; they rebind the
    # module-level names (db, courses, transforms, ...) used below.
    _import_modules_into_global_scope()
    _import_entity_modules()
    if not environment_class:
        environment_class = remote.Environment
    privacy_secret = _get_privacy_secret(parsed_args.privacy_secret)
    privacy_transform_fn = _get_privacy_transform_fn(
        parsed_args.privacy, privacy_secret)
    _LOG.info('Mode is %s', parsed_args.mode)
    _LOG.info(
        'Target is url %s from application_id %s on server %s',
        parsed_args.course_url_prefix, parsed_args.application_id,
        parsed_args.server)
    if not parsed_args.disable_remote:
        # Point App Engine service stubs at the remote server, then refresh
        # config properties that would otherwise be stale.
        environment_class(
            parsed_args.application_id, parsed_args.server).establish()
        _force_config_reload()
    if parsed_args.mode == _MODE_DELETE:
        _delete(
            parsed_args.course_url_prefix, parsed_args.type,
            parsed_args.batch_size)
    elif parsed_args.mode == _MODE_DOWNLOAD:
        _download(
            parsed_args.type, parsed_args.archive_path,
            parsed_args.course_url_prefix, parsed_args.datastore_types,
            parsed_args.batch_size, privacy_transform_fn)
    elif parsed_args.mode == _MODE_RUN:
        _run_custom(parsed_args)
    elif parsed_args.mode == _MODE_UPLOAD:
        _upload(
            parsed_args.type, parsed_args.archive_path,
            parsed_args.course_url_prefix, parsed_args.force_overwrite)
if __name__ == '__main__':
    # Parse command-line flags (PARSER is defined earlier in this module) and
    # run the requested ETL operation.
    main(PARSER.parse_args())
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MapReduce extensions for ETL."""
__author__ = [
'johncox@google.com (John Cox)',
'juliaoh@google.com (Julia Oh)',
]
import csv
import math
import os
import sys
from xml.etree import ElementTree

from models import transforms
import mrs
from tools.etl import etl_lib
# String. Event source value for YouTube videos in EventEntity.json.
_YOUTUBE_MILESTONE_SOURCE = 'tag-youtube-milestone'
# Int. Value of GCB_VIDEO_TRACKING_CHUNK_SEC in youtube_video.js; milestone
# events fire at this interval, so it is also the histogram bucket width.
_BUCKET_SIZE_SECONDS = 30
# Int. 3hrs limit on the playhead_position.
_POS_LIMIT = 60 * 60 * 3
class MapReduceJob(etl_lib.Job):
"""Parent classes for custom jobs that run a mapreduce.
Usage:
python etl.py run path.to.my.job / appid server.appspot.com \
--disable_remote \
--job_args='path_to_input_file path_to_output_directory'
"""
# Subclass of mrs.MapReduce; override in child.
MAPREDUCE_CLASS = None
def _configure_parser(self):
"""Shim that works with the arg parser expected by mrs.Mapreduce."""
self.parser.add_argument(
'file', help='Absolute path of the input file', type=str)
self.parser.add_argument(
'output', help='Absolute path of the output directory', type=str)
def main(self):
if not os.path.exists(self.args.file):
sys.exit('Input file %s not found' % self.args.file)
if not os.path.exists(self.args.output):
sys.exit('Output directory %s not found' % self.args.output)
mrs.main(self.MAPREDUCE_CLASS, args=self._parsed_etl_args.job_args)
class MapReduceBase(mrs.MapReduce):
    """Common functionalities of MR jobs combined into one class."""

    def json_parse(self, value):
        """Parses one line of a JSON-lines file into a Python object.

        Args:
            value: str. A line from an Entity file; may carry a trailing
                comma and surrounding whitespace.

        Returns:
            The parsed object, or None if the line is empty or unparseable.
        """
        stripped = value.strip()
        # Bug fix: an empty or whitespace-only line previously raised an
        # uncaught IndexError on the [-1] check below; treat it as
        # unparseable instead, matching the contract of this method.
        if not stripped:
            return None
        if stripped[-1] == ',':
            stripped = stripped[:-1]
        try:
            return transforms.loads(stripped)
        # Skip unparseable rows like the first and last
        # pylint: disable=bare-except
        except:
            return None

    def make_reduce_data(self, job, interm_data):
        """Changes the output format to JSON."""
        outdir = self.output_dir()
        output_data = job.reduce_data(
            interm_data, self.reduce, outdir=outdir, format=JsonWriter)
        return output_data
class JsonWriter(mrs.fileformats.Writer):
    """Outputs one JSON literal per line.

    Example JSON output may look like:

        {'foo': 123, 'bar': 456, 'quz': 789}
        {'foo': 321, 'bar': 654, 'quz': 987}
        ...
        {'foo': 456, 'bar': 534, 'quz': 154}
    """

    ext = 'json'

    def __init__(self, fileobj, *args, **kwds):
        super(JsonWriter, self).__init__(fileobj, *args, **kwds)

    def _write_json(self, write_fn, python_object):
        """Writes serialized JSON representation of python_object to file.

        Args:
            write_fn: Python file object write() method.
            python_object: object. Contents to write. Must be JSON-serializable.

        Raises:
            TypeError: if python_object is not a dict or a list.
        """
        if isinstance(python_object, list):
            # Flatten lists into one JSON literal per element.
            for element in python_object:
                self._write_json(write_fn, element)
        elif isinstance(python_object, dict):
            serialized = transforms.dumps(python_object) + '\n'
            write_fn(unicode(serialized).encode('utf-8'))
        else:
            raise TypeError('Value must be a dict or a list of dicts.')

    def writepair(self, kvpair, **unused_kwds):
        """Writes the value of one key/value pair; the key is ignored."""
        _, value = kvpair
        self._write_json(self.fileobj.write, value)
class Histogram(object):
    """Histogram that bins values into _BUCKET_SIZE_SECONDS sized intervals."""

    def __init__(self):
        # Map of 0-indexed bin number int -> count int.
        self._values = {}

    def add(self, value):
        """Increments the count of the bin containing value.

        Args:
            value: number. Playhead position in seconds; must be >= 0.
        """
        bin_number = self._get_bin_number(value)
        self._increment_bin(bin_number)

    def _get_bin_number(self, value):
        """Returns the appropriate 0-indexed bin number for given value.

        Raises:
            ValueError: if value is negative.
        """
        if value < 0:
            raise ValueError('Cannot calculate index for negative value')
        return max(0, (value - 1) // _BUCKET_SIZE_SECONDS)

    def _increment_bin(self, n):
        self._values[n] = self._values.get(n, 0) + 1

    def to_list(self):
        """Returns bin counts as a list ordered by bin number.

        Returns:
            List of int of length max_bin + 1; bins with no observations
            report zero. Empty list if no values were ever added.
        """
        if not self._values:
            return []
        max_key = max(self._values)
        # Bug fix: bins are 0-indexed (see _get_bin_number), but the previous
        # implementation iterated xrange(1, max_key + 1), silently dropping
        # the first bucket's counts. Include bin 0.
        return [self._values.get(n, 0) for n in range(max_key + 1)]
class YoutubeHistogramGenerator(MapReduceBase):
    """Generates time histogram of user video engagement.

    Input file: EventEntity JSON file.
    Each event has a 'source' that defines a place in a code where the event
    was recorded. Each event has a 'user_id' to represent an actor who
    triggered the event. The event 'data' is a JSON object and its format and
    content depends on the type of the event. For YouTube video events, 'data'
    is a dictionary with 'video_id', 'instance_id', 'event_id', 'position',
    'data', 'location'.
    """

    def map(self, unused_key, value):
        """Filters out YouTube video data from EventEntity JSON file.

        Args:
            unused_key: int. line number of each EventEntity in file.
            value: str. instance of EventEntity extracted from file.

        Yields:
            A tuple of (video_identifier, time_position) to be passed into
            reduce function. Video_identifier is a tuple of YouTube video_id
            and instance_id, and time_position is the video playhead count.
        """
        json = self.json_parse(value)
        if json and json['source'] == _YOUTUBE_MILESTONE_SOURCE:
            data = transforms.loads(json['data'])
            video_identifier = (data['video_id'], data['instance_id'])
            playhead_position = data['position']
            # Bug fix: the previous NaN guard, 'playhead_position !=
            # float('nan')', was always True because NaN compares unequal to
            # everything (including another NaN). Observable behavior is
            # unchanged -- 'NaN <= _POS_LIMIT' is already False, so NaN never
            # passed -- but math.isnan() makes the intent real and explicit.
            # The YouTube API may return NaN if value couldn't be computed.
            if (playhead_position <= _POS_LIMIT and
                    not math.isnan(playhead_position)):
                yield video_identifier, playhead_position

    def reduce(self, key, values):
        """Creates a histogram from time_position values.

        The value of _BUCKET_SIZE_SECONDS comes from the constant
        GCB_VIDEO_TRACKING_CHUNK_SEC in youtube_video.js. This value indicates
        the interval of the milestone events. If GCB_VIDEO_TRACKING_CHUNK_SEC
        changes, _BUCKET_SIZE_SECONDS will have to be updated accordingly.

        Args:
            key: tuple. video_id, video instance id.
            values: a generator over video playhead positions.

        Yields:
            A dictionary with video_id, instance_id, and histogram.
            The time histogram is a list in which each index represents
            sequential milestone events and the corresponding item at each
            index represents the number of users watching the video.

            An example output looks like:
            {'video_id': 123456, 'instance_id': 0,
             'histogram': [10, 8, 7, 5, 2, 1]}
        """
        histogram = Histogram()
        for value in values:
            histogram.add(value)
        yield {
            'video_id': key[0],
            'instance_id': key[1],
            'histogram': histogram.to_list()
        }
class YoutubeHistogram(MapReduceJob):
    """MapReduce Job that generates a histogram for user video engagement.

    Usage: run the following command from the app root folder.
        python tools/etl/etl.py run tools.etl.mapreduce.YoutubeHistogram \
            /coursename appid server.appspot.com \
            --job_args='path_to_EventEntity.json path_to_output_directory'
    """

    # The mapreduce implementation this job runs; see MapReduceJob.main().
    MAPREDUCE_CLASS = YoutubeHistogramGenerator
class XmlWriter(mrs.fileformats.Writer):
    """Writes file in XML format.

    The writer does not use the key from kvpair and expects the value to be a
    list of string representation of XML elements.

    Example:
        kvpair: some_key, ['<row><name>Jane</name></row>',
                           '<row><name>John</name></row>']
        Output:
            <rows>
                <row>
                    <name>Jane</name>
                </row>
                <row>
                    <name>John</name>
                </row>
            </rows>
    """

    ext = 'xml'

    def __init__(self, fileobj, *args, **kwds):
        super(XmlWriter, self).__init__(fileobj, *args, **kwds)
        # Open the document root immediately; finish() writes the close tag.
        self.fileobj.write('<rows>')

    def writepair(self, kvpair, **unused_kwds):
        """Writes each XML element string in the value, one per line."""
        unused_key, values = kvpair
        write = self.fileobj.write
        for value in values:
            write(value)
            write('\n')

    def finish(self):
        """Closes the document root and flushes buffered output."""
        self.fileobj.write('</rows>')
        self.fileobj.flush()
class XmlGenerator(MapReduceBase):
    """Generates an XML file from a JSON formatted input file."""

    def map(self, key, value):
        """Converts a JSON object to an XML element string.

        Args:
            key: int. line number of the value in Entity file.
            value: str. A line of JSON literal extracted from Entity file.

        Yields:
            A tuple with the string 'key' and a tuple containing line number
            and string representation of the XML element.
        """
        parsed = self.json_parse(value)
        if not parsed:
            return
        row_element = ElementTree.Element('row')
        transforms.convert_dict_to_xml(row_element, parsed)
        yield 'key', (
            key, ElementTree.tostring(row_element, encoding='utf-8'))

    def reduce(self, unused_key, values):
        """Sorts the values by line number to keep the order of the document.

        Args:
            unused_key: str. The arbitrary string 'key' set to accumulate all
                values under one key.
            values: list of tuples. Each tuple contains line number and JSON
                literal converted to XML string.

        Yields:
            A list of XML strings sorted by the line number.
        """
        ordered = sorted(values, key=lambda pair: pair[0])
        yield [xml_string for _, xml_string in ordered]

    def make_reduce_data(self, job, interm_data):
        """Changes the output format to XML."""
        outdir = self.output_dir()
        return job.reduce_data(
            interm_data, self.reduce, outdir=outdir, format=XmlWriter)
class JsonToXml(MapReduceJob):
    """MapReduce Job that converts JSON formatted Entity files to XML.

    Usage: run the following command from the app root folder.
        python tools/etl/etl.py run tools.etl.mapreduce.JsonToXml \
            /coursename appid server.appspot.com \
            --job_args='path_to_any_Entity_file path_to_output_directory'
    """

    # The mapreduce implementation this job runs; see MapReduceJob.main().
    MAPREDUCE_CLASS = XmlGenerator
class CsvWriter(mrs.fileformats.Writer):
    """Writes file in CSV format.

    The default value to be written if the dictionary is missing a key is an
    empty string.

    Example:
        kvpair: (some_key, (['bar', 'foo', 'quz'],
                            [{'foo': 1, 'bar': 2, 'quz': 3},
                             {'bar': 2, 'foo': 3}])
        Output:
            'bar', 'foo', 'quz'
            2, 1, 3
            2, 3, ''
    """

    ext = 'csv'

    def __init__(self, fileobj, *args, **kwds):
        super(CsvWriter, self).__init__(fileobj, *args, **kwds)

    def writepair(self, kvpair, **unused_kwds):
        """Writes list of JSON objects to CSV format.

        Args:
            kvpair: tuple of unused_key, and a tuple of master_list and
                json_list. Master_list is a list that contains all the
                fieldnames across json_list sorted in alphabetical order, and
                json_list is a list of JSON objects.
            **unused_kwds: keyword args that won't be used.
        """
        unused_key, (master_list, json_list) = kvpair
        # restval='' supplies the blank default for rows missing a fieldname.
        writer = csv.DictWriter(
            self.fileobj, fieldnames=master_list, restval='')
        writer.writeheader()
        writer.writerows(json_list)
class CSVGenerator(MapReduceBase):
    """Generates a CSV file from a JSON formatted input file."""

    @classmethod
    def _flatten_json(cls, json, prefix=''):
        """Flattens given JSON object and encodes all the values in utf-8.

        Nested dicts are inlined into the parent dict under underscore-joined
        key prefixes; scalar values are utf-8 encoded under prefixed keys.

        Args:
            json: dict. The object to flatten in place.
            prefix: str. Key prefix accumulated across recursion levels.

        Returns:
            The same dict, flattened.
        """
        # Snapshot the items explicitly: the dict is mutated (pop/update)
        # while we loop over it.
        for k, v in list(json.items()):
            try:
                # isinstance (not 'type(...) == dict') is the idiomatic
                # check and also accepts dict subclasses.
                if isinstance(transforms.loads(v), dict):
                    flattened = cls._flatten_json(
                        transforms.loads(json.pop(k)),
                        prefix=prefix + k + '_')
                    json.update(flattened)
            # Values that fail to parse as JSON are treated as scalars.
            # pylint: disable=bare-except
            except:
                json[prefix + k] = unicode(json.pop(k)).encode('utf-8')
        return json

    def map(self, unused_key, value):
        """Loads JSON object and flattens it.

        Example:
            json['data']['foo'] = 'bar' -> json['data_foo'] = 'bar', with
            json['data'] removed.

        Args:
            unused_key: int. line number of the value in Entity file.
            value: str. instance of Entity file extracted from file.

        Yields:
            A tuple of string key and flattened dictionary. map() outputs
            constant string 'key' as the key so that all the values can be
            accumulated under one key in reduce(). This accumulation is
            necessary because reduce() must go through the list of all JSON
            literals and determine all existing fieldnames. Then, reduce()
            supplies the master_list of fieldnames to CSVWriter's writepair()
            which uses the list as csv header.
        """
        json = self.json_parse(value)
        if json:
            json = CSVGenerator._flatten_json(json)
            yield 'key', json

    def reduce(self, unused_key, values):
        """Creates a master_list of all the keys present in an Entity file.

        Args:
            unused_key: str. constant string 'key' emitted by map().
            values: a generator over list of json objects.

        Yields:
            A tuple of master_list and list of json objects.
            master_list is a sorted list of all keys present across every
            json object. This list is used to create header for CSV files.
        """
        values = list(values)
        # Collect the union of keys with a set instead of the previous
        # O(n^2) list-membership scan; ordering is irrelevant because the
        # result is sorted before being yielded.
        all_keys = set()
        for value in values:
            all_keys.update(value)
        yield sorted(all_keys), values

    def make_reduce_data(self, job, interm_data):
        """Sets the output data format to CSV."""
        outdir = self.output_dir()
        output_data = job.reduce_data(
            interm_data, self.reduce, outdir=outdir, format=CsvWriter)
        return output_data
class JsonToCsv(MapReduceJob):
    """MapReduce Job that converts JSON formatted Entity files to CSV format.

    Usage: run the following command from the app root folder.
        python tools/etl/etl.py run tools.etl.mapreduce.JsonToCsv \
            /coursename appid server.appspot.com \
            --job_args='path_to_an_Entity_file path_to_output_directory'
    """

    # The mapreduce implementation this job runs; see MapReduceJob.main().
    MAPREDUCE_CLASS = CSVGenerator
# Register the custom writers with mrs so output formats resolve by file
# extension.
mrs.fileformats.writer_map['csv'] = CsvWriter
mrs.fileformats.writer_map['json'] = JsonWriter
mrs.fileformats.writer_map['xml'] = XmlWriter
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for writing extract-transform-load scripts."""
__author__ = [
'johncox@google.com',
]
import argparse
from controllers import sites
def get_context(course_url_prefix):
    """Gets requested application context from the given course URL prefix.

    Args:
        course_url_prefix: string. Value of etl.py's course_url_prefix flag.

    Returns:
        sites.ApplicationContext, or None when no course matches.
    """
    wanted_prefix = 'course:%s:' % course_url_prefix
    for context in sites.get_all_courses():
        if context.raw.startswith(wanted_prefix):
            return context
    return None
class Job(object):
"""Abstract base class for user-defined custom ETL jobs.
Custom jobs can be executed by etl.py. The advantage of this is that they
can run arbitrary local computations, but calls to App Engine services
(db.get() or db.put(), for example) are executed against a remove server.
This allows you to perform arbitrary computations against your app's data,
and to construct data pipelines that are not possible within the App Engine
execution environment.
When you run your custom job under etl.py in this way, it authenticates
against the remove server, prompting the user for credentials if necessary.
It then configures the local environment so RPCs execute against the
requested remote endpoint.
It then imports your custom job. Your job must be a Python class that is
a child of this class. Before invoking etl.py, you must configure sys.path
so all required libraries are importable. See etl.py for details. Your
class must override main() with the computations you want your job to
perform.
You invoke your custom job via etl.py:
$ python etl.py run path.to.my.Job /cs101 myapp server.appspot.com \
--job_args='more_args --delegated_to my.Job'
Before main() is executed, arguments are parsed. The full set of parsed
arguments passed to etl.py are available in your job as self.etl_args. The
arguments passed as a quote-enclosed string to --job_args, if any, are
delegated to your job. An argument parser is available as self.parser. You
must override self._configure_parser to register command-line arguments for
parsing. They will be parsed in advance of running main() and will be
available as self.args.
See tools/etl/examples.py for some nontrivial sample job implementations.
"""
def __init__(self, parsed_etl_args):
"""Constructs a new job.
Args:
parsed_etl_args: argparse.Namespace. Parsed arguments passed to
etl.py.
"""
self._parsed_args = None
self._parsed_etl_args = parsed_etl_args
self._parser = None
def _configure_parser(self):
"""Configures custom command line parser for this job, if any.
For example:
self.parser.add_argument(
'my_arg', help='A required argument', type=str)
"""
pass
def main(self):
"""Computations made by this job; must be overridden in subclass."""
pass
@property
def args(self):
"""Returns etl.py's parsed --job_args, or None if run() not invoked."""
return self._parsed_args
@property
def etl_args(self):
"""Returns parsed etl.py arguments."""
return self._parsed_etl_args
@property
def parser(self):
"""Returns argparse.ArgumentParser, or None if run() not yet invoked."""
if not self._parser:
self._parser = argparse.ArgumentParser(
prog='%s.%s' % (
self.__class__.__module__, self.__class__.__name__),
usage=(
'etl.py run %(prog)s [etl.py options] [--job_args] '
'[%(prog)s options]'))
return self._parser
def _parse_args(self):
self._configure_parser()
self._parsed_args = self.parser.parse_args(
self._parsed_etl_args.job_args)
    def run(self):
        """Executes the job; called for you by etl.py."""
        # Parse delegated --job_args first so main() can read self.args.
        self._parse_args()
        self.main()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample announcements."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
SAMPLE_ANNOUNCEMENT_1 = {
'edit_url': None,
'title': 'Example Announcement',
'date': datetime.date(2012, 10, 6),
'is_draft': False,
'html': """
<br>Certificates will be e-mailed to qualifying participants by
Friday, October 12.
<br>
<br>Do you want to check your assessment scores? Visit the
<a href="student/home">"My profile"</a> page!</p>
"""}
SAMPLE_ANNOUNCEMENT_2 = {
'edit_url': None,
'title': 'Welcome to Class 6 and the Post-class Assessment',
'date': datetime.date(2012, 10, 5),
'is_draft': True,
'html': """
<br>Welcome to the final class! <a href="class?class=6"> Class 6</a>
focuses on combining the skills you have learned throughout the class
to maximize the effectiveness of your searches.
<br>
<br><b>Customize Your Experience</b>
<br>You can customize your experience in several ways:
<ul>
<li>You can watch the videos multiple times for a deeper understanding
of each lesson. </li>
<li>You can read the text version for each lesson. Click the button
above the video to access it.</li>
<li>Lesson activities are designed for multiple levels of experience.
The first question checks your recall of the material in the video;
the second question lets you verify your mastery of the lesson; the
third question is an opportunity to apply your skills and share your
experiences in the class forums. You can answer some or all of the
questions depending on your familiarity and interest in the topic.
Activities are not graded and do not affect your final grade. </li>
<li>We'll also post extra challenges in the forums for people who seek
additional opportunities to practice and test their new skills!</li>
</ul>
<br><b>Forum</b>
<br>Apply your skills, share with others, and connect with your peers
and course staff in the <a href="forum">forum.</a> Discuss your favorite
search tips and troubleshoot technical issues. We'll also post bonus
videos and challenges there!
<p> </p>
<p>For an optimal learning experience, please plan to use the most
recent version of your browser, as well as a desktop, laptop or a tablet
computer instead of your mobile phone.</p>
"""}
SAMPLE_ANNOUNCEMENTS = [SAMPLE_ANNOUNCEMENT_1, SAMPLE_ANNOUNCEMENT_2]
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to create and manage Announcements."""
__author__ = 'Saifu Angto (saifu@google.com)'
import datetime
import urllib
from common import tags
from controllers.utils import BaseHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import ReflectiveRequestHandler
from controllers.utils import XsrfTokenManager
from models import custom_modules
from models import entities
from models import notify
from models import roles
from models import transforms
from models.models import MemcacheManager
from models.models import Student
import modules.announcements.samples as samples
from modules.oeditor import oeditor
from google.appengine.ext import db
class AnnouncementsRights(object):
    """Manages view/edit rights for announcements."""

    @classmethod
    def can_view(cls, unused_handler):
        # Announcements are world-readable; drafts are filtered out by
        # apply_rights() below.
        return True

    @classmethod
    def can_edit(cls, handler):
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_delete(cls, handler):
        # Deleting requires the same privilege level as editing.
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        # Adding requires the same privilege level as editing.
        return cls.can_edit(handler)

    @classmethod
    def apply_rights(cls, handler, items):
        """Filter out items that current user can't see."""
        # Admins see everything, drafts included.
        if AnnouncementsRights.can_edit(handler):
            return items
        # Everyone else only sees published announcements.
        return [item for item in items if not item.is_draft]
class AnnouncementsHandler(BaseHandler, ReflectiveRequestHandler):
    """Handler for announcements."""

    # ReflectiveRequestHandler dispatches requests to get_<action> /
    # post_<action> methods based on the 'action' query parameter; 'list'
    # is used when no action is supplied.
    default_action = 'list'
    get_actions = [default_action, 'edit']
    post_actions = ['add', 'delete']

    @classmethod
    def get_child_routes(cls):
        """Add child handlers for REST."""
        return [('/rest/announcements/item', AnnouncementsItemRESTHandler)]

    def get_action_url(self, action, key=None):
        """Builds a course-relative URL for an announcement action.

        Args:
            action: str. One of the registered get/post actions above.
            key: datastore key of the announcement, when the action targets
                a specific item.

        Returns:
            str. Canonicalized URL for this course.
        """
        args = {'action': action}
        if key:
            args['key'] = key
        return self.canonicalize_url(
            '/announcements?%s' % urllib.urlencode(args))

    def format_items_for_template(self, items):
        """Formats a list of entities into template values."""
        template_items = []
        for item in items:
            item = transforms.entity_to_dict(item)

            # add 'edit' actions
            if AnnouncementsRights.can_edit(self):
                item['edit_action'] = self.get_action_url(
                    'edit', key=item['key'])

                # Deletion is a POST, so it needs an XSRF token as well.
                item['delete_xsrf_token'] = self.create_xsrf_token('delete')
                item['delete_action'] = self.get_action_url(
                    'delete', key=item['key'])

            template_items.append(item)

        output = {}
        output['children'] = template_items

        # add 'add' action
        if AnnouncementsRights.can_edit(self):
            output['add_xsrf_token'] = self.create_xsrf_token('add')
            output['add_action'] = self.get_action_url('add')

        return output

    def put_sample_announcements(self):
        """Loads sample data into a database."""
        items = []
        for item in samples.SAMPLE_ANNOUNCEMENTS:
            entity = AnnouncementEntity()
            transforms.dict_to_entity(entity, item)
            entity.put()
            items.append(entity)
        return items

    def get_list(self):
        """Shows a list of announcements."""
        user = self.personalize_page_and_get_user()
        # A "transient" student is an anonymous or unenrolled visitor; the
        # template uses this flag to adjust what it renders.
        transient_student = False
        if user is None:
            transient_student = True
        else:
            student = Student.get_enrolled_student_by_email(user.email())
            if not student:
                transient_student = True
        self.template_value['transient_student'] = transient_student

        items = AnnouncementEntity.get_announcements()
        # Seed the course with sample announcements the first time an admin
        # views an empty list.
        if not items and AnnouncementsRights.can_edit(self):
            items = self.put_sample_announcements()

        # Hide drafts from non-admin viewers.
        items = AnnouncementsRights.apply_rights(self, items)

        self.template_value['announcements'] = self.format_items_for_template(
            items)
        self.template_value['navbar'] = {'announcements': True}
        self.render('announcements.html')

    def get_edit(self):
        """Shows an editor for an announcement."""
        if not AnnouncementsRights.can_edit(self):
            self.error(401)
            return

        key = self.request.get('key')

        # The editor returns to the list anchored at the edited item.
        exit_url = self.canonicalize_url(
            '/announcements#%s' % urllib.quote(key, safe=''))
        rest_url = self.canonicalize_url('/rest/announcements/item')
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            AnnouncementsItemRESTHandler.SCHEMA_JSON,
            AnnouncementsItemRESTHandler.get_schema_annotation_dict(
                self.get_course().get_course_announcement_list_email()),
            key, rest_url, exit_url,
            required_modules=AnnouncementsItemRESTHandler.REQUIRED_MODULES)
        self.template_value['navbar'] = {'announcements': True}
        self.template_value['content'] = form_html
        self.render('bare.html')

    def post_delete(self):
        """Deletes an announcement."""
        if not AnnouncementsRights.can_delete(self):
            self.error(401)
            return

        key = self.request.get('key')
        entity = AnnouncementEntity.get(key)
        # Deleting a missing key is a silent no-op; we redirect either way.
        if entity:
            entity.delete()
        self.redirect('/announcements')

    def post_add(self):
        """Adds a new announcement and redirects to an editor for it."""
        if not AnnouncementsRights.can_add(self):
            self.error(401)
            return

        # Create a draft placeholder, then send the admin straight to the
        # editor to fill in the real content.
        entity = AnnouncementEntity()
        entity.title = 'Sample Announcement'
        entity.date = datetime.datetime.now().date()
        entity.html = 'Here is my announcement!'
        entity.is_draft = True
        entity.put()
        self.redirect(self.get_action_url('edit', key=entity.key()))
class AnnouncementsItemRESTHandler(BaseRESTHandler):
    """Provides REST API for an announcement."""

    # TODO(psimakov): we should really use an ordered dictionary, not plain
    # text; it can't be just a normal dict because a dict iterates its items in
    # undefined order; thus when we render a dict to JSON an order of fields
    # will not match what we specify here; the final editor will also show the
    # fields in an undefined order; for now we use the raw JSON, rather than
    # the dict, but will move to an ordered dict later.
    SCHEMA_JSON = """
        {
            "id": "Announcement Entity",
            "type": "object",
            "description": "Announcement",
            "properties": {
                "key" : {"type": "string"},
                "title": {"optional": true, "type": "string"},
                "date": {"optional": true, "type": "date"},
                "html": {"optional": true, "type": "html"},
                "is_draft": {"type": "boolean"},
                "send_email": {"type": "boolean"}
                }
        }
        """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    # inputEx/YUI modules the object editor needs to render this schema.
    REQUIRED_MODULES = [
        'inputex-date', 'gcb-rte', 'inputex-select', 'inputex-string',
        'inputex-uneditable', 'inputex-checkbox']

    @staticmethod
    def get_send_email_description(announcement_email):
        """Get the description for Send Email field."""
        if announcement_email:
            return 'Email will be sent to : ' + announcement_email
        return 'Announcement list not configured.'

    @staticmethod
    def get_schema_annotation_dict(announcement_email):
        """Utility to get schema annotation dict for this course."""
        # Each entry maps a schema path to its inputEx presentation options.
        schema_dict = [
            (['title'], 'Announcement'),
            (['properties', 'key', '_inputex'], {
                'label': 'ID', '_type': 'uneditable'}),
            (['properties', 'date', '_inputex'], {
                'label': 'Date', '_type': 'date', 'dateFormat': 'Y/m/d',
                'valueFormat': 'Y/m/d'}),
            (['properties', 'title', '_inputex'], {'label': 'Title'}),
            (['properties', 'html', '_inputex'], {
                'label': 'Body', '_type': 'html',
                'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
                'excludedCustomTags':
                    tags.EditorBlacklists.COURSE_SCOPE}),
            oeditor.create_bool_select_annotation(
                ['properties', 'is_draft'], 'Status', 'Draft', 'Published'),
            (['properties', 'send_email', '_inputex'], {
                'label': 'Send Email', '_type': 'boolean',
                'description':
                    AnnouncementsItemRESTHandler.get_send_email_description(
                        announcement_email)})]
        return schema_dict

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        key = self.request.get('key')

        try:
            entity = AnnouncementEntity.get(key)
        except db.BadKeyError:
            # A malformed key is reported the same way as a missing one.
            entity = None
        if not entity:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        # apply_rights() returns the entity only if the caller may see it
        # (admins see everything; students only see published items).
        viewable = AnnouncementsRights.apply_rights(self, [entity])
        if not viewable:
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        entity = viewable[0]

        json_payload = transforms.dict_to_json(transforms.entity_to_dict(
            entity), AnnouncementsItemRESTHandler.SCHEMA_DICT)
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'announcement-put'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, 'announcement-put', {'key': key}):
            return

        if not AnnouncementsRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        entity = AnnouncementEntity.get(key)
        if not entity:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        # Copy the client's payload onto the entity and persist it.
        payload = request.get('payload')
        transforms.dict_to_entity(entity, transforms.json_to_dict(
            transforms.loads(payload),
            AnnouncementsItemRESTHandler.SCHEMA_DICT))
        entity.put()

        email_sent = False
        if entity.send_email:
            email_manager = notify.EmailManager(self.get_course())
            email_sent = email_manager.send_announcement(
                entity.title, entity.html)

        # Distinguish "saved" from "saved but email failed", and call out the
        # most common cause: no announcement mailing list configured.
        if entity.send_email and not email_sent:
            if not self.get_course().get_course_announcement_list_email():
                message = 'Saved. Announcement list not configured.'
            else:
                message = 'Saved, but there was an error sending email.'
        else:
            message = 'Saved.'
        transforms.send_json_response(self, 200, message)
class AnnouncementEntity(entities.BaseEntity):
    """A class that represents a persistent database entity of announcement."""
    title = db.StringProperty(indexed=False)
    date = db.DateProperty()
    html = db.TextProperty(indexed=False)
    is_draft = db.BooleanProperty()
    send_email = db.BooleanProperty()

    # Single memcache slot caching the full, date-ordered announcement list.
    memcache_key = 'announcements'

    @classmethod
    def get_announcements(cls, allow_cached=True):
        """Returns announcements, newest first, preferring the cached copy."""
        cached = MemcacheManager.get(cls.memcache_key)
        if allow_cached and cached is not None:
            return cached
        fresh = AnnouncementEntity.all().order('-date').fetch(1000)

        # TODO(psimakov): prepare to exceed 1MB max item size
        # read more here: http://stackoverflow.com
        # /questions/5081502/memcache-1-mb-limit-in-google-app-engine
        MemcacheManager.set(cls.memcache_key, fresh)
        return fresh

    def put(self):
        """Do the normal put() and also invalidate memcache."""
        saved = super(AnnouncementEntity, self).put()
        MemcacheManager.delete(self.memcache_key)
        return saved

    def delete(self):
        """Do the normal delete() and invalidate memcache."""
        super(AnnouncementEntity, self).delete()
        MemcacheManager.delete(self.memcache_key)
# Populated by register_module() at startup.
custom_module = None


def register_module():
    """Registers this module in the registry."""
    global custom_module

    # Only namespaced (per-course) routes; this module has no global routes.
    announcement_handlers = [('/announcements', AnnouncementsHandler)]
    custom_module = custom_modules.Module(
        'Course Announcements',
        'A set of pages for managing course announcements.',
        [], announcement_handlers)
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Search module that uses Google App Engine's full text search."""
__author__ = 'Ellis Michael (emichael@google.com)'
import collections
import gettext
import logging
import math
import mimetypes
import os
import time
import traceback
import appengine_config
from common import safe_dom
from controllers import sites
from controllers import utils
import jinja2
from models import config
from models import counters
from models import courses
from models import custom_modules
from models import jobs
from models import transforms
import webapp2
import resources
from google.appengine.api import namespace_manager
from google.appengine.api import search
from google.appengine.ext import db
MODULE_NAME = 'Full Text Search'
CAN_INDEX_ALL_COURSES_IN_CRON = config.ConfigProperty(
'gcb_can_index_automatically', bool, safe_dom.Text(
'Whether the search module can automatically index the course daily '
'using a cron job. If enabled, this job would index the course '
'incrementally so that only new items or items which have not been '
'recently indexed are indexed.'),
default_value=False)
SEARCH_QUERIES_MADE = counters.PerfCounter(
'gcb-search-queries-made',
'The number of student queries made to the search module.')
SEARCH_RESULTS_RETURNED = counters.PerfCounter(
'gcb-search-results-returned',
'The number of search results returned across all student queries.')
SEARCH_FAILURES = counters.PerfCounter(
'gcb-search-failures',
'The number of search failure messages returned across all student '
'queries.')
INDEX_NAME = 'gcb_search_index'
RESULTS_LIMIT = 10
GCB_SEARCH_FOLDER_NAME = os.path.normpath('/modules/search/')
MAX_RETRIES = 5
# I18N: Message displayed on search results page when error occurs.
SEARCH_ERROR_TEXT = gettext.gettext('Search is currently unavailable.')
class ModuleDisabledException(Exception):
    """Exception thrown when the search module is disabled."""
def get_index(course):
    """Returns the shared search index scoped to the course's namespace."""
    namespace = course.app_context.get_namespace_name()
    return search.Index(name=INDEX_NAME, namespace=namespace)
def index_all_docs(course, incremental):
    """Index all of the docs for a given models.Course object.

    Args:
        course: models.courses.Course. the course to index.
        incremental: boolean. whether or not to index only new or out-of-date
            items.
    Returns:
        A dict with three keys.
        'num_indexed_docs' maps to an int, the number of documents added to the
            index.
        'doc_types' maps to a counter with resource types as keys mapping to
            the number of that resource added to the index.
        'indexing_time_secs' maps to a string representing the number of
            seconds the indexing job took.
    Raises:
        ModuleDisabledException: The search module is currently disabled.
    """
    if not custom_module.enabled:
        raise ModuleDisabledException('The search module is disabled.')

    start_time = time.time()
    index = get_index(course)
    # For an incremental run, seed the metadata from documents already in the
    # index so the generator can skip items indexed recently enough.
    timestamps, doc_types = (_get_index_metadata(index) if incremental
                             else ({}, {}))
    for doc in resources.generate_all_documents(course, timestamps):
        retry_count = 0
        while retry_count < MAX_RETRIES:
            try:
                index.put(doc)
                timestamps[doc.doc_id] = doc['date'][0].value
                doc_types[doc.doc_id] = doc['type'][0].value
                break
            # Fix: use the modern 'except ... as e' form for consistency with
            # the rest of this codebase (see get_parser_for_html et al.).
            except search.Error as e:
                if e.results[0].code == search.OperationResult.TRANSIENT_ERROR:
                    # Transient errors are retried up to MAX_RETRIES times.
                    retry_count += 1
                    if retry_count >= MAX_RETRIES:
                        logging.error(
                            'Multiple transient errors indexing doc_id: %s',
                            doc.doc_id)
                else:
                    # Permanent failure for this doc; skip it and move on.
                    logging.error('Failed to index doc_id: %s', doc.doc_id)
                    break

    total_time = '%.2f' % (time.time() - start_time)
    # Tally how many documents of each resource type were recorded.
    indexed_doc_types = collections.Counter(doc_types.values())
    return {'num_indexed_docs': len(timestamps),
            'doc_types': indexed_doc_types,
            'indexing_time_secs': total_time}
def clear_index(course):
    """Delete all docs in the index for a given models.Course object.

    Returns:
        A dict: 'deleted_docs' maps to the number of documents removed.

    Raises:
        ModuleDisabledException: The search module is currently disabled.
    """
    if not custom_module.enabled:
        raise ModuleDisabledException('The search module is disabled.')

    index = get_index(course)
    total_docs = 0
    doc_ids = [document.doc_id for document in index.get_range(ids_only=True)]
    while doc_ids:
        # get_range() returns a bounded page of ids, so delete page by page.
        # Fix: accumulate the count across pages; previously only the first
        # page was counted even though every page was deleted.
        total_docs += len(doc_ids)
        index.delete(doc_ids)
        doc_ids = [document.doc_id
                   for document in index.get_range(ids_only=True)]
    return {'deleted_docs': total_docs}
def _get_index_metadata(index):
    """Returns dict from doc_id to timestamp and one from doc_id to doc_type."""
    timestamps = {}
    doc_types = {}
    cursor = search.Cursor()
    # Page through the whole index; the cursor becomes None on the last page.
    while cursor:
        options = search.QueryOptions(
            limit=1000,
            cursor=cursor,
            returned_fields=['date', 'type'])
        page = index.search(search.Query(query_string='', options=options))
        cursor = page.cursor
        for doc in page:
            timestamps[doc.doc_id] = doc['date'][0].value
            doc_types[doc.doc_id] = doc['type'][0].value
    return timestamps, doc_types
def fetch(course, query_string, offset=0, limit=RESULTS_LIMIT):
    """Searches the course index and returns processed results.

    Args:
        course: models.courses.Course. the course to search.
        query_string: str. the user's specified query.
        offset: int. the number of results to skip.
        limit: int. the number of results to return.

    Returns:
        A dict with two keys: 'results' is an ordered list of
        resources.Result objects (or None when the search call fails) and
        'total_found' is the total number of matches in the index.

    Raises:
        ModuleDisabledException: The search module is currently disabled.
    """
    if not custom_module.enabled:
        raise ModuleDisabledException('The search module is disabled.')

    index = get_index(course)

    try:
        # TODO(emichael): Don't compute these for every query
        returned_fields = resources.get_returned_fields()
        snippeted_fields = resources.get_snippeted_fields()
        query = search.Query(
            query_string=query_string,
            options=search.QueryOptions(
                limit=limit,
                offset=offset,
                returned_fields=returned_fields,
                number_found_accuracy=100,
                snippeted_fields=snippeted_fields))
        raw_results = index.search(query)
    except search.Error:
        logging.info('Failed searching for: %s', query_string)
        return {'results': None, 'total_found': 0}

    return {'results': resources.process_results(raw_results),
            'total_found': raw_results.number_found}
class SearchHandler(utils.BaseHandler):
    """Handler for generating the search results page."""

    def get(self):
        """Process GET request."""
        # TODO(emichael): move timing to Javascript

        if not custom_module.enabled:
            self.error(404)
            return

        student = self.personalize_page_and_get_enrolled(
            supports_transient_student=True)
        if not student:
            return

        try:
            start = time.time()
            # TODO(emichael): Don't use get because it can't handle utf-8
            query = self.request.get('query')
            offset = self.request.get('offset')

            self.template_value['navbar'] = {}
            if query:
                try:
                    offset = int(offset)
                except (ValueError, TypeError):
                    # Missing or malformed offset: start at the first page.
                    offset = 0
                self.template_value['query'] = query
                SEARCH_QUERIES_MADE.inc()
                response = fetch(self.get_course(), query, offset=offset)
                self.template_value['time'] = '%.2f' % (time.time() - start)
                self.template_value['search_results'] = response['results']

                total_found = response['total_found']
                # Pagination: only emit next/previous links when a page of
                # results actually exists in that direction.
                if offset + RESULTS_LIMIT < total_found:
                    self.template_value['next_link'] = (
                        'search?query=%s&offset=%d' %
                        (query, offset + RESULTS_LIMIT))
                if offset - RESULTS_LIMIT >= 0:
                    self.template_value['previous_link'] = (
                        'search?query=%s&offset=%d' %
                        (query, offset - RESULTS_LIMIT))
                self.template_value['page_number'] = offset / RESULTS_LIMIT + 1
                self.template_value['total_pages'] = int(math.ceil(
                    float(total_found) / RESULTS_LIMIT))

                if response['results']:
                    SEARCH_RESULTS_RETURNED.inc(len(response['results']))

        # TODO(emichael): Remove this check when the unicode issue is fixed in
        # dev_appserver.
        except UnicodeEncodeError as e:
            SEARCH_FAILURES.inc()
            if not appengine_config.PRODUCTION_MODE:
                # This message will only be displayed to the course author in
                # dev, so it does not need to be I18N'd
                self.template_value['search_error'] = (
                    'There is a known issue in App Engine\'s SDK '
                    '(code.google.com/p/googleappengine/issues/detail?id=9335) '
                    'which causes an error when generating search snippets '
                    'which contain non-ASCII characters. This error does not '
                    'occur in the production environment, so you can safely '
                    'run your course with unicode characters on appspot.com.')
                logging.error('[Unicode/Dev server issue] Error rendering the '
                              'search page: %s.', e)
            else:
                self.template_value['search_error'] = SEARCH_ERROR_TEXT
                logging.error('Error rendering the search page: %s. %s',
                              e, traceback.format_exc())
        except Exception as e:  # pylint: disable-msg=broad-except
            # Never let an unexpected failure break the page; show the
            # generic error text and log the traceback instead.
            SEARCH_FAILURES.inc()
            self.template_value['search_error'] = SEARCH_ERROR_TEXT
            logging.error('Error rendering the search page: %s. %s',
                          e, traceback.format_exc())
        finally:
            # The page is rendered on every path, including the error paths
            # above, so users always get a response.
            path = sites.abspath(self.app_context.get_home_folder(),
                                 GCB_SEARCH_FOLDER_NAME)
            template = self.get_template('search.html', additional_dirs=[path])
            self.template_value['navbar'] = {}
            self.response.out.write(template.render(self.template_value))
class AssetsHandler(webapp2.RequestHandler):
    """Content handler for assets associated with search."""

    def get(self):
        """Respond to HTTP GET methods.

        Serves a static file from under BUNDLE_ROOT, but only when the file's
        immediate parent directory is named 'assets'; anything else is a 404.
        """
        if not custom_module.enabled:
            self.error(404)
            return

        path = self.request.path
        if path.startswith('/'):
            path = path[1:]
        path = os.path.normpath(path)

        if os.path.basename(os.path.dirname(path)) != 'assets':
            self.error(404)
            # Fix: previously execution fell through after error() and the
            # file outside an assets/ directory was served anyway.
            return

        resource_file = os.path.join(appengine_config.BUNDLE_ROOT, path)
        mimetype = mimetypes.guess_type(resource_file)[0]
        if mimetype is None:
            mimetype = 'application/octet-stream'

        try:
            sites.set_static_resource_cache_control(self)
            self.response.status = 200
            self.response.headers['Content-Type'] = mimetype
            # Fix: use a context manager so the file handle is always closed.
            with open(resource_file) as stream:
                self.response.write(stream.read())
        except IOError:
            self.error(404)
class SearchDashboardHandler(object):
    """Should only be inherited by DashboardHandler, not instantiated."""

    def get_search(self):
        """Renders course indexing view."""
        template_values = {}
        template_values['page_title'] = self.format_title('Search')
        mc_template_value = {}
        mc_template_value['module_enabled'] = custom_module.enabled

        indexing_job = IndexCourse(self.app_context).load()
        clearing_job = ClearIndex(self.app_context).load()
        # Report on whichever job ran most recently: a newer index job
        # supersedes an older clear job and vice versa.
        if indexing_job and (not clearing_job or
                             indexing_job.updated_on > clearing_job.updated_on):
            if indexing_job.status_code in [jobs.STATUS_CODE_STARTED,
                                            jobs.STATUS_CODE_QUEUED]:
                mc_template_value['status_message'] = 'Indexing in progress.'
                mc_template_value['job_in_progress'] = True
            elif indexing_job.status_code == jobs.STATUS_CODE_COMPLETED:
                mc_template_value['indexed'] = True
                mc_template_value['last_updated'] = (
                    indexing_job.updated_on.strftime(
                        utils.HUMAN_READABLE_DATETIME_FORMAT))
                # Job output is the JSON stats dict from index_all_docs().
                mc_template_value['index_info'] = transforms.loads(
                    indexing_job.output)
            elif indexing_job.status_code == jobs.STATUS_CODE_FAILED:
                mc_template_value['status_message'] = (
                    'Indexing job failed with error: %s' % indexing_job.output)
        elif clearing_job:
            if clearing_job.status_code in [jobs.STATUS_CODE_STARTED,
                                            jobs.STATUS_CODE_QUEUED]:
                mc_template_value['status_message'] = 'Clearing in progress.'
                mc_template_value['job_in_progress'] = True
            elif clearing_job.status_code == jobs.STATUS_CODE_COMPLETED:
                mc_template_value['status_message'] = (
                    'The index has been cleared.')
            elif clearing_job.status_code == jobs.STATUS_CODE_FAILED:
                mc_template_value['status_message'] = (
                    'Clearing job failed with error: %s' % clearing_job.output)
        else:
            mc_template_value['status_message'] = (
                'No indexing job has been run yet.')

        # Both dashboard buttons POST, so each needs its own XSRF token.
        mc_template_value['index_course_xsrf_token'] = self.create_xsrf_token(
            'index_course')
        mc_template_value['clear_index_xsrf_token'] = self.create_xsrf_token(
            'clear_index')

        template_values['main_content'] = jinja2.Markup(self.get_template(
            'search_dashboard.html', [os.path.dirname(__file__)]
        ).render(mc_template_value, autoescape=True))

        self.render_page(template_values)

    def post_index_course(self):
        """Submits a new indexing operation."""
        try:
            incremental = self.request.get('incremental') == 'true'
            check_jobs_and_submit(IndexCourse(self.app_context, incremental),
                                  self.app_context)
        except db.TransactionFailedError:
            # Double submission from multiple browsers, just pass
            pass
        self.redirect('/dashboard?action=search')

    def post_clear_index(self):
        """Submits a new index-clearing operation."""
        try:
            check_jobs_and_submit(ClearIndex(self.app_context),
                                  self.app_context)
        except db.TransactionFailedError:
            # Double submission from multiple browsers, just pass
            pass
        self.redirect('/dashboard?action=search')
class CronHandler(utils.BaseHandler):
    """Iterates through all courses and starts an indexing job for each one.

    All jobs should be submitted through the transactional check_jobs_and_submit
    method to prevent multiple index operations from running at the same time.
    If an index job is currently running when this cron job attempts to start
    one, this operation will be a noop for that course.
    """

    def get(self):
        """Start an index job for each course."""
        cron_logger = logging.getLogger('modules.search.cron')
        self.response.headers['Content-Type'] = 'text/plain'

        if CAN_INDEX_ALL_COURSES_IN_CRON.value:
            # counter doubles as the job number in log lines and as the
            # total reported once all courses have been visited.
            counter = 0
            for context in sites.get_all_courses():
                namespace = context.get_namespace_name()
                counter += 1
                try:
                    # Transactionally refuses to submit when a job is already
                    # running for this course; that is expected and non-fatal.
                    check_jobs_and_submit(IndexCourse(context), context)
                except db.TransactionFailedError as e:
                    cron_logger.info(
                        'Failed to submit job #%s in namespace %s: %s',
                        counter, namespace, e)
                else:
                    cron_logger.info(
                        'Index job #%s submitted for namespace %s.',
                        counter, namespace)
            cron_logger.info('All %s indexing jobs started; cron job complete.',
                             counter)
        else:
            cron_logger.info('Automatic indexing disabled. Cron job halting.')

        self.response.write('OK\n')
@db.transactional(xg=True)
def check_jobs_and_submit(job, app_context):
    """Determines whether an indexing job is running and submits if not.

    Raises db.TransactionFailedError when either an index or a clear job for
    the course is already queued or running; otherwise submits the given job.
    """
    busy_codes = [jobs.STATUS_CODE_STARTED, jobs.STATUS_CODE_QUEUED]
    existing_jobs = (IndexCourse(app_context).load(),
                     ClearIndex(app_context).load())
    for existing in existing_jobs:
        if existing and existing.status_code in busy_codes:
            raise db.TransactionFailedError('Index job is currently running.')
    job.non_transactional_submit()
class IndexCourse(jobs.DurableJob):
    """A job that indexes the course."""

    def __init__(self, app_context, incremental=True):
        """Constructs the job.

        Args:
            app_context: the course's application context.
            incremental: bool. When True, only new or out-of-date items are
                indexed.
        """
        super(IndexCourse, self).__init__(app_context)
        self.incremental = incremental

    def run(self):
        """Index the course."""
        namespace = namespace_manager.get_namespace()
        # Reuse the namespace already fetched above rather than asking the
        # namespace_manager a second time for the same value.
        logging.info('Running indexing job for namespace %s. Incremental: %s',
                     namespace, self.incremental)
        app_context = sites.get_app_context_for_namespace(namespace)
        course = courses.Course(None, app_context=app_context)
        return index_all_docs(course, self.incremental)
class ClearIndex(jobs.DurableJob):
    """A job that clears the index for a course."""

    def run(self):
        """Clear the index."""
        target_namespace = namespace_manager.get_namespace()
        logging.info('Running clearing job for namespace %s.', target_namespace)
        context = sites.get_app_context_for_namespace(target_namespace)
        # The Course wrapper only needs the app context for index lookup.
        return clear_index(courses.Course(None, app_context=context))
# Module registration

# Populated by register_module() at startup.
custom_module = None


def register_module():
    """Registers this module in the registry."""
    global custom_module
    custom_module = custom_modules.Module(
        MODULE_NAME,
        'Provides search capabilities for courses',
        # Global routes: static assets plus the daily indexing cron hook.
        [
            ('/modules/search/assets/.*', AssetsHandler),
            ('/cron/search/index_courses', CronHandler),
        ],
        # Namespaced (per-course) routes.
        [
            ('/search', SearchHandler),
        ])
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resources to be indexed and searched over by the search module."""
__author__ = 'Ellis Michael (emichael@google.com)'
import collections
import datetime
import gettext
import HTMLParser
import logging
import operator
import os
import Queue
import re
import robotparser
import urllib
import urlparse
from xml.dom import minidom
import appengine_config
from common import jinja_utils
import jinja2
from modules.announcements import announcements
from google.appengine.api import search
from google.appengine.api import urlfetch
PROTOCOL_PREFIX = 'http://'
YOUTUBE_DATA_URL = 'http://gdata.youtube.com/feeds/api/videos/'
YOUTUBE_TIMED_TEXT_URL = 'http://youtube.com/api/timedtext'
# The limit (in seconds) for the time that elapses before a new transcript
# fragment should be started. A lower value results in more fine-grain indexing
# and more docs in the index.
YOUTUBE_CAPTION_SIZE_SECS = 30
class URLNotParseableException(Exception):
    """Exception thrown when the resource at a URL cannot be parsed."""
class ResourceHTMLParser(HTMLParser.HTMLParser):
    """Custom parser for processing HTML files."""

    # Text found inside these tags is never added to the indexed content.
    IGNORED_TAGS = ['script', 'style']

    def __init__(self, url):
        HTMLParser.HTMLParser.__init__(self)
        self.content_list = []
        self._links = []
        self._title = ''
        # Counts how deeply nested we currently are in each tag name.
        self.tag_tracker = collections.Counter()
        self.url = url

    def handle_starttag(self, tag, attrs):
        """Records anchor targets and tracks nesting depth per tag."""
        attrs_map = dict(attrs)
        if tag == 'a' and 'href' in attrs_map:
            # Resolve relative links against the page's own URL.
            self._links.append(urlparse.urljoin(self.url, attrs_map['href']))
        self.tag_tracker[tag] += 1

    def handle_endtag(self, tag):
        if self.tag_tracker[tag] > 0:
            self.tag_tracker[tag] -= 1

    def handle_data(self, data):
        """Invoked every time the parser encounters the page's inner content."""
        # Text inside <title> accumulates separately; multiple chunks are
        # joined with newlines.
        if self.tag_tracker['title']:
            if self._title:
                self._title = '%s\n%s' % (self._title, data)
            else:
                self._title = data

        stripped = data.strip()
        inside_ignored = any(
            self.tag_tracker[tag] for tag in self.IGNORED_TAGS)
        if stripped and not inside_ignored:
            self.content_list.append(stripped)

    def get_content(self):
        return '\n'.join(self.content_list)

    def get_links(self):
        return self._links

    def get_title(self):
        return self._title
def get_parser_for_html(url, ignore_robots=False):
    """Fetches url and returns a ResourceHTMLParser fed with its contents.

    Args:
        url: string. The page to fetch and parse.
        ignore_robots: bool. If True, skip the robots.txt check.

    Returns:
        ResourceHTMLParser holding the parsed data.

    Raises:
        URLNotParseableException: if robots.txt forbids access, or the page
            cannot be fetched or is not HTML/XML.
    """
    if not (ignore_robots or _url_allows_robots(url)):
        raise URLNotParseableException('robots.txt disallows access to URL: %s'
                                       % url)
    parser = ResourceHTMLParser(url)
    try:
        result = urlfetch.fetch(url)
        fetched_ok = result.status_code in [200, 304]
        if not fetched_ok or not any(
                markup_type in result.headers['Content-type']
                for markup_type in ['text/html', 'xml']):
            raise ValueError
        if not isinstance(result.content, unicode):
            result.content = result.content.decode('utf-8')
        parser.feed(result.content)
    except BaseException as e:
        # Any failure (fetch, decode, parse) is reported uniformly.
        raise URLNotParseableException('Could not parse file at URL: %s\n%s' %
                                       (url, e))
    return parser
def get_minidom_from_xml(url, ignore_robots=False):
    """Fetches url and returns its body parsed into a minidom document.

    Args:
        url: string. The XML document to fetch.
        ignore_robots: bool. If True, skip the robots.txt check.

    Returns:
        xml.dom.minidom.Document for the fetched body.

    Raises:
        URLNotParseableException: if robots.txt forbids access, the fetch
            fails, the status code is bad, or the body is not valid XML.
    """
    if not (ignore_robots or _url_allows_robots(url)):
        raise URLNotParseableException('robots.txt disallows access to URL: %s'
                                       % url)
    try:
        result = urlfetch.fetch(url)
    except urlfetch.Error as e:
        raise URLNotParseableException('Could not parse file at URL: %s. %s' %
                                       (url, e))
    if result.status_code not in [200, 304]:
        raise URLNotParseableException('Bad status code (%s) for URL: %s' %
                                       (result.status_code, url))
    try:
        content = result.content
        if isinstance(content, unicode):
            # minidom wants bytes, not a unicode string.
            content = content.encode('utf-8')
        return minidom.parseString(content)
    except BaseException as e:
        raise URLNotParseableException(
            'Error parsing XML document at URL: %s. %s' % (url, e))
def _url_allows_robots(url):
    """Checks robots.txt for user agent * at URL.

    Args:
        url: string. The URL whose host's /robots.txt is consulted.

    Returns:
        bool. True iff user agent '*' is allowed to fetch url.

    Raises:
        URLNotParseableException: if robots.txt could not be retrieved.
    """
    url = url.encode('utf-8')
    try:
        parts = urlparse.urlparse(url)
        base = urlparse.urlunsplit((
            parts.scheme, parts.netloc, '', None, None))
        rp = robotparser.RobotFileParser(url=urlparse.urljoin(
            base, '/robots.txt'))
        rp.read()
    except BaseException as e:
        # Fixed typo in the log message ('retreive' -> 'retrieve').
        logging.info('Could not retrieve robots.txt for URL: %s', url)
        raise URLNotParseableException(e)
    else:
        return rp.can_fetch('*', url)
class Resource(object):
    """Abstract superclass for an indexable course resource."""

    # Subclasses override with a human-readable type name used in doc ids.
    TYPE_NAME = 'Resource'

    # Subclasses list the fields they need returned with a search result.
    RETURNED_FIELDS = []

    # Subclasses list the fields returned as snippets in a search result;
    # in most cases this should be a single field.
    SNIPPETED_FIELDS = []

    # Days that must elapse before a resource is re-indexed. Nonnegative.
    FRESHNESS_THRESHOLD_DAYS = 0

    @classmethod
    def generate_all(
        cls, course, timestamps):  # pylint: disable-msg=unused-argument
        """A generator returning objects of type cls in the course.

        This generator should yield resources based on the last indexed time
        in timestamps.

        Args:
            course: models.courses.course. the course to index.
            timestamps: dict from doc_ids to last indexed datetimes.

        Yields:
            A sequence of Resource objects.
        """
        # The base implementation is an intentionally empty generator; all
        # subclass implementations must also be generators for memory-
        # management reasons.
        return
        yield  # pylint: disable-msg=unreachable

    @classmethod
    def _get_doc_id(cls, *unused_vargs):
        """Subclasses should implement this with identifying fields as args."""
        raise NotImplementedError

    @classmethod
    def _indexed_within_num_days(cls, timestamps, doc_id, num_days):
        """True iff doc_id's last-indexed time is within num_days of now."""
        try:
            last_indexed = timestamps[doc_id]
        except (KeyError, TypeError):
            # Unknown doc_id, or timestamps is not a mapping at all.
            return False
        age = datetime.datetime.utcnow() - last_indexed
        return age <= datetime.timedelta(num_days)

    def get_document(self):
        """Return a search.Document to be indexed."""
        raise NotImplementedError

    def get_links(self):
        """External links to be indexed; stored in self.links when present."""
        return getattr(self, 'links', [])
class Result(object):
    """Abstract superclass for one entry rendered on the results page."""

    def get_html(self):
        """Return an HTML fragment to be used in the results page."""
        raise NotImplementedError

    @classmethod
    def _generate_html_from_template(cls, template_name, template_value):
        """Renders template_name with template_value into safe markup."""
        search_dirs = [os.path.join(
            appengine_config.BUNDLE_ROOT,
            'modules', 'search', 'results_templates')]
        template = jinja_utils.get_template(template_name, search_dirs)
        return jinja2.Markup(template.render(template_value))

    @classmethod
    def _get_returned_field(cls, result, field):
        """Value of the named returned field in result; '' if none exists."""
        try:
            return result[field][0].value
        except (KeyError, IndexError, AttributeError):
            return ''

    @classmethod
    def _get_snippet(cls, result):
        """Value of the snippet expression in result; '' if none exists."""
        try:
            return result.expressions[0].value
        except (AttributeError, IndexError):
            return ''
class LessonResource(Resource):
    """An indexable lesson in a course."""

    TYPE_NAME = 'Lesson'
    RETURNED_FIELDS = ['title', 'unit_id', 'lesson_id', 'url']
    SNIPPETED_FIELDS = ['content']
    FRESHNESS_THRESHOLD_DAYS = 3

    @classmethod
    def generate_all(cls, course, timestamps):
        """Yields a LessonResource for every available, stale lesson."""
        for lesson in course.get_lessons_for_all_units():
            unit = course.find_unit_by_id(lesson.unit_id)
            # Skip lessons students cannot see.
            if not (lesson.now_available and unit.now_available):
                continue
            doc_id = cls._get_doc_id(lesson.unit_id, lesson.lesson_id)
            # Skip lessons indexed recently enough.
            if cls._indexed_within_num_days(timestamps, doc_id,
                                            cls.FRESHNESS_THRESHOLD_DAYS):
                continue
            try:
                yield LessonResource(lesson)
            except HTMLParser.HTMLParseError as e:
                logging.info(
                    'Error parsing objectives for Lesson %s.%s: %s',
                    lesson.unit_id, lesson.lesson_id, e)
                continue

    @classmethod
    def _get_doc_id(cls, unit_id, lesson_id):
        return '%s_%s_%s' % (cls.TYPE_NAME, unit_id, lesson_id)

    def __init__(self, lesson):
        super(LessonResource, self).__init__()
        self.unit_id = lesson.unit_id
        self.lesson_id = lesson.lesson_id
        self.title = lesson.title
        self.notes = (
            urlparse.urljoin(PROTOCOL_PREFIX, lesson.notes)
            if lesson.notes else '')
        if lesson.objectives:
            # Strip markup from the lesson body and remember its links.
            parser = ResourceHTMLParser(PROTOCOL_PREFIX)
            parser.feed(lesson.objectives)
            self.content = parser.get_content()
            self.links = parser.get_links()
        else:
            self.content = ''

    def get_document(self):
        """Builds the search.Document for this lesson."""
        lesson_url = 'unit?unit=%s&lesson=%s' % (self.unit_id, self.lesson_id)
        return search.Document(
            doc_id=self._get_doc_id(self.unit_id, self.lesson_id),
            fields=[
                search.TextField(name='title', value=self.title),
                search.TextField(name='content', value=self.content),
                search.TextField(name='url', value=lesson_url),
                search.TextField(name='type', value=self.TYPE_NAME),
                search.DateField(name='date',
                                 value=datetime.datetime.utcnow())])
class LessonResult(Result):
    """Search-results renderer for a lesson document."""

    def __init__(self, search_result):
        super(LessonResult, self).__init__()
        self.url = self._get_returned_field(search_result, 'url')
        self.title = self._get_returned_field(search_result, 'title')
        self.snippet = self._get_snippet(search_result)

    def get_html(self):
        """Renders the generic result template for this lesson."""
        # I18N: Displayed in search results; denotes a lesson link.
        lesson_string = gettext.gettext('Lesson')
        return self._generate_html_from_template('basic.html', {
            'result_title': '%s - %s' % (self.title, lesson_string),
            'result_url': self.url,
            'result_snippet': jinja2.Markup(self.snippet),
        })
class ExternalLinkResource(Resource):
    """An external link from a course."""

    TYPE_NAME = 'ExternalLink'
    RETURNED_FIELDS = ['title', 'url']
    SNIPPETED_FIELDS = ['content']
    FRESHNESS_THRESHOLD_DAYS = 15

    # TODO(emichael): Allow the user to turn off external links in the dashboard

    @classmethod
    def generate_all_from_dist_dict(cls, link_dist, timestamps):
        """Generate all external links from a map from URL to distance.

        Args:
            link_dist: dict. a map from URL to distance in the link graph from
                the course. Mutated by this method: newly discovered links are
                added with distance dist + 1.
            timestamps: dict from doc_ids to last indexed datetimes. An empty
                dict indicates that all documents should be generated.

        Yields:
            A sequence of ExternalLinkResource.
        """
        # NOTE(review): URLs are pushed onto a LIFO queue in ascending
        # distance order, so the farthest URLs are popped first and the
        # 'break' below stops the crawl as soon as a distance > 1 URL is
        # seen -- confirm this ordering is the intended behavior.
        url_queue = Queue.LifoQueue()
        for url, dist in sorted(link_dist.iteritems(),
                                key=operator.itemgetter(1)):
            url_queue.put(url)
        while not url_queue.empty():
            url = url_queue.get()
            doc_id = cls._get_doc_id(url)
            # Skip documents that are still fresh in the index.
            if (cls._indexed_within_num_days(timestamps, doc_id,
                                             cls.FRESHNESS_THRESHOLD_DAYS)):
                continue
            dist = link_dist[url]
            if dist > 1:
                break
            try:
                resource = ExternalLinkResource(url)
            except URLNotParseableException as e:
                # Unfetchable or unparseable pages are logged and skipped.
                logging.info(e)
            else:
                if dist < 1:
                    # Only pages reachable directly from course material
                    # (distance 0) contribute new links to the crawl.
                    for new_link in resource.get_links():
                        if new_link not in link_dist:
                            link_dist[new_link] = dist + 1
                            url_queue.put(new_link)
                yield resource

    def __init__(self, url):
        # distance is the distance from the course material in the link graph,
        # where a lesson notes page has a distance of 0
        super(ExternalLinkResource, self).__init__()
        self.url = url
        # Raises URLNotParseableException if the page cannot be fetched or
        # is not HTML/XML; the caller handles that case.
        parser = get_parser_for_html(url)
        self.content = parser.get_content()
        self.title = parser.get_title()
        self.links = parser.get_links()

    @classmethod
    def _get_doc_id(cls, url):
        # The URL itself uniquely identifies the document.
        return '%s_%s' % (cls.TYPE_NAME, url)

    def get_document(self):
        """Builds the search.Document for this external page."""
        return search.Document(
            doc_id=self._get_doc_id(self.url),
            fields=[
                search.TextField(name='title', value=self.title),
                search.TextField(name='content', value=self.content),
                search.TextField(name='url', value=self.url),
                search.TextField(name='type', value=self.TYPE_NAME),
                search.DateField(name='date',
                                 value=datetime.datetime.utcnow())])
class ExternalLinkResult(Result):
    """Search-results renderer for an external link document."""

    def __init__(self, search_result):
        super(ExternalLinkResult, self).__init__()
        self.url = self._get_returned_field(search_result, 'url')
        self.title = self._get_returned_field(search_result, 'title')
        self.snippet = self._get_snippet(search_result)

    def get_html(self):
        """Renders the generic result template for this link."""
        return self._generate_html_from_template('basic.html', {
            'result_title': self.title,
            'result_url': self.url,
            'result_snippet': jinja2.Markup(self.snippet),
        })
class YouTubeFragmentResource(Resource):
    """An object for a YouTube transcript fragment in search results."""

    TYPE_NAME = 'YouTubeFragment'
    RETURNED_FIELDS = ['title', 'video_id', 'start', 'thumbnail_url']
    SNIPPETED_FIELDS = ['content']
    FRESHNESS_THRESHOLD_DAYS = 30

    @classmethod
    def generate_all(cls, course, timestamps):
        """Generate all YouTubeFragments for a course."""
        # TODO(emichael): Handle the existence of a single video in multiple
        # places in a course.

        # Matches <gcb-youtube ... videoid='...'> custom tags; group 1 is
        # the video id.
        youtube_ct_regex = r"""<[ ]*gcb-youtube[^>]+videoid=['"]([^'"]+)['"]"""
        for lesson in course.get_lessons_for_all_units():
            unit = course.find_unit_by_id(lesson.unit_id)
            # Only index lessons visible to students.
            if not (lesson.now_available and unit.now_available):
                continue
            lesson_url = 'unit?unit=%s&lesson=%s' % (
                lesson.unit_id, lesson.lesson_id)
            # The lesson's primary video, re-indexed only when stale.
            if lesson.video and not cls._indexed_within_num_days(
                    timestamps, lesson.video, cls.FRESHNESS_THRESHOLD_DAYS):
                for fragment in cls._get_fragments_for_video(
                        lesson.video, lesson_url):
                    yield fragment
            # Videos embedded in the lesson body via gcb-youtube tags.
            # NOTE(review): re.search yields only the first match in the
            # body, so only one embedded video per lesson is picked up
            # here -- confirm whether that is intended.
            match = re.search(youtube_ct_regex, lesson.objectives)
            if match:
                for video_id in match.groups():
                    if not cls._indexed_within_num_days(
                            timestamps, video_id, cls.FRESHNESS_THRESHOLD_DAYS):
                        for fragment in cls._get_fragments_for_video(
                                video_id, lesson_url):
                            yield fragment
        # Videos embedded in announcements, when that module is enabled.
        if announcements.custom_module.enabled:
            for entity in announcements.AnnouncementEntity.get_announcements():
                if entity.is_draft:
                    continue
                announcement_url = 'announcements#%s' % entity.key()
                match = re.search(youtube_ct_regex, entity.html)
                if match:
                    for video_id in match.groups():
                        if not cls._indexed_within_num_days(
                                timestamps, video_id,
                                cls.FRESHNESS_THRESHOLD_DAYS):
                            for fragment in cls._get_fragments_for_video(
                                    video_id, announcement_url):
                                yield fragment

    @classmethod
    def _indexed_within_num_days(cls, timestamps, video_id, num_days):
        """Freshness check keyed on video id rather than full doc id.

        One video produces many fragment docs whose ids share the prefix
        built by _get_doc_id(video_id, ''); the first doc id found with
        that prefix stands in for the whole video.
        """
        for doc_id in timestamps:
            if doc_id.startswith(cls._get_doc_id(video_id, '')):
                return super(
                    YouTubeFragmentResource, cls)._indexed_within_num_days(
                        timestamps, doc_id, num_days)
        return False

    @classmethod
    def _get_fragments_for_video(cls, video_id, url_in_course):
        """Get all of the transcript fragment docs for a specific video."""
        try:
            (transcript, title, thumbnail_url) = cls._get_video_data(video_id)
        except BaseException as e:
            # A video with no retrievable transcript simply produces no docs.
            logging.info('Could not parse YouTube video with id %s.\n%s',
                         video_id, e)
            return []
        # Aggregate the fragments into YOUTUBE_CAPTION_SIZE_SECS time chunks
        fragments = transcript.getElementsByTagName('text')
        aggregated_fragments = []
        # This parser is only used for unescaping HTML entities
        parser = HTMLParser.HTMLParser()
        while fragments:
            current_start = float(fragments[0].attributes['start'].value)
            current_text = []
            # Consume caption lines until the next line starts at least
            # YOUTUBE_CAPTION_SIZE_SECS after the chunk began.
            while (fragments and
                   float(fragments[0].attributes['start'].value) -
                   current_start < YOUTUBE_CAPTION_SIZE_SECS):
                current_text.append(parser.unescape(
                    fragments.pop(0).firstChild.nodeValue))
            aggregated_fragment = YouTubeFragmentResource(
                video_id, url_in_course, current_start,
                '\n'.join(current_text), title, thumbnail_url)
            aggregated_fragments.append(aggregated_fragment)
        return aggregated_fragments

    @classmethod
    def _get_video_data(cls, video_id):
        """Returns (track_minidom, title, thumbnail_url) for a video."""
        # Metadata (title, thumbnail) comes from the GData feed; failures
        # here are non-fatal and leave both values empty.
        try:
            vid_info = get_minidom_from_xml(
                urlparse.urljoin(YOUTUBE_DATA_URL, video_id),
                ignore_robots=True)
            title = vid_info.getElementsByTagName(
                'title')[0].firstChild.nodeValue
            thumbnail_url = vid_info.getElementsByTagName(
                'media:thumbnail')[0].attributes['url'].value
        except (URLNotParseableException, IOError,
                IndexError, AttributeError) as e:
            logging.error('Could not parse video info for video id %s.\n%s',
                          video_id, e)
            title = ''
            thumbnail_url = ''
        # TODO(emichael): Handle the existence of multiple tracks
        # List the available caption tracks, then fetch the first one.
        url = urlparse.urljoin(YOUTUBE_TIMED_TEXT_URL,
                               '?v=%s&type=list' % video_id)
        tracklist = get_minidom_from_xml(url, ignore_robots=True)
        tracks = tracklist.getElementsByTagName('track')
        if not tracks:
            raise URLNotParseableException('No tracks for video %s' % video_id)
        track_name = tracks[0].attributes['name'].value
        track_lang = tracks[0].attributes['lang_code'].value
        track_id = tracks[0].attributes['id'].value
        # Quote the query string but keep the URL-significant chars '?/=&'.
        url = urlparse.urljoin(YOUTUBE_TIMED_TEXT_URL, urllib.quote(
            '?v=%s&lang=%s&name=%s&id=%s' %
            (video_id, track_lang, track_name, track_id), '?/=&'))
        transcript = get_minidom_from_xml(url, ignore_robots=True)
        return (transcript, title, thumbnail_url)

    @classmethod
    def _get_doc_id(cls, video_id, start_time):
        # All fragments of one video share the '<type>_<video_id>_' prefix;
        # start_time distinguishes them.
        return '%s_%s_%s' % (cls.TYPE_NAME, video_id, start_time)

    def __init__(self, video_id, url, start, text, video_title, thumbnail_url):
        super(YouTubeFragmentResource, self).__init__()
        self.url = url
        self.video_id = video_id
        self.start = start
        self.text = text
        self.video_title = video_title
        self.thumbnail_url = thumbnail_url

    def get_document(self):
        """Builds the search.Document for this transcript fragment."""
        return search.Document(
            doc_id=self._get_doc_id(self.video_id, self.start),
            fields=[
                search.TextField(name='title', value=self.video_title),
                search.TextField(name='video_id', value=self.video_id),
                search.TextField(name='content', value=self.text),
                search.NumberField(name='start', value=self.start),
                search.TextField(name='thumbnail_url',
                                 value=self.thumbnail_url),
                search.TextField(name='url', value=self.url),
                search.TextField(name='type', value=self.TYPE_NAME),
                search.DateField(name='date',
                                 value=datetime.datetime.utcnow())])
class YouTubeFragmentResult(Result):
    """Search-results renderer for a YouTube transcript fragment."""

    def __init__(self, search_result):
        super(YouTubeFragmentResult, self).__init__()
        self.doc_id = search_result.doc_id
        self.snippet = self._get_snippet(search_result)
        # All remaining attributes come straight from returned fields.
        for field in ('title', 'video_id', 'start', 'thumbnail_url', 'url'):
            setattr(self, field,
                    self._get_returned_field(search_result, field))

    def get_html(self):
        """Renders the YouTube-specific result template."""
        return self._generate_html_from_template('youtube.html', {
            'result_title': self.title,
            'result_url': self.url,
            'video_id': self.video_id,
            'start_time': self.start,
            'thumbnail_url': self.thumbnail_url,
            'result_snippet': jinja2.Markup(self.snippet),
        })
class AnnouncementResource(Resource):
    """An indexable announcement in a course."""

    TYPE_NAME = 'Announcement'
    RETURNED_FIELDS = ['title', 'url']
    SNIPPETED_FIELDS = ['content']
    FRESHNESS_THRESHOLD_DAYS = 1

    @classmethod
    def generate_all(cls, course, timestamps):
        """Yields an AnnouncementResource for each published, stale entity."""
        if not announcements.custom_module.enabled:
            return
        for entity in announcements.AnnouncementEntity.get_announcements():
            doc_id = cls._get_doc_id(entity.key())
            # Skip drafts and recently indexed announcements.
            if entity.is_draft or cls._indexed_within_num_days(
                    timestamps, doc_id, cls.FRESHNESS_THRESHOLD_DAYS):
                continue
            try:
                yield AnnouncementResource(entity)
            except HTMLParser.HTMLParseError as e:
                logging.info('Error parsing Announcement %s: %s',
                             entity.title, e)
                continue

    def __init__(self, announcement):
        super(AnnouncementResource, self).__init__()
        self.title = announcement.title
        self.key = announcement.key()
        # Strip markup from the announcement body before indexing.
        parser = ResourceHTMLParser(PROTOCOL_PREFIX)
        parser.feed(announcement.html)
        self.content = parser.get_content()

    @classmethod
    def _get_doc_id(cls, key):
        return '%s_%s' % (cls.TYPE_NAME, key)

    def get_document(self):
        """Builds the search.Document for this announcement."""
        doc_fields = [
            search.TextField(name='title', value=self.title),
            search.TextField(name='content', value=self.content),
            search.TextField(name='url',
                             value='announcements#%s' % self.key),
            search.TextField(name='type', value=self.TYPE_NAME),
            search.DateField(name='date',
                             value=datetime.datetime.utcnow()),
        ]
        return search.Document(
            doc_id=self._get_doc_id(self.key), fields=doc_fields)
class AnnouncementResult(Result):
    """Search-results renderer for an announcement document."""

    def __init__(self, search_result):
        super(AnnouncementResult, self).__init__()
        self.url = self._get_returned_field(search_result, 'url')
        self.title = self._get_returned_field(search_result, 'title')
        self.snippet = self._get_snippet(search_result)

    def get_html(self):
        """Renders the generic result template for this announcement."""
        # I18N: Displayed in search results; denotes an announcement link.
        announcement_string = gettext.gettext('Announcement')
        return self._generate_html_from_template('basic.html', {
            'result_title': '%s - %s' % (self.title, announcement_string),
            'result_url': self.url,
            'result_snippet': jinja2.Markup(self.snippet),
        })
# Register new resource types here
# Each pair maps an indexable Resource subclass to the Result subclass that
# renders its hits on the search results page.
RESOURCE_TYPES = [
    (LessonResource, LessonResult),
    (ExternalLinkResource, ExternalLinkResult),
    (YouTubeFragmentResource, YouTubeFragmentResult),
    (AnnouncementResource, AnnouncementResult)
]
def get_returned_fields():
    """Returns a list of fields that should be returned in a search result."""
    # 'type' is always needed so results can be dispatched to a renderer.
    fields = set(['type'])
    for resource_type, unused_result_type in RESOURCE_TYPES:
        fields.update(resource_type.RETURNED_FIELDS)
    return list(fields)
def get_snippeted_fields():
    """Returns a list of fields that should be snippeted in a search result."""
    fields = set()
    for resource_type, unused_result_type in RESOURCE_TYPES:
        fields.update(resource_type.SNIPPETED_FIELDS)
    return list(fields)
def generate_all_documents(course, timestamps):
    """A generator for all search documents of a given course.

    Args:
        course: models.courses.Course. the course to be indexed.
        timestamps: dict from doc_ids to last indexed datetimes. An empty dict
            indicates that all documents should be generated.

    Yields:
        A sequence of search.Document. Resources still within their freshness
        threshold produce no document. timestamps is never modified here.
    """
    link_dist = {}
    for rsrc_cls, unused_result_cls in RESOURCE_TYPES:
        for rsrc in rsrc_cls.generate_all(course, timestamps):
            if isinstance(rsrc, LessonResource) and rsrc.notes:
                # Lesson notes pages seed the external-link crawl: the notes
                # page is distance 0, the links it contains are distance 1.
                link_dist[rsrc.notes] = 0
                for link in rsrc.get_links():
                    link_dist[link] = 1
            yield rsrc.get_document()
    for rsrc in ExternalLinkResource.generate_all_from_dist_dict(
            link_dist, timestamps):
        yield rsrc.get_document()
def process_results(results):
    """Wraps raw query results in their matching Result renderers."""
    renderer_by_type = dict(
        (resource_type.TYPE_NAME, result_type)
        for resource_type, result_type in RESOURCE_TYPES)
    processed = []
    for result in results:
        try:
            type_name = result['type'][0].value
            processed.append(renderer_by_type[type_name](result))
        except (AttributeError, IndexError, KeyError) as e:
            # If there is no type information, we cannot process the result
            logging.error("%s. Couldn't process result", e)
    return processed
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for implementing question tags."""
__author__ = 'sll@google.com (Sean Lip)'
import os
from common import jinja_utils
from common import schema_fields
from common import tags
import jinja2
from models import custom_modules
from models import models as m_models
from models import transforms
# URL prefix under which this module's static assets are served; a matching
# global route is registered in register_module().
RESOURCES_PATH = '/modules/assessment_tags/resources'
def render_question(
    quid, instanceid, locale, embedded=False, weight=None, progress=None):
    """Generates the HTML for a question.

    Args:
        quid: String. The question id.
        instanceid: String. The unique reference id for the question instance
            (different instances of the same question in a page will have
            different instanceids).
        locale: String. The locale for the Jinja environment that is used to
            generate the question HTML.
        embedded: Boolean. Whether this question is embedded within a container
            object.
        weight: number. The weight to be used when grading the question in a
            scored lesson. This value is cast to a float and, if this cast
            fails, defaults to 1.0.
        progress: None, 0 or 1. If None, no progress marker should be shown. If
            0, a 'not-started' progress marker should be shown. If 1, a
            'complete' progress marker should be shown.

    Returns:
        a Jinja markup string that represents the HTML for the question.
    """
    try:
        question_dto = m_models.QuestionDAO.load(quid)
    except Exception:  # pylint: disable-msg=broad-except
        return '[Invalid question]'
    if not question_dto:
        return '[Question deleted]'

    # The docstring promises a 1.0 fallback when the cast fails. weight
    # defaults to None, and float(None) raises TypeError (not ValueError),
    # so both exception types must be caught for the fallback to work.
    try:
        weight = float(weight)
    except (TypeError, ValueError):
        weight = 1.0

    template_values = question_dto.dict
    template_values['embedded'] = embedded
    template_values['instanceid'] = instanceid
    template_values['resources_path'] = RESOURCES_PATH
    if progress is not None:
        template_values['progress'] = progress

    template_file = None
    js_data = {}
    if question_dto.type == question_dto.MULTIPLE_CHOICE:
        template_file = 'templates/mc_question.html'
        multi = template_values['multiple_selections']
        template_values['button_type'] = 'checkbox' if multi else 'radio'
        # Only scores and feedback are passed to the client-side grader.
        choices = [{
            'score': choice['score'], 'feedback': choice.get('feedback')
        } for choice in template_values['choices']]
        js_data['choices'] = choices
    elif question_dto.type == question_dto.SHORT_ANSWER:
        template_file = 'templates/sa_question.html'
        js_data['graders'] = template_values['graders']
        js_data['hint'] = template_values.get('hint')
        js_data['defaultFeedback'] = template_values.get('defaultFeedback')
        # The following two lines are included for backwards compatibility with
        # v1.5 questions that do not have the row and column properties set.
        template_values['rows'] = template_values.get(
            'rows', m_models.SaQuestionConstants.DEFAULT_HEIGHT_ROWS)
        template_values['columns'] = template_values.get(
            'columns', m_models.SaQuestionConstants.DEFAULT_WIDTH_COLUMNS)
    else:
        return '[Unsupported question type]'

    # Display the weight as an integer if it is sufficiently close to an
    # integer. Otherwise, round it to 2 decimal places. This ensures that the
    # weights displayed to the student are exactly the same as the weights that
    # are used for grading.
    weight = (int(round(weight)) if abs(weight - round(weight)) < 1e-6
              else round(weight, 2))
    template_values['displayed_weight'] = weight
    if not embedded:
        js_data['weight'] = float(weight)
    template_values['js_data'] = transforms.dumps(js_data)

    template = jinja_utils.get_template(
        template_file, [os.path.dirname(__file__)], locale=locale)
    return jinja2.utils.Markup(template.render(template_values))
class QuestionTag(tags.BaseTag):
    """A custom tag that renders a single question."""

    binding_name = 'question'

    @classmethod
    def name(cls):
        return 'Question'

    @classmethod
    def vendor(cls):
        return 'gcb'

    def get_icon_url(self):
        return '/modules/assessment_tags/resources/question.png'

    def render(self, node, handler):
        """Renders the question referenced by the tag node."""
        locale = handler.app_context.get_environ()['course']['locale']
        quid = node.attrib.get('quid')
        weight = node.attrib.get('weight')
        instanceid = node.attrib.get('instanceid')

        # Progress markers only apply to logged-in students on unscored
        # lessons.
        progress = None
        if (hasattr(handler, 'student') and not handler.student.is_transient
            and not handler.lesson_is_scored):
            tracker = handler.get_course().get_progress_tracker()
            progress = tracker.get_component_progress(
                handler.student, handler.unit_id, handler.lesson_id,
                instanceid)

        html_string = render_question(
            quid, instanceid, locale, embedded=False, weight=weight,
            progress=progress)
        return tags.html_string_to_element_tree(html_string)

    def get_schema(self, unused_handler):
        """Get the schema for specifying the question."""
        # q.id is a number but the schema requires a string.
        question_list = [
            (unicode(q.id), q.description)
            for q in m_models.QuestionDAO.get_all()]
        if not question_list:
            return self.unavailable_schema('No questions available')

        reg = schema_fields.FieldRegistry('Question')
        reg.add_property(
            schema_fields.SchemaField(
                'quid', 'Question', 'string', optional=True,
                select_data=question_list))
        reg.add_property(
            schema_fields.SchemaField(
                'weight', 'Weight', 'string', optional=True,
                extra_schema_dict_values={'value': '1'},
                description='The number of points for a correct answer.'))
        return reg
class QuestionGroupTag(tags.BaseTag):
    """A custom tag that renders a group of questions."""

    binding_name = 'question-group'

    @classmethod
    def name(cls):
        return 'Question Group'

    @classmethod
    def vendor(cls):
        return 'gcb'

    def get_icon_url(self):
        return '/modules/assessment_tags/resources/question_group.png'

    def render(self, node, handler):
        """Renders the question group referenced by the tag node."""
        locale = handler.app_context.get_environ()['course']['locale']
        qgid = node.attrib.get('qgid')
        group_instanceid = node.attrib.get('instanceid')
        question_group_dto = m_models.QuestionGroupDAO.load(qgid)
        if not question_group_dto:
            return tags.html_string_to_element_tree('[Deleted question group]')

        template_values = question_group_dto.dict
        template_values['embedded'] = False
        template_values['instanceid'] = group_instanceid
        template_values['resources_path'] = RESOURCES_PATH

        # Progress markers only apply to logged-in students on unscored
        # lessons.
        if (hasattr(handler, 'student') and not handler.student.is_transient
            and not handler.lesson_is_scored):
            progress = handler.get_course().get_progress_tracker(
                ).get_component_progress(
                    handler.student, handler.unit_id, handler.lesson_id,
                    group_instanceid)
            template_values['progress'] = progress

        # Render each member question embedded, with a derived instance id.
        question_html = []
        js_data = {}
        for ind, item in enumerate(question_group_dto.dict['items']):
            quid = item['question']
            question_instanceid = '%s.%s.%s' % (group_instanceid, ind, quid)
            question_html.append(render_question(
                quid, question_instanceid, locale, weight=item['weight'],
                embedded=True
            ))
            js_data[question_instanceid] = item
        template_values['question_html_array'] = question_html
        template_values['js_data'] = transforms.dumps(js_data)

        template = jinja_utils.get_template(
            'templates/question_group.html', [os.path.dirname(__file__)],
            locale=locale)
        return tags.html_string_to_element_tree(
            template.render(template_values))

    def get_schema(self, unused_handler):
        """Get the schema for specifying the question group."""
        # q.id is a number but the schema requires a string.
        question_group_list = [
            (unicode(q.id), q.description)
            for q in m_models.QuestionGroupDAO.get_all()]
        if not question_group_list:
            return self.unavailable_schema('No question groups available')

        reg = schema_fields.FieldRegistry('Question Group')
        reg.add_property(
            schema_fields.SchemaField(
                'qgid', 'Question Group', 'string', optional=True,
                select_data=question_group_list))
        return reg
# Module handle assigned by register_module(); None until registration runs.
custom_module = None
def register_module():
    """Registers this module in the registry."""

    # One (binding_name, tag_class) pair per custom tag this module owns.
    tag_bindings = [
        (QuestionTag.binding_name, QuestionTag),
        (QuestionGroupTag.binding_name, QuestionGroupTag),
    ]

    def when_module_enabled():
        # Register custom tags and hide them from course-scope editors.
        for binding_name, tag_class in tag_bindings:
            tags.Registry.add_tag_binding(binding_name, tag_class)
            tags.EditorBlacklists.register(
                binding_name, tags.EditorBlacklists.COURSE_SCOPE)

    def when_module_disabled():
        # Unregister custom tags.
        for binding_name, unused_tag_class in tag_bindings:
            tags.Registry.remove_tag_binding(binding_name)
            tags.EditorBlacklists.unregister(
                binding_name, tags.EditorBlacklists.COURSE_SCOPE)

    # Add a static handler for icons shown in the rich text editor.
    global_routes = [(
        os.path.join(RESOURCES_PATH, '.*'), tags.ResourcesHandler)]

    global custom_module
    custom_module = custom_modules.Module(
        'Question tags',
        'A set of tags for rendering questions within a lesson body.',
        global_routes,
        [],
        notify_module_enabled=when_module_enabled,
        notify_module_disabled=when_module_disabled)
    return custom_module
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic object editor view that uses REST services."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
import urllib
import appengine_config
from common import jinja_utils
from common import schema_fields
from common import tags
from controllers import utils
import jinja2
from models import custom_modules
from models import transforms
import webapp2
# a set of YUI and inputex modules required by the editor
COMMON_REQUIRED_MODULES = [
    'inputex-group', 'inputex-form', 'inputex-jsonschema']
# Superset of editor widget modules, used when a caller does not pass an
# explicit required_modules list.
ALL_MODULES = [
    'querystring-stringify-simple', 'inputex-select', 'inputex-string',
    'inputex-radio', 'inputex-date', 'inputex-datepicker', 'inputex-checkbox',
    'inputex-list', 'inputex-color', 'gcb-rte', 'inputex-textarea',
    'inputex-url', 'inputex-uneditable', 'inputex-integer', 'inputex-hidden',
    'inputex-file', 'io-upload-iframe']
class ObjectEditor(object):
    """Generic object editor powered by jsonschema."""

    @classmethod
    def get_html_for(
        cls, handler, schema_json, annotations, object_key,
        rest_url, exit_url,
        extra_args=None,
        save_method='put',
        delete_url=None, delete_message=None, delete_method='post',
        auto_return=False, read_only=False,
        required_modules=None,
        extra_js_files=None,
        delete_button_caption='Delete',
        save_button_caption='Save',
        exit_button_caption='Close'):
        """Creates an HTML code needed to embed and operate this form.

        This method creates an HTML, JS and CSS required to embed JSON
        schema-based object editor into a view.

        Args:
            handler: a BaseHandler class, which will host this HTML, JS and CSS
            schema_json: a text of JSON schema for the object being edited
            annotations: schema annotations dictionary
            object_key: a key of an object being edited
            rest_url: a REST endpoint for object GET/PUT operation
            exit_url: a URL to go to after the editor form is dismissed
            extra_args: extra request params passed back in GET and POST
            save_method: how the data should be saved to the server (put|upload)
            delete_url: optional URL for delete operation
            delete_message: string. Optional custom delete confirmation message
            delete_method: optional HTTP method for delete operation
            auto_return: whether to return to the exit_url on successful save
            read_only: optional flag; if set, removes Save and Delete operations
            required_modules: list of inputex modules required for this editor
            extra_js_files: list of extra JS files to be included
            delete_button_caption: string. A caption for the 'Delete' button
            save_button_caption: a caption for the 'Save' button
            exit_button_caption: a caption for the 'Close' button

        Returns:
            The HTML, JS and CSS text that will instantiate an object editor.
        """
        required_modules = required_modules or ALL_MODULES

        # Derive a default delete confirmation from the schema's description.
        if not delete_message:
            kind = transforms.loads(schema_json).get('description')
            if not kind:
                kind = 'Generic Object'
            delete_message = 'Are you sure you want to delete this %s?' % kind

        # construct parameters
        get_url = rest_url
        get_args = {'key': object_key}
        post_url = rest_url
        post_args = {'key': object_key}
        if extra_args:
            get_args.update(extra_args)
            post_args.update(extra_args)
        if read_only:
            # Read-only forms get no save endpoint at all.
            post_url = ''
            post_args = ''

        # One toolbar icon per registered custom tag for the rich text editor.
        custom_rte_tag_icons = []
        for tag, tag_class in tags.get_tag_bindings().items():
            custom_rte_tag_icons.append({
                'name': tag,
                'iconUrl': tag_class().get_icon_url()})

        template_values = {
            'enabled': custom_module.enabled,
            'schema': schema_json,
            'get_url': '%s?%s' % (get_url, urllib.urlencode(get_args, True)),
            'save_url': post_url,
            'save_args': transforms.dumps(post_args),
            'exit_button_caption': exit_button_caption,
            'exit_url': exit_url,
            'required_modules': COMMON_REQUIRED_MODULES + required_modules,
            'extra_js_files': extra_js_files or [],
            'schema_annotations': [
                (item[0], transforms.dumps(item[1])) for item in annotations],
            'save_method': save_method,
            'auto_return': auto_return,
            'delete_button_caption': delete_button_caption,
            'save_button_caption': save_button_caption,
            'custom_rte_tag_icons': transforms.dumps(custom_rte_tag_icons),
            'delete_message': delete_message,
        }
        if delete_url and not read_only:
            template_values['delete_url'] = delete_url
        if delete_method:
            template_values['delete_method'] = delete_method
        if appengine_config.BUNDLE_LIB_FILES:
            template_values['bundle_lib_files'] = True
        return jinja2.utils.Markup(handler.get_template(
            'oeditor.html', [os.path.dirname(__file__)]
        ).render(template_values))
class PopupHandler(webapp2.RequestHandler, utils.ReflectiveRequestHandler):
    """A handler to serve the content of the popup subeditor."""

    default_action = 'custom_tag'
    get_actions = ['edit_custom_tag', 'add_custom_tag']
    post_actions = []

    def get_template(self, template_name, dirs):
        """Sets up an environment and Gets jinja template."""
        return jinja_utils.get_template(
            template_name, dirs + [os.path.dirname(__file__)])

    def get_edit_custom_tag(self):
        """Return the page used to edit a custom HTML tag in a popup."""
        tag_name = self.request.get('tag_name')
        tag_bindings = tags.get_tag_bindings()
        tag_class = tag_bindings[tag_name]
        schema = tag_class().get_schema(self)
        # Nested registries are not representable in the popup editor.
        if schema.has_subregistries():
            raise NotImplementedError()
        template_values = {}
        template_values['form_html'] = ObjectEditor.get_html_for(
            self, schema.get_json_schema(), schema.get_schema_dict(), None,
            None, None)
        self.response.out.write(
            self.get_template('popup.html', []).render(template_values))

    def get_add_custom_tag(self):
        """Return the page for the popup used to add a custom HTML tag."""
        tag_name = self.request.get('tag_name')
        tag_bindings = tags.get_tag_bindings()
        # Build the "vendor: name" choices for the tag type drop-down,
        # sorted by their display label.
        select_data = []
        for name in tag_bindings.keys():
            clazz = tag_bindings[name]
            select_data.append((name, '%s: %s' % (
                clazz.vendor(), clazz.name())))
        select_data = sorted(select_data, key=lambda pair: pair[1])
        # Default to the first tag alphabetically when none was requested.
        if tag_name:
            tag_class = tag_bindings[tag_name]
        else:
            tag_class = tag_bindings[select_data[0][0]]
        tag_schema = tag_class().get_schema(self)
        schema = schema_fields.FieldRegistry('Add a Component')
        type_select = schema.add_sub_registry('type', 'Component Type')
        type_select.add_property(schema_fields.SchemaField(
            'tag', 'Name', 'string', select_data=select_data))
        schema.add_sub_registry('attributes', registry=tag_schema)
        template_values = {}
        template_values['form_html'] = ObjectEditor.get_html_for(
            self, schema.get_json_schema(), schema.get_schema_dict(), None,
            None, None, required_modules=tag_class.required_modules(),
            extra_js_files=['add_custom_tag.js'])
        self.response.out.write(
            self.get_template('popup.html', []).render(template_values))
def create_bool_select_annotation(
    keys_list, label, true_label, false_label, class_name=None,
    description=None):
    """Creates inputex annotation to display bool type as a select.

    Args:
        keys_list: schema key path the annotation applies to.
        label: caption for the select control.
        true_label: display label for the True choice.
        false_label: display label for the False choice.
        class_name: optional CSS class name for the control.
        description: optional help text for the control.

    Returns:
        A (keys_list, annotation_dict) tuple.
    """
    inputex_props = {
        'label': label,
        'choices': [
            {'value': True, 'label': true_label},
            {'value': False, 'label': false_label}]}
    if class_name:
        inputex_props['className'] = class_name
    if description:
        inputex_props['description'] = description
    return (keys_list, {'type': 'select', '_inputex': inputex_props})
# Module instance created by register_module(); None until registration runs.
custom_module = None
def register_module():
    """Registers this module in the registry."""
    from controllers import sites  # pylint: disable-msg=g-import-not-at-top

    def zip_route(pattern, zip_name):
        # Route serving static files directly out of a bundled zip archive.
        return (pattern, sites.make_zip_handler(
            os.path.join(appengine_config.BUNDLE_ROOT, zip_name)))

    yui_handlers = [
        zip_route('/static/inputex-3.1.0/(.*)', 'lib/inputex-3.1.0.zip'),
        zip_route('/static/yui_3.6.0/(.*)', 'lib/yui_3.6.0.zip'),
        zip_route('/static/2in3/(.*)', 'lib/yui_2in3-2.9.0.zip')]

    if appengine_config.BUNDLE_LIB_FILES:
        # CSS combo-loader endpoints for the bundled libraries.
        yui_handlers += [
            ('/static/combo/inputex', sites.make_css_combo_zip_handler(
                os.path.join(
                    appengine_config.BUNDLE_ROOT, 'lib/inputex-3.1.0.zip'),
                '/static/inputex-3.1.0/')),
            ('/static/combo/yui', sites.make_css_combo_zip_handler(
                os.path.join(appengine_config.BUNDLE_ROOT, 'lib/yui_3.6.0.zip'),
                '/yui/')),
            ('/static/combo/2in3', sites.make_css_combo_zip_handler(
                os.path.join(
                    appengine_config.BUNDLE_ROOT, 'lib/yui_2in3-2.9.0.zip'),
                '/static/2in3/'))]

    global custom_module
    custom_module = custom_modules.Module(
        'Object Editor',
        'A visual editor for editing various types of objects.',
        yui_handlers, [('/oeditorpopup', PopupHandler)])
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Course explorer module."""
__author__ = 'Rahul Singal (rahulsingal@google.com)'
from common import safe_dom
from controllers import utils
from models import custom_modules
from models.config import ConfigProperty
from models.models import StudentProfileDAO
from modules.course_explorer import student
from google.appengine.api import users
# Boolean site setting: when True, '/' serves the course explorer page;
# when False, '/' goes to the default course's preview page.
GCB_ENABLE_COURSE_EXPLORER_PAGE = ConfigProperty(
    'gcb_enable_course_explorer_page', bool,
    safe_dom.NodeList().append(
        safe_dom.Element('p').add_text("""
If this option is selected, "/" redirects to the course explorer page.
Otherwise, it redirects to the preview page for the default course.""")
    ), False, multiline=False, validator=None)

# Module instance created by register_module(); None until registration runs.
custom_module = None
class ExplorerPageInitializer(utils.PageInitializer):
    """Page initializer for explorer page.

    Allow links to the course explorer to be added
    to the navbars of all course pages.
    """

    @classmethod
    def initialize(cls, template_values):
        """Adds explorer-related values to the page template dict.

        Args:
            template_values: dict consumed by page templates; mutated in place.
        """
        template_values.update(
            {'show_course_explorer_tab': GCB_ENABLE_COURSE_EXPLORER_PAGE.value})
        user = users.get_current_user()
        if user:
            # Reuse the user fetched above; the original issued a redundant
            # second users.get_current_user() call here.
            profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
            template_values.update({'has_global_profile': profile is not None})
def register_module():
    """Registers this module in the registry."""
    # Install the explorer-aware page initializer for all course pages.
    utils.PageInitializerService.set(ExplorerPageInitializer)

    # Map each global URL to its handler class.
    handlers_by_path = (
        ('/', student.IndexPageHandler),
        ('/explorer', student.AllCoursesHandler),
        (r'/explorer/assets/(.*)', student.AssetsHandler),
        ('/explorer/courses', student.RegisteredCoursesHandler),
        ('/explorer/profile', student.ProfileHandler))

    global custom_module
    custom_module = custom_modules.Module(
        'Course Explorer',
        'A set of pages for delivering an online course.',
        list(handlers_by_path), [])
    return custom_module
def unregister_module():
    """Unregisters this module in the registry.

    Returns:
        The module instance, mirroring register_module().
    """
    # Set the page initializer back to the default.
    utils.PageInitializerService.set(utils.DefaultPageInitializer)
    # Bug fix: the original returned the imported 'custom_modules' module
    # instead of this module's 'custom_module' instance.
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting courses viewed by a student."""
__author__ = 'Rahul Singal (rahulsingal@google.com)'
import mimetypes
import appengine_config
from common import jinja_utils
from controllers import sites
from controllers.utils import PageInitializerService
from controllers.utils import XsrfTokenManager
from models import courses as Courses
from models import transforms
from models.models import StudentProfileDAO
from models.roles import Roles
import webapp2
import course_explorer
from google.appengine.api import users
# We want to use views file in both /views and /modules/course_explorer/views.
DIR = appengine_config.BUNDLE_ROOT
# Locale shared by all explorer pages, taken from the base course template.
LOCALE = Courses.COURSE_TEMPLATE_DICT['base']['locale']
# XSRF action id protecting the global student-rename form.
STUDENT_RENAME_GLOBAL_XSRF_TOKEN_ID = 'rename-student-global'
# Int. Maximum number of bytes App Engine's db.StringProperty can store.
_STRING_PROPERTY_MAX_BYTES = 500
class IndexPageHandler(webapp2.RequestHandler):
    """Routes the site root URL to the explorer or the default course."""

    def get(self):
        """Handles GET requests."""
        explorer_on = course_explorer.GCB_ENABLE_COURSE_EXPLORER_PAGE.value
        target = '/explorer' if explorer_on else '/course'
        self.redirect(target)
class BaseStudentHandler(webapp2.RequestHandler):
    """Base Handler for a student's courses."""

    def __init__(self, *args, **kwargs):
        super(BaseStudentHandler, self).__init__(*args, **kwargs)
        # Values consumed by the page templates; populated by the methods below.
        self.template_values = {}
        self.initialize_student_state()

    def initialize_student_state(self):
        """Initialize course information related to student."""
        PageInitializerService.get().initialize(self.template_values)
        # Maps course namespace name -> enrollment info for the current user.
        self.enrolled_courses_dict = {}
        # Maps course namespace name -> progress info for the current user.
        self.courses_progress_dict = {}
        user = users.get_current_user()
        if not user:
            return
        profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
        if not profile:
            return
        self.template_values['register_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('register-post'))
        self.enrolled_courses_dict = transforms.loads(profile.enrollment_info)
        if self.enrolled_courses_dict:
            self.template_values['has_enrolled_courses'] = True
        if profile.course_info:
            self.courses_progress_dict = transforms.loads(profile.course_info)

    def get_public_courses(self):
        """Get all the public courses."""
        public_courses = []
        for course in sites.get_all_courses():
            info = sites.ApplicationContext.get_environ(course)
            if info['course']['now_available']:
                public_courses.append(course)
        return public_courses

    def is_enrolled(self, course):
        """Returns true if student is enrolled else false."""
        return bool(
            self.enrolled_courses_dict.get(course.get_namespace_name()))

    def is_completed(self, course):
        """Returns true if student has completed course else false."""
        # Presence of a 'final_grade' entry marks the course as completed.
        info = self.courses_progress_dict.get(course.get_namespace_name())
        if info and 'final_grade' in info:
            return True
        return False

    def get_course_info(self, course):
        """Returns course info required in views."""
        info = sites.ApplicationContext.get_environ(course)
        slug = course.get_slug()
        course_preview_url = slug
        # The root-mounted course has no usable slug; its preview is /course.
        if slug == '/':
            course_preview_url = '/course'
            slug = ''
        info['course']['slug'] = slug
        info['course']['course_preview_url'] = course_preview_url
        info['course']['is_registered'] = self.is_enrolled(course)
        info['course']['is_completed'] = self.is_completed(course)
        return info

    def get_enrolled_courses(self, courses):
        """Returns list of courses registered by student."""
        enrolled_courses = []
        for course in courses:
            if self.is_enrolled(course):
                enrolled_courses.append(self.get_course_info(course))
        return enrolled_courses

    def initialize_page_and_get_user(self):
        """Add basic fields to template and return user (None if anonymous)."""
        self.template_values['course_info'] = Courses.COURSE_TEMPLATE_DICT
        self.template_values['course_info']['course'] = {'locale': LOCALE}
        user = users.get_current_user()
        if not user:
            self.template_values['loginUrl'] = users.create_login_url('/')
        else:
            self.template_values['email'] = user.email()
            self.template_values['is_super_admin'] = Roles.is_super_admin()
            self.template_values['logoutUrl'] = users.create_logout_url('/')
        return user

    def is_valid_xsrf_token(self, action):
        """Returns whether the current request carries a valid XSRF token."""
        token = self.request.get('xsrf_token')
        return token and XsrfTokenManager.is_xsrf_token_valid(token, action)
class ProfileHandler(BaseStudentHandler):
    """Global profile handler for a student."""

    def _storable_in_string_property(self, value):
        # db.StringProperty can hold 500B. len(1_unicode_char) == 1, so len() is
        # not a good proxy for unicode string size. Instead, cast to utf-8-
        # encoded str first.
        return len(value.encode('utf-8')) <= _STRING_PROPERTY_MAX_BYTES

    def get(self):
        """Handles GET requests."""
        # NOTE(review): unlike post(), there is no anonymous-user guard here;
        # user.user_id() below would fail for a signed-out visitor. Presumably
        # sign-in is enforced upstream -- confirm before relying on it.
        user = self.initialize_page_and_get_user()
        courses = self.get_public_courses()
        self.template_values['student'] = (
            StudentProfileDAO.get_profile_by_user_id(user.user_id()))
        self.template_values['navbar'] = {'profile': True}
        self.template_values['courses'] = self.get_enrolled_courses(courses)
        self.template_values['student_edit_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token(
                STUDENT_RENAME_GLOBAL_XSRF_TOKEN_ID))
        template = jinja_utils.get_template(
            '/modules/course_explorer/views/profile.html', DIR, LOCALE)
        self.response.write(template.render(self.template_values))

    def post(self):
        """Handles post requests."""
        user = self.initialize_page_and_get_user()
        if not user:
            self.error(403)
            return
        if not self.is_valid_xsrf_token(STUDENT_RENAME_GLOBAL_XSRF_TOKEN_ID):
            self.error(403)
            return
        new_name = self.request.get('name')
        # Reject empty names and names too large for db.StringProperty.
        if not (new_name and self._storable_in_string_property(new_name)):
            self.error(400)
            return
        StudentProfileDAO.update(
            user.user_id(), None, nick_name=new_name, profile_only=True)
        self.redirect('/explorer/profile')
class AllCoursesHandler(BaseStudentHandler):
    """Handles list of courses that can be viewed by a student."""

    def get(self):
        """Handles GET requests."""
        self.initialize_page_and_get_user()
        course_infos = [
            self.get_course_info(c) for c in self.get_public_courses()]
        self.template_values['courses'] = course_infos
        self.template_values['navbar'] = {'course_explorer': True}
        template = jinja_utils.get_template(
            '/modules/course_explorer/views/course_explorer.html', DIR, LOCALE)
        self.response.write(template.render(self.template_values))
class RegisteredCoursesHandler(BaseStudentHandler):
    """Handles registered courses view for a student."""

    def get(self):
        """Handles GET request."""
        self.initialize_page_and_get_user()
        all_courses = self.get_public_courses()
        enrolled = self.get_enrolled_courses(all_courses)
        self.template_values['courses'] = enrolled
        self.template_values['navbar'] = {'mycourses': True}
        # Offer enrollment when some public course is not yet joined.
        self.template_values['can_enroll_more_courses'] = (
            len(all_courses) > len(enrolled))
        template = jinja_utils.get_template(
            '/modules/course_explorer/views/course_explorer.html', DIR, LOCALE)
        self.response.write(template.render(self.template_values))
class AssetsHandler(webapp2.RequestHandler):
    """Handles asset file for the home page."""

    def get_mime_type(self, filename, default='application/octet-stream'):
        """Guesses a MIME type from the file extension.

        Args:
            filename: path whose extension determines the type.
            default: type returned when no guess is possible.

        Returns:
            A MIME type string.
        """
        guess = mimetypes.guess_type(filename)[0]
        if guess is None:
            return default
        return guess

    def get(self, path):
        """Handles GET requests."""
        # SECURITY NOTE(review): 'path' comes straight from the URL and the
        # route regex permits '..' segments, so this may read files outside
        # assets/. Consider rejecting paths containing '..'.
        filename = '%s/assets/%s' % (appengine_config.BUNDLE_ROOT, path)
        # Bug fix: open in binary mode -- assets include images, and text mode
        # would corrupt them on platforms that translate line endings.
        with open(filename, 'rb') as f:
            self.response.headers['Content-Type'] = self.get_mime_type(filename)
            self.response.write(f.read())
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used in the admin panel."""
__author__ = 'John Orr (jorr@google.com)'
from common import safe_dom
def assemble_sanitized_message(text, link):
    """Builds a safe_dom NodeList with optional text and a 'Learn more' link.

    Args:
        text: optional message text; skipped when falsy.
        link: optional URL; when set, a 'Learn more...' anchor is appended.

    Returns:
        A safe_dom.NodeList.
    """
    nodes = safe_dom.NodeList()
    if text:
        nodes.append(safe_dom.Text(text))
    if link:
        anchor = safe_dom.Element('a', href=link, target='_blank')
        anchor.add_text('Learn more...')
        nodes.append(anchor)
    return nodes
# Page-description blurbs shown on the corresponding admin pages
# (see AdminHandler's 'page_description' template values).
COURSES_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/CreateNewCourse')

DEPLOYMENT_DESCRIPTION = assemble_sanitized_message("""
These deployment settings are configurable by editing the Course Builder code
before uploading it to Google App Engine.
""", 'https://code.google.com/p/course-builder/wiki/AdminPage')

METRICS_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/AdminPage')

SETTINGS_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/AdminPage')
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Site administration functionality."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import cgi
import cStringIO
import datetime
import os
import sys
import time
import urllib
from appengine_config import PRODUCTION_MODE
from common import jinja_utils
from common import safe_dom
from common import tags
from controllers import sites
from controllers.utils import ReflectiveRequestHandler
from models import config
from models import counters
from models import custom_modules
from models import roles
from models.config import ConfigProperty
import modules.admin.config
from modules.admin.config import ConfigPropertyEditor
import webapp2
import messages
from google.appengine.api import users
import google.appengine.api.app_identity as app
# Gates the admin 'Console' page and its 'console_run' action, which execute
# arbitrary Python on the server; keep False unless deliberately enabled.
DIRECT_CODE_EXECUTION_UI_ENABLED = False

# A time this module was initialized.
BEGINNING_OF_TIME = time.time()

# Error text for delegated admins (usage not visible in this excerpt).
DELEGATED_ACCESS_IS_NOT_ALLOWED = """
You must be an actual admin user to continue.
Users with the delegated admin rights are not allowed."""
def escape(text):
    """Escapes HTML in text; falsy values pass through unchanged."""
    if not text:
        return text
    return cgi.escape(text)
def evaluate_python_code(code):
    """Compiles and evaluates a Python script in a restricted environment.

    Args:
        code: str. Python source; CRLF line endings are normalized first.

    Returns:
        A (output, success) tuple: everything the script printed (plus any
        error text), and True iff the script ran without raising.
    """
    code = code.replace('\r\n', '\n')
    save_stdout = sys.stdout
    results_io = cStringIO.StringIO()
    try:
        # Capture anything the script prints while it runs.
        sys.stdout = results_io
        try:
            compiled_code = compile(code, '<string>', 'exec')
            # SECURITY: executes caller-supplied code in this module's globals;
            # reachable only via the admin console, which is gated by
            # DIRECT_CODE_EXECUTION_UI_ENABLED.
            exec(compiled_code, globals()) # pylint: disable-msg=exec-statement
        except Exception as e:  # pylint: disable-msg=broad-except
            results_io.write('Error: %s' % e)
            return results_io.getvalue(), False
    finally:
        # Always restore stdout, even when the script fails.
        sys.stdout = save_stdout
    return results_io.getvalue(), True
class AdminHandler(
    webapp2.RequestHandler, ReflectiveRequestHandler, ConfigPropertyEditor):
    """Handles all pages and actions required for administration of site."""

    # Action rendered when no ?action= query parameter is supplied.
    default_action = 'courses'
@property
def get_actions(self):
    """GET actions; 'console' appears only when code execution is enabled."""
    extra = ['console'] if DIRECT_CODE_EXECUTION_UI_ENABLED else []
    return [
        self.default_action, 'settings', 'deployment', 'perf',
        'config_edit', 'add_course'] + extra
@property
def post_actions(self):
    """POST actions; 'console_run' only when code execution is enabled."""
    extra = ['console_run'] if DIRECT_CODE_EXECUTION_UI_ENABLED else []
    return ['config_reset', 'config_override'] + extra
def can_view(self):
    """Checks if current user has viewing rights."""
    is_admin = roles.Roles.is_super_admin()
    return is_admin
def can_edit(self):
    """Checks if current user has editing rights."""
    # Editing requires exactly the same rights as viewing.
    return self.can_view()
def get(self):
    """Enforces rights to all GET operations."""
    if self.can_view():
        # Force a property reload so admins always see fresh values.
        # It is expensive, but admin deserves it!
        config.Registry.get_overrides(force_update=True)
        return super(AdminHandler, self).get()
    self.redirect('/')
def post(self):
    """Enforces rights to all POST operations."""
    if self.can_edit():
        return super(AdminHandler, self).post()
    self.redirect('/')
def get_template(self, template_name, dirs):
    """Sets up an environment and gets the jinja template."""
    # This module's directory is always the last search location.
    search_path = dirs + [os.path.dirname(__file__)]
    return jinja_utils.get_template(template_name, search_path)
def _get_user_nav(self):
    """Builds the top navigation bar, highlighting the current action."""
    current_action = self.request.get('action')
    # (action query value, link caption) pairs, in display order.
    nav_mappings = [
        ('', 'Courses'),
        ('settings', 'Settings'),
        ('perf', 'Metrics'),
        ('deployment', 'Deployment')]
    if DIRECT_CODE_EXECUTION_UI_ENABLED:
        nav_mappings.append(('console', 'Console'))
    nav = safe_dom.NodeList()
    for action, title in nav_mappings:
        # Mark the tab whose action is currently being viewed.
        if action == current_action:
            elt = safe_dom.Element(
                'a', href='/admin?action=%s' % action,
                className='selected')
        else:
            elt = safe_dom.Element('a', href='/admin?action=%s' % action)
        elt.add_text(title)
        nav.append(elt).append(safe_dom.Text(' '))
    if PRODUCTION_MODE:
        # Link to the production App Engine dashboard for this app id.
        app_id = app.get_application_id()
        nav.append(safe_dom.Element(
            'a', target='_blank',
            href=(
                'https://appengine.google.com/'
                'dashboard?app_id=s~%s' % app_id)
        ).add_text('Google App Engine'))
    else:
        # Dev server: link to the local admin console instead.
        nav.append(safe_dom.Element(
            'a', target='_blank', href='http://localhost:8000/'
        ).add_text('Google App Engine')).append(safe_dom.Text(' '))
    nav.append(safe_dom.Element(
        'a', target='_blank',
        href='https://code.google.com/p/course-builder/wiki/AdminPage'
    ).add_text('Help'))
    return nav
def render_page(self, template_values):
    """Renders a page using provided template values."""
    template_values['top_nav'] = self._get_user_nav()
    # "user@example.com | Logout" block shown in the page header.
    template_values['user_nav'] = safe_dom.NodeList().append(
        safe_dom.Text('%s | ' % users.get_current_user().email())
    ).append(
        safe_dom.Element(
            'a', href=users.create_logout_url(self.request.uri)
        ).add_text('Logout')
    )
    template_values[
        'page_footer'] = 'Created on: %s' % datetime.datetime.now()
    self.response.write(
        self.get_template('view.html', []).render(template_values))
def render_dict(self, source_dict, title):
    """Renders a dictionary as a titled ordered list, sorted by key."""
    content = safe_dom.NodeList()
    content.append(safe_dom.Element('h3').add_text(title))
    items = safe_dom.Element('ol')
    content.append(items)
    for key in sorted(source_dict.keys()):
        value = source_dict[key]
        # Unwrap ConfigProperty objects down to their current value.
        if isinstance(value, ConfigProperty):
            value = value.value
        entry = safe_dom.Element('li')
        entry.add_text('%s: %s' % (key, value))
        items.add_child(entry)
    return content
def format_title(self, text):
    """Formats the standard 'Course Builder > Admin > <text>' title."""
    title = safe_dom.NodeList()
    title.append(safe_dom.Text('Course Builder '))
    title.append(safe_dom.Entity('&gt;'))
    title.append(safe_dom.Text(' Admin '))
    title.append(safe_dom.Entity('&gt;'))
    title.append(safe_dom.Text(' %s' % text))
    return title
def get_perf(self):
    """Shows server performance counters page."""
    template_values = {}
    template_values['page_title'] = self.format_title('Metrics')
    template_values['page_description'] = messages.METRICS_DESCRIPTION
    perf_counters = {}
    # built in counters
    perf_counters['gcb-admin-uptime-sec'] = long(
        time.time() - BEGINNING_OF_TIME)
    # config counters
    perf_counters['gcb-config-overrides'] = len(
        config.Registry.get_overrides())
    perf_counters['gcb-config-age-sec'] = (
        long(time.time()) - config.Registry.last_update_time)
    perf_counters['gcb-config-update-time-sec'] = (
        config.Registry.last_update_time)
    perf_counters['gcb-config-update-index'] = config.Registry.update_index
    # add all registered counters, rendered as "local / global" with 'NA'
    # standing in when no global value is available
    all_counters = counters.Registry.registered.copy()
    for name in all_counters.keys():
        global_value = all_counters[name].global_value
        if not global_value:
            global_value = 'NA'
        perf_counters[name] = '%s / %s' % (
            all_counters[name].value, global_value)
    template_values['main_content'] = self.render_dict(
        perf_counters, 'In-process Performance Counters (local/global)')
    self.render_page(template_values)
def _make_routes_dom(self, parent_element, routes, caption):
    """Renders routes as a captioned nested list under parent_element."""
    if not routes:
        return
    # Stringify the truthy routes, then present them sorted.
    route_names = sorted(str(route) for route in routes if route)
    outer = safe_dom.Element('ul')
    parent_element.add_child(outer)
    outer.add_child(safe_dom.Element('li').add_text(caption))
    inner = safe_dom.Element('ul')
    outer.add_child(inner)
    for name in route_names:
        if name:
            inner.add_child(safe_dom.Element('li').add_text(name))
def get_deployment(self):
    """Shows server environment and deployment information page."""
    template_values = {}
    template_values['page_title'] = self.format_title('Deployment')
    template_values['page_description'] = messages.DEPLOYMENT_DESCRIPTION

    # Custom modules: each registered module with its routes.
    module_content = safe_dom.NodeList()
    module_content.append(
        safe_dom.Element('h3').add_text('Custom Modules'))
    ol = safe_dom.Element('ol')
    module_content.append(ol)
    for name in sorted(custom_modules.Registry.registered_modules.keys()):
        enabled_text = ''
        if name not in custom_modules.Registry.enabled_module_names:
            enabled_text = ' (disabled)'
        li = safe_dom.Element('li').add_text('%s%s' % (name, enabled_text))
        ol.add_child(li)
        amodule = custom_modules.Registry.registered_modules.get(name)
        self._make_routes_dom(
            li, amodule.global_routes, 'Global Routes')
        self._make_routes_dom(
            li, amodule.namespaced_routes, 'Namespaced Routes')

    # Custom tags.
    tag_content = safe_dom.NodeList()
    tag_content.append(
        safe_dom.Element('h3').add_text('Custom Tags'))
    ol = safe_dom.Element('ol')
    tag_content.append(ol)
    tag_bindings = tags.get_tag_bindings()
    for name in sorted(tag_bindings.keys()):
        clazz = tag_bindings.get(name)
        tag = clazz()
        vendor = tag.vendor()
        ol.add_child(safe_dom.Element('li').add_text(
            '%s: %s: %s' % (name, tag.__class__.__name__, vendor)))

    # Yaml file content.
    yaml_content = safe_dom.NodeList()
    yaml_content.append(
        safe_dom.Element('h3').add_text('Contents of ').add_child(
            safe_dom.Element('code').add_text('app.yaml')))
    ol = safe_dom.Element('ol')
    yaml_content.append(ol)
    # Bug fix: close the file handle when done; the original leaked it.
    yaml_path = os.path.join(os.path.dirname(__file__), '../../app.yaml')
    with open(yaml_path, 'r') as yaml_file:
        yaml_lines = yaml_file.readlines()
    for line in yaml_lines:
        ol.add_child(safe_dom.Element('li').add_text(line))

    # Application identity.
    app_id = app.get_application_id()
    app_dict = {}
    app_dict['application_id'] = escape(app_id)
    app_dict['default_ver_hostname'] = escape(
        app.get_default_version_hostname())

    template_values['main_content'] = safe_dom.NodeList().append(
        self.render_dict(app_dict, 'About the Application')
    ).append(
        module_content
    ).append(
        tag_content
    ).append(
        yaml_content
    ).append(
        self.render_dict(os.environ, 'Server Environment Variables'))
    self.render_page(template_values)
def get_settings(self):
"""Shows configuration properties information page."""
template_values = {}
template_values['page_title'] = self.format_title('Settings')
template_values['page_description'] = messages.SETTINGS_DESCRIPTION
content = safe_dom.NodeList()
table = safe_dom.Element('table', className='gcb-config').add_child(
safe_dom.Element('tr').add_child(
safe_dom.Element('th').add_text('Name')
).add_child(
safe_dom.Element('th').add_text('Current Value')
).add_child(
safe_dom.Element('th').add_text('Actions')
).add_child(
safe_dom.Element('th').add_text('Description')
))
content.append(
safe_dom.Element('h3').add_text('All Settings')
).append(table)
def get_style_for(value, value_type):
"""Formats CSS style for given value."""
style = ''
if not value or value_type in [int, long, bool]:
style = 'text-align: center;'
return style
def get_action_html(caption, args, onclick=None):
"""Formats actions <a> link."""
a = safe_dom.Element(
'a', href='/admin?%s' % urllib.urlencode(args),
className='gcb-button'
).add_text(caption)
if onclick:
a.add_attribute(onclick=onclick)
return a
def get_actions(name, override):
"""Creates actions appropriate to an item."""
if override:
return get_action_html('Edit', {
'action': 'config_edit', 'name': name})
else:
return safe_dom.Element(
'form',
action='/admin?%s' % urllib.urlencode(
{'action': 'config_override', 'name': name}),
method='POST'
).add_child(
safe_dom.Element(
'input', type='hidden', name='xsrf_token',
value=self.create_xsrf_token('config_override'))
).add_child(
safe_dom.Element(
'button', className='gcb-button', type='submit'
).add_text('Override'))
def get_doc_string(item, default_value):
"""Formats an item documentation string for display."""
doc_string = item.doc_string
if not doc_string:
doc_string = 'No documentation available.'
if isinstance(doc_string, safe_dom.NodeList) or isinstance(
doc_string, safe_dom.Node):
return safe_dom.NodeList().append(doc_string).append(
safe_dom.Text(' Default: \'%s\'.' % default_value))
doc_string = ' %s Default: \'%s\'.' % (doc_string, default_value)
return safe_dom.Text(doc_string)
def get_lines(value):
"""Convert \\n line breaks into <br> and escape the lines."""
escaped_value = safe_dom.NodeList()
for line in str(value).split('\n'):
escaped_value.append(
safe_dom.Text(line)).append(safe_dom.Element('br'))
return escaped_value
# get fresh properties and their overrides
unused_overrides = config.Registry.get_overrides(force_update=True)
registered = config.Registry.registered.copy()
db_overrides = config.Registry.db_overrides.copy()
names_with_draft = config.Registry.names_with_draft.copy()
count = 0
for name in sorted(registered.keys()):
count += 1
item = registered[name]
has_environ_value, unused_environ_value = item.get_environ_value()
# figure out what kind of override this is
class_current = ''
if has_environ_value:
class_current = 'gcb-env-diff'
if item.name in db_overrides:
class_current = 'gcb-db-diff'
if item.name in names_with_draft:
class_current = 'gcb-db-draft'
# figure out default and current value
default_value = item.default_value
value = item.value
if default_value:
default_value = str(default_value)
if value:
value = str(value)
style_current = get_style_for(value, item.value_type)
tr = safe_dom.Element('tr')
table.add_child(tr)
tr.add_child(
safe_dom.Element(
'td', style='white-space: nowrap;').add_text(item.name))
td_value = safe_dom.Element('td').add_child(get_lines(value))
if style_current:
td_value.add_attribute(style=style_current)
if class_current:
td_value.add_attribute(className=class_current)
tr.add_child(td_value)
tr.add_child(
safe_dom.Element(
'td', style='white-space: nowrap;', align='center'
).add_child(get_actions(
name, name in db_overrides or name in names_with_draft)))
tr.add_child(
safe_dom.Element(
'td').add_child(get_doc_string(item, default_value)))
table.add_child(
safe_dom.Element('tr').add_child(
safe_dom.Element(
'td', colspan='4', align='right'
).add_text('Total: %s item(s)' % count)))
content.append(
safe_dom.Element('p').add_child(
safe_dom.Element('strong').add_text('Legend')
).add_text(':').add_text("""
For each property, the value shown corresponds to, in
descending order of priority:
""").add_child(
safe_dom.Element('span', className='gcb-db-diff').add_child(
safe_dom.Entity(' ')
).add_text(
'[ the value override set via this page ]'
).add_child(safe_dom.Entity(' '))
).add_text(', ').add_child(
safe_dom.Element('span', className='gcb-db-draft').add_child(
safe_dom.Entity(' ')
).add_text(
'[ the default value with pending value override ]'
).add_child(safe_dom.Entity(' '))
).add_text(', ').add_child(
safe_dom.Element('span', className='gcb-env-diff').add_child(
safe_dom.Entity(' ')
).add_text(
'[ the environment value in app.yaml ]'
).add_child(safe_dom.Entity(' '))
).add_text(', ').add_text("""
and the [ default value ] in the Course Builder codebase.
"""))
template_values['main_content'] = content
self.render_page(template_values)
def get_courses(self):
"""Shows a list of all courses available on this site.

Builds the admin 'Courses' page as a safe_dom tree: an 'Add Course'
button, a table with one row per course, and a footer with the count.
"""
template_values = {}
template_values['page_title'] = self.format_title('Courses')
template_values['page_description'] = messages.COURSES_DESCRIPTION
content = safe_dom.NodeList()
# 'Add Course' button, a clearing separator and the page heading.
content.append(
safe_dom.Element(
'a', id='add_course', className='gcb-button gcb-pull-right',
role='button', href='admin?action=add_course'
).add_text('Add Course')
).append(
safe_dom.Element('div', style='clear: both; padding-top: 2px;')
).append(
safe_dom.Element('h3').add_text('All Courses')
)
table = safe_dom.Element('table')
content.append(table)
# Header row for the course list table.
table.add_child(
safe_dom.Element('tr').add_child(
safe_dom.Element('th').add_text('Course Title')
).add_child(
safe_dom.Element('th').add_text('Context Path')
).add_child(
safe_dom.Element('th').add_text('Student Data Location')
).add_child(
safe_dom.Element('th').add_text('Content Location')
)
)
courses = sites.get_all_courses()
count = 0
# One row per course; title errors are rendered inline in the row
# rather than failing the whole page.
for course in courses:
count += 1
error = safe_dom.Text('')
slug = course.get_slug()
# get_title() reads course.yaml; if it raises, show the error in
# the row and fall back to a placeholder name.
try:
name = course.get_title()
except Exception as e: # pylint: disable-msg=broad-except
name = 'UNKNOWN COURSE'
error = safe_dom.Element('p').add_text('Error in ').add_child(
safe_dom.Element('strong').add_text('course.yaml')
).add_text(' file. ').add_child(
safe_dom.Element('br')
).add_child(
safe_dom.Element('pre').add_text('\n%s\n%s\n' % (
e.__class__.__name__, str(e)))
)
# Content lives either in the datastore (read-write fs) or on disk.
if course.fs.is_read_write():
location = 'namespace: %s' % course.get_namespace_name()
else:
location = 'disk: %s' % sites.abspath(
course.get_home_folder(), '/')
if slug == '/':
link = '/dashboard'
else:
link = '%s/dashboard' % slug
link = safe_dom.Element('a', href=link).add_text(name)
table.add_child(
safe_dom.Element('tr').add_child(
safe_dom.Element('td').add_child(link).add_child(error)
).add_child(
safe_dom.Element('td').add_text(slug)
).add_child(
safe_dom.Element('td').add_text(location)
).add_child(
safe_dom.Element('td').add_text(
'namespace: %s' % course.get_namespace_name())
))
# Footer row with the total course count.
table.add_child(
safe_dom.Element('tr').add_child(
safe_dom.Element('td', colspan='4', align='right').add_text(
'Total: %s item(s)' % count)))
template_values['main_content'] = content
self.render_page(template_values)
def get_console(self):
"""Shows interactive Python console page.

Renders a warning, instructions, and a form that POSTs the code to
the 'console_run' action (handled by post_console_run below).
"""
template_values = {}
template_values['page_title'] = self.format_title('Console')
# Check rights. Only a *direct* super admin may use the console;
# delegated access is rejected.
if not roles.Roles.is_direct_super_admin():
template_values['main_content'] = DELEGATED_ACCESS_IS_NOT_ALLOWED
self.render_page(template_values)
return
content = safe_dom.NodeList()
content.append(
safe_dom.Element('p').add_child(
safe_dom.Element('i').add_child(
safe_dom.Element('strong').add_text('WARNING!')
).add_text("""
 The Interactive Console has the same
access to the application's environment and services as a .py file
inside the application itself. Be careful, because this means writes
to your data store will be executed for real!""")
)
).append(
safe_dom.Element('p').add_child(
safe_dom.Element('strong').add_text("""
Input your Python code below and press "Run Program" to execute.""")
)
).append(
# Form posts the code to /admin?action=console_run with an XSRF
# token bound to the 'console_run' action.
safe_dom.Element(
'form', action='/admin?action=console_run', method='POST'
).add_child(
safe_dom.Element(
'input', type='hidden', name='xsrf_token',
value=self.create_xsrf_token('console_run'))
).add_child(
safe_dom.Element(
'textarea', style='width: 95%; height: 200px;',
name='code')
).add_child(
safe_dom.Element('p', align='center').add_child(
safe_dom.Element(
'button', className='gcb-button', type='submit'
).add_text('Run Program')
)
)
)
template_values['main_content'] = content
self.render_page(template_values)
def post_console_run(self):
"""Executes dynamically submitted Python code.

SECURITY: this runs arbitrary code submitted via the console form;
it is gated below to direct super admins only.
"""
template_values = {}
template_values['page_title'] = self.format_title('Execution Results')
# Check rights. Same direct-super-admin gate as get_console().
if not roles.Roles.is_direct_super_admin():
template_values['main_content'] = DELEGATED_ACCESS_IS_NOT_ALLOWED
self.render_page(template_values)
return
# Execute code.
code = self.request.get('code')
time_before = time.time()
output, results = evaluate_python_code(code)
# long() truncates the elapsed time to whole seconds (Python 2).
duration = long(time.time() - time_before)
# Status reflects the truthiness of 'results' from
# evaluate_python_code().
status = 'FAILURE'
if results:
status = 'SUCCESS'
# Render results: the submitted code as a numbered list, then the
# status/duration, then the captured program output.
content = safe_dom.NodeList()
content.append(
safe_dom.Element('h3').add_text('Submitted Python Code'))
ol = safe_dom.Element('ol')
content.append(ol)
for line in code.split('\n'):
ol.add_child(safe_dom.Element('li').add_text(line))
content.append(
safe_dom.Element('h3').add_text('Execution Results')
).append(
safe_dom.Element('ol').add_child(
safe_dom.Element('li').add_text('Status: %s' % status)
).add_child(
safe_dom.Element('li').add_text('Duration (sec): %s' % duration)
)
).append(
safe_dom.Element('h3').add_text('Program Output')
).append(
safe_dom.Element('blockquote').add_child(
safe_dom.Element('pre').add_text(output))
)
template_values['main_content'] = content
self.render_page(template_values)
custom_module = None
def register_module():
    """Registers this module in the registry.

    Returns:
        The custom_modules.Module instance describing the Site Admin pages.
    """
    global custom_module
    # Route table: the main admin UI plus the two REST endpoints it uses.
    routes = [
        ('/admin', AdminHandler),
        ('/rest/config/item',
         modules.admin.config.ConfigPropertyItemRESTHandler),
        ('/rest/courses/item', modules.admin.config.CoursesItemRESTHandler),
    ]
    custom_module = custom_modules.Module(
        'Site Admin',
        'A set of pages for Course Builder site administrator.',
        routes, [])
    return custom_module
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting configuration property editor and REST operations."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import cgi
import urllib
from controllers import sites
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import config
from models import courses
from models import models
from models import roles
from models import transforms
from modules.oeditor import oeditor
from google.appengine.api import users
from google.appengine.ext import db
# This is a template because the value type is not yet known.
# The '%s' placeholder is substituted with the inputex type name
# (see ConfigPropertyEditor.get_value_type) before the JSON is used.
SCHEMA_JSON_TEMPLATE = """
{
"id": "Configuration Property",
"type": "object",
"description": "Configuration Property Override",
"properties": {
"name" : {"type": "string"},
"value": {"optional": true, "type": "%s"},
"is_draft": {"type": "boolean"}
}
}
"""
# This is a template because the doc_string is not yet known.
# ConfigPropertyEditor.get_schema_annotations copies this list and appends
# the 'value' field annotation with the property-specific description.
SCHEMA_ANNOTATIONS_TEMPLATE = [
(['title'], 'Configuration Property Override'),
(['properties', 'name', '_inputex'], {
'label': 'Name', '_type': 'uneditable'}),
oeditor.create_bool_select_annotation(
['properties', 'is_draft'], 'Status', 'Pending', 'Active',
description='<strong>Active</strong>: This value is active and '
'overrides all other defaults.<br/><strong>Pending</strong>: This '
'value is not active yet, and the default settings still apply.')]
class ConfigPropertyRights(object):
    """Manages view/edit rights for configuration properties."""

    @classmethod
    def can_edit(cls):
        # Only a site super admin may change configuration properties.
        return roles.Roles.is_super_admin()

    @classmethod
    def can_view(cls):
        return cls.can_edit()

    @classmethod
    def can_delete(cls):
        return cls.can_edit()

    @classmethod
    def can_add(cls):
        return cls.can_edit()
class ConfigPropertyEditor(object):
    """An editor for any configuration property."""

    # Map of configuration property type into inputex type.
    type_map = {str: 'string', int: 'integer', bool: 'boolean'}

    @classmethod
    def get_schema_annotations(cls, config_property):
        """Gets editor specific schema annotations."""
        doc_string = '%s Default: \'%s\'.' % (
            config_property.doc_string, config_property.default_value)
        # Copy the shared template so appending does not mutate it.
        item_dict = list(SCHEMA_ANNOTATIONS_TEMPLATE)
        item_dict.append((
            ['properties', 'value', '_inputex'], {
                'label': 'Value', '_type': '%s' % cls.get_value_type(
                    config_property),
                'description': doc_string}))
        return item_dict

    @classmethod
    def get_value_type(cls, config_property):
        """Gets an editor specific type for the property.

        Raises:
            Exception: if the property's value type has no editor mapping.
        """
        # Use .get() so an unmapped type reaches the descriptive error
        # below; indexing the dict directly raised a bare KeyError first,
        # which made the 'if not value_type' guard unreachable. The old
        # raise also passed the value as a second argument instead of
        # formatting it with %.
        value_type = cls.type_map.get(config_property.value_type)
        if not value_type:
            raise Exception(
                'Unknown type: %s' % config_property.value_type)
        if config_property.value_type == str and config_property.multiline:
            return 'text'
        return value_type

    @classmethod
    def get_schema_json(cls, config_property):
        """Gets JSON schema for configuration property."""
        return SCHEMA_JSON_TEMPLATE % cls.get_value_type(config_property)
def get_add_course(self):
    """Handles 'add_course' action and renders new course entry editor."""
    # The object editor posts to the courses REST endpoint and returns to
    # the course list when done.
    editor_html = oeditor.ObjectEditor.get_html_for(
        self, CoursesItemRESTHandler.SCHEMA_JSON,
        CoursesItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
        None, CoursesItemRESTHandler.URI, '/admin?action=courses',
        auto_return=True,
        save_button_caption='Add New Course')
    self.render_page({
        'page_title': 'Course Builder - Add Course',
        'main_content': editor_html,
    })
def get_config_edit(self):
    """Handles 'edit' property action; renders the property editor.

    Redirects back to the settings page when the 'name' argument is
    missing or does not name a registered property.
    """
    key = self.request.get('name')
    if not key:
        self.redirect('/admin?action=settings')
        return  # redirect() does not abort the handler; stop explicitly.
    # .get() avoids a KeyError for an unknown name; the original direct
    # indexing raised before the 'if not item' guard could ever run.
    item = config.Registry.registered.get(key)
    if not item:
        self.redirect('/admin?action=settings')
        return
    template_values = {}
    template_values[
        'page_title'] = 'Course Builder - Edit Settings'
    exit_url = '/admin?action=settings#%s' % cgi.escape(key)
    rest_url = '/rest/config/item'
    # Delete link resets the override; it carries its own XSRF token.
    delete_url = '/admin?%s' % urllib.urlencode({
        'action': 'config_reset',
        'name': key,
        'xsrf_token': cgi.escape(self.create_xsrf_token('config_reset'))})
    template_values['main_content'] = oeditor.ObjectEditor.get_html_for(
        self, ConfigPropertyEditor.get_schema_json(item),
        ConfigPropertyEditor.get_schema_annotations(item),
        key, rest_url, exit_url, delete_url=delete_url)
    self.render_page(template_values)
def post_config_override(self):
    """Handles 'override' property action.

    Creates a draft ConfigPropertyEntity override for a registered
    property and redirects to its editor.
    """
    name = self.request.get('name')
    # Find item in registry.
    item = None
    if name and name in config.Registry.registered.keys():
        item = config.Registry.registered[name]
    if not item:
        self.redirect('/admin?action=settings')
        return  # Without this the handler would override an unknown name.
    # Add new entity if does not exist.
    try:
        entity = config.ConfigPropertyEntity.get_by_key_name(name)
    except db.BadKeyError:
        entity = None
    if not entity:
        # NOTE(review): the new entity is seeded with the current value as
        # a draft only when freshly created — confirm against the original
        # indentation, which this copy of the file has lost.
        entity = config.ConfigPropertyEntity(key_name=name)
        entity.value = str(item.value)
        entity.is_draft = True
    entity.put()
    models.EventEntity.record(
        'override-property', users.get_current_user(), transforms.dumps({
            'name': name, 'value': str(entity.value)}))
    self.redirect('/admin?%s' % urllib.urlencode(
        {'action': 'config_edit', 'name': name}))
def post_config_reset(self):
    """Handles 'reset' property action.

    Deletes a property's override entity (if any), records the deletion
    event, and returns to the settings page.
    """
    name = self.request.get('name')
    # Find item in registry.
    item = None
    if name and name in config.Registry.registered.keys():
        item = config.Registry.registered[name]
    if not item:
        self.redirect('/admin?action=settings')
        return  # redirect() does not abort; avoid the delete logic below.
    # Delete if exists.
    try:
        entity = config.ConfigPropertyEntity.get_by_key_name(name)
        if entity:
            old_value = entity.value
            entity.delete()
            models.EventEntity.record(
                'delete-property', users.get_current_user(),
                transforms.dumps({
                    'name': name, 'value': str(old_value)}))
    except db.BadKeyError:
        # Malformed key: nothing to delete; fall through to the redirect.
        pass
    self.redirect('/admin?action=settings')
# Registered at /rest/courses/item; backs the 'Add Course' editor.
class CoursesItemRESTHandler(BaseRESTHandler):
"""Provides REST API for course entries."""
URI = '/rest/courses/item'
# JSON schema of the new-course payload consumed by the object editor.
SCHEMA_JSON = """
{
"id": "Course Entry",
"type": "object",
"description": "Course Entry",
"properties": {
"name": {"type": "string"},
"title": {"type": "string"},
"admin_email": {"type": "string"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
# Editor field labels keyed by JSON-schema path.
SCHEMA_ANNOTATIONS_DICT = [
(['title'], 'New Course Entry'),
(['properties', 'name', '_inputex'], {'label': 'Unique Name'}),
(['properties', 'title', '_inputex'], {'label': 'Course Title'}),
(['properties', 'admin_email', '_inputex'], {
'label': 'Course Admin Email'})]
def get(self):
    """Handles HTTP GET verb: returns defaults for a new course entry."""
    if not ConfigPropertyRights.can_view():
        transforms.send_json_response(self, 401, 'Access denied.')
        return
    # Pre-populate the editor with placeholder values and the caller's
    # own email as course admin.
    defaults = {
        'name': 'new_course',
        'title': 'My New Course',
        'admin_email': self.get_user().email(),
    }
    token = XsrfTokenManager.create_xsrf_token('add-course-put')
    transforms.send_json_response(
        self, 200, 'Success.', payload_dict=defaults, xsrf_token=token)
def put(self):
"""Handles HTTP PUT verb.

Validates the XSRF token and caller rights, adds a new course entry,
then initializes the new course's title and admin email. Responds 412
with accumulated error text on any failure.
"""
request = transforms.loads(self.request.get('request'))
# XSRF check must come first; it sends its own failure response.
if not self.assert_xsrf_token_or_fail(
request, 'add-course-put', {}):
return
if not ConfigPropertyRights.can_edit():
transforms.send_json_response(
self, 401, 'Access denied.')
return
payload = request.get('payload')
json_object = transforms.loads(payload)
name = json_object.get('name')
title = json_object.get('title')
admin_email = json_object.get('admin_email')
# Add the new course entry. Errors are accumulated into the list that
# is passed in by reference.
errors = []
entry = sites.add_new_course_entry(name, title, admin_email, errors)
if not entry:
errors.append('Error adding a new course entry.')
if errors:
transforms.send_json_response(self, 412, '\n'.join(errors))
return
# We can't expect our new configuration being immediately available due
# to datastore queries consistency limitations. So we will instantiate
# our new course here and not use the normal sites.get_all_courses().
app_context = sites.get_all_courses(entry)[0]
# Update course with a new title and admin email.
new_course = courses.Course(None, app_context=app_context)
if not new_course.init_new_course_settings(title, admin_email):
transforms.send_json_response(
self, 412,
'Added new course entry, but failed to update title and/or '
'admin email. The course.yaml file already exists and must be '
'updated manually.')
return
transforms.send_json_response(
self, 200, 'Added.', {'entry': entry})
# Registered at /rest/config/item; backs the settings property editor.
class ConfigPropertyItemRESTHandler(BaseRESTHandler):
"""Provides REST API for a configuration property."""
def get(self):
    """Handles REST GET verb and returns an object as JSON payload.

    Responds 401 without rights, 404 when no override entity exists,
    and redirects to settings for an unregistered key.
    """
    key = self.request.get('key')
    if not ConfigPropertyRights.can_view():
        transforms.send_json_response(
            self, 401, 'Access denied.', {'key': key})
        return
    item = None
    if key and key in config.Registry.registered.keys():
        item = config.Registry.registered[key]
    if not item:
        self.redirect('/admin?action=settings')
        return  # redirect() does not abort; avoid querying a bad key below.
    try:
        entity = config.ConfigPropertyEntity.get_by_key_name(key)
    except db.BadKeyError:
        entity = None
    if not entity:
        transforms.send_json_response(
            self, 404, 'Object not found.', {'key': key})
    else:
        # Convert the stored string back into the property's native type
        # before shipping it to the editor.
        entity_dict = {'name': key, 'is_draft': entity.is_draft}
        entity_dict['value'] = transforms.string_to_value(
            entity.value, item.value_type)
        json_payload = transforms.dict_to_json(
            entity_dict,
            transforms.loads(
                ConfigPropertyEditor.get_schema_json(item)))
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'config-property-put'))
def put(self):
    """Handles REST PUT verb with JSON payload.

    Validates the XSRF token, caller rights and the submitted value,
    then updates the override entity and records an audit event.
    """
    request = transforms.loads(self.request.get('request'))
    key = request.get('key')
    if not self.assert_xsrf_token_or_fail(
            request, 'config-property-put', {'key': key}):
        return
    if not ConfigPropertyRights.can_edit():
        transforms.send_json_response(
            self, 401, 'Access denied.', {'key': key})
        return
    item = None
    if key and key in config.Registry.registered.keys():
        item = config.Registry.registered[key]
    if not item:
        self.redirect('/admin?action=settings')
        return  # redirect() does not abort; stop before touching the store.
    try:
        entity = config.ConfigPropertyEntity.get_by_key_name(key)
    except db.BadKeyError:
        transforms.send_json_response(
            self, 404, 'Object not found.', {'key': key})
        return
    if not entity:
        # get_by_key_name() returns None when no override exists; report
        # 404 (consistent with get()) instead of failing on entity.value.
        transforms.send_json_response(
            self, 404, 'Object not found.', {'key': key})
        return
    payload = request.get('payload')
    json_object = transforms.loads(payload)
    # Coerce the submitted value into the property's native type.
    new_value = item.value_type(json_object['value'])
    # Validate the value.
    errors = []
    if item.validator:
        item.validator(new_value, errors)
    if errors:
        transforms.send_json_response(self, 412, '\n'.join(errors))
        return
    # Update entity.
    old_value = entity.value
    entity.value = str(new_value)
    entity.is_draft = json_object['is_draft']
    entity.put()
    models.EventEntity.record(
        'put-property', users.get_current_user(), transforms.dumps({
            'name': key,
            'before': str(old_value), 'after': str(entity.value)}))
    transforms.send_json_response(self, 200, 'Saved.')
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oauth2 module implementation.
In order to use this module with your app you must enable it in main.py by
changing
modules.oauth2.oauth2.register_module()
to
modules.oauth2.oauth2.register_module().enable()
Additionally, you must:
1. Visit https://code.google.com/apis/console. Click on API Access and create a
client id for your web app with redirect URI set to:
https://<appid>.appspot|googleplex.com/<callback_uri>
and optionally include
http://localhost:<port>/<callback_uri>
where <appid> is your app id, <callback_uri> is the oauth2 callback URI you'd
like to use, and <port> is the port you'd like to use for localhost. You can
set <port> and <callback_uri> to basically whatever you want as long as they
are unique.
2. Once you've created the client id, click Download JSON. Take the file you get
and overwrite client_secrets.json in this directory.
3. In https://code.google.com/apis/console, click on Services and enable the
services your app requires. For these demos, you'll need to enable Drive API
and Google+.
Whenever you change scopes you'll need to revoke your access tokens. You can do
this at https://accounts.google.com/b/0/IssuedAuthSubTokens.
You can find a list of the available APIs at
http://api-python-client-doc.appspot.com/.
Finally, a note about dependencies. Oauth2 requires google-api-python-client,
which you can find at https://code.google.com/p/google-api-python-client/. We
bundle version 1.1 with Course Builder. It requires httplib2, which you can find
at https://code.google.com/p/httplib2/. We bundle version 0.8 with Course
Builder.
It also requires python-gflags from https://code.google.com/p/python-gflags/. We
bundle 2.0 with Course Builder, and we've repackaged the downloadable .tar.gz as
a .zip so Python can load its contents directly from sys.path.
Good luck!
"""
__author__ = [
'johncox@google.com (John Cox)',
]
import os
import traceback
from apiclient import discovery
from common import safe_dom
from models import custom_modules
from oauth2client import appengine
import webapp2
# In real life we'd check in a blank file and set up the code to error with a
# message pointing people to https://code.google.com/apis/console.
# Path of the OAuth2 client secrets file bundled next to this module.
_CLIENTSECRETS_JSON_PATH = os.path.join(
os.path.dirname(__file__), 'client_secrets.json')
class _ErrorDecorator(object):
    """Stand-in used when a real oauth2 decorator cannot be created.

    Most often this happens because there is no valid
    client_secrets.json. Methods wrapped by this decorator become a
    handler that renders the stored error (or nothing when no error
    was given).
    """

    def __init__(self, **kwargs):
        self.callback_path = 'not_enabled'
        self.error = kwargs.pop('error', '')

    def callback_handler(self):
        """Stub for API compatibility; does nothing."""
        pass

    def oauth_required(self, unused_method):
        """Replaces the wrapped method with one serving a 500 error page."""
        def _serve_error(request_handler, *unused_args, **unused_kwargs):
            body = safe_dom.NodeList()
            body.append(
                safe_dom.Element('h1').add_text('500 internal server error'))
            body.append(safe_dom.Element('pre').add_text(self.error))
            request_handler.response.write(body.sanitized)
            request_handler.response.status = 500
        return _serve_error
# In real life we'd want to make one decorator per service because we wouldn't
# want users to have to give so many permissions.
def _build_decorator():
    """Builds a decorator for using oauth2 with webapp2.RequestHandlers.

    Returns:
        An oauth2 decorator on success, or an _ErrorDecorator carrying a
        human-readable message when client_secrets.json is unusable.
    """
    try:
        return appengine.oauth2decorator_from_clientsecrets(
            _CLIENTSECRETS_JSON_PATH,
            scope=[
                'https://www.googleapis.com/auth/drive.readonly',
                'https://www.googleapis.com/auth/plus.login',
                'https://www.googleapis.com/auth/userinfo.email',
                'https://www.googleapis.com/auth/userinfo.profile',
            ],
            message='client_secrets.json missing')
    # Deliberately catch everything. pylint: disable-msg=broad-except
    except Exception:
        display_error = (
            'oauth2 module enabled, but unable to load client_secrets.json. '
            'See docs in modules/oauth2.py. Original exception was:\n\n%s') % (
                # format_exc() takes no exception argument; the old call
                # format_exc(e) abused its 'limit' parameter.
                traceback.format_exc())
        return _ErrorDecorator(error=display_error)
_DECORATOR = _build_decorator()
class ServiceHandler(webapp2.RequestHandler):
    """Base handler that builds authorized Google API service clients."""

    def build_service(self, oauth2_decorator, name, version):
        """Returns a 'name'/'version' API client using the decorator's credentials."""
        authorized_http = oauth2_decorator.credentials.authorize(
            oauth2_decorator.http())
        return discovery.build(name, version, http=authorized_http)
class GoogleDriveHandler(ServiceHandler):
    """Demo page greeting the user with their Drive display name."""

    @_DECORATOR.oauth_required
    def get(self):
        drive_service = self.build_service(_DECORATOR, 'drive', 'v2')
        about_info = drive_service.about().get().execute()
        self.response.write(
            'Drive sees you as ' + about_info['user']['displayName'])
class GoogleOauth2Handler(ServiceHandler):
    """Demo page greeting the user via the oauth2 userinfo API."""

    @_DECORATOR.oauth_required
    def get(self):
        oauth2_service = self.build_service(_DECORATOR, 'oauth2', 'v2')
        user_info = oauth2_service.userinfo().get().execute()
        self.response.write('Oauth2 sees you as ' + user_info['name'])
class GooglePlusHandler(ServiceHandler):
    """Demo page greeting the user via the Google+ people API."""

    @_DECORATOR.oauth_required
    def get(self):
        plus_service = self.build_service(_DECORATOR, 'plus', 'v1')
        # This call will barf if you're logged in as @google.com because your
        # profile will not be fetchable. Log in as @gmail.com and you'll be
        # fine.
        me = plus_service.people().get(userId='me').execute()
        self.response.write('Plus sees you as ' + me['displayName'])
# None or custom_modules.Module. Placeholder for the module created by
# register_module. Remains None until register_module() runs.
module = None
def register_module():
    """Adds this module to the registry.

    Returns:
        The custom_modules.Module instance for the Oauth2 demo pages.
    """
    global module
    # Demo handler routes plus the decorator's own oauth2 callback route.
    routes = [
        ('/oauth2_google_drive', GoogleDriveHandler),
        ('/oauth2_google_oauth2', GoogleOauth2Handler),
        ('/oauth2_google_plus', GooglePlusHandler),
        (_DECORATOR.callback_path, _DECORATOR.callback_handler()),
    ]
    module = custom_modules.Module('Oauth2', 'Oauth2 pages', routes, [])
    return module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to provide a tag to embed activities into lesson bodies."""
import os
from xml.etree import cElementTree
from common import schema_fields
from common import tags
from models import courses
from models import custom_modules
# String. Course Builder root-relative path where resources for this module are.
# The leading os.path.sep anchors the path at the application root.
_RESOURCES_PATH = os.path.join(
os.path.sep, 'modules', 'activity_tag', 'resources')
class Activity(tags.BaseTag):
    """A tag to embed activities into lesson bodies."""

    binding_name = 'gcb-activity'

    @classmethod
    def name(cls):
        return 'Activity'

    @classmethod
    def vendor(cls):
        return 'gcb'

    def render(self, node, unused_handler):
        """Renders the tag as a <script> plus a container <div>."""
        activity_id = node.attrib.get('activityid')
        markup = cElementTree.XML("""
<div>
<script></script>
<div style="width: 785px;" id="activityContents"></div>
</div>""")
        # Point the <script> element at the activity's JS asset.
        markup[0].set('src', 'assets/js/%s' % activity_id)
        return markup

    def get_icon_url(self):
        return os.path.join(_RESOURCES_PATH, 'activity.png')

    def get_schema(self, handler):
        """The schema of the tag editor."""
        course = courses.Course(handler)
        if course.version == courses.COURSE_MODEL_VERSION_1_2:
            return self.unavailable_schema(
                'Not available in file-based courses.')
        lesson_id = handler.request.get('lesson_id')
        # Build (filename, display name) choices: every lesson that has an
        # activity, plus the lesson currently being edited.
        choices = []
        for unit in course.get_units():
            for lesson in course.get_lessons(unit.unit_id):
                filename = 'activity-%s.js' % lesson.lesson_id
                if lesson.has_activity:
                    title = lesson.activity_title or filename
                    choices.append((
                        filename,
                        '%s - %s (%s) ' % (unit.title, lesson.title, title)))
                elif str(lesson.lesson_id) == lesson_id:
                    choices.append(
                        (filename, 'Current Lesson (%s)' % filename))
        registry = schema_fields.FieldRegistry('Activity')
        registry.add_property(
            schema_fields.SchemaField(
                'activityid', 'Activity Id', 'string', optional=True,
                select_data=choices))
        return registry
custom_module = None
def register_module():
    """Registers this module for use."""

    def on_module_enable():
        # Bind the tag and hide it from course- and assessment-scope editors.
        tags.Registry.add_tag_binding(Activity.binding_name, Activity)
        tags.EditorBlacklists.register(
            Activity.binding_name, tags.EditorBlacklists.COURSE_SCOPE)
        tags.EditorBlacklists.register(
            Activity.binding_name, tags.EditorBlacklists.ASSESSMENT_SCOPE)

    def on_module_disable():
        # Mirror image of on_module_enable().
        tags.Registry.remove_tag_binding(Activity.binding_name)
        tags.EditorBlacklists.unregister(
            Activity.binding_name, tags.EditorBlacklists.COURSE_SCOPE)
        tags.EditorBlacklists.unregister(
            Activity.binding_name, tags.EditorBlacklists.ASSESSMENT_SCOPE)

    global custom_module
    # Add a static handler for icons shown in the rich text editor.
    global_routes = [(
        os.path.join(_RESOURCES_PATH, '.*'), tags.ResourcesHandler)]
    custom_module = custom_modules.Module(
        'Embedded Activity',
        'Adds a custom tag to embed an activity in a lesson.',
        global_routes, [],
        notify_module_disabled=on_module_disable,
        notify_module_enabled=on_module_enable,
    )
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the review subsystem."""
__author__ = [
'johncox@google.com (John Cox)',
]
import datetime
import random
from models import counters
from models import custom_modules
from models import entities
from models import student_work
from models import utils
import models.review
from modules.review import domain
from modules.review import peer
from google.appengine.ext import db
# In-process increment-only performance counters.

# --- add_reviewer() instrumentation.
COUNTER_ADD_REVIEWER_BAD_SUMMARY_KEY = counters.PerfCounter(
    'gcb-pr-add-reviewer-bad-summary-key',
    'number of times add_reviewer() failed due to a bad review summary key')
COUNTER_ADD_REVIEWER_SET_ASSIGNER_KIND_HUMAN = counters.PerfCounter(
    'gcb-pr-add-reviewer-set-assigner-kind-human',
    ("number of times add_reviewer() changed an existing step's assigner_kind "
     'to ASSIGNER_KIND_HUMAN'))
COUNTER_ADD_REVIEWER_CREATE_REVIEW_STEP = counters.PerfCounter(
    'gcb-pr-add-reviewer-create-review-step',
    'number of times add_reviewer() created a new review step')
COUNTER_ADD_REVIEWER_EXPIRED_STEP_REASSIGNED = counters.PerfCounter(
    'gcb-pr-add-reviewer-expired-step-reassigned',
    'number of times add_reviewer() reassigned an expired step')
COUNTER_ADD_REVIEWER_FAILED = counters.PerfCounter(
    'gcb-pr-add-reviewer-failed',
    'number of times add_reviewer() had a fatal error')
COUNTER_ADD_REVIEWER_REMOVED_STEP_UNREMOVED = counters.PerfCounter(
    'gcb-pr-add-reviewer-removed-step-unremoved',
    'number of times add_reviewer() unremoved a removed review step')
COUNTER_ADD_REVIEWER_START = counters.PerfCounter(
    'gcb-pr-add-reviewer-start',
    'number of times add_reviewer() has started processing')
COUNTER_ADD_REVIEWER_SUCCESS = counters.PerfCounter(
    'gcb-pr-add-reviewer-success',
    'number of times add_reviewer() completed successfully')
COUNTER_ADD_REVIEWER_UNREMOVED_STEP_FAILED = counters.PerfCounter(
    'gcb-pr-add-reviewer-unremoved-step-failed',
    ('number of times add_reviewer() failed on an unremoved step with a fatal '
     'error'))

# --- Candidate-query instrumentation.
COUNTER_ASSIGNMENT_CANDIDATES_QUERY_RESULTS_RETURNED = counters.PerfCounter(
    'gcb-pr-assignment-candidates-query-results-returned',
    ('number of results returned by the query returned by '
     'get_assignment_candidates_query()'))

# --- delete_reviewer() instrumentation.
COUNTER_DELETE_REVIEWER_ALREADY_REMOVED = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-already-removed',
    ('number of times delete_reviewer() called on review step with removed '
     'already True'))
COUNTER_DELETE_REVIEWER_FAILED = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-failed',
    'number of times delete_reviewer() had a fatal error')
COUNTER_DELETE_REVIEWER_START = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-start',
    'number of times delete_reviewer() has started processing')
COUNTER_DELETE_REVIEWER_STEP_MISS = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-step-miss',
    'number of times delete_reviewer() found a missing review step')
COUNTER_DELETE_REVIEWER_SUCCESS = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-success',
    'number of times delete_reviewer() completed successfully')
COUNTER_DELETE_REVIEWER_SUMMARY_MISS = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-summary-miss',
    'number of times delete_reviewer() found a missing review summary')

# --- expire_review() instrumentation.
COUNTER_EXPIRE_REVIEW_CANNOT_TRANSITION = counters.PerfCounter(
    'gcb-pr-expire-review-cannot-transition',
    ('number of times expire_review() was called on a review step that could '
     'not be transitioned to REVIEW_STATE_EXPIRED'))
COUNTER_EXPIRE_REVIEW_FAILED = counters.PerfCounter(
    'gcb-pr-expire-review-failed',
    'number of times expire_review() had a fatal error')
COUNTER_EXPIRE_REVIEW_START = counters.PerfCounter(
    'gcb-pr-expire-review-start',
    'number of times expire_review() has started processing')
COUNTER_EXPIRE_REVIEW_STEP_MISS = counters.PerfCounter(
    'gcb-pr-expire-review-step-miss',
    'number of times expire_review() found a missing review step')
COUNTER_EXPIRE_REVIEW_SUCCESS = counters.PerfCounter(
    'gcb-pr-expire-review-success',
    'number of times expire_review() completed successfully')
COUNTER_EXPIRE_REVIEW_SUMMARY_MISS = counters.PerfCounter(
    'gcb-pr-expire-review-summary-miss',
    'number of times expire_review() found a missing review summary')

# --- expire_old_reviews_for_unit() instrumentation.
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_EXPIRE = counters.PerfCounter(
    'gcb-pr-expire-old-reviews-for-unit-expire',
    'number of records expire_old_reviews_for_unit() has expired')
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SKIP = counters.PerfCounter(
    'gcb-pr-expire-old-reviews-for-unit-skip',
    ('number of times expire_old_reviews_for_unit() skipped a record due to an '
     'error'))
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_START = counters.PerfCounter(
    'gcb-pr-expire-old-reviews-for-unit-start',
    'number of times expire_old_reviews_for_unit() has started processing')
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SUCCESS = counters.PerfCounter(
    'gcb-pr-expire-old-reviews-for-unit-success',
    'number of times expire_old_reviews_for_unit() completed successfully')
COUNTER_EXPIRY_QUERY_KEYS_RETURNED = counters.PerfCounter(
    'gcb-pr-expiry-query-keys-returned',
    'number of keys returned by the query returned by get_expiry_query()')

# --- get_new_review() instrumentation.
COUNTER_GET_NEW_REVIEW_ALREADY_ASSIGNED = counters.PerfCounter(
    'gcb-pr-get-new-review-already-assigned',
    ('number of times get_new_review() rejected a candidate because the '
     'reviewer is already assigned to or has already completed it'))
COUNTER_GET_NEW_REVIEW_ASSIGNMENT_ATTEMPTED = counters.PerfCounter(
    'gcb-pr-get-new-review-assignment-attempted',
    'number of times get_new_review() attempted to assign a candidate')
COUNTER_GET_NEW_REVIEW_CANNOT_UNREMOVE_COMPLETED = counters.PerfCounter(
    'gcb-pr-get-new-review-cannot-unremove-completed',
    ('number of times get_new_review() failed because the reviewer already had '
     'a completed, removed review step'))
COUNTER_GET_NEW_REVIEW_FAILED = counters.PerfCounter(
    'gcb-pr-get-new-review-failed',
    'number of times get_new_review() had a fatal error')
# NOTE(review): constant name says NOT_ASSIGNABLE but the persisted counter
# key says 'none-assignable'. Renaming the key would orphan any recorded
# data, so the mismatch is deliberately left as-is.
COUNTER_GET_NEW_REVIEW_NOT_ASSIGNABLE = counters.PerfCounter(
    'gcb-pr-get-new-review-none-assignable',
    'number of times get_new_review() failed to find an assignable review')
COUNTER_GET_NEW_REVIEW_REASSIGN_EXISTING = counters.PerfCounter(
    'gcb-pr-get-new-review-reassign-existing',
    ('number of times get_new_review() unremoved and reassigned an existing '
     'review step'))
COUNTER_GET_NEW_REVIEW_START = counters.PerfCounter(
    'gcb-pr-get-new-review-start',
    'number of times get_new_review() has started processing')
COUNTER_GET_NEW_REVIEW_SUCCESS = counters.PerfCounter(
    'gcb-pr-get-new-review-success',
    'number of times get_new_review() found and assigned a new review')
COUNTER_GET_NEW_REVIEW_SUMMARY_CHANGED = counters.PerfCounter(
    'gcb-pr-get-new-review-summary-changed',
    ('number of times get_new_review() rejected a candidate because the review '
     'summary changed during processing'))

# --- get_review_step_keys_by() instrumentation.
COUNTER_GET_REVIEW_STEP_KEYS_BY_KEYS_RETURNED = counters.PerfCounter(
    'gcb-pr-get-review-step-keys-by-keys-returned',
    'number of keys get_review_step_keys_by() returned')
COUNTER_GET_REVIEW_STEP_KEYS_BY_FAILED = counters.PerfCounter(
    'gcb-pr-get-review-step-keys-by-failed',
    'number of times get_review_step_keys_by() had a fatal error')
COUNTER_GET_REVIEW_STEP_KEYS_BY_START = counters.PerfCounter(
    'gcb-pr-get-review-step-keys-by-start',
    'number of times get_review_step_keys_by() started processing')
COUNTER_GET_REVIEW_STEP_KEYS_BY_SUCCESS = counters.PerfCounter(
    'gcb-pr-get-review-step-keys-by-success',
    'number of times get_review_step_keys_by() completed successfully')

# --- get_submission_and_review_step_keys() instrumentation.
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_FAILED = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step-keys-failed',
    'number of times get_submission_and_review_step_keys() had a fatal error')
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_RETURNED = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step-keys-keys-returned',
    'number of keys get_submission_and_review_step_keys() returned')
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_START = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step-keys-start',
    ('number of times get_submission_and_review_step_keys() has begun '
     'processing'))
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUBMISSION_MISS = (
    counters.PerfCounter(
        'gcb-pr-get-submission-and-review-step-keys-submission-miss',
        ('number of times get_submission_and_review_step_keys() failed to find '
         'a submission_key')))
# NOTE(review): the counter key uses 'step_keys' (underscore) where its
# siblings use 'step-keys'; the key is a persisted identifier, so it is left
# unchanged to avoid orphaning recorded data. The human-readable description
# below previously garbled the function name with hyphens; fixed here.
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUCCESS = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step_keys-success',
    ('number of times get_submission_and_review_step_keys() completed '
     'successfully'))
# --- start_review_process_for() instrumentation.
COUNTER_START_REVIEW_PROCESS_FOR_ALREADY_STARTED = counters.PerfCounter(
    'gcb-pr-start-review-process-for-already-started',
    ('number of times start_review_process_for() called when review already '
     'started'))
COUNTER_START_REVIEW_PROCESS_FOR_FAILED = counters.PerfCounter(
    'gcb-pr-start-review-process-for-failed',
    'number of times start_review_process_for() had a fatal error')
COUNTER_START_REVIEW_PROCESS_FOR_START = counters.PerfCounter(
    'gcb-pr-start-review-process-for-start',
    'number of times start_review_process_for() has started processing')
COUNTER_START_REVIEW_PROCESS_FOR_SUCCESS = counters.PerfCounter(
    'gcb-pr-start-review-process-for-success',
    'number of times start_review_process_for() completed successfully')

# --- write_review() instrumentation.
COUNTER_WRITE_REVIEW_COMPLETED_ASSIGNED_STEP = counters.PerfCounter(
    'gcb-pr-write-review-completed-assigned-step',
    'number of times write_review() transitioned an assigned step to completed')
COUNTER_WRITE_REVIEW_COMPLETED_EXPIRED_STEP = counters.PerfCounter(
    'gcb-pr-write-review-completed-expired-step',
    'number of times write_review() transitioned an expired step to completed')
COUNTER_WRITE_REVIEW_CREATED_NEW_REVIEW = counters.PerfCounter(
    'gcb-pr-write-review-created-new-review',
    'number of times write_review() created a new review')
COUNTER_WRITE_REVIEW_FAILED = counters.PerfCounter(
    'gcb-pr-write-review-failed',
    'number of times write_review() had a fatal error')
COUNTER_WRITE_REVIEW_REVIEW_MISS = counters.PerfCounter(
    'gcb-pr-write-review-review-miss',
    'number of times write_review() found a missing review')
COUNTER_WRITE_REVIEW_START = counters.PerfCounter(
    'gcb-pr-write-review-start',
    'number of times write_review() started processing')
COUNTER_WRITE_REVIEW_STEP_MISS = counters.PerfCounter(
    'gcb-pr-write-review-step-miss',
    'number of times write_review() found a missing review step')
COUNTER_WRITE_REVIEW_SUMMARY_MISS = counters.PerfCounter(
    'gcb-pr-write-review-summary-miss',
    'number of times write_review() found a missing review summary')
COUNTER_WRITE_REVIEW_SUCCESS = counters.PerfCounter(
    'gcb-pr-write-review-success',
    'number of times write_review() completed successfully')
COUNTER_WRITE_REVIEW_UPDATED_EXISTING_REVIEW = counters.PerfCounter(
    'gcb-pr-write-review-updated-existing-review',
    'number of times write_review() updated an existing review')

# Number of entities to fetch when querying for all review steps that meet
# given criteria. Ideally we'd cursor through results rather than setting a
# ceiling, but for now let's allow as many removed results as unremoved.
_REVIEW_STEP_QUERY_LIMIT = 2 * domain.MAX_UNREMOVED_REVIEW_STEPS
class Manager(object):
    """Object that manages the review subsystem.

    All public entry points are classmethods; the class itself holds no
    state. Mutating operations run inside cross-group datastore
    transactions and keep the module-level PerfCounters up to date.
    """
@classmethod
def add_reviewer(cls, unit_id, submission_key, reviewee_key, reviewer_key):
    """Adds a reviewer for a submission.

    If there is no pre-existing review step, one will be created.

    Attempting to add an existing unremoved step in REVIEW_STATE_ASSIGNED or
    REVIEW_STATE_COMPLETED is an error.

    If there is an existing unremoved review in REVIEW_STATE_EXPIRED, it
    will be put in REVIEW_STATE_ASSIGNED. If there is a removed review in
    REVIEW_STATE_ASSIGNED or REVIEW_STATE_EXPIRED, it will be put in
    REVIEW_STATE_ASSIGNED and unremoved. If it is in REVIEW_STATE_COMPLETED,
    it will be unremoved but its state will not change. In all these cases
    the assigner kind will be set to ASSIGNER_KIND_HUMAN.

    Args:
        unit_id: string. Unique identifier for a unit.
        submission_key: db.Key of models.student_work.Submission. The
            submission being registered.
        reviewee_key: db.Key of models.models.Student. The student who
            authored the submission.
        reviewer_key: db.Key of models.models.Student. The student to add as
            a reviewer.

    Raises:
        domain.TransitionError: if there is a pre-existing review step found
            in domain.REVIEW_STATE_ASSIGNED|COMPLETED.

    Returns:
        db.Key of written review step.
    """
    try:
        COUNTER_ADD_REVIEWER_START.inc()
        key = cls._add_reviewer(
            unit_id, submission_key, reviewee_key, reviewer_key)
        COUNTER_ADD_REVIEWER_SUCCESS.inc()
        return key
    except Exception:
        COUNTER_ADD_REVIEWER_FAILED.inc()
        # Bare re-raise preserves the original traceback; 'raise e' would
        # rebase it to this frame.
        raise
@classmethod
@db.transactional(xg=True)
def _add_reviewer(cls, unit_id, submission_key, reviewee_key, reviewer_key):
    """Transactional worker for add_reviewer(): create or reuse a step."""
    step_key_name = peer.ReviewStep.key_name(submission_key, reviewer_key)
    existing = peer.ReviewStep.get_by_key_name(step_key_name)
    if existing:
        return cls._add_reviewer_update_step(existing)
    return cls._add_new_reviewer(
        unit_id, submission_key, reviewee_key, reviewer_key)
@classmethod
def _add_new_reviewer(
        cls, unit_id, submission_key, reviewee_key, reviewer_key):
    """Creates a new review summary plus its first (assigned) review step.

    Called inside the transaction opened by _add_reviewer().
    """
    # The new step counts as assigned immediately, so the summary starts
    # with assigned_count=1.
    summary = peer.ReviewSummary(
        assigned_count=1, reviewee_key=reviewee_key,
        submission_key=submission_key, unit_id=unit_id)
    # Synthesize summary key to avoid a second synchronous put op.
    summary_key = db.Key.from_path(
        peer.ReviewSummary.kind(),
        peer.ReviewSummary.key_name(submission_key))
    step = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_HUMAN,
        review_summary_key=summary_key, reviewee_key=reviewee_key,
        reviewer_key=reviewer_key, state=domain.REVIEW_STATE_ASSIGNED,
        submission_key=submission_key, unit_id=unit_id)
    # put() returns keys in input order: step first, then summary.
    step_key, written_summary_key = entities.put([step, summary])
    # Sanity check: the synthesized key must match the key the datastore
    # actually wrote, otherwise the step references a bogus summary.
    if summary_key != written_summary_key:
        COUNTER_ADD_REVIEWER_BAD_SUMMARY_KEY.inc()
        raise AssertionError(
            'Synthesized invalid review summary key %s' % repr(summary_key))
    COUNTER_ADD_REVIEWER_CREATE_REVIEW_STEP.inc()
    return step_key
@classmethod
def _add_reviewer_update_step(cls, step):
    """Reuses an existing review step for a human-requested assignment.

    Called inside the transaction opened by _add_reviewer(). Unremoves the
    step if needed, moves EXPIRED steps back to ASSIGNED, keeps the
    summary's per-state counts in sync, and stamps the step as
    human-assigned.
    """
    # Defer counter increments until the datastore put has succeeded.
    should_increment_human = False
    should_increment_reassigned = False
    should_increment_unremoved = False
    summary = entities.get(step.review_summary_key)
    if not summary:
        COUNTER_ADD_REVIEWER_BAD_SUMMARY_KEY.inc()
        raise AssertionError(
            'Found invalid review summary key %s' % repr(
                step.review_summary_key))
    if not step.removed:
        if step.state == domain.REVIEW_STATE_EXPIRED:
            # Revive an expired step: back to ASSIGNED, and move the
            # summary count from expired to assigned.
            should_increment_reassigned = True
            step.state = domain.REVIEW_STATE_ASSIGNED
            summary.decrement_count(domain.REVIEW_STATE_EXPIRED)
            summary.increment_count(domain.REVIEW_STATE_ASSIGNED)
        elif (step.state == domain.REVIEW_STATE_ASSIGNED or
              step.state == domain.REVIEW_STATE_COMPLETED):
            # An active (unremoved) step already exists; adding the same
            # reviewer again is a caller error.
            COUNTER_ADD_REVIEWER_UNREMOVED_STEP_FAILED.inc()
            raise domain.TransitionError(
                'Unable to add new reviewer to step %s' % (
                    repr(step.key())),
                step.state, domain.REVIEW_STATE_ASSIGNED)
    else:
        # Removed steps get resurrected: COMPLETED keeps its state; EXPIRED
        # is moved back to ASSIGNED; either way the summary count for the
        # step's (possibly new) state is restored.
        should_increment_unremoved = True
        step.removed = False
        if step.state != domain.REVIEW_STATE_EXPIRED:
            summary.increment_count(step.state)
        else:
            should_increment_reassigned = True
            step.state = domain.REVIEW_STATE_ASSIGNED
            summary.decrement_count(domain.REVIEW_STATE_EXPIRED)
            summary.increment_count(domain.REVIEW_STATE_ASSIGNED)
    if step.assigner_kind != domain.ASSIGNER_KIND_HUMAN:
        should_increment_human = True
        step.assigner_kind = domain.ASSIGNER_KIND_HUMAN
    # put() returns keys in input order; [0] is the step's key.
    step_key = entities.put([step, summary])[0]
    if should_increment_human:
        COUNTER_ADD_REVIEWER_SET_ASSIGNER_KIND_HUMAN.inc()
    if should_increment_reassigned:
        COUNTER_ADD_REVIEWER_EXPIRED_STEP_REASSIGNED.inc()
    if should_increment_unremoved:
        COUNTER_ADD_REVIEWER_REMOVED_STEP_UNREMOVED.inc()
    return step_key
@classmethod
def delete_reviewer(cls, review_step_key):
    """Deletes the given review step.

    We do not physically delete the review step; we mark it as removed,
    meaning it will be ignored from most queries and the associated review
    summary will have its corresponding state count decremented. Calling
    this method on a removed review step is an error.

    Args:
        review_step_key: db.Key of models.student_work.ReviewStep. The
            review step to delete.

    Raises:
        domain.RemovedError: if called on a review step that has already
            been marked removed.
        KeyError: if there is no review step with the given key, or if the
            step references a review summary that does not exist.

    Returns:
        db.Key of deleted review step.
    """
    try:
        COUNTER_DELETE_REVIEWER_START.inc()
        key = cls._mark_review_step_removed(review_step_key)
        COUNTER_DELETE_REVIEWER_SUCCESS.inc()
        return key
    except Exception:
        COUNTER_DELETE_REVIEWER_FAILED.inc()
        # Bare re-raise preserves the original traceback; 'raise e' would
        # rebase it to this frame.
        raise
@classmethod
@db.transactional(xg=True)
def _mark_review_step_removed(cls, review_step_key):
    """Transactional worker: flag a step removed and fix summary counts."""
    step = entities.get(review_step_key)
    if not step:
        COUNTER_DELETE_REVIEWER_STEP_MISS.inc()
        raise KeyError(
            'No review step found with key %s' % repr(review_step_key))
    if step.removed:
        COUNTER_DELETE_REVIEWER_ALREADY_REMOVED.inc()
        raise domain.RemovedError(
            'Cannot remove step %s' % repr(review_step_key), step.removed)
    summary = entities.get(step.review_summary_key)
    if not summary:
        COUNTER_DELETE_REVIEWER_SUMMARY_MISS.inc()
        raise KeyError(
            'No review summary found with key %s' % repr(
                step.review_summary_key))
    # Logical delete only: keep the entity but take it out of the
    # summary's per-state tally.
    step.removed = True
    summary.decrement_count(step.state)
    # put() returns keys in input order; [0] is the step's key.
    return entities.put([step, summary])[0]
@classmethod
def expire_review(cls, review_step_key):
    """Puts a review step in state REVIEW_STATE_EXPIRED.

    Args:
        review_step_key: db.Key of models.student_work.ReviewStep. The
            review step to expire.

    Raises:
        domain.RemovedError: if called on a step that is removed.
        domain.TransitionError: if called on a review step that cannot be
            transitioned to REVIEW_STATE_EXPIRED (that is, it is already in
            REVIEW_STATE_COMPLETED or REVIEW_STATE_EXPIRED).
        KeyError: if there is no review with the given key, or the step
            references a review summary that does not exist.

    Returns:
        db.Key of the expired review step.
    """
    try:
        COUNTER_EXPIRE_REVIEW_START.inc()
        key = cls._transition_state_to_expired(review_step_key)
        COUNTER_EXPIRE_REVIEW_SUCCESS.inc()
        return key
    except Exception:
        COUNTER_EXPIRE_REVIEW_FAILED.inc()
        # Bare re-raise preserves the original traceback; 'raise e' would
        # rebase it to this frame.
        raise
@classmethod
@db.transactional(xg=True)
def _transition_state_to_expired(cls, review_step_key):
    """Transactional worker: move a live, assigned step to EXPIRED."""
    step = entities.get(review_step_key)
    if not step:
        COUNTER_EXPIRE_REVIEW_STEP_MISS.inc()
        raise KeyError(
            'No review step found with key %s' % repr(review_step_key))
    if step.removed:
        COUNTER_EXPIRE_REVIEW_CANNOT_TRANSITION.inc()
        raise domain.RemovedError(
            'Cannot transition step %s' % repr(review_step_key),
            step.removed)
    # COMPLETED and EXPIRED are terminal with respect to expiry.
    if step.state in (
            domain.REVIEW_STATE_COMPLETED, domain.REVIEW_STATE_EXPIRED):
        COUNTER_EXPIRE_REVIEW_CANNOT_TRANSITION.inc()
        raise domain.TransitionError(
            'Cannot transition step %s' % repr(review_step_key),
            step.state, domain.REVIEW_STATE_EXPIRED)
    summary = entities.get(step.review_summary_key)
    if not summary:
        COUNTER_EXPIRE_REVIEW_SUMMARY_MISS.inc()
        raise KeyError(
            'No review summary found with key %s' % repr(
                step.review_summary_key))
    # Move the summary's per-state count from the old state to EXPIRED.
    summary.decrement_count(step.state)
    step.state = domain.REVIEW_STATE_EXPIRED
    summary.increment_count(step.state)
    # put() returns keys in input order; [0] is the step's key.
    return entities.put([step, summary])[0]
@classmethod
def expire_old_reviews_for_unit(cls, review_window_mins, unit_id):
    """Finds and expires all old review steps for a single unit.

    Args:
        review_window_mins: int. Number of minutes before we expire reviews
            assigned by domain.ASSIGNER_KIND_AUTO.
        unit_id: string. Id of the unit to restrict the query to.

    Returns:
        2-tuple of list of db.Key of peer.ReviewStep. 0th element is keys
        that were written successfully; 1st element is keys that we failed
        to update.
    """
    query = cls.get_expiry_query(review_window_mins, unit_id)
    mapper = utils.QueryMapper(
        query, counter=COUNTER_EXPIRY_QUERY_KEYS_RETURNED, report_every=100)
    expired_keys = []
    exception_keys = []

    def map_fn(review_step_key, expired_keys, exception_keys):
        # Per-key best-effort expiry; failures are recorded, not raised.
        try:
            expired_keys.append(cls.expire_review(review_step_key))
        except:  # All errors are the same. pylint: disable-msg=bare-except
            # Skip. Either the entity was updated between the query and
            # the update, meaning we don't need to expire it; or we ran into
            # a transient datastore error, meaning we'll expire it next
            # time.
            COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SKIP.inc()
            exception_keys.append(review_step_key)

    COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_START.inc()
    mapper.run(map_fn, expired_keys, exception_keys)
    COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_EXPIRE.inc(
        increment=len(expired_keys))
    COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SUCCESS.inc()
    return expired_keys, exception_keys
@classmethod
def get_assignment_candidates_query(cls, unit_id):
    """Gets query that returns candidates for new review assignment.

    New assignment candidates are scoped to a unit. We prefer first items
    that have the smallest number of completed reviews, then those that have
    the smallest number of assigned reviews, then those that were created
    most recently.

    The results of the query are user-independent.

    Args:
        unit_id: string. Id of the unit to restrict the query to.

    Returns:
        db.Query that will return [peer.ReviewSummary].
    """
    # db.Query.filter()/order() mutate the query in place; building it
    # stepwise is equivalent to the chained form.
    query = peer.ReviewSummary.all()
    query.filter(peer.ReviewSummary.unit_id.name, unit_id)
    query.order(peer.ReviewSummary.completed_count.name)
    query.order(peer.ReviewSummary.assigned_count.name)
    query.order(peer.ReviewSummary.create_date.name)
    return query
@classmethod
def get_expiry_query(
        cls, review_window_mins, unit_id, now_fn=datetime.datetime.utcnow):
    """Gets a db.Query that returns review steps to mark expired.

    Results are items that were assigned by machine, are currently assigned,
    are not removed, were last updated more than review_window_mins ago,
    and are ordered by change date ascending.

    Args:
        review_window_mins: int. Number of minutes before we expire reviews
            assigned by domain.ASSIGNER_KIND_AUTO.
        unit_id: string. Id of the unit to restrict the query to.
        now_fn: function that returns the current UTC datetime. Injectable
            for tests only.

    Returns:
        db.Query.
    """
    cutoff = now_fn() - datetime.timedelta(minutes=review_window_mins)
    # db.Query.filter()/order() mutate the query in place; building it
    # stepwise is equivalent to the chained form.
    query = peer.ReviewStep.all(keys_only=True)
    query.filter(peer.ReviewStep.unit_id.name, unit_id)
    query.filter(peer.ReviewStep.assigner_kind.name, domain.ASSIGNER_KIND_AUTO)
    query.filter(peer.ReviewStep.state.name, domain.REVIEW_STATE_ASSIGNED)
    query.filter(peer.ReviewStep.removed.name, False)
    query.filter('%s <=' % peer.ReviewStep.change_date.name, cutoff)
    query.order(peer.ReviewStep.change_date.name)
    return query
@classmethod
def get_new_review(
        cls, unit_id, reviewer_key, candidate_count=20, max_retries=5):
    """Attempts to assign a review to a reviewer.

    We prioritize possible reviews by querying review summary objects,
    finding those that best satisfy cls.get_assignment_candidates_query.

    To minimize write contention, we nontransactionally grab candidate_count
    candidates from the head of the query results. Post-query we filter out
    any candidates that are for the prospective reviewer's own work.
    Then we randomly select one. We transactionally attempt to assign that
    review. If assignment fails because the candidate is updated between
    selection and assignment or the assignment is for a submission the
    reviewer already has or has already done, we remove the candidate from
    the list. We then retry assignment up to max_retries times. If we run
    out of retries or candidates, we raise domain.NotAssignableError.

    This is a naive implementation because it scales only to relatively low
    new review assignments per second and because it can raise
    domain.NotAssignableError when there are in fact assignable reviews.

    Args:
        unit_id: string. The unit to assign work from.
        reviewer_key: db.Key of models.models.Student. The reviewer to
            attempt to assign the review to.
        candidate_count: int. The number of candidate keys to fetch and
            attempt to assign from. Increasing this decreases the chance
            that we will have write contention on reviews, but it costs 1 +
            num_results datastore reads and can get expensive for large
            courses.
        max_retries: int. Number of times to retry failed assignment
            attempts. Careful not to set this too high as a) datastore
            throughput is slow and latency from this method is user-facing,
            and b) if you encounter a few failures it is likely that all
            candidates are now failures, so each retry past the first few is
            of questionable value.

    Raises:
        domain.NotAssignableError: if no review can currently be assigned
            for the given unit_id.

    Returns:
        db.Key of peer.ReviewStep. The newly created assigned review step.
    """
    try:
        COUNTER_GET_NEW_REVIEW_START.inc()
        # Filter out candidates that are for submissions by the reviewer.
        raw_candidates = cls.get_assignment_candidates_query(unit_id).fetch(
            candidate_count)
        COUNTER_ASSIGNMENT_CANDIDATES_QUERY_RESULTS_RETURNED.inc(
            increment=len(raw_candidates))
        candidates = [
            candidate for candidate in raw_candidates
            if candidate.reviewee_key != reviewer_key]
        retries = 0
        while True:
            if not candidates or retries >= max_retries:
                COUNTER_GET_NEW_REVIEW_NOT_ASSIGNABLE.inc()
                raise domain.NotAssignableError(
                    'No reviews assignable for unit %s and reviewer %s' % (
                        unit_id, repr(reviewer_key)))
            candidate = cls._choose_assignment_candidate(candidates)
            candidates.remove(candidate)
            assigned_key = cls._attempt_review_assignment(
                candidate.key(), reviewer_key, candidate.change_date)
            if not assigned_key:
                retries += 1
            else:
                COUNTER_GET_NEW_REVIEW_SUCCESS.inc()
                return assigned_key
    # 'except Exception, e' (legacy comma syntax) replaced with the modern
    # form used everywhere else in this file; bare re-raise preserves the
    # original traceback where 'raise e' would rebase it to this frame.
    except Exception:
        COUNTER_GET_NEW_REVIEW_FAILED.inc()
        raise
@classmethod
def _choose_assignment_candidate(cls, candidates):
"""Seam that allows different choice functions in tests."""
return random.choice(candidates)
@classmethod
@db.transactional(xg=True)
def _attempt_review_assignment(
        cls, review_summary_key, reviewer_key, last_change_date):
    """Transactionally tries to assign one candidate to the reviewer.

    Returns the written step's db.Key on success, or None when the
    candidate should be skipped (summary changed underneath us, reviewer
    already completed it, or reviewer is already assigned to it).
    """
    COUNTER_GET_NEW_REVIEW_ASSIGNMENT_ATTEMPTED.inc()
    summary = entities.get(review_summary_key)
    if not summary:
        raise KeyError('No review summary found with key %s' % repr(
            review_summary_key))
    if summary.change_date != last_change_date:
        # The summary has changed since we queried it. We cannot know for
        # sure what the edit was, but let's skip to the next one because it
        # was probably a review assignment.
        COUNTER_GET_NEW_REVIEW_SUMMARY_CHANGED.inc()
        return
    step = peer.ReviewStep.get_by_key_name(
        peer.ReviewStep.key_name(summary.submission_key, reviewer_key))
    if not step:
        # No prior relationship: create a fresh machine-assigned step.
        step = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_summary_key=summary.key(),
            reviewee_key=summary.reviewee_key, reviewer_key=reviewer_key,
            state=domain.REVIEW_STATE_ASSIGNED,
            submission_key=summary.submission_key, unit_id=summary.unit_id)
    else:
        if step.state == domain.REVIEW_STATE_COMPLETED:
            # Reviewer has previously done this review and the review
            # has been deleted. Skip to the next one.
            COUNTER_GET_NEW_REVIEW_CANNOT_UNREMOVE_COMPLETED.inc()
            return
        if step.removed:
            # We can reassign the existing review step.
            COUNTER_GET_NEW_REVIEW_REASSIGN_EXISTING.inc()
            step.removed = False
            step.assigner_kind = domain.ASSIGNER_KIND_AUTO
            step.state = domain.REVIEW_STATE_ASSIGNED
        else:
            # Reviewee has already reviewed or is already assigned to review
            # this submission, so we cannot reassign the step.
            COUNTER_GET_NEW_REVIEW_ALREADY_ASSIGNED.inc()
            return
    summary.increment_count(domain.REVIEW_STATE_ASSIGNED)
    # put() returns keys in input order; [0] is the step's key.
    return entities.put([step, summary])[0]
@classmethod
def get_review_step_keys_by(cls, unit_id, reviewer_key):
    """Gets the keys of all review steps in a unit for a reviewer.

    Note that keys for review steps marked removed are included in the
    result set.

    Args:
        unit_id: string. Id of the unit to restrict the query to.
        reviewer_key: db.Key of models.models.Student. The author of the
            requested reviews.

    Returns:
        [db.Key of peer.ReviewStep].
    """
    COUNTER_GET_REVIEW_STEP_KEYS_BY_START.inc()
    try:
        query = peer.ReviewStep.all(keys_only=True).filter(
            peer.ReviewStep.reviewer_key.name, reviewer_key
        ).filter(
            peer.ReviewStep.unit_id.name, unit_id
        ).order(
            peer.ReviewStep.create_date.name,
        )
        # fetch() already returns a list of keys; the previous
        # element-by-element copy was redundant.
        keys = query.fetch(_REVIEW_STEP_QUERY_LIMIT)
    except Exception:
        COUNTER_GET_REVIEW_STEP_KEYS_BY_FAILED.inc()
        # Bare re-raise preserves the original traceback; 'raise e' would
        # rebase it to this frame.
        raise
    COUNTER_GET_REVIEW_STEP_KEYS_BY_SUCCESS.inc()
    COUNTER_GET_REVIEW_STEP_KEYS_BY_KEYS_RETURNED.inc(increment=len(keys))
    return keys
@classmethod
def get_review_steps_by_keys(cls, keys):
    """Gets review steps by their keys.

    Args:
        keys: [db.Key of peer.ReviewStep]. Keys to fetch.

    Returns:
        [domain.ReviewStep or None]. Missed keys return None in place in
        result list.
    """
    steps = []
    for model in entities.get(keys):
        steps.append(cls._make_domain_review_step(model))
    return steps
@classmethod
def _make_domain_review_step(cls, model):
    """Converts a datastore review step into its domain object (None-safe)."""
    if model is None:
        return None
    return domain.ReviewStep(
        assigner_kind=model.assigner_kind,
        change_date=model.change_date,
        create_date=model.create_date,
        key=model.key(),
        removed=model.removed,
        review_key=model.review_key,
        review_summary_key=model.review_summary_key,
        reviewee_key=model.reviewee_key,
        reviewer_key=model.reviewer_key,
        state=model.state,
        submission_key=model.submission_key,
        unit_id=model.unit_id)
@classmethod
def get_reviews_by_keys(cls, keys):
    """Gets reviews by their keys.

    Args:
        keys: [db.Key of review.Review]. Keys to fetch.

    Returns:
        [domain.Review or None]. Missed keys return None in place in result
        list.
    """
    reviews = []
    for model in entities.get(keys):
        reviews.append(cls._make_domain_review(model))
    return reviews
@classmethod
def _make_domain_review(cls, model):
    """Converts a datastore review entity into its domain object (None-safe)."""
    if model is not None:
        return domain.Review(contents=model.contents, key=model.key())
    return None
@classmethod
def get_submission_and_review_step_keys(cls, unit_id, reviewee_key):
    """Gets the submission key/review step keys for the given pair.

    Note that keys for review steps marked removed are included in the
    result set.

    Args:
        unit_id: string. Id of the unit to restrict the query to.
        reviewee_key: db.Key of models.models.Student. The student who
            authored the submission.

    Raises:
        domain.ConstraintError: if multiple review summary keys were found
            for the given unit_id, reviewee_key pair.
        KeyError: if there is no review summary for the given unit_id,
            reviewee pair.

    Returns:
        (db.Key of Submission, [db.Key of peer.ReviewStep]) if submission
        found for given unit_id, reviewee_key pair; None otherwise.
    """
    COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_START.inc()
    try:
        submission_key = db.Key.from_path(
            student_work.Submission.kind(),
            student_work.Submission.key_name(unit_id, reviewee_key))
        submission = entities.get(submission_key)
        if not submission:
            COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUBMISSION_MISS.inc(
            )
            return
        step_keys_query = peer.ReviewStep.all(
            keys_only=True
        ).filter(
            peer.ReviewStep.submission_key.name, submission_key
        )
        step_keys = step_keys_query.fetch(_REVIEW_STEP_QUERY_LIMIT)
        results = (submission_key, step_keys)
    except Exception:
        COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_FAILED.inc()
        # Bare re-raise preserves the original traceback; 'raise e' would
        # rebase it to this frame.
        raise
    COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUCCESS.inc()
    COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_RETURNED.inc(
        increment=len(step_keys))
    return results
@classmethod
def get_submissions_by_keys(cls, keys):
    """Gets submissions by their keys.

    Args:
        keys: [db.Key of review.Submission]. Keys to fetch.

    Returns:
        [domain.Submission or None]. Missed keys return None in place in
        result list.
    """
    submissions = []
    for model in entities.get(keys):
        submissions.append(cls._make_domain_submission(model))
    return submissions
@classmethod
def _make_domain_submission(cls, model):
    """Converts a datastore submission into its domain object (None-safe)."""
    if model is not None:
        return domain.Submission(contents=model.contents, key=model.key())
    return None
@classmethod
def start_review_process_for(cls, unit_id, submission_key, reviewee_key):
    """Registers a new submission with the review subsystem.

    Once registered, reviews can be assigned against a given submission,
    either by humans or by machine. No reviews are assigned during
    registration -- this method merely makes them assignable.

    Args:
        unit_id: string. Unique identifier for a unit.
        submission_key: db.Key of models.student_work.Submission. The
            submission being registered.
        reviewee_key: db.Key of models.models.Student. The student who
            authored the submission.

    Raises:
        db.BadValueError: if passed args are invalid.
        domain.ReviewProcessAlreadyStartedError: if the review process has
            already been started for this student's submission.

    Returns:
        db.Key of created ReviewSummary.
    """
    try:
        COUNTER_START_REVIEW_PROCESS_FOR_START.inc()
        key = cls._create_review_summary(
            reviewee_key, submission_key, unit_id)
        COUNTER_START_REVIEW_PROCESS_FOR_SUCCESS.inc()
        return key
    except Exception:
        COUNTER_START_REVIEW_PROCESS_FOR_FAILED.inc()
        # Bare re-raise preserves the original traceback; 'raise e' would
        # rebase it to this frame.
        raise
@classmethod
@db.transactional(xg=True)
def _create_review_summary(cls, reviewee_key, submission_key, unit_id):
    """Transactional worker: create the summary unless one already exists."""
    existing = peer.ReviewSummary.get_by_key_name(
        peer.ReviewSummary.key_name(submission_key))
    if existing:
        COUNTER_START_REVIEW_PROCESS_FOR_ALREADY_STARTED.inc()
        raise domain.ReviewProcessAlreadyStartedError()
    summary = peer.ReviewSummary(
        reviewee_key=reviewee_key,
        submission_key=submission_key,
        unit_id=unit_id)
    return summary.put()
@classmethod
def write_review(
    cls, review_step_key, review_payload, mark_completed=True):
    """Writes a review, updating associated internal state.

    If the passed step already has a review, that review will be updated. If
    it does not have a review, a new one will be created with the passed
    payload.

    Args:
        review_step_key: db.Key of peer.ReviewStep. The key of the review
            step to update.
        review_payload: string. New contents of the review.
        mark_completed: boolean. If True, set the state of the review to
            domain.REVIEW_STATE_COMPLETED. If False, leave the state as it
            was.

    Raises:
        domain.ConstraintError: if no review found for the review step.
        domain.RemovedError: if the step for the review is removed.
        domain.TransitionError: if mark_completed was True but the step was
            already in domain.REVIEW_STATE_COMPLETED.
        KeyError: if no review step was found with review_step_key.

    Returns:
        db.Key of peer.ReviewStep: key of the written review step.
    """
    COUNTER_WRITE_REVIEW_START.inc()
    try:
        step_key = cls._update_review_contents_and_change_state(
            review_step_key, review_payload, mark_completed)
    except Exception:
        COUNTER_WRITE_REVIEW_FAILED.inc()
        # Bare raise preserves the original traceback; 'raise e' would
        # truncate it at this frame in Python 2.
        raise
    COUNTER_WRITE_REVIEW_SUCCESS.inc()
    return step_key
@classmethod
@db.transactional(xg=True)
def _update_review_contents_and_change_state(
    cls, review_step_key, review_payload, mark_completed):
    """Transactionally writes review contents and advances step state.

    Args:
        review_step_key: db.Key of peer.ReviewStep. The step to update.
        review_payload: string. New contents of the review.
        mark_completed: boolean. Whether to move the step to
            domain.REVIEW_STATE_COMPLETED.

    Raises:
        KeyError: if no review step exists with review_step_key.
        domain.RemovedError: if the step is marked removed.
        domain.TransitionError: if mark_completed is True and the step is
            already in domain.REVIEW_STATE_COMPLETED.
        domain.ConstraintError: if the step's review or summary cannot be
            loaded.

    Returns:
        db.Key of the written peer.ReviewStep.
    """
    # Counter increments are deferred until after the datastore writes have
    # succeeded, recorded here as flags.
    should_increment_created_new_review = False
    should_increment_updated_existing_review = False
    should_increment_assigned_to_completed = False
    should_increment_expired_to_completed = False
    step = entities.get(review_step_key)
    if not step:
        COUNTER_WRITE_REVIEW_STEP_MISS.inc()
        raise KeyError(
            'No review step found with key %s' % repr(review_step_key))
    elif step.removed:
        raise domain.RemovedError(
            'Unable to process step %s' % repr(step.key()), step.removed)
    elif mark_completed and step.state == domain.REVIEW_STATE_COMPLETED:
        raise domain.TransitionError(
            'Unable to transition step %s' % repr(step.key()),
            step.state, domain.REVIEW_STATE_COMPLETED)
    if step.review_key:
        review_to_update = entities.get(step.review_key)
        if review_to_update:
            should_increment_updated_existing_review = True
    else:
        # First write for this step: build the Review and point the step at
        # the deterministic key the Review will occupy once put.
        review_to_update = student_work.Review(
            contents=review_payload, reviewee_key=step.reviewee_key,
            reviewer_key=step.reviewer_key, unit_id=step.unit_id)
        step.review_key = db.Key.from_path(
            student_work.Review.kind(),
            student_work.Review.key_name(
                step.unit_id, step.reviewee_key, step.reviewer_key))
        should_increment_created_new_review = True
    if not review_to_update:
        # step.review_key was set but the Review is missing: dangling
        # reference.
        COUNTER_WRITE_REVIEW_REVIEW_MISS.inc()
        raise domain.ConstraintError(
            'No review found with key %s' % repr(step.review_key))
    summary = entities.get(step.review_summary_key)
    if not summary:
        COUNTER_WRITE_REVIEW_SUMMARY_MISS.inc()
        raise domain.ConstraintError(
            'No review summary found with key %s' % repr(
                step.review_summary_key))
    review_to_update.contents = review_payload
    updated_step_key = None
    if not mark_completed:
        _, updated_step_key = entities.put([review_to_update, step])
    else:
        if step.state == domain.REVIEW_STATE_ASSIGNED:
            should_increment_assigned_to_completed = True
        elif step.state == domain.REVIEW_STATE_EXPIRED:
            should_increment_expired_to_completed = True
        # Keep the summary's per-state counters in sync with the step's
        # state transition.
        summary.decrement_count(step.state)
        step.state = domain.REVIEW_STATE_COMPLETED
        summary.increment_count(step.state)
        _, updated_step_key, _ = entities.put(
            [review_to_update, step, summary])
    if should_increment_created_new_review:
        COUNTER_WRITE_REVIEW_CREATED_NEW_REVIEW.inc()
    elif should_increment_updated_existing_review:
        COUNTER_WRITE_REVIEW_UPDATED_EXISTING_REVIEW.inc()
    if should_increment_assigned_to_completed:
        COUNTER_WRITE_REVIEW_COMPLETED_ASSIGNED_STEP.inc()
    elif should_increment_expired_to_completed:
        COUNTER_WRITE_REVIEW_COMPLETED_EXPIRED_STEP.inc()
    return updated_step_key
# Module registration handle, populated by register_module().
custom_module = None


def register_module():
    """Registers this module in the registry."""
    # Imports are deferred to function scope -- presumably to avoid circular
    # imports at module load time; confirm before moving them to the top.
    import modules.dashboard  # pylint: disable-msg=g-import-not-at-top
    from modules.review import stats  # pylint: disable-msg=g-import-not-at-top
    from modules.review import cron  # pylint: disable-msg=g-import-not-at-top

    # register custom dashboard section
    modules.dashboard.dashboard.DashboardRegistry.add_analytics_section(
        stats.PeerReviewStatsHandler)
    # register this peer review implementation
    models.review.ReviewsProcessor.set_peer_matcher(Manager)
    # register cron handler
    cron_handlers = [(
        '/cron/expire_old_assigned_reviews',
        cron.ExpireOldAssignedReviewsHandler)]

    global custom_module
    custom_module = custom_modules.Module(
        'Peer Review Engine',
        'A set of classes for managing peer review process.',
        cron_handlers, [])
    return custom_module
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for displaying peer review analytics."""
__author__ = 'Sean Lip (sll@google.com)'
import os
from common import safe_dom
from controllers.utils import ApplicationHandler
from controllers.utils import HUMAN_READABLE_TIME_FORMAT
import jinja2
from models import courses
from models import jobs
from models import transforms
from models import utils
from modules.review import peer
class ReviewStatsAggregator(object):
    """Aggregates peer review statistics."""

    def __init__(self):
        # Maps unit_id -> {num_completed_reviews: count_of_submissions}.
        self.counts_by_completed_reviews = {}

    def visit(self, review_summary):
        """Folds one review summary into the per-unit histograms."""
        histogram = self.counts_by_completed_reviews.setdefault(
            review_summary.unit_id, {})
        completed = review_summary.completed_count
        histogram[completed] = histogram.get(completed, 0) + 1
class ComputeReviewStats(jobs.DurableJob):
    """A job for computing peer review statistics."""

    def run(self):
        """Computes peer review statistics."""
        aggregator = ReviewStatsAggregator()
        mapper = utils.QueryMapper(
            peer.ReviewSummary.all(), batch_size=500, report_every=1000)
        mapper.run(aggregator.visit)

        # Convert each unit's sparse histogram into a dense array indexed by
        # number of completed reviews (missing entries become 0).
        completed_arrays_by_unit = {}
        for unit_id, histogram in (
                aggregator.counts_by_completed_reviews.items()):
            highest = max(histogram.keys())
            completed_arrays_by_unit[unit_id] = [
                histogram.get(i, 0) for i in range(highest + 1)]
        return {'counts_by_completed_reviews': completed_arrays_by_unit}
class PeerReviewStatsHandler(ApplicationHandler):
    """Shows peer review analytics on the dashboard."""

    # The key used in the statistics dict that generates the dashboard page.
    # Must be unique.
    name = 'peer_review_stats'

    # The class that generates the data to be displayed.
    stats_computer = ComputeReviewStats

    def get_markup(self, job):
        """Returns Jinja markup for peer review statistics.

        Args:
            job: the stats-computation job record, or None if it has never
                been scheduled. Its status_code selects which message and
                data are rendered.
        """
        errors = []
        stats_calculated = False
        update_message = safe_dom.Text('')

        course = courses.Course(self)
        serialized_units = []

        if not job:
            # The stats job has never run for this course.
            update_message = safe_dom.Text(
                'Peer review statistics have not been calculated yet.')
        else:
            if job.status_code == jobs.STATUS_CODE_COMPLETED:
                stats = transforms.loads(job.output)
                stats_calculated = True

                # Pair each peer-reviewed unit with its histogram of
                # completed-review counts, when present in the job output.
                for unit in course.get_peer_reviewed_units():
                    if unit.unit_id in stats['counts_by_completed_reviews']:
                        unit_stats = (
                            stats['counts_by_completed_reviews'][unit.unit_id])
                        serialized_units.append({
                            'stats': unit_stats,
                            'title': unit.title,
                            'unit_id': unit.unit_id,
                        })
                update_message = safe_dom.Text("""
                    Peer review statistics were last updated at
                    %s in about %s second(s).""" % (
                        job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT),
                        job.execution_time_sec))
            elif job.status_code == jobs.STATUS_CODE_FAILED:
                update_message = safe_dom.NodeList().append(
                    safe_dom.Text("""
                        There was an error updating peer review statistics.
                        Here is the message:""")
                ).append(
                    safe_dom.Element('br')
                ).append(
                    safe_dom.Element('blockquote').add_child(
                        safe_dom.Element('pre').add_text('\n%s' % job.output)))
            else:
                # Job is queued or still running.
                update_message = safe_dom.Text("""
                    Peer review statistics update started at %s and is running
                    now. Please come back shortly.""" % job.updated_on.strftime(
                        HUMAN_READABLE_TIME_FORMAT))

        return jinja2.utils.Markup(self.get_template(
            'stats.html', [os.path.dirname(__file__)]
        ).render({
            'errors': errors,
            'serialized_units': serialized_units,
            'serialized_units_json': transforms.dumps(serialized_units),
            'stats_calculated': stats_calculated,
            'update_message': update_message,
        }, autoescape=True))
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cron job definitions for the review subsystem."""
__author__ = [
'johncox@google.com (John Cox)',
]
import logging
from controllers import sites
from controllers import utils
from models import courses
from modules.review import review
from google.appengine.api import namespace_manager
_LOG = logging.getLogger('modules.reviews.cron')
logging.basicConfig()
class ExpireOldAssignedReviewsHandler(utils.BaseHandler):
    """Iterates through all units in all courses, expiring old review steps.

    The system will run a maximum of one of these jobs at any given time. This
    is enforced by the 10 minute execution time limit on cron jobs plus the
    scheduler, which is configured to run this every 15 minutes.

    Write operations done by this handler must be atomic since admins may visit
    this page at any time, kicking off any number of runs.
    """

    def get(self):
        """Runs the expiry operation once for each peer-reviewed unit."""
        try:
            self.response.headers['Content-Type'] = 'text/plain'

            # namespace_string -> [{
            #     'id': unit_id_string, 'review_window_mins': int}]
            namespace_to_units = {}
            for context in sites.get_all_courses():
                namespace = context.get_namespace_name()
                namespace_to_units[namespace] = []
                course = courses.Course(None, context)
                for unit in course.get_peer_reviewed_units():
                    namespace_to_units[namespace].append({
                        'review_window_mins': (
                            unit.workflow.get_review_window_mins()),
                        'id': str(unit.unit_id),
                    })

            total_count = 0
            total_expired_count = 0
            total_exception_count = 0
            _LOG.info('Begin expire_old_assigned_reviews cron')
            for namespace, units in namespace_to_units.iteritems():
                start_namespace_message = (
                    ('Begin processing course in namespace "%s"; %s unit%s '
                     'found') % (
                         namespace, len(units), '' if len(units) == 1 else 's'))
                _LOG.info(start_namespace_message)
                for unit in units:
                    begin_unit_message = 'Begin processing unit %s' % unit['id']
                    _LOG.info(begin_unit_message)
                    namespace_manager.set_namespace(namespace)
                    expired_keys, exception_keys = (
                        review.Manager.expire_old_reviews_for_unit(
                            unit['review_window_mins'], unit['id']))
                    unit_expired_count = len(expired_keys)
                    unit_exception_count = len(exception_keys)
                    unit_total_count = unit_expired_count + unit_exception_count
                    total_expired_count += unit_expired_count
                    # Bug fix: this previously added total_exception_count to
                    # itself, so per-unit exception counts were never
                    # accumulated into the cron-wide total.
                    total_exception_count += unit_exception_count
                    total_count += unit_total_count
                    end_unit_message = (
                        'End processing unit %s. Expired: %s, Exceptions: %s, '
                        'Total: %s' % (
                            unit['id'], unit_expired_count,
                            unit_exception_count, unit_total_count))
                    _LOG.info(end_unit_message)
                _LOG.info('Done processing namespace "%s"', namespace)
            end_message = (
                ('End expire_old_assigned_reviews cron. Expired: %s, '
                 'Exceptions : %s, Total: %s') % (
                     total_expired_count, total_exception_count, total_count))
            _LOG.info(end_message)
            self.response.write('OK\n')
        except:  # Hide all errors. pylint: disable-msg=bare-except
            # Errors are deliberately hidden from the response, but log them
            # so failures are still diagnosable.
            _LOG.exception('expire_old_assigned_reviews cron failed')
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects and constants for use by internal and external clients."""
__author__ = [
'johncox@google.com (John Cox)',
]
# Values below are plain strings so they can be stored and compared directly.

# Identifier for reviews that have been computer-assigned.
ASSIGNER_KIND_AUTO = 'AUTO'
# Identifier for reviews that have been assigned by a human.
ASSIGNER_KIND_HUMAN = 'HUMAN'
ASSIGNER_KINDS = (
    ASSIGNER_KIND_AUTO,
    ASSIGNER_KIND_HUMAN,
)

# Maximum number of ReviewSteps with removed = False, in any REVIEW_STATE, that
# can exist in the backend at a given time.
MAX_UNREMOVED_REVIEW_STEPS = 100

# State of a review that is currently assigned, either by a human or by machine.
REVIEW_STATE_ASSIGNED = 'ASSIGNED'
# State of a review that is complete and may be shown to the reviewee, provided
# the reviewee is themself in a state to see their reviews.
REVIEW_STATE_COMPLETED = 'COMPLETED'
# State of a review that used to be assigned but the assignment has been
# expired. Only machine-assigned reviews can be expired.
REVIEW_STATE_EXPIRED = 'EXPIRED'
REVIEW_STATES = (
    REVIEW_STATE_ASSIGNED,
    REVIEW_STATE_COMPLETED,
    REVIEW_STATE_EXPIRED,
)
class Error(Exception):
    """Base error class; root of the review subsystem's exception hierarchy."""
class ConstraintError(Error):
    """Raised when data is found indicating a constraint is violated.

    For example, a step referencing a review or summary that no longer exists.
    """
class NotAssignableError(Error):
    """Raised when review assignment is requested but cannot be satisfied."""
class RemovedError(Error):
"""Raised when an op cannot be performed on a step because it is removed."""
def __init__(self, message, value):
"""Constructs a new RemovedError."""
super(RemovedError, self).__init__(message)
self.value = value
def __str__(self):
return '%s: removed is %s' % (self.message, self.value)
class ReviewProcessAlreadyStartedError(Error):
    """Raised when someone attempts to start a review process in progress."""
class TransitionError(Error):
"""Raised when an invalid state transition is attempted."""
def __init__(self, message, before, after):
"""Constructs a new TransitionError.
Args:
message: string. Exception message.
before: string in peer.ReviewStates (though this is unenforced).
State we attempted to transition from.
after: string in peer.ReviewStates (though this is unenforced).
State we attempted to transition to.
"""
super(TransitionError, self).__init__(message)
self.after = after
self.before = before
def __str__(self):
return '%s: attempted to transition from %s to %s' % (
self.message, self.before, self.after)
class Review(object):
    """Domain object for a student work submission."""

    def __init__(self, contents=None, key=None):
        self._contents = contents
        self._key = key

    # Read-only views over the constructor arguments.
    contents = property(lambda self: self._contents)
    key = property(lambda self: self._key)
class ReviewStep(object):
    """Domain object for the status of a single review at a point in time."""

    def __init__(
        self, assigner_kind=None, change_date=None, create_date=None, key=None,
        removed=None, review_key=None, review_summary_key=None,
        reviewee_key=None, reviewer_key=None, state=None, submission_key=None,
        unit_id=None):
        self._assigner_kind = assigner_kind
        self._change_date = change_date
        self._create_date = create_date
        self._key = key
        self._removed = removed
        self._review_key = review_key
        self._review_summary_key = review_summary_key
        self._reviewee_key = reviewee_key
        self._reviewer_key = reviewer_key
        self._state = state
        self._submission_key = submission_key
        self._unit_id = unit_id

    # Read-only views over the constructor arguments.
    assigner_kind = property(lambda self: self._assigner_kind)
    change_date = property(lambda self: self._change_date)
    create_date = property(lambda self: self._create_date)
    key = property(lambda self: self._key)
    removed = property(lambda self: self._removed)
    review_key = property(lambda self: self._review_key)
    review_summary_key = property(lambda self: self._review_summary_key)
    reviewee_key = property(lambda self: self._reviewee_key)
    reviewer_key = property(lambda self: self._reviewer_key)
    state = property(lambda self: self._state)
    submission_key = property(lambda self: self._submission_key)
    unit_id = property(lambda self: self._unit_id)

    # State predicates.
    is_assigned = property(
        lambda self: self.state == REVIEW_STATE_ASSIGNED,
        doc='Predicate for whether the step is in REVIEW_STATE_ASSIGNED.')
    is_completed = property(
        lambda self: self.state == REVIEW_STATE_COMPLETED,
        doc='Predicate for whether the step is in REVIEW_STATE_COMPLETED.')
    is_expired = property(
        lambda self: self.state == REVIEW_STATE_EXPIRED,
        doc='Predicate for whether the step is in REVIEW_STATE_EXPIRED.')
class ReviewSummary(object):
    """Domain object for review state aggregate entities."""

    def __init__(
        self, assigned_count=None, completed_count=None, change_date=None,
        create_date=None, key=None, reviewee_key=None, submission_key=None,
        unit_id=None):
        self._assigned_count = assigned_count
        self._completed_count = completed_count
        self._change_date = change_date
        self._create_date = create_date
        self._key = key
        self._reviewee_key = reviewee_key
        self._submission_key = submission_key
        self._unit_id = unit_id

    # Read-only views over the constructor arguments.
    assigned_count = property(lambda self: self._assigned_count)
    completed_count = property(lambda self: self._completed_count)
    change_date = property(lambda self: self._change_date)
    create_date = property(lambda self: self._create_date)
    key = property(lambda self: self._key)
    reviewee_key = property(lambda self: self._reviewee_key)
    submission_key = property(lambda self: self._submission_key)
    unit_id = property(lambda self: self._unit_id)
class Submission(object):
    """Domain object for a student work submission."""

    def __init__(self, contents=None, key=None):
        self._contents = contents
        self._key = key

    # Read-only views over the constructor arguments.
    contents = property(lambda self: self._contents)
    key = property(lambda self: self._key)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal implementation details of the peer review subsystem.
Public classes, including domain objects, can be found in domain.py and
models/student_work.py. Entities declared here should not be used by external
clients.
"""
__author__ = [
'johncox@google.com (John Cox)',
]
from models import counters
from models import models
from models import student_work
from modules.review import domain
from google.appengine.ext import db
COUNTER_INCREMENT_COUNT_COUNT_AGGREGATE_EXCEEDED_MAX = counters.PerfCounter(
'gcb-pr-increment-count-count-aggregate-exceeded-max',
('number of times increment_count() failed because the new aggregate of '
'the counts would have exceeded domain.MAX_UNREMOVED_REVIEW_STEPS'))
class ReviewSummary(student_work.BaseEntity):
    """Object that tracks the aggregate state of reviews for a submission."""

    # UTC last modification timestamp.
    change_date = db.DateTimeProperty(auto_now=True, required=True)
    # UTC create date.
    create_date = db.DateTimeProperty(auto_now_add=True, required=True)

    # Strong counters. Callers should never manipulate these directly. Instead,
    # use decrement|increment_count.

    # Count of ReviewStep entities for this submission currently in state
    # STATE_ASSIGNED.
    assigned_count = db.IntegerProperty(default=0, required=True)
    # Count of ReviewStep entities for this submission currently in state
    # STATE_COMPLETED.
    completed_count = db.IntegerProperty(default=0, required=True)
    # Count of ReviewStep entities for this submission currently in state
    # STATE_EXPIRED.
    expired_count = db.IntegerProperty(default=0, required=True)

    # Key of the student who wrote the submission being reviewed.
    reviewee_key = student_work.KeyProperty(
        kind=models.Student.kind(), required=True)
    # Key of the submission being reviewed.
    submission_key = student_work.KeyProperty(
        kind=student_work.Submission.kind(), required=True)
    # Identifier of the unit this review is a part of.
    unit_id = db.StringProperty(required=True)

    def __init__(self, *args, **kwargs):
        """Constructs a new ReviewSummary.

        The key_name is derived from the submission key, so there can be at
        most one summary per submission; callers may not set it themselves.
        """
        assert not kwargs.get('key_name'), (
            'Setting key_name manually not supported')
        submission_key = kwargs.get('submission_key')
        assert submission_key, 'Missing required submission_key property'
        kwargs['key_name'] = self.key_name(submission_key)
        super(ReviewSummary, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, submission_key):
        """Creates a key_name string for datastore operations."""
        return '(review_summary:%s)' % submission_key.id_or_name()

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Builds a copy of db_key with student identity run through
        transform_fn so the key is safe for export."""
        _, _, unit_id, unsafe_reviewee_key_name = cls._split_key(db_key.name())
        unsafe_reviewee_key = db.Key.from_path(
            models.Student.kind(), unsafe_reviewee_key_name)
        unsafe_submission_key = student_work.Submission.get_key(
            unit_id, unsafe_reviewee_key)
        safe_submission_key = student_work.Submission.safe_key(
            unsafe_submission_key, transform_fn)
        return db.Key.from_path(cls.kind(), cls.key_name(safe_submission_key))

    def _check_count(self):
        # Invariant guard: the total number of tracked steps must be below the
        # cap before another increment is applied.
        count_sum = (
            self.assigned_count + self.completed_count + self.expired_count)
        if count_sum >= domain.MAX_UNREMOVED_REVIEW_STEPS:
            COUNTER_INCREMENT_COUNT_COUNT_AGGREGATE_EXCEEDED_MAX.inc()
            raise db.BadValueError(
                'Unable to increment %s to %s; max is %s' % (
                    self.kind(), count_sum, domain.MAX_UNREMOVED_REVIEW_STEPS))

    def decrement_count(self, state):
        """Decrements the count for the given state enum; does not save.

        Args:
            state: string. State indicating counter to decrement; must be one
                of domain.REVIEW_STATES.

        Raises:
            ValueError: if state not in domain.REVIEW_STATES.
        """
        if state == domain.REVIEW_STATE_ASSIGNED:
            self.assigned_count -= 1
        elif state == domain.REVIEW_STATE_COMPLETED:
            self.completed_count -= 1
        elif state == domain.REVIEW_STATE_EXPIRED:
            self.expired_count -= 1
        else:
            raise ValueError('%s not in %s' % (state, domain.REVIEW_STATES))

    def increment_count(self, state):
        """Increments the count for the given state enum; does not save.

        Args:
            state: string. State indicating counter to increment; must be one
                of domain.REVIEW_STATES.

        Raises:
            db.BadValueError: if incrementing the counter would cause the sum
                of all *_counts to exceed domain.MAX_UNREMOVED_REVIEW_STEPS.
            ValueError: if state not in domain.REVIEW_STATES
        """
        if state not in domain.REVIEW_STATES:
            raise ValueError('%s not in %s' % (state, domain.REVIEW_STATES))
        self._check_count()
        if state == domain.REVIEW_STATE_ASSIGNED:
            self.assigned_count += 1
        elif state == domain.REVIEW_STATE_COMPLETED:
            self.completed_count += 1
        elif state == domain.REVIEW_STATE_EXPIRED:
            self.expired_count += 1

    def for_export(self, transform_fn):
        """Creates an export-safe copy with student/submission keys
        transformed via transform_fn."""
        model = super(ReviewSummary, self).for_export(transform_fn)
        model.reviewee_key = models.Student.safe_key(
            model.reviewee_key, transform_fn)
        model.submission_key = student_work.Submission.safe_key(
            model.submission_key, transform_fn)
        return model
class ReviewStep(student_work.BaseEntity):
    """Object that represents a single state of a review."""

    # Audit trail information.

    # Identifier for the kind of thing that did the assignment. Used to
    # distinguish between assignments done by humans and those done by the
    # review subsystem.
    assigner_kind = db.StringProperty(
        choices=domain.ASSIGNER_KINDS, required=True)
    # UTC last modification timestamp.
    change_date = db.DateTimeProperty(auto_now=True, required=True)
    # UTC create date.
    create_date = db.DateTimeProperty(auto_now_add=True, required=True)

    # Repeated data to allow filtering/ordering in queries.

    # Key of the submission being reviewed.
    submission_key = student_work.KeyProperty(
        kind=student_work.Submission.kind(), required=True)
    # Unit this review step is part of.
    unit_id = db.StringProperty(required=True)

    # State information.

    # State of this review step.
    state = db.StringProperty(choices=domain.REVIEW_STATES, required=True)
    # Whether or not the review has been removed. By default removed entities
    # are ignored for most queries.
    removed = db.BooleanProperty(default=False)

    # Pointers that tie the work and people involved together.

    # Key of the Review associated with this step.
    review_key = student_work.KeyProperty(kind=student_work.Review.kind())
    # Key of the associated ReviewSummary.
    review_summary_key = student_work.KeyProperty(kind=ReviewSummary.kind())
    # Key of the Student being reviewed.
    reviewee_key = student_work.KeyProperty(kind=models.Student.kind())
    # Key of the Student doing this review.
    reviewer_key = student_work.KeyProperty(kind=models.Student.kind())

    def __init__(self, *args, **kwargs):
        """Constructs a new ReviewStep.

        The key_name is derived from (submission, reviewer), so there is at
        most one step per reviewer per submission; callers may not set it.
        """
        assert not kwargs.get('key_name'), (
            'Setting key_name manually not supported')
        reviewer_key = kwargs.get('reviewer_key')
        submission_key = kwargs.get('submission_key')
        assert reviewer_key, 'Missing required reviewer_key property'
        assert submission_key, 'Missing required submission_key property'
        kwargs['key_name'] = self.key_name(submission_key, reviewer_key)
        super(ReviewStep, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, submission_key, reviewer_key):
        """Creates a key_name string for datastore operations."""
        return '(review_step:%s:%s)' % (
            submission_key.id_or_name(), reviewer_key.id_or_name())

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Constructs a version of the entity's key that is safe for export."""
        # Validate the overall key shape before parsing it by hand below.
        cls._split_key(db_key.name())
        name = db_key.name().strip('()')
        # name has the form 'review_step:<submission_key_name>:<reviewer_id>';
        # split off the prefix, then peel the reviewer id from the right.
        unsafe_submission_key_name, unsafe_reviewer_id_or_name = name.split(
            ':', 1)[1].rsplit(':', 1)
        unsafe_reviewer_key = db.Key.from_path(
            models.Student.kind(), unsafe_reviewer_id_or_name)
        safe_reviewer_key = models.Student.safe_key(
            unsafe_reviewer_key, transform_fn)
        # Treating as module-protected. pylint: disable-msg=protected-access
        _, unit_id, unsafe_reviewee_key_name = (
            student_work.Submission._split_key(unsafe_submission_key_name))
        unsafe_reviewee_key = db.Key.from_path(
            models.Student.kind(), unsafe_reviewee_key_name)
        unsafe_submission_key = student_work.Submission.get_key(
            unit_id, unsafe_reviewee_key)
        safe_submission_key = student_work.Submission.safe_key(
            unsafe_submission_key, transform_fn)
        return db.Key.from_path(
            cls.kind(), cls.key_name(safe_submission_key, safe_reviewer_key))

    def for_export(self, transform_fn):
        """Creates a version of the entity that is safe for export."""
        model = super(ReviewStep, self).for_export(transform_fn)
        model.review_key = student_work.Review.safe_key(
            model.review_key, transform_fn)
        model.review_summary_key = ReviewSummary.safe_key(
            model.review_summary_key, transform_fn)
        model.reviewee_key = models.Student.safe_key(
            model.reviewee_key, transform_fn)
        model.reviewer_key = models.Student.safe_key(
            model.reviewer_key, transform_fn)
        model.submission_key = student_work.Submission.safe_key(
            model.submission_key, transform_fn)
        return model
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Courses module."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
from controllers import assessments
from controllers import lessons
from controllers import utils
from models import content
from models import custom_modules
from tools import verify
# Module registration handle, populated by register_module().
custom_module = None


def register_module():
    """Registers this module in the registry."""

    # provide parser to verify
    verify.parse_content = content.parse_string_in_scope

    # setup routes; each entry maps a URL path to its handler class
    courses_routes = [
        ('/', lessons.CourseHandler),
        ('/activity', lessons.ActivityHandler),
        ('/answer', assessments.AnswerHandler),
        ('/assessment', lessons.AssessmentHandler),
        ('/course', lessons.CourseHandler),
        ('/forum', utils.ForumHandler),
        ('/preview', utils.PreviewHandler),
        ('/register', utils.RegisterHandler),
        ('/review', lessons.ReviewHandler),
        ('/reviewdashboard', lessons.ReviewDashboardHandler),
        ('/student/editstudent', utils.StudentEditStudentHandler),
        ('/student/home', utils.StudentProfileHandler),
        ('/student/unenroll', utils.StudentUnenrollHandler),
        ('/unit', lessons.UnitHandler)]

    global custom_module
    custom_module = custom_modules.Module(
        'Course',
        'A set of pages for delivering an online course.',
        [], courses_routes)
    return custom_module
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting online file editing."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import base64
import cgi
import os
import urllib
import appengine_config
from common import schema_fields
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import roles
from models import transforms
from models import vfs
from modules.oeditor import oeditor
import yaml
import messages
from google.appengine.api import users
# All bases below are compared against slash-stripped, normalized paths
# (see strip_leading_and_trailing_slashes).

# Set of string. The relative, normalized path bases we allow uploading of
# binary data into.
ALLOWED_ASSET_BINARY_BASES = frozenset([
    'assets/img',
])

# Set of string. The relative, normalized path bases we allow uploading of text
# data into.
ALLOWED_ASSET_TEXT_BASES = frozenset([
    'assets/css',
    'assets/lib',
    'views'
])

# Set of string. The relative, normalized path bases we allow uploading into.
ALLOWED_ASSET_UPLOAD_BASES = ALLOWED_ASSET_BINARY_BASES.union(
    ALLOWED_ASSET_TEXT_BASES)

# Upper bound, in kilobytes, for a single asset upload.
MAX_ASSET_UPLOAD_SIZE_K = 500
def is_editable_fs(app_context):
    """Checks whether the app context's file system is datastore-backed.

    Only the datastore-backed implementation supports writes; note the exact
    class comparison intentionally excludes other implementations.
    """
    fs_impl_class = app_context.fs.impl.__class__
    return fs_impl_class == vfs.DatastoreBackedFileSystem
def is_text_payload(payload):
    """Returns True iff payload can be serialized by transforms (i.e. text)."""
    # Any failure means "not text"; the reason does not matter here.
    # pylint: disable-msg=bare-except
    try:
        transforms.dumps(payload)
    except:
        return False
    return True
def is_readonly_asset(asset):
    """An asset without (truthy) datastore metadata is treated as read-only."""
    if getattr(asset, 'metadata', None):
        return False
    return True
def strip_leading_and_trailing_slashes(path_base):
    """Given a path base string of the form '/foo/bar/', return 'foo/bar'."""
    # str.strip removes any run of '/' from both ends in one pass.
    return path_base.strip('/')
class FilesRights(object):
    """Manages view/edit rights for files."""

    @classmethod
    def can_view(cls, handler):
        """Course admins may view files."""
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_edit(cls, handler):
        """Course admins may edit files."""
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_delete(cls, handler):
        """Deletion rights mirror edit rights."""
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        """Addition rights mirror edit rights."""
        return cls.can_edit(handler)
class FileManagerAndEditor(ApplicationHandler):
    """An editor for editing and managing files."""

    # Read-only view of the files shipped with the application, rooted at '/'.
    # Used to detect whether a datastore-edited asset shadows an original
    # file on disk (and hence whether "delete" really means "restore").
    local_fs = vfs.LocalReadOnlyFileSystem(logical_home_folder='/')

    def _get_delete_url(self, base_url, key, xsrf_token_name):
        """Builds a canonical delete URL carrying the key and an XSRF token."""
        return '%s?%s' % (
            self.canonicalize_url(base_url),
            urllib.urlencode({
                'key': key,
                'xsrf_token': cgi.escape(
                    self.create_xsrf_token(xsrf_token_name)),
            }))

    def _get_normalized_base(self):
        """Gets base arg from URL and normalizes it for membership checks."""
        base = self.request.get('base')
        assert base
        base = strip_leading_and_trailing_slashes(base)
        # Only whitelisted asset directories may be used as upload targets.
        assert base in ALLOWED_ASSET_UPLOAD_BASES
        return base

    def post_create_or_edit_settings(self):
        """Handles creation or/and editing of course.yaml."""
        assert is_editable_fs(self.app_context)
        # Check if course.yaml exists; create if not.
        fs = self.app_context.fs.impl
        course_yaml = fs.physical_to_logical('/course.yaml')
        if not fs.isfile(course_yaml):
            fs.put(course_yaml, vfs.string_to_stream(
                courses.EMPTY_COURSE_YAML % users.get_current_user().email()))
        self.redirect(self.get_action_url('edit_settings', key='/course.yaml'))

    def get_edit_settings(self):
        """Shows editor for course.yaml."""
        key = self.request.get('key')
        exit_url = self.canonicalize_url('/dashboard?action=settings')
        rest_url = self.canonicalize_url('/rest/files/item')
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            FilesItemRESTHandler.SCHEMA_JSON,
            FilesItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            key, rest_url, exit_url,
            required_modules=FilesItemRESTHandler.REQUIRED_MODULES)
        template_values = {}
        template_values['page_title'] = self.format_title('Edit Settings')
        template_values['page_description'] = messages.EDIT_SETTINGS_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def get_add_asset(self):
        """Show an upload dialog for assets."""
        # The editor 'key' here is the normalized target directory, not a file.
        key = self._get_normalized_base()
        exit_url = self.canonicalize_url('/dashboard?action=assets')
        rest_url = self.canonicalize_url(
            AssetItemRESTHandler.URI)
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            AssetItemRESTHandler.SCHEMA_JSON,
            AssetItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            key, rest_url, exit_url, save_method='upload', auto_return=True,
            required_modules=AssetItemRESTHandler.REQUIRED_MODULES,
            save_button_caption='Upload')
        template_values = {}
        template_values['page_title'] = self.format_title('Upload Asset')
        template_values['page_description'] = messages.UPLOAD_ASSET_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def get_delete_asset(self):
        """Show a review/delete page for assets."""
        uri = self.request.get('uri')
        exit_url = self.canonicalize_url('/dashboard?action=assets')
        rest_url = self.canonicalize_url(
            AssetUriRESTHandler.URI)
        delete_url = self._get_delete_url(
            FilesItemRESTHandler.URI, uri, 'delete-asset')
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            AssetUriRESTHandler.SCHEMA_JSON,
            AssetUriRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            uri, rest_url, exit_url, save_method='',
            delete_url=delete_url, delete_method='delete')
        template_values = {}
        template_values['page_title'] = self.format_title('View Asset')
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def get_manage_text_asset(self):
        """Show an edit/save/delete/revert form for a text asset."""
        assert is_editable_fs(self.app_context)
        uri = self.request.get('uri')
        assert uri
        asset = self.app_context.fs.impl.get(
            os.path.join(appengine_config.BUNDLE_ROOT, uri))
        assert asset
        # The asset may exist in the datastore (edited), in the local
        # read-only fs (original), or in both (customized original).
        asset_in_datastore_fs = not is_readonly_asset(asset)
        try:
            asset_in_local_fs = bool(self.local_fs.get(uri))
        except IOError:
            asset_in_local_fs = False
        exit_url = self.canonicalize_url('/dashboard?action=assets')
        rest_url = self.canonicalize_url(TextAssetRESTHandler.URI)
        delete_button_caption = 'Delete'
        delete_message = None
        delete_url = None
        if asset_in_datastore_fs:
            delete_message = 'Are you sure you want to delete %s?' % uri
            delete_url = self._get_delete_url(
                TextAssetRESTHandler.URI, uri,
                TextAssetRESTHandler.XSRF_TOKEN_NAME)
        # When an original file also exists, deleting the datastore copy
        # merely restores the shipped version, so relabel the action.
        if asset_in_local_fs:
            delete_message = (
                'Are you sure you want to restore %s to the original version? '
                'All your customizations will be lost.' % uri)
            delete_button_caption = 'Restore original'
        # Disable the save button if the payload is not text by setting method
        # to ''.
        save_method = 'put' if is_text_payload(asset.read()) else ''
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            TextAssetRESTHandler.SCHEMA.get_json_schema(),
            TextAssetRESTHandler.SCHEMA.get_schema_dict(),
            uri,
            rest_url,
            exit_url,
            delete_button_caption=delete_button_caption,
            delete_method='delete',
            delete_message=delete_message,
            delete_url=delete_url,
            required_modules=TextAssetRESTHandler.REQUIRED_MODULES,
            save_method=save_method,
        )
        self.render_page({
            'page_title': self.format_title('Edit ' + uri),
            'main_content': form_html,
        })
class TextAssetRESTHandler(BaseRESTHandler):
    """REST endpoints for text assets."""

    # Message substituted for file contents when the bytes are not text.
    ERROR_MESSAGE_UNEDITABLE = (
        'Error: contents are not text and cannot be edited.')
    REQUIRED_MODULES = [
        'inputex-hidden',
        'inputex-textarea',
    ]
    SCHEMA = schema_fields.FieldRegistry('Edit asset', description='Text Asset')
    SCHEMA.add_property(schema_fields.SchemaField(
        'contents', 'Contents', 'text',
    ))
    SCHEMA.add_property(schema_fields.SchemaField(
        'is_text', 'Is Text', 'boolean', hidden=True,
    ))
    SCHEMA.add_property(schema_fields.SchemaField(
        'readonly', 'ReadOnly', 'boolean', hidden=True,
    ))
    URI = '/rest/assets/text'
    XSRF_TOKEN_NAME = 'manage-text-asset'

    def _check_asset_in_allowed_bases(self, filename):
        """Asserts that filename's directory is a whitelisted upload base."""
        assert os.path.dirname(filename) in ALLOWED_ASSET_UPLOAD_BASES

    def delete(self):
        """Handles the delete verb."""
        assert is_editable_fs(self.app_context)
        filename = self.request.get('key')
        # Require both a key and a valid XSRF token before doing anything.
        if not (filename and self.assert_xsrf_token_or_fail(
                self.request, self.XSRF_TOKEN_NAME, {'key': filename})):
            return
        if not FilesRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': filename})
            return
        self._check_asset_in_allowed_bases(filename)
        self.app_context.fs.impl.delete(
            os.path.join(appengine_config.BUNDLE_ROOT, filename))
        transforms.send_json_response(self, 200, 'Done.')

    def get(self):
        """Handles the get verb."""
        assert FilesRights.can_edit(self)
        filename = self.request.get('key')
        assert filename
        asset = self.app_context.fs.impl.get(
            os.path.join(appengine_config.BUNDLE_ROOT, filename))
        assert asset
        contents = asset.read()
        is_text = is_text_payload(contents)
        # Binary contents are never sent to the editor; substitute an error
        # message and flag the payload so the client disables editing.
        if not is_text:
            contents = self.ERROR_MESSAGE_UNEDITABLE
        json_message = 'Success.' if is_text else self.ERROR_MESSAGE_UNEDITABLE
        json_payload = {
            'contents': contents,
            'is_text': is_text,
            'readonly': is_readonly_asset(asset),
        }
        transforms.send_json_response(
            self, 200, json_message, payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN_NAME))

    def put(self):
        """Handles the put verb."""
        assert is_editable_fs(self.app_context)
        request = self.request.get('request')
        assert request
        request = transforms.loads(request)
        payload = transforms.loads(request.get('payload'))
        filename = request.get('key')
        if not (filename and self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN_NAME, {'key': filename})):
            return
        if not FilesRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': filename})
            return
        self._check_asset_in_allowed_bases(filename)
        self.app_context.fs.impl.put(
            os.path.join(appengine_config.BUNDLE_ROOT, filename),
            vfs.string_to_stream(unicode(payload.get('contents'))))
        transforms.send_json_response(self, 200, 'Saved.')
class FilesItemRESTHandler(BaseRESTHandler):
    """Provides REST API for a file.

    GET returns the file content, UTF-8 text or base64-encoded binary
    depending on the extension; PUT validates and stores new content;
    DELETE removes the file from the datastore-backed file system.
    """

    SCHEMA_JSON = """
    {
        "id": "Text File",
        "type": "object",
        "description": "Text File",
        "properties": {
            "key" : {"type": "string"},
            "encoding" : {"type": "string"},
            "content": {"type": "text"}
            }
    }
    """
    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Text File'),
        (['properties', 'key', '_inputex'], {
            'label': 'ID', '_type': 'uneditable'}),
        (['properties', 'encoding', '_inputex'], {
            'label': 'Encoding', '_type': 'uneditable'}),
        (['properties', 'content', '_inputex'], {
            'label': 'Content', '_type': 'text'})]
    REQUIRED_MODULES = [
        'inputex-string', 'inputex-textarea', 'inputex-select',
        'inputex-uneditable']
    URI = '/rest/files/item'

    # Wire-format markers for the 'encoding' field of the payload.
    FILE_ENCODING_TEXT = 'text/utf-8'
    FILE_ENCODING_BINARY = 'binary/base64'
    # NOTE: attribute keeps its historical spelling ('EXTENTION') because
    # external code may reference it by name.
    FILE_EXTENTION_TEXT = ['.js', '.css', '.yaml', '.html', '.csv']

    @classmethod
    def is_text_file(cls, filename):
        """Returns True if the filename's extension marks it as text.

        str.endswith accepts a tuple of suffixes, which replaces the
        previous hand-rolled linear search over FILE_EXTENTION_TEXT.
        """
        return filename.endswith(tuple(cls.FILE_EXTENTION_TEXT))

    @classmethod
    def validate_content(cls, filename, content):
        """Raises an exception if content is invalid for the file type."""
        # TODO(psimakov): handle more file types here
        if filename.endswith('.yaml'):
            yaml.safe_load(content)

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        assert is_editable_fs(self.app_context)
        key = self.request.get('key')
        if not FilesRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        # Load data if possible.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        try:
            stream = fs.get(filename)
        except:  # pylint: disable=bare-except
            stream = None
        if not stream:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return
        # Prepare data: text files are sent verbatim; anything else is
        # base64-encoded so it survives the JSON transport.
        entity = {'key': key}
        if self.is_text_file(key):
            entity['encoding'] = self.FILE_ENCODING_TEXT
            entity['content'] = vfs.stream_to_string(stream)
        else:
            entity['encoding'] = self.FILE_ENCODING_BINARY
            entity['content'] = base64.b64encode(stream.read())
        # Render JSON response.
        json_payload = transforms.dict_to_json(
            entity,
            FilesItemRESTHandler.SCHEMA_DICT)
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'file-put'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        assert is_editable_fs(self.app_context)
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')
        if not self.assert_xsrf_token_or_fail(
                request, 'file-put', {'key': key}):
            return
        # TODO(psimakov): we don't allow editing of all files; restrict further
        if not FilesRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        payload = request.get('payload')
        entity = transforms.loads(payload)
        encoding = entity['encoding']
        content = entity['content']
        # Validate the file content; any failure aborts before the write.
        errors = []
        try:
            if encoding == self.FILE_ENCODING_TEXT:
                content_stream = vfs.string_to_stream(content)
            elif encoding == self.FILE_ENCODING_BINARY:
                content_stream = base64.b64decode(content)
            else:
                errors.append('Unknown encoding: %s.' % encoding)
            self.validate_content(key, content)
        except Exception as e:  # pylint: disable=W0703
            errors.append('Validation error: %s' % e)
        if errors:
            transforms.send_json_response(self, 412, ''.join(errors))
            return
        # Store new file content.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        fs.put(filename, content_stream)
        # Send reply.
        transforms.send_json_response(self, 200, 'Saved.')

    def delete(self):
        """Handles REST DELETE verb."""
        key = self.request.get('key')
        if not self.assert_xsrf_token_or_fail(
                self.request, 'delete-asset', {'key': key}):
            return
        if not FilesRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        fs = self.app_context.fs.impl
        path = fs.physical_to_logical(key)
        if not fs.isfile(path):
            transforms.send_json_response(
                self, 403, 'File does not exist.', None)
            return
        fs.delete(path)
        transforms.send_json_response(self, 200, 'Deleted.')
class AssetItemRESTHandler(BaseRESTHandler):
    """Provides REST API for managing assets."""

    URI = '/rest/assets/item'
    SCHEMA_JSON = """
    {
        "id": "Asset",
        "type": "object",
        "description": "Asset",
        "properties": {
            "base": {"type": "string"},
            "file": {"type": "string", "optional": true}
            }
    }
    """
    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Upload Asset'),
        (['properties', 'base', '_inputex'], {
            'label': 'Base', '_type': 'uneditable'}),
        (['properties', 'file', '_inputex'], {
            'label': 'File', '_type': 'file'})]
    REQUIRED_MODULES = [
        'inputex-string', 'inputex-uneditable', 'inputex-file',
        'io-upload-iframe']

    def _can_write_payload_to_base(self, payload, base):
        """Determine if a given payload type can be put in a base directory."""
        # Binary data can go in images; text data can go anywhere else.
        if base in ALLOWED_ASSET_BINARY_BASES:
            return True
        else:
            return is_text_payload(payload) and base in ALLOWED_ASSET_TEXT_BASES

    def get(self):
        """Provides empty initial content for asset upload editor."""
        # TODO(jorr): Pass base URI through as request param when generalized.
        base = self.request.get('key')
        assert base in ALLOWED_ASSET_UPLOAD_BASES
        json_payload = {'file': '', 'base': base}
        transforms.send_json_response(
            self, 200, 'Success.', payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token('asset-upload'))

    def post(self):
        """Handles asset uploads."""
        assert is_editable_fs(self.app_context)
        if not FilesRights.can_add(self):
            transforms.send_json_file_upload_response(
                self, 401, 'Access denied.')
            return
        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(request, 'asset-upload', None):
            return
        payload = transforms.loads(request['payload'])
        base = payload['base']
        assert base in ALLOWED_ASSET_UPLOAD_BASES
        upload = self.request.POST['file']
        if not isinstance(upload, cgi.FieldStorage):
            transforms.send_json_file_upload_response(
                self, 403, 'No file specified.')
            return
        # Use only the basename of the uploaded file; discard any path part.
        filename = os.path.split(upload.filename)[1]
        assert filename
        physical_path = os.path.join(base, filename)
        fs = self.app_context.fs.impl
        path = fs.physical_to_logical(physical_path)
        if fs.isfile(path):
            transforms.send_json_file_upload_response(
                self, 403, 'Cannot overwrite existing file.')
            return
        # Read the whole payload for the type/size checks, then rewind so
        # fs.put below stores the stream from the beginning.
        content = upload.file.read()
        if not self._can_write_payload_to_base(content, base):
            transforms.send_json_file_upload_response(
                self, 403, 'Cannot write binary data to %s.' % base)
            return
        upload.file.seek(0)
        if len(content) > MAX_ASSET_UPLOAD_SIZE_K * 1024:
            transforms.send_json_file_upload_response(
                self, 403,
                'Max allowed file upload size is %dK' % MAX_ASSET_UPLOAD_SIZE_K)
            return
        fs.put(path, upload.file)
        transforms.send_json_file_upload_response(self, 200, 'Saved.')
class AssetUriRESTHandler(BaseRESTHandler):
    """Provides REST API for managing assets by means of their URIs."""

    # TODO(jorr): Refactor the asset management classes to have more meaningful
    # REST URI's and class names
    URI = '/rest/assets/uri'
    SCHEMA_JSON = """
    {
        "id": "Asset",
        "type": "object",
        "description": "Asset",
        "properties": {
            "uri": {"type": "string"}
            }
    }
    """
    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Image or Document'),
        (['properties', 'uri', '_inputex'], {
            'label': 'Asset',
            '_type': 'uneditable',
            'visu': {
                'visuType': 'funcName',
                'funcName': 'renderAsset'}})]

    def get(self):
        """Returns the asset's URI plus an XSRF token for later deletion."""
        asset_uri = self.request.get('key')
        if not FilesRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': asset_uri})
            return
        xsrf_token = XsrfTokenManager.create_xsrf_token('asset-delete')
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict={'uri': asset_uri},
            xsrf_token=xsrf_token)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to create and manage analytics dashboards."""
__author__ = 'Sean Lip (sll@google.com)'
import logging
import os
import urlparse
from common import safe_dom
from controllers.utils import ApplicationHandler
from controllers.utils import HUMAN_READABLE_TIME_FORMAT
import jinja2
from models import courses
from models import jobs
from models import progress
from models import transforms
from models import utils
from models.models import EventEntity
from models.models import Student
from models.models import StudentPropertyEntity
class ComputeStudentStats(jobs.DurableJob):
    """A job that computes student statistics."""

    class ScoresAggregator(object):
        """Aggregates scores statistics."""

        def __init__(self):
            # Maps assessment name -> (student_count, sum_of_scores).
            self.name_to_tuple = {}

        def visit(self, student):
            if not student.scores:
                return
            for name, score in transforms.loads(student.scores).items():
                count, score_sum = self.name_to_tuple.get(name, (0, 0))
                self.name_to_tuple[name] = (count + 1, score_sum + float(score))

    class EnrollmentAggregator(object):
        """Aggregates enrollment statistics."""

        def __init__(self):
            self.enrolled = 0
            self.unenrolled = 0

        def visit(self, student):
            if student.is_enrolled:
                self.enrolled += 1
            else:
                self.unenrolled += 1

    def run(self):
        """Computes student statistics."""
        enrollment_agg = self.EnrollmentAggregator()
        scores_agg = self.ScoresAggregator()
        mapper = utils.QueryMapper(
            Student.all(), batch_size=500, report_every=1000)

        def visit_student(student):
            # Feed each student through both aggregators in a single pass.
            enrollment_agg.visit(student)
            scores_agg.visit(student)

        mapper.run(visit_student)
        return {
            'enrollment': {
                'enrolled': enrollment_agg.enrolled,
                'unenrolled': enrollment_agg.unenrolled},
            'scores': scores_agg.name_to_tuple}
class StudentEnrollmentAndScoresHandler(ApplicationHandler):
    """Shows student enrollment analytics on the dashboard."""

    # The key used in the statistics dict that generates the dashboard page.
    # Must be unique.
    name = 'enrollment_and_scores'
    # The class that generates the data to be displayed.
    stats_computer = ComputeStudentStats

    def get_markup(self, job):
        """Returns Jinja markup for enrollment and assessment analytics."""
        template_values = {}
        errors = []
        stats_calculated = False
        update_message = safe_dom.Text('')
        if not job:
            update_message = safe_dom.Text(
                'Enrollment/assessment statistics have not been calculated '
                'yet.')
        else:
            if job.status_code == jobs.STATUS_CODE_COMPLETED:
                stats = transforms.loads(job.output)
                stats_calculated = True
                template_values['enrolled'] = stats['enrollment']['enrolled']
                template_values['unenrolled'] = (
                    stats['enrollment']['unenrolled'])
                scores = []
                total_records = 0
                # Each value is a (submission_count, score_sum) pair keyed by
                # assessment name; derive per-assessment averages from it.
                for key, value in stats['scores'].items():
                    total_records += value[0]
                    avg = round(value[1] / value[0], 1) if value[0] else 0
                    scores.append({'key': key, 'completed': value[0],
                                   'avg': avg})
                template_values['scores'] = scores
                template_values['total_records'] = total_records
                update_message = safe_dom.Text("""
                    Enrollment and assessment statistics were last updated at
                    %s in about %s second(s).""" % (
                        job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT),
                        job.execution_time_sec))
            elif job.status_code == jobs.STATUS_CODE_FAILED:
                update_message = safe_dom.NodeList().append(
                    safe_dom.Text("""
                        There was an error updating enrollment/assessment
                        statistics. Here is the message:""")
                ).append(
                    safe_dom.Element('br')
                ).append(
                    safe_dom.Element('blockquote').add_child(
                        safe_dom.Element('pre').add_text('\n%s' % job.output)))
            else:
                # Job exists but is still running.
                update_message = safe_dom.Text(
                    'Enrollment and assessment statistics update started at %s'
                    ' and is running now. Please come back shortly.' %
                    job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT))
        template_values['stats_calculated'] = stats_calculated
        template_values['errors'] = errors
        template_values['update_message'] = update_message
        return jinja2.utils.Markup(self.get_template(
            'basic_analytics.html', [os.path.dirname(__file__)]
        ).render(template_values, autoescape=True))
class ComputeStudentProgressStats(jobs.DurableJob):
    """A job that computes student progress statistics."""

    class ProgressAggregator(object):
        """Aggregates student progress statistics."""

        def __init__(self, course):
            # Maps entity id -> {'progress': count, 'completed': count}.
            self.progress_data = {}
            self._tracker = progress.UnitLessonCompletionTracker(course)

        def visit(self, student_property):
            if student_property.value:
                entity_scores = transforms.loads(student_property.value)
                for entity in entity_scores:
                    entity_score = self.progress_data.get(
                        entity, {'progress': 0, 'completed': 0})
                    # Composite entities track an explicit in-progress state;
                    # leaf entities only record completion (nonzero score).
                    if self._tracker.determine_if_composite_entity(entity):
                        if (entity_scores[entity] ==
                            self._tracker.IN_PROGRESS_STATE):
                            entity_score['progress'] += 1
                        elif (entity_scores[entity] ==
                              self._tracker.COMPLETED_STATE):
                            entity_score['completed'] += 1
                    else:
                        if entity_scores[entity] != 0:
                            entity_score['completed'] += 1
                    self.progress_data[entity] = entity_score

    def __init__(self, app_context):
        super(ComputeStudentProgressStats, self).__init__(app_context)
        self._course = courses.Course(None, app_context)

    def run(self):
        """Computes student progress statistics."""
        student_progress = self.ProgressAggregator(self._course)
        mapper = utils.QueryMapper(
            StudentPropertyEntity.all(), batch_size=500, report_every=1000)
        mapper.run(student_progress.visit)
        return student_progress.progress_data
class StudentProgressStatsHandler(ApplicationHandler):
    """Shows student progress analytics on the dashboard."""

    # Unique key for this analytic in the dashboard's statistics dict.
    name = 'student_progress_stats'
    # The durable job class that computes the displayed data.
    stats_computer = ComputeStudentProgressStats

    def get_markup(self, job):
        """Returns Jinja markup for student progress analytics."""
        errors = []
        stats_calculated = False
        update_message = safe_dom.Text('')
        course = courses.Course(self)
        entity_codes = (
            progress.UnitLessonCompletionTracker.EVENT_CODE_MAPPING.values())
        value = None
        course_content = None
        if not job:
            update_message = safe_dom.Text(
                'Student progress statistics have not been calculated yet.')
        else:
            if job.status_code == jobs.STATUS_CODE_COMPLETED:
                value = transforms.loads(job.output)
                stats_calculated = True
                try:
                    course_content = progress.ProgressStats(
                        course).compute_entity_dict('course', [])
                    update_message = safe_dom.Text("""
                        Student progress statistics were last updated at
                        %s in about %s second(s).""" % (
                            job.updated_on.strftime(
                                HUMAN_READABLE_TIME_FORMAT),
                            job.execution_time_sec))
                except IOError:
                    # Older course formats cannot produce the progress dict.
                    update_message = safe_dom.Text("""
                        This feature is supported by CB 1.3 and up.""")
            elif job.status_code == jobs.STATUS_CODE_FAILED:
                update_message = safe_dom.NodeList().append(
                    safe_dom.Text("""
                        There was an error updating student progress statistics.
                        Here is the message:""")
                ).append(
                    safe_dom.Element('br')
                ).append(
                    safe_dom.Element('blockquote').add_child(
                        safe_dom.Element('pre').add_text('\n%s' % job.output)))
            else:
                # Job exists but is still running.
                update_message = safe_dom.Text("""
                    Student progress statistics update started at %s and is
                    running now. Please come back shortly.""" % (
                        job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT)))
        # Serialize the stats for the template, or pass None when absent.
        if value:
            value = transforms.dumps(value)
        else:
            value = None
        return jinja2.utils.Markup(self.get_template(
            'progress_stats.html', [os.path.dirname(__file__)]
        ).render({
            'errors': errors,
            'progress': value,
            'content': transforms.dumps(course_content),
            'entity_codes': transforms.dumps(entity_codes),
            'stats_calculated': stats_calculated,
            'update_message': update_message,
        }, autoescape=True))
class ComputeQuestionStats(jobs.DurableJob):
"""A job that computes stats for student submissions to questions."""
class MultipleChoiceQuestionAggregator(object):
"""Class that aggregates submissions for multiple-choice questions."""
ATTEMPT_ACTIVITY = 'attempt-activity'
TAG_ASSESSMENT = 'tag-assessment'
ATTEMPT_LESSON = 'attempt-lesson'
SUBMIT_ASSESSMENT = 'submit-assessment'
ATTEMPT_ASSESSMENT = 'attempt-assessment'
MC_QUESTION = 'McQuestion'
QUESTION_GROUP = 'QuestionGroup'
ACTIVITY_CHOICE = 'activity-choice'
ACTIVITY_GROUP = 'activity-group'
def __init__(self, course):
self._course = course
self.id_to_questions_dict = progress.UnitLessonCompletionTracker(
course).get_id_to_questions_dict()
self.id_to_assessments_dict = progress.UnitLessonCompletionTracker(
course).get_id_to_assessments_dict()
def _get_course(self):
return self._course
def _append_data(self, summarized_question, dict_to_update):
# Validate the structure and content of summarized_question dict.
if set(summarized_question.keys()) != {'id', 'score', 'answers'}:
return
if not isinstance(summarized_question['score'], (int, float)):
return
if not isinstance(summarized_question['answers'], list):
return
if any(not isinstance(answer, int) for answer in (
summarized_question['answers'])):
return
if summarized_question['id'] not in dict_to_update:
return
if max(summarized_question['answers']) >= len(
dict_to_update[summarized_question['id']]['answer_counts']):
return
# Add the summarized_question to the aggregating dict.
q_dict = dict_to_update[summarized_question['id']]
q_dict['score'] += summarized_question['score']
q_dict['num_attempts'] += 1
for choice_index in summarized_question['answers']:
q_dict['answer_counts'][choice_index] += 1
def _get_unit_and_lesson_id_from_url(self, url):
url_components = urlparse.urlparse(url)
query_dict = urlparse.parse_qs(url_components.query)
if 'unit' not in query_dict:
return None, None
unit_id = query_dict['unit'][0]
lesson_id = None
if 'lesson' in query_dict:
lesson_id = query_dict['lesson'][0]
else:
lessons = self._get_course().get_lessons(unit_id)
lesson_id = lessons[0].lesson_id if lessons else None
return unit_id, lesson_id
def _summarize_multiple_questions(self, data, id_prefix):
"""Helper method that summarizes events from a list of questions.
Args:
data: data dict from event_entity['data'].
id_prefix: str. Questions in lessons have 'u.#.l.#' formatted
prefix representing the unit and lesson id, and questions
in assessments have 's.#' formatted prefix representing
the assessment id.
Returns:
A list of dicts. Each of the dicts in the output list has the
following keys: ['id', 'score', 'answers'].
"""
type_info_dict = data['containedTypes']
questions_list = []
for instanceid, type_info in type_info_dict.iteritems():
if isinstance(type_info, list):
# This is a question group.
mc_indices = [i for i in xrange(len(type_info))
if type_info[i] == self.MC_QUESTION]
questions_list += [{
'id': '%s.c.%s.i.%s' % (id_prefix, instanceid, index),
'score': data['individualScores'][instanceid][index],
'answers': data['answers'][instanceid][index]
} for index in mc_indices if (
data['answers'][instanceid][index])]
elif (type_info == self.MC_QUESTION and
data['answers'][instanceid]):
# This is an individual multiple-choice question.
questions_list += [{
'id': '%s.c.%s' % (id_prefix, instanceid),
'score': data['individualScores'][instanceid],
'answers': data['answers'][instanceid]
}]
return questions_list
def _get_questions_from_attempt_activity(self, event_data):
"""Summarizes activity event data into a list of dicts.
Args:
event_data: data dict from event_entity['data'].
Returns:
List of dicts. Each of the dicts in the output list has the
following keys: ['id', 'score', 'answers'].
"""
unit_id, lesson_id = self._get_unit_and_lesson_id_from_url(
event_data['location'])
if unit_id is None or lesson_id is None:
return []
if (event_data['type'] == self.ACTIVITY_CHOICE and
event_data['value'] is not None):
return [{
'id': 'u.%s.l.%s.b.%s' % (
unit_id, lesson_id, event_data['index']),
'score': 1.0 if event_data['correct'] else 0.0,
'answers': [event_data['value']]
}]
elif event_data['type'] == self.ACTIVITY_GROUP:
block_id = event_data['index']
return [{
'id': 'u.%s.l.%s.b.%s.i.%s' % (
unit_id, lesson_id, block_id, answer['index']),
'score': 1.0 if answer['correct'] else 0.0,
'answers': answer['value']
} for answer in event_data['values'] if answer['value']]
else:
return []
def _get_questions_from_tag_assessment(self, event_data):
"""Summarizes assessment tag event data into a list of dicts.
Args:
event_data: data dict from event_entity['data'].
Returns:
List of dicts. Each of the dicts in the output list has the
following keys: ['id', 'score', 'answers'].
"""
unit_id, lesson_id = self._get_unit_and_lesson_id_from_url(
event_data['location'])
if unit_id is None or lesson_id is None:
return []
if event_data['type'] == self.QUESTION_GROUP:
mc_indices = [
i for i in xrange(len(event_data['containedTypes']))
if event_data['containedTypes'][i] == self.MC_QUESTION]
return [{
'id': 'u.%s.l.%s.c.%s.i.%s' % (
unit_id, lesson_id, event_data['instanceid'], index),
'score': event_data['individualScores'][index],
'answers': event_data['answer'][index]
} for index in mc_indices if event_data['answer'][index]]
elif (event_data['type'] == self.MC_QUESTION and
event_data['answer']):
# This is a single multiple-choice question.
return [{
'id': 'u.%s.l.%s.c.%s' % (
unit_id, lesson_id, event_data['instanceid']),
'score': event_data['score'],
'answers': event_data['answer']
}]
else:
return []
def _get_questions_from_attempt_lesson(self, event_data):
"""Summarizes lesson attempt event data into a list of dicts.
Args:
event_data: data dict from event_entity['data'].
Returns:
List of dicts. Each of the dicts in the output list has the
following keys: ['id', 'score', 'answers'].
"""
unit_id, lesson_id = self._get_unit_and_lesson_id_from_url(
event_data['location'])
if unit_id is None or lesson_id is None:
return []
return self._summarize_multiple_questions(
event_data, 'u.%s.l.%s' % (unit_id, lesson_id))
def _get_questions_from_submit_and_attempt_assessment(self, event_data):
"""Summarizes assessment submission event data into a list of dicts.
Args:
event_data: data dict from event_entity['data'].
Returns:
List of dicts. Each of the dicts in the output list has the
following keys: ['id', 'score', 'answers'].
"""
if not event_data['type'].startswith('assessment-'):
return []
assessment_id = event_data['type'][len('assessment-'):]
values = event_data['values']
if isinstance(values, list):
# This is a v1.4 (or older) assessment.
mc_indices = [i for i in xrange(len(values))
if values[i]['type'] == 'choices']
return [{
'id': 's.%s.i.%s' % (assessment_id, index),
'score': 1.0 if values[index]['correct'] else 0.0,
'answers': [values[index]['value']]
} for index in mc_indices if values[index]['value'] is not None]
elif isinstance(values, dict):
# This is a v1.5 assessment.
return self._summarize_multiple_questions(
values, 's.%s' % assessment_id)
else:
return []
def _process_event(self, source, data):
"""Returns a list of questions that correspond to the event."""
question_list = []
try:
if source == self.ATTEMPT_ACTIVITY:
question_list = self._get_questions_from_attempt_activity(
data)
elif source == self.TAG_ASSESSMENT:
question_list = self._get_questions_from_tag_assessment(
data)
elif source == self.ATTEMPT_LESSON:
question_list = self._get_questions_from_attempt_lesson(
data)
elif (source == self.SUBMIT_ASSESSMENT or
source == self.ATTEMPT_ASSESSMENT):
question_list = (
self._get_questions_from_submit_and_attempt_assessment(
data))
except Exception as e: # pylint: disable-msg=broad-except
logging.error(
'Failed to process question analytics event: '
'source %s, data %s, error %s', source, data, e)
return question_list
def visit(self, event_entity):
"""Records question data from given event_entity."""
if not event_entity or not event_entity.source:
return
try:
data = transforms.loads(event_entity.data)
except Exception: # pylint: disable-msg=broad-except
return
# A list of dicts. Each dict represents a question instance and has
# the following keys: ['id', 'score', 'answers']. Note that a
# single event may correspond to multiple question instance dicts.
question_list = self._process_event(event_entity.source, data)
# Update the correct dict according to the event source.
if (event_entity.source == self.SUBMIT_ASSESSMENT or
event_entity.source == self.ATTEMPT_ASSESSMENT):
dict_to_update = self.id_to_assessments_dict
else:
dict_to_update = self.id_to_questions_dict
for summarized_question in question_list:
self._append_data(summarized_question, dict_to_update)
    def __init__(self, app_context):
        """Binds the job to a Course used when summarizing events."""
        super(ComputeQuestionStats, self).__init__(app_context)
        # Course wrapper around the app context; consulted by the aggregator
        # to resolve unit/lesson structure.
        self._course = courses.Course(None, app_context)
def run(self):
"""Computes submitted question answers statistics."""
question_stats = self.MultipleChoiceQuestionAggregator(self._course)
mapper = utils.QueryMapper(
EventEntity.all(), batch_size=500, report_every=1000)
mapper.run(question_stats.visit)
return (question_stats.id_to_questions_dict,
question_stats.id_to_assessments_dict)
class QuestionStatsHandler(ApplicationHandler):
    """Shows statistics on the dashboard for students' answers to questions."""

    # Identifier under which the dashboard registers this analytic.
    name = 'question_answers_stats'
    # Deferred job class whose output get_markup() renders.
    stats_computer = ComputeQuestionStats

    def get_markup(self, job):
        """Returns Jinja markup for question stats analytics.

        Args:
            job: the most recent ComputeQuestionStats job record, or None when
                the statistics have never been computed.
        """
        errors = []
        stats_calculated = False
        update_message = safe_dom.Text('')

        accumulated_question_answers = None
        accumulated_assessment_answers = None

        if not job:
            update_message = safe_dom.Text(
                'Multiple-choice question statistics have not been calculated '
                'yet.')
        else:
            if job.status_code == jobs.STATUS_CODE_COMPLETED:
                # Job output is a JSON-encoded pair:
                # (question stats dict, assessment stats dict).
                accumulated_question_answers, accumulated_assessment_answers = (
                    transforms.loads(job.output))
                stats_calculated = True
                update_message = safe_dom.Text("""
                    Multiple-choice question statistics were last updated at
                    %s in about %s second(s).""" % (
                        job.updated_on.strftime(
                            HUMAN_READABLE_TIME_FORMAT),
                        job.execution_time_sec))
            elif job.status_code == jobs.STATUS_CODE_FAILED:
                # Surface the job's error output verbatim in a <pre> block.
                update_message = safe_dom.NodeList().append(
                    safe_dom.Text("""
                        There was an error updating multiple-choice question
                        statistics. Here is the message:""")
                ).append(
                    safe_dom.Element('br')
                ).append(
                    safe_dom.Element('blockquote').add_child(
                        safe_dom.Element('pre').add_text('\n%s' % job.output)))
            else:
                # Job exists but is neither completed nor failed: in progress.
                update_message = safe_dom.Text("""
                    Multiple-choice question statistics update started at %s
                    and is running now. Please come back shortly.""" % (
                        job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT)))

        # Note: the accumulated answers are re-serialized to JSON for the
        # template's JavaScript, even when still None.
        return jinja2.utils.Markup(self.get_template(
            'question_stats.html', [os.path.dirname(__file__)]
        ).render({
            'errors': errors,
            'accumulated_question_answers': transforms.dumps(
                accumulated_question_answers),
            'accumulated_assessment_answers': transforms.dumps(
                accumulated_assessment_answers),
            'stats_calculated': stats_calculated,
            'update_message': update_message,
        }, autoescape=True))
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting creation and editing of questions."""
__author__ = 'John Orr (jorr@google.com)'
import cgi
import copy
import urllib
from common import schema_fields
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import transforms
from models.models import QuestionDAO
from models.models import QuestionDTO
from models.models import SaQuestionConstants
from modules.oeditor import oeditor
import messages
from unit_lesson_editor import CourseOutlineRights
class BaseDatastoreAssetEditor(ApplicationHandler):
    """Base handler that embeds an object editor for datastore assets."""

    def get_form(self, rest_handler, key=''):
        """Build the Jinja template for adding a question.

        Args:
            rest_handler: REST handler class describing the edited entity; it
                supplies the URI, XSRF token name, schema and editor modules.
            key: datastore key of an existing entity, or '' when creating.

        Returns:
            HTML markup for the embedded object editor.
        """
        rest_url = self.canonicalize_url(rest_handler.URI)
        exit_url = self.canonicalize_url('/dashboard?action=assets')

        # Offer a delete button only when editing an existing entity.
        delete_url = None
        if key:
            delete_args = urllib.urlencode({
                'key': key,
                'xsrf_token': cgi.escape(
                    self.create_xsrf_token(rest_handler.XSRF_TOKEN))
            })
            delete_url = '%s?%s' % (
                self.canonicalize_url(rest_handler.URI), delete_args)

        schema = rest_handler.get_schema()

        return oeditor.ObjectEditor.get_html_for(
            self,
            schema.get_json_schema(),
            schema.get_schema_dict(),
            key, rest_url, exit_url,
            delete_url=delete_url, delete_method='delete',
            required_modules=rest_handler.REQUIRED_MODULES,
            extra_js_files=rest_handler.EXTRA_JS_FILES)
class QuestionManagerAndEditor(BaseDatastoreAssetEditor):
    """An editor for editing and managing questions."""

    def prepare_template(self, rest_handler, key=''):
        """Build the Jinja template for adding a question."""
        return {
            'page_title': self.format_title('Edit Question'),
            'main_content': self.get_form(rest_handler, key=key),
        }

    def get_add_mc_question(self):
        """Renders an empty multiple-choice question editor."""
        self.render_page(self.prepare_template(McQuestionRESTHandler))

    def get_add_sa_question(self):
        """Renders an empty short-answer question editor."""
        self.render_page(self.prepare_template(SaQuestionRESTHandler))

    def get_edit_question(self):
        """Renders the editor matching an existing question's type."""
        key = self.request.get('key')
        question = QuestionDAO.load(key)

        if not question:
            raise Exception('No question found')

        # Pick the REST handler appropriate to the stored question type.
        handler_by_type = {
            QuestionDTO.MULTIPLE_CHOICE: McQuestionRESTHandler,
            QuestionDTO.SHORT_ANSWER: SaQuestionRESTHandler,
        }
        rest_handler = handler_by_type.get(question.type)
        if rest_handler is None:
            raise Exception('Unknown question type: %s' % question.type)
        self.render_page(self.prepare_template(rest_handler, key=key))
class BaseQuestionRESTHandler(BaseRESTHandler):
    """Common methods for handling REST end points with questions."""

    def put(self):
        """Store a question in the datastore in response to a PUT."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        # XSRF check must precede any state change.
        if not self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN, {'key': key}):
            return

        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        payload = request.get('payload')
        question_dict = transforms.loads(payload)
        question_dict['description'] = question_dict['description'].strip()

        # import_and_validate is supplied by the concrete subclass and does
        # schema conversion plus semantic checks; errors is a list of strings.
        question_dict, errors = self.import_and_validate(question_dict, key)

        if errors:
            self.validation_error('\n'.join(errors), key=key)
            return

        # An existing key means update; an empty key means create.
        if key:
            question = QuestionDTO(key, question_dict)
        else:
            question = QuestionDTO(None, question_dict)
        question.type = self.TYPE

        key_after_save = QuestionDAO.save(question)
        transforms.send_json_response(
            self, 200, 'Saved.', payload_dict={'key': key_after_save})

    def delete(self):
        """Remove a question from the datastore in response to DELETE."""
        key = self.request.get('key')

        if not self.assert_xsrf_token_or_fail(
                self.request, self.XSRF_TOKEN, {'key': key}):
            return

        if not CourseOutlineRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        question = QuestionDAO.load(key)

        if not question:
            transforms.send_json_response(
                self, 404, 'Question not found.', {'key': key})
            return

        # Refuse deletion while any question group still references this
        # question; report the group names back to the caller.
        used_by = QuestionDAO.used_by(question.id)
        if used_by:
            group_names = ['"%s"' % x for x in used_by]
            transforms.send_json_response(
                self, 403,
                ('Question in use by question groups:\n%s.\nPlease delete it '
                 'from those groups and try again.') % ',\n'.join(group_names),
                {'key': key})
            return

        QuestionDAO.delete(question)
        transforms.send_json_response(self, 200, 'Deleted.')

    def validate_no_description_collision(self, description, key, errors):
        """Appends to errors when another question shares the description.

        The question currently being edited (identified by key) is excluded
        from the check so re-saving with an unchanged description succeeds.
        """
        descriptions = {q.description for q in QuestionDAO.get_all()
                        if not key or q.id != long(key)}
        if description in descriptions:
            errors.append(
                'The description must be different from existing questions.')
class McQuestionRESTHandler(BaseQuestionRESTHandler):
    """REST handler for editing multiple choice questions."""

    URI = '/rest/question/mc'

    # InputEx modules the object editor must load to render this schema.
    REQUIRED_MODULES = [
        'array-extras', 'gcb-rte', 'inputex-radio', 'inputex-select',
        'inputex-string', 'inputex-list', 'inputex-number', 'inputex-hidden']
    EXTRA_JS_FILES = ['mc_question_editor_lib.js', 'mc_question_editor.js']

    TYPE = QuestionDTO.MULTIPLE_CHOICE
    XSRF_TOKEN = 'mc-question-edit'

    SCHEMA_VERSION = '1.5'

    @classmethod
    def get_schema(cls):
        """Get the InputEx schema for the multiple choice question editor."""
        mc_question = schema_fields.FieldRegistry(
            'Multiple Choice Question',
            description='multiple choice question',
            extra_schema_dict_values={'className': 'mc-container'})

        mc_question.add_property(schema_fields.SchemaField(
            'version', '', 'string', optional=True, hidden=True))
        mc_question.add_property(schema_fields.SchemaField(
            'question', 'Question', 'html', optional=True,
            extra_schema_dict_values={'className': 'mc-question'}))
        mc_question.add_property(schema_fields.SchemaField(
            'description', 'Description', 'string', optional=True,
            extra_schema_dict_values={'className': 'mc-description'},
            description=messages.QUESTION_DESCRIPTION))
        mc_question.add_property(schema_fields.SchemaField(
            'multiple_selections', 'Selection', 'boolean',
            optional=True,
            select_data=[
                ('false', 'Allow only one selection'),
                ('true', 'Allow multiple selections')],
            extra_schema_dict_values={
                '_type': 'radio',
                'className': 'mc-selection'}))

        choice_type = schema_fields.FieldRegistry(
            'Choice',
            extra_schema_dict_values={'className': 'mc-choice'})
        choice_type.add_property(schema_fields.SchemaField(
            'score', 'Score', 'string', optional=True,
            extra_schema_dict_values={
                'className': 'mc-choice-score', 'value': '0'}))
        choice_type.add_property(schema_fields.SchemaField(
            'text', 'Text', 'html', optional=True,
            extra_schema_dict_values={'className': 'mc-choice-text'}))
        choice_type.add_property(schema_fields.SchemaField(
            'feedback', 'Feedback', 'html', optional=True,
            extra_schema_dict_values={'className': 'mc-choice-feedback'}))

        choices_array = schema_fields.FieldArray(
            'choices', '', item_type=choice_type,
            extra_schema_dict_values={
                'className': 'mc-choice-container',
                'listAddLabel': 'Add a choice',
                'listRemoveLabel': 'Delete choice'})

        mc_question.add_property(choices_array)

        return mc_question

    def get(self):
        """Get the data to populate the question editor form."""

        def export(q_dict):
            p_dict = copy.deepcopy(q_dict)
            # InputEx does not correctly roundtrip booleans, so pass strings
            p_dict['multiple_selections'] = (
                'true' if q_dict.get('multiple_selections') else 'false')
            return p_dict

        key = self.request.get('key')
        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        if key:
            question = QuestionDAO.load(key)
            if not question:
                # A stale or invalid key used to surface as an
                # AttributeError (500); report a clean 404 instead,
                # matching the delete() handler.
                transforms.send_json_response(
                    self, 404, 'Question not found.', {'key': key})
                return
            payload_dict = export(question.dict)
        else:
            # Defaults for a brand-new question: one correct choice followed
            # by three scored-zero distractors.
            payload_dict = {
                'version': self.SCHEMA_VERSION,
                'question': '',
                'description': '',
                'multiple_selections': 'false',
                'choices': [
                    {'score': '1', 'text': '', 'feedback': ''},
                    {'score': '0', 'text': '', 'feedback': ''},
                    {'score': '0', 'text': '', 'feedback': ''},
                    {'score': '0', 'text': '', 'feedback': ''}
                ]}

        transforms.send_json_response(
            self, 200, 'Success',
            payload_dict=payload_dict,
            xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN))

    def import_and_validate(self, unvalidated_dict, key):
        """Validates a client payload; returns (question_dict, errors)."""
        version = unvalidated_dict.get('version')
        if self.SCHEMA_VERSION != version:
            return (None, ['Version %s question not supported.' % version])
        return self._import_and_validate15(unvalidated_dict, key)

    def _import_and_validate15(self, unvalidated_dict, key):
        """Validates a v1.5 multiple choice question dict."""
        errors = []
        try:
            question_dict = transforms.json_to_dict(
                unvalidated_dict, self.get_schema().get_json_schema_dict())
        except ValueError as err:
            errors.append(str(err))
            return (None, errors)

        if not question_dict['question'].strip():
            errors.append('The question must have a non-empty body.')

        if not question_dict['description']:
            errors.append('The description must be non-empty.')
        self.validate_no_description_collision(
            question_dict['description'], key, errors)

        if not question_dict['choices']:
            errors.append('The question must have at least one choice.')

        for index, choice in enumerate(question_dict['choices']):
            if not choice['text'].strip():
                errors.append('Choice %s has no response text.' % (index + 1))
            try:
                # Coerce the score attrib into a python float
                choice['score'] = float(choice['score'])
            except ValueError:
                errors.append(
                    'Choice %s must have a numeric score.' % (index + 1))

        return (question_dict, errors)
class SaQuestionRESTHandler(BaseQuestionRESTHandler):
    """REST handler for editing short answer questions."""

    URI = '/rest/question/sa'

    # InputEx modules the object editor must load to render this schema.
    REQUIRED_MODULES = [
        'gcb-rte', 'inputex-select', 'inputex-string', 'inputex-list',
        'inputex-hidden', 'inputex-integer']
    EXTRA_JS_FILES = []

    TYPE = QuestionDTO.SHORT_ANSWER
    XSRF_TOKEN = 'sa-question-edit'

    # (internal name, user-visible label) pairs for the grading mechanisms.
    GRADER_TYPES = [
        ('case_insensitive', 'Case insensitive string match'),
        ('regex', 'Regular expression'),
        ('numeric', 'Numeric')]

    SCHEMA_VERSION = '1.5'

    @classmethod
    def get_schema(cls):
        """Get the InputEx schema for the short answer question editor."""
        sa_question = schema_fields.FieldRegistry(
            'Short Answer Question',
            description='short answer question',
            extra_schema_dict_values={'className': 'sa-container'})

        sa_question.add_property(schema_fields.SchemaField(
            'version', '', 'string', optional=True, hidden=True))
        sa_question.add_property(schema_fields.SchemaField(
            'question', 'Question', 'html', optional=True,
            extra_schema_dict_values={'className': 'sa-question'}))
        sa_question.add_property(schema_fields.SchemaField(
            'description', 'Description', 'string', optional=True,
            extra_schema_dict_values={'className': 'sa-description'},
            description=messages.QUESTION_DESCRIPTION))
        sa_question.add_property(schema_fields.SchemaField(
            'hint', 'Hint', 'html', optional=True,
            extra_schema_dict_values={'className': 'sa-hint'}))
        sa_question.add_property(schema_fields.SchemaField(
            'defaultFeedback', 'Feedback', 'html', optional=True,
            extra_schema_dict_values={'className': 'sa-feedback'},
            description=messages.INCORRECT_ANSWER_FEEDBACK))

        sa_question.add_property(schema_fields.SchemaField(
            'rows', 'Rows', 'string', optional=True,
            extra_schema_dict_values={
                'className': 'sa-rows',
                'value': SaQuestionConstants.DEFAULT_HEIGHT_ROWS
            },
            description=messages.INPUT_FIELD_HEIGHT_DESCRIPTION))
        sa_question.add_property(schema_fields.SchemaField(
            'columns', 'Columns', 'string', optional=True,
            extra_schema_dict_values={
                'className': 'sa-columns',
                'value': SaQuestionConstants.DEFAULT_WIDTH_COLUMNS
            },
            description=messages.INPUT_FIELD_WIDTH_DESCRIPTION))

        grader_type = schema_fields.FieldRegistry(
            'Answer',
            extra_schema_dict_values={'className': 'sa-grader'})
        grader_type.add_property(schema_fields.SchemaField(
            'score', 'Score', 'string', optional=True,
            extra_schema_dict_values={'className': 'sa-grader-score'}))
        grader_type.add_property(schema_fields.SchemaField(
            'matcher', 'Grading', 'string', optional=True,
            select_data=cls.GRADER_TYPES,
            extra_schema_dict_values={'className': 'sa-grader-score'}))
        grader_type.add_property(schema_fields.SchemaField(
            'response', 'Response', 'string', optional=True,
            extra_schema_dict_values={'className': 'sa-grader-text'}))
        grader_type.add_property(schema_fields.SchemaField(
            'feedback', 'Feedback', 'html', optional=True,
            extra_schema_dict_values={'className': 'sa-grader-feedback'}))

        graders_array = schema_fields.FieldArray(
            'graders', '', item_type=grader_type,
            extra_schema_dict_values={
                'className': 'sa-grader-container',
                'listAddLabel': 'Add an answer',
                'listRemoveLabel': 'Delete this answer'})

        sa_question.add_property(graders_array)

        return sa_question

    def get(self):
        """Get the data to populate the question editor form."""
        key = self.request.get('key')
        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        if key:
            question = QuestionDAO.load(key)
            if not question:
                # A stale or invalid key used to surface as an
                # AttributeError (500); report a clean 404 instead,
                # matching the delete() handler.
                transforms.send_json_response(
                    self, 404, 'Question not found.', {'key': key})
                return
            payload_dict = question.dict
        else:
            # Defaults for a brand-new question: a single case-insensitive
            # answer worth one point.
            payload_dict = {
                'version': self.SCHEMA_VERSION,
                'question': '',
                'description': '',
                'graders': [
                    {
                        'score': '1.0',
                        'matcher': 'case_insensitive',
                        'response': '',
                        'feedback': ''}]}

        transforms.send_json_response(
            self, 200, 'Success',
            payload_dict=payload_dict,
            xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN))

    def import_and_validate(self, unvalidated_dict, key):
        """Validates a client payload; returns (question_dict, errors)."""
        version = unvalidated_dict.get('version')
        if self.SCHEMA_VERSION != version:
            return (None, ['Version %s question not supported.' % version])
        return self._import_and_validate15(unvalidated_dict, key)

    def _import_and_validate15(self, unvalidated_dict, key):
        """Validates a v1.5 short answer question dict."""
        errors = []
        try:
            question_dict = transforms.json_to_dict(
                unvalidated_dict, self.get_schema().get_json_schema_dict())
        except ValueError as err:
            errors.append(str(err))
            return (None, errors)

        if not question_dict['question'].strip():
            errors.append('The question must have a non-empty body.')

        if not question_dict['description']:
            errors.append('The description must be non-empty.')
        self.validate_no_description_collision(
            question_dict['description'], key, errors)

        try:
            # Coerce the rows attrib into a python int
            question_dict['rows'] = int(question_dict['rows'])
            if question_dict['rows'] <= 0:
                errors.append('Rows must be a positive whole number')
        except ValueError:
            errors.append('Rows must be a whole number')
        try:
            # Coerce the cols attrib into a python int
            question_dict['columns'] = int(question_dict['columns'])
            if question_dict['columns'] <= 0:
                errors.append('Columns must be a positive whole number')
        except ValueError:
            errors.append('Columns must be a whole number')

        if not question_dict['graders']:
            errors.append('The question must have at least one answer.')

        valid_matchers = set(
            matcher for (matcher, unused_text) in self.GRADER_TYPES)
        for index, grader in enumerate(question_dict['graders']):
            if grader['matcher'] not in valid_matchers:
                # This was previously an assert, which is stripped under
                # 'python -O' and crashed the handler with a 500 instead of
                # reporting a clean validation error for bad client input.
                errors.append(
                    'Answer %s has an invalid grading mechanism.' % (index + 1))
            if not grader['response'].strip():
                errors.append('Answer %s has no response text.' % (index + 1))
            try:
                float(grader['score'])
            except ValueError:
                errors.append(
                    'Answer %s must have a numeric score.' % (index + 1))

        return (question_dict, errors)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for management of individual students' peer review assignments."""
__author__ = 'Sean Lip (sll@google.com)'
import os
import urllib
from controllers.lessons import create_readonly_assessment_params
from controllers.utils import ApplicationHandler
import jinja2
from models import courses
from models import models
from models import review
from models import roles
from models import student_work
from models import transforms
from modules.review import domain
import messages
class AssignmentsRights(object):
    """Manages view/edit rights for assignments and reviews."""

    @classmethod
    def can_view(cls, handler):
        # Only course admins may view peer-review assignment data.
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_edit(cls, handler):
        # Only course admins may modify peer-review assignment data.
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_delete(cls, handler):
        # Deletion rights mirror edit rights.
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        # Addition rights mirror edit rights.
        return cls.can_edit(handler)
class AssignmentManager(ApplicationHandler):
    """A view for managing human-reviewed assignments."""

    def get_assignment_html(
            self, peer_reviewed_units, unit_id=None, reviewee_id=None,
            error_msg=None, readonly_assessment=None, review_steps=None,
            reviewers=None, reviews_params=None, model_version=None):
        """Renders a template allowing an admin to select an assignment.

        Args:
            peer_reviewed_units: list of units that use peer review.
            unit_id: str. Currently-selected unit id, if any.
            reviewee_id: str. Email of the student whose work is shown.
            error_msg: str. Error text to surface in the page, if any.
            readonly_assessment: params for a read-only view of the
                student's own submission.
            review_steps: review step records for the submission.
            reviewers: list of reviewer emails, parallel to review_steps.
            reviews_params: read-only review params, parallel to
                review_steps.
            model_version: assessment model version string.

        Returns:
            Jinja markup for the assignments menu.
        """
        edit_url = self.canonicalize_url('/dashboard')

        return jinja2.utils.Markup(self.get_template(
            'assignments_menu.html', [os.path.dirname(__file__)]
        ).render({
            'REVIEW_STATE_COMPLETED': domain.REVIEW_STATE_COMPLETED,
            'add_reviewer_action': self.get_action_url('add_reviewer'),
            'add_reviewer_xsrf_token': self.create_xsrf_token('add_reviewer'),
            'delete_reviewer_action': self.get_action_url('delete_reviewer'),
            'delete_reviewer_xsrf_token': self.create_xsrf_token(
                'delete_reviewer'),
            'edit_assignment_action': 'edit_assignment',
            'edit_url': edit_url,
            'error_msg': error_msg,
            'peer_reviewed_units': peer_reviewed_units,
            'readonly_student_assessment': readonly_assessment,
            'reviewee_id': reviewee_id or '',
            'reviewers': reviewers,
            'reviews_params': reviews_params,
            'review_steps': review_steps,
            'unit_id': unit_id,
            'model_version': model_version
        }, autoescape=True))

    def parse_request(self, course, unit_id, reviewee_id, reviewer_id=None):
        """Parses request parameters in a GET or POST request.

        Args:
            course: Course. A course object.
            unit_id: str. The id of the unit.
            reviewee_id: str. The email address of the reviewee.
            reviewer_id: str. The email address of the reviewer.

        Returns:
            - a dict containing some subset of the following keys: unit,
              reviewee, reviewer.
            - if necessary, an error message to be passed to the frontend.
        """
        request_params = {}

        # Check unit validity. An empty unit_id is not an error: it simply
        # means no unit has been selected yet.
        if not unit_id:
            return request_params, ''
        unit = course.find_unit_by_id(unit_id)
        if not unit:
            return request_params, '404: Unit not found.'
        if (unit.workflow.get_grader() != courses.HUMAN_GRADER or
            unit.workflow.get_matcher() != review.PEER_MATCHER):
            return request_params, '412: This unit is not peer-graded.'
        request_params['unit'] = unit

        # Check reviewee validity.
        if not reviewee_id:
            return request_params, '412: No student email supplied.'
        reviewee = models.Student.get_enrolled_student_by_email(reviewee_id)
        if not reviewee:
            return (request_params,
                    '412: No student with this email address exists.')
        request_params['reviewee'] = reviewee

        # Check reviewer validity, if applicable. reviewer_id is None when
        # the caller does not deal with a reviewer at all; an empty string
        # means a reviewer was expected but not supplied.
        if reviewer_id is not None:
            if not reviewer_id:
                return request_params, '412: No reviewer email supplied.'
            reviewer = models.Student.get_enrolled_student_by_email(reviewer_id)
            if not reviewer:
                return (request_params,
                        '412: No reviewer with this email address exists.')
            request_params['reviewer'] = reviewer

        return request_params, ''

    def get_edit_assignment(self):
        """Shows interface for selecting and viewing a student assignment."""
        if not AssignmentsRights.can_view(self):
            self.error(401)
            return

        course = courses.Course(self)
        peer_reviewed_units = course.get_peer_reviewed_units()

        template_values = {}
        template_values['page_title'] = self.format_title('Peer Review')
        template_values['page_description'] = (
            messages.ASSIGNMENTS_MENU_DESCRIPTION)

        unit_id = self.request.get('unit_id')
        if not unit_id:
            # No unit has been set yet, so display an empty form.
            template_values['main_content'] = self.get_assignment_html(
                peer_reviewed_units)
            self.render_page(template_values)
            return
        reviewee_id = self.request.get('reviewee_id')
        # This field may be populated due to a redirect from a POST method.
        post_error_msg = self.request.get('post_error_msg')

        request_params, error_msg = self.parse_request(
            course, unit_id, reviewee_id)
        unit = request_params.get('unit')
        reviewee = request_params.get('reviewee')

        if error_msg:
            template_values['main_content'] = self.get_assignment_html(
                peer_reviewed_units, unit_id=unit_id, reviewee_id=reviewee_id,
                error_msg=error_msg)
            self.render_page(template_values)
            return

        # Select version-specific renderers for the submission and reviews.
        model_version = course.get_assessment_model_version(unit)
        assert model_version in courses.SUPPORTED_ASSESSMENT_MODEL_VERSIONS
        if model_version == courses.ASSESSMENT_MODEL_VERSION_1_4:
            get_readonly_assessment = self.get_readonly_assessment_1_4
            get_readonly_review = self.get_readonly_review_1_4
        elif model_version == courses.ASSESSMENT_MODEL_VERSION_1_5:
            get_readonly_assessment = self.get_readonly_assessment_1_5
            get_readonly_review = self.get_readonly_review_1_5
        else:
            raise ValueError('Bad assessment model version: %s' % model_version)

        # Render content.
        rp = course.get_reviews_processor()
        submission_and_review_steps = rp.get_submission_and_review_steps(
            unit.unit_id, reviewee.get_key())
        if not submission_and_review_steps:
            template_values['main_content'] = self.get_assignment_html(
                peer_reviewed_units, unit_id=unit_id, reviewee_id=reviewee_id,
                error_msg='412: This student hasn\'t submitted the assignment.'
            )
            self.render_page(template_values)
            return

        readonly_assessment = get_readonly_assessment(
            unit, submission_and_review_steps[0])

        review_steps = submission_and_review_steps[1]
        # handle_empty_keys=True yields a None review for steps whose review
        # has not been started yet.
        reviews = rp.get_reviews_by_keys(
            unit.unit_id,
            [review_step.review_key for review_step in review_steps],
            handle_empty_keys=True)

        reviews_params = []
        reviewers = []
        for idx, review_step in enumerate(review_steps):
            params = get_readonly_review(unit, reviews[idx])
            reviews_params.append(params)
            # Map the review step's user id back to the reviewer's email.
            reviewer = models.Student.get_student_by_user_id(
                review_step.reviewer_key.name()).key().name()
            reviewers.append(reviewer)
        assert len(reviewers) == len(review_steps)
        assert len(reviews_params) == len(review_steps)

        template_values['main_content'] = self.get_assignment_html(
            peer_reviewed_units, unit_id=unit_id, reviewee_id=reviewee_id,
            readonly_assessment=readonly_assessment, review_steps=review_steps,
            error_msg=post_error_msg, reviewers=reviewers,
            reviews_params=reviews_params,
            model_version=model_version)
        self.render_page(template_values)

    def get_readonly_assessment_1_4(self, unit, submission_content):
        # v1.4 submissions store an answer list rendered via the
        # assessment's content definition.
        return create_readonly_assessment_params(
            courses.Course(self).get_assessment_content(unit),
            student_work.StudentWorkUtils.get_answer_list(submission_content))

    def get_readonly_assessment_1_5(self, unit, submission_content):
        # v1.5 submissions are rendered from the unit's own HTML content.
        return {
            'content': unit.html_content,
            'saved_answers': transforms.dumps(submission_content)
        }

    def get_readonly_review_1_4(self, unit, review_content):
        # v1.4 reviews use the unit's review-form content definition.
        return create_readonly_assessment_params(
            courses.Course(self).get_review_form_content(unit),
            student_work.StudentWorkUtils.get_answer_list(review_content))

    def get_readonly_review_1_5(self, unit, review_content):
        # v1.5 reviews are rendered from the unit's HTML review form.
        return {
            'content': unit.html_review_form,
            'saved_answers': transforms.dumps(review_content)
        }

    def post_add_reviewer(self):
        """Adds a new reviewer to a human-reviewed assignment."""
        if not AssignmentsRights.can_edit(self):
            self.error(401)
            return

        course = courses.Course(self)
        unit_id = self.request.get('unit_id')
        reviewee_id = self.request.get('reviewee_id')
        reviewer_id = self.request.get('reviewer_id')

        request_params, post_error_msg = self.parse_request(
            course, unit_id, reviewee_id, reviewer_id=reviewer_id)

        # Whatever happens next, control returns to the edit_assignment view
        # with the current selection preserved.
        redirect_params = {
            'action': 'edit_assignment',
            'reviewee_id': reviewee_id,
            'reviewer_id': reviewer_id,
            'unit_id': unit_id,
        }

        if post_error_msg:
            redirect_params['post_error_msg'] = post_error_msg
            self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
            return

        unit = request_params.get('unit')
        reviewee = request_params.get('reviewee')
        reviewer = request_params.get('reviewer')

        rp = course.get_reviews_processor()
        reviewee_key = reviewee.get_key()
        reviewer_key = reviewer.get_key()

        try:
            rp.add_reviewer(unit.unit_id, reviewee_key, reviewer_key)
        except domain.TransitionError:
            # The assignment was already in a state that forbids adding this
            # reviewer; report it via the redirect rather than crashing.
            redirect_params['post_error_msg'] = (
                '412: The reviewer is already assigned to this submission.')

        self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))

    def post_delete_reviewer(self):
        """Deletes a reviewer from a human-reviewed assignment."""
        if not AssignmentsRights.can_edit(self):
            self.error(401)
            return

        course = courses.Course(self)
        unit_id = self.request.get('unit_id')
        reviewee_id = self.request.get('reviewee_id')
        review_step_key = self.request.get('key')

        request_params, post_error_msg = self.parse_request(
            course, unit_id, reviewee_id)

        redirect_params = {
            'action': 'edit_assignment',
            'reviewee_id': reviewee_id,
            'unit_id': unit_id,
        }

        if post_error_msg:
            redirect_params['post_error_msg'] = post_error_msg
            self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
            return

        rp = course.get_reviews_processor()
        unit = request_params.get('unit')
        rp.delete_reviewer(unit.unit_id, review_step_key)
        self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting updates to basic course settings."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import roles
from models import transforms
from models import vfs
from modules.oeditor import oeditor
import yaml
import messages
from google.appengine.api import users
def is_editable_fs(app_context):
    """True iff the context's file system is the writable datastore one."""
    impl_class = app_context.fs.impl.__class__
    return impl_class == vfs.DatastoreBackedFileSystem
class CourseSettingsRights(object):
    """Manages view/edit rights for files."""

    @classmethod
    def can_view(cls, handler):
        # Only course admins may view course settings.
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_edit(cls, handler):
        # Only course admins may modify course settings.
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_delete(cls, handler):
        # Deletion rights mirror edit rights.
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        # Addition rights mirror edit rights.
        return cls.can_edit(handler)
class CourseSettingsHandler(ApplicationHandler):
    """Course settings handler."""

    def post_edit_basic_course_settings(self):
        """Handles editing of course.yaml."""
        assert is_editable_fs(self.app_context)

        # Ensure course.yaml exists, seeding it from the empty-course
        # template when it has never been saved before.
        fs = self.app_context.fs.impl
        course_yaml = fs.physical_to_logical('/course.yaml')
        if not fs.isfile(course_yaml):
            fs.put(course_yaml, vfs.string_to_stream(
                courses.EMPTY_COURSE_YAML % users.get_current_user().email()))

        self.redirect(self.get_action_url(
            'edit_basic_settings', key='/course.yaml'))

    def get_edit_basic_settings(self):
        """Shows editor for course.yaml."""
        key = self.request.get('key')
        exit_url = self.canonicalize_url('/dashboard?action=settings')
        rest_url = self.canonicalize_url('/rest/course/settings')

        registry = CourseSettingsRESTHandler.REGISTORY
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            registry.get_json_schema(),
            registry.get_schema_dict(),
            key, rest_url, exit_url,
            required_modules=CourseSettingsRESTHandler.REQUIRED_MODULES)

        self.render_page({
            'page_title': self.format_title('Edit Settings'),
            'page_description': messages.EDIT_SETTINGS_DESCRIPTION,
            'main_content': form_html,
        })
class CourseSettingsRESTHandler(BaseRESTHandler):
"""Provides REST API for a file."""
REGISTORY = courses.create_course_registry()
REQUIRED_MODULES = [
'inputex-date', 'inputex-string', 'inputex-textarea', 'inputex-url',
'inputex-checkbox', 'inputex-select', 'inputex-uneditable', 'gcb-rte']
URI = '/rest/course/settings'
    @classmethod
    def validate_content(cls, content):
        # Parse-only validation: raises a yaml error when content is not
        # well-formed YAML; the parsed result is discarded.
        yaml.safe_load(content)
    def get_course_dict(self):
        """Returns the course environment dict for the current app context."""
        return self.get_course().get_environ(self.app_context)
def get_group_id(self, email):
if not email or '@googlegroups.com' not in email:
return None
return email.split('@')[0]
def get_groups_web_url(self, email):
group_id = self.get_group_id(email)
if not group_id:
return None
return 'https://groups.google.com/group/' + group_id
def get_groups_embed_url(self, email):
group_id = self.get_group_id(email)
if not group_id:
return None
return 'https://groups.google.com/forum/embed/?place=forum/' + group_id
def get(self):
"""Handles REST GET verb and returns an object as JSON payload."""
assert is_editable_fs(self.app_context)
key = self.request.get('key')
if not CourseSettingsRights.can_view(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
# Load data if possible.
fs = self.app_context.fs.impl
filename = fs.physical_to_logical(key)
try:
stream = fs.get(filename)
except: # pylint: disable=bare-except
stream = None
if not stream:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
# Prepare data.
entity = {}
CourseSettingsRESTHandler.REGISTORY.convert_entity_to_json_entity(
self.get_course_dict(), entity)
# Render JSON response.
json_payload = transforms.dict_to_json(
entity,
CourseSettingsRESTHandler.REGISTORY.get_json_schema_dict())
transforms.send_json_response(
self, 200, 'Success.',
payload_dict=json_payload,
xsrf_token=XsrfTokenManager.create_xsrf_token(
'basic-course-settings-put'))
def put(self):
"""Handles REST PUT verb with JSON payload."""
assert is_editable_fs(self.app_context)
request = transforms.loads(self.request.get('request'))
key = request.get('key')
if not self.assert_xsrf_token_or_fail(
request, 'basic-course-settings-put', {'key': key}):
return
if not CourseSettingsRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
payload = request.get('payload')
request_data = {}
CourseSettingsRESTHandler.REGISTORY.convert_json_to_entity(
transforms.loads(payload), request_data)
course_data = request_data['course']
if 'forum_email' in course_data.keys():
forum_email = course_data['forum_email']
forum_web_url = self.get_groups_web_url(forum_email)
if forum_web_url:
course_data['forum_url'] = forum_web_url
forum_web_url = self.get_groups_embed_url(forum_email)
if forum_web_url:
course_data['forum_embed_url'] = forum_web_url
if 'announcement_list_email' in course_data.keys():
announcement_email = course_data['announcement_list_email']
announcement_web_url = self.get_groups_web_url(announcement_email)
if announcement_web_url:
course_data['announcement_list_url'] = announcement_web_url
entity = courses.deep_dict_merge(request_data, self.get_course_dict())
content = yaml.safe_dump(entity)
try:
self.validate_content(content)
content_stream = vfs.string_to_stream(unicode(content))
except Exception as e: # pylint: disable=W0703
transforms.send_json_response(self, 412, 'Validation error: %s' % e)
return
# Store new file content.
fs = self.app_context.fs.impl
filename = fs.physical_to_logical(key)
fs.put(filename, content_stream)
# Send reply.
transforms.send_json_response(self, 200, 'Saved.')
def delete(self):
"""Handles REST DELETE verb."""
request = transforms.loads(self.request.get('request'))
key = request.get('key')
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting unit and lesson editing."""
__author__ = 'John Orr (jorr@google.com)'
import cgi
import logging
import random
import urllib
from common import safe_dom
from common import tags
from common.schema_fields import FieldRegistry
from common.schema_fields import SchemaField
from controllers import sites
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import models as m_models
from models import review
from models import roles
from models import transforms
from modules.oeditor import oeditor
from tools import verify
import yaml
import filer
import messages
# Human-readable labels for the unit/lesson publication state dropdown.
DRAFT_TEXT = 'Private'
PUBLISHED_TEXT = 'Public'

# The editor has severe limitations for editing nested lists of objects. First,
# it does not allow one to move a lesson from one unit to another. We need a way
# of doing that. Second, JSON schema specification does not seem to support a
# type-safe array, which has objects of different types. We also want that
# badly :). All in all - using generic schema-based object editor for editing
# nested arrayable polymorphic attributes is a pain...

# Shared editor annotation that renders the is_draft boolean as a
# Private/Public select box, split out from the main field group.
STATUS_ANNOTATION = oeditor.create_bool_select_annotation(
    ['properties', 'is_draft'], 'Status', DRAFT_TEXT,
    PUBLISHED_TEXT, class_name='split-from-main-group')

# Allowed matchers. Keys of this dict represent internal keys for the matcher
# type, and the value represents the corresponding string that will appear in
# the dashboard UI.
ALLOWED_MATCHERS_NAMES = {review.PEER_MATCHER: messages.PEER_MATCHER_NAME}

# Allowed graders. Keys of this dict represent internal keys for the grader
# type, and the value represents the corresponding string that will appear in
# the dashboard UI.
ALLOWED_GRADERS_NAMES = {
    courses.AUTO_GRADER: messages.AUTO_GRADER_NAME,
    courses.HUMAN_GRADER: messages.HUMAN_GRADER_NAME,
}
class CourseOutlineRights(object):
    """Manages view/edit rights for course outline.

    All four permissions reduce to course-admin membership: view, delete
    and add each delegate to can_edit().
    """

    @classmethod
    def can_view(cls, handler):
        return cls.can_edit(handler)

    @classmethod
    def can_edit(cls, handler):
        # The single source of truth: only course admins may edit.
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_delete(cls, handler):
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        return cls.can_edit(handler)
class UnitLessonEditor(ApplicationHandler):
    """An editor for the unit and lesson titles."""

    def get_import_course(self):
        """Shows setup form for course import."""
        template_values = {}
        template_values['page_title'] = self.format_title('Import Course')

        # Annotations are dynamic: None means no importable course exists.
        annotations = ImportCourseRESTHandler.SCHEMA_ANNOTATIONS_DICT()
        if not annotations:
            template_values['main_content'] = 'No courses to import from.'
            self.render_page(template_values)
            return

        exit_url = self.canonicalize_url('/dashboard')
        rest_url = self.canonicalize_url(ImportCourseRESTHandler.URI)
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            ImportCourseRESTHandler.SCHEMA_JSON,
            annotations,
            None, rest_url, exit_url,
            auto_return=True,
            save_button_caption='Import',
            required_modules=ImportCourseRESTHandler.REQUIRED_MODULES)
        template_values = {}
        template_values['page_title'] = self.format_title('Import Course')
        template_values['page_description'] = messages.IMPORT_COURSE_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def get_edit_unit_lesson(self):
        """Shows editor for the list of unit and lesson titles."""
        key = self.request.get('key')

        exit_url = self.canonicalize_url('/dashboard')
        rest_url = self.canonicalize_url(UnitLessonTitleRESTHandler.URI)
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            UnitLessonTitleRESTHandler.SCHEMA_JSON,
            UnitLessonTitleRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            key, rest_url, exit_url,
            required_modules=UnitLessonTitleRESTHandler.REQUIRED_MODULES)
        template_values = {}
        template_values['page_title'] = self.format_title('Edit Course Outline')
        template_values[
            'page_description'] = messages.COURSE_OUTLINE_EDITOR_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def post_add_lesson(self):
        """Adds new lesson to a first unit of the course."""
        course = courses.Course(self)
        # Find the first unit of type 'unit' to attach the new lesson to.
        first_unit = None
        for unit in course.get_units():
            if unit.type == verify.UNIT_TYPE_UNIT:
                first_unit = unit
                break
        if first_unit:
            lesson = course.add_lesson(first_unit)
            course.save()
            # TODO(psimakov): complete 'edit_lesson' view
            self.redirect(self.get_action_url(
                'edit_lesson', key=lesson.lesson_id,
                extra_args={'is_newly_created': 1}))
        else:
            # No unit exists yet; nothing to attach a lesson to.
            self.redirect('/dashboard')

    def post_add_unit(self):
        """Adds new unit to a course."""
        course = courses.Course(self)
        unit = course.add_unit()
        course.save()
        self.redirect(self.get_action_url(
            'edit_unit', key=unit.unit_id, extra_args={'is_newly_created': 1}))

    def post_add_link(self):
        """Adds new link to a course."""
        course = courses.Course(self)
        link = course.add_link()
        link.href = ''
        course.save()
        self.redirect(self.get_action_url(
            'edit_link', key=link.unit_id, extra_args={'is_newly_created': 1}))

    def post_add_assessment(self):
        """Adds new assessment to a course."""
        course = courses.Course(self)
        assessment = course.add_assessment()
        course.save()
        self.redirect(self.get_action_url(
            'edit_assessment', key=assessment.unit_id,
            extra_args={'is_newly_created': 1}))

    def _render_edit_form_for(
        self, rest_handler_cls, title, annotations_dict=None,
        delete_xsrf_token='delete-unit', page_description=None,
        extra_js_files=None):
        """Renders an editor form for a given REST handler class.

        Args:
            rest_handler_cls: the REST handler class supplying schema and URI.
            title: suffix of the page title ('Edit <title>').
            annotations_dict: editor annotations; defaults to the handler's
                SCHEMA_ANNOTATIONS_DICT.
            delete_xsrf_token: XSRF token name embedded in the delete URL.
            page_description: optional description shown under the title.
            extra_js_files: optional extra editor JavaScript files.
        """
        if not annotations_dict:
            annotations_dict = rest_handler_cls.SCHEMA_ANNOTATIONS_DICT

        key = self.request.get('key')

        extra_args = {}
        if self.request.get('is_newly_created'):
            extra_args['is_newly_created'] = 1

        exit_url = self.canonicalize_url('/dashboard')
        rest_url = self.canonicalize_url(rest_handler_cls.URI)
        # The delete URL carries its own XSRF token as a query parameter.
        delete_url = '%s?%s' % (
            self.canonicalize_url(rest_handler_cls.URI),
            urllib.urlencode({
                'key': key,
                'xsrf_token': cgi.escape(
                    self.create_xsrf_token(delete_xsrf_token))
            }))

        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            rest_handler_cls.SCHEMA_JSON,
            annotations_dict,
            key, rest_url, exit_url,
            extra_args=extra_args,
            delete_url=delete_url, delete_method='delete',
            read_only=not filer.is_editable_fs(self.app_context),
            required_modules=rest_handler_cls.REQUIRED_MODULES,
            extra_js_files=extra_js_files)

        template_values = {}
        template_values['page_title'] = self.format_title('Edit %s' % title)
        if page_description:
            template_values['page_description'] = page_description
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def get_edit_unit(self):
        """Shows unit editor."""
        self._render_edit_form_for(
            UnitRESTHandler, 'Unit',
            page_description=messages.UNIT_EDITOR_DESCRIPTION)

    def get_edit_link(self):
        """Shows link editor."""
        self._render_edit_form_for(
            LinkRESTHandler, 'Link',
            page_description=messages.LINK_EDITOR_DESCRIPTION)

    def get_edit_assessment(self):
        """Shows assessment editor."""
        self._render_edit_form_for(
            AssessmentRESTHandler, 'Assessment',
            page_description=messages.ASSESSMENT_EDITOR_DESCRIPTION,
            extra_js_files=['assessment_editor_lib.js', 'assessment_editor.js'])

    def get_edit_lesson(self):
        """Shows the lesson/activity editor."""
        self._render_edit_form_for(
            LessonRESTHandler, 'Lessons and Activities',
            annotations_dict=LessonRESTHandler.get_schema_annotations_dict(
                courses.Course(self).get_units()),
            delete_xsrf_token='delete-lesson',
            extra_js_files=LessonRESTHandler.EXTRA_JS_FILES)
class CommonUnitRESTHandler(BaseRESTHandler):
    """A common super class for all unit REST handlers.

    Subclasses must override unit_to_dict() and apply_updates(), and must
    define a SCHEMA_DICT class attribute used to parse PUT payloads.
    """

    def unit_to_dict(self, unused_unit):
        """Converts a unit to a dictionary representation."""
        # NotImplementedError (rather than a bare Exception) is the idiomatic
        # marker for a method that subclasses are required to override; it is
        # still caught by any existing 'except Exception' handlers.
        raise NotImplementedError()

    def apply_updates(
        self, unused_unit, unused_updated_unit_dict, unused_errors):
        """Applies changes to a unit; modifies unit input argument."""
        raise NotImplementedError()

    def get(self):
        """A GET REST method shared by all unit types."""
        key = self.request.get('key')

        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        unit = courses.Course(self).find_unit_by_id(key)
        if not unit:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        # Surface a confirmation line when the unit was just created.
        message = ['Success.']
        if self.request.get('is_newly_created'):
            unit_type = verify.UNIT_TYPE_NAMES[unit.type].lower()
            message.append(
                'New %s has been created and saved.' % unit_type)

        transforms.send_json_response(
            self, 200, '\n'.join(message),
            payload_dict=self.unit_to_dict(unit),
            xsrf_token=XsrfTokenManager.create_xsrf_token('put-unit'))

    def put(self):
        """A PUT REST method shared by all unit types."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, 'put-unit', {'key': key}):
            return

        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        unit = courses.Course(self).find_unit_by_id(key)
        if not unit:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        payload = request.get('payload')
        updated_unit_dict = transforms.json_to_dict(
            transforms.loads(payload), self.SCHEMA_DICT)

        # Subclass-specific validation and field assignment; errors collected
        # here turn into a 412 response instead of a save.
        errors = []
        self.apply_updates(unit, updated_unit_dict, errors)
        if not errors:
            course = courses.Course(self)
            assert course.update_unit(unit)
            course.save()
            transforms.send_json_response(self, 200, 'Saved.')
        else:
            transforms.send_json_response(self, 412, '\n'.join(errors))

    def delete(self):
        """Handles REST DELETE verb with JSON payload."""
        key = self.request.get('key')

        if not self.assert_xsrf_token_or_fail(
                self.request, 'delete-unit', {'key': key}):
            return

        if not CourseOutlineRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        course = courses.Course(self)
        unit = course.find_unit_by_id(key)
        if not unit:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        course.delete_unit(unit)
        course.save()

        transforms.send_json_response(self, 200, 'Deleted.')
class UnitRESTHandler(CommonUnitRESTHandler):
    """Provides REST API to unit."""

    URI = '/rest/course/unit'

    SCHEMA_JSON = """
    {
        "id": "Unit Entity",
        "type": "object",
        "description": "Unit",
        "properties": {
            "key" : {"type": "string"},
            "type": {"type": "string"},
            "title": {"optional": true, "type": "string"},
            "is_draft": {"type": "boolean"}
            }
    }
    """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Unit'),
        (['properties', 'key', '_inputex'], {
            'label': 'ID', '_type': 'uneditable'}),
        (['properties', 'type', '_inputex'], {
            'label': 'Type', '_type': 'uneditable'}),
        (['properties', 'title', '_inputex'], {'label': 'Title'}),
        STATUS_ANNOTATION]

    REQUIRED_MODULES = [
        'inputex-string', 'inputex-select', 'inputex-uneditable']

    def unit_to_dict(self, unit):
        """Assembles the JSON-serializable dict for a plain unit."""
        # Use the named constant instead of the magic 'U' literal, consistent
        # with the comparisons elsewhere in this module.
        assert unit.type == verify.UNIT_TYPE_UNIT
        return {
            'key': unit.unit_id,
            'type': verify.UNIT_TYPE_NAMES[unit.type],
            'title': unit.title,
            'is_draft': not unit.now_available}

    def apply_updates(self, unit, updated_unit_dict, unused_errors):
        """Copies the editable fields from the payload onto the unit."""
        unit.title = updated_unit_dict.get('title')
        # is_draft in the editor is the inverse of now_available on the unit.
        unit.now_available = not updated_unit_dict.get('is_draft')
class LinkRESTHandler(CommonUnitRESTHandler):
    """Provides REST API to link."""

    URI = '/rest/course/link'

    SCHEMA_JSON = """
    {
        "id": "Link Entity",
        "type": "object",
        "description": "Link",
        "properties": {
            "key" : {"type": "string"},
            "type": {"type": "string"},
            "title": {"optional": true, "type": "string"},
            "url": {"optional": true, "type": "string"},
            "is_draft": {"type": "boolean"}
            }
    }
    """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Link'),
        (['properties', 'key', '_inputex'], {
            'label': 'ID', '_type': 'uneditable'}),
        (['properties', 'type', '_inputex'], {
            'label': 'Type', '_type': 'uneditable'}),
        (['properties', 'title', '_inputex'], {'label': 'Title'}),
        (['properties', 'url', '_inputex'], {
            'label': 'URL',
            'description': messages.LINK_EDITOR_URL_DESCRIPTION}),
        STATUS_ANNOTATION]

    REQUIRED_MODULES = [
        'inputex-string', 'inputex-select', 'inputex-uneditable']

    def unit_to_dict(self, unit):
        """Assembles the JSON-serializable dict for a link unit."""
        # 'O' is the link unit type marker -- presumably a verify-module
        # constant exists for it; confirm before replacing the literal.
        assert unit.type == 'O'
        return {
            'key': unit.unit_id,
            'type': verify.UNIT_TYPE_NAMES[unit.type],
            'title': unit.title,
            'url': unit.href,
            'is_draft': not unit.now_available}

    def apply_updates(self, unit, updated_unit_dict, unused_errors):
        """Copies the editable fields from the payload onto the link."""
        unit.title = updated_unit_dict.get('title')
        unit.href = updated_unit_dict.get('url')
        # is_draft in the editor is the inverse of now_available on the unit.
        unit.now_available = not updated_unit_dict.get('is_draft')
class ImportCourseRESTHandler(CommonUnitRESTHandler):
    """Provides REST API to course import."""

    URI = '/rest/course/import'

    SCHEMA_JSON = """
    {
        "id": "Import Course Entity",
        "type": "object",
        "description": "Import Course",
        "properties": {
            "course" : {"type": "string"}
            }
    }
    """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    REQUIRED_MODULES = [
        'inputex-string', 'inputex-select', 'inputex-uneditable']

    @classmethod
    def _get_course_list(cls):
        # Make a list of courses user has the rights to.
        course_list = []
        for acourse in sites.get_all_courses():
            if not roles.Roles.is_course_admin(acourse):
                continue
            # A course cannot be imported into itself.
            if acourse == sites.get_course_for_current_request():
                continue
            course_list.append({
                'value': acourse.raw,
                'label': cgi.escape(acourse.get_title())})
        return course_list

    @classmethod
    def SCHEMA_ANNOTATIONS_DICT(cls):  # pylint: disable-msg=g-bad-name
        """Schema annotations are dynamic and include a list of courses."""
        course_list = cls._get_course_list()
        if not course_list:
            return None

        # Format annotations.
        return [
            (['title'], 'Import Course'),
            (
                ['properties', 'course', '_inputex'],
                {
                    'label': 'Available Courses',
                    '_type': 'select',
                    'choices': course_list})]

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return

        course_list = self._get_course_list()
        # Guard against an empty course list; without it, indexing [0] below
        # would raise IndexError and surface as a server error.
        if not course_list:
            transforms.send_json_response(
                self, 404, 'No courses to import from.', {})
            return

        first_course_in_dropdown = course_list[0]['value']

        transforms.send_json_response(
            self, 200, None,
            payload_dict={'course': first_course_in_dropdown},
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'import-course'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        request = transforms.loads(self.request.get('request'))

        if not self.assert_xsrf_token_or_fail(
                request, 'import-course', {'key': None}):
            return

        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return

        payload = request.get('payload')
        course_raw = transforms.json_to_dict(
            transforms.loads(payload), self.SCHEMA_DICT)['course']

        # Resolve the raw course descriptor back to a course context.
        source = None
        for acourse in sites.get_all_courses():
            if acourse.raw == course_raw:
                source = acourse
                break

        if not source:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'raw': course_raw})
            return

        course = courses.Course(self)
        errors = []
        try:
            course.import_from(source, errors)
        except Exception as e:  # pylint: disable-msg=broad-except
            logging.exception(e)
            errors.append('Import failed: %s' % e)

        if errors:
            transforms.send_json_response(self, 412, '\n'.join(errors))
            return

        course.save()
        transforms.send_json_response(self, 200, 'Imported.')
def workflow_key(key):
    """Namespaces a settings key under the 'workflow:' prefix."""
    return 'workflow:{0}'.format(key)
def create_assessment_registry():
    """Create the registry for course properties.

    Returns:
        A FieldRegistry with two sub-registries: 'assessment' (core config)
        and 'review_opts' (peer-review configuration).
    """
    reg = FieldRegistry('Assessment Entity', description='Assessment')

    # Course level settings.
    course_opts = reg.add_sub_registry('assessment', 'Assessment Config')
    course_opts.add_property(SchemaField(
        'key', 'ID', 'string', editable=False,
        extra_schema_dict_values={'className': 'inputEx-Field keyHolder'}))
    course_opts.add_property(
        SchemaField('type', 'Type', 'string', editable=False))
    course_opts.add_property(
        SchemaField('title', 'Title', 'string', optional=True))
    course_opts.add_property(
        SchemaField('weight', 'Weight', 'string', optional=True))
    # Two content fields: raw JS-based content and rich-text HTML content.
    course_opts.add_property(SchemaField(
        'content', 'Assessment Content', 'text', optional=True,
        description=str(messages.ASSESSMENT_CONTENT_DESCRIPTION),
        extra_schema_dict_values={'className': 'inputEx-Field content'}))
    course_opts.add_property(SchemaField(
        'html_content', 'Assessment Content (HTML)', 'html', optional=True,
        extra_schema_dict_values={
            'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
            'excludedCustomTags': tags.EditorBlacklists.ASSESSMENT_SCOPE,
            'className': 'inputEx-Field html-content'}))
    course_opts.add_property(SchemaField(
        'html_check_answers', '"Check Answers" Buttons', 'boolean',
        optional=True,
        extra_schema_dict_values={
            'className': 'inputEx-Field assessment-editor-check-answers'}))
    # Workflow-backed properties are namespaced with the 'workflow:' prefix.
    course_opts.add_property(
        SchemaField(workflow_key(courses.SUBMISSION_DUE_DATE_KEY),
                    'Submission Due Date', 'string', optional=True,
                    description=str(messages.DUE_DATE_FORMAT_DESCRIPTION)))
    course_opts.add_property(
        SchemaField(workflow_key(courses.GRADER_KEY), 'Grading Method',
                    'string',
                    select_data=ALLOWED_GRADERS_NAMES.items()))
    course_opts.add_property(
        SchemaField('is_draft', 'Status', 'boolean',
                    select_data=[(True, DRAFT_TEXT), (False, PUBLISHED_TEXT)],
                    extra_schema_dict_values={
                        'className': 'split-from-main-group'}))

    review_opts = reg.add_sub_registry(
        'review_opts', 'Review Config',
        description=str(messages.ASSESSMENT_DETAILS_DESCRIPTION))
    # The matcher selector is only shown when there is an actual choice.
    if len(ALLOWED_MATCHERS_NAMES) > 1:
        review_opts.add_property(
            SchemaField(workflow_key(courses.MATCHER_KEY), 'Review Matcher',
                        'string', optional=True,
                        select_data=ALLOWED_MATCHERS_NAMES.items()))

    review_opts.add_property(
        SchemaField(
            'review_form', 'Reviewer Feedback Form', 'text', optional=True,
            description=str(messages.REVIEWER_FEEDBACK_FORM_DESCRIPTION),
            extra_schema_dict_values={
                'className': 'inputEx-Field review-form'}))
    review_opts.add_property(SchemaField(
        'html_review_form', 'Reviewer Feedback Form (HTML)', 'html',
        optional=True,
        extra_schema_dict_values={
            'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
            'excludedCustomTags': tags.EditorBlacklists.ASSESSMENT_SCOPE,
            'className': 'inputEx-Field html-review-form'}))
    review_opts.add_property(
        SchemaField(
            workflow_key(courses.REVIEW_DUE_DATE_KEY),
            'Review Due Date', 'string', optional=True,
            description=str(messages.REVIEW_DUE_DATE_FORMAT_DESCRIPTION)))
    review_opts.add_property(
        SchemaField(workflow_key(courses.REVIEW_MIN_COUNT_KEY),
                    'Review Min Count', 'integer', optional=True,
                    description=str(messages.REVIEW_MIN_COUNT_DESCRIPTION)))
    review_opts.add_property(
        SchemaField(workflow_key(courses.REVIEW_WINDOW_MINS_KEY),
                    'Review Window Timeout', 'integer', optional=True,
                    description=str(messages.REVIEW_TIMEOUT_IN_MINUTES)))
    return reg
class AssessmentRESTHandler(CommonUnitRESTHandler):
    """Provides REST API to assessment."""

    URI = '/rest/course/assessment'

    # Schema registry shared by GET (entity -> JSON) and PUT (JSON -> entity).
    REG = create_assessment_registry()

    SCHEMA_JSON = REG.get_json_schema()

    SCHEMA_DICT = REG.get_json_schema_dict()

    SCHEMA_ANNOTATIONS_DICT = REG.get_schema_dict()

    REQUIRED_MODULES = [
        'gcb-rte', 'inputex-select', 'inputex-string', 'inputex-textarea',
        'inputex-uneditable', 'inputex-integer', 'inputex-hidden',
        'inputex-checkbox']

    def _get_assessment_path(self, unit):
        """Returns the logical path of the assessment content file."""
        return self.app_context.fs.impl.physical_to_logical(
            courses.Course(self).get_assessment_filename(unit.unit_id))

    def _get_review_form_path(self, unit):
        """Returns the logical path of the review form file."""
        return self.app_context.fs.impl.physical_to_logical(
            courses.Course(self).get_review_form_filename(unit.unit_id))

    def unit_to_dict(self, unit):
        """Assemble a dict with the unit data fields."""
        # 'A' is the assessment unit type marker -- presumably a verify-module
        # constant exists for it; confirm before replacing the literal.
        assert unit.type == 'A'

        path = self._get_assessment_path(unit)
        fs = self.app_context.fs
        if fs.isfile(path):
            content = fs.get(path)
        else:
            content = ''

        review_form_path = self._get_review_form_path(unit)
        if review_form_path and fs.isfile(review_form_path):
            review_form = fs.get(review_form_path)
        else:
            review_form = ''

        workflow = unit.workflow

        # Due dates are serialized in ISO-8601, or '' when unset.
        if workflow.get_submission_due_date():
            submission_due_date = workflow.get_submission_due_date().strftime(
                courses.ISO_8601_DATE_FORMAT)
        else:
            submission_due_date = ''

        if workflow.get_review_due_date():
            review_due_date = workflow.get_review_due_date().strftime(
                courses.ISO_8601_DATE_FORMAT)
        else:
            review_due_date = ''

        return {
            'assessment': {
                'key': unit.unit_id,
                'type': verify.UNIT_TYPE_NAMES[unit.type],
                'title': unit.title,
                # Older units may predate the weight field; default to 0.
                'weight': str(unit.weight if hasattr(unit, 'weight') else 0),
                'content': content,
                'html_content': unit.html_content or '',
                'html_check_answers': unit.html_check_answers,
                'is_draft': not unit.now_available,
                workflow_key(courses.SUBMISSION_DUE_DATE_KEY): (
                    submission_due_date),
                workflow_key(courses.GRADER_KEY): workflow.get_grader(),
            },
            'review_opts': {
                workflow_key(courses.MATCHER_KEY): workflow.get_matcher(),
                workflow_key(courses.REVIEW_DUE_DATE_KEY): review_due_date,
                workflow_key(courses.REVIEW_MIN_COUNT_KEY): (
                    workflow.get_review_min_count()),
                workflow_key(courses.REVIEW_WINDOW_MINS_KEY): (
                    workflow.get_review_window_mins()),
                'review_form': review_form,
                'html_review_form': unit.html_review_form or ''
            }
        }

    def apply_updates(self, unit, updated_unit_dict, errors):
        """Store the updated assessment."""
        entity_dict = {}
        AssessmentRESTHandler.REG.convert_json_to_entity(
            updated_unit_dict, entity_dict)

        unit.title = entity_dict.get('title')

        try:
            unit.weight = int(entity_dict.get('weight'))
            if unit.weight < 0:
                errors.append('The weight must be a non-negative integer.')
        except (TypeError, ValueError):
            # int(None) raises TypeError when the weight is missing entirely,
            # int('abc') raises ValueError; report both as the same validation
            # error instead of letting TypeError escape as a server error.
            errors.append('The weight must be an integer.')

        unit.now_available = not entity_dict.get('is_draft')
        course = courses.Course(self)

        content = entity_dict.get('content')
        if content:
            course.set_assessment_content(
                unit, entity_dict.get('content'), errors=errors)

        unit.html_content = entity_dict.get('html_content')
        unit.html_check_answers = entity_dict.get('html_check_answers')

        workflow_dict = entity_dict.get('workflow')
        # When only one matcher exists the selector is hidden from the UI
        # (see create_assessment_registry); fill in the implied value.
        if len(ALLOWED_MATCHERS_NAMES) == 1:
            workflow_dict[courses.MATCHER_KEY] = (
                ALLOWED_MATCHERS_NAMES.keys()[0])
        unit.workflow_yaml = yaml.safe_dump(workflow_dict)
        unit.workflow.validate(errors=errors)

        # Only save the review form if the assessment needs human grading.
        if not errors:
            if course.needs_human_grader(unit):
                review_form = entity_dict.get('review_form')
                if review_form:
                    course.set_review_form(
                        unit, review_form, errors=errors)
                unit.html_review_form = entity_dict.get('html_review_form')
            elif entity_dict.get('review_form'):
                errors.append(
                    'Review forms for auto-graded assessments should be empty.')
class UnitLessonTitleRESTHandler(BaseRESTHandler):
    """Provides REST API to unit and lesson titles."""

    URI = '/rest/course/outline'

    # Nested schema: the outline is a sortable array of units, each carrying
    # a sortable array of lessons; titles are read-only, ids are hidden.
    SCHEMA_JSON = """
        {
            "type": "object",
            "description": "Course Outline",
            "properties": {
                "outline": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "id": {"type": "string"},
                            "title": {"type": "string"},
                            "lessons": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "id": {"type": "string"},
                                        "title": {"type": "string"}
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Course Outline'),
        (['_inputex'], {'className': 'organizer'}),
        (['properties', 'outline', '_inputex'], {
            'sortable': 'true',
            'label': ''}),
        ([
            'properties', 'outline', 'items',
            'properties', 'title', '_inputex'], {
                '_type': 'uneditable',
                'label': ''}),
        (['properties', 'outline', 'items', 'properties', 'id', '_inputex'], {
            '_type': 'hidden'}),
        (['properties', 'outline', 'items', 'properties', 'lessons',
          '_inputex'], {
              'sortable': 'true',
              'label': '',
              'listAddLabel': 'Add  a new lesson',
              'listRemoveLabel': 'Delete'}),
        (['properties', 'outline', 'items', 'properties', 'lessons', 'items',
          'properties', 'title', '_inputex'], {
              '_type': 'uneditable',
              'label': ''}),
        (['properties', 'outline', 'items', 'properties', 'lessons', 'items',
          'properties', 'id', '_inputex'], {
              '_type': 'hidden'})
        ]

    REQUIRED_MODULES = [
        'inputex-hidden', 'inputex-list', 'inputex-string',
        'inputex-uneditable']

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return

        course = courses.Course(self)
        outline_data = []
        for unit in course.get_units():
            # Collect this unit's lessons as (title, id) entries.
            lesson_data = []
            for lesson in course.get_lessons(unit.unit_id):
                lesson_data.append({
                    'title': lesson.title,
                    'id': lesson.lesson_id})
            unit_title = unit.title
            # Prefix real units (as opposed to links/assessments) for clarity.
            if verify.UNIT_TYPE_UNIT == unit.type:
                unit_title = 'Unit: %s' % unit.title
            outline_data.append({
                'title': unit_title,
                'id': unit.unit_id,
                'lessons': lesson_data})
        transforms.send_json_response(
            self, 200, None,
            payload_dict={'outline': outline_data},
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'unit-lesson-reorder'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        request = transforms.loads(self.request.get('request'))

        if not self.assert_xsrf_token_or_fail(
                request, 'unit-lesson-reorder', {'key': None}):
            return

        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return

        payload = request.get('payload')
        payload_dict = transforms.json_to_dict(
            transforms.loads(payload), self.SCHEMA_DICT)
        # Persist the new unit/lesson ordering exactly as submitted.
        course = courses.Course(self)
        course.reorder_units(payload_dict['outline'])
        course.save()

        transforms.send_json_response(self, 200, 'Saved.')
class LessonRESTHandler(BaseRESTHandler):
"""Provides REST API to handle lessons and activities."""
URI = '/rest/course/lesson'
# Note GcbRte relies on the structure of this schema. Do not change without
# checking the dependency.
SCHEMA_JSON = """
{
"id": "Lesson Entity",
"type": "object",
"description": "Lesson",
"properties": {
"key" : {"type": "string"},
"title" : {"type": "string"},
"unit_id": {"type": "string"},
"video" : {"type": "string", "optional": true},
"scored": {"type": "string"},
"objectives" : {
"type": "string", "format": "html", "optional": true},
"notes" : {"type": "string", "optional": true},
"activity_title" : {"type": "string", "optional": true},
"activity_listed" : {"type": "boolean", "optional": true},
"activity": {"type": "string", "format": "text", "optional": true},
"is_draft": {"type": "boolean"}
}
}
"""
SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
REQUIRED_MODULES = [
'inputex-string', 'gcb-rte', 'inputex-select', 'inputex-textarea',
'inputex-uneditable', 'inputex-checkbox']
EXTRA_JS_FILES = ['lesson_editor_lib.js', 'lesson_editor.js']
    @classmethod
    def get_schema_annotations_dict(cls, units):
        """Builds editor annotations, including a parent-unit dropdown.

        Args:
            units: all units of the course; only units of type 'U' become
                choices in the lesson's parent unit selector.

        Returns:
            A list of (path, annotation) tuples for the object editor.
        """
        unit_list = []
        for unit in units:
            if unit.type == 'U':
                unit_list.append({
                    'label': cgi.escape(
                        'Unit %s - %s' % (unit.index, unit.title)),
                    'value': unit.unit_id})

        return [
            (['title'], 'Lesson'),
            (['properties', 'key', '_inputex'], {
                'label': 'ID', '_type': 'uneditable',
                'className': 'inputEx-Field keyHolder'}),
            (['properties', 'title', '_inputex'], {'label': 'Title'}),
            (['properties', 'unit_id', '_inputex'], {
                'label': 'Parent Unit', '_type': 'select',
                'choices': unit_list}),
            (['properties', 'scored', '_inputex'], {
                '_type': 'select',
                'choices': [
                    {'label': 'Questions are scored', 'value': 'scored'},
                    {
                        'label': 'Questions only give feedback',
                        'value': 'not_scored'}],
                'label': 'Scored',
                'description': messages.LESSON_SCORED_DESCRIPTION}),
            # TODO(sll): The internal 'objectives' property should also be
            # renamed.
            (['properties', 'objectives', '_inputex'], {
                'label': 'Lesson Body',
                'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
                'description': messages.LESSON_OBJECTIVES_DESCRIPTION}),
            (['properties', 'video', '_inputex'], {
                'label': 'Video ID',
                'description': messages.LESSON_VIDEO_ID_DESCRIPTION}),
            (['properties', 'notes', '_inputex'], {
                'label': 'Notes',
                'description': messages.LESSON_NOTES_DESCRIPTION}),
            (['properties', 'activity_title', '_inputex'], {
                'label': 'Activity Title',
                'description': messages.LESSON_ACTIVITY_TITLE_DESCRIPTION}),
            (['properties', 'activity_listed', '_inputex'], {
                'label': 'Activity Listed',
                'description': messages.LESSON_ACTIVITY_LISTED_DESCRIPTION}),
            (['properties', 'activity', '_inputex'], {
                'label': 'Activity',
                'description': str(messages.LESSON_ACTIVITY_DESCRIPTION),
                'className': 'inputEx-Field activityHolder'}),
            STATUS_ANNOTATION]
def get(self):
    """Handles GET REST verb and returns lesson object as JSON payload."""
    if not CourseOutlineRights.can_view(self):
        transforms.send_json_response(self, 401, 'Access denied.', {})
        return

    key = self.request.get('key')
    course = courses.Course(self)
    lesson = course.find_lesson_by_id(None, key)
    assert lesson

    # Pull in the legacy activity file contents, when one exists.
    fs = self.app_context.fs
    activity_path = fs.impl.physical_to_logical(
        course.get_activity_filename(lesson.unit_id, lesson.lesson_id))
    activity = (
        fs.get(activity_path)
        if lesson.has_activity and fs.isfile(activity_path) else '')

    payload_dict = dict(
        key=key,
        title=lesson.title,
        unit_id=lesson.unit_id,
        scored='scored' if lesson.scored else 'not_scored',
        objectives=lesson.objectives,
        video=lesson.video,
        notes=lesson.notes,
        activity_title=lesson.activity_title,
        activity_listed=lesson.activity_listed,
        activity=activity,
        is_draft=not lesson.now_available)

    status_lines = ['Success.']
    if self.request.get('is_newly_created'):
        status_lines.append('New lesson has been created and saved.')

    transforms.send_json_response(
        self, 200, '\n'.join(status_lines),
        payload_dict=payload_dict,
        xsrf_token=XsrfTokenManager.create_xsrf_token('lesson-edit'))
def put(self):
    """Handles PUT REST verb to save lesson and associated activity."""
    request = transforms.loads(self.request.get('request'))
    key = request.get('key')

    # Guard clauses: XSRF token, permission, existence.
    if not self.assert_xsrf_token_or_fail(
            request, 'lesson-edit', {'key': key}):
        return
    if not CourseOutlineRights.can_edit(self):
        transforms.send_json_response(
            self, 401, 'Access denied.', {'key': key})
        return

    course = courses.Course(self)
    lesson = course.find_lesson_by_id(None, key)
    if not lesson:
        transforms.send_json_response(
            self, 404, 'Object not found.', {'key': key})
        return

    updates_dict = transforms.json_to_dict(
        transforms.loads(request.get('payload')), self.SCHEMA_DICT)

    # Copy the simple pass-through fields straight onto the lesson.
    for field in (
            'title', 'unit_id', 'objectives', 'video', 'notes',
            'activity_title', 'activity_listed'):
        setattr(lesson, field, updates_dict[field])
    lesson.scored = updates_dict['scored'] == 'scored'
    lesson.now_available = not updates_dict['is_draft']

    errors = []
    activity = updates_dict.get('activity', '').strip()
    if activity:
        lesson.has_activity = True
        course.set_activity_content(lesson, activity, errors=errors)
    else:
        # No activity text: drop any previously stored activity file.
        lesson.has_activity = False
        fs = self.app_context.fs
        path = fs.impl.physical_to_logical(course.get_activity_filename(
            lesson.unit_id, lesson.lesson_id))
        if fs.isfile(path):
            fs.delete(path)

    if errors:
        transforms.send_json_response(self, 412, '\n'.join(errors))
        return
    assert course.update_lesson(lesson)
    course.save()
    transforms.send_json_response(self, 200, 'Saved.')
def delete(self):
    """Handles REST DELETE verb with JSON payload."""
    key = self.request.get('key')

    # Guard clauses: XSRF token, permission, existence.
    if not self.assert_xsrf_token_or_fail(
            self.request, 'delete-lesson', {'key': key}):
        return
    if not CourseOutlineRights.can_delete(self):
        transforms.send_json_response(
            self, 401, 'Access denied.', {'key': key})
        return

    course = courses.Course(self)
    target = course.find_lesson_by_id(None, key)
    if not target:
        transforms.send_json_response(
            self, 404, 'Object not found.', {'key': key})
        return

    assert course.delete_lesson(target)
    course.save()
    transforms.send_json_response(self, 200, 'Deleted.')
def generate_instanceid():
    """Generates a random 12-character alphanumeric instance id.

    Used as the instanceid attribute on generated <question> and
    <question-group> tags. Ids only need to be unlikely to collide within
    one lesson body, so a random draw of 12 characters from the 62
    alphanumerics is sufficient.

    Returns:
        str. A 12-character string of ASCII letters and digits.
    """
    # Local import keeps this module's top-level import list untouched.
    import string
    # Same 62-character alphabet as before, but sourced from the stdlib
    # constants instead of a hand-typed literal; range() works on both
    # Python 2 and 3 (the old xrange() did not).
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(12))
class CollisionError(Exception):
    """Raised when an imported question's description already exists.

    The import handlers use a question's generated description as a
    de-duplication key: hitting a description that is already in the
    question bank means the activity/assessment was imported before.
    """
class ImportActivityRESTHandler(BaseRESTHandler):
    """REST handler for requests to import an activity into the lesson body.

    Takes the JavaScript source of a legacy activity-*.js file, converts
    each question in it into a datastore question (or question group)
    entity, and returns lesson-body HTML containing <question> /
    <question-group> tags that reference the new entities.
    """

    URI = '/rest/course/lesson/activity'

    # Version stamped into every question dict created by the import.
    VERSION = '1.5'

    def put(self):
        """Handle REST PUT instruction to import an assignment."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(request, 'lesson-edit', {}):
            return

        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        text = request.get('text')

        # Evaluate the activity source in a restricted verifier scope.
        try:
            content, noverify_text = verify.convert_javascript_to_python(
                text, 'activity')
            activity = verify.evaluate_python_expression_from_text(
                content, 'activity', verify.Activity().scope, noverify_text)
        except Exception:  # pylint: disable-msg=broad-except
            transforms.send_json_response(
                self, 412, 'Unable to parse activity.')
            return

        try:
            verify.Verifier().verify_activity_instance(activity, 'none')
        except verify.SchemaException:
            transforms.send_json_response(
                self, 412, 'Unable to validate activity.')
            return

        self.course = courses.Course(self)
        self.lesson = self.course.find_lesson_by_id(None, key)
        if not self.lesson:
            # Bug fix: a bad/stale key previously caused an AttributeError
            # on self.lesson.unit_id below; answer 404 instead, matching
            # the other lesson REST handlers.
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return
        self.unit = self.course.find_unit_by_id(self.lesson.unit_id)

        # Descriptions double as de-duplication keys for imported items.
        self.question_number = 0
        self.question_descriptions = set(
            [q.description for q in m_models.QuestionDAO.get_all()])
        self.question_group_descriptions = set(
            [qg.description for qg in m_models.QuestionGroupDAO.get_all()])

        lesson_content = []
        try:
            for item in activity['activity']:
                if isinstance(item, basestring):
                    # Plain HTML fragments pass through unchanged.
                    lesson_content.append(item)
                else:
                    question_tag = self.import_question(item)
                    lesson_content.append(question_tag)
                    self.question_number += 1
        except CollisionError:
            transforms.send_json_response(
                self, 412, (
                    'This activity has already been imported. Remove duplicate '
                    'imported questions from the question bank in order to '
                    're-import.'))
            return
        except Exception as ex:
            transforms.send_json_response(
                self, 412, 'Unable to convert: %s' % ex)
            return

        transforms.send_json_response(self, 200, 'OK.', payload_dict={
            'content': '\n'.join(lesson_content)
        })

    def _get_question_description(self):
        """Returns the unique description labeling the current question."""
        return (
            'Imported from unit "%s", lesson "%s" (question #%s)' % (
                self.unit.title, self.lesson.title, self.question_number + 1))

    def _insert_question(self, question_dict, question_type):
        """Persists one question dict; returns the new question id."""
        question = m_models.QuestionDTO(None, question_dict)
        question.type = question_type
        return m_models.QuestionDAO.save(question)

    def _insert_question_group(self, question_group_dict):
        """Persists one question-group dict; returns the new group id."""
        question_group = m_models.QuestionGroupDTO(None, question_group_dict)
        return m_models.QuestionGroupDAO.save(question_group)

    def import_question(self, item):
        """Converts one activity item into a question tag for the lesson.

        Args:
            item: dict. One non-string entry of the activity list.

        Returns:
            str. A <question> or <question-group> tag referencing the
            newly saved entity.

        Raises:
            ValueError: if the item's questionType is not recognized.
        """
        question_type = item['questionType']
        if question_type == 'multiple choice':
            question_dict = self.import_multiple_choice(item)
            quid = self._insert_question(
                question_dict, m_models.QuestionDTO.MULTIPLE_CHOICE)
            return '<question quid="%s" instanceid="%s"></question>' % (
                quid, generate_instanceid())
        elif question_type == 'multiple choice group':
            question_group_dict = self.import_multiple_choice_group(item)
            qgid = self._insert_question_group(question_group_dict)
            return (
                '<question-group qgid="%s" instanceid="%s">'
                '</question-group>') % (
                    qgid, generate_instanceid())
        elif question_type == 'freetext':
            question_dict = self.import_freetext(item)
            quid = self._insert_question(
                question_dict, m_models.QuestionDTO.SHORT_ANSWER)
            return '<question quid="%s" instanceid="%s"></question>' % (
                quid, generate_instanceid())
        else:
            raise ValueError('Unknown question type: %s' % question_type)

    def import_multiple_choice(self, orig_question):
        """Builds a multiple-choice question dict from a legacy question.

        Raises:
            CollisionError: if a question with this description exists.
        """
        description = self._get_question_description()
        if description in self.question_descriptions:
            raise CollisionError()

        return {
            'version': self.VERSION,
            'description': description,
            'question': '',
            'multiple_selections': False,
            'choices': [
                {
                    'text': choice[0],
                    # choice[1] is a verifier truth wrapper; .value marks
                    # the correct answer.
                    'score': 1.0 if choice[1].value else 0.0,
                    'feedback': choice[2]
                } for choice in orig_question['choices']]}

    def import_multiple_choice_group(self, mc_choice_group):
        """Import a 'multiple choice group' as a question group.

        Raises:
            CollisionError: if a group with this description exists.
        """
        description = self._get_question_description()
        if description in self.question_group_descriptions:
            raise CollisionError()

        question_group_dict = {
            'version': self.VERSION,
            'description': description}

        question_list = []
        for index, question in enumerate(mc_choice_group['questionsList']):
            question_dict = self.import_multiple_choice_group_question(
                question, index)
            question = m_models.QuestionDTO(None, question_dict)
            question.type = m_models.QuestionDTO.MULTIPLE_CHOICE
            question_list.append(question)

        quid_list = m_models.QuestionDAO.save_all(question_list)
        question_group_dict['items'] = [{
            'question': str(quid),
            'weight': 1.0} for quid in quid_list]

        return question_group_dict

    def import_multiple_choice_group_question(self, orig_question, index):
        """Import the questions from a group as individual questions."""
        # TODO(jorr): Handle allCorrectOutput and someCorrectOutput

        description = (
            'Imported from unit "%s", lesson "%s" (question #%s, part #%s)' % (
                self.unit.title, self.lesson.title, self.question_number + 1,
                index + 1))
        if description in self.question_descriptions:
            raise CollisionError()

        correct_index = orig_question['correctIndex']
        # A list of correct indices means "select all that apply".
        multiple_selections = not isinstance(correct_index, int)
        if multiple_selections:
            # Split full credit across the correct answers; wrong picks
            # carry a penalty.
            partial = 1.0 / len(correct_index)
            choices = [{
                'text': text,
                'score': partial if i in correct_index else -1.0
            } for i, text in enumerate(orig_question['choices'])]
        else:
            choices = [{
                'text': text,
                'score': 1.0 if i == correct_index else 0.0
            } for i, text in enumerate(orig_question['choices'])]

        return {
            'version': self.VERSION,
            'description': description,
            'question': orig_question.get('questionHTML') or '',
            'multiple_selections': multiple_selections,
            'choices': choices}

    def import_freetext(self, orig_question):
        """Builds a regex-graded short-answer dict from a freetext question.

        Raises:
            CollisionError: if a question with this description exists.
        """
        description = self._get_question_description()
        if description in self.question_descriptions:
            raise CollisionError()

        return {
            'version': self.VERSION,
            'description': description,
            'question': '',
            'hint': orig_question['showAnswerOutput'],
            'graders': [{
                'score': 1.0,
                'matcher': 'regex',
                'response': orig_question['correctAnswerRegex'].value,
                'feedback': orig_question.get('correctAnswerOutput')
            }],
            'defaultFeedback': orig_question.get('incorrectAnswerOutput')}
class ExportAssessmentRESTHandler(BaseRESTHandler):
    """REST handler for requests to export an activity into new format.

    Converts a legacy JavaScript assessment into a new assessment unit
    whose questions live in the datastore question bank.
    """

    # NOTE(review): 'asessment' is misspelled, but this URI is a wire
    # contract with the JavaScript client; do not change it here alone.
    URI = '/rest/course/asessment/export'

    # Version stamped into every question dict created by the export.
    VERSION = '1.5'

    def put(self):
        """Handle the PUT verb to export an assessment."""
        request = transforms.loads(self.request.get('request'))

        key = request.get('key')

        # Permission and XSRF guards.
        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        if not self.assert_xsrf_token_or_fail(
                request, 'put-unit', {'key': key}):
            return

        raw_assessment_dict = transforms.json_to_dict(
            request.get('payload'), AssessmentRESTHandler.SCHEMA_DICT)

        entity_dict = {}
        AssessmentRESTHandler.REG.convert_json_to_entity(
            raw_assessment_dict, entity_dict)

        course = courses.Course(self)
        self.unit = course.find_unit_by_id(key)
        # Descriptions double as de-duplication keys for imported questions.
        self.question_descriptions = set(
            [q.description for q in m_models.QuestionDAO.get_all()])

        # Import all the assessment context except the questions
        new_unit = course.add_assessment()
        errors = []
        new_unit.title = 'Exported from %s ' % entity_dict.get('title')
        try:
            # NOTE(review): int(None) raises TypeError, not ValueError, so a
            # missing 'weight' would propagate; confirm it is always present.
            new_unit.weight = int(entity_dict.get('weight'))
            if new_unit.weight < 0:
                errors.append('The weight must be a non-negative integer.')
        except ValueError:
            errors.append('The weight must be an integer.')
        new_unit.now_available = not entity_dict.get('is_draft')

        workflow_dict = entity_dict.get('workflow')
        # With a single allowed matcher there is no user choice; pin it.
        if len(ALLOWED_MATCHERS_NAMES) == 1:
            workflow_dict[courses.MATCHER_KEY] = (
                ALLOWED_MATCHERS_NAMES.keys()[0])
        new_unit.workflow_yaml = yaml.safe_dump(workflow_dict)
        new_unit.workflow.validate(errors=errors)

        if errors:
            transforms.send_json_response(self, 412, '\n'.join(errors))
            return

        # get_assessment_dict sends its own error response on failure.
        assessment_dict = self.get_assessment_dict(entity_dict.get('content'))
        if assessment_dict is None:
            return

        if assessment_dict.get('checkAnswers'):
            new_unit.html_check_answers = assessment_dict['checkAnswers'].value

        # Import the questions in the assessment and the review questionnaire
        html_content = []
        html_review_form = []

        if assessment_dict.get('preamble'):
            html_content.append(assessment_dict['preamble'])

        # prepare all the dtos for the questions in the assigment content
        question_dtos = self.get_question_dtos(
            assessment_dict,
            'Imported from assessment "%s" (question #%s)')
        if question_dtos is None:
            return

        # prepare the questions for the review questionnaire, if necessary
        review_dtos = []
        if course.needs_human_grader(new_unit):
            review_str = entity_dict.get('review_form')
            review_dict = self.get_assessment_dict(review_str)
            if review_dict is None:
                return
            if review_dict.get('preamble'):
                html_review_form.append(review_dict['preamble'])

            review_dtos = self.get_question_dtos(
                review_dict,
                'Imported from assessment "%s" (review question #%s)')
            if review_dtos is None:
                return

        # batch submit the questions and split out their resulting id's
        all_dtos = question_dtos + review_dtos
        all_ids = m_models.QuestionDAO.save_all(all_dtos)
        question_ids = all_ids[:len(question_dtos)]
        review_ids = all_ids[len(question_dtos):]

        # insert question tags for the assessment content
        for quid in question_ids:
            html_content.append(
                str(safe_dom.Element(
                    'question',
                    quid=str(quid), instanceid=generate_instanceid())))
        new_unit.html_content = '\n'.join(html_content)

        # insert question tags for the review questionnaire
        for quid in review_ids:
            html_review_form.append(
                str(safe_dom.Element(
                    'question',
                    quid=str(quid), instanceid=generate_instanceid())))
        new_unit.html_review_form = '\n'.join(html_review_form)

        course.save()
        transforms.send_json_response(
            self, 200, (
                'The assessment has been exported to "%s".' % new_unit.title),
            payload_dict={'key': key})

    def get_assessment_dict(self, assessment_content):
        """Validate the assessment script and return it as a python dict.

        Sends a 412 error response and returns None on failure.
        """
        try:
            content, noverify_text = verify.convert_javascript_to_python(
                assessment_content, 'assessment')
            assessment = verify.evaluate_python_expression_from_text(
                content, 'assessment', verify.Assessment().scope, noverify_text)
        except Exception:  # pylint: disable-msg=broad-except
            # NOTE(review): 'asessment' typo in this user-facing message;
            # runtime string left unchanged in this documentation pass.
            transforms.send_json_response(
                self, 412, 'Unable to parse asessment.')
            return None

        try:
            verify.Verifier().verify_assessment_instance(assessment, 'none')
        except verify.SchemaException:
            transforms.send_json_response(
                self, 412, 'Unable to validate assessment')
            return None

        return assessment['assessment']

    def get_question_dtos(self, assessment_dict, description_template):
        """Convert the assessment into a list of QuestionDTO's.

        Sends a 412 error response and returns None on failure.
        """
        question_dtos = []
        try:
            for i, question in enumerate(assessment_dict['questionsList']):
                description = description_template % (self.unit.title, (i + 1))
                if description in self.question_descriptions:
                    raise CollisionError()
                question_dto = self.import_question(question)
                question_dto.dict['description'] = description
                question_dtos.append(question_dto)
        except CollisionError:
            transforms.send_json_response(
                self, 412, (
                    'This assessment has already been imported. Remove '
                    'duplicate imported questions from the question bank in '
                    'order to re-import.'))
            return None
        except Exception as ex:
            transforms.send_json_response(
                self, 412, 'Unable to convert: %s' % ex)
            return None

        return question_dtos

    def import_question(self, question):
        """Convert a single question into a QuestionDTO.

        Raises:
            ValueError: if no recognized answer field is present.
        """
        # The presence of a particular answer field determines the type.
        if 'choices' in question:
            question_dict = self.import_multiple_choice_question(question)
            question_type = m_models.QuestionDTO.MULTIPLE_CHOICE
        elif 'correctAnswerNumeric' in question:
            question_dict = self.import_short_answer_question(
                question.get('questionHTML'),
                'numeric',
                question.get('correctAnswerNumeric'))
            question_type = m_models.QuestionDTO.SHORT_ANSWER
        elif 'correctAnswerString' in question:
            question_dict = self.import_short_answer_question(
                question.get('questionHTML'),
                'case_insensitive',
                question.get('correctAnswerString'))
            question_type = m_models.QuestionDTO.SHORT_ANSWER
        elif 'correctAnswerRegex' in question:
            question_dict = self.import_short_answer_question(
                question.get('questionHTML'),
                'regex',
                question.get('correctAnswerRegex').value)
            question_type = m_models.QuestionDTO.SHORT_ANSWER
        else:
            raise ValueError('Unknown question type')

        question_dto = m_models.QuestionDTO(None, question_dict)
        question_dto.type = question_type
        return question_dto

    def import_multiple_choice_question(self, question):
        """Assemble the dict for a multiple choice question."""
        question_dict = {
            'version': self.VERSION,
            'question': question.get('questionHTML') or '',
            'multiple_selections': False
        }
        choices = []
        for choice in question.get('choices'):
            if isinstance(choice, basestring):
                # Plain string choices are incorrect answers.
                text = choice
                score = 0.0
            else:
                # Wrapped choices mark the correct answer.
                text = choice.value
                score = 1.0
            choices.append({
                'text': text,
                'score': score
            })
        question_dict['choices'] = choices
        return question_dict

    def import_short_answer_question(self, question_html, matcher, response):
        """Assemble the dict for a short-answer question with one grader."""
        return {
            'version': self.VERSION,
            'question': question_html or '',
            'graders': [{
                'score': 1.0,
                'matcher': matcher,
                'response': response,
            }]
        }
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to create and manage Courses."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
import os
import urllib
import appengine_config
from common import jinja_utils
from common import safe_dom
from controllers import sites
from controllers.utils import ApplicationHandler
from controllers.utils import ReflectiveRequestHandler
import jinja2
import jinja2.exceptions
from models import config
from models import courses
from models import custom_modules
from models import roles
from models import vfs
from models.models import QuestionDAO
from models.models import QuestionGroupDAO
from modules.dashboard import analytics
from modules.search.search import SearchDashboardHandler
from tools import verify
from course_settings import CourseSettingsHandler
from course_settings import CourseSettingsRESTHandler
import filer
from filer import AssetItemRESTHandler
from filer import AssetUriRESTHandler
from filer import FileManagerAndEditor
from filer import FilesItemRESTHandler
from filer import TextAssetRESTHandler
import messages
from peer_review import AssignmentManager
from question_editor import McQuestionRESTHandler
from question_editor import QuestionManagerAndEditor
from question_editor import SaQuestionRESTHandler
from question_group_editor import QuestionGroupManagerAndEditor
from question_group_editor import QuestionGroupRESTHandler
import unit_lesson_editor
from unit_lesson_editor import AssessmentRESTHandler
from unit_lesson_editor import ExportAssessmentRESTHandler
from unit_lesson_editor import ImportActivityRESTHandler
from unit_lesson_editor import ImportCourseRESTHandler
from unit_lesson_editor import LessonRESTHandler
from unit_lesson_editor import LinkRESTHandler
from unit_lesson_editor import UnitLessonEditor
from unit_lesson_editor import UnitLessonTitleRESTHandler
from unit_lesson_editor import UnitRESTHandler
from google.appengine.api import users
class DashboardHandler(
    CourseSettingsHandler, FileManagerAndEditor, UnitLessonEditor,
    QuestionManagerAndEditor, QuestionGroupManagerAndEditor, AssignmentManager,
    ApplicationHandler, ReflectiveRequestHandler, SearchDashboardHandler):
    """Handles all pages and actions required for managing a course."""

    # Action used when the request carries no ?action= parameter.
    default_action = 'outline'

    # GET actions dispatched by ReflectiveRequestHandler to get_<action>().
    get_actions = [
        default_action, 'assets', 'settings', 'analytics', 'search',
        'edit_basic_settings', 'edit_settings', 'edit_unit_lesson',
        'edit_unit', 'edit_link', 'edit_lesson', 'edit_assessment',
        'add_asset', 'delete_asset', 'manage_text_asset', 'import_course',
        'edit_assignment', 'add_mc_question', 'add_sa_question',
        'edit_question', 'add_question_group', 'edit_question_group']

    # Requests to these handlers automatically go through an XSRF token check
    # that is implemented in ReflectiveRequestHandler.
    post_actions = [
        'compute_student_stats', 'create_or_edit_settings', 'add_unit',
        'add_link', 'add_assessment', 'add_lesson', 'index_course',
        'clear_index', 'edit_basic_course_settings', 'add_reviewer',
        'delete_reviewer']

    # (action, label) pairs rendered as the top navigation bar; the empty
    # action is the default outline view.
    nav_mappings = [
        ('', 'Outline'),
        ('assets', 'Assets'),
        ('settings', 'Settings'),
        ('analytics', 'Analytics'),
        ('search', 'Search'),
        ('edit_assignment', 'Peer Review')]

    # Read-only view of the local bundle filesystem, used to merge bundled
    # files into datastore file listings (see list_files).
    local_fs = vfs.LocalReadOnlyFileSystem(logical_home_folder='/')
@classmethod
def get_child_routes(cls):
    """Add child handlers for REST.

    Returns:
        list of (URI, handler class) route tuples for the REST endpoints
        served under the dashboard.
    """
    # Bug fix: the AssetItemRESTHandler route was listed twice; the
    # redundant duplicate entry has been removed.
    return [
        (AssessmentRESTHandler.URI, AssessmentRESTHandler),
        (AssetItemRESTHandler.URI, AssetItemRESTHandler),
        (CourseSettingsRESTHandler.URI, CourseSettingsRESTHandler),
        (FilesItemRESTHandler.URI, FilesItemRESTHandler),
        (AssetUriRESTHandler.URI, AssetUriRESTHandler),
        (ImportActivityRESTHandler.URI, ImportActivityRESTHandler),
        (ImportCourseRESTHandler.URI, ImportCourseRESTHandler),
        (LessonRESTHandler.URI, LessonRESTHandler),
        (LinkRESTHandler.URI, LinkRESTHandler),
        (UnitLessonTitleRESTHandler.URI, UnitLessonTitleRESTHandler),
        (UnitRESTHandler.URI, UnitRESTHandler),
        (McQuestionRESTHandler.URI, McQuestionRESTHandler),
        (SaQuestionRESTHandler.URI, SaQuestionRESTHandler),
        (TextAssetRESTHandler.URI, TextAssetRESTHandler),
        (QuestionGroupRESTHandler.URI, QuestionGroupRESTHandler),
        (ExportAssessmentRESTHandler.URI, ExportAssessmentRESTHandler)
    ]
def can_view(self):
    """Checks if current user has viewing rights."""
    # Dashboard viewing is restricted to course admins.
    return roles.Roles.is_course_admin(self.app_context)
def can_edit(self):
    """Checks if current user has editing rights."""
    # Editing uses the same course-admin check as viewing.
    return roles.Roles.is_course_admin(self.app_context)
def get(self):
    """Enforces rights to all GET operations."""
    # Unauthorized users are bounced back to the course landing page.
    if not self.can_view():
        self.redirect(self.app_context.get_slug())
        return
    # Force reload of properties. It is expensive, but admin deserves it!
    config.Registry.get_overrides(force_update=True)
    return super(DashboardHandler, self).get()
def post(self):
    """Enforces rights to all POST operations."""
    # Unauthorized users are bounced back to the course landing page.
    if not self.can_edit():
        self.redirect(self.app_context.get_slug())
        return
    return super(DashboardHandler, self).post()
def get_template(self, template_name, dirs):
    """Sets up an environment and gets the named jinja template.

    This module's own directory is always appended to the search path so
    dashboard templates resolve even when callers pass other dirs.
    """
    search_path = dirs + [os.path.dirname(__file__)]
    return jinja_utils.get_template(
        template_name, search_path, handler=self)
def _get_alerts(self):
    """Collects banner warnings for the current course, one per line."""
    # Each entry pairs an "all clear" condition with the warning shown
    # when that condition is false.
    checks = (
        (courses.is_editable_fs(self.app_context),
         'Read-only course.'),
        (self.app_context.now_available,
         'The course is not publicly available.'),
    )
    return '\n'.join(message for ok, message in checks if not ok)
def _get_top_nav(self):
    """Builds the dashboard top navigation bar as a safe_dom node list."""
    current_action = self.request.get('action')
    nav = safe_dom.NodeList()
    for action, title in self.nav_mappings:
        # Highlight the tab whose action is currently being viewed.
        class_name = 'selected' if action == current_action else ''
        action_href = 'dashboard?action=%s' % action
        nav.append(safe_dom.Element(
            'a', href=action_href, className=class_name).add_text(
                title))
    if roles.Roles.is_super_admin():
        # Super-admins additionally get a link to the app-wide console.
        nav.append(safe_dom.Element(
            'a', href='/admin').add_text('Admin'))
    nav.append(safe_dom.Element(
        'a',
        href='https://code.google.com/p/course-builder/wiki/Dashboard',
        target='_blank'
    ).add_text('Help'))
    return nav
def render_page(self, template_values):
    """Renders a page using provided template values.

    Fills in the shared chrome (top nav, user nav, footer) and writes
    the rendered view.html to the response.
    """
    template_values['top_nav'] = self._get_top_nav()
    template_values['gcb_course_base'] = self.get_base_href(self)
    # Current user's email plus a logout link.
    template_values['user_nav'] = safe_dom.NodeList().append(
        safe_dom.Text('%s | ' % users.get_current_user().email())
    ).append(
        safe_dom.Element(
            'a', href=users.create_logout_url(self.request.uri)
        ).add_text('Logout'))
    template_values[
        'page_footer'] = 'Created on: %s' % datetime.datetime.now()
    # view.html expects 'sections' to exist, even if empty.
    if not template_values.get('sections'):
        template_values['sections'] = []
    self.response.write(
        self.get_template('view.html', []).render(template_values))
def format_title(self, text):
    """Formats standard title.

    Produces 'Course Builder > <course title> > Dashboard > <text>' as a
    safe_dom node list ('>' emitted as an HTML entity).
    """
    title = self.app_context.get_environ()['course']['title']
    return safe_dom.NodeList().append(
        safe_dom.Text('Course Builder ')
    ).append(
        safe_dom.Entity('&gt;')
    ).append(
        safe_dom.Text(' %s ' % title)
    ).append(
        safe_dom.Entity('&gt;')
    ).append(
        safe_dom.Text(' Dashboard ')
    ).append(
        safe_dom.Entity('&gt;')
    ).append(
        safe_dom.Text(' %s' % text)
    )
def _get_edit_link(self, url):
    """Returns a leading-space 'Edit' anchor pointing at the given URL."""
    return safe_dom.NodeList().append(
        safe_dom.Text(' ')
    ).append(
        safe_dom.Element('a', href=url).add_text('Edit')
    )
def _get_availability(self, resource):
    """Returns a '(Draft)' marker for unavailable resources, else nothing."""
    # Resources without a now_available attribute, and available ones,
    # both render as an empty text node.
    is_draft = (
        hasattr(resource, 'now_available') and not resource.now_available)
    if not is_draft:
        return safe_dom.Text('')
    return safe_dom.NodeList().append(
        safe_dom.Text(' ')
    ).append(
        safe_dom.Element(
            'span', className='draft-label'
        ).add_text('(%s)' % unit_lesson_editor.DRAFT_TEXT)
    )
def render_course_outline_to_html(self):
    """Renders course outline to HTML.

    Returns:
        [] when the course has no units (callers treat this as "nothing
        to show"); otherwise a safe_dom <ul> Element with one <li> per
        unit, link, or assessment.

    Raises:
        Exception: if a unit has an unrecognized type.
    """
    course = courses.Course(self)
    if not course.get_units():
        return []
    is_editable = filer.is_editable_fs(self.app_context)
    lines = safe_dom.Element('ul', style='list-style: none;')
    # The three unit flavors render differently; each has its own helper.
    for unit in course.get_units():
        if unit.type == verify.UNIT_TYPE_ASSESSMENT:
            lines.add_child(
                self._render_assessment_outline(unit, is_editable))
        elif unit.type == verify.UNIT_TYPE_LINK:
            lines.add_child(self._render_link_outline(unit, is_editable))
        elif unit.type == verify.UNIT_TYPE_UNIT:
            lines.add_child(
                self._render_unit_outline(course, unit, is_editable))
        else:
            raise Exception('Unknown unit type: %s.' % unit.type)
    return lines

def _append_entity_edit_link(self, li, action, key):
    """Appends an 'Edit' link for the given dashboard action to an <li>."""
    url = self.canonicalize_url(
        '/dashboard?%s') % urllib.urlencode({
            'action': action,
            'key': key})
    li.add_child(self._get_edit_link(url))

def _render_assessment_outline(self, unit, is_editable):
    """Builds the outline <li> for an assessment unit."""
    li = safe_dom.Element('li').add_child(
        safe_dom.Element(
            'a', href='assessment?name=%s' % unit.unit_id,
            className='strong'
        ).add_text(unit.title)
    ).add_child(self._get_availability(unit))
    if is_editable:
        self._append_entity_edit_link(li, 'edit_assessment', unit.unit_id)
    return li

def _render_link_outline(self, unit, is_editable):
    """Builds the outline <li> for an external link unit."""
    li = safe_dom.Element('li').add_child(
        safe_dom.Element(
            'a', href=unit.href, className='strong'
        ).add_text(unit.title)
    ).add_child(self._get_availability(unit))
    if is_editable:
        self._append_entity_edit_link(li, 'edit_link', unit.unit_id)
    return li

def _render_unit_outline(self, course, unit, is_editable):
    """Builds the outline <li> for a unit, including its lesson list."""
    li = safe_dom.Element('li').add_child(
        safe_dom.Element(
            'a', href='unit?unit=%s' % unit.unit_id,
            className='strong').add_text(
                'Unit %s - %s' % (unit.index, unit.title))
    ).add_child(self._get_availability(unit))
    if is_editable:
        self._append_entity_edit_link(li, 'edit_unit', unit.unit_id)
    ol = safe_dom.Element('ol')
    for lesson in course.get_lessons(unit.unit_id):
        li2 = safe_dom.Element('li').add_child(
            safe_dom.Element(
                'a',
                href='unit?unit=%s&lesson=%s' % (
                    unit.unit_id, lesson.lesson_id),
            ).add_text(lesson.title)
        ).add_child(self._get_availability(lesson))
        if is_editable:
            url = self.get_action_url(
                'edit_lesson', key=lesson.lesson_id)
            li2.add_child(self._get_edit_link(url))
        ol.add_child(li2)
    li.add_child(ol)
    return li
def get_outline(self):
    """Renders course outline view."""
    # Static links shown in the 'Pages' section.
    pages_info = [
        safe_dom.Element(
            'a', href=self.canonicalize_url('/announcements')
        ).add_text('Announcements'),
        safe_dom.Element(
            'a', href=self.canonicalize_url('/course')
        ).add_text('Course')]

    outline_actions = []
    # Content-editing actions only appear on a writable filesystem.
    if filer.is_editable_fs(self.app_context):
        outline_actions.append({
            'id': 'edit_unit_lesson',
            'caption': 'Organize',
            'href': self.get_action_url('edit_unit_lesson')})
        all_units = courses.Course(self).get_units()
        # 'Add Lesson' requires at least one real unit to attach to.
        if any([unit.type == verify.UNIT_TYPE_UNIT for unit in all_units]):
            outline_actions.append({
                'id': 'add_lesson',
                'caption': 'Add Lesson',
                'action': self.get_action_url('add_lesson'),
                'xsrf_token': self.create_xsrf_token('add_lesson')})
        outline_actions.append({
            'id': 'add_unit',
            'caption': 'Add Unit',
            'action': self.get_action_url('add_unit'),
            'xsrf_token': self.create_xsrf_token('add_unit')})
        outline_actions.append({
            'id': 'add_link',
            'caption': 'Add Link',
            'action': self.get_action_url('add_link'),
            'xsrf_token': self.create_xsrf_token('add_link')})
        outline_actions.append({
            'id': 'add_assessment',
            'caption': 'Add Assessment',
            'action': self.get_action_url('add_assessment'),
            'xsrf_token': self.create_xsrf_token('add_assessment')})
        # Importing another course is only offered while this one is empty.
        if not courses.Course(self).get_units():
            outline_actions.append({
                'id': 'import_course',
                'caption': 'Import',
                'href': self.get_action_url('import_course')
                })

    data_info = self.list_files('/data/')

    sections = [
        {
            'title': 'Pages',
            'description': messages.PAGES_DESCRIPTION,
            'children': pages_info},
        {
            'title': 'Course Outline',
            'description': messages.COURSE_OUTLINE_DESCRIPTION,
            'actions': outline_actions,
            'pre': self.render_course_outline_to_html()},
        {
            'title': 'Data Files',
            'description': messages.DATA_FILES_DESCRIPTION,
            'children': data_info}]

    template_values = {}
    template_values['page_title'] = self.format_title('Outline')
    template_values['alerts'] = self._get_alerts()
    template_values['sections'] = sections
    self.render_page(template_values)
def get_action_url(self, action, key=None, extra_args=None):
    """Builds a canonical /dashboard URL for the given action.

    Args:
        action: str. Value for the ?action= query parameter.
        key: optional entity key added as ?key=.
        extra_args: optional dict of additional query parameters.

    Returns:
        str. The canonicalized dashboard URL.
    """
    params = {'action': action}
    if key:
        params['key'] = key
    if extra_args:
        params.update(extra_args)
    return self.canonicalize_url(
        '/dashboard?%s' % urllib.urlencode(params))
def get_settings(self):
    """Renders course settings view.

    Shows basic course info, the contents of course.yaml, and the
    contents of the shared course_template.yaml.
    """
    yaml_actions = []
    basic_setting_actions = []

    # Basic course info.
    course_info = [
        'Course Title: %s' % self.app_context.get_environ()['course'][
            'title'],
        'Context Path: %s' % self.app_context.get_slug(),
        'Datastore Namespace: %s' % self.app_context.get_namespace_name()]

    # Course file system.
    fs = self.app_context.fs.impl
    course_info.append(('File System: %s' % fs.__class__.__name__))
    if fs.__class__ == vfs.LocalReadOnlyFileSystem:
        course_info.append(('Home Folder: %s' % sites.abspath(
            self.app_context.get_home_folder(), '/')))

    # Enable editing if supported.
    if filer.is_editable_fs(self.app_context):
        yaml_actions.append({
            'id': 'edit_course_yaml',
            'caption': 'Advanced Edit',
            'action': self.get_action_url('create_or_edit_settings'),
            'xsrf_token': self.create_xsrf_token(
                'create_or_edit_settings')})
        yaml_actions.append({
            'id': 'edit_basic_course_settings',
            'caption': 'Edit',
            'action': self.get_action_url('edit_basic_course_settings'),
            'xsrf_token': self.create_xsrf_token(
                'edit_basic_course_settings')})

    # course.yaml file content.
    yaml_info = []
    # NOTE(review): this stream comes from the vfs layer and is not closed
    # here; confirm whether that API requires an explicit close.
    yaml_stream = self.app_context.fs.open(
        self.app_context.get_config_filename())
    if yaml_stream:
        yaml_lines = yaml_stream.read().decode('utf-8')
        for line in yaml_lines.split('\n'):
            yaml_info.append(line)
    else:
        yaml_info.append('< empty file >')

    # course_template.yaml file contents.
    course_template_path = os.path.join(
        os.path.dirname(__file__), '../../course_template.yaml')
    # Bug fix: this plain file handle used to be leaked; 'with' closes it.
    # (The old '< empty file >' branch was unreachable here: open() raises
    # IOError on failure instead of returning a falsy stream.)
    with open(course_template_path, 'r') as course_template_stream:
        course_template_lines = course_template_stream.read().decode(
            'utf-8')
    course_template_info = course_template_lines.split('\n')

    # Prepare template values.
    template_values = {}
    template_values['page_title'] = self.format_title('Settings')
    template_values['page_description'] = messages.SETTINGS_DESCRIPTION
    template_values['sections'] = [
        {
            'title': 'About the Course',
            'description': messages.ABOUT_THE_COURSE_DESCRIPTION,
            'actions': basic_setting_actions,
            'children': course_info},
        {
            'title': 'Contents of course.yaml file',
            'description': messages.CONTENTS_OF_THE_COURSE_DESCRIPTION,
            'actions': yaml_actions,
            'children': yaml_info},
        {
            'title': 'Contents of course_template.yaml file',
            'description': messages.COURSE_TEMPLATE_DESCRIPTION,
            'children': course_template_info}]

    self.render_page(template_values)
def list_files(self, subfolder, merge_local_files=False):
"""Makes a list of files in a subfolder.
Args:
subfolder: string. Relative path of the subfolder to list.
merge_local_files: boolean. If True, the returned list will
contain files found on either the datastore filesystem or the
read-only local filesystem. If a file is found on both, its
datastore filesystem version will trump its local filesystem
version.
Returns:
List of relative, normalized file path strings.
"""
home = sites.abspath(self.app_context.get_home_folder(), '/')
all_paths = set(self.app_context.fs.list(
sites.abspath(self.app_context.get_home_folder(), subfolder)))
if merge_local_files:
all_paths = all_paths.union(set([
os.path.join(appengine_config.BUNDLE_ROOT, path) for path in
self.local_fs.list(subfolder[1:])]))
result = []
for abs_filename in all_paths:
filename = os.path.relpath(abs_filename, home)
result.append(vfs.AbstractFileSystem.normpath(filename))
return sorted(result)
    def list_and_format_file_list(
        self, title, subfolder,
        links=False, upload=False, prefix=None, caption_if_empty='< none >',
        edit_url_template=None, merge_local_files=False, sub_title=None):
        """Walks files in folders and renders their names in a section.

        Args:
            title: string. Section heading; rendered with a file count.
            subfolder: string. Relative path of the subfolder to list.
            links: boolean. If True, each filename is rendered as a link.
            upload: boolean. If True (and the fs is editable), an upload
                button is added.
            prefix: string. If set, only filenames starting with it are shown.
            caption_if_empty: string. Placeholder text when no files match.
            edit_url_template: string. '%s'-style template for per-file
                edit/override links; filename is substituted in.
            merge_local_files: boolean. Also list read-only local files.
            sub_title: string. Optional blockquote under the heading.

        Returns:
            A safe_dom.NodeList with the rendered section.
        """
        # keep a list of files without merging
        # NOTE: starts as an empty dict and may be rebound to a list below;
        # only membership tests ('in') are performed, which both support.
        unmerged_files = {}
        if merge_local_files:
            unmerged_files = self.list_files(subfolder, merge_local_files=False)

        items = safe_dom.NodeList()
        count = 0
        for filename in self.list_files(
                subfolder, merge_local_files=merge_local_files):

            if prefix and not filename.startswith(prefix):
                continue

            # show different captions depending if the override exists or not;
            # a file only on the local fs gets '[Override]', a datastore copy
            # (or any file when not merging) gets '[Edit]'.
            has_override = filename in unmerged_files
            link_caption = '[Override]'
            if has_override or not merge_local_files:
                link_caption = '[Edit]'

            # make a <li> item
            li = safe_dom.Element('li')
            if links:
                li.add_child(safe_dom.Element(
                    'a', href=urllib.quote(filename)).add_text(filename))
            else:
                li.add_text(filename)

            # add actions if available (only on a writable filesystem)
            if (edit_url_template and
                self.app_context.fs.impl.is_read_write()):

                edit_url = edit_url_template % urllib.quote(filename)
                li.add_child(
                    safe_dom.Entity('&nbsp;')
                ).add_child(
                    safe_dom.Element('a', href=edit_url).add_text(link_caption))

            count += 1
            items.append(li)

        output = safe_dom.NodeList()

        if filer.is_editable_fs(self.app_context) and upload:
            output.append(
                safe_dom.Element(
                    'a', className='gcb-button gcb-pull-right',
                    href='dashboard?%s' % urllib.urlencode(
                        {'action': 'add_asset', 'base': subfolder})
                ).add_text(
                    'Upload to ' +
                    filer.strip_leading_and_trailing_slashes(subfolder))
            ).append(
                safe_dom.Element(
                    'div', style='clear: both; padding-top: 2px;'
                )
            )
        if title:
            h3 = safe_dom.Element('h3')
            if count:
                h3.add_text('%s (%s)' % (title, count))
            else:
                h3.add_text(title)
            output.append(h3)
        if sub_title:
            output.append(safe_dom.Element('blockquote').add_text(sub_title))
        if items:
            output.append(safe_dom.Element('ol').add_children(items))
        else:
            if caption_if_empty:
                output.append(
                    safe_dom.Element('blockquote').add_text(caption_if_empty))
        return output
def list_questions(self):
"""Prepare a list of the question bank contents."""
if not filer.is_editable_fs(self.app_context):
return safe_dom.NodeList()
output = safe_dom.NodeList().append(
safe_dom.Element(
'a', className='gcb-button gcb-pull-right',
href='dashboard?action=add_mc_question'
).add_text('Add Multiple Choice')
).append(
safe_dom.Element(
'a', className='gcb-button gcb-pull-right',
href='dashboard?action=add_sa_question'
).add_text('Add Short Answer')
).append(
safe_dom.Element('div', style='clear: both; padding-top: 2px;')
).append(
safe_dom.Element('h3').add_text('Question Bank')
)
all_questions = QuestionDAO.get_all()
if all_questions:
ol = safe_dom.Element('ol')
for question in all_questions:
edit_url = 'dashboard?action=edit_question&key=%s' % question.id
li = safe_dom.Element('li')
li.add_text(
question.description
).add_child(
safe_dom.Entity(' ')
).add_child(
safe_dom.Element('a', href=edit_url).add_text('[Edit]'))
ol.add_child(li)
output.append(ol)
else:
output.append(safe_dom.Element('blockquote').add_text('< none >'))
return output
def list_question_groups(self):
"""Prepare a list of question groups."""
if not filer.is_editable_fs(self.app_context):
return safe_dom.NodeList()
all_questions = QuestionDAO.get_all()
output = safe_dom.NodeList()
if all_questions:
output.append(
safe_dom.Element(
'a', className='gcb-button gcb-pull-right',
href='dashboard?action=add_question_group'
).add_text('Add Question Group')
).append(
safe_dom.Element(
'div', style='clear: both; padding-top: 2px;'
)
)
output.append(
safe_dom.Element('h3').add_text('Question Groups')
)
# TODO(jorr): Hook this into the datastore
all_question_groups = QuestionGroupDAO.get_all()
if all_question_groups:
ol = safe_dom.Element('ol')
for question_group in all_question_groups:
edit_url = 'dashboard?action=edit_question_group&key=%s' % (
question_group.id)
li = safe_dom.Element('li')
li.add_text(
question_group.description
).add_child(
safe_dom.Entity(' ')
).add_child(
safe_dom.Element('a', href=edit_url).add_text('[Edit]'))
ol.add_child(li)
output.append(ol)
else:
output.append(safe_dom.Element('blockquote').add_text('< none >'))
return output
def get_assets(self):
"""Renders course assets view."""
def inherits_from(folder):
return '< inherited from %s >' % folder
text_asset_url_template = 'dashboard?action=manage_text_asset&uri=%s'
items = safe_dom.NodeList().append(
self.list_questions()
).append(
self.list_question_groups()
).append(
self.list_and_format_file_list(
'Assessments', '/assets/js/', links=True,
prefix='assets/js/assessment-')
).append(
self.list_and_format_file_list(
'Activities', '/assets/js/', links=True,
prefix='assets/js/activity-')
).append(
self.list_and_format_file_list(
'Images & Documents', '/assets/img/', links=True, upload=True,
edit_url_template='dashboard?action=delete_asset&uri=%s',
caption_if_empty=inherits_from('/assets/img/'))
).append(
self.list_and_format_file_list(
'Cascading Style Sheets', '/assets/css/', links=True,
upload=True, edit_url_template=text_asset_url_template,
caption_if_empty=inherits_from('/assets/css/'),
merge_local_files=True)
).append(
self.list_and_format_file_list(
'JavaScript Libraries', '/assets/lib/', links=True,
upload=True, edit_url_template=text_asset_url_template,
caption_if_empty=inherits_from('/assets/lib/'),
merge_local_files=True)
).append(
self.list_and_format_file_list(
'View Templates', '/views/', upload=True,
edit_url_template=text_asset_url_template,
caption_if_empty=inherits_from('/views/'),
merge_local_files=True)
)
template_values = {}
template_values['page_title'] = self.format_title('Assets')
template_values['page_description'] = messages.ASSETS_DESCRIPTION
template_values['main_content'] = items
self.render_page(template_values)
def get_analytics(self):
"""Renders course analytics view."""
template_values = {}
template_values['page_title'] = self.format_title('Analytics')
all_jobs_have_finished = True
stats_html = ''
for callback in DashboardRegistry.analytics_handlers:
handler = callback()
handler.app_context = self.app_context
handler.request = self.request
handler.response = self.response
job = handler.stats_computer(self.app_context).load()
stats_html += handler.get_markup(job)
if job and not job.has_finished:
all_jobs_have_finished = False
template_values['main_content'] = jinja2.utils.Markup(
self.get_template(
'analytics.html', [os.path.dirname(__file__)]
).render({
'show_recalculate_button': all_jobs_have_finished,
'stats_html': stats_html,
'xsrf_token': self.create_xsrf_token('compute_student_stats'),
}, autoescape=True)
)
self.render_page(template_values)
def post_compute_student_stats(self):
"""Submits a new student statistics calculation task."""
for callback in DashboardRegistry.analytics_handlers:
job = callback().stats_computer(self.app_context)
job.submit()
self.redirect('/dashboard?action=analytics')
class DashboardRegistry(object):
    """Holds registered handlers that produce HTML code for the dashboard."""

    # Handlers whose markup appears on the Analytics page, in display order.
    analytics_handlers = [
        analytics.StudentEnrollmentAndScoresHandler,
        analytics.StudentProgressStatsHandler,
        analytics.QuestionStatsHandler,
    ]

    @classmethod
    def add_analytics_section(cls, handler):
        """Adds handlers that provide data for the Analytics page."""
        # Re-registering the same handler is a silent no-op.
        if handler in cls.analytics_handlers:
            return
        for registered in cls.analytics_handlers:
            if registered.name == handler.name:
                raise Exception('Stats handler name %s is being duplicated.'
                                % handler.name)
        cls.analytics_handlers.append(handler)
custom_module = None
def register_module():
    """Registers this module in the registry."""
    global custom_module

    handlers = [('/dashboard', DashboardHandler)]
    # No global routes; the dashboard is only served inside a course
    # namespace.
    custom_module = custom_modules.Module(
        'Course Dashboard',
        'A set of pages for managing Course Builder course.',
        [], handlers)
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used in the dashboard."""
__author__ = 'John Orr (jorr@google.com)'
from common import safe_dom
def assemble_sanitized_message(text, link):
    """Builds a safe_dom NodeList from optional text and a 'Learn more' link.

    Args:
        text: string or None. Message body; skipped when falsy.
        link: string or None. URL for the trailing 'Learn more...' anchor;
            skipped when falsy.

    Returns:
        A safe_dom.NodeList containing the assembled nodes.
    """
    nodes = safe_dom.NodeList()
    if text:
        nodes.append(safe_dom.Text(text))
        nodes.append(safe_dom.Entity('&nbsp;'))
    if link:
        nodes.append(safe_dom.Element(
            'a', href=link, target='_blank').add_text('Learn more...'))
    return nodes
# User-visible descriptions for dashboard pages. Constants built with
# assemble_sanitized_message carry an optional trailing 'Learn more...' link;
# plain-string constants are used verbatim by the form schemas.

ABOUT_THE_COURSE_DESCRIPTION = assemble_sanitized_message("""
This information is configured by an administrator from the Admin pages.
""", None)

ASSESSMENT_CONTENT_DESCRIPTION = assemble_sanitized_message("""
Assessment questions and answers (JavaScript format).
""", 'https://code.google.com/p/course-builder/wiki/CreateAssessments')

ASSESSMENT_DETAILS_DESCRIPTION = assemble_sanitized_message("""
Properties and restrictions of your assessment.
""", 'https://code.google.com/p/course-builder/wiki/PeerReview')

ASSESSMENT_EDITOR_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/CreateAssessments')

ASSETS_DESCRIPTION = assemble_sanitized_message("""
These are all the assets for your course. You can upload new images and
documents here, after which you can use them in your lessons and activities.
You may create, edit, and delete activities and assessments from the Outline
page. All other assets must be edited by an administrator.
""", None)

ASSIGNMENTS_MENU_DESCRIPTION = assemble_sanitized_message("""
Select a peer-reviewed assignment and enter a student's email address to view
their assignment submission and any associated reviews.
""", None)

CONTENTS_OF_THE_COURSE_DESCRIPTION = assemble_sanitized_message("""
The course.yaml file contains many course settings. Edit it using the buttons
at the right.
""", 'https://code.google.com/p/course-builder/wiki/CourseSettings')

COURSE_OUTLINE_DESCRIPTION = assemble_sanitized_message(
    'Build, organize and preview your course here.',
    'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')

COURSE_OUTLINE_EDITOR_DESCRIPTION = assemble_sanitized_message("""
Click up/down arrows to re-order units, or lessons within units. To move a
lesson between units, edit that lesson from the outline page and change its
parent unit.
""", None)

COURSE_TEMPLATE_DESCRIPTION = assemble_sanitized_message("""
The course_template.yaml file contains the common template settings
for all courses. You can override the template settings for this
course by editing your course.yaml file.
""", None)

DATA_FILES_DESCRIPTION = assemble_sanitized_message("""
The lesson.csv file contains the contents of your lesson. The unit.csv file
contains the course related content shown on the homepage. These files are
located in your Course Builder installation. Edit them directly with an editor
like Notepad++. Be careful, some editors will add extra characters, which may
prevent the uploading of these files.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')

EDIT_SETTINGS_DESCRIPTION = assemble_sanitized_message("""
The course.yaml file contains many course settings.
""", 'https://code.google.com/p/course-builder/wiki/CourseSettings')

IMPORT_COURSE_DESCRIPTION = assemble_sanitized_message("""
Import the contents of another course into this course. Both courses must be on
the same Google App Engine instance.
""", None)

INCORRECT_ANSWER_FEEDBACK = """
Shown when the student response does not match any of the possible answers.
"""

INPUT_FIELD_HEIGHT_DESCRIPTION = """
Height of the input field, measured in rows.
"""

INPUT_FIELD_WIDTH_DESCRIPTION = """
Width of the input field, measured in columns.
"""

LESSON_ACTIVITY_DESCRIPTION = assemble_sanitized_message("""
Create an activity by entering the correct syntax above.
""", ('https://code.google.com/p/course-builder/wiki/CreateActivities'
      '#Writing_activities'))

LESSON_ACTIVITY_LISTED_DESCRIPTION = """
Whether the activity should be viewable as a stand-alone item in the unit index.
"""

LESSON_ACTIVITY_TITLE_DESCRIPTION = """
This appears above your activity.
"""

LESSON_OBJECTIVES_DESCRIPTION = """
The lesson body is displayed to students above the video in the default
template.
"""

LESSON_SCORED_DESCRIPTION = """
Whether questions in this lesson will be scored (summative) or only
provide textual feedback (formative).
"""

LESSON_VIDEO_ID_DESCRIPTION = """
Provide a YouTube video ID to embed a video.
"""

LESSON_NOTES_DESCRIPTION = """
Provide a URL that points to the notes for this lesson (if applicable). These
notes can be accessed by clicking on the 'Text Version' button on the lesson
page.
"""

LINK_EDITOR_DESCRIPTION = assemble_sanitized_message("""
Links will appear in your outline and will take students directly to the URL.
""", None)

# Fixed: the second scheme was missing its opening quote ("'http' or https'").
LINK_EDITOR_URL_DESCRIPTION = """
Links to external sites must start with 'http' or 'https'.
"""

PAGES_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')

QUESTION_DESCRIPTION = 'Shown when selecting questions for quizzes, etc.'

REVIEWER_FEEDBACK_FORM_DESCRIPTION = assemble_sanitized_message("""
Review form questions and answers (JavaScript format).
""", 'https://code.google.com/p/course-builder/wiki/PeerReview')

SETTINGS_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/Dashboard#Settings')

# Fixed typo: "acitivities" -> "activities".
UNIT_EDITOR_DESCRIPTION = assemble_sanitized_message("""
Units contain lessons and activities.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')

UPLOAD_ASSET_DESCRIPTION = assemble_sanitized_message("""
Choose a file to upload to this Google App Engine instance. Learn more about
file storage and hosting.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Assets')

DUE_DATE_FORMAT_DESCRIPTION = assemble_sanitized_message("""
Should be formatted as YYYY-MM-DD hh:mm (e.g. 1997-07-16 19:20) and be specified
in the UTC timezone.""", None)

REVIEW_DUE_DATE_FORMAT_DESCRIPTION = assemble_sanitized_message("""
Should be formatted as YYYY-MM-DD hh:mm (e.g. 1997-07-16 19:20) and be specified
in the UTC timezone.
""", 'https://code.google.com/p/course-builder/wiki/PeerReview')

REVIEW_TIMEOUT_IN_MINUTES = assemble_sanitized_message("""
This value should be specified in minutes.
""", 'https://code.google.com/p/course-builder/wiki/PeerReview')

REVIEW_MIN_COUNT_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/PeerReview')

# Display names for the grading methods and the peer-review matcher.
AUTO_GRADER_NAME = 'Automatic Grading'

HUMAN_GRADER_NAME = 'Peer Review'

PEER_MATCHER_NAME = 'Peer'
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting creation and editing of question_groups."""
__author__ = 'John Orr (jorr@google.com)'
from common import schema_fields
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import transforms
from models.models import QuestionDAO
from models.models import QuestionGroupDAO
from models.models import QuestionGroupDTO
import question_editor
from unit_lesson_editor import CourseOutlineRights
class QuestionGroupManagerAndEditor(question_editor.BaseDatastoreAssetEditor):
    """An editor for editing and managing question_groups."""

    def get_template_values(self, key):
        """Builds page title and edit form for the group with the given key."""
        template_values = {}
        template_values['page_title'] = self.format_title('Edit Question Group')
        template_values['main_content'] = self.get_form(
            QuestionGroupRESTHandler, key)
        return template_values

    def get_add_question_group(self):
        # An empty key means "create a new group".
        self.render_page(self.get_template_values(''))

    def get_edit_question_group(self):
        self.render_page(self.get_template_values(self.request.get('key')))
class QuestionGroupRESTHandler(BaseRESTHandler):
    """REST handler for editing question_groups."""

    URI = '/rest/question_group'

    # Client-side InputEx widget modules the editor form requires.
    REQUIRED_MODULES = [
        'gcb-rte', 'inputex-hidden', 'inputex-select', 'inputex-string',
        'inputex-list']
    EXTRA_JS_FILES = []

    # Name under which XSRF tokens for this form are minted and checked.
    XSRF_TOKEN = 'question-group-edit'

    # Only groups stored with this schema version can be edited here.
    SCHEMA_VERSION = '1.5'

    @classmethod
    def get_schema(cls):
        """Return the InputEx schema for the question group editor."""
        question_group = schema_fields.FieldRegistry(
            'Question Group', description='question_group')
        question_group.add_property(schema_fields.SchemaField(
            'version', '', 'string', optional=True, hidden=True))
        question_group.add_property(schema_fields.SchemaField(
            'description', 'Description', 'string', optional=True))
        question_group.add_property(schema_fields.SchemaField(
            'introduction', 'Introduction', 'html', optional=True))
        # Each item pairs a question (picked from the question bank) with a
        # numeric weight.
        item_type = schema_fields.FieldRegistry(
            'Item',
            extra_schema_dict_values={'className': 'question-group-item'})
        item_type.add_property(schema_fields.SchemaField(
            'weight', 'Weight', 'string', optional=True,
            extra_schema_dict_values={'className': 'question-group-weight'}))
        question_select_data = [
            (q.id, q.description) for q in QuestionDAO.get_all()]
        item_type.add_property(schema_fields.SchemaField(
            'question', 'Question', 'string', optional=True,
            select_data=question_select_data,
            extra_schema_dict_values={'className': 'question-group-question'}))
        item_array = schema_fields.FieldArray(
            'items', '', item_type=item_type,
            extra_schema_dict_values={
                'className': 'question-group-items',
                'sortable': 'true',
                'listAddLabel': 'Add an item',
                'listRemoveLabel': 'Delete item'})
        question_group.add_property(item_array)
        return question_group

    def get(self):
        """Respond to the REST GET verb with the contents of the group."""
        key = self.request.get('key')

        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        if key:
            question_group = QuestionGroupDAO.load(key)
            version = question_group.dict.get('version')
            # Refuse to edit groups saved under a different schema version.
            if self.SCHEMA_VERSION != version:
                transforms.send_json_response(
                    self, 403, 'Cannot edit a Version %s group.' % version,
                    {'key': key})
                return
            payload_dict = question_group.dict
        else:
            # No key: hand back a blank group with three empty item slots.
            payload_dict = {
                'version': self.SCHEMA_VERSION,
                'items': [{'weight': ''}, {'weight': ''}, {'weight': ''}]}

        transforms.send_json_response(
            self, 200, 'Success',
            payload_dict=payload_dict,
            xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN))

    def validate(self, question_group_dict, key):
        """Validate the question group data sent from the form.

        Args:
            question_group_dict: dict. Parsed form payload.
            key: string. Datastore key of the group being edited, or falsy
                when creating a new group.

        Returns:
            List of human-readable error strings; empty when valid.
        """
        errors = []

        assert question_group_dict['version'] == self.SCHEMA_VERSION

        if not question_group_dict['description'].strip():
            errors.append('The question group must have a description.')

        # Descriptions must be unique; the group being edited is excluded
        # from the comparison set so it can keep its own description.
        descriptions = {question_group.description for question_group
                        in QuestionGroupDAO.get_all()
                        if not key or question_group.id != long(key)}
        if question_group_dict['description'] in descriptions:
            errors.append('The description must be different '
                          'from existing question groups.')

        if not question_group_dict['items']:
            errors.append(
                'The question group must contain at least one question.')

        # Every item weight must parse as a float; errors are reported with
        # 1-based item numbers.
        items = question_group_dict['items']
        for index in range(0, len(items)):
            item = items[index]
            try:
                float(item['weight'])
            except ValueError:
                errors.append(
                    'Item %s must have a numeric weight.' % (index + 1))

        return errors

    def put(self):
        """Store a question group in the datastore in response to a PUT."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN, {'key': key}):
            return

        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        payload = request.get('payload')
        question_group_dict = transforms.json_to_dict(
            transforms.loads(payload),
            self.get_schema().get_json_schema_dict())

        validation_errors = self.validate(question_group_dict, key)
        if validation_errors:
            self.validation_error('\n'.join(validation_errors), key=key)
            return

        assert self.SCHEMA_VERSION == question_group_dict.get('version')

        # A key means update-in-place; otherwise the DAO assigns a new key.
        if key:
            question_group = QuestionGroupDTO(key, question_group_dict)
        else:
            question_group = QuestionGroupDTO(None, question_group_dict)

        key_after_save = QuestionGroupDAO.save(question_group)
        transforms.send_json_response(
            self, 200, 'Saved.', payload_dict={'key': key_after_save})

    def delete(self):
        """Delete the question_group in response to REST request."""
        key = self.request.get('key')

        if not self.assert_xsrf_token_or_fail(
                self.request, self.XSRF_TOKEN, {'key': key}):
            return

        if not CourseOutlineRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        question_group = QuestionGroupDAO.load(key)
        if not question_group:
            transforms.send_json_response(
                self, 404, 'Question Group not found.', {'key': key})
            return

        QuestionGroupDAO.delete(question_group)
        transforms.send_json_response(self, 200, 'Deleted.')
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Student HTML file submission upload module."""
__author__ = [
'johncox@google.com (John Cox)',
]
import logging
import os
from common import jinja_utils
from common import schema_fields
from common import tags
from controllers import utils
import jinja2
from models import custom_modules
from models import models
from models import student_work
from google.appengine.ext import db
# String. Url fragment after the namespace we POST user payloads to.
_POST_ACTION_SUFFIX = '/upload'

# String. Course Builder root-relative path where resources for this module
# (icons, static files served by tags.ResourcesHandler) are located.
_RESOURCES_PATH = os.path.join(os.path.sep, 'modules', 'upload', 'resources')

# String. Post form XSRF token name, shared by the tag (token creation) and
# the handler (token validation).
_XSRF_TOKEN_NAME = 'user-upload-form-xsrf'
class TextFileUploadHandler(utils.BaseHandler):
    """Handles POSTs of student text submissions from the upload form."""

    def get_template(self, template_file, additional_dirs=None):
        """Resolves templates against this module's templates/ dir as well."""
        dirs = additional_dirs if additional_dirs else []
        dirs.append(os.path.join(os.path.dirname(__file__), 'templates'))
        return super(TextFileUploadHandler, self).get_template(
            template_file, additional_dirs=dirs)

    def post(self):
        """Creates or updates a student submission."""
        token = self.request.get('form_xsrf_token')
        # Reject requests with a missing or stale XSRF token.
        if not utils.XsrfTokenManager.is_xsrf_token_valid(
                token, _XSRF_TOKEN_NAME):
            self.error(400)
            return

        # Only enrolled students may submit.
        student = self.personalize_page_and_get_enrolled()
        if not student:
            self.error(403)
            return

        success = False
        unit_id = self.request.get('unit_id')
        contents = self.request.get('contents')

        if not contents:
            self.error(400)
        else:
            try:
                success = bool(student_work.Submission.write(
                    unit_id, student.get_key(), contents))
            # All write errors are treated equivalently.
            # pylint: disable-msg=broad-except
            except Exception as e:
                self.error(400)
                logging.warn(
                    'Unable to save student submission; error was: "%s"', e)

        # Render a result page even on failure so the student gets feedback.
        self.template_value['navbar'] = {'course': True}
        self.template_value['success'] = success
        self.template_value['unit_id'] = unit_id
        self.render('result.html')
class TextFileUploadTag(tags.BaseTag):
    """Renders a form for uploading a text file."""

    binding_name = 'text-file-upload-tag'

    @classmethod
    def name(cls):
        return 'Student Text File Upload'

    @classmethod
    def vendor(cls):
        return 'gcb'

    def _get_action(self, slug):
        """Builds the POST target, collapsing a doubled slash for root slugs."""
        return (slug + _POST_ACTION_SUFFIX).replace('//', '/')

    def get_icon_url(self):
        return os.path.join(_RESOURCES_PATH, 'script_add.png')

    def get_schema(self, unused_handler):
        """Gets the tag's schema."""
        registry = schema_fields.FieldRegistry(TextFileUploadTag.name())
        registry.add_property(schema_fields.SchemaField(
            'display_length', 'Display Length', 'integer',
            description=(
                'Number of characters in the filename display (supported '
                'browsers only).'),
            extra_schema_dict_values={'value': 100},
        ))
        return registry

    def render(self, node, handler):
        """Renders the custom tag."""
        student = handler.personalize_page_and_get_enrolled(
            supports_transient_student=True)

        template = jinja_utils.get_template(
            'templates/form.html', os.path.dirname(__file__),
            locale=handler.app_context.get_environ()['course']['locale'],
        )

        # Transient (not-logged-in) students can never have a submission.
        has_submission = False
        if not isinstance(student, models.TransientStudent):
            has_submission = bool(
                db.get(student_work.Submission.get_key(
                    handler.unit_id, student.get_key())))

        handler.template_value['action'] = self._get_action(
            handler.app_context.get_slug())
        handler.template_value['already_submitted'] = has_submission
        handler.template_value['display_length'] = node.attrib.get(
            'display_length')
        handler.template_value['form_xsrf_token'] = (
            utils.XsrfTokenManager.create_xsrf_token(_XSRF_TOKEN_NAME))
        handler.template_value['unit_id'] = handler.unit_id

        return tags.html_string_to_element_tree(
            jinja2.utils.Markup(template.render(handler.template_value)))
custom_module = None
def register_module():
    """Registers this module for use."""

    def on_module_enable():
        # Make the tag available and hide it from course-scope editors.
        tags.Registry.add_tag_binding(
            TextFileUploadTag.binding_name, TextFileUploadTag)
        tags.EditorBlacklists.register(
            TextFileUploadTag.binding_name,
            tags.EditorBlacklists.COURSE_SCOPE)

    def on_module_disable():
        tags.Registry.remove_tag_binding(TextFileUploadTag.binding_name)
        tags.EditorBlacklists.unregister(
            TextFileUploadTag.binding_name,
            tags.EditorBlacklists.COURSE_SCOPE)

    # Static resources are served globally; the upload POST endpoint is
    # namespaced per course.
    global_routes = [
        (os.path.join(_RESOURCES_PATH, '.*'), tags.ResourcesHandler),
    ]
    namespaced_routes = [
        (_POST_ACTION_SUFFIX, TextFileUploadHandler),
    ]

    global custom_module
    custom_module = custom_modules.Module(
        'Student Text File Submission Upload',
        'Adds a custom tag for students to upload text files <= 1MB in size.',
        global_routes, namespaced_routes,
        notify_module_disabled=on_module_disable,
        notify_module_enabled=on_module_enable,
    )
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Course Builder web application entry point."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
import webapp2
# The following import is needed in order to add third-party libraries.
import appengine_config # pylint: disable-msg=unused-import
from common import tags
from controllers import sites
from models import custom_modules
import modules.activity_tag.activity_tag
import modules.admin.admin
import modules.announcements.announcements
import modules.assessment_tags.questions
import modules.course_explorer.course_explorer
import modules.courses.courses
import modules.dashboard.dashboard
import modules.oauth2.oauth2
import modules.oeditor.oeditor
import modules.review.review
import modules.search.search
import modules.upload.upload
# use this flag to control debug only features
debug = not appengine_config.PRODUCTION_MODE

# init and enable most known modules
modules.activity_tag.activity_tag.register_module().enable()
modules.admin.admin.register_module().enable()
modules.announcements.announcements.register_module().enable()
modules.assessment_tags.questions.register_module().enable()
modules.course_explorer.course_explorer.register_module().enable()
modules.courses.courses.register_module().enable()
modules.dashboard.dashboard.register_module().enable()
modules.oeditor.oeditor.register_module().enable()
modules.review.review.register_module().enable()
modules.search.search.register_module().enable()
modules.upload.upload.register_module().enable()

# register modules that are not enabled by default.
modules.oauth2.oauth2.register_module()

# compute all possible routes; must run after every module above has
# registered, since modules contribute routes through the registry
global_routes, namespaced_routes = custom_modules.Registry.get_all_routes()

# routes available at '/%namespace%/' context paths
sites.ApplicationRequestHandler.bind(namespaced_routes)
app_routes = [(r'(.*)', sites.ApplicationRequestHandler)]

# tag extension resource routes
extensions_tag_resource_routes = [(
    '/extensions/tags/.*/resources/.*', tags.ResourcesHandler)]

# i18n configuration for jinja2
webapp2_i18n_config = {'translations_path': os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules/i18n/resources/locale')}

# init application; the catch-all app_routes entry is listed last so that
# global and tag-resource routes match first
app = webapp2.WSGIApplication(
    global_routes + extensions_tag_resource_routes + app_routes,
    config={'webapp2_extras.i18n': webapp2_i18n_config},
    debug=debug)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCB-provided custom tags."""
__author__ = 'John Orr (jorr@google.com)'
import urllib
import urlparse
from common import jinja_utils
from common import schema_fields
from common import tags
from controllers import utils
from models import courses
from xml.etree import cElementTree
def _escape_url(url, force_https=True):
    """Escapes/quotes url parts to sane user input.

    Args:
        url: string. URL to sanitize.
        force_https: boolean. When True, the scheme is rewritten to 'https'
            regardless of the input's scheme.

    Returns:
        The re-assembled, quoted URL string.
    """
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    quoted_path = urllib.quote(path)
    quoted_query = urllib.quote_plus(query, '=?&;')
    if force_https:
        scheme = 'https'
    return urlparse.urlunsplit(
        (scheme, netloc, quoted_path, quoted_query, fragment))
class GoogleDoc(tags.BaseTag):
    """Custom tag for a Google Doc."""

    @classmethod
    def name(cls):
        return 'Google Doc'

    def render(self, node, unused_handler):
        """Builds an iframe pointing at the published document."""
        doc_height = node.attrib.get('height') or '300'
        doc_link = node.attrib.get('link')
        embed_url = _escape_url('%s?embedded=true' % doc_link)
        frame = cElementTree.XML("""
<iframe class="google-doc" title="Google Doc" type="text/html" frameborder="0">
</iframe>""")
        frame.set('src', embed_url)
        frame.set('style', 'width: %spx; height: %spx' % (700, doc_height))
        return frame

    def get_icon_url(self):
        return '/extensions/tags/gcb/resources/docs.png'

    def get_schema(self, unused_handler):
        """Editor schema: a published-document link plus a pixel height."""
        reg = schema_fields.FieldRegistry(GoogleDoc.name())
        # To get this value, users do File > Publish to the web..., click
        # 'Start publishing', and then copy and paste the Document link.
        # Changes to the publication status of a document or to its contents
        # do not appear instantly.
        reg.add_property(schema_fields.SchemaField(
            'link', 'Document Link', 'string',
            optional=True,
            description=('Provide the "Document Link" from the Google Docs '
                         '"Publish to the web" dialog')))
        reg.add_property(schema_fields.SchemaField(
            'height', 'Height', 'string',
            optional=True,
            extra_schema_dict_values={'value': '300'},
            description=('Height of the document, in pixels. Width will be '
                         'set automatically')))
        return reg
class GoogleSpreadsheet(tags.BaseTag):
    """Custom tag embedding a published Google Spreadsheet in an iframe."""

    @classmethod
    def name(cls):
        return 'Google Spreadsheet'

    def render(self, node, unused_handler):
        """Builds the embedding iframe for the sheet referenced by the tag."""
        sheet_height = node.attrib.get('height') or '300'
        sheet_link = node.attrib.get('link')
        # Drop any '&output=...' suffix from the published link before
        # appending our own parameters.
        base_link = sheet_link.split('&output')[0]
        embed_url = _escape_url('%s&chrome=false' % base_link)
        frame = cElementTree.XML("""
<iframe class="google-spreadsheet" title="Google Spreadsheet" type="text/html"
frameborder="0">
</iframe>""")
        frame.set('src', embed_url)
        frame.set('style', 'width: %spx; height: %spx' % (700, sheet_height))
        return frame

    def get_icon_url(self):
        return '/extensions/tags/gcb/resources/spreadsheets.png'

    def get_schema(self, unused_handler):
        """Declares the editable 'link' and 'height' tag attributes."""
        reg = schema_fields.FieldRegistry(GoogleSpreadsheet.name())
        # To get this value, users do File > Publish to the web..., click
        # 'Start publishing', and then copy and paste the link above 'Copy
        # and paste the link above'. Changes to the publication status of a
        # document or to its contents do not appear instantly.
        link_field = schema_fields.SchemaField(
            'link', 'Link', 'string',
            optional=True,
            description=('Provide the link from the Google Spreadsheets '
                         '"Publish to the web" dialog'))
        reg.add_property(link_field)
        height_field = schema_fields.SchemaField(
            'height', 'Height', 'string',
            optional=True,
            extra_schema_dict_values={'value': '300'},
            description=('Height of the spreadsheet, in pixels. Width will '
                         'be set automatically'))
        reg.add_property(height_field)
        return reg
class YouTube(tags.BaseTag):
    """Custom tag embedding a YouTube video player."""

    @classmethod
    def name(cls):
        return 'YouTube Video'

    def render(self, node, unused_handler):
        """Renders the video, with event tracking when the course allows."""
        video_id = node.attrib.get('videoid')
        if not utils.CAN_PERSIST_TAG_EVENTS.value:
            return self._render_no_tracking(video_id)
        return self._render_with_tracking(video_id)

    def _render_no_tracking(self, video_id):
        """Embed video without event tracking support."""
        player_url = (
            'https://www.youtube.com/embed/%s'
            '?feature=player_embedded&rel=0') % video_id
        wrapper = cElementTree.XML("""
<p class="gcb-video-container">
<iframe class="youtube-player" title="YouTube Video Player"
type="text/html" width="650" height="400" frameborder="0"
allowfullscreen="allowfullscreen">
</iframe>
</p>""")
        wrapper[0].set('src', player_url)
        return wrapper

    def _render_with_tracking(self, video_id):
        """Embed video and enable event tracking."""
        # Escape the id so it is safe to splice into the inline script below.
        safe_id = jinja_utils.js_string_raw(video_id)
        return cElementTree.XML("""
<p>
<script src='/extensions/tags/gcb/resources/youtube_video.js'></script>
<script>
gcbTagYoutubeEnqueueVideo('""" + safe_id + """');
</script>
</p>""")

    def get_icon_url(self):
        return '/extensions/tags/gcb/resources/youtube.png'

    def get_schema(self, unused_handler):
        """Declares the editable 'videoid' tag attribute."""
        reg = schema_fields.FieldRegistry(YouTube.name())
        video_field = schema_fields.SchemaField(
            'videoid', 'Video Id', 'string',
            optional=True,
            description='Provide YouTube video ID (e.g. Kdg2drcUjYI)')
        reg.add_property(video_field)
        return reg
class GoogleGroup(tags.BaseTag):
    """Custom tag embedding a Google Groups forum category."""

    @classmethod
    def name(cls):
        return 'Google Group'

    def render(self, node, unused_handler):
        """Builds the embedded-forum iframe for the group/category pair."""
        group_name = node.attrib.get('group')
        category_name = node.attrib.get('category')
        forum_url = (
            'https://groups.google.com/forum/embed/?place=forum/?'
            'fromgroups&hl=en#!categories/%s/%s' % (
                urllib.quote(group_name), urllib.quote(category_name)))
        wrapper = cElementTree.XML("""
<p>
<iframe class="forum-embed" title="Google Group Embed"
type="text/html" width="700" height="300" frameborder="0">
</iframe>
</p>""")
        wrapper[0].set('src', forum_url)
        return wrapper

    def get_icon_url(self):
        return '/extensions/tags/gcb/resources/forumembed.png'

    def get_schema(self, unused_handler):
        """Declares the editable 'group' and 'category' tag attributes."""
        reg = schema_fields.FieldRegistry(GoogleGroup.name())
        group_field = schema_fields.SchemaField(
            'group', 'Group Name', 'string', optional=True,
            description='Name of the Google Group (e.g. mapping-with-google)')
        reg.add_property(group_field)
        category_field = schema_fields.SchemaField(
            'category', 'Category Name', 'string', optional=True,
            description='Name of the Category (e.g. unit5-2-annotation)')
        reg.add_property(category_field)
        return reg
class IFrame(tags.BaseTag):
    """Custom tag embedding an arbitrary URL in an iframe."""

    def render(self, node, unused_handler):
        """Builds an iframe from the tag's src/title/width/height attribs."""
        frame = cElementTree.XML(
            '<iframe style="border: 0;"></iframe>'
        )
        # The URL is user input; escape it but preserve its scheme.
        frame.set('src', _escape_url(node.attrib.get('src'),
                                     force_https=False))
        frame.set('title', node.attrib.get('title'))
        frame.set('width', node.attrib.get('width') or '650')
        frame.set('height', node.attrib.get('height') or '400')
        return frame

    def get_icon_url(self):
        """Return the URL for the icon to be displayed in the rich text editor.

        Images should be placed in a folder called 'resources' inside the main
        package for the tag definitions."""
        return '/extensions/tags/gcb/resources/iframe.png'

    def get_schema(self, unused_handler):
        """Declares the editable src/title/height/width tag attributes."""
        reg = schema_fields.FieldRegistry(IFrame.name())
        reg.add_property(schema_fields.SchemaField(
            'src', 'Source URL', 'string',
            optional=True,
            description='Provide source URL for iframe (including http/https)'))
        reg.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            optional=True,
            description='Provide title of iframe'))
        reg.add_property(schema_fields.SchemaField(
            'height', 'Height', 'string',
            optional=True,
            extra_schema_dict_values={'value': '400'},
            description='Height of the iframe'))
        reg.add_property(schema_fields.SchemaField(
            'width', 'Width', 'string',
            optional=True,
            extra_schema_dict_values={'value': '650'},
            description='Width of the iframe'))
        return reg
| Python |
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Search module."""
__author__ = 'Ellis Michael (emichael@google.com)'
import re
import robotparser
import urlparse
from functional import actions
from modules.search import resources
from google.appengine.api import urlfetch
# ---------------------------------------------------------------------------
# Fixture data served by the mocked urlfetch in SearchTestBase below.
# URL constants ending in patterns are matched as regexes against fetched
# URLs; bodies are paired with a content type in SearchTestBase.pages.
# ---------------------------------------------------------------------------

# A well-formed page: exercises title extraction, <script>/<style> stripping,
# and absolute / scheme-relative / relative link resolution.
VALID_PAGE_URL = 'http://valid.null/'
VALID_PAGE = """<html>
<head>
<title>Test Page</title>
<script>
alert('test');
</script>
<style>
body {
font-size: 12px;
}
</style>
</head>
<body>
Lorem ipsum <strong> dolor </strong> sit.
<a href="index.php?query=bibi%20quid">Ago gratias tibi</a>.
<a>Cogito ergo sum.</a>
<a href="//partial.null/"> Partial link </a>
<a href="ftp://absolute.null/"> Absolute link </a>
<a href="http://pdf.null/"> PDF </a>
<a href="http://link.null/"> Link </a>
</body>
</html>"""
# robots.txt bodies are tuples of lines fed to the fake robot parser.
VALID_PAGE_ROBOTS = ('User-agent: *', 'Allow: /')
# Pages one and two link-hops away from VALID_PAGE, for crawl-depth tests.
LINKED_PAGE_URL = 'http://link.null/'
LINKED_PAGE = """<a href="http://distance2link.null/">
What hath God wrought?
</a>"""
SECOND_LINK_PAGE_URL = 'http://distance2link.null/'
SECOND_LINK_PAGE = """Something went terribly wrong. ABORT"""
# A page with non-ASCII title and body content.
UNICODE_PAGE_URL = 'http://unicode.null/'
UNICODE_PAGE = """<html>
<head>
<title>‘Quoted string’</title>
</head>
<body>
Russell's Paradox: <br/>
∃ y∀ x(x∈ y ⇔ P(x)) <br/>
Let P(x)=~(x∈ x), x=y. <br/>
y∈ y ⇔ ~(y∈ y)
</body>
</html>"""
# Served with 'application/pdf' content type (see SearchTestBase.pages).
PDF_URL = 'http://pdf.null/'
XML_DOC_URL = 'http://xml.null/'
XML_DOC = """<document attribute="foo">
<childNode>
Text content.
</childNode>
</document>"""
# YouTube fixtures: timed-text transcript, gdata entry, and transcript list.
YOUTUBE_TRANSCRIPT_URL = (resources.YOUTUBE_TIMED_TEXT_URL +
    '?.*name=Name%20of%20track.*$')
YOUTUBE_TRANSCRIPT = """<transcript>
<text start="3.14" dur="6.28">
Apple, lemon, cherry...
</text>
<text start="20.0" dur="20.0">
It's a test.
</text>
</transcript>"""
GDATA_DOC_URL = resources.YOUTUBE_DATA_URL
GDATA_DOC = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:media='http://search.yahoo.com/mrss/'>
<title type="text">
Medicus Quis
</title>
<media:thumbnail url="http://thumbnail.null"/>
</entry>"""
YOUTUBE_TRANSCRIPT_LIST_URL = (resources.YOUTUBE_TIMED_TEXT_URL +
    '?.*type=list.*$')
YOUTUBE_TRANSCRIPT_LIST = """<transcript_list docid="123456789">
<track id="0" name="Name of track"
lang_code="en" lang_original="English"
lang_translated="English"
lang_default="true" />
</transcript_list>"""
# A page disallowed by its robots.txt; the crawler must never parse it.
BANNED_PAGE_URL = 'http://banned.null/'
BANNED_PAGE = 'Should not be accessed'
BANNED_PAGE_ROBOTS = ('User-agent: *', 'Disallow: /')
class SearchTestBase(actions.TestBase):
    """Unit tests for all search functionality."""

    # Maps URL regex patterns to (body, content_type) pairs returned by the
    # mocked urlfetch.fetch installed in setUp.
    pages = {VALID_PAGE_URL + '$':  # Using $ to prevent erroneous matches
             (VALID_PAGE, 'text/html'),
             urlparse.urljoin(VALID_PAGE_URL, '/robots.txt'):
             (VALID_PAGE_ROBOTS, 'text/html'),
             LINKED_PAGE_URL + '$':
             (LINKED_PAGE, 'text/html'),
             urlparse.urljoin(LINKED_PAGE_URL, '/robots.txt'):
             (VALID_PAGE_ROBOTS, 'text/html'),
             SECOND_LINK_PAGE_URL + '$':
             (SECOND_LINK_PAGE, 'text/html'),
             urlparse.urljoin(SECOND_LINK_PAGE_URL, '/robots.txt'):
             (VALID_PAGE_ROBOTS, 'text/html'),
             PDF_URL:
             (VALID_PAGE, 'application/pdf'),
             UNICODE_PAGE_URL + '$':
             (UNICODE_PAGE, 'text/html charset=utf-8'),
             urlparse.urljoin(UNICODE_PAGE_URL, '/robots.txt'):
             (VALID_PAGE_ROBOTS, 'text/html'),
             XML_DOC_URL + '$':
             (XML_DOC, 'text/xml'),
             urlparse.urljoin(XML_DOC_URL, '/robots.txt'):
             (VALID_PAGE_ROBOTS, 'text/html'),
             YOUTUBE_TRANSCRIPT_URL:
             (YOUTUBE_TRANSCRIPT, 'text/xml'),
             GDATA_DOC_URL:
             (GDATA_DOC, 'text/xml'),
             YOUTUBE_TRANSCRIPT_LIST_URL:
             (YOUTUBE_TRANSCRIPT_LIST, 'text/xml'),
             # The default Power Searching course has notes in this domain
             'http://www.google.com/robots.txt':
             (VALID_PAGE_ROBOTS, 'text/html'),
             BANNED_PAGE_URL + '$':
             (BANNED_PAGE, 'text/html'),
             urlparse.urljoin(BANNED_PAGE_URL, '/robots.txt'):
             (BANNED_PAGE_ROBOTS, 'text/html'),
             }

    def setUp(self):  # Name set by parent. pylint: disable-msg=g-bad-name
        """Do all of the necessary monkey patching to test search."""
        super(SearchTestBase, self).setUp()

        def return_doc(url):
            """Monkey patch for URL fetching."""

            class Response(object):
                # Minimal stand-in for a urlfetch response object.
                def __init__(self, code, content_type, content):
                    self.status_code = code
                    self.headers = {}
                    self.headers['Content-type'] = content_type
                    self.content = content

            # First fixture pattern that matches wins; URLs matching nothing
            # fall back to VALID_PAGE (the for/else below).
            for pattern in self.pages:
                if re.match(pattern, url):
                    page_data = self.pages[pattern]
                    body = page_data[0]
                    content_type = page_data[1]
                    break
            else:
                body = VALID_PAGE
                content_type = 'text/html'
            result = Response(200, content_type, body)
            return result
        self.swap(urlfetch, 'fetch', return_doc)

        class FakeRobotParser(robotparser.RobotFileParser):
            """Monkey patch for robot parser."""

            def read(self):
                # Route robots.txt reads through the mocked urlfetch above so
                # the fixtures control crawl permissions.
                parts = urlparse.urlsplit(self.url)
                if not (parts.netloc and parts.scheme):
                    raise IOError
                response = urlfetch.fetch(self.url)
                self.parse(response.content)
        self.swap(robotparser, 'RobotFileParser', FakeRobotParser)
class ParserTests(SearchTestBase):
    """Unit tests for the search HTML Parser."""

    def setUp(self):
        """Feeds VALID_PAGE into a fresh parser before every test."""
        super(ParserTests, self).setUp()
        self.parser = resources.ResourceHTMLParser(VALID_PAGE_URL)
        self.parser.feed(VALID_PAGE)

    def test_found_tokens(self):
        """Body text tokens should appear in the extracted content."""
        content = self.parser.get_content()
        for text in ['Lorem', 'ipsum', 'dolor']:
            self.assertIn(text, content)

    def test_no_false_matches(self):
        """Text of adjacent elements must not be run together."""
        content = self.parser.get_content()
        for text in ['Loremipsum', 'ipsumdolor', 'tibiCogito', 'sit.Ago']:
            self.assertNotIn(text, content)

    def test_ignored_fields(self):
        """<script>/<style> contents must be excluded from the content."""
        content = self.parser.get_content()
        for text in ['alert', 'font-size', 'body', 'script', 'style']:
            self.assertNotIn(text, content)

    def test_links(self):
        """Relative and scheme-relative hrefs resolve against the page URL."""
        links = self.parser.get_links()
        self.assertIn('http://valid.null/index.php?query=bibi%20quid', links)
        self.assertIn('http://partial.null/', links)
        self.assertIn('ftp://absolute.null/', links)
        self.assertEqual(len(links), 5)

    def test_unopened_tag(self):
        """A stray close tag should not break text extraction."""
        self.parser = resources.ResourceHTMLParser('')
        self.parser.feed('Lorem ipsum </script> dolor sit.')
        content = self.parser.get_content()
        for text in ['Lorem', 'ipsum', 'dolor', 'sit']:
            self.assertIn(text, content)

    def test_title(self):
        self.assertEqual('Test Page', self.parser.get_title())

    def test_get_parser_allowed(self):
        """Parsing succeeds for allowed URLs, fails for robots-banned ones."""
        self.parser = resources.get_parser_for_html(VALID_PAGE_URL)
        content = self.parser.get_content()
        self.assertIn('Cogito ergo sum', content)
        with self.assertRaises(resources.URLNotParseableException):
            self.parser = resources.get_parser_for_html(BANNED_PAGE_URL)
            # NOTE(review): the two lines below are unreachable when the call
            # above raises as expected.
            content = self.parser.get_content()
            self.assertNotIn('accessed', content)

    def test_bad_urls(self):
        """Malformed URLs should raise URLNotParseableException."""
        for url in ['http://', 'invalid.null', '//invalid.null', '//',
                    'invalid', '?test=1', 'invalid?test=1']:
            with self.assertRaises(resources.URLNotParseableException):
                self.parser = resources.get_parser_for_html(url)
                # NOTE(review): unreachable when the call above raises.
                content = self.parser.get_content()
                self.assertNotIn('impsum', content)

    def test_unicode_page(self):
        """Non-ASCII pages should parse without mangling text or title."""
        self.parser = resources.get_parser_for_html(UNICODE_PAGE_URL)
        content = self.parser.get_content()
        self.assertIn('Paradox', content)
        title = self.parser.get_title()
        self.assertIn('Quoted string', title)

    def test_xml_parser(self):
        """get_minidom_from_xml should expose attributes and text content."""
        dom = resources.get_minidom_from_xml(XML_DOC_URL)
        self.assertEqual('foo', dom.getElementsByTagName(
            'document')[0].attributes['attribute'].value)
        self.assertIn('Text content.', dom.getElementsByTagName(
            'childNode')[0].firstChild.nodeValue)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the transforms functions."""
__author__ = 'John Orr (jorr@google.com)'
import unittest
from models import transforms
def wrap_properties(properties):
    """Wraps a property map in the {'properties': ...} schema envelope."""
    return dict(properties=properties)
class JsonToDictTests(unittest.TestCase):
    """Tests for transforms.json_to_dict schema-driven conversion."""

    def _assert_rejects(self, source, schema, expected_message):
        """Asserts json_to_dict raises ValueError with the given message.

        Replaces the repeated try/self.fail/except pattern; uses the same
        assertRaises context-manager idiom as the rest of this codebase.
        """
        with self.assertRaises(ValueError) as context:
            transforms.json_to_dict(source, schema)
        self.assertEqual(str(context.exception), expected_message)

    def test_missing_optional_fields_are_allowed(self):
        schema = wrap_properties(
            {'opt_field': {'type': 'boolean', 'optional': 'true'}})
        result = transforms.json_to_dict({}, schema)
        self.assertEqual(len(result), 0)

    def test_missing_required_fields_are_rejected(self):
        # Both an explicit optional='false' and an absent 'optional' key mean
        # the field is required.
        schema = wrap_properties(
            {'req_field': {'type': 'boolean', 'optional': 'false'}})
        self._assert_rejects(
            {}, schema, 'Missing required attribute: req_field')
        schema = wrap_properties(
            {'req_field': {'type': 'boolean'}})
        self._assert_rejects(
            {}, schema, 'Missing required attribute: req_field')

    def test_convert_boolean(self):
        schema = wrap_properties({'field': {'type': 'boolean'}})
        source = {'field': True}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(len(result), 1)
        self.assertEqual(result['field'], True)

    def test_convert_string_to_boolean(self):
        schema = wrap_properties({'field': {'type': 'boolean'}})
        source = {'field': 'true'}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(len(result), 1)
        self.assertEqual(result['field'], True)

    def test_reject_bad_boolean(self):
        schema = wrap_properties({'field': {'type': 'boolean'}})
        self._assert_rejects(
            {'field': 'cat'}, schema, 'Bad boolean value for field: cat')

    def test_convert_number(self):
        schema = wrap_properties({'field': {'type': 'number'}})
        source = {'field': 3.14}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(len(result), 1)
        self.assertEqual(result['field'], 3.14)

    def test_convert_string_to_number(self):
        schema = wrap_properties({'field': {'type': 'number'}})
        source = {'field': '3.14'}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(len(result), 1)
        self.assertEqual(result['field'], 3.14)

    def test_reject_bad_number(self):
        schema = wrap_properties({'field': {'type': 'number'}})
        # Message comes straight from float('cat') inside transforms.
        self._assert_rejects(
            {'field': 'cat'}, schema,
            'could not convert string to float: cat')
class StringValueConversionTests(unittest.TestCase):
    """Tests for transforms.value_to_string and string_to_value.

    Rewritten to use unittest assertion methods: bare ``assert`` statements
    are stripped under ``python -O`` and report no diagnostic on failure.
    """

    def test_value_to_string(self):
        self.assertEqual(transforms.value_to_string(True, bool), 'True')
        self.assertEqual(transforms.value_to_string(False, bool), 'False')
        # None is rendered as the type's falsy representation.
        self.assertEqual(transforms.value_to_string(None, bool), 'False')

    def test_string_to_value(self):
        self.assertTrue(transforms.string_to_value('True', bool))
        self.assertTrue(transforms.string_to_value('1', bool))
        self.assertTrue(transforms.string_to_value(1, bool))
        self.assertFalse(transforms.string_to_value('False', bool))
        self.assertFalse(transforms.string_to_value('0', bool))
        # Only the literal strings 'True' and '1' parse as True.
        self.assertFalse(transforms.string_to_value('5', bool))
        self.assertFalse(transforms.string_to_value(0, bool))
        self.assertFalse(transforms.string_to_value(5, bool))
        self.assertFalse(transforms.string_to_value(None, bool))
        self.assertEqual(transforms.string_to_value('15', int), 15)
        self.assertEqual(transforms.string_to_value(15, int), 15)
        self.assertEqual(transforms.string_to_value(None, int), 0)
        self.assertEqual(transforms.string_to_value('foo', str), 'foo')
        self.assertEqual(transforms.string_to_value(None, str), str(''))
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for common/schema_fields.py."""
__author__ = 'John Orr (jorr@google.com)'
import json
import unittest
from common import schema_fields
def remove_whitespace(s):
    """Strips all whitespace from s so fixtures compare structurally."""
    chunks = s.split()
    return ''.join(chunks)
class BaseFieldTests(unittest.TestCase):
    """Base class for the tests on a schema field."""

    def assert_json_schema_value(self, expected, field):
        """Compares the field's JSON schema to expected, ignoring whitespace."""
        actual = json.dumps(field.get_json_schema_dict())
        self.assertEquals(
            remove_whitespace(expected), remove_whitespace(actual))

    def assert_schema_dict_value(self, expected, field):
        """Compares the field's schema dict to expected, ignoring whitespace."""
        actual = json.dumps(field._get_schema_dict([]))
        self.assertEquals(
            remove_whitespace(expected), remove_whitespace(actual))
class SchemaFieldTests(BaseFieldTests):
    """Unit tests for common.schema_fields.SchemaField."""

    def test_simple_field(self):
        """A bare field should emit only its type and an inputex label."""
        field = schema_fields.SchemaField('aName', 'aLabel', 'aType')
        expected = '{"type":"aType"}'
        self.assert_json_schema_value(expected, field)
        expected = '[[["_inputex"], {"label": "aLabel"}]]'
        self.assert_schema_dict_value(expected, field)
        self.assertEquals('aName', field.name)

    def test_extra_schema_dict(self):
        """extra_schema_dict_values should be merged into the inputex dict."""
        field = schema_fields.SchemaField(
            'aName', 'aLabel', 'aType',
            extra_schema_dict_values={'a': 'A', 'b': 'B'})
        expected = '[[["_inputex"], {"a": "A", "b": "B", "label": "aLabel"}]]'
        self.assert_schema_dict_value(expected, field)

    def test_uneditable_field(self):
        """editable=False should emit the 'uneditable' inputex type."""
        field = schema_fields.SchemaField(
            'aName', 'aLabel', 'aType', editable=False)
        expected = '{"type":"aType"}'
        self.assert_json_schema_value(expected, field)
        expected = ('[[["_inputex"], {"_type": "uneditable", '
                    '"label": "aLabel"}]]')
        self.assert_schema_dict_value(expected, field)
        self.assertEquals('aName', field.name)

    def test_hidden_field(self):
        """hidden=True should emit the 'hidden' inputex type."""
        field = schema_fields.SchemaField('aName', 'aLabel', 'aType',
                                          hidden=True)
        expected = '{"type":"aType"}'
        self.assert_json_schema_value(expected, field)
        expected = '[[["_inputex"], {"_type": "hidden", "label": "aLabel"}]]'
        self.assert_schema_dict_value(expected, field)
        self.assertEquals('aName', field.name)
class FieldArrayTests(BaseFieldTests):
    """Unit tests for common.schema_fields.FieldArray."""

    def test_field_array_with_simple_members(self):
        """An array of scalar items should inline the item schema."""
        array = schema_fields.FieldArray(
            'aName', 'aLabel',
            item_type=schema_fields.SchemaField(
                'unusedName', 'field_label', 'aType'))
        expected = """
        {
            "items": {"type": "aType"},
            "type": "array"
        }"""
        self.assert_json_schema_value(expected, array)
        expected = """
        [
            [["_inputex"],{"label":"aLabel"}],
            [["items","_inputex"],{"label":"field_label"}]
        ]
        """
        self.assert_schema_dict_value(expected, array)

    def test_field_array_with_object_members(self):
        """An array whose items are a FieldRegistry should nest its schema."""
        object_type = schema_fields.FieldRegistry('object_title')
        object_type.add_property(schema_fields.SchemaField(
            'prop_name', 'prop_label', 'prop_type'))
        field = schema_fields.FieldArray(
            'aName', 'aLabel', item_type=object_type)
        expected = """
        {
            "items": {
                "type": "object",
                "id": "object_title",
                "properties": {
                    "prop_name": {"type":"prop_type"}
                }
            },
            "type":"array"}
        """
        self.assert_json_schema_value(expected, field)
        expected = """
        [
            [["_inputex"],{"label":"aLabel"}],
            [["items","title"],"object_title"],
            [["items","properties","prop_name","_inputex"],{"label":"prop_label"}]
        ]
        """
        self.assert_schema_dict_value(expected, field)

    def test_extra_schema_dict(self):
        """extra_schema_dict_values should be merged into the array's dict."""
        array = schema_fields.FieldArray(
            'aName', 'aLabel',
            item_type=schema_fields.SchemaField(
                'unusedName', 'field_label', 'aType'),
            extra_schema_dict_values={'a': 'A', 'b': 'B'})
        expected = """
        [
            [["_inputex"],{"a":"A","b":"B","label":"aLabel"}],
            [["items","_inputex"],{"label":"field_label"}]]
        """
        self.assert_schema_dict_value(expected, array)
class FieldRegistryTests(BaseFieldTests):
    """Unit tests for common.schema_fields.FieldRegistry."""

    def test_single_property(self):
        """A registry with one field should emit an object schema."""
        reg = schema_fields.FieldRegistry(
            'registry_name', 'registry_description')
        reg.add_property(schema_fields.SchemaField(
            'field_name', 'field_label', 'property_type',
            description='property_description'))
        expected = """
        {
            "properties": {
                "field_name": {
                    "type": "property_type",
                    "description": "property_description"
                }
            },
            "type": "object",
            "id": "registry_name",
            "description": "registry_description"
        }"""
        self.assert_json_schema_value(expected, reg)
        expected = """
        [
            [["title"], "registry_name"],
            [["properties","field_name","_inputex"], {
                "description": "property_description",
                "label":"field_label"
            }]
        ]
        """
        self.assert_schema_dict_value(expected, reg)

    def test_single_property_with_select_data(self):
        """select_data should appear as inputex choices, not in JSON schema."""
        reg = schema_fields.FieldRegistry(
            'registry_name', 'registry_description')
        reg.add_property(schema_fields.SchemaField(
            'field_name', 'field_label', 'string',
            select_data=[('a', 'A'), ('b', 'B')]))
        expected = """
        {
            "properties": {
                "field_name": {
                    "type": "string"
                }
            },
            "type": "object",
            "id": "registry_name",
            "description": "registry_description"
        }"""
        self.assert_json_schema_value(expected, reg)
        expected = """
        [
            [["title"],"registry_name"],
            [["properties","field_name","_inputex"],{
                "_type": "select",
                "choices":[
                    {"value": "a", "label": "A"},
                    {"value": "b","label": "B"}],
                "label":"field_label"
            }]
        ]
        """
        self.assert_schema_dict_value(expected, reg)

    def test_select_data_values_retain_boolean_and_numeric_type_in_json(self):
        """Non-string choice values must serialize as JSON bool/number."""
        reg = schema_fields.FieldRegistry(
            'registry_name', 'registry_description')
        reg.add_property(schema_fields.SchemaField(
            'field_name', 'field_label', 'string',
            select_data=[(True, 'A'), (12, 'B'), ('c', 'C')]))
        expected = """
        [
            [["title"],"registry_name"],
            [["properties","field_name","_inputex"],{
                "_type": "select",
                "choices":[
                    {"value": true, "label": "A"},
                    {"value": 12,"label": "B"},
                    {"value": "c","label": "C"}],
                "label":"field_label"
            }]
        ]
        """
        self.assert_schema_dict_value(expected, reg)

    def test_object_with_array_property(self):
        """A registry may mix scalar fields and array fields."""
        reg = schema_fields.FieldRegistry(
            'registry_name', 'registry_description')
        reg.add_property(schema_fields.SchemaField(
            'field_name', 'field_label', 'field_type',
            description='field_description'))
        reg.add_property(schema_fields.FieldArray(
            'array_name', 'array_label',
            item_type=schema_fields.SchemaField(
                'unusedName', 'unusedLabel', 'aType')))
        expected = """
        {
            "properties": {
                "field_name": {
                    "type": "field_type",
                    "description": "field_description"
                },
                "array_name": {
                    "items": {"type": "aType"},
                    "type":"array"
                }
            },
            "type": "object",
            "id": "registry_name",
            "description": "registry_description"
        }
        """
        self.assert_json_schema_value(expected, reg)

    def test_extra_schema_dict(self):
        """extra_schema_dict_values should be merged into the registry dict."""
        reg = schema_fields.FieldRegistry(
            'aName', 'aLabel',
            extra_schema_dict_values={'a': 'A', 'b': 'B'})
        expected = """
        [
            [["title"], "aName"],
            [["_inputex"], {"a": "A", "b": "B"}]]
        """
        self.assert_schema_dict_value(expected, reg)

    def test_mc_question_schema(self):
        """The multiple choice question schema is a good end-to-end example."""
        mc_question = schema_fields.FieldRegistry(
            'MC Question',
            extra_schema_dict_values={'className': 'mc-question'})
        mc_question.add_property(
            schema_fields.SchemaField('question', 'Question', 'string'))
        choice_type = schema_fields.FieldRegistry(
            'choice', extra_schema_dict_values={'className': 'mc-choice'})
        choice_type.add_property(
            schema_fields.SchemaField('text', 'Text', 'string'))
        choice_type.add_property(
            schema_fields.SchemaField('score', 'Score', 'string'))
        choice_type.add_property(
            schema_fields.SchemaField('feedback', 'Feedback', 'string'))
        choices_array = schema_fields.FieldArray(
            'choices', 'Choices', item_type=choice_type)
        mc_question.add_property(choices_array)
        expected = """
        {
            "type":"object",
            "id":"MCQuestion",
            "properties":{
                "question":{"type":"string"},
                "choices":{
                    "items":{
                        "type":"object",
                        "id":"choice",
                        "properties":{
                            "text":{"type":"string"},
                            "score":{"type":"string"},
                            "feedback":{"type":"string"}
                        }
                    },
                    "type":"array"
                }
            }
        }
        """
        self.assert_json_schema_value(expected, mc_question)
        expected = """
        [
            [["title"],"MCQuestion"],
            [["_inputex"],{"className":"mc-question"}],
            [["properties","question","_inputex"],{"label":"Question"}],
            [["properties","choices","_inputex"],{"label":"Choices"}],
            [["properties","choices","items","title"],"choice"],
            [["properties","choices","items","_inputex"],{"className":"mc-choice"}],
            [["properties","choices","items","properties","text","_inputex"],{
                "label":"Text"
            }],
            [["properties","choices","items","properties","score","_inputex"],{
                "label":"Score"
            }],
            [["properties","choices","items","properties","feedback","_inputex"],{
                "label":"Feedback"
            }]
        ]
        """
        self.assert_schema_dict_value(expected, mc_question)
| Python |
"""Unit tests for the javascript code."""
__author__ = 'John Orr (jorr@google.com)'
import os
import subprocess
import unittest
class AllJavaScriptTests(unittest.TestCase):
    """Runs every karma-based JavaScript test suite."""

    def karma_test(self, test_folder):
        """Launches karma for one suite and asserts a zero exit status."""
        config_path = os.path.join(
            'experimental', 'coursebuilder', 'tests', 'unit',
            'javascript_tests', test_folder, 'karma.conf.js')
        exit_code = subprocess.call(['karma', 'start', config_path])
        self.assertEqual(0, exit_code)

    def test_activity_generic(self):
        self.karma_test('assets_lib_activity_generic')

    def test_butterbar(self):
        self.karma_test('assets_lib_butterbar')

    def test_assessment_tags(self):
        self.karma_test('modules_assessment_tags')

    def test_dashboard(self):
        self.karma_test('modules_dashboard')

    def test_oeditor(self):
        self.karma_test('modules_oeditor')
| Python |
"""Unit tests for the common.sanitize module."""
__author__ = 'John Orr (jorr@google.com)'
import unittest
from common import safe_dom
class MockNode(safe_dom.Node):
    """Minimal Node whose sanitized output is a fixed string."""

    def __init__(self, value):
        self._sanitized_value = value

    @property
    def sanitized(self):
        return self._sanitized_value
class NodeListTests(unittest.TestCase):
    """Unit tests for common.safe_dom.NodeList."""

    def test_list(self):
        """NodeList should escape all its members."""
        nodes = safe_dom.NodeList()
        nodes.append(MockNode('a')).append(MockNode('b'))
        self.assertEqual('ab', nodes.sanitized)

    def test_len(self):
        """NodeList should support len."""
        nodes = safe_dom.NodeList().append(
            MockNode('a')).append(MockNode('b'))
        self.assertEqual(2, len(nodes))

    def test_append_node_list(self):
        """NodeList should support appending both Nodes and NodeLists."""
        inner = safe_dom.NodeList().append(
            MockNode('a')).append(MockNode('b'))
        nodes = safe_dom.NodeList().append(inner).append(MockNode('c'))
        self.assertEqual('abc', nodes.__str__())
class TextTests(unittest.TestCase):
    """Unit tests for common.safe_dom.Text."""

    def test_text_sanitizes(self):
        """Text should sanitize unsafe characters."""
        unsafe_string = '<script>'
        text = safe_dom.Text(unsafe_string)
        # NOTE(review): the expected literal looks HTML-entity-decoded by
        # extraction (presumably it was '&lt;script&gt;' originally) --
        # verify against upstream history before relying on this assertion.
        self.assertEqual('<script>', text.sanitized)

    def test_str_returns_sanitized(self):
        """The __str__ method should return sanitized text."""
        unsafe_string = '<script>'
        text = safe_dom.Text(unsafe_string)
        # NOTE(review): same entity-decoding concern as above.
        self.assertEqual('<script>', text.__str__())
class ElementTests(unittest.TestCase):
"""Unit tests for common.safe_dom.Element."""
def test_build_simple_element(self):
"""Element should build an element without attributes or children."""
element = safe_dom.Element('p')
self.assertEqual('<p></p>', element.__str__())
def test_reject_bad_tag_names(self):
"""Element should reject bad tag names."""
bad_names = ['2a', 'a b', '@', 'a-b']
for name in bad_names:
try:
safe_dom.Element(name)
except AssertionError:
continue
self.fail('Expected an exception: "%s"' % name)
def test_reject_bad_attribute_names(self):
"""Element should reject bad attribute names."""
bad_names = ['2a', 'a b', '@', 'a-b']
for name in bad_names:
try:
safe_dom.Element('p', **{name: 'good value'})
except AssertionError:
continue
self.fail('Expected an exception: "%s"' % name)
def test_include_attributes(self):
"""Element should include tag attributes."""
element = safe_dom.Element('button', style='foo', onclick='action')
self.assertEqual(
'<button onclick="action" style="foo"></button>',
element.__str__())
def test_escape_quotes(self):
"""Element should escape single and double quote characters."""
element = safe_dom.Element('a', href='a\'b"c`d')
self.assertEqual(
'<a href="a'b"c`d"></a>', element.__str__())
def test_allow_parens(self):
"""Element should allow parentheses in attributes."""
element = safe_dom.Element('a', action='myAction()')
self.assertEqual('<a action="myAction()"></a>', element.__str__())
def test_allow_urls(self):
"""Element should allow urls with a method sepcified in an attribute."""
element = safe_dom.Element(
'a', action='http://a.b.com/d/e/f?var1=val1&var2=val2#fra')
self.assertEqual(
'<a action="http://a.b.com/d/e/f?var1=val1&var2=val2#fra"></a>',
element.__str__())
def test_url_query_chars(self):
"""Element should pass '?' and '=' characters in an attribute."""
element = safe_dom.Element('a', action='target?action=foo&value=bar')
self.assertEqual(
'<a action="target?action=foo&value=bar"></a>',
element.__str__())
def test_convert_none_to_empty(self):
"""An attribute with value None should render as empty."""
element = safe_dom.Element('a', action=None)
self.assertEqual('<a action=""></a>', element.__str__())
def test_coerce_className(self): # pylint: disable-msg=g-bad-name
"""Element should replace the 'className' attrib with 'class'."""
element = safe_dom.Element('p', className='foo')
self.assertEqual('<p class="foo"></p>', element.__str__())
def test_include_children(self):
    """Element should include child elements."""
    inner = safe_dom.Element('b').add_child(safe_dom.Element('c'))
    outer = safe_dom.Element('a')
    outer.add_child(inner)
    outer.add_child(safe_dom.Element('d'))
    self.assertEqual('<a><b><c></c></b><d></d></a>', str(outer))
def test_include_node_list(self):
    """Element should include a list of children."""
    node_list = safe_dom.NodeList().append(MockNode('b')).append(MockNode('c'))
    element = safe_dom.Element('a').add_children(node_list)
    self.assertEqual('<a>bc</a>', str(element))
def test_sanitize_children(self):
    """Element should sanitize child elements as they are included."""
    element = safe_dom.Element('td').add_child(
        safe_dom.Element('a', href='foo"bar').add_text('1<2'))
    # NOTE(review): the expected string below contains a raw '"' inside the
    # href and a raw '<' in the text, contradicting "sanitize". The escaped
    # entities were most likely lost to entity-decoding when this listing
    # was produced — restore them from the original test before trusting it.
    self.assertEqual(
        '<td><a href="foo"bar">1<2</a></td>', element.__str__())
def test_add_text(self):
    """Adding text should add text which will be sanitized."""
    # NOTE(review): a sanitized '<' cannot appear raw in the output; the
    # expected string likely read '<a>1&lt;2</a>' before entity-decoding
    # corruption in this listing — verify against the original source.
    self.assertEqual(
        '<a>1<2</a>', safe_dom.Element('a').add_text('1<2').__str__())
def test_add_attribute(self):
    """Attributes can be added after initialization."""
    element = safe_dom.Element('a', b='c', d='e')
    element.add_attribute(f='g', h='i')
    self.assertEqual('<a b="c" d="e" f="g" h="i"></a>', str(element))
def test_void_elements_have_no_end_tags(self):
    """Void elements should have no end tag, e.g., <br/>."""
    for tag_name in ('area', 'base', 'br', 'col', 'embed', 'hr', 'img',
                     'input', 'keygen', 'link', 'menuitem', 'meta', 'param',
                     'source', 'track', 'wbr'):
        self.assertEqual('<%s/>' % tag_name, str(safe_dom.Element(tag_name)))
def test_empty_non_void_elements_should_have_end_tags(self):
    """Non-void elements should have their end tags, even when empty."""
    # Fixed method-name typo: "elememnts" -> "elements". unittest discovers
    # tests by the 'test_' prefix, so the rename is safe.
    sample_elements = ['p', 'textarea', 'div']
    for elt in sample_elements:
        self.assertEqual(
            '<%s></%s>' % (elt, elt), safe_dom.Element(elt).__str__())
class ScriptElementTests(unittest.TestCase):
    """Unit tests for common.safe_dom.ScriptElement."""

    def test_script_should_not_escape_body(self):
        """The body of the script tag should not be escaped."""
        script = safe_dom.ScriptElement()
        script.add_text('alert("foo");')
        script.add_text('1 < 2 && 2 > 1;')
        # Text chunks are concatenated verbatim — no HTML escaping.
        self.assertEqual(
            '<script>alert("foo");1 < 2 && 2 > 1;</script>', script.__str__())

    def test_script_should_reject_close_script_tag_in_body(self):
        """Expect an error if the body of the script tag contains </script>."""
        script = safe_dom.ScriptElement()
        script.add_text('</script>')
        # Note: the error is raised lazily at render time, not on add_text.
        try:
            script.__str__()
            self.fail('Expected an exception')
        except ValueError:
            pass

    def test_script_should_not_allow_child_nodes_to_be_added(self):
        """Script should not allow child nodes to be added."""
        script = safe_dom.ScriptElement()
        # Both single-child and node-list forms must be rejected.
        try:
            child = safe_dom.Element('br')
            script.add_child(child)
            self.fail('Expected an exception')
        except ValueError:
            pass
        try:
            children = safe_dom.NodeList().append(safe_dom.Element('br'))
            script.add_children(children)
            self.fail('Expected an exception')
        except ValueError:
            pass
class EntityTests(unittest.TestCase):
    """Unit tests for common.safe_dom.Entity.

    NOTE(review): several string literals below were reconstructed — the
    source listing had HTML entities decoded to the characters they
    represent (e.g. '&nbsp;' appeared as a space). Literals containing
    no decodable entity ('nbsp;', '&qu2ot;', '&;', '&#;', '&#x;') survived
    intact. Verify all reconstructed literals against repository history.
    """

    def expect_pass(self, test_text):
        """Assert test_text is accepted by Entity and rendered unchanged."""
        entity = safe_dom.Entity(test_text)
        self.assertEqual(test_text, entity.__str__())

    def expect_fail(self, test_text):
        """Assert that constructing an Entity from test_text asserts."""
        try:
            safe_dom.Entity(test_text)
        except AssertionError:
            return
        self.fail('Expected an assert exception')

    def test_should_pass_named_entities(self):
        self.expect_pass('&nbsp;')

    def test_should_pass_decimal_entities(self):
        self.expect_pass('&#38;')

    def test_should_pass_hex_entities(self):
        # U+26AB (medium black circle) uniquely matches the decoded residue.
        self.expect_pass('&#x26AB;')

    def test_entities_must_start_with_ampersand(self):
        self.expect_fail('nbsp;')

    def test_entities_must_end_with_semicolon(self):
        self.expect_fail('&nbsp')

    def test_named_entities_must_be_all_alpha(self):
        self.expect_fail('&qu2ot;')

    def test_decimal_entities_must_be_all_decimal_digits(self):
        # Reconstructed from residue 'A6;' — a lenient decoder consumed the
        # leading '&#<digits>' portion; confirm the exact original literal.
        self.expect_fail('&#12A6;')

    def test_hex_entities_must_be_all_hex_digits(self):
        # Residue was U+026A ('&#x26A' decoded) followed by 'G'.
        self.expect_fail('&#x26AG;')

    def test_entities_must_be_non_empty(self):
        # Fixed method-name typo: "entitiesmust" -> "entities_must".
        self.expect_fail('&;')
        self.expect_fail('&#;')
        self.expect_fail('&#x;')

    def test_should_reject_extraneous_characters(self):
        self.expect_fail('&nbsp; ')
        self.expect_fail(' &nbsp;')

    def test_should_reject_tampering(self):
        # A previously-valid Entity must re-validate when rendered.
        entity = safe_dom.Entity('&nbsp;')
        entity._entity = '<script/>'
        try:
            entity.__str__()
        except AssertionError:
            return
        self.fail('Expected an assert exception')
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Workflow class in models.courses."""
__author__ = 'Sean Lip (sll@google.com)'
import unittest
from models.courses import LEGACY_HUMAN_GRADER_WORKFLOW
from models.courses import Workflow
import yaml
# Expected fragments of Workflow.validate error messages, shared by the
# assertions below (assert_matching_errors prepends ERROR_HEADER to each).
DATE_FORMAT_ERROR = (
    'dates should be formatted as YYYY-MM-DD hh:mm (e.g. 1997-07-16 19:20) and '
    'be specified in the UTC timezone.'
)
ERROR_HEADER = 'Error validating workflow specification: '
MISSING_KEYS_PREFIX = 'missing key(s) for a human-reviewed assessment:'
class DateTimeConversionTests(unittest.TestCase):
    """Unit tests for datetime conversion."""

    def test_valid_datetime(self):
        """Valid datetimes should be converted without problems."""
        workflow = Workflow('')
        date_obj = workflow._convert_date_string_to_datetime('2012-03-21 12:30')
        self.assertEqual(date_obj.year, 2012)
        self.assertEqual(date_obj.month, 3)
        self.assertEqual(date_obj.day, 21)
        self.assertEqual(date_obj.hour, 12)
        self.assertEqual(date_obj.minute, 30)

    def test_invalid_datetime(self):
        """Invalid datetimes should raise an exception during conversion."""
        # (Docstring was a copy-paste of test_valid_datetime's.) Covers
        # garbage input, an out-of-range month, an ISO 'T' separator, and an
        # explicit timezone offset.
        invalid_date_strs = [
            'abc', '2012-13-31 12:30', '2012-12-31T12:30',
            '2012-13-31 12:30+0100']
        workflow = Workflow('')
        for date_str in invalid_date_strs:
            with self.assertRaises(Exception):
                workflow._convert_date_string_to_datetime(date_str)

    def test_no_timezone_set(self):
        """Parsed date strings should contain no timezone information."""
        workflow = Workflow('')
        date_obj = workflow._convert_date_string_to_datetime('2012-03-21 12:30')
        self.assertIsNone(date_obj.tzinfo)
class WorkflowValidationTests(unittest.TestCase):
    """Unit tests for workflow object validation."""

    def setUp(self):
        self.errors = []
        # Re-parsed for every test. Tests alias (not copy) this dict and
        # mutate it in place before serializing it back to YAML.
        self.valid_human_review_workflow_dict = yaml.safe_load(
            LEGACY_HUMAN_GRADER_WORKFLOW)

    def assert_matching_errors(self, expected, actual):
        """Prepend the error prefix to the error messages, then compare them."""
        formatted_errors = []
        for error in expected:
            formatted_errors.append('%s%s' % (ERROR_HEADER, error))
        self.assertEqual(formatted_errors, actual)

    def to_yaml(self, adict):
        """Convert a dict to YAML."""
        return yaml.safe_dump(adict)

    def test_empty_string(self):
        """Validation should fail on an empty string."""
        workflow = Workflow('')
        workflow.validate(self.errors)
        self.assert_matching_errors(['missing key: grader.'], self.errors)

    def test_invalid_string(self):
        """Validation should fail for invalid YAML strings."""
        workflow = Workflow('(')
        workflow.validate(self.errors)
        # Only check that some error was recorded; the parser message varies.
        self.assertTrue(self.errors)

    def test_not_dict(self):
        """Validation should fail for non-dict YAML strings."""
        # A YAML list and a YAML scalar, respectively.
        yaml_strs = ['- first\n- second', 'grader']
        for yaml_str in yaml_strs:
            self.errors = []
            workflow = Workflow(yaml_str)
            workflow.validate(self.errors)
            self.assert_matching_errors(
                ['expected the YAML representation of a dict'], self.errors)

    def test_missing_grader_key(self):
        """Validation should fail for missing grader key."""
        workflow = Workflow(self.to_yaml({'not_grader': 'human'}))
        workflow.validate(self.errors)
        self.assert_matching_errors(['missing key: grader.'], self.errors)

    def test_auto_grader(self):
        """Validation should pass for an auto-graded assessment."""
        workflow = Workflow(self.to_yaml({'grader': 'auto'}))
        workflow.validate(self.errors)
        self.assertFalse(self.errors)

    def test_empty_submission_date_in_grader(self):
        """Validation should pass for empty submission date."""
        workflow = Workflow(self.to_yaml(
            {'grader': 'auto', 'submission_due_date': ''}))
        workflow.validate(self.errors)
        self.assertFalse(self.errors)

    def test_invalid_human_grader(self):
        """Validation should fail for invalid human grading specifications."""
        workflow = Workflow(self.to_yaml({'grader': 'human'}))
        workflow.validate(self.errors)
        self.assert_matching_errors([
            '%s matcher, review_min_count, review_window_mins, '
            'submission_due_date, review_due_date.' %
            MISSING_KEYS_PREFIX], self.errors)
        # Supplying one required key removes it from the reported list.
        self.errors = []
        workflow = Workflow(self.to_yaml(
            {'grader': 'human', 'matcher': 'peer'}
        ))
        workflow.validate(self.errors)
        self.assert_matching_errors([
            '%s review_min_count, review_window_mins, submission_due_date, '
            'review_due_date.' % MISSING_KEYS_PREFIX], self.errors)

    def test_invalid_review_min_count(self):
        """Validation should fail for bad review_min_count values."""
        workflow_dict = self.valid_human_review_workflow_dict
        workflow_dict['review_min_count'] = 'test_string'
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors(
            ['review_min_count should be an integer.'], self.errors)
        self.errors = []
        workflow_dict['review_min_count'] = -1
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors(
            ['review_min_count should be a non-negative integer.'], self.errors)
        # Zero is a valid boundary value.
        self.errors = []
        workflow_dict['review_min_count'] = 0
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assertFalse(self.errors)

    def test_invalid_review_window_mins(self):
        """Validation should fail for bad review_window_mins values."""
        workflow_dict = self.valid_human_review_workflow_dict
        workflow_dict['review_window_mins'] = 'test_string'
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors(
            ['review_window_mins should be an integer.'], self.errors)
        self.errors = []
        workflow_dict['review_window_mins'] = -1
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors(
            ['review_window_mins should be a non-negative integer.'],
            self.errors)
        # Zero is a valid boundary value.
        self.errors = []
        workflow_dict['review_window_mins'] = 0
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assertFalse(self.errors)

    def test_invalid_date(self):
        """Validation should fail for invalid dates."""
        workflow_dict = self.valid_human_review_workflow_dict
        workflow_dict['submission_due_date'] = 'test_string'
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors([DATE_FORMAT_ERROR], self.errors)
        self.errors = []
        workflow_dict = self.valid_human_review_workflow_dict
        workflow_dict['review_due_date'] = 'test_string'
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors([DATE_FORMAT_ERROR], self.errors)

    def test_submission_date_after_review_date_fails(self):
        """Validation should fail if review date precedes submission date."""
        workflow_dict = self.valid_human_review_workflow_dict
        workflow_dict['submission_due_date'] = '2013-03-14 12:00'
        workflow_dict['review_due_date'] = '2013-03-13 12:00'
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors(
            ['submission due date should be earlier than review due date.'],
            self.errors)

    def test_multiple_errors(self):
        """Validation should fail with multiple errors when appropriate."""
        workflow_dict = self.valid_human_review_workflow_dict
        workflow_dict['submission_due_date'] = '2013-03-14 12:00'
        workflow_dict['review_due_date'] = '2013-03-13 12:00'
        workflow_dict['review_window_mins'] = 'hello'
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        # All problems are joined with '; ' into a single error string.
        self.assert_matching_errors(
            ['review_window_mins should be an integer; submission due date '
            'should be earlier than review due date.'],
            self.errors)

    def test_valid_human_grader(self):
        """Validation should pass for valid human grading specifications."""
        workflow_dict = self.valid_human_review_workflow_dict
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assertFalse(self.errors)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for common.tags."""
__author__ = 'John Orr (jorr@google.com)'
import unittest
from xml.etree import cElementTree
from common import tags
class CustomTagTests(unittest.TestCase):
    """Unit tests for the custom tag functionality."""

    def setUp(self):
        # Stub tag classes standing in for real custom tags.
        class SimpleTag(tags.BaseTag):
            def render(self, unused_arg, unused_handler):
                return cElementTree.Element('SimpleTag')

        class ComplexTag(tags.BaseTag):
            def render(self, unused_arg, unused_handler):
                return cElementTree.XML(
                    '<Complex><Child>Text</Child></Complex>')

        class ReRootTag(tags.BaseTag):
            def render(self, node, unused_handler):
                # Wraps the tag's original children in <Re><Root>...</Root></Re>.
                elt = cElementTree.Element('Re')
                root = cElementTree.Element('Root')
                elt.append(root)
                for child in node:
                    root.append(child)
                return elt

        def new_get_tag_bindings():
            return {
                'simple': SimpleTag,
                'complex': ComplexTag,
                'reroot': ReRootTag}

        # Monkey-patch the binding lookup; restored in tearDown.
        self.old_get_tag_bindings = tags.get_tag_bindings
        tags.get_tag_bindings = new_get_tag_bindings
        self.mock_handler = object()

    def tearDown(self):
        tags.get_tag_bindings = self.old_get_tag_bindings

    def test_empty_text_is_passed(self):
        # Bug fix: this test previously passed None, making it an exact
        # duplicate of test_none_is_treated_as_empty and leaving the
        # empty-string case untested.
        safe_dom = tags.html_to_safe_dom('', self.mock_handler)
        self.assertEquals('', str(safe_dom))

    def test_none_is_treated_as_empty(self):
        safe_dom = tags.html_to_safe_dom(None, self.mock_handler)
        self.assertEquals('', str(safe_dom))

    def test_plain_text_is_passed(self):
        safe_dom = tags.html_to_safe_dom(
            'This is plain text.', self.mock_handler)
        self.assertEquals('This is plain text.', str(safe_dom))

    def test_mix_of_plain_text_and_tags_is_passed(self):
        html = 'This is plain text<br/>on several<br/>lines'
        safe_dom = tags.html_to_safe_dom(html, self.mock_handler)
        self.assertEquals(html, str(safe_dom))

    def test_simple_tag_is_replaced(self):
        html = '<div><simple></simple></div>'
        safe_dom = tags.html_to_safe_dom(html, self.mock_handler)
        self.assertEquals('<div><SimpleTag></SimpleTag></div>', str(safe_dom))

    def test_replaced_tag_preserves_tail_text(self):
        html = '<div><simple></simple>Tail text</div>'
        safe_dom = tags.html_to_safe_dom(html, self.mock_handler)
        self.assertEquals(
            '<div><SimpleTag></SimpleTag>Tail text</div>', str(safe_dom))

    def test_simple_tag_consumes_children(self):
        # SimpleTag.render ignores its node, so children are dropped.
        html = '<div><simple><p>child1</p></simple></div>'
        safe_dom = tags.html_to_safe_dom(html, self.mock_handler)
        self.assertEquals(
            '<div><SimpleTag></SimpleTag></div>', str(safe_dom))

    def test_complex_tag_preserves_its_own_children(self):
        html = '<div><complex/></div>'
        safe_dom = tags.html_to_safe_dom(html, self.mock_handler)
        self.assertEquals(
            '<div><Complex><Child>Text</Child></Complex></div>', str(safe_dom))

    def test_reroot_tag_puts_children_in_new_root(self):
        html = '<div><reroot><p>one</p><p>two</p></reroot></div>'
        safe_dom = tags.html_to_safe_dom(html, self.mock_handler)
        self.assertEquals(
            '<div><Re><Root><p>one</p><p>two</p></Root></Re></div>',
            str(safe_dom))

    def test_chains_of_tags(self):
        # Custom tags nested inside other custom tags are expanded too.
        html = '<div><reroot><p><simple></p></reroot></div>'
        safe_dom = tags.html_to_safe_dom(html, self.mock_handler)
        self.assertEquals(
            '<div><Re><Root><p><SimpleTag></SimpleTag></p></Root></Re></div>',
            str(safe_dom))

    def test_scripts_are_not_escaped(self):
        html = '<script>alert("2"); var a = (1 < 2 && 2 > 1);</script>'
        safe_dom = tags.html_to_safe_dom(html, self.mock_handler)
        self.assertEquals(html, str(safe_dom))
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for mapreduce jobs."""
__author__ = 'juliaoh@google.com (Julia Oh)'
import unittest
from models import transforms
from tools.etl import mapreduce
class HistogramTests(unittest.TestCase):
    """Unit tests for mapreduce.Histogram."""

    def test_get_bin_number(self):
        histogram = mapreduce.Histogram()
        # Bucket boundaries are right-inclusive: a value exactly equal to
        # k * _BUCKET_SIZE_SECONDS still falls in bin k - 1 (0 maps to bin 0).
        self.assertEquals(histogram._get_bin_number(0), 0)
        self.assertEquals(histogram._get_bin_number(
            mapreduce._BUCKET_SIZE_SECONDS), 0)
        self.assertEquals(histogram._get_bin_number(
            mapreduce._BUCKET_SIZE_SECONDS + 1), 1)
        self.assertEquals(histogram._get_bin_number(
            mapreduce._BUCKET_SIZE_SECONDS * 2), 1)
        self.assertEquals(histogram._get_bin_number(
            (mapreduce._BUCKET_SIZE_SECONDS * 2) + 1), 2)

    def test_get_bin_number_throws_value_error_for_negative_input(self):
        histogram = mapreduce.Histogram()
        self.assertRaises(ValueError, histogram._get_bin_number, -1)

    def test_add(self):
        # The expected bins imply _BUCKET_SIZE_SECONDS == 30 here:
        # 0, 1 -> bin 0; 31, 60 -> bin 1; 61 -> bin 2; 123 -> bin 4.
        histogram = mapreduce.Histogram()
        histogram.add(0)
        histogram.add(1)
        histogram.add(31)
        histogram.add(60)
        histogram.add(61)
        histogram.add(123)
        self.assertEquals(histogram._values, {0: 2, 1: 2, 2: 1, 4: 1})

    def test_to_list(self):
        # NOTE(review): comparing with test_add's expected bins
        # {0: 2, 1: 2, 2: 1, 4: 1}, this expected list [2, 1, 0, 1] matches
        # bins 1..4 — to_list appears to report counts starting at bin 1.
        # Confirm against Histogram.to_list before relying on this.
        histogram = mapreduce.Histogram()
        histogram.add(0)
        histogram.add(1)
        histogram.add(31)
        histogram.add(60)
        histogram.add(61)
        histogram.add(123)
        self.assertEquals(histogram.to_list(), [2, 1, 0, 1])
        histogram = mapreduce.Histogram()
        histogram.add(121)
        self.assertEquals(histogram.to_list(), [0, 0, 0, 1])

    def test_to_list_returns_empty_list(self):
        # A histogram with no samples serializes to an empty list.
        histogram = mapreduce.Histogram()
        self.assertEquals(histogram.to_list(), [])
class FlattenJsonTests(unittest.TestCase):
    """Tests for CSVGenerator._flatten_json."""

    def test_empty_json_flattened_returns_empty_json(self):
        """Flattening an empty JSON object is a no-op."""
        original = transforms.loads(transforms.dumps({}))
        self.assertEquals(
            original, mapreduce.CSVGenerator._flatten_json(original))

    def test_flat_json_flattened_returns_same_json(self):
        """Flattening an already-flat JSON object is a no-op."""
        original = transforms.loads(
            transforms.dumps({'foo': 1, 'bar': 2, 'quz': 3}))
        self.assertEquals(
            original, mapreduce.CSVGenerator._flatten_json(original))

    def test_nested_json_flattens_correctly(self):
        """Nested JSON-encoded strings flatten to underscore-joined keys."""
        innermost = dict(aaa=111)
        middle = dict(aa=11, bb=22, cc=transforms.dumps(innermost))
        outer = dict(a=transforms.dumps(middle), b=2)
        flattened = mapreduce.CSVGenerator._flatten_json(
            transforms.loads(transforms.dumps(outer)))
        expected = transforms.loads(
            transforms.dumps(
                {'a_aa': '11', 'a_bb': '22', 'b': '2', 'a_cc_aaa': '111'}))
        self.assertEquals(expected, flattened)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Page objects used in functional tests for Course Builder."""
__author__ = [
'John Orr (jorr@google.com)'
]
from selenium.common import exceptions
from selenium.webdriver.common import action_chains
from selenium.webdriver.common import by
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support import select
from selenium.webdriver.support import wait
class PageObject(object):
    """Superclass to hold shared logic used by page objects."""

    def __init__(self, tester):
        # tester is the unittest.TestCase driving the browser; it exposes
        # the selenium WebDriver as tester.driver.
        self._tester = tester

    def find_element_by_css_selector(self, selector):
        return self._tester.driver.find_element_by_css_selector(selector)

    def find_element_by_id(self, elt_id):
        return self._tester.driver.find_element_by_id(elt_id)

    def find_element_by_link_text(self, text):
        return self._tester.driver.find_element_by_link_text(text)

    def find_element_by_name(self, name):
        return self._tester.driver.find_element_by_name(name)

    def expect_status_message_to_be(self, value):
        # Wait up to 15s for the butterbar status message to contain value.
        wait.WebDriverWait(self._tester.driver, 15).until(
            ec.text_to_be_present_in_element(
                (by.By.ID, 'gcb-butterbar-message'), value))
class EditorPageObject(PageObject):
    """Page object for pages which wait for the editor to finish loading."""

    def __init__(self, tester):
        super(EditorPageObject, self).__init__(tester)

        def successful_butter_bar(driver):
            # Editor is ready once the butterbar reports success or is hidden.
            butter_bar_message = driver.find_element_by_id(
                'gcb-butterbar-message')
            return 'Success.' in butter_bar_message.text or (
                not butter_bar_message.is_displayed())

        wait.WebDriverWait(self._tester.driver, 15).until(successful_butter_bar)

    def set_status(self, status):
        """Select the given visible text in the 'is_draft' dropdown."""
        select.Select(self.find_element_by_name(
            'is_draft')).select_by_visible_text(status)
        return self

    def click_save(self, link_text='Save', status_message='Saved'):
        """Click the save link and wait for the given status message."""
        self.find_element_by_link_text(link_text).click()
        self.expect_status_message_to_be(status_message)
        return self

    def _close_and_return_to(self, continue_page):
        """Click Close and return a continue_page wrapping the tester."""
        self.find_element_by_link_text('Close').click()
        return continue_page(self._tester)
class DashboardEditor(EditorPageObject):
    """A base class for the editors accessed from the Dashboard."""

    def click_close(self):
        # Closing a dashboard editor always lands back on the Dashboard.
        return self._close_and_return_to(DashboardPage)
class RootPage(PageObject):
    """Page object to model the interactions with the root page."""
    # Each click_* method navigates and returns the page object for the
    # destination page, enabling fluent chaining in tests.

    def load(self, base_url):
        self._tester.driver.get(base_url + '/')
        return self

    def click_login(self):
        self.find_element_by_link_text('Login').click()
        return LoginPage(self._tester)

    def click_dashboard(self):
        self.find_element_by_link_text('Dashboard').click()
        return DashboardPage(self._tester)

    def click_admin(self):
        self.find_element_by_link_text('Admin').click()
        return AdminPage(self._tester)

    def click_announcements(self):
        self.find_element_by_link_text('Announcements').click()
        return AnnouncementsPage(self._tester)

    def click_register(self):
        self.find_element_by_link_text('Register').click()
        return RegisterPage(self._tester)
class RegisterPage(PageObject):
    """Page object to model the registration page."""

    def enroll(self, name):
        """Submit the enrollment form with the given student name."""
        enroll = self.find_element_by_name('form01')
        enroll.send_keys(name)
        enroll.submit()
        return RegisterPage(self._tester)

    def verify_enrollment(self):
        """Assert the post-registration confirmation text is shown."""
        self._tester.assertTrue(
            'Thank you for registering' in self.find_element_by_css_selector(
                '.gcb-top-content').text)
        return self

    def click_course(self):
        self.find_element_by_link_text('Course').click()
        return RootPage(self._tester)
class AnnouncementsPage(PageObject):
    """Page object to model the announcements page."""

    def click_add_new(self):
        self.find_element_by_css_selector(
            '#gcb-add-announcement > button').click()
        return AnnouncementsEditorPage(self._tester)

    def verify_announcement(self, title=None, date=None, body=None):
        """Verify that the announcement has the given fields."""
        # Only the fields passed are checked; each reads the first (title,
        # date) or second (body) matching element of the first announcement.
        if title:
            self._tester.assertEquals(
                title, self._tester.driver.find_elements_by_css_selector(
                    'div.gcb-aside h2')[0].text)
        if date:
            self._tester.assertEquals(
                date, self._tester.driver.find_elements_by_css_selector(
                    'div.gcb-aside p')[0].text)
        if body:
            self._tester.assertEquals(
                body, self._tester.driver.find_elements_by_css_selector(
                    'div.gcb-aside p')[1].text)
        return self
class AnnouncementsEditorPage(EditorPageObject):
    """Page to model the announcements editor."""

    def _fill(self, field_name, value):
        # Replace the named form field's contents with the given value.
        field = self.find_element_by_name(field_name)
        field.clear()
        field.send_keys(value)

    def enter_fields(self, title=None, date=None, body=None):
        """Enter title, date, and body into the announcement form."""
        if title:
            self._fill('title', title)
        if date:
            self._fill('date', date)
        if body:
            self._fill('html', body)
        return self

    def click_close(self):
        return self._close_and_return_to(AnnouncementsPage)
class LoginPage(PageObject):
    """Page object to model the interactions with the login page."""

    def login(self, login, admin=False):
        """Log in with the given email, optionally ticking the admin box."""
        email_field = self.find_element_by_id('email')
        email_field.clear()
        email_field.send_keys(login)
        if admin:
            self.find_element_by_id('admin').click()
        self.find_element_by_id('submit-login').click()
        return RootPage(self._tester)
class DashboardPage(PageObject):
    """Page object to model the interactions with the dashboard landing page."""

    def load(self, base_url, name):
        self._tester.driver.get('/'.join([base_url, name, 'dashboard']))
        return self

    def verify_read_only_course(self):
        self._tester.assertEquals(
            'Read-only course.',
            self.find_element_by_id('gcb-butterbar-message').text)
        return self

    def verify_selected_tab(self, tab_text):
        tab = self.find_element_by_link_text(tab_text)
        self._tester.assertEquals('selected', tab.get_attribute('class'))
        # Consistency fix: return self like the other verify_* methods so
        # calls can be chained (previously returned None).
        return self

    def verify_not_publicly_available(self):
        self._tester.assertEquals(
            'The course is not publicly available.',
            self.find_element_by_id('gcb-butterbar-message').text)
        return self

    def click_import(self):
        self.find_element_by_css_selector('#import_course').click()
        return Import(self._tester)

    def click_add_unit(self):
        self.find_element_by_css_selector('#add_unit > button').click()
        return AddUnit(self._tester)

    def click_add_assessment(self):
        self.find_element_by_css_selector('#add_assessment > button').click()
        return AddAssessment(self._tester)

    def click_add_link(self):
        self.find_element_by_css_selector('#add_link > button').click()
        return AddLink(self._tester)

    def click_add_lesson(self):
        self.find_element_by_css_selector('#add_lesson > button').click()
        return AddLesson(self._tester)

    def click_organize(self):
        self.find_element_by_css_selector('#edit_unit_lesson').click()
        return Organize(self._tester)

    def click_assets(self):
        self.find_element_by_link_text('Assets').click()
        return AssetsPage(self._tester)

    def verify_course_outline_contains_unit(self, unit_title):
        # find_element_by_link_text raises if the unit link is absent.
        self.find_element_by_link_text(unit_title)
        return self

    def click_on_course_outline_components(self, title):
        self.find_element_by_link_text(title).click()
        return LessonPage(self._tester)
class LessonPage(RootPage):
    """Page object for viewing course content."""

    def submit_answer_for_mc_question_and_verify(self, question_text, answer):
        """Answer the standalone MC question matching question_text.

        Clicks the radio button for the matching choice, submits via the
        check-answer button, and returns self on correct feedback; raises
        on incorrect feedback.

        NOTE(review): if no question matches question_text, the loop falls
        through and the method returns None silently — confirm intended.
        """
        questions = self._tester.driver.find_elements_by_css_selector(
            '.qt-mc-question.qt-standalone')
        for question in questions:
            if (question.find_element_by_css_selector('.qt-question').text ==
                    question_text):
                choices = question.find_elements_by_css_selector(
                    '.qt-choices > *')
                for choice in choices:
                    if choice.text == answer:
                        choice.find_element_by_css_selector(
                            'input[type="radio"]').click()
                question.find_element_by_css_selector(
                    '.qt-check-answer').click()
                if (question.find_element_by_css_selector(
                        '.qt-feedback').text ==
                        'Yes, the answer is correct.'):
                    return self
                else:
                    raise Exception('Incorrect answer submitted')
class AssetsPage(PageObject):
    """Page object for the dashboard's assets tab."""

    def click_upload(self):
        self.find_element_by_link_text('Upload to assets/img').click()
        return AssetsEditorPage(self._tester)

    def verify_image_file_by_name(self, name):
        self.find_element_by_link_text(name)  # throw exception if not found
        return self

    def verify_no_image_file_by_name(self, name):
        """Verify that no image file with the given name is listed.

        Bug fix: the previous implementation was a copy of
        verify_image_file_by_name and so PASSED when the file was present.
        """
        try:
            self.find_element_by_link_text(name)
        except exceptions.NoSuchElementException:
            return self
        raise AssertionError('Unexpectedly found image file "%s"' % name)

    def click_edit_image(self, name):
        self.find_element_by_link_text(
            name).parent.find_element_by_link_text('[Edit]').click()
        return ImageEditorPage(self._tester)

    def click_add_short_answer(self):
        self.find_element_by_link_text('Add Short Answer').click()
        return ShortAnswerEditorPage(self._tester)

    def click_add_multiple_choice(self):
        self.find_element_by_link_text('Add Multiple Choice').click()
        return MultipleChoiceEditorPage(self._tester)

    def click_add_question_group(self):
        self.find_element_by_link_text('Add Question Group').click()
        return QuestionEditorPage(self._tester)

    def click_edit_short_answer(self, name):
        raise NotImplementedError

    def click_edit_mc_question(self):
        raise NotImplementedError

    def verify_question_exists(self, description):
        """Verifies question description exists on list of question banks."""
        lis = self._tester.driver.find_elements_by_css_selector(
            '#gcb-main-content > ol > li')
        for li in lis:
            try:
                self._tester.assertEquals(
                    description + ' [Edit]', li.text)
                return self
            except AssertionError:
                continue
        raise AssertionError(description + ' not found')

    def click_outline(self):
        self.find_element_by_link_text('Outline').click()
        return DashboardPage(self._tester)
class AssetsEditorPage(DashboardEditor):
    """Page object for upload image page."""

    def select_file(self, path):
        """Type the local file path into the upload file input."""
        self.find_element_by_name('file').send_keys(path)
        return self

    def click_upload_and_expect_saved(self):
        self.find_element_by_link_text('Upload').click()
        self.expect_status_message_to_be('Saved.')
        # Page automatically redirects after successful save.
        wait.WebDriverWait(self._tester.driver, 15).until(
            ec.title_contains('Assets'))
        return AssetsPage(self._tester)
class QuestionEditorPage(EditorPageObject):
    """Abstract superclass for page objects for add/edit questions pages."""

    def _replace_field_text(self, field_name, text):
        # Clear the named form field, then type the replacement text.
        field = self.find_element_by_name(field_name)
        field.clear()
        field.send_keys(text)

    def set_question(self, question):
        """Replace the question text."""
        self._replace_field_text('question', question)
        return self

    def set_description(self, description):
        """Replace the question description."""
        self._replace_field_text('description', description)
        return self

    def click_close(self):
        return self._close_and_return_to(AssetsPage)
class MultipleChoiceEditorPage(QuestionEditorPage):
    """Page object for editing multiple choice questions."""

    def click_add_a_choice(self):
        self.find_element_by_link_text('Add a choice').click()
        return self

    def set_answer(self, n, answer):
        """Replace the text of the n-th answer choice."""
        # Choice RTE fields use odd ids: choice n lives at gcbRteField-(2n+1).
        field = self.find_element_by_id('gcbRteField-%d' % (2 * n + 1))
        field.clear()
        field.send_keys(answer)
        return self

    def click_allow_only_one_selection(self):
        raise NotImplementedError

    def click_allow_multiple_selections(self):
        raise NotImplementedError
class ShortAnswerEditorPage(QuestionEditorPage):
    """Page object for editing short answer questions."""

    def click_add_an_answer(self):
        self.find_element_by_link_text('Add an answer').click()
        return self

    def set_score(self, n, score):
        """Set the score for the n-th grader row."""
        score_el = self.find_element_by_name('graders[%d]score' % n)
        score_el.clear()
        # Bug fix: WebElement has send_keys, not send_key.
        score_el.send_keys(score)
        # Return self for chaining, consistent with sibling setters.
        return self

    def set_response(self, n, response):
        """Set the response text for the n-th grader row."""
        response_el = self.find_element_by_name('graders[%d]response' % n)
        response_el.clear()
        # Bug fix: WebElement has send_keys, not send_key.
        response_el.send_keys(response)
        return self

    def click_delete_this_answer(self, n):
        raise NotImplementedError
class ImageEditorPage(EditorPageObject):
    """Page object for the dashboard's view/delete image page."""

    def click_delete(self):
        self.find_element_by_link_text('Delete').click()
        return self

    def confirm_delete(self):
        # Accept the browser confirmation dialog raised by Delete.
        # NOTE(review): switch_to_alert() is deprecated in newer selenium in
        # favor of driver.switch_to.alert — update if selenium is upgraded.
        self._tester.driver.switch_to_alert().accept()
        return AssetsPage(self._tester)
class AddUnit(DashboardEditor):
    """Page object to model the dashboard's add unit editor."""

    def __init__(self, tester):
        super(AddUnit, self).__init__(tester)
        # The editor confirms creation via the butterbar before it is usable.
        self.expect_status_message_to_be('New unit has been created and saved.')

    def set_title(self, title):
        """Replace the unit title with the given text."""
        field = self.find_element_by_name('title')
        field.clear()
        field.send_keys(title)
        return self
class Import(DashboardEditor):
    """Page object to model the dashboard's course import editor.

    (Previous docstring was copy-pasted from the unit/lesson organizer;
    this page is reached via DashboardPage.click_import.)
    """
    pass
class AddAssessment(DashboardEditor):
    """Page object to model the dashboard's assessment editor."""

    def __init__(self, tester):
        super(AddAssessment, self).__init__(tester)
        # Wait for the butterbar to confirm creation before proceeding.
        self.expect_status_message_to_be(
            'New assessment has been created and saved.')
class AddLink(DashboardEditor):
    """Page object to model the dashboard's link editor."""

    def __init__(self, tester):
        super(AddLink, self).__init__(tester)
        # Creation happens on navigation; just confirm the status banner.
        expected = 'New link has been created and saved.'
        self.expect_status_message_to_be(expected)
class AddLesson(DashboardEditor):
    """Page object to model the dashboard's lesson editor."""

    # DOM ids of the rich-text editor body and its backing textarea.
    RTE_EDITOR_ID = 'gcbRteField-0_editor'
    RTE_TEXTAREA_ID = 'gcbRteField-0'

    def __init__(self, tester):
        super(AddLesson, self).__init__(tester)
        # Instanceid list captured by take_snapshot_of_instanceid_list()
        # for later comparison.
        self.instanceid_list_snapshot = []
        self.expect_status_message_to_be(
            'New lesson has been created and saved.')

    def click_rich_text(self):
        """Switch the lesson body editor into rich-text mode."""
        el = self.find_element_by_css_selector('div.rte-control')
        self._tester.assertEqual('Rich Text', el.text)
        el.click()
        # Wait for the RTE to finish initializing before returning.
        wait.WebDriverWait(self._tester.driver, 15).until(
            ec.element_to_be_clickable((by.By.ID, AddLesson.RTE_EDITOR_ID)))
        return self

    def click_plain_text(self):
        """Switch the lesson body editor back into plain HTML mode."""
        el = self.find_element_by_css_selector('div.rte-control')
        self._tester.assertEqual('<HTML>', el.text)
        el.click()
        return self

    def send_rte_text(self, text):
        """Type text into the rich-text editor body."""
        self.find_element_by_id('gcbRteField-0_editor').send_keys(text)
        return self

    def select_rte_custom_tag_type(self, option_text):
        """Select the given option from the custom content type selector."""
        self._ensure_rte_iframe_ready_and_switch_to_it()
        select_tag = self.find_element_by_name('tag')
        for option in select_tag.find_elements_by_tag_name('option'):
            if option.text == option_text:
                option.click()
                break
        else:
            # No matching <option>: fail the enclosing test.
            self._tester.fail('No option "%s" found' % option_text)
        # Selecting a type reloads the lightbox; wait until it is usable.
        wait.WebDriverWait(self._tester.driver, 15).until(
            ec.element_to_be_clickable(
                (by.By.PARTIAL_LINK_TEXT, 'Close')))
        self._tester.driver.switch_to_default_content()
        return self

    def click_rte_add_custom_tag(self):
        """Open the lightbox that inserts a Course Builder component."""
        self.find_element_by_link_text(
            'Insert Google Course Builder component').click()
        return self

    def set_lesson_title(self, lesson_title):
        """Replace the lesson's title field contents."""
        title_el = self.find_element_by_name('title')
        title_el.clear()
        title_el.send_keys(lesson_title)
        return self

    def doubleclick_rte_element(self, elt_css_selector):
        """Double-click an element inside the rich-text editor iframe."""
        self._tester.driver.switch_to_frame(AddLesson.RTE_EDITOR_ID)
        target = self.find_element_by_css_selector(elt_css_selector)
        action_chains.ActionChains(
            self._tester.driver).double_click(target).perform()
        self._tester.driver.switch_to_default_content()
        return self

    def _ensure_rte_iframe_ready_and_switch_to_it(self):
        """Wait for the modal editor iframe, then make it the active frame."""
        wait.WebDriverWait(self._tester.driver, 15).until(
            ec.frame_to_be_available_and_switch_to_it('modal-editor-iframe'))
        # Ensure inputEx has initialized too
        wait.WebDriverWait(self._tester.driver, 15).until(
            ec.element_to_be_clickable(
                (by.By.PARTIAL_LINK_TEXT, 'Close')))

    def set_rte_lightbox_field(self, field_css_selector, value):
        """Clear and retype a field inside the RTE lightbox iframe."""
        self._ensure_rte_iframe_ready_and_switch_to_it()
        field = self.find_element_by_css_selector(field_css_selector)
        field.clear()
        field.send_keys(value)
        self._tester.driver.switch_to_default_content()
        return self

    def ensure_rte_lightbox_field_has_value(self, field_css_selector, value):
        """Assert a lightbox field currently holds the given value."""
        self._ensure_rte_iframe_ready_and_switch_to_it()
        self._tester.assertEqual(
            value,
            self.find_element_by_css_selector(
                field_css_selector).get_attribute('value'))
        self._tester.driver.switch_to_default_content()
        return self

    def click_rte_save(self):
        """Press 'Save' in the RTE lightbox and return to the main page."""
        self._ensure_rte_iframe_ready_and_switch_to_it()
        self.find_element_by_link_text('Save').click()
        self._tester.driver.switch_to_default_content()
        return self

    def _get_rte_contents(self):
        """Return the raw HTML currently held by the RTE's textarea."""
        return self.find_element_by_id(
            AddLesson.RTE_TEXTAREA_ID).get_attribute('value')

    def _get_instanceid_list(self):
        """Returns a list of the instanceid attrs in the lesson body."""
        # Scrape instanceid="..." attribute values out of the raw HTML by
        # splitting on the attribute prefix and reading up to the next quote.
        html = self._get_rte_contents()
        html_list = html.split(' instanceid="')
        instanceid_list = []
        for item in html_list[1:]:
            closing_quote_ind = item.find('"')
            instanceid_list.append(item[:closing_quote_ind])
        return instanceid_list

    def ensure_instanceid_count_equals(self, value):
        """Assert the lesson body contains exactly `value` components."""
        self._tester.assertEqual(value, len(self._get_instanceid_list()))
        return self

    def take_snapshot_of_instanceid_list(self):
        """Remember the current instanceid list for a later comparison."""
        self.instanceid_list_snapshot = self._get_instanceid_list()
        return self

    def ensure_instanceid_list_matches_last_snapshot(self):
        """Assert the instanceid list is unchanged since the last snapshot."""
        self._tester.assertEqual(
            self.instanceid_list_snapshot, self._get_instanceid_list())
        return self

    def ensure_lesson_body_textarea_matches_regex(self, regex):
        """Assert the raw lesson body HTML matches the given regex."""
        rte_contents = self._get_rte_contents()
        self._tester.assertRegexpMatches(rte_contents, regex)
        return self
class Organize(DashboardEditor):
    """Page object to model the dashboard's unit/lesson organizer."""
    # No page-specific behavior; everything is inherited from DashboardEditor.
    pass
class AdminPage(PageObject):
    """Page object to model interactions with the admin landing page."""

    def click_add_course(self):
        """Press 'Add Course' and open the course creation editor."""
        self.find_element_by_id('add_course').click()
        return AddCourseEditorPage(self._tester)

    def click_settings(self):
        """Follow the 'Settings' link to the admin settings page."""
        self.find_element_by_link_text('Settings').click()
        return AdminSettingsPage(self._tester)
class AdminSettingsPage(PageObject):
    """Page object for the admin settings."""

    def click_override_admin_user_emails(self):
        """Open the config-override editor via the first gcb button."""
        buttons = self._tester.driver.find_elements_by_css_selector(
            'button.gcb-button')
        buttons[0].click()
        return ConfigPropertyOverridePage(self._tester)

    def verify_admin_user_emails_contains(self, email):
        """Assert the admin-emails config row mentions the given email."""
        rows = self._tester.driver.find_elements_by_css_selector(
            'table.gcb-config tr')
        cells = rows[1].find_elements_by_css_selector('td')
        self._tester.assertTrue(email in cells[1].text)
class ConfigPropertyOverridePage(EditorPageObject):
    """Page object for the admin property override editor."""

    def set_value(self, value):
        """Type the override value into the 'value' field (no clearing)."""
        value_field = self.find_element_by_name('value')
        value_field.send_keys(value)
        return self

    def click_close(self):
        """Close the editor and return to the admin settings page."""
        return self._close_and_return_to(AdminSettingsPage)
class AddCourseEditorPage(EditorPageObject):
    """Page object for the dashboards' add course page."""

    def set_fields(self, name=None, title=None, email=None):
        """Populate the fields in the add course page.

        All three fields are cleared first; only truthy values are typed
        back in afterwards.
        """
        elements = {}
        for field_name in ('name', 'title', 'admin_email'):
            elements[field_name] = self.find_element_by_name(field_name)
            elements[field_name].clear()
        if name:
            elements['name'].send_keys(name)
        if title:
            elements['title'].send_keys(title)
        if email:
            elements['admin_email'].send_keys(email)
        return self

    def click_close(self):
        """Close the editor and land back on the admin page."""
        return self._close_and_return_to(AdminPage)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance test for a peer review system.
WARNING! Use this script to test load Course Builder. This is very dangerous
feature, be careful, because anyone can impersonate super user of your Course
Builder instance; use only if you have to perform specific load testing
Keep in mind:
- when repeatedly running tests and creating new test namespaces,
flush memcache
Here is how to run:
- update /controllers/sites.py and enable CAN_IMPERSONATE
- navigate to the root directory of the app
- run a command line by typing:
python tests/integration/load_test.py \
--thread_count=5 \
--start_uid=1 \
http://mycourse.appspot.com
"""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import argparse
import cookielib
import json
import logging
import random
import re
import sys
import threading
import time
import urllib
import urllib2
# The unit id for the peer review assignment in the default course.
LEGACY_REVIEW_UNIT_ID = 'ReviewAssessmentExample'

# command line arguments parser
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
    'base_url', help=('Base URL of the course you want to test'), type=str)
PARSER.add_argument(
    '--start_uid',
    help='Initial value for unique thread identifier.', default=1, type=int)
PARSER.add_argument(
    '--thread_count',
    help='Number of concurrent threads for executing the test.',
    default=1, type=int)
# Each (iteration, thread) pair impersonates a distinct bot user, so total
# simulated users = thread_count * iteration_count.
PARSER.add_argument(
    '--iteration_count',
    help='Number of iterations for executing the test. Each thread of each '
    'iteration acts as a unique user with the uid equal to:'
    'start_uid + thread_count * iteration_index.',
    default=1, type=int)
def assert_contains(needle, haystack):
    """Raise if `needle` does not occur in `haystack`.

    Bug fix: the original passed the format arguments as extra positional
    args to Exception() (logging style), so the message was never
    interpolated. Use % formatting instead.
    """
    if needle not in haystack:
        raise Exception('Expected to find term: %s\n%s' % (needle, haystack))
def assert_does_not_contain(needle, haystack):
    """Raise if `needle` occurs in `haystack`.

    Bug fix: use % interpolation; the original passed the format args as
    extra positional args to Exception(), leaving the message unformatted.
    """
    if needle in haystack:
        raise Exception(
            'Did not expect to find term: %s\n%s' % (needle, haystack))
def assert_equals(expected, actual):
    """Raise if `expected` != `actual`.

    Bug fix: use % interpolation; the original passed the format args as
    extra positional args to Exception(), leaving the message unformatted.
    """
    if expected != actual:
        raise Exception(
            'Expected equality of %s and %s.' % (expected, actual))
class WebSession(object):
    """A class that allows navigation of web pages keeping cookie session."""

    # Guards all the class-level counters below; sessions run on many threads.
    PROGRESS_LOCK = threading.Lock()

    # Retry policy for "soft" HTTP 500 errors (frontend instance starvation).
    MAX_RETRIES = 3
    RETRY_SLEEP_SEC = 3

    # Aggregate request counters shared across every WebSession instance.
    GET_COUNT = 0
    POST_COUNT = 0
    RETRY_COUNT = 0
    # Log progress once per this many requests (reset to thread_count by
    # run_all()).
    PROGRESS_BATCH = 10
    # Request-duration buckets: [>30s, >15s, >7s, >3s, >1s, <=1s].
    RESPONSE_TIME_HISTOGRAM = [0, 0, 0, 0, 0, 0]

    def __init__(self, uid, common_headers=None):
        """Creates a session.

        Args:
            uid: unique identifier of the actor owning this session.
            common_headers: dict. Headers added to every GET/POST request.
        """
        if common_headers is None:
            common_headers = {}
        self.uid = uid
        self.common_headers = common_headers
        # Cookie jar + opener so successive requests share session cookies.
        self.cj = cookielib.CookieJar()
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))

    @classmethod
    def increment_duration_bucket(cls, index):
        # Caller must hold PROGRESS_LOCK (see open()).
        cls.RESPONSE_TIME_HISTOGRAM[index] += 1

    @classmethod
    def update_duration(cls, duration):
        # Classify the request duration (seconds) into its SLA bucket.
        if duration > 30:
            cls.increment_duration_bucket(0)
        elif duration > 15:
            cls.increment_duration_bucket(1)
        elif duration > 7:
            cls.increment_duration_bucket(2)
        elif duration > 3:
            cls.increment_duration_bucket(3)
        elif duration > 1:
            cls.increment_duration_bucket(4)
        else:
            cls.increment_duration_bucket(5)

    @classmethod
    def log_progress(cls, force=False):
        """Logs counters every PROGRESS_BATCH requests, or when forced."""
        update = ((cls.GET_COUNT + cls.POST_COUNT) % (
            cls.PROGRESS_BATCH) == 0)
        if update or force:
            logging.info(
                'GET/POST:[%s, %s], RETRIES:[%s], SLA:%s',
                cls.GET_COUNT, cls.POST_COUNT, cls.RETRY_COUNT,
                cls.RESPONSE_TIME_HISTOGRAM)

    def get_cookie_value(self, name):
        """Returns the value of the named cookie, or None if not set."""
        for cookie in self.cj:
            if cookie.name == name:
                return cookie.value
        return None

    def is_soft_error(self, http_error):
        """Checks if HTTPError is due to starvation of frontend instances."""
        body = http_error.fp.read()

        # this is the text specific to the front end instance starvation, which
        # is a retriable error for both GET and POST; normal HTTP error 500 has
        # this specific text '<h1>500 Internal Server Error</h1>'
        if http_error.code == 500 and '<h1>Error: Server Error</h1>' in body:
            return True

        logging.error(
            'Non-retriable HTTP %s error:\n%s', http_error.code, body)
        return False

    def open(self, request, hint):
        """Executes any HTTP request, retrying soft errors with a backoff.

        Args:
            request: urllib2.Request to execute.
            hint: string describing the request; used in error logs only.

        Returns:
            The response object from urllib2.

        Raises:
            urllib2.HTTPError: after MAX_RETRIES retries, or on any
                non-retriable error.
        """
        start_time = time.time()
        try:
            try_count = 0
            while True:
                try:
                    return self.opener.open(request)
                except urllib2.HTTPError as he:
                    if (
                            try_count < WebSession.MAX_RETRIES and
                            self.is_soft_error(he)):
                        try_count += 1
                        with WebSession.PROGRESS_LOCK:
                            WebSession.RETRY_COUNT += 1
                        time.sleep(WebSession.RETRY_SLEEP_SEC)
                        continue
                    raise he
        except Exception as e:
            logging.info(
                'Error in session %s executing: %s', self.uid, hint)
            raise e
        finally:
            # Record the total wall time, including any retries and sleeps.
            with WebSession.PROGRESS_LOCK:
                self.update_duration(time.time() - start_time)

    def get(self, url, expected_code=200):
        """HTTP GET."""
        with WebSession.PROGRESS_LOCK:
            WebSession.GET_COUNT += 1
            self.log_progress()

        request = urllib2.Request(url)
        for key, value in self.common_headers.items():
            request.add_header(key, value)
        response = self.open(request, 'GET %s' % url)
        assert_equals(expected_code, response.code)
        return response.read()

    def post(self, url, args_dict, expected_code=200):
        """HTTP POST."""
        with WebSession.PROGRESS_LOCK:
            WebSession.POST_COUNT += 1
            self.log_progress()

        data = None
        if args_dict:
            data = urllib.urlencode(args_dict)

        request = urllib2.Request(url, data)
        for key, value in self.common_headers.items():
            request.add_header(key, value)
        response = self.open(request, 'POST %s' % url)
        assert_equals(expected_code, response.code)
        return response.read()
class TaskThread(threading.Thread):
    """Runs a task in a separate thread."""

    def __init__(self, func, name=None):
        """Creates the thread wrapping `func`; call start() to run it."""
        super(TaskThread, self).__init__()
        self.func = func
        self.exception = None
        self.name = name

    @classmethod
    def start_all_tasks(cls, tasks):
        """Starts all tasks."""
        for task in tasks:
            task.start()

    @classmethod
    def check_all_tasks(cls, tasks):
        """Joins all tasks and fails if any of them raised an exception."""
        failed_count = 0
        for task in tasks:
            while True:
                # Timeouts should happen after 30 seconds.
                task.join(30)
                # Bug fix: isAlive() was removed in Python 3.9; is_alive()
                # exists on Python 2.6+ as well.
                if task.is_alive():
                    logging.info('Still waiting for: %s.', task.name)
                    continue
                else:
                    break
            if task.exception:
                failed_count += 1
        if failed_count:
            # Bug fix: interpolate the count; the original passed it as an
            # extra positional arg to Exception(), leaving '%s' literal.
            raise Exception('Tasks failed: %s' % failed_count)

    @classmethod
    def execute_task_list(cls, tasks):
        """Starts all tasks and checks the results."""
        cls.start_all_tasks(tasks)
        cls.check_all_tasks(tasks)

    def run(self):
        try:
            self.func()
        except Exception as e:  # pylint: disable-msg=broad-except
            logging.error('Error in %s: %s', self.name, e)
            # Bug fix: check_all_tasks() inspects self.exception, but the
            # original only recorded self.exc_info, so failed tasks were
            # never counted.
            self.exception = e
            self.exc_info = sys.exc_info()
            # Bare raise preserves the original traceback and is valid on
            # both Python 2 and 3, unlike the 3-arg raise form.
            raise
class PeerReviewLoadTest(object):
    """A peer review load test."""

    def __init__(self, base_url, uid):
        """Sets up one actor.

        Args:
            base_url: string. Root URL of the course under test.
            uid: int. Unique id of this actor; used to build a fake identity.
        """
        self.uid = uid
        self.host = base_url

        # this is an impersonation identity for the actor thread
        self.email = 'load_test_bot_%s@example.com' % self.uid
        self.name = 'Load Test Bot #%s' % self.uid

        # begin web session
        impersonate_header = {
            'email': self.email, 'user_id': u'impersonation-%s' % self.uid}
        self.session = WebSession(
            uid=uid,
            common_headers={'Gcb-Impersonate': json.dumps(impersonate_header)})

    def run(self):
        """Full actor scenario: register, submit, then complete two reviews."""
        self.register_if_has_to()
        self.submit_peer_review_assessment_if_possible()
        while self.count_completed_reviews() < 2:
            self.request_and_do_a_review()

    def get_hidden_field(self, name, body):
        """Scrapes the value of a hidden form input named `name` from HTML."""
        # The "\s*" denotes arbitrary whitespace; sometimes, this tag is split
        # across multiple lines in the HTML.

        # pylint: disable-msg=anomalous-backslash-in-string
        reg = re.compile(
            '<input type="hidden" name="%s"\s* value="([^"]*)">' % name)
        # pylint: enable-msg=anomalous-backslash-in-string

        return reg.search(body).group(1)

    def get_js_var(self, name, body):
        """Scrapes a single-quoted JavaScript variable assignment from HTML."""
        reg = re.compile('%s = \'([^\']*)\';\n' % name)
        return reg.search(body).group(1)

    def get_draft_review_url(self, body):
        """Returns the URL of a draft review on the review dashboard."""
        # The "\s*" denotes arbitrary whitespace; sometimes, this tag is split
        # across multiple lines in the HTML.

        # pylint: disable-msg=anomalous-backslash-in-string
        reg = re.compile(
            '<a href="([^"]*)">Assignment [0-9]+</a>\s*\(Draft\)')
        # pylint: enable-msg=anomalous-backslash-in-string

        result = reg.search(body)
        if result is None:
            return None
        return result.group(1)

    def register_if_has_to(self):
        """Performs student registration action.

        Returns:
            True if the actor registered now, False if already registered.
        """
        body = self.session.get('%s/' % self.host)
        assert_contains('Logout', body)
        if 'href="register"' not in body:
            # Already registered: just verify our profile page is consistent.
            body = self.session.get('%s/student/home' % self.host)
            assert_contains(self.email, body)
            assert_contains(self.name, body)
            return False

        body = self.session.get('%s/register' % self.host)
        xsrf_token = self.get_hidden_field('xsrf_token', body)

        data = {'xsrf_token': xsrf_token, 'form01': self.name}
        body = self.session.post('%s/register' % self.host, data)

        body = self.session.get('%s/' % self.host)
        assert_contains('Logout', body)
        assert_does_not_contain('href="register"', body)

        return True

    def submit_peer_review_assessment_if_possible(self):
        """Submits the peer review assessment."""
        body = self.session.get(
            '%s/assessment?name=%s' % (self.host, LEGACY_REVIEW_UNIT_ID))
        assert_contains('You may only submit this assignment once', body)
        if 'Submitted assignment' in body:
            # The assignment was already submitted.
            return True

        assessment_xsrf_token = self.get_js_var('assessmentXsrfToken', body)

        # Canned answers; index 1 embeds the uid so submissions differ.
        answers = [
            {'index': 0, 'type': 'regex',
             'value': 'Answer 0 by %s' % self.email},
            {'index': 1, 'type': 'choices', 'value': self.uid},
            {'index': 2, 'type': 'regex',
             'value': 'Answer 2 by %s' % self.email},
        ]

        data = {
            'answers': json.dumps(answers),
            'assessment_type': LEGACY_REVIEW_UNIT_ID,
            'score': 0,
            'xsrf_token': assessment_xsrf_token,
        }

        body = self.session.post('%s/answer' % self.host, data)
        assert_contains('Review peer assignments', body)
        return True

    def request_and_do_a_review(self):
        """Request a new review, wait for it to be granted, then submit it."""
        review_dashboard_url = (
            '%s/reviewdashboard?unit=%s' % (self.host, LEGACY_REVIEW_UNIT_ID))

        completed = False
        while not completed:
            # Get peer review dashboard and inspect it.
            body = self.session.get(review_dashboard_url)
            assert_contains('Assignments for your review', body)
            assert_contains('Review a new assignment', body)

            # Pick first pending review if any or ask for a new review.
            draft_review_url = self.get_draft_review_url(body)
            if draft_review_url:  # There is a pending review. Choose it.
                body = self.session.get(
                    '%s/%s' % (self.host, draft_review_url))
            else:  # Request a new assignment to review.
                assert_contains('xsrf_token', body)
                xsrf_token = self.get_hidden_field('xsrf_token', body)
                data = {
                    'unit_id': LEGACY_REVIEW_UNIT_ID,
                    'xsrf_token': xsrf_token,
                }
                body = self.session.post(review_dashboard_url, data)

                # It is possible that we fail to get a new review because the
                # old one is now visible, but was not yet visible when we asked
                # for the dashboard page.
                if (
                        'You must complete all assigned reviews before you '
                        'can request a new one.' in body):
                    continue

            # It is possible that no submissions available for review yet.
            # Wait for a while until they become available on the dashboard
            # page.
            if 'Back to the review dashboard' not in body:
                assert_contains('Assignments for your review', body)
                # Sleep for a random number of seconds between 1 and 4.
                time.sleep(1.0 + random.random() * 3.0)
                continue

            # Submit the review.
            review_xsrf_token = self.get_js_var('assessmentXsrfToken', body)
            answers = [
                {'index': 0, 'type': 'choices', 'value': 0},
                {'index': 1, 'type': 'regex',
                 'value': 'Review 0 by %s' % self.email},
            ]
            data = {
                'answers': json.dumps(answers),
                'assessment_type': None,
                'is_draft': 'false',
                'key': self.get_js_var('assessmentGlobals.key', body),
                'score': 0,
                'unit_id': LEGACY_REVIEW_UNIT_ID,
                'xsrf_token': review_xsrf_token,
            }
            body = self.session.post('%s/review' % self.host, data)
            assert_contains('Your review has been submitted', body)
            return True

    def count_completed_reviews(self):
        """Counts the number of reviews that the actor has completed."""
        review_dashboard_url = (
            '%s/reviewdashboard?unit=%s' % (self.host, LEGACY_REVIEW_UNIT_ID))
        body = self.session.get(review_dashboard_url)
        # Each finished review is rendered with a '(Completed)' marker.
        num_completed = body.count('(Completed)')
        return num_completed
def run_all(args):
    """Runs test scenario in multiple threads.

    Args:
        args: argparse.Namespace with base_url, start_uid, thread_count and
            iteration_count (see PARSER above).

    Raises:
        Exception: if thread_count is out of range or any iteration failed.
    """
    if args.thread_count < 1 or args.thread_count > 256:
        raise Exception('Please use between 1 and 256 threads.')

    start_time = time.time()
    logging.info('Started testing: %s', args.base_url)
    logging.info('base_url: %s', args.base_url)
    logging.info('start_uid: %s', args.start_uid)
    logging.info('thread_count: %s', args.thread_count)
    logging.info('iteration_count: %s', args.iteration_count)
    logging.info('SLAs are [>30s, >15s, >7s, >3s, >1s, <1s]')
    try:
        for iteration_index in range(0, args.iteration_count):
            logging.info('Started iteration: %s', iteration_index)
            tasks = []
            # Log progress once per batch of thread_count requests.
            WebSession.PROGRESS_BATCH = args.thread_count
            for index in range(0, args.thread_count):
                # Each (iteration, thread) pair gets a distinct uid so every
                # actor impersonates a unique user.
                test = PeerReviewLoadTest(
                    args.base_url,
                    (
                        args.start_uid +
                        iteration_index * args.thread_count +
                        index))
                task = TaskThread(
                    test.run, name='PeerReviewLoadTest-%s' % index)
                tasks.append(task)
            try:
                TaskThread.execute_task_list(tasks)
            except Exception as e:
                logging.info('Failed iteration: %s', iteration_index)
                raise e
    finally:
        # Always emit the final counters, even when an iteration failed.
        WebSession.log_progress(force=True)
        logging.info('Done! Duration (s): %s', time.time() - start_time)
if __name__ == '__main__':
    # INFO-level logging so per-iteration progress and SLA counters show up.
    logging.basicConfig(level=logging.INFO)
    run_all(PARSER.parse_args())
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Course Builder test suite.
This script runs all functional and units test in the Course Builder project.
Here is how to use the script:
- download WebTest Python package from a URL below and put
the files in a folder of your choice, for example: tmp/webtest:
http://pypi.python.org/packages/source/W/WebTest/WebTest-1.4.2.zip
- update your Python path:
PYTHONPATH=$PYTHONPATH:/tmp/webtest
- invoke this test suite from the command line:
# Automatically find and run all Python tests in tests/*.
python tests/suite.py
# Run only tests matching shell glob *_functional_test.py in tests/*.
python tests/suite.py --pattern *_functional_test.py
# Run test method baz in unittest.TestCase Bar found in tests/foo.py.
python tests/suite.py --test_class_name tests.foo.Bar.baz
- review the output to make sure there are no errors or warnings
Good luck!
"""
__author__ = 'Sean Lip'
import argparse
import base64
import os
import shutil
import signal
import subprocess
import sys
import time
import unittest
# The following import is needed in order to add third-party libraries.
import appengine_config # pylint: disable-msg=unused-import
import webtest
from google.appengine.api.search import simple_search_stub
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import deferred
from google.appengine.ext import testbed
# Command-line flags for the test-suite runner.
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
    '--pattern', default='*.py',
    help='shell pattern for discovering files containing tests', type=str)
_PARSER.add_argument(
    '--test_class_name',
    help='optional dotted module name of the test(s) to run', type=str)
_PARSER.add_argument(
    '--integration_server_start_cmd',
    help='script to start an external CB server', type=str)

# Base filesystem location for test data.
TEST_DATA_BASE = '/tmp/experimental/coursebuilder/test-data/'
def empty_environ():
    """Reset request-related environment variables to test defaults."""
    defaults = {
        'AUTH_DOMAIN': 'example.com',
        'SERVER_NAME': 'localhost',
        'HTTP_HOST': 'localhost',
        'SERVER_PORT': '8080',
        'USER_EMAIL': '',
        'USER_ID': '',
    }
    os.environ.update(defaults)
def iterate_tests(test_suite_or_case):
    """Yield every leaf test case contained in 'test_suite_or_case'.

    Anything non-iterable is treated as a single test case and yielded
    as-is; an iterable suite is walked recursively, depth first.
    """
    try:
        members = iter(test_suite_or_case)
    except TypeError:
        # Not a suite: it is a bare test case.
        yield test_suite_or_case
        return
    for member in members:
        for leaf in iterate_tests(member):
            yield leaf
class TestBase(unittest.TestCase):
    """Base class for all Course Builder tests."""

    # Marker tag a test can declare to require the external server below.
    REQUIRES_INTEGRATION_SERVER = 1
    INTEGRATION_SERVER_BASE_URL = 'http://localhost:8081'

    def setUp(self):
        super(TestBase, self).setUp()
        # Map of object -> {symbol_string: original_value}
        self._originals = {}

    def tearDown(self):
        self._unswap_all()
        super(TestBase, self).tearDown()

    def swap(self, source, symbol, new):
        """Swaps out source.symbol for a new value.

        Allows swapping of members and methods:

            myobject.foo = 'original_foo'
            self.swap(myobject, 'foo', 'bar')
            self.assertEqual('bar', myobject.foo)
            myobject.baz()  # -> 'original_baz'
            self.swap(myobject, 'baz', lambda: 'quux')
            self.assertEqual('quux', myobject.bar())

        Swaps are automatically undone in tearDown().

        Args:
            source: object. The source object to swap from.
            symbol: string. The name of the symbol to swap.
            new: object. The new value to swap in.
        """
        if source not in self._originals:
            self._originals[source] = {}
        # Bug fix: record the original only on the FIRST swap of a symbol.
        # The previous truthiness test (`if not ...get(symbol, None)`) would
        # re-record on a second swap whenever the original value was falsy
        # (None, 0, '', False), making _unswap_all() restore the first
        # swapped-in value instead of the true original.
        if symbol not in self._originals[source]:
            self._originals[source][symbol] = getattr(source, symbol)
        setattr(source, symbol, new)

    # Allow protected method names. pylint: disable-msg=g-bad-name
    def _unswap_all(self):
        # items() instead of iteritems(): identical iteration on Python 2,
        # and keeps the helper usable under Python 3 as well.
        for source, symbol_to_value in self._originals.items():
            for symbol, value in symbol_to_value.items():
                setattr(source, symbol, value)

    def shortDescription(self):
        """Additional information logged during unittest invocation."""
        # Suppress default logging of docstrings. Instead log name/status only.
        return None
class FunctionalTestBase(TestBase):
    """Base class for functional tests."""

    def setUp(self):
        super(FunctionalTestBase, self).setUp()
        # e.g. TEST_DATA_BASE/tests/functional/tests/MyTestCase.
        module_path = self.__class__.__module__.replace('.', os.sep)
        self.test_tempdir = os.path.join(
            TEST_DATA_BASE, module_path, self.__class__.__name__)
        self.reset_filesystem()

    def tearDown(self):
        self.reset_filesystem(remove_only=True)
        super(FunctionalTestBase, self).tearDown()

    def reset_filesystem(self, remove_only=False):
        """Wipe the per-test scratch dir; recreate it unless remove_only."""
        if os.path.exists(self.test_tempdir):
            shutil.rmtree(self.test_tempdir)
        if not remove_only:
            os.makedirs(self.test_tempdir)
class AppEngineTestBase(FunctionalTestBase):
    """Base class for tests that require App Engine services."""

    def getApp(self):  # pylint: disable-msg=g-bad-name
        """Returns the main application to be tested."""
        # Subclasses must override this to supply their WSGI app.
        raise Exception('Not implemented.')

    def setUp(self):  # pylint: disable-msg=g-bad-name
        """Activates the GAE testbed and all service stubs the tests use."""
        super(AppEngineTestBase, self).setUp()
        empty_environ()

        # setup an app to be tested
        self.testapp = webtest.TestApp(self.getApp())
        self.testbed = testbed.Testbed()
        self.testbed.activate()

        # configure datastore policy to emulate instantaneously and globally
        # consistent HRD; we also patch dev_appserver in main.py to run under
        # the same policy
        policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
            probability=1)

        # declare any relevant App Engine service stubs here
        self.testbed.init_user_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_datastore_v3_stub(consistency_policy=policy)
        self.testbed.init_taskqueue_stub()
        # Keep a handle on the task queue stub so tests can drain it.
        self.taskq = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
        self.testbed.init_urlfetch_stub()

        # TODO(emichael): Fix this when an official stub is created
        self.testbed._register_stub(
            'search', simple_search_stub.SearchServiceStub())

    def tearDown(self):  # pylint: disable-msg=g-bad-name
        self.testbed.deactivate()
        super(AppEngineTestBase, self).tearDown()

    def execute_all_deferred_tasks(self, queue_name='default'):
        """Executes all pending deferred tasks."""
        # Deferred payloads arrive base64-encoded in the task 'body'.
        for task in self.taskq.GetTasks(queue_name):
            deferred.run(base64.b64decode(task['body']))
def create_test_suite(parsed_args):
    """Loads all requested test suites.

    By default, loads all unittest.TestCases found under the project root's
    tests/ directory.

    Args:
        parsed_args: argparse.Namespace. Processed command-line arguments.

    Returns:
        unittest.TestSuite. The test suite populated with all tests to run.
    """
    loader = unittest.TestLoader()
    # A specific dotted name wins over pattern-based discovery.
    if not parsed_args.test_class_name:
        return loader.discover(
            os.path.dirname(__file__), pattern=parsed_args.pattern)
    return loader.loadTestsFromName(parsed_args.test_class_name)
def start_integration_server(integration_server_start_cmd):
    """Launches the external Course Builder server for integration tests.

    Args:
        integration_server_start_cmd: command that starts the server; passed
            straight to subprocess.Popen.

    Returns:
        subprocess.Popen. Handle to the running server process.
    """
    # Parenthesized print behaves identically on Python 2 and is valid
    # Python 3, unlike the bare print statement used before.
    print('Starting external server: %s' % integration_server_start_cmd)
    server = subprocess.Popen(integration_server_start_cmd)
    time.sleep(3)  # Wait for server to start up
    return server
def stop_integration_server(server):
    """Kills the integration server and its orphaned runtime child process.

    Args:
        server: subprocess.Popen returned by start_integration_server().
    """
    server.kill()  # dev_appserver.py itself.

    # The new dev appserver starts a _python_runtime.py process that isn't
    # captured by start_integration_server and so doesn't get killed. Until it's
    # done, our tests will never complete so we kill it manually.
    # NOTE(review): assumes pgrep matches exactly one pid; no match or
    # multiple matches would make int() raise — confirm this is acceptable
    # for the test environment.
    pid = int(subprocess.Popen(
        ['pgrep', '-f', '_python_runtime.py'], stdout=subprocess.PIPE
    ).communicate()[0][:-1])
    os.kill(pid, signal.SIGKILL)
def fix_sys_path():
    """Fix the sys.path to include GAE extra paths."""
    import dev_appserver  # pylint: disable=C6204

    # dev_appserver.fix_sys_path() prepends GAE paths to sys.path and hides
    # our classes like 'tests' behind other modules that have 'tests'.
    # Here, unlike dev_appserver, we append the path instead of prepending it,
    # so that our classes come first.
    sys.path.extend(dev_appserver.EXTRA_PATHS)
def main():
    """Starts in-process server and runs all test cases in this module."""
    fix_sys_path()
    parsed_args = _PARSER.parse_args()
    test_suite = create_test_suite(parsed_args)

    # Collect the TAGS declared by every test to decide whether an external
    # integration server must be running for this suite.
    all_tags = set()
    for test in iterate_tests(test_suite):
        if hasattr(test, 'TAGS'):
            all_tags.update(test.TAGS)

    server = None
    if TestBase.REQUIRES_INTEGRATION_SERVER in all_tags:
        server = start_integration_server(
            parsed_args.integration_server_start_cmd)

    result = unittest.TextTestRunner(verbosity=2).run(test_suite)

    if server:
        stop_integration_server(server)

    if result.errors or result.failures:
        raise Exception(
            'Test suite failed: %s errors, %s failures of '
            ' %s tests run.' % (
                len(result.errors), len(result.failures), result.testsRun))

    # Imported here rather than at module level — presumably to avoid an
    # import cycle or premature sys.path use; confirm before moving it.
    import tests.functional.actions as actions  # pylint: disable-msg=g-import-not-at-top

    count = len(actions.UNIQUE_URLS_FOUND.keys())
    result.stream.writeln('INFO: Unique URLs found: %s' % count)
    result.stream.writeln('INFO: All %s tests PASSED!' % result.testsRun)
if __name__ == '__main__':
    # Force a predictable default string encoding before any tests run.
    appengine_config.gcb_force_default_encoding('ascii')
    main()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.