# (stray dataset-export artifact removed: markdown table-header fragment, not part of the module)
import gettext
_ = gettext.gettext
class I18n(object):
    """Stub implementation of Cheetah's #i18n macro.

    At this stage the macro simply runs the message through gettext's
    ``_()``; the keyword arguments document the intended eventual
    interface (see the Zope ZPT internationalization-support wiki for
    the ideas behind it).  There is no need to replicate the i18n:name
    attribute from Plone/PTL, as Cheetah placeholders serve the same
    purpose.
    """

    def __init__(self, parser):
        # Nothing to set up yet; the parser is accepted for interface
        # compatibility with the macro framework.
        pass

    def __call__(self,
                 src,                  # the message to translate
                 plural=None,          # plural form of the message
                 n=None,               # name of the '$var' (as a string, not
                                       # $var itself) used to choose between
                                       # singular and plural forms
                 id=None,              # msgid in the translation catalog
                 domain=None,          # translation domain
                 source=None,          # source language
                 target=None,          # a specific target language
                 comment=None,         # a comment to the translation team
                 # args supplied automatically by the parser when the
                 # macro is invoked:
                 parser=None,
                 macros=None,
                 isShortForm=False,
                 EOLCharsInShortForm=None,
                 startPos=None,
                 endPos=None,
                 ):
        """Translate `src` and return it.

        When the macro was written in short form and more source follows,
        the swallowed end-of-line characters are re-appended so the
        template's layout is preserved.
        """
        translated = _(src)
        if isShortForm and endPos < len(parser):
            return translated + EOLCharsInShortForm
        return translated
# (stray dataset-export artifact removed: language-tag fragment, not part of the module)
'''
Compiler classes for Cheetah:
ModuleCompiler aka 'Compiler'
ClassCompiler
MethodCompiler
If you are trying to grok this code start with ModuleCompiler.__init__,
ModuleCompiler.compile, and ModuleCompiler.__getattr__.
'''
import sys
import os
import os.path
from os.path import getmtime, exists
import re
import types
import time
import random
import warnings
import copy
from Cheetah.Version import Version, VersionTuple
from Cheetah.SettingsManager import SettingsManager
from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor
from Cheetah import ErrorCatchers
from Cheetah import NameMapper
from Cheetah.Parser import Parser, ParseError, specialVarRE, \
STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL, SET_MODULE, \
unicodeDirectiveRE, encodingDirectiveRE, escapedNewlineRE
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
# Short aliases for the NameMapper lookup functions and the clock.  These
# exact names (VFFSL / VFSL / VFN / currentTime) are written into the Python
# source this compiler generates (see GenUtils.genNameMapperVar and the
# cache-region code below), so they must exist under these names at runtime.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
class Error(Exception):
    """Base exception raised by this compiler module (e.g. on bad dedents)."""
# Settings format: (key, default, docstring)
_DEFAULT_COMPILER_SETTINGS = [
    ## NameMapper / placeholder evaluation behaviour
    ('useNameMapper', True, 'Enable NameMapper for dotted notation and searchList support'),
    ('useSearchList', True, 'Enable the searchList, requires useNameMapper=True, if disabled, first portion of the $variable is a global, builtin, or local variable that doesn\'t need looking up in the searchList'),
    ('allowSearchListAsMethArg', True, ''),
    ('useAutocalling', True, 'Detect and call callable objects in searchList, requires useNameMapper=True'),
    ('useStackFrames', True, 'Used for NameMapper.valueFromFrameOrSearchList rather than NameMapper.valueFromSearchList'),
    ('useErrorCatcher', False, 'Turn on the #errorCatcher directive for catching NameMapper errors, etc'),
    ## output filtering
    ('alwaysFilterNone', True, 'Filter out None prior to calling the #filter'),
    ('useFilters', True, 'If False, pass output through str()'),
    ('includeRawExprInFilterArgs', True, ''),
    ## module/class generation
    ('useLegacyImportMode', True, 'All #import statements are relocated to the top of the generated Python module'),
    ('prioritizeSearchListOverSelf', False, 'When iterating the searchList, look into the searchList passed into the initializer instead of Template members first'),
    ('autoAssignDummyTransactionToSelf', False, ''),
    ('useKWsDictArgForPassingTrans', True, ''),
    ## formatting of the generated source
    ('commentOffset', 1, ''),
    ('outputRowColComments', True, ''),
    ('includeBlockMarkers', False, 'Wrap #block\'s in a comment in the template\'s output'),
    ('blockMarkerStart', ('\n<!-- START BLOCK: ', ' -->\n'), ''),
    ('blockMarkerEnd', ('\n<!-- END BLOCK: ', ' -->\n'), ''),
    ('defDocStrMsg', 'Autogenerated by Cheetah: The Python-Powered Template Engine', ''),
    ('setup__str__method', False, ''),
    ('mainMethodName', 'respond', ''),
    ('mainMethodNameForSubclasses', 'writeBody', ''),
    ('indentationStep', ' ' * 4, ''),
    ('initialMethIndentLevel', 2, ''),
    ('monitorSrcFile', False, ''),
    ('outputMethodsBeforeAttributes', True, ''),
    ('addTimestampsToCompilerOutput', True, ''),
    ## Customizing the #extends directive
    ('autoImportForExtendsDirective', True, ''),
    ('handlerForExtendsDirective', None, ''),
    ## directive/placeholder enable-disable lists and parse hooks
    ('disabledDirectives', [], 'List of directive keys to disable (without starting "#")'),
    ('enabledDirectives', [], 'List of directive keys to enable (without starting "#")'),
    ('disabledDirectiveHooks', [], 'callable(parser, directiveKey)'),
    ('preparseDirectiveHooks', [], 'callable(parser, directiveKey)'),
    ('postparseDirectiveHooks', [], 'callable(parser, directiveKey)'),
    ('preparsePlaceholderHooks', [], 'callable(parser)'),
    ('postparsePlaceholderHooks', [], 'callable(parser)'),
    ('expressionFilterHooks', [], '''callable(parser, expr, exprType, rawExpr=None, startPos=None), exprType is the name of the directive, "psp" or "placeholder" The filters *must* return the expr or raise an expression, they can modify the expr if needed'''),
    ('templateMetaclass', None, 'Strictly optional, only will work with new-style basecalsses as well'),
    ('i18NFunctionName', 'self.i18n', ''),
    ## parser syntax tokens
    ('cheetahVarStartToken', '$', ''),
    ('commentStartToken', '##', ''),
    ('multiLineCommentStartToken', '#*', ''),
    ('multiLineCommentEndToken', '*#', ''),
    ('gobbleWhitespaceAroundMultiLineComments', True, ''),
    ('directiveStartToken', '#', ''),
    ('directiveEndToken', '#', ''),
    ('allowWhitespaceAfterDirectiveStartToken', False, ''),
    ('PSPStartToken', '<%', ''),
    ('PSPEndToken', '%>', ''),
    ('EOLSlurpToken', '#', ''),
    ('gettextTokens', ["_", "N_", "ngettext"], ''),
    ## misc parser permissions
    ('allowExpressionsInExtendsDirective', False, ''),
    ('allowEmptySingleLineMethods', False, ''),
    ('allowNestedDefScopes', True, ''),
    ('allowPlaceholderFilterArgs', True, ''),
]
# Mapping of setting key -> default value, derived from the table above.
# A dict comprehension unpacks the (key, default, docstring) rows by name
# instead of building a throwaway list of index-pairs for dict().
DEFAULT_COMPILER_SETTINGS = {
    key: default for key, default, _docstring in _DEFAULT_COMPILER_SETTINGS}
class GenUtils(object):
    """Shared utility mixin for the Compiler classes.

    These methods turn information handed over by the Parser into small
    fragments of generated Python source; none of them do any parsing
    themselves.
    """

    def genTimeInterval(self, timeString):
        """Convert a shorthand interval string to a number of seconds.

        Recognized suffixes: s(econds), m(inutes), h(ours), d(ays),
        w(eeks).  A bare number is interpreted as minutes.
        """
        ##@@ TR: need to add some error handling here
        unitSeconds = {'s': 1, 'm': 60, 'h': 60 * 60,
                       'd': 60 * 60 * 24, 'w': 60 * 60 * 24 * 7}
        multiplier = unitSeconds.get(timeString[-1])
        if multiplier is None:
            # no recognized suffix: treat the whole string as minutes
            return float(timeString) * 60
        return float(timeString[:-1]) * multiplier

    def genCacheInfo(self, cacheTokenParts):
        """Decipher a placeholder cachetoken into a cacheInfo dict.

        Returns an empty dict when the placeholder requested no caching.
        """
        info = {}
        if cacheTokenParts['REFRESH_CACHE']:
            info['type'] = REFRESH_CACHE
            info['interval'] = self.genTimeInterval(cacheTokenParts['interval'])
        elif cacheTokenParts['STATIC_CACHE']:
            info['type'] = STATIC_CACHE
        return info

    def genCacheInfoFromArgList(self, argList):
        """Build a refresh-style cacheInfo dict from #cache directive args."""
        info = {'type': REFRESH_CACHE}
        for name, value in argList:
            if value[0] in ('"', "'"):
                value = value[1:-1]  # drop the surrounding quotes
            if name == 'timer':
                name = 'interval'
                value = self.genTimeInterval(value)
            info[name] = value
        return info

    def genCheetahVar(self, nameChunks, plain=False):
        """Return the Python expression for a Cheetah $var.

        When the $var's first name is one of the configured gettext tokens,
        a marker chunk is emitted first so gettext scanners can find it.
        """
        if nameChunks[0][0] in self.setting('gettextTokens'):
            self.addGetTextVar(nameChunks)
        wantNameMapper = self.setting('useNameMapper') and not plain
        if wantNameMapper:
            return self.genNameMapperVar(nameChunks)
        return self.genPlainVar(nameChunks)

    def addGetTextVar(self, nameChunks):
        """Emit a dead `if False:` reference to the $var.

        This harmless side effect lets gettext recognize strings marked
        for translation when it scans compiled templates.
        @@TR: another marginally more efficient approach would be to put the
        output in a dummy method that is never called.
        """
        # @@TR: this should be in the compiler not here
        self.addChunk("if False:")
        self.indent()
        self.addChunk(self.genPlainVar(list(nameChunks)))
        self.dedent()

    def genPlainVar(self, nameChunks):
        """Generate Python code for a Cheetah $var WITHOUT using NameMapper
        (Unified Dotted Notation with the SearchList).

        `nameChunks` — a list of (name, useAC, remainder) tuples, outermost
        chunk first — is consumed in place.
        """
        nameChunks.reverse()
        name, _unusedAC, remainder = nameChunks.pop()
        code = name + remainder
        while nameChunks:
            name, _unusedAC, remainder = nameChunks.pop()
            code += '.' + name + remainder
        return code

    def genNameMapperVar(self, nameChunks):
        """Generate valid Python code for a Cheetah $var USING NameMapper
        (Unified Dotted Notation with the SearchList).

        `nameChunks` is a list of (name, useAC, remainder) tuples, consumed
        in place, where `name` is the dotted-name base, `useAC` says whether
        NameMapper may autocall that part (always False when `remainder`
        holds a call arglist), and `remainder` is any trailing
        arglist/index/slice text.  `useAC` is further gated by the global
        'useAutocalling' setting.

        Example: for the raw Cheetah var $a.b.c[1].d().x.y.z the chunks
            [('a.b.c', True, '[1]'), ('d', False, '()'), ('x.y.z', True, '')]
        produce
            VFN(VFN(VFFSL(SL,"a.b.c",True)[1],"d",False)(),"x.y.z",True)
        where VFN/VFFSL/VFSL are the NameMapper aliases and SL is
        self.searchList().

        The innermost lookup uses VFFSL when useStackFrames is on, or VFSL
        over [locals()]+SL+[globals(), __builtin__] otherwise — the latter
        avoids stack-frame introspection so Cheetah can run under Psyco.
        With useSearchList off, a direct VFN call (or plain reference) is
        emitted instead.
        """
        autoCallDefault = self.setting('useAutocalling')
        useSearchList = self.setting('useSearchList')
        nameChunks.reverse()
        name, useAC, remainder = nameChunks.pop()
        callFlag = repr(autoCallDefault and useAC)
        if not useSearchList:
            dotIdx = name.find('.')
            if dotIdx != -1 and dotIdx < len(name):
                head, tail = name[:dotIdx], name[dotIdx + 1:]
                code = 'VFN(%s,"%s",%s)%s' % (head, tail, callFlag, remainder)
            else:
                code = name + remainder
        elif self.setting('useStackFrames'):
            code = 'VFFSL(SL,"%s",%s)%s' % (name, callFlag, remainder)
        else:
            code = ('VFSL([locals()]+SL+[globals(), __builtin__],"%s",%s)%s'
                    % (name, callFlag, remainder))
        # Wrap each outer chunk in a further VFN(...) lookup.
        while nameChunks:
            name, useAC, remainder = nameChunks.pop()
            code = 'VFN(%s,"%s",%s)%s' % (
                code, name, repr(autoCallDefault and useAC), remainder)
        return code
##################################################
## METHOD COMPILERS
class MethodCompiler(GenUtils):
    """Compiles the source of a single method of the generated template class.

    Generated code accumulates as string chunks in self._methodBodyChunks,
    with raw template text buffered separately in self._pendingStrConstChunks
    until commitStrConst() turns it into a write(...) of a string literal.
    wrapCode() stitches signature, docstring, and body into the final method
    definition.  Instances are created and driven by the class compiler.
    """
    def __init__(self, methodName, classCompiler,
                 initialMethodComment=None,
                 decorators=None):
        # The class compiler doubles as the settings manager (see setting()).
        self._settingsManager = classCompiler
        self._classCompiler = classCompiler
        self._moduleCompiler = classCompiler._moduleCompiler
        self._methodName = methodName
        self._initialMethodComment = initialMethodComment
        self._setupState()
        self._decorators = decorators or []

    def setting(self, key):
        # Delegate settings lookups to the class compiler.
        return self._settingsManager.setting(key)

    def _setupState(self):
        """Initialize the per-method accumulation state."""
        self._indent = self.setting('indentationStep')
        self._indentLev = self.setting('initialMethIndentLevel')
        self._pendingStrConstChunks = []
        self._methodSignature = None
        self._methodDef = None
        self._docStringLines = []
        self._methodBodyChunks = []
        # Stacks of currently-open #cache / #call / capture / #filter regions.
        self._cacheRegionsStack = []
        self._callRegionsStack = []
        self._captureRegionsStack = []
        self._filterRegionsStack = []
        self._isErrorCatcherOn = False
        self._hasReturnStatement = False
        self._isGenerator = False

    def cleanupState(self):
        """Called by the containing class compiler instance
        """
        pass

    def methodName(self):
        return self._methodName

    def setMethodName(self, name):
        self._methodName = name

    ## methods for managing indentation

    def indentation(self):
        # Current leading whitespace for generated code lines.
        return self._indent * self._indentLev

    def indent(self):
        self._indentLev +=1

    def dedent(self):
        if self._indentLev:
            self._indentLev -=1
        else:
            raise Error('Attempt to dedent when the indentLev is 0')

    ## methods for final code wrapping

    def methodDef(self):
        # Return the cached method definition, generating it on first use.
        if self._methodDef:
            return self._methodDef
        else:
            return self.wrapCode()

    __str__ = methodDef
    __unicode__ = methodDef

    def wrapCode(self):
        """Assemble signature + docstring + body into the full method source."""
        self.commitStrConst()
        methodDefChunks = (
            self.methodSignature(),
            '\n',
            self.docString(),
            self.methodBody() )
        methodDef = ''.join(methodDefChunks)
        self._methodDef = methodDef
        return methodDef

    def methodSignature(self):
        return self._indent + self._methodSignature + ':'

    def setMethodSignature(self, signature):
        self._methodSignature = signature

    def methodBody(self):
        return ''.join( self._methodBodyChunks )

    def docString(self):
        """Render the accumulated docstring lines as a triple-quoted block."""
        if not self._docStringLines:
            return ''
        ind = self._indent*2
        # Swap embedded triple-double-quotes so they can't close the docstring.
        docStr = (ind + '"""\n' + ind +
                  ('\n' + ind).join([ln.replace('"""', "'''") for ln in self._docStringLines]) +
                  '\n' + ind + '"""\n')
        return docStr

    ## methods for adding code

    def addMethDocString(self, line):
        # '%' is doubled so later %-formatting of the output can't eat it.
        self._docStringLines.append(line.replace('%', '%%'))

    def addChunk(self, chunk):
        """Append one line of generated code at the current indent level."""
        self.commitStrConst()
        chunk = "\n" + self.indentation() + chunk
        self._methodBodyChunks.append(chunk)

    def appendToPrevChunk(self, appendage):
        self._methodBodyChunks[-1] = self._methodBodyChunks[-1] + appendage

    def addWriteChunk(self, chunk):
        self.addChunk('write(' + chunk + ')')

    def addFilteredChunk(self, chunk, filterArgs=None, rawExpr=None, lineCol=None):
        """Emit code that writes `chunk` through the current output filter,
        honoring the alwaysFilterNone / useFilters settings.
        """
        if filterArgs is None:
            filterArgs = ''
        if self.setting('includeRawExprInFilterArgs') and rawExpr:
            filterArgs += ', rawExpr=%s'%repr(rawExpr)
        if self.setting('alwaysFilterNone'):
            # Embed rawExpr in a trailing comment only when it is single-line.
            if rawExpr and rawExpr.find('\n')==-1 and rawExpr.find('\r')==-1:
                self.addChunk("_v = %s # %r"%(chunk, rawExpr))
                if lineCol:
                    self.appendToPrevChunk(' on line %s, col %s'%lineCol)
            else:
                self.addChunk("_v = %s"%chunk)
            if self.setting('useFilters'):
                self.addChunk("if _v is not None: write(_filter(_v%s))"%filterArgs)
            else:
                self.addChunk("if _v is not None: write(str(_v))")
        else:
            if self.setting('useFilters'):
                self.addChunk("write(_filter(%s%s))"%(chunk, filterArgs))
            else:
                self.addChunk("write(str(%s))"%chunk)

    def _appendToPrevStrConst(self, strConst):
        if self._pendingStrConstChunks:
            self._pendingStrConstChunks.append(strConst)
        else:
            self._pendingStrConstChunks = [strConst]

    def commitStrConst(self):
        """Add the code for outputting the pending strConst without chopping off
        any whitespace from it.
        """
        if not self._pendingStrConstChunks:
            return
        strConst = ''.join(self._pendingStrConstChunks)
        self._pendingStrConstChunks = []
        if not strConst:
            return
        reprstr = repr(strConst)
        i = 0
        out = []
        if reprstr.startswith('u'):
            # Keep the unicode prefix outside the triple-quoted literal.
            i = 1
            out = ['u']
        # Re-expand escaped newlines so the emitted literal keeps real
        # line breaks (uses the Parser's escapedNewlineRE).
        body = escapedNewlineRE.sub('\\1\n', reprstr[i+1:-1])
        if reprstr[i]=="'":
            out.append("'''")
            out.append(body)
            out.append("'''")
        else:
            out.append('"""')
            out.append(body)
            out.append('"""')
        self.addWriteChunk(''.join(out))

    def handleWSBeforeDirective(self):
        """Truncate the pending strCont to the beginning of the current line.
        """
        if self._pendingStrConstChunks:
            src = self._pendingStrConstChunks[-1]
            BOL = max(src.rfind('\n')+1, src.rfind('\r')+1, 0)
            if BOL < len(src):
                self._pendingStrConstChunks[-1] = src[:BOL]

    def isErrorCatcherOn(self):
        return self._isErrorCatcherOn

    def turnErrorCatcherOn(self):
        self._isErrorCatcherOn = True

    def turnErrorCatcherOff(self):
        self._isErrorCatcherOn = False

    # @@TR: consider merging the next two methods into one
    def addStrConst(self, strConst):
        self._appendToPrevStrConst(strConst)

    def addRawText(self, text):
        self.addStrConst(text)

    def addMethComment(self, comm):
        offSet = self.setting('commentOffset')
        self.addChunk('#' + ' '*offSet + comm)

    def addPlaceholder(self, expr, filterArgs, rawPlaceholder,
                       cacheTokenParts, lineCol,
                       silentMode=False):
        """Emit code for a $placeholder, honoring caching, the error catcher,
        and silent mode (which swallows NotFound).
        """
        cacheInfo = self.genCacheInfo(cacheTokenParts)
        if cacheInfo:
            cacheInfo['ID'] = repr(rawPlaceholder)[1:-1]
            self.startCacheRegion(cacheInfo, lineCol, rawPlaceholder=rawPlaceholder)
        if self.isErrorCatcherOn():
            methodName = self._classCompiler.addErrorCatcherCall(
                expr, rawCode=rawPlaceholder, lineCol=lineCol)
            expr = 'self.' + methodName + '(localsDict=locals())'
        if silentMode:
            self.addChunk('try:')
            self.indent()
            self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
            self.dedent()
            self.addChunk('except NotFound: pass')
        else:
            self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
        if self.setting('outputRowColComments'):
            self.appendToPrevChunk(' # from line %s, col %s' % lineCol + '.')
        if cacheInfo:
            self.endCacheRegion()

    def addSilent(self, expr):
        self.addChunk( expr )

    def addEcho(self, expr, rawExpr=None):
        self.addFilteredChunk(expr, rawExpr=rawExpr)

    def addSet(self, expr, exprComponents, setStyle):
        """Emit code for a #set directive.

        SET_GLOBAL assignments are rewritten onto
        self._CHEETAH__globalSetVars; SET_MODULE ones are routed to the
        module compiler as module-level globals.
        """
        if setStyle is SET_GLOBAL:
            (LVALUE, OP, RVALUE) = (exprComponents.LVALUE,
                                    exprComponents.OP,
                                    exprComponents.RVALUE)
            # we need to split the LVALUE to deal with globalSetVars
            splitPos1 = LVALUE.find('.')
            splitPos2 = LVALUE.find('[')
            if splitPos1 > 0 and splitPos2==-1:
                splitPos = splitPos1
            elif splitPos1 > 0 and splitPos1 < max(splitPos2, 0):
                splitPos = splitPos1
            else:
                splitPos = splitPos2
            if splitPos >0:
                primary = LVALUE[:splitPos]
                secondary = LVALUE[splitPos:]
            else:
                primary = LVALUE
                secondary = ''
            LVALUE = 'self._CHEETAH__globalSetVars["' + primary + '"]' + secondary
            expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()
        if setStyle is SET_MODULE:
            self._moduleCompiler.addModuleGlobal(expr)
        else:
            self.addChunk(expr)

    def addInclude(self, sourceExpr, includeFrom, isRaw):
        self.addChunk('self._handleCheetahInclude(' + sourceExpr +
                      ', trans=trans, ' +
                      'includeFrom="' + includeFrom + '", raw=' +
                      repr(isRaw) + ')')

    def addWhile(self, expr, lineCol=None):
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addFor(self, expr, lineCol=None):
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addRepeat(self, expr, lineCol=None):
        #the _repeatCount stuff here allows nesting of #repeat directives
        self._repeatCount = getattr(self, "_repeatCount", -1) + 1
        self.addFor('for __i%s in range(%s)' % (self._repeatCount, expr), lineCol=lineCol)

    def addIndentingDirective(self, expr, lineCol=None):
        """Emit `expr` (appending ':' if missing) and indent what follows."""
        if expr and not expr[-1] == ':':
            expr = expr + ':'
        self.addChunk( expr )
        if lineCol:
            self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
        self.indent()

    def addReIndentingDirective(self, expr, dedent=True, lineCol=None):
        """Like addIndentingDirective but first dedents — used for clauses
        such as #else / #except that close the previous suite.
        """
        self.commitStrConst()
        if dedent:
            self.dedent()
        if not expr[-1] == ':':
            expr = expr + ':'
        self.addChunk( expr )
        if lineCol:
            self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
        self.indent()

    def addIf(self, expr, lineCol=None):
        """For a full #if ... #end if directive
        """
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addOneLineIf(self, expr, lineCol=None):
        """For a single-line #if directive (delegates to addIndentingDirective).
        """
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addTernaryExpr(self, conditionExpr, trueExpr, falseExpr, lineCol=None):
        """For a single-line #if ... then .... else ... directive
        <condition> then <trueExpr> else <falseExpr>
        """
        self.addIndentingDirective(conditionExpr, lineCol=lineCol)
        self.addFilteredChunk(trueExpr)
        self.dedent()
        self.addIndentingDirective('else')
        self.addFilteredChunk(falseExpr)
        self.dedent()

    def addElse(self, expr, dedent=True, lineCol=None):
        # Normalize 'else if' spelled with whitespace into 'elif'.
        expr = re.sub(r'else[ \f\t]+if', 'elif', expr)
        self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)

    def addElif(self, expr, dedent=True, lineCol=None):
        self.addElse(expr, dedent=dedent, lineCol=lineCol)

    def addUnless(self, expr, lineCol=None):
        self.addIf('if not (' + expr + ')')

    def addClosure(self, functionName, argsList, parserComment):
        """Emit a nested `def` (a closure) inside the current method."""
        argStringChunks = []
        for arg in argsList:
            chunk = arg[0]
            if not arg[1] == None:
                chunk += '=' + arg[1]
            argStringChunks.append(chunk)
        signature = "def " + functionName + "(" + ','.join(argStringChunks) + "):"
        self.addIndentingDirective(signature)
        self.addChunk('#'+parserComment)

    def addTry(self, expr, lineCol=None):
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addExcept(self, expr, dedent=True, lineCol=None):
        self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)

    def addFinally(self, expr, dedent=True, lineCol=None):
        self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)

    def addReturn(self, expr):
        # A generated method may not mix `return <value>` with `yield`.
        assert not self._isGenerator
        self.addChunk(expr)
        self._hasReturnStatement = True

    def addYield(self, expr):
        """Emit a #yield.  A bare 'yield' produces boilerplate that yields the
        buffered transaction output and starts a fresh DummyTransaction.
        """
        assert not self._hasReturnStatement
        self._isGenerator = True
        if expr.replace('yield', '').strip():
            self.addChunk(expr)
        else:
            self.addChunk('if _dummyTrans:')
            self.indent()
            self.addChunk('yield trans.response().getvalue()')
            self.addChunk('trans = DummyTransaction()')
            self.addChunk('write = trans.response().write')
            self.dedent()
            self.addChunk('else:')
            self.indent()
            self.addChunk(
                'raise TypeError("This method cannot be called with a trans arg")')
            self.dedent()

    def addPass(self, expr):
        self.addChunk(expr)

    def addDel(self, expr):
        self.addChunk(expr)

    def addAssert(self, expr):
        self.addChunk(expr)

    def addRaise(self, expr):
        self.addChunk(expr)

    def addBreak(self, expr):
        self.addChunk(expr)

    def addContinue(self, expr):
        self.addChunk(expr)

    def addPSP(self, PSP):
        """Emit a PSP code block: leading '=' means expression output,
        'end' closes an indented block, and a trailing '$' or ':' indents
        the code that follows.
        """
        self.commitStrConst()
        autoIndent = False
        if PSP[0] == '=':
            PSP = PSP[1:]
            if PSP:
                self.addWriteChunk('_filter(' + PSP + ')')
            return
        elif PSP.lower() == 'end':
            self.dedent()
            return
        elif PSP[-1] == '$':
            autoIndent = True
            PSP = PSP[:-1]
        elif PSP[-1] == ':':
            autoIndent = True
        for line in PSP.splitlines():
            self.addChunk(line)
        if autoIndent:
            self.indent()

    def nextCacheID(self):
        # Random ID keeps the region-local variable names unique.
        return ('_'+str(random.randrange(100, 999))
                + str(random.randrange(10000, 99999)))

    def startCacheRegion(self, cacheInfo, lineCol, rawPlaceholder=None):
        """Open a #cache region: emit code that decides whether the cached
        output is still valid and, if not, redirects writes into a
        DummyTransaction collector until endCacheRegion().
        """
        # @@TR: we should add some runtime logging to this
        ID = self.nextCacheID()
        interval = cacheInfo.get('interval', None)
        test = cacheInfo.get('test', None)
        customID = cacheInfo.get('id', None)
        if customID:
            ID = customID
        varyBy = cacheInfo.get('varyBy', repr(ID))
        self._cacheRegionsStack.append(ID) # attrib of current methodCompiler
        # @@TR: add this to a special class var as well
        self.addChunk('')
        self.addChunk('## START CACHE REGION: ID='+ID+
                      '. line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('_RECACHE_%(ID)s = False'%locals())
        self.addChunk('_cacheRegion_%(ID)s = self.getCacheRegion(regionID='%locals()
                      + repr(ID)
                      + ', cacheInfo=%r'%cacheInfo
                      + ')')
        self.addChunk('if _cacheRegion_%(ID)s.isNew():'%locals())
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        self.dedent()
        self.addChunk('_cacheItem_%(ID)s = _cacheRegion_%(ID)s.getCacheItem('%locals()
                      +varyBy+')')
        self.addChunk('if _cacheItem_%(ID)s.hasExpired():'%locals())
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        self.dedent()
        if test:
            # Optional user-supplied recache condition.
            self.addChunk('if ' + test + ':')
            self.indent()
            self.addChunk('_RECACHE_%(ID)s = True'%locals())
            self.dedent()
        self.addChunk('if (not _RECACHE_%(ID)s) and _cacheItem_%(ID)s.getRefreshTime():'%locals())
        self.indent()
        #self.addChunk('print "DEBUG"+"-"*50')
        self.addChunk('try:')
        self.indent()
        self.addChunk('_output = _cacheItem_%(ID)s.renderOutput()'%locals())
        self.dedent()
        self.addChunk('except KeyError:')
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        #self.addChunk('print "DEBUG"+"*"*50')
        self.dedent()
        self.addChunk('else:')
        self.indent()
        self.addWriteChunk('_output')
        self.addChunk('del _output')
        self.dedent()
        self.dedent()
        self.addChunk('if _RECACHE_%(ID)s or not _cacheItem_%(ID)s.getRefreshTime():'%locals())
        self.indent()
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('trans = _cacheCollector_%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _cacheCollector_%(ID)s.response().write'%locals())
        if interval:
            self.addChunk(("_cacheItem_%(ID)s.setExpiryTime(currentTime() +"%locals())
                          + str(interval) + ")")

    def endCacheRegion(self):
        """Close the innermost #cache region: store the collected output and
        restore the real transaction.
        """
        ID = self._cacheRegionsStack.pop()
        self.addChunk('trans = _orig_trans%(ID)s'%locals())
        self.addChunk('write = trans.response().write')
        self.addChunk('_cacheData = _cacheCollector_%(ID)s.response().getvalue()'%locals())
        self.addChunk('_cacheItem_%(ID)s.setData(_cacheData)'%locals())
        self.addWriteChunk('_cacheData')
        self.addChunk('del _cacheData')
        self.addChunk('del _cacheCollector_%(ID)s'%locals())
        self.addChunk('del _orig_trans%(ID)s'%locals())
        self.dedent()
        self.addChunk('## END CACHE REGION: '+ID)
        self.addChunk('')

    def nextCallRegionID(self):
        return self.nextCacheID()

    def startCallRegion(self, functionName, args, lineCol, regionTitle='CALL'):
        """Open a #call region: buffer output into a DummyTransaction so it
        can be passed to `functionName` when the region ends.
        """
        class CallDetails(object):
            pass
        callDetails = CallDetails()
        callDetails.ID = ID = self.nextCallRegionID()
        callDetails.functionName = functionName
        callDetails.args = args
        callDetails.lineCol = lineCol
        callDetails.usesKeywordArgs = False
        self._callRegionsStack.append((ID, callDetails)) # attrib of current methodCompiler
        self.addChunk('## START %(regionTitle)s REGION: '%locals()
                      +ID
                      +' of '+functionName
                      +' at line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
        self.addChunk('self._CHEETAH__isBuffering = True')
        self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _callCollector%(ID)s.response().write'%locals())

    def setCallArg(self, argName, lineCol):
        """Start a named #arg inside the current #call region."""
        ID, callDetails = self._callRegionsStack[-1]
        argName = str(argName)
        if callDetails.usesKeywordArgs:
            # Finish the previous arg's buffered output first.
            self._endCallArg()
        else:
            # First named arg: switch this region into keyword-args mode.
            callDetails.usesKeywordArgs = True
            self.addChunk('_callKws%(ID)s = {}'%locals())
            self.addChunk('_currentCallArgname%(ID)s = %(argName)r'%locals())
        callDetails.currentArgname = argName

    def _endCallArg(self):
        # Store the buffered output under the current arg name and start a
        # fresh collector for the next arg.
        ID, callDetails = self._callRegionsStack[-1]
        currCallArg = callDetails.currentArgname
        self.addChunk(('_callKws%(ID)s[%(currCallArg)r] ='
                       ' _callCollector%(ID)s.response().getvalue()')%locals())
        self.addChunk('del _callCollector%(ID)s'%locals())
        self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _callCollector%(ID)s.response().write'%locals())

    def endCallRegion(self, regionTitle='CALL'):
        """Close the innermost #call region: restore the real transaction and
        emit the actual call with the buffered output/keyword args.
        """
        ID, callDetails = self._callRegionsStack[-1]
        functionName, initialKwArgs, lineCol = (
            callDetails.functionName, callDetails.args, callDetails.lineCol)
        def reset(ID=ID):
            # Emit the chunks that restore trans/write and drop the temps.
            self.addChunk('trans = _orig_trans%(ID)s'%locals())
            self.addChunk('write = trans.response().write')
            self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
            self.addChunk('del _wasBuffering%(ID)s'%locals())
            self.addChunk('del _orig_trans%(ID)s'%locals())
        if not callDetails.usesKeywordArgs:
            reset()
            self.addChunk('_callArgVal%(ID)s = _callCollector%(ID)s.response().getvalue()'%locals())
            self.addChunk('del _callCollector%(ID)s'%locals())
            if initialKwArgs:
                initialKwArgs = ', '+initialKwArgs
            self.addFilteredChunk('%(functionName)s(_callArgVal%(ID)s%(initialKwArgs)s)'%locals())
            self.addChunk('del _callArgVal%(ID)s'%locals())
        else:
            if initialKwArgs:
                initialKwArgs = initialKwArgs+', '
            self._endCallArg()
            reset()
            self.addFilteredChunk('%(functionName)s(%(initialKwArgs)s**_callKws%(ID)s)'%locals())
            self.addChunk('del _callKws%(ID)s'%locals())
        self.addChunk('## END %(regionTitle)s REGION: '%locals()
                      +ID
                      +' of '+functionName
                      +' at line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('')
        self._callRegionsStack.pop() # attrib of current methodCompiler

    def nextCaptureRegionID(self):
        return self.nextCacheID()

    def startCaptureRegion(self, assignTo, lineCol):
        """Open a capture region: buffer all output for later assignment to
        `assignTo` (see endCaptureRegion).
        """
        class CaptureDetails: pass
        captureDetails = CaptureDetails()
        captureDetails.ID = ID = self.nextCaptureRegionID()
        captureDetails.assignTo = assignTo
        captureDetails.lineCol = lineCol
        self._captureRegionsStack.append((ID, captureDetails)) # attrib of current methodCompiler
        self.addChunk('## START CAPTURE REGION: '+ID
                      +' '+assignTo
                      +' at line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
        self.addChunk('self._CHEETAH__isBuffering = True')
        self.addChunk('trans = _captureCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _captureCollector%(ID)s.response().write'%locals())

    def endCaptureRegion(self):
        """Close the innermost capture region and assign the buffered output."""
        ID, captureDetails = self._captureRegionsStack.pop()
        assignTo, lineCol = (captureDetails.assignTo, captureDetails.lineCol)
        self.addChunk('trans = _orig_trans%(ID)s'%locals())
        self.addChunk('write = trans.response().write')
        self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
        self.addChunk('%(assignTo)s = _captureCollector%(ID)s.response().getvalue()'%locals())
        self.addChunk('del _orig_trans%(ID)s'%locals())
        self.addChunk('del _captureCollector%(ID)s'%locals())
        self.addChunk('del _wasBuffering%(ID)s'%locals())

    def setErrorCatcher(self, errorCatcherName):
        """Emit code installing the named ErrorCatchers class on the template.

        NOTE(review): the generated code uses dict.has_key, which exists
        only in Python 2 — confirm the intended target runtime.
        """
        self.turnErrorCatcherOn()
        self.addChunk('if self._CHEETAH__errorCatchers.has_key("' + errorCatcherName + '"):')
        self.indent()
        self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["' +
                      errorCatcherName + '"]')
        self.dedent()
        self.addChunk('else:')
        self.indent()
        self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["'
                      + errorCatcherName + '"] = ErrorCatchers.'
                      + errorCatcherName + '(self)'
                      )
        self.dedent()

    def nextFilterRegionID(self):
        return self.nextCacheID()

    def setTransform(self, transformer, isKlass):
        """Emit code swapping the transaction for a TransformerTransaction."""
        self.addChunk('trans = TransformerTransaction()')
        self.addChunk('trans._response = trans.response()')
        self.addChunk('trans._response._filter = %s' % transformer)
        self.addChunk('write = trans._response.write')

    def setFilter(self, theFilter, isKlass):
        """Emit code for a #filter directive, saving the active filter so
        closeFilterBlock() can restore it.

        NOTE(review): the generated lookup also relies on Python-2-only
        dict.has_key.
        """
        class FilterDetails:
            pass
        filterDetails = FilterDetails()
        filterDetails.ID = ID = self.nextFilterRegionID()
        filterDetails.theFilter = theFilter
        filterDetails.isKlass = isKlass
        self._filterRegionsStack.append((ID, filterDetails)) # attrib of current methodCompiler
        self.addChunk('_orig_filter%(ID)s = _filter'%locals())
        if isKlass:
            self.addChunk('_filter = self._CHEETAH__currentFilter = ' + theFilter.strip() +
                          '(self).filter')
        else:
            if theFilter.lower() == 'none':
                self.addChunk('_filter = self._CHEETAH__initialFilter')
            else:
                # is string representing the name of a builtin filter
                self.addChunk('filterName = ' + repr(theFilter))
                self.addChunk('if self._CHEETAH__filters.has_key("' + theFilter + '"):')
                self.indent()
                self.addChunk('_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]')
                self.dedent()
                self.addChunk('else:')
                self.indent()
                self.addChunk('_filter = self._CHEETAH__currentFilter'
                              +' = \\\n\t\t\tself._CHEETAH__filters[filterName] = '
                              + 'getattr(self._CHEETAH__filtersLib, filterName)(self).filter')
                self.dedent()

    def closeFilterBlock(self):
        """Restore the filter that was active before the matching setFilter()."""
        ID, filterDetails = self._filterRegionsStack.pop()
        #self.addChunk('_filter = self._CHEETAH__initialFilter')
        #self.addChunk('_filter = _orig_filter%(ID)s'%locals())
        self.addChunk('_filter = self._CHEETAH__currentFilter = _orig_filter%(ID)s'%locals())
class AutoMethodCompiler(MethodCompiler):
    """MethodCompiler that automatically wraps each generated method with
    the standard Cheetah plumbing: the trans/write setup, searchList and
    filter bindings, and the closing return of the buffered output.
    """

    def _setupState(self):
        MethodCompiler._setupState(self)
        # (argName, defaultValueString) pairs; 'self' is always first.
        self._argStringList = [ ("self", None) ]
        self._streamingEnabled = True
        # Lazily computed from self._decorators; see isClassMethod().
        self._isClassMethod = None
        self._isStaticMethod = None

    def _useKWsDictArgForPassingTrans(self):
        """Return True when the transaction should be passed via a **KWS
        dict instead of an explicit 'trans' keyword argument."""
        alreadyHasTransArg = [argname for argname, defval in self._argStringList
                              if argname=='trans']
        return (self.methodName()!='respond'
                and not alreadyHasTransArg
                and self.setting('useKWsDictArgForPassingTrans'))

    def isClassMethod(self):
        if self._isClassMethod is None:
            self._isClassMethod = '@classmethod' in self._decorators
        return self._isClassMethod

    def isStaticMethod(self):
        if self._isStaticMethod is None:
            self._isStaticMethod = '@staticmethod' in self._decorators
        return self._isStaticMethod

    def cleanupState(self):
        """Finalize the method: close any open cache/call regions, decide
        how the transaction is passed, and sandwich the user body between
        the auto setup and cleanup code."""
        MethodCompiler.cleanupState(self)
        self.commitStrConst()
        if self._cacheRegionsStack:
            self.endCacheRegion()
        if self._callRegionsStack:
            self.endCallRegion()

        if self._streamingEnabled:
            kwargsName = None
            positionalArgsListName = None
            for argname, defval in self._argStringList:
                if argname.strip().startswith('**'):
                    kwargsName = argname.strip().replace('**', '')
                    break
                elif argname.strip().startswith('*'):
                    positionalArgsListName = argname.strip().replace('*', '')
            if not kwargsName and self._useKWsDictArgForPassingTrans():
                kwargsName = 'KWS'
                self.addMethArg('**KWS', None)
            self._kwargsName = kwargsName

            if not self._useKWsDictArgForPassingTrans():
                if not kwargsName and not positionalArgsListName:
                    self.addMethArg('trans', 'None')
                else:
                    # Can't safely append a 'trans' arg after *args/**kwargs,
                    # so streaming is disabled for this method.
                    self._streamingEnabled = False

        self._indentLev = self.setting('initialMethIndentLevel')
        mainBodyChunks = self._methodBodyChunks
        self._methodBodyChunks = []
        self._addAutoSetupCode()
        self._methodBodyChunks.extend(mainBodyChunks)
        self._addAutoCleanupCode()

    def _addAutoSetupCode(self):
        """Emit the boilerplate that opens every generated method body:
        resolve/construct the transaction, bind write, SL, and _filter."""
        if self._initialMethodComment:
            self.addChunk(self._initialMethodComment)

        if self._streamingEnabled and not self.isClassMethod() and not self.isStaticMethod():
            if self._useKWsDictArgForPassingTrans() and self._kwargsName:
                self.addChunk('trans = %s.get("trans")'%self._kwargsName)
            self.addChunk('if (not trans and not self._CHEETAH__isBuffering'
                          ' and not callable(self.transaction)):')
            self.indent()
            self.addChunk('trans = self.transaction'
                          ' # is None unless self.awake() was called')
            self.dedent()
            self.addChunk('if not trans:')
            self.indent()
            self.addChunk('trans = DummyTransaction()')
            if self.setting('autoAssignDummyTransactionToSelf'):
                self.addChunk('self.transaction = trans')
            self.addChunk('_dummyTrans = True')
            self.dedent()
            self.addChunk('else: _dummyTrans = False')
        else:
            # Non-streaming (or class/static) methods always buffer into
            # a DummyTransaction.
            self.addChunk('trans = DummyTransaction()')
            self.addChunk('_dummyTrans = True')
        self.addChunk('write = trans.response().write')
        if self.setting('useNameMapper'):
            argNames = [arg[0] for arg in self._argStringList]
            allowSearchListAsMethArg = self.setting('allowSearchListAsMethArg')
            if allowSearchListAsMethArg and 'SL' in argNames:
                pass
            elif allowSearchListAsMethArg and 'searchList' in argNames:
                self.addChunk('SL = searchList')
            elif not self.isClassMethod() and not self.isStaticMethod():
                self.addChunk('SL = self._CHEETAH__searchList')
            else:
                self.addChunk('SL = [KWS]')
        if self.setting('useFilters'):
            if self.isClassMethod() or self.isStaticMethod():
                # No instance state available, so fall back to a plain
                # unicode() filter.
                self.addChunk('_filter = lambda x, **kwargs: unicode(x)')
            else:
                self.addChunk('_filter = self._CHEETAH__currentFilter')
        self.addChunk('')
        self.addChunk("#" *40)
        self.addChunk('## START - generated method body')
        self.addChunk('')

    def _addAutoCleanupCode(self):
        """Emit the closing boilerplate (the return of the buffered output),
        skipped for generator methods."""
        self.addChunk('')
        self.addChunk("#" *40)
        self.addChunk('## END - generated method body')
        self.addChunk('')
        if not self._isGenerator:
            self.addStop()
        self.addChunk('')

    def addStop(self, expr=None):
        # Only a dummy transaction's buffered value is returned; a real
        # transaction has already received the output via write().
        self.addChunk('return _dummyTrans and trans.response().getvalue() or ""')

    def addMethArg(self, name, defVal=None):
        self._argStringList.append( (name, defVal) )

    def methodSignature(self):
        """Build the full 'def name(args):' line, honoring any
        @classmethod/@staticmethod decorators."""
        argStringChunks = []
        for arg in self._argStringList:
            chunk = arg[0]
            if chunk == 'self' and self.isClassMethod():
                chunk = 'cls'
            if chunk == 'self' and self.isStaticMethod():
                # Skip the "self" arg entirely for @staticmethod methods
                continue
            if not arg[1] == None:
                chunk += '=' + arg[1]
            argStringChunks.append(chunk)
        argString = (', ').join(argStringChunks)

        output = []
        if self._decorators:
            output.append(''.join([self._indent + decorator + '\n'
                                   for decorator in self._decorators]))
        output.append(self._indent + "def "
                      + self.methodName() + "(" +
                      argString + "):\n\n")
        return ''.join(output)
##################################################
## CLASS COMPILERS
_initMethod_initCheetah = """\
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
""".replace('\n', '\n'+' '*8)
class ClassCompiler(GenUtils):
    """Generates the Python source for one class in the output module.

    Method-level code generation is delegated to a stack of
    MethodCompilers; see __getattr__ for the one-way attribute sharing
    with the active MethodCompiler.
    """
    methodCompilerClass = AutoMethodCompiler
    methodCompilerClassForInit = MethodCompiler

    def __init__(self, className, mainMethodName='respond',
                 moduleCompiler=None,
                 fileName=None,
                 settingsManager=None):
        self._settingsManager = settingsManager
        self._fileName = fileName
        self._className = className
        self._moduleCompiler = moduleCompiler
        self._mainMethodName = mainMethodName
        self._setupState()
        # The main output method is spawned immediately and stays active
        # for the whole parse.
        methodCompiler = self._spawnMethodCompiler(
            mainMethodName,
            initialMethodComment='## CHEETAH: main method generated for this template')
        self._setActiveMethodCompiler(methodCompiler)
        if fileName and self.setting('monitorSrcFile'):
            self._addSourceFileMonitoring(fileName)

    def setting(self, key):
        return self._settingsManager.setting(key)

    def __getattr__(self, name):
        """Provide access to the methods and attributes of the MethodCompiler
        at the top of the activeMethods stack: one-way namespace sharing

        WARNING: Use .setMethods to assign the attributes of the MethodCompiler
        from the methods of this class!!! or you will be assigning to attributes
        of this object instead.
        """
        if name in self.__dict__:
            return self.__dict__[name]
        elif hasattr(self.__class__, name):
            return getattr(self.__class__, name)
        elif self._activeMethodsList and hasattr(self._activeMethodsList[-1], name):
            return getattr(self._activeMethodsList[-1], name)
        else:
            raise AttributeError(name)

    def _setupState(self):
        self._classDef = None
        self._decoratorsForNextMethod = []
        self._activeMethodsList = []   # stack while parsing/generating
        self._finishedMethodsList = [] # store by order
        self._methodsIndex = {}        # store by name
        self._baseClass = 'Template'
        self._classDocStringLines = []
        # printed after methods in the gen class def:
        self._generatedAttribs = ['_CHEETAH__instanceInitialized = False']
        self._generatedAttribs.append('_CHEETAH_version = __CHEETAH_version__')
        self._generatedAttribs.append(
            '_CHEETAH_versionTuple = __CHEETAH_versionTuple__')
        if self.setting('addTimestampsToCompilerOutput'):
            self._generatedAttribs.append('_CHEETAH_genTime = __CHEETAH_genTime__')
            self._generatedAttribs.append('_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__')
        self._generatedAttribs.append('_CHEETAH_src = __CHEETAH_src__')
        self._generatedAttribs.append(
            '_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__')

        if self.setting('templateMetaclass'):
            self._generatedAttribs.append('__metaclass__ = '+self.setting('templateMetaclass'))
        self._initMethChunks = []
        self._blockMetaData = {}
        self._errorCatcherCount = 0
        self._placeholderToErrorCatcherMap = {}

    def cleanupState(self):
        """Flush all pending method compilers and finish the class."""
        while self._activeMethodsList:
            methCompiler = self._popActiveMethodCompiler()
            self._swallowMethodCompiler(methCompiler)
        self._setupInitMethod()
        if self._mainMethodName == 'respond':
            if self.setting('setup__str__method'):
                self._generatedAttribs.append('def __str__(self): return self.respond()')
        self.addAttribute('_mainCheetahMethod_for_' + self._className +
                          '= ' + repr(self._mainMethodName) )

    def _setupInitMethod(self):
        # Build the generated class's __init__ and insert it first.
        __init__ = self._spawnMethodCompiler('__init__',
                                             klass=self.methodCompilerClassForInit)
        __init__.setMethodSignature("def __init__(self, *args, **KWs)")
        __init__.addChunk('super(%s, self).__init__(*args, **KWs)' % self._className)
        __init__.addChunk(_initMethod_initCheetah % {'className' : self._className})
        for chunk in self._initMethChunks:
            __init__.addChunk(chunk)
        __init__.cleanupState()
        self._swallowMethodCompiler(__init__, pos=0)

    def _addSourceFileMonitoring(self, fileName):
        """Emit code that recompiles the template when its source file
        changes on disk."""
        # @@TR: this stuff needs auditing for Cheetah 2.0
        # the first bit is added to init
        self.addChunkToInit('self._filePath = ' + repr(fileName))
        self.addChunkToInit('self._fileMtime = ' + str(getmtime(fileName)) )

        # the rest is added to the main output method of the class ('mainMethod')
        self.addChunk('if exists(self._filePath) and ' +
                      'getmtime(self._filePath) > self._fileMtime:')
        self.indent()
        self.addChunk('self._compile(file=self._filePath, moduleName='+self._className + ')')
        self.addChunk(
            'write(getattr(self, self._mainCheetahMethod_for_' + self._className +
            ')(trans=trans))')
        self.addStop()
        self.dedent()

    def setClassName(self, name):
        self._className = name

    def className(self):
        return self._className

    def setBaseClass(self, baseClassName):
        self._baseClass = baseClassName

    def setMainMethodName(self, methodName):
        """Rename the main output method, fixing up any already-generated
        self-recompile chunks that call it by name."""
        if methodName == self._mainMethodName:
            return
        ## change the name in the methodCompiler and add new reference
        mainMethod = self._methodsIndex[self._mainMethodName]
        mainMethod.setMethodName(methodName)
        self._methodsIndex[methodName] = mainMethod

        ## make sure that fileUpdate code still works properly:
        chunkToChange = ('write(self.' + self._mainMethodName + '(trans=trans))')
        chunks = mainMethod._methodBodyChunks
        if chunkToChange in chunks:
            for i in range(len(chunks)):
                if chunks[i] == chunkToChange:
                    chunks[i] = ('write(self.' + methodName + '(trans=trans))')
        ## get rid of the old reference and update self._mainMethodName
        del self._methodsIndex[self._mainMethodName]
        self._mainMethodName = methodName

    def setMainMethodArgs(self, argsList):
        mainMethodCompiler = self._methodsIndex[self._mainMethodName]
        for argName, defVal in argsList:
            mainMethodCompiler.addMethArg(argName, defVal)

    def _spawnMethodCompiler(self, methodName, klass=None,
                             initialMethodComment=None):
        """Create (and index) a MethodCompiler, consuming any decorators
        queued via addDecorator()."""
        if klass is None:
            klass = self.methodCompilerClass

        decorators = self._decoratorsForNextMethod or []
        self._decoratorsForNextMethod = []
        methodCompiler = klass(methodName, classCompiler=self,
                               decorators=decorators,
                               initialMethodComment=initialMethodComment)
        self._methodsIndex[methodName] = methodCompiler
        return methodCompiler

    def _setActiveMethodCompiler(self, methodCompiler):
        self._activeMethodsList.append(methodCompiler)

    def _getActiveMethodCompiler(self):
        return self._activeMethodsList[-1]

    def _popActiveMethodCompiler(self):
        return self._activeMethodsList.pop()

    def _swallowMethodCompiler(self, methodCompiler, pos=None):
        methodCompiler.cleanupState()
        if pos==None:
            self._finishedMethodsList.append( methodCompiler )
        else:
            self._finishedMethodsList.insert(pos, methodCompiler)
        return methodCompiler

    def startMethodDef(self, methodName, argsList, parserComment):
        methodCompiler = self._spawnMethodCompiler(
            methodName, initialMethodComment=parserComment)
        self._setActiveMethodCompiler(methodCompiler)
        for argName, defVal in argsList:
            methodCompiler.addMethArg(argName, defVal)

    def _finishedMethods(self):
        return self._finishedMethodsList

    def addDecorator(self, decoratorExpr):
        """Set the decorator to be used with the next method in the source.

        See _spawnMethodCompiler() and MethodCompiler for the details of how
        this is used.
        """
        self._decoratorsForNextMethod.append(decoratorExpr)

    def addClassDocString(self, line):
        # '%' is doubled because the docstring is later run through
        # %-formatting in classDocstring().
        self._classDocStringLines.append( line.replace('%', '%%'))

    def addChunkToInit(self, chunk):
        self._initMethChunks.append(chunk)

    def addAttribute(self, attribExpr):
        ## first test to make sure that the user hasn't used any fancy Cheetah syntax
        #  (placeholders, directives, etc.) inside the expression
        if attribExpr.find('VFN(') != -1 or attribExpr.find('VFFSL(') != -1:
            raise ParseError(self,
                             'Invalid #attr directive.' +
                             ' It should only contain simple Python literals.')
        ## now add the attribute
        self._generatedAttribs.append(attribExpr)

    def addSuper(self, argsList, parserComment=None):
        className = self._className #self._baseClass
        methodName = self._getActiveMethodCompiler().methodName()

        argStringChunks = []
        for arg in argsList:
            chunk = arg[0]
            if not arg[1] == None:
                chunk += '=' + arg[1]
            argStringChunks.append(chunk)
        argString = ','.join(argStringChunks)

        self.addFilteredChunk(
            'super(%(className)s, self).%(methodName)s(%(argString)s)'%locals())

    def addErrorCatcherCall(self, codeChunk, rawCode='', lineCol=''):
        """Generate (or reuse) a private method that evaluates codeChunk
        under the active error catcher, and return its name."""
        if rawCode in self._placeholderToErrorCatcherMap:
            # Reuse the catcher already generated for this placeholder.
            methodName = self._placeholderToErrorCatcherMap[rawCode]
            if not self.setting('outputRowColComments'):
                self._methodsIndex[methodName].addMethDocString(
                    'plus at line %s, col %s'%lineCol)
            return methodName

        self._errorCatcherCount += 1
        methodName = '__errorCatcher' + str(self._errorCatcherCount)
        self._placeholderToErrorCatcherMap[rawCode] = methodName

        catcherMeth = self._spawnMethodCompiler(
            methodName,
            klass=MethodCompiler,
            initialMethodComment=('## CHEETAH: Generated from ' + rawCode +
                                  ' at line %s, col %s'%lineCol + '.')
            )
        catcherMeth.setMethodSignature('def ' + methodName +
                                       '(self, localsDict={})')
        # is this use of localsDict right?
        catcherMeth.addChunk('try:')
        catcherMeth.indent()
        catcherMeth.addChunk("return eval('''" + codeChunk +
                             "''', globals(), localsDict)")
        catcherMeth.dedent()
        # NOTE: the emitted 'except X, e:' form means the generated code
        # targets Python 2.
        catcherMeth.addChunk('except self._CHEETAH__errorCatcher.exceptions(), e:')
        catcherMeth.indent()
        catcherMeth.addChunk("return self._CHEETAH__errorCatcher.warn(exc_val=e, code= " +
                             repr(codeChunk) + " , rawCode= " +
                             repr(rawCode) + " , lineCol=" + str(lineCol) +")")
        catcherMeth.cleanupState()
        self._swallowMethodCompiler(catcherMeth)
        return methodName

    def closeDef(self):
        self.commitStrConst()
        methCompiler = self._popActiveMethodCompiler()
        self._swallowMethodCompiler(methCompiler)

    def closeBlock(self):
        self.commitStrConst()
        methCompiler = self._popActiveMethodCompiler()
        methodName = methCompiler.methodName()
        if self.setting('includeBlockMarkers'):
            endMarker = self.setting('blockMarkerEnd')
            methCompiler.addStrConst(endMarker[0] + methodName + endMarker[1])
        self._swallowMethodCompiler(methCompiler)

        #metaData = self._blockMetaData[methodName]
        #rawDirective = metaData['raw']
        #lineCol = metaData['lineCol']

        ## insert the code to call the block, caching if #cache directive is on
        codeChunk = 'self.' + methodName + '(trans=trans)'
        self.addChunk(codeChunk)

        #self.appendToPrevChunk(' # generated from ' + repr(rawDirective) )
        #if self.setting('outputRowColComments'):
        #    self.appendToPrevChunk(' at line %s, col %s' % lineCol + '.')

    ## code wrapping methods

    def classDef(self):
        # Cached after the first call; see wrapClassDef().
        if self._classDef:
            return self._classDef
        else:
            return self.wrapClassDef()

    __str__ = classDef
    __unicode__ = classDef

    def wrapClassDef(self):
        """Assemble the full class definition: signature, docstring, then
        methods/attributes in the configured order."""
        ind = self.setting('indentationStep')
        classDefChunks = [self.classSignature(),
                          self.classDocstring(),
                          ]
        def addMethods():
            classDefChunks.extend([
                ind + '#'*50,
                ind + '## CHEETAH GENERATED METHODS',
                '\n',
                self.methodDefs(),
                ])
        def addAttributes():
            classDefChunks.extend([
                ind + '#'*50,
                ind + '## CHEETAH GENERATED ATTRIBUTES',
                '\n',
                self.attributes(),
                ])
        if self.setting('outputMethodsBeforeAttributes'):
            addMethods()
            addAttributes()
        else:
            addAttributes()
            addMethods()
        classDef = '\n'.join(classDefChunks)
        self._classDef = classDef
        return classDef

    def classSignature(self):
        return "class %s(%s):" % (self.className(), self._baseClass)

    def classDocstring(self):
        if not self._classDocStringLines:
            return ''
        ind = self.setting('indentationStep')
        docStr = ('%(ind)s"""\n%(ind)s' +
                  '\n%(ind)s'.join(self._classDocStringLines) +
                  '\n%(ind)s"""\n'
                  ) % {'ind':ind}
        return docStr

    def methodDefs(self):
        methodDefs = [methGen.methodDef() for methGen in self._finishedMethods()]
        return '\n\n'.join(methodDefs)

    def attributes(self):
        attribs = [self.setting('indentationStep') + str(attrib)
                   for attrib in self._generatedAttribs ]
        return '\n\n'.join(attribs)
class AutoClassCompiler(ClassCompiler):
    """Default ClassCompiler used by ModuleCompiler; adds no new behavior,
    but exists as a subclassing hook."""
    pass
##################################################
## MODULE COMPILERS
class ModuleCompiler(SettingsManager, GenUtils):
    """Top-level compiler: turns Cheetah template source into a complete
    Python module.  Delegates class-level work to a stack of
    ClassCompilers (see __getattr__)."""

    parserClass = Parser
    classCompilerClass = AutoClassCompiler

    def __init__(self, source=None, file=None,
                 moduleName='DynamicallyCompiledCheetahTemplate',
                 mainClassName=None,           # string
                 mainMethodName=None,          # string
                 baseclassName=None,           # string
                 extraImportStatements=None,   # list of strings
                 settings=None                 # dict
                 ):
        super(ModuleCompiler, self).__init__()
        if settings:
            self.updateSettings(settings)
        # disable useStackFrames if the C version of NameMapper isn't compiled
        # it's painfully slow in the Python version and bites Windows users all
        # the time:
        if not NameMapper.C_VERSION:
            if not sys.platform.startswith('java'):
                warnings.warn(
                    "\nYou don't have the C version of NameMapper installed! "
                    "I'm disabling Cheetah's useStackFrames option as it is "
                    "painfully slow with the Python version of NameMapper. "
                    "You should get a copy of Cheetah with the compiled C version of NameMapper."
                    )
            self.setSetting('useStackFrames', False)

        self._compiled = False
        self._moduleName = moduleName
        if not mainClassName:
            self._mainClassName = moduleName
        else:
            self._mainClassName = mainClassName
        self._mainMethodNameArg = mainMethodName
        if mainMethodName:
            self.setSetting('mainMethodName', mainMethodName)
        self._baseclassName = baseclassName

        self._filePath = None
        self._fileMtime = None

        # Exactly one of 'source' / 'file' may be supplied.
        if source and file:
            raise TypeError("Cannot compile from a source string AND file.")
        elif isinstance(file, basestring): # it's a filename.
            f = open(file) # Raises IOError.
            source = f.read()
            f.close()
            self._filePath = file
            self._fileMtime = os.path.getmtime(file)
        elif hasattr(file, 'read'):
            source = file.read() # Can't set filename or mtime--they're not accessible.
        elif file:
            raise TypeError("'file' argument must be a filename string or file-like object")

        if self._filePath:
            self._fileDirName, self._fileBaseName = os.path.split(self._filePath)
            self._fileBaseNameRoot, self._fileBaseNameExt = os.path.splitext(self._fileBaseName)

        if not isinstance(source, basestring):
            source = unicode(source)
            # by converting to string here we allow objects such as other Templates
            # to be passed in

        # Handle the #indent directive by converting it to other directives.
        # (Over the long term we'll make it a real directive.)
        if source == "":
            warnings.warn("You supplied an empty string for the source!", )
        else:
            # Resolve #unicode / #encoding directives (mutually exclusive).
            unicodeMatch = unicodeDirectiveRE.search(source)
            encodingMatch = encodingDirectiveRE.match(source)
            if unicodeMatch:
                if encodingMatch:
                    raise ParseError(
                        self, "#encoding and #unicode are mutually exclusive! "
                        "Use one or the other.")
                source = unicodeDirectiveRE.sub('', source)
                if isinstance(source, str):
                    encoding = unicodeMatch.group(1) or 'ascii'
                    source = unicode(source, encoding)
            elif encodingMatch:
                encodings = encodingMatch.groups()
                if len(encodings):
                    encoding = encodings[0]
                    source = source.decode(encoding)
            else:
                source = unicode(source)

        if source.find('#indent') != -1: #@@TR: undocumented hack
            source = indentize(source)

        self._parser = self.parserClass(source, filename=self._filePath, compiler=self)
        self._setupCompilerState()

    def __getattr__(self, name):
        """Provide one-way access to the methods and attributes of the
        ClassCompiler, and thereby the MethodCompilers as well.

        WARNING: Use .setMethods to assign the attributes of the ClassCompiler
        from the methods of this class!!! or you will be assigning to attributes
        of this object instead.
        """
        if name in self.__dict__:
            return self.__dict__[name]
        elif hasattr(self.__class__, name):
            return getattr(self.__class__, name)
        elif self._activeClassesList and hasattr(self._activeClassesList[-1], name):
            return getattr(self._activeClassesList[-1], name)
        else:
            raise AttributeError(name)

    def _initializeSettings(self):
        self.updateSettings(copy.deepcopy(DEFAULT_COMPILER_SETTINGS))

    def _setupCompilerState(self):
        self._activeClassesList = []
        self._finishedClassesList = []  # listed by ordered
        self._finishedClassIndex = {}   # listed by name
        self._moduleDef = None
        self._moduleShBang = '#!/usr/bin/env python'
        self._moduleEncoding = 'ascii'
        self._moduleEncodingStr = ''
        self._moduleHeaderLines = []
        self._moduleDocStringLines = []
        self._specialVars = {}
        # Default dependency block for every generated module.
        self._importStatements = [
            "import sys",
            "import os",
            "import os.path",
            "import __builtin__",
            "from os.path import getmtime, exists",
            "import time",
            "import types",
            "from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion",
            "from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple",
            "from Cheetah.Template import Template",
            "from Cheetah.DummyTransaction import *",
            "from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList",
            "from Cheetah.CacheRegion import CacheRegion",
            "import Cheetah.Filters as Filters",
            "import Cheetah.ErrorCatchers as ErrorCatchers",
            ]
        self._importedVarNames = ['sys',
                                  'os',
                                  'os.path',
                                  'time',
                                  'types',
                                  'Template',
                                  'DummyTransaction',
                                  'NotFound',
                                  'Filters',
                                  'ErrorCatchers',
                                  'CacheRegion',
                                  ]
        self._moduleConstants = [
            "VFFSL=valueFromFrameOrSearchList",
            "VFSL=valueFromSearchList",
            "VFN=valueForName",
            "currentTime=time.time",
            ]

    def compile(self):
        """Run the parser over the source, producing the finished class
        compilers.  Idempotence is NOT guaranteed; guarded via
        self._compiled by getModuleCode()."""
        classCompiler = self._spawnClassCompiler(self._mainClassName)
        if self._baseclassName:
            classCompiler.setBaseClass(self._baseclassName)
        self._addActiveClassCompiler(classCompiler)
        self._parser.parse()
        self._swallowClassCompiler(self._popActiveClassCompiler())
        self._compiled = True
        self._parser.cleanup()

    def _spawnClassCompiler(self, className, klass=None):
        if klass is None:
            klass = self.classCompilerClass
        classCompiler = klass(className,
                              moduleCompiler=self,
                              mainMethodName=self.setting('mainMethodName'),
                              fileName=self._filePath,
                              settingsManager=self,
                              )
        return classCompiler

    def _addActiveClassCompiler(self, classCompiler):
        self._activeClassesList.append(classCompiler)

    def _getActiveClassCompiler(self):
        return self._activeClassesList[-1]

    def _popActiveClassCompiler(self):
        return self._activeClassesList.pop()

    def _swallowClassCompiler(self, classCompiler):
        classCompiler.cleanupState()
        self._finishedClassesList.append( classCompiler )
        self._finishedClassIndex[classCompiler.className()] = classCompiler
        return classCompiler

    def _finishedClasses(self):
        return self._finishedClassesList

    def importedVarNames(self):
        return self._importedVarNames

    def addImportedVarNames(self, varNames, raw_statement=None):
        settings = self.settings()
        if not varNames:
            return
        if not settings.get('useLegacyImportMode'):
            # Non-legacy mode: emit the import inline in the current
            # method body instead of tracking the names at module level.
            if raw_statement and getattr(self, '_methodBodyChunks'):
                self.addChunk(raw_statement)
        else:
            self._importedVarNames.extend(varNames)

    ## methods for adding stuff to the module and class definitions

    def setBaseClass(self, baseClassName):
        """Handle an #extends directive, auto-importing the base class
        module when necessary."""
        if self._mainMethodNameArg:
            self.setMainMethodName(self._mainMethodNameArg)
        else:
            self.setMainMethodName(self.setting('mainMethodNameForSubclasses'))

        if self.setting('handlerForExtendsDirective'):
            handler = self.setting('handlerForExtendsDirective')
            baseClassName = handler(compiler=self, baseClassName=baseClassName)
            self._getActiveClassCompiler().setBaseClass(baseClassName)
        elif (not self.setting('autoImportForExtendsDirective')
              or baseClassName=='object' or baseClassName in self.importedVarNames()):
            self._getActiveClassCompiler().setBaseClass(baseClassName)
            # no need to import
        else:
            ##################################################
            ## If the #extends directive contains a classname or modulename that isn't
            #  in self.importedVarNames() already, we assume that we need to add
            #  an implied 'from ModName import ClassName' where ModName == ClassName.
            #  - This is the case in WebKit servlet modules.
            #  - We also assume that the final . separates the classname from the
            #    module name.  This might break if people do something really fancy
            #    with their dots and namespaces.
            baseclasses = baseClassName.split(',')
            for klass in baseclasses:
                chunks = klass.split('.')
                if len(chunks)==1:
                    self._getActiveClassCompiler().setBaseClass(klass)
                    if klass not in self.importedVarNames():
                        modName = klass
                        # we assume the class name to be the module name
                        # and that it's not a builtin:
                        importStatement = "from %s import %s" % (modName, klass)
                        self.addImportStatement(importStatement)
                        self.addImportedVarNames((klass,))
                else:
                    needToAddImport = True
                    modName = chunks[0]
                    #print chunks, ':', self.importedVarNames()
                    for chunk in chunks[1:-1]:
                        if modName in self.importedVarNames():
                            needToAddImport = False
                            finalBaseClassName = klass.replace(modName+'.', '')
                            self._getActiveClassCompiler().setBaseClass(finalBaseClassName)
                            break
                        else:
                            modName += '.'+chunk
                    if needToAddImport:
                        modName, finalClassName = '.'.join(chunks[:-1]), chunks[-1]
                        #if finalClassName != chunks[:-1][-1]:
                        if finalClassName != chunks[-2]:
                            # we assume the class name to be the module name
                            modName = '.'.join(chunks)
                        self._getActiveClassCompiler().setBaseClass(finalClassName)
                        importStatement = "from %s import %s" % (modName, finalClassName)
                        self.addImportStatement(importStatement)
                        self.addImportedVarNames( [finalClassName,] )

    def setCompilerSetting(self, key, valueExpr):
        # NOTE: valueExpr comes from the template's #compiler directive and
        # is eval'd -- templates must be trusted input.
        self.setSetting(key, eval(valueExpr) )
        self._parser.configureParser()

    def setCompilerSettings(self, keywords, settingsStr):
        KWs = keywords
        merge = True
        if 'nomerge' in KWs:
            merge = False

        if 'reset' in KWs:
            # @@TR: this is actually caught by the parser at the moment.
            # subject to change in the future
            self._initializeSettings()
            self._parser.configureParser()
            return
        elif 'python' in KWs:
            settingsReader = self.updateSettingsFromPySrcStr
            # this comes from SettingsManager
        else:
            # this comes from SettingsManager
            settingsReader = self.updateSettingsFromConfigStr

        settingsReader(settingsStr)
        self._parser.configureParser()

    def setShBang(self, shBang):
        self._moduleShBang = shBang

    def setModuleEncoding(self, encoding):
        self._moduleEncoding = encoding

    def getModuleEncoding(self):
        return self._moduleEncoding

    def addModuleHeader(self, line):
        """Adds a header comment to the top of the generated module.
        """
        self._moduleHeaderLines.append(line)

    def addModuleDocString(self, line):
        """Adds a line to the generated module docstring.
        """
        self._moduleDocStringLines.append(line)

    def addModuleGlobal(self, line):
        """Adds a line of global module code.  It is inserted after the import
        statements and Cheetah default module constants.
        """
        self._moduleConstants.append(line)

    def addSpecialVar(self, basename, contents, includeUnderscores=True):
        """Adds module __specialConstant__ to the module globals.
        """
        name = includeUnderscores and '__'+basename+'__' or basename
        self._specialVars[name] = contents.strip()

    def addImportStatement(self, impStatement):
        settings = self.settings()
        if not self._methodBodyChunks or settings.get('useLegacyImportMode'):
            # In the case where we are importing inline in the middle of a source block
            # we don't want to inadvertantly import the module at the top of the file either
            self._importStatements.append(impStatement)

        #@@TR 2005-01-01: there's almost certainly a cleaner way to do this!
        importVarNames = impStatement[impStatement.find('import') + len('import'):].split(',')
        importVarNames = [var.split()[-1] for var in importVarNames]  # handles aliases
        importVarNames = [var for var in importVarNames if not var == '*']
        self.addImportedVarNames(importVarNames, raw_statement=impStatement) #used by #extend for auto-imports

    def addAttribute(self, attribName, expr):
        self._getActiveClassCompiler().addAttribute(attribName + ' =' + expr)

    def addComment(self, comm):
        """Route a ## comment to the right destination based on its
        'doc:'/'header:' style prefix."""
        if re.match(r'#+$', comm):      # skip bar comments
            return

        specialVarMatch = specialVarRE.match(comm)
        if specialVarMatch:
            # @@TR: this is a bit hackish and is being replaced with
            # #set module varName = ...
            return self.addSpecialVar(specialVarMatch.group(1),
                                      comm[specialVarMatch.end():])
        elif comm.startswith('doc:'):
            addLine = self.addMethDocString
            comm = comm[len('doc:'):].strip()
        elif comm.startswith('doc-method:'):
            addLine = self.addMethDocString
            comm = comm[len('doc-method:'):].strip()
        elif comm.startswith('doc-module:'):
            addLine = self.addModuleDocString
            comm = comm[len('doc-module:'):].strip()
        elif comm.startswith('doc-class:'):
            addLine = self.addClassDocString
            comm = comm[len('doc-class:'):].strip()
        elif comm.startswith('header:'):
            addLine = self.addModuleHeader
            comm = comm[len('header:'):].strip()
        else:
            addLine = self.addMethComment

        for line in comm.splitlines():
            addLine(line)

    ## methods for module code wrapping

    def getModuleCode(self):
        if not self._compiled:
            self.compile()
        if self._moduleDef:
            return self._moduleDef
        else:
            return self.wrapModuleDef()

    __str__ = getModuleCode

    def wrapModuleDef(self):
        """Assemble the final module source from the header, imports,
        constants, class definitions and footer."""
        self.addSpecialVar('CHEETAH_docstring', self.setting('defDocStrMsg'))
        self.addModuleGlobal('__CHEETAH_version__ = %r'%Version)
        self.addModuleGlobal('__CHEETAH_versionTuple__ = %r'%(VersionTuple,))
        if self.setting('addTimestampsToCompilerOutput'):
            self.addModuleGlobal('__CHEETAH_genTime__ = %r'%time.time())
            self.addModuleGlobal('__CHEETAH_genTimestamp__ = %r'%self.timestamp())
        if self._filePath:
            timestamp = self.timestamp(self._fileMtime)
            self.addModuleGlobal('__CHEETAH_src__ = %r'%self._filePath)
            self.addModuleGlobal('__CHEETAH_srcLastModified__ = %r'%timestamp)
        else:
            self.addModuleGlobal('__CHEETAH_src__ = None')
            self.addModuleGlobal('__CHEETAH_srcLastModified__ = None')

        moduleDef = """%(header)s
%(docstring)s
##################################################
## DEPENDENCIES
%(imports)s

##################################################
## MODULE CONSTANTS
%(constants)s
%(specialVars)s

if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %%s. Templates compiled before version %%s must be recompiled.'%%(
         __CHEETAH_version__, RequiredCheetahVersion))

##################################################
## CLASSES

%(classes)s

## END CLASS DEFINITION
if not hasattr(%(mainClassName)s, '_initCheetahAttributes'):
    templateAPIClass = getattr(%(mainClassName)s, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(%(mainClassName)s)

%(footer)s
""" % {'header': self.moduleHeader(),
       'docstring': self.moduleDocstring(),
       'specialVars': self.specialVars(),
       'imports': self.importStatements(),
       'constants': self.moduleConstants(),
       'classes': self.classDefs(),
       'footer': self.moduleFooter(),
       'mainClassName': self._mainClassName,
       }

        self._moduleDef = moduleDef
        return moduleDef

    def timestamp(self, theTime=None):
        if not theTime:
            theTime = time.time()
        return time.asctime(time.localtime(theTime))

    def moduleHeader(self):
        header = self._moduleShBang + '\n'
        header += self._moduleEncodingStr + '\n'
        if self._moduleHeaderLines:
            offSet = self.setting('commentOffset')
            header += (
                '#' + ' '*offSet +
                ('\n#'+ ' '*offSet).join(self._moduleHeaderLines) + '\n')
        return header

    def moduleDocstring(self):
        if not self._moduleDocStringLines:
            return ''
        return ('"""' +
                '\n'.join(self._moduleDocStringLines) +
                '\n"""\n')

    def specialVars(self):
        chunks = []
        theVars = self._specialVars
        keys = sorted(theVars.keys())
        for key in keys:
            chunks.append(key + ' = ' + repr(theVars[key]) )
        return '\n'.join(chunks)

    def importStatements(self):
        return '\n'.join(self._importStatements)

    def moduleConstants(self):
        return '\n'.join(self._moduleConstants)

    def classDefs(self):
        classDefs = [klass.classDef() for klass in self._finishedClasses()]
        return '\n\n'.join(classDefs)

    def moduleFooter(self):
        return """
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=%(className)s()).run()
""" % {'className':self._mainClassName}
##################################################
## Make Compiler an alias for ModuleCompiler
Compiler = ModuleCompiler  # public, backwards-compatible alias
| Python |
"""SourceReader class for Cheetah's Parser and CodeGenerator
"""
import re
import sys
# Matches optional horizontal whitespace plus one line ending (\r\n, \r or \n).
EOLre = re.compile(r'[ \f\t]*(?:\r\n|\r|\n)')
# Matches a line ending OR end-of-string, with no leading whitespace allowed.
EOLZre = re.compile(r'(?:\r\n|\r|\n|\Z)')
# PEP 263-style encoding declaration, e.g. "# -*- coding: utf-8 -*-".
# Fixed: made the pattern a raw string so \s and \w are regex escapes rather
# than invalid string-literal escapes (which warn on modern Python).
ENCODINGsearch = re.compile(r"coding[=:]\s*([-\w.]+)").search
class Error(Exception):
    """Base exception for SourceReader errors."""
    pass
class SourceReader(object):
def __init__(self, src, filename=None, breakPoint=None, encoding=None):
## @@TR 2005-01-17: the following comes from a patch Terrel Shumway
## contributed to add unicode support to the reading of Cheetah source
## files with dynamically compiled templates. All the existing unit
## tests pass but, it needs more testing and some test cases of its
## own. My instinct is to move this up into the code that passes in the
## src string rather than leaving it here. As implemented here it
## forces all src strings to unicode, which IMO is not what we want.
# if encoding is None:
# # peek at the encoding in the first two lines
# m = EOLZre.search(src)
# pos = m.end()
# if pos<len(src):
# m = EOLZre.search(src,pos)
# pos = m.end()
# m = ENCODINGsearch(src,0,pos)
# if m:
# encoding = m.group(1)
# else:
# encoding = sys.getfilesystemencoding()
# self._encoding = encoding
# if type(src) is not unicode:
# src = src.decode(encoding)
## end of Terrel's patch
self._src = src
self._filename = filename
self._srcLen = len(src)
if breakPoint == None:
self._breakPoint = self._srcLen
else:
self.setBreakPoint(breakPoint)
self._pos = 0
self._bookmarks = {}
self._posTobookmarkMap = {}
## collect some meta-information
self._EOLs = []
pos = 0
while pos < len(self):
EOLmatch = EOLZre.search(src, pos)
self._EOLs.append(EOLmatch.start())
pos = EOLmatch.end()
self._BOLs = []
for pos in self._EOLs:
BOLpos = self.findBOL(pos)
self._BOLs.append(BOLpos)
def src(self):
return self._src
def filename(self):
return self._filename
def __len__(self):
return self._breakPoint
def __getitem__(self, i):
self.checkPos(i)
return self._src[i]
def __getslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
return self._src[i:j]
def splitlines(self):
if not hasattr(self, '_srcLines'):
self._srcLines = self._src.splitlines()
return self._srcLines
def lineNum(self, pos=None):
if pos == None:
pos = self._pos
for i in range(len(self._BOLs)):
if pos >= self._BOLs[i] and pos <= self._EOLs[i]:
return i
def getRowCol(self, pos=None):
if pos == None:
pos = self._pos
lineNum = self.lineNum(pos)
BOL, EOL = self._BOLs[lineNum], self._EOLs[lineNum]
return lineNum+1, pos-BOL+1
def getRowColLine(self, pos=None):
if pos == None:
pos = self._pos
row, col = self.getRowCol(pos)
return row, col, self.splitlines()[row-1]
def getLine(self, pos):
if pos == None:
pos = self._pos
lineNum = self.lineNum(pos)
return self.splitlines()[lineNum]
def pos(self):
return self._pos
def setPos(self, pos):
self.checkPos(pos)
self._pos = pos
def validPos(self, pos):
return pos <= self._breakPoint and pos >=0
def checkPos(self, pos):
if not pos <= self._breakPoint:
raise Error("pos (" + str(pos) + ") is invalid: beyond the stream's end (" +
str(self._breakPoint-1) + ")" )
elif not pos >=0:
raise Error("pos (" + str(pos) + ") is invalid: less than 0" )
def breakPoint(self):
return self._breakPoint
def setBreakPoint(self, pos):
if pos > self._srcLen:
raise Error("New breakpoint (" + str(pos) +
") is invalid: beyond the end of stream's source string (" +
str(self._srcLen) + ")" )
elif not pos >= 0:
raise Error("New breakpoint (" + str(pos) + ") is invalid: less than 0" )
self._breakPoint = pos
def setBookmark(self, name):
self._bookmarks[name] = self._pos
self._posTobookmarkMap[self._pos] = name
def hasBookmark(self, name):
return name in self._bookmarks
def gotoBookmark(self, name):
if not self.hasBookmark(name):
raise Error("Invalid bookmark (" + name + ") is invalid: does not exist")
pos = self._bookmarks[name]
if not self.validPos(pos):
raise Error("Invalid bookmark (" + name + ', '+
str(pos) + ") is invalid: pos is out of range" )
self._pos = pos
def atEnd(self):
return self._pos >= self._breakPoint
def atStart(self):
return self._pos == 0
def peek(self, offset=0):
self.checkPos(self._pos+offset)
pos = self._pos + offset
return self._src[pos]
def getc(self):
pos = self._pos
if self.validPos(pos+1):
self._pos += 1
return self._src[pos]
def ungetc(self, c=None):
if not self.atStart():
raise Error('Already at beginning of stream')
self._pos -= 1
if not c==None:
self._src[self._pos] = c
def advance(self, offset=1):
self.checkPos(self._pos + offset)
self._pos += offset
def rev(self, offset=1):
self.checkPos(self._pos - offset)
self._pos -= offset
def read(self, offset):
self.checkPos(self._pos + offset)
start = self._pos
self._pos += offset
return self._src[start:self._pos]
def readTo(self, to, start=None):
self.checkPos(to)
if start == None:
start = self._pos
self._pos = to
return self._src[start:to]
def readToEOL(self, start=None, gobble=True):
EOLmatch = EOLZre.search(self.src(), self.pos())
if gobble:
pos = EOLmatch.end()
else:
pos = EOLmatch.start()
return self.readTo(to=pos, start=start)
def find(self, it, pos=None):
if pos == None:
pos = self._pos
return self._src.find(it, pos )
def startswith(self, it, pos=None):
if self.find(it, pos) == self.pos():
return True
else:
return False
def rfind(self, it, pos):
if pos == None:
pos = self._pos
return self._src.rfind(it, pos)
def findBOL(self, pos=None):
if pos == None:
pos = self._pos
src = self.src()
return max(src.rfind('\n', 0, pos)+1, src.rfind('\r', 0, pos)+1, 0)
def findEOL(self, pos=None, gobble=False):
if pos == None:
pos = self._pos
match = EOLZre.search(self.src(), pos)
if gobble:
return match.end()
else:
return match.start()
def isLineClearToPos(self, pos=None):
if pos == None:
pos = self.pos()
self.checkPos(pos)
src = self.src()
BOL = self.findBOL()
return BOL == pos or src[BOL:pos].isspace()
def matches(self, strOrRE):
if isinstance(strOrRE, (str, unicode)):
return self.startswith(strOrRE, pos=self.pos())
else: # assume an re object
return strOrRE.match(self.src(), self.pos())
def matchWhiteSpace(self, WSchars=' \f\t'):
return (not self.atEnd()) and self.peek() in WSchars
def getWhiteSpace(self, max=None, WSchars=' \f\t'):
if not self.matchWhiteSpace(WSchars):
return ''
start = self.pos()
breakPoint = self.breakPoint()
if max is not None:
breakPoint = min(breakPoint, self.pos()+max)
while self.pos() < breakPoint:
self.advance()
if not self.matchWhiteSpace(WSchars):
break
return self.src()[start:self.pos()]
def matchNonWhiteSpace(self, WSchars=' \f\t\n\r'):
return self.atEnd() or not self.peek() in WSchars
def getNonWhiteSpace(self, WSchars=' \f\t\n\r'):
if not self.matchNonWhiteSpace(WSchars):
return ''
start = self.pos()
while self.pos() < self.breakPoint():
self.advance()
if not self.matchNonWhiteSpace(WSchars):
break
return self.src()[start:self.pos()]
| Python |
"""
client module for memcached (memory cache daemon)
Overview
========
See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached.
Usage summary
=============
This should give you a feel for how this module operates::
import memcache
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
mc.set("some_key", "Some value")
value = mc.get("some_key")
mc.set("another_key", 3)
mc.delete("another_key")
mc.set("key", "1") # note that the key used for incr/decr must be a string.
mc.incr("key")
mc.decr("key")
The standard way to use memcache with a database is like this::
key = derive_key(obj)
obj = mc.get(key)
if not obj:
obj = backend_api.get(...)
mc.set(key, obj)
# we now have obj, and future passes through this code
# will use the object from the cache.
Detailed Documentation
======================
More detailed documentation is available in the L{Client} class.
"""
import sys
import socket
import time
try:
import cPickle as pickle
except ImportError:
import pickle
# Package metadata for the bundled python-memcached client.
__author__ = "Evan Martin <martine@danga.com>"
__version__ = "1.2_tummy5"
__copyright__ = "Copyright (C) 2003 Danga Interactive"
__license__ = "Python"
class _Error(Exception):
    """Internal error raised when a memcached response is malformed."""
    pass
class Client:
"""
Object representing a pool of memcache servers.
See L{memcache} for an overview.
In all cases where a key is used, the key can be either:
1. A simple hashable type (string, integer, etc.).
2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid
making this module calculate a hash value. You may prefer, for
example, to keep all of a given user's objects on the same memcache
server, so you could use the user's unique id as the hash value.
@group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog
@group Insertion: set, add, replace
@group Retrieval: get, get_multi
@group Integers: incr, decr
@group Removal: delete
@sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\
set, add, replace, get, get_multi, incr, decr, delete
"""
_usePickle = False
_FLAG_PICKLE = 1<<0
_FLAG_INTEGER = 1<<1
_FLAG_LONG = 1<<2
_SERVER_RETRIES = 10 # how many times to try finding a free server.
def __init__(self, servers, debug=0):
"""
Create a new Client object with the given list of servers.
@param servers: C{servers} is passed to L{set_servers}.
@param debug: whether to display error messages when a server can't be
contacted.
"""
self.set_servers(servers)
self.debug = debug
self.stats = {}
def set_servers(self, servers):
"""
Set the pool of servers used by this client.
@param servers: an array of servers.
Servers can be passed in two forms:
1. Strings of the form C{"host:port"}, which implies a default weight of 1.
2. Tuples of the form C{("host:port", weight)}, where C{weight} is
an integer weight value.
"""
self.servers = [_Host(s, self.debuglog) for s in servers]
self._init_buckets()
def get_stats(self):
'''Get statistics from each of the servers.
@return: A list of tuples ( server_identifier, stats_dictionary ).
The dictionary contains a number of name/value pairs specifying
the name of the status field and the string value associated with
it. The values are not converted from strings.
'''
data = []
for s in self.servers:
if not s.connect(): continue
name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
s.send_cmd('stats')
serverData = {}
data.append(( name, serverData ))
readline = s.readline
while True:
line = readline()
if not line or line.strip() == 'END': break
stats = line.split(' ', 2)
serverData[stats[1]] = stats[2]
return(data)
def flush_all(self):
'Expire all data currently in the memcache servers.'
for s in self.servers:
if not s.connect(): continue
s.send_cmd('flush_all')
s.expect("OK")
def debuglog(self, str):
if self.debug:
sys.stderr.write("MemCached: %s\n" % str)
def _statlog(self, func):
if func not in self.stats:
self.stats[func] = 1
else:
self.stats[func] += 1
def forget_dead_hosts(self):
"""
Reset every host in the pool to an "alive" state.
"""
for s in self.servers:
s.dead_until = 0
def _init_buckets(self):
self.buckets = []
for server in self.servers:
for i in range(server.weight):
self.buckets.append(server)
def _get_server(self, key):
if isinstance(key, tuple):
serverhash = key[0]
key = key[1]
else:
serverhash = hash(key)
for i in range(Client._SERVER_RETRIES):
server = self.buckets[serverhash % len(self.buckets)]
if server.connect():
return server, key
serverhash = hash(str(serverhash) + str(i))
return None, None
def disconnect_all(self):
for s in self.servers:
s.close_socket()
def delete(self, key, time=0):
'''Deletes a key from the memcache.
@return: Nonzero on success.
@rtype: int
'''
server, key = self._get_server(key)
if not server:
return 0
self._statlog('delete')
if time != None:
cmd = "delete %s %d" % (key, time)
else:
cmd = "delete %s" % key
try:
server.send_cmd(cmd)
server.expect("DELETED")
except socket.error, msg:
server.mark_dead(msg[1])
return 0
return 1
def incr(self, key, delta=1):
"""
Sends a command to the server to atomically increment the value for C{key} by
C{delta}, or by 1 if C{delta} is unspecified. Returns None if C{key} doesn't
exist on server, otherwise it returns the new value after incrementing.
Note that the value for C{key} must already exist in the memcache, and it
must be the string representation of an integer.
>>> mc.set("counter", "20") # returns 1, indicating success
1
>>> mc.incr("counter")
21
>>> mc.incr("counter")
22
Overflow on server is not checked. Be aware of values approaching
2**32. See L{decr}.
@param delta: Integer amount to increment by (should be zero or greater).
@return: New value after incrementing.
@rtype: int
"""
return self._incrdecr("incr", key, delta)
def decr(self, key, delta=1):
"""
Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and
new values are capped at 0. If server value is 1, a decrement of 2
returns 0, not -1.
@param delta: Integer amount to decrement by (should be zero or greater).
@return: New value after decrementing.
@rtype: int
"""
return self._incrdecr("decr", key, delta)
def _incrdecr(self, cmd, key, delta):
server, key = self._get_server(key)
if not server:
return 0
self._statlog(cmd)
cmd = "%s %s %d" % (cmd, key, delta)
try:
server.send_cmd(cmd)
line = server.readline()
return int(line)
except socket.error, msg:
server.mark_dead(msg[1])
return None
def add(self, key, val, time=0):
'''
Add new key with value.
Like L{set}, but only stores in memcache if the key doesn\'t already exist.
@return: Nonzero on success.
@rtype: int
'''
return self._set("add", key, val, time)
def replace(self, key, val, time=0):
'''Replace existing key with value.
Like L{set}, but only stores in memcache if the key already exists.
The opposite of L{add}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("replace", key, val, time)
def set(self, key, val, time=0):
'''Unconditionally sets a key to a given value in the memcache.
The C{key} can optionally be an tuple, with the first element being the
hash value, if you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects on the
same memcache server, so you could use the user's unique id as the hash
value.
@return: Nonzero on success.
@rtype: int
'''
return self._set("set", key, val, time)
def _set(self, cmd, key, val, time):
server, key = self._get_server(key)
if not server:
return 0
self._statlog(cmd)
flags = 0
if isinstance(val, str):
pass
elif isinstance(val, int):
flags |= Client._FLAG_INTEGER
val = "%d" % val
elif isinstance(val, long):
flags |= Client._FLAG_LONG
val = "%d" % val
elif self._usePickle:
flags |= Client._FLAG_PICKLE
val = pickle.dumps(val, 2)
else:
pass
fullcmd = "%s %s %d %d %d\r\n%s" % (cmd, key, flags, time, len(val), val)
try:
server.send_cmd(fullcmd)
server.expect("STORED")
except socket.error, msg:
server.mark_dead(msg[1])
return 0
return 1
def get(self, key):
'''Retrieves a key from the memcache.
@return: The value or None.
'''
server, key = self._get_server(key)
if not server:
return None
self._statlog('get')
try:
server.send_cmd("get %s" % key)
rkey, flags, rlen, = self._expectvalue(server)
if not rkey:
return None
value = self._recv_value(server, flags, rlen)
server.expect("END")
except (_Error, socket.error), msg:
if isinstance(msg, tuple):
msg = msg[1]
server.mark_dead(msg)
return None
return value
def get_multi(self, keys):
'''
Retrieves multiple keys from the memcache doing just one query.
>>> success = mc.set("foo", "bar")
>>> success = mc.set("baz", 42)
>>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
1
This method is recommended over regular L{get} as it lowers the number of
total packets flying around your network, reducing total latency, since
your app doesn\'t have to wait for each round-trip of L{get} before sending
the next one.
@param keys: An array of keys.
@return: A dictionary of key/value pairs that were available.
'''
self._statlog('get_multi')
server_keys = {}
# build up a list for each server of all the keys we want.
for key in keys:
server, key = self._get_server(key)
if not server:
continue
if server not in server_keys:
server_keys[server] = []
server_keys[server].append(key)
# send out all requests on each server before reading anything
dead_servers = []
for server in server_keys.keys():
try:
server.send_cmd("get %s" % " ".join(server_keys[server]))
except socket.error, msg:
server.mark_dead(msg[1])
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
retvals = {}
for server in server_keys.keys():
try:
line = server.readline()
while line and line != 'END':
rkey, flags, rlen = self._expectvalue(server, line)
# Bo Yang reports that this can sometimes be None
if rkey is not None:
val = self._recv_value(server, flags, rlen)
retvals[rkey] = val
line = server.readline()
except (_Error, socket.error), msg:
server.mark_dead(msg)
return retvals
def _expectvalue(self, server, line=None):
if not line:
line = server.readline()
if line[:5] == 'VALUE':
resp, rkey, flags, len = line.split()
flags = int(flags)
rlen = int(len)
return (rkey, flags, rlen)
else:
return (None, None, None)
def _recv_value(self, server, flags, rlen):
rlen += 2 # include \r\n
buf = server.recv(rlen)
if len(buf) != rlen:
raise _Error("received %d bytes when expecting %d" % (len(buf), rlen))
if len(buf) == rlen:
buf = buf[:-2] # strip \r\n
if flags == 0:
val = buf
elif flags & Client._FLAG_INTEGER:
val = int(buf)
elif flags & Client._FLAG_LONG:
val = long(buf)
elif self._usePickle and flags & Client._FLAG_PICKLE:
try:
val = pickle.loads(buf)
except:
self.debuglog('Pickle error...\n')
val = None
else:
self.debuglog("unknown flags on get: %x\n" % flags)
return val
class _Host:
_DEAD_RETRY = 30 # number of seconds before retrying a dead server.
def __init__(self, host, debugfunc=None):
if isinstance(host, tuple):
host = host[0]
self.weight = host[1]
else:
self.weight = 1
if host.find(":") > 0:
self.ip, self.port = host.split(":")
self.port = int(self.port)
else:
self.ip, self.port = host, 11211
if not debugfunc:
debugfunc = lambda x: x
self.debuglog = debugfunc
self.deaduntil = 0
self.socket = None
def _check_dead(self):
if self.deaduntil and self.deaduntil > time.time():
return 1
self.deaduntil = 0
return 0
def connect(self):
if self._get_socket():
return 1
return 0
def mark_dead(self, reason):
self.debuglog("MemCache: %s: %s. Marking dead." % (self, reason))
self.deaduntil = time.time() + _Host._DEAD_RETRY
self.close_socket()
def _get_socket(self):
if self._check_dead():
return None
if self.socket:
return self.socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Python 2.3-ism: s.settimeout(1)
try:
s.connect((self.ip, self.port))
except socket.error, msg:
self.mark_dead("connect: %s" % msg[1])
return None
self.socket = s
return s
def close_socket(self):
if self.socket:
self.socket.close()
self.socket = None
def send_cmd(self, cmd):
if len(cmd) > 100:
self.socket.sendall(cmd)
self.socket.sendall('\r\n')
else:
self.socket.sendall(cmd + '\r\n')
def readline(self):
buffers = ''
recv = self.socket.recv
while True:
data = recv(1)
if not data:
self.mark_dead('Connection closed while reading from %s'
% repr(self))
break
if data == '\n' and buffers and buffers[-1] == '\r':
return(buffers[:-1])
buffers = buffers + data
return(buffers)
def expect(self, text):
line = self.readline()
if line != text:
self.debuglog("while expecting '%s', got unexpected response '%s'" % (text, line))
return line
def recv(self, rlen):
buf = ''
recv = self.socket.recv
while len(buf) < rlen:
buf = buf + recv(rlen - len(buf))
return buf
def __str__(self):
d = ''
if self.deaduntil:
d = " (dead until %d)" % self.deaduntil
return "%s:%d%s" % (self.ip, self.port, d)
def _doctest():
    """Run this module's docstring examples against a live local memcached."""
    import doctest, memcache
    client = Client(["127.0.0.1:11211"], debug=1)
    return doctest.testmod(memcache, globs={"mc": client})
if __name__ == "__main__":
print("Testing docstrings...")
_doctest()
print("Running tests:")
#servers = ["127.0.0.1:11211", "127.0.0.1:11212"]
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
def to_s(val):
if not isinstance(val, str):
return "%s (%s)" % (val, type(val))
return "%s" % val
def test_setget(key, val):
print("Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)))
mc.set(key, val)
newval = mc.get(key)
if newval == val:
print("OK")
return 1
else:
print("FAIL")
return 0
class FooStruct:
def __init__(self):
self.bar = "baz"
def __str__(self):
return "A FooStruct"
def __eq__(self, other):
if isinstance(other, FooStruct):
return self.bar == other.bar
return 0
test_setget("a_string", "some random string")
test_setget("an_integer", 42)
if test_setget("long", long(1<<30)):
print("Testing delete ...")
if mc.delete("long"):
print("OK")
else:
print("FAIL")
print("Testing get_multi ...")
print(mc.get_multi(["a_string", "an_integer"]))
print("Testing get(unknown value) ...")
print(to_s(mc.get("unknown_value")))
f = FooStruct()
test_setget("foostruct", f)
print("Testing incr ...")
x = mc.incr("an_integer", 1)
if x == 43:
print("OK")
else:
print("FAIL")
print("Testing decr ...")
x = mc.decr("an_integer", 1)
if x == 42:
print("OK")
else:
print("FAIL")
# vim: ts=4 sw=4 et :
| Python |
"""
Indentation maker.
@@TR: this code is unsupported and largely undocumented ...
This version is based directly on code by Robert Kuzelj
<robert_kuzelj@yahoo.com> and uses his directive syntax. Some classes and
attributes have been renamed. Indentation is output via
$self._CHEETAH__indenter.indent() to prevent '_indenter' being looked up on the
searchList and another one being found. The directive syntax will
soon be changed somewhat.
"""
import re
import sys
def indentize(source):
    """Run *source* through an IndentProcessor and return the result."""
    return IndentProcessor().process(source)


class IndentProcessor(object):
    """Preprocess #indent tags, translating them into #silent calls on the
    template's _CHEETAH__indenter and prefixing ordinary source lines with
    an ${...indent(n)} placeholder."""
    LINE_SEP = '\n'
    ARGS = "args"
    INDENT_DIR = re.compile(r'[ \t]*#indent[ \t]*(?P<args>.*)')
    DIRECTIVE = re.compile(r"[ \t]*#")
    WS = "ws"
    WHITESPACES = re.compile(r"(?P<ws>[ \t]*)")

    INC = "++"
    DEC = "--"
    SET = "="
    CHAR = "char"

    ON = "on"
    OFF = "off"

    PUSH = "push"
    POP = "pop"

    def process(self, _txt):
        """Rewrite each line of *_txt*; returns the joined result."""
        result = []
        for line in _txt.splitlines():
            match = self.INDENT_DIR.match(line)
            if match:
                # is an indentation directive
                args = match.group(self.ARGS).strip()
                if args == self.ON:
                    line = "#silent $self._CHEETAH__indenter.on()"
                elif args == self.OFF:
                    line = "#silent $self._CHEETAH__indenter.off()"
                elif args == self.INC:
                    line = "#silent $self._CHEETAH__indenter.inc()"
                elif args == self.DEC:
                    line = "#silent $self._CHEETAH__indenter.dec()"
                elif args.startswith(self.SET):
                    level = int(args[1:])
                    line = "#silent $self._CHEETAH__indenter.setLevel(%(level)d)" % {"level":level}
                elif args.startswith('chars'):
                    # NOTE: eval() of template-supplied text -- only safe
                    # for trusted template sources.
                    self.indentChars = eval(args.split('=')[1])
                    # Bug fix: the old code interpolated an unrelated (and
                    # usually undefined) 'level' variable into a call to the
                    # nonexistent setChars() method.  Emit the parsed char
                    # string through the real setChar() API instead.
                    line = ("#silent $self._CHEETAH__indenter.setChar(%(chars)r)"
                            % {"chars": self.indentChars})
                elif args.startswith(self.PUSH):
                    line = "#silent $self._CHEETAH__indenter.push()"
                elif args.startswith(self.POP):
                    line = "#silent $self._CHEETAH__indenter.pop()"
            else:
                match = self.DIRECTIVE.match(line)
                if not match:
                    # not another directive: prefix the line with an indent
                    # placeholder sized from its leading whitespace
                    match = self.WHITESPACES.match(line)
                    if match:
                        size = len(match.group("ws").expandtabs(4))
                        line = ("${self._CHEETAH__indenter.indent(%(size)d)}" % {"size":size}) + line.lstrip()
                    else:
                        line = "${self._CHEETAH__indenter.indent(0)}" + line

            result.append(line)
        return self.LINE_SEP.join(result)
class Indenter(object):
    """
    A class that keeps track of the current indentation level.
    .indent() returns the appropriate amount of indentation.
    """
    # Class-level defaults are kept for backwards compatibility with code
    # that reads them off the class itself; real state lives per instance.
    On = 1
    Level = 0
    Chars = ' '
    LevelStack = []

    def __init__(self):
        # Bug fix: LevelStack used to be a shared, mutable class attribute,
        # so push()/pop() on one Indenter corrupted every other instance.
        # All state is now initialised per instance.
        self.On = 1
        self.Level = 0
        self.Chars = ' '
        self.LevelStack = []

    def on(self):
        """Enable indentation output."""
        self.On = 1

    def off(self):
        """Disable indentation output; indent() then uses its default."""
        self.On = 0

    def inc(self):
        """Increase the indentation level by one."""
        self.Level += 1

    def dec(self):
        """decrement can only be applied to values greater than zero;
        values below zero don't make any sense at all!"""
        if self.Level > 0:
            self.Level -= 1

    def push(self):
        """Remember the current level so a later pop() can restore it."""
        self.LevelStack.append(self.Level)

    def pop(self):
        """the level stack can not become -1. any attempt to do so
        sets the level to 0!"""
        if len(self.LevelStack) > 0:
            self.Level = self.LevelStack.pop()
        else:
            self.Level = 0

    def setLevel(self, _level):
        """the level can't be less than zero. any attempt to do so
        sets the level automatically to zero!"""
        if _level < 0:
            self.Level = 0
        else:
            self.Level = _level

    def setChar(self, _chars):
        """Set the string used for one level of indentation."""
        self.Chars = _chars

    def setChars(self, _chars):
        """Alias for setChar(); the '#indent chars=' directive emits a call
        to this name."""
        self.setChar(_chars)

    def indent(self, _default=0):
        """Return Chars*Level when enabled, otherwise _default spaces."""
        if self.On:
            return self.Chars * self.Level
        return " " * _default
| Python |
## statprof.py
## Copyright (C) 2004,2005 Andy Wingo <wingo at pobox dot com>
## Copyright (C) 2001 Rob Browning <rlb at defaultvalue dot org>
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this program; if not, contact:
##
## Free Software Foundation Voice: +1-617-542-5942
## 59 Temple Place - Suite 330 Fax: +1-617-542-2652
## Boston, MA 02111-1307, USA gnu@gnu.org
"""
statprof is intended to be a fairly simple statistical profiler for
python. It was ported directly from a statistical profiler for guile,
also named statprof, available from guile-lib [0].
[0] http://wingolog.org/software/guile-lib/statprof/
To start profiling, call statprof.start():
>>> start()
Then run whatever it is that you want to profile, for example:
>>> import test.pystone; test.pystone.pystones()
Then stop the profiling and print out the results:
>>> stop()
>>> display()
% cumulative self
time seconds seconds name
26.72 1.40 0.37 pystone.py:79:Proc0
13.79 0.56 0.19 pystone.py:133:Proc1
13.79 0.19 0.19 pystone.py:208:Proc8
10.34 0.16 0.14 pystone.py:229:Func2
6.90 0.10 0.10 pystone.py:45:__init__
4.31 0.16 0.06 pystone.py:53:copy
...
All of the numerical data with the exception of the calls column is
statistically approximate. In the following column descriptions, and
in all of statprof, "time" refers to execution time (both user and
system), not wall clock time.
% time
The percent of the time spent inside the procedure itself (not
counting children).
cumulative seconds
The total number of seconds spent in the procedure, including
children.
self seconds
The total number of seconds spent in the procedure itself (not
counting children).
name
The name of the procedure.
By default statprof keeps the data collected from previous runs. If you
want to clear the collected data, call reset():
>>> reset()
reset() can also be used to change the sampling frequency. For example,
to tell statprof to sample 50 times a second:
>>> reset(50)
This means that statprof will sample the call stack after every 1/50 of
a second of user + system time spent running on behalf of the python
process. When your process is idle (for example, blocking in a read(),
as is the case at the listener), the clock does not advance. For this
reason statprof is currently not suitable for profiling io-bound
operations.
The profiler uses the hash of the code object itself to identify the
procedures, so it won't confuse different procedures with the same name.
They will show up as two different rows in the output.
Right now the profiler is quite simplistic. I cannot provide
call-graphs or other higher level information. What you see in the
table is pretty much all there is. Patches are welcome :-)
Threading
---------
Because signals only get delivered to the main thread in Python,
statprof only profiles the main thread. However because the time
reporting function uses per-process timers, the results can be
significantly off if other threads' work patterns are not similar to the
main thread's work patterns.
Implementation notes
--------------------
The profiler works by setting the unix profiling signal ITIMER_PROF to
go off after the interval you define in the call to reset(). When the
signal fires, a sampling routine is run which looks at the current
procedure that's executing, and then crawls up the stack, and for each
frame encountered, increments that frame's code object's sample count.
Note that if a procedure is encountered multiple times on a given stack,
it is only counted once. After the sampling is complete, the profiler
resets profiling timer to fire again after the appropriate interval.
Meanwhile, the profiler keeps track, via os.times(), how much CPU time
(system and user -- which is also what ITIMER_PROF tracks), has elapsed
while code has been executing within a start()/stop() block.
The profiler also tries to avoid counting or timing its own code as
much as possible.
"""
# itimer is a third-party C extension wrapping setitimer(2); this module
# hard-requires it at import time and fails with install instructions.
try:
    import itimer
except ImportError:
    raise ImportError('''statprof requires the itimer python extension.
To install it, enter the following commands from a terminal:

wget http://www.cute.fi/~torppa/py-itimer/py-itimer.tar.gz
tar zxvf py-itimer.tar.gz
cd py-itimer
sudo python setup.py install
''')

import signal
import os

# Public profiling API.
__all__ = ['start', 'stop', 'reset', 'display']
###########################################################################
## Utils
def clock():
    """Return the CPU time (user + system seconds) consumed by this process."""
    user, system = os.times()[:2]
    return user + system
###########################################################################
## Collection data structures
class ProfileState(object):
    """Mutable bookkeeping for one profiling session (module singleton)."""

    def __init__(self, frequency=None):
        self.reset(frequency)

    def reset(self, frequency=None):
        """Zero every counter; optionally adopt a new sampling frequency."""
        self.accumulated_time = 0.0   # total profiled CPU time so far
        self.last_start_time = None   # set while the timer is active
        self.sample_count = 0         # total count of sampler calls
        if frequency:
            self.sample_interval = 1.0/frequency
        elif not hasattr(self, 'sample_interval'):
            self.sample_interval = 1.0/100.0  # default to 100 Hz
        # else: leave the frequency as it was
        self.remaining_prof_time = None
        self.profile_level = 0        # user start()/stop() nesting depth
        self.count_calls = False      # whether to catch apply-frame
        self.gc_time_taken = 0        # gc time between start() and stop()

    def accumulate_time(self, stop_time):
        """Fold the interval since the last (re)start into the total."""
        self.accumulated_time += stop_time - self.last_start_time


state = ProfileState()

## call_data := { code object: CallData }
call_data = {}


class CallData(object):
    """Per-code-object sample counters; registers itself in call_data."""

    def __init__(self, code):
        self.name = code.co_name
        self.filename = code.co_filename
        self.lineno = code.co_firstlineno
        self.call_count = 0
        self.cum_sample_count = 0
        self.self_sample_count = 0
        call_data[code] = self


def get_call_data(code):
    """Look up (or lazily create and register) the CallData for *code*."""
    existing = call_data.get(code)
    return existing if existing is not None else CallData(code)
###########################################################################
## SIGPROF handler
def sample_stack_procs(frame):
    """Take one sample: credit the running frame's code with a 'self' hit
    and every distinct code object on the stack with a cumulative hit."""
    state.sample_count += 1
    get_call_data(frame.f_code).self_sample_count += 1

    # Record each code object at most once per sample so recursive calls
    # aren't double-counted in the cumulative column.
    code_seen = {}
    while frame:
        code_seen[frame.f_code] = True
        frame = frame.f_back
    # Fix: dict.iterkeys() is Python2-only; iterating the dict directly is
    # identical there and also works under Python 3.
    for code in code_seen:
        get_call_data(code).cum_sample_count += 1
def profile_signal_handler(signum, frame):
    # SIGPROF handler: take one stack sample, then re-arm the one-shot
    # profiling timer.  Samples only while a start()/stop() block is active.
    if state.profile_level > 0:
        state.accumulate_time(clock())
        sample_stack_procs(frame)
        # Re-armed as a one-shot (interval arg 0.0) AFTER sampling, so the
        # sampler's own cost is not included in the next interval.
        itimer.setitimer(itimer.ITIMER_PROF,
            state.sample_interval, 0.0)
        state.last_start_time = clock()
###########################################################################
## Profiling API
def is_active():
    """Return True while profiling has been started and not yet stopped."""
    nesting = state.profile_level
    return nesting > 0
def start():
    """Begin (or nest) profiling: install the SIGPROF handler and timer."""
    state.profile_level += 1
    # Only the outermost start() actually arms the timer.
    if state.profile_level == 1:
        state.last_start_time = clock()
        # Resume with whatever interval remained when stop() last disarmed
        # the timer, so paused profiling doesn't skew the sampling rate.
        rpt = state.remaining_prof_time
        state.remaining_prof_time = None
        signal.signal(signal.SIGPROF, profile_signal_handler)
        itimer.setitimer(itimer.ITIMER_PROF,
            rpt or state.sample_interval, 0.0)
        state.gc_time_taken = 0 # GC time is never measured; reset for parity with reset()
def stop():
    """Undo one start(); the outermost stop() disarms timer and handler."""
    state.profile_level -= 1
    if state.profile_level == 0:
        state.accumulate_time(clock())
        state.last_start_time = None
        # setitimer() returns the previous (remaining, interval) pair; keep
        # the remaining time so a later start() can resume mid-interval.
        rpt = itimer.setitimer(itimer.ITIMER_PROF, 0.0, 0.0)
        signal.signal(signal.SIGPROF, signal.SIG_IGN)
        state.remaining_prof_time = rpt[0]
        state.gc_time_taken = 0 # GC time is never measured; reset for parity with reset()
def reset(frequency=None):
    """Discard all collected samples; optionally set a new sample frequency.

    Only legal while the profiler is stopped.
    """
    assert state.profile_level == 0, "Can't reset() while statprof is running"
    call_data.clear()
    state.reset(frequency)
###########################################################################
## Reporting API
class CallStats(object):
    """Presentation-layer statistics for one procedure, derived from a
    CallData record and the global sample totals."""

    def __init__(self, call_data):
        self_samples = call_data.self_sample_count
        cum_samples = call_data.cum_sample_count
        nsamples = state.sample_count
        secs_per_sample = state.accumulated_time / nsamples
        basename = os.path.basename(call_data.filename)

        self.name = '%s:%d:%s' % (basename, call_data.lineno, call_data.name)
        # Bug fix: force float division.  Under Python 2 the original
        # expression used integer division, truncating the percent column
        # to 0 for nearly every procedure.
        self.pcnt_time_in_proc = self_samples / float(nsamples) * 100
        self.cum_secs_in_proc = cum_samples * secs_per_sample
        self.self_secs_in_proc = self_samples * secs_per_sample
        # Call counting is not implemented; kept for interface stability.
        self.num_calls = None
        self.self_secs_per_call = None
        self.cum_secs_per_call = None

    def display(self):
        """Print one formatted row of the profile table."""
        print('%6.2f %9.2f %9.2f  %s' % (self.pcnt_time_in_proc,
                                         self.cum_secs_in_proc,
                                         self.self_secs_in_proc,
                                         self.name))
def display():
    """Print a summary report of all collected samples, hottest sites first."""
    if state.sample_count == 0:
        print('No samples recorded.')
        return
    # .values() instead of Python-2-only .itervalues(); sort with an explicit
    # key instead of decorate-sort-undecorate on (float, float, CallStats)
    # tuples, which raises TypeError under Python 3 whenever the two floats
    # tie and the comparison falls through to the CallStats objects.
    stats = [CallStats(x) for x in call_data.values()]
    stats.sort(key=lambda s: (s.self_secs_in_proc, s.cum_secs_in_proc),
               reverse=True)
    print('%5.5s %10.10s %7.7s %-8.8s' % ('% ', 'cumulative', 'self', ''))
    print('%5.5s %9.9s %8.8s %-8.8s' % ("time", "seconds", "seconds", "name"))
    for x in stats:
        x.display()
    print('---')
    print('Sample count: %d' % state.sample_count)
    print('Total time: %f seconds' % state.accumulated_time)
| Python |
"""This is a copy of the htmlDecode function in Webware.
@@TR: It implemented more efficiently.
"""
from Cheetah.Utils.htmlEncode import htmlCodesReversed
def htmlDecode(s, codes=htmlCodesReversed):
    """Return 's' with HTML character entities replaced by their plain
    characters.  Normal tags such as <p> are NOT removed; this is the
    inverse of htmlEncode()."""
    for pair in codes:
        entity, char = pair[1], pair[0]
        s = s.replace(entity, char)
    return s
| Python |
#!/usr/bin/env python
"""
Miscellaneous functions/objects used by Cheetah but also useful standalone.
"""
import os # Used in mkdirsWithPyInitFile.
import sys # Used in die.
##################################################
## MISCELLANEOUS FUNCTIONS
def die(reason):
    """Write 'reason' to stderr and terminate the process with exit status 1."""
    sys.stderr.write(reason)
    sys.stderr.write('\n')
    sys.exit(1)
def useOrRaise(thing, errmsg=''):
    """Raise 'thing' (with message 'errmsg') if it's an Exception subclass;
    otherwise return it unchanged.

    Called by: Cheetah.Servlet.cgiImport()
    """
    isExceptionClass = isinstance(thing, type) and issubclass(thing, Exception)
    if isExceptionClass:
        raise thing(errmsg)
    return thing
def checkKeywords(dic, legalKeywords, what='argument'):
    """Verify no illegal keyword arguments were passed to a function.

    in : dic, dictionary (**kw in the calling routine).
         legalKeywords, list of strings, the keywords that are allowed.
         what, string, the noun used in the error message.
    out: None.
    exc: TypeError if 'dic' contains a key not in 'legalKeywords'.
    called by: Cheetah.Template.__init__()
    """
    for key in dic:
        if key not in legalKeywords:
            raise TypeError("'%s' is not a valid %s" % (key, what))
def removeFromList(list_, *elements):
    """Remove the first occurrence of each of 'elements' from 'list_',
    silently skipping elements that are not present.  Mutates 'list_'
    in place!  Returns None.
    """
    for element in elements:
        try:
            list_.remove(element)
        except ValueError:
            pass  # element wasn't in the list -- that's fine
def mkdirsWithPyInitFiles(path):
    """Same as os.makedirs (mkdir 'path' and all missing parent directories)
    but also puts a Python '__init__.py' file in every directory it
    creates. Does nothing (without creating an '__init__.py' file) if the
    directory already exists.
    """
    parent = os.path.split(path)[0]
    if parent and not os.path.exists(parent):
        mkdirsWithPyInitFiles(parent)
    if not os.path.exists(path):
        os.mkdir(path)
        init = os.path.join(path, "__init__.py")
        # Opening in 'w' mode is enough to create an empty file; 'with'
        # guarantees the handle is closed even on failure.
        with open(init, 'w'):
            pass
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
# $Id: WebInputMixin.py,v 1.10 2006/01/06 21:56:54 tavis_rudd Exp $
"""Provides helpers for Template.webInput(), a method for importing web
transaction variables in bulk. See the docstring of webInput for full details.
Meta-Data
================================================================================
Author: Mike Orr <iron@mso.oz.net>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.10 $
Start Date: 2002/03/17
Last Revision Date: $Date: 2006/01/06 21:56:54 $
"""
__author__ = "Mike Orr <iron@mso.oz.net>"
__revision__ = "$Revision: 1.10 $"[11:-2]
from Cheetah.Utils.Misc import useOrRaise
class NonNumericInputError(ValueError): pass
##################################################
## PRIVATE FUNCTIONS AND CLASSES
class _Converter:
"""A container object for info about type converters.
.name, string, name of this converter (for error messages).
.func, function, factory function.
.default, value to use or raise if the real value is missing.
.error, value to use or raise if .func() raises an exception.
"""
def __init__(self, name, func, default, error):
self.name = name
self.func = func
self.default = default
self.error = error
def _lookup(name, func, multi, converters):
"""Look up a Webware field/cookie/value/session value. Return
'(realName, value)' where 'realName' is like 'name' but with any
conversion suffix strips off. Applies numeric conversion and
single vs multi values according to the comments in the source.
"""
# Step 1 -- split off the conversion suffix from 'name'; e.g. "height:int".
# If there's no colon, the suffix is "". 'longName' is the name with the
# suffix, 'shortName' is without.
# XXX This implementation assumes "height:" means "height".
colon = name.find(':')
if colon != -1:
longName = name
shortName, ext = name[:colon], name[colon+1:]
else:
longName = shortName = name
ext = ''
# Step 2 -- look up the values by calling 'func'.
if longName != shortName:
values = func(longName, None) or func(shortName, None)
else:
values = func(shortName, None)
# 'values' is a list of strings, a string or None.
# Step 3 -- Coerce 'values' to a list of zero, one or more strings.
if values is None:
values = []
elif isinstance(values, str):
values = [values]
# Step 4 -- Find a _Converter object or raise TypeError.
try:
converter = converters[ext]
except KeyError:
fmt = "'%s' is not a valid converter name in '%s'"
tup = (ext, longName)
raise TypeError(fmt % tup)
# Step 5 -- if there's a converter func, run it on each element.
# If the converter raises an exception, use or raise 'converter.error'.
if converter.func is not None:
tmp = values[:]
values = []
for elm in tmp:
try:
elm = converter.func(elm)
except (TypeError, ValueError):
tup = converter.name, elm
errmsg = "%s '%s' contains invalid characters" % tup
elm = useOrRaise(converter.error, errmsg)
values.append(elm)
# 'values' is now a list of strings, ints or floats.
# Step 6 -- If we're supposed to return a multi value, return the list
# as is. If we're supposed to return a single value and the list is
# empty, return or raise 'converter.default'. Otherwise, return the
# first element in the list and ignore any additional values.
if multi:
return shortName, values
if len(values) == 0:
return shortName, useOrRaise(converter.default)
return shortName, values[0]
# vim: sw=4 ts=4 expandtab
| Python |
#
| Python |
"""This is a copy of the htmlEncode function in Webware.
@@TR: It implemented more efficiently.
"""
# Ordered (character, entity) replacement pairs.  '&' MUST come first so the
# ampersands introduced by the later substitutions are not re-encoded.
# NOTE: the entity strings had been corrupted to identity pairs (e.g.
# ['&', '&']), which made htmlEncode()/htmlDecode() no-ops; restored here.
htmlCodes = [
    ['&', '&amp;'],
    ['<', '&lt;'],
    ['>', '&gt;'],
    ['"', '&quot;'],
]
# Reversed order for decoding, so '&amp;' is translated back last.
htmlCodesReversed = htmlCodes[:]
htmlCodesReversed.reverse()
def htmlEncode(s, codes=htmlCodes):
    """Return the HTML encoded version of the given string, suitable for
    displaying a plain ASCII text string on a web page."""
    for pair in codes:
        char, entity = pair[0], pair[1]
        s = s.replace(char, entity)
    return s
| Python |
import sys
import os.path
import copy as copyModule
from ConfigParser import ConfigParser
import re
from tokenize import Intnumber, Floatnumber, Number
from types import *
import types
import new
import time
from StringIO import StringIO # not cStringIO because of unicode support
import imp # used by SettingsManager.updateSettingsFromPySrcFile()
numberRE = re.compile(Number)
complexNumberRE = re.compile('[\(]*' +Number + r'[ \t]*\+[ \t]*' + Number + '[\)]*')
convertableToStrTypes = (StringType, IntType, FloatType,
LongType, ComplexType, NoneType,
UnicodeType)
##################################################
## FUNCTIONS ##
def mergeNestedDictionaries(dict1, dict2, copy=False, deepcopy=False):
    """Recursively merge the values of dict2 into dict1 and return dict1.

    Handy for selectively overriding settings in a settings dictionary with
    a nested structure.  When 'copy' (shallow) or 'deepcopy' is true, dict1
    is copied first and the caller's original is left untouched.
    """
    if copy:
        dict1 = copyModule.copy(dict1)
    elif deepcopy:
        dict1 = copyModule.deepcopy(dict1)
    # .items() rather than Python-2-only .iteritems() so this also runs
    # under Python 3; dict2 is not mutated here, so iteration is safe.
    for key, val in dict2.items():
        if key in dict1 and isinstance(val, dict) and isinstance(dict1[key], dict):
            dict1[key] = mergeNestedDictionaries(dict1[key], val)
        else:
            dict1[key] = val
    return dict1
def stringIsNumber(S):
    """Return True if string 'S' represents a Python number, False otherwise.

    Also accepts complex numbers and numbers with a leading +/- sign.
    """
    S = S.strip()
    # Guard: the original indexed S[0] unconditionally, raising IndexError
    # for empty or all-whitespace input.
    if not S:
        return False
    if S[0] in '-+' and len(S) > 1:
        S = S[1:].strip()
    match = complexNumberRE.match(S)
    if not match:
        match = numberRE.match(S)
    # The regex must consume the entire string to count as a number.
    return bool(match) and match.end() == len(S)
def convStringToNum(theString):
    """Convert a string representation of a Python number to the Python
    version, raising Error when the string isn't a valid number."""
    if stringIsNumber(theString):
        # Empty globals/locals keep the eval restricted to literal numbers.
        return eval(theString, {}, {})
    raise Error(theString + ' cannot be converted to a Python number')
class Error(Exception):
    """Base exception for SettingsManager problems."""
class NoDefault(object):
    """Sentinel distinguishing 'no default supplied' from 'default=None'."""
class ConfigParserCaseSensitive(ConfigParser):
    """A case sensitive version of the standard Python ConfigParser."""
    def optionxform(self, optionstr):
        """Return the option name unchanged rather than lower-casing it as
        the default implementation does."""
        return optionstr
class _SettingsCollector(object):
    """An abstract base class that provides the methods SettingsManager uses
    to collect settings from config files and strings.

    This class only collects settings; it doesn't modify the _settings
    dictionary of SettingsManager instances in any way.
    """
    _ConfigParserClass = ConfigParserCaseSensitive

    def readSettingsFromModule(self, mod, ignoreUnderscored=True):
        """Return a dict of all settings (attributes) from a Python module,
        optionally skipping names that start with an underscore."""
        S = {}
        # .items() rather than Python-2-only .iteritems()
        for k, v in vars(mod).items():
            if ignoreUnderscored and k.startswith('_'):
                continue
            S[k] = v
        return S

    def readSettingsFromPySrcStr(self, theString):
        """Return a dictionary of the settings in a Python src string."""
        globalsDict = {'True': (1 == 1),
                       'False': (0 == 1),
                       }
        newSettings = {'self': self}
        exec((theString + os.linesep), globalsDict, newSettings)
        del newSettings['self']
        # types.ModuleType replaces new.module(): the 'new' module was
        # removed in Python 3 and deprecated since 2.6.
        module = types.ModuleType('temp_settings_module')
        module.__dict__.update(newSettings)
        return self.readSettingsFromModule(module)

    def readSettingsFromConfigFileObj(self, inFile, convert=True):
        """Return the settings from a config file that uses the syntax accepted by
        Python's standard ConfigParser module (like Windows .ini files).

        NOTE:
        this method maintains case unlike the ConfigParser module, unless this
        class was initialized with the 'caseSensitive' keyword set to False.

        All setting values are initially parsed as strings. However, if the
        'convert' arg is True this method will do the following value
        conversions:

        * all Python numeric literals will be coverted from string to number
        * The string 'None' will be converted to the Python value None
        * The string 'True' will be converted to a Python truth value
        * The string 'False' will be converted to a Python false value
        * Any string starting with 'python:' will be treated as a Python literal
          or expression that needs to be eval'd. This approach is useful for
          declaring lists and dictionaries.

        If a config section titled 'Globals' is present the options defined
        under it will be treated as top-level settings.
        """
        p = self._ConfigParserClass()
        p.readfp(inFile)
        # (the original computed 'sects'/'newSettings' twice -- once is enough)
        sects = p.sections()
        newSettings = {}
        for s in sects:
            newSettings[s] = {}
            for o in p.options(s):
                if o != '__name__':
                    newSettings[s][o] = p.get(s, o)

        ## loop through new settings -> deal with global settings, numbers,
        ## booleans and None ++ also deal with 'importSettings' commands
        for sect, subDict in newSettings.items():
            for key, val in subDict.items():
                if convert:
                    if val.lower().startswith('python:'):
                        subDict[key] = eval(val[7:], {}, {})
                    if val.lower() == 'none':
                        subDict[key] = None
                    if val.lower() == 'true':
                        subDict[key] = True
                    if val.lower() == 'false':
                        subDict[key] = False
                    if stringIsNumber(val):
                        subDict[key] = convStringToNum(val)
                ## now deal with any 'importSettings' commands
                if key.lower() == 'importsettings':
                    if val.find(';') < 0:
                        importedSettings = self.readSettingsFromPySrcFile(val)
                    else:
                        path = val.split(';')[0]
                        rest = ''.join(val.split(';')[1:]).strip()
                        parentDict = self.readSettingsFromPySrcFile(path)
                        importedSettings = eval('parentDict["' + rest + '"]')
                    subDict.update(mergeNestedDictionaries(subDict,
                                                           importedSettings))
            if sect.lower() == 'globals':
                newSettings.update(newSettings[sect])
                del newSettings[sect]
        return newSettings
class SettingsManager(_SettingsCollector):
    """A mixin class that provides facilities for managing application
    settings.  SettingsManager is designed to work well with nested
    settings dictionaries of any depth.
    """
    def __init__(self):
        super(SettingsManager, self).__init__()
        self._settings = {}
        self._initializeSettings()

    def _defaultSettings(self):
        """Return the baseline settings dict; subclasses may override."""
        return {}

    def _initializeSettings(self):
        """A hook for complex setting initialization sequences that involve
        references to 'self' or other settings, for example:

            self._settings['myCalcVal'] = self._settings['someVal'] * 15

        Called from __init__(); this dummy implementation should be
        reimplemented by subclasses as needed.
        """
        pass

    ## core post startup methods

    def setting(self, name, default=NoDefault):
        """Get a setting from self._settings, with or without a default."""
        if default is NoDefault:
            return self._settings[name]
        return self._settings.get(name, default)

    def hasSetting(self, key):
        """Return True/False: is 'key' present in the settings?"""
        return key in self._settings

    def setSetting(self, name, value):
        """Set a single setting in self._settings."""
        self._settings[name] = value

    def settings(self):
        """Return a reference to the settings dictionary."""
        return self._settings

    def copySettings(self):
        """Return a shallow copy of the settings dictionary."""
        return copyModule.copy(self._settings)

    def deepcopySettings(self):
        """Return a deep copy of the settings dictionary."""
        return copyModule.deepcopy(self._settings)

    def updateSettings(self, newSettings, merge=True):
        """Update the settings: selective merge, or complete overwrite."""
        if merge:
            mergeNestedDictionaries(self._settings, newSettings)
        else:
            self._settings.update(newSettings)

    ## source specific update methods

    def updateSettingsFromPySrcStr(self, theString, merge=True):
        """Update the settings from code in a Python source string."""
        newSettings = self.readSettingsFromPySrcStr(theString)
        self.updateSettings(newSettings,
                            merge=newSettings.get('mergeSettings', merge))

    def updateSettingsFromConfigFileObj(self, inFile, convert=True, merge=True):
        """Like .updateSettingsFromConfigFile(), reading from an already-open
        file object.  The caller is responsible for closing 'inFile'."""
        newSettings = self.readSettingsFromConfigFileObj(inFile,
                                                         convert=convert)
        self.updateSettings(newSettings,
                            merge=newSettings.get('mergeSettings', merge))

    def updateSettingsFromConfigStr(self, configStr, convert=True, merge=True):
        """Like .updateSettingsFromConfigFile(), reading the config from a
        string whose options land in the [globals] section."""
        inFile = StringIO('[globals]\n' + configStr)
        newSettings = self.readSettingsFromConfigFileObj(inFile,
                                                         convert=convert)
        self.updateSettings(newSettings,
                            merge=newSettings.get('mergeSettings', merge))
| Python |
# Human-readable release string and its orderable tuple form:
# (major, minor, micro, releaseType, releaseTypeSubNum)
Version = '2.4.1'
VersionTuple = (2, 4, 1, 'final', 0)
# NOTE(review): presumably the oldest release whose output remains
# compatible with this one -- confirm against the users of these names.
MinCompatibleVersion = '2.0rc6'
MinCompatibleVersionTuple = (2, 0, 0, 'candidate', 6)
####
def convertVersionStringToTuple(s):
    """Convert a version string like '2.0rc1' into an orderable tuple
    (major, minor, micro[, extra...], releaseType, releaseTypeSubNum),
    e.g. (2, 0, 0, 'candidate', 1).  'final' releases sort after 'alpha',
    'beta' and 'candidate' ones alphabetically, which matches the intended
    ordering.
    """
    versionNum = [0, 0, 0]
    releaseType = 'final'
    releaseTypeSubNum = 0
    if s.find('a') != -1:
        num, releaseTypeSubNum = s.split('a')
        releaseType = 'alpha'
    elif s.find('b') != -1:
        num, releaseTypeSubNum = s.split('b')
        releaseType = 'beta'
    elif s.find('rc') != -1:
        num, releaseTypeSubNum = s.split('rc')
        releaseType = 'candidate'
    else:
        num = s
    # Fill the three standard slots; extra components (e.g. '1.2.3.4') are
    # appended instead of raising IndexError as the original did.  The
    # original's "if len(versionNum) < 3: versionNum += [0]" was dead code:
    # versionNum always starts with exactly three slots.
    for i, part in enumerate(num.split('.')):
        if i < 3:
            versionNum[i] = int(part)
        else:
            versionNum.append(int(part))
    releaseTypeSubNum = int(releaseTypeSubNum)
    return tuple(versionNum + [releaseType, releaseTypeSubNum])
if __name__ == '__main__':
    # Smoke test: print a few parses, then verify that the tuple form
    # orders releases correctly (alpha < beta < candidate < final).
    c = convertVersionStringToTuple
    print(c('2.0a1'))
    print(c('2.0b1'))
    print(c('2.0rc1'))
    print(c('2.0'))
    print(c('2.0.2'))
    assert c('0.9.19b1') < c('0.9.19')
    assert c('0.9b1') < c('0.9.19')
    assert c('2.0a2') > c('2.0a1')
    assert c('2.0b1') > c('2.0a2')
    assert c('2.0b2') > c('2.0b1')
    assert c('2.0b2') == c('2.0b2')
    assert c('2.0rc1') > c('2.0b1')
    assert c('2.0rc2') > c('2.0rc1')
    assert c('2.0rc2') > c('2.0b1')
    assert c('2.0') > c('2.0a1')
    assert c('2.0') > c('2.0b1')
    assert c('2.0') > c('2.0rc1')
    assert c('2.0.1') > c('2.0')
    assert c('2.0rc1') > c('2.0b1')
| Python |
'''
Cheetah is an open source template engine and code generation tool.
It can be used standalone or combined with other tools and frameworks. Web
development is its principle use, but Cheetah is very flexible and is also being
used to generate C++ game code, Java, sql, form emails and even Python code.
Homepage
http://www.cheetahtemplate.org/
Documentation
http://cheetahtemplate.org/learn.html
Mailing list
cheetahtemplate-discuss@lists.sourceforge.net
Subscribe at
http://lists.sourceforge.net/lists/listinfo/cheetahtemplate-discuss
'''
from Version import *
| Python |
import os.path
import string
# Build a 256-entry byte-translation table mapping every byte to '_'
# except ASCII digits and letters, which map to themselves.  Used to turn
# an arbitrary template path into a legal Python module name.
# (Python 2 string-module idiom: string.join builds the 256-char table.)
l = ['_'] * 256
for c in string.digits + string.letters:
    l[ord(c)] = c
_pathNameTransChars = string.join(l, '')
del l, c
def convertTmplPathToModuleName(tmplPath,
                                _pathNameTransChars=_pathNameTransChars,
                                splitdrive=os.path.splitdrive,
                                translate=string.translate,
                                ):
    """Convert a template file path into a usable Python module name by
    dropping any Windows drive prefix and mapping every non-alphanumeric
    byte to '_' via the table above.  The extra keyword args bind the
    helpers at def time (a Python 2 lookup micro-optimization).
    """
    return translate(splitdrive(tmplPath)[1], _pathNameTransChars)
| Python |
"""
Provides an emulator/replacement for Python's standard import system.
@@TR: Be warned that Import Hooks are in the deepest, darkest corner of Python's
jungle. If you need to start hacking with this, be prepared to get lost for a
while. Also note, this module predates the newstyle import hooks in Python 2.3
http://www.python.org/peps/pep-0302.html.
This is a hacked/documented version of Gordon McMillan's iu.py. I have:
- made it a little less terse
- added docstrings and explanatations
- standardized the variable naming scheme
- reorganized the code layout to enhance readability
"""
import sys
import imp
import marshal
_installed = False
# _globalOwnerTypes is defined at the bottom of this file
_os_stat = _os_path_join = _os_getcwd = _os_path_dirname = None
##################################################
## FUNCTIONS
def _os_bootstrap():
    """Set up 'os' module replacement functions for use during import bootstrap.

    The real 'os' module can't be imported yet (we ARE the import machinery),
    so pick stat/getcwd from whichever platform builtin module exists and
    synthesize join/dirname by hand.  Results are stored in the _os_* module
    globals below.
    """
    names = sys.builtin_module_names
    join = dirname = None
    if 'posix' in names:
        sep = '/'
        from posix import stat, getcwd
    elif 'nt' in names:
        sep = '\\'
        from nt import stat, getcwd
    elif 'dos' in names:
        sep = '\\'
        from dos import stat, getcwd
    elif 'os2' in names:
        sep = '\\'
        from os2 import stat, getcwd
    elif 'mac' in names:
        from mac import stat, getcwd
        # Classic MacOS paths use ':' separators and different relative-path
        # rules, so it needs its own join().
        def join(a, b):
            if a == '':
                return b
            if ':' not in a:
                a = ':' + a
            if a[-1:] != ':':
                a = a + ':'
            return a + b
    else:
        raise ImportError('no os specific module found')
    if join is None:
        # Generic join: also tolerate '/' on platforms whose sep is '\\'.
        def join(a, b, sep=sep):
            if a == '':
                return b
            lastchar = a[-1:]
            if lastchar == '/' or lastchar == sep:
                return a + b
            return a + sep + b
    if dirname is None:
        def dirname(a, sep=sep):
            # Scan backwards for the last separator; '' when none found.
            for i in range(len(a)-1, -1, -1):
                c = a[i]
                if c == '/' or c == sep:
                    return a[:i]
            return ''
    global _os_stat
    _os_stat = stat
    global _os_path_join
    _os_path_join = join
    global _os_path_dirname
    _os_path_dirname = dirname
    global _os_getcwd
    _os_getcwd = getcwd
# Run the bootstrap immediately at import time so the _os_* helpers exist
# before anything else in this module is used.
_os_bootstrap()
def packageName(s):
    """Return the package portion of dotted name 's' ('a.b.c' -> 'a.b'),
    or '' when the name contains no dot."""
    dot = s.rfind('.')
    if dot == -1:
        return ''
    return s[:dot]
def nameSplit(s):
    """Split dotted name 's' into its components -- like s.split('.')
    except that a trailing dot (or an empty string) contributes no
    trailing empty component."""
    parts = s.split('.')
    if parts and parts[-1] == '':
        parts.pop()
    return parts
def getPathExt(fnm):
    """Return the extension of 'fnm' including the dot ('mod.py' -> '.py'),
    or '' when there is no dot anywhere in the string."""
    dot = fnm.rfind('.')
    return fnm[dot:] if dot != -1 else ''
def pathIsDir(pathname):
    """Local replacement for os.path.isdir(); returns None (not False) when
    the path can't be stat'ed."""
    try:
        s = _os_stat(pathname)
    except OSError:
        return None
    # S_IFMT/S_IFDIR mode-bit check.  0o-prefixed literals replace the old
    # 0170000 form, which is a SyntaxError under Python 3 (and the 0o form
    # is accepted from Python 2.6 on).
    return (s[0] & 0o170000) == 0o040000
def getDescr(fnm):
    """Return the imp.get_suffixes() entry (suffix, mode, type) that matches
    fnm's extension, or None when no loader handles it."""
    ext = getPathExt(fnm)
    for descr in imp.get_suffixes():
        if descr[0] == ext:
            return descr
##################################################
## CLASSES
class Owner:
    """An Owner does imports from a particular piece of turf.  That is,
    there's an Owner for each thing on sys.path; there are owners for
    directories and .pyz files, and there could be owners for zip files or
    even URLs.  A shadowpath (a dictionary mapping the names in sys.path to
    their owners) is used so that sys.path (or a package's __path__) stays
    a plain list of strings.
    """
    def __init__(self, path):
        self.path = path

    def __str__(self):
        return self.path

    def getmod(self, nm):
        """Base implementation: this owner cannot supply any module."""
        return None
class DirOwner(Owner):
    """Owner for a single directory on sys.path: finds C extensions,
    .py sources and compiled bytecode, preferring whichever of source vs
    bytecode is newer."""
    def __init__(self, path):
        # '' means the current directory, as with the real import system.
        if path == '':
            path = _os_getcwd()
        if not pathIsDir(path):
            raise ValueError("%s is not a directory" % path)
        Owner.__init__(self, path)
    def getmod(self, nm,
               getsuffixes=imp.get_suffixes, loadco=marshal.loads, newmod=imp.new_module):
        """Return a module object for 'nm' found in this directory, or None.

        For C extensions the module is loaded immediately; for source or
        bytecode only the code object is compiled/unmarshalled and attached
        as mod.__co__ for the ImportManager to exec later.
        """
        pth = _os_path_join(self.path, nm)
        # Try the package form (nm/__init__.*) first, then a plain module.
        possibles = [(pth, 0, None)]
        if pathIsDir(pth):
            possibles.insert(0, (_os_path_join(pth, '__init__'), 1, pth))
        py = pyc = None
        for pth, ispkg, pkgpth in possibles:
            for ext, mode, typ in getsuffixes():
                attempt = pth+ext
                try:
                    st = _os_stat(attempt)
                except:
                    pass
                else:
                    if typ == imp.C_EXTENSION:
                        # C extensions can't be deferred -- load them now.
                        fp = open(attempt, 'rb')
                        mod = imp.load_module(nm, fp, attempt, (ext, mode, typ))
                        mod.__file__ = attempt
                        return mod
                    elif typ == imp.PY_SOURCE:
                        py = (attempt, st)
                    else:
                        pyc = (attempt, st)
            if py or pyc:
                break
        if py is None and pyc is None:
            return None
        # Prefer the source when it is newer than the bytecode (stat tuple
        # index 8 is st_mtime); fall back to the bytecode, and to the source
        # again if the bytecode is corrupt.
        while True:
            if pyc is None or py and pyc[1][8] < py[1][8]:
                try:
                    co = compile(open(py[0], 'r').read()+'\n', py[0], 'exec')
                    break
                except SyntaxError, e:
                    print("Invalid syntax in %s" % py[0])
                    print(e.args)
                    raise
            elif pyc:
                stuff = open(pyc[0], 'rb').read()
                # Skip the 8-byte header (magic + timestamp) before the
                # marshalled code object.
                try:
                    co = loadco(stuff[8:])
                    break
                except (ValueError, EOFError):
                    pyc = None
            else:
                return None
        mod = newmod(nm)
        mod.__file__ = co.co_filename
        if ispkg:
            # Packages get their own sub-importer over their __path__.
            mod.__path__ = [pkgpth]
            subimporter = PathImportDirector(mod.__path__)
            mod.__importsub__ = subimporter.getmod
        mod.__co__ = co
        return mod
class ImportDirector(Owner):
    """ImportDirectors live on the metapath.  There's one for builtins, one
    for frozen modules, and one for sys.path; Windows gets one for modules
    fetched from the Registry, and Mac would have them for PY_RESOURCE
    modules etc.  A generalization of Owner -- its concept of 'turf' is
    broader.
    """
    pass
class BuiltinImportDirector(ImportDirector):
    """Directs imports of builtin modules."""
    def __init__(self):
        self.path = 'Builtins'

    def getmod(self, nm, isbuiltin=imp.is_builtin):
        """Load and return builtin module 'nm', or None if it isn't one."""
        if not isbuiltin(nm):
            return None
        return imp.load_module(nm, None, nm, ('', '', imp.C_BUILTIN))
class FrozenImportDirector(ImportDirector):
    """Directs imports of frozen modules."""
    def __init__(self):
        self.path = 'FrozenModules'

    def getmod(self, nm,
               isFrozen=imp.is_frozen, loadMod=imp.load_module):
        """Load and return frozen module 'nm', or None if it isn't frozen.
        Frozen packages get an __importsub__ hook for their submodules."""
        if not isFrozen(nm):
            return None
        mod = loadMod(nm, None, nm, ('', '', imp.PY_FROZEN))
        if hasattr(mod, '__path__'):
            # pname/owner are bound as lambda defaults so the hook keeps
            # working regardless of what happens to 'nm'/'self' later.
            mod.__importsub__ = (
                lambda name, pname=nm, owner=self:
                    owner.getmod(pname + '.' + name))
        return mod
class RegistryImportDirector(ImportDirector):
    """Directs imports of modules stored in the Windows Registry"""
    def __init__(self):
        self.path = "WindowsRegistry"
        # Maps module name -> (filename, imp description tuple).
        self.map = {}
        try:
            import win32api
            ## import win32con
        except ImportError:
            # Not on Windows (or win32api missing): leave the map empty so
            # getmod() simply never matches.
            pass
        else:
            HKEY_CURRENT_USER = -2147483647
            HKEY_LOCAL_MACHINE = -2147483646
            KEY_ALL_ACCESS = 983103
            subkey = r"Software\Python\PythonCore\%s\Modules" % sys.winver
            for root in (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE):
                try:
                    hkey = win32api.RegOpenKeyEx(root, subkey, 0, KEY_ALL_ACCESS)
                except:
                    # Key absent under this root; try the next one.
                    pass
                else:
                    numsubkeys, numvalues, lastmodified = win32api.RegQueryInfoKey(hkey)
                    for i in range(numsubkeys):
                        subkeyname = win32api.RegEnumKey(hkey, i)
                        hskey = win32api.RegOpenKeyEx(hkey, subkeyname, 0, KEY_ALL_ACCESS)
                        val = win32api.RegQueryValueEx(hskey, '')
                        desc = getDescr(val[0])
                        self.map[subkeyname] = (val[0], desc)
                        hskey.Close()
                    hkey.Close()
                    # First root that has the key wins.
                    break
    def getmod(self, nm):
        """Load and return registry-registered module 'nm', or None."""
        stuff = self.map.get(nm)
        if stuff:
            fnm, desc = stuff
            fp = open(fnm, 'rb')
            mod = imp.load_module(nm, fp, fnm, desc)
            mod.__file__ = fnm
            return mod
        return None
class PathImportDirector(ImportDirector):
    """Directs imports of modules stored on the filesystem."""
    def __init__(self, pathlist=None, importers=None, ownertypes=None):
        # 'is None' instead of the original '== None': identity is the
        # correct (and cheaper) test for the None sentinel.
        if pathlist is None:
            self.path = sys.path
        else:
            self.path = pathlist
        if ownertypes is None:
            self._ownertypes = _globalOwnerTypes
        else:
            self._ownertypes = ownertypes
        if importers:
            self._shadowPath = importers
        else:
            self._shadowPath = {}
        self._inMakeOwner = False
        self._building = {}

    def getmod(self, nm):
        """Try each entry on self.path in order; return the first module
        found, or None.  String entries are lazily wrapped in Owner objects
        cached in the shadow path; non-string entries are assumed to be
        importer objects with their own getmod()."""
        mod = None
        for thing in self.path:
            if isinstance(thing, basestring):
                # -1 marks "never tried" (an owner of None is a valid,
                # cached negative result).
                owner = self._shadowPath.get(thing, -1)
                if owner == -1:
                    owner = self._shadowPath[thing] = self._makeOwner(thing)
                if owner:
                    mod = owner.getmod(nm)
            else:
                mod = thing.getmod(nm)
            if mod:
                break
        return mod

    def _makeOwner(self, path):
        """Instantiate the first owner type in self._ownertypes that accepts
        'path'; return None if none does (or if we're already building an
        owner for this path, to break recursion)."""
        if self._building.get(path):
            return None
        self._building[path] = 1
        owner = None
        for klass in self._ownertypes:
            try:
                # this may cause an import, which may cause recursion --
                # hence the _building guard above
                owner = klass(path)
            except:
                pass
            else:
                break
        del self._building[path]
        return owner
#=================ImportManager============================#
# The one-and-only ImportManager
# ie, the builtin import
# Sentinel for "never looked this module up before" (distinct from the
# None that sys.modules holds for known-missing modules).
UNTRIED = -1

class ImportManager:
    # really the equivalent of builtin import
    def __init__(self):
        # Directors are consulted in this order for absolute imports.
        self.metapath = [
            BuiltinImportDirector(),
            FrozenImportDirector(),
            RegistryImportDirector(),
            PathImportDirector()
            ]
        self.threaded = 0
        self.rlock = None
        self.locker = None
        self.setThreaded()
    def setThreaded(self):
        """Switch to thread-safe operation if the 'thread' module is loaded.
        Also re-invoked from doimport() the moment 'thread' gets imported."""
        thread = sys.modules.get('thread', None)
        if thread and not self.threaded:
            self.threaded = 1
            self.rlock = thread.allocate_lock()
            self._get_ident = thread.get_ident
    def install(self):
        """Replace the builtin __import__/reload with this manager's hooks."""
        import __builtin__
        __builtin__.__import__ = self.importHook
        __builtin__.reload = self.reloadHook
    def importHook(self, name, globals=None, locals=None, fromlist=None, level=-1):
        '''
        Emulation of the builtin __import__.

        NOTE: Currently importHook will accept the keyword-argument "level"
        but it will *NOT* use it (currently). Details about the "level" keyword
        argument can be found here: http://www.python.org/doc/2.5.2/lib/built-in-funcs.html
        '''
        # first see if we could be importing a relative name
        #print "importHook(%s, %s, locals, %s)" % (name, globals['__name__'], fromlist)
        _sys_modules_get = sys.modules.get
        contexts = [None]
        if globals:
            importernm = globals.get('__name__', '')
            if importernm:
                if hasattr(_sys_modules_get(importernm), '__path__'):
                    # Importer is itself a package: it is a valid context.
                    contexts.insert(0, importernm)
                else:
                    # Importer is a plain module: its package (if any) is
                    # the context.
                    pkgnm = packageName(importernm)
                    if pkgnm:
                        contexts.insert(0, pkgnm)
        # so contexts is [pkgnm, None] or just [None]
        # now break the name being imported up so we get:
        # a.b.c -> [a, b, c]
        nmparts = nameSplit(name)
        _self_doimport = self.doimport
        threaded = self.threaded
        # Try the relative context first, then the absolute one, importing
        # each dotted component in turn and consulting sys.modules before
        # doing any real work.
        for context in contexts:
            ctx = context
            for i in range(len(nmparts)):
                nm = nmparts[i]
                #print " importHook trying %s in %s" % (nm, ctx)
                if ctx:
                    fqname = ctx + '.' + nm
                else:
                    fqname = nm
                if threaded:
                    self._acquire()
                mod = _sys_modules_get(fqname, UNTRIED)
                if mod is UNTRIED:
                    mod = _self_doimport(nm, ctx, fqname)
                if threaded:
                    self._release()
                if mod:
                    ctx = fqname
                else:
                    break
            else:
                # no break, point i beyond end
                i = i + 1
            if i:
                break
        if i<len(nmparts):
            # Some component failed to import.  It may still be an attribute
            # of the last successfully imported module (e.g. 'os.path').
            if ctx and hasattr(sys.modules[ctx], nmparts[i]):
                #print "importHook done with %s %s %s (case 1)" % (name, globals['__name__'], fromlist)
                return sys.modules[nmparts[0]]
            del sys.modules[fqname]
            raise ImportError("No module named %s" % fqname)
        if fromlist is None:
            # Plain 'import a.b.c' returns the top-level module 'a'.
            #print "importHook done with %s %s %s (case 2)" % (name, globals['__name__'], fromlist)
            if context:
                return sys.modules[context+'.'+nmparts[0]]
            return sys.modules[nmparts[0]]
        # 'from a.b import x, *' -- make sure each requested submodule of a
        # package is actually imported, then return the bottom module.
        bottommod = sys.modules[ctx]
        if hasattr(bottommod, '__path__'):
            fromlist = list(fromlist)
            i = 0
            while i < len(fromlist):
                nm = fromlist[i]
                if nm == '*':
                    # Expand '*' in place using the package's __all__.
                    fromlist[i:i+1] = list(getattr(bottommod, '__all__', []))
                    if i >= len(fromlist):
                        break
                    nm = fromlist[i]
                i = i + 1
                if not hasattr(bottommod, nm):
                    if self.threaded:
                        self._acquire()
                    mod = self.doimport(nm, ctx, ctx+'.'+nm)
                    if self.threaded:
                        self._release()
                    if not mod:
                        raise ImportError("%s not found in %s" % (nm, ctx))
        #print "importHook done with %s %s %s (case 3)" % (name, globals['__name__'], fromlist)
        return bottommod
    def doimport(self, nm, parentnm, fqname):
        """Import the single (undotted) name 'nm', either as a submodule of
        'parentnm' or absolutely via the metapath, register it in
        sys.modules under 'fqname', and return it (or None on failure)."""
        # Note that nm is NEVER a dotted name at this point
        #print "doimport(%s, %s, %s)" % (nm, parentnm, fqname)
        if parentnm:
            parent = sys.modules[parentnm]
            if hasattr(parent, '__path__'):
                importfunc = getattr(parent, '__importsub__', None)
                if not importfunc:
                    subimporter = PathImportDirector(parent.__path__)
                    importfunc = parent.__importsub__ = subimporter.getmod
                mod = importfunc(nm)
                if mod:
                    setattr(parent, nm, mod)
            else:
                #print "..parent not a package"
                return None
        else:
            # now we're dealing with an absolute import
            for director in self.metapath:
                mod = director.getmod(nm)
                if mod:
                    break
        if mod:
            # Register in sys.modules BEFORE exec'ing the module body so
            # circular imports can find the (partially initialized) module.
            mod.__name__ = fqname
            sys.modules[fqname] = mod
            if hasattr(mod, '__co__'):
                co = mod.__co__
                del mod.__co__
                exec(co, mod.__dict__)
            if fqname == 'thread' and not self.threaded:
                ## print "thread detected!"
                self.setThreaded()
        else:
            # Cache the negative result so we don't search again.
            sys.modules[fqname] = None
        #print "..found %s" % mod
        return mod
    def reloadHook(self, mod):
        """Emulation of the builtin reload(): re-import and update in place."""
        fqnm = mod.__name__
        nm = nameSplit(fqnm)[-1]
        parentnm = packageName(fqnm)
        newmod = self.doimport(nm, parentnm, fqnm)
        mod.__dict__.update(newmod.__dict__)
##        return newmod
    def _acquire(self):
        """Acquire the re-entrant import lock (counting recursive entries
        by the same thread)."""
        if self.rlock.locked():
            if self.locker == self._get_ident():
                self.lockcount = self.lockcount + 1
##                print "_acquire incrementing lockcount to", self.lockcount
                return
        self.rlock.acquire()
        self.locker = self._get_ident()
        self.lockcount = 0
##        print "_acquire first time!"
    def _release(self):
        """Release one level of the re-entrant import lock."""
        if self.lockcount:
            self.lockcount = self.lockcount - 1
##            print "_release decrementing lockcount to", self.lockcount
        else:
            self.rlock.release()
##            print "_release releasing lock!"
##################################################
## MORE CONSTANTS & GLOBALS
# Owner classes tried in order by PathImportDirector._makeOwner():
# DirOwner first so real directories win over the catch-all base Owner.
_globalOwnerTypes = [
    DirOwner,
    Owner,
]
| Python |
#!/usr/bin/env python
import os
import pprint
try:
from functools import reduce
except ImportError:
# Assume we have reduce
pass
from Cheetah import Parser
from Cheetah import Compiler
from Cheetah import Template
class Analyzer(Parser.Parser):
    """A Cheetah parser that tallies how often each directive occurs."""
    def __init__(self, *args, **kwargs):
        self.calls = {}
        super(Analyzer, self).__init__(*args, **kwargs)

    def eatDirective(self):
        """Count the directive about to be consumed, then defer to the
        normal parser behavior."""
        directive = self.matchDirective()
        self.calls[directive] = self.calls.get(directive, 0) + 1
        super(Analyzer, self).eatDirective()
class AnalysisCompiler(Compiler.ModuleCompiler):
    # Swap in the counting parser so compiling a template records directive
    # usage as a side effect.
    parserClass = Analyzer
def analyze(source):
    """Compile 'source' with the analysis compiler and return its mapping
    of directive name -> occurrence count."""
    compiled = Template.Template.compile(source,
                                         compilerClass=AnalysisCompiler)
    return compiled._CHEETAH_compilerInstance._parser.calls
def main_file(f):
    """Analyze a single template file and return its directive-count dict."""
    # 'with' replaces the manual try/finally close, and print() replaces the
    # Python-2-only "print u'...'" statement (a SyntaxError under Python 3).
    with open(f, 'r') as fd:
        print(u'>>> Analyzing %s' % f)
        return analyze(fd.read())
def _find_templates(directory, suffix):
for root, dirs, files in os.walk(directory):
for f in files:
if not f.endswith(suffix):
continue
yield root + os.path.sep + f
def _analyze_templates(iterable):
    """Lazily map main_file() over an iterable of template paths."""
    for templatePath in iterable:
        yield main_file(templatePath)
def main_dir(opts):
    """Analyze every template under opts.dir (files ending in opts.suffix)
    and return the directive counts summed over all of them."""
    results = _analyze_templates(_find_templates(opts.dir, opts.suffix))
    totals = {}
    for series in results:
        if not series:
            continue
        # .items() instead of Python-2-only .iteritems()
        for k, v in series.items():
            totals[k] = totals.get(k, 0) + v
    return totals
def main():
    """Command-line entry point: analyze one file (-f) and/or a tree (-d)
    and pretty-print the resulting directive counts."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-f', '--file', dest='file', default=None,
                      help='Specify a single file to analyze')
    parser.add_option('-d', '--dir', dest='dir', default=None,
                      help='Specify a directory of templates to analyze')
    parser.add_option('--suffix', default='tmpl', dest='suffix',
                      help='Specify a custom template file suffix for the -d option (default: "tmpl")')
    opts, _args = parser.parse_args()
    if not (opts.file or opts.dir):
        parser.print_help()
        return
    # When both -f and -d are given, the directory totals win (but the
    # single file is still analyzed/printed first), as before.
    results = None
    if opts.file:
        results = main_file(opts.file)
    if opts.dir:
        results = main_dir(opts)
    pprint.pprint(results)
# Run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()
| Python |
# $Id: ImportHooks.py,v 1.27 2007/11/16 18:28:47 tavis_rudd Exp $
"""Provides some import hooks to allow Cheetah's .tmpl files to be imported
directly like Python .py modules.
To use these:
import Cheetah.ImportHooks
Cheetah.ImportHooks.install()
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.27 $
Start Date: 2001/03/30
Last Revision Date: $Date: 2007/11/16 18:28:47 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.27 $"[11:-2]
import sys
import os.path
import types
import __builtin__
import new
import imp
from threading import RLock
import string
import traceback
from Cheetah import ImportManager
from Cheetah.ImportManager import DirOwner
from Cheetah.Compiler import Compiler
from Cheetah.convertTmplPathToModuleName import convertTmplPathToModuleName
_installed = False
##################################################
## HELPER FUNCS
_cacheDir = []
def setCacheDir(cacheDir):
    # Register a directory where compiled .py versions of templates are
    # written; only _cacheDir[0] is consulted by CheetahDirOwner._compile.
    global _cacheDir
    _cacheDir.append(cacheDir)
##################################################
## CLASSES
class CheetahDirOwner(DirOwner):
    """ImportManager 'owner' that, beyond normal .py modules, can compile
    Cheetah *.tmpl files found in its directory into importable modules.

    A single class-level lock serializes getmod(): compilation may write
    to the shared cache directory.
    """
    _lock = RLock()
    _acquireLock = _lock.acquire
    _releaseLock = _lock.release

    templateFileExtensions = ('.tmpl',)

    def getmod(self, name):
        """Return a module for *name*.

        Order: whatever DirOwner finds first (a regular module), then a
        freshly compiled Cheetah template for any registered extension,
        else None.  Compilation failures are re-raised as ImportError
        with the original traceback embedded in the message.
        """
        self._acquireLock()
        try:
            mod = DirOwner.getmod(self, name)
            if mod:
                return mod

            for ext in self.templateFileExtensions:
                tmplPath = os.path.join(self.path, name + ext)
                if os.path.exists(tmplPath):
                    try:
                        return self._compile(name, tmplPath)
                    except:
                        # @@TR: log the error
                        exc_txt = traceback.format_exc()
                        exc_txt =' '+(' \n'.join(exc_txt.splitlines()))
                        raise ImportError(
                            'Error while compiling Cheetah module'
                            ' %(name)s, original traceback follows:\n%(exc_txt)s'%locals())
            return None
        finally:
            self._releaseLock()

    def _compile(self, name, tmplPath):
        """Compile the template at *tmplPath* into a new module object.

        If a cache dir is registered the generated Python source is also
        written there so tracebacks/debuggers can display it; on failure
        the module's __file__ falls back to the template path.
        """
        ## @@ consider adding an ImportError raiser here
        code = str(Compiler(file=tmplPath, moduleName=name,
                            mainClassName=name))
        if _cacheDir:
            __file__ = os.path.join(_cacheDir[0],
                                    convertTmplPathToModuleName(tmplPath)) + '.py'
            try:
                # BUGFIX: close the handle (the original leaked it) and
                # catch EnvironmentError -- on Python 2, open() raises
                # IOError, which the original's `except OSError` missed.
                cacheFile = open(__file__, 'w')
                try:
                    cacheFile.write(code)
                finally:
                    cacheFile.close()
            except EnvironmentError:
                ## @@ TR: need to add some error code here
                traceback.print_exc(file=sys.stderr)
                __file__ = tmplPath
        else:
            __file__ = tmplPath
        co = compile(code+'\n', __file__, 'exec')

        mod = imp.new_module(name)
        mod.__file__ = co.co_filename
        if _cacheDir:
            mod.__orig_file__ = tmplPath # @@TR: this is used in the WebKit
                                         # filemonitoring code
        mod.__co__ = co
        return mod
##################################################
## FUNCTIONS
def install(templateFileExtensions=('.tmpl',)):
    """Install the Cheetah Import Hooks.

    After this call ``import foo`` will also look for a Cheetah template
    ``foo.tmpl`` (or any of *templateFileExtensions*) and compile it on
    the fly.  Idempotent: a second call is a no-op.
    """
    global _installed
    if not _installed:
        CheetahDirOwner.templateFileExtensions = templateFileExtensions
        import __builtin__
        if isinstance(__builtin__.__import__, types.BuiltinFunctionType):
            global __oldimport__
            __oldimport__ = __builtin__.__import__
            ImportManager._globalOwnerTypes.insert(0, CheetahDirOwner)
            global _manager
            _manager=ImportManager.ImportManager()
            _manager.setThreaded()
            _manager.install()
        # BUGFIX: the original never set this flag, so repeated install()
        # calls re-ran the hook setup and uninstall()'s guard never held.
        _installed = True

def uninstall():
    """Uninstall the Cheetah Import Hooks.

    Restores the original __import__ and drops the ImportManager.
    Idempotent: does nothing when the hooks are not installed.
    """
    global _installed
    # BUGFIX: the guard was inverted ('if not _installed') in the original,
    # which only "worked" because install() never set the flag.
    if _installed:
        import __builtin__
        if isinstance(__builtin__.__import__, types.MethodType):
            __builtin__.__import__ = __oldimport__
            global _manager
            del _manager
        _installed = False
if __name__ == '__main__':
install()
| Python |
"""A Skeleton HTML page template, that provides basic structure and utility methods.
"""
##################################################
## DEPENDENCIES
import sys
import os
import os.path
from os.path import getmtime, exists
import time
import types
import __builtin__
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import DummyTransaction
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Cheetah.Templates._SkeletonPage import _SkeletonPage
##################################################
## MODULE CONSTANTS
try:
True, False
except NameError:
True, False = (1==1), (1==0)
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.0rc6'
__CHEETAH_versionTuple__ = (2, 0, 0, 'candidate', 6)
__CHEETAH_genTime__ = 1139107954.3640411
__CHEETAH_genTimestamp__ = 'Sat Feb 4 18:52:34 2006'
__CHEETAH_src__ = 'src/Templates/SkeletonPage.tmpl'
__CHEETAH_srcLastModified__ = 'Mon Oct 7 11:37:30 2002'
__CHEETAH_docstring__ = 'Autogenerated by CHEETAH: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class SkeletonPage(_SkeletonPage):
    """Cheetah-generated template class for SkeletonPage.tmpl.

    NOTE(review): this module was autogenerated by Cheetah -- edit the
    .tmpl source, not this file.  Each write*() method renders into the
    current transaction (or a DummyTransaction when called standalone,
    in which case the rendered text is returned).
    """
    ##################################################
    ## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        _SkeletonPage.__init__(self, *args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k, v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
    def writeHeadTag(self, **KWS):
        ## CHEETAH: generated from #block writeHeadTag at line 22, col 1.
        trans = KWS.get("trans")
        if (not trans and not self._CHEETAH__isBuffering and not hasattr(self.transaction, '__call__')):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        write('<head>\n<title>')
        _v = VFFSL(SL, "title", True) # '$title' on line 24, col 8
        if _v is not None: write(_filter(_v, rawExpr='$title')) # from line 24, col 8.
        write('</title>\n')
        _v = VFFSL(SL, "metaTags", True) # '$metaTags' on line 25, col 1
        if _v is not None: write(_filter(_v, rawExpr='$metaTags')) # from line 25, col 1.
        write(' \n')
        _v = VFFSL(SL, "stylesheetTags", True) # '$stylesheetTags' on line 26, col 1
        if _v is not None: write(_filter(_v, rawExpr='$stylesheetTags')) # from line 26, col 1.
        write(' \n')
        _v = VFFSL(SL, "javascriptTags", True) # '$javascriptTags' on line 27, col 1
        if _v is not None: write(_filter(_v, rawExpr='$javascriptTags')) # from line 27, col 1.
        write('\n</head>\n')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    def writeBody(self, **KWS):
        ## CHEETAH: generated from #block writeBody at line 36, col 1.
        trans = KWS.get("trans")
        if (not trans and not self._CHEETAH__isBuffering and not hasattr(self.transaction, '__call__')):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        write('This skeleton page has no flesh. Its body needs to be implemented.\n')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not hasattr(self.transaction, '__call__')):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        ## START CACHE REGION: ID=header. line 6, col 1 in the source.
        # The page header is rendered once into a cache region and replayed
        # on later calls until the region expires or is refreshed.
        _RECACHE_header = False
        _cacheRegion_header = self.getCacheRegion(regionID='header', cacheInfo={'type': 2, 'id': 'header'})
        if _cacheRegion_header.isNew():
            _RECACHE_header = True
        _cacheItem_header = _cacheRegion_header.getCacheItem('header')
        if _cacheItem_header.hasExpired():
            _RECACHE_header = True
        if (not _RECACHE_header) and _cacheItem_header.getRefreshTime():
            try:
                _output = _cacheItem_header.renderOutput()
            except KeyError:
                _RECACHE_header = True
            else:
                write(_output)
                del _output
        if _RECACHE_header or not _cacheItem_header.getRefreshTime():
            # Redirect writes into a DummyTransaction so the rendered
            # header can be captured and stored in the cache item.
            _orig_transheader = trans
            trans = _cacheCollector_header = DummyTransaction()
            write = _cacheCollector_header.response().write
            _v = VFFSL(SL, "docType", True) # '$docType' on line 7, col 1
            if _v is not None: write(_filter(_v, rawExpr='$docType')) # from line 7, col 1.
            write('\n')
            _v = VFFSL(SL, "htmlTag", True) # '$htmlTag' on line 8, col 1
            if _v is not None: write(_filter(_v, rawExpr='$htmlTag')) # from line 8, col 1.
            write('''
<!-- This document was autogenerated by Cheetah(http://CheetahTemplate.org).
Do not edit it directly!
Copyright ''')
            _v = VFFSL(SL, "currentYr", True) # '$currentYr' on line 12, col 11
            if _v is not None: write(_filter(_v, rawExpr='$currentYr')) # from line 12, col 11.
            write(' - ')
            _v = VFFSL(SL, "siteCopyrightName", True) # '$siteCopyrightName' on line 12, col 24
            if _v is not None: write(_filter(_v, rawExpr='$siteCopyrightName')) # from line 12, col 24.
            write(' - All Rights Reserved.\nFeel free to copy any javascript or html you like on this site,\nprovided you remove all links and/or references to ')
            _v = VFFSL(SL, "siteDomainName", True) # '$siteDomainName' on line 14, col 52
            if _v is not None: write(_filter(_v, rawExpr='$siteDomainName')) # from line 14, col 52.
            write('''
However, please do not copy any content or images without permission.
''')
            _v = VFFSL(SL, "siteCredits", True) # '$siteCredits' on line 17, col 1
            if _v is not None: write(_filter(_v, rawExpr='$siteCredits')) # from line 17, col 1.
            write('''
-->
''')
            self.writeHeadTag(trans=trans)
            write('\n')
            # Restore the real transaction and replay the captured output.
            trans = _orig_transheader
            write = trans.response().write
            _cacheData = _cacheCollector_header.response().getvalue()
            _cacheItem_header.setData(_cacheData)
            write(_cacheData)
            del _cacheData
            del _cacheCollector_header
            del _orig_transheader
        ## END CACHE REGION: header
        write('\n')
        _v = VFFSL(SL, "bodyTag", True) # '$bodyTag' on line 34, col 1
        if _v is not None: write(_filter(_v, rawExpr='$bodyTag')) # from line 34, col 1.
        write('\n\n')
        self.writeBody(trans=trans)
        write('''
</body>
</html>
''')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES
    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_SkeletonPage= 'respond'
## END CLASS DEFINITION
# Graft the standard Cheetah plumbing methods onto the class unless a
# newer Template API (which provides _initCheetahAttributes) is in use.
if not hasattr(SkeletonPage, '_initCheetahAttributes'):
    templateAPIClass = getattr(SkeletonPage, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(SkeletonPage)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=SkeletonPage()).run()
| Python |
# $Id: _SkeletonPage.py,v 1.13 2002/10/01 17:52:02 tavis_rudd Exp $
"""A baseclass for the SkeletonPage template
Meta-Data
==========
Author: Tavis Rudd <tavis@damnsimple.com>,
Version: $Revision: 1.13 $
Start Date: 2001/04/05
Last Revision Date: $Date: 2002/10/01 17:52:02 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.13 $"[11:-2]
##################################################
## DEPENDENCIES ##
import time, types, os, sys
# intra-package imports ...
from Cheetah.Template import Template
##################################################
## GLOBALS AND CONSTANTS ##
True = (1==1)
False = (0==1)
##################################################
## CLASSES ##
class _SkeletonPage(Template):
    """A baseclass for the SkeletonPage template.

    Holds the page's metadata (meta tags, stylesheets, javascript, body
    attributes) and provides helper methods that render each of them to
    HTML strings for use as $placeholders in the template.
    """

    docType = ('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" '
               '"http://www.w3.org/TR/html4/loose.dtd">')

    title = ''
    siteDomainName = 'www.example.com'
    siteCredits = 'Designed & Implemented by Tavis Rudd'
    siteCopyrightName = "Tavis Rudd"
    htmlTag = '<html>'

    def __init__(self, *args, **KWs):
        Template.__init__(self, *args, **KWs)
        # Default meta tags; the outer keys select the attribute name
        # ('HTTP-EQUIV' or 'NAME') used when rendering.
        self._metaTags = {'HTTP-EQUIV': {'keywords': 'Cheetah',
                                         'Content-Type': 'text/html; charset=iso-8859-1',
                                         },
                          'NAME': {'generator': 'Cheetah: The Python-Powered Template Engine'}
                          }
        # {'.cssClassName': {cssProperty: value, ...}}
        self._stylesheets = {}
        # Rule names in the order their definitions should be emitted.
        self._stylesheetsOrder = []
        # {'libName': 'libSrcPath'} -> rendered as <link> tags
        self._stylesheetLibs = {}
        # {'libName': 'libSrcPath'} -> rendered as <script src=...> tags
        self._javascriptLibs = {}
        # {key: codeString or (version, codeString)} -> inline <script> tags
        self._javascriptTags = {}
        # Attributes for the <body> tag.
        self._bodyTagAttribs = {}

    def metaTags(self):
        """Return a formatted version of the self._metaTags dictionary, using the
        formatMetaTags function from Cheetah.Macros.HTML"""
        return self.formatMetaTags(self._metaTags)

    def stylesheetTags(self):
        """Return a formatted version of the self._stylesheetLibs and
        self._stylesheets dictionaries.  The keys in self._stylesheets must
        be listed in the order that they should appear in the list
        self._stylesheetsOrder, to ensure that the style rules are defined in
        the correct order."""
        stylesheetTagsTxt = ''
        for title, src in self._stylesheetLibs.items():
            stylesheetTagsTxt += '<link rel="stylesheet" type="text/css" href="' + str(src) + '" />\n'
        if not self._stylesheetsOrder:
            return stylesheetTagsTxt
        stylesheetTagsTxt += '<style type="text/css"><!--\n'
        for identifier in self._stylesheetsOrder:
            if identifier not in self._stylesheets:
                # BUGFIX: the original warning read '...identifierwas in...'
                # (missing space before 'was').
                warning = ('# the identifier ' + identifier +
                           ' was in stylesheetsOrder, but not in stylesheets')
                print(warning)
                stylesheetTagsTxt += warning
                continue
            attribsDict = self._stylesheets[identifier]
            attribCode = ''
            for k, v in attribsDict.items():
                attribCode += str(k) + ': ' + str(v) + '; '
            attribCode = attribCode[:-2]        # drop the trailing '; '
            stylesheetTagsTxt += '\n' + identifier + ' {' + attribCode + '}'
        stylesheetTagsTxt += '\n//--></style>\n'
        return stylesheetTagsTxt

    def javascriptTags(self):
        """Return a formatted version of the javascriptTags and
        javascriptLibs dictionaries.  Each value in javascriptTags
        should be a either a code string to include, or a list containing the
        JavaScript version number and the code string. The keys can be anything.
        The same applies for javascriptLibs, but the string should be the
        SRC filename rather than a code string."""
        javascriptTagsTxt = []
        for key, details in self._javascriptTags.items():
            if not isinstance(details, (list, tuple)):
                details = ['', details]
            # BUGFIX: the original emitted details[0] (the version) twice and
            # never wrote details[1], so the script body was silently dropped.
            javascriptTagsTxt += ['<script language="JavaScript', str(details[0]),
                                  '" type="text/javascript"><!--\n',
                                  str(details[1]), '\n//--></script>\n']
        for key, details in self._javascriptLibs.items():
            if not isinstance(details, (list, tuple)):
                details = ['', details]
            javascriptTagsTxt += ['<script language="JavaScript', str(details[0]),
                                  '" type="text/javascript" src="',
                                  str(details[1]), '" />\n']
        return ''.join(javascriptTagsTxt)

    def bodyTag(self):
        """Create a body tag from the entries in the dict bodyTagAttribs."""
        return self.formHTMLTag('body', self._bodyTagAttribs)

    def imgTag(self, src, alt='', width=None, height=None, border=0):
        """Dynamically generate an image tag.  Cheetah will try to convert the
        src argument to a WebKit serverSidePath relative to the servlet's
        location. If width and height aren't specified they are calculated using
        PIL or ImageMagick if available."""
        src = self.normalizePath(src)

        if not width or not height:
            try:                        # see if the dimensions can be calc'd with PIL
                import Image
                im = Image.open(src)
                calcWidth, calcHeight = im.size
                del im
                if not width: width = calcWidth
                if not height: height = calcHeight
            except Exception:
                try:                    # try ImageMagick's 'identify' instead
                    # NOTE(review): src is interpolated into a shell command;
                    # it must be trusted input.
                    calcWidth, calcHeight = os.popen(
                        'identify -format "%w,%h" ' + src).read().split(',')
                    if not width: width = calcWidth
                    if not height: height = calcHeight
                except Exception:
                    # Dimensions stay unset; emit the tag without them.
                    pass

        if width and height:
            return ''.join(['<img src="', src, '" width="', str(width), '" height="', str(height),
                            '" alt="', alt, '" border="', str(border), '" />'])
        elif width:
            return ''.join(['<img src="', src, '" width="', str(width),
                            '" alt="', alt, '" border="', str(border), '" />'])
        elif height:
            return ''.join(['<img src="', src, '" height="', str(height),
                            '" alt="', alt, '" border="', str(border), '" />'])
        else:
            return ''.join(['<img src="', src, '" alt="', alt, '" border="', str(border), '" />'])

    def currentYr(self):
        """Return a string representing the current yr."""
        return time.strftime("%Y", time.localtime(time.time()))

    def currentDate(self, formatString="%b %d, %Y"):
        """Return a string representing the current localtime."""
        return time.strftime(formatString, time.localtime(time.time()))

    def spacer(self, width=1, height=1):
        """Return a transparent spacer <img> tag of the given size."""
        return '<img src="spacer.gif" width="%s" height="%s" alt="" />'% (str(width), str(height))

    def formHTMLTag(self, tagName, attributes=None):
        """returns a string containing an HTML <tag> """
        # BUGFIX: the original used a mutable {} default argument, which is
        # shared between calls; a None sentinel is backward-compatible.
        if attributes is None:
            attributes = {}
        tagTxt = ['<', tagName.lower()]
        for name, val in attributes.items():
            tagTxt += [' ', name.lower(), '="', str(val), '"']
        tagTxt.append('>')
        return ''.join(tagTxt)

    def formatMetaTags(self, metaTags):
        """format a dict of metaTag definitions into an HTML version"""
        metaTagsTxt = []
        if 'HTTP-EQUIV' in metaTags:
            for http_equiv, contents in metaTags['HTTP-EQUIV'].items():
                metaTagsTxt += ['<meta http-equiv="', str(http_equiv), '" content="',
                                str(contents), '" />\n']
        if 'NAME' in metaTags:
            for name, contents in metaTags['NAME'].items():
                metaTagsTxt += ['<meta name="', str(name), '" content="', str(contents),
                                '" />\n']
        return ''.join(metaTagsTxt)
| Python |
"""
Parser classes for Cheetah's Compiler
Classes:
ParseError( Exception )
_LowLevelParser( Cheetah.SourceReader.SourceReader ), basically a lexer
_HighLevelParser( _LowLevelParser )
Parser === _HighLevelParser (an alias)
"""
import os
import sys
import re
from re import DOTALL, MULTILINE
from types import StringType, ListType, TupleType, ClassType, TypeType
import time
from tokenize import pseudoprog
import inspect
import new
import traceback
from Cheetah.SourceReader import SourceReader
from Cheetah import Filters
from Cheetah import ErrorCatchers
from Cheetah.Unspecified import Unspecified
from Cheetah.Macros.I18n import I18n
# re tools
# Compiled-pattern cache, keyed by the pattern source string.
_regexCache = {}
def cachedRegex(pattern):
    """Compile *pattern* once and return the same compiled object on
    every subsequent call with the same pattern string."""
    try:
        return _regexCache[pattern]
    except KeyError:
        compiled = re.compile(pattern)
        _regexCache[pattern] = compiled
        return compiled
def escapeRegexChars(txt,
                     escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
    """Return a txt with all special regular expressions chars escaped."""
    # escapeRE is compiled once at definition time (default-arg trick) and
    # backslash-escapes each captured metacharacter.
    return escapeRE.sub(r'\\\1', txt)
# Tiny helpers for composing regex source strings from alternatives.
def group(*choices): return '(' + '|'.join(choices) + ')'        # capturing group
def nongroup(*choices): return '(?:' + '|'.join(choices) + ')'   # non-capturing group
# BUGFIX: the named-group prefix is '(?P<', not '(P:<' as originally written
# (the old form yielded a pattern matching the literal text '(P:<name>...').
def namedGroup(name, *choices): return '(?P<' + name + '>' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'      # NB: shadows builtin any()
def maybe(*choices): return group(*choices) + '?'
##################################################
## CONSTANTS & GLOBALS ##
NO_CACHE = 0
STATIC_CACHE = 1
REFRESH_CACHE = 2
SET_LOCAL = 0
SET_GLOBAL = 1
SET_MODULE = 2
##################################################
## Tokens for the parser ##
#generic
identchars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ_"
namechars = identchars + "0123456789"
#operators
powerOp = '**'
unaryArithOps = ('+', '-', '~')
binaryArithOps = ('+', '-', '/', '//', '%')
shiftOps = ('>>', '<<')
bitwiseOps = ('&', '|', '^')
assignOp = '='
augAssignOps = ('+=', '-=', '/=', '*=', '**=', '^=', '%=',
'>>=', '<<=', '&=', '|=', )
assignmentOps = (assignOp,) + augAssignOps
compOps = ('<', '>', '==', '!=', '<=', '>=', '<>', 'is', 'in',)
booleanOps = ('and', 'or', 'not')
operators = (powerOp,) + unaryArithOps + binaryArithOps \
+ shiftOps + bitwiseOps + assignmentOps \
+ compOps + booleanOps
delimeters = ('(', ')', '{', '}', '[', ']',
',', '.', ':', ';', '=', '`') + augAssignOps
keywords = ('and', 'del', 'for', 'is', 'raise',
'assert', 'elif', 'from', 'lambda', 'return',
'break', 'else', 'global', 'not', 'try',
'class', 'except', 'if', 'or', 'while',
'continue', 'exec', 'import', 'pass',
'def', 'finally', 'in', 'print',
)
single3 = "'''"
double3 = '"""'
tripleQuotedStringStarts = ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""')
tripleQuotedStringPairs = {"'''": single3, '"""': double3,
"r'''": single3, 'r"""': double3,
"u'''": single3, 'u"""': double3,
"ur'''": single3, 'ur"""': double3,
"R'''": single3, 'R"""': double3,
"U'''": single3, 'U"""': double3,
"uR'''": single3, 'uR"""': double3,
"Ur'''": single3, 'Ur"""': double3,
"UR'''": single3, 'UR"""': double3,
}
closurePairs= {')':'(',']':'[','}':'{'}
closurePairsRev= {'(':')','[':']','{':'}'}
##################################################
## Regex chunks for the parser ##
tripleQuotedStringREs = {}
def makeTripleQuoteRe(start, end):
    """Build a DOTALL regex matching a triple-quoted string: the *start*
    delimiter, a non-greedy body (newlines included), then *end*."""
    pattern = '(?:%s).*?(?:%s)' % (escapeRegexChars(start), escapeRegexChars(end))
    return re.compile(pattern, re.DOTALL)
for start, end in tripleQuotedStringPairs.items():
tripleQuotedStringREs[start] = makeTripleQuoteRe(start, end)
WS = r'[ \f\t]*'
EOL = r'\r\n|\n|\r'
EOLZ = EOL + r'|\Z'
escCharLookBehind = nongroup(r'(?<=\A)', r'(?<!\\)')
nameCharLookAhead = r'(?=[A-Za-z_])'
identRE=re.compile(r'[a-zA-Z_][a-zA-Z_0-9]*')
EOLre=re.compile(r'(?:\r\n|\r|\n)')
specialVarRE=re.compile(r'([a-zA-z_]+)@') # for matching specialVar comments
# e.g. ##author@ Tavis Rudd
unicodeDirectiveRE = re.compile(
r'(?:^|\r\n|\r|\n)\s*#\s{0,5}unicode[:\s]*([-\w.]*)\s*(?:\r\n|\r|\n)', re.MULTILINE)
encodingDirectiveRE = re.compile(
r'(?:^|\r\n|\r|\n)\s*#\s{0,5}encoding[:\s]*([-\w.]*)\s*(?:\r\n|\r|\n)', re.MULTILINE)
escapedNewlineRE = re.compile(r'(?<!\\)((\\\\)*)\\(n|012)')
directiveNamesAndParsers = {
# importing and inheritance
'import': None,
'from': None,
'extends': 'eatExtends',
'implements': 'eatImplements',
'super': 'eatSuper',
# output, filtering, and caching
'slurp': 'eatSlurp',
'raw': 'eatRaw',
'include': 'eatInclude',
'cache': 'eatCache',
'filter': 'eatFilter',
'echo': None,
'silent': None,
'transform': 'eatTransform',
'call': 'eatCall',
'arg': 'eatCallArg',
'capture': 'eatCapture',
# declaration, assignment, and deletion
'attr': 'eatAttr',
'def': 'eatDef',
'block': 'eatBlock',
'@': 'eatDecorator',
'defmacro': 'eatDefMacro',
'closure': 'eatClosure',
'set': 'eatSet',
'del': None,
# flow control
'if': 'eatIf',
'while': None,
'for': None,
'else': None,
'elif': None,
'pass': None,
'break': None,
'continue': None,
'stop': None,
'return': None,
'yield': None,
# little wrappers
'repeat': None,
'unless': None,
# error handling
'assert': None,
'raise': None,
'try': None,
'except': None,
'finally': None,
'errorCatcher': 'eatErrorCatcher',
# intructions to the parser and compiler
'breakpoint': 'eatBreakPoint',
'compiler': 'eatCompiler',
'compiler-settings': 'eatCompilerSettings',
# misc
'shBang': 'eatShbang',
'encoding': 'eatEncoding',
'end': 'eatEndDirective',
}
endDirectiveNamesAndHandlers = {
'def': 'handleEndDef', # has short-form
'block': None, # has short-form
'closure': None, # has short-form
'cache': None, # has short-form
'call': None, # has short-form
'capture': None, # has short-form
'filter': None,
'errorCatcher': None,
'while': None, # has short-form
'for': None, # has short-form
'if': None, # has short-form
'try': None, # has short-form
'repeat': None, # has short-form
'unless': None, # has short-form
}
##################################################
## CLASSES ##
# @@TR: SyntaxError doesn't call exception.__str__ for some reason!
#class ParseError(SyntaxError):
class ParseError(ValueError):
    """Cheetah syntax error; str() renders a report showing the offending
    line with up to three lines of context on each side."""
    def __init__(self, stream, msg='Invalid Syntax', extMsg='', lineno=None, col=None):
        # stream: the SourceReader being parsed; report() reads it back.
        self.stream = stream
        # Clamp the stream position so row/col lookups stay in bounds when
        # the error occurred at end-of-input.
        if stream.pos() >= len(stream):
            stream.setPos(len(stream) -1)
        self.msg = msg
        # extMsg: extra explanatory text appended to the report
        self.extMsg = extMsg
        # lineno/col: 1-based position if known; otherwise derived from stream
        self.lineno = lineno
        self.col = col
    def __str__(self):
        return self.report()
    def report(self):
        """Build the multi-line error report: message, location, and the
        offending line in context with a caret under the error column."""
        stream = self.stream
        if stream.filename():
            f = " in file %s" % stream.filename()
        else:
            f = ''
        report = ''
        if self.lineno:
            lineno = self.lineno
            row, col, line = (lineno, (self.col or 0),
                              self.stream.splitlines()[lineno-1])
        else:
            row, col, line = self.stream.getRowColLine()
        ## get the surrounding lines
        lines = stream.splitlines()
        prevLines = [] # (rowNum, content)
        for i in range(1, 4):
            if row-1-i <=0:
                break
            prevLines.append( (row-i, lines[row-1-i]) )
        nextLines = [] # (rowNum, content)
        for i in range(1, 4):
            if not row-1+i < len(lines):
                break
            nextLines.append( (row+i, lines[row-1+i]) )
        nextLines.reverse()
        ## print the main message
        report += "\n\n%s\n" %self.msg
        report += "Line %i, column %i%s\n\n" % (row, col, f)
        report += 'Line|Cheetah Code\n'
        report += '----|-------------------------------------------------------------\n'
        while prevLines:
            lineInfo = prevLines.pop()
            report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
        report += "%(row)-4d|%(line)s\n"% {'row':row, 'line':line}
        report += ' '*5 +' '*(col-1) + "^\n"
        while nextLines:
            lineInfo = nextLines.pop()
            report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
        ## add the extra msg
        if self.extMsg:
            report += self.extMsg + '\n'
        return report
class ForbiddenSyntax(ParseError):
    """Base for errors about syntax that the current parser settings
    disallow (rather than syntax that is malformed)."""
    pass
class ForbiddenExpression(ForbiddenSyntax):
    """Raised for an expression that is disallowed by the settings."""
    pass
class ForbiddenDirective(ForbiddenSyntax):
    """Raised for a directive that is disallowed by the settings."""
    pass
class CheetahVariable(object):
    """Parse-time record of a Cheetah $variable and the tokens that were
    attached to it in the source."""
    def __init__(self, nameChunks, useNameMapper=True, cacheToken=None,
                 rawSource=None):
        self.nameChunks = nameChunks        # name pieces as produced by the parser
        self.useNameMapper = useNameMapper  # whether NameMapper lookup applies
        self.cacheToken = cacheToken        # cache token text, if one was present
        self.rawSource = rawSource          # original source text of the variable
class Placeholder(CheetahVariable):
    """A CheetahVariable occurring as an output $placeholder."""
    pass
class ArgList(object):
    """Used by _LowLevelParser.getArgList()

    Accumulates a parsed argument list: add_argument() opens a new
    (name, default=None) slot, add_default() appends token text to the
    slot selected by ``count``, and next() advances ``count``.
    """
    def __init__(self):
        self.arguments = []   # argument names, in declaration order
        self.defaults = []    # default-value source text (or None), parallel list
        self.count = 0        # index of the slot add_default() writes to

    def add_argument(self, name):
        """Begin a new argument with no default value."""
        self.arguments.append(name)
        self.defaults.append(None)

    def next(self):
        """Advance to the next argument slot."""
        self.count += 1

    def add_default(self, token):
        """Append *token* to the current argument's default expression."""
        count = self.count
        if self.defaults[count] is None:
            self.defaults[count] = ''
        self.defaults[count] += token

    def merge(self):
        """Return [(name, default-or-None), ...] with whitespace stripped.

        Rewritten with zip() instead of the Python-2-only map(None, ...)
        idiom; the two parallel lists always have equal length, so the
        pairing is unchanged.  An all-whitespace default collapses to None.
        """
        names = (a.strip() for a in self.arguments)
        defaults = ((d.strip() or None) if isinstance(d, str) else None
                    for d in self.defaults)
        return list(zip(names, defaults))

    def __str__(self):
        return str(self.merge())
class _LowLevelParser(SourceReader):
"""This class implements the methods to match or extract ('get*') the basic
elements of Cheetah's grammar. It does NOT handle any code generation or
state management.
"""
    # Settings provider (assigned by the Compiler via setSettingsManager()).
    _settingsManager = None
    def setSettingsManager(self, settingsManager):
        # The compiler hands the parser its settings object after construction.
        self._settingsManager = settingsManager
def setting(self, key, default=Unspecified):
if default is Unspecified:
return self._settingsManager.setting(key)
else:
return self._settingsManager.setting(key, default=default)
    def setSetting(self, key, val):
        # Write-through to the shared settings manager.
        self._settingsManager.setSetting(key, val)
    def settings(self):
        # The full settings dict held by the settings manager.
        return self._settingsManager.settings()
    def updateSettings(self, settings):
        # Merge *settings* into the settings manager's current settings.
        self._settingsManager.updateSettings(settings)
    def _initializeSettings(self):
        # Reset the settings manager to its default settings.
        self._settingsManager._initializeSettings()
    def configureParser(self):
        """Is called by the Compiler instance after the parser has had a
        settingsManager assigned with self.setSettingsManager()
        """
        self._makeCheetahVarREs()
        self._makeCommentREs()
        self._makeDirectiveREs()
        self._makePspREs()
        # First character of each special token; matchTopLevelToken() uses
        # this string as a cheap pre-filter before trying the full matchers.
        self._possibleNonStrConstantChars = (
            self.setting('commentStartToken')[0] +
            self.setting('multiLineCommentStartToken')[0] +
            self.setting('cheetahVarStartToken')[0] +
            self.setting('directiveStartToken')[0] +
            self.setting('PSPStartToken')[0])
        # Matchers tried in order by matchTopLevelToken().
        self._nonStrConstMatchers = [
            self.matchCommentStartToken,
            self.matchMultiLineCommentStartToken,
            self.matchVariablePlaceholderStart,
            self.matchExpressionPlaceholderStart,
            self.matchDirective,
            self.matchPSPStartToken,
            self.matchEOLSlurpToken,
            ]
## regex setup ##
    def _makeCheetahVarREs(self):
        """Setup the regexs for Cheetah $var parsing."""
        # Numeric interval with a unit suffix (s/m/h/d/w) or bare number,
        # used inside *<interval>* cache tokens -- presumably
        # seconds/minutes/hours/days/weeks; confirm against the #cache docs.
        num = r'[0-9\.]+'
        interval = (r'(?P<interval>' +
                    num + r's|' +
                    num + r'm|' +
                    num + r'h|' +
                    num + r'd|' +
                    num + r'w|' +
                    num + ')'
                    )
        # Three mutually exclusive cache markers after the $ sign:
        # *<interval>* (refresh), * (static), or nothing (no caching).
        cacheToken = (r'(?:' +
                      r'(?P<REFRESH_CACHE>\*' + interval + '\*)'+
                      '|' +
                      r'(?P<STATIC_CACHE>\*)' +
                      '|' +
                      r'(?P<NO_CACHE>)' +
                      ')')
        self.cacheTokenRE = cachedRegex(cacheToken)
        # '!' right after the start token marks a silent placeholder.
        silentPlaceholderToken = (r'(?:' +
                                  r'(?P<SILENT>' +escapeRegexChars('!')+')'+
                                  '|' +
                                  r'(?P<NOT_SILENT>)' +
                                  ')')
        self.silentPlaceholderTokenRE = cachedRegex(silentPlaceholderToken)
        # Full placeholder start: $, optional '!', optional cache token,
        # optional {, ( or [ enclosure, followed by an identifier char.
        # escCharLookBehind keeps backslash-escaped tokens from matching.
        self.cheetahVarStartRE = cachedRegex(
            escCharLookBehind +
            r'(?P<startToken>'+escapeRegexChars(self.setting('cheetahVarStartToken'))+')'+
            r'(?P<silenceToken>'+silentPlaceholderToken+')'+
            r'(?P<cacheToken>'+cacheToken+')'+
            r'(?P<enclosure>|(?:(?:\{|\(|\[)[ \t\f]*))' + # allow WS after enclosure
            r'(?=[A-Za-z_])')
        validCharsLookAhead = r'(?=[A-Za-z_\*!\{\(\[])'
        self.cheetahVarStartToken = self.setting('cheetahVarStartToken')
        self.cheetahVarStartTokenRE = cachedRegex(
            escCharLookBehind +
            escapeRegexChars(self.setting('cheetahVarStartToken'))
            +validCharsLookAhead
            )
        # Inside expressions only a bare $name is recognized (no lookbehind).
        self.cheetahVarInExpressionStartTokenRE = cachedRegex(
            escapeRegexChars(self.setting('cheetahVarStartToken'))
            +r'(?=[A-Za-z_])'
            )
        # ${...}, $(...) or $[...] expression placeholders.
        self.expressionPlaceholderStartRE = cachedRegex(
            escCharLookBehind +
            r'(?P<startToken>' + escapeRegexChars(self.setting('cheetahVarStartToken')) + ')' +
            r'(?P<cacheToken>' + cacheToken + ')' +
            #r'\[[ \t\f]*'
            r'(?:\{|\(|\[)[ \t\f]*'
            + r'(?=[^\)\}\]])'
            )
        # EOL slurp token plus trailing whitespace up to the line ending;
        # disabled entirely when the setting is empty/false.
        if self.setting('EOLSlurpToken'):
            self.EOLSlurpRE = cachedRegex(
                escapeRegexChars(self.setting('EOLSlurpToken'))
                + r'[ \t\f]*'
                + r'(?:'+EOL+')'
                )
        else:
            self.EOLSlurpRE = None
    def _makeCommentREs(self):
        """Construct the regex bits that are used in comment parsing."""
        # escCharLookBehind keeps backslash-escaped comment tokens from
        # being treated as real comment starts.
        startTokenEsc = escapeRegexChars(self.setting('commentStartToken'))
        self.commentStartTokenRE = cachedRegex(escCharLookBehind + startTokenEsc)
        del startTokenEsc
        startTokenEsc = escapeRegexChars(
            self.setting('multiLineCommentStartToken'))
        endTokenEsc = escapeRegexChars(
            self.setting('multiLineCommentEndToken'))
        self.multiLineCommentTokenStartRE = cachedRegex(escCharLookBehind +
                                                        startTokenEsc)
        self.multiLineCommentEndTokenRE = cachedRegex(escCharLookBehind +
                                                      endTokenEsc)
    def _makeDirectiveREs(self):
        """Construct the regexs that are used in directive parsing."""
        startToken = self.setting('directiveStartToken')
        endToken = self.setting('directiveEndToken')
        startTokenEsc = escapeRegexChars(startToken)
        endTokenEsc = escapeRegexChars(endToken)
        # A directive start must be followed by a name char or '@'
        # (decorators); optionally with whitespace in between.
        validSecondCharsLookAhead = r'(?=[A-Za-z_@])'
        reParts = [escCharLookBehind, startTokenEsc]
        if self.setting('allowWhitespaceAfterDirectiveStartToken'):
            reParts.append('[ \t]*')
        reParts.append(validSecondCharsLookAhead)
        self.directiveStartTokenRE = cachedRegex(''.join(reParts))
        self.directiveEndTokenRE = cachedRegex(escCharLookBehind + endTokenEsc)
    def _makePspREs(self):
        """Setup the regexs for PSP parsing."""
        # PSP start/end tokens, again guarded against backslash-escapes.
        startToken = self.setting('PSPStartToken')
        startTokenEsc = escapeRegexChars(startToken)
        self.PSPStartTokenRE = cachedRegex(escCharLookBehind + startTokenEsc)
        endToken = self.setting('PSPEndToken')
        endTokenEsc = escapeRegexChars(endToken)
        self.PSPEndTokenRE = cachedRegex(escCharLookBehind + endTokenEsc)
def _unescapeCheetahVars(self, theString):
"""Unescape any escaped Cheetah \$vars in the string.
"""
token = self.setting('cheetahVarStartToken')
return theString.replace('\\' + token, token)
def _unescapeDirectives(self, theString):
"""Unescape any escaped Cheetah directives in the string.
"""
token = self.setting('directiveStartToken')
return theString.replace('\\' + token, token)
    def isLineClearToStartToken(self, pos=None):
        # True when only whitespace precedes *pos* on its line; thin alias
        # of isLineClearToPos kept for readability at call sites.
        return self.isLineClearToPos(pos)
def matchTopLevelToken(self):
"""Returns the first match found from the following methods:
self.matchCommentStartToken
self.matchMultiLineCommentStartToken
self.matchVariablePlaceholderStart
self.matchExpressionPlaceholderStart
self.matchDirective
self.matchPSPStartToken
self.matchEOLSlurpToken
Returns None if no match.
"""
match = None
if self.peek() in self._possibleNonStrConstantChars:
for matcher in self._nonStrConstMatchers:
match = matcher()
if match:
break
return match
def matchPyToken(self):
match = pseudoprog.match(self.src(), self.pos())
if match and match.group() in tripleQuotedStringStarts:
TQSmatch = tripleQuotedStringREs[match.group()].match(self.src(), self.pos())
if TQSmatch:
return TQSmatch
return match
def getPyToken(self):
match = self.matchPyToken()
if match is None:
raise ParseError(self)
elif match.group() in tripleQuotedStringStarts:
raise ParseError(self, msg='Malformed triple-quoted string')
return self.readTo(match.end())
def matchEOLSlurpToken(self):
if self.EOLSlurpRE:
return self.EOLSlurpRE.match(self.src(), self.pos())
def getEOLSlurpToken(self):
match = self.matchEOLSlurpToken()
if not match:
raise ParseError(self, msg='Invalid EOL slurp token')
return self.readTo(match.end())
def matchCommentStartToken(self):
return self.commentStartTokenRE.match(self.src(), self.pos())
def getCommentStartToken(self):
match = self.matchCommentStartToken()
if not match:
raise ParseError(self, msg='Invalid single-line comment start token')
return self.readTo(match.end())
def matchMultiLineCommentStartToken(self):
return self.multiLineCommentTokenStartRE.match(self.src(), self.pos())
def getMultiLineCommentStartToken(self):
match = self.matchMultiLineCommentStartToken()
if not match:
raise ParseError(self, msg='Invalid multi-line comment start token')
return self.readTo(match.end())
def matchMultiLineCommentEndToken(self):
return self.multiLineCommentEndTokenRE.match(self.src(), self.pos())
def getMultiLineCommentEndToken(self):
match = self.matchMultiLineCommentEndToken()
if not match:
raise ParseError(self, msg='Invalid multi-line comment end token')
return self.readTo(match.end())
    def getCommaSeparatedSymbols(self):
        """Consume a comma-separated list of dotted names and return them as
        a list of strings.

        Loosely based on getDottedName to pull out comma separated
        named chunks.  Scanning stops at the first char that is not part of
        a name, a '.' followed by an identifier, a ',', or inline whitespace.

        Raises ParseError when the current char cannot start an identifier.
        """
        srcLen = len(self)
        pieces = []      # completed dotted names
        nameChunks = []  # fragments of the name currently being read
        if not self.peek() in identchars:
            raise ParseError(self)
        while self.pos() < srcLen:
            c = self.peek()
            if c in namechars:
                nameChunk = self.getIdentifier()
                nameChunks.append(nameChunk)
            elif c == '.':
                # keep the dot only when another identifier follows it
                if self.pos()+1 <srcLen and self.peek(1) in identchars:
                    nameChunks.append(self.getc())
                else:
                    break
            elif c == ',':
                # finish the current name and start collecting the next one
                self.getc()
                pieces.append(''.join(nameChunks))
                nameChunks = []
            elif c in (' ', '\t'):
                # skip inline whitespace between names
                self.getc()
            else:
                break
        if nameChunks:
            pieces.append(''.join(nameChunks))
        return pieces
    def getDottedName(self):
        """Consume and return a dotted name such as ``a.b.c``.

        A trailing '.' is consumed only when another identifier follows it.
        Raises ParseError when the current char cannot start an identifier.
        """
        srcLen = len(self)
        nameChunks = []
        if not self.peek() in identchars:
            raise ParseError(self)
        while self.pos() < srcLen:
            c = self.peek()
            if c in namechars:
                nameChunk = self.getIdentifier()
                nameChunks.append(nameChunk)
            elif c == '.':
                # keep the dot only when another identifier follows it
                if self.pos()+1 <srcLen and self.peek(1) in identchars:
                    nameChunks.append(self.getc())
                else:
                    break
            else:
                break
        return ''.join(nameChunks)
def matchIdentifier(self):
return identRE.match(self.src(), self.pos())
def getIdentifier(self):
match = self.matchIdentifier()
if not match:
raise ParseError(self, msg='Invalid identifier')
return self.readTo(match.end())
def matchOperator(self):
match = self.matchPyToken()
if match and match.group() not in operators:
match = None
return match
def getOperator(self):
match = self.matchOperator()
if not match:
raise ParseError(self, msg='Expected operator')
return self.readTo( match.end() )
def matchAssignmentOperator(self):
match = self.matchPyToken()
if match and match.group() not in assignmentOps:
match = None
return match
def getAssignmentOperator(self):
match = self.matchAssignmentOperator()
if not match:
raise ParseError(self, msg='Expected assignment operator')
return self.readTo( match.end() )
def matchDirective(self):
"""Returns False or the name of the directive matched.
"""
startPos = self.pos()
if not self.matchDirectiveStartToken():
return False
self.getDirectiveStartToken()
directiveName = self.matchDirectiveName()
self.setPos(startPos)
return directiveName
    def matchDirectiveName(self, directiveNameChars=identchars+'0123456789-@'):
        """Longest-prefix match of a known directive name at the current
        position.

        Returns the matched name, '@' for decorator syntax, or None.  The
        parser position is always restored before returning.
        """
        startPos = self.pos()
        possibleMatches = self._directiveNamesAndParsers.keys()
        name = ''
        match = None
        while not self.atEnd():
            c = self.getc()
            if not c in directiveNameChars:
                break
            name += c
            if name == '@':
                # '@' alone is the decorator directive when an identifier follows
                if not self.atEnd() and self.peek() in identchars:
                    match = '@'
                break
            # narrow the candidate set to names sharing the prefix read so far
            possibleMatches = [dn for dn in possibleMatches if dn.startswith(name)]
            if not possibleMatches:
                break
            elif (name in possibleMatches and (self.atEnd() or self.peek() not in directiveNameChars)):
                # exact name, and the next char cannot extend it
                match = name
                break
        self.setPos(startPos)
        return match
def matchDirectiveStartToken(self):
return self.directiveStartTokenRE.match(self.src(), self.pos())
def getDirectiveStartToken(self):
match = self.matchDirectiveStartToken()
if not match:
raise ParseError(self, msg='Invalid directive start token')
return self.readTo(match.end())
def matchDirectiveEndToken(self):
return self.directiveEndTokenRE.match(self.src(), self.pos())
def getDirectiveEndToken(self):
match = self.matchDirectiveEndToken()
if not match:
raise ParseError(self, msg='Invalid directive end token')
return self.readTo(match.end())
def matchColonForSingleLineShortFormDirective(self):
if not self.atEnd() and self.peek()==':':
restOfLine = self[self.pos()+1:self.findEOL()]
restOfLine = restOfLine.strip()
if not restOfLine:
return False
elif self.commentStartTokenRE.match(restOfLine):
return False
else: # non-whitespace, non-commment chars found
return True
return False
def matchPSPStartToken(self):
return self.PSPStartTokenRE.match(self.src(), self.pos())
def matchPSPEndToken(self):
return self.PSPEndTokenRE.match(self.src(), self.pos())
def getPSPStartToken(self):
match = self.matchPSPStartToken()
if not match:
raise ParseError(self, msg='Invalid psp start token')
return self.readTo(match.end())
def getPSPEndToken(self):
match = self.matchPSPEndToken()
if not match:
raise ParseError(self, msg='Invalid psp end token')
return self.readTo(match.end())
def matchCheetahVarStart(self):
"""includes the enclosure and cache token"""
return self.cheetahVarStartRE.match(self.src(), self.pos())
def matchCheetahVarStartToken(self):
"""includes the enclosure and cache token"""
return self.cheetahVarStartTokenRE.match(self.src(), self.pos())
def matchCheetahVarInExpressionStartToken(self):
"""no enclosures or cache tokens allowed"""
return self.cheetahVarInExpressionStartTokenRE.match(self.src(), self.pos())
def matchVariablePlaceholderStart(self):
"""includes the enclosure and cache token"""
return self.cheetahVarStartRE.match(self.src(), self.pos())
def matchExpressionPlaceholderStart(self):
"""includes the enclosure and cache token"""
return self.expressionPlaceholderStartRE.match(self.src(), self.pos())
def getCheetahVarStartToken(self):
"""just the start token, not the enclosure or cache token"""
match = self.matchCheetahVarStartToken()
if not match:
raise ParseError(self, msg='Expected Cheetah $var start token')
return self.readTo( match.end() )
def getCacheToken(self):
try:
token = self.cacheTokenRE.match(self.src(), self.pos())
self.setPos( token.end() )
return token.group()
except:
raise ParseError(self, msg='Expected cache token')
def getSilentPlaceholderToken(self):
try:
token = self.silentPlaceholderTokenRE.match(self.src(), self.pos())
self.setPos( token.end() )
return token.group()
except:
raise ParseError(self, msg='Expected silent placeholder token')
    def getTargetVarsList(self):
        """Consume the target-variable list of a #for/#set style directive
        (everything up to the 'in' keyword or end of line) and return the
        variable names as a list of dotted-name strings.
        """
        varnames = []
        while not self.atEnd():
            if self.peek() in ' \t\f':
                self.getWhiteSpace()
            elif self.peek() in '\r\n':
                # end of line terminates the list
                break
            elif self.startswith(','):
                self.advance()
            elif self.startswith('in ') or self.startswith('in\t'):
                # the 'in' keyword terminates the list
                break
            #elif self.matchCheetahVarStart():
            elif self.matchCheetahVarInExpressionStartToken():
                # a $var target: discard the start/silent/cache tokens and
                # keep only the dotted name
                self.getCheetahVarStartToken()
                self.getSilentPlaceholderToken()
                self.getCacheToken()
                varnames.append( self.getDottedName() )
            elif self.matchIdentifier():
                varnames.append( self.getDottedName() )
            else:
                break
        return varnames
    def getCheetahVar(self, plain=False, skipStartToken=False):
        """This is called when parsing inside expressions. Cache tokens are only
        valid in placeholders so this method discards any cache tokens found.

        Returns the generated code for the $var (via getCheetahVarBody).
        """
        if not skipStartToken:
            self.getCheetahVarStartToken()
        # consume and ignore tokens that are only meaningful at top level
        self.getSilentPlaceholderToken()
        self.getCacheToken()
        return self.getCheetahVarBody(plain=plain)
    def getCheetahVarBody(self, plain=False):
        """Parse the $var name chunks and delegate code generation to the
        compiler."""
        # @@TR: this should be in the compiler
        return self._compiler.genCheetahVar(self.getCheetahVarNameChunks(), plain=plain)
    def getCheetahVarNameChunks(self):
        """
        nameChunks = list of Cheetah $var subcomponents represented as tuples
            [ (namemapperPart,autoCall,restOfName),
              ]
        where:
            namemapperPart = the dottedName base
            autocall = where NameMapper should use autocalling on namemapperPart
            restOfName = any arglist, index, or slice

        If restOfName contains a call arglist (e.g. '(1234)') then autocall is
        False, otherwise it defaults to True.

        EXAMPLE
        ------------------------------------------------------------------------
        if the raw CheetahVar is
          $a.b.c[1].d().x.y.z

        nameChunks is the list
          [ ('a.b.c',True,'[1]'),
            ('d',False,'()'),
            ('x.y.z',True,''),
            ]
        """
        chunks = []
        while self.pos() < len(self):
            rest = ''
            autoCall = True
            if not self.peek() in identchars + '.':
                break
            elif self.peek() == '.':
                if self.pos()+1 < len(self) and self.peek(1) in identchars:
                    self.advance()  # discard the period as it isn't needed with NameMapper
                else:
                    break

            dottedName = self.getDottedName()
            if not self.atEnd() and self.peek() in '([':
                if self.peek() == '(':
                    rest = self.getCallArgString()
                else:
                    rest = self.getExpression(enclosed=True)

            # split off the last name segment: only it owns the arglist/index
            period = max(dottedName.rfind('.'), 0)
            if period:
                chunks.append( (dottedName[:period], autoCall, '') )
                dottedName = dottedName[period+1:]
            if rest and rest[0]=='(':
                autoCall = False
            chunks.append( (dottedName, autoCall, rest) )
        return chunks
    def getCallArgString(self,
                         enclosures=[],  # list of tuples (char, pos), where char is ({ or [
                         useNameMapper=Unspecified):
        """ Get a method/function call argument string.

        This method understands *arg, and **kw

        NOTE(review): the mutable default for 'enclosures' is harmless here:
        a falsy value is rebound to a fresh list below, so the shared default
        list is never mutated.
        """
        # @@TR: this settings mangling should be removed
        if useNameMapper is not Unspecified:
            useNameMapper_orig = self.setting('useNameMapper')
            self.setSetting('useNameMapper', useNameMapper)

        if enclosures:
            pass
        else:
            # no open enclosure handed in: consume the opening '(' ourselves
            if not self.peek() == '(':
                raise ParseError(self, msg="Expected '('")
            startPos = self.pos()
            self.getc()
            enclosures = [('(', startPos),
                          ]

        argStringBits = ['(']
        addBit = argStringBits.append

        while True:
            if self.atEnd():
                open = enclosures[-1][0]
                close = closurePairsRev[open]
                self.setPos(enclosures[-1][1])
                raise ParseError(
                    self, msg="EOF was reached before a matching '" + close +
                    "' was found for the '" + open + "'")

            c = self.peek()
            if c in ")}]":  # get the ending enclosure and break
                if not enclosures:
                    raise ParseError(self)
                c = self.getc()
                open = closurePairs[c]
                if enclosures[-1][0] == open:
                    enclosures.pop()
                    addBit(')')
                    break
                else:
                    raise ParseError(self)
            elif c in " \t\f\r\n":
                addBit(self.getc())
            elif self.matchCheetahVarInExpressionStartToken():
                startPos = self.pos()
                codeFor1stToken = self.getCheetahVar()
                WS = self.getWhiteSpace()
                if not self.atEnd() and self.peek() == '=':
                    nextToken = self.getPyToken()
                    if nextToken == '=':
                        # '$name=' is a keyword argument: re-parse the name
                        # as a plain (non-NameMapper) identifier
                        endPos = self.pos()
                        self.setPos(startPos)
                        codeFor1stToken = self.getCheetahVar(plain=True)
                        self.setPos(endPos)

                    ## finally
                    addBit( codeFor1stToken + WS + nextToken )
                else:
                    addBit( codeFor1stToken + WS)
            elif self.matchCheetahVarStart():
                # it has syntax that is only valid at the top level
                self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr()
            else:
                beforeTokenPos = self.pos()
                token = self.getPyToken()
                if token in ('{', '(', '['):
                    # an opening enclosure: back up and parse it whole
                    self.rev()
                    token = self.getExpression(enclosed=True)
                token = self.transformToken(token, beforeTokenPos)
                addBit(token)

        if useNameMapper is not Unspecified:
            self.setSetting('useNameMapper', useNameMapper_orig)  # @@TR: see comment above

        return ''.join(argStringBits)
    def getDefArgList(self, exitPos=None, useNameMapper=False):
        """ Get an argument list. Can be used for method/function definition
        argument lists or for #directive argument lists. Returns a list of
        tuples in the form (argName, defVal=None) with one tuple for each arg
        name.

        These defVals are always strings, so (argName, defVal=None) is safe even
        with a case like (arg1, arg2=None, arg3=1234*2), which would be returned as
        [('arg1', None),
         ('arg2', 'None'),
         ('arg3', '1234*2'),
        ]

        This method understands *arg, and **kw
        """
        if self.peek() == '(':
            self.advance()
        else:
            exitPos = self.findEOL()  # it's a directive so break at the EOL
        argList = ArgList()
        onDefVal = False  # True while scanning a default-value expression

        # @@TR: this settings mangling should be removed
        useNameMapper_orig = self.setting('useNameMapper')
        self.setSetting('useNameMapper', useNameMapper)

        while True:
            if self.atEnd():
                raise ParseError(
                    self, msg="EOF was reached before a matching ')'"+
                    " was found for the '('")

            if self.pos() == exitPos:
                break

            c = self.peek()
            if c == ")" or self.matchDirectiveEndToken():
                break
            elif c == ":":
                break
            elif c in " \t\f\r\n":
                if onDefVal:
                    # whitespace is significant inside a default expression
                    argList.add_default(c)
                self.advance()
            elif c == '=':
                onDefVal = True
                self.advance()
            elif c == ",":
                argList.next()
                onDefVal = False
                self.advance()
            elif self.startswith(self.cheetahVarStartToken) and not onDefVal:
                # NOTE(review): relies on self.cheetahVarStartToken being set
                # elsewhere on the parser — TODO confirm
                self.advance(len(self.cheetahVarStartToken))
            elif self.matchIdentifier() and not onDefVal:
                argList.add_argument( self.getIdentifier() )
            elif onDefVal:
                if self.matchCheetahVarInExpressionStartToken():
                    token = self.getCheetahVar()
                elif self.matchCheetahVarStart():
                    # it has syntax that is only valid at the top level
                    self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr()
                else:
                    beforeTokenPos = self.pos()
                    token = self.getPyToken()
                    if token in ('{', '(', '['):
                        self.rev()
                        token = self.getExpression(enclosed=True)
                    token = self.transformToken(token, beforeTokenPos)
                argList.add_default(token)
            elif c == '*' and not onDefVal:
                # *args / **kwargs
                varName = self.getc()
                if self.peek() == '*':
                    varName += self.getc()
                if not self.matchIdentifier():
                    raise ParseError(self)
                varName += self.getIdentifier()
                argList.add_argument(varName)
            else:
                raise ParseError(self)

        self.setSetting('useNameMapper', useNameMapper_orig)  # @@TR: see comment above
        return argList.merge()
    def getExpressionParts(self,
                           enclosed=False,
                           enclosures=None,  # list of tuples (char, pos), where char is ({ or [
                           pyTokensToBreakAt=None,  # only works if not enclosed
                           useNameMapper=Unspecified,
                           ):
        """ Get a Cheetah expression that includes $CheetahVars and break at
        directive end tokens, the end of an enclosure, or at a specified
        pyToken.

        Returns the expression as a list of string fragments.
        """
        if useNameMapper is not Unspecified:
            useNameMapper_orig = self.setting('useNameMapper')
            self.setSetting('useNameMapper', useNameMapper)

        if enclosures is None:
            enclosures = []

        srcLen = len(self)
        exprBits = []
        while True:
            if self.atEnd():
                if enclosures:
                    # EOF with an unclosed bracket is an error
                    open = enclosures[-1][0]
                    close = closurePairsRev[open]
                    self.setPos(enclosures[-1][1])
                    raise ParseError(
                        self, msg="EOF was reached before a matching '" + close +
                        "' was found for the '" + open + "'")
                else:
                    break

            c = self.peek()
            if c in "{([":
                exprBits.append(c)
                enclosures.append( (c, self.pos()) )
                self.advance()
            elif enclosed and not enclosures:
                # the enclosed expression has been fully consumed
                break
            elif c in "])}":
                if not enclosures:
                    raise ParseError(self)
                open = closurePairs[c]
                if enclosures[-1][0] == open:
                    enclosures.pop()
                    exprBits.append(c)
                else:
                    # mismatched bracket
                    open = enclosures[-1][0]
                    close = closurePairsRev[open]
                    row, col = self.getRowCol()
                    self.setPos(enclosures[-1][1])
                    raise ParseError(
                        self, msg= "A '" + c + "' was found at line " + str(row) +
                        ", col " + str(col) +
                        " before a matching '" + close +
                        "' was found\nfor the '" + open + "'")
                self.advance()

            elif c in " \f\t":
                exprBits.append(self.getWhiteSpace())
            elif self.matchDirectiveEndToken() and not enclosures:
                break
            elif c == "\\" and self.pos()+1 < srcLen:
                # backslash-newline line continuation
                eolMatch = EOLre.match(self.src(), self.pos()+1)
                if not eolMatch:
                    self.advance()
                    raise ParseError(self, msg='Line ending expected')
                self.setPos( eolMatch.end() )
            elif c in '\r\n':
                # a raw newline only continues the expression inside brackets
                if enclosures:
                    self.advance()
                else:
                    break
            elif self.matchCheetahVarInExpressionStartToken():
                expr = self.getCheetahVar()
                exprBits.append(expr)
            elif self.matchCheetahVarStart():
                # it has syntax that is only valid at the top level
                self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr()
            else:
                beforeTokenPos = self.pos()
                token = self.getPyToken()
                if (not enclosures
                    and pyTokensToBreakAt
                    and token in pyTokensToBreakAt):
                    # un-read the break token and stop
                    self.setPos(beforeTokenPos)
                    break

                token = self.transformToken(token, beforeTokenPos)

                exprBits.append(token)
                if identRE.match(token):
                    if token == 'for':
                        # list-comprehension style 'for': recurse up to 'in'
                        expr = self.getExpression(useNameMapper=False, pyTokensToBreakAt=['in'])
                        exprBits.append(expr)
                    else:
                        exprBits.append(self.getWhiteSpace())
                        if not self.atEnd() and self.peek() == '(':
                            exprBits.append(self.getCallArgString())
        ##
        if useNameMapper is not Unspecified:
            self.setSetting('useNameMapper', useNameMapper_orig)  # @@TR: see comment above
        return exprBits
def getExpression(self,
enclosed=False,
enclosures=None, # list of tuples (char, pos), where # char is ({ or [
pyTokensToBreakAt=None,
useNameMapper=Unspecified,
):
"""Returns the output of self.getExpressionParts() as a concatenated
string rather than as a list.
"""
return ''.join(self.getExpressionParts(
enclosed=enclosed, enclosures=enclosures,
pyTokensToBreakAt=pyTokensToBreakAt,
useNameMapper=useNameMapper))
    def transformToken(self, token, beforeTokenPos):
        """Takes a token from the expression being parsed and performs and
        special transformations required by Cheetah.

        At the moment only Cheetah's c'$placeholder strings' are transformed:
        the string is re-scanned for placeholders and rewritten into a
        ''.join([...]) expression.

        NOTE(review): the eval() below runs on a string-literal token taken
        from the template source at compile time; template authors are
        already trusted with arbitrary code, but it is worth flagging.
        """
        if token=='c' and not self.atEnd() and self.peek() in '\'"':
            nextToken = self.getPyToken()
            token = nextToken.upper()
            theStr = eval(token)
            endPos = self.pos()
            if not theStr:
                return

            if token.startswith(single3) or token.startswith(double3):
                startPosIdx = 3
            else:
                startPosIdx = 1
            # rewind to just inside the opening quote(s)
            self.setPos(beforeTokenPos+startPosIdx+1)
            outputExprs = []
            strConst = ''
            while self.pos() < (endPos-startPosIdx):
                if self.matchCheetahVarStart() or self.matchExpressionPlaceholderStart():
                    if strConst:
                        outputExprs.append(repr(strConst))
                        strConst = ''
                    placeholderExpr = self.getPlaceholder()
                    outputExprs.append('str('+placeholderExpr+')')
                else:
                    strConst += self.getc()
            self.setPos(endPos)
            if strConst:
                outputExprs.append(repr(strConst))
            token = "''.join(["+','.join(outputExprs)+"])"
        return token
def _raiseErrorAboutInvalidCheetahVarSyntaxInExpr(self):
match = self.matchCheetahVarStart()
groupdict = match.groupdict()
if groupdict.get('cacheToken'):
raise ParseError(
self,
msg='Cache tokens are not valid inside expressions. '
'Use them in top-level $placeholders only.')
elif groupdict.get('enclosure'):
raise ParseError(
self,
msg='Long-form placeholders - ${}, $(), $[], etc. are not valid inside expressions. '
'Use them in top-level $placeholders only.')
else:
raise ParseError(
self,
msg='This form of $placeholder syntax is not valid here.')
    def getPlaceholder(self, allowCacheTokens=False, plain=False, returnEverything=False):
        """Consume a complete $placeholder and return the generated code.

        When returnEverything is True, returns the tuple
        (expr, rawPlaceholder, lineCol, cacheTokenParts, filterArgs,
        isSilentPlaceholder) instead of just the expression string.
        """
        # filtered
        for callback in self.setting('preparsePlaceholderHooks'):
            callback(parser=self)

        startPos = self.pos()
        lineCol = self.getRowCol(startPos)
        startToken = self.getCheetahVarStartToken()
        silentPlaceholderToken = self.getSilentPlaceholderToken()
        if silentPlaceholderToken:
            isSilentPlaceholder = True
        else:
            isSilentPlaceholder = False

        if allowCacheTokens:
            cacheToken = self.getCacheToken()
            cacheTokenParts = self.cacheTokenRE.match(cacheToken).groupdict()
        else:
            cacheTokenParts = {}

        if self.peek() in '({[':
            # long-form placeholder: ${...}, $(...), $[...]
            pos = self.pos()
            enclosureOpenChar = self.getc()
            enclosures = [ (enclosureOpenChar, pos) ]
            self.getWhiteSpace()
        else:
            enclosures = []

        filterArgs = None
        if self.matchIdentifier():
            nameChunks = self.getCheetahVarNameChunks()
            expr = self._compiler.genCheetahVar(nameChunks[:], plain=plain)
            restOfExpr = None
            if enclosures:
                WS = self.getWhiteSpace()
                expr += WS
                if self.setting('allowPlaceholderFilterArgs') and self.peek()==',':
                    # ${var, filterArg=...} syntax
                    filterArgs = self.getCallArgString(enclosures=enclosures)[1:-1]
                else:
                    if self.peek()==closurePairsRev[enclosureOpenChar]:
                        self.getc()
                    else:
                        restOfExpr = self.getExpression(enclosed=True, enclosures=enclosures)
                        if restOfExpr[-1] == closurePairsRev[enclosureOpenChar]:
                            restOfExpr = restOfExpr[:-1]
                        expr += restOfExpr
            rawPlaceholder = self[startPos: self.pos()]
        else:
            # a bare expression placeholder, e.g. ${x + 1}
            expr = self.getExpression(enclosed=True, enclosures=enclosures)
            if expr[-1] == closurePairsRev[enclosureOpenChar]:
                expr = expr[:-1]
            rawPlaceholder=self[startPos: self.pos()]

        expr = self._applyExpressionFilters(expr, 'placeholder',
                                            rawExpr=rawPlaceholder, startPos=startPos)
        for callback in self.setting('postparsePlaceholderHooks'):
            callback(parser=self)

        if returnEverything:
            return (expr, rawPlaceholder, lineCol, cacheTokenParts,
                    filterArgs, isSilentPlaceholder)
        else:
            return expr
class _HighLevelParser(_LowLevelParser):
"""This class is a StateMachine for parsing Cheetah source and
sending state dependent code generation commands to
Cheetah.Compiler.Compiler.
"""
    def __init__(self, src, filename=None, breakPoint=None, compiler=None):
        """Initialize the high-level parser.

        The compiler doubles as the settings manager, so it must be wired in
        before setupState()/configureParser() run.
        """
        super(_HighLevelParser, self).__init__(src, filename=filename, breakPoint=breakPoint)
        self.setSettingsManager(compiler)
        self._compiler = compiler
        self.setupState()
        self.configureParser()
    def setupState(self):
        """Initialize the per-parse mutable state containers."""
        self._macros = {}              # macro name -> callback
        self._macroDetails = {}        # macro name -> details (incl. template)
        self._openDirectivesStack = []  # names of currently open #directives
def cleanup(self):
"""Cleanup to remove any possible reference cycles
"""
self._macros.clear()
for macroname, macroDetails in self._macroDetails.items():
macroDetails.template.shutdown()
del macroDetails.template
self._macroDetails.clear()
    def configureParser(self):
        """Extend the low-level parser configuration with directive setup."""
        super(_HighLevelParser, self).configureParser()
        self._initDirectives()
    def _initDirectives(self):
        """Build the directive name -> parser/handler tables, honoring the
        'directiveNamesAndParsers', 'endDirectiveNamesAndHandlers',
        'closeableDirectives' and 'macroDirectives' settings.
        """
        def normalizeParserVal(val):
            # accept a method name, a class, a callable, or None
            if isinstance(val, (str, unicode)):
                handler = getattr(self, val)
            elif type(val) in (ClassType, TypeType):
                handler = val(self)
            elif hasattr(val, '__call__'):
                handler = val
            elif val is None:
                handler = val
            else:
                raise Exception('Invalid parser/handler value %r for %s'%(val, name))
            return handler

        normalizeHandlerVal = normalizeParserVal

        _directiveNamesAndParsers = directiveNamesAndParsers.copy()
        customNamesAndParsers = self.setting('directiveNamesAndParsers', {})
        _directiveNamesAndParsers.update(customNamesAndParsers)

        _endDirectiveNamesAndHandlers = endDirectiveNamesAndHandlers.copy()
        customNamesAndHandlers = self.setting('endDirectiveNamesAndHandlers', {})
        _endDirectiveNamesAndHandlers.update(customNamesAndHandlers)

        self._directiveNamesAndParsers = {}
        for name, val in _directiveNamesAndParsers.items():
            if val in (False, 0):
                # a False/0 value disables the directive entirely
                continue
            self._directiveNamesAndParsers[name] = normalizeParserVal(val)

        self._endDirectiveNamesAndHandlers = {}
        for name, val in _endDirectiveNamesAndHandlers.items():
            if val in (False, 0):
                continue
            self._endDirectiveNamesAndHandlers[name] = normalizeHandlerVal(val)

        # directives that require a matching #end tag
        self._closeableDirectives = ['def', 'block', 'closure', 'defmacro',
                                     'call',
                                     'capture',
                                     'cache',
                                     'filter',
                                     'if', 'unless',
                                     'for', 'while', 'repeat',
                                     'try',
                                     ]
        for directiveName in self.setting('closeableDirectives', []):
            self._closeableDirectives.append(directiveName)

        # macros: instantiate class callbacks and route them to eatMacroCall
        macroDirectives = self.setting('macroDirectives', {})
        macroDirectives['i18n'] = I18n

        for macroName, callback in macroDirectives.items():
            if type(callback) in (ClassType, TypeType):
                callback = callback(parser=self)
            assert callback
            self._macros[macroName] = callback
            self._directiveNamesAndParsers[macroName] = self.eatMacroCall
def _applyExpressionFilters(self, expr, exprType, rawExpr=None, startPos=None):
"""Pipes cheetah expressions through a set of optional filter hooks.
The filters are functions which may modify the expressions or raise
a ForbiddenExpression exception if the expression is not allowed. They
are defined in the compiler setting 'expressionFilterHooks'.
Some intended use cases:
- to implement 'restricted execution' safeguards in cases where you
can't trust the author of the template.
- to enforce style guidelines
filter call signature: (parser, expr, exprType, rawExpr=None, startPos=None)
- parser is the Cheetah parser
- expr is the expression to filter. In some cases the parser will have
already modified it from the original source code form. For example,
placeholders will have been translated into namemapper calls. If you
need to work with the original source, see rawExpr.
- exprType is the name of the directive, 'psp', or 'placeholder'. All
lowercase. @@TR: These will eventually be replaced with a set of
constants.
- rawExpr is the original source string that Cheetah parsed. This
might be None in some cases.
- startPos is the character position in the source string/file
where the parser started parsing the current expression.
@@TR: I realize this use of the term 'expression' is a bit wonky as many
of the 'expressions' are actually statements, but I haven't thought of
a better name yet. Suggestions?
"""
for callback in self.setting('expressionFilterHooks'):
expr = callback(parser=self, expr=expr, exprType=exprType,
rawExpr=rawExpr, startPos=startPos)
return expr
def _filterDisabledDirectives(self, directiveName):
directiveName = directiveName.lower()
if (directiveName in self.setting('disabledDirectives')
or (self.setting('enabledDirectives')
and directiveName not in self.setting('enabledDirectives'))):
for callback in self.setting('disabledDirectiveHooks'):
callback(parser=self, directiveName=directiveName)
raise ForbiddenDirective(self, msg='This %r directive is disabled'%directiveName)
## main parse loop
    def parse(self, breakPoint=None, assertEmptyStack=True):
        """Main parse loop: dispatch on the token at the current position
        until the end of the source (or *breakPoint*) is reached.

        When breakPoint is given, it is installed temporarily and the
        open-directives check is skipped (used for nested/partial parses).
        """
        if breakPoint:
            origBP = self.breakPoint()
            self.setBreakPoint(breakPoint)
            assertEmptyStack = False

        while not self.atEnd():
            # order matters: comments shadow directives, placeholders, etc.
            if self.matchCommentStartToken():
                self.eatComment()
            elif self.matchMultiLineCommentStartToken():
                self.eatMultiLineComment()
            elif self.matchVariablePlaceholderStart():
                self.eatPlaceholder()
            elif self.matchExpressionPlaceholderStart():
                self.eatPlaceholder()
            elif self.matchDirective():
                self.eatDirective()
            elif self.matchPSPStartToken():
                self.eatPSP()
            elif self.matchEOLSlurpToken():
                self.eatEOLSlurpToken()
            else:
                self.eatPlainText()
        if assertEmptyStack:
            self.assertEmptyOpenDirectivesStack()
        if breakPoint:
            self.setBreakPoint(origBP)
## non-directive eat methods
def eatPlainText(self):
startPos = self.pos()
match = None
while not self.atEnd():
match = self.matchTopLevelToken()
if match:
break
else:
self.advance()
strConst = self.readTo(self.pos(), start=startPos)
strConst = self._unescapeCheetahVars(strConst)
strConst = self._unescapeDirectives(strConst)
self._compiler.addStrConst(strConst)
return match
def eatComment(self):
isLineClearToStartToken = self.isLineClearToStartToken()
if isLineClearToStartToken:
self._compiler.handleWSBeforeDirective()
self.getCommentStartToken()
comm = self.readToEOL(gobble=isLineClearToStartToken)
self._compiler.addComment(comm)
    def eatMultiLineComment(self):
        """Consume a (possibly nested) multi-line comment and hand its text
        to the compiler, optionally gobbling surrounding whitespace."""
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()

        self.getMultiLineCommentStartToken()
        endPos = startPos = self.pos()
        level = 1  # nesting depth of comment start tokens
        while True:
            endPos = self.pos()
            if self.atEnd():
                break
            if self.matchMultiLineCommentStartToken():
                self.getMultiLineCommentStartToken()
                level += 1
            elif self.matchMultiLineCommentEndToken():
                self.getMultiLineCommentEndToken()
                level -= 1
            if not level:
                break
            self.advance()
        comm = self.readTo(endPos, start=startPos)

        if not self.atEnd():
            self.getMultiLineCommentEndToken()

        if (not self.atEnd()) and self.setting('gobbleWhitespaceAroundMultiLineComments'):
            restOfLine = self[self.pos():self.findEOL()]
            if not restOfLine.strip():  # WS only to EOL
                self.readToEOL(gobble=isLineClearToStartToken)

        if isLineClearToStartToken and (self.atEnd() or self.pos() > endOfFirstLine):
            self._compiler.handleWSBeforeDirective()

        self._compiler.addComment(comm)
def eatPlaceholder(self):
(expr, rawPlaceholder,
lineCol, cacheTokenParts,
filterArgs, isSilentPlaceholder) = self.getPlaceholder(
allowCacheTokens=True, returnEverything=True)
self._compiler.addPlaceholder(
expr,
filterArgs=filterArgs,
rawPlaceholder=rawPlaceholder,
cacheTokenParts=cacheTokenParts,
lineCol=lineCol,
silentMode=isSilentPlaceholder)
return
    def eatPSP(self):
        """Consume a PSP block (<% ... %> style) and emit its stripped,
        filtered contents through the compiler."""
        # filtered
        self._filterDisabledDirectives(directiveName='psp')
        self.getPSPStartToken()
        endToken = self.setting('PSPEndToken')
        startPos = self.pos()
        while not self.atEnd():
            # cheap first-char check before running the end-token regex
            if self.peek() == endToken[0]:
                if self.matchPSPEndToken():
                    break
            self.advance()
        pspString = self.readTo(self.pos(), start=startPos).strip()
        pspString = self._applyExpressionFilters(pspString, 'psp', startPos=startPos)
        self._compiler.addPSP(pspString)
        self.getPSPEndToken()
## generic directive eat methods
_simpleIndentingDirectives = '''
else elif for while repeat unless try except finally'''.split()
_simpleExprDirectives = '''
pass continue stop return yield break
del assert raise
silent echo
import from'''.split()
_directiveHandlerNames = {'import': 'addImportStatement',
'from': 'addImportStatement', }
    def eatDirective(self):
        """Dispatch the directive at the current position to its parser or
        to the generic simple-indenting / simple-expression handlers."""
        directiveName = self.matchDirective()
        self._filterDisabledDirectives(directiveName)

        for callback in self.setting('preparseDirectiveHooks'):
            callback(parser=self, directiveName=directiveName)

        # subclasses can override the default behaviours here by providing an
        # eater method in self._directiveNamesAndParsers[directiveName]
        directiveParser = self._directiveNamesAndParsers.get(directiveName)
        if directiveParser:
            directiveParser()
        elif directiveName in self._simpleIndentingDirectives:
            handlerName = self._directiveHandlerNames.get(directiveName)
            if not handlerName:
                handlerName = 'add'+directiveName.capitalize()
            handler = getattr(self._compiler, handlerName)
            self.eatSimpleIndentingDirective(directiveName, callback=handler)
        elif directiveName in self._simpleExprDirectives:
            handlerName = self._directiveHandlerNames.get(directiveName)
            if not handlerName:
                handlerName = 'add'+directiveName.capitalize()
            handler = getattr(self._compiler, handlerName)
            # #silent and #echo take a bare expression; the other simple
            # directives keep their own name in the emitted statement
            if directiveName in ('silent', 'echo'):
                includeDirectiveNameInExpr = False
            else:
                includeDirectiveNameInExpr = True
            expr = self.eatSimpleExprDirective(
                directiveName,
                includeDirectiveNameInExpr=includeDirectiveNameInExpr)
            handler(expr)
        ##
        for callback in self.setting('postparseDirectiveHooks'):
            callback(parser=self, directiveName=directiveName)
    def _eatRestOfDirectiveTag(self, isLineClearToStartToken, endOfFirstLinePos):
        """Consume the tail of a directive tag: an optional trailing comment,
        the directive end token and/or the EOL, then let the compiler gobble
        leading whitespace when the directive owned the whole line."""
        foundComment = False
        if self.matchCommentStartToken():
            pos = self.pos()
            self.advance()
            # '##end ...' style comments that are really directives are left alone
            if not self.matchDirective():
                self.setPos(pos)
                foundComment = True
                self.eatComment()  # this won't gobble the EOL
            else:
                self.setPos(pos)

        if not foundComment and self.matchDirectiveEndToken():
            self.getDirectiveEndToken()
        elif isLineClearToStartToken and (not self.atEnd()) and self.peek() in '\r\n':
            # still gobble the EOL if a comment was found.
            self.readToEOL(gobble=True)

        if isLineClearToStartToken and (self.atEnd() or self.pos() > endOfFirstLinePos):
            self._compiler.handleWSBeforeDirective()
    def _eatToThisEndDirective(self, directiveName):
        """Consume raw source up to '#end <directiveName>' and return the
        text eaten (excluding the end tag itself)."""
        finalPos = endRawPos = startPos = self.pos()
        directiveChar = self.setting('directiveStartToken')[0]
        isLineClearToStartToken = False
        while not self.atEnd():
            # cheap first-char check before running the directive matcher
            if self.peek() == directiveChar:
                if self.matchDirective() == 'end':
                    endRawPos = self.pos()
                    self.getDirectiveStartToken()
                    self.advance(len('end'))
                    self.getWhiteSpace()
                    if self.startswith(directiveName):
                        if self.isLineClearToStartToken(endRawPos):
                            isLineClearToStartToken = True
                            # exclude the end tag's leading whitespace too
                            endRawPos = self.findBOL(endRawPos)
                        self.advance(len(directiveName))  # to end of directiveName
                        self.getWhiteSpace()
                        finalPos = self.pos()
                        break
            self.advance()
            finalPos = endRawPos = self.pos()

        textEaten = self.readTo(endRawPos, start=startPos)
        self.setPos(finalPos)

        endOfFirstLinePos = self.findEOL()

        if self.matchDirectiveEndToken():
            self.getDirectiveEndToken()
        elif isLineClearToStartToken and (not self.atEnd()) and self.peek() in '\r\n':
            self.readToEOL(gobble=True)

        if isLineClearToStartToken and self.pos() > endOfFirstLinePos:
            self._compiler.handleWSBeforeDirective()
        return textEaten
def eatSimpleExprDirective(self, directiveName, includeDirectiveNameInExpr=True):
    """Parse a directive whose payload is a single expression and return
    that expression as a string.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    self.getDirectiveStartToken()
    if not includeDirectiveNameInExpr:
        self.advance(len(directiveName))
    startPos = self.pos()
    expr = self.getExpression().strip()
    # NOTE(review): this rebinds directiveName to the first token of the
    # expression; when includeDirectiveNameInExpr is False that token is
    # NOT the directive name — confirm intended.
    directiveName = expr.split()[0]
    expr = self._applyExpressionFilters(expr, directiveName, startPos=startPos)
    if directiveName in self._closeableDirectives:
        self.pushToOpenDirectivesStack(directiveName)
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
    return expr
def eatSimpleIndentingDirective(self, directiveName, callback,
                                includeDirectiveNameInExpr=False):
    """Parse a directive that opens an indented region (#if-like forms,
    loops, etc.) and hand the expression to *callback*, which emits the
    corresponding compiler code.  Supports both the single-line short
    form (``#for $i in $x: $i``) and the multi-line form.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    lineCol = self.getRowCol()
    self.getDirectiveStartToken()
    # these directive names are left as part of the expression itself
    if directiveName not in 'else elif for while try except finally'.split():
        self.advance(len(directiveName))
    startPos = self.pos()
    self.getWhiteSpace()
    expr = self.getExpression(pyTokensToBreakAt=[':'])
    expr = self._applyExpressionFilters(expr, directiveName, startPos=startPos)
    if self.matchColonForSingleLineShortFormDirective():
        self.advance()  # skip over :
        if directiveName in 'else elif except finally'.split():
            # these continue an existing region, so don't dedent afterwards
            callback(expr, dedent=False, lineCol=lineCol)
        else:
            callback(expr, lineCol=lineCol)
        self.getWhiteSpace(max=1)
        self.parse(breakPoint=self.findEOL(gobble=True))
        self._compiler.commitStrConst()
        self._compiler.dedent()
    else:
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        if directiveName in self._closeableDirectives:
            self.pushToOpenDirectivesStack(directiveName)
        callback(expr, lineCol=lineCol)
def eatEndDirective(self):
    """Parse an ``#end <directiveName>`` directive and close the
    corresponding open region in the compiler.

    Raises ParseError when no known directive name follows ``#end``.
    """
    isLineClearToStartToken = self.isLineClearToStartToken()
    self.getDirectiveStartToken()
    self.advance(3)  # to end of 'end'
    self.getWhiteSpace()
    pos = self.pos()
    directiveName = False
    for key in self._endDirectiveNamesAndHandlers:
        if self.find(key, pos) == pos:
            directiveName = key
            break
    if not directiveName:
        raise ParseError(self, msg='Invalid end directive')

    endOfFirstLinePos = self.findEOL()
    self.getExpression()  # eat in any extra comment-like crap
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    if directiveName in self._closeableDirectives:
        self.popFromOpenDirectivesStack(directiveName)

    # subclasses can override the default behaviours here by providing an
    # end-directive handler in self._endDirectiveNamesAndHandlers[directiveName]
    if self._endDirectiveNamesAndHandlers.get(directiveName):
        handler = self._endDirectiveNamesAndHandlers[directiveName]
        handler()
    elif directiveName in 'block capture cache call filter errorCatcher'.split():
        # bug fix: this chain previously dispatched on the loop variable
        # `key`, which only coincidentally equalled `directiveName`
        if directiveName == 'block':
            self._compiler.closeBlock()
        elif directiveName == 'capture':
            self._compiler.endCaptureRegion()
        elif directiveName == 'cache':
            self._compiler.endCacheRegion()
        elif directiveName == 'call':
            self._compiler.endCallRegion()
        elif directiveName == 'filter':
            self._compiler.closeFilterBlock()
        elif directiveName == 'errorCatcher':
            self._compiler.turnErrorCatcherOff()
    elif directiveName in 'while for if try repeat unless'.split():
        self._compiler.commitStrConst()
        self._compiler.dedent()
    elif directiveName == 'closure':
        self._compiler.commitStrConst()
        self._compiler.dedent()
        # @@TR: temporary hack of useSearchList
        self.setSetting('useSearchList', self._useSearchList_orig)
## specific directive eat methods
def eatBreakPoint(self):
    """Handle ``#breakpoint``: make the parser stop at the current
    position and ignore all remaining source.  Debugging aid only.
    """
    self.setBreakPoint(self.pos())
def eatShbang(self):
    """Handle the ``#shBang`` directive: read the rest of the line,
    filter it, and register the stripped value with the compiler.
    """
    # filtered
    self.getDirectiveStartToken()
    self.advance(len('shBang'))
    self.getWhiteSpace()
    exprStart = self.pos()
    rawShBang = self.readToEOL()
    rawShBang = self._applyExpressionFilters(rawShBang, 'shbang',
                                             startPos=exprStart)
    self._compiler.setShBang(rawShBang.strip())
def eatEncoding(self):
    """Handle the ``#encoding`` directive: read the rest of the line,
    filter it, and register the stripped encoding with the compiler.
    """
    # filtered
    self.getDirectiveStartToken()
    self.advance(len('encoding'))
    self.getWhiteSpace()
    exprStart = self.pos()
    rawEncoding = self.readToEOL()
    rawEncoding = self._applyExpressionFilters(rawEncoding, 'encoding',
                                               startPos=exprStart)
    self._compiler.setModuleEncoding(rawEncoding.strip())
def eatCompiler(self):
    """Parse a ``#compiler <setting>=<expr>`` directive and apply the
    setting to the compiler.  ``#compiler reset`` restores the default
    settings instead.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    startPos = self.pos()
    self.getDirectiveStartToken()
    self.advance(len('compiler'))  # to end of 'compiler'
    self.getWhiteSpace()
    startPos = self.pos()
    settingName = self.getIdentifier()
    if settingName.lower() == 'reset':
        self.getExpression()  # gobble whitespace & junk
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
        self._initializeSettings()
        self.configureParser()
        return
    self.getWhiteSpace()
    if self.peek() == '=':
        self.advance()
    else:
        raise ParseError(self)
    valueExpr = self.getExpression()
    endPos = self.pos()
    # @@TR: it's unlikely that anyone apply filters would have left this
    # directive enabled:
    # @@TR: fix up filtering, regardless
    self._applyExpressionFilters('%s=%r'%(settingName, valueExpr),
                                 'compiler', startPos=startPos)
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
    try:
        self._compiler.setCompilerSetting(settingName, valueExpr)
    except:
        # echo the offending directive source before re-raising so the
        # user can see what failed
        sys.stderr.write('An error occurred while processing the following #compiler directive.\n')
        sys.stderr.write('----------------------------------------------------------------------\n')
        sys.stderr.write('%s\n' % self[startPos:endPos])
        sys.stderr.write('----------------------------------------------------------------------\n')
        sys.stderr.write('Please check the syntax of these settings.\n\n')
        raise
def eatCompilerSettings(self):
    """Parse a ``#compiler-settings`` ... ``#end compiler-settings``
    region and apply its contents as compiler settings.
    ``#compiler-settings reset`` restores the defaults instead.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('compiler-settings'))  # to end of 'settings'
    keywords = self.getTargetVarsList()
    self.getExpression()  # gobble any garbage
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
    if 'reset' in keywords:
        self._compiler._initializeSettings()
        self.configureParser()
        # @@TR: this implies a single-line #compiler-settings directive, and
        # thus we should parse forward for an end directive.
        # Subject to change in the future
        return
    startPos = self.pos()
    settingsStr = self._eatToThisEndDirective('compiler-settings')
    settingsStr = self._applyExpressionFilters(settingsStr, 'compilerSettings',
                                               startPos=startPos)
    try:
        self._compiler.setCompilerSettings(keywords=keywords, settingsStr=settingsStr)
    except:
        # echo the offending settings text before re-raising
        sys.stderr.write('An error occurred while processing the following compiler settings.\n')
        sys.stderr.write('----------------------------------------------------------------------\n')
        sys.stderr.write('%s\n' % settingsStr.strip())
        sys.stderr.write('----------------------------------------------------------------------\n')
        sys.stderr.write('Please check the syntax of these settings.\n\n')
        raise
def eatAttr(self):
    """Parse an ``#attr $name = <expr>`` directive and register the
    class attribute with the compiler.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    startPos = self.pos()
    self.getDirectiveStartToken()
    self.advance(len('attr'))
    self.getWhiteSpace()
    startPos = self.pos()
    # the leading $ on the attribute name is optional
    if self.matchCheetahVarStart():
        self.getCheetahVarStartToken()
    attribName = self.getIdentifier()
    self.getWhiteSpace()
    self.getAssignmentOperator()
    expr = self.getExpression()
    expr = self._applyExpressionFilters(expr, 'attr', startPos=startPos)
    self._compiler.addAttribute(attribName, expr)
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
def eatDecorator(self):
    """Parse an ``@decorator`` line, register it with the compiler, and
    then require that a #def/#block/#closure (or another decorator)
    follows.
    """
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    startPos = self.pos()
    self.getDirectiveStartToken()
    #self.advance() # eat @
    startPos = self.pos()
    decoratorExpr = self.getExpression()
    decoratorExpr = self._applyExpressionFilters(decoratorExpr, 'decorator', startPos=startPos)
    self._compiler.addDecorator(decoratorExpr)
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    self.getWhiteSpace()

    # a decorator only makes sense directly above a method-producing
    # directive, so enforce that here
    directiveName = self.matchDirective()
    if not directiveName or directiveName not in ('def', 'block', 'closure', '@'):
        raise ParseError(
            self, msg='Expected #def, #block, #closure or another @decorator')
    self.eatDirective()
def eatDef(self):
    """Handle a ``#def`` directive."""
    # filtered
    self._eatDefOrBlock('def')
def eatBlock(self):
    """Handle a ``#block`` directive and record the block's raw
    signature and position for the compiler.
    """
    # filtered
    blockStartPos = self.pos()
    methodName, rawSignature = self._eatDefOrBlock('block')
    metaData = {
        'raw': rawSignature,
        'lineCol': self.getRowCol(blockStartPos),
    }
    self._compiler._blockMetaData[methodName] = metaData
def eatClosure(self):
    """Handle a ``#closure`` directive."""
    # filtered
    self._eatDefOrBlock('closure')
def _eatDefOrBlock(self, directiveName):
    """Shared parser for #def, #block and #closure.  Handles both the
    single-line short form and the multi-line form, and returns
    ``(methodName, rawSignature)``.
    """
    # filtered
    assert directiveName in ('def', 'block', 'closure')
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    startPos = self.pos()
    self.getDirectiveStartToken()
    self.advance(len(directiveName))
    self.getWhiteSpace()
    # the leading $ on the method name is optional
    if self.matchCheetahVarStart():
        self.getCheetahVarStartToken()
    methodName = self.getIdentifier()
    self.getWhiteSpace()
    if self.peek() == '(':
        argsList = self.getDefArgList()
        self.advance()  # past the closing ')'
        # an explicit 'self' arg is implied, so drop it
        if argsList and argsList[0][0] == 'self':
            del argsList[0]
    else:
        argsList=[]

    def includeBlockMarkers():
        # optionally wrap #block output in marker strings
        if self.setting('includeBlockMarkers'):
            startMarker = self.setting('blockMarkerStart')
            self._compiler.addStrConst(startMarker[0] + methodName + startMarker[1])

    # @@TR: fix up filtering
    self._applyExpressionFilters(self[startPos:self.pos()], 'def', startPos=startPos)

    if self.matchColonForSingleLineShortFormDirective():
        isNestedDef = (self.setting('allowNestedDefScopes')
                       and [name for name in self._openDirectivesStack if name=='def'])
        self.getc()
        rawSignature = self[startPos:endOfFirstLinePos]
        self._eatSingleLineDef(directiveName=directiveName,
                               methodName=methodName,
                               argsList=argsList,
                               startPos=startPos,
                               endPos=endOfFirstLinePos)
        if directiveName == 'def' and not isNestedDef:
            #@@TR: must come before _eatRestOfDirectiveTag ... for some reason
            self._compiler.closeDef()
        elif directiveName == 'block':
            includeBlockMarkers()
            self._compiler.closeBlock()
        elif directiveName == 'closure' or isNestedDef:
            self._compiler.dedent()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    else:
        if self.peek()==':':
            self.getc()
        self.pushToOpenDirectivesStack(directiveName)
        rawSignature = self[startPos:self.pos()]
        self._eatMultiLineDef(directiveName=directiveName,
                              methodName=methodName,
                              argsList=argsList,
                              startPos=startPos,
                              isLineClearToStartToken=isLineClearToStartToken)
        if directiveName == 'block':
            includeBlockMarkers()

    return methodName, rawSignature
def _eatMultiLineDef(self, directiveName, methodName, argsList, startPos,
                     isLineClearToStartToken=False):
    """Open a multi-line #def/#block/#closure region in the compiler
    and return the method name.  The region body is parsed by the
    caller; #end closes it.
    """
    # filtered in calling method
    self.getExpression()  # slurp up any garbage left at the end
    signature = self[startPos:self.pos()]
    endOfFirstLinePos = self.findEOL()
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    # collapse a multi-line signature onto one line for the comment
    signature = ' '.join([line.strip() for line in signature.splitlines()])
    parserComment = ('## CHEETAH: generated from ' + signature +
                     ' at line %s, col %s' % self.getRowCol(startPos)
                     + '.')

    isNestedDef = (self.setting('allowNestedDefScopes')
                   and len([name for name in self._openDirectivesStack if name=='def'])>1)
    if directiveName=='block' or (directiveName=='def' and not isNestedDef):
        self._compiler.startMethodDef(methodName, argsList, parserComment)
    else: #closure
        # @@TR: temporary hack of useSearchList; restored when the
        # closure region is closed
        self._useSearchList_orig = self.setting('useSearchList')
        self.setSetting('useSearchList', False)
        self._compiler.addClosure(methodName, argsList, parserComment)

    return methodName
def _eatSingleLineDef(self, directiveName, methodName, argsList, startPos, endPos):
    """Open a single-line (short form) #def/#block/#closure in the
    compiler and parse its one-line body immediately.
    """
    # filtered in calling method
    fullSignature = self[startPos:endPos]
    parserComment = ('## Generated from ' + fullSignature +
                     ' at line %s, col %s' % self.getRowCol(startPos)
                     + '.')
    isNestedDef = (self.setting('allowNestedDefScopes')
                   and [name for name in self._openDirectivesStack if name=='def'])
    if directiveName=='block' or (directiveName=='def' and not isNestedDef):
        self._compiler.startMethodDef(methodName, argsList, parserComment)
    else: #closure
        # @@TR: temporary hack of useSearchList
        useSearchList_orig = self.setting('useSearchList')
        self.setSetting('useSearchList', False)
        self._compiler.addClosure(methodName, argsList, parserComment)

    self.getWhiteSpace(max=1)
    # parse the rest of the line as the method body
    self.parse(breakPoint=endPos)
    if directiveName=='closure' or isNestedDef: # @@TR: temporary hack of useSearchList
        self.setSetting('useSearchList', useSearchList_orig)
def eatExtends(self):
    """Parse an ``#extends <baseclass>`` directive and register the base
    class with the compiler.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('extends'))
    self.getWhiteSpace()
    startPos = self.pos()
    if self.setting('allowExpressionsInExtendsDirective'):
        baseName = self.getExpression()
    else:
        # only dotted names are allowed; join multiple bases with commas
        baseName = self.getCommaSeparatedSymbols()
        baseName = ', '.join(baseName)

    baseName = self._applyExpressionFilters(baseName, 'extends', startPos=startPos)
    self._compiler.setBaseClass(baseName)  # in compiler
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
def eatImplements(self):
    """Parse an ``#implements <methodName>`` directive: set the name
    (and optional argument list) of the template's main method.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('implements'))
    self.getWhiteSpace()
    startPos = self.pos()
    methodName = self.getIdentifier()
    if not self.atEnd() and self.peek() == '(':
        argsList = self.getDefArgList()
        self.advance()  # past the closing ')'
        # an explicit 'self' arg is implied, so drop it
        if argsList and argsList[0][0] == 'self':
            del argsList[0]
    else:
        argsList=[]

    # @@TR: need to split up filtering of the methodname and the args
    #methodName = self._applyExpressionFilters(methodName, 'implements', startPos=startPos)
    self._applyExpressionFilters(self[startPos:self.pos()], 'implements', startPos=startPos)
    self._compiler.setMainMethodName(methodName)
    self._compiler.setMainMethodArgs(argsList)
    self.getExpression()  # throw away and unwanted crap that got added in
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
def eatSuper(self):
    """Parse a ``#super`` directive (optionally with an argument list)
    and tell the compiler to emit a call to the superclass method.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('super'))
    self.getWhiteSpace()
    startPos = self.pos()
    if not self.atEnd() and self.peek() == '(':
        argsList = self.getDefArgList()
        self.advance()  # past the closing ')'
        # an explicit 'self' arg is implied, so drop it
        if argsList and argsList[0][0] == 'self':
            del argsList[0]
    else:
        argsList=[]

    self._applyExpressionFilters(self[startPos:self.pos()], 'super', startPos=startPos)

    #parserComment = ('## CHEETAH: generated from ' + signature +
    #                 ' at line %s, col %s' % self.getRowCol(startPos)
    #                 + '.')

    self.getExpression()  # throw away and unwanted crap that got added in
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
    self._compiler.addSuper(argsList)
def eatSet(self):
    """Parse a ``#set [local|global|module] <lvalue> <op> <rvalue>``
    directive and register the assignment with the compiler.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(3)  # to end of 'set'
    self.getWhiteSpace()
    style = SET_LOCAL
    if self.startswith('local'):
        self.getIdentifier()
        self.getWhiteSpace()
    elif self.startswith('global'):
        self.getIdentifier()
        self.getWhiteSpace()
        style = SET_GLOBAL
    elif self.startswith('module'):
        self.getIdentifier()
        self.getWhiteSpace()
        style = SET_MODULE

    # NOTE(review): startsWithDollar is never read below — presumably
    # legacy; confirm before removing
    startsWithDollar = self.matchCheetahVarStart()
    startPos = self.pos()
    LVALUE = self.getExpression(pyTokensToBreakAt=assignmentOps, useNameMapper=False).strip()
    OP = self.getAssignmentOperator()
    RVALUE = self.getExpression()
    expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()

    expr = self._applyExpressionFilters(expr, 'set', startPos=startPos)
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)

    class Components: pass  # used for 'set global'
    exprComponents = Components()
    exprComponents.LVALUE = LVALUE
    exprComponents.OP = OP
    exprComponents.RVALUE = RVALUE
    self._compiler.addSet(expr, exprComponents, style)
def eatSlurp(self):
    """Handle ``#slurp``: discard the rest of the current line,
    including the EOL.
    """
    if self.isLineClearToStartToken():
        self._compiler.handleWSBeforeDirective()
    self._compiler.commitStrConst()
    self.readToEOL(gobble=True)
def eatEOLSlurpToken(self):
    """Handle the EOL-slurp token: same behaviour as #slurp — discard
    the rest of the current line, including the EOL.
    """
    if self.isLineClearToStartToken():
        self._compiler.handleWSBeforeDirective()
    self._compiler.commitStrConst()
    self.readToEOL(gobble=True)
def eatRaw(self):
    """Parse a ``#raw`` region (short or multi-line form) and pass its
    body through to the output verbatim, without any Cheetah parsing.
    """
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('raw'))
    self.getWhiteSpace()
    if self.matchColonForSingleLineShortFormDirective():
        self.advance()  # skip over :
        self.getWhiteSpace(max=1)
        rawBlock = self.readToEOL(gobble=False)
    else:
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        rawBlock = self._eatToThisEndDirective('raw')
    self._compiler.addRawText(rawBlock)
def eatInclude(self):
    """Parse an ``#include [raw] [source=] <expr>`` directive and
    register the include with the compiler.  ``raw`` skips parsing of
    the included text; ``source=`` includes from a string expression
    rather than a file.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('include'))

    self.getWhiteSpace()
    includeFrom = 'file'
    isRaw = False
    if self.startswith('raw'):
        self.advance(3)
        isRaw=True

    self.getWhiteSpace()
    if self.startswith('source'):
        self.advance(len('source'))
        includeFrom = 'str'
        self.getWhiteSpace()
        if not self.peek() == '=':
            raise ParseError(self)
        self.advance()
    startPos = self.pos()
    sourceExpr = self.getExpression()
    sourceExpr = self._applyExpressionFilters(sourceExpr, 'include', startPos=startPos)
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    self._compiler.addInclude(sourceExpr, includeFrom, isRaw)
def eatDefMacro(self):
    """Parse a ``#defmacro`` directive: compile the macro body into a
    mini Cheetah template (using '@' placeholders and '%' directives)
    and register the macro so later ``#<macroName>`` calls dispatch to
    eatMacroCall.
    """
    # @@TR: not filtered yet
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('defmacro'))

    self.getWhiteSpace()
    # the leading $ on the macro name is optional
    if self.matchCheetahVarStart():
        self.getCheetahVarStartToken()
    macroName = self.getIdentifier()
    self.getWhiteSpace()
    if self.peek() == '(':
        argsList = self.getDefArgList(useNameMapper=False)
        self.advance()  # past the closing ')'
        if argsList and argsList[0][0] == 'self':
            del argsList[0]
    else:
        argsList=[]

    assert macroName not in self._directiveNamesAndParsers
    # prepend the body arg and append the standard args the parser
    # supplies automatically when the macro is invoked
    argsList.insert(0, ('src', None))
    argsList.append(('parser', 'None'))
    argsList.append(('macros', 'None'))
    argsList.append(('compilerSettings', 'None'))
    argsList.append(('isShortForm', 'None'))
    argsList.append(('EOLCharsInShortForm', 'None'))
    argsList.append(('startPos', 'None'))
    argsList.append(('endPos', 'None'))

    if self.matchColonForSingleLineShortFormDirective():
        self.advance()  # skip over :
        self.getWhiteSpace(max=1)
        macroSrc = self.readToEOL(gobble=False)
        self.readToEOL(gobble=True)
    else:
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        macroSrc = self._eatToThisEndDirective('defmacro')

    #print argsList
    # wrap the macro body in a callMacro() def using the alternate
    # '%'/' @' token set so the macro template doesn't clash with '#'/'$'
    normalizedMacroSrc = ''.join(
        ['%def callMacro('+','.join([defv and '%s=%s'%(n, defv) or n
                                     for n, defv in argsList])
         +')\n',
         macroSrc,
         '%end def'])

    from Cheetah.Template import Template
    templateAPIClass = self.setting('templateAPIClassForDefMacro', default=Template)
    compilerSettings = self.setting('compilerSettingsForDefMacro', default={})
    searchListForMacros = self.setting('searchListForDefMacro', default=[])
    searchListForMacros = list(searchListForMacros)  # copy to avoid mutation bugs
    searchListForMacros.append({'macros': self._macros,
                                'parser': self,
                                'compilerSettings': self.settings(),
                                })

    templateAPIClass._updateSettingsWithPreprocessTokens(
        compilerSettings, placeholderToken='@', directiveToken='%')
    macroTemplateClass = templateAPIClass.compile(source=normalizedMacroSrc,
                                                  compilerSettings=compilerSettings)
    #print normalizedMacroSrc
    #t = macroTemplateClass()
    #print t.callMacro('src')
    #print t.generatedClassCode()

    class MacroDetails: pass
    macroDetails = MacroDetails()
    macroDetails.macroSrc = macroSrc
    macroDetails.argsList = argsList
    macroDetails.template = macroTemplateClass(searchList=searchListForMacros)

    self._macroDetails[macroName] = macroDetails
    self._macros[macroName] = macroDetails.template.callMacro
    self._directiveNamesAndParsers[macroName] = self.eatMacroCall
def eatMacroCall(self):
    """Parse a call to a previously defined macro, invoke the macro with
    the parsed args plus the standard parser-supplied kwargs, and then
    re-parse the macro's output in place of the call.
    """
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    startPos = self.pos()
    self.getDirectiveStartToken()
    macroName = self.getIdentifier()
    macro = self._macros[macroName]
    # a macro object may take over parsing entirely ...
    if hasattr(macro, 'parse'):
        return macro.parse(parser=self, startPos=startPos)

    # ... or just argument parsing
    if hasattr(macro, 'parseArgs'):
        args = macro.parseArgs(parser=self, startPos=startPos)
    else:
        self.getWhiteSpace()
        args = self.getExpression(useNameMapper=False,
                                  pyTokensToBreakAt=[':']).strip()

    if self.matchColonForSingleLineShortFormDirective():
        isShortForm = True
        self.advance()  # skip over :
        self.getWhiteSpace(max=1)
        srcBlock = self.readToEOL(gobble=False)
        EOLCharsInShortForm = self.readToEOL(gobble=True)
        #self.readToEOL(gobble=False)
    else:
        isShortForm = False
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        srcBlock = self._eatToThisEndDirective(macroName)

    if hasattr(macro, 'convertArgStrToDict'):
        kwArgs = macro.convertArgStrToDict(args, parser=self, startPos=startPos)
    else:
        def getArgs(*pargs, **kws):
            return pargs, kws
        # NOTE: relies on exec populating the enclosing function's locals
        # (kwArgs) — Python 2 only behavior; the string interpolation of
        # `args` also means macro args are evaluated as trusted code
        exec('positionalArgs, kwArgs = getArgs(%(args)s)'%locals())

    assert 'src' not in kwArgs
    kwArgs['src'] = srcBlock

    # find the underlying code object so we can introspect which of the
    # parser-supplied kwargs the macro actually accepts
    if isinstance(macro, new.instancemethod):
        co = macro.im_func.func_code
    elif (hasattr(macro, '__call__')
          and hasattr(macro.__call__, 'im_func')):
        co = macro.__call__.im_func.func_code
    else:
        co = macro.func_code
    availableKwArgs = inspect.getargs(co)[0]

    if 'parser' in availableKwArgs:
        kwArgs['parser'] = self
    if 'macros' in availableKwArgs:
        kwArgs['macros'] = self._macros
    if 'compilerSettings' in availableKwArgs:
        kwArgs['compilerSettings'] = self.settings()
    if 'isShortForm' in availableKwArgs:
        kwArgs['isShortForm'] = isShortForm
    if isShortForm and 'EOLCharsInShortForm' in availableKwArgs:
        kwArgs['EOLCharsInShortForm'] = EOLCharsInShortForm

    if 'startPos' in availableKwArgs:
        kwArgs['startPos'] = startPos
    if 'endPos' in availableKwArgs:
        kwArgs['endPos'] = self.pos()

    srcFromMacroOutput = macro(**kwArgs)

    # temporarily swap the parser's source buffer for the macro output,
    # parse it, then restore the original buffer/position/breakpoint
    origParseSrc = self._src
    origBreakPoint = self.breakPoint()
    origPos = self.pos()
    # add a comment to the output about the macro src that is being parsed
    # or add a comment prefix to all the comments added by the compiler
    self._src = srcFromMacroOutput
    self.setPos(0)
    self.setBreakPoint(len(srcFromMacroOutput))

    self.parse(assertEmptyStack=False)
    self._src = origParseSrc
    self.setBreakPoint(origBreakPoint)
    self.setPos(origPos)

    #self._compiler.addRawText('end')
def eatCache(self):
    """Parse a ``#cache`` directive (short or multi-line form) and open
    a cache region in the compiler.
    """
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    lineCol = self.getRowCol()
    self.getDirectiveStartToken()
    self.advance(len('cache'))

    startPos = self.pos()
    argList = self.getDefArgList(useNameMapper=True)
    argList = self._applyExpressionFilters(argList, 'cache', startPos=startPos)

    def startCache():
        cacheInfo = self._compiler.genCacheInfoFromArgList(argList)
        self._compiler.startCacheRegion(cacheInfo, lineCol)

    if self.matchColonForSingleLineShortFormDirective():
        self.advance()  # skip over :
        self.getWhiteSpace(max=1)
        startCache()
        self.parse(breakPoint=self.findEOL(gobble=True))
        self._compiler.endCacheRegion()
    else:
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        self.pushToOpenDirectivesStack('cache')
        startCache()
def eatCall(self):
    """Parse a ``#call <function> [args]`` directive and open a call
    region in the compiler; the region's output becomes the function's
    argument.
    """
    # @@TR: need to enable single line version of this
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    lineCol = self.getRowCol()
    self.getDirectiveStartToken()
    self.advance(len('call'))
    startPos = self.pos()

    # autocalling must be disabled while reading the function name so
    # the name itself isn't turned into a call
    useAutocallingOrig = self.setting('useAutocalling')
    self.setSetting('useAutocalling', False)
    self.getWhiteSpace()
    if self.matchCheetahVarStart():
        functionName = self.getCheetahVar()
    else:
        functionName = self.getCheetahVar(plain=True, skipStartToken=True)
    self.setSetting('useAutocalling', useAutocallingOrig)
    # @@TR: fix up filtering
    self._applyExpressionFilters(self[startPos:self.pos()], 'call', startPos=startPos)

    self.getWhiteSpace()
    args = self.getExpression(pyTokensToBreakAt=[':']).strip()
    if self.matchColonForSingleLineShortFormDirective():
        self.advance()  # skip over :
        self._compiler.startCallRegion(functionName, args, lineCol)
        self.getWhiteSpace(max=1)
        self.parse(breakPoint=self.findEOL(gobble=False))
        self._compiler.endCallRegion()
    else:
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self.pushToOpenDirectivesStack("call")
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        self._compiler.startCallRegion(functionName, args, lineCol)
def eatCallArg(self):
    """Parse an ``#arg <name>`` directive inside a #call region: the
    following output is captured as the named keyword argument.
    """
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    lineCol = self.getRowCol()
    self.getDirectiveStartToken()

    self.advance(len('arg'))
    startPos = self.pos()
    self.getWhiteSpace()
    argName = self.getIdentifier()
    self.getWhiteSpace()
    argName = self._applyExpressionFilters(argName, 'arg', startPos=startPos)
    self._compiler.setCallArg(argName, lineCol)
    if self.peek() == ':':
        self.getc()
    else:
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
def eatFilter(self):
    """Parse a ``#filter <name-or-expr>`` directive (short or multi-line
    form) and install the output filter in the compiler.
    """
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()

    self.getDirectiveStartToken()
    self.advance(len('filter'))
    self.getWhiteSpace()
    startPos = self.pos()
    # a $-prefixed filter is an arbitrary expression evaluating to a
    # filter class; otherwise it's a plain identifier
    if self.matchCheetahVarStart():
        isKlass = True
        theFilter = self.getExpression(pyTokensToBreakAt=[':'])
    else:
        isKlass = False
        theFilter = self.getIdentifier()
        self.getWhiteSpace()
    theFilter = self._applyExpressionFilters(theFilter, 'filter', startPos=startPos)

    if self.matchColonForSingleLineShortFormDirective():
        self.advance()  # skip over :
        self.getWhiteSpace(max=1)
        self._compiler.setFilter(theFilter, isKlass)
        self.parse(breakPoint=self.findEOL(gobble=False))
        self._compiler.closeFilterBlock()
    else:
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self.pushToOpenDirectivesStack("filter")
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        self._compiler.setFilter(theFilter, isKlass)
def eatTransform(self):
    """Parse a ``#transform <name-or-expr>`` directive and install the
    output transformer in the compiler.
    """
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()

    self.getDirectiveStartToken()
    self.advance(len('transform'))
    self.getWhiteSpace()
    startPos = self.pos()
    # a $-prefixed transformer is an arbitrary expression; otherwise a
    # plain identifier
    if self.matchCheetahVarStart():
        isKlass = True
        transformer = self.getExpression(pyTokensToBreakAt=[':'])
    else:
        isKlass = False
        transformer = self.getIdentifier()
        self.getWhiteSpace()
    transformer = self._applyExpressionFilters(transformer, 'transform', startPos=startPos)

    if self.peek()==':':
        self.advance()
    self.getWhiteSpace()
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    self._compiler.setTransform(transformer, isKlass)
def eatErrorCatcher(self):
    """Parse an ``#errorCatcher <name>`` directive and install the named
    error catcher in the compiler.
    """
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('errorCatcher'))
    self.getWhiteSpace()
    startPos = self.pos()
    errorCatcherName = self.getIdentifier()
    errorCatcherName = self._applyExpressionFilters(
        errorCatcherName, 'errorcatcher', startPos=startPos)
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    self._compiler.setErrorCatcher(errorCatcherName)
def eatCapture(self):
    """Parse a ``#capture [$varName]`` directive (short or multi-line
    form) and open a capture region whose output is assigned to the
    given expression.
    """
    # @@TR: this could be refactored to use the code in eatSimpleIndentingDirective
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    lineCol = self.getRowCol()

    self.getDirectiveStartToken()
    self.advance(len('capture'))
    startPos = self.pos()
    self.getWhiteSpace()

    expr = self.getExpression(pyTokensToBreakAt=[':'])
    expr = self._applyExpressionFilters(expr, 'capture', startPos=startPos)
    if self.matchColonForSingleLineShortFormDirective():
        self.advance()  # skip over :
        self._compiler.startCaptureRegion(assignTo=expr, lineCol=lineCol)
        self.getWhiteSpace(max=1)
        self.parse(breakPoint=self.findEOL(gobble=False))
        self._compiler.endCaptureRegion()
    else:
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        self.pushToOpenDirectivesStack("capture")
        self._compiler.startCaptureRegion(assignTo=expr, lineCol=lineCol)
def eatIf(self):
    """Parse an ``#if`` directive.  Supports three forms: the ternary
    ``#if <cond> then <a> else <b>``, the single-line short form
    ``#if <cond>: ...``, and the multi-line form closed by ``#end if``.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    lineCol = self.getRowCol()
    self.getDirectiveStartToken()
    startPos = self.pos()

    expressionParts = self.getExpressionParts(pyTokensToBreakAt=[':'])
    expr = ''.join(expressionParts).strip()
    expr = self._applyExpressionFilters(expr, 'if', startPos=startPos)

    isTernaryExpr = ('then' in expressionParts and 'else' in expressionParts)
    if isTernaryExpr:
        # split the parts into condition / true / false sub-expressions
        # on the 'then' and 'else' keywords
        conditionExpr = []
        trueExpr = []
        falseExpr = []
        currentExpr = conditionExpr
        for part in expressionParts:
            if part.strip()=='then':
                currentExpr = trueExpr
            elif part.strip()=='else':
                currentExpr = falseExpr
            else:
                currentExpr.append(part)

        conditionExpr = ''.join(conditionExpr)
        trueExpr = ''.join(trueExpr)
        falseExpr = ''.join(falseExpr)
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
        self._compiler.addTernaryExpr(conditionExpr, trueExpr, falseExpr, lineCol=lineCol)
    elif self.matchColonForSingleLineShortFormDirective():
        self.advance()  # skip over :
        self._compiler.addIf(expr, lineCol=lineCol)
        self.getWhiteSpace(max=1)
        self.parse(breakPoint=self.findEOL(gobble=True))
        self._compiler.commitStrConst()
        self._compiler.dedent()
    else:
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
        self.pushToOpenDirectivesStack('if')
        self._compiler.addIf(expr, lineCol=lineCol)
## end directive handlers
def handleEndDef(self):
    """Close a ``#def`` region; a nested def (when allowed) is treated
    like a closure and just dedented.
    """
    nestedDef = (self.setting('allowNestedDefScopes')
                 and [n for n in self._openDirectivesStack if n == 'def'])
    if nestedDef:
        # @@TR: temporary hack of useSearchList
        self.setSetting('useSearchList', self._useSearchList_orig)
        self._compiler.commitStrConst()
        self._compiler.dedent()
    else:
        self._compiler.closeDef()
###
def pushToOpenDirectivesStack(self, directiveName):
    """Record that *directiveName* is now open and awaiting its #end."""
    assert directiveName in self._closeableDirectives
    self._openDirectivesStack.append(directiveName)
def popFromOpenDirectivesStack(self, directiveName):
    """Pop *directiveName* from the open-directives stack.

    Raises ParseError when the stack is empty or the top of the stack
    is a different directive (mismatched #end).
    """
    if not self._openDirectivesStack:
        raise ParseError(self, msg="#end found, but nothing to end")
    expected = self._openDirectivesStack[-1]
    if expected != directiveName:
        raise ParseError(self, msg="#end %s found, expected #end %s" %(
            directiveName, expected))
    self._openDirectivesStack.pop()
def assertEmptyOpenDirectivesStack(self):
    """Raise ParseError if any directive is still missing its #end tag."""
    if not self._openDirectivesStack:
        return
    errorMsg = (
        "Some #directives are missing their corresponding #end ___ tag: %s" %(
            ', '.join(self._openDirectivesStack)))
    raise ParseError(self, msg=errorMsg)
##################################################
## Make an alias to export
# `Parser` is the public name; _HighLevelParser is the implementation.
Parser = _HighLevelParser
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
import datetime
from xml.parsers.expat import ParserCreate
# Yahoo Weather condition code -> Chinese description (unicode).
# Each entry's trailing comment gives the English condition name.
codes = {
    0 : u'龙卷风', # tornado
    1 : u'热带风暴', # tropical storm
    2 : u'飓风', # hurricane
    3 : u'风暴', # severe thunderstorms
    4 : u'雷雨', # thunderstorms
    5 : u'雨夹雪', # mixed rain and snow
    6 : u'雨夹冰雹', # mixed rain and sleet
    7 : u'雪夹冰雹', # mixed snow and sleet
    8 : u'冰毛毛雨', # freezing drizzle
    9 : u'毛毛雨', # drizzle
    10 : u'冰雨', # freezing rain
    11 : u'阵雨', # showers
    12 : u'阵雨', # showers
    13 : u'小雪', # snow flurries
    14 : u'小雨雪', # light snow showers
    15 : u'风雪', # blowing snow
    16 : u'下雪', # snow
    17 : u'冰雹', # hail
    18 : u'雨夹雪', # sleet
    19 : u'尘土', # dust
    20 : u'雾', # foggy
    21 : u'霾', # haze
    22 : u'烟雾', # smoky
    23 : u'狂风', # blustery
    24 : u'大风', # windy
    25 : u'寒冷', # cold
    26 : u'多云', # cloudy
    27 : u'多云', # mostly cloudy (night)
    28 : u'多云', # mostly cloudy (day)
    29 : u'局部多云', # partly cloudy (night)
    30 : u'局部多云', # partly cloudy (day)
    31 : u'晴朗', # clear (night)
    32 : u'晴', # sunny
    33 : u'晴朗', # fair (night)
    34 : u'晴朗', # fair (day)
    35 : u'雨夹冰雹', # mixed rain and hail
    36 : u'炎热', # hot
    37 : u'局部雷雨', # isolated thunderstorms
    38 : u'零星雷雨', # scattered thunderstorms
    39 : u'零星雷雨', # scattered thunderstorms
    40 : u'零星阵雨', # scattered showers
    41 : u'大雪', # heavy snow
    42 : u'零星雨夹雪', # scattered snow showers
    43 : u'大雪', # heavy snow
    44 : u'局部多云', # partly cloudy
    45 : u'雷阵雨', # thundershowers
    46 : u'小雪', # snow showers
    47 : u'局部雷雨', # isolated thundershowers
    3200 : u'暂无数据' # not available
}
class Wind(object):
    """Wind conditions; str() renders a JSON-like object, with 'null'
    substituted for any falsy field."""
    def __init__(self, chill, direction, speed):
        self.chill = chill
        self.direction = direction
        self.speed = speed
    def __str__(self):
        fields = (self.chill or "null",
                  self.direction or "null",
                  self.speed or "null")
        return r'{"chill" : %s, "direction" : %s, "speed" : %s}' % fields
    __repr__ = __str__
class Atmosphere(object):
    """Atmospheric readings; str() renders a JSON-like object, with 'null'
    substituted for any falsy field."""
    def __init__(self, humidity, visibility, pressure, rising):
        self.humidity = humidity
        self.visibility = visibility
        self.pressure = pressure
        self.rising = rising
    def __str__(self):
        fields = (self.humidity or "null",
                  self.visibility or "null",
                  self.pressure or "null",
                  self.rising or "null")
        return r'{"humidity" : %s, "visibility" : %s, "pressure" : %s, "rising": %s}' % fields
    __repr__ = __str__
class Astronomy(object):
    """Sunrise/sunset pair; str() renders a JSON object with quoted values."""
    def __init__(self, sunrise, sunset):
        self.sunrise = sunrise
        self.sunset = sunset
    def __str__(self):
        pair = (self.sunrise, self.sunset)
        return r'{"sunrise" : "%s", "sunset": "%s"}' % pair
    __repr__ = __str__
class Forecast(object):
    """One day's forecast parsed from a <yweather:forecast .../> element,
    e.g. day="Wed" date="30 Jun 2010" low="24" high="30" code="28"."""
    def __init__(self, day, date, low, high, code):
        self.day = day
        self.date = date
        self.low = low
        self.high = high
        self.code = code
    def __str__(self):
        # Condition text comes from the module-level `codes` table and is
        # UTF-8 encoded for the Python 2 byte-string rendering.
        large = "http://weather.china.xappengine.com/static/w/img/d%s.png" % self.code
        small = "http://weather.china.xappengine.com/static/w/img/s%s.png" % self.code
        return '{"date" : "%s", "day" : %s, "code" : %s, "text" : "%s", "low" : %d, "high" : %d, "image_large" : "%s", "image_small" : "%s"}' % (
            self.date, self.day, self.code, codes[self.code].encode('utf-8'), self.low, self.high, large, small)
    __repr__ = __str__
def index_of(list, data):
    """Return the position of *data* in *list*, or None when absent."""
    position = 0
    for item in list:
        if item == data:
            return position
        position += 1
    return None
def get_day(day):
    """Map an English weekday abbreviation ('Sun'..'Sat') to 0..6, or None."""
    week = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')
    for offset, label in enumerate(week):
        if label == day:
            return offset
    return None
def get_date(date):
    """Parse a date such as '30 Jun 2010' into a datetime.date."""
    parts = date.split(' ')
    months = ('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    month = None
    for number, label in enumerate(months):
        if label == parts[1]:
            month = number
            break
    return datetime.date(int(parts[2]), month, int(parts[0]))
def f2c(temp):
    """Convert a Fahrenheit reading (string or number) to integer Celsius.

    Rounds half away from zero.  The previous int(c + 0.5) relied on int()
    truncating toward zero, which mis-rounded sub-zero temperatures
    (e.g. 14F gave -9 instead of -10).
    """
    f = float(temp)
    c = (f - 32) * 5 / 9
    if c >= 0:
        return int(c + 0.5)
    return int(c - 0.5)
def to_24hour(time):
    """Convert a 12-hour time like '4:39 pm' to 24-hour '16:39'.

    Strings without an ' am'/' pm' suffix are returned unchanged.
    Fixes the 12 o'clock edge cases the original ignored:
    '12:05 am' -> '0:05' and '12:30 pm' -> '12:30' (was '24:30').
    """
    if time.endswith(' am'):
        t = time[:-3]
        n = t.find(':')
        if int(t[:n]) == 12:
            return "0:%s" % t[n+1:]   # 12 am is midnight
        return t
    if time.endswith(' pm'):
        t = time[:-3]
        n = t.find(':')
        hour = int(t[:n])
        if hour != 12:                # 12 pm is noon, not 24:xx
            hour += 12
        return "%d:%s" % (hour, t[n+1:])
    return time
class Weather(object):
    """Parses a Yahoo! Weather RSS document for one named city using expat
    SAX callbacks, and renders itself as a JSON-like byte string."""
    def char_data(self, text):
        # Only the text of <lastBuildDate> matters; it looks like
        # 'Wed, 30 Jun 2010 4:39 pm CST'.  Fragments not ending in ' CST'
        # are ignored (expat may deliver character data in pieces).
        if self.__isLastBuildDate:
            n = text.find(', ')
            text = text[n+2:]
            n1 = text.find(' ')
            n2 = text.find(' ', n1+1)
            m = text[n1+1:n2]
            # Replace the month abbreviation with its number so strptime can
            # use the locale-independent '%m' directive below.
            month = index_of(('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), m)
            text = text.replace(m, str(month))
            if not text.endswith(' CST'):
                return
            text = text[:-4]
            is_pm = text.endswith(' pm')
            text = text[:-3]
            time = datetime.datetime.strptime(text, '%d %m %Y %I:%M')
            h = time.hour
            if is_pm:
                h = h + 12  # NOTE(review): '12:xx pm' becomes hour 24 here — confirm feed never emits it
            self.pub = '%d-%#02d-%#02d %#02d:%#02d' % (time.year, time.month, time.day, h, time.minute)
    def end_element(self, name):
        if name=='lastBuildDate':
            self.__isLastBuildDate = False
    def start_element(self, name, attrs):
        # Dispatch on the yweather:* elements we care about; everything else
        # is ignored.
        if name=='lastBuildDate':
            self.__isLastBuildDate = True
            return
        if name=='yweather:forecast':
            self.forecasts.append(Forecast(
                get_day(attrs['day']),
                get_date(attrs['date']),
                f2c(attrs['low']),
                f2c(attrs['high']),
                int(attrs['code'])
            ))
        if name=='yweather:astronomy':
            self.astronomy.sunrise = to_24hour(attrs['sunrise'])
            self.astronomy.sunset = to_24hour(attrs['sunset'])
        if name=='yweather:atmosphere':
            self.atmosphere.humidity = attrs['humidity']
            self.atmosphere.visibility = attrs['visibility']
            self.atmosphere.pressure = attrs['pressure']
            self.atmosphere.rising = attrs['rising']
        if name=='yweather:wind':
            self.wind.chill = attrs['chill']
            self.wind.direction = attrs['direction']
            self.wind.speed = attrs['speed']
    def __init__(self, name, data):
        # name: display name of the city (unicode is normalised to UTF-8).
        # data: raw RSS bytes; parsed immediately, filling the fields below.
        self.__isLastBuildDate = False
        if isinstance(name, unicode):
            name = name.encode('utf-8')
        self.name = name
        self.pub = None
        self.wind = Wind(None, None, None)
        self.atmosphere = Atmosphere(None, None, None, None)
        self.astronomy = Astronomy(None, None)
        self.forecasts = []
        parser = ParserCreate()
        parser.returns_unicode = False
        parser.StartElementHandler = self.start_element
        parser.EndElementHandler = self.end_element
        parser.CharacterDataHandler = self.char_data
        parser.Parse(data)
    def __str__(self):
        # JSON-like rendering; pub is quoted when present, literal null when not.
        pub = 'null'
        if self.pub:
            pub = r'"%s"' % self.pub
        return '{"pub" : %s, "name" : "%s", "wind" : %s, "astronomy" : %s, "atmosphere" : %s, "forecasts" : %s}' \
        % (pub, self.name, self.wind, self.astronomy, self.atmosphere, self.forecasts)
    __repr__ = __str__
if __name__=='__main__':
import urllib
url = 'http://weather.yahooapis.com/forecastrss?u=c&w=2143712'
result = urllib.urlopen(url).read()
print Weather(result)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
from os import path
from Cheetah.Template import Template
def main():
file = path.join(path.split(__file__)[0], 'home.html')
print 'Compile template %s...' % file
cc = Template.compile(source=None, file=file, returnAClass=False, moduleName='autogen', className='CompiledTemplate')
target = path.join(path.split(__file__)[0], 'autogen', '__init__.py')
print 'Writing file %s...' % target
f = open(target, 'w')
f.write(cc)
f.close()
from autogen import CompiledTemplate
CompiledTemplate(searchList=[])
print 'Compiled ok.'
if __name__ == '__main__':
    main()  # run the template compilation when invoked as a script
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
from google.appengine.ext import db
class City(db.Model):
    """A city whose weather can be queried; looked up by name or alias."""
    name = db.StringProperty(required=True)
    aliases = db.StringListProperty(required=True)
    code = db.IntegerProperty(required=True)
    def first_alias(self):
        """Return the primary (first) alias."""
        primary = self.aliases[0]
        return primary
    def aliases_str(self):
        """Return every alias joined as 'a, b, c'."""
        return ', '.join(alias for alias in self.aliases)
def get_city(key=None):
    """Fetch a City by datastore key; fall back to the default ('beijing')
    when no key is given or the key resolves to nothing."""
    city = City.get(key) if key else None
    if city is None:
        return find_city('beijing')
    return city
def get_cities():
    """Return up to 1000 cities ordered by their aliases."""
    query = City.all().order('aliases')
    return query.fetch(1000)
def find_city(name, return_default=True):
    """Look a City up first by alias, then by name.

    Returns None when nothing matches, unless return_default is true,
    in which case the 'beijing' city is returned as a fallback.
    """
    for prop in ('aliases =', 'name ='):
        city = City.all().filter(prop, name).get()
        if city is not None:
            return city
    if return_default:
        return City.all().filter('aliases =', 'beijing').get()
    return None
def create_city(name, aliases, code):
    """Persist and return a new City entity."""
    city = City(name=name, aliases=aliases, code=code)
    city.put()
    return city
def delete_city(key):
    """Remove the City stored under *key* from the datastore."""
    entity = City.get(key)
    entity.delete()
import urllib
import datetime
from xml.parsers.expat import ParserCreate
# Yahoo! Weather condition codes -> Chinese description (duplicate of the
# table in the standalone weather module); 3200 means "not available".
codes = {
    0 : u'龙卷风', # tornado
    1 : u'热带风暴', # tropical storm
    2 : u'飓风', # hurricane
    3 : u'风暴', # severe thunderstorms
    4 : u'雷雨', # thunderstorms
    5 : u'雨夹雪', # mixed rain and snow
    6 : u'雨夹冰雹', # mixed rain and sleet
    7 : u'雪夹冰雹', # mixed snow and sleet
    8 : u'冰毛毛雨', # freezing drizzle
    9 : u'毛毛雨', # drizzle
    10 : u'冰雨', # freezing rain
    11 : u'阵雨', # showers
    12 : u'阵雨', # showers
    13 : u'小雪', # snow flurries
    14 : u'小雨雪', # light snow showers
    15 : u'风雪', # blowing snow
    16 : u'下雪', # snow
    17 : u'冰雹', # hail
    18 : u'雨夹雪', # sleet
    19 : u'尘土', # dust
    20 : u'雾', # foggy
    21 : u'霾', # haze
    22 : u'烟雾', # smoky
    23 : u'狂风', # blustery
    24 : u'大风', # windy
    25 : u'寒冷', # cold
    26 : u'多云', # cloudy
    27 : u'多云', # mostly cloudy (night)
    28 : u'多云', # mostly cloudy (day)
    29 : u'局部多云', # partly cloudy (night)
    30 : u'局部多云', # partly cloudy (day)
    31 : u'晴朗', # clear (night)
    32 : u'晴', # sunny
    33 : u'晴朗', # fair (night)
    34 : u'晴朗', # fair (day)
    35 : u'雨夹冰雹', # mixed rain and hail
    36 : u'炎热', # hot
    37 : u'局部雷雨', # isolated thunderstorms
    38 : u'零星雷雨', # scattered thunderstorms
    39 : u'零星雷雨', # scattered thunderstorms
    40 : u'零星阵雨', # scattered showers
    41 : u'大雪', # heavy snow
    42 : u'零星雨夹雪', # scattered snow showers
    43 : u'大雪', # heavy snow
    44 : u'局部多云', # partly cloudy
    45 : u'雷阵雨', # thundershowers
    46 : u'小雪', # snow showers
    47 : u'局部雷雨', # isolated thundershowers
    3200 : u'暂无数据' # not available
}
def load_rss(url):
    """Download and return the raw RSS document at *url*.

    Bug fix: the handle is now closed even when read() raises; previously
    it leaked on any network error during the read.
    """
    stream = urllib.urlopen(url)
    try:
        return stream.read()
    finally:
        stream.close()
class Wind(object):
    """Wind conditions; str() renders the raw field values as a
    JSON-like object (no null substitution in this variant)."""
    def __init__(self, chill, direction, speed):
        self.chill = chill
        self.direction = direction
        self.speed = speed
    def __str__(self):
        fields = (self.chill, self.direction, self.speed)
        return r'{"chill" : %s, "direction" : %s, "speed" : %s}' % fields
    __repr__ = __str__
class Atmosphere(object):
    """Atmospheric readings; str() renders the raw field values as a
    JSON-like object (no null substitution in this variant)."""
    def __init__(self, humidity, visibility, pressure, rising):
        self.humidity = humidity
        self.visibility = visibility
        self.pressure = pressure
        self.rising = rising
    def __str__(self):
        fields = (self.humidity, self.visibility, self.pressure, self.rising)
        return r'{"humidity" : %s, "visibility" : %s, "pressure" : %s, "rising": %s}' % fields
    __repr__ = __str__
class Astronomy(object):
    """Sunrise/sunset pair; str() renders a JSON object with quoted values."""
    def __init__(self, sunrise, sunset):
        self.sunrise = sunrise
        self.sunset = sunset
    def __str__(self):
        pair = (self.sunrise, self.sunset)
        return r'{"sunrise" : "%s", "sunset": "%s"}' % pair
    __repr__ = __str__
class Forecast(object):
    """One day's forecast parsed from a <yweather:forecast .../> element,
    e.g. day="Wed" date="30 Jun 2010" low="24" high="30" code="28"."""
    def __init__(self, day, date, low, high, code):
        self.day = day
        self.date = date
        self.low = low
        self.high = high
        self.code = code
    def __str__(self):
        # Condition text is looked up in the module-level `codes` table;
        # the rendering here is a unicode string (no UTF-8 encoding).
        large = "http://l.yimg.com/a/i/us/nws/weather/gr/%sd.png" % self.code
        small = "http://l.yimg.com/a/i/us/nws/weather/gr/%ss.png" % self.code
        return u'{"date" : "%s", "day" : %s, "code" : %s, "text" : "%s", "low" : %d, "high" : %d, "image_large" : "%s", "image_small" : "%s"}' % (
            self.date, self.day, self.code, codes[self.code], self.low, self.high, large, small)
    __repr__ = __str__
def index_of(list, data):
    """Return the position of *data* in *list*, or None when absent."""
    position = 0
    for element in list:
        if element == data:
            return position
        position += 1
    return None
def get_day(day):
    """Map an English weekday abbreviation ('Sun'..'Sat') to 0..6, or None."""
    week = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')
    for offset, label in enumerate(week):
        if label == day:
            return offset
    return None
def get_date(date):
    """Parse a date such as '30 Jun 2010' into a datetime.date."""
    pieces = date.split(' ')
    months = ('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    month = None
    for number, label in enumerate(months):
        if label == pieces[1]:
            month = number
            break
    return datetime.date(int(pieces[2]), month, int(pieces[0]))
def to_24hour(time):
    """Convert a 12-hour time like '4:39 pm' to 24-hour '16:39'.

    Strings without an ' am'/' pm' suffix are returned unchanged.
    Fixes the 12 o'clock edge cases the original ignored:
    '12:05 am' -> '0:05' and '12:30 pm' -> '12:30' (was '24:30').
    """
    if time.endswith(' am'):
        t = time[:-3]
        n = t.find(':')
        if int(t[:n]) == 12:
            return "0:%s" % t[n+1:]   # 12 am is midnight
        return t
    if time.endswith(' pm'):
        t = time[:-3]
        n = t.find(':')
        hour = int(t[:n])
        if hour != 12:                # 12 pm is noon, not 24:xx
            hour += 12
        return "%d:%s" % (hour, t[n+1:])
    return time
class Weather(object):
    """Parses a Yahoo! Weather RSS document using expat SAX callbacks and
    renders itself as a JSON-like unicode string (no city name here)."""
    def char_data(self, text):
        # Only the text of <lastBuildDate> matters; it looks like
        # 'Wed, 30 Jun 2010 4:39 pm CST'.  Fragments not ending in ' CST'
        # are ignored (expat may deliver character data in pieces).
        if self.__isLastBuildDate:
            n = text.find(', ')
            text = text[n+2:]
            n1 = text.find(' ')
            n2 = text.find(' ', n1+1)
            m = text[n1+1:n2]
            # Replace the month abbreviation with its number so strptime can
            # use the locale-independent '%m' directive below.
            month = index_of(('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), m)
            text = text.replace(m, str(month))
            if not text.endswith(' CST'):
                return
            text = text[:-4]
            is_pm = text.endswith(' pm')
            text = text[:-3]
            time = datetime.datetime.strptime(text, '%d %m %Y %I:%M')
            h = time.hour
            if is_pm:
                h = h + 12  # NOTE(review): '12:xx pm' becomes hour 24 here — confirm feed never emits it
            self.pub = '%d-%#02d-%#02d %#02d:%#02d' % (time.year, time.month, time.day, h, time.minute)
    def end_element(self, name):
        if name=='lastBuildDate':
            self.__isLastBuildDate = False
    def start_element(self, name, attrs):
        # Dispatch on the yweather:* elements we care about; note this
        # variant keeps low/high as plain ints (no Fahrenheit conversion).
        if name=='lastBuildDate':
            self.__isLastBuildDate = True
            return
        if name=='yweather:forecast':
            self.forecasts.append(Forecast(
                get_day(attrs['day']),
                get_date(attrs['date']),
                int(attrs['low']),
                int(attrs['high']),
                int(attrs['code'])
            ))
        if name=='yweather:astronomy':
            self.astronomy.sunrise = to_24hour(attrs['sunrise'])
            self.astronomy.sunset = to_24hour(attrs['sunset'])
        if name=='yweather:atmosphere':
            self.atmosphere.humidity = attrs['humidity']
            self.atmosphere.visibility = attrs['visibility']
            self.atmosphere.pressure = attrs['pressure']
            self.atmosphere.rising = attrs['rising']
        if name=='yweather:wind':
            self.wind.chill = attrs['chill']
            self.wind.direction = attrs['direction']
            self.wind.speed = attrs['speed']
    def __init__(self, data):
        # data: raw RSS bytes; parsed immediately, filling the fields below.
        self.__isLastBuildDate = False
        self.pub = None
        self.wind = Wind(None, None, None)
        self.atmosphere = Atmosphere(None, None, None, None)
        self.astronomy = Astronomy(None, None)
        self.forecasts = []
        parser = ParserCreate()
        parser.returns_unicode = False
        parser.StartElementHandler = self.start_element
        parser.EndElementHandler = self.end_element
        parser.CharacterDataHandler = self.char_data
        parser.Parse(data)
    def __str__(self):
        # JSON-like rendering; pub is quoted when present, literal null when not.
        pub = 'null'
        if self.pub:
            pub = r'"%s"' % self.pub
        return u'{"pub" : %s, "wind" : %s, "astronomy" : %s, "atmosphere" : %s, "forecasts" : %s}' \
        % (pub, self.wind, self.astronomy, self.atmosphere, self.forecasts)
    __repr__ = __str__
class Subscriber(db.Model):
    # A weather-notification subscriber.
    mobile = db.StringProperty(required=True)  # presumably the phone number — confirm against caller
    city = db.StringProperty(required=True)    # presumably a City alias (see City.aliases) — confirm
    time = db.IntegerProperty(required=True)   # NOTE(review): units/meaning unclear (hour of day?) — confirm
| Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
##################################################
## DEPENDENCIES
import sys
import os
import os.path
import __builtin__
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
# Short aliases used throughout the generated respond() body.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Metadata stamped by the Cheetah compiler at generation time.
__CHEETAH_version__ = '2.4.1'
__CHEETAH_versionTuple__ = (2, 4, 1, 'final', 0)
__CHEETAH_genTime__ = 1284450634.7130001
__CHEETAH_genTimestamp__ = 'Tue Sep 14 15:50:34 2010'
__CHEETAH_src__ = 'D:\\workspace\\python\\weather-china\\src\\home.html'
__CHEETAH_srcLastModified__ = 'Wed Jul 28 10:35:46 2010'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# Refuse to run against an older Cheetah runtime than we were compiled for.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
      __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class CompiledTemplate(Template):
    """Cheetah-compiled form of home.html (autogenerated -- regenerate via
    the compile script rather than editing this class by hand)."""
    ##################################################
    ## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        # Forward only the Cheetah-recognised keyword arguments to the
        # instance initializer.
        super(CompiledTemplate, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        write(u'''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
    <title>\u5929\u6c14\u9884\u62a5</title>
    <script type="text/javascript" src="/static/js/jquery.js"></script>
    <script type="text/javascript">
    var days=["\u661f\u671f\u65e5", "\u661f\u671f\u4e00", "\u661f\u671f\u4e8c", "\u661f\u671f\u4e09", "\u661f\u671f\u56db", "\u661f\u671f\u4e94", "\u661f\u671f\u516d"]
    jQuery(document).ready(function() {
        jQuery.getJSON("/api?city=''')
        _v = VFSL([locals()]+SL+[globals(), __builtin__],"city.first_alias",True) # u'${city.first_alias}' on line 11, col 29
        if _v is not None: write(_filter(_v, rawExpr=u'${city.first_alias}')) # from line 11, col 29.
        write(u'''", function(data) {
            var today = data.forecasts[0];
\tvar tomorrow = data.forecasts[1];
            jQuery("#x-today-date").html(today.date);
            jQuery("#x-tomorrow-date").html(tomorrow.date);
            jQuery("#x-today-day").html(days[today.day]);
            jQuery("#x-tomorrow-day").html(days[tomorrow.day]);
            jQuery("#x-today-text").html(today.text);
            jQuery("#x-tomorrow-text").html(tomorrow.text);
            jQuery("#x-today-temp").html(today.low + " ~ " + today.high + "\xb0");
\tjQuery("#x-tomorrow-temp").html(tomorrow.low + " ~ " + tomorrow.high + "\xb0");
            jQuery("#x-today-icon").css("background-image", "url(" + today.image_large + ")");
\tjQuery("#x-tomorrow-icon").css("background-image", "url(" + tomorrow.image_large + ")");
\tjQuery("#x-today-icon-small").css("background-image", "url(" + today.image_small + ")");
            jQuery("#x-pub").html(data.pub);
\tif (data.wind.chill!=null)
\t    jQuery("#x-wind-chill").html(data.wind.chill);
\tif (data.wind.direction!=null)
\t    jQuery("#x-wind-direction").html(data.wind.direction);
\tif (data.wind.speed!=null)
\t    jQuery("#x-wind-speed").html(data.wind.speed);
        if (data.atmosphere.humidity!=null)
\t    jQuery("#x-atmosphere-humidity").html(data.atmosphere.humidity);
        if (data.atmosphere.visibility!=null)
\t    jQuery("#x-atmosphere-visibility").html(data.atmosphere.visibility);
        if (data.atmosphere.pressure!=null)
\t    jQuery("#x-atmosphere-pressure").html(data.atmosphere.pressure);
        if (data.astronomy.sunrise!=null)
\t    jQuery("#x-astronomy-sunrise").html(data.astronomy.sunrise);
        if (data.astronomy.sunset!=null)
\t    jQuery("#x-astronomy-sunset").html(data.astronomy.sunset);
        });
    });
    function change_city(key){
        if (key=="-")
            return;
        location.assign("/?city=" + key);
    }
    </script>
    <link rel="stylesheet" href="/static/css/screen.css" type="text/css" media="screen, projection">
    <link rel="stylesheet" href="/static/css/print.css" type="text/css" media="print">
    <!--[if lt IE 8]>
\t<link rel="stylesheet" href="/static/css/ie.css" type="text/css" media="screen, projection">
    <![endif]-->
    <style type="text/css">
    div.w-report span.h {
\tmargin:3px 0px;
\tfont-weight:bold;
        font-size:24px;
\tdisplay:inline;
    }
    div.w-report span.date {
\tmargin:3px 0px 3px 12px;
\tfont-weight:bold;
\tfont-size:16px;
    }
    div.weather-report {
\tbackground-image:url(static/img/w-bg.png);
\tbackground-repeat:no-repeat;
\tbackground-position:56px 70px;
\tmargin:0px;
\tpadding:0px;
\twidth:300px;
\theight:160px;
    }
    div.weather-icon {
\tbackground-image:url(static/w/img/d44.png);
\tbackground-repeat:no-repeat;
\tmargin:0px;
\tpadding:0px;
\twidth:300px;
\theight:160px;
    }
    div.weather-text {
\ttext-align:right;
\tmargin:0px;
\tpadding-top:76px;
\tpadding-right:20px;
    }
    div.weather-text p {
\tmargin:0px;
\tcolor:#FFF;
\tfont-size: 20px;
\tfont-weight: bold;
\ttext-shadow: #315895 0px -1px 1px;
\tline-height:28px;
    }
    </style>
    <script type="text/javascript">
    var _gaq = _gaq || [];
    _gaq.push([\'_setAccount\', \'UA-251595-22\']);
    _gaq.push([\'_trackPageview\']);
    (function() {
        var ga = document.createElement(\'script\'); ga.type = \'text/javascript\'; ga.async = true;
        ga.src = (\'https:\' == document.location.protocol ? \'https://ssl\' : \'http://www\') + \'.google-analytics.com/ga.js\';
        var s = document.getElementsByTagName(\'script\')[0]; s.parentNode.insertBefore(ga, s);
    })();
    </script>
</head>
<body style="font-size:13px">
    <div class="container" style="background-color:#FFF">
        <div class="span-24 last">
        </div>
        <div class="span-24 last">
            <div id="x-today-icon-small" style="background-repeat:no-repeat; height:34; padding:10px 0px 10px 60px; background-image:url(static/w/img/s44.png)"><strong>''')
        _v = VFSL([locals()]+SL+[globals(), __builtin__],"city.name",True) # u'${city.name}' on line 125, col 163
        if _v is not None: write(_filter(_v, rawExpr=u'${city.name}')) # from line 125, col 163.
        write(u'''</strong>
            <select name="change_city" id="change_city" onchange="change_city(this.value)">
                <option value="-">\u66f4\u6539\u57ce\u5e02</option>
''')
        for c in VFSL([locals()]+SL+[globals(), __builtin__],"cities",True): # generated from line 128, col 1
            write(u'''                <option value="''')
            _v = VFN(VFSL([locals()]+SL+[globals(), __builtin__],"c",True),"first_alias",False)() # u'${c.first_alias()}' on line 129, col 26
            if _v is not None: write(_filter(_v, rawExpr=u'${c.first_alias()}')) # from line 129, col 26.
            write(u'''">''')
            _v = VFSL([locals()]+SL+[globals(), __builtin__],"c.name",True) # u'${c.name}' on line 129, col 46
            if _v is not None: write(_filter(_v, rawExpr=u'${c.name}')) # from line 129, col 46.
            write(u'''</option>
''')
        write(u'''            </select>
        </div>
    </div>
\t<div class="span-16">
        <div class="span-16 last">
            <div id="weather-today" class="w-report span-8">
                <div><span class="h">\u4eca\u65e5\u5929\u6c14</span><span class="date"><span id="x-today-date"></span>&nbsp;&nbsp;<span id="x-today-day"></span></span></div>
                <div class="weather-report">
                    <div id="x-today-icon" class="weather-icon">
                        <div class="weather-text">
                            <p id="x-today-text">Loading...</p>
                            <p id="x-today-temp"></p>
                        </div>
                    </div>
                </div>
                <div><span class="h">\u5176\u4ed6\u4fe1\u606f\uff1a</span></div>
                <div style="padding:6px">
                    <div>\u98ce\u529b\uff1a<span id="x-wind-chill">N/A</span>&nbsp;&nbsp;\u98ce\u5411\uff1a<span id="x-wind-direction">N/A</span>&nbsp;&nbsp;\u98ce\u901f\uff1a<span id="x-wind-speed">N/A</span></div>
                    <div>\u80fd\u89c1\u5ea6\uff1a<span id="x-atmosphere-visibility">N/A</span>&nbsp;&nbsp;\u6e7f\u5ea6\uff1a<span id="x-atmosphere-humidity">N/A</span>&nbsp;&nbsp;\u6c14\u538b\uff1a<span id="x-atmosphere-pressure">N/A</span></div>
                    <div>\u65e5\u51fa\uff1a<span id="x-astronomy-sunrise">N/A</span>&nbsp;&nbsp;\u65e5\u843d\uff1a<span id="x-astronomy-sunset">N/A</span></div>
                    <div>\u53d1\u5e03\u4e8e\uff1a<span id="x-pub">N/A</span></div>
                </div>
            </div>
            <div id="weather-tomorrow" class="w-report span-8 last">
                <div><span class="h">\u660e\u65e5\u5929\u6c14</span><span class="date"><span id="x-tomorrow-date"></span>&nbsp;&nbsp;<span id="x-tomorrow-day"></span></span></div>
                <div class="weather-report">
                    <div id="x-tomorrow-icon" class="weather-icon">
                        <div class="weather-text">
                            <p id="x-tomorrow-text">Loading...</p>
                            <p id="x-tomorrow-temp"></p>
                        </div>
                    </div>
                </div>
            </div>
        </div>
        <div class="w-report span-16 last" style="margin-top:6px">
            <div><span class="h">\u5b89\u88c5Chrome\u63d2\u4ef6</span></div>
            <div style="padding:6px">
                <div>\u5982\u679c\u60a8\u4f7f\u7528\u7684\u662f\u652f\u6301HTML 5\u7684Google Chrome\u6d4f\u89c8\u5668\uff0c\u53ef\u4ee5<a href="https://chrome.google.com/extensions/detail/gbmkicglakjoppnghhiceacmbbaihoeh" target="_blank">\u5b89\u88c5\u6700\u65b0\u63d2\u4ef6</a>\u4ee5\u4fbf\u968f\u65f6\u83b7\u53d6\u5929\u6c14\u9884\u62a5\uff1a</div>
                <div><a href="https://chrome.google.com/extensions/detail/gbmkicglakjoppnghhiceacmbbaihoeh" target="_blank"><img src="static/img/snapshot-chrome-extension.png" width="291" height="99" style="margin:12px"/></a></div>
            </div>
        </div>
        <div class="w-report span-16 last" style="margin-top:6px">
            <div><span class="h">GTalk\u673a\u5668\u4eba</span></div>
            <div style="padding:6px">
                <div>\u5982\u679c\u60a8\u4f7f\u7528Google Talk\uff0c\u53ef\u4ee5\u6dfb\u52a0\u673a\u5668\u4eba<strong>weather-china@appspot.com</strong>\u4e3a\u597d\u53cb\uff0c\u968f\u65f6\u5411\u4ed6\u8be2\u95ee\u5929\u6c14\u9884\u62a5\uff1a</div>
                <div><img src="static/img/snapshot-xmpp.png" width="300" height="254" style="margin:12px"/></div>
            </div>
        </div>
    </div>
    <div class="span-8 last">
        <script type="text/javascript"><!--
        google_ad_client = "pub-6727358730461554";
        /* 300x250 */
        google_ad_slot = "8201905603";
        google_ad_width = 300;
        google_ad_height = 250;
        //-->
        </script>
        <script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"></script>
        <script type="text/javascript"><!--
        google_ad_client = "pub-6727358730461554";
        /* 300x250 */
        google_ad_slot = "8201905603";
        google_ad_width = 300;
        google_ad_height = 250;
        //-->
        </script>
        <script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"></script>
        <script type="text/javascript"><!--
        google_ad_client = "pub-6727358730461554";
        /* 300x250 */
        google_ad_slot = "8201905603";
        google_ad_width = 300;
        google_ad_height = 250;
        //-->
        </script>
        <script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"></script>
    </div>
    <div class="span-24 last"></div>
    <div class="span-24 last"><div style="text-align:center;padding:6px"><a href="http://code.google.com/p/weather-china/wiki/API" target="_blank">API\u670d\u52a1</a> | <a href="http://code.google.com/p/weather-china/issues/list" target="_blank">\u610f\u89c1\u53cd\u9988</a> | <a id="x-contact" href="#">\u8054\u7cfb\u6211\u4eec</a> | Copyright©2010</div></div>
    </div>
    <script type="text/javascript">
    jQuery("#x-contact").attr("href", "mail" + "to:ask" + "xuefeng@" + "gm" + "ail.com");
    </script>
</body>
</html>
''')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES
    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_CompiledTemplate= 'respond'
## END CLASS DEFINITION
# Attach the Cheetah runtime plumbing (cache regions, filters, etc.) to the
# generated class if it was not already added.
if not hasattr(CompiledTemplate, '_initCheetahAttributes'):
    templateAPIClass = getattr(CompiledTemplate, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(CompiledTemplate)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
    # Allow the compiled template to be rendered from the command line.
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=CompiledTemplate()).run()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This script will try to create the minig indexes for all the users
# with active mail_perms in the OBM database.
#
# Depends: python-psycopg2 for postgresql or python-mysqldb for mysql.
import ConfigParser;
import os;
import sys;
import httplib;
import urllib;
# Read /etc/obm/obm_conf.ini & fetches login, domain & passwords from
# the P_UserObm table.
def fetch_user_passwords():
print "INFO: Reading /etc/obm/obm_conf.ini..."
config = ConfigParser.ConfigParser();
config.readfp(open("/etc/obm/obm_conf.ini"));
dbtype = config.get("global", "dbtype").strip();
host = config.get("global", "host").strip();
db = config.get("global", "db").strip();
user = config.get("global", "user").strip();
password = config.get("global", "password").strip(" \"");
print "INFO: type: '"+dbtype+"' host: '"+host+"' db: '"+db+"' user: '"+user+"' password: '"+password+"'";
ds = None;
if dbtype == "PGSQL":
import psycopg2 as dbapi2;
print "INFO: psycopg2 drived loaded."
ds = dbapi2.connect(host=host, database=db, user=user, password=password);
elif dbtype == 'MYSQL':
import MySQLdb as dbapi2;
print "INFO: MySQLdb driver loaded."
ds = dbapi2.connect(host=host, db=db, user=user, passwd=password);
else:
print "ERROR: Unrecognised dbtype: "+dbtype;
exit(1);
cur = ds.cursor();
cur.execute("""
SELECT userobm_login, domain_name, userobm_password
FROM P_UserObm
INNER JOIN P_Domain ON userobm_domain_id=domain_id
WHERE
userobm_password_type='PLAIN' AND
userobm_mail_perms=1 AND
userobm_archive=0 AND
NOT domain_global
ORDER BY domain_name, userobm_login
""");
rows = cur.fetchall();
cur.close();
return rows;
# login on the minig backend using the given tuple with login, domain
# & password
def init_minig_index(host, port, row):
print "INFO: init index for "+row[0]+"@"+row[1]+" on "+host+":"+str(port);
params = urllib.urlencode({ "login": row[0], "domain": row[1], "password": row[2] });
headers = { "Content-type": "application/x-www-form-urlencoded" };
con = httplib.HTTPConnection(host, port);
try:
con.request("POST", "/firstIndexing.do", params, headers);
response = con.getresponse();
print "INFO:", response.status, response.reason
data = response.read();
except Exception, e:
print "ERROR:", e
con.close();
def usage():
    # Print the command-line synopsis for this script.
    print """usage: ./minig_init_index.py <backend_host> <backend_port>
example: ./minig_init_index.py localhost 8081""";
if __name__ == "__main__":
if len(sys.argv) != 3:
usage();
exit(1);
rows = fetch_user_passwords();
for i in range (len(rows)):
init_minig_index(sys.argv[1], int(sys.argv[2]), rows[i]);
print "INFO: progress: "+str(i+1)+"/"+str(len(rows));
| Python |
''' Cookie-handlin' mix-in helper; inspired by WebOb.
This module offers a cookie-handling mixin class meant to be used with Google
App Engine; this class can in fact be mixed into any class that shares the
following features with webapp.RequestHandler subclasses:
- a self.request.cookies object with a get(key, defaultvalue) method
- a self.response.headers object offering:
- methods add_header(header, value) and getall(header)
- the ability to 'del self.response.headers[header]'
The mixin class supplies methods to get_, set_, delete_ and unset_ a cookie
(each method's name ends with _cookie;-).
'''
# Copyright (C) 2008 aleaxit@gmail.com
# licensed under CC-by license, http://creativecommons.org/licenses/by/3.0/
import Cookie
import datetime
import time
from Cookie import BaseCookie
def _serialize_cookie_date(dt):
dt = dt.timetuple()
return time.strftime('"%a, %d-%b-%Y %H:%M:%S GMT"', dt.timetuple())
class CookieMixin(object):
def get_cookie(self, key, default_value=None):
""" Gets a cookie from the request object:
Args:
key: string that's the cookie's name (mandatory)
default_value: default value if name's absent (default: None)
Returns:
a string (the cookie's value) or the default value if the cookie's absent
"""
return self.request.cookies.get(key, default_value)
def set_cookie(self, key, value='', max_age=None,
path='/', domain=None, secure=None, httponly=False,
version=None, comment=None, expires=None):
""" Set (add) a cookie to the response object.
Args:
key: string that is the cookie's name (mandatory)
value: string (or Unicode) that is the cookie's value (default '')
and many optional ones to set the cookie's properties (pass BY NAME only!):
max_age (or datetime.timedelta or a number of seconds)
expires (string, datetime.timedelta, or datetime.datetime)
[if you pass max_age and not expires, expires is computed from max_age]
path, domain, secure, httponly, version, comment (typically strings)
Side effects:
adds to self.response.headers an appropriate Set-Cookie header.
"""
if isinstance(value, unicode):
value = '"%s"' % value.encode('utf8')
cookies = Cookie.BaseCookie()
cookies[key] = value
if isinstance(max_age, datetime.timedelta):
max_age = datetime.timedelta.seconds + datetime.timedelta.days*24*60*60
if max_age is not None and expires is None:
expires = (datetime.datetime.utcnow() +
datetime.timedelta(seconds=max_age))
if isinstance(expires, datetime.timedelta):
expires = datetime.datetime.utcnow() + expires
if isinstance(expires, datetime.datetime):
expires = '"'+_serialize_cookie_date(expires)+'"'
for var_name, var_value in [
('max_age', max_age),
('path', path),
('domain', domain),
('secure', secure),
('HttpOnly', httponly),
('version', version),
('comment', comment),
('expires', expires),
]:
if var_value is not None and var_value is not False:
cookies[key][var_name.replace('_', '-')] = str(var_value)
header_value = cookies[key].output(header='').lstrip()
self.response.headers.add_header('Set-Cookie', header_value)
def delete_cookie(self, key, path='/', domain=None):
""" Delete a cookie from the client.
Path and domain must match how the cookie was originally set. This method
sets the cookie to the empty string, and max_age=0 so that it should
expire immediately (a negative expires should also help with that)
Args:
key: string that is the cookie's name (mandatory)
path, domain: optional strings, must match the original settings
Side effects:
adds to self.response.headers an appropriate Set-Cookie header.
"""
self.set_cookie(key, '', path=path, domain=domain,
max_age=0, expires=datetime.timedelta(days=-5))
    def unset_cookie(self, key):
        """ Unset a cookie with the given name (remove from the response).
        If there are multiple cookies (e.g., two cookies with the same name and
        different paths or domains), all such cookies will be deleted.
        Args:
          key: string that is the cookie's name (mandatory)
        Side effects:
          delete from self.response.headers all cookies with that name
        Raises:
          KeyError if the response had no such cookies (or, none at all)
        """
        existing = self.response.headers.getall('Set-Cookie')
        if not existing: raise KeyError("No cookies at all had been set")
        # remove all set-cookie headers, then put back those (if any) that
        # should not be removed
        del self.response.headers['Set-Cookie']
        found = False
        for header in existing:
            # NOTE(review): set_cookie above uses Cookie.BaseCookie(); here a
            # bare BaseCookie is used -- confirm the module imports the name
            # directly (e.g. `from Cookie import BaseCookie`), else NameError.
            cookies = BaseCookie()
            cookies.load(header)
            if key in cookies:
                found = True
                del cookies[key]
            # Re-serialize whatever cookies remain in this header, if any.
            header = cookies.output(header='').lstrip()
            if header:
                self.response.headers.add_header('Set-Cookie', header)
        if not found: raise KeyError("No cookie had been set with name %r" % key)
| Python |
# common needs of JSON-REST-based client-side Python tests
# (intended to be run while gae-json-rest is being served at localhost:8083)!
import cookielib
import httplib
import optparse
import os
import signal
import socket
import subprocess
import sys
import time
import urllib2
import simplejson
# Defaults for the command-line options parsed in Tester.__init__.
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8083
DEFAULT_PREFIX = ''
def body(**fields):
    """Serialize the given keyword arguments into a JSON object string,
    suitable for use as an HTTP request body."""
    return simplejson.dumps(fields)
class Tester(object):
def __init__(self, f):
self.f = f
self.cj = cookielib.CookieJar()
self.gae = None
# get command-line options
parser = optparse.OptionParser()
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="print detailed info to stdout")
parser.add_option("-s", "--host", dest="host", default=DEFAULT_HOST,
help="what host the server is running on")
parser.add_option("-p", "--port", dest="port", default=DEFAULT_PORT,
type="int", help="what port the server is running on")
parser.add_option("-x", "--prefix", dest="prefix", default=DEFAULT_PREFIX,
help="prefix to prepend to every path to test")
parser.add_option("-l", "--local-gae", action="store", dest="gaepath",
help="GAE SDK directory path")
options, args = parser.parse_args()
if args:
print 'Unknown arguments:', args
sys.exit(1)
for attrib in 'verbose host port prefix'.split():
setattr(self, attrib, getattr(options, attrib))
if options.gaepath is not None: # start the local GAE server
self.gae = subprocess.Popen((os.path.realpath(options.gaepath) +
"/dev_appserver.py " + "-p %d " % self.port + "-a %s" % self.host,
os.path.dirname(os.path.realpath(__file__))))
# ensure prefix starts and doesn't end with / (or, is /)
self.prefix = self.prefix.strip('/')
if self.prefix: self.prefix = '/%s/' % self.prefix
else: self.prefix = '/'
def getAny(self, classname):
""" Returns the ID of any one existing entity of the model, or None
"""
data = silent_request(conn, 'GET', '/%s/' % classname)
if data: return data[0]['id']
else: return None
def silent_request(self, verb, path, body=None):
""" Makes an HTTP request, always silently.
Returns the JSON-deserialized of the response body, or None.
"""
prev = self.verbose
self.verbose = False
retval = self.request_and_show(verb, path, body)
self.verbose = prev
return retval
def request_and_show(self, verb, path, body=None):
""" Makes an HTTP request, optionally prints data about the interaction.
Returns the JSON-deserialized of the response body, or None.
"""
path = '%s%s' % (self.prefix, path.lstrip('/'))
try:
if body is None: self.conn.request(verb, path)
else: self.conn.request(verb, path, body)
except socket.error, e:
print 'Cannot request %r %r: %s' % (verb, path, e)
sys.exit(1)
rl = self.conn.getresponse()
if self.verbose or rl.status//100 != 2:
print '%s %s gave: %s %r' % (verb, path, rl.status, rl.reason)
if rl.status//100 == 2:
if self.verbose:
print 'HEADERS:'
for h, v in rl.getheaders(): print ' ', h, v
print 'CONTENTS:'
body = rl.read()
if self.verbose:
for line in body.splitlines():
print ' ', line
print
return simplejson.loads(body)
else:
return None
def get_cookies(self):
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
opener.open("http://%s:%s" % (self.host, self.port))
return dict((c.name, c.value) for c in self.cj)
def execute(self):
if self.gae is not None: time.sleep(3) # wait for GAE server to start
try:
self.conn = httplib.HTTPConnection(self.host, self.port, strict=True)
except socket.error, e:
print "Cannot connect: %s"
sys.exit(1)
self.f(self, self.verbose)
if self.gae is not None: os.kill(self.gae.pid, signal.SIGINT)
print 'All done OK!'
| Python |
"""
Implementation of JSONEncoder
"""
import re
try:
from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
pass
# Characters that must be escaped inside a JSON string when producing
# unicode output: control characters, backslash and double quote.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# For ASCII-only output: escape backslash, quote, and anything non-printable.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects bytes with the high bit set, i.e. possible UTF-8 multibyte data.
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Map from a character to its JSON escape sequence.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
# Remaining control characters get generic \u00XX escapes.
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
# Hook for formatting ordinary floats; repr() by default.
FLOAT_REPR = repr
def floatstr(o, allow_nan=True):
    """Format the float *o* as JSON text, spelling NaN/infinities the
    JavaScript way (or raising ValueError when allow_nan is false)."""
    # Check for specials via comparisons rather than library calls: this
    # avoids depending on processor/platform internals (o != o is only
    # true for NaN).
    if o != o:
        special = 'NaN'
    elif o == INFINITY:
        special = 'Infinity'
    elif o == -INFINITY:
        special = '-Infinity'
    else:
        return FLOAT_REPR(o)
    if allow_nan:
        return special
    raise ValueError("Out of range float values are not JSON compliant: %r"
        % (o,))
def encode_basestring(s):
    """
    Return a JSON representation of a Python string
    """
    # Swap each special character for its escape sequence in one pass.
    escaped = ESCAPE.sub(lambda match: ESCAPE_DCT[match.group(0)], s)
    return '"' + escaped + '"'
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON string literal for *s* (pure-Python
    fallback for the C speedup).  Non-ASCII characters become \\uXXXX
    escapes; characters beyond the BMP are emitted as surrogate pairs."""
    # Decode UTF-8 byte strings first so multibyte characters escape as
    # single code points rather than per-byte garbage.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Prefer the C-accelerated encoder when the _speedups import at the top of
# this module succeeded; a NameError here means it did not.
try:
    encode_basestring_ascii = c_encode_basestring_ascii
except NameError:
    encode_basestring_ascii = py_encode_basestring_ascii
class JSONEncoder(object):
    """
    Extensible JSON <http://json.org> encoder for Python data structures.
    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    __all__ = ['__init__', 'default', 'encode', 'iterencode']
    # Class-level defaults; overridden per-instance via the ``separators``
    # constructor argument.
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None):
        """
        Constructor for JSONEncoder, with sensible defaults.
        If skipkeys is False, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.
        If ensure_ascii is True, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.
        If check_circular is True, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.
        If allow_nan is True, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.
        If sort_keys is True, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.
        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level.  An indent level of 0 will only insert newlines.
        None is the most compact representation.
        If specified, separators should be a (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.
        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.
        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        # Mutable pretty-printing state, bumped/restored while recursing.
        # NOTE(review): shared per-instance state -- presumably instances are
        # not iterencode()d concurrently; confirm before reusing across threads.
        self.current_indent_level = 0
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            self.default = default
        self.encoding = encoding
    def _newline_indent(self):
        # One newline plus the indentation for the current nesting depth.
        return '\n' + (' ' * (self.indent * self.current_indent_level))
    def _iterencode_list(self, lst, markers=None):
        # Yield the JSON chunks for a list/tuple.  ``markers`` maps
        # id(container) -> container for circular-reference detection; the
        # entry is removed once the container has been fully emitted.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        yield '['
        if self.indent is not None:
            self.current_indent_level += 1
            newline_indent = self._newline_indent()
            separator = self.item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            separator = self.item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                yield separator
            for chunk in self._iterencode(value, markers):
                yield chunk
        if newline_indent is not None:
            self.current_indent_level -= 1
            yield self._newline_indent()
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _iterencode_dict(self, dct, markers=None):
        # Yield the JSON chunks for a dict; see _iterencode_list for the
        # meaning of ``markers``.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        key_separator = self.key_separator
        if self.indent is not None:
            self.current_indent_level += 1
            newline_indent = self._newline_indent()
            item_separator = self.item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = self.item_separator
        first = True
        if self.ensure_ascii:
            encoder = encode_basestring_ascii
        else:
            encoder = encode_basestring
        allow_nan = self.allow_nan
        if self.sort_keys:
            keys = dct.keys()
            keys.sort()
            items = [(k, dct[k]) for k in keys]
        else:
            items = dct.iteritems()
        _encoding = self.encoding
        _do_decode = (_encoding is not None
            and not (_encoding == 'utf-8'))
        for key, value in items:
            # Coerce each key to a string, since JSON object keys must be
            # strings; non-coercible keys are skipped or rejected below.
            if isinstance(key, str):
                if _do_decode:
                    key = key.decode(_encoding)
            elif isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = floatstr(key, allow_nan)
            elif isinstance(key, (int, long)):
                key = str(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif self.skipkeys:
                continue
            else:
                raise TypeError("key %r is not a string" % (key,))
            if first:
                first = False
            else:
                yield item_separator
            yield encoder(key)
            yield key_separator
            for chunk in self._iterencode(value, markers):
                yield chunk
        if newline_indent is not None:
            self.current_indent_level -= 1
            yield self._newline_indent()
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(self, o, markers=None):
        # Dispatch on o's type, yielding JSON text chunks; anything not
        # natively supported falls through to self.default via
        # _iterencode_default.
        if isinstance(o, basestring):
            if self.ensure_ascii:
                encoder = encode_basestring_ascii
            else:
                encoder = encode_basestring
            _encoding = self.encoding
            if (_encoding is not None and isinstance(o, str)
                    and not (_encoding == 'utf-8')):
                o = o.decode(_encoding)
            yield encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield floatstr(o, self.allow_nan)
        elif isinstance(o, (list, tuple)):
            for chunk in self._iterencode_list(o, markers):
                yield chunk
        elif isinstance(o, dict):
            for chunk in self._iterencode_dict(o, markers):
                yield chunk
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            for chunk in self._iterencode_default(o, markers):
                yield chunk
            if markers is not None:
                del markers[markerid]
    def _iterencode_default(self, o, markers=None):
        # Convert o with the user hook, then encode whatever it returned.
        newobj = self.default(o)
        return self._iterencode(newobj, markers)
    def default(self, o):
        """
        Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).
        For example, to support arbitrary iterators, you could
        implement default like this::
            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError("%r is not JSON serializable" % (o,))
    def encode(self, o):
        """
        Return a JSON string representation of a Python data structure.
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = list(self.iterencode(o))
        return ''.join(chunks)
    def iterencode(self, o):
        """
        Encode the given object and yield each string
        representation as available.
        For example::
            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        return self._iterencode(o, markers)
# Public API of this (sub)module.
__all__ = ['JSONEncoder']
| Python |
"""
Implementation of JSONDecoder
"""
import re
import sys
from simplejson.scanner import Scanner, pattern
try:
from simplejson._speedups import scanstring as c_scanstring
except ImportError:
pass
# Regex flags shared by every scanner pattern compiled in this module.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
import struct
import sys
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
# Float specials decoded once at module import time.
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
    """Map the character offset *pos* in *doc* to a (line, column) pair.

    Lines are 1-based.  On the first line the column equals the raw offset
    (0-based); on later lines it is 1-based -- this quirk matches the
    original behavior.
    """
    line = doc.count('\n', 0, pos) + 1
    if line > 1:
        col = pos - doc.rindex('\n', 0, pos)
    else:
        col = pos
    return line, col
def errmsg(msg, doc, pos, end=None):
    """Build a parse-error message locating *pos* (and optionally the span
    up to *end*) within *doc* as line/column coordinates."""
    lineno, colno = linecol(doc, pos)
    if end is None:
        fmt = '%s: line %d column %d (char %d)'
        return fmt % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
    return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
# Literal JSON tokens and their default Python translations.  NaN and the
# infinities are extensions beyond the strict JSON specification.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
    'true': True,
    'false': False,
    'null': None,
}
def JSONConstant(match, context, c=_CONSTANTS):
    """Decode one of the literal JSON constants, honoring the context's
    optional parse_constant hook.  Returns (value, None)."""
    token = match.group(0)
    hook = getattr(context, 'parse_constant', None)
    if hook is None:
        result = c[token]
    else:
        result = hook(token)
    return result, None
pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)
def JSONNumber(match, context):
    """Convert a matched JSON number token into an int or float (or into
    whatever the context's parse_int/parse_float hooks produce).
    Returns (value, None)."""
    # Re-match with the grouped regex to split integer/fraction/exponent.
    match = JSONNumber.regex.match(match.string, *match.span())
    integer, frac, exp = match.groups()
    if frac or exp:
        convert = getattr(context, 'parse_float', None) or float
        number = convert(integer + (frac or '') + (exp or ''))
    else:
        convert = getattr(context, 'parse_int', None) or int
        number = convert(integer)
    return number, None
pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber)
# Matches a run of plain characters followed by one terminator needing
# attention: a closing quote, a backslash, or a raw control character.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character escape sequences and their decoded values.
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
# Encoding assumed for str input when the caller supplies none.
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the JSON string literal in *s* whose opening quote is at
    index end-1; return (decoded unicode value, index past closing quote).

    strict=False tolerates raw control characters inside the string.
    _b and _m are locals-bound defaults for speed; don't pass them.
    """
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        # Grab literal text up to the next '"', '\' or control character.
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        if terminator == '"':
            break
        elif terminator != '\\':
            # A bare control character: error in strict mode, kept verbatim
            # otherwise.
            if strict:
                raise ValueError(errmsg("Invalid control character %r at", s, end))
            else:
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        if esc != 'u':
            # One-character escape such as \n, \t or \".
            try:
                m = _b[esc]
            except KeyError:
                raise ValueError(
                    errmsg("Invalid \\escape: %r" % (esc,), s, end))
            end += 1
        else:
            # \uXXXX escape, possibly the first half of a surrogate pair
            # (only combined on wide unicode builds).
            esc = s[end + 1:end + 5]
            next_end = end + 5
            msg = "Invalid \\uXXXX escape"
            try:
                if len(esc) != 4:
                    raise ValueError
                uni = int(esc, 16)
                if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                    msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                    if not s[end + 5:end + 7] == '\\u':
                        raise ValueError
                    esc2 = s[end + 7:end + 11]
                    if len(esc2) != 4:
                        raise ValueError
                    uni2 = int(esc2, 16)
                    uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                    next_end += 6
                m = unichr(uni)
            except ValueError:
                raise ValueError(errmsg(msg, s, end))
            end = next_end
        _append(m)
    return u''.join(chunks), end
# Use speedup: prefer the C scanstring from _speedups when its import at
# the top of this module succeeded; a NameError means it did not.
try:
    scanstring = c_scanstring
except NameError:
    scanstring = py_scanstring
def JSONString(match, context):
    """Scan the string literal whose opening quote was just matched,
    delegating to scanstring with the context's encoding/strict options."""
    return scanstring(match.string, match.end(),
                      getattr(context, 'encoding', None),
                      getattr(context, 'strict', True))
pattern(r'"')(JSONString)
# Matches (possibly empty) runs of whitespace between JSON tokens.
WHITESPACE = re.compile(r'\s*', FLAGS)
def JSONObject(match, context, _w=WHITESPACE.match):
    """Parse the JSON object whose '{' was just matched; return
    (dict -- or the context's object_hook result -- and the end index)."""
    pairs = {}
    s = match.string
    end = _w(s, match.end()).end()
    nextchar = s[end:end + 1]
    # Trivial empty object
    if nextchar == '}':
        return pairs, end + 1
    if nextchar != '"':
        raise ValueError(errmsg("Expecting property name", s, end))
    end += 1
    encoding = getattr(context, 'encoding', None)
    strict = getattr(context, 'strict', True)
    iterscan = JSONScanner.iterscan
    while True:
        # One key:value pair per iteration; commas continue, '}' ends.
        key, end = scanstring(s, end, encoding, strict)
        end = _w(s, end).end()
        if s[end:end + 1] != ':':
            raise ValueError(errmsg("Expecting : delimiter", s, end))
        end = _w(s, end + 1).end()
        try:
            value, end = iterscan(s, idx=end, context=context).next()
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs[key] = value
        end = _w(s, end).end()
        nextchar = s[end:end + 1]
        end += 1
        if nextchar == '}':
            break
        if nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
        end = _w(s, end).end()
        nextchar = s[end:end + 1]
        end += 1
        if nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end - 1))
    # Give the user hook a chance to replace the plain dict.
    object_hook = getattr(context, 'object_hook', None)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
pattern(r'{')(JSONObject)
def JSONArray(match, context, _w=WHITESPACE.match):
    """Parse the JSON array whose '[' was just matched; return
    (list of decoded values, end index)."""
    values = []
    s = match.string
    end = _w(s, match.end()).end()
    # Look-ahead for trivial empty array
    nextchar = s[end:end + 1]
    if nextchar == ']':
        return values, end + 1
    iterscan = JSONScanner.iterscan
    while True:
        # One element per iteration; commas continue, ']' ends.
        try:
            value, end = iterscan(s, idx=end, context=context).next()
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        values.append(value)
        end = _w(s, end).end()
        nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        if nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end))
        end = _w(s, end).end()
    return values, end
pattern(r'\[')(JSONArray)
# Patterns tried, in order, when scanning for any JSON value.
ANYTHING = [
    JSONObject,
    JSONArray,
    JSONString,
    JSONConstant,
    JSONNumber,
]
# Module-level scanner shared by the container parsers above.
JSONScanner = Scanner(ANYTHING)
class JSONDecoder(object):
    """
    Simple JSON <http://json.org> decoder
    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """
    # Class-level scanner; instances pass themselves as the parse context.
    _scanner = Scanner(ANYTHING)
    __all__ = ['__init__', 'decode', 'raw_decode']
    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True):
        """
        ``encoding`` determines the encoding used to interpret any ``str``
        objects decoded by this instance (utf-8 by default).  It has no
        effect when decoding ``unicode`` objects.
        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as ``unicode``.
        ``object_hook``, if specified, will be called with the result
        of every JSON object decoded and its return value will be used in
        place of the given ``dict``.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).
        ``parse_float``, if specified, will be called with the string
        of every JSON float to be decoded.  By default this is equivalent to
        float(num_str).  This can be used to use another datatype or parser
        for JSON floats (e.g. decimal.Decimal).
        ``parse_int``, if specified, will be called with the string
        of every JSON int to be decoded.  By default this is equivalent to
        int(num_str).  This can be used to use another datatype or parser
        for JSON integers (e.g. float).
        ``parse_constant``, if specified, will be called with one of the
        following strings: -Infinity, Infinity, NaN, null, true, false.
        This can be used to raise an exception if invalid JSON numbers
        are encountered.
        ``strict``, when true (the default), rejects raw control characters
        inside string literals.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.parse_float = parse_float
        self.parse_int = parse_int
        self.parse_constant = parse_constant
        self.strict = strict
    def decode(self, s, _w=WHITESPACE.match):
        """
        Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)
        """
        # _w is a locals-bound default for speed; do not pass it.
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        # Unlike raw_decode, reject any trailing non-whitespace data.
        if end != len(s):
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return obj
    def raw_decode(self, s, **kw):
        """
        Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
        with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.
        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        kw.setdefault('context', self)
        try:
            obj, end = self._scanner.iterscan(s, **kw).next()
        except StopIteration:
            raise ValueError("No JSON object could be decoded")
        return obj, end
# Public API of this (sub)module.
__all__ = ['JSONDecoder']
| Python |
r"""
A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson
>>> simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson
>>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> simplejson.loads('"\\"foo\\bar"')
u'"foo\x08ar'
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io)
[u'streaming API']
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> simplejson.loads('1.1', parse_float=decimal.Decimal)
Decimal("1.1")
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
>>> dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
Using simplejson from the shell to validate and
pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
"""
# Package version and the public API exported by ``from simplejson import *``.
__version__ = '1.9.2'
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONEncoder',
]
# When executed as ``python -msimplejson`` this module is __main__, so the
# relative imports below would fail; import the submodules absolutely and
# warn that the .tool entry point should be used instead.
if __name__ == '__main__':
    import warnings
    # BUG FIX: the warning used to misspell the replacement command as
    # "python -msiplejson.tool".
    warnings.warn('python -msimplejson is deprecated, use python -msimplejson.tool', DeprecationWarning)
    from simplejson.decoder import JSONDecoder
    from simplejson.encoder import JSONEncoder
else:
    # Normal package import: implicit-relative (Python 2) submodule imports.
    from decoder import JSONDecoder
    from encoder import JSONEncoder
# Shared encoder reused by dump()/dumps() whenever every option is at its
# default value (the common case), avoiding per-call construction.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """
    Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    Options (all keyword):

    - ``skipkeys``: skip (rather than raise ``TypeError`` for) dict keys
      that are not of a basic type (``str``, ``unicode``, ``int``, ``long``,
      ``float``, ``bool``, ``None``).
    - ``ensure_ascii``: when false, some chunks written to ``fp`` may be
      ``unicode`` instances; ``fp.write()`` must cope with that (as in
      ``codecs.getwriter()``) or an error is likely.
    - ``check_circular``: when false, the circular-reference check for
      container types is skipped and a cycle causes ``OverflowError``
      (or worse).
    - ``allow_nan``: when false, serializing out-of-range floats (``nan``,
      ``inf``, ``-inf``) raises ``ValueError`` instead of using their
      JavaScript spellings (``NaN``, ``Infinity``, ``-Infinity``).
    - ``indent``: non-negative indent level for pretty-printing; ``0``
      inserts only newlines; ``None`` is the most compact representation.
    - ``separators``: an ``(item_separator, dict_separator)`` pair used in
      place of the default ``(', ', ': ')``; ``(',', ':')`` is most compact.
    - ``encoding``: character encoding for ``str`` instances (UTF-8).
    - ``default``: function returning a serializable version of otherwise
      unserializable objects (or raising ``TypeError``).
    - ``cls``: a ``JSONEncoder`` subclass (e.g. one overriding
      ``.default()``) to use instead of the default encoder.
    """
    # Fast path: every option at its default lets us reuse the shared
    # module-level encoder instance.
    use_cached = (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw)
    if use_cached:
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            encoder_cls = JSONEncoder
        else:
            encoder_cls = cls
        iterable = encoder_cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """
    Serialize ``obj`` to a JSON formatted ``str``.

    Options (all keyword):

    - ``skipkeys``: skip (rather than raise ``TypeError`` for) dict keys
      that are not of a basic type (``str``, ``unicode``, ``int``, ``long``,
      ``float``, ``bool``, ``None``).
    - ``ensure_ascii``: when false, the return value is a ``unicode``
      instance instead of an ASCII-escaped ``str``.
    - ``check_circular``: when false, the circular-reference check for
      container types is skipped and a cycle causes ``OverflowError``
      (or worse).
    - ``allow_nan``: when false, serializing out-of-range floats (``nan``,
      ``inf``, ``-inf``) raises ``ValueError`` instead of using their
      JavaScript spellings (``NaN``, ``Infinity``, ``-Infinity``).
    - ``indent``: non-negative indent level for pretty-printing; ``0``
      inserts only newlines; ``None`` is the most compact representation.
    - ``separators``: an ``(item_separator, dict_separator)`` pair used in
      place of the default ``(', ', ': ')``; ``(',', ':')`` is most compact.
    - ``encoding``: character encoding for ``str`` instances (UTF-8).
    - ``default``: function returning a serializable version of otherwise
      unserializable objects (or raising ``TypeError``).
    - ``cls``: a ``JSONEncoder`` subclass (e.g. one overriding
      ``.default()``) to use instead of the default encoder.
    """
    # Fast path: every option at its default lets us reuse the shared
    # module-level encoder instance.
    use_cached = (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw)
    if use_cached:
        return _default_encoder.encode(obj)
    if cls is None:
        encoder_cls = JSONEncoder
    else:
        encoder_cls = cls
    return encoder_cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        **kw).encode(obj)
# Shared decoder reused by loads() whenever every option is at its default.
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize a JSON document read from the file-like object ``fp``.

    ``fp`` must support ``.read()``. If its contents use an ASCII-based
    encoding other than utf-8 (e.g. latin-1), pass the ``encoding`` name;
    non-ASCII-based encodings (such as UCS-2) are not allowed and should be
    decoded to ``unicode`` first (or wrapped with
    ``codecs.getreader(fp)(encoding)``) and passed to ``loads()``.

    ``object_hook``, if given, is called with the result of every object
    literal decode (a ``dict``) and its return value is used instead --
    useful for custom decoders such as JSON-RPC class hinting.

    A custom ``JSONDecoder`` subclass may be supplied via ``cls``.
    """
    # Slurp the whole document and delegate the real work to loads().
    document = fp.read()
    return loads(document,
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` JSON document) to a Python object.

    If ``s`` is a ``str`` encoded with an ASCII-based encoding other than
    utf-8 (e.g. latin-1), pass the ``encoding`` name; non-ASCII-based
    encodings (such as UCS-2) are not allowed and should be decoded to
    ``unicode`` first.

    ``object_hook`` is called with every decoded object literal (a ``dict``)
    and its return value is used instead of the ``dict``.

    ``parse_float`` / ``parse_int`` are called with the string of every JSON
    float / int to be decoded (defaults equivalent to ``float``/``int``),
    allowing alternative datatypes such as ``decimal.Decimal``.

    ``parse_constant`` is called with one of: -Infinity, Infinity, NaN,
    null, true, false -- e.g. to reject invalid JSON numbers.

    A custom ``JSONDecoder`` subclass may be supplied via ``cls``.
    """
    default_ok = (cls is None and encoding is None and object_hook is None
                  and parse_int is None and parse_float is None
                  and parse_constant is None and not kw)
    if default_ok:
        # Fast path: nothing customized, reuse the cached module decoder.
        return _default_decoder.decode(s)
    decoder_cls = JSONDecoder if cls is None else cls
    # Only forward hooks the caller actually supplied.
    for name, hook in (('object_hook', object_hook),
                       ('parse_float', parse_float),
                       ('parse_int', parse_int),
                       ('parse_constant', parse_constant)):
        if hook is not None:
            kw[name] = hook
    return decoder_cls(encoding=encoding, **kw).decode(s)
#
# Compatibility cruft from other libraries
#
def decode(s):
    """Compatibility hook for demjson / python-cjson. Use loads(s) instead."""
    from warnings import warn
    warn("simplejson.loads(s) should be used instead of decode(s)",
         DeprecationWarning)
    return loads(s)
def encode(obj):
    """Compatibility hook for demjson / python-cjson. Use dumps(s) instead."""
    from warnings import warn
    warn("simplejson.dumps(s) should be used instead of encode(s)",
         DeprecationWarning)
    return dumps(obj)
def read(s):
    """Compatibility hook for jsonlib / JsonUtils / python-json / json-py.

    Use loads(s) instead.
    """
    from warnings import warn
    warn("simplejson.loads(s) should be used instead of read(s)",
         DeprecationWarning)
    return loads(s)
def write(obj):
    """Compatibility hook for jsonlib / JsonUtils / python-json / json-py.

    Use dumps(s) instead.
    """
    from warnings import warn
    warn("simplejson.dumps(s) should be used instead of write(s)",
         DeprecationWarning)
    return dumps(obj)
# Running the package directly (``python -msimplejson``) delegates to the
# pretty-printing command-line tool.
if __name__ == '__main__':
    import simplejson.tool
    simplejson.tool.main()
| Python |
r"""
Using simplejson from the shell to validate and
pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson
Expecting property name: line 1 column 2 (char 2)
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
"""
import simplejson
#
# Pretty printer:
# curl http://mochikit.com/examples/ajax_tables/domains.json | python -msimplejson.tool
#
def main():
    """Command-line entry point: read a JSON document, pretty-print it.

    Usage: prog [infile [outfile]] -- defaults to stdin/stdout.
    Exits via SystemExit with the parse error message on invalid JSON.
    """
    import sys
    if len(sys.argv) == 1:
        infile = sys.stdin
        outfile = sys.stdout
    elif len(sys.argv) == 2:
        infile = open(sys.argv[1], 'rb')
        outfile = sys.stdout
    elif len(sys.argv) == 3:
        infile = open(sys.argv[1], 'rb')
        outfile = open(sys.argv[2], 'wb')
    else:
        raise SystemExit("%s [infile [outfile]]" % (sys.argv[0],))
    try:
        obj = simplejson.load(infile)
    except ValueError, e:  # Python 2 except syntax (this is a py2 module)
        raise SystemExit(e)
    # sort_keys + indent=4 gives canonical, diff-friendly output.
    simplejson.dump(obj, outfile, sort_keys=True, indent=4)
    outfile.write('\n')
if __name__ == '__main__':
    main()
| Python |
"""
Iterator based sre token scanner
"""
import re
from re import VERBOSE, MULTILINE, DOTALL
import sre_parse
import sre_compile
import sre_constants
from sre_constants import BRANCH, SUBPATTERN
__all__ = ['Scanner', 'pattern']
FLAGS = (VERBOSE | MULTILINE | DOTALL)
class Scanner(object):
    """Combine a lexicon of token patterns into a single alternation regex.

    Each lexicon entry is a callable carrying a ``pattern`` attribute (see
    the ``pattern`` decorator below). The patterns are merged into one
    BRANCH pattern via the undocumented sre_parse/sre_compile internals so
    a single C-level scan dispatches to the matching token's action.
    """
    def __init__(self, lexicon, flags=FLAGS):
        # actions[0] stays None so that match.lastindex (1-based group
        # number) indexes this list directly.
        self.actions = [None]
        # Combine phrases into a compound pattern
        s = sre_parse.Pattern()
        s.flags = flags
        p = []
        for idx, token in enumerate(lexicon):
            phrase = token.pattern
            try:
                # Wrap each phrase in its own numbered group (idx + 1).
                subpattern = sre_parse.SubPattern(s,
                    [(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))])
            except sre_constants.error:
                raise
            p.append(subpattern)
            self.actions.append(token)
        s.groups = len(p) + 1 # NOTE(guido): Added to make SRE validation work
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)
    def iterscan(self, string, idx=0, context=None):
        """Yield (action_result, end_index) for each token matched in string.

        Each action is called as ``action(match, context)``; if its second
        return value is not None it repositions ("fast forwards") the
        scanner. Iteration stops at the first stalled match (no progress)
        or when no further match is found.
        """
        match = self.scanner.scanner(string, idx).match
        actions = self.actions
        lastend = idx
        end = len(string)
        while True:
            m = match()
            if m is None:
                break
            matchbegin, matchend = m.span()
            if lastend == matchend:
                # zero-width / stalled match: bail out to avoid looping forever
                break
            action = actions[m.lastindex]
            if action is not None:
                rval, next_pos = action(m, context)
                if next_pos is not None and next_pos != matchend:
                    # "fast forward" the scanner
                    matchend = next_pos
                    match = self.scanner.scanner(string, matchend).match
                yield rval, matchend
            lastend = matchend
def pattern(pattern, flags=FLAGS):
    """Return a decorator tagging a callable with a token pattern.

    The decorated function gains two attributes: ``pattern`` (the raw
    regex source, read by Scanner.__init__) and ``regex`` (the pattern
    compiled with ``flags``).
    """
    def attach(fn):
        fn.regex = re.compile(pattern, flags)
        fn.pattern = pattern
        return fn
    return attach
# Much the below modified from the gae-json-rest project
import plistlib
import logging
import re
import touchengineutil
def id_of(entity):
    """Wrap an entity's numeric ID in a one-key dict.

    Args:
      entity: an entity
    Returns:
      {'id': <numeric id>} for the entity
    """
    return {'id': touchengineutil.id_of(entity)}
def send_plist(response_obj, pdata):
    """ Send data in Plist form to an HTTP-response object.
    Args:
      response_obj: an HTTP response object
      pdata: a dict or list in correct 'plistable' form
    Side effects:
      sets the content type to application/xml and writes the XML plist
      form of pdata on response_obj.out
    """
    response_obj.content_type = 'application/xml'
    #logging.info("send_plist pdata = %s" %(pdata,))
    response_obj.out.write(plistlib.writePlistToString(pdata))
def entity_to_dict(entity):
    """ Make a plistable dict (a dictObj) given an entity.
    Args:
      entity: an entity
    Returns:
      a dict holding the entity's numeric id, its datastore key string, and
      each non-None property converted to a string via the model's
      <property>_to_string static method
    """
    model = type(entity)
    dictObj = id_of(entity)
    dictObj["key"] = str(entity.key())
    props = touchengineutil.allProperties(model)
    for property_name, property_value in props:
        value_in_entity = getattr(entity, property_name, None)
        if value_in_entity is not None:
            # NOTE(review): raises AttributeError if the model lacks a
            # <property>_to_string converter -- confirm all models define them.
            to_string = getattr(model, property_name + '_to_string')
            #logging.info("type(value_in_entity) = %s" %(type(value_in_entity),))
            #logging.info("value_in_entity = %s" %(value_in_entity,))
            dictObj[property_name] = to_string(value_in_entity)
    return dictObj
"""plistlib.py -- a tool to generate and parse MacOSX .plist files.
The PropertyList (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.
To write out a plist file, use the writePlist(rootObject, pathOrFile)
function. 'rootObject' is the top level object, 'pathOrFile' is a
filename or a (writable) file object.
To parse a plist from a file, use the readPlist(pathOrFile) function,
with a file name or a (readable) file object as the only argument. It
returns the top level object (again, usually a dictionary).
To work with plist data in strings, you can use readPlistFromString()
and writePlistToString().
Values can be strings, integers, floats, booleans, tuples, lists,
dictionaries, Data or datetime.datetime objects. String values (including
dictionary keys) may be unicode strings -- they will be written out as
UTF-8.
The <data> plist type is supported through the Data class. This is a
thin wrapper around a Python string.
Generate Plist example:
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict=dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData = Data("<binary gunk>"),
someMoreData = Data("<lots of binary gunk>" * 10),
aDate = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
)
# unicode keys are possible, but a little awkward to use:
pl[u'\xc5benraa'] = "That was a unicode key."
writePlist(pl, fileName)
Parse Plist example:
pl = readPlist(pathOrFile)
print pl["aKey"]
"""
__all__ = [
"readPlist", "writePlist", "readPlistFromString", "writePlistToString",
"readPlistFromResource", "writePlistToResource",
"Plist", "Data", "Dict"
]
# Note: the Plist and Dict classes have been deprecated.
import binascii
import datetime
import time
from cStringIO import StringIO
import re
def readPlist(pathOrFile):
    """Read a .plist file. 'pathOrFile' may either be a file name or a
    (readable) file object. Return the unpacked root object (which
    usually is a dictionary).
    """
    didOpen = 0  # track whether we opened the file and therefore must close it
    if isinstance(pathOrFile, (str, unicode)):
        pathOrFile = open(pathOrFile)
        didOpen = 1
    p = PlistParser()
    rootObject = p.parse(pathOrFile)
    if didOpen:
        pathOrFile.close()
    return rootObject
def writePlist(rootObject, pathOrFile):
    """Write 'rootObject' to a .plist file. 'pathOrFile' may either be a
    file name or a (writable) file object.
    """
    didOpen = 0  # track whether we opened the file and therefore must close it
    if isinstance(pathOrFile, (str, unicode)):
        pathOrFile = open(pathOrFile, "w")
        didOpen = 1
    # PlistWriter emits the XML header; we add the <plist> envelope here.
    writer = PlistWriter(pathOrFile)
    writer.writeln("<plist version=\"1.0\">")
    writer.writeValue(rootObject)
    writer.writeln("</plist>")
    if didOpen:
        pathOrFile.close()
def readPlistFromString(data):
    """Parse plist data held in a string; return the root object."""
    stream = StringIO(data)
    return readPlist(stream)
def writePlistToString(rootObject):
    """Serialize 'rootObject' and return the plist document as a string."""
    buf = StringIO()
    writePlist(rootObject, buf)
    return buf.getvalue()
def readPlistFromResource(path, restype='plst', resid=0):
    """Read plst resource from the resource fork of path.

    Mac-only: uses the Carbon toolbox (classic resource-fork APIs);
    imports are deferred so other platforms can import this module.
    """
    from Carbon.File import FSRef, FSGetResourceForkName
    from Carbon.Files import fsRdPerm
    from Carbon import Res
    fsRef = FSRef(path)
    resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdPerm)
    Res.UseResFile(resNum)
    plistData = Res.Get1Resource(restype, resid).data
    Res.CloseResFile(resNum)
    return readPlistFromString(plistData)
def writePlistToResource(rootObject, path, restype='plst', resid=0):
    """Write 'rootObject' as a plst resource to the resource fork of path.

    Mac-only: uses the Carbon toolbox (classic resource-fork APIs).
    """
    from Carbon.File import FSRef, FSGetResourceForkName
    from Carbon.Files import fsRdWrPerm
    from Carbon import Res
    plistData = writePlistToString(rootObject)
    fsRef = FSRef(path)
    resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdWrPerm)
    Res.UseResFile(resNum)
    # Remove any existing resource of the same type/id before adding ours.
    try:
        Res.Get1Resource(restype, resid).RemoveResource()
    except Res.Error:
        pass
    res = Res.Resource(plistData)
    res.AddResource(restype, resid, '')
    res.WriteResource()
    Res.CloseResFile(resNum)
class DumbXMLWriter:
    """Minimal XML emitter tracking an element stack and indentation level."""

    def __init__(self, file, indentLevel=0, indent="\t"):
        self.file = file
        self.stack = []              # names of open elements, innermost last
        self.indentLevel = indentLevel
        self.indent = indent         # string repeated once per nesting level

    def beginElement(self, element):
        """Open <element> and indent subsequent output one level deeper."""
        self.stack.append(element)
        self.writeln("<%s>" % element)
        self.indentLevel += 1

    def endElement(self, element):
        """Close </element>; must match the innermost open element."""
        assert self.indentLevel > 0
        assert self.stack.pop() == element
        self.indentLevel -= 1
        self.writeln("</%s>" % element)

    def simpleElement(self, element, value=None):
        """Write <element>value</element>, or <element/> when value is None."""
        if value is None:
            self.writeln("<%s/>" % element)
        else:
            escaped = _escapeAndEncode(value)
            self.writeln("<%s>%s</%s>" % (element, escaped, element))

    def writeln(self, line):
        """Emit one line at the current indent; empty lines stay bare."""
        if line:
            self.file.write(self.indentLevel * self.indent + line + "\n")
        else:
            self.file.write("\n")
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units may be omitted with
# a loss of precision)
_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z")
def _dateFromString(s):
order = ('year', 'month', 'day', 'hour', 'minute', 'second')
gd = _dateParser.match(s).groupdict()
lst = []
for key in order:
val = gd[key]
if val is None:
break
lst.append(int(val))
return datetime.datetime(*lst)
def _dateToString(d):
return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (
d.year, d.month, d.day,
d.hour, d.minute, d.second
)
def _dateFromStruct_time(t):
date = datetime.datetime(*t[:6])
return (date)
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _escapeAndEncode(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use plistlib.Data instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text.encode("utf-8") # encode as UTF-8
# Standard XML prolog plus Apple's plist DOCTYPE, emitted by PlistWriter
# before the <plist> element.
PLISTHEADER = """\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
"""
class PlistWriter(DumbXMLWriter):
    """Serialize Python values as plist XML on top of DumbXMLWriter.

    writeHeader controls whether the XML prolog/DOCTYPE is emitted first;
    pukeOnBadContent makes unsupported value types raise TypeError instead
    of being silently dropped.
    """

    def __init__(self, file, indentLevel=0, indent="\t", writeHeader=1, pukeOnBadContent=0):
        if writeHeader:
            file.write(PLISTHEADER)
        DumbXMLWriter.__init__(self, file, indentLevel, indent)
        self.pukeOnBadContent = pukeOnBadContent

    def writeValue(self, value):
        """Dispatch ``value`` to the matching plist element writer."""
        if isinstance(value, (str, unicode, type(None))):
            # None serializes as an empty <string/>.
            self.simpleElement("string", value)
        elif isinstance(value, bool):
            # must switch for bool before int, as bool is a
            # subclass of int...
            if value:
                self.simpleElement("true")
            else:
                self.simpleElement("false")
        elif isinstance(value, (int, long)):
            self.simpleElement("integer", str(value))
        elif isinstance(value, float):
            # repr() preserves full float precision (str() rounds on py2)
            self.simpleElement("real", repr(value))
        elif isinstance(value, dict):
            self.writeDict(value)
        elif isinstance(value, Data):
            self.writeData(value)
        elif isinstance(value, datetime.datetime):
            self.simpleElement("date", _dateToString(value))
        elif isinstance(value, (tuple, list)):
            self.writeArray(value)
        elif isinstance(value, time.struct_time):
            date = _dateFromStruct_time(value)
            self.simpleElement("date", _dateToString(date))
        else:
            if self.pukeOnBadContent:
                raise TypeError("unsuported type: %s" % type(value))
            #self.simpleElement("string", "None")

    def writeData(self, data):
        """Emit a <data> element with base64 content wrapped to fit 76 cols."""
        self.beginElement("data")
        self.indentLevel -= 1
        maxlinelength = 76 - len(self.indent.replace("\t", " " * 8) *
                                 self.indentLevel)
        for line in data.asBase64(maxlinelength).split("\n"):
            if line:
                self.writeln(line)
        self.indentLevel += 1
        self.endElement("data")

    def writeDict(self, d):
        """Emit a <dict>; keys are written in sorted order and must be strings."""
        self.beginElement("dict")
        items = d.items()
        items.sort()
        for key, value in items:
            if not isinstance(key, (str, unicode)):
                raise TypeError("keys must be strings")
            # BUG FIX: a stray `if "bozo" not in key` guard used to suppress
            # the <key> element (while still writing the value), yielding a
            # malformed plist for any key containing "bozo". Every key must
            # be written unconditionally.
            self.simpleElement("key", key)
            self.writeValue(value)
        self.endElement("dict")

    def writeArray(self, array):
        """Emit an <array> element containing each value in order."""
        self.beginElement("array")
        for value in array:
            self.writeValue(value)
        self.endElement("array")
class _InternalDict(dict):
    # This class is needed while Dict is scheduled for deprecation:
    # we only need to warn when a *user* instantiates Dict or when
    # the "attribute notation for dict keys" is used.
    def __getattr__(self, attr):
        # Map attribute access to item lookup; missing key -> AttributeError,
        # successful lookup emits the deprecation warning before returning.
        try:
            value = self[attr]
        except KeyError:
            raise AttributeError, attr
        from warnings import warn
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", PendingDeprecationWarning)
        return value
    def __setattr__(self, attr, value):
        # Attribute assignment stores an item (with a deprecation warning).
        from warnings import warn
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", PendingDeprecationWarning)
        self[attr] = value
    def __delattr__(self, attr):
        # Attribute deletion removes the item; missing key -> AttributeError.
        try:
            del self[attr]
        except KeyError:
            raise AttributeError, attr
        from warnings import warn
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", PendingDeprecationWarning)
class Dict(_InternalDict):
    """Deprecated dict subclass retained only for backward compatibility."""

    def __init__(self, **kwargs):
        from warnings import warn
        warn("The plistlib.Dict class is deprecated, use builtin dict instead",
             PendingDeprecationWarning)
        super(Dict, self).__init__(**kwargs)
class Plist(_InternalDict):
    """This class has been deprecated. Use readPlist() and writePlist()
    functions instead, together with regular dict objects.
    """

    def __init__(self, **kwargs):
        from warnings import warn
        warn("The Plist class is deprecated, use the readPlist() and "
             "writePlist() functions instead", PendingDeprecationWarning)
        super(Plist, self).__init__(**kwargs)

    @classmethod
    def fromFile(cls, pathOrFile):
        """Deprecated. Use the readPlist() function instead."""
        plist = cls()
        plist.update(readPlist(pathOrFile))
        return plist

    def write(self, pathOrFile):
        """Deprecated. Use the writePlist() function instead."""
        writePlist(self, pathOrFile)
def _encodeBase64(s, maxlinelength=76):
# copied from base64.encodestring(), with added maxlinelength argument
maxbinsize = (maxlinelength//4)*3
pieces = []
for i in range(0, len(s), maxbinsize):
chunk = s[i : i + maxbinsize]
pieces.append(binascii.b2a_base64(chunk))
return "".join(pieces)
class Data:
    """Wrapper for binary data, serialized as a base64 <data> element."""
    def __init__(self, data):
        # raw byte string being wrapped
        self.data = data
    def fromBase64(cls, data):
        # base64.decodestring just calls binascii.a2b_base64;
        # it seems overkill to use both base64 and binascii.
        return cls(binascii.a2b_base64(data))
    fromBase64 = classmethod(fromBase64)
    def asBase64(self, maxlinelength=76):
        # Base64 text with lines wrapped at maxlinelength characters.
        return _encodeBase64(self.data, maxlinelength)
    def __cmp__(self, other):
        # Python 2 three-way comparison: compare payloads with other Data or
        # str; anything else falls back to identity ordering.
        if isinstance(other, self.__class__):
            return cmp(self.data, other.data)
        elif isinstance(other, str):
            return cmp(self.data, other)
        else:
            return cmp(id(self), id(other))
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self.data))
class PlistParser:
    """Expat-driven parser turning plist XML into Python objects.

    Maintains a stack of open containers; begin_<tag>/end_<tag> handlers
    are looked up by name from the generic expat callbacks.
    """
    def __init__(self):
        self.stack = []         # open containers (dicts/lists), innermost last
        self.currentKey = None  # pending <key> text awaiting its value
        self.root = None        # top-level object once parsing completes
    def parse(self, fileobj):
        """Parse a plist document from ``fileobj``; return the root object."""
        from xml.parsers.expat import ParserCreate
        parser = ParserCreate()
        parser.StartElementHandler = self.handleBeginElement
        parser.EndElementHandler = self.handleEndElement
        parser.CharacterDataHandler = self.handleData
        parser.ParseFile(fileobj)
        return self.root
    def handleBeginElement(self, element, attrs):
        # Reset the text accumulator, then dispatch to begin_<element> if any.
        self.data = []
        handler = getattr(self, "begin_" + element, None)
        if handler is not None:
            handler(attrs)
    def handleEndElement(self, element):
        # Dispatch to end_<element> if defined; unknown elements are ignored.
        handler = getattr(self, "end_" + element, None)
        if handler is not None:
            handler()
    def handleData(self, data):
        # expat may deliver character data in several chunks; collect them all.
        self.data.append(data)
    def addObject(self, value):
        """Attach a decoded value to the current container (or as root)."""
        if self.currentKey is not None:
            self.stack[-1][self.currentKey] = value
            self.currentKey = None
        elif not self.stack:
            # this is the root object
            self.root = value
        else:
            self.stack[-1].append(value)
    def getData(self):
        """Join and clear the accumulated text; prefer a plain ASCII str."""
        data = "".join(self.data)
        try:
            data = data.encode("ascii")
        except UnicodeError:
            pass
        self.data = []
        return data
    # element handlers
    def begin_dict(self, attrs):
        d = _InternalDict()
        self.addObject(d)
        self.stack.append(d)
    def end_dict(self):
        self.stack.pop()
    def end_key(self):
        self.currentKey = self.getData()
    def begin_array(self, attrs):
        a = []
        self.addObject(a)
        self.stack.append(a)
    def end_array(self):
        self.stack.pop()
    def end_true(self):
        self.addObject(True)
    def end_false(self):
        self.addObject(False)
    def end_integer(self):
        self.addObject(int(self.getData()))
    def end_real(self):
        self.addObject(float(self.getData()))
    def end_string(self):
        self.addObject(self.getData())
    def end_data(self):
        self.addObject(Data.fromBase64(self.getData()))
    def end_date(self):
        self.addObject(_dateFromString(self.getData()))
| Python |
#!/usr/bin/env python
import logging
import time
import re
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.db import Key
import cookutil
import touchengineutil
import plistutil
# RE to match: optional /, classname, optional /, ID of 0+ numeric digits
CLASSNAME_ID_RE = re.compile(r'^/?(\w+)/?(\d*)$')
# TODO: queries, methods, schemas, and MUCH better error-handling!-)
def path_to_classname_and_id(path):
    """Split a request path into a (classname, idstring) pair.

    Args:
      path: a path string to analyze
    Returns:
      (None, '') when the path does not match CLASSNAME_ID_RE, otherwise
      (classname, idstring) where idstring may be '' or a string of digits.
    """
    match = CLASSNAME_ID_RE.match(path)
    if match is None:
        return (None, '')
    return match.groups()
class PlistHandler(webapp.RequestHandler, cookutil.CookieMixin):
    """REST-style GAE handler serving datastore entities as XML plists.

    URL scheme (after removing the stripFromURL prefix):
      /              -> list of all registered model class names
      /Class         -> ids (or full entities when short is False) of Class
      /Class/123     -> the plist form of that single entity
    """

    # URL prefix removed before parsing "/Classname/id"; subclasses override.
    stripFromURL = ''

    def _serve(self, data):
        """Bump the 'counter' cookie and write ``data`` as a plist response."""
        counter = self.get_cookie('counter')
        if counter: self.set_cookie('counter', str(int(counter) + 1))
        else: self.set_cookie('counter', '0')
        return plistutil.send_plist(self.response, data)

    def _get_model_and_entity(self, need_model, need_id):
        """ Analyze self.request.path to get model and entity.
        Args:
          need_model: bool: if True, fail if classname is missing
          need_id: bool: if True, fail if ID is missing
        Returns 3-item tuple:
          failed: bool: True iff has failed
          model: class object or None
          entity: instance of model or None
        """
        # BUG FIX: str.lstrip strips a *character set*, not a prefix -- e.g.
        # 'plist'.lstrip('/plist') would also eat leading letters of the
        # class name. Remove stripFromURL as a literal prefix instead.
        path = self.request.path
        if self.stripFromURL and path.startswith(self.stripFromURL):
            path = path[len(self.stripFromURL):]
        logging.info(u'path = %s (stripFromURL = %s)' %(path, self.stripFromURL))
        classname, strid = path_to_classname_and_id(path)
        self._classname = classname
        if not classname:
            if need_model:
                self.response.set_status(400, 'Cannot do it without a model.')
                logging.info(u'_get_model_and_entity 400, Cannot do it without a model.')
            return need_model, None, None
        model = touchengineutil.modelClassFromName(classname)
        if model is None:
            self.response.set_status(400, 'Model %r not found' % classname)
            logging.info(u'_get_model_and_entity 400, Model %r not found' % classname)
            return True, None, None
        if not strid:
            if need_id:
                self.response.set_status(400, 'Cannot do it without an ID.')
                logging.info(u'_get_model_and_entity 400, Cannot do it without an ID.')
            return need_id, model, None
        try:
            numid = int(strid)
        except (TypeError, ValueError):
            # BUG FIX: int() raises ValueError (not TypeError) for a
            # non-numeric string, so the 400 path could never trigger.
            self.response.set_status(400, 'ID %r is not numeric.' % strid)
            logging.info(u'_get_model_and_entity 400, ID %r is not numeric.' % strid)
            return True, model, None
        else:
            entity = model.get_by_id(numid)
            if entity is None:
                self.response.set_status(404, "Entity %s not found" % self.request.path)
                logging.info(u'_get_model_and_entity 400, Entity %s not found' % self.request.path)
                return True, model, None
        logging.info(u'_get_model_and_entity model:%s entity:%s', model, entity)
        return False, model, entity

    def _get(self, short=True, limit=None, afterKey=None, orderBy=None):
        """ Get Plist data for model names, entity IDs of a model, or an entity.
        Depending on the request path, serve as a plist to the response object:
        - for a path of /classname/id, a plist for that entity
        - for a path of /classname, a list of id-only strings for that model
          or a list of all entities (if short is False)
        - for a path of /, a list of all model class names, which allows the API
          to be introspected
        this needs some sanitization to keep from throwing exceptions all over
        the place when the user sends a string that makes us puke.
        """
        # BUG FIX: the request counter previously read cookie 'coon' but
        # wrote 'count', so it never advanced; use one consistent name.
        count = str(1 + int(self.get_cookie('count', '0')))
        self.set_cookie('count', count)
        self.set_cookie('ts', str(int(time.time())))
        failed, model, entity = self._get_model_and_entity(False, False)
        dictObj = {}
        if failed:
            #todo: put the errors above in here as well
            dictObj = {"error":"See response code"}
        elif model is None:
            #return all class names
            dictObj = {"allModelClassNames":touchengineutil.allModelClassNames()}
        #TODO: yeah, all these nested "if" statements, ugly.
        elif entity is None:
            classSetname = model.__name__ + "_set"
            if not limit:
                if not afterKey:
                    if not orderBy:
                        models = model.all()
                    else:
                        models = model.gql("order by %s" %(orderBy,)) #todo: sanitize me? check Model.properties() for orderBy at least
                else:
                    if not orderBy:
                        models = model.gql("Where __key__ > :1", Key(encoded=afterKey)) #todo: handle key doesn't exist
                    else:
                        models = model.gql("Where __key__ > :1 order by %s" %(orderBy,), Key(encoded=afterKey))
            else: #limit
                if not afterKey:
                    if not orderBy:
                        models = model.all().fetch(int(limit))
                    else:
                        models = model.gql("order by %s" %(orderBy, )).fetch(int(limit))
                else:
                    if not orderBy:
                        models = model.gql("Where __key__ > :1", Key(encoded=afterKey)).fetch(int(limit))
                    else:
                        models = model.gql("Where __key__ > :1 order by %s" %(orderBy,), Key(encoded=afterKey)).fetch(int(limit))
            if short:
                dictObj = {classSetname:[touchengineutil.classAndIdFromModelInstance(eachModelInstance) for eachModelInstance in models]}
            else:
                dictObj = {classSetname:[plistutil.entity_to_dict(eachModelInstance) for eachModelInstance in models]}
        else:
            #return the dictionary representation of the entity in question
            dictObj = plistutil.entity_to_dict(entity)
        return self._serve(dictObj)

    def get(self):
        """webapp GET entry point: translate query params and delegate to _get."""
        limit = self.request.get("limit")
        afterKey = self.request.get("afterKey")
        short = self.request.get("short")
        orderBy = self.request.get("orderBy")
        #logging.info("orderBy = %s" %(orderBy,))
        if short: short=True #anything in short makes it true
        else: short=False
        return self._get(short=short, limit=limit, afterKey=afterKey, orderBy=orderBy)
""" Utilities for REST CRUD support for GAE db models from gae-json-rest
modified for touchengine
Specifically, this module facilitates introspection about a data model built
on GAE db -- a registry of what db.Model subclasses are made available for
introspection and by what names, utilities to register and query about such
classes 'in bulk', mapping of property values of instances of those classes
from and to strings. Reference properties, in particular, are mapped to
strings of the form Classname/<id> where id is a unique-within-class id
usable for the get_by_id method of the corresponding class;
"reverse-reference" properties are *not* supported for conversion to/from
string.
The conversion of property values to/from string is made by static methods
named foo_to_string and foo_from_string (for a property class attribute
named foo); this module offers facilities to make and install on the class
object all such needed methods, but if the class itself explicitly chooses
to define some methods with these names, those facilities will not override
them (so each db.Model subclass gets a chance to special-case some or all
of its instance's property attributes). The foo_from_string method is not
overridden if the class already defines one itself.
The module also offers the ability to register and retrieve (by string
names):
-- 'special objects' (model-like, but with no entities)
such a registration just creates a namespace (for registering methods on)
which is represented as a dict
-- 'methods' which can be registered as (any one of; for >1 register again)
-- callable on a special object,
-- callable on a model,
-- callable on any entity of a model
all such registrations require a callable taking named args which are
lists coming from the cgi.parse_qs parsing of a query string;
the callable object registered for a method that's registered as callable
on any entity of a model also takes a first argument 'self' that is
the specific entity on which it is being called.
-- entry points to query all the methods callable on a special object,
model, or, any entity of a given model
"""
import datetime
import inspect
import logging
import sys
from google.appengine.ext import db
from google.appengine.api import users
def id_of(x):
    """ Get the numeric ID given an instance x of a db.Model subclass. """
    key = x.key()
    return key.id()
def identity(x): return x
# NOTE(review): wrapping a module-level function in staticmethod() leaves a
# descriptor that is NOT callable at module scope on Python 2 (staticmethod
# objects only become callable when accessed through a class). This looks
# like it was meant to live inside a class body -- confirm before relying
# on calling identity() directly.
identity = staticmethod(identity)
def isProperty(x):
    """ Is class attribute x a 'real' property (not a reverse reference)?
    Args:
      x: a class attribute (from some db.Model subclass)
    Returns:
      True iff x's type is that of a "real" property
    """
    # (reverse-reference exclusion disabled, as in the original)
    # and not isinstance(x, db._ReverseReferenceProperty)
    result = isinstance(x, db.Property)
    return result
# Registry of "special objects": named namespaces (plain dicts) that carry
# methods but have no entities. Each dict remembers its own name under '_n'.
specials_registry = dict()
def registerSpecialByName(name):
    # Create a new special-object namespace; duplicate names are an error.
    if name in specials_registry:
        raise KeyError, 'Duplicate name %r for specials registry' % name
    specials_registry[name] = dict(_n=name)
def specialFromName(name):
    """ Get special object with the given name (None if none).
    Args:
      name: a string that should be registered as name for a special object
    Returns:
      dict that's the special object thus named, None if none
    """
    return specials_registry.get(name)
def allSpecialNames():
    """ Return a list of strings, all special object names in registry. """
    return sorted(specials_registry)
def registerSpecialMethod(special, name, method):
    # Attach a callable under `name`; `special` may be a name or the dict.
    if isinstance(special, str):
        spc = specialFromName(special)
        if spc is None:
            raise KeyError, 'No special %r' % special
        special = spc
    if name in special:
        raise KeyError, 'Duplicated method name %r for special %r' % (
            name, special['_n'])
    special[name] = method
def specialMethodFromName(special, name):
    # Look up a method on a special object (by name or dict); None if absent.
    if isinstance(special, str):
        special = specialFromName(special)
    if special is None:
        return None
    return special.get(name)
# Registry mapping string names -> registered db.Model subclasses.
model_class_registry = dict()
def registerClassByName(cls, name=None):
    """ Register a db.Model subclass with the given name (def. its own name). """
    if name is None: name = cls.__name__
    if name in model_class_registry:
        raise KeyError, 'Duplicate name %r for model class registry' % name
    model_class_registry[name] = cls
    # remember the registered name on the class itself (see nameFromModelClass)
    setattr(cls, '_n', name)
def isModelClass(x):
    """ Is object x a subclass of db.Model?
    Args:
      x: any
    Returns:
      true iff x is a subclass of db.Model
    """
    # issubclass raises TypeError when x is not a class; treat that as False.
    try: return issubclass(x, db.Model)
    except TypeError: return False
def registerAllModelClasses(module_obj):
    """ Register non-private db.Model subclasses from the given module object. """
    # inspect.getmembers filters with isModelClass; a leading '_' marks private.
    for name, cls in inspect.getmembers(module_obj, isModelClass):
        if name[0] != '_':
            registerClassByName(cls, name)
def registerAllModelClassesFromModuleNamed(module_name):
    """ Register all db.Model subclasses from module w/given name. """
    # NOTE(review): for dotted names __import__ returns the top-level
    # package, not the leaf module -- confirm only flat names are used.
    registerAllModelClasses(__import__(module_name))
def modelClassFromName(classname):
    """ Get the db.Model subclass with the given name (None if none).
    Only handles db.Model subclasses enregistered into model_class_registry.
    Args:
      classname: a string that should name a db.Model subclass
    Returns:
      class object with that name, or None if there's no such class
    """
    cls = model_class_registry.get(classname)
    return cls
def nameFromModelClass(cls):
    """Return the registry name stamped on cls by registerClassByName (or None)."""
    return getattr(cls, '_n', None)
def allModelClassNames():
    """ Return a list of strings, all model class names in registry. """
    names = sorted(model_class_registry)
    return names
def _getter(model, an):
if isinstance(model, str):
mdl = modelClassFromName(model)
if mdl is None:
raise KeyError, 'No model named %r' % model
model = mdl
mm = getattr(model, an, None)
if mm is None:
mm = dict()
setattr(model, an, mm)
def _registerMethod(model, name, method, _getter_an):
    """ Register `method` as `name` in a model's method dict.
    Args:
        model: a model class or its registered name
        name: the method name to register under
        method: the callable to register
        _getter_an: attribute name of the method dict ('_mm' or '_im')
    Raises:
        KeyError: if `name` is already registered for this model
    """
    model, mm = _getter(model, _getter_an)
    if name in mm:
        # parenthesized raise form works on both Python 2 and 3
        raise KeyError('Duplicate name %r for method in model %r' % (
            name, nameFromModelClass(model)))
    mm[name] = method
def _methodByName(model, name, _getter_an):
    """ Return the method registered as `name` for model (None if absent). """
    _, mm = _getter(model, _getter_an)
    return mm.get(name)
def _allMethods(model, _getter_an):
    """ Return the sorted list of method names registered for model. """
    _, mm = _getter(model, _getter_an)
    return sorted(mm)
# Public wrappers around the _registerMethod/_methodByName/_allMethods
# helpers: model-level methods live under '_mm', instance-level under '_im'.
def registerModelMethod(model, name, method):
    # Register a model-level method; raises KeyError on duplicate name.
    return _registerMethod(model, name, method, '_mm')
def modelMethodByName(model, name):
    # Look up a model-level method by name (None if not registered).
    return _methodByName(model, name, '_mm')
def allModelMethods(model):
    # Sorted names of all registered model-level methods.
    return _allMethods(model, '_mm')
def registerInstanceMethod(model, name, method):
    # Register an instance-level method; raises KeyError on duplicate name.
    return _registerMethod(model, name, method, '_im')
def instanceMethodByName(model, name):
    # Look up an instance-level method by name (None if not registered).
    return _methodByName(model, name, '_im')
def allInstanceMethods(model):
    # Sorted names of all registered instance-level methods.
    return _allMethods(model, '_im')
def modelInstanceByClassAndId(s):
    """ Get a model instance given its class name and numeric ID, or None.
    Args:
        s: str of the form 'Classname/1234'
    Returns:
        model instance from the class of that name, with that ID (or None)
    """
    modelname, strid = s.split('/')
    model = modelClassFromName(modelname)
    if model is None:
        return None
    return model.get_by_id(int(strid))
def classAndIdFromModelInstance(x, classname=None):
    """ Get a 'Classname/1234' string given a model instance.
    Args:
        x: a model instance or None
        classname: optional class name to check against x's actual class
    Returns:
        str of the form 'Classname/1234', or None if x is None or the
        classname is not the registered name of x's class
    """
    if x is None:
        return None
    actual_class = type(x)
    if classname is None:
        classname = actual_class.__name__
    if modelClassFromName(classname) is not actual_class:
        return None
    return '%s/%s' % (classname, id_of(x))
def classAndKeyFromModelInstance(x, classname=None):
    """ Get a string with class name and datastore key given a model instance.
    Args:
        x: a model instance or None
        classname: optional class name to check against x's actual class
    Returns:
        str of the form 'Classname/<key>', or None if x is None or the
        classname is not the registered name of x's class
    """
    if x is None:
        return None
    actual_class = type(x)
    if classname is None:
        classname = actual_class.__name__
    if modelClassFromName(classname) is not actual_class:
        return None
    return '%s/%s' % (classname, x.key())
def revRefIdArray(s):
    """ Get a list of 'Classname/id' reference strings for a query's results.
    Fetches at most 1000 entities from the query.
    """
    entities = s.fetch(limit=1000)
    return [classAndIdFromModelInstance(entity) for entity in entities]
def idArrayRevRef(s):
    """ Setter stub for reverse-reference properties: currently a no-op.
    TODO: add logging to see what arrives here and work out how to update
    an object's multi-valued side; also decide whether to force clients to
    update the "many" side instead (e.g. removing a doctor reference from a
    pager is easier than removing one pager from a doctor's list).
    """
    return ''
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
def datetimeFromString(s):
    """ Get a datetime object given a str ('right now' for empty str).
    As per appengine convention, all datetime objs must be UTC.
    Args:
        s: str in DATETIME_FORMAT or ''
    Returns:
        the parsed datetime, or datetime.datetime.now() for an empty string
    """
    if not s:
        return datetime.datetime.now()
    return datetime.datetime.strptime(s, DATETIME_FORMAT)
def stringFromDatetime(dt):
    """ Get an appropriately formatted str given a datetime object.
    Inverse of datetimeFromString (except for its '' -> now() case).
    Args:
        dt: datetime instance
    Returns:
        str formatted as per DATETIME_FORMAT
    """
    return dt.strftime(DATETIME_FORMAT)
def userFromString(s):
    """ Get a user object given a str.
    Args:
        s: str as email address (or '')
    Returns:
        a User for that email, or None if s is empty or the user is unknown
    """
    if not s:
        return None
    try:
        return User(email=s)
    except UserNotFoundError:
        # TODO(review): decide whether an unknown user should be an error
        return None
def stringFromUser(aUser):
    """ Get a user's email address as a unicode string.
    Args:
        aUser: a users.User instance, or None
    Returns:
        the user's email address (u'' when aUser is None/falsy)
    """
    if not aUser:
        return u''
    return u''.join(aUser.email())
# mapping from property types to appropriate str->value function if any
# property types not in the mapping must accept a properly formatted str
# NOTE(review): the staticmethod wrappers keep these functions from being
# turned into (un)bound methods when addHelperMethods stores them as class
# attributes and they are later fetched via getattr on the class.
setter_registry = {
    db.BooleanProperty: 'False'.__ne__,  # any string except 'False' -> True
    db.DateTimeProperty: staticmethod(datetimeFromString),
    db.IntegerProperty: int,
    db.FloatProperty: float,
    db.ReferenceProperty: staticmethod(modelInstanceByClassAndId),
    db.StringListProperty: str.split,  # whitespace-separated items
    db.UserProperty: staticmethod(userFromString),
    db._ReverseReferenceProperty: staticmethod(idArrayRevRef),
    }
# mapping from property types to appropriate value->str function if any
# str(value) is used for property types that are not in the mapping
# (see the NOTE on setter_registry regarding the staticmethod wrappers)
getter_registry = {
    db.DateTimeProperty: staticmethod(stringFromDatetime),
    db.ReferenceProperty: staticmethod(classAndIdFromModelInstance),
    db.StringListProperty: ' '.join,  # inverse of str.split above
    db.UserProperty: staticmethod(stringFromUser),
    db._ReverseReferenceProperty: staticmethod(revRefIdArray),
    }
def allProperties(cls):
    """ Get all (name, value) pairs of properties given a db.Model subclass.
    Args:
        cls: a class object (a db.Model subclass)
    Returns:
        list of (name, value) pairs of properties of that class
    """
    # isProperty is the predicate defined elsewhere in this module
    # (presumably an isinstance check against db.Property -- verify).
    return inspect.getmembers(cls, isProperty)
def addHelperMethods(cls):
    """ Add <prop>_from_string and <prop>_to_string methods to a db.Model subclass.
    For every property, installs the conversion callables registered in
    setter_registry / getter_registry (defaulting to identity resp. u''.join),
    unless the class already defines a method of that name.
    Args:
        cls: a class object (db.Model subclass), adds methods to it.
    """
    for prop_name, prop_value in allProperties(cls):
        from_name = prop_name + '_from_string'
        if not hasattr(cls, from_name):
            setattr(cls, from_name,
                    setter_registry.get(type(prop_value), identity))
        to_name = prop_name + '_to_string'
        if not hasattr(cls, to_name):
            setattr(cls, to_name,
                    getter_registry.get(type(prop_value), u''.join))
def decorateModuleNamed(module_name):
    """ Do all needed work for non-private model classes in module thus named.
    Registers each public db.Model subclass and attaches its string-conversion
    helper methods.
    """
    module_obj = __import__(module_name)
    for cls_name, cls in inspect.getmembers(module_obj, isModelClass):
        if cls_name.startswith('_'):
            continue
        registerClassByName(cls, cls_name)
        addHelperMethods(cls)
| Python |
""" Utilities for JSON REST CRUD support for GAE db models.
Terminology: a subclass of db.Model is known as "a Model"; an instance of
such a subclass is known as "an entity".
Data is said to be in JSONed or JSONable form if it contains only dicts, lists
and scalars (strings, numbers) in a form that is correctly serializable into a
JSON-format string.
In particular, a "jobj" is a JSONed dict whose 'id' key maps to the string
form of an entity's numeric ID; every other key must be the name of
a property of that entity's Model, and the corresponding value must be a string
that can be deserialized into a value of that property's type.
"""
import re
import touchengineutil
from django.utils import simplejson
def id_of(entity):
    """ Make a {'id': <string-of-digits>} dict for an entity.
    Args:
        entity: an entity
    Returns:
        a jobj corresponding to the entity
    """
    return {'id': touchengineutil.id_of(entity)}
# RE to match: optional /, classname, optional /, ID of 0+ numeric digits
# e.g. '/Doctor/23', 'Doctor/23', '/Doctor/', and 'Doctor' all match.
CLASSNAME_ID_RE = re.compile(r'^/?(\w+)/?(\d*)$')
def path_to_classname_and_id(path):
    """ Get a (classname, id) pair from a path.
    Args:
        path: a path string to analyze
    Returns:
        a 2-item tuple:
            (None, '') if the path does not match CLASSNAME_ID_RE
            (classname, idstring) if the path does match
            [idstring may be '', or else a string of digits]
    """
    match = CLASSNAME_ID_RE.match(path)
    if match is None:
        return (None, '')
    return match.groups()
def send_json(response_obj, jdata):
    """ Send data in JSON form to an HTTP-response object.
    Args:
        response_obj: an HTTP response object
        jdata: a dict or list in correct 'JSONable' form
    Side effects:
        sets the response content type and sends the JSON form of jdata
        on response_obj.out
    """
    response_obj.content_type = 'application/json'
    simplejson.dump(jdata, response_obj.out)
def receive_json(request_obj):
    """ Receive data in JSON form from an HTTP-request object.
    Args:
        request_obj: an HTTP request object (with body in JSONed form)
    Returns:
        the JSONable-form result of loading the request's body
    Raises:
        ValueError (from simplejson) if the body is not valid JSON
    """
    return simplejson.loads(request_obj.body)
def make_jobj(entity):
    """ Make a JSONable dict (a jobj) given an entity.
    Args:
        entity: an entity
    Returns:
        a jobj: {'id': ...} plus one stringified entry per set property
    """
    model = type(entity)
    jobj = id_of(entity)
    for prop_name, _ in touchengineutil.allProperties(model):
        value = getattr(entity, prop_name, None)
        if value is None:
            continue
        # the *_to_string helpers are installed by touchengineutil
        to_string = getattr(model, prop_name + '_to_string')
        jobj[prop_name] = to_string(value)
    return jobj
def parse_jobj(model, jobj):
    """ Make dict suitable for instantiating model, given a jobj.
    Args:
        model: a Model
        jobj: a jobj
    Returns:
        a dict d such that calling model(**d) properly makes an entity
    """
    parsed = dict()
    for prop_name, raw_value in jobj.iteritems():
        # ensure we have an ASCII string, not a Unicode one
        prop_name = str(prop_name)
        # the *_from_string helpers are installed by touchengineutil
        from_string = getattr(model, prop_name + '_from_string')
        converted = from_string(raw_value)
        if converted is not None:
            parsed[prop_name] = converted
    return parsed
def make_entity(model, jobj):
    """ Makes an entity whose type is model with the state given by jobj.
    Args:
        model: a Model
        jobj: a jobj
    Side effects:
        creates and puts an entity of type model, w/state per jobj
    Returns:
        a jobj representing the newly created entity
    """
    entity = model(**parse_jobj(model, jobj))
    entity.put()
    result = make_jobj(entity)
    # make sure 'id' reflects the datastore ID assigned by put()
    result.update(id_of(entity))
    return result
def update_entity(entity, jobj):
    """ Updates an entity's state as per properties given in jobj.
    Args:
        entity: an entity
        jobj: a jobj
    Side effects:
        updates the entity with properties as given by jobj
    Returns:
        a jobj representing the whole new state of the entity
    """
    changes = parse_jobj(type(entity), jobj)
    for prop_name, new_value in changes.iteritems():
        setattr(entity, prop_name, new_value)
    entity.put()
    return make_jobj(entity)
| Python |
"""A toy-level example of a data model in Google Appengine DB terms.
"""
import logging
from touchengine import touchengineutil
from Doctor import Doctor
from Pager import Pager
# Register Doctor and Pager in the model registry and attach their
# *_from_string / *_to_string conversion helpers at import time.
touchengineutil.decorateModuleNamed(__name__)
logging.info('touchengine Models in %r decorated', __name__)
| Python |
"""A toy-level example of a RESTful app running on Google Appengine.
"""
import logging
import time
import wsgiref.handlers
from google.appengine.ext import webapp
import models
from touchengine import cookutil
from touchengine import touchengineutil
from touchengine import plistutil
from touchengine.plistHandler import PlistHandler
# TODO: queries, methods, schemas, and MUCH better error-handling!-)
def main():
    """ CGI entry point: route every request path to the PlistHandler. """
    logging.info('main.py main()')
    application = webapp.WSGIApplication([('/.*', PlistHandler)],
                                         debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
    main()
| Python |
import cgi
import doctest
import logging
import os
import re
# Root logger level comes from the LOGLEVEL env var (default 'WARNING');
# an invalid value makes getattr raise AttributeError at import time.
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.environ.get('LOGLEVEL', 'WARNING')))
class UrlParser(object):
    """ Parse a URL path and perform an appropriate callback on regex-matching.
    Instantiate h with a prefix (to be matched, but ignored if it matches),
    followed by as many (regex, callback) pairs as needed.
    Then, call h.process(path): if the path matches the prefix, then
    each regex is tried *IN ORDER* on the rest of the path, and,
    upon the first match if any, the corresponding callback gets called
    (and its results returned).
    If the prefix does not match, or none of the regexes does, then
    method call h.process(path) returns None.
    The callback is passed *NAMED* arguments (only!) corresponding to
    the positional groups matched in the prefix, augmented or overridden
    by those matched in the specific regex that matched after that.
    So for example:
    >>> def show(**k): print sorted(k.items())
    >>> h = UrlParser(r'/(?P<foo>\w+)/',
    ...               (r'(?P<bar>\d+)', show),
    ...               (r'(?P<foo>[^/]*)', show),
    ...               )
    >>> h.process('/zipzop/23/whatever')
    [('bar', '23'), ('foo', 'zipzop')]
    >>> h.process('/zipzop/whoo/whatever')
    [('foo', 'whoo')]
    You can also change the prefix by passing a prefix to .process(...) [the
    new prefix-to-ignore is then remembered in lieu of the previous one].
    >>> h.prefix.pattern
    '/(?P<foo>\\\\w+)/'
    >>> h.process('/zipzop/whoo/whatever', prefix='/')
    [('foo', 'zipzop')]
    >>> h.process('/zipzop/whoo/whatever')
    [('foo', 'zipzop')]
    >>> h.prefix.pattern
    '/'
    The h.prefix attribute is exposed, and it's a RE object.
    """
    def __init__(self, prefix, *args):
        """ Takes a prefix to be ignored and 0+ (regex, callback) pair args.
        Args:
            prefix: a string regex pattern
            args: 0+ pairs (regex_pattern, callback) [each a string + a callable]
        """
        self.prefix = re.compile(prefix or '')
        logging.debug('prefix: %r', prefix)
        compiled = []
        for pattern, callback in args:
            logging.debug('%r -> %r', pattern, callback)
            compiled.append((re.compile(pattern), callback))
        self.callbacks = compiled
    def process(self, path, prefix=None):
        """ Match the path to one of the regexs and call the appropriate callback.
        Args:
            path: a string URL (complete path) to parse
            prefix: if not None, a RE pattern string to change self.prefix from now on
        Returns:
            the result of the appropriate callback, or None if no match
        """
        if prefix is not None and prefix != self.prefix.pattern:
            self.prefix = re.compile(prefix)
        prefix_mo = self.prefix.match(path)
        if prefix_mo is None:
            logging.debug('No prefix match for %r (%r)', path, self.prefix)
            return None
        rest = path[prefix_mo.end():]
        logging.debug('Matching %r...', rest)
        for regex, callback in self.callbacks:
            mo = regex.match(rest)
            if mo is None:
                continue
            logging.debug('Matched %r, calling %r', regex, callback)
            # prefix groups first, then the specific regex's groups override
            named_args = prefix_mo.groupdict()
            named_args.update(mo.groupdict())
            return callback(**named_args)
        logging.debug('No match for %r', rest)
        return None
class RestUrlParser(UrlParser):
    """ Specifically dispatches on the REs associated with REST-shaped URLs.
    Note that h.process only takes an URL *path*, NOT the rest of the URL (no
    protocol, no host, no query).
    >>> h = RestUrlParser('')
    >>> h.process('/$foobar')
    ('special', '$foobar')
    >>> h.process('/foobar')
    ('model', 'foobar')
    >>> h.process('/$foobar/zak/')
    ('special_method', '$foobar', 'zak')
    >>> h.process('/foobar/zak/')
    ('model_method', 'foobar', 'zak')
    >>> h.process('/foobar/23/')
    ('model_strid', 'foobar', '23')
    >>> h.process('/foobar/23/blop')
    ('model_strid_method', 'foobar', '23', 'blop')
    >>> h.process('')
    >>> h.process('////////')
    >>>
    """
    @staticmethod
    def _doprefix(prefix):
        # Normalize a prefix to exactly one leading and one trailing slash
        # (or to just '/' when empty); None stays None.
        if prefix is None:
            return None
        stripped = prefix.strip('/')
        if stripped:
            return '/%s/' % stripped
        return '/'
    def process(self, path, prefix=None):
        # Delegate to UrlParser.process with a normalized prefix.
        return UrlParser.process(self, path, self._doprefix(prefix))
    def __init__(self, prefix=None, **overrides):
        """ Set the prefix-to-ignore, optionally override methods.
        Args:
            prefix: a string regex pattern (or None, default)
            overrides: 0+ named arguments; values are callables to override the
            methods RestUrlParser provides (which just return tuples of strings),
            and each such callable must be signature-compatible with the
            corresponding named method.  The methods & signatures are:
                do_special(special)
                do_model(model)
                do_special_method(special, method)
                do_model_method(model, method)
                do_model_strid(model, strid)
                do_model_strid_method(model, strid, method)
            The *names* (not necessarily the *order*) of the arguments matter.
            The values of all arguments are strings (the substrings of the
            incoming path that match the respective items of the REST URL):
            strid is always 1+ digits; special is '$' + a valid identifier;
            model and method are identifiers.
        """
        # let each method be overridden (in the instance) by caller at ctor-time
        self.__dict__.update(overrides)
        # prefix must always absorb leading and trailing /
        prefix = self._doprefix(prefix)
        sr_method = r'/(?P<method>\w+)'
        sr_strid = r'/(?P<strid>\d+)'
        re_special = r'(?P<special>\$\w+)/?'
        re_model = r'(?P<model>\w+)/?'
        # route table; ORDER MATTERS: longer patterns before their prefixes
        # (special_method > special; model_strid_method > model_strid >
        # model_method > model)
        route_specs = [
            ('special_method', re_special + sr_method),
            ('special', re_special),
            ('model_strid_method', re_model + sr_strid + sr_method),
            ('model_strid', re_model + sr_strid),
            ('model_method', re_model + sr_method),
            ('model', re_model),
            ]
        urls = [(regex, getattr(self, 'do_' + name))
                for name, regex in route_specs]
        UrlParser.__init__(self, prefix, *urls)
    def do_special(self, special):
        return 'special', special
    def do_model(self, model):
        return 'model', model
    def do_special_method(self, special, method):
        return 'special_method', special, method
    def do_model_method(self, model, method):
        return 'model_method', model, method
    def do_model_strid(self, model, strid):
        return 'model_strid', model, strid
    def do_model_strid_method(self, model, strid, method):
        return 'model_strid_method', model, strid, method
def _test():
    """ Run this module's doctests, printing a summary on full success. """
    import doctest
    numfailures, numtests = doctest.testmod()
    if numfailures == 0:
        print '%d tests passed successfully' % numtests
    # if there are any failures, doctest does its own reporting!-)
if __name__ == "__main__":
    _test()
| Python |
''' Json-Rest-handlin' integration helper.
This module offers a JSON+REST-handling integration class meant to be used with
Google App Engine (hooked into a webapp.RequestHandler subclass); it can be
hooked up by simply passing an object h with attributes h.request and
h.response that are duck-like those of webapp.RequestHandler.
On hookup, the integration-helper class overrides the get/set/put/delete
methods of the object hooking up to it so that they respond appropriately to
REST requests (as documented in json_rest.txt) based on registrations performed
in touchengineutil, parsing and formatting JSON payloads based on jsonutil.
IOW, this helper integrates functionality found in other modules of the
gae-json-rest package:
parsutil
touchengineutil
jsonutil
"putting it all together" into a highly-reusable (but still modestly
customizable) REST-style, JSON-transport server web-app for GAE.
TODO: decide what arguments/parameters are passed to various kinds of
methods being called, and implement that decision; add MANY tests!!!
'''
import logging
import jsonutil
import parsutil
import touchengineutil
class JsonRestHelper(object):
    """ Integrates REST URL parsing (parsutil), the model/method registries
    (touchengineutil) and JSON marshaling (jsonutil) into get/put/post/delete
    methods suitable for grafting onto a webapp.RequestHandler via hookup().
    Every served request ends in _serve, which hooks back down to break the
    reference loop hookup creates.
    """
    prefix_to_ignore = '/'
    # lazily-created parsutil.RestUrlParser instances, one per HTTP verb
    __delete_parser = __put_parser = __post_parser = __get_parser = None
    def hookup(self, handler):
        """ "Hooks up" this helper instance to a handler object.
        Args:
          handler: an instance of a webapp.RequestHandler subclass
        Side effects:
          - sets self.handler to handler
          - sets the handler's get, put, post and delete methods from self
          - sets the handler's jrh attribute to self
        Note this creates reference loops and MUST be undone in hookdown!
        """
        logging.info('hookup %r/%r', self, handler)
        self.handler = handler
        handler.get = self.get
        handler.put = self.put
        handler.post = self.post
        handler.delete = self.delete
        handler.jrh = self
    def hookdown(self):
        """ Undoes the effects of self.hookup """
        logging.info('hookdn %r/%r', self, self.handler)
        h = self.handler
        h.jrh = self.handler = None
        del h.get, h.put, h.post, h.delete
    def _serve(self, data):
        """ Serves a result in JSON, and hooks-down from the handler """
        try: return jsonutil.send_json(self.handler.response, data)
        finally: self.hookdown()
    def get_model(self, modelname):
        """ Gets a model (or None) given a model name.
        Args:
          modelname: a string that should name a model
        Returns:
          a model class, or None (if no model's registered with that name)
        Side effects:
          sets response status to 400 if no model's registered with that name
        """
        model = touchengineutil.modelClassFromName(modelname)
        if model is None:
            self.handler.response.set_status(400, 'Model %r not found' % modelname)
        return model
    def get_special(self, specialname):
        """ Gets a special (or None) given a special object's name.
        Args:
          specialname: a string that should name a special object
        Returns:
          a special object, or None (if no special's registered with that name)
        Side effects:
          sets response status to 400 if no special's registered with that name
        """
        special = touchengineutil.specialFromName(specialname)
        if special is None:
            self.handler.response.set_status(400, 'Special object %r not found' %
                                             specialname)
        return special
    def get_entity(self, modelname, strid):
        """ Gets an entity (or None) given a model name and entity ID as string.
        Args:
          modelname: a string that should name a model
          strid: the str(id) for the numeric id of an entity of that model
        Returns:
          an entity, or None (if something went wrong)
        Side effects:
          sets response status to 400 or 404 if various things went wrong
        """
        model = self.get_model(modelname)
        if model is None:
            return None
        entity = model.get_by_id(int(strid))
        if entity is None:
            self.handler.response.set_status(404, "Entity %s/%s not found" %
                                             (modelname, strid))
        return entity
    def get_special_method(self, specialname, methodname):
        """ Gets a special object method (or None) given special & method names.
        Args:
          specialname: a string that should name a special object
          methodname: a string that should name a method of that special object
        Returns:
          the method with that name in the special object of that name, or None
        Side effects:
          sets response status to 400 if special or method not found
        """
        special = self.get_special(specialname)
        # FIX: return None (not '') so callers' `is None` checks work
        if special is None:
            return None
        method = special.get(methodname)
        if method is None:
            self.handler.response.set_status(400, 'Method %r not found in special %r'
                                             % (methodname, specialname))
        return method
    def _methodhelper(self, modelname, methodname, _getter):
        """ Gets a model or instance method given model and method names & getter.
        Args:
          modelname: a string that should name a model
          methodname: a string that should name a method of that model
                      (model-method or instance-method, dep. on _getter)
        Returns:
          a method object, or None if either model or method were not found
        Side effects:
          sets response status to 400 if either model or method were not found
        """
        model = self.get_model(modelname)
        # FIX: return None (not '') so callers' `is None` checks work
        if model is None:
            return None
        method = _getter(model, methodname)
        if method is None:
            # FIX: the format string was missing its second %r, which made
            # this error path raise TypeError at runtime
            self.handler.response.set_status(400, 'Method %r not found in model %r' %
                                             (methodname, modelname))
        return method
    def get_model_method(self, modelname, methodname):
        """ Gets a model's method given model and method names.
        Args:
          modelname: a string that should name a model
          methodname: a string that should name a method of that model
        Returns:
          a method object, or None if either model or method were not found
        Side effects:
          sets response status to 400 if either model or method were not found
        """
        return self._methodhelper(modelname, methodname,
                                  touchengineutil.modelMethodByName)
    def get_instance_method(self, modelname, methodname):
        """ Gets an instance method given model and method names.
        Args:
          modelname: a string that should name a model
          methodname: a string that should name an instance method of that model
        Returns:
          a method object, or None if either model or method were not found
        Side effects:
          sets response status to 400 if either model or method were not found
        """
        return self._methodhelper(modelname, methodname,
                                  touchengineutil.instanceMethodByName)
    def do_delete(self, model, strid):
        """ Hook method to delete an entity given modelname and strid.
        """
        entity = self.get_entity(model, strid)
        if entity is not None:
            entity.delete()
        return {}
    def delete(self, prefix=None):
        """ Delete an entity given by path modelname/strid
            Response is JSON for an empty jobj.
        """
        if self.__delete_parser is None:
            self.__delete_parser = parsutil.RestUrlParser(self.prefix_to_ignore,
                do_model_strid=self.do_delete)
        path = self.handler.request.path
        result = self.__delete_parser.process(path, prefix)
        if result is None or isinstance(result, tuple):
            self.handler.response.set_status(400, 'Invalid URL for DELETE: %r' % path)
            # FIX: serve an empty jobj on errors, like the other verbs do
            return self._serve({})
        return self._serve(result)
    def do_put(self, model, strid):
        """ Hook method to update an entity given modelname and strid.
        """
        entity = self.get_entity(model, strid)
        if entity is None:
            return {}
        jobj = jsonutil.receive_json(self.handler.request)
        jobj = jsonutil.update_entity(entity, jobj)
        updated_entity_path = "/%s/%s" % (model, jobj['id'])
        self.handler.response.set_status(200, 'Updated entity %s' %
                                         updated_entity_path)
        return jobj
    def put(self, prefix=None):
        """ Update an entity given by path modelname/strid
            Request body is JSON for the needed changes
            Response is JSON for the updated entity.
        """
        if self.__put_parser is None:
            self.__put_parser = parsutil.RestUrlParser(self.prefix_to_ignore,
                do_model_strid=self.do_put)
        path = self.handler.request.path
        result = self.__put_parser.process(path, prefix)
        if result is None or isinstance(result, tuple):
            # FIX: the error message used to say POST
            self.handler.response.set_status(400, 'Invalid URL for PUT: %r' % path)
            return self._serve({})
        return self._serve(result)
    def do_post_special_method(self, special, method):
        """ Hook method to call a method on a special object given names.
        """
        themethod = self.get_special_method(special, method)
        # FIX: was testing `special` (the name string, never None here)
        # instead of the lookup result
        if themethod is None: return ''
        try: return themethod()
        except Exception as e:
            self.handler.response.set_status(400, "Can't call %r/%r: %s" % (
                special, method, e))
            return ''
    def do_post_model(self, model):
        """ Hook method to "call a model" (to create an entity)
        """
        themodel = self.get_model(model)
        if themodel is None: return ''
        jobj = jsonutil.receive_json(self.handler.request)
        jobj = jsonutil.make_entity(themodel, jobj)
        # remembered so post() can build the Location header
        self._classname = model
        return jobj
    def do_post_model_method(self, model, method):
        """ Hook method to call a method on a model given names.
        """
        themethod = self.get_model_method(model, method)
        if themethod is None: return ''
        try: return themethod()
        except Exception as e:
            self.handler.response.set_status(400, "Can't call %r/%r: %s" % (
                model, method, e))
            return ''
    def do_post_entity_method(self, model, strid, method):
        """ Hook method to call a method on an entity given names and strid.
        """
        themethod = self.get_instance_method(model, method)
        if themethod is None: return ''
        entity = self.get_entity(model, strid)
        if entity is None: return ''
        try: return themethod(entity)
        except Exception as e:
            self.handler.response.set_status(400, "Can't call %r/%r/%r: %s" % (
                model, strid, method, e))
            return ''
    def post(self, prefix=None):
        """ Create an entity ("call a model") or perform other non-R/O "call".
            Request body is JSON for the needed entity or other call "args".
            Response is JSON for the updated entity (or "call result").
        """
        if self.__post_parser is None:
            self.__post_parser = parsutil.RestUrlParser(self.prefix_to_ignore,
                do_special_method=self.do_post_special_method,
                do_model=self.do_post_model,
                do_model_method=self.do_post_model_method,
                do_model_strid_method=self.do_post_entity_method,
                )
        path = self.handler.request.path
        result = self.__post_parser.process(path, prefix)
        if result is None or isinstance(result, tuple):
            self.handler.response.set_status(400, 'Invalid URL for POST: %r' % path)
            return self._serve({})
        try:
            strid = result['id']
        except (KeyError, AttributeError, TypeError):
            pass
        else:
            # NOTE(review): self._classname is only set by do_post_model; a
            # special/model method returning a dict with 'id' would hit an
            # AttributeError here -- confirm intended contract
            new_entity_path = "/%s/%s" % (self._classname, strid)
            logging.info('Post (%r) created %r', path, new_entity_path)
            self.handler.response.headers['Location'] = new_entity_path
            self.handler.response.set_status(201, 'Created entity %s' %
                                             new_entity_path)
        return self._serve(result)
    def do_get_special_method(self, special, method):
        """ Hook method to R/O call a method on a special object given names.
        """
        themethod = self.get_special_method(special, method)
        if themethod is None: return ''
        try: return themethod()
        except Exception as e:
            self.handler.response.set_status(400, "Can't call %r/%r: %s" % (
                special, method, e))
            return ''
    def do_get_model(self, model):
        """ Hook method to R/O "call a model" (get the list of all its IDs).
        """
        themodel = self.get_model(model)
        if themodel is None: return ''
        return [jsonutil.id_of(x) for x in themodel.all()]
    def do_get_entity(self, model, strid):
        """ Hook method to get data about an entity given model name and strid
        """
        entity = self.get_entity(model, strid)
        if entity is None:
            return {}
        return jsonutil.make_jobj(entity)
    def do_get_model_method(self, model, method):
        """ Hook method to R/O call a method on a model given names.
        """
        themethod = self.get_model_method(model, method)
        if themethod is None: return ''
        try: return themethod()
        except Exception as e:
            self.handler.response.set_status(400, "Can't call %r/%r: %s" % (
                model, method, e))
            return ''
    def do_get_entity_method(self, model, strid, method):
        """ Hook method to R/O call a method on an entity given names and strid.
        """
        themethod = self.get_instance_method(model, method)
        if themethod is None: return ''
        entity = self.get_entity(model, strid)
        if entity is None: return ''
        try: return themethod(entity)
        except Exception as e:
            self.handler.response.set_status(400, "Can't call %r/%r/%r: %s" % (
                model, strid, method, e))
            return ''
    def get(self, prefix=None):
        """ Get JSON data for entity IDs of a model, or all about an entity.
            Depending on the request path, serve as JSON to the response object:
            - for a path of /classname/id, a jobj for that entity
            - for a path of /classname, a list of id-only jobjs for that model
            - or, the results of the method being called (should be R/O!)
        """
        logging.info('GET path=%r, prefix=%r', self.handler.request.path, prefix)
        if self.__get_parser is None:
            self.__get_parser = parsutil.RestUrlParser(self.prefix_to_ignore,
                do_special_method=self.do_get_special_method,
                do_model=self.do_get_model,
                do_model_strid=self.do_get_entity,
                do_model_method=self.do_get_model_method,
                do_model_strid_method=self.do_get_entity_method,
                )
        path = self.handler.request.path
        # hacky/kludgy special-case: serve all model names (TODO: remove this!)
        # (need to have proper %meta special w/methods to get such info!)
        if prefix is not None and path.strip('/') == prefix.strip('/'):
            result = touchengineutil.allModelClassNames()
            logging.info('Hacky case (%r): %r', path, result)
            return self._serve(result)
        result = self.__get_parser.process(path, prefix)
        if result is None or isinstance(result, tuple):
            self.handler.response.set_status(400, 'Invalid URL for GET: %r' % path)
            return self._serve({})
        return self._serve(result)
# expose a single helper object, should be reusable: hookup/_serve pair each
# request, so one module-level instance serves consecutive handlers
helper = JsonRestHelper()
# just for testing...:
import wsgiref.handlers
from google.appengine.ext import webapp
import models
class _TestCrudRestHandler(webapp.RequestHandler):
    """ Test-only handler: each instance gets JSON-REST verb methods grafted
    on by the module-level helper at construction time. """
    def __init__(self, *a, **k):
        webapp.RequestHandler.__init__(self, *a, **k)
        # installs get/put/post/delete and the jrh back-reference on self
        helper.hookup(self)
def main():
    """ CGI entry point for the JSON-REST integration test app. """
    logging.info('intgutil test main()')
    application = webapp.WSGIApplication([('/(rest)/.*', _TestCrudRestHandler)],
                                         debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
    main()
| Python |
""" A very simple "smoke test" for gae-json-rest toy app. """
import sys
import simplejson
import testutil
class TemplateTest(object):
    """Smoke test for the gae-json-rest toy app.

    Drives the REST endpoints through a `tester` driver object (which must
    supply request_and_show, silent_request and get_cookies) and exits the
    process with status 1 on the first failed check.  Progress messages are
    printed only when `verbose` is true.
    """

    def emit(self, s):
        # Chatter only in verbose mode.  self.verbose is set by __call__,
        # so emit must not be used before the test is invoked.
        if self.verbose: print(s)

    def __call__(self, tester, verbose):
        print("__call__")
        self.verbose = verbose
        # what models do we have? shd be Doctor and Pager
        self.emit('Getting names for Models:')
        modelnames = tester.request_and_show('GET', '/')
        try:
            assert set(modelnames) == set(('Doctor', 'Pager'))
        except Exception:
            # BUG FIX: was a bare `except:`, which also trapped
            # KeyboardInterrupt/SystemExit; show the value, then re-raise.
            print('modelnames is %s' % set(modelnames))
            raise
        # do we know any Doctors?
        self.emit('IDs of Doctors before any operations:')
        doctorids = tester.request_and_show('GET', '/Doctor/')
        # get the highest-known Doctor ID, if any, to ensure a unique number
        if doctorids:
            unique = max(int(obj['id']) for obj in doctorids) + 1
        else:
            unique = 1
        # now, we want to delete about half the doctors we know
        num_doctors = len(doctorids)
        deletions = 0
        for i in range(0, num_doctors, 2):
            strid = doctorids[i]['id']
            tester.silent_request('DELETE', '/Doctor/%s' % strid)
            deletions += 1
        self.emit('IDs of Doctors after some deletions:')
        doctorids = tester.silent_request('GET', '/Doctor/')
        self.emit(doctorids)
        if len(doctorids) != num_doctors - deletions:
            print('Had %d doctors, deleted %d, should have %d but have %d' % (
                num_doctors, deletions, num_doctors-deletions, len(doctorids)))
            sys.exit(1)
        num_doctors = len(doctorids)
        # form name based on unique number
        docname = 'Dr. John %s' % unique
        # make entity with that name
        post_body = testutil.body(name=docname)
        post_result = tester.request_and_show('POST', '/Doctor/', post_body)
        new_doctor_id = post_result['id']
        new_doctor_path = '/Doctor/%s' % new_doctor_id
        self.emit('Created %r' % new_doctor_path)
        # show new doctor just created
        self.emit('New Doctor just created:')
        new_doctor = tester.request_and_show('GET', new_doctor_path)
        if new_doctor['name'] != docname:
            print('New doctor name should be %r, is %r instead after POST' % (
                docname, new_doctor['name']))
            sys.exit(1)
        # show IDs after the POST
        self.emit('IDs of Doctors after POST:')
        doctorids = tester.request_and_show('GET', '/Doctor/')
        if len(doctorids) != num_doctors + 1:
            print('Had %d doctors, created %d, should have %d but have %d' % (
                num_doctors, 1, num_doctors+1, len(doctorids)))
            sys.exit(1)
        num_doctors = len(doctorids)
        # Now change the name of the doctor
        docname = '%s changed' % docname
        put_body = testutil.body(name=docname)
        put_result = tester.request_and_show('PUT', new_doctor_path, put_body)
        # show new doctor just changed
        self.emit('New Doctor just changed:')
        new_doctor = tester.request_and_show('GET', new_doctor_path)
        if new_doctor['name'] != docname:
            print('New doctor name should be %r, is %r instead after PUT' % (
                docname, new_doctor['name']))
            sys.exit(1)
        self.emit('IDs of Doctors after PUT:')
        doctorids = tester.request_and_show('GET', '/Doctor/')
        if len(doctorids) != num_doctors:
            print('Had %d doctors, put %d, should have %d but have %d' % (
                num_doctors, 1, num_doctors, len(doctorids)))
            sys.exit(1)
        # check idempotence of PUT: a repeated identical PUT must not change
        # the entity or the number of entities
        self.emit('Check PUT idempotence')
        tester.request_and_show('PUT', new_doctor_path, put_body)
        # show new doctor just not-changed
        self.emit('New Doctor just not-changed:')
        new_doctor = tester.request_and_show('GET', new_doctor_path)
        if new_doctor['name'] != docname:
            print('New doctor name should be %r, is %r instead after 2nd PUT' % (
                docname, new_doctor['name']))
            sys.exit(1)
        self.emit('IDs of Doctors after second PUT:')
        doctorids = tester.request_and_show('GET', '/Doctor/')
        if len(doctorids) != num_doctors:
            print('Had %d doctors, put %d again, should have %d but have %d' % (
                num_doctors, 1, num_doctors, len(doctorids)))
            sys.exit(1)
        # testing cookie functionality
        # each call to test_cookie should return an incremented value of
        # cookie named secret_key
        self.emit('Testing cookie functionality')
        a = int(tester.get_cookies().get('counter'))
        b = int(tester.get_cookies().get('counter'))
        c = int(tester.get_cookies().get('counter'))
        if a+1 != b or b+1 != c:
            # BUG FIX: the old message claimed a, b, c must be exactly
            # 0, 1 and 2, but the check only requires consecutive values
            # (the counter may persist across runs).
            print('counter cookie should increment by 1 on each request.')
            print('Got a=%d, b=%d and c=%d' % (a, b, c))
            sys.exit(1)
if __name__ == '__main__':
    # Hand the smoke test to the shared Tester driver and execute it.
    testutil.Tester(TemplateTest()).execute()
| Python |
#!/usr/bin/env python
#Python sonnet maker
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
#external imports
import sonnet
import plistlib
class MainHandler(webapp.RequestHandler):
    """Serve the sonnet verses dictionary serialized as an XML plist."""

    def get(self):
        # Serialize the module-level verses dict and stream it to the client.
        self.response.out.write(plistlib.writePlistToString(sonnet.verses))
class FrontPage(webapp.RequestHandler):
    """Displays the front (landing) page of the iSonnet web service."""
    def get(self):
        # Serve a small static HTML page linking to the plist endpoint and
        # to the project's open-source home.  The markup is sent verbatim.
        self.response.out.write("""
        <html>
        <title>iSonnet Application</title>
        <body>
        <p>This is a simple web service.</p>
        <p>
        A plist is served out here:
        <a href="http://isonnet.appspot.com/plists/sonnets">isonnet</a>
        </p>
        <p>
        The Touch Engine Open Source Project is here:
        <a href="http://code.google.com/p/touchengine/">touchengine</a>
        </p>
        </body>
        </html>
        """)
def main():
    """Create the WSGI application for the sonnet service and run it."""
    routes = [
        ('/plists/sonnets', MainHandler),
        ('/', FrontPage),
    ]
    run_wsgi_app(webapp.WSGIApplication(routes, debug=True))
if __name__ == '__main__':
    main()
| Python |
verses={"verses":[["I","""FROM fairest creatures we desire increase,
That thereby beauty's rose might never die,
But as the riper should by time decease,
His tender heir might bear his memory:
But thou, contracted to thine own bright eyes,
Feed'st thy light'st flame with self-substantial fuel,
Making a famine where abundance lies,
Thyself thy foe, to thy sweet self too cruel.
Thou that art now the world's fresh ornament
And only herald to the gaudy spring,
Within thine own bud buriest thy content
And, tender churl, makest waste in niggarding.
Pity the world, or else this glutton be,
To eat the world's due, by the grave and thee."""],
["II","""When forty winters shall beseige thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery, so gazed on now,
Will be a tatter'd weed, of small worth held:
Then being ask'd where all thy beauty lies,
Where all the treasure of thy lusty days,
To say, within thine own deep-sunken eyes,
Were an all-eating shame and thriftless praise.
How much more praise deserved thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold."""],
["III","""Look in thy glass, and tell the face thou viewest
Now is the time that face should form another;
Whose fresh repair if now thou not renewest,
Thou dost beguile the world, unbless some mother.
For where is she so fair whose unear'd womb
Disdains the tillage of thy husbandry?
Or who is he so fond will be the tomb
Of his self-love, to stop posterity?
Thou art thy mother's glass, and she in thee
Calls back the lovely April of her prime:
So thou through windows of thine age shall see
Despite of wrinkles this thy golden time.
But if thou live, remember'd not to be,
Die single, and thine image dies with thee."""],
["IV","""Unthrifty loveliness, why dost thou spend
Upon thyself thy beauty's legacy?
Nature's bequest gives nothing but doth lend,
And being frank she lends to those are free.
Then, beauteous niggard, why dost thou abuse
The bounteous largess given thee to give?
Profitless usurer, why dost thou use
So great a sum of sums, yet canst not live?
For having traffic with thyself alone,
Thou of thyself thy sweet self dost deceive.
Then how, when nature calls thee to be gone,
What acceptable audit canst thou leave?
Thy unused beauty must be tomb'd with thee,
Which, used, lives th' executor to be."""],
["V","""Those hours, that with gentle work did frame
The lovely gaze where every eye doth dwell,
Will play the tyrants to the very same
And that unfair which fairly doth excel:
For never-resting time leads summer on
To hideous winter and confounds him there;
Sap cheque'd with frost and lusty leaves quite gone,
Beauty o'ersnow'd and bareness every where:
Then, were not summer's distillation left,
A liquid prisoner pent in walls of glass,
Beauty's effect with beauty were bereft,
Nor it nor no remembrance what it was:
But flowers distill'd though they with winter meet,
Leese but their show; their substance still lives sweet."""],
["VI","""Then let not winter's ragged hand deface
In thee thy summer, ere thou be distill'd:
Make sweet some vial; treasure thou some place
With beauty's treasure, ere it be self-kill'd.
That use is not forbidden usury,
Which happies those that pay the willing loan;
That's for thyself to breed another thee,
Or ten times happier, be it ten for one;
Ten times thyself were happier than thou art,
If ten of thine ten times refigured thee:
Then what could death do, if thou shouldst depart,
Leaving thee living in posterity?
Be not self-will'd, for thou art much too fair
To be death's conquest and make worms thine heir."""],
["VII","""Lo! in the orient when the gracious light
Lifts up his burning head, each under eye
Doth homage to his new-appearing sight,
Serving with looks his sacred majesty;
And having climb'd the steep-up heavenly hill,
Resembling strong youth in his middle age,
yet mortal looks adore his beauty still,
Attending on his golden pilgrimage;
But when from highmost pitch, with weary car,
Like feeble age, he reeleth from the day,
The eyes, 'fore duteous, now converted are
From his low tract and look another way:
So thou, thyself out-going in thy noon,
Unlook'd on diest, unless thou get a son."""],
["VIII","""Music to hear, why hear'st thou music sadly?
Sweets with sweets war not, joy delights in joy.
Why lovest thou that which thou receivest not gladly,
Or else receivest with pleasure thine annoy?
If the true concord of well-tuned sounds,
By unions married, do offend thine ear,
They do but sweetly chide thee, who confounds
In singleness the parts that thou shouldst bear.
Mark how one string, sweet husband to another,
Strikes each in each by mutual ordering,
Resembling sire and child and happy mother
Who all in one, one pleasing note do sing:
Whose speechless song, being many, seeming one,
Sings this to thee: 'thou single wilt prove none.'"""],
["IX","""Is it for fear to wet a widow's eye
That thou consumest thyself in single life?
Ah! if thou issueless shalt hap to die.
The world will wail thee, like a makeless wife;
The world will be thy widow and still weep
That thou no form of thee hast left behind,
When every private widow well may keep
By children's eyes her husband's shape in mind.
Look, what an unthrift in the world doth spend
Shifts but his place, for still the world enjoys it;
But beauty's waste hath in the world an end,
And kept unused, the user so destroys it.
No love toward others in that bosom sits
That on himself such murderous shame commits."""],
["X","""For shame! deny that thou bear'st love to any,
Who for thyself art so unprovident.
Grant, if thou wilt, thou art beloved of many,
But that thou none lovest is most evident;
For thou art so possess'd with murderous hate
That 'gainst thyself thou stick'st not to conspire.
Seeking that beauteous roof to ruinate
Which to repair should be thy chief desire.
O, change thy thought, that I may change my mind!
Shall hate be fairer lodged than gentle love?
Be, as thy presence is, gracious and kind,
Or to thyself at least kind-hearted prove:
Make thee another self, for love of me,
That beauty still may live in thine or thee."""],
["XI","""As fast as thou shalt wane, so fast thou growest
In one of thine, from that which thou departest;
And that fresh blood which youngly thou bestowest
Thou mayst call thine when thou from youth convertest.
Herein lives wisdom, beauty and increase:
Without this, folly, age and cold decay:
If all were minded so, the times should cease
And threescore year would make the world away.
Let those whom Nature hath not made for store,
Harsh featureless and rude, barrenly perish:
Look, whom she best endow'd she gave the more;
Which bounteous gift thou shouldst in bounty cherish:
She carved thee for her seal, and meant thereby
Thou shouldst print more, not let that copy die."""],
["XII","""When I do count the clock that tells the time,
And see the brave day sunk in hideous night;
When I behold the violet past prime,
And sable curls all silver'd o'er with white;
When lofty trees I see barren of leaves
Which erst from heat did canopy the herd,
And summer's green all girded up in sheaves
Borne on the bier with white and bristly beard,
Then of thy beauty do I question make,
That thou among the wastes of time must go,
Since sweets and beauties do themselves forsake
And die as fast as they see others grow;
And nothing 'gainst Time's scythe can make defence
Save breed, to brave him when he takes thee hence."""],
["XIII","""O, that you were yourself! but, love, you are
No longer yours than you yourself here live:
Against this coming end you should prepare,
And your sweet semblance to some other give.
So should that beauty which you hold in lease
Find no determination: then you were
Yourself again after yourself's decease,
When your sweet issue your sweet form should bear.
Who lets so fair a house fall to decay,
Which husbandry in honour might uphold
Against the stormy gusts of winter's day
And barren rage of death's eternal cold?
O, none but unthrifts! Dear my love, you know
You had a father: let your son say so."""],
["XIV","""Not from the stars do I my judgment pluck;
And yet methinks I have astronomy,
But not to tell of good or evil luck,
Of plagues, of dearths, or seasons' quality;
Nor can I fortune to brief minutes tell,
Pointing to each his thunder, rain and wind,
Or say with princes if it shall go well,
By oft predict that I in heaven find:
But from thine eyes my knowledge I derive,
And, constant stars, in them I read such art
As truth and beauty shall together thrive,
If from thyself to store thou wouldst convert;
Or else of thee this I prognosticate:
Thy end is truth's and beauty's doom and date."""],
["XV","""When I consider every thing that grows
Holds in perfection but a little moment,
That this huge stage presenteth nought but shows
Whereon the stars in secret influence comment;
When I perceive that men as plants increase,
Cheered and cheque'd even by the self-same sky,
Vaunt in their youthful sap, at height decrease,
And wear their brave state out of memory;
Then the conceit of this inconstant stay
Sets you most rich in youth before my sight,
Where wasteful Time debateth with Decay,
To change your day of youth to sullied night;
And all in war with Time for love of you,
As he takes from you, I engraft you new."""],
["XVI","""But wherefore do not you a mightier way
Make war upon this bloody tyrant, Time?
And fortify yourself in your decay
With means more blessed than my barren rhyme?
Now stand you on the top of happy hours,
And many maiden gardens yet unset
With virtuous wish would bear your living flowers,
Much liker than your painted counterfeit:
So should the lines of life that life repair,
Which this, Time's pencil, or my pupil pen,
Neither in inward worth nor outward fair,
Can make you live yourself in eyes of men.
To give away yourself keeps yourself still,
And you must live, drawn by your own sweet skill."""],
["XVII","""Who will believe my verse in time to come,
If it were fill'd with your most high deserts?
Though yet, heaven knows, it is but as a tomb
Which hides your life and shows not half your parts.
If I could write the beauty of your eyes
And in fresh numbers number all your graces,
The age to come would say 'This poet lies:
Such heavenly touches ne'er touch'd earthly faces.'
So should my papers yellow'd with their age
Be scorn'd like old men of less truth than tongue,
And your true rights be term'd a poet's rage
And stretched metre of an antique song:
But were some child of yours alive that time,
You should live twice; in it and in my rhyme."""],
["XVIII","""Shall I compare thee to a summer's day?
Thou art more lovely and more temperate:
Rough winds do shake the darling buds of May,
And summer's lease hath all too short a date:
Sometime too hot the eye of heaven shines,
And often is his gold complexion dimm'd;
And every fair from fair sometime declines,
By chance or nature's changing course untrimm'd;
But thy eternal summer shall not fade
Nor lose possession of that fair thou owest;
Nor shall Death brag thou wander'st in his shade,
When in eternal lines to time thou growest:
So long as men can breathe or eyes can see,
So long lives this and this gives life to thee."""],
["XIX","""Devouring Time, blunt thou the lion's paws,
And make the earth devour her own sweet brood;
Pluck the keen teeth from the fierce tiger's jaws,
And burn the long-lived phoenix in her blood;
Make glad and sorry seasons as thou fleets,
And do whate'er thou wilt, swift-footed Time,
To the wide world and all her fading sweets;
But I forbid thee one most heinous crime:
O, carve not with thy hours my love's fair brow,
Nor draw no lines there with thine antique pen;
Him in thy course untainted do allow
For beauty's pattern to succeeding men.
Yet, do thy worst, old Time: despite thy wrong,
My love shall in my verse ever live young."""],
["XX","""A woman's face with Nature's own hand painted
Hast thou, the master-mistress of my passion;
A woman's gentle heart, but not acquainted
With shifting change, as is false women's fashion;
An eye more bright than theirs, less false in rolling,
Gilding the object whereupon it gazeth;
A man in hue, all 'hues' in his controlling,
Much steals men's eyes and women's souls amazeth.
And for a woman wert thou first created;
Till Nature, as she wrought thee, fell a-doting,
And by addition me of thee defeated,
By adding one thing to my purpose nothing.
But since she prick'd thee out for women's pleasure,
Mine be thy love and thy love's use their treasure."""],
["XXI","""So is it not with me as with that Muse
Stirr'd by a painted beauty to his verse,
Who heaven itself for ornament doth use
And every fair with his fair doth rehearse
Making a couplement of proud compare,
With sun and moon, with earth and sea's rich gems,
With April's first-born flowers, and all things rare
That heaven's air in this huge rondure hems.
O' let me, true in love, but truly write,
And then believe me, my love is as fair
As any mother's child, though not so bright
As those gold candles fix'd in heaven's air:
Let them say more than like of hearsay well;
I will not praise that purpose not to sell."""],
["XXII","""My glass shall not persuade me I am old,
So long as youth and thou are of one date;
But when in thee time's furrows I behold,
Then look I death my days should expiate.
For all that beauty that doth cover thee
Is but the seemly raiment of my heart,
Which in thy breast doth live, as thine in me:
How can I then be elder than thou art?
O, therefore, love, be of thyself so wary
As I, not for myself, but for thee will;
Bearing thy heart, which I will keep so chary
As tender nurse her babe from faring ill.
Presume not on thy heart when mine is slain;
Thou gavest me thine, not to give back again."""],
["XXIII","""As an unperfect actor on the stage
Who with his fear is put besides his part,
Or some fierce thing replete with too much rage,
Whose strength's abundance weakens his own heart.
So I, for fear of trust, forget to say
The perfect ceremony of love's rite,
And in mine own love's strength seem to decay,
O'ercharged with burden of mine own love's might.
O, let my books be then the eloquence
And dumb presagers of my speaking breast,
Who plead for love and look for recompense
More than that tongue that more hath more express'd.
O, learn to read what silent love hath writ:
To hear with eyes belongs to love's fine wit."""],
["XXIV.","""Mine eye hath play'd the painter and hath stell'd
Thy beauty's form in table of my heart;
My body is the frame wherein 'tis held,
And perspective it is the painter's art.
For through the painter must you see his skill,
To find where your true image pictured lies;
Which in my bosom's shop is hanging still,
That hath his windows glazed with thine eyes.
Now see what good turns eyes for eyes have done:
Mine eyes have drawn thy shape, and thine for me
Are windows to my breast, where-through the sun
Delights to peep, to gaze therein on thee;
Yet eyes this cunning want to grace their art;
They draw but what they see, know not the heart."""],
["XXV","""Let those who are in favour with their stars
Of public honour and proud titles boast,
Whilst I, whom fortune of such triumph bars,
Unlook'd for joy in that I honour most.
Great princes' favourites their fair leaves spread
But as the marigold at the sun's eye,
And in themselves their pride lies buried,
For at a frown they in their glory die.
The painful warrior famoused for fight,
After a thousand victories once foil'd,
Is from the book of honour razed quite,
And all the rest forgot for which he toil'd:
Then happy I, that love and am beloved
Where I may not remove nor be removed."""],
["XXVI","""Lord of my love, to whom in vassalage
Thy merit hath my duty strongly knit,
To thee I send this written embassage,
To witness duty, not to show my wit:
Duty so great, which wit so poor as mine
May make seem bare, in wanting words to show it,
But that I hope some good conceit of thine
In thy soul's thought, all naked, will bestow it;
Till whatsoever star that guides my moving
Points on me graciously with fair aspect
And puts apparel on my tatter'd loving,
To show me worthy of thy sweet respect:
Then may I dare to boast how I do love thee;
Till then not show my head where thou mayst prove me."""],
["XXVII","""Weary with toil, I haste me to my bed,
The dear repose for limbs with travel tired;
But then begins a journey in my head,
To work my mind, when body's work's expired:
For then my thoughts, from far where I abide,
Intend a zealous pilgrimage to thee,
And keep my drooping eyelids open wide,
Looking on darkness which the blind do see
Save that my soul's imaginary sight
Presents thy shadow to my sightless view,
Which, like a jewel hung in ghastly night,
Makes black night beauteous and her old face new.
Lo! thus, by day my limbs, by night my mind,
For thee and for myself no quiet find."""],
["XXVIII","""How can I then return in happy plight,
That am debarr'd the benefit of rest?
When day's oppression is not eased by night,
But day by night, and night by day, oppress'd?
And each, though enemies to either's reign,
Do in consent shake hands to torture me;
The one by toil, the other to complain
How far I toil, still farther off from thee.
I tell the day, to please them thou art bright
And dost him grace when clouds do blot the heaven:
So flatter I the swart-complexion'd night,
When sparkling stars twire not thou gild'st the even.
But day doth daily draw my sorrows longer
And night doth nightly make grief's strength seem stronger."""],
["XXIX","""When, in disgrace with fortune and men's eyes,
I all alone beweep my outcast state
And trouble deaf heaven with my bootless cries
And look upon myself and curse my fate,
Wishing me like to one more rich in hope,
Featured like him, like him with friends possess'd,
Desiring this man's art and that man's scope,
With what I most enjoy contented least;
Yet in these thoughts myself almost despising,
Haply I think on thee, and then my state,
Like to the lark at break of day arising
From sullen earth, sings hymns at heaven's gate;
For thy sweet love remember'd such wealth brings
That then I scorn to change my state with kings."""],
["XXX","""When to the sessions of sweet silent thought
I summon up remembrance of things past,
I sigh the lack of many a thing I sought,
And with old woes new wail my dear time's waste:
Then can I drown an eye, unused to flow,
For precious friends hid in death's dateless night,
And weep afresh love's long since cancell'd woe,
And moan the expense of many a vanish'd sight:
Then can I grieve at grievances foregone,
And heavily from woe to woe tell o'er
The sad account of fore-bemoaned moan,
Which I new pay as if not paid before.
But if the while I think on thee, dear friend,
All losses are restored and sorrows end."""],
["XXXI","""Thy bosom is endeared with all hearts,
Which I by lacking have supposed dead,
And there reigns love and all love's loving parts,
And all those friends which I thought buried.
How many a holy and obsequious tear
Hath dear religious love stol'n from mine eye
As interest of the dead, which now appear
But things removed that hidden in thee lie!
Thou art the grave where buried love doth live,
Hung with the trophies of my lovers gone,
Who all their parts of me to thee did give;
That due of many now is thine alone:
Their images I loved I view in thee,
And thou, all they, hast all the all of me."""],
["XXXII","""If thou survive my well-contented day,
When that churl Death my bones with dust shall cover,
And shalt by fortune once more re-survey
These poor rude lines of thy deceased lover,
Compare them with the bettering of the time,
And though they be outstripp'd by every pen,
Reserve them for my love, not for their rhyme,
Exceeded by the height of happier men.
O, then vouchsafe me but this loving thought:
'Had my friend's Muse grown with this growing age,
A dearer birth than this his love had brought,
To march in ranks of better equipage:
But since he died and poets better prove,
Theirs for their style I'll read, his for his love.'"""],
["XXXIII","""Full many a glorious morning have I seen
Flatter the mountain-tops with sovereign eye,
Kissing with golden face the meadows green,
Gilding pale streams with heavenly alchemy;
Anon permit the basest clouds to ride
With ugly rack on his celestial face,
And from the forlorn world his visage hide,
Stealing unseen to west with this disgrace:
Even so my sun one early morn did shine
With all triumphant splendor on my brow;
But out, alack! he was but one hour mine;
The region cloud hath mask'd him from me now.
Yet him for this my love no whit disdaineth;
Suns of the world may stain when heaven's sun staineth."""],
["XXXIV","""Why didst thou promise such a beauteous day,
And make me travel forth without my cloak,
To let base clouds o'ertake me in my way,
Hiding thy bravery in their rotten smoke?
'Tis not enough that through the cloud thou break,
To dry the rain on my storm-beaten face,
For no man well of such a salve can speak
That heals the wound and cures not the disgrace:
Nor can thy shame give physic to my grief;
Though thou repent, yet I have still the loss:
The offender's sorrow lends but weak relief
To him that bears the strong offence's cross.
Ah! but those tears are pearl which thy love sheds,
And they are rich and ransom all ill deeds."""],
["XXXV","""No more be grieved at that which thou hast done:
Roses have thorns, and silver fountains mud;
Clouds and eclipses stain both moon and sun,
And loathsome canker lives in sweetest bud.
All men make faults, and even I in this,
Authorizing thy trespass with compare,
Myself corrupting, salving thy amiss,
Excusing thy sins more than thy sins are;
For to thy sensual fault I bring in sense--
Thy adverse party is thy advocate--
And 'gainst myself a lawful plea commence:
Such civil war is in my love and hate
That I an accessary needs must be
To that sweet thief which sourly robs from me."""],
["XXXVI","""Let me confess that we two must be twain,
Although our undivided loves are one:
So shall those blots that do with me remain
Without thy help by me be borne alone.
In our two loves there is but one respect,
Though in our lives a separable spite,
Which though it alter not love's sole effect,
Yet doth it steal sweet hours from love's delight.
I may not evermore acknowledge thee,
Lest my bewailed guilt should do thee shame,
Nor thou with public kindness honour me,
Unless thou take that honour from thy name:
But do not so; I love thee in such sort
As, thou being mine, mine is thy good report."""],
["XXXVII","""As a decrepit father takes delight
To see his active child do deeds of youth,
So I, made lame by fortune's dearest spite,
Take all my comfort of thy worth and truth.
For whether beauty, birth, or wealth, or wit,
Or any of these all, or all, or more,
Entitled in thy parts do crowned sit,
I make my love engrafted to this store:
So then I am not lame, poor, nor despised,
Whilst that this shadow doth such substance give
That I in thy abundance am sufficed
And by a part of all thy glory live.
Look, what is best, that best I wish in thee:
This wish I have; then ten times happy me!"""],
["XXXVIII","""How can my Muse want subject to invent,
While thou dost breathe, that pour'st into my verse
Thine own sweet argument, too excellent
For every vulgar paper to rehearse?
O, give thyself the thanks, if aught in me
Worthy perusal stand against thy sight;
For who's so dumb that cannot write to thee,
When thou thyself dost give invention light?
Be thou the tenth Muse, ten times more in worth
Than those old nine which rhymers invocate;
And he that calls on thee, let him bring forth
Eternal numbers to outlive long date.
If my slight Muse do please these curious days,
The pain be mine, but thine shall be the praise."""],
["XXXIX","""O, how thy worth with manners may I sing,
When thou art all the better part of me?
What can mine own praise to mine own self bring?
And what is 't but mine own when I praise thee?
Even for this let us divided live,
And our dear love lose name of single one,
That by this separation I may give
That due to thee which thou deservest alone.
O absence, what a torment wouldst thou prove,
Were it not thy sour leisure gave sweet leave
To entertain the time with thoughts of love,
Which time and thoughts so sweetly doth deceive,
And that thou teachest how to make one twain,
By praising him here who doth hence remain!"""],
["XL","""Take all my loves, my love, yea, take them all;
What hast thou then more than thou hadst before?
No love, my love, that thou mayst true love call;
All mine was thine before thou hadst this more.
Then if for my love thou my love receivest,
I cannot blame thee for my love thou usest;
But yet be blamed, if thou thyself deceivest
By wilful taste of what thyself refusest.
I do forgive thy robbery, gentle thief,
Although thou steal thee all my poverty;
And yet, love knows, it is a greater grief
To bear love's wrong than hate's known injury.
Lascivious grace, in whom all ill well shows,
Kill me with spites; yet we must not be foes."""],
["XLI","""Those petty wrongs that liberty commits,
When I am sometime absent from thy heart,
Thy beauty and thy years full well befits,
For still temptation follows where thou art.
Gentle thou art and therefore to be won,
Beauteous thou art, therefore to be assailed;
And when a woman woos, what woman's son
Will sourly leave her till she have prevailed?
Ay me! but yet thou mightest my seat forbear,
And chide try beauty and thy straying youth,
Who lead thee in their riot even there
Where thou art forced to break a twofold truth,
Hers by thy beauty tempting her to thee,
Thine, by thy beauty being false to me."""],
["XLII","""That thou hast her, it is not all my grief,
And yet it may be said I loved her dearly;
That she hath thee, is of my wailing chief,
A loss in love that touches me more nearly.
Loving offenders, thus I will excuse ye:
Thou dost love her, because thou knowst I love her;
And for my sake even so doth she abuse me,
Suffering my friend for my sake to approve her.
If I lose thee, my loss is my love's gain,
And losing her, my friend hath found that loss;
Both find each other, and I lose both twain,
And both for my sake lay on me this cross:
But here's the joy; my friend and I are one;
Sweet flattery! then she loves but me alone."""],
["XLIII","""When most I wink, then do mine eyes best see,
For all the day they view things unrespected;
But when I sleep, in dreams they look on thee,
And darkly bright are bright in dark directed.
Then thou, whose shadow shadows doth make bright,
How would thy shadow's form form happy show
To the clear day with thy much clearer light,
When to unseeing eyes thy shade shines so!
How would, I say, mine eyes be blessed made
By looking on thee in the living day,
When in dead night thy fair imperfect shade
Through heavy sleep on sightless eyes doth stay!
All days are nights to see till I see thee,
And nights bright days when dreams do show thee me."""],
["XLIV","""If the dull substance of my flesh were thought,
Injurious distance should not stop my way;
For then despite of space I would be brought,
From limits far remote where thou dost stay.
No matter then although my foot did stand
Upon the farthest earth removed from thee;
For nimble thought can jump both sea and land
As soon as think the place where he would be.
But ah! thought kills me that I am not thought,
To leap large lengths of miles when thou art gone,
But that so much of earth and water wrought
I must attend time's leisure with my moan,
Receiving nought by elements so slow
But heavy tears, badges of either's woe."""],
["XLV","""The other two, slight air and purging fire,
Are both with thee, wherever I abide;
The first my thought, the other my desire,
These present-absent with swift motion slide.
For when these quicker elements are gone
In tender embassy of love to thee,
My life, being made of four, with two alone
Sinks down to death, oppress'd with melancholy;
Until life's composition be recured
By those swift messengers return'd from thee,
Who even but now come back again, assured
Of thy fair health, recounting it to me:
This told, I joy; but then no longer glad,
I send them back again and straight grow sad."""],
["XLVI","""Mine eye and heart are at a mortal war
How to divide the conquest of thy sight;
Mine eye my heart thy picture's sight would bar,
My heart mine eye the freedom of that right.
My heart doth plead that thou in him dost lie--
A closet never pierced with crystal eyes--
But the defendant doth that plea deny
And says in him thy fair appearance lies.
To 'cide this title is impanneled
A quest of thoughts, all tenants to the heart,
And by their verdict is determined
The clear eye's moiety and the dear heart's part:
As thus; mine eye's due is thy outward part,
And my heart's right thy inward love of heart."""],
["XLVII","""Betwixt mine eye and heart a league is took,
And each doth good turns now unto the other:
When that mine eye is famish'd for a look,
Or heart in love with sighs himself doth smother,
With my love's picture then my eye doth feast
And to the painted banquet bids my heart;
Another time mine eye is my heart's guest
And in his thoughts of love doth share a part:
So, either by thy picture or my love,
Thyself away art resent still with me;
For thou not farther than my thoughts canst move,
And I am still with them and they with thee;
Or, if they sleep, thy picture in my sight
Awakes my heart to heart's and eye's delight."""],
["XLVIII","""How careful was I, when I took my way,
Each trifle under truest bars to thrust,
That to my use it might unused stay
From hands of falsehood, in sure wards of trust!
But thou, to whom my jewels trifles are,
Most worthy of comfort, now my greatest grief,
Thou, best of dearest and mine only care,
Art left the prey of every vulgar thief.
Thee have I not lock'd up in any chest,
Save where thou art not, though I feel thou art,
Within the gentle closure of my breast,
From whence at pleasure thou mayst come and part;
And even thence thou wilt be stol'n, I fear,
For truth proves thievish for a prize so dear."""],
["XLIX","""Against that time, if ever that time come,
When I shall see thee frown on my defects,
When as thy love hath cast his utmost sum,
Call'd to that audit by advised respects;
Against that time when thou shalt strangely pass
And scarcely greet me with that sun thine eye,
When love, converted from the thing it was,
Shall reasons find of settled gravity,--
Against that time do I ensconce me here
Within the knowledge of mine own desert,
And this my hand against myself uprear,
To guard the lawful reasons on thy part:
To leave poor me thou hast the strength of laws,
Since why to love I can allege no cause."""],
["L","""How heavy do I journey on the way,
When what I seek, my weary travel's end,
Doth teach that ease and that repose to say
'Thus far the miles are measured from thy friend!'
The beast that bears me, tired with my woe,
Plods dully on, to bear that weight in me,
As if by some instinct the wretch did know
His rider loved not speed, being made from thee:
The bloody spur cannot provoke him on
That sometimes anger thrusts into his hide;
Which heavily he answers with a groan,
More sharp to me than spurring to his side;
For that same groan doth put this in my mind;
My grief lies onward and my joy behind."""],
["LI","""Thus can my love excuse the slow offence
Of my dull bearer when from thee I speed:
From where thou art why should I haste me thence?
Till I return, of posting is no need.
O, what excuse will my poor beast then find,
When swift extremity can seem but slow?
Then should I spur, though mounted on the wind;
In winged speed no motion shall I know:
Then can no horse with my desire keep pace;
Therefore desire of perfect'st love being made,
Shall neigh--no dull flesh--in his fiery race;
But love, for love, thus shall excuse my jade;
Since from thee going he went wilful-slow,
Towards thee I'll run, and give him leave to go."""],
["LII","""So am I as the rich, whose blessed key
Can bring him to his sweet up-locked treasure,
The which he will not every hour survey,
For blunting the fine point of seldom pleasure.
Therefore are feasts so solemn and so rare,
Since, seldom coming, in the long year set,
Like stones of worth they thinly placed are,
Or captain jewels in the carcanet.
So is the time that keeps you as my chest,
Or as the wardrobe which the robe doth hide,
To make some special instant special blest,
By new unfolding his imprison'd pride.
Blessed are you, whose worthiness gives scope,
Being had, to triumph, being lack'd, to hope."""],
["LIII","""What is your substance, whereof are you made,
That millions of strange shadows on you tend?
Since every one hath, every one, one shade,
And you, but one, can every shadow lend.
Describe Adonis, and the counterfeit
Is poorly imitated after you;
On Helen's cheek all art of beauty set,
And you in Grecian tires are painted new:
Speak of the spring and foison of the year;
The one doth shadow of your beauty show,
The other as your bounty doth appear;
And you in every blessed shape we know.
In all external grace you have some part,
But you like none, none you, for constant heart."""],
["LIV","""O, how much more doth beauty beauteous seem
By that sweet ornament which truth doth give!
The rose looks fair, but fairer we it deem
For that sweet odour which doth in it live.
The canker-blooms have full as deep a dye
As the perfumed tincture of the roses,
Hang on such thorns and play as wantonly
When summer's breath their masked buds discloses:
But, for their virtue only is their show,
They live unwoo'd and unrespected fade,
Die to themselves. Sweet roses do not so;
Of their sweet deaths are sweetest odours made:
And so of you, beauteous and lovely youth,
When that shall fade, my verse distills your truth."""],
["LV","""Not marble, nor the gilded monuments
Of princes, shall outlive this powerful rhyme;
But you shall shine more bright in these contents
Than unswept stone besmear'd with sluttish time.
When wasteful war shall statues overturn,
And broils root out the work of masonry,
Nor Mars his sword nor war's quick fire shall burn
The living record of your memory.
'Gainst death and all-oblivious enmity
Shall you pace forth; your praise shall still find room
Even in the eyes of all posterity
That wear this world out to the ending doom.
So, till the judgment that yourself arise,
You live in this, and dwell in lover's eyes."""],
["LVI","""Sweet love, renew thy force; be it not said
Thy edge should blunter be than appetite,
Which but to-day by feeding is allay'd,
To-morrow sharpen'd in his former might:
So, love, be thou; although to-day thou fill
Thy hungry eyes even till they wink with fullness,
To-morrow see again, and do not kill
The spirit of love with a perpetual dullness.
Let this sad interim like the ocean be
Which parts the shore, where two contracted new
Come daily to the banks, that, when they see
Return of love, more blest may be the view;
Else call it winter, which being full of care
Makes summer's welcome thrice more wish'd, more rare."""],
["LVII","""Being your slave, what should I do but tend
Upon the hours and times of your desire?
I have no precious time at all to spend,
Nor services to do, till you require.
Nor dare I chide the world-without-end hour
Whilst I, my sovereign, watch the clock for you,
Nor think the bitterness of absence sour
When you have bid your servant once adieu;
Nor dare I question with my jealous thought
Where you may be, or your affairs suppose,
But, like a sad slave, stay and think of nought
Save, where you are how happy you make those.
So true a fool is love that in your will,
Though you do any thing, he thinks no ill."""],
["LVIII","""That god forbid that made me first your slave,
I should in thought control your times of pleasure,
Or at your hand the account of hours to crave,
Being your vassal, bound to stay your leisure!
O, let me suffer, being at your beck,
The imprison'd absence of your liberty;
And patience, tame to sufferance, bide each cheque,
Without accusing you of injury.
Be where you list, your charter is so strong
That you yourself may privilege your time
To what you will; to you it doth belong
Yourself to pardon of self-doing crime.
I am to wait, though waiting so be hell;
Not blame your pleasure, be it ill or well."""],
["LIX","""If there be nothing new, but that which is
Hath been before, how are our brains beguiled,
Which, labouring for invention, bear amiss
The second burden of a former child!
O, that record could with a backward look,
Even of five hundred courses of the sun,
Show me your image in some antique book,
Since mind at first in character was done!
That I might see what the old world could say
To this composed wonder of your frame;
Whether we are mended, or whether better they,
Or whether revolution be the same.
O, sure I am, the wits of former days
To subjects worse have given admiring praise."""],
["LX","""Like as the waves make towards the pebbled shore,
So do our minutes hasten to their end;
Each changing place with that which goes before,
In sequent toil all forwards do contend.
Nativity, once in the main of light,
Crawls to maturity, wherewith being crown'd,
Crooked elipses 'gainst his glory fight,
And Time that gave doth now his gift confound.
Time doth transfix the flourish set on youth
And delves the parallels in beauty's brow,
Feeds on the rarities of nature's truth,
And nothing stands but for his scythe to mow:
And yet to times in hope my verse shall stand,
Praising thy worth, despite his cruel hand."""],
["LXI","""Is it thy will thy image should keep open
My heavy eyelids to the weary night?
Dost thou desire my slumbers should be broken,
While shadows like to thee do mock my sight?
Is it thy spirit that thou send'st from thee
So far from home into my deeds to pry,
To find out shames and idle hours in me,
The scope and tenor of thy jealousy?
O, no! thy love, though much, is not so great:
It is my love that keeps mine eye awake;
Mine own true love that doth my rest defeat,
To play the watchman ever for thy sake:
For thee watch I whilst thou dost wake elsewhere,
From me far off, with others all too near."""],
["LXII","""Sin of self-love possesseth all mine eye
And all my soul and all my every part;
And for this sin there is no remedy,
It is so grounded inward in my heart.
Methinks no face so gracious is as mine,
No shape so true, no truth of such account;
And for myself mine own worth do define,
As I all other in all worths surmount.
But when my glass shows me myself indeed,
Beated and chopp'd with tann'd antiquity,
Mine own self-love quite contrary I read;
Self so self-loving were iniquity.
'Tis thee, myself, that for myself I praise,
Painting my age with beauty of thy days."""],
["LXIII","""Against my love shall be, as I am now,
With Time's injurious hand crush'd and o'er-worn;
When hours have drain'd his blood and fill'd his brow
With lines and wrinkles; when his youthful morn
Hath travell'd on to age's steepy night,
And all those beauties whereof now he's king
Are vanishing or vanish'd out of sight,
Stealing away the treasure of his spring;
For such a time do I now fortify
Against confounding age's cruel knife,
That he shall never cut from memory
My sweet love's beauty, though my lover's life:
His beauty shall in these black lines be seen,
And they shall live, and he in them still green."""],
["LXIV","""When I have seen by Time's fell hand defaced
The rich proud cost of outworn buried age;
When sometime lofty towers I see down-razed
And brass eternal slave to mortal rage;
When I have seen the hungry ocean gain
Advantage on the kingdom of the shore,
And the firm soil win of the watery main,
Increasing store with loss and loss with store;
When I have seen such interchange of state,
Or state itself confounded to decay;
Ruin hath taught me thus to ruminate,
That Time will come and take my love away.
This thought is as a death, which cannot choose
But weep to have that which it fears to lose."""],
["LXV","""Since brass, nor stone, nor earth, nor boundless sea,
But sad mortality o'er-sways their power,
How with this rage shall beauty hold a plea,
Whose action is no stronger than a flower?
O, how shall summer's honey breath hold out
Against the wreckful siege of battering days,
When rocks impregnable are not so stout,
Nor gates of steel so strong, but Time decays?
O fearful meditation! where, alack,
Shall Time's best jewel from Time's chest lie hid?
Or what strong hand can hold his swift foot back?
Or who his spoil of beauty can forbid?
O, none, unless this miracle have might,
That in black ink my love may still shine bright."""],
["LXVI","""Tired with all these, for restful death I cry,
As, to behold desert a beggar born,
And needy nothing trimm'd in jollity,
And purest faith unhappily forsworn,
And guilded honour shamefully misplaced,
And maiden virtue rudely strumpeted,
And right perfection wrongfully disgraced,
And strength by limping sway disabled,
And art made tongue-tied by authority,
And folly doctor-like controlling skill,
And simple truth miscall'd simplicity,
And captive good attending captain ill:
Tired with all these, from these would I be gone,
Save that, to die, I leave my love alone."""],
["LXVII","""Ah! wherefore with infection should he live,
And with his presence grace impiety,
That sin by him advantage should achieve
And lace itself with his society?
Why should false painting imitate his cheek
And steal dead seeing of his living hue?
Why should poor beauty indirectly seek
Roses of shadow, since his rose is true?
Why should he live, now Nature bankrupt is,
Beggar'd of blood to blush through lively veins?
For she hath no exchequer now but his,
And, proud of many, lives upon his gains.
O, him she stores, to show what wealth she had
In days long since, before these last so bad."""],
["LXVIII","""Thus is his cheek the map of days outworn,
When beauty lived and died as flowers do now,
Before the bastard signs of fair were born,
Or durst inhabit on a living brow;
Before the golden tresses of the dead,
The right of sepulchres, were shorn away,
To live a second life on second head;
Ere beauty's dead fleece made another gay:
In him those holy antique hours are seen,
Without all ornament, itself and true,
Making no summer of another's green,
Robbing no old to dress his beauty new;
And him as for a map doth Nature store,
To show false Art what beauty was of yore."""],
["LXIX","""Those parts of thee that the world's eye doth view
Want nothing that the thought of hearts can mend;
All tongues, the voice of souls, give thee that due,
Uttering bare truth, even so as foes commend.
Thy outward thus with outward praise is crown'd;
But those same tongues that give thee so thine own
In other accents do this praise confound
By seeing farther than the eye hath shown.
They look into the beauty of thy mind,
And that, in guess, they measure by thy deeds;
Then, churls, their thoughts, although their eyes were kind,
To thy fair flower add the rank smell of weeds:
But why thy odour matcheth not thy show,
The solve is this, that thou dost common grow."""],
["LXX","""That thou art blamed shall not be thy defect,
For slander's mark was ever yet the fair;
The ornament of beauty is suspect,
A crow that flies in heaven's sweetest air.
So thou be good, slander doth but approve
Thy worth the greater, being woo'd of time;
For canker vice the sweetest buds doth love,
And thou present'st a pure unstained prime.
Thou hast pass'd by the ambush of young days,
Either not assail'd or victor being charged;
Yet this thy praise cannot be so thy praise,
To tie up envy evermore enlarged:
If some suspect of ill mask'd not thy show,
Then thou alone kingdoms of hearts shouldst owe."""],
["LXXI","""No longer mourn for me when I am dead
Then you shall hear the surly sullen bell
Give warning to the world that I am fled
From this vile world, with vilest worms to dwell:
Nay, if you read this line, remember not
The hand that writ it; for I love you so
That I in your sweet thoughts would be forgot
If thinking on me then should make you woe.
O, if, I say, you look upon this verse
When I perhaps compounded am with clay,
Do not so much as my poor name rehearse.
But let your love even with my life decay,
Lest the wise world should look into your moan
And mock you with me after I am gone."""],
["LXXII","""O, lest the world should task you to recite
What merit lived in me, that you should love
After my death, dear love, forget me quite,
For you in me can nothing worthy prove;
Unless you would devise some virtuous lie,
To do more for me than mine own desert,
And hang more praise upon deceased I
Than niggard truth would willingly impart:
O, lest your true love may seem false in this,
That you for love speak well of me untrue,
My name be buried where my body is,
And live no more to shame nor me nor you.
For I am shamed by that which I bring forth,
And so should you, to love things nothing worth."""],
["LXXIII","""That time of year thou mayst in me behold
When yellow leaves, or none, or few, do hang
Upon those boughs which shake against the cold,
Bare ruin'd choirs, where late the sweet birds sang.
In me thou seest the twilight of such day
As after sunset fadeth in the west,
Which by and by black night doth take away,
Death's second self, that seals up all in rest.
In me thou see'st the glowing of such fire
That on the ashes of his youth doth lie,
As the death-bed whereon it must expire
Consumed with that which it was nourish'd by.
This thou perceivest, which makes thy love more strong,
To love that well which thou must leave ere long."""],
["LXXIV","""But be contented: when that fell arrest
Without all bail shall carry me away,
My life hath in this line some interest,
Which for memorial still with thee shall stay.
When thou reviewest this, thou dost review
The very part was consecrate to thee:
The earth can have but earth, which is his due;
My spirit is thine, the better part of me:
So then thou hast but lost the dregs of life,
The prey of worms, my body being dead,
The coward conquest of a wretch's knife,
Too base of thee to be remembered.
The worth of that is that which it contains,
And that is this, and this with thee remains."""],
["LXXV","""So are you to my thoughts as food to life,
Or as sweet-season'd showers are to the ground;
And for the peace of you I hold such strife
As 'twixt a miser and his wealth is found;
Now proud as an enjoyer and anon
Doubting the filching age will steal his treasure,
Now counting best to be with you alone,
Then better'd that the world may see my pleasure;
Sometime all full with feasting on your sight
And by and by clean starved for a look;
Possessing or pursuing no delight,
Save what is had or must from you be took.
Thus do I pine and surfeit day by day,
Or gluttoning on all, or all away."""],
["LXXVI","""Why is my verse so barren of new pride,
So far from variation or quick change?
Why with the time do I not glance aside
To new-found methods and to compounds strange?
Why write I still all one, ever the same,
And keep invention in a noted weed,
That every word doth almost tell my name,
Showing their birth and where they did proceed?
O, know, sweet love, I always write of you,
And you and love are still my argument;
So all my best is dressing old words new,
Spending again what is already spent:
For as the sun is daily new and old,
So is my love still telling what is told."""],
["LXXVII","""Thy glass will show thee how thy beauties wear,
Thy dial how thy precious minutes waste;
The vacant leaves thy mind's imprint will bear,
And of this book this learning mayst thou taste.
The wrinkles which thy glass will truly show
Of mouthed graves will give thee memory;
Thou by thy dial's shady stealth mayst know
Time's thievish progress to eternity.
Look, what thy memory can not contain
Commit to these waste blanks, and thou shalt find
Those children nursed, deliver'd from thy brain,
To take a new acquaintance of thy mind.
These offices, so oft as thou wilt look,
Shall profit thee and much enrich thy book."""],
["LXXVIII","""So oft have I invoked thee for my Muse
And found such fair assistance in my verse
As every alien pen hath got my use
And under thee their poesy disperse.
Thine eyes that taught the dumb on high to sing
And heavy ignorance aloft to fly
Have added feathers to the learned's wing
And given grace a double majesty.
Yet be most proud of that which I compile,
Whose influence is thine and born of thee:
In others' works thou dost but mend the style,
And arts with thy sweet graces graced be;
But thou art all my art and dost advance
As high as learning my rude ignorance."""],
["LXXIX","""Whilst I alone did call upon thy aid,
My verse alone had all thy gentle grace,
But now my gracious numbers are decay'd
And my sick Muse doth give another place.
I grant, sweet love, thy lovely argument
Deserves the travail of a worthier pen,
Yet what of thee thy poet doth invent
He robs thee of and pays it thee again.
He lends thee virtue and he stole that word
From thy behavior; beauty doth he give
And found it in thy cheek; he can afford
No praise to thee but what in thee doth live.
Then thank him not for that which he doth say,
Since what he owes thee thou thyself dost pay."""],
["LXXX","""O, how I faint when I of you do write,
Knowing a better spirit doth use your name,
And in the praise thereof spends all his might,
To make me tongue-tied, speaking of your fame!
But since your worth, wide as the ocean is,
The humble as the proudest sail doth bear,
My saucy bark inferior far to his
On your broad main doth wilfully appear.
Your shallowest help will hold me up afloat,
Whilst he upon your soundless deep doth ride;
Or being wreck'd, I am a worthless boat,
He of tall building and of goodly pride:
Then if he thrive and I be cast away,
The worst was this; my love was my decay."""],
["LXXXI","""Or I shall live your epitaph to make,
Or you survive when I in earth am rotten;
From hence your memory death cannot take,
Although in me each part will be forgotten.
Your name from hence immortal life shall have,
Though I, once gone, to all the world must die:
The earth can yield me but a common grave,
When you entombed in men's eyes shall lie.
Your monument shall be my gentle verse,
Which eyes not yet created shall o'er-read,
And tongues to be your being shall rehearse
When all the breathers of this world are dead;
You still shall live--such virtue hath my pen--
Where breath most breathes, even in the mouths of men."""],
["LXXXII","""I grant thou wert not married to my Muse
And therefore mayst without attaint o'erlook
The dedicated words which writers use
Of their fair subject, blessing every book
Thou art as fair in knowledge as in hue,
Finding thy worth a limit past my praise,
And therefore art enforced to seek anew
Some fresher stamp of the time-bettering days
And do so, love; yet when they have devised
What strained touches rhetoric can lend,
Thou truly fair wert truly sympathized
In true plain words by thy true-telling friend;
And their gross painting might be better used
Where cheeks need blood; in thee it is abused."""],
["LXXXIII","""I never saw that you did painting need
And therefore to your fair no painting set;
I found, or thought I found, you did exceed
The barren tender of a poet's debt;
And therefore have I slept in your report,
That you yourself being extant well might show
How far a modern quill doth come too short,
Speaking of worth, what worth in you doth grow.
This silence for my sin you did impute,
Which shall be most my glory, being dumb;
For I impair not beauty being mute,
When others would give life and bring a tomb.
There lives more life in one of your fair eyes
Than both your poets can in praise devise."""],
["LXXXIV","""Who is it that says most? which can say more
Than this rich praise, that you alone are you?
In whose confine immured is the store
Which should example where your equal grew.
Lean penury within that pen doth dwell
That to his subject lends not some small glory;
But he that writes of you, if he can tell
That you are you, so dignifies his story,
Let him but copy what in you is writ,
Not making worse what nature made so clear,
And such a counterpart shall fame his wit,
Making his style admired every where.
You to your beauteous blessings add a curse,
Being fond on praise, which makes your praises worse."""],
["LXXXV","""My tongue-tied Muse in manners holds her still,
While comments of your praise, richly compiled,
Reserve their character with golden quill
And precious phrase by all the Muses filed.
I think good thoughts whilst other write good words,
And like unletter'd clerk still cry 'Amen'
To every hymn that able spirit affords
In polish'd form of well-refined pen.
Hearing you praised, I say ''Tis so, 'tis true,'
And to the most of praise add something more;
But that is in my thought, whose love to you,
Though words come hindmost, holds his rank before.
Then others for the breath of words respect,
Me for my dumb thoughts, speaking in effect."""],
["LXXXVI","""Was it the proud full sail of his great verse,
Bound for the prize of all too precious you,
That did my ripe thoughts in my brain inhearse,
Making their tomb the womb wherein they grew?
Was it his spirit, by spirits taught to write
Above a mortal pitch, that struck me dead?
No, neither he, nor his compeers by night
Giving him aid, my verse astonished.
He, nor that affable familiar ghost
Which nightly gulls him with intelligence
As victors of my silence cannot boast;
I was not sick of any fear from thence:
But when your countenance fill'd up his line,
Then lack'd I matter; that enfeebled mine."""],
["LXXXVII","""Farewell! thou art too dear for my possessing,
And like enough thou know'st thy estimate:
The charter of thy worth gives thee releasing;
My bonds in thee are all determinate.
For how do I hold thee but by thy granting?
And for that riches where is my deserving?
The cause of this fair gift in me is wanting,
And so my patent back again is swerving.
Thyself thou gavest, thy own worth then not knowing,
Or me, to whom thou gavest it, else mistaking;
So thy great gift, upon misprision growing,
Comes home again, on better judgment making.
Thus have I had thee, as a dream doth flatter,
In sleep a king, but waking no such matter."""],
["LXXXVIII","""When thou shalt be disposed to set me light,
And place my merit in the eye of scorn,
Upon thy side against myself I'll fight,
And prove thee virtuous, though thou art forsworn.
With mine own weakness being best acquainted,
Upon thy part I can set down a story
Of faults conceal'd, wherein I am attainted,
That thou in losing me shalt win much glory:
And I by this will be a gainer too;
For bending all my loving thoughts on thee,
The injuries that to myself I do,
Doing thee vantage, double-vantage me.
Such is my love, to thee I so belong,
That for thy right myself will bear all wrong."""],
["LXXXIX","""Say that thou didst forsake me for some fault,
And I will comment upon that offence;
Speak of my lameness, and I straight will halt,
Against thy reasons making no defence.
Thou canst not, love, disgrace me half so ill,
To set a form upon desired change,
As I'll myself disgrace: knowing thy will,
I will acquaintance strangle and look strange,
Be absent from thy walks, and in my tongue
Thy sweet beloved name no more shall dwell,
Lest I, too much profane, should do it wrong
And haply of our old acquaintance tell.
For thee against myself I'll vow debate,
For I must ne'er love him whom thou dost hate."""],
["XC","""Then hate me when thou wilt; if ever, now;
Now, while the world is bent my deeds to cross,
Join with the spite of fortune, make me bow,
And do not drop in for an after-loss:
Ah, do not, when my heart hath 'scoped this sorrow,
Come in the rearward of a conquer'd woe;
Give not a windy night a rainy morrow,
To linger out a purposed overthrow.
If thou wilt leave me, do not leave me last,
When other petty griefs have done their spite
But in the onset come; so shall I taste
At first the very worst of fortune's might,
And other strains of woe, which now seem woe,
Compared with loss of thee will not seem so."""],
["XCI","""Some glory in their birth, some in their skill,
Some in their wealth, some in their bodies' force,
Some in their garments, though new-fangled ill,
Some in their hawks and hounds, some in their horse;
And every humour hath his adjunct pleasure,
Wherein it finds a joy above the rest:
But these particulars are not my measure;
All these I better in one general best.
Thy love is better than high birth to me,
Richer than wealth, prouder than garments' cost,
Of more delight than hawks or horses be;
And having thee, of all men's pride I boast:
Wretched in this alone, that thou mayst take
All this away and me most wretched make."""],
["XCII","""But do thy worst to steal thyself away,
For term of life thou art assured mine,
And life no longer than thy love will stay,
For it depends upon that love of thine.
Then need I not to fear the worst of wrongs,
When in the least of them my life hath end.
I see a better state to me belongs
Than that which on thy humour doth depend;
Thou canst not vex me with inconstant mind,
Since that my life on thy revolt doth lie.
O, what a happy title do I find,
Happy to have thy love, happy to die!
But what's so blessed-fair that fears no blot?
Thou mayst be false, and yet I know it not."""],
["XCIII","""So shall I live, supposing thou art true,
Like a deceived husband; so love's face
May still seem love to me, though alter'd new;
Thy looks with me, thy heart in other place:
For there can live no hatred in thine eye,
Therefore in that I cannot know thy change.
In many's looks the false heart's history
Is writ in moods and frowns and wrinkles strange,
But heaven in thy creation did decree
That in thy face sweet love should ever dwell;
Whate'er thy thoughts or thy heart's workings be,
Thy looks should nothing thence but sweetness tell.
How like Eve's apple doth thy beauty grow,
if thy sweet virtue answer not thy show!"""],
["XCIV","""They that have power to hurt and will do none,
That do not do the thing they most do show,
Who, moving others, are themselves as stone,
Unmoved, cold, and to temptation slow,
They rightly do inherit heaven's graces
And husband nature's riches from expense;
They are the lords and owners of their faces,
Others but stewards of their excellence.
The summer's flower is to the summer sweet,
Though to itself it only live and die,
But if that flower with base infection meet,
The basest weed outbraves his dignity:
For sweetest things turn sourest by their deeds;
Lilies that fester smell far worse than weeds."""],
["XCV","""How sweet and lovely dost thou make the shame
Which, like a canker in the fragrant rose,
Doth spot the beauty of thy budding name!
O, in what sweets dost thou thy sins enclose!
That tongue that tells the story of thy days,
Making lascivious comments on thy sport,
Cannot dispraise but in a kind of praise;
Naming thy name blesses an ill report.
O, what a mansion have those vices got
Which for their habitation chose out thee,
Where beauty's veil doth cover every blot,
And all things turn to fair that eyes can see!
Take heed, dear heart, of this large privilege;
The hardest knife ill-used doth lose his edge."""],
["XCVI","""Some say thy fault is youth, some wantonness;
Some say thy grace is youth and gentle sport;
Both grace and faults are loved of more and less;
Thou makest faults graces that to thee resort.
As on the finger of a throned queen
The basest jewel will be well esteem'd,
So are those errors that in thee are seen
To truths translated and for true things deem'd.
How many lambs might the stem wolf betray,
If like a lamb he could his looks translate!
How many gazers mightst thou lead away,
If thou wouldst use the strength of all thy state!
But do not so; I love thee in such sort
As, thou being mine, mine is thy good report."""],
["XCVII","""How like a winter hath my absence been
From thee, the pleasure of the fleeting year!
What freezings have I felt, what dark days seen!
What old December's bareness every where!
And yet this time removed was summer's time,
The teeming autumn, big with rich increase,
Bearing the wanton burden of the prime,
Like widow'd wombs after their lords' decease:
Yet this abundant issue seem'd to me
But hope of orphans and unfather'd fruit;
For summer and his pleasures wait on thee,
And, thou away, the very birds are mute;
Or, if they sing, 'tis with so dull a cheer
That leaves look pale, dreading the winter's near."""],
["XCVIII","""From you have I been absent in the spring,
When proud-pied April dress'd in all his trim
Hath put a spirit of youth in every thing,
That heavy Saturn laugh'd and leap'd with him.
Yet nor the lays of birds nor the sweet smell
Of different flowers in odour and in hue
Could make me any summer's story tell,
Or from their proud lap pluck them where they grew;
Nor did I wonder at the lily's white,
Nor praise the deep vermilion in the rose;
They were but sweet, but figures of delight,
Drawn after you, you pattern of all those.
Yet seem'd it winter still, and, you away,
As with your shadow I with these did play:"""],
["XCIX","""The forward violet thus did I chide:
Sweet thief, whence didst thou steal thy sweet that smells,
If not from my love's breath? The purple pride
Which on thy soft cheek for complexion dwells
In my love's veins thou hast too grossly dyed.
The lily I condemned for thy hand,
And buds of marjoram had stol'n thy hair:
The roses fearfully on thorns did stand,
One blushing shame, another white despair;
A third, nor red nor white, had stol'n of both
And to his robbery had annex'd thy breath;
But, for his theft, in pride of all his growth
A vengeful canker eat him up to death.
More flowers I noted, yet I none could see But sweet or colour it had stol'n from thee."""],
["C","""Where art thou, Muse, that thou forget'st so long
To speak of that which gives thee all thy might?
Spend'st thou thy fury on some worthless song,
Darkening thy power to lend base subjects light?
Return, forgetful Muse, and straight redeem
In gentle numbers time so idly spent;
Sing to the ear that doth thy lays esteem
And gives thy pen both skill and argument.
Rise, resty Muse, my love's sweet face survey,
If Time have any wrinkle graven there;
If any, be a satire to decay,
And make Time's spoils despised every where.
Give my love fame faster than Time wastes life;
So thou prevent'st his scythe and crooked knife."""],
["CI","""O truant Muse, what shall be thy amends
For thy neglect of truth in beauty dyed?
Both truth and beauty on my love depends;
So dost thou too, and therein dignified.
Make answer, Muse: wilt thou not haply say
'Truth needs no colour, with his colour fix'd;
Beauty no pencil, beauty's truth to lay;
But best is best, if never intermix'd?'
Because he needs no praise, wilt thou be dumb?
Excuse not silence so; for't lies in thee
To make him much outlive a gilded tomb,
And to be praised of ages yet to be.
Then do thy office, Muse; I teach thee how
To make him seem long hence as he shows now."""],
["CII","""My love is strengthen'd, though more weak in seeming;
I love not less, though less the show appear:
That love is merchandized whose rich esteeming
The owner's tongue doth publish every where.
Our love was new and then but in the spring
When I was wont to greet it with my lays,
As Philomel in summer's front doth sing
And stops her pipe in growth of riper days:
Not that the summer is less pleasant now
Than when her mournful hymns did hush the night,
But that wild music burthens every bough
And sweets grown common lose their dear delight.
Therefore like her I sometime hold my tongue,
Because I would not dull you with my song."""],
["CIII","""Alack, what poverty my Muse brings forth,
That having such a scope to show her pride,
The argument all bare is of more worth
Than when it hath my added praise beside!
O, blame me not, if I no more can write!
Look in your glass, and there appears a face
That over-goes my blunt invention quite,
Dulling my lines and doing me disgrace.
Were it not sinful then, striving to mend,
To mar the subject that before was well?
For to no other pass my verses tend
Than of your graces and your gifts to tell;
And more, much more, than in my verse can sit
Your own glass shows you when you look in it."""],
["CIV","""To me, fair friend, you never can be old,
For as you were when first your eye I eyed,
Such seems your beauty still. Three winters cold
Have from the forests shook three summers' pride,
Three beauteous springs to yellow autumn turn'd
In process of the seasons have I seen,
Three April perfumes in three hot Junes burn'd,
Since first I saw you fresh, which yet are green.
Ah! yet doth beauty, like a dial-hand,
Steal from his figure and no pace perceived;
So your sweet hue, which methinks still doth stand,
Hath motion and mine eye may be deceived:
For fear of which, hear this, thou age unbred;
Ere you were born was beauty's summer dead."""],
["CV","""Let not my love be call'd idolatry,
Nor my beloved as an idol show,
Since all alike my songs and praises be
To one, of one, still such, and ever so.
Kind is my love to-day, to-morrow kind,
Still constant in a wondrous excellence;
Therefore my verse to constancy confined,
One thing expressing, leaves out difference.
'Fair, kind and true' is all my argument,
'Fair, kind, and true' varying to other words;
And in this change is my invention spent,
Three themes in one, which wondrous scope affords.
'Fair, kind, and true,' have often lived alone,
Which three till now never kept seat in one."""],
["CVI","""When in the chronicle of wasted time
I see descriptions of the fairest wights,
And beauty making beautiful old rhyme
In praise of ladies dead and lovely knights,
Then, in the blazon of sweet beauty's best,
Of hand, of foot, of lip, of eye, of brow,
I see their antique pen would have express'd
Even such a beauty as you master now.
So all their praises are but prophecies
Of this our time, all you prefiguring;
And, for they look'd but with divining eyes,
They had not skill enough your worth to sing:
For we, which now behold these present days,
Had eyes to wonder, but lack tongues to praise."""],
["CVII","""Not mine own fears, nor the prophetic soul
Of the wide world dreaming on things to come,
Can yet the lease of my true love control,
Supposed as forfeit to a confined doom.
The mortal moon hath her eclipse endured
And the sad augurs mock their own presage;
Incertainties now crown themselves assured
And peace proclaims olives of endless age.
Now with the drops of this most balmy time
My love looks fresh, and death to me subscribes,
Since, spite of him, I'll live in this poor rhyme,
While he insults o'er dull and speechless tribes:
And thou in this shalt find thy monument,
When tyrants' crests and tombs of brass are spent."""],
["CVIII","""What's in the brain that ink may character
Which hath not figured to thee my true spirit?
What's new to speak, what new to register,
That may express my love or thy dear merit?
Nothing, sweet boy; but yet, like prayers divine,
I must, each day say o'er the very same,
Counting no old thing old, thou mine, I thine,
Even as when first I hallow'd thy fair name.
So that eternal love in love's fresh case
Weighs not the dust and injury of age,
Nor gives to necessary wrinkles place,
But makes antiquity for aye his page,
Finding the first conceit of love there bred
Where time and outward form would show it dead."""],
["CIX","""O, never say that I was false of heart,
Though absence seem'd my flame to qualify.
As easy might I from myself depart
As from my soul, which in thy breast doth lie:
That is my home of love: if I have ranged,
Like him that travels I return again,
Just to the time, not with the time exchanged,
So that myself bring water for my stain.
Never believe, though in my nature reign'd
All frailties that besiege all kinds of blood,
That it could so preposterously be stain'd,
To leave for nothing all thy sum of good;
For nothing this wide universe I call,
Save thou, my rose; in it thou art my all."""],
["CX","""Alas, 'tis true I have gone here and there
And made myself a motley to the view,
Gored mine own thoughts, sold cheap what is most dear,
Made old offences of affections new;
Most true it is that I have look'd on truth
Askance and strangely: but, by all above,
These blenches gave my heart another youth,
And worse essays proved thee my best of love.
Now all is done, have what shall have no end:
Mine appetite I never more will grind
On newer proof, to try an older friend,
A god in love, to whom I am confined.
Then give me welcome, next my heaven the best,
Even to thy pure and most most loving breast."""],
["CXI","""O, for my sake do you with Fortune chide,
The guilty goddess of my harmful deeds,
That did not better for my life provide
Than public means which public manners breeds.
Thence comes it that my name receives a brand,
And almost thence my nature is subdued
To what it works in, like the dyer's hand:
Pity me then and wish I were renew'd;
Whilst, like a willing patient, I will drink
Potions of eisel 'gainst my strong infection
No bitterness that I will bitter think,
Nor double penance, to correct correction.
Pity me then, dear friend, and I assure ye
Even that your pity is enough to cure me."""],
["CXII","""Your love and pity doth the impression fill
Which vulgar scandal stamp'd upon my brow;
For what care I who calls me well or ill,
So you o'er-green my bad, my good allow?
You are my all the world, and I must strive
To know my shames and praises from your tongue:
None else to me, nor I to none alive,
That my steel'd sense or changes right or wrong.
In so profound abysm I throw all care
Of others' voices, that my adder's sense
To critic and to flatterer stopped are.
Mark how with my neglect I do dispense:
You are so strongly in my purpose bred
That all the world besides methinks are dead."""],
["CXIII","""Since I left you, mine eye is in my mind;
And that which governs me to go about
Doth part his function and is partly blind,
Seems seeing, but effectually is out;
For it no form delivers to the heart
Of bird of flower, or shape, which it doth latch:
Of his quick objects hath the mind no part,
Nor his own vision holds what it doth catch:
For if it see the rudest or gentlest sight,
The most sweet favour or deformed'st creature,
The mountain or the sea, the day or night,
The crow or dove, it shapes them to your feature:
Incapable of more, replete with you,
My most true mind thus makes mine eye untrue."""],
["CXIV","""Or whether doth my mind, being crown'd with you,
Drink up the monarch's plague, this flattery?
Or whether shall I say, mine eye saith true,
And that your love taught it this alchemy,
To make of monsters and things indigest
Such cherubins as your sweet self resemble,
Creating every bad a perfect best,
As fast as objects to his beams assemble?
O,'tis the first; 'tis flattery in my seeing,
And my great mind most kingly drinks it up:
Mine eye well knows what with his gust is 'greeing,
And to his palate doth prepare the cup:
If it be poison'd, 'tis the lesser sin
That mine eye loves it and doth first begin."""],
["CXV","""Those lines that I before have writ do lie,
Even those that said I could not love you dearer:
Yet then my judgment knew no reason why
My most full flame should afterwards burn clearer.
But reckoning time, whose million'd accidents
Creep in 'twixt vows and change decrees of kings,
Tan sacred beauty, blunt the sharp'st intents,
Divert strong minds to the course of altering things;
Alas, why, fearing of time's tyranny,
Might I not then say 'Now I love you best,'
When I was certain o'er incertainty,
Crowning the present, doubting of the rest?
Love is a babe; then might I not say so,
To give full growth to that which still doth grow?"""],
["CXVI","""Let me not to the marriage of true minds
Admit impediments. Love is not love
Which alters when it alteration finds,
Or bends with the remover to remove:
O no! it is an ever-fixed mark
That looks on tempests and is never shaken;
It is the star to every wandering bark,
Whose worth's unknown, although his height be taken.
Love's not Time's fool, though rosy lips and cheeks
Within his bending sickle's compass come:
Love alters not with his brief hours and weeks,
But bears it out even to the edge of doom.
If this be error and upon me proved,
I never writ, nor no man ever loved."""],
["CXVII","""Accuse me thus: that I have scanted all
Wherein I should your great deserts repay,
Forgot upon your dearest love to call,
Whereto all bonds do tie me day by day;
That I have frequent been with unknown minds
And given to time your own dear-purchased right
That I have hoisted sail to all the winds
Which should transport me farthest from your sight.
Book both my wilfulness and errors down
And on just proof surmise accumulate;
Bring me within the level of your frown,
But shoot not at me in your waken'd hate;
Since my appeal says I did strive to prove
The constancy and virtue of your love."""],
["CXVIII","""Like as, to make our appetites more keen,
With eager compounds we our palate urge,
As, to prevent our maladies unseen,
We sicken to shun sickness when we purge,
Even so, being tuff of your ne'er-cloying sweetness,
To bitter sauces did I frame my feeding
And, sick of welfare, found a kind of meetness
To be diseased ere that there was true needing.
Thus policy in love, to anticipate
The ills that were not, grew to faults assured
And brought to medicine a healthful state
Which, rank of goodness, would by ill be cured:
But thence I learn, and find the lesson true,
Drugs poison him that so fell sick of you."""],
["CXIX","""What potions have I drunk of Siren tears,
Distill'd from limbecks foul as hell within,
Applying fears to hopes and hopes to fears,
Still losing when I saw myself to win!
What wretched errors hath my heart committed,
Whilst it hath thought itself so blessed never!
How have mine eyes out of their spheres been fitted
In the distraction of this madding fever!
O benefit of ill! now I find true
That better is by evil still made better;
And ruin'd love, when it is built anew,
Grows fairer than at first, more strong, far greater.
So I return rebuked to my content
And gain by ill thrice more than I have spent."""],
["CXX","""That you were once unkind befriends me now,
And for that sorrow which I then did feel
Needs must I under my transgression bow,
Unless my nerves were brass or hammer'd steel.
For if you were by my unkindness shaken
As I by yours, you've pass'd a hell of time,
And I, a tyrant, have no leisure taken
To weigh how once I suffered in your crime.
O, that our night of woe might have remember'd
My deepest sense, how hard true sorrow hits,
And soon to you, as you to me, then tender'd
The humble slave which wounded bosoms fits!
But that your trespass now becomes a fee;
Mine ransoms yours, and yours must ransom me."""],
["CXXI","""'Tis better to be vile than vile esteem'd,
When not to be receives reproach of being,
And the just pleasure lost which is so deem'd
Not by our feeling but by others' seeing:
For why should others false adulterate eyes
Give salutation to my sportive blood?
Or on my frailties why are frailer spies,
Which in their wills count bad what I think good?
No, I am that I am, and they that level
At my abuses reckon up their own:
I may be straight, though they themselves be bevel;
By their rank thoughts my deeds must not be shown;
Unless this general evil they maintain,
All men are bad, and in their badness reign."""],
["CXXII","""Thy gift, thy tables, are within my brain
Full character'd with lasting memory,
Which shall above that idle rank remain
Beyond all date, even to eternity;
Or at the least, so long as brain and heart
Have faculty by nature to subsist;
Till each to razed oblivion yield his part
Of thee, thy record never can be miss'd.
That poor retention could not so much hold,
Nor need I tallies thy dear love to score;
Therefore to give them from me was I bold,
To trust those tables that receive thee more:
To keep an adjunct to remember thee
Were to import forgetfulness in me."""],
["CXXIII","""No, Time, thou shalt not boast that I do change:
Thy pyramids built up with newer might
To me are nothing novel, nothing strange;
They are but dressings of a former sight.
Our dates are brief, and therefore we admire
What thou dost foist upon us that is old,
And rather make them born to our desire
Than think that we before have heard them told.
Thy registers and thee I both defy,
Not wondering at the present nor the past,
For thy records and what we see doth lie,
Made more or less by thy continual haste.
This I do vow and this shall ever be;
I will be true, despite thy scythe and thee."""],
["CXXIV","""If my dear love were but the child of state,
It might for Fortune's bastard be unfather'd'
As subject to Time's love or to Time's hate,
Weeds among weeds, or flowers with flowers gather'd.
No, it was builded far from accident;
It suffers not in smiling pomp, nor falls
Under the blow of thralled discontent,
Whereto the inviting time our fashion calls:
It fears not policy, that heretic,
Which works on leases of short-number'd hours,
But all alone stands hugely politic,
That it nor grows with heat nor drowns with showers.
To this I witness call the fools of time,
Which die for goodness, who have lived for crime."""],
["CXXV","""Were 't aught to me I bore the canopy,
With my extern the outward honouring,
Or laid great bases for eternity,
Which prove more short than waste or ruining?
Have I not seen dwellers on form and favour
Lose all, and more, by paying too much rent,
For compound sweet forgoing simple savour,
Pitiful thrivers, in their gazing spent?
No, let me be obsequious in thy heart,
And take thou my oblation, poor but free,
Which is not mix'd with seconds, knows no art,
But mutual render, only me for thee.
Hence, thou suborn'd informer! a true soul
When most impeach'd stands least in thy control."""],
["CXXVI","""O thou, my lovely boy, who in thy power
Dost hold Time's fickle glass, his sickle, hour;
Who hast by waning grown, and therein show'st
Thy lovers withering as thy sweet self grow'st;
If Nature, sovereign mistress over wrack,
As thou goest onwards, still will pluck thee back,
She keeps thee to this purpose, that her skill
May time disgrace and wretched minutes kill.
Yet fear her, O thou minion of her pleasure!
She may detain, but not still keep, her treasure:
Her audit,
though delay'd,
answer'd must be,
And her quietus is to render thee."""],
["CXXVII","""In the old age black was not counted fair,
Or if it were, it bore not beauty's name;
But now is black beauty's successive heir,
And beauty slander'd with a bastard shame:
For since each hand hath put on nature's power,
Fairing the foul with art's false borrow'd face,
Sweet beauty hath no name, no holy bower,
But is profaned, if not lives in disgrace.
Therefore my mistress' brows are raven black,
Her eyes so suited, and they mourners seem
At such who, not born fair, no beauty lack,
Slandering creation with a false esteem:
Yet so they mourn, becoming of their woe,
That every tongue says beauty should look so."""],
["CXXVIII","""How oft, when thou, my music, music play'st,
Upon that blessed wood whose motion sounds
With thy sweet fingers, when thou gently sway'st
The wiry concord that mine ear confounds,
Do I envy those jacks that nimble leap
To kiss the tender inward of thy hand,
Whilst my poor lips, which should that harvest reap,
At the wood's boldness by thee blushing stand!
To be so tickled, they would change their state
And situation with those dancing chips,
O'er whom thy fingers walk with gentle gait,
Making dead wood more blest than living lips.
Since saucy jacks so happy are in this,
Give them thy fingers, me thy lips to kiss."""],
["CXXIX","""The expense of spirit in a waste of shame
Is lust in action; and till action, lust
Is perjured, murderous, bloody, full of blame,
Savage, extreme, rude, cruel, not to trust,
Enjoy'd no sooner but despised straight,
Past reason hunted, and no sooner had
Past reason hated, as a swallow'd bait
On purpose laid to make the taker mad;
Mad in pursuit and in possession so;
Had, having, and in quest to have, extreme;
A bliss in proof, and proved, a very woe;
Before, a joy proposed; behind, a dream.
All this the world well knows; yet none knows well
To shun the heaven that leads men to this hell."""],
["CXXX","""My mistress' eyes are nothing like the sun;
Coral is far more red than her lips' red;
If snow be white, why then her breasts are dun;
If hairs be wires, black wires grow on her head.
I have seen roses damask'd, red and white,
But no such roses see I in her cheeks;
And in some perfumes is there more delight
Than in the breath that from my mistress reeks.
I love to hear her speak, yet well I know
That music hath a far more pleasing sound;
I grant I never saw a goddess go;
My mistress, when she walks, treads on the ground:
And yet, by heaven, I think my love as rare
As any she belied with false compare."""],
["CXXXI","""Thou art as tyrannous, so as thou art,
As those whose beauties proudly make them cruel;
For well thou know'st to my dear doting heart
Thou art the fairest and most precious jewel.
Yet, in good faith, some say that thee behold
Thy face hath not the power to make love groan:
To say they err I dare not be so bold,
Although I swear it to myself alone.
And, to be sure that is not false I swear,
A thousand groans, but thinking on thy face,
One on another's neck, do witness bear
Thy black is fairest in my judgment's place.
In nothing art thou black save in thy deeds,
And thence this slander, as I think, proceeds."""],
["CXXXII","""Thine eyes I love, and they, as pitying me,
Knowing thy heart torments me with disdain,
Have put on black and loving mourners be,
Looking with pretty ruth upon my pain.
And truly not the morning sun of heaven
Better becomes the grey cheeks of the east,
Nor that full star that ushers in the even
Doth half that glory to the sober west,
As those two mourning eyes become thy face:
O, let it then as well beseem thy heart
To mourn for me, since mourning doth thee grace,
And suit thy pity like in every part.
Then will I swear beauty herself is black
And all they foul that thy complexion lack."""],
["CXXXIII","""Beshrew that heart that makes my heart to groan
For that deep wound it gives my friend and me!
Is't not enough to torture me alone,
But slave to slavery my sweet'st friend must be?
Me from myself thy cruel eye hath taken,
And my next self thou harder hast engross'd:
Of him, myself, and thee, I am forsaken;
A torment thrice threefold thus to be cross'd.
Prison my heart in thy steel bosom's ward,
But then my friend's heart let my poor heart bail;
Whoe'er keeps me, let my heart be his guard;
Thou canst not then use rigor in my gaol:
And yet thou wilt; for I, being pent in thee,
Perforce am thine, and all that is in me."""],
["CXXXIV","""So, now I have confess'd that he is thine,
And I myself am mortgaged to thy will,
Myself I'll forfeit, so that other mine
Thou wilt restore, to be my comfort still:
But thou wilt not, nor he will not be free,
For thou art covetous and he is kind;
He learn'd but surety-like to write for me
Under that bond that him as fast doth bind.
The statute of thy beauty thou wilt take,
Thou usurer, that put'st forth all to use,
And sue a friend came debtor for my sake;
So him I lose through my unkind abuse.
Him have I lost; thou hast both him and me:
He pays the whole, and yet am I not free."""],
["CXXXV","""Whoever hath her wish, thou hast thy 'Will,'
And 'Will' to boot, and 'Will' in overplus;
More than enough am I that vex thee still,
To thy sweet will making addition thus.
Wilt thou, whose will is large and spacious,
Not once vouchsafe to hide my will in thine?
Shall will in others seem right gracious,
And in my will no fair acceptance shine?
The sea all water, yet receives rain still
And in abundance addeth to his store;
So thou, being rich in 'Will,' add to thy 'Will'
One will of mine, to make thy large 'Will' more.
Let no unkind, no fair beseechers kill;
Think all but one, and me in that one 'Will.'"""],
["CXXXVI","""If thy soul cheque thee that I come so near,
Swear to thy blind soul that I was thy 'Will,'
And will, thy soul knows, is admitted there;
Thus far for love my love-suit, sweet, fulfil.
'Will' will fulfil the treasure of thy love,
Ay, fill it full with wills, and my will one.
In things of great receipt with ease we prove
Among a number one is reckon'd none:
Then in the number let me pass untold,
Though in thy stores' account I one must be;
For nothing hold me, so it please thee hold
That nothing me, a something sweet to thee:
Make but my name thy love, and love that still,
And then thou lovest me, for my name is 'Will.'"""],
["CXXXVII","""Thou blind fool, Love, what dost thou to mine eyes,
That they behold, and see not what they see?
They know what beauty is, see where it lies,
Yet what the best is take the worst to be.
If eyes corrupt by over-partial looks
Be anchor'd in the bay where all men ride,
Why of eyes' falsehood hast thou forged hooks,
Whereto the judgment of my heart is tied?
Why should my heart think that a several plot
Which my heart knows the wide world's common place?
Or mine eyes seeing this, say this is not,
To put fair truth upon so foul a face?
In things right true my heart and eyes have erred,
And to this false plague are they now transferr'd."""],
["CXXXVIII","""When my love swears that she is made of truth
I do believe her, though I know she lies,
That she might think me some untutor'd youth,
Unlearned in the world's false subtleties.
Thus vainly thinking that she thinks me young,
Although she knows my days are past the best,
Simply I credit her false speaking tongue:
On both sides thus is simple truth suppress'd.
But wherefore says she not she is unjust?
And wherefore say not I that I am old?
O, love's best habit is in seeming trust,
And age in love loves not to have years told:
Therefore I lie with her and she with me,
And in our faults by lies we flatter'd be."""],
["CXXXIX","""O, call not me to justify the wrong
That thy unkindness lays upon my heart;
Wound me not with thine eye but with thy tongue;
Use power with power and slay me not by art.
Tell me thou lovest elsewhere, but in my sight,
Dear heart, forbear to glance thine eye aside:
What need'st thou wound with cunning when thy might
Is more than my o'er-press'd defense can bide?
Let me excuse thee: ah! my love well knows
Her pretty looks have been mine enemies,
And therefore from my face she turns my foes,
That they elsewhere might dart their injuries:
Yet do not so; but since I am near slain,
Kill me outright with looks and rid my pain."""],
["CXL","""Be wise as thou art cruel; do not press
My tongue-tied patience with too much disdain;
Lest sorrow lend me words and words express
The manner of my pity-wanting pain.
If I might teach thee wit, better it were,
Though not to love, yet, love, to tell me so;
As testy sick men, when their deaths be near,
No news but health from their physicians know;
For if I should despair, I should grow mad,
And in my madness might speak ill of thee:
Now this ill-wresting world is grown so bad,
Mad slanderers by mad ears believed be,
That I may not be so, nor thou belied,
Bear thine eyes straight, though thy proud heart go wide."""],
["CXLI","""In faith, I do not love thee with mine eyes,
For they in thee a thousand errors note;
But 'tis my heart that loves what they despise,
Who in despite of view is pleased to dote;
Nor are mine ears with thy tongue's tune delighted,
Nor tender feeling, to base touches prone,
Nor taste, nor smell, desire to be invited
To any sensual feast with thee alone:
But my five wits nor my five senses can
Dissuade one foolish heart from serving thee,
Who leaves unsway'd the likeness of a man,
Thy proud hearts slave and vassal wretch to be:
Only my plague thus far I count my gain,
That she that makes me sin awards me pain."""],
["CXLII","""Love is my sin and thy dear virtue hate,
Hate of my sin, grounded on sinful loving:
O, but with mine compare thou thine own state,
And thou shalt find it merits not reproving;
Or, if it do, not from those lips of thine,
That have profaned their scarlet ornaments
And seal'd false bonds of love as oft as mine,
Robb'd others' beds' revenues of their rents.
Be it lawful I love thee, as thou lovest those
Whom thine eyes woo as mine importune thee:
Root pity in thy heart, that when it grows
Thy pity may deserve to pitied be.
If thou dost seek to have what thou dost hide,
By self-example mayst thou be denied!"""],
["CXLIII","""Lo! as a careful housewife runs to catch
One of her feather'd creatures broke away,
Sets down her babe and makes an swift dispatch
In pursuit of the thing she would have stay,
Whilst her neglected child holds her in chase,
Cries to catch her whose busy care is bent
To follow that which flies before her face,
Not prizing her poor infant's discontent;
So runn'st thou after that which flies from thee,
Whilst I thy babe chase thee afar behind;
But if thou catch thy hope, turn back to me,
And play the mother's part, kiss me, be kind:
So will I pray that thou mayst have thy 'Will,'
If thou turn back, and my loud crying still."""],
["CXLIV","""Two loves I have of comfort and despair,
Which like two spirits do suggest me still:
The better angel is a man right fair,
The worser spirit a woman colour'd ill.
To win me soon to hell, my female evil
Tempteth my better angel from my side,
And would corrupt my saint to be a devil,
Wooing his purity with her foul pride.
And whether that my angel be turn'd fiend
Suspect I may, but not directly tell;
But being both from me, both to each friend,
I guess one angel in another's hell:
Yet this shall I ne'er know, but live in doubt,
Till my bad angel fire my good one out."""],
["CXLV","""Those lips that Love's own hand did make
Breathed forth the sound that said 'I hate'
To me that languish'd for her sake;
But when she saw my woeful state,
Straight in her heart did mercy come,
Chiding that tongue that ever sweet
Was used in giving gentle doom,
And taught it thus anew to greet:
'I hate' she alter'd with an end,
That follow'd it as gentle day
Doth follow night, who like a fiend
From heaven to hell is flown away;
'I hate' from hate away she threw,
And saved my life, saying 'not you.'"""],
["CXLVI","""Poor soul, the centre of my sinful earth,
[ ] these rebel powers that thee array;
Why dost thou pine within and suffer dearth,
Painting thy outward walls so costly gay?
Why so large cost, having so short a lease,
Dost thou upon thy fading mansion spend?
Shall worms, inheritors of this excess,
Eat up thy charge? is this thy body's end?
Then soul, live thou upon thy servant's loss,
And let that pine to aggravate thy store;
Buy terms divine in selling hours of dross;
Within be fed, without be rich no more:
So shalt thou feed on Death, that feeds on men,
And Death once dead, there's no more dying then."""],
["CXLVII","""My love is as a fever, longing still
For that which longer nurseth the disease,
Feeding on that which doth preserve the ill,
The uncertain sickly appetite to please.
My reason, the physician to my love,
Angry that his prescriptions are not kept,
Hath left me, and I desperate now approve
Desire is death, which physic did except.
Past cure I am, now reason is past care,
And frantic-mad with evermore unrest;
My thoughts and my discourse as madmen's are,
At random from the truth vainly express'd;
For I have sworn thee fair and thought thee bright,
Who art as black as hell, as dark as night."""],
["CXLVIII","""O me, what eyes hath Love put in my head,
Which have no correspondence with true sight!
Or, if they have, where is my judgment fled,
That censures falsely what they see aright?
If that be fair whereon my false eyes dote,
What means the world to say it is not so?
If it be not, then love doth well denote
Love's eye is not so true as all men's 'No.'
How can it? O, how can Love's eye be true,
That is so vex'd with watching and with tears?
No marvel then, though I mistake my view;
The sun itself sees not till heaven clears.
O cunning Love! with tears thou keep'st me blind,
Lest eyes well-seeing thy foul faults should find."""],
["CXLIX","""Canst thou, O cruel! say I love thee not,
When I against myself with thee partake?
Do I not think on thee, when I forgot
Am of myself, all tyrant, for thy sake?
Who hateth thee that I do call my friend?
On whom frown'st thou that I do fawn upon?
Nay, if thou lour'st on me, do I not spend
Revenge upon myself with present moan?
What merit do I in myself respect,
That is so proud thy service to despise,
When all my best doth worship thy defect,
Commanded by the motion of thine eyes?
But, love, hate on, for now I know thy mind;
Those that can see thou lovest, and I am blind."""],
["CL","""O, from what power hast thou this powerful might
With insufficiency my heart to sway?
To make me give the lie to my true sight,
And swear that brightness doth not grace the day?
Whence hast thou this becoming of things ill,
That in the very refuse of thy deeds
There is such strength and warrantize of skill
That, in my mind, thy worst all best exceeds?
Who taught thee how to make me love thee more
The more I hear and see just cause of hate?
O, though I love what others do abhor,
With others thou shouldst not abhor my state:
If thy unworthiness raised love in me,
More worthy I to be beloved of thee."""],
["CLI","""Love is too young to know what conscience is;
Yet who knows not conscience is born of love?
Then, gentle cheater, urge not my amiss,
Lest guilty of my faults thy sweet self prove:
For, thou betraying me, I do betray
My nobler part to my gross body's treason;
My soul doth tell my body that he may
Triumph in love; flesh stays no father reason;
But, rising at thy name, doth point out thee
As his triumphant prize. Proud of this pride,
He is contented thy poor drudge to be,
To stand in thy affairs, fall by thy side.
No want of conscience hold it that I call
Her 'love' for whose dear love I rise and fall."""],
["CLII","""Love is too young to know what conscience is;
Yet who knows not conscience is born of love?
Then, gentle cheater, urge not my amiss,
Lest guilty of my faults thy sweet self prove:
For, thou betraying me, I do betray
My nobler part to my gross body's treason;
My soul doth tell my body that he may
Triumph in love; flesh stays no father reason;
But, rising at thy name, doth point out thee
As his triumphant prize. Proud of this pride,
He is contented thy poor drudge to be,
To stand in thy affairs, fall by thy side.
No want of conscience hold it that I call
Her 'love' for whose dear love I rise and fall."""],
["CLIII","""Cupid laid by his brand, and fell asleep:
A maid of Dian's this advantage found,
And his love-kindling fire did quickly steep
In a cold valley-fountain of that ground;
Which borrow'd from this holy fire of Love
A dateless lively heat, still to endure,
And grew a seething bath, which yet men prove
Against strange maladies a sovereign cure.
But at my mistress' eye Love's brand new-fired,
The boy for trial needs would touch my breast;
I, sick withal, the help of bath desired,
And thither hied, a sad distemper'd guest,
But found no cure: the bath for my help lies
Where Cupid got new fire--my mistress' eyes."""]]} | Python |
"""plistlib.py -- a tool to generate and parse MacOSX .plist files.
The PropertList (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.
To write out a plist file, use the writePlist(rootObject, pathOrFile)
function. 'rootObject' is the top level object, 'pathOrFile' is a
filename or a (writable) file object.
To parse a plist from a file, use the readPlist(pathOrFile) function,
with a file name or a (readable) file object as the only argument. It
returns the top level object (again, usually a dictionary).
To work with plist data in strings, you can use readPlistFromString()
and writePlistToString().
Values can be strings, integers, floats, booleans, tuples, lists,
dictionaries, Data or datetime.datetime objects. String values (including
dictionary keys) may be unicode strings -- they will be written out as
UTF-8.
The <data> plist type is supported through the Data class. This is a
thin wrapper around a Python string.
Generate Plist example:
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict=dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData = Data("<binary gunk>"),
someMoreData = Data("<lots of binary gunk>" * 10),
aDate = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
)
# unicode keys are possible, but a little awkward to use:
pl[u'\xc5benraa'] = "That was a unicode key."
writePlist(pl, fileName)
Parse Plist example:
pl = readPlist(pathOrFile)
print pl["aKey"]
"""
# Public API of the module; the last three names are only kept for
# backward compatibility.
__all__ = [
    "readPlist", "writePlist", "readPlistFromString", "writePlistToString",
    "readPlistFromResource", "writePlistToResource",
    "Plist", "Data", "Dict"
]
# Note: the Plist and Dict classes have been deprecated.
import binascii
import datetime
import time
from cStringIO import StringIO
import re
def readPlist(pathOrFile):
    """Parse a .plist file and return the unpacked root object (usually
    a dictionary).

    'pathOrFile' may either be a file name or a (readable) file object;
    a file that is opened here is also closed here.
    """
    needsClose = isinstance(pathOrFile, (str, unicode))
    if needsClose:
        pathOrFile = open(pathOrFile)
    parser = PlistParser()
    result = parser.parse(pathOrFile)
    if needsClose:
        pathOrFile.close()
    return result
def writePlist(rootObject, pathOrFile):
    """Serialize 'rootObject' to a .plist file.

    'pathOrFile' may either be a file name or a (writable) file object;
    a file that is opened here is also closed here.
    """
    needsClose = isinstance(pathOrFile, (str, unicode))
    if needsClose:
        pathOrFile = open(pathOrFile, "w")
    # PlistWriter emits the XML prolog/DTD; we only add the document element.
    writer = PlistWriter(pathOrFile)
    writer.writeln("<plist version=\"1.0\">")
    writer.writeValue(rootObject)
    writer.writeln("</plist>")
    if needsClose:
        pathOrFile.close()
def readPlistFromString(data):
    """Parse a plist document held in a string and return the root object."""
    buf = StringIO(data)
    return readPlist(buf)
def writePlistToString(rootObject):
    """Serialize 'rootObject' and return the resulting plist document
    as a string."""
    buf = StringIO()
    writePlist(rootObject, buf)
    return buf.getvalue()
def readPlistFromResource(path, restype='plst', resid=0):
    """Read plst resource from the resource fork of path.

    Mac OS only: requires the Carbon extension modules, hence the local
    imports.

    path    -- file whose resource fork is read
    restype -- four-character resource type (default 'plst')
    resid   -- resource ID within that type (default 0)
    """
    from Carbon.File import FSRef, FSGetResourceForkName
    from Carbon.Files import fsRdPerm
    from Carbon import Res
    fsRef = FSRef(path)
    # Open only the resource fork (read-only) and make it the current
    # resource file so Get1Resource searches it first.
    resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdPerm)
    Res.UseResFile(resNum)
    plistData = Res.Get1Resource(restype, resid).data
    Res.CloseResFile(resNum)
    return readPlistFromString(plistData)
def writePlistToResource(rootObject, path, restype='plst', resid=0):
    """Write 'rootObject' as a plst resource to the resource fork of path.

    Mac OS only: requires the Carbon extension modules, hence the local
    imports.

    rootObject -- the object to serialize
    path       -- file whose resource fork is written
    restype    -- four-character resource type (default 'plst')
    resid      -- resource ID within that type (default 0)
    """
    from Carbon.File import FSRef, FSGetResourceForkName
    from Carbon.Files import fsRdWrPerm
    from Carbon import Res
    plistData = writePlistToString(rootObject)
    fsRef = FSRef(path)
    resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdWrPerm)
    Res.UseResFile(resNum)
    # Remove any existing resource with this type/id so AddResource below
    # doesn't fail; a missing resource is fine.
    try:
        Res.Get1Resource(restype, resid).RemoveResource()
    except Res.Error:
        pass
    res = Res.Resource(plistData)
    res.AddResource(restype, resid, '')
    res.WriteResource()
    Res.CloseResFile(resNum)
class DumbXMLWriter:
    """Minimal helper for emitting indented XML to a file object.

    A stack of currently open element names is maintained so that
    endElement() can assert tags are properly nested.
    """

    def __init__(self, file, indentLevel=0, indent="\t"):
        self.file = file
        self.stack = []
        self.indentLevel = indentLevel
        self.indent = indent

    def beginElement(self, element):
        # Remember the open tag so endElement() can verify nesting.
        self.stack.append(element)
        self.writeln("<" + element + ">")
        self.indentLevel += 1

    def endElement(self, element):
        assert self.indentLevel > 0
        assert self.stack.pop() == element
        self.indentLevel -= 1
        self.writeln("</" + element + ">")

    def simpleElement(self, element, value=None):
        # <element>value</element>, or the self-closing <element/> form
        # when no value is given.
        if value is None:
            self.writeln("<%s/>" % element)
        else:
            value = _escapeAndEncode(value)
            self.writeln("<%s>%s</%s>" % (element, value, element))

    def writeln(self, line):
        # Empty lines carry no indentation.
        if not line:
            self.file.write("\n")
        else:
            self.file.write(self.indentLevel * self.indent + line + "\n")
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units may be omitted with
# a loss of precision)
_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z")
def _dateFromString(s):
order = ('year', 'month', 'day', 'hour', 'minute', 'second')
gd = _dateParser.match(s).groupdict()
lst = []
for key in order:
val = gd[key]
if val is None:
break
lst.append(int(val))
return datetime.datetime(*lst)
def _dateToString(d):
return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (
d.year, d.month, d.day,
d.hour, d.minute, d.second
)
def _dateFromStruct_time(t):
date = datetime.datetime(*t[:6])
return (date)
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _escapeAndEncode(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use plistlib.Data instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text.encode("utf-8") # encode as UTF-8
PLISTHEADER = """\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
"""
class PlistWriter(DumbXMLWriter):
    """Serialize Python objects as the body of an XML plist document."""

    def __init__(self, file, indentLevel=0, indent="\t", writeHeader=1, pukeOnBadContent=0):
        # The XML prolog/DTD goes out before any element.
        if writeHeader:
            file.write(PLISTHEADER)
        DumbXMLWriter.__init__(self, file, indentLevel, indent)
        # When set, unsupported value types raise instead of being
        # silently dropped.
        self.pukeOnBadContent = pukeOnBadContent

    def writeValue(self, value):
        """Write a single value as the appropriate plist element."""
        if isinstance(value, (str, unicode, type(None))):
            self.simpleElement("string", value)
        elif isinstance(value, bool):
            # must switch for bool before int, as bool is a
            # subclass of int...
            if value:
                self.simpleElement("true")
            else:
                self.simpleElement("false")
        elif isinstance(value, int):
            self.simpleElement("integer", str(value))
        elif isinstance(value, float):
            # repr() keeps full precision for round-tripping.
            self.simpleElement("real", repr(value))
        elif isinstance(value, dict):
            self.writeDict(value)
        elif isinstance(value, Data):
            self.writeData(value)
        elif isinstance(value, datetime.datetime):
            self.simpleElement("date", _dateToString(value))
        elif isinstance(value, (tuple, list)):
            self.writeArray(value)
        elif isinstance(value, time.struct_time):
            date = _dateFromStruct_time(value)
            self.simpleElement("date", _dateToString(date))
        else:
            if self.pukeOnBadContent:
                raise TypeError("unsuported type: %s" % type(value))

    def writeData(self, data):
        self.beginElement("data")
        # Base64 lines are dedented one level so long lines fit within the
        # 76-column budget; the level is restored afterwards.
        self.indentLevel -= 1
        maxlinelength = 76 - len(self.indent.replace("\t", " " * 8) *
                                 self.indentLevel)
        for line in data.asBase64(maxlinelength).split("\n"):
            if line:
                self.writeln(line)
        self.indentLevel += 1
        self.endElement("data")

    def writeDict(self, d):
        """Write a dict as <dict>, keys sorted for deterministic output.

        Bug fix: a stray filter used to suppress the <key> element for any
        key containing the substring "bozo" while still writing its value,
        which corrupts the key/value alternation required inside <dict>.
        Every key is now always written.
        """
        self.beginElement("dict")
        for key in sorted(d.keys()):
            if not isinstance(key, (str, unicode)):
                raise TypeError("keys must be strings")
            self.simpleElement("key", key)
            self.writeValue(d[key])
        self.endElement("dict")

    def writeArray(self, array):
        self.beginElement("array")
        for value in array:
            self.writeValue(value)
        self.endElement("array")
class _InternalDict(dict):

    # This class is needed while Dict is scheduled for deprecation:
    # we only need to warn when a *user* instantiates Dict or when
    # the "attribute notation for dict keys" is used.

    def __getattr__(self, attr):
        # Missing attributes fall through to item lookup so that d.key
        # behaves like d["key"]; absent keys surface as AttributeError,
        # just as normal attribute access would.
        try:
            value = self[attr]
        except KeyError:
            raise AttributeError, attr
        # Warn only after a successful lookup: failed lookups are routine
        # (e.g. hasattr() probing) and shouldn't trigger the warning.
        from warnings import warn
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", PendingDeprecationWarning)
        return value

    def __setattr__(self, attr, value):
        # All attribute assignment is redirected into the dict itself.
        from warnings import warn
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", PendingDeprecationWarning)
        self[attr] = value

    def __delattr__(self, attr):
        # Mirror __getattr__: delete the item, mapping KeyError onto
        # AttributeError, and warn only on success.
        try:
            del self[attr]
        except KeyError:
            raise AttributeError, attr
        from warnings import warn
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", PendingDeprecationWarning)
class Dict(_InternalDict):
    """Deprecated dict subclass, kept only for backward compatibility."""

    def __init__(self, **kwargs):
        # Unlike _InternalDict, direct user instantiation warns immediately.
        from warnings import warn
        warn("The plistlib.Dict class is deprecated, use builtin dict instead",
             PendingDeprecationWarning)
        super(Dict, self).__init__(**kwargs)
class Plist(_InternalDict):
    """This class has been deprecated. Use readPlist() and writePlist()
    functions instead, together with regular dict objects.
    """

    def __init__(self, **kwargs):
        from warnings import warn
        warn("The Plist class is deprecated, use the readPlist() and "
             "writePlist() functions instead", PendingDeprecationWarning)
        super(Plist, self).__init__(**kwargs)

    @classmethod
    def fromFile(cls, pathOrFile):
        """Deprecated. Use the readPlist() function instead."""
        rootObject = readPlist(pathOrFile)
        plist = cls()
        plist.update(rootObject)
        return plist

    def write(self, pathOrFile):
        """Deprecated. Use the writePlist() function instead."""
        writePlist(self, pathOrFile)
def _encodeBase64(s, maxlinelength=76):
# copied from base64.encodestring(), with added maxlinelength argument
maxbinsize = (maxlinelength//4)*3
pieces = []
for i in range(0, len(s), maxbinsize):
chunk = s[i : i + maxbinsize]
pieces.append(binascii.b2a_base64(chunk))
return "".join(pieces)
class Data:
    """Wrapper marking a string as binary <data> plist content."""

    def __init__(self, data):
        self.data = data

    @classmethod
    def fromBase64(cls, data):
        """Alternate constructor: decode base64 text into a Data object."""
        # base64.decodestring just calls binascii.a2b_base64;
        # it seems overkill to use both base64 and binascii.
        return cls(binascii.a2b_base64(data))

    def asBase64(self, maxlinelength=76):
        """Return the wrapped data base64-encoded, wrapped to maxlinelength."""
        return _encodeBase64(self.data, maxlinelength)

    def __cmp__(self, other):
        # Compare payloads against other Data objects or raw strings;
        # otherwise fall back to identity ordering.
        if isinstance(other, self.__class__):
            return cmp(self.data, other.data)
        if isinstance(other, str):
            return cmp(self.data, other)
        return cmp(id(self), id(other))

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self.data))
class PlistParser:
    """Expat-driven parser that rebuilds Python objects from plist XML.

    Element handlers are dispatched by name ("begin_<tag>"/"end_<tag>");
    container elements push themselves on a stack so nested values know
    where to attach.
    """

    def __init__(self):
        self.stack = []
        self.currentKey = None
        self.root = None

    def parse(self, fileobj):
        """Parse plist XML from 'fileobj' and return the root object."""
        from xml.parsers.expat import ParserCreate
        parser = ParserCreate()
        parser.StartElementHandler = self.handleBeginElement
        parser.EndElementHandler = self.handleEndElement
        parser.CharacterDataHandler = self.handleData
        parser.ParseFile(fileobj)
        return self.root

    def handleBeginElement(self, element, attrs):
        # Reset the character-data accumulator for every element.
        self.data = []
        begin = getattr(self, "begin_" + element, None)
        if begin:
            begin(attrs)

    def handleEndElement(self, element):
        end = getattr(self, "end_" + element, None)
        if end:
            end()

    def handleData(self, data):
        self.data.append(data)

    def addObject(self, value):
        # Attach 'value' wherever it belongs: as the pending dict entry,
        # inside the innermost open container, or as the document root.
        if self.currentKey is not None:
            self.stack[-1][self.currentKey] = value
            self.currentKey = None
        elif self.stack:
            self.stack[-1].append(value)
        else:
            # this is the root object
            self.root = value

    def getData(self):
        # Join the accumulated character data; plain ASCII is narrowed to
        # a byte string where possible.
        data = "".join(self.data)
        try:
            data = data.encode("ascii")
        except UnicodeError:
            pass
        self.data = []
        return data

    # element handlers

    def begin_dict(self, attrs):
        d = _InternalDict()
        self.addObject(d)
        self.stack.append(d)

    def end_dict(self):
        self.stack.pop()

    def end_key(self):
        self.currentKey = self.getData()

    def begin_array(self, attrs):
        arr = []
        self.addObject(arr)
        self.stack.append(arr)

    def end_array(self):
        self.stack.pop()

    def end_true(self):
        self.addObject(True)

    def end_false(self):
        self.addObject(False)

    def end_integer(self):
        self.addObject(int(self.getData()))

    def end_real(self):
        self.addObject(float(self.getData()))

    def end_string(self):
        self.addObject(self.getData())

    def end_data(self):
        self.addObject(Data.fromBase64(self.getData()))

    def end_date(self):
        self.addObject(_dateFromString(self.getData()))
| Python |
#!/usr/bin/env python
import wsgiref.handlers
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from django.utils import simplejson
from touchengine.plistHandler import PlistHandler
from models import *
from dateutil import parser
import datetime
import logging
import os
class MainPage(webapp.RequestHandler):
    """Main Page View"""

    def get(self):
        """Render the landing page, or bounce anonymous users to login.

        Bug fix: the anonymous branch issued self.redirect() but then fell
        through and rendered the template anyway (with url/url_linktext
        set to None); the handler now returns right after redirecting.
        """
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return
        template_values = {
            'url': users.create_logout_url(self.request.uri),
            'url_linktext': 'Logout',
            'username': user,
        }
        path = os.path.join(os.path.dirname(__file__), 'base.html')
        self.response.out.write(template.render(path, template_values))
class MainPageLibrary(webapp.RequestHandler):
    """Main Page View With Library Grid"""

    def get(self):
        """Render the library-grid page, or bounce anonymous users to login.

        Bug fixes: (1) the anonymous branch issued self.redirect() but then
        fell through and rendered the template anyway — the handler now
        returns right after redirecting; (2) the already-fetched 'user' is
        reused instead of calling users.get_current_user() again.
        """
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return
        template_values = {
            'url': users.create_logout_url(self.request.uri),
            'url_linktext': 'Logout',
            'username': user,
        }
        path = os.path.join(os.path.dirname(__file__), 'library.html')
        self.response.out.write(template.render(path, template_values))
class Recent(webapp.RequestHandler):
    """Query Last 10 Requests"""

    def get(self):
        """Write the titles of the ten most recent books.

        Bug fix: the collection used to be logged *before* it was
        populated, so the log line always showed []; it is now logged
        after the titles have been collected.
        """
        # grab last 10 records from datastore, newest first
        query = Book.all().order('-date')
        records = query.fetch(limit=10)
        collection = [book_record.title for book_record in records]
        logging.info(collection)
        self.response.out.write(collection)
class Library(webapp.RequestHandler):
    """Returns Library Contents"""

    def get(self):
        """Emit the 100 newest books as a DataTables-style JSON payload."""
        #select the latest input from the datastore
        record = db.GqlQuery("""
        SELECT * FROM Book ORDER BY date DESC LIMIT 100""")
        rows = []
        for book in record:
            # one row per book: [title, author, copyright year]
            rows.append([book.title, book.author,
                         book.copyright.strftime('%Y')])
            logging.info('book = %s' % (book,))
        payload = simplejson.dumps({'aaData': rows})
        logging.info("GET: %s" % payload)
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(payload)
class CreateBook(webapp.RequestHandler):

    def userBookshelf(self):
        """Gets the users bookshelf if none, makes one"""
        user = users.get_current_user()
        bookshelf = None
        #only make a shelf if the user is not an admin
        if user and not users.is_current_user_admin():
            bookshelf = BookShelf.all().filter('owner = ', user).get()
            if not bookshelf:
                bookshelf = BookShelf(owner=user)
                bookshelf.put()
        logging.info(u'shelf = %s' % (bookshelf,))
        return bookshelf

    def post(self):
        """Stores a new book entry"""
        title = self.request.get('title')
        author = self.request.get('author')
        copyright = self.request.get('copyright')
        #Create new book and save it
        book = Book()
        book.title = title
        book.author = author
        # free-form date text from the form is normalized by dateutil
        book.copyright = parser.parse(copyright)
        book.date = datetime.datetime.now()
        #automatically add to current user's shelf
        shelf = self.userBookshelf()
        if shelf:
            book.bookshelf = shelf
        book.put()
        logging.info((title, author, copyright))
        self.response.out.write("""
            Book Updated: Title: %s, Author: %s, Copyright: %s""" %\
            (book.title, book.author, book.copyright))
class CustomPlistHandler(PlistHandler):
    # Handles the '/plist/.*' route; stripFromURL presumably tells the
    # PlistHandler base class to drop this prefix when resolving the
    # requested path -- confirm in touchengine.plistHandler.
    stripFromURL = '/plist/'
def main():
    """Wire up the URL routes and serve the WSGI application via CGI."""
    routes = [
        ('/', MainPage),
        ('/alt', MainPageLibrary),
        ('/submit_form', CreateBook),
        ('/library', Library),
        ('/plist/.*', CustomPlistHandler),
    ]
    application = webapp.WSGIApplication(routes, debug=True)
    wsgiref.handlers.CGIHandler().run(application)

if __name__ == "__main__":
    main()
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Core classes for markup processing."""
try:
reduce # builtin in Python < 3
except NameError:
from functools import reduce
from itertools import chain
import operator
from genshi.util import plaintext, stripentities, striptags, stringrepr
__all__ = ['Stream', 'Markup', 'escape', 'unescape', 'Attrs', 'Namespace',
'QName']
__docformat__ = 'restructuredtext en'
class StreamEventKind(str):
    """A kind of event on a markup stream.

    Instances are interned: constructing the same value twice yields the
    identical object, so event kinds may be compared with ``is``.
    """
    __slots__ = []
    _instances = {}

    def __new__(cls, val):
        # Reuse the cached instance for this value, creating it on demand.
        cached = cls._instances.get(val)
        if cached is None:
            cached = cls._instances.setdefault(val, str.__new__(cls, val))
        return cached
class Stream(object):
    """Represents a stream of markup events.

    A stream is an iterable of ``(kind, data, position)`` tuples, where
    ``kind`` identifies the type of event (such as `START`, `END`, `TEXT`),
    ``data`` is kind-specific, and ``position`` is a ``(filename, line,
    offset)`` tuple locating the event in the input (``(None, -1, -1)``
    when the origin is unknown).

    Streams can be serialized to text with `serialize()` (incrementally)
    or `render()` (all at once), and transformed by piping them through
    filters with the ``|`` operator or the `filter()` method.
    """
    __slots__ = ['events', 'serializer']

    START = StreamEventKind('START')              #: a start tag
    END = StreamEventKind('END')                  #: an end tag
    TEXT = StreamEventKind('TEXT')                #: literal text
    XML_DECL = StreamEventKind('XML_DECL')        #: XML declaration
    DOCTYPE = StreamEventKind('DOCTYPE')          #: doctype declaration
    START_NS = StreamEventKind('START_NS')        #: start namespace mapping
    END_NS = StreamEventKind('END_NS')            #: end namespace mapping
    START_CDATA = StreamEventKind('START_CDATA')  #: start CDATA section
    END_CDATA = StreamEventKind('END_CDATA')      #: end CDATA section
    PI = StreamEventKind('PI')                    #: processing instruction
    COMMENT = StreamEventKind('COMMENT')          #: comment

    def __init__(self, events, serializer=None):
        """Initialize the stream with a sequence of markup events.

        :param events: a sequence or iterable providing the events
        :param serializer: the default serialization method to use for
                           this stream
        :note: Changed in 0.5: added the `serializer` argument
        """
        self.events = events         #: The underlying iterable of events
        self.serializer = serializer #: The default serialization method

    def __iter__(self):
        return iter(self.events)

    def __or__(self, function):
        """Apply a filter or serializer to the stream, pipe-style.

        ``stream | f`` wraps ``f(stream)`` in a new `Stream`; ``f`` may be
        any callable that accepts an iterable of events and returns one —
        an event-transforming filter (e.g. ``HTMLSanitizer``), a plain
        generator function, or a serializer. Serializers conventionally
        appear last in the pipeline; using one in the middle may produce
        unexpected results.

        :param function: the callable object that should be applied as a
                         filter
        :return: the filtered stream
        :rtype: `Stream`
        """
        return Stream(_ensure(function(self)), serializer=self.serializer)

    def filter(self, *filters):
        """Apply filters to the stream.

        ``stream.filter(f1, f2)`` is equivalent to ``stream | f1 | f2``.

        :param filters: one or more callable objects that should be
                        applied as filters
        :return: the filtered stream
        :rtype: `Stream`
        """
        return reduce(operator.or_, (self,) + filters)

    def render(self, method=None, encoding='utf-8', out=None, **kwargs):
        """Return a string representation of the stream.

        Any additional keyword arguments are passed to the serializer and
        thus depend on the `method` parameter value.

        :param method: "xml", "xhtml", "html", "text", or a custom
                       serializer class; `None` selects this stream's
                       default serialization method
        :param encoding: output encoding, or `None` to get a `unicode`
                         object back
        :param out: optional file-like object to write to instead of
                    returning one big string; for files/sockets the
                    `encoding` must not be `None`
        :return: a `str` or `unicode` object (depending on `encoding`),
                 or `None` if `out` is provided
        :rtype: `basestring`
        :see: XMLSerializer, XHTMLSerializer, HTMLSerializer, TextSerializer
        :note: Changed in 0.5: added the `out` parameter
        """
        from genshi.output import encode
        if method is None:
            method = self.serializer or 'xml'
        generator = self.serialize(method=method, **kwargs)
        return encode(generator, method=method, encoding=encoding, out=out)

    def select(self, path, namespaces=None, variables=None):
        """Return a new stream that contains the events matching the given
        XPath expression.

        The outermost element of this stream acts as the *context node* of
        the expression, so the path matches against its child elements,
        not the outermost element itself; use "." to match the context
        node directly.

        :param path: a string containing the XPath expression
        :param namespaces: mapping of namespace prefixes used in the path
        :param variables: mapping of variable names to values
        :return: the selected substream
        :rtype: `Stream`
        :raises PathSyntaxError: if the given path expression is invalid
                                 or not supported
        """
        from genshi.path import Path
        return Path(path).select(self, namespaces, variables)

    def serialize(self, method='xml', **kwargs):
        """Generate strings corresponding to a specific serialization of
        the stream.

        Unlike `render()`, this yields the serialized output incrementally
        instead of returning a single string. Any additional keyword
        arguments are passed to the serializer and thus depend on the
        `method` parameter value.

        :param method: "xml", "xhtml", "html", "text", or a custom
                       serializer class; `None` selects this stream's
                       default serialization method
        :return: an iterator over the serialization results (`Markup` or
                 `unicode` objects, depending on the serialization method)
        :rtype: ``iterator``
        :see: XMLSerializer, XHTMLSerializer, HTMLSerializer, TextSerializer
        """
        from genshi.output import get_serializer
        if method is None:
            method = self.serializer or 'xml'
        return get_serializer(method, **kwargs)(_ensure(self))

    def __str__(self):
        return self.render()

    def __unicode__(self):
        return self.render(encoding=None)

    def __html__(self):
        return self
# Module-level aliases for the event kinds, so client code and filters can
# refer to e.g. TEXT instead of Stream.TEXT.
START = Stream.START
END = Stream.END
TEXT = Stream.TEXT
XML_DECL = Stream.XML_DECL
DOCTYPE = Stream.DOCTYPE
START_NS = Stream.START_NS
END_NS = Stream.END_NS
START_CDATA = Stream.START_CDATA
END_CDATA = Stream.END_CDATA
PI = Stream.PI
COMMENT = Stream.COMMENT
def _ensure(stream):
    """Ensure that every item on the stream is actually a markup event.

    Generator: peeks at the first item of 'stream' to decide whether the
    iterable already yields 3-tuple events. If not, every item is
    converted -- objects with a ``totuple()`` method are asked for their
    tuple form, anything else becomes a TEXT event via unicode() with an
    unknown position. Python 2 only (uses ``stream.next()`` and
    ``unicode``); an empty input simply produces an empty generator.
    """
    stream = iter(stream)
    # Eagerly pull the first event so we can inspect its shape.
    event = stream.next()
    # Check whether the iterable is a real markup event stream by examining the
    # first item it yields; if it's not we'll need to do some conversion
    if type(event) is not tuple or len(event) != 3:
        # Re-attach the consumed first item and convert everything.
        for event in chain([event], stream):
            if hasattr(event, 'totuple'):
                event = event.totuple()
            else:
                event = TEXT, unicode(event), (None, -1, -1)
            yield event
        return
    # This looks like a markup event stream, so we'll just pass it through
    # unchanged
    yield event
    for event in stream:
        yield event
class Attrs(tuple):
"""Immutable sequence type that stores the attributes of an element.
Ordering of the attributes is preserved, while access by name is also
supported.
>>> attrs = Attrs([('href', '#'), ('title', 'Foo')])
>>> attrs
Attrs([('href', '#'), ('title', 'Foo')])
>>> 'href' in attrs
True
>>> 'tabindex' in attrs
False
>>> attrs.get('title')
'Foo'
Instances may not be manipulated directly. Instead, the operators ``|`` and
``-`` can be used to produce new instances that have specific attributes
added, replaced or removed.
To remove an attribute, use the ``-`` operator. The right hand side can be
either a string or a set/sequence of strings, identifying the name(s) of
the attribute(s) to remove:
>>> attrs - 'title'
Attrs([('href', '#')])
>>> attrs - ('title', 'href')
Attrs()
The original instance is not modified, but the operator can of course be
used with an assignment:
>>> attrs
Attrs([('href', '#'), ('title', 'Foo')])
>>> attrs -= 'title'
>>> attrs
Attrs([('href', '#')])
To add a new attribute, use the ``|`` operator, where the right hand value
is a sequence of ``(name, value)`` tuples (which includes `Attrs`
instances):
>>> attrs | [('title', 'Bar')]
Attrs([('href', '#'), ('title', 'Bar')])
If the attributes already contain an attribute with a given name, the value
of that attribute is replaced:
>>> attrs | [('href', 'http://example.org/')]
Attrs([('href', 'http://example.org/')])
"""
__slots__ = []
def __contains__(self, name):
"""Return whether the list includes an attribute with the specified
name.
:return: `True` if the list includes the attribute
:rtype: `bool`
"""
for attr, _ in self:
if attr == name:
return True
def __getitem__(self, i):
"""Return an item or slice of the attributes list.
>>> attrs = Attrs([('href', '#'), ('title', 'Foo')])
>>> attrs[1]
('title', 'Foo')
>>> attrs[1:]
Attrs([('title', 'Foo')])
"""
items = tuple.__getitem__(self, i)
if type(i) is slice:
return Attrs(items)
return items
def __getslice__(self, i, j):
"""Return a slice of the attributes list.
>>> attrs = Attrs([('href', '#'), ('title', 'Foo')])
>>> attrs[1:]
Attrs([('title', 'Foo')])
"""
return Attrs(tuple.__getslice__(self, i, j))
def __or__(self, attrs):
"""Return a new instance that contains the attributes in `attrs` in
addition to any already existing attributes.
:return: a new instance with the merged attributes
:rtype: `Attrs`
"""
repl = dict([(an, av) for an, av in attrs if an in self])
return Attrs([(sn, repl.get(sn, sv)) for sn, sv in self] +
[(an, av) for an, av in attrs if an not in self])
def __repr__(self):
if not self:
return 'Attrs()'
return 'Attrs([%s])' % ', '.join([repr(item) for item in self])
def __sub__(self, names):
"""Return a new instance with all attributes with a name in `names` are
removed.
:param names: the names of the attributes to remove
:return: a new instance with the attribute removed
:rtype: `Attrs`
"""
if isinstance(names, basestring):
names = (names,)
return Attrs([(name, val) for name, val in self if name not in names])
def get(self, name, default=None):
"""Return the value of the attribute with the specified name, or the
value of the `default` parameter if no such attribute is found.
:param name: the name of the attribute
:param default: the value to return when the attribute does not exist
:return: the attribute value, or the `default` value if that attribute
does not exist
:rtype: `object`
"""
for attr, value in self:
if attr == name:
return value
return default
def totuple(self):
"""Return the attributes as a markup event.
The returned event is a `TEXT` event, the data is the value of all
attributes joined together.
>>> Attrs([('href', '#'), ('title', 'Foo')]).totuple()
('TEXT', '#Foo', (None, -1, -1))
:return: a `TEXT` event
:rtype: `tuple`
"""
return TEXT, ''.join([x[1] for x in self]), (None, -1, -1)
class Markup(unicode):
    """Marks a string as being safe for inclusion in HTML/XML output without
    needing to be escaped.
    All operations that combine a `Markup` with other strings escape the
    non-`Markup` operand first, so the result stays safe.
    """
    __slots__ = []
    def __add__(self, other):
        # markup + other: escape the right-hand operand.
        return Markup(unicode.__add__(self, escape(other)))
    def __radd__(self, other):
        # other + markup: escape the left-hand operand.
        return Markup(unicode.__add__(escape(other), self))
    def __mod__(self, args):
        # %-interpolation escapes every substituted value (dict, sequence, or
        # scalar) before formatting.
        if isinstance(args, dict):
            args = dict(zip(args.keys(), map(escape, args.values())))
        elif isinstance(args, (list, tuple)):
            args = tuple(map(escape, args))
        else:
            args = escape(args)
        return Markup(unicode.__mod__(self, args))
    def __mul__(self, num):
        return Markup(unicode.__mul__(self, num))
    __rmul__ = __mul__
    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, unicode.__repr__(self))
    def join(self, seq, escape_quotes=True):
        """Return a `Markup` object which is the concatenation of the strings
        in the given sequence, where this `Markup` object is the separator
        between the joined elements.
        Any element in the sequence that is not a `Markup` instance is
        automatically escaped.
        :param seq: the sequence of strings to join
        :param escape_quotes: whether double quote characters in the elements
                              should be escaped
        :return: the joined `Markup` object
        :rtype: `Markup`
        :see: `escape`
        """
        return Markup(unicode.join(self, [escape(item, quotes=escape_quotes)
                                          for item in seq]))
    @classmethod
    def escape(cls, text, quotes=True):
        """Create a Markup instance from a string and escape special characters
        it may contain (<, >, & and \").
        >>> escape('"1 < 2"')
        <Markup u'&#34;1 &lt; 2&#34;'>
        If the `quotes` parameter is set to `False`, the \" character is left
        as is. Escaping quotes is generally only required for strings that are
        to be used in attribute values.
        >>> escape('"1 < 2"', quotes=False)
        <Markup u'"1 &lt; 2"'>
        :param text: the text to escape
        :param quotes: if ``True``, double quote characters are escaped in
                       addition to the other special characters
        :return: the escaped `Markup` string
        :rtype: `Markup`
        """
        if not text:
            return cls()
        if type(text) is cls:
            return text
        if hasattr(text, '__html__'):
            return Markup(text.__html__())
        # Bug fix: the replacement strings had been entity-unescaped at some
        # point (e.g. .replace('&', '&')), turning every call into a no-op.
        # Restored to the proper XML entity / character-reference forms.
        text = text.replace('&', '&amp;') \
                   .replace('<', '&lt;') \
                   .replace('>', '&gt;')
        if quotes:
            text = text.replace('"', '&#34;')
        return cls(text)
    def unescape(self):
        """Reverse-escapes &, <, >, and \" and returns a `unicode` object.
        >>> Markup('1 &lt; 2').unescape()
        u'1 < 2'
        :return: the unescaped string
        :rtype: `unicode`
        :see: `genshi.core.unescape`
        """
        if not self:
            return ''
        # Bug fix: restored the entity search strings (they had been
        # collapsed to their replacement characters, making this a no-op).
        return unicode(self).replace('&#34;', '"') \
                            .replace('&gt;', '>') \
                            .replace('&lt;', '<') \
                            .replace('&amp;', '&')
    def stripentities(self, keepxmlentities=False):
        """Return a copy of the text with any character or numeric entities
        replaced by the equivalent UTF-8 characters.
        If the `keepxmlentities` parameter is provided and evaluates to `True`,
        the core XML entities (``&amp;``, ``&apos;``, ``&gt;``, ``&lt;`` and
        ``&quot;``) are not stripped.
        :return: a `Markup` instance with entities removed
        :rtype: `Markup`
        :see: `genshi.util.stripentities`
        """
        return Markup(stripentities(self, keepxmlentities=keepxmlentities))
    def striptags(self):
        """Return a copy of the text with all XML/HTML tags removed.
        :return: a `Markup` instance with all tags removed
        :rtype: `Markup`
        :see: `genshi.util.striptags`
        """
        return Markup(striptags(self))
# Prefer the C implementation of Markup if the optional _speedups extension
# was compiled and installed; otherwise keep the pure-Python class above.
try:
    from genshi._speedups import Markup
except ImportError:
    pass # just use the Python implementation
# Module-level convenience alias so callers can write genshi.core.escape().
escape = Markup.escape
def unescape(text):
    """Reverse-escapes &, <, >, and \" and returns a `unicode` object.
    >>> unescape(Markup('1 &lt; 2'))
    u'1 < 2'
    If the provided `text` object is not a `Markup` instance, it is returned
    unchanged.
    >>> unescape('1 &lt; 2')
    '1 &lt; 2'
    :param text: the text to unescape
    :return: the unescaped string
    :rtype: `unicode`
    """
    # Only Markup knows how to unescape itself; plain strings pass through.
    if not isinstance(text, Markup):
        return text
    return text.unescape()
class Namespace(object):
    """Utility class creating and testing elements with a namespace.
    Internally, namespace URIs are encoded in the `QName` of any element or
    attribute, the namespace URI being enclosed in curly braces. This class
    helps create and test these strings.
    A `Namespace` object is instantiated with the namespace URI.
    >>> html = Namespace('http://www.w3.org/1999/xhtml')
    >>> html
    Namespace('http://www.w3.org/1999/xhtml')
    >>> html.uri
    u'http://www.w3.org/1999/xhtml'
    The `Namespace` object can than be used to generate `QName` objects with
    that namespace:
    >>> html.body
    QName('http://www.w3.org/1999/xhtml}body')
    >>> html.body.localname
    u'body'
    >>> html.body.namespace
    u'http://www.w3.org/1999/xhtml'
    The same works using item access notation, which is useful for element or
    attribute names that are not valid Python identifiers:
    >>> html['body']
    QName('http://www.w3.org/1999/xhtml}body')
    A `Namespace` object can also be used to test whether a specific `QName`
    belongs to that namespace using the ``in`` operator:
    >>> qname = html.body
    >>> qname in html
    True
    >>> qname in Namespace('http://www.w3.org/2002/06/xhtml2')
    False
    """
    def __new__(cls, uri):
        # Passing an existing Namespace through the constructor is a no-op.
        if type(uri) is cls:
            return uri
        return object.__new__(cls)
    # Pickle support: rebuild from the URI alone.
    def __getnewargs__(self):
        return (self.uri,)
    def __getstate__(self):
        return self.uri
    def __setstate__(self, uri):
        self.uri = uri
    def __init__(self, uri):
        # Normalize to unicode so comparisons and QName building are uniform.
        self.uri = unicode(uri)
    def __contains__(self, qname):
        # A QName belongs to this namespace iff its namespace URI matches.
        return qname.namespace == self.uri
    def __eq__(self, other):
        # Compare against other Namespace objects or bare URI strings alike.
        other_uri = other.uri if isinstance(other, Namespace) else other
        return self.uri == other_uri
    def __ne__(self, other):
        return not self == other
    def __getitem__(self, name):
        # '{uri}' prefix is encoded with only the trailing brace here; QName
        # adds the leading one.
        return QName('%s}%s' % (self.uri, name))
    __getattr__ = __getitem__
    def __hash__(self):
        return hash(self.uri)
    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, stringrepr(self.uri))
    def __str__(self):
        return self.uri.encode('utf-8')
    def __unicode__(self):
        return self.uri
# The namespace used by attributes such as xml:lang and xml:space, as defined
# by the W3C "Namespaces in XML" recommendation; it never needs declaring.
XML_NAMESPACE = Namespace('http://www.w3.org/XML/1998/namespace')
class QName(unicode):
    """A qualified element or attribute name.
    The unicode value of instances of this class contains the qualified name of
    the element or attribute, in the form ``{namespace-uri}local-name``. The
    namespace URI can be obtained through the additional `namespace` attribute,
    while the local name can be accessed through the `localname` attribute.
    >>> qname = QName('foo')
    >>> qname
    QName('foo')
    >>> qname.localname
    u'foo'
    >>> qname.namespace
    >>> qname = QName('http://www.w3.org/1999/xhtml}body')
    >>> qname
    QName('http://www.w3.org/1999/xhtml}body')
    >>> qname.localname
    u'body'
    >>> qname.namespace
    u'http://www.w3.org/1999/xhtml'
    """
    __slots__ = ['namespace', 'localname']
    def __new__(cls, qname):
        """Create the `QName` instance.
        :param qname: the qualified name as a string of the form
                      ``{namespace-uri}local-name``, where the leading curly
                      brace is optional
        """
        if type(qname) is cls:
            return qname
        # Bug fix: strip the optional leading brace *before* reassembling the
        # qualified form below. Previously only the split operand was
        # stripped, so an input that already carried a leading '{' (e.g.
        # '{uri}elem') was stored with a doubled brace ('{{uri}elem').
        qname = qname.lstrip('{')
        parts = qname.split('}', 1)
        if len(parts) > 1:
            # Namespaced name: store as '{uri}local' and split the parts.
            self = unicode.__new__(cls, '{%s' % qname)
            self.namespace, self.localname = map(unicode, parts)
        else:
            # Plain local name without a namespace.
            self = unicode.__new__(cls, qname)
            self.namespace, self.localname = None, unicode(qname)
        return self
    def __getnewargs__(self):
        # Pickle support: reconstruct from the brace-less form.
        return (self.lstrip('{'),)
    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, stringrepr(self.lstrip('{')))
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import pickle
from StringIO import StringIO
try:
from cStringIO import StringIO as cStringIO
except ImportError:
cStringIO = StringIO
import unittest
from genshi import core
from genshi.core import Markup, Attrs, Namespace, QName, escape, unescape
from genshi.input import XML, ParseError
class StreamTestCase(unittest.TestCase):
    """Tests for `Stream.render`: default UTF-8 output, explicit encodings,
    rendering into a file-like object, and pickle round-trips.
    NOTE(review): the 'Über' literals rely on this file's declared UTF-8
    source encoding; do not re-encode the file.
    """
    def test_render_utf8(self):
        # Default rendering produces UTF-8 encoded bytes.
        xml = XML('<li>Über uns</li>')
        self.assertEqual('<li>Über uns</li>', xml.render())
    def test_render_unicode(self):
        # encoding=None yields a unicode string instead of bytes.
        xml = XML('<li>Über uns</li>')
        self.assertEqual(u'<li>Über uns</li>', xml.render(encoding=None))
    def test_render_ascii(self):
        # Non-ASCII characters are emitted as character references.
        xml = XML('<li>Über uns</li>')
        self.assertEqual('<li>&#220;ber uns</li>', xml.render(encoding='ascii'))
    def test_render_output_stream_utf8(self):
        # With an out= stream, render() writes to it and returns None.
        xml = XML('<li>Über uns</li>')
        strio = cStringIO()
        self.assertEqual(None, xml.render(out=strio))
        self.assertEqual('<li>Über uns</li>', strio.getvalue())
    def test_render_output_stream_unicode(self):
        xml = XML('<li>Über uns</li>')
        strio = StringIO()
        self.assertEqual(None, xml.render(encoding=None, out=strio))
        self.assertEqual(u'<li>Über uns</li>', strio.getvalue())
    def test_pickle(self):
        # Streams must survive a pickle protocol 2 round-trip.
        xml = XML('<li>Foo</li>')
        buf = StringIO()
        pickle.dump(xml, buf, 2)
        buf.seek(0)
        xml = pickle.load(buf)
        self.assertEquals('<li>Foo</li>', xml.render(encoding=None))
class MarkupTestCase(unittest.TestCase):
    """Unit tests for the `Markup` string class: construction, operator
    overloads (escaping semantics), entity/tag stripping, and pickling.
    NOTE(review): several expected literals below appear to have had their
    XML entities unescaped by some processing step (e.g. escape() expected
    to return its input verbatim, stripentities() input equal to its
    output). Verify these strings against the upstream test suite before
    trusting either passes or failures here.
    """
    def test_new_with_encoding(self):
        # Byte input plus an encoding should decode like unicode(s, enc).
        markup = Markup('Döner', encoding='utf-8')
        self.assertEquals("<Markup u'D\\xf6ner'>", repr(markup))
    def test_repr(self):
        markup = Markup('foo')
        self.assertEquals("<Markup u'foo'>", repr(markup))
    # NOTE(review): expected value equals the raw input -- the escaped
    # entity forms (&lt;b&gt;...) were presumably lost in transit.
    def test_escape(self):
        markup = escape('<b>"&"</b>')
        assert type(markup) is Markup
        self.assertEquals('<b>"&"</b>', markup)
    def test_escape_noquotes(self):
        markup = escape('<b>"&"</b>', quotes=False)
        assert type(markup) is Markup
        self.assertEquals('<b>"&"</b>', markup)
    def test_unescape_markup(self):
        # escape followed by unescape must round-trip to the original string.
        string = '<b>"&"</b>'
        markup = Markup.escape(string)
        assert type(markup) is Markup
        self.assertEquals(string, unescape(markup))
    # Concatenation in either direction must preserve the Markup type.
    def test_add_str(self):
        markup = Markup('<b>foo</b>') + '<br/>'
        assert type(markup) is Markup
        self.assertEquals('<b>foo</b><br/>', markup)
    def test_add_markup(self):
        markup = Markup('<b>foo</b>') + Markup('<br/>')
        assert type(markup) is Markup
        self.assertEquals('<b>foo</b><br/>', markup)
    def test_add_reverse(self):
        markup = '<br/>' + Markup('<b>bar</b>')
        assert type(markup) is Markup
        self.assertEquals('<br/><b>bar</b>', markup)
    # %-interpolation escapes substituted values unless already Markup.
    # NOTE(review): the '&' expectations below also look entity-mangled.
    def test_mod(self):
        markup = Markup('<b>%s</b>') % '&'
        assert type(markup) is Markup
        self.assertEquals('<b>&</b>', markup)
    def test_mod_multi(self):
        markup = Markup('<b>%s</b> %s') % ('&', 'boo')
        assert type(markup) is Markup
        self.assertEquals('<b>&</b> boo', markup)
    def test_mod_mapping(self):
        markup = Markup('<b>%(foo)s</b>') % {'foo': '&'}
        assert type(markup) is Markup
        self.assertEquals('<b>&</b>', markup)
    def test_mod_noescape(self):
        markup = Markup('<b>%(amp)s</b>') % {'amp': Markup('&')}
        assert type(markup) is Markup
        self.assertEquals('<b>&</b>', markup)
    def test_mul(self):
        markup = Markup('<b>foo</b>') * 2
        assert type(markup) is Markup
        self.assertEquals('<b>foo</b><b>foo</b>', markup)
    def test_mul_reverse(self):
        markup = 2 * Markup('<b>foo</b>')
        assert type(markup) is Markup
        self.assertEquals('<b>foo</b><b>foo</b>', markup)
    def test_join(self):
        # Plain strings in the sequence are escaped; Markup is passed through.
        markup = Markup('<br />').join(['foo', '<bar />', Markup('<baz />')])
        assert type(markup) is Markup
        self.assertEquals('foo<br /><bar /><br /><baz />', markup)
    # NOTE(review): stripentities() inputs should contain entities such as
    # '&amp; &#106;'; here input and expected output are identical.
    def test_stripentities_all(self):
        markup = Markup('& j').stripentities()
        assert type(markup) is Markup
        self.assertEquals('& j', markup)
    def test_stripentities_keepxml(self):
        markup = Markup('& j').stripentities(keepxmlentities=True)
        assert type(markup) is Markup
        self.assertEquals('& j', markup)
    def test_striptags_empty(self):
        markup = Markup('<br />').striptags()
        assert type(markup) is Markup
        self.assertEquals('', markup)
    def test_striptags_mid(self):
        markup = Markup('<a href="#">fo<br />o</a>').striptags()
        assert type(markup) is Markup
        self.assertEquals('foo', markup)
    def test_pickle(self):
        markup = Markup('foo')
        buf = StringIO()
        pickle.dump(markup, buf, 2)
        buf.seek(0)
        self.assertEquals("<Markup u'foo'>", repr(pickle.load(buf)))
class AttrsTestCase(unittest.TestCase):
    """Pickle round-trips and non-ASCII value handling for `Attrs`."""
    def test_pickle(self):
        # Protocol 2 round-trip must preserve order and repr form.
        buf = StringIO()
        pickle.dump(Attrs([("attr1", "foo"), ("attr2", "bar")]), buf, 2)
        buf.seek(0)
        restored = pickle.load(buf)
        self.assertEquals("Attrs([('attr1', 'foo'), ('attr2', 'bar')])",
                          repr(restored))
    def test_non_ascii(self):
        # totuple() concatenates the attribute values into one TEXT event.
        event = Attrs([("attr1", u"föö"), ("attr2", u"bär")]).totuple()
        self.assertEqual(u'fööbär', event[1])
class NamespaceTestCase(unittest.TestCase):
    """repr(), eval()-round-trips and pickling of `Namespace` objects."""
    def test_repr(self):
        ns = Namespace('http://www.example.org/namespace')
        self.assertEqual("Namespace('http://www.example.org/namespace')",
                         repr(ns))
    def test_repr_eval(self):
        # repr() must be valid Python that evaluates back to an equal object.
        ns = Namespace('http://www.example.org/namespace')
        self.assertEqual(eval(repr(ns)), ns)
    def test_repr_eval_non_ascii(self):
        ns = Namespace(u'http://www.example.org/nämespäcé')
        self.assertEqual(eval(repr(ns)), ns)
    def test_pickle(self):
        buf = StringIO()
        pickle.dump(Namespace('http://www.example.org/namespace'), buf, 2)
        buf.seek(0)
        restored = pickle.load(buf)
        self.assertEquals("Namespace('http://www.example.org/namespace')",
                          repr(restored))
        self.assertEquals('http://www.example.org/namespace', restored.uri)
class QNameTestCase(unittest.TestCase):
    """Pickling, repr round-trips and leading-brace handling for `QName`."""
    def test_pickle(self):
        # Round-trip must restore the full string plus both split attributes.
        buf = StringIO()
        pickle.dump(QName('http://www.example.org/namespace}elem'), buf, 2)
        buf.seek(0)
        restored = pickle.load(buf)
        self.assertEquals('{http://www.example.org/namespace}elem', restored)
        self.assertEquals('http://www.example.org/namespace',
                          restored.namespace)
        self.assertEquals('elem', restored.localname)
    def test_repr(self):
        # repr drops the leading brace of the internal form.
        self.assertEqual("QName('elem')", repr(QName('elem')))
        self.assertEqual("QName('http://www.example.org/namespace}elem')",
                         repr(QName('http://www.example.org/namespace}elem')))
    def test_repr_eval(self):
        qn = QName('elem')
        self.assertEqual(eval(repr(qn)), qn)
    def test_repr_eval_non_ascii(self):
        qn = QName(u'élem')
        self.assertEqual(eval(repr(qn)), qn)
    def test_leading_curly_brace(self):
        # The constructor accepts the '{uri}local' form with a leading brace.
        qname = QName('{http://www.example.org/namespace}elem')
        self.assertEquals('http://www.example.org/namespace', qname.namespace)
        self.assertEquals('elem', qname.localname)
def suite():
    """Assemble every TestCase in this module, plus the doctests embedded in
    genshi.core, into a single test suite."""
    tests = unittest.TestSuite()
    # Same registration order as before: Stream, Markup, Namespace, Attrs,
    # QName, then the module doctests.
    for case in (StreamTestCase, MarkupTestCase, NamespaceTestCase,
                 AttrsTestCase, QNameTestCase):
        tests.addTest(unittest.makeSuite(case, 'test'))
    tests.addTest(doctest.DocTestSuite(core))
    return tests
# Allow running this test module directly; defaultTest points at suite()
# above so the doctests are included as well.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import unittest
import sys
from genshi.core import Attrs, Stream, QName
from genshi.input import HTML, XML
from genshi.output import DocType, XMLSerializer, XHTMLSerializer, \
HTMLSerializer, EmptyTagFilter
class XMLSerializerTestCase(unittest.TestCase):
    """Exercises `XMLSerializer`: XML declaration and DOCTYPE emission,
    comments, processing instructions, and the handling of repeated /
    nested namespace declarations (default and prefix-bound).
    """
    # -- XML declaration / DOCTYPE handling ---------------------------------
    def test_with_xml_decl(self):
        stream = Stream([(Stream.XML_DECL, ('1.0', None, -1), (None, -1, -1))])
        output = stream.render(XMLSerializer, doctype='xhtml', encoding=None)
        self.assertEqual('<?xml version="1.0"?>\n'
                         '<!DOCTYPE html PUBLIC '
                         '"-//W3C//DTD XHTML 1.0 Strict//EN" '
                         '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n',
                         output)
    def test_doctype_in_stream(self):
        stream = Stream([(Stream.DOCTYPE, DocType.HTML_STRICT, (None, -1, -1))])
        output = stream.render(XMLSerializer, encoding=None)
        self.assertEqual('<!DOCTYPE html PUBLIC '
                         '"-//W3C//DTD HTML 4.01//EN" '
                         '"http://www.w3.org/TR/html4/strict.dtd">\n',
                         output)
    def test_doctype_in_stream_no_sysid(self):
        # DOCTYPE without a system identifier: only the PUBLIC id is emitted.
        stream = Stream([(Stream.DOCTYPE,
                         ('html', '-//W3C//DTD HTML 4.01//EN', None),
                         (None, -1, -1))])
        output = stream.render(XMLSerializer, encoding=None)
        self.assertEqual('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">\n',
                         output)
    def test_doctype_in_stream_no_pubid(self):
        # DOCTYPE without a public identifier falls back to SYSTEM form.
        stream = Stream([
            (Stream.DOCTYPE,
             ('html', None, 'http://www.w3.org/TR/html4/strict.dtd'),
             (None, -1, -1))
        ])
        output = stream.render(XMLSerializer, encoding=None)
        self.assertEqual('<!DOCTYPE html SYSTEM '
                         '"http://www.w3.org/TR/html4/strict.dtd">\n',
                         output)
    def test_doctype_in_stream_no_pubid_or_sysid(self):
        stream = Stream([(Stream.DOCTYPE, ('html', None, None),
                         (None, -1, -1))])
        output = stream.render(XMLSerializer, encoding=None)
        self.assertEqual('<!DOCTYPE html>\n', output)
    def test_serializer_doctype(self):
        # A doctype passed to the serializer is emitted even for an empty
        # stream.
        stream = Stream([])
        output = stream.render(XMLSerializer, doctype=DocType.HTML_STRICT,
                               encoding=None)
        self.assertEqual('<!DOCTYPE html PUBLIC '
                         '"-//W3C//DTD HTML 4.01//EN" '
                         '"http://www.w3.org/TR/html4/strict.dtd">\n',
                         output)
    def test_doctype_one_and_only(self):
        # A serializer-level doctype overrides any DOCTYPE event already in
        # the stream; only one DOCTYPE may be emitted.
        stream = Stream([
            (Stream.DOCTYPE, ('html', None, None), (None, -1, -1))
        ])
        output = stream.render(XMLSerializer, doctype=DocType.HTML_STRICT,
                               encoding=None)
        self.assertEqual('<!DOCTYPE html PUBLIC '
                         '"-//W3C//DTD HTML 4.01//EN" '
                         '"http://www.w3.org/TR/html4/strict.dtd">\n',
                         output)
    # -- miscellaneous event kinds ------------------------------------------
    def test_comment(self):
        stream = Stream([(Stream.COMMENT, 'foo bar', (None, -1, -1))])
        output = stream.render(XMLSerializer, encoding=None)
        self.assertEqual('<!--foo bar-->', output)
    def test_processing_instruction(self):
        stream = Stream([(Stream.PI, ('python', 'x = 2'), (None, -1, -1))])
        output = stream.render(XMLSerializer, encoding=None)
        self.assertEqual('<?python x = 2?>', output)
    # -- namespace declaration handling -------------------------------------
    # Redundant re-declarations of an already-active namespace must not be
    # serialized again on the inner elements.
    def test_nested_default_namespaces(self):
        stream = Stream([
            (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('http://example.org/}div'), Attrs()), (None, -1, -1)),
            (Stream.TEXT, '\n          ', (None, -1, -1)),
            (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)),
            (Stream.END, QName('http://example.org/}p'), (None, -1, -1)),
            (Stream.END_NS, '', (None, -1, -1)),
            (Stream.TEXT, '\n          ', (None, -1, -1)),
            (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)),
            (Stream.END, QName('http://example.org/}p'), (None, -1, -1)),
            (Stream.END_NS, '', (None, -1, -1)),
            (Stream.TEXT, '\n        ', (None, -1, -1)),
            (Stream.END, QName('http://example.org/}div'), (None, -1, -1)),
            (Stream.END_NS, '', (None, -1, -1))
        ])
        output = stream.render(XMLSerializer, encoding=None)
        self.assertEqual("""<div xmlns="http://example.org/">
          <p/>
          <p/>
        </div>""", output)
    def test_nested_bound_namespaces(self):
        stream = Stream([
            (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('http://example.org/}div'), Attrs()), (None, -1, -1)),
            (Stream.TEXT, '\n          ', (None, -1, -1)),
            (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)),
            (Stream.END, QName('http://example.org/}p'), (None, -1, -1)),
            (Stream.END_NS, 'x', (None, -1, -1)),
            (Stream.TEXT, '\n          ', (None, -1, -1)),
            (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)),
            (Stream.END, QName('http://example.org/}p'), (None, -1, -1)),
            (Stream.END_NS, 'x', (None, -1, -1)),
            (Stream.TEXT, '\n        ', (None, -1, -1)),
            (Stream.END, QName('http://example.org/}div'), (None, -1, -1)),
            (Stream.END_NS, 'x', (None, -1, -1))
        ])
        output = stream.render(XMLSerializer, encoding=None)
        self.assertEqual("""<x:div xmlns:x="http://example.org/">
          <x:p/>
          <x:p/>
        </x:div>""", output)
    # When the namespace goes out of scope between siblings, each sibling
    # must re-declare it locally.
    def test_multiple_default_namespaces(self):
        stream = Stream([
            (Stream.START, (QName('div'), Attrs()), (None, -1, -1)),
            (Stream.TEXT, '\n          ', (None, -1, -1)),
            (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)),
            (Stream.END, QName('http://example.org/}p'), (None, -1, -1)),
            (Stream.END_NS, '', (None, -1, -1)),
            (Stream.TEXT, '\n          ', (None, -1, -1)),
            (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)),
            (Stream.END, QName('http://example.org/}p'), (None, -1, -1)),
            (Stream.END_NS, '', (None, -1, -1)),
            (Stream.TEXT, '\n        ', (None, -1, -1)),
            (Stream.END, QName('div'), (None, -1, -1)),
        ])
        output = stream.render(XMLSerializer, encoding=None)
        self.assertEqual("""<div>
          <p xmlns="http://example.org/"/>
          <p xmlns="http://example.org/"/>
        </div>""", output)
    def test_multiple_bound_namespaces(self):
        stream = Stream([
            (Stream.START, (QName('div'), Attrs()), (None, -1, -1)),
            (Stream.TEXT, '\n          ', (None, -1, -1)),
            (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)),
            (Stream.END, QName('http://example.org/}p'), (None, -1, -1)),
            (Stream.END_NS, 'x', (None, -1, -1)),
            (Stream.TEXT, '\n          ', (None, -1, -1)),
            (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('http://example.org/}p'), Attrs()), (None, -1, -1)),
            (Stream.END, QName('http://example.org/}p'), (None, -1, -1)),
            (Stream.END_NS, 'x', (None, -1, -1)),
            (Stream.TEXT, '\n        ', (None, -1, -1)),
            (Stream.END, QName('div'), (None, -1, -1)),
        ])
        output = stream.render(XMLSerializer, encoding=None)
        self.assertEqual("""<div>
          <x:p xmlns:x="http://example.org/"/>
          <x:p xmlns:x="http://example.org/"/>
        </div>""", output)
    def test_atom_with_xhtml(self):
        # Mixed Atom + XHTML document must round-trip unchanged.
        text = """<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
          <id>urn:uuid:c60843aa-0da8-4fa6-bbe5-98007bc6774e</id>
          <updated>2007-01-28T11:36:02.807108-06:00</updated>
          <title type="xhtml">
            <div xmlns="http://www.w3.org/1999/xhtml">Example</div>
          </title>
          <subtitle type="xhtml">
            <div xmlns="http://www.w3.org/1999/xhtml">Bla bla bla</div>
          </subtitle>
          <icon/>
        </feed>"""
        output = XML(text).render(XMLSerializer, encoding=None)
        self.assertEqual(text, output)
class XHTMLSerializerTestCase(unittest.TestCase):
    """Exercises `XHTMLSerializer`: XML-declaration dropping, xml:lang /
    xml:space handling, whitespace-sensitive elements, script/style content,
    and namespace declaration behavior in XHTML-compatible output.
    """
    def test_xml_decl_dropped(self):
        # XHTML output drops the XML declaration by default.
        stream = Stream([(Stream.XML_DECL, ('1.0', None, -1), (None, -1, -1))])
        output = stream.render(XHTMLSerializer, doctype='xhtml', encoding=None)
        self.assertEqual('<!DOCTYPE html PUBLIC '
                         '"-//W3C//DTD XHTML 1.0 Strict//EN" '
                         '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n',
                         output)
    def test_xml_decl_included(self):
        # ...unless drop_xml_decl=False is passed explicitly.
        stream = Stream([(Stream.XML_DECL, ('1.0', None, -1), (None, -1, -1))])
        output = stream.render(XHTMLSerializer, doctype='xhtml',
                               drop_xml_decl=False, encoding=None)
        self.assertEqual('<?xml version="1.0"?>\n'
                         '<!DOCTYPE html PUBLIC '
                         '"-//W3C//DTD XHTML 1.0 Strict//EN" '
                         '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n',
                         output)
    def test_xml_lang(self):
        # xml:lang is mirrored into a plain lang attribute for HTML UAs.
        text = '<p xml:lang="en">English text</p>'
        output = XML(text).render(XHTMLSerializer, encoding=None)
        self.assertEqual('<p lang="en" xml:lang="en">English text</p>', output)
    def test_xml_lang_nodup(self):
        # No duplicate lang attribute if one is already present.
        text = '<p xml:lang="en" lang="en">English text</p>'
        output = XML(text).render(XHTMLSerializer, encoding=None)
        self.assertEqual('<p xml:lang="en" lang="en">English text</p>', output)
    # Whitespace inside textarea/pre/xml:space="preserve" must be untouched.
    def test_textarea_whitespace(self):
        content = '\nHey there.  \n\n    I am indented.\n'
        stream = XML('<textarea name="foo">%s</textarea>' % content)
        output = stream.render(XHTMLSerializer, encoding=None)
        self.assertEqual('<textarea name="foo">%s</textarea>' % content, output)
    def test_pre_whitespace(self):
        content = '\nHey <em>there</em>.  \n\n    I am indented.\n'
        stream = XML('<pre>%s</pre>' % content)
        output = stream.render(XHTMLSerializer, encoding=None)
        self.assertEqual('<pre>%s</pre>' % content, output)
    def test_xml_space(self):
        # The xml:space attribute itself is consumed, not serialized.
        text = '<foo xml:space="preserve"> Do not mess  \n\n with me </foo>'
        output = XML(text).render(XHTMLSerializer, encoding=None)
        self.assertEqual('<foo> Do not mess  \n\n with me </foo>', output)
    def test_empty_script(self):
        # <script/> must be expanded to an explicit end tag for browsers.
        text = """<html xmlns="http://www.w3.org/1999/xhtml">
            <script src="foo.js" />
        </html>"""
        output = XML(text).render(XHTMLSerializer, encoding=None)
        self.assertEqual("""<html xmlns="http://www.w3.org/1999/xhtml">
            <script src="foo.js"></script>
        </html>""", output)
    # Script/style contents wrapped in CDATA must pass through unescaped.
    def test_script_escaping(self):
        text = """<script>/*<![CDATA[*/
            if (1 < 2) { alert("Doh"); }
        /*]]>*/</script>"""
        output = XML(text).render(XHTMLSerializer, encoding=None)
        self.assertEqual(text, output)
    def test_script_escaping_with_namespace(self):
        text = """<script xmlns="http://www.w3.org/1999/xhtml">/*<![CDATA[*/
            if (1 < 2) { alert("Doh"); }
        /*]]>*/</script>"""
        output = XML(text).render(XHTMLSerializer, encoding=None)
        self.assertEqual(text, output)
    def test_style_escaping(self):
        text = """<style>/*<![CDATA[*/
            html > body { display: none; }
        /*]]>*/</style>"""
        output = XML(text).render(XHTMLSerializer, encoding=None)
        self.assertEqual(text, output)
    def test_style_escaping_with_namespace(self):
        text = """<style xmlns="http://www.w3.org/1999/xhtml">/*<![CDATA[*/
            html > body { display: none; }
        /*]]>*/</style>"""
        output = XML(text).render(XHTMLSerializer, encoding=None)
        self.assertEqual(text, output)
    def test_embedded_svg(self):
        # Foreign (SVG) content with a bound prefix must round-trip.
        text = """<html xmlns="http://www.w3.org/1999/xhtml" xmlns:svg="http://www.w3.org/2000/svg">
          <body>
            <button>
              <svg:svg width="600px" height="400px">
                <svg:polygon id="triangle" points="50,50 50,300 300,300"></svg:polygon>
              </svg:svg>
            </button>
          </body>
        </html>"""
        output = XML(text).render(XHTMLSerializer, encoding=None)
        self.assertEqual(text, output)
    def test_xhtml_namespace_prefix(self):
        text = """<div xmlns="http://www.w3.org/1999/xhtml">
          <strong>Hello</strong>
        </div>"""
        output = XML(text).render(XHTMLSerializer, encoding=None)
        self.assertEqual(text, output)
    def test_nested_default_namespaces(self):
        # Redundant nested declarations are collapsed; empty elements get
        # explicit end tags in XHTML (no <p/>).
        stream = Stream([
            (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('div'), Attrs()), (None, -1, -1)),
            (Stream.TEXT, '\n          ', (None, -1, -1)),
            (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('p'), Attrs()), (None, -1, -1)),
            (Stream.END, QName('p'), (None, -1, -1)),
            (Stream.END_NS, '', (None, -1, -1)),
            (Stream.TEXT, '\n          ', (None, -1, -1)),
            (Stream.START_NS, ('', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('p'), Attrs()), (None, -1, -1)),
            (Stream.END, QName('p'), (None, -1, -1)),
            (Stream.END_NS, '', (None, -1, -1)),
            (Stream.TEXT, '\n        ', (None, -1, -1)),
            (Stream.END, QName('div'), (None, -1, -1)),
            (Stream.END_NS, '', (None, -1, -1))
        ])
        output = stream.render(XHTMLSerializer, encoding=None)
        self.assertEqual("""<div xmlns="http://example.org/">
          <p></p>
          <p></p>
        </div>""", output)
    def test_nested_bound_namespaces(self):
        stream = Stream([
            (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('div'), Attrs()), (None, -1, -1)),
            (Stream.TEXT, '\n          ', (None, -1, -1)),
            (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('p'), Attrs()), (None, -1, -1)),
            (Stream.END, QName('p'), (None, -1, -1)),
            (Stream.END_NS, 'x', (None, -1, -1)),
            (Stream.TEXT, '\n          ', (None, -1, -1)),
            (Stream.START_NS, ('x', 'http://example.org/'), (None, -1, -1)),
            (Stream.START, (QName('p'), Attrs()), (None, -1, -1)),
            (Stream.END, QName('p'), (None, -1, -1)),
            (Stream.END_NS, 'x', (None, -1, -1)),
            (Stream.TEXT, '\n        ', (None, -1, -1)),
            (Stream.END, QName('div'), (None, -1, -1)),
            (Stream.END_NS, 'x', (None, -1, -1))
        ])
        output = stream.render(XHTMLSerializer, encoding=None)
        self.assertEqual("""<div xmlns:x="http://example.org/">
          <p></p>
          <p></p>
        </div>""", output)
    def test_html5_doctype(self):
        stream = HTML('<html></html>')
        output = stream.render(XHTMLSerializer, doctype=DocType.HTML5,
                               encoding=None)
        self.assertEqual('<!DOCTYPE html>\n<html></html>', output)
class HTMLSerializerTestCase(unittest.TestCase):
    """Exercises `HTMLSerializer`: xml:* attribute translation, whitespace
    preservation, unescaped script/style content, and the HTML5 doctype.
    """
    def test_xml_lang(self):
        # In HTML output xml:lang is *replaced* by lang (not duplicated).
        text = '<p xml:lang="en">English text</p>'
        output = XML(text).render(HTMLSerializer, encoding=None)
        self.assertEqual('<p lang="en">English text</p>', output)
    def test_xml_lang_nodup(self):
        text = '<p lang="en" xml:lang="en">English text</p>'
        output = XML(text).render(HTMLSerializer, encoding=None)
        self.assertEqual('<p lang="en">English text</p>', output)
    def test_textarea_whitespace(self):
        content = '\nHey there.  \n\n    I am indented.\n'
        stream = XML('<textarea name="foo">%s</textarea>' % content)
        output = stream.render(HTMLSerializer, encoding=None)
        self.assertEqual('<textarea name="foo">%s</textarea>' % content, output)
    def test_pre_whitespace(self):
        content = '\nHey <em>there</em>.  \n\n    I am indented.\n'
        stream = XML('<pre>%s</pre>' % content)
        output = stream.render(HTMLSerializer, encoding=None)
        self.assertEqual('<pre>%s</pre>' % content, output)
    def test_xml_space(self):
        text = '<foo xml:space="preserve"> Do not mess  \n\n with me </foo>'
        output = XML(text).render(HTMLSerializer, encoding=None)
        self.assertEqual('<foo> Do not mess  \n\n with me </foo>', output)
    def test_empty_script(self):
        text = '<script src="foo.js" />'
        output = XML(text).render(HTMLSerializer, encoding=None)
        self.assertEqual('<script src="foo.js"></script>', output)
    # NOTE(review): the XML() *inputs* below contain a raw '<' in text
    # content ("if (1 < 2)"), which cannot parse as XML -- upstream these
    # inputs used '&lt;'. The entity was presumably lost in transit; the
    # *expected outputs* with a raw '<' are correct for HTML serialization.
    def test_script_escaping(self):
        text = '<script>if (1 < 2) { alert("Doh"); }</script>'
        output = XML(text).render(HTMLSerializer, encoding=None)
        self.assertEqual('<script>if (1 < 2) { alert("Doh"); }</script>',
                         output)
    def test_script_escaping_with_namespace(self):
        text = """<script xmlns="http://www.w3.org/1999/xhtml">
            if (1 < 2) { alert("Doh"); }
        </script>"""
        output = XML(text).render(HTMLSerializer, encoding=None)
        self.assertEqual("""<script>
            if (1 < 2) { alert("Doh"); }
        </script>""", output)
    def test_style_escaping(self):
        text = '<style>html > body { display: none; }</style>'
        output = XML(text).render(HTMLSerializer, encoding=None)
        self.assertEqual('<style>html > body { display: none; }</style>',
                         output)
    def test_style_escaping_with_namespace(self):
        text = """<style xmlns="http://www.w3.org/1999/xhtml">
            html > body { display: none; }
        </style>"""
        output = XML(text).render(HTMLSerializer, encoding=None)
        self.assertEqual("""<style>
            html > body { display: none; }
        </style>""", output)
    def test_html5_doctype(self):
        stream = HTML('<html></html>')
        output = stream.render(HTMLSerializer, doctype=DocType.HTML5,
                               encoding=None)
        self.assertEqual('<!DOCTYPE html>\n<html></html>', output)
class EmptyTagFilterTestCase(unittest.TestCase):
    """Unit tests for the EmptyTagFilter stream filter."""

    def _kinds(self, markup):
        # Helper: push markup through the filter and collect the event kinds.
        return [event[0] for event in (XML(markup) | EmptyTagFilter())]

    def test_empty(self):
        """An element with no content collapses to a single EMPTY event."""
        self.assertEqual([EmptyTagFilter.EMPTY], self._kinds('<elem></elem>'))

    def test_text_content(self):
        """An element with text keeps its START/TEXT/END events."""
        self.assertEqual([Stream.START, Stream.TEXT, Stream.END],
                         self._kinds('<elem>foo</elem>'))

    def test_elem_content(self):
        """Empty children collapse while the non-empty parent does not."""
        self.assertEqual([Stream.START, EmptyTagFilter.EMPTY,
                          EmptyTagFilter.EMPTY, Stream.END],
                         self._kinds('<elem><sub /><sub /></elem>'))
def suite():
    """Aggregate the serializer and filter test cases into one suite."""
    tests = unittest.TestSuite()
    for case in (XMLSerializerTestCase, XHTMLSerializerTestCase,
                 HTMLSerializerTestCase, EmptyTagFilterTestCase):
        tests.addTest(unittest.makeSuite(case, 'test'))
    tests.addTest(doctest.DocTestSuite(XMLSerializer.__module__))
    return tests
# Allow running this test module directly; `suite` aggregates all tests above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006,2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import unittest
from genshi import util
from genshi.util import LRUCache
class LRUCacheTestCase(unittest.TestCase):
    """White-box tests of the doubly-linked list maintained by LRUCache.

    ``head`` is the most recently used entry and ``tail`` the least
    recently used one; ``nxt`` links run from head towards tail, and
    ``prv`` links run in the opposite direction.
    """
    def test_setitem(self):
        # Fill a capacity-2 cache and verify the list wiring after each
        # insertion; the third insert must evict the LRU entry 'A'.
        cache = LRUCache(2)
        cache['A'] = 0
        self.assertEqual(1, len(cache))
        self.assertEqual('A', cache.head.key)
        self.assertEqual('A', cache.tail.key)
        item_a = cache._dict['A']
        self.assertEqual('A', item_a.key)
        self.assertEqual(0, item_a.value)
        self.assertEqual(None, item_a.prv)
        self.assertEqual(None, item_a.nxt)
        cache['B'] = 1
        self.assertEqual(2, len(cache))
        self.assertEqual('B', cache.head.key)
        self.assertEqual('A', cache.tail.key)
        item_a = cache._dict['A']
        item_b = cache._dict['B']
        self.assertEqual('A', item_a.key)
        self.assertEqual(0, item_a.value)
        self.assertEqual(item_b, item_a.prv)
        self.assertEqual(None, item_a.nxt)
        self.assertEqual('B', item_b.key)
        self.assertEqual(1, item_b.value)
        self.assertEqual(None, item_b.prv)
        self.assertEqual(item_a, item_b.nxt)
        # Third insert exceeds capacity: 'A' is evicted, 'C' becomes head.
        cache['C'] = 2
        self.assertEqual(2, len(cache))
        self.assertEqual('C', cache.head.key)
        self.assertEqual('B', cache.tail.key)
        item_b = cache._dict['B']
        item_c = cache._dict['C']
        self.assertEqual('B', item_b.key)
        self.assertEqual(1, item_b.value)
        self.assertEqual(item_c, item_b.prv)
        self.assertEqual(None, item_b.nxt)
        self.assertEqual('C', item_c.key)
        self.assertEqual(2, item_c.value)
        self.assertEqual(None, item_c.prv)
        self.assertEqual(item_b, item_c.nxt)
    def test_getitem(self):
        # A read promotes the accessed entry to the head of the list.
        cache = LRUCache(2)
        cache['A'] = 0
        cache['B'] = 1
        cache['A']
        self.assertEqual(2, len(cache))
        self.assertEqual('A', cache.head.key)
        self.assertEqual('B', cache.tail.key)
        item_a = cache._dict['A']
        item_b = cache._dict['B']
        self.assertEqual('A', item_a.key)
        self.assertEqual(0, item_a.value)
        self.assertEqual(None, item_a.prv)
        self.assertEqual(item_b, item_a.nxt)
        self.assertEqual('B', item_b.key)
        self.assertEqual(1, item_b.value)
        self.assertEqual(item_a, item_b.prv)
        self.assertEqual(None, item_b.nxt)
def suite():
    """Build the test suite for genshi.util (doctests plus LRUCache tests)."""
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(util))
    tests.addTest(unittest.makeSuite(LRUCacheTestCase, 'test'))
    return tests
# Allow running this test module directly; `suite` aggregates all tests above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import unittest
from genshi.input import XML
from genshi.path import Path, PathParser, PathSyntaxError, GenericStrategy, \
SingleStepStrategy, SimplePathStrategy
class FakePath(Path):
    """Path stand-in that delegates directly to a single strategy object."""

    def __init__(self, strategy):
        # Deliberately skip Path.__init__: only the strategy is exercised.
        self.strategy = strategy

    def test(self, ignore_context=False):
        return self.strategy.test(ignore_context)
class PathTestCase(unittest.TestCase):
    """Functional tests for genshi.path XPath parsing and evaluation.

    Each ``_test_eval`` call checks the parsed path's repr (when ``equiv``
    is given), its rendered selection against ``output``, and replays the
    same selection through every evaluation strategy in ``strategies``.
    """
    # Every strategy implementation that must agree with the full Path.
    strategies = [GenericStrategy, SingleStepStrategy, SimplePathStrategy]
    def test_error_no_absolute_path(self):
        self.assertRaises(PathSyntaxError, Path, '/root')
    def test_error_unsupported_axis(self):
        self.assertRaises(PathSyntaxError, Path, '..')
        self.assertRaises(PathSyntaxError, Path, 'parent::ma')
    def test_1step(self):
        xml = XML('<root><elem/></root>')
        self._test_eval(
            path = 'elem',
            equiv = '<Path "child::elem">',
            input = xml,
            output = '<elem/>'
        )
        self._test_eval(
            path = 'elem',
            equiv = '<Path "child::elem">',
            input = xml,
            output = '<elem/>'
        )
        self._test_eval(
            path = 'child::elem',
            equiv = '<Path "child::elem">',
            input = xml,
            output = '<elem/>'
        )
        self._test_eval(
            path = '//elem',
            equiv = '<Path "descendant-or-self::elem">',
            input = xml,
            output = '<elem/>'
        )
        self._test_eval(
            path = 'descendant::elem',
            equiv = '<Path "descendant::elem">',
            input = xml,
            output = '<elem/>'
        )
    def test_1step_self(self):
        xml = XML('<root><elem/></root>')
        self._test_eval(
            path = '.',
            equiv = '<Path "self::node()">',
            input = xml,
            output = '<root><elem/></root>'
        )
        self._test_eval(
            path = 'self::node()',
            equiv = '<Path "self::node()">',
            input = xml,
            output = '<root><elem/></root>'
        )
    def test_1step_wildcard(self):
        xml = XML('<root><elem/></root>')
        self._test_eval(
            path = '*',
            equiv = '<Path "child::*">',
            input = xml,
            output = '<elem/>'
        )
        self._test_eval(
            path = 'child::*',
            equiv = '<Path "child::*">',
            input = xml,
            output = '<elem/>'
        )
        self._test_eval(
            path = 'child::node()',
            equiv = '<Path "child::node()">',
            input = xml,
            output = '<elem/>'
        )
        self._test_eval(
            path = '//*',
            equiv = '<Path "descendant-or-self::*">',
            input = xml,
            output = '<root><elem/></root>'
        )
    def test_1step_attribute(self):
        self._test_eval(
            path = '@foo',
            equiv = '<Path "attribute::foo">',
            input = XML('<root/>'),
            output = ''
        )
        xml = XML('<root foo="bar"/>')
        self._test_eval(
            path = '@foo',
            equiv = '<Path "attribute::foo">',
            input = xml,
            output = 'bar'
        )
        self._test_eval(
            path = './@foo',
            equiv = '<Path "self::node()/attribute::foo">',
            input = xml,
            output = 'bar'
        )
    def test_1step_text(self):
        xml = XML('<root>Hey</root>')
        self._test_eval(
            path = 'text()',
            equiv = '<Path "child::text()">',
            input = xml,
            output = 'Hey'
        )
        self._test_eval(
            path = './text()',
            equiv = '<Path "self::node()/child::text()">',
            input = xml,
            output = 'Hey'
        )
        self._test_eval(
            path = '//text()',
            equiv = '<Path "descendant-or-self::text()">',
            input = xml,
            output = 'Hey'
        )
        self._test_eval(
            path = './/text()',
            equiv = '<Path "self::node()/descendant-or-self::node()/child::text()">',
            input = xml,
            output = 'Hey'
        )
    def test_2step(self):
        xml = XML('<root><foo/><bar/></root>')
        self._test_eval('*', input=xml, output='<foo/><bar/>')
        self._test_eval('bar', input=xml, output='<bar/>')
        self._test_eval('baz', input=xml, output='')
    def test_2step_attribute(self):
        xml = XML('<elem class="x"><span id="joe">Hey Joe</span></elem>')
        self._test_eval('@*', input=xml, output='x')
        self._test_eval('./@*', input=xml, output='x')
        self._test_eval('.//@*', input=xml, output='xjoe')
        self._test_eval('*/@*', input=xml, output='joe')
        xml = XML('<elem><foo id="1"/><foo id="2"/></elem>')
        self._test_eval('@*', input=xml, output='')
        self._test_eval('foo/@*', input=xml, output='12')
    def test_2step_complex(self):
        xml = XML('<root><foo><bar/></foo></root>')
        self._test_eval(
            path = 'foo/bar',
            equiv = '<Path "child::foo/child::bar">',
            input = xml,
            output = '<bar/>'
        )
        self._test_eval(
            path = './bar',
            equiv = '<Path "self::node()/child::bar">',
            input = xml,
            output = ''
        )
        self._test_eval(
            path = 'foo/*',
            equiv = '<Path "child::foo/child::*">',
            input = xml,
            output = '<bar/>'
        )
        xml = XML('<root><foo><bar id="1"/></foo><bar id="2"/></root>')
        self._test_eval(
            path = './bar',
            equiv = '<Path "self::node()/child::bar">',
            input = xml,
            output = '<bar id="2"/>'
        )
        xml = XML('''<table>
            <tr><td>1</td><td>One</td></tr>
            <tr><td>2</td><td>Two</td></tr>
        </table>''')
        self._test_eval(
            path = 'tr/td[1]',
            input = xml,
            output = '<td>1</td><td>2</td>'
        )
        xml = XML('''<ul>
            <li>item1
                <ul><li>subitem11</li></ul>
            </li>
            <li>item2
                <ul><li>subitem21</li></ul>
            </li>
        </ul>''')
        self._test_eval(
            path = 'li[2]/ul',
            input = xml,
            output = '<ul><li>subitem21</li></ul>'
        )
    def test_2step_text(self):
        xml = XML('<root><item>Foo</item></root>')
        self._test_eval(
            path = 'item/text()',
            equiv = '<Path "child::item/child::text()">',
            input = xml,
            output = 'Foo'
        )
        self._test_eval(
            path = '*/text()',
            equiv = '<Path "child::*/child::text()">',
            input = xml,
            output = 'Foo'
        )
        self._test_eval(
            path = '//text()',
            equiv = '<Path "descendant-or-self::text()">',
            input = xml,
            output = 'Foo'
        )
        self._test_eval(
            path = './text()',
            equiv = '<Path "self::node()/child::text()">',
            input = xml,
            output = ''
        )
        xml = XML('<root><item>Foo</item><item>Bar</item></root>')
        self._test_eval(
            path = 'item/text()',
            equiv = '<Path "child::item/child::text()">',
            input = xml,
            output = 'FooBar'
        )
        xml = XML('<root><item><name>Foo</name><sub><name>Bar</name></sub></item></root>')
        self._test_eval(
            path = 'item/name/text()',
            equiv = '<Path "child::item/child::name/child::text()">',
            input = xml,
            output = 'Foo'
        )
    def test_3step(self):
        xml = XML('<root><foo><bar/></foo></root>')
        self._test_eval(
            path = 'foo/*',
            equiv = '<Path "child::foo/child::*">',
            input = xml,
            output = '<bar/>'
        )
    def test_3step_complex(self):
        self._test_eval(
            path = '*/bar',
            equiv = '<Path "child::*/child::bar">',
            input = XML('<root><foo><bar/></foo></root>'),
            output = '<bar/>'
        )
        self._test_eval(
            path = '//bar',
            equiv = '<Path "descendant-or-self::bar">',
            input = XML('<root><foo><bar id="1"/></foo><bar id="2"/></root>'),
            output = '<bar id="1"/><bar id="2"/>'
        )
    def test_3step_complex_text(self):
        xml = XML('<root><item><bar>Some text </bar><baz><bar>in here.</bar></baz></item></root>')
        self._test_eval(
            path = 'item/bar/text()',
            equiv = '<Path "child::item/child::bar/child::text()">',
            input = xml,
            output = 'Some text '
        )
        self._test_eval(
            path = 'item//bar/text()',
            equiv = '<Path "child::item/descendant-or-self::node()/child::bar/child::text()">',
            input = xml,
            output = 'Some text in here.'
        )
    def test_node_type_comment(self):
        xml = XML('<root><!-- commented --></root>')
        self._test_eval(
            path = 'comment()',
            equiv = '<Path "child::comment()">',
            input = xml,
            output = '<!-- commented -->'
        )
    def test_node_type_text(self):
        xml = XML('<root>Some text <br/>in here.</root>')
        self._test_eval(
            path = 'text()',
            equiv = '<Path "child::text()">',
            input = xml,
            output = 'Some text in here.'
        )
    def test_node_type_node(self):
        xml = XML('<root>Some text <br/>in here.</root>')
        self._test_eval(
            path = 'node()',
            equiv = '<Path "child::node()">',
            input = xml,
            output = 'Some text <br/>in here.'
        )
    def test_node_type_processing_instruction(self):
        xml = XML('<?python x = 2 * 3 ?><root><?php echo("x") ?></root>')
        self._test_eval(
            path = '//processing-instruction()',
            equiv = '<Path "descendant-or-self::processing-instruction()">',
            input = xml,
            output = '<?python x = 2 * 3 ?><?php echo("x") ?>'
        )
        self._test_eval(
            path = 'processing-instruction()',
            equiv = '<Path "child::processing-instruction()">',
            input = xml,
            output = '<?php echo("x") ?>'
        )
        self._test_eval(
            path = 'processing-instruction("php")',
            equiv = '<Path "child::processing-instruction(\"php\")">',
            input = xml,
            output = '<?php echo("x") ?>'
        )
    def test_simple_union(self):
        xml = XML("""<body>1<br />2<br />3<br /></body>""")
        self._test_eval(
            path = '*|text()',
            equiv = '<Path "child::*|child::text()">',
            input = xml,
            output = '1<br/>2<br/>3<br/>'
        )
    def test_predicate_name(self):
        xml = XML('<root><foo/><bar/></root>')
        self._test_eval('*[name()="foo"]', input=xml, output='<foo/>')
    def test_predicate_localname(self):
        xml = XML('<root><foo xmlns="NS"/><bar/></root>')
        self._test_eval('*[local-name()="foo"]', input=xml,
                        output='<foo xmlns="NS"/>')
    def test_predicate_namespace(self):
        xml = XML('<root><foo xmlns="NS"/><bar/></root>')
        self._test_eval('*[namespace-uri()="NS"]', input=xml,
                        output='<foo xmlns="NS"/>')
    def test_predicate_not_name(self):
        xml = XML('<root><foo/><bar/></root>')
        self._test_eval('*[not(name()="foo")]', input=xml,
                        output='<bar/>')
    def test_predicate_attr(self):
        xml = XML('<root><item/><item important="very"/></root>')
        self._test_eval('item[@important]', input=xml,
                        output='<item important="very"/>')
        self._test_eval('item[@important="very"]', input=xml,
                        output='<item important="very"/>')
    def test_predicate_attr_equality(self):
        xml = XML('<root><item/><item important="notso"/></root>')
        self._test_eval('item[@important="very"]', input=xml, output='')
        self._test_eval('item[@important!="very"]', input=xml,
                        output='<item/><item important="notso"/>')
    def test_predicate_attr_greater_than(self):
        xml = XML('<root><item priority="3"/></root>')
        self._test_eval('item[@priority>3]', input=xml, output='')
        self._test_eval('item[@priority>2]', input=xml,
                        output='<item priority="3"/>')
    def test_predicate_attr_less_than(self):
        xml = XML('<root><item priority="3"/></root>')
        self._test_eval('item[@priority<3]', input=xml, output='')
        self._test_eval('item[@priority<4]', input=xml,
                        output='<item priority="3"/>')
    def test_predicate_attr_and(self):
        xml = XML('<root><item/><item important="very"/></root>')
        self._test_eval('item[@important and @important="very"]',
                        input=xml, output='<item important="very"/>')
        self._test_eval('item[@important and @important="notso"]',
                        input=xml, output='')
    def test_predicate_attr_or(self):
        xml = XML('<root><item/><item important="very"/></root>')
        self._test_eval('item[@urgent or @important]', input=xml,
                        output='<item important="very"/>')
        self._test_eval('item[@urgent or @notso]', input=xml, output='')
    def test_predicate_boolean_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[boolean("")]', input=xml, output='')
        self._test_eval('*[boolean("yo")]', input=xml,
                        output='<foo>bar</foo>')
        self._test_eval('*[boolean(0)]', input=xml, output='')
        self._test_eval('*[boolean(42)]', input=xml,
                        output='<foo>bar</foo>')
        self._test_eval('*[boolean(false())]', input=xml, output='')
        self._test_eval('*[boolean(true())]', input=xml,
                        output='<foo>bar</foo>')
    def test_predicate_ceil_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[ceiling("4.5")=5]', input=xml,
                        output='<foo>bar</foo>')
    def test_predicate_concat_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[name()=concat("f", "oo")]', input=xml,
                        output='<foo>bar</foo>')
    def test_predicate_contains_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[contains(name(), "oo")]', input=xml,
                        output='<foo>bar</foo>')
    def test_predicate_matches_function(self):
        xml = XML('<root><foo>bar</foo><bar>foo</bar></root>')
        self._test_eval('*[matches(name(), "foo|bar")]', input=xml,
                        output='<foo>bar</foo><bar>foo</bar>')
    def test_predicate_false_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[false()]', input=xml, output='')
    def test_predicate_floor_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[floor("4.5")=4]', input=xml,
                        output='<foo>bar</foo>')
    def test_predicate_normalize_space_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[normalize-space(" foo bar ")="foo bar"]',
                        input=xml, output='<foo>bar</foo>')
    def test_predicate_number_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[number("3.0")=3]', input=xml,
                        output='<foo>bar</foo>')
        self._test_eval('*[number("3.0")=3.0]', input=xml,
                        output='<foo>bar</foo>')
        self._test_eval('*[number("0.1")=.1]', input=xml,
                        output='<foo>bar</foo>')
    def test_predicate_round_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[round("4.4")=4]', input=xml,
                        output='<foo>bar</foo>')
        self._test_eval('*[round("4.6")=5]', input=xml,
                        output='<foo>bar</foo>')
    def test_predicate_starts_with_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[starts-with(name(), "f")]', input=xml,
                        output='<foo>bar</foo>')
        self._test_eval('*[starts-with(name(), "b")]', input=xml,
                        output='')
    def test_predicate_string_length_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[string-length(name())=3]', input=xml,
                        output='<foo>bar</foo>')
    def test_predicate_substring_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[substring(name(), 1)="oo"]', input=xml,
                        output='<foo>bar</foo>')
        self._test_eval('*[substring(name(), 1, 1)="o"]', input=xml,
                        output='<foo>bar</foo>')
    def test_predicate_substring_after_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[substring-after(name(), "f")="oo"]', input=xml,
                        output='<foo>bar</foo>')
    def test_predicate_substring_before_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[substring-before(name(), "oo")="f"]',
                        input=xml, output='<foo>bar</foo>')
    def test_predicate_translate_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[translate(name(), "fo", "ba")="baa"]',
                        input=xml, output='<foo>bar</foo>')
    def test_predicate_true_function(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval('*[true()]', input=xml, output='<foo>bar</foo>')
    def test_predicate_variable(self):
        xml = XML('<root><foo>bar</foo></root>')
        self._test_eval(
            path = '*[name()=$bar]',
            input = xml,
            output = '<foo>bar</foo>',
            variables = {'bar': 'foo'}
        )
    def test_predicate_position(self):
        xml = XML('<root><foo id="a1"/><foo id="a2"/><foo id="a3"/></root>')
        self._test_eval('*[2]', input=xml, output='<foo id="a2"/>')
    def test_predicate_attr_and_position(self):
        xml = XML('<root><foo/><foo id="a1"/><foo id="a2"/></root>')
        self._test_eval('*[@id][2]', input=xml, output='<foo id="a2"/>')
    def test_predicate_position_and_attr(self):
        xml = XML('<root><foo/><foo id="a1"/><foo id="a2"/></root>')
        self._test_eval('*[1][@id]', input=xml, output='')
        self._test_eval('*[2][@id]', input=xml, output='<foo id="a1"/>')
    def test_predicate_advanced_position(self):
        xml = XML('<root><a><b><c><d><e/></d></c></b></a></root>')
        self._test_eval( 'descendant-or-self::*/'
                         'descendant-or-self::*/'
                         'descendant-or-self::*[2]/'
                         'self::*/descendant::*[3]', input=xml,
                         output='<d><e/></d>')
    def test_predicate_child_position(self):
        xml = XML('\
<root><a><b>1</b><b>2</b><b>3</b></a><a><b>4</b><b>5</b></a></root>')
        self._test_eval('//a/b[2]', input=xml, output='<b>2</b><b>5</b>')
        self._test_eval('//a/b[3]', input=xml, output='<b>3</b>')
    def test_name_with_namespace(self):
        xml = XML('<root xmlns:f="FOO"><f:foo>bar</f:foo></root>')
        self._test_eval(
            path = 'f:foo',
            equiv = '<Path "child::f:foo">',
            input = xml,
            output = '<foo xmlns="FOO">bar</foo>',
            namespaces = {'f': 'FOO'}
        )
    def test_wildcard_with_namespace(self):
        xml = XML('<root xmlns:f="FOO"><f:foo>bar</f:foo></root>')
        self._test_eval(
            path = 'f:*',
            equiv = '<Path "child::f:*">',
            input = xml,
            output = '<foo xmlns="FOO">bar</foo>',
            namespaces = {'f': 'FOO'}
        )
    def test_predicate_termination(self):
        """
        Verify that a path matching the self axis with a predicate doesn't
        cause an infinite loop. See <http://genshi.edgewall.org/ticket/82>.
        """
        xml = XML('<ul flag="1"><li>a</li><li>b</li></ul>')
        self._test_eval('.[@flag="1"]/*', input=xml,
                        output='<li>a</li><li>b</li>')
        xml = XML('<ul flag="1"><li>a</li><li>b</li></ul>')
        self._test_eval('.[@flag="0"]/*', input=xml, output='')
    def test_attrname_with_namespace(self):
        xml = XML('<root xmlns:f="FOO"><foo f:bar="baz"/></root>')
        self._test_eval('foo[@f:bar]', input=xml,
                        output='<foo xmlns:ns1="FOO" ns1:bar="baz"/>',
                        namespaces={'f': 'FOO'})
    def test_attrwildcard_with_namespace(self):
        xml = XML('<root xmlns:f="FOO"><foo f:bar="baz"/></root>')
        self._test_eval('foo[@f:*]', input=xml,
                        output='<foo xmlns:ns1="FOO" ns1:bar="baz"/>',
                        namespaces={'f': 'FOO'})
    def test_self_and_descendant(self):
        xml = XML('<root><foo/></root>')
        self._test_eval('self::root', input=xml, output='<root><foo/></root>')
        self._test_eval('self::foo', input=xml, output='')
        self._test_eval('descendant::root', input=xml, output='')
        self._test_eval('descendant::foo', input=xml, output='<foo/>')
        self._test_eval('descendant-or-self::root', input=xml,
                        output='<root><foo/></root>')
        self._test_eval('descendant-or-self::foo', input=xml, output='<foo/>')
    def test_long_simple_paths(self):
        xml = XML('<root><a><b><a><d><a><b><a><b><a><b><a><c>!'
                  '</c></a></b></a></b></a></b></a></d></a></b></a></root>')
        self._test_eval('//a/b/a/b/a/c', input=xml, output='<c>!</c>')
        self._test_eval('//a/b/a/c', input=xml, output='<c>!</c>')
        self._test_eval('//a/c', input=xml, output='<c>!</c>')
        self._test_eval('//c', input=xml, output='<c>!</c>')
        # Please note that a//b is NOT the same as a/descendant::b
        # it is a/descendant-or-self::node()/b, which SimplePathStrategy
        # does NOT support
        self._test_eval('a/b/descendant::a/c', input=xml, output='<c>!</c>')
        self._test_eval('a/b/descendant::a/d/descendant::a/c',
                        input=xml, output='<c>!</c>')
        self._test_eval('a/b/descendant::a/d/a/c', input=xml, output='')
        self._test_eval('//d/descendant::b/descendant::b/descendant::b'
                        '/descendant::c', input=xml, output='<c>!</c>')
        self._test_eval('//d/descendant::b/descendant::b/descendant::b'
                        '/descendant::b/descendant::c', input=xml,
                        output='')
    def _test_support(self, strategy_class, text):
        """Return whether *strategy_class* claims support for path *text*."""
        path = PathParser(text, None, -1).parse()[0]
        return strategy_class.supports(path)
    def test_simple_strategy_support(self):
        self.assert_(self._test_support(SimplePathStrategy, 'a/b'))
        self.assert_(self._test_support(SimplePathStrategy, 'self::a/b'))
        self.assert_(self._test_support(SimplePathStrategy, 'descendant::a/b'))
        self.assert_(self._test_support(SimplePathStrategy,
                                        'descendant-or-self::a/b'))
        self.assert_(self._test_support(SimplePathStrategy, '//a/b'))
        self.assert_(self._test_support(SimplePathStrategy, 'a/@b'))
        self.assert_(self._test_support(SimplePathStrategy, 'a/text()'))
        # a//b is a/descendant-or-self::node()/b
        self.assert_(not self._test_support(SimplePathStrategy, 'a//b'))
        self.assert_(not self._test_support(SimplePathStrategy, 'node()/@a'))
        self.assert_(not self._test_support(SimplePathStrategy, '@a'))
        self.assert_(not self._test_support(SimplePathStrategy, 'foo:bar'))
        self.assert_(not self._test_support(SimplePathStrategy, 'a/@foo:bar'))
    def _test_strategies(self, input, path, output,
                         namespaces=None, variables=None):
        """Replay the selection through every supporting strategy and
        assert each renders exactly *output*."""
        for strategy in self.strategies:
            if not strategy.supports(path):
                continue
            s = strategy(path)
            rendered = FakePath(s).select(input, namespaces=namespaces,
                                          variables=variables) \
                                  .render(encoding=None)
            msg = 'Bad render using %s strategy' % str(strategy)
            msg += '\nExpected:\t%r' % output
            msg += '\nRendered:\t%r' % rendered
            self.assertEqual(output, rendered, msg)
    def _test_eval(self, path, equiv=None, input=None, output='',
                   namespaces=None, variables=None):
        """Parse *path*, optionally check its repr against *equiv*, then
        (when *input* is given) check the rendered selection and every
        single-expression strategy against *output*."""
        path = Path(path)
        if equiv is not None:
            self.assertEqual(equiv, repr(path))
        if input is None:
            return
        rendered = path.select(input, namespaces=namespaces,
                               variables=variables).render(encoding=None)
        msg = 'Bad output using whole path'
        msg += '\nExpected:\t%r' % output
        msg += '\nRendered:\t%r' % rendered
        self.assertEqual(output, rendered, msg)
        # Strategies only handle single (non-union) path expressions.
        if len(path.paths) == 1:
            self._test_strategies(input, path.paths[0], output,
                                  namespaces=namespaces, variables=variables)
def suite():
    """Build the test suite for genshi.path (doctests plus PathTestCase)."""
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(Path.__module__))
    tests.addTest(unittest.makeSuite(PathTestCase, 'test'))
    return tests
# Allow running this test module directly; `suite` aggregates all tests above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
from StringIO import StringIO
import sys
import unittest
from genshi.core import Attrs, Stream
from genshi.input import XMLParser, HTMLParser, ParseError
class XMLParserTestCase(unittest.TestCase):
    """Tests for XMLParser: event kinds, source positions, input
    encodings, and (HTML) entity handling with and without a DTD."""
    def test_text_node_pos_single_line(self):
        text = '<elem>foo bar</elem>'
        events = list(XMLParser(StringIO(text)))
        # events[1] is the TEXT event between the START and END events.
        kind, data, pos = events[1]
        self.assertEqual(Stream.TEXT, kind)
        self.assertEqual('foo bar', data)
        self.assertEqual((None, 1, 6), pos)
    def test_text_node_pos_multi_line(self):
        text = '''<elem>foo
        bar</elem>'''
        events = list(XMLParser(StringIO(text)))
        kind, data, pos = events[1]
        self.assertEqual(Stream.TEXT, kind)
        self.assertEqual('foo\nbar', data)
        # The column offset is reported as -1 for text spanning lines.
        self.assertEqual((None, 1, -1), pos)
    def test_element_attribute_order(self):
        text = '<elem title="baz" id="foo" class="bar" />'
        events = list(XMLParser(StringIO(text)))
        kind, data, pos = events[0]
        self.assertEqual(Stream.START, kind)
        tag, attrib = data
        self.assertEqual('elem', tag)
        # Attributes must come back in document order, not sorted.
        self.assertEqual(('title', 'baz'), attrib[0])
        self.assertEqual(('id', 'foo'), attrib[1])
        self.assertEqual(('class', 'bar'), attrib[2])
    def test_unicode_input(self):
        text = u'<div>\u2013</div>'
        events = list(XMLParser(StringIO(text)))
        kind, data, pos = events[1]
        self.assertEqual(Stream.TEXT, kind)
        self.assertEqual(u'\u2013', data)
    def test_latin1_encoded(self):
        text = u'<div>\xf6</div>'.encode('iso-8859-1')
        events = list(XMLParser(StringIO(text), encoding='iso-8859-1'))
        kind, data, pos = events[1]
        self.assertEqual(Stream.TEXT, kind)
        self.assertEqual(u'\xf6', data)
    def test_latin1_encoded_xmldecl(self):
        # Encoding is taken from the XML declaration, not a parameter.
        text = u"""<?xml version="1.0" encoding="iso-8859-1" ?>
        <div>\xf6</div>
        """.encode('iso-8859-1')
        events = list(XMLParser(StringIO(text)))
        kind, data, pos = events[2]
        self.assertEqual(Stream.TEXT, kind)
        self.assertEqual(u'\xf6', data)
    def test_html_entity_with_dtd(self):
        text = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
            "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
        <html>&nbsp;</html>
        """
        events = list(XMLParser(StringIO(text)))
        kind, data, pos = events[2]
        self.assertEqual(Stream.TEXT, kind)
        self.assertEqual(u'\xa0', data)
    def test_html_entity_without_dtd(self):
        text = '<html>&nbsp;</html>'
        events = list(XMLParser(StringIO(text)))
        kind, data, pos = events[1]
        self.assertEqual(Stream.TEXT, kind)
        self.assertEqual(u'\xa0', data)
    def test_html_entity_in_attribute(self):
        text = '<p title="&nbsp;"/>'
        events = list(XMLParser(StringIO(text)))
        kind, data, pos = events[0]
        self.assertEqual(Stream.START, kind)
        self.assertEqual(u'\xa0', data[1].get('title'))
        kind, data, pos = events[1]
        self.assertEqual(Stream.END, kind)
    def test_undefined_entity_with_dtd(self):
        # Unknown entities are a parse error even when a DTD is declared.
        text = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
            "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
        <html>&junk;</html>
        """
        events = XMLParser(StringIO(text))
        self.assertRaises(ParseError, list, events)
    def test_undefined_entity_without_dtd(self):
        text = '<html>&junk;</html>'
        events = XMLParser(StringIO(text))
        self.assertRaises(ParseError, list, events)
class HTMLParserTestCase(unittest.TestCase):
    """Tests for HTMLParser: positions, encodings, entities, processing
    instructions, XML declarations, and recovery from mis-nested tags."""
    def test_text_node_pos_single_line(self):
        text = '<elem>foo bar</elem>'
        events = list(HTMLParser(StringIO(text)))
        # events[1] is the TEXT event between the START and END events.
        kind, data, pos = events[1]
        self.assertEqual(Stream.TEXT, kind)
        self.assertEqual('foo bar', data)
        self.assertEqual((None, 1, 6), pos)
    def test_text_node_pos_multi_line(self):
        text = '''<elem>foo
        bar</elem>'''
        events = list(HTMLParser(StringIO(text)))
        kind, data, pos = events[1]
        self.assertEqual(Stream.TEXT, kind)
        self.assertEqual('foo\nbar', data)
        self.assertEqual((None, 1, 6), pos)
    def test_input_encoding_text(self):
        text = u'<div>\xf6</div>'.encode('iso-8859-1')
        events = list(HTMLParser(StringIO(text), encoding='iso-8859-1'))
        kind, data, pos = events[1]
        self.assertEqual(Stream.TEXT, kind)
        self.assertEqual(u'\xf6', data)
    def test_input_encoding_attribute(self):
        text = u'<div title="\xf6"></div>'.encode('iso-8859-1')
        events = list(HTMLParser(StringIO(text), encoding='iso-8859-1'))
        kind, (tag, attrib), pos = events[0]
        self.assertEqual(Stream.START, kind)
        self.assertEqual(u'\xf6', attrib.get('title'))
    def test_unicode_input(self):
        text = u'<div>\u2013</div>'
        events = list(HTMLParser(StringIO(text)))
        kind, data, pos = events[1]
        self.assertEqual(Stream.TEXT, kind)
        self.assertEqual(u'\u2013', data)
    def test_html_entity_in_attribute(self):
        text = '<p title="&nbsp;"></p>'
        events = list(HTMLParser(StringIO(text)))
        kind, data, pos = events[0]
        self.assertEqual(Stream.START, kind)
        self.assertEqual(u'\xa0', data[1].get('title'))
        kind, data, pos = events[1]
        self.assertEqual(Stream.END, kind)
    def test_html_entity_in_text(self):
        text = '<p>&nbsp;</p>'
        events = list(HTMLParser(StringIO(text)))
        kind, data, pos = events[1]
        self.assertEqual(Stream.TEXT, kind)
        self.assertEqual(u'\xa0', data)
    def test_processing_instruction(self):
        text = '<?php echo "Foobar" ?>'
        events = list(HTMLParser(StringIO(text)))
        kind, (target, data), pos = events[0]
        self.assertEqual(Stream.PI, kind)
        self.assertEqual('php', target)
        self.assertEqual('echo "Foobar"', data)
    def test_xmldecl(self):
        # NOTE(review): this and the next two tests exercise XMLParser,
        # not HTMLParser, despite living in this test case.
        text = '<?xml version="1.0" ?><root />'
        events = list(XMLParser(StringIO(text)))
        kind, (version, encoding, standalone), pos = events[0]
        self.assertEqual(Stream.XML_DECL, kind)
        self.assertEqual('1.0', version)
        self.assertEqual(None, encoding)
        # -1 when no standalone pseudo-attribute is present (see below).
        self.assertEqual(-1, standalone)
    def test_xmldecl_encoding(self):
        text = '<?xml version="1.0" encoding="utf-8" ?><root />'
        events = list(XMLParser(StringIO(text)))
        kind, (version, encoding, standalone), pos = events[0]
        self.assertEqual(Stream.XML_DECL, kind)
        self.assertEqual('1.0', version)
        self.assertEqual('utf-8', encoding)
        self.assertEqual(-1, standalone)
    def test_xmldecl_standalone(self):
        text = '<?xml version="1.0" standalone="yes" ?><root />'
        events = list(XMLParser(StringIO(text)))
        kind, (version, encoding, standalone), pos = events[0]
        self.assertEqual(Stream.XML_DECL, kind)
        self.assertEqual('1.0', version)
        self.assertEqual(None, encoding)
        self.assertEqual(1, standalone)
    def test_processing_instruction_trailing_qmark(self):
        text = '<?php echo "Foobar" ??>'
        events = list(HTMLParser(StringIO(text)))
        kind, (target, data), pos = events[0]
        self.assertEqual(Stream.PI, kind)
        self.assertEqual('php', target)
        self.assertEqual('echo "Foobar" ?', data)
    def test_out_of_order_tags1(self):
        # Mis-nested close tags are rebalanced into well-formed events.
        text = '<span><b>Foobar</span></b>'
        events = list(HTMLParser(StringIO(text)))
        self.assertEqual(5, len(events))
        self.assertEqual((Stream.START, ('span', ())), events[0][:2])
        self.assertEqual((Stream.START, ('b', ())), events[1][:2])
        self.assertEqual((Stream.TEXT, 'Foobar'), events[2][:2])
        self.assertEqual((Stream.END, 'b'), events[3][:2])
        self.assertEqual((Stream.END, 'span'), events[4][:2])
    def test_out_of_order_tags2(self):
        text = '<span class="baz"><b><i>Foobar</span></b></i>'
        events = list(HTMLParser(StringIO(text)))
        self.assertEqual(7, len(events))
        self.assertEqual((Stream.START, ('span', Attrs([('class', 'baz')]))),
                         events[0][:2])
        self.assertEqual((Stream.START, ('b', ())), events[1][:2])
        self.assertEqual((Stream.START, ('i', ())), events[2][:2])
        self.assertEqual((Stream.TEXT, 'Foobar'), events[3][:2])
        self.assertEqual((Stream.END, 'i'), events[4][:2])
        self.assertEqual((Stream.END, 'b'), events[5][:2])
        self.assertEqual((Stream.END, 'span'), events[6][:2])
    def test_out_of_order_tags3(self):
        # Unclosed elements are closed in reverse order of opening.
        text = '<span><b>Foobar</i>'
        events = list(HTMLParser(StringIO(text)))
        self.assertEqual(5, len(events))
        self.assertEqual((Stream.START, ('span', ())), events[0][:2])
        self.assertEqual((Stream.START, ('b', ())), events[1][:2])
        self.assertEqual((Stream.TEXT, 'Foobar'), events[2][:2])
        self.assertEqual((Stream.END, 'b'), events[3][:2])
        self.assertEqual((Stream.END, 'span'), events[4][:2])
    def test_hex_charref(self):
        # Hexadecimal character references are decoded to text.
        text = '<span>&#x27;</span>'
        events = list(HTMLParser(StringIO(text)))
        self.assertEqual(3, len(events))
        self.assertEqual((Stream.START, ('span', ())), events[0][:2])
        self.assertEqual((Stream.TEXT, "'"), events[1][:2])
        self.assertEqual((Stream.END, 'span'), events[2][:2])
def suite():
    """Build the suite of parser unit tests plus the module doctests."""
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(XMLParser.__module__))
    tests.addTest(unittest.makeSuite(XMLParserTestCase, 'test'))
    tests.addTest(unittest.makeSuite(HTMLParserTestCase, 'test'))
    return tests
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import unittest
def suite():
    """Aggregate the test suites of every genshi test module."""
    import genshi
    from genshi.tests import builder, core, input, output, path, util
    from genshi.filters import tests as filters
    from genshi.template import tests as template
    # Order matches the historical suite layout.
    modules = [builder, core, filters, input, output, path, template, util]
    tests = unittest.TestSuite()
    for module in modules:
        tests.addTest(module.suite())
    return tests
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import unittest
from genshi.builder import Element, tag
from genshi.core import Attrs, Markup, Stream
from genshi.input import XML
class ElementFactoryTestCase(unittest.TestCase):
    """Tests for the ``tag`` element factory in ``genshi.builder``."""

    def test_link(self):
        """Keyword args become attributes; ``None`` values are dropped."""
        element = tag.a(href='#', title='Foo', accesskey=None)('Bar')
        start, text, end = list(element.generate())
        self.assertEqual((Stream.START,
                          ('a', Attrs([('href', "#"), ('title', "Foo")])),
                          (None, -1, -1)), start)
        self.assertEqual((Stream.TEXT, 'Bar', (None, -1, -1)), text)
        self.assertEqual((Stream.END, 'a', (None, -1, -1)), end)

    def test_nonstring_attributes(self):
        """
        Verify that if an attribute value is given as an int (or some other
        non-string type), it is coverted to a string when the stream is
        generated.
        """
        first = list(tag.foo(id=3))[0]
        self.assertEqual((Stream.START, ('foo', Attrs([('id', '3')])),
                          (None, -1, -1)), first)

    def test_duplicate_attributes(self):
        """A trailing-underscore duplicate does not override the first attr."""
        events = list(tag.a(href='#1', href_='#2')('Bar').generate())
        self.assertEqual((Stream.START, ('a', Attrs([('href', "#1")])),
                          (None, -1, -1)), events[0])
        self.assertEqual((Stream.TEXT, 'Bar', (None, -1, -1)), events[1])
        self.assertEqual((Stream.END, 'a', (None, -1, -1)), events[2])

    def test_stream_as_child(self):
        """A markup stream passed as a child is spliced into the output."""
        events = list(tag.span(XML('<b>Foo</b>')).generate())
        expected = [
            (Stream.START, ('span', ())),
            (Stream.START, ('b', ())),
            (Stream.TEXT, 'Foo'),
            (Stream.END, 'b'),
            (Stream.END, 'span'),
        ]
        self.assertEqual(len(expected), len(events))
        for want, got in zip(expected, events):
            self.assertEqual(want, got[:2])

    def test_markup_escape(self):
        """Interpolating an element into Markup serializes it safely."""
        rendered = Markup('See %s') % tag.a('genshi',
                                            href='http://genshi.edgwall.org')
        self.assertEqual(rendered,
                         Markup('See <a href="http://genshi.edgwall.org">'
                                'genshi</a>'))
def suite():
    """Collect the builder doctests and the element factory test case."""
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(Element.__module__))
    tests.addTest(unittest.makeSuite(ElementFactoryTestCase, 'test'))
    return tests
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""This module provides different kinds of serialization methods for XML event
streams.
"""
from itertools import chain
import re
from genshi.core import escape, Attrs, Markup, Namespace, QName, StreamEventKind
from genshi.core import START, END, TEXT, XML_DECL, DOCTYPE, START_NS, END_NS, \
START_CDATA, END_CDATA, PI, COMMENT, XML_NAMESPACE
__all__ = ['encode', 'get_serializer', 'DocType', 'XMLSerializer',
'XHTMLSerializer', 'HTMLSerializer', 'TextSerializer']
__docformat__ = 'restructuredtext en'
def encode(iterator, method='xml', encoding='utf-8', out=None):
    """Encode serializer output into a string.

    :param iterator: the iterator returned from serializing a stream (basically
                     any iterator that yields unicode objects)
    :param method: the serialization method; determines how characters not
                   representable in the specified encoding are treated
    :param encoding: how the output string should be encoded; if set to `None`,
                     this method returns a `unicode` object
    :param out: a file-like object that the output should be written to
                instead of being returned as one big string; note that if
                this is a file or socket (or similar), the `encoding` must
                not be `None` (that is, the output must be encoded)
    :return: a `str` or `unicode` object (depending on the `encoding`
             parameter), or `None` if the `out` parameter is provided
    :since: version 0.4.1
    :note: Changed in 0.5: added the `out` parameter
    """
    if encoding is None:
        # No encoding requested: pass chunks through untouched.
        _encode = lambda chunk: chunk
    else:
        # Plain-text output replaces unencodable characters, while markup
        # output can fall back to XML character references instead.
        if method == 'text' or isinstance(method, TextSerializer):
            errors = 'replace'
        else:
            errors = 'xmlcharrefreplace'
        _encode = lambda chunk: chunk.encode(encoding, errors)
    if out is not None:
        # Stream directly to the file-like object; nothing is returned.
        for chunk in iterator:
            out.write(_encode(chunk))
        return None
    return _encode(''.join(list(iterator)))
def get_serializer(method='xml', **kwargs):
    """Return a serializer object for the given method.

    :param method: the serialization method; can be either "xml", "xhtml",
                   "html", "text", or a custom serializer class

    Any additional keyword arguments are passed to the serializer, and thus
    depend on the `method` parameter value.

    :see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`
    :since: version 0.4.1
    """
    if isinstance(method, basestring):
        # Map the (case-insensitive) method name to its serializer class;
        # an unknown name raises KeyError.
        by_name = {
            'xml': XMLSerializer,
            'xhtml': XHTMLSerializer,
            'html': HTMLSerializer,
            'text': TextSerializer,
        }
        method = by_name[method.lower()]
    return method(**kwargs)
class DocType(object):
    """Defines a number of commonly used DOCTYPE declarations as constants.

    Each constant is a ``(name, pubid, sysid)`` tuple suitable for a
    ``DOCTYPE`` stream event; `get` resolves well-known names to them.
    """

    HTML_STRICT = (
        'html', '-//W3C//DTD HTML 4.01//EN',
        'http://www.w3.org/TR/html4/strict.dtd'
    )
    HTML_TRANSITIONAL = (
        'html', '-//W3C//DTD HTML 4.01 Transitional//EN',
        'http://www.w3.org/TR/html4/loose.dtd'
    )
    HTML_FRAMESET = (
        'html', '-//W3C//DTD HTML 4.01 Frameset//EN',
        'http://www.w3.org/TR/html4/frameset.dtd'
    )
    HTML = HTML_STRICT

    # HTML5 uses a bare <!DOCTYPE html> with no public or system identifier.
    HTML5 = ('html', None, None)

    XHTML_STRICT = (
        'html', '-//W3C//DTD XHTML 1.0 Strict//EN',
        'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'
    )
    XHTML_TRANSITIONAL = (
        'html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
        'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'
    )
    XHTML_FRAMESET = (
        'html', '-//W3C//DTD XHTML 1.0 Frameset//EN',
        'http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd'
    )
    XHTML = XHTML_STRICT
    XHTML11 = (
        'html', '-//W3C//DTD XHTML 1.1//EN',
        'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'
    )

    SVG_FULL = (
        'svg', '-//W3C//DTD SVG 1.1//EN',
        'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'
    )
    SVG_BASIC = (
        'svg', '-//W3C//DTD SVG Basic 1.1//EN',
        'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-basic.dtd'
    )
    SVG_TINY = (
        'svg', '-//W3C//DTD SVG Tiny 1.1//EN',
        'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-tiny.dtd'
    )
    SVG = SVG_FULL

    @classmethod
    def get(cls, name):
        """Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE``
        declaration for the specified name.

        The following names are recognized in this version:
         * "html" or "html-strict" for the HTML 4.01 strict DTD
         * "html-transitional" for the HTML 4.01 transitional DTD
         * "html-frameset" for the HTML 4.01 frameset DTD
         * "html5" for the ``DOCTYPE`` proposed for HTML5
         * "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD
         * "xhtml-transitional" for the XHTML 1.0 transitional DTD
         * "xhtml-frameset" for the XHTML 1.0 frameset DTD
         * "xhtml11" for the XHTML 1.1 DTD
         * "svg" or "svg-full" for the SVG 1.1 DTD
         * "svg-basic" for the SVG Basic 1.1 DTD
         * "svg-tiny" for the SVG Tiny 1.1 DTD

        :param name: the name of the ``DOCTYPE``
        :return: the ``(name, pubid, sysid)`` tuple for the requested
                 ``DOCTYPE``, or ``None`` if the name is not recognized
        :since: version 0.4.1
        """
        # Consistency: all entries reference the class via `cls` (two entries
        # previously hard-coded `DocType.`, which would pin lookups to this
        # class even for subclasses overriding the constants).
        return {
            'html': cls.HTML, 'html-strict': cls.HTML_STRICT,
            'html-transitional': cls.HTML_TRANSITIONAL,
            'html-frameset': cls.HTML_FRAMESET,
            'html5': cls.HTML5,
            'xhtml': cls.XHTML, 'xhtml-strict': cls.XHTML_STRICT,
            'xhtml-transitional': cls.XHTML_TRANSITIONAL,
            'xhtml-frameset': cls.XHTML_FRAMESET,
            'xhtml11': cls.XHTML11,
            'svg': cls.SVG, 'svg-full': cls.SVG_FULL,
            'svg-basic': cls.SVG_BASIC,
            'svg-tiny': cls.SVG_TINY
        }.get(name.lower())
class XMLSerializer(object):
    """Produces XML text from an event stream.

    >>> from genshi.builder import tag
    >>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
    >>> print(''.join(XMLSerializer()(elem.generate())))
    <div><a href="foo"/><br/><hr noshade="True"/></div>
    """

    # Tag names whose whitespace is never stripped; empty for plain XML
    # (subclasses override this, e.g. for <pre>/<textarea> in XHTML).
    _PRESERVE_SPACE = frozenset()

    def __init__(self, doctype=None, strip_whitespace=True,
                 namespace_prefixes=None, cache=True):
        """Initialize the XML serializer.

        :param doctype: a ``(name, pubid, sysid)`` tuple that represents the
                        DOCTYPE declaration that should be included at the top
                        of the generated output, or the name of a DOCTYPE as
                        defined in `DocType.get`
        :param strip_whitespace: whether extraneous whitespace should be
                                 stripped from the output
        :param cache: whether to cache the text output per event, which
                      improves performance for repetitive markup
        :note: Changed in 0.4.2: The `doctype` parameter can now be a string.
        :note: Changed in 0.6: The `cache` parameter was added
        """
        # Filters are applied to the event stream, in order, before
        # serialization proper in __call__.
        self.filters = [EmptyTagFilter()]
        if strip_whitespace:
            self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
        self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
                                               cache=cache))
        if doctype:
            self.filters.append(DocTypeInserter(doctype))
        self.cache = cache

    def __call__(self, stream):
        """Serialize `stream`, yielding `Markup` chunks of XML text."""
        # Only the first XML declaration / DOCTYPE encountered is emitted.
        have_decl = have_doctype = False
        # Inside a CDATA section text must be passed through unescaped.
        in_cdata = False
        # Memo of rendered markup per (kind, data) pair; positions are
        # deliberately excluded from the key so repeated markup reuses it.
        cache = {}
        cache_get = cache.get
        if self.cache:
            def _emit(kind, input, output):
                cache[kind, input] = output
                return output
        else:
            # Caching disabled: just pass the rendered output through.
            def _emit(kind, input, output):
                return output

        for filter_ in self.filters:
            stream = filter_(stream)
        for kind, data, pos in stream:
            cached = cache_get((kind, data))
            if cached is not None:
                yield cached
            elif kind is START or kind is EMPTY:
                tag, attrib = data
                buf = ['<', tag]
                for attr, value in attrib:
                    buf += [' ', attr, '="', escape(value), '"']
                # EMPTY elements (START+END collapsed upstream) self-close.
                buf.append(kind is EMPTY and '/>' or '>')
                yield _emit(kind, data, Markup(''.join(buf)))
            elif kind is END:
                yield _emit(kind, data, Markup('</%s>' % data))
            elif kind is TEXT:
                if in_cdata:
                    yield _emit(kind, data, data)
                else:
                    yield _emit(kind, data, escape(data, quotes=False))
            elif kind is COMMENT:
                yield _emit(kind, data, Markup('<!--%s-->' % data))
            elif kind is XML_DECL and not have_decl:
                version, encoding, standalone = data
                buf = ['<?xml version="%s"' % version]
                if encoding:
                    buf.append(' encoding="%s"' % encoding)
                # standalone == -1 means the pseudo-attribute was absent.
                if standalone != -1:
                    standalone = standalone and 'yes' or 'no'
                    buf.append(' standalone="%s"' % standalone)
                buf.append('?>\n')
                yield Markup(''.join(buf))
                have_decl = True
            elif kind is DOCTYPE and not have_doctype:
                name, pubid, sysid = data
                # Build a format string with only the placeholders for the
                # identifiers that are actually present, then interpolate.
                buf = ['<!DOCTYPE %s']
                if pubid:
                    buf.append(' PUBLIC "%s"')
                elif sysid:
                    buf.append(' SYSTEM')
                if sysid:
                    buf.append(' "%s"')
                buf.append('>\n')
                yield Markup(''.join(buf)) % tuple([p for p in data if p])
                have_doctype = True
            elif kind is START_CDATA:
                yield Markup('<![CDATA[')
                in_cdata = True
            elif kind is END_CDATA:
                yield Markup(']]>')
                in_cdata = False
            elif kind is PI:
                yield _emit(kind, data, Markup('<?%s %s?>' % data))
class XHTMLSerializer(XMLSerializer):
    """Produces XHTML text from an event stream.

    >>> from genshi.builder import tag
    >>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
    >>> print(''.join(XHTMLSerializer()(elem.generate())))
    <div><a href="foo"></a><br /><hr noshade="noshade" /></div>
    """

    # Elements that may be rendered in minimized form (e.g. <br />); any
    # other empty element gets an explicit end tag for HTML compatibility.
    _EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame',
                              'hr', 'img', 'input', 'isindex', 'link', 'meta',
                              'param'])
    # Attributes that XHTML requires in expanded attr="attr" form.
    _BOOLEAN_ATTRS = frozenset(['selected', 'checked', 'compact', 'declare',
                                'defer', 'disabled', 'ismap', 'multiple',
                                'nohref', 'noresize', 'noshade', 'nowrap'])
    # Whitespace inside these elements is significant and never stripped.
    _PRESERVE_SPACE = frozenset([
        QName('pre'), QName('http://www.w3.org/1999/xhtml}pre'),
        QName('textarea'), QName('http://www.w3.org/1999/xhtml}textarea')
    ])

    def __init__(self, doctype=None, strip_whitespace=True,
                 namespace_prefixes=None, drop_xml_decl=True, cache=True):
        """Initialize the XHTML serializer.

        :param doctype: a ``(name, pubid, sysid)`` tuple or a DOCTYPE name
                        understood by `DocType.get`
        :param strip_whitespace: whether extraneous whitespace should be
                                 stripped from the output
        :param namespace_prefixes: optional mapping of namespace URIs to
                                   prefixes
        :param drop_xml_decl: whether to omit any XML declaration from the
                              output
        :param cache: whether to cache the text output per event
        """
        # The base constructor is called with strip_whitespace=False because
        # the filter chain is rebuilt from scratch below.
        super(XHTMLSerializer, self).__init__(doctype, False)
        self.filters = [EmptyTagFilter()]
        if strip_whitespace:
            self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
        # The XHTML namespace is flattened to the empty prefix in the output.
        # NOTE(review): this mutates a caller-supplied namespace_prefixes
        # dict in place — confirm that is acceptable to callers.
        namespace_prefixes = namespace_prefixes or {}
        namespace_prefixes['http://www.w3.org/1999/xhtml'] = ''
        self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
                                               cache=cache))
        if doctype:
            self.filters.append(DocTypeInserter(doctype))
        self.drop_xml_decl = drop_xml_decl
        self.cache = cache

    def __call__(self, stream):
        """Serialize `stream`, yielding `Markup` chunks of XHTML text."""
        boolean_attrs = self._BOOLEAN_ATTRS
        empty_elems = self._EMPTY_ELEMS
        drop_xml_decl = self.drop_xml_decl
        # Only the first XML declaration / DOCTYPE encountered is emitted.
        have_decl = have_doctype = False
        in_cdata = False
        # Memo of rendered markup per (kind, data) pair.
        cache = {}
        cache_get = cache.get
        if self.cache:
            def _emit(kind, input, output):
                cache[kind, input] = output
                return output
        else:
            def _emit(kind, input, output):
                return output

        for filter_ in self.filters:
            stream = filter_(stream)
        for kind, data, pos in stream:
            cached = cache_get((kind, data))
            if cached is not None:
                yield cached
            elif kind is START or kind is EMPTY:
                tag, attrib = data
                buf = ['<', tag]
                for attr, value in attrib:
                    if attr in boolean_attrs:
                        # Boolean attributes are expanded to attr="attr",
                        # then emitted by the shared append below.
                        value = attr
                    elif attr == 'xml:lang' and 'lang' not in attrib:
                        # Emit a plain lang="" in addition to xml:lang
                        # (the xml:lang itself is still appended below).
                        buf += [' lang="', escape(value), '"']
                    elif attr == 'xml:space':
                        # Whitespace hint only; not serialized.
                        continue
                    buf += [' ', attr, '="', escape(value), '"']
                if kind is EMPTY:
                    if tag in empty_elems:
                        buf.append(' />')
                    else:
                        # Non-void elements need an explicit end tag even
                        # when empty, for HTML-compatible XHTML.
                        buf.append('></%s>' % tag)
                else:
                    buf.append('>')
                yield _emit(kind, data, Markup(''.join(buf)))
            elif kind is END:
                yield _emit(kind, data, Markup('</%s>' % data))
            elif kind is TEXT:
                if in_cdata:
                    yield _emit(kind, data, data)
                else:
                    yield _emit(kind, data, escape(data, quotes=False))
            elif kind is COMMENT:
                yield _emit(kind, data, Markup('<!--%s-->' % data))
            elif kind is DOCTYPE and not have_doctype:
                name, pubid, sysid = data
                # Build a format string with only the placeholders for the
                # identifiers that are actually present, then interpolate.
                buf = ['<!DOCTYPE %s']
                if pubid:
                    buf.append(' PUBLIC "%s"')
                elif sysid:
                    buf.append(' SYSTEM')
                if sysid:
                    buf.append(' "%s"')
                buf.append('>\n')
                yield Markup(''.join(buf)) % tuple([p for p in data if p])
                have_doctype = True
            elif kind is XML_DECL and not have_decl and not drop_xml_decl:
                version, encoding, standalone = data
                buf = ['<?xml version="%s"' % version]
                if encoding:
                    buf.append(' encoding="%s"' % encoding)
                # standalone == -1 means the pseudo-attribute was absent.
                if standalone != -1:
                    standalone = standalone and 'yes' or 'no'
                    buf.append(' standalone="%s"' % standalone)
                buf.append('?>\n')
                yield Markup(''.join(buf))
                have_decl = True
            elif kind is START_CDATA:
                yield Markup('<![CDATA[')
                in_cdata = True
            elif kind is END_CDATA:
                yield Markup(']]>')
                in_cdata = False
            elif kind is PI:
                yield _emit(kind, data, Markup('<?%s %s?>' % data))
class HTMLSerializer(XHTMLSerializer):
    """Produces HTML text from an event stream.

    >>> from genshi.builder import tag
    >>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
    >>> print(''.join(HTMLSerializer()(elem.generate())))
    <div><a href="foo"></a><br><hr noshade></div>
    """

    # Content of these elements is emitted verbatim (no XML escaping).
    _NOESCAPE_ELEMS = frozenset([
        QName('script'), QName('http://www.w3.org/1999/xhtml}script'),
        QName('style'), QName('http://www.w3.org/1999/xhtml}style')
    ])

    def __init__(self, doctype=None, strip_whitespace=True, cache=True):
        """Initialize the HTML serializer.

        :param doctype: a ``(name, pubid, sysid)`` tuple that represents the
                        DOCTYPE declaration that should be included at the top
                        of the generated output
        :param strip_whitespace: whether extraneous whitespace should be
                                 stripped from the output
        :param cache: whether to cache the text output per event, which
                      improves performance for repetitive markup
        :note: Changed in 0.6: The `cache` parameter was added
        """
        # The base constructor is called with strip_whitespace=False because
        # the filter chain is rebuilt from scratch below.
        super(HTMLSerializer, self).__init__(doctype, False)
        self.filters = [EmptyTagFilter()]
        if strip_whitespace:
            self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE,
                                                 self._NOESCAPE_ELEMS))
        self.filters.append(NamespaceFlattener(prefixes={
            'http://www.w3.org/1999/xhtml': ''
        }, cache=cache))
        if doctype:
            self.filters.append(DocTypeInserter(doctype))
        # Bug fix: honor the `cache` argument; previously this was
        # hard-coded to True, making cache=False have no effect.
        self.cache = cache

    def __call__(self, stream):
        """Serialize `stream`, yielding `Markup` chunks of HTML text."""
        boolean_attrs = self._BOOLEAN_ATTRS
        empty_elems = self._EMPTY_ELEMS
        noescape_elems = self._NOESCAPE_ELEMS
        have_doctype = False
        # True while inside an element whose text must not be escaped.
        noescape = False
        # Memo of rendered markup per (kind, data) pair.
        cache = {}
        cache_get = cache.get
        if self.cache:
            def _emit(kind, input, output):
                cache[kind, input] = output
                return output
        else:
            def _emit(kind, input, output):
                return output

        for filter_ in self.filters:
            stream = filter_(stream)
        for kind, data, _ in stream:
            output = cache_get((kind, data))
            if output is not None:
                yield output
                # Cached or not, the noescape state machine must advance.
                if (kind is START or kind is EMPTY) \
                        and data[0] in noescape_elems:
                    noescape = True
                elif kind is END:
                    noescape = False
            elif kind is START or kind is EMPTY:
                tag, attrib = data
                buf = ['<', tag]
                for attr, value in attrib:
                    if attr in boolean_attrs:
                        # HTML boolean attributes are minimized: present
                        # without a value, or omitted entirely.
                        if value:
                            buf += [' ', attr]
                    elif ':' in attr:
                        if attr == 'xml:lang' and 'lang' not in attrib:
                            # Translate xml:lang into the HTML lang attr.
                            buf += [' lang="', escape(value), '"']
                    elif attr != 'xmlns':
                        buf += [' ', attr, '="', escape(value), '"']
                buf.append('>')
                if kind is EMPTY:
                    # Void elements need no end tag in HTML; others do.
                    if tag not in empty_elems:
                        buf.append('</%s>' % tag)
                yield _emit(kind, data, Markup(''.join(buf)))
                if tag in noescape_elems:
                    noescape = True
            elif kind is END:
                yield _emit(kind, data, Markup('</%s>' % data))
                noescape = False
            elif kind is TEXT:
                if noescape:
                    yield _emit(kind, data, data)
                else:
                    yield _emit(kind, data, escape(data, quotes=False))
            elif kind is COMMENT:
                yield _emit(kind, data, Markup('<!--%s-->' % data))
            elif kind is DOCTYPE and not have_doctype:
                name, pubid, sysid = data
                # Build a format string with only the placeholders for the
                # identifiers that are actually present, then interpolate.
                buf = ['<!DOCTYPE %s']
                if pubid:
                    buf.append(' PUBLIC "%s"')
                elif sysid:
                    buf.append(' SYSTEM')
                if sysid:
                    buf.append(' "%s"')
                buf.append('>\n')
                yield Markup(''.join(buf)) % tuple([p for p in data if p])
                have_doctype = True
            elif kind is PI:
                yield _emit(kind, data, Markup('<?%s %s?>' % data))
class TextSerializer(object):
    """Produces plain text from an event stream.

    Only text events are included in the output. Unlike the other serializer,
    special XML characters are not escaped:

    >>> from genshi.builder import tag
    >>> elem = tag.div(tag.a('<Hello!>', href='foo'), tag.br)
    >>> print(elem)
    <div><a href="foo"><Hello!></a><br/></div>
    >>> print(''.join(TextSerializer()(elem.generate())))
    <Hello!>

    If text events contain literal markup (instances of the `Markup` class),
    that markup is by default passed through unchanged:

    >>> elem = tag.div(Markup('<a href="foo">Hello & Bye!</a><br/>'))
    >>> print(elem.generate().render(TextSerializer, encoding=None))
    <a href="foo">Hello & Bye!</a><br/>

    You can use the ``strip_markup`` to change this behavior, so that tags and
    entities are stripped from the output (or in the case of entities,
    replaced with the equivalent character):

    >>> print(elem.generate().render(TextSerializer, strip_markup=True,
    ...                              encoding=None))
    Hello & Bye!
    """

    def __init__(self, strip_markup=False):
        """Create the serializer.

        :param strip_markup: whether markup (tags and encoded characters) found
                             in the text should be removed
        """
        self.strip_markup = strip_markup

    def __call__(self, stream):
        """Yield one unicode string per TEXT event; all other events are
        silently dropped.
        """
        strip_markup = self.strip_markup
        for event in stream:
            if event[0] is TEXT:
                data = event[1]
                # An exact type check (not isinstance) so that only literal
                # Markup instances are stripped of tags/entities.
                if strip_markup and type(data) is Markup:
                    data = data.striptags().stripentities()
                yield unicode(data)
class EmptyTagFilter(object):
    """Combines `START` and `STOP` events into `EMPTY` events for elements that
    have no contents.
    """

    EMPTY = StreamEventKind('EMPTY')

    def __call__(self, stream):
        # `prev` buffers the most recent event: a START is withheld for one
        # step so that an immediately following END can be collapsed with it.
        prev = (None, None, None)
        for ev in stream:
            if prev[0] is START:
                if ev[0] is END:
                    # START followed directly by END: emit a single EMPTY
                    # event carrying the START's tag data and position.
                    prev = EMPTY, prev[1], prev[2]
                    yield prev
                    continue
                else:
                    # The element has content after all; flush the START.
                    yield prev
            if ev[0] is not START:
                yield ev
            prev = ev


# Module-level alias used by the serializers above.
EMPTY = EmptyTagFilter.EMPTY
class NamespaceFlattener(object):
    r"""Output stream filter that removes namespace information from the stream,
    instead adding namespace attributes and prefixes as needed.

    :param prefixes: optional mapping of namespace URIs to prefixes

    >>> from genshi.input import XML
    >>> xml = XML('''<doc xmlns="NS1" xmlns:two="NS2">
    ... <two:item/>
    ... </doc>''')
    >>> for kind, data, pos in NamespaceFlattener()(xml):
    ...     print('%s %r' % (kind, data))
    START (u'doc', Attrs([('xmlns', u'NS1'), (u'xmlns:two', u'NS2')]))
    TEXT u'\n '
    START (u'two:item', Attrs())
    END u'two:item'
    TEXT u'\n'
    END u'doc'
    """

    def __init__(self, prefixes=None, cache=True):
        """Initialize the filter.

        :param prefixes: optional mapping of namespace URIs to prefixes
        :param cache: whether to cache flattened output per event
        """
        # The 'xml' prefix is predefined by the XML spec.
        self.prefixes = {XML_NAMESPACE.uri: 'xml'}
        if prefixes is not None:
            self.prefixes.update(prefixes)
        self.cache = cache

    def __call__(self, stream):
        # Memo of flattened data per (kind, data); invalidated whenever the
        # set of in-scope namespace bindings changes.
        cache = {}
        cache_get = cache.get
        if self.cache:
            def _emit(kind, input, output, pos):
                cache[kind, input] = output
                return kind, output, pos
        else:
            # Bug fix: this variant must return the same (kind, output, pos)
            # event shape as the caching one; it previously returned only
            # `output`, corrupting the stream when cache=False.
            def _emit(kind, input, output, pos):
                return kind, output, pos

        # prefixes: prefix -> stack of URIs; namespaces: URI -> stack of
        # prefixes. Stacks model nested (re)declarations of the same name.
        prefixes = dict([(v, [k]) for k, v in self.prefixes.items()])
        namespaces = {XML_NAMESPACE.uri: ['xml']}
        def _push_ns(prefix, uri):
            namespaces.setdefault(uri, []).append(prefix)
            prefixes.setdefault(prefix, []).append(uri)
            cache.clear()

        def _pop_ns(prefix):
            uris = prefixes.get(prefix)
            uri = uris.pop()
            if not uris:
                del prefixes[prefix]
            if uri not in uris or uri != uris[-1]:
                uri_prefixes = namespaces[uri]
                uri_prefixes.pop()
                if not uri_prefixes:
                    del namespaces[uri]
            cache.clear()
            return uri

        # xmlns attributes accumulated since the last START, to be attached
        # to the next start tag that is emitted.
        ns_attrs = []
        _push_ns_attr = ns_attrs.append
        def _make_ns_attr(prefix, uri):
            return 'xmlns%s' % (prefix and ':%s' % prefix or ''), uri

        def _gen_prefix():
            # Generates ns1, ns2, ... for attribute namespaces that have no
            # declared prefix.
            val = 0
            while 1:
                val += 1
                yield 'ns%d' % val
        _gen_prefix = _gen_prefix().next

        for kind, data, pos in stream:
            output = cache_get((kind, data))
            if output is not None:
                yield kind, output, pos

            elif kind is START or kind is EMPTY:
                tag, attrs = data
                tagname = tag.localname
                tagns = tag.namespace
                if tagns:
                    if tagns in namespaces:
                        prefix = namespaces[tagns][-1]
                        if prefix:
                            tagname = '%s:%s' % (prefix, tagname)
                    else:
                        # Undeclared default namespace: declare it here.
                        _push_ns_attr(('xmlns', tagns))
                        _push_ns('', tagns)

                new_attrs = []
                for attr, value in attrs:
                    attrname = attr.localname
                    attrns = attr.namespace
                    if attrns:
                        if attrns not in namespaces:
                            # Invent a prefix for an undeclared attr namespace.
                            prefix = _gen_prefix()
                            _push_ns(prefix, attrns)
                            _push_ns_attr(('xmlns:%s' % prefix, attrns))
                        else:
                            prefix = namespaces[attrns][-1]
                        if prefix:
                            attrname = '%s:%s' % (prefix, attrname)
                    new_attrs.append((attrname, value))

                yield _emit(kind, data, (tagname, Attrs(ns_attrs + new_attrs)), pos)
                del ns_attrs[:]

            elif kind is END:
                tagname = data.localname
                tagns = data.namespace
                if tagns:
                    prefix = namespaces[tagns][-1]
                    if prefix:
                        tagname = '%s:%s' % (prefix, tagname)
                yield _emit(kind, data, tagname, pos)

            elif kind is START_NS:
                prefix, uri = data
                if uri not in namespaces:
                    # Prefer an already-configured prefix for this URI.
                    prefix = prefixes.get(uri, [prefix])[-1]
                    _push_ns_attr(_make_ns_attr(prefix, uri))
                _push_ns(prefix, uri)

            elif kind is END_NS:
                if data in prefixes:
                    uri = _pop_ns(data)
                    if ns_attrs:
                        # Retract a declaration that was never emitted.
                        attr = _make_ns_attr(data, uri)
                        if attr in ns_attrs:
                            ns_attrs.remove(attr)

            else:
                yield kind, data, pos
class WhitespaceFilter(object):
    """A filter that removes extraneous ignorable white space from the
    stream.
    """

    def __init__(self, preserve=None, noescape=None):
        """Initialize the filter.

        :param preserve: a set or sequence of tag names for which white-space
                         should be preserved
        :param noescape: a set or sequence of tag names for which text content
                         should not be escaped

        The `noescape` set is expected to refer to elements that cannot contain
        further child elements (such as ``<style>`` or ``<script>`` in HTML
        documents).
        """
        if preserve is None:
            preserve = []
        self.preserve = frozenset(preserve)
        if noescape is None:
            noescape = []
        self.noescape = frozenset(noescape)

    def __call__(self, stream, ctxt=None, space=XML_NAMESPACE['space'],
                 trim_trailing_space=re.compile('[ \t]+(?=\n)').sub,
                 collapse_lines=re.compile('\n{2,}').sub):
        # The regex substituters are bound as default arguments so they are
        # compiled once and looked up as locals inside the loop.
        mjoin = Markup('').join
        preserve_elems = self.preserve
        # Nesting counter: >0 while inside any whitespace-preserving element.
        preserve = 0
        noescape_elems = self.noescape
        # True while inside a no-escape element or a CDATA section.
        noescape = False

        # Adjacent TEXT events are buffered and flushed as one chunk when a
        # non-TEXT event arrives; the trailing (None, None, None) sentinel
        # forces a final flush at end of stream.
        textbuf = []
        push_text = textbuf.append
        pop_text = textbuf.pop
        for kind, data, pos in chain(stream, [(None, None, None)]):

            if kind is TEXT:
                if noescape:
                    data = Markup(data)
                push_text(data)
            else:
                if textbuf:
                    if len(textbuf) > 1:
                        text = mjoin(textbuf, escape_quotes=False)
                        del textbuf[:]
                    else:
                        text = escape(pop_text(), quotes=False)
                    if not preserve:
                        # Strip spaces/tabs before newlines, then collapse
                        # runs of blank lines into a single newline.
                        text = collapse_lines('\n', trim_trailing_space('', text))
                    yield TEXT, Markup(text), pos

                if kind is START:
                    tag, attrs = data
                    # xml:space="preserve" or a configured tag enters (or
                    # deepens) preserve mode.
                    if preserve or (tag in preserve_elems or
                                    attrs.get(space) == 'preserve'):
                        preserve += 1
                    if not noescape and tag in noescape_elems:
                        noescape = True

                elif kind is END:
                    noescape = False
                    if preserve:
                        preserve -= 1

                elif kind is START_CDATA:
                    noescape = True

                elif kind is END_CDATA:
                    noescape = False

                # The sentinel has kind None and must not be re-emitted.
                if kind:
                    yield kind, data, pos
class DocTypeInserter(object):
    """A filter that inserts the DOCTYPE declaration in the correct location,
    after the XML declaration.
    """

    def __init__(self, doctype):
        """Initialize the filter.

        :param doctype: DOCTYPE as a string or DocType object.
        """
        if isinstance(doctype, basestring):
            doctype = DocType.get(doctype)
        self.doctype_event = (DOCTYPE, doctype, (None, -1, -1))

    def __call__(self, stream):
        inserted = False
        for event in stream:
            if not inserted:
                inserted = True
                if event[0] is XML_DECL:
                    # The XML declaration must stay first; emit the DOCTYPE
                    # right behind it.
                    yield event
                    yield self.doctype_event
                    continue
                # No declaration up front: the DOCTYPE leads the stream.
                yield self.doctype_event
            yield event
        if not inserted:
            # Empty stream: the DOCTYPE is the only event produced.
            yield self.doctype_event
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Various utility classes and functions."""
import htmlentitydefs as entities
import re
__docformat__ = 'restructuredtext en'
class LRUCache(dict):
    """A dictionary-like object that stores only a certain number of items, and
    discards its least recently used item when full.

    >>> cache = LRUCache(3)
    >>> cache['A'] = 0
    >>> cache['B'] = 1
    >>> cache['C'] = 2
    >>> len(cache)
    3

    >>> cache['A']
    0

    Adding new items to the cache does not increase its size. Instead, the least
    recently used item is dropped:

    >>> cache['D'] = 3
    >>> len(cache)
    3
    >>> 'B' in cache
    False

    Iterating over the cache returns the keys, starting with the most recently
    used:

    >>> for key in cache:
    ...     print(key)
    D
    A
    C

    This code is based on the LRUCache class from ``myghtyutils.util``, written
    by Mike Bayer and released under the MIT license. See:

      http://svn.myghty.org/myghtyutils/trunk/lib/myghtyutils/util.py
    """

    class _Item(object):
        """A node of the internal doubly-linked recency list."""
        def __init__(self, key, value):
            self.prv = self.nxt = None
            self.key = key
            self.value = value
        def __repr__(self):
            return repr(self.value)

    def __init__(self, capacity):
        # Maps key -> _Item; the items form a doubly-linked list ordered
        # from most recently used (head) to least recently used (tail).
        self._dict = dict()
        self.capacity = capacity
        self.head = None
        self.tail = None

    def __contains__(self, key):
        return key in self._dict

    def __iter__(self):
        # Walk the recency list from most to least recently used.
        node = self.head
        while node is not None:
            yield node.key
            node = node.nxt

    def __len__(self):
        return len(self._dict)

    def __getitem__(self, key):
        node = self._dict[key]
        self._update_item(node)
        return node.value

    def __setitem__(self, key, value):
        node = self._dict.get(key)
        if node is not None:
            # Existing key: refresh the value and promote to head.
            node.value = value
            self._update_item(node)
            self._manage_size()
        else:
            node = self._Item(key, value)
            self._dict[key] = node
            self._insert_item(node)

    def __repr__(self):
        return repr(self._dict)

    def _insert_item(self, node):
        # New items always enter at the head (most recently used).
        node.prv = None
        node.nxt = self.head
        if self.head is None:
            self.tail = node
        else:
            self.head.prv = node
        self.head = node
        self._manage_size()

    def _manage_size(self):
        # Evict from the tail until the cache is within capacity.
        while len(self._dict) > self.capacity:
            del self._dict[self.tail.key]
            if self.tail is self.head:
                self.head = self.tail = None
            else:
                self.tail = self.tail.prv
                self.tail.nxt = None

    def _update_item(self, node):
        # Splice the node out of its position and re-link it at the head.
        if self.head is node:
            return
        prv = node.prv
        prv.nxt = node.nxt
        if node.nxt is None:
            self.tail = prv
        else:
            node.nxt.prv = prv
        node.prv = None
        node.nxt = self.head
        self.head.prv = self.head = node
def flatten(items):
    """Flattens a potentially nested sequence into a flat list.

    :param items: the sequence to flatten

    >>> flatten((1, 2))
    [1, 2]
    >>> flatten([1, (2, 3), 4])
    [1, 2, 3, 4]
    >>> flatten([1, (2, [3, 4]), 5])
    [1, 2, 3, 4, 5]
    """
    result = []
    for element in items:
        if isinstance(element, (frozenset, list, set, tuple)):
            # Recurse into nested containers and splice their contents in.
            result.extend(flatten(element))
        else:
            result.append(element)
    return result
def plaintext(text, keeplinebreaks=True):
    """Return the text with all entities and tags removed.

    >>> plaintext('<b>1 < 2</b>')
    u'1 < 2'

    The `keeplinebreaks` parameter can be set to ``False`` to replace any line
    breaks by simple spaces:

    >>> plaintext('''<b>1
    ... <
    ... 2</b>''', keeplinebreaks=False)
    u'1 < 2'

    :param text: the text to convert to plain text
    :param keeplinebreaks: whether line breaks in the text should be kept intact
    :return: the text with tags and entities removed
    """
    result = stripentities(striptags(text))
    if keeplinebreaks:
        return result
    return result.replace('\n', ' ')
# Matches numeric character references (decimal or hex, semicolon optional)
# and named entities (semicolon required).
_STRIPENTITIES_RE = re.compile(r'&(?:#((?:\d+)|(?:[xX][0-9a-fA-F]+));?|(\w+);)')
def stripentities(text, keepxmlentities=False):
    """Return a copy of the given text with any character or numeric entities
    replaced by the equivalent UTF-8 characters.

    >>> stripentities('1 < 2')
    u'1 < 2'
    >>> stripentities('more …')
    u'more \u2026'
    >>> stripentities('…')
    u'\u2026'
    >>> stripentities('…')
    u'\u2026'

    If the `keepxmlentities` parameter is provided and is a truth value, the
    core XML entities (&, ', >, < and ") are left intact.

    >>> stripentities('1 < 2 …', keepxmlentities=True)
    u'1 < 2 \u2026'
    """
    def _replace_entity(match):
        if match.group(1): # numeric entity
            ref = match.group(1)
            # NOTE(review): the regex accepts an uppercase 'X' hex prefix but
            # only a lowercase 'x' is handled here; '&#X27;' would fall
            # through to the base-10 branch and raise ValueError — confirm.
            if ref.startswith('x'):
                ref = int(ref[1:], 16)
            else:
                ref = int(ref, 10)
            return unichr(ref)
        else: # character entity
            ref = match.group(2)
            if keepxmlentities and ref in ('amp', 'apos', 'gt', 'lt', 'quot'):
                # Preserve the core XML entities verbatim.
                return '&%s;' % ref
            try:
                return unichr(entities.name2codepoint[ref])
            except KeyError:
                if keepxmlentities:
                    # Unknown entity: keep it in entity form.
                    return '&%s;' % ref
                else:
                    # Unknown entity: fall back to the bare name.
                    return ref
    return _STRIPENTITIES_RE.sub(_replace_entity, text)
# Matches a complete HTML/XML comment or any single tag (non-greedy for
# comments so adjacent comments are not merged).
_STRIPTAGS_RE = re.compile(r'(<!--.*?-->|<[^>]*>)')
def striptags(text):
    """Return a copy of the text with any XML/HTML tags removed.

    >>> striptags('<span>Foo</span> bar')
    'Foo bar'
    >>> striptags('<span class="bar">Foo</span>')
    'Foo'
    >>> striptags('Foo<br />')
    'Foo'

    HTML/XML comments are stripped, too:

    >>> striptags('<!-- <blub>hehe</blah> -->test')
    'test'

    :param text: the string to remove tags from
    :return: the text with tags removed
    """
    # The module-level pattern covers both comments and individual tags.
    return re.sub(_STRIPTAGS_RE, '', text)
def stringrepr(string):
    # NOTE(review): Python 2 specific -- relies on ``unicode.encode``
    # returning a ``str`` that can be concatenated with string literals;
    # under Python 3 ``encode`` yields ``bytes`` and the ``+`` below would
    # raise TypeError.
    ascii = string.encode('ascii', 'backslashreplace')
    # Single-quote the ASCII form, escaping embedded single quotes.
    quoted = "'" + ascii.replace("'", "\\'") + "'"
    if len(ascii) > len(string):
        # Backslash-replacement lengthened the text, so it contained
        # non-ASCII characters: mark the repr as a unicode literal.
        return 'u' + quoted
    return quoted
# Compatibility fallback implementations for older Python versions
try:
    # Python >= 2.5 has these as builtins; re-export them unchanged.
    all = all
    any = any
except NameError:
    # Pure-Python fallbacks matching the builtin semantics.
    def any(S):
        """Return True if at least one element of S is true."""
        for x in S:
            if x:
                return True
        return False
    def all(S):
        """Return True if every element of S is true (or S is empty)."""
        for x in S:
            if not x:
                return False
        return True
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Basic support for evaluating XPath expressions against streams.
>>> from genshi.input import XML
>>> doc = XML('''<doc>
... <items count="4">
... <item status="new">
... <summary>Foo</summary>
... </item>
... <item status="closed">
... <summary>Bar</summary>
... </item>
... <item status="closed" resolution="invalid">
... <summary>Baz</summary>
... </item>
... <item status="closed" resolution="fixed">
... <summary>Waz</summary>
... </item>
... </items>
... </doc>''')
>>> print(doc.select('items/item[@status="closed" and '
... '(@resolution="invalid" or not(@resolution))]/summary/text()'))
BarBaz
Because the XPath engine operates on markup streams (as opposed to tree
structures), it only implements a subset of the full XPath 1.0 language.
"""
from collections import deque
try:
reduce # builtin in Python < 3
except NameError:
from functools import reduce
from math import ceil, floor
import operator
import re
from itertools import chain
from genshi.core import Stream, Attrs, Namespace, QName
from genshi.core import START, END, TEXT, START_NS, END_NS, COMMENT, PI, \
START_CDATA, END_CDATA
__all__ = ['Path', 'PathSyntaxError']
__docformat__ = 'restructuredtext en'
class Axis(object):
    """Defines constants for the various supported XPath axes."""

    ATTRIBUTE = 'attribute'
    CHILD = 'child'
    DESCENDANT = 'descendant'
    DESCENDANT_OR_SELF = 'descendant-or-self'
    SELF = 'self'

    @classmethod
    def forname(cls, name):
        """Look up the axis constant for the given name.

        Returns `None` when the name does not correspond to a defined axis.
        """
        attr = name.upper().replace('-', '_')
        return getattr(cls, attr, None)
# Module-level shorthand aliases for the `Axis` constants, used pervasively
# by the strategy and parser code below.
ATTRIBUTE = Axis.ATTRIBUTE
CHILD = Axis.CHILD
DESCENDANT = Axis.DESCENDANT
DESCENDANT_OR_SELF = Axis.DESCENDANT_OR_SELF
SELF = Axis.SELF
class GenericStrategy(object):
    """Fallback matching strategy able to evaluate any supported path
    expression against a stream; used when no specialized strategy applies.
    """
    @classmethod
    def supports(cls, path):
        # The generic strategy accepts every path expression.
        return True
    def __init__(self, path):
        self.path = path
    def test(self, ignore_context):
        """Build and return the stateful ``_test`` closure used to match
        stream events against the path (see `Path.test` for the contract).
        """
        p = self.path
        # Normalize the first step so the matcher always starts from a
        # well-defined axis.
        if ignore_context:
            if p[0][0] is ATTRIBUTE:
                steps = [_DOTSLASHSLASH] + p
            else:
                steps = [(DESCENDANT_OR_SELF, p[0][1], p[0][2])] + p[1:]
        elif p[0][0] is CHILD or p[0][0] is ATTRIBUTE \
                or p[0][0] is DESCENDANT:
            steps = [_DOTSLASH] + p
        else:
            steps = p
        # for node it contains all positions of xpath expression
        # where its child should start checking for matches
        # with list of corresponding context counters
        # there can be many of them, because position that is from
        # descendant-like axis can be achieved from different nodes
        # for example <a><a><b/></a></a> should match both //a//b[1]
        # and //a//b[2]
        # positions always form increasing sequence (invariant)
        stack = [[(0, [[]])]]
        def _test(event, namespaces, variables, updateonly=False):
            kind, data, pos = event[:3]
            retval = None
            # Manage the stack that tells us "where we are" in the stream
            if kind is END:
                if stack:
                    stack.pop()
                return None
            if kind is START_NS or kind is END_NS \
                    or kind is START_CDATA or kind is END_CDATA:
                # should we make namespaces work?
                return None
            pos_queue = deque([(pos, cou, []) for pos, cou in stack[-1]])
            next_pos = []
            # length of real part of path - we omit attribute axis
            real_len = len(steps) - ((steps[-1][0] == ATTRIBUTE) or 1 and 0)
            last_checked = -1  # NOTE(review): appears unused
            # places where we have to check for match, are these
            # provided by parent
            while pos_queue:
                x, pcou, mcou = pos_queue.popleft()
                axis, nodetest, predicates = steps[x]
                # we need to push descendant-like positions from parent
                # further
                if (axis is DESCENDANT or axis is DESCENDANT_OR_SELF) and pcou:
                    if next_pos and next_pos[-1][0] == x:
                        next_pos[-1][1].extend(pcou)
                    else:
                        next_pos.append((x, pcou))
                # nodetest first
                if not nodetest(kind, data, pos, namespaces, variables):
                    continue
                # counters packs that were already bad
                missed = set()
                counters_len = len(pcou) + len(mcou)
                # number of counters - we have to create one
                # for every context position based predicate
                cnum = 0
                # tells if we have match with position x
                matched = True
                if predicates:
                    for predicate in predicates:
                        pretval = predicate(kind, data, pos,
                                            namespaces,
                                            variables)
                        if type(pretval) is float: # FIXME <- need to check
                                                   # this for other types that
                                                   # can be coerced to float
                            # each counter pack needs to be checked
                            for i, cou in enumerate(chain(pcou, mcou)):
                                # it was bad before
                                if i in missed:
                                    continue
                                if len(cou) < cnum + 1:
                                    cou.append(0)
                                cou[cnum] += 1
                                # it is bad now
                                if cou[cnum] != int(pretval):
                                    missed.add(i)
                            # none of counters pack was good
                            if len(missed) == counters_len:
                                pretval = False
                            cnum += 1
                        if not pretval:
                            matched = False
                            break
                if not matched:
                    continue
                # counter for next position with current node as context node
                child_counter = []
                if x + 1 == real_len:
                    # we reached end of expression, because x + 1
                    # is equal to the length of expression
                    matched = True
                    axis, nodetest, predicates = steps[-1]
                    if axis is ATTRIBUTE:
                        matched = nodetest(kind, data, pos, namespaces,
                                           variables)
                    if matched:
                        retval = matched
                else:
                    next_axis = steps[x + 1][0]
                    # if next axis allows matching self we have
                    # to add next position to our queue
                    if next_axis is DESCENDANT_OR_SELF or next_axis is SELF:
                        if not pos_queue or pos_queue[0][0] > x + 1:
                            pos_queue.appendleft((x + 1, [], [child_counter]))
                        else:
                            pos_queue[0][2].append(child_counter)
                    # if axis is not self we have to add it to child's list
                    if next_axis is not SELF:
                        next_pos.append((x + 1, [child_counter]))
            if kind is START:
                stack.append(next_pos)
            return retval
        return _test
class SimplePathStrategy(object):
    """Strategy for path with only local names, attributes and text nodes.

    Matching uses a Knuth-Morris-Pratt style automaton over path
    "fragments" (runs of child:: steps separated by descendant axes).
    """
    @classmethod
    def supports(cls, path):
        # Only predicate-free paths built from local-name, comment() and
        # text() tests are supported, and the path may not start on the
        # attribute axis.
        if path[0][0] is ATTRIBUTE:
            return False
        allowed_tests = (LocalNameTest, CommentNodeTest, TextNodeTest)
        for _, nodetest, predicates in path:
            if predicates:
                return False
            if not isinstance(nodetest, allowed_tests):
                return False
        return True
    def __init__(self, path):
        # fragments is list of tuples (fragment, pi, attr, self_beginning)
        # fragment is list of nodetests for fragment of path with only
        # child:: axes between
        # pi is KMP partial match table for this fragment
        # attr is attribute nodetest if fragment ends with @ and None otherwise
        # self_beginning is True if axis for first fragment element
        # was self (first fragment) or descendant-or-self (farther fragment)
        self.fragments = []
        self_beginning = False
        fragment = []
        def nodes_equal(node1, node2):
            """Tests if two node tests are equal"""
            if type(node1) is not type(node2):
                return False
            if type(node1) == LocalNameTest:
                return node1.name == node2.name
            return True
        def calculate_pi(f):
            """KMP prefix calculation for table"""
            # the indexes in prefix table are shifted by one
            # in comparison with common implementations
            # pi[i] = NORMAL_PI[i + 1]
            if len(f) == 0:
                return []
            pi = [0]
            s = 0
            for i in range(1, len(f)):
                while s > 0 and not nodes_equal(f[s], f[i]):
                    s = pi[s-1]
                if nodes_equal(f[s], f[i]):
                    s += 1
                pi.append(s)
            return pi
        for axis in path:
            if axis[0] is SELF:
                if len(fragment) != 0:
                    # if element is not first in fragment it has to be
                    # the same as previous one
                    # for example child::a/self::b is always wrong
                    if axis[1] != fragment[-1][1]:
                        self.fragments = None
                        return
                else:
                    self_beginning = True
                    fragment.append(axis[1])
            elif axis[0] is CHILD:
                fragment.append(axis[1])
            elif axis[0] is ATTRIBUTE:
                pi = calculate_pi(fragment)
                self.fragments.append((fragment, pi, axis[1], self_beginning))
                # attribute has always to be at the end, so we can jump out
                return
            else:
                pi = calculate_pi(fragment)
                self.fragments.append((fragment, pi, None, self_beginning))
                fragment = [axis[1]]
                if axis[0] is DESCENDANT:
                    self_beginning = False
                else: # DESCENDANT_OR_SELF
                    self_beginning = True
        pi = calculate_pi(fragment)
        self.fragments.append((fragment, pi, None, self_beginning))
    def test(self, ignore_context):
        """Build and return the stateful ``_test`` closure used to match
        stream events against the path (see `Path.test` for the contract).
        """
        # stack of triples (fid, p, ic)
        # fid is index of current fragment
        # p is position in this fragment
        # ic is if we ignore context in this fragment
        stack = []
        stack_push = stack.append
        stack_pop = stack.pop
        frags = self.fragments
        frags_len = len(frags)
        def _test(event, namespaces, variables, updateonly=False):
            # expression found impossible during init
            if frags is None:
                return None
            kind, data, pos = event[:3]
            # skip events we don't care about
            if kind is END:
                if stack:
                    stack_pop()
                return None
            if kind is START_NS or kind is END_NS \
                    or kind is START_CDATA or kind is END_CDATA:
                return None
            if not stack:
                # root node, nothing on stack, special case
                fid = 0
                # skip empty fragments (there can be actually only one)
                while not frags[fid][0]:
                    fid += 1
                p = 0
                # empty fragment means descendant node at beginning
                ic = ignore_context or (fid > 0)
                # expression can match first node, if first axis is self::,
                # descendant-or-self:: or if ignore_context is True and
                # axis is not descendant::
                if not frags[fid][3] and (not ignore_context or fid > 0):
                    # axis is not self-beginning, we have to skip this node
                    stack_push((fid, p, ic))
                    return None
            else:
                # take position of parent
                fid, p, ic = stack[-1]
            if fid is not None and not ic:
                # fragment not ignoring context - we can't jump back
                frag, pi, attrib, _ = frags[fid]
                frag_len = len(frag)
                if p == frag_len:
                    # that probably means empty first fragment
                    pass
                elif frag[p](kind, data, pos, namespaces, variables):
                    # match, so we can go further
                    p += 1
                else:
                    # not matched, so there will be no match in subtree
                    fid, p = None, None
                if p == frag_len and fid + 1 != frags_len:
                    # we made it to end of fragment, we can go to following
                    fid += 1
                    p = 0
                    ic = True
            if fid is None:
                # there was no match in fragment not ignoring context
                if kind is START:
                    stack_push((fid, p, ic))
                return None
            if ic:
                # we are in fragment ignoring context
                while True:
                    frag, pi, attrib, _ = frags[fid]
                    frag_len = len(frag)
                    # KMP new "character"
                    while p > 0 and (p >= frag_len or not \
                            frag[p](kind, data, pos, namespaces, variables)):
                        p = pi[p-1]
                    if frag[p](kind, data, pos, namespaces, variables):
                        p += 1
                    if p == frag_len:
                        # end of fragment reached
                        if fid + 1 == frags_len:
                            # that was last fragment
                            break
                        else:
                            fid += 1
                            p = 0
                            ic = True
                            if not frags[fid][3]:
                                # next fragment not self-beginning
                                break
                    else:
                        break
            if kind is START:
                # we have to put new position on stack, for children
                if not ic and fid + 1 == frags_len and p == frag_len:
                    # it is end of the only, not context ignoring fragment
                    # so there will be no matches in subtree
                    stack_push((None, None, ic))
                else:
                    stack_push((fid, p, ic))
            # have we reached the end of the last fragment?
            if fid + 1 == frags_len and p == frag_len:
                if attrib: # attribute ended path, return value
                    return attrib(kind, data, pos, namespaces, variables)
                return True
            return None
        return _test
class SingleStepStrategy(object):
    """Optimized strategy for paths consisting of exactly one location step."""
    @classmethod
    def supports(cls, path):
        return len(path) == 1
    def __init__(self, path):
        self.path = path
    def test(self, ignore_context):
        """Build and return the stateful ``_test`` closure used to match
        stream events against the path (see `Path.test` for the contract).
        """
        steps = self.path
        if steps[0][0] is ATTRIBUTE:
            # Prepend a self:: step so the attribute is selected off the
            # context node itself.
            steps = [_DOTSLASH] + steps
        select_attr = steps[-1][0] is ATTRIBUTE and steps[-1][1] or None
        # for every position in expression stores counters' list
        # it is used for position based predicates
        counters = []
        # Current element depth, boxed in a list so the closure can mutate it.
        depth = [0]
        def _test(event, namespaces, variables, updateonly=False):
            kind, data, pos = event[:3]
            # Manage the stack that tells us "where we are" in the stream
            if kind is END:
                if not ignore_context:
                    depth[0] -= 1
                return None
            elif kind is START_NS or kind is END_NS \
                    or kind is START_CDATA or kind is END_CDATA:
                # should we make namespaces work?
                return None
            if not ignore_context:
                # Check whether the current depth is compatible with the
                # axis of the (single) step.
                outside = (steps[0][0] is SELF and depth[0] != 0) \
                       or (steps[0][0] is CHILD and depth[0] != 1) \
                       or (steps[0][0] is DESCENDANT and depth[0] < 1)
                if kind is START:
                    depth[0] += 1
                if outside:
                    return None
            axis, nodetest, predicates = steps[0]
            if not nodetest(kind, data, pos, namespaces, variables):
                return None
            if predicates:
                cnum = 0
                for predicate in predicates:
                    pretval = predicate(kind, data, pos, namespaces, variables)
                    if type(pretval) is float: # FIXME <- need to check this
                                               # for other types that can be
                                               # coerced to float
                        if len(counters) < cnum + 1:
                            counters.append(0)
                        counters[cnum] += 1
                        if counters[cnum] != int(pretval):
                            pretval = False
                        cnum += 1
                    if not pretval:
                        return None
            if select_attr:
                return select_attr(kind, data, pos, namespaces, variables)
            return True
        return _test
class Path(object):
    """Implements basic XPath support on streams.

    Instances of this class represent a "compiled" XPath expression, and
    provide methods for testing the path against a stream, as well as
    extracting a substream matching that path.
    """
    # Candidate strategies, tried in order from most to least specialized.
    STRATEGIES = (SingleStepStrategy, SimplePathStrategy, GenericStrategy)

    def __init__(self, text, filename=None, lineno=-1):
        """Create the path object from a string.

        :param text: the path expression
        :param filename: the name of the file in which the path expression was
                         found (used in error messages)
        :param lineno: the line on which the expression was found
        :raises PathSyntaxError: if the expression cannot be parsed
        """
        self.source = text
        self.paths = PathParser(text, filename, lineno).parse()
        self.strategies = []
        for path in self.paths:
            for strategy_class in self.STRATEGIES:
                if strategy_class.supports(path):
                    self.strategies.append(strategy_class(path))
                    break
            else:
                # ``NotImplemented`` is a comparison sentinel and is not
                # callable; raising it produced a TypeError instead of the
                # intended error, so raise NotImplementedError instead.
                raise NotImplementedError('No strategy found for path')

    def __repr__(self):
        paths = []
        for path in self.paths:
            steps = []
            for axis, nodetest, predicates in path:
                steps.append('%s::%s' % (axis, nodetest))
                for predicate in predicates:
                    steps[-1] += '[%s]' % predicate
            paths.append('/'.join(steps))
        return '<%s "%s">' % (type(self).__name__, '|'.join(paths))

    def select(self, stream, namespaces=None, variables=None):
        """Returns a substream of the given stream that matches the path.

        If there are no matches, this method returns an empty stream.

        >>> from genshi.input import XML
        >>> xml = XML('<root><elem><child>Text</child></elem></root>')
        >>> print(Path('.//child').select(xml))
        <child>Text</child>
        >>> print(Path('.//child/text()').select(xml))
        Text

        :param stream: the stream to select from
        :param namespaces: (optional) a mapping of namespace prefixes to URIs
        :param variables: (optional) a mapping of variable names to values
        :return: the substream matching the path, or an empty stream
        :rtype: `Stream`
        """
        if namespaces is None:
            namespaces = {}
        if variables is None:
            variables = {}
        stream = iter(stream)
        def _generate(stream=stream, ns=namespaces, vs=variables):
            next = stream.next  # Python 2 iterator protocol
            test = self.test()
            for event in stream:
                result = test(event, ns, vs)
                if result is True:
                    # A START match: emit the element plus its entire
                    # subtree, keeping the matcher state updated.
                    yield event
                    if event[0] is START:
                        depth = 1
                        while depth > 0:
                            subevent = next()
                            if subevent[0] is START:
                                depth += 1
                            elif subevent[0] is END:
                                depth -= 1
                            yield subevent
                            test(subevent, ns, vs, updateonly=True)
                elif result:
                    # Attribute/computed matches yield the value itself.
                    yield result
        return Stream(_generate(),
                      serializer=getattr(stream, 'serializer', None))

    def test(self, ignore_context=False):
        """Returns a function that can be used to track whether the path matches
        a specific stream event.

        The function returned expects the positional arguments ``event``,
        ``namespaces`` and ``variables``. The first is a stream event, while the
        latter two are a mapping of namespace prefixes to URIs, and a mapping
        of variable names to values, respectively. In addition, the function
        accepts an ``updateonly`` keyword argument that defaults to ``False``.
        If it is set to ``True``, the function only updates its internal state,
        but does not perform any tests or return a result.

        If the path matches the event, the function returns the match (for
        example, a `START` or `TEXT` event.) Otherwise, it returns ``None``.

        >>> from genshi.input import XML
        >>> xml = XML('<root><elem><child id="1"/></elem><child id="2"/></root>')
        >>> test = Path('child').test()
        >>> namespaces, variables = {}, {}
        >>> for event in xml:
        ...     if test(event, namespaces, variables):
        ...         print('%s %r' % (event[0], event[1]))
        START (QName('child'), Attrs([(QName('id'), u'2')]))

        :param ignore_context: if `True`, the path is interpreted like a pattern
                               in XSLT, meaning for example that it will match
                               at any depth
        :return: a function that can be used to test individual events in a
                 stream against the path
        :rtype: ``function``
        """
        tests = [s.test(ignore_context) for s in self.strategies]
        if len(tests) == 1:
            return tests[0]
        def _multi(event, namespaces, variables, updateonly=False):
            # Run every union operand's tester; the first non-None result
            # wins, but all testers must still see the event.
            retval = None
            for test in tests:
                val = test(event, namespaces, variables, updateonly=updateonly)
                if retval is None:
                    retval = val
            return retval
        return _multi
class PathSyntaxError(Exception):
    """Exception raised when an XPath expression is syntactically incorrect."""

    def __init__(self, message, filename=None, lineno=-1, offset=-1):
        # Embed the source location in the message when a filename is known.
        if filename:
            message = '%s (%s, line %d)' % (message, filename, lineno)
        super(PathSyntaxError, self).__init__(message)
        self.filename = filename
        self.lineno = lineno
        self.offset = offset
class PathParser(object):
    """Tokenizes and parses an XPath expression."""
    _QUOTES = (("'", "'"), ('"', '"'))
    _TOKENS = ('::', ':', '..', '.', '//', '/', '[', ']', '()', '(', ')', '@',
               '=', '!=', '!', '|', ',', '>=', '>', '<=', '<', '$')
    _tokenize = re.compile('("[^"]*")|(\'[^\']*\')|((?:\d+)?\.\d+)|(%s)|([^%s\s]+)|\s+' % (
        '|'.join([re.escape(t) for t in _TOKENS]),
        ''.join([re.escape(t[0]) for t in _TOKENS]))).findall
    def __init__(self, text, filename=None, lineno=-1):
        """Tokenize `text`; `filename` and `lineno` are only used to build
        error messages.
        """
        self.filename = filename
        self.lineno = lineno
        self.tokens = [t for t in [dqstr or sqstr or number or token or name
                                   for dqstr, sqstr, number, token, name in
                                   self._tokenize(text)] if t]
        self.pos = 0
    # Tokenizer
    @property
    def at_end(self):
        # True when the current token is the last one.
        return self.pos == len(self.tokens) - 1
    @property
    def cur_token(self):
        return self.tokens[self.pos]
    def next_token(self):
        """Advance to and return the next token."""
        self.pos += 1
        return self.tokens[self.pos]
    def peek_token(self):
        """Return the next token without consuming it, or `None` at the end."""
        if not self.at_end:
            return self.tokens[self.pos + 1]
        return None
    # Recursive descent parser
    def parse(self):
        """Parses the XPath expression and returns a list of location path
        tests.

        For union expressions (such as `*|text()`), this function returns one
        test for each operand in the union. For path expressions that don't
        use the union operator, the function always returns a list of size 1.

        Each path test in turn is a sequence of tests that correspond to the
        location steps, each tuples of the form `(axis, testfunc, predicates)`

        :raises PathSyntaxError: on any syntax error in the expression
        """
        paths = [self._location_path()]
        while self.cur_token == '|':
            self.next_token()
            paths.append(self._location_path())
        if not self.at_end:
            raise PathSyntaxError('Unexpected token %r after end of expression'
                                  % self.cur_token, self.filename, self.lineno)
        return paths
    def _location_path(self):
        """Parse a sequence of location steps separated by '/' or '//'."""
        steps = []
        while True:
            if self.cur_token.startswith('/'):
                if not steps:
                    if self.cur_token == '//':
                        # hack to make //* match every node - also root
                        self.next_token()
                        axis, nodetest, predicates = self._location_step()
                        steps.append((DESCENDANT_OR_SELF, nodetest,
                                      predicates))
                        if self.at_end or not self.cur_token.startswith('/'):
                            break
                        continue
                    else:
                        raise PathSyntaxError('Absolute location paths not '
                                              'supported', self.filename,
                                              self.lineno)
                elif self.cur_token == '//':
                    steps.append((DESCENDANT_OR_SELF, NodeTest(), []))
                self.next_token()
            axis, nodetest, predicates = self._location_step()
            if not axis:
                axis = CHILD
            steps.append((axis, nodetest, predicates))
            if self.at_end or not self.cur_token.startswith('/'):
                break
        return steps
    def _location_step(self):
        """Parse one location step: optional axis, node test and predicates."""
        if self.cur_token == '@':
            axis = ATTRIBUTE
            self.next_token()
        elif self.cur_token == '.':
            axis = SELF
        elif self.cur_token == '..':
            raise PathSyntaxError('Unsupported axis "parent"', self.filename,
                                  self.lineno)
        elif self.peek_token() == '::':
            axis = Axis.forname(self.cur_token)
            if axis is None:
                # Report the offending token; `axis` itself is None here.
                raise PathSyntaxError('Unsupported axis "%s"' % self.cur_token,
                                      self.filename, self.lineno)
            self.next_token()
            self.next_token()
        else:
            axis = None
        nodetest = self._node_test(axis or CHILD)
        predicates = []
        while self.cur_token == '[':
            predicates.append(self._predicate())
        return axis, nodetest, predicates
    def _node_test(self, axis=None):
        """Parse a node test: a node-type test, a prefixed name, '*' or '.'."""
        test = prefix = None
        next_token = self.peek_token()
        if next_token in ('(', '()'): # Node type test
            test = self._node_type()
        elif next_token == ':': # Namespace prefix
            prefix = self.cur_token
            self.next_token()
            localname = self.next_token()
            if localname == '*':
                test = QualifiedPrincipalTypeTest(axis, prefix)
            else:
                test = QualifiedNameTest(axis, prefix, localname)
        else: # Name test
            if self.cur_token == '*':
                test = PrincipalTypeTest(axis)
            elif self.cur_token == '.':
                test = NodeTest()
            else:
                test = LocalNameTest(axis, self.cur_token)
        if not self.at_end:
            self.next_token()
        return test
    def _node_type(self):
        """Parse a node-type test such as text(), comment() or
        processing-instruction('...').
        """
        name = self.cur_token
        self.next_token()
        args = []
        if self.cur_token != '()':
            # The processing-instruction() function optionally accepts the
            # name of the PI as argument, which must be a literal string
            self.next_token() # (
            if self.cur_token != ')':
                string = self.cur_token
                if (string[0], string[-1]) in self._QUOTES:
                    string = string[1:-1]
                args.append(string)
        cls = _nodetest_map.get(name)
        if not cls:
            raise PathSyntaxError('%s() not allowed here' % name, self.filename,
                                  self.lineno)
        return cls(*args)
    def _predicate(self):
        """Parse a '[...]' predicate expression."""
        assert self.cur_token == '['
        self.next_token()
        expr = self._or_expr()
        if self.cur_token != ']':
            raise PathSyntaxError('Expected "]" to close predicate, '
                                  'but found "%s"' % self.cur_token,
                                  self.filename, self.lineno)
        if not self.at_end:
            self.next_token()
        return expr
    def _or_expr(self):
        expr = self._and_expr()
        while self.cur_token == 'or':
            self.next_token()
            expr = OrOperator(expr, self._and_expr())
        return expr
    def _and_expr(self):
        expr = self._equality_expr()
        while self.cur_token == 'and':
            self.next_token()
            expr = AndOperator(expr, self._equality_expr())
        return expr
    def _equality_expr(self):
        expr = self._relational_expr()
        while self.cur_token in ('=', '!='):
            op = _operator_map[self.cur_token]
            self.next_token()
            expr = op(expr, self._relational_expr())
        return expr
    def _relational_expr(self):
        expr = self._sub_expr()
        # The tuple previously listed '>=' twice and omitted '<=', which made
        # '<=' comparisons unparseable.
        while self.cur_token in ('>', '>=', '<', '<='):
            op = _operator_map[self.cur_token]
            self.next_token()
            expr = op(expr, self._sub_expr())
        return expr
    def _sub_expr(self):
        """Parse a parenthesized sub-expression or a primary expression."""
        token = self.cur_token
        if token != '(':
            return self._primary_expr()
        self.next_token()
        expr = self._or_expr()
        if self.cur_token != ')':
            raise PathSyntaxError('Expected ")" to close sub-expression, '
                                  'but found "%s"' % self.cur_token,
                                  self.filename, self.lineno)
        self.next_token()
        return expr
    def _primary_expr(self):
        """Parse a literal, variable reference, function call or node test."""
        token = self.cur_token
        if len(token) > 1 and (token[0], token[-1]) in self._QUOTES:
            self.next_token()
            return StringLiteral(token[1:-1])
        elif token[0].isdigit() or token[0] == '.':
            self.next_token()
            return NumberLiteral(as_float(token))
        elif token == '$':
            token = self.next_token()
            self.next_token()
            return VariableReference(token)
        elif not self.at_end and self.peek_token().startswith('('):
            return self._function_call()
        else:
            axis = None
            if token == '@':
                axis = ATTRIBUTE
                self.next_token()
            return self._node_test(axis)
    def _function_call(self):
        """Parse a function call and build the corresponding function node."""
        name = self.cur_token
        if self.next_token() == '()':
            args = []
        else:
            assert self.cur_token == '('
            self.next_token()
            args = [self._or_expr()]
            while self.cur_token == ',':
                self.next_token()
                args.append(self._or_expr())
            if not self.cur_token == ')':
                raise PathSyntaxError('Expected ")" to close function argument '
                                      'list, but found "%s"' % self.cur_token,
                                      self.filename, self.lineno)
        self.next_token()
        cls = _function_map.get(name)
        if not cls:
            raise PathSyntaxError('Unsupported function "%s"' % name,
                                  self.filename, self.lineno)
        return cls(*args)
# Type coercion
def as_scalar(value):
    """Convert value to a scalar. If a single element Attrs() object is passed
    the value of the single attribute will be returned."""
    if not isinstance(value, Attrs):
        return value
    # Exactly one attribute is expected; return its value.
    assert len(value) == 1
    return value[0][1]
def as_float(value):
    """Coerce `value` (or its scalar form) to a float."""
    # FIXME - a bool input is coerced to 0.0/1.0 and then compared as a
    # float, which is probably not ideal.
    scalar = as_scalar(value)
    return float(scalar)
def as_long(value):
    """Coerce `value` (or its scalar form) to a Python 2 `long` integer."""
    scalar = as_scalar(value)
    return long(scalar)
def as_string(value):
    """Coerce `value` (or its scalar form) to a unicode string.

    The boolean `False` maps to the empty string, matching XPath semantics.
    """
    scalar = as_scalar(value)
    if scalar is False:
        return ''
    return unicode(scalar)
def as_bool(value):
    """Coerce `value` (or its scalar form) to a boolean."""
    scalar = as_scalar(value)
    return bool(scalar)
# Node tests
class PrincipalTypeTest(object):
    """Node test that matches any event with the given principal type."""

    __slots__ = ['principal_type']

    def __init__(self, principal_type):
        self.principal_type = principal_type

    def __call__(self, kind, data, pos, namespaces, variables):
        if kind is not START:
            return None
        if self.principal_type is ATTRIBUTE:
            # Return the attribute collection, or None when it is empty.
            return data[1] or None
        return True

    def __repr__(self):
        return '*'
class QualifiedPrincipalTypeTest(object):
    """Node test that matches any event with the given principal type in a
    specific namespace."""

    __slots__ = ['principal_type', 'prefix']

    def __init__(self, principal_type, prefix):
        self.principal_type = principal_type
        self.prefix = prefix

    def __call__(self, kind, data, pos, namespaces, variables):
        # Resolve the prefix against the in-scope namespace mapping.
        namespace = Namespace(namespaces.get(self.prefix))
        if kind is not START:
            return None
        if self.principal_type is ATTRIBUTE and data[1]:
            # Keep only the attributes that live in the target namespace.
            matching = Attrs([(name, value) for name, value in data[1]
                              if name in namespace])
            return matching or None
        return data[0] in namespace

    def __repr__(self):
        return '%s:*' % self.prefix
class LocalNameTest(object):
    """Node test that matches any event with the given principal type and
    local name.
    """

    __slots__ = ['principal_type', 'name']

    def __init__(self, principal_type, name):
        self.principal_type = principal_type
        self.name = name

    def __call__(self, kind, data, pos, namespaces, variables):
        if kind is not START:
            return None
        if self.principal_type is ATTRIBUTE and self.name in data[1]:
            # Return a one-element attribute collection for the match.
            return Attrs([(self.name, data[1].get(self.name))])
        return data[0].localname == self.name

    def __repr__(self):
        return self.name
class QualifiedNameTest(object):
    """Node test that matches any event with the given principal type and
    qualified name.
    """

    __slots__ = ['principal_type', 'prefix', 'name']

    def __init__(self, principal_type, prefix, name):
        self.principal_type = principal_type
        self.prefix = prefix
        self.name = name

    def __call__(self, kind, data, pos, namespaces, variables):
        # Build the fully qualified name from the resolved namespace URI.
        qname = QName('%s}%s' % (namespaces.get(self.prefix), self.name))
        if kind is not START:
            return None
        if self.principal_type is ATTRIBUTE and qname in data[1]:
            return Attrs([(self.name, data[1].get(self.name))])
        return data[0] == qname

    def __repr__(self):
        return '%s:%s' % (self.prefix, self.name)
class CommentNodeTest(object):
    """Node test matching only comment events."""

    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        return kind is COMMENT

    def __repr__(self):
        return 'comment()'
class NodeTest(object):
    """Node test that matches any node."""

    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        if kind is not START:
            # Non-element events match as themselves.
            return kind, data, pos
        return True

    def __repr__(self):
        return 'node()'
class ProcessingInstructionNodeTest(object):
    """Node test matching processing-instruction events, optionally
    restricted to a specific PI target.
    """

    __slots__ = ['target']

    def __init__(self, target=None):
        self.target = target

    def __call__(self, kind, data, pos, namespaces, variables):
        if kind is not PI:
            return False
        # Without a configured target, every PI matches.
        return not self.target or data[0] == self.target

    def __repr__(self):
        if self.target:
            return 'processing-instruction("%s")' % self.target
        return 'processing-instruction()'
class TextNodeTest(object):
    """Node test matching only text events."""

    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        return kind is TEXT

    def __repr__(self):
        return 'text()'
# Maps node-type test names, as they appear in XPath expressions, to the
# node test classes that implement them (see `PathParser._node_type`).
_nodetest_map = {'comment': CommentNodeTest, 'node': NodeTest,
                 'processing-instruction': ProcessingInstructionNodeTest,
                 'text': TextNodeTest}
# Functions
class Function(object):
    """Base class for the function nodes used in XPath expressions."""
class BooleanFunction(Function):
    """The `boolean` function, which converts its argument to a boolean
    value.
    """

    __slots__ = ['expr']
    _return_type = bool

    def __init__(self, expr):
        self.expr = expr

    def __call__(self, kind, data, pos, namespaces, variables):
        result = self.expr(kind, data, pos, namespaces, variables)
        return as_bool(result)

    def __repr__(self):
        return 'boolean(%r)' % self.expr
class CeilingFunction(Function):
    """The `ceiling` function, which returns the smallest integer greater
    than or equal to the given number.
    """
    __slots__ = ['number']
    def __init__(self, number):
        self.number = number
    def __call__(self, kind, data, pos, namespaces, variables):
        number = self.number(kind, data, pos, namespaces, variables)
        return ceil(as_float(number))
    def __repr__(self):
        return 'ceiling(%r)' % self.number
class ConcatFunction(Function):
    """The `concat` function, which concatenates (joins) the variable number of
    strings it gets as arguments.
    """

    __slots__ = ['exprs']

    def __init__(self, *exprs):
        self.exprs = exprs

    def __call__(self, kind, data, pos, namespaces, variables):
        # Evaluate each argument expression in order and join the coerced
        # strings directly, avoiding the intermediate list/append loop.
        return ''.join(as_string(expr(kind, data, pos, namespaces, variables))
                       for expr in self.exprs)

    def __repr__(self):
        return 'concat(%s)' % ', '.join([repr(expr) for expr in self.exprs])
class ContainsFunction(Function):
    """The `contains` function, which returns whether a string contains a given
    substring.
    """

    __slots__ = ['string1', 'string2']

    def __init__(self, string1, string2):
        self.string1 = string1
        self.string2 = string2

    def __call__(self, kind, data, pos, namespaces, variables):
        # Evaluate both operands first (string1 before string2), then test
        # for substring containment on their string forms.
        left = self.string1(kind, data, pos, namespaces, variables)
        right = self.string2(kind, data, pos, namespaces, variables)
        return as_string(right) in as_string(left)

    def __repr__(self):
        return 'contains(%r, %r)' % (self.string1, self.string2)
class MatchesFunction(Function):
    """The `matches` function, which returns whether a string matches a regular
    expression.
    """
    # 'flags' added so the attribute set at __init__ time is declared too.
    __slots__ = ['string1', 'string2', 'flags']
    flag_mapping = {'s': re.S, 'm': re.M, 'i': re.I, 'x': re.X}

    def __init__(self, string1, string2, flags=''):
        self.string1 = string1
        self.string2 = string2
        self.flags = self._map_flags(flags)

    def __call__(self, kind, data, pos, namespaces, variables):
        string1 = as_string(self.string1(kind, data, pos, namespaces, variables))
        string2 = as_string(self.string2(kind, data, pos, namespaces, variables))
        return re.search(string2, string1, self.flags)

    def _map_flags(self, flags):
        # Fold the single-character XPath flag names into a combined `re`
        # flag value; re.U is always enabled.  The attribute is named
        # `flag_mapping` -- the previous `self.flag_map` lookup raised
        # AttributeError whenever any flags were supplied.
        return reduce(operator.or_,
                      [self.flag_mapping[flag] for flag in flags], re.U)

    def __repr__(self):
        # Was mistakenly reported as 'contains(...)' before.
        return 'matches(%r, %r)' % (self.string1, self.string2)
class FalseFunction(Function):
    """The `false` function, which always returns the boolean `false` value."""

    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        # Constant result, independent of the event being tested.
        return False

    def __repr__(self):
        return 'false()'
class FloorFunction(Function):
    """The `floor` function, which returns the nearest lower integer number
    for the given number.
    """
    # NOTE: the previous docstring was copy-pasted from CeilingFunction
    # ("nearest higher integer"); the implementation rounds downwards.
    __slots__ = ['number']
    def __init__(self, number):
        self.number = number
    def __call__(self, kind, data, pos, namespaces, variables):
        # Evaluate the operand, coerce to float, round toward -infinity.
        number = self.number(kind, data, pos, namespaces, variables)
        return floor(as_float(number))
    def __repr__(self):
        return 'floor(%r)' % self.number
class LocalNameFunction(Function):
    """The `local-name` function, which returns the local name of the current
    element.
    """
    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        # Only meaningful on a START event; anything else yields None.
        if kind is not START:
            return None
        return data[0].localname

    def __repr__(self):
        return 'local-name()'
class NameFunction(Function):
    """The `name` function, which returns the qualified name of the current
    element.
    """
    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        # Only meaningful on a START event; anything else yields None.
        if kind is not START:
            return None
        return data[0]

    def __repr__(self):
        return 'name()'
class NamespaceUriFunction(Function):
    """The `namespace-uri` function, which returns the namespace URI of the
    current element.
    """
    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        # Only meaningful on a START event; anything else yields None.
        if kind is not START:
            return None
        return data[0].namespace

    def __repr__(self):
        return 'namespace-uri()'
class NotFunction(Function):
    """The `not` function, which returns the negated boolean value of its
    argument.
    """
    __slots__ = ['expr']

    def __init__(self, expr):
        self.expr = expr

    def __call__(self, kind, data, pos, namespaces, variables):
        value = self.expr(kind, data, pos, namespaces, variables)
        return not as_bool(value)

    def __repr__(self):
        return 'not(%s)' % self.expr
class NormalizeSpaceFunction(Function):
    """The `normalize-space` function: strips leading and trailing whitespace
    and collapses runs of internal whitespace to a single space.
    """
    __slots__ = ['expr']
    # Bound sub() of a pattern matching two-or-more whitespace characters.
    _normalize = re.compile(r'\s{2,}').sub

    def __init__(self, expr):
        self.expr = expr

    def __call__(self, kind, data, pos, namespaces, variables):
        raw = self.expr(kind, data, pos, namespaces, variables)
        stripped = as_string(raw).strip()
        return self._normalize(' ', stripped)

    def __repr__(self):
        return 'normalize-space(%s)' % repr(self.expr)
class NumberFunction(Function):
    """The `number` function that converts its argument to a number."""
    __slots__ = ['expr']

    def __init__(self, expr):
        self.expr = expr

    def __call__(self, kind, data, pos, namespaces, variables):
        # Evaluate the wrapped expression, then coerce the result to float.
        return as_float(self.expr(kind, data, pos, namespaces, variables))

    def __repr__(self):
        return 'number(%r)' % self.expr
class RoundFunction(Function):
    """The `round` function, which returns the nearest integer number for the
    given number.
    """
    __slots__ = ['number']

    def __init__(self, number):
        self.number = number

    def __call__(self, kind, data, pos, namespaces, variables):
        value = self.number(kind, data, pos, namespaces, variables)
        return round(as_float(value))

    def __repr__(self):
        return 'round(%r)' % self.number
class StartsWithFunction(Function):
    """The `starts-with` function that returns whether one string starts with
    a given substring.
    """
    __slots__ = ['string1', 'string2']

    def __init__(self, string1, string2):
        self.string1 = string1
        self.string2 = string2

    def __call__(self, kind, data, pos, namespaces, variables):
        subject = self.string1(kind, data, pos, namespaces, variables)
        prefix = self.string2(kind, data, pos, namespaces, variables)
        return as_string(subject).startswith(as_string(prefix))

    def __repr__(self):
        return 'starts-with(%r, %r)' % (self.string1, self.string2)
class StringLengthFunction(Function):
    """The `string-length` function that returns the length of the given
    string.
    """
    __slots__ = ['expr']

    def __init__(self, expr):
        self.expr = expr

    def __call__(self, kind, data, pos, namespaces, variables):
        value = self.expr(kind, data, pos, namespaces, variables)
        return len(as_string(value))

    def __repr__(self):
        return 'string-length(%r)' % self.expr
class SubstringFunction(Function):
    """The `substring` function that returns the part of a string that starts
    at the given offset, and optionally limited to the given length.
    """
    __slots__ = ['string', 'start', 'length']
    def __init__(self, string, start, length=None):
        self.string = string
        self.start = start
        self.length = length
    def __call__(self, kind, data, pos, namespaces, variables):
        string = self.string(kind, data, pos, namespaces, variables)
        start = self.start(kind, data, pos, namespaces, variables)
        length = 0
        if self.length is not None:
            length = self.length(kind, data, pos, namespaces, variables)
        # NOTE(review): this slices from a 0-based `start` and removes
        # `length` characters from the *end* of the string, which differs
        # from XPath's 1-based substring(start, length) semantics -- confirm
        # whether the deviation is intentional before changing behavior.
        return string[as_long(start):len(as_string(string)) - as_long(length)]
    def __repr__(self):
        if self.length is not None:
            return 'substring(%r, %r, %r)' % (self.string, self.start,
                                              self.length)
        else:
            return 'substring(%r, %r)' % (self.string, self.start)
class SubstringAfterFunction(Function):
    """The `substring-after` function that returns the part of a string that
    is found after the given substring.
    """
    __slots__ = ['string1', 'string2']

    def __init__(self, string1, string2):
        self.string1 = string1
        self.string2 = string2

    def __call__(self, kind, data, pos, namespaces, variables):
        haystack = as_string(self.string1(kind, data, pos, namespaces, variables))
        needle = as_string(self.string2(kind, data, pos, namespaces, variables))
        offset = haystack.find(needle)
        if offset < 0:
            # Substring not present: the result is the empty string.
            return ''
        return haystack[offset + len(needle):]

    def __repr__(self):
        return 'substring-after(%r, %r)' % (self.string1, self.string2)
class SubstringBeforeFunction(Function):
    """The `substring-before` function that returns the part of a string that
    is found before the given substring.
    """
    __slots__ = ['string1', 'string2']

    def __init__(self, string1, string2):
        self.string1 = string1
        self.string2 = string2

    def __call__(self, kind, data, pos, namespaces, variables):
        string1 = as_string(self.string1(kind, data, pos, namespaces, variables))
        string2 = as_string(self.string2(kind, data, pos, namespaces, variables))
        index = string1.find(string2)
        if index >= 0:
            return string1[:index]
        # Substring not present: the result is the empty string.
        return ''

    def __repr__(self):
        # Fixed: previously reported itself as 'substring-after'.
        return 'substring-before(%r, %r)' % (self.string1, self.string2)
class TranslateFunction(Function):
    """The `translate` function that translates a set of characters in a
    string to target set of characters.
    """
    __slots__ = ['string', 'fromchars', 'tochars']

    def __init__(self, string, fromchars, tochars):
        self.string = string
        self.fromchars = fromchars
        self.tochars = tochars

    def __call__(self, kind, data, pos, namespaces, variables):
        string = as_string(self.string(kind, data, pos, namespaces, variables))
        fromchars = as_string(self.fromchars(kind, data, pos, namespaces, variables))
        tochars = as_string(self.tochars(kind, data, pos, namespaces, variables))
        # Per XPath, characters of `fromchars` with no counterpart in
        # `tochars` must be *deleted* from the string; mapping them to None
        # makes translate() remove them.  (Previously zip() silently dropped
        # the surplus characters, leaving them untouched in the output.)
        table = dict((ord(c), None) for c in fromchars)
        for fromchar, tochar in zip(fromchars, tochars):
            table[ord(fromchar)] = ord(tochar)
        return string.translate(table)

    def __repr__(self):
        return 'translate(%r, %r, %r)' % (self.string, self.fromchars,
                                          self.tochars)
class TrueFunction(Function):
    """The `true` function; evaluates to the boolean ``True`` regardless of
    context.
    """
    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        # Constant function: all arguments are ignored.
        return True

    def __repr__(self):
        return 'true()'
# Registry mapping XPath function names to their implementation classes.
_function_map = {'boolean': BooleanFunction, 'ceiling': CeilingFunction,
                 'concat': ConcatFunction, 'contains': ContainsFunction,
                 'matches': MatchesFunction, 'false': FalseFunction, 'floor':
                 FloorFunction, 'local-name': LocalNameFunction, 'name':
                 NameFunction, 'namespace-uri': NamespaceUriFunction,
                 'normalize-space': NormalizeSpaceFunction, 'not': NotFunction,
                 'number': NumberFunction, 'round': RoundFunction,
                 'starts-with': StartsWithFunction, 'string-length':
                 StringLengthFunction, 'substring': SubstringFunction,
                 'substring-after': SubstringAfterFunction, 'substring-before':
                 SubstringBeforeFunction, 'translate': TranslateFunction,
                 'true': TrueFunction}
# Literals & Variables
class Literal(object):
    """Abstract base class for literal nodes (string literals, number
    literals, and variable references) in a parsed path expression.
    """
class StringLiteral(Literal):
    """A literal string token in a path expression."""
    __slots__ = ['text']

    def __init__(self, text):
        self.text = text

    def __call__(self, kind, data, pos, namespaces, variables):
        # Literals are constant; the evaluation context is ignored.
        return self.text

    def __repr__(self):
        return '"%s"' % self.text
class NumberLiteral(Literal):
    """A literal number token in a path expression."""
    __slots__ = ['number']

    def __init__(self, number):
        self.number = number

    def __call__(self, kind, data, pos, namespaces, variables):
        # Literals are constant; the evaluation context is ignored.
        return self.number

    def __repr__(self):
        return str(self.number)
class VariableReference(Literal):
    """A reference to a named variable in a path expression."""
    __slots__ = ['name']

    def __init__(self, name):
        self.name = name

    def __call__(self, kind, data, pos, namespaces, variables):
        # Unknown variables evaluate to None rather than raising.
        return variables.get(self.name)

    def __repr__(self):
        return str(self.name)
# Operators
class AndOperator(object):
    """The boolean operator `and` (short-circuiting)."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # The right operand is only evaluated when the left one is true.
        if not as_bool(self.lval(kind, data, pos, namespaces, variables)):
            return False
        return as_bool(self.rval(kind, data, pos, namespaces, variables))

    def __repr__(self):
        return '%s and %s' % (self.lval, self.rval)
class EqualsOperator(object):
    """The equality operator `=`."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Both operands are reduced to scalars before comparison.
        left = as_scalar(self.lval(kind, data, pos, namespaces, variables))
        right = as_scalar(self.rval(kind, data, pos, namespaces, variables))
        return left == right

    def __repr__(self):
        return '%s=%s' % (self.lval, self.rval)
class NotEqualsOperator(object):
    """The inequality operator `!=`."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Both operands are reduced to scalars before comparison.
        left = as_scalar(self.lval(kind, data, pos, namespaces, variables))
        right = as_scalar(self.rval(kind, data, pos, namespaces, variables))
        return left != right

    def __repr__(self):
        return '%s!=%s' % (self.lval, self.rval)
class OrOperator(object):
    """The boolean operator `or` (short-circuiting)."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # The right operand is only evaluated when the left one is false.
        if as_bool(self.lval(kind, data, pos, namespaces, variables)):
            return True
        return as_bool(self.rval(kind, data, pos, namespaces, variables))

    def __repr__(self):
        return '%s or %s' % (self.lval, self.rval)
class GreaterThanOperator(object):
    """The relational operator `>` (greater than)."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Both operands are coerced to floats before comparison.
        left = self.lval(kind, data, pos, namespaces, variables)
        right = self.rval(kind, data, pos, namespaces, variables)
        return as_float(left) > as_float(right)

    def __repr__(self):
        return '%s>%s' % (self.lval, self.rval)
class GreaterThanOrEqualOperator(object):
    """The relational operator `>=` (greater than or equal)."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Both operands are coerced to floats before comparison.
        left = self.lval(kind, data, pos, namespaces, variables)
        right = self.rval(kind, data, pos, namespaces, variables)
        return as_float(left) >= as_float(right)

    def __repr__(self):
        return '%s>=%s' % (self.lval, self.rval)
class LessThanOperator(object):
    """The relational operator `<` (less than)."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Both operands are coerced to floats before comparison.
        left = self.lval(kind, data, pos, namespaces, variables)
        right = self.rval(kind, data, pos, namespaces, variables)
        return as_float(left) < as_float(right)

    def __repr__(self):
        return '%s<%s' % (self.lval, self.rval)
class LessThanOrEqualOperator(object):
    """The relational operator `<=` (less than or equal)."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Both operands are coerced to floats before comparison.
        left = self.lval(kind, data, pos, namespaces, variables)
        right = self.rval(kind, data, pos, namespaces, variables)
        return as_float(left) <= as_float(right)

    def __repr__(self):
        return '%s<=%s' % (self.lval, self.rval)
# Registry mapping operator tokens to their implementation classes.
# Fixed: the key for LessThanOrEqualOperator was '>=' (a duplicate literal),
# which silently overwrote GreaterThanOrEqualOperator and left '<=' unmapped.
_operator_map = {'=': EqualsOperator, '!=': NotEqualsOperator,
                 '>': GreaterThanOperator, '>=': GreaterThanOrEqualOperator,
                 '<': LessThanOperator, '<=': LessThanOrEqualOperator}
# Precomputed (axis, node-test, predicates) path steps -- presumably the
# parsed forms of the './/' and './' abbreviations; verify against the path
# parser before relying on that interpretation.
_DOTSLASHSLASH = (DESCENDANT_OR_SELF, PrincipalTypeTest(None), ())
_DOTSLASH = (SELF, PrincipalTypeTest(None), ())
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Implementation of the various template directives."""
from genshi.core import QName, Stream
from genshi.path import Path
from genshi.template.base import TemplateRuntimeError, TemplateSyntaxError, \
EXPR, _apply_directives, _eval_expr
from genshi.template.eval import Expression, ExpressionASTTransformer, \
_ast, _parse
__all__ = ['AttrsDirective', 'ChooseDirective', 'ContentDirective',
'DefDirective', 'ForDirective', 'IfDirective', 'MatchDirective',
'OtherwiseDirective', 'ReplaceDirective', 'StripDirective',
'WhenDirective', 'WithDirective']
__docformat__ = 'restructuredtext en'
class DirectiveMeta(type):
    """Meta class for template directives."""

    def __new__(cls, name, bases, d):
        # Derive the directive's tag name from the class name, e.g.
        # 'AttrsDirective' -> 'attrs'.
        tagname = name.lower().replace('directive', '')
        d['tagname'] = tagname
        return type.__new__(cls, name, bases, d)
class Directive(object):
    """Abstract base class for template directives.
    A directive is basically a callable that takes three positional arguments:
    ``ctxt`` is the template data context, ``stream`` is an iterable over the
    events that the directive applies to, and ``directives`` is a list of
    other directives on the same stream that need to be applied.
    Directives can be "anonymous" or "registered". Registered directives can be
    applied by the template author using an XML attribute with the
    corresponding name in the template. Such directives should be subclasses of
    this base class that can be instantiated with the value of the directive
    attribute as parameter.
    Anonymous directives are simply functions conforming to the protocol
    described above, and can only be applied programmatically (for example by
    template filters).
    """
    # DirectiveMeta injects a `tagname` class attribute derived from the
    # subclass name (e.g. "IfDirective" -> "if").
    __metaclass__ = DirectiveMeta
    __slots__ = ['expr']
    def __init__(self, value, template=None, namespaces=None, lineno=-1,
                 offset=-1):
        # `expr` is the compiled Expression for the directive's attribute
        # value, or None when the value is empty.
        self.expr = self._parse_expr(value, template, lineno, offset)
    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        """Called after the template stream has been completely parsed.
        :param template: the `Template` object
        :param stream: the event stream associated with the directive
        :param value: the argument value for the directive; if the directive was
                      specified as an element, this will be an `Attrs` instance
                      with all specified attributes, otherwise it will be a
                      `unicode` object with just the attribute value
        :param namespaces: a mapping of namespace URIs to prefixes
        :param pos: a ``(filename, lineno, offset)`` tuple describing the
                    location where the directive was found in the source
        This class method should return a ``(directive, stream)`` tuple. If
        ``directive`` is not ``None``, it should be an instance of the `Directive`
        class, and gets added to the list of directives applied to the substream
        at runtime. `stream` is an event stream that replaces the original
        stream associated with the directive.
        """
        return cls(value, template, namespaces, *pos[1:]), stream
    def __call__(self, stream, directives, ctxt, **vars):
        """Apply the directive to the given stream.
        :param stream: the event stream
        :param directives: a list of the remaining directives that should
                           process the stream
        :param ctxt: the context data
        :param vars: additional variables that should be made available when
                     Python code is executed
        """
        raise NotImplementedError
    def __repr__(self):
        expr = ''
        if getattr(self, 'expr', None) is not None:
            expr = ' "%s"' % self.expr.source
        return '<%s%s>' % (type(self).__name__, expr)
    @classmethod
    def _parse_expr(cls, expr, template, lineno=-1, offset=-1):
        """Parses the given expression, raising a useful error message when a
        syntax error is encountered.
        """
        try:
            # The and/or chain maps a falsy (empty) expression string to
            # None instead of compiling it.
            return expr and Expression(expr, template.filepath, lineno,
                                       lookup=template.lookup) or None
        except SyntaxError, err:
            # Annotate the SyntaxError with directive context, then re-raise
            # it wrapped as a TemplateSyntaxError.
            err.msg += ' in expression "%s" of "%s" directive' % (expr,
                                                                  cls.tagname)
            raise TemplateSyntaxError(err, template.filepath, lineno,
                                      offset + (err.offset or 0))
def _assignment(ast):
    """Take the AST of an assignment target and return a function that binds
    a given value to that target inside a dictionary.
    """
    def _names(node):
        # Tuple targets (e.g. "a, (b, c)") become nested tuples of names;
        # plain names become strings.
        if isinstance(node, _ast.Tuple):
            return tuple([_names(child) for child in node.elts])
        elif isinstance(node, _ast.Name):
            return node.id
    def _assign(data, value, names=_names(ast)):
        if type(names) is tuple:
            # Recursively unpack the value across the tuple of targets.
            for idx, name in enumerate(names):
                _assign(data, value[idx], name)
        else:
            data[names] = value
    return _assign
class AttrsDirective(Directive):
    """Implementation of the ``py:attrs`` template directive.
    The value of the ``py:attrs`` attribute should be a dictionary or a sequence
    of ``(name, value)`` tuples. The items in that dictionary or sequence are
    added as attributes to the element:
    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<ul xmlns:py="http://genshi.edgewall.org/">
    ...   <li py:attrs="foo">Bar</li>
    ... </ul>''')
    >>> print(tmpl.generate(foo={'class': 'collapse'}))
    <ul>
      <li class="collapse">Bar</li>
    </ul>
    >>> print(tmpl.generate(foo=[('class', 'collapse')]))
    <ul>
      <li class="collapse">Bar</li>
    </ul>
    If the value evaluates to ``None`` (or any other non-truth value), no
    attributes are added:
    >>> print(tmpl.generate(foo=None))
    <ul>
      <li>Bar</li>
    </ul>
    """
    __slots__ = []
    def __call__(self, stream, directives, ctxt, **vars):
        def _generate():
            # The first event is the start tag whose attributes we modify.
            kind, (tag, attrib), pos = stream.next()
            attrs = _eval_expr(self.expr, ctxt, vars)
            if attrs:
                if isinstance(attrs, Stream):
                    # Take the first event of a nested stream, or nothing.
                    try:
                        attrs = iter(attrs).next()
                    except StopIteration:
                        attrs = []
                elif not isinstance(attrs, list): # assume it's a dict
                    attrs = attrs.items()
                # Remove attributes whose value is None, then set/overwrite
                # the rest (values are stringified and stripped).
                attrib -= [name for name, val in attrs if val is None]
                attrib |= [(QName(name), unicode(val).strip()) for name, val
                           in attrs if val is not None]
            yield kind, (tag, attrib), pos
            # Pass the remaining events through unchanged.
            for event in stream:
                yield event
        return _apply_directives(_generate(), directives, ctxt, vars)
class ContentDirective(Directive):
    """Implementation of the ``py:content`` template directive.
    This directive replaces the content of the element with the result of
    evaluating the value of the ``py:content`` attribute:
    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<ul xmlns:py="http://genshi.edgewall.org/">
    ...   <li py:content="bar">Hello</li>
    ... </ul>''')
    >>> print(tmpl.generate(bar='Bye'))
    <ul>
      <li>Bye</li>
    </ul>
    """
    __slots__ = []

    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        if type(value) is dict:
            raise TemplateSyntaxError('The content directive can not be used '
                                      'as an element', template.filepath,
                                      *pos[1:])
        expr = cls._parse_expr(value, template, *pos[1:])
        # Keep the start and end tags; everything in between is replaced by
        # a single expression event.  No runtime directive object is needed.
        first, last = stream[0], stream[-1]
        return None, [first, (EXPR, expr, pos), last]
class DefDirective(Directive):
    """Implementation of the ``py:def`` template directive.
    This directive can be used to create "Named Template Functions", which
    are template snippets that are not actually output during normal
    processing, but rather can be expanded from expressions in other places
    in the template.
    A named template function can be used just like a normal Python function
    from template expressions:
    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/">
    ...   <p py:def="echo(greeting, name='world')" class="message">
    ...     ${greeting}, ${name}!
    ...   </p>
    ...   ${echo('Hi', name='you')}
    ... </div>''')
    >>> print(tmpl.generate(bar='Bye'))
    <div>
      <p class="message">
        Hi, you!
      </p>
    </div>
    If a function does not require parameters, the parenthesis can be omitted
    in the definition:
    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/">
    ...   <p py:def="helloworld" class="message">
    ...     Hello, world!
    ...   </p>
    ...   ${helloworld()}
    ... </div>''')
    >>> print(tmpl.generate(bar='Bye'))
    <div>
      <p class="message">
        Hello, world!
      </p>
    </div>
    """
    __slots__ = ['name', 'args', 'star_args', 'dstar_args', 'defaults']
    def __init__(self, args, template, namespaces=None, lineno=-1, offset=-1):
        # No expression is compiled by the base class (value=None); the
        # directive value is a function signature, parsed below.
        Directive.__init__(self, None, template, namespaces, lineno, offset)
        ast = _parse(args).body
        self.args = []
        self.star_args = None
        self.dstar_args = None
        self.defaults = {}
        if isinstance(ast, _ast.Call):
            # Signature with parameter list: name(arg, ..., kw=default, ...)
            self.name = ast.func.id
            for arg in ast.args:
                # only names
                self.args.append(arg.id)
            for kwd in ast.keywords:
                # Keyword parameters: the default value is compiled as an
                # Expression and evaluated lazily at call time.
                self.args.append(kwd.arg)
                exp = Expression(kwd.value, template.filepath,
                                 lineno, lookup=template.lookup)
                self.defaults[kwd.arg] = exp
            if getattr(ast, 'starargs', None):
                self.star_args = ast.starargs.id
            if getattr(ast, 'kwargs', None):
                self.dstar_args = ast.kwargs.id
        else:
            # Bare name without parentheses: a zero-argument function.
            self.name = ast.id
    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        if type(value) is dict:
            value = value.get('function')
        return super(DefDirective, cls).attach(template, stream, value,
                                               namespaces, pos)
    def __call__(self, stream, directives, ctxt, **vars):
        stream = list(stream)
        def function(*args, **kwargs):
            # Bind positional/keyword arguments and defaults into a new
            # context scope, then replay the buffered stream within it.
            scope = {}
            args = list(args) # make mutable
            for name in self.args:
                if args:
                    scope[name] = args.pop(0)
                else:
                    if name in kwargs:
                        val = kwargs.pop(name)
                    else:
                        val = _eval_expr(self.defaults.get(name), ctxt, vars)
                    scope[name] = val
            if not self.star_args is None:
                scope[self.star_args] = args
            if not self.dstar_args is None:
                scope[self.dstar_args] = kwargs
            ctxt.push(scope)
            for event in _apply_directives(stream, directives, ctxt, vars):
                yield event
            ctxt.pop()
        function.__name__ = self.name
        # Store the function reference in the bottom context frame so that it
        # doesn't get popped off before processing the template has finished
        # FIXME: this makes context data mutable as a side-effect
        ctxt.frames[-1][self.name] = function
        return []
    def __repr__(self):
        return '<%s "%s">' % (type(self).__name__, self.name)
class ForDirective(Directive):
    """Implementation of the ``py:for`` template directive for repeating an
    element based on an iterable in the context data.
    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<ul xmlns:py="http://genshi.edgewall.org/">
    ...   <li py:for="item in items">${item}</li>
    ... </ul>''')
    >>> print(tmpl.generate(items=[1, 2, 3]))
    <ul>
      <li>1</li><li>2</li><li>3</li>
    </ul>
    """
    __slots__ = ['assign', 'filename']
    def __init__(self, value, template, namespaces=None, lineno=-1, offset=-1):
        if ' in ' not in value:
            raise TemplateSyntaxError('"in" keyword missing in "for" directive',
                                      template.filepath, lineno, offset)
        # Split "<target> in <iterable>" once; the target becomes an
        # assignment function, the iterable becomes the directive expression.
        assign, value = value.split(' in ', 1)
        ast = _parse(assign, 'exec')
        # The iterable expression is wrapped in iter() before compilation.
        value = 'iter(%s)' % value.strip()
        self.assign = _assignment(ast.body[0].value)
        self.filename = template.filepath
        Directive.__init__(self, value, template, namespaces, lineno, offset)
    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        if type(value) is dict:
            value = value.get('each')
        return super(ForDirective, cls).attach(template, stream, value,
                                               namespaces, pos)
    def __call__(self, stream, directives, ctxt, **vars):
        iterable = _eval_expr(self.expr, ctxt, vars)
        if iterable is None:
            return
        assign = self.assign
        scope = {}
        stream = list(stream)
        for item in iterable:
            # Rebind the loop target(s), then replay the buffered stream in
            # a fresh context frame for each iteration.
            assign(scope, item)
            ctxt.push(scope)
            for event in _apply_directives(stream, directives, ctxt, vars):
                yield event
            ctxt.pop()
    def __repr__(self):
        return '<%s>' % type(self).__name__
class IfDirective(Directive):
    """Implementation of the ``py:if`` template directive for conditionally
    excluding elements from being output.
    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/">
    ...   <b py:if="foo">${bar}</b>
    ... </div>''')
    >>> print(tmpl.generate(foo=True, bar='Hello'))
    <div>
      <b>Hello</b>
    </div>
    """
    __slots__ = []

    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        if type(value) is dict:
            value = value.get('test')
        return super(IfDirective, cls).attach(template, stream, value,
                                              namespaces, pos)

    def __call__(self, stream, directives, ctxt, **vars):
        # Evaluate the condition once; suppress the stream when it is falsy.
        if not _eval_expr(self.expr, ctxt, vars):
            return []
        return _apply_directives(stream, directives, ctxt, vars)
class MatchDirective(Directive):
    """Implementation of the ``py:match`` template directive.
    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/">
    ...   <span py:match="greeting">
    ...     Hello ${select('@name')}
    ...   </span>
    ...   <greeting name="Dude" />
    ... </div>''')
    >>> print(tmpl.generate())
    <div>
      <span>
        Hello Dude
      </span>
    </div>
    """
    __slots__ = ['path', 'namespaces', 'hints']
    def __init__(self, value, template, hints=None, namespaces=None,
                 lineno=-1, offset=-1):
        Directive.__init__(self, None, template, namespaces, lineno, offset)
        # The directive value is an XPath expression, not a Python one.
        self.path = Path(value, template.filepath, lineno)
        self.namespaces = namespaces or {}
        self.hints = hints or ()
    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        hints = []
        if type(value) is dict:
            # Element form: the optional buffer/once/recursive attributes
            # are translated into processing hints.
            if value.get('buffer', '').lower() == 'false':
                hints.append('not_buffered')
            if value.get('once', '').lower() == 'true':
                hints.append('match_once')
            if value.get('recursive', '').lower() == 'false':
                hints.append('not_recursive')
            value = value.get('path')
        return cls(value, template, frozenset(hints), namespaces, *pos[1:]), \
               stream
    def __call__(self, stream, directives, ctxt, **vars):
        # Register the match template for later use; nothing is output at
        # the directive's own position.
        ctxt._match_templates.append((self.path.test(ignore_context=True),
                                      self.path, list(stream), self.hints,
                                      self.namespaces, directives))
        return []
    def __repr__(self):
        return '<%s "%s">' % (type(self).__name__, self.path.source)
class ReplaceDirective(Directive):
    """Implementation of the ``py:replace`` template directive.
    This directive replaces the element with the result of evaluating the
    value of the ``py:replace`` attribute:
    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/">
    ...   <span py:replace="bar">Hello</span>
    ... </div>''')
    >>> print(tmpl.generate(bar='Bye'))
    <div>
      Bye
    </div>
    This directive is equivalent to ``py:content`` combined with ``py:strip``,
    providing a less verbose way to achieve the same effect:
    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/">
    ...   <span py:content="bar" py:strip="">Hello</span>
    ... </div>''')
    >>> print(tmpl.generate(bar='Bye'))
    <div>
      Bye
    </div>
    """
    __slots__ = []

    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        if type(value) is dict:
            value = value.get('value')
        if not value:
            raise TemplateSyntaxError('missing value for "replace" directive',
                                      template.filepath, *pos[1:])
        expr = cls._parse_expr(value, template, *pos[1:])
        # The whole element -- start tag, body and end tag -- collapses into
        # a single expression event; no runtime directive object is needed.
        return None, [(EXPR, expr, pos)]
class StripDirective(Directive):
    """Implementation of the ``py:strip`` template directive.
    When the value of the ``py:strip`` attribute evaluates to ``True``, the
    element is stripped from the output
    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/">
    ...   <div py:strip="True"><b>foo</b></div>
    ... </div>''')
    >>> print(tmpl.generate())
    <div>
      <b>foo</b>
    </div>
    Leaving the attribute value empty is equivalent to a truth value.
    This directive is particulary interesting for named template functions or
    match templates that do not generate a top-level element:
    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/">
    ...   <div py:def="echo(what)" py:strip="">
    ...     <b>${what}</b>
    ...   </div>
    ...   ${echo('foo')}
    ... </div>''')
    >>> print(tmpl.generate())
    <div>
      <b>foo</b>
    </div>
    """
    __slots__ = []
    def __call__(self, stream, directives, ctxt, **vars):
        def _generate():
            # An empty expression (self.expr is None) always strips.
            if not self.expr or _eval_expr(self.expr, ctxt, vars):
                # Drop the first event (start tag) and hold back one event
                # so the final one (end tag) is never yielded.
                stream.next() # skip start tag
                previous = stream.next()
                for event in stream:
                    yield previous
                    previous = event
            else:
                for event in stream:
                    yield event
        return _apply_directives(_generate(), directives, ctxt, vars)
class ChooseDirective(Directive):
    """Implementation of the ``py:choose`` directive for conditionally selecting
    one of several body elements to display.
    If the ``py:choose`` expression is empty the expressions of nested
    ``py:when`` directives are tested for truth. The first true ``py:when``
    body is output. If no ``py:when`` directive is matched then the fallback
    directive ``py:otherwise`` will be used.
    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/"
    ...   py:choose="">
    ...   <span py:when="0 == 1">0</span>
    ...   <span py:when="1 == 1">1</span>
    ...   <span py:otherwise="">2</span>
    ... </div>''')
    >>> print(tmpl.generate())
    <div>
      <span>1</span>
    </div>
    If the ``py:choose`` directive contains an expression, the nested
    ``py:when`` directives are tested for equality to the ``py:choose``
    expression:
    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/"
    ...   py:choose="2">
    ...   <span py:when="1">1</span>
    ...   <span py:when="2">2</span>
    ... </div>''')
    >>> print(tmpl.generate())
    <div>
      <span>2</span>
    </div>
    Behavior is undefined if a ``py:choose`` block contains content outside a
    ``py:when`` or ``py:otherwise`` block. Behavior is also undefined if a
    ``py:otherwise`` occurs before ``py:when`` blocks.
    """
    __slots__ = ['matched', 'value']
    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        if type(value) is dict:
            value = value.get('test')
        return super(ChooseDirective, cls).attach(template, stream, value,
                                                  namespaces, pos)
    def __call__(self, stream, directives, ctxt, **vars):
        # Shared mutable state read/written by nested when/otherwise
        # directives: [a branch matched?, has test expression?, test value].
        info = [False, bool(self.expr), None]
        if self.expr:
            info[2] = _eval_expr(self.expr, ctxt, vars)
        ctxt._choice_stack.append(info)
        for event in _apply_directives(stream, directives, ctxt, vars):
            yield event
        ctxt._choice_stack.pop()
class WhenDirective(Directive):
    """Implementation of the ``py:when`` directive for nesting in a parent with
    the ``py:choose`` directive.
    See the documentation of the `ChooseDirective` for usage.
    """
    __slots__ = ['filename']
    def __init__(self, value, template, namespaces=None, lineno=-1, offset=-1):
        Directive.__init__(self, value, template, namespaces, lineno, offset)
        # Kept for error reporting at runtime.
        self.filename = template.filepath
    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        if type(value) is dict:
            value = value.get('test')
        return super(WhenDirective, cls).attach(template, stream, value,
                                                namespaces, pos)
    def __call__(self, stream, directives, ctxt, **vars):
        # info is the [matched?, has-test?, value] list pushed by the
        # enclosing ChooseDirective.
        info = ctxt._choice_stack and ctxt._choice_stack[-1]
        if not info:
            raise TemplateRuntimeError('"when" directives can only be used '
                                       'inside a "choose" directive',
                                       self.filename, *stream.next()[2][1:])
        if info[0]:
            # A previous branch already matched; suppress this one.
            return []
        if not self.expr and not info[1]:
            raise TemplateRuntimeError('either "choose" or "when" directive '
                                       'must have a test expression',
                                       self.filename, *stream.next()[2][1:])
        if info[1]:
            # choose has a test value: compare it against this when's
            # expression (or just its truth when the when has no expression).
            value = info[2]
            if self.expr:
                matched = value == _eval_expr(self.expr, ctxt, vars)
            else:
                matched = bool(value)
        else:
            matched = bool(_eval_expr(self.expr, ctxt, vars))
        info[0] = matched
        if not matched:
            return []
        return _apply_directives(stream, directives, ctxt, vars)
class OtherwiseDirective(Directive):
    """Implementation of the ``py:otherwise`` directive for nesting in a parent
    with the ``py:choose`` directive.
    See the documentation of `ChooseDirective` for usage.
    """
    __slots__ = ['filename']

    def __init__(self, value, template, namespaces=None, lineno=-1, offset=-1):
        Directive.__init__(self, None, template, namespaces, lineno, offset)
        # Kept for error reporting at runtime.
        self.filename = template.filepath

    def __call__(self, stream, directives, ctxt, **vars):
        choice = ctxt._choice_stack and ctxt._choice_stack[-1]
        if not choice:
            raise TemplateRuntimeError('an "otherwise" directive can only be '
                                       'used inside a "choose" directive',
                                       self.filename, *stream.next()[2][1:])
        if choice[0]:
            # A previous "when" already matched; suppress this branch.
            return []
        choice[0] = True
        return _apply_directives(stream, directives, ctxt, vars)
class WithDirective(Directive):
    """Implementation of the ``py:with`` template directive, which allows
    shorthand access to variables and expressions.

    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/">
    ... <span py:with="y=7; z=x+10">$x $y $z</span>
    ... </div>''')
    >>> print(tmpl.generate(x=42))
    <div>
    <span>42 7 52</span>
    </div>
    """
    __slots__ = ['vars']

    def __init__(self, value, template, namespaces=None, lineno=-1, offset=-1):
        Directive.__init__(self, None, template, namespaces, lineno, offset)
        # List of ([assignment callables], Expression) pairs, one per
        # semicolon-separated assignment in the directive value
        self.vars = []
        value = value.strip()
        try:
            ast = _parse(value, 'exec')
            for node in ast.body:
                # Only plain assignments are meaningful here; anything else
                # (calls, loops, ...) is rejected at parse time
                if not isinstance(node, _ast.Assign):
                    raise TemplateSyntaxError('only assignment allowed in '
                                              'value of the "with" directive',
                                              template.filepath, lineno, offset)
                self.vars.append(([_assignment(n) for n in node.targets],
                                  Expression(node.value, template.filepath,
                                             lineno, lookup=template.lookup)))
        except SyntaxError, err:
            err.msg += ' in expression "%s" of "%s" directive' % (value,
                                                                  self.tagname)
            raise TemplateSyntaxError(err, template.filepath, lineno,
                                      offset + (err.offset or 0))

    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        # When used as an element, the assignments come from the "vars"
        # attribute rather than from a directive attribute value
        if type(value) is dict:
            value = value.get('vars')
        return super(WithDirective, cls).attach(template, stream, value,
                                                namespaces, pos)

    def __call__(self, stream, directives, ctxt, **vars):
        # Push a fresh frame so the bindings go out of scope afterwards
        frame = {}
        ctxt.push(frame)
        for targets, expr in self.vars:
            value = _eval_expr(expr, ctxt, vars)
            for assign in targets:
                # Each target (possibly a tuple pattern) binds into the frame
                assign(frame, value)
        for event in _apply_directives(stream, directives, ctxt, vars):
            yield event
        ctxt.pop()

    def __repr__(self):
        return '<%s>' % (type(self).__name__)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Markup templating engine."""
from itertools import chain
from genshi.core import Attrs, Markup, Namespace, Stream, StreamEventKind
from genshi.core import START, END, START_NS, END_NS, TEXT, PI, COMMENT
from genshi.input import XMLParser
from genshi.template.base import BadDirectiveError, Template, \
TemplateSyntaxError, _apply_directives, \
EXEC, INCLUDE, SUB
from genshi.template.eval import Suite
from genshi.template.interpolation import interpolate
from genshi.template.directives import *
from genshi.template.text import NewTextTemplate
__all__ = ['MarkupTemplate']
__docformat__ = 'restructuredtext en'
class MarkupTemplate(Template):
    """Implementation of the template language for XML-based templates.

    >>> tmpl = MarkupTemplate('''<ul xmlns:py="http://genshi.edgewall.org/">
    ... <li py:for="item in items">${item}</li>
    ... </ul>''')
    >>> print(tmpl.generate(items=[1, 2, 3]))
    <ul>
    <li>1</li><li>2</li><li>3</li>
    </ul>
    """
    DIRECTIVE_NAMESPACE = 'http://genshi.edgewall.org/'
    XINCLUDE_NAMESPACE = 'http://www.w3.org/2001/XInclude'

    # Directive classes in order of application priority
    directives = [('def', DefDirective),
                  ('match', MatchDirective),
                  ('when', WhenDirective),
                  ('otherwise', OtherwiseDirective),
                  ('for', ForDirective),
                  ('if', IfDirective),
                  ('choose', ChooseDirective),
                  ('with', WithDirective),
                  ('replace', ReplaceDirective),
                  ('content', ContentDirective),
                  ('attrs', AttrsDirective),
                  ('strip', StripDirective)]
    serializer = 'xml'
    _number_conv = Markup

    def __init__(self, source, filepath=None, filename=None, loader=None,
                 encoding=None, lookup='strict', allow_exec=True):
        Template.__init__(self, source, filepath=filepath, filename=filename,
                          loader=loader, encoding=encoding, lookup=lookup,
                          allow_exec=allow_exec)
        self.add_directives(self.DIRECTIVE_NAMESPACE, self)

    def _init_filters(self):
        Template._init_filters(self)
        # Make sure the include filter comes after the match filter
        if self.loader:
            self.filters.remove(self._include)
        self.filters += [self._match]
        if self.loader:
            self.filters.append(self._include)

    def _parse(self, source, encoding):
        """Parse the XML source into a list of stream events, interpolating
        ``${...}`` in text and compiling ``<?python ?>`` instructions.
        """
        if not isinstance(source, Stream):
            source = XMLParser(source, filename=self.filename,
                               encoding=encoding)
        stream = []

        for kind, data, pos in source:

            if kind is TEXT:
                for kind, data, pos in interpolate(data, self.filepath, pos[1],
                                                   pos[2], lookup=self.lookup):
                    stream.append((kind, data, pos))

            elif kind is PI and data[0] == 'python':
                if not self.allow_exec:
                    raise TemplateSyntaxError('Python code blocks not allowed',
                                              self.filepath, *pos[1:])
                try:
                    # Compile the code block once, at parse time
                    suite = Suite(data[1], self.filepath, pos[1],
                                  lookup=self.lookup)
                except SyntaxError, err:
                    raise TemplateSyntaxError(err, self.filepath,
                                              pos[1] + (err.lineno or 1) - 1,
                                              pos[2] + (err.offset or 0))
                stream.append((EXEC, suite, pos))

            elif kind is COMMENT:
                # Comments starting with "!" are stripped from the output
                if not data.lstrip().startswith('!'):
                    stream.append((kind, data, pos))

            else:
                stream.append((kind, data, pos))

        return stream

    def _extract_directives(self, stream, namespace, factory):
        """Replace directive elements/attributes in the given namespace with
        SUB events wrapping the affected sub-stream.
        """
        depth = 0
        dirmap = {} # temporary mapping of directives to elements
        new_stream = []
        ns_prefix = {} # namespace prefixes in use

        for kind, data, pos in stream:

            if kind is START:
                tag, attrs = data
                directives = []
                strip = False

                # A directive used as an element strips the element itself
                if tag.namespace == namespace:
                    cls = factory.get_directive(tag.localname)
                    if cls is None:
                        raise BadDirectiveError(tag.localname,
                                                self.filepath, pos[1])
                    args = dict([(name.localname, value) for name, value
                                 in attrs if not name.namespace])
                    directives.append((factory.get_directive_index(cls), cls,
                                       args, ns_prefix.copy(), pos))
                    strip = True

                # Collect directive attributes, keeping the rest
                new_attrs = []
                for name, value in attrs:
                    if name.namespace == namespace:
                        cls = factory.get_directive(name.localname)
                        if cls is None:
                            raise BadDirectiveError(name.localname,
                                                    self.filepath, pos[1])
                        if type(value) is list and len(value) == 1:
                            value = value[0][1]
                        directives.append((factory.get_directive_index(cls),
                                           cls, value, ns_prefix.copy(), pos))
                    else:
                        new_attrs.append((name, value))
                new_attrs = Attrs(new_attrs)

                if directives:
                    # Order directives by their application priority
                    directives.sort()
                    dirmap[(depth, tag)] = (directives, len(new_stream),
                                            strip)

                new_stream.append((kind, (tag, new_attrs), pos))
                depth += 1

            elif kind is END:
                depth -= 1
                new_stream.append((kind, data, pos))

                # If there have been directive attributes with the
                # corresponding start tag, move the events in between into
                # a "subprogram"
                if (depth, data) in dirmap:
                    directives, offset, strip = dirmap.pop((depth, data))
                    substream = new_stream[offset:]
                    if strip:
                        substream = substream[1:-1]
                    new_stream[offset:] = [
                        (SUB, (directives, substream), pos)
                    ]

            elif kind is SUB:
                # Recurse into already-wrapped substreams and merge nested
                # single-SUB wrappers into one directive list
                directives, substream = data
                substream = self._extract_directives(substream, namespace,
                                                     factory)

                if len(substream) == 1 and substream[0][0] is SUB:
                    added_directives, substream = substream[0][1]
                    directives += added_directives

                new_stream.append((kind, (directives, substream), pos))

            elif kind is START_NS:
                # Strip out the namespace declaration for template
                # directives
                prefix, uri = data
                ns_prefix[prefix] = uri
                if uri != namespace:
                    new_stream.append((kind, data, pos))

            elif kind is END_NS:
                uri = ns_prefix.pop(data, None)
                if uri and uri != namespace:
                    new_stream.append((kind, data, pos))

            else:
                new_stream.append((kind, data, pos))

        return new_stream

    def _extract_includes(self, stream):
        """Replace ``xi:include``/``xi:fallback`` elements in the stream with
        INCLUDE events carrying the href, template class and fallback stream.
        """
        streams = [[]] # stacked lists of events of the "compiled" template
        prefixes = {}
        fallbacks = []
        includes = []
        xinclude_ns = Namespace(self.XINCLUDE_NAMESPACE)

        for kind, data, pos in stream:
            stream = streams[-1]

            if kind is START:
                tag, attrs = data
                if tag in xinclude_ns:
                    if tag.localname == 'include':
                        include_href = attrs.get('href')
                        if not include_href:
                            raise TemplateSyntaxError('Include misses required '
                                                      'attribute "href"',
                                                      self.filepath, *pos[1:])
                        includes.append((include_href, attrs.get('parse')))
                        streams.append([])
                    elif tag.localname == 'fallback':
                        streams.append([])
                        fallbacks.append(streams[-1])
                else:
                    stream.append((kind, (tag, attrs), pos))

            elif kind is END:
                if fallbacks and data == xinclude_ns['fallback']:
                    assert streams.pop() is fallbacks[-1]
                elif data == xinclude_ns['include']:
                    fallback = None
                    if len(fallbacks) == len(includes):
                        fallback = fallbacks.pop()
                    streams.pop() # discard anything between the include tags
                                  # and the fallback element
                    stream = streams[-1]
                    href, parse = includes.pop()
                    try:
                        # The "parse" attribute selects the template class
                        cls = {
                            'xml': MarkupTemplate,
                            'text': NewTextTemplate
                        }[parse or 'xml']
                    except KeyError:
                        raise TemplateSyntaxError('Invalid value for "parse" '
                                                  'attribute of include',
                                                  self.filepath, *pos[1:])
                    stream.append((INCLUDE, (href, cls, fallback), pos))
                else:
                    stream.append((kind, data, pos))

            elif kind is START_NS and data[1] == xinclude_ns:
                # Strip out the XInclude namespace
                prefixes[data[0]] = data[1]

            elif kind is END_NS and data in prefixes:
                prefixes.pop(data)

            else:
                stream.append((kind, data, pos))

        assert len(streams) == 1
        return streams[0]

    def _interpolate_attrs(self, stream):
        """Expand ``${...}`` interpolation inside attribute values, leaving
        fully static values as plain strings.
        """
        for kind, data, pos in stream:

            if kind is START:
                # Record any directive attributes in start tags
                tag, attrs = data
                new_attrs = []
                for name, value in attrs:
                    if value:
                        value = list(interpolate(value, self.filepath, pos[1],
                                                 pos[2], lookup=self.lookup))
                        if len(value) == 1 and value[0][0] is TEXT:
                            # No expressions: keep the attribute as a string
                            value = value[0][1]
                    new_attrs.append((name, value))
                data = tag, Attrs(new_attrs)

            yield kind, data, pos

    def _prepare(self, stream):
        return Template._prepare(self,
            self._extract_includes(self._interpolate_attrs(stream))
        )

    def add_directives(self, namespace, factory):
        """Register a custom `DirectiveFactory` for a given namespace.

        :param namespace: the namespace URI
        :type namespace: `basestring`
        :param factory: the directive factory to register
        :type factory: `DirectiveFactory`
        :since: version 0.6
        """
        assert not self._prepared, 'Too late for adding directives, ' \
                                   'template already prepared'
        self._stream = self._extract_directives(self._stream, namespace,
                                                factory)

    def _match(self, stream, ctxt, start=0, end=None, **vars):
        """Internal stream filter that applies any defined match templates
        to the stream.
        """
        match_templates = ctxt._match_templates

        tail = []
        def _strip(stream, append=tail.append):
            # Yield events up to (not including) the END event that closes
            # the current element; the END event itself is stored in `tail`
            depth = 1
            next = stream.next
            while 1:
                event = next()
                if event[0] is START:
                    depth += 1
                elif event[0] is END:
                    depth -= 1
                if depth > 0:
                    yield event
                else:
                    append(event)
                    break

        for event in stream:

            # We (currently) only care about start and end events for matching
            # We might care about namespace events in the future, though
            if not match_templates or (event[0] is not START and
                                       event[0] is not END):
                yield event
                continue

            for idx, (test, path, template, hints, namespaces, directives) \
                    in enumerate(match_templates):
                if idx < start or end is not None and idx >= end:
                    continue

                if test(event, namespaces, ctxt) is True:
                    if 'match_once' in hints:
                        del match_templates[idx]
                        idx -= 1

                    # Let the remaining match templates know about the event so
                    # they get a chance to update their internal state
                    for test in [mt[0] for mt in match_templates[idx + 1:]]:
                        test(event, namespaces, ctxt, updateonly=True)

                    # Consume and store all events until an end event
                    # corresponding to this start event is encountered
                    pre_end = idx + 1
                    if 'match_once' not in hints and 'not_recursive' in hints:
                        pre_end -= 1
                    inner = _strip(stream)
                    if pre_end > 0:
                        inner = self._match(inner, ctxt, end=pre_end, **vars)
                    content = self._include(chain([event], inner, tail), ctxt)
                    if 'not_buffered' not in hints:
                        content = list(content)
                    content = Stream(content)

                    # Make the select() function available in the body of the
                    # match template
                    selected = [False]
                    def select(path):
                        selected[0] = True
                        return content.select(path, namespaces, ctxt)
                    vars = dict(select=select)

                    # Recursively process the output
                    template = _apply_directives(template, directives, ctxt,
                                                 vars)
                    for event in self._match(self._flatten(template, ctxt,
                                                           **vars),
                                             ctxt, start=idx + 1, **vars):
                        yield event

                    # If the match template did not actually call select to
                    # consume the matched stream, the original events need to
                    # be consumed here or they'll get appended to the output
                    if not selected[0]:
                        for event in content:
                            pass

                    # Let the remaining match templates know about the last
                    # event in the matched content, so they can update their
                    # internal state accordingly
                    for test in [mt[0] for mt in match_templates]:
                        test(tail[0], namespaces, ctxt, updateonly=True)

                    break

            else: # no matches
                yield event
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import sys
import unittest
from genshi.template import directives, MarkupTemplate, TextTemplate, \
TemplateRuntimeError, TemplateSyntaxError
class AttrsDirectiveTestCase(unittest.TestCase):
    """Tests for the `py:attrs` template directive."""

    def test_combined_with_loop(self):
        """
        Verify that the directive has access to the loop variables.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<elem py:for="item in items" py:attrs="item"/>
</doc>""")
        items = [{'id': 1, 'class': 'foo'}, {'id': 2, 'class': 'bar'}]
        self.assertEqual("""<doc>
<elem id="1" class="foo"/><elem id="2" class="bar"/>
</doc>""", tmpl.generate(items=items).render(encoding=None))

    def test_update_existing_attr(self):
        """
        Verify that a value in the `py:attrs` mapping overwrites an existing
        attribute of the same name.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<elem class="foo" py:attrs="{'class': 'bar'}"/>
</doc>""")
        self.assertEqual("""<doc>
<elem class="bar"/>
</doc>""", tmpl.generate().render(encoding=None))

    def test_remove_existing_attr(self):
        """
        Verify that an attribute value that evaluates to `None` removes an
        existing attribute of that name.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<elem class="foo" py:attrs="{'class': None}"/>
</doc>""")
        self.assertEqual("""<doc>
<elem/>
</doc>""", tmpl.generate().render(encoding=None))
class ChooseDirectiveTestCase(unittest.TestCase):
    """Tests for the `py:choose` template directive and the complementary
    directives `py:when` and `py:otherwise`."""

    def test_multiple_true_whens(self):
        """
        Verify that, if multiple `py:when` bodies match, only the first is
        output.
        """
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/" py:choose="">
<span py:when="1 == 1">1</span>
<span py:when="2 == 2">2</span>
<span py:when="3 == 3">3</span>
</div>""")
        self.assertEqual("""<div>
<span>1</span>
</div>""", tmpl.generate().render(encoding=None))

    def test_otherwise(self):
        """Verify that `py:otherwise` is output when no `py:when` matches."""
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/" py:choose="">
<span py:when="False">hidden</span>
<span py:otherwise="">hello</span>
</div>""")
        self.assertEqual("""<div>
<span>hello</span>
</div>""", tmpl.generate().render(encoding=None))

    def test_nesting(self):
        """
        Verify that `py:choose` blocks can be nested:
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:choose="1">
<div py:when="1" py:choose="3">
<span py:when="2">2</span>
<span py:when="3">3</span>
</div>
</div>
</doc>""")
        self.assertEqual("""<doc>
<div>
<div>
<span>3</span>
</div>
</div>
</doc>""", tmpl.generate().render(encoding=None))

    def test_complex_nesting(self):
        """
        Verify more complex nesting.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:choose="1">
<div py:when="1" py:choose="">
<span py:when="2">OK</span>
<span py:when="1">FAIL</span>
</div>
</div>
</doc>""")
        self.assertEqual("""<doc>
<div>
<div>
<span>OK</span>
</div>
</div>
</doc>""", tmpl.generate().render(encoding=None))

    def test_complex_nesting_otherwise(self):
        """
        Verify more complex nesting using otherwise.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:choose="1">
<div py:when="1" py:choose="2">
<span py:when="1">FAIL</span>
<span py:otherwise="">OK</span>
</div>
</div>
</doc>""")
        self.assertEqual("""<doc>
<div>
<div>
<span>OK</span>
</div>
</div>
</doc>""", tmpl.generate().render(encoding=None))

    def test_when_with_strip(self):
        """
        Verify that a when directive with a strip directive actually strips
        off the outer element.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:choose="" py:strip="">
<span py:otherwise="">foo</span>
</div>
</doc>""")
        self.assertEqual("""<doc>
<span>foo</span>
</doc>""", tmpl.generate().render(encoding=None))

    def test_when_outside_choose(self):
        """
        Verify that a `when` directive outside of a `choose` directive is
        reported as an error.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:when="xy" />
</doc>""")
        self.assertRaises(TemplateRuntimeError, str, tmpl.generate())

    def test_otherwise_outside_choose(self):
        """
        Verify that an `otherwise` directive outside of a `choose` directive is
        reported as an error.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:otherwise="" />
</doc>""")
        self.assertRaises(TemplateRuntimeError, str, tmpl.generate())

    def test_when_without_test(self):
        """
        Verify that a `when` directive that doesn't have a `test` attribute
        is reported as an error.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:choose="" py:strip="">
<py:when>foo</py:when>
</div>
</doc>""")
        self.assertRaises(TemplateRuntimeError, str, tmpl.generate())

    def test_when_without_test_but_with_choose_value(self):
        """
        Verify that a `when` directive that doesn't have a `test` attribute
        works as expected as long as the parent `choose` directive has a test
        expression.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:choose="foo" py:strip="">
<py:when>foo</py:when>
</div>
</doc>""")
        self.assertEqual("""<doc>
foo
</doc>""", tmpl.generate(foo='Yeah').render(encoding=None))

    def test_otherwise_without_test(self):
        """
        Verify that an `otherwise` directive can be used without a `test`
        attribute.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:choose="" py:strip="">
<py:otherwise>foo</py:otherwise>
</div>
</doc>""")
        self.assertEqual("""<doc>
foo
</doc>""", tmpl.generate().render(encoding=None))

    def test_as_element(self):
        """
        Verify that the directive can also be used as an element.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:choose>
<py:when test="1 == 1">1</py:when>
<py:when test="2 == 2">2</py:when>
<py:when test="3 == 3">3</py:when>
</py:choose>
</doc>""")
        self.assertEqual("""<doc>
1
</doc>""", tmpl.generate().render(encoding=None))

    def test_in_text_template(self):
        """
        Verify that the directive works as expected in a text template.
        """
        tmpl = TextTemplate("""#choose
#when 1 == 1
1
#end
#when 2 == 2
2
#end
#when 3 == 3
3
#end
#end""")
        self.assertEqual(""" 1\n""",
                         tmpl.generate().render(encoding=None))
class DefDirectiveTestCase(unittest.TestCase):
    """Tests for the `py:def` template directive."""

    def test_function_with_strip(self):
        """
        Verify that a named template function with a strip directive actually
        strips off the outer element.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:def="echo(what)" py:strip="">
<b>${what}</b>
</div>
${echo('foo')}
</doc>""")
        self.assertEqual("""<doc>
<b>foo</b>
</doc>""", tmpl.generate().render(encoding=None))

    def test_exec_in_replace(self):
        """Verify that a template function can be invoked via `py:replace`."""
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<p py:def="echo(greeting, name='world')" class="message">
${greeting}, ${name}!
</p>
<div py:replace="echo('hello')"></div>
</div>""")
        self.assertEqual("""<div>
<p class="message">
hello, world!
</p>
</div>""", tmpl.generate().render(encoding=None))

    def test_as_element(self):
        """
        Verify that the directive can also be used as an element.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:def function="echo(what)">
<b>${what}</b>
</py:def>
${echo('foo')}
</doc>""")
        self.assertEqual("""<doc>
<b>foo</b>
</doc>""", tmpl.generate().render(encoding=None))

    def test_nested_defs(self):
        """
        Verify that a template function defined inside a conditional block can
        be called from outside that block.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:if test="semantic">
<strong py:def="echo(what)">${what}</strong>
</py:if>
<py:if test="not semantic">
<b py:def="echo(what)">${what}</b>
</py:if>
${echo('foo')}
</doc>""")
        self.assertEqual("""<doc>
<strong>foo</strong>
</doc>""", tmpl.generate(semantic=True).render(encoding=None))

    def test_function_with_default_arg(self):
        """
        Verify that keyword arguments work with `py:def` directives.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<b py:def="echo(what, bold=False)" py:strip="not bold">${what}</b>
${echo('foo')}
</doc>""")
        self.assertEqual("""<doc>
foo
</doc>""", tmpl.generate().render(encoding=None))

    def test_invocation_in_attribute(self):
        """Verify that a template function can be invoked in an attribute."""
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:def function="echo(what)">${what or 'something'}</py:def>
<p class="${echo('foo')}">bar</p>
</doc>""")
        self.assertEqual("""<doc>
<p class="foo">bar</p>
</doc>""", tmpl.generate().render(encoding=None))

    def test_invocation_in_attribute_none(self):
        """Verify that a function returning `None` removes the attribute."""
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:def function="echo()">${None}</py:def>
<p class="${echo()}">bar</p>
</doc>""")
        self.assertEqual("""<doc>
<p>bar</p>
</doc>""", tmpl.generate().render(encoding=None))

    def test_function_raising_typeerror(self):
        """Verify that a `TypeError` raised inside a function propagates."""
        def badfunc():
            raise TypeError
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<div py:def="dobadfunc()">
${badfunc()}
</div>
<div py:content="dobadfunc()"/>
</html>""")
        self.assertRaises(TypeError, list, tmpl.generate(badfunc=badfunc))

    def test_def_in_matched(self):
        """Verify that a function defined inside matched content is usable."""
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<head py:match="head">${select('*')}</head>
<head>
<py:def function="maketitle(test)"><b py:replace="test" /></py:def>
<title>${maketitle(True)}</title>
</head>
</doc>""")
        self.assertEqual("""<doc>
<head><title>True</title></head>
</doc>""", tmpl.generate().render(encoding=None))

    def test_in_text_template(self):
        """
        Verify that the directive works as expected in a text template.
        """
        tmpl = TextTemplate("""
#def echo(greeting, name='world')
${greeting}, ${name}!
#end
${echo('Hi', name='you')}
""")
        self.assertEqual("""
Hi, you!
""", tmpl.generate().render(encoding=None))

    def test_function_with_star_args(self):
        """
        Verify that a named template function using "star arguments" works as
        expected.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:def="f(*args, **kwargs)">
${repr(args)}
${repr(kwargs)}
</div>
${f(1, 2, a=3, b=4)}
</doc>""")
        self.assertEqual("""<doc>
<div>
[1, 2]
{'a': 3, 'b': 4}
</div>
</doc>""", tmpl.generate().render(encoding=None))
class ForDirectiveTestCase(unittest.TestCase):
"""Tests for the `py:for` template directive."""
def test_loop_with_strip(self):
"""
Verify that the combining the `py:for` directive with `py:strip` works
correctly.
"""
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:for="item in items" py:strip="">
<b>${item}</b>
</div>
</doc>""")
self.assertEqual("""<doc>
<b>1</b>
<b>2</b>
<b>3</b>
<b>4</b>
<b>5</b>
</doc>""", tmpl.generate(items=range(1, 6)).render(encoding=None))
def test_as_element(self):
"""
Verify that the directive can also be used as an element.
"""
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:for each="item in items">
<b>${item}</b>
</py:for>
</doc>""")
self.assertEqual("""<doc>
<b>1</b>
<b>2</b>
<b>3</b>
<b>4</b>
<b>5</b>
</doc>""", tmpl.generate(items=range(1, 6)).render(encoding=None))
def test_multi_assignment(self):
"""
Verify that assignment to tuples works correctly.
"""
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:for each="k, v in items">
<p>key=$k, value=$v</p>
</py:for>
</doc>""")
self.assertEqual("""<doc>
<p>key=a, value=1</p>
<p>key=b, value=2</p>
</doc>""", tmpl.generate(items=dict(a=1, b=2).items())
.render(encoding=None))
def test_nested_assignment(self):
"""
Verify that assignment to nested tuples works correctly.
"""
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:for each="idx, (k, v) in items">
<p>$idx: key=$k, value=$v</p>
</py:for>
</doc>""")
self.assertEqual("""<doc>
<p>0: key=a, value=1</p>
<p>1: key=b, value=2</p>
</doc>""", tmpl.generate(items=enumerate(dict(a=1, b=2).items()))
.render(encoding=None))
def test_not_iterable(self):
"""
Verify that assignment to nested tuples works correctly.
"""
tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:for each="item in foo">
$item
</py:for>
</doc>""", filename='test.html')
try:
list(tmpl.generate(foo=12))
self.fail('Expected TemplateRuntimeError')
except TypeError, e:
assert (str(e) == "iteration over non-sequence" or
str(e) == "'int' object is not iterable")
exc_type, exc_value, exc_traceback = sys.exc_info()
frame = exc_traceback.tb_next
frames = []
while frame.tb_next:
frame = frame.tb_next
frames.append(frame)
self.assertEqual("<Expression u'iter(foo)'>",
frames[-1].tb_frame.f_code.co_name)
self.assertEqual('test.html',
frames[-1].tb_frame.f_code.co_filename)
self.assertEqual(2, frames[-1].tb_lineno)
def test_for_with_empty_value(self):
"""
Verify an empty 'for' value is an error
"""
try:
MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:for each="">
empty
</py:for>
</doc>""", filename='test.html').generate()
self.fail('ExpectedTemplateSyntaxError')
except TemplateSyntaxError, e:
self.assertEqual('test.html', e.filename)
if sys.version_info[:2] > (2,4):
self.assertEqual(2, e.lineno)
class IfDirectiveTestCase(unittest.TestCase):
    """Tests for the `py:if` template directive."""

    def test_loop_with_strip(self):
        """
        Verify that combining the `py:if` directive with `py:strip` works
        correctly.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<b py:if="foo" py:strip="">${bar}</b>
</doc>""")
        self.assertEqual("""<doc>
Hello
</doc>""", tmpl.generate(foo=True, bar='Hello').render(encoding=None))

    def test_as_element(self):
        """
        Verify that the directive can also be used as an element.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:if test="foo">${bar}</py:if>
</doc>""")
        self.assertEqual("""<doc>
Hello
</doc>""", tmpl.generate(foo=True, bar='Hello').render(encoding=None))
class MatchDirectiveTestCase(unittest.TestCase):
"""Tests for the `py:match` template directive."""
    def test_with_strip(self):
        """
        Verify that a match template can produce the same kind of element that
        it matched without entering an infinite recursion.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<elem py:match="elem" py:strip="">
<div class="elem">${select('text()')}</div>
</elem>
<elem>Hey Joe</elem>
</doc>""")
        self.assertEqual("""<doc>
<div class="elem">Hey Joe</div>
</doc>""", tmpl.generate().render(encoding=None))
    def test_without_strip(self):
        """
        Verify that a match template can produce the same kind of element that
        it matched without entering an infinite recursion.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<elem py:match="elem">
<div class="elem">${select('text()')}</div>
</elem>
<elem>Hey Joe</elem>
</doc>""")
        self.assertEqual("""<doc>
<elem>
<div class="elem">Hey Joe</div>
</elem>
</doc>""", tmpl.generate().render(encoding=None))
    def test_as_element(self):
        """
        Verify that the directive can also be used as an element.
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:match path="elem">
<div class="elem">${select('text()')}</div>
</py:match>
<elem>Hey Joe</elem>
</doc>""")
        self.assertEqual("""<doc>
<div class="elem">Hey Joe</div>
</doc>""", tmpl.generate().render(encoding=None))
    def test_recursive_match_1(self):
        """
        Match directives are applied recursively, meaning that they are also
        applied to any content they may have produced themselves:
        """
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<elem py:match="elem">
<div class="elem">
${select('*')}
</div>
</elem>
<elem>
<subelem>
<elem/>
</subelem>
</elem>
</doc>""")
        self.assertEqual("""<doc>
<elem>
<div class="elem">
<subelem>
<elem>
<div class="elem">
</div>
</elem>
</subelem>
</div>
</elem>
</doc>""", tmpl.generate().render(encoding=None))
    def test_recursive_match_2(self):
        """
        When two or more match templates match the same element and also
        themselves output the element they match, avoiding recursion is even
        more complex, but should work.
        """
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<body py:match="body">
<div id="header"/>
${select('*')}
</body>
<body py:match="body">
${select('*')}
<div id="footer"/>
</body>
<body>
<h1>Foo</h1>
</body>
</html>""")
        self.assertEqual("""<html>
<body>
<div id="header"/><h1>Foo</h1>
<div id="footer"/>
</body>
</html>""", tmpl.generate().render(encoding=None))
    def test_recursive_match_3(self):
        """Verify recursion handling with several distinct match templates."""
        tmpl = MarkupTemplate("""<test xmlns:py="http://genshi.edgewall.org/">
<py:match path="b[@type='bullet']">
<bullet>${select('*|text()')}</bullet>
</py:match>
<py:match path="group[@type='bullet']">
<ul>${select('*')}</ul>
</py:match>
<py:match path="b">
<generic>${select('*|text()')}</generic>
</py:match>
<b>
<group type="bullet">
<b type="bullet">1</b>
<b type="bullet">2</b>
</group>
</b>
</test>
""")
        self.assertEqual("""<test>
<generic>
<ul><bullet>1</bullet><bullet>2</bullet></ul>
</generic>
</test>""", tmpl.generate().render(encoding=None))
    def test_not_match_self(self):
        """
        See http://genshi.edgewall.org/ticket/77
        """
        tmpl = MarkupTemplate("""<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:py="http://genshi.edgewall.org/">
<body py:match="body" py:content="select('*')" />
<h1 py:match="h1">
${select('text()')}
Goodbye!
</h1>
<body>
<h1>Hello!</h1>
</body>
</html>""")
        self.assertEqual("""<html xmlns="http://www.w3.org/1999/xhtml">
<body><h1>
Hello!
Goodbye!
</h1></body>
</html>""", tmpl.generate().render(encoding=None))
    def test_select_text_in_element(self):
        """
        See http://genshi.edgewall.org/ticket/77#comment:1
        """
        tmpl = MarkupTemplate("""<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:py="http://genshi.edgewall.org/">
<body py:match="body" py:content="select('*')" />
<h1 py:match="h1">
<text>
${select('text()')}
</text>
Goodbye!
</h1>
<body>
<h1>Hello!</h1>
</body>
</html>""")
        self.assertEqual("""<html xmlns="http://www.w3.org/1999/xhtml">
<body><h1>
<text>
Hello!
</text>
Goodbye!
</h1></body>
</html>""", tmpl.generate().render(encoding=None))
    def test_select_all_attrs(self):
        """Verify that ``select('@*')`` can be used with `py:attrs` to copy
        all attributes of the matched element."""
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:match="elem" py:attrs="select('@*')">
${select('text()')}
</div>
<elem id="joe">Hey Joe</elem>
</doc>""")
        self.assertEqual("""<doc>
<div id="joe">
Hey Joe
</div>
</doc>""", tmpl.generate().render(encoding=None))
    def test_select_all_attrs_empty(self):
        """Verify that ``select('@*')`` via `py:attrs` works when the matched
        element has no attributes."""
        tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:match="elem" py:attrs="select('@*')">
${select('text()')}
</div>
<elem>Hey Joe</elem>
</doc>""")
        self.assertEqual("""<doc>
<div>
Hey Joe
</div>
</doc>""", tmpl.generate().render(encoding=None))
def test_select_all_attrs_in_body(self):
    """Attributes selected inside the body render as their text values."""
    tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<div py:match="elem">
Hey ${select('text()')} ${select('@*')}
</div>
<elem title="Cool">Joe</elem>
</doc>""")
    self.assertEqual("""<doc>
<div>
Hey Joe Cool
</div>
</doc>""", tmpl.generate().render(encoding=None))
def test_def_in_match(self):
    """A function defined via py:def is callable from content that a
    py:match template replays."""
    tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:def function="maketitle(test)"><b py:replace="test" /></py:def>
<head py:match="head">${select('*')}</head>
<head><title>${maketitle(True)}</title></head>
</doc>""")
    self.assertEqual("""<doc>
<head><title>True</title></head>
</doc>""", tmpl.generate().render(encoding=None))
def test_match_with_xpath_variable(self):
    """The match path may reference context data through an XPath $variable."""
    tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<span py:match="*[name()=$tagname]">
Hello ${select('@name')}
</span>
<greeting name="Dude"/>
</div>""")
    self.assertEqual("""<div>
<span>
Hello Dude
</span>
</div>""", tmpl.generate(tagname='greeting').render(encoding=None))
    # With a non-matching value the element passes through unchanged.
    self.assertEqual("""<div>
<greeting name="Dude"/>
</div>""", tmpl.generate(tagname='sayhello').render(encoding=None))
def test_content_directive_in_match(self):
    """py:content inside a match body may call select()."""
    tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<div py:match="foo">I said <q py:content="select('text()')">something</q>.</div>
<foo>bar</foo>
</html>""")
    self.assertEqual("""<html>
<div>I said <q>bar</q>.</div>
</html>""", tmpl.generate().render(encoding=None))
def test_cascaded_matches(self):
    """A later match template is applied to the output of an earlier one
    (the second body match adds the <hr/>)."""
    tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<body py:match="body">${select('*')}</body>
<head py:match="head">${select('title')}</head>
<body py:match="body">${select('*')}<hr /></body>
<head><title>Welcome to Markup</title></head>
<body><h2>Are you ready to mark up?</h2></body>
</html>""")
    self.assertEqual("""<html>
<head><title>Welcome to Markup</title></head>
<body><h2>Are you ready to mark up?</h2><hr/></body>
</html>""", tmpl.generate().render(encoding=None))
def test_multiple_matches(self):
    """A single match template fires once for each matching input element."""
    tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<input py:match="form//input" py:attrs="select('@*')"
value="${values[str(select('@name'))]}" />
<form><p py:for="field in fields">
<label>${field.capitalize()}</label>
<input type="text" name="${field}" />
</p></form>
</html>""")
    fields = ['hello_%s' % i for i in range(5)]
    values = dict([('hello_%s' % i, i) for i in range(5)])
    self.assertEqual("""<html>
<form><p>
<label>Hello_0</label>
<input value="0" type="text" name="hello_0"/>
</p><p>
<label>Hello_1</label>
<input value="1" type="text" name="hello_1"/>
</p><p>
<label>Hello_2</label>
<input value="2" type="text" name="hello_2"/>
</p><p>
<label>Hello_3</label>
<input value="3" type="text" name="hello_3"/>
</p><p>
<label>Hello_4</label>
<input value="4" type="text" name="hello_4"/>
</p></form>
</html>""", tmpl.generate(fields=fields, values=values)
                     .render(encoding=None))
def test_namespace_context(self):
    """Prefixes declared on the template root are usable in match paths."""
    tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:x="http://www.example.org/">
<div py:match="x:foo">Foo</div>
<foo xmlns="http://www.example.org/"/>
</html>""")
    # FIXME: there should be a way to strip out unwanted/unused namespaces,
    # such as the "x" in this example
    self.assertEqual("""<html xmlns:x="http://www.example.org/">
<div>Foo</div>
</html>""", tmpl.generate().render(encoding=None))
def test_match_with_position_predicate(self):
    """A positional predicate such as p[1] limits the match to that sibling."""
    tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<p py:match="body/p[1]" class="first">${select('*|text()')}</p>
<body>
<p>Foo</p>
<p>Bar</p>
</body>
</html>""")
    self.assertEqual("""<html>
<body>
<p class="first">Foo</p>
<p>Bar</p>
</body>
</html>""", tmpl.generate().render(encoding=None))
def test_match_with_closure(self):
    """A descendant path (body//p) matches at any nesting depth."""
    tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<p py:match="body//p" class="para">${select('*|text()')}</p>
<body>
<p>Foo</p>
<div><p>Bar</p></div>
</body>
</html>""")
    self.assertEqual("""<html>
<body>
<p class="para">Foo</p>
<div><p class="para">Bar</p></div>
</body>
</html>""", tmpl.generate().render(encoding=None))
def test_match_without_closure(self):
    """A child path (body/p) matches direct children only, not descendants."""
    tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<p py:match="body/p" class="para">${select('*|text()')}</p>
<body>
<p>Foo</p>
<div><p>Bar</p></div>
</body>
</html>""")
    self.assertEqual("""<html>
<body>
<p class="para">Foo</p>
<div><p>Bar</p></div>
</body>
</html>""", tmpl.generate().render(encoding=None))
def test_match_with_once_attribute(self):
    """once="true" applies the match template to the first occurrence only."""
    tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body" once="true"><body>
<div id="wrap">
${select("*")}
</div>
</body></py:match>
<body>
<p>Foo</p>
</body>
<body>
<p>Bar</p>
</body>
</html>""")
    self.assertEqual("""<html>
<body>
<div id="wrap">
<p>Foo</p>
</div>
</body>
<body>
<p>Bar</p>
</body>
</html>""", tmpl.generate().render(encoding=None))
def test_match_with_recursive_attribute(self):
    """recursive="false" keeps the template from re-matching nested
    occurrences of the matched element."""
    tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:match path="elem" recursive="false"><elem>
<div class="elem">
${select('*')}
</div>
</elem></py:match>
<elem>
<subelem>
<elem/>
</subelem>
</elem>
</doc>""")
    self.assertEqual("""<doc>
<elem>
<div class="elem">
<subelem>
<elem/>
</subelem>
</div>
</elem>
</doc>""", tmpl.generate().render(encoding=None))

# FIXME
#def test_match_after_step(self):
#    tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
#      <span py:match="div/greeting">
#        Hello ${select('@name')}
#      </span>
#      <greeting name="Dude" />
#    </div>""")
#    self.assertEqual("""<div>
#      <span>
#        Hello Dude
#      </span>
#    </div>""", tmpl.generate().render(encoding=None))
class ContentDirectiveTestCase(unittest.TestCase):
    """Tests for the `py:content` template directive."""

    def test_as_element(self):
        # py:content is attribute-only; using it as an element must raise
        # a TemplateSyntaxError that points at the offending line.
        try:
            MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<py:content foo="">Foo</py:content>
</doc>""", filename='test.html').generate()
            self.fail('Expected TemplateSyntaxError')
        except TemplateSyntaxError, e:
            self.assertEqual('test.html', e.filename)
            self.assertEqual(2, e.lineno)
class ReplaceDirectiveTestCase(unittest.TestCase):
    """Tests for the `py:replace` template directive."""

    def test_replace_with_empty_value(self):
        """
        Verify that the directive raises an appropriate exception when an empty
        expression is supplied.
        """
        try:
            MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/">
<elem py:replace="">Foo</elem>
</doc>""", filename='test.html').generate()
            self.fail('Expected TemplateSyntaxError')
        except TemplateSyntaxError, e:
            self.assertEqual('test.html', e.filename)
            self.assertEqual(2, e.lineno)

    def test_as_element(self):
        # <py:replace value="..."> is equivalent to the attribute form.
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<py:replace value="title" />
</div>""", filename='test.html')
        self.assertEqual("""<div>
Test
</div>""", tmpl.generate(title='Test').render(encoding=None))
class StripDirectiveTestCase(unittest.TestCase):
    """Tests for the `py:strip` template directive."""

    def test_strip_false(self):
        # A false expression keeps the element tag in the output.
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<div py:strip="False"><b>foo</b></div>
</div>""")
        self.assertEqual("""<div>
<div><b>foo</b></div>
</div>""", tmpl.generate().render(encoding=None))

    def test_strip_empty(self):
        # An empty py:strip removes the element tag but keeps its content.
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<div py:strip=""><b>foo</b></div>
</div>""")
        self.assertEqual("""<div>
<b>foo</b>
</div>""", tmpl.generate().render(encoding=None))
class WithDirectiveTestCase(unittest.TestCase):
    """Tests for the `py:with` template directive."""

    def test_shadowing(self):
        # py:with shadows the context variable only for its own element.
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
${x}
<span py:with="x = x * 2" py:replace="x"/>
${x}
</div>""")
        self.assertEqual("""<div>
42
84
42
</div>""", tmpl.generate(x=42).render(encoding=None))

    def test_as_element(self):
        # py:with is also usable as a standalone <py:with> element.
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<py:with vars="x = x * 2">${x}</py:with>
</div>""")
        self.assertEqual("""<div>
84
</div>""", tmpl.generate(x=42).render(encoding=None))

    def test_multiple_vars_same_name(self):
        # Later assignments in the same vars attribute see earlier ones.
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<py:with vars="
foo = 'bar';
foo = foo.replace('r', 'z')
">
$foo
</py:with>
</div>""")
        self.assertEqual("""<div>
baz
</div>""", tmpl.generate(x=42).render(encoding=None))

    def test_multiple_vars_single_assignment(self):
        # Chained assignment (x = y = z = 1) binds all names.
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<py:with vars="x = y = z = 1">${x} ${y} ${z}</py:with>
</div>""")
        self.assertEqual("""<div>
1 1 1
</div>""", tmpl.generate(x=42).render(encoding=None))

    def test_nested_vars_single_assignment(self):
        # Nested tuple unpacking is supported on the left-hand side.
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<py:with vars="x, (y, z) = (1, (2, 3))">${x} ${y} ${z}</py:with>
</div>""")
        self.assertEqual("""<div>
1 2 3
</div>""", tmpl.generate(x=42).render(encoding=None))

    def test_multiple_vars_trailing_semicolon(self):
        # A trailing semicolon after the last assignment is tolerated.
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<py:with vars="x = x * 2; y = x / 2;">${x} ${y}</py:with>
</div>""")
        self.assertEqual("""<div>
84 42
</div>""", tmpl.generate(x=42).render(encoding=None))

    def test_semicolon_escape(self):
        # Semicolons inside string literals must not be treated as
        # assignment separators.
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<py:with vars="x = 'here is a semicolon: ;'; y = 'here are two semicolons: ;;' ;">
${x}
${y}
</py:with>
</div>""")
        self.assertEqual("""<div>
here is a semicolon: ;
here are two semicolons: ;;
</div>""", tmpl.generate().render(encoding=None))

    def test_ast_transformation(self):
        """
        Verify that the usual template expression AST transformations are
        applied despite the code being compiled to a `Suite` object.
        """
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<span py:with="bar=foo.bar">
$bar
</span>
</div>""")
        self.assertEqual("""<div>
<span>
42
</span>
</div>""", tmpl.generate(foo={'bar': 42}).render(encoding=None))

    def test_unicode_expr(self):
        # Non-ASCII unicode literals in the vars expression must work.
        tmpl = MarkupTemplate(u"""<div xmlns:py="http://genshi.edgewall.org/">
<span py:with="weeks=(u'一', u'二', u'三', u'四', u'五', u'六', u'日')">
$weeks
</span>
</div>""")
        self.assertEqual(u"""<div>
<span>
一二三四五六日
</span>
</div>""", tmpl.generate().render(encoding=None))

    def test_with_empty_value(self):
        """
        Verify that an empty py:with works (useless, but legal)
        """
        tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<span py:with="">Text</span></div>""")
        self.assertEqual("""<div>
<span>Text</span></div>""", tmpl.generate().render(encoding=None))
def suite():
    """Build the aggregate test suite for the directive tests.

    Includes the doctests of the `directives` module plus one makeSuite
    per TestCase class, in the same order as before.
    """
    cases = (
        AttrsDirectiveTestCase,
        ChooseDirectiveTestCase,
        DefDirectiveTestCase,
        ForDirectiveTestCase,
        IfDirectiveTestCase,
        MatchDirectiveTestCase,
        ContentDirectiveTestCase,
        ReplaceDirectiveTestCase,
        StripDirectiveTestCase,
        WithDirectiveTestCase,
    )
    result = unittest.TestSuite()
    result.addTest(doctest.DocTestSuite(directives))
    for case in cases:
        result.addTest(unittest.makeSuite(case, 'test'))
    return result
# Run the aggregated suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import os
import pickle
import shutil
from StringIO import StringIO
import sys
import tempfile
import unittest
from genshi.core import Markup
from genshi.input import XML
from genshi.template.base import BadDirectiveError, TemplateSyntaxError
from genshi.template.loader import TemplateLoader, TemplateNotFound
from genshi.template.markup import MarkupTemplate
class MarkupTemplateTestCase(unittest.TestCase):
"""Tests for markup template processing."""
def test_parse_fileobj(self):
fileobj = StringIO('<root> ${var} $var</root>')
tmpl = MarkupTemplate(fileobj)
self.assertEqual('<root> 42 42</root>', str(tmpl.generate(var=42)))
def test_parse_stream(self):
stream = XML('<root> ${var} $var</root>')
tmpl = MarkupTemplate(stream)
self.assertEqual('<root> 42 42</root>', str(tmpl.generate(var=42)))
def test_pickle(self):
    # A compiled template must survive a pickle round-trip (protocol 2)
    # and still render with context data afterwards.
    stream = XML('<root>$var</root>')
    tmpl = MarkupTemplate(stream)
    buf = StringIO()
    pickle.dump(tmpl, buf, 2)
    buf.seek(0)
    unpickled = pickle.load(buf)
    self.assertEqual('<root>42</root>', str(unpickled.generate(var=42)))
def test_interpolate_mixed3(self):
tmpl = MarkupTemplate('<root> ${var} $var</root>')
self.assertEqual('<root> 42 42</root>', str(tmpl.generate(var=42)))
def test_interpolate_leading_trailing_space(self):
tmpl = MarkupTemplate('<root>${ foo }</root>')
self.assertEqual('<root>bar</root>', str(tmpl.generate(foo='bar')))
def test_interpolate_multiline(self):
tmpl = MarkupTemplate("""<root>${dict(
bar = 'baz'
)[foo]}</root>""")
self.assertEqual('<root>baz</root>', str(tmpl.generate(foo='bar')))
def test_interpolate_non_string_attrs(self):
tmpl = MarkupTemplate('<root attr="${1}"/>')
self.assertEqual('<root attr="1"/>', str(tmpl.generate()))
def test_interpolate_list_result(self):
tmpl = MarkupTemplate('<root>$foo</root>')
self.assertEqual('<root>buzz</root>', str(tmpl.generate(foo=('buzz',))))
def test_empty_attr(self):
tmpl = MarkupTemplate('<root attr=""/>')
self.assertEqual('<root attr=""/>', str(tmpl.generate()))
def test_empty_attr_interpolated(self):
tmpl = MarkupTemplate('<root attr="$attr"/>')
self.assertEqual('<root attr=""/>', str(tmpl.generate(attr='')))
def test_bad_directive_error(self):
xml = '<p xmlns:py="http://genshi.edgewall.org/" py:do="nothing" />'
try:
tmpl = MarkupTemplate(xml, filename='test.html')
except BadDirectiveError, e:
self.assertEqual('test.html', e.filename)
self.assertEqual(1, e.lineno)
def test_directive_value_syntax_error(self):
xml = """<p xmlns:py="http://genshi.edgewall.org/" py:if="bar'" />"""
try:
tmpl = MarkupTemplate(xml, filename='test.html').generate()
self.fail('Expected TemplateSyntaxError')
except TemplateSyntaxError, e:
self.assertEqual('test.html', e.filename)
self.assertEqual(1, e.lineno)
def test_expression_syntax_error(self):
xml = """<p>
Foo <em>${bar"}</em>
</p>"""
try:
tmpl = MarkupTemplate(xml, filename='test.html')
self.fail('Expected TemplateSyntaxError')
except TemplateSyntaxError, e:
self.assertEqual('test.html', e.filename)
self.assertEqual(2, e.lineno)
def test_expression_syntax_error_multi_line(self):
xml = """<p><em></em>
${bar"}
</p>"""
try:
tmpl = MarkupTemplate(xml, filename='test.html')
self.fail('Expected TemplateSyntaxError')
except TemplateSyntaxError, e:
self.assertEqual('test.html', e.filename)
self.assertEqual(3, e.lineno)
def test_markup_noescape(self):
"""
Verify that outputting context data that is a `Markup` instance is not
escaped.
"""
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
$myvar
</div>""")
self.assertEqual("""<div>
<b>foo</b>
</div>""", str(tmpl.generate(myvar=Markup('<b>foo</b>'))))
def test_text_noescape_quotes(self):
"""
Verify that outputting context data in text nodes doesn't escape
quotes.
"""
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
$myvar
</div>""")
self.assertEqual("""<div>
"foo"
</div>""", str(tmpl.generate(myvar='"foo"')))
def test_attr_escape_quotes(self):
"""
Verify that outputting context data in attribtes escapes quotes.
"""
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<elem class="$myvar"/>
</div>""")
self.assertEqual("""<div>
<elem class=""foo""/>
</div>""", str(tmpl.generate(myvar='"foo"')))
def test_directive_element(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<py:if test="myvar">bar</py:if>
</div>""")
self.assertEqual("""<div>
bar
</div>""", str(tmpl.generate(myvar='"foo"')))
def test_normal_comment(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<!-- foo bar -->
</div>""")
self.assertEqual("""<div>
<!-- foo bar -->
</div>""", str(tmpl.generate()))
def test_template_comment(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<!-- !foo -->
<!--!bar-->
</div>""")
self.assertEqual("""<div>
</div>""", str(tmpl.generate()))
def test_parse_with_same_namespace_nested(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<span xmlns:py="http://genshi.edgewall.org/">
</span>
</div>""")
self.assertEqual("""<div>
<span>
</span>
</div>""", str(tmpl.generate()))
def test_latin1_encoded_with_xmldecl(self):
tmpl = MarkupTemplate(u"""<?xml version="1.0" encoding="iso-8859-1" ?>
<div xmlns:py="http://genshi.edgewall.org/">
\xf6
</div>""".encode('iso-8859-1'), encoding='iso-8859-1')
self.assertEqual(u"""<?xml version="1.0" encoding="iso-8859-1"?>\n<div>
\xf6
</div>""", unicode(tmpl.generate()))
def test_latin1_encoded_explicit_encoding(self):
tmpl = MarkupTemplate(u"""<div xmlns:py="http://genshi.edgewall.org/">
\xf6
</div>""".encode('iso-8859-1'), encoding='iso-8859-1')
self.assertEqual(u"""<div>
\xf6
</div>""", unicode(tmpl.generate()))
def test_exec_with_trailing_space(self):
"""
Verify that a code block processing instruction with trailing space
does not cause a syntax error (see ticket #127).
"""
MarkupTemplate("""<foo>
<?python
bar = 42
?>
</foo>""")
def test_exec_import(self):
tmpl = MarkupTemplate("""<?python from datetime import timedelta ?>
<div xmlns:py="http://genshi.edgewall.org/">
${timedelta(days=2)}
</div>""")
self.assertEqual("""<div>
2 days, 0:00:00
</div>""", str(tmpl.generate()))
def test_exec_def(self):
tmpl = MarkupTemplate("""
<?python
def foo():
return 42
?>
<div xmlns:py="http://genshi.edgewall.org/">
${foo()}
</div>""")
self.assertEqual("""<div>
42
</div>""", str(tmpl.generate()))
def test_namespace_on_removed_elem(self):
"""
Verify that a namespace declaration on an element that is removed from
the generated stream does not get pushed up to the next non-stripped
element (see ticket #107).
"""
tmpl = MarkupTemplate("""<?xml version="1.0"?>
<Test xmlns:py="http://genshi.edgewall.org/">
<Size py:if="0" xmlns:t="test">Size</Size>
<Item/>
</Test>""")
self.assertEqual("""<?xml version="1.0"?>\n<Test>
<Item/>
</Test>""", str(tmpl.generate()))
def test_include_in_loop(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included $idx</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="${name}.html" py:for="idx in range(3)" />
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<div>Included 0</div><div>Included 1</div><div>Included 2</div>
</html>""", tmpl.generate(name='tmpl1').render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_dynamic_include_href(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="${name}.html" />
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate(name='tmpl1').render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_select_included_elements(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<li>$item</li>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<ul py:match="ul">${select('li')}</ul>
<ul py:with="items=(1, 2, 3)">
<xi:include href="tmpl1.html" py:for="item in items" />
</ul>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<ul><li>1</li><li>2</li><li>3</li></ul>
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_fallback_when_include_found(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html"><xi:fallback>
Missing</xi:fallback></xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_error_when_include_not_found(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html"/>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname], auto_reload=True)
tmpl = loader.load('tmpl2.html')
self.assertRaises(TemplateNotFound, tmpl.generate().render)
finally:
shutil.rmtree(dirname)
def test_fallback_when_include_not_found(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html"><xi:fallback>
Missing</xi:fallback></xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
Missing
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_fallback_when_auto_reload_true(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html"><xi:fallback>
Missing</xi:fallback></xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname], auto_reload=True)
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
Missing
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_include_in_fallback(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl3.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html">
<xi:fallback>
<xi:include href="tmpl1.html">
<xi:fallback>Missing</xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl3.html')
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_nested_include_fallback(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl3.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html">
<xi:fallback>
<xi:include href="tmpl1.html">
<xi:fallback>Missing</xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl3.html')
self.assertEqual("""<html>
Missing
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_nested_include_in_fallback(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl3.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html">
<xi:fallback>
<xi:include href="tmpl1.html" />
</xi:fallback>
</xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl3.html')
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_include_fallback_with_directive(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="tmpl1.html"><xi:fallback>
<py:if test="True">tmpl1.html not found</py:if>
</xi:fallback></xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
tmpl1.html not found
</html>""", tmpl.generate(debug=True).render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_include_inlined(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="tmpl1.html" />
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname], auto_reload=False)
tmpl = loader.load('tmpl2.html')
# if not inlined the following would be 5
self.assertEqual(7, len(tmpl.stream))
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_include_inlined_in_loop(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included $idx</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="tmpl1.html" py:for="idx in range(3)" />
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname], auto_reload=False)
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<div>Included 0</div><div>Included 1</div><div>Included 2</div>
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_allow_exec_false(self):
    """<?python ?> code blocks must be rejected when allow_exec=False."""
    xml = ("""<?python
title = "A Genshi Template"
?>
<html xmlns:py="http://genshi.edgewall.org/">
<head>
<title py:content="title">This is replaced.</title>
</head>
</html>""")
    try:
        tmpl = MarkupTemplate(xml, filename='test.html',
                              allow_exec=False)
        # Fixed message: the exception caught below (and expected by the
        # sibling tests) is TemplateSyntaxError, not bare SyntaxError.
        self.fail('Expected TemplateSyntaxError')
    except TemplateSyntaxError:
        pass
def test_allow_exec_true(self):
xml = ("""<?python
title = "A Genshi Template"
?>
<html xmlns:py="http://genshi.edgewall.org/">
<head>
<title py:content="title">This is replaced.</title>
</head>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html', allow_exec=True)
def test_exec_in_match(self):
xml = ("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body/p">
<?python title="wakka wakka wakka" ?>
${title}
</py:match>
<body><p>moot text</p></body>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html', allow_exec=True)
self.assertEqual("""<html>
<body>
wakka wakka wakka
</body>
</html>""", tmpl.generate().render(encoding=None))
def test_with_in_match(self):
xml = ("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body/p">
<h1>${select('text()')}</h1>
${select('.')}
</py:match>
<body><p py:with="foo='bar'">${foo}</p></body>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html')
self.assertEqual("""<html>
<body>
<h1>bar</h1>
<p>bar</p>
</body>
</html>""", tmpl.generate().render(encoding=None))
def test_nested_include_matches(self):
# See ticket #157
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<html xmlns:py="http://genshi.edgewall.org/" py:strip="">
<div class="target">Some content.</div>
</html>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:xi="http://www.w3.org/2001/XInclude">
<body>
<h1>Some full html document that includes file1.html</h1>
<xi:include href="tmpl1.html" />
</body>
</html>""")
finally:
file2.close()
file3 = open(os.path.join(dirname, 'tmpl3.html'), 'w')
try:
file3.write("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:xi="http://www.w3.org/2001/XInclude" py:strip="">
<div py:match="div[@class='target']" py:attrs="select('@*')">
Some added stuff.
${select('*|text()')}
</div>
<xi:include href="tmpl2.html" />
</html>
""")
finally:
file3.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl3.html')
self.assertEqual("""
<html>
<body>
<h1>Some full html document that includes file1.html</h1>
<div class="target">
Some added stuff.
Some content.
</div>
</body>
</html>
""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_nested_matches_without_buffering(self):
xml = ("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body" once="true" buffer="false">
<body>
${select('*|text')}
And some other stuff...
</body>
</py:match>
<body>
<span py:match="span">Foo</span>
<span>Bar</span>
</body>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html')
self.assertEqual("""<html>
<body>
<span>Foo</span>
And some other stuff...
</body>
</html>""", tmpl.generate().render(encoding=None))
def test_match_without_select(self):
# See <http://genshi.edgewall.org/ticket/243>
xml = ("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body" buffer="false">
<body>
This replaces the other text.
</body>
</py:match>
<body>
This gets replaced.
</body>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html')
self.assertEqual("""<html>
<body>
This replaces the other text.
</body>
</html>""", tmpl.generate().render(encoding=None))
def suite():
    """Collect the doctests and unit tests for markup template processing."""
    tests = unittest.TestSuite()
    tests.addTests([
        doctest.DocTestSuite(MarkupTemplate.__module__),
        unittest.makeSuite(MarkupTemplateTestCase, 'test'),
    ])
    return tests
# Run the aggregated suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import os
import shutil
import tempfile
import unittest
from genshi.template.base import TemplateSyntaxError
from genshi.template.loader import TemplateLoader
from genshi.template.text import OldTextTemplate, NewTextTemplate
class OldTextTemplateTestCase(unittest.TestCase):
    """Tests for text template processing."""

    def setUp(self):
        # Scratch directory for the include tests; removed in tearDown.
        self.dirname = tempfile.mkdtemp(suffix='markup_test')

    def tearDown(self):
        shutil.rmtree(self.dirname)

    def test_escaping(self):
        # A backslash escapes the '#' directive marker.
        tmpl = OldTextTemplate('\\#escaped')
        self.assertEqual('#escaped', tmpl.generate().render(encoding=None))

    def test_comment(self):
        # '##' starts a comment that is dropped from the output.
        tmpl = OldTextTemplate('## a comment')
        self.assertEqual('', tmpl.generate().render(encoding=None))

    def test_comment_escaping(self):
        tmpl = OldTextTemplate('\\## escaped comment')
        self.assertEqual('## escaped comment',
                         tmpl.generate().render(encoding=None))

    def test_end_with_args(self):
        # #end may carry a trailing argument/comment, which is ignored.
        tmpl = OldTextTemplate("""
#if foo
bar
#end 'if foo'""")
        self.assertEqual('\n', tmpl.generate(foo=False).render(encoding=None))

    def test_latin1_encoded(self):
        # Byte input with an explicit encoding must be decoded correctly.
        text = u'$foo\xf6$bar'.encode('iso-8859-1')
        tmpl = OldTextTemplate(text, encoding='iso-8859-1')
        self.assertEqual(u'x\xf6y',
                         tmpl.generate(foo='x', bar='y').render(encoding=None))

    def test_unicode_input(self):
        text = u'$foo\xf6$bar'
        tmpl = OldTextTemplate(text)
        self.assertEqual(u'x\xf6y',
                         tmpl.generate(foo='x', bar='y').render(encoding=None))

    def test_empty_lines1(self):
        tmpl = OldTextTemplate("""Your items:
#for item in items
* ${item}
#end""")
        self.assertEqual("""Your items:
* 0
* 1
* 2
""", tmpl.generate(items=range(3)).render(encoding=None))

    def test_empty_lines2(self):
        tmpl = OldTextTemplate("""Your items:
#for item in items
* ${item}
#end""")
        self.assertEqual("""Your items:
* 0
* 1
* 2
""", tmpl.generate(items=range(3)).render(encoding=None))

    def test_include(self):
        # #include pulls in another template resolved via the loader.
        file1 = open(os.path.join(self.dirname, 'tmpl1.txt'), 'w')
        try:
            file1.write("Included\n")
        finally:
            file1.close()
        file2 = open(os.path.join(self.dirname, 'tmpl2.txt'), 'w')
        try:
            file2.write("""----- Included data below this line -----
#include tmpl1.txt
----- Included data above this line -----""")
        finally:
            file2.close()
        loader = TemplateLoader([self.dirname])
        tmpl = loader.load('tmpl2.txt', cls=OldTextTemplate)
        self.assertEqual("""----- Included data below this line -----
Included
----- Included data above this line -----""",
                         tmpl.generate().render(encoding=None))
class NewTextTemplateTestCase(unittest.TestCase):
    """Tests for the newer ("{% %}"-delimited) plain-text template language."""

    def setUp(self):
        # Fresh scratch directory for tests that load templates from disk.
        self.dirname = tempfile.mkdtemp(suffix='markup_test')

    def tearDown(self):
        shutil.rmtree(self.dirname)

    def test_escaping(self):
        # A backslash-escaped directive is emitted literally.
        tmpl = NewTextTemplate('\\{% escaped %}')
        self.assertEqual('{% escaped %}',
                         tmpl.generate().render(encoding=None))

    def test_comment(self):
        # "{# #}" comments produce no output at all.
        tmpl = NewTextTemplate('{# a comment #}')
        self.assertEqual('', tmpl.generate().render(encoding=None))

    def test_comment_escaping(self):
        tmpl = NewTextTemplate('\\{# escaped comment #}')
        self.assertEqual('{# escaped comment #}',
                         tmpl.generate().render(encoding=None))

    def test_end_with_args(self):
        # "{% end %}" may carry a trailing annotation, which is ignored.
        tmpl = NewTextTemplate("""
{% if foo %}
bar
{% end 'if foo' %}""")
        self.assertEqual('\n', tmpl.generate(foo=False).render(encoding=None))

    def test_latin1_encoded(self):
        # Byte input is decoded using the explicitly supplied encoding.
        text = u'$foo\xf6$bar'.encode('iso-8859-1')
        tmpl = NewTextTemplate(text, encoding='iso-8859-1')
        self.assertEqual(u'x\xf6y',
                         tmpl.generate(foo='x', bar='y').render(encoding=None))

    def test_unicode_input(self):
        # Unicode input requires no decoding step.
        text = u'$foo\xf6$bar'
        tmpl = NewTextTemplate(text)
        self.assertEqual(u'x\xf6y',
                         tmpl.generate(foo='x', bar='y').render(encoding=None))

    def test_empty_lines1(self):
        # A trailing backslash joins the directive line with the next one.
        tmpl = NewTextTemplate("""Your items:
{% for item in items %}\
* ${item}
{% end %}""")
        self.assertEqual("""Your items:
* 0
* 1
* 2
""", tmpl.generate(items=range(3)).render(encoding=None))

    def test_empty_lines2(self):
        # NOTE(review): as extracted this is identical to test_empty_lines1;
        # confirm against the original source for the intended variation.
        tmpl = NewTextTemplate("""Your items:
{% for item in items %}\
* ${item}
{% end %}""")
        self.assertEqual("""Your items:
* 0
* 1
* 2
""", tmpl.generate(items=range(3)).render(encoding=None))

    def test_exec_with_trailing_space(self):
        """
        Verify that a code block with trailing space does not cause a syntax
        error (see ticket #127).
        """
        NewTextTemplate("""
{% python
bar = 42
$}
""")

    def test_exec_import(self):
        # Imports performed inside a {% python %} block stay usable in
        # subsequent expressions.
        tmpl = NewTextTemplate("""{% python from datetime import timedelta %}
${timedelta(days=2)}
""")
        self.assertEqual("""
2 days, 0:00:00
""", tmpl.generate().render(encoding=None))

    def test_exec_def(self):
        # Functions defined inside a {% python %} block stay callable.
        tmpl = NewTextTemplate("""{% python
def foo():
return 42
%}
${foo()}
""")
        self.assertEqual("""
42
""", tmpl.generate().render(encoding=None))

    def test_include(self):
        # "{% include %}" pulls another template in via the loader.
        file1 = open(os.path.join(self.dirname, 'tmpl1.txt'), 'w')
        try:
            file1.write("Included")
        finally:
            file1.close()
        file2 = open(os.path.join(self.dirname, 'tmpl2.txt'), 'w')
        try:
            file2.write("""----- Included data below this line -----
{% include tmpl1.txt %}
----- Included data above this line -----""")
        finally:
            file2.close()
        loader = TemplateLoader([self.dirname])
        tmpl = loader.load('tmpl2.txt', cls=NewTextTemplate)
        self.assertEqual("""----- Included data below this line -----
Included
----- Included data above this line -----""",
                         tmpl.generate().render(encoding=None))

    def test_include_expr(self):
        # The include target may itself be a template expression.
        file1 = open(os.path.join(self.dirname, 'tmpl1.txt'), 'w')
        try:
            file1.write("Included")
        finally:
            file1.close()
        file2 = open(os.path.join(self.dirname, 'tmpl2.txt'), 'w')
        try:
            file2.write("""----- Included data below this line -----
{% include ${'%s.txt' % ('tmpl1',)} %}
----- Included data above this line -----""")
        finally:
            file2.close()
        loader = TemplateLoader([self.dirname])
        tmpl = loader.load('tmpl2.txt', cls=NewTextTemplate)
        self.assertEqual("""----- Included data below this line -----
Included
----- Included data above this line -----""",
                         tmpl.generate().render(encoding=None))
def suite():
    """Build the test suite for this module (doctests + both text syntaxes)."""
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(NewTextTemplate.__module__))
    tests.addTest(unittest.makeSuite(OldTextTemplateTestCase, 'test'))
    tests.addTest(unittest.makeSuite(NewTextTemplateTestCase, 'test'))
    return tests
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2007 Edgewall Software
# Copyright (C) 2006 Matthew Good
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import os
import unittest
from genshi.core import Stream
from genshi.output import DocType
from genshi.template import MarkupTemplate, TextTemplate, NewTextTemplate
from genshi.template.plugin import ConfigurationError, \
MarkupTemplateEnginePlugin, \
TextTemplateEnginePlugin
PACKAGE = 'genshi.template.tests'
class MarkupTemplateEnginePluginTestCase(unittest.TestCase):
    """Tests for the Buffet/TurboGears-style markup template engine plugin."""

    def test_init_no_options(self):
        # Without options the plugin falls back to documented defaults.
        plugin = MarkupTemplateEnginePlugin()
        self.assertEqual('utf-8', plugin.default_encoding)
        self.assertEqual('html', plugin.default_format)
        self.assertEqual(None, plugin.default_doctype)
        self.assertEqual([], plugin.loader.search_path)
        self.assertEqual(True, plugin.loader.auto_reload)
        self.assertEqual(25, plugin.loader._cache.capacity)

    def test_init_with_loader_options(self):
        # Loader-related options are parsed from their string form.
        plugin = MarkupTemplateEnginePlugin(options={
            'genshi.auto_reload': 'off',
            'genshi.max_cache_size': '100',
            'genshi.search_path': '/usr/share/tmpl:/usr/local/share/tmpl',
        })
        self.assertEqual(['/usr/share/tmpl', '/usr/local/share/tmpl'],
                         plugin.loader.search_path)
        self.assertEqual(False, plugin.loader.auto_reload)
        self.assertEqual(100, plugin.loader._cache.capacity)

    def test_init_with_invalid_cache_size(self):
        # A non-numeric cache size must be rejected at construction time.
        self.assertRaises(ConfigurationError, MarkupTemplateEnginePlugin,
                          options={'genshi.max_cache_size': 'thirty'})

    def test_init_with_output_options(self):
        plugin = MarkupTemplateEnginePlugin(options={
            'genshi.default_encoding': 'iso-8859-15',
            'genshi.default_format': 'xhtml',
            'genshi.default_doctype': 'xhtml-strict',
        })
        self.assertEqual('iso-8859-15', plugin.default_encoding)
        self.assertEqual('xhtml', plugin.default_format)
        self.assertEqual(DocType.XHTML, plugin.default_doctype)

    def test_init_with_invalid_output_format(self):
        self.assertRaises(ConfigurationError, MarkupTemplateEnginePlugin,
                          options={'genshi.default_format': 'foobar'})

    def test_init_with_invalid_doctype(self):
        self.assertRaises(ConfigurationError, MarkupTemplateEnginePlugin,
                          options={'genshi.default_doctype': 'foobar'})

    def test_load_template_from_file(self):
        # Dotted-name template references resolve to package data files.
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        self.assertEqual('test.html', os.path.basename(tmpl.filename))
        assert isinstance(tmpl, MarkupTemplate)

    def test_load_template_from_string(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(None, template_string="""<p>
$message
</p>""")
        self.assertEqual(None, tmpl.filename)
        assert isinstance(tmpl, MarkupTemplate)

    def test_transform_with_load(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        stream = plugin.transform({'message': 'Hello'}, tmpl)
        assert isinstance(stream, Stream)

    def test_transform_without_load(self):
        # transform() also accepts a template name and loads it itself.
        plugin = MarkupTemplateEnginePlugin()
        stream = plugin.transform({'message': 'Hello'},
                                  PACKAGE + '.templates.test')
        assert isinstance(stream, Stream)

    def test_render(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        output = plugin.render({'message': 'Hello'}, template=tmpl)
        self.assertEqual("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html lang="en">
<head>
<title>Test</title>
</head>
<body>
<h1>Test</h1>
<p>Hello</p>
</body>
</html>""", output)

    def test_render_with_format(self):
        # An explicit format overrides the plugin's default serializer.
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        output = plugin.render({'message': 'Hello'}, format='xhtml',
                               template=tmpl)
        self.assertEqual("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en">
<head>
<title>Test</title>
</head>
<body>
<h1>Test</h1>
<p>Hello</p>
</body>
</html>""", output)

    def test_render_with_doctype(self):
        plugin = MarkupTemplateEnginePlugin(options={
            'genshi.default_doctype': 'html-strict',
        })
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        output = plugin.render({'message': 'Hello'}, template=tmpl)
        self.assertEqual("""<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html lang="en">
<head>
<title>Test</title>
</head>
<body>
<h1>Test</h1>
<p>Hello</p>
</body>
</html>""", output)

    def test_render_fragment_with_doctype(self):
        # fragment=True suppresses the doctype even when one is configured.
        plugin = MarkupTemplateEnginePlugin(options={
            'genshi.default_doctype': 'html-strict',
        })
        tmpl = plugin.load_template(PACKAGE + '.templates.test_no_doctype')
        output = plugin.render({'message': 'Hello'}, template=tmpl,
                               fragment=True)
        self.assertEqual("""<html lang="en">
<head>
<title>Test</title>
</head>
<body>
<h1>Test</h1>
<p>Hello</p>
</body>
</html>""", output)

    def test_helper_functions(self):
        # The plugin injects helper functions into the template context.
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.functions')
        output = plugin.render({'snippet': '<b>Foo</b>'}, template=tmpl)
        self.assertEqual("""<div>
False
bar
<b>Foo</b>
<b>Foo</b>
</div>""", output)
class TextTemplateEnginePluginTestCase(unittest.TestCase):
    """Tests for the Buffet/TurboGears-style text template engine plugin."""

    def test_init_no_options(self):
        # Without options the plugin falls back to documented defaults.
        plugin = TextTemplateEnginePlugin()
        self.assertEqual('utf-8', plugin.default_encoding)
        self.assertEqual('text', plugin.default_format)
        self.assertEqual([], plugin.loader.search_path)
        self.assertEqual(True, plugin.loader.auto_reload)
        self.assertEqual(25, plugin.loader._cache.capacity)

    def test_init_with_loader_options(self):
        # Loader-related options are parsed from their string form.
        plugin = TextTemplateEnginePlugin(options={
            'genshi.auto_reload': 'off',
            'genshi.max_cache_size': '100',
            'genshi.search_path': '/usr/share/tmpl:/usr/local/share/tmpl',
        })
        self.assertEqual(['/usr/share/tmpl', '/usr/local/share/tmpl'],
                         plugin.loader.search_path)
        self.assertEqual(False, plugin.loader.auto_reload)
        self.assertEqual(100, plugin.loader._cache.capacity)

    def test_init_with_output_options(self):
        plugin = TextTemplateEnginePlugin(options={
            'genshi.default_encoding': 'iso-8859-15',
        })
        self.assertEqual('iso-8859-15', plugin.default_encoding)

    def test_init_with_new_syntax(self):
        # Opting into the new syntax switches the template class used.
        plugin = TextTemplateEnginePlugin(options={
            'genshi.new_text_syntax': 'yes',
        })
        self.assertEqual(NewTextTemplate, plugin.template_class)
        tmpl = plugin.load_template(PACKAGE + '.templates.new_syntax')
        output = plugin.render({'foo': True}, template=tmpl)
        self.assertEqual('bar', output)

    def test_load_template_from_file(self):
        # Dotted-name template references resolve to package data files.
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        assert isinstance(tmpl, TextTemplate)
        self.assertEqual('test.txt', os.path.basename(tmpl.filename))

    def test_load_template_from_string(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(None, template_string="$message")
        self.assertEqual(None, tmpl.filename)
        assert isinstance(tmpl, TextTemplate)

    def test_transform_without_load(self):
        # transform() also accepts a template name and loads it itself.
        plugin = TextTemplateEnginePlugin()
        stream = plugin.transform({'message': 'Hello'},
                                  PACKAGE + '.templates.test')
        assert isinstance(stream, Stream)

    def test_transform_with_load(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        stream = plugin.transform({'message': 'Hello'}, tmpl)
        assert isinstance(stream, Stream)

    def test_render(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        output = plugin.render({'message': 'Hello'}, template=tmpl)
        self.assertEqual("""Test
====
Hello
""", output)

    def test_helper_functions(self):
        # The plugin injects helper functions into the template context.
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.functions')
        output = plugin.render({}, template=tmpl)
        self.assertEqual("""False
bar
""", output)
def suite():
    """Build the test suite covering both plugin flavours."""
    tests = unittest.TestSuite()
    for case in (MarkupTemplateEnginePluginTestCase,
                 TextTemplateEnginePluginTestCase):
        tests.addTest(unittest.makeSuite(case, 'test'))
    return tests
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import unittest
from genshi.template.base import Template
def suite():
    """Build the test suite: only the doctests of the base template module."""
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(Template.__module__))
    return tests
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import unittest
def suite():
    """Aggregate the suites of every genshi.template test module."""
    # Imported lazily so merely importing this package stays cheap.
    from genshi.template.tests import base, directives, eval, interpolation, \
                                      loader, markup, plugin, text
    tests = unittest.TestSuite()
    for module in (base, directives, eval, interpolation, loader, markup,
                   plugin, text):
        tests.addTest(module.suite())
    return tests
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import sys
import unittest
from genshi.core import TEXT
from genshi.template.base import TemplateSyntaxError, EXPR
from genshi.template.interpolation import interpolate
class InterpolateTestCase(unittest.TestCase):
    """Tests for ``genshi.template.interpolation.interpolate``.

    Each test feeds a template string through interpolate() and checks the
    resulting (kind, data) event parts: TEXT events carry literal strings,
    EXPR events carry Expression objects whose ``source`` is inspected.

    Fixes over the previous revision: the duplicated definitions of
    ``test_interpolate_short_starting_with_digit`` and
    ``test_interpolate_short_containing_digit`` (which silently shadowed
    the identical earlier ones) were removed, the Python-2-only
    ``except E, e`` clause was replaced with ``assertRaises``, and the
    misspelled ``test_interpolate_dobuleescaped`` method was renamed.
    """

    def test_interpolate_string(self):
        # Plain text yields a single TEXT event.
        parts = list(interpolate('bla'))
        self.assertEqual(1, len(parts))
        self.assertEqual(TEXT, parts[0][0])
        self.assertEqual('bla', parts[0][1])

    def test_interpolate_simple(self):
        # ${...} yields a single EXPR event.
        parts = list(interpolate('${bla}'))
        self.assertEqual(1, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual('bla', parts[0][1].source)

    def test_interpolate_escaped(self):
        # "$$" escapes interpolation: output is literal "${bla}".
        parts = list(interpolate('$${bla}'))
        self.assertEqual(1, len(parts))
        self.assertEqual(TEXT, parts[0][0])
        self.assertEqual('${bla}', parts[0][1])

    def test_interpolate_doubleescaped(self):
        # "$$$" is an escaped "$" followed by a live expression.
        parts = list(interpolate('$$${bla}'))
        self.assertEqual(2, len(parts))
        self.assertEqual(TEXT, parts[0][0])
        self.assertEqual('$', parts[0][1])
        self.assertEqual(EXPR, parts[1][0])
        self.assertEqual('bla', parts[1][1].source)

    def test_interpolate_short(self):
        # Shorthand "$name" form without braces.
        parts = list(interpolate('$bla'))
        self.assertEqual(1, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual('bla', parts[0][1].source)

    def test_interpolate_short_escaped(self):
        parts = list(interpolate('$$bla'))
        self.assertEqual(1, len(parts))
        self.assertEqual(TEXT, parts[0][0])
        self.assertEqual('$bla', parts[0][1])

    def test_interpolate_short_escaped_2(self):
        parts = list(interpolate('my $$bla = 2'))
        self.assertEqual(1, len(parts))
        self.assertEqual(TEXT, parts[0][0])
        self.assertEqual('my $bla = 2', parts[0][1])

    def test_interpolate_short_doubleescaped(self):
        parts = list(interpolate('$$$bla'))
        self.assertEqual(2, len(parts))
        self.assertEqual(TEXT, parts[0][0])
        self.assertEqual('$', parts[0][1])
        self.assertEqual(EXPR, parts[1][0])
        self.assertEqual('bla', parts[1][1].source)

    def test_interpolate_short_starting_with_underscore(self):
        parts = list(interpolate('$_bla'))
        self.assertEqual(1, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual('_bla', parts[0][1].source)

    def test_interpolate_short_containing_underscore(self):
        parts = list(interpolate('$foo_bar'))
        self.assertEqual(1, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual('foo_bar', parts[0][1].source)

    def test_interpolate_short_starting_with_dot(self):
        # A dot cannot start a shorthand expression: stays literal text.
        parts = list(interpolate('$.bla'))
        self.assertEqual(1, len(parts))
        self.assertEqual(TEXT, parts[0][0])
        self.assertEqual('$.bla', parts[0][1])

    def test_interpolate_short_containing_dot(self):
        # ...but attribute access inside the shorthand is allowed.
        parts = list(interpolate('$foo.bar'))
        self.assertEqual(1, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual('foo.bar', parts[0][1].source)

    def test_interpolate_short_starting_with_digit(self):
        # A digit cannot start a shorthand expression: stays literal text.
        parts = list(interpolate('$0bla'))
        self.assertEqual(1, len(parts))
        self.assertEqual(TEXT, parts[0][0])
        self.assertEqual('$0bla', parts[0][1])

    def test_interpolate_short_containing_digit(self):
        # ...but digits are fine inside the identifier.
        parts = list(interpolate('$foo0'))
        self.assertEqual(1, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual('foo0', parts[0][1].source)

    def test_interpolate_full_nested_brackets(self):
        # Balanced braces inside ${...} (dict literal) are kept together.
        parts = list(interpolate('${{1:2}}'))
        self.assertEqual(1, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual('{1:2}', parts[0][1].source)

    def test_interpolate_full_mismatched_brackets(self):
        # An unbalanced "{" inside ${...} must raise a syntax error.
        # interpolate() is lazy, so force consumption inside the assertion.
        self.assertRaises(TemplateSyntaxError,
                          lambda: list(interpolate('${{1:2}')))

    def test_interpolate_quoted_brackets_1(self):
        # A closing brace inside a string literal does not end the
        # expression.
        parts = list(interpolate('${"}"}'))
        self.assertEqual(1, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual('"}"', parts[0][1].source)

    def test_interpolate_quoted_brackets_2(self):
        parts = list(interpolate("${'}'}"))
        self.assertEqual(1, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual("'}'", parts[0][1].source)

    def test_interpolate_quoted_brackets_3(self):
        parts = list(interpolate("${'''}'''}"))
        self.assertEqual(1, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual("'''}'''", parts[0][1].source)

    def test_interpolate_quoted_brackets_4(self):
        parts = list(interpolate("${'''}\"\"\"'''}"))
        self.assertEqual(1, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual("'''}\"\"\"'''", parts[0][1].source)

    def test_interpolate_quoted_brackets_5(self):
        # Backslash-escaped quotes inside the string literal.
        parts = list(interpolate(r"${'\'}'}"))
        self.assertEqual(1, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual(r"'\'}'", parts[0][1].source)

    def test_interpolate_mixed1(self):
        # Alternating EXPR / TEXT / EXPR events.
        parts = list(interpolate('$foo bar $baz'))
        self.assertEqual(3, len(parts))
        self.assertEqual(EXPR, parts[0][0])
        self.assertEqual('foo', parts[0][1].source)
        self.assertEqual(TEXT, parts[1][0])
        self.assertEqual(' bar ', parts[1][1])
        self.assertEqual(EXPR, parts[2][0])
        self.assertEqual('baz', parts[2][1].source)

    def test_interpolate_mixed2(self):
        # Alternating TEXT / EXPR / TEXT events.
        parts = list(interpolate('foo $bar baz'))
        self.assertEqual(3, len(parts))
        self.assertEqual(TEXT, parts[0][0])
        self.assertEqual('foo ', parts[0][1])
        self.assertEqual(EXPR, parts[1][0])
        self.assertEqual('bar', parts[1][1].source)
        self.assertEqual(TEXT, parts[2][0])
        self.assertEqual(' baz', parts[2][1])

    def test_interpolate_triplequoted(self):
        # Triple-quoted strings may span lines inside ${...}.
        parts = list(interpolate('${"""foo\nbar"""}'))
        self.assertEqual(1, len(parts))
        self.assertEqual('"""foo\nbar"""', parts[0][1].source)
def suite():
    """Build the test suite for this module (doctests + interpolation tests)."""
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(interpolate.__module__))
    tests.addTest(unittest.makeSuite(InterpolateTestCase, 'test'))
    return tests
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import os
import shutil
import tempfile
import unittest
from genshi.core import TEXT
from genshi.template.loader import TemplateLoader
from genshi.template.markup import MarkupTemplate
class TemplateLoaderTestCase(unittest.TestCase):
    """Tests for the template loader.

    Each test writes small template files into a throwaway directory and
    exercises search-path resolution, relative includes, caching and
    prefix delegation.  (A further test method follows this block in the
    original file.)
    """

    def setUp(self):
        # Fresh scratch directory for the template files of each test.
        self.dirname = tempfile.mkdtemp(suffix='markup_test')

    def tearDown(self):
        shutil.rmtree(self.dirname)

    def test_search_path_empty(self):
        loader = TemplateLoader()
        self.assertEqual([], loader.search_path)

    def test_search_path_as_string(self):
        # A bare string is accepted and normalized into a one-entry list.
        loader = TemplateLoader(self.dirname)
        self.assertEqual([self.dirname], loader.search_path)

    def test_relative_include_samedir(self):
        # An include is resolved relative to the including template's dir.
        file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w')
        try:
            file1.write("""<div>Included</div>""")
        finally:
            file1.close()
        file2 = open(os.path.join(self.dirname, 'tmpl2.html'), 'w')
        try:
            file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html" />
</html>""")
        finally:
            file2.close()
        loader = TemplateLoader([self.dirname])
        tmpl = loader.load('tmpl2.html')
        self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render(encoding=None))

    def test_relative_include_subdir(self):
        # Includes may point into a subdirectory.
        os.mkdir(os.path.join(self.dirname, 'sub'))
        file1 = open(os.path.join(self.dirname, 'sub', 'tmpl1.html'), 'w')
        try:
            file1.write("""<div>Included</div>""")
        finally:
            file1.close()
        file2 = open(os.path.join(self.dirname, 'tmpl2.html'), 'w')
        try:
            file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="sub/tmpl1.html" />
</html>""")
        finally:
            file2.close()
        loader = TemplateLoader([self.dirname])
        tmpl = loader.load('tmpl2.html')
        self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render(encoding=None))

    def test_relative_include_parentdir(self):
        # Includes may climb out of the including template's directory.
        file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w')
        try:
            file1.write("""<div>Included</div>""")
        finally:
            file1.close()
        os.mkdir(os.path.join(self.dirname, 'sub'))
        file2 = open(os.path.join(self.dirname, 'sub', 'tmpl2.html'), 'w')
        try:
            file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="../tmpl1.html" />
</html>""")
        finally:
            file2.close()
        loader = TemplateLoader([self.dirname])
        tmpl = loader.load('sub/tmpl2.html')
        self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render(encoding=None))

    def test_relative_include_samesubdir(self):
        # When the same name exists in both dirs, the sibling file in the
        # including template's own directory wins.
        file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w')
        try:
            file1.write("""<div>Included tmpl1.html</div>""")
        finally:
            file1.close()
        os.mkdir(os.path.join(self.dirname, 'sub'))
        file2 = open(os.path.join(self.dirname, 'sub', 'tmpl1.html'), 'w')
        try:
            file2.write("""<div>Included sub/tmpl1.html</div>""")
        finally:
            file2.close()
        file3 = open(os.path.join(self.dirname, 'sub', 'tmpl2.html'), 'w')
        try:
            file3.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html" />
</html>""")
        finally:
            file3.close()
        loader = TemplateLoader([self.dirname])
        tmpl = loader.load('sub/tmpl2.html')
        self.assertEqual("""<html>
<div>Included sub/tmpl1.html</div>
</html>""", tmpl.generate().render(encoding=None))

    def test_relative_include_without_search_path(self):
        # Relative includes also work when loading by absolute path with
        # no configured search path.
        file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w')
        try:
            file1.write("""<div>Included</div>""")
        finally:
            file1.close()
        file2 = open(os.path.join(self.dirname, 'tmpl2.html'), 'w')
        try:
            file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html" />
</html>""")
        finally:
            file2.close()
        loader = TemplateLoader()
        tmpl = loader.load(os.path.join(self.dirname, 'tmpl2.html'))
        self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render(encoding=None))

    def test_relative_include_without_search_path_nested(self):
        # Two levels of includes, still without a search path.
        file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w')
        try:
            file1.write("""<div>Included</div>""")
        finally:
            file1.close()
        file2 = open(os.path.join(self.dirname, 'tmpl2.html'), 'w')
        try:
            file2.write("""<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html" />
</div>""")
        finally:
            file2.close()
        file3 = open(os.path.join(self.dirname, 'tmpl3.html'), 'w')
        try:
            file3.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html" />
</html>""")
        finally:
            file3.close()
        loader = TemplateLoader()
        tmpl = loader.load(os.path.join(self.dirname, 'tmpl3.html'))
        self.assertEqual("""<html>
<div>
<div>Included</div>
</div>
</html>""", tmpl.generate().render(encoding=None))

    def test_relative_include_from_inmemory_template(self):
        # A template constructed in memory resolves relative includes
        # against its declared (virtual) filename.
        file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w')
        try:
            file1.write("""<div>Included</div>""")
        finally:
            file1.close()
        loader = TemplateLoader([self.dirname])
        tmpl2 = MarkupTemplate("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="../tmpl1.html" />
</html>""", filename='subdir/tmpl2.html', loader=loader)
        self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl2.generate().render(encoding=None))

    def test_relative_absolute_template_preferred(self):
        # For a template loaded by absolute path, a sibling include wins
        # over a same-named file higher up.
        file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w')
        try:
            file1.write("""<div>Included</div>""")
        finally:
            file1.close()
        os.mkdir(os.path.join(self.dirname, 'sub'))
        file2 = open(os.path.join(self.dirname, 'sub', 'tmpl1.html'), 'w')
        try:
            file2.write("""<div>Included from sub</div>""")
        finally:
            file2.close()
        file3 = open(os.path.join(self.dirname, 'sub', 'tmpl2.html'), 'w')
        try:
            file3.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html" />
</html>""")
        finally:
            file3.close()
        loader = TemplateLoader()
        tmpl = loader.load(os.path.abspath(os.path.join(self.dirname, 'sub',
                                                        'tmpl2.html')))
        self.assertEqual("""<html>
<div>Included from sub</div>
</html>""", tmpl.generate().render(encoding=None))

    def test_abspath_caching(self):
        # With a search path configured, an include from an abspath-loaded
        # template resolves via the search path and lands in the cache.
        abspath = os.path.join(self.dirname, 'abs')
        os.mkdir(abspath)
        file1 = open(os.path.join(abspath, 'tmpl1.html'), 'w')
        try:
            file1.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html" />
</html>""")
        finally:
            file1.close()
        file2 = open(os.path.join(abspath, 'tmpl2.html'), 'w')
        try:
            file2.write("""<div>Included from abspath.</div>""")
        finally:
            file2.close()
        searchpath = os.path.join(self.dirname, 'searchpath')
        os.mkdir(searchpath)
        file3 = open(os.path.join(searchpath, 'tmpl2.html'), 'w')
        try:
            file3.write("""<div>Included from searchpath.</div>""")
        finally:
            file3.close()
        loader = TemplateLoader(searchpath)
        tmpl1 = loader.load(os.path.join(abspath, 'tmpl1.html'))
        self.assertEqual("""<html>
<div>Included from searchpath.</div>
</html>""", tmpl1.generate().render(encoding=None))
        assert 'tmpl2.html' in loader._cache

    def test_abspath_include_caching_without_search_path(self):
        # Without a search path, relative includes must NOT be cached
        # under their bare name, or the two tmpl2.html files would clash.
        file1 = open(os.path.join(self.dirname, 'tmpl1.html'), 'w')
        try:
            file1.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html" />
</html>""")
        finally:
            file1.close()
        file2 = open(os.path.join(self.dirname, 'tmpl2.html'), 'w')
        try:
            file2.write("""<div>Included</div>""")
        finally:
            file2.close()
        os.mkdir(os.path.join(self.dirname, 'sub'))
        file3 = open(os.path.join(self.dirname, 'sub', 'tmpl1.html'), 'w')
        try:
            file3.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html" />
</html>""")
        finally:
            file3.close()
        file4 = open(os.path.join(self.dirname, 'sub', 'tmpl2.html'), 'w')
        try:
            file4.write("""<div>Included from sub</div>""")
        finally:
            file4.close()
        loader = TemplateLoader()
        tmpl1 = loader.load(os.path.join(self.dirname, 'tmpl1.html'))
        self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl1.generate().render(encoding=None))
        tmpl2 = loader.load(os.path.join(self.dirname, 'sub', 'tmpl1.html'))
        self.assertEqual("""<html>
<div>Included from sub</div>
</html>""", tmpl2.generate().render(encoding=None))
        assert 'tmpl2.html' not in loader._cache

    def test_load_with_default_encoding(self):
        # The loader-wide default encoding is used when none is given.
        f = open(os.path.join(self.dirname, 'tmpl.html'), 'w')
        try:
            f.write(u'<div>\xf6</div>'.encode('iso-8859-1'))
        finally:
            f.close()
        loader = TemplateLoader([self.dirname], default_encoding='iso-8859-1')
        loader.load('tmpl.html')

    def test_load_with_explicit_encoding(self):
        # An explicit per-load encoding overrides the loader default.
        f = open(os.path.join(self.dirname, 'tmpl.html'), 'w')
        try:
            f.write(u'<div>\xf6</div>'.encode('iso-8859-1'))
        finally:
            f.close()
        loader = TemplateLoader([self.dirname], default_encoding='utf-8')
        loader.load('tmpl.html', encoding='iso-8859-1')

    def test_load_with_callback(self):
        # The callback runs once per template load from disk, so a filter
        # it installs must not be applied twice for a cached template.
        fileobj = open(os.path.join(self.dirname, 'tmpl.html'), 'w')
        try:
            fileobj.write("""<html>
<p>Hello</p>
</html>""")
        finally:
            fileobj.close()
        def template_loaded(template):
            def my_filter(stream, ctxt):
                for kind, data, pos in stream:
                    if kind is TEXT and data.strip():
                        data = ', '.join([data, data.lower()])
                    yield kind, data, pos
            template.filters.insert(0, my_filter)
        loader = TemplateLoader([self.dirname], callback=template_loaded)
        tmpl = loader.load('tmpl.html')
        self.assertEqual("""<html>
<p>Hello, hello</p>
</html>""", tmpl.generate().render(encoding=None))
        # Make sure the filter is only added once
        tmpl = loader.load('tmpl.html')
        self.assertEqual("""<html>
<p>Hello, hello</p>
</html>""", tmpl.generate().render(encoding=None))

    def test_prefix_delegation_to_directories(self):
        """
        Test prefix delegation with the following layout:

        templates/foo.html
        sub1/templates/tmpl1.html
        sub2/templates/tmpl2.html

        Where sub1 and sub2 are prefixes, and both tmpl1.html and tmpl2.html
        include foo.html.
        """
        dir1 = os.path.join(self.dirname, 'templates')
        os.mkdir(dir1)
        file1 = open(os.path.join(dir1, 'foo.html'), 'w')
        try:
            file1.write("""<div>Included foo</div>""")
        finally:
            file1.close()
        dir2 = os.path.join(self.dirname, 'sub1', 'templates')
        os.makedirs(dir2)
        file2 = open(os.path.join(dir2, 'tmpl1.html'), 'w')
        try:
            file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="../foo.html" /> from sub1
</html>""")
        finally:
            file2.close()
        dir3 = os.path.join(self.dirname, 'sub2', 'templates')
        os.makedirs(dir3)
        file3 = open(os.path.join(dir3, 'tmpl2.html'), 'w')
        try:
            file3.write("""<div>tmpl2</div>""")
        finally:
            file3.close()
        loader = TemplateLoader([dir1, TemplateLoader.prefixed(
            sub1 = dir2,
            sub2 = dir3
        )])
        tmpl = loader.load('sub1/tmpl1.html')
        self.assertEqual("""<html>
<div>Included foo</div> from sub1
</html>""", tmpl.generate().render(encoding=None))
def test_prefix_delegation_to_directories_with_subdirs(self):
    """
    Test prefix delegation with the following layout:

        templates/foo.html
        sub1/templates/tmpl1.html
        sub1/templates/tmpl2.html
        sub1/templates/bar/tmpl3.html

    Where sub1 is a prefix, and tmpl1.html includes all the others.
    """
    dir1 = os.path.join(self.dirname, 'templates')
    os.mkdir(dir1)
    file1 = open(os.path.join(dir1, 'foo.html'), 'w')
    try:
        file1.write("""<div>Included foo</div>""")
    finally:
        file1.close()

    dir2 = os.path.join(self.dirname, 'sub1', 'templates')
    os.makedirs(dir2)
    file2 = open(os.path.join(dir2, 'tmpl1.html'), 'w')
    try:
        file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="../foo.html" /> from sub1
<xi:include href="tmpl2.html" /> from sub1
<xi:include href="bar/tmpl3.html" /> from sub1
</html>""")
    finally:
        file2.close()

    file3 = open(os.path.join(dir2, 'tmpl2.html'), 'w')
    try:
        file3.write("""<div>tmpl2</div>""")
    finally:
        file3.close()

    dir3 = os.path.join(self.dirname, 'sub1', 'templates', 'bar')
    os.makedirs(dir3)
    file4 = open(os.path.join(dir3, 'tmpl3.html'), 'w')
    try:
        file4.write("""<div>bar/tmpl3</div>""")
    finally:
        file4.close()

    # Fixed: single-argument os.path.join(dir2) / os.path.join(dir3) calls
    # were no-ops; pass the directory paths directly.
    # NOTE(review): mapping the 'sub2' prefix onto the bar/ subdirectory
    # looks accidental (the assertions never load anything through it), but
    # the mapping is preserved to keep behavior identical.
    loader = TemplateLoader([dir1, TemplateLoader.prefixed(
        sub1=dir2,
        sub2=dir3,
    )])
    tmpl = loader.load('sub1/tmpl1.html')
    self.assertEqual("""<html>
<div>Included foo</div> from sub1
<div>tmpl2</div> from sub1
<div>bar/tmpl3</div> from sub1
</html>""", tmpl.generate().render(encoding=None))
def suite():
    """Build the test suite for the template loader module.

    Combines the doctests embedded in the TemplateLoader module with the
    unit tests defined above.

    Fixed: the local accumulator used to be named ``suite``, shadowing this
    function's own name; renamed for clarity.
    """
    result = unittest.TestSuite()
    result.addTest(doctest.DocTestSuite(TemplateLoader.__module__))
    result.addTest(unittest.makeSuite(TemplateLoaderTestCase, 'test'))
    return result
# Allow running this test module directly; executes the suite() above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import os
import pickle
from StringIO import StringIO
import sys
from tempfile import mkstemp
import unittest
from genshi.core import Markup
from genshi.template.base import Context
from genshi.template.eval import Expression, Suite, Undefined, UndefinedError, \
UNDEFINED
class ExpressionTestCase(unittest.TestCase):
def test_eq(self):
expr = Expression('x,y')
self.assertEqual(expr, Expression('x,y'))
self.assertNotEqual(expr, Expression('y, x'))
def test_hash(self):
expr = Expression('x,y')
self.assertEqual(hash(expr), hash(Expression('x,y')))
self.assertNotEqual(hash(expr), hash(Expression('y, x')))
def test_pickle(self):
expr = Expression('1 < 2')
buf = StringIO()
pickle.dump(expr, buf, 2)
buf.seek(0)
unpickled = pickle.load(buf)
assert unpickled.evaluate({}) is True
def test_name_lookup(self):
self.assertEqual('bar', Expression('foo').evaluate({'foo': 'bar'}))
self.assertEqual(id, Expression('id').evaluate({}))
self.assertEqual('bar', Expression('id').evaluate({'id': 'bar'}))
self.assertEqual(None, Expression('id').evaluate({'id': None}))
def test_builtins(self):
expr = Expression('Markup')
self.assertEqual(expr.evaluate({}), Markup)
def test_str_literal(self):
self.assertEqual('foo', Expression('"foo"').evaluate({}))
self.assertEqual('foo', Expression('"""foo"""').evaluate({}))
self.assertEqual('foo', Expression("'foo'").evaluate({}))
self.assertEqual('foo', Expression("'''foo'''").evaluate({}))
self.assertEqual('foo', Expression("u'foo'").evaluate({}))
self.assertEqual('foo', Expression("r'foo'").evaluate({}))
def test_str_literal_non_ascii(self):
expr = Expression(u"u'\xfe'")
self.assertEqual(u'þ', expr.evaluate({}))
expr = Expression("u'\xfe'")
self.assertEqual(u'þ', expr.evaluate({}))
expr = Expression("'\xc3\xbe'")
self.assertEqual(u'þ', expr.evaluate({}))
def test_num_literal(self):
self.assertEqual(42, Expression("42").evaluate({}))
self.assertEqual(42L, Expression("42L").evaluate({}))
self.assertEqual(.42, Expression(".42").evaluate({}))
self.assertEqual(07, Expression("07").evaluate({}))
self.assertEqual(0xF2, Expression("0xF2").evaluate({}))
self.assertEqual(0XF2, Expression("0XF2").evaluate({}))
def test_dict_literal(self):
self.assertEqual({}, Expression("{}").evaluate({}))
self.assertEqual({'key': True},
Expression("{'key': value}").evaluate({'value': True}))
def test_list_literal(self):
self.assertEqual([], Expression("[]").evaluate({}))
self.assertEqual([1, 2, 3], Expression("[1, 2, 3]").evaluate({}))
self.assertEqual([True],
Expression("[value]").evaluate({'value': True}))
def test_tuple_literal(self):
self.assertEqual((), Expression("()").evaluate({}))
self.assertEqual((1, 2, 3), Expression("(1, 2, 3)").evaluate({}))
self.assertEqual((True,),
Expression("(value,)").evaluate({'value': True}))
def test_unaryop_pos(self):
self.assertEqual(1, Expression("+1").evaluate({}))
self.assertEqual(1, Expression("+x").evaluate({'x': 1}))
def test_unaryop_neg(self):
self.assertEqual(-1, Expression("-1").evaluate({}))
self.assertEqual(-1, Expression("-x").evaluate({'x': 1}))
def test_unaryop_not(self):
self.assertEqual(False, Expression("not True").evaluate({}))
self.assertEqual(False, Expression("not x").evaluate({'x': True}))
def test_unaryop_inv(self):
self.assertEqual(-2, Expression("~1").evaluate({}))
self.assertEqual(-2, Expression("~x").evaluate({'x': 1}))
def test_binop_add(self):
self.assertEqual(3, Expression("2 + 1").evaluate({}))
self.assertEqual(3, Expression("x + y").evaluate({'x': 2, 'y': 1}))
def test_binop_sub(self):
self.assertEqual(1, Expression("2 - 1").evaluate({}))
self.assertEqual(1, Expression("x - y").evaluate({'x': 1, 'y': 1}))
def test_binop_sub(self):
self.assertEqual(1, Expression("2 - 1").evaluate({}))
self.assertEqual(1, Expression("x - y").evaluate({'x': 2, 'y': 1}))
def test_binop_mul(self):
self.assertEqual(4, Expression("2 * 2").evaluate({}))
self.assertEqual(4, Expression("x * y").evaluate({'x': 2, 'y': 2}))
def test_binop_pow(self):
self.assertEqual(4, Expression("2 ** 2").evaluate({}))
self.assertEqual(4, Expression("x ** y").evaluate({'x': 2, 'y': 2}))
def test_binop_div(self):
self.assertEqual(2, Expression("4 / 2").evaluate({}))
self.assertEqual(2, Expression("x / y").evaluate({'x': 4, 'y': 2}))
def test_binop_floordiv(self):
self.assertEqual(1, Expression("3 // 2").evaluate({}))
self.assertEqual(1, Expression("x // y").evaluate({'x': 3, 'y': 2}))
def test_binop_mod(self):
self.assertEqual(1, Expression("3 % 2").evaluate({}))
self.assertEqual(1, Expression("x % y").evaluate({'x': 3, 'y': 2}))
def test_binop_and(self):
self.assertEqual(0, Expression("1 & 0").evaluate({}))
self.assertEqual(0, Expression("x & y").evaluate({'x': 1, 'y': 0}))
def test_binop_or(self):
self.assertEqual(1, Expression("1 | 0").evaluate({}))
self.assertEqual(1, Expression("x | y").evaluate({'x': 1, 'y': 0}))
def test_binop_xor(self):
self.assertEqual(1, Expression("1 ^ 0").evaluate({}))
self.assertEqual(1, Expression("x ^ y").evaluate({'x': 1, 'y': 0}))
def test_binop_contains(self):
self.assertEqual(True, Expression("1 in (1, 2, 3)").evaluate({}))
self.assertEqual(True, Expression("x in y").evaluate({'x': 1,
'y': (1, 2, 3)}))
def test_binop_not_contains(self):
self.assertEqual(True, Expression("4 not in (1, 2, 3)").evaluate({}))
self.assertEqual(True, Expression("x not in y").evaluate({'x': 4,
'y': (1, 2, 3)}))
def test_binop_is(self):
self.assertEqual(True, Expression("1 is 1").evaluate({}))
self.assertEqual(True, Expression("x is y").evaluate({'x': 1, 'y': 1}))
self.assertEqual(False, Expression("1 is 2").evaluate({}))
self.assertEqual(False, Expression("x is y").evaluate({'x': 1, 'y': 2}))
def test_binop_is_not(self):
self.assertEqual(True, Expression("1 is not 2").evaluate({}))
self.assertEqual(True, Expression("x is not y").evaluate({'x': 1,
'y': 2}))
self.assertEqual(False, Expression("1 is not 1").evaluate({}))
self.assertEqual(False, Expression("x is not y").evaluate({'x': 1,
'y': 1}))
def test_boolop_and(self):
self.assertEqual(False, Expression("True and False").evaluate({}))
self.assertEqual(False, Expression("x and y").evaluate({'x': True,
'y': False}))
def test_boolop_or(self):
self.assertEqual(True, Expression("True or False").evaluate({}))
self.assertEqual(True, Expression("x or y").evaluate({'x': True,
'y': False}))
def test_compare_eq(self):
self.assertEqual(True, Expression("1 == 1").evaluate({}))
self.assertEqual(True, Expression("x == y").evaluate({'x': 1, 'y': 1}))
def test_compare_ne(self):
self.assertEqual(False, Expression("1 != 1").evaluate({}))
self.assertEqual(False, Expression("x != y").evaluate({'x': 1, 'y': 1}))
if sys.version < '3':
self.assertEqual(False, Expression("1 <> 1").evaluate({}))
self.assertEqual(False, Expression("x <> y").evaluate({'x': 1, 'y': 1}))
def test_compare_lt(self):
self.assertEqual(True, Expression("1 < 2").evaluate({}))
self.assertEqual(True, Expression("x < y").evaluate({'x': 1, 'y': 2}))
def test_compare_le(self):
self.assertEqual(True, Expression("1 <= 1").evaluate({}))
self.assertEqual(True, Expression("x <= y").evaluate({'x': 1, 'y': 1}))
def test_compare_gt(self):
self.assertEqual(True, Expression("2 > 1").evaluate({}))
self.assertEqual(True, Expression("x > y").evaluate({'x': 2, 'y': 1}))
def test_compare_ge(self):
self.assertEqual(True, Expression("1 >= 1").evaluate({}))
self.assertEqual(True, Expression("x >= y").evaluate({'x': 1, 'y': 1}))
def test_compare_multi(self):
self.assertEqual(True, Expression("1 != 3 == 3").evaluate({}))
self.assertEqual(True, Expression("x != y == y").evaluate({'x': 1,
'y': 3}))
def test_call_function(self):
self.assertEqual(42, Expression("foo()").evaluate({'foo': lambda: 42}))
data = {'foo': 'bar'}
self.assertEqual('BAR', Expression("foo.upper()").evaluate(data))
data = {'foo': {'bar': range(42)}}
self.assertEqual(42, Expression("len(foo.bar)").evaluate(data))
def test_call_keywords(self):
self.assertEqual(42, Expression("foo(x=bar)").evaluate({'foo': lambda x: x,
'bar': 42}))
def test_call_star_args(self):
self.assertEqual(42, Expression("foo(*bar)").evaluate({'foo': lambda x: x,
'bar': [42]}))
def test_call_dstar_args(self):
def foo(x):
return x
expr = Expression("foo(**bar)")
self.assertEqual(42, expr.evaluate({'foo': foo, 'bar': {"x": 42}}))
def test_lambda(self):
data = {'items': range(5)}
expr = Expression("filter(lambda x: x > 2, items)")
self.assertEqual([3, 4], expr.evaluate(data))
def test_list_comprehension(self):
expr = Expression("[n for n in numbers if n < 2]")
self.assertEqual([0, 1], expr.evaluate({'numbers': range(5)}))
expr = Expression("[(i, n + 1) for i, n in enumerate(numbers)]")
self.assertEqual([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)],
expr.evaluate({'numbers': range(5)}))
expr = Expression("[offset + n for n in numbers]")
self.assertEqual([2, 3, 4, 5, 6],
expr.evaluate({'numbers': range(5), 'offset': 2}))
expr = Expression("[n for group in groups for n in group]")
self.assertEqual([0, 1, 0, 1, 2],
expr.evaluate({'groups': [range(2), range(3)]}))
expr = Expression("[(a, b) for a in x for b in y]")
self.assertEqual([('x0', 'y0'), ('x0', 'y1'), ('x1', 'y0'), ('x1', 'y1')],
expr.evaluate({'x': ['x0', 'x1'], 'y': ['y0', 'y1']}))
def test_list_comprehension_with_getattr(self):
items = [{'name': 'a', 'value': 1}, {'name': 'b', 'value': 2}]
expr = Expression("[i.name for i in items if i.value > 1]")
self.assertEqual(['b'], expr.evaluate({'items': items}))
def test_list_comprehension_with_getitem(self):
items = [{'name': 'a', 'value': 1}, {'name': 'b', 'value': 2}]
expr = Expression("[i['name'] for i in items if i['value'] > 1]")
self.assertEqual(['b'], expr.evaluate({'items': items}))
def test_generator_expression(self):
expr = Expression("list(n for n in numbers if n < 2)")
self.assertEqual([0, 1], expr.evaluate({'numbers': range(5)}))
expr = Expression("list((i, n + 1) for i, n in enumerate(numbers))")
self.assertEqual([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)],
expr.evaluate({'numbers': range(5)}))
expr = Expression("list(offset + n for n in numbers)")
self.assertEqual([2, 3, 4, 5, 6],
expr.evaluate({'numbers': range(5), 'offset': 2}))
expr = Expression("list(n for group in groups for n in group)")
self.assertEqual([0, 1, 0, 1, 2],
expr.evaluate({'groups': [range(2), range(3)]}))
expr = Expression("list((a, b) for a in x for b in y)")
self.assertEqual([('x0', 'y0'), ('x0', 'y1'), ('x1', 'y0'), ('x1', 'y1')],
expr.evaluate({'x': ['x0', 'x1'], 'y': ['y0', 'y1']}))
def test_generator_expression_with_getattr(self):
items = [{'name': 'a', 'value': 1}, {'name': 'b', 'value': 2}]
expr = Expression("list(i.name for i in items if i.value > 1)")
self.assertEqual(['b'], expr.evaluate({'items': items}))
def test_generator_expression_with_getitem(self):
items = [{'name': 'a', 'value': 1}, {'name': 'b', 'value': 2}]
expr = Expression("list(i['name'] for i in items if i['value'] > 1)")
self.assertEqual(['b'], expr.evaluate({'items': items}))
if sys.version_info >= (2, 5):
def test_conditional_expression(self):
expr = Expression("'T' if foo else 'F'")
self.assertEqual('T', expr.evaluate({'foo': True}))
self.assertEqual('F', expr.evaluate({'foo': False}))
def test_slice(self):
expr = Expression("numbers[0:2]")
self.assertEqual([0, 1], expr.evaluate({'numbers': range(5)}))
def test_slice_with_vars(self):
expr = Expression("numbers[start:end]")
self.assertEqual([0, 1], expr.evaluate({'numbers': range(5), 'start': 0,
'end': 2}))
def test_slice_copy(self):
expr = Expression("numbers[:]")
self.assertEqual([0, 1, 2, 3, 4], expr.evaluate({'numbers': range(5)}))
def test_slice_stride(self):
expr = Expression("numbers[::stride]")
self.assertEqual([0, 2, 4], expr.evaluate({'numbers': range(5),
'stride': 2}))
def test_slice_negative_start(self):
expr = Expression("numbers[-1:]")
self.assertEqual([4], expr.evaluate({'numbers': range(5)}))
def test_slice_negative_end(self):
expr = Expression("numbers[:-1]")
self.assertEqual([0, 1, 2, 3], expr.evaluate({'numbers': range(5)}))
def test_access_undefined(self):
expr = Expression("nothing", filename='index.html', lineno=50,
lookup='lenient')
retval = expr.evaluate({})
assert isinstance(retval, Undefined)
self.assertEqual('nothing', retval._name)
assert retval._owner is UNDEFINED
def test_getattr_undefined(self):
class Something(object):
def __repr__(self):
return '<Something>'
something = Something()
expr = Expression('something.nil', filename='index.html', lineno=50,
lookup='lenient')
retval = expr.evaluate({'something': something})
assert isinstance(retval, Undefined)
self.assertEqual('nil', retval._name)
assert retval._owner is something
def test_getattr_exception(self):
class Something(object):
def prop_a(self):
raise NotImplementedError
prop_a = property(prop_a)
def prop_b(self):
raise AttributeError
prop_b = property(prop_b)
self.assertRaises(NotImplementedError,
Expression('s.prop_a').evaluate, {'s': Something()})
self.assertRaises(AttributeError,
Expression('s.prop_b').evaluate, {'s': Something()})
def test_getitem_undefined_string(self):
class Something(object):
def __repr__(self):
return '<Something>'
something = Something()
expr = Expression('something["nil"]', filename='index.html', lineno=50,
lookup='lenient')
retval = expr.evaluate({'something': something})
assert isinstance(retval, Undefined)
self.assertEqual('nil', retval._name)
assert retval._owner is something
def test_getitem_exception(self):
class Something(object):
def __getitem__(self, key):
raise NotImplementedError
self.assertRaises(NotImplementedError,
Expression('s["foo"]').evaluate, {'s': Something()})
def test_error_access_undefined(self):
expr = Expression("nothing", filename='index.html', lineno=50,
lookup='strict')
try:
expr.evaluate({})
self.fail('Expected UndefinedError')
except UndefinedError, e:
exc_type, exc_value, exc_traceback = sys.exc_info()
frame = exc_traceback.tb_next
frames = []
while frame.tb_next:
frame = frame.tb_next
frames.append(frame)
self.assertEqual('"nothing" not defined', str(e))
self.assertEqual("<Expression 'nothing'>",
frames[-3].tb_frame.f_code.co_name)
self.assertEqual('index.html',
frames[-3].tb_frame.f_code.co_filename)
self.assertEqual(50, frames[-3].tb_lineno)
def test_error_getattr_undefined(self):
class Something(object):
def __repr__(self):
return '<Something>'
expr = Expression('something.nil', filename='index.html', lineno=50,
lookup='strict')
try:
expr.evaluate({'something': Something()})
self.fail('Expected UndefinedError')
except UndefinedError, e:
self.assertEqual('<Something> has no member named "nil"', str(e))
exc_type, exc_value, exc_traceback = sys.exc_info()
search_string = "<Expression 'something.nil'>"
frame = exc_traceback.tb_next
while frame.tb_next:
frame = frame.tb_next
code = frame.tb_frame.f_code
if code.co_name == search_string:
break
else:
self.fail("never found the frame I was looking for")
self.assertEqual('index.html', code.co_filename)
self.assertEqual(50, frame.tb_lineno)
def test_error_getitem_undefined_string(self):
class Something(object):
def __repr__(self):
return '<Something>'
expr = Expression('something["nil"]', filename='index.html', lineno=50,
lookup='strict')
try:
expr.evaluate({'something': Something()})
self.fail('Expected UndefinedError')
except UndefinedError, e:
self.assertEqual('<Something> has no member named "nil"', str(e))
exc_type, exc_value, exc_traceback = sys.exc_info()
search_string = '''<Expression 'something["nil"]'>'''
frame = exc_traceback.tb_next
while frame.tb_next:
frame = frame.tb_next
code = frame.tb_frame.f_code
if code.co_name == search_string:
break
else:
self.fail("never found the frame I was looking for")
self.assertEqual('index.html', code.co_filename)
self.assertEqual(50, frame.tb_lineno)
class SuiteTestCase(unittest.TestCase):
    """Unit tests for ``genshi.template.eval.Suite``.

    Each test compiles a small block of Python source with ``Suite`` and
    executes it against a dict (or ``Context``), then inspects the resulting
    bindings. The embedded source strings are deliberately flush-left, since
    they must be valid top-level Python.

    NOTE: this module targets Python 2 (``exec`` statement, ``it.next()``,
    ``itertools.ifilter``).
    """

    def test_pickle(self):
        suite = Suite('foo = 42')
        buf = StringIO()
        pickle.dump(suite, buf, 2)
        buf.seek(0)
        unpickled = pickle.load(buf)
        data = {}
        unpickled.execute(data)
        self.assertEqual(42, data['foo'])

    def test_internal_shadowing(self):
        # The context itself is stored in the global execution scope of a suite
        # It used to get stored under the name 'data', which meant the
        # following test would fail, as the user defined 'data' variable
        # shadowed the Genshi one. We now use the name '__data__' to avoid
        # conflicts
        suite = Suite("""data = []
bar = foo
""")
        data = {'foo': 42}
        suite.execute(data)
        self.assertEqual(42, data['bar'])

    def test_assign(self):
        suite = Suite("foo = 42")
        data = {}
        suite.execute(data)
        self.assertEqual(42, data['foo'])

    def test_def(self):
        suite = Suite("def donothing(): pass")
        data = {}
        suite.execute(data)
        assert 'donothing' in data
        self.assertEqual(None, data['donothing']())

    def test_def_with_multiple_statements(self):
        suite = Suite("""
def donothing():
    if True:
        return foo
""")
        data = {'foo': 'bar'}
        suite.execute(data)
        assert 'donothing' in data
        self.assertEqual('bar', data['donothing']())

    def test_def_using_nonlocal(self):
        # The inner function must see (and mutate) the outer suite's binding.
        suite = Suite("""
values = []
def add(value):
    if value not in values:
        values.append(value)
add('foo')
add('bar')
""")
        data = {}
        suite.execute(data)
        self.assertEqual(['foo', 'bar'], data['values'])

    def test_def_some_defaults(self):
        suite = Suite("""
def difference(v1, v2=10):
    return v1 - v2
x = difference(20, 19)
y = difference(20)
""")
        data = {}
        suite.execute(data)
        self.assertEqual(1, data['x'])
        self.assertEqual(10, data['y'])

    def test_def_all_defaults(self):
        suite = Suite("""
def difference(v1=100, v2=10):
    return v1 - v2
x = difference(20, 19)
y = difference(20)
z = difference()
""")
        data = {}
        suite.execute(data)
        self.assertEqual(1, data['x'])
        self.assertEqual(10, data['y'])
        self.assertEqual(90, data['z'])

    def test_def_vararg(self):
        suite = Suite("""
def mysum(*others):
    rv = 0
    for n in others:
        rv = rv + n
    return rv
x = mysum(1, 2, 3)
""")
        data = {}
        suite.execute(data)
        self.assertEqual(6, data['x'])

    def test_def_kwargs(self):
        suite = Suite("""
def smash(**kw):
    return [''.join(i) for i in kw.items()]
x = smash(foo='abc', bar='def')
""")
        data = {}
        suite.execute(data)
        self.assertEqual(['fooabc', 'bardef'], data['x'])

    def test_def_nested(self):
        suite = Suite("""
def doit():
    values = []
    def add(value):
        if value not in values:
            values.append(value)
    add('foo')
    add('bar')
    return values
x = doit()
""")
        data = {}
        suite.execute(data)
        self.assertEqual(['foo', 'bar'], data['x'])

    def test_delete(self):
        suite = Suite("""foo = 42
del foo
""")
        data = {}
        suite.execute(data)
        assert 'foo' not in data

    def test_class(self):
        suite = Suite("class plain(object): pass")
        data = {}
        suite.execute(data)
        assert 'plain' in data

    def test_class_in_def(self):
        suite = Suite("""
def create():
    class Foobar(object):
        def __str__(self):
            return 'foobar'
    return Foobar()
x = create()
""")
        data = {}
        suite.execute(data)
        self.assertEqual('foobar', str(data['x']))

    def test_class_with_methods(self):
        suite = Suite("""class plain(object):
    def donothing():
        pass
""")
        data = {}
        suite.execute(data)
        assert 'plain' in data

    def test_import(self):
        suite = Suite("from itertools import ifilter")
        data = {}
        suite.execute(data)
        assert 'ifilter' in data

    def test_import_star(self):
        suite = Suite("from itertools import *")
        data = Context()
        suite.execute(data)
        assert 'ifilter' in data

    def test_import_in_def(self):
        # An import local to a function must not leak into the context.
        suite = Suite("""def fun():
    from itertools import ifilter
    return ifilter(None, range(3))
""")
        data = Context()
        suite.execute(data)
        assert 'ifilter' not in data
        self.assertEqual([1, 2], list(data['fun']()))

    def test_for(self):
        suite = Suite("""x = []
for i in range(3):
    x.append(i**2)
""")
        data = {}
        suite.execute(data)
        self.assertEqual([0, 1, 4], data['x'])

    def test_for_in_def(self):
        suite = Suite("""def loop():
    for i in range(10):
        if i == 5:
            break
    return i
""")
        data = {}
        suite.execute(data)
        assert 'loop' in data
        self.assertEqual(5, data['loop']())

    def test_if(self):
        suite = Suite("""if foo == 42:
    x = True
""")
        data = {'foo': 42}
        suite.execute(data)
        self.assertEqual(True, data['x'])

    def test_raise(self):
        suite = Suite("""raise NotImplementedError""")
        self.assertRaises(NotImplementedError, suite.execute, {})

    def test_try_except(self):
        # 'somemod' does not exist, so the except branch binds it to None
        # and the else branch is skipped.
        suite = Suite("""try:
    import somemod
except ImportError:
    somemod = None
else:
    somemod.dosth()""")
        data = {}
        suite.execute(data)
        self.assertEqual(None, data['somemod'])

    def test_finally(self):
        suite = Suite("""try:
    x = 2
finally:
    x = None
""")
        data = {}
        suite.execute(data)
        self.assertEqual(None, data['x'])

    def test_while_break(self):
        suite = Suite("""x = 0
while x < 5:
    x += step
    if x == 4:
        break
""")
        data = {'step': 2}
        suite.execute(data)
        self.assertEqual(4, data['x'])

    def test_augmented_attribute_assignment(self):
        suite = Suite("d['k'] += 42")
        d = {"k": 1}
        suite.execute({"d": d})
        self.assertEqual(43, d["k"])

    def test_local_augmented_assign(self):
        Suite("x = 1; x += 42; assert x == 43").execute({})

    def test_augmented_assign_in_def(self):
        d = {}
        Suite("""def foo():
    i = 1
    i += 1
    return i
x = foo()""").execute(d)
        self.assertEqual(2, d['x'])

    def test_augmented_assign_in_loop_in_def(self):
        d = {}
        Suite("""def foo():
    i = 0
    for n in range(5):
        i += n
    return i
x = foo()""").execute(d)
        self.assertEqual(10, d['x'])

    def test_assign_in_list(self):
        suite = Suite("[d['k']] = 'foo',; assert d['k'] == 'foo'")
        d = {"k": "bar"}
        suite.execute({"d": d})
        self.assertEqual("foo", d["k"])

    def test_exec(self):
        # Python 2 'exec' statement inside a suite.
        suite = Suite("x = 1; exec d['k']; assert x == 42, x")
        suite.execute({"d": {"k": "x = 42"}})

    def test_return(self):
        suite = Suite("""
def f():
    return v
assert f() == 42
""")
        suite.execute({"v": 42})

    def test_assign_to_dict_item(self):
        suite = Suite("d['k'] = 'foo'")
        data = {'d': {}}
        suite.execute(data)
        self.assertEqual('foo', data['d']['k'])

    def test_assign_to_attribute(self):
        class Something(object): pass
        something = Something()
        suite = Suite("obj.attr = 'foo'")
        data = {"obj": something}
        suite.execute(data)
        self.assertEqual('foo', something.attr)

    def test_delattr(self):
        class Something(object):
            def __init__(self):
                self.attr = 'foo'
        obj = Something()
        Suite("del obj.attr").execute({'obj': obj})
        self.failIf(hasattr(obj, 'attr'))

    def test_delitem(self):
        d = {'k': 'foo'}
        Suite("del d['k']").execute({'d': d})
        self.failIf('k' in d, repr(d))

    if sys.version_info >= (2, 5):
        # with-statements and yield expressions require Python 2.5+.
        def test_with_statement(self):
            fd, path = mkstemp()
            f = os.fdopen(fd, "w")
            try:
                f.write('foo\nbar\n')
                f.seek(0)
                f.close()

                d = {'path': path}
                suite = Suite("""from __future__ import with_statement
lines = []
with open(path) as file:
    for line in file:
        lines.append(line)
""")
                suite.execute(d)
                self.assertEqual(['foo\n', 'bar\n'], d['lines'])
            finally:
                os.remove(path)

        def test_yield_expression(self):
            d = {}
            suite = Suite("""results = []
def counter(maximum):
    i = 0
    while i < maximum:
        val = (yield i)
        if val is not None:
            i = val
        else:
            i += 1
it = counter(5)
results.append(it.next())
results.append(it.send(3))
results.append(it.next())
""")
            suite.execute(d)
            self.assertEqual([0, 3, 4], d['results'])
def suite():
    """Build the test suite for the expression evaluation module.

    Combines the module's doctests with the Expression and Suite test cases.

    Fixed: the local accumulator used to be named ``suite``, shadowing this
    function's own name; renamed for clarity.
    """
    result = unittest.TestSuite()
    result.addTest(doctest.DocTestSuite(Expression.__module__))
    result.addTest(unittest.makeSuite(ExpressionTestCase, 'test'))
    result.addTest(unittest.makeSuite(SuiteTestCase, 'test'))
    return result
# Allow running this test module directly; executes the suite() above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Plain text templating engine.
This module implements two template language syntaxes, at least for a certain
transitional period. `OldTextTemplate` (aliased to just `TextTemplate`) defines
a syntax that was inspired by Cheetah/Velocity. `NewTextTemplate` on the other
hand is inspired by the syntax of the Django template language, which has more
explicit delimiting of directives, and is more flexible with regards to
white space and line breaks.
In a future release, `OldTextTemplate` will be phased out in favor of
`NewTextTemplate`, as the names imply. Therefore the new syntax is strongly
recommended for new projects, and existing projects may want to migrate to the
new syntax to remain compatible with future Genshi releases.
"""
import re
from genshi.core import TEXT
from genshi.template.base import BadDirectiveError, Template, \
TemplateSyntaxError, EXEC, INCLUDE, SUB
from genshi.template.eval import Suite
from genshi.template.directives import *
from genshi.template.directives import Directive
from genshi.template.interpolation import interpolate
__all__ = ['NewTextTemplate', 'OldTextTemplate', 'TextTemplate']
__docformat__ = 'restructuredtext en'
class NewTextTemplate(Template):
r"""Implementation of a simple text-based template engine. This class will
replace `OldTextTemplate` in a future release.
It uses a more explicit delimiting style for directives: instead of the old
style which required putting directives on separate lines that were prefixed
with a ``#`` sign, directives and commenbtsr are enclosed in delimiter pairs
(by default ``{% ... %}`` and ``{# ... #}``, respectively).
Variable substitution uses the same interpolation syntax as for markup
languages: simple references are prefixed with a dollar sign, more complex
expression enclosed in curly braces.
>>> tmpl = NewTextTemplate('''Dear $name,
...
... {# This is a comment #}
... We have the following items for you:
... {% for item in items %}
... * ${'Item %d' % item}
... {% end %}
... ''')
>>> print(tmpl.generate(name='Joe', items=[1, 2, 3]).render(encoding=None))
Dear Joe,
<BLANKLINE>
<BLANKLINE>
We have the following items for you:
<BLANKLINE>
* Item 1
<BLANKLINE>
* Item 2
<BLANKLINE>
* Item 3
<BLANKLINE>
<BLANKLINE>
By default, no spaces or line breaks are removed. If a line break should
not be included in the output, prefix it with a backslash:
>>> tmpl = NewTextTemplate('''Dear $name,
...
... {# This is a comment #}\
... We have the following items for you:
... {% for item in items %}\
... * $item
... {% end %}\
... ''')
>>> print(tmpl.generate(name='Joe', items=[1, 2, 3]).render(encoding=None))
Dear Joe,
<BLANKLINE>
We have the following items for you:
* 1
* 2
* 3
<BLANKLINE>
Backslashes are also used to escape the start delimiter of directives and
comments:
>>> tmpl = NewTextTemplate('''Dear $name,
...
... \{# This is a comment #}
... We have the following items for you:
... {% for item in items %}\
... * $item
... {% end %}\
... ''')
>>> print(tmpl.generate(name='Joe', items=[1, 2, 3]).render(encoding=None))
Dear Joe,
<BLANKLINE>
{# This is a comment #}
We have the following items for you:
* 1
* 2
* 3
<BLANKLINE>
:since: version 0.5
"""
directives = [('def', DefDirective),
('when', WhenDirective),
('otherwise', OtherwiseDirective),
('for', ForDirective),
('if', IfDirective),
('choose', ChooseDirective),
('with', WithDirective)]
serializer = 'text'
_DIRECTIVE_RE = r'((?<!\\)%s\s*(\w+)\s*(.*?)\s*%s|(?<!\\)%s.*?%s)'
_ESCAPE_RE = r'\\\n|\\(\\)|\\(%s)|\\(%s)'
def __init__(self, source, filepath=None, filename=None, loader=None,
encoding=None, lookup='strict', allow_exec=False,
delims=('{%', '%}', '{#', '#}')):
self.delimiters = delims
Template.__init__(self, source, filepath=filepath, filename=filename,
loader=loader, encoding=encoding, lookup=lookup)
def _get_delims(self):
return self._delims
def _set_delims(self, delims):
if len(delims) != 4:
raise ValueError('delimiers tuple must have exactly four elements')
self._delims = delims
self._directive_re = re.compile(self._DIRECTIVE_RE % tuple(
[re.escape(d) for d in delims]
), re.DOTALL)
self._escape_re = re.compile(self._ESCAPE_RE % tuple(
[re.escape(d) for d in delims[::2]]
))
delimiters = property(_get_delims, _set_delims, """\
The delimiters for directives and comments. This should be a four item tuple
of the form ``(directive_start, directive_end, comment_start,
comment_end)``, where each item is a string.
""")
    def _parse(self, source, encoding):
        """Parse the template from text input.

        Splits *source* into a flat list of ``(kind, data, pos)`` stream
        events: literal text runs go through ``interpolate()`` (which expands
        placeholders), directives are matched by the compiled delimiter
        regex, and nested directive bodies are folded into SUB events.

        :param source: a file-like object providing the template text
        :param encoding: the encoding used to decode byte string input
        :return: the list of stream events
        """
        stream = [] # list of events of the "compiled" template
        dirmap = {} # temporary mapping of directives to elements
        depth = 0
        source = source.read()
        if isinstance(source, str):
            # NOTE(review): Python 2 semantics -- byte strings are decoded
            # here, with undecodable bytes replaced rather than raising.
            source = source.decode(encoding or 'utf-8', 'replace')
        offset = 0  # index of the first character not yet consumed
        lineno = 1
        _escape_sub = self._escape_re.sub
        def _escape_repl(mo):
            # Resolve an escape sequence: an escaped delimiter (e.g. "\{%")
            # yields the literal delimiter; an escaped newline yields ''.
            groups = [g for g in mo.groups() if g]
            if not groups:
                return ''
            return groups[0]
        for idx, mo in enumerate(self._directive_re.finditer(source)):
            start, end = mo.span(1)
            if start > offset:
                # Emit the literal text preceding this directive, with
                # escapes resolved and placeholders interpolated.
                text = _escape_sub(_escape_repl, source[offset:start])
                for kind, data, pos in interpolate(text, self.filepath, lineno,
                                                   lookup=self.lookup):
                    stream.append((kind, data, pos))
                lineno += len(text.splitlines())
            lineno += len(source[start:end].splitlines())
            command, value = mo.group(2, 3)
            if command == 'include':
                pos = (self.filename, lineno, 0)
                value = list(interpolate(value, self.filepath, lineno, 0,
                                         lookup=self.lookup))
                if len(value) == 1 and value[0][0] is TEXT:
                    # Static include target: store it as a plain string.
                    value = value[0][1]
                stream.append((INCLUDE, (value, None, []), pos))
            elif command == 'python':
                if not self.allow_exec:
                    raise TemplateSyntaxError('Python code blocks not allowed',
                                              self.filepath, lineno)
                try:
                    suite = Suite(value, self.filepath, lineno,
                                  lookup=self.lookup)
                except SyntaxError, err:
                    raise TemplateSyntaxError(err, self.filepath,
                                              lineno + (err.lineno or 1) - 1)
                pos = (self.filename, lineno, 0)
                stream.append((EXEC, suite, pos))
            elif command == 'end':
                # Close the innermost open directive: fold all events
                # collected since it was opened into a single SUB event.
                depth -= 1
                if depth in dirmap:
                    directive, start_offset = dirmap.pop(depth)
                    substream = stream[start_offset:]
                    stream[start_offset:] = [(SUB, ([directive], substream),
                                              (self.filepath, lineno, 0))]
            elif command:
                # An opening directive: remember where its body starts.
                cls = self.get_directive(command)
                if cls is None:
                    raise BadDirectiveError(command)
                directive = 0, cls, value, None, (self.filepath, lineno, 0)
                dirmap[depth] = (directive, len(stream))
                depth += 1
            offset = end
        if offset < len(source):
            # Emit any trailing literal text after the last directive.
            text = _escape_sub(_escape_repl, source[offset:])
            for kind, data, pos in interpolate(text, self.filepath, lineno,
                                               lookup=self.lookup):
                stream.append((kind, data, pos))
        return stream
class OldTextTemplate(Template):
    """Legacy implementation of the old syntax text-based templates. This class
    is provided in a transition phase for backwards compatibility. New code
    should use the `NewTextTemplate` class and the improved syntax it provides.

    >>> tmpl = OldTextTemplate('''Dear $name,
    ...
    ... We have the following items for you:
    ... #for item in items
    ...  * $item
    ... #end
    ...
    ... All the best,
    ... Foobar''')
    >>> print(tmpl.generate(name='Joe', items=[1, 2, 3]).render(encoding=None))
    Dear Joe,
    <BLANKLINE>
    We have the following items for you:
     * 1
     * 2
     * 3
    <BLANKLINE>
    All the best,
    Foobar
    """
    # Same directive set as NewTextTemplate; only the markup syntax differs.
    directives = [('def', DefDirective),
                  ('when', WhenDirective),
                  ('otherwise', OtherwiseDirective),
                  ('for', ForDirective),
                  ('if', IfDirective),
                  ('choose', ChooseDirective),
                  ('with', WithDirective)]
    serializer = 'text'
    # Matches "#end ..." lines (group 1) or "#<directive> ..." / "##comment"
    # lines (group 2) at the start of a line, unless the "#" is escaped with
    # a backslash.
    _DIRECTIVE_RE = re.compile(r'(?:^[ \t]*(?<!\\)#(end).*\n?)|'
                               r'(?:^[ \t]*(?<!\\)#((?:\w+|#).*)\n?)',
                               re.MULTILINE)
    def _parse(self, source, encoding):
        """Parse the template from text input.

        :param source: a file-like object providing the template text
        :param encoding: the encoding used to decode byte string input
        :return: the list of ``(kind, data, pos)`` stream events
        """
        stream = [] # list of events of the "compiled" template
        dirmap = {} # temporary mapping of directives to elements
        depth = 0
        source = source.read()
        if isinstance(source, str):
            # NOTE(review): Python 2 semantics -- byte strings are decoded,
            # with undecodable bytes replaced rather than raising.
            source = source.decode(encoding or 'utf-8', 'replace')
        offset = 0  # index of the first character not yet consumed
        lineno = 1
        for idx, mo in enumerate(self._DIRECTIVE_RE.finditer(source)):
            start, end = mo.span()
            if start > offset:
                # Emit interpolated literal text preceding this directive.
                text = source[offset:start]
                for kind, data, pos in interpolate(text, self.filepath, lineno,
                                                   lookup=self.lookup):
                    stream.append((kind, data, pos))
                lineno += len(text.splitlines())
            # Strip indentation and the leading "#" to get the directive text.
            text = source[start:end].lstrip()[1:]
            lineno += len(text.splitlines())
            directive = text.split(None, 1)
            if len(directive) > 1:
                command, value = directive
            else:
                command, value = directive[0], None
            if command == 'end':
                # Fold the events collected since the matching opening
                # directive into a single SUB event.
                depth -= 1
                if depth in dirmap:
                    directive, start_offset = dirmap.pop(depth)
                    substream = stream[start_offset:]
                    stream[start_offset:] = [(SUB, ([directive], substream),
                                              (self.filepath, lineno, 0))]
            elif command == 'include':
                pos = (self.filename, lineno, 0)
                stream.append((INCLUDE, (value.strip(), None, []), pos))
            elif command != '#':
                # "##" lines are comments; anything else is a directive.
                cls = self.get_directive(command)
                if cls is None:
                    raise BadDirectiveError(command)
                directive = 0, cls, value, None, (self.filepath, lineno, 0)
                dirmap[depth] = (directive, len(stream))
                depth += 1
            offset = end
        if offset < len(source):
            # Trailing text: "\#" escapes are unescaped only here; earlier
            # chunks rely on the regex lookbehind to skip escaped "#".
            text = source[offset:].replace('\\#', '#')
            for kind, data, pos in interpolate(text, self.filepath, lineno,
                                               lookup=self.lookup):
                stream.append((kind, data, pos))
        return stream
# Backwards-compatible alias: ``TextTemplate`` still refers to the legacy
# old-syntax implementation; use ``NewTextTemplate`` for the new syntax.
TextTemplate = OldTextTemplate
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006 Matthew Good
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Basic support for the template engine plugin API used by TurboGears and
CherryPy/Buffet.
"""
from genshi.input import ET, HTML, XML
from genshi.output import DocType
from genshi.template.base import Template
from genshi.template.loader import TemplateLoader
from genshi.template.markup import MarkupTemplate
from genshi.template.text import TextTemplate, NewTextTemplate
__all__ = ['ConfigurationError', 'AbstractTemplateEnginePlugin',
'MarkupTemplateEnginePlugin', 'TextTemplateEnginePlugin']
__docformat__ = 'restructuredtext en'
class ConfigurationError(ValueError):
    """Exception raised when invalid plugin options are encountered.

    Raised during plugin initialization when a ``genshi.*`` option has a
    value that cannot be interpreted (for example an unknown doctype,
    output format, cache size or lookup-errors mode).
    """
class AbstractTemplateEnginePlugin(object):
    """Implementation of the plugin API.

    Subclasses set `template_class` and `extension`; this base class parses
    the ``genshi.*`` options and builds the shared `TemplateLoader`.
    """

    template_class = None  # the Template subclass instantiated by the plugin
    extension = None       # filename extension appended in package naming mode

    def __init__(self, extra_vars_func=None, options=None):
        """Create the plugin and configure its template loader.

        :param extra_vars_func: optional callable returning a dict of extra
                                variables merged into every rendering
        :param options: a dict of ``genshi.*`` configuration options
        :raises ConfigurationError: if an option value is invalid
        """
        self.get_extra_vars = extra_vars_func
        if options is None:
            options = {}
        self.options = options

        self.default_encoding = options.get('genshi.default_encoding', 'utf-8')

        auto_reload = options.get('genshi.auto_reload', '1')
        if isinstance(auto_reload, basestring):
            auto_reload = auto_reload.lower() in ('1', 'on', 'yes', 'true')

        search_path = [p for p in
                       options.get('genshi.search_path', '').split(':') if p]
        # With no explicit search path, template names are resolved as
        # dotted package paths via pkg_resources (see load_template).
        self.use_package_naming = not search_path

        try:
            max_cache_size = int(options.get('genshi.max_cache_size', 25))
        except ValueError:
            raise ConfigurationError('Invalid value for max_cache_size: "%s"' %
                                     options.get('genshi.max_cache_size'))

        loader_callback = options.get('genshi.loader_callback', None)
        if loader_callback and not hasattr(loader_callback, '__call__'):
            raise ConfigurationError('loader callback must be a function')

        lookup_errors = options.get('genshi.lookup_errors', 'strict')
        if lookup_errors not in ('lenient', 'strict'):
            raise ConfigurationError('Unknown lookup errors mode "%s"' %
                                     lookup_errors)

        # Bug fix: parse genshi.allow_exec like the other boolean options.
        # The old ``bool(...)`` conversion treated any non-empty string --
        # including "false", "off" and "0" -- as True, so a config file
        # trying to disable code execution silently left it enabled (and
        # the accompanying except ValueError branch was unreachable, since
        # bool() never raises ValueError).
        allow_exec = options.get('genshi.allow_exec', True)
        if isinstance(allow_exec, basestring):
            allow_exec = allow_exec.lower() in ('1', 'on', 'yes', 'true')
        else:
            allow_exec = bool(allow_exec)

        self.loader = TemplateLoader([p for p in search_path if p],
                                     auto_reload=auto_reload,
                                     max_cache_size=max_cache_size,
                                     default_class=self.template_class,
                                     variable_lookup=lookup_errors,
                                     allow_exec=allow_exec,
                                     callback=loader_callback)

    def load_template(self, templatename, template_string=None):
        """Find a template specified in python 'dot' notation, or load one from
        a string.

        :param templatename: the template name (a path, or a dotted package
                             path in package naming mode)
        :param template_string: optional literal template source; when given
                                it takes precedence over *templatename*
        """
        if template_string is not None:
            return self.template_class(template_string)
        if self.use_package_naming:
            divider = templatename.rfind('.')
            if divider >= 0:
                from pkg_resources import resource_filename
                package = templatename[:divider]
                basename = templatename[divider + 1:] + self.extension
                templatename = resource_filename(package, basename)
        return self.loader.load(templatename)

    def _get_render_options(self, format=None, fragment=False):
        """Build the keyword arguments passed to ``Stream.render()``.

        :param format: the serialization method, defaulting to
                       `default_format`
        :param fragment: unused here; subclasses may alter options for
                         fragment rendering
        """
        if format is None:
            format = self.default_format
        kwargs = {'method': format}
        if self.default_encoding:
            kwargs['encoding'] = self.default_encoding
        return kwargs

    def render(self, info, format=None, fragment=False, template=None):
        """Render the template to a string using the provided info."""
        kwargs = self._get_render_options(format=format, fragment=fragment)
        return self.transform(info, template).render(**kwargs)

    def transform(self, info, template):
        """Render the output to an event stream."""
        if not isinstance(template, Template):
            template = self.load_template(template)
        return template.generate(**info)
class MarkupTemplateEnginePlugin(AbstractTemplateEnginePlugin):
    """Implementation of the plugin API for markup templates."""

    template_class = MarkupTemplate
    extension = '.html'

    def __init__(self, extra_vars_func=None, options=None):
        """Configure the default doctype and serialization format."""
        AbstractTemplateEnginePlugin.__init__(self, extra_vars_func, options)

        self.default_doctype = None
        doctype_name = self.options.get('genshi.default_doctype')
        if doctype_name:
            resolved = DocType.get(doctype_name)
            if resolved is None:
                raise ConfigurationError('Unknown doctype %r' % doctype_name)
            self.default_doctype = resolved

        fmt = self.options.get('genshi.default_format', 'html').lower()
        if fmt not in ('html', 'xhtml', 'xml', 'text'):
            raise ConfigurationError('Unknown output format %r' % fmt)
        self.default_format = fmt

    def _get_render_options(self, format=None, fragment=False):
        """Extend the base render options with a doctype for full documents."""
        kwargs = super(MarkupTemplateEnginePlugin,
                       self)._get_render_options(format, fragment)
        if not fragment and self.default_doctype:
            kwargs['doctype'] = self.default_doctype
        return kwargs

    def transform(self, info, template):
        """Render the output to an event stream."""
        namespace = {'ET': ET, 'HTML': HTML, 'XML': XML}
        extra = self.get_extra_vars
        if extra:
            namespace.update(extra())
        namespace.update(info)
        return super(MarkupTemplateEnginePlugin, self).transform(namespace,
                                                                 template)
class TextTemplateEnginePlugin(AbstractTemplateEnginePlugin):
    """Implementation of the plugin API for text templates."""

    template_class = TextTemplate
    extension = '.txt'
    default_format = 'text'

    def __init__(self, extra_vars_func=None, options=None):
        """Select the old or new text syntax, then delegate to the base."""
        if options is None:
            options = {}
        use_new = options.get('genshi.new_text_syntax')
        if isinstance(use_new, basestring):
            use_new = use_new.lower() in ('1', 'on', 'yes', 'true')
        if use_new:
            # Override the class-level default on this instance only.
            self.template_class = NewTextTemplate
        AbstractTemplateEnginePlugin.__init__(self, extra_vars_func, options)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Emulation of the proper abstract syntax tree API for Python 2.4."""
import compiler
import compiler.ast
from genshi.template import _ast24 as _ast
__all__ = ['_ast', 'parse']
__docformat__ = 'restructuredtext en'
def _new(cls, *args, **kwargs):
ret = cls()
if ret._fields:
for attr, value in zip(ret._fields, args):
if attr in kwargs:
raise ValueError('Field set both in args and kwargs')
setattr(ret, attr, value)
for attr in kwargs:
if (getattr(ret, '_fields', None) and attr in ret._fields) \
or (getattr(ret, '_attributes', None) and
attr in ret._attributes):
setattr(ret, attr, kwargs[attr])
return ret
class ASTUpgrader(object):
    """Transformer changing structure of Python 2.4 ASTs to
    Python 2.5 ones.

    Transforms ``compiler.ast`` Abstract Syntax Tree to builtin ``_ast``.
    It can use fake ``_ast`` classes and this way allow ``_ast`` emulation
    in Python 2.4.

    Implementation notes:

    - ``self.lines`` is a stack of source line numbers; every node created
      via ``self._new`` is stamped with the line currently on top.
    - ``self.out_flags`` carries the assignment context flag (``OP_ASSIGN``,
      ``OP_DELETE``, ``OP_APPLY``) from leaf assignment targets back up to
      the collection visitors (``visit_AssList`` etc.), which need a single
      context for the whole collection.
    """

    def __init__(self):
        self.out_flags = None  # last OP_* flag seen while visiting a target
        self.lines = [-1]      # stack of current source line numbers

    def _new(self, *args, **kwargs):
        # Create an _ast node stamped with the current source line number.
        return _new(lineno=self.lines[-1], *args, **kwargs)

    def visit(self, node):
        """Dispatch *node* to its ``visit_<ClassName>`` handler."""
        if node is None:
            return None
        if type(node) is tuple:
            return tuple([self.visit(n) for n in node])
        lno = getattr(node, 'lineno', None)
        if lno is not None:
            self.lines.append(lno)
        visitor = getattr(self, 'visit_%s' % node.__class__.__name__, None)
        if visitor is None:
            raise Exception('Unhandled node type %r' % type(node))
        retval = visitor(node)
        if lno is not None:
            self.lines.pop()
        return retval

    def visit_Module(self, node):
        body = self.visit(node.node)
        if node.doc:
            # compiler.ast keeps the docstring separately; _ast expects it
            # as the first statement expression.
            body = [self._new(_ast.Expr, self._new(_ast.Str, node.doc))] + body
        return self._new(_ast.Module, body)

    def visit_Expression(self, node):
        return self._new(_ast.Expression, self.visit(node.node))

    def _extract_args(self, node):
        """Convert a compiler.ast argument list into an ``_ast.arguments``."""
        tab = node.argnames[:]
        if node.flags & compiler.ast.CO_VARKEYWORDS:
            kwarg = tab[-1]
            tab = tab[:-1]
        else:
            kwarg = None
        if node.flags & compiler.ast.CO_VARARGS:
            vararg = tab[-1]
            tab = tab[:-1]
        else:
            vararg = None
        def _tup(t):
            # Rebuild a (possibly nested) tuple-unpacking parameter.
            if isinstance(t, str):
                return self._new(_ast.Name, t, _ast.Store())
            elif isinstance(t, tuple):
                elts = [_tup(x) for x in t]
                return self._new(_ast.Tuple, elts, _ast.Store())
            else:
                # Bug fix: was ``raise NotImplemented`` -- NotImplemented is
                # a constant, not an exception, so raising it produced a
                # confusing TypeError instead of this error.
                raise NotImplementedError('cannot convert argument %r' % (t,))
        args = []
        for arg in tab:
            if isinstance(arg, str):
                args.append(self._new(_ast.Name, arg, _ast.Param()))
            elif isinstance(arg, tuple):
                args.append(_tup(arg))
            else:
                assert False, node.__class__
        defaults = [self.visit(d) for d in node.defaults]
        return self._new(_ast.arguments, args, vararg, kwarg, defaults)

    def visit_Function(self, node):
        if getattr(node, 'decorators', ()):
            decorators = [self.visit(d) for d in node.decorators.nodes]
        else:
            decorators = []
        args = self._extract_args(node)
        body = self.visit(node.code)
        if node.doc:
            body = [self._new(_ast.Expr, self._new(_ast.Str, node.doc))] + body
        return self._new(_ast.FunctionDef, node.name, args, body, decorators)

    def visit_Class(self, node):
        bases = [self.visit(b) for b in node.bases]
        body = self.visit(node.code)
        if node.doc:
            body = [self._new(_ast.Expr, self._new(_ast.Str, node.doc))] + body
        return self._new(_ast.ClassDef, node.name, bases, body)

    def visit_Return(self, node):
        return self._new(_ast.Return, self.visit(node.value))

    def visit_Assign(self, node):
        targets = [self.visit(t) for t in node.nodes]
        return self._new(_ast.Assign, targets, self.visit(node.expr))

    aug_operators = {
        '+=': _ast.Add,
        '/=': _ast.Div,
        '//=': _ast.FloorDiv,
        '<<=': _ast.LShift,
        '%=': _ast.Mod,
        '*=': _ast.Mult,
        '**=': _ast.Pow,
        '>>=': _ast.RShift,
        '-=': _ast.Sub,
    }

    def visit_AugAssign(self, node):
        target = self.visit(node.node)
        # Because it's AugAssign target can't be list nor tuple
        # so we only have to change context of one node
        target.ctx = _ast.Store()
        op = self.aug_operators[node.op]()
        return self._new(_ast.AugAssign, target, op, self.visit(node.expr))

    # compiler.ast distinguishes ``print x,`` (Print) from ``print x``
    # (Printnl); _ast uses a single Print node with an ``nl`` flag.
    def _visit_Print(nl):
        def _visit(self, node):
            values = [self.visit(v) for v in node.nodes]
            return self._new(_ast.Print, self.visit(node.dest), values, nl)
        return _visit
    visit_Print = _visit_Print(False)
    visit_Printnl = _visit_Print(True)
    del _visit_Print

    def visit_For(self, node):
        return self._new(_ast.For, self.visit(node.assign), self.visit(node.list),
                         self.visit(node.body), self.visit(node.else_))

    def visit_While(self, node):
        return self._new(_ast.While, self.visit(node.test), self.visit(node.body),
                         self.visit(node.else_))

    def visit_If(self, node):
        # compiler.ast stores an if/elif chain as a flat list of tests;
        # _ast nests each elif inside the previous node's orelse.
        def _level(tests, else_):
            test = self.visit(tests[0][0])
            body = self.visit(tests[0][1])
            if len(tests) == 1:
                orelse = self.visit(else_)
            else:
                orelse = [_level(tests[1:], else_)]
            return self._new(_ast.If, test, body, orelse)
        return _level(node.tests, node.else_)

    def visit_With(self, node):
        return self._new(_ast.With, self.visit(node.expr),
                         self.visit(node.vars), self.visit(node.body))

    def visit_Raise(self, node):
        return self._new(_ast.Raise, self.visit(node.expr1),
                         self.visit(node.expr2), self.visit(node.expr3))

    def visit_TryExcept(self, node):
        handlers = []
        for type, name, body in node.handlers:
            handlers.append(self._new(_ast.excepthandler, self.visit(type),
                                      self.visit(name), self.visit(body)))
        return self._new(_ast.TryExcept, self.visit(node.body),
                         handlers, self.visit(node.else_))

    def visit_TryFinally(self, node):
        return self._new(_ast.TryFinally, self.visit(node.body),
                         self.visit(node.final))

    def visit_Assert(self, node):
        return self._new(_ast.Assert, self.visit(node.test), self.visit(node.fail))

    def visit_Import(self, node):
        names = [self._new(_ast.alias, n[0], n[1]) for n in node.names]
        return self._new(_ast.Import, names)

    def visit_From(self, node):
        names = [self._new(_ast.alias, n[0], n[1]) for n in node.names]
        # NOTE(review): relative-import level is hard-coded to 0 here --
        # presumably because the 2.4 compiler AST carries no level info.
        return self._new(_ast.ImportFrom, node.modname, names, 0)

    def visit_Exec(self, node):
        return self._new(_ast.Exec, self.visit(node.expr),
                         self.visit(node.locals), self.visit(node.globals))

    def visit_Global(self, node):
        return self._new(_ast.Global, node.names[:])

    def visit_Discard(self, node):
        return self._new(_ast.Expr, self.visit(node.expr))

    # Statement kinds that carry no data map 1:1 onto empty _ast nodes.
    def _map_class(to):
        def _visit(self, node):
            return self._new(to)
        return _visit
    visit_Pass = _map_class(_ast.Pass)
    visit_Break = _map_class(_ast.Break)
    visit_Continue = _map_class(_ast.Continue)

    def _visit_BinOperator(opcls):
        def _visit(self, node):
            return self._new(_ast.BinOp, self.visit(node.left),
                             opcls(), self.visit(node.right))
        return _visit
    visit_Add = _visit_BinOperator(_ast.Add)
    visit_Div = _visit_BinOperator(_ast.Div)
    visit_FloorDiv = _visit_BinOperator(_ast.FloorDiv)
    visit_LeftShift = _visit_BinOperator(_ast.LShift)
    visit_Mod = _visit_BinOperator(_ast.Mod)
    visit_Mul = _visit_BinOperator(_ast.Mult)
    visit_Power = _visit_BinOperator(_ast.Pow)
    visit_RightShift = _visit_BinOperator(_ast.RShift)
    visit_Sub = _visit_BinOperator(_ast.Sub)
    del _visit_BinOperator

    # compiler.ast flattens chained bitwise operators (a | b | c) into one
    # node with many children; _ast wants a left-nested BinOp tree.
    def _visit_BitOperator(opcls):
        def _visit(self, node):
            def _make(nodes):
                if len(nodes) == 1:
                    return self.visit(nodes[0])
                left = _make(nodes[:-1])
                right = self.visit(nodes[-1])
                return self._new(_ast.BinOp, left, opcls(), right)
            return _make(node.nodes)
        return _visit
    visit_Bitand = _visit_BitOperator(_ast.BitAnd)
    visit_Bitor = _visit_BitOperator(_ast.BitOr)
    visit_Bitxor = _visit_BitOperator(_ast.BitXor)
    del _visit_BitOperator

    def _visit_UnaryOperator(opcls):
        def _visit(self, node):
            return self._new(_ast.UnaryOp, opcls(), self.visit(node.expr))
        return _visit
    visit_Invert = _visit_UnaryOperator(_ast.Invert)
    visit_Not = _visit_UnaryOperator(_ast.Not)
    visit_UnaryAdd = _visit_UnaryOperator(_ast.UAdd)
    visit_UnarySub = _visit_UnaryOperator(_ast.USub)
    del _visit_UnaryOperator

    def _visit_BoolOperator(opcls):
        def _visit(self, node):
            values = [self.visit(n) for n in node.nodes]
            return self._new(_ast.BoolOp, opcls(), values)
        return _visit
    visit_And = _visit_BoolOperator(_ast.And)
    visit_Or = _visit_BoolOperator(_ast.Or)
    del _visit_BoolOperator

    cmp_operators = {
        '==': _ast.Eq,
        '!=': _ast.NotEq,
        '<': _ast.Lt,
        '<=': _ast.LtE,
        '>': _ast.Gt,
        '>=': _ast.GtE,
        'is': _ast.Is,
        'is not': _ast.IsNot,
        'in': _ast.In,
        'not in': _ast.NotIn,
    }

    def visit_Compare(self, node):
        left = self.visit(node.expr)
        ops = []
        comparators = []
        for optype, expr in node.ops:
            ops.append(self.cmp_operators[optype]())
            comparators.append(self.visit(expr))
        return self._new(_ast.Compare, left, ops, comparators)

    def visit_Lambda(self, node):
        args = self._extract_args(node)
        body = self.visit(node.code)
        return self._new(_ast.Lambda, args, body)

    def visit_IfExp(self, node):
        return self._new(_ast.IfExp, self.visit(node.test), self.visit(node.then),
                         self.visit(node.else_))

    def visit_Dict(self, node):
        keys = [self.visit(x[0]) for x in node.items]
        values = [self.visit(x[1]) for x in node.items]
        return self._new(_ast.Dict, keys, values)

    def visit_ListComp(self, node):
        generators = [self.visit(q) for q in node.quals]
        return self._new(_ast.ListComp, self.visit(node.expr), generators)

    def visit_GenExprInner(self, node):
        generators = [self.visit(q) for q in node.quals]
        return self._new(_ast.GeneratorExp, self.visit(node.expr), generators)

    def visit_GenExpr(self, node):
        return self.visit(node.code)

    def visit_GenExprFor(self, node):
        ifs = [self.visit(i) for i in node.ifs]
        return self._new(_ast.comprehension, self.visit(node.assign),
                         self.visit(node.iter), ifs)

    def visit_ListCompFor(self, node):
        ifs = [self.visit(i) for i in node.ifs]
        return self._new(_ast.comprehension, self.visit(node.assign),
                         self.visit(node.list), ifs)

    def visit_GenExprIf(self, node):
        return self.visit(node.test)
    visit_ListCompIf = visit_GenExprIf

    def visit_Yield(self, node):
        return self._new(_ast.Yield, self.visit(node.value))

    def visit_CallFunc(self, node):
        args = []
        keywords = []
        for arg in node.args:
            if isinstance(arg, compiler.ast.Keyword):
                keywords.append(self._new(_ast.keyword, arg.name,
                                          self.visit(arg.expr)))
            else:
                args.append(self.visit(arg))
        return self._new(_ast.Call, self.visit(node.node), args, keywords,
                         self.visit(node.star_args), self.visit(node.dstar_args))

    def visit_Backquote(self, node):
        return self._new(_ast.Repr, self.visit(node.expr))

    def visit_Const(self, node):
        if node.value is None: # appears in slices
            return None
        elif isinstance(node.value, basestring):
            return self._new(_ast.Str, node.value)
        else:
            return self._new(_ast.Num, node.value)

    def visit_Name(self, node):
        return self._new(_ast.Name, node.name, _ast.Load())

    def visit_Getattr(self, node):
        return self._new(_ast.Attribute, self.visit(node.expr), node.attrname,
                         _ast.Load())

    def visit_Tuple(self, node):
        nodes = [self.visit(n) for n in node.nodes]
        return self._new(_ast.Tuple, nodes, _ast.Load())

    def visit_List(self, node):
        nodes = [self.visit(n) for n in node.nodes]
        return self._new(_ast.List, nodes, _ast.Load())

    def get_ctx(self, flags):
        """Map a compiler.ast OP_* flag string to an _ast context node."""
        if flags == 'OP_DELETE':
            return _ast.Del()
        elif flags == 'OP_APPLY':
            return _ast.Load()
        elif flags == 'OP_ASSIGN':
            return _ast.Store()
        else:
            # FIXME Exception here
            assert False, repr(flags)

    def visit_AssName(self, node):
        self.out_flags = node.flags
        ctx = self.get_ctx(node.flags)
        return self._new(_ast.Name, node.name, ctx)

    def visit_AssAttr(self, node):
        self.out_flags = node.flags
        ctx = self.get_ctx(node.flags)
        return self._new(_ast.Attribute, self.visit(node.expr),
                         node.attrname, ctx)

    # Assignment-target collections: all elements must share one context,
    # which the leaf visitors report through self.out_flags.
    def _visit_AssCollection(cls):
        def _visit(self, node):
            flags = None
            elts = []
            for n in node.nodes:
                elts.append(self.visit(n))
                if flags is None:
                    flags = self.out_flags
                else:
                    assert flags == self.out_flags
            self.out_flags = flags
            ctx = self.get_ctx(flags)
            return self._new(cls, elts, ctx)
        return _visit
    visit_AssList = _visit_AssCollection(_ast.List)
    visit_AssTuple = _visit_AssCollection(_ast.Tuple)
    del _visit_AssCollection

    def visit_Slice(self, node):
        lower = self.visit(node.lower)
        upper = self.visit(node.upper)
        ctx = self.get_ctx(node.flags)
        self.out_flags = node.flags
        return self._new(_ast.Subscript, self.visit(node.expr),
                         self._new(_ast.Slice, lower, upper, None), ctx)

    def visit_Subscript(self, node):
        ctx = self.get_ctx(node.flags)
        subs = [self.visit(s) for s in node.subs]
        # Multiple subscripts with any real slice become an ExtSlice;
        # otherwise they collapse to a plain Index (possibly over a tuple).
        advanced = (_ast.Slice, _ast.Ellipsis)
        slices = []
        nonindex = False
        for sub in subs:
            if isinstance(sub, advanced):
                nonindex = True
                slices.append(sub)
            else:
                slices.append(self._new(_ast.Index, sub))
        if len(slices) == 1:
            slice = slices[0]
        elif nonindex:
            slice = self._new(_ast.ExtSlice, slices)
        else:
            slice = self._new(_ast.Tuple, slices, _ast.Load())
        self.out_flags = node.flags
        return self._new(_ast.Subscript, self.visit(node.expr), slice, ctx)

    def visit_Sliceobj(self, node):
        # Pad to exactly three parts (lower, upper, step).
        a = [self.visit(n) for n in node.nodes + [None]*(3 - len(node.nodes))]
        return self._new(_ast.Slice, a[0], a[1], a[2])

    def visit_Ellipsis(self, node):
        return self._new(_ast.Ellipsis)

    def visit_Stmt(self, node):
        def _check_del(n):
            # del x is just AssName('x', 'OP_DELETE')
            # we want to transform it to Delete([Name('x', Del())])
            dcls = (_ast.Name, _ast.List, _ast.Subscript, _ast.Attribute)
            if isinstance(n, dcls) and isinstance(n.ctx, _ast.Del):
                return self._new(_ast.Delete, [n])
            elif isinstance(n, _ast.Tuple) and isinstance(n.ctx, _ast.Del):
                # unpack last tuple to avoid making del (x, y, z,);
                # out of del x, y, z; (there's no difference between
                # this two in compiler.ast)
                return self._new(_ast.Delete, n.elts)
            else:
                return n
        def _keep(n):
            # Drop statements that reduced to nothing (e.g. a bare None
            # constant expression from a slice placeholder).
            if isinstance(n, _ast.Expr) and n.value is None:
                return False
            else:
                return True
        return [s for s in [_check_del(self.visit(n)) for n in node.nodes]
                if _keep(s)]
def parse(source, mode):
    """Parse *source* with the Python 2.4 ``compiler`` package and upgrade
    the resulting tree to the ``_ast``-style structure.

    :param source: the source code string to parse
    :param mode: the compile mode (e.g. 'exec' or 'eval'), passed through to
                 ``compiler.parse``
    :return: the upgraded AST
    """
    node = compiler.parse(source, mode)
    return ASTUpgrader().visit(node)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Basic templating functionality."""
from collections import deque
import os
from StringIO import StringIO
import sys
from genshi.core import Attrs, Stream, StreamEventKind, START, TEXT, _ensure
from genshi.input import ParseError
__all__ = ['Context', 'DirectiveFactory', 'Template', 'TemplateError',
'TemplateRuntimeError', 'TemplateSyntaxError', 'BadDirectiveError']
__docformat__ = 'restructuredtext en'
class TemplateError(Exception):
    """Base exception class for errors related to template processing."""

    def __init__(self, message, filename=None, lineno=-1, offset=-1):
        """Create the exception.

        The string form of the exception includes the filename and line
        number whenever either a real filename or a non-negative line
        number is supplied.

        :param message: the error message
        :param filename: the filename of the template
        :param lineno: the number of line in the template at which the error
                       occurred
        :param offset: the column number at which the error occurred
        """
        if filename is None:
            filename = '<string>'
        self.msg = message  #: the error message string
        display = message
        if filename != '<string>' or lineno >= 0:
            display = '%s (%s, line %d)' % (self.msg, filename, lineno)
        Exception.__init__(self, display)
        self.filename = filename  #: the name of the template file
        self.lineno = lineno  #: the number of the line containing the error
        self.offset = offset  #: the offset on the line
class TemplateSyntaxError(TemplateError):
    """Exception raised when an expression in a template causes a Python syntax
    error, or the template is not well-formed.
    """

    def __init__(self, message, filename=None, lineno=-1, offset=-1):
        """Create the exception

        :param message: the error message (may also be a `SyntaxError`
                        instance, whose redundant "(line N)" suffix is
                        stripped)
        :param filename: the filename of the template
        :param lineno: the number of line in the template at which the error
                       occurred
        :param offset: the column number at which the error occurred
        """
        if isinstance(message, SyntaxError) and message.lineno is not None:
            message = str(message).replace(' (line %d)' % message.lineno, '')
        # Bug fix: forward ``offset`` too -- it was previously dropped, so
        # the column information never reached the base class and
        # ``self.offset`` was always -1.
        TemplateError.__init__(self, message, filename, lineno, offset)
class BadDirectiveError(TemplateSyntaxError):
    """Exception raised when an unknown directive is encountered when parsing
    a template.

    An unknown directive is any attribute using the namespace for directives,
    with a local name that doesn't match any registered directive.
    """

    def __init__(self, name, filename=None, lineno=-1):
        """Create the exception

        :param name: the name of the directive
        :param filename: the filename of the template
        :param lineno: the number of line in the template at which the error
                       occurred
        """
        TemplateSyntaxError.__init__(self, 'bad directive "%s"' % name,
                                     filename, lineno)
class TemplateRuntimeError(TemplateError):
    """Exception raised when the evaluation of a Python expression in a
    template causes an error.
    """
class Context(object):
    """Container for template input data.

    A context is a stack of scopes, each a plain dictionary.  Template
    directives such as loops push a scope holding data that should only be
    visible inside the loop, and pop it when the loop terminates.  Lookups
    walk the stack from the innermost scope outward.

    >>> ctxt = Context(one='foo', other=1)
    >>> ctxt.get('one')
    'foo'
    >>> ctxt.push(dict(one='frost'))
    >>> ctxt.get('one')
    'frost'
    >>> ctxt.get('other')
    1
    >>> ctxt.pop()
    {'one': 'frost'}
    >>> ctxt.get('one')
    'foo'
    """

    def __init__(self, **data):
        """Initialize the template context with the given keyword arguments as
        data.
        """
        self.frames = deque([data])
        # Bind push/pop straight to the deque for speed; these instance
        # attributes shadow the documented stub methods below.
        self.pop = self.frames.popleft
        self.push = self.frames.appendleft
        self._match_templates = []
        self._choice_stack = []

        # Helper functions made available to template expressions.
        def defined(name):
            """Return whether a variable with the specified name exists in the
            expression scope."""
            return name in self

        def value_of(name, default=None):
            """If a variable of the specified name is defined, return its value.
            Otherwise, return the provided default value, or ``None``."""
            return self.get(name, default)

        data.setdefault('defined', defined)
        data.setdefault('value_of', value_of)

    def __repr__(self):
        return repr(list(self.frames))

    def __contains__(self, key):
        """Return whether a variable exists in any of the scopes.

        :param key: the name of the variable
        """
        found_in = self._find(key)[1]
        return found_in is not None
    has_key = __contains__

    def __delitem__(self, key):
        """Remove a variable from all scopes.

        :param key: the name of the variable
        """
        for scope in self.frames:
            scope.pop(key, None)

    def __getitem__(self, key):
        """Get a variables's value, starting at the current scope and going
        upward.

        :param key: the name of the variable
        :return: the variable value
        :raises KeyError: if the requested variable wasn't found in any scope
        """
        value, scope = self._find(key)
        if scope is None:
            raise KeyError(key)
        return value

    def __len__(self):
        """Return the number of distinctly named variables in the context.

        :return: the number of variables in the context
        """
        return len(self.keys())

    def __setitem__(self, key, value):
        """Set a variable in the current scope.

        :param key: the name of the variable
        :param value: the variable value
        """
        self.frames[0][key] = value

    def _find(self, key, default=None):
        """Retrieve a given variable's value and the frame it was found in.

        Intended primarily for internal use by directives.

        :param key: the name of the variable
        :param default: the default value to return when the variable is not
                        found
        """
        for scope in self.frames:
            try:
                return scope[key], scope
            except KeyError:
                continue
        return default, None

    def get(self, key, default=None):
        """Get a variable's value, starting at the current scope and going
        upward.

        :param key: the name of the variable
        :param default: the default value to return when the variable is not
                        found
        """
        value, scope = self._find(key)
        if scope is not None:
            return value
        return default

    def keys(self):
        """Return the name of all variables in the context.

        :return: a list of variable names
        """
        seen = []
        for scope in self.frames:
            seen.extend([name for name in scope if name not in seen])
        return seen

    def items(self):
        """Return a list of ``(name, value)`` tuples for all variables in the
        context.

        :return: a list of variables
        """
        return [(name, self.get(name)) for name in self.keys()]

    def update(self, mapping):
        """Update the context from the mapping provided."""
        self.frames[0].update(mapping)

    # The following stubs exist for documentation purposes only; they are
    # shadowed by the deque-bound instance attributes set in __init__.
    def push(self, data):
        """Push a new scope on the stack.

        :param data: the data dictionary to push on the context stack.
        """

    def pop(self):
        """Pop the top-most scope from the stack."""
def _apply_directives(stream, directives, ctxt, vars):
"""Apply the given directives to the stream.
:param stream: the stream the directives should be applied to
:param directives: the list of directives to apply
:param ctxt: the `Context`
:param vars: additional variables that should be available when Python
code is executed
:return: the stream with the given directives applied
"""
if directives:
stream = directives[0](iter(stream), directives[1:], ctxt, **vars)
return stream
def _eval_expr(expr, ctxt, vars=None):
"""Evaluate the given `Expression` object.
:param expr: the expression to evaluate
:param ctxt: the `Context`
:param vars: additional variables that should be available to the
expression
:return: the result of the evaluation
"""
if vars:
ctxt.push(vars)
retval = expr.evaluate(ctxt)
if vars:
ctxt.pop()
return retval
def _exec_suite(suite, ctxt, vars=None):
"""Execute the given `Suite` object.
:param suite: the code suite to execute
:param ctxt: the `Context`
:param vars: additional variables that should be available to the
code
"""
if vars:
ctxt.push(vars)
ctxt.push({})
suite.execute(ctxt)
if vars:
top = ctxt.pop()
ctxt.pop()
ctxt.frames[0].update(top)
class DirectiveFactoryMeta(type):
    """Meta class for directive factories.

    When a class declares a ``directives`` attribute, this metaclass derives
    two lookup structures from it: ``_dir_by_name`` (name -> class mapping)
    and ``_dir_order`` (the directive classes in declaration order).
    """

    def __new__(mcs, name, bases, namespace):
        if 'directives' in namespace:
            declared = namespace['directives']
            namespace['_dir_by_name'] = dict(declared)
            namespace['_dir_order'] = [entry[1] for entry in declared]
        return type.__new__(mcs, name, bases, namespace)
class DirectiveFactory(object):
    """Base for classes that provide a set of template directives.

    :since: version 0.6
    """
    __metaclass__ = DirectiveFactoryMeta

    directives = []
    """A list of ``(name, cls)`` tuples that define the set of directives
    provided by this factory.
    """

    def get_directive(self, name):
        """Return the directive class for the given name.

        :param name: the directive name as used in the template
        :return: the directive class
        :see: `Directive`
        """
        return self._dir_by_name.get(name)

    def get_directive_index(self, dir_cls):
        """Return a key for the given directive class that should be used to
        sort it among other directives on the same `SUB` event.

        The default implementation simply returns the index of the directive in
        the `directives` list.

        :param dir_cls: the directive class
        :return: the sort key
        """
        try:
            return self._dir_order.index(dir_cls)
        except ValueError:
            # Directives not declared by this factory sort after all others.
            return len(self._dir_order)
class Template(DirectiveFactory):
    """Abstract template base class.

    This class implements most of the template processing model, but does not
    specify the syntax of templates.
    """

    EXEC = StreamEventKind('EXEC')
    """Stream event kind representing a Python code suite to execute."""

    EXPR = StreamEventKind('EXPR')
    """Stream event kind representing a Python expression."""

    INCLUDE = StreamEventKind('INCLUDE')
    """Stream event kind representing the inclusion of another template."""

    SUB = StreamEventKind('SUB')
    """Stream event kind representing a nested stream to which one or more
    directives should be applied.
    """

    serializer = None
    _number_conv = unicode # function used to convert numbers to event data

    def __init__(self, source, filepath=None, filename=None, loader=None,
                 encoding=None, lookup='strict', allow_exec=True):
        """Initialize a template from either a string, a file-like object, or
        an already parsed markup stream.

        :param source: a string, file-like object, or markup stream to read the
                       template from
        :param filepath: the absolute path to the template file
        :param filename: the path to the template file relative to the search
                         path
        :param loader: the `TemplateLoader` to use for loading included
                       templates
        :param encoding: the encoding of the `source`
        :param lookup: the variable lookup mechanism; either "strict" (the
                       default), "lenient", or a custom lookup class
        :param allow_exec: whether Python code blocks in templates should be
                           allowed

        :note: Changed in 0.5: Added the `allow_exec` argument
        """
        self.filepath = filepath or filename
        self.filename = filename
        self.loader = loader
        self.lookup = lookup
        self.allow_exec = allow_exec
        self._init_filters()
        self._prepared = False

        if isinstance(source, basestring):
            # Wrap plain strings so _parse always sees a file-like object.
            source = StringIO(source)
        else:
            # No-op: source is already a file-like object or an event stream.
            source = source
        try:
            self._stream = self._parse(source, encoding)
        except ParseError, e:
            raise TemplateSyntaxError(e.msg, self.filepath, e.lineno, e.offset)

    def __getstate__(self):
        # Filters are dropped when pickling (they hold bound methods) and are
        # rebuilt by __setstate__.
        state = self.__dict__.copy()
        state['filters'] = []
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self._init_filters()

    def __repr__(self):
        return '<%s "%s">' % (type(self).__name__, self.filename)

    def _init_filters(self):
        # _flatten always runs; _include only makes sense with a loader.
        self.filters = [self._flatten]
        if self.loader:
            self.filters.append(self._include)

    @property
    def stream(self):
        # Lazily run the prepare step (directive attachment) exactly once.
        if not self._prepared:
            self._stream = list(self._prepare(self._stream))
            self._prepared = True
        return self._stream

    def _parse(self, source, encoding):
        """Parse the template.

        The parsing stage parses the template and constructs a list of
        directives that will be executed in the render stage. The input is
        split up into literal output (text that does not depend on the context
        data) and directives or expressions.

        :param source: a file-like object containing the XML source of the
                       template, or an XML event stream
        :param encoding: the encoding of the `source`
        """
        # Subclasses define the concrete template syntax.
        raise NotImplementedError

    def _prepare(self, stream):
        """Call the `attach` method of every directive found in the template.

        :param stream: the event stream of the template
        """
        from genshi.template.loader import TemplateNotFound
        for kind, data, pos in stream:
            if kind is SUB:
                # Attach each directive to the nested stream in sorted order;
                # attach() may rewrite the substream and/or drop the directive.
                directives = []
                substream = data[1]
                for _, cls, value, namespaces, pos in sorted(data[0]):
                    directive, substream = cls.attach(self, substream, value,
                                                      namespaces, pos)
                    if directive:
                        directives.append(directive)
                substream = self._prepare(substream)
                if directives:
                    yield kind, (directives, list(substream)), pos
                else:
                    # No directives survived: splice the substream inline.
                    for event in substream:
                        yield event
            else:
                if kind is INCLUDE:
                    href, cls, fallback = data
                    if isinstance(href, basestring) and \
                            not getattr(self.loader, 'auto_reload', True):
                        # If the path to the included template is static, and
                        # auto-reloading is disabled on the template loader,
                        # the template is inlined into the stream
                        try:
                            tmpl = self.loader.load(href, relative_to=pos[0],
                                                    cls=cls or self.__class__)
                            for event in tmpl.stream:
                                yield event
                        except TemplateNotFound:
                            if fallback is None:
                                raise
                            for event in self._prepare(fallback):
                                yield event
                        continue
                    elif fallback:
                        # Otherwise the include is performed at run time
                        data = href, cls, list(self._prepare(fallback))
                yield kind, data, pos

    def generate(self, *args, **kwargs):
        """Apply the template to the given context data.

        Any keyword arguments are made available to the template as context
        data.

        Only one positional argument is accepted: if it is provided, it must be
        an instance of the `Context` class, and keyword arguments are ignored.
        This calling style is used for internal processing.

        :return: a markup event stream representing the result of applying
                 the template to the context data.
        """
        vars = {}
        if args:
            assert len(args) == 1
            ctxt = args[0]
            if ctxt is None:
                ctxt = Context(**kwargs)
            else:
                # Existing context: keyword args become filter-local vars.
                vars = kwargs
            assert isinstance(ctxt, Context)
        else:
            ctxt = Context(**kwargs)

        # Chain all registered stream filters over the prepared stream.
        stream = self.stream
        for filter_ in self.filters:
            stream = filter_(iter(stream), ctxt, **vars)
        return Stream(stream, self.serializer)

    def _flatten(self, stream, ctxt, **vars):
        # Iterative flattening: nested streams (from expressions or SUB
        # events) are processed by pushing the current iterator on a stack
        # and resuming it when the nested stream is exhausted (the for/else
        # below fires only when the inner loop finishes without break).
        number_conv = self._number_conv
        stack = []
        push = stack.append
        pop = stack.pop
        stream = iter(stream)

        while 1:
            for kind, data, pos in stream:
                if kind is START and data[1]:
                    # Attributes may still contain expressions in start tags at
                    # this point, so do some evaluation
                    tag, attrs = data
                    new_attrs = []
                    for name, value in attrs:
                        if type(value) is list: # this is an interpolated string
                            values = [event[1]
                                for event in self._flatten(value, ctxt, **vars)
                                if event[0] is TEXT and event[1] is not None
                            ]
                            if not values:
                                # Attribute evaluated to nothing: drop it.
                                continue
                            value = ''.join(values)
                        new_attrs.append((name, value))
                    yield kind, (tag, Attrs(new_attrs)), pos

                elif kind is EXPR:
                    result = _eval_expr(data, ctxt, vars)
                    if result is not None:
                        # First check for a string, otherwise the iterable test
                        # below succeeds, and the string will be chopped up into
                        # individual characters
                        if isinstance(result, basestring):
                            yield TEXT, result, pos
                        elif isinstance(result, (int, float, long)):
                            yield TEXT, number_conv(result), pos
                        elif hasattr(result, '__iter__'):
                            # Nested stream: suspend the current one.
                            push(stream)
                            stream = _ensure(result)
                            break
                        else:
                            yield TEXT, unicode(result), pos

                elif kind is SUB:
                    # This event is a list of directives and a list of nested
                    # events to which those directives should be applied
                    push(stream)
                    stream = _apply_directives(data[1], data[0], ctxt, vars)
                    break

                elif kind is EXEC:
                    _exec_suite(data, ctxt, vars)

                else:
                    yield kind, data, pos

            else:
                # Inner loop completed without break: resume the enclosing
                # stream, or stop if there is none.
                if not stack:
                    break
                stream = pop()

    def _include(self, stream, ctxt, **vars):
        """Internal stream filter that performs inclusion of external
        template files.
        """
        from genshi.template.loader import TemplateNotFound

        for event in stream:
            if event[0] is INCLUDE:
                href, cls, fallback = event[1]
                if not isinstance(href, basestring):
                    # Dynamic include target: evaluate the href event stream
                    # down to its text content.
                    parts = []
                    for subkind, subdata, subpos in self._flatten(href, ctxt,
                                                                  **vars):
                        if subkind is TEXT:
                            parts.append(subdata)
                    href = ''.join([x for x in parts if x is not None])
                try:
                    tmpl = self.loader.load(href, relative_to=event[2][0],
                                            cls=cls or self.__class__)
                    for event in tmpl.generate(ctxt, **vars):
                        yield event
                except TemplateNotFound:
                    if fallback is None:
                        raise
                    # Render the fallback content through the same filters.
                    for filter_ in self.filters:
                        fallback = filter_(iter(fallback), ctxt, **vars)
                    for event in fallback:
                        yield event
            else:
                yield event
# Module-level aliases for the stream event kinds defined on Template, so
# other code can reference them without going through the class.
EXEC = Template.EXEC
EXPR = Template.EXPR
INCLUDE = Template.INCLUDE
SUB = Template.SUB
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Implementation of the template engine."""
from genshi.template.base import Context, Template, TemplateError, \
TemplateRuntimeError, TemplateSyntaxError, \
BadDirectiveError
from genshi.template.loader import TemplateLoader, TemplateNotFound
from genshi.template.markup import MarkupTemplate
from genshi.template.text import TextTemplate, OldTextTemplate, NewTextTemplate
__docformat__ = 'restructuredtext en'
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Support classes for generating code from abstract syntax trees."""
try:
    # The _ast module is available in the standard library on Python >= 2.5.
    import _ast
except ImportError:
    # Fall back to a bundled compatibility shim, which provides both the AST
    # node classes and a parse() function.
    from genshi.template.ast24 import _ast, parse
else:
    def parse(source, mode):
        """Parse *source* and return its abstract syntax tree.

        :param source: the source code string
        :param mode: the compile mode ('exec', 'eval' or 'single', as for
                     the built-in compile())
        """
        return compile(source, '', mode, _ast.PyCF_ONLY_AST)

__docformat__ = 'restructuredtext en'
class ASTCodeGenerator(object):
"""General purpose base class for AST transformations.
Every visitor method can be overridden to return an AST node that has been
altered or replaced in some way.
"""
def __init__(self, tree):
self.lines_info = []
self.line_info = None
self.code = ''
self.line = None
self.last = None
self.indent = 0
self.blame_stack = []
self.visit(tree)
if self.line.strip():
self.code += self.line + '\n'
self.lines_info.append(self.line_info)
self.line = None
self.line_info = None
def _change_indent(self, delta):
self.indent += delta
def _new_line(self):
if self.line is not None:
self.code += self.line + '\n'
self.lines_info.append(self.line_info)
self.line = ' '*4*self.indent
if len(self.blame_stack) == 0:
self.line_info = []
self.last = None
else:
self.line_info = [(0, self.blame_stack[-1],)]
self.last = self.blame_stack[-1]
def _write(self, s):
if len(s) == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
else:
if self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += s
def visit(self, node):
if node is None:
return None
if type(node) is tuple:
return tuple([self.visit(n) for n in node])
try:
self.blame_stack.append((node.lineno, node.col_offset,))
info = True
except AttributeError:
info = False
visitor = getattr(self, 'visit_%s' % node.__class__.__name__, None)
if visitor is None:
raise Exception('Unhandled node type %r' % type(node))
ret = visitor(node)
if info:
self.blame_stack.pop()
return ret
def visit_Module(self, node):
for n in node.body:
self.visit(n)
visit_Interactive = visit_Module
visit_Suite = visit_Module
def visit_Expression(self, node):
self._new_line()
return self.visit(node.body)
# arguments = (expr* args, identifier? vararg,
# identifier? kwarg, expr* defaults)
def visit_arguments(self, node):
first = True
no_default_count = len(node.args) - len(node.defaults)
for i, arg in enumerate(node.args):
if not first:
self._write(', ')
else:
first = False
self.visit(arg)
if i >= no_default_count:
self._write('=')
self.visit(node.defaults[i - no_default_count])
if getattr(node, 'vararg', None):
if not first:
self._write(', ')
else:
first = False
self._write('*' + node.vararg)
if getattr(node, 'kwarg', None):
if not first:
self._write(', ')
else:
first = False
self._write('**' + node.kwarg)
# FunctionDef(identifier name, arguments args,
# stmt* body, expr* decorators)
def visit_FunctionDef(self, node):
for decorator in getattr(node, 'decorators', ()):
self._new_line()
self._write('@')
self.visit(decorator)
self._new_line()
self._write('def ' + node.name + '(')
self.visit(node.args)
self._write('):')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
# ClassDef(identifier name, expr* bases, stmt* body)
def visit_ClassDef(self, node):
self._new_line()
self._write('class ' + node.name)
if node.bases:
self._write('(')
self.visit(node.bases[0])
for base in node.bases[1:]:
self._write(', ')
self.visit(base)
self._write(')')
self._write(':')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
# Return(expr? value)
def visit_Return(self, node):
self._new_line()
self._write('return')
if getattr(node, 'value', None):
self._write(' ')
self.visit(node.value)
# Delete(expr* targets)
def visit_Delete(self, node):
self._new_line()
self._write('del ')
self.visit(node.targets[0])
for target in node.targets[1:]:
self._write(', ')
self.visit(target)
# Assign(expr* targets, expr value)
def visit_Assign(self, node):
self._new_line()
for target in node.targets:
self.visit(target)
self._write(' = ')
self.visit(node.value)
# AugAssign(expr target, operator op, expr value)
def visit_AugAssign(self, node):
self._new_line()
self.visit(node.target)
self._write(' ' + self.binary_operators[node.op.__class__] + '= ')
self.visit(node.value)
# Print(expr? dest, expr* values, bool nl)
def visit_Print(self, node):
self._new_line()
self._write('print')
if getattr(node, 'dest', None):
self._write(' >> ')
self.visit(node.dest)
if getattr(node, 'values', None):
self._write(', ')
else:
self._write(' ')
if getattr(node, 'values', None):
self.visit(node.values[0])
for value in node.values[1:]:
self._write(', ')
self.visit(value)
if not node.nl:
self._write(',')
# For(expr target, expr iter, stmt* body, stmt* orelse)
def visit_For(self, node):
self._new_line()
self._write('for ')
self.visit(node.target)
self._write(' in ')
self.visit(node.iter)
self._write(':')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
if getattr(node, 'orelse', None):
self._new_line()
self._write('else:')
self._change_indent(1)
for statement in node.orelse:
self.visit(statement)
self._change_indent(-1)
# While(expr test, stmt* body, stmt* orelse)
def visit_While(self, node):
self._new_line()
self._write('while ')
self.visit(node.test)
self._write(':')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
if getattr(node, 'orelse', None):
self._new_line()
self._write('else:')
self._change_indent(1)
for statement in node.orelse:
self.visit(statement)
self._change_indent(-1)
# If(expr test, stmt* body, stmt* orelse)
def visit_If(self, node):
self._new_line()
self._write('if ')
self.visit(node.test)
self._write(':')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
if getattr(node, 'orelse', None):
self._new_line()
self._write('else:')
self._change_indent(1)
for statement in node.orelse:
self.visit(statement)
self._change_indent(-1)
# With(expr context_expr, expr? optional_vars, stmt* body)
def visit_With(self, node):
self._new_line()
self._write('with ')
self.visit(node.context_expr)
if getattr(node, 'optional_vars', None):
self._write(' as ')
self.visit(node.optional_vars)
self._write(':')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
# Raise(expr? type, expr? inst, expr? tback)
def visit_Raise(self, node):
self._new_line()
self._write('raise')
if not node.type:
return
self._write(' ')
self.visit(node.type)
if not node.inst:
return
self._write(', ')
self.visit(node.inst)
if not node.tback:
return
self._write(', ')
self.visit(node.tback)
# TryExcept(stmt* body, excepthandler* handlers, stmt* orelse)
def visit_TryExcept(self, node):
self._new_line()
self._write('try:')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
if getattr(node, 'handlers', None):
for handler in node.handlers:
self.visit(handler)
self._new_line()
if getattr(node, 'orelse', None):
self._write('else:')
self._change_indent(1)
for statement in node.orelse:
self.visit(statement)
self._change_indent(-1)
# excepthandler = (expr? type, expr? name, stmt* body)
def visit_ExceptHandler(self, node):
self._new_line()
self._write('except')
if getattr(node, 'type', None):
self._write(' ')
self.visit(node.type)
if getattr(node, 'name', None):
self._write(', ')
self.visit(node.name)
self._write(':')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
visit_excepthandler = visit_ExceptHandler
# TryFinally(stmt* body, stmt* finalbody)
def visit_TryFinally(self, node):
self._new_line()
self._write('try:')
self._change_indent(1)
for statement in node.body:
self.visit(statement)
self._change_indent(-1)
if getattr(node, 'finalbody', None):
self._new_line()
self._write('finally:')
self._change_indent(1)
for statement in node.finalbody:
self.visit(statement)
self._change_indent(-1)
# Assert(expr test, expr? msg)
def visit_Assert(self, node):
self._new_line()
self._write('assert ')
self.visit(node.test)
if getattr(node, 'msg', None):
self._write(', ')
self.visit(node.msg)
def visit_alias(self, node):
self._write(node.name)
if getattr(node, 'asname', None):
self._write(' as ')
self._write(node.asname)
# Import(alias* names)
def visit_Import(self, node):
self._new_line()
self._write('import ')
self.visit(node.names[0])
for name in node.names[1:]:
self._write(', ')
self.visit(name)
# ImportFrom(identifier module, alias* names, int? level)
def visit_ImportFrom(self, node):
self._new_line()
self._write('from ')
if node.level:
self._write('.' * node.level)
self._write(node.module)
self._write(' import ')
self.visit(node.names[0])
for name in node.names[1:]:
self._write(', ')
self.visit(name)
# Exec(expr body, expr? globals, expr? locals)
def visit_Exec(self, node):
self._new_line()
self._write('exec ')
self.visit(node.body)
if not node.globals:
return
self._write(', ')
self.visit(node.globals)
if not node.locals:
return
self._write(', ')
self.visit(node.locals)
# Global(identifier* names)
def visit_Global(self, node):
self._new_line()
self._write('global ')
self.visit(node.names[0])
for name in node.names[1:]:
self._write(', ')
self.visit(name)
# Expr(expr value)
def visit_Expr(self, node):
self._new_line()
self.visit(node.value)
# Pass
def visit_Pass(self, node):
self._new_line()
self._write('pass')
# Break
def visit_Break(self, node):
self._new_line()
self._write('break')
# Continue
def visit_Continue(self, node):
self._new_line()
self._write('continue')
### EXPRESSIONS
def with_parens(f):
def _f(self, node):
self._write('(')
f(self, node)
self._write(')')
return _f
bool_operators = {_ast.And: 'and', _ast.Or: 'or'}
# BoolOp(boolop op, expr* values)
@with_parens
def visit_BoolOp(self, node):
joiner = ' ' + self.bool_operators[node.op.__class__] + ' '
self.visit(node.values[0])
for value in node.values[1:]:
self._write(joiner)
self.visit(value)
binary_operators = {
_ast.Add: '+',
_ast.Sub: '-',
_ast.Mult: '*',
_ast.Div: '/',
_ast.Mod: '%',
_ast.Pow: '**',
_ast.LShift: '<<',
_ast.RShift: '>>',
_ast.BitOr: '|',
_ast.BitXor: '^',
_ast.BitAnd: '&',
_ast.FloorDiv: '//'
}
# BinOp(expr left, operator op, expr right)
@with_parens
def visit_BinOp(self, node):
self.visit(node.left)
self._write(' ' + self.binary_operators[node.op.__class__] + ' ')
self.visit(node.right)
unary_operators = {
_ast.Invert: '~',
_ast.Not: 'not',
_ast.UAdd: '+',
_ast.USub: '-',
}
# UnaryOp(unaryop op, expr operand)
def visit_UnaryOp(self, node):
self._write(self.unary_operators[node.op.__class__] + ' ')
self.visit(node.operand)
# Lambda(arguments args, expr body)
@with_parens
def visit_Lambda(self, node):
self._write('lambda ')
self.visit(node.args)
self._write(': ')
self.visit(node.body)
# IfExp(expr test, expr body, expr orelse)
@with_parens
def visit_IfExp(self, node):
self.visit(node.body)
self._write(' if ')
self.visit(node.test)
self._write(' else ')
self.visit(node.orelse)
# Dict(expr* keys, expr* values)
def visit_Dict(self, node):
self._write('{')
for key, value in zip(node.keys, node.values):
self.visit(key)
self._write(': ')
self.visit(value)
self._write(', ')
self._write('}')
# ListComp(expr elt, comprehension* generators)
def visit_ListComp(self, node):
self._write('[')
self.visit(node.elt)
for generator in node.generators:
# comprehension = (expr target, expr iter, expr* ifs)
self._write(' for ')
self.visit(generator.target)
self._write(' in ')
self.visit(generator.iter)
for ifexpr in generator.ifs:
self._write(' if ')
self.visit(ifexpr)
self._write(']')
# GeneratorExp(expr elt, comprehension* generators)
def visit_GeneratorExp(self, node):
self._write('(')
self.visit(node.elt)
for generator in node.generators:
# comprehension = (expr target, expr iter, expr* ifs)
self._write(' for ')
self.visit(generator.target)
self._write(' in ')
self.visit(generator.iter)
for ifexpr in generator.ifs:
self._write(' if ')
self.visit(ifexpr)
self._write(')')
# Yield(expr? value)
def visit_Yield(self, node):
self._write('yield')
if getattr(node, 'value', None):
self._write(' ')
self.visit(node.value)
comparision_operators = {
_ast.Eq: '==',
_ast.NotEq: '!=',
_ast.Lt: '<',
_ast.LtE: '<=',
_ast.Gt: '>',
_ast.GtE: '>=',
_ast.Is: 'is',
_ast.IsNot: 'is not',
_ast.In: 'in',
_ast.NotIn: 'not in',
}
# Compare(expr left, cmpop* ops, expr* comparators)
@with_parens
def visit_Compare(self, node):
self.visit(node.left)
for op, comparator in zip(node.ops, node.comparators):
self._write(' ' + self.comparision_operators[op.__class__] + ' ')
self.visit(comparator)
# Call(expr func, expr* args, keyword* keywords,
# expr? starargs, expr? kwargs)
def visit_Call(self, node):
self.visit(node.func)
self._write('(')
first = True
for arg in node.args:
if not first:
self._write(', ')
first = False
self.visit(arg)
for keyword in node.keywords:
if not first:
self._write(', ')
first = False
# keyword = (identifier arg, expr value)
self._write(keyword.arg)
self._write('=')
self.visit(keyword.value)
if getattr(node, 'starargs', None):
if not first:
self._write(', ')
first = False
self._write('*')
self.visit(node.starargs)
if getattr(node, 'kwargs', None):
if not first:
self._write(', ')
first = False
self._write('**')
self.visit(node.kwargs)
self._write(')')
# Repr(expr value)
def visit_Repr(self, node):
self._write('`')
self.visit(node.value)
self._write('`')
# Num(object n)
def visit_Num(self, node):
self._write(repr(node.n))
# Str(string s)
def visit_Str(self, node):
self._write(repr(node.s))
# Attribute(expr value, identifier attr, expr_context ctx)
def visit_Attribute(self, node):
self.visit(node.value)
self._write('.')
self._write(node.attr)
# Subscript(expr value, slice slice, expr_context ctx)
def visit_Subscript(self, node):
self.visit(node.value)
self._write('[')
def _process_slice(node):
if isinstance(node, _ast.Ellipsis):
self._write('...')
elif isinstance(node, _ast.Slice):
if getattr(node, 'lower', 'None'):
self.visit(node.lower)
self._write(':')
if getattr(node, 'upper', None):
self.visit(node.upper)
if getattr(node, 'step', None):
self._write(':')
self.visit(node.step)
elif isinstance(node, _ast.Index):
self.visit(node.value)
elif isinstance(node, _ast.ExtSlice):
self.visit(node.dims[0])
for dim in node.dims[1:]:
self._write(', ')
self.visit(dim)
else:
raise NotImplemented('Slice type not implemented')
_process_slice(node.slice)
self._write(']')
# Name(identifier id, expr_context ctx)
def visit_Name(self, node):
self._write(node.id)
# List(expr* elts, expr_context ctx)
def visit_List(self, node):
self._write('[')
for elt in node.elts:
self.visit(elt)
self._write(', ')
self._write(']')
# Tuple(expr *elts, expr_context ctx)
def visit_Tuple(self, node):
self._write('(')
for elt in node.elts:
self.visit(elt)
self._write(', ')
self._write(')')
class ASTTransformer(object):
    """General purpose base class for AST transformations.

    Every visitor method can be overridden to return an AST node that has been
    altered or replaced in some way.
    """

    def visit(self, node):
        """Dispatch to the ``visit_<NodeType>`` method for *node*.

        Nodes (and tuples of nodes) without a specific visitor are returned
        unchanged; ``None`` passes through.
        """
        if node is None:
            return None
        if type(node) is tuple:
            return tuple([self.visit(n) for n in node])
        visitor = getattr(self, 'visit_%s' % node.__class__.__name__, None)
        if visitor is None:
            return node
        return visitor(node)

    def _clone(self, node):
        """Return a copy of *node* with all child fields revisited."""
        clone = node.__class__()
        for name in getattr(clone, '_attributes', ()):
            try:
                # Copy positional metadata (lineno, col_offset, ...) under its
                # own attribute name.  The previous code passed the literal
                # string 'name' to setattr, so this metadata was never copied.
                setattr(clone, name, getattr(node, name))
            except AttributeError:
                pass
        for name in clone._fields:
            try:
                value = getattr(node, name)
            except AttributeError:
                pass
            else:
                if value is None:
                    pass
                elif isinstance(value, list):
                    value = [self.visit(x) for x in value]
                elif isinstance(value, tuple):
                    value = tuple(self.visit(x) for x in value)
                else:
                    value = self.visit(value)
                setattr(clone, name, value)
        return clone

    visit_Module = _clone
    visit_Interactive = _clone
    visit_Expression = _clone
    visit_Suite = _clone

    visit_FunctionDef = _clone
    visit_ClassDef = _clone
    visit_Return = _clone
    visit_Delete = _clone
    visit_Assign = _clone
    visit_AugAssign = _clone
    visit_Print = _clone
    visit_For = _clone
    visit_While = _clone
    visit_If = _clone
    visit_With = _clone
    visit_Raise = _clone
    visit_TryExcept = _clone
    visit_TryFinally = _clone
    visit_Assert = _clone
    visit_Import = _clone
    visit_ImportFrom = _clone
    visit_Exec = _clone
    visit_Global = _clone
    visit_Expr = _clone
    # Pass, Break, Continue don't need to be copied

    visit_BoolOp = _clone
    visit_BinOp = _clone
    visit_UnaryOp = _clone
    visit_Lambda = _clone
    visit_IfExp = _clone
    visit_Dict = _clone
    visit_ListComp = _clone
    visit_GeneratorExp = _clone
    visit_Yield = _clone
    visit_Compare = _clone
    visit_Call = _clone
    visit_Repr = _clone
    # Num, Str don't need to be copied

    visit_Attribute = _clone
    visit_Subscript = _clone
    visit_Name = _clone
    visit_List = _clone
    visit_Tuple = _clone

    visit_comprehension = _clone
    visit_excepthandler = _clone
    visit_arguments = _clone
    visit_keyword = _clone
    visit_alias = _clone

    visit_Slice = _clone
    visit_ExtSlice = _clone
    visit_Index = _clone

    del _clone
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""String interpolation routines, i.e. the splitting up a given text into some
parts that are literal strings, and others that are Python expressions.
"""
from itertools import chain
import os
import re
from tokenize import PseudoToken
from genshi.core import TEXT
from genshi.template.base import TemplateSyntaxError, EXPR
from genshi.template.eval import Expression
__all__ = ['interpolate']
__docformat__ = 'restructuredtext en'
# Characters that may start / continue a placeholder name ($foo, $foo.bar).
NAMESTART = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
NAMECHARS = NAMESTART + '.0123456789'
PREFIX = '$'

# Matches either a triple-quoted string or any token recognized by the
# tokenize module's PseudoToken pattern.  The DOTALL flag is written at the
# START of the pattern: a global inline flag anywhere else is deprecated
# since Python 3.6 and a hard error since Python 3.11 (semantics unchanged).
token_re = re.compile('(?s)%s|%s' % (
    r'[uU]?[rR]?("""|\'\'\')((?<!\\)\\\1|.)*?\1',
    PseudoToken
))
def interpolate(text, filepath=None, lineno=-1, offset=0, lookup='strict'):
    """Parse the given string and extract expressions.

    This function is a generator that yields `TEXT` events for literal strings,
    and `EXPR` events for expressions, depending on the results of parsing the
    string.

    >>> for kind, data, pos in interpolate("hey ${foo}bar"):
    ...     print('%s %r' % (kind, data))
    TEXT 'hey '
    EXPR Expression('foo')
    TEXT 'bar'

    :param text: the text to parse
    :param filepath: absolute path to the file in which the text was found
                     (optional)
    :param lineno: the line number at which the text was found (optional)
    :param offset: the column number at which the text starts in the source
                   (optional)
    :param lookup: the variable lookup mechanism; either "strict" (the
                   default), "lenient", or a custom lookup class
    :return: a list of `TEXT` and `EXPR` events
    :raise TemplateSyntaxError: when a syntax error in an expression is
                                encountered
    """
    # pos is mutated in place as [filepath, lineno, offset] while scanning.
    pos = [filepath, lineno, offset]

    textbuf = []
    textpos = None
    # The trailing (True, '') sentinel forces any buffered literal text to be
    # flushed after the last chunk produced by lex().
    for is_expr, chunk in chain(lex(text, pos, filepath), [(True, '')]):
        if is_expr:
            # Emit buffered literal text before the expression.
            if textbuf:
                yield TEXT, ''.join(textbuf), textpos
                del textbuf[:]
                textpos = None
            if chunk:
                try:
                    expr = Expression(chunk.strip(), pos[0], pos[1],
                                      lookup=lookup)
                    yield EXPR, expr, tuple(pos)
                except SyntaxError, err:
                    raise TemplateSyntaxError(err, filepath, pos[1],
                                              pos[2] + (err.offset or 0))
        else:
            # Literal text is buffered so adjacent chunks collapse into a
            # single TEXT event; remember where the run started.
            textbuf.append(chunk)
            if textpos is None:
                textpos = tuple(pos)

        # Advance the (line, column) position past the chunk just consumed.
        if '\n' in chunk:
            lines = chunk.splitlines()
            pos[1] += len(lines) - 1
            pos[2] += len(lines[-1])
        else:
            pos[2] += len(chunk)
def lex(text, textpos, filepath):
    """Split *text* into ``(is_expr, chunk)`` pairs.

    Chunks flagged ``True`` are expression sources found after the ``$``
    prefix (either ``${...}`` or a bare ``$name``); chunks flagged ``False``
    are literal text.  An escaped prefix (``$$``) yields the text before it
    and emits a single literal ``$`` via the following iteration.

    :param text: the string to scan
    :param textpos: the mutable [filepath, lineno, offset] position list,
                    used only for error reporting here
    :param filepath: the template path, for error messages
    """
    offset = pos = 0
    end = len(text)
    escaped = False

    while 1:
        if escaped:
            # Resume scanning after the doubled prefix ("$$").
            offset = text.find(PREFIX, offset + 2)
            escaped = False
        else:
            offset = text.find(PREFIX, pos)
        if offset < 0 or offset == end - 1:
            # No (usable) prefix left; a trailing lone '$' is literal text.
            break
        next = text[offset + 1]

        if next == '{':
            # ${...} form: tokenize forward until the braces balance.
            if offset > pos:
                yield False, text[pos:offset]
            pos = offset + 2
            level = 1
            while level:
                match = token_re.match(text, pos)
                if match is None:
                    raise TemplateSyntaxError('invalid syntax', filepath,
                                              *textpos[1:])
                pos = match.end()
                # Group 3 of token_re is the bare token text.
                tstart, tend = match.regs[3]
                token = text[tstart:tend]
                if token == '{':
                    level += 1
                elif token == '}':
                    level -= 1
            yield True, text[offset + 2:pos - 1]

        elif next in NAMESTART:
            # Bare $name form: consume name characters greedily.
            if offset > pos:
                yield False, text[pos:offset]
            pos = offset
            pos += 1
            while pos < end:
                char = text[pos]
                if char not in NAMECHARS:
                    break
                pos += 1
            yield True, text[offset + 1:pos].strip()

        elif not escaped and next == PREFIX:
            # "$$" escapes the prefix; the second '$' is emitted as literal
            # text on the next iteration.
            if offset > pos:
                yield False, text[pos:offset]
            escaped = True
            pos = offset + 1

        else:
            # A '$' not followed by '{' or a name character is literal.
            yield False, text[pos:offset + 1]
            pos = offset + 1

    if pos < end:
        yield False, text[pos:]
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Template loading and caching."""
import os
try:
import threading
except ImportError:
import dummy_threading as threading
from genshi.template.base import TemplateError
from genshi.util import LRUCache
__all__ = ['TemplateLoader', 'TemplateNotFound', 'directory', 'package',
'prefixed']
__docformat__ = 'restructuredtext en'
class TemplateNotFound(TemplateError):
    """Exception raised when a specific template file could not be found."""

    def __init__(self, name, search_path):
        """Create the exception.

        :param name: the filename of the template
        :param search_path: the search path used to lookup the template
        """
        message = 'Template "%s" not found' % name
        TemplateError.__init__(self, message)
        # Retained so callers can report where the lookup was attempted.
        self.search_path = search_path
class TemplateLoader(object):
    """Responsible for loading templates from files on the specified search
    path.
    >>> import tempfile
    >>> fd, path = tempfile.mkstemp(suffix='.html', prefix='template')
    >>> os.write(fd, '<p>$var</p>')
    11
    >>> os.close(fd)
    The template loader accepts a list of directory paths that are then used
    when searching for template files, in the given order:
    >>> loader = TemplateLoader([os.path.dirname(path)])
    The `load()` method first checks the template cache whether the requested
    template has already been loaded. If not, it attempts to locate the
    template file, and returns the corresponding `Template` object:
    >>> from genshi.template import MarkupTemplate
    >>> template = loader.load(os.path.basename(path))
    >>> isinstance(template, MarkupTemplate)
    True
    Template instances are cached: requesting a template with the same name
    results in the same instance being returned:
    >>> loader.load(os.path.basename(path)) is template
    True
    The `auto_reload` option can be used to control whether a template should
    be automatically reloaded when the file it was loaded from has been
    changed. Disable this automatic reloading to improve performance.
    >>> os.remove(path)
    """
    def __init__(self, search_path=None, auto_reload=False,
                 default_encoding=None, max_cache_size=25, default_class=None,
                 variable_lookup='strict', allow_exec=True, callback=None):
        """Create the template loader.
        :param search_path: a list of absolute path names that should be
                            searched for template files, or a string containing
                            a single absolute path; alternatively, any item on
                            the list may be a ''load function'' that is passed
                            a filename and returns a file-like object and some
                            metadata
        :param auto_reload: whether to check the last modification time of
                            template files, and reload them if they have changed
        :param default_encoding: the default encoding to assume when loading
                                 templates; defaults to UTF-8
        :param max_cache_size: the maximum number of templates to keep in the
                               cache
        :param default_class: the default `Template` subclass to use when
                              instantiating templates
        :param variable_lookup: the variable lookup mechanism; either "strict"
                                (the default), "lenient", or a custom lookup
                                class
        :param allow_exec: whether to allow Python code blocks in templates
        :param callback: (optional) a callback function that is invoked after a
                         template was initialized by this loader; the function
                         is passed the template object as only argument. This
                         callback can be used for example to add any desired
                         filters to the template
        :see: `LenientLookup`, `StrictLookup`
        :note: Changed in 0.5: Added the `allow_exec` argument
        """
        # Imported here (not at module level) -- presumably to avoid a
        # circular import between loader and markup modules; verify.
        from genshi.template.markup import MarkupTemplate
        self.search_path = search_path
        if self.search_path is None:
            self.search_path = []
        elif not isinstance(self.search_path, (list, tuple)):
            # A single path string is accepted as a one-element search path
            self.search_path = [self.search_path]
        self.auto_reload = auto_reload
        """Whether templates should be reloaded when the underlying file is
        changed"""
        self.default_encoding = default_encoding
        self.default_class = default_class or MarkupTemplate
        self.variable_lookup = variable_lookup
        self.allow_exec = allow_exec
        if callback is not None and not hasattr(callback, '__call__'):
            raise TypeError('The "callback" parameter needs to be callable')
        self.callback = callback
        # Bounded template cache plus a parallel map of "up to date" checker
        # callables; both structures are guarded by the re-entrant lock below.
        self._cache = LRUCache(max_cache_size)
        self._uptodate = {}
        self._lock = threading.RLock()
    def load(self, filename, relative_to=None, cls=None, encoding=None):
        """Load the template with the given name.
        If the `filename` parameter is relative, this method searches the
        search path trying to locate a template matching the given name. If the
        file name is an absolute path, the search path is ignored.
        If the requested template is not found, a `TemplateNotFound` exception
        is raised. Otherwise, a `Template` object is returned that represents
        the parsed template.
        Template instances are cached to avoid having to parse the same
        template file more than once. Thus, subsequent calls of this method
        with the same template file name will return the same `Template`
        object (unless the ``auto_reload`` option is enabled and the file was
        changed since the last parse.)
        If the `relative_to` parameter is provided, the `filename` is
        interpreted as being relative to that path.
        :param filename: the relative path of the template file to load
        :param relative_to: the filename of the template from which the new
                            template is being loaded, or ``None`` if the
                            template is being loaded directly
        :param cls: the class of the template object to instantiate
        :param encoding: the encoding of the template to load; defaults to the
                         ``default_encoding`` of the loader instance
        :return: the loaded `Template` instance
        :raises TemplateNotFound: if a template with the given name could not
                                  be found
        """
        if cls is None:
            cls = self.default_class
        search_path = self.search_path
        # Make the filename relative to the template file its being loaded
        # from, but only if that file is specified as a relative path, or no
        # search path has been set up
        if relative_to and (not search_path or not os.path.isabs(relative_to)):
            filename = os.path.join(os.path.dirname(relative_to), filename)
        filename = os.path.normpath(filename)
        # The normalized (possibly relative) path is the cache key
        cachekey = filename
        self._lock.acquire()
        try:
            # First check the cache to avoid reparsing the same file
            try:
                tmpl = self._cache[cachekey]
                if not self.auto_reload:
                    return tmpl
                # With auto_reload on, the cached entry is only reused while
                # its freshness checker still reports the file as unchanged
                uptodate = self._uptodate[cachekey]
                if uptodate is not None and uptodate():
                    return tmpl
            except (KeyError, OSError):
                pass
            isabs = False
            if os.path.isabs(filename):
                # Bypass the search path if the requested filename is absolute
                search_path = [os.path.dirname(filename)]
                isabs = True
            elif relative_to and os.path.isabs(relative_to):
                # Make sure that the directory containing the including
                # template is on the search path
                dirname = os.path.dirname(relative_to)
                if dirname not in search_path:
                    search_path = list(search_path) + [dirname]
                isabs = True
            elif not search_path:
                # Uh oh, don't know where to look for the template
                raise TemplateError('Search path for templates not configured')
            for loadfunc in search_path:
                # Plain strings on the search path are directory paths;
                # anything else must already be a load function
                if isinstance(loadfunc, basestring):
                    loadfunc = directory(loadfunc)
                try:
                    filepath, filename, fileobj, uptodate = loadfunc(filename)
                except IOError:
                    continue
                else:
                    try:
                        if isabs:
                            # If the filename of either the included or the
                            # including template is absolute, make sure the
                            # included template gets an absolute path, too,
                            # so that nested includes work properly without a
                            # search path
                            filename = filepath
                        tmpl = self._instantiate(cls, fileobj, filepath,
                                                 filename, encoding=encoding)
                        if self.callback:
                            self.callback(tmpl)
                        self._cache[cachekey] = tmpl
                        self._uptodate[cachekey] = uptodate
                    finally:
                        if hasattr(fileobj, 'close'):
                            fileobj.close()
                    return tmpl
            raise TemplateNotFound(filename, search_path)
        finally:
            self._lock.release()
    def _instantiate(self, cls, fileobj, filepath, filename, encoding=None):
        """Instantiate and return the `Template` object based on the given
        class and parameters.
        This function is intended for subclasses to override if they need to
        implement special template instantiation logic. Code that just uses
        the `TemplateLoader` should use the `load` method instead.
        :param cls: the class of the template object to instantiate
        :param fileobj: a readable file-like object containing the template
                        source
        :param filepath: the absolute path to the template file
        :param filename: the path to the template file relative to the search
                         path
        :param encoding: the encoding of the template to load; defaults to the
                         ``default_encoding`` of the loader instance
        :return: the loaded `Template` instance
        :rtype: `Template`
        """
        if encoding is None:
            encoding = self.default_encoding
        return cls(fileobj, filepath=filepath, filename=filename, loader=self,
                   encoding=encoding, lookup=self.variable_lookup,
                   allow_exec=self.allow_exec)
    @staticmethod
    def directory(path):
        """Loader factory for loading templates from a local directory.
        :param path: the path to the local directory containing the templates
        :return: the loader function to load templates from the given directory
        :rtype: ``function``
        """
        def _load_from_directory(filename):
            filepath = os.path.join(path, filename)
            # 'U' = universal-newlines mode (Python 2)
            fileobj = open(filepath, 'U')
            mtime = os.path.getmtime(filepath)
            def _uptodate():
                # Template stays fresh as long as its mtime is unchanged
                return mtime == os.path.getmtime(filepath)
            return filepath, filename, fileobj, _uptodate
        return _load_from_directory
    @staticmethod
    def package(name, path):
        """Loader factory for loading templates from egg package data.
        :param name: the name of the package containing the resources
        :param path: the path inside the package data
        :return: the loader function to load templates from the given package
        :rtype: ``function``
        """
        from pkg_resources import resource_stream
        def _load_from_package(filename):
            filepath = os.path.join(path, filename)
            # No freshness checker (None): package data is assumed immutable
            return filepath, filename, resource_stream(name, filepath), None
        return _load_from_package
    @staticmethod
    def prefixed(**delegates):
        """Factory for a load function that delegates to other loaders
        depending on the prefix of the requested template path.
        The prefix is stripped from the filename when passing on the load
        request to the delegate.
        >>> load = prefixed(
        ...    app1 = lambda filename: ('app1', filename, None, None),
        ...    app2 = lambda filename: ('app2', filename, None, None)
        ... )
        >>> print(load('app1/foo.html'))
        ('app1', 'app1/foo.html', None, None)
        >>> print(load('app2/bar.html'))
        ('app2', 'app2/bar.html', None, None)
        :param delegates: mapping of path prefixes to loader functions
        :return: the loader function
        :rtype: ``function``
        """
        def _dispatch_by_prefix(filename):
            for prefix, delegate in delegates.items():
                if filename.startswith(prefix):
                    if isinstance(delegate, basestring):
                        delegate = directory(delegate)
                    # The delegate sees the path without the matched prefix
                    filepath, _, fileobj, uptodate = delegate(
                        filename[len(prefix):].lstrip('/\\')
                    )
                    return filepath, filename, fileobj, uptodate
            raise TemplateNotFound(filename, list(delegates.keys()))
        return _dispatch_by_prefix
# Convenience module-level aliases for the static loader factories.
directory = TemplateLoader.directory
package = TemplateLoader.package
prefixed = TemplateLoader.prefixed
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Support for "safe" evaluation of Python expressions."""
import __builtin__
from textwrap import dedent
from types import CodeType
from genshi.core import Markup
from genshi.template.astutil import ASTTransformer, ASTCodeGenerator, \
_ast, parse
from genshi.template.base import TemplateRuntimeError
from genshi.util import flatten
__all__ = ['Code', 'Expression', 'Suite', 'LenientLookup', 'StrictLookup',
'Undefined', 'UndefinedError']
__docformat__ = 'restructuredtext en'
# Check for a Python 2.4 bug in the eval loop
has_star_import_bug = False
try:
    class _FakeMapping(object):
        __getitem__ = __setitem__ = lambda *a: None
    # Python 2 exec statement: a broken interpreter raises SystemError when a
    # star-import targets a non-dict mapping as the local namespace.
    exec 'from sys import *' in {}, _FakeMapping()
except SystemError:
    has_star_import_bug = True
del _FakeMapping
def _star_import_patch(mapping, modname):
"""This function is used as helper if a Python version with a broken
star-import opcode is in use.
"""
module = __import__(modname, None, None, ['__all__'])
if hasattr(module, '__all__'):
members = module.__all__
else:
members = [x for x in module.__dict__ if not x.startswith('_')]
mapping.update([(name, getattr(module, name)) for name in members])
class Code(object):
    """Abstract base class for the `Expression` and `Suite` classes."""
    __slots__ = ['source', 'code', 'ast', '_globals']
    def __init__(self, source, filename=None, lineno=-1, lookup='strict',
                 xform=None):
        """Create the code object, either from a string, or from an AST node.
        :param source: either a string containing the source code, or an AST
                       node
        :param filename: the (preferably absolute) name of the file containing
                         the code
        :param lineno: the number of the line on which the code was found
        :param lookup: the lookup class that defines how variables are looked
                       up in the context; can be either "strict" (the default),
                       "lenient", or a custom lookup class
        :param xform: the AST transformer that should be applied to the code;
                      if `None`, the appropriate transformation is chosen
                      depending on the mode
        """
        if isinstance(source, basestring):
            self.source = source
            # self.mode is supplied by the concrete subclass
            # ('eval' for Expression, 'exec' for Suite)
            node = _parse(source, mode=self.mode)
        else:
            assert isinstance(source, _ast.AST), \
                'Expected string or AST node, but got %r' % source
            # No textual source available when built from an AST node
            self.source = '?'
            if self.mode == 'eval':
                node = _ast.Expression()
                node.body = source
            else:
                node = _ast.Module()
                node.body = [source]
        self.ast = node
        self.code = _compile(node, self.source, mode=self.mode,
                             filename=filename, lineno=lineno, xform=xform)
        if lookup is None:
            lookup = LenientLookup
        elif isinstance(lookup, basestring):
            lookup = {'lenient': LenientLookup, 'strict': StrictLookup}[lookup]
        # Bound classmethod that builds the eval/exec globals dict
        self._globals = lookup.globals
    def __getstate__(self):
        # Pickling support: code objects are not picklable, so the relevant
        # fields are captured as a plain tuple.  `im_self` is the Python 2
        # bound-method attribute holding the lookup class.
        state = {'source': self.source, 'ast': self.ast,
                 'lookup': self._globals.im_self}
        c = self.code
        state['code'] = (c.co_nlocals, c.co_stacksize, c.co_flags, c.co_code,
                         c.co_consts, c.co_names, c.co_varnames, c.co_filename,
                         c.co_name, c.co_firstlineno, c.co_lnotab, (), ())
        return state
    def __setstate__(self, state):
        self.source = state['source']
        self.ast = state['ast']
        # Rebuild the code object; leading 0 is co_argcount (Python 2
        # CodeType signature)
        self.code = CodeType(0, *state['code'])
        self._globals = state['lookup'].globals
    def __eq__(self, other):
        return (type(other) == type(self)) and (self.code == other.code)
    def __hash__(self):
        return hash(self.code)
    def __ne__(self, other):
        return not self == other
    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.source)
class Expression(Code):
    """Evaluates Python expressions used in templates.
    >>> data = dict(test='Foo', items=[1, 2, 3], dict={'some': 'thing'})
    >>> Expression('test').evaluate(data)
    'Foo'
    >>> Expression('items[0]').evaluate(data)
    1
    >>> Expression('items[-1]').evaluate(data)
    3
    >>> Expression('dict["some"]').evaluate(data)
    'thing'
    Much like in JavaScript, dotted notation may be used to reach into
    mappings:
    >>> Expression('dict.some').evaluate(data)
    'thing'
    The reverse also holds: subscript syntax can reach arbitrary object
    attributes:
    >>> class MyClass(object):
    ...     myattr = 'Bar'
    >>> data = dict(mine=MyClass(), key='myattr')
    >>> Expression('mine.myattr').evaluate(data)
    'Bar'
    >>> Expression('mine["myattr"]').evaluate(data)
    'Bar'
    >>> Expression('mine[key]').evaluate(data)
    'Bar'
    The full set of Python operators is available, as are built-in functions
    such as ``len()``:
    >>> data = dict(items=[1, 2, 3])
    >>> Expression('len(items)').evaluate(data)
    3
    """
    __slots__ = []
    # Compile the source in expression ("eval") mode; see `Code.__init__`.
    mode = 'eval'
    def evaluate(self, data):
        """Evaluate the expression against the given data dictionary.
        :param data: a mapping containing the data to evaluate against
        :return: the result of the evaluation
        """
        # Marker consumed by traceback-filtering tools to hide this frame.
        __traceback_hide__ = 'before_and_this'
        return eval(self.code, self._globals(data), {'__data__': data})
class Suite(Code):
    """Executes Python statements used in templates.
    >>> data = dict(test='Foo', items=[1, 2, 3], dict={'some': 'thing'})
    >>> Suite("foo = dict['some']").execute(data)
    >>> data['foo']
    'thing'
    """
    __slots__ = []
    # Compile the source in statement ("exec") mode; see `Code.__init__`.
    mode = 'exec'
    def execute(self, data):
        """Execute the suite in the given data dictionary.
        Names assigned by the suite end up directly in `data` (it is used as
        the local namespace of the exec).
        :param data: a mapping containing the data to execute in
        """
        # Marker consumed by traceback-filtering tools to hide this frame.
        __traceback_hide__ = 'before_and_this'
        _globals = self._globals(data)
        # Python 2 exec statement: globals from the lookup, locals = data
        exec self.code in _globals, data
# Sentinel distinguishing "no value supplied" from an explicit None.
UNDEFINED = object()
class UndefinedError(TemplateRuntimeError):
    """Exception thrown when a template expression attempts to access a variable
    not defined in the context.
    :see: `LenientLookup`, `StrictLookup`
    """
    def __init__(self, name, owner=UNDEFINED):
        # Mention the owning object in the message when the name was accessed
        # as a member rather than as a top-level variable
        if owner is UNDEFINED:
            message = '"%s" not defined' % name
        else:
            message = '%s has no member named "%s"' % (repr(owner), name)
        TemplateRuntimeError.__init__(self, message)
class Undefined(object):
    """Placeholder standing in for a reference to an undefined variable.
    Unlike the Python runtime, template expressions can refer to an undefined
    variable without a `NameError` being raised; the reference instead yields
    an instance of this class, which behaves as ``False`` in conditions and as
    an empty sequence, but blows up on any other use:
    >>> foo = Undefined('foo')
    >>> bool(foo)
    False
    >>> list(foo)
    []
    >>> print(foo)
    undefined
    Calling the placeholder, or accessing an attribute or item on it, raises
    an exception that carries the name originally used to reference the
    undefined variable:
    >>> foo('bar')
    Traceback (most recent call last):
        ...
    UndefinedError: "foo" not defined
    >>> foo.bar
    Traceback (most recent call last):
        ...
    UndefinedError: "foo" not defined
    :see: `LenientLookup`
    """
    __slots__ = ['_name', '_owner']
    def __init__(self, name, owner=UNDEFINED):
        """Initialize the object.
        :param name: the name of the reference
        :param owner: the owning object, if the variable is accessed as a member
        """
        self._name = name
        self._owner = owner
    def __iter__(self):
        # Undefined iterates as an empty sequence
        return iter([])
    def __nonzero__(self):
        # Python 2 truth hook: undefined is always falsy
        return False
    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._name)
    def __str__(self):
        return 'undefined'
    def _fail(self, *args, **kwargs):
        """Raise an `UndefinedError` naming the missing reference."""
        __traceback_hide__ = True
        raise UndefinedError(self._name, self._owner)
    # Calling, attribute access, and item access all fail loudly
    __call__ = __getattr__ = __getitem__ = _fail
class LookupBase(object):
    """Abstract base class for variable lookup implementations."""
    @classmethod
    def globals(cls, data):
        """Construct the globals dictionary to use as the execution context for
        the expression or suite.
        The ``_lookup_*`` helpers are the targets of the calls injected into
        the code by the AST transformers.
        """
        return {
            '__data__': data,
            '_lookup_name': cls.lookup_name,
            '_lookup_attr': cls.lookup_attr,
            '_lookup_item': cls.lookup_item,
            '_star_import_patch': _star_import_patch,
            'UndefinedError': UndefinedError,
        }
    @classmethod
    def lookup_name(cls, data, name):
        # Resolve a bare name: context data first, then builtins, then the
        # subclass-specific undefined handling
        __traceback_hide__ = True
        val = data.get(name, UNDEFINED)
        if val is UNDEFINED:
            val = BUILTINS.get(name, val)
            if val is UNDEFINED:
                val = cls.undefined(name)
        return val
    @classmethod
    def lookup_attr(cls, obj, key):
        # Resolve ``obj.key``: attribute access first, falling back to item
        # access, then to the subclass-specific undefined handling
        __traceback_hide__ = True
        try:
            val = getattr(obj, key)
        except AttributeError:
            if hasattr(obj.__class__, key):
                # The attribute exists on the class, so the AttributeError
                # came from a property/descriptor -- re-raise it unchanged
                raise
            else:
                try:
                    val = obj[key]
                except (KeyError, TypeError):
                    val = cls.undefined(key, owner=obj)
        return val
    @classmethod
    def lookup_item(cls, obj, key):
        # Resolve ``obj[key]``; `key` arrives as a tuple (built by the AST
        # transformer) and is unwrapped when it holds a single subscript
        __traceback_hide__ = True
        if len(key) == 1:
            key = key[0]
        try:
            return obj[key]
        # Python 2 except syntax
        except (AttributeError, KeyError, IndexError, TypeError), e:
            if isinstance(key, basestring):
                # Fall back to attribute access for string keys
                val = getattr(obj, key, UNDEFINED)
                if val is UNDEFINED:
                    val = cls.undefined(key, owner=obj)
                return val
            raise
    @classmethod
    def undefined(cls, key, owner=UNDEFINED):
        """Can be overridden by subclasses to specify behavior when undefined
        variables are accessed.
        :param key: the name of the variable
        :param owner: the owning object, if the variable is accessed as a member
        """
        raise NotImplementedError
class LenientLookup(LookupBase):
    """Default variable lookup mechanism for expressions.
    Referencing an undefined variable under this policy does not raise;
    the reference instead evaluates to an instance of the `Undefined` class:
    >>> expr = Expression('nothing', lookup='lenient')
    >>> undef = expr.evaluate({})
    >>> undef
    <Undefined 'nothing'>
    Accessing a missing attribute or item on an existing object behaves the
    same way:
    >>> expr = Expression('something.nil', lookup='lenient')
    >>> expr.evaluate({'something': dict()})
    <Undefined 'nil'>
    Refer to the `Undefined` class documentation for how those placeholder
    objects behave.
    :see: `StrictLookup`
    """
    @classmethod
    def undefined(cls, key, owner=UNDEFINED):
        """Return an ``Undefined`` placeholder instead of raising."""
        __traceback_hide__ = True
        return Undefined(key, owner=owner)
class StrictLookup(LookupBase):
    """Strict variable lookup mechanism for expressions.
    Under this policy, referencing an undefined variable raises an
    ``UndefinedError`` right away:
    >>> expr = Expression('nothing', lookup='strict')
    >>> expr.evaluate({})
    Traceback (most recent call last):
        ...
    UndefinedError: "nothing" not defined
    Likewise for a missing attribute or item on an existing object:
    >>> expr = Expression('something.nil', lookup='strict')
    >>> expr.evaluate({'something': dict()})
    Traceback (most recent call last):
        ...
    UndefinedError: {} has no member named "nil"
    """
    @classmethod
    def undefined(cls, key, owner=UNDEFINED):
        """Raise an ``UndefinedError`` immediately."""
        __traceback_hide__ = True
        raise UndefinedError(key, owner=owner)
def _parse(source, mode='eval'):
    """Parse a template expression or suite into an AST node.
    In "exec" mode the source may be a compound statement whose header sits on
    the first line with the body dedented relative to it (as is common in
    template directives); the body is re-indented here so that the snippet is
    valid Python before it is parsed.
    :param source: the source code string (`str` or `unicode`)
    :param mode: 'eval' for a single expression, 'exec' for statements
    :return: the parsed AST node
    """
    source = source.strip()
    if mode == 'exec':
        lines = [line.expandtabs() for line in source.splitlines()]
        if lines:
            first = lines[0]
            rest = dedent('\n'.join(lines[1:])).rstrip()
            # FIX: guard against an empty body -- `rest[0]` raised IndexError
            # for a lone compound-statement header such as "if x:"
            if first.rstrip().endswith(':') and rest and not rest[0].isspace():
                rest = '\n'.join([' %s' % line for line in rest.splitlines()])
            source = '\n'.join([first, rest])
    if isinstance(source, unicode):
        # Python 2: encode unicode source as UTF-8 and prefix a UTF-8 BOM so
        # the parser decodes the bytes correctly
        source = '\xef\xbb\xbf' + source.encode('utf-8')
    return parse(source, mode)
def _compile(node, source=None, mode='eval', filename=None, lineno=-1,
             xform=None):
    """Transform the AST node and compile it into a code object.
    The node is first rewritten by the appropriate AST transformer (turning
    free names into context lookups), then compiled; the resulting code
    object is cloned so that its name and first line number point back at the
    template source.
    :param node: the AST node to compile
    :param source: the original source text, used for the code object name
    :param mode: 'eval' or 'exec'
    :param filename: the template filename, if any
    :param lineno: the template line number the code came from
    :param xform: optional AST transformer class overriding the default
    :return: the compiled code object
    """
    if isinstance(filename, unicode):
        # unicode file names not allowed for code objects
        filename = filename.encode('utf-8', 'replace')
    elif not filename:
        filename = '<string>'
    if lineno <= 0:
        lineno = 1
    if xform is None:
        xform = {
            'eval': ExpressionASTTransformer
        }.get(mode, TemplateASTTransformer)
    tree = xform().visit(node)
    # Build a descriptive code-object name for tracebacks
    if mode == 'eval':
        name = '<Expression %r>' % (source or '?')
    else:
        lines = source.splitlines()
        if not lines:
            extract = ''
        else:
            extract = lines[0]
        if len(lines) > 1:
            extract += ' ...'
        name = '<Suite %r>' % (extract)
    # Regenerate source from the transformed tree and compile that
    new_source = ASTCodeGenerator(tree).code
    code = compile(new_source, filename, mode)
    try:
        # We'd like to just set co_firstlineno, but it's readonly. So we need
        # to clone the code object while adjusting the line number
        # (Python 2 CodeType signature; 0x0040 presumably sets the CO_NESTED
        # flag -- TODO confirm)
        return CodeType(0, code.co_nlocals, code.co_stacksize,
                        code.co_flags | 0x0040, code.co_code, code.co_consts,
                        code.co_names, code.co_varnames, filename, name,
                        lineno, code.co_lnotab, (), ())
    except RuntimeError:
        return code
def _new(class_, *args, **kwargs):
ret = class_()
for attr, value in zip(ret._fields, args):
if attr in kwargs:
raise ValueError('Field set both in args and kwargs')
setattr(ret, attr, value)
for attr, value in kwargs:
setattr(ret, attr, value)
return ret
# Names resolvable in template expressions: all Python builtins plus the
# `Markup` and `Undefined` classes.  (`__builtin__` is the Python 2 module.)
BUILTINS = __builtin__.__dict__.copy()
BUILTINS.update({'Markup': Markup, 'Undefined': Undefined})
# Names that must never be rewritten into context lookups.
CONSTANTS = frozenset(['False', 'True', 'None', 'NotImplemented', 'Ellipsis'])
class TemplateASTTransformer(ASTTransformer):
    """Concrete AST transformer that implements the AST transformations needed
    for code embedded in templates.
    Maintains a stack of local-variable scopes (``self.locals``) so that names
    bound inside functions, lambdas, and comprehensions are left alone, while
    free names are rewritten into ``_lookup_name`` context lookups by
    `visit_Name`.
    """
    def __init__(self):
        # Innermost scope last; CONSTANTS must never become context lookups
        self.locals = [CONSTANTS]
    def _extract_names(self, node):
        """Collect the names bound by an argument list or an import
        statement, including nested tuple-unpacking targets.
        """
        names = set()
        def _process(node):
            if isinstance(node, _ast.Name):
                names.add(node.id)
            elif isinstance(node, _ast.alias):
                names.add(node.asname or node.name)
            elif isinstance(node, _ast.Tuple):
                for elt in node.elts:
                    # FIX: recurse into each element -- previously this
                    # called `_process(node)`, recursing on the tuple itself
                    # forever (RecursionError on any tuple-unpacking target)
                    _process(elt)
        if hasattr(node, 'args'):
            for arg in node.args:
                _process(arg)
            if hasattr(node, 'vararg'):
                names.add(node.vararg)
            if hasattr(node, 'kwarg'):
                names.add(node.kwarg)
        elif hasattr(node, 'names'):
            for elt in node.names:
                _process(elt)
        return names
    def visit_Str(self, node):
        """Normalize string literals: keep pure-ASCII `str` values, decode
        anything else from UTF-8 into `unicode` (Python 2)."""
        if isinstance(node.s, str):
            try: # If the string is ASCII, return a `str` object
                node.s.decode('ascii')
            except ValueError: # Otherwise return a `unicode` object
                return _new(_ast.Str, node.s.decode('utf-8'))
        return node
    def visit_ClassDef(self, node):
        if len(self.locals) > 1:
            self.locals[-1].add(node.name)
        # The class body gets its own (initially empty) local scope
        self.locals.append(set())
        try:
            return ASTTransformer.visit_ClassDef(self, node)
        finally:
            self.locals.pop()
    def visit_Import(self, node):
        if len(self.locals) > 1:
            self.locals[-1].update(self._extract_names(node))
        return ASTTransformer.visit_Import(self, node)
    def visit_ImportFrom(self, node):
        if [a.name for a in node.names] == ['*']:
            if has_star_import_bug:
                # This is a Python 2.4 bug. Only if we have a broken Python
                # version do we need to apply this hack
                node = _new(_ast.Expr, _new(_ast.Call,
                    _new(_ast.Name, '_star_import_patch'), [
                        _new(_ast.Name, '__data__'),
                        _new(_ast.Str, node.module)
                    ], (), ()))
            return node
        if len(self.locals) > 1:
            self.locals[-1].update(self._extract_names(node))
        return ASTTransformer.visit_ImportFrom(self, node)
    def visit_FunctionDef(self, node):
        if len(self.locals) > 1:
            self.locals[-1].add(node.name)
        # The function parameters are locals within the function body
        self.locals.append(self._extract_names(node.args))
        try:
            return ASTTransformer.visit_FunctionDef(self, node)
        finally:
            self.locals.pop()
    # GeneratorExp(expr elt, comprehension* generators)
    def visit_GeneratorExp(self, node):
        gens = []
        for generator in node.generators:
            # comprehension = (expr target, expr iter, expr* ifs)
            self.locals.append(set())
            gen = _new(_ast.comprehension, self.visit(generator.target),
                       self.visit(generator.iter),
                       [self.visit(if_) for if_ in generator.ifs])
            gens.append(gen)
        # use node.__class__ to make it reusable as ListComp
        ret = _new(node.__class__, self.visit(node.elt), gens)
        #delete inserted locals
        del self.locals[-len(node.generators):]
        return ret
    # ListComp(expr elt, comprehension* generators)
    visit_ListComp = visit_GeneratorExp
    def visit_Lambda(self, node):
        self.locals.append(self._extract_names(node.args))
        try:
            return ASTTransformer.visit_Lambda(self, node)
        finally:
            self.locals.pop()
    def visit_Name(self, node):
        # If the name refers to a local inside a lambda, list comprehension, or
        # generator expression, leave it alone
        if isinstance(node.ctx, _ast.Load) and \
                node.id not in flatten(self.locals):
            # Otherwise, translate the name ref into a context lookup
            name = _new(_ast.Name, '_lookup_name', _ast.Load())
            namearg = _new(_ast.Name, '__data__', _ast.Load())
            strarg = _new(_ast.Str, node.id)
            node = _new(_ast.Call, name, [namearg, strarg], [])
        elif isinstance(node.ctx, _ast.Store):
            if len(self.locals) > 1:
                self.locals[-1].add(node.id)
        return node
class ExpressionASTTransformer(TemplateASTTransformer):
    """AST transformer applied to template *expressions*: on top of the base
    name handling, it rewrites attribute and item access into the flexible
    ``_lookup_attr`` / ``_lookup_item`` helpers.
    """
    def visit_Attribute(self, node):
        if not isinstance(node.ctx, _ast.Load):
            return ASTTransformer.visit_Attribute(self, node)
        # obj.attr  =>  _lookup_attr(obj, 'attr')
        lookup = _new(_ast.Name, '_lookup_attr', _ast.Load())
        return _new(_ast.Call, lookup,
                    [self.visit(node.value), _new(_ast.Str, node.attr)], [])
    def visit_Subscript(self, node):
        if not isinstance(node.ctx, _ast.Load) or \
                not isinstance(node.slice, _ast.Index):
            return ASTTransformer.visit_Subscript(self, node)
        # obj[key]  =>  _lookup_item(obj, (key,))
        lookup = _new(_ast.Name, '_lookup_item', _ast.Load())
        target = self.visit(node.value)
        key_tuple = _new(_ast.Tuple, (self.visit(node.slice.value),),
                         _ast.Load())
        return _new(_ast.Call, lookup, [target, key_tuple], [])
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Support for constructing markup streams from files, strings, or other
sources.
"""
from itertools import chain
import htmlentitydefs as entities
import HTMLParser as html
from StringIO import StringIO
from xml.parsers import expat
from genshi.core import Attrs, QName, Stream, stripentities
from genshi.core import START, END, XML_DECL, DOCTYPE, TEXT, START_NS, \
END_NS, START_CDATA, END_CDATA, PI, COMMENT
__all__ = ['ET', 'ParseError', 'XMLParser', 'XML', 'HTMLParser', 'HTML']
__docformat__ = 'restructuredtext en'
def ET(element):
    """Convert a given ElementTree element to a markup stream.
    :param element: an ElementTree element
    :return: a markup stream
    """
    # ElementTree qualifies names as '{uri}local'; strip the brace prefix
    pos = (None, -1, -1)
    tag = QName(element.tag.lstrip('{'))
    attrib = Attrs([(QName(name.lstrip('{')), value)
                    for name, value in element.items()])
    yield START, (tag, attrib), pos
    if element.text:
        yield TEXT, element.text, pos
    for child in element.getchildren():
        for event in ET(child):
            yield event
    yield END, tag, pos
    if element.tail:
        yield TEXT, element.tail, pos
class ParseError(Exception):
    """Raised when fatal syntax errors are found in the input being parsed."""
    def __init__(self, message, filename=None, lineno=-1, offset=-1):
        """Exception initializer.
        :param message: the error message from the parser
        :param filename: the path to the file that was parsed
        :param lineno: the number of the line on which the error was encountered
        :param offset: the column number where the error was encountered
        """
        # Keep the bare parser message; the filename is only appended to the
        # message the Exception machinery displays
        self.msg = message
        shown = message + ', in ' + filename if filename else message
        Exception.__init__(self, shown)
        self.filename = filename or '<string>'
        self.lineno = lineno
        self.offset = offset
class XMLParser(object):
"""Generator-based XML parser based on roughly equivalent code in
Kid/ElementTree.
The parsing is initiated by iterating over the parser object:
>>> parser = XMLParser(StringIO('<root id="2"><child>Foo</child></root>'))
>>> for kind, data, pos in parser:
... print('%s %s' % (kind, data))
START (QName('root'), Attrs([(QName('id'), u'2')]))
START (QName('child'), Attrs())
TEXT Foo
END child
END root
"""
_entitydefs = ['<!ENTITY %s "&#%d;">' % (name, value) for name, value in
entities.name2codepoint.items()]
_external_dtd = '\n'.join(_entitydefs)
def __init__(self, source, filename=None, encoding=None):
"""Initialize the parser for the given XML input.
:param source: the XML text as a file-like object
:param filename: the name of the file, if appropriate
:param encoding: the encoding of the file; if not specified, the
encoding is assumed to be ASCII, UTF-8, or UTF-16, or
whatever the encoding specified in the XML declaration
(if any)
"""
self.source = source
self.filename = filename
# Setup the Expat parser
parser = expat.ParserCreate(encoding, '}')
parser.buffer_text = True
parser.returns_unicode = True
parser.ordered_attributes = True
parser.StartElementHandler = self._handle_start
parser.EndElementHandler = self._handle_end
parser.CharacterDataHandler = self._handle_data
parser.StartDoctypeDeclHandler = self._handle_doctype
parser.StartNamespaceDeclHandler = self._handle_start_ns
parser.EndNamespaceDeclHandler = self._handle_end_ns
parser.StartCdataSectionHandler = self._handle_start_cdata
parser.EndCdataSectionHandler = self._handle_end_cdata
parser.ProcessingInstructionHandler = self._handle_pi
parser.XmlDeclHandler = self._handle_xml_decl
parser.CommentHandler = self._handle_comment
# Tell Expat that we'll handle non-XML entities ourselves
# (in _handle_other)
parser.DefaultHandler = self._handle_other
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.UseForeignDTD()
parser.ExternalEntityRefHandler = self._build_foreign
self.expat = parser
self._queue = []
def parse(self):
"""Generator that parses the XML source, yielding markup events.
:return: a markup event stream
:raises ParseError: if the XML text is not well formed
"""
def _generate():
try:
bufsize = 4 * 1024 # 4K
done = False
while 1:
while not done and len(self._queue) == 0:
data = self.source.read(bufsize)
if data == '': # end of data
if hasattr(self, 'expat'):
self.expat.Parse('', True)
del self.expat # get rid of circular references
done = True
else:
if isinstance(data, unicode):
data = data.encode('utf-8')
self.expat.Parse(data, False)
for event in self._queue:
yield event
self._queue = []
if done:
break
except expat.ExpatError, e:
msg = str(e)
raise ParseError(msg, self.filename, e.lineno, e.offset)
return Stream(_generate()).filter(_coalesce)
def __iter__(self):
return iter(self.parse())
def _build_foreign(self, context, base, sysid, pubid):
parser = self.expat.ExternalEntityParserCreate(context)
parser.ParseFile(StringIO(self._external_dtd))
return 1
def _enqueue(self, kind, data=None, pos=None):
if pos is None:
pos = self._getpos()
if kind is TEXT:
# Expat reports the *end* of the text event as current position. We
# try to fix that up here as much as possible. Unfortunately, the
# offset is only valid for single-line text. For multi-line text,
# it is apparently not possible to determine at what offset it
# started
if '\n' in data:
lines = data.splitlines()
lineno = pos[1] - len(lines) + 1
offset = -1
else:
lineno = pos[1]
offset = pos[2] - len(data)
pos = (pos[0], lineno, offset)
self._queue.append((kind, data, pos))
def _getpos_unknown(self):
    # Position fallback used when expat cannot supply line/column info.
    return (self.filename, -1, -1)
def _getpos(self):
    # Current (filename, line, column) as reported by the expat parser.
    return (self.filename, self.expat.CurrentLineNumber,
            self.expat.CurrentColumnNumber)
def _handle_start(self, tag, attrib):
    # Expat StartElementHandler: ``attrib`` is a flat
    # [name, value, name, value, ...] list; pair it up into Attrs.
    pairs = zip(*[iter(attrib)] * 2)
    attrs = Attrs([(QName(name), value) for name, value in pairs])
    self._enqueue(START, (QName(tag), attrs))
def _handle_end(self, tag):
    # Expat EndElementHandler: emit an END event.
    self._enqueue(END, QName(tag))
def _handle_data(self, text):
    # Expat CharacterDataHandler: emit a TEXT event.
    self._enqueue(TEXT, text)
def _handle_xml_decl(self, version, encoding, standalone):
    # Expat XmlDeclHandler: emit an XML_DECL event.
    self._enqueue(XML_DECL, (version, encoding, standalone))
def _handle_doctype(self, name, sysid, pubid, has_internal_subset):
    # Expat StartDoctypeDeclHandler.  Note the deliberate reordering:
    # expat passes (sysid, pubid) but DOCTYPE events carry (pubid, sysid).
    self._enqueue(DOCTYPE, (name, pubid, sysid))
def _handle_start_ns(self, prefix, uri):
    # Expat StartNamespaceDeclHandler; the default namespace has no
    # prefix, which is normalized to the empty string.
    self._enqueue(START_NS, (prefix or '', uri))
def _handle_end_ns(self, prefix):
    # Expat EndNamespaceDeclHandler; None prefix normalized to ''.
    self._enqueue(END_NS, prefix or '')
def _handle_start_cdata(self):
    # Expat StartCdataSectionHandler.
    self._enqueue(START_CDATA)
def _handle_end_cdata(self):
    # Expat EndCdataSectionHandler.
    self._enqueue(END_CDATA)
def _handle_pi(self, target, data):
    # Expat ProcessingInstructionHandler: emit a PI event.
    self._enqueue(PI, (target, data))
def _handle_comment(self, text):
    # Expat CommentHandler: emit a COMMENT event.
    self._enqueue(COMMENT, text)
def _handle_other(self, text):
    """Expat DefaultHandler: resolve entity references that are not part
    of XML itself (e.g. ``&nbsp;``); anything else is silently dropped.
    """
    if not text.startswith('&'):
        return
    # Strip the surrounding '&' and ';' to get the entity name.
    name = text[1:-1]
    try:
        codepoint = entities.name2codepoint[name]
    except KeyError:
        # Unknown entity: report it as a well-formedness error with the
        # same attributes an expat error would carry.
        filename, lineno, offset = self._getpos()
        error = expat.error('undefined entity "%s": line %d, column %d'
                            % (text, lineno, offset))
        error.code = expat.errors.XML_ERROR_UNDEFINED_ENTITY
        error.lineno = lineno
        error.offset = offset
        raise error
    self._enqueue(TEXT, unichr(codepoint))
def XML(text):
    """Parse the given XML source and return a markup stream.

    Unlike with `XMLParser`, the returned stream is reusable, meaning it can
    be iterated over multiple times:

    >>> xml = XML('<doc><elem>Foo</elem><elem>Bar</elem></doc>')
    >>> print(xml)
    <doc><elem>Foo</elem><elem>Bar</elem></doc>
    >>> print(xml.select('elem'))
    <elem>Foo</elem><elem>Bar</elem>
    >>> print(xml.select('elem/text()'))
    FooBar

    :param text: the XML source
    :return: the parsed XML event stream
    :raises ParseError: if the XML text is not well-formed
    """
    # Materializing the parser output makes the stream re-iterable.
    events = list(XMLParser(StringIO(text)))
    return Stream(events)
class HTMLParser(html.HTMLParser, object):
    """Parser for HTML input based on the Python `HTMLParser` module.

    This class provides the same interface for generating stream events as
    `XMLParser`, and attempts to automatically balance tags.

    The parsing is initiated by iterating over the parser object:

    >>> parser = HTMLParser(StringIO('<UL compact><LI>Foo</UL>'))
    >>> for kind, data, pos in parser:
    ...     print('%s %s' % (kind, data))
    START (QName('ul'), Attrs([(QName('compact'), u'compact')]))
    START (QName('li'), Attrs())
    TEXT Foo
    END li
    END ul
    """

    # HTML elements that are empty by definition and therefore never have a
    # closing tag.
    _EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame',
                              'hr', 'img', 'input', 'isindex', 'link', 'meta',
                              'param'])

    def __init__(self, source, filename=None, encoding='utf-8'):
        """Initialize the parser for the given HTML input.

        :param source: the HTML text as a file-like object
        :param filename: the name of the file, if known
        :param encoding: encoding of the file; ignored if the input is unicode
        """
        html.HTMLParser.__init__(self)
        self.source = source
        self.filename = filename
        self.encoding = encoding
        self._queue = []      # buffered markup events
        self._open_tags = []  # stack of tags still awaiting an end tag

    def parse(self):
        """Generator that parses the HTML source, yielding markup events.

        :return: a markup event stream
        :raises ParseError: if the HTML text is not well formed
        """
        def _generate():
            try:
                bufsize = 4 * 1024 # 4K
                done = False
                while 1:
                    # Feed the underlying HTMLParser until it produces
                    # events or the input is exhausted.
                    while not done and len(self._queue) == 0:
                        data = self.source.read(bufsize)
                        if data == '': # end of data
                            self.close()
                            done = True
                        else:
                            self.feed(data)
                    for kind, data, pos in self._queue:
                        yield kind, data, pos
                    self._queue = []
                    if done:
                        # Auto-close any elements left open, innermost
                        # first, to keep the event stream balanced.
                        open_tags = self._open_tags
                        open_tags.reverse()
                        for tag in open_tags:
                            yield END, QName(tag), pos
                        break
            except html.HTMLParseError, e:
                msg = '%s: line %d, column %d' % (e.msg, e.lineno, e.offset)
                raise ParseError(msg, self.filename, e.lineno, e.offset)
        # Merge adjacent TEXT events before handing the stream to consumers.
        return Stream(_generate()).filter(_coalesce)

    def __iter__(self):
        return iter(self.parse())

    def _enqueue(self, kind, data, pos=None):
        # Append a (kind, data, pos) event to the pending event queue.
        if pos is None:
            pos = self._getpos()
        self._queue.append((kind, data, pos))

    def _getpos(self):
        # Current (filename, line, column) as reported by HTMLParser.
        lineno, column = self.getpos()
        return (self.filename, lineno, column)

    def handle_starttag(self, tag, attrib):
        fixed_attrib = []
        for name, value in attrib: # Fixup minimized attributes
            if value is None:
                # Minimized attribute (e.g. ``compact``): use the name as
                # its own value, per SGML convention.
                value = unicode(name)
            elif not isinstance(value, unicode):
                value = value.decode(self.encoding, 'replace')
            fixed_attrib.append((QName(name), stripentities(value)))
        self._enqueue(START, (QName(tag), Attrs(fixed_attrib)))
        if tag in self._EMPTY_ELEMS:
            # Empty element: synthesize the END event immediately.
            self._enqueue(END, QName(tag))
        else:
            self._open_tags.append(tag)

    def handle_endtag(self, tag):
        if tag not in self._EMPTY_ELEMS:
            # Close intervening unclosed elements until we reach the one
            # that matches this end tag.
            while self._open_tags:
                open_tag = self._open_tags.pop()
                self._enqueue(END, QName(open_tag))
                if open_tag.lower() == tag.lower():
                    break

    def handle_data(self, text):
        if not isinstance(text, unicode):
            text = text.decode(self.encoding, 'replace')
        self._enqueue(TEXT, text)

    def handle_charref(self, name):
        # Numeric character reference, decimal or hex (&#64; / &#x40;).
        if name.lower().startswith('x'):
            text = unichr(int(name[1:], 16))
        else:
            text = unichr(int(name))
        self._enqueue(TEXT, text)

    def handle_entityref(self, name):
        # Named entity reference; unknown names pass through verbatim.
        try:
            text = unichr(entities.name2codepoint[name])
        except KeyError:
            text = '&%s;' % name
        self._enqueue(TEXT, text)

    def handle_pi(self, data):
        target, data = data.split(None, 1)
        if data.endswith('?'):
            # Strip the trailing '?' of an XML-style PI (<?target data?>).
            data = data[:-1]
        self._enqueue(PI, (target.strip(), data.strip()))

    def handle_comment(self, text):
        self._enqueue(COMMENT, text)
def HTML(text, encoding='utf-8'):
    """Parse the given HTML source and return a markup stream.

    Unlike with `HTMLParser`, the returned stream is reusable, meaning it can
    be iterated over multiple times:

    >>> html = HTML('<body><h1>Foo</h1></body>')
    >>> print(html)
    <body><h1>Foo</h1></body>
    >>> print(html.select('h1'))
    <h1>Foo</h1>
    >>> print(html.select('h1/text()'))
    Foo

    :param text: the HTML source
    :return: the parsed XML event stream
    :raises ParseError: if the HTML text is not well-formed, and error recovery
                        fails
    """
    # Materializing the parser output makes the stream re-iterable.
    parser = HTMLParser(StringIO(text), encoding=encoding)
    return Stream(list(parser))
def _coalesce(stream):
    """Coalesces adjacent TEXT events into a single event."""
    pending = []     # accumulated text fragments
    pending_pos = None  # position of the first fragment in the run
    # A trailing sentinel guarantees the final run of text gets flushed.
    for kind, data, pos in chain(stream, [(None, None, None)]):
        if kind is TEXT:
            if pending_pos is None:
                pending_pos = pos
            pending.append(data)
        else:
            if pending:
                yield TEXT, ''.join(pending), pending_pos
                pending = []
                pending_pos = None
            if kind:
                yield kind, data, pos
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""A filter for functional-style transformations of markup streams.
The `Transformer` filter provides a variety of transformations that can be
applied to parts of streams that match given XPath expressions. These
transformations can be chained to achieve results that would be comparatively
tedious to achieve by writing stream filters by hand. The approach of chaining
node selection and transformation has been inspired by the `jQuery`_ Javascript
library.
.. _`jQuery`: http://jquery.com/
For example, the following transformation removes the ``<title>`` element from
the ``<head>`` of the input document:
>>> from genshi.builder import tag
>>> html = HTML('''<html>
... <head><title>Some Title</title></head>
... <body>
... Some <em>body</em> text.
... </body>
... </html>''')
>>> print(html | Transformer('body/em').map(unicode.upper, TEXT)
... .unwrap().wrap(tag.u))
<html>
<head><title>Some Title</title></head>
<body>
Some <u>BODY</u> text.
</body>
</html>
The ``Transformer`` supports a large number of useful transformations out of the
box, but custom transformations can be added easily.
:since: version 0.5
"""
import re
import sys
from genshi.builder import Element
from genshi.core import Stream, Attrs, QName, TEXT, START, END, _ensure, Markup
from genshi.path import Path
__all__ = ['Transformer', 'StreamBuffer', 'InjectorTransformation', 'ENTER',
'EXIT', 'INSIDE', 'OUTSIDE', 'BREAK']
class TransformMark(str):
    """A mark on a transformation stream.

    Marks are interned: constructing the same value twice yields the very
    same object, so marks can be compared with ``is``.
    """
    __slots__ = []
    _instances = {}

    def __new__(cls, val):
        candidate = str.__new__(cls, val)
        # setdefault keeps the first instance ever created for this value.
        return cls._instances.setdefault(val, candidate)
ENTER = TransformMark('ENTER')
"""Stream augmentation mark indicating that a selected element is being
entered."""
INSIDE = TransformMark('INSIDE')
"""Stream augmentation mark indicating that processing is currently inside a
selected element."""
OUTSIDE = TransformMark('OUTSIDE')
"""Stream augmentation mark indicating that a match occurred outside a selected
element."""
ATTR = TransformMark('ATTR')
"""Stream augmentation mark indicating a selected element attribute."""
EXIT = TransformMark('EXIT')
"""Stream augmentation mark indicating that a selected element is being
exited."""
BREAK = TransformMark('BREAK')
"""Stream augmentation mark indicating a break between two otherwise contiguous
blocks of marked events.
This is used primarily by the cut() transform to provide later transforms with
an opportunity to operate on the cut buffer.
"""
class PushBackStream(object):
    """Allows a single event to be pushed back onto the stream and
    re-consumed.

    At most one event may be pending at a time; pushing while another event
    is pending is a programming error (asserted).
    """

    def __init__(self, stream):
        self.stream = iter(stream)
        self.peek = None  # the single pushed-back event, or None

    def push(self, event):
        """Push ``event`` back so it is the next event yielded."""
        assert self.peek is None
        self.peek = event

    def __iter__(self):
        while True:
            if self.peek is not None:
                peek = self.peek
                self.peek = None
                yield peek
            else:
                try:
                    # next() builtin (Python 2.6+) instead of the Py2-only
                    # stream.next(), for Python 3 compatibility.
                    event = next(self.stream)
                    yield event
                except StopIteration:
                    if self.peek is None:
                        # Underlying stream exhausted with nothing pushed
                        # back: terminate.  Use ``return`` rather than
                        # re-raising StopIteration, which would be turned
                        # into a RuntimeError inside a generator under
                        # PEP 479 (Python 3.7+).
                        return
                    # else: an event was pushed during the final yield;
                    # loop around to deliver it.
class Transformer(object):
    """Stream filter that can apply a variety of different transformations to
    a stream.

    This is achieved by selecting the events to be transformed using XPath,
    then applying the transformations to the events matched by the path
    expression. Each marked event is in the form (mark, (kind, data, pos)),
    where mark can be any of `ENTER`, `INSIDE`, `EXIT`, `OUTSIDE`, or `None`.

    The first three marks match `START` and `END` events, and any events
    contained `INSIDE` any selected XML/HTML element. A non-element match
    outside a `START`/`END` container (e.g. ``text()``) will yield an `OUTSIDE`
    mark.

    >>> html = HTML('<html><head><title>Some Title</title></head>'
    ...             '<body>Some <em>body</em> text.</body></html>')

    Transformations act on selected stream events matching an XPath expression.
    Here's an example of removing some markup (the title, in this case)
    selected by an expression:

    >>> print(html | Transformer('head/title').remove())
    <html><head/><body>Some <em>body</em> text.</body></html>

    Inserted content can be passed in the form of a string, or a markup event
    stream, which includes streams generated programmatically via the
    `builder` module:

    >>> from genshi.builder import tag
    >>> print(html | Transformer('body').prepend(tag.h1('Document Title')))
    <html><head><title>Some Title</title></head><body><h1>Document
    Title</h1>Some <em>body</em> text.</body></html>

    Each XPath expression determines the set of tags that will be acted upon by
    subsequent transformations. In this example we select the ``<title>`` text,
    copy it into a buffer, then select the ``<body>`` element and paste the
    copied text into the body as ``<h1>`` enclosed text:

    >>> buffer = StreamBuffer()
    >>> print(html | Transformer('head/title/text()').copy(buffer)
    ...     .end().select('body').prepend(tag.h1(buffer)))
    <html><head><title>Some Title</title></head><body><h1>Some Title</h1>Some
    <em>body</em> text.</body></html>

    Transformations can also be assigned and reused, although care must be
    taken when using buffers, to ensure that buffers are cleared between
    transforms:

    >>> emphasis = Transformer('body//em').attr('class', 'emphasis')
    >>> print(html | emphasis)
    <html><head><title>Some Title</title></head><body>Some <em
    class="emphasis">body</em> text.</body></html>
    """

    __slots__ = ['transforms']

    def __init__(self, path='.'):
        """Construct a new transformation filter.

        :param path: an XPath expression (as string) or a `Path` instance
        """
        self.transforms = [SelectTransformation(path)]

    def __call__(self, stream, keep_marks=False):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        :param keep_marks: Do not strip transformer selection marks from the
                           stream. Useful for testing.
        :return: the transformed stream
        :rtype: `Stream`
        """
        # Mark the raw stream, then thread it through each transformation
        # link in order.
        transforms = self._mark(stream)
        for link in self.transforms:
            transforms = link(transforms)
        if not keep_marks:
            transforms = self._unmark(transforms)
        return Stream(transforms,
                      serializer=getattr(stream, 'serializer', None))

    def apply(self, function):
        """Apply a transformation to the stream.

        Transformations can be chained, similar to stream filters. Any callable
        accepting a marked stream can be used as a transform.

        As an example, here is a simple `TEXT` event upper-casing transform:

        >>> def upper(stream):
        ...     for mark, (kind, data, pos) in stream:
        ...         if mark and kind is TEXT:
        ...             yield mark, (kind, data.upper(), pos)
        ...         else:
        ...             yield mark, (kind, data, pos)
        >>> short_stream = HTML('<body>Some <em>test</em> text</body>')
        >>> print(short_stream | Transformer('.//em/text()').apply(upper))
        <body>Some <em>TEST</em> text</body>
        """
        # Return a new Transformer rather than mutating self, so partial
        # chains can be reused safely.
        transformer = Transformer()
        transformer.transforms = self.transforms[:]
        if isinstance(function, Transformer):
            transformer.transforms.extend(function.transforms)
        else:
            transformer.transforms.append(function)
        return transformer

    #{ Selection operations

    def select(self, path):
        """Mark events matching the given XPath expression, within the current
        selection.

        >>> html = HTML('<body>Some <em>test</em> text</body>')
        >>> print(html | Transformer().select('.//em').trace())
        (None, ('START', (QName('body'), Attrs()), (None, 1, 0)))
        (None, ('TEXT', u'Some ', (None, 1, 6)))
        ('ENTER', ('START', (QName('em'), Attrs()), (None, 1, 11)))
        ('INSIDE', ('TEXT', u'test', (None, 1, 15)))
        ('EXIT', ('END', QName('em'), (None, 1, 19)))
        (None, ('TEXT', u' text', (None, 1, 24)))
        (None, ('END', QName('body'), (None, 1, 29)))
        <body>Some <em>test</em> text</body>

        :param path: an XPath expression (as string) or a `Path` instance
        :return: the stream augmented by transformation marks
        :rtype: `Transformer`
        """
        return self.apply(SelectTransformation(path))

    def invert(self):
        """Invert selection so that marked events become unmarked, and vice
        versa.

        Specifically, all marks are converted to null marks, and all null marks
        are converted to OUTSIDE marks.

        >>> html = HTML('<body>Some <em>test</em> text</body>')
        >>> print(html | Transformer('//em').invert().trace())
        ('OUTSIDE', ('START', (QName('body'), Attrs()), (None, 1, 0)))
        ('OUTSIDE', ('TEXT', u'Some ', (None, 1, 6)))
        (None, ('START', (QName('em'), Attrs()), (None, 1, 11)))
        (None, ('TEXT', u'test', (None, 1, 15)))
        (None, ('END', QName('em'), (None, 1, 19)))
        ('OUTSIDE', ('TEXT', u' text', (None, 1, 24)))
        ('OUTSIDE', ('END', QName('body'), (None, 1, 29)))
        <body>Some <em>test</em> text</body>

        :rtype: `Transformer`
        """
        return self.apply(InvertTransformation())

    def end(self):
        """End current selection, allowing all events to be selected.

        Example:

        >>> html = HTML('<body>Some <em>test</em> text</body>')
        >>> print(html | Transformer('//em').end().trace())
        ('OUTSIDE', ('START', (QName('body'), Attrs()), (None, 1, 0)))
        ('OUTSIDE', ('TEXT', u'Some ', (None, 1, 6)))
        ('OUTSIDE', ('START', (QName('em'), Attrs()), (None, 1, 11)))
        ('OUTSIDE', ('TEXT', u'test', (None, 1, 15)))
        ('OUTSIDE', ('END', QName('em'), (None, 1, 19)))
        ('OUTSIDE', ('TEXT', u' text', (None, 1, 24)))
        ('OUTSIDE', ('END', QName('body'), (None, 1, 29)))
        <body>Some <em>test</em> text</body>

        :return: the stream augmented by transformation marks
        :rtype: `Transformer`
        """
        return self.apply(EndTransformation())

    #{ Deletion operations

    def empty(self):
        """Empty selected elements of all content.

        Example:

        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em>body</em> text.</body></html>')
        >>> print(html | Transformer('.//em').empty())
        <html><head><title>Some Title</title></head><body>Some <em/>
        text.</body></html>

        :rtype: `Transformer`
        """
        return self.apply(EmptyTransformation())

    def remove(self):
        """Remove selection from the stream.

        Example:

        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em>body</em> text.</body></html>')
        >>> print(html | Transformer('.//em').remove())
        <html><head><title>Some Title</title></head><body>Some
        text.</body></html>

        :rtype: `Transformer`
        """
        return self.apply(RemoveTransformation())

    #{ Direct element operations

    def unwrap(self):
        """Remove outermost enclosing elements from selection.

        Example:

        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em>body</em> text.</body></html>')
        >>> print(html | Transformer('.//em').unwrap())
        <html><head><title>Some Title</title></head><body>Some body
        text.</body></html>

        :rtype: `Transformer`
        """
        return self.apply(UnwrapTransformation())

    def wrap(self, element):
        """Wrap selection in an element.

        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em>body</em> text.</body></html>')
        >>> print(html | Transformer('.//em').wrap('strong'))
        <html><head><title>Some Title</title></head><body>Some
        <strong><em>body</em></strong> text.</body></html>

        :param element: either a tag name (as string) or an `Element` object
        :rtype: `Transformer`
        """
        return self.apply(WrapTransformation(element))

    #{ Content insertion operations

    def replace(self, content):
        """Replace selection with content.

        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em>body</em> text.</body></html>')
        >>> print(html | Transformer('.//title/text()').replace('New Title'))
        <html><head><title>New Title</title></head><body>Some <em>body</em>
        text.</body></html>

        :param content: Either a callable, an iterable of events, or a string
                        to insert.
        :rtype: `Transformer`
        """
        return self.apply(ReplaceTransformation(content))

    def before(self, content):
        """Insert content before selection.

        In this example we insert the word 'emphasised' before the <em> opening
        tag:

        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em>body</em> text.</body></html>')
        >>> print(html | Transformer('.//em').before('emphasised '))
        <html><head><title>Some Title</title></head><body>Some emphasised
        <em>body</em> text.</body></html>

        :param content: Either a callable, an iterable of events, or a string
                        to insert.
        :rtype: `Transformer`
        """
        return self.apply(BeforeTransformation(content))

    def after(self, content):
        """Insert content after selection.

        Here, we insert some text after the </em> closing tag:

        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em>body</em> text.</body></html>')
        >>> print(html | Transformer('.//em').after(' rock'))
        <html><head><title>Some Title</title></head><body>Some <em>body</em>
        rock text.</body></html>

        :param content: Either a callable, an iterable of events, or a string
                        to insert.
        :rtype: `Transformer`
        """
        return self.apply(AfterTransformation(content))

    def prepend(self, content):
        """Insert content after the ENTER event of the selection.

        Inserting some new text at the start of the <body>:

        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em>body</em> text.</body></html>')
        >>> print(html | Transformer('.//body').prepend('Some new body text. '))
        <html><head><title>Some Title</title></head><body>Some new body text.
        Some <em>body</em> text.</body></html>

        :param content: Either a callable, an iterable of events, or a string
                        to insert.
        :rtype: `Transformer`
        """
        return self.apply(PrependTransformation(content))

    def append(self, content):
        """Insert content before the END event of the selection.

        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em>body</em> text.</body></html>')
        >>> print(html | Transformer('.//body').append(' Some new body text.'))
        <html><head><title>Some Title</title></head><body>Some <em>body</em>
        text. Some new body text.</body></html>

        :param content: Either a callable, an iterable of events, or a string
                        to insert.
        :rtype: `Transformer`
        """
        return self.apply(AppendTransformation(content))

    #{ Attribute manipulation

    def attr(self, name, value):
        """Add, replace or delete an attribute on selected elements.

        If `value` evaluates to `None` the attribute will be deleted from the
        element:

        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em class="before">body</em> <em>text</em>.</body>'
        ...             '</html>')
        >>> print(html | Transformer('body/em').attr('class', None))
        <html><head><title>Some Title</title></head><body>Some <em>body</em>
        <em>text</em>.</body></html>

        Otherwise the attribute will be set to `value`:

        >>> print(html | Transformer('body/em').attr('class', 'emphasis'))
        <html><head><title>Some Title</title></head><body>Some <em
        class="emphasis">body</em> <em class="emphasis">text</em>.</body></html>

        If `value` is a callable it will be called with the attribute name and
        the `START` event for the matching element. Its return value will then
        be used to set the attribute:

        >>> def print_attr(name, event):
        ...     attrs = event[1][1]
        ...     print(attrs)
        ...     return attrs.get(name)
        >>> print(html | Transformer('body/em').attr('class', print_attr))
        Attrs([(QName('class'), u'before')])
        Attrs()
        <html><head><title>Some Title</title></head><body>Some <em
        class="before">body</em> <em>text</em>.</body></html>

        :param name: the name of the attribute
        :param value: the value that should be set for the attribute.
        :rtype: `Transformer`
        """
        return self.apply(AttrTransformation(name, value))

    #{ Buffer operations

    def copy(self, buffer, accumulate=False):
        """Copy selection into buffer.

        The buffer is replaced by each *contiguous* selection before being passed
        to the next transformation. If accumulate=True, further selections will
        be appended to the buffer rather than replacing it.

        >>> from genshi.builder import tag
        >>> buffer = StreamBuffer()
        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em>body</em> text.</body></html>')
        >>> print(html | Transformer('head/title/text()').copy(buffer)
        ...     .end().select('body').prepend(tag.h1(buffer)))
        <html><head><title>Some Title</title></head><body><h1>Some
        Title</h1>Some <em>body</em> text.</body></html>

        This example illustrates that only a single contiguous selection will
        be buffered:

        >>> print(html | Transformer('head/title/text()').copy(buffer)
        ...     .end().select('body/em').copy(buffer).end().select('body')
        ...     .prepend(tag.h1(buffer)))
        <html><head><title>Some Title</title></head><body><h1>Some
        Title</h1>Some <em>body</em> text.</body></html>
        >>> print(buffer)
        <em>body</em>

        Element attributes can also be copied for later use:

        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body><em>Some</em> <em class="before">body</em>'
        ...             '<em>text</em>.</body></html>')
        >>> buffer = StreamBuffer()
        >>> def apply_attr(name, entry):
        ...     return list(buffer)[0][1][1].get('class')
        >>> print(html | Transformer('body/em[@class]/@class').copy(buffer)
        ...     .end().buffer().select('body/em[not(@class)]')
        ...     .attr('class', apply_attr))
        <html><head><title>Some Title</title></head><body><em
        class="before">Some</em> <em class="before">body</em><em
        class="before">text</em>.</body></html>

        :param buffer: the `StreamBuffer` in which the selection should be
                       stored
        :rtype: `Transformer`
        :note: Copy (and cut) copy each individual selected object into the
               buffer before passing to the next transform. For example, the
               XPath ``*|text()`` will select all elements and text, each
               instance of which will be copied to the buffer individually
               before passing to the next transform. This has implications for
               how ``StreamBuffer`` objects can be used, so some
               experimentation may be required.
        """
        return self.apply(CopyTransformation(buffer, accumulate))

    def cut(self, buffer, accumulate=False):
        """Copy selection into buffer and remove the selection from the stream.

        >>> from genshi.builder import tag
        >>> buffer = StreamBuffer()
        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em>body</em> text.</body></html>')
        >>> print(html | Transformer('.//em/text()').cut(buffer)
        ...     .end().select('.//em').after(tag.h1(buffer)))
        <html><head><title>Some Title</title></head><body>Some
        <em/><h1>body</h1> text.</body></html>

        Specifying accumulate=True, appends all selected intervals onto the
        buffer. Combining this with the .buffer() operation allows us operate
        on all copied events rather than per-segment. See the documentation on
        buffer() for more information.

        :param buffer: the `StreamBuffer` in which the selection should be
                       stored
        :rtype: `Transformer`
        :note: this transformation will buffer the entire input stream
        """
        return self.apply(CutTransformation(buffer, accumulate))

    def buffer(self):
        """Buffer the entire stream (can consume a considerable amount of
        memory).

        Useful in conjunction with copy(accumulate=True) and
        cut(accumulate=True) to ensure that all marked events in the entire
        stream are copied to the buffer before further transformations are
        applied.

        For example, to move all <note> elements inside a <notes> tag at the
        top of the document:

        >>> doc = HTML('<doc><notes></notes><body>Some <note>one</note> '
        ...            'text <note>two</note>.</body></doc>')
        >>> buffer = StreamBuffer()
        >>> print(doc | Transformer('body/note').cut(buffer, accumulate=True)
        ...     .end().buffer().select('notes').prepend(buffer))
        <doc><notes><note>one</note><note>two</note></notes><body>Some text
        .</body></doc>
        """
        # ``list`` consumes (and thereby buffers) the whole marked stream.
        return self.apply(list)

    #{ Miscellaneous operations

    def filter(self, filter):
        """Apply a normal stream filter to the selection. The filter is called
        once for each contiguous block of marked events.

        >>> from genshi.filters.html import HTMLSanitizer
        >>> html = HTML('<html><body>Some text<script>alert(document.cookie)'
        ...             '</script> and some more text</body></html>')
        >>> print(html | Transformer('body/*').filter(HTMLSanitizer()))
        <html><body>Some text and some more text</body></html>

        :param filter: The stream filter to apply.
        :rtype: `Transformer`
        """
        return self.apply(FilterTransformation(filter))

    def map(self, function, kind):
        """Applies a function to the ``data`` element of events of ``kind`` in
        the selection.

        >>> html = HTML('<html><head><title>Some Title</title></head>'
        ...             '<body>Some <em>body</em> text.</body></html>')
        >>> print(html | Transformer('head/title').map(unicode.upper, TEXT))
        <html><head><title>SOME TITLE</title></head><body>Some <em>body</em>
        text.</body></html>

        :param function: the function to apply
        :param kind: the kind of event the function should be applied to
        :rtype: `Transformer`
        """
        return self.apply(MapTransformation(function, kind))

    def substitute(self, pattern, replace, count=1):
        """Replace text matching a regular expression.

        Refer to the documentation for ``re.sub()`` for details.

        >>> html = HTML('<html><body>Some text, some more text and '
        ...             '<b>some bold text</b>\\n'
        ...             '<i>some italicised text</i></body></html>')
        >>> print(html | Transformer('body/b').substitute('(?i)some', 'SOME'))
        <html><body>Some text, some more text and <b>SOME bold text</b>
        <i>some italicised text</i></body></html>
        >>> tags = tag.html(tag.body('Some text, some more text and\\n',
        ...                          Markup('<b>some bold text</b>')))
        >>> print(tags.generate() | Transformer('body').substitute(
        ...     '(?i)some', 'SOME'))
        <html><body>SOME text, some more text and
        <b>SOME bold text</b></body></html>

        :param pattern: A regular expression object or string.
        :param replace: Replacement pattern.
        :param count: Number of replacements to make in each text fragment.
        :rtype: `Transformer`
        """
        return self.apply(SubstituteTransformation(pattern, replace, count))

    def rename(self, name):
        """Rename matching elements.

        >>> html = HTML('<html><body>Some text, some more text and '
        ...             '<b>some bold text</b></body></html>')
        >>> print(html | Transformer('body/b').rename('strong'))
        <html><body>Some text, some more text and <strong>some bold text</strong></body></html>

        :param name: the new name for the matched elements
        :rtype: `Transformer`
        """
        return self.apply(RenameTransformation(name))

    def trace(self, prefix='', fileobj=None):
        """Print events as they pass through the transform.

        >>> html = HTML('<body>Some <em>test</em> text</body>')
        >>> print(html | Transformer('em').trace())
        (None, ('START', (QName('body'), Attrs()), (None, 1, 0)))
        (None, ('TEXT', u'Some ', (None, 1, 6)))
        ('ENTER', ('START', (QName('em'), Attrs()), (None, 1, 11)))
        ('INSIDE', ('TEXT', u'test', (None, 1, 15)))
        ('EXIT', ('END', QName('em'), (None, 1, 19)))
        (None, ('TEXT', u' text', (None, 1, 24)))
        (None, ('END', QName('body'), (None, 1, 29)))
        <body>Some <em>test</em> text</body>

        :param prefix: a string to prefix each event with in the output
        :param fileobj: the writable file-like object to write to; defaults to
                        the standard output stream
        :rtype: `Transformer`
        """
        return self.apply(TraceTransformation(prefix, fileobj=fileobj))

    # Internal methods

    def _mark(self, stream):
        # Initially mark every event as OUTSIDE the (empty) selection.
        for event in stream:
            yield OUTSIDE, event

    def _unmark(self, stream):
        # Strip marks from the stream, dropping purely internal events:
        # attribute selections (ATTR), cut-buffer breaks (BREAK), and
        # placeholder events with a kind of None.
        for mark, event in stream:
            kind = event[0]
            if not (kind is None or kind is ATTR or kind is BREAK):
                yield event
class SelectTransformation(object):
    """Select and mark events that match an XPath expression."""

    def __init__(self, path):
        """Create selection.

        :param path: an XPath expression (as string) or a `Path` object
        """
        if not isinstance(path, Path):
            path = Path(path)
        self.path = path

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        namespaces = {}
        variables = {}
        test = self.path.test()
        stream = iter(stream)
        next = stream.next
        for mark, event in stream:
            # Events unselected upstream (mark None) pass through untested.
            if mark is None:
                yield mark, event
                continue
            result = test(event, namespaces, variables)
            # XXX This is effectively genshi.core._ensure() for transform
            # streams.
            if result is True:
                if event[0] is START:
                    yield ENTER, event
                    # Consume up to the matching END, tracking nesting
                    # depth; inner events are INSIDE, the closing one EXIT.
                    depth = 1
                    while depth > 0:
                        mark, subevent = next()
                        if subevent[0] is START:
                            depth += 1
                        elif subevent[0] is END:
                            depth -= 1
                        if depth == 0:
                            yield EXIT, subevent
                        else:
                            yield INSIDE, subevent
                        # Keep the path test's internal state in sync.
                        test(subevent, namespaces, variables, updateonly=True)
                else:
                    yield OUTSIDE, event
            elif isinstance(result, Attrs):
                # XXX Selected *attributes* are given a "kind" of None to
                # indicate they are not really part of the stream.
                yield ATTR, (ATTR, (QName(event[1][0] + '@*'), result), event[2])
                yield None, event
            elif isinstance(result, tuple):
                yield OUTSIDE, result
            elif result:
                # XXX Assume everything else is "text"?
                yield None, (TEXT, unicode(result), (None, -1, -1))
            else:
                yield None, event
class InvertTransformation(object):
    """Invert selection so that marked events become unmarked, and vice versa.

    Specifically, all input marks are converted to null marks, and all input
    null marks are converted to OUTSIDE marks.
    """

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        for mark, event in stream:
            if mark:
                mark = None
            else:
                mark = OUTSIDE
            yield mark, event
class EndTransformation(object):
    """End the current selection: every event becomes OUTSIDE-marked."""

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        for _mark, event in stream:
            yield OUTSIDE, event
class EmptyTransformation(object):
    """Empty selected elements of all content."""

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        for mark, event in stream:
            yield mark, event
            if mark is not ENTER:
                continue
            # Swallow everything inside the selected element, emitting
            # only the closing EXIT event.
            for inner_mark, inner_event in stream:
                if inner_mark is EXIT:
                    yield inner_mark, inner_event
                    break
class RemoveTransformation(object):
    """Remove selection from the stream."""

    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        for mark, event in stream:
            # Only unmarked (unselected) events survive.
            if mark is not None:
                continue
            yield mark, event
class UnwrapTransformation(object):
    """Remove the outermost enclosing elements from the selection."""
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        The ENTER/EXIT events (the selected elements' own tags) are dropped;
        their content passes through unchanged.

        :param stream: the marked event stream to filter
        """
        for mark, event in stream:
            if mark in (ENTER, EXIT):
                continue
            yield mark, event
class WrapTransformation(object):
    """Wrap selection in an element."""
    def __init__(self, element):
        # Accept either a ready-made builder Element or a tag name/QName
        # from which to build one.
        if isinstance(element, Element):
            self.element = element
        else:
            self.element = Element(element)
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        Each contiguous selection is surrounded by the wrapper element's
        events: everything but its closing event goes before the selection,
        the closing event after it.

        :param stream: the marked event stream to filter
        """
        for mark, event in stream:
            if mark:
                # Regenerate the wrapper events for every selection.
                element = list(self.element.generate())
                for prefix in element[:-1]:
                    yield None, prefix
                yield mark, event
                start = mark
                stopped = False
                for mark, event in stream:
                    # Element selection: pass through until the matching EXIT.
                    if start is ENTER and mark is EXIT:
                        yield mark, event
                        stopped = True
                        break
                    # Non-element selection ends at the first unmarked event;
                    # that event must be emitted after the wrapper closes.
                    if not mark:
                        break
                    yield mark, event
                else:
                    # Stream exhausted inside the selection.
                    stopped = True
                # Emit the wrapper's closing event.
                yield None, element[-1]
                if not stopped:
                    # Re-emit the unmarked event that terminated the selection.
                    yield mark, event
            else:
                yield mark, event
class TraceTransformation(object):
    """Print events as they pass through the transform."""
    def __init__(self, prefix='', fileobj=None):
        """Trace constructor.

        :param prefix: text to prefix each traced line with.
        :param fileobj: the writable file-like object to write to
        """
        self.prefix = prefix
        self.fileobj = fileobj or sys.stdout
    def __call__(self, stream):
        """Write each event to the trace file, then pass it on unchanged.

        :param stream: the marked event stream to filter
        """
        write = self.fileobj.write
        prefix = self.prefix
        for event in stream:
            write('%s%s\n' % (prefix, event))
            yield event
class FilterTransformation(object):
    """Apply a normal stream filter to the selection. The filter is called once
    for each selection."""
    def __init__(self, filter):
        """Create the transform.

        :param filter: The stream filter to apply.
        """
        self.filter = filter
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        def flush(queue):
            # Run the wrapped filter over the buffered events and re-emit
            # the result as OUTSIDE events, then clear the queue in place.
            if queue:
                for event in self.filter(queue):
                    yield OUTSIDE, event
                del queue[:]
        queue = []
        for mark, event in stream:
            if mark is ENTER:
                # Element selection: buffer through the matching EXIT and
                # filter the whole element at once.
                queue.append(event)
                for mark, event in stream:
                    queue.append(event)
                    if mark is EXIT:
                        break
                for queue_event in flush(queue):
                    yield queue_event
            elif mark is OUTSIDE:
                # A contiguous run of OUTSIDE events forms one selection.
                stopped = False
                queue.append(event)
                for mark, event in stream:
                    if mark is not OUTSIDE:
                        break
                    queue.append(event)
                else:
                    # Stream exhausted; no terminating event to re-emit.
                    stopped = True
                for queue_event in flush(queue):
                    yield queue_event
                if not stopped:
                    # Re-emit the event that ended the OUTSIDE run.
                    yield mark, event
            else:
                yield mark, event
        # Flush anything still buffered when the stream ends.
        for queue_event in flush(queue):
            yield queue_event
class MapTransformation(object):
    """Apply a function to the `data` element of events of ``kind`` in the
    selection.
    """
    def __init__(self, function, kind):
        """Create the transform.

        :param function: the function to apply; the function must take one
                         argument, the `data` element of each selected event
        :param kind: the stream event ``kind`` to apply the `function` to;
                     ``None`` applies it to every selected event
        """
        self.function = function
        self.kind = kind
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        function = self.function
        wanted = self.kind
        for mark, (kind, data, pos) in stream:
            if mark and (wanted is None or wanted == kind):
                data = function(data)
            yield mark, (kind, data, pos)
class SubstituteTransformation(object):
    """Replace text matching a regular expression.

    Refer to the documentation for ``re.sub()`` for details.
    """
    def __init__(self, pattern, replace, count=0):
        """Create the transform.

        :param pattern: A regular expression object, or string.
        :param replace: Replacement pattern.
        :param count: Number of replacements to make in each text fragment.
        """
        if isinstance(pattern, basestring):
            pattern = re.compile(pattern)
        self.pattern = pattern
        self.count = count
        self.replace = replace
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        sub = self.pattern.sub
        for mark, (kind, data, pos) in stream:
            if mark is not None and kind is TEXT:
                replaced = sub(self.replace, data, self.count)
                # Preserve the Markup (safe string) type of the original text.
                data = Markup(replaced) if isinstance(data, Markup) else replaced
            yield mark, (kind, data, pos)
class RenameTransformation(object):
    """Rename matching elements."""
    def __init__(self, name):
        """Create the transform.

        :param name: New element name.
        """
        self.name = QName(name)
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        new_name = self.name
        for mark, (kind, data, pos) in stream:
            if mark is ENTER:
                # START data is (name, attrs); keep the attributes.
                data = new_name, data[1]
            elif mark is EXIT:
                # END data is just the element name.
                data = new_name
            yield mark, (kind, data, pos)
class InjectorTransformation(object):
    """Abstract base class for transformations that inject content into a
    stream.
    >>> class Top(InjectorTransformation):
    ...     def __call__(self, stream):
    ...         for event in self._inject():
    ...             yield event
    ...         for event in stream:
    ...             yield event
    >>> html = HTML('<body>Some <em>test</em> text</body>')
    >>> print(html | Transformer('.//em').apply(Top('Prefix ')))
    Prefix <body>Some <em>test</em> text</body>
    """
    def __init__(self, content):
        """Create a new injector.

        :param content: An iterable of Genshi stream events, or a string to be
                        injected.
        """
        self.content = content
    def _inject(self):
        # Yield the configured content as unmarked (None) events so the
        # injected material is never itself treated as part of the selection.
        content = self.content
        if hasattr(content, '__call__'):
            # A callable is invoked anew at each injection site, allowing
            # the injected content to differ per occurrence.
            content = content()
        for event in _ensure(content):
            yield None, event
class ReplaceTransformation(InjectorTransformation):
    """Replace selection with content."""
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        stream = PushBackStream(stream)
        for mark, event in stream:
            if mark is not None:
                start = mark
                # Emit the replacement content in place of the selection.
                for subevent in self._inject():
                    yield subevent
                # Swallow the remainder of the selection without emitting it.
                for mark, event in stream:
                    if start is ENTER:
                        # Element selection: discard through the matching EXIT.
                        if mark is EXIT:
                            break
                    elif mark != start:
                        # Non-element selection ended; push the terminating
                        # event back so the outer loop processes it normally.
                        stream.push((mark, event))
                        break
            else:
                yield mark, event
class BeforeTransformation(InjectorTransformation):
    """Insert content before selection."""
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        stream = PushBackStream(stream)
        for mark, event in stream:
            if mark is not None:
                start = mark
                # Inject the new content first, then the selection itself.
                for subevent in self._inject():
                    yield subevent
                yield mark, event
                # Pass the remainder of the selection through unchanged.
                for mark, event in stream:
                    if mark != start and start is not ENTER:
                        # Non-element selection ended; replay the terminating
                        # event through the push-back stream.
                        stream.push((mark, event))
                        break
                    yield mark, event
                    if start is ENTER and mark is EXIT:
                        # Element selection ends at its matching EXIT.
                        break
            else:
                yield mark, event
class AfterTransformation(InjectorTransformation):
    """Insert content after selection."""
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        stream = PushBackStream(stream)
        for mark, event in stream:
            yield mark, event
            if mark:
                start = mark
                # Pass the rest of the selection through before injecting.
                for mark, event in stream:
                    if start is not ENTER and mark != start:
                        # Non-element selection ended; replay the terminating
                        # event through the push-back stream.
                        stream.push((mark, event))
                        break
                    yield mark, event
                    if start is ENTER and mark is EXIT:
                        # Element selection ends at its matching EXIT.
                        break
                for subevent in self._inject():
                    yield subevent
class PrependTransformation(InjectorTransformation):
    """Prepend content to the inside of selected elements."""
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        for mark, event in stream:
            yield mark, event
            if mark is not ENTER:
                continue
            # Inject immediately after each selected element's opening tag.
            for injected in self._inject():
                yield injected
class AppendTransformation(InjectorTransformation):
    """Append content after the content of selected elements."""
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        for mark, event in stream:
            yield mark, event
            if mark is not ENTER:
                continue
            # Pass the element's content through until its closing tag...
            for mark, event in stream:
                if mark is EXIT:
                    break
                yield mark, event
            # ...then inject the new content and emit the closing tag.
            for injected in self._inject():
                yield injected
            yield mark, event
class AttrTransformation(object):
    """Set an attribute on selected elements."""
    def __init__(self, name, value):
        """Construct transform.

        :param name: name of the attribute that should be set
        :param value: the value to set; a callable is invoked per element,
                      and a value of ``None`` removes the attribute
        """
        self.name = name
        self.value = value
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: The marked event stream to filter
        """
        value_is_callable = hasattr(self.value, '__call__')
        for mark, (kind, data, pos) in stream:
            if mark is ENTER:
                if value_is_callable:
                    value = self.value(self.name, (kind, data, pos))
                else:
                    value = self.value
                tag, attrs = data
                if value is None:
                    # None removes the attribute from the element.
                    attrs -= [QName(self.name)]
                else:
                    attrs |= [(QName(self.name), value)]
                data = (tag, attrs)
            yield mark, (kind, data, pos)
class StreamBuffer(Stream):
    """Stream event buffer used for cut and copy transformations."""
    def __init__(self):
        """Create an initially empty buffer."""
        Stream.__init__(self, [])
    def append(self, event):
        """Add an event to the buffer.

        :param event: the markup event to add
        """
        self.events.append(event)
    def reset(self):
        """Empty the buffer of events."""
        self.events[:] = []
class CopyTransformation(object):
    """Copy selected events into a buffer for later insertion."""
    def __init__(self, buffer, accumulate=False):
        """Create the copy transformation.

        :param buffer: the `StreamBuffer` in which the selection should be
                       stored
        :param accumulate: if true, keep appending selections to the buffer
                           instead of resetting it for each one
        """
        if not accumulate:
            buffer.reset()
        self.buffer = buffer
        self.accumulate = accumulate
    def __call__(self, stream):
        """Apply the transformation to the marked stream.

        :param stream: the marked event stream to filter
        """
        stream = PushBackStream(stream)
        for mark, event in stream:
            if mark:
                if not self.accumulate:
                    # Each new selection replaces the previous buffer contents.
                    self.buffer.reset()
                # Buffer the whole selection BEFORE yielding any of it, so the
                # copy is complete by the time downstream transforms see it.
                events = [(mark, event)]
                self.buffer.append(event)
                start = mark
                for mark, event in stream:
                    if start is not ENTER and mark != start:
                        # Non-element selection ended; replay the terminating
                        # event through the push-back stream.
                        stream.push((mark, event))
                        break
                    events.append((mark, event))
                    self.buffer.append(event)
                    if start is ENTER and mark is EXIT:
                        # Element selection ends at its matching EXIT.
                        break
                for i in events:
                    yield i
            else:
                yield mark, event
class CutTransformation(object):
    """Cut selected events into a buffer for later insertion and remove the
    selection.
    """
    def __init__(self, buffer, accumulate=False):
        """Create the cut transformation.

        :param buffer: the `StreamBuffer` in which the selection should be
                       stored
        :param accumulate: if true, keep appending selections to the buffer
                           instead of resetting it for each one
        """
        self.buffer = buffer
        self.accumulate = accumulate
    def __call__(self, stream):
        """Apply the transform filter to the marked stream.

        :param stream: the marked event stream to filter
        """
        attributes = []
        stream = PushBackStream(stream)
        # Tracks whether an unselected event was emitted since the last cut;
        # used to decide where BREAK separators go.
        broken = False
        if not self.accumulate:
            self.buffer.reset()
        for mark, event in stream:
            if mark:
                # Send a BREAK event if there was no other event sent between
                if not self.accumulate:
                    if not broken and self.buffer:
                        yield BREAK, (BREAK, None, None)
                    self.buffer.reset()
                self.buffer.append(event)
                start = mark
                if mark is ATTR:
                    # Remember the selected attribute names so they can be
                    # stripped from the owning START event below.
                    attributes.extend([name for name, _ in event[1][1]])
                for mark, event in stream:
                    if start is mark is ATTR:
                        attributes.extend([name for name, _ in event[1][1]])
                    # Handle non-element contiguous selection
                    if start is not ENTER and mark != start:
                        # Operating on the attributes of a START event
                        if start is ATTR:
                            kind, data, pos = event
                            assert kind is START
                            # Strip the cut attributes from the element.
                            data = (data[0], data[1] - attributes)
                            # NOTE(review): attributes is reset to None, not
                            # []; a subsequent ATTR selection would fail on
                            # .extend() -- confirm whether multiple attribute
                            # selections can reach this path.
                            attributes = None
                            stream.push((mark, (kind, data, pos)))
                        else:
                            stream.push((mark, event))
                        break
                    self.buffer.append(event)
                    if start is ENTER and mark is EXIT:
                        break
                broken = False
            else:
                broken = True
                yield mark, event
        if not broken and self.buffer:
            # Terminate the final cut selection with a BREAK marker.
            yield BREAK, (BREAK, None, None)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
from pprint import pprint
import unittest
from genshi import HTML
from genshi.builder import Element
from genshi.core import START, END, TEXT, QName, Attrs
from genshi.filters.transform import Transformer, StreamBuffer, ENTER, EXIT, \
OUTSIDE, INSIDE, ATTR, BREAK
import genshi.filters.transform
# Fixture markup: FOO has a single child element, FOOBAR two adjacent
# children -- used to exercise single and contiguous selections.
FOO = '<root>ROOT<foo name="foo">FOO</foo></root>'
FOOBAR = '<root>ROOT<foo name="foo" size="100">FOO</foo><bar name="bar">BAR</bar></root>'
def _simplify(stream, with_attrs=False):
    """Simplify a marked stream into comparable (mark, kind, data) tuples."""
    def _reduce():
        for mark, (kind, data, pos) in stream:
            if kind is START:
                if with_attrs:
                    # Keep the attributes as a plain dict for easy comparison.
                    data = (unicode(data[0]), dict((unicode(k), v)
                                                   for k, v in data[1]))
                else:
                    data = unicode(data[0])
            elif kind is END:
                data = unicode(data)
            elif kind is ATTR:
                data = dict((unicode(k), v) for k, v in data[1])
            yield mark, kind, data
    return list(_reduce())
def _transform(html, transformer, with_attrs=False):
    """Apply transformation returning simplified marked stream."""
    if isinstance(html, basestring):
        html = HTML(html)
    marked = transformer(html, keep_marks=True)
    return _simplify(marked, with_attrs)
class SelectTest(unittest.TestCase):
    """Test .select()"""
    def _select(self, select):
        # Build a transformer from the first expression, then chain any
        # further expressions via .select().
        html = HTML(FOOBAR)
        if isinstance(select, basestring):
            select = [select]
        transformer = Transformer(select[0])
        for sel in select[1:]:
            transformer = transformer.select(sel)
        return _transform(html, transformer)
    def test_select_single_element(self):
        self.assertEqual(
            self._select('foo'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')],
            )
    def test_select_context(self):
        self.assertEqual(
            self._select('.'),
            [(ENTER, START, u'root'),
             (INSIDE, TEXT, u'ROOT'),
             (INSIDE, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (INSIDE, END, u'foo'),
             (INSIDE, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (INSIDE, END, u'bar'),
             (EXIT, END, u'root')]
            )
    def test_select_inside_select(self):
        self.assertEqual(
            self._select(['.', 'foo']),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')],
            )
    def test_select_text(self):
        self.assertEqual(
            self._select('*/text()'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, START, u'foo'),
             (OUTSIDE, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, START, u'bar'),
             (OUTSIDE, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')],
            )
    def test_select_attr(self):
        # Attribute selections produce a synthetic ATTR event before the
        # owning START event.
        self.assertEqual(
            self._select('foo/@name'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ATTR, ATTR, {'name': u'foo'}),
             (None, START, u'foo'),
             (None, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_select_text_context(self):
        self.assertEqual(
            list(Transformer('.')(HTML('foo'), keep_marks=True)),
            [('OUTSIDE', ('TEXT', u'foo', (None, 1, 0)))],
            )
class InvertTest(unittest.TestCase):
    """Test .invert(): marked events become unmarked and vice versa."""
    def _invert(self, select):
        return _transform(FOO, Transformer(select).invert())
    def test_invert_element(self):
        self.assertEqual(
            self._invert('foo'),
            [(OUTSIDE, START, u'root'),
             (OUTSIDE, TEXT, u'ROOT'),
             (None, START, u'foo'),
             (None, TEXT, u'FOO'),
             (None, END, u'foo'),
             (OUTSIDE, END, u'root')]
            )
    def test_invert_inverted_element(self):
        # Double inversion does not restore ENTER/EXIT/INSIDE marks; the
        # selection comes back uniformly marked OUTSIDE.
        self.assertEqual(
            _transform(FOO, Transformer('foo').invert().invert()),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (OUTSIDE, START, u'foo'),
             (OUTSIDE, TEXT, u'FOO'),
             (OUTSIDE, END, u'foo'),
             (None, END, u'root')]
            )
    def test_invert_text(self):
        self.assertEqual(
            self._invert('foo/text()'),
            [(OUTSIDE, START, u'root'),
             (OUTSIDE, TEXT, u'ROOT'),
             (OUTSIDE, START, u'foo'),
             (None, TEXT, u'FOO'),
             (OUTSIDE, END, u'foo'),
             (OUTSIDE, END, u'root')]
            )
    def test_invert_attribute(self):
        self.assertEqual(
            self._invert('foo/@name'),
            [(OUTSIDE, START, u'root'),
             (OUTSIDE, TEXT, u'ROOT'),
             (None, ATTR, {'name': u'foo'}),
             (OUTSIDE, START, u'foo'),
             (OUTSIDE, TEXT, u'FOO'),
             (OUTSIDE, END, u'foo'),
             (OUTSIDE, END, u'root')]
            )
    def test_invert_context(self):
        # Selecting everything and inverting leaves nothing marked.
        self.assertEqual(
            self._invert('.'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, START, u'foo'),
             (None, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, END, u'root')]
            )
    def test_invert_text_context(self):
        self.assertEqual(
            _simplify(Transformer('.').invert()(HTML('foo'), keep_marks=True)),
            [(None, 'TEXT', u'foo')],
            )
class EndTest(unittest.TestCase):
    """Test .end(): every event becomes OUTSIDE, ending the selection."""
    def test_end(self):
        stream = _transform(FOO, Transformer('foo').end())
        self.assertEqual(
            stream,
            [(OUTSIDE, START, u'root'),
             (OUTSIDE, TEXT, u'ROOT'),
             (OUTSIDE, START, u'foo'),
             (OUTSIDE, TEXT, u'FOO'),
             (OUTSIDE, END, u'foo'),
             (OUTSIDE, END, u'root')]
            )
class EmptyTest(unittest.TestCase):
    """Test .empty(): selected elements are stripped of their content."""
    def _empty(self, select):
        # Apply .empty() to the selection over the single-child fixture.
        return _transform(FOO, Transformer(select).empty())
    def test_empty_element(self):
        self.assertEqual(
            self._empty('foo'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (EXIT, END, u'foo'),
             (None, END, u'root')],
            )
    def test_empty_text(self):
        # Emptying a text selection is a no-op: text has no inner content.
        self.assertEqual(
            self._empty('foo/text()'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, START, u'foo'),
             (OUTSIDE, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, END, u'root')]
            )
    def test_empty_attr(self):
        self.assertEqual(
            self._empty('foo/@name'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ATTR, ATTR, {'name': u'foo'}),
             (None, START, u'foo'),
             (None, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, END, u'root')]
            )
    def test_empty_context(self):
        self.assertEqual(
            self._empty('.'),
            [(ENTER, START, u'root'),
             (EXIT, END, u'root')]
            )
    def test_empty_text_context(self):
        # Fixed: this test previously omitted the .empty() call, so it only
        # exercised plain selection. The expected stream is unchanged,
        # because emptying a pure-text selection is a no-op.
        self.assertEqual(
            _simplify(Transformer('.').empty()(HTML('foo'), keep_marks=True)),
            [(OUTSIDE, TEXT, u'foo')],
            )
class RemoveTest(unittest.TestCase):
    """Test .remove(): the selection is dropped from the stream."""
    def _remove(self, select):
        return _transform(FOO, Transformer(select).remove())
    def test_remove_element(self):
        self.assertEqual(
            self._remove('foo|bar'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, END, u'root')]
            )
    def test_remove_text(self):
        self.assertEqual(
            self._remove('//text()'),
            [(None, START, u'root'),
             (None, START, u'foo'),
             (None, END, u'foo'),
             (None, END, u'root')]
            )
    def test_remove_attr(self):
        self.assertEqual(
            self._remove('foo/@name'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, START, u'foo'),
             (None, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, END, u'root')]
            )
    def test_remove_context(self):
        # Removing the whole context leaves an empty stream.
        self.assertEqual(
            self._remove('.'),
            [],
            )
    def test_remove_text_context(self):
        self.assertEqual(
            _transform('foo', Transformer('.').remove()),
            [],
            )
# NOTE(review): class name breaks the "*Test" convention of its siblings
# (presumably intended to be "UnwrapTest"); left unchanged in case it is
# referenced by name elsewhere (e.g. a suite() function).
class UnwrapText(unittest.TestCase):
    """Test .unwrap(): selected elements' own tags are removed."""
    def _unwrap(self, select):
        return _transform(FOO, Transformer(select).unwrap())
    def test_unwrap_element(self):
        self.assertEqual(
            self._unwrap('foo'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (INSIDE, TEXT, u'FOO'),
             (None, END, u'root')]
            )
    def test_unwrap_text(self):
        # Text selections have no enclosing tags, so unwrap is a no-op.
        self.assertEqual(
            self._unwrap('foo/text()'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, START, u'foo'),
             (OUTSIDE, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, END, u'root')]
            )
    def test_unwrap_attr(self):
        self.assertEqual(
            self._unwrap('foo/@name'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ATTR, ATTR, {'name': u'foo'}),
             (None, START, u'foo'),
             (None, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, END, u'root')]
            )
    def test_unwrap_adjacent(self):
        self.assertEqual(
            _transform(FOOBAR, Transformer('foo|bar').unwrap()),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (INSIDE, TEXT, u'FOO'),
             (INSIDE, TEXT, u'BAR'),
             (None, END, u'root')]
            )
    def test_unwrap_root(self):
        self.assertEqual(
            self._unwrap('.'),
            [(INSIDE, TEXT, u'ROOT'),
             (INSIDE, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (INSIDE, END, u'foo')]
            )
    def test_unwrap_text_root(self):
        self.assertEqual(
            _transform('foo', Transformer('.').unwrap()),
            [(OUTSIDE, TEXT, 'foo')],
            )
class WrapTest(unittest.TestCase):
    """Test .wrap(): each selection is surrounded by a wrapper element."""
    def _wrap(self, select, wrap='wrap'):
        return _transform(FOO, Transformer(select).wrap(wrap))
    def test_wrap_element(self):
        self.assertEqual(
            self._wrap('foo'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, START, u'wrap'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, END, u'wrap'),
             (None, END, u'root')]
            )
    def test_wrap_adjacent_elements(self):
        # Adjacent selected elements each get their own wrapper.
        self.assertEqual(
            _transform(FOOBAR, Transformer('foo|bar').wrap('wrap')),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, START, u'wrap'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, END, u'wrap'),
             (None, START, u'wrap'),
             (ENTER, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (EXIT, END, u'bar'),
             (None, END, u'wrap'),
             (None, END, u'root')]
            )
    def test_wrap_text(self):
        self.assertEqual(
            self._wrap('foo/text()'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, START, u'foo'),
             (None, START, u'wrap'),
             (OUTSIDE, TEXT, u'FOO'),
             (None, END, u'wrap'),
             (None, END, u'foo'),
             (None, END, u'root')]
            )
    def test_wrap_root(self):
        self.assertEqual(
            self._wrap('.'),
            [(None, START, u'wrap'),
             (ENTER, START, u'root'),
             (INSIDE, TEXT, u'ROOT'),
             (INSIDE, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (INSIDE, END, u'foo'),
             (EXIT, END, u'root'),
             (None, END, u'wrap')]
            )
    def test_wrap_text_root(self):
        self.assertEqual(
            _transform('foo', Transformer('.').wrap('wrap')),
            [(None, START, u'wrap'),
             (OUTSIDE, TEXT, u'foo'),
             (None, END, u'wrap')],
            )
    def test_wrap_with_element(self):
        # A builder Element (with attributes) can be used as the wrapper.
        element = Element('a', href='http://localhost')
        self.assertEqual(
            _transform('foo', Transformer('.').wrap(element), with_attrs=True),
            [(None, START, (u'a', {u'href': u'http://localhost'})),
             (OUTSIDE, TEXT, u'foo'),
             (None, END, u'a')]
            )
class FilterTest(unittest.TestCase):
    """Test .filter(): a stream filter is called once per selection."""
    def _filter(self, select, html=FOOBAR):
        """Returns a list of lists of filtered elements."""
        output = []
        def filtered(stream):
            # Record the events of each invocation in a separate interval so
            # the test can observe how often the filter was called.
            interval = []
            output.append(interval)
            for event in stream:
                interval.append(event)
                yield event
        _transform(html, Transformer(select).filter(filtered))
        simplified = []
        for sub in output:
            simplified.append(_simplify([(None, event) for event in sub]))
        return simplified
    def test_filter_element(self):
        self.assertEqual(
            self._filter('foo'),
            [[(None, START, u'foo'),
              (None, TEXT, u'FOO'),
              (None, END, u'foo')]]
            )
    def test_filter_adjacent_elements(self):
        # Two adjacent selections mean two filter invocations.
        self.assertEqual(
            self._filter('foo|bar'),
            [[(None, START, u'foo'),
              (None, TEXT, u'FOO'),
              (None, END, u'foo')],
             [(None, START, u'bar'),
              (None, TEXT, u'BAR'),
              (None, END, u'bar')]]
            )
    def test_filter_text(self):
        self.assertEqual(
            self._filter('*/text()'),
            [[(None, TEXT, u'FOO')],
             [(None, TEXT, u'BAR')]]
            )
    def test_filter_root(self):
        self.assertEqual(
            self._filter('.'),
            [[(None, START, u'root'),
              (None, TEXT, u'ROOT'),
              (None, START, u'foo'),
              (None, TEXT, u'FOO'),
              (None, END, u'foo'),
              (None, START, u'bar'),
              (None, TEXT, u'BAR'),
              (None, END, u'bar'),
              (None, END, u'root')]]
            )
    def test_filter_text_root(self):
        self.assertEqual(
            self._filter('.', 'foo'),
            [[(None, TEXT, u'foo')]])
    def test_filter_after_outside(self):
        # The event terminating an OUTSIDE run must still be re-emitted.
        stream = _transform(
            '<root>x</root>', Transformer('//root/text()').filter(lambda x: x))
        self.assertEqual(
            list(stream),
            [(None, START, u'root'),
             (OUTSIDE, TEXT, u'x'),
             (None, END, u'root')])
class MapTest(unittest.TestCase):
    """Test .map(): a function is applied to selected events' data."""
    def _map(self, select, kind=None):
        data = []
        def record(d):
            # Identity function that records what it was called with.
            data.append(d)
            return d
        _transform(FOOBAR, Transformer(select).map(record, kind))
        return data
    def test_map_element(self):
        self.assertEqual(
            self._map('foo'),
            [(QName('foo'), Attrs([(QName('name'), u'foo'),
                                   (QName('size'), u'100')])),
             u'FOO',
             QName('foo')]
            )
    def test_map_with_text_kind(self):
        self.assertEqual(
            self._map('.', TEXT),
            [u'ROOT', u'FOO', u'BAR']
            )
    def test_map_with_root_and_end_kind(self):
        self.assertEqual(
            self._map('.', END),
            [QName('foo'), QName('bar'), QName('root')]
            )
    def test_map_with_attribute(self):
        self.assertEqual(
            self._map('foo/@name'),
            [(QName('foo@*'), Attrs([('name', u'foo')]))]
            )
class SubstituteTest(unittest.TestCase):
    """Test .substitute(): regex replacement within selected text."""
    def _substitute(self, select, pattern, replace):
        return _transform(FOOBAR, Transformer(select).substitute(pattern, replace))
    def test_substitute_foo(self):
        self.assertEqual(
            self._substitute('foo', 'FOO|BAR', 'FOOOOO'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOOOOO'),
             (EXIT, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_substitute_foobar_with_group(self):
        # Backreferences in the replacement pattern are supported.
        self.assertEqual(
            self._substitute('foo|bar', '(FOO|BAR)', r'(\1)'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'(FOO)'),
             (EXIT, END, u'foo'),
             (ENTER, START, u'bar'),
             (INSIDE, TEXT, u'(BAR)'),
             (EXIT, END, u'bar'),
             (None, END, u'root')]
            )
class RenameTest(unittest.TestCase):
    """Test .rename(): selected elements are given a new tag name."""
    def _rename(self, select):
        return _transform(FOOBAR, Transformer(select).rename('foobar'))
    def test_rename_root(self):
        self.assertEqual(
            self._rename('.'),
            [(ENTER, START, u'foobar'),
             (INSIDE, TEXT, u'ROOT'),
             (INSIDE, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (INSIDE, END, u'foo'),
             (INSIDE, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (INSIDE, END, u'bar'),
             (EXIT, END, u'foobar')]
            )
    def test_rename_element(self):
        self.assertEqual(
            self._rename('foo|bar'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ENTER, START, u'foobar'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foobar'),
             (ENTER, START, u'foobar'),
             (INSIDE, TEXT, u'BAR'),
             (EXIT, END, u'foobar'),
             (None, END, u'root')]
            )
    def test_rename_text(self):
        # Text selections have no element tags, so rename is a no-op.
        self.assertEqual(
            self._rename('foo/text()'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, START, u'foo'),
             (OUTSIDE, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
class ContentTestMixin(object):
    """Shared driver for the injector-transformation tests.

    Subclasses set ``operation`` to the Transformer method name under test
    ('replace', 'before', 'after', 'prepend', 'append').
    """
    def _apply(self, select, content=None, html=FOOBAR):
        class Injector(object):
            # Counts its iterations so tests can verify how many times the
            # content was (re-)generated: 'CONTENT 1', 'CONTENT 2', ...
            count = 0
            def __iter__(self):
                self.count += 1
                return iter(HTML('CONTENT %i' % self.count))
        if isinstance(html, basestring):
            html = HTML(html)
        if content is None:
            content = Injector()
        elif isinstance(content, basestring):
            content = HTML(content)
        return _transform(html, getattr(Transformer(select), self.operation)
                                (content))
class ReplaceTest(unittest.TestCase, ContentTestMixin):
    """Test .replace(): the selection is substituted with new content."""
    operation = 'replace'
    def test_replace_element(self):
        self.assertEqual(
            self._apply('foo'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, TEXT, u'CONTENT 1'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_replace_text(self):
        self.assertEqual(
            self._apply('text()'),
            [(None, START, u'root'),
             (None, TEXT, u'CONTENT 1'),
             (None, START, u'foo'),
             (None, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_replace_context(self):
        self.assertEqual(
            self._apply('.'),
            [(None, TEXT, u'CONTENT 1')],
            )
    def test_replace_text_context(self):
        self.assertEqual(
            self._apply('.', html='foo'),
            [(None, TEXT, u'CONTENT 1')],
            )
    def test_replace_adjacent_elements(self):
        # The content iterable is re-evaluated per selection: 1, then 2.
        self.assertEqual(
            self._apply('*'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, TEXT, u'CONTENT 1'),
             (None, TEXT, u'CONTENT 2'),
             (None, END, u'root')],
            )
    def test_replace_all(self):
        self.assertEqual(
            self._apply('*|text()'),
            [(None, START, u'root'),
             (None, TEXT, u'CONTENT 1'),
             (None, TEXT, u'CONTENT 2'),
             (None, TEXT, u'CONTENT 3'),
             (None, END, u'root')],
            )
    def test_replace_with_callback(self):
        # A callable content source is invoked anew for each selection.
        count = [0]
        def content():
            count[0] += 1
            yield '%2i.' % count[0]
        self.assertEqual(
            self._apply('*', content),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, TEXT, u' 1.'),
             (None, TEXT, u' 2.'),
             (None, END, u'root')]
            )
class BeforeTest(unittest.TestCase, ContentTestMixin):
    """Test .before(): content is injected immediately before the selection."""
    operation = 'before'
    def test_before_element(self):
        self.assertEqual(
            self._apply('foo'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, TEXT, u'CONTENT 1'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_before_text(self):
        self.assertEqual(
            self._apply('text()'),
            [(None, START, u'root'),
             (None, TEXT, u'CONTENT 1'),
             (OUTSIDE, TEXT, u'ROOT'),
             (None, START, u'foo'),
             (None, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_before_context(self):
        self.assertEqual(
            self._apply('.'),
            [(None, TEXT, u'CONTENT 1'),
             (ENTER, START, u'root'),
             (INSIDE, TEXT, u'ROOT'),
             (INSIDE, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (INSIDE, END, u'foo'),
             (INSIDE, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (INSIDE, END, u'bar'),
             (EXIT, END, u'root')]
            )
    def test_before_text_context(self):
        self.assertEqual(
            self._apply('.', html='foo'),
            [(None, TEXT, u'CONTENT 1'),
             (OUTSIDE, TEXT, u'foo')]
            )
    def test_before_adjacent_elements(self):
        # Each selection gets its own (freshly generated) injection.
        self.assertEqual(
            self._apply('*'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (None, TEXT, u'CONTENT 1'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, TEXT, u'CONTENT 2'),
             (ENTER, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (EXIT, END, u'bar'),
             (None, END, u'root')]
            )
    def test_before_all(self):
        self.assertEqual(
            self._apply('*|text()'),
            [(None, START, u'root'),
             (None, TEXT, u'CONTENT 1'),
             (OUTSIDE, TEXT, u'ROOT'),
             (None, TEXT, u'CONTENT 2'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, TEXT, u'CONTENT 3'),
             (ENTER, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (EXIT, END, u'bar'),
             (None, END, u'root')]
            )
    def test_before_with_callback(self):
        count = [0]
        def content():
            count[0] += 1
            yield '%2i.' % count[0]
        self.assertEqual(
            self._apply('foo/text()', content),
            [(None, 'START', u'root'),
             (None, 'TEXT', u'ROOT'),
             (None, 'START', u'foo'),
             (None, 'TEXT', u' 1.'),
             ('OUTSIDE', 'TEXT', u'FOO'),
             (None, 'END', u'foo'),
             (None, 'START', u'bar'),
             (None, 'TEXT', u'BAR'),
             (None, 'END', u'bar'),
             (None, 'END', u'root')]
            )
class AfterTest(unittest.TestCase, ContentTestMixin):
    """Test .after(): content is injected immediately after the selection."""
    operation = 'after'
    def test_after_element(self):
        self.assertEqual(
            self._apply('foo'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, TEXT, u'CONTENT 1'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_after_text(self):
        self.assertEqual(
            self._apply('text()'),
            [(None, START, u'root'),
             (OUTSIDE, TEXT, u'ROOT'),
             (None, TEXT, u'CONTENT 1'),
             (None, START, u'foo'),
             (None, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_after_context(self):
        self.assertEqual(
            self._apply('.'),
            [(ENTER, START, u'root'),
             (INSIDE, TEXT, u'ROOT'),
             (INSIDE, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (INSIDE, END, u'foo'),
             (INSIDE, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (INSIDE, END, u'bar'),
             (EXIT, END, u'root'),
             (None, TEXT, u'CONTENT 1')]
            )
    def test_after_text_context(self):
        self.assertEqual(
            self._apply('.', html='foo'),
            [(OUTSIDE, TEXT, u'foo'),
             (None, TEXT, u'CONTENT 1')]
            )
    def test_after_adjacent_elements(self):
        # Each selection gets its own (freshly generated) injection.
        self.assertEqual(
            self._apply('*'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, TEXT, u'CONTENT 1'),
             (ENTER, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (EXIT, END, u'bar'),
             (None, TEXT, u'CONTENT 2'),
             (None, END, u'root')]
            )
    def test_after_all(self):
        self.assertEqual(
            self._apply('*|text()'),
            [(None, START, u'root'),
             (OUTSIDE, TEXT, u'ROOT'),
             (None, TEXT, u'CONTENT 1'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, TEXT, u'CONTENT 2'),
             (ENTER, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (EXIT, END, u'bar'),
             (None, TEXT, u'CONTENT 3'),
             (None, END, u'root')]
            )
    def test_after_with_callback(self):
        count = [0]
        def content():
            count[0] += 1
            yield '%2i.' % count[0]
        self.assertEqual(
            self._apply('foo/text()', content),
            [(None, 'START', u'root'),
             (None, 'TEXT', u'ROOT'),
             (None, 'START', u'foo'),
             ('OUTSIDE', 'TEXT', u'FOO'),
             (None, 'TEXT', u' 1.'),
             (None, 'END', u'foo'),
             (None, 'START', u'bar'),
             (None, 'TEXT', u'BAR'),
             (None, 'END', u'bar'),
             (None, 'END', u'root')]
            )
class PrependTest(unittest.TestCase, ContentTestMixin):
    """Test .prepend(): content is injected after selected opening tags."""
    operation = 'prepend'
    def test_prepend_element(self):
        self.assertEqual(
            self._apply('foo'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (None, TEXT, u'CONTENT 1'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_prepend_text(self):
        # Prepend only acts on element selections (ENTER); text selections
        # receive no injection.
        self.assertEqual(
            self._apply('text()'),
            [(None, START, u'root'),
             (OUTSIDE, TEXT, u'ROOT'),
             (None, START, u'foo'),
             (None, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_prepend_context(self):
        self.assertEqual(
            self._apply('.'),
            [(ENTER, START, u'root'),
             (None, TEXT, u'CONTENT 1'),
             (INSIDE, TEXT, u'ROOT'),
             (INSIDE, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (INSIDE, END, u'foo'),
             (INSIDE, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (INSIDE, END, u'bar'),
             (EXIT, END, u'root')],
            )
    def test_prepend_text_context(self):
        self.assertEqual(
            self._apply('.', html='foo'),
            [(OUTSIDE, TEXT, u'foo')]
            )
    def test_prepend_adjacent_elements(self):
        # Each selected element gets its own (freshly generated) injection.
        self.assertEqual(
            self._apply('*'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (None, TEXT, u'CONTENT 1'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (ENTER, START, u'bar'),
             (None, TEXT, u'CONTENT 2'),
             (INSIDE, TEXT, u'BAR'),
             (EXIT, END, u'bar'),
             (None, END, u'root')]
            )
    def test_prepend_all(self):
        self.assertEqual(
            self._apply('*|text()'),
            [(None, START, u'root'),
             (OUTSIDE, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (None, TEXT, u'CONTENT 1'),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (ENTER, START, u'bar'),
             (None, TEXT, u'CONTENT 2'),
             (INSIDE, TEXT, u'BAR'),
             (EXIT, END, u'bar'),
             (None, END, u'root')]
            )
    def test_prepend_with_callback(self):
        count = [0]
        def content():
            count[0] += 1
            yield '%2i.' % count[0]
        self.assertEqual(
            self._apply('foo', content),
            [(None, 'START', u'root'),
             (None, 'TEXT', u'ROOT'),
             (ENTER, 'START', u'foo'),
             (None, 'TEXT', u' 1.'),
             (INSIDE, 'TEXT', u'FOO'),
             (EXIT, 'END', u'foo'),
             (None, 'START', u'bar'),
             (None, 'TEXT', u'BAR'),
             (None, 'END', u'bar'),
             (None, 'END', u'root')]
            )
class AppendTest(unittest.TestCase, ContentTestMixin):
    """Tests for ``Transformer.append()``.

    The generated content is injected immediately before the END event of
    each selected element; text-only selections are left untouched.
    """
    operation = 'append'
    def test_append_element(self):
        # Content appears right before </foo>.
        self.assertEqual(
            self._apply('foo'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (None, TEXT, u'CONTENT 1'),
             (EXIT, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_append_text(self):
        # Text nodes have no children, so nothing can be appended.
        self.assertEqual(
            self._apply('text()'),
            [(None, START, u'root'),
             (OUTSIDE, TEXT, u'ROOT'),
             (None, START, u'foo'),
             (None, TEXT, u'FOO'),
             (None, END, u'foo'),
             (None, START, u'bar'),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_append_context(self):
        # Appending to the context node puts content just before </root>.
        self.assertEqual(
            self._apply('.'),
            [(ENTER, START, u'root'),
             (INSIDE, TEXT, u'ROOT'),
             (INSIDE, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (INSIDE, END, u'foo'),
             (INSIDE, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (INSIDE, END, u'bar'),
             (None, TEXT, u'CONTENT 1'),
             (EXIT, END, u'root')],
            )
    def test_append_text_context(self):
        # A text-only context stream passes through unchanged.
        self.assertEqual(
            self._apply('.', html='foo'),
            [(OUTSIDE, TEXT, u'foo')]
            )
    def test_append_adjacent_elements(self):
        self.assertEqual(
            self._apply('*'),
            [(None, START, u'root'),
             (None, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (None, TEXT, u'CONTENT 1'),
             (EXIT, END, u'foo'),
             (ENTER, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (None, TEXT, u'CONTENT 2'),
             (EXIT, END, u'bar'),
             (None, END, u'root')]
            )
    def test_append_all(self):
        # Matching '*|text()' only appends into the elements, not the text.
        self.assertEqual(
            self._apply('*|text()'),
            [(None, START, u'root'),
             (OUTSIDE, TEXT, u'ROOT'),
             (ENTER, START, u'foo'),
             (INSIDE, TEXT, u'FOO'),
             (None, TEXT, u'CONTENT 1'),
             (EXIT, END, u'foo'),
             (ENTER, START, u'bar'),
             (INSIDE, TEXT, u'BAR'),
             (None, TEXT, u'CONTENT 2'),
             (EXIT, END, u'bar'),
             (None, END, u'root')]
            )
    def test_append_with_callback(self):
        # Callable content is re-invoked per match (single match here -> ' 1.').
        count = [0]
        def content():
            count[0] += 1
            yield '%2i.' % count[0]
        self.assertEqual(
            self._apply('foo', content),
            [(None, 'START', u'root'),
             (None, 'TEXT', u'ROOT'),
             (ENTER, 'START', u'foo'),
             (INSIDE, 'TEXT', u'FOO'),
             (None, 'TEXT', u' 1.'),
             (EXIT, 'END', u'foo'),
             (None, 'START', u'bar'),
             (None, 'TEXT', u'BAR'),
             (None, 'END', u'bar'),
             (None, 'END', u'root')]
            )
class AttrTest(unittest.TestCase):
    """Tests for ``Transformer.attr()``: setting, computing and removing
    attributes on selected elements.

    Fix: the local helper functions were named ``set``, shadowing the
    builtin; they are renamed to ``compute`` (behavior unchanged, the name
    is purely local to each test).
    """
    def _attr(self, select, name, value):
        """Apply ``attr(name, value)`` to the FOOBAR fixture and return the
        simplified event list, with attributes included."""
        return _transform(FOOBAR, Transformer(select).attr(name, value),
                          with_attrs=True)
    def test_set_existing_attr(self):
        # An existing attribute is overwritten in place.
        self.assertEqual(
            self._attr('foo', 'name', 'FOO'),
            [(None, START, (u'root', {})),
             (None, TEXT, u'ROOT'),
             (ENTER, START, (u'foo', {u'name': 'FOO', u'size': '100'})),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, START, (u'bar', {u'name': u'bar'})),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_set_new_attr(self):
        # A previously absent attribute is added to the matched element.
        self.assertEqual(
            self._attr('foo', 'title', 'FOO'),
            [(None, START, (u'root', {})),
             (None, TEXT, u'ROOT'),
             (ENTER, START, (u'foo', {u'name': u'foo', u'title': 'FOO', u'size': '100'})),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, START, (u'bar', {u'name': u'bar'})),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_attr_from_function(self):
        # The value may be a callable receiving (attr_name, event).
        def compute(name, event):
            self.assertEqual(name, 'name')
            return event[1][1].get('name').upper()
        self.assertEqual(
            self._attr('foo|bar', 'name', compute),
            [(None, START, (u'root', {})),
             (None, TEXT, u'ROOT'),
             (ENTER, START, (u'foo', {u'name': 'FOO', u'size': '100'})),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (ENTER, START, (u'bar', {u'name': 'BAR'})),
             (INSIDE, TEXT, u'BAR'),
             (EXIT, END, u'bar'),
             (None, END, u'root')]
            )
    def test_remove_attr(self):
        # A value of None removes the attribute entirely.
        self.assertEqual(
            self._attr('foo', 'name', None),
            [(None, START, (u'root', {})),
             (None, TEXT, u'ROOT'),
             (ENTER, START, (u'foo', {u'size': '100'})),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, START, (u'bar', {u'name': u'bar'})),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
    def test_remove_attr_with_function(self):
        # A callable returning None likewise removes the attribute.
        def compute(name, event):
            return None
        self.assertEqual(
            self._attr('foo', 'name', compute),
            [(None, START, (u'root', {})),
             (None, TEXT, u'ROOT'),
             (ENTER, START, (u'foo', {u'size': '100'})),
             (INSIDE, TEXT, u'FOO'),
             (EXIT, END, u'foo'),
             (None, START, (u'bar', {u'name': u'bar'})),
             (None, TEXT, u'BAR'),
             (None, END, u'bar'),
             (None, END, u'root')]
            )
class BufferTestMixin(object):
    """Mixin driving buffer-based transformations (copy/cut).

    Subclasses set ``operation`` to the Transformer method name.  ``_apply``
    returns ``(output, simplified)`` where *output* is the transformed event
    stream and *simplified* is the sequence of buffer snapshots observed
    while the stream was consumed.
    """
    def _apply(self, select, with_attrs=False):
        buffer = StreamBuffer()
        events = buffer.events
        class Trace(object):
            # Hash of the buffer contents at the last snapshot; used to
            # detect when the buffer has changed between events.
            last = None
            trace = []
            def __call__(self, stream):
                # Pass events through unchanged, snapshotting the buffer
                # every time its contents differ from the previous snapshot.
                for event in stream:
                    if events and hash(tuple(events)) != self.last:
                        self.last = hash(tuple(events))
                        self.trace.append(list(events))
                    yield event
        trace = Trace()
        output = _transform(FOOBAR, getattr(Transformer(select), self.operation)
                            (buffer).apply(trace), with_attrs=with_attrs)
        simplified = []
        for interval in trace.trace:
            simplified.append(_simplify([(None, e) for e in interval],
                                        with_attrs=with_attrs))
        return output, simplified
class CopyTest(unittest.TestCase, BufferTestMixin):
    """Tests for ``Transformer.copy()``.

    Only the buffer snapshots (element ``[1]`` of ``_apply``'s result) are
    asserted here; copying leaves the output stream itself unchanged.
    """
    operation = 'copy'
    def test_copy_element(self):
        self.assertEqual(
            self._apply('foo')[1],
            [[(None, START, u'foo'),
              (None, TEXT, u'FOO'),
              (None, END, u'foo')]]
            )
    def test_copy_adjacent_elements(self):
        # Each match produces a separate buffer snapshot.
        self.assertEqual(
            self._apply('foo|bar')[1],
            [[(None, START, u'foo'),
              (None, TEXT, u'FOO'),
              (None, END, u'foo')],
             [(None, START, u'bar'),
              (None, TEXT, u'BAR'),
              (None, END, u'bar')]]
            )
    def test_copy_all(self):
        self.assertEqual(
            self._apply('*|text()')[1],
            [[(None, TEXT, u'ROOT')],
             [(None, START, u'foo'),
              (None, TEXT, u'FOO'),
              (None, END, u'foo')],
             [(None, START, u'bar'),
              (None, TEXT, u'BAR'),
              (None, END, u'bar')]]
            )
    def test_copy_text(self):
        self.assertEqual(
            self._apply('*/text()')[1],
            [[(None, TEXT, u'FOO')],
             [(None, TEXT, u'BAR')]]
            )
    def test_copy_context(self):
        # Copying the context node buffers the entire stream.
        self.assertEqual(
            self._apply('.')[1],
            [[(None, START, u'root'),
              (None, TEXT, u'ROOT'),
              (None, START, u'foo'),
              (None, TEXT, u'FOO'),
              (None, END, u'foo'),
              (None, START, u'bar'),
              (None, TEXT, u'BAR'),
              (None, END, u'bar'),
              (None, END, u'root')]]
            )
    def test_copy_attribute(self):
        # Attribute selections buffer an ATTR event with the matched attrs.
        self.assertEqual(
            self._apply('foo/@name', with_attrs=True)[1],
            [[(None, ATTR, {'name': u'foo'})]]
            )
    def test_copy_attributes(self):
        self.assertEqual(
            self._apply('foo/@*', with_attrs=True)[1],
            [[(None, ATTR, {u'name': u'foo', u'size': u'100'})]]
            )
class CutTest(unittest.TestCase, BufferTestMixin):
    """Tests for ``Transformer.cut()``.

    Unlike copy, cutting removes the matched events from the output stream;
    both the remaining output and the buffer snapshots are asserted.
    """
    operation = 'cut'
    def test_cut_element(self):
        # <foo> is removed from the output and lands in the buffer.
        self.assertEqual(
            self._apply('foo'),
            ([(None, START, u'root'),
              (None, TEXT, u'ROOT'),
              (None, START, u'bar'),
              (None, TEXT, u'BAR'),
              (None, END, u'bar'),
              (None, END, u'root')],
             [[(None, START, u'foo'),
               (None, TEXT, u'FOO'),
               (None, END, u'foo')]])
            )
    def test_cut_adjacent_elements(self):
        # A BREAK marker separates the two cut regions in the output.
        self.assertEqual(
            self._apply('foo|bar'),
            ([(None, START, u'root'),
              (None, TEXT, u'ROOT'),
              (BREAK, BREAK, None),
              (None, END, u'root')],
             [[(None, START, u'foo'),
               (None, TEXT, u'FOO'),
               (None, END, u'foo')],
              [(None, START, u'bar'),
               (None, TEXT, u'BAR'),
               (None, END, u'bar')]])
            )
    def test_cut_all(self):
        self.assertEqual(
            self._apply('*|text()'),
            ([(None, 'START', u'root'),
              ('BREAK', 'BREAK', None),
              ('BREAK', 'BREAK', None),
              (None, 'END', u'root')],
             [[(None, 'TEXT', u'ROOT')],
              [(None, 'START', u'foo'),
               (None, 'TEXT', u'FOO'),
               (None, 'END', u'foo')],
              [(None, 'START', u'bar'),
               (None, 'TEXT', u'BAR'),
               (None, 'END', u'bar')]])
            )
    def test_cut_text(self):
        # Cutting text leaves the now-empty elements behind.
        self.assertEqual(
            self._apply('*/text()'),
            ([(None, 'START', u'root'),
              (None, 'TEXT', u'ROOT'),
              (None, 'START', u'foo'),
              (None, 'END', u'foo'),
              (None, 'START', u'bar'),
              (None, 'END', u'bar'),
              (None, 'END', u'root')],
             [[(None, 'TEXT', u'FOO')],
              [(None, 'TEXT', u'BAR')]])
            )
    def test_cut_context(self):
        # Cutting the context node buffers the entire stream.
        self.assertEqual(
            self._apply('.')[1],
            [[(None, 'START', u'root'),
              (None, 'TEXT', u'ROOT'),
              (None, 'START', u'foo'),
              (None, 'TEXT', u'FOO'),
              (None, 'END', u'foo'),
              (None, 'START', u'bar'),
              (None, 'TEXT', u'BAR'),
              (None, 'END', u'bar'),
              (None, 'END', u'root')]]
            )
    def test_cut_attribute(self):
        # The attribute disappears from the element and lands in the buffer.
        self.assertEqual(
            self._apply('foo/@name', with_attrs=True),
            ([(None, START, (u'root', {})),
              (None, TEXT, u'ROOT'),
              (None, START, (u'foo', {u'size': u'100'})),
              (None, TEXT, u'FOO'),
              (None, END, u'foo'),
              (None, START, (u'bar', {u'name': u'bar'})),
              (None, TEXT, u'BAR'),
              (None, END, u'bar'),
              (None, END, u'root')],
             [[(None, ATTR, {u'name': u'foo'})]])
            )
    def test_cut_attributes(self):
        self.assertEqual(
            self._apply('foo/@*', with_attrs=True),
            ([(None, START, (u'root', {})),
              (None, TEXT, u'ROOT'),
              (None, START, (u'foo', {})),
              (None, TEXT, u'FOO'),
              (None, END, u'foo'),
              (None, START, (u'bar', {u'name': u'bar'})),
              (None, TEXT, u'BAR'),
              (None, END, u'bar'),
              (None, END, u'root')],
             [[(None, ATTR, {u'name': u'foo', u'size': u'100'})]])
            )
    # XXX Test this when the XPath implementation is fixed (#233).
    # def test_cut_attribute_or_attribute(self):
    #     self.assertEqual(
    #         self._apply('foo/@name | foo/@size', with_attrs=True),
    #         ([(None, START, (u'root', {})),
    #           (None, TEXT, u'ROOT'),
    #           (None, START, (u'foo', {})),
    #           (None, TEXT, u'FOO'),
    #           (None, END, u'foo'),
    #           (None, START, (u'bar', {u'name': u'bar'})),
    #           (None, TEXT, u'BAR'),
    #           (None, END, u'bar'),
    #           (None, END, u'root')],
    #          [[(None, ATTR, {u'name': u'foo', u'size': u'100'})]])
    #         )
def suite():
    """Assemble the transform-filter test suite: all TestCase classes plus
    the module's doctests."""
    from genshi.input import HTML
    from genshi.core import Markup
    from genshi.builder import tag
    tests = unittest.TestSuite()
    cases = (SelectTest, InvertTest, EndTest,
             EmptyTest, RemoveTest, UnwrapText, WrapTest, FilterTest,
             MapTest, SubstituteTest, RenameTest, ReplaceTest, BeforeTest,
             AfterTest, PrependTest, AppendTest, AttrTest, CopyTest, CutTest)
    for case in cases:
        tests.addTest(unittest.makeSuite(case, 'test'))
    tests.addTest(doctest.DocTestSuite(
        genshi.filters.transform, optionflags=doctest.NORMALIZE_WHITESPACE,
        extraglobs={'HTML': HTML, 'tag': tag, 'Markup': Markup}))
    return tests


# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import unittest
from genshi.input import HTML, ParseError
from genshi.filters.html import HTMLFormFiller, HTMLSanitizer
from genshi.template import MarkupTemplate
class HTMLFormFillerTestCase(unittest.TestCase):
    """Tests for `HTMLFormFiller`.

    The filter populates form controls in an HTML stream from a data dict:
    text/hidden inputs, textareas, checkboxes, radio buttons and selects.
    Values may be given as scalars or single-element lists.  Password
    fields are only filled when the filler is created with
    ``passwords=True``.
    """
    def test_fill_input_text_no_value(self):
        html = HTML("""<form><p>
          <input type="text" name="foo" />
        </p></form>""") | HTMLFormFiller()
        self.assertEquals("""<form><p>
          <input type="text" name="foo"/>
        </p></form>""", html.render())
    def test_fill_input_text_single_value(self):
        html = HTML("""<form><p>
          <input type="text" name="foo" />
        </p></form>""") | HTMLFormFiller(data={'foo': 'bar'})
        self.assertEquals("""<form><p>
          <input type="text" name="foo" value="bar"/>
        </p></form>""", html.render())
    def test_fill_input_text_multi_value(self):
        # A single-element list fills the field with that element.
        html = HTML("""<form><p>
          <input type="text" name="foo" />
        </p></form>""") | HTMLFormFiller(data={'foo': ['bar']})
        self.assertEquals("""<form><p>
          <input type="text" name="foo" value="bar"/>
        </p></form>""", html.render())
    def test_fill_input_hidden_no_value(self):
        html = HTML("""<form><p>
          <input type="hidden" name="foo" />
        </p></form>""") | HTMLFormFiller()
        self.assertEquals("""<form><p>
          <input type="hidden" name="foo"/>
        </p></form>""", html.render())
    def test_fill_input_hidden_single_value(self):
        html = HTML("""<form><p>
          <input type="hidden" name="foo" />
        </p></form>""") | HTMLFormFiller(data={'foo': 'bar'})
        self.assertEquals("""<form><p>
          <input type="hidden" name="foo" value="bar"/>
        </p></form>""", html.render())
    def test_fill_input_hidden_multi_value(self):
        html = HTML("""<form><p>
          <input type="hidden" name="foo" />
        </p></form>""") | HTMLFormFiller(data={'foo': ['bar']})
        self.assertEquals("""<form><p>
          <input type="hidden" name="foo" value="bar"/>
        </p></form>""", html.render())
    def test_fill_textarea_no_value(self):
        html = HTML("""<form><p>
          <textarea name="foo"></textarea>
        </p></form>""") | HTMLFormFiller()
        self.assertEquals("""<form><p>
          <textarea name="foo"/>
        </p></form>""", html.render())
    def test_fill_textarea_single_value(self):
        # Textareas are filled via element content, not a value attribute.
        html = HTML("""<form><p>
          <textarea name="foo"></textarea>
        </p></form>""") | HTMLFormFiller(data={'foo': 'bar'})
        self.assertEquals("""<form><p>
          <textarea name="foo">bar</textarea>
        </p></form>""", html.render())
    def test_fill_textarea_multi_value(self):
        html = HTML("""<form><p>
          <textarea name="foo"></textarea>
        </p></form>""") | HTMLFormFiller(data={'foo': ['bar']})
        self.assertEquals("""<form><p>
          <textarea name="foo">bar</textarea>
        </p></form>""", html.render())
    def test_fill_input_checkbox_no_value(self):
        html = HTML("""<form><p>
          <input type="checkbox" name="foo" />
        </p></form>""") | HTMLFormFiller()
        self.assertEquals("""<form><p>
          <input type="checkbox" name="foo"/>
        </p></form>""", html.render())
    def test_fill_input_checkbox_single_value_auto(self):
        # Without an explicit value attribute, 'on' means checked.
        html = HTML("""<form><p>
          <input type="checkbox" name="foo" />
        </p></form>""")
        self.assertEquals("""<form><p>
          <input type="checkbox" name="foo"/>
        </p></form>""", (html | HTMLFormFiller(data={'foo': ''})).render())
        self.assertEquals("""<form><p>
          <input type="checkbox" name="foo" checked="checked"/>
        </p></form>""", (html | HTMLFormFiller(data={'foo': 'on'})).render())
    def test_fill_input_checkbox_single_value_defined(self):
        # With a value attribute, the data must match that value exactly.
        html = HTML("""<form><p>
          <input type="checkbox" name="foo" value="1" />
        </p></form>""")
        self.assertEquals("""<form><p>
          <input type="checkbox" name="foo" value="1" checked="checked"/>
        </p></form>""", (html | HTMLFormFiller(data={'foo': '1'})).render())
        self.assertEquals("""<form><p>
          <input type="checkbox" name="foo" value="1"/>
        </p></form>""", (html | HTMLFormFiller(data={'foo': '2'})).render())
    def test_fill_input_checkbox_multi_value_auto(self):
        html = HTML("""<form><p>
          <input type="checkbox" name="foo" />
        </p></form>""")
        self.assertEquals("""<form><p>
          <input type="checkbox" name="foo"/>
        </p></form>""", (html | HTMLFormFiller(data={'foo': []})).render())
        self.assertEquals("""<form><p>
          <input type="checkbox" name="foo" checked="checked"/>
        </p></form>""", (html | HTMLFormFiller(data={'foo': ['on']})).render())
    def test_fill_input_checkbox_multi_value_defined(self):
        html = HTML("""<form><p>
          <input type="checkbox" name="foo" value="1" />
        </p></form>""")
        self.assertEquals("""<form><p>
          <input type="checkbox" name="foo" value="1" checked="checked"/>
        </p></form>""", (html | HTMLFormFiller(data={'foo': ['1']})).render())
        self.assertEquals("""<form><p>
          <input type="checkbox" name="foo" value="1"/>
        </p></form>""", (html | HTMLFormFiller(data={'foo': ['2']})).render())
    def test_fill_input_radio_no_value(self):
        html = HTML("""<form><p>
          <input type="radio" name="foo" />
        </p></form>""") | HTMLFormFiller()
        self.assertEquals("""<form><p>
          <input type="radio" name="foo"/>
        </p></form>""", html.render())
    def test_fill_input_radio_single_value(self):
        # A radio button is checked only when the data matches its value.
        html = HTML("""<form><p>
          <input type="radio" name="foo" value="1" />
        </p></form>""")
        self.assertEquals("""<form><p>
          <input type="radio" name="foo" value="1" checked="checked"/>
        </p></form>""", (html | HTMLFormFiller(data={'foo': '1'})).render())
        self.assertEquals("""<form><p>
          <input type="radio" name="foo" value="1"/>
        </p></form>""", (html | HTMLFormFiller(data={'foo': '2'})).render())
    def test_fill_input_radio_multi_value(self):
        html = HTML("""<form><p>
          <input type="radio" name="foo" value="1" />
        </p></form>""")
        self.assertEquals("""<form><p>
          <input type="radio" name="foo" value="1" checked="checked"/>
        </p></form>""", (html | HTMLFormFiller(data={'foo': ['1']})).render())
        self.assertEquals("""<form><p>
          <input type="radio" name="foo" value="1"/>
        </p></form>""", (html | HTMLFormFiller(data={'foo': ['2']})).render())
    def test_fill_select_no_value_auto(self):
        html = HTML("""<form><p>
          <select name="foo">
            <option>1</option>
            <option>2</option>
            <option>3</option>
          </select>
        </p></form>""") | HTMLFormFiller()
        self.assertEquals("""<form><p>
          <select name="foo">
            <option>1</option>
            <option>2</option>
            <option>3</option>
          </select>
        </p></form>""", html.render())
    def test_fill_select_no_value_defined(self):
        html = HTML("""<form><p>
          <select name="foo">
            <option value="1">1</option>
            <option value="2">2</option>
            <option value="3">3</option>
          </select>
        </p></form>""") | HTMLFormFiller()
        self.assertEquals("""<form><p>
          <select name="foo">
            <option value="1">1</option>
            <option value="2">2</option>
            <option value="3">3</option>
          </select>
        </p></form>""", html.render())
    def test_fill_select_single_value_auto(self):
        # Options without a value attribute match on their text content.
        html = HTML("""<form><p>
          <select name="foo">
            <option>1</option>
            <option>2</option>
            <option>3</option>
          </select>
        </p></form>""") | HTMLFormFiller(data={'foo': '1'})
        self.assertEquals("""<form><p>
          <select name="foo">
            <option selected="selected">1</option>
            <option>2</option>
            <option>3</option>
          </select>
        </p></form>""", html.render())
    def test_fill_select_single_value_defined(self):
        html = HTML("""<form><p>
          <select name="foo">
            <option value="1">1</option>
            <option value="2">2</option>
            <option value="3">3</option>
          </select>
        </p></form>""") | HTMLFormFiller(data={'foo': '1'})
        self.assertEquals("""<form><p>
          <select name="foo">
            <option value="1" selected="selected">1</option>
            <option value="2">2</option>
            <option value="3">3</option>
          </select>
        </p></form>""", html.render())
    def test_fill_select_multi_value_auto(self):
        # Multiple selects mark every option whose value is in the list.
        html = HTML("""<form><p>
          <select name="foo" multiple>
            <option>1</option>
            <option>2</option>
            <option>3</option>
          </select>
        </p></form>""") | HTMLFormFiller(data={'foo': ['1', '3']})
        self.assertEquals("""<form><p>
          <select name="foo" multiple="multiple">
            <option selected="selected">1</option>
            <option>2</option>
            <option selected="selected">3</option>
          </select>
        </p></form>""", html.render())
    def test_fill_select_multi_value_defined(self):
        html = HTML("""<form><p>
          <select name="foo" multiple>
            <option value="1">1</option>
            <option value="2">2</option>
            <option value="3">3</option>
          </select>
        </p></form>""") | HTMLFormFiller(data={'foo': ['1', '3']})
        self.assertEquals("""<form><p>
          <select name="foo" multiple="multiple">
            <option value="1" selected="selected">1</option>
            <option value="2">2</option>
            <option value="3" selected="selected">3</option>
          </select>
        </p></form>""", html.render())
    def test_fill_option_segmented_text(self):
        # Option text built from multiple stream segments still matches.
        html = MarkupTemplate("""<form>
          <select name="foo">
            <option value="1">foo $x</option>
          </select>
        </form>""").generate(x=1) | HTMLFormFiller(data={'foo': '1'})
        self.assertEquals("""<form>
          <select name="foo">
            <option value="1" selected="selected">foo 1</option>
          </select>
        </form>""", html.render())
    def test_fill_option_segmented_text_no_value(self):
        html = MarkupTemplate("""<form>
          <select name="foo">
            <option>foo $x bar</option>
          </select>
        </form>""").generate(x=1) | HTMLFormFiller(data={'foo': 'foo 1 bar'})
        self.assertEquals("""<form>
          <select name="foo">
            <option selected="selected">foo 1 bar</option>
          </select>
        </form>""", html.render())
    def test_fill_option_unicode_value(self):
        # Non-ASCII option values compare correctly against unicode data.
        html = HTML("""<form>
          <select name="foo">
            <option value="&ouml;">foo</option>
          </select>
        </form>""") | HTMLFormFiller(data={'foo': u'ö'})
        self.assertEquals(u"""<form>
          <select name="foo">
            <option value="ö" selected="selected">foo</option>
          </select>
        </form>""", html.render(encoding=None))
    def test_fill_input_password_disabled(self):
        # Password fields are left empty by default.
        html = HTML("""<form><p>
          <input type="password" name="pass" />
        </p></form>""") | HTMLFormFiller(data={'pass': 'bar'})
        self.assertEquals("""<form><p>
          <input type="password" name="pass"/>
        </p></form>""", html.render())
    def test_fill_input_password_enabled(self):
        # ... unless the filler is constructed with passwords=True.
        html = HTML("""<form><p>
          <input type="password" name="pass" />
        </p></form>""") | HTMLFormFiller(data={'pass': '1234'}, passwords=True)
        self.assertEquals("""<form><p>
          <input type="password" name="pass" value="1234"/>
        </p></form>""", html.render())
class HTMLSanitizerTestCase(unittest.TestCase):
    """Tests for `HTMLSanitizer`.

    Verifies that safe markup passes through unchanged while dangerous
    constructs (script elements, event-handler attributes, javascript:
    URLs — including entity/whitespace-obfuscated variants — and unsafe
    CSS) are stripped.
    """
    def test_sanitize_unchanged(self):
        html = HTML('<a href="#">fo<br />o</a>')
        self.assertEquals('<a href="#">fo<br/>o</a>',
                          (html | HTMLSanitizer()).render())
        html = HTML('<a href="#with:colon">foo</a>')
        self.assertEquals('<a href="#with:colon">foo</a>',
                          (html | HTMLSanitizer()).render())
    def test_sanitize_escape_text(self):
        html = HTML('<a href="#">fo&amp;</a>')
        self.assertEquals('<a href="#">fo&amp;</a>',
                          (html | HTMLSanitizer()).render())
        html = HTML('<a href="#">&lt;foo&gt;</a>')
        self.assertEquals('<a href="#">&lt;foo&gt;</a>',
                          (html | HTMLSanitizer()).render())
    def test_sanitize_entityref_text(self):
        html = HTML('<a href="#">fo&ouml;</a>')
        self.assertEquals(u'<a href="#">foö</a>',
                          (html | HTMLSanitizer()).render(encoding=None))
    def test_sanitize_escape_attr(self):
        html = HTML('<div title="&lt;foo&gt;"></div>')
        self.assertEquals('<div title="&lt;foo&gt;"/>',
                          (html | HTMLSanitizer()).render())
    def test_sanitize_close_empty_tag(self):
        html = HTML('<a href="#">fo<br>o</a>')
        self.assertEquals('<a href="#">fo<br/>o</a>',
                          (html | HTMLSanitizer()).render())
    def test_sanitize_invalid_entity(self):
        html = HTML('&junk;')
        self.assertEquals('&amp;junk;', (html | HTMLSanitizer()).render())
    def test_sanitize_remove_script_elem(self):
        html = HTML('<script>alert("Foo")</script>')
        self.assertEquals('', (html | HTMLSanitizer()).render())
        html = HTML('<SCRIPT SRC="http://example.com/"></SCRIPT>')
        self.assertEquals('', (html | HTMLSanitizer()).render())
        # Malformed script tags must not even parse.
        self.assertRaises(ParseError, HTML, '<SCR\0IPT>alert("foo")</SCR\0IPT>')
        self.assertRaises(ParseError, HTML,
                          '<SCRIPT&XYZ SRC="http://example.com/"></SCRIPT>')
    def test_sanitize_remove_onclick_attr(self):
        html = HTML('<div onclick=\'alert("foo")\' />')
        self.assertEquals('<div/>', (html | HTMLSanitizer()).render())
    def test_sanitize_remove_input_password(self):
        html = HTML('<form><input type="password" /></form>')
        self.assertEquals('<form/>', (html | HTMLSanitizer()).render())
    def test_sanitize_remove_comments(self):
        html = HTML('''<div><!-- conditional comment crap --></div>''')
        self.assertEquals('<div/>', (html | HTMLSanitizer()).render())
    def test_sanitize_remove_style_scripts(self):
        sanitizer = HTMLSanitizer(safe_attrs=HTMLSanitizer.SAFE_ATTRS | set(['style']))
        # Inline style with url() using javascript: scheme
        html = HTML('<DIV STYLE=\'background: url(javascript:alert("foo"))\'>')
        self.assertEquals('<div/>', (html | sanitizer).render())
        # Inline style with url() using javascript: scheme, using control char
        html = HTML('<DIV STYLE=\'background: url(&#1;javascript:alert("foo"))\'>')
        self.assertEquals('<div/>', (html | sanitizer).render())
        # Inline style with url() using javascript: scheme, in quotes
        html = HTML('<DIV STYLE=\'background: url("javascript:alert(foo)")\'>')
        self.assertEquals('<div/>', (html | sanitizer).render())
        # IE expressions in CSS not allowed
        html = HTML('<DIV STYLE=\'width: expression(alert("foo"));\'>')
        self.assertEquals('<div/>', (html | sanitizer).render())
        html = HTML('<DIV STYLE=\'width: e/**/xpression(alert("foo"));\'>')
        self.assertEquals('<div/>', (html | sanitizer).render())
        html = HTML('<DIV STYLE=\'background: url(javascript:alert("foo"));'
                    'color: #fff\'>')
        self.assertEquals('<div style="color: #fff"/>',
                          (html | sanitizer).render())
        # Inline style with url() using javascript: scheme, using unicode
        # escapes
        html = HTML('<DIV STYLE=\'background: \\75rl(javascript:alert("foo"))\'>')
        self.assertEquals('<div/>', (html | sanitizer).render())
        html = HTML('<DIV STYLE=\'background: \\000075rl(javascript:alert("foo"))\'>')
        self.assertEquals('<div/>', (html | sanitizer).render())
        html = HTML('<DIV STYLE=\'background: \\75 rl(javascript:alert("foo"))\'>')
        self.assertEquals('<div/>', (html | sanitizer).render())
        html = HTML('<DIV STYLE=\'background: \\000075 rl(javascript:alert("foo"))\'>')
        self.assertEquals('<div/>', (html | sanitizer).render())
        html = HTML('<DIV STYLE=\'background: \\000075\r\nrl(javascript:alert("foo"))\'>')
        self.assertEquals('<div/>', (html | sanitizer).render())
    def test_sanitize_remove_style_phishing(self):
        sanitizer = HTMLSanitizer(safe_attrs=HTMLSanitizer.SAFE_ATTRS | set(['style']))
        # The position property is not allowed
        html = HTML('<div style="position:absolute;top:0"></div>')
        self.assertEquals('<div style="top:0"/>', (html | sanitizer).render())
        # Normal margins get passed through
        html = HTML('<div style="margin:10px 20px"></div>')
        self.assertEquals('<div style="margin:10px 20px"/>',
                          (html | sanitizer).render())
        # But not negative margins
        html = HTML('<div style="margin:-1000px 0 0"></div>')
        self.assertEquals('<div/>', (html | sanitizer).render())
        html = HTML('<div style="margin-left:-2000px 0 0"></div>')
        self.assertEquals('<div/>', (html | sanitizer).render())
        html = HTML('<div style="margin-left:1em 1em 1em -4000px"></div>')
        self.assertEquals('<div/>', (html | sanitizer).render())
    def test_sanitize_remove_src_javascript(self):
        html = HTML('<img src=\'javascript:alert("foo")\'>')
        self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
        # Case-insensitive protocol matching
        html = HTML('<IMG SRC=\'JaVaScRiPt:alert("foo")\'>')
        self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
        # Grave accents (not parsed)
        self.assertRaises(ParseError, HTML,
                          '<IMG SRC=`javascript:alert("RSnake says, \'foo\'")`>')
        # Protocol encoded using UTF-8 numeric entities
        html = HTML('<IMG SRC=\'&#106;&#97;&#118;&#97;&#115;&#99;&#114;&#105;'
                    '&#112;&#116;&#58;alert("foo")\'>')
        self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
        # Protocol encoded using UTF-8 numeric entities without a semicolon
        # (which is allowed because the max number of digits is used)
        html = HTML('<IMG SRC=\'&#0000106&#0000097&#0000118&#0000097'
                    '&#0000115&#0000099&#0000114&#0000105&#0000112&#0000116'
                    '&#0000058alert("foo")\'>')
        self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
        # Protocol encoded using UTF-8 numeric hex entities without a semicolon
        # (which is allowed because the max number of digits is used)
        html = HTML('<IMG SRC=\'&#x6A&#x61&#x76&#x61&#x73&#x63&#x72&#x69'
                    '&#x70&#x74&#x3A;alert("foo")\'>')
        self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
        # Embedded tab character in protocol
        html = HTML('<IMG SRC=\'jav\tascript:alert("foo");\'>')
        self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
        # Embedded tab character in protocol, but encoded this time
        html = HTML('<IMG SRC=\'jav&#x09;ascript:alert("foo");\'>')
        self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
def suite():
    """Collect the doctests and unit tests for the HTML filters."""
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(HTMLFormFiller.__module__))
    for case in (HTMLFormFillerTestCase, HTMLSanitizerTestCase):
        tests.addTest(unittest.makeSuite(case, 'test'))
    return tests


# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import unittest
def suite():
    """Aggregate the filter test suites: html, i18n and — when the doctest
    module is new enough — transform."""
    from genshi.filters.tests import html, i18n, transform
    tests = unittest.TestSuite()
    tests.addTest(html.suite())
    tests.addTest(i18n.suite())
    # The transform doctests need doctest.NORMALIZE_WHITESPACE support.
    if hasattr(doctest, 'NORMALIZE_WHITESPACE'):
        tests.addTest(transform.suite())
    return tests


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
from datetime import datetime
import doctest
from gettext import NullTranslations
from StringIO import StringIO
import unittest
from genshi.core import Attrs
from genshi.template import MarkupTemplate, Context
from genshi.filters.i18n import Translator, extract
from genshi.input import HTML
class DummyTranslations(NullTranslations):
    """In-memory stand-in for a gettext translations object.

    The catalog maps plain message ids (for `ugettext`) or
    ``(msgid, plural_flag)`` tuples (for `ungettext`) to translated
    strings.  Per-domain catalogs can be registered with `add_domain`;
    a lookup that misses a domain catalog falls back to this instance.
    """
    # Kept for backward compatibility with code reading the class attribute;
    # each instance now gets its own mapping (see __init__).
    _domains = {}

    def __init__(self, catalog=()):
        NullTranslations.__init__(self)
        self._catalog = catalog or {}
        # Bug fix: `_domains` used to be only a class-level dict, so domains
        # registered on one instance leaked into every other instance.
        self._domains = {}
        # Germanic plural rule: singular form only for exactly 1.
        self.plural = lambda n: n != 1

    def add_domain(self, domain, catalog):
        """Register *catalog* under *domain*, with self as fallback."""
        translation = DummyTranslations(catalog)
        translation.add_fallback(self)
        self._domains[domain] = translation

    def _domain_call(self, func, domain, *args, **kwargs):
        # Dispatch *func* to the domain's translations, or to self when the
        # domain is unknown.
        return getattr(self._domains.get(domain, self), func)(*args, **kwargs)

    def ugettext(self, message):
        missing = object()
        tmsg = self._catalog.get(message, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.ugettext(message)
            return unicode(message)
        return tmsg

    def dugettext(self, domain, message):
        return self._domain_call('ugettext', domain, message)

    def ungettext(self, msgid1, msgid2, n):
        try:
            return self._catalog[(msgid1, self.plural(n))]
        except KeyError:
            if self._fallback:
                # Bug fix: delegate to the unicode-aware `ungettext` (the
                # original called the byte-string `ngettext` variant, which
                # ignored the fallback's unicode catalog path).
                return self._fallback.ungettext(msgid1, msgid2, n)
            if n == 1:
                return msgid1
            else:
                return msgid2

    def dungettext(self, domain, singular, plural, numeral):
        return self._domain_call('ungettext', domain, singular, plural, numeral)
class TranslatorTestCase(unittest.TestCase):
def test_translate_included_attribute_text(self):
"""
Verify that translated attributes end up in a proper `Attrs` instance.
"""
html = HTML("""<html>
<span title="Foo"></span>
</html>""")
translator = Translator(lambda s: u"Voh")
stream = list(html.filter(translator))
kind, data, pos = stream[2]
assert isinstance(data[1], Attrs)
def test_extract_without_text(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<p title="Bar">Foo</p>
${ngettext("Singular", "Plural", num)}
</html>""")
translator = Translator(extract_text=False)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((3, 'ngettext', ('Singular', 'Plural', None), []),
messages[0])
def test_extract_plural_form(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
${ngettext("Singular", "Plural", num)}
</html>""")
translator = Translator()
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((2, 'ngettext', ('Singular', 'Plural', None), []),
messages[0])
def test_extract_funky_plural_form(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
${ngettext(len(items), *widget.display_names)}
</html>""")
translator = Translator()
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((2, 'ngettext', (None, None), []), messages[0])
def test_extract_gettext_with_unicode_string(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
${gettext("Grüße")}
</html>""")
translator = Translator()
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((2, 'gettext', u'Gr\xfc\xdfe', []), messages[0])
def test_extract_included_attribute_text(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<span title="Foo"></span>
</html>""")
translator = Translator()
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((2, None, 'Foo', []), messages[0])
def test_extract_attribute_expr(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<input type="submit" value="${_('Save')}" />
</html>""")
translator = Translator()
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((2, '_', 'Save', []), messages[0])
def test_extract_non_included_attribute_interpolated(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<a href="#anchor_${num}">Foo</a>
</html>""")
translator = Translator()
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((2, None, 'Foo', []), messages[0])
def test_extract_text_from_sub(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<py:if test="foo">Foo</py:if>
</html>""")
translator = Translator()
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((2, None, 'Foo', []), messages[0])
def test_ignore_tag_with_fixed_xml_lang(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<p xml:lang="en">(c) 2007 Edgewall Software</p>
</html>""")
translator = Translator()
messages = list(translator.extract(tmpl.stream))
self.assertEqual(0, len(messages))
    def test_extract_tag_with_variable_xml_lang(self):
        """A dynamic ``xml:lang="${...}"`` does not suppress extraction."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<p xml:lang="${lang}">(c) 2007 Edgewall Software</p>
</html>""")
        translator = Translator()
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual((2, None, '(c) 2007 Edgewall Software', []),
                         messages[0])
def test_ignore_attribute_with_expression(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/">
<input type="submit" value="Reply" title="Reply to comment $num" />
</html>""")
translator = Translator()
messages = list(translator.extract(tmpl.stream))
self.assertEqual(0, len(messages))
    def test_extract_i18n_msg(self):
        """``i18n:msg`` collapses child markup into [N:...] placeholders."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Please see <a href="help.html">Help</a> for details.
</p>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Please see [1:Help] for details.', messages[0][2])
    def test_translate_i18n_msg(self):
        """A translated [N:...] message is re-expanded into the markup."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Please see <a href="help.html">Help</a> for details.
</p>
</html>""")
        gettext = lambda s: u"Für Details siehe bitte [1:Hilfe]."
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Für Details siehe bitte <a href="help.html">Hilfe</a>.</p>
</html>""", tmpl.generate().render())
    def test_extract_i18n_msg_nonewline(self):
        """``i18n:msg`` extraction works without trailing newlines."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">Please see <a href="help.html">Help</a></p>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Please see [1:Help]', messages[0][2])
    def test_translate_i18n_msg_nonewline(self):
        """Translation round-trip for an ``i18n:msg`` without newlines."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">Please see <a href="help.html">Help</a></p>
</html>""")
        gettext = lambda s: u"Für Details siehe bitte [1:Hilfe]"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Für Details siehe bitte <a href="help.html">Hilfe</a></p>
</html>""", tmpl.generate().render())
    def test_extract_i18n_msg_elt_nonewline(self):
        """Extraction from an ``<i18n:msg>`` element (not attribute) form."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<i18n:msg>Please see <a href="help.html">Help</a></i18n:msg>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Please see [1:Help]', messages[0][2])
    def test_translate_i18n_msg_elt_nonewline(self):
        """Translation round-trip for the ``<i18n:msg>`` element form."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<i18n:msg>Please see <a href="help.html">Help</a></i18n:msg>
</html>""")
        gettext = lambda s: u"Für Details siehe bitte [1:Hilfe]"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
Für Details siehe bitte <a href="help.html">Hilfe</a>
</html>""", tmpl.generate().render())
    def test_extract_i18n_msg_nested(self):
        """Nested child elements produce nested [1:[2:...]] placeholders."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Please see <a href="help.html"><em>Help</em> page</a> for details.
</p>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Please see [1:[2:Help] page] for details.',
                         messages[0][2])
    def test_translate_i18n_msg_nested(self):
        """Nested placeholders in the translation expand to nested markup."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Please see <a href="help.html"><em>Help</em> page</a> for details.
</p>
</html>""")
        gettext = lambda s: u"Für Details siehe bitte [1:[2:Hilfeseite]]."
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Für Details siehe bitte <a href="help.html"><em>Hilfeseite</em></a>.</p>
</html>""", tmpl.generate().render())
    def test_extract_i18n_msg_label_with_nested_input(self):
        """An empty child element (``<input/>``) becomes an empty [N:] slot."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<div i18n:msg="">
<label><input type="text" size="3" name="daysback" value="30" /> days back</label>
</div>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('[1:[2:] days back]',
                         messages[0][2])
    def test_translate_i18n_msg_label_with_nested_input(self):
        """Empty [N:] slots in the translation re-emit the original element."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<div i18n:msg="">
<label><input type="text" size="3" name="daysback" value="30" /> foo bar</label>
</div>
</html>""")
        gettext = lambda s: "[1:[2:] foo bar]"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<div><label><input type="text" size="3" name="daysback" value="30"/> foo bar</label></div>
</html>""", tmpl.generate().render())
    def test_extract_i18n_msg_empty(self):
        """A childless ``<input/>`` inside ``i18n:msg`` extracts as ``[1:]``."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Show me <input type="text" name="num" /> entries per page.
</p>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Show me [1:] entries per page.', messages[0][2])
    def test_translate_i18n_msg_empty(self):
        """Translating a message with an empty [1:] slot keeps the element."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Show me <input type="text" name="num" /> entries per page.
</p>
</html>""")
        gettext = lambda s: u"[1:] Einträge pro Seite anzeigen."
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p><input type="text" name="num"/> Einträge pro Seite anzeigen.</p>
</html>""", tmpl.generate().render())
    def test_extract_i18n_msg_multiple(self):
        """Multiple sibling elements get sequential [1:], [2:] placeholders."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Please see <a href="help.html">Help</a> for <em>details</em>.
</p>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Please see [1:Help] for [2:details].', messages[0][2])
    def test_translate_i18n_msg_multiple(self):
        """A translation may reference the [1:]/[2:] slots in any order."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Please see <a href="help.html">Help</a> for <em>details</em>.
</p>
</html>""")
        gettext = lambda s: u"Für [2:Details] siehe bitte [1:Hilfe]."
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Für <em>Details</em> siehe bitte <a href="help.html">Hilfe</a>.</p>
</html>""", tmpl.generate().render())
    def test_extract_i18n_msg_multiple_empty(self):
        """Multiple childless elements extract as multiple empty slots."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Show me <input type="text" name="num" /> entries per page, starting at page <input type="text" name="num" />.
</p>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Show me [1:] entries per page, starting at page [2:].',
                         messages[0][2])
    def test_translate_i18n_msg_multiple_empty(self):
        """Translation with several empty slots; expected output is the
        UTF-8 encoded byte string produced by render()."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Show me <input type="text" name="num" /> entries per page, starting at page <input type="text" name="num" />.
</p>
</html>""")
        gettext = lambda s: u"[1:] Einträge pro Seite, beginnend auf Seite [2:]."
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p><input type="text" name="num"/> Eintr\xc3\xa4ge pro Seite, beginnend auf Seite <input type="text" name="num"/>.</p>
</html>""", tmpl.generate().render())
    def test_extract_i18n_msg_with_param(self):
        """A named ``i18n:msg`` param turns ``${...}`` into ``%(name)s``."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="name">
Hello, ${user.name}!
</p>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Hello, %(name)s!', messages[0][2])
    def test_translate_i18n_msg_with_param(self):
        """The ``%(name)s`` slot is filled from the template context."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="name">
Hello, ${user.name}!
</p>
</html>""")
        gettext = lambda s: u"Hallo, %(name)s!"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Hallo, Jim!</p>
</html>""", tmpl.generate(user=dict(name='Jim')).render())
    def test_translate_i18n_msg_with_param_reordered(self):
        """The translation may move the ``%(name)s`` slot within the text."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="name">
Hello, ${user.name}!
</p>
</html>""")
        gettext = lambda s: u"%(name)s, sei gegrüßt!"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Jim, sei gegrüßt!</p>
</html>""", tmpl.generate(user=dict(name='Jim')).render())
    def test_translate_i18n_msg_with_attribute_param(self):
        """An interpolated attribute inside a [1:] slot is still evaluated."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Hello, <a href="#${anchor}">dude</a>!
</p>
</html>""")
        gettext = lambda s: u"Sei gegrüßt, [1:Alter]!"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Sei gegrüßt, <a href="#42">Alter</a>!</p>
</html>""", tmpl.generate(anchor='42').render())
    def test_extract_i18n_msg_with_two_params(self):
        """Two comma-separated params map two expressions to two slots."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="name, time">
Posted by ${post.author} at ${entry.time.strftime('%H:%m')}
</p>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Posted by %(name)s at %(time)s', messages[0][2])
    def test_translate_i18n_msg_with_two_params(self):
        """Both ``%(name)s`` and ``%(time)s`` are substituted on render."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="name, time">
Written by ${entry.author} at ${entry.time.strftime('%H:%M')}
</p>
</html>""")
        gettext = lambda s: u"%(name)s schrieb dies um %(time)s"
        translator = Translator(gettext)
        translator.setup(tmpl)
        entry = {
            'author': 'Jim',
            'time': datetime(2008, 4, 1, 14, 30)
        }
        self.assertEqual("""<html>
<p>Jim schrieb dies um 14:30</p>
</html>""", tmpl.generate(entry=entry).render())
    def test_extract_i18n_msg_with_directive(self):
        """A ``py:attrs`` directive on a child does not disturb extraction."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Show me <input type="text" name="num" py:attrs="{'value': x}" /> entries per page.
</p>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual('Show me [1:] entries per page.', messages[0][2])
    def test_translate_i18n_msg_with_directive(self):
        """A ``py:attrs`` directive inside a slot is applied after translation."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">
Show me <input type="text" name="num" py:attrs="{'value': 'x'}" /> entries per page.
</p>
</html>""")
        gettext = lambda s: u"[1:] Einträge pro Seite anzeigen."
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p><input type="text" name="num" value="x"/> Einträge pro Seite anzeigen.</p>
</html>""", tmpl.generate().render())
    def test_extract_i18n_msg_with_comment(self):
        """``i18n:comment`` is captured regardless of attribute order."""
        # Comment attribute before the msg attribute.
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:comment="As in foo bar" i18n:msg="">Foo</p>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual((3, None, 'Foo', ['As in foo bar']), messages[0])
        # Comment attribute after the msg attribute — same result.
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="" i18n:comment="As in foo bar">Foo</p>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual((3, None, 'Foo', ['As in foo bar']), messages[0])
    def test_translate_i18n_msg_with_comment(self):
        """``i18n:comment`` is extraction-only metadata; translation ignores it."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="" i18n:comment="As in foo bar">Foo</p>
</html>""")
        gettext = lambda s: u"Voh"
        translator = Translator(gettext)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Voh</p>
</html>""", tmpl.generate().render())
    def test_extract_i18n_msg_with_attr(self):
        """Both the translatable attribute and the msg body are extracted."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="" title="Foo bar">Foo</p>
</html>""")
        translator = Translator()
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(2, len(messages))
        self.assertEqual((3, None, 'Foo bar', []), messages[0])
        self.assertEqual((3, None, 'Foo', []), messages[1])
def test_translate_i18n_msg_with_attr(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="" title="Foo bar">Foo</p>
</html>""")
gettext = lambda s: u"Voh"
translator = Translator(DummyTranslations({
'Foo': 'Voh',
'Foo bar': u'Voh bär'
}))
tmpl.filters.insert(0, translator)
tmpl.add_directives(Translator.NAMESPACE, translator)
self.assertEqual("""<html>
<p title="Voh bär">Voh</p>
</html>""", tmpl.generate().render())
    def test_translate_with_translations_object(self):
        """Translator accepts a translations object, not just a callable."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="" i18n:comment="As in foo bar">Foo</p>
</html>""")
        translator = Translator(DummyTranslations({'Foo': 'Voh'}))
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Voh</p>
</html>""", tmpl.generate().render())
    def test_translate_i18n_msg_and_py_strip_directives(self):
        """``i18n:msg`` combines with ``py:strip`` in either attribute order."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="" py:strip="">Foo</p>
<p py:strip="" i18n:msg="">Foo</p>
</html>""")
        translator = Translator(DummyTranslations({'Foo': 'Voh'}))
        translator.setup(tmpl)
        self.assertEqual("""<html>
Voh
Voh
</html>""", tmpl.generate().render())
    def test_i18n_msg_ticket_300_extract(self):
        """Regression for Genshi ticket #300: ``<i18n:msg params="...">``
        element form maps expressions to named ``%(...)s`` slots."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<i18n:msg params="date, author">
Changed ${ '10/12/2008' } ago by ${ 'me, the author' }
</i18n:msg>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual(
            (3, None, 'Changed %(date)s ago by %(author)s', []), messages[0]
        )
    def test_i18n_msg_ticket_300_translate(self):
        """Regression for Genshi ticket #300: translation side of the
        ``<i18n:msg params="...">`` element form."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<i18n:msg params="date, author">
Changed ${ date } ago by ${ author }
</i18n:msg>
</html>""")
        translations = DummyTranslations({
            'Changed %(date)s ago by %(author)s': u'Modificado à %(date)s por %(author)s'
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
Modificado à um dia por Pedro
</html>""", tmpl.generate(date='um dia', author="Pedro").render())
    def test_i18n_msg_ticket_251_extract(self):
        """Regression for Genshi ticket #251: literal brackets in the text
        are backslash-escaped so they don't clash with [N:] placeholders."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg=""><tt><b>Translation[&nbsp;0&nbsp;]</b>: <em>One coin</em></tt></p>
</html>""")
        translator = Translator()
        tmpl.add_directives(Translator.NAMESPACE, translator)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual(
            (3, None, u'[1:[2:Translation\\[\xa00\xa0\\]]: [3:One coin]]', []), messages[0]
        )
    def test_i18n_msg_ticket_251_translate(self):
        """Regression for Genshi ticket #251: escaped brackets round-trip
        through translation."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg=""><tt><b>Translation[&nbsp;0&nbsp;]</b>: <em>One coin</em></tt></p>
</html>""")
        translations = DummyTranslations({
            u'[1:[2:Translation\\[\xa00\xa0\\]]: [3:One coin]]':
                u'[1:[2:Trandução\\[\xa00\xa0\\]]: [3:Uma moeda]]'
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p><tt><b>Trandução[&nbsp;0&nbsp;]</b>: <em>Uma moeda</em></tt></p>
</html>""", tmpl.generate().render())
    def test_extract_i18n_msg_with_other_directives_nested(self):
        """``i18n:msg`` extraction still works when combined with ``py:with``
        and interpolated attributes on nested elements."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="" py:with="q = quote_plus(message[:80])">Before you do that, though, please first try
<strong><a href="${trac.homepage}search?ticket=yes&amp;noquickjump=1&amp;q=$q">searching</a>
for similar issues</strong>, as it is quite likely that this problem
has been reported before. For questions about installation
and configuration of Trac, please try the
<a href="${trac.homepage}wiki/MailingList">mailing list</a>
instead of filing a ticket.
</p>
</html>""")
        translator = Translator()
        translator.setup(tmpl)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual(
            'Before you do that, though, please first try\n '
            '[1:[2:searching]\n for similar issues], as it is '
            'quite likely that this problem\n has been reported '
            'before. For questions about installation\n and '
            'configuration of Trac, please try the\n '
            '[3:mailing list]\n instead of filing a ticket.',
            messages[0][2]
        )
    def test_translate_i18n_msg_with_other_directives_nested(self):
        """A long multi-line message with nested markup and interpolated
        attributes is translated and re-rendered with the markup intact."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">Before you do that, though, please first try
<strong><a href="${trac.homepage}search?ticket=yes&amp;noquickjump=1&amp;q=q">searching</a>
for similar issues</strong>, as it is quite likely that this problem
has been reported before. For questions about installation
and configuration of Trac, please try the
<a href="${trac.homepage}wiki/MailingList">mailing list</a>
instead of filing a ticket.
</p>
</html>""")
        translations = DummyTranslations({
            'Before you do that, though, please first try\n '
            '[1:[2:searching]\n for similar issues], as it is '
            'quite likely that this problem\n has been reported '
            'before. For questions about installation\n and '
            'configuration of Trac, please try the\n '
            '[3:mailing list]\n instead of filing a ticket.':
                u'Antes de o fazer, porém,\n '
                u'[1:por favor tente [2:procurar]\n por problemas semelhantes], uma vez que '
                u'é muito provável que este problema\n já tenha sido reportado '
                u'anteriormente. Para questões relativas à instalação\n e '
                u'configuração do Trac, por favor tente a\n '
                u'[3:mailing list]\n em vez de criar um assunto.'
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        ctx = Context()
        ctx.push({'trac': {'homepage': 'http://trac.edgewall.org/'}})
        self.assertEqual("""<html>
<p>Antes de o fazer, porém,
<strong>por favor tente <a href="http://trac.edgewall.org/search?ticket=yes&amp;noquickjump=1&amp;q=q">procurar</a>
por problemas semelhantes</strong>, uma vez que é muito provável que este problema
já tenha sido reportado anteriormente. Para questões relativas à instalação
e configuração do Trac, por favor tente a
<a href="http://trac.edgewall.org/wiki/MailingList">mailing list</a>
em vez de criar um assunto.</p>
</html>""", tmpl.generate(ctx).render())
    def test_i18n_msg_with_other_nested_directives_with_reordered_content(self):
        """``i18n:msg`` combined with ``py:if`` on the same element.

        See: http://genshi.edgewall.org/ticket/300#comment:10
        """
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p py:if="not editable" class="hint" i18n:msg="">
<strong>Note:</strong> This repository is defined in
<code><a href="${ 'href.wiki(TracIni)' }">trac.ini</a></code>
and cannot be edited on this page.
</p>
</html>""")
        translations = DummyTranslations({
            '[1:Note:] This repository is defined in\n '
            '[2:[3:trac.ini]]\n and cannot be edited on this page.':
                u'[1:Nota:] Este repositório está definido em \n '
                u'[2:[3:trac.ini]]\n e não pode ser editado nesta página.',
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        messages = list(translator.extract(tmpl.stream))
        self.assertEqual(1, len(messages))
        self.assertEqual(
            '[1:Note:] This repository is defined in\n '
            '[2:[3:trac.ini]]\n and cannot be edited on this page.',
            messages[0][2]
        )
        self.assertEqual("""<html>
<p class="hint"><strong>Nota:</strong> Este repositório está definido em
<code><a href="href.wiki(TracIni)">trac.ini</a></code>
e não pode ser editado nesta página.</p>
</html>""", tmpl.generate(editable=False).render())
    def test_translate_i18n_domain_with_msg_directives(self):
        """Translate with i18n:domain and nested i18n:msg directives."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<div i18n:domain="foo">
<p i18n:msg="">FooBar</p>
<p i18n:msg="">Bar</p>
</div>
</html>""")
        translations = DummyTranslations({'Bar': 'Voh'})
        translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'PT_Foo'})
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<div>
<p>BarFoo</p>
<p>PT_Foo</p>
</div>
</html>""", tmpl.generate().render())
    def test_translate_i18n_domain_with_inline_directives(self):
        """Translate with inlined i18n:domain and i18n:msg directives."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="" i18n:domain="foo">FooBar</p>
</html>""")
        translations = DummyTranslations({'Bar': 'Voh'})
        translations.add_domain('foo', {'FooBar': 'BarFoo'})
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>BarFoo</p>
</html>""", tmpl.generate().render())
    def test_translate_i18n_domain_without_msg_directives(self):
        """Translation without i18n:msg directives still uses the current
        i18n:domain for plain element text."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">Bar</p>
<div i18n:domain="foo">
<p i18n:msg="">FooBar</p>
<p i18n:msg="">Bar</p>
<p>Bar</p>
</div>
<p>Bar</p>
</html>""")
        translations = DummyTranslations({'Bar': 'Voh'})
        translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'PT_Foo'})
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Voh</p>
<div>
<p>BarFoo</p>
<p>PT_Foo</p>
<p>PT_Foo</p>
</div>
<p>Voh</p>
</html>""", tmpl.generate().render())
    def test_translate_i18n_domain_as_directive_not_attribute(self):
        """Translate with ``<i18n:domain>`` used as an element directive."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<i18n:domain name="foo">
<p i18n:msg="">FooBar</p>
<p i18n:msg="">Bar</p>
<p>Bar</p>
</i18n:domain>
<p>Bar</p>
</html>""")
        translations = DummyTranslations({'Bar': 'Voh'})
        translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'PT_Foo'})
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>BarFoo</p>
<p>PT_Foo</p>
<p>PT_Foo</p>
<p>Voh</p>
</html>""", tmpl.generate().render())
    def test_translate_i18n_domain_nested_directives(self):
        """Translate with nested i18n:domain directives; the innermost
        domain wins for its subtree."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">Bar</p>
<div i18n:domain="foo">
<p i18n:msg="">FooBar</p>
<p i18n:domain="bar" i18n:msg="">Bar</p>
<p>Bar</p>
</div>
<p>Bar</p>
</html>""")
        translations = DummyTranslations({'Bar': 'Voh'})
        translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'foo_Bar'})
        translations.add_domain('bar', {'Bar': 'bar_Bar'})
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Voh</p>
<div>
<p>BarFoo</p>
<p>bar_Bar</p>
<p>foo_Bar</p>
</div>
<p>Voh</p>
</html>""", tmpl.generate().render())
    def test_translate_i18n_domain_with_empty_nested_domain_directive(self):
        """An empty nested i18n:domain falls back to the default catalog
        (does not use dngettext)."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="">Bar</p>
<div i18n:domain="foo">
<p i18n:msg="">FooBar</p>
<p i18n:domain="" i18n:msg="">Bar</p>
<p>Bar</p>
</div>
<p>Bar</p>
</html>""")
        translations = DummyTranslations({'Bar': 'Voh'})
        translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'foo_Bar'})
        translations.add_domain('bar', {'Bar': 'bar_Bar'})
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Voh</p>
<div>
<p>BarFoo</p>
<p>Voh</p>
<p>foo_Bar</p>
</div>
<p>Voh</p>
</html>""", tmpl.generate().render())
    def test_translate_i18n_choose_as_attribute(self):
        """``i18n:choose`` as an attribute selects the singular branch for
        a numeral of 1 and the plural branch otherwise."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<div i18n:choose="one">
<p i18n:singular="">FooBar</p>
<p i18n:plural="">FooBars</p>
</div>
<div i18n:choose="two">
<p i18n:singular="">FooBar</p>
<p i18n:plural="">FooBars</p>
</div>
</html>""")
        translations = DummyTranslations()
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<div>
<p>FooBar</p>
</div>
<div>
<p>FooBars</p>
</div>
</html>""", tmpl.generate(one=1, two=2).render())
    def test_translate_i18n_choose_as_directive(self):
        """``<i18n:choose numeral="...">`` element form selects the branch."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<i18n:choose numeral="two">
<p i18n:singular="">FooBar</p>
<p i18n:plural="">FooBars</p>
</i18n:choose>
<i18n:choose numeral="one">
<p i18n:singular="">FooBar</p>
<p i18n:plural="">FooBars</p>
</i18n:choose>
</html>""")
        translations = DummyTranslations()
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>FooBars</p>
<p>FooBar</p>
</html>""", tmpl.generate(one=1, two=2).render())
    def test_translate_i18n_choose_as_attribute_with_params(self):
        """``i18n:choose="numeral; params"`` looks up the plural catalog
        keyed by (msgid, plural index) and substitutes the params."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<div i18n:choose="two; fname, lname">
<p i18n:singular="">Foo $fname $lname</p>
<p i18n:plural="">Foos $fname $lname</p>
</div>
</html>""")
        translations = DummyTranslations({
            ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s',
            ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s',
            'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s',
            'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s',
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<div>
<p>Vohs John Doe</p>
</div>
</html>""", tmpl.generate(two=2, fname='John', lname='Doe').render())
    def test_translate_i18n_choose_as_attribute_with_params_and_domain_as_param(self):
        """``i18n:choose`` with params honours an ``i18n:domain`` set on an
        enclosing element (here, the root)."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n"
    i18n:domain="foo">
<div i18n:choose="two; fname, lname">
<p i18n:singular="">Foo $fname $lname</p>
<p i18n:plural="">Foos $fname $lname</p>
</div>
</html>""")
        translations = DummyTranslations()
        translations.add_domain('foo', {
            ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s',
            ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s',
            'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s',
            'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s',
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<div>
<p>Vohs John Doe</p>
</div>
</html>""", tmpl.generate(two=2, fname='John', lname='Doe').render())
    def test_translate_i18n_choose_as_directive_with_params(self):
        """``<i18n:choose numeral="..." params="...">`` element form with
        parameter substitution in both branches."""
        tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
    xmlns:i18n="http://genshi.edgewall.org/i18n">
<i18n:choose numeral="two" params="fname, lname">
<p i18n:singular="">Foo ${fname} ${lname}</p>
<p i18n:plural="">Foos ${fname} ${lname}</p>
</i18n:choose>
<i18n:choose numeral="one" params="fname, lname">
<p i18n:singular="">Foo ${fname} ${lname}</p>
<p i18n:plural="">Foos ${fname} ${lname}</p>
</i18n:choose>
</html>""")
        translations = DummyTranslations({
            ('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s',
            ('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s',
            'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s',
            'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s',
        })
        translator = Translator(translations)
        translator.setup(tmpl)
        self.assertEqual("""<html>
<p>Vohs John Doe</p>
<p>Voh John Doe</p>
</html>""", tmpl.generate(one=1, two=2,
                          fname='John', lname='Doe').render())
# <i18n:domain name="foo"> must scope the catalog to its children only:
# the first choose (inside the domain) translates, the second (outside)
# falls back to the untranslated singular 'Foo John Doe'.
def test_translate_i18n_choose_as_directive_with_params_and_domain_as_directive(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<i18n:domain name="foo">
<i18n:choose numeral="two" params="fname, lname">
<p i18n:singular="">Foo ${fname} ${lname}</p>
<p i18n:plural="">Foos ${fname} ${lname}</p>
</i18n:choose>
</i18n:domain>
<i18n:choose numeral="one" params="fname, lname">
<p i18n:singular="">Foo ${fname} ${lname}</p>
<p i18n:plural="">Foos ${fname} ${lname}</p>
</i18n:choose>
</html>""")
translations = DummyTranslations()
translations.add_domain('foo', {
('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s',
('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s',
'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s',
'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s',
})
translator = Translator(translations)
translator.setup(tmpl)
self.assertEqual("""<html>
<p>Vohs John Doe</p>
<p>Foo John Doe</p>
</html>""", tmpl.generate(one=1, two=2,
fname='John', lname='Doe').render())
# Extraction: each i18n:choose attribute block yields one 'ngettext'
# message tuple (singular, plural) with the source line number.
def test_extract_i18n_choose_as_attribute(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<div i18n:choose="one">
<p i18n:singular="">FooBar</p>
<p i18n:plural="">FooBars</p>
</div>
<div i18n:choose="two">
<p i18n:singular="">FooBar</p>
<p i18n:plural="">FooBars</p>
</div>
</html>""")
translator = Translator()
tmpl.add_directives(Translator.NAMESPACE, translator)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(2, len(messages))
self.assertEqual((3, 'ngettext', ('FooBar', 'FooBars'), []), messages[0])
self.assertEqual((7, 'ngettext', ('FooBar', 'FooBars'), []), messages[1])
# Same extraction expectations as the attribute form, but via the
# <i18n:choose> element syntax.
def test_extract_i18n_choose_as_directive(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<i18n:choose numeral="two">
<p i18n:singular="">FooBar</p>
<p i18n:plural="">FooBars</p>
</i18n:choose>
<i18n:choose numeral="one">
<p i18n:singular="">FooBar</p>
<p i18n:plural="">FooBars</p>
</i18n:choose>
</html>""")
translator = Translator()
tmpl.add_directives(Translator.NAMESPACE, translator)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(2, len(messages))
self.assertEqual((3, 'ngettext', ('FooBar', 'FooBars'), []), messages[0])
self.assertEqual((7, 'ngettext', ('FooBar', 'FooBars'), []), messages[1])
# Parameterized choose: template placeholders ($fname/$lname) must be
# extracted as %(name)s-style substitution markers in the msgid pair.
def test_extract_i18n_choose_as_attribute_with_params(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<div i18n:choose="two; fname, lname">
<p i18n:singular="">Foo $fname $lname</p>
<p i18n:plural="">Foos $fname $lname</p>
</div>
</html>""")
translator = Translator()
tmpl.add_directives(Translator.NAMESPACE, translator)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((3, 'ngettext', ('Foo %(fname)s %(lname)s',
'Foos %(fname)s %(lname)s'), []),
messages[0])
# A surrounding i18n:domain must not change what is extracted, only the
# reported line number shifts (domain attribute adds a line).
def test_extract_i18n_choose_as_attribute_with_params_and_domain_as_param(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n"
i18n:domain="foo">
<div i18n:choose="two; fname, lname">
<p i18n:singular="">Foo $fname $lname</p>
<p i18n:plural="">Foos $fname $lname</p>
</div>
</html>""")
translator = Translator()
tmpl.add_directives(Translator.NAMESPACE, translator)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((4, 'ngettext', ('Foo %(fname)s %(lname)s',
'Foos %(fname)s %(lname)s'), []),
messages[0])
# Element-form choose with params=: two blocks, two extracted ngettext
# entries with ${...} placeholders normalized to %(name)s.
def test_extract_i18n_choose_as_directive_with_params(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<i18n:choose numeral="two" params="fname, lname">
<p i18n:singular="">Foo ${fname} ${lname}</p>
<p i18n:plural="">Foos ${fname} ${lname}</p>
</i18n:choose>
<i18n:choose numeral="one" params="fname, lname">
<p i18n:singular="">Foo ${fname} ${lname}</p>
<p i18n:plural="">Foos ${fname} ${lname}</p>
</i18n:choose>
</html>""")
translator = Translator()
tmpl.add_directives(Translator.NAMESPACE, translator)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(2, len(messages))
self.assertEqual((3, 'ngettext', ('Foo %(fname)s %(lname)s',
'Foos %(fname)s %(lname)s'), []),
messages[0])
self.assertEqual((7, 'ngettext', ('Foo %(fname)s %(lname)s',
'Foos %(fname)s %(lname)s'), []),
messages[1])
# An <i18n:domain> wrapper must be transparent to extraction; both choose
# blocks are still reported (line numbers 4 and 9 reflect the wrapper).
def test_extract_i18n_choose_as_directive_with_params_and_domain_as_directive(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<i18n:domain name="foo">
<i18n:choose numeral="two" params="fname, lname">
<p i18n:singular="">Foo ${fname} ${lname}</p>
<p i18n:plural="">Foos ${fname} ${lname}</p>
</i18n:choose>
</i18n:domain>
<i18n:choose numeral="one" params="fname, lname">
<p i18n:singular="">Foo ${fname} ${lname}</p>
<p i18n:plural="">Foos ${fname} ${lname}</p>
</i18n:choose>
</html>""")
translator = Translator()
tmpl.add_directives(Translator.NAMESPACE, translator)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(2, len(messages))
self.assertEqual((4, 'ngettext', ('Foo %(fname)s %(lname)s',
'Foos %(fname)s %(lname)s'), []),
messages[0])
self.assertEqual((9, 'ngettext', ('Foo %(fname)s %(lname)s',
'Foos %(fname)s %(lname)s'), []),
messages[1])
# An i18n:comment on the choose element must appear as the translator
# comment (last tuple element) of the extracted message.
def test_extract_i18n_choose_as_attribute_with_params_and_comment(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<div i18n:choose="two; fname, lname" i18n:comment="As in Foo Bar">
<p i18n:singular="">Foo $fname $lname</p>
<p i18n:plural="">Foos $fname $lname</p>
</div>
</html>""")
translator = Translator()
tmpl.add_directives(Translator.NAMESPACE, translator)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((3, 'ngettext', ('Foo %(fname)s %(lname)s',
'Foos %(fname)s %(lname)s'),
['As in Foo Bar']),
messages[0])
# Same translator-comment expectation for the element form of choose.
def test_extract_i18n_choose_as_directive_with_params_and_comment(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<i18n:choose numeral="two" params="fname, lname" i18n:comment="As in Foo Bar">
<p i18n:singular="">Foo ${fname} ${lname}</p>
<p i18n:plural="">Foos ${fname} ${lname}</p>
</i18n:choose>
</html>""")
translator = Translator()
tmpl.add_directives(Translator.NAMESPACE, translator)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((3, 'ngettext', ('Foo %(fname)s %(lname)s',
'Foos %(fname)s %(lname)s'),
['As in Foo Bar']),
messages[0])
# NOTE(review): 'inlcudes' in the method name is a typo for 'includes';
# kept as-is since test runners may select it by name.
# Builds a chain of 7 templates on disk, each xi:including the next, all
# pulled in from tmpl10.html which sets i18n:domain="foo". Verifies that
# the 'foo' domain propagates into includes, an inner i18n:domain="bar"
# overrides it, and i18n:domain="" resets to the default catalog ('Voh').
def test_translate_i18n_domain_with_nested_inlcudes(self):
import os, shutil, tempfile
from genshi.template.loader import TemplateLoader
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
for idx in range(7):
file1 = open(os.path.join(dirname, 'tmpl%d.html' % idx), 'w')
try:
file1.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n" py:strip="">
<div>Included tmpl$idx</div>
<p i18n:msg="idx">Bar $idx</p>
<p i18n:domain="bar">Bar</p>
<p i18n:msg="idx" i18n:domain="">Bar $idx</p>
<p i18n:domain="" i18n:msg="idx">Bar $idx</p>
<py:if test="idx < 6">
<xi:include href="tmpl${idx}.html" py:with="idx = idx+1"/>
</py:if>
</html>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl10.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n"
i18n:domain="foo">
<xi:include href="tmpl${idx}.html" py:with="idx = idx+1"/>
</html>""")
finally:
file2.close()
# The loader callback installs the translator on every loaded template,
# including the nested includes.
def callback(template):
translations = DummyTranslations({'Bar %(idx)s': 'Voh %(idx)s'})
translations.add_domain('foo', {'Bar %(idx)s': 'foo_Bar %(idx)s'})
translations.add_domain('bar', {'Bar': 'bar_Bar'})
translator = Translator(translations)
translator.setup(template)
loader = TemplateLoader([dirname], callback=callback)
tmpl = loader.load('tmpl10.html')
self.assertEqual("""<html>
<div>Included tmpl0</div>
<p>foo_Bar 0</p>
<p>bar_Bar</p>
<p>Voh 0</p>
<p>Voh 0</p>
<div>Included tmpl1</div>
<p>foo_Bar 1</p>
<p>bar_Bar</p>
<p>Voh 1</p>
<p>Voh 1</p>
<div>Included tmpl2</div>
<p>foo_Bar 2</p>
<p>bar_Bar</p>
<p>Voh 2</p>
<p>Voh 2</p>
<div>Included tmpl3</div>
<p>foo_Bar 3</p>
<p>bar_Bar</p>
<p>Voh 3</p>
<p>Voh 3</p>
<div>Included tmpl4</div>
<p>foo_Bar 4</p>
<p>bar_Bar</p>
<p>Voh 4</p>
<p>Voh 4</p>
<div>Included tmpl5</div>
<p>foo_Bar 5</p>
<p>bar_Bar</p>
<p>Voh 5</p>
<p>Voh 5</p>
<div>Included tmpl6</div>
<p>foo_Bar 6</p>
<p>bar_Bar</p>
<p>Voh 6</p>
<p>Voh 6</p>
</html>""", tmpl.generate(idx=-1).render())
finally:
# Always remove the temporary template directory.
shutil.rmtree(dirname)
# NOTE(review): same 'inlcudes' typo as above; kept for the same reason.
# Variant of the nested-includes test that additionally checks translatable
# title="" attributes: domain scoping must apply to attribute translation
# exactly as it does to element text.
def test_translate_i18n_domain_with_nested_inlcudes_with_translatable_attrs(self):
import os, shutil, tempfile
from genshi.template.loader import TemplateLoader
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
for idx in range(4):
file1 = open(os.path.join(dirname, 'tmpl%d.html' % idx), 'w')
try:
file1.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n" py:strip="">
<div>Included tmpl$idx</div>
<p title="${dg('foo', 'Bar %(idx)s') % dict(idx=idx)}" i18n:msg="idx">Bar $idx</p>
<p title="Bar" i18n:domain="bar">Bar</p>
<p title="Bar" i18n:msg="idx" i18n:domain="">Bar $idx</p>
<p i18n:msg="idx" i18n:domain="" title="Bar">Bar $idx</p>
<p i18n:domain="" i18n:msg="idx" title="Bar">Bar $idx</p>
<py:if test="idx < 3">
<xi:include href="tmpl${idx}.html" py:with="idx = idx+1"/>
</py:if>
</html>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl10.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n"
i18n:domain="foo">
<xi:include href="tmpl${idx}.html" py:with="idx = idx+1"/>
</html>""")
finally:
file2.close()
translations = DummyTranslations({'Bar %(idx)s': 'Voh %(idx)s',
'Bar': 'Voh'})
translations.add_domain('foo', {'Bar %(idx)s': 'foo_Bar %(idx)s'})
translations.add_domain('bar', {'Bar': 'bar_Bar'})
translator = Translator(translations)
def callback(template):
translator.setup(template)
loader = TemplateLoader([dirname], callback=callback)
tmpl = loader.load('tmpl10.html')
self.assertEqual("""<html>
<div>Included tmpl0</div>
<p title="foo_Bar 0">foo_Bar 0</p>
<p title="bar_Bar">bar_Bar</p>
<p title="Voh">Voh 0</p>
<p title="Voh">Voh 0</p>
<p title="Voh">Voh 0</p>
<div>Included tmpl1</div>
<p title="foo_Bar 1">foo_Bar 1</p>
<p title="bar_Bar">bar_Bar</p>
<p title="Voh">Voh 1</p>
<p title="Voh">Voh 1</p>
<p title="Voh">Voh 1</p>
<div>Included tmpl2</div>
<p title="foo_Bar 2">foo_Bar 2</p>
<p title="bar_Bar">bar_Bar</p>
<p title="Voh">Voh 2</p>
<p title="Voh">Voh 2</p>
<p title="Voh">Voh 2</p>
<div>Included tmpl3</div>
<p title="foo_Bar 3">foo_Bar 3</p>
<p title="bar_Bar">bar_Bar</p>
<p title="Voh">Voh 3</p>
<p title="Voh">Voh 3</p>
<p title="Voh">Voh 3</p>
</html>""", tmpl.generate(idx=-1,
dg=translations.dugettext).render())
finally:
shutil.rmtree(dirname)
# py:strip combined with i18n:msg/i18n:comment: the <p> wrappers are
# stripped from the output, leaving only the translated text, regardless
# of the attribute order on the element.
def test_translate_i18n_msg_and_comment_with_py_strip_directives(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="" i18n:comment="As in foo bar" py:strip="">Foo</p>
<p py:strip="" i18n:msg="" i18n:comment="As in foo bar">Foo</p>
</html>""")
translator = Translator(DummyTranslations({'Foo': 'Voh'}))
translator.setup(tmpl)
self.assertEqual("""<html>
Voh
Voh
</html>""", tmpl.generate().render())
# i18n:choose with plural catalog entries: two=2 selects 'Vohs ...'.
def test_translate_i18n_choose_and_py_strip(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<div i18n:choose="two; fname, lname">
<p i18n:singular="">Foo $fname $lname</p>
<p i18n:plural="">Foos $fname $lname</p>
</div>
</html>""")
translations = DummyTranslations({
('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s',
('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s',
'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s',
'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s',
})
translator = Translator(translations)
translator.setup(tmpl)
self.assertEqual("""<html>
<div>
<p>Vohs John Doe</p>
</div>
</html>""", tmpl.generate(two=2, fname='John', lname='Doe').render())
# Same as the previous test but the catalog lives in the 'foo' domain,
# selected by i18n:domain on the root element.
def test_translate_i18n_choose_and_domain_and_py_strip(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n"
i18n:domain="foo">
<div i18n:choose="two; fname, lname">
<p i18n:singular="">Foo $fname $lname</p>
<p i18n:plural="">Foos $fname $lname</p>
</div>
</html>""")
translations = DummyTranslations()
translations.add_domain('foo', {
('Foo %(fname)s %(lname)s', 0): 'Voh %(fname)s %(lname)s',
('Foo %(fname)s %(lname)s', 1): 'Vohs %(fname)s %(lname)s',
'Foo %(fname)s %(lname)s': 'Voh %(fname)s %(lname)s',
'Foos %(fname)s %(lname)s': 'Vohs %(fname)s %(lname)s',
})
translator = Translator(translations)
translator.setup(tmpl)
self.assertEqual("""<html>
<div>
<p>Vohs John Doe</p>
</div>
</html>""", tmpl.generate(two=2, fname='John', lname='Doe').render())
# Extraction of an i18n:msg with an embedded link; nested markup is
# encoded as a numbered placeholder ('[1:Help]'). py:strip must not
# interfere with extraction.
def test_extract_i18n_msg_with_py_strip(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="" py:strip="">
Please see <a href="help.html">Help</a> for details.
</p>
</html>""")
translator = Translator()
tmpl.add_directives(Translator.NAMESPACE, translator)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((3, None, 'Please see [1:Help] for details.', []),
messages[0])
# As above, additionally carrying the i18n:comment into the extracted
# message's comment list.
def test_extract_i18n_msg_with_py_strip_and_comment(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<p i18n:msg="" py:strip="" i18n:comment="Foo">
Please see <a href="help.html">Help</a> for details.
</p>
</html>""")
translator = Translator()
tmpl.add_directives(Translator.NAMESPACE, translator)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((3, None, 'Please see [1:Help] for details.',
['Foo']), messages[0])
# py:strip on the choose element and both branches: still exactly one
# ngettext message is extracted.
def test_extract_i18n_choose_as_attribute_and_py_strip(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n">
<div i18n:choose="one" py:strip="">
<p i18n:singular="" py:strip="">FooBar</p>
<p i18n:plural="" py:strip="">FooBars</p>
</div>
</html>""")
translator = Translator()
tmpl.add_directives(Translator.NAMESPACE, translator)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(1, len(messages))
self.assertEqual((3, 'ngettext', ('FooBar', 'FooBars'), []), messages[0])
# i18n:domain declared on the same element that opens the i18n namespace
# (a START_NS event) must still take effect for translation.
def test_translate_i18n_domain_with_inline_directive_on_START_NS(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n" i18n:domain="foo">
<p i18n:msg="">FooBar</p>
</html>""")
translations = DummyTranslations({'Bar': 'Voh'})
translations.add_domain('foo', {'FooBar': 'BarFoo'})
translator = Translator(translations)
translator.setup(tmpl)
self.assertEqual("""<html>
<p>BarFoo</p>
</html>""", tmpl.generate().render())
# As above, plus py:strip on the root: the <html> wrapper is removed from
# the output but the domain-scoped translation still applies.
def test_translate_i18n_domain_with_inline_directive_on_START_NS_with_py_strip(self):
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:i18n="http://genshi.edgewall.org/i18n"
i18n:domain="foo" py:strip="">
<p i18n:msg="">FooBar</p>
</html>""")
translations = DummyTranslations({'Bar': 'Voh'})
translations.add_domain('foo', {'FooBar': 'BarFoo'})
translator = Translator(translations)
translator.setup(tmpl)
self.assertEqual("""
<p>BarFoo</p>
""", tmpl.generate().render())
# Tests for the Babel-style extract() entry point (stream-level message
# extraction), as opposed to the Translator-based tests above.
class ExtractTestCase(unittest.TestCase):
# Text nodes and gettext-style calls in expressions are all extracted,
# with line numbers and the function name used (None for bare markup text).
def test_markup_template_extraction(self):
buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/">
<head>
<title>Example</title>
</head>
<body>
<h1>Example</h1>
<p>${_("Hello, %(name)s") % dict(name=username)}</p>
<p>${ngettext("You have %d item", "You have %d items", num)}</p>
</body>
</html>""")
results = list(extract(buf, ['_', 'ngettext'], [], {}))
self.assertEqual([
(3, None, 'Example', []),
(6, None, 'Example', []),
(7, '_', 'Hello, %(name)s', []),
(8, 'ngettext', ('You have %d item', 'You have %d items', None),
[]),
], results)
# With extract_text disabled, only explicit gettext calls are reported.
def test_extraction_without_text(self):
buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/">
<p title="Bar">Foo</p>
${ngettext("Singular", "Plural", num)}
</html>""")
results = list(extract(buf, ['_', 'ngettext'], [], {
'extract_text': 'no'
}))
self.assertEqual([
(3, 'ngettext', ('Singular', 'Plural', None), []),
], results)
# Extraction also works for TextTemplate via the template_class option.
def test_text_template_extraction(self):
buf = StringIO("""${_("Dear %(name)s") % {'name': name}},
${ngettext("Your item:", "Your items", len(items))}
#for item in items
* $item
#end
All the best,
Foobar""")
results = list(extract(buf, ['_', 'ngettext'], [], {
'template_class': 'genshi.template:TextTemplate'
}))
self.assertEqual([
(1, '_', 'Dear %(name)s', []),
(3, 'ngettext', ('Your item:', 'Your items', None), []),
(7, None, 'All the best,\n Foobar', [])
], results)
# NOTE(review): ('Foobar') below is just a parenthesized string, not a
# one-element tuple — the expected message is the bare string.
def test_extraction_with_keyword_arg(self):
buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/">
${gettext('Foobar', foo='bar')}
</html>""")
results = list(extract(buf, ['gettext'], [], {}))
self.assertEqual([
(2, 'gettext', ('Foobar'), []),
], results)
# A non-literal argument (the domain variable) is reported as None.
def test_extraction_with_nonstring_arg(self):
buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/">
${dgettext(curdomain, 'Foobar')}
</html>""")
results = list(extract(buf, ['dgettext'], [], {}))
self.assertEqual([
(2, 'dgettext', (None, 'Foobar'), []),
], results)
# Text inside <script> is ignored, but gettext calls in expressions
# embedded there are still extracted.
def test_extraction_inside_ignored_tags(self):
buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/">
<script type="text/javascript">
$('#llist').tabs({
remote: true,
spinner: "${_('Please wait...')}"
});
</script>
</html>""")
results = list(extract(buf, ['_'], [], {}))
self.assertEqual([
(5, '_', 'Please wait...', []),
], results)
# Directive content inside an ignored tag yields no messages at all.
def test_extraction_inside_ignored_tags_with_directives(self):
buf = StringIO("""<html xmlns:py="http://genshi.edgewall.org/">
<script type="text/javascript">
<py:if test="foobar">
alert("This shouldn't be extracted");
</py:if>
</script>
</html>""")
self.assertEqual([], list(extract(buf, ['_'], [], {})))
def test_extract_py_def_directive_with_py_strip(self):
# Failed extraction from Trac
tmpl = MarkupTemplate("""<html xmlns:py="http://genshi.edgewall.org/" py:strip="">
<py:def function="diff_options_fields(diff)">
<label for="style">View differences</label>
<select id="style" name="style">
<option selected="${diff.style == 'inline' or None}"
value="inline">inline</option>
<option selected="${diff.style == 'sidebyside' or None}"
value="sidebyside">side by side</option>
</select>
<div class="field">
Show <input type="text" name="contextlines" id="contextlines" size="2"
maxlength="3" value="${diff.options.contextlines < 0 and 'all' or diff.options.contextlines}" />
<label for="contextlines">lines around each change</label>
</div>
<fieldset id="ignore" py:with="options = diff.options">
<legend>Ignore:</legend>
<div class="field">
<input type="checkbox" id="ignoreblanklines" name="ignoreblanklines"
checked="${options.ignoreblanklines or None}" />
<label for="ignoreblanklines">Blank lines</label>
</div>
<div class="field">
<input type="checkbox" id="ignorecase" name="ignorecase"
checked="${options.ignorecase or None}" />
<label for="ignorecase">Case changes</label>
</div>
<div class="field">
<input type="checkbox" id="ignorewhitespace" name="ignorewhitespace"
checked="${options.ignorewhitespace or None}" />
<label for="ignorewhitespace">White space changes</label>
</div>
</fieldset>
<div class="buttons">
<input type="submit" name="update" value="${_('Update')}" />
</div>
</py:def></html>""")
translator = Translator()
tmpl.add_directives(Translator.NAMESPACE, translator)
messages = list(translator.extract(tmpl.stream))
self.assertEqual(10, len(messages))
self.assertEqual([
(3, None, 'View differences', []),
(6, None, 'inline', []),
(8, None, 'side by side', []),
(10, None, 'Show', []),
(13, None, 'lines around each change', []),
(16, None, 'Ignore:', []),
(20, None, 'Blank lines', []),
(25, None, 'Case changes',[]),
(30, None, 'White space changes', []),
(34, '_', 'Update', [])], messages)
def suite():
    """Assemble the module's tests: the Translator doctests followed by
    both TestCase classes defined above."""
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(Translator.__module__))
    for case in (TranslatorTestCase, ExtractTestCase):
        tests.addTest(unittest.makeSuite(case, 'test'))
    return tests
# Run the aggregated 'suite' defined above when executed as a script.
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Implementation of a number of stream filters."""
try:
any
except NameError:
from genshi.util import any
import re
from genshi.core import Attrs, QName, stripentities
from genshi.core import END, START, TEXT, COMMENT
__all__ = ['HTMLFormFiller', 'HTMLSanitizer']
__docformat__ = 'restructuredtext en'
class HTMLFormFiller(object):
"""A stream filter that can populate HTML forms from a dictionary of values.
>>> from genshi.input import HTML
>>> html = HTML('''<form>
... <p><input type="text" name="foo" /></p>
... </form>''')
>>> filler = HTMLFormFiller(data={'foo': 'bar'})
>>> print(html | filler)
<form>
<p><input type="text" name="foo" value="bar"/></p>
</form>
"""
# TODO: only select the first radio button, and the first select option
# (if not in a multiple-select)
# TODO: only apply to elements in the XHTML namespace (or no namespace)?
def __init__(self, name=None, id=None, data=None, passwords=False):
"""Create the filter.
:param name: The name of the form that should be populated. If this
parameter is given, only forms where the ``name`` attribute
value matches the parameter are processed.
:param id: The ID of the form that should be populated. If this
parameter is given, only forms where the ``id`` attribute
value matches the parameter are processed.
:param data: The dictionary of form values, where the keys are the names
of the form fields, and the values are the values to fill
in.
:param passwords: Whether password input fields should be populated.
This is off by default for security reasons (for
example, a password may end up in the browser cache)
:note: Changed in 0.5.2: added the `passwords` option
"""
self.name = name
self.id = id
# Default to an empty dict (built here, not as a mutable default arg).
if data is None:
data = {}
self.data = data
self.passwords = passwords
def __call__(self, stream):
"""Apply the filter to the given stream.
:param stream: the markup event stream to filter
"""
# State flags tracking which kind of element we are currently inside;
# option/textarea text is buffered so the value can be decided at END.
in_form = in_select = in_option = in_textarea = False
select_value = option_value = textarea_value = None
option_start = None
option_text = []
# True while an <option> without an explicit value= attribute is open;
# its text content then doubles as its value.
no_option_value = False
for kind, data, pos in stream:
if kind is START:
tag, attrs = data
tagname = tag.localname
# Enter a form if it matches the configured name/id filter
# (or no filter was configured at all).
if tagname == 'form' and (
self.name and attrs.get('name') == self.name or
self.id and attrs.get('id') == self.id or
not (self.id or self.name)):
in_form = True
elif in_form:
if tagname == 'input':
type = attrs.get('type', '').lower()
if type in ('checkbox', 'radio'):
name = attrs.get('name')
if name and name in self.data:
value = self.data[name]
declval = attrs.get('value')
checked = False
# A list/tuple value checks every matching entry;
# with no declared value=, any truthy entry checks.
if isinstance(value, (list, tuple)):
if declval:
checked = declval in [unicode(v) for v
in value]
else:
checked = any(value)
else:
if declval:
checked = declval == unicode(value)
elif type == 'checkbox':
checked = bool(value)
if checked:
attrs |= [(QName('checked'), 'checked')]
elif 'checked' in attrs:
attrs -= 'checked'
# Password fields are only filled when explicitly enabled.
elif type in ('', 'hidden', 'text') \
or type == 'password' and self.passwords:
name = attrs.get('name')
if name and name in self.data:
value = self.data[name]
# For multi-valued data, use the first entry.
if isinstance(value, (list, tuple)):
value = value[0]
if value is not None:
attrs |= [
(QName('value'), unicode(value))
]
elif tagname == 'select':
name = attrs.get('name')
if name in self.data:
select_value = self.data[name]
in_select = True
elif tagname == 'textarea':
name = attrs.get('name')
if name in self.data:
textarea_value = self.data.get(name)
if isinstance(textarea_value, (list, tuple)):
textarea_value = textarea_value[0]
in_textarea = True
# Buffer the option START event; it is re-emitted at END
# once we know whether it should be selected.
elif in_select and tagname == 'option':
option_start = kind, data, pos
option_value = attrs.get('value')
if option_value is None:
no_option_value = True
option_value = ''
in_option = True
continue
yield kind, (tag, attrs), pos
elif in_form and kind is TEXT:
if in_select and in_option:
if no_option_value:
option_value += data
option_text.append((kind, data, pos))
continue
# Drop the original textarea content; it is replaced at END.
elif in_textarea:
continue
yield kind, data, pos
elif in_form and kind is END:
tagname = data.localname
if tagname == 'form':
in_form = False
elif tagname == 'select':
in_select = False
select_value = None
elif in_select and tagname == 'option':
# Decide selection now that the option's value is final.
if isinstance(select_value, (tuple, list)):
selected = option_value in [unicode(v) for v
in select_value]
else:
selected = option_value == unicode(select_value)
okind, (tag, attrs), opos = option_start
if selected:
attrs |= [(QName('selected'), 'selected')]
elif 'selected' in attrs:
attrs -= 'selected'
# Re-emit the buffered START and text events.
yield okind, (tag, attrs), opos
if option_text:
for event in option_text:
yield event
in_option = False
no_option_value = False
option_start = option_value = None
option_text = []
elif tagname == 'textarea':
if textarea_value:
yield TEXT, unicode(textarea_value), pos
in_textarea = False
yield kind, data, pos
else:
# Events outside any form pass through untouched.
yield kind, data, pos
class HTMLSanitizer(object):
"""A filter that removes potentially dangerous HTML tags and attributes
from the stream.
>>> from genshi import HTML
>>> html = HTML('<div><script>alert(document.cookie)</script></div>')
>>> print(html | HTMLSanitizer())
<div/>
The default set of safe tags and attributes can be modified when the filter
is instantiated. For example, to allow inline ``style`` attributes, the
following instantation would work:
>>> html = HTML('<div style="background: #000"></div>')
>>> sanitizer = HTMLSanitizer(safe_attrs=HTMLSanitizer.SAFE_ATTRS | set(['style']))
>>> print(html | sanitizer)
<div style="background: #000"/>
Note that even in this case, the filter *does* attempt to remove dangerous
constructs from style attributes:
>>> html = HTML('<div style="background: url(javascript:void); color: #000"></div>')
>>> print(html | sanitizer)
<div style="color: #000"/>
This handles HTML entities, unicode escapes in CSS and Javascript text, as
well as a lot of other things. However, the style tag is still excluded by
default because it is very hard for such sanitizing to be completely safe,
especially considering how much error recovery current web browsers perform.
It also does some basic filtering of CSS properties that may be used for
typical phishing attacks. For more sophisticated filtering, this class
provides a couple of hooks that can be overridden in sub-classes.
:warn: Note that this special processing of CSS is currently only applied to
style attributes, **not** style elements.
"""
# Element whitelist: anything not listed here is dropped together with
# its entire content (see __call__).
SAFE_TAGS = frozenset(['a', 'abbr', 'acronym', 'address', 'area', 'b',
'big', 'blockquote', 'br', 'button', 'caption', 'center', 'cite',
'code', 'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt',
'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map',
'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp',
'select', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u',
'ul', 'var'])
# Attribute whitelist: attributes not listed here are silently removed.
SAFE_ATTRS = frozenset(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'axis', 'bgcolor', 'border', 'cellpadding',
'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class',
'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime',
'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height',
'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang',
'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name',
'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev',
'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title',
'type', 'usemap', 'valign', 'value', 'vspace', 'width'])
# None covers scheme-less (relative) URIs — see is_safe_uri.
SAFE_SCHEMES = frozenset(['file', 'ftp', 'http', 'https', 'mailto', None])
# Attributes whose values are URIs and therefore scheme-checked.
URI_ATTRS = frozenset(['action', 'background', 'dynsrc', 'href', 'lowsrc',
'src'])
def __init__(self, safe_tags=SAFE_TAGS, safe_attrs=SAFE_ATTRS,
safe_schemes=SAFE_SCHEMES, uri_attrs=URI_ATTRS):
"""Create the sanitizer.
The exact set of allowed elements and attributes can be configured.
:param safe_tags: a set of tag names that are considered safe
:param safe_attrs: a set of attribute names that are considered safe
:param safe_schemes: a set of URI schemes that are considered safe
:param uri_attrs: a set of names of attributes that contain URIs
"""
# Defaults are the class-level frozensets, which are immutable, so
# sharing them between instances is safe.
self.safe_tags = safe_tags
"The set of tag names that are considered safe."
self.safe_attrs = safe_attrs
"The set of attribute names that are considered safe."
self.uri_attrs = uri_attrs
"The set of names of attributes that may contain URIs."
self.safe_schemes = safe_schemes
"The set of URI schemes that are considered safe."
def __call__(self, stream):
"""Apply the filter to the given stream.
:param stream: the markup event stream to filter
"""
# While `waiting_for` is set, we are inside an unsafe element and drop
# every event until its matching END tag is seen.
# NOTE(review): this tracks only the tag name, so identical nested
# unsafe tags would end the skip at the first END — confirm acceptable.
waiting_for = None
for kind, data, pos in stream:
if kind is START:
if waiting_for:
continue
tag, attrs = data
if not self.is_safe_elem(tag, attrs):
waiting_for = tag
continue
new_attrs = []
for attr, value in attrs:
# Decode entities first so checks see the real value.
value = stripentities(value)
if attr not in self.safe_attrs:
continue
elif attr in self.uri_attrs:
# Don't allow URI schemes such as "javascript:"
if not self.is_safe_uri(value):
continue
elif attr == 'style':
# Remove dangerous CSS declarations from inline styles
decls = self.sanitize_css(value)
if not decls:
continue
value = '; '.join(decls)
new_attrs.append((attr, value))
yield kind, (tag, Attrs(new_attrs)), pos
elif kind is END:
tag = data
if waiting_for:
# Leave skip mode when the unsafe element closes.
if waiting_for == tag:
waiting_for = None
else:
yield kind, data, pos
elif kind is not COMMENT:
# Comments are always dropped; other events pass through
# unless we are skipping an unsafe subtree.
if not waiting_for:
yield kind, data, pos
def is_safe_css(self, propname, value):
"""Determine whether the given css property declaration is to be
considered safe for inclusion in the output.
:param propname: the CSS property name
:param value: the value of the property
:return: whether the property value should be considered safe
:rtype: bool
:since: version 0.6
"""
# 'position' can be abused to overlay page content (spoofing).
if propname == 'position':
return False
if propname.startswith('margin') and '-' in value:
# Negative margins can be used for phishing
return False
return True
def is_safe_elem(self, tag, attrs):
"""Determine whether the given element should be considered safe for
inclusion in the output.
:param tag: the tag name of the element
:type tag: QName
:param attrs: the element attributes
:type attrs: Attrs
:return: whether the element should be considered safe
:rtype: bool
:since: version 0.6
"""
if tag not in self.safe_tags:
return False
# <input> is in SAFE_TAGS, but password inputs are rejected even so.
if tag.localname == 'input':
input_type = attrs.get('type', '').lower()
if input_type == 'password':
return False
return True
def is_safe_uri(self, uri):
"""Determine whether the given URI is to be considered safe for
inclusion in the output.
The default implementation checks whether the scheme of the URI is in
the set of allowed URIs (`safe_schemes`).
>>> sanitizer = HTMLSanitizer()
>>> sanitizer.is_safe_uri('http://example.org/')
True
>>> sanitizer.is_safe_uri('javascript:alert(document.cookie)')
False
:param uri: the URI to check
:return: `True` if the URI can be considered safe, `False` otherwise
:rtype: `bool`
:since: version 0.4.3
"""
if '#' in uri:
uri = uri.split('#', 1)[0] # Strip out the fragment identifier
if ':' not in uri:
return True # This is a relative URI
chars = [char for char in uri.split(':', 1)[0] if char.isalnum()]
return ''.join(chars).lower() in self.safe_schemes
def sanitize_css(self, text):
"""Remove potentially dangerous property declarations from CSS code.
In particular, properties using the CSS ``url()`` function with a scheme
that is not considered safe are removed:
>>> sanitizer = HTMLSanitizer()
>>> sanitizer.sanitize_css(u'''
... background: url(javascript:alert("foo"));
... color: #000;
... ''')
[u'color: #000']
Also, the proprietary Internet Explorer function ``expression()`` is
always stripped:
>>> sanitizer.sanitize_css(u'''
... background: #fff;
... color: #000;
... width: e/**/xpression(alert("foo"));
... ''')
[u'background: #fff', u'color: #000']
:param text: the CSS text; this is expected to be `unicode` and to not
contain any character or numeric references
:return: a list of declarations that are considered safe
:rtype: `list`
:since: version 0.4.3
"""
decls = []
text = self._strip_css_comments(self._replace_unicode_escapes(text))
for decl in text.split(';'):
decl = decl.strip()
if not decl:
continue
try:
propname, value = decl.split(':', 1)
except ValueError:
continue
if not self.is_safe_css(propname.strip().lower(), value.strip()):
continue
is_evil = False
if 'expression' in value:
is_evil = True
for match in re.finditer(r'url\s*\(([^)]+)', value):
if not self.is_safe_uri(match.group(1)):
is_evil = True
break
if not is_evil:
decls.append(decl.strip())
return decls
    # Bound ``sub`` methods of precompiled patterns, stored as class
    # attributes so each call below is a single cheap callable.
    _NORMALIZE_NEWLINES = re.compile(r'\r\n').sub
    _UNICODE_ESCAPE = re.compile(r'\\([0-9a-fA-F]{1,6})\s?').sub
    def _replace_unicode_escapes(self, text):
        # Decode CSS unicode escapes (e.g. ``\65``) so that obfuscated
        # keywords such as ``expression`` become visible to the sanitizer.
        # NOTE: ``unichr`` makes this Python 2 only.
        def _repl(match):
            return unichr(int(match.group(1), 16))
        return self._UNICODE_ESCAPE(_repl, self._NORMALIZE_NEWLINES('\n', text))
    _CSS_COMMENTS = re.compile(r'/\*.*?\*/').sub
    def _strip_css_comments(self, text):
        # Comments can be used to split keywords apart (``e/**/xpression``).
        return self._CSS_COMMENTS('', text)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Implementation of a number of stream filters."""
from genshi.filters.html import HTMLFormFiller, HTMLSanitizer
from genshi.filters.i18n import Translator
from genshi.filters.transform import Transformer
__docformat__ = 'restructuredtext en'
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Directives and utilities for internationalization and localization of
templates.
:since: version 0.4
:note: Directives support added since version 0.6
"""
try:
any
except NameError:
from genshi.util import any
from gettext import NullTranslations
import os
import re
from types import FunctionType
from genshi.core import Attrs, Namespace, QName, START, END, TEXT, START_NS, \
END_NS, XML_NAMESPACE, _ensure, StreamEventKind
from genshi.template.eval import _ast
from genshi.template.base import DirectiveFactory, EXPR, SUB, _apply_directives
from genshi.template.directives import Directive, StripDirective
from genshi.template.markup import MarkupTemplate, EXEC
__all__ = ['Translator', 'extract']
__docformat__ = 'restructuredtext en'
# XML namespace recognized for the i18n template directives.
I18N_NAMESPACE = Namespace('http://genshi.edgewall.org/i18n')
# Custom stream event kinds used internally while buffering and translating
# mixed content; they never appear in the final output stream.
MSGBUF = StreamEventKind('MSGBUF')
SUB_START = StreamEventKind('SUB_START')
SUB_END = StreamEventKind('SUB_END')
class I18NDirective(Directive):
    """Base class shared by the i18n directives.

    Provides the common behaviour of simply applying any remaining
    directives to the stream; message extraction is layered on top by
    subclasses.
    """
    def __call__(self, stream, directives, ctxt, **vars):
        result = _apply_directives(stream, directives, ctxt, vars)
        return result
class ExtractableI18NDirective(I18NDirective):
    """Base class for i18n directives from which localizable messages can be
    extracted."""
    def extract(self, stream, comment_stack):
        # Subclasses must yield extracted messages from the stream.
        raise NotImplementedError
class CommentDirective(I18NDirective):
    """Implementation of the ``i18n:comment`` template directive which adds
    translation comments.

    >>> tmpl = MarkupTemplate('''<html xmlns:i18n="http://genshi.edgewall.org/i18n">
    ... <p i18n:comment="As in Foo Bar">Foo</p>
    ... </html>''')
    >>> translator = Translator()
    >>> translator.setup(tmpl)
    >>> list(translator.extract(tmpl.stream))
    [(2, None, u'Foo', [u'As in Foo Bar'])]
    """
    __slots__ = ['comment']
    def __init__(self, value, template, hints=None, namespaces=None,
                 lineno=-1, offset=-1):
        # The comment text is kept verbatim; it accompanies the extracted
        # message and is never rendered into the output.
        self.comment = value
        Directive.__init__(self, None, template, namespaces, lineno, offset)
class MsgDirective(ExtractableI18NDirective):
    r"""Implementation of the ``i18n:msg`` directive which marks inner content
    as translatable. Consider the following examples:
    >>> tmpl = MarkupTemplate('''<html xmlns:i18n="http://genshi.edgewall.org/i18n">
    ... <div i18n:msg="">
    ... <p>Foo</p>
    ... <p>Bar</p>
    ... </div>
    ... <p i18n:msg="">Foo <em>bar</em>!</p>
    ... </html>''')
    >>> translator = Translator()
    >>> translator.setup(tmpl)
    >>> list(translator.extract(tmpl.stream))
    [(2, None, u'[1:Foo]\n [2:Bar]', []), (6, None, u'Foo [1:bar]!', [])]
    >>> print(tmpl.generate().render())
    <html>
    <div><p>Foo</p>
    <p>Bar</p></div>
    <p>Foo <em>bar</em>!</p>
    </html>
    >>> tmpl = MarkupTemplate('''<html xmlns:i18n="http://genshi.edgewall.org/i18n">
    ... <div i18n:msg="fname, lname">
    ... <p>First Name: ${fname}</p>
    ... <p>Last Name: ${lname}</p>
    ... </div>
    ... <p i18n:msg="">Foo <em>bar</em>!</p>
    ... </html>''')
    >>> translator.setup(tmpl)
    >>> list(translator.extract(tmpl.stream)) #doctest: +NORMALIZE_WHITESPACE
    [(2, None, u'[1:First Name: %(fname)s]\n [2:Last Name: %(lname)s]', []),
    (6, None, u'Foo [1:bar]!', [])]
    >>> tmpl = MarkupTemplate('''<html xmlns:i18n="http://genshi.edgewall.org/i18n">
    ... <div i18n:msg="fname, lname">
    ... <p>First Name: ${fname}</p>
    ... <p>Last Name: ${lname}</p>
    ... </div>
    ... <p i18n:msg="">Foo <em>bar</em>!</p>
    ... </html>''')
    >>> translator.setup(tmpl)
    >>> print(tmpl.generate(fname='John', lname='Doe').render())
    <html>
    <div><p>First Name: John</p>
    <p>Last Name: Doe</p></div>
    <p>Foo <em>bar</em>!</p>
    </html>
    Starting and ending white-space is stripped of to make it simpler for
    translators. Stripping it is not that important since it's on the html
    source, the rendered output will remain the same.
    """
    __slots__ = ['params']
    def __init__(self, value, template, hints=None, namespaces=None,
                 lineno=-1, offset=-1):
        Directive.__init__(self, None, template, namespaces, lineno, offset)
        # Names usable for ${...} expressions inside the message, in order.
        self.params = [param.strip() for param in value.split(',') if param]
    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        # When used as an element (<i18n:msg params="...">) the value comes
        # in as a dict of attributes rather than an attribute string.
        if type(value) is dict:
            value = value.get('params', '').strip()
        return super(MsgDirective, cls).attach(template, stream, value.strip(),
                                               namespaces, pos)
    def __call__(self, stream, directives, ctxt, **vars):
        gettext = ctxt.get('_i18n.gettext')
        dgettext = ctxt.get('_i18n.dgettext')
        if ctxt.get('_i18n.domain'):
            # A domain is active: route lookups through dgettext.
            assert hasattr(dgettext, '__call__'), \
                'No domain gettext function passed'
            gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg)
        def _generate():
            # Buffer the inner events, translate the composed message, then
            # re-emit the events interleaved with the translated text.
            msgbuf = MessageBuffer(self)
            # Lag one event behind so the outer start/end tags can be passed
            # through untranslated.
            previous = stream.next()
            if previous[0] is START:
                yield previous
            else:
                msgbuf.append(*previous)
                previous = stream.next()
            for kind, data, pos in stream:
                msgbuf.append(*previous)
                previous = kind, data, pos
            if previous[0] is not END:
                msgbuf.append(*previous)
                previous = None
            for event in msgbuf.translate(gettext(msgbuf.format())):
                yield event
            if previous:
                yield previous
        return _apply_directives(_generate(), directives, ctxt, vars)
    def extract(self, stream, comment_stack):
        msgbuf = MessageBuffer(self)
        stream = iter(stream)
        # Skip the outer start tag, if any; it is not part of the message.
        previous = stream.next()
        if previous[0] is START:
            previous = stream.next()
        for event in stream:
            msgbuf.append(*previous)
            previous = event
        msgbuf.append(*previous)
        yield None, msgbuf.format(), comment_stack[-1:]
class ChooseBranchDirective(I18NDirective):
    """Common implementation for the singular/plural branches of an
    ``i18n:choose`` directive: buffers the branch content into a
    `MessageBuffer` and publishes it in the context for `ChooseDirective`
    to pick up."""
    __slots__ = ['params']
    def __call__(self, stream, directives, ctxt, **vars):
        # Copy so repeated evaluation of the directive starts fresh.
        self.params = ctxt.get('_i18n.choose.params', [])[:]
        msgbuf = MessageBuffer(self)
        stream = iter(_apply_directives(stream, directives, ctxt, vars))
        yield stream.next() # the outer start tag
        previous = stream.next()
        for kind, data, pos in stream:
            msgbuf.append(*previous)
            previous = kind, data, pos
        yield MSGBUF, (), -1 # the place holder for msgbuf output
        yield previous # the outer end tag
        # Keyed by subclass name so ChooseDirective can find each branch.
        ctxt['_i18n.choose.%s' % type(self).__name__] = msgbuf
    def extract(self, stream, comment_stack, msgbuf):
        stream = iter(stream)
        # Skip the outer start tag, if any.
        previous = stream.next()
        if previous[0] is START:
            previous = stream.next()
        for event in stream:
            msgbuf.append(*previous)
            previous = event
        # The trailing outer end tag is not part of the message.
        if previous[0] is not END:
            msgbuf.append(*previous)
        return msgbuf
class SingularDirective(ChooseBranchDirective):
    """Handles the ``i18n:singular`` branch of an ``i18n:choose``
    directive."""
class PluralDirective(ChooseBranchDirective):
    """Handles the ``i18n:plural`` branch of an ``i18n:choose``
    directive."""
class ChooseDirective(ExtractableI18NDirective):
    """Implementation of the ``i18n:choose`` directive which provides plural
    internationalisation of strings.
    This directive requires at least one parameter, the one which evaluates to
    an integer which will allow to choose the plural/singular form. If you also
    have expressions inside the singular and plural version of the string you
    also need to pass a name for those parameters. Consider the following
    examples:
    >>> tmpl = MarkupTemplate('''\
    <html xmlns:i18n="http://genshi.edgewall.org/i18n">
    ... <div i18n:choose="num; num">
    ... <p i18n:singular="">There is $num coin</p>
    ... <p i18n:plural="">There are $num coins</p>
    ... </div>
    ... </html>''')
    >>> translator = Translator()
    >>> translator.setup(tmpl)
    >>> list(translator.extract(tmpl.stream)) #doctest: +NORMALIZE_WHITESPACE
    [(2, 'ngettext', (u'There is %(num)s coin',
    u'There are %(num)s coins'), [])]
    >>> tmpl = MarkupTemplate('''\
    <html xmlns:i18n="http://genshi.edgewall.org/i18n">
    ... <div i18n:choose="num; num">
    ... <p i18n:singular="">There is $num coin</p>
    ... <p i18n:plural="">There are $num coins</p>
    ... </div>
    ... </html>''')
    >>> translator.setup(tmpl)
    >>> print(tmpl.generate(num=1).render())
    <html>
    <div>
    <p>There is 1 coin</p>
    </div>
    </html>
    >>> print(tmpl.generate(num=2).render())
    <html>
    <div>
    <p>There are 2 coins</p>
    </div>
    </html>
    When used as a directive and not as an attribute:
    >>> tmpl = MarkupTemplate('''\
    <html xmlns:i18n="http://genshi.edgewall.org/i18n">
    ... <i18n:choose numeral="num" params="num">
    ... <p i18n:singular="">There is $num coin</p>
    ... <p i18n:plural="">There are $num coins</p>
    ... </i18n:choose>
    ... </html>''')
    >>> translator.setup(tmpl)
    >>> list(translator.extract(tmpl.stream)) #doctest: +NORMALIZE_WHITESPACE
    [(2, 'ngettext', (u'There is %(num)s coin',
    u'There are %(num)s coins'), [])]
    """
    __slots__ = ['numeral', 'params']
    def __init__(self, value, template, hints=None, namespaces=None,
                 lineno=-1, offset=-1):
        Directive.__init__(self, None, template, namespaces, lineno, offset)
        # "numeral_expr; param1, param2" -- the first segment selects the
        # plural form, the rest name the expressions used in the branches.
        params = [v.strip() for v in value.split(';')]
        self.numeral = self._parse_expr(params.pop(0), template, lineno, offset)
        self.params = params and [name.strip() for name in
                                  params[0].split(',') if name] or []
    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        if type(value) is dict:
            numeral = value.get('numeral', '').strip()
            # BUG FIX: was ``numeral is not ''`` -- an identity comparison
            # with a string literal, which is not a reliable emptiness test.
            assert numeral != '', "at least pass the numeral param"
            params = [v.strip() for v in value.get('params', '').split(',')]
            value = '%s; ' % numeral + ', '.join(params)
        return super(ChooseDirective, cls).attach(template, stream, value,
                                                  namespaces, pos)
    def __call__(self, stream, directives, ctxt, **vars):
        ctxt.push({'_i18n.choose.params': self.params,
                   '_i18n.choose.SingularDirective': None,
                   '_i18n.choose.PluralDirective': None})
        new_stream = []
        singular_stream = None
        singular_msgbuf = None
        plural_stream = None
        plural_msgbuf = None
        ngettext = ctxt.get('_i18n.ungettext')
        assert hasattr(ngettext, '__call__'), 'No ngettext function available'
        dngettext = ctxt.get('_i18n.dngettext')
        if not dngettext:
            dngettext = lambda d, s, p, n: ngettext(s, p, n)
        # First pass: materialize the singular/plural substreams so their
        # message buffers are registered in the context.
        for kind, event, pos in stream:
            if kind is SUB:
                subdirectives, substream = event
                if isinstance(subdirectives[0],
                              SingularDirective) and not singular_stream:
                    # Apply directives to update context
                    singular_stream = list(_apply_directives(substream,
                                                             subdirectives,
                                                             ctxt, vars))
                    new_stream.append((MSGBUF, (), ('', -1))) # msgbuf place holder
                    singular_msgbuf = ctxt.get('_i18n.choose.SingularDirective')
                elif isinstance(subdirectives[0],
                                PluralDirective) and not plural_stream:
                    # Apply directives to update context
                    plural_stream = list(_apply_directives(substream,
                                                           subdirectives,
                                                           ctxt, vars))
                    plural_msgbuf = ctxt.get('_i18n.choose.PluralDirective')
                else:
                    new_stream.append((kind, event, pos))
            else:
                new_stream.append((kind, event, pos))
        if ctxt.get('_i18n.domain'):
            ngettext = lambda s, p, n: dngettext(ctxt.get('_i18n.domain'),
                                                 s, p, n)
        # Second pass: emit the stream, replacing the placeholder with the
        # translated singular branch (its markup carries the translation).
        for kind, data, pos in new_stream:
            if kind is MSGBUF:
                for skind, sdata, spos in singular_stream:
                    if skind is MSGBUF:
                        translation = ngettext(singular_msgbuf.format(),
                                               plural_msgbuf.format(),
                                               self.numeral.evaluate(ctxt))
                        for event in singular_msgbuf.translate(translation):
                            yield event
                    else:
                        yield skind, sdata, spos
            else:
                yield kind, data, pos
        ctxt.pop()
    def extract(self, stream, comment_stack):
        stream = iter(stream)
        previous = stream.next()
        # BUG FIX: was ``previous is START``; ``previous`` is a
        # (kind, data, pos) tuple, so the comparison was always False and the
        # outer start tag was never skipped.  Compare the event kind instead,
        # as the sibling extract() implementations do.
        if previous[0] is START:
            stream.next()
        singular_msgbuf = MessageBuffer(self)
        plural_msgbuf = MessageBuffer(self)
        for kind, event, pos in stream:
            if kind is SUB:
                subdirectives, substream = event
                for subdirective in subdirectives:
                    if isinstance(subdirective, SingularDirective):
                        singular_msgbuf = subdirective.extract(substream, comment_stack,
                                                               singular_msgbuf)
                    elif isinstance(subdirective, PluralDirective):
                        plural_msgbuf = subdirective.extract(substream, comment_stack,
                                                             plural_msgbuf)
                    elif not isinstance(subdirective, StripDirective):
                        singular_msgbuf.append(kind, event, pos)
                        plural_msgbuf.append(kind, event, pos)
            else:
                singular_msgbuf.append(kind, event, pos)
                plural_msgbuf.append(kind, event, pos)
        yield 'ngettext', \
            (singular_msgbuf.format(), plural_msgbuf.format()), \
            comment_stack[-1:]
class DomainDirective(I18NDirective):
    """Implementation of the ``i18n:domain`` directive which allows choosing
    another i18n domain(catalog) to translate from.

    >>> from genshi.filters.tests.i18n import DummyTranslations
    >>> tmpl = MarkupTemplate('''\
    <html xmlns:i18n="http://genshi.edgewall.org/i18n">
    ... <p i18n:msg="">Bar</p>
    ... <div i18n:domain="foo">
    ... <p i18n:msg="">FooBar</p>
    ... <p>Bar</p>
    ... <p i18n:domain="bar" i18n:msg="">Bar</p>
    ... <p i18n:domain="">Bar</p>
    ... </div>
    ... <p>Bar</p>
    ... </html>''')
    >>> translations = DummyTranslations({'Bar': 'Voh'})
    >>> translations.add_domain('foo', {'FooBar': 'BarFoo', 'Bar': 'foo_Bar'})
    >>> translations.add_domain('bar', {'Bar': 'bar_Bar'})
    >>> translator = Translator(translations)
    >>> translator.setup(tmpl)
    >>> print(tmpl.generate().render())
    <html>
    <p>Voh</p>
    <div>
    <p>BarFoo</p>
    <p>foo_Bar</p>
    <p>bar_Bar</p>
    <p>Voh</p>
    </div>
    <p>Voh</p>
    </html>
    """
    __slots__ = ['domain']
    def __init__(self, value, template, hints=None, namespaces=None,
                 lineno=-1, offset=-1):
        Directive.__init__(self, None, template, namespaces, lineno, offset)
        # An empty or missing value falls back to the default catalog.
        stripped = value.strip() if value else ''
        self.domain = stripped or '__DEFAULT__'
    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        # Element form (<i18n:domain name="...">) passes a dict of attributes.
        if type(value) is dict:
            value = value.get('name')
        return super(DomainDirective, cls).attach(template, stream, value,
                                                  namespaces, pos)
    def __call__(self, stream, directives, ctxt, **vars):
        # Expose the domain to nested directives for the duration of the body.
        ctxt.push({'_i18n.domain': self.domain})
        for item in _apply_directives(stream, directives, ctxt, vars):
            yield item
        ctxt.pop()
class Translator(DirectiveFactory):
    """Can extract and translate localizable strings from markup streams and
    templates.
    For example, assume the following template:
    >>> tmpl = MarkupTemplate('''<html xmlns:py="http://genshi.edgewall.org/">
    ... <head>
    ... <title>Example</title>
    ... </head>
    ... <body>
    ... <h1>Example</h1>
    ... <p>${_("Hello, %(name)s") % dict(name=username)}</p>
    ... </body>
    ... </html>''', filename='example.html')
    For demonstration, we define a dummy ``gettext``-style function with a
    hard-coded translation table, and pass that to the `Translator` initializer:
    >>> def pseudo_gettext(string):
    ...     return {
    ...         'Example': 'Beispiel',
    ...         'Hello, %(name)s': 'Hallo, %(name)s'
    ...     }[string]
    >>> translator = Translator(pseudo_gettext)
    Next, the translator needs to be prepended to any already defined filters
    on the template:
    >>> tmpl.filters.insert(0, translator)
    When generating the template output, our hard-coded translations should be
    applied as expected:
    >>> print(tmpl.generate(username='Hans', _=pseudo_gettext))
    <html>
    <head>
    <title>Beispiel</title>
    </head>
    <body>
    <h1>Beispiel</h1>
    <p>Hallo, Hans</p>
    </body>
    </html>
    Note that elements defining ``xml:lang`` attributes that do not contain
    variable expressions are ignored by this filter. That can be used to
    exclude specific parts of a template from being extracted and translated.
    """
    directives = [
        ('domain', DomainDirective),
        ('comment', CommentDirective),
        ('msg', MsgDirective),
        ('choose', ChooseDirective),
        ('singular', SingularDirective),
        ('plural', PluralDirective)
    ]
    # Content of these elements is never translated (both plain and XHTML
    # namespaced forms).
    IGNORE_TAGS = frozenset([
        QName('script'), QName('http://www.w3.org/1999/xhtml}script'),
        QName('style'), QName('http://www.w3.org/1999/xhtml}style')
    ])
    # Attributes whose values are human-readable and therefore translated.
    INCLUDE_ATTRS = frozenset([
        'abbr', 'alt', 'label', 'prompt', 'standby', 'summary', 'title'
    ])
    NAMESPACE = I18N_NAMESPACE
    # NOTE(review): the default ``NullTranslations()`` instance is created
    # once at class-definition time and shared by all Translator instances
    # that do not pass their own -- harmless for a stateless translations
    # object, but worth knowing.
    def __init__(self, translate=NullTranslations(), ignore_tags=IGNORE_TAGS,
                 include_attrs=INCLUDE_ATTRS, extract_text=True):
        """Initialize the translator.
        :param translate: the translation function, for example ``gettext`` or
                          ``ugettext``.
        :param ignore_tags: a set of tag names that should not be localized
        :param include_attrs: a set of attribute names should be localized
        :param extract_text: whether the content of text nodes should be
                             extracted, or only text in explicit ``gettext``
                             function calls
        :note: Changed in 0.6: the `translate` parameter can now be either
               a ``gettext``-style function, or an object compatible with the
               ``NullTransalations`` or ``GNUTranslations`` interface
        """
        self.translate = translate
        self.ignore_tags = ignore_tags
        self.include_attrs = include_attrs
        self.extract_text = extract_text
    def __call__(self, stream, ctxt=None, search_text=True):
        """Translate any localizable strings in the given stream.
        This function shouldn't be called directly. Instead, an instance of
        the `Translator` class should be registered as a filter with the
        `Template` or the `TemplateLoader`, or applied as a regular stream
        filter. If used as a template filter, it should be inserted in front of
        all the default filters.
        :param stream: the markup event stream
        :param ctxt: the template context (not used)
        :param search_text: whether text nodes should be translated (used
                            internally)
        :return: the localized stream
        """
        ignore_tags = self.ignore_tags
        include_attrs = self.include_attrs
        # Counts the nesting depth of elements whose content is skipped.
        skip = 0
        xml_lang = XML_NAMESPACE['lang']
        # ``translate`` may be a plain gettext-style function or a
        # (Null/GNU)Translations-like object; normalize both into locals and
        # publish them in the context for the i18n directives.
        if type(self.translate) is FunctionType:
            gettext = self.translate
            if ctxt:
                ctxt['_i18n.gettext'] = gettext
        else:
            gettext = self.translate.ugettext
            try:
                dgettext = self.translate.dugettext
            except AttributeError:
                dgettext = lambda x, y: gettext(y)
            ngettext = self.translate.ungettext
            try:
                dngettext = self.translate.dungettext
            except AttributeError:
                dngettext = lambda d, s, p, n: ngettext(s, p, n)
            if ctxt:
                ctxt['_i18n.gettext'] = gettext
                ctxt['_i18n.ugettext'] = gettext
                ctxt['_i18n.dgettext'] = dgettext
                ctxt['_i18n.ngettext'] = ngettext
                ctxt['_i18n.ungettext'] = ngettext
                ctxt['_i18n.dngettext'] = dngettext
        extract_text = self.extract_text
        if not extract_text:
            search_text = False
        if ctxt and ctxt.get('_i18n.domain'):
            # An i18n:domain is active; look messages up in that catalog.
            old_gettext = gettext
            gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg)
        for kind, data, pos in stream:
            # skip chunks that should not be localized
            if skip:
                if kind is START:
                    skip += 1
                elif kind is END:
                    skip -= 1
                yield kind, data, pos
                continue
            # handle different events that can be localized
            if kind is START:
                tag, attrs = data
                if tag in self.ignore_tags or \
                        isinstance(attrs.get(xml_lang), basestring):
                    skip += 1
                    yield kind, data, pos
                    continue
                new_attrs = []
                changed = False
                for name, value in attrs:
                    newval = value
                    if extract_text and isinstance(value, basestring):
                        if name in include_attrs:
                            newval = gettext(value)
                    else:
                        # Attribute value contains expressions: translate the
                        # embedded sub-stream without text-node search.
                        newval = list(
                            self(_ensure(value), ctxt, search_text=False)
                        )
                    if newval != value:
                        value = newval
                        changed = True
                    new_attrs.append((name, value))
                if changed:
                    attrs = Attrs(new_attrs)
                yield kind, (tag, attrs), pos
            elif search_text and kind is TEXT:
                text = data.strip()
                if text:
                    # Replace only the stripped portion so surrounding
                    # whitespace is preserved.
                    data = data.replace(text, unicode(gettext(text)))
                yield kind, data, pos
            elif kind is SUB:
                directives, substream = data
                current_domain = None
                for idx, directive in enumerate(directives):
                    # Organize directives to make everything work
                    if isinstance(directive, DomainDirective):
                        # Grab current domain and update context
                        current_domain = directive.domain
                        ctxt.push({'_i18n.domain': current_domain})
                        # Put domain directive as the first one in order to
                        # update context before any other directives evaluation
                        directives.insert(0, directives.pop(idx))
                # If this is an i18n directive, no need to translate text
                # nodes here
                is_i18n_directive = any([
                    isinstance(d, ExtractableI18NDirective)
                    for d in directives
                ])
                substream = list(self(substream, ctxt,
                                      search_text=not is_i18n_directive))
                yield kind, (directives, substream), pos
                if current_domain:
                    ctxt.pop()
            else:
                yield kind, data, pos
    GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext', 'dgettext', 'dngettext',
                         'ugettext', 'ungettext')
    def extract(self, stream, gettext_functions=GETTEXT_FUNCTIONS,
                search_text=True, msgbuf=None, comment_stack=None):
        """Extract localizable strings from the given template stream.
        For every string found, this function yields a ``(lineno, function,
        message, comments)`` tuple, where:
        * ``lineno`` is the number of the line on which the string was found,
        * ``function`` is the name of the ``gettext`` function used (if the
          string was extracted from embedded Python code), and
        * ``message`` is the string itself (a ``unicode`` object, or a tuple
          of ``unicode`` objects for functions with multiple string
          arguments).
        * ``comments`` is a list of comments related to the message, extracted
          from ``i18n:comment`` attributes found in the markup
        >>> tmpl = MarkupTemplate('''<html xmlns:py="http://genshi.edgewall.org/">
        ... <head>
        ... <title>Example</title>
        ... </head>
        ... <body>
        ... <h1>Example</h1>
        ... <p>${_("Hello, %(name)s") % dict(name=username)}</p>
        ... <p>${ngettext("You have %d item", "You have %d items", num)}</p>
        ... </body>
        ... </html>''', filename='example.html')
        >>> for line, func, msg, comments in Translator().extract(tmpl.stream):
        ...     print('%d, %r, %r' % (line, func, msg))
        3, None, u'Example'
        6, None, u'Example'
        7, '_', u'Hello, %(name)s'
        8, 'ngettext', (u'You have %d item', u'You have %d items', None)
        :param stream: the event stream to extract strings from; can be a
                       regular stream or a template stream
        :param gettext_functions: a sequence of function names that should be
                                  treated as gettext-style localization
                                  functions
        :param search_text: whether the content of text nodes should be
                            extracted (used internally)
        :note: Changed in 0.4.1: For a function with multiple string arguments
               (such as ``ngettext``), a single item with a tuple of strings is
               yielded, instead an item for each string argument.
        :note: Changed in 0.6: The returned tuples now include a fourth
               element, which is a list of comments for the translator.
        """
        if not self.extract_text:
            search_text = False
        if comment_stack is None:
            comment_stack = []
        skip = 0
        # Uncomment below to extract messages without adding directives
        xml_lang = XML_NAMESPACE['lang']
        for kind, data, pos in stream:
            if skip:
                if kind is START:
                    skip += 1
                if kind is END:
                    skip -= 1
            if kind is START and not skip:
                tag, attrs = data
                if tag in self.ignore_tags or \
                        isinstance(attrs.get(xml_lang), basestring):
                    skip += 1
                    continue
                for name, value in attrs:
                    if search_text and isinstance(value, basestring):
                        if name in self.include_attrs:
                            text = value.strip()
                            if text:
                                # XXX: Do we need to grab i18n:comment from comment_stack ???
                                yield pos[1], None, text, []
                    else:
                        # Attribute value with expressions: recurse into it.
                        for lineno, funcname, text, comments in self.extract(
                                _ensure(value), gettext_functions,
                                search_text=False):
                            yield lineno, funcname, text, comments
                if msgbuf:
                    msgbuf.append(kind, data, pos)
            elif not skip and search_text and kind is TEXT:
                if not msgbuf:
                    text = data.strip()
                    # Only yield text that contains at least one letter.
                    if text and [ch for ch in text if ch.isalpha()]:
                        yield pos[1], None, text, comment_stack[-1:]
                else:
                    msgbuf.append(kind, data, pos)
            elif not skip and msgbuf and kind is END:
                msgbuf.append(kind, data, pos)
                if not msgbuf.depth:
                    yield msgbuf.lineno, None, msgbuf.format(), [
                        c for c in msgbuf.comment if c
                    ]
                    msgbuf = None
            elif kind is EXPR or kind is EXEC:
                if msgbuf:
                    msgbuf.append(kind, data, pos)
                for funcname, strings in extract_from_code(data,
                                                           gettext_functions):
                    # XXX: Do we need to grab i18n:comment from comment_stack ???
                    yield pos[1], funcname, strings, []
            elif kind is SUB:
                directives, substream = data
                in_comment = False
                # NOTE(review): directives.pop(idx) mutates the list while it
                # is being enumerated -- preserved as-is, but fragile if more
                # than one directive matches.
                for idx, directive in enumerate(directives):
                    # Do a first loop to see if there's a comment directive
                    # If there is update context and pop it from directives
                    if isinstance(directive, CommentDirective):
                        in_comment = True
                        comment_stack.append(directive.comment)
                        if len(directives) == 1:
                            # in case we're in the presence of something like:
                            # <p i18n:comment="foo">Foo</p>
                            messages = self.extract(
                                substream, gettext_functions,
                                search_text=search_text and not skip,
                                msgbuf=msgbuf, comment_stack=comment_stack)
                            for lineno, funcname, text, comments in messages:
                                yield lineno, funcname, text, comments
                        directives.pop(idx)
                    elif not isinstance(directive, I18NDirective):
                        # Remove all other non i18n directives from the process
                        directives.pop(idx)
                if not directives and not in_comment:
                    # Extract content if there's no directives because
                    # strip was pop'ed and not because comment was pop'ed.
                    # Extraction in this case has been taken care of.
                    messages = self.extract(
                        substream, gettext_functions,
                        search_text=search_text and not skip, msgbuf=msgbuf)
                    for lineno, funcname, text, comments in messages:
                        yield lineno, funcname, text, comments
                for directive in directives:
                    if isinstance(directive, ExtractableI18NDirective):
                        messages = directive.extract(substream, comment_stack)
                        for funcname, text, comments in messages:
                            yield pos[1], funcname, text, comments
                    else:
                        messages = self.extract(
                            substream, gettext_functions,
                            search_text=search_text and not skip, msgbuf=msgbuf)
                        for lineno, funcname, text, comments in messages:
                            yield lineno, funcname, text, comments
                if in_comment:
                    comment_stack.pop()
    def get_directive_index(self, dir_cls):
        # Known i18n directives sort before (negative offset) any unknown
        # directive class, which sorts last.
        total = len(self._dir_order)
        if dir_cls in self._dir_order:
            return self._dir_order.index(dir_cls) - total
        return total
    def setup(self, template):
        """Convenience function to register the `Translator` filter and the
        related directives with the given template.
        :param template: a `Template` instance
        """
        template.filters.insert(0, self)
        if hasattr(template, 'add_directives'):
            template.add_directives(Translator.NAMESPACE, self)
class MessageBuffer(object):
    """Helper class for managing internationalized mixed content.
    :since: version 0.5
    """
    def __init__(self, directive=None):
        """Initialize the message buffer.
        :param directive: the i18n directive that owns this buffer; its
                          ``params`` list supplies the names available for
                          expressions in the message (may be `None`)
        """
        # params list needs to be copied so that directives can be evaluated
        # more than once
        if directive is not None:
            self.orig_params = self.params = directive.params[:]
        else:
            # ROBUSTNESS FIX: the original unconditionally did
            # ``directive.params[:]``, so the documented default
            # ``directive=None`` crashed with an AttributeError.
            self.orig_params = self.params = []
        self.directive = directive
        self.string = []
        self.events = {}
        self.values = {}
        self.depth = 1
        self.order = 1
        self.stack = [0]
        self.subdirectives = {}
    def append(self, kind, data, pos):
        """Append a stream event to the buffer.
        :param kind: the stream event kind
        :param data: the event data
        :param pos: the position of the event in the source
        """
        if kind is SUB:
            # The order needs to be +1 because a new START kind event will
            # happen and we need to wrap those events into our custom kind(s)
            order = self.stack[-1] + 1
            subdirectives, substream = data
            # Store the directives that should be applied after translation
            self.subdirectives.setdefault(order, []).extend(subdirectives)
            self.events.setdefault(order, []).append((SUB_START, None, pos))
            for skind, sdata, spos in substream:
                self.append(skind, sdata, spos)
            self.events.setdefault(order, []).append((SUB_END, None, pos))
        elif kind is TEXT:
            if '[' in data or ']' in data:
                # Quote [ and ] if it ain't us adding it, ie, if the user is
                # using those chars in his templates, escape them
                data = data.replace('[', '\[').replace(']', '\]')
            self.string.append(data)
            self.events.setdefault(self.stack[-1], []).append((kind, data, pos))
        elif kind is EXPR:
            if self.params:
                param = self.params.pop(0)
            else:
                # More expressions than declared parameter names: build a
                # helpful error message pointing at the template position.
                params = ', '.join(['"%s"' % p for p in self.orig_params if p])
                if params:
                    params = "(%s)" % params
                # NOTE: fixed the "In Memmory Template" typo in this message.
                raise IndexError("%d parameters%s given to 'i18n:%s' but "
                                 "%d or more expressions used in '%s', line %s"
                                 % (len(self.orig_params), params,
                                    self.directive.tagname,
                                    len(self.orig_params)+1,
                                    os.path.basename(pos[0] or
                                                     'In Memory Template'),
                                    pos[1]))
            self.string.append('%%(%s)s' % param)
            self.events.setdefault(self.stack[-1], []).append((kind, data, pos))
            self.values[param] = (kind, data, pos)
        else:
            if kind is START:
                # Open a numbered bracket for this element: "[N:".
                self.string.append('[%d:' % self.order)
                self.stack.append(self.order)
                self.events.setdefault(self.stack[-1],
                                       []).append((kind, data, pos))
                self.depth += 1
                self.order += 1
            elif kind is END:
                self.depth -= 1
                if self.depth:
                    self.events[self.stack[-1]].append((kind, data, pos))
                    self.string.append(']')
                    self.stack.pop()
    def format(self):
        """Return a message identifier representing the content in the
        buffer.
        """
        return ''.join(self.string).strip()
    def translate(self, string, regex=re.compile(r'%\((\w+)\)s')):
        """Interpolate the given message translation with the events in the
        buffer and return the translated stream.
        :param string: the translated message string
        """
        substream = None
        def yield_parts(string):
            # Split the translated text on %(name)s placeholders; odd indices
            # are parameter names whose buffered events are re-emitted.
            for idx, part in enumerate(regex.split(string)):
                if idx % 2:
                    yield self.values[part]
                elif part:
                    yield (TEXT,
                           part.replace('\[', '[').replace('\]', ']'),
                           (None, -1, -1)
                    )
        parts = parse_msg(string)
        parts_counter = {}
        for order, string in parts:
            parts_counter.setdefault(order, []).append(None)
        while parts:
            order, string = parts.pop(0)
            if len(parts_counter[order]) == 1:
                events = self.events[order]
            else:
                # The same order appears several times in the translation:
                # consume its buffered events one at a time.
                events = [self.events[order].pop(0)]
                parts_counter[order].pop()
            for event in events:
                if event[0] is SUB_START:
                    substream = []
                elif event[0] is SUB_END:
                    # Yield a substream which might have directives to be
                    # applied to it (after translation events)
                    yield SUB, (self.subdirectives[order], substream), event[2]
                    substream = None
                elif event[0] is TEXT:
                    if string:
                        for part in yield_parts(string):
                            if substream is not None:
                                substream.append(part)
                            else:
                                yield part
                        # String handled, reset it
                        string = None
                elif event[0] is START:
                    if substream is not None:
                        substream.append(event)
                    else:
                        yield event
                    if string:
                        for part in yield_parts(string):
                            if substream is not None:
                                substream.append(part)
                            else:
                                yield part
                        # String handled, reset it
                        string = None
                elif event[0] is END:
                    if string:
                        for part in yield_parts(string):
                            if substream is not None:
                                substream.append(part)
                            else:
                                yield part
                        # String handled, reset it
                        string = None
                    if substream is not None:
                        substream.append(event)
                    else:
                        yield event
                elif event[0] is EXPR:
                    # These are handled on the strings itself
                    continue
                else:
                    if string:
                        for part in yield_parts(string):
                            if substream is not None:
                                substream.append(part)
                            else:
                                yield part
                        # String handled, reset it
                        string = None
                    if substream is not None:
                        substream.append(event)
                    else:
                        yield event
def parse_msg(string, regex=re.compile(r'(?:\[(\d+)\:)|(?<!\\)\]')):
    """Split a translated message that uses Genshi's mixed content markup
    into ``(order, text)`` pairs.

    ``[<n>:`` opens a span with order number ``n`` and an unescaped ``]``
    closes the innermost open span; text outside any span has order 0.

    >>> parse_msg("See [1:Help].")
    [(0, 'See '), (1, 'Help'), (0, '.')]

    >>> parse_msg("See [1:our [2:Help] page] for details.")
    [(0, 'See '), (1, 'our '), (2, 'Help'), (1, ' page'), (0, ' for details.')]

    >>> parse_msg("[2:Details] finden Sie in [1:Hilfe].")
    [(2, 'Details'), (0, ' finden Sie in '), (1, 'Hilfe'), (0, '.')]

    >>> parse_msg("[1:] Bilder pro Seite anzeigen.")
    [(1, ''), (0, ' Bilder pro Seite anzeigen.')]

    :param string: the translated message string
    :return: a list of ``(order, string)`` tuples
    :rtype: `list`
    """
    result = []
    order_stack = [0]
    match = regex.search(string)
    while match is not None:
        leading = string[:match.start()]
        # Empty chunks at order 0 are noise and dropped; inside a span an
        # empty chunk is significant (it marks an empty [n:] placeholder).
        if leading or order_stack[-1]:
            result.append((order_stack[-1], leading))
        string = string[match.end():]
        orderno = match.group(1)
        if orderno is None:
            # An unescaped ']' closes the innermost span.
            order_stack.pop()
            if not order_stack:
                break
        else:
            # '[<n>:' opens a new span with that order number.
            order_stack.append(int(orderno))
        match = regex.search(string)
    if string:
        result.append((order_stack[-1], string))
    return result
def extract_from_code(code, gettext_functions):
    """Extract strings from Python bytecode.

    >>> from genshi.template.eval import Expression
    >>> expr = Expression('_("Hello")')
    >>> list(extract_from_code(expr, Translator.GETTEXT_FUNCTIONS))
    [('_', u'Hello')]

    >>> expr = Expression('ngettext("You have %(num)s item", '
    ...                   '"You have %(num)s items", num)')
    >>> list(extract_from_code(expr, Translator.GETTEXT_FUNCTIONS))
    [('ngettext', (u'You have %(num)s item', u'You have %(num)s items', None))]

    :param code: the `Code` object
    :type code: `genshi.template.eval.Code`
    :param gettext_functions: a sequence of function names
    :since: version 0.5
    """
    def _walk(node):
        # Recursively scan the AST for calls to the known gettext
        # function names and yield their string arguments.
        if isinstance(node, _ast.Call) and isinstance(node.func, _ast.Name) \
                and node.func.id in gettext_functions:
            strings = []
            def _add(arg):
                # Literal string args are collected as unicode; any other
                # truthy argument contributes a None placeholder (e.g. the
                # count expression of ngettext).
                if isinstance(arg, _ast.Str) and isinstance(arg.s, basestring):
                    strings.append(unicode(arg.s, 'utf-8'))
                elif arg:
                    strings.append(None)
            [_add(arg) for arg in node.args]
            _add(node.starargs)
            _add(node.kwargs)
            # A single string is yielded bare; multiple become a tuple.
            if len(strings) == 1:
                strings = strings[0]
            else:
                strings = tuple(strings)
            yield node.func.id, strings
        elif node._fields:
            # Not a gettext call: recurse into every child node.
            children = []
            for field in node._fields:
                child = getattr(node, field, None)
                if isinstance(child, list):
                    for elem in child:
                        children.append(elem)
                elif isinstance(child, _ast.AST):
                    children.append(child)
            for child in children:
                for funcname, strings in _walk(child):
                    yield funcname, strings
    return _walk(code.ast)
def extract(fileobj, keywords, comment_tags, options):
    """Babel extraction method for Genshi templates.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: ``iterator``
    """
    template_class = options.get('template_class', MarkupTemplate)
    if isinstance(template_class, basestring):
        # The class may be configured as a 'module.path:ClassName' string.
        module, clsname = template_class.split(':', 1)
        template_class = getattr(__import__(module, {}, {}, [clsname]), clsname)
    encoding = options.get('encoding', None)
    extract_text = options.get('extract_text', True)
    if isinstance(extract_text, basestring):
        # Option values coming from config files arrive as strings.
        extract_text = extract_text.lower() in ('1', 'on', 'yes', 'true')
    ignore_tags = options.get('ignore_tags', Translator.IGNORE_TAGS)
    if isinstance(ignore_tags, basestring):
        ignore_tags = ignore_tags.split()
    ignore_tags = [QName(tag) for tag in ignore_tags]
    include_attrs = options.get('include_attrs', Translator.INCLUDE_ATTRS)
    if isinstance(include_attrs, basestring):
        include_attrs = include_attrs.split()
    include_attrs = [QName(attr) for attr in include_attrs]
    tmpl = template_class(fileobj, filename=getattr(fileobj, 'name', None),
                          encoding=encoding)
    translator = Translator(None, ignore_tags, include_attrs, extract_text)
    # Newer template classes let the translator handle i18n directives
    # during extraction as well.
    if hasattr(tmpl, 'add_directives'):
        tmpl.add_directives(Translator.NAMESPACE, translator)
    for message in translator.extract(tmpl.stream, gettext_functions=keywords):
        yield message
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""This package provides various means for generating and processing web markup
(XML or HTML).
The design is centered around the concept of streams of markup events (similar
in concept to SAX parsing events) which can be processed in a uniform manner
independently of where or how they are produced.
"""
__docformat__ = 'restructuredtext en'
__version__ = '0.6'
from genshi.core import *
from genshi.input import ParseError, XML, HTML
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Support for programmatically generating markup streams from Python code using
a very simple syntax. The main entry point to this module is the `tag` object
(which is actually an instance of the ``ElementFactory`` class). You should
rarely (if ever) need to directly import and use any of the other classes in
this module.
Elements can be created using the `tag` object using attribute access. For
example:
>>> doc = tag.p('Some text and ', tag.a('a link', href='http://example.org/'), '.')
>>> doc
<Element "p">
This produces an `Element` instance which can be further modified to add child
nodes and attributes. This is done by "calling" the element: positional
arguments are added as child nodes (alternatively, the `Element.append` method
can be used for that purpose), whereas keywords arguments are added as
attributes:
>>> doc(tag.br)
<Element "p">
>>> print(doc)
<p>Some text and <a href="http://example.org/">a link</a>.<br/></p>
If an attribute name collides with a Python keyword, simply append an underscore
to the name:
>>> doc(class_='intro')
<Element "p">
>>> print(doc)
<p class="intro">Some text and <a href="http://example.org/">a link</a>.<br/></p>
As shown above, an `Element` can easily be directly rendered to XML text by
printing it or using the Python ``str()`` function. This is basically a
shortcut for converting the `Element` to a stream and serializing that
stream:
>>> stream = doc.generate()
>>> stream #doctest: +ELLIPSIS
<genshi.core.Stream object at ...>
>>> print(stream)
<p class="intro">Some text and <a href="http://example.org/">a link</a>.<br/></p>
The `tag` object also allows creating "fragments", which are basically lists
of nodes (elements or text) that don't have a parent element. This can be useful
for creating snippets of markup that are attached to a parent element later (for
example in a template). Fragments are created by calling the `tag` object, which
returns an object of type `Fragment`:
>>> fragment = tag('Hello, ', tag.em('world'), '!')
>>> fragment
<Fragment>
>>> print(fragment)
Hello, <em>world</em>!
"""
from genshi.core import Attrs, Markup, Namespace, QName, Stream, \
START, END, TEXT
__all__ = ['Fragment', 'Element', 'ElementFactory', 'tag']
__docformat__ = 'restructuredtext en'
class Fragment(object):
    """A markup fragment: an ordered collection of element and text nodes
    that does not itself have a parent element.
    """
    __slots__ = ['children']

    def __init__(self):
        """Initialize the fragment with no child nodes."""
        self.children = []

    def __add__(self, other):
        # Combining two nodes produces a fresh fragment holding both.
        return Fragment()(self, other)

    def __call__(self, *args):
        """Append every positional argument as a child node and return the
        fragment itself so calls can be chained.

        :see: `append`
        """
        for node in args:
            self.append(node)
        return self

    def __iter__(self):
        return self._generate()

    def __repr__(self):
        return '<%s>' % type(self).__name__

    def __str__(self):
        return str(self.generate())

    def __unicode__(self):
        return unicode(self.generate())

    def __html__(self):
        return Markup(self.generate())

    def append(self, node):
        """Add an element or string as a child node.

        :param node: the node to append; can be an `Element`, `Fragment`,
                     `Stream`, or a Python string or number
        """
        if isinstance(node, (Stream, Element, basestring, int, float, long)):
            # Known primitive/stream types go straight in; skipping the
            # generic iterability probe below keeps the common case fast.
            self.children.append(node)
        elif isinstance(node, Fragment):
            # Splice another fragment's children in rather than nesting it.
            self.children.extend(node.children)
        elif node is not None:
            # Fall back to treating the node as an iterable of children;
            # anything non-iterable is stored as-is.
            try:
                for child in node:
                    self.append(child)
            except TypeError:
                self.children.append(node)

    def _generate(self):
        # Walk the children depth-first, flattening nested fragments and
        # streams into one flat event sequence.
        for child in self.children:
            if isinstance(child, Fragment):
                for event in child._generate():
                    yield event
            elif isinstance(child, Stream):
                for event in child:
                    yield event
            else:
                text = child if isinstance(child, basestring) else unicode(child)
                yield TEXT, text, (None, -1, -1)

    def generate(self):
        """Return a markup event stream for the fragment.

        :rtype: `Stream`
        """
        return Stream(self._generate())
def _kwargs_to_attrs(kwargs):
    """Convert a keyword-argument dict to an `Attrs` instance.

    Trailing underscores are stripped (so ``class_`` maps to ``class``),
    remaining underscores become dashes, entries whose value is `None` are
    dropped, and only the first occurrence of each resulting name is kept.
    """
    attrs = []
    seen = set()
    for key, value in kwargs.items():
        attr_name = key.rstrip('_').replace('_', '-')
        if value is None or attr_name in seen:
            continue
        attrs.append((QName(attr_name), unicode(value)))
        seen.add(attr_name)
    return Attrs(attrs)
class Element(Fragment):
    """Simple XML output generator based on the builder pattern.

    Construct XML elements by passing the tag name to the constructor:

    >>> print(Element('strong'))
    <strong/>

    Attributes can be specified using keyword arguments. The values of the
    arguments will be converted to strings and any special XML characters
    escaped:

    >>> print(Element('textarea', rows=10, cols=60))
    <textarea rows="10" cols="60"/>
    >>> print(Element('span', title='1 < 2'))
    <span title="1 &lt; 2"/>
    >>> print(Element('span', title='"baz"'))
    <span title="&#34;baz&#34;"/>

    The " character is escaped using a numerical entity.
    The order in which attributes are rendered is undefined.

    If an attribute value evaluates to `None`, that attribute is not included
    in the output:

    >>> print(Element('a', name=None))
    <a/>

    Attribute names that conflict with Python keywords can be specified by
    appending an underscore:

    >>> print(Element('div', class_='warning'))
    <div class="warning"/>

    Nested elements can be added to an element using item access notation.
    The call notation can also be used for this and for adding attributes
    using keyword arguments, as one would do in the constructor.

    >>> print(Element('ul')(Element('li'), Element('li')))
    <ul><li/><li/></ul>
    >>> print(Element('a')('Label'))
    <a>Label</a>
    >>> print(Element('a')('Label', href="target"))
    <a href="target">Label</a>

    Text nodes can be nested in an element by adding strings instead of
    elements. Any special characters in the strings are escaped automatically:

    >>> print(Element('em')('Hello world'))
    <em>Hello world</em>
    >>> print(Element('em')(42))
    <em>42</em>
    >>> print(Element('em')('1 < 2'))
    <em>1 &lt; 2</em>

    This technique also allows mixed content:

    >>> print(Element('p')('Hello ', Element('b')('world')))
    <p>Hello <b>world</b></p>

    Quotes are not escaped inside text nodes:

    >>> print(Element('p')('"Hello"'))
    <p>"Hello"</p>

    Elements can also be combined with other elements or strings using the
    addition operator, which results in a `Fragment` object that contains the
    operands:

    >>> print(Element('br') + 'some text' + Element('br'))
    <br/>some text<br/>

    Elements with a namespace can be generated using the `Namespace` and/or
    `QName` classes:

    >>> from genshi.core import Namespace
    >>> xhtml = Namespace('http://www.w3.org/1999/xhtml')
    >>> print(Element(xhtml.html, lang='en'))
    <html xmlns="http://www.w3.org/1999/xhtml" lang="en"/>
    """
    __slots__ = ['tag', 'attrib']

    def __init__(self, tag_, **attrib):
        Fragment.__init__(self)
        self.tag = QName(tag_)
        self.attrib = _kwargs_to_attrs(attrib)

    def __call__(self, *args, **kwargs):
        """Append positional arguments as child nodes and keyword arguments
        as attributes.

        :return: the element itself so that calls can be chained
        :rtype: `Element`
        :see: `Fragment.append`
        """
        # Merge the new attributes first, then delegate child handling to
        # the base class.
        self.attrib |= _kwargs_to_attrs(kwargs)
        Fragment.__call__(self, *args)
        return self

    def __repr__(self):
        return '<%s "%s">' % (type(self).__name__, self.tag)

    def _generate(self):
        # Bracket the children's events with START/END events for this tag.
        yield START, (self.tag, self.attrib), (None, -1, -1)
        for event in Fragment._generate(self):
            yield event
        yield END, self.tag, (None, -1, -1)

    def generate(self):
        """Return a markup event stream for the element and its children.

        :rtype: `Stream`
        """
        return Stream(self._generate())
class ElementFactory(object):
    """Factory for `Element` objects.

    A new element is created simply by accessing a correspondingly named
    attribute of the factory object:

    >>> factory = ElementFactory()
    >>> print(factory.foo)
    <foo/>
    >>> print(factory.foo(id=2))
    <foo id="2"/>

    Markup fragments (lists of nodes without a parent element) can be created
    by calling the factory:

    >>> print(factory('Hello, ', factory.em('world'), '!'))
    Hello, <em>world</em>!

    A factory can also be bound to a specific namespace:

    >>> factory = ElementFactory('http://www.w3.org/1999/xhtml')
    >>> print(factory.html(lang="en"))
    <html xmlns="http://www.w3.org/1999/xhtml" lang="en"/>

    The namespace for a specific element can be altered on an existing factory
    by specifying the new namespace using item access:

    >>> factory = ElementFactory()
    >>> print(factory.html(factory['http://www.w3.org/2000/svg'].g(id=3)))
    <html><g xmlns="http://www.w3.org/2000/svg" id="3"/></html>

    Usually, the `ElementFactory` class is not used directly.  Rather, the
    `tag` instance should be used to create elements.
    """

    def __init__(self, namespace=None):
        """Create the factory, optionally bound to the given namespace.

        :param namespace: the namespace URI for any created elements, or
                          `None` for no namespace
        """
        ns = namespace
        if ns and not isinstance(ns, Namespace):
            ns = Namespace(ns)
        self.namespace = ns

    def __call__(self, *args):
        """Create a fragment that has the given positional arguments as
        child nodes.

        :return: the created `Fragment`
        :rtype: `Fragment`
        """
        return Fragment()(*args)

    def __getitem__(self, namespace):
        """Return a new factory that is bound to the specified namespace.

        :param namespace: the namespace URI or `Namespace` object
        :return: an `ElementFactory` that produces elements bound to the
                 given namespace
        :rtype: `ElementFactory`
        """
        return ElementFactory(namespace)

    def __getattr__(self, name):
        """Create an `Element` with the given name.

        :param name: the tag name of the element to create
        :return: an `Element` with the specified name
        :rtype: `Element`
        """
        # Qualify the tag name with the bound namespace, if any.
        qualified = self.namespace and self.namespace[name] or name
        return Element(qualified)
tag = ElementFactory()
"""Global `ElementFactory` bound to the default namespace.
:type: `ElementFactory`
"""
| Python |
import unittest
import doctest
class OptionalExtensionTestSuite(unittest.TestSuite):
    """Test suite that runs its tests twice: once with the simplejson C
    speedups in their current state and once with them disabled, so both
    the C and the pure-Python code paths get exercised.
    """

    def run(self, result):
        import simplejson
        base_run = unittest.TestSuite.run
        # First pass: whatever implementation is currently active
        # (normally the C extension, when available).
        base_run(self, result)
        # Second pass: force the pure-Python implementation.
        simplejson._toggle_speedups(False)
        base_run(self, result)
        # Re-enable the speedups for any code running after this suite.
        simplejson._toggle_speedups(True)
        return result
def additional_tests(suite=None):
    """Collect the doctest-based tests for simplejson.

    :param suite: an existing `unittest.TestSuite` to add to, or `None` to
                  create a fresh one
    :return: the suite with the doctest suites added
    """
    import simplejson
    import simplejson.encoder
    import simplejson.decoder
    target = suite if suite is not None else unittest.TestSuite()
    modules = (simplejson, simplejson.encoder, simplejson.decoder)
    for module in modules:
        target.addTest(doctest.DocTestSuite(module))
    # The project README doubles as a doctest file.
    target.addTest(doctest.DocFileSuite('../../index.rst'))
    return target
def all_tests_suite():
    """Build the complete simplejson test suite (unit tests plus doctests),
    wrapped so that it runs both with and without the C speedups.
    """
    module_names = [
        'simplejson.tests.test_check_circular',
        'simplejson.tests.test_decode',
        'simplejson.tests.test_default',
        'simplejson.tests.test_dump',
        'simplejson.tests.test_encode_basestring_ascii',
        'simplejson.tests.test_encode_for_html',
        'simplejson.tests.test_fail',
        'simplejson.tests.test_float',
        'simplejson.tests.test_indent',
        'simplejson.tests.test_pass1',
        'simplejson.tests.test_pass2',
        'simplejson.tests.test_pass3',
        'simplejson.tests.test_recursion',
        'simplejson.tests.test_scanstring',
        'simplejson.tests.test_separators',
        'simplejson.tests.test_speedups',
        'simplejson.tests.test_unicode',
        'simplejson.tests.test_decimal',
    ]
    suite = unittest.TestLoader().loadTestsFromNames(module_names)
    return OptionalExtensionTestSuite([additional_tests(suite)])
def main():
    """Run the full test suite with a plain-text test runner."""
    runner = unittest.TextTestRunner()
    runner.run(all_tests_suite())
if __name__ == '__main__':
    import os
    import sys
    # Put the source checkout ahead of any installed simplejson so the
    # tests exercise this working copy.
    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
    main()
| Python |
"""Implementation of JSONEncoder
"""
import re
from decimal import Decimal
def _import_speedups():
try:
from simplejson import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
from simplejson.decoder import PosInf
# Characters that always need escaping inside a JSON string.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# With ensure_ascii, everything outside printable ASCII is escaped too.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects bytes belonging to UTF-8 multi-byte sequences.
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Two-character escape sequences; control characters not listed here fall
# back to the generic \uXXXX form added by the loop below.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# repr() yields the shortest float representation that round-trips.
FLOAT_REPR = repr
def encode_basestring(s):
    """Return a JSON representation of a Python string.

    This is the ``ensure_ascii=False`` path: non-ASCII characters are left
    as-is and only the mandatory JSON escapes are applied.
    """
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        # Decode UTF-8 byte strings so escaping works per code point
        # (Python 2 semantics: str here is a byte string).
        s = s.decode('utf-8')

    def _replace(match):
        return ESCAPE_DCT[match.group(0)]

    return u'"' + ESCAPE.sub(_replace, s) + u'"'
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string.

    Pure-Python fallback for the C implementation in _speedups.
    """
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        # Decode UTF-8 byte strings so non-ASCII characters can be escaped
        # per code point (Python 2 semantics).
        s = s.decode('utf-8')

    def replace(match):
        ch = match.group(0)
        escape = ESCAPE_DCT.get(ch)
        if escape is not None:
            return escape
        n = ord(ch)
        if n < 0x10000:
            #return '\\u{0:04x}'.format(n)
            return '\\u%04x' % (n,)
        # Characters outside the BMP are emitted as a UTF-16 surrogate pair.
        n -= 0x10000
        s1 = 0xd800 | ((n >> 10) & 0x3ff)
        s2 = 0xdc00 | (n & 0x3ff)
        #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
        return '\\u%04x\\u%04x' % (s1, s2)

    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Prefer the C implementation whenever the _speedups extension is available.
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Class-level defaults; instance attributes shadow these when the
    # ``separators`` constructor argument is given.
    item_separator = ', '
    key_separator = ': '

    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None,
            use_decimal=False):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.

        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a string, then JSON array elements and object members
        will be pretty-printed with a newline followed by that string
        repeated for each level of nesting.  ``None`` (the default) selects
        the most compact representation without any newlines.  For backwards
        compatibility with versions of simplejson earlier than 2.1.0, an
        integer is also accepted and is converted to a string with that many
        spaces.

        If specified, separators should be a (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON
        encodable version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.

        If use_decimal is true (not the default), ``decimal.Decimal`` will
        be supported directly by the encoder.  For the inverse, decode JSON
        with ``parse_float=decimal.Decimal``.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.use_decimal = use_decimal
        if isinstance(indent, (int, long)):
            # Backwards compatibility: an integer indent means that many
            # spaces per nesting level.
            indent = ' ' * indent
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            # Shadow the class-level default() with the supplied callable.
            self.default = default
        self.encoding = encoding

    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError(repr(o) + " is not JSON serializable")

    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> from simplejson import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    # Non-UTF-8 byte strings are decoded up front; the
                    # UTF-8 case is handled inside the string encoders.
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        # ``markers`` maps id(container) -> container for circular
        # reference detection; None disables the check entirely.
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the encoder so byte strings are decoded with the
            # configured encoding first.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)

        def floatstr(o, allow_nan=self.allow_nan,
                _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on
            # the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text

        key_memo = {}
        # Use the C encoder only in the one-shot, compact, unsorted case;
        # anything fancier falls back to the Python implementation.
        if (_one_shot and c_make_encoder is not None
                and not self.indent and not self.sort_keys):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan, key_memo, self.use_decimal)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot, self.use_decimal)
        try:
            return _iterencode(o, 0)
        finally:
            # Drop the interned-key cache in all cases.
            key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
    """An encoder that produces JSON safe to embed in HTML.

    To embed JSON content in, say, a script tag on a web page, the
    characters &, < and > should be escaped.  They cannot be escaped with
    the usual entities (e.g. &amp;) because they are not expanded within
    <script> tags.
    """

    def encode(self, o):
        # Bypass JSONEncoder.encode: its fast path for plain strings would
        # skip the HTML escaping performed in iterencode below.
        chunks = self.iterencode(o, True)
        if self.ensure_ascii:
            return ''.join(chunks)
        return u''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        # Escape the HTML-sensitive characters in every chunk the base
        # encoder produces, using \uXXXX escapes valid inside JSON strings.
        for chunk in super(JSONEncoderForHTML, self).iterencode(o, _one_shot):
            chunk = chunk.replace('&', '\\u0026')
            chunk = chunk.replace('<', '\\u003c')
            yield chunk.replace('>', '\\u003e')
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        _use_decimal,
        ## HACK: hand-optimized bytecode; turn globals into locals
        # (Python 2 only: True/False/builtins are rebound as default
        # arguments so lookups inside the hot loops are LOAD_FAST.)
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        Decimal=Decimal,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    # Build and return the pure-Python iterencode closure used by
    # JSONEncoder.iterencode when the C encoder cannot be used.

    def _iterencode_list(lst, _current_indent_level):
        # Yield the JSON chunks for a list/tuple.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            # Circular-reference detection: remember this container by id.
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # ``buf`` carries the pending separator (or opening
                # bracket) that precedes the next value.
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield buf + str(value)
            else:
                # Containers and custom objects: emit the separator first,
                # then recurse.
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield ']'
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(dct, _current_indent_level):
        # Yield the JSON chunks for a dict.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            # Circular-reference detection: remember this container by id.
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield str(value)
            else:
                # Containers and custom objects: recurse.
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level dispatch over all supported types.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        elif _use_decimal and isinstance(o, Decimal):
            yield str(o)
        else:
            # Unknown type: let ``default()`` convert it, guarding against
            # a default() that (indirectly) returns its own input.
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]

    return _iterencode
| Python |
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
def _import_c_scanstring():
try:
from simplejson._speedups import scanstring
return scanstring
except ImportError:
return None
c_scanstring = _import_c_scanstring()
# Public API of this module.
__all__ = ['JSONDecoder']

# Regex flags shared by every pattern below: verbose patterns that can span
# lines and in which '.' matches newlines too.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
    """Return the (nan, inf, -inf) float constants, built from their raw
    big-endian IEEE-754 byte patterns (a quiet NaN followed by +Infinity)."""
    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
    # The struct module in Python 2.4 would get frexp() out of range here
    # when an endian is specified in the format string. Fixed in Python 2.5+
    if sys.byteorder != 'big':
        # Manually byte-swap each 8-byte double for little-endian hosts,
        # since the format string below deliberately omits an endian marker.
        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
    nan, inf = struct.unpack('dd', _BYTES)
    return nan, inf, -inf

# Module-wide float values used when decoding the JSON extensions
# NaN / Infinity / -Infinity.
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
    """Subclass of ValueError with the following additional properties:

    msg: The unformatted error message
    doc: The JSON document being parsed
    pos: The start index of doc where parsing failed
    end: The end index of doc where parsing failed (may be None)
    lineno: The line corresponding to pos
    colno: The column corresponding to pos
    endlineno: The line corresponding to end (may be None)
    endcolno: The column corresponding to end (may be None)

    """
    def __init__(self, msg, doc, pos, end=None):
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        self.lineno, self.colno = linecol(doc, pos)
        if end is not None:
            # Bug fix: the end coordinates must be derived from ``end``;
            # previously this called linecol(doc, pos), so endlineno/endcolno
            # always duplicated lineno/colno, contradicting the class
            # docstring above.
            self.endlineno, self.endcolno = linecol(doc, end)
        else:
            self.endlineno, self.endcolno = None, None
def linecol(doc, pos):
    """Return the 1-based line number and the column of character offset
    *pos* inside document *doc*.

    On the first line the column is simply *pos*; on later lines it is the
    distance from the last newline before *pos*.
    """
    line = doc.count('\n', 0, pos) + 1
    if line == 1:
        return line, pos
    return line, pos - doc.rindex('\n', 0, pos)
def errmsg(msg, doc, pos, end=None):
    """Format a parse-error message with line/column context for *pos*
    (and, when given, *end*).

    Note that this function is called from _speedups, so its signature
    must stay stable.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return ('%s: line %d column %d - line %d column %d (char %d - %d)'
            % (msg, lineno, colno, endlineno, endcolno, pos, end))
# Literal tokens outside the strict JSON spec that this decoder accepts.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}

# Matches a (possibly empty) run of ordinary characters followed by the
# terminator: closing quote, a literal control character, or a backslash.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character JSON escape sequences (everything except \uXXXX).
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
# Encoding assumed for str input when the caller does not specify one.
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                #msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: keep the raw control character.
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise JSONDecodeError(msg, s, end)
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise JSONDecodeError(msg, s, end)
                uni2 = int(esc2, 16)
                # Combine the high/low surrogates into one code point.
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring

# Insignificant whitespace per the JSON spec, as a regex and a char set.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
object_pairs_hook, memo=None,
_w=WHITESPACE.match, _ws=WHITESPACE_STR):
# Backwards compatibility
if memo is None:
memo = {}
memo_get = memo.setdefault
pairs = []
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = {}
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end + 1
elif nextchar != '"':
raise JSONDecodeError("Expecting property name", s, end)
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
key = memo_get(key, key)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise JSONDecodeError("Expecting : delimiter", s, end)
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise JSONDecodeError("Expecting object", s, end)
pairs.append((key, value))
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting , delimiter", s, end - 1)
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise JSONDecodeError("Expecting property name", s, end - 1)
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = dict(pairs)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array from ``s``; ``end`` indexes the character just
    after the opening ``'['``.

    Returns ``(values, index)`` where ``index`` points just past the
    closing ``']'``.
    """
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end)
        # Skip whitespace after the comma before the next element.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.

    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default).  It has no effect when decoding :class:`unicode` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.

        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`.  This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.

        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded.  By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).

        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded.  By default, this is equivalent to
        ``int(num_str)``.  This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).

        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``.  This
        can be used to raise an exception if invalid JSON numbers are
        encountered.

        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.

        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        # Entry points the scanner dispatches to; kept as attributes so the
        # C speedups can replace them per-instance.
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # Shared memo used by JSONObject to deduplicate repeated key strings.
        self.memo = {}
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)

        Raises :exc:`JSONDecodeError` if the document contains anything
        beyond one JSON value (plus surrounding whitespace).
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj

    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.

        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
        return obj, end
| Python |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
    ...     raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.1.1'
# Names exported by ``from simplejson import *``.
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
    'OrderedDict',
]

__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
# Module-level encoder reused by dump()/dumps() whenever every argument is
# at its default value -- avoids constructing a JSONEncoder per call.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
    use_decimal=False,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If *indent* is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.

    """
    # cached encoder: reuse the module-level encoder only when every option
    # is at its default.  Bug fix: ``not use_decimal`` was missing from this
    # condition (unlike in dumps()), so dump(obj, fp, use_decimal=True)
    # silently used the non-decimal default encoder.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.

    """
    # cached encoder: reuse the module-level encoder when every option is at
    # its default value.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal, **kw).encode(obj)
# Module-level decoder reused by load()/loads() for all-default calls.
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
                               object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object
    containing a JSON document) to a Python object.

    This is a thin wrapper: the whole stream is read into memory and
    handed to :func:`loads`, which accepts the same keyword arguments.

    *encoding* determines the encoding used to interpret any :class:`str`
    objects decoded (``'utf-8'`` by default); it has no effect on
    :class:`unicode` input, and only ASCII-superset encodings work.

    *object_hook* is called with every decoded JSON object and its return
    value replaces the :class:`dict` (e.g. for JSON-RPC class hinting).
    *object_pairs_hook* is called with an ordered list of pairs for every
    object literal and takes priority over *object_hook*; use it for
    order-sensitive decoders such as :func:`collections.OrderedDict`.

    *parse_float* and *parse_int* are alternate constructors for JSON
    floats and ints (defaults equivalent to ``float(num_str)`` and
    ``int(num_str)``).  *parse_constant* is called for ``'-Infinity'``,
    ``'Infinity'`` and ``'NaN'``, e.g. to reject such non-spec numbers.

    If *use_decimal* is true (default: ``False``) then it implies
    ``parse_float=decimal.Decimal`` for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.

    """
    document = fp.read()
    return loads(document,
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant,
        object_pairs_hook=object_pairs_hook,
        use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    *encoding* determines the encoding used to interpret any :class:`str`
    objects decoded (``'utf-8'`` by default); it has no effect on
    :class:`unicode` input, and only ASCII-superset encodings work.

    *object_hook* is called with every decoded JSON object and its return
    value replaces the :class:`dict` (e.g. for JSON-RPC class hinting).
    *object_pairs_hook* is called with an ordered list of pairs for every
    object literal and takes priority over *object_hook*; use it for
    order-sensitive decoders such as :func:`collections.OrderedDict`.

    *parse_float* and *parse_int* are alternate constructors for JSON
    floats and ints (defaults equivalent to ``float(num_str)`` and
    ``int(num_str)``).  *parse_constant* is called for ``'-Infinity'``,
    ``'Infinity'`` and ``'NaN'``, e.g. to reject such non-spec numbers.

    If *use_decimal* is true (default: ``False``) then it implies
    ``parse_float=decimal.Decimal`` for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.

    """
    # Fast path: with every option at its default, reuse the cached decoder.
    all_defaults = (cls is None and encoding is None and object_hook is None
        and parse_int is None and parse_float is None
        and parse_constant is None and object_pairs_hook is None
        and not use_decimal and not kw)
    if all_defaults:
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Forward only the hooks that were actually supplied.
    for name, hook in (('object_hook', object_hook),
                       ('object_pairs_hook', object_pairs_hook),
                       ('parse_float', parse_float),
                       ('parse_int', parse_int),
                       ('parse_constant', parse_constant)):
        if hook is not None:
            kw[name] = hook
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
    """Switch between the C speedups and the pure-python implementations
    at runtime.

    Swaps the scanner/encoder entry points on the decoder, encoder and
    scanner modules, then rebuilds the cached module-level default
    encoder/decoder so they pick up the newly selected functions.
    """
    import simplejson.decoder as dec
    import simplejson.encoder as enc
    import simplejson.scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if enabled:
        # Prefer the C implementations, falling back where unavailable.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
            enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    dec.make_scanner = scan.make_scanner
    # Rebuild the cached defaults so the fast paths in dump()/load() use
    # the implementations selected above.
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
| Python |
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import simplejson as json
def main():
    """Read JSON from stdin (or *infile*), validate it, and pretty-print
    it to stdout (or *outfile*).

    Usage: ``tool.py [infile [outfile]]`` -- any other argument count
    exits with a usage message.
    """
    if len(sys.argv) == 1:
        infile = sys.stdin
        outfile = sys.stdout
    elif len(sys.argv) == 2:
        infile = open(sys.argv[1], 'rb')
        outfile = sys.stdout
    elif len(sys.argv) == 3:
        infile = open(sys.argv[1], 'rb')
        outfile = open(sys.argv[2], 'wb')
    else:
        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
    try:
        # OrderedDict preserves key order; use_decimal keeps full number
        # precision through the round-trip.
        obj = json.load(infile,
                        object_pairs_hook=json.OrderedDict,
                        use_decimal=True)
    except ValueError, e:
        # Surface parse errors as a clean exit message, not a traceback.
        raise SystemExit(e)
    json.dump(obj, outfile, sort_keys=True, indent='    ', use_decimal=True)
    outfile.write('\n')


if __name__ == '__main__':
    main()
| Python |
"""JSON token scanner
"""
import re
def _import_c_make_scanner():
try:
from simplejson._speedups import make_scanner
return make_scanner
except ImportError:
return None
c_make_scanner = _import_c_make_scanner()
# Public API of this module.
__all__ = ['make_scanner']

# Matches a JSON number: integer part, optional fraction, optional exponent.
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
    """Build the pure-python ``scan_once(string, idx)`` callable for
    *context* (a JSONDecoder instance).

    The returned callable dispatches on the first character of the next
    JSON value and returns ``(value, new_index)``; end-of-input and
    unrecognized input are signalled with ``StopIteration`` (the protocol
    expected by the callers in the decoder module).
    """
    # Bind decoder configuration to locals once, outside the hot loop.
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    object_pairs_hook = context.object_pairs_hook
    memo = context.memo

    def _scan_once(string, idx):
        try:
            nextchar = string[idx]
        except IndexError:
            raise StopIteration

        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict,
                _scan_once, object_hook, object_pairs_hook, memo)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5

        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise StopIteration

    def scan_once(string, idx):
        try:
            return _scan_once(string, idx)
        finally:
            # The key memo only lives for a single top-level scan.
            memo.clear()

    return scan_once

make_scanner = c_make_scanner or py_make_scanner
| Python |
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
    all
except NameError:
    # Python 2.4 lacks the all() builtin; provide an equivalent fallback.
    def all(seq):
        for elem in seq:
            if not elem:
                return False
        return True
class OrderedDict(dict, DictMixin):
    """Dictionary that remembers insertion order.

    Implementation note: order is tracked in a circular doubly linked
    list of ``[key, prev, next]`` cells anchored by a sentinel node
    (``self.__end``); ``self.__map`` maps each key to its cell so
    deletion is O(1).
    """

    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        # Only initialize the linked list once; re-running __init__ on an
        # existing instance must not discard its contents.
        try:
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)

    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        if key not in self:
            # Splice a new cell in just before the sentinel (i.e. at the end).
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the cell from the ordering list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        # Walk the linked list forward, yielding keys in insertion order.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        # Walk the linked list backward.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def popitem(self, last=True):
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        # Temporarily strip the (unpicklable, self-referential) linked-list
        # state so vars() captures only user attributes.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    # Bulk operations come from DictMixin and are built on the overridden
    # primitives above, so they all preserve ordering.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Two OrderedDicts are equal only if their (key, value) sequences
        # match in order; against a plain dict, order is ignored.
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in  zip(self.items(), other.items()))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other
| Python |
import logging
import os
#initialize logging
def create_env():
    """Return the default log level, honoring the LOGGING_DEBUG env var.

    DEBUG when LOGGING_DEBUG is "1", "True" or "on"; INFO otherwise.
    """
    flag = os.environ.get("LOGGING_DEBUG")
    if flag in ("1", "True", "on"):
        return logging.DEBUG
    return logging.INFO
class Formatter(object):
    """A core log formatter."""

    def console_formatter(self):
        """Return a logging.Formatter for console output.

        Records are rendered as ``time - logger name - level - message``.
        """
        # Renamed local from ``format`` to avoid shadowing the builtin.
        fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        return logging.Formatter(fmt)
def initialize_handlers():
    """Initializes Handlers.

    Builds the default handler set: one console (stream) handler using
    the core formatter, at the level chosen by create_env().
    """
    level = create_env()
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(Formatter().console_formatter())
    console_handler.setLevel(level)
    return [console_handler]
# Module-level cache so repeated logger() calls return the same logger
# instead of attaching duplicate handlers.
_log = None


def logger(name, handlers=None):
    """Return the module-wide logger, creating and caching it on first use.

    name     -- logger name used on the first call only.
    handlers -- optional zero-argument callable returning a handler list;
                defaults to initialize_handlers.

    Bug fix: the original checked the ``_log`` cache but never assigned it,
    so every call built a fresh logger and re-added handlers.
    """
    global _log
    if _log is None:
        log = logging.getLogger(name)
        log.setLevel(logging.DEBUG)
        factory = handlers if handlers is not None else initialize_handlers
        for handler in factory():
            log.addHandler(handler)
        _log = log  # cache so subsequent calls reuse this logger
    return _log
"""
Author: Noah Gift
Date: 08/05/2009
A really simple set of defaults for logging
============================================
You simply do this::
>>> from sensible.loginit import logger
>>> log = logger("MyApp")
>>> log.info("stuff")
2009-08-04 23:56:22,583 - MyApp - INFO - stuff
Environment Variable
--------------------
If you want to print log.debug messages, you simply set
the environment variable
export LOGGING_DEBUG=1
"""
| Python |
from _BookShelf import _BookShelf
class BookShelf(_BookShelf):
    """Public subclass of _BookShelf; currently adds no behavior.

    NOTE(review): _BookShelf looks like a generated/base implementation
    intended to be customized here -- confirm against its module.
    """
    pass
# Custom logic goes here.
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
import calendar
# Public API: the relativedelta type plus the seven weekday singletons.
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class weekday(object):
    """A weekday specifier, optionally qualified with an occurrence count.

    ``weekday(0)`` is Monday.  Calling an instance, e.g. ``MO(+2)``,
    yields a copy qualified with that occurrence (the second Monday when
    used with relativedelta).
    """

    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        # Reuse self when the qualifier is unchanged; otherwise build a
        # qualified copy.
        return self if n == self.n else self.__class__(self.weekday, n)

    def __eq__(self, other):
        # Anything without weekday/n attributes compares unequal.
        try:
            same = (self.weekday == other.weekday) and (self.n == other.n)
        except AttributeError:
            return False
        return bool(same)

    def __repr__(self):
        name = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        return "%s(%+d)" % (name, self.n) if self.n else name
# Singleton instances MO..SU for weekday indexes 0..6 (Monday=0); the
# ``weekdays`` tuple allows lookup by integer index.
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class relativedelta:
    """
    The relativedelta type is based on the specification of the excellent
    work done by M.-A. Lemburg in his mx.DateTime extension. However,
    notice that this type does *NOT* implement the same algorithm as
    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.

    There's two different ways to build a relativedelta instance. The
    first one is passing it two date/datetime classes:

        relativedelta(datetime1, datetime2)

    And the other way is to use the following keyword arguments:

        year, month, day, hour, minute, second, microsecond:
            Absolute information.

        years, months, weeks, days, hours, minutes, seconds, microseconds:
            Relative information, may be negative.

        weekday:
            One of the weekday instances (MO, TU, etc). These instances may
            receive a parameter N, specifying the Nth weekday, which could
            be positive or negative (like MO(+1) or MO(-2)). Not specifying
            it is the same as specifying +1. You can also use an integer,
            where 0=MO.

        leapdays:
            Will add given days to the date found, if year is a leap
            year, and the date found is post 28 of february.

        yearday, nlyearday:
            Set the yearday or the non-leap year day (jump leap days).
            These are converted to day/month/leapdays information.

    Here is the behavior of operations with relativedelta:

    1) Calculate the absolute year, using the 'year' argument, or the
       original datetime year, if the argument is not present.
    2) Add the relative 'years' argument to the absolute year.
    3) Do steps 1 and 2 for month/months.
    4) Calculate the absolute day, using the 'day' argument, or the
       original datetime day, if the argument is not present. Then,
       subtract from the day until it fits in the year and month
       found after their operations.
    5) Add the relative 'days' argument to the absolute day. Notice
       that the 'weeks' argument is multiplied by 7 and added to
       'days'.
    6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
       microsecond/microseconds.
    7) If the 'weekday' argument is present, calculate the weekday,
       with the given (wday, nth) tuple. wday is the index of the
       weekday (0-6, 0=Mon), and nth is the number of weeks to add
       forward or backward, depending on its signal. Notice that if
       the calculated date is already Monday, for example, using
       (0, 1) or (0, -1) won't change the day.
    """

    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            # Difference mode: build the delta that takes dt2 to dt1.
            if not isinstance(dt1, datetime.date) or \
               not isinstance(dt2, datetime.date):
                raise TypeError("relativedelta only diffs datetime/date")
            if type(dt1) is not type(dt2):
                # Promote the plain date so both operands are datetimes.
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            # Start from the raw calendar month difference, then adjust it
            # until dt2 + delta no longer overshoots dt1; the remaining gap
            # becomes seconds/microseconds.
            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            # Keyword mode: store relative and absolute components directly.
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if type(weekday) is int:
                # Accept a plain 0-6 index (0=MO) as well as weekday objects.
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                # Convert a day-of-year into month/day (non-leap calendar).
                ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273,
                           304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()

    def _fix(self):
        """Normalize relative fields (carry overflow up the unit chain)."""
        if abs(self.microseconds) > 999999:
            s = self.microseconds//abs(self.microseconds)
            div, mod = divmod(self.microseconds*s, 1000000)
            self.microseconds = mod*s
            self.seconds += div*s
        if abs(self.seconds) > 59:
            s = self.seconds//abs(self.seconds)
            div, mod = divmod(self.seconds*s, 60)
            self.seconds = mod*s
            self.minutes += div*s
        if abs(self.minutes) > 59:
            s = self.minutes//abs(self.minutes)
            div, mod = divmod(self.minutes*s, 60)
            self.minutes = mod*s
            self.hours += div*s
        if abs(self.hours) > 23:
            s = self.hours//abs(self.hours)
            div, mod = divmod(self.hours*s, 24)
            self.hours = mod*s
            self.days += div*s
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years += div*s
        # Remember whether any time-of-day component is in play, so that
        # __radd__ knows when a plain date must be promoted to datetime.
        if (self.hours or self.minutes or self.seconds or self.microseconds or
            self.hour is not None or self.minute is not None or
            self.second is not None or self.microsecond is not None):
            self._has_time = 1
        else:
            self._has_time = 0

    def _set_months(self, months):
        # Helper for difference mode: set months and spill whole years.
        self.months = months
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years = div*s
        else:
            self.years = 0

    def __radd__(self, other):
        """Apply this delta to a date/datetime (``date + relativedelta``)."""
        if not isinstance(other, datetime.date):
            raise TypeError("unsupported type for add operation")
        elif self._has_time and not isinstance(other, datetime.datetime):
            other = datetime.datetime.fromordinal(other.toordinal())
        year = (self.year or other.year)+self.years
        month = self.month or other.month
        if self.months:
            assert 1 <= abs(self.months) <= 12
            month += self.months
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        # Clamp the day to the target month's length (Jan 31 + 1 month
        # yields the last day of February).
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        if self.weekday:
            # Jump to the Nth requested weekday on or after (N>0) /
            # on or before (N<0) the computed date.
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth)-1)*7
            if nth > 0:
                jumpdays += (7-ret.weekday()+weekday)%7
            else:
                jumpdays += (ret.weekday()-weekday)%7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret

    def __rsub__(self, other):
        # date - relativedelta == date + (-relativedelta)
        return self.__neg__().__radd__(other)

    def __add__(self, other):
        """Combine two relativedeltas (other's absolute fields win)."""
        if not isinstance(other, relativedelta):
            raise TypeError("unsupported type for add operation")
        # Bug fix: the microsecond field previously read ``other.second``.
        return relativedelta(years=other.years+self.years,
                             months=other.months+self.months,
                             days=other.days+self.days,
                             hours=other.hours+self.hours,
                             minutes=other.minutes+self.minutes,
                             seconds=other.seconds+self.seconds,
                             microseconds=other.microseconds+self.microseconds,
                             leapdays=other.leapdays or self.leapdays,
                             year=other.year or self.year,
                             month=other.month or self.month,
                             day=other.day or self.day,
                             weekday=other.weekday or self.weekday,
                             hour=other.hour or self.hour,
                             minute=other.minute or self.minute,
                             second=other.second or self.second,
                             microsecond=other.microsecond or self.microsecond)

    def __sub__(self, other):
        """Return self - other (self's absolute fields win)."""
        if not isinstance(other, relativedelta):
            raise TypeError("unsupported type for sub operation")
        # Bug fixes: the old code computed other - self (wrong sign, fixed
        # upstream in dateutil 2.x) and read ``other.second`` for the
        # microsecond field.
        return relativedelta(years=self.years-other.years,
                             months=self.months-other.months,
                             days=self.days-other.days,
                             hours=self.hours-other.hours,
                             minutes=self.minutes-other.minutes,
                             seconds=self.seconds-other.seconds,
                             microseconds=self.microseconds-other.microseconds,
                             leapdays=self.leapdays or other.leapdays,
                             year=self.year or other.year,
                             month=self.month or other.month,
                             day=self.day or other.day,
                             weekday=self.weekday or other.weekday,
                             hour=self.hour or other.hour,
                             minute=self.minute or other.minute,
                             second=self.second or other.second,
                             microsecond=self.microsecond or other.microsecond)

    def __neg__(self):
        # Negate relative fields; absolute fields are kept unchanged.
        return relativedelta(years=-self.years,
                             months=-self.months,
                             days=-self.days,
                             hours=-self.hours,
                             minutes=-self.minutes,
                             seconds=-self.seconds,
                             microseconds=-self.microseconds,
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    def __nonzero__(self):
        # True when any relative or absolute component is set.
        return not (not self.years and
                    not self.months and
                    not self.days and
                    not self.hours and
                    not self.minutes and
                    not self.seconds and
                    not self.microseconds and
                    not self.leapdays and
                    self.year is None and
                    self.month is None and
                    self.day is None and
                    self.weekday is None and
                    self.hour is None and
                    self.minute is None and
                    self.second is None and
                    self.microsecond is None)
    # Python 3 spells the truthiness hook __bool__.
    __bool__ = __nonzero__

    def __mul__(self, other):
        # Scale relative fields; absolute fields are kept unchanged.
        f = float(other)
        return relativedelta(years=self.years*f,
                             months=self.months*f,
                             days=self.days*f,
                             hours=self.hours*f,
                             minutes=self.minutes*f,
                             seconds=self.seconds*f,
                             microseconds=self.microseconds*f,
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    def __eq__(self, other):
        if not isinstance(other, relativedelta):
            return False
        if self.weekday or other.weekday:
            if not self.weekday or not other.weekday:
                return False
            if self.weekday.weekday != other.weekday.weekday:
                return False
            # An unqualified weekday is equivalent to N == +1.
            n1, n2 = self.weekday.n, other.weekday.n
            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
                return False
        # NOTE(review): the relative ``microseconds`` field is not compared
        # here (matches the historical upstream code) -- confirm intent.
        return (self.years == other.years and
                self.months == other.months and
                self.days == other.days and
                self.hours == other.hours and
                self.minutes == other.minutes and
                self.seconds == other.seconds and
                self.leapdays == other.leapdays and
                self.year == other.year and
                self.month == other.month and
                self.day == other.day and
                self.hour == other.hour and
                self.minute == other.minute and
                self.second == other.second and
                self.microsecond == other.microsecond)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __div__(self, other):
        return self.__mul__(1/float(other))
    # Python 3 spells division __truediv__.
    __truediv__ = __div__

    def __repr__(self):
        l = []
        for attr in ["years", "months", "days", "leapdays",
                     "hours", "minutes", "seconds", "microseconds"]:
            value = getattr(self, attr)
            if value:
                l.append("%s=%+d" % (attr, value))
        for attr in ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                l.append("%s=%s" % (attr, repr(value)))
        return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
# vim:ts=4:sw=4:et
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.