# (dataset extraction artifact, preserved as a comment)
# code stringlengths 1 1.72M | language stringclasses 1 value |
# |---|---|
# mako/pygen.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for generating and formatting literal Python code."""
import re, string
from StringIO import StringIO
from mako import exceptions
class PythonPrinter(object):
    """Emit lines of generated Python source to a stream, tracking and
    applying the correct indentation level.

    Lines passed to :meth:`writeline` are inspected for colon-terminated
    compound statements (``if``, ``for``, ``def`` ...) which increase the
    indent, and for dedenting keywords (``else``, ``except`` ...) which
    decrease it.  Blocks of already-indented code can be buffered via
    :meth:`write_indented_block` and are re-aligned to the current indent
    level when flushed.
    """

    def __init__(self, stream):
        # indentation counter
        self.indent = 0

        # a stack storing information about why we incremented
        # the indentation counter, to help us determine if we
        # should decrement it
        self.indent_detail = []

        # the string of whitespace multiplied by the indent
        # counter to produce a line
        self.indentstring = " "

        # the stream we are writing to
        self.stream = stream

        # a list of lines that represents a buffered "block" of code,
        # which can be later printed relative to an indent level
        self.line_buffer = []

        # True while we are emitting writeline()-style lines; set False
        # while buffering an indented block, so writeline() knows when
        # the buffer needs flushing
        self.in_indent_lines = False

        self._reset_multi_line_flags()

    def write(self, text):
        """write raw text to the stream with no indentation handling."""
        self.stream.write(text)

    def write_indented_block(self, block):
        """print a line or lines of python which already contain indentation.

        The indentation of the total block of lines will be adjusted to that of
        the current indent level."""
        self.in_indent_lines = False
        for l in re.split(r'\r?\n', block):
            self.line_buffer.append(l)

    def writelines(self, *lines):
        """print a series of lines of python."""
        for line in lines:
            self.writeline(line)

    def writeline(self, line):
        """print a line of python, indenting it according to the current
        indent level.

        this also adjusts the indentation counter according to the
        content of the line.
        """
        if not self.in_indent_lines:
            self._flush_adjusted_lines()
            self.in_indent_lines = True

        decreased_indent = False

        # None, comment-only and blank lines carry no code text
        if (line is None or
            re.match(r"^\s*#",line) or
            re.match(r"^\s*$", line)
            ):
            hastext = False
        else:
            hastext = True

        is_comment = line and len(line) and line[0] == '#'

        # see if this line should decrease the indentation level
        if (not decreased_indent and
            not is_comment and
            (not hastext or self._is_unindentor(line))
            ):
            if self.indent > 0:
                self.indent -=1
                # if the indent_detail stack is empty, the user
                # probably put extra closures - the resulting
                # module won't compile.
                if len(self.indent_detail) == 0:
                    raise exceptions.SyntaxException(
                        "Too many whitespace closures")
                self.indent_detail.pop()

        if line is None:
            return

        # write the line
        self.stream.write(self._indent_line(line) + "\n")

        # see if this line should increase the indentation level.
        # note that a line can both decrease (before printing) and
        # then increase (after printing) the indentation level.
        if re.search(r":[ \t]*(?:#.*)?$", line):
            # increment indentation count, and also
            # keep track of what the keyword was that indented us,
            # if it is a python compound statement keyword
            # where we might have to look for an "unindent" keyword
            match = re.match(r"^\s*(if|try|elif|while|for)", line)
            if match:
                # its a "compound" keyword, so we will check for "unindentors"
                indentor = match.group(1)
                self.indent +=1
                self.indent_detail.append(indentor)
            else:
                indentor = None
                # its not a "compound" keyword.  but lets also
                # test for valid Python keywords that might be indenting us,
                # else assume its a non-indenting line
                m2 = re.match(r"^\s*(def|class|else|elif|except|finally)", line)
                if m2:
                    self.indent += 1
                    self.indent_detail.append(indentor)

    def close(self):
        """close this printer, flushing any remaining lines."""
        self._flush_adjusted_lines()

    def _is_unindentor(self, line):
        """return true if the given line is an 'unindentor',
        relative to the last 'indent' event received.
        """
        # no indentation detail has been pushed on; return False
        if len(self.indent_detail) == 0:
            return False

        indentor = self.indent_detail[-1]

        # the last indent keyword we grabbed is not a
        # compound statement keyword; return False
        if indentor is None:
            return False

        # if the current line doesnt have one of the "unindentor" keywords,
        # return False
        match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
        if not match:
            return False

        # whitespace matches up, we have a compound indentor,
        # and this line has an unindentor, this
        # is probably good enough
        return True

        # should we decide that its not good enough, heres
        # more stuff to check.
        #keyword = match.group(1)

        # match the original indent keyword
        #for crit in [
        #    (r'if|elif', r'else|elif'),
        #    (r'try', r'except|finally|else'),
        #    (r'while|for', r'else'),
        #]:
        #    if re.match(crit[0], indentor) and re.match(crit[1], keyword):
        #        return True
        #return False

    def _indent_line(self, line, stripspace=''):
        """indent the given line according to the current indent level.

        stripspace is a string of space that will be truncated from the
        start of the line before indenting."""
        return re.sub(r"^%s" % stripspace, self.indentstring
            * self.indent, line)

    def _reset_multi_line_flags(self):
        """reset the flags which would indicate we are in a backslashed
        or triple-quoted section."""
        self.backslashed, self.triplequoted = False, False

    def _in_multi_line(self, line):
        """return true if the given line is part of a multi-line block,
        via backslash or triple-quote."""
        # we are only looking for explicitly joined lines here, not
        # implicit ones (i.e. brackets, braces etc.).  this is just to
        # guard against the possibility of modifying the space inside of
        # a literal multiline string with unfortunately placed
        # whitespace
        current_state = (self.backslashed or self.triplequoted)

        if re.search(r"\\$", line):
            self.backslashed = True
        else:
            self.backslashed = False

        # an odd number of triple-quote tokens toggles triple-quoted state
        triples = len(re.findall(r"\"\"\"|\'\'\'", line))
        if triples == 1 or triples % 2 != 0:
            self.triplequoted = not self.triplequoted

        return current_state

    def _flush_adjusted_lines(self):
        # re-align the buffered, pre-indented block to the current indent
        # level, leaving the interior of multi-line strings untouched
        stripspace = None
        self._reset_multi_line_flags()

        for entry in self.line_buffer:
            if self._in_multi_line(entry):
                self.stream.write(entry + "\n")
            else:
                entry = entry.expandtabs()
                # the first "real" (non-blank, non-comment) line fixes
                # the whitespace margin stripped from the whole block
                if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
                    stripspace = re.match(r"^([ \t]*)", entry).group(1)
                self.stream.write(self._indent_line(entry, stripspace) + "\n")

        self.line_buffer = []
        self._reset_multi_line_flags()
def adjust_whitespace(text):
    """Remove the left-whitespace margin of a block of Python code.

    The margin is taken from the first non-blank, non-comment line.
    Lines that begin inside a backslash continuation or a triple-quoted
    string are passed through untouched, so literal string contents keep
    their original spacing.
    """
    BACKSLASH, TRIPLE = 0, 1
    # scanner state carried across lines: [in-backslash-continuation,
    # open triple-quote token (or False)]
    scan_state = [False, False]

    def consume(pattern, remainder):
        # match *pattern* at the start of *remainder*; return
        # (match-or-None, text remaining after the match)
        found = re.match(pattern, remainder)
        if found is None:
            return None, remainder
        return found, remainder[len(found.group(0)):]

    def crosses_multi_line(line):
        # the caller cares whether we *entered* this line inside a
        # continuation or string; record that before updating state
        was_inside = scan_state[BACKSLASH] or scan_state[TRIPLE]

        scan_state[BACKSLASH] = bool(re.search(r"\\$", line))

        rest = line
        while rest:
            if scan_state[TRIPLE]:
                # look for the closing quote token; otherwise skip ahead
                closer, rest = consume(r"%s" % scan_state[TRIPLE], rest)
                if closer:
                    scan_state[TRIPLE] = False
                else:
                    _, rest = consume(r".*?(?=%s|$)" % scan_state[TRIPLE], rest)
            else:
                comment, rest = consume(r'#', rest)
                if comment:
                    # rest of the line is a comment; stop scanning
                    return was_inside

                opener, rest = consume(r"\"\"\"|\'\'\'", rest)
                if opener:
                    scan_state[TRIPLE] = opener.group(0)
                    continue

                _, rest = consume(r".*?(?=\"\"\"|\'\'\'|#|$)", rest)

        return was_inside

    def strip_margin(line, margin=''):
        # margin may still be None here (only for blank/comment lines
        # seen before any code); the resulting "^None" pattern never
        # matches such lines, leaving them unchanged
        return re.sub(r"^%s" % margin, '', line)

    result = []
    margin = None
    for raw in re.split(r'\r?\n', text):
        if crosses_multi_line(raw):
            result.append(raw)
        else:
            raw = raw.expandtabs()
            if margin is None and re.search(r"^[ \t]*[^# \t]", raw):
                margin = re.match(r"^([ \t]*)", raw).group(1)
            result.append(strip_margin(raw, margin))
    return "\n".join(result)
# | Python |   (extraction artifact separating concatenated modules)
# mako/lookup.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import os, stat, posixpath, re
from mako import exceptions, util
from mako.template import Template
try:
import threading
except:
import dummy_threading as threading
class TemplateCollection(object):
    """Abstract base for a set of :class:`.Template` objects keyed by uri.

    Template tags that reference other templates -- ``<%include>``,
    ``<%namespace>``, ``<%inherit>`` -- hand the string URI from their
    ``file`` attribute to the owning template's
    :class:`.TemplateCollection` for resolution.  The usual concrete
    implementation is :class:`.TemplateLookup`.
    """

    def has_template(self, uri):
        """Return ``True`` if this collection can produce a
        :class:`.Template` for the given uri.

        :param uri: String uri of the template to be resolved.
        """
        try:
            self.get_template(uri)
        except exceptions.TemplateLookupException:
            return False
        else:
            return True

    def get_template(self, uri, relativeto=None):
        """Return the :class:`.Template` corresponding to the given uri.

        This default implementation raises ``NotImplementedError``.
        Implementations should raise :class:`.TemplateLookupException`
        when the given uri cannot be resolved.

        :param uri: String uri of the template to be resolved.
        :param relativeto: if present, the given uri is assumed to
         be relative to this uri.
        """
        raise NotImplementedError()

    def filename_to_uri(self, uri, filename):
        """Convert the given filename to a uri relative to
        this :class:`.TemplateCollection`."""
        return uri

    def adjust_uri(self, uri, filename):
        """Adjust the given uri based on the calling filename.

        At runtime the ``filename`` parameter is taken directly from the
        ``filename`` attribute of the calling template, so a custom
        subclass may place any identifier it likes there and receive it
        back here.
        """
        return uri
class TemplateLookup(TemplateCollection):
    """Represent a collection of templates that locates template source files
    from the local filesystem.

    The primary argument is the ``directories`` argument, the list of
    directories to search::

        lookup = TemplateLookup(["/path/to/templates"])
        some_template = lookup.get_template("/index.html")

    The :class:`.TemplateLookup` can also be given :class:`.Template` objects
    programatically using :meth:`.put_string` or :meth:`.put_template`::

        lookup = TemplateLookup()
        lookup.put_string("base.html", '''
            <html><body>${self.next()}</body></html>
        ''')
        lookup.put_string("hello.html", '''
            <%include file='base.html'/>
            Hello, world !
        ''')

    :param directories: A list of directory names which will be
     searched for a particular template URI.  The URI is appended
     to each directory and the filesystem checked.

    :param collection_size: Approximate size of the collection used
     to store templates.  If left at its default of -1, the size
     is unbounded, and a plain Python dictionary is used to
     relate URI strings to :class:`.Template` instances.
     Otherwise, a least-recently-used cache object is used which
     will maintain the size of the collection approximately to
     the number given.

    :param filesystem_checks: When at its default value of ``True``,
     each call to :meth:`TemplateLookup.get_template()` will
     compare the filesystem last modified time to the time in
     which an existing :class:`.Template` object was created.
     This allows the :class:`.TemplateLookup` to regenerate a
     new :class:`.Template` whenever the original source has
     been updated.  Set this to ``False`` for a very minor
     performance increase.

    :param modulename_callable: A callable which, when present,
     is passed the path of the source file as well as the
     requested URI, and then returns the full path of the
     generated Python module file.  This is used to inject
     alternate schemes for Python module location.  If left at
     its default of ``None``, the built in system of generation
     based on ``module_directory`` plus ``uri`` is used.

    All other keyword parameters available for
    :class:`.Template` are mirrored here.  When new
    :class:`.Template` objects are created, the keywords
    established with this :class:`.TemplateLookup` are passed on
    to each new :class:`.Template`.
    """

    def __init__(self,
                 directories=None,
                 module_directory=None,
                 filesystem_checks=True,
                 collection_size=-1,
                 format_exceptions=False,
                 error_handler=None,
                 disable_unicode=False,
                 bytestring_passthrough=False,
                 output_encoding=None,
                 encoding_errors='strict',
                 cache_type=None,
                 cache_dir=None, cache_url=None,
                 cache_enabled=True,
                 modulename_callable=None,
                 default_filters=None,
                 buffer_filters=(),
                 strict_undefined=False,
                 imports=None,
                 input_encoding=None,
                 preprocessor=None):
        # normalize the search directories to posix-style paths
        self.directories = [posixpath.normpath(d) for d in
                            util.to_list(directories, ())
                            ]
        self.module_directory = module_directory
        self.modulename_callable = modulename_callable
        self.filesystem_checks = filesystem_checks
        self.collection_size = collection_size

        # keyword arguments passed through to every Template this
        # lookup constructs
        self.template_args = {
            'format_exceptions':format_exceptions,
            'error_handler':error_handler,
            'disable_unicode':disable_unicode,
            'bytestring_passthrough':bytestring_passthrough,
            'output_encoding':output_encoding,
            'encoding_errors':encoding_errors,
            'input_encoding':input_encoding,
            'module_directory':module_directory,
            'cache_type':cache_type,
            'cache_dir':cache_dir or module_directory,
            'cache_url':cache_url,
            'cache_enabled':cache_enabled,
            'default_filters':default_filters,
            'buffer_filters':buffer_filters,
            'strict_undefined':strict_undefined,
            'imports':imports,
            'preprocessor':preprocessor}

        # unbounded plain dicts, or LRU caches bounded to collection_size
        if collection_size == -1:
            self._collection = {}
            self._uri_cache = {}
        else:
            self._collection = util.LRUCache(collection_size)
            self._uri_cache = util.LRUCache(collection_size)

        # serializes _load() so concurrent threads don't compile the
        # same template twice
        self._mutex = threading.Lock()

    def get_template(self, uri):
        """Return a :class:`.Template` object corresponding to the given
        URL.

        Note the "relativeto" argument is not supported here at the moment.
        """
        try:
            if self.filesystem_checks:
                return self._check(uri, self._collection[uri])
            else:
                return self._collection[uri]
        except KeyError:
            # not cached yet; search the directories for the source file
            u = re.sub(r'^\/+', '', uri)
            for dir in self.directories:
                srcfile = posixpath.normpath(posixpath.join(dir, u))
                if os.path.isfile(srcfile):
                    return self._load(srcfile, uri)
            else:
                raise exceptions.TopLevelLookupException(
                    "Cant locate template for uri %r" % uri)

    def adjust_uri(self, uri, relativeto):
        """adjust the given uri based on the given relative uri."""
        key = (uri, relativeto)
        if key in self._uri_cache:
            return self._uri_cache[key]

        # NOTE(review): assumes a non-empty uri; uri[0] raises
        # IndexError on an empty string -- confirm callers never
        # pass '' here
        if uri[0] != '/':
            if relativeto is not None:
                v = self._uri_cache[key] = posixpath.join(
                    posixpath.dirname(relativeto), uri)
            else:
                v = self._uri_cache[key] = '/' + uri
        else:
            v = self._uri_cache[key] = uri
        return v

    def filename_to_uri(self, filename):
        """Convert the given filename to a uri relative to
        this TemplateCollection."""
        try:
            return self._uri_cache[filename]
        except KeyError:
            value = self._relativeize(filename)
            self._uri_cache[filename] = value
            return value

    def _relativeize(self, filename):
        """Return the portion of a filename that is 'relative'
        to the directories in this lookup.

        Returns None when the filename is outside all directories.
        """
        filename = posixpath.normpath(filename)
        for dir in self.directories:
            if filename[0:len(dir)] == dir:
                return filename[len(dir):]
        else:
            return None

    def _load(self, filename, uri):
        # compile the template at ``filename`` and cache it under
        # ``uri``; serialized across threads via self._mutex
        self._mutex.acquire()
        try:
            try:
                # try returning from collection one
                # more time in case concurrent thread already loaded
                return self._collection[uri]
            except KeyError:
                pass
            try:
                if self.modulename_callable is not None:
                    module_filename = self.modulename_callable(filename, uri)
                else:
                    module_filename = None
                self._collection[uri] = template = Template(
                    uri=uri,
                    filename=posixpath.normpath(filename),
                    lookup=self,
                    module_filename=module_filename,
                    **self.template_args)
                return template
            except:
                # if compilation fails etc, ensure
                # template is removed from collection,
                # re-raise
                self._collection.pop(uri, None)
                raise
        finally:
            self._mutex.release()

    def _check(self, uri, template):
        # reload the template from disk if its source file has been
        # modified since it was compiled
        if template.filename is None:
            return template
        try:
            template_stat = os.stat(template.filename)
            if template.module._modified_time < \
                    template_stat[stat.ST_MTIME]:
                # source is newer than the compiled module; recompile
                self._collection.pop(uri, None)
                return self._load(template.filename, uri)
            else:
                return template
        except OSError:
            # source file no longer accessible
            self._collection.pop(uri, None)
            raise exceptions.TemplateLookupException(
                "Cant locate template for uri %r" % uri)

    def put_string(self, uri, text):
        """Place a new :class:`.Template` object into this
        :class:`.TemplateLookup`, based on the given string of
        text.
        """
        self._collection[uri] = Template(
            text,
            lookup=self,
            uri=uri,
            **self.template_args)

    def put_template(self, uri, template):
        """Place a new :class:`.Template` object into this
        :class:`.TemplateLookup`, based on the given
        :class:`.Template` object.
        """
        self._collection[uri] = template
# | Python |   (extraction artifact separating concatenated modules)
# mako/lexer.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides the Lexer class for parsing template strings into parse trees."""
import re, codecs
from mako import parsetree, exceptions, util
from mako.pygen import adjust_whitespace
_regexp_cache = {}
class Lexer(object):
    """Parse a Mako template string into a parse tree rooted at a
    :class:`.parsetree.TemplateNode`.

    The lexer walks ``self.text`` from ``self.match_position`` forward;
    :meth:`parse` tries each ``match_*`` method in turn, and each
    successful match appends a parse tree node and advances the
    position.
    """

    def __init__(self, text, filename=None,
                 disable_unicode=False,
                 input_encoding=None, preprocessor=None):
        self.text = text
        self.filename = filename
        # root of the parse tree under construction
        self.template = parsetree.TemplateNode(self.filename)
        # line/column of the most recent successful match
        self.matched_lineno = 1
        self.matched_charpos = 0
        # current scan position
        self.lineno = 1
        self.match_position = 0
        # stack of currently-open <%tag> nodes
        self.tag = []
        # stack of currently-open % control lines (if/for/while/try)
        self.control_line = []
        self.disable_unicode = disable_unicode
        self.encoding = input_encoding

        if util.py3k and disable_unicode:
            raise exceptions.UnsupportedError(
                "Mako for Python 3 does not "
                "support disabling Unicode")

        # normalize preprocessor to a list of callables
        if preprocessor is None:
            self.preprocessor = []
        elif not hasattr(preprocessor, '__iter__'):
            self.preprocessor = [preprocessor]
        else:
            self.preprocessor = preprocessor

    @property
    def exception_kwargs(self):
        # keyword args locating the current match, used when raising
        # Mako exceptions
        return {'source':self.text,
                'lineno':self.matched_lineno,
                'pos':self.matched_charpos,
                'filename':self.filename}

    def match(self, regexp, flags=None):
        """compile the given regexp, cache the reg, and call match_reg()."""
        try:
            reg = _regexp_cache[(regexp, flags)]
        except KeyError:
            if flags:
                reg = re.compile(regexp, flags)
            else:
                reg = re.compile(regexp)
            _regexp_cache[(regexp, flags)] = reg

        return self.match_reg(reg)

    def match_reg(self, reg):
        """match the given regular expression object to the current text position.

        if a match occurs, update the current text and line position.
        """
        mp = self.match_position

        match = reg.match(self.text, self.match_position)
        if match:
            (start, end) = match.span()
            # a zero-width match still advances one character so the
            # scan cannot stall
            if end == start:
                self.match_position = end + 1
            else:
                self.match_position = end
            self.matched_lineno = self.lineno
            lines = re.findall(r"\n", self.text[mp:self.match_position])
            # walk backwards to the previous newline to find the column.
            # note: when mp == 0 the cp >= 0 test short-circuits, so
            # self.textlength need not exist yet (parse() sets it after
            # the initial coding-comment match)
            cp = mp - 1
            while (cp >= 0 and cp<self.textlength and self.text[cp] != '\n'):
                cp -=1
            self.matched_charpos = mp - cp
            self.lineno += len(lines)
            #print "MATCHED:", match.group(0), "LINE START:",
            # self.matched_lineno, "LINE END:", self.lineno
        #print "MATCH:", regexp, "\n", self.text[mp : mp + 15], (match and "TRUE" or "FALSE")
        return match

    def parse_until_text(self, *text):
        """scan forward until one of the given regexp alternates matches,
        skipping comments and quoted strings; return a tuple of
        (intervening text, matched terminator)."""
        startpos = self.match_position
        while True:
            # skip comments wholesale
            match = self.match(r'#.*\n')
            if match:
                continue
            # skip over string literals so terminators inside them
            # are not seen
            match = self.match(r'(\"\"\"|\'\'\'|\"|\')')
            if match:
                m = self.match(r'.*?%s' % match.group(1), re.S)
                if not m:
                    raise exceptions.SyntaxException(
                        "Unmatched '%s'" %
                        match.group(1),
                        **self.exception_kwargs)
            else:
                match = self.match(r'(%s)' % r'|'.join(text))
                if match:
                    return \
                        self.text[startpos:self.match_position-len(match.group(1))],\
                        match.group(1)
                else:
                    # consume plain text up to the next interesting token
                    match = self.match(r".*?(?=\"|\'|#|%s)" % r'|'.join(text), re.S)
                    if not match:
                        raise exceptions.SyntaxException(
                            "Expected: %s" %
                            ','.join(text),
                            **self.exception_kwargs)

    def append_node(self, nodecls, *args, **kwargs):
        """construct a parse tree node of class ``nodecls`` at the current
        position and attach it to the tree, maintaining the tag and
        control-line stacks."""
        kwargs.setdefault('source', self.text)
        kwargs.setdefault('lineno', self.matched_lineno)
        kwargs.setdefault('pos', self.matched_charpos)
        kwargs['filename'] = self.filename
        node = nodecls(*args, **kwargs)
        # attach to the innermost open tag, else to the template root
        if len(self.tag):
            self.tag[-1].nodes.append(node)
        else:
            self.template.nodes.append(node)
        if isinstance(node, parsetree.Tag):
            if len(self.tag):
                node.parent = self.tag[-1]
            self.tag.append(node)
        elif isinstance(node, parsetree.ControlLine):
            if node.isend:
                self.control_line.pop()
            elif node.is_primary:
                self.control_line.append(node)
            elif len(self.control_line) and \
                    not self.control_line[-1].is_ternary(node.keyword):
                raise exceptions.SyntaxException(
                    "Keyword '%s' not a legal ternary for keyword '%s'" %
                    (node.keyword, self.control_line[-1].keyword),
                    **self.exception_kwargs)

    # matches a "magic encoding comment" such as "# -*- coding: utf-8 -*-"
    _coding_re = re.compile(r'#.*coding[:=]\s*([-\w.]+).*\r?\n')

    def decode_raw_stream(self, text, decode_raw, known_encoding, filename):
        """given string/unicode or bytes/string, determine encoding
        from magic encoding comment, return body as unicode
        or raw if decode_raw=False
        """
        if isinstance(text, unicode):
            # already decoded; just report the declared encoding
            m = self._coding_re.match(text)
            encoding = m and m.group(1) or known_encoding or 'ascii'
            return encoding, text

        if text.startswith(codecs.BOM_UTF8):
            text = text[len(codecs.BOM_UTF8):]
            parsed_encoding = 'utf-8'
            m = self._coding_re.match(text.decode('utf-8', 'ignore'))
            if m is not None and m.group(1) != 'utf-8':
                raise exceptions.CompileException(
                    "Found utf-8 BOM in file, with conflicting "
                    "magic encoding comment of '%s'" % m.group(1),
                    text.decode('utf-8', 'ignore'),
                    0, 0, filename)
        else:
            m = self._coding_re.match(text.decode('utf-8', 'ignore'))
            if m:
                parsed_encoding = m.group(1)
            else:
                parsed_encoding = known_encoding or 'ascii'

        if decode_raw:
            try:
                text = text.decode(parsed_encoding)
            except UnicodeDecodeError, e:
                raise exceptions.CompileException(
                    "Unicode decode operation of encoding '%s' failed" %
                    parsed_encoding,
                    text.decode('utf-8', 'ignore'),
                    0, 0, filename)

        return parsed_encoding, text

    def parse(self):
        """run the full lex pass; return the completed TemplateNode."""
        self.encoding, self.text = self.decode_raw_stream(self.text,
                                        not self.disable_unicode,
                                        self.encoding,
                                        self.filename,)

        for preproc in self.preprocessor:
            self.text = preproc(self.text)

        # push the match marker past the
        # encoding comment.
        self.match_reg(self._coding_re)

        self.textlength = len(self.text)

        while (True):
            if self.match_position > self.textlength:
                break

            if self.match_end():
                break
            if self.match_expression():
                continue
            if self.match_control_line():
                continue
            if self.match_comment():
                continue
            if self.match_tag_start():
                continue
            if self.match_tag_end():
                continue
            if self.match_python_block():
                continue
            if self.match_text():
                continue

            if self.match_position > self.textlength:
                break
            # no matcher made progress and we are not at the end
            raise exceptions.CompileException("assertion failed")

        if len(self.tag):
            raise exceptions.SyntaxException("Unclosed tag: <%%%s>" %
                                             self.tag[-1].keyword,
                                             **self.exception_kwargs)
        if len(self.control_line):
            raise exceptions.SyntaxException("Unterminated control keyword: '%s'" %
                                             self.control_line[-1].keyword,
                                             self.text,
                                             self.control_line[-1].lineno,
                                             self.control_line[-1].pos, self.filename)
        return self.template

    def match_tag_start(self):
        """match an opening <%tag ...> (or self-closing <%tag .../>)."""
        match = self.match(r'''
            \<%     # opening tag
            ([\w\.\:]+)   # keyword
            ((?:\s+\w+|\s*=\s*|".*?"|'.*?')*)  # attrname, = sign, string expression
            \s*     # more whitespace
            (/)?>   # closing
            ''',
            re.I | re.S | re.X)

        if match:
            keyword, attr, isend = match.group(1), match.group(2), match.group(3)
            self.keyword = keyword
            attributes = {}
            if attr:
                for att in re.findall(r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
                    key, val1, val2 = att
                    # value came from either single- or double-quoted form
                    text = val1 or val2
                    text = text.replace('\r\n', '\n')
                    attributes[key] = text
            self.append_node(parsetree.Tag, keyword, attributes)
            if isend:
                # self-closing; pop it right back off the stack
                self.tag.pop()
            else:
                if keyword == 'text':
                    # <%text> content is consumed verbatim up to </%text>
                    match = self.match(r'(.*?)(?=\</%text>)', re.S)
                    if not match:
                        raise exceptions.SyntaxException(
                            "Unclosed tag: <%%%s>" %
                            self.tag[-1].keyword,
                            **self.exception_kwargs)
                    self.append_node(parsetree.Text, match.group(1))
                    return self.match_tag_end()
            return True
        else:
            return False

    def match_tag_end(self):
        """match a closing </%tag>, popping the open-tag stack."""
        match = self.match(r'\</%[\t ]*(.+?)[\t ]*>')
        if match:
            if not len(self.tag):
                raise exceptions.SyntaxException(
                    "Closing tag without opening tag: </%%%s>" %
                    match.group(1),
                    **self.exception_kwargs)
            elif self.tag[-1].keyword != match.group(1):
                raise exceptions.SyntaxException(
                    "Closing tag </%%%s> does not match tag: <%%%s>" %
                    (match.group(1), self.tag[-1].keyword),
                    **self.exception_kwargs)
            self.tag.pop()
            return True
        else:
            return False

    def match_end(self):
        """match the end of the text; returns the matched text if any,
        else True when at end, else False."""
        match = self.match(r'\Z', re.S)
        if match:
            string = match.group()
            if string:
                return string
            else:
                return True
        else:
            return False

    def match_text(self):
        """match a run of plain template text, stopping before any
        Mako construct."""
        match = self.match(r"""
            (.*?)         # anything, followed by:
            (
             (?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based
                                         # comment preceded by a
                                         # consumed newline and whitespace
             |
             (?=\${)      # an expression
             |
             (?=\#\*)     # multiline comment
             |
             (?=</?[%&])  # a substitution or block or call start or end
                          # - don't consume
             |
             (\\\r?\n)    # an escaped newline - throw away
             |
             \Z           # end of string
            )""", re.X | re.S)

        if match:
            text = match.group(1)
            if text:
                self.append_node(parsetree.Text, text)
            return True
        else:
            return False

    def match_python_block(self):
        """match a <% %> (inline) or <%! %> (module-level) code block."""
        match = self.match(r"<%(!)?")
        if match:
            line, pos = self.matched_lineno, self.matched_charpos
            text, end = self.parse_until_text(r'%>')
            # the trailing newline helps
            # compiler.parse() not complain about indentation
            text = adjust_whitespace(text) + "\n"
            self.append_node(
                parsetree.Code,
                text,
                match.group(1)=='!', lineno=line, pos=pos)
            return True
        else:
            return False

    def match_expression(self):
        """match a ${...} expression, optionally with |filter escapes."""
        match = self.match(r"\${")
        if match:
            line, pos = self.matched_lineno, self.matched_charpos
            text, end = self.parse_until_text(r'\|', r'}')
            if end == '|':
                # everything after '|' up to '}' is the escape/filter list
                escapes, end = self.parse_until_text(r'}')
            else:
                escapes = ""
            text = text.replace('\r\n', '\n')
            self.append_node(
                parsetree.Expression,
                text, escapes.strip(),
                lineno=line, pos=pos)
            return True
        else:
            return False

    def match_control_line(self):
        """match a '%' control line or a '##' single-line comment."""
        match = self.match(r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)(?:\r?\n|\Z)", re.M)
        if match:
            operator = match.group(1)
            text = match.group(2)
            if operator == '%':
                m2 = re.match(r'(end)?(\w+)\s*(.*)', text)
                if not m2:
                    raise exceptions.SyntaxException(
                        "Invalid control line: '%s'" %
                        text,
                        **self.exception_kwargs)
                isend, keyword = m2.group(1, 2)
                isend = (isend is not None)

                if isend:
                    # validate the 'end<keyword>' against the open stack
                    if not len(self.control_line):
                        raise exceptions.SyntaxException(
                            "No starting keyword '%s' for '%s'" %
                            (keyword, text),
                            **self.exception_kwargs)
                    elif self.control_line[-1].keyword != keyword:
                        raise exceptions.SyntaxException(
                            "Keyword '%s' doesn't match keyword '%s'" %
                            (text, self.control_line[-1].keyword),
                            **self.exception_kwargs)
                self.append_node(parsetree.ControlLine, keyword, isend, text)
            else:
                # '##' operator: a single-line comment
                self.append_node(parsetree.Comment, text)
            return True
        else:
            return False

    def match_comment(self):
        """matches the multiline version of a comment"""
        match = self.match(r"<%doc>(.*?)</%doc>", re.S)
        if match:
            self.append_node(parsetree.Comment, match.group(1))
            return True
        else:
            return False
# | Python |   (extraction artifact separating concatenated modules)
# mako/parsetree.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""defines the parse tree components for Mako templates."""
from mako import exceptions, ast, util, filters
import re
class Node(object):
    """Base class for all nodes in the Mako parse tree.

    Records the originating template source along with the line,
    position and filename where the node was matched, so that errors
    can point back into the template.
    """

    def __init__(self, source, lineno, pos, filename):
        self.source = source
        self.lineno = lineno
        self.pos = pos
        self.filename = filename

    @property
    def exception_kwargs(self):
        # keyword args suitable for constructing a Mako exception
        # that points at this node
        return {'source':self.source, 'lineno':self.lineno,
                'pos':self.pos, 'filename':self.filename}

    def get_children(self):
        """Return this node's child nodes; leaf node types have none."""
        return []

    def accept_visitor(self, visitor):
        """Dispatch to ``visitor.visit<ClassName>`` if such a method
        exists, else recurse into child nodes."""
        def traverse(node):
            for child in node.get_children():
                child.accept_visitor(visitor)
        getattr(visitor, "visit" + self.__class__.__name__, traverse)(self)
class TemplateNode(Node):
    """The root node of a parse tree: holds every top-level node of a
    template, plus any attributes gathered from a ``<%page>`` tag."""

    def __init__(self, filename):
        super(TemplateNode, self).__init__('', 0, 0, filename)
        self.nodes = []
        self.page_attributes = {}

    def get_children(self):
        return self.nodes

    def __repr__(self):
        attrs = util.sorted_dict_repr(self.page_attributes)
        return "TemplateNode(%s, %r)" % (attrs, self.nodes)
class ControlLine(Node):
    """A ``%``-prefixed control line: a line-oriented python statement
    or its matching end tag.

    e.g.::

        % if foo:
            (markup)
        % endif
    """

    def __init__(self, keyword, isend, text, **kwargs):
        super(ControlLine, self).__init__(**kwargs)
        self.text = text
        self.keyword = keyword
        self.isend = isend
        # only these keywords open a block that must later be closed
        self.is_primary = keyword in ['for','if', 'while', 'try']
        if self.isend:
            # an end tag declares/uses nothing
            self._declared_identifiers = []
            self._undeclared_identifiers = []
        else:
            code = ast.PythonFragment(text, **self.exception_kwargs)
            self._declared_identifiers = code.declared_identifiers
            self._undeclared_identifiers = code.undeclared_identifiers

    def declared_identifiers(self):
        return self._declared_identifiers

    def undeclared_identifiers(self):
        return self._undeclared_identifiers

    def is_ternary(self, keyword):
        """Return True if *keyword* may directly continue this control
        line (e.g. ``else`` for ``if``)."""
        continuations = {
            'if':set(['else', 'elif']),
            'try':set(['except', 'finally']),
            'for':set(['else'])
        }
        return keyword in continuations.get(self.keyword, [])

    def __repr__(self):
        position = (self.lineno, self.pos)
        return "ControlLine(%r, %r, %r, %r)" % (
            self.keyword, self.text, self.isend, position)
class Text(Node):
    """A run of literal text in the template."""

    def __init__(self, content, **kwargs):
        super(Text, self).__init__(**kwargs)
        self.content = content

    def __repr__(self):
        position = (self.lineno, self.pos)
        return "Text(%r, %r)" % (self.content, position)
class Code(Node):
    """A block of Python code, either inline or module level.

    e.g.::

        inline:
        <%
            x = 12
        %>

        module level:
        <%!
            import logger
        %>
    """

    def __init__(self, text, ismodule, **kwargs):
        super(Code, self).__init__(**kwargs)
        self.text = text
        self.ismodule = ismodule
        # parse immediately so identifier analysis is available
        self.code = ast.PythonCode(text, **self.exception_kwargs)

    def declared_identifiers(self):
        return self.code.declared_identifiers

    def undeclared_identifiers(self):
        return self.code.undeclared_identifiers

    def __repr__(self):
        position = (self.lineno, self.pos)
        return "Code(%r, %r, %r)" % (self.text, self.ismodule, position)
class Comment(Node):
    """A template comment; produced both by ``##`` comment lines and by
    the body of a ``<%doc>`` block."""

    def __init__(self, text, **kwargs):
        super(Comment, self).__init__(**kwargs)
        self.text = text

    def __repr__(self):
        position = (self.lineno, self.pos)
        return "Comment(%r, %r)" % (self.text, position)
class Expression(Node):
    """An inline expression substitution, e.g. ``${x+y}``."""

    def __init__(self, text, escapes, **kwargs):
        super(Expression, self).__init__(**kwargs)
        self.text = text
        self.escapes = escapes
        self.escapes_code = ast.ArgumentList(escapes, **self.exception_kwargs)
        self.code = ast.PythonCode(text, **self.exception_kwargs)

    def declared_identifiers(self):
        # an expression never declares names into the enclosing scope
        return []

    def undeclared_identifiers(self):
        # TODO: make the "filter" shortcut list configurable at parse/gen time
        builtin_escapes = set(filters.DEFAULT_ESCAPES.keys())
        escape_idents = self.escapes_code.undeclared_identifiers.difference(
            builtin_escapes)
        needed = self.code.undeclared_identifiers.union(escape_idents)
        return needed.difference(self.code.declared_identifiers)

    def __repr__(self):
        position = (self.lineno, self.pos)
        return "Expression(%r, %r, %r)" % (
            self.text, self.escapes_code.args, position)
class _TagMeta(type):
    """metaclass to allow Tag to produce a subclass according to
    its keyword"""

    # maps tag keyword (e.g. 'include') to the Tag subclass handling it
    _classmap = {}

    def __init__(cls, clsname, bases, dict):
        # register each concrete Tag subclass under its __keyword__
        if cls.__keyword__ is not None:
            cls._classmap[cls.__keyword__] = cls
        super(_TagMeta, cls).__init__(clsname, bases, dict)

    def __call__(cls, keyword, attributes, **kwargs):
        # a colon in the keyword means a "namespace:defname" call tag
        if ":" in keyword:
            ns, defname = keyword.split(':')
            return type.__call__(CallNamespaceTag, ns, defname,
                                 attributes, **kwargs)
        # otherwise dispatch to the registered subclass for the keyword
        try:
            cls = _TagMeta._classmap[keyword]
        except KeyError:
            raise exceptions.CompileException(
                "No such tag: '%s'" % keyword,
                source=kwargs['source'],
                lineno=kwargs['lineno'],
                pos=kwargs['pos'],
                filename=kwargs['filename']
            )
        return type.__call__(cls, keyword, attributes, **kwargs)
class Tag(Node):
    """abstract base class for tags.

    <%sometag/>

    <%someothertag>
        stuff
    </%someothertag>

    """

    __metaclass__ = _TagMeta
    __keyword__ = None

    def __init__(self, keyword, attributes, expressions,
                 nonexpressions, required, **kwargs):
        """construct a new Tag instance.

        this constructor not called directly, and is only called
        by subclasses.

        :param keyword: the tag keyword

        :param attributes: raw dictionary of attribute key/value pairs

        :param expressions: a set of identifiers that are legal attributes,
         which can also contain embedded expressions

        :param nonexpressions: a set of identifiers that are legal
         attributes, which cannot contain embedded expressions

        :param required: a set of attribute names that must be present

        :param \**kwargs:
         other arguments passed to the Node superclass (lineno, pos)

        """
        super(Tag, self).__init__(**kwargs)
        self.keyword = keyword
        self.attributes = attributes
        self._parse_attributes(expressions, nonexpressions)
        missing = [r for r in required if r not in self.parsed_attributes]
        if len(missing):
            raise exceptions.CompileException(
                "Missing attribute(s): %s" %
                ",".join([repr(m) for m in missing]),
                **self.exception_kwargs)
        self.parent = None
        self.nodes = []

    def is_root(self):
        return self.parent is None

    def get_children(self):
        return self.nodes

    def _parse_attributes(self, expressions, nonexpressions):
        """populate self.parsed_attributes, translating ${...} sections
        in expression-legal attributes into Python expression strings."""
        undeclared_identifiers = set()
        self.parsed_attributes = {}
        for key in self.attributes:
            if key in expressions:
                # the attribute value becomes a " + "-joined Python
                # expression of literal chunks and (expr) sections
                expr = []
                for x in re.compile(r'(\${.+?})',
                                    re.S).split(self.attributes[key]):
                    m = re.compile(r'^\${(.+?)}$', re.S).match(x)
                    if m:
                        code = ast.PythonCode(m.group(1).rstrip(),
                                              **self.exception_kwargs)
                        # we aren't discarding "declared_identifiers" here,
                        # which we do so that list comprehension-declared
                        # variables aren't counted.   As yet can't find a
                        # condition that requires it here.
                        undeclared_identifiers = \
                            undeclared_identifiers.union(
                                code.undeclared_identifiers)
                        expr.append('(%s)' % m.group(1))
                    else:
                        if x:
                            expr.append(repr(x))
                self.parsed_attributes[key] = " + ".join(expr) or repr('')
            elif key in nonexpressions:
                if re.search(r'\${.+?}', self.attributes[key]):
                    # bugfix: error message previously misspelled "Attibute"
                    raise exceptions.CompileException(
                        "Attribute '%s' in tag '%s' does not allow embedded "
                        "expressions" % (key, self.keyword),
                        **self.exception_kwargs)
                self.parsed_attributes[key] = repr(self.attributes[key])
            else:
                raise exceptions.CompileException(
                    "Invalid attribute for tag '%s': '%s'" %
                    (self.keyword, key),
                    **self.exception_kwargs)
        self.expression_undeclared_identifiers = undeclared_identifiers

    def declared_identifiers(self):
        return []

    def undeclared_identifiers(self):
        return self.expression_undeclared_identifiers

    def __repr__(self):
        return "%s(%r, %s, %r, %r)" % (self.__class__.__name__,
                                       self.keyword,
                                       util.sorted_dict_repr(self.attributes),
                                       (self.lineno, self.pos),
                                       self.nodes
                                       )
class IncludeTag(Tag):
    """defines the <%include> tag; 'file' is required, 'import' and
    'args' are optional expression attributes."""

    __keyword__ = 'include'

    def __init__(self, keyword, attributes, **kwargs):
        super(IncludeTag, self).__init__(
            keyword,
            attributes,
            ('file', 'import', 'args'),
            (), ('file',), **kwargs)
        # wrap the 'args' attribute in a dummy call so it parses as a
        # call-argument list; "__DUMMY" itself is filtered back out below
        self.page_args = ast.PythonCode(
            "__DUMMY(%s)" % attributes.get('args', ''),
            **self.exception_kwargs)

    def declared_identifiers(self):
        return []

    def undeclared_identifiers(self):
        identifiers = self.page_args.undeclared_identifiers.\
            difference(set(["__DUMMY"])).\
            difference(self.page_args.declared_identifiers)
        return identifiers.union(super(IncludeTag, self).
                                 undeclared_identifiers())
class NamespaceTag(Tag):
    """defines the <%namespace> tag; requires 'name' and/or 'import',
    and at most one of 'file'/'module'."""

    __keyword__ = 'namespace'

    def __init__(self, keyword, attributes, **kwargs):
        super(NamespaceTag, self).__init__(
            keyword, attributes,
            ('file',),
            ('name', 'inheritable',
             'import', 'module'),
            (), **kwargs)

        # anonymous namespaces get a unique generated name based on id()
        self.name = attributes.get('name', '__anon_%s' % hex(abs(id(self))))
        if not 'name' in attributes and not 'import' in attributes:
            raise exceptions.CompileException(
                "'name' and/or 'import' attributes are required "
                "for <%namespace>",
                **self.exception_kwargs)
        if 'file' in attributes and 'module' in attributes:
            raise exceptions.CompileException(
                "<%namespace> may only have one of 'file' or 'module'",
                **self.exception_kwargs
            )

    def declared_identifiers(self):
        return []
class TextTag(Tag):
    """defines the <%text> tag, whose body is rendered verbatim;
    accepts only the optional 'filter' attribute."""

    __keyword__ = 'text'

    def __init__(self, keyword, attributes, **kwargs):
        # bugfix: 'filter' must be a one-element tuple.  The previous
        # bare parenthesized string ('filter') made the
        # "key in nonexpressions" check in Tag._parse_attributes a
        # substring test, silently accepting bogus attribute names
        # such as "ilt" or "fil".
        super(TextTag, self).__init__(
            keyword,
            attributes, (),
            ('filter',), (), **kwargs)
        self.filter_args = ast.ArgumentList(
            attributes.get('filter', ''),
            **self.exception_kwargs)
class DefTag(Tag):
    """defines the <%def> tag; 'name' is required and must include a
    parenthesized argument signature."""

    __keyword__ = 'def'

    def __init__(self, keyword, attributes, **kwargs):
        super(DefTag, self).__init__(
            keyword,
            attributes,
            ('buffered', 'cached', 'cache_key', 'cache_timeout',
             'cache_type', 'cache_dir', 'cache_url'),
            ('name', 'filter', 'decorator'),
            ('name',),
            **kwargs)
        name = attributes['name']
        # a bare identifier (no parenthesized signature) is rejected
        if re.match(r'^[\w_]+$', name):
            raise exceptions.CompileException(
                "Missing parenthesis in %def",
                **self.exception_kwargs)
        # parse "def <name>:pass" to extract the function signature
        self.function_decl = ast.FunctionDecl("def " + name + ":pass",
                                              **self.exception_kwargs)
        self.name = self.function_decl.funcname
        self.decorator = attributes.get('decorator', '')
        self.filter_args = ast.ArgumentList(
            attributes.get('filter', ''),
            **self.exception_kwargs)

    # defs are always named and are not blocks
    is_anonymous = False
    is_block = False

    @property
    def funcname(self):
        return self.function_decl.funcname

    def get_argument_expressions(self, **kw):
        return self.function_decl.get_argument_expressions(**kw)

    def declared_identifiers(self):
        return self.function_decl.argnames

    def undeclared_identifiers(self):
        # names referenced by argument defaults, plus names used by the
        # filter chain minus the built-in escape shortcuts
        res = []
        for c in self.function_decl.defaults:
            res += list(ast.PythonCode(c, **self.exception_kwargs).
                        undeclared_identifiers)
        return res + list(self.filter_args.
                          undeclared_identifiers.
                          difference(filters.DEFAULT_ESCAPES.keys())
                          )
class BlockTag(Tag):
    """defines the <%block> tag; 'name' is optional (anonymous blocks),
    and only named blocks may specify 'args'."""

    __keyword__ = 'block'

    def __init__(self, keyword, attributes, **kwargs):
        super(BlockTag, self).__init__(
            keyword,
            attributes,
            ('buffered', 'cached', 'cache_key', 'cache_timeout',
             'cache_type', 'cache_dir', 'cache_url', 'args'),
            ('name', 'filter', 'decorator'),
            (),
            **kwargs)
        name = attributes.get('name')
        # unlike %def, a block name is a bare identifier: no signature
        if name and not re.match(r'^[\w_]+$', name):
            raise exceptions.CompileException(
                "%block may not specify an argument signature",
                **self.exception_kwargs)
        if not name and attributes.get('args', None):
            raise exceptions.CompileException(
                "Only named %blocks may specify args",
                **self.exception_kwargs
            )
        self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
                                          **self.exception_kwargs)

        self.name = name
        self.decorator = attributes.get('decorator', '')
        self.filter_args = ast.ArgumentList(
            attributes.get('filter', ''),
            **self.exception_kwargs)

    is_block = True

    @property
    def is_anonymous(self):
        # a block with no 'name' attribute is anonymous
        return self.name is None

    @property
    def funcname(self):
        # anonymous blocks get a generated name based on their line number
        return self.name or "__M_anon_%d" % (self.lineno, )

    def get_argument_expressions(self, **kw):
        return self.body_decl.get_argument_expressions(**kw)

    def declared_identifiers(self):
        return self.body_decl.argnames

    def undeclared_identifiers(self):
        return []
class CallTag(Tag):
    """defines the <%call> tag; 'expr' is required, 'args' declares the
    calling body's signature."""

    __keyword__ = 'call'

    def __init__(self, keyword, attributes, **kwargs):
        # bugfix: 'args' must be a one-element tuple.  The previous bare
        # parenthesized string ('args') made "key in expressions" in
        # Tag._parse_attributes a substring test, accepting bogus
        # attribute names such as "rg" or "arg".
        super(CallTag, self).__init__(keyword, attributes,
                                      ('args',), ('expr',), ('expr',),
                                      **kwargs)
        self.expression = attributes['expr']
        self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
        self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
                                          **self.exception_kwargs)

    def declared_identifiers(self):
        return self.code.declared_identifiers.union(self.body_decl.argnames)

    def undeclared_identifiers(self):
        return self.code.undeclared_identifiers.\
            difference(self.code.declared_identifiers)
class CallNamespaceTag(Tag):
    """a tag of the form <%ns:defname .../>, produced by _TagMeta when
    the keyword contains ':'; renders as a call to ns.defname(...)."""

    def __init__(self, namespace, defname, attributes, **kwargs):
        # every attribute is accepted as an expression attribute, plus
        # 'args' for the calling body's signature
        super(CallNamespaceTag, self).__init__(
            namespace + ":" + defname,
            attributes,
            tuple(attributes.keys()) + ('args', ),
            (),
            (),
            **kwargs)
        # build the equivalent call expression, passing all attributes
        # except 'args' as keyword arguments
        self.expression = "%s.%s(%s)" % (
            namespace,
            defname,
            ",".join(["%s=%s" % (k, v) for k, v in
                      self.parsed_attributes.iteritems()
                      if k != 'args'])
        )
        self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
        self.body_decl = ast.FunctionArgs(
            attributes.get('args', ''),
            **self.exception_kwargs)

    def declared_identifiers(self):
        return self.code.declared_identifiers.union(self.body_decl.argnames)

    def undeclared_identifiers(self):
        return self.code.undeclared_identifiers.\
            difference(self.code.declared_identifiers)
class InheritTag(Tag):
    """defines the <%inherit> tag; 'file' is the only attribute and
    is required."""

    __keyword__ = 'inherit'

    def __init__(self, keyword, attributes, **kwargs):
        super(InheritTag, self).__init__(
            keyword, attributes,
            ('file',), (), ('file',), **kwargs)
class PageTag(Tag):
    """defines the <%page> tag, carrying template-wide arguments,
    caching options and the default expression filter."""

    __keyword__ = 'page'

    def __init__(self, keyword, attributes, **kwargs):
        super(PageTag, self).__init__(
            keyword,
            attributes,
            ('cached', 'cache_key', 'cache_timeout',
             'cache_type', 'cache_dir', 'cache_url',
             'args', 'expression_filter'),
            (),
            (),
            **kwargs)
        # signature for the template body's render function
        self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
                                          **self.exception_kwargs)
        # filters applied to every ${...} expression in the template
        self.filter_args = ast.ArgumentList(
            attributes.get('expression_filter', ''),
            **self.exception_kwargs)

    def declared_identifiers(self):
        return self.body_decl.argnames
| Python |
# ext/autohandler.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""adds autohandler functionality to Mako templates.
requires that the TemplateLookup class is used with templates.
usage:
<%!
from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohandler(template, context)}"/>
or with custom autohandler filename:
<%!
from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohandler(template, context, name='somefilename')}"/>
"""
import posixpath, os, re
def autohandler(template, context, name='autohandler'):
    """Return the URI of the nearest enclosing ``name`` file for
    *template*, walking up its URI path, or None if none exists.

    Results are cached on ``lookup._uri_cache`` (keyed by this function,
    the template URI and ``name``) unless ``filesystem_checks`` is
    enabled on the lookup.
    """
    lookup = context.lookup
    _template_uri = template.module._template_uri
    if not lookup.filesystem_checks:
        try:
            return lookup._uri_cache[(autohandler, _template_uri, name)]
        except KeyError:
            pass

    # start with the template's own directory, then walk upwards,
    # replacing the last path token with ``name`` at each level
    tokens = re.findall(r'([^/]+)', posixpath.dirname(_template_uri)) + [name]
    while len(tokens):
        path = '/' + '/'.join(tokens)
        # never inherit from ourselves
        if path != _template_uri and _file_exists(lookup, path):
            if not lookup.filesystem_checks:
                return lookup._uri_cache.setdefault(
                    (autohandler, _template_uri, name), path)
            else:
                return path
        if len(tokens) == 1:
            break
        tokens[-2:] = [name]

    # nothing found; cache the negative result as well
    if not lookup.filesystem_checks:
        return lookup._uri_cache.setdefault(
            (autohandler, _template_uri, name), None)
    else:
        return None
def _file_exists(lookup, path):
psub = re.sub(r'^/', '',path)
for d in lookup.directories:
if os.path.exists(d + '/' + psub):
return True
else:
return False
| Python |
# ext/preprocessors.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""preprocessing functions, used with the 'preprocessor'
argument on Template, TemplateLookup"""
import re
def convert_comments(text):
    """Preprocess old-style single-``#`` comment lines into the modern
    ``##`` form.

    example:

        from mako.ext.preprocessors import convert_comments
        t = Template(..., preprocessor=convert_comments)

    Fixes relative to the original implementation (whose docstring also
    referenced the wrong function name):

    * the pattern ``\s*#[^#]`` consumed — and therefore destroyed — the
      leading whitespace and the first character following ``#``; the
      lookahead form below rewrites only the ``#`` itself;
    * a lone ``#`` at end of line no longer swallows the newline.

    Lines already starting with ``##`` are left untouched; as before,
    the very first line of the text is not examined (the lookbehind
    requires a preceding newline).
    """
    return re.sub(r'(?<=\n)(\s*)#(?=[^#])', r'\1##', text)
| Python |
# ext/babelplugin.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""gettext message extraction via Babel: http://babel.edgewall.org/"""
from StringIO import StringIO
from babel.messages.extract import extract_python
from mako import lexer, parsetree
def extract(fileobj, keywords, comment_tags, options):
    """Extract messages from Mako templates.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: ``iterator``
    """
    # honor Mako's 'input_encoding' first, falling back to the generic
    # Babel 'encoding' option
    encoding = options.get('input_encoding', options.get('encoding', None))

    template_node = lexer.Lexer(fileobj.read(),
                                input_encoding=encoding).parse()
    for extracted in extract_nodes(template_node.get_children(),
                                   keywords, comment_tags, options):
        yield extracted
def extract_nodes(nodes, keywords, comment_tags, options):
    """Extract messages from Mako's lexer node objects

    :param nodes: an iterable of Mako parsetree.Node objects to extract from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: ``iterator``
    """
    # accumulated (lineno, text) translator comments awaiting the next
    # extractable node
    translator_comments = []
    in_translator_comments = False

    for node in nodes:
        child_nodes = None
        if in_translator_comments and isinstance(node, parsetree.Text) and \
                not node.content.strip():
            # Ignore whitespace within translator comments
            continue

        if isinstance(node, parsetree.Comment):
            value = node.text.strip()
            if in_translator_comments:
                # continuation of a translator-comment run
                translator_comments.extend(_split_comment(node.lineno, value))
                continue
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    # a tagged comment opens a translator-comment run
                    in_translator_comments = True
                    translator_comments.extend(_split_comment(node.lineno,
                                                              value))
            continue

        # pick the Python code string to hand to Babel, and any child
        # nodes to recurse into afterwards
        if isinstance(node, parsetree.DefTag):
            code = node.function_decl.code
            child_nodes = node.nodes
        elif isinstance(node, parsetree.BlockTag):
            code = node.body_decl.code
            child_nodes = node.nodes
        elif isinstance(node, parsetree.CallTag):
            code = node.code.code
            child_nodes = node.nodes
        elif isinstance(node, parsetree.PageTag):
            code = node.body_decl.code
        elif isinstance(node, parsetree.CallNamespaceTag):
            attribs = ', '.join(['%s=%s' % (key, val)
                                 for key, val in node.attributes.iteritems()])
            code = '{%s}' % attribs
            child_nodes = node.nodes
        elif isinstance(node, parsetree.ControlLine):
            if node.isend:
                translator_comments = []
                in_translator_comments = False
                continue
            code = node.text
        elif isinstance(node, parsetree.Code):
            # <% and <%! blocks would provide their own translator comments
            translator_comments = []
            in_translator_comments = False
            code = node.code.code
        elif isinstance(node, parsetree.Expression):
            code = node.code.code
        else:
            translator_comments = []
            in_translator_comments = False
            continue

        # Comments don't apply unless they immediately precede the message
        if translator_comments and \
                translator_comments[-1][0] < node.lineno - 1:
            translator_comments = []
        else:
            translator_comments = \
                [comment[1] for comment in translator_comments]

        if isinstance(code, unicode):
            # Babel's Python extractor works on byte streams (Py2)
            code = code.encode('ascii', 'backslashreplace')

        code = StringIO(code)
        for lineno, funcname, messages, python_translator_comments \
                in extract_python(code, keywords, comment_tags, options):
            # offset Babel's line numbers by the node's template position
            yield (node.lineno + (lineno - 1), funcname, messages,
                   translator_comments + python_translator_comments)

        translator_comments = []
        in_translator_comments = False

        if child_nodes:
            for extracted in extract_nodes(child_nodes, keywords,
                                           comment_tags, options):
                yield extracted
def _split_comment(lineno, comment):
"""Return the multiline comment at lineno split into a list of comment line
numbers and the accompanying comment line"""
return [(lineno + index, line) for index, line in
enumerate(comment.splitlines())]
| Python |
# ext/pygmentplugin.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
try:
set
except NameError:
from sets import Set as set
from pygments.lexers.web import \
HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from pygments.lexers.agile import PythonLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
include, using, this
from pygments.token import Error, Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Literal
from pygments.util import html_doctype_matches, looks_like_xml
class MakoLexer(RegexLexer):
    # Pygments lexer for standalone Mako template source.  Python
    # sections (% lines, <% %> blocks, ${} substitutions) are delegated
    # to PythonLexer via using(); Mako delimiters are Comment.Preproc.
    name = 'Mako'
    aliases = ['mako']
    filenames = ['*.mao']

    tokens = {
        'root': [
            # "% endif"-style control-line terminators
            (r'(\s*)(\%)(\s*end(?:\w+))(\n|\Z)',
             bygroups(Text, Comment.Preproc, Keyword, Other)),
            # "% <python>" control lines ("%%" is an escaped percent)
            (r'(\s*)(\%(?!%))([^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
            # "##" comment lines
            (r'(\s*)(##[^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, Other)),
            # <%doc> ... </%doc> multiline comments
            (r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc),
            # opening and closing tags such as <%def ...> / </%def>
            (r'(<%)([\w\.\:]+)',
             bygroups(Comment.Preproc, Name.Builtin), 'tag'),
            (r'(</%)([\w\.\:]+)(>)',
             bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
            (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
            # <% %> and <%! %> python blocks
            (r'(<%(?:!?))(.*?)(%>)(?s)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # ${...} substitutions
            (r'(\$\{)(.*?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # plain text, consumed up to the next Mako construct
            (r'''(?sx)
                (.+?)                # anything, followed by:
                (?:
                 (?<=\n)(?=%(?!%)|\#\#) |  # an eval or comment line
                 (?=\#\*) |          # multiline comment
                 (?=</?%) |          # a python block
                                     # call start or end
                 (?=\$\{) |          # a substitution
                 (?<=\n)(?=\s*%) |
                                     # - don't consume
                 (\\\n) |            # an escaped newline
                 \Z                  # end of string
                )
            ''', bygroups(Other, Operator)),
            (r'\s+', Text),
        ],
        'ondeftags': [
            (r'<%', Comment.Preproc),
            (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
            include('tag'),
        ],
        'tag': [
            # attribute="value" pairs inside a tag
            (r'((?:\w+)\s*=)\s*(".*?")',
             bygroups(Name.Attribute, String)),
            (r'/?\s*>', Comment.Preproc, '#pop'),
            (r'\s+', Text),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
class MakoHtmlLexer(DelegatingLexer):
    # HTML highlighted by HtmlLexer, Mako constructs by MakoLexer.
    name = 'HTML+Mako'
    aliases = ['html+mako']

    def __init__(self, **options):
        super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
                                            **options)
class MakoXmlLexer(DelegatingLexer):
    # XML highlighted by XmlLexer, Mako constructs by MakoLexer.
    name = 'XML+Mako'
    aliases = ['xml+mako']

    def __init__(self, **options):
        super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
                                           **options)
class MakoJavascriptLexer(DelegatingLexer):
    # JavaScript highlighted by JavascriptLexer, Mako constructs by MakoLexer.
    name = 'JavaScript+Mako'
    aliases = ['js+mako', 'javascript+mako']

    def __init__(self, **options):
        super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
                                                  MakoLexer, **options)
class MakoCssLexer(DelegatingLexer):
    # CSS highlighted by CssLexer, Mako constructs by MakoLexer.
    name = 'CSS+Mako'
    aliases = ['css+mako']

    def __init__(self, **options):
        super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
                                           **options)
| Python |
# ext/turbogears.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, inspect
from mako.lookup import TemplateLookup
from mako.template import Template
class TGPlugin(object):
    """TurboGears compatible Template Plugin."""

    def __init__(self, extra_vars_func=None, options=None, extension='mak'):
        # extra_vars_func: optional callable returning a dict merged into
        # the render variables on every render() call
        self.extra_vars_func = extra_vars_func
        self.extension = extension
        if not options:
            options = {}

        # Pull the options out and initialize the lookup:
        # 'mako.'-prefixed keys have the prefix stripped; a few known
        # lookup keys are passed through as-is
        lookup_options = {}
        for k, v in options.iteritems():
            if k.startswith('mako.'):
                lookup_options[k[5:]] = v
            elif k in ['directories', 'filesystem_checks',
                       'module_directory']:
                lookup_options[k] = v
        self.lookup = TemplateLookup(**lookup_options)

        self.tmpl_options = {}
        # transfer lookup args to template args, based on those available
        # in getargspec
        for kw in inspect.getargspec(Template.__init__)[0]:
            if kw in lookup_options:
                self.tmpl_options[kw] = lookup_options[kw]

    def load_template(self, templatename, template_string=None):
        """Loads a template from a file or a string"""
        if template_string is not None:
            return Template(template_string, **self.tmpl_options)
        # Translate TG dot notation to normal / template path
        if '/' not in templatename:
            templatename = '/' + templatename.replace('.', '/') + '.' + \
                self.extension

        # Lookup template
        return self.lookup.get_template(templatename)

    def render(self, info, format="html", fragment=False, template=None):
        # ``format`` and ``fragment`` are accepted for plugin-interface
        # compatibility but are not used here
        if isinstance(template, basestring):
            template = self.load_template(template)

        # Load extra vars func if provided
        if self.extra_vars_func:
            info.update(self.extra_vars_func())

        return template.render(**info)
| Python |
# mako/codegen.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters, exceptions
MAGIC_NUMBER = 6
def compile(node,
            uri,
            filename=None,
            default_filters=None,
            buffer_filters=None,
            imports=None,
            source_encoding=None,
            generate_magic_comment=True,
            disable_unicode=False,
            strict_undefined=False):
    """Generate module source code given a parsetree node,
    uri, and optional source filename

    :param node: root parsetree node of the template
    :param uri: template URI, embedded into the generated module
    :param filename: optional source filename, embedded for tracebacks
    :return: the generated module source as a string
    """

    # if on Py2K, push the "source_encoding" string to be
    # a bytestring itself, as we will be embedding it into
    # the generated source and we don't want to coerce the
    # result into a unicode object, in "disable_unicode" mode
    if not util.py3k and isinstance(source_encoding, unicode):
        source_encoding = source_encoding.encode(source_encoding)

    buf = util.FastEncodingBuffer()

    printer = PythonPrinter(buf)
    _GenerateRenderMethod(printer,
                          _CompileContext(uri,
                                          filename,
                                          default_filters,
                                          buffer_filters,
                                          imports,
                                          source_encoding,
                                          generate_magic_comment,
                                          disable_unicode,
                                          strict_undefined),
                          node)
    return buf.getvalue()
class _CompileContext(object):
def __init__(self,
uri,
filename,
default_filters,
buffer_filters,
imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.source_encoding = source_encoding
self.generate_magic_comment = generate_magic_comment
self.disable_unicode = disable_unicode
self.strict_undefined = strict_undefined
class _GenerateRenderMethod(object):
"""A template visitor object which generates the
full module source for a template.
"""
    def __init__(self, printer, compiler, node):
        """Generate the render callable for *node*; for a top-level
        template node this also writes the module prologue and then
        recursively generates one render method per top-level def."""
        self.printer = printer
        self.last_source_line = -1
        self.compiler = compiler
        self.node = node
        self.identifier_stack = [None]
        # True when generating a def/block's render method rather than
        # the main template body
        self.in_def = isinstance(node, (parsetree.DefTag,
                                        parsetree.BlockTag))

        if self.in_def:
            name = "render_%s" % node.funcname
            args = node.get_argument_expressions()
            filtered = len(node.filter_args.args) > 0
            # NOTE: attribute values are evaluated as Python literals
            # from trusted template source
            buffered = eval(node.attributes.get('buffered', 'False'))
            cached = eval(node.attributes.get('cached', 'False'))
            defs = None
            pagetag = None
            if node.is_block and not node.is_anonymous:
                args += ['**pageargs']
        else:
            # main template body: emit the module-level prologue first
            defs = self.write_toplevel()
            pagetag = self.compiler.pagetag
            name = "render_body"
            if pagetag is not None:
                args = pagetag.body_decl.get_argument_expressions()
                if not pagetag.body_decl.kwargs:
                    args += ['**pageargs']
                cached = eval(pagetag.attributes.get('cached', 'False'))
            else:
                args = ['**pageargs']
                cached = False
            buffered = filtered = False
        if args is None:
            args = ['context']
        else:
            # 'context' is always the first positional argument
            args = [a for a in ['context'] + args]

        self.write_render_callable(
            pagetag or node,
            name, args,
            buffered, filtered, cached)

        if defs is not None:
            # generate a module-level render method for each top-level def
            for node in defs:
                _GenerateRenderMethod(printer, compiler, node)
    @property
    def identifiers(self):
        # the identifier collection for the innermost scope on the stack
        return self.identifier_stack[-1]
    def write_toplevel(self):
        """Traverse a template structure for module-level directives and
        generate the start of module-level code.

        Returns the list of top-level def nodes, for which the caller
        generates individual render methods.
        """
        inherit = []
        namespaces = {}
        module_code = []
        encoding = [None]  # retained but not referenced within this method

        self.compiler.pagetag = None

        # visitor collecting <%inherit>, <%namespace>, <%page> and
        # module-level <%! %> code blocks
        class FindTopLevel(object):
            def visitInheritTag(s, node):
                inherit.append(node)
            def visitNamespaceTag(s, node):
                namespaces[node.name] = node
            def visitPageTag(s, node):
                self.compiler.pagetag = node
            def visitCode(s, node):
                if node.ismodule:
                    module_code.append(node)

        f = FindTopLevel()
        for n in self.node.nodes:
            n.accept_visitor(f)

        self.compiler.namespaces = namespaces

        # names declared by all module-level code blocks
        module_ident = set()
        for n in module_code:
            module_ident = module_ident.union(n.declared_identifiers())

        module_identifiers = _Identifiers()
        module_identifiers.declared = module_ident

        # module-level names, python code
        if self.compiler.generate_magic_comment and \
                self.compiler.source_encoding:
            self.printer.writeline("# -*- encoding:%s -*-" %
                                   self.compiler.source_encoding)

        self.printer.writeline("from mako import runtime, filters, cache")
        self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
        self.printer.writeline("__M_dict_builtin = dict")
        self.printer.writeline("__M_locals_builtin = locals")
        self.printer.writeline("_magic_number = %r" % MAGIC_NUMBER)
        self.printer.writeline("_modified_time = %r" % time.time())
        self.printer.writeline(
            "_template_filename=%r" % self.compiler.filename)
        self.printer.writeline("_template_uri=%r" % self.compiler.uri)
        self.printer.writeline(
            "_template_cache=cache.Cache(__name__, _modified_time)")
        self.printer.writeline(
            "_source_encoding=%r" % self.compiler.source_encoding)
        if self.compiler.imports:
            # emit user-specified import lines and parse them for the
            # names they declare
            buf = ''
            for imp in self.compiler.imports:
                buf += imp + "\n"
                self.printer.writeline(imp)
            impcode = ast.PythonCode(
                buf,
                source='', lineno=0,
                pos=0,
                filename='template defined imports')
        else:
            impcode = None

        main_identifiers = module_identifiers.branch(self.node)
        module_identifiers.topleveldefs = \
            module_identifiers.topleveldefs.\
            union(main_identifiers.topleveldefs)
        module_identifiers.declared.add("UNDEFINED")
        if impcode:
            module_identifiers.declared.update(impcode.declared_identifiers)

        self.compiler.identifiers = module_identifiers
        self.printer.writeline("_exports = %r" %
                               [n.name for n in
                                main_identifiers.topleveldefs.values()]
                               )
        self.printer.write("\n\n")

        if len(module_code):
            self.write_module_code(module_code)

        # inheritance requires the namespace machinery; the last
        # <%inherit> tag seen wins
        if len(inherit):
            self.write_namespaces(namespaces)
            self.write_inherit(inherit[-1])
        elif len(namespaces):
            self.write_namespaces(namespaces)

        return main_identifiers.topleveldefs.values()
    def write_render_callable(self, node, name, args, buffered, filtered, cached):
        """write a top-level render callable.

        this could be the main render() method or that of a top-level def.

        :param name: generated function name (e.g. 'render_body')
        :param args: full argument list; 'context' is already first
        """
        if self.in_def:
            decorator = node.decorator
            if decorator:
                self.printer.writeline(
                    "@runtime._decorate_toplevel(%s)" % decorator)

        self.printer.writelines(
            "def %s(%s):" % (name, ','.join(args)),
            "context.caller_stack._push_frame()",
            "try:"
        )
        if buffered or filtered or cached:
            self.printer.writeline("context._push_buffer()")

        self.identifier_stack.append(
            self.compiler.identifiers.branch(self.node))
        # '**pageargs' counts as a declared argument for the body scope
        if (not self.in_def or self.node.is_block) and '**pageargs' in args:
            self.identifier_stack[-1].argument_declared.add('pageargs')

        # the main body tracks its locals in __M_locals so that
        # enclosed defs can see assignments made in the body
        if not self.in_def and (
                len(self.identifiers.locally_assigned) > 0 or
                len(self.identifiers.argument_declared) > 0
        ):
            self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
                                   ','.join([
                                       "%s=%s" % (x, x) for x in
                                       self.identifiers.argument_declared
                                   ]))

        self.write_variable_declares(self.identifiers, toplevel=True)

        for n in self.node.nodes:
            n.accept_visitor(self)

        self.write_def_finish(self.node, buffered, filtered, cached)
        self.printer.writeline(None)
        self.printer.write("\n\n")
        if cached:
            self.write_cache_decorator(
                node, name,
                args, buffered,
                self.identifiers, toplevel=True)
def write_module_code(self, module_code):
"""write module-level template code, i.e. that which
is enclosed in <%! %> tags in the template."""
for n in module_code:
self.write_source_comment(n)
self.printer.write_indented_block(n.text)
    def write_inherit(self, node):
        """write the module-level inheritance-determination callable."""
        # the 'file' attribute is already a parsed Python expression,
        # interpolated directly into the generated source
        self.printer.writelines(
            "def _mako_inherit(template, context):",
            "_mako_generate_namespaces(context)",
            "return runtime._inherit_from(context, %s, _template_uri)" %
            (node.parsed_attributes['file']),
            None
        )
    def write_namespaces(self, namespaces):
        """write the module-level namespace-generating callable."""
        # cached accessor: populate on first KeyError
        self.printer.writelines(
            "def _mako_get_namespace(context, name):",
            "try:",
            "return context.namespaces[(__name__, name)]",
            "except KeyError:",
            "_mako_generate_namespaces(context)",
            "return context.namespaces[(__name__, name)]",
            None, None
        )
        self.printer.writeline("def _mako_generate_namespaces(context):")

        for node in namespaces.values():
            if node.attributes.has_key('import'):
                self.compiler.has_ns_imports = True
            self.write_source_comment(node)
            if len(node.nodes):
                # the namespace tag has a body: render its defs/blocks
                # as callables exported by the namespace
                self.printer.writeline("def make_namespace():")
                export = []
                identifiers = self.compiler.identifiers.branch(node)
                self.in_def = True

                class NSDefVisitor(object):
                    def visitDefTag(s, node):
                        s.visitDefOrBase(node)
                    def visitBlockTag(s, node):
                        s.visitDefOrBase(node)
                    def visitDefOrBase(s, node):
                        if node.is_anonymous:
                            raise exceptions.CompileException(
                                "Can't put anonymous blocks inside <%namespace>",
                                **node.exception_kwargs
                            )
                        self.write_inline_def(node, identifiers, nested=False)
                        export.append(node.funcname)

                vis = NSDefVisitor()
                for n in node.nodes:
                    n.accept_visitor(vis)
                self.printer.writeline("return [%s]" % (','.join(export)))
                self.printer.writeline(None)
                self.in_def = False
                callable_name = "make_namespace()"
            else:
                callable_name = "None"

            # emit the appropriate runtime namespace flavor
            if 'file' in node.parsed_attributes:
                self.printer.writeline(
                    "ns = runtime.TemplateNamespace(%r, context._clean_inheritance_tokens(),"
                    " templateuri=%s, callables=%s, calling_uri=_template_uri)" %
                    (
                        node.name,
                        node.parsed_attributes.get('file', 'None'),
                        callable_name,
                    )
                )
            elif 'module' in node.parsed_attributes:
                self.printer.writeline(
                    "ns = runtime.ModuleNamespace(%r, context._clean_inheritance_tokens(),"
                    " callables=%s, calling_uri=_template_uri, module=%s)" %
                    (
                        node.name,
                        callable_name,
                        node.parsed_attributes.get('module', 'None')
                    )
                )
            else:
                self.printer.writeline(
                    "ns = runtime.Namespace(%r, context._clean_inheritance_tokens(),"
                    " callables=%s, calling_uri=_template_uri)" %
                    (
                        node.name,
                        callable_name,
                    )
                )
            # 'inheritable' namespaces are attached to 'self' as well
            if eval(node.attributes.get('inheritable', "False")):
                self.printer.writeline("context['self'].%s = ns" % (node.name))

            self.printer.writeline(
                "context.namespaces[(__name__, %s)] = ns" % repr(node.name))
            self.printer.write("\n")
        if not len(namespaces):
            # keep the generated function syntactically valid when empty
            self.printer.writeline("pass")
        self.printer.writeline(None)
    def write_variable_declares(self, identifiers, toplevel=False, limit=None):
        """write variable declarations at the top of a function.

        the variable declarations are in the form of callable
        definitions for defs and/or name lookup within the
        function's context argument. the names declared are based
        on the names that are referenced in the function body,
        which don't otherwise have any explicit assignment
        operation. names that are assigned within the body are
        assumed to be locally-scoped variables and are not
        separately declared.

        for def callable definitions, if the def is a top-level
        callable then a 'stub' callable is generated which wraps
        the current Context into a closure. if the def is not
        top-level, it is fully rendered as a local closure.

        """
        # collection of all defs available to us in this scope
        comp_idents = dict([(c.funcname, c) for c in identifiers.defs])
        to_write = set()

        # write "context.get()" for all variables we are going to
        # need that arent in the namespace yet
        to_write = to_write.union(identifiers.undeclared)

        # write closure functions for closures that we define
        # right here
        to_write = to_write.union(
            [c.funcname for c in identifiers.closuredefs.values()])

        # remove identifiers that are declared in the argument
        # signature of the callable
        to_write = to_write.difference(identifiers.argument_declared)

        # remove identifiers that we are going to assign to.
        # in this way we mimic Python's behavior,
        # i.e. assignment to a variable within a block
        # means that variable is now a "locally declared" var,
        # which cannot be referenced beforehand.
        to_write = to_write.difference(identifiers.locally_declared)

        # if a limiting set was sent, constraint to those items in that list
        # (this is used for the caching decorator)
        if limit is not None:
            to_write = to_write.intersection(limit)

        # at the top level, populate the _import_ns dict from all
        # namespaces that carry an 'import' attribute
        if toplevel and getattr(self.compiler, 'has_ns_imports', False):
            self.printer.writeline("_import_ns = {}")
            self.compiler.has_imports = True
            for ident, ns in self.compiler.namespaces.iteritems():
                if ns.attributes.has_key('import'):
                    self.printer.writeline(
                        "_mako_get_namespace(context, %r)._populate(_import_ns, %r)" %
                        (
                            ident,
                            re.split(r'\s*,\s*', ns.attributes['import'])
                        ))

        for ident in to_write:
            if ident in comp_idents:
                # the name is a def/block defined in this scope:
                # generate either a stub referencing the module-level
                # render function, or a full inline closure
                comp = comp_idents[ident]
                if comp.is_block:
                    if not comp.is_anonymous:
                        self.write_def_decl(comp, identifiers)
                    else:
                        self.write_inline_def(comp, identifiers, nested=True)
                else:
                    if comp.is_root():
                        self.write_def_decl(comp, identifiers)
                    else:
                        self.write_inline_def(comp, identifiers, nested=True)

            elif ident in self.compiler.namespaces:
                # the name refers to a declared <%namespace>
                self.printer.writeline(
                    "%s = _mako_get_namespace(context, %r)" %
                    (ident, ident)
                )
            else:
                # plain context lookup; _import_ns takes precedence
                # when namespace imports are in play
                if getattr(self.compiler, 'has_ns_imports', False):
                    if self.compiler.strict_undefined:
                        self.printer.writelines(
                            "%s = _import_ns.get(%r, UNDEFINED)" %
                            (ident, ident),
                            "if %s is UNDEFINED:" % ident,
                            "try:",
                            "%s = context[%r]" % (ident, ident),
                            "except KeyError:",
                            "raise NameError(\"'%s' is not defined\")" %
                            ident,
                            None, None
                        )
                    else:
                        self.printer.writeline(
                            "%s = _import_ns.get(%r, context.get(%r, UNDEFINED))" %
                            (ident, ident, ident))
                else:
                    if self.compiler.strict_undefined:
                        self.printer.writelines(
                            "try:",
                            "%s = context[%r]" % (ident, ident),
                            "except KeyError:",
                            "raise NameError(\"'%s' is not defined\")" %
                            ident,
                            None
                        )
                    else:
                        self.printer.writeline(
                            "%s = context.get(%r, UNDEFINED)" % (ident, ident)
                        )

        self.printer.writeline("__M_writer = context.writer()")
def write_source_comment(self, node):
    """emit a '# SOURCE LINE' comment for the template line behind *node*.

    Consecutive nodes originating from the same template line produce
    only one comment; the last emitted line number is tracked so
    duplicates are suppressed.
    """
    lineno = node.lineno
    if lineno != self.last_source_line:
        self.printer.writeline("# SOURCE LINE %d" % lineno)
        self.last_source_line = lineno
def write_def_decl(self, node, identifiers):
    """write a locally-available callable referencing a top-level def"""
    funcname = node.funcname
    namedecls = node.get_argument_expressions()
    nameargs = node.get_argument_expressions(include_defaults=False)

    # when rendering in the template body proper, local assignments and
    # page arguments must be visible to the def, so hand it a context
    # wrapping __M_locals; otherwise pass the plain context through.
    need_locals = not self.in_def and (
        self.identifiers.locally_assigned or
        self.identifiers.argument_declared)
    if need_locals:
        nameargs.insert(0, 'context.locals_(__M_locals)')
    else:
        nameargs.insert(0, 'context')
    self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
    self.printer.writeline(
        "return render_%s(%s)" % (funcname, ",".join(nameargs)))
    self.printer.writeline(None)
def write_inline_def(self, node, identifiers, nested):
    """write a locally-available def callable inside an enclosing def."""
    namedecls = node.get_argument_expressions()
    decorator = node.decorator
    if decorator:
        self.printer.writeline("@runtime._decorate_inline(context, %s)" % decorator)
    self.printer.writeline("def %s(%s):" % (node.funcname, ",".join(namedecls)))
    filtered = len(node.filter_args.args) > 0
    # 'buffered'/'cached' tag attributes are Python literal expressions,
    # evaluated once here at compile time
    buffered = eval(node.attributes.get('buffered', 'False'))
    cached = eval(node.attributes.get('cached', 'False'))
    self.printer.writelines(
        "context.caller_stack._push_frame()",
        "try:"
        )
    if buffered or filtered or cached:
        # captured output goes to a pushed buffer so it can be
        # filtered/returned by write_def_finish()
        self.printer.writelines(
            "context._push_buffer()",
            )
    # branch the identifier scope for the def body and render children
    identifiers = identifiers.branch(node, nested=nested)
    self.write_variable_declares(identifiers)
    self.identifier_stack.append(identifiers)
    for n in node.nodes:
        n.accept_visitor(self)
    self.identifier_stack.pop()
    self.write_def_finish(node, buffered, filtered, cached)
    self.printer.writeline(None)
    if cached:
        # wrap the just-written callable with the caching decorator
        self.write_cache_decorator(node, node.funcname,
                                    namedecls, False, identifiers,
                                    inline=True, toplevel=False)
def write_def_finish(self, node, buffered, filtered, cached, callstack=True):
    """write the end section of a rendering function, either outermost or inline.

    this takes into account if the rendering function was filtered, buffered, etc.
    and closes the corresponding try: block if any, and writes code to retrieve
    captured content, apply filters, send proper return value."""
    if not buffered and not cached and not filtered:
        # simplest case: output went straight to the writer; defs
        # conventionally return ''
        self.printer.writeline("return ''")
        if callstack:
            self.printer.writelines(
                "finally:",
                "context.caller_stack._pop_frame()",
                None
            )
    if buffered or filtered or cached:
        if buffered or cached:
            # in a caching scenario, don't try to get a writer
            # from the context after popping; assume the caching
            # implemenation might be using a context with no
            # extra buffers
            self.printer.writelines(
                "finally:",
                "__M_buf = context._pop_buffer()"
            )
        else:
            self.printer.writelines(
                "finally:",
                "__M_buf, __M_writer = context._pop_buffer_and_writer()"
            )
        if callstack:
            self.printer.writeline("context.caller_stack._pop_frame()")
        # expression retrieving the captured content
        s = "__M_buf.getvalue()"
        if filtered:
            s = self.create_filter_callable(node.filter_args.args, s, False)
        self.printer.writeline(None)
        if buffered and not cached:
            # apply the compiler-wide buffer_filters to buffered output
            s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
        if buffered or cached:
            # buffered/cached defs return their content to the caller
            self.printer.writeline("return %s" % s)
        else:
            # filtered-only defs write the filtered content out directly
            self.printer.writelines(
                "__M_writer(%s)" % s,
                "return ''"
            )
def write_cache_decorator(self, node_or_pagetag, name,
                            args, buffered, identifiers,
                            inline=False, toplevel=False):
    """write a post-function decorator to replace a rendering
    callable with a cached version of itself.

    BUGFIX: the fallback lookup against the <%page> tag previously used
    ``cacheargs[arg[1]] == int(eval(val))`` — an equality comparison
    instead of an assignment — so a page-level ``cache_timeout`` was
    silently discarded.  The lookup is now folded into a single path
    and the value is actually stored.
    """
    # alias the original rendering callable as __M_<name>
    self.printer.writeline("__M_%s = %s" % (name, name))
    cachekey = node_or_pagetag.parsed_attributes.get('cache_key', repr(name))
    cacheargs = {}
    # translate template cache_* attributes into backend keyword args;
    # the node's own attribute wins, the <%page> tag supplies defaults
    for arg in (
        ('cache_type', 'type'), ('cache_dir', 'data_dir'),
        ('cache_timeout', 'expiretime'), ('cache_url', 'url')):
        val = node_or_pagetag.parsed_attributes.get(arg[0], None)
        if val is None and self.compiler.pagetag is not None:
            val = self.compiler.pagetag.parsed_attributes.get(arg[0], None)
        if val is not None:
            if arg[1] == 'expiretime':
                # expiretime is a literal integer expression
                cacheargs[arg[1]] = int(eval(val))
            else:
                cacheargs[arg[1]] = val

    self.printer.writeline("def %s(%s):" % (name, ','.join(args)))

    # form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
    pass_args = [
        '=' in a and "%s=%s" % ((a.split('=')[0],)*2) or a
        for a in args
    ]

    self.write_variable_declares(
        identifiers,
        toplevel=toplevel,
        limit=node_or_pagetag.undeclared_identifiers()
    )
    if buffered:
        s = "context.get('local')."\
            "get_cached(%s, defname=%r, %screatefunc=lambda:__M_%s(%s))" % \
            (cachekey, name,
            ''.join(["%s=%s, " % (k,v) for k, v in cacheargs.iteritems()]),
            name, ','.join(pass_args))
        # apply buffer_filters
        s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
        self.printer.writelines("return " + s, None)
    else:
        self.printer.writelines(
            "__M_writer(context.get('local')."
            "get_cached(%s, defname=%r, %screatefunc=lambda:__M_%s(%s)))" %
            (cachekey, name,
            ''.join(["%s=%s, " % (k,v) for k, v in cacheargs.iteritems()]),
            name, ','.join(pass_args)),
            "return ''",
            None
        )
def create_filter_callable(self, args, target, is_expression):
    """write a filter-applying expression based on the filters
    present in the given filter names, adjusting for the global
    'default' filter aliases as needed.

    Fix: removed a dead local assignment (``x = e``) that served no
    purpose in the non-parenthesized branch.
    """
    def locate_encode(name):
        # map a filter alias to its full "filters.x" expression,
        # honoring the disable_unicode compiler flag
        if re.match(r'decode\..+', name):
            return "filters." + name
        elif self.compiler.disable_unicode:
            return filters.NON_UNICODE_ESCAPES.get(name, name)
        else:
            return filters.DEFAULT_ESCAPES.get(name, name)

    if 'n' not in args:
        # 'n' disables the implicit page-level/default filters
        if is_expression:
            if self.compiler.pagetag:
                args = self.compiler.pagetag.filter_args.args + args
            if self.compiler.default_filters:
                args = self.compiler.default_filters + args
    for e in args:
        # if filter given as a function, get just the identifier portion
        if e == 'n':
            continue
        m = re.match(r'(.+?)(\(.*\))', e)
        if m:
            (ident, fargs) = m.group(1, 2)
            f = locate_encode(ident)
            e = f + fargs
        else:
            e = locate_encode(e)
            assert e is not None
        # wrap the accumulating target in each successive filter call
        target = "%s(%s)" % (e, target)
    return target
def visitExpression(self, node):
    """write an ${...} expression substitution, applying filters as needed."""
    self.write_source_comment(node)
    # filters apply when the expression carries escapes, or when
    # page-level or compiler-default filters are configured
    has_filters = (
        len(node.escapes) or
        (self.compiler.pagetag is not None and
            len(self.compiler.pagetag.filter_args.args)) or
        len(self.compiler.default_filters)
    )
    if has_filters:
        s = self.create_filter_callable(
            node.escapes_code.args, "%s" % node.text, True)
        self.printer.writeline("__M_writer(%s)" % s)
    else:
        self.printer.writeline("__M_writer(%s)" % node.text)
def visitControlLine(self, node):
    """render a '% if x:'-style control line, or its closing '% end...'."""
    if not node.isend:
        self.write_source_comment(node)
        self.printer.writeline(node.text)
        return
    # closing line: an empty block still needs a 'pass' to stay valid python
    if not node.get_children():
        self.printer.writeline("pass")
    self.printer.writeline(None)
def visitText(self, node):
    """write a literal chunk of template text to the output writer."""
    self.write_source_comment(node)
    literal = repr(node.content)
    self.printer.writeline("__M_writer(%s)" % literal)
def visitTextTag(self, node):
    """render a <%text> tag, capturing its output when filters apply."""
    filtered = bool(node.filter_args.args)
    if filtered:
        # swap in a capturing writer so the content can be filtered whole
        self.printer.writelines(
            "__M_writer = context._push_writer()",
            "try:",
        )
    for child in node.nodes:
        child.accept_visitor(self)
    if filtered:
        self.printer.writelines(
            "finally:",
            "__M_buf, __M_writer = context._pop_buffer_and_writer()",
            "__M_writer(%s)" %
            self.create_filter_callable(
                node.filter_args.args,
                "__M_buf.getvalue()",
                False),
            None
        )
def visitCode(self, node):
    """render a <% %> code block inline; module-level (<%! %>) blocks
    are emitted elsewhere and skipped here."""
    if not node.ismodule:
        self.write_source_comment(node)
        self.printer.write_indented_block(node.text)
        if not self.in_def and len(self.identifiers.locally_assigned) > 0:
            # if we are the "template" def, fudge locally
            # declared/modified variables into the "__M_locals" dictionary,
            # which is used for def calls within the same template,
            # to simulate "enclosing scope"
            self.printer.writeline('__M_locals_builtin_stored = __M_locals_builtin()')
            self.printer.writeline(
                '__M_locals.update(__M_dict_builtin([(__M_key,'
                ' __M_locals_builtin_stored[__M_key]) for '
                '__M_key in [%s] if __M_key in __M_locals_builtin_stored]))' %
                ','.join([repr(x) for x in node.declared_identifiers()]))
def visitIncludeTag(self, node):
    """render an <%include> tag as a runtime file-include call."""
    self.write_source_comment(node)
    args = node.attributes.get('args')
    file_expr = node.parsed_attributes['file']
    if args:
        self.printer.writeline(
            "runtime._include_file(context, %s, _template_uri, %s)" %
            (file_expr, args))
    else:
        self.printer.writeline(
            "runtime._include_file(context, %s, _template_uri)" %
            (file_expr))
def visitNamespaceTag(self, node):
    # namespaces are written out-of-band by the namespace-generation
    # pass; nothing renders at the tag site itself
    pass

def visitDefTag(self, node):
    # defs are rendered as separate callables, not inline at the tag site
    pass
def visitBlockTag(self, node):
    """render a <%block> invocation at the point the tag appears."""
    if node.is_anonymous:
        # anonymous blocks are plain closures; invoke directly
        self.printer.writeline("%s()" % node.funcname)
    else:
        # named block: invoke through context['self'] so template
        # inheritance renders the topmost override, and skip the call
        # here when a parent template defines the same block
        nameargs = node.get_argument_expressions(include_defaults=False)
        nameargs += ['**pageargs']
        self.printer.writeline("if 'parent' not in context._data or "
                                "not hasattr(context._data['parent'], '%s'):"
                                % node.funcname)
        self.printer.writeline("context['self'].%s(%s)" % (node.funcname, ",".join(nameargs)))
        self.printer.writeline("\n")
def visitCallNamespaceTag(self, node):
    """render a <%namespacename:defname> tag; handled exactly like <%call>."""
    # TODO: we can put namespace-specific checks here, such
    # as ensure the given namespace will be imported,
    # pre-import the namespace, etc.
    self.visitCallTag(node)
def visitCallTag(self, node):
    """render a <%call> tag: emit a ccall() factory producing the body()
    callable plus any named defs declared inside the tag, then invoke the
    target expression with a 'caller' namespace pushed on the stack."""
    self.printer.writeline("def ccall(caller):")
    export = ['body']
    callable_identifiers = self.identifiers.branch(node, nested=True)
    body_identifiers = callable_identifiers.branch(node, nested=False)
    # we want the 'caller' passed to ccall to be used
    # for the body() function, but for other non-body()
    # <%def>s within <%call> we want the current caller
    # off the call stack (if any)
    body_identifiers.add_declared('caller')

    self.identifier_stack.append(body_identifiers)
    class DefVisitor(object):
        # local visitor: renders <%def>/<%block> children as inline
        # defs of ccall(), exporting the named ones
        def visitDefTag(s, node):
            s.visitDefOrBase(node)

        def visitBlockTag(s, node):
            s.visitDefOrBase(node)

        def visitDefOrBase(s, node):
            self.write_inline_def(node, callable_identifiers, nested=False)
            if not node.is_anonymous:
                export.append(node.funcname)
            # remove defs that are within the <%call> from the "closuredefs" defined
            # in the body, so they dont render twice
            if node.funcname in body_identifiers.closuredefs:
                del body_identifiers.closuredefs[node.funcname]

    vis = DefVisitor()
    for n in node.nodes:
        n.accept_visitor(vis)
    self.identifier_stack.pop()

    bodyargs = node.body_decl.get_argument_expressions()
    self.printer.writeline("def body(%s):" % ','.join(bodyargs))
    # TODO: figure out best way to specify
    # buffering/nonbuffering (at call time would be better)
    buffered = False
    if buffered:
        self.printer.writelines(
            "context._push_buffer()",
            "try:"
        )
    self.write_variable_declares(body_identifiers)
    self.identifier_stack.append(body_identifiers)

    for n in node.nodes:
        n.accept_visitor(self)
    self.identifier_stack.pop()

    self.write_def_finish(node, buffered, False, False, callstack=False)
    self.printer.writelines(
        None,
        "return [%s]" % (','.join(export)),
        None
    )

    self.printer.writelines(
        # get local reference to current caller, if any
        "__M_caller = context.caller_stack._get_caller()",
        # push on caller for nested call
        "context.caller_stack.nextcaller = "
        "runtime.Namespace('caller', context, callables=ccall(__M_caller))",
        "try:")
    self.write_source_comment(node)
    self.printer.writelines(
        "__M_writer(%s)" % self.create_filter_callable([], node.expression, True),
        "finally:",
        "context.caller_stack.nextcaller = None",
        None
    )
class _Identifiers(object):
    """tracks the status of identifier names as template code is rendered."""

    def __init__(self, node=None, parent=None, nested=False):
        if parent is not None:
            # if we are the branch created in write_namespaces(),
            # we don't share any context from the main body().
            if isinstance(node, parsetree.NamespaceTag):
                self.declared = set()
                self.topleveldefs = util.SetLikeDict()
            else:
                # things that have already been declared
                # in an enclosing namespace (i.e. names we can just use)
                self.declared = set(parent.declared).\
                    union([c.name for c in parent.closuredefs.values()]).\
                    union(parent.locally_declared).\
                    union(parent.argument_declared)

                # if these identifiers correspond to a "nested"
                # scope, it means whatever the parent identifiers
                # had as undeclared will have been declared by that parent,
                # and therefore we have them in our scope.
                if nested:
                    self.declared = self.declared.union(parent.undeclared)

                # top level defs that are available
                self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
        else:
            self.declared = set()
            self.topleveldefs = util.SetLikeDict()

        # things within this level that are referenced before they
        # are declared (e.g. assigned to)
        self.undeclared = set()

        # things that are declared locally. some of these things
        # could be in the "undeclared" list as well if they are
        # referenced before declared
        self.locally_declared = set()

        # assignments made in explicit python blocks.
        # these will be propagated to
        # the context of local def calls.
        self.locally_assigned = set()

        # things that are declared in the argument
        # signature of the def callable
        self.argument_declared = set()

        # closure defs that are defined in this level
        self.closuredefs = util.SetLikeDict()

        self.node = node

        if node is not None:
            node.accept_visitor(self)

    def branch(self, node, **kwargs):
        """create a new Identifiers for a new Node, with
        this Identifiers as the parent."""
        return _Identifiers(node, self, **kwargs)

    @property
    def defs(self):
        # all defs visible at this level, top-level and closure alike
        return set(self.topleveldefs.union(self.closuredefs).values())

    def __repr__(self):
        return "Identifiers(declared=%r, locally_declared=%r, "\
            "undeclared=%r, topleveldefs=%r, closuredefs=%r, argumentdeclared=%r)" %\
            (
                list(self.declared),
                list(self.locally_declared),
                list(self.undeclared),
                [c.name for c in self.topleveldefs.values()],
                [c.name for c in self.closuredefs.values()],
                self.argument_declared)

    def check_declared(self, node):
        """update the state of this Identifiers with the undeclared
        and declared identifiers of the given node."""
        for ident in node.undeclared_identifiers():
            if ident != 'context' and ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)
        for ident in node.declared_identifiers():
            self.locally_declared.add(ident)

    def add_declared(self, ident):
        """mark *ident* as declared, clearing any prior undeclared status."""
        self.declared.add(ident)
        if ident in self.undeclared:
            self.undeclared.remove(ident)

    def visitExpression(self, node):
        self.check_declared(node)

    def visitControlLine(self, node):
        self.check_declared(node)

    def visitCode(self, node):
        if not node.ismodule:
            self.check_declared(node)
            self.locally_assigned = self.locally_assigned.union(node.declared_identifiers())

    def visitNamespaceTag(self, node):
        # only traverse into the sub-elements of a
        # <%namespace> tag if we are the branch created in
        # write_namespaces()
        if self.node is node:
            for n in node.nodes:
                n.accept_visitor(self)

    def _check_name_exists(self, collection, node):
        # register the def/block in *collection*, raising on a
        # conflicting redefinition involving a %block
        existing = collection.get(node.funcname)
        collection[node.funcname] = node
        if existing is not None and \
            existing is not node and \
            (node.is_block or existing.is_block):
            raise exceptions.CompileException(
                "%%def or %%block named '%s' already "
                "exists in this template." %
                node.funcname, **node.exception_kwargs)

    def visitDefTag(self, node):
        if node.is_root() and not node.is_anonymous:
            self._check_name_exists(self.topleveldefs, node)
        elif node is not self.node:
            self._check_name_exists(self.closuredefs, node)

        for ident in node.undeclared_identifiers():
            if ident != 'context' and ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)

        # visit defs only one level deep
        if node is self.node:
            for ident in node.declared_identifiers():
                self.argument_declared.add(ident)

            for n in node.nodes:
                n.accept_visitor(self)

    def visitBlockTag(self, node):
        if node is not self.node and \
            not node.is_anonymous:
            # named blocks may not appear inside defs or <%call> tags
            if isinstance(self.node, parsetree.DefTag):
                raise exceptions.CompileException(
                    "Named block '%s' not allowed inside of def '%s'"
                    % (node.name, self.node.name), **node.exception_kwargs)
            elif isinstance(self.node, (parsetree.CallTag, parsetree.CallNamespaceTag)):
                raise exceptions.CompileException(
                    "Named block '%s' not allowed inside of <%%call> tag"
                    % (node.name, ), **node.exception_kwargs)

        if not node.is_anonymous:
            self._check_name_exists(self.topleveldefs, node)
            self.undeclared.add(node.funcname)
        elif node is not self.node:
            self._check_name_exists(self.closuredefs, node)
        for ident in node.declared_identifiers():
            self.argument_declared.add(ident)
        for n in node.nodes:
            n.accept_visitor(self)

    def visitIncludeTag(self, node):
        self.check_declared(node)

    def visitPageTag(self, node):
        for ident in node.declared_identifiers():
            self.argument_declared.add(ident)
        self.check_declared(node)

    def visitCallNamespaceTag(self, node):
        self.visitCallTag(node)

    def visitCallTag(self, node):
        # visit <%call> bodies only one level deep, like defs
        if node is self.node:
            for ident in node.undeclared_identifiers():
                if ident != 'context' and ident not in self.declared.union(self.locally_declared):
                    self.undeclared.add(ident)
            for ident in node.declared_identifiers():
                self.argument_declared.add(ident)
            for n in node.nodes:
                n.accept_visitor(self)
        else:
            for ident in node.undeclared_identifiers():
                if ident != 'context' and ident not in self.declared.union(self.locally_declared):
                    self.undeclared.add(ident)
| Python |
# mako/util.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
py3k = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0)
py24 = sys.version_info >= (2, 4) and sys.version_info < (2, 5)
jython = sys.platform.startswith('java')
win32 = sys.platform.startswith('win')
if py3k:
from io import StringIO
else:
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
import codecs, re, weakref, os, time, operator
import collections
try:
import threading
import thread
except ImportError:
import dummy_threading as threading
import dummy_thread as thread
if win32 or jython:
time_func = time.clock
else:
time_func = time.time
def function_named(fn, name):
    """Return a function with a given __name__.

    Will assign to __name__ and return the original function if possible on
    the Python implementation, otherwise a new function will be constructed.
    """
    # CPython allows assigning __name__ directly; no copy is needed here
    fn.__name__ = name
    return fn
# functools.partial is available from Python 2.5; supply a pure-python
# equivalent on older interpreters.  Catch ImportError specifically —
# the previous bare except: also swallowed SystemExit/KeyboardInterrupt.
try:
    from functools import partial
except ImportError:
    def partial(func, *args, **keywords):
        """minimal stand-in for functools.partial (pre-2.5 Pythons)."""
        def newfunc(*fargs, **fkeywords):
            newkeywords = keywords.copy()
            newkeywords.update(fkeywords)
            return func(*(args + fargs), **newkeywords)
        return newfunc
if py24:
    # NOTE(review): the AttributeError fallback presumably handles
    # objects lacking __class__ on Python 2.4 (e.g. old-style exception
    # types passed directly) — confirm before removing
    def exception_name(exc):
        """return the class name of an exception instance."""
        try:
            return exc.__class__.__name__
        except AttributeError:
            return exc.__name__
else:
    def exception_name(exc):
        """return the class name of an exception instance."""
        return exc.__class__.__name__
def verify_directory(dir):
"""create and/or verify a filesystem directory."""
tries = 0
while not os.path.exists(dir):
try:
tries += 1
os.makedirs(dir, 0775)
except:
if tries > 5:
raise
def to_list(x, default=None):
    """coerce *x* to a list-like value.

    None yields *default*; lists and tuples pass through unchanged;
    any other value is wrapped in a single-element list.
    """
    if x is None:
        return default
    if isinstance(x, (list, tuple)):
        return x
    return [x]
class memoized_property(object):
    """A read-only @property that is only evaluated once.

    On first access the computed value is stored in the instance
    __dict__ under the property's name, shadowing this descriptor so
    later lookups never call the function again.
    """
    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            # class-level access returns the descriptor itself
            return self
        value = self.fget(obj)
        obj.__dict__[self.__name__] = value
        return value
class SetLikeDict(dict):
    """a dictionary that has some setlike methods on it"""
    def union(self, other):
        """produce a 'union' of this dict and another (at the key level).
        values in the second dict take precedence over that of the first"""
        merged = SetLikeDict(**self)
        merged.update(other)
        return merged
class FastEncodingBuffer(object):
    """a very rudimentary buffer that is faster than StringIO,
    but doesn't crash on unicode data like cStringIO."""

    def __init__(self, encoding=None, errors='strict', unicode=False):
        self.data = collections.deque()
        self.encoding = encoding
        # the join delimiter decides whether getvalue() produces a
        # unicode or a byte string (pre-encoding)
        if unicode:
            self.delim = u''
        else:
            self.delim = ''
        self.unicode = unicode
        self.errors = errors
        # bind append directly so write() has no method-dispatch overhead
        self.write = self.data.append

    def truncate(self):
        """discard everything buffered so far."""
        self.data = collections.deque()
        self.write = self.data.append

    def getvalue(self):
        """join the buffered fragments, encoding if configured."""
        joined = self.delim.join(self.data)
        if self.encoding:
            return joined.encode(self.encoding, self.errors)
        return joined
class LRUCache(dict):
    """A dictionary-like object that stores a limited number of items, discarding
    lesser used items periodically.

    this is a rewrite of LRUCache from Myghty to use a periodic timestamp-based
    paradigm so that synchronization is not really needed.  the size management
    is inexact.
    """

    class _Item(object):
        # wrapper pairing a value with its last-access timestamp
        def __init__(self, key, value):
            self.key = key
            self.value = value
            self.timestamp = time_func()

        def __repr__(self):
            return repr(self.value)

    def __init__(self, capacity, threshold=.5):
        # capacity: soft maximum item count
        # threshold: fraction of capacity the dict may overflow before
        # a prune pass runs (see _manage_size)
        self.capacity = capacity
        self.threshold = threshold

    def __getitem__(self, key):
        item = dict.__getitem__(self, key)
        # refresh recency; done without locking — a lost update merely
        # makes the item slightly more likely to be pruned
        item.timestamp = time_func()
        return item.value

    def values(self):
        return [i.value for i in dict.values(self)]

    def setdefault(self, key, value):
        if key in self:
            return self[key]
        else:
            self[key] = value
            return value

    def __setitem__(self, key, value):
        item = dict.get(self, key)
        if item is None:
            item = self._Item(key, value)
            dict.__setitem__(self, key, item)
        else:
            item.value = value
        self._manage_size()

    def _manage_size(self):
        # prune only once size exceeds capacity * (1 + threshold),
        # keeping the 'capacity' most recently used items
        while len(self) > self.capacity + self.capacity * self.threshold:
            bytime = sorted(dict.values(self),
                            key=operator.attrgetter('timestamp'), reverse=True)
            for item in bytime[self.capacity:]:
                try:
                    del self[item.key]
                except KeyError:
                    # if we couldnt find a key, most likely some other thread broke in
                    # on us. loop around and try again
                    break
# Regexp to match python magic encoding line
_PYTHON_MAGIC_COMMENT_re = re.compile(
    r'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)',
    re.VERBOSE)

def parse_encoding(fp):
    """Deduce the encoding of a Python source file (binary mode) from magic comment.

    It does this in the same way as the `Python interpreter`__

    .. __: http://docs.python.org/ref/encodings.html

    The ``fp`` argument should be a seekable file object in binary mode.
    """
    pos = fp.tell()
    fp.seek(0)
    try:
        line1 = fp.readline()
        has_bom = line1.startswith(codecs.BOM_UTF8)
        if has_bom:
            line1 = line1[len(codecs.BOM_UTF8):]
        m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode('ascii', 'ignore'))
        if not m:
            # no magic comment on line 1: python also checks line 2,
            # but only when line 1 parses as a complete statement
            try:
                import parser
                parser.suite(line1.decode('ascii', 'ignore'))
            except (ImportError, SyntaxError):
                # Either it's a real syntax error, in which case the source
                # is not valid python source, or line2 is a continuation of
                # line1, in which case we don't want to scan line2 for a magic
                # comment.
                pass
            else:
                line2 = fp.readline()
                m = _PYTHON_MAGIC_COMMENT_re.match(line2.decode('ascii', 'ignore'))
        if has_bom:
            if m:
                # a UTF-8 BOM together with a coding comment is rejected,
                # mirroring CPython's behavior
                raise SyntaxError, \
                    "python refuses to compile code with both a UTF8" \
                    " byte-order-mark and a magic encoding comment"
            return 'utf_8'
        elif m:
            return m.group(1)
        else:
            return None
    finally:
        # always restore the caller's file position
        fp.seek(pos)
def sorted_dict_repr(d):
    """repr() a dictionary with the keys in order.

    Used by the lexer unit test to compare parse trees based on strings.
    """
    # sorted() copies the keys, so this works identically whether
    # d.keys() returns a list (py2) or a view (py3), and avoids the
    # needless mutable intermediate list
    return "{" + ", ".join(["%r: %r" % (k, d[k]) for k in sorted(d)]) + "}"
def restore__ast(_ast):
    """Attempt to restore the required classes to the _ast module if it
    appears to be missing them
    """
    if hasattr(_ast, 'AST'):
        return
    _ast.PyCF_ONLY_AST = 2 << 9
    # compile a snippet exercising every node type we need, then pull
    # the concrete classes back out by position.  The attribute paths
    # below depend on the exact statement order of this snippet.
    m = compile("""\
def foo(): pass
class Bar(object): pass
if False: pass
baz = 'mako'
1 + 2 - 3 * 4 / 5
6 // 7 % 8 << 9 >> 10
11 & 12 ^ 13 | 14
15 and 16 or 17
-baz + (not +18) - ~17
baz and 'foo' or 'bar'
(mako is baz == baz) is not baz != mako
mako > baz < mako >= baz <= mako
mako in baz not in mako""", '<unknown>', 'exec', _ast.PyCF_ONLY_AST)
    _ast.Module = type(m)

    for cls in _ast.Module.__mro__:
        if cls.__name__ == 'mod':
            _ast.mod = cls
        elif cls.__name__ == 'AST':
            _ast.AST = cls

    # statement node types
    _ast.FunctionDef = type(m.body[0])
    _ast.ClassDef = type(m.body[1])
    _ast.If = type(m.body[2])

    # assignment / name nodes
    _ast.Name = type(m.body[3].targets[0])
    _ast.Store = type(m.body[3].targets[0].ctx)
    _ast.Str = type(m.body[3].value)

    # arithmetic operators
    _ast.Sub = type(m.body[4].value.op)
    _ast.Add = type(m.body[4].value.left.op)
    _ast.Div = type(m.body[4].value.right.op)
    _ast.Mult = type(m.body[4].value.right.left.op)

    _ast.RShift = type(m.body[5].value.op)
    _ast.LShift = type(m.body[5].value.left.op)
    _ast.Mod = type(m.body[5].value.left.left.op)
    _ast.FloorDiv = type(m.body[5].value.left.left.left.op)

    # bitwise operators
    _ast.BitOr = type(m.body[6].value.op)
    _ast.BitXor = type(m.body[6].value.left.op)
    _ast.BitAnd = type(m.body[6].value.left.left.op)

    _ast.Or = type(m.body[7].value.op)
    _ast.And = type(m.body[7].value.values[0].op)

    # unary operators
    _ast.Invert = type(m.body[8].value.right.op)
    _ast.Not = type(m.body[8].value.left.right.op)
    _ast.UAdd = type(m.body[8].value.left.right.operand.op)
    _ast.USub = type(m.body[8].value.left.left.op)

    _ast.Or = type(m.body[9].value.op)
    _ast.And = type(m.body[9].value.values[0].op)

    # comparison operators
    _ast.IsNot = type(m.body[10].value.ops[0])
    _ast.NotEq = type(m.body[10].value.ops[1])
    _ast.Is = type(m.body[10].value.left.ops[0])
    _ast.Eq = type(m.body[10].value.left.ops[1])

    _ast.Gt = type(m.body[11].value.ops[0])
    _ast.Lt = type(m.body[11].value.ops[1])
    _ast.GtE = type(m.body[11].value.ops[2])
    _ast.LtE = type(m.body[11].value.ops[3])

    _ast.In = type(m.body[12].value.ops[0])
    _ast.NotIn = type(m.body[12].value.ops[1])
try:
    from inspect import CO_VARKEYWORDS, CO_VARARGS

    def inspect_func_args(fn):
        """return (args, varargs, varkw, defaults) for *fn*, reading the
        code object directly (cheaper than inspect.getargspec)."""
        # NOTE(review): fn.func_code / fn.func_defaults are Python 2
        # attribute names (py3 uses __code__/__defaults__)
        co = fn.func_code
        nargs = co.co_argcount
        names = co.co_varnames
        args = list(names[:nargs])
        varargs = None
        if co.co_flags & CO_VARARGS:
            # *args name follows the positional arg names
            varargs = co.co_varnames[nargs]
            nargs = nargs + 1
        varkw = None
        if co.co_flags & CO_VARKEYWORDS:
            # **kwargs name follows *args (if present)
            varkw = co.co_varnames[nargs]
        return args, varargs, varkw, fn.func_defaults
except ImportError:
    # flags unavailable on this implementation; fall back to inspect
    import inspect

    def inspect_func_args(fn):
        return inspect.getargspec(fn)
| Python |
# mako/ast.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for analyzing expressions and blocks of Python
code, as well as generating Python from AST nodes"""
from mako import exceptions, pyparser, util
import re
class PythonCode(object):
    """represents information about a string containing Python code"""
    def __init__(self, code, **exception_kwargs):
        self.code = code

        # represents all identifiers which are assigned to at some point in the code
        self.declared_identifiers = set()

        # represents all identifiers which are referenced before their assignment, if any
        self.undeclared_identifiers = set()

        # note that an identifier can be in both the undeclared and declared lists.

        # using AST to parse instead of using code.co_varnames,
        # code.co_names has several advantages:
        # - we can locate an identifier as "undeclared" even if
        # its declared later in the same block of code
        # - AST is less likely to break with version changes
        # (for example, the behavior of co_names changed a little bit
        # in python version 2.5)
        if isinstance(code, basestring):
            expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs)
        else:
            # already-parsed AST may be passed in directly
            expr = code

        # populate the declared/undeclared sets via the AST visitor
        f = pyparser.FindIdentifiers(self, **exception_kwargs)
        f.visit(expr)
class ArgumentList(object):
    """parses a fragment of code as a comma-separated list of expressions"""
    def __init__(self, code, **exception_kwargs):
        # raw source text of each expression
        self.codeargs = []
        # regenerated source of each expression
        self.args = []
        self.declared_identifiers = set()
        self.undeclared_identifiers = set()
        if isinstance(code, basestring):
            if re.match(r"\S", code) and not re.match(r",\s*$", code):
                # if theres text and no trailing comma, insure its parsed
                # as a tuple by adding a trailing comma
                code += ","
            expr = pyparser.parse(code, "exec", **exception_kwargs)
        else:
            # already-parsed AST may be passed in directly
            expr = code

        f = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
        f.visit(expr)
class PythonFragment(PythonCode):
    """extends PythonCode to provide identifier lookups in partial control statements

    e.g.
        for x in 5:
        elif y==9:
        except (MyException, e):
    etc.
    """
    def __init__(self, code, **exception_kwargs):
        match = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S)
        if not match:
            raise exceptions.CompileException(
                "Fragment '%s' is not a partial control statement" %
                code, **exception_kwargs)
        if match.group(3):
            # strip a trailing comment
            code = code[:match.start(3)]
        keyword, expr = match.group(1, 2)
        # pad the fragment out into a complete, parseable statement
        if keyword in ('for', 'if', 'while'):
            code = code + "pass"
        elif keyword == 'try':
            code = code + "pass\nexcept:pass"
        elif keyword in ('elif', 'else'):
            code = "if False:pass\n" + code + "pass"
        elif keyword == 'except':
            code = "try:pass\n" + code + "pass"
        else:
            raise exceptions.CompileException(
                "Unsupported control keyword: '%s'" %
                keyword, **exception_kwargs)
        super(PythonFragment, self).__init__(code, **exception_kwargs)
class FunctionDecl(object):
    """function declaration"""
    def __init__(self, code, allow_kwargs=True, **exception_kwargs):
        self.code = code
        expr = pyparser.parse(code, "exec", **exception_kwargs)

        # ParseFunc sets funcname/argnames/defaults/varargs/kwargs on self
        f = pyparser.ParseFunc(self, **exception_kwargs)
        f.visit(expr)
        if not hasattr(self, 'funcname'):
            raise exceptions.CompileException(
                "Code '%s' is not a function declaration" % code,
                **exception_kwargs)
        if not allow_kwargs and self.kwargs:
            raise exceptions.CompileException(
                "'**%s' keyword argument not allowed here" %
                self.argnames[-1], **exception_kwargs)

    def get_argument_expressions(self, include_defaults=True):
        """return the argument declarations of this FunctionDecl as a printable list."""
        namedecls = []
        defaults = [d for d in self.defaults]
        kwargs = self.kwargs
        varargs = self.varargs
        argnames = [f for f in self.argnames]

        # walk the names right-to-left: defaults align with the
        # rightmost positional arguments, and **kwargs/*args (when
        # present) are the last names in the list
        argnames.reverse()
        for arg in argnames:
            default = None
            if kwargs:
                arg = "**" + arg
                kwargs = False
            elif varargs:
                arg = "*" + arg
                varargs = False
            else:
                # pop the matching default, if any remain
                default = len(defaults) and defaults.pop() or None

            if include_defaults and default:
                namedecls.insert(0, "%s=%s" %
                    (arg,
                    pyparser.ExpressionGenerator(default).value()
                    )
                )
            else:
                namedecls.insert(0, arg)
        return namedecls
class FunctionArgs(FunctionDecl):
    """the argument portion of a function declaration"""

    def __init__(self, code, **kwargs):
        # wrap the bare argument list in a throwaway def so the
        # FunctionDecl machinery can parse it
        super(FunctionArgs, self).__init__(
            "def ANON(%s):pass" % code, **kwargs)
| Python |
# mako/pyparser.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handles parsing of Python code.
Parsing to AST is done via _ast on Python > 2.5, otherwise the compiler
module is used.
"""
from StringIO import StringIO
from mako import exceptions, util
import operator
if util.py3k:
    # words that cannot be assigned to (notably
    # smaller than the total keys in __builtins__)
    reserved = set(['True', 'False', 'None', 'print'])

    # the "id" attribute on a function node
    arg_id = operator.attrgetter('arg')
else:
    # words that cannot be assigned to (notably
    # smaller than the total keys in __builtins__)
    reserved = set(['True', 'False', 'None'])

    # the "id" attribute on a function node
    arg_id = operator.attrgetter('id')

try:
    import _ast
    util.restore__ast(_ast)
    import _ast_util
except ImportError:
    # _ast unavailable (Python < 2.5): fall back to the deprecated
    # 'compiler' package; _ast is set to None so parse() can branch
    _ast = None
    from compiler import parse as compiler_parse
    from compiler import visitor
def parse(code, mode='exec', **exception_kwargs):
    """Parse an expression into AST"""
    try:
        if _ast:
            return _ast_util.parse(code, '<unknown>', mode)
        else:
            if isinstance(code, unicode):
                # NOTE(review): the compiler package presumably can't
                # accept unicode source — confirm; escapes keep the
                # text round-trippable
                code = code.encode('ascii', 'backslashreplace')
            return compiler_parse(code, mode)
    except Exception, e:
        # re-raise any parse failure as a template-aware SyntaxException,
        # including a snippet of the offending source
        raise exceptions.SyntaxException(
            "(%s) %s (%r)" % (
                e.__class__.__name__,
                e,
                code[0:50]
            ), **exception_kwargs)
if _ast:
class FindIdentifiers(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
    # true while traversing a function/lambda body; names assigned
    # there are function locals, not template-level declarations
    self.in_function = False
    self.in_assign_targets = False
    # names currently bound as enclosing-function arguments
    self.local_ident_stack = {}
    # the PythonCode collecting declared/undeclared identifier sets
    self.listener = listener
    self.exception_kwargs = exception_kwargs

def _add_declared(self, name):
    # record a declared name, unless inside a function body
    if not self.in_function:
        self.listener.declared_identifiers.add(name)

def visit_ClassDef(self, node):
    self._add_declared(node.name)

def visit_Assign(self, node):
    # flip around the visiting of Assign so the expression gets
    # evaluated first, in the case of a clause like "x=x+5" (x
    # is undeclared)
    self.visit(node.value)
    in_a = self.in_assign_targets
    self.in_assign_targets = True
    for n in node.targets:
        self.visit(n)
    self.in_assign_targets = in_a
if util.py3k:
    # ExceptHandler is in Python 2, but this block only works in
    # Python 3 (and is required there)
    def visit_ExceptHandler(self, node):
        if node.name is not None:
            # "except E as name": 'name' becomes a declared identifier
            self._add_declared(node.name)
        if node.type is not None:
            # the exception type expression is a referenced name
            self.listener.undeclared_identifiers.add(node.type.id)
        for statement in node.body:
            self.visit(statement)
def visit_Lambda(self, node, *args):
self._visit_function(node, True)
def visit_FunctionDef(self, node):
self._add_declared(node.name)
self._visit_function(node, False)
def _visit_function(self, node, islambda):
# push function state onto stack. dont log any more
# identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared". track
# argument names in each function header so they arent
# counted as "undeclared"
saved = {}
inf = self.in_function
self.in_function = True
for arg in node.args.args:
if arg_id(arg) in self.local_ident_stack:
saved[arg_id(arg)] = True
else:
self.local_ident_stack[arg_id(arg)] = True
if islambda:
self.visit(node.body)
else:
for n in node.body:
self.visit(n)
self.in_function = inf
for arg in node.args.args:
if arg_id(arg) not in saved:
del self.local_ident_stack[arg_id(arg)]
def visit_For(self, node):
# flip around visit
self.visit(node.iter)
self.visit(node.target)
for statement in node.body:
self.visit(statement)
for statement in node.orelse:
self.visit(statement)
def visit_Name(self, node):
if isinstance(node.ctx, _ast.Store):
self._add_declared(node.id)
if node.id not in reserved and node.id \
not in self.listener.declared_identifiers and node.id \
not in self.local_ident_stack:
self.listener.undeclared_identifiers.add(node.id)
def visit_Import(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
self._add_declared(name.name.split('.')[0])
def visit_ImportFrom(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
if name.name == '*':
raise exceptions.CompileException(
"'import *' is not supported, since all identifier "
"names must be explicitly declared. Please use the "
"form 'from <modulename> import <name1>, <name2>, "
"...' instead.", **self.exception_kwargs)
self._add_declared(name.name)
class FindTuple(_ast_util.NodeVisitor):
    """visits Tuple nodes, building a parsed-code object for each
    element via ``code_factory`` and merging each element's
    declared/undeclared identifier sets into the listener's."""

    def __init__(self, listener, code_factory, **exception_kwargs):
        self.listener = listener
        self.exception_kwargs = exception_kwargs
        # callable producing a parsed-code object from an AST node
        self.code_factory = code_factory

    def visit_Tuple(self, node):
        for n in node.elts:
            p = self.code_factory(n, **self.exception_kwargs)
            self.listener.codeargs.append(p)
            # also record the literal source text of the element
            self.listener.args.append(ExpressionGenerator(n).value())
            self.listener.declared_identifiers = \
                self.listener.declared_identifiers.union(
                    p.declared_identifiers)
            self.listener.undeclared_identifiers = \
                self.listener.undeclared_identifiers.union(
                    p.undeclared_identifiers)
class ParseFunc(_ast_util.NodeVisitor):
    """records the name, argument names, defaults, vararg and kwarg
    of a function definition onto the listener."""

    def __init__(self, listener, **exception_kwargs):
        self.listener = listener
        self.exception_kwargs = exception_kwargs

    def visit_FunctionDef(self, node):
        self.listener.funcname = node.name
        argnames = [arg_id(arg) for arg in node.args.args]
        # *args / **kwargs names are appended to the plain argument list
        if node.args.vararg:
            argnames.append(node.args.vararg)
        if node.args.kwarg:
            argnames.append(node.args.kwarg)
        self.listener.argnames = argnames
        self.listener.defaults = node.args.defaults  # ast
        self.listener.varargs = node.args.vararg
        self.listener.kwargs = node.args.kwarg
class ExpressionGenerator(object):
    """given an AST node, generates an equivalent literal Python
    expression via _ast_util's source generator."""

    def __init__(self, astnode):
        self.generator = _ast_util.SourceGenerator(' ' * 4)
        self.generator.visit(astnode)

    def value(self):
        # the generator accumulates source fragments in .result
        return ''.join(self.generator.result)
else:
class FindIdentifiers(object):
    """compiler-package fallback of the identifier visitor; reports
    declared vs. undeclared identifiers to a ``listener`` (see the
    _ast version above for the listener contract)."""

    def __init__(self, listener, **exception_kwargs):
        # True while inside a function/lambda body
        self.in_function = False
        # names currently bound as function arguments (used as a set)
        self.local_ident_stack = {}
        self.listener = listener
        self.exception_kwargs = exception_kwargs

    def _add_declared(self, name):
        # only module-scope declarations are reported
        if not self.in_function:
            self.listener.declared_identifiers.add(name)

    def visitClass(self, node, *args):
        self._add_declared(node.name)

    def visitAssName(self, node, *args):
        self._add_declared(node.name)

    def visitAssign(self, node, *args):
        # flip around the visiting of Assign so the expression gets
        # evaluated first, in the case of a clause like "x=x+5" (x
        # is undeclared)
        self.visit(node.expr, *args)
        for n in node.nodes:
            self.visit(n, *args)

    def visitLambda(self, node, *args):
        self._visit_function(node, args)

    def visitFunction(self, node, *args):
        self._add_declared(node.name)
        self._visit_function(node, args)

    def _visit_function(self, node, args):
        # push function state onto stack. dont log any more
        # identifiers as "declared" until outside of the function,
        # but keep logging identifiers as "undeclared". track
        # argument names in each function header so they arent
        # counted as "undeclared"
        saved = {}
        inf = self.in_function
        self.in_function = True
        for arg in node.argnames:
            if arg in self.local_ident_stack:
                # already bound by an enclosing function; don't pop later
                saved[arg] = True
            else:
                self.local_ident_stack[arg] = True
        for n in node.getChildNodes():
            self.visit(n, *args)
        self.in_function = inf
        # pop only the argument names this function introduced
        for arg in node.argnames:
            if arg not in saved:
                del self.local_ident_stack[arg]

    def visitFor(self, node, *args):
        # flip around visit: iterable before assignment target
        self.visit(node.list, *args)
        self.visit(node.assign, *args)
        self.visit(node.body, *args)

    def visitName(self, node, *args):
        if node.name not in reserved and node.name \
            not in self.listener.declared_identifiers and node.name \
            not in self.local_ident_stack:
            self.listener.undeclared_identifiers.add(node.name)

    def visitImport(self, node, *args):
        for mod, alias in node.names:
            if alias is not None:
                self._add_declared(alias)
            else:
                # "import a.b" declares only the top-level package "a"
                self._add_declared(mod.split('.')[0])

    def visitFrom(self, node, *args):
        for mod, alias in node.names:
            if alias is not None:
                self._add_declared(alias)
            else:
                if mod == '*':
                    raise exceptions.CompileException(
                        "'import *' is not supported, since all identifier "
                        "names must be explicitly declared. Please use the "
                        "form 'from <modulename> import <name1>, <name2>, "
                        "...' instead.", **self.exception_kwargs)
                self._add_declared(mod)

    def visit(self, expr):
        visitor.walk(expr, self)  # , walker=walker())
class FindTuple(object):
    """compiler-package fallback of the Tuple visitor; see the _ast
    version above for the listener contract."""

    def __init__(self, listener, code_factory, **exception_kwargs):
        self.listener = listener
        self.exception_kwargs = exception_kwargs
        # callable producing a parsed-code object from an AST node
        self.code_factory = code_factory

    def visitTuple(self, node, *args):
        for n in node.nodes:
            p = self.code_factory(n, **self.exception_kwargs)
            self.listener.codeargs.append(p)
            # also record the literal source text of the element
            self.listener.args.append(ExpressionGenerator(n).value())
            self.listener.declared_identifiers = \
                self.listener.declared_identifiers.union(p.declared_identifiers)
            self.listener.undeclared_identifiers = \
                self.listener.undeclared_identifiers.union(p.undeclared_identifiers)

    def visit(self, expr):
        visitor.walk(expr, self)  # , walker=walker())
class ParseFunc(object):
    """compiler-package fallback: records the name, argument names,
    defaults, varargs and kwargs of a function definition onto the
    listener."""

    def __init__(self, listener, **exception_kwargs):
        self.listener = listener
        self.exception_kwargs = exception_kwargs

    def visitFunction(self, node, *args):
        self.listener.funcname = node.name
        self.listener.argnames = node.argnames
        self.listener.defaults = node.defaults
        self.listener.varargs = node.varargs
        self.listener.kwargs = node.kwargs

    def visit(self, expr):
        visitor.walk(expr, self)
class ExpressionGenerator(object):
    """given an AST node, generates an equivalent literal Python
    expression.

    This is the compiler-package fallback used when _ast is not
    available; output is accumulated into a StringIO buffer and
    retrieved via :meth:`value`.
    """

    def __init__(self, astnode):
        self.buf = StringIO()
        visitor.walk(astnode, self)  # , walker=walker())

    def value(self):
        """return the reconstructed expression text."""
        return self.buf.getvalue()

    def operator(self, op, node, *args):
        # binary operator, parenthesized to preserve precedence
        self.buf.write('(')
        self.visit(node.left, *args)
        self.buf.write(' %s ' % op)
        self.visit(node.right, *args)
        self.buf.write(')')

    def booleanop(self, op, node, *args):
        # n-ary operator such as "a and b and c"
        self.visit(node.nodes[0])
        for n in node.nodes[1:]:
            self.buf.write(' ' + op + ' ')
            self.visit(n, *args)

    def visitConst(self, node, *args):
        self.buf.write(repr(node.value))

    def visitAssName(self, node, *args):
        # TODO: figure out OP_ASSIGN, other OP_s
        self.buf.write(node.name)

    def visitName(self, node, *args):
        self.buf.write(node.name)

    def visitMul(self, node, *args):
        self.operator('*', node, *args)

    def visitAnd(self, node, *args):
        self.booleanop('and', node, *args)

    def visitOr(self, node, *args):
        self.booleanop('or', node, *args)

    def visitBitand(self, node, *args):
        self.booleanop('&', node, *args)

    def visitBitor(self, node, *args):
        self.booleanop('|', node, *args)

    def visitBitxor(self, node, *args):
        self.booleanop('^', node, *args)

    def visitAdd(self, node, *args):
        self.operator('+', node, *args)

    def visitGetattr(self, node, *args):
        self.visit(node.expr, *args)
        self.buf.write('.%s' % node.attrname)

    def visitSub(self, node, *args):
        self.operator('-', node, *args)

    def visitNot(self, node, *args):
        self.buf.write('not ')
        self.visit(node.expr)

    def visitDiv(self, node, *args):
        self.operator('/', node, *args)

    def visitFloorDiv(self, node, *args):
        self.operator('//', node, *args)

    def visitSubscript(self, node, *args):
        self.visit(node.expr)
        self.buf.write('[')
        # bugfix: separate multiple subscripts with commas; "a[x, y]"
        # previously regenerated as "a[xy]"
        for i, sub in enumerate(node.subs):
            if i > 0:
                self.buf.write(', ')
            self.visit(sub)
        self.buf.write(']')

    def visitUnarySub(self, node, *args):
        self.buf.write('-')
        self.visit(node.expr)

    def visitUnaryAdd(self, node, *args):
        # bugfix: unary plus previously emitted '-', negating the operand
        self.buf.write('+')
        self.visit(node.expr)

    def visitSlice(self, node, *args):
        self.visit(node.expr)
        self.buf.write('[')
        if node.lower is not None:
            self.visit(node.lower)
        self.buf.write(':')
        if node.upper is not None:
            self.visit(node.upper)
        self.buf.write(']')

    def visitDict(self, node):
        self.buf.write('{')
        # children alternate key, value, key, value, ...
        c = node.getChildren()
        for i in range(0, len(c), 2):
            self.visit(c[i])
            self.buf.write(': ')
            self.visit(c[i + 1])
            if i < len(c) - 2:
                self.buf.write(', ')
        self.buf.write('}')

    def visitTuple(self, node):
        self.buf.write('(')
        c = node.getChildren()
        for i in range(0, len(c)):
            self.visit(c[i])
            if i < len(c) - 1:
                self.buf.write(', ')
        self.buf.write(')')

    def visitList(self, node):
        self.buf.write('[')
        c = node.getChildren()
        for i in range(0, len(c)):
            self.visit(c[i])
            if i < len(c) - 1:
                self.buf.write(', ')
        self.buf.write(']')

    def visitListComp(self, node):
        self.buf.write('[')
        self.visit(node.expr)
        self.buf.write(' ')
        for n in node.quals:
            self.visit(n)
        self.buf.write(']')

    def visitListCompFor(self, node):
        self.buf.write(' for ')
        self.visit(node.assign)
        self.buf.write(' in ')
        self.visit(node.list)
        for n in node.ifs:
            self.visit(n)

    def visitListCompIf(self, node):
        self.buf.write(' if ')
        self.visit(node.test)

    def visitCompare(self, node):
        self.visit(node.expr)
        for op, operand in node.ops:
            # bugfix: pad the operator with spaces; word operators such
            # as "in", "not in" and "is" previously fused with their
            # operands ("x in y" regenerated as "xin y")
            self.buf.write(' %s ' % op)
            self.visit(operand)

    def visitCallFunc(self, node, *args):
        # NOTE(review): only positional arguments are regenerated here;
        # keyword/star args are not visible in this code path — confirm
        # against the expressions mako actually feeds through
        self.visit(node.node)
        self.buf.write('(')
        if len(node.args):
            self.visit(node.args[0])
            for a in node.args[1:]:
                self.buf.write(', ')
                self.visit(a)
        self.buf.write(')')
class walker(visitor.ASTVisitor):
    """debugging visitor which prints each node as it is dispatched;
    wire in via the commented-out ``walker=walker()`` calls above."""

    def dispatch(self, node, *args):
        print 'Node:', str(node)
        # print "dir:", dir(node)
        return visitor.ASTVisitor.dispatch(self, node, *args)
| Python |
# mako/exceptions.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""exception classes"""
import traceback, sys, re
from mako import util
class MakoException(Exception):
    """base class for all Mako exceptions."""
    pass
class RuntimeException(MakoException):
    """generic Mako runtime error."""
    pass
def _format_filepos(lineno, pos, filename):
if filename is None:
return " at line: %d char: %d" % (lineno, pos)
else:
return " in file '%s' at line: %d char: %d" % (filename, lineno, pos)
class CompileException(MakoException):
    """raised for errors during template compilation; the message is
    suffixed with the line/char position and filename, and the full
    template source is retained for error rendering."""

    def __init__(self, message, source, lineno, pos, filename):
        MakoException.__init__(
            self, message + _format_filepos(lineno, pos, filename))
        self.source = source
        self.filename = filename
        self.lineno = lineno
        self.pos = pos
class SyntaxException(MakoException):
    """raised for syntax errors in template or embedded Python code;
    the message is suffixed with the line/char position and filename,
    and the full template source is retained for error rendering."""

    def __init__(self, message, source, lineno, pos, filename):
        MakoException.__init__(
            self, message + _format_filepos(lineno, pos, filename))
        self.source = source
        self.filename = filename
        self.lineno = lineno
        self.pos = pos
class UnsupportedError(MakoException):
    """raised when a retired feature is used."""
class TemplateLookupException(MakoException):
    """base for errors raised while looking up a template."""
    pass
class TopLevelLookupException(TemplateLookupException):
    """raised when a top-level template lookup fails."""
    pass
class RichTraceback(object):
    """Pulls the current exception from the sys traceback and extracts
    Mako-specific template information.

    See the usage examples in :ref:`handling_exceptions`.

    """

    def __init__(self, error=None, traceback=None):
        self.source, self.lineno = "", 0

        # default to the exception currently being handled
        if error is None or traceback is None:
            t, value, tback = sys.exc_info()

        if error is None:
            error = value or t

        if traceback is None:
            traceback = tback

        self.error = error
        self.records = self._init(traceback)

        # compile/syntax errors carry their own source and position
        if isinstance(self.error, (CompileException, SyntaxException)):
            import mako.template
            self.source = self.error.source
            self.lineno = self.error.lineno
            self._has_source = True

        self._init_message()

    @property
    def errorname(self):
        return util.exception_name(self.error)

    def _init_message(self):
        """Find a unicode representation of self.error"""
        try:
            self.message = unicode(self.error)
        except UnicodeError:
            try:
                self.message = str(self.error)
            except UnicodeEncodeError:
                # Fallback to args as neither unicode nor
                # str(Exception(u'\xe6')) work in Python < 2.6
                self.message = self.error.args[0]
        if not isinstance(self.message, unicode):
            self.message = unicode(self.message, 'ascii', 'replace')

    def _get_reformatted_records(self, records):
        for rec in records:
            # rec[6] (the template source line) is present only for
            # records that map back to a template
            if rec[6] is not None:
                yield (rec[4], rec[5], rec[2], rec[6])
            else:
                yield tuple(rec[0:4])

    @property
    def traceback(self):
        """return a list of 4-tuple traceback records (i.e. normal python
        format) with template-corresponding lines remapped to the originating
        template.

        """
        return list(self._get_reformatted_records(self.records))

    @property
    def reverse_records(self):
        return reversed(self.records)

    @property
    def reverse_traceback(self):
        """return the same data as traceback, except in reverse order.
        """
        return list(self._get_reformatted_records(self.reverse_records))

    def _init(self, trcback):
        """format a traceback from sys.exc_info() into 7-item tuples,
        containing the regular four traceback tuple items, plus the original
        template filename, the line number adjusted relative to the template
        source, and code line from that line number of the template."""
        import mako.template
        mods = {}
        rawrecords = traceback.extract_tb(trcback)
        new_trcback = []
        for filename, lineno, function, line in rawrecords:
            if not line:
                line = ''

            # cache the line-number map per generated-module filename
            try:
                (line_map, template_lines) = mods[filename]
            except KeyError:
                try:
                    info = mako.template._get_module_info(filename)
                    module_source = info.code
                    template_source = info.source
                    template_filename = info.template_filename or filename
                except KeyError:
                    # A normal .py file (not a Template)
                    if not util.py3k:
                        try:
                            fp = open(filename, 'rb')
                            encoding = util.parse_encoding(fp)
                            fp.close()
                        except IOError:
                            encoding = None
                        if encoding:
                            line = line.decode(encoding)
                        else:
                            line = line.decode('ascii', 'replace')
                    new_trcback.append((filename, lineno, function, line,
                                        None, None, None, None))
                    continue

                # build the module-line -> template-line map from the
                # "# SOURCE LINE" comments codegen writes into the module
                template_ln = module_ln = 1
                line_map = {}
                for line in module_source.split("\n"):
                    match = re.match(r'\s*# SOURCE LINE (\d+)', line)
                    if match:
                        template_ln = int(match.group(1))
                    module_ln += 1
                    line_map[module_ln] = template_ln
                template_lines = [line for line in
                                  template_source.split("\n")]
                mods[filename] = (line_map, template_lines)

            template_ln = line_map[lineno]
            if template_ln <= len(template_lines):
                template_line = template_lines[template_ln - 1]
            else:
                template_line = None
            new_trcback.append((filename, lineno, function,
                                line, template_filename, template_ln,
                                template_line, template_source))
        if not self.source:
            # use the innermost template record for source/lineno;
            # fall back to reading the last plain .py file
            for l in range(len(new_trcback) - 1, 0, -1):
                if new_trcback[l][5]:
                    self.source = new_trcback[l][7]
                    self.lineno = new_trcback[l][5]
                    break
            else:
                if new_trcback:
                    try:
                        # A normal .py file (not a Template)
                        fp = open(new_trcback[-1][0], 'rb')
                        encoding = util.parse_encoding(fp)
                        fp.seek(0)
                        self.source = fp.read()
                        fp.close()
                        if encoding:
                            self.source = self.source.decode(encoding)
                    except IOError:
                        self.source = ''
                    self.lineno = new_trcback[-1][1]
        return new_trcback
def text_error_template(lookup=None):
    """Provides a template that renders a stack trace in a similar format to
    the Python interpreter, substituting source template filenames, line
    numbers and code for that of the originating source template, as
    applicable.

    """
    import mako.template
    # bugfix: the "File" line must interpolate the per-record
    # ``filename`` unpacked from the traceback tuple; it was previously
    # a corrupted placeholder.
    return mako.template.Template(r"""
<%page args="error=None, traceback=None"/>
<%!
    from mako.exceptions import RichTraceback
%>\
<%
    tback = RichTraceback(error=error, traceback=traceback)
%>\
Traceback (most recent call last):
% for (filename, lineno, function, line) in tback.traceback:
  File "${filename}", line ${lineno}, in ${function or '?'}
    ${line | trim}
% endfor
${tback.errorname}: ${tback.message}
""")
def html_error_template():
    """Provides a template that renders a stack trace in an HTML format,
    providing an excerpt of code as well as substituting source template
    filenames, line numbers and code for that of the originating source
    template, as applicable.

    The template's default encoding_errors value is 'htmlentityreplace'. the
    template has two options. With the full option disabled, only a section of
    an HTML document is returned. with the css option disabled, the default
    stylesheet won't be included.

    """
    import mako.template
    # bugfix: the stack-trace location line must interpolate the
    # per-record ``filename`` unpacked from the traceback tuple; it was
    # previously a corrupted placeholder.
    return mako.template.Template(r"""
<%!
    from mako.exceptions import RichTraceback
%>
<%page args="full=True, css=True, error=None, traceback=None"/>
% if full:
<html>
<head>
    <title>Mako Runtime Error</title>
% endif
% if css:
    <style>
        body { font-family:verdana; margin:10px 30px 10px 30px;}
        .stacktrace { margin:5px 5px 5px 5px; }
        .highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
        .nonhighlight { padding:0px; background-color:#DFDFDF; }
        .sample { padding:10px; margin:10px 10px 10px 10px; font-family:monospace; }
        .sampleline { padding:0px 10px 0px 10px; }
        .sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
        .location { font-size:80%; }
    </style>
% endif
% if full:
</head>
<body>
% endif

<h2>Error !</h2>
<%
    tback = RichTraceback(error=error, traceback=traceback)
    src = tback.source
    line = tback.lineno
    if src:
        lines = src.split('\n')
    else:
        lines = None
%>
<h3>${tback.errorname}: ${tback.message}</h3>

% if lines:
    <div class="sample">
    <div class="nonhighlight">
% for index in range(max(0, line-4),min(len(lines), line+5)):
    % if index + 1 == line:
<div class="highlight">${index + 1} ${lines[index] | h}</div>
    % else:
<div class="sampleline">${index + 1} ${lines[index] | h}</div>
    % endif
% endfor
    </div>
    </div>
% endif

<div class="stacktrace">
% for (filename, lineno, function, line) in tback.reverse_traceback:
    <div class="location">${filename}, line ${lineno}:</div>
    <div class="sourceline">${line | h}</div>
% endfor
</div>

% if full:
</body>
</html>
% endif
""", output_encoding=sys.getdefaultencoding(), encoding_errors='htmlentityreplace')
| Python |
# mako/__init__.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

# package version string
__version__ = '0.5.0'
| Python |
# mako/template.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the Template class, a facade for parsing, generating and executing
template strings, as well as template runtime operations."""
from mako.lexer import Lexer
from mako import runtime, util, exceptions, codegen
import imp, os, re, shutil, stat, sys, tempfile, time, types, weakref
class Template(object):
    """Represents a compiled template.

    :class:`.Template` includes a reference to the original
    template source (via the ``.source`` attribute)
    as well as the source code of the
    generated Python module (i.e. the ``.code`` attribute),
    as well as a reference to an actual Python module.

    :class:`.Template` is constructed using either a literal string
    representing the template text, or a filename representing a filesystem
    path to a source file.

    :param text: textual template source.  This argument is mutually
     exclusive versus the "filename" parameter.

    :param filename: filename of the source template.  This argument is
     mutually exclusive versus the "text" parameter.

    :param buffer_filters: string list of filters to be applied
     to the output of %defs which are buffered, cached, or otherwise
     filtered, after all filters
     defined with the %def itself have been applied. Allows the
     creation of default expression filters that let the output
     of return-valued %defs "opt out" of that filtering via
     passing special attributes or objects.

    :param bytestring_passthrough: When True, and output_encoding is
     set to None, and :meth:`.Template.render` is used to render,
     the StringIO or cStringIO buffer will be used instead of the
     default "fast" buffer.   This allows raw bytestrings in the
     output stream, such as in expressions, to pass straight
     through to the buffer.   New in 0.4 to provide the same
     behavior as that of the previous series.  This flag is forced
     to True if disable_unicode is also configured.

    :param cache_dir: Filesystem directory where cache files will be
     placed.  See :ref:`caching_toplevel`.

    :param cache_enabled: Boolean flag which enables caching of this
     template.  See :ref:`caching_toplevel`.

    :param cache_type: Type of Beaker caching to be applied to the
     template. See :ref:`caching_toplevel`.

    :param cache_url: URL of a memcached server with which to use
     for caching.  See :ref:`caching_toplevel`.

    :param default_filters: List of string filter names that will
     be applied to all expressions.  See :ref:`filtering_default_filters`.

    :param disable_unicode: Disables all awareness of Python Unicode
     objects.  See :ref:`unicode_disabled`.

    :param encoding_errors: Error parameter passed to ``encode()`` when
     string encoding is performed. See :ref:`usage_unicode`.

    :param error_handler: Python callable which is called whenever
     compile or runtime exceptions occur. The callable is passed
     the current context as well as the exception. If the
     callable returns ``True``, the exception is considered to
     be handled, else it is re-raised after the function
     completes. Is used to provide custom error-rendering
     functions.

    :param format_exceptions: if ``True``, exceptions which occur during
     the render phase of this template will be caught and
     formatted into an HTML error page, which then becomes the
     rendered result of the :meth:`render` call. Otherwise,
     runtime exceptions are propagated outwards.

    :param imports: String list of Python statements, typically individual
     "import" lines, which will be placed into the module level
     preamble of all generated Python modules. See the example
     in :ref:`filtering_default_filters`.

    :param input_encoding: Encoding of the template's source code. Can
     be used in lieu of the coding comment. See
     :ref:`usage_unicode` as well as :ref:`unicode_toplevel` for
     details on source encoding.

    :param lookup: a :class:`.TemplateLookup` instance that will be used
     for all file lookups via the ``<%namespace>``,
     ``<%include>``, and ``<%inherit>`` tags. See
     :ref:`usage_templatelookup`.

    :param module_directory: Filesystem location where generated
     Python module files will be placed.

    :param module_filename: Overrides the filename of the generated
     Python module file. For advanced usage only.

    :param output_encoding: The encoding to use when :meth:`.render`
     is called.
     See :ref:`usage_unicode` as well as :ref:`unicode_toplevel`.

    :param preprocessor: Python callable which will be passed
     the full template source before it is parsed. The return
     result of the callable will be used as the template source
     code.

    :param strict_undefined: Replaces the automatic usage of
     ``UNDEFINED`` for any undeclared variables not located in
     the :class:`.Context` with an immediate raise of
     ``NameError``. The advantage is immediate reporting of
     missing variables which include the name. New in 0.3.6.

    :param uri: string uri or other identifier for this template.
     If not provided, the uri is generated from the filesystem
     path, or from the in-memory identity of a non-file-based
     template. The primary usage of the uri is to provide a key
     within :class:`.TemplateLookup`, as well as to generate the
     file path of the generated Python module file, if
     ``module_directory`` is specified.

    """

    def __init__(self,
                 text=None,
                 filename=None,
                 uri=None,
                 format_exceptions=False,
                 error_handler=None,
                 lookup=None,
                 output_encoding=None,
                 encoding_errors='strict',
                 module_directory=None,
                 cache_type=None,
                 cache_dir=None,
                 cache_url=None,
                 module_filename=None,
                 input_encoding=None,
                 disable_unicode=False,
                 bytestring_passthrough=False,
                 default_filters=None,
                 buffer_filters=(),
                 strict_undefined=False,
                 imports=None,
                 preprocessor=None,
                 cache_enabled=True):
        # derive the module identifier and uri from whichever of
        # uri/filename was given; otherwise use in-memory identity
        if uri:
            self.module_id = re.sub(r'\W', "_", uri)
            self.uri = uri
        elif filename:
            self.module_id = re.sub(r'\W', "_", filename)
            drive, path = os.path.splitdrive(filename)
            path = os.path.normpath(path).replace(os.path.sep, "/")
            self.uri = path
        else:
            self.module_id = "memory:" + hex(id(self))
            self.uri = self.module_id

        # normalize the uri for use as a module file path, rejecting
        # uris that would escape the root
        u_norm = self.uri
        if u_norm.startswith("/"):
            u_norm = u_norm[1:]
        u_norm = os.path.normpath(u_norm)
        if u_norm.startswith(".."):
            raise exceptions.TemplateLookupException(
                "Template uri \"%s\" is invalid - "
                "it cannot be relative outside "
                "of the root path." % self.uri)

        self.input_encoding = input_encoding
        self.output_encoding = output_encoding
        self.encoding_errors = encoding_errors
        self.disable_unicode = disable_unicode
        # disable_unicode forces the slower StringIO-based buffer
        self.bytestring_passthrough = bytestring_passthrough or disable_unicode
        self.strict_undefined = strict_undefined

        if util.py3k and disable_unicode:
            raise exceptions.UnsupportedError(
                "Mako for Python 3 does not "
                "support disabling Unicode")
        elif output_encoding and disable_unicode:
            raise exceptions.UnsupportedError(
                "output_encoding must be set to "
                "None when disable_unicode is used.")
        if default_filters is None:
            if util.py3k or self.disable_unicode:
                self.default_filters = ['str']
            else:
                self.default_filters = ['unicode']
        else:
            self.default_filters = default_filters
        self.buffer_filters = buffer_filters

        self.imports = imports
        self.preprocessor = preprocessor

        # if plain text, compile code in memory only
        if text is not None:
            (code, module) = _compile_text(self, text, filename)
            self._code = code
            self._source = text
            ModuleInfo(module, None, self, filename, code, text)
        elif filename is not None:
            # if template filename and a module directory, load
            # a filesystem-based module file, generating if needed
            if module_filename is not None:
                path = module_filename
            elif module_directory is not None:
                path = os.path.abspath(
                    os.path.join(
                        os.path.normpath(module_directory),
                        u_norm + ".py"
                    )
                )
            else:
                path = None
            module = self._compile_from_file(path, filename)
        else:
            raise exceptions.RuntimeException(
                "Template requires text or filename")

        self.module = module
        self.filename = filename
        self.callable_ = self.module.render_body
        self.format_exceptions = format_exceptions
        self.error_handler = error_handler
        self.lookup = lookup
        self.cache_type = cache_type
        self.cache_dir = cache_dir
        self.cache_url = cache_url
        self.cache_enabled = cache_enabled

    def _compile_from_file(self, path, filename):
        """load or generate the Python module for ``filename``; when
        ``path`` is given the generated module source is written to and
        loaded from disk, otherwise compiled in memory."""
        if path is not None:
            util.verify_directory(os.path.dirname(path))
            filemtime = os.stat(filename)[stat.ST_MTIME]
            # regenerate the module file if missing or older than the
            # template source
            if not os.path.exists(path) or \
                    os.stat(path)[stat.ST_MTIME] < filemtime:
                _compile_module_file(
                    self,
                    open(filename, 'rb').read(),
                    filename,
                    path)
            module = imp.load_source(self.module_id, path, open(path, 'rb'))
            del sys.modules[self.module_id]
            # regenerate again if the module was produced by a
            # different codegen version
            if module._magic_number != codegen.MAGIC_NUMBER:
                _compile_module_file(
                    self,
                    open(filename, 'rb').read(),
                    filename,
                    path)
                module = imp.load_source(self.module_id, path,
                                         open(path, 'rb'))
                del sys.modules[self.module_id]
            ModuleInfo(module, path, self, filename, None, None)
        else:
            # template filename and no module directory, compile code
            # in memory
            code, module = _compile_text(
                self,
                open(filename, 'rb').read(),
                filename)
            self._source = None
            self._code = code
            ModuleInfo(module, None, self, filename, code, None)
        return module

    @property
    def source(self):
        """return the template source code for this Template."""
        return _get_module_info_from_callable(self.callable_).source

    @property
    def code(self):
        """return the module source code for this Template"""
        return _get_module_info_from_callable(self.callable_).code

    @property
    def cache(self):
        # cache namespace created by the generated module
        return self.module._template_cache

    def render(self, *args, **data):
        """Render the output of this template as a string.

        if the template specifies an output encoding, the string
        will be encoded accordingly, else the output is raw (raw
        output uses cStringIO and can't handle multibyte
        characters). a Context object is created corresponding
        to the given data. Arguments that are explictly declared
        by this template's internal rendering method are also
        pulled from the given \*args, \**data members.

        """
        return runtime._render(self, self.callable_, args, data)

    def render_unicode(self, *args, **data):
        """render the output of this template as a unicode object."""

        return runtime._render(self,
                               self.callable_,
                               args,
                               data,
                               as_unicode=True)

    def render_context(self, context, *args, **kwargs):
        """Render this Template with the given context.

        the data is written to the context's buffer.

        """
        if getattr(context, '_with_template', None) is None:
            context._with_template = self
        runtime._render_context(self,
                                self.callable_,
                                context,
                                *args,
                                **kwargs)

    def has_def(self, name):
        # True if the generated module defines "render_<name>"
        return hasattr(self.module, "render_%s" % name)

    def get_def(self, name):
        """Return a def of this template as a :class:`.DefTemplate`."""

        return DefTemplate(self, getattr(self.module, "render_%s" % name))

    def _get_def_callable(self, name):
        return getattr(self.module, "render_%s" % name)

    @property
    def last_modified(self):
        # modification time recorded in the generated module
        return self.module._modified_time
class ModuleTemplate(Template):
    """A Template which is constructed given an existing Python module.

        e.g.::

        t = Template("this is a template")
        f = file("mymodule.py", "w")
        f.write(t.code)
        f.close()

        import mymodule

        t = ModuleTemplate(mymodule)
        print t.render()

    """

    def __init__(self, module,
                 module_filename=None,
                 template=None,
                 template_filename=None,
                 module_source=None,
                 template_source=None,
                 output_encoding=None,
                 encoding_errors='strict',
                 disable_unicode=False,
                 bytestring_passthrough=False,
                 format_exceptions=False,
                 error_handler=None,
                 lookup=None,
                 cache_type=None,
                 cache_dir=None,
                 cache_url=None,
                 cache_enabled=True
                 ):
        # uri and source encoding come from attributes codegen wrote
        # into the generated module
        self.module_id = re.sub(r'\W', "_", module._template_uri)
        self.uri = module._template_uri
        self.input_encoding = module._source_encoding
        self.output_encoding = output_encoding
        self.encoding_errors = encoding_errors
        self.disable_unicode = disable_unicode
        self.bytestring_passthrough = bytestring_passthrough or disable_unicode

        if util.py3k and disable_unicode:
            raise exceptions.UnsupportedError(
                "Mako for Python 3 does not "
                "support disabling Unicode")
        elif output_encoding and disable_unicode:
            raise exceptions.UnsupportedError(
                "output_encoding must be set to "
                "None when disable_unicode is used.")

        self.module = module
        self.filename = template_filename
        # register the module for reverse source lookups
        ModuleInfo(module,
                   module_filename,
                   self,
                   template_filename,
                   module_source,
                   template_source)

        self.callable_ = self.module.render_body
        self.format_exceptions = format_exceptions
        self.error_handler = error_handler
        self.lookup = lookup
        self.cache_type = cache_type
        self.cache_dir = cache_dir
        self.cache_url = cache_url
        self.cache_enabled = cache_enabled
class DefTemplate(Template):
    """a Template which represents a callable def in a parent
    template."""

    def __init__(self, parent, callable_):
        # rendering configuration is inherited from the parent Template
        self.parent = parent
        self.callable_ = callable_
        self.output_encoding = parent.output_encoding
        self.module = parent.module
        self.encoding_errors = parent.encoding_errors
        self.format_exceptions = parent.format_exceptions
        self.error_handler = parent.error_handler
        self.lookup = parent.lookup
        self.bytestring_passthrough = parent.bytestring_passthrough

    def get_def(self, name):
        # defs are resolved against the parent template
        return self.parent.get_def(name)
class ModuleInfo(object):
    """Stores information about a module currently loaded into
    memory, provides reverse lookups of template source, module
    source code based on a module's identifier.

    """
    # registry keyed by module name and module filename; values are
    # weak so entries go away with their owning template (each template
    # keeps its ModuleInfo alive via the _mmarker attribute)
    _modules = weakref.WeakValueDictionary()

    def __init__(self,
                 module,
                 module_filename,
                 template,
                 template_filename,
                 module_source,
                 template_source):
        self.module = module
        self.module_filename = module_filename
        self.template_filename = template_filename
        self.module_source = module_source
        self.template_source = template_source
        self._modules[module.__name__] = template._mmarker = self
        if module_filename:
            self._modules[module_filename] = self

    @property
    def code(self):
        # generated module source: retained in memory or read from disk
        if self.module_source is not None:
            return self.module_source
        else:
            return open(self.module_filename).read()

    @property
    def source(self):
        if self.template_source is not None:
            # decode bytestring source using the encoding recorded in
            # the generated module
            if self.module._source_encoding and \
                    not isinstance(self.template_source, unicode):
                return self.template_source.decode(
                    self.module._source_encoding)
            else:
                return self.template_source
        else:
            if self.module._source_encoding:
                return open(self.template_filename, 'rb').read().\
                    decode(self.module._source_encoding)
            else:
                return open(self.template_filename).read()
def _compile_text(template, text, filename):
identifier = template.module_id
lexer = Lexer(text,
filename,
disable_unicode=template.disable_unicode,
input_encoding=template.input_encoding,
preprocessor=template.preprocessor)
node = lexer.parse()
source = codegen.compile(node,
template.uri,
filename,
default_filters=template.default_filters,
buffer_filters=template.buffer_filters,
imports=template.imports,
source_encoding=lexer.encoding,
generate_magic_comment=template.disable_unicode,
disable_unicode=template.disable_unicode,
strict_undefined=template.strict_undefined)
cid = identifier
if not util.py3k and isinstance(cid, unicode):
cid = cid.encode()
module = types.ModuleType(cid)
code = compile(source, cid, 'exec')
exec code in module.__dict__, module.__dict__
return (source, module)
def _compile_module_file(template, text, filename, outputpath):
    """Compile template source text and write the generated module to
    ``outputpath``.

    The module is first written to a tempfile in the destination
    directory, then moved into place, so the final rename is atomic.
    """
    # note: the previous version bound ``identifier = template.module_id``
    # and never used it; the unused local has been removed.
    lexer = Lexer(text,
                  filename,
                  disable_unicode=template.disable_unicode,
                  input_encoding=template.input_encoding,
                  preprocessor=template.preprocessor)
    node = lexer.parse()
    source = codegen.compile(node,
                             template.uri,
                             filename,
                             default_filters=template.default_filters,
                             buffer_filters=template.buffer_filters,
                             imports=template.imports,
                             source_encoding=lexer.encoding,
                             generate_magic_comment=True,
                             disable_unicode=template.disable_unicode,
                             strict_undefined=template.strict_undefined)
    # make tempfiles in the same location as the ultimate
    # location. this ensures they're on the same filesystem,
    # avoiding synchronization issues.
    (dest, name) = tempfile.mkstemp(dir=os.path.dirname(outputpath))
    if isinstance(source, unicode):
        source = source.encode(lexer.encoding or 'ascii')
    os.write(dest, source)
    os.close(dest)
    shutil.move(name, outputpath)
def _get_module_info_from_callable(callable_):
    """Return the ModuleInfo for the module that defines ``callable_``."""
    module_name = callable_.func_globals['__name__']
    return _get_module_info(module_name)
def _get_module_info(filename):
    """Look up a registered ModuleInfo by module name or module filename.

    Raises KeyError when no template module was registered under that key.
    """
    return ModuleInfo._modules[filename]
| Python |
# mako/filters.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, urllib, htmlentitydefs, codecs
from StringIO import StringIO
from mako import util
# Bug fix: this table had been corrupted into an identity mapping
# (each character "escaping" to itself), making xml_escape and
# legacy_html_escape no-ops.  Restored the standard entity/character
# reference replacements.
xml_escapes = {
    '&': '&amp;',
    '>': '&gt;',
    '<': '&lt;',
    '"': '&#34;',   # also &quot; in html-only
    "'": '&#39;'    # also &apos; in html-only
}
# XXX: &#34; is valid in HTML and XML
#      &apos; is not valid HTML, but is valid XML

def legacy_html_escape(string):
    """legacy HTML escape for non-unicode mode."""
    return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)

# prefer the C-accelerated markupsafe escaper when it is installed
try:
    import markupsafe
    html_escape = markupsafe.escape
except ImportError:
    html_escape = legacy_html_escape

def xml_escape(string):
    """Escape the five XML special characters in ``string``."""
    return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)
def url_escape(string):
    """URL-quote ``string`` (UTF-8 encoded, spaces become ``+``)."""
    # convert into a concrete byte string so quote_plus sees octets
    octets = string.encode("utf8")
    return urllib.quote_plus(octets)
def url_unescape(string):
    """Reverse of url_escape; decodes to unicode when non-ASCII appears."""
    text = urllib.unquote_plus(string)
    if is_ascii_str(text):
        return text
    return text.decode("utf8")
def trim(string):
    """Filter stripping leading and trailing whitespace from the value."""
    stripped = string.strip()
    return stripped
class Decode(object):
    """Attribute-based decoder factory: ``decode.utf8`` yields a function
    that converts any value to unicode using the ``utf8`` codec."""

    def __getattr__(self, key):
        # ``key`` is the codec name; build the converter lazily
        def decode(x):
            if isinstance(x, unicode):
                return x
            if isinstance(x, str):
                return unicode(x, encoding=key)
            return unicode(str(x), encoding=key)
        return decode

# singleton used by the template filter table
decode = Decode()
# matches strings consisting entirely of 7-bit ASCII characters
_ASCII_re = re.compile(r'\A[\x00-\x7f]*\Z')

def is_ascii_str(text):
    """Truthy (a match object) when ``text`` is a plain pure-ASCII str."""
    if not isinstance(text, str):
        return False
    return _ASCII_re.match(text)
################################################################
class XMLEntityEscaper(object):
    """Escapes/unescapes text using a codepoint<->entity-name mapping,
    e.g. the tables from ``htmlentitydefs``."""

    def __init__(self, codepoint2name, name2codepoint):
        # translate() table: codepoint -> u'&name;'
        self.codepoint2entity = dict([(c, u'&%s;' % n)
                                      for c, n in codepoint2name.iteritems()])
        self.name2codepoint = name2codepoint

    def escape_entities(self, text):
        """Replace characters with their character entity references.

        Only characters corresponding to a named entity are replaced.
        """
        return unicode(text).translate(self.codepoint2entity)

    def __escape(self, m):
        # replace one matched character with its named entity, falling
        # back to a hexadecimal numeric character reference
        codepoint = ord(m.group())
        try:
            return self.codepoint2entity[codepoint]
        except (KeyError, IndexError):
            return '&#x%X;' % codepoint

    # characters requiring escaping: XML specials plus all non-ASCII
    __escapable = re.compile(r'["&<>]|[^\x00-\x7f]')

    def escape(self, text):
        """Replace characters with their character references.

        Replace characters by their named entity references.
        Non-ASCII characters, if they do not have a named entity reference,
        are replaced by numerical character references.

        The return value is guaranteed to be ASCII.
        """
        return self.__escapable.sub(self.__escape, unicode(text)
                                    ).encode('ascii')

    # XXX: This regexp will not match all valid XML entity names__.
    # (It punts on details involving involving CombiningChars and Extenders.)
    #
    # .. __: http://www.w3.org/TR/2000/REC-xml-20001006#NT-EntityRef
    __characterrefs = re.compile(r'''& (?:
                                          \#(\d+)
                                        | \#x([\da-f]+)
                                        | ( (?!\d) [:\w] [-.:\w]+ )
                                        ) ;''',
                                 re.X | re.UNICODE)

    def __unescape(self, m):
        # exactly one of the three groups matches: decimal ref, hex ref,
        # or a named entity
        dval, hval, name = m.groups()
        if dval:
            codepoint = int(dval)
        elif hval:
            codepoint = int(hval, 16)
        else:
            codepoint = self.name2codepoint.get(name, 0xfffd)
            # U+FFFD = "REPLACEMENT CHARACTER"
        if codepoint < 128:
            return chr(codepoint)
        return unichr(codepoint)

    def unescape(self, text):
        """Unescape character references.

        All character references (both entity references and numerical
        character references) are unescaped.
        """
        return self.__characterrefs.sub(self.__unescape, text)
# module-level escaper bound to the full HTML entity tables
_html_entities_escaper = XMLEntityEscaper(htmlentitydefs.codepoint2name,
                                          htmlentitydefs.name2codepoint)

# convenience aliases referenced by the template filter table below
html_entities_escape = _html_entities_escaper.escape_entities
html_entities_unescape = _html_entities_escaper.unescape
def htmlentityreplace_errors(ex):
    """An encoding error handler.

    This python `codecs`_ error handler replaces unencodable
    characters with HTML entities, or, if no HTML entity exists for
    the character, XML character references.

    >>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace')
    'The cost was &euro;12.'
    """
    if isinstance(ex, UnicodeEncodeError):
        # Handle encoding errors: escape the offending span and resume
        # encoding just past it
        bad_text = ex.object[ex.start:ex.end]
        text = _html_entities_escaper.escape(bad_text)
        return (unicode(text), ex.end)
    # decode errors are not handled by this handler; propagate
    raise ex

# make the handler available as encode(..., 'htmlentityreplace')
codecs.register_error('htmlentityreplace', htmlentityreplace_errors)
# TODO: options to make this dynamic per-compilation will be added in a later release
DEFAULT_ESCAPES = {
'x':'filters.xml_escape',
'h':'filters.html_escape',
'u':'filters.url_escape',
'trim':'filters.trim',
'entity':'filters.html_entities_escape',
'unicode':'unicode',
'decode':'decode',
'str':'str',
'n':'n'
}
if util.py3k:
DEFAULT_ESCAPES.update({
'unicode':'str'
})
NON_UNICODE_ESCAPES = DEFAULT_ESCAPES.copy()
NON_UNICODE_ESCAPES['h'] = 'filters.legacy_html_escape'
| Python |
# mako/_ast_util.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
# operator-node -> source-symbol tables used by SourceGenerator
BOOLOP_SYMBOLS = {
    And: 'and',
    Or: 'or'
}

BINOP_SYMBOLS = {
    Add: '+',
    Sub: '-',
    Mult: '*',
    Div: '/',
    FloorDiv: '//',
    Mod: '%',
    # bug fix: Pow was missing, so rendering any ``a ** b`` expression
    # raised KeyError in visit_BinOp / visit_AugAssign
    Pow: '**',
    LShift: '<<',
    RShift: '>>',
    BitOr: '|',
    BitAnd: '&',
    BitXor: '^'
}

CMPOP_SYMBOLS = {
    Eq: '==',
    Gt: '>',
    GtE: '>=',
    In: 'in',
    Is: 'is',
    IsNot: 'is not',
    Lt: '<',
    LtE: '<=',
    NotEq: '!=',
    NotIn: 'not in'
}

UNARYOP_SYMBOLS = {
    Invert: '~',
    Not: 'not',
    UAdd: '+',
    USub: '-'
}

# merged view over all of the above
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def parse(expr, filename='<unknown>', mode='exec'):
    """Parse source text into an AST node (no bytecode compilation)."""
    tree = compile(expr, filename, mode, PyCF_ONLY_AST)
    return tree
def to_source(node, indent_with=' ' * 4):
    """Render *node* back into python source code.

    Useful for debugging, especially with custom ASTs not generated by
    python itself.  The source may be evaluable even when the AST is not
    compilable, since the AST carries data that plain source drops.

    Each indentation level is rendered as *indent_with* — four spaces by
    default per PEP 8, adjustable to match an application's styleguide.
    """
    gen = SourceGenerator(indent_with)
    gen.visit(node)
    return ''.join(gen.result)
def dump(node):
    """Return a very verbose string representation of *node*, useful for
    debugging.  Raises TypeError when *node* is not an AST node."""
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)

    def _format(value):
        if isinstance(value, AST):
            parts = ', '.join('%s=%s' % (name, _format(child))
                              for name, child in iter_fields(value))
            return '%s(%s)' % (value.__class__.__name__, parts)
        if isinstance(value, list):
            return '[%s]' % ', '.join(_format(item) for item in value)
        return repr(value)

    return _format(node)
def copy_location(new_node, old_node):
    """Copy ``lineno`` and ``col_offset`` from *old_node* onto *new_node*
    (when both nodes support them) and return *new_node*."""
    for attr in ('lineno', 'col_offset'):
        supported = (attr in old_node._attributes
                     and attr in new_node._attributes)
        if supported and hasattr(old_node, attr):
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
def fix_missing_locations(node):
    """Recursively fill in missing ``lineno``/``col_offset`` attributes.

    Children lacking location info inherit it from the nearest ancestor
    that has it; unlike `copy_location`, nodes that already carry a
    location are left untouched.  The tree root defaults to line 1,
    column 0.  Returns *node*.
    """
    def _propagate(current, lineno, col_offset):
        if 'lineno' in current._attributes:
            if hasattr(current, 'lineno'):
                lineno = current.lineno
            else:
                current.lineno = lineno
        if 'col_offset' in current._attributes:
            if hasattr(current, 'col_offset'):
                col_offset = current.col_offset
            else:
                current.col_offset = col_offset
        for child in iter_child_nodes(current):
            _propagate(child, lineno, col_offset)
    _propagate(node, 1, 0)
    return node
def increment_lineno(node, n=1):
    """
    Increment the line numbers of all nodes by `n` if they have line number
    attributes. This is useful to "move code" to a different location in a
    file.
    """
    # Bug fix: the previous version iterated ``zip((node,), walk(node))``,
    # which (a) yields (node, node) tuples rather than nodes, so the
    # ``node._attributes`` access would fail on a tuple, and (b) stops
    # after a single pair, never touching the rest of the tree.
    for node in walk(node):
        if 'lineno' in node._attributes:
            node.lineno = getattr(node, 'lineno', 0) + n
def iter_fields(node):
    """Yield ``(name, value)`` for each field actually set on *node*."""
    # CPython 2.5 compatibility: _fields may be absent or None
    declared = getattr(node, '_fields', None) or ()
    for name in declared:
        try:
            yield name, getattr(node, name)
        except AttributeError:
            # declared but unset field -- skip it
            continue
def get_fields(node):
    """Return the node's present fields as a dict (cf. `iter_fields`)."""
    return dict((name, value) for name, value in iter_fields(node))
def iter_child_nodes(node):
    """Yield every direct child AST node of *node*."""
    for _, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, AST):
                    yield element
def get_child_nodes(node):
    """Like `iter_child_nodes` but returns a list."""
    return [child for child in iter_child_nodes(node)]
def get_compile_mode(node):
    """
    Return the *mode* argument for :func:`compile` appropriate to *node*:
    ``Expression`` -> ``'eval'``, ``Interactive`` -> ``'single'``, any
    other ``mod`` subclass (notably ``Module``) -> ``'exec'``.

    Raises `TypeError` if the node is not a `mod` node (`Expression`,
    `Module` etc.).
    """
    if not isinstance(node, mod):
        raise TypeError('expected mod node, got %r' % node.__class__.__name__)
    # bug fix: the fallback was 'expr', which is not a valid compile()
    # mode (the valid modes are 'exec', 'eval' and 'single'); Module
    # trees must compile in 'exec' mode.
    return {
        Expression: 'eval',
        Interactive: 'single'
    }.get(node.__class__, 'exec')
def get_docstring(node):
    """
    Return the docstring for the given node or `None` if no docstring can
    be found.  If the node provided does not accept docstrings a
    `TypeError` will be raised.
    """
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    # bug fix: a docstring is the .value of a leading Expr statement;
    # body[0] itself is never a Str node, so the previous
    # ``isinstance(node.body[0], Str)`` check could never match and the
    # function always returned None.
    if node.body and isinstance(node.body[0], Expr) and \
            isinstance(node.body[0].value, Str):
        return node.body[0].value.s
def walk(node):
    """Yield *node* and all of its descendants, breadth-first.

    Useful for in-place modification when neither context nor visiting
    order matters.
    """
    from collections import deque
    pending = deque([node])
    while pending:
        current = pending.popleft()
        pending.extend(iter_child_nodes(current))
        yield current
class NodeVisitor(object):
    """
    Walks the abstract syntax tree and calls a visitor function for every
    node found; the return value of that function is forwarded by `visit`.

    By default the visitor for a node is the method named ``'visit_'`` +
    the node's class name (``visit_TryFinally`` for a `TryFinally` node).
    Override `get_visitor` to change that lookup.  When no specific
    visitor exists, `generic_visit` is used instead.

    Do not use `NodeVisitor` to apply changes to nodes while traversing;
    use `NodeTransformer` for that.
    """

    def get_visitor(self, node):
        """Return the visitor method for *node*, or None when no specific
        visitor exists (the generic visit function is then used)."""
        return getattr(self, 'visit_%s' % node.__class__.__name__, None)

    def visit(self, node):
        """Visit a node, dispatching to its specific visitor if present."""
        visitor = self.get_visitor(node)
        if visitor is None:
            return self.generic_visit(node)
        return visitor(node)

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        for _, value in iter_fields(node):
            if isinstance(value, AST):
                self.visit(value)
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item)
class NodeTransformer(NodeVisitor):
    """
    Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node.  If the return
    value of the visitor function is `None` the node will be removed
    from the previous location, otherwise it's replaced with the return
    value.  The return value may be the original node, in which case no
    replacement takes place.

    Here an example transformer that rewrites all `foo` to `data['foo']`::

        class RewriteName(NodeTransformer):
            def visit_Name(self, node):
                return copy_location(Subscript(
                    value=Name(id='data', ctx=Load()),
                    slice=Index(value=Str(s=node.id)),
                    ctx=node.ctx
                ), node)

    Keep in mind that if the node you're operating on has child nodes
    you must either transform the child nodes yourself or call the generic
    visit function for the node first.

    Nodes that were part of a collection of statements (that applies to
    all statement nodes) may also return a list of nodes rather than just
    a single node.

    Usually you use the transformer like this::

        node = YourTransformer().visit(node)
    """

    def generic_visit(self, node):
        for field, old_value in iter_fields(node):
            # re-fetch defensively: an earlier iteration may have
            # replaced or deleted sibling fields on this node
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        if value is None:
                            # visitor removed this node from the list
                            continue
                        elif not isinstance(value, AST):
                            # visitor returned a sequence of replacements
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # splice results back into the original list in place
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    # None -> remove the child entirely
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
class SourceGenerator(NodeVisitor):
    """
    This visitor is able to transform a well formed syntax tree into python
    sourcecode.  For more details have a look at the docstring of the
    `node_to_source` function.

    Fixes relative to the previous revision:

    * ``visit_Delete`` iterated ``enumerate(node)`` -- AST nodes are not
      iterable, so ``del`` statements raised TypeError; the targets live
      in ``node.targets``.
    * ``visit_ExtSlice`` unpacked ``node.dims`` items directly instead of
      enumerating them, raising on any extended slice.
    * ``body()`` assigned to the misspelled attribute ``new_line`` (a dead
      write); it now calls :meth:`newline`.
    """

    def __init__(self, indent_with):
        self.result = []            # accumulated source fragments
        self.indent_with = indent_with
        self.indentation = 0        # current indent level
        self.new_lines = 0          # pending newlines to emit before next write

    def write(self, x):
        # flush pending newlines + indentation before the fragment;
        # never emit a leading newline at the very start of the output
        if self.new_lines:
            if self.result:
                self.result.append('\n' * self.new_lines)
            self.result.append(self.indent_with * self.indentation)
            self.new_lines = 0
        self.result.append(x)

    def newline(self, n=1):
        # request at least n newlines before the next write
        self.new_lines = max(self.new_lines, n)

    def body(self, statements):
        # bug fix: was ``self.new_line = True`` (misspelled dead write)
        self.newline()
        self.indentation += 1
        for stmt in statements:
            self.visit(stmt)
        self.indentation -= 1

    def body_or_else(self, node):
        self.body(node.body)
        if node.orelse:
            self.newline()
            self.write('else:')
            self.body(node.orelse)

    def signature(self, node):
        # emits a function signature, comma-separating lazily
        want_comma = []
        def write_comma():
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)

        # left-pad defaults so they line up with the trailing args
        padding = [None] * (len(node.args) - len(node.defaults))
        for arg, default in zip(node.args, padding + node.defaults):
            write_comma()
            self.visit(arg)
            if default is not None:
                self.write('=')
                self.visit(default)
        if node.vararg is not None:
            write_comma()
            self.write('*' + node.vararg)
        if node.kwarg is not None:
            write_comma()
            self.write('**' + node.kwarg)

    def decorators(self, node):
        for decorator in node.decorator_list:
            self.newline()
            self.write('@')
            self.visit(decorator)

    # Statements

    def visit_Assign(self, node):
        self.newline()
        for idx, target in enumerate(node.targets):
            if idx:
                self.write(', ')
            self.visit(target)
        self.write(' = ')
        self.visit(node.value)

    def visit_AugAssign(self, node):
        self.newline()
        self.visit(node.target)
        self.write(BINOP_SYMBOLS[type(node.op)] + '=')
        self.visit(node.value)

    def visit_ImportFrom(self, node):
        self.newline()
        self.write('from %s%s import ' % ('.' * node.level, node.module))
        for idx, item in enumerate(node.names):
            if idx:
                self.write(', ')
            # NOTE(review): this writes the alias node object itself rather
            # than visiting it -- looks suspect but preserved as-is; confirm
            self.write(item)

    def visit_Import(self, node):
        self.newline()
        for item in node.names:
            self.write('import ')
            self.visit(item)

    def visit_Expr(self, node):
        self.newline()
        self.generic_visit(node)

    def visit_FunctionDef(self, node):
        self.newline(n=2)
        self.decorators(node)
        self.newline()
        self.write('def %s(' % node.name)
        self.signature(node.args)
        self.write('):')
        self.body(node.body)

    def visit_ClassDef(self, node):
        have_args = []
        def paren_or_comma():
            # open the base-class paren on first use, comma afterwards
            if have_args:
                self.write(', ')
            else:
                have_args.append(True)
                self.write('(')

        self.newline(n=3)
        self.decorators(node)
        self.newline()
        self.write('class %s' % node.name)
        for base in node.bases:
            paren_or_comma()
            self.visit(base)
        # XXX: the if here is used to keep this module compatible
        # with python 2.6.
        if hasattr(node, 'keywords'):
            for keyword in node.keywords:
                paren_or_comma()
                self.write(keyword.arg + '=')
                self.visit(keyword.value)
            if node.starargs is not None:
                paren_or_comma()
                self.write('*')
                self.visit(node.starargs)
            if node.kwargs is not None:
                paren_or_comma()
                self.write('**')
                self.visit(node.kwargs)
        self.write(have_args and '):' or ':')
        self.body(node.body)

    def visit_If(self, node):
        self.newline()
        self.write('if ')
        self.visit(node.test)
        self.write(':')
        self.body(node.body)
        # flatten nested one-statement else-if chains into elif
        while True:
            else_ = node.orelse
            if len(else_) == 1 and isinstance(else_[0], If):
                node = else_[0]
                self.newline()
                self.write('elif ')
                self.visit(node.test)
                self.write(':')
                self.body(node.body)
            else:
                self.newline()
                self.write('else:')
                self.body(else_)
                break

    def visit_For(self, node):
        self.newline()
        self.write('for ')
        self.visit(node.target)
        self.write(' in ')
        self.visit(node.iter)
        self.write(':')
        self.body_or_else(node)

    def visit_While(self, node):
        self.newline()
        self.write('while ')
        self.visit(node.test)
        self.write(':')
        self.body_or_else(node)

    def visit_With(self, node):
        self.newline()
        self.write('with ')
        self.visit(node.context_expr)
        if node.optional_vars is not None:
            self.write(' as ')
            self.visit(node.optional_vars)
        self.write(':')
        self.body(node.body)

    def visit_Pass(self, node):
        self.newline()
        self.write('pass')

    def visit_Print(self, node):
        # XXX: python 2.6 only
        self.newline()
        self.write('print ')
        want_comma = False
        if node.dest is not None:
            self.write(' >> ')
            self.visit(node.dest)
            want_comma = True
        for value in node.values:
            if want_comma:
                self.write(', ')
            self.visit(value)
            want_comma = True
        if not node.nl:
            # trailing comma suppresses the newline in python 2 print
            self.write(',')

    def visit_Delete(self, node):
        self.newline()
        self.write('del ')
        # bug fix: was ``enumerate(node)`` -- AST nodes aren't iterable
        for idx, target in enumerate(node.targets):
            if idx:
                self.write(', ')
            self.visit(target)

    def visit_TryExcept(self, node):
        self.newline()
        self.write('try:')
        self.body(node.body)
        for handler in node.handlers:
            self.visit(handler)

    def visit_TryFinally(self, node):
        self.newline()
        self.write('try:')
        self.body(node.body)
        self.newline()
        self.write('finally:')
        self.body(node.finalbody)

    def visit_Global(self, node):
        self.newline()
        self.write('global ' + ', '.join(node.names))

    def visit_Nonlocal(self, node):
        self.newline()
        self.write('nonlocal ' + ', '.join(node.names))

    def visit_Return(self, node):
        self.newline()
        self.write('return ')
        self.visit(node.value)

    def visit_Break(self, node):
        self.newline()
        self.write('break')

    def visit_Continue(self, node):
        self.newline()
        self.write('continue')

    def visit_Raise(self, node):
        # XXX: Python 2.6 / 3.0 compatibility -- py3 nodes carry
        # exc/cause, py2 nodes carry type/inst/tback
        self.newline()
        self.write('raise')
        if hasattr(node, 'exc') and node.exc is not None:
            self.write(' ')
            self.visit(node.exc)
            if node.cause is not None:
                self.write(' from ')
                self.visit(node.cause)
        elif hasattr(node, 'type') and node.type is not None:
            self.visit(node.type)
            if node.inst is not None:
                self.write(', ')
                self.visit(node.inst)
            if node.tback is not None:
                self.write(', ')
                self.visit(node.tback)

    # Expressions

    def visit_Attribute(self, node):
        self.visit(node.value)
        self.write('.' + node.attr)

    def visit_Call(self, node):
        want_comma = []
        def write_comma():
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)

        self.visit(node.func)
        self.write('(')
        for arg in node.args:
            write_comma()
            self.visit(arg)
        for keyword in node.keywords:
            write_comma()
            self.write(keyword.arg + '=')
            self.visit(keyword.value)
        if node.starargs is not None:
            write_comma()
            self.write('*')
            self.visit(node.starargs)
        if node.kwargs is not None:
            write_comma()
            self.write('**')
            self.visit(node.kwargs)
        self.write(')')

    def visit_Name(self, node):
        self.write(node.id)

    def visit_Str(self, node):
        self.write(repr(node.s))

    def visit_Bytes(self, node):
        self.write(repr(node.s))

    def visit_Num(self, node):
        self.write(repr(node.n))

    def visit_Tuple(self, node):
        self.write('(')
        idx = -1
        for idx, item in enumerate(node.elts):
            if idx:
                self.write(', ')
            self.visit(item)
        # one-element tuples need the trailing comma
        self.write(idx and ')' or ',)')

    def sequence_visit(left, right):
        # factory for list/set literal visitors
        def visit(self, node):
            self.write(left)
            for idx, item in enumerate(node.elts):
                if idx:
                    self.write(', ')
                self.visit(item)
            self.write(right)
        return visit

    visit_List = sequence_visit('[', ']')
    visit_Set = sequence_visit('{', '}')
    del sequence_visit

    def visit_Dict(self, node):
        self.write('{')
        for idx, (key, value) in enumerate(zip(node.keys, node.values)):
            if idx:
                self.write(', ')
            self.visit(key)
            self.write(': ')
            self.visit(value)
        self.write('}')

    def visit_BinOp(self, node):
        self.write('(')
        self.visit(node.left)
        self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
        self.visit(node.right)
        self.write(')')

    def visit_BoolOp(self, node):
        self.write('(')
        for idx, value in enumerate(node.values):
            if idx:
                self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
            self.visit(value)
        self.write(')')

    def visit_Compare(self, node):
        self.write('(')
        self.visit(node.left)
        for op, right in zip(node.ops, node.comparators):
            self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
            self.visit(right)
        self.write(')')

    def visit_UnaryOp(self, node):
        self.write('(')
        op = UNARYOP_SYMBOLS[type(node.op)]
        self.write(op)
        if op == 'not':
            # 'not' needs a separating space, symbols do not
            self.write(' ')
        self.visit(node.operand)
        self.write(')')

    def visit_Subscript(self, node):
        self.visit(node.value)
        self.write('[')
        self.visit(node.slice)
        self.write(']')

    def visit_Slice(self, node):
        if node.lower is not None:
            self.visit(node.lower)
        self.write(':')
        if node.upper is not None:
            self.visit(node.upper)
        if node.step is not None:
            self.write(':')
            if not (isinstance(node.step, Name) and node.step.id == 'None'):
                self.visit(node.step)

    def visit_ExtSlice(self, node):
        # bug fix: was ``for idx, item in node.dims`` which tried to
        # unpack the dimension nodes themselves
        for idx, item in enumerate(node.dims):
            if idx:
                self.write(', ')
            self.visit(item)

    def visit_Yield(self, node):
        self.write('yield ')
        self.visit(node.value)

    def visit_Lambda(self, node):
        self.write('lambda ')
        self.signature(node.args)
        self.write(': ')
        self.visit(node.body)

    def visit_Ellipsis(self, node):
        self.write('Ellipsis')

    def generator_visit(left, right):
        # factory for comprehension/generator-expression visitors
        def visit(self, node):
            self.write(left)
            self.visit(node.elt)
            for comprehension in node.generators:
                self.visit(comprehension)
            self.write(right)
        return visit

    visit_ListComp = generator_visit('[', ']')
    visit_GeneratorExp = generator_visit('(', ')')
    visit_SetComp = generator_visit('{', '}')
    del generator_visit

    def visit_DictComp(self, node):
        self.write('{')
        self.visit(node.key)
        self.write(': ')
        self.visit(node.value)
        for comprehension in node.generators:
            self.visit(comprehension)
        self.write('}')

    def visit_IfExp(self, node):
        self.visit(node.body)
        self.write(' if ')
        self.visit(node.test)
        self.write(' else ')
        self.visit(node.orelse)

    def visit_Starred(self, node):
        self.write('*')
        self.visit(node.value)

    def visit_Repr(self, node):
        # XXX: python 2.6 only
        self.write('`')
        self.visit(node.value)
        self.write('`')

    # Helper Nodes

    def visit_alias(self, node):
        self.write(node.name)
        if node.asname is not None:
            self.write(' as ' + node.asname)

    def visit_comprehension(self, node):
        self.write(' for ')
        self.visit(node.target)
        self.write(' in ')
        self.visit(node.iter)
        if node.ifs:
            for if_ in node.ifs:
                self.write(' if ')
                self.visit(if_)

    def visit_excepthandler(self, node):
        self.newline()
        self.write('except')
        if node.type is not None:
            self.write(' ')
            self.visit(node.type)
            if node.name is not None:
                self.write(' as ')
                self.visit(node.name)
        self.write(':')
        self.body(node.body)
| Python |
# mako/cache.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from mako import exceptions
# lazily-initialized Beaker CacheManager (or a BeakerMissing stand-in);
# populated on first use by Cache._get_cache()
cache = None
class BeakerMissing(object):
    """Stand-in installed when the Beaker package cannot be imported;
    raises only when cache functionality is actually exercised."""
    def get_cache(self, name, **kwargs):
        raise exceptions.RuntimeException("the Beaker package is required to use cache functionality.")
class Cache(object):
    """Represents a data content cache made available to the module
    space of a :class:`.Template` object.

    :class:`.Cache` is a wrapper on top of a Beaker CacheManager object.
    This object in turn references any number of "containers", each of
    which defines its own backend (i.e. file, memory, memcached, etc.)
    independently of the rest.
    """

    def __init__(self, id, starttime):
        # identifier for this template's cache namespace
        self.id = id
        # template creation time; entries older than this are stale
        self.starttime = starttime
        # per-<%def> backend configuration: defname -> (type, kwargs)
        self.def_regions = {}

    def put(self, key, value, **kwargs):
        """Place a value in the cache.

        :param key: the value's key.
        :param value: the value
        :param \**kwargs: cache configuration arguments.  The
         backend is configured using these arguments upon first request.
         Subsequent requests that use the same series of configuration
         values will use that same backend.
        """
        defname = kwargs.pop('defname', None)
        expiretime = kwargs.pop('expiretime', None)
        kwargs.pop('createfunc', None)  # stripped; not meaningful for put
        # bug fix: previously called nonexistent ``put_value`` and never
        # passed ``value``, so nothing was ever stored; Beaker stores
        # via set_value(key, value, ...).
        self._get_cache(defname, **kwargs).set_value(
            key, value, starttime=self.starttime, expiretime=expiretime)

    def get(self, key, **kwargs):
        """Retrieve a value from the cache.

        :param key: the value's key.
        :param \**kwargs: cache configuration arguments.  The
         backend is configured using these arguments upon first request.
         Subsequent requests that use the same series of configuration
         values will use that same backend.
        """
        defname = kwargs.pop('defname', None)
        expiretime = kwargs.pop('expiretime', None)
        createfunc = kwargs.pop('createfunc', None)
        return self._get_cache(defname, **kwargs).get_value(
            key, starttime=self.starttime,
            expiretime=expiretime, createfunc=createfunc)

    def invalidate(self, key, **kwargs):
        """Invalidate a value in the cache.

        :param key: the value's key.
        :param \**kwargs: cache configuration arguments.  The
         backend is configured using these arguments upon first request.
         Subsequent requests that use the same series of configuration
         values will use that same backend.
        """
        defname = kwargs.pop('defname', None)
        expiretime = kwargs.pop('expiretime', None)
        kwargs.pop('createfunc', None)  # stripped from backend kwargs
        self._get_cache(defname, **kwargs).remove_value(
            key, starttime=self.starttime, expiretime=expiretime)

    def invalidate_body(self):
        """Invalidate the cached content of the "body" method for this
        template."""
        self.invalidate('render_body', defname='render_body')

    def invalidate_def(self, name):
        """Invalidate the cached content of a particular <%def> within
        this template."""
        self.invalidate('render_%s' % name, defname='render_%s' % name)

    def invalidate_closure(self, name):
        """Invalidate a nested <%def> within this template.

        Caching of nested defs is a blunt tool as there is no
        management of scope - nested defs that use cache tags
        need to have names unique of all other nested defs in the
        template, else their content will be overwritten by
        each other.
        """
        self.invalidate(name, defname=name)

    def _get_cache(self, defname, type=None, **kw):
        """Return (lazily creating) the Beaker container for *defname*,
        remembering its backend configuration for later calls."""
        global cache
        if not cache:
            try:
                from beaker import cache as beaker_cache
                cache = beaker_cache.CacheManager()
            except ImportError:
                # keep a fake cache around so subsequent
                # calls don't attempt to re-import
                cache = BeakerMissing()
        if type == 'memcached':
            type = 'ext:memcached'
        if not type:
            (type, kw) = self.def_regions.get(defname, ('memory', {}))
        else:
            self.def_regions[defname] = (type, kw)
        return cache.get_cache(self.id, type=type, **kw)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import web
import uuid
from mako.lookup import TemplateLookup
from mako import exceptions
__author__ = 'Michael Liao'
class emptyobject(object):
    """Null object: every attribute reads as '' and writes are ignored."""

    def __getattr__(self, name):
        # any attribute lookup yields the empty string
        return ''

    def __setattr__(self, name, value):
        # silently discard all assignments
        pass
class odict(dict):
    """Dict subclass exposing its keys as attributes, read and write."""

    def __getattr__(self, name):
        # missing keys raise KeyError, mirroring item access
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value
class WebError(StandardError):
    # base application error (StandardError: this is python-2 code)
    def __init__(self, message):
        super(WebError, self).__init__(message)
def next_id():
    """Generate a unique 32-character hexadecimal identifier."""
    unique = uuid.uuid4()
    return unique.hex
def _create_db():
    """Build the web.py MySQL connection, preferring SAE-provided
    credentials when running on Sina App Engine."""
    settings = dict(host='localhost', port=3306, db='weather',
                    user='weather', pw='weather')
    try:
        import sae.const
        settings.update(
            db=sae.const.MYSQL_DB,
            user=sae.const.MYSQL_USER,
            pw=sae.const.MYSQL_PASS,
            host=sae.const.MYSQL_HOST,
            port=int(sae.const.MYSQL_PORT))
    except ImportError:
        # not on SAE; keep the local development defaults
        pass
    return web.database(dbn='mysql', **settings)
# module-level database handle, created once at import time
db = _create_db()
def _create_memcache_client():
    """Return a memcache client: pylibmc when available (SAE), otherwise
    a python-memcached client pointed at the local daemon."""
    try:
        import pylibmc as _mc
        return _mc.Client()
    except ImportError:
        import memcache as _mc
        return _mc.Client(['127.0.0.1:11211'])
# module-level memcache client, created once at import time
cache = _create_memcache_client()
# absolute path of the templates/ directory next to this file
TEMPLATE_PATH = os.path.join(os.path.split(os.path.abspath(__file__))[0], 'templates')
logging.info('Init template path: %s' % TEMPLATE_PATH)
# shared mako lookup; templates are read and rendered as utf-8
TEMPLATES_LOOKUP = TemplateLookup(directories=[TEMPLATE_PATH], input_encoding='utf-8', output_encoding='utf-8')
def handler(method='GET', use_template=True):
    '''
    Decorator factory for URL handler functions: enforces the HTTP verb,
    then renders dict results through the mako template named after the
    handler function.

    using decorator:
        @handler('GET')
        def login():
            return 'success'
    is equal to:
        def login():
            return 'success'
        login = handler('GET')(login)
    '''
    def _wrapper(func):
        def _new_func(**kw):
            # reject requests whose verb does not match the declared one
            if method=='GET' and web.ctx.method!='GET':
                raise web.badrequest()
            if method=='POST' and web.ctx.method!='POST':
                raise web.badrequest()
            r = func(**kw)
            logging.info('Url handler returns: %s' % type(r))
            # POST handlers and None results are passed through untouched
            if r is None or method=='POST':
                return r
            # dict results become the model for '<funcname>.html'
            if func.use_template and isinstance(r, dict):
                try:
                    template = TEMPLATES_LOOKUP.get_template('%s.html' % func.__name__)
                    logging.info('Model: %s' % str(r))
                    return template.render(**r)
                except:
                    # NOTE(review): bare except also swallows KeyboardInterrupt/
                    # SystemExit; any template failure renders mako's error page
                    return exceptions.html_error_template().render()
            if isinstance(r, str):
                return r
            # Python 2 'unicode' type: encode for the wire
            if isinstance(r, unicode):
                return r.encode('utf-8')
            return str(r)
        _new_func.__name__ = func.__name__
        # marker attribute, presumably consulted by the URL mapper — TODO confirm
        _new_func.handler = True
        func.use_template = use_template
        return _new_func
    return _wrapper
| Python |
from pandas.io.data import DataReader
from pandas.io.data import DataFrame
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from datetime import datetime
from datetime import timedelta
import pandas as pd
import numpy as np
import urllib
import codecs
import csv
import os
import glob
import json
import requests
#from pykalman import UnscentedKalmanFilter
# Only provide the analysis tool, no ticker pick included
def ticker_retrive(ticker):
    """Download daily history for *ticker* from Yahoo (2000-01-01..today) into <ticker>.csv."""
    history = DataReader(ticker, "yahoo", datetime(2000, 1, 1), datetime.today())
    history.to_csv(ticker + ".csv")
def ticker_update():
    """Placeholder for incremental price updates; currently a no-op."""
    return None
def ticker_from_csv(ticker):
    """Load a previously saved <ticker>.csv back into a DataFrame."""
    return DataFrame.from_csv(ticker + ".csv")
def ticker_price_av(line):
    """Blank out the 'Open' field of *line* in place (stub for an average-price calc)."""
    # NOTE(review): looks unfinished — only clears the field, computes nothing
    line['Open'] = ''
def df_ticker_return(tickerdf):
    '''
    Not good strategy

    Plot the 12/26-period volume moving-average divergence ("gain") and its
    5-period smoothing against scaled price for the last 30 rows.

    tickerdf -- DataFrame with 'Adj Close' and 'Volume' columns
    '''
    columnName = ['p12','p26','v12','v26', 'pAv']
    ma = pd.DataFrame(index=tickerdf.index, columns=columnName)
    # moving average
    ma['p12'] = pd.rolling_mean(tickerdf['Adj Close'],12, min_periods=2)
    ma['p26'] = pd.rolling_mean(tickerdf['Adj Close'],26, min_periods=2)
    ma['v12'] = pd.rolling_mean(tickerdf['Volume'],12, min_periods=2)
    ma['v26'] = pd.rolling_mean(tickerdf['Volume'],26, min_periods=2)
    # average price
    ma['gain'] = ma['v12'] - ma['v26']
    ma['gainMA'] = pd.rolling_mean(ma['gain'],5, min_periods=2)
    # price scaled up so it is visible on the volume axis — TODO confirm factor
    ma['close'] = tickerdf['Adj Close']*10000
    ma = ma[-30:]
    N = len(ma)
    ind = np.arange(N) # the evenly spaced plot indices
    fig, ax = plt.subplots()
    ax.plot(ind, ma['gain'], '-')
    ax.plot(ind, ma['gainMA'],'o-')
    ax.bar(ind, ma['close'])
    def format_date(x, pos=None):
        # map a plot index back to the date label of the nearest row
        thisind = np.clip(int(x+0.5), 0, N-1)
        return ma.index[thisind].strftime('%Y-%m-%d')
    ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
    fig.autofmt_xdate()
    plt.show()
    #return ma
def df_ticker_volume(tickerdf):
    """Append 12- and 26-period volume moving averages to *tickerdf* in place.

    Bug fix: the body referenced the undefined name 'tickerDf' (wrong case),
    raising NameError on every call; it now uses the parameter 'tickerdf'.
    NOTE(review): the column is named VolMA10 but uses a 12-period window,
    matching df_ticker_return's v12 — TODO confirm the intended window.

    Returns the same (mutated) DataFrame for chaining.
    """
    tickerdf['VolMA10'] = pd.rolling_mean(tickerdf['Volume'],12, min_periods=2)
    tickerdf['VolMA26'] = pd.rolling_mean(tickerdf['Volume'],26, min_periods=2)
    return tickerdf
def chart_weekly(tickerdf):
    ''' Chart for weekly price and volume movement

    Currently a stub: only announces itself.
    tickerdf -- daily OHLCV DataFrame (unused for now)
    '''
    print "weekly chart"
if __name__ =='__main__':
    # ad-hoc driver: load a previously downloaded ticker from its CSV
    ticker = 'amrs'
    #ticker_retrive(ticker)
    tickerdf = ticker_from_csv(ticker);
| Python |
import csv
import numpy as np
from pandas import DataFrame
import pandas as pd
# all digits are decreased by 1 (matrix index = drawn number - 1)
len_total = 280
len_freq = 7
len_body = 5   # main numbers per draw
len_star = 2   # star numbers per draw
freq_body = []
freq_star = []
# symmetric co-occurrence counters for main numbers and star numbers
relation_body = np.zeros(shape=(50,50))
relation_star = np.zeros(shape=(11,11))

def rel_body(firstCounter, list_data):
    """Pair the main number at index *firstCounter* with every later one in the row."""
    # range() is empty when firstCounter is the last main number
    for later in range(firstCounter + 1, len_body):
        a = int(list_data[firstCounter]) - 1
        b = int(list_data[later]) - 1
        relation_body[a, b] += 1
        relation_body[b, a] += 1

def rel_star(first, second):
    """Record the symmetric co-occurrence of the two star numbers."""
    a = int(first) - 1
    b = int(second) - 1
    relation_star[a, b] += 1
    relation_star[b, a] += 1
def read():
    """Tally pairwise number/star co-occurrences for every row of data.csv."""
    # 'rb' is the Python 2 csv convention used throughout this module
    with open('data.csv', 'rb') as csvfile:
        for row in csv.reader(csvfile, delimiter=';'):
            # pair each main number with all later ones in the same row
            for position in range(len_body):
                rel_body(position, row)
            # the two star numbers follow the main numbers
            rel_star(row[len_body], row[len_body + 1])
def read_freq():
    """Scan the first 29 rows of data.csv, intended to build frequency tables.

    NOTE(review): unfinished — the inner loop evaluates the bare name
    'freq_body' without appending anything, so no frequencies are ever
    recorded.  TODO complete or remove.
    """
    with open('data.csv','rb') as csvfile:
        data = csv.reader(csvfile, delimiter=';')
        counter = 0
        for row in data:
            # only the first 29 rows are considered
            if counter > 28:
                return
            else :
                counter += 1
            for i in range(len_body):
                freq_body
def to_csv():
    """Wrap the co-occurrence matrices in DataFrames for export.

    Bug fix: the original passed the tally *functions* rel_body/rel_star to
    pd.DataFrame instead of the count matrices relation_body/relation_star,
    producing meaningless frames.  The frames are now also returned so a
    caller can actually write them out (the original discarded them).
    """
    df_body = pd.DataFrame(relation_body)
    df_star = pd.DataFrame(relation_star)
    return df_body, df_star
| Python |
import sys, string, re, Queue
# binary IR opcodes and their C operator counterparts (parallel lists)
arith = ['sub', 'div', 'mod', 'cmple', 'add', 'mul', 'cmpeq', 'cmplt']
operators = ['-', '/', '%', '<=', '+', '*', '==', '<']
# unary IR opcodes
arith1 = ['neg']
# instruction numbers already declared as C 'long r<n>' variables
targets = []
# size in bytes of the current function's local area, set at each 'enter'
local_size = 0;
# get operand
def getOperand(t, sline, access_local):
    """Print (comma-suppressed, stays on the line) the C expression for the
    operand starting at token sline[t].

    access_local -- when true, FP-relative accesses address the 'local'
                    array instead of the parameter block.
    Returns the index of the next unconsumed token.  A NEGATIVE return
    -(t+2) flags a two-token offset operand with a negative second token;
    the caller re-reads the following operand with access_local set.
    """
    #GP
    if sline[t] == 'GP':
        print '(char*)global',
        return t+1
    #FP
    elif sline[t] == 'FP':
        if access_local:
            print '(char*)&local[' + str(local_size/8 -1) + ']',
        else:
            print '(char*)param',
        return t+1
    #constant
    elif sline[t].isdigit():
        print sline[t],
        return t+1
    #address offsets and field offsets
    elif sline[t].endswith('_base') or sline[t].endswith('_offset'):
        if sline[t+1][0] == '-':
            print '(' + str(int(sline[t+1])+8) + ')',
            return -(t+2)
        else:
            print str(int(sline[t+1])-8),
            return t+2
    #register name
    elif sline[t][0] == '(':
        print 'r' + sline[t].strip('()'),
        return t+1
    #code label
    elif sline[t][0] == '[':
        print 'instr_' + sline[t].strip('[]'),
        return t+1
    #local variables
    else:
        if sline[t+1][0] == '-':
            print 'local[' + str((local_size-int(sline[t+1].strip('-')))/8) + ']',
        else:
            print 'param[' + str(int(sline[t+1])/8-1) + ']',
        return t+2
# get next operand start
def getStart(t, sline):
    """Return the index just past the operand at sline[t], mirroring the
    token consumption of getOperand (but printing nothing).

    A negative return -(t+2) flags a two-token offset operand whose second
    token is negative, matching getOperand's convention.
    """
    #GP
    if sline[t] == 'GP':
        return t+1
    #FP
    elif sline[t] == 'FP':
        return t+1
    #constant
    elif sline[t].isdigit():
        return t+1
    #address offsets and field offsets
    # bug fix: the suffixes must match getOperand's ('_base'/'_offset');
    # the original tested 'base'/'_offsets', so '*_offset' tokens fell
    # through to the local-variable branch and a negative offset was never
    # flagged with the -(t+2) convention
    elif sline[t].endswith('_base') or sline[t].endswith('_offset'):
        if sline[t+1][0] == '-':
            return -(t+2)
        else:
            return t+2
    #register name
    elif sline[t][0] == '(':
        return t+1
    #code label
    elif sline[t][0] == '[':
        return t+1
    #local variables (always consume two tokens, as getOperand does)
    else:
        return t+2
#----------------- Main -----------------#
#if len(sys.argv) != 2:
#    print "please specify input file name"
#    sys.exit(0)
#
#ifile = open(sys.argv[1], 'r')
#parameters
# LIFO queue of virtual-register names holding pending call arguments
params = Queue.LifoQueue()
params_n = 0
parsing_main = 0
# Print out header of the file (C prelude: I/O macros, 64-bit 'long')
print '#include <stdio.h>\n\
#include <stdlib.h>\n\
#include <string.h>\n\
#define WriteLine() printf("\\n");\n\
#define WriteLong(x) printf(" %lld", x);\n\
#define ReadLong(a) if (fscanf(stdin, "%lld", &a) != 1) a = 0;\n\
#define long long long\n\n'
print 'long global[4096];\n'
# parse the file line by line
#for line in ifile:
for line in sys.stdin:
    # sline[1] = instruction number, sline[2] = opcode, sline[3:] = operands
    sline = re.split(': | |#', line.rstrip('\n').lstrip(' '))
    if sline[2] == 'nop':
        continue
    #print label for next instruction
    if sline[2] != 'enter' and sline[2] != 'entrypc':
        print 'instr_' + sline[1] + ':;\n\t',
    #function start
    if sline[2] == 'enter':
        assert int(sline[3]) % 8 == 0, 'operand not divisible by 8';
        if not parsing_main:
            print 'void func_' + sline[1] + '(long* param) {\n',
        else:
            print 'void main() {\n',
        if (sline[3] != '0'):
            print 'long local[' + str(int(sline[3])/8) + '];\n',
        local_size = int(sline[3]);
        parsing_main = 0
    #main start
    if sline[2] == 'entrypc':
        # the next 'enter' belongs to main()
        parsing_main = 1
    #function return
    elif sline[2] == 'ret':
        # registers go out of scope with the function body
        targets = []
        print 'return;\n}\n',
    #arithmatic
    # elif sline[2] in arith:
    #     print 'long r' + sline[1] + ' =',
    ##     t = getOperand(3, sline, 0)
    ##     print operators[arith.index(sline[2])],
    ##     if (t < 0):
    ##         getOperand(-t, sline, 1)
    ##     else:
    ##         getOperand(t, sline, 0)
    #     t = getStart(3, sline)
    #     if (t < 0):
    #         getOperand(-t, sline, 1)
    #     else:
    #         getOperand(t, sline, 0)
    #     print operators[arith.index(sline[2])],
    #     getOperand(3, sline, 0)
    #     print ';\n',
    elif sline[2] in arith:
        # declare the destination register the first time it is assigned
        if not sline[1] in targets:
            sys.stdout.write('long ')
            targets.append(sline[1])
        print 'r'+sline[1] + ' =',
        t = getOperand(3, sline, 0)
        print operators[arith.index(sline[2])],
        if (t < 0):
            getOperand(-t, sline, 1)
        else:
            getOperand(t, sline, 0)
        print ';\n',
    elif sline[2] in arith1:
        if not sline[1] in targets:
            sys.stdout.write('long ')
            targets.append(sline[1])
        print 'r'+sline[1] + ' =',
        t = getOperand(3, sline, 0)
        print ' * (-1);\n',
    #branch
    elif sline[2] == 'br':
        print 'goto ',
        getOperand(3, sline, 0)
        print ';\n',
    elif sline[2] == 'blbs':
        print 'if (',
        t = getOperand(3, sline, 0)
        print '!= 0) goto',
        getOperand(t, sline, 0)
        print ';\n',
    elif sline[2] == 'blbc':
        print 'if (',
        t = getOperand(3, sline, 0)
        print '== 0) goto',
        getOperand(t, sline, 0)
        print ';\n',
    #data movement
    elif sline[2] == 'load':
        print 'long r' + sline[1] + ' = *(long*)',
        getOperand(3, sline, 0)
        print ';\n',
    elif sline[2] == 'move':
        # a move into register (n) of instruction n introduces that register
        temp = sline[4].strip('()')
        if (temp.isdigit()) and (temp == sline[1]):
            print 'long',
        t = getStart(3, sline);
        getOperand(t, sline, 0)
        print ' = ',
        getOperand(3, sline, 0)
        print ';\n',
    elif sline[2] == 'store':
        print '*(long*)',
        t = getStart(3, sline)
        getOperand(t, sline, 0)
        print ' =',
        getOperand(3, sline, 0)
        print ';\n',
    #I/O
    elif sline[2] == 'write':
        print 'WriteLong(',
        getOperand(3, sline, 0)
        print ');\n',
    elif sline[2] == 'wrl':
        print 'WriteLine();\n',
    elif sline[2] == 'read':
        #TODO: read didn't appear in all any tests.. need to be tested
        print 'long r' + sline[1] + ';\n\t',
        print 'ReadLong( r' + sline[1],
        print ');\n',
    #Parameter and call
    elif sline[2] == 'param':
        print 'long r' + sline[1] + ' = ',
        getOperand(3, sline, 0)
        print ';//input parameter\n',
        params.put(sline[1])
        params_n += 1
    elif sline[2] == 'call':
        # materialize the queued params into a heap block passed to the callee
        param_name = 'param_' + sline[1]
        print 'long* ' + param_name + ' = (long*)malloc(sizeof(long)*' + str(params_n+1) + ');\n',
        params_n = 0;
        while not params.empty():
            tt = params.get();
            print 'memcpy(' + param_name + '+' + str(params_n+1) + ', &r' + tt + ', sizeof(long));\n',
            params_n += 1
        params_n = 0
        print 'func_' + sline[3].strip('[]') + '(' + param_name + ');\n',
        print 'free (' + str(param_name) + ');\n',
sys.exit(0)
| Python |
#!/usr/bin/python
# Copyright (C) 2008 Manu Garg.
# Author: Manu Garg <manugarg@gmail.com>
#
# pacparser is a library that provides methods to parse proxy auto-config
# (PAC) files. Please read README file included with this package for more
# information about this library.
#
# pacparser is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# pacparser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA
"""
This script demonstrates how python web clients can use
proxy auto-config (PAC) files for proxy configuration using pacparser.
It take a PAC file and an url as arguments, fetches the URL using the
proxy as determined by PAC file and URL and returns the retrieved webpage.
"""
__author__ = 'manugarg@gmail.com (Manu Garg)'
__copyright__ = 'Copyright (C) 2008 Manu Garg'
__license__ = 'LGPL'
import pacparser
import socket
import sys
import urllib
def fetch_url_using_pac(pac, url):
    """Fetch *url* through the proxy chosen by PAC file *pac*.

    Returns the urllib response object, or None when no proxy can be
    determined or the fetch fails.
    """
    try:
        proxy_string = pacparser.just_find_proxy(pac, url)
    except:
        # bare except: any pacparser failure is reported the same way
        sys.stderr.write('could not determine proxy using Pacfile\n')
        return None
    # PAC results are ';'-separated directives, tried in order
    proxylist = proxy_string.split(";")
    proxies = None # Dictionary to be passed to urlopen method of urllib
    while proxylist:
        proxy = proxylist.pop(0).strip()
        if 'DIRECT' in proxy:
            # an empty dict forces a direct (proxy-less) connection
            proxies = {}
            break
        if proxy[0:5].upper() == 'PROXY':
            proxy = proxy[6:].strip()
            if isproxyalive(proxy):
                proxies = {'http': 'http://%s' % proxy}
                break
    try:
        # NOTE(review): 'proxy' is unbound here if proxy_string was empty — TODO confirm
        sys.stderr.write('trying to fetch the page using proxy %s\n' % proxy)
        response = urllib.urlopen(url, proxies=proxies)
    except Exception, e:
        sys.stderr.write('could not fetch webpage %s using proxy %s\n' %
                         (url, proxies))
        sys.stderr.write(str(e)+'\n')
        return None
    return response
def isproxyalive(proxy):
    """Return True if *proxy* ('host:port') accepts a TCP connection within 10s."""
    host_port = proxy.split(":")
    if len(host_port) != 2:
        sys.stderr.write('proxy host is not defined as host:port\n')
        return False
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(10)
    try:
        s.connect((host_port[0], int(host_port[1])))
    except Exception, e:
        # NOTE(review): the socket is not closed on this failure path
        sys.stderr.write('proxy %s is not accessible\n' % proxy)
        sys.stderr.write(str(e)+'\n')
        return False
    s.close()
    return True
def main():
    """CLI entry point: fetch <url> via the proxy from <pacfile> and print the body."""
    if len(sys.argv) != 3:
        print 'Not enough arguments'
        print 'Usage:\n%s <pacfile> <url>' % sys.argv[0]
        return None
    pacfile = sys.argv[1]
    url = sys.argv[2]
    response = fetch_url_using_pac(pacfile, url)
    if response:
        print response.read()
    else:
        sys.stderr.write('URL %s could not be retrieved using PAC file %s.' %
                         (url, pacfile))
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python2.5
# Demo: resolve a proxy for a URL using a local wpad.dat PAC file,
# first with the explicit init/parse/find/cleanup sequence, then with
# the one-shot helper.
import pacparser
pacparser.init()
pacparser.parse_pac("wpad.dat")
proxy = pacparser.find_proxy("http://www.manugarg.com")
print proxy
pacparser.cleanup()
# Or simply,
print pacparser.just_find_proxy("wpad.dat", "http://www2.manugarg.com")
| Python |
# Copyright (C) 2007 Manu Garg.
# Author: Manu Garg <manugarg@gmail.com>
#
# pacparser is a library that provides methods to parse proxy auto-config
# (PAC) files. Please read README file included with this package for more
# information about this library.
#
# pacparser is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# pacparser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
Python module to parse pac files. Look at project's homepage
http://code.google.com/p/pacparser for more information.
"""
__author__ = 'manugarg@gmail.com (Manu Garg)'
__copyright__ = 'Copyright (C) 2008 Manu Garg'
__license__ = 'LGPL'
from pacparser import _pacparser
import os
import re
import sys
url_regex = re.compile('.*\:\/\/([^\/]+).*')
def init():
    """
    Initializes pacparser engine.

    Call this before parse_pac_* / find_proxy (see just_find_proxy for the
    canonical call order).
    """
    _pacparser.init()
def parse_pac(pacfile):
    """
    (Deprecated) Same as parse_pac_file.

    Kept for backward compatibility with older callers.
    """
    parse_pac_file(pacfile)
def parse_pac_file(pacfile):
    """
    Reads the pacfile and evaluates it in the Javascript engine created by
    init().

    Prints a diagnostic and returns None when the file cannot be read.
    """
    try:
        # 'with' guarantees the handle is closed even if read() raises;
        # the original leaked the handle on a mid-read error
        with open(pacfile) as f:
            pac_script = f.read()
    except IOError:
        print('Could not read the pacfile: %s\n%s' % (pacfile, sys.exc_info()[1]))
        return
    _pacparser.parse_pac_string(pac_script)
def parse_pac_string(pac_script):
    """
    Evaluates pac_script in the Javascript engine created by init().

    pac_script -- the PAC file contents as a string.
    """
    _pacparser.parse_pac_string(pac_script)
def find_proxy(url, host=None):
    """
    Finds proxy string for the given url and host. If host is not
    defined, it's extracted from the url.

    Returns None (after printing a diagnostic) when the URL cannot be
    parsed.
    """
    if host is None:
        m = url_regex.match(url)
        if not m:
            print('URL: %s is not a valid URL' % url)
            return None
        # bug fix: compare int values with ==; 'is 1' relies on CPython
        # small-int interning and is a SyntaxWarning on modern Python
        if len(m.groups()) == 1:
            host = m.groups()[0]
        else:
            print('URL: %s is not a valid URL' % url)
            return None
    return _pacparser.find_proxy(url, host)
def version():
    """
    Returns the compiled pacparser version.

    Delegates to the C extension; the string comes from the native build.
    """
    return _pacparser.version()
def cleanup():
    """
    Destroys pacparser engine.

    Counterpart of init(); call when done finding proxies.
    """
    _pacparser.cleanup()
def just_find_proxy(pacfile, url, host=None):
    """
    This function is a wrapper around init, parse_pac, find_proxy
    and cleanup. This is the function to call if you want to find
    proxy just for one url.

    Returns the proxy string, or None when the PAC file is missing or the
    URL is invalid.
    """
    # guard clause replaces the original 'if isfile: pass / else:' inversion
    if not os.path.isfile(pacfile):
        print('PAC file: %s doesn\'t exist' % pacfile)
        return None
    if host is None:
        m = url_regex.match(url)
        if not m:
            print('URL: %s is not a valid URL' % url)
            return None
        # bug fix: '== 1' instead of identity test 'is 1' (SyntaxWarning
        # on Python 3.8+, correctness depends on int interning)
        if len(m.groups()) == 1:
            host = m.groups()[0]
        else:
            print('URL: %s is not a valid URL' % url)
            return None
    init()
    parse_pac(pacfile)
    proxy = find_proxy(url, host)
    cleanup()
    return proxy
def setmyip(ip_address):
    """
    Set my ip address. This is the IP address returned by myIpAddress()

    ip_address -- dotted-quad string reported to the PAC script.
    """
    _pacparser.setmyip(ip_address)
def enable_microsoft_extensions():
    """
    Enables Microsoft PAC extensions (dnsResolveEx, isResolvableEx,
    myIpAddressEx).
    """
    _pacparser.enable_microsoft_extensions()
| Python |
import shutil
import sys
from distutils import sysconfig
def main():
    """Copy the built pacparser package into site-packages (win32 only)."""
    if sys.platform == 'win32':
        # remove any previous install, then copy the fresh build over
        shutil.rmtree('%s\\pacparser' % sysconfig.get_python_lib(),
                      ignore_errors=True)
        shutil.copytree('pacparser', '%s\\pacparser' % sysconfig.get_python_lib())
    else:
        print 'This script should be used only on Win32 systems.'
if __name__ == '__main__':
    main()
| Python |
# Copyright (C) 2007 Manu Garg.
# Author: Manu Garg <manugarg@gmail.com>
#
# pacparser is a library that provides methods to parse proxy auto-config
# (PAC) files. Please read README file included with this package for more
# information about this library.
#
# pacparser is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# pacparser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
"""
Wrapper script around python module Makefiles. This script take care of
identifying python setup and setting up some environment variables needed by
Makefiles.
"""
import sys
import os
from distutils import sysconfig
from distutils.core import setup
from distutils.core import Extension
def main():
    """Build/install the pacparser extension.

    On win32 this shells out to Makefile.win32 (distutils handles windows
    poorly); elsewhere it runs a normal distutils setup().
    """
    # Use Makefile for windows. distutils doesn't work well with windows.
    if sys.platform == 'win32':
        # NOTE(review): non-raw string; '\w' and '\s' happen not to be
        # recognized escapes so the path survives, but this is fragile
        pydll = ('C:\windows\system32\python%s.dll' %
                 sysconfig.get_config_vars('VERSION')[0])
        os.system('make -f Makefile.win32 %s PY_HOME="%s" PY_DLL="%s"' %
                  (' '.join(sys.argv[1:]), sys.prefix, pydll))
        return
    # links against the in-tree spidermonkey build and pacparser objects
    pacparser_module = Extension('_pacparser',
                                 include_dirs = ['../spidermonkey/js/src', '..'],
                                 sources = ['pacparser_py.c'],
                                 extra_objects = ['../pacparser.o', '../libjs.a'])
    setup (name = 'pacparser',
           version = '1',
           description = 'Pacparser package',
           author = 'Manu Garg',
           author_email = 'manugarg@gmail.com',
           url = 'http://code.google.com/p/pacparser',
           long_description = 'python library to parse proxy auto-config (PAC) '
                              'files.',
           license = 'LGPL',
           ext_package = 'pacparser',
           ext_modules = [pacparser_module],
           py_modules = ['pacparser.__init__'])
if __name__ == '__main__':
    main()
| Python |
# Copyright (C) 2007 Manu Garg.
# Author: Manu Garg <manugarg@gmail.com>
#
# pacparser is a library that provides methods to parse proxy auto-config
# (PAC) files. Please read README file included with this package for more
# information about this library.
#
# pacparser is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# pacparser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
import getopt
import glob
import os
import sys
def runtests(pacfile, testdata, tests_dir):
    """Run every test case in *testdata* against *pacfile*.

    Each non-comment line of testdata is '<getopt params>|<expected result>'.
    Raises Exception on the first mismatch, or when the locally built
    pacparser module cannot be located or imported.
    """
    py_ver = '.'.join([str(x) for x in sys.version_info[0:2]])
    if sys.platform == 'win32':
        pacparser_module_path = os.path.join(tests_dir, '..', 'src', 'pymod', 'dist')
        # bug fix: the original raised when _pacparser.pyd EXISTED; the
        # failure case is the built module being MISSING
        if not os.path.exists(os.path.join(pacparser_module_path, '_pacparser.pyd')):
            raise Exception('Tests failed. Could not determine pacparser path.')
    else:
        try:
            pacparser_module_path = glob.glob(os.path.join(
                tests_dir, '..', 'src', 'pymod', 'build', 'lib*%s' % py_ver))[0]
        except Exception:
            raise Exception('Tests failed. Could not determine pacparser path.')
    if 'DEBUG' in os.environ: print('Pacparser module path: %s' %
                                    pacparser_module_path)
    # make the freshly built module importable ahead of any installed copy
    sys.path.insert(0, pacparser_module_path)
    try:
        import pacparser
    except ImportError:
        raise Exception('Tests failed. Could not import pacparser.')
    if 'DEBUG' in os.environ: print('Imported pacparser module: %s' %
                                    sys.modules['pacparser'])
    # 'with' ensures the testdata handle is closed (the original leaked it)
    with open(testdata) as f:
        for line in f:
            comment = ''
            if '#' in line:
                comment = line.split('#', 1)[1]
                line = line.split('#', 1)[0].strip()
            if not line:
                continue
            # skip cases needing network access when NO_INTERNET is set
            if ('NO_INTERNET' in os.environ and os.environ['NO_INTERNET'] and
                'INTERNET_REQUIRED' in comment):
                continue
            if 'DEBUG' in os.environ: print(line)
            (params, expected_result) = line.strip().split('|')
            args = dict(getopt.getopt(params.split(), 'eu:c:')[0])
            if '-e' in args:
                pacparser.enable_microsoft_extensions()
            if '-c' in args:
                pacparser.setmyip(args['-c'])
            pacparser.init()
            pacparser.parse_pac_file(pacfile)
            result = pacparser.find_proxy(args['-u'])
            pacparser.cleanup()
            if result != expected_result:
                raise Exception('Tests failed. Got "%s", expected "%s"' % (result, expected_result))
    print('All tests were successful.')
def main():
    """Run the suite using the proxy.pac and testdata files located beside this script."""
    here = os.path.dirname(os.path.join(os.getcwd(), sys.argv[0]))
    runtests(os.path.join(here, 'proxy.pac'),
             os.path.join(here, 'testdata'),
             here)
if __name__ == '__main__':
    main()
| Python |
import cherrypy
import sqlite3
import Common
import os
import time
import hashlib
from postmarkup import render_bbcode
# directory containing this module (relative), and its absolute counterpart
localdir = os.path.dirname(__file__)
absdir = os.path.join(os.getcwd(), localdir)
class User(Common.Template):
    """Forum user section: user list, registration, profile display and edit.

    Exposed CherryPy handlers yield HTML fragments; data lives in the
    per-thread database connection (cherrypy.thread_data.db).
    """
    def index (self):
        # the bare /User URL shows the full user list
        return self.default()
    index.exposed = True
    # this function receives post data from the register page. at this point we already know the username is unique. all we do here is add the user to the db and set the user's session. we then display the /User index
    def user_register(self, Username=None, Password=None, Avatar=None, Signature=None):
        c = cherrypy.thread_data.db.cursor()
        c.execute('select max(ID) from User')
        uid = c.fetchone()[0] + 1
        # add the user to the system (Permission 2 = regular user — TODO confirm)
        c.execute('insert into User (ID,Username,Password,Permission,Avatar,Signature,JoinDate) values (NULL,?,?,2,?,?, date(\'now\'))', (self.strip_html_bbcode(Username), hashlib.md5(Password).hexdigest(), str(self.upload(Avatar, uid)), Signature,))
        cherrypy.thread_data.db.commit()
        # create the user's session
        cherrypy.session['ID'] = uid
        cherrypy.session['Username'] = self.strip_html_bbcode(Username)
        cherrypy.session['Permission'] = c.execute('select Permission from User where ID=?', (uid,)).fetchone()[0]
        return self.index()
    user_register.exposed = True
    #this page displays the registration form on the /login page after the user hits the register button. the post information is directed here and put in its respective fields. the form itself posts to user_register
    def registerUserPage(self, user, passwd):
        cursor = cherrypy.thread_data.db.cursor()
        yield self.header()
        yield self.links()
        yield "<h1>Registration:</h1>"
        yield "<table class='registration' width='100%'>"
        #selects the admin which will always be there
        cursor.execute("SELECT * FROM User WHERE ID IS 0")
        info = cursor.fetchone()
        #grabs a list of all column names
        attr = cursor.description
        yield "<form method='post' action='User/user_register' enctype='multipart/form-data'>"
        #loop through all 8 columns
        for i in range(8):
            attrTmp = attr[i]
            yield "<tr>"
            #horrific if/else block for the register form. basically makes a row for each column and selectively lets the user edit them.
            if not (attrTmp[0]=='ID' or attrTmp[0]=='BanTimestamp' or attrTmp[0]=='Permission' or attrTmp[0]=='JoinDate'):
                yield "<td class='registration_list'>%s</td>"%str(attrTmp[0])
                yield "<td class='registration_list_edit'>"
                if attrTmp[0]=='Password':
                    yield "<input name='%s' type='password' value='%s' readonly></input>"%(attrTmp[0],str(passwd))
                elif attrTmp[0]=='Avatar':
                    yield "<input name='%s' type='file'></input>"%attrTmp[0]
                elif attrTmp[0]=='Username':
                    yield "<input name='%s' type='text' value='%s' readonly></input>"%(attrTmp[0],str(user))
                elif attrTmp[0]=='Signature':
                    yield "<textarea class='monospace' name='Signature' rows='20' cols='80'>Signature</textarea>"
                yield "</td>"
            yield "</tr>"
        yield "<tr><td class='registration_list'><input type='submit' value='Register'></input>"
        yield "</form>"
        yield "</table>"
        yield "<br />"+self.footer()
    registerUserPage.exposed = True
    # simple method to determine if you can edit userinfo
    def ownpage (self, id):
        # admins (permission 0) edit anyone; mods (1) edit non-admins; users edit themselves
        if self.getLoginPermission() == 0 or id == self.getLoginID() or ( self.getLoginPermission() == 1 and id != 0 ) or ( self.getLoginPermission() == 0 and id == 0 ):
            return True
        else:
            return False
    def upload (self, image=None, id=None):
        """Save an uploaded avatar to images/avatars/<id>.dat; returns its path, or '' if nothing was uploaded."""
        #because image is still an object even if nothing is uploaded this is the only way i could find to determine if nothing was uploaded. seems like there should be an easier way to do this...
        if image.fullvalue()=="":
            #return empty string so it doesnt break all avatar pics in the forum
            return ""
        saved_image = open('images/avatars/%s.dat'%id, 'wb')
        while True:
            #write pic file in 512-byte chunks
            data = image.file.read(512)
            if not data:
                break
            saved_image.write(data)
        saved_image.close()
        return saved_image.name
    def user_edit (self, Username=None, Permission=None, BanTimestamp=None, Avatar=None, Password=None, ID = None, JoinDate = None, Signature = None):
        """Apply any provided profile changes for user *ID*, then redirect to their page."""
        c = cherrypy.thread_data.db.cursor()
        #somewhat better if block than the register form. if a value was entered, change it in the db.
        if Password:
            c.execute("UPDATE User SET Password=? WHERE ID=?",(hashlib.md5(Password).hexdigest(), int(ID),))
            cherrypy.thread_data.db.commit()
        #would probably work fine with just the fullvalue()==""
        if Avatar and (not Avatar.fullvalue() == ""):
            Av = self.upload(Avatar, ID)
            c.execute("UPDATE User Set Avatar=? WHERE ID=?",(Av,int(ID),))
            cherrypy.thread_data.db.commit()
        # permission changes restricted to admins (permission 0)
        if Permission and self.getLoginPermission() < 1:
            c.execute("UPDATE User Set Permission=? WHERE ID=?",(Permission,int(ID),))
            cherrypy.thread_data.db.commit()
        # bans restricted to mods and admins
        if BanTimestamp and self.getLoginPermission() < 2:
            #parse correct timestamp: input is an offset in days
            ts = int(BanTimestamp)
            if ts < 0:
                ts = "NULL"
            elif ts > 0:
                ts = time.time() + ts * 24 * 60 * 60
            c.execute("UPDATE User Set BanTimestamp=? WHERE ID=?",(ts,int(ID),))
            cherrypy.thread_data.db.commit()
        if Signature:
            c.execute("UPDATE User Set Signature=? WHERE ID=?",(Signature,int(ID),))
            cherrypy.thread_data.db.commit()
        yield self.redirect("/User/%s"%ID)
    user_edit.exposed = True
    #should be -1 by default to indicate something went wrong if its not changed
    def default (self, id = -1):
        """Render the user list (id == -1) or a single user's info/edit page."""
        # for some reason id is passed as a string
        id = int(id)
        cursor = cherrypy.thread_data.db.cursor()
        cursor2 = cherrypy.thread_data.db.cursor()
        yield self.header()
        yield self.links()
        # i dont think i need breadcrumbs in this pageview
        yield self.breadcrumbs()
        yield "<br/>"
        # if the user is looking at the root Users page, display list of all + links
        if id == -1:
            cursor.execute('''
            SELECT User.ID, User.Username, User.Avatar, User.Joindate, User.Signature
            FROM User''')
            #print list of all users
            yield "<table class='user_list' align='center' width='100%'>"
            yield "<tr><th class='user_list' width='200px'>Username</th><th class='user_list' width='125px'>Avatar</th><th class='user_list' width='125px'>Post Count</th><th class='user_list' width='125px'>Join Date</th><th class='user_list'>Signature</th></tr>"
            #select number of posts by UserID
            for UserID, Username, Avatar, Joindate, Signature in cursor.fetchall():
                cursor2.execute('''SELECT COUNT(Post.ID) FROM Post WHERE Post.UserID = ?''', (UserID,))
                count = cursor2.fetchone()[0]
                if Avatar == "" or Avatar == None:
                    yield "<tr align='center'><td class='user_list'><a href='%s'>%s</a></td><td class='user_list'><td class='user_list'>%s</td><td class='user_list'>%s</td><td class='user_list'>%s</td></tr>" % (UserID, Username, count, Joindate, render_bbcode(Signature))
                else:
                    yield "<tr align='center' valign='center'><td class='user_list'><a href='%s'>%s</a></td><td class='user_list'><img src='/%s' width='100px' height='100px' /></td><td class='user_list'>%s</td><td class='user_list'>%s</td><td class='user_list'>%s</td></tr>" % (UserID, Username, Avatar, count, Joindate, render_bbcode(Signature))
            yield "</table>"
        else:
            #print all the user info for a given user
            yield "<br /><h1>%s's User Info</h1><br />"%self.getUsername(id)
            yield "<table class='user_info' width='100%'>"
            cursor.execute("SELECT * FROM User WHERE ID IS ?",(id,))
            info = cursor.fetchone()
            attr = cursor.description
            yield "<form method='post' action='user_edit' enctype='multipart/form-data'>"
            #loop like the other loops through all columns in the user table
            for i in range(8):
                attrTmp = attr[i]
                yield "<tr>"
                #someone elses page: passwords are never displayed to others
                if attrTmp[0] != 'Password' or self.ownpage(id):
                    yield "<td class='user_info_list'>%s</td>"%str(attrTmp[0])
                    if attrTmp[0]=='BanTimestamp' and info[i] != None and info[i] != "NULL":
                        # decode the timestamp
                        yield "<td class='user_info_list'>%s</td>"%str(time.ctime(int(info[i])))
                    elif attrTmp[0]=="Password":
                        yield "<td class='user_info_list'> </td>"
                    else:
                        yield "<td class='user_info_list'>%s</td>"%render_bbcode(str(info[i]))
                # edit column only shown when the viewer may change this profile
                if self.ownpage(id):
                    yield "<td class='user_info_list_edit'>"
                    if attrTmp[0]=='Password':
                        yield "<input name='%s' "%attrTmp[0]
                        yield "type='password'>"
                    elif attrTmp[0]=='JoinDate':
                        yield "<input name='%s' "%attrTmp[0]
                        yield "type='hidden' readonly>"
                    elif attrTmp[0]=='Avatar':
                        yield "<input name='%s' "%attrTmp[0]
                        yield "type='file'>"
                    elif attrTmp[0]=='ID':
                        yield "<input name='%s' "%attrTmp[0]
                        yield "type='hidden' value='%s' readonly>"%id
                    elif attrTmp[0]=='Signature':
                        yield "<textarea class='monospace' name='%s' rows='20' cols='80'>%s</textarea>" % (attrTmp[0], info[i])
                    elif attrTmp[0]=='Username':
                        yield "<input name='%s' "%attrTmp[0]
                        yield "type='hidden' value='%s' readonly>"%str(info[i])
                    #mod/admin editable stuff below
                    elif attrTmp[0]=='BanTimestamp':
                        if self.getLoginPermission() < 2 and id != self.getLoginID():
                            yield "<input name='%s' "%attrTmp[0]
                            yield "type='text'> <i>(offest in days, 0 == permaban, -1 == clear)</i>"
                        else:
                            yield "<input name='%s' "%attrTmp[0]
                            yield "type='hidden' readonly>"
                    elif attrTmp[0]=='Permission':
                        if self.getLoginPermission() < 1:
                            yield "<input name='%s' "%attrTmp[0]
                            yield "type='text'>"
                        else:
                            yield "<input name='%s' "%attrTmp[0]
                            yield "type='hidden' readonly>"
                    yield "</input></td>"
                yield "</tr>"
            # now print all your info
            yield "<tr><td class='user_info_list'>"
            # display submit button only if the user has permission to
            if self.ownpage(id):
                yield "<input type='submit' value='Submit Changes'></input>"
            yield "</form>"
            yield "</table>"
        yield "<br />"+self.footer()
    default.exposed = True
| Python |
# -*- coding: UTF-8 -*-
"""
Post Markup
Author: Will McGugan (http://www.willmcgugan.com)
"""
__version__ = "1.1.4"
import re
from urllib import quote, unquote, quote_plus, urlencode
from urlparse import urlparse, urlunparse
pygments_available = True
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name, ClassNotFound
from pygments.formatters import HtmlFormatter
except ImportError:
# Make Pygments optional
pygments_available = False
def annotate_link(domain):
    """This function is called by the url tag. Override to disable or change behaviour.

    domain -- Domain parsed from url
    Returns the escaped ' [domain]' suffix appended after a rendered link.
    """
    return u" [%s]"%_escape(domain)
_re_url = re.compile(r"((https?):((//)|(\\\\))+[\w\d:#@%/;$()~_?\+-=\\\.&]*)", re.MULTILINE|re.UNICODE)
# Strips HTML tags (<...>) and character entities (&...;).
# Bug fix: the original non-raw string contained the invalid escape
# sequences '\&' and '\;' (a SyntaxWarning on modern Python); in a regex
# '\&' and '\;' mean plain '&' and ';', so a raw string with the escapes
# dropped is byte-for-byte equivalent at match time.
_re_html=re.compile(r'<.*?>|&.*?;', re.UNICODE)
def textilize(s):
    """Remove markup from html"""
    return _re_html.sub("", s)
# ["]...[/"] excerpt span, any bbcode tag, and runs of newlines.
_re_excerpt = re.compile(r'\[".*?\]+?.*?\[/".*?\]+?', re.DOTALL|re.UNICODE)
_re_remove_markup = re.compile(r'\[.*?\]', re.DOTALL|re.UNICODE)
_re_break_groups = re.compile(r'\n+', re.DOTALL|re.UNICODE)

def get_excerpt(post):
    """Returns an excerpt between ["] and [/"]

    post -- BBCode string"""
    found = _re_excerpt.search(post)
    if found is None:
        return ""
    # Preserve line breaks as html, then strip the bbcode tags themselves.
    quoted = found.group(0).replace(u'\n', u"<br/>")
    return _re_remove_markup.sub("", quoted)
def strip_bbcode(bbcode):
    """Strips bbcode tags from a string.

    bbcode -- A string to remove tags from
    """
    # Keep only the plain-text tokens from the tokenizer's output.
    text_parts = [token[1] for token in PostMarkup.tokenize(bbcode)
                  if token[0] == PostMarkup.TOKEN_TEXT]
    return u"".join(text_parts)
def create(include=None, exclude=None, use_pygments=True, **kwargs):
    """Create a postmarkup object that converts bbcode to XML snippets. Note
    that creating postmarkup objects is _not_ threadsafe, but rendering the
    html _is_ threadsafe. So typically you will need just one postmarkup instance
    to render the bbcode accross threads.

    include -- List or similar iterable containing the names of the tags to use
    If omitted, all tags will be used
    exclude -- List or similar iterable containing the names of the tags to exclude.
    If omitted, no tags will be excluded
    use_pygments -- If True, Pygments (http://pygments.org/) will be used for the code tag,
    otherwise it will use <pre>code</pre>
    kwargs -- Remaining keyword arguments are passed to tag constructors.
    """
    postmarkup = PostMarkup()
    postmarkup_add_tag = postmarkup.tag_factory.add_tag

    def add_tag(tag_class, name, *args, **kwargs):
        # Register the tag only if it passes the include/exclude filters.
        if include is not None and name not in include:
            return
        if exclude is not None and name in exclude:
            return
        postmarkup_add_tag(tag_class, name, *args, **kwargs)

    # Inline text formatting.
    add_tag(SimpleTag, 'b', 'strong')
    add_tag(SimpleTag, 'i', 'em')
    add_tag(SimpleTag, 'u', 'u')
    add_tag(SimpleTag, 's', 'strike')

    # Links.
    add_tag(LinkTag, 'link', **kwargs)
    add_tag(LinkTag, 'url', **kwargs)

    add_tag(QuoteTag, 'quote')

    # Search-engine shortcuts.
    add_tag(SearchTag, u'wiki',
            u"http://en.wikipedia.org/wiki/Special:Search?search=%s", u'wikipedia.com', **kwargs)
    add_tag(SearchTag, u'google',
            u"http://www.google.com/search?hl=en&q=%s&btnG=Google+Search", u'google.com', **kwargs)
    add_tag(SearchTag, u'dictionary',
            u"http://dictionary.reference.com/browse/%s", u'dictionary.com', **kwargs)
    add_tag(SearchTag, u'dict',
            u"http://dictionary.reference.com/browse/%s", u'dictionary.com', **kwargs)

    # Media, lists, and styling.
    add_tag(ImgTag, u'img')
    add_tag(ListTag, u'list')
    add_tag(ListItemTag, u'*')
    add_tag(SizeTag, u"size")
    add_tag(ColorTag, u"color")
    add_tag(CenterTag, u"center")

    # Code blocks: syntax highlighted when Pygments is installed.
    if use_pygments:
        assert pygments_available, "Install Pygments (http://pygments.org/) or call create with use_pygments=False"
        add_tag(PygmentsCodeTag, u'code', **kwargs)
    else:
        add_tag(CodeTag, u'code', **kwargs)

    add_tag(ParagraphTag, u"p")

    return postmarkup
class TagBase(object):
    """Base class for all bbcode tags.

    name -- The name of the bbcode tag
    enclosed -- True if the contents of the tag should not be bbcode processed.
    auto_close -- True if the tag is standalone and does not require a close tag.
    inline -- True if the tag generates an inline html tag.
    strip_first_newline -- True to strip a leading newline from the contents.
    """

    def __init__(self, name, enclosed=False, auto_close=False, inline=False, strip_first_newline=False, **kwargs):
        self.name = name
        self.enclosed = enclosed
        self.auto_close = auto_close
        self.inline = inline
        self.strip_first_newline = strip_first_newline
        # Character offsets of the open/close tags in the source markup,
        # and the indices of the matching nodes in the parser's node list.
        # All filled in by open()/close() during parsing.
        self.open_pos = None
        self.close_pos = None
        self.open_node_index = None
        self.close_node_index = None

    def open(self, parser, params, open_pos, node_index):
        """ Called when the open tag is initially encountered. """
        self.params = params
        self.open_pos = open_pos
        self.open_node_index = node_index

    def close(self, parser, close_pos, node_index):
        """ Called when the close tag is initially encountered. """
        self.close_pos = close_pos
        self.close_node_index = node_index

    def render_open(self, parser, node_index):
        """ Called to render the open tag. """
        pass

    def render_close(self, parser, node_index):
        """ Called to render the close tag. """
        pass

    def get_contents(self, parser):
        """Returns the string between the open and close tag."""
        return parser.markup[self.open_pos:self.close_pos]

    def get_contents_text(self, parser):
        """Returns the string between the the open and close tag, minus bbcode tags."""
        text_nodes = parser.get_text_nodes(self.open_node_index, self.close_node_index)
        return u"".join(text_nodes)

    def skip_contents(self, parser):
        """Skips the contents of a tag while rendering."""
        parser.skip_to_node(self.close_node_index)

    def __str__(self):
        return '[%s]' % self.name
class SimpleTag(TagBase):
    """A tag that can be rendered with a simple substitution. """

    def __init__(self, name, html_name, **kwargs):
        """ html_name -- the html tag to substitute."""
        TagBase.__init__(self, name, inline=True)
        self.html_name = html_name

    def render_open(self, parser, node_index):
        return u"<%s>" % self.html_name

    def render_close(self, parser, node_index):
        return u"</%s>" % self.html_name
class DivStyleTag(TagBase):
    """A simple tag that is replaces with a div and a style."""

    def __init__(self, name, style, value, **kwargs):
        TagBase.__init__(self, name)
        # CSS property name and its fixed value.
        self.style = style
        self.value = value

    def render_open(self, parser, node_index):
        return u'<div style="%s:%s;">' % (self.style, self.value)

    def render_close(self, parser, node_index):
        return u'</div>'
class LinkTag(TagBase):
    """Renders [url]/[link] tags as anchors.

    Only http/https schemes are accepted; javascript: urls are rejected
    outright. Nested url tags are collapsed into a single link.
    """

    _safe_chars = frozenset('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                            'abcdefghijklmnopqrstuvwxyz'
                            '0123456789'
                            '_.-=/&?:%&')

    _re_domain = re.compile(r"//([a-z0-9-\.]*)", re.UNICODE)

    def __init__(self, name, annotate_links=True, **kwargs):
        TagBase.__init__(self, name, inline=True)
        # When True the rendered link is followed by annotate_link(domain).
        self.annotate_links = annotate_links

    def render_open(self, parser, node_index):
        self.domain = u''
        tag_data = parser.tag_data
        nest_level = tag_data['link_nest_level'] = tag_data.setdefault('link_nest_level', 0) + 1

        # Nested [url] tags render nothing; only the outermost one counts.
        if nest_level > 1:
            return u""

        if self.params:
            url = self.params.strip()
        else:
            # Url was given as the tag contents, which are already escaped.
            url = self.get_contents_text(parser).strip()
            url = _unescape(url)

        self.domain = ""

        if u"javascript:" in url.lower():
            return ""

        if ':' not in url:
            url = 'http://' + url

        scheme, uri = url.split(':', 1)

        if scheme not in ['http', 'https']:
            return u''

        try:
            domain = self._re_domain.search(uri.lower()).group(1)
        except (AttributeError, IndexError):
            # BUGFIX: re.search returns None (-> AttributeError on .group)
            # when the uri has no '//' part; the original only caught
            # IndexError, so such urls crashed the renderer.
            return u''

        domain = domain.lower()
        if domain.startswith('www.'):
            domain = domain[4:]

        def percent_encode(s):
            # Percent-encode every byte outside the safe set.
            safe_chars = self._safe_chars
            def replace(c):
                if c not in safe_chars:
                    return "%%%02X" % ord(c)
                else:
                    return c
            return "".join([replace(c) for c in s])

        self.url = percent_encode(url.encode('utf-8', 'replace'))
        self.domain = domain

        if not self.url:
            return u""

        if self.domain:
            return u'<a href="%s">' % self.url
        else:
            return u""

    def render_close(self, parser, node_index):
        tag_data = parser.tag_data
        tag_data['link_nest_level'] -= 1

        # Still inside an outer [url]; render nothing for the nested one.
        if tag_data['link_nest_level'] > 0:
            return u''

        if self.domain:
            return u'</a>' + self.annotate_link(self.domain)
        else:
            return u''

    def annotate_link(self, domain=None):
        """Returns the annotation appended after the link, or u""."""
        if domain and self.annotate_links:
            return annotate_link(domain)
        else:
            return u""
class QuoteTag(TagBase):
    """Renders [quote] / [quote author] as a blockquote.

    The redundant open()/close() overrides that merely delegated to
    TagBase with identical arguments have been removed (dead code).
    """

    def __init__(self, name, **kwargs):
        # Drop the newline that usually follows the [quote] tag.
        TagBase.__init__(self, name, strip_first_newline=True)

    def render_open(self, parser, node_index):
        if self.params:
            # Attribution line, escaped through the standard replacements.
            return u'<blockquote><em>%s</em><br/>' % (PostMarkup.standard_replace(self.params))
        else:
            return u'<blockquote>'

    def render_close(self, parser, node_index):
        return u"</blockquote>"
class SearchTag(TagBase):
    """Renders a search-engine shortcut, e.g. [google]query[/google].

    url -- Search url containing a %s placeholder for the quoted query.
    label -- Optional label used to annotate the closing link.
    annotate_links -- If True, append annotate_link(label) after the link.
    """

    def __init__(self, name, url, label="", annotate_links=True, **kwargs):
        TagBase.__init__(self, name, inline=True)
        self.url = url
        self.label = label
        self.annotate_links = annotate_links

    # Parameter renamed from the misspelled 'node_idex' for consistency
    # with every other tag's render_open signature (calls are positional).
    def render_open(self, parser, node_index):
        # The query comes from the tag parameter, or the enclosed text.
        if self.params:
            search = self.params
        else:
            search = self.get_contents(parser)
        link = u'<a href="%s">' % self.url
        if u'%' in link:
            return link % quote_plus(search.encode("UTF-8"))
        else:
            return link

    def render_close(self, parser, node_index):
        if self.label:
            if self.annotate_links:
                return u'</a>' + annotate_link(self.label)
            else:
                return u'</a>'
        else:
            return u''
class PygmentsCodeTag(TagBase):
    """Syntax-highlights [code <lexer>] blocks with Pygments."""

    def __init__(self, name, pygments_line_numbers=False, **kwargs):
        # Contents are enclosed: no bbcode processing inside code blocks.
        TagBase.__init__(self, name, enclosed=True, strip_first_newline=True)
        self.line_numbers = pygments_line_numbers

    def render_open(self, parser, node_index):
        contents = self.get_contents(parser)
        self.skip_contents(parser)

        try:
            lexer = get_lexer_by_name(self.params, stripall=True)
        except ClassNotFound:
            # Unknown language: fall back to an escaped <pre> block.
            return '<div class="code"><pre>%s</pre></div>' % _escape(contents)

        formatter = HtmlFormatter(linenos=self.line_numbers, cssclass="code")
        return highlight(contents, lexer, formatter)
class CodeTag(TagBase):
    """Renders [code] contents verbatim: escaped, newlines preserved."""

    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, enclosed=True, strip_first_newline=True)

    def render_open(self, parser, node_index):
        raw = self.get_contents(parser)
        self.skip_contents(parser)
        return '<div class="code"><pre>%s</pre></div>' % _escape_no_breaks(raw)
class ImgTag(TagBase):
    """Renders [img]url[/img] as an <img> element."""

    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, inline=True)

    def render_open(self, parser, node_index):
        contents = self.get_contents(parser)
        self.skip_contents(parser)
        # Strip nested bbcode and neutralise quotes so the src attribute
        # cannot be broken out of.
        src = strip_bbcode(contents).replace(u'"', "%22")
        return u'<img src="%s"></img>' % src
class ListTag(TagBase):
    """Renders [list] / [list=1|a|A] as <ul> or styled <ol> lists.

    The redundant open()/close() overrides that merely delegated to
    TagBase with identical arguments have been removed (dead code).
    """

    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, strip_first_newline=True)

    def render_open(self, parser, node_index):
        self.close_tag = u""

        tag_data = parser.tag_data
        tag_data.setdefault("ListTag.count", 0)

        # Nested lists render nothing; only one level is supported.
        if tag_data["ListTag.count"]:
            return u""

        tag_data["ListTag.count"] += 1
        # Tell the first [*] that its <li> was already emitted here.
        tag_data["ListItemTag.initial_item"] = True

        if self.params == "1":
            self.close_tag = u"</li></ol>"
            return u"<ol><li>"
        elif self.params == "a":
            self.close_tag = u"</li></ol>"
            return u'<ol style="list-style-type: lower-alpha;"><li>'
        elif self.params == "A":
            self.close_tag = u"</li></ol>"
            return u'<ol style="list-style-type: upper-alpha;"><li>'
        else:
            self.close_tag = u"</li></ul>"
            return u"<ul><li>"

    def render_close(self, parser, node_index):
        tag_data = parser.tag_data
        tag_data["ListTag.count"] -= 1
        return self.close_tag
class ListItemTag(TagBase):
    """Renders [*] as the next <li> within an open [list]."""

    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name)
        self.closed = False

    def render_open(self, parser, node_index):
        tag_data = parser.tag_data
        # A [*] outside of any [list] renders nothing.
        if not tag_data.setdefault("ListTag.count", 0):
            return u""
        if tag_data["ListItemTag.initial_item"]:
            # First item: the opening <li> was emitted by the list itself.
            tag_data["ListItemTag.initial_item"] = False
            return
        return u"</li><li>"
class SizeTag(TagBase):
    """Renders [size <n>] as a font-size span, clamped to 4..64 px."""

    valid_chars = frozenset("0123456789")

    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, inline=True)

    def render_open(self, parser, node_index):
        # Keep only digits from the parameter; anything else is ignored.
        digits = "".join([c for c in self.params if c in self.valid_chars])
        try:
            self.size = int(digits)
        except ValueError:
            self.size = None

        if self.size is None:
            return u""
        self.size = self.validate_size(self.size)
        return u'<span style="font-size:%spx">' % self.size

    def render_close(self, parser, node_index):
        if self.size is None:
            return u""
        return u'</span>'

    def validate_size(self, size):
        """Clamp *size* into the 4..64 range."""
        return max(4, min(64, size))
class ColorTag(TagBase):
    """Renders [color <name-or-#hex>] as an inline colored span."""

    valid_chars = frozenset("#0123456789abcdefghijklmnopqrstuvwxyz")

    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, inline=True)
        # Default so render_close is safe even if render_open found no
        # usable color (or was never called).
        self.color = u""

    def render_open(self, parser, node_index):
        valid_chars = self.valid_chars
        words = self.params.split()
        if not words:
            # BUGFIX: '[color]' with an empty parameter used to raise
            # IndexError here (params.split()[0:1][0] on an empty list).
            self.color = u""
            return u""
        color = words[0].lower()
        # Keep only characters that are safe inside a CSS color value.
        self.color = "".join([c for c in color if c in valid_chars])
        if not self.color:
            return u""
        return u'<span style="color:%s">' % self.color

    def render_close(self, parser, node_index):
        if not self.color:
            return u''
        return u'</span>'
class CenterTag(TagBase):
    """Renders [center] as a centred div."""

    def render_open(self, parser, node_index, **kwargs):
        return u'<div style="text-align:center;">'

    def render_close(self, parser, node_index):
        return u'</div>'
class ParagraphTag(TagBase):
    """Implicit [p] tag inserted by PostMarkup.insert_paragraphs."""

    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name)

    def render_open(self, parser, node_index, **kwargs):
        tag_data = parser.tag_data
        level = tag_data.setdefault('ParagraphTag.level', 0)

        ret = []
        if level > 0:
            # Close the previous paragraph before opening a new one.
            ret.append(u'</p>')
            tag_data['ParagraphTag.level'] -= 1
        ret.append(u'<p>')
        tag_data['ParagraphTag.level'] += 1
        return u''.join(ret)

    def render_close(self, parser, node_index):
        tag_data = parser.tag_data
        level = tag_data.setdefault('ParagraphTag.level', 0)
        if not level:
            return u''
        tag_data['ParagraphTag.level'] -= 1
        return u'</p>'
class SectionTag(TagBase):
    """A specialised tag that stores its contents in a dictionary. Can be
    used to define extra contents areas.
    """

    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, enclosed=True)

    def render_open(self, parser, node_index):
        # Section names are normalised: lowercased, spaces to underscores.
        self.section_name = self.params.strip().lower().replace(u' ', u'_')

        contents = self.get_contents(parser)
        self.skip_contents(parser)

        # Collect the raw contents under tag_data['sections'][name].
        sections = parser.tag_data.setdefault('sections', {})
        sections.setdefault(self.section_name, []).append(contents)

        return u''
# http://effbot.org/zone/python-replace.htm
class MultiReplace:
    """Performs several string substitutions in a single pass.

    Based on http://effbot.org/zone/python-replace.htm: one alternation
    regex is built from the replacement keys so each position in the
    input is examined only once.
    """

    def __init__(self, repl_dict):
        """repl_dict -- maps each source substring to its replacement."""
        # string to string mapping; use a regular expression.
        # sorted() replaces the py2-only keys()/sort() pair and keeps the
        # same reverse-lexical ordering of alternatives.
        keys = sorted(repl_dict.keys(), reverse=True)
        pattern = u"|".join([re.escape(key) for key in keys])
        self.pattern = re.compile(pattern)
        self.dict = repl_dict

    def replace(self, s):
        """Apply the replacement dictionary to string *s*."""
        def repl(match, get=self.dict.get):
            item = match.group(0)
            return get(item, item)
        return self.pattern.sub(repl, s)

    __call__ = replace
def _escape(s):
    """Html-escape *s*, first dropping any trailing newlines."""
    trimmed = s.rstrip('\n')
    return PostMarkup.standard_replace(trimmed)
def _escape_no_breaks(s):
    """Html-escape *s* without converting newlines to break tags."""
    trimmed = s.rstrip('\n')
    return PostMarkup.standard_replace_no_break(trimmed)
def _unescape(s):
    """Reverse the standard html escaping applied by _escape."""
    return PostMarkup.standard_unreplace(s)
class TagFactory(object):
    """Creates fresh tag instances on demand, looked up by name."""

    def __init__(self):
        # Maps tag name -> zero-argument callable producing a new tag.
        self.tags = {}

    @classmethod
    def tag_factory_callable(cls, tag_class, name, *args, **kwargs):
        """
        Returns a callable that returns a new tag instance.
        """
        def make():
            return tag_class(name, *args, **kwargs)
        return make

    def add_tag(self, cls, name, *args, **kwargs):
        """Register tag class *cls* under *name*."""
        self.tags[name] = self.tag_factory_callable(cls, name, *args, **kwargs)

    def __getitem__(self, name):
        return self.tags[name]()

    def __contains__(self, name):
        return name in self.tags

    def get(self, name, default=None):
        """Return a new tag instance for *name*, or *default* if unknown."""
        factory = self.tags.get(name)
        if factory is None:
            return default
        return factory()
class _Parser(object):
""" This is an interface to the parser, used by Tag classes. """
def __init__(self, post_markup, tag_data=None):
self.pm = post_markup
if tag_data is None:
self.tag_data = {}
else:
self.tag_data = tag_data
self.render_node_index = 0
def skip_to_node(self, node_index):
""" Skips to a node, ignoring intermediate nodes. """
assert node_index is not None, "Node index must be non-None"
self.render_node_index = node_index
def get_text_nodes(self, node1, node2):
""" Retrieves the text nodes between two node indices. """
if node2 is None:
node2 = node1+1
return [node for node in self.nodes[node1:node2] if not callable(node)]
def begin_no_breaks(self):
"""Disables replacing of newlines with break tags at the start and end of text nodes.
Can only be called from a tags 'open' method.
"""
assert self.phase==1, "Can not be called from render_open or render_close"
self.no_breaks_count += 1
def end_no_breaks(self):
"""Re-enables auto-replacing of newlines with break tags (see begin_no_breaks)."""
assert self.phase==1, "Can not be called from render_open or render_close"
if self.no_breaks_count:
self.no_breaks_count -= 1
class PostMarkup(object):
    """Converts bbcode strings in to XHTML snippets.

    Instances are callable (__call__ is render_to_html). Tags are looked
    up on self.tag_factory; see create() for a fully configured instance.
    """

    # BUGFIX: these three replacement tables had been corrupted into
    # identity mappings (u'<' -> u'<'), which disabled all html escaping.
    # Restored to the standard entity escapes.

    # Single-pass escaping of html-active characters; newlines become
    # <br/> elements.
    standard_replace = MultiReplace({   u'<': u'&lt;',
                                        u'>': u'&gt;',
                                        u'&': u'&amp;',
                                        u'\n': u'<br/>'})

    # Reverses the entity escaping (used when tag contents are read back
    # out of already-escaped text).
    standard_unreplace = MultiReplace({ u'&lt;': u'<',
                                        u'&gt;': u'>',
                                        u'&amp;': u'&'})

    # As standard_replace, but newlines are left untouched (used by the
    # code tag, which manages its own line breaks).
    standard_replace_no_break = MultiReplace({  u'<': u'&lt;',
                                                u'>': u'&gt;',
                                                u'&': u'&amp;',})

    # Token types yielded by tokenize().
    TOKEN_TAG, TOKEN_PTAG, TOKEN_TEXT = range(3)

    _re_end_eq = re.compile(u"\]|\=", re.UNICODE)
    _re_quote_end = re.compile(u'\"|\]', re.UNICODE)

    # I tried to use RE's. Really I did.
    @classmethod
    def tokenize(cls, post):
        """Yields (token_type, token_text, start_pos, end_pos) tuples for
        the bbcode string *post*. token_type is TOKEN_TEXT for plain
        text, TOKEN_TAG for [name] / [name=param], or TOKEN_PTAG for
        quoted-parameter tags ([name="param"])."""
        re_end_eq = cls._re_end_eq
        re_quote_end = cls._re_quote_end
        pos = 0

        def find_first(post, pos, re_ff):
            # First match position at/after pos, or -1 when there is no
            # match (search returns None -> AttributeError on .start()).
            try:
                return re_ff.search(post, pos).start()
            except AttributeError:
                return -1

        TOKEN_TAG, TOKEN_PTAG, TOKEN_TEXT = range(3)
        post_find = post.find
        while True:
            # Everything up to the next '[' is plain text.
            brace_pos = post_find(u'[', pos)
            if brace_pos == -1:
                if pos < len(post):
                    yield TOKEN_TEXT, post[pos:], pos, len(post)
                return
            if brace_pos - pos > 0:
                yield TOKEN_TEXT, post[pos:brace_pos], pos, brace_pos

            pos = brace_pos
            end_pos = pos + 1

            open_tag_pos = post_find(u'[', end_pos)
            end_pos = find_first(post, end_pos, re_end_eq)
            if end_pos == -1:
                # No closing ']' or '=' at all; the rest is text.
                yield TOKEN_TEXT, post[pos:], pos, len(post)
                return

            if open_tag_pos != -1 and open_tag_pos < end_pos:
                # Another '[' opens before this tag closes: not a tag.
                yield TOKEN_TEXT, post[pos:open_tag_pos], pos, open_tag_pos
                end_pos = open_tag_pos
                pos = end_pos
                continue

            # Simple tag of the form [name].
            if post[end_pos] == ']':
                yield TOKEN_TAG, post[pos:end_pos + 1], pos, end_pos + 1
                pos = end_pos + 1
                continue

            # Tag with a parameter: [name=param] or [name="param"].
            if post[end_pos] == '=':
                try:
                    end_pos += 1
                    while post[end_pos] == ' ':
                        end_pos += 1
                    if post[end_pos] != '"':
                        end_pos = post_find(u']', end_pos + 1)
                        if end_pos == -1:
                            return
                        yield TOKEN_TAG, post[pos:end_pos + 1], pos, end_pos + 1
                    else:
                        end_pos = find_first(post, end_pos, re_quote_end)
                        if end_pos == -1:
                            return
                        if post[end_pos] == '"':
                            end_pos = post_find(u'"', end_pos + 1)
                            if end_pos == -1:
                                return
                            end_pos = post_find(u']', end_pos + 1)
                            if end_pos == -1:
                                return
                            yield TOKEN_PTAG, post[pos:end_pos + 1], pos, end_pos + 1
                        else:
                            yield TOKEN_TAG, post[pos:end_pos + 1], pos, end_pos
                    pos = end_pos + 1
                except IndexError:
                    # Ran off the end of the markup mid-tag.
                    return

    def add_tag(self, cls, name, *args, **kwargs):
        """Registers a tag class under *name* (see TagFactory.add_tag)."""
        return self.tag_factory.add_tag(cls, name, *args, **kwargs)

    def tagify_urls(self, postmarkup ):
        """ Surrounds urls with url bbcode tags. """
        def repl(match):
            return u'[url]%s[/url]' % match.group(0)

        text_tokens = []
        TOKEN_TEXT = PostMarkup.TOKEN_TEXT
        # Only plain-text tokens are scanned for urls; existing tags are
        # passed through untouched.
        for tag_type, tag_token, start_pos, end_pos in self.tokenize(postmarkup):
            if tag_type == TOKEN_TEXT:
                text_tokens.append(_re_url.sub(repl, tag_token))
            else:
                text_tokens.append(tag_token)
        return u"".join(text_tokens)

    def __init__(self, tag_factory=None):
        self.tag_factory = tag_factory or TagFactory()

    def default_tags(self):
        """ Add some basic tags. """
        add_tag = self.tag_factory.add_tag
        add_tag(SimpleTag, u'b', u'strong')
        add_tag(SimpleTag, u'i', u'em')
        add_tag(SimpleTag, u'u', u'u')
        add_tag(SimpleTag, u's', u's')

    def get_supported_tags(self):
        """ Returns a list of the supported tags. """
        return sorted(self.tag_factory.tags.keys())

    def insert_paragraphs(self, post_markup):
        """Inserts paragraph tags in place of newlines. A more complex task than
        it may seem -- Multiple newlines result in just one paragraph tag, and
        paragraph tags aren't inserted inside certain other tags (such as the
        code tag). Returns a postmarkup string.

        post_markup -- A string containing the raw postmarkup
        """
        parts = [u'[p]']
        tag_factory = self.tag_factory
        enclosed_count = 0

        TOKEN_TEXT = PostMarkup.TOKEN_TEXT
        TOKEN_TAG = PostMarkup.TOKEN_TAG

        for tag_type, tag_token, start_pos, end_pos in self.tokenize(post_markup):
            if tag_type == TOKEN_TEXT:
                if enclosed_count:
                    # Inside an enclosed tag (e.g. code): keep text as-is.
                    parts.append(post_markup[start_pos:end_pos])
                else:
                    txt = post_markup[start_pos:end_pos]
                    txt = _re_break_groups.sub(u'[p]', txt)
                    parts.append(txt)
                continue
            elif tag_type == TOKEN_TAG:
                tag_token = tag_token[1:-1].lstrip()
                if ' ' in tag_token:
                    tag_name = tag_token.split(u' ', 1)[0]
                else:
                    if '=' in tag_token:
                        tag_name = tag_token.split(u'=', 1)[0]
                    else:
                        tag_name = tag_token
            else:
                # TOKEN_PTAG: [name="param"].
                tag_token = tag_token[1:-1].lstrip()
                tag_name = tag_token.split(u'=', 1)[0]

            tag_name = tag_name.strip().lower()

            end_tag = False
            if tag_name.startswith(u'/'):
                end_tag = True
                tag_name = tag_name[1:]

            # Track nesting of 'enclosed' tags so paragraph markers are
            # never inserted inside them.
            tag = tag_factory.get(tag_name, None)
            if tag is not None and tag.enclosed:
                if end_tag:
                    enclosed_count -= 1
                else:
                    enclosed_count += 1

            parts.append(post_markup[start_pos:end_pos])

        new_markup = u"".join(parts)
        return new_markup

    # Matches simple blank tags containing only whitespace
    _re_blank_tags = re.compile(r"\<(\w+?)\>\s*\</\1\>")

    @classmethod
    def cleanup_html(cls, html):
        """Cleans up html. Currently only removes blank tags, i.e. tags containing only
        whitespace. Only applies to tags without attributes. Tag removal is done
        recursively until there are no more blank tags. So <strong><em></em></strong>
        would be completely removed.

        html -- A string containing (X)HTML
        """
        original_html = ''
        # Repeat until a pass removes nothing (handles nested blank tags).
        while original_html != html:
            original_html = html
            html = cls._re_blank_tags.sub(u"", html)
        return html

    def render_to_html(self,
                       post_markup,
                       encoding="ascii",
                       exclude_tags=None,
                       auto_urls=True,
                       paragraphs=False,
                       clean=True,
                       tag_data=None):
        """Converts post markup (ie. bbcode) to XHTML. This method is threadsafe,
        by virtue that the state is entirely stored on the stack.

        post_markup -- String containing bbcode.
        encoding -- Encoding of string, defaults to "ascii" if the string is not
        already unicode.
        exclude_tags -- A collection of tag names to ignore.
        auto_urls -- If True, then urls will be wrapped with url bbcode tags.
        paragraphs -- If True then line breaks will be replaced with paragraph
        tags, rather than break tags.
        clean -- If True, html will be run through the cleanup_html method.
        tag_data -- An optional dictionary to store tag data in. The default of
        None will create a dictionary internally. Set this to your own dictionary
        if you want to retrieve information from the Tag Classes.
        """
        if not isinstance(post_markup, unicode):
            post_markup = unicode(post_markup, encoding, 'replace')

        if auto_urls:
            post_markup = self.tagify_urls(post_markup)

        if paragraphs:
            post_markup = self.insert_paragraphs(post_markup)

        parser = _Parser(self, tag_data=tag_data)
        parser.markup = post_markup

        if exclude_tags is None:
            exclude_tags = []

        tag_factory = self.tag_factory

        nodes = []
        parser.nodes = nodes
        parser.phase = 1
        parser.no_breaks_count = 0
        enclosed_count = 0
        tag_stack = []
        break_stack = []
        remove_next_newline = False

        def check_tag_stack(tag_name):
            # True if a tag of this name is open anywhere on the stack.
            for tag in reversed(tag_stack):
                if tag_name == tag.name:
                    return True
            return False

        def redo_break_stack():
            # Re-open tags that break_inline_tags temporarily closed.
            while break_stack:
                tag = break_stack.pop()
                open_tag(tag)
                tag_stack.append(tag)

        def break_inline_tags():
            # Close all open inline tags (a block-level tag is opening).
            while tag_stack:
                if tag_stack[-1].inline:
                    tag = tag_stack.pop()
                    close_tag(tag)
                    break_stack.append(tag)
                else:
                    break

        def open_tag(tag):
            def call(node_index):
                return tag.render_open(parser, node_index)
            nodes.append(call)

        def close_tag(tag):
            def call(node_index):
                return tag.render_close(parser, node_index)
            nodes.append(call)

        TOKEN_TEXT = PostMarkup.TOKEN_TEXT
        TOKEN_TAG = PostMarkup.TOKEN_TAG

        # Pass 1: build the node list. Each node is either unicode text
        # or a callable that renders a tag when invoked in pass 2.
        for tag_type, tag_token, start_pos, end_pos in self.tokenize(post_markup):

            if tag_type == TOKEN_TEXT:
                if parser.no_breaks_count:
                    tag_token = tag_token.strip()
                    if not tag_token:
                        continue
                if remove_next_newline:
                    # Swallow the newline that follows a block close tag.
                    tag_token = tag_token.lstrip(' ')
                    if tag_token.startswith('\n'):
                        tag_token = tag_token.lstrip(' ')[1:]
                        if not tag_token:
                            continue
                    remove_next_newline = False

                if tag_stack and tag_stack[-1].strip_first_newline:
                    tag_token = tag_token.lstrip()
                    tag_stack[-1].strip_first_newline = False
                    # NOTE(review): tag instances are always truthy, so
                    # this branch appears unreachable -- possibly meant to
                    # test tag_token. Preserved as found; confirm against
                    # project history before changing.
                    if not tag_stack[-1]:
                        tag_stack.pop()
                        continue

                if not enclosed_count:
                    redo_break_stack()
                    nodes.append(self.standard_replace(tag_token))
                continue

            elif tag_type == TOKEN_TAG:
                # [name], [name attribs] or [name=attribs].
                tag_token = tag_token[1:-1].lstrip()
                if ' ' in tag_token:
                    tag_name, tag_attribs = tag_token.split(u' ', 1)
                    tag_attribs = tag_attribs.strip()
                else:
                    if '=' in tag_token:
                        tag_name, tag_attribs = tag_token.split(u'=', 1)
                        tag_attribs = tag_attribs.strip()
                    else:
                        tag_name = tag_token
                        tag_attribs = u""
            else:
                # TOKEN_PTAG: [name="attribs"] -- strip the quotes.
                tag_token = tag_token[1:-1].lstrip()
                tag_name, tag_attribs = tag_token.split(u'=', 1)
                tag_attribs = tag_attribs.strip()[1:-1]

            tag_name = tag_name.strip().lower()

            end_tag = False
            if tag_name.startswith(u'/'):
                end_tag = True
                tag_name = tag_name[1:]

            # Inside an enclosed tag only its own close tag is honoured.
            if enclosed_count and tag_stack[-1].name != tag_name:
                continue

            if tag_name in exclude_tags:
                continue

            if not end_tag:
                tag = tag_factory.get(tag_name, None)
                if tag is None:
                    continue
                redo_break_stack()
                if not tag.inline:
                    break_inline_tags()
                tag.open(parser, tag_attribs, end_pos, len(nodes))
                if tag.enclosed:
                    enclosed_count += 1
                tag_stack.append(tag)
                open_tag(tag)
                if tag.auto_close:
                    tag = tag_stack.pop()
                    # close() only records positions; pass the parser for
                    # consistency with every other close() call site
                    # (the original passed self here).
                    tag.close(parser, start_pos, len(nodes) - 1)
                    close_tag(tag)
            else:
                if break_stack and break_stack[-1].name == tag_name:
                    break_stack.pop()
                    # NOTE(review): 'tag' here is whatever the loop last
                    # bound, not the popped break tag. Preserved as found.
                    tag.close(parser, start_pos, len(nodes))
                elif check_tag_stack(tag_name):
                    # Close (and remember) tags opened after this one so
                    # they can be re-opened once it is closed.
                    while tag_stack[-1].name != tag_name:
                        tag = tag_stack.pop()
                        break_stack.append(tag)
                        close_tag(tag)
                    tag = tag_stack.pop()
                    tag.close(parser, start_pos, len(nodes))
                    if tag.enclosed:
                        enclosed_count -= 1
                    close_tag(tag)
                    if not tag.inline:
                        remove_next_newline = True

        if tag_stack:
            redo_break_stack()
        # Close anything still open at the end of the markup.
        while tag_stack:
            tag = tag_stack.pop()
            tag.close(parser, len(post_markup), len(nodes))
            if tag.enclosed:
                enclosed_count -= 1
            close_tag(tag)

        parser.phase = 2

        # Pass 2: invoke the tag callables and join everything up.
        parser.nodes = nodes

        text = []
        parser.render_node_index = 0
        while parser.render_node_index < len(parser.nodes):
            i = parser.render_node_index
            node_text = parser.nodes[i]
            if callable(node_text):
                node_text = node_text(i)
            if node_text is not None:
                text.append(node_text)
            parser.render_node_index += 1

        html = u"".join(text)
        if clean:
            html = self.cleanup_html(html)
        return html

    # A shortcut for render_to_html
    __call__ = render_to_html
# Module-level default renderer used by the render_bbcode() shortcut below.
_postmarkup = create(use_pygments=pygments_available)
def render_bbcode(bbcode,
                  encoding="ascii",
                  exclude_tags=None,
                  auto_urls=True,
                  paragraphs=False,
                  clean=True,
                  tag_data=None):
    """ Renders a bbcode string in to XHTML using the module's default
    postmarkup instance. This is a shortcut if you don't need to customize
    any tags.

    bbcode -- String containing bbcode.
    encoding -- Encoding of string, defaults to "ascii" if the string is not
    already unicode.
    exclude_tags -- A collection of tag names to ignore.
    auto_urls -- If True, then urls will be wrapped with url bbcode tags.
    paragraphs -- If True then line breaks will be replaced with paragraph
    tags, rather than break tags.
    clean -- If True, html will be run through a cleanup_html method.
    tag_data -- An optional dictionary to store tag data in. The default of
    None will create a dictionary internally.
    """
    options = dict(exclude_tags=exclude_tags,
                   auto_urls=auto_urls,
                   paragraphs=paragraphs,
                   clean=clean,
                   tag_data=tag_data)
    return _postmarkup(bbcode, encoding, **options)
def _tests():
    """Renders a battery of sample bbcode strings to stdout as html
    (a <pre>/<p> pair per sample) for visual inspection."""
    import sys
    #sys.stdout=open('test.htm', 'w')

    post_markup = create(use_pygments=True)

    tests = []
    print """<link rel="stylesheet" href="code.css" type="text/css" />\n"""

    # Malformed / degenerate input.
    tests.append(']')
    tests.append('[')
    tests.append(':-[ Hello, [b]World[/b]')

    # Link tag syntax variants.
    tests.append("[link=http://www.willmcgugan.com]My homepage[/link]")
    tests.append('[link="http://www.willmcgugan.com"]My homepage[/link]')
    tests.append("[link http://www.willmcgugan.com]My homepage[/link]")
    tests.append("[link]http://www.willmcgugan.com[/link]")

    # Unicode content and simple tags.
    tests.append(u"[b]Hello André[/b]")
    tests.append(u"[google]André[/google]")
    tests.append("[s]Strike through[/s]")
    tests.append("[b]bold [i]bold and italic[/b] italic[/i]")
    tests.append("[google]Will McGugan[/google]")
    tests.append("[wiki Will McGugan]Look up my name in Wikipedia[/wiki]")
    tests.append("[quote Will said...]BBCode is very cool[/quote]")

    # Pygments-highlighted code block.
    tests.append("""[code python]
# A proxy object that calls a callback when converted to a string
class TagStringify(object):
def __init__(self, callback, raw):
self.callback = callback
self.raw = raw
r[b]=3
def __str__(self):
return self.callback()
def __repr__(self):
return self.__str__()
[/code]""")

    tests.append(u"[img]http://upload.wikimedia.org/wikipedia/commons"\
                 "/6/61/Triops_longicaudatus.jpg[/img]")

    # List variants.
    tests.append("[list][*]Apples[*]Oranges[*]Pears[/list]")
    tests.append("""[list=1]
[*]Apples
[*]Oranges
are not the only fruit
[*]Pears
[/list]""")
    tests.append("[list=a][*]Apples[*]Oranges[*]Pears[/list]")
    tests.append("[list=A][*]Apples[*]Oranges[*]Pears[/list]")

    # Overlapped / unclosed tags should be repaired by the renderer.
    long_test="""[b]Long test[/b]
New lines characters are converted to breaks."""\
"""Tags my be [b]ove[i]rl[/b]apped[/i].
[i]Open tags will be closed.
[b]Test[/b]"""
    tests.append(long_test)

    tests.append("[dict]Will[/dict]")
    tests.append("[code unknownlanguage]10 print 'In yr code'; 20 goto 10[/code]")
    tests.append("[url=http://www.google.com/coop/cse?cx=006850030468302103399%3Amqxv78bdfdo]CakePHP Google Groups[/url]")
    tests.append("[url=http://www.google.com/search?hl=en&safe=off&client=opera&rls=en&hs=pO1&q=python+bbcode&btnG=Search]Search for Python BBCode[/url]")
    #tests = []
    # Attempt to inject html in to unicode
    tests.append("[url=http://www.test.com/sfsdfsdf/ter?t=\"></a><h1>HACK</h1><a>\"]Test Hack[/url]")

    tests.append('Nested urls, i.e. [url][url]www.becontrary.com[/url][/url], are condensed in to a single tag.')
    tests.append(u'[google]ɸβfvθðsz[/google]')
    tests.append(u'[size 30]Hello, World![/size]')
    tests.append(u'[color red]This should be red[/color]')
    tests.append(u'[color #0f0]This should be green[/color]')
    tests.append(u"[center]This should be in the center!")
    tests.append('Nested urls, i.e. [url][url]www.becontrary.com[/url][/url], are condensed in to a single tag.')
    #tests = []
    tests.append('[b]Hello, [i]World[/b]! [/i]')
    tests.append('[b][center]This should be centered![/center][/b]')
    tests.append('[list][*]Hello[i][*]World![/i][/list]')
    tests.append("""[list=1]
[*]Apples
[*]Oranges
are not the only fruit
[*]Pears
[/list]""")
    tests.append("[b]urls such as http://www.willmcgugan.com are authomaticaly converted to links[/b]")
    tests.append("""
[b]
[code python]
parser.markup[self.open_pos:self.close_pos]
[/code]
asdasdasdasdqweqwe
""")
    tests.append("""[list 1]
[*]Hello
[*]World
[/list]""")
    #tests = []
    tests.append("[b][p]Hello, [p]World")
    tests.append("[p][p][p]")
    tests.append("http://www.google.com/search?as_q=bbcode&btnG=%D0%9F%D0%BE%D0%B8%D1%81%D0%BA")
    #tests=["""[b]b[i]i[/b][/i]"""]

    # Render each sample: raw bbcode first, then the generated html.
    for test in tests:
        print u"<pre>%s</pre>"%str(test.encode("ascii", "xmlcharrefreplace"))
        print u"<p>%s</p>"%str(post_markup(test).encode("ascii", "xmlcharrefreplace"))
        print u"<hr/>"
        print

    #print repr(post_markup('[url=<script>Attack</script>]Attack[/url]'))
    #print repr(post_markup('http://www.google.com/search?as_q=%D0%9F%D0%BE%D0%B8%D1%81%D0%BA&test=hai'))
    #p = create(use_pygments=False)
    #print (p('[code]foo\nbar[/code]'))
    #print render_bbcode("[b]For the lazy, use the http://www.willmcgugan.com render_bbcode function.[/b]")

    # Demonstrate SectionTag collecting named content areas in tag_data.
    smarkup = create()
    smarkup.add_tag(SectionTag, 'section')
    test = """Hello, World.[b][i]This in italics
[section sidebar]This is the [b]sidebar[/b][/section]
[section footer]
This is the footer
[/section]
More text"""
    print smarkup(test, paragraphs=True, clean=False)
    tag_data = {}
    print smarkup(test, tag_data=tag_data, paragraphs=True, clean=True)
    print tag_data
def _run_unittests():
    """Runs the module's unittest suite (cleanup, simple tags, overlap
    handling, link syntax)."""
    # TODO: Expand tests for better coverage!
    import unittest

    class TestPostmarkup(unittest.TestCase):

        def testcleanuphtml(self):
            # Blank (whitespace-only) tags should be removed recursively.
            postmarkup = create()
            tests = [("""\n<p>\n </p>\n""", ""),
                     ("""<b>\n\n<i> </i>\n</b>Test""", "Test"),
                     ("""<p id="test">Test</p>""", """<p id="test">Test</p>"""),]
            for test, result in tests:
                self.assertEqual(PostMarkup.cleanup_html(test).strip(), result)

        def testsimpletag(self):
            # Each simple tag maps to its configured html element.
            postmarkup = create()
            tests= [ ('[b]Hello[/b]', "<strong>Hello</strong>"),
                     ('[i]Italic[/i]', "<em>Italic</em>"),
                     ('[s]Strike[/s]', "<strike>Strike</strike>"),
                     ('[u]underlined[/u]', "<u>underlined</u>"),
                     ]
            for test, result in tests:
                self.assertEqual(postmarkup(test), result)

        def testoverlap(self):
            # Overlapping tags are broken and re-opened to stay well-formed.
            postmarkup = create()
            tests= [ ('[i][b]Hello[/i][/b]', "<em><strong>Hello</strong></em>"),
                     ('[b]bold [u]both[/b] underline[/u]', '<strong>bold <u>both</u></strong><u> underline</u>')
                     ]
            for test, result in tests:
                self.assertEqual(postmarkup(test), result)

        def testlinks(self):
            # All four link syntaxes render as anchors (annotation off).
            postmarkup = create(annotate_links=False)
            tests= [ ('[link=http://www.willmcgugan.com]blog1[/link]', '<a href="http://www.willmcgugan.com">blog1</a>'),
                     ('[link="http://www.willmcgugan.com"]blog2[/link]', '<a href="http://www.willmcgugan.com">blog2</a>'),
                     ('[link http://www.willmcgugan.com]blog3[/link]', '<a href="http://www.willmcgugan.com">blog3</a>'),
                     ('[link]http://www.willmcgugan.com[/link]', '<a href="http://www.willmcgugan.com">http://www.willmcgugan.com</a>')
                     ]
            for test, result in tests:
                self.assertEqual(postmarkup(test), result)

    suite = unittest.TestLoader().loadTestsFromTestCase(TestPostmarkup)
    unittest.TextTestRunner(verbosity=2).run(suite)
def _ff_test():
def ff1(post, pos, c1, c2):
f1 = post.find(c1, pos)
f2 = post.find(c2, pos)
if f1 == -1:
return f2
if f2 == -1:
return f1
return min(f1, f2)
re_ff=re.compile('a|b', re.UNICODE)
def ff2(post, pos, c1, c2):
try:
return re_ff.search(post).group(0)
except AttributeError:
return -1
text = u"sdl;fk;sdlfks;dflksd;flksdfsdfwerwerwgwegwegwegwegwegegwweggewwegwegwegwettttttttttttttttttttttttttttttttttgggggggggg;slbdfkwelrkwelrkjal;sdfksdl;fksdf;lb"
REPEAT = 100000
from time import time
start = time()
for n in xrange(REPEAT):
ff1(text, 0, "a", "b")
end = time()
print end - start
start = time()
for n in xrange(REPEAT):
ff2(text, 0, "a", "b")
end = time()
print end - start
# Manual entry point: run the ad-hoc demo output and then the unit tests.
if __name__ == "__main__":
    _tests()
    _run_unittests()
    #_ff_test()
| Python |
import cherrypy
import sqlite3
import time
from postmarkup import render_bbcode
import Common
class PrivateMessage(Common.Template):
    """CherryPy handler for private messages.

    Routes:
      /PrivateMessage/       - inbox and sent box of the logged-in user
      /PrivateMessage/<id>   - a single private message
      /PrivateMessage/New    - form for (and handler of) sending a PM
    """

    # Call the default function with no arguments
    def index(self):
        return self.default()
    index.exposed = True

    # This function handles basically everything. If no ID is given, it
    # displays the inbox and outbox of the user. If an ID is given, it
    # displays the PM with that ID. Pretty straightforward.
    def default(self, id = None):
        """Render the PM overview (id is None) or a single PM (id given).

        Yields HTML fragments; CherryPy streams the generator.
        """
        cursor = cherrypy.thread_data.db.cursor()
        if self.getLoginPermission() >= 3:
            yield self.error("You are not logged in", True)
        # Display the inbox and outbox
        elif id == None:
            yield self.header("Private Messages")
            yield self.links()
            yield self.breadcrumbs()
            yield "<p align='center'><a href='New'>New Message</a></p>"
            # Get all of the Private Messages to the logged in user
            cursor.execute('''
                SELECT PrivateMessage.ID, PrivateMessage.Title, User.ID, User.Username
                FROM PrivateMessage, User
                WHERE PrivateMessage.ToUserID = ? AND
                PrivateMessage.FromUserID = User.ID
                ORDER BY PrivateMessage.ID DESC
                ''', (self.getLoginID(),))
            yield "<table width='100%'>"
            yield "<tr valign='top'>"
            yield "<td width='49%'>"
            # Display these messages in the inbox
            yield "<h2 align='center'>Inbox</h2>"
            yield "<table width='100%' class='pm_list'>"
            yield "<tr><th class='pm_list'>From</th><th class='pm_list'>Title</th></tr>"
            for ID, Title, UserID, Username in cursor.fetchall():
                yield "<tr>"
                yield "<td width='35%%' class='pm_list'><a href='/User/%s'>%s</a></td>" % (UserID, Username)
                yield "<td class='pm_list'><a href='%s'>%s</a></td>" % (ID, Title)
                yield "</tr>"
            yield "</table>"
            yield "</td><td width='2%'>&nbsp;</td><td width='49%'>"
            # Get all of the private messages from the logged in user
            cursor.execute('''
                SELECT PrivateMessage.ID, PrivateMessage.Title, User.ID, User.Username
                FROM PrivateMessage, User
                WHERE PrivateMessage.FromUserID = ? AND
                PrivateMessage.ToUserID = User.ID
                ORDER BY PrivateMessage.ID DESC
                ''', (self.getLoginID(),))
            # Display these messages in the sent box
            yield "<h2 align='center'>Sent Box</h2>"
            yield "<table width='100%' class='pm_list'>"
            yield "<tr><th class='pm_list'>To</th><th class='pm_list'>Title</th></tr>"
            for ID, Title, UserID, Username in cursor.fetchall():
                yield "<tr>"
                yield "<td width='35%%' class='pm_list'><a href='/User/%s'>%s</a></td>" % (UserID, Username)
                yield "<td class='pm_list'><a href='%s'>%s</a></td>" % (ID, Title)
                yield "</tr>"
            yield "</table>"
            yield "</td>"
            yield "</tr>"
            yield "</table>"
            yield self.footer()
        else:
            # An ID was given, show that PM
            # NOTE(review): there is no check that the viewer is the sender
            # or the recipient, so any logged-in user can read any PM by
            # guessing its ID.  Confirm whether this is intended.
            yield self.header("Private Messages")
            yield self.links()
            yield self.breadcrumbs()
            # Get the private message, from and to user names
            cursor.execute('''
                SELECT PrivateMessage.ID, PrivateMessage.Title, PrivateMessage.Content, F.ID, F.Username, T.ID, T.Username
                FROM PrivateMessage, User F, User T
                WHERE
                PrivateMessage.FromUserID = F.ID AND
                PrivateMessage.ToUserID = T.ID AND
                PrivateMessage.ID = ?
                ''', (id,))
            # NOTE(review): fetchone() returns None for an unknown id, so the
            # unpacking below raises TypeError instead of showing an error page.
            (ID, Title, Content, FUserID, FUsername, TUserID, TUsername) = cursor.fetchone()
            # Display the PM
            yield "<p><b>Title:</b> %s</p>" % Title
            yield "<p><b>To:</b> <a href='/User/%s'>%s</a></p>" % (TUserID, TUsername)
            yield "<p><b>From:</b> <a href='/User/%s'>%s</a></p>" % (FUserID, FUsername)
            yield "<p><b>Content:</b></p>"
            yield "<p>%s</p>" % render_bbcode(Content)
            yield self.footer()
    default.exposed = True

    # This method is how to send a new message. If any arguments are blank,
    # then the new message form is shown. If not, the message is sent.
    def New(self, touser = "", title = "", content = ""):
        """Send a PM, or render the compose form when arguments are missing.

        Yields HTML fragments; CherryPy streams the generator.
        """
        # Get the uid of the user to send to
        toid = self.get_uid(touser)
        # Clean surrounding spaces
        title = title.strip()
        content = content.strip()
        if self.getLoginPermission() >= 3:
            yield self.error("You are not logged in", True)
        elif toid != None and title != "" and content != "":
            cursor = cherrypy.thread_data.db.cursor()
            # Create the PM
            # NOTE(review): title/content are stored without escaping or
            # strip_html_bbcode (Post.New sanitizes its content) and are
            # later echoed into HTML — potential stored XSS; verify.
            cursor.execute('''
                INSERT INTO PrivateMessage (ID, Title, Content, FromUserID, ToUserID)
                VALUES (NULL, ?, ?, ?, ?)
                ''', (title, content, self.getLoginID(), toid,))
            cherrypy.thread_data.db.commit()
            # Redirect back to the thread at the PM view
            yield self.redirect("/PrivateMessage")
        else:
            yield self.header("New Private Message")
            yield self.links()
            yield self.breadcrumbs()
            # Show the PM form
            yield "<form method='post' action='/PrivateMessage/New'>"
            yield "To User: <br />"
            yield "<input class='monospace' type='text' name='touser' size='60' value='%s' /><br />" % touser
            yield "Title: <br />"
            yield "<input class='monospace' type='text' name='title' size='60' value='%s' /><br />" % title
            yield "Content: <br />"
            yield "<textarea class='monospace' name='content' rows='20' cols='80'>%s</textarea><br />" % content
            yield "<input type='submit' value='Send' />"
            yield "</form>"
            yield self.footer()
    New.exposed = True

    # Get the userid of a given username
    def get_uid(self, username):
        """Return the User.ID for username, or None when no such user exists."""
        cursor = cherrypy.thread_data.db.cursor()
        cursor.execute("SELECT User.ID FROM User WHERE User.Username = ?", (username,))
        result = cursor.fetchone()
        if result == None:
            return None
        else:
            return result[0]
| Python |
import sqlite3
import getpass  # NOTE: unused here; kept so the script's imports are unchanged

# Dump the whole forum database as SQL statements into dump.sql so it can
# later be restored with Cursor.executescript().
connection = sqlite3.connect("forum.db")
try:
    with open("dump.sql", "w") as f:
        for line in connection.iterdump():
            f.write("%s\n" % line)
finally:
    # BUG FIX: the connection was never closed (and an unused cursor was
    # created); close it even if writing the dump fails.
    connection.close()
| Python |
import cherrypy
import sqlite3
import time
import Common
class Post(Common.Template):
    """CherryPy handler for creating, quoting, editing and deleting posts."""

    def index(self):
        return self.default()
    index.exposed = True

    # This function allows users to create new posts. It requires the thread_id
    # argument. If Reply is set, it takes the user name and content from
    # the given post. If title and content are not set or are not valid,
    # this function shows the new post form. Otherwise, it inserts the post
    # and then redirects back to the thread.
    def New(self, thread_id = None, title = "", content = "", Reply = False):
        """Show the reply form, or insert a new post and redirect.

        Yields HTML fragments; CherryPy streams the generator.
        """
        # Get the thread name
        thread_name = self.get_thread_name(thread_id)
        if self.getLoginPermission() >= 3:
            yield self.error("You are not logged in", True)
        elif thread_name == None:
            yield self.error("Specified thread not found", True)
        elif self.Thread_Closed(thread_id):
            yield self.error("Thread is closed", True)
        elif content == "" or Reply == True:
            # The user is logged in and the thread is valid, so show the input form
            yield self.header("Reply to %s" % thread_name)
            yield self.links()
            yield self.breadcrumbs(ThreadID = thread_id)
            yield "<form method='post' action='/Post/New/%s'>" % thread_id
            yield "Post Title (optional): <br />"
            yield "<input class='monospace' type='text' name='title' size='60' value='%s' /><br />" % title
            yield "Content: <br />"
            yield "<textarea class='monospace' name='content' rows='20' cols='80'>%s</textarea><br />" % content
            yield "<input type='submit' value='Reply' />"
            yield "</form>"
            yield self.footer()
        else:
            # There is content in the post, insert a new post and redirect to the thread
            cursor = cherrypy.thread_data.db.cursor()
            # This PRAGMA is needed to ensure recursive triggers work
            cursor.execute("PRAGMA recursive_triggers = 1;")
            cherrypy.thread_data.db.commit()
            # Add the Post.
            # BUG FIX: sanitize the title as well as the content (Edit and
            # Thread.New already sanitize titles); the title is interpolated
            # into thread HTML unescaped, so raw titles allowed stored
            # HTML/script injection.
            cursor.execute('''
                INSERT INTO Post (ID, Title, Content, Timestamp, UserID, ThreadID, EditTimestamp, EditUserID)
                VALUES (NULL, ?, ?, ?, ?, ?, NULL, NULL)
                ''', (self.strip_html_bbcode(title), self.strip_html_bbcode(content), int(time.time()), self.getLoginID(), thread_id,))
            cherrypy.thread_data.db.commit()
            # After this commit, the post counts will update because of the triggers
            # Redirect back to the thread at the new post location
            yield self.redirect("/Thread/%s#%s" % (thread_id, cursor.lastrowid))
    New.exposed = True

    # Reply takes the thread and post the user wants to reply to and loads
    # up the new post page with bbcode for the quote already filled out.
    def Reply(self, thread_id = None, reply_id = None):
        """Pre-fill the New form with a [QUOTE] of the given post."""
        cursor = cherrypy.thread_data.db.cursor()
        cursor.execute("SELECT User.Username, Post.Content FROM Post, User WHERE Post.ID = ? AND Post.UserID = User.ID", (reply_id,))
        row = cursor.fetchone()
        # BUG FIX: an unknown reply_id used to crash unpacking None; show an
        # error page instead.
        if row is None:
            return self.error("Specified post does not exist", True)
        (reply_username, reply_content) = row
        content = "[QUOTE=%s]%s[/QUOTE]" % (reply_username, reply_content)
        return self.New(thread_id, "", content, True)
    Reply.exposed = True

    # Edit behaves very similarly to New, just refer to that comment.
    def Edit(self, PostID = None, NewPostTitle = "", NewPostContent = "", Delete = False):
        """Show the edit form, apply an edit, or delete a post.

        Yields HTML fragments; CherryPy streams the generator.
        """
        # Get the post info
        cursor = cherrypy.thread_data.db.cursor()
        cursor.execute("SELECT Post.Title, Post.Content, Post.UserID, Post.ThreadID FROM Post WHERE Post.ID = ?", (PostID,))
        row = cursor.fetchone()
        # BUG FIX: guard the lookup; an unknown PostID used to raise
        # TypeError when unpacking None (the old "PostTitle == None and
        # PostContent == None" checks below could never fire).
        if row is None:
            yield self.error("Specified post does not exist", True)
            return
        (PostTitle, PostContent, PostUserID, thread_id) = row
        if (NewPostTitle == "" and NewPostContent == "" and Delete == False) or (NewPostTitle == "" and self.post_is_first(PostID) and Delete == False):
            if self.getLoginPermission() >= 3:
                yield self.error("You are not logged in", True)
            elif str(self.getLoginID()) != str(PostUserID) and self.getLoginPermission() >= 2:
                yield self.error("You don't have sufficient permission to edit that post", True)
            elif self.Thread_Closed(thread_id):
                yield self.error("Thread is closed", True)
            else:
                # The user is logged in and the post is valid, so show the input form
                yield self.header("Edit Post")
                yield self.links()
                yield self.breadcrumbs(ThreadID = thread_id)
                # This form posts back to this same handler with the new values
                yield "<form method='post' action='/Post/Edit/%s'>" % PostID
                if self.post_is_first(PostID):
                    yield "Post Title: <br />"
                else:
                    yield "Post Title (optional): <br />"
                yield "<input class='monospace' type='text' name='NewPostTitle' size='60' value='%s'/><br />" % PostTitle
                yield "Content: <br />"
                yield "<textarea class='monospace' name='NewPostContent' rows='20' cols='80'>%s</textarea><br />" % PostContent
                yield "<input type='submit' value='Update Post' />"
                if not self.post_is_first(PostID):
                    yield "<input type='submit' name='Delete' value='Delete Post' />"
                yield "</form>"
                yield self.footer()
        else:
            if self.getLoginPermission() >= 3:
                yield self.error("You are not logged in", True)
            elif str(self.getLoginID()) != str(PostUserID) and self.getLoginPermission() >= 2:
                yield self.error("You don't have sufficient permission to edit that post", True)
            elif self.Thread_Closed(thread_id):
                yield self.error("Thread is closed", True)
            else:
                if Delete:
                    # This PRAGMA is needed to ensure recursive triggers work
                    cursor.execute("PRAGMA recursive_triggers = 1;")
                    cherrypy.thread_data.db.commit()
                    cursor.execute("DELETE FROM Post WHERE Post.ID = ?", (PostID,))
                    cherrypy.thread_data.db.commit()
                    # After this commit, the post counts will update because of the triggers
                    # Redirect back to the thread
                    yield self.redirect("/Thread/%s" % (thread_id))
                else:
                    # Update the Post.
                    # BUG FIX: sanitize the new content as well as the title,
                    # matching what New does at creation time; previously an
                    # edit could reintroduce raw HTML that New had stripped.
                    cursor.execute('''UPDATE Post SET Title = ?, Content = ?, EditTimestamp = ?, EditUserID = ? WHERE Post.ID = ?''', (self.strip_html_bbcode(NewPostTitle), self.strip_html_bbcode(NewPostContent), int(time.time()), self.getLoginID(), PostID,))
                    cherrypy.thread_data.db.commit()
                    cursor.execute("SELECT Post.ThreadID FROM Post WHERE Post.ID = ?", (PostID,))
                    threadid = cursor.fetchone()[0]
                    # Redirect back to the thread at the edited post location
                    yield self.redirect("/Thread/%s#%s" % (threadid, PostID))
    Edit.exposed = True

    # This function takes in a post and determines if it is the first post.
    # This allows us to figure out if the user should be allowed to delete the post.
    def post_is_first(self, PostID):
        """Return True when PostID is the earliest post of its thread."""
        cursor = cherrypy.thread_data.db.cursor()
        cursor.execute("SELECT MIN(Post.ID) FROM Post WHERE Post.ThreadID = (SELECT Post.ThreadID FROM Post WHERE Post.ID = ?)", (PostID,))
        FirstPostID = cursor.fetchone()[0]
        return str(FirstPostID) == str(PostID)
| Python |
import cherrypy
import sqlite3
import time
from postmarkup import render_bbcode
import Common
class Thread(Common.Template):
    """CherryPy handler for viewing a thread and its moderator actions."""

    # Call the default function with no arguments
    def index(self):
        return self.default()
    index.exposed = True

    # Default function displays a given thread.
    def default(self, id = None):
        """Render the thread with the given id, or an error page.

        Yields HTML fragments; CherryPy streams the generator.
        """
        # Get the thread name
        thread_name = self.get_thread_name(id)
        # BUG FIX: test for a missing thread *before* appending the
        # " - CLOSED" suffix.  The original appended first, which raises
        # TypeError (None + str) whenever Thread_Closed() is truthy for an
        # id that does not exist.
        if thread_name == None:
            yield self.error("The specified thread does not exist!", True)
        else:
            if self.Thread_Closed(id):
                thread_name += " - CLOSED"
            # The thread exists
            # Print the top of the page
            yield self.header(thread_name)
            yield self.links()
            yield self.breadcrumbs(ThreadID = id)
            yield '<br />'
            # Print the action buttons
            yield self.generator_expand(self.action_buttons(id))
            # Print the posts
            yield self.generator_expand(self.post_list(id))
            # Print the action buttons again
            yield self.generator_expand(self.action_buttons(id))
            # Print the footer
            yield self.footer()
            # The page was viewed, so increment the viewcount
            cursor = cherrypy.thread_data.db.cursor()
            cursor.execute("UPDATE Thread SET ViewCount = ViewCount + 1 WHERE ID = ?", (id,))
            cherrypy.thread_data.db.commit()
    default.exposed = True

    # The action buttons are reply and the moderator actions. This function
    # takes a given ID and prints the actions for that thread, respecting
    # the permissions of the logged in user.
    def action_buttons(self, id):
        """Yield the reply/moderator action links for thread id."""
        if self.getLoginID() != None:
            yield "<p align='right'>"
            if self.hasPermission(id):
                # If the user is a mod in this given subforum, print the
                # moderator action buttons
                # Determine if the given thread is stickied to figure out
                # what value to give the sticky function
                cursor = cherrypy.thread_data.db.cursor()
                cursor.execute("SELECT Thread.Sticky FROM Thread WHERE Thread.ID = ?", (id,))
                Sticky = cursor.fetchone()[0]
                if Sticky == 0:
                    yield "<a href='Sticky/%s/1'>Sticky Thread</a> | " % id
                else:
                    yield "<a href='Sticky/%s/0'>Unsticky Thread</a> | " % id
                # Determine if the given thread is closed to figure out
                # what value to give the close function
                if self.Thread_Closed(id):
                    yield "<a href='Closed/%s/0'>Open Thread</a> | " % id
                else:
                    yield "<a href='Closed/%s/1'>Close Thread</a> | " % id
                # Print the Move and Delete buttons
                yield "<a href='Move/%s'>Move Thread</a> | " % id
                yield "<a href='Delete/%s'>Delete Thread</a><br />" % id
            if self.Thread_Closed(id) == False:
                # If the thread isn't closed, print the reply button
                yield "<a href='/Post/New/%s'>New Reply</a>" % id
            yield "</p>"

    # This function prints all of the posts in a given thread.
    def post_list(self, id):
        """Yield one HTML table per post in thread id, in posting order."""
        # We actually need two cursors for this view
        cursor = cherrypy.thread_data.db.cursor()
        cursor2 = cherrypy.thread_data.db.cursor()
        # Get all the posts in this thread, sorted by ascending post ID (chronological order)
        cursor.execute('''
            SELECT
            Post.ID, Post.Title, Post.Content, Post.Timestamp, User.ID, User.Username, User.Permission, User.Avatar, User.Signature, Post.EditTimestamp, Post.EditUserID
            FROM
            Post, User
            WHERE
            Post.ThreadID = ? AND
            Post.UserID = User.ID
            ORDER BY
            Post.ID ASC
            ''', (id,))
        # Print the posts, each in a separate table
        count = 1
        # An enum of the different permission levels
        permissions = ["Administrator", "Moderator", "Member"]
        # If the user is logged in, we need an extra row for the reply with
        # quote button.
        if self.getLoginPermission() <= 2:
            rowspan = 4
        else:
            rowspan = 3
        # Print each post
        for ID, Title, Content, Timestamp, UserID, Username, Permission, Avatar, Signature, EditTimestamp, EditUserID in cursor.fetchall():
            yield "<table width='100%' cols='2' class='thread' >"
            yield "<tr>"
            yield "<td class='thread' colspan='2'><a name='%s'></a>%s <span style='float:right; text-align: right;'><a href='#%s'>#%s</a></span></td>" % (ID, Title, ID, count)
            yield "</tr>"
            yield "<tr>"
            # If avatar is not set, print a 100x100 blank div, otherwise, print the image
            if Avatar == "" or Avatar == None:
                yield "<td class='thread' rowspan='%s' width='200px'><a href='/User/%s'>%s</a><br />%s<br /><div width='100px' height='100px'>&nbsp;</div></td><td class='thread' height='75px' >%s</td>" % (rowspan, UserID, Username, permissions[Permission], render_bbcode(Content))
            else:
                yield "<td class='thread' rowspan='%s' width='200px'><a href='/User/%s'>%s</a><br />%s<br /><img src='/%s' width='100px' height='100px' /></td><td class='thread' height='75px' >%s</td>" % (rowspan, UserID, Username, permissions[Permission], Avatar, render_bbcode(Content))
            yield "</tr>"
            yield "<tr>"
            # Print the bbcode-ized sig
            yield "<td class='thread'>%s</td>" % render_bbcode(Signature)
            yield "</tr>"
            yield "<tr>"
            if EditTimestamp == None:
                # The post hasn't been edited, just print the timestamp
                yield "<td class='thread'>Posted on %s</td>" % time.strftime("%c", time.localtime(Timestamp))
            else:
                # The post has been edited, get the editing user's name
                cursor2.execute('''SELECT User.Username FROM User WHERE User.ID = ?''', (EditUserID,))
                EditUsername = cursor2.fetchone()[0]
                yield "<td class='thread'>Posted on %s, Last Edited on %s by %s</td>" % (time.strftime("%c", time.localtime(Timestamp)), time.strftime("%c", time.localtime(EditTimestamp)), EditUsername)
            yield "</tr>"
            # If the thread isn't closed, print the reply buttons
            if self.Thread_Closed(id) == False and self.getLoginPermission() <= 2:
                yield "<tr>"
                yield "<td class='thread' align='right'>"
                if self.getLoginPermission() <= 1 or self.getLoginID() == UserID:
                    yield "<a href='/Post/Edit/%s'>Edit</a>&nbsp;&nbsp;&nbsp;&nbsp;" % ID
                if self.getLoginPermission() <= 2:
                    yield "<a href='/Post/Reply/%s/%s'>Reply with Quote</a>" % (id, ID)
                yield "</td>"
                yield "</tr>"
            yield "</table>"
            yield "<br />"
            count = count + 1

    # This function is what creates new threads. If title and content aren't
    # set, the form is shown. Otherwise, the new thread and new post
    # are created.
    def New(self, subforum_id, title = "", content = ""):
        """Create a thread (plus its first post) in subforum_id, or show the form.

        Yields HTML fragments; CherryPy streams the generator.
        """
        # Get the subforum name
        subforum_name = self.get_subforum_name(subforum_id)
        title = title.strip()
        content = content.strip()
        if self.getLoginPermission() >= 3:
            yield self.error("You are not logged in", True)
        elif subforum_name == None and subforum_id != "NULL":
            yield self.error("Specified subforum doesn't exist", True)
        elif title != "" and content != "":
            cursor = cherrypy.thread_data.db.cursor()
            # This PRAGMA is needed to ensure recursive triggers work
            cursor.execute("PRAGMA recursive_triggers = 1;")
            cherrypy.thread_data.db.commit()
            # Add the Thread
            cursor.execute('''
                INSERT INTO Thread (ID, SubforumID, PostCount, ViewCount, Sticky, Closed)
                VALUES (NULL, ?, 0, 0, 0, 0)
                ''', (subforum_id,))
            cherrypy.thread_data.db.commit()
            thread_id = cursor.lastrowid
            # Add the Post.
            # BUG FIX: sanitize the content as well as the title; Post.New
            # sanitizes reply content, so leaving the first post raw was an
            # inconsistent injection hole.
            cursor.execute('''
                INSERT INTO Post (ID, Title, Content, Timestamp, UserID, ThreadID, EditTimestamp, EditUserID)
                VALUES (NULL, ?, ?, ?, ?, ?, NULL, NULL)
                ''', (self.strip_html_bbcode(title), self.strip_html_bbcode(content), int(time.time()), self.getLoginID(), thread_id,))
            cherrypy.thread_data.db.commit()
            # After this commit, the post counts will update because of the triggers
            # Redirect to the new thread
            yield self.redirect("/Thread/%s" % thread_id)
        else:
            # Show the create thread form
            yield self.header("New Thread in %s" % subforum_name)
            yield self.links()
            yield self.breadcrumbs(SubforumID = subforum_id)
            yield "<form method='post' action='/Thread/New'>"
            yield "Thread Title: <br />"
            yield "<input class='monospace' type='text' name='title' size='60' value='%s' /><br />" % title
            yield "Content: <br />"
            yield "<textarea class='monospace' name='content' rows='20' cols='80'>%s</textarea><br />" % content
            yield "<input type='hidden' readonly='readonly' name='subforum_id' value='%s'>" % subforum_id
            yield "<input type='submit' value='New Thread' />"
            yield "</form>"
            yield self.footer()
    New.exposed = True

    # This function sets or unsets the Sticky attribute for a given thread.
    def Sticky(self, id, value):
        """Set Thread.Sticky = value for thread id (moderators only)."""
        if self.hasPermission(id):
            cursor = cherrypy.thread_data.db.cursor()
            cursor.execute("UPDATE Thread SET Sticky = ? WHERE ID = ?", (value, id,))
            cherrypy.thread_data.db.commit()
            # Redirect back to the thread
            yield self.redirect("/Thread/%s" % (id))
        else:
            # Not an admin, error
            yield self.error('You do not have permission to view this page!', True)
    Sticky.exposed = True

    # This function sets or unsets the Closed attribute for a given thread.
    def Closed(self, id, value):
        """Set Thread.Closed = value for thread id (moderators only)."""
        if self.hasPermission(id):
            cursor = cherrypy.thread_data.db.cursor()
            cursor.execute("UPDATE Thread SET Closed = ? WHERE ID = ?", (value, id,))
            cherrypy.thread_data.db.commit()
            # Redirect back to the thread
            yield self.redirect("/Thread/%s" % (id))
        else:
            # Not an admin, error
            yield self.error('You do not have permission to view this page!', True)
    Closed.exposed = True

    # This function moves a given thread. If NewParentID isn't set, the form
    # is displayed, otherwise, the thread is moved.
    def Move(self, id, NewParentID = None):
        """Move thread id to another subforum, or show the destination form."""
        if self.hasPermission(id) and NewParentID == None:
            # show the move form
            yield self.header("Move Thread")
            yield self.links()
            yield self.breadcrumbs(ThreadID = id)
            yield "<p>"
            yield "<form method='post' action='%s'>" % id
            yield self.RelocateTree()
            yield "<br /><br /><input type='submit' value='Move Thread' />"
            yield "</form>"
            yield "</p>"
            yield self.footer()
        elif self.hasPermission(id):
            # Move the thread
            cursor = cherrypy.thread_data.db.cursor()
            # This PRAGMA is needed to ensure recursive triggers work
            cursor.execute("PRAGMA recursive_triggers = 1;")
            cherrypy.thread_data.db.commit()
            cursor.execute("UPDATE Thread SET SubforumID = ? WHERE ID = ?", (NewParentID, id,))
            cherrypy.thread_data.db.commit()
            # Triggers will update post/thread counts
            yield self.redirect("/Thread/%s" % (id))
        else:
            # Not an admin, error
            yield self.error('You do not have permission to view this page!', True)
    Move.exposed = True

    # This function recursively builds the tree of the available subforums.
    def RelocateTree(self, id = "", depth = ""):
        """Return an HTML <select> (or its nested <option>s) of all subforums."""
        if id == "" and depth == "":
            out = "<select name='NewParentID'><option value='NULL' style='font-style: italic;'>(Forum Root)</option>" + self.RelocateTree("NULL", "&mdash;") + "</select>"
        else:
            cursor = cherrypy.thread_data.db.cursor()
            cursor.execute("SELECT ID, Name FROM Subforum WHERE ParentSubforumID IS ?", (id,))
            out = ""
            for ID, Name in cursor.fetchall():
                out += "<option value='%s'>%s%s</option>" % (ID, depth, Name)
                out += self.RelocateTree(ID, depth + "&mdash;")
        return out

    # This function deletes the thread with the given id. Triggers will take
    # care of the posts contained in the thread.
    def Delete(self, id):
        """Delete thread id (moderators only) and return to its subforum."""
        if self.hasPermission(id):
            cursor = cherrypy.thread_data.db.cursor()
            cursor.execute("SELECT Thread.SubforumID FROM Thread WHERE Thread.ID = ?", (id,))
            SubforumID = cursor.fetchone()[0]
            # This PRAGMA is needed to ensure recursive triggers work
            cursor.execute("PRAGMA recursive_triggers = 1;")
            cherrypy.thread_data.db.commit()
            cursor.execute("DELETE FROM Thread WHERE ID = ?", (id,))
            cherrypy.thread_data.db.commit()
            # Triggers will delete posts and update post/thread counts
            # Redirect back to the containing subforum
            yield self.redirect("/Subforum/%s" % (SubforumID))
        else:
            # Not an admin, error
            yield self.error('You do not have permission to view this page!', True)
    Delete.exposed = True
| Python |
import cherrypy
import sqlite3
import os.path
import hashlib
# Import our modules
import Common
import Subforum
import Thread
import Post
import User
import Search
import Structure
import PrivateMessage
# root_dir is used by the config file
root_dir = os.path.dirname(os.path.abspath(__file__))
# connect to the database
def connect(thread_index):
    """Per-thread CherryPy hook: give each worker its own SQLite connection.

    sqlite3 connections must not be shared between threads, so one is
    opened per thread on the 'start_thread' channel.
    """
    cherrypy.thread_data.db = sqlite3.connect('forum.db')
cherrypy.engine.subscribe('start_thread', connect)
# the actual forum
class Forum(Common.Template):
    """Root CherryPy application object.

    Mounts the per-feature handlers as attributes (CherryPy routes
    /Subforum, /Thread, ... to them) and implements login, logout and
    the search entry point itself.
    """

    def __init__(self):
        # Register our modules
        self.Subforum = Subforum.Subforum()
        self.Thread = Thread.Thread()
        self.Post = Post.Post()
        self.User = User.User()
        self.Search = Search.Search()
        self.Structure = Structure.Structure()
        self.PrivateMessage = PrivateMessage.PrivateMessage()

    # For the index, just load the root forum display
    def index(self):
        return self.Subforum.default()
    index.exposed = True

    # event: log in or register a user
    def login(self, user = None, passwd = None, reg = None):
        """Authenticate a user, or start registration when reg is set.

        Returns an error page, a registration/user page, or a redirect
        back to the referring page on successful login.
        """
        if user and passwd:
            c = cherrypy.thread_data.db.cursor()
            uid = None
            # check if the user pressed register
            if reg:
                c.execute('select ID from User where Username=?', (user,))
                uid = c.fetchone()
                # if the user is not registered
                if uid is None:
                    # find a unique id for the new user
                    return self.User.registerUserPage(user, passwd)
                else:
                    # display an error
                    return self.error('Username already exists.')
            # otherwise, the default action is to login
            else:
                # check the database
                # NOTE(review): passwords are unsalted MD5 digests — weak by
                # modern standards; changing the scheme requires migrating
                # the stored hashes.
                c.execute('select ID from User where Username=? AND Password=?', (user, hashlib.md5(passwd).hexdigest(),))
                uid = c.fetchone()
                if uid is None:
                    # display an error
                    return self.error('Incorrect user/password combination.')
                else:
                    uid = uid[0]
            # display an error if the user is banned
            ban = self.banCheck(uid)
            if ban != None:
                return self.error('You are banned. You will be unbanned: %s' % ban, True)
            # create the user's session
            cherrypy.session['ID'] = uid
            cherrypy.session['Username'] = user
            cherrypy.session['Permission'] = c.execute('select Permission from User where ID=?', (uid,)).fetchone()[0]
            # if the user pressed register
            # NOTE(review): both register branches above return early, so this
            # reg branch appears unreachable — confirm the intended flow.
            if reg:
                # take them to their user info page
                return self.User.index()
            else:
                # otherwise, redirect them to the page they came from
                #[todo] this seems unstable for some reason
                return self.redirect("javascript:r=document.referrer;if(r.indexOf('logout')!=-1|r.indexOf('login')!=-1){r='/'};location.replace(r)")
        else:
            # display an error
            return self.error('One or more login fields are blank.')
            #raise cherrypy.HTTPError(401)
    login.exposed = True

    # event: log out a user
    def logout(self):
        """Clear the session (Permission 3 = anonymous) and show the index."""
        # destroy the user's session
        cherrypy.session['ID'] = None
        cherrypy.session['Username'] = None
        cherrypy.session['Permission'] = 3
        return self.index()
    logout.exposed = True

    # event: user pressed search
    # this function must be here, otherwise it won't capture the GET requests
    def search(self, q = None):
        return self.Search.default(q)
    search.exposed = True
# Load the config file
conf = os.path.join(os.path.dirname(__file__), 'forum.conf')
# This is how they start the server in the tutorials
if __name__ == '__main__':
    # Run standalone with CherryPy's built-in server
    cherrypy.quickstart(Forum(), config=conf)
else:
    # Imported by a WSGI host: just mount the application
    cherrypy.tree.mount(Forum(), config=conf)
| Python |
import sqlite3
import getpass
import hashlib
# Create the connection
connection = sqlite3.connect("forum.db")
cursor = connection.cursor()
# Restore the dump
install_dump = open('install.sql', 'r')
cursor.executescript(install_dump.read())
install_dump.close()
# Add an administrator account
cursor.execute('INSERT INTO User (ID, Username, Password, Permission, JoinDate, Signature) VALUES (0, "Administrator", ?, 0, date(\'now\'),"ADMIN")', (hashlib.md5(getpass.getpass("Enter new administrator password: ")).hexdigest(),))
connection.commit()
# Populate the database if the user wants us to
if raw_input("Populate Database with Sample Data? [y/N]: ").lower() == "y":
populate_dump = open('populate.sql', 'r')
cursor.executescript(populate_dump.read())
populate_dump.close()
connection.commit()
# Otherwise, ask for forum information and create a blank froum
else:
name=raw_input("Forum Name: ")
desc=raw_input("Forum Description: ")
legal=raw_input("Forum Legal info [d=default]: ")
if legal.lower() == "d":
legal = "<br />Copyright 2011: Sean Fox, Mark Schultz, Jacob Snyder <br /> This software is under development. Use at your own risk."
logo=raw_input("Path to forum logo [d=default]: ")
if logo.lower() == "d":
logo = "/images/logos/logo.png"
cursor.execute('INSERT INTO Forum (Name, Description, LegalInfo, Logo) VALUES (?, ?, ?, ?)', (name,desc,legal,logo,))
connection.commit()
print "you entered: "
print name
print desc
print legal
print logo
| Python |
import cherrypy
import sqlite3
import time
import Common
class Subforum(Common.Template):
# Call the default function with no arguments
def index(self):
return self.default()
index.exposed = True
# The default view displays a given subforum. If no ID is given, it
# displays the root subforum
def default(self, id = None):
    """Render the subforum with the given id (or the root when id is None).

    Yields HTML fragments; CherryPy streams the generator.
    """
    # If no ID is given, we are viewing the root. Set id to NULL for queries
    # NOTE(review): this is the literal string "NULL", compared with "IS ?"
    # in the queries — confirm this matches how root subforums are stored.
    if id == None or id == "None":
        id = "NULL"
    # Get the subforum name
    subforum_name = self.get_subforum_name(id)
    if subforum_name == None and id != "NULL":
        # The subforum doesn't exist so make an error page
        yield self.error("The specified subforum does not exist!", True)
    else:
        # Print the top of the page
        yield self.header(subforum_name)
        yield self.links()
        yield self.breadcrumbs(SubforumID = id)
        # Print the subforum list
        yield self.generator_expand(self.subforum_list(id))
        # Print the thread list
        yield self.generator_expand(self.thread_list(id))
        yield self.footer()
default.exposed = True
# This function prints the subforums contained by the parent subforum
def subforum_list(self, id):
    """Yield an HTML table of the direct child subforums of id.

    Yields nothing when id has no children.
    """
    cursor = cherrypy.thread_data.db.cursor()
    if self.is_parent(id):
        yield "<br /><table class='subforum_list' width='100%'>"
        yield "<tr><th colspan='3' class='subforum_list'>Subforums</th></tr>"
        # This is a "parent" subforum, which means there are subforums beneath it, so list these
        # NOTE(review): "SELECT *" is unpacked positionally into seven names,
        # so this breaks silently if the Subforum column order ever changes.
        cursor.execute("SELECT * FROM Subforum WHERE ParentSubforumID IS ? ORDER BY RelativeOrder", (id,))
        for ID, Name, Description, ParentSubforumID, ThreadCount, PostCount, RelativeOrder in cursor.fetchall():
            # compile list of moderators for this subforum
            modlist = self.getModeratorsOf(ID)
            mods = ""
            if len(modlist) > 0:
                for uid, mod in modlist:
                    mods += '<a href="/User/%s">%s</a>, ' % (uid, mod)
                mods = mods.rstrip(', ')
            else:
                mods = "<i>None</i>"
            # output
            yield "<tr>"
            yield "<td class='subforum_list'><h2 class='subforum_list'><a href='/Subforum/%s'>%s</a></h2>%s<br />Moderators: %s</td>" % (ID, Name, Description, mods)
            yield "<td class='subforum_list' width='100px'>Threads: %s<br />Posts: %s</td>" % (ThreadCount, PostCount)
            yield "</tr>"
        yield "</table>"
# Returns if a given subforum is a "parent". This is if the subforum has
# subforums beneath it.
def is_parent(self, id):
    """Return True when the subforum identified by id has any children."""
    db_cursor = cherrypy.thread_data.db.cursor()
    db_cursor.execute("SELECT * FROM Subforum WHERE ParentSubforumID IS ?", (id,))
    # fetchone() is None exactly when the result set is empty (no children).
    return db_cursor.fetchone() is not None
# This function prints the threads contained by the parent
def thread_list(self, id):
    """Yield the HTML table of threads in subforum *id*.

    Each row shows the thread (title of its first post) and a blurb of
    its most recent post.  Sticky threads sort first, then threads by
    most recent activity (largest latest-post ID).
    """
    cursor = cherrypy.thread_data.db.cursor()
    yield "<p align='right'> "
    # If a user is logged in, show the new thread button
    if self.getLoginPermission() <= 2:
        yield "<a href='/Thread/New/%s'>New Thread</a>" % id
    yield " </p>"
    # Make a table across the screen
    yield "<table class='thread_list' width='100%'>"
    yield "<tr><th colspan='3' class='thread_list'>Threads</th></tr>"
    # Post is each thread's first post (MinPostID), Post2 its latest
    # (MaxPostID); sorted sticky-first, then reverse chronological.
    cursor.execute('''
SELECT
Thread.ID, Post.Title, User.ID, User.Username, Post.Timestamp, Thread.PostCount, Thread.ViewCount, Thread.Sticky, Thread.Closed, Post2.ID, Post2.Title, Post2.Content, User2.ID, User2.Username, Post2.Timestamp
FROM
Post, Thread, User, Post Post2, User User2,
(SELECT ThreadID, MIN(Post.ID) AS MinPostID, MAX(Post.ID) AS MaxPostID
FROM Post
GROUP BY ThreadID) Times
WHERE
Thread.ID = Times.ThreadID AND
Post.ID = Times.MinPostID AND
Post.UserID = User.ID AND
Thread.SubforumID IS ? AND
Post2.ID = Times.MaxPostID AND
Post2.UserID = User2.ID
ORDER BY
Thread.Sticky DESC, Times.MaxPostID DESC
''', (id,))
    # Print the thread
    count = 0
    for ID, Title, UserID, Username, Timestamp, PostCount, ViewCount, Sticky, Closed, PostID, PostTitle, PostContent, PostUserID, PostUsername, PostTimestamp in cursor.fetchall():
        if Sticky == 1:
            Title += " - STICKY"
        if Closed == 1:
            Title += " - CLOSED"
        yield "<tr>"
        yield "<td class='thread_list'><h2 class='thread_list'><a href='/Thread/%s'>%s</a></h2>Started by <a href='/User/%s'>%s</a> on %s</td>" % (ID, Title, UserID, Username, time.strftime("%c", time.localtime(Timestamp)))
        yield "<td class='thread_list' width='100px'>Replies: %s<br />Views: %s<br /></td>" % (PostCount, ViewCount)
        # Prefer the latest post's title for the blurb; fall back to its
        # content when the title is empty/None, and truncate to 18 chars.
        blurb = PostTitle if PostTitle else PostContent
        if len(blurb) > 18:
            blurb = blurb[0:18] + "..."
        yield "<td class='thread_list' width='200px'><a href='/Thread/%s#%s'>%s</a><br /><a href='/User/%s'>%s</a><br />%s</td>" % (ID, PostID, blurb, PostUserID, PostUsername, time.strftime("%c", time.localtime(PostTimestamp)))
        count += 1
    if count == 0:
        yield "<tr><td class='thread_list' colspan='3'><i>There are no threads here</i></td></tr>"
    yield "</table>"
| Python |
import cherrypy
import sqlite3
import time
import re
# central base class that other pages inherit
class Template:
# header includes the logo and login/register form
def header(self, title=None):
    """Return the opening page HTML: <head>, banner with the forum name,
    description and optional logo, and either a welcome/logout box (when
    logged in) or the login/register form.

    NOTE(review): the <html>/<body> opened here are never closed by
    footer() -- browsers tolerate it, but confirm this is intentional.
    """
    # Window title is "page - forum name", or just the forum name
    if title:
        title = "%s - %s" % (title,self.getForumName())
    else:
        title = "%s" % self.getForumName()
    out = '''
<html>
<head>
<title>%s</title>
''' % title
    out += '''
<link rel="stylesheet" type="text/css" href="/style.css" />
</head>
<body>
<table width='100%' height='100px'>
<tr id="header" valign='top'>
<td id="logo">
<a href="/"><h1>''' + self.getForumName() + '''</h1></a>''' + self.getForumDescription() + '''
</td>
<td id="logo">'''
    # Only emit the <img> when a logo path is actually configured
    if not (self.getForumLogo() == "" or self.getForumLogo() == None):
        out += '''<img src="''' + self.getForumLogo() + '''" alt="No Logo"></img>'''
    out+='''</td>
<td id="login" align="right">
'''
    # if the user is logged in
    if self.getLoginID() != None:
        # display their username and a logout button
        out += '''Welcome, <a href="/User/%d">%s</a><br><br />
<form method='post' action="/logout">
<input type="submit" value="Logout"></input>
</form>
''' % (self.getLoginID(), self.getLoginUsername())
    else:
        # display the login/register form
        out += '''
<form method=post action="/login">
Username: <input name="user" type="text"></input>
<br>
Password: <input name="passwd" type="password"></input>
<br>
<input type="submit" value="Login"></input>
<input type="submit" name="reg" value="Register"></input>
</form>
'''
    out += '''
</td>
</tr>
</table>
'''
    return out
# The useful links section has the user list, PMs, and admin functions
# links depending on the currently logged in user's permissions
def links(self):
    """Return the 'useful links' bar plus the search box.

    A Private Messages link appears for logged-in users; an admin
    (permission 0) also gets the forum-structure link.
    """
    out = '''
<table width='100%'>
<tr valign='top'>
<td id="links">
'''
    out += '''Useful links: '''
    out += '''<a href="/User">User List</a>'''
    # display private message link if they're logged in
    if self.getLoginID() != None:
        out += ''' | <a href="/PrivateMessage">Private Messages</a>'''
    # if the user is logged in as an admin
    if self.getLoginPermission() == 0:
        # link to admin functions
        out += ''' | Administrator functions: <a href="/Structure">Modify forum structure</a>'''
    out += '''
</td>
<td width=30% align="right">
<form method=get action="/search">
<input name="q" type="text"></input><input type="submit" value="Search"></input>
</form>
</td>
</tr>
</table>
'''
    return out
# breadcrumbs section
#
# breadcrumbs are the directory-like path to where the user is looking.
# There are multiple arguments to this function. Only set one. They are
# self-explanatory, just set the ID for the view you're in and you're set.
def breadcrumbs(self, ThreadID = None, SubforumID = None):
    """Return the HTML breadcrumb trail for the current view.

    Pass at most one of ThreadID / SubforumID; with neither, only the
    forum-root crumb is produced.
    """
    cursor = cherrypy.thread_data.db.cursor()
    breadcrumb = ""
    # Thread is set, so the thread is at the end of the breadcrumb
    if ThreadID != None:
        # Get the thread's name (its first post's title) and its subforum.
        # Parameterized: ThreadID arrives from the URL, and the old
        # "%s" string interpolation allowed SQL injection.
        cursor.execute('''
SELECT Post.Title, Thread.SubforumID
FROM
Post, Thread,
(SELECT ThreadID, MIN(Post.ID) AS MinPostID
FROM Post
GROUP BY ThreadID) Time
WHERE
Time.ThreadID = ? AND
Thread.ID = Time.ThreadID AND
Time.MinPostID = Post.ID
''', (ThreadID,))
        thread = cursor.fetchone()
        thread_name = thread[0]
        if self.Thread_Closed(ThreadID):
            thread_name += " - CLOSED"
        # Put the thread at the end of the breadcrumb
        breadcrumb = " > <a href='/Thread/%s'>%s</a> %s" % (ThreadID, thread_name, breadcrumb)
        # Set the SubforumID to generate the rest of the path in the next loop
        SubforumID = thread[1]
    # Subforum ID is set, so generate the path up to the root
    while SubforumID != "NULL" and SubforumID != None:
        # Get the Name of this subforum and the parent's ID
        cursor.execute("SELECT Name, ParentSubforumID FROM Subforum WHERE ID IS ?", (SubforumID,))
        row = cursor.fetchone()
        # Prepend this subforum, walking toward the root
        breadcrumb = " > <a href='/Subforum/%s'>%s</a> %s" % (SubforumID, row[0], breadcrumb)
        SubforumID = row[1]
    # Print the forum root in the breadcrumb
    breadcrumb = "> <a href='/Subforum'>%s</a> %s<br />" % (self.getForumName(), breadcrumb)
    return breadcrumb
# This function expands generators. This is needed if a function is
# yielding a value from a function that yields a value.
def generator_expand(self, generator):
    """Concatenate everything yielded by *generator* into one string.

    join() is linear; the old  out = "%s%s" % (out, item)  loop rebuilt
    the whole string on every item (quadratic).  Items are still passed
    through "%s" so non-string yields keep working.
    """
    return "".join("%s" % item for item in generator)
# display an error page with a custom message
def error(self, msg = 'Unknown Error', severe = False):
    """Build a complete error page; severe errors are wrapped in red."""
    parts = [self.header('Error'), self.links(), self.breadcrumbs()]
    if severe:
        # red text is severe!
        parts.append('<font color="red">')
    parts.append('<h2>%s</h2>' % msg)
    if severe:
        parts.append('</font>')
    parts.append(self.footer())
    return "".join(parts)
# static footer
def footer(self):
    """Return the page footer: the forum's legal notice, centered.

    Fixed: the paragraph was closed with a stray </div> instead of </p>.
    """
    return "<p align='center'>%s</p>" % self.getForumLegal()
# Shared lookup for the single-row Forum settings table.
def _forum_field(self, query, default):
    """Run *query* (a one-column SELECT on Forum) and return the first
    column of the first row, or *default* when no row exists."""
    cursor = cherrypy.thread_data.db.cursor()
    cursor.execute(query)
    row = cursor.fetchone()
    if row != None:
        return row[0]
    else:
        return default
# Returns name/ title of forum
def getForumName(self):
    # leftover debug print removed -- it logged on every page render
    return self._forum_field("SELECT Name from Forum", "Default Forum")
# Return description of forum
def getForumDescription(self):
    return self._forum_field("SELECT Description from Forum", "Default Forum Info")
# Return the legal info for the footer
def getForumLegal(self):
    return self._forum_field("SELECT LegalInfo from Forum", "Default Forum Info")
# Return the path to the forum logo
def getForumLogo(self):
    return self._forum_field("SELECT Logo from Forum", "")
# Returns the currently logged in user or None
def getLoginID(self):
    """Return the logged-in user's ID from the session, or None for guests."""
    return cherrypy.session.get('ID', None)
# Returns the currently logged in user's name or None
def getLoginUsername(self):
    """Return the logged-in user's username from the session, or None."""
    return cherrypy.session.get('Username', None)
# Returns user info
def getUserInfo(self, id):
    """Return all User rows matching *id* (a list; empty when not found)."""
    cursor = cherrypy.thread_data.db.cursor()
    cursor.execute("SELECT * FROM User WHERE ID IS ?", (id,))
    return cursor.fetchall()
# Returns username of particular userid
def getUsername(self, id):
    """Return the username for user *id*, or None when no such user."""
    cursor = cherrypy.thread_data.db.cursor()
    cursor.execute("SELECT Username FROM User WHERE ID IS ?", (id,))
    row = cursor.fetchone()
    if row is None:
        return None
    return row[0]
# Returns the currently logged in permission (3 if guest)
def getLoginPermission(self):
    """Return the session permission level: 0=admin, 1=moderator,
    2=member, 3=guest (the default when not logged in)."""
    return cherrypy.session.get('Permission', 3)
# This returns the name of the current subforum given its id
def get_subforum_name(self, id):
    """Return the Name of subforum *id*, or None when it does not exist."""
    cursor = cherrypy.thread_data.db.cursor()
    cursor.execute("SELECT Name FROM Subforum WHERE ID IS ?", (id,))
    row = cursor.fetchone()
    return row[0] if row is not None else None
# return a list of (id, name) tuples of moderators for a given a subforum id
def getModeratorsOf(self, SubforumID = "NULL"):
    """Return [(user_id, username), ...] for moderators of *SubforumID*.

    The string "NULL" denotes the forum root, matching how subforum IDs
    are queried elsewhere in this file (e.g. breadcrumbs, Tree).
    """
    cursor = cherrypy.thread_data.db.cursor()
    cursor.execute('''
SELECT
User.ID, User.Username
FROM
User, Moderator
WHERE
User.Permission = 1 AND
User.ID = Moderator.UserID AND
Moderator.SubforumID IS ?
''', (SubforumID,))
    return cursor.fetchall()
# does the user have moderator permissions on the given thread?
def hasPermission(self, ThreadID):
    """Return True when the logged-in user may moderate thread *ThreadID*.

    Admins (0) always can, guests (3) never can; everyone else only if
    their user ID is in the Moderator table for the thread's subforum.
    """
    p = self.getLoginPermission()
    if p == 3:
        # they are a guest, of course not
        return False
    if p == 0:
        # they are an admin, of course they do
        return True
    # otherwise, they're a moderator
    cursor = cherrypy.thread_data.db.cursor()
    # check if this user id is a mod of this subforum
    cursor.execute('''
SELECT
Moderator.UserID
FROM
Thread, Moderator
WHERE
Thread.ID IS ? AND
Thread.SubforumID = Moderator.SubforumID
''', (ThreadID,))
    # any() replaces the old len()>0 check plus manual loop
    id = self.getLoginID()
    return any(uid[0] == id for uid in cursor.fetchall())
# return the date a ban will be lifted, or None
# unlift the ban if it has expired
def banCheck(self, uid):
    """Return a human-readable ban expiry for user *uid*, or None.

    Returns "never" for a permanent ban (timestamp 0).  An expired ban
    is lifted (BanTimestamp reset to NULL) as a side effect.
    """
    cursor = cherrypy.thread_data.db.cursor()
    cursor.execute('''SELECT BanTimestamp FROM User WHERE ID IS ?''', (uid,))
    row = cursor.fetchone()
    if row == None:
        # no such user
        return None
    ts = row[0]
    if ts == None:
        # not banned; the old code fell through and compared None
        # against time.time(), issuing a pointless UPDATE
        return None
    if ts == 0:
        # permaban
        return "never"
    if time.time() > ts:
        # ban expired, lift it
        cursor.execute('''UPDATE User SET BanTimestamp = NULL WHERE ID IS ?''', (uid,))
        cherrypy.thread_data.db.commit()
        return None
    # leftover debug "print type(ts)" removed
    return time.ctime(ts)
# get the ID of a thread containing a given post ID
def getThreadID(self, postID):
    """Return the ID of the thread containing post *postID*, or None."""
    cursor = cherrypy.thread_data.db.cursor()
    cursor.execute("SELECT ThreadID FROM Post WHERE ID = ?", (postID,))
    row = cursor.fetchone()
    return row[0] if row else None
# This returns the name of the current thread given its id
def get_thread_name(self, id):
    """Return the title of thread *id* (the title of its oldest post),
    or None when the thread does not exist."""
    cursor = cherrypy.thread_data.db.cursor()
    # A thread's name is the title of its minimum-ID (first) post
    cursor.execute('''
SELECT Post.Title
FROM
Post, Thread,
(SELECT ThreadID, MIN(Post.ID) AS MinPostID
FROM Post
GROUP BY ThreadID) Time
WHERE
Time.ThreadID = ? AND
Thread.ID = Time.ThreadID AND
Time.MinPostID = Post.ID
''', (id,))
    row = cursor.fetchone()
    return row[0] if row is not None else None
# This generates a simple HTML document that will redirect to a given URL.
# Note that this does not actually redirect; callers of this function will
# have to manually return / yield this
def redirect(self, path):
    """Return a tiny HTML page whose meta-refresh sends the browser to *path*."""
    template = "<html><head><meta HTTP-EQUIV='REFRESH' content=\"0; url=%s\"></head></html>"
    return template % path
def Thread_Closed(self, threadid):
    """Return True when thread *threadid* is marked closed.

    An unknown thread ID now returns False; the old code indexed
    fetchone()'s result unconditionally and raised TypeError on a
    missing thread.
    """
    cursor = cherrypy.thread_data.db.cursor()
    cursor.execute("SELECT Thread.Closed FROM Thread WHERE Thread.ID = ?", (threadid,))
    row = cursor.fetchone()
    return row is not None and row[0] == 1
def strip_html_bbcode(self, string):
    """Return *string* with <html> tags and [bbcode] tags stripped out."""
    # Same two-pass order as before: bbcode brackets first, then html tags
    without_bbcode = re.sub(r"\[.*?\]", "", string)
    return re.sub(r"<.*?>", "", without_bbcode)
| Python |
import cherrypy
import sqlite3
import time
import Common
from postmarkup import render_bbcode
class Search(Common.Template):
def index(self):
    """Handle /search/ with no path component by delegating to default()."""
    return self.default()
index.exposed = True
def default(self, q = None):
    """Render search results for the space-separated terms in *q*.

    Every term must appear in a post's content (terms are ANDed);
    results are ordered by most recently edited post first.
    """
    # if the user didn't search for anything
    if q is None or q == '':
        # return an error
        yield self.error('Please enter valid search terms.')
    else:
        # Build a parameterized query: one "LIKE ?" clause per term.
        # The old code spliced raw user terms into the SQL string,
        # which allowed SQL injection through the search box.
        terms = q.split(' ')
        expr = '''
SELECT
Post.ID, Post.Title, Post.Content, Post.Timestamp, User.ID, User.Username, User.Permission, User.Avatar, User.Signature, Post.EditTimestamp, Post.EditUserID
FROM
Post, User
WHERE
User.ID = Post.UserID
''' + (" AND Post.Content LIKE ?" * len(terms)) + '''
ORDER BY
Post.EditTimestamp DESC
'''
        params = tuple("%" + term + "%" for term in terms)
        cursor = cherrypy.thread_data.db.cursor()
        cursor.execute(expr, params)
        # return the search results
        res = cursor.fetchall()
        yield self.header()
        yield self.links()
        yield self.breadcrumbs()
        yield '<h2>Search Results for "%s"</h2>' % q
        # if there are no results
        if len(res) == 0:
            yield '<p>No results.</p>'
        else:
            # copied with modifications from Thread.py
            count = 1
            permissions = ["Administrator", "Moderator", "Member"]
            for ID, Title, Content, Timestamp, UserID, Username, Permission, Avatar, Signature, EditTimestamp, EditUserID in res:
                # link in corner takes the user to the actual thread and post
                tid = self.getThreadID(ID)
                yield "<table width='100%' cols='2' class='thread' >"
                yield "<tr>"
                yield "<td class='thread' colspan='2'><a name='%s'></a>%s <span style='float:right; text-align: right;'><a href='/Thread/%s#%s'>#%s</a></span></td>" % (ID, Title, tid, ID, count)
                yield "</tr>"
                yield "<tr>"
                if Avatar == "" or Avatar == None:
                    yield "<td class='thread' rowspan='3' width='200px'><a href='/User/%s'>%s</a><br />%s<br /><div width='100px' height='100px'> </div></td><td class='thread' height='75px' >%s</td>" % (UserID, Username, permissions[Permission], render_bbcode(Content))
                else:
                    yield "<td class='thread' rowspan='3' width='200px'><a href='/User/%s'>%s</a><br />%s<br /><img src='/%s' width='100px' height='100px' /></td><td class='thread' height='75px' >%s</td>" % (UserID, Username, permissions[Permission], Avatar, render_bbcode(Content))
                yield "</tr>"
                yield "<tr>"
                yield "<td class='thread'>%s</td>" % render_bbcode(Signature)
                yield "</tr>"
                yield "<tr>"
                if EditTimestamp == None:
                    # post wasn't edited
                    yield "<td class='thread'>Posted on %s</td>" % time.strftime("%c", time.localtime(Timestamp))
                else:
                    # show the username of the post editor
                    c = cherrypy.thread_data.db.cursor()
                    c.execute('''SELECT User.Username FROM User WHERE User.ID = ?''', (EditUserID,))
                    EditUsername = c.fetchone()[0]
                    yield "<td class='thread'>Posted on %s, Last Edited on %s by %s</td>" % (time.strftime("%c", time.localtime(Timestamp)), time.strftime("%c", time.localtime(EditTimestamp)), EditUsername)
                yield "</tr>"
                yield "</table>"
                yield "<br />"
                count = count + 1
        yield self.footer()
default.exposed = True
| Python |
import cherrypy
import sqlite3
import time
import Common
# restructure the forum
class Structure(Common.Template):
# Call the default function with no arguments
def index(self):
    """Handle /Structure/ by delegating to the default restructure view."""
    return self.default()
index.exposed = True
# This function is the default view for the restructure view and shows the
# subforum tree along with all of the commands for each subforum
def default(self):
    """Yield the admin-only page showing the full subforum tree with
    per-node restructure commands."""
    if self.getLoginPermission() != 0:
        # Not an admin, error
        yield self.error('You do not have permission to view this page!', True)
    else:
        # display the current forum structure
        yield self.header("Restructure")
        yield self.links()
        yield self.breadcrumbs()
        # Print the tree
        yield self.Tree()
        yield self.footer()
# NOTE(review): unlike the sibling handlers, default is never marked
# .exposed here; it is only reachable through index() -- confirm intended.
# This function is recursively called by default to build the subforum Tree.
# It continually builds nested unordered (bulleted) lists.
def Tree(self, id = "NULL"):
    """Return nested <ul> HTML for the subforum tree rooted at *id*,
    with the admin action links (add/relocate/delete/...) on each node."""
    cursor = cherrypy.thread_data.db.cursor()
    cursor.execute("SELECT ID, Name FROM Subforum WHERE ParentSubforumID IS ? ORDER BY RelativeOrder", (id,))
    out = ""
    for ID, Name in cursor.fetchall():
        out += "<li>"
        # one action-link block per node; the node's ID fills every link
        out += Name + " (<a href='AddSub/%s'>Add Subforum</a> | <a href='Relocate/%s'>Relocate</a> | <a href='Delete/%s'>Delete</a> | <a href='EditMod/%s'>Edit Moderators</a> | <a href='Up/%s'>Up</a> | <a href='Down/%s'>Down</a> | <a href='Top/%s'>Top</a> | <a href='Bottom/%s'>Bottom</a>)" % (ID, ID, ID, ID, ID, ID, ID, ID)
        out += "</li>"
        # recurse into this subforum's children
        out += self.Tree(ID)
    if out != "":
        out = "<ul>" + out + "</ul>"
    if id == "NULL":
        # Top-level call: prepend the immutable Forum Root node (its
        # relocate/delete/reorder actions are rendered greyed out)
        out = "<ul><li><i>Forum Root</i> (<a href='AddSub/NULL'>Add Subforum</a> | <font style='color: gray'><u>Relocate</u></font> | <font style='color: gray'><u>Delete</u></font> | <a href='EditMod/NULL'>Edit Moderators</a> | <font style='color: gray'><u>Up</u></font> | <font style='color: gray'><u>Down</u></font> | <font style='color: gray'><u>Top</u></font> | <font style='color: gray'><u>Bottom</u></font>)</li>" + out + "</ul>"
    return out
#[todo] Add more robust error-checking.
def EditMod(self, ForumID, ModList = None):
    """Show or apply the moderator list for subforum *ForumID*.

    With no ModList, renders the edit form pre-filled with the current
    moderators (one username per line).  With ModList, demotes the old
    moderators and installs the named users in their place.
    """
    # figure out the subforum name
    name = self.get_subforum_name(ForumID)
    if name == None and ForumID == "NULL":
        name = "Forum Root"
    if self.getLoginPermission() != 0:
        # Not an admin, error
        yield self.error('You do not have permission to view this page!', True)
    elif name == None:
        # The subforum is not found, so error
        yield self.error('Specified subforum not found.')
    elif ModList == None:
        # get list of mods for the subforum
        cursor = cherrypy.thread_data.db.cursor()
        cursor.execute('''
SELECT
User.Username
FROM
User, Moderator
WHERE
User.Permission = 1 AND
User.ID = Moderator.UserID AND
Moderator.SubforumID IS ?
''', (ForumID,))
        mods = ""
        for mod in cursor.fetchall():
            mods += mod[0] + "\n"
        # display moderator entry form
        yield self.header("Edit Moderators")
        yield self.links()
        yield self.breadcrumbs()
        yield '''
<form method="post" action="%s">
<h2>Moderator List for "%s"</h2>
Please enter one username per line:<br /><br />
<textarea class="monospace" type="text" name="ModList" rows="8" cols="40">%s</textarea><br />
<input type="submit" value="Update" />
</form>
''' % (ForumID, name, mods)
        yield self.footer()
    else:
        cursor = cherrypy.thread_data.db.cursor()
        # This PRAGMA is needed to ensure recursive triggers work
        cursor.execute("PRAGMA recursive_triggers = 1;")
        cherrypy.thread_data.db.commit()
        # get a list of the old mod's user ids
        cursor.execute('''
SELECT
User.ID
FROM
User, Moderator
WHERE
User.Permission = 1 AND
User.ID = Moderator.UserID AND
Moderator.SubforumID IS ?
''', (ForumID,))
        oldmods = cursor.fetchall()
        # Demote the old moderators.  Placeholders replace the old
        # string-built "ID = x OR ..." clause.
        if len(oldmods) > 0:
            id_holes = ", ".join("?" * len(oldmods))
            cursor.execute('''UPDATE User SET Permission = 2 WHERE ID IN (%s)''' % id_holes,
                           tuple(uid[0] for uid in oldmods))
            cherrypy.thread_data.db.commit()
        cursor.execute('''DELETE FROM Moderator WHERE SubforumID IS ?''', (ForumID,))
        cherrypy.thread_data.db.commit()
        # De-duplicate the submitted usernames and drop blank lines
        ModList = list(set([mod.rstrip() for mod in ModList.split("\n") if mod.strip() != '']))
        if len(ModList) > 0:
            # Parameterized LIKE clauses: the old code interpolated the
            # raw usernames into the SQL, allowing SQL injection (and it
            # produced invalid SQL when the list came back empty).
            name_clause = " OR ".join(["Username LIKE ?"] * len(ModList))
            # get the new mod's user ids
            cursor.execute('''SELECT ID FROM User WHERE %s''' % name_clause, tuple(ModList))
            newmods = cursor.fetchall()
            # add the new list of moderators for this subforum
            cursor.execute('''UPDATE User SET Permission = 1 WHERE %s''' % name_clause, tuple(ModList))
            cherrypy.thread_data.db.commit()
            for uid in newmods:
                # Bound parameters store ForumID the same way the IS ?
                # lookups above query it (the old %s splice turned the
                # "NULL" string into a SQL NULL keyword instead)
                cursor.execute('''INSERT INTO Moderator VALUES (?, ?)''', (uid[0], ForumID))
            # commit changes
            cherrypy.thread_data.db.commit()
        # return to the restructure page
        yield self.redirect("/Structure")
EditMod.exposed = True
# This function is the add subforum form. If the Name and Description args
# are not given, then the form to create the subforum is showed. If they
# are set, then the given subforum is created beneath the Parent subforum
def AddSub(self, ParentID, Name = None, Description = None):
    """Show the add-subforum form, or create a subforum beneath *ParentID*."""
    cursor = cherrypy.thread_data.db.cursor()
    # (removed an unused query that checked whether ParentID had threads;
    # its result was never read)
    if self.getLoginPermission() != 0:
        # Not an admin, error
        yield self.error('You do not have permission to view this page!', True)
    elif self.get_subforum_name(ParentID) == None and ParentID != "NULL":
        # The subforum is not found, so error
        yield self.error('Specified subforum not found.')
    elif Name == None or Name == "":
        # There is no name given, so show the create form
        yield self.header("Add Subforum")
        yield self.links()
        yield self.breadcrumbs()
        yield "<form method='post' action='%s'>" % ParentID
        yield "Subforum Name: <br />"
        yield "<input class='monospace' type='text' name='Name' size='60' /><br />"
        yield "Description: <br />"
        yield "<textarea class='monospace' name='Description' rows='3' cols='80'></textarea><br />"
        yield "<input type='submit' value='New Subforum' />"
        yield "</form>"
        yield self.footer()
    else:
        # Get the largest relative order value in this subforum. If there are
        # no subforums, set it to 0
        cursor.execute("SELECT MAX(Subforum.RelativeOrder) FROM Subforum WHERE Subforum.ParentSubforumID IS ?", (ParentID,))
        # renamed from 'max', which shadowed the builtin
        max_order = cursor.fetchone()[0]
        order = 0 if max_order == None else max_order + 1
        # Create the new subforum. Recursive triggers are not needed in this case
        cursor.execute('''
INSERT INTO Subforum (ID, Name, Description, ParentSubforumID, ThreadCount, PostCount, RelativeOrder)
VALUES (NULL, ?, ?, ?, 0, 0, ?)
''', (Name, Description, ParentID, order,))
        cherrypy.thread_data.db.commit()
        # Return to the restructure page
        yield self.redirect("/Structure")
AddSub.exposed = True
# This function moves a subforum underneath a new parent. If NewParentID
# is not set, it shows the form, otherwise, it moves the subforum
def Relocate(self, id, NewParentID = None):
    """Show the relocate form for subforum *id*, or move it under
    *NewParentID* and place it at the bottom of its new parent."""
    if self.getLoginPermission() != 0:
        # Not an admin, error
        yield self.error('You do not have permission to view this page!', True)
    elif self.get_subforum_name(id) == None:
        # The subforum is not found, so error
        yield self.error('Specified subforum not found.')
    elif NewParentID == None or id == NewParentID:
        # The new parent is not valid, so show the form
        yield self.header("Relocate Subforum")
        yield self.links()
        yield self.breadcrumbs()
        yield "<p>"
        yield "<form method='post' action='%s'>" % id
        # Show the subforum tree
        yield self.RelocateTree()
        yield "<br /><br /><input type='submit' value='Move Subforum' />"
        yield "</form>"
        yield "</p>"
        yield self.footer()
    else:
        cursor = cherrypy.thread_data.db.cursor()
        # This PRAGMA is needed to ensure recursive triggers work
        cursor.execute("PRAGMA recursive_triggers = 1;")
        cherrypy.thread_data.db.commit()
        # Move the subforum to the new parent
        cursor.execute("UPDATE Subforum SET ParentSubforumID = ? WHERE ID = ?", (NewParentID, id,))
        cherrypy.thread_data.db.commit()
        # Move this subforum to the bottom of its new parent.  Bottom()
        # is a generator, so it must be drained for its side effects to
        # run -- the old bare call created the generator and silently
        # did nothing.
        for _ in self.Bottom(id):
            pass
        yield self.redirect("/Structure")
Relocate.exposed = True
# This function creates the drop down tree structure of the subforum tree.
# Depth is actually a string of mdashes that is added to at each level
def RelocateTree(self, id = "", depth = ""):
    """Return <select>/<option> HTML for choosing a new parent subforum."""
    if id == "" and depth == "":
        # Top-level call: wrap the recursion in the <select> element,
        # with the Forum Root as the first option
        inner = self.RelocateTree("NULL", "—")
        return ("<select name='NewParentID'>"
                "<option value='NULL' style='font-style: italic;'>(Forum Root)</option>"
                + inner + "</select>")
    # Recursive case: one <option> per child, indented with mdashes
    cursor = cherrypy.thread_data.db.cursor()
    cursor.execute("SELECT ID, Name FROM Subforum WHERE ParentSubforumID IS ?", (id,))
    options = []
    for child_id, child_name in cursor.fetchall():
        options.append("<option value='%s'>%s%s</option>" % (child_id, depth, child_name))
        options.append(self.RelocateTree(child_id, depth + "—"))
    return "".join(options)
# This deletes a given subforum and in turn all of its containing subforums,
# threads, and posts via triggers
def Delete(self, id):
    """Delete subforum *id*; DB triggers cascade to children, threads, posts."""
    if self.getLoginPermission() != 0:
        # Only admins may delete subforums
        yield self.error('You do not have permission to view this page!', True)
        return
    if self.get_subforum_name(id) == None:
        yield self.error('Specified subforum not found.')
        return
    db = cherrypy.thread_data.db
    cursor = db.cursor()
    # Recursive triggers must be enabled so the cascade reaches grandchildren
    cursor.execute("PRAGMA recursive_triggers = 1;")
    db.commit()
    cursor.execute("DELETE FROM Subforum WHERE ID = ?", (id,))
    db.commit()
    # Triggers will delete subforums, threads, posts and update post/thread counts
    yield self.redirect("/Structure")
Delete.exposed = True
# This moves a given subforum to the top of the ordering in the given
# subforum by setting its order to one less than the minimum in that
# subforum
def Top(self, id):
    """Move subforum *id* to the top of its parent's ordering."""
    if self.getLoginPermission() != 0:
        yield self.error('You do not have permission to view this page!', True)
        return
    if self.get_subforum_name(id) == None:
        yield self.error('Specified subforum not found.')
        return
    cursor = cherrypy.thread_data.db.cursor()
    # Smallest RelativeOrder among this subforum's siblings
    cursor.execute("SELECT MIN(Subforum.RelativeOrder) FROM Subforum WHERE Subforum.ParentSubforumID IS (SELECT Subforum.ParentSubforumID FROM Subforum WHERE Subforum.ID = ?)", (id,))
    # One below the minimum sorts it first
    new_order = cursor.fetchone()[0] - 1
    cursor.execute("UPDATE Subforum SET RelativeOrder = ? WHERE ID = ?", (new_order, id,))
    cherrypy.thread_data.db.commit()
    yield self.redirect("/Structure")
Top.exposed = True
# This moves a given subforum to the bottom of the ordering in the given
# subforum by setting its order to one more than the maximum in that
# subforum
def Bottom(self, id):
    """Move subforum *id* to the bottom of its parent's ordering."""
    if self.getLoginPermission() != 0:
        yield self.error('You do not have permission to view this page!', True)
        return
    if self.get_subforum_name(id) == None:
        yield self.error('Specified subforum not found.')
        return
    cursor = cherrypy.thread_data.db.cursor()
    # Largest RelativeOrder among this subforum's siblings
    cursor.execute("SELECT MAX(Subforum.RelativeOrder) FROM Subforum WHERE Subforum.ParentSubforumID IS (SELECT Subforum.ParentSubforumID FROM Subforum WHERE Subforum.ID = ?)", (id,))
    # One above the maximum sorts it last
    new_order = cursor.fetchone()[0] + 1
    cursor.execute("UPDATE Subforum SET RelativeOrder = ? WHERE ID = ?", (new_order, id,))
    cherrypy.thread_data.db.commit()
    yield self.redirect("/Structure")
Bottom.exposed = True
# This function moves a given subforum down. To do this, it swaps its order
# with the subforum "below" it
def Down(self, MoveDownID):
    """Swap subforum *MoveDownID*'s RelativeOrder with its next-lower
    sibling, moving it one slot down; no-op when it is already last."""
    if self.getLoginPermission() != 0:
        # Not an admin, error
        yield self.error('You do not have permission to view this page!', True)
    elif self.get_subforum_name(MoveDownID) == None:
        # The thread is not found, so error
        yield self.error('Specified subforum not found.')
    else:
        cursor = cherrypy.thread_data.db.cursor()
        # Get this subforum's relative order
        cursor.execute("SELECT Subforum.RelativeOrder FROM Subforum WHERE Subforum.ID = ?", (MoveDownID,))
        MoveDownOrder = cursor.fetchone()[0]
        # Get the ID and Relative order of the subforum below this:
        # the sibling (same parent) with the smallest order strictly
        # greater than this subforum's order
        cursor.execute('''
SELECT Subforum.ID, Subforum.RelativeOrder
FROM Subforum
WHERE Subforum.RelativeOrder IS
(
SELECT MIN(Subforum.RelativeOrder)
FROM Subforum
WHERE
Subforum.ParentSubforumID IS
(
SELECT Subforum.ParentSubforumID
FROM Subforum
WHERE Subforum.ID = ?
)
AND
Subforum.RelativeOrder >
(
SELECT Subforum.RelativeOrder
FROM Subforum
WHERE Subforum.ID = ?
)
)
AND Subforum.ParentSubforumID IS
(
SELECT Subforum.ParentSubforumID
FROM Subforum
WHERE Subforum.ID = ?
)
''', (MoveDownID, MoveDownID, MoveDownID,))
        result = cursor.fetchone()
        # No result means it is already the last sibling -- nothing to swap
        if result != None:
            (MoveUpID, MoveUpOrder) = result
            # Swap the orders
            cursor.execute("UPDATE Subforum SET RelativeOrder = ? WHERE ID = ?", (MoveDownOrder,MoveUpID,))
            cherrypy.thread_data.db.commit()
            cursor.execute("UPDATE Subforum SET RelativeOrder = ? WHERE ID = ?", (MoveUpOrder,MoveDownID,))
            cherrypy.thread_data.db.commit()
        yield self.redirect("/Structure")
Down.exposed = True
# This function moves a given subforum up. To do this, it swaps its order
# with the subforum "above" it
def Up(self, MoveUpID):
    """Swap subforum *MoveUpID*'s RelativeOrder with its next-higher
    sibling, moving it one slot up; no-op when it is already first."""
    if self.getLoginPermission() != 0:
        # Not an admin, error
        yield self.error('You do not have permission to view this page!', True)
    elif self.get_subforum_name(MoveUpID) == None:
        # The thread is not found, so error
        yield self.error('Specified subforum not found.')
    else:
        cursor = cherrypy.thread_data.db.cursor()
        # Get this subforum's relative order
        cursor.execute("SELECT Subforum.RelativeOrder FROM Subforum WHERE Subforum.ID = ?", (MoveUpID,))
        MoveUpOrder = cursor.fetchone()[0]
        # Get the ID and Relative order of the subforum above this:
        # the sibling (same parent) with the largest order strictly
        # less than this subforum's order
        cursor.execute('''
SELECT Subforum.ID, Subforum.RelativeOrder
FROM Subforum
WHERE Subforum.RelativeOrder IS
(
SELECT MAX(Subforum.RelativeOrder)
FROM Subforum
WHERE
Subforum.ParentSubforumID IS
(
SELECT Subforum.ParentSubforumID
FROM Subforum
WHERE Subforum.ID = ?
)
AND
Subforum.RelativeOrder <
(
SELECT Subforum.RelativeOrder
FROM Subforum
WHERE Subforum.ID = ?
)
)
AND Subforum.ParentSubforumID IS
(
SELECT Subforum.ParentSubforumID
FROM Subforum
WHERE Subforum.ID = ?
)
''', (MoveUpID, MoveUpID, MoveUpID,))
        result = cursor.fetchone()
        # No result means it is already the first sibling -- nothing to swap
        if result != None:
            (MoveDownID, MoveDownOrder) = result
            # Swap the orders
            cursor.execute("UPDATE Subforum SET RelativeOrder = ? WHERE ID = ?", (MoveDownOrder,MoveUpID,))
            cherrypy.thread_data.db.commit()
            cursor.execute("UPDATE Subforum SET RelativeOrder = ? WHERE ID = ?", (MoveUpOrder,MoveDownID,))
            cherrypy.thread_data.db.commit()
        yield self.redirect("/Structure")
Up.exposed = True
| Python |
import sqlite3 as sql
# One-off Python 2 script that builds sample.db with demo rows.
conn = sql.connect('sample.db')
curs = conn.cursor()
# Create Item table
curs.execute('''create table item
(id integer primary key, itemno text unique,
scancode text, descr text, price real)''')
# NOTE(review): itemno/scancode are unquoted numeric literals in the SQL,
# so SQLite stores them with numeric affinity despite the text columns
# (e.g. 0001 becomes 1) -- confirm this is intended.
curs.execute("insert into item values\
(NULL,0001,32187645,'Milk',2.50)")
curs.execute("insert into item values\
(NULL,0002,45321876,'Beer',4.50)")
curs.execute("insert into item values\
(NULL,0003,18764532,'Bread',1.50)")
conn.commit()
curs.execute("select * from item")
for row in curs:
    print row
# Secondary table mapping items to vendor codes (many vendors per item)
curs.execute('''create table itemvendor
(id integer primary key, itemno text, vendor text)''')
curs.execute("insert into itemvendor values\
(NULL,0001,2345)")
curs.execute("insert into itemvendor values\
(NULL,0002,6789)")
curs.execute("insert into itemvendor values\
(NULL,0001,0543)")
conn.commit()
# Join demo: every item paired with each of its vendors
curs.execute("select * from item, itemvendor WHERE item.itemno = itemvendor.itemno")
for row in curs:
    print row
| Python |
import cherrypy
import sqlite3
def connect(thread_index):
    """Open a per-thread SQLite connection (sqlite3 connections must not
    be shared across threads)."""
    # Create a connection and store it in the current thread
    cherrypy.thread_data.db = sqlite3.connect('sample.db')
# Tell CherryPy to call "connect" for each thread, when it starts up
cherrypy.engine.subscribe('start_thread', connect)
class Root:
    def index(self):
        """Stream the contents of the item table as an HTML table."""
        # Open a cursor, using the DB connection for the current thread
        c = cherrypy.thread_data.db.cursor()
        c.execute('select * from item')
        yield "<table border=1>\n"
        for row in c:
            # one <td> cell per column of the row
            yield "<tr>"
            for col in row:
                yield "<td>%s</td>" % col
            yield "</tr>\n"
        yield "</table>"
    index.exposed = True
cherrypy.quickstart(Root())
| Python |
"""
Tutorial - Multiple objects
This tutorial shows you how to create a site structure through multiple
possibly nested request handler objects.
"""
import cherrypy
class HomePage:
def index(self):
return '''
<p>Hi, this is the home page! Check out the other
fun stuff on this site:</p>
<ul>
<li><a href="/joke/">A silly joke</a></li>
<li><a href="/links/">Useful links</a></li>
</ul>'''
index.exposed = True
class JokePage:
def index(self):
return '''
<p>"In Python, how do you create a string of random
characters?" -- "Read a Perl file!"</p>
<p>[<a href="../">Return</a>]</p>'''
index.exposed = True
class LinksPage:
    """Links page; owns its own nested handler for the extra links."""

    def __init__(self):
        # A handler can create its own nested handlers: mounting this
        # object automatically makes ./extra/ resolvable too.
        self.extra = ExtraLinksPage()

    def index(self):
        """Render the list of useful links (relative URLs only).

        This object never needs to know its absolute position in the
        site tree because every link here is relative.
        """
        links_html = '''
        <p>Here are some useful links:</p>
        <ul>
            <li><a href="http://www.cherrypy.org">The CherryPy Homepage</a></li>
            <li><a href="http://www.python.org">The Python Homepage</a></li>
        </ul>
        <p>You can check out some extra useful
        links <a href="./extra/">here</a>.</p>
        <p>[<a href="../">Return</a>]</p>
        '''
        return links_html
    index.exposed = True
class ExtraLinksPage:
    """Nested page of extra links, reached via LinksPage at ./extra/."""

    def index(self):
        """Render the extra links plus a relative link back to the parent."""
        extra = '''
        <p>Here are some extra useful links:</p>
        <ul>
            <li><a href="http://del.icio.us">del.icio.us</a></li>
            <li><a href="http://www.mornography.de">Hendrik's weblog</a></li>
        </ul>
        <p>[<a href="../">Return to links page</a>]</p>'''
        return extra
    index.exposed = True
# Of course we can also mount request handler objects right here!
# Build the site tree: / -> HomePage, /joke/ -> JokePage, /links/ -> LinksPage.
root = HomePage()
root.joke = JokePage()
root.links = LinksPage()
# Remember, we don't need to mount ExtraLinksPage here, because
# LinksPage does that itself on initialization. In fact, there is
# no reason why you shouldn't let your root object take care of
# creating all contained request handler objects.
import os.path
# Shared tutorial config file living next to this script.
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
    # CherryPy always starts with app.root when trying to map request URIs
    # to objects, so we need to mount a request handler root. A request
    # to '/' will be mapped to HelloWorld().index().
    cherrypy.quickstart(root, config=tutconf)
else:
    # This branch is for the test suite; you can ignore it.
    cherrypy.tree.mount(root, config=tutconf)
| Python |
"""
Tutorial: HTTP errors
HTTPError is used to return an error response to the client.
CherryPy has lots of options regarding how such errors are
logged, displayed, and formatted.
"""
import os
localDir = os.path.dirname(__file__)
curpath = os.path.normpath(os.path.join(os.getcwd(), localDir))
import cherrypy
class HTTPErrorDemo(object):
    """Demonstrates raising HTTPError and customizing error pages."""

    # Set a custom response for 403 errors.
    _cp_config = {'error_page.403' : os.path.join(curpath, "custom_error.html")}

    def index(self):
        """Render links that each trigger a different error demo."""
        # display some links that will result in errors
        tracebacks = cherrypy.request.show_tracebacks
        if tracebacks:
            trace = 'off'
        else:
            trace = 'on'
        return """
        <html><body>
            <p>Toggle tracebacks <a href="toggleTracebacks">%s</a></p>
            <p><a href="/doesNotExist">Click me; I'm a broken link!</a></p>
            <p><a href="/error?code=403">Use a custom error page from a file.</a></p>
            <p>These errors are explicitly raised by the application:</p>
            <ul>
                <li><a href="/error?code=400">400</a></li>
                <li><a href="/error?code=401">401</a></li>
                <li><a href="/error?code=402">402</a></li>
                <li><a href="/error?code=500">500</a></li>
            </ul>
            <p><a href="/messageArg">You can also set the response body
            when you raise an error.</a></p>
        </body></html>
        """ % trace
    index.exposed = True

    def toggleTracebacks(self):
        """Flip the global show_tracebacks flag, then return to the index."""
        # simple function to toggle tracebacks on and off
        tracebacks = cherrypy.request.show_tracebacks
        cherrypy.config.update({'request.show_tracebacks': not tracebacks})
        # redirect back to the index
        raise cherrypy.HTTPRedirect('/')
    toggleTracebacks.exposed = True

    def error(self, code):
        """Raise an HTTPError with the status given in the query string."""
        # raise an error based on the get query
        raise cherrypy.HTTPError(status = code)
    error.exposed = True

    def messageArg(self):
        """Raise a 500 whose body carries a custom message."""
        # BUG FIX: "wil" -> "will" in the user-visible message text.
        message = ("If you construct an HTTPError with a 'message' "
                   "argument, it will be placed on the error page "
                   "(underneath the status line by default).")
        raise cherrypy.HTTPError(500, message=message)
    messageArg.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HTTPErrorDemo(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(HTTPErrorDemo(), config=tutconf)
| Python |
"""
Tutorial - Hello World
The most basic (working) CherryPy application possible.
"""
# Import CherryPy global namespace
import cherrypy
class HelloWorld:
    """Sample request handler class."""

    def index(self):
        """Handle the root URI ("/"); the return value becomes the body.

        This is tutorial lesson 01, so the response is as simple as it gets.
        """
        greeting = "Hello world!"
        return greeting
    # CherryPy only publishes methods whose `exposed` attribute is True.
    index.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HelloWorld(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(HelloWorld(), config=tutconf)
| Python |
'''
Bonus Tutorial: Using SQLObject
This is a silly little contacts manager application intended to
demonstrate how to use SQLObject from within a CherryPy2 project. It
also shows how to use inline Cheetah templates.
SQLObject is an Object/Relational Mapper that allows you to access
data stored in an RDBMS in a pythonic fashion. You create data objects
as Python classes and let SQLObject take care of all the nasty details.
This code depends on the latest development version (0.6+) of SQLObject.
You can get it from the SQLObject Subversion server. You can find all
necessary information at <http://www.sqlobject.org>. This code will NOT
work with the 0.5.x version advertised on their website!
This code also depends on a recent version of Cheetah. You can find
Cheetah at <http://www.cheetahtemplate.org>.
After starting this application for the first time, you will need to
access the /reset URI in order to create the database table and some
sample data. Accessing /reset again will drop and re-create the table,
so you may want to be careful. :-)
This application isn't supposed to be fool-proof, it's not even supposed
to be very GOOD. Play around with it some, browse the source code, smile.
:)
-- Hendrik Mans <hendrik@mans.de>
'''
import cherrypy
from Cheetah.Template import Template
from sqlobject import *
# configure your database connection here
__connection__ = 'mysql://root:@localhost/test'
# this is our (only) data class.
class Contact(SQLObject):
    # Declarative SQLObject columns; notNone=True maps to NOT NULL and
    # default='' lets optional fields be omitted on creation.
    lastName = StringCol(length = 50, notNone = True)
    firstName = StringCol(length = 50, notNone = True)
    phone = StringCol(length = 30, notNone = True, default = '')
    email = StringCol(length = 30, notNone = True, default = '')
    url = StringCol(length = 100, notNone = True, default = '')
class ContactManager:
    """CherryPy handler exposing list/edit/store/delete pages for Contact."""
    def index(self):
        """List every stored contact with mailto/edit/delete links."""
        # Let's display a list of all stored contacts.
        contacts = Contact.select()
        template = Template('''
        <h2>All Contacts</h2>
        #for $contact in $contacts
        <a href="mailto:$contact.email">$contact.lastName, $contact.firstName</a>
        [<a href="./edit?id=$contact.id">Edit</a>]
        [<a href="./delete?id=$contact.id">Delete</a>]
        <br/>
        #end for
        <p>[<a href="./edit">Add new contact</a>]</p>
        ''', [locals(), globals()])
        return template.respond()
    index.exposed = True
    def edit(self, id = 0):
        """Show the edit form; id > 0 edits an existing contact, else new."""
        # we really want id as an integer. Since GET/POST parameters
        # are always passed as strings, let's convert it.
        id = int(id)
        if id > 0:
            # if an id is specified, we're editing an existing contact.
            contact = Contact.get(id)
            title = "Edit Contact"
        else:
            # if no id is specified, we're entering a new contact.
            contact = None
            title = "New Contact"
        # In the following template code, please note that we use
        # Cheetah's $getVar() construct for the form values. We have
        # to do this because contact may be set to None (see above).
        template = Template('''
        <h2>$title</h2>
        <form action="./store" method="POST">
        <input type="hidden" name="id" value="$id" />
        Last Name: <input name="lastName" value="$getVar('contact.lastName', '')" /><br/>
        First Name: <input name="firstName" value="$getVar('contact.firstName', '')" /><br/>
        Phone: <input name="phone" value="$getVar('contact.phone', '')" /><br/>
        Email: <input name="email" value="$getVar('contact.email', '')" /><br/>
        URL: <input name="url" value="$getVar('contact.url', '')" /><br/>
        <input type="submit" value="Store" />
        </form>
        ''', [locals(), globals()])
        return template.respond()
    edit.exposed = True
    def delete(self, id):
        """Delete the contact with the given id."""
        # Delete the specified contact
        contact = Contact.get(int(id))
        contact.destroySelf()
        return 'Deleted. <a href="./">Return to Index</a>'
    delete.exposed = True
    def store(self, lastName, firstName, phone, email, url, id = None):
        """Persist submitted form data: update when id > 0, else insert."""
        if id and int(id) > 0:
            # If an id was specified, update an existing contact.
            contact = Contact.get(int(id))
            # We could set one field after another, but that would
            # cause multiple UPDATE clauses. So we'll just do it all
            # in a single pass through the set() method.
            contact.set(
                lastName = lastName,
                firstName = firstName,
                phone = phone,
                email = email,
                url = url)
        else:
            # Otherwise, add a new contact.
            contact = Contact(
                lastName = lastName,
                firstName = firstName,
                phone = phone,
                email = email,
                url = url)
        return 'Stored. <a href="./">Return to Index</a>'
    store.exposed = True
    def reset(self):
        """Drop and re-create the table, then insert one sample row."""
        # Drop existing table
        Contact.dropTable(True)
        # Create new table
        Contact.createTable()
        # Create some sample data
        Contact(
            firstName = 'Hendrik',
            lastName = 'Mans',
            email = 'hendrik@mans.de',
            phone = '++49 89 12345678',
            url = 'http://www.mornography.de')
        return "reset completed!"
    reset.exposed = True
print("If you're running this application for the first time, please go to http://localhost:8080/reset once in order to create the database!")
cherrypy.quickstart(ContactManager())
| Python |
"""
Tutorial: File upload and download
Uploads
-------
When a client uploads a file to a CherryPy application, it's placed
on disk immediately. CherryPy will pass it to your exposed method
as an argument (see "myFile" below); that arg will have a "file"
attribute, which is a handle to the temporary uploaded file.
If you wish to permanently save the file, you need to read()
from myFile.file and write() somewhere else.
Note the use of 'enctype="multipart/form-data"' and 'input type="file"'
in the HTML which the client uses to upload the file.
Downloads
---------
If you wish to send a file to the client, you have two options:
First, you can simply return a file-like object from your page handler.
CherryPy will read the file and serve it as the content (HTTP body)
of the response. However, that doesn't tell the client that
the response is a file to be saved, rather than displayed.
Use cherrypy.lib.static.serve_file for that; it takes four
arguments:
serve_file(path, content_type=None, disposition=None, name=None)
Set "name" to the filename that you expect clients to use when they save
your file. Note that the "name" argument is ignored if you don't also
provide a "disposition" (usually "attachment"). You can manually set
"content_type", but be aware that if you also use the encoding tool, it
may choke if the file extension is not recognized as belonging to a known
Content-Type. Setting the content_type to "application/x-download" works
in most cases, and should prompt the user with an Open/Save dialog in
popular browsers.
"""
import os
localDir = os.path.dirname(__file__)
absDir = os.path.join(os.getcwd(), localDir)
import cherrypy
from cherrypy.lib import static
class FileDemo(object):
    """Demonstrates file upload and download with CherryPy."""

    def index(self):
        """Render a page with an upload form and a download link."""
        page = """
        <html><body>
            <h2>Upload a file</h2>
            <form action="upload" method="post" enctype="multipart/form-data">
            filename: <input type="file" name="myFile" /><br />
            <input type="submit" />
            </form>
            <h2>Download a file</h2>
            <a href='download'>This one</a>
        </body></html>
        """
        return page
    index.exposed = True

    def upload(self, myFile):
        """Report the size, name and MIME type of an uploaded file.

        The upload is read in fixed-size chunks so arbitrarily large
        files never have to fit in memory at once; only the byte count
        is accumulated.
        """
        out = """<html>
        <body>
            myFile length: %s<br />
            myFile filename: %s<br />
            myFile mime-type: %s
        </body>
        </html>"""
        size = 0
        while True:
            chunk = myFile.file.read(8192)
            if not chunk:
                break
            size += len(chunk)
        return out % (size, myFile.filename, myFile.content_type)
    upload.exposed = True

    def download(self):
        """Serve the sample PDF as an attachment (forces a save dialog)."""
        target = os.path.join(absDir, "pdf_file.pdf")
        return static.serve_file(target, "application/x-download",
                                 "attachment", os.path.basename(target))
    download.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(FileDemo(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(FileDemo(), config=tutconf)
| Python |
"""
Bonus Tutorial: Using generators to return result bodies
Instead of returning a complete result string, you can use the yield
statement to return one result part after another. This may be convenient
in situations where using a template package like CherryPy or Cheetah
would be overkill, and messy string concatenation too uncool. ;-)
"""
import cherrypy
class GeneratorDemo:
    """Builds the response body incrementally with a generator."""

    def header(self):
        """Common page opening markup."""
        return "<html><body><h2>Generators rule!</h2>"

    def footer(self):
        """Common page closing markup."""
        return "</body></html>"

    def index(self):
        """Yield the page piece by piece instead of one big string."""
        # Made-up users, purely for presentation purposes.
        names = ['Remi', 'Carlos', 'Hendrik', 'Lorenzo Lamas']
        parts = [self.header(), "<h3>List of users:</h3>"]
        parts.extend("%s<br/>" % name for name in names)
        parts.append(self.footer())
        # Each yielded part becomes one chunk of the response body.
        for part in parts:
            yield part
    index.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(GeneratorDemo(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(GeneratorDemo(), config=tutconf)
| Python |
"""
Tutorial - The default method
Request handler objects can implement a method called "default" that
is called when no other suitable method/object could be found.
Essentially, if CherryPy2 can't find a matching request handler object
for the given request URI, it will use the default method of the object
located deepest on the URI path.
Using this mechanism you can easily simulate virtual URI structures
by parsing the extra URI string, which you can access through
cherrypy.request.virtualPath.
The application in this tutorial simulates an URI structure looking
like /users/<username>. Since the <username> bit will not be found (as
there are no matching methods), it is handled by the default method.
"""
import cherrypy
class UsersPage:
    """Demonstrates the 'default' fallback handler for /users/<name>."""

    def index(self):
        """List links to the known (made-up) users."""
        return '''
            <a href="./remi">Remi Delon</a><br/>
            <a href="./hendrik">Hendrik Mans</a><br/>
            <a href="./lorenzo">Lorenzo Lamas</a><br/>
        '''
    index.exposed = True

    def default(self, user):
        """Handle any /users/<name> URI that matched no concrete method.

        `user` is the unmapped path segment. A real application would do
        a database lookup here; a dict stands in for one.
        """
        known_users = {
            'remi': "Remi Delon, CherryPy lead developer",
            'hendrik': "Hendrik Mans, CherryPy co-developer & crazy German",
            'lorenzo': "Lorenzo Lamas, famous actor and singer!",
        }
        out = known_users.get(user, "Unknown user. :-(")
        return '%s (<a href="./">back</a>)' % out
    default.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(UsersPage(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(UsersPage(), config=tutconf)
| Python |
"""
Tutorial - Multiple methods
This tutorial shows you how to link to other methods of your request
handler.
"""
import cherrypy
class HelloWorld:
    """Shows how one handler method links to another."""

    def index(self):
        """Front page: links to the showMessage handler below."""
        teaser = 'We have an <a href="showMessage">important message</a> for you!'
        return teaser
    index.exposed = True

    def showMessage(self):
        """The page the front page links to."""
        return "Hello world!"
    showMessage.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HelloWorld(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(HelloWorld(), config=tutconf)
| Python |
"""
Tutorial - Passing variables
This tutorial shows you how to pass GET/POST variables to methods.
"""
import cherrypy
class WelcomePage:
    """Shows how GET/POST variables become method parameters."""

    def index(self):
        """Ask the visitor for their name."""
        return '''
            <form action="greetUser" method="GET">
            What is your name?
            <input type="text" name="name" />
            <input type="submit" />
            </form>'''
    index.exposed = True

    def greetUser(self, name = None):
        """Greet the visitor, or nag if the name is missing or empty.

        CherryPy passes every GET/POST variable as a keyword argument:
        `name` stays None when the parameter is absent entirely, and is
        '' when the field was submitted empty — the two cases get
        different nag messages.
        """
        if name:
            # Greet the user!
            return "Hey %s, what's up?" % name
        if name is None:
            # No name was specified at all.
            return 'Please enter your name <a href="./">here</a>.'
        # Name was supplied but empty.
        return 'No, really, enter your name <a href="./">here</a>.'
    greetUser.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(WelcomePage(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(WelcomePage(), config=tutconf)
| Python |
"""
Tutorial - Sessions
Storing session data in CherryPy applications is very easy: cherrypy
provides a dictionary called "session" that represents the session
data for the current user. If you use RAM based sessions, you can store
any kind of object into that dictionary; otherwise, you are limited to
objects that can be pickled.
"""
import cherrypy
class HitCounter:
    """Counts this visitor's page views using CherryPy's session tool."""
    # Enable cookie-based sessions for every handler in this class.
    _cp_config = {'tools.sessions.on': True}
    def index(self):
        """Bump the per-session counter and report the new value."""
        # Increase the silly hit counter
        count = cherrypy.session.get('count', 0) + 1
        # Store the new value in the session dictionary
        cherrypy.session['count'] = count
        # And display a silly hit count message!
        return '''
            During your current session, you've viewed this
            page %s times! Your life is a patio of fun!
        ''' % count
    index.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HitCounter(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(HitCounter(), config=tutconf)
| Python |
# This is used in test_config to test unrepr of "from A import B"
thing2 = object() | Python |
"""
Tutorial - Object inheritance
You are free to derive your request handler classes from any base
class you wish. In most real-world applications, you will probably
want to create a central base class used for all your pages, which takes
care of things like printing a common page header and footer.
"""
import cherrypy
class Page:
    """Base class for all pages: provides the shared header/footer markup."""

    # Default page title; subclasses override this class attribute.
    title = 'Untitled Page'

    def header(self):
        """Return the common page opening with the title filled in."""
        # BUG FIX: the closing tag was written as a second "<head>",
        # leaving the head element unclosed; it must be "</head>".
        return '''
            <html>
            <head>
                <title>%s</title>
            </head>
            <body>
            <h2>%s</h2>
        ''' % (self.title, self.title)

    def footer(self):
        """Return the common page closing."""
        return '''
        </body>
        </html>
        '''
    # Note that header and footer don't get their exposed attributes
    # set to True. This isn't necessary since the user isn't supposed
    # to call header or footer directly; instead, we'll call them from
    # within the actually exposed handler methods defined in this
    # class' subclasses.
class HomePage(Page):
    """Tutorial 5 front page; shares header/footer with the Page base."""

    # Override the title used by the inherited header().
    title = 'Tutorial 5'

    def __init__(self):
        # Mount the subpage at ./another/
        self.another = AnotherPage()

    def index(self):
        """Assemble the page from the inherited header and footer."""
        body = '''
            <p>
            Isn't this exciting? There's
            <a href="./another/">another page</a>, too!
            </p>
        '''
        return self.header() + body + self.footer()
    index.exposed = True
class AnotherPage(Page):
    """Second demo page; only the title differs from the base markup."""

    title = 'Another Page'

    def index(self):
        """Assemble the page from the inherited header and footer."""
        body = '''
            <p>
            And this is the amazing second page!
            </p>
        '''
        return self.header() + body + self.footer()
    index.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HomePage(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(HomePage(), config=tutconf)
| Python |
###**********************************************###
### Unique Keychain Python Code ###
### ECE 387, Miami University, Spring 2013 ###
### Created By: Andrew Heldt, Lee Mondini and ###
### Shiloh Womack ###
###**********************************************###
import serial, sys, feedparser, pprint, time, imaplib
#Settings - Change these to match your account details
USERNAME="387keychain"
PASSWORD="keychain2013"
SERIALPORT = 'COM4'
# Single-byte flags the Arduino sketch understands on the serial line.
NO_MAIL = b'm'
YES_MAIL = b'n'
# Set up COM4 - Our arduino port
try:
    ser = serial.Serial(SERIALPORT, 9600)
except serial.SerialException:
    print ("no device connected - exiting")
    sys.exit()
# Need to figure out how to get this to keep running forever
# NOTE(review): only polls 10 times, and opens a fresh IMAP4_SSL
# connection each iteration without logout() — confirm whether a
# long-running loop reusing a single connection was intended.
for num in range(0,10):
    obj = imaplib.IMAP4_SSL('imap.gmail.com','993')
    obj.login(USERNAME,PASSWORD)
    obj.select()
    # Count messages flagged unseen in the selected (default) mailbox.
    newmails = len(obj.search(None, 'UnSeen')[1][0].split())
    time.sleep(5)
    # Output data to serial port
    if newmails > 0:
        ser.write(YES_MAIL)
    else:
        ser.write(NO_MAIL)
#print data to terminal
# Close serial port
ser.close()
| Python |
#!/usr/bin/python
import pyglet
from anim3d import *
#3d projection setup func
def setup_gl(dims):
    """Set up a perspective projection and a fixed camera for this frame.

    Pushes both the projection and modelview matrices; on_draw pops them
    after rendering. `dims` is the window's (width, height), used for
    the aspect ratio.
    """
    # NOTE(review): `angle` is declared global but never used here.
    global angle
    glMatrixMode(GL_PROJECTION)
    glPushMatrix()
    glLoadIdentity()
    gluPerspective(40, float(dims[0])/dims[1], 0.1, 100)
    glMatrixMode(GL_MODELVIEW)
    glPushMatrix()
    glLoadIdentity()
    gluLookAt(0, -20, 10, 0, 0, 0, 0, 1, 0)
#dummy timer func
def tfunc(dt):
    # Scheduled only so pyglet redraws at ~30 fps; does no work itself.
    pass
#Texture related stuff
img = pyglet.image.load('../../../../blendfiles/lowpoly_colored.tga')
tex = img.get_texture()
#model related stuff
model = Model('../../../../blendfiles/lowpoly_tris.txt')
obj = AnimObject()
obj.setModel(model)
obj.setAction("run")
#windowing/pyglet stuff
w = pyglet.window.Window(640, 480)
clock = pyglet.clock.schedule_interval(tfunc, 1/30.0)
fps_display = pyglet.clock.ClockDisplay()
#opengl init stuff
glClearColor(0.2, 0.2, 0.2, 1)
glEnable(GL_DEPTH_TEST)
@w.event
def on_draw():
    """Pyglet draw callback: render four copies of the animated model."""
    w.clear()
    setup_gl(w.get_size())
    glEnable(GL_TEXTURE_2D)
    glBindTexture(GL_TEXTURE_2D, tex.id)
    # One copy at the origin, three more offset via the modelview stack.
    obj.renderObject()
    glPushMatrix();
    glTranslatef(-5, 5, 1);
    obj.renderObject();
    glTranslatef(10, 5, 1);
    obj.renderObject();
    glTranslatef(-5, 5, 1);
    obj.renderObject();
    glPopMatrix();
    glDisable(GL_TEXTURE_2D)
    # Advance the animation by one frame per redraw.
    obj.cur_frame += 1
    # Undo the matrix pushes done in setup_gl().
    glMatrixMode(GL_PROJECTION)
    glPopMatrix()
    glMatrixMode(GL_MODELVIEW)
    glPopMatrix()
    fps_display.draw()
pyglet.app.run()
| Python |
#!/usr/bin/python
import pyglet
from anim3d import *
#3d projection setup func
def setup_gl(dims):
    """Set up a perspective projection and a fixed camera for this frame.

    Pushes both the projection and modelview matrices; on_draw pops them
    after rendering. `dims` is the window's (width, height), used for
    the aspect ratio.
    """
    # NOTE(review): `angle` is declared global but never used here.
    global angle
    glMatrixMode(GL_PROJECTION)
    glPushMatrix()
    glLoadIdentity()
    gluPerspective(40, float(dims[0])/dims[1], 0.1, 100)
    glMatrixMode(GL_MODELVIEW)
    glPushMatrix()
    glLoadIdentity()
    gluLookAt(0, -20, 10, 0, 0, 0, 0, 1, 0)
#dummy timer func
def tfunc(dt):
    # Scheduled only so pyglet redraws at ~30 fps; does no work itself.
    pass
#Texture related stuff
img = pyglet.image.load('../../../../blendfiles/lowpoly_colored.tga')
tex = img.get_texture()
#model related stuff
model = Model('../../../../blendfiles/lowpoly_tris.txt')
obj = AnimObject()
obj.setModel(model)
obj.setAction("run")
#windowing/pyglet stuff
w = pyglet.window.Window(640, 480)
clock = pyglet.clock.schedule_interval(tfunc, 1/30.0)
fps_display = pyglet.clock.ClockDisplay()
#opengl init stuff
glClearColor(0.2, 0.2, 0.2, 1)
glEnable(GL_DEPTH_TEST)
@w.event
def on_draw():
    """Pyglet draw callback: render four copies of the animated model."""
    w.clear()
    setup_gl(w.get_size())
    glEnable(GL_TEXTURE_2D)
    glBindTexture(GL_TEXTURE_2D, tex.id)
    # One copy at the origin, three more offset via the modelview stack.
    obj.renderObject()
    glPushMatrix();
    glTranslatef(-5, 5, 1);
    obj.renderObject();
    glTranslatef(10, 5, 1);
    obj.renderObject();
    glTranslatef(-5, 5, 1);
    obj.renderObject();
    glPopMatrix();
    glDisable(GL_TEXTURE_2D)
    # Advance the animation by one frame per redraw.
    obj.cur_frame += 1
    # Undo the matrix pushes done in setup_gl().
    glMatrixMode(GL_PROJECTION)
    glPopMatrix()
    glMatrixMode(GL_MODELVIEW)
    glPopMatrix()
    fps_display.draw()
pyglet.app.run()
| Python |
from pyglet.gl import *
import math
class Face:
    """A triangle: three vertex indices plus their texture coordinates."""

    def __init__(self, ind, uv):
        self.uv = uv
        self.indices = ind
class KeyFrame:
    """One keyframe: a frame number plus a flat x,y,z list of vertex coords.

    Reads, from the already-open model file `fp`, one line holding the
    frame number followed by `nv` lines of "x y z" vertex positions.
    """
    def __init__(self, fp, nv):
        self.verts = []
        self.frame_num = int(fp.readline())
        for i in xrange(nv):
            (v1, v2, v3) = fp.readline().split(' ')
            self.verts.append(float(v1))
            self.verts.append(float(v2))
            self.verts.append(float(v3))
class Action:
    """A named animation: a sequence of KeyFrames read from the model file."""
    def __init__(self, fp, nv):
        # Header line: "<name> <number-of-keyframes>".
        (self.name, self.num_frames) = fp.readline().split(' ')
        self.num_frames = int(self.num_frames)
        self.kf = []
        for j in xrange(self.num_frames):
            k = KeyFrame(fp, nv)
            self.kf.append(k)
# Sentinel initializer used with map() below: marks a UV slot as
# "not yet assigned" so Model.__init__ can detect the first UV seen.
def init_uv(v):
    return -1
class Model:
    """Loads a 3danim text model: faces, per-vertex UVs and keyframe actions.

    File layout (see the Blender exporter): a "<nv> <nf> <na>" header,
    then per face three vertex indices plus three "s t" UV lines, then
    <na> action blocks of keyframed vertex positions.
    """
    def __init__(self, fname):
        fp = open(fname)
        (self.nv, self.nf, self.na) = fp.readline().split(' ')
        self.nv = int(self.nv)
        self.nf = int(self.nf)
        self.na = int(self.na)
        self.indices = ()
        # Two UV floats per vertex, initialised to -1 ("unset").
        self.uv = range(0, 2*self.nv)
        self.uv = map(init_uv, self.uv)
        # Vertices duplicated because they carry more than one UV pair;
        # renderObject mirrors this list when building the vertex array.
        self.repeats = ()
        #load the model data
        for i in xrange(self.nf):
            line = fp.readline().rstrip(' \n')
            indx = map(int, line.split(' '))
            for j in xrange(3):
                (s, t) = map(float, fp.readline().split(' '))
                if(self.uv[int(indx[j])*2] < 0):
                    # First UV seen for this vertex: record it in place.
                    self.uv[int(indx[j])*2] = s
                    self.uv[int(indx[j])*2+1] = t
                # NOTE(review): this branch fires when the new UV *matches*
                # the stored one (difference below tolerance), duplicating
                # the vertex anyway, while a genuinely different UV is
                # silently dropped — the condition looks inverted. The
                # renderer consumes the same repeats list consistently,
                # so confirm intent before changing.
                elif math.fabs(self.uv[int(indx[j])*2]-s) < 0.0001 and math.fabs(self.uv[int(indx[j])*2+1]-t) < 0.0001:
                    self.repeats += (int(indx[j]),)
                    self.uv += [s, t]
                    indx[j] = self.nv + (len(self.repeats)-1)
            self.indices += (indx[0], indx[1], indx[2])
        #load the animation data
        self.acts = []
        for i in xrange(self.na):
            act = Action(fp, self.nv)
            self.acts.append(act)
        fp.close()
        print "Model: ", fname
        print "Faces: %d Anims: %d" % (self.nf, len(self.acts))
        for act in self.acts:
            print act.name
def linear_interop(v, v1, t):
    """Linearly interpolate from v to v1 by fraction t (0 gives v, 1 gives v1)."""
    return v + t * (v1 - v)
class AnimObject:
    """A renderable instance of a Model playing one of its actions."""

    def __init__(self):
        # No model or action bound yet; cur_frame tracks animation time.
        self.model = None
        self.act = None
        self.cur_frame = 0

    def loadModel(self, fname):
        # Unimplemented placeholder; attach a loaded Model via setModel().
        pass

    def setModel(self, model):
        """Attach a loaded Model to this object."""
        self.model = model

    def setAction(self, actname):
        """Select the named action, falling back to the model's first one.

        Rewinds cur_frame to the chosen action's first keyframe.
        """
        self.act = None
        for i in range(self.model.na):
            a = self.model.acts[i]
            if(actname == a.name):
                self.act = a
                self.cur_frame = a.kf[0].frame_num
        if(self.act == None):
            # BUG FIX: was `self.obj.model.acts[0]` — AnimObject has no
            # `obj` attribute, so an unknown action name raised
            # AttributeError instead of falling back. Also rewind
            # cur_frame so a stale frame from a previous action cannot
            # confuse renderObject's keyframe search.
            self.act = self.model.acts[0]
            self.cur_frame = self.act.kf[0].frame_num

    def renderObject(self):
        """Draw the model at cur_frame, interpolating between keyframes."""
        # Wrap around once we run past the action's last keyframe.
        if(self.cur_frame > self.act.kf[self.act.num_frames-1].frame_num):
            self.cur_frame = self.act.kf[0].frame_num+1
        # Find the pair of keyframes bracketing cur_frame.
        n_index = 0
        while (self.cur_frame > self.act.kf[n_index].frame_num):
            n_index += 1
        index = n_index - 1
        v = self.act.kf[index].verts
        v1 = self.act.kf[n_index].verts
        # Fraction of the way from keyframe `index` to `n_index`.
        t = (self.cur_frame - self.act.kf[index].frame_num)/float(self.act.kf[n_index].frame_num - self.act.kf[index].frame_num)
        # list() keeps this working on Python 3, where map() is lazy.
        dv = list(map(linear_interop, v, v1, (t, ) * len(v)))
        # Append copies of the vertices that carry a second UV pair,
        # mirroring the repeats list built by Model.__init__.
        for rep in self.model.repeats:
            dv += (dv[rep*3], dv[rep*3+1], dv[rep*3+2])
        pyglet.graphics.draw_indexed(self.model.nv + len(self.model.repeats), pyglet.gl.GL_TRIANGLES, self.model.indices, ('v3f', dv), ('t2f', self.model.uv))
| Python |
#!BPY
"""
Name: '3Danim (.txt)'
Blender: 243
Group: 'Export'
Tooltip: 'export an animated text format.'
"""
__author__ = 'V Vamsi Krishna'
__version__ = '0.1'
__url__ = ["3danim project, http://code.google.com/p/3danim",
"", "blender", "blenderartists.org"]
__email__ = ["V.Vamsi Krishna, vamsikrishna.v:gmail*com", "3danim export"]
__bpydoc__ = """This script Exports animated 3d models in 3danim text format."""
import Blender
from Blender import *
from Blender.Armature.NLA import *
from Blender.Scene import *
from Blender import Window
def call_back(filename):
    """Export the scene's mesh and armature animation to 3danim text format.

    Invoked by Blender's FileSelector with the chosen output filename.
    Writes a "<nv> <nf> <na>" header, then per face the vertex indices
    and (if present) UVs, then for each action its keyframed vertex data.
    """
    if not(filename.endswith(".txt")):
        filename = filename + ".txt"
    # If the file already exists, ask before overwriting it.
    try:
        fp = open(filename, "r")
        fp.close()
        result = Draw.PupMenu("SaveOver?%t|Yes|No")
        if result == 2:
            return
    except:
        print "Creating File ", filename
        pass
    fp = open(filename, "w")
    #mesh_objs = Blender.Object.GetSelected()
    mesh_objs = Scene.GetCurrent().objects
    mesh_obj = mesh_objs[0]
    armature_obj = mesh_objs[0]
    # Pick out the first mesh and the first armature in the scene.
    for obj in mesh_objs:
        if obj.getType() == "Mesh":
            mesh_obj = obj
        elif obj.getType() == "Armature":
            armature_obj = obj
#        return
    mesh = mesh_obj.getData(False, True)
    # Work on a copy so getFromObject() below can overwrite it per frame.
    mesh = mesh.__copy__()
    num_verts = len(mesh.verts)
    num_faces = len(mesh.faces)
    actions = GetActions()
    print num_verts, num_faces, len(actions.keys())
    # Header: vertex count, face count, action count.
    fp.write("%d %d %d\n" % (num_verts, num_faces, len(actions.keys())))
    for face in mesh.faces:
        buff = ""
        for v in face.verts:
            print v.index,
            buff += v.index.__str__() + " "
        print "\n",
        fp.write(buff+"\n")
        if mesh.faceUV:
            for uv in face.uv:
                fp.write('%.3f %.3f\n' % (uv[0], uv[1]))
    # get the actions defined on the armature,
    # get their keyframes and print the vertex coords;
    # the viewer does a linear interpolation of the vertices
    for key in actions.keys():
        act = actions[key]
        act.setActive(armature_obj)
        keyframes = act.getFrameNumbers()
        print act.getName(), len(keyframes)
        fp.write(act.getName() + " %d" % len(keyframes) + "\n")
        for frame in keyframes:
            Blender.Set("curframe", frame)
            print frame
            fp.write("%d\n" % frame)
            # Re-evaluate the deformed mesh at this frame.
            mesh.getFromObject(mesh_obj.name)
            #-------------------------------
            #for face in mesh.faces:
            #    for v in face.verts:
            #        print v.index,
            #    print ""
            #-------------------------------
            for vert in mesh.verts:
                fp.write('%.3f %.3f %.3f\n' % (vert.co[0], vert.co[1], vert.co[2]))
    fp.close()
defaultFileName = Blender.Get('filename')
Window.FileSelector(call_back, '3Danim Export *.txt', defaultFileName.replace('.blend', '.txt'))
| Python |
bl_info = {
"name": "Export 3DAnim Format(.txt)",
"author": "V.Vamsi Krishna(vkrishna)",
"version": (1, 0),
"blender": (2, 64, 0),
"api": 40000,
"location": "File > Export > 3DAnim (.txt)",
"description": "Export 3dAnim (.txt)",
"warning": "",
"category": "Import-Export"}
import bpy
from bpy.props import *
from bpy_extras.io_utils import ExportHelper
import math
def getFrameNumbers(act):
    """Return the keyframe frame numbers of an action.

    Only the first F-curve is consulted; the exporter assumes every
    channel is keyed on the same frames.
    """
    curve = act.fcurves[0]
    return [math.ceil(point.co[0]) for point in curve.keyframe_points]
def call_back(operator, context, filename):
    """Write the selected mesh and its armature's actions to `filename`.

    3danim text format: a "<nv> <nf> <na>" header, then per face the
    vertex indices and three "s t" UV lines, then for every action its
    name, keyframe count, and the deformed vertex positions per frame.
    """
    if not(filename.endswith(".txt")):
        filename = filename + ".txt"
    fp = open(filename, "w")
    mesh_objs = context.selected_objects
    mesh_obj = mesh_objs[0]
    modifier = mesh_obj.modifiers[0] #get modifier associated with object
    # NOTE(review): assumes the first modifier is the armature modifier
    # and that its name matches the armature object's scene name —
    # confirm for scenes where they differ.
    armature_obj = context.scene.objects[modifier.name]
    # Evaluated (deformed) copy of the mesh at the current frame.
    mesh = mesh_obj.to_mesh(context.scene, True, 'RENDER')
    num_verts = len(mesh.vertices)
    num_faces = len(mesh.polygons)
    actions = bpy.data.actions #get all actions from scene. Assume one obj per scene.
    uv_layer = mesh.uv_layers.active.data
    #start writing the model faces
    print (num_verts, num_faces, len(actions))
    fp.write("%d %d %d\n" % (num_verts, num_faces, len(actions)))
    for (i,face) in enumerate(mesh.polygons):
        buff = ""
        for v in face.vertices:
            print (v, )
            buff += v.__str__() + " "
        print ("\n",)
        fp.write(buff+"\n")
        # One UV pair per face loop, in loop order.
        for loop_index in range(face.loop_start, face.loop_start + face.loop_total):
            uv = uv_layer[loop_index].uv
            fp.write('%.3f %.3f\n' % (uv[0], uv[1]))
    #remove the mesh created above.
    bpy.data.meshes.remove(mesh)
    # get the actions defined on the armature,
    # get their keyframes and print the vertex coords;
    # the viewer does a linear interpolation of the vertices
    for act in actions:
        armature_obj.animation_data.action = act
        keyframes = getFrameNumbers(act)
        print (act.name, len(keyframes))
        fp.write(act.name + " %d" % len(keyframes) + "\n")
        for frame in keyframes:
            context.scene.frame_set(frame)
            print (frame)
            fp.write("%d\n" % frame)
            # Re-evaluate the deformed mesh at this frame, dump it, free it.
            mesh = mesh_obj.to_mesh(context.scene, True, 'RENDER')
            for vert in mesh.vertices:
                fp.write('%.3f %.3f %.3f\n' % (vert.co[0], vert.co[1], vert.co[2]))
            bpy.data.meshes.remove(mesh)
    #finished writing animation data
    fp.close() #close file
    return {'FINISHED'}
class Export3DAnimModel(bpy.types.Operator, ExportHelper):
    """File-browser operator that writes the 3DAnim text export."""
    bl_idname = "filename.txt"
    bl_label = "3DAnim Model (.txt)"
    filename_ext = ".txt"

    def execute(self, context):
        # ExportHelper normally fills in self.filepath; guard anyway.
        if not self.filepath:
            raise Exception("Filepath not set")
        return call_back(self, context, self.filepath)

    def invoke(self, context, event):
        if not self.filepath:
            self.filepath = bpy.path.ensure_ext(bpy.data.filepath, ".txt")
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}
def menu_func(self, context):
    """File > Export menu entry that launches the exporter operator."""
    layout = self.layout
    layout.operator(Export3DAnimModel.bl_idname)
def register():
    # Register every class in this module, then hook the export menu.
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
    # Mirror of register(): drop the classes and the menu entry.
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_export.remove(menu_func)
# Allow running the add-on directly from Blender's text editor.
if __name__ == "__main__":
    register()
# ---------- file boundary: next source module ----------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
from os import path
from Cheetah.Template import Template
def main():
file = path.join(path.split(__file__)[0], 'home.html')
print 'Compile template %s...' % file
cc = Template.compile(source=None, file=file, returnAClass=False, moduleName='autogen', className='CompiledTemplate')
target = path.join(path.split(__file__)[0], 'autogen', '__init__.py')
print 'Writing file %s...' % target
f = open(target, 'w')
f.write(cc)
f.close()
from autogen import CompiledTemplate
CompiledTemplate(searchList=[])
print 'Compiled ok.'
# Script entry point: recompile the template when run directly.
if __name__ == '__main__':
    main()
# ---------- file boundary: next source module ----------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
import datetime
from xml.parsers.expat import ParserCreate
# Yahoo! Weather condition codes mapped to Chinese descriptions.
# 3200 is Yahoo's sentinel for "no data available".
codes = {
    0 : u'龙卷风', # tornado
    1 : u'热带风暴', # tropical storm
    2 : u'飓风', # hurricane
    3 : u'风暴', # severe thunderstorms
    4 : u'雷雨', # thunderstorms
    5 : u'雨夹雪', # mixed rain and snow
    6 : u'雨夹冰雹', # mixed rain and sleet
    7 : u'雪夹冰雹', # mixed snow and sleet
    8 : u'冰毛毛雨', # freezing drizzle
    9 : u'毛毛雨', # drizzle
    10 : u'冰雨', # freezing rain
    11 : u'阵雨', # showers
    12 : u'阵雨', # showers
    13 : u'小雪', # snow flurries
    14 : u'小雨雪', # light snow showers
    15 : u'风雪', # blowing snow
    16 : u'下雪', # snow
    17 : u'冰雹', # hail
    18 : u'雨夹雪', # sleet
    19 : u'尘土', # dust
    20 : u'雾', # foggy
    21 : u'霾', # haze
    22 : u'烟雾', # smoky
    23 : u'狂风', # blustery
    24 : u'大风', # windy
    25 : u'寒冷', # cold
    26 : u'多云', # cloudy
    27 : u'多云', # mostly cloudy (night)
    28 : u'多云', # mostly cloudy (day)
    29 : u'局部多云', # partly cloudy (night)
    30 : u'局部多云', # partly cloudy (day)
    31 : u'晴朗', # clear (night)
    32 : u'晴', # sunny
    33 : u'晴朗', # fair (night)
    34 : u'晴朗', # fair (day)
    35 : u'雨夹冰雹', # mixed rain and hail
    36 : u'炎热', # hot
    37 : u'局部雷雨', # isolated thunderstorms
    38 : u'零星雷雨', # scattered thunderstorms
    39 : u'零星雷雨', # scattered thunderstorms
    40 : u'零星阵雨', # scattered showers
    41 : u'大雪', # heavy snow
    42 : u'零星雨夹雪', # scattered snow showers
    43 : u'大雪', # heavy snow
    44 : u'局部多云', # partly cloudy
    45 : u'雷阵雨', # thundershowers
    46 : u'小雪', # snow showers
    47 : u'局部雷雨', # isolated thundershowers
    3200 : u'暂无数据' # not available
}
class Wind(object):
    """Wind section of a weather report, rendered as a JSON-ish string."""
    def __init__(self, chill, direction, speed):
        self.chill = chill
        self.direction = direction
        self.speed = speed
    def __str__(self):
        # Falsy fields (None / empty) are rendered as JSON null.
        fields = (self.chill or "null", self.direction or "null", self.speed or "null")
        return r'{"chill" : %s, "direction" : %s, "speed" : %s}' % fields
    __repr__ = __str__
class Atmosphere(object):
    """Atmosphere section: humidity, visibility, pressure, rising trend."""
    def __init__(self, humidity, visibility, pressure, rising):
        self.humidity = humidity
        self.visibility = visibility
        self.pressure = pressure
        self.rising = rising
    def __str__(self):
        # Falsy fields are rendered as JSON null.
        fields = (self.humidity or "null", self.visibility or "null",
                  self.pressure or "null", self.rising or "null")
        return r'{"humidity" : %s, "visibility" : %s, "pressure" : %s, "rising": %s}' % fields
    __repr__ = __str__
class Astronomy(object):
    """Sunrise/sunset times, already converted to 24-hour strings."""
    def __init__(self, sunrise, sunset):
        self.sunrise = sunrise
        self.sunset = sunset
    def __str__(self):
        # Times are quoted because they are plain strings like "6:12".
        fmt = r'{"sunrise" : "%s", "sunset": "%s"}'
        return fmt % (self.sunrise, self.sunset)
    __repr__ = __str__
class Forecast(object):
    """One forecast day, built from an RSS element such as:
    <yweather:forecast day="Wed" date="30 Jun 2010" low="24" high="30" text="Mostly Cloudy" code="28" />
    """
    def __init__(self, day, date, low, high, code):
        self.day = day
        self.date = date
        self.low = low
        self.high = high
        self.code = code
    def __str__(self):
        # UTF-8 bytes, because the surrounding template is a byte string.
        text = codes[self.code].encode('utf-8')
        large = "http://weather.china.xappengine.com/static/w/img/d%s.png" % self.code
        small = "http://weather.china.xappengine.com/static/w/img/s%s.png" % self.code
        return '{"date" : "%s", "day" : %s, "code" : %s, "text" : "%s", "low" : %d, "high" : %d, "image_large" : "%s", "image_small" : "%s"}' % (
            self.date, self.day, self.code, text, self.low, self.high, large, small)
    __repr__ = __str__
def index_of(seq, data):
    """Return the index of *data* in *seq*, or None when absent.

    The first parameter was renamed from 'list' so it no longer shadows
    the builtin; every call site in this module passes it positionally.
    """
    for i, item in enumerate(seq):
        if item == data:
            return i
    return None
def get_day(day):
    """Map an English weekday abbreviation to 0 (Sun) .. 6 (Sat)."""
    weekdays = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')
    return index_of(weekdays, day)
def get_date(date):
    """Parse a date string like '30 Jun 2010' into a datetime.date."""
    day, month_name, year = date.split(' ')
    months = ('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    return datetime.date(int(year), index_of(months, month_name), int(day))
def f2c(temp):
    """Convert a Fahrenheit reading (string or number) to rounded Celsius.

    Uses floor(c + 0.5) so negative values round half-up correctly; the
    previous int() truncation turned e.g. 14F into -9 instead of -10.
    """
    c = (float(temp) - 32) * 5 / 9
    return int(math.floor(c + 0.5))
def to_24hour(time):
    """Convert '4:39 pm' to '16:39' (and '12:05 am' to '0:05').

    Strings without an am/pm suffix are returned unchanged. Fixes the
    12 o'clock edge cases: the old code produced '24:30' for
    '12:30 pm' and left '12:05 am' as '12:05'.
    """
    for suffix, offset in ((' am', 0), (' pm', 12)):
        if time.endswith(suffix):
            hh, _, mm = time[:-3].partition(':')
            return '%d:%s' % (int(hh) % 12 + offset, mm)
    return time
class Weather(object):
    """Expat-based parser for a Yahoo! Weather forecast RSS document.

    Feeding the raw XML to __init__ populates pub, wind, atmosphere,
    astronomy and forecasts from the yweather:* elements.
    """
    def char_data(self, text):
        # Only the text of <lastBuildDate> matters; it looks like
        # "Wed, 30 Jun 2010 4:39 pm CST".
        if self.__isLastBuildDate:
            n = text.find(', ')
            text = text[n+2:]
            # Replace the English month name with its number so strptime
            # can parse the remainder with %m.
            n1 = text.find(' ')
            n2 = text.find(' ', n1+1)
            m = text[n1+1:n2]
            month = index_of(('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), m)
            text = text.replace(m, str(month))
            # NOTE(review): only ' CST' timestamps are handled; any other
            # timezone leaves self.pub as None.
            if not text.endswith(' CST'):
                return
            text = text[:-4]
            is_pm = text.endswith(' pm')
            text = text[:-3]
            time = datetime.datetime.strptime(text, '%d %m %Y %I:%M')
            h = time.hour
            if is_pm:
                h = h + 12
            self.pub = '%d-%#02d-%#02d %#02d:%#02d' % (time.year, time.month, time.day, h, time.minute)
    def end_element(self, name):
        if name=='lastBuildDate':
            self.__isLastBuildDate = False
    def start_element(self, name, attrs):
        if name=='lastBuildDate':
            # Flag so char_data() knows the next text chunk is the date.
            self.__isLastBuildDate = True
            return
        if name=='yweather:forecast':
            # Temperatures arrive in Fahrenheit and are converted here.
            self.forecasts.append(Forecast(
                get_day(attrs['day']),
                get_date(attrs['date']),
                f2c(attrs['low']),
                f2c(attrs['high']),
                int(attrs['code'])
            ))
        if name=='yweather:astronomy':
            self.astronomy.sunrise = to_24hour(attrs['sunrise'])
            self.astronomy.sunset = to_24hour(attrs['sunset'])
        if name=='yweather:atmosphere':
            self.atmosphere.humidity = attrs['humidity']
            self.atmosphere.visibility = attrs['visibility']
            self.atmosphere.pressure = attrs['pressure']
            self.atmosphere.rising = attrs['rising']
        if name=='yweather:wind':
            self.wind.chill = attrs['chill']
            self.wind.direction = attrs['direction']
            self.wind.speed = attrs['speed']
    def __init__(self, name, data):
        self.__isLastBuildDate = False
        # City name is stored as UTF-8 bytes for the byte-string template.
        if isinstance(name, unicode):
            name = name.encode('utf-8')
        self.name = name
        self.pub = None
        self.wind = Wind(None, None, None)
        self.atmosphere = Atmosphere(None, None, None, None)
        self.astronomy = Astronomy(None, None)
        self.forecasts = []
        parser = ParserCreate()
        parser.returns_unicode = False
        parser.StartElementHandler = self.start_element
        parser.EndElementHandler = self.end_element
        parser.CharacterDataHandler = self.char_data
        parser.Parse(data)
    def __str__(self):
        # Render as a JSON-ish object; pub may legitimately be absent.
        pub = 'null'
        if self.pub:
            pub = r'"%s"' % self.pub
        return '{"pub" : %s, "name" : "%s", "wind" : %s, "astronomy" : %s, "atmosphere" : %s, "forecasts" : %s}' \
            % (pub, self.name, self.wind, self.astronomy, self.atmosphere, self.forecasts)
    __repr__ = __str__
if __name__=='__main__':
import urllib
url = 'http://weather.yahooapis.com/forecastrss?u=c&w=2143712'
result = urllib.urlopen(url).read()
print Weather(result)
# ---------- file boundary: next source module ----------
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
##################################################
## DEPENDENCIES
import sys
import os
import os.path
import __builtin__
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
# Short aliases for Cheetah's NameMapper lookup helpers used in respond().
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Compiler provenance recorded by Cheetah at generation time.
__CHEETAH_version__ = '2.4.1'
__CHEETAH_versionTuple__ = (2, 4, 1, 'final', 0)
__CHEETAH_genTime__ = 1284450634.7130001
__CHEETAH_genTimestamp__ = 'Tue Sep 14 15:50:34 2010'
__CHEETAH_src__ = 'D:\\workspace\\python\\weather-china\\src\\home.html'
__CHEETAH_srcLastModified__ = 'Wed Jul 28 10:35:46 2010'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# Refuse to run against a Cheetah runtime older than the compiler used.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
        'This template was compiled with Cheetah version'
        ' %s. Templates compiled before version %s must be recompiled.'%(
        __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class CompiledTemplate(Template):
    """Cheetah-compiled version of home.html.

    NOTE(review): this module is machine-generated – regenerate it from
    home.html via the build script instead of editing it by hand.
    """
    ##################################################
    ## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        super(CompiledTemplate, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            # Forward only the keyword arguments Cheetah understands.
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        write(u'''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>\u5929\u6c14\u9884\u62a5</title>
<script type="text/javascript" src="/static/js/jquery.js"></script>
<script type="text/javascript">
var days=["\u661f\u671f\u65e5", "\u661f\u671f\u4e00", "\u661f\u671f\u4e8c", "\u661f\u671f\u4e09", "\u661f\u671f\u56db", "\u661f\u671f\u4e94", "\u661f\u671f\u516d"]
jQuery(document).ready(function() {
jQuery.getJSON("/api?city=''')
        _v = VFSL([locals()]+SL+[globals(), __builtin__],"city.first_alias",True) # u'${city.first_alias}' on line 11, col 29
        if _v is not None: write(_filter(_v, rawExpr=u'${city.first_alias}')) # from line 11, col 29.
        write(u'''", function(data) {
var today = data.forecasts[0];
\tvar tomorrow = data.forecasts[1];
jQuery("#x-today-date").html(today.date);
jQuery("#x-tomorrow-date").html(tomorrow.date);
jQuery("#x-today-day").html(days[today.day]);
jQuery("#x-tomorrow-day").html(days[tomorrow.day]);
jQuery("#x-today-text").html(today.text);
jQuery("#x-tomorrow-text").html(tomorrow.text);
jQuery("#x-today-temp").html(today.low + " ~ " + today.high + "\xb0");
\tjQuery("#x-tomorrow-temp").html(tomorrow.low + " ~ " + tomorrow.high + "\xb0");
jQuery("#x-today-icon").css("background-image", "url(" + today.image_large + ")");
\tjQuery("#x-tomorrow-icon").css("background-image", "url(" + tomorrow.image_large + ")");
\tjQuery("#x-today-icon-small").css("background-image", "url(" + today.image_small + ")");
jQuery("#x-pub").html(data.pub);
\tif (data.wind.chill!=null)
\t jQuery("#x-wind-chill").html(data.wind.chill);
\tif (data.wind.direction!=null)
\t jQuery("#x-wind-direction").html(data.wind.direction);
\tif (data.wind.speed!=null)
\t jQuery("#x-wind-speed").html(data.wind.speed);
if (data.atmosphere.humidity!=null)
\t jQuery("#x-atmosphere-humidity").html(data.atmosphere.humidity);
if (data.atmosphere.visibility!=null)
\t jQuery("#x-atmosphere-visibility").html(data.atmosphere.visibility);
if (data.atmosphere.pressure!=null)
\t jQuery("#x-atmosphere-pressure").html(data.atmosphere.pressure);
if (data.astronomy.sunrise!=null)
\t jQuery("#x-astronomy-sunrise").html(data.astronomy.sunrise);
if (data.astronomy.sunset!=null)
\t jQuery("#x-astronomy-sunset").html(data.astronomy.sunset);
});
});
function change_city(key){
if (key=="-")
return;
location.assign("/?city=" + key);
}
</script>
<link rel="stylesheet" href="/static/css/screen.css" type="text/css" media="screen, projection">
<link rel="stylesheet" href="/static/css/print.css" type="text/css" media="print">
<!--[if lt IE 8]>
\t<link rel="stylesheet" href="/static/css/ie.css" type="text/css" media="screen, projection">
<![endif]-->
<style type="text/css">
div.w-report span.h {
\tmargin:3px 0px;
\tfont-weight:bold;
font-size:24px;
\tdisplay:inline;
}
div.w-report span.date {
\tmargin:3px 0px 3px 12px;
\tfont-weight:bold;
\tfont-size:16px;
}
div.weather-report {
\tbackground-image:url(static/img/w-bg.png);
\tbackground-repeat:no-repeat;
\tbackground-position:56px 70px;
\tmargin:0px;
\tpadding:0px;
\twidth:300px;
\theight:160px;
}
div.weather-icon {
\tbackground-image:url(static/w/img/d44.png);
\tbackground-repeat:no-repeat;
\tmargin:0px;
\tpadding:0px;
\twidth:300px;
\theight:160px;
}
div.weather-text {
\ttext-align:right;
\tmargin:0px;
\tpadding-top:76px;
\tpadding-right:20px;
}
div.weather-text p {
\tmargin:0px;
\tcolor:#FFF;
\tfont-size: 20px;
\tfont-weight: bold;
\ttext-shadow: #315895 0px -1px 1px;
\tline-height:28px;
}
</style>
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push([\'_setAccount\', \'UA-251595-22\']);
_gaq.push([\'_trackPageview\']);
(function() {
var ga = document.createElement(\'script\'); ga.type = \'text/javascript\'; ga.async = true;
ga.src = (\'https:\' == document.location.protocol ? \'https://ssl\' : \'http://www\') + \'.google-analytics.com/ga.js\';
var s = document.getElementsByTagName(\'script\')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</head>
<body style="font-size:13px">
<div class="container" style="background-color:#FFF">
<div class="span-24 last">
</div>
<div class="span-24 last">
<div id="x-today-icon-small" style="background-repeat:no-repeat; height:34; padding:10px 0px 10px 60px; background-image:url(static/w/img/s44.png)"><strong>''')
        _v = VFSL([locals()]+SL+[globals(), __builtin__],"city.name",True) # u'${city.name}' on line 125, col 163
        if _v is not None: write(_filter(_v, rawExpr=u'${city.name}')) # from line 125, col 163.
        write(u'''</strong>
<select name="change_city" id="change_city" onchange="change_city(this.value)">
<option value="-">\u66f4\u6539\u57ce\u5e02</option>
''')
        for c in VFSL([locals()]+SL+[globals(), __builtin__],"cities",True): # generated from line 128, col 1
            write(u''' <option value="''')
            _v = VFN(VFSL([locals()]+SL+[globals(), __builtin__],"c",True),"first_alias",False)() # u'${c.first_alias()}' on line 129, col 26
            if _v is not None: write(_filter(_v, rawExpr=u'${c.first_alias()}')) # from line 129, col 26.
            write(u'''">''')
            _v = VFSL([locals()]+SL+[globals(), __builtin__],"c.name",True) # u'${c.name}' on line 129, col 46
            if _v is not None: write(_filter(_v, rawExpr=u'${c.name}')) # from line 129, col 46.
            write(u'''</option>
''')
        write(u''' </select>
</div>
</div>
\t<div class="span-16">
<div class="span-16 last">
<div id="weather-today" class="w-report span-8">
<div><span class="h">\u4eca\u65e5\u5929\u6c14</span><span class="date"><span id="x-today-date"></span> <span id="x-today-day"></span></span></div>
<div class="weather-report">
<div id="x-today-icon" class="weather-icon">
<div class="weather-text">
<p id="x-today-text">Loading...</p>
<p id="x-today-temp"></p>
</div>
</div>
</div>
<div><span class="h">\u5176\u4ed6\u4fe1\u606f\uff1a</span></div>
<div style="padding:6px">
<div>\u98ce\u529b\uff1a<span id="x-wind-chill">N/A</span> \u98ce\u5411\uff1a<span id="x-wind-direction">N/A</span> \u98ce\u901f\uff1a<span id="x-wind-speed">N/A</span></div>
<div>\u80fd\u89c1\u5ea6\uff1a<span id="x-atmosphere-visibility">N/A</span> \u6e7f\u5ea6\uff1a<span id="x-atmosphere-humidity">N/A</span> \u6c14\u538b\uff1a<span id="x-atmosphere-pressure">N/A</span></div>
<div>\u65e5\u51fa\uff1a<span id="x-astronomy-sunrise">N/A</span> \u65e5\u843d\uff1a<span id="x-astronomy-sunset">N/A</span></div>
<div>\u53d1\u5e03\u4e8e\uff1a<span id="x-pub">N/A</span></div>
</div>
</div>
<div id="weather-tomorrow" class="w-report span-8 last">
<div><span class="h">\u660e\u65e5\u5929\u6c14</span><span class="date"><span id="x-tomorrow-date"></span> <span id="x-tomorrow-day"></span></span></div>
<div class="weather-report">
<div id="x-tomorrow-icon" class="weather-icon">
<div class="weather-text">
<p id="x-tomorrow-text">Loading...</p>
<p id="x-tomorrow-temp"></p>
</div>
</div>
</div>
</div>
</div>
<div class="w-report span-16 last" style="margin-top:6px">
<div><span class="h">\u5b89\u88c5Chrome\u63d2\u4ef6</span></div>
<div style="padding:6px">
<div>\u5982\u679c\u60a8\u4f7f\u7528\u7684\u662f\u652f\u6301HTML 5\u7684Google Chrome\u6d4f\u89c8\u5668\uff0c\u53ef\u4ee5<a href="https://chrome.google.com/extensions/detail/gbmkicglakjoppnghhiceacmbbaihoeh" target="_blank">\u5b89\u88c5\u6700\u65b0\u63d2\u4ef6</a>\u4ee5\u4fbf\u968f\u65f6\u83b7\u53d6\u5929\u6c14\u9884\u62a5\uff1a</div>
<div><a href="https://chrome.google.com/extensions/detail/gbmkicglakjoppnghhiceacmbbaihoeh" target="_blank"><img src="static/img/snapshot-chrome-extension.png" width="291" height="99" style="margin:12px"/></a></div>
</div>
</div>
<div class="w-report span-16 last" style="margin-top:6px">
<div><span class="h">GTalk\u673a\u5668\u4eba</span></div>
<div style="padding:6px">
<div>\u5982\u679c\u60a8\u4f7f\u7528Google Talk\uff0c\u53ef\u4ee5\u6dfb\u52a0\u673a\u5668\u4eba<strong>weather-china@appspot.com</strong>\u4e3a\u597d\u53cb\uff0c\u968f\u65f6\u5411\u4ed6\u8be2\u95ee\u5929\u6c14\u9884\u62a5\uff1a</div>
<div><img src="static/img/snapshot-xmpp.png" width="300" height="254" style="margin:12px"/></div>
</div>
</div>
</div>
<div class="span-8 last">
<script type="text/javascript"><!--
google_ad_client = "pub-6727358730461554";
/* 300x250 */
google_ad_slot = "8201905603";
google_ad_width = 300;
google_ad_height = 250;
//-->
</script>
<script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"></script>
<script type="text/javascript"><!--
google_ad_client = "pub-6727358730461554";
/* 300x250 */
google_ad_slot = "8201905603";
google_ad_width = 300;
google_ad_height = 250;
//-->
</script>
<script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"></script>
<script type="text/javascript"><!--
google_ad_client = "pub-6727358730461554";
/* 300x250 */
google_ad_slot = "8201905603";
google_ad_width = 300;
google_ad_height = 250;
//-->
</script>
<script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"></script>
</div>
<div class="span-24 last"></div>
<div class="span-24 last"><div style="text-align:center;padding:6px"><a href="http://code.google.com/p/weather-china/wiki/API" target="_blank">API\u670d\u52a1</a> | <a href="http://code.google.com/p/weather-china/issues/list" target="_blank">\u610f\u89c1\u53cd\u9988</a> | <a id="x-contact" href="#">\u8054\u7cfb\u6211\u4eec</a> | Copyright©2010</div></div>
</div>
<script type="text/javascript">
jQuery("#x-contact").attr("href", "mail" + "to:ask" + "xuefeng@" + "gm" + "ail.com");
</script>
</body>
</html>
''')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES
    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_CompiledTemplate= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing to the class when the installed
# Cheetah version expects it (compatibility shim for older templates).
if not hasattr(CompiledTemplate, '_initCheetahAttributes'):
    templateAPIClass = getattr(CompiledTemplate, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(CompiledTemplate)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
# Render the template from the command line for quick inspection.
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=CompiledTemplate()).run()
# ---------- file boundary: next source module ----------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
from google.appengine.ext import db
class City(db.Model):
    """Datastore entity for a city whose weather can be queried."""
    # Display name; aliases are lowercase lookup keys (e.g. 'beijing').
    name = db.StringProperty(required=True)
    aliases = db.StringListProperty(required=True)
    # Yahoo! Weather location id used in the forecastrss ?w= parameter.
    code = db.IntegerProperty(required=True)
    def first_alias(self):
        # The first alias doubles as the city's canonical URL key.
        return self.aliases[0]
    def aliases_str(self):
        # Human-readable alias list for the admin UI.
        return ', '.join(self.aliases)
def get_city(key=None):
    """Load a City by datastore key, falling back to Beijing."""
    city = City.get(key) if key else None
    if city is None:
        city = find_city('beijing')
    return city
def get_cities():
    # Up to 1000 cities, ordered by their alias list.
    return City.all().order('aliases').fetch(1000)
def find_city(name, return_default=True):
    """Find a city by alias first, then by display name.

    Returns None when nothing matches and return_default is False;
    otherwise falls back to the 'beijing' entry.
    """
    for prop in ('aliases =', 'name ='):
        city = City.all().filter(prop, name).get()
        if city is not None:
            return city
    if return_default:
        return City.all().filter('aliases =', 'beijing').get()
    return None
def create_city(name, aliases, code):
    # Persist a new City entity and return it.
    c = City(name=name, aliases=aliases, code=code)
    c.put()
    return c
def delete_city(key):
    # Remove the City entity with the given datastore key.
    City.get(key).delete()
import urllib
import datetime
from xml.parsers.expat import ParserCreate
# Yahoo! Weather condition codes mapped to Chinese descriptions.
# 3200 is Yahoo's sentinel for "no data available".
codes = {
    0 : u'龙卷风', # tornado
    1 : u'热带风暴', # tropical storm
    2 : u'飓风', # hurricane
    3 : u'风暴', # severe thunderstorms
    4 : u'雷雨', # thunderstorms
    5 : u'雨夹雪', # mixed rain and snow
    6 : u'雨夹冰雹', # mixed rain and sleet
    7 : u'雪夹冰雹', # mixed snow and sleet
    8 : u'冰毛毛雨', # freezing drizzle
    9 : u'毛毛雨', # drizzle
    10 : u'冰雨', # freezing rain
    11 : u'阵雨', # showers
    12 : u'阵雨', # showers
    13 : u'小雪', # snow flurries
    14 : u'小雨雪', # light snow showers
    15 : u'风雪', # blowing snow
    16 : u'下雪', # snow
    17 : u'冰雹', # hail
    18 : u'雨夹雪', # sleet
    19 : u'尘土', # dust
    20 : u'雾', # foggy
    21 : u'霾', # haze
    22 : u'烟雾', # smoky
    23 : u'狂风', # blustery
    24 : u'大风', # windy
    25 : u'寒冷', # cold
    26 : u'多云', # cloudy
    27 : u'多云', # mostly cloudy (night)
    28 : u'多云', # mostly cloudy (day)
    29 : u'局部多云', # partly cloudy (night)
    30 : u'局部多云', # partly cloudy (day)
    31 : u'晴朗', # clear (night)
    32 : u'晴', # sunny
    33 : u'晴朗', # fair (night)
    34 : u'晴朗', # fair (day)
    35 : u'雨夹冰雹', # mixed rain and hail
    36 : u'炎热', # hot
    37 : u'局部雷雨', # isolated thunderstorms
    38 : u'零星雷雨', # scattered thunderstorms
    39 : u'零星雷雨', # scattered thunderstorms
    40 : u'零星阵雨', # scattered showers
    41 : u'大雪', # heavy snow
    42 : u'零星雨夹雪', # scattered snow showers
    43 : u'大雪', # heavy snow
    44 : u'局部多云', # partly cloudy
    45 : u'雷阵雨', # thundershowers
    46 : u'小雪', # snow showers
    47 : u'局部雷雨', # isolated thundershowers
    3200 : u'暂无数据' # not available
}
def load_rss(url):
    """Fetch *url* and return the raw response body.

    The handle is closed in a finally block, so a failed read() no
    longer leaks the connection.
    """
    f = urllib.urlopen(url)
    try:
        return f.read()
    finally:
        f.close()
class Wind(object):
    """Wind report section, rendered as a JSON object string."""
    def __init__(self, chill, direction, speed):
        self.chill = chill
        self.direction = direction
        self.speed = speed
    def __str__(self):
        # Render missing fields as JSON null instead of the literal
        # 'None' the old code produced, which is invalid JSON.
        return r'{"chill" : %s, "direction" : %s, "speed" : %s}' % (
            self.chill or "null",
            self.direction or "null",
            self.speed or "null")
    __repr__ = __str__
class Atmosphere(object):
    """Atmosphere report section (humidity/visibility/pressure/rising)."""
    def __init__(self, humidity, visibility, pressure, rising):
        self.humidity = humidity
        self.visibility = visibility
        self.pressure = pressure
        self.rising = rising
    def __str__(self):
        # Render missing fields as JSON null instead of the literal
        # 'None' the old code produced, which is invalid JSON.
        return r'{"humidity" : %s, "visibility" : %s, "pressure" : %s, "rising": %s}' % (
            self.humidity or "null",
            self.visibility or "null",
            self.pressure or "null",
            self.rising or "null")
    __repr__ = __str__
class Astronomy(object):
    """Sunrise/sunset times as 24-hour strings."""
    def __init__(self, sunrise, sunset):
        self.sunrise = sunrise
        self.sunset = sunset
    def __str__(self):
        # Times are quoted because they are plain strings like "6:12".
        fmt = r'{"sunrise" : "%s", "sunset": "%s"}'
        return fmt % (self.sunrise, self.sunset)
    __repr__ = __str__
class Forecast(object):
    """One forecast day, built from an RSS element such as:
    <yweather:forecast day="Wed" date="30 Jun 2010" low="24" high="30" text="Mostly Cloudy" code="28" />
    """
    def __init__(self, day, date, low, high, code):
        self.day = day
        self.date = date
        self.low = low
        self.high = high
        self.code = code
    def __str__(self):
        large = "http://l.yimg.com/a/i/us/nws/weather/gr/%sd.png" % self.code
        small = "http://l.yimg.com/a/i/us/nws/weather/gr/%ss.png" % self.code
        return u'{"date" : "%s", "day" : %s, "code" : %s, "text" : "%s", "low" : %d, "high" : %d, "image_large" : "%s", "image_small" : "%s"}' % (
            self.date, self.day, self.code, codes[self.code], self.low, self.high, large, small)
    __repr__ = __str__
def index_of(seq, data):
    """Return the index of *data* in *seq*, or None when absent.

    The first parameter was renamed from 'list' so it no longer shadows
    the builtin; every call site in this module passes it positionally.
    """
    for i, item in enumerate(seq):
        if item == data:
            return i
    return None
def get_day(day):
    """Map an English weekday abbreviation to 0 (Sun) .. 6 (Sat)."""
    weekdays = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')
    return index_of(weekdays, day)
def get_date(date):
    """Parse a date string like '30 Jun 2010' into a datetime.date."""
    day, month_name, year = date.split(' ')
    months = ('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    return datetime.date(int(year), index_of(months, month_name), int(day))
def to_24hour(time):
    """Convert '4:39 pm' to '16:39' (and '12:05 am' to '0:05').

    Strings without an am/pm suffix are returned unchanged. Fixes the
    12 o'clock edge cases: the old code produced '24:30' for
    '12:30 pm' and left '12:05 am' as '12:05'.
    """
    for suffix, offset in ((' am', 0), (' pm', 12)):
        if time.endswith(suffix):
            hh, _, mm = time[:-3].partition(':')
            return '%d:%s' % (int(hh) % 12 + offset, mm)
    return time
class Weather(object):
    """Expat-based parser for a Yahoo! Weather forecast RSS document.

    Feeding the raw XML to __init__ populates pub, wind, atmosphere,
    astronomy and forecasts from the yweather:* elements.
    """
    def char_data(self, text):
        # Only the text of <lastBuildDate> matters; it looks like
        # "Wed, 30 Jun 2010 4:39 pm CST".
        if self.__isLastBuildDate:
            n = text.find(', ')
            text = text[n+2:]
            # Replace the English month name with its number so strptime
            # can parse the remainder with %m.
            n1 = text.find(' ')
            n2 = text.find(' ', n1+1)
            m = text[n1+1:n2]
            month = index_of(('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), m)
            text = text.replace(m, str(month))
            # NOTE(review): only ' CST' timestamps are handled; any other
            # timezone leaves self.pub as None.
            if not text.endswith(' CST'):
                return
            text = text[:-4]
            is_pm = text.endswith(' pm')
            text = text[:-3]
            time = datetime.datetime.strptime(text, '%d %m %Y %I:%M')
            h = time.hour
            if is_pm:
                h = h + 12
            self.pub = '%d-%#02d-%#02d %#02d:%#02d' % (time.year, time.month, time.day, h, time.minute)
    def end_element(self, name):
        if name=='lastBuildDate':
            self.__isLastBuildDate = False
    def start_element(self, name, attrs):
        if name=='lastBuildDate':
            # Flag so char_data() knows the next text chunk is the date.
            self.__isLastBuildDate = True
            return
        if name=='yweather:forecast':
            # Temperatures are kept as-is here (no Fahrenheit conversion).
            self.forecasts.append(Forecast(
                get_day(attrs['day']),
                get_date(attrs['date']),
                int(attrs['low']),
                int(attrs['high']),
                int(attrs['code'])
            ))
        if name=='yweather:astronomy':
            self.astronomy.sunrise = to_24hour(attrs['sunrise'])
            self.astronomy.sunset = to_24hour(attrs['sunset'])
        if name=='yweather:atmosphere':
            self.atmosphere.humidity = attrs['humidity']
            self.atmosphere.visibility = attrs['visibility']
            self.atmosphere.pressure = attrs['pressure']
            self.atmosphere.rising = attrs['rising']
        if name=='yweather:wind':
            self.wind.chill = attrs['chill']
            self.wind.direction = attrs['direction']
            self.wind.speed = attrs['speed']
    def __init__(self, data):
        self.__isLastBuildDate = False
        self.pub = None
        self.wind = Wind(None, None, None)
        self.atmosphere = Atmosphere(None, None, None, None)
        self.astronomy = Astronomy(None, None)
        self.forecasts = []
        parser = ParserCreate()
        parser.returns_unicode = False
        parser.StartElementHandler = self.start_element
        parser.EndElementHandler = self.end_element
        parser.CharacterDataHandler = self.char_data
        parser.Parse(data)
    def __str__(self):
        # Render as a JSON-ish object; pub may legitimately be absent.
        pub = 'null'
        if self.pub:
            pub = r'"%s"' % self.pub
        return u'{"pub" : %s, "wind" : %s, "astronomy" : %s, "atmosphere" : %s, "forecasts" : %s}' \
            % (pub, self.wind, self.astronomy, self.atmosphere, self.forecasts)
    __repr__ = __str__
class Subscriber(db.Model):
    """Mobile subscriber record.

    NOTE(review): nothing in this module reads or writes this model –
    presumably used by other application code; verify before changing.
    """
    mobile = db.StringProperty(required=True)
    # Presumably a city alias key – confirm against the code that writes it.
    city = db.StringProperty(required=True)
    time = db.IntegerProperty(required=True)
# ---------- file boundary: next source module ----------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
import os
import cgi
import time
import logging
import simplejson
from datetime import date
from google.appengine.api import xmpp
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
from google.appengine.runtime import apiproxy_errors
from google.appengine.api import memcache
from google.appengine.api import users
from Cheetah.Template import Template
from autogen import CompiledTemplate
import weather
import store
def get_city(request):
    """Extract the value of the 'city' cookie from *request*, or None."""
    raw = request.headers['Cookie'] if 'Cookie' in request.headers else None
    if not raw:
        return None
    for part in raw.split(';'):
        part = part.strip()
        if part.startswith('city='):
            return part[5:]
    return None
def fetch_weather_in_cache(city):
    """Return the weather JSON for *city*, caching results for an hour."""
    cache_key = str(city.code)
    cached = memcache.get(cache_key)
    if cached:
        return cached
    fresh = fetch_weather(city)
    # Only successful fetches are cached; None falls through unchanged.
    if fresh is not None:
        memcache.set(cache_key, fresh, 3600)
    return fresh
def fetch_weather(city):
    """Fetch the RSS for *city* and serialise it to a JSON string, or None."""
    rss = fetch_rss(city.code)
    return None if rss is None else str(weather.Weather(city.name, rss))
def fetch_rss(code):
    """Download the Yahoo! Weather RSS for location *code*; None on failure."""
    url = 'http://weather.yahooapis.com/forecastrss?w=%s' % code
    logging.info('Fetch RSS: %s' % url)
    try:
        result = urlfetch.fetch(url, follow_redirects=False)
    except (urlfetch.Error, apiproxy_errors.Error):
        return None
    # Anything but a plain 200 is treated as a failed fetch.
    return result.content if result.status_code == 200 else None
class XmppHandler(webapp.RequestHandler):
    """XMPP bot endpoint: a chat message naming a city gets that city's
    two-day forecast in reply."""
    def post(self):
        message = xmpp.Message(self.request.POST)
        logging.info('XMPP from %s: %s' % (message.sender, message.body))
        # The whole message body is treated as the city name/alias.
        name = message.body.strip().lower()
        if name=='':
            message.reply(u'''噢，啥都不输，怎么知道您要查询的城市啊？
http://weather-china.appspot.com/
''')
            return
        city = store.find_city(name, return_default=False)
        if city is None:
            message.reply(u''':( 噢，没有找到您要查询的城市 "%s"。
http://weather-china.appspot.com/
''' % name)
            return
        json = fetch_weather_in_cache(city)
        if json is None:
            return message.reply(u''':( 对不起，网络故障，暂时无法查询，请过几分钟再试试。
http://weather-china.appspot.com/
''')
        # Normalise to UTF-8 bytes before parsing back into a dict.
        if isinstance(json, unicode):
            json = json.encode('utf-8')
        w = simplejson.loads(json, encoding='utf-8')
        return message.reply(
            u'''%s：
今日：%s，%s~%s度
明日：%s，%s~%s度
更详细的预报请查看 http://weather-china.appspot.com/?city=%s
''' % (
            w[u'name'],
            w[u'forecasts'][0][u'text'], w[u'forecasts'][0][u'low'], w[u'forecasts'][0][u'high'],
            w[u'forecasts'][1][u'text'], w[u'forecasts'][1][u'low'], w[u'forecasts'][1][u'high'],
            city.first_alias(),)
        )
class HomeHandler(webapp.RequestHandler):
    """Renders the home page for the requested (or remembered) city."""
    def get(self):
        time_1 = time.time()
        # City resolution order: ?city= parameter, cookie, then 'beijing'.
        name = self.request.get('city', '') or get_city(self.request) or 'beijing'
        cities = memcache.get('__cities__')
        if cities is None:
            cities = store.get_cities()
            memcache.set('__cities__', cities, 3600)
        city = None
        for c in cities:
            if c.name == name or name in c.aliases:
                city = c
                break
        if city is None:
            self.response.set_status(500)
            return
        # Remember the chosen city for roughly three years. Using a
        # timedelta instead of date(today.year + 3, today.month, today.day)
        # fixes the ValueError the old code raised when today was Feb 29.
        from datetime import timedelta
        target = date.today() + timedelta(days=3*365)
        expires = target.strftime('%a, %d-%b-%Y %H:%M:%S GMT')
        self.response.headers['Set-Cookie'] = 'city=%s; expires=%s; path=/' % (city.first_alias(), expires)
        time_2 = time.time()
        t = CompiledTemplate(searchList=[{'city' : city, 'cities' : cities}])
        self.response.out.write(t)
        time_3 = time.time()
        logging.info('Performance: %f / %f of rendering / total.' % (time_3-time_2, time_3-time_1))
class AdminHandler(webapp.RequestHandler):
    """Admin UI: list, create, and delete cities.  Requires an admin login."""
    def get(self):
        login = self.get_login_url()
        if login:
            self.redirect(login)
            return
        action = self.request.get('action', '')
        if action=='delete_city':
            key = self.request.get('key')
            store.delete_city(key)
            self.redirect_admin()
            return
        if action=='':
            # Default view: render the admin page with all cities.
            cities = store.get_cities()
            root = os.path.dirname(__file__)
            t = Template(file=os.path.join(root, 'admin.html'), searchList=[{'cities' : cities}])
            self.response.out.write(t)
            return
        self.response.set_status(400)
    def post(self):
        login = self.get_login_url()
        if login:
            self.redirect(login)
            return
        action = self.request.get('action')
        if action=='create_city':
            # Escape user-supplied values before they reach the datastore/HTML.
            name = cgi.escape(self.request.get('name')).strip().lower()
            aliases = [cgi.escape(x).lower() for x in self.request.get_all('aliases') if x.strip()]
            code = int(self.request.get('code'))
            store.create_city(name, aliases, code)
            self.redirect_admin()
            return
        self.response.set_status(400)
    def get_login_url(self):
        """Return a login URL if the current user is not an admin, else None."""
        if not users.is_current_user_admin():
            return users.create_login_url('/admin')
        return None
    def redirect_admin(self):
        # Timestamp query param defeats browser caching of the admin page.
        self.redirect('/admin?t=%s' % time.time())
class ApiHandler(webapp.RequestHandler):
    """JSON/JSONP weather API, with a cookie-based mode for the Chrome
    extension (extension=chrome)."""
    # Intended cache lifetime for API responses.
    CACHE_TIME = 600 # 600 seconds
    def get(self):
        callback = ''
        c = ''
        extension = self.request.get('extension', '')
        if extension=='chrome':
            # detect city from cookie:
            c = get_city(self.request)
            if not c:
                c = 'beijing'
        else:
            # JSONP callback is optional; the 'city' parameter is required.
            callback = cgi.escape(self.request.get('callback', '').strip())
            c = cgi.escape(self.request.get('city', '')).lower()
        if not c:
            return self.send_error('MISSING_PARAMETER', 'Missing parameter \'city\'')
        city = store.find_city(c, return_default=False)
        if city is None:
            return self.send_error('CITY_NOT_FOUND', 'City not found')
        # NOTE(review): this local shadows the module-level 'weather' import.
        weather = fetch_weather_in_cache(city)
        if weather is None:
            return self.send_error('SERVICE_UNAVAILABLE', 'Service unavailable')
        if callback:
            if isinstance(callback, unicode):
                callback = callback.encode('utf-8')
            # JSONP: wrap the payload in the caller-supplied function call.
            self.write_json('%s(%s);' % (callback, weather))
        else:
            self.write_json(weather)
    def send_error(self, code, msg):
        """Write a JSON error object with the given code and message."""
        json = '{ "error" : "%s", "message" : "%s"}' % (code, msg)
        self.write_json(json)
    def write_json(self, json):
        """Write *json* (encoded as utf-8 if needed) with the JSON content type."""
        if isinstance(json, unicode):
            json = json.encode('utf-8')
        self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
        self.response.out.write(json)
# URL routing table for the WSGI application.
application = webapp.WSGIApplication([
    ('^/$', HomeHandler),
    ('^/api$', ApiHandler),
    ('^/admin$', AdminHandler),
    ('^/_ah/xmpp/message/chat/$', XmppHandler),
    ], debug=True)
def main():
    """CGI entry point: hand the application to the App Engine runner."""
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
| Python |
"""
Provides an emulator/replacement for Python's standard import system.
@@TR: Be warned that Import Hooks are in the deepest, darkest corner of Python's
jungle. If you need to start hacking with this, be prepared to get lost for a
while. Also note, this module predates the newstyle import hooks in Python 2.3
http://www.python.org/peps/pep-0302.html.
This is a hacked/documented version of Gordon McMillan's iu.py. I have:
- made it a little less terse
- added docstrings and explanatations
- standardized the variable naming scheme
- reorganized the code layout to enhance readability
"""
import sys
import imp
import marshal
# NOTE(review): _installed is not referenced in this portion of the file —
# presumably toggled by install machinery elsewhere; verify before removing.
_installed = False
# _globalOwnerTypes is defined at the bottom of this file
# Bootstrap 'os' replacements; filled in by _os_bootstrap() below.
_os_stat = _os_path_join = _os_getcwd = _os_path_dirname = None
##################################################
## FUNCTIONS
def _os_bootstrap():
    """Set up 'os' module replacement functions for use during import bootstrap."""
    names = sys.builtin_module_names
    join = dirname = None
    # Pick the platform-specific builtin module providing stat/getcwd.
    if 'posix' in names:
        sep = '/'
        from posix import stat, getcwd
    elif 'nt' in names:
        sep = '\\'
        from nt import stat, getcwd
    elif 'dos' in names:
        sep = '\\'
        from dos import stat, getcwd
    elif 'os2' in names:
        sep = '\\'
        from os2 import stat, getcwd
    elif 'mac' in names:
        from mac import stat, getcwd
        def join(a, b):
            # Classic MacOS paths use ':' separators.
            if a == '':
                return b
            if ':' not in a:
                a = ':' + a
            if a[-1:] != ':':
                a = a + ':'
            return a + b
    else:
        raise ImportError('no os specific module found')
    if join is None:
        def join(a, b, sep=sep):
            # Generic join: avoid doubling an existing trailing separator.
            if a == '':
                return b
            lastchar = a[-1:]
            if lastchar == '/' or lastchar == sep:
                return a + b
            return a + sep + b
    if dirname is None:
        def dirname(a, sep=sep):
            # Strip the final path component (everything after the last sep).
            for i in range(len(a)-1, -1, -1):
                c = a[i]
                if c == '/' or c == sep:
                    return a[:i]
            return ''
    # Publish the chosen implementations as module globals.
    global _os_stat
    _os_stat = stat
    global _os_path_join
    _os_path_join = join
    global _os_path_dirname
    _os_path_dirname = dirname
    global _os_getcwd
    _os_getcwd = getcwd
_os_bootstrap()
def packageName(s):
    """Return the package part of dotted name *s*, or '' when undotted."""
    last_dot = s.rfind('.')
    if last_dot < 0:
        return ''
    return s[:last_dot]
def nameSplit(s):
    """Split dotted name *s* into components, dropping one trailing empty part."""
    parts = s.split('.')
    # The original loop never emitted a trailing empty component.
    if parts and parts[-1] == '':
        parts.pop()
    return parts
def getPathExt(fnm):
    """Return the extension of *fnm* including the dot, or '' if none."""
    last_dot = fnm.rfind('.')
    return fnm[last_dot:] if last_dot >= 0 else ''
def pathIsDir(pathname):
    "Local replacement for os.path.isdir()."
    # Returns None (not False) when the path cannot be stat'ed.
    try:
        s = _os_stat(pathname)
    except OSError:
        return None
    # S_IFMT mask against S_IFDIR (Python 2 octal literals).
    return (s[0] & 0170000) == 0040000
def getDescr(fnm):
    """Return the imp.get_suffixes() triple matching *fnm*'s extension.

    Returns None (implicitly) when no suffix matches.
    """
    ext = getPathExt(fnm)
    for triple in imp.get_suffixes():
        if triple[0] == ext:
            return triple
##################################################
## CLASSES
class Owner:
    """An Owner does imports from a particular piece of turf.

    There is one Owner per sys.path entry — directories, .pyz files, and
    potentially zip files or URLs.  A shadowpath (a dict mapping sys.path
    strings to their owners) keeps sys.path itself a plain list of strings.
    """
    def __init__(self, path):
        self.path = path

    def __str__(self):
        return self.path

    def getmod(self, nm):
        # The base class knows no modules; subclasses override.
        return None
class DirOwner(Owner):
    """Owner for a filesystem directory on the import path."""
    def __init__(self, path):
        # '' on sys.path means the current working directory.
        if path == '':
            path = _os_getcwd()
        if not pathIsDir(path):
            raise ValueError("%s is not a directory" % path)
        Owner.__init__(self, path)
    def getmod(self, nm,
               getsuffixes=imp.get_suffixes, loadco=marshal.loads, newmod=imp.new_module):
        """Locate and load module *nm* from this directory, or return None."""
        pth = _os_path_join(self.path, nm)
        # A package candidate (subdir with __init__) is tried before a module.
        possibles = [(pth, 0, None)]
        if pathIsDir(pth):
            possibles.insert(0, (_os_path_join(pth, '__init__'), 1, pth))
        py = pyc = None
        for pth, ispkg, pkgpth in possibles:
            for ext, mode, typ in getsuffixes():
                attempt = pth+ext
                try:
                    st = _os_stat(attempt)
                except:
                    pass
                else:
                    if typ == imp.C_EXTENSION:
                        # C extensions are loaded immediately via imp.
                        fp = open(attempt, 'rb')
                        mod = imp.load_module(nm, fp, attempt, (ext, mode, typ))
                        mod.__file__ = attempt
                        return mod
                    elif typ == imp.PY_SOURCE:
                        py = (attempt, st)
                    else:
                        pyc = (attempt, st)
            if py or pyc:
                break
        if py is None and pyc is None:
            return None
        while True:
            # Compile from source unless a .pyc exists that is at least as
            # new as the .py (stat tuple index 8 is st_mtime).
            if pyc is None or py and pyc[1][8] < py[1][8]:
                try:
                    co = compile(open(py[0], 'r').read()+'\n', py[0], 'exec')
                    break
                except SyntaxError, e:
                    print("Invalid syntax in %s" % py[0])
                    print(e.args)
                    raise
            elif pyc:
                stuff = open(pyc[0], 'rb').read()
                try:
                    # Skip the 8-byte pyc header (magic + timestamp).
                    co = loadco(stuff[8:])
                    break
                except (ValueError, EOFError):
                    # Corrupt .pyc: fall back to the .py source, if any.
                    pyc = None
            else:
                return None
        mod = newmod(nm)
        mod.__file__ = co.co_filename
        if ispkg:
            # Packages get a __path__ and their own sub-importer.
            mod.__path__ = [pkgpth]
            subimporter = PathImportDirector(mod.__path__)
            mod.__importsub__ = subimporter.getmod
        # Code object is stashed; executed later by ImportManager.doimport.
        mod.__co__ = co
        return mod
# Abstract marker class: concrete directors implement getmod(nm).
class ImportDirector(Owner):
    """ImportDirectors live on the metapath There's one for builtins, one for
    frozen modules, and one for sys.path Windows gets one for modules gotten
    from the Registry Mac would have them for PY_RESOURCE modules etc. A
    generalization of Owner - their concept of 'turf' is broader"""
    pass
class BuiltinImportDirector(ImportDirector):
    """Directs imports of builtin modules"""
    def __init__(self):
        self.path = 'Builtins'
    def getmod(self, nm, isbuiltin=imp.is_builtin):
        """Load builtin module *nm*, or return None."""
        if isbuiltin(nm):
            mod = imp.load_module(nm, None, nm, ('', '', imp.C_BUILTIN))
            return mod
        return None
class FrozenImportDirector(ImportDirector):
    """Directs imports of frozen modules"""
    def __init__(self):
        self.path = 'FrozenModules'
    def getmod(self, nm,
               isFrozen=imp.is_frozen, loadMod=imp.load_module):
        """Load frozen module *nm*, or return None."""
        if isFrozen(nm):
            mod = loadMod(nm, None, nm, ('', '', imp.PY_FROZEN))
            if hasattr(mod, '__path__'):
                # Frozen packages route submodule imports back through here.
                mod.__importsub__ = lambda name, pname=nm, owner=self: owner.getmod(pname+'.'+name)
            return mod
        return None
class RegistryImportDirector(ImportDirector):
    """Directs imports of modules stored in the Windows Registry"""
    def __init__(self):
        self.path = "WindowsRegistry"
        # Maps module name -> (filename, imp description triple).
        self.map = {}
        try:
            import win32api
            ## import win32con
        except ImportError:
            # Not on Windows (or win32api missing): leave the map empty.
            pass
        else:
            HKEY_CURRENT_USER = -2147483647
            HKEY_LOCAL_MACHINE = -2147483646
            KEY_ALL_ACCESS = 983103
            subkey = r"Software\Python\PythonCore\%s\Modules" % sys.winver
            # Prefer per-user registrations over machine-wide ones.
            for root in (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE):
                try:
                    hkey = win32api.RegOpenKeyEx(root, subkey, 0, KEY_ALL_ACCESS)
                except:
                    pass
                else:
                    numsubkeys, numvalues, lastmodified = win32api.RegQueryInfoKey(hkey)
                    for i in range(numsubkeys):
                        subkeyname = win32api.RegEnumKey(hkey, i)
                        hskey = win32api.RegOpenKeyEx(hkey, subkeyname, 0, KEY_ALL_ACCESS)
                        val = win32api.RegQueryValueEx(hskey, '')
                        desc = getDescr(val[0])
                        self.map[subkeyname] = (val[0], desc)
                        hskey.Close()
                    hkey.Close()
                    break
    def getmod(self, nm):
        """Load registry-registered module *nm*, or return None."""
        stuff = self.map.get(nm)
        if stuff:
            fnm, desc = stuff
            fp = open(fnm, 'rb')
            mod = imp.load_module(nm, fp, fnm, desc)
            mod.__file__ = fnm
            return mod
        return None
class PathImportDirector(ImportDirector):
    """Directs imports of modules stored on the filesystem."""
    def __init__(self, pathlist=None, importers=None, ownertypes=None):
        # Default to sys.path so this can stand in for the normal importer.
        if pathlist is None:
            self.path = sys.path
        else:
            self.path = pathlist
        if ownertypes == None:
            self._ownertypes = _globalOwnerTypes
        else:
            self._ownertypes = ownertypes
        # _shadowPath maps path strings to their Owner instances.
        if importers:
            self._shadowPath = importers
        else:
            self._shadowPath = {}
        self._inMakeOwner = False
        self._building = {}
    def getmod(self, nm):
        """Try each path entry's Owner in order; return the module or None."""
        mod = None
        for thing in self.path:
            if isinstance(thing, basestring):
                owner = self._shadowPath.get(thing, -1)
                if owner == -1:
                    # Lazily build and memoize an Owner for this path entry.
                    owner = self._shadowPath[thing] = self._makeOwner(thing)
                if owner:
                    mod = owner.getmod(nm)
            else:
                # Non-string entries are assumed to be importers themselves.
                mod = thing.getmod(nm)
            if mod:
                break
        return mod
    def _makeOwner(self, path):
        """Instantiate the first Owner type that accepts *path* (or None)."""
        if self._building.get(path):
            # Re-entered while building this same owner: bail out.
            return None
        self._building[path] = 1
        owner = None
        for klass in self._ownertypes:
            try:
                # this may cause an import, which may cause recursion
                # hence the protection
                owner = klass(path)
            except:
                pass
            else:
                break
        del self._building[path]
        return owner
#=================ImportManager============================#
# The one-and-only ImportManager
# ie, the builtin import
# Sentinel meaning "this fully-qualified name has not been attempted yet".
UNTRIED = -1
class ImportManager:
    # really the equivalent of builtin import
    """Replacement for the builtin import: walks a metapath of directors."""
    def __init__(self):
        # Search order for absolute imports.
        self.metapath = [
            BuiltinImportDirector(),
            FrozenImportDirector(),
            RegistryImportDirector(),
            PathImportDirector()
        ]
        self.threaded = 0
        self.rlock = None
        self.locker = None
        self.setThreaded()
    def setThreaded(self):
        """Enable locking if the 'thread' module has already been imported."""
        thread = sys.modules.get('thread', None)
        if thread and not self.threaded:
            self.threaded = 1
            self.rlock = thread.allocate_lock()
            self._get_ident = thread.get_ident
    def install(self):
        """Replace the builtin __import__ and reload with this manager's hooks."""
        import __builtin__
        __builtin__.__import__ = self.importHook
        __builtin__.reload = self.reloadHook
    def importHook(self, name, globals=None, locals=None, fromlist=None, level=-1):
        '''
        NOTE: Currently importHook will accept the keyword-argument "level"
        but it will *NOT* use it (currently). Details about the "level" keyword
        argument can be found here: http://www.python.org/doc/2.5.2/lib/built-in-funcs.html
        '''
        # first see if we could be importing a relative name
        #print "importHook(%s, %s, locals, %s)" % (name, globals['__name__'], fromlist)
        _sys_modules_get = sys.modules.get
        contexts = [None]
        if globals:
            importernm = globals.get('__name__', '')
            if importernm:
                if hasattr(_sys_modules_get(importernm), '__path__'):
                    # Importer is itself a package: it is a relative context.
                    contexts.insert(0, importernm)
                else:
                    pkgnm = packageName(importernm)
                    if pkgnm:
                        contexts.insert(0, pkgnm)
        # so contexts is [pkgnm, None] or just [None]
        # now break the name being imported up so we get:
        # a.b.c -> [a, b, c]
        nmparts = nameSplit(name)
        _self_doimport = self.doimport
        threaded = self.threaded
        for context in contexts:
            ctx = context
            for i in range(len(nmparts)):
                nm = nmparts[i]
                #print " importHook trying %s in %s" % (nm, ctx)
                if ctx:
                    fqname = ctx + '.' + nm
                else:
                    fqname = nm
                if threaded:
                    self._acquire()
                mod = _sys_modules_get(fqname, UNTRIED)
                if mod is UNTRIED:
                    mod = _self_doimport(nm, ctx, fqname)
                if threaded:
                    self._release()
                if mod:
                    ctx = fqname
                else:
                    break
            else:
                # no break, point i beyond end
                i = i + 1
            if i:
                break
        if i<len(nmparts):
            # Import stopped part-way: maybe the tail is just an attribute.
            if ctx and hasattr(sys.modules[ctx], nmparts[i]):
                #print "importHook done with %s %s %s (case 1)" % (name, globals['__name__'], fromlist)
                return sys.modules[nmparts[0]]
            del sys.modules[fqname]
            raise ImportError("No module named %s" % fqname)
        if fromlist is None:
            # Plain 'import a.b.c' returns the top-level package.
            #print "importHook done with %s %s %s (case 2)" % (name, globals['__name__'], fromlist)
            if context:
                return sys.modules[context+'.'+nmparts[0]]
            return sys.modules[nmparts[0]]
        bottommod = sys.modules[ctx]
        if hasattr(bottommod, '__path__'):
            # 'from package import ...': ensure each requested name exists.
            fromlist = list(fromlist)
            i = 0
            while i < len(fromlist):
                nm = fromlist[i]
                if nm == '*':
                    # Expand '*' in place using the package's __all__.
                    fromlist[i:i+1] = list(getattr(bottommod, '__all__', []))
                    if i >= len(fromlist):
                        break
                    nm = fromlist[i]
                i = i + 1
                if not hasattr(bottommod, nm):
                    if self.threaded:
                        self._acquire()
                    mod = self.doimport(nm, ctx, ctx+'.'+nm)
                    if self.threaded:
                        self._release()
                    if not mod:
                        raise ImportError("%s not found in %s" % (nm, ctx))
        #print "importHook done with %s %s %s (case 3)" % (name, globals['__name__'], fromlist)
        return bottommod
    def doimport(self, nm, parentnm, fqname):
        """Import the single (undotted) name *nm* within package *parentnm*."""
        # Not that nm is NEVER a dotted name at this point
        #print "doimport(%s, %s, %s)" % (nm, parentnm, fqname)
        if parentnm:
            parent = sys.modules[parentnm]
            if hasattr(parent, '__path__'):
                importfunc = getattr(parent, '__importsub__', None)
                if not importfunc:
                    # Build the package's sub-importer on first use.
                    subimporter = PathImportDirector(parent.__path__)
                    importfunc = parent.__importsub__ = subimporter.getmod
                mod = importfunc(nm)
                if mod:
                    setattr(parent, nm, mod)
            else:
                #print "..parent not a package"
                return None
        else:
            # now we're dealing with an absolute import
            for director in self.metapath:
                mod = director.getmod(nm)
                if mod:
                    break
        if mod:
            # Register in sys.modules first, then run the module body (if any)
            # so recursive imports during exec can find it.
            mod.__name__ = fqname
            sys.modules[fqname] = mod
            if hasattr(mod, '__co__'):
                co = mod.__co__
                del mod.__co__
                exec(co, mod.__dict__)
            if fqname == 'thread' and not self.threaded:
                ## print "thread detected!"
                self.setThreaded()
        else:
            # Negative cache: remember the failed lookup.
            sys.modules[fqname] = None
        #print "..found %s" % mod
        return mod
    def reloadHook(self, mod):
        """Replacement for the builtin reload()."""
        fqnm = mod.__name__
        nm = nameSplit(fqnm)[-1]
        parentnm = packageName(fqnm)
        newmod = self.doimport(nm, parentnm, fqnm)
        # Update the existing module object in place so references stay valid.
        mod.__dict__.update(newmod.__dict__)
        ## return newmod
    def _acquire(self):
        # Re-entrant acquire keyed on the owning thread id.
        if self.rlock.locked():
            if self.locker == self._get_ident():
                self.lockcount = self.lockcount + 1
                ## print "_acquire incrementing lockcount to", self.lockcount
                return
        self.rlock.acquire()
        self.locker = self._get_ident()
        self.lockcount = 0
        ## print "_acquire first time!"
    def _release(self):
        if self.lockcount:
            self.lockcount = self.lockcount - 1
            ## print "_release decrementing lockcount to", self.lockcount
        else:
            self.rlock.release()
            ## print "_release releasing lock!"
##################################################
## MORE CONSTANTS & GLOBALS
# Owner classes tried, in order, by PathImportDirector._makeOwner.
_globalOwnerTypes = [
    DirOwner,
    Owner,
]
| Python |
'''
Provides an abstract Servlet baseclass for Cheetah's Template class
'''
import sys
import os.path
isWebwareInstalled = False
try:
    # Locate a Webware Servlet base class, preferring the ds.appserver fork.
    try:
        from ds.appserver.Servlet import Servlet as BaseServlet
    except:
        from WebKit.Servlet import Servlet as BaseServlet
    isWebwareInstalled = True
    # Force a new-style class so super() works in subclasses.
    if not issubclass(BaseServlet, object):
        class NewStyleBaseServlet(BaseServlet, object):
            pass
        BaseServlet = NewStyleBaseServlet
except:
    # No Webware available: provide a minimal stand-in with the same hooks.
    class BaseServlet(object):
        _reusable = 1
        _threadSafe = 0
        def awake(self, transaction):
            pass
        def sleep(self, transaction):
            pass
        def shutdown(self):
            pass
##################################################
## CLASSES
class Servlet(BaseServlet):
    """This class is an abstract baseclass for Cheetah.Template.Template.
    It wraps WebKit.Servlet and provides a few extra convenience methods that
    are also found in WebKit.Page. It doesn't do any of the HTTP method
    resolution that is done in WebKit.HTTPServlet
    """
    # Per-request state, populated by awake() and cleared by sleep().
    transaction = None
    application = None
    request = None
    session = None
    def __init__(self, *args, **kwargs):
        super(Servlet, self).__init__(*args, **kwargs)
        # this default will be changed by the .awake() method
        self._CHEETAH__isControlledByWebKit = False
    ## methods called by Webware during the request-response
    def awake(self, transaction):
        """Webware request-start hook: bind transaction state onto self."""
        super(Servlet, self).awake(transaction)
        # a hack to signify that the servlet is being run directly from WebKit
        self._CHEETAH__isControlledByWebKit = True
        self.transaction = transaction
        #self.application = transaction.application
        self.response = response = transaction.response
        self.request = transaction.request
        # Temporary hack to accomodate bug in
        # WebKit.Servlet.Servlet.serverSidePath: it uses
        # self._request even though this attribute does not exist.
        # This attribute WILL disappear in the future.
        self._request = transaction.request()
        self.session = transaction.session
        self.write = response().write
        #self.writeln = response.writeln
    def respond(self, trans=None):
        # Generated template subclasses must supply the main method.
        raise NotImplementedError("""\
couldn't find the template's main method. If you are using #extends
without #implements, try adding '#implements respond' to your template
definition.""")
    def sleep(self, transaction):
        """Webware request-end hook: drop all per-request references."""
        super(Servlet, self).sleep(transaction)
        self.session = None
        self.request = None
        self._request = None
        self.response = None
        self.transaction = None
    def shutdown(self):
        pass
    def serverSidePath(self, path=None,
                       normpath=os.path.normpath,
                       abspath=os.path.abspath
                       ):
        """Resolve *path* (or the template's own file path) to an absolute path."""
        if self._CHEETAH__isControlledByWebKit:
            return super(Servlet, self).serverSidePath(path)
        elif path:
            return normpath(abspath(path.replace("\\", '/')))
        elif hasattr(self, '_filePath') and self._filePath:
            return normpath(abspath(self._filePath))
        else:
            return None
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
#
| Python |
"Template support for Cheetah"
import sys, os, imp
from Cheetah import Compiler
import pkg_resources
def _recompile_template(package, basename, tfile, classname):
    """Compile a package's .tmpl resource into a fresh module in sys.modules.

    NOTE(review): the *tfile* argument is accepted but unused here — the
    template source is re-read via pkg_resources instead; confirm intent.
    """
    tmpl = pkg_resources.resource_string(package, "%s.tmpl" % basename)
    c = Compiler.Compiler(source=tmpl, mainClassName='GenTemplate')
    code = str(c)
    mod = imp.new_module(classname)
    ns = dict()
    # Execute the generated source to materialize the template class.
    exec(code, ns)
    tempclass = ns.get("GenTemplate",
                       ns.get('DynamicallyCompiledCheetahTemplate'))
    assert tempclass
    tempclass.__name__ = basename
    setattr(mod, basename, tempclass)
    sys.modules[classname] = mod
    return mod
class TurboCheetah:
    """TurboGears template-engine plugin wrapping Cheetah templates."""
    extension = "tmpl"
    def __init__(self, extra_vars_func=None, options=None):
        if options is None:
            options = dict()
        self.get_extra_vars = extra_vars_func
        self.options = options
        # classname -> source-file mtime, used for recompile-on-change.
        self.compiledTemplates = {}
        self.search_path = []
    def load_template(self, template=None,
                      template_string=None, template_file=None,
                      loadingSite=False):
        """Searches for a template along the Python path.
        Template files must end in ".tmpl" and be in legitimate packages.
        """
        # Exactly one of the three template sources must be supplied.
        given = len([_f for _f in (template, template_string, template_file) if _f])
        if given > 1:
            raise TypeError(
                "You may give only one of template, template_string, and "
                "template_file")
        if not given:
            raise TypeError(
                "You must give one of template, template_string, or "
                "template_file")
        if template:
            return self.load_template_module(template)
        elif template_string:
            return self.load_template_string(template_string)
        elif template_file:
            return self.load_template_file(template_file)
    def load_template_module(self, classname):
        """Import (recompiling on source change unless precompiled) the template class."""
        ct = self.compiledTemplates
        divider = classname.rfind(".")
        if divider > -1:
            package = classname[0:divider]
            basename = classname[divider+1:]
        else:
            raise ValueError("All templates must be in a package")
        if not self.options.get("cheetah.precompiled", False):
            tfile = pkg_resources.resource_filename(package,
                                                    "%s.%s" %
                                                    (basename,
                                                     self.extension))
            if classname in ct:
                mtime = os.stat(tfile).st_mtime
                if ct[classname] != mtime:
                    # Source changed since last compile: rebuild the module.
                    ct[classname] = mtime
                    del sys.modules[classname]
                    mod = _recompile_template(package, basename,
                                              tfile, classname)
                else:
                    mod = __import__(classname, dict(), dict(), [basename])
            else:
                ct[classname] = os.stat(tfile).st_mtime
                mod = _recompile_template(package, basename,
                                          tfile, classname)
        else:
            # Precompiled mode: trust the already-importable module.
            mod = __import__(classname, dict(), dict(), [basename])
        tempclass = getattr(mod, basename)
        return tempclass
    def load_template_string(self, content):
        raise NotImplementedError
    def load_template_file(self, filename):
        raise NotImplementedError
    def render(self, info, format="html", fragment=False, template=None,
               template_string=None, template_file=None):
        """Render the chosen template against *info* plus any extra vars."""
        tclass = self.load_template(
            template=template, template_string=template_string,
            template_file=template_file)
        if self.get_extra_vars:
            extra = self.get_extra_vars()
        else:
            extra = {}
        tempobj = tclass(searchList=[info, extra])
        if fragment:
            return tempobj.fragment()
        else:
            return tempobj.respond()
| Python |
# Package facade: re-export the TurboCheetah plugin at package level.
from turbocheetah import cheetahsupport
TurboCheetah = cheetahsupport.TurboCheetah
__all__ = ["TurboCheetah"]
"""
@@TR: This code is pretty much unsupported.
MondoReport.py -- Batching module for Python and Cheetah.
Version 2001-Nov-18. Doesn't do much practical yet, but the companion
testMondoReport.py passes all its tests.
-Mike Orr (Iron)
TODO: BatchRecord.prev/next/prev_batches/next_batches/query, prev.query,
next.query.
How about Report: .page(), .all(), .summary()? Or PageBreaker.
"""
import operator
try:
    from functools import reduce
except ImportError:
    # If functools doesn't exist, we must be on an old
    # enough version that has reduce() in builtins
    pass
try:
    # Prefer Cheetah's NameMapper lookup when it is available.
    from Cheetah.NameMapper import valueForKey as lookup_func
except ImportError:
    def lookup_func(obj, name):
        """Fallback lookup: attribute access first, then mapping access."""
        if hasattr(obj, name):
            return getattr(obj, name)
        else:
            return obj[name] # Raises KeyError.
########## PUBLIC GENERIC FUNCTIONS ##############################
class NegativeError(ValueError):
    """Raised when a negative value appears where only non-negatives belong."""
def isNumeric(v):
    """True for int/float values (bools count, being int subclasses)."""
    return isinstance(v, int) or isinstance(v, float)
def isNonNegative(v):
    """Return True when *v* is numeric; raise NegativeError when it is negative.

    Bug fix: the original fell off the end and always returned None, making
    it useless as a filter predicate.  It now returns the isNumeric() result
    for non-negative values, parallel to isNumeric/isNotNone.
    """
    ret = isNumeric(v)
    if ret and v < 0:
        raise NegativeError(v)
    return ret
def isNotNone(v):
    """True unless *v* is None."""
    return not (v is None)
def Roman(n):
    """Return the upper-case Roman-numeral representation of integer *n*.

    Raises ValueError for n < 1 (Roman numerals have no zero or negatives);
    int(n) may raise TypeError/ValueError for non-numeric input.
    """
    n = int(n) # Raises TypeError.
    if n < 1:
        # Bug fix: the original concatenated the int n onto the message
        # string, which raised TypeError instead of the intended ValueError.
        raise ValueError("roman numeral for zero or negative undefined: %d" % n)
    roman = ''
    # Greedily peel off each denomination, largest first (additive form).
    while n >= 1000:
        n = n - 1000
        roman = roman + 'M'
    while n >= 500:
        n = n - 500
        roman = roman + 'D'
    while n >= 100:
        n = n - 100
        roman = roman + 'C'
    while n >= 50:
        n = n - 50
        roman = roman + 'L'
    while n >= 10:
        n = n - 10
        roman = roman + 'X'
    while n >= 5:
        n = n - 5
        roman = roman + 'V'
    while n < 5 and n >= 1:
        n = n - 1
        roman = roman + 'I'
    # Rewrite additive runs into subtractive notation (IIII -> IV, etc.).
    roman = roman.replace('DCCCC', 'CM')
    roman = roman.replace('CCCC', 'CD')
    roman = roman.replace('LXXXX', 'XC')
    roman = roman.replace('XXXX', 'XL')
    roman = roman.replace('VIIII', 'IX')
    roman = roman.replace('IIII', 'IV')
    return roman
def sum(lis):
    """Add up the items of *lis* (0 for an empty sequence)."""
    total = 0
    for item in lis:
        total = total + item
    return total
def mean(lis):
    """Always returns a floating-point number.
    """
    count = len(lis)
    if not count:
        # Empty input: define the mean as 0.0 rather than dividing by zero.
        return 0.00
    return float(sum(lis)) / count
def median(lis):
    """Middle element of sorted *lis* (upper middle for even lengths)."""
    ordered = sorted(lis)
    return ordered[len(ordered) // 2]
# The statistical functions below are declared but intentionally
# unimplemented placeholders in this (unsupported) module.
def variance(lis):
    raise NotImplementedError()
def variance_n(lis):
    raise NotImplementedError()
def standardDeviation(lis):
    raise NotImplementedError()
def standardDeviation_n(lis):
    raise NotImplementedError()
class IndexFormats:
    """Eight ways to display a subscript index.
    ("Fifty ways to leave your lover....")
    """
    def __init__(self, index, item=None):
        self._index = index
        self._number = index + 1
        self._item = item

    def index(self):
        """Zero-based position."""
        return self._index
    __call__ = index

    def number(self):
        """One-based position."""
        return self._number

    def even(self):
        """True when the one-based number is even."""
        return not (self._number % 2)

    def odd(self):
        return not self.even()

    def even_i(self):
        """True when the zero-based index is even."""
        return not (self._index % 2)

    def odd_i(self):
        return not self.even_i()

    def letter(self):
        """'a', 'b', 'c', ..."""
        return self.Letter().lower()

    def Letter(self):
        """'A', 'B', 'C', ..."""
        return chr(ord('A') + self._index)

    def roman(self):
        """Lower-case roman numeral."""
        return self.Roman().lower()

    def Roman(self):
        """Upper-case roman numeral (delegates to module-level Roman())."""
        return Roman(self._number)

    def item(self):
        """The underlying sequence element, if one was supplied."""
        return self._item
########## PRIVATE CLASSES ##############################
class ValuesGetterMixin:
    """Holds the source list and extracts (optionally filtered) value columns."""
    def __init__(self, origList):
        self._origList = origList

    def _getValues(self, field=None, criteria=None):
        """Return the element values (or one field of each), filtered by *criteria*."""
        if field:
            values = [lookup_func(element, field) for element in self._origList]
        else:
            values = self._origList
        if criteria:
            values = [v for v in values if criteria(v)]
        return values
class RecordStats(IndexFormats, ValuesGetterMixin):
    """The statistics that depend on the current record.
    """
    def __init__(self, origList, index):
        record = origList[index] # Raises IndexError.
        IndexFormats.__init__(self, index, record)
        ValuesGetterMixin.__init__(self, origList)
    def length(self):
        """Total number of records."""
        return len(self._origList)
    def first(self):
        """True for the first record."""
        return self._index == 0
    def last(self):
        """True for the last record."""
        return self._index >= len(self._origList) - 1
    def _firstOrLastValue(self, field, currentIndex, otherIndex):
        """True when the value at currentIndex differs from its neighbor
        (a missing neighbor counts as a change boundary)."""
        currentValue = self._origList[currentIndex] # Raises IndexError.
        try:
            otherValue = self._origList[otherIndex]
        except IndexError:
            return True
        if field:
            currentValue = lookup_func(currentValue, field)
            otherValue = lookup_func(otherValue, field)
        return currentValue != otherValue
    def firstValue(self, field=None):
        return self._firstOrLastValue(field, self._index, self._index - 1)
    def lastValue(self, field=None):
        return self._firstOrLastValue(field, self._index, self._index + 1)
    # firstPage and lastPage not implemented. Needed?
    def percentOfTotal(self, field=None, suffix='%', default='N/A', decimals=2):
        """This record's share of the column total, formatted per the args."""
        rec = self._origList[self._index]
        if field:
            val = lookup_func(rec, field)
        else:
            val = rec
        try:
            # NOTE(review): isNumeric never raises NegativeError, so the
            # except below is dead; the intended criteria may have been
            # isNonNegative — confirm before relying on this behavior.
            lis = self._getValues(field, isNumeric)
        except NegativeError:
            return default
        total = sum(lis)
        if total == 0.00: # Avoid ZeroDivisionError.
            return default
        val = float(val)
        try:
            percent = (val / total) * 100
        except ZeroDivisionError:
            return default
        if decimals == 0:
            percent = int(percent)
        else:
            percent = round(percent, decimals)
        if suffix:
            return str(percent) + suffix # String.
        else:
            return percent # Numeric.
    def __call__(self): # Overrides IndexFormats.__call__
        """This instance is not callable, so we override the super method.
        """
        raise NotImplementedError()
    def prev(self):
        """Page ending just before this record, or None at the start."""
        if self._index == 0:
            return None
        else:
            length = self.length()
            start = self._index - length
            return PrevNextPage(self._origList, length, start)
    def next(self):
        # NOTE(review): the guard `self._index + self.length() ==
        # self.length()` only holds when _index == 0, which looks
        # unintended for a "no next page" test — confirm.
        if self._index + self.length() == self.length():
            return None
        else:
            length = self.length()
            start = self._index + length
            return PrevNextPage(self._origList, length, start)
    def prevPages(self):
        raise NotImplementedError()
    def nextPages(self):
        raise NotImplementedError()
    prev_batches = prevPages
    next_batches = nextPages
    def summary(self):
        raise NotImplementedError()
    def _prevNextHelper(self, start, end, size, orphan, sequence):
        """Copied from Zope's DT_InSV.py's "opt" function.
        """
        # Normalizes start/end/size against the sequence bounds.
        if size < 1:
            if start > 0 and end > 0 and end >= start:
                size=end+1-start
            else: size=7
        if start > 0:
            try: sequence[start-1]
            except: start=len(sequence)
            # if start > l: start=l
            if end > 0:
                if end < start: end=start
            else:
                end=start+size-1
                try: sequence[end+orphan-1]
                except: end=len(sequence)
                # if l - end < orphan: end=l
        elif end > 0:
            try: sequence[end-1]
            except: end=len(sequence)
            # if end > l: end=l
            start=end+1-size
            if start - 1 < orphan: start=1
        else:
            start=1
            end=start+size-1
            try: sequence[end+orphan-1]
            except: end=len(sequence)
            # if l - end < orphan: end=l
        return start, end, size
class Summary(ValuesGetterMixin):
    """The summary statistics, that don't depend on the current record.
    """
    def __init__(self, origList):
        ValuesGetterMixin.__init__(self, origList)

    def sum(self, field=None):
        """Total of the numeric values (of *field*, if given)."""
        lis = self._getValues(field, isNumeric)
        return sum(lis)
    total = sum

    def count(self, field=None):
        """Number of non-None values."""
        lis = self._getValues(field, isNotNone)
        return len(lis)

    def min(self, field=None):
        """Smallest non-None value."""
        lis = self._getValues(field, isNotNone)
        return min(lis) # Python builtin function min.

    def max(self, field=None):
        """Largest non-None value."""
        lis = self._getValues(field, isNotNone)
        return max(lis) # Python builtin function max.

    def mean(self, field=None):
        """Always returns a floating point number.
        """
        lis = self._getValues(field, isNumeric)
        return mean(lis)
    average = mean

    def median(self, field=None):
        lis = self._getValues(field, isNumeric)
        return median(lis)

    # Bug fix: the four methods below called the undefined name
    # "raiseNotImplementedError()" (a NameError at runtime); the intent
    # is clearly to raise NotImplementedError for these unimplemented stats.
    def variance(self, field=None):
        raise NotImplementedError()

    def variance_n(self, field=None):
        raise NotImplementedError()

    def standardDeviation(self, field=None):
        raise NotImplementedError()

    def standardDeviation_n(self, field=None):
        raise NotImplementedError()
class PrevNextPage:
    """Start/end index formats for an adjacent page of records."""
    def __init__(self, origList, size, start):
        end = start + size
        self.start = IndexFormats(start, origList[start])
        # NOTE(review): origList[end] can raise IndexError when the page runs
        # to the end of the list (end == len(origList)) — confirm intent.
        self.end = IndexFormats(end, origList[end])
        self.length = size
########## MAIN PUBLIC CLASS ##############################
class MondoReport:
    """Paginates a sequence and exposes per-record and summary statistics."""
    _RecordStatsClass = RecordStats
    _SummaryClass = Summary
    def __init__(self, origlist):
        self._origList = origlist
    def page(self, size, start, overlap=0, orphan=0):
        """Returns list of ($r, $a, $b)
        """
        if overlap != 0:
            raise NotImplementedError("non-zero overlap")
        if orphan != 0:
            raise NotImplementedError("non-zero orphan")
        origList = self._origList
        origList_len = len(origList)
        start = max(0, start)
        end = min( start + size, len(self._origList) )
        mySlice = origList[start:end]
        ret = []
        for rel in range(size):
            abs_ = start + rel
            r = mySlice[rel]
            # 'a' indexes into the whole list, 'b' into the page slice.
            a = self._RecordStatsClass(origList, abs_)
            b = self._RecordStatsClass(mySlice, rel)
            tup = r, a, b
            ret.append(tup)
        return ret
    batch = page
    def all(self):
        """One page covering the entire list."""
        origList_len = len(self._origList)
        return self.page(origList_len, 0, 0, 0)
    def summary(self):
        return self._SummaryClass(self._origList)
    # NOTE(review): the block below is a bare string literal (design notes /
    # dead code), not executable — including the MondoReportMixin "class".
    """
**********************************
Return a pageful of records from a sequence, with statistics.
in : origlist, list or tuple. The entire set of records. This is
usually a list of objects or a list of dictionaries.
page, int >= 0. Which page to display.
size, int >= 1. How many records per page.
widow, int >=0. Not implemented.
orphan, int >=0. Not implemented.
base, int >=0. Number of first page (usually 0 or 1).
out: list of (o, b) pairs. The records for the current page. 'o' is
the original element from 'origlist' unchanged. 'b' is a Batch
object containing meta-info about 'o'.
exc: IndexError if 'page' or 'size' is < 1. If 'origlist' is empty or
'page' is too high, it returns an empty list rather than raising
an error.
origlist_len = len(origlist)
start = (page + base) * size
end = min(start + size, origlist_len)
ret = []
# widow, orphan calculation: adjust 'start' and 'end' up and down,
# Set 'widow', 'orphan', 'first_nonwidow', 'first_nonorphan' attributes.
for i in range(start, end):
o = origlist[i]
b = Batch(origlist, size, i)
tup = o, b
ret.append(tup)
return ret
def prev(self):
# return a PrevNextPage or None
def next(self):
# return a PrevNextPage or None
def prev_batches(self):
# return a list of SimpleBatch for the previous batches
def next_batches(self):
# return a list of SimpleBatch for the next batches
########## PUBLIC MIXIN CLASS FOR CHEETAH TEMPLATES ##############
class MondoReportMixin:
def batch(self, origList, size=None, start=0, overlap=0, orphan=0):
bat = MondoReport(origList)
return bat.batch(size, start, overlap, orphan)
def batchstats(self, origList):
bat = MondoReport(origList)
return bat.stats()
"""
# vim: shiftwidth=4 tabstop=4 expandtab textwidth=79
| Python |
# $Id: CGITemplate.py,v 1.6 2006/01/29 02:09:59 tavis_rudd Exp $
"""A subclass of Cheetah.Template for use in CGI scripts.
Usage in a template:
#extends Cheetah.Tools.CGITemplate
#implements respond
$cgiHeaders#slurp
Usage in a template inheriting a Python class:
1. The template
#extends MyPythonClass
#implements respond
$cgiHeaders#slurp
2. The Python class
from Cheetah.Tools import CGITemplate
class MyPythonClass(CGITemplate):
def cgiHeadersHook(self):
return "Content-Type: text/html; charset=koi8-r\n\n"
To read GET/POST variables, use the .webInput method defined in
Cheetah.Utils.WebInputMixin (available in all templates without importing
anything), use Python's 'cgi' module, or make your own arrangements.
This class inherits from Cheetah.Template to make it usable in Cheetah's
single-inheritance model.
Meta-Data
================================================================================
Author: Mike Orr <iron@mso.oz.net>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.6 $
Start Date: 2001/10/03
Last Revision Date: $Date: 2006/01/29 02:09:59 $
"""
__author__ = "Mike Orr <iron@mso.oz.net>"
__revision__ = "$Revision: 1.6 $"[11:-2]
import os
from Cheetah.Template import Template
class CGITemplate(Template):
    """Methods useful in CGI scripts.

    Any class that inherits this mixin must also inherit Cheetah.Servlet.
    """

    def cgiHeaders(self):
        """Return the CGI headers when running as a CGI script, else None.

        Usage: $cgiHeaders#slurp
        Override .cgiHeadersHook() if you want to customize the headers.
        """
        if not self.isCgi():
            return None
        return self.cgiHeadersHook()

    def cgiHeadersHook(self):
        """Hook producing the default header block; override to customize."""
        return "Content-type: text/html\n\n"

    def isCgi(self):
        """True when a CGI environment is present and WebKit is not in control."""
        hasCgiEnv = 'REQUEST_METHOD' in os.environ
        underWebKit = self._CHEETAH__isControlledByWebKit
        return hasCgiEnv and not underWebKit
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
# $Id: SiteHierarchy.py,v 1.1 2001/10/11 03:25:54 tavis_rudd Exp $
"""Create menus and crumbs from a site hierarchy.
You define the site hierarchy as lists/tuples. Each location in the hierarchy
is a (url, description) tuple. Each list has the base URL/text in the 0
position, and all the children coming after it. Any child can be a list,
representing further depth to the hierarchy. See the end of the file for an
example hierarchy.
Use Hierarchy(contents, currentURL), where contents is this hierarchy, and
currentURL is the position you are currently in. The menubar and crumbs methods
give you the HTML output.
There are methods you can override to customize the HTML output.
"""
##################################################
## DEPENDENCIES
import string
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
##################################################
## CLASSES
class Hierarchy:
    """Render a site hierarchy as an indented menubar and as breadcrumbs.

    The hierarchy is nested lists/tuples: each location is a (url, description)
    tuple; each list holds the base location at index 0 followed by its
    children (which may themselves be lists for deeper levels).
    """

    def __init__(self, hierarchy, currentURL, prefix='', menuCSSClass=None,
                 crumbCSSClass=None):
        """
        hierarchy is described above, currentURL should be somewhere in
        the hierarchy.  prefix will be added before all of the URLs (to
        help mitigate the problems with absolute URLs), and if given,
        cssClass will be used for both links *and* nonlinks.
        """
        self._contents = hierarchy
        self._currentURL = currentURL
        if menuCSSClass:
            self._menuCSSClass = ' class="%s"' % menuCSSClass
        else:
            self._menuCSSClass = ''
        if crumbCSSClass:
            self._crumbCSSClass = ' class="%s"' % crumbCSSClass
        else:
            self._crumbCSSClass = ''
        self._prefix = prefix

    ## Main output methods

    def menuList(self, menuCSSClass=None):
        """An indented menu list."""
        if menuCSSClass:
            self._menuCSSClass = ' class="%s"' % menuCSSClass
        stream = StringIO()
        for item in self._contents[1:]:
            self._menubarRecurse(item, 0, stream)
        return stream.getvalue()

    def crumbs(self, crumbCSSClass=None):
        """The home>where>you>are breadcrumb trail."""
        if crumbCSSClass:
            self._crumbCSSClass = ' class="%s"' % crumbCSSClass
        path = []
        pos = self._contents
        while True:
            ## Walk one level down toward currentURL per iteration.
            ## Not the fastest algorithm, but site hierarchies are small.
            foundAny = False
            path.append(pos[0])
            for item in pos[1:]:
                if self._inContents(item):
                    if isinstance(item, tuple):
                        path.append(item)   # leaf: done descending
                        break
                    else:
                        pos = item          # sublist: descend into it
                        foundAny = True
                        break
            if not foundAny:
                break
        if len(path) == 1:
            return self.emptyCrumb()
        # BUGFIX: was string.join(map(lambda ...), sep) -- string.join() was
        # removed in Python 3; build the links and use str.join instead.
        links = [self.crumbLink(url, text) for url, text in path]
        return self.crumbSeperator().join(links) + self.crumbTerminator()

    ## Methods to control the Aesthetics
    #  - override these methods for your own look

    def menuLink(self, url, text, indent):
        """One menu line: bold text for the current page, a link otherwise."""
        if url == self._currentURL or self._prefix + url == self._currentURL:
            return '%s<B%s>%s</B> <BR>\n' % (' '*2*indent,
                                             self._menuCSSClass, text)
        else:
            return '%s<A HREF="%s%s"%s>%s</A> <BR>\n' % \
                   (' '*2*indent, self._prefix, url,
                    self._menuCSSClass, text)

    def crumbLink(self, url, text):
        """One crumb: bold text for the current page, a link otherwise."""
        if url == self._currentURL or self._prefix + url == self._currentURL:
            # BUGFIX: arguments were swapped -- (text, cssClass) fed into
            # '<B%s>%s</B>' put the text inside the tag and dropped the class.
            return '<B%s>%s</B>' % (self._crumbCSSClass, text)
        else:
            return '<A HREF="%s%s"%s>%s</A>' % \
                   (self._prefix, url, self._crumbCSSClass, text)

    def crumbSeperator(self):
        return ' > '

    def crumbTerminator(self):
        return ''

    def emptyCrumb(self):
        """When you are at the homepage."""
        return ''

    ## internal methods

    def _menubarRecurse(self, contents, indent, stream):
        # Render one node; recurse into children only along the active path.
        if isinstance(contents, tuple):
            url, text = contents
            rest = []
        else:
            url, text = contents[0]
            rest = contents[1:]
        stream.write(self.menuLink(url, text, indent))
        if self._inContents(contents):
            for item in rest:
                self._menubarRecurse(item, indent + 1, stream)

    def _inContents(self, contents):
        # True when currentURL lives anywhere inside this subtree.
        if isinstance(contents, tuple):
            return self._currentURL == contents[0]
        for item in contents:
            if self._inContents(item):
                return True
        return False
##################################################
## from the command line
if __name__ == '__main__':
    # Demo: render the menu and the crumbs for a few "current" URLs.
    hierarchy = [('/', 'home'),
                 ('/about', 'About Us'),
                 [('/services', 'Services'),
                  [('/services/products', 'Products'),
                   ('/services/products/widget', 'The Widget'),
                   ('/services/products/wedge', 'The Wedge'),
                   ('/services/products/thimble', 'The Thimble'),
                   ],
                  ('/services/prices', 'Prices'),
                  ],
                 ('/contact', 'Contact Us'),
                 ]
    for url in ['/', '/services', '/services/products/widget', '/contact']:
        print('<p>', '='*50)
        print('<br> %s: <br>\n' % url)
        # NOTE(review): prefix='/here' rewrites generated hrefs but currentURL
        # stays un-prefixed -- menuLink/crumbLink compare both forms, so the
        # current item is still detected.
        n = Hierarchy(hierarchy, url, menuCSSClass='menu', crumbCSSClass='crumb',
                      prefix='/here')
        print(n.menuList())
        print('<p>', '-'*50)
        print(n.crumbs())
| Python |
"""
Nothing, but in a friendly way. Good for filling in for objects you want to
hide. If $form.f1 is a RecursiveNull object, then
$form.f1.anything["you"].might("use") will resolve to the empty string.
This module was contributed by Ian Bicking.
"""
class RecursiveNull(object):
    """A friendly nothing: every attribute access, indexing, or call returns
    the same object, and it renders as the empty string.

    Useful for hiding objects in a template: if $form.f1 is a RecursiveNull,
    then $form.f1.anything["you"].might("use") resolves to ''.
    """

    def __getattr__(self, attr):
        return self

    def __getitem__(self, item):
        return self

    def __call__(self, *args, **kwargs):
        return self

    def __str__(self):
        return ''

    def __repr__(self):
        return ''

    def __bool__(self):
        # BUGFIX: the original defined only the Python 2 __nonzero__ hook, so
        # under Python 3 the object was truthy, defeating its purpose.
        return False

    # Python 2 compatibility alias (bool is a valid __nonzero__ return).
    __nonzero__ = __bool__

    def __eq__(self, x):
        # Equal to anything falsy (None, '', 0, [] ...), unequal otherwise.
        if x:
            return False
        return True

    def __ne__(self, x):
        return bool(x)

    # Defining __eq__ would otherwise set __hash__ to None under Python 3;
    # keep identity hashing as in the original Python 2 behavior.
    __hash__ = object.__hash__
| Python |
"""This package contains classes, functions, objects and packages contributed
by Cheetah users. They are not used by Cheetah itself. There is no
guarantee that this directory will be included in Cheetah releases, that
these objects will remain here forever, or that they will remain
backward-compatible.
"""
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
Version = '2.4.1'  # human-readable release string
VersionTuple = (2, 4, 1, 'final', 0)  # comparable: (major, minor, micro, releaseType, subNum)
MinCompatibleVersion = '2.0rc6'  # oldest compatible release, as a string
MinCompatibleVersionTuple = (2, 0, 0, 'candidate', 6)
####
def convertVersionStringToTuple(s):
    """Convert a version string like '2.0', '2.0a1', '2.0b1' or '2.0rc1' into
    a comparable 5-tuple (major, minor, micro, releaseType, subNum).

    Tuples compare correctly because the release-type strings happen to sort
    in release order: 'alpha' < 'beta' < 'candidate' < 'final'.

    Raises ValueError on strings that don't fit the '<nums><marker><num>'
    shape (e.g. the marker letter appearing more than once).
    """
    releaseType = 'final'
    releaseTypeSubNum = 0
    # Order matters only in that each marker must be tested before any marker
    # containing it as a substring; 'a', 'b' and 'rc' are disjoint here.
    if 'a' in s:
        num, releaseTypeSubNum = s.split('a')
        releaseType = 'alpha'
    elif 'b' in s:
        num, releaseTypeSubNum = s.split('b')
        releaseType = 'beta'
    elif 'rc' in s:
        num, releaseTypeSubNum = s.split('rc')
        releaseType = 'candidate'
    else:
        num = s
    # Pad missing minor/micro parts with zeros ('2.0' -> [2, 0, 0]).
    # (The original also had a dead `if len(versionNum) < 3` pad -- the list
    # always starts at length 3 -- which is dropped here.)
    versionNum = [0, 0, 0]
    for i, part in enumerate(num.split('.')):
        versionNum[i] = int(part)
    return tuple(versionNum + [releaseType, int(releaseTypeSubNum)])
if __name__ == '__main__':
    # Self-test: parse assorted version strings and verify tuple ordering.
    # Ordering works because 'alpha' < 'beta' < 'candidate' < 'final'
    # compare correctly as plain strings.
    c = convertVersionStringToTuple
    print(c('2.0a1'))
    print(c('2.0b1'))
    print(c('2.0rc1'))
    print(c('2.0'))
    print(c('2.0.2'))
    assert c('0.9.19b1') < c('0.9.19')
    assert c('0.9b1') < c('0.9.19')
    assert c('2.0a2') > c('2.0a1')
    assert c('2.0b1') > c('2.0a2')
    assert c('2.0b2') > c('2.0b1')
    assert c('2.0b2') == c('2.0b2')
    assert c('2.0rc1') > c('2.0b1')
    assert c('2.0rc2') > c('2.0rc1')
    assert c('2.0rc2') > c('2.0b1')
    assert c('2.0') > c('2.0a1')
    assert c('2.0') > c('2.0b1')
    assert c('2.0') > c('2.0rc1')
    assert c('2.0.1') > c('2.0')
    assert c('2.0rc1') > c('2.0b1')
| Python |
"""SourceReader class for Cheetah's Parser and CodeGenerator
"""
import re
import sys
# Trailing horizontal whitespace followed by a line ending.
EOLre = re.compile(r'[ \f\t]*(?:\r\n|\r|\n)')
# A line ending OR end-of-string, with no leading-whitespace gobble.
EOLZre = re.compile(r'(?:\r\n|\r|\n|\Z)')
# PEP 263-style encoding declaration, e.g. "# -*- coding: utf-8 -*-".
# BUGFIX: made the pattern a raw string -- '\s'/'\w' in a plain string are
# invalid escapes (DeprecationWarning, a SyntaxWarning/Error in newer Python 3).
ENCODINGsearch = re.compile(r"coding[=:]\s*([-\w.]+)").search
class Error(Exception):
    """Raised for invalid positions, bookmarks, or break points."""
class SourceReader(object):
    """Character-stream reader over Cheetah template source.

    Tracks a current position (`pos`), an optional `breakPoint` that caps how
    far reads may go, named bookmarks, and pre-computed line-offset tables for
    fast row/column reporting.
    """

    def __init__(self, src, filename=None, breakPoint=None, encoding=None):
        # NOTE: `encoding` is currently unused.  A historical patch (Terrel
        # Shumway, 2005) decoded `src` here, but it forced every source to
        # unicode and was disabled; decoding is left to the caller.
        self._src = src
        self._filename = filename
        self._srcLen = len(src)
        if breakPoint is None:
            self._breakPoint = self._srcLen
        else:
            self.setBreakPoint(breakPoint)
        self._pos = 0
        self._bookmarks = {}
        self._posTobookmarkMap = {}
        # Pre-compute end-of-line / beginning-of-line offsets so that
        # lineNum()/getRowCol() are simple scans over small lists.
        self._EOLs = []
        pos = 0
        while pos < len(self):
            EOLmatch = EOLZre.search(src, pos)
            self._EOLs.append(EOLmatch.start())
            pos = EOLmatch.end()
        self._BOLs = []
        for pos in self._EOLs:
            self._BOLs.append(self.findBOL(pos))

    def src(self):
        return self._src

    def filename(self):
        return self._filename

    def __len__(self):
        # Length is capped at the break point, not the full source string.
        return self._breakPoint

    def __getitem__(self, i):
        self.checkPos(i)
        return self._src[i]

    def __getslice__(self, i, j):
        # Python 2 relic: py3 slicing never calls this.  Kept for py2 callers.
        i = max(i, 0)
        j = max(j, 0)
        return self._src[i:j]

    def splitlines(self):
        # Lazily cached list of source lines.
        if not hasattr(self, '_srcLines'):
            self._srcLines = self._src.splitlines()
        return self._srcLines

    def lineNum(self, pos=None):
        """Return the 0-based line number containing `pos` (None if no line
        range brackets it)."""
        if pos is None:
            pos = self._pos
        for i in range(len(self._BOLs)):
            if pos >= self._BOLs[i] and pos <= self._EOLs[i]:
                return i

    def getRowCol(self, pos=None):
        """Return (row, col), both 1-based."""
        if pos is None:
            pos = self._pos
        lineNum = self.lineNum(pos)
        BOL, EOL = self._BOLs[lineNum], self._EOLs[lineNum]
        return lineNum + 1, pos - BOL + 1

    def getRowColLine(self, pos=None):
        """Return (row, col, lineText) for error reporting."""
        if pos is None:
            pos = self._pos
        row, col = self.getRowCol(pos)
        return row, col, self.splitlines()[row - 1]

    def getLine(self, pos):
        if pos is None:
            pos = self._pos
        lineNum = self.lineNum(pos)
        return self.splitlines()[lineNum]

    def pos(self):
        return self._pos

    def setPos(self, pos):
        self.checkPos(pos)
        self._pos = pos

    def validPos(self, pos):
        return 0 <= pos <= self._breakPoint

    def checkPos(self, pos):
        """Raise Error when `pos` lies outside [0, breakPoint]."""
        if pos > self._breakPoint:
            raise Error("pos (" + str(pos) + ") is invalid: beyond the stream's end (" +
                        str(self._breakPoint - 1) + ")")
        elif pos < 0:
            raise Error("pos (" + str(pos) + ") is invalid: less than 0")

    def breakPoint(self):
        return self._breakPoint

    def setBreakPoint(self, pos):
        if pos > self._srcLen:
            raise Error("New breakpoint (" + str(pos) +
                        ") is invalid: beyond the end of stream's source string (" +
                        str(self._srcLen) + ")")
        elif pos < 0:
            raise Error("New breakpoint (" + str(pos) + ") is invalid: less than 0")
        self._breakPoint = pos

    def setBookmark(self, name):
        self._bookmarks[name] = self._pos
        self._posTobookmarkMap[self._pos] = name

    def hasBookmark(self, name):
        return name in self._bookmarks

    def gotoBookmark(self, name):
        if not self.hasBookmark(name):
            raise Error("Invalid bookmark (" + name + ") is invalid: does not exist")
        pos = self._bookmarks[name]
        if not self.validPos(pos):
            raise Error("Invalid bookmark (" + name + ', ' +
                        str(pos) + ") is invalid: pos is out of range")
        self._pos = pos

    def atEnd(self):
        return self._pos >= self._breakPoint

    def atStart(self):
        return self._pos == 0

    def peek(self, offset=0):
        """Return the character at pos+offset without advancing."""
        self.checkPos(self._pos + offset)
        return self._src[self._pos + offset]

    def getc(self):
        """Return the current character, advancing when not at the end."""
        pos = self._pos
        if self.validPos(pos + 1):
            self._pos += 1
        return self._src[pos]

    def ungetc(self, c=None):
        """Step back one character, optionally replacing it with `c`."""
        # BUGFIX: the guard was inverted ("if not self.atStart(): raise"),
        # which raised on every legitimate ungetc and let pos go negative
        # when actually at the start.
        if self.atStart():
            raise Error('Already at beginning of stream')
        self._pos -= 1
        if c is not None:
            # BUGFIX: strings are immutable, so the original item assignment
            # (self._src[self._pos] = c) always raised TypeError; rebuild the
            # source instead.  Assumes len(c) == 1 and c is not a newline so
            # the cached line-offset tables stay valid.
            self._src = self._src[:self._pos] + c + self._src[self._pos + 1:]

    def advance(self, offset=1):
        self.checkPos(self._pos + offset)
        self._pos += offset

    def rev(self, offset=1):
        self.checkPos(self._pos - offset)
        self._pos -= offset

    def read(self, offset):
        """Consume and return the next `offset` characters."""
        self.checkPos(self._pos + offset)
        start = self._pos
        self._pos += offset
        return self._src[start:self._pos]

    def readTo(self, to, start=None):
        """Consume and return characters up to position `to`."""
        self.checkPos(to)
        if start is None:
            start = self._pos
        self._pos = to
        return self._src[start:to]

    def readToEOL(self, start=None, gobble=True):
        """Read to the end of the current line; `gobble` includes the EOL chars."""
        EOLmatch = EOLZre.search(self.src(), self.pos())
        if gobble:
            pos = EOLmatch.end()
        else:
            pos = EOLmatch.start()
        return self.readTo(to=pos, start=start)

    def find(self, it, pos=None):
        if pos is None:
            pos = self._pos
        return self._src.find(it, pos)

    def startswith(self, it, pos=None):
        return self.find(it, pos) == self.pos()

    def rfind(self, it, pos):
        if pos is None:
            pos = self._pos
        return self._src.rfind(it, pos)

    def findBOL(self, pos=None):
        """Offset of the beginning of the line containing `pos`."""
        if pos is None:
            pos = self._pos
        src = self.src()
        return max(src.rfind('\n', 0, pos) + 1, src.rfind('\r', 0, pos) + 1, 0)

    def findEOL(self, pos=None, gobble=False):
        """Offset of the end of the line containing `pos`."""
        if pos is None:
            pos = self._pos
        match = EOLZre.search(self.src(), pos)
        if gobble:
            return match.end()
        else:
            return match.start()

    def isLineClearToPos(self, pos=None):
        """True when only whitespace precedes `pos` on its line."""
        if pos is None:
            pos = self.pos()
        self.checkPos(pos)
        src = self.src()
        # NOTE(review): uses the BOL of the *current* position, not of `pos`;
        # only correct when pos is on the current line -- confirm before
        # changing.
        BOL = self.findBOL()
        return BOL == pos or src[BOL:pos].isspace()

    def matches(self, strOrRE):
        """Match a string (prefix test) or compiled regex at the current pos."""
        # BUGFIX: the original tested isinstance(strOrRE, (str, unicode));
        # `unicode` does not exist under Python 3 and raised NameError.
        if isinstance(strOrRE, str):
            return self.startswith(strOrRE, pos=self.pos())
        else:  # assume a compiled regex object
            return strOrRE.match(self.src(), self.pos())

    def matchWhiteSpace(self, WSchars=' \f\t'):
        return (not self.atEnd()) and self.peek() in WSchars

    def getWhiteSpace(self, max=None, WSchars=' \f\t'):
        """Consume and return a run of whitespace, at most `max` characters."""
        if not self.matchWhiteSpace(WSchars):
            return ''
        start = self.pos()
        breakPoint = self.breakPoint()
        if max is not None:
            breakPoint = min(breakPoint, self.pos() + max)
        while self.pos() < breakPoint:
            self.advance()
            if not self.matchWhiteSpace(WSchars):
                break
        return self.src()[start:self.pos()]

    def matchNonWhiteSpace(self, WSchars=' \f\t\n\r'):
        return self.atEnd() or not self.peek() in WSchars

    def getNonWhiteSpace(self, WSchars=' \f\t\n\r'):
        """Consume and return a run of non-whitespace characters."""
        if not self.matchNonWhiteSpace(WSchars):
            return ''
        start = self.pos()
        while self.pos() < self.breakPoint():
            self.advance()
            if not self.matchNonWhiteSpace(WSchars):
                break
        return self.src()[start:self.pos()]
| Python |
import gettext
_ = gettext.gettext
class I18n(object):
    """Implements Cheetah's #i18n macro.  Currently a stub that only routes
    the message text through gettext."""

    def __init__(self, parser):
        # No per-parser state is needed yet.
        pass

    ## junk I'm playing with to test the macro framework
    # def parseArgs(self, parser, startPos):
    #     parser.getWhiteSpace()
    #     args = parser.getExpression(useNameMapper=False,
    #                                 pyTokensToBreakAt=[':']).strip()
    #     return args
    #
    # def convertArgStrToDict(self, args, parser=None, startPos=None):
    #     def getArgs(*pargs, **kws):
    #         return pargs, kws
    #     exec 'positionalArgs, kwArgs = getArgs(%(args)s)'%locals()
    #     return kwArgs

    def __call__(self,
                 src,  # aka message,
                 plural=None,
                 n=None,  # should be a string representing the name of the
                          # '$var' rather than $var itself
                 id=None,
                 domain=None,
                 source=None,
                 target=None,
                 comment=None,
                 # args that are automatically supplied by the parser when the
                 # macro is called:
                 parser=None,
                 macros=None,
                 isShortForm=False,
                 EOLCharsInShortForm=None,
                 startPos=None,
                 endPos=None,
                 ):
        """This is just a stub at this time.

        plural = the plural form of the message
        n = a sized argument to distinguish between single and plural forms
        id = msgid in the translation catalog
        domain = translation domain
        source = source lang
        target = a specific target lang
        comment = a comment to the translation team

        See the following for some ideas
        http://www.zope.org/DevHome/Wikis/DevSite/Projects/ComponentArchitecture/ZPTInternationalizationSupport

        Other notes:
        - There is no need to replicate the i18n:name attribute from plone / PTL,
          as cheetah placeholders serve the same purpose
        """
        #print macros['i18n']
        src = _(src)  # module-level gettext hook
        if isShortForm and endPos < len(parser):
            # short-form macros consume the EOL characters; restore them so
            # the surrounding template text keeps its line structure
            return src + EOLCharsInShortForm
        else:
            return src
| Python |
#
| Python |
# $Id: ErrorCatchers.py,v 1.7 2005/01/03 19:59:07 tavis_rudd Exp $
"""ErrorCatcher class for Cheetah Templates
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
Version: $Revision: 1.7 $
Start Date: 2001/08/01
Last Revision Date: $Date: 2005/01/03 19:59:07 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.7 $"[11:-2]
import time
from Cheetah.NameMapper import NotFound
class Error(Exception):
    """Base exception type for the ErrorCatchers module."""
class ErrorCatcher:
    """Base error catcher: on failure, fall back to echoing the raw
    placeholder source."""

    # Exception types the template runtime should intercept for this catcher.
    _exceptionsToCatch = (NotFound,)

    def __init__(self, templateObj):
        # Stateless by default; subclasses may keep per-template state.
        pass

    def exceptions(self):
        """Return the tuple of exception types this catcher handles."""
        return self._exceptionsToCatch

    def warn(self, exc_val, code, rawCode, lineCol):
        """Default handling: return the raw placeholder code unchanged."""
        return rawCode
## make an alias -- the default catcher simply echoes the raw placeholder code
Echo = ErrorCatcher
class BigEcho(ErrorCatcher):
    """Echo the failing placeholder code wrapped in a loud '=' banner."""

    def warn(self, exc_val, code, rawCode, lineCol):
        banner = "=" * 15
        return '%s<%s could not be found>%s' % (banner, rawCode, banner)
class KeyError(ErrorCatcher):
    """Raise the builtin KeyError when a placeholder cannot be found.

    NOTE: this class deliberately shadows the builtin KeyError at module
    scope (its name is part of the #errorCatcher directive interface), so
    the real builtin must be fetched explicitly inside warn().
    """

    def warn(self, exc_val, code, rawCode, lineCol):
        # BUGFIX: a bare `raise KeyError(...)` here resolves to *this* class,
        # which is not a BaseException subclass -- under Python 3 that raises
        # TypeError instead of the intended builtin KeyError.
        import builtins
        raise builtins.KeyError("no '%s' in this Template Object's Search List" % rawCode)
class ListErrors(ErrorCatcher):
    """Accumulate a list of errors."""

    _timeFormat = "%c"

    def __init__(self, templateObj):
        ErrorCatcher.__init__(self, templateObj)
        self._errors = []

    def warn(self, exc_val, code, rawCode, lineCol):
        """Record the failure details, then echo the raw placeholder code."""
        record = {
            'exc_val': exc_val,
            'code': code,
            'rawCode': rawCode,
            'lineCol': lineCol,
            'time': time.strftime(self._timeFormat,
                                  time.localtime(time.time())),
        }
        self._errors.append(record)
        return rawCode

    def listErrors(self):
        """Return the accumulated error records."""
        return self._errors
| Python |
'''
Compiler classes for Cheetah:
ModuleCompiler aka 'Compiler'
ClassCompiler
MethodCompiler
If you are trying to grok this code start with ModuleCompiler.__init__,
ModuleCompiler.compile, and ModuleCompiler.__getattr__.
'''
import sys
import os
import os.path
from os.path import getmtime, exists
import re
import types
import time
import random
import warnings
import copy
from Cheetah.Version import Version, VersionTuple
from Cheetah.SettingsManager import SettingsManager
from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor
from Cheetah import ErrorCatchers
from Cheetah import NameMapper
from Cheetah.Parser import Parser, ParseError, specialVarRE, \
STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL, SET_MODULE, \
unicodeDirectiveRE, encodingDirectiveRE, escapedNewlineRE
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
class Error(Exception):
    """Base class for Cheetah compiler errors."""
# Settings format: (key, default, docstring)
_DEFAULT_COMPILER_SETTINGS = [
('useNameMapper', True, 'Enable NameMapper for dotted notation and searchList support'),
('useSearchList', True, 'Enable the searchList, requires useNameMapper=True, if disabled, first portion of the $variable is a global, builtin, or local variable that doesn\'t need looking up in the searchList'),
('allowSearchListAsMethArg', True, ''),
('useAutocalling', True, 'Detect and call callable objects in searchList, requires useNameMapper=True'),
('useStackFrames', True, 'Used for NameMapper.valueFromFrameOrSearchList rather than NameMapper.valueFromSearchList'),
('useErrorCatcher', False, 'Turn on the #errorCatcher directive for catching NameMapper errors, etc'),
('alwaysFilterNone', True, 'Filter out None prior to calling the #filter'),
('useFilters', True, 'If False, pass output through str()'),
('includeRawExprInFilterArgs', True, ''),
('useLegacyImportMode', True, 'All #import statements are relocated to the top of the generated Python module'),
('prioritizeSearchListOverSelf', False, 'When iterating the searchList, look into the searchList passed into the initializer instead of Template members first'),
('autoAssignDummyTransactionToSelf', False, ''),
('useKWsDictArgForPassingTrans', True, ''),
('commentOffset', 1, ''),
('outputRowColComments', True, ''),
('includeBlockMarkers', False, 'Wrap #block\'s in a comment in the template\'s output'),
('blockMarkerStart', ('\n<!-- START BLOCK: ', ' -->\n'), ''),
('blockMarkerEnd', ('\n<!-- END BLOCK: ', ' -->\n'), ''),
('defDocStrMsg', 'Autogenerated by Cheetah: The Python-Powered Template Engine', ''),
('setup__str__method', False, ''),
('mainMethodName', 'respond', ''),
('mainMethodNameForSubclasses', 'writeBody', ''),
('indentationStep', ' ' * 4, ''),
('initialMethIndentLevel', 2, ''),
('monitorSrcFile', False, ''),
('outputMethodsBeforeAttributes', True, ''),
('addTimestampsToCompilerOutput', True, ''),
## Customizing the #extends directive
('autoImportForExtendsDirective', True, ''),
('handlerForExtendsDirective', None, ''),
('disabledDirectives', [], 'List of directive keys to disable (without starting "#")'),
('enabledDirectives', [], 'List of directive keys to enable (without starting "#")'),
('disabledDirectiveHooks', [], 'callable(parser, directiveKey)'),
('preparseDirectiveHooks', [], 'callable(parser, directiveKey)'),
('postparseDirectiveHooks', [], 'callable(parser, directiveKey)'),
('preparsePlaceholderHooks', [], 'callable(parser)'),
('postparsePlaceholderHooks', [], 'callable(parser)'),
('expressionFilterHooks', [], '''callable(parser, expr, exprType, rawExpr=None, startPos=None), exprType is the name of the directive, "psp" or "placeholder" The filters *must* return the expr or raise an expression, they can modify the expr if needed'''),
('templateMetaclass', None, 'Strictly optional, only will work with new-style basecalsses as well'),
('i18NFunctionName', 'self.i18n', ''),
('cheetahVarStartToken', '$', ''),
('commentStartToken', '##', ''),
('multiLineCommentStartToken', '#*', ''),
('multiLineCommentEndToken', '*#', ''),
('gobbleWhitespaceAroundMultiLineComments', True, ''),
('directiveStartToken', '#', ''),
('directiveEndToken', '#', ''),
('allowWhitespaceAfterDirectiveStartToken', False, ''),
('PSPStartToken', '<%', ''),
('PSPEndToken', '%>', ''),
('EOLSlurpToken', '#', ''),
('gettextTokens', ["_", "N_", "ngettext"], ''),
('allowExpressionsInExtendsDirective', False, ''),
('allowEmptySingleLineMethods', False, ''),
('allowNestedDefScopes', True, ''),
('allowPlaceholderFilterArgs', True, ''),
]
DEFAULT_COMPILER_SETTINGS = dict([(v[0], v[1]) for v in _DEFAULT_COMPILER_SETTINGS])
class GenUtils(object):
    """Abstract base for the Compiler classes: utility methods that turn
    structures handed over by the Parser into fragments of output Python
    code.  No parsing happens here.
    """

    def genTimeInterval(self, timeString):
        """Convert '30s'/'5m'/'2h'/'1d'/'1w' (or a bare number of minutes)
        into a float number of seconds."""
        ##@@ TR: need to add some error handling here
        suffix = timeString[-1]
        if suffix == 's':
            return float(timeString[:-1])
        elif suffix == 'm':
            return float(timeString[:-1]) * 60
        elif suffix == 'h':
            return float(timeString[:-1]) * 60 * 60
        elif suffix == 'd':
            return float(timeString[:-1]) * 60 * 60 * 24
        elif suffix == 'w':
            return float(timeString[:-1]) * 60 * 60 * 24 * 7
        # no recognized suffix: default to minutes
        return float(timeString) * 60

    def genCacheInfo(self, cacheTokenParts):
        """Decipher a placeholder cache token; returns {} when not cached."""
        cacheInfo = {}
        if cacheTokenParts['REFRESH_CACHE']:
            cacheInfo['type'] = REFRESH_CACHE
            cacheInfo['interval'] = self.genTimeInterval(cacheTokenParts['interval'])
        elif cacheTokenParts['STATIC_CACHE']:
            cacheInfo['type'] = STATIC_CACHE
        return cacheInfo

    def genCacheInfoFromArgList(self, argList):
        """Build cache info from #cache directive (key, value) arguments."""
        cacheInfo = {'type': REFRESH_CACHE}
        for key, val in argList:
            if val[0] in '"\'':
                val = val[1:-1]  # strip the surrounding quotes
            if key == 'timer':
                key = 'interval'
                val = self.genTimeInterval(val)
            cacheInfo[key] = val
        return cacheInfo

    def genCheetahVar(self, nameChunks, plain=False):
        """Emit Python code for a Cheetah $var; NameMapper unless `plain`."""
        if nameChunks[0][0] in self.setting('gettextTokens'):
            self.addGetTextVar(nameChunks)
        if self.setting('useNameMapper') and not plain:
            return self.genNameMapperVar(nameChunks)
        return self.genPlainVar(nameChunks)

    def addGetTextVar(self, nameChunks):
        """Emit a dead `if False:` branch so gettext's source scanner can
        find strings marked for translation.  Harmless at runtime.
        @@TR: another marginally more efficient approach would be to put the
        output in a dummy method that is never called.
        """
        # @@TR: this should be in the compiler not here
        self.addChunk("if False:")
        self.indent()
        self.addChunk(self.genPlainVar(nameChunks[:]))
        self.dedent()

    def genPlainVar(self, nameChunks):
        """Emit Python code for a $var without NameMapper (plain dotted
        access).  NOTE: consumes `nameChunks` (reverses/pops it in place).
        """
        nameChunks.reverse()
        name, _useAC, remainder = nameChunks.pop()
        pythonCode = name + remainder
        while nameChunks:
            name, _useAC, remainder = nameChunks.pop()
            pythonCode = pythonCode + '.' + name + remainder
        return pythonCode

    def genNameMapperVar(self, nameChunks):
        """Emit Python code for a $var using NameMapper (Unified Dotted
        Notation with the SearchList).

        `nameChunks` is a list of (name, useAC, remainderOfExpr) tuples; e.g.
        $a.b.c[1].d().x.y.z becomes
            [('a.b.c', True, '[1]'), ('d', False, '()'), ('x.y.z', True, '')]
        and generates
            VFN(VFN(VFFSL(SL, 'a.b.c', True)[1], 'd', False)(), 'x.y.z', True)

        `useAC` is ANDed with the global 'useAutocalling' setting.  With
        'useSearchList' off, the first chunk is referenced directly (or via
        VFN when dotted).  With 'useStackFrames' off, VFSL over
        [locals()]+SL+[globals(), __builtin__] replaces VFFSL -- needed for
        Psyco, which can't introspect stack frames.

        NOTE: consumes `nameChunks` (reverses/pops it in place).
        """
        defaultUseAC = self.setting('useAutocalling')
        useSearchList = self.setting('useSearchList')

        nameChunks.reverse()
        name, useAC, remainder = nameChunks.pop()

        if not useSearchList:
            firstDotIdx = name.find('.')
            if firstDotIdx != -1 and firstDotIdx < len(name):
                beforeFirstDot = name[:firstDotIdx]
                afterDot = name[firstDotIdx + 1:]
                pythonCode = ('VFN(' + beforeFirstDot +
                              ',"' + afterDot +
                              '",' + repr(defaultUseAC and useAC) + ')'
                              + remainder)
            else:
                pythonCode = name + remainder
        elif self.setting('useStackFrames'):
            pythonCode = ('VFFSL(SL,'
                          '"' + name + '",'
                          + repr(defaultUseAC and useAC) + ')'
                          + remainder)
        else:
            pythonCode = ('VFSL([locals()]+SL+[globals(), __builtin__],'
                          '"' + name + '",'
                          + repr(defaultUseAC and useAC) + ')'
                          + remainder)

        while nameChunks:
            name, useAC, remainder = nameChunks.pop()
            pythonCode = ('VFN(' + pythonCode +
                          ',"' + name +
                          '",' + repr(defaultUseAC and useAC) + ')'
                          + remainder)
        return pythonCode
##################################################
## METHOD COMPILERS
class MethodCompiler(GenUtils):
    def __init__(self, methodName, classCompiler,
                 initialMethodComment=None,
                 decorators=None):
        """Compile one method of the generated template class.

        `classCompiler` doubles as this method's settings manager.
        """
        self._settingsManager = classCompiler
        self._classCompiler = classCompiler
        self._moduleCompiler = classCompiler._moduleCompiler
        self._methodName = methodName
        self._initialMethodComment = initialMethodComment
        self._setupState()
        # normalize None to a fresh list so each method owns its decorators
        self._decorators = decorators or []
def setting(self, key):
return self._settingsManager.setting(key)
    def _setupState(self):
        """Reset all per-method compilation state from the current settings."""
        self._indent = self.setting('indentationStep')
        self._indentLev = self.setting('initialMethIndentLevel')
        # string constants waiting to be flushed by commitStrConst()
        self._pendingStrConstChunks = []
        self._methodSignature = None
        self._methodDef = None
        self._docStringLines = []
        self._methodBodyChunks = []
        # region stacks track nested #cache / #call / #capture / #filter blocks
        self._cacheRegionsStack = []
        self._callRegionsStack = []
        self._captureRegionsStack = []
        self._filterRegionsStack = []
        self._isErrorCatcherOn = False
        self._hasReturnStatement = False
        self._isGenerator = False
def cleanupState(self):
"""Called by the containing class compiler instance
"""
pass
def methodName(self):
return self._methodName
def setMethodName(self, name):
self._methodName = name
## methods for managing indentation
def indentation(self):
return self._indent * self._indentLev
def indent(self):
self._indentLev +=1
def dedent(self):
if self._indentLev:
self._indentLev -=1
else:
raise Error('Attempt to dedent when the indentLev is 0')
## methods for final code wrapping
def methodDef(self):
if self._methodDef:
return self._methodDef
else:
return self.wrapCode()
__str__ = methodDef
__unicode__ = methodDef
def wrapCode(self):
self.commitStrConst()
methodDefChunks = (
self.methodSignature(),
'\n',
self.docString(),
self.methodBody() )
methodDef = ''.join(methodDefChunks)
self._methodDef = methodDef
return methodDef
def methodSignature(self):
return self._indent + self._methodSignature + ':'
def setMethodSignature(self, signature):
self._methodSignature = signature
def methodBody(self):
return ''.join( self._methodBodyChunks )
def docString(self):
if not self._docStringLines:
return ''
ind = self._indent*2
docStr = (ind + '"""\n' + ind +
('\n' + ind).join([ln.replace('"""', "'''") for ln in self._docStringLines]) +
'\n' + ind + '"""\n')
return docStr
## methods for adding code
def addMethDocString(self, line):
self._docStringLines.append(line.replace('%', '%%'))
def addChunk(self, chunk):
self.commitStrConst()
chunk = "\n" + self.indentation() + chunk
self._methodBodyChunks.append(chunk)
def appendToPrevChunk(self, appendage):
self._methodBodyChunks[-1] = self._methodBodyChunks[-1] + appendage
def addWriteChunk(self, chunk):
self.addChunk('write(' + chunk + ')')
    def addFilteredChunk(self, chunk, filterArgs=None, rawExpr=None, lineCol=None):
        """Emit code that writes expression `chunk` through the output filter.

        With 'alwaysFilterNone' set, the expression is first bound to a
        temporary `_v` so None can be dropped before filtering/writing.
        """
        if filterArgs is None:
            filterArgs = ''
        if self.setting('includeRawExprInFilterArgs') and rawExpr:
            filterArgs += ', rawExpr=%s'%repr(rawExpr)
        if self.setting('alwaysFilterNone'):
            if rawExpr and rawExpr.find('\n')==-1 and rawExpr.find('\r')==-1:
                # single-line expressions get their raw source echoed in a
                # trailing comment (plus line/col when known) for debugging
                self.addChunk("_v = %s # %r"%(chunk, rawExpr))
                if lineCol:
                    self.appendToPrevChunk(' on line %s, col %s'%lineCol)
            else:
                self.addChunk("_v = %s"%chunk)
            if self.setting('useFilters'):
                self.addChunk("if _v is not None: write(_filter(_v%s))"%filterArgs)
            else:
                self.addChunk("if _v is not None: write(str(_v))")
        else:
            if self.setting('useFilters'):
                self.addChunk("write(_filter(%s%s))"%(chunk, filterArgs))
            else:
                self.addChunk("write(str(%s))"%chunk)
def _appendToPrevStrConst(self, strConst):
if self._pendingStrConstChunks:
self._pendingStrConstChunks.append(strConst)
else:
self._pendingStrConstChunks = [strConst]
    def commitStrConst(self):
        """Add the code for outputting the pending strConst without chopping off
        any whitespace from it.

        The buffered chunks are joined, repr()'d, and re-wrapped as a
        triple-quoted literal so escaped newlines become real newlines in the
        generated source.
        """
        if not self._pendingStrConstChunks:
            return

        strConst = ''.join(self._pendingStrConstChunks)
        self._pendingStrConstChunks = []
        if not strConst:
            return

        reprstr = repr(strConst)
        i = 0
        out = []
        # Python 2: preserve the u'' prefix of unicode literals.
        if reprstr.startswith('u'):
            i = 1
            out = ['u']
        # escapedNewlineRE is module-level; turns escaped newlines back into
        # literal newlines inside the triple-quoted body.
        body = escapedNewlineRE.sub('\\1\n', reprstr[i+1:-1])

        # Match the triple-quote style to repr()'s chosen quote character.
        if reprstr[i]=="'":
            out.append("'''")
            out.append(body)
            out.append("'''")
        else:
            out.append('"""')
            out.append(body)
            out.append('"""')
        self.addWriteChunk(''.join(out))
def handleWSBeforeDirective(self):
"""Truncate the pending strCont to the beginning of the current line.
"""
if self._pendingStrConstChunks:
src = self._pendingStrConstChunks[-1]
BOL = max(src.rfind('\n')+1, src.rfind('\r')+1, 0)
if BOL < len(src):
self._pendingStrConstChunks[-1] = src[:BOL]
def isErrorCatcherOn(self):
return self._isErrorCatcherOn
def turnErrorCatcherOn(self):
self._isErrorCatcherOn = True
def turnErrorCatcherOff(self):
self._isErrorCatcherOn = False
# @@TR: consider merging the next two methods into one
def addStrConst(self, strConst):
self._appendToPrevStrConst(strConst)
def addRawText(self, text):
self.addStrConst(text)
def addMethComment(self, comm):
offSet = self.setting('commentOffset')
self.addChunk('#' + ' '*offSet + comm)
    def addPlaceholder(self, expr, filterArgs, rawPlaceholder,
                       cacheTokenParts, lineCol,
                       silentMode=False):
        """Emit the code for a $placeholder: an optional cache region, an
        optional error-catcher wrapper, then the filtered write of *expr*.

        silentMode swallows NotFound errors instead of propagating them.
        """
        cacheInfo = self.genCacheInfo(cacheTokenParts)
        if cacheInfo:
            cacheInfo['ID'] = repr(rawPlaceholder)[1:-1]
            self.startCacheRegion(cacheInfo, lineCol, rawPlaceholder=rawPlaceholder)
        if self.isErrorCatcherOn():
            # Reroute the expression through a generated catcher method on
            # the class compiler.
            methodName = self._classCompiler.addErrorCatcherCall(
                expr, rawCode=rawPlaceholder, lineCol=lineCol)
            expr = 'self.' + methodName + '(localsDict=locals())'
        if silentMode:
            self.addChunk('try:')
            self.indent()
            self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
            self.dedent()
            self.addChunk('except NotFound: pass')
        else:
            self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
        if self.setting('outputRowColComments'):
            self.appendToPrevChunk(' # from line %s, col %s' % lineCol + '.')
        if cacheInfo:
            self.endCacheRegion()
def addSilent(self, expr):
self.addChunk( expr )
def addEcho(self, expr, rawExpr=None):
self.addFilteredChunk(expr, rawExpr=rawExpr)
    def addSet(self, expr, exprComponents, setStyle):
        """Emit a #set assignment.

        SET_GLOBAL rewrites the LVALUE to live in self._CHEETAH__globalSetVars;
        SET_MODULE routes the statement to the module compiler; any other
        style emits the assignment inline.
        """
        if setStyle is SET_GLOBAL:
            (LVALUE, OP, RVALUE) = (exprComponents.LVALUE,
                                    exprComponents.OP,
                                    exprComponents.RVALUE)
            # we need to split the LVALUE to deal with globalSetVars:
            # only the first name becomes the dict key; any trailing
            # attribute/subscript access is re-applied to the stored value.
            splitPos1 = LVALUE.find('.')
            splitPos2 = LVALUE.find('[')
            if splitPos1 > 0 and splitPos2==-1:
                splitPos = splitPos1
            elif splitPos1 > 0 and splitPos1 < max(splitPos2, 0):
                splitPos = splitPos1
            else:
                splitPos = splitPos2

            if splitPos >0:
                primary = LVALUE[:splitPos]
                secondary = LVALUE[splitPos:]
            else:
                primary = LVALUE
                secondary = ''
            LVALUE = 'self._CHEETAH__globalSetVars["' + primary + '"]' + secondary
            expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()

        if setStyle is SET_MODULE:
            self._moduleCompiler.addModuleGlobal(expr)
        else:
            self.addChunk(expr)
def addInclude(self, sourceExpr, includeFrom, isRaw):
self.addChunk('self._handleCheetahInclude(' + sourceExpr +
', trans=trans, ' +
'includeFrom="' + includeFrom + '", raw=' +
repr(isRaw) + ')')
def addWhile(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addFor(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addRepeat(self, expr, lineCol=None):
#the _repeatCount stuff here allows nesting of #repeat directives
self._repeatCount = getattr(self, "_repeatCount", -1) + 1
self.addFor('for __i%s in range(%s)' % (self._repeatCount, expr), lineCol=lineCol)
def addIndentingDirective(self, expr, lineCol=None):
if expr and not expr[-1] == ':':
expr = expr + ':'
self.addChunk( expr )
if lineCol:
self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
self.indent()
def addReIndentingDirective(self, expr, dedent=True, lineCol=None):
self.commitStrConst()
if dedent:
self.dedent()
if not expr[-1] == ':':
expr = expr + ':'
self.addChunk( expr )
if lineCol:
self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
self.indent()
def addIf(self, expr, lineCol=None):
"""For a full #if ... #end if directive
"""
self.addIndentingDirective(expr, lineCol=lineCol)
    def addOneLineIf(self, expr, lineCol=None):
        """Open the `if` clause of a one-line #if directive.

        NOTE(review): the previous docstring said 'For a full #if ... #end if
        directive' — apparently copy-pasted from addIf; the delegation is the
        same, only the calling directive differs.
        """
        self.addIndentingDirective(expr, lineCol=lineCol)
    def addTernaryExpr(self, conditionExpr, trueExpr, falseExpr, lineCol=None):
        """For a single-line `#if <condition> then <trueExpr> else <falseExpr>`
        directive: emits a full if/else pair writing the filtered branch
        values. (Fixed 'single-lie' typo in the original docstring.)
        """
        self.addIndentingDirective(conditionExpr, lineCol=lineCol)
        self.addFilteredChunk(trueExpr)
        self.dedent()
        self.addIndentingDirective('else')
        self.addFilteredChunk(falseExpr)
        self.dedent()
def addElse(self, expr, dedent=True, lineCol=None):
expr = re.sub(r'else[ \f\t]+if', 'elif', expr)
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addElif(self, expr, dedent=True, lineCol=None):
self.addElse(expr, dedent=dedent, lineCol=lineCol)
def addUnless(self, expr, lineCol=None):
self.addIf('if not (' + expr + ')')
def addClosure(self, functionName, argsList, parserComment):
argStringChunks = []
for arg in argsList:
chunk = arg[0]
if not arg[1] == None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
signature = "def " + functionName + "(" + ','.join(argStringChunks) + "):"
self.addIndentingDirective(signature)
self.addChunk('#'+parserComment)
def addTry(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addExcept(self, expr, dedent=True, lineCol=None):
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addFinally(self, expr, dedent=True, lineCol=None):
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addReturn(self, expr):
assert not self._isGenerator
self.addChunk(expr)
self._hasReturnStatement = True
    def addYield(self, expr):
        """Emit a `yield`; marks this method as a generator (mutually
        exclusive with `return`).

        A bare `yield` expands to code that yields the buffered output and
        resets the dummy transaction; that form is only legal when no real
        trans argument was supplied.
        """
        assert not self._hasReturnStatement
        self._isGenerator = True
        if expr.replace('yield', '').strip():
            self.addChunk(expr)
        else:
            self.addChunk('if _dummyTrans:')
            self.indent()
            self.addChunk('yield trans.response().getvalue()')
            self.addChunk('trans = DummyTransaction()')
            self.addChunk('write = trans.response().write')
            self.dedent()
            self.addChunk('else:')
            self.indent()
            self.addChunk(
                'raise TypeError("This method cannot be called with a trans arg")')
            self.dedent()
def addPass(self, expr):
self.addChunk(expr)
def addDel(self, expr):
self.addChunk(expr)
def addAssert(self, expr):
self.addChunk(expr)
def addRaise(self, expr):
self.addChunk(expr)
def addBreak(self, expr):
self.addChunk(expr)
def addContinue(self, expr):
self.addChunk(expr)
    def addPSP(self, PSP):
        """Emit a PSP (<% ... %>) block.

        A leading '=' writes the filtered expression; the literal 'end'
        dedents; a trailing '$' or ':' emits the code and indents the
        following output lines.
        """
        self.commitStrConst()
        autoIndent = False
        if PSP[0] == '=':
            PSP = PSP[1:]
            if PSP:
                self.addWriteChunk('_filter(' + PSP + ')')
            return

        elif PSP.lower() == 'end':
            self.dedent()
            return
        elif PSP[-1] == '$':
            autoIndent = True
            PSP = PSP[:-1]
        elif PSP[-1] == ':':
            autoIndent = True

        for line in PSP.splitlines():
            self.addChunk(line)

        if autoIndent:
            self.indent()
def nextCacheID(self):
return ('_'+str(random.randrange(100, 999))
+ str(random.randrange(10000, 99999)))
    def startCacheRegion(self, cacheInfo, lineCol, rawPlaceholder=None):
        """Open a #cache region: emit the bookkeeping code that decides
        whether the cached output is still valid and, if not, redirects
        writes into a collector transaction (closed by endCacheRegion).
        """
        # @@TR: we should add some runtime logging to this
        ID = self.nextCacheID()
        interval = cacheInfo.get('interval', None)
        test = cacheInfo.get('test', None)
        customID = cacheInfo.get('id', None)
        if customID:
            ID = customID
        varyBy = cacheInfo.get('varyBy', repr(ID))
        self._cacheRegionsStack.append(ID) # attrib of current methodCompiler

        # @@TR: add this to a special class var as well
        self.addChunk('')

        self.addChunk('## START CACHE REGION: ID='+ID+
                      '. line %s, col %s'%lineCol + ' in the source.')
        # _RECACHE_<ID> accumulates the "must refresh" decision below.
        self.addChunk('_RECACHE_%(ID)s = False'%locals())
        self.addChunk('_cacheRegion_%(ID)s = self.getCacheRegion(regionID='%locals()
                      + repr(ID)
                      + ', cacheInfo=%r'%cacheInfo
                      + ')')
        self.addChunk('if _cacheRegion_%(ID)s.isNew():'%locals())
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        self.dedent()
        self.addChunk('_cacheItem_%(ID)s = _cacheRegion_%(ID)s.getCacheItem('%locals()
                      +varyBy+')')
        self.addChunk('if _cacheItem_%(ID)s.hasExpired():'%locals())
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        self.dedent()
        if test:
            # optional user-supplied recache test expression
            self.addChunk('if ' + test + ':')
            self.indent()
            self.addChunk('_RECACHE_%(ID)s = True'%locals())
            self.dedent()
        self.addChunk('if (not _RECACHE_%(ID)s) and _cacheItem_%(ID)s.getRefreshTime():'%locals())
        self.indent()
        #self.addChunk('print "DEBUG"+"-"*50')
        self.addChunk('try:')
        self.indent()
        self.addChunk('_output = _cacheItem_%(ID)s.renderOutput()'%locals())
        self.dedent()
        self.addChunk('except KeyError:')
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        #self.addChunk('print "DEBUG"+"*"*50')
        self.dedent()
        self.addChunk('else:')
        self.indent()
        self.addWriteChunk('_output')
        self.addChunk('del _output')
        self.dedent()
        self.dedent()
        self.addChunk('if _RECACHE_%(ID)s or not _cacheItem_%(ID)s.getRefreshTime():'%locals())
        self.indent()
        # Redirect writes into a DummyTransaction collector until
        # endCacheRegion restores the original transaction.
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('trans = _cacheCollector_%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _cacheCollector_%(ID)s.response().write'%locals())
        if interval:
            self.addChunk(("_cacheItem_%(ID)s.setExpiryTime(currentTime() +"%locals())
                          + str(interval) + ")")
    def endCacheRegion(self):
        """Close the innermost #cache region: restore the original
        transaction, store the collected output in the cache item, and write
        it to the real output."""
        ID = self._cacheRegionsStack.pop()
        self.addChunk('trans = _orig_trans%(ID)s'%locals())
        self.addChunk('write = trans.response().write')
        self.addChunk('_cacheData = _cacheCollector_%(ID)s.response().getvalue()'%locals())
        self.addChunk('_cacheItem_%(ID)s.setData(_cacheData)'%locals())
        self.addWriteChunk('_cacheData')
        self.addChunk('del _cacheData')
        self.addChunk('del _cacheCollector_%(ID)s'%locals())
        self.addChunk('del _orig_trans%(ID)s'%locals())
        self.dedent()
        self.addChunk('## END CACHE REGION: '+ID)
        self.addChunk('')
def nextCallRegionID(self):
return self.nextCacheID()
    def startCallRegion(self, functionName, args, lineCol, regionTitle='CALL'):
        """Open a #call region: buffer output into a DummyTransaction so it
        can later be passed to *functionName* as an argument by
        endCallRegion."""
        class CallDetails(object):
            pass
        callDetails = CallDetails()
        callDetails.ID = ID = self.nextCallRegionID()
        callDetails.functionName = functionName
        callDetails.args = args
        callDetails.lineCol = lineCol
        callDetails.usesKeywordArgs = False
        self._callRegionsStack.append((ID, callDetails)) # attrib of current methodCompiler

        self.addChunk('## START %(regionTitle)s REGION: '%locals()
                      +ID
                      +' of '+functionName
                      +' at line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
        self.addChunk('self._CHEETAH__isBuffering = True')
        self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _callCollector%(ID)s.response().write'%locals())
    def setCallArg(self, argName, lineCol):
        """Begin a named #arg inside the current #call region, closing the
        previous arg's collector when one was already open."""
        ID, callDetails = self._callRegionsStack[-1]
        argName = str(argName)
        if callDetails.usesKeywordArgs:
            self._endCallArg()
        else:
            # First named arg: switch the region to keyword-args mode.
            callDetails.usesKeywordArgs = True
            self.addChunk('_callKws%(ID)s = {}'%locals())
            self.addChunk('_currentCallArgname%(ID)s = %(argName)r'%locals())
        callDetails.currentArgname = argName
    def _endCallArg(self):
        """Store the output collected for the current #arg into the kwargs
        dict and start a fresh collector for the next arg."""
        ID, callDetails = self._callRegionsStack[-1]
        currCallArg = callDetails.currentArgname
        self.addChunk(('_callKws%(ID)s[%(currCallArg)r] ='
                       ' _callCollector%(ID)s.response().getvalue()')%locals())
        self.addChunk('del _callCollector%(ID)s'%locals())
        self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _callCollector%(ID)s.response().write'%locals())
    def endCallRegion(self, regionTitle='CALL'):
        """Close the innermost #call region: restore the original
        transaction/buffering state, then emit the filtered call to the
        target function with either the single buffered positional value or
        the collected keyword args."""
        ID, callDetails = self._callRegionsStack[-1]
        functionName, initialKwArgs, lineCol = (
            callDetails.functionName, callDetails.args, callDetails.lineCol)

        def reset(ID=ID):
            # ID is bound as a default so locals() inside this nested
            # function still carries it for the %-interpolation.
            self.addChunk('trans = _orig_trans%(ID)s'%locals())
            self.addChunk('write = trans.response().write')
            self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
            self.addChunk('del _wasBuffering%(ID)s'%locals())
            self.addChunk('del _orig_trans%(ID)s'%locals())

        if not callDetails.usesKeywordArgs:
            # Entire buffered region becomes the single positional argument.
            reset()
            self.addChunk('_callArgVal%(ID)s = _callCollector%(ID)s.response().getvalue()'%locals())
            self.addChunk('del _callCollector%(ID)s'%locals())
            if initialKwArgs:
                initialKwArgs = ', '+initialKwArgs
            self.addFilteredChunk('%(functionName)s(_callArgVal%(ID)s%(initialKwArgs)s)'%locals())
            self.addChunk('del _callArgVal%(ID)s'%locals())
        else:
            if initialKwArgs:
                initialKwArgs = initialKwArgs+', '
            self._endCallArg()
            reset()
            self.addFilteredChunk('%(functionName)s(%(initialKwArgs)s**_callKws%(ID)s)'%locals())
            self.addChunk('del _callKws%(ID)s'%locals())
        self.addChunk('## END %(regionTitle)s REGION: '%locals()
                      +ID
                      +' of '+functionName
                      +' at line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('')
        self._callRegionsStack.pop() # attrib of current methodCompiler
def nextCaptureRegionID(self):
return self.nextCacheID()
    def startCaptureRegion(self, assignTo, lineCol):
        """Open a #capture region: buffer output into a DummyTransaction so
        endCaptureRegion can assign it to *assignTo*."""
        class CaptureDetails: pass
        captureDetails = CaptureDetails()
        captureDetails.ID = ID = self.nextCaptureRegionID()
        captureDetails.assignTo = assignTo
        captureDetails.lineCol = lineCol
        self._captureRegionsStack.append((ID, captureDetails)) # attrib of current methodCompiler
        self.addChunk('## START CAPTURE REGION: '+ID
                      +' '+assignTo
                      +' at line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
        self.addChunk('self._CHEETAH__isBuffering = True')
        self.addChunk('trans = _captureCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _captureCollector%(ID)s.response().write'%locals())
    def endCaptureRegion(self):
        """Close the innermost #capture region: restore the transaction and
        buffering state, then assign the collected output to the target
        variable."""
        ID, captureDetails = self._captureRegionsStack.pop()
        assignTo, lineCol = (captureDetails.assignTo, captureDetails.lineCol)
        self.addChunk('trans = _orig_trans%(ID)s'%locals())
        self.addChunk('write = trans.response().write')
        self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
        self.addChunk('%(assignTo)s = _captureCollector%(ID)s.response().getvalue()'%locals())
        self.addChunk('del _orig_trans%(ID)s'%locals())
        self.addChunk('del _captureCollector%(ID)s'%locals())
        self.addChunk('del _wasBuffering%(ID)s'%locals())
    def setErrorCatcher(self, errorCatcherName):
        """Emit code that installs the named error catcher on the instance,
        instantiating it from the ErrorCatchers module on first use.

        NOTE: the generated code uses dict.has_key(), which is Python-2-only.
        """
        self.turnErrorCatcherOn()
        self.addChunk('if self._CHEETAH__errorCatchers.has_key("' + errorCatcherName + '"):')
        self.indent()
        self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["' +
                      errorCatcherName + '"]')
        self.dedent()
        self.addChunk('else:')
        self.indent()
        self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["'
                      + errorCatcherName + '"] = ErrorCatchers.'
                      + errorCatcherName + '(self)'
                      )
        self.dedent()
def nextFilterRegionID(self):
return self.nextCacheID()
def setTransform(self, transformer, isKlass):
self.addChunk('trans = TransformerTransaction()')
self.addChunk('trans._response = trans.response()')
self.addChunk('trans._response._filter = %s' % transformer)
self.addChunk('write = trans._response.write')
    def setFilter(self, theFilter, isKlass):
        """Open a #filter region: save the active filter and install a new
        one.

        *isKlass* means *theFilter* is a filter-class expression; otherwise
        it names a filter in the filters library ('None' restores the
        initial filter).
        """
        class FilterDetails:
            pass
        filterDetails = FilterDetails()
        filterDetails.ID = ID = self.nextFilterRegionID()
        filterDetails.theFilter = theFilter
        filterDetails.isKlass = isKlass
        self._filterRegionsStack.append((ID, filterDetails)) # attrib of current methodCompiler

        self.addChunk('_orig_filter%(ID)s = _filter'%locals())
        if isKlass:
            self.addChunk('_filter = self._CHEETAH__currentFilter = ' + theFilter.strip() +
                          '(self).filter')
        else:
            if theFilter.lower() == 'none':
                self.addChunk('_filter = self._CHEETAH__initialFilter')
            else:
                # is string representing the name of a builtin filter
                self.addChunk('filterName = ' + repr(theFilter))
                # NOTE(review): this lookup interpolates theFilter directly
                # while the following lines use the generated filterName
                # variable — equivalent here, but worth unifying; has_key()
                # is Python-2-only.
                self.addChunk('if self._CHEETAH__filters.has_key("' + theFilter + '"):')
                self.indent()
                self.addChunk('_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]')
                self.dedent()
                self.addChunk('else:')
                self.indent()
                self.addChunk('_filter = self._CHEETAH__currentFilter'
                              +' = \\\n\t\t\tself._CHEETAH__filters[filterName] = '
                              + 'getattr(self._CHEETAH__filtersLib, filterName)(self).filter')
                self.dedent()
def closeFilterBlock(self):
ID, filterDetails = self._filterRegionsStack.pop()
#self.addChunk('_filter = self._CHEETAH__initialFilter')
#self.addChunk('_filter = _orig_filter%(ID)s'%locals())
self.addChunk('_filter = self._CHEETAH__currentFilter = _orig_filter%(ID)s'%locals())
class AutoMethodCompiler(MethodCompiler):
    """MethodCompiler that adds the standard Cheetah method scaffolding:
    trans/write setup, search-list and filter setup, and the final return of
    the buffered output.

    Fix: default-value presence in methodSignature() is tested with
    `is not None` (identity) rather than `== None`.
    """

    def _setupState(self):
        MethodCompiler._setupState(self)
        self._argStringList = [ ("self", None) ]
        self._streamingEnabled = True
        # lazily computed from self._decorators; None means "not yet checked"
        self._isClassMethod = None
        self._isStaticMethod = None

    def _useKWsDictArgForPassingTrans(self):
        """True when trans should be passed in via **KWS instead of a
        dedicated 'trans' argument."""
        alreadyHasTransArg = [argname for argname, defval in self._argStringList
                              if argname=='trans']
        return (self.methodName()!='respond'
                and not alreadyHasTransArg
                and self.setting('useKWsDictArgForPassingTrans'))

    def isClassMethod(self):
        """Cached check for a @classmethod decorator."""
        if self._isClassMethod is None:
            self._isClassMethod = '@classmethod' in self._decorators
        return self._isClassMethod

    def isStaticMethod(self):
        """Cached check for a @staticmethod decorator."""
        if self._isStaticMethod is None:
            self._isStaticMethod = '@staticmethod' in self._decorators
        return self._isStaticMethod

    def cleanupState(self):
        """Close any open regions, finalize the argument list, then wrap the
        collected body with the auto setup/cleanup code."""
        MethodCompiler.cleanupState(self)
        self.commitStrConst()
        if self._cacheRegionsStack:
            self.endCacheRegion()
        if self._callRegionsStack:
            self.endCallRegion()

        if self._streamingEnabled:
            kwargsName = None
            positionalArgsListName = None
            for argname, defval in self._argStringList:
                if argname.strip().startswith('**'):
                    kwargsName = argname.strip().replace('**', '')
                    break
                elif argname.strip().startswith('*'):
                    positionalArgsListName = argname.strip().replace('*', '')
            if not kwargsName and self._useKWsDictArgForPassingTrans():
                kwargsName = 'KWS'
                self.addMethArg('**KWS', None)
            self._kwargsName = kwargsName

            if not self._useKWsDictArgForPassingTrans():
                if not kwargsName and not positionalArgsListName:
                    self.addMethArg('trans', 'None')
                else:
                    # can't safely append 'trans' after *args/**kwargs
                    self._streamingEnabled = False

        self._indentLev = self.setting('initialMethIndentLevel')
        mainBodyChunks = self._methodBodyChunks
        self._methodBodyChunks = []
        self._addAutoSetupCode()
        self._methodBodyChunks.extend(mainBodyChunks)
        self._addAutoCleanupCode()

    def _addAutoSetupCode(self):
        """Emit the prologue: resolve trans, bind write/SL/_filter."""
        if self._initialMethodComment:
            self.addChunk(self._initialMethodComment)

        if self._streamingEnabled and not self.isClassMethod() and not self.isStaticMethod():
            if self._useKWsDictArgForPassingTrans() and self._kwargsName:
                self.addChunk('trans = %s.get("trans")'%self._kwargsName)
            self.addChunk('if (not trans and not self._CHEETAH__isBuffering'
                          ' and not callable(self.transaction)):')
            self.indent()
            self.addChunk('trans = self.transaction'
                          ' # is None unless self.awake() was called')
            self.dedent()
            self.addChunk('if not trans:')
            self.indent()
            self.addChunk('trans = DummyTransaction()')
            if self.setting('autoAssignDummyTransactionToSelf'):
                self.addChunk('self.transaction = trans')
            self.addChunk('_dummyTrans = True')
            self.dedent()
            self.addChunk('else: _dummyTrans = False')
        else:
            self.addChunk('trans = DummyTransaction()')
            self.addChunk('_dummyTrans = True')
        self.addChunk('write = trans.response().write')
        if self.setting('useNameMapper'):
            argNames = [arg[0] for arg in self._argStringList]
            allowSearchListAsMethArg = self.setting('allowSearchListAsMethArg')
            if allowSearchListAsMethArg and 'SL' in argNames:
                pass
            elif allowSearchListAsMethArg and 'searchList' in argNames:
                self.addChunk('SL = searchList')
            elif not self.isClassMethod() and not self.isStaticMethod():
                self.addChunk('SL = self._CHEETAH__searchList')
            else:
                self.addChunk('SL = [KWS]')
        if self.setting('useFilters'):
            if self.isClassMethod() or self.isStaticMethod():
                self.addChunk('_filter = lambda x, **kwargs: unicode(x)')
            else:
                self.addChunk('_filter = self._CHEETAH__currentFilter')
        self.addChunk('')
        self.addChunk("#" *40)
        self.addChunk('## START - generated method body')
        self.addChunk('')

    def _addAutoCleanupCode(self):
        """Emit the epilogue; a final return unless the method is a
        generator."""
        self.addChunk('')
        self.addChunk("#" *40)
        self.addChunk('## END - generated method body')
        self.addChunk('')
        if not self._isGenerator:
            self.addStop()
        self.addChunk('')

    def addStop(self, expr=None):
        """Emit the standard return of the dummy transaction's output."""
        self.addChunk('return _dummyTrans and trans.response().getvalue() or ""')

    def addMethArg(self, name, defVal=None):
        self._argStringList.append( (name, defVal) )

    def methodSignature(self):
        """Build the (possibly decorated) `def` line for this method."""
        argStringChunks = []
        for arg in self._argStringList:
            chunk = arg[0]
            if chunk == 'self' and self.isClassMethod():
                chunk = 'cls'
            if chunk == 'self' and self.isStaticMethod():
                # Skip the "self" arg for @staticmethod methods
                continue
            if arg[1] is not None:
                chunk += '=' + arg[1]
            argStringChunks.append(chunk)
        argString = (', ').join(argStringChunks)

        output = []
        if self._decorators:
            output.append(''.join([self._indent + decorator + '\n'
                                   for decorator in self._decorators]))
        output.append(self._indent + "def "
                      + self.methodName() + "(" +
                      argString + "):\n\n")
        return ''.join(output)
##################################################
## CLASS COMPILERS
_initMethod_initCheetah = """\
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
""".replace('\n', '\n'+' '*8)
class ClassCompiler(GenUtils):
    """Aggregates MethodCompilers for one generated class and renders the
    final `class` definition (signature, docstring, methods, attributes).

    Fixes: `pos==None` / `not arg[1] == None` replaced by identity
    comparisons with None; setMainMethodName uses enumerate instead of
    range(len(...)).
    """
    methodCompilerClass = AutoMethodCompiler
    methodCompilerClassForInit = MethodCompiler

    def __init__(self, className, mainMethodName='respond',
                 moduleCompiler=None,
                 fileName=None,
                 settingsManager=None):

        self._settingsManager = settingsManager
        self._fileName = fileName
        self._className = className
        self._moduleCompiler = moduleCompiler
        self._mainMethodName = mainMethodName
        self._setupState()
        methodCompiler = self._spawnMethodCompiler(
            mainMethodName,
            initialMethodComment='## CHEETAH: main method generated for this template')
        self._setActiveMethodCompiler(methodCompiler)
        if fileName and self.setting('monitorSrcFile'):
            self._addSourceFileMonitoring(fileName)

    def setting(self, key):
        return self._settingsManager.setting(key)

    def __getattr__(self, name):
        """Provide access to the methods and attributes of the MethodCompiler
        at the top of the activeMethods stack: one-way namespace sharing

        WARNING: Use .setMethods to assign the attributes of the MethodCompiler
        from the methods of this class!!! or you will be assigning to attributes
        of this object instead.
        """
        if name in self.__dict__:
            return self.__dict__[name]
        elif hasattr(self.__class__, name):
            return getattr(self.__class__, name)
        elif self._activeMethodsList and hasattr(self._activeMethodsList[-1], name):
            return getattr(self._activeMethodsList[-1], name)
        else:
            raise AttributeError(name)

    def _setupState(self):
        self._classDef = None
        self._decoratorsForNextMethod = []
        self._activeMethodsList = []        # stack while parsing/generating
        self._finishedMethodsList = []      # store by order
        self._methodsIndex = {}             # store by name
        self._baseClass = 'Template'
        self._classDocStringLines = []
        # printed after methods in the gen class def:
        self._generatedAttribs = ['_CHEETAH__instanceInitialized = False']
        self._generatedAttribs.append('_CHEETAH_version = __CHEETAH_version__')
        self._generatedAttribs.append(
            '_CHEETAH_versionTuple = __CHEETAH_versionTuple__')
        if self.setting('addTimestampsToCompilerOutput'):
            self._generatedAttribs.append('_CHEETAH_genTime = __CHEETAH_genTime__')
            self._generatedAttribs.append('_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__')
        if self._fileName:
            self._generatedAttribs.append('_CHEETAH_src = __CHEETAH_src__')
            self._generatedAttribs.append(
                '_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__')

        if self.setting('templateMetaclass'):
            self._generatedAttribs.append('__metaclass__ = '+self.setting('templateMetaclass'))
        self._initMethChunks = []
        self._blockMetaData = {}
        self._errorCatcherCount = 0
        self._placeholderToErrorCatcherMap = {}

    def cleanupState(self):
        """Flush all pending method compilers and register the main-method
        bookkeeping attributes."""
        while self._activeMethodsList:
            methCompiler = self._popActiveMethodCompiler()
            self._swallowMethodCompiler(methCompiler)
        self._setupInitMethod()
        if self._mainMethodName == 'respond':
            if self.setting('setup__str__method'):
                self._generatedAttribs.append('def __str__(self): return self.respond()')
        self.addAttribute('_mainCheetahMethod_for_' + self._className +
                          '= ' + repr(self._mainMethodName) )

    def _setupInitMethod(self):
        """Build the generated __init__ and insert it first in the class."""
        __init__ = self._spawnMethodCompiler('__init__',
                                             klass=self.methodCompilerClassForInit)
        __init__.setMethodSignature("def __init__(self, *args, **KWs)")
        __init__.addChunk('super(%s, self).__init__(*args, **KWs)' % self._className)
        __init__.addChunk(_initMethod_initCheetah % {'className' : self._className})
        for chunk in self._initMethChunks:
            __init__.addChunk(chunk)
        __init__.cleanupState()
        self._swallowMethodCompiler(__init__, pos=0)

    def _addSourceFileMonitoring(self, fileName):
        # @@TR: this stuff needs auditing for Cheetah 2.0
        # the first bit is added to init
        self.addChunkToInit('self._filePath = ' + repr(fileName))
        self.addChunkToInit('self._fileMtime = ' + str(getmtime(fileName)) )

        # the rest is added to the main output method of the class ('mainMethod')
        self.addChunk('if exists(self._filePath) and ' +
                      'getmtime(self._filePath) > self._fileMtime:')
        self.indent()
        self.addChunk('self._compile(file=self._filePath, moduleName='+self._className + ')')
        self.addChunk(
            'write(getattr(self, self._mainCheetahMethod_for_' + self._className +
            ')(trans=trans))')
        self.addStop()
        self.dedent()

    def setClassName(self, name):
        self._className = name

    def className(self):
        return self._className

    def setBaseClass(self, baseClassName):
        self._baseClass = baseClassName

    def setMainMethodName(self, methodName):
        """Rename the main method, keeping the index and any generated
        file-update code consistent."""
        if methodName == self._mainMethodName:
            return
        ## change the name in the methodCompiler and add new reference
        mainMethod = self._methodsIndex[self._mainMethodName]
        mainMethod.setMethodName(methodName)
        self._methodsIndex[methodName] = mainMethod

        ## make sure that fileUpdate code still works properly:
        chunkToChange = ('write(self.' + self._mainMethodName + '(trans=trans))')
        chunks = mainMethod._methodBodyChunks
        if chunkToChange in chunks:
            for i, chunk in enumerate(chunks):
                if chunk == chunkToChange:
                    chunks[i] = ('write(self.' + methodName + '(trans=trans))')
        ## get rid of the old reference and update self._mainMethodName
        del self._methodsIndex[self._mainMethodName]
        self._mainMethodName = methodName

    def setMainMethodArgs(self, argsList):
        mainMethodCompiler = self._methodsIndex[self._mainMethodName]
        for argName, defVal in argsList:
            mainMethodCompiler.addMethArg(argName, defVal)

    def _spawnMethodCompiler(self, methodName, klass=None,
                             initialMethodComment=None):
        """Create and index a MethodCompiler, consuming any pending
        decorators."""
        if klass is None:
            klass = self.methodCompilerClass

        decorators = self._decoratorsForNextMethod or []
        self._decoratorsForNextMethod = []
        methodCompiler = klass(methodName, classCompiler=self,
                               decorators=decorators,
                               initialMethodComment=initialMethodComment)
        self._methodsIndex[methodName] = methodCompiler
        return methodCompiler

    def _setActiveMethodCompiler(self, methodCompiler):
        self._activeMethodsList.append(methodCompiler)

    def _getActiveMethodCompiler(self):
        return self._activeMethodsList[-1]

    def _popActiveMethodCompiler(self):
        return self._activeMethodsList.pop()

    def _swallowMethodCompiler(self, methodCompiler, pos=None):
        """Finalize a method compiler and file it into the finished list."""
        methodCompiler.cleanupState()
        # identity comparison with None (was: pos==None)
        if pos is None:
            self._finishedMethodsList.append( methodCompiler )
        else:
            self._finishedMethodsList.insert(pos, methodCompiler)
        return methodCompiler

    def startMethodDef(self, methodName, argsList, parserComment):
        methodCompiler = self._spawnMethodCompiler(
            methodName, initialMethodComment=parserComment)
        self._setActiveMethodCompiler(methodCompiler)
        for argName, defVal in argsList:
            methodCompiler.addMethArg(argName, defVal)

    def _finishedMethods(self):
        return self._finishedMethodsList

    def addDecorator(self, decoratorExpr):
        """Set the decorator to be used with the next method in the source.

        See _spawnMethodCompiler() and MethodCompiler for the details of how
        this is used.
        """
        self._decoratorsForNextMethod.append(decoratorExpr)

    def addClassDocString(self, line):
        # '%' is doubled so the text survives later %-interpolation
        self._classDocStringLines.append( line.replace('%', '%%'))

    def addChunkToInit(self, chunk):
        self._initMethChunks.append(chunk)

    def addAttribute(self, attribExpr):
        ## first test to make sure that the user hasn't used any fancy Cheetah syntax
        #  (placeholders, directives, etc.) inside the expression
        if attribExpr.find('VFN(') != -1 or attribExpr.find('VFFSL(') != -1:
            raise ParseError(self,
                             'Invalid #attr directive.' +
                             ' It should only contain simple Python literals.')
        ## now add the attribute
        self._generatedAttribs.append(attribExpr)

    def addSuper(self, argsList, parserComment=None):
        """Emit a filtered super() call to the same-named method."""
        className = self._className #self._baseClass
        methodName = self._getActiveMethodCompiler().methodName()

        argStringChunks = []
        for arg in argsList:
            chunk = arg[0]
            # identity comparison with None (was: not arg[1] == None)
            if arg[1] is not None:
                chunk += '=' + arg[1]
            argStringChunks.append(chunk)
        argString = ','.join(argStringChunks)

        self.addFilteredChunk(
            'super(%(className)s, self).%(methodName)s(%(argString)s)'%locals())

    def addErrorCatcherCall(self, codeChunk, rawCode='', lineCol=''):
        """Create (or reuse) a generated method that evaluates *codeChunk*
        under the installed error catcher; returns the method's name.

        NOTE: the generated `except ..., e:` syntax is Python-2-only.
        """
        if rawCode in self._placeholderToErrorCatcherMap:
            methodName = self._placeholderToErrorCatcherMap[rawCode]
            if not self.setting('outputRowColComments'):
                self._methodsIndex[methodName].addMethDocString(
                    'plus at line %s, col %s'%lineCol)
            return methodName

        self._errorCatcherCount += 1
        methodName = '__errorCatcher' + str(self._errorCatcherCount)
        self._placeholderToErrorCatcherMap[rawCode] = methodName

        catcherMeth = self._spawnMethodCompiler(
            methodName,
            klass=MethodCompiler,
            initialMethodComment=('## CHEETAH: Generated from ' + rawCode +
                                  ' at line %s, col %s'%lineCol + '.')
            )
        catcherMeth.setMethodSignature('def ' + methodName +
                                       '(self, localsDict={})')
        # is this use of localsDict right?
        catcherMeth.addChunk('try:')
        catcherMeth.indent()
        catcherMeth.addChunk("return eval('''" + codeChunk +
                             "''', globals(), localsDict)")
        catcherMeth.dedent()
        catcherMeth.addChunk('except self._CHEETAH__errorCatcher.exceptions(), e:')
        catcherMeth.indent()
        catcherMeth.addChunk("return self._CHEETAH__errorCatcher.warn(exc_val=e, code= " +
                             repr(codeChunk) + " , rawCode= " +
                             repr(rawCode) + " , lineCol=" + str(lineCol) +")")

        catcherMeth.cleanupState()

        self._swallowMethodCompiler(catcherMeth)
        return methodName

    def closeDef(self):
        self.commitStrConst()
        methCompiler = self._popActiveMethodCompiler()
        self._swallowMethodCompiler(methCompiler)

    def closeBlock(self):
        """Finish a #block: finalize its method and emit the call to it."""
        self.commitStrConst()
        methCompiler = self._popActiveMethodCompiler()
        methodName = methCompiler.methodName()
        if self.setting('includeBlockMarkers'):
            endMarker = self.setting('blockMarkerEnd')
            methCompiler.addStrConst(endMarker[0] + methodName + endMarker[1])
        self._swallowMethodCompiler(methCompiler)

        #metaData = self._blockMetaData[methodName]
        #rawDirective = metaData['raw']
        #lineCol = metaData['lineCol']

        ## insert the code to call the block, caching if #cache directive is on
        codeChunk = 'self.' + methodName + '(trans=trans)'
        self.addChunk(codeChunk)

        #self.appendToPrevChunk(' # generated from ' + repr(rawDirective) )
        #if self.setting('outputRowColComments'):
        #    self.appendToPrevChunk(' at line %s, col %s' % lineCol + '.')

    ## code wrapping methods

    def classDef(self):
        if self._classDef:
            return self._classDef
        else:
            return self.wrapClassDef()

    __str__ = classDef
    __unicode__ = classDef

    def wrapClassDef(self):
        """Assemble and cache the full class definition text."""
        ind = self.setting('indentationStep')
        classDefChunks = [self.classSignature(),
                          self.classDocstring(),
                          ]
        def addMethods():
            classDefChunks.extend([
                ind + '#'*50,
                ind + '## CHEETAH GENERATED METHODS',
                '\n',
                self.methodDefs(),
                ])
        def addAttributes():
            classDefChunks.extend([
                ind + '#'*50,
                ind + '## CHEETAH GENERATED ATTRIBUTES',
                '\n',
                self.attributes(),
                ])
        if self.setting('outputMethodsBeforeAttributes'):
            addMethods()
            addAttributes()
        else:
            addAttributes()
            addMethods()

        classDef = '\n'.join(classDefChunks)
        self._classDef = classDef
        return classDef

    def classSignature(self):
        return "class %s(%s):" % (self.className(), self._baseClass)

    def classDocstring(self):
        if not self._classDocStringLines:
            return ''
        ind = self.setting('indentationStep')
        docStr = ('%(ind)s"""\n%(ind)s' +
                  '\n%(ind)s'.join(self._classDocStringLines) +
                  '\n%(ind)s"""\n'
                  ) % {'ind':ind}
        return  docStr

    def methodDefs(self):
        methodDefs = [methGen.methodDef() for methGen in self._finishedMethods()]
        return '\n\n'.join(methodDefs)

    def attributes(self):
        attribs = [self.setting('indentationStep') + str(attrib)
                   for attrib in self._generatedAttribs ]
        return '\n\n'.join(attribs)
class AutoClassCompiler(ClassCompiler):
    # No specialization yet; exists so ModuleCompiler.classCompilerClass can
    # be overridden independently of ClassCompiler itself.
    pass
##################################################
## MODULE COMPILERS
class ModuleCompiler(SettingsManager, GenUtils):
parserClass = Parser
classCompilerClass = AutoClassCompiler
def __init__(self, source=None, file=None,
             moduleName='DynamicallyCompiledCheetahTemplate',
             mainClassName=None, # string
             mainMethodName=None, # string
             baseclassName=None, # string
             extraImportStatements=None, # list of strings
             settings=None # dict
             ):
    """Prepare a compiler for one template.

    The template text comes either from `source` (a string) or from
    `file` (a filename string or a file-like object) -- never both.
    Raises TypeError on conflicting or unusable arguments.
    NOTE(review): `extraImportStatements` is accepted but not referenced
    in this method -- confirm whether it is consumed elsewhere.
    """
    super(ModuleCompiler, self).__init__()
    if settings:
        self.updateSettings(settings)
    # disable useStackFrames if the C version of NameMapper isn't compiled
    # it's painfully slow in the Python version and bites Windows users all
    # the time:
    if not NameMapper.C_VERSION:
        if not sys.platform.startswith('java'):
            warnings.warn(
                "\nYou don't have the C version of NameMapper installed! "
                "I'm disabling Cheetah's useStackFrames option as it is "
                "painfully slow with the Python version of NameMapper. "
                "You should get a copy of Cheetah with the compiled C version of NameMapper."
                )
        self.setSetting('useStackFrames', False)

    self._compiled = False
    self._moduleName = moduleName
    if not mainClassName:
        self._mainClassName = moduleName
    else:
        self._mainClassName = mainClassName
    self._mainMethodNameArg = mainMethodName
    if mainMethodName:
        self.setSetting('mainMethodName', mainMethodName)
    self._baseclassName = baseclassName
    self._filePath = None
    self._fileMtime = None

    if source and file:
        raise TypeError("Cannot compile from a source string AND file.")
    elif isinstance(file, basestring): # it's a filename.
        f = open(file) # Raises IOError.
        source = f.read()
        f.close()
        self._filePath = file
        self._fileMtime = os.path.getmtime(file)
    elif hasattr(file, 'read'):
        source = file.read() # Can't set filename or mtime--they're not accessible.
    elif file:
        raise TypeError("'file' argument must be a filename string or file-like object")

    if self._filePath:
        self._fileDirName, self._fileBaseName = os.path.split(self._filePath)
        self._fileBaseNameRoot, self._fileBaseNameExt = os.path.splitext(self._fileBaseName)

    if not isinstance(source, basestring):
        source = unicode(source)
        # by converting to string here we allow objects such as other Templates
        # to be passed in

    # Handle the #indent directive by converting it to other directives.
    # (Over the long term we'll make it a real directive.)
    if source == "":
        warnings.warn("You supplied an empty string for the source!", )
    else:
        # #unicode and #encoding are mutually exclusive ways of declaring
        # the source's character encoding
        unicodeMatch = unicodeDirectiveRE.search(source)
        encodingMatch = encodingDirectiveRE.match(source)
        if unicodeMatch:
            if encodingMatch:
                raise ParseError(
                    self, "#encoding and #unicode are mutually exclusive! "
                    "Use one or the other.")
            source = unicodeDirectiveRE.sub('', source)
            if isinstance(source, str):
                encoding = unicodeMatch.group(1) or 'ascii'
                source = unicode(source, encoding)
        elif encodingMatch:
            encodings = encodingMatch.groups()
            if len(encodings):
                encoding = encodings[0]
                source = source.decode(encoding)
        else:
            source = unicode(source)

    if source.find('#indent') != -1: #@@TR: undocumented hack
        source = indentize(source)

    self._parser = self.parserClass(source, filename=self._filePath, compiler=self)
    self._setupCompilerState()
def __getattr__(self, name):
    """Provide one-way access to the methods and attributes of the
    ClassCompiler, and thereby the MethodCompilers as well.

    WARNING: Use .setMethods to assign the attributes of the ClassCompiler
    from the methods of this class!!! or you will be assigning to attributes
    of this object instead.
    """
    if name in self.__dict__:
        return self.__dict__[name]
    elif hasattr(self.__class__, name):
        return getattr(self.__class__, name)
    elif self._activeClassesList and hasattr(self._activeClassesList[-1], name):
        # delegate to the innermost active ClassCompiler
        return getattr(self._activeClassesList[-1], name)
    else:
        raise AttributeError(name)

def _initializeSettings(self):
    """SettingsManager hook: load a deep copy of the default compiler
    settings (deep-copied so compilers never share mutable defaults)."""
    self.updateSettings(copy.deepcopy(DEFAULT_COMPILER_SETTINGS))
def _setupCompilerState(self):
    """Initialize all per-compile bookkeeping state."""
    self._activeClassesList = []
    self._finishedClassesList = []      # listed by ordered
    self._finishedClassIndex = {}       # listed by name
    self._moduleDef = None
    self._moduleShBang = '#!/usr/bin/env python'
    self._moduleEncoding = 'ascii'
    self._moduleEncodingStr = ''
    self._moduleHeaderLines = []
    self._moduleDocStringLines = []
    self._specialVars = {}
    # import statements emitted at the top of every generated module
    self._importStatements = [
        "import sys",
        "import os",
        "import os.path",
        "import __builtin__",
        "from os.path import getmtime, exists",
        "import time",
        "import types",
        "from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion",
        "from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple",
        "from Cheetah.Template import Template",
        "from Cheetah.DummyTransaction import *",
        "from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList",
        "from Cheetah.CacheRegion import CacheRegion",
        "import Cheetah.Filters as Filters",
        "import Cheetah.ErrorCatchers as ErrorCatchers",
        ]
    # names bound by the default imports above; consulted by #extends
    self._importedVarNames = ['sys',
                              'os',
                              'os.path',
                              'time',
                              'types',
                              'Template',
                              'DummyTransaction',
                              'NotFound',
                              'Filters',
                              'ErrorCatchers',
                              'CacheRegion',
                              ]
    # short aliases used throughout the generated code
    self._moduleConstants = [
        "VFFSL=valueFromFrameOrSearchList",
        "VFSL=valueFromSearchList",
        "VFN=valueForName",
        "currentTime=time.time",
        ]
def compile(self):
    """Run the parser over the source, producing the finished main class."""
    classCompiler = self._spawnClassCompiler(self._mainClassName)
    if self._baseclassName:
        classCompiler.setBaseClass(self._baseclassName)
    self._addActiveClassCompiler(classCompiler)
    self._parser.parse()
    self._swallowClassCompiler(self._popActiveClassCompiler())
    self._compiled = True
    self._parser.cleanup()

def _spawnClassCompiler(self, className, klass=None):
    """Create (but do not register) a class compiler; `klass` defaults to
    self.classCompilerClass."""
    if klass is None:
        klass = self.classCompilerClass
    classCompiler = klass(className,
                          moduleCompiler=self,
                          mainMethodName=self.setting('mainMethodName'),
                          fileName=self._filePath,
                          settingsManager=self,
                          )
    return classCompiler

def _addActiveClassCompiler(self, classCompiler):
    # push onto the active-class stack (innermost last)
    self._activeClassesList.append(classCompiler)

def _getActiveClassCompiler(self):
    # the innermost active class compiler
    return self._activeClassesList[-1]

def _popActiveClassCompiler(self):
    return self._activeClassesList.pop()

def _swallowClassCompiler(self, classCompiler):
    """Finalize a class compiler and record it both in order and by name."""
    classCompiler.cleanupState()
    self._finishedClassesList.append( classCompiler )
    self._finishedClassIndex[classCompiler.className()] = classCompiler
    return classCompiler

def _finishedClasses(self):
    return self._finishedClassesList

def importedVarNames(self):
    """Names known to be bound by module-level import statements."""
    return self._importedVarNames
def addImportedVarNames(self, varNames, raw_statement=None):
    """Record names bound by an import statement.

    In non-legacy import mode, when the import occurred inside a method
    body (`raw_statement` given and chunks are being collected), the raw
    statement is re-emitted inline instead of recording the names.
    NOTE(review): getattr() here has no default, so it propagates an
    AttributeError if no method body is active -- presumably hasattr()
    or a default was intended; confirm before changing.
    """
    settings = self.settings()
    if not varNames:
        return
    if not settings.get('useLegacyImportMode'):
        if raw_statement and getattr(self, '_methodBodyChunks'):
            self.addChunk(raw_statement)
        else:
            self._importedVarNames.extend(varNames)
## methods for adding stuff to the module and class definitions
def setBaseClass(self, baseClassName):
    """Handle the #extends directive: set the base class on the active
    class compiler, auto-importing it when necessary."""
    if self._mainMethodNameArg:
        self.setMainMethodName(self._mainMethodNameArg)
    else:
        self.setMainMethodName(self.setting('mainMethodNameForSubclasses'))

    if self.setting('handlerForExtendsDirective'):
        # user hook: may rewrite the base class name before it is applied
        handler = self.setting('handlerForExtendsDirective')
        baseClassName = handler(compiler=self, baseClassName=baseClassName)
        self._getActiveClassCompiler().setBaseClass(baseClassName)
    elif (not self.setting('autoImportForExtendsDirective')
          or baseClassName=='object' or baseClassName in self.importedVarNames()):
        self._getActiveClassCompiler().setBaseClass(baseClassName)
        # no need to import
    else:
        ##################################################
        ## If the #extends directive contains a classname or modulename that isn't
        #  in self.importedVarNames() already, we assume that we need to add
        #  an implied 'from ModName import ClassName' where ModName == ClassName.
        #  - This is the case in WebKit servlet modules.
        #  - We also assume that the final . separates the classname from the
        #    module name.  This might break if people do something really fancy
        #    with their dots and namespaces.
        baseclasses = baseClassName.split(',')
        for klass in baseclasses:
            chunks = klass.split('.')
            if len(chunks)==1:
                self._getActiveClassCompiler().setBaseClass(klass)
                if klass not in self.importedVarNames():
                    modName = klass
                    # we assume the class name to be the module name
                    # and that it's not a builtin:
                    importStatement = "from %s import %s" % (modName, klass)
                    self.addImportStatement(importStatement)
                    self.addImportedVarNames((klass,))
            else:
                needToAddImport = True
                modName = chunks[0]
                #print chunks, ':', self.importedVarNames()
                # walk the dotted prefix looking for an already-imported module
                for chunk in chunks[1:-1]:
                    if modName in self.importedVarNames():
                        needToAddImport = False
                        finalBaseClassName = klass.replace(modName+'.', '')
                        self._getActiveClassCompiler().setBaseClass(finalBaseClassName)
                        break
                    else:
                        modName += '.'+chunk
                if needToAddImport:
                    modName, finalClassName = '.'.join(chunks[:-1]), chunks[-1]
                    #if finalClassName != chunks[:-1][-1]:
                    if finalClassName != chunks[-2]:
                        # we assume the class name to be the module name
                        modName = '.'.join(chunks)
                    self._getActiveClassCompiler().setBaseClass(finalClassName)
                    importStatement = "from %s import %s" % (modName, finalClassName)
                    self.addImportStatement(importStatement)
                    self.addImportedVarNames( [finalClassName,] )
def setCompilerSetting(self, key, valueExpr):
    """Set a single compiler setting from a template directive.
    SECURITY: `valueExpr` comes from the template source and is eval()'d --
    only compile trusted templates."""
    self.setSetting(key, eval(valueExpr) )
    self._parser.configureParser()

def setCompilerSettings(self, keywords, settingsStr):
    """Apply a #compiler-settings block.  `keywords` may include 'reset'
    (restore defaults and stop), 'python' (use the python-syntax settings
    reader), or 'nomerge'.
    NOTE(review): `merge` is computed but not used in this method --
    presumably consumed by the settings readers; confirm.
    """
    KWs = keywords
    merge = True
    if 'nomerge' in KWs:
        merge = False

    if 'reset' in KWs:
        # @@TR: this is actually caught by the parser at the moment.
        # subject to change in the future
        self._initializeSettings()
        self._parser.configureParser()
        return
    elif 'python' in KWs:
        settingsReader = self.updateSettingsFromPySrcStr
        # this comes from SettingsManager
    else:
        # this comes from SettingsManager
        settingsReader = self.updateSettingsFromConfigStr
    settingsReader(settingsStr)
    self._parser.configureParser()
def setShBang(self, shBang):
    # the '#!' line emitted at the top of the generated module
    self._moduleShBang = shBang

def setModuleEncoding(self, encoding):
    # recorded from the #encoding directive
    self._moduleEncoding = encoding

def getModuleEncoding(self):
    return self._moduleEncoding

def addModuleHeader(self, line):
    """Adds a header comment to the top of the generated module.
    """
    self._moduleHeaderLines.append(line)

def addModuleDocString(self, line):
    """Adds a line to the generated module docstring.
    """
    self._moduleDocStringLines.append(line)

def addModuleGlobal(self, line):
    """Adds a line of global module code.  It is inserted after the import
    statements and Cheetah default module constants.
    """
    self._moduleConstants.append(line)
def addSpecialVar(self, basename, contents, includeUnderscores=True):
    """Record a module-level special variable (e.g. __author__).

    The stored values are rendered later by specialVars().  Double
    underscores are wrapped around `basename` unless disabled.
    """
    if includeUnderscores:
        varName = '__' + basename + '__'
    else:
        varName = basename
    self._specialVars[varName] = contents.strip()
def addImportStatement(self, impStatement):
    """Record a module-level import statement and the names it binds."""
    settings = self.settings()
    if not self._methodBodyChunks or settings.get('useLegacyImportMode'):
        # In the case where we are importing inline in the middle of a source block
        # we don't want to inadvertantly import the module at the top of the file either
        self._importStatements.append(impStatement)

    #@@TR 2005-01-01: there's almost certainly a cleaner way to do this!
    # crude parse of "...import a, b as c, *" into the bound names
    importVarNames = impStatement[impStatement.find('import') + len('import'):].split(',')
    importVarNames = [var.split()[-1] for var in importVarNames] # handles aliases
    importVarNames = [var for var in importVarNames if not var == '*']
    self.addImportedVarNames(importVarNames, raw_statement=impStatement) #used by #extend for auto-imports

def addAttribute(self, attribName, expr):
    """Add `attribName = expr` as a generated class attribute."""
    self._getActiveClassCompiler().addAttribute(attribName + ' =' + expr)
def addComment(self, comm):
    """Route a ## comment to the right sink based on its prefix.

    Pure '###...' bar comments are dropped; special-var comments are
    stored via addSpecialVar(); 'doc:'/'doc-method:'/'doc-module:'/
    'doc-class:'/'header:' prefixes select the matching docstring or
    header sink; everything else becomes a plain method comment.
    """
    if re.match(r'#+$', comm):      # skip bar comments
        return

    specialVarMatch = specialVarRE.match(comm)
    if specialVarMatch:
        # @@TR: this is a bit hackish and is being replaced with
        # #set module varName = ...
        return self.addSpecialVar(specialVarMatch.group(1),
                                  comm[specialVarMatch.end():])

    sink = self.addMethComment
    prefixTable = (('doc:', self.addMethDocString),
                   ('doc-method:', self.addMethDocString),
                   ('doc-module:', self.addModuleDocString),
                   ('doc-class:', self.addClassDocString),
                   ('header:', self.addModuleHeader),
                   )
    for prefix, handler in prefixTable:
        if comm.startswith(prefix):
            sink = handler
            comm = comm[len(prefix):].strip()
            break

    for line in comm.splitlines():
        sink(line)
## methods for module code wrapping
def getModuleCode(self):
    """Return the complete generated module source, compiling on demand
    and caching the result in self._moduleDef."""
    if not self._compiled:
        self.compile()
    if self._moduleDef:
        return self._moduleDef
    else:
        return self.wrapModuleDef()

# str() of a ModuleCompiler yields the generated module source
__str__ = getModuleCode
def wrapModuleDef(self):
    """Assemble the final module source from header, docstring, imports,
    constants, special vars, class definitions and footer, caching it in
    self._moduleDef."""
    self.addSpecialVar('CHEETAH_docstring', self.setting('defDocStrMsg'))
    self.addModuleGlobal('__CHEETAH_version__ = %r'%Version)
    self.addModuleGlobal('__CHEETAH_versionTuple__ = %r'%(VersionTuple,))
    if self.setting('addTimestampsToCompilerOutput'):
        self.addModuleGlobal('__CHEETAH_genTime__ = %r'%time.time())
        self.addModuleGlobal('__CHEETAH_genTimestamp__ = %r'%self.timestamp())
    if self._filePath:
        timestamp = self.timestamp(self._fileMtime)
        self.addModuleGlobal('__CHEETAH_src__ = %r'%self._filePath)
        self.addModuleGlobal('__CHEETAH_srcLastModified__ = %r'%timestamp)
    else:
        self.addModuleGlobal('__CHEETAH_src__ = None')
        self.addModuleGlobal('__CHEETAH_srcLastModified__ = None')

    # Skeleton of the generated module.  The doubled %% escapes produce
    # literal %s placeholders in the generated code's own format string.
    moduleDef = """%(header)s
%(docstring)s

##################################################
## DEPENDENCIES
%(imports)s

##################################################
## MODULE CONSTANTS
%(constants)s
%(specialVars)s

if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %%s. Templates compiled before version %%s must be recompiled.'%%(
         __CHEETAH_version__, RequiredCheetahVersion))

##################################################
## CLASSES

%(classes)s

## END CLASS DEFINITION
if not hasattr(%(mainClassName)s, '_initCheetahAttributes'):
    templateAPIClass = getattr(%(mainClassName)s, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(%(mainClassName)s)

%(footer)s
""" % {'header': self.moduleHeader(),
       'docstring': self.moduleDocstring(),
       'specialVars': self.specialVars(),
       'imports': self.importStatements(),
       'constants': self.moduleConstants(),
       'classes': self.classDefs(),
       'footer': self.moduleFooter(),
       'mainClassName': self._mainClassName,
       }

    self._moduleDef = moduleDef
    return moduleDef
def timestamp(self, theTime=None):
    """Return a human-readable local-time string for `theTime` (an epoch
    value), defaulting to the current time."""
    when = theTime if theTime else time.time()
    return time.asctime(time.localtime(when))
def moduleHeader(self):
    """Return the shebang line, the (possibly empty) encoding line, and
    any '#' header comment lines for the generated module."""
    header = self._moduleShBang + '\n'
    header += self._moduleEncodingStr + '\n'
    if self._moduleHeaderLines:
        # indent each header line behind '#' by the configured offset
        offSet = self.setting('commentOffset')
        header += (
            '#' + ' '*offSet +
            ('\n#'+ ' '*offSet).join(self._moduleHeaderLines) + '\n')
    return header
def moduleDocstring(self):
    """Return the module docstring block, or '' if no lines were added."""
    if not self._moduleDocStringLines:
        return ''
    body = '\n'.join(self._moduleDocStringLines)
    return '"""' + body + '\n"""\n'

def specialVars(self):
    """Render the recorded special vars as `name = repr(value)` lines,
    sorted by name for deterministic output."""
    theVars = self._specialVars
    renderedLines = [name + ' = ' + repr(theVars[name])
                     for name in sorted(theVars.keys())]
    return '\n'.join(renderedLines)
def importStatements(self):
    """Return the module-level import statements, one per line."""
    return '\n'.join(self._importStatements)

def moduleConstants(self):
    """Return the module constants/globals, one per line."""
    return '\n'.join(self._moduleConstants)

def classDefs(self):
    """Return the source of every finished class, blank-line separated."""
    classDefs = [klass.classDef() for klass in self._finishedClasses()]
    return '\n\n'.join(classDefs)

def moduleFooter(self):
    """Return the module trailer: credits plus a __main__ CLI hook."""
    return """
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=%(className)s()).run()
""" % {'className':self._mainClassName}
##################################################
## Make Compiler an alias for ModuleCompiler
# Public entry point: `Compiler` is the name the rest of Cheetah imports.
Compiler = ModuleCompiler
| Python |
'''
Provides several CacheStore backends for Cheetah's caching framework. The
methods provided by these classes have the same semantics as those in the
python-memcached API, except for their return values:
set(key, val, time=0)
set the value unconditionally
add(key, val, time=0)
set only if the server doesn't already have this key
replace(key, val, time=0)
set only if the server already has this key
get(key)
returns val or raises a KeyError
delete(key)
deletes or raises a KeyError
'''
import time
from Cheetah.Utils.memcache import Client as MemcachedClient
class Error(Exception):
    """Base error type for the cache-store backends in this module."""
    pass

class AbstractCacheStore(object):
    """Interface for cache stores; see the module docstring for the
    memcached-style semantics of each method."""

    def set(self, key, val, time=None):
        """Set the value unconditionally."""
        raise NotImplementedError

    def add(self, key, val, time=None):
        """Set the value only if `key` is not already present."""
        raise NotImplementedError

    def replace(self, key, val, time=None):
        """Set the value only if `key` is already present."""
        raise NotImplementedError

    def delete(self, key):
        """Remove `key`, or raise KeyError if it is absent."""
        raise NotImplementedError

    def get(self, key):
        """Return the stored value, or raise KeyError."""
        raise NotImplementedError

class MemoryCacheStore(AbstractCacheStore):
    """In-process, dict-backed cache store.

    Values are stored as (value, expiryTime) tuples; an expiry time of 0
    means "never expires".  Expired entries are purged lazily by get().
    """
    def __init__(self):
        self._data = {}

    def set(self, key, val, time=0):
        self._data[key] = (val, time)

    def add(self, key, val, time=0):
        if key in self._data:
            raise Error('a value for key %r is already in the cache'%key)
        self._data[key] = (val, time)

    def replace(self, key, val, time=0):
        # Bug fix: replace() must succeed only when the key is already
        # cached.  The original had the condition inverted (it raised when
        # the key WAS present), which made replace() unusable.
        if key not in self._data:
            raise Error('no value for key %r is in the cache'%key)
        self._data[key] = (val, time)

    def delete(self, key):
        del self._data[key]

    def get(self, key):
        (val, exptime) = self._data[key]
        if exptime and time.time() > exptime:
            # lazily purge the expired entry and report a miss
            del self._data[key]
            raise KeyError(key)
        else:
            return val

    def clear(self):
        self._data.clear()
class MemcachedCacheStore(AbstractCacheStore):
    """Cache store backed by a memcached server farm (python-memcached).

    The underlying client returns falsy results on add/replace/delete
    failure; those are converted to Error/KeyError here per the module
    docstring's semantics.
    """
    # Bug fix: must be a tuple of "host:port" strings; the original was
    # ('127.0.0.1:11211') -- a plain string, missing the trailing comma.
    servers = ('127.0.0.1:11211',)

    def __init__(self, servers=None, debug=False):
        if servers is None:
            servers = self.servers
        self._client = MemcachedClient(servers, debug)

    def set(self, key, val, time=0):
        self._client.set(key, val, time)

    def add(self, key, val, time=0):
        res = self._client.add(key, val, time)
        if not res:
            raise Error('a value for key %r is already in the cache'%key)
        # Bug fix: removed a stray `self._data[key] = (val, time)` copied
        # from MemoryCacheStore -- this class has no _data dict and the
        # line raised AttributeError.

    def replace(self, key, val, time=0):
        res = self._client.replace(key, val, time)
        if not res:
            # Bug fix: replace() fails when the key is NOT present; the old
            # message claimed the opposite.  Also removed the stray
            # `self._data[...]` assignment (see add()).
            raise Error('no value for key %r is in the cache'%key)

    def delete(self, key):
        res = self._client.delete(key, time=0)
        if not res:
            raise KeyError(key)

    def get(self, key):
        val = self._client.get(key)
        if val is None:
            raise KeyError(key)
        else:
            return val

    def clear(self):
        self._client.flush_all()
| Python |
'''
Filters for the #filter directive as well as #transform
#filter applies an output filter to Cheetah's $placeholders .
#transform results in a filter on the entirety of the output
'''
import sys
# Additional entities WebSafe knows how to transform.  No need to include
# '<', '>' or '&' since those will have been done already.
# Bug fix: the entity strings had been mangled into identity mappings
# (' ': ' ', '"': '"'), which made the 'also' option a no-op for these
# characters; restored the real HTML entities.
webSafeEntities = {' ': '&nbsp;', '"': '&quot;'}
class Filter(object):
    """A baseclass for the Cheetah Filters."""

    def __init__(self, template=None):
        """Setup a reference to the template that is using the filter instance.
        This reference isn't used by any of the standard filters, but is
        available to Filter subclasses, should they need it.

        Subclasses should call this method.
        """
        self.template = template

    def filter(self, val, encoding=None, str=str, **kw):
        '''
        Pass Unicode strings through unmolested, unless an encoding is specified.
        '''
        # NOTE(review): despite the docstring, `encoding` is never consulted
        # in this base implementation -- confirm whether anything relies on it.
        if val is None:
            return u''
        if isinstance(val, unicode):
            # ignore the encoding and return the unicode object
            return val
        else:
            try:
                return unicode(val)
            except UnicodeDecodeError:
                # we could put more fallbacks here, but we'll just pass the str
                # on and let DummyTransaction worry about it
                return str(val)

# Backwards-compatible aliases for the pass-through base filter.
RawOrEncodedUnicode = Filter
EncodeUnicode = Filter
class Markdown(EncodeUnicode):
    '''
    Markdown will change regular strings to Markdown
    (http://daringfireball.net/projects/markdown/)

    Such that:
    My Header
    =========
    Becomes:
    <h1>My Header</h1>

    and so on.  Markdown is meant to be used with the #transform
    tag, as its usefulness with #filter is marginal at best.
    '''
    def filter(self, value, **kwargs):
        # This is a bit of a hack to allow outright embedding of the markdown module
        try:
            import markdown
        except ImportError:
            print('>>> Exception raised importing the "markdown" module')
            print('>>> Are you sure you have the ElementTree module installed?')
            print(' http://effbot.org/downloads/#elementtree')
            raise
        # unicode-ify first (EncodeUnicode.filter), then render to HTML
        encoded = super(Markdown, self).filter(value, **kwargs)
        return markdown.markdown(encoded)
class CodeHighlighter(EncodeUnicode):
    '''
    The CodeHighlighter filter depends on the "pygments" module which you can
    download and install from: http://pygments.org

    The CodeHighlighter assumes the string that it's receiving is source
    code and uses pygments.lexers.guess_lexer() to try to guess which parser
    to use when highlighting it.

    CodeHighlighter will return the HTML and CSS to render the code block,
    syntax highlighted, in a browser.

    NOTE: I had an issue installing pygments on Linux/amd64/Python 2.6 dealing with
    importing of pygments.lexers, I was able to correct the failure by adding:
        raise ImportError
    to line 39 of pygments/plugin.py (since importing pkg_resources was causing issues)
    '''
    def filter(self, source, **kwargs):
        # unicode-ify first (EncodeUnicode.filter)
        encoded = super(CodeHighlighter, self).filter(source, **kwargs)
        try:
            from pygments import highlight
            from pygments import lexers
            from pygments import formatters
        except ImportError, ex:
            # pygments is optional: degrade gracefully to the unhighlighted text
            print('<%s> - Failed to import pygments! (%s)' % (self.__class__.__name__, ex))
            print('-- You may need to install it from: http://pygments.org')
            return encoded

        lexer = None
        try:
            lexer = lexers.guess_lexer(source)
        except lexers.ClassNotFound:
            # fall back to Python highlighting when the language is unknown
            lexer = lexers.PythonLexer()

        formatter = formatters.HtmlFormatter(cssclass='code_highlighter')
        encoded = highlight(encoded, lexer, formatter)
        css = formatter.get_style_defs('.code_highlighter')
        return '''<style type="text/css"><!--
%(css)s
--></style>%(source)s''' % {'css' : css, 'source' : encoded}
class MaxLen(Filter):
    """Truncate the filtered output to the 'maxlen' keyword, if given."""
    def filter(self, val, **kw):
        """Replace None with '' and cut off at maxlen."""
        text = super(MaxLen, self).filter(val, **kw)
        if 'maxlen' in kw:
            limit = kw['maxlen']
            if len(text) > limit:
                return text[:limit]
        return text
class WebSafe(Filter):
    """Escape HTML entities in $placeholders.
    """
    def filter(self, val, **kw):
        s = super(WebSafe, self).filter(val, **kw)
        # These substitutions are copied from cgi.escape().  Bug fix: the
        # replacement strings had been corrupted into identity mappings
        # ('&' -> '&'), making the filter a no-op; restored the real
        # entities.  '&' must be replaced first so it doesn't re-escape
        # the ampersands introduced by the other entities.
        s = s.replace("&", "&amp;") # Must be done first!
        s = s.replace("<", "&lt;")
        s = s.replace(">", "&gt;")
        # Process the additional transformations if any.
        if 'also' in kw:
            also = kw['also']
            entities = webSafeEntities   # Global variable.
            for k in also:
                if k in entities:
                    v = entities[k]
                else:
                    # use a decimal character reference for anything unknown
                    v = "&#%s;" % ord(k)
                s = s.replace(k, v)
        return s
class Strip(Filter):
    """Strip leading/trailing whitespace from each line of the value.

    Newlines themselves are preserved: every input line maps to exactly
    one output line with its trailing newline intact, so consecutive
    blank lines survive.  Lines are located by scanning for each '\n'
    individually rather than splitting, keeping the line structure exact.

    This filter is intended to be usable both with the #filter directive
    and with the proposed #sed directive (which has not been ratified yet.)
    """
    def filter(self, val, **kw):
        text = super(Strip, self).filter(val, **kw)
        pieces = []
        cursor = 0                       # current line is text[cursor:newlinePos]
        while True:
            newlinePos = text.find('\n', cursor)
            if newlinePos == -1:
                break                    # no newline left; tail handled below
            pieces.append(text[cursor:newlinePos].strip())
            pieces.append('\n')
            cursor = newlinePos + 1
        # whatever follows the final newline (possibly the empty string)
        pieces.append(text[cursor:].strip())
        return "".join(pieces)
class StripSqueeze(Filter):
    """Canonicalize every run of whitespace to a single space.

    Leading/trailing whitespace is stripped and all newlines removed, so
    multi-line input collapses to one long line with NO trailing newline.
    """
    def filter(self, val, **kw):
        text = super(StripSqueeze, self).filter(val, **kw)
        return " ".join(text.split())
##################################################
## MAIN ROUTINE -- testing
def test():
    """Ad-hoc smoke test: print each filter's output for sample strings.

    NOTE(review): the multi-argument print(...) calls print tuples under
    Python 2 (this module has no `from __future__ import print_function`).
    """
    s1 = "abc <=> &"
    s2 = " asdf \n\t 1 2 3\n"
    print("WebSafe INPUT:", repr(s1))
    print(" WebSafe:", repr(WebSafe().filter(s1)))
    print()
    print(" Strip INPUT:", repr(s2))
    print(" Strip:", repr(Strip().filter(s2)))
    print("StripSqueeze:", repr(StripSqueeze().filter(s2)))
    print("Unicode:", repr(EncodeUnicode().filter(u'aoeu12345\u1234')))

if __name__ == "__main__":
    test()
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
# $Id: ImportHooks.py,v 1.27 2007/11/16 18:28:47 tavis_rudd Exp $
"""Provides some import hooks to allow Cheetah's .tmpl files to be imported
directly like Python .py modules.
To use these:
import Cheetah.ImportHooks
Cheetah.ImportHooks.install()
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.27 $
Start Date: 2001/03/30
Last Revision Date: $Date: 2007/11/16 18:28:47 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.27 $"[11:-2]
import sys
import os.path
import types
import __builtin__
import new
import imp
from threading import RLock
import string
import traceback
from Cheetah import ImportManager
from Cheetah.ImportManager import DirOwner
from Cheetah.Compiler import Compiler
from Cheetah.convertTmplPathToModuleName import convertTmplPathToModuleName
_installed = False
##################################################
## HELPER FUNCS
# Directory (if any) where compiled template modules are cached to disk.
# NOTE(review): setCacheDir() appends, but CheetahDirOwner._compile() only
# ever reads _cacheDir[0], so calls after the first have no effect --
# confirm whether latest-wins was intended before changing.
_cacheDir = []
def setCacheDir(cacheDir):
    """Register the directory used to cache compiled .tmpl modules."""
    global _cacheDir
    _cacheDir.append(cacheDir)
##################################################
## CLASSES
class CheetahDirOwner(DirOwner):
    """ImportManager directory owner that compiles Cheetah *.tmpl files
    into Python modules on the fly when no ordinary module is found."""
    _lock = RLock()
    _acquireLock = _lock.acquire
    _releaseLock = _lock.release

    templateFileExtensions = ('.tmpl',)

    def getmod(self, name):
        """Return a module for `name`, compiling <name><ext> if present;
        None when neither a normal module nor a template exists."""
        self._acquireLock()
        try:
            mod = DirOwner.getmod(self, name)
            if mod:
                return mod
            for ext in self.templateFileExtensions:
                tmplPath = os.path.join(self.path, name + ext)
                if os.path.exists(tmplPath):
                    try:
                        return self._compile(name, tmplPath)
                    except Exception:
                        # Bug fix: narrowed from a bare `except:` so that
                        # KeyboardInterrupt/SystemExit are no longer
                        # swallowed and re-raised as an ImportError.
                        # @@TR: log the error
                        exc_txt = traceback.format_exc()
                        exc_txt =' '+(' \n'.join(exc_txt.splitlines()))
                        raise ImportError(
                            'Error while compiling Cheetah module'
                            ' %(name)s, original traceback follows:\n%(exc_txt)s'%locals())
            ##
            return None
        finally:
            self._releaseLock()

    def _compile(self, name, tmplPath):
        """Compile `tmplPath` to a code object wrapped in a new module,
        writing the generated .py to the cache dir when one is set."""
        ## @@ consider adding an ImportError raiser here
        code = str(Compiler(file=tmplPath, moduleName=name,
                            mainClassName=name))
        if _cacheDir:
            __file__ = os.path.join(_cacheDir[0],
                                    convertTmplPathToModuleName(tmplPath)) + '.py'
            try:
                open(__file__, 'w').write(code)
            except (IOError, OSError):
                # Bug fix: on Python 2 a failed open()/write raises
                # IOError, which the old `except OSError:` did not catch;
                # fall back to the template path as __file__ either way.
                ## @@ TR: need to add some error code here
                traceback.print_exc(file=sys.stderr)
                __file__ = tmplPath
        else:
            __file__ = tmplPath
        co = compile(code+'\n', __file__, 'exec')

        mod = imp.new_module(name)
        mod.__file__ = co.co_filename
        if _cacheDir:
            mod.__orig_file__ = tmplPath # @@TR: this is used in the WebKit
                                         # filemonitoring code
        mod.__co__ = co
        return mod
##################################################
## FUNCTIONS
def install(templateFileExtensions=('.tmpl',)):
    """Install the Cheetah Import Hooks"""
    global _installed
    if not _installed:
        CheetahDirOwner.templateFileExtensions = templateFileExtensions
        import __builtin__
        if isinstance(__builtin__.__import__, types.BuiltinFunctionType):
            global __oldimport__
            __oldimport__ = __builtin__.__import__
            ImportManager._globalOwnerTypes.insert(0, CheetahDirOwner)
            #ImportManager._globalOwnerTypes.append(CheetahDirOwner)
            global _manager
            _manager=ImportManager.ImportManager()
            _manager.setThreaded()
            _manager.install()
        # Bug fix: the flag was never set, so repeated install() calls
        # stacked extra ImportManagers and uninstall() could not work.
        _installed = True
def uninstall():
    """Uninstall the Cheetah Import Hooks"""
    global _installed
    # Bug fix: the guard was inverted (`if not _installed:`), which made
    # uninstall() run only when the hooks were NOT installed and then hit
    # a NameError on __oldimport__.
    if _installed:
        import __builtin__
        if isinstance(__builtin__.__import__, types.MethodType):
            # restore the builtin importer saved by install()
            __builtin__.__import__ = __oldimport__
            global _manager
            del _manager
        _installed = False

if __name__ == '__main__':
    install()
| Python |
import os.path
import string

# Build a 256-entry translation table mapping every byte to '_' except
# ASCII letters and digits, which pass through unchanged.
# (Python 2 string-module APIs: string.letters / string.translate.)
l = ['_'] * 256
for c in string.digits + string.letters:
    l[ord(c)] = c
_pathNameTransChars = string.join(l, '')
del l, c

def convertTmplPathToModuleName(tmplPath,
                                _pathNameTransChars=_pathNameTransChars,
                                splitdrive=os.path.splitdrive,
                                translate=string.translate,
                                ):
    """Turn a template path into a safe Python module name by replacing
    every non-alphanumeric character with '_'.  Any Windows drive prefix
    is dropped first.  The extra keyword args are bind-locals-for-speed
    defaults, not part of the public interface.
    """
    return translate(splitdrive(tmplPath)[1], _pathNameTransChars)
| Python |
'''
Provides dummy Transaction and Response classes that are used by Cheetah in
place of real Webware transactions when the Template obj is not used directly as a
Webware servlet.
Warning: This may be deprecated in the future, please do not rely on any
specific DummyTransaction or DummyResponse behavior
'''
import logging
import types
class DummyResponseFailure(Exception):
    """Failure inside the dummy response machinery.

    NOTE(review): not raised anywhere in this module's visible code --
    presumably kept for API compatibility.
    """
    pass
class DummyResponse(object):
'''
A dummy Response class is used by Cheetah in place of real Webware
Response objects when the Template obj is not used directly as a Webware
servlet
'''
def __init__(self):
self._outputChunks = []
def flush(self):
pass
def safeConvert(self, chunk):
# Exceptionally gross, but the safest way
# I've found to ensure I get a legit unicode object
if not chunk:
return u''
if isinstance(chunk, unicode):
return chunk
try:
return chunk.decode('utf-8', 'strict')
except UnicodeDecodeError:
try:
return chunk.decode('latin-1', 'strict')
except UnicodeDecodeError:
return chunk.decode('ascii', 'ignore')
except AttributeError:
return unicode(chunk, errors='ignore')
return chunk
def write(self, value):
self._outputChunks.append(value)
def writeln(self, txt):
write(txt)
write('\n')
def getvalue(self, outputChunks=None):
chunks = outputChunks or self._outputChunks
try:
return u''.join(chunks)
except UnicodeDecodeError, ex:
logging.debug('Trying to work around a UnicodeDecodeError in getvalue()')
logging.debug('...perhaps you could fix "%s" while you\'re debugging')
return ''.join((self.safeConvert(c) for c in chunks))
def writelines(self, *lines):
## not used
[self.writeln(ln) for ln in lines]
class DummyTransaction(object):
    '''
    A dummy Transaction class is used by Cheetah in place of real Webware
    transactions when the Template obj is not used directly as a Webware
    servlet.

    It only provides a response object and method; all other transaction
    features make no sense in this context.
    '''
    def __init__(self, *args, **kwargs):
        self._response = None

    def response(self, resp=None):
        """Return the cached response, creating it on first call (from
        `resp` if given, otherwise a fresh DummyResponse)."""
        if self._response is None:
            chosen = resp if resp else DummyResponse()
            self._response = chosen
        return self._response
class TransformerResponse(DummyResponse):
    '''
    Response used by #transform: getvalue() runs the accumulated output
    through self._filter (a filter instance, or a filter class which is
    instantiated on the fly) when one has been assigned.
    '''
    def __init__(self, *args, **kwargs):
        super(TransformerResponse, self).__init__(*args, **kwargs)
        self._filter = None

    def getvalue(self, **kwargs):
        rawOutput = super(TransformerResponse, self).getvalue(**kwargs)
        transform = self._filter
        if not transform:
            return rawOutput
        if isinstance(transform, type):
            # a filter class was assigned rather than an instance
            transform = transform()
        return transform.filter(rawOutput)
class TransformerTransaction(object):
    '''
    Transaction used by #transform; hands out a TransformerResponse.
    '''
    def __init__(self, *args, **kwargs):
        self._response = None

    def response(self):
        # Bug fix: the created response was never stored in
        # self._response, so every call returned a fresh empty response
        # and chunks written to an earlier one were silently lost.
        if self._response is None:
            self._response = TransformerResponse()
        return self._response
| Python |
"""
Parser classes for Cheetah's Compiler
Classes:
ParseError( Exception )
_LowLevelParser( Cheetah.SourceReader.SourceReader ), basically a lexer
_HighLevelParser( _LowLevelParser )
Parser === _HighLevelParser (an alias)
"""
import os
import sys
import re
from re import DOTALL, MULTILINE
from types import StringType, ListType, TupleType, ClassType, TypeType
import time
from tokenize import pseudoprog
import inspect
import new
import traceback
from Cheetah.SourceReader import SourceReader
from Cheetah import Filters
from Cheetah import ErrorCatchers
from Cheetah.Unspecified import Unspecified
from Cheetah.Macros.I18n import I18n
# re tools
# Cache of compiled regexes, keyed by pattern string.
_regexCache = {}
def cachedRegex(pattern):
    """Compile *pattern* once and memoize the compiled regex."""
    try:
        return _regexCache[pattern]
    except KeyError:
        compiled = re.compile(pattern)
        _regexCache[pattern] = compiled
        return compiled
def escapeRegexChars(txt,
                     escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
    """Return *txt* with the regex metacharacters $^*+.?{}[]()|\\ escaped."""
    return escapeRE.sub(lambda m: '\\' + m.group(1), txt)
def group(*choices):
    """Join *choices* into one capturing-group alternation: '(a|b|c)'."""
    return '(%s)' % '|'.join(choices)
def nongroup(*choices):
    """Join *choices* into one non-capturing alternation: '(?:a|b|c)'."""
    return '(?:%s)' % '|'.join(choices)
def namedGroup(name, *choices):
    """Join *choices* into a named capturing group: '(?P<name>a|b)'.

    Bug fix: the original emitted '(P:<name>...' which is not the Python
    named-group syntax ``(?P<name>...)`` and produced a broken pattern.
    """
    return '(?P<' + name + '>' + '|'.join(choices) + ')'
def any(*choices):
    """Alternation repeated zero or more times: '(a|b)*'.

    NOTE: shadows the builtin ``any`` within this module (historical API).
    """
    return '(%s)*' % '|'.join(choices)
def maybe(*choices):
    """Alternation made optional: '(a|b)?'."""
    return '(%s)?' % '|'.join(choices)
##################################################
## CONSTANTS & GLOBALS ##
# Cache policy codes produced by parsing a placeholder's cache token.
NO_CACHE = 0
STATIC_CACHE = 1
REFRESH_CACHE = 2
# Scope codes for the #set directive.
SET_LOCAL = 0
SET_GLOBAL = 1
SET_MODULE = 2
##################################################
## Tokens for the parser ##
#generic
# Characters legal at the start of an identifier / anywhere in a name.
identchars = "abcdefghijklmnopqrstuvwxyz" \
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ_"
namechars = identchars + "0123456789"
#operators
powerOp = '**'
unaryArithOps = ('+', '-', '~')
binaryArithOps = ('+', '-', '/', '//', '%')
shiftOps = ('>>', '<<')
bitwiseOps = ('&', '|', '^')
assignOp = '='
augAssignOps = ('+=', '-=', '/=', '*=', '**=', '^=', '%=',
                '>>=', '<<=', '&=', '|=', )
assignmentOps = (assignOp,) + augAssignOps
# Includes the Python 2-only '<>' inequality operator.
compOps = ('<', '>', '==', '!=', '<=', '>=', '<>', 'is', 'in',)
booleanOps = ('and', 'or', 'not')
operators = (powerOp,) + unaryArithOps + binaryArithOps \
            + shiftOps + bitwiseOps + assignmentOps \
            + compOps + booleanOps
# (sic: 'delimeters' -- name kept for backwards compatibility)
delimeters = ('(', ')', '{', '}', '[', ']',
              ',', '.', ':', ';', '=', '`') + augAssignOps
# Python 2 keyword list ('exec' and 'print' were keywords in Python 2).
keywords = ('and', 'del', 'for', 'is', 'raise',
            'assert', 'elif', 'from', 'lambda', 'return',
            'break', 'else', 'global', 'not', 'try',
            'class', 'except', 'if', 'or', 'while',
            'continue', 'exec', 'import', 'pass',
            'def', 'finally', 'in', 'print',
            )
single3 = "'''"
double3 = '"""'
# All triple-quote openers, with the Python 2 r/u/ur prefix combinations.
tripleQuotedStringStarts = ("'''", '"""',
                            "r'''", 'r"""', "R'''", 'R"""',
                            "u'''", 'u"""', "U'''", 'U"""',
                            "ur'''", 'ur"""', "Ur'''", 'Ur"""',
                            "uR'''", 'uR"""', "UR'''", 'UR"""')
# Maps each opener to the bare closing quote that terminates it.
tripleQuotedStringPairs = {"'''": single3, '"""': double3,
                           "r'''": single3, 'r"""': double3,
                           "u'''": single3, 'u"""': double3,
                           "ur'''": single3, 'ur"""': double3,
                           "R'''": single3, 'R"""': double3,
                           "U'''": single3, 'U"""': double3,
                           "uR'''": single3, 'uR"""': double3,
                           "Ur'''": single3, 'Ur"""': double3,
                           "UR'''": single3, 'UR"""': double3,
                           }
# Closing bracket -> opening bracket, and the reverse mapping.
closurePairs= {')':'(',']':'[','}':'{'}
closurePairsRev= {'(':')','[':']','{':'}'}
##################################################
## Regex chunks for the parser ##
# Maps each triple-quote opener to a compiled regex matching a complete
# triple-quoted string literal (non-greedy, DOTALL so it may span lines).
tripleQuotedStringREs = {}
def makeTripleQuoteRe(start, end):
    """Build a regex matching one whole triple-quoted string literal."""
    start = escapeRegexChars(start)
    end = escapeRegexChars(end)
    return re.compile(r'(?:' + start + r').*?' + r'(?:' + end + r')', re.DOTALL)
for start, end in tripleQuotedStringPairs.items():
    tripleQuotedStringREs[start] = makeTripleQuoteRe(start, end)
# Building blocks for the token regexes below.
WS = r'[ \f\t]*'
EOL = r'\r\n|\n|\r'
EOLZ = EOL + r'|\Z'
# Matches either start-of-source or a preceding non-backslash character, so
# backslash-escaped tokens (e.g. \$) are not treated as live tokens.
escCharLookBehind = nongroup(r'(?<=\A)', r'(?<!\\)')
nameCharLookAhead = r'(?=[A-Za-z_])'
identRE=re.compile(r'[a-zA-Z_][a-zA-Z_0-9]*')
EOLre=re.compile(r'(?:\r\n|\r|\n)')
# Matches specialVar comments, e.g. ##author@ Tavis Rudd
# Bug fix: the original character class was [a-zA-z_]; the 'A-z' range also
# matched the ASCII characters between 'Z' and 'a' ('[', '\', ']', '^', '`'),
# which was clearly unintended -- the range is now 'A-Z'.
specialVarRE=re.compile(r'([a-zA-Z_]+)@')
# Whole-source scanners for the '#unicode <name>' and '#encoding <name>'
# directives; group 1 captures the (possibly empty) codec name.
unicodeDirectiveRE = re.compile(
    r'(?:^|\r\n|\r|\n)\s*#\s{0,5}unicode[:\s]*([-\w.]*)\s*(?:\r\n|\r|\n)', re.MULTILINE)
encodingDirectiveRE = re.compile(
    r'(?:^|\r\n|\r|\n)\s*#\s{0,5}encoding[:\s]*([-\w.]*)\s*(?:\r\n|\r|\n)', re.MULTILINE)
# Matches a '\n' or '\012' escape preceded by an even number of backslashes,
# i.e. an escape sequence that is itself unescaped.
escapedNewlineRE = re.compile(r'(?<!\\)((\\\\)*)\\(n|012)')
# Maps each directive name to the _HighLevelParser method that parses it.
# A value of None means the directive is handled by the generic expression
# parsing machinery rather than a dedicated eat* method.
directiveNamesAndParsers = {
    # importing and inheritance
    'import': None,
    'from': None,
    'extends': 'eatExtends',
    'implements': 'eatImplements',
    'super': 'eatSuper',
    # output, filtering, and caching
    'slurp': 'eatSlurp',
    'raw': 'eatRaw',
    'include': 'eatInclude',
    'cache': 'eatCache',
    'filter': 'eatFilter',
    'echo': None,
    'silent': None,
    'transform': 'eatTransform',
    'call': 'eatCall',
    'arg': 'eatCallArg',
    'capture': 'eatCapture',
    # declaration, assignment, and deletion
    'attr': 'eatAttr',
    'def': 'eatDef',
    'block': 'eatBlock',
    '@': 'eatDecorator',
    'defmacro': 'eatDefMacro',
    'closure': 'eatClosure',
    'set': 'eatSet',
    'del': None,
    # flow control
    'if': 'eatIf',
    'while': None,
    'for': None,
    'else': None,
    'elif': None,
    'pass': None,
    'break': None,
    'continue': None,
    'stop': None,
    'return': None,
    'yield': None,
    # little wrappers
    'repeat': None,
    'unless': None,
    # error handling
    'assert': None,
    'raise': None,
    'try': None,
    'except': None,
    'finally': None,
    'errorCatcher': 'eatErrorCatcher',
    # instructions to the parser and compiler
    'breakpoint': 'eatBreakPoint',
    'compiler': 'eatCompiler',
    'compiler-settings': 'eatCompilerSettings',
    # misc
    'shBang': 'eatShbang',
    'encoding': 'eatEncoding',
    'end': 'eatEndDirective',
    }
# Maps '#end <name>' directives to their handler methods; None means the
# generic end-directive handling suffices.  'short-form' refers to the
# single-line '#if x: y' style that needs no explicit #end.
endDirectiveNamesAndHandlers = {
    'def': 'handleEndDef',      # has short-form
    'block': None,              # has short-form
    'closure': None,            # has short-form
    'cache': None,              # has short-form
    'call': None,               # has short-form
    'capture': None,            # has short-form
    'filter': None,
    'errorCatcher': None,
    'while': None,              # has short-form
    'for': None,                # has short-form
    'if': None,                 # has short-form
    'try': None,                # has short-form
    'repeat': None,             # has short-form
    'unless': None,             # has short-form
    }
##################################################
## CLASSES ##
# @@TR: SyntaxError doesn't call exception.__str__ for some reason!
#class ParseError(SyntaxError):
class ParseError(ValueError):
    """Error raised for invalid Cheetah syntax.

    Carries the SourceReader *stream* so report() can show the offending
    line with a caret and up to three lines of context on each side.
    """
    def __init__(self, stream, msg='Invalid Syntax', extMsg='', lineno=None, col=None):
        self.stream = stream
        # Clamp the stream position so getRowColLine() stays in range.
        if stream.pos() >= len(stream):
            stream.setPos(len(stream) -1)
        self.msg = msg
        self.extMsg = extMsg
        self.lineno = lineno
        self.col = col
    def __str__(self):
        return self.report()
    def report(self):
        """Build the multi-line, human-readable error report string."""
        stream = self.stream
        if stream.filename():
            f = " in file %s" % stream.filename()
        else:
            f = ''
        report = ''
        # Use the explicit lineno/col if given, else the stream's position.
        if self.lineno:
            lineno = self.lineno
            row, col, line = (lineno, (self.col or 0),
                              self.stream.splitlines()[lineno-1])
        else:
            row, col, line = self.stream.getRowColLine()
        ## get the surrounding lines
        lines = stream.splitlines()
        prevLines = []          # (rowNum, content)
        for i in range(1, 4):
            if row-1-i <=0:
                break
            prevLines.append( (row-i, lines[row-1-i]) )
        nextLines = []          # (rowNum, content)
        for i in range(1, 4):
            if not row-1+i < len(lines):
                break
            nextLines.append( (row+i, lines[row-1+i]) )
        nextLines.reverse()
        ## print the main message
        report += "\n\n%s\n" %self.msg
        report += "Line %i, column %i%s\n\n" % (row, col, f)
        report += 'Line|Cheetah Code\n'
        report += '----|-------------------------------------------------------------\n'
        while prevLines:
            lineInfo = prevLines.pop()
            report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
        report += "%(row)-4d|%(line)s\n"% {'row':row, 'line':line}
        # Caret marking the error column (5 = width of the 'NNN |' gutter).
        report += ' '*5 +' '*(col-1) + "^\n"
        while nextLines:
            lineInfo = nextLines.pop()
            report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
        ## add the extra msg
        if self.extMsg:
            report += self.extMsg + '\n'
        return report
class ForbiddenSyntax(ParseError):
    """Base error for syntax that compiler settings have disallowed."""
    pass
class ForbiddenExpression(ForbiddenSyntax):
    """Raised when a disallowed expression form is encountered."""
    pass
class ForbiddenDirective(ForbiddenSyntax):
    """Raised when a disallowed directive is encountered."""
    pass
class CheetahVariable(object):
    """Value object describing one parsed Cheetah $variable."""
    def __init__(self, nameChunks, useNameMapper=True, cacheToken=None,
                 rawSource=None):
        # nameChunks: list of (namePart, autoCall, restOfName) tuples;
        # rawSource: the original template text for this variable, if kept.
        (self.nameChunks, self.useNameMapper,
         self.cacheToken, self.rawSource) = (nameChunks, useNameMapper,
                                             cacheToken, rawSource)
class Placeholder(CheetahVariable):
    """A CheetahVariable used as a top-level $placeholder."""
    pass
class ArgList(object):
    """Used by _LowLevelParser.getArgList()"""
    # Parallel lists: arguments[i] holds the arg name, defaults[i] the raw
    # source text of its default (or None); count indexes the arg currently
    # being parsed.
    def __init__(self):
        self.arguments = []
        self.defaults = []
        self.count = 0
    def add_argument(self, name):
        # A new argument starts out with no default value.
        self.arguments.append(name)
        self.defaults.append(None)
    def next(self):
        # Advance to the next argument slot (called when a ',' is seen).
        self.count += 1
    def add_default(self, token):
        # Append raw source text to the current argument's default value.
        count = self.count
        if self.defaults[count] is None:
            self.defaults[count] = ''
        self.defaults[count] += token
    def merge(self):
        """Return [(argName, defaultSource-or-None), ...].

        Python 2 only: relies on ``basestring`` and on ``map(None, ...)``
        acting like zip_longest.  NOTE(review): a default made up entirely
        of whitespace strips to '' (falsy) and is therefore returned as
        None -- presumably acceptable, but worth confirming.
        """
        defaults = (isinstance(d, basestring) and d.strip() or None for d in self.defaults)
        return list(map(None, (a.strip() for a in self.arguments), defaults))
    def __str__(self):
        return str(self.merge())
class _LowLevelParser(SourceReader):
"""This class implements the methods to match or extract ('get*') the basic
elements of Cheetah's grammar. It does NOT handle any code generation or
state management.
"""
_settingsManager = None
    def setSettingsManager(self, settingsManager):
        """Attach the settings manager used by setting()/setSetting()."""
        self._settingsManager = settingsManager
    def setting(self, key, default=Unspecified):
        """Look up a compiler setting, optionally with a *default* value."""
        if default is Unspecified:
            return self._settingsManager.setting(key)
        else:
            return self._settingsManager.setting(key, default=default)
    def setSetting(self, key, val):
        """Set a single compiler setting on the attached settings manager."""
        self._settingsManager.setSetting(key, val)
    def settings(self):
        """Return the full settings dict from the settings manager."""
        return self._settingsManager.settings()
    def updateSettings(self, settings):
        """Merge *settings* into the settings manager's current settings."""
        self._settingsManager.updateSettings(settings)
    def _initializeSettings(self):
        """Reset the settings manager to its initial defaults."""
        self._settingsManager._initializeSettings()
    def configureParser(self):
        """Is called by the Compiler instance after the parser has had a
        settingsManager assigned with self.setSettingsManager()
        """
        # Build all the token regexes from the (now available) settings.
        self._makeCheetahVarREs()
        self._makeCommentREs()
        self._makeDirectiveREs()
        self._makePspREs()
        # First characters of each special token; used by matchTopLevelToken
        # as a cheap pre-filter before trying the full matchers.
        self._possibleNonStrConstantChars = (
            self.setting('commentStartToken')[0] +
            self.setting('multiLineCommentStartToken')[0] +
            self.setting('cheetahVarStartToken')[0] +
            self.setting('directiveStartToken')[0] +
            self.setting('PSPStartToken')[0])
        # Matchers tried in order by matchTopLevelToken().
        self._nonStrConstMatchers = [
            self.matchCommentStartToken,
            self.matchMultiLineCommentStartToken,
            self.matchVariablePlaceholderStart,
            self.matchExpressionPlaceholderStart,
            self.matchDirective,
            self.matchPSPStartToken,
            self.matchEOLSlurpToken,
            ]
## regex setup ##
    def _makeCheetahVarREs(self):
        """Setup the regexs for Cheetah $var parsing."""
        # Interval suffix for timed cache refresh, e.g. $*5m*var
        # (s=seconds, m=minutes, h=hours, d=days, w=weeks, bare=seconds).
        num = r'[0-9\.]+'
        interval = (r'(?P<interval>' +
                    num + r's|' +
                    num + r'm|' +
                    num + r'h|' +
                    num + r'd|' +
                    num + r'w|' +
                    num + ')'
                    )
        # Cache token: '*<interval>*' = REFRESH_CACHE, '*' = STATIC_CACHE,
        # empty alternative = NO_CACHE (so this regex ALWAYS matches).
        cacheToken = (r'(?:' +
                      r'(?P<REFRESH_CACHE>\*' + interval + '\*)'+
                      '|' +
                      r'(?P<STATIC_CACHE>\*)' +
                      '|' +
                      r'(?P<NO_CACHE>)' +
                      ')')
        self.cacheTokenRE = cachedRegex(cacheToken)
        # '$!var' silences a placeholder; the empty NOT_SILENT alternative
        # again makes this regex always match.
        silentPlaceholderToken = (r'(?:' +
                                  r'(?P<SILENT>' +escapeRegexChars('!')+')'+
                                  '|' +
                                  r'(?P<NOT_SILENT>)' +
                                  ')')
        self.silentPlaceholderTokenRE = cachedRegex(silentPlaceholderToken)
        # Full placeholder start: $ + optional ! + optional cache token +
        # optional { ( [ enclosure, followed by an identifier char.
        self.cheetahVarStartRE = cachedRegex(
            escCharLookBehind +
            r'(?P<startToken>'+escapeRegexChars(self.setting('cheetahVarStartToken'))+')'+
            r'(?P<silenceToken>'+silentPlaceholderToken+')'+
            r'(?P<cacheToken>'+cacheToken+')'+
            r'(?P<enclosure>|(?:(?:\{|\(|\[)[ \t\f]*))' + # allow WS after enclosure
            r'(?=[A-Za-z_])')
        validCharsLookAhead = r'(?=[A-Za-z_\*!\{\(\[])'
        self.cheetahVarStartToken = self.setting('cheetahVarStartToken')
        # Just the start token (e.g. '$'), when followed by any valid char.
        self.cheetahVarStartTokenRE = cachedRegex(
            escCharLookBehind +
            escapeRegexChars(self.setting('cheetahVarStartToken'))
            +validCharsLookAhead
            )
        # Inside expressions only plain '$name' is allowed -- no enclosures
        # or cache tokens -- and escaping is not considered.
        self.cheetahVarInExpressionStartTokenRE = cachedRegex(
            escapeRegexChars(self.setting('cheetahVarStartToken'))
            +r'(?=[A-Za-z_])'
            )
        # ${...} / $(...)/$[...] containing a full expression rather than a
        # plain name.
        self.expressionPlaceholderStartRE = cachedRegex(
            escCharLookBehind +
            r'(?P<startToken>' + escapeRegexChars(self.setting('cheetahVarStartToken')) + ')' +
            r'(?P<cacheToken>' + cacheToken + ')' +
            #r'\[[ \t\f]*'
            r'(?:\{|\(|\[)[ \t\f]*'
            + r'(?=[^\)\}\]])'
            )
        # Optional EOL-slurp token (e.g. '#slurp' shorthand) eats trailing
        # whitespace plus the line ending.
        if self.setting('EOLSlurpToken'):
            self.EOLSlurpRE = cachedRegex(
                escapeRegexChars(self.setting('EOLSlurpToken'))
                + r'[ \t\f]*'
                + r'(?:'+EOL+')'
                )
        else:
            self.EOLSlurpRE = None
    def _makeCommentREs(self):
        """Construct the regex bits that are used in comment parsing."""
        # Single-line comment start (default '##'), not preceded by '\'.
        startTokenEsc = escapeRegexChars(self.setting('commentStartToken'))
        self.commentStartTokenRE = cachedRegex(escCharLookBehind + startTokenEsc)
        del startTokenEsc
        # Multi-line comment delimiters (default '#*' ... '*#').
        startTokenEsc = escapeRegexChars(
            self.setting('multiLineCommentStartToken'))
        endTokenEsc = escapeRegexChars(
            self.setting('multiLineCommentEndToken'))
        self.multiLineCommentTokenStartRE = cachedRegex(escCharLookBehind +
                                                        startTokenEsc)
        self.multiLineCommentEndTokenRE = cachedRegex(escCharLookBehind +
                                                      endTokenEsc)
    def _makeDirectiveREs(self):
        """Construct the regexs that are used in directive parsing."""
        startToken = self.setting('directiveStartToken')
        endToken = self.setting('directiveEndToken')
        startTokenEsc = escapeRegexChars(startToken)
        endTokenEsc = escapeRegexChars(endToken)
        # Directive names start with a letter, '_' or '@' (decorators).
        validSecondCharsLookAhead = r'(?=[A-Za-z_@])'
        reParts = [escCharLookBehind, startTokenEsc]
        if self.setting('allowWhitespaceAfterDirectiveStartToken'):
            reParts.append('[ \t]*')
        reParts.append(validSecondCharsLookAhead)
        self.directiveStartTokenRE = cachedRegex(''.join(reParts))
        self.directiveEndTokenRE = cachedRegex(escCharLookBehind + endTokenEsc)
    def _makePspREs(self):
        """Setup the regexs for PSP parsing."""
        # PSP delimiters (default '<%' ... '%>'), not preceded by '\'.
        startToken = self.setting('PSPStartToken')
        startTokenEsc = escapeRegexChars(startToken)
        self.PSPStartTokenRE = cachedRegex(escCharLookBehind + startTokenEsc)
        endToken = self.setting('PSPEndToken')
        endTokenEsc = escapeRegexChars(endToken)
        self.PSPEndTokenRE = cachedRegex(escCharLookBehind + endTokenEsc)
    def _unescapeCheetahVars(self, theString):
        """Unescape any escaped Cheetah \$vars in the string.
        """
        token = self.setting('cheetahVarStartToken')
        return theString.replace('\\' + token, token)
    def _unescapeDirectives(self, theString):
        """Unescape any escaped Cheetah directives in the string.
        """
        token = self.setting('directiveStartToken')
        return theString.replace('\\' + token, token)
    def isLineClearToStartToken(self, pos=None):
        """True when only whitespace precedes *pos* on the current line."""
        return self.isLineClearToPos(pos)
    def matchTopLevelToken(self):
        """Returns the first match found from the following methods:
        self.matchCommentStartToken
        self.matchMultiLineCommentStartToken
        self.matchVariablePlaceholderStart
        self.matchExpressionPlaceholderStart
        self.matchDirective
        self.matchPSPStartToken
        self.matchEOLSlurpToken
        Returns None if no match.
        """
        match = None
        # Cheap pre-filter: only run the matchers when the current char can
        # possibly start one of the special tokens.
        if self.peek() in self._possibleNonStrConstantChars:
            for matcher in self._nonStrConstMatchers:
                match = matcher()
                if match:
                    break
        return match
    def matchPyToken(self):
        """Match one Python token at the current position.

        Uses tokenize.pseudoprog; when the token is a triple-quote opener,
        try to match the entire triple-quoted string instead.
        """
        match = pseudoprog.match(self.src(), self.pos())
        if match and match.group() in tripleQuotedStringStarts:
            TQSmatch = tripleQuotedStringREs[match.group()].match(self.src(), self.pos())
            if TQSmatch:
                return TQSmatch
        return match
    def getPyToken(self):
        """Consume and return one Python token; raise ParseError on failure.

        An unterminated triple-quoted string matches only its opener here,
        which is reported as a malformed string.
        """
        match = self.matchPyToken()
        if match is None:
            raise ParseError(self)
        elif match.group() in tripleQuotedStringStarts:
            raise ParseError(self, msg='Malformed triple-quoted string')
        return self.readTo(match.end())
    def matchEOLSlurpToken(self):
        """Match the EOL-slurp token, or return None when it is disabled."""
        if self.EOLSlurpRE:
            return self.EOLSlurpRE.match(self.src(), self.pos())
    def getEOLSlurpToken(self):
        """Consume and return the EOL-slurp token; raise on no match."""
        match = self.matchEOLSlurpToken()
        if not match:
            raise ParseError(self, msg='Invalid EOL slurp token')
        return self.readTo(match.end())
    def matchCommentStartToken(self):
        """Match a single-line comment start token at the current position."""
        return self.commentStartTokenRE.match(self.src(), self.pos())
    def getCommentStartToken(self):
        """Consume and return a single-line comment start token."""
        match = self.matchCommentStartToken()
        if not match:
            raise ParseError(self, msg='Invalid single-line comment start token')
        return self.readTo(match.end())
    def matchMultiLineCommentStartToken(self):
        """Match a multi-line comment start token at the current position."""
        return self.multiLineCommentTokenStartRE.match(self.src(), self.pos())
    def getMultiLineCommentStartToken(self):
        """Consume and return a multi-line comment start token."""
        match = self.matchMultiLineCommentStartToken()
        if not match:
            raise ParseError(self, msg='Invalid multi-line comment start token')
        return self.readTo(match.end())
    def matchMultiLineCommentEndToken(self):
        """Match a multi-line comment end token at the current position."""
        return self.multiLineCommentEndTokenRE.match(self.src(), self.pos())
    def getMultiLineCommentEndToken(self):
        """Consume and return a multi-line comment end token."""
        match = self.matchMultiLineCommentEndToken()
        if not match:
            raise ParseError(self, msg='Invalid multi-line comment end token')
        return self.readTo(match.end())
    def getCommaSeparatedSymbols(self):
        """
        Loosely based on getDottedName to pull out comma separated
        named chunks
        """
        srcLen = len(self)
        pieces = []             # completed dotted names
        nameChunks = []         # chunks of the dotted name in progress
        if not self.peek() in identchars:
            raise ParseError(self)
        while self.pos() < srcLen:
            c = self.peek()
            if c in namechars:
                nameChunk = self.getIdentifier()
                nameChunks.append(nameChunk)
            elif c == '.':
                # Only treat '.' as part of a name when followed by an ident.
                if self.pos()+1 <srcLen and self.peek(1) in identchars:
                    nameChunks.append(self.getc())
                else:
                    break
            elif c == ',':
                # End of one symbol; start accumulating the next.
                self.getc()
                pieces.append(''.join(nameChunks))
                nameChunks = []
            elif c in (' ', '\t'):
                self.getc()
            else:
                break
        if nameChunks:
            pieces.append(''.join(nameChunks))
        return pieces
    def getDottedName(self):
        """Consume and return a dotted name like 'a.b.c'.

        Stops before a trailing '.' that is not followed by an identifier
        character.  Raises ParseError when no identifier starts here.
        """
        srcLen = len(self)
        nameChunks = []
        if not self.peek() in identchars:
            raise ParseError(self)
        while self.pos() < srcLen:
            c = self.peek()
            if c in namechars:
                nameChunk = self.getIdentifier()
                nameChunks.append(nameChunk)
            elif c == '.':
                if self.pos()+1 <srcLen and self.peek(1) in identchars:
                    nameChunks.append(self.getc())
                else:
                    break
            else:
                break
        return ''.join(nameChunks)
    def matchIdentifier(self):
        """Match a Python identifier at the current position."""
        return identRE.match(self.src(), self.pos())
    def getIdentifier(self):
        """Consume and return a Python identifier; raise on no match."""
        match = self.matchIdentifier()
        if not match:
            raise ParseError(self, msg='Invalid identifier')
        return self.readTo(match.end())
    def matchOperator(self):
        """Match one Python token, but only if it is an operator."""
        match = self.matchPyToken()
        if match and match.group() not in operators:
            match = None
        return match
    def getOperator(self):
        """Consume and return an operator token; raise on no match."""
        match = self.matchOperator()
        if not match:
            raise ParseError(self, msg='Expected operator')
        return self.readTo( match.end() )
    def matchAssignmentOperator(self):
        """Match one Python token, but only if it is '=' or an augmented
        assignment operator."""
        match = self.matchPyToken()
        if match and match.group() not in assignmentOps:
            match = None
        return match
    def getAssignmentOperator(self):
        """Consume and return an assignment operator; raise on no match."""
        match = self.matchAssignmentOperator()
        if not match:
            raise ParseError(self, msg='Expected assignment operator')
        return self.readTo( match.end() )
    def matchDirective(self):
        """Returns False or the name of the directive matched.
        """
        # Non-destructive: the position is restored before returning.
        startPos = self.pos()
        if not self.matchDirectiveStartToken():
            return False
        self.getDirectiveStartToken()
        directiveName = self.matchDirectiveName()
        self.setPos(startPos)
        return directiveName
    def matchDirectiveName(self, directiveNameChars=identchars+'0123456789-@'):
        """Return the longest known directive name at this position, or None.

        Non-destructive: the position is restored before returning.  '@' is
        special-cased for decorator directives.
        """
        startPos = self.pos()
        possibleMatches = self._directiveNamesAndParsers.keys()
        name = ''
        match = None
        while not self.atEnd():
            c = self.getc()
            if not c in directiveNameChars:
                break
            name += c
            if name == '@':
                # '#@decoratorName' -- accept '@' when an identifier follows.
                if not self.atEnd() and self.peek() in identchars:
                    match = '@'
                break
            # Narrow the candidate set to names with this prefix.
            possibleMatches = [dn for dn in possibleMatches if dn.startswith(name)]
            if not possibleMatches:
                break
            elif (name in possibleMatches and (self.atEnd() or self.peek() not in directiveNameChars)):
                match = name
                break
        self.setPos(startPos)
        return match
    def matchDirectiveStartToken(self):
        """Match a directive start token (e.g. '#') at the current position."""
        return self.directiveStartTokenRE.match(self.src(), self.pos())
    def getDirectiveStartToken(self):
        """Consume and return a directive start token; raise on no match."""
        match = self.matchDirectiveStartToken()
        if not match:
            raise ParseError(self, msg='Invalid directive start token')
        return self.readTo(match.end())
    def matchDirectiveEndToken(self):
        """Match a directive end token at the current position."""
        return self.directiveEndTokenRE.match(self.src(), self.pos())
    def getDirectiveEndToken(self):
        """Consume and return a directive end token; raise on no match."""
        match = self.matchDirectiveEndToken()
        if not match:
            raise ParseError(self, msg='Invalid directive end token')
        return self.readTo(match.end())
def matchColonForSingleLineShortFormDirective(self):
if not self.atEnd() and self.peek()==':':
restOfLine = self[self.pos()+1:self.findEOL()]
restOfLine = restOfLine.strip()
if not restOfLine:
return False
elif self.commentStartTokenRE.match(restOfLine):
return False
else: # non-whitespace, non-commment chars found
return True
return False
    def matchPSPStartToken(self):
        """Match a PSP start token (e.g. '<%') at the current position."""
        return self.PSPStartTokenRE.match(self.src(), self.pos())
    def matchPSPEndToken(self):
        """Match a PSP end token (e.g. '%>') at the current position."""
        return self.PSPEndTokenRE.match(self.src(), self.pos())
    def getPSPStartToken(self):
        """Consume and return a PSP start token; raise on no match."""
        match = self.matchPSPStartToken()
        if not match:
            raise ParseError(self, msg='Invalid psp start token')
        return self.readTo(match.end())
    def getPSPEndToken(self):
        """Consume and return a PSP end token; raise on no match."""
        match = self.matchPSPEndToken()
        if not match:
            raise ParseError(self, msg='Invalid psp end token')
        return self.readTo(match.end())
    def matchCheetahVarStart(self):
        """includes the enclosure and cache token"""
        return self.cheetahVarStartRE.match(self.src(), self.pos())
    def matchCheetahVarStartToken(self):
        """includes the enclosure and cache token"""
        return self.cheetahVarStartTokenRE.match(self.src(), self.pos())
    def matchCheetahVarInExpressionStartToken(self):
        """no enclosures or cache tokens allowed"""
        return self.cheetahVarInExpressionStartTokenRE.match(self.src(), self.pos())
    def matchVariablePlaceholderStart(self):
        """includes the enclosure and cache token"""
        return self.cheetahVarStartRE.match(self.src(), self.pos())
    def matchExpressionPlaceholderStart(self):
        """includes the enclosure and cache token"""
        return self.expressionPlaceholderStartRE.match(self.src(), self.pos())
    def getCheetahVarStartToken(self):
        """just the start token, not the enclosure or cache token"""
        match = self.matchCheetahVarStartToken()
        if not match:
            raise ParseError(self, msg='Expected Cheetah $var start token')
        return self.readTo( match.end() )
def getCacheToken(self):
try:
token = self.cacheTokenRE.match(self.src(), self.pos())
self.setPos( token.end() )
return token.group()
except:
raise ParseError(self, msg='Expected cache token')
def getSilentPlaceholderToken(self):
try:
token = self.silentPlaceholderTokenRE.match(self.src(), self.pos())
self.setPos( token.end() )
return token.group()
except:
raise ParseError(self, msg='Expected silent placeholder token')
    def getTargetVarsList(self):
        """Collect the comma-separated target variable names of a #for/#set
        style directive, stopping at a line ending or the keyword 'in'.

        Leading $ start tokens (with their silence/cache tokens) are consumed
        and discarded so both '$x' and 'x' forms are accepted.
        """
        varnames = []
        while not self.atEnd():
            if self.peek() in ' \t\f':
                self.getWhiteSpace()
            elif self.peek() in '\r\n':
                break
            elif self.startswith(','):
                self.advance()
            elif self.startswith('in ') or self.startswith('in\t'):
                break
            #elif self.matchCheetahVarStart():
            elif self.matchCheetahVarInExpressionStartToken():
                self.getCheetahVarStartToken()
                self.getSilentPlaceholderToken()
                self.getCacheToken()
                varnames.append( self.getDottedName() )
            elif self.matchIdentifier():
                varnames.append( self.getDottedName() )
            else:
                break
        return varnames
    def getCheetahVar(self, plain=False, skipStartToken=False):
        """This is called when parsing inside expressions. Cache tokens are only
        valid in placeholders so this method discards any cache tokens found.
        """
        if not skipStartToken:
            self.getCheetahVarStartToken()
        # Consume (and throw away) the silence and cache tokens.
        self.getSilentPlaceholderToken()
        self.getCacheToken()
        return self.getCheetahVarBody(plain=plain)
    def getCheetahVarBody(self, plain=False):
        """Parse the name chunks and hand them to the compiler for codegen."""
        # @@TR: this should be in the compiler
        return self._compiler.genCheetahVar(self.getCheetahVarNameChunks(), plain=plain)
    def getCheetahVarNameChunks(self):
        """
        nameChunks = list of Cheetah $var subcomponents represented as tuples
        [ (namemapperPart,autoCall,restOfName),
          ]
        where:
          namemapperPart = the dottedName base
          autocall = where NameMapper should use autocalling on namemapperPart
          restOfName = any arglist, index, or slice
        If restOfName contains a call arglist (e.g. '(1234)') then autocall is
        False, otherwise it defaults to True.
        EXAMPLE
        ------------------------------------------------------------------------
        if the raw CheetahVar is
          $a.b.c[1].d().x.y.z
        nameChunks is the list
          [ ('a.b.c',True,'[1]'),
            ('d',False,'()'),
            ('x.y.z',True,''),
            ]
        """
        chunks = []
        while self.pos() < len(self):
            rest = ''
            autoCall = True
            if not self.peek() in identchars + '.':
                break
            elif self.peek() == '.':
                if self.pos()+1 < len(self) and self.peek(1) in identchars:
                    self.advance()  # discard the period as it isn't needed with NameMapper
                else:
                    break
            dottedName = self.getDottedName()
            if not self.atEnd() and self.peek() in '([':
                if self.peek() == '(':
                    rest = self.getCallArgString()
                else:
                    rest = self.getExpression(enclosed=True)
            # Split off everything before the final dotted component, so the
            # arglist/index attaches only to the last name part.
            period = max(dottedName.rfind('.'), 0)
            if period:
                chunks.append( (dottedName[:period], autoCall, '') )
                dottedName = dottedName[period+1:]
            if rest and rest[0]=='(':
                autoCall = False
            chunks.append( (dottedName, autoCall, rest) )
        return chunks
    def getCallArgString(self,
                         enclosures=[],  # list of tuples (char, pos), where char is ({ or [
                         useNameMapper=Unspecified):
        """ Get a method/function call argument string.
        This method understands *arg, and **kw

        NOTE(review): the mutable default for *enclosures* is safe here only
        because the empty-default path rebinds the name before any mutation.
        """
        # @@TR: this settings mangling should be removed
        if useNameMapper is not Unspecified:
            useNameMapper_orig = self.setting('useNameMapper')
            self.setSetting('useNameMapper', useNameMapper)
        if enclosures:
            pass
        else:
            if not self.peek() == '(':
                raise ParseError(self, msg="Expected '('")
            startPos = self.pos()
            self.getc()
            enclosures = [('(', startPos),
                          ]
        argStringBits = ['(']
        addBit = argStringBits.append
        while True:
            if self.atEnd():
                open = enclosures[-1][0]
                close = closurePairsRev[open]
                self.setPos(enclosures[-1][1])
                raise ParseError(
                    self, msg="EOF was reached before a matching '" + close +
                    "' was found for the '" + open + "'")
            c = self.peek()
            if c in ")}]":  # get the ending enclosure and break
                if not enclosures:
                    raise ParseError(self)
                c = self.getc()
                open = closurePairs[c]
                if enclosures[-1][0] == open:
                    enclosures.pop()
                    addBit(')')
                    break
                else:
                    raise ParseError(self)
            elif c in " \t\f\r\n":
                addBit(self.getc())
            elif self.matchCheetahVarInExpressionStartToken():
                startPos = self.pos()
                codeFor1stToken = self.getCheetahVar()
                WS = self.getWhiteSpace()
                if not self.atEnd() and self.peek() == '=':
                    nextToken = self.getPyToken()
                    if nextToken == '=':
                        # '$name=' is a keyword argument: re-parse the name as
                        # a plain (non-NameMapper) var so it is a legal kwarg.
                        endPos = self.pos()
                        self.setPos(startPos)
                        codeFor1stToken = self.getCheetahVar(plain=True)
                        self.setPos(endPos)
                    ## finally
                    addBit( codeFor1stToken + WS + nextToken )
                else:
                    addBit( codeFor1stToken + WS)
            elif self.matchCheetahVarStart():
                # it has syntax that is only valid at the top level
                self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr()
            else:
                beforeTokenPos = self.pos()
                token = self.getPyToken()
                if token in ('{', '(', '['):
                    # Re-parse a nested bracketed expression as a whole unit.
                    self.rev()
                    token = self.getExpression(enclosed=True)
                token = self.transformToken(token, beforeTokenPos)
                addBit(token)
        if useNameMapper is not Unspecified:
            self.setSetting('useNameMapper', useNameMapper_orig)  # @@TR: see comment above
        return ''.join(argStringBits)
    def getDefArgList(self, exitPos=None, useNameMapper=False):
        """ Get an argument list. Can be used for method/function definition
        argument lists or for #directive argument lists. Returns a list of
        tuples in the form (argName, defVal=None) with one tuple for each arg
        name.
        These defVals are always strings, so (argName, defVal=None) is safe even
        with a case like (arg1, arg2=None, arg3=1234*2), which would be returned as
        [('arg1', None),
         ('arg2', 'None'),
         ('arg3', '1234*2'),
         ]
        This method understands *arg, and **kw
        """
        if self.peek() == '(':
            self.advance()
        else:
            exitPos = self.findEOL()  # it's a directive so break at the EOL
        argList = ArgList()
        onDefVal = False          # True while consuming a default-value expr
        # @@TR: this settings mangling should be removed
        useNameMapper_orig = self.setting('useNameMapper')
        self.setSetting('useNameMapper', useNameMapper)
        while True:
            if self.atEnd():
                raise ParseError(
                    self, msg="EOF was reached before a matching ')'"+
                    " was found for the '('")
            if self.pos() == exitPos:
                break
            c = self.peek()
            if c == ")" or self.matchDirectiveEndToken():
                break
            elif c == ":":
                break
            elif c in " \t\f\r\n":
                if onDefVal:
                    # Whitespace inside a default is kept (stripped later).
                    argList.add_default(c)
                self.advance()
            elif c == '=':
                onDefVal = True
                self.advance()
            elif c == ",":
                argList.next()
                onDefVal = False
                self.advance()
            elif self.startswith(self.cheetahVarStartToken) and not onDefVal:
                # Allow '$argName' -- just drop the '$'.
                self.advance(len(self.cheetahVarStartToken))
            elif self.matchIdentifier() and not onDefVal:
                argList.add_argument( self.getIdentifier() )
            elif onDefVal:
                if self.matchCheetahVarInExpressionStartToken():
                    token = self.getCheetahVar()
                elif self.matchCheetahVarStart():
                    # it has syntax that is only valid at the top level
                    self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr()
                else:
                    beforeTokenPos = self.pos()
                    token = self.getPyToken()
                    if token in ('{', '(', '['):
                        self.rev()
                        token = self.getExpression(enclosed=True)
                    token = self.transformToken(token, beforeTokenPos)
                argList.add_default(token)
            elif c == '*' and not onDefVal:
                # '*args' or '**kwargs'
                varName = self.getc()
                if self.peek() == '*':
                    varName += self.getc()
                if not self.matchIdentifier():
                    raise ParseError(self)
                varName += self.getIdentifier()
                argList.add_argument(varName)
            else:
                raise ParseError(self)
        self.setSetting('useNameMapper', useNameMapper_orig)  # @@TR: see comment above
        return argList.merge()
    def getExpressionParts(self,
                           enclosed=False,
                           enclosures=None,  # list of tuples (char, pos), where char is ({ or [
                           pyTokensToBreakAt=None,  # only works if not enclosed
                           useNameMapper=Unspecified,
                           ):
        """ Get a Cheetah expression that includes $CheetahVars and break at
        directive end tokens, the end of an enclosure, or at a specified
        pyToken.
        """
        if useNameMapper is not Unspecified:
            useNameMapper_orig = self.setting('useNameMapper')
            self.setSetting('useNameMapper', useNameMapper)
        if enclosures is None:
            enclosures = []
        srcLen = len(self)
        exprBits = []
        while True:
            if self.atEnd():
                if enclosures:
                    # EOF with an unbalanced bracket: point at the opener.
                    open = enclosures[-1][0]
                    close = closurePairsRev[open]
                    self.setPos(enclosures[-1][1])
                    raise ParseError(
                        self, msg="EOF was reached before a matching '" + close +
                        "' was found for the '" + open + "'")
                else:
                    break
            c = self.peek()
            if c in "{([":
                exprBits.append(c)
                enclosures.append( (c, self.pos()) )
                self.advance()
            elif enclosed and not enclosures:
                # The enclosure we started inside has been closed: done.
                break
            elif c in "])}":
                if not enclosures:
                    raise ParseError(self)
                open = closurePairs[c]
                if enclosures[-1][0] == open:
                    enclosures.pop()
                    exprBits.append(c)
                else:
                    # Mismatched bracket: report both positions.
                    open = enclosures[-1][0]
                    close = closurePairsRev[open]
                    row, col = self.getRowCol()
                    self.setPos(enclosures[-1][1])
                    raise ParseError(
                        self, msg= "A '" + c + "' was found at line " + str(row) +
                        ", col " + str(col) +
                        " before a matching '" + close +
                        "' was found\nfor the '" + open + "'")
                self.advance()
            elif c in " \f\t":
                exprBits.append(self.getWhiteSpace())
            elif self.matchDirectiveEndToken() and not enclosures:
                break
            elif c == "\\" and self.pos()+1 < srcLen:
                # A backslash is only legal as a line continuation.
                eolMatch = EOLre.match(self.src(), self.pos()+1)
                if not eolMatch:
                    self.advance()
                    raise ParseError(self, msg='Line ending expected')
                self.setPos( eolMatch.end() )
            elif c in '\r\n':
                # Newlines end an unenclosed expression; inside brackets
                # they are skipped (implicit line continuation).
                if enclosures:
                    self.advance()
                else:
                    break
            elif self.matchCheetahVarInExpressionStartToken():
                expr = self.getCheetahVar()
                exprBits.append(expr)
            elif self.matchCheetahVarStart():
                # it has syntax that is only valid at the top level
                self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr()
            else:
                beforeTokenPos = self.pos()
                token = self.getPyToken()
                if (not enclosures
                    and pyTokensToBreakAt
                    and token in pyTokensToBreakAt):
                    # Caller asked us to stop before this token.
                    self.setPos(beforeTokenPos)
                    break
                token = self.transformToken(token, beforeTokenPos)
                exprBits.append(token)
                if identRE.match(token):
                    if token == 'for':
                        # Special-case list-comp/genexp 'for ... in' targets.
                        expr = self.getExpression(useNameMapper=False, pyTokensToBreakAt=['in'])
                        exprBits.append(expr)
                    else:
                        exprBits.append(self.getWhiteSpace())
                        if not self.atEnd() and self.peek() == '(':
                            exprBits.append(self.getCallArgString())
        ##
        if useNameMapper is not Unspecified:
            self.setSetting('useNameMapper', useNameMapper_orig)  # @@TR: see comment above
        return exprBits
    def getExpression(self,
                      enclosed=False,
                      enclosures=None,  # list of tuples (char, pos), where # char is ({ or [
                      pyTokensToBreakAt=None,
                      useNameMapper=Unspecified,
                      ):
        """Returns the output of self.getExpressionParts() as a concatenated
        string rather than as a list.
        """
        return ''.join(self.getExpressionParts(
            enclosed=enclosed, enclosures=enclosures,
            pyTokensToBreakAt=pyTokensToBreakAt,
            useNameMapper=useNameMapper))
    def transformToken(self, token, beforeTokenPos):
        """Takes a token from the expression being parsed and performs and
        special transformations required by Cheetah.
        At the moment only Cheetah's c'$placeholder strings' are transformed:
        a c-prefixed string literal is re-parsed so that embedded
        $placeholders become str() calls joined with the literal parts.
        """
        if token=='c' and not self.atEnd() and self.peek() in '\'"':
            nextToken = self.getPyToken()
            token = nextToken.upper()
            # SECURITY NOTE: eval() of the (upper-cased) string literal from
            # the template source.  Template sources are already executed as
            # code by Cheetah, so this adds no new trust boundary, but it is
            # worth flagging.
            theStr = eval(token)
            endPos = self.pos()
            if not theStr:
                return
            # Step back inside the literal, past the prefix and quote(s).
            if token.startswith(single3) or token.startswith(double3):
                startPosIdx = 3
            else:
                startPosIdx = 1
            self.setPos(beforeTokenPos+startPosIdx+1)
            outputExprs = []
            strConst = ''
            while self.pos() < (endPos-startPosIdx):
                if self.matchCheetahVarStart() or self.matchExpressionPlaceholderStart():
                    if strConst:
                        outputExprs.append(repr(strConst))
                        strConst = ''
                    placeholderExpr = self.getPlaceholder()
                    outputExprs.append('str('+placeholderExpr+')')
                else:
                    strConst += self.getc()
            self.setPos(endPos)
            if strConst:
                outputExprs.append(repr(strConst))
            token = "''.join(["+','.join(outputExprs)+"])"
        return token
def _raiseErrorAboutInvalidCheetahVarSyntaxInExpr(self):
    """Raise a ParseError explaining why the $placeholder at the current
    position is not permitted inside an expression."""
    groups = self.matchCheetahVarStart().groupdict()
    if groups.get('cacheToken'):
        message = ('Cache tokens are not valid inside expressions. '
                   'Use them in top-level $placeholders only.')
    elif groups.get('enclosure'):
        message = ('Long-form placeholders - ${}, $(), $[], etc. are not valid inside expressions. '
                   'Use them in top-level $placeholders only.')
    else:
        message = 'This form of $placeholder syntax is not valid here.'
    raise ParseError(self, msg=message)
def getPlaceholder(self, allowCacheTokens=False, plain=False, returnEverything=False):
    """Parse one complete $placeholder starting at the current position.

    Returns the compiled placeholder expression string, or — when
    returnEverything is True — the tuple (expr, rawPlaceholder, lineCol,
    cacheTokenParts, filterArgs, isSilentPlaceholder).
    """
    # filtered
    for callback in self.setting('preparsePlaceholderHooks'):
        callback(parser=self)
    startPos = self.pos()
    lineCol = self.getRowCol(startPos)
    # NOTE(review): startToken is captured but never read below.
    startToken = self.getCheetahVarStartToken()
    silentPlaceholderToken = self.getSilentPlaceholderToken()
    if silentPlaceholderToken:
        isSilentPlaceholder = True
    else:
        isSilentPlaceholder = False
    if allowCacheTokens:
        cacheToken = self.getCacheToken()
        cacheTokenParts = self.cacheTokenRE.match(cacheToken).groupdict()
    else:
        cacheTokenParts = {}
    # Long-form placeholders: ${...}, $(...), $[...]
    if self.peek() in '({[':
        pos = self.pos()
        enclosureOpenChar = self.getc()
        enclosures = [ (enclosureOpenChar, pos) ]
        self.getWhiteSpace()
    else:
        enclosures = []
    filterArgs = None
    if self.matchIdentifier():
        # $name.chunked.lookup style placeholder
        nameChunks = self.getCheetahVarNameChunks()
        expr = self._compiler.genCheetahVar(nameChunks[:], plain=plain)
        restOfExpr = None
        if enclosures:
            WS = self.getWhiteSpace()
            expr += WS
            if self.setting('allowPlaceholderFilterArgs') and self.peek()==',':
                # ${placeholder, filterArg=...}: strip the surrounding parens
                filterArgs = self.getCallArgString(enclosures=enclosures)[1:-1]
            else:
                if self.peek()==closurePairsRev[enclosureOpenChar]:
                    self.getc()
                else:
                    # more expression follows the var name inside the enclosure;
                    # drop the trailing close-bracket if present
                    restOfExpr = self.getExpression(enclosed=True, enclosures=enclosures)
                    if restOfExpr[-1] == closurePairsRev[enclosureOpenChar]:
                        restOfExpr = restOfExpr[:-1]
                    expr += restOfExpr
        rawPlaceholder = self[startPos: self.pos()]
    else:
        # no identifier: the enclosure holds an arbitrary expression
        expr = self.getExpression(enclosed=True, enclosures=enclosures)
        if expr[-1] == closurePairsRev[enclosureOpenChar]:
            expr = expr[:-1]
        rawPlaceholder=self[startPos: self.pos()]
    expr = self._applyExpressionFilters(expr, 'placeholder',
                                        rawExpr=rawPlaceholder, startPos=startPos)
    for callback in self.setting('postparsePlaceholderHooks'):
        callback(parser=self)
    if returnEverything:
        return (expr, rawPlaceholder, lineCol, cacheTokenParts,
                filterArgs, isSilentPlaceholder)
    else:
        return expr
class _HighLevelParser(_LowLevelParser):
    """This class is a StateMachine for parsing Cheetah source and
    sending state dependent code generation commands to
    Cheetah.Compiler.Compiler.

    The eat* methods each consume one construct (directive, placeholder,
    comment, PSP block, or plain text) from the current position.
    """
def __init__(self, src, filename=None, breakPoint=None, compiler=None):
    """Set up the parser over `src` and bind it to its compiler.

    compiler -- the Cheetah compiler instance that receives the code
        generation commands; it is also installed as the settings manager.
    """
    super(_HighLevelParser, self).__init__(src, filename=filename, breakPoint=breakPoint)
    self.setSettingsManager(compiler)
    self._compiler = compiler
    self.setupState()
    self.configureParser()
def setupState(self):
    """Initialize the parser's per-instance mutable state."""
    # macro name -> callable implementing the macro
    self._macros = {}
    # macro name -> details record created by eatDefMacro
    self._macroDetails = {}
    # stack of currently-open directives awaiting their #end tags
    self._openDirectivesStack = []
def cleanup(self):
    """Drop macro state so reference cycles between the parser and the
    compiled macro templates can be collected."""
    self._macros.clear()
    # shut down each macro's compiled template before forgetting it
    for details in self._macroDetails.values():
        details.template.shutdown()
        del details.template
    self._macroDetails.clear()
def configureParser(self):
    """Extend the low-level parser configuration with the directive
    parser/handler registries."""
    super(_HighLevelParser, self).configureParser()
    self._initDirectives()
def _initDirectives(self):
    """Build the directive-name -> parser/handler registries, merging the
    built-in tables with any custom ones from the compiler settings, and
    register macro directives."""
    def normalizeParserVal(val):
        # Accept a method name, a class (instantiated with the parser),
        # a callable, or None.
        if isinstance(val, (str, unicode)):
            handler = getattr(self, val)
        elif type(val) in (ClassType, TypeType):
            handler = val(self)
        elif hasattr(val, '__call__'):
            handler = val
        elif val is None:
            handler = val
        else:
            # NOTE(review): `name` here is the enclosing loop variable of
            # the for-loops below; this function is only called from
            # inside those loops.
            raise Exception('Invalid parser/handler value %r for %s'%(val, name))
        return handler
    normalizeHandlerVal = normalizeParserVal
    # merge custom directive parsers over the built-in table
    _directiveNamesAndParsers = directiveNamesAndParsers.copy()
    customNamesAndParsers = self.setting('directiveNamesAndParsers', {})
    _directiveNamesAndParsers.update(customNamesAndParsers)
    # merge custom end-directive handlers over the built-in table
    _endDirectiveNamesAndHandlers = endDirectiveNamesAndHandlers.copy()
    customNamesAndHandlers = self.setting('endDirectiveNamesAndHandlers', {})
    _endDirectiveNamesAndHandlers.update(customNamesAndHandlers)
    # a False/0 value disables the directive entirely
    self._directiveNamesAndParsers = {}
    for name, val in _directiveNamesAndParsers.items():
        if val in (False, 0):
            continue
        self._directiveNamesAndParsers[name] = normalizeParserVal(val)
    self._endDirectiveNamesAndHandlers = {}
    for name, val in _endDirectiveNamesAndHandlers.items():
        if val in (False, 0):
            continue
        self._endDirectiveNamesAndHandlers[name] = normalizeHandlerVal(val)
    # directives that must be closed with a matching #end tag
    self._closeableDirectives = ['def', 'block', 'closure', 'defmacro',
                                 'call',
                                 'capture',
                                 'cache',
                                 'filter',
                                 'if', 'unless',
                                 'for', 'while', 'repeat',
                                 'try',
                                 ]
    for directiveName in self.setting('closeableDirectives', []):
        self._closeableDirectives.append(directiveName)
    # macro directives (e.g. #i18n) are parsed via eatMacroCall
    macroDirectives = self.setting('macroDirectives', {})
    macroDirectives['i18n'] = I18n
    for macroName, callback in macroDirectives.items():
        if type(callback) in (ClassType, TypeType):
            callback = callback(parser=self)
        assert callback
        self._macros[macroName] = callback
        self._directiveNamesAndParsers[macroName] = self.eatMacroCall
def _applyExpressionFilters(self, expr, exprType, rawExpr=None, startPos=None):
    """Pipe a Cheetah expression through the optional filter hooks from the
    compiler setting 'expressionFilterHooks' and return the result.

    Each hook is called as
        hook(parser, expr, exprType, rawExpr=None, startPos=None)
    and may rewrite the expression or raise ForbiddenExpression to reject
    it.  Intended uses include 'restricted execution' safeguards when the
    template author is untrusted, and style-guideline enforcement.

    Arguments:
    expr -- the expression to filter; the parser may already have rewritten
        it (e.g. placeholders become namemapper calls) — see rawExpr for
        the original source text.
    exprType -- the lowercase directive name, 'psp', or 'placeholder'.
    rawExpr -- the original source string Cheetah parsed (may be None).
    startPos -- character position where parsing of this expression began.

    @@TR: 'expression' is a loose term here — many are statements.
    """
    hooks = self.setting('expressionFilterHooks')
    for hook in hooks:
        expr = hook(parser=self, expr=expr, exprType=exprType,
                    rawExpr=rawExpr, startPos=startPos)
    return expr
def _filterDisabledDirectives(self, directiveName):
    """Raise ForbiddenDirective if directiveName has been disabled via the
    'disabledDirectives'/'enabledDirectives' settings, running the
    'disabledDirectiveHooks' callbacks first."""
    directiveName = directiveName.lower()
    explicitlyDisabled = directiveName in self.setting('disabledDirectives')
    whitelist = self.setting('enabledDirectives')
    if explicitlyDisabled or (whitelist and directiveName not in whitelist):
        for callback in self.setting('disabledDirectiveHooks'):
            callback(parser=self, directiveName=directiveName)
        raise ForbiddenDirective(self, msg='This %r directive is disabled'%directiveName)
## main parse loop
def parse(self, breakPoint=None, assertEmptyStack=True):
    """Main parse loop: repeatedly match the construct at the current
    position (in fixed priority order) and consume it, until the end of
    the source or `breakPoint` is reached.

    breakPoint -- optional position at which to stop early; when given,
        the original break point is restored afterwards and the
        open-directives stack is not checked.
    assertEmptyStack -- verify all opened directives were closed.
    """
    if breakPoint:
        origBP = self.breakPoint()
        self.setBreakPoint(breakPoint)
        assertEmptyStack = False
    while not self.atEnd():
        if self.matchCommentStartToken():
            self.eatComment()
        elif self.matchMultiLineCommentStartToken():
            self.eatMultiLineComment()
        elif self.matchVariablePlaceholderStart():
            self.eatPlaceholder()
        elif self.matchExpressionPlaceholderStart():
            self.eatPlaceholder()
        elif self.matchDirective():
            self.eatDirective()
        elif self.matchPSPStartToken():
            self.eatPSP()
        elif self.matchEOLSlurpToken():
            self.eatEOLSlurpToken()
        else:
            # anything unrecognized is plain template text
            self.eatPlainText()
    if assertEmptyStack:
        self.assertEmptyOpenDirectivesStack()
    if breakPoint:
        self.setBreakPoint(origBP)
## non-directive eat methods
def eatPlainText(self):
    """Consume ordinary template text up to the next top-level token,
    un-escape it, and emit it as a string constant.  Returns the match
    object for the token that stopped the scan (or None at EOF)."""
    scanStart = self.pos()
    match = None
    while not self.atEnd():
        match = self.matchTopLevelToken()
        if match:
            break
        self.advance()
    text = self.readTo(self.pos(), start=scanStart)
    text = self._unescapeCheetahVars(text)
    text = self._unescapeDirectives(text)
    self._compiler.addStrConst(text)
    return match
def eatComment(self):
    """Consume a single-line comment and hand its text to the compiler."""
    lineWasClear = self.isLineClearToStartToken()
    if lineWasClear:
        # strip the indentation preceding the comment token
        self._compiler.handleWSBeforeDirective()
    self.getCommentStartToken()
    self._compiler.addComment(self.readToEOL(gobble=lineWasClear))
def eatMultiLineComment(self):
    """Consume a multi-line comment (honoring nested comment tokens) and
    pass its text to the compiler, optionally gobbling the surrounding
    whitespace."""
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    self.getMultiLineCommentStartToken()
    endPos = startPos = self.pos()
    level = 1  # nesting depth of comment start/end tokens
    while True:
        endPos = self.pos()
        if self.atEnd():
            break
        if self.matchMultiLineCommentStartToken():
            self.getMultiLineCommentStartToken()
            level += 1
        elif self.matchMultiLineCommentEndToken():
            self.getMultiLineCommentEndToken()
            level -= 1
        if not level:
            break
        self.advance()
    comm = self.readTo(endPos, start=startPos)
    if not self.atEnd():
        self.getMultiLineCommentEndToken()
    # gobble a whitespace-only remainder of the closing line if configured
    if (not self.atEnd()) and self.setting('gobbleWhitespaceAroundMultiLineComments'):
        restOfLine = self[self.pos():self.findEOL()]
        if not restOfLine.strip(): # WS only to EOL
            self.readToEOL(gobble=isLineClearToStartToken)
    if isLineClearToStartToken and (self.atEnd() or self.pos() > endOfFirstLine):
        self._compiler.handleWSBeforeDirective()
    self._compiler.addComment(comm)
def eatPlaceholder(self):
    """Parse one $placeholder at the current position and emit it through
    the compiler."""
    parsed = self.getPlaceholder(allowCacheTokens=True, returnEverything=True)
    (expr, rawPlaceholder, lineCol,
     cacheTokenParts, filterArgs, isSilentPlaceholder) = parsed
    self._compiler.addPlaceholder(
        expr,
        filterArgs=filterArgs,
        rawPlaceholder=rawPlaceholder,
        cacheTokenParts=cacheTokenParts,
        lineCol=lineCol,
        silentMode=isSilentPlaceholder)
def eatPSP(self):
    """Consume a PSP code block and pass the stripped, filtered code to
    the compiler."""
    # filtered
    self._filterDisabledDirectives(directiveName='psp')
    self.getPSPStartToken()
    endToken = self.setting('PSPEndToken')
    scanStart = self.pos()
    # advance until the full end token matches (cheap first-char check)
    while not self.atEnd():
        if self.peek() == endToken[0] and self.matchPSPEndToken():
            break
        self.advance()
    code = self.readTo(self.pos(), start=scanStart).strip()
    code = self._applyExpressionFilters(code, 'psp', startPos=scanStart)
    self._compiler.addPSP(code)
    self.getPSPEndToken()
## generic directive eat methods
# Directives handled generically by eatSimpleIndentingDirective: each
# opens an indented suite in the generated code.
_simpleIndentingDirectives = '''
else elif for while repeat unless try except finally'''.split()
# Directives that consist of a single expression with no body, handled
# generically by eatSimpleExprDirective.
_simpleExprDirectives = '''
pass continue stop return yield break
del assert raise
silent echo
import from'''.split()
# Overrides for the default 'add' + name.capitalize() compiler handler
# naming convention used in eatDirective.
_directiveHandlerNames = {'import': 'addImportStatement',
                          'from': 'addImportStatement', }
def eatDirective(self):
    """Parse the directive at the current position: dispatch to a
    registered parser method, or fall back to the generic simple-directive
    eaters, running the pre/post-parse hooks around it."""
    directiveName = self.matchDirective()
    self._filterDisabledDirectives(directiveName)
    for callback in self.setting('preparseDirectiveHooks'):
        callback(parser=self, directiveName=directiveName)
    # subclasses can override the default behaviours here by providing an
    # eater method in self._directiveNamesAndParsers[directiveName]
    directiveParser = self._directiveNamesAndParsers.get(directiveName)
    if directiveParser:
        directiveParser()
    elif directiveName in self._simpleIndentingDirectives:
        # compiler handler name: explicit override or 'add' + Name
        handlerName = self._directiveHandlerNames.get(directiveName)
        if not handlerName:
            handlerName = 'add'+directiveName.capitalize()
        handler = getattr(self._compiler, handlerName)
        self.eatSimpleIndentingDirective(directiveName, callback=handler)
    elif directiveName in self._simpleExprDirectives:
        handlerName = self._directiveHandlerNames.get(directiveName)
        if not handlerName:
            handlerName = 'add'+directiveName.capitalize()
        handler = getattr(self._compiler, handlerName)
        # #silent and #echo take the expression without the directive name
        if directiveName in ('silent', 'echo'):
            includeDirectiveNameInExpr = False
        else:
            includeDirectiveNameInExpr = True
        expr = self.eatSimpleExprDirective(
            directiveName,
            includeDirectiveNameInExpr=includeDirectiveNameInExpr)
        handler(expr)
    ##
    for callback in self.setting('postparseDirectiveHooks'):
        callback(parser=self, directiveName=directiveName)
def _eatRestOfDirectiveTag(self, isLineClearToStartToken, endOfFirstLinePos):
    """Consume whatever follows a directive on its line: a trailing
    comment, the explicit directive end token, or the EOL; then strip the
    leading whitespace if the directive had its line to itself."""
    foundComment = False
    if self.matchCommentStartToken():
        pos = self.pos()
        self.advance()
        # a comment token immediately followed by a directive is NOT a
        # trailing comment — rewind and leave it for the main loop
        if not self.matchDirective():
            self.setPos(pos)
            foundComment = True
            self.eatComment() # this won't gobble the EOL
        else:
            self.setPos(pos)
    if not foundComment and self.matchDirectiveEndToken():
        self.getDirectiveEndToken()
    elif isLineClearToStartToken and (not self.atEnd()) and self.peek() in '\r\n':
        # still gobble the EOL if a comment was found.
        self.readToEOL(gobble=True)
    if isLineClearToStartToken and (self.atEnd() or self.pos() > endOfFirstLinePos):
        self._compiler.handleWSBeforeDirective()
def _eatToThisEndDirective(self, directiveName):
    """Consume raw source text up to the matching '#end directiveName'
    tag and return it; the end tag itself (and, when it owns the whole
    line, the surrounding whitespace) is consumed but not returned."""
    finalPos = endRawPos = startPos = self.pos()
    directiveChar = self.setting('directiveStartToken')[0]
    isLineClearToStartToken = False
    while not self.atEnd():
        if self.peek() == directiveChar:
            if self.matchDirective() == 'end':
                endRawPos = self.pos()
                self.getDirectiveStartToken()
                self.advance(len('end'))
                self.getWhiteSpace()
                if self.startswith(directiveName):
                    # when the end tag owns its line, exclude that line's
                    # leading whitespace from the returned text
                    if self.isLineClearToStartToken(endRawPos):
                        isLineClearToStartToken = True
                        endRawPos = self.findBOL(endRawPos)
                    self.advance(len(directiveName)) # to end of directiveName
                    self.getWhiteSpace()
                    finalPos = self.pos()
                    break
        self.advance()
        finalPos = endRawPos = self.pos()
    textEaten = self.readTo(endRawPos, start=startPos)
    self.setPos(finalPos)
    endOfFirstLinePos = self.findEOL()
    if self.matchDirectiveEndToken():
        self.getDirectiveEndToken()
    elif isLineClearToStartToken and (not self.atEnd()) and self.peek() in '\r\n':
        self.readToEOL(gobble=True)
    if isLineClearToStartToken and self.pos() > endOfFirstLinePos:
        self._compiler.handleWSBeforeDirective()
    return textEaten
def eatSimpleExprDirective(self, directiveName, includeDirectiveNameInExpr=True):
    """Parse a bodyless, single-expression directive and return its
    (filtered) expression string.

    includeDirectiveNameInExpr -- when False the directive name is skipped
        before parsing, so it does not appear in the returned expression.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    self.getDirectiveStartToken()
    if not includeDirectiveNameInExpr:
        self.advance(len(directiveName))
    startPos = self.pos()
    expr = self.getExpression().strip()
    # NOTE(review): directiveName is recomputed from the parsed text; when
    # includeDirectiveNameInExpr is False this is the expression's first
    # word, not the original directive name — confirm intended.
    directiveName = expr.split()[0]
    expr = self._applyExpressionFilters(expr, directiveName, startPos=startPos)
    if directiveName in self._closeableDirectives:
        self.pushToOpenDirectivesStack(directiveName)
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
    return expr
def eatSimpleIndentingDirective(self, directiveName, callback,
                                includeDirectiveNameInExpr=False):
    """Parse a directive that opens an indented suite (e.g. #for, #while,
    #if variants), supporting both the single-line short form
    ('#for ...: body') and the multi-line form.

    callback -- the compiler method that emits the directive's code.
    """
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    lineCol = self.getRowCol()
    self.getDirectiveStartToken()
    # these directives keep their name as part of the expression
    if directiveName not in 'else elif for while try except finally'.split():
        self.advance(len(directiveName))
    startPos = self.pos()
    self.getWhiteSpace()
    expr = self.getExpression(pyTokensToBreakAt=[':'])
    expr = self._applyExpressionFilters(expr, directiveName, startPos=startPos)
    if self.matchColonForSingleLineShortFormDirective():
        # single-line short form: parse the rest of the line as the body
        self.advance() # skip over :
        if directiveName in 'else elif except finally'.split():
            callback(expr, dedent=False, lineCol=lineCol)
        else:
            callback(expr, lineCol=lineCol)
        self.getWhiteSpace(max=1)
        self.parse(breakPoint=self.findEOL(gobble=True))
        self._compiler.commitStrConst()
        self._compiler.dedent()
    else:
        # multi-line form: body follows until the matching #end
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        if directiveName in self._closeableDirectives:
            self.pushToOpenDirectivesStack(directiveName)
        callback(expr, lineCol=lineCol)
def eatEndDirective(self):
    """Parse an '#end <directive>' tag: pop the matching entry from the
    open-directives stack and run the appropriate close action (custom
    handler, compiler region closer, or a simple dedent).

    Fix: the close-action dispatch below previously tested the leftover
    loop variable ``key`` instead of ``directiveName``.  The two are
    always equal once the search loop breaks, but relying on the leaked
    variable was fragile; ``directiveName`` is now used throughout.
    """
    isLineClearToStartToken = self.isLineClearToStartToken()
    self.getDirectiveStartToken()
    self.advance(3)                 # to end of 'end'
    self.getWhiteSpace()
    pos = self.pos()
    directiveName = False
    # identify which registered end-directive name follows '#end'
    for key in self._endDirectiveNamesAndHandlers.keys():
        if self.find(key, pos) == pos:
            directiveName = key
            break
    if not directiveName:
        raise ParseError(self, msg='Invalid end directive')
    endOfFirstLinePos = self.findEOL()
    self.getExpression() # eat in any extra comment-like crap
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    if directiveName in self._closeableDirectives:
        self.popFromOpenDirectivesStack(directiveName)
    # subclasses can override the default behaviours here by providing an
    # end-directive handler in self._endDirectiveNamesAndHandlers[directiveName]
    if self._endDirectiveNamesAndHandlers.get(directiveName):
        handler = self._endDirectiveNamesAndHandlers[directiveName]
        handler()
    elif directiveName in 'block capture cache call filter errorCatcher'.split():
        if directiveName == 'block':
            self._compiler.closeBlock()
        elif directiveName == 'capture':
            self._compiler.endCaptureRegion()
        elif directiveName == 'cache':
            self._compiler.endCacheRegion()
        elif directiveName == 'call':
            self._compiler.endCallRegion()
        elif directiveName == 'filter':
            self._compiler.closeFilterBlock()
        elif directiveName == 'errorCatcher':
            self._compiler.turnErrorCatcherOff()
    elif directiveName in 'while for if try repeat unless'.split():
        self._compiler.commitStrConst()
        self._compiler.dedent()
    elif directiveName=='closure':
        self._compiler.commitStrConst()
        self._compiler.dedent()
        # @@TR: temporary hack of useSearchList
        self.setSetting('useSearchList', self._useSearchList_orig)
## specific directive eat methods
def eatBreakPoint(self):
    """Handle a #breakpoint directive: stop parsing here and completely
    ignore the rest of the source.  This is a debugging tool."""
    self.setBreakPoint(self.pos())
def eatShbang(self):
    """Parse a #shBang directive and record the stripped shbang line with
    the compiler."""
    # filtered
    self.getDirectiveStartToken()
    self.advance(len('shBang'))
    self.getWhiteSpace()
    lineStart = self.pos()
    shbangLine = self.readToEOL()
    shbangLine = self._applyExpressionFilters(shbangLine, 'shbang', startPos=lineStart)
    self._compiler.setShBang(shbangLine.strip())
def eatEncoding(self):
    """Parse an #encoding directive and record the stripped module
    encoding with the compiler."""
    # filtered
    self.getDirectiveStartToken()
    self.advance(len('encoding'))
    self.getWhiteSpace()
    lineStart = self.pos()
    encodingName = self.readToEOL()
    encodingName = self._applyExpressionFilters(encodingName, 'encoding', startPos=lineStart)
    self._compiler.setModuleEncoding(encodingName.strip())
def eatCompiler(self):
    """Parse a '#compiler setting = value' directive and apply it to the
    compiler.  '#compiler reset' restores the default settings."""
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    startPos = self.pos()
    self.getDirectiveStartToken()
    self.advance(len('compiler'))   # to end of 'compiler'
    self.getWhiteSpace()
    # NOTE(review): overwrites the startPos captured above, so later error
    # reporting excludes the directive token itself.
    startPos = self.pos()
    settingName = self.getIdentifier()
    if settingName.lower() == 'reset':
        self.getExpression() # gobble whitespace & junk
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
        self._initializeSettings()
        self.configureParser()
        return
    self.getWhiteSpace()
    if self.peek() == '=':
        self.advance()
    else:
        raise ParseError(self)
    valueExpr = self.getExpression()
    endPos = self.pos()
    # @@TR: it's unlikely that anyone apply filters would have left this
    # directive enabled:
    # @@TR: fix up filtering, regardless
    self._applyExpressionFilters('%s=%r'%(settingName, valueExpr),
                                 'compiler', startPos=startPos)
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
    try:
        self._compiler.setCompilerSetting(settingName, valueExpr)
    except:
        # bare except is deliberate: report context on stderr, then
        # re-raise the original error unchanged
        sys.stderr.write('An error occurred while processing the following #compiler directive.\n')
        sys.stderr.write('----------------------------------------------------------------------\n')
        sys.stderr.write('%s\n' % self[startPos:endPos])
        sys.stderr.write('----------------------------------------------------------------------\n')
        sys.stderr.write('Please check the syntax of these settings.\n\n')
        raise
def eatCompilerSettings(self):
    """Parse a '#compiler-settings ... #end compiler-settings' block and
    apply its contents; '#compiler-settings reset' restores defaults."""
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('compiler-settings'))   # to end of 'settings'
    keywords = self.getTargetVarsList()
    self.getExpression() # gobble any garbage
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
    if 'reset' in keywords:
        self._compiler._initializeSettings()
        self.configureParser()
        # @@TR: this implies a single-line #compiler-settings directive, and
        # thus we should parse forward for an end directive.
        # Subject to change in the future
        return
    startPos = self.pos()
    settingsStr = self._eatToThisEndDirective('compiler-settings')
    settingsStr = self._applyExpressionFilters(settingsStr, 'compilerSettings',
                                               startPos=startPos)
    try:
        self._compiler.setCompilerSettings(keywords=keywords, settingsStr=settingsStr)
    except:
        # bare except is deliberate: show the offending settings, then
        # re-raise the original error unchanged
        sys.stderr.write('An error occurred while processing the following compiler settings.\n')
        sys.stderr.write('----------------------------------------------------------------------\n')
        sys.stderr.write('%s\n' % settingsStr.strip())
        sys.stderr.write('----------------------------------------------------------------------\n')
        sys.stderr.write('Please check the syntax of these settings.\n\n')
        raise
def eatAttr(self):
    """Parse an '#attr $name = expr' directive and register the class
    attribute with the compiler."""
    # filtered
    lineWasClear = self.isLineClearToStartToken()
    firstLineEnd = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('attr'))
    self.getWhiteSpace()
    exprStart = self.pos()
    # the leading $ on the attribute name is optional
    if self.matchCheetahVarStart():
        self.getCheetahVarStartToken()
    attribName = self.getIdentifier()
    self.getWhiteSpace()
    self.getAssignmentOperator()
    valueExpr = self.getExpression()
    valueExpr = self._applyExpressionFilters(valueExpr, 'attr', startPos=exprStart)
    self._compiler.addAttribute(attribName, valueExpr)
    self._eatRestOfDirectiveTag(lineWasClear, firstLineEnd)
def eatDecorator(self):
    """Parse a '#@decorator' directive and attach it to the immediately
    following #def/#block/#closure (or a further decorator)."""
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    startPos = self.pos()
    self.getDirectiveStartToken()
    #self.advance() # eat @
    startPos = self.pos()
    decoratorExpr = self.getExpression()
    decoratorExpr = self._applyExpressionFilters(decoratorExpr, 'decorator', startPos=startPos)
    self._compiler.addDecorator(decoratorExpr)
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    self.getWhiteSpace()
    # a decorator only makes sense directly above a decoratable directive
    directiveName = self.matchDirective()
    if not directiveName or directiveName not in ('def', 'block', 'closure', '@'):
        raise ParseError(
            self, msg='Expected #def, #block, #closure or another @decorator')
    self.eatDirective()
def eatDef(self):
    """Parse a #def directive (single- or multi-line form)."""
    # filtered
    self._eatDefOrBlock('def')
def eatBlock(self):
    """Parse a #block directive and record its raw signature and source
    position in the compiler's block metadata."""
    # filtered
    startPos = self.pos()
    methodName, rawSignature = self._eatDefOrBlock('block')
    self._compiler._blockMetaData[methodName] = {
        'raw': rawSignature,
        'lineCol': self.getRowCol(startPos),
        }
def eatClosure(self):
    """Parse a #closure directive (single- or multi-line form)."""
    # filtered
    self._eatDefOrBlock('closure')
def _eatDefOrBlock(self, directiveName):
    """Shared parser for #def, #block and #closure.

    Handles both the single-line short form ('#def name(args): body') and
    the multi-line form, and returns (methodName, rawSignature).
    """
    # filtered
    assert directiveName in ('def', 'block', 'closure')
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    startPos = self.pos()
    self.getDirectiveStartToken()
    self.advance(len(directiveName))
    self.getWhiteSpace()
    # the leading $ on the method name is optional
    if self.matchCheetahVarStart():
        self.getCheetahVarStartToken()
    methodName = self.getIdentifier()
    self.getWhiteSpace()
    if self.peek() == '(':
        argsList = self.getDefArgList()
        self.advance()              # past the closing ')'
        # an explicit 'self' arg would duplicate the generated one
        if argsList and argsList[0][0] == 'self':
            del argsList[0]
    else:
        argsList=[]
    def includeBlockMarkers():
        # optionally wrap block output in start/end marker strings
        if self.setting('includeBlockMarkers'):
            startMarker = self.setting('blockMarkerStart')
            self._compiler.addStrConst(startMarker[0] + methodName + startMarker[1])
    # @@TR: fix up filtering
    self._applyExpressionFilters(self[startPos:self.pos()], 'def', startPos=startPos)
    if self.matchColonForSingleLineShortFormDirective():
        # single-line short form
        isNestedDef = (self.setting('allowNestedDefScopes')
                       and [name for name in self._openDirectivesStack if name=='def'])
        self.getc()
        rawSignature = self[startPos:endOfFirstLinePos]
        self._eatSingleLineDef(directiveName=directiveName,
                               methodName=methodName,
                               argsList=argsList,
                               startPos=startPos,
                               endPos=endOfFirstLinePos)
        if directiveName == 'def' and not isNestedDef:
            #@@TR: must come before _eatRestOfDirectiveTag ... for some reason
            self._compiler.closeDef()
        elif directiveName == 'block':
            includeBlockMarkers()
            self._compiler.closeBlock()
        elif directiveName == 'closure' or isNestedDef:
            self._compiler.dedent()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    else:
        # multi-line form: body runs until the matching #end tag
        if self.peek()==':':
            self.getc()
        self.pushToOpenDirectivesStack(directiveName)
        rawSignature = self[startPos:self.pos()]
        self._eatMultiLineDef(directiveName=directiveName,
                              methodName=methodName,
                              argsList=argsList,
                              startPos=startPos,
                              isLineClearToStartToken=isLineClearToStartToken)
        if directiveName == 'block':
            includeBlockMarkers()
    return methodName, rawSignature
def _eatMultiLineDef(self, directiveName, methodName, argsList, startPos,
                     isLineClearToStartToken=False):
    """Open the method/closure for a multi-line #def/#block/#closure; the
    body is consumed by the main parse loop until the matching #end."""
    # filtered in calling method
    self.getExpression() # slurp up any garbage left at the end
    signature = self[startPos:self.pos()]
    endOfFirstLinePos = self.findEOL()
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    # collapse the (possibly multi-line) signature for the comment
    signature = ' '.join([line.strip() for line in signature.splitlines()])
    parserComment = ('## CHEETAH: generated from ' + signature +
                     ' at line %s, col %s' % self.getRowCol(startPos)
                     + '.')
    isNestedDef = (self.setting('allowNestedDefScopes')
                   and len([name for name in self._openDirectivesStack if name=='def'])>1)
    if directiveName=='block' or (directiveName=='def' and not isNestedDef):
        self._compiler.startMethodDef(methodName, argsList, parserComment)
    else: #closure
        # @@TR: temporary hack of useSearchList — restored in eatEndDirective
        self._useSearchList_orig = self.setting('useSearchList')
        self.setSetting('useSearchList', False)
        self._compiler.addClosure(methodName, argsList, parserComment)
    return methodName
def _eatSingleLineDef(self, directiveName, methodName, argsList, startPos, endPos):
    """Handle the single-line short form of #def/#block/#closure: open the
    method or closure, then parse the rest of the line as its body.

    Fix: the original assigned ``useSearchList_orig`` only on the closure
    branch but restored it whenever ``isNestedDef`` was truthy, so a
    nested #block (with 'allowNestedDefScopes') raised NameError.  The
    save and restore are now guarded by the same condition.
    """
    # filtered in calling method
    fullSignature = self[startPos:endPos]
    parserComment = ('## Generated from ' + fullSignature +
                     ' at line %s, col %s' % self.getRowCol(startPos)
                     + '.')
    isNestedDef = (self.setting('allowNestedDefScopes')
                   and [name for name in self._openDirectivesStack if name=='def'])
    # True exactly when the else-branch below (addClosure) runs
    addedClosure = not (directiveName=='block'
                        or (directiveName=='def' and not isNestedDef))
    useSearchList_orig = None
    if not addedClosure:
        self._compiler.startMethodDef(methodName, argsList, parserComment)
    else: #closure
        # @@TR: temporary hack of useSearchList
        useSearchList_orig = self.setting('useSearchList')
        self.setSetting('useSearchList', False)
        self._compiler.addClosure(methodName, argsList, parserComment)
    self.getWhiteSpace(max=1)
    self.parse(breakPoint=endPos)
    if addedClosure:
        # restore only what was actually saved above
        self.setSetting('useSearchList', useSearchList_orig)
def eatExtends(self):
    """Parse an #extends directive and register the template's base
    class(es) with the compiler."""
    # filtered
    lineWasClear = self.isLineClearToStartToken()
    firstLineEnd = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('extends'))
    self.getWhiteSpace()
    exprStart = self.pos()
    if self.setting('allowExpressionsInExtendsDirective'):
        baseName = self.getExpression()
    else:
        baseName = ', '.join(self.getCommaSeparatedSymbols())
    baseName = self._applyExpressionFilters(baseName, 'extends', startPos=exprStart)
    self._compiler.setBaseClass(baseName) # in compiler
    self._eatRestOfDirectiveTag(lineWasClear, firstLineEnd)
def eatImplements(self):
    """Parse an '#implements name(args)' directive: set the template's
    main method name (and argument list, when given) on the compiler."""
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('implements'))
    self.getWhiteSpace()
    startPos = self.pos()
    methodName = self.getIdentifier()
    if not self.atEnd() and self.peek() == '(':
        argsList = self.getDefArgList()
        self.advance()              # past the closing ')'
        # an explicit 'self' arg would duplicate the generated one
        if argsList and argsList[0][0] == 'self':
            del argsList[0]
    else:
        argsList=[]
    # @@TR: need to split up filtering of the methodname and the args
    #methodName = self._applyExpressionFilters(methodName, 'implements', startPos=startPos)
    self._applyExpressionFilters(self[startPos:self.pos()], 'implements', startPos=startPos)
    self._compiler.setMainMethodName(methodName)
    self._compiler.setMainMethodArgs(argsList)
    self.getExpression() # throw away and unwanted crap that got added in
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
def eatSuper(self):
    """Parse a '#super(args)' directive and tell the compiler to call the
    overridden parent method with those arguments."""
    # filtered
    lineWasClear = self.isLineClearToStartToken()
    firstLineEnd = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('super'))
    self.getWhiteSpace()
    startPos = self.pos()
    argsList = []
    if not self.atEnd() and self.peek() == '(':
        argsList = self.getDefArgList()
        self.advance()              # past the closing ')'
        # an explicit 'self' arg would duplicate the generated one
        if argsList and argsList[0][0] == 'self':
            del argsList[0]
    self._applyExpressionFilters(self[startPos:self.pos()], 'super', startPos=startPos)
    #parserComment = ('## CHEETAH: generated from ' + signature +
    #                 ' at line %s, col %s' % self.getRowCol(startPos)
    #                 + '.')
    self.getExpression() # throw away and unwanted crap that got added in
    self._eatRestOfDirectiveTag(lineWasClear, firstLineEnd)
    self._compiler.addSuper(argsList)
def eatSet(self):
    """Parse a '#set [local|global|module] lvalue = rvalue' directive and
    pass the assembled assignment to the compiler."""
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLine = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(3)
    self.getWhiteSpace()
    # scope modifier: default is local
    style = SET_LOCAL
    if self.startswith('local'):
        self.getIdentifier()
        self.getWhiteSpace()
    elif self.startswith('global'):
        self.getIdentifier()
        self.getWhiteSpace()
        style = SET_GLOBAL
    elif self.startswith('module'):
        self.getIdentifier()
        self.getWhiteSpace()
        style = SET_MODULE
    # NOTE(review): startsWithDollar is captured but never used below.
    startsWithDollar = self.matchCheetahVarStart()
    startPos = self.pos()
    LVALUE = self.getExpression(pyTokensToBreakAt=assignmentOps, useNameMapper=False).strip()
    OP = self.getAssignmentOperator()
    RVALUE = self.getExpression()
    expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()
    expr = self._applyExpressionFilters(expr, 'set', startPos=startPos)
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
    class Components: pass # used for 'set global'
    exprComponents = Components()
    exprComponents.LVALUE = LVALUE
    exprComponents.OP = OP
    exprComponents.RVALUE = RVALUE
    self._compiler.addSet(expr, exprComponents, style)
def eatSlurp(self):
    """Parse a #slurp directive: commit pending text and swallow the rest
    of the current line.

    Fix: this body was a byte-for-byte duplicate of eatEOLSlurpToken();
    it now delegates so the whitespace-gobbling logic lives in one place.
    """
    self.eatEOLSlurpToken()
def eatEOLSlurpToken(self):
    """Consume an end-of-line slurp token: commit pending text, strip the
    leading whitespace when the token owns its line, and gobble the EOL."""
    if self.isLineClearToStartToken():
        self._compiler.handleWSBeforeDirective()
    self._compiler.commitStrConst()
    self.readToEOL(gobble=True)
def eatRaw(self):
    """Parse a #raw directive: emit the enclosed text verbatim, without
    any placeholder or directive processing.  Supports both the
    single-line short form ('#raw: text') and the '#raw ... #end raw'
    block form."""
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('raw'))
    self.getWhiteSpace()
    if self.matchColonForSingleLineShortFormDirective():
        # single-line form: the rest of the line is the raw text
        self.advance() # skip over :
        self.getWhiteSpace(max=1)
        rawBlock = self.readToEOL(gobble=False)
    else:
        # block form: everything up to '#end raw' is the raw text
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        rawBlock = self._eatToThisEndDirective('raw')
    self._compiler.addRawText(rawBlock)
def eatInclude(self):
    """Parse an '#include [raw] [source=expr|path-expr]' directive and
    register the include with the compiler."""
    # filtered
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('include'))
    self.getWhiteSpace()
    includeFrom = 'file'
    isRaw = False
    # optional 'raw' keyword: include without Cheetah processing
    if self.startswith('raw'):
        self.advance(3)
        isRaw=True
        self.getWhiteSpace()
    # optional 'source=' keyword: include from a string expression
    # rather than a file
    if self.startswith('source'):
        self.advance(len('source'))
        includeFrom = 'str'
        self.getWhiteSpace()
        if not self.peek() == '=':
            raise ParseError(self)
        self.advance()
    startPos = self.pos()
    sourceExpr = self.getExpression()
    sourceExpr = self._applyExpressionFilters(sourceExpr, 'include', startPos=startPos)
    self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    self._compiler.addInclude(sourceExpr, includeFrom, isRaw)
def eatDefMacro(self):
    """Parse a #defmacro directive: compile its body into a standalone
    mini-template (using '@' placeholders and '%' directives) and register
    the resulting callMacro callable as a new directive."""
    # @@TR: not filtered yet
    isLineClearToStartToken = self.isLineClearToStartToken()
    endOfFirstLinePos = self.findEOL()
    self.getDirectiveStartToken()
    self.advance(len('defmacro'))
    self.getWhiteSpace()
    # the leading $ on the macro name is optional
    if self.matchCheetahVarStart():
        self.getCheetahVarStartToken()
    macroName = self.getIdentifier()
    self.getWhiteSpace()
    if self.peek() == '(':
        argsList = self.getDefArgList(useNameMapper=False)
        self.advance()              # past the closing ')'
        if argsList and argsList[0][0] == 'self':
            del argsList[0]
    else:
        argsList=[]
    assert macroName not in self._directiveNamesAndParsers
    # prepend the body source arg and append the standard context args
    # every macro receives when called
    argsList.insert(0, ('src', None))
    argsList.append(('parser', 'None'))
    argsList.append(('macros', 'None'))
    argsList.append(('compilerSettings', 'None'))
    argsList.append(('isShortForm', 'None'))
    argsList.append(('EOLCharsInShortForm', 'None'))
    argsList.append(('startPos', 'None'))
    argsList.append(('endPos', 'None'))
    if self.matchColonForSingleLineShortFormDirective():
        # single-line form: rest of the line is the macro body
        self.advance() # skip over :
        self.getWhiteSpace(max=1)
        macroSrc = self.readToEOL(gobble=False)
        self.readToEOL(gobble=True)
    else:
        # block form: body runs to '#end defmacro'
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        macroSrc = self._eatToThisEndDirective('defmacro')
    #print argsList
    # wrap the body in a '%def callMacro(...)' so it compiles to a method
    normalizedMacroSrc = ''.join(
        ['%def callMacro('+','.join([defv and '%s=%s'%(n, defv) or n
                                     for n, defv in argsList])
         +')\n',
         macroSrc,
         '%end def'])
    # imported here to avoid a circular import at module load time
    from Cheetah.Template import Template
    templateAPIClass = self.setting('templateAPIClassForDefMacro', default=Template)
    compilerSettings = self.setting('compilerSettingsForDefMacro', default={})
    searchListForMacros = self.setting('searchListForDefMacro', default=[])
    searchListForMacros = list(searchListForMacros) # copy to avoid mutation bugs
    searchListForMacros.append({'macros': self._macros,
                                'parser': self,
                                'compilerSettings': self.settings(),
                                })
    # macros use '@' for placeholders and '%' for directives
    templateAPIClass._updateSettingsWithPreprocessTokens(
        compilerSettings, placeholderToken='@', directiveToken='%')
    macroTemplateClass = templateAPIClass.compile(source=normalizedMacroSrc,
                                                  compilerSettings=compilerSettings)
    #print normalizedMacroSrc
    #t = macroTemplateClass()
    #print t.callMacro('src')
    #print t.generatedClassCode()
    # record the macro and register it as a directive
    class MacroDetails: pass
    macroDetails = MacroDetails()
    macroDetails.macroSrc = macroSrc
    macroDetails.argsList = argsList
    macroDetails.template = macroTemplateClass(searchList=searchListForMacros)
    self._macroDetails[macroName] = macroDetails
    self._macros[macroName] = macroDetails.template.callMacro
    self._directiveNamesAndParsers[macroName] = self.eatMacroCall
    def eatMacroCall(self):
        """Parse an invocation of a macro registered via #defmacro.

        The macro callable is invoked with the parsed source block plus any
        context keyword args its signature accepts; its string output is then
        re-parsed in place of the original directive.
        """
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        startPos = self.pos()
        self.getDirectiveStartToken()
        macroName = self.getIdentifier()
        macro = self._macros[macroName]
        # A macro object may take over parsing entirely via a .parse() hook.
        if hasattr(macro, 'parse'):
            return macro.parse(parser=self, startPos=startPos)
        if hasattr(macro, 'parseArgs'):
            args = macro.parseArgs(parser=self, startPos=startPos)
        else:
            self.getWhiteSpace()
            args = self.getExpression(useNameMapper=False,
                                      pyTokensToBreakAt=[':']).strip()
        if self.matchColonForSingleLineShortFormDirective():
            isShortForm = True
            self.advance() # skip over :
            self.getWhiteSpace(max=1)
            srcBlock = self.readToEOL(gobble=False)
            EOLCharsInShortForm = self.readToEOL(gobble=True)
            #self.readToEOL(gobble=False)
        else:
            isShortForm = False
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            srcBlock = self._eatToThisEndDirective(macroName)
        if hasattr(macro, 'convertArgStrToDict'):
            kwArgs = macro.convertArgStrToDict(args, parser=self, startPos=startPos)
        else:
            def getArgs(*pargs, **kws):
                return pargs, kws
            # NOTE(review): exec() of template-author-controlled text; this is
            # by design in Cheetah (templates are code) but means macro args
            # must never come from untrusted input.
            exec('positionalArgs, kwArgs = getArgs(%(args)s)'%locals())
        assert 'src' not in kwArgs
        kwArgs['src'] = srcBlock
        # Locate the underlying code object so the accepted keyword names can
        # be inspected (Python 2 only: new.instancemethod / im_func / func_code).
        if isinstance(macro, new.instancemethod):
            co = macro.im_func.func_code
        elif (hasattr(macro, '__call__')
              and hasattr(macro.__call__, 'im_func')):
            co = macro.__call__.im_func.func_code
        else:
            co = macro.func_code
        availableKwArgs = inspect.getargs(co)[0]
        # Only pass the optional context args the macro actually declares.
        if 'parser' in availableKwArgs:
            kwArgs['parser'] = self
        if 'macros' in availableKwArgs:
            kwArgs['macros'] = self._macros
        if 'compilerSettings' in availableKwArgs:
            kwArgs['compilerSettings'] = self.settings()
        if 'isShortForm' in availableKwArgs:
            kwArgs['isShortForm'] = isShortForm
        if isShortForm and 'EOLCharsInShortForm' in availableKwArgs:
            kwArgs['EOLCharsInShortForm'] = EOLCharsInShortForm
        if 'startPos' in availableKwArgs:
            kwArgs['startPos'] = startPos
        if 'endPos' in availableKwArgs:
            kwArgs['endPos'] = self.pos()
        srcFromMacroOutput = macro(**kwArgs)
        # Save parser state, re-parse the macro's output in place, restore.
        origParseSrc = self._src
        origBreakPoint = self.breakPoint()
        origPos = self.pos()
        # add a comment to the output about the macro src that is being parsed
        # or add a comment prefix to all the comments added by the compiler
        self._src = srcFromMacroOutput
        self.setPos(0)
        self.setBreakPoint(len(srcFromMacroOutput))
        self.parse(assertEmptyStack=False)
        self._src = origParseSrc
        self.setBreakPoint(origBreakPoint)
        self.setPos(origPos)
        #self._compiler.addRawText('end')
    def eatCache(self):
        """Parse a #cache directive (single-line short form or block form)."""
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        lineCol = self.getRowCol()
        self.getDirectiveStartToken()
        self.advance(len('cache'))
        startPos = self.pos()
        argList = self.getDefArgList(useNameMapper=True)
        argList = self._applyExpressionFilters(argList, 'cache', startPos=startPos)
        # Shared by both forms: open the cache region in the compiler.
        def startCache():
            cacheInfo = self._compiler.genCacheInfoFromArgList(argList)
            self._compiler.startCacheRegion(cacheInfo, lineCol)
        if self.matchColonForSingleLineShortFormDirective():
            # "#cache ...: body" -- region spans just the rest of the line.
            self.advance() # skip over :
            self.getWhiteSpace(max=1)
            startCache()
            self.parse(breakPoint=self.findEOL(gobble=True))
            self._compiler.endCacheRegion()
        else:
            # Block form -- region stays open until a matching #end cache.
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            self.pushToOpenDirectivesStack('cache')
            startCache()
    def eatCall(self):
        """Parse a #call directive (function call whose output is captured)."""
        # @@TR: need to enable single line version of this
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        lineCol = self.getRowCol()
        self.getDirectiveStartToken()
        self.advance(len('call'))
        startPos = self.pos()
        # Temporarily disable autocalling so the target is captured as a
        # reference rather than being invoked while reading its name.
        useAutocallingOrig = self.setting('useAutocalling')
        self.setSetting('useAutocalling', False)
        self.getWhiteSpace()
        if self.matchCheetahVarStart():
            functionName = self.getCheetahVar()
        else:
            functionName = self.getCheetahVar(plain=True, skipStartToken=True)
        self.setSetting('useAutocalling', useAutocallingOrig)
        # @@TR: fix up filtering
        self._applyExpressionFilters(self[startPos:self.pos()], 'call', startPos=startPos)
        self.getWhiteSpace()
        args = self.getExpression(pyTokensToBreakAt=[':']).strip()
        if self.matchColonForSingleLineShortFormDirective():
            # Short form: region spans the rest of the line only.
            self.advance() # skip over :
            self._compiler.startCallRegion(functionName, args, lineCol)
            self.getWhiteSpace(max=1)
            self.parse(breakPoint=self.findEOL(gobble=False))
            self._compiler.endCallRegion()
        else:
            # Block form: closed later by #end call.
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self.pushToOpenDirectivesStack("call")
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            self._compiler.startCallRegion(functionName, args, lineCol)
def eatCallArg(self):
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
lineCol = self.getRowCol()
self.getDirectiveStartToken()
self.advance(len('arg'))
startPos = self.pos()
self.getWhiteSpace()
argName = self.getIdentifier()
self.getWhiteSpace()
argName = self._applyExpressionFilters(argName, 'arg', startPos=startPos)
self._compiler.setCallArg(argName, lineCol)
if self.peek() == ':':
self.getc()
else:
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
    def eatFilter(self):
        """Parse a #filter directive naming an output filter (class or ident)."""
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        self.getDirectiveStartToken()
        self.advance(len('filter'))
        self.getWhiteSpace()
        startPos = self.pos()
        if self.matchCheetahVarStart():
            # A $var expression names a filter class.
            isKlass = True
            theFilter = self.getExpression(pyTokensToBreakAt=[':'])
        else:
            # A bare identifier names a built-in/registered filter.
            isKlass = False
            theFilter = self.getIdentifier()
            self.getWhiteSpace()
        theFilter = self._applyExpressionFilters(theFilter, 'filter', startPos=startPos)
        if self.matchColonForSingleLineShortFormDirective():
            # Short form: filter applies to the rest of this line only.
            self.advance() # skip over :
            self.getWhiteSpace(max=1)
            self._compiler.setFilter(theFilter, isKlass)
            self.parse(breakPoint=self.findEOL(gobble=False))
            self._compiler.closeFilterBlock()
        else:
            # Block form: closed later by #end filter.
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self.pushToOpenDirectivesStack("filter")
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            self._compiler.setFilter(theFilter, isKlass)
    def eatTransform(self):
        """Parse a #transform directive naming an output transformer."""
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        self.getDirectiveStartToken()
        self.advance(len('transform'))
        self.getWhiteSpace()
        startPos = self.pos()
        if self.matchCheetahVarStart():
            # A $var expression names a transformer class.
            isKlass = True
            transformer = self.getExpression(pyTokensToBreakAt=[':'])
        else:
            isKlass = False
            transformer = self.getIdentifier()
            self.getWhiteSpace()
        transformer = self._applyExpressionFilters(transformer, 'transform', startPos=startPos)
        if self.peek()==':':
            self.advance()
        self.getWhiteSpace()
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        self._compiler.setTransform(transformer, isKlass)
def eatErrorCatcher(self):
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
self.getDirectiveStartToken()
self.advance(len('errorCatcher'))
self.getWhiteSpace()
startPos = self.pos()
errorCatcherName = self.getIdentifier()
errorCatcherName = self._applyExpressionFilters(
errorCatcherName, 'errorcatcher', startPos=startPos)
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
self._compiler.setErrorCatcher(errorCatcherName)
    def eatCapture(self):
        """Parse a #capture directive (capture output into a variable)."""
        # @@TR: this could be refactored to use the code in eatSimpleIndentingDirective
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        lineCol = self.getRowCol()
        self.getDirectiveStartToken()
        self.advance(len('capture'))
        startPos = self.pos()
        self.getWhiteSpace()
        expr = self.getExpression(pyTokensToBreakAt=[':'])
        expr = self._applyExpressionFilters(expr, 'capture', startPos=startPos)
        if self.matchColonForSingleLineShortFormDirective():
            # Short form: capture just the rest of this line.
            self.advance() # skip over :
            self._compiler.startCaptureRegion(assignTo=expr, lineCol=lineCol)
            self.getWhiteSpace(max=1)
            self.parse(breakPoint=self.findEOL(gobble=False))
            self._compiler.endCaptureRegion()
        else:
            # Block form: closed later by #end capture.
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            self.pushToOpenDirectivesStack("capture")
            self._compiler.startCaptureRegion(assignTo=expr, lineCol=lineCol)
    def eatIf(self):
        """Parse an #if directive.

        Three forms: a ternary "#if cond then a else b", a single-line
        short form "#if cond: body", and the normal block form closed by
        #end if.
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()
        lineCol = self.getRowCol()
        self.getDirectiveStartToken()
        startPos = self.pos()
        expressionParts = self.getExpressionParts(pyTokensToBreakAt=[':'])
        expr = ''.join(expressionParts).strip()
        expr = self._applyExpressionFilters(expr, 'if', startPos=startPos)
        isTernaryExpr = ('then' in expressionParts and 'else' in expressionParts)
        if isTernaryExpr:
            # Split the token stream on 'then'/'else' into the three parts.
            conditionExpr = []
            trueExpr = []
            falseExpr = []
            currentExpr = conditionExpr
            for part in expressionParts:
                if part.strip()=='then':
                    currentExpr = trueExpr
                elif part.strip()=='else':
                    currentExpr = falseExpr
                else:
                    currentExpr.append(part)
            conditionExpr = ''.join(conditionExpr)
            trueExpr = ''.join(trueExpr)
            falseExpr = ''.join(falseExpr)
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
            self._compiler.addTernaryExpr(conditionExpr, trueExpr, falseExpr, lineCol=lineCol)
        elif self.matchColonForSingleLineShortFormDirective():
            # Short form: the body is the rest of this line.
            self.advance() # skip over :
            self._compiler.addIf(expr, lineCol=lineCol)
            self.getWhiteSpace(max=1)
            self.parse(breakPoint=self.findEOL(gobble=True))
            self._compiler.commitStrConst()
            self._compiler.dedent()
        else:
            # Block form: closed later by #end if.
            if self.peek()==':':
                self.advance()
            self.getWhiteSpace()
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
            self.pushToOpenDirectivesStack('if')
            self._compiler.addIf(expr, lineCol=lineCol)
## end directive handlers
    def handleEndDef(self):
        """Handle #end def, accounting for nested #def scopes when allowed."""
        isNestedDef = (self.setting('allowNestedDefScopes')
                       and [name for name in self._openDirectivesStack if name=='def'])
        if not isNestedDef:
            self._compiler.closeDef()
        else:
            # @@TR: temporary hack of useSearchList
            self.setSetting('useSearchList', self._useSearchList_orig)
            self._compiler.commitStrConst()
            self._compiler.dedent()
###
def pushToOpenDirectivesStack(self, directiveName):
assert directiveName in self._closeableDirectives
self._openDirectivesStack.append(directiveName)
def popFromOpenDirectivesStack(self, directiveName):
if not self._openDirectivesStack:
raise ParseError(self, msg="#end found, but nothing to end")
if self._openDirectivesStack[-1] == directiveName:
del self._openDirectivesStack[-1]
else:
raise ParseError(self, msg="#end %s found, expected #end %s" %(
directiveName, self._openDirectivesStack[-1]))
def assertEmptyOpenDirectivesStack(self):
if self._openDirectivesStack:
errorMsg = (
"Some #directives are missing their corresponding #end ___ tag: %s" %(
', '.join(self._openDirectivesStack)))
raise ParseError(self, msg=errorMsg)
##################################################
## Make an alias to export
# Public name: client code imports ``Parser`` rather than the
# underscore-prefixed class defined above.
Parser = _HighLevelParser
| Python |
#!/usr/bin/env python
import os
import pprint
try:
from functools import reduce
except ImportError:
# Assume we have reduce
pass
from Cheetah import Parser
from Cheetah import Compiler
from Cheetah import Template
class Analyzer(Parser.Parser):
    """Cheetah parser that tallies how often each directive is parsed."""

    def __init__(self, *args, **kwargs):
        # Maps directive name -> number of occurrences seen so far.
        self.calls = {}
        super(Analyzer, self).__init__(*args, **kwargs)

    def eatDirective(self):
        """Count the upcoming directive, then parse it normally."""
        directive = self.matchDirective()
        self.calls[directive] = self.calls.get(directive, 0) + 1
        super(Analyzer, self).eatDirective()
class AnalysisCompiler(Compiler.ModuleCompiler):
    """Module compiler that swaps in the counting Analyzer parser."""
    parserClass = Analyzer
def analyze(source):
    """Compile *source* and return its {directive name: count} dict."""
    klass = Template.Template.compile(source, compilerClass=AnalysisCompiler)
    return klass._CHEETAH_compilerInstance._parser.calls
def main_file(f):
    """Analyze a single template file *f*; return its directive-usage counts.

    Returns whatever analyze() returns (a dict of directive -> count).
    """
    # ``with`` replaces the old manual try/finally and guarantees the handle
    # is closed even if analyze() raises.  print() call-form parses on both
    # Python 2 and 3 (the old paren-less ``print u'...'`` was 2-only).
    with open(f, 'r') as fd:
        print(u'>>> Analyzing %s' % f)
        return analyze(fd.read())
def _find_templates(directory, suffix):
for root, dirs, files in os.walk(directory):
for f in files:
if not f.endswith(suffix):
continue
yield root + os.path.sep + f
def _analyze_templates(iterable):
    """Lazily run main_file() over every template path in *iterable*."""
    for path in iterable:
        yield main_file(path)
def main_dir(opts):
    """Analyze every matching template under opts.dir; return summed counts.

    *opts* must provide ``dir`` and ``suffix`` attributes (optparse values).
    Empty/failed per-file results are skipped.
    """
    results = _analyze_templates(_find_templates(opts.dir, opts.suffix))
    totals = {}
    for series in results:
        if not series:
            continue
        # ``items()`` instead of the Python-2-only ``iteritems()`` keeps this
        # working on both 2 and 3; dict.get replaces the try/except dance.
        for k, v in series.items():
            totals[k] = totals.get(k, 0) + v
    return totals
def main():
    """Command-line entry point: analyze one file (-f) or a directory (-d)."""
    from optparse import OptionParser
    op = OptionParser()
    op.add_option('-f', '--file', dest='file', default=None,
            help='Specify a single file to analyze')
    op.add_option('-d', '--dir', dest='dir', default=None,
            help='Specify a directory of templates to analyze')
    op.add_option('--suffix', default='tmpl', dest='suffix',
            help='Specify a custom template file suffix for the -d option (default: "tmpl")')
    opts, args = op.parse_args()
    if not opts.file and not opts.dir:
        # Nothing to do: show usage instead of exiting silently.
        op.print_help()
        return
    results = None
    if opts.file:
        results = main_file(opts.file)
    if opts.dir:
        # Note: when both -f and -d are given, the -d totals overwrite the
        # -f result before printing.
        results = main_dir(opts)
    pprint.pprint(results)
# Allow use both as an importable module and as a command-line script.
if __name__ == '__main__':
    main()
| Python |
# Prefer the shared sentinel from the ds package when it is importable;
# otherwise fall back to a local singleton that prints as 'Unspecified'.
try:
    from ds.sys.Unspecified import Unspecified
except ImportError:
    class _Unspecified:
        """Fallback sentinel standing in for ds.sys.Unspecified."""
        def __repr__(self):
            return 'Unspecified'
        # str() output is identical to repr() output for this sentinel.
        __str__ = __repr__
    Unspecified = _Unspecified()
| Python |
# $Id: TemplateCmdLineIface.py,v 1.13 2006/01/10 20:34:35 tavis_rudd Exp $
"""Provides a command line interface to compiled Cheetah template modules.
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
Version: $Revision: 1.13 $
Start Date: 2001/12/06
Last Revision Date: $Date: 2006/01/10 20:34:35 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.13 $"[11:-2]
import sys
import os
import getopt
import os.path
try:
from cPickle import load
except ImportError:
from pickle import load
from Cheetah.Version import Version
class Error(Exception):
    """Base error type for the template command-line interface."""
    pass
class CmdLineIface:
    """A command line interface to compiled Cheetah template modules."""
    def __init__(self, templateObj,
                 scriptName=os.path.basename(sys.argv[0]),
                 cmdLineArgs=sys.argv[1:]):
        # NOTE: the defaults are evaluated once at class-definition time,
        # capturing sys.argv as it was on import.
        self._template = templateObj
        self._scriptName = scriptName
        self._cmdLineArgs = cmdLineArgs
    def run(self):
        """The main program controller: process args then print the filled
        template to stdout."""
        self._processCmdLineArgs()
        print(self._template)
    def _processCmdLineArgs(self):
        """Parse the stored args and mutate the template's searchList.

        Recognized options: -h/--help, --env, --pickle <file|->.
        Exits the process on bad options or --help.
        """
        try:
            self._opts, self._args = getopt.getopt(
                self._cmdLineArgs, 'h', ['help',
                                         'env',
                                         'pickle=',
                                         ])
        except getopt.GetoptError, v:
            # print help information and exit:
            print(v)
            print(self.usage())
            sys.exit(2)
        for o, a in self._opts:
            if o in ('-h', '--help'):
                print(self.usage())
                sys.exit()
            if o == '--env':
                # Highest-priority searchList entry: the shell environment.
                self._template.searchList().insert(0, os.environ)
            if o == '--pickle':
                if a == '-':
                    # '-' means read the pickled dict from stdin.
                    unpickled = load(sys.stdin)
                    self._template.searchList().insert(0, unpickled)
                else:
                    # NOTE(review): unpickling an arbitrary user-named file;
                    # only safe because the user already controls this process.
                    f = open(a)
                    unpickled = load(f)
                    f.close()
                    self._template.searchList().insert(0, unpickled)
    def usage(self):
        """Return the formatted usage/help text for this script."""
        return """Cheetah %(Version)s template module command-line interface
Usage
-----
%(scriptName)s [OPTION]
Options
-------
-h, --help Print this help information
--env Use shell ENVIRONMENT variables to fill the
$placeholders in the template.
--pickle <file> Use a variables from a dictionary stored in Python
pickle file to fill $placeholders in the template.
If <file> is - stdin is used:
'%(scriptName)s --pickle -'
Description
-----------
This interface allows you to execute a Cheetah template from the command line
and collect the output. It can prepend the shell ENVIRONMENT or a pickled
Python dictionary to the template's $placeholder searchList, overriding the
defaults for the $placeholders.
""" % {'scriptName': self._scriptName,
       'Version': Version,
       }
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
"""
client module for memcached (memory cache daemon)
Overview
========
See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached.
Usage summary
=============
This should give you a feel for how this module operates::
import memcache
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
mc.set("some_key", "Some value")
value = mc.get("some_key")
mc.set("another_key", 3)
mc.delete("another_key")
mc.set("key", "1") # note that the key used for incr/decr must be a string.
mc.incr("key")
mc.decr("key")
The standard way to use memcache with a database is like this::
key = derive_key(obj)
obj = mc.get(key)
if not obj:
obj = backend_api.get(...)
mc.set(key, obj)
# we now have obj, and future passes through this code
# will use the object from the cache.
Detailed Documentation
======================
More detailed documentation is available in the L{Client} class.
"""
import sys
import socket
import time
try:
import cPickle as pickle
except ImportError:
import pickle
__author__ = "Evan Martin <martine@danga.com>"
__version__ = "1.2_tummy5"
__copyright__ = "Copyright (C) 2003 Danga Interactive"
__license__ = "Python"
class _Error(Exception):
    """Internal error raised for malformed memcached protocol responses."""
    pass
class Client:
    """
    Object representing a pool of memcache servers.
    See L{memcache} for an overview.
    In all cases where a key is used, the key can be either:
        1. A simple hashable type (string, integer, etc.).
        2. A tuple of C{(hashvalue, key)}.  This is useful if you want to avoid
           making this module calculate a hash value.  You may prefer, for
           example, to keep all of a given user's objects on the same memcache
           server, so you could use the user's unique id as the hash value.
    @group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog
    @group Insertion: set, add, replace
    @group Retrieval: get, get_multi
    @group Integers: incr, decr
    @group Removal: delete
    @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\
           set, add, replace, get, get_multi, incr, decr, delete
    """
    # Value-encoding flag bits sent with each stored value.
    _usePickle = False
    _FLAG_PICKLE = 1<<0
    _FLAG_INTEGER = 1<<1
    _FLAG_LONG = 1<<2
    _SERVER_RETRIES = 10 # how many times to try finding a free server.
    def __init__(self, servers, debug=0):
        """
        Create a new Client object with the given list of servers.
        @param servers: C{servers} is passed to L{set_servers}.
        @param debug: whether to display error messages when a server can't be
        contacted.
        """
        self.set_servers(servers)
        self.debug = debug
        self.stats = {}
    def set_servers(self, servers):
        """
        Set the pool of servers used by this client.
        @param servers: an array of servers.
        Servers can be passed in two forms:
            1. Strings of the form C{"host:port"}, which implies a default weight of 1.
            2. Tuples of the form C{("host:port", weight)}, where C{weight} is
               an integer weight value.
        """
        self.servers = [_Host(s, self.debuglog) for s in servers]
        self._init_buckets()
    def get_stats(self):
        '''Get statistics from each of the servers.
        @return: A list of tuples ( server_identifier, stats_dictionary ).
            The dictionary contains a number of name/value pairs specifying
            the name of the status field and the string value associated with
            it.  The values are not converted from strings.
        '''
        data = []
        for s in self.servers:
            if not s.connect(): continue
            name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
            s.send_cmd('stats')
            serverData = {}
            data.append(( name, serverData ))
            readline = s.readline
            while True:
                line = readline()
                if not line or line.strip() == 'END': break
                # Lines look like "STAT <name> <value>".
                stats = line.split(' ', 2)
                serverData[stats[1]] = stats[2]
        return(data)
    def flush_all(self):
        'Expire all data currently in the memcache servers.'
        for s in self.servers:
            if not s.connect(): continue
            s.send_cmd('flush_all')
            s.expect("OK")
    def debuglog(self, str):
        # NOTE(review): the parameter name shadows the builtin ``str`` within
        # this method; kept as-is to avoid behaviour drift.
        if self.debug:
            sys.stderr.write("MemCached: %s\n" % str)
    def _statlog(self, func):
        # Per-command call counter (e.g. 'get', 'set', 'delete').
        if func not in self.stats:
            self.stats[func] = 1
        else:
            self.stats[func] += 1
    def forget_dead_hosts(self):
        """
        Reset every host in the pool to an "alive" state.
        """
        for s in self.servers:
            s.dead_until = 0
    def _init_buckets(self):
        # Each server appears ``weight`` times so hashing is weight-biased.
        self.buckets = []
        for server in self.servers:
            for i in range(server.weight):
                self.buckets.append(server)
    def _get_server(self, key):
        """Map *key* to a live server; returns (server, key) or (None, None)."""
        if isinstance(key, tuple):
            # Caller supplied a precomputed (hashvalue, key) pair.
            serverhash = key[0]
            key = key[1]
        else:
            serverhash = hash(key)
        for i in range(Client._SERVER_RETRIES):
            server = self.buckets[serverhash % len(self.buckets)]
            if server.connect():
                return server, key
            # Rehash deterministically to try a different bucket.
            serverhash = hash(str(serverhash) + str(i))
        return None, None
    def disconnect_all(self):
        for s in self.servers:
            s.close_socket()
    def delete(self, key, time=0):
        '''Deletes a key from the memcache.
        @return: Nonzero on success.
        @rtype: int
        '''
        server, key = self._get_server(key)
        if not server:
            return 0
        self._statlog('delete')
        # ``!= None`` (not ``is not None``): time=0 still sends the numeric
        # form of the command; kept as original behaviour.
        if time != None:
            cmd = "delete %s %d" % (key, time)
        else:
            cmd = "delete %s" % key
        try:
            server.send_cmd(cmd)
            server.expect("DELETED")
        except socket.error, msg:
            server.mark_dead(msg[1])
            return 0
        return 1
    def incr(self, key, delta=1):
        """
        Sends a command to the server to atomically increment the value for C{key} by
        C{delta}, or by 1 if C{delta} is unspecified.  Returns None if C{key} doesn't
        exist on server, otherwise it returns the new value after incrementing.
        Note that the value for C{key} must already exist in the memcache, and it
        must be the string representation of an integer.
        >>> mc.set("counter", "20") # returns 1, indicating success
        1
        >>> mc.incr("counter")
        21
        >>> mc.incr("counter")
        22
        Overflow on server is not checked.  Be aware of values approaching
        2**32.  See L{decr}.
        @param delta: Integer amount to increment by (should be zero or greater).
        @return: New value after incrementing.
        @rtype: int
        """
        return self._incrdecr("incr", key, delta)
    def decr(self, key, delta=1):
        """
        Like L{incr}, but decrements.  Unlike L{incr}, underflow is checked and
        new values are capped at 0.  If server value is 1, a decrement of 2
        returns 0, not -1.
        @param delta: Integer amount to decrement by (should be zero or greater).
        @return: New value after decrementing.
        @rtype: int
        """
        return self._incrdecr("decr", key, delta)
    def _incrdecr(self, cmd, key, delta):
        # Shared implementation for incr/decr.
        server, key = self._get_server(key)
        if not server:
            return 0
        self._statlog(cmd)
        cmd = "%s %s %d" % (cmd, key, delta)
        try:
            server.send_cmd(cmd)
            line = server.readline()
            return int(line)
        except socket.error, msg:
            server.mark_dead(msg[1])
            return None
    def add(self, key, val, time=0):
        '''
        Add new key with value.
        Like L{set}, but only stores in memcache if the key doesn\'t already exist.
        @return: Nonzero on success.
        @rtype: int
        '''
        return self._set("add", key, val, time)
    def replace(self, key, val, time=0):
        '''Replace existing key with value.
        Like L{set}, but only stores in memcache if the key already exists.
        The opposite of L{add}.
        @return: Nonzero on success.
        @rtype: int
        '''
        return self._set("replace", key, val, time)
    def set(self, key, val, time=0):
        '''Unconditionally sets a key to a given value in the memcache.
        The C{key} can optionally be an tuple, with the first element being the
        hash value, if you want to avoid making this module calculate a hash value.
        You may prefer, for example, to keep all of a given user's objects on the
        same memcache server, so you could use the user's unique id as the hash
        value.
        @return: Nonzero on success.
        @rtype: int
        '''
        return self._set("set", key, val, time)
    def _set(self, cmd, key, val, time):
        # Shared implementation for set/add/replace: encode the value,
        # record its type in the flags field, and issue the storage command.
        server, key = self._get_server(key)
        if not server:
            return 0
        self._statlog(cmd)
        flags = 0
        if isinstance(val, str):
            pass
        elif isinstance(val, int):
            flags |= Client._FLAG_INTEGER
            val = "%d" % val
        elif isinstance(val, long):
            flags |= Client._FLAG_LONG
            val = "%d" % val
        elif self._usePickle:
            flags |= Client._FLAG_PICKLE
            val = pickle.dumps(val, 2)
        else:
            # Unknown type and pickling disabled: sent as-is; len()/str
            # interpolation below will fail unless it is string-like.
            pass
        fullcmd = "%s %s %d %d %d\r\n%s" % (cmd, key, flags, time, len(val), val)
        try:
            server.send_cmd(fullcmd)
            server.expect("STORED")
        except socket.error, msg:
            server.mark_dead(msg[1])
            return 0
        return 1
    def get(self, key):
        '''Retrieves a key from the memcache.
        @return: The value or None.
        '''
        server, key = self._get_server(key)
        if not server:
            return None
        self._statlog('get')
        try:
            server.send_cmd("get %s" % key)
            rkey, flags, rlen, = self._expectvalue(server)
            if not rkey:
                return None
            value = self._recv_value(server, flags, rlen)
            server.expect("END")
        except (_Error, socket.error), msg:
            if isinstance(msg, tuple):
                msg = msg[1]
            server.mark_dead(msg)
            return None
        return value
    def get_multi(self, keys):
        '''
        Retrieves multiple keys from the memcache doing just one query.
        >>> success = mc.set("foo", "bar")
        >>> success = mc.set("baz", 42)
        >>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
        1
        This method is recommended over regular L{get} as it lowers the number of
        total packets flying around your network, reducing total latency, since
        your app doesn\'t have to wait for each round-trip of L{get} before sending
        the next one.
        @param keys: An array of keys.
        @return: A dictionary of key/value pairs that were available.
        '''
        self._statlog('get_multi')
        server_keys = {}
        # build up a list for each server of all the keys we want.
        for key in keys:
            server, key = self._get_server(key)
            if not server:
                continue
            if server not in server_keys:
                server_keys[server] = []
            server_keys[server].append(key)
        # send out all requests on each server before reading anything
        dead_servers = []
        for server in server_keys.keys():
            try:
                server.send_cmd("get %s" % " ".join(server_keys[server]))
            except socket.error, msg:
                server.mark_dead(msg[1])
                dead_servers.append(server)
        # if any servers died on the way, don't expect them to respond.
        for server in dead_servers:
            del server_keys[server]
        retvals = {}
        for server in server_keys.keys():
            try:
                line = server.readline()
                while line and line != 'END':
                    rkey, flags, rlen = self._expectvalue(server, line)
                    # Bo Yang reports that this can sometimes be None
                    if rkey is not None:
                        val = self._recv_value(server, flags, rlen)
                        retvals[rkey] = val
                    line = server.readline()
            except (_Error, socket.error), msg:
                server.mark_dead(msg)
        return retvals
    def _expectvalue(self, server, line=None):
        """Parse a 'VALUE <key> <flags> <len>' header; returns (None,)*3 at end."""
        if not line:
            line = server.readline()
        if line[:5] == 'VALUE':
            # NOTE(review): ``len`` here shadows the builtin for the rest of
            # this method; kept as-is to avoid behaviour drift.
            resp, rkey, flags, len = line.split()
            flags = int(flags)
            rlen = int(len)
            return (rkey, flags, rlen)
        else:
            return (None, None, None)
    def _recv_value(self, server, flags, rlen):
        """Read rlen bytes (+CRLF) from *server* and decode per *flags*."""
        rlen += 2 # include \r\n
        buf = server.recv(rlen)
        if len(buf) != rlen:
            raise _Error("received %d bytes when expecting %d" % (len(buf), rlen))
        # NOTE(review): this check is always true after the raise above;
        # kept as-is to avoid behaviour drift.
        if len(buf) == rlen:
            buf = buf[:-2]  # strip \r\n
        if flags == 0:
            val = buf
        elif flags & Client._FLAG_INTEGER:
            val = int(buf)
        elif flags & Client._FLAG_LONG:
            val = long(buf)
        elif self._usePickle and flags & Client._FLAG_PICKLE:
            try:
                val = pickle.loads(buf)
            except:
                self.debuglog('Pickle error...\n')
                val = None
        else:
            # Unknown flag combination: ``val`` is left unset and the
            # UnboundLocalError below would surface the bug; original code.
            self.debuglog("unknown flags on get: %x\n" % flags)
        return val
class _Host:
_DEAD_RETRY = 30 # number of seconds before retrying a dead server.
def __init__(self, host, debugfunc=None):
if isinstance(host, tuple):
host = host[0]
self.weight = host[1]
else:
self.weight = 1
if host.find(":") > 0:
self.ip, self.port = host.split(":")
self.port = int(self.port)
else:
self.ip, self.port = host, 11211
if not debugfunc:
debugfunc = lambda x: x
self.debuglog = debugfunc
self.deaduntil = 0
self.socket = None
def _check_dead(self):
if self.deaduntil and self.deaduntil > time.time():
return 1
self.deaduntil = 0
return 0
def connect(self):
if self._get_socket():
return 1
return 0
def mark_dead(self, reason):
self.debuglog("MemCache: %s: %s. Marking dead." % (self, reason))
self.deaduntil = time.time() + _Host._DEAD_RETRY
self.close_socket()
def _get_socket(self):
if self._check_dead():
return None
if self.socket:
return self.socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Python 2.3-ism: s.settimeout(1)
try:
s.connect((self.ip, self.port))
except socket.error, msg:
self.mark_dead("connect: %s" % msg[1])
return None
self.socket = s
return s
def close_socket(self):
if self.socket:
self.socket.close()
self.socket = None
def send_cmd(self, cmd):
if len(cmd) > 100:
self.socket.sendall(cmd)
self.socket.sendall('\r\n')
else:
self.socket.sendall(cmd + '\r\n')
def readline(self):
buffers = ''
recv = self.socket.recv
while True:
data = recv(1)
if not data:
self.mark_dead('Connection closed while reading from %s'
% repr(self))
break
if data == '\n' and buffers and buffers[-1] == '\r':
return(buffers[:-1])
buffers = buffers + data
return(buffers)
def expect(self, text):
line = self.readline()
if line != text:
self.debuglog("while expecting '%s', got unexpected response '%s'" % (text, line))
return line
def recv(self, rlen):
buf = ''
recv = self.socket.recv
while len(buf) < rlen:
buf = buf + recv(rlen - len(buf))
return buf
def __str__(self):
d = ''
if self.deaduntil:
d = " (dead until %d)" % self.deaduntil
return "%s:%d%s" % (self.ip, self.port, d)
def _doctest():
    """Run this module's docstring examples against a live local memcached."""
    import doctest, memcache
    servers = ["127.0.0.1:11211"]
    mc = Client(servers, debug=1)
    # Expose the client to the doctests as ``mc``.
    globs = {"mc": mc}
    return doctest.testmod(memcache, globs=globs)
# Manual smoke test: requires a memcached server listening on localhost:11211.
# NOTE(review): uses the Python-2-only ``long`` builtin below.
if __name__ == "__main__":
    print("Testing docstrings...")
    _doctest()
    print("Running tests:")
    #servers = ["127.0.0.1:11211", "127.0.0.1:11212"]
    servers = ["127.0.0.1:11211"]
    mc = Client(servers, debug=1)
    def to_s(val):
        # Render non-strings with their type for readable test output.
        if not isinstance(val, str):
            return "%s (%s)" % (val, type(val))
        return "%s" % val
    def test_setget(key, val):
        # Round-trip a single value; prints OK/FAIL and returns 1/0.
        print("Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)))
        mc.set(key, val)
        newval = mc.get(key)
        if newval == val:
            print("OK")
            return 1
        else:
            print("FAIL")
            return 0
    class FooStruct:
        # Sample user-defined type for the pickling round-trip test.
        def __init__(self):
            self.bar = "baz"
        def __str__(self):
            return "A FooStruct"
        def __eq__(self, other):
            if isinstance(other, FooStruct):
                return self.bar == other.bar
            return 0
    test_setget("a_string", "some random string")
    test_setget("an_integer", 42)
    if test_setget("long", long(1<<30)):
        print("Testing delete ...")
        if mc.delete("long"):
            print("OK")
        else:
            print("FAIL")
    print("Testing get_multi ...")
    print(mc.get_multi(["a_string", "an_integer"]))
    print("Testing get(unknown value) ...")
    print(to_s(mc.get("unknown_value")))
    f = FooStruct()
    test_setget("foostruct", f)
    print("Testing incr ...")
    x = mc.incr("an_integer", 1)
    if x == 43:
        print("OK")
    else:
        print("FAIL")
    print("Testing decr ...")
    x = mc.decr("an_integer", 1)
    if x == 42:
        print("OK")
    else:
        print("FAIL")
# vim: ts=4 sw=4 et :
| Python |
# $Id: WebInputMixin.py,v 1.10 2006/01/06 21:56:54 tavis_rudd Exp $
"""Provides helpers for Template.webInput(), a method for importing web
transaction variables in bulk. See the docstring of webInput for full details.
Meta-Data
================================================================================
Author: Mike Orr <iron@mso.oz.net>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.10 $
Start Date: 2002/03/17
Last Revision Date: $Date: 2006/01/06 21:56:54 $
"""
__author__ = "Mike Orr <iron@mso.oz.net>"
__revision__ = "$Revision: 1.10 $"[11:-2]
from Cheetah.Utils.Misc import useOrRaise
class NonNumericInputError(ValueError):
    """Raised when a web-input field with a numeric suffix fails conversion."""
##################################################
## PRIVATE FUNCTIONS AND CLASSES
class _Converter:
"""A container object for info about type converters.
.name, string, name of this converter (for error messages).
.func, function, factory function.
.default, value to use or raise if the real value is missing.
.error, value to use or raise if .func() raises an exception.
"""
def __init__(self, name, func, default, error):
self.name = name
self.func = func
self.default = default
self.error = error
def _lookup(name, func, multi, converters):
    """Look up a Webware field/cookie/value/session value. Return
    '(realName, value)' where 'realName' is like 'name' but with any
    conversion suffix stripped off. Applies numeric conversion and
    single vs multi values according to the comments in the source.
    """
    # Step 1 -- split off the conversion suffix from 'name'; e.g. "height:int".
    # If there's no colon, the suffix is "". 'longName' is the name with the
    # suffix, 'shortName' is without.
    # XXX This implementation assumes "height:" means "height".
    colon = name.find(':')
    if colon != -1:
        longName = name
        shortName, ext = name[:colon], name[colon+1:]
    else:
        longName = shortName = name
        ext = ''
    # Step 2 -- look up the values by calling 'func'.
    if longName != shortName:
        # try the suffixed spelling first, fall back to the bare name
        values = func(longName, None) or func(shortName, None)
    else:
        values = func(shortName, None)
    # 'values' is a list of strings, a string or None.
    # Step 3 -- Coerce 'values' to a list of zero, one or more strings.
    if values is None:
        values = []
    elif isinstance(values, str):
        values = [values]
    # Step 4 -- Find a _Converter object or raise TypeError.
    try:
        converter = converters[ext]
    except KeyError:
        fmt = "'%s' is not a valid converter name in '%s'"
        tup = (ext, longName)
        raise TypeError(fmt % tup)
    # Step 5 -- if there's a converter func, run it on each element.
    # If the converter raises an exception, use or raise 'converter.error'.
    if converter.func is not None:
        tmp = values[:]
        values = []
        for elm in tmp:
            try:
                elm = converter.func(elm)
            except (TypeError, ValueError):
                tup = converter.name, elm
                errmsg = "%s '%s' contains invalid characters" % tup
                # converter.error is either a fallback value or an Exception
                # subclass to raise (dispatched by useOrRaise)
                elm = useOrRaise(converter.error, errmsg)
            values.append(elm)
    # 'values' is now a list of strings, ints or floats.
    # Step 6 -- If we're supposed to return a multi value, return the list
    # as is. If we're supposed to return a single value and the list is
    # empty, return or raise 'converter.default'. Otherwise, return the
    # first element in the list and ignore any additional values.
    if multi:
        return shortName, values
    if len(values) == 0:
        return shortName, useOrRaise(converter.default)
    return shortName, values[0]
# vim: sw=4 ts=4 expandtab
| Python |
## statprof.py
## Copyright (C) 2004,2005 Andy Wingo <wingo at pobox dot com>
## Copyright (C) 2001 Rob Browning <rlb at defaultvalue dot org>
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this program; if not, contact:
##
## Free Software Foundation Voice: +1-617-542-5942
## 59 Temple Place - Suite 330 Fax: +1-617-542-2652
## Boston, MA 02111-1307, USA gnu@gnu.org
"""
statprof is intended to be a fairly simple statistical profiler for
python. It was ported directly from a statistical profiler for guile,
also named statprof, available from guile-lib [0].
[0] http://wingolog.org/software/guile-lib/statprof/
To start profiling, call statprof.start():
>>> start()
Then run whatever it is that you want to profile, for example:
>>> import test.pystone; test.pystone.pystones()
Then stop the profiling and print out the results:
>>> stop()
>>> display()
% cumulative self
time seconds seconds name
26.72 1.40 0.37 pystone.py:79:Proc0
13.79 0.56 0.19 pystone.py:133:Proc1
13.79 0.19 0.19 pystone.py:208:Proc8
10.34 0.16 0.14 pystone.py:229:Func2
6.90 0.10 0.10 pystone.py:45:__init__
4.31 0.16 0.06 pystone.py:53:copy
...
All of the numerical data with the exception of the calls column is
statistically approximate. In the following column descriptions, and
in all of statprof, "time" refers to execution time (both user and
system), not wall clock time.
% time
The percent of the time spent inside the procedure itself (not
counting children).
cumulative seconds
The total number of seconds spent in the procedure, including
children.
self seconds
The total number of seconds spent in the procedure itself (not
counting children).
name
The name of the procedure.
By default statprof keeps the data collected from previous runs. If you
want to clear the collected data, call reset():
>>> reset()
reset() can also be used to change the sampling frequency. For example,
to tell statprof to sample 50 times a second:
>>> reset(50)
This means that statprof will sample the call stack after every 1/50 of
a second of user + system time spent running on behalf of the python
process. When your process is idle (for example, blocking in a read(),
as is the case at the listener), the clock does not advance. For this
reason statprof is not currently not suitable for profiling io-bound
operations.
The profiler uses the hash of the code object itself to identify the
procedures, so it won't confuse different procedures with the same name.
They will show up as two different rows in the output.
Right now the profiler is quite simplistic. I cannot provide
call-graphs or other higher level information. What you see in the
table is pretty much all there is. Patches are welcome :-)
Threading
---------
Because signals only get delivered to the main thread in Python,
statprof only profiles the main thread. However because the time
reporting function uses per-process timers, the results can be
significantly off if other threads' work patterns are not similar to the
main thread's work patterns.
Implementation notes
--------------------
The profiler works by setting the unix profiling signal ITIMER_PROF to
go off after the interval you define in the call to reset(). When the
signal fires, a sampling routine is run which looks at the current
procedure that's executing, and then crawls up the stack, and for each
frame encountered, increments that frame's code object's sample count.
Note that if a procedure is encountered multiple times on a given stack,
it is only counted once. After the sampling is complete, the profiler
resets profiling timer to fire again after the appropriate interval.
Meanwhile, the profiler keeps track, via os.times(), how much CPU time
(system and user -- which is also what ITIMER_PROF tracks), has elapsed
while code has been executing within a start()/stop() block.
The profiler also tries to avoid counting or timing its own code as
much as possible.
"""
try:
import itimer
except ImportError:
raise ImportError('''statprof requires the itimer python extension.
To install it, enter the following commands from a terminal:
wget http://www.cute.fi/~torppa/py-itimer/py-itimer.tar.gz
tar zxvf py-itimer.tar.gz
cd py-itimer
sudo python setup.py install
''')
import signal
import os
__all__ = ['start', 'stop', 'reset', 'display']
###########################################################################
## Utils
def clock():
    """Return this process's combined user + system CPU time, in seconds."""
    user, system = os.times()[:2]
    return user + system
###########################################################################
## Collection data structures
class ProfileState(object):
    """Mutable bookkeeping for the profiler: timing totals, sample counts,
    sampling interval, and start()/stop() nesting depth."""

    def __init__(self, frequency=None):
        self.reset(frequency)

    def reset(self, frequency=None):
        """Zero all counters; optionally change the sampling frequency (Hz)."""
        # total CPU time accumulated so far
        self.accumulated_time = 0.0
        # set while the timer is active
        self.last_start_time = None
        # number of sampler invocations
        self.sample_count = 0
        if frequency:
            self.sample_interval = 1.0 / frequency
        elif not hasattr(self, 'sample_interval'):
            # default to 100 Hz; otherwise keep the previous interval
            self.sample_interval = 1.0 / 100.0
        self.remaining_prof_time = None
        # user start/stop nesting depth
        self.profile_level = 0
        # whether to catch apply-frame
        self.count_calls = False
        # gc time between start() and stop()
        self.gc_time_taken = 0

    def accumulate_time(self, stop_time):
        """Fold the CPU time elapsed since last_start_time into the total."""
        elapsed = stop_time - self.last_start_time
        self.accumulated_time += elapsed
# module-level singleton holding all timing/sampling state
state = ProfileState()
## call_data := { code object: CallData }
call_data = {}
class CallData(object):
    """Per-code-object sample counters.

    Instances register themselves in the module-level call_data dict,
    keyed by the code object.
    """

    def __init__(self, code):
        self.name, self.filename, self.lineno = (
            code.co_name, code.co_filename, code.co_firstlineno)
        self.call_count = 0
        self.cum_sample_count = 0
        self.self_sample_count = 0
        call_data[code] = self
def get_call_data(code):
    """Return the CallData for *code*, creating (and registering) one if absent."""
    existing = call_data.get(code)
    if existing is not None:
        return existing
    return CallData(code)
###########################################################################
## SIGPROF handler
def sample_stack_procs(frame):
    """Record one sample: credit *frame*'s code with a self sample, then walk
    up the stack crediting each distinct code object with a cumulative sample.

    A code object appearing multiple times on the stack (recursion) is only
    counted once per sample.
    """
    state.sample_count += 1
    get_call_data(frame.f_code).self_sample_count += 1
    code_seen = {}
    while frame:
        code_seen[frame.f_code] = True
        frame = frame.f_back
    # FIX: iterate the dict directly instead of dict.iterkeys(), which is
    # Python 2 only (removed in Python 3); behavior is identical on both.
    for code in code_seen:
        get_call_data(code).cum_sample_count += 1
def profile_signal_handler(signum, frame):
    """SIGPROF handler: record one sample and re-arm the profiling timer.

    Installed by start(); *frame* is the frame executing when the signal fired.
    Does nothing unless a start()/stop() block is active.
    """
    if state.profile_level > 0:
        # fold the CPU time elapsed since the last (re)start into the total
        state.accumulate_time(clock())
        sample_stack_procs(frame)
        # the timer is one-shot: re-arm it for the next sample interval
        itimer.setitimer(itimer.ITIMER_PROF,
            state.sample_interval, 0.0)
        state.last_start_time = clock()
###########################################################################
## Profiling API
def is_active():
    """Return True while at least one start() call is outstanding."""
    active = state.profile_level > 0
    return active
def start():
    """Begin profiling.

    Nestable: only the outermost call installs the SIGPROF handler and arms
    the ITIMER_PROF timer.
    """
    state.profile_level += 1
    if state.profile_level == 1:
        state.last_start_time = clock()
        # resume any partial interval left over from a previous stop()
        rpt = state.remaining_prof_time
        state.remaining_prof_time = None
        signal.signal(signal.SIGPROF, profile_signal_handler)
        itimer.setitimer(itimer.ITIMER_PROF,
            rpt or state.sample_interval, 0.0)
        state.gc_time_taken = 0 # dunno
def stop():
    """Stop profiling.

    Only the outermost stop() (matching the first start()) disarms the timer;
    the unexpired interval is saved so a later start() can resume it.
    """
    state.profile_level -= 1
    if state.profile_level == 0:
        state.accumulate_time(clock())
        state.last_start_time = None
        # setitimer returns the previous (remaining, interval) pair
        rpt = itimer.setitimer(itimer.ITIMER_PROF, 0.0, 0.0)
        signal.signal(signal.SIGPROF, signal.SIG_IGN)
        state.remaining_prof_time = rpt[0]
        state.gc_time_taken = 0 # dunno
def reset(frequency=None):
    """Discard all collected data and optionally change the sampling
    frequency (in Hz). May only be called while profiling is inactive.
    """
    assert state.profile_level == 0, "Can't reset() while statprof is running"
    call_data.clear()
    state.reset(frequency)
###########################################################################
## Reporting API
class CallStats(object):
    """Display-ready statistics derived from one CallData record.

    Reads the module-level 'state' for the totals; construct only after
    sampling has occurred (display() guards against sample_count == 0,
    which would divide by zero here).
    """

    def __init__(self, call_data):
        self_samples = call_data.self_sample_count
        cum_samples = call_data.cum_sample_count
        nsamples = state.sample_count
        secs_per_sample = state.accumulated_time / nsamples
        basename = os.path.basename(call_data.filename)
        self.name = '%s:%d:%s' % (basename, call_data.lineno, call_data.name)
        # FIX: force float division -- under Python 2, self_samples / nsamples
        # is integer division and truncated every percentage to 0.
        self.pcnt_time_in_proc = self_samples / float(nsamples) * 100
        self.cum_secs_in_proc = cum_samples * secs_per_sample
        self.self_secs_in_proc = self_samples * secs_per_sample
        # call counting is not implemented (see state.count_calls)
        self.num_calls = None
        self.self_secs_per_call = None
        self.cum_secs_per_call = None

    def display(self):
        """Print one formatted row of the statistics table."""
        print('%6.2f %9.2f %9.2f %s' % (self.pcnt_time_in_proc,
                                        self.cum_secs_in_proc,
                                        self.self_secs_in_proc,
                                        self.name))
def display():
    """Print the collected statistics as a table, hottest procedures first."""
    if state.sample_count == 0:
        print('No samples recorded.')
        return
    # FIX: .values() instead of .itervalues() (Python 2 only), and sort with
    # an explicit key -- the old decorate-sort tuples compared CallStats
    # objects on ties, which raises TypeError on Python 3.
    stats = [CallStats(x) for x in call_data.values()]
    stats.sort(key=lambda s: (s.self_secs_in_proc, s.cum_secs_in_proc),
               reverse=True)
    print('%5.5s %10.10s %7.7s %-8.8s' % ('% ', 'cumulative', 'self', ''))
    print('%5.5s %9.9s %8.8s %-8.8s' % ("time", "seconds", "seconds", "name"))
    for entry in stats:
        entry.display()
    print('---')
    print('Sample count: %d' % state.sample_count)
    print('Total time: %f seconds' % state.accumulated_time)
| Python |
"""This is a copy of the htmlDecode function in Webware.
@@TR: It implemented more efficiently.
"""
from Cheetah.Utils.htmlEncode import htmlCodesReversed
def htmlDecode(s, codes=htmlCodesReversed):
    """Return the ASCII decoded version of the given HTML string. This does
    NOT remove normal HTML tags like <p>. It is the inverse of htmlEncode().

    *codes* is a sequence of [character, entity] pairs, applied in order.
    """
    for char, entity in codes:
        s = s.replace(entity, char)
    return s
| Python |
#!/usr/bin/env python
"""
Miscellaneous functions/objects used by Cheetah but also useful standalone.
"""
import os # Used in mkdirsWithPyInitFile.
import sys # Used in die.
##################################################
## MISCELLANEOUS FUNCTIONS
def die(reason):
    """Write *reason* to stderr and terminate the process with exit status 1."""
    message = reason + '\n'
    sys.stderr.write(message)
    sys.exit(1)
def useOrRaise(thing, errmsg=''):
    """Raise thing(errmsg) if *thing* is an Exception subclass; otherwise
    return *thing* unchanged.

    Called by: Cheetah.Servlet.cgiImport()
    """
    is_exception_class = isinstance(thing, type) and issubclass(thing, Exception)
    if is_exception_class:
        raise thing(errmsg)
    return thing
def checkKeywords(dic, legalKeywords, what='argument'):
    """Verify no illegal keyword arguments were passed to a function.

    in : dic, dictionary (**kw in the calling routine).
         legalKeywords, sequence of allowed keyword names.
         what, string, noun used in the error message.
    out: None.
    exc: TypeError if 'dic' contains a key not in 'legalKeywords'.

    called by: Cheetah.Template.__init__()
    """
    for key in dic:
        if key in legalKeywords:
            continue
        raise TypeError("'%s' is not a valid %s" % (key, what))
def removeFromList(list_, *elements):
    """Same as list_.remove(each element) but silently skip elements that are
    not present. Modifies 'list_' in place! Returns None.
    """
    for element in elements:
        if element in list_:
            list_.remove(element)
def mkdirsWithPyInitFiles(path):
    """Same as os.makedirs (mkdir 'path' and all missing parent directories)
    but also drop an empty '__init__.py' into every directory it creates.
    Does nothing (and adds no '__init__.py') for directories that already
    exist.
    """
    parent = os.path.dirname(path)
    if parent and not os.path.exists(parent):
        # recurse so every newly created ancestor gets its own __init__.py
        mkdirsWithPyInitFiles(parent)
    if os.path.exists(path):
        return
    os.mkdir(path)
    # open and close to produce an empty file
    open(os.path.join(path, "__init__.py"), 'w').close()
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
"""
Indentation maker.
@@TR: this code is unsupported and largely undocumented ...
This version is based directly on code by Robert Kuzelj
<robert_kuzelj@yahoo.com> and uses his directive syntax. Some classes and
attributes have been renamed. Indentation is output via
$self._CHEETAH__indenter.indent() to prevent '_indenter' being looked up on the
searchList and another one being found. The directive syntax will
soon be changed somewhat.
"""
import re
import sys
def indentize(source):
    """Run *source* through the #indent directive preprocessor."""
    processor = IndentProcessor()
    return processor.process(source)
class IndentProcessor(object):
    """Preprocess #indent tags, rewriting each directive (and each plain
    template line) into $self._CHEETAH__indenter calls."""
    LINE_SEP = '\n'
    ARGS = "args"
    INDENT_DIR = re.compile(r'[ \t]*#indent[ \t]*(?P<args>.*)')
    DIRECTIVE = re.compile(r"[ \t]*#")
    WS = "ws"
    WHITESPACES = re.compile(r"(?P<ws>[ \t]*)")
    INC = "++"
    DEC = "--"
    SET = "="
    CHAR = "char"
    ON = "on"
    OFF = "off"
    PUSH = "push"
    POP = "pop"

    def process(self, _txt):
        """Rewrite the template text line by line and return the result."""
        result = []
        for line in _txt.splitlines():
            match = self.INDENT_DIR.match(line)
            if match:
                # line is an #indent directive
                args = match.group(self.ARGS).strip()
                if args == self.ON:
                    line = "#silent $self._CHEETAH__indenter.on()"
                elif args == self.OFF:
                    line = "#silent $self._CHEETAH__indenter.off()"
                elif args == self.INC:
                    line = "#silent $self._CHEETAH__indenter.inc()"
                elif args == self.DEC:
                    line = "#silent $self._CHEETAH__indenter.dec()"
                elif args.startswith(self.SET):
                    level = int(args[1:])
                    line = "#silent $self._CHEETAH__indenter.setLevel(%(level)d)" % {"level": level}
                elif args.startswith('chars'):
                    # FIX: this branch previously referenced an undefined
                    # 'level' variable and emitted a call to the nonexistent
                    # setChars() -- it raised NameError whenever used. Emit
                    # setChar() with the parsed chars value instead.
                    # NOTE(review): eval of template-supplied text -- only
                    # process trusted templates.
                    self.indentChars = eval(args.split('=')[1])
                    line = "#silent $self._CHEETAH__indenter.setChar(%(chars)r)" % {"chars": self.indentChars}
                elif args.startswith(self.PUSH):
                    line = "#silent $self._CHEETAH__indenter.push()"
                elif args.startswith(self.POP):
                    line = "#silent $self._CHEETAH__indenter.pop()"
            else:
                match = self.DIRECTIVE.match(line)
                if not match:
                    # not another directive: prefix an indent() call sized by
                    # the line's leading whitespace (tabs expand to 4)
                    match = self.WHITESPACES.match(line)
                    if match:
                        size = len(match.group("ws").expandtabs(4))
                        line = ("${self._CHEETAH__indenter.indent(%(size)d)}" % {"size": size}) + line.lstrip()
                    else:
                        line = "${self._CHEETAH__indenter.indent(0)}" + line
            result.append(line)
        return self.LINE_SEP.join(result)
class Indenter(object):
    """
    A class that keeps track of the current indentation level.
    .indent() returns the appropriate amount of indentation.
    """
    # Class-level defaults kept for backward compatibility with code that
    # reads them off the class directly.
    On = 1
    Level = 0
    Chars = ' '
    LevelStack = []

    def __init__(self):
        # FIX: state was previously mutated through class attributes, so the
        # level stack (via push()/pop()) was shared between ALL Indenter
        # instances. Give each instance its own state.
        self.On = 1
        self.Level = 0
        # NOTE(review): default indent string taken from the original class
        # attribute -- confirm the intended width.
        self.Chars = ' '
        self.LevelStack = []

    def on(self):
        self.On = 1

    def off(self):
        self.On = 0

    def inc(self):
        self.Level += 1

    def dec(self):
        """decrement can only be applied to values greater zero
        values below zero don't make any sense at all!"""
        if self.Level > 0:
            self.Level -= 1

    def push(self):
        self.LevelStack.append(self.Level)

    def pop(self):
        """the levelstack can not become -1. any attempt to do so
        sets the level to 0!"""
        if len(self.LevelStack) > 0:
            self.Level = self.LevelStack.pop()
        else:
            self.Level = 0

    def setLevel(self, _level):
        """the level can't be less than zero. any attempt to do so
        sets the level automatically to zero!"""
        if _level < 0:
            self.Level = 0
        else:
            self.Level = _level

    def setChar(self, _chars):
        self.Chars = _chars

    def indent(self, _default=0):
        # when indenting is on, emit Chars repeated Level times; when off,
        # fall back to _default single spaces
        if self.On:
            return self.Chars * self.Level
        return " " * _default
| Python |
"""This is a copy of the htmlEncode function in Webware.
@@TR: It implemented more efficiently.
"""
# FIX: the entity strings had been destroyed (each pair read ['&', '&'] etc.,
# making htmlEncode an identity function). Restore the standard character ->
# entity-reference mapping. '&' MUST stay first so that entities introduced by
# the later replacements are not themselves re-escaped.
htmlCodes = [
    ['&', '&amp;'],
    ['<', '&lt;'],
    ['>', '&gt;'],
    ['"', '&quot;'],
]
# Reversed copy used by htmlDecode() so '&amp;' is decoded last.
htmlCodesReversed = htmlCodes[:]
htmlCodesReversed.reverse()
def htmlEncode(s, codes=htmlCodes):
    """ Returns the HTML encoded version of the given string. This is useful to
    display a plain ASCII text string on a web page."""
    for code in codes:
        s = s.replace(code[0], code[1])
    return s
| Python |
#
| Python |
import sys
import os.path
import copy as copyModule
from ConfigParser import ConfigParser
import re
from tokenize import Intnumber, Floatnumber, Number
from types import *
import types
import new
import time
from StringIO import StringIO # not cStringIO because of unicode support
import imp # used by SettingsManager.updateSettingsFromPySrcFile()
# compiled pattern for plain Python numeric literals (Number from tokenize)
numberRE = re.compile(Number)
# complex numbers like "(1+2j)": optional parens, '+' between the two parts
complexNumberRE = re.compile('[\(]*' +Number + r'[ \t]*\+[ \t]*' + Number + '[\)]*')
# NOTE(review): the types.*Type constants below are Python 2 only (removed in
# Python 3)
convertableToStrTypes = (StringType, IntType, FloatType,
                         LongType, ComplexType, NoneType,
                         UnicodeType)
##################################################
## FUNCTIONS ##
def mergeNestedDictionaries(dict1, dict2, copy=False, deepcopy=False):
    """Recursively merge the values of dict2 into dict1 and return dict1
    (or its copy when 'copy'/'deepcopy' is set).

    This little function is very handy for selectively overriding settings in a
    settings dictionary that has a nested structure.
    """
    if copy:
        dict1 = copyModule.copy(dict1)
    elif deepcopy:
        dict1 = copyModule.deepcopy(dict1)
    # .items() instead of .iteritems(): works on Python 2 and 3
    for key, val in dict2.items():
        if key in dict1 and isinstance(val, dict) and isinstance(dict1[key], dict):
            # FIX: propagate the copy flags into the recursion -- previously
            # they were dropped, so copy=True still mutated the nested dicts
            # of the original dict1.
            dict1[key] = mergeNestedDictionaries(dict1[key], val,
                                                 copy=copy, deepcopy=deepcopy)
        else:
            dict1[key] = val
    return dict1
def stringIsNumber(S):
    """Return True if S represents a Python number, False otherwise.

    This also works for complex numbers and numbers with +/- in front.
    """
    S = S.strip()
    # FIX: empty or whitespace-only input previously raised IndexError on S[0]
    if not S:
        return False
    if S[0] in '-+' and len(S) > 1:
        S = S[1:].strip()
    match = complexNumberRE.match(S)
    if not match:
        match = numberRE.match(S)
    # the regex must consume the entire (sign-stripped) string
    if not match or (match.end() != len(S)):
        return False
    else:
        return True
def convStringToNum(theString):
    """Convert a string representation of a Python number to the Python version.

    Raises this module's Error if theString fails stringIsNumber().
    """
    if not stringIsNumber(theString):
        raise Error(theString + ' cannot be converted to a Python number')
    # NOTE(review): eval with empty globals/locals; safety rests entirely on
    # stringIsNumber()'s regexes rejecting anything but numeric literals.
    return eval(theString, {}, {})
class Error(Exception):
    """Base exception for this module's settings machinery."""
class NoDefault(object):
    """Sentinel marking 'no default supplied' (see SettingsManager.setting)."""
class ConfigParserCaseSensitive(ConfigParser):
    """A case sensitive version of the standard Python ConfigParser."""

    def optionxform(self, optionstr):
        """Return option names unchanged, instead of lower-casing them as the
        default implementation does."""
        return optionstr
class _SettingsCollector(object):
    """An abstract base class that provides the methods SettingsManager uses to
    collect settings from config files and strings.

    This class only collects settings it doesn't modify the _settings dictionary
    of SettingsManager instances in any way.
    """

    # ConfigParser subclass used for .ini-style input; preserves option case
    _ConfigParserClass = ConfigParserCaseSensitive

    def readSettingsFromModule(self, mod, ignoreUnderscored=True):
        """Returns all settings from a Python module.

        Attributes whose names start with '_' are skipped when
        'ignoreUnderscored' is true.
        """
        S = {}
        attrs = vars(mod)
        # NOTE(review): dict.iteritems() is Python 2 only (removed in Py3)
        for k, v in attrs.iteritems():
            if (ignoreUnderscored and k.startswith('_')):
                continue
            else:
                S[k] = v
        return S

    def readSettingsFromPySrcStr(self, theString):
        """Return a dictionary of the settings in a Python src string."""
        # SECURITY NOTE(review): exec's the supplied source with 'self' in
        # scope -- never feed this untrusted input.
        globalsDict = {'True': (1==1),
                       'False': (0==1),
                       }
        newSettings = {'self':self}
        exec((theString+os.linesep), globalsDict, newSettings)
        del newSettings['self']
        # NOTE(review): the 'new' module is Python 2 only; the Py3 equivalent
        # is types.ModuleType
        module = new.module('temp_settings_module')
        module.__dict__.update(newSettings)
        return self.readSettingsFromModule(module)

    def readSettingsFromConfigFileObj(self, inFile, convert=True):
        """Return the settings from a config file that uses the syntax accepted by
        Python's standard ConfigParser module (like Windows .ini files).

        NOTE:
        this method maintains case unlike the ConfigParser module, unless this
        class was initialized with the 'caseSensitive' keyword set to False.

        All setting values are initially parsed as strings. However, If the
        'convert' arg is True this method will do the following value
        conversions:
        * all Python numeric literals will be coverted from string to number
        * The string 'None' will be converted to the Python value None
        * The string 'True' will be converted to a Python truth value
        * The string 'False' will be converted to a Python false value
        * Any string starting with 'python:' will be treated as a Python literal
          or expression that needs to be eval'd. This approach is useful for
          declaring lists and dictionaries.

        If a config section titled 'Globals' is present the options defined
        under it will be treated as top-level settings.
        """
        p = self._ConfigParserClass()
        # NOTE(review): readfp() is deprecated (removed in Python 3.12);
        # the modern spelling is read_file()
        p.readfp(inFile)
        sects = p.sections()
        newSettings = {}
        # NOTE(review): the two preceding lines are repeated verbatim here --
        # redundant but harmless
        sects = p.sections()
        newSettings = {}
        for s in sects:
            newSettings[s] = {}
            for o in p.options(s):
                # '__name__' is ConfigParser bookkeeping, not a real option
                if o != '__name__':
                    newSettings[s][o] = p.get(s, o)
        ## loop through new settings -> deal with global settings, numbers,
        ## booleans and None ++ also deal with 'importSettings' commands
        for sect, subDict in newSettings.items():
            for key, val in subDict.items():
                if convert:
                    # SECURITY NOTE(review): eval of config-file text
                    if val.lower().startswith('python:'):
                        subDict[key] = eval(val[7:], {}, {})
                    if val.lower() == 'none':
                        subDict[key] = None
                    if val.lower() == 'true':
                        subDict[key] = True
                    if val.lower() == 'false':
                        subDict[key] = False
                    if stringIsNumber(val):
                        subDict[key] = convStringToNum(val)
                ## now deal with any 'importSettings' commands
                if key.lower() == 'importsettings':
                    # NOTE(review): readSettingsFromPySrcFile is not defined in
                    # this class -- presumably provided by a subclass or a part
                    # of the file outside this view; confirm.
                    if val.find(';') < 0:
                        importedSettings = self.readSettingsFromPySrcFile(val)
                    else:
                        # "path; key" form: import then pick one nested entry
                        path = val.split(';')[0]
                        rest = ''.join(val.split(';')[1:]).strip()
                        parentDict = self.readSettingsFromPySrcFile(path)
                        importedSettings = eval('parentDict["' + rest + '"]')
                    subDict.update(mergeNestedDictionaries(subDict,
                                                           importedSettings))
            if sect.lower() == 'globals':
                # promote [Globals] options to top-level settings
                newSettings.update(newSettings[sect])
                del newSettings[sect]
        return newSettings
class SettingsManager(_SettingsCollector):
    """A mixin class that provides facilities for managing application settings.

    SettingsManager is designed to work well with nested settings dictionaries
    of any depth.
    """

    def __init__(self):
        super(SettingsManager, self).__init__()
        self._settings = {}
        self._initializeSettings()

    def _defaultSettings(self):
        """Hook: return the default settings dictionary (none by default)."""
        return {}

    def _initializeSettings(self):
        """A hook for complex setting initialization sequences that involve
        references to 'self' or other settings, e.g.:

            self._settings['myCalcVal'] = self._settings['someVal'] * 15

        Called from __init__(); this dummy implementation is meant to be
        reimplemented by subclasses as needed.
        """
        pass

    ## core post startup methods

    def setting(self, name, default=NoDefault):
        """Look up a single setting; without a default, a missing name
        raises KeyError."""
        if default is NoDefault:
            return self._settings[name]
        return self._settings.get(name, default)

    def hasSetting(self, key):
        """Return True if 'key' is a known setting."""
        return key in self._settings

    def setSetting(self, name, value):
        """Store 'value' under 'name' in self._settings."""
        self._settings[name] = value

    def settings(self):
        """Return the live settings dictionary (not a copy)."""
        return self._settings

    def copySettings(self):
        """Return a shallow copy of the settings dictionary."""
        return copyModule.copy(self._settings)

    def deepcopySettings(self):
        """Return a deep copy of the settings dictionary."""
        return copyModule.deepcopy(self._settings)

    def updateSettings(self, newSettings, merge=True):
        """Fold 'newSettings' in: nested merge when 'merge' is true,
        plain overwrite otherwise."""
        if not merge:
            self._settings.update(newSettings)
            return
        mergeNestedDictionaries(self._settings, newSettings)

    ## source specific update methods

    def updateSettingsFromPySrcStr(self, theString, merge=True):
        """Update the settings from code in a Python source string."""
        newSettings = self.readSettingsFromPySrcStr(theString)
        doMerge = newSettings.get('mergeSettings', merge)
        self.updateSettings(newSettings, merge=doMerge)

    def updateSettingsFromConfigFileObj(self, inFile, convert=True, merge=True):
        """Update the settings from an open config-file object.

        The caller of this method is responsible for closing 'inFile'."""
        newSettings = self.readSettingsFromConfigFileObj(inFile, convert=convert)
        doMerge = newSettings.get('mergeSettings', merge)
        self.updateSettings(newSettings, merge=doMerge)

    def updateSettingsFromConfigStr(self, configStr, convert=True, merge=True):
        """Update the settings from config-file syntax held in a string."""
        inFile = StringIO('[globals]\n' + configStr)
        newSettings = self.readSettingsFromConfigFileObj(inFile, convert=convert)
        doMerge = newSettings.get('mergeSettings', merge)
        self.updateSettings(newSettings, merge=doMerge)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.